--- old/hotspot/.hgignore 2009-08-01 04:05:28.048975957 +0100
+++ new/hotspot/.hgignore 2009-08-01 04:05:27.977295593 +0100
@@ -1,171 +1,8 @@
-^build/solaris/solaris_sparc_compiler1/
-^build/solaris/solaris_sparc_compiler2/
-^build/solaris/solaris_sparc_core/
-^build/solaris/solaris_sparc_kernel/
-^build/solaris/solaris_sparc_docs/
-^build/solaris/jdk-solaris-sparc/
-^build/solaris/export-solaris-sparc/
-^build/solaris/solaris_sparcv9_compiler1/
-^build/solaris/solaris_sparcv9_compiler2/
-^build/solaris/solaris_sparcv9_core/
-^build/solaris/solaris_sparcv9_kernel/
-^build/solaris/solaris_sparcv9_docs/
-^build/solaris/jdk-solaris-sparcv9/
-^build/solaris/export-solaris-sparcv9/
-^build/solaris/solaris_sparc32_compiler1/
-^build/solaris/solaris_sparc32_compiler2/
-^build/solaris/solaris_sparc32_core/
-^build/solaris/solaris_sparc32_kernel/
-^build/solaris/solaris_sparc32_docs/
-^build/solaris/jdk-solaris-sparc32/
-^build/solaris/export-solaris-sparc32/
-^build/solaris/solaris_sparc64_compiler1/
-^build/solaris/solaris_sparc64_compiler2/
-^build/solaris/solaris_sparc64_core/
-^build/solaris/solaris_sparc64_kernel/
-^build/solaris/solaris_sparc64_docs/
-^build/solaris/jdk-solaris-sparc64/
-^build/solaris/export-solaris-sparc64/
-^build/solaris/solaris_i486_compiler1/
-^build/solaris/solaris_i486_compiler2/
-^build/solaris/solaris_i486_core/
-^build/solaris/solaris_i486_kernel/
-^build/solaris/solaris_i486_docs/
-^build/solaris/jdk-solaris-i486/
-^build/solaris/export-solaris-i486/
-^build/solaris/solaris_i386_compiler1/
-^build/solaris/solaris_i386_compiler2/
-^build/solaris/solaris_i386_core/
-^build/solaris/solaris_i386_kernel/
-^build/solaris/solaris_i386_docs/
-^build/solaris/jdk-solaris-i386/
-^build/solaris/export-solaris-i386/
-^build/solaris/solaris_amd64_compiler1/
-^build/solaris/solaris_amd64_compiler2/
-^build/solaris/solaris_amd64_core/
-^build/solaris/solaris_amd64_kernel/
-^build/solaris/solaris_amd64_docs/
-^build/solaris/jdk-solaris-amd64/
-^build/solaris/export-solaris-amd64/
-^build/solaris/solaris_x64_compiler1/
-^build/solaris/solaris_x64_compiler2/
-^build/solaris/solaris_x64_core/
-^build/solaris/solaris_x64_kernel/
-^build/solaris/solaris_x64_docs/
-^build/solaris/jdk-solaris-x64/
-^build/solaris/export-solaris-x64/
-^build/windows/windows_sparc_compiler1/
-^build/windows/windows_sparc_compiler2/
-^build/windows/windows_sparc_core/
-^build/windows/windows_sparc_kernel/
-^build/windows/windows_sparc_docs/
-^build/windows/jdk-windows-sparc/
-^build/windows/export-windows-sparc/
-^build/windows/windows_sparcv9_compiler1/
-^build/windows/windows_sparcv9_compiler2/
-^build/windows/windows_sparcv9_core/
-^build/windows/windows_sparcv9_kernel/
-^build/windows/windows_sparcv9_docs/
-^build/windows/jdk-windows-sparcv9/
-^build/windows/export-windows-sparcv9/
-^build/windows/windows_sparc32_compiler1/
-^build/windows/windows_sparc32_compiler2/
-^build/windows/windows_sparc32_core/
-^build/windows/windows_sparc32_kernel/
-^build/windows/windows_sparc32_docs/
-^build/windows/jdk-windows-sparc32/
-^build/windows/export-windows-sparc32/
-^build/windows/windows_sparc64_compiler1/
-^build/windows/windows_sparc64_compiler2/
-^build/windows/windows_sparc64_core/
-^build/windows/windows_sparc64_kernel/
-^build/windows/windows_sparc64_docs/
-^build/windows/jdk-windows-sparc64/
-^build/windows/export-windows-sparc64/
-^build/windows/windows_i486_compiler1/
-^build/windows/windows_i486_compiler2/
-^build/windows/windows_i486_core/
-^build/windows/windows_i486_kernel/
-^build/windows/windows_i486_docs/
-^build/windows/jdk-windows-i486/
-^build/windows/export-windows-i486/
-^build/windows/windows_i386_compiler1/
-^build/windows/windows_i386_compiler2/
-^build/windows/windows_i386_core/
-^build/windows/windows_i386_kernel/
-^build/windows/windows_i386_docs/
-^build/windows/jdk-windows-i386/
-^build/windows/export-windows-i386/
-^build/windows/windows_amd64_compiler1/
-^build/windows/windows_amd64_compiler2/
-^build/windows/windows_amd64_core/
-^build/windows/windows_amd64_kernel/
-^build/windows/windows_amd64_docs/
-^build/windows/jdk-windows-amd64/
-^build/windows/export-windows-amd64/
-^build/windows/windows_x64_compiler1/
-^build/windows/windows_x64_compiler2/
-^build/windows/windows_x64_core/
-^build/windows/windows_x64_kernel/
-^build/windows/windows_x64_docs/
-^build/windows/jdk-windows-x64/
-^build/windows/export-windows-x64/
-^build/linux/linux_sparc_compiler1/
-^build/linux/linux_sparc_compiler2/
-^build/linux/linux_sparc_core/
-^build/linux/linux_sparc_kernel/
-^build/linux/linux_sparc_docs/
-^build/linux/jdk-linux-sparc/
-^build/linux/export-linux-sparc/
-^build/linux/linux_sparcv9_compiler1/
-^build/linux/linux_sparcv9_compiler2/
-^build/linux/linux_sparcv9_core/
-^build/linux/linux_sparcv9_kernel/
-^build/linux/linux_sparcv9_docs/
-^build/linux/jdk-linux-sparcv9/
-^build/linux/export-linux-sparcv9/
-^build/linux/linux_sparc32_compiler1/
-^build/linux/linux_sparc32_compiler2/
-^build/linux/linux_sparc32_core/
-^build/linux/linux_sparc32_kernel/
-^build/linux/linux_sparc32_docs/
-^build/linux/jdk-linux-sparc32/
-^build/linux/export-linux-sparc32/
-^build/linux/linux_sparc64_compiler1/
-^build/linux/linux_sparc64_compiler2/
-^build/linux/linux_sparc64_core/
-^build/linux/linux_sparc64_kernel/
-^build/linux/linux_sparc64_docs/
-^build/linux/jdk-linux-sparc64/
-^build/linux/export-linux-sparc64/
-^build/linux/linux_i486_compiler1/
-^build/linux/linux_i486_compiler2/
-^build/linux/linux_i486_core/
-^build/linux/linux_i486_kernel/
-^build/linux/linux_i486_docs/
-^build/linux/jdk-linux-i486/
-^build/linux/export-linux-i486/
-^build/linux/linux_i386_compiler1/
-^build/linux/linux_i386_compiler2/
-^build/linux/linux_i386_core/
-^build/linux/linux_i386_kernel/
-^build/linux/linux_i386_docs/
-^build/linux/jdk-linux-i386/
-^build/linux/export-linux-i386/
-^build/linux/linux_amd64_compiler1/
-^build/linux/linux_amd64_compiler2/
-^build/linux/linux_amd64_core/
-^build/linux/linux_amd64_kernel/
-^build/linux/linux_amd64_docs/
-^build/linux/jdk-linux-amd64/
-^build/linux/export-linux-amd64/
-^build/linux/linux_x64_compiler1/
-^build/linux/linux_x64_compiler2/
-^build/linux/linux_x64_core/
-^build/linux/linux_x64_kernel/
-^build/linux/linux_x64_docs/
-^build/linux/jdk-linux-x64/
-^build/linux/export-linux-x64/
+^build/
 ^dist/
 ^webrev/
 ^nbproject/private/
+^src/share/tools/hsdis/bin/
+^src/share/tools/IdealGraphVisualizer/[a-zA-Z0-9]*/build/
+^src/share/tools/IdealGraphVisualizer/build/
+^src/share/tools/IdealGraphVisualizer/dist/
--- old/hotspot/.hgtags 2009-08-01 04:05:28.959673159 +0100
+++ new/hotspot/.hgtags 2009-08-01 04:05:28.889227342 +0100
@@ -15,3 +15,22 @@
 e0966f42f76cca6102bb6d5a1ab19dd04f18fa33 jdk6-b14
 03b2be188619124f38798681c4939f206d263cef jdk6-b15
 ad38cd87d9dfd454a3f1dfd12aa706fe1b666c63 jdk6-b16
+a61af66fc99eb5ec9d50c05b0c599757b1289ceb jdk7-b24
+7836be3e92d0a4f9ee7566f602c91f5609534e66 jdk7-b25
+ad0b851458ff9d1d490ed2d79bb84f75a9fdb753 jdk7-b26
+e3d2692f8442e2d951166dc9bd9a330684754438 jdk7-b27
+c14dab40ed9bf45ad21150bd70c9c80cdf655415 jdk7-b28
+4f91c08b3e4498213a9c5a24898f7d9c38cf86fb jdk7-b29
+d1605aabd0a15ecf93787c47de63073c33fba52d jdk7-b30
+9c2ecc2ffb125f14fab3857fe7689598956348a0 jdk7-b31
+b727c32788a906c04839516ae7443a085185a300 jdk7-b32
+585535ec8a14adafa6bfea65d6975e29094c8cec jdk7-b33
+5251a9cd8eb8743eee647365bee1c8afdc131556 jdk7-b34
+5fa96a5a7e76da7c8dad12486293a0456c2c116c jdk7-b35
+e91159f921a58af3698e6479ea1fc5818da66d09 jdk7-b36
+9ee9cf798b59e7d51f8c0a686959f313867a55d6 jdk7-b37
+d9bc824aa078573829bb66572af847e26e1bd12e jdk7-b38
+49ca90d77f34571b0757ebfcb8a7848ef2696b88 jdk7-b39
+81a0cbe3b28460ce836109934ece03db7afaf9cc jdk7-b40
+f9d938ede1960d18cb7cf23c645b026519c1a678 jdk7-b41
+ad8c8ca4ab0f4c86e74c061958f44a8f4a930f2c jdk7-b42
--- old/hotspot/agent/make/Makefile 2009-08-01 04:05:30.715812778 +0100
+++ new/hotspot/agent/make/Makefile 2009-08-01 04:05:30.639092251 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved.
+# Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -241,26 +241,9 @@
 ALLFILES := $(patsubst %,$(SRC_DIR)/%,$(FILELIST))
 ALLFILES := $(shell /bin/ls $(ALLFILES))
-EXTRA_JAR = ../closed/src/share/lib/maf-1_0.jar
 # tools.jar is used by the sa-jdi binding
 CLASSPATH = $(JDK_HOME)/lib/tools.jar
-
-
-noGui := $(shell test -r $(EXTRA_JAR); echo $$?)
-ifeq "$(noGui)" "1"
- # the EXTRA_JAR is in the closed/ area because of open source issues. If
- # it doesn't exist, then delete from ALLFILES the files that depend on it.
- # This means that that HSDB, the GUI debugger for HotSpot, won't be built
- # in an open source build.
- ALLFILES := $(patsubst $(SRC_DIR)/sun/jvm/hotspot/ui/%,,$(ALLFILES))
- ALLFILES := $(patsubst $(SRC_DIR)/sun/jvm/hotspot/bugspot/%,,$(ALLFILES))
- ALLFILES := $(patsubst $(SRC_DIR)/sun/jvm/hotspot/HSDB.java,,$(ALLFILES))
- MSG = \" WARNING:::: $(EXTRA_JAR) is missing; won't build the GUI part of SA\"
-else
- CLASSPATH := $(EXTRA_JAR)$(CPS)$(CLASSPATH)
-endif
-
 CLASSPATH := $(subst \,/,$(CLASSPATH))
 
 # FIXME: autogenerate call to rmic
@@ -298,7 +281,7 @@
 	cp -r $(SRC_DIR)/images/* $(OUTPUT_DIR)/
 
 .PHONY: filelist
-filelist:
+filelist: $(ALLFILES)
 	@if [ ! -f $(JDK_HOME)/lib/tools.jar ] ; then \
 	echo "Missing $(JDK_HOME)/lib/tools.jar file. Use 1.6.0 or later version jdk to build SA."; \
 	echo ""; \
@@ -321,8 +304,11 @@
 sizes: filelist
 	wc -l `cat filelist`
 
-cscope: filelist
-	cscope -b -i filelist -f java.out
+cscope: $(ALLFILES)
+	rm -f java.files
+	echo $(ALLFILES) > java.files
+	cscope -b -i java.files -f java.out
+	rm -f java.files
 
 .PHONY: sa.jar
 sa.jar:
--- old/hotspot/agent/make/bugspot.bat 2009-08-01 04:05:31.595891102 +0100
+++ new/hotspot/agent/make/bugspot.bat 2009-08-01 04:05:31.522523940 +0100
@@ -1,5 +1,5 @@
 REM
-REM Copyright 2002-2003 Sun Microsystems, Inc. All Rights Reserved.
+REM Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved.
 REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
REM REM This code is free software; you can redistribute it and/or modify it --- old/hotspot/agent/make/build-pkglist 2009-08-01 04:05:32.519568974 +0100 +++ new/hotspot/agent/make/build-pkglist 2009-08-01 04:05:32.449148597 +0100 @@ -8,4 +8,4 @@ SED=$MKS_HOME/sed SORT=$MKS_HOME/sort -$CD ../src/share/classes; $FIND sun/jvm/hotspot \( -name SCCS -prune \) -o -type d -print | $SED -e 's/\//./g' | $SORT > ../../../make/pkglist.txt +$CD ../src/share/classes; $FIND sun/jvm/hotspot com/sun/java/swing -type d -print | $SED -e 's/\//./g' | $SORT > ../../../make/pkglist.txt --- old/hotspot/agent/make/build.xml 2009-08-01 04:05:33.312948700 +0100 +++ new/hotspot/agent/make/build.xml 2009-08-01 04:05:33.226520935 +0100 @@ -1,6 +1,6 @@ Matcher::_in_arg_limit, unaligned -// h ^ | in | 5 +// h ^ | in | 5 // | | args | 4 Holes in incoming args owned by SELF // | | | | 3 // | | +--------+ @@ -3087,14 +3117,14 @@ // | | pad2 | 2 pad to align old SP // | +--------+ 1 // | | locks | 0 -// | +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned +// | +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned // | | pad1 | 11 pad to align new SP // | +--------+ // | | | 10 // | | spills | 9 spills // V | | 8 (pad0 slot for callee) // -----------+--------+----> Matcher::_out_arg_limit, unaligned -// ^ | out | 7 +// ^ | out | 7 // | | args | 6 Holes in outgoing args owned by CALLEE // Owned by +--------+ // CALLEE | new out| 6 Empty on Intel, window on Sparc @@ -3102,17 +3132,17 @@ // | SP-+--------+----> Matcher::_new_SP, even aligned // | | | // -// Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is +// Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is // known from SELF's arguments and the Java calling convention. // Region 6-7 is determined per call site. -// Note 2: If the calling convention leaves holes in the incoming argument +// Note 2: If the calling convention leaves holes in the incoming argument // area, those holes are owned by SELF. Holes in the outgoing area // are owned by the CALLEE. Holes should not be nessecary in the // incoming area, as the Java calling convention is completely under // the control of the AD file. Doubles can be sorted and packed to // avoid holes. Holes in the outgoing arguments may be nessecary for // varargs C calling conventions. -// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is +// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is // even aligned with pad0 as needed. // Region 6 is even aligned. Region 6-7 is NOT even aligned; // region 6-11 is even aligned; it may be padded out more so that @@ -3122,7 +3152,7 @@ // What direction does stack grow in (assumed to be same for native & Java) stack_direction(TOWARDS_LOW); - // These two registers define part of the calling convention + // These two registers define part of the calling convention // between compiled code and the interpreter. inline_cache_reg(R_G5); // Inline Cache Register or methodOop for I2C interpreter_method_oop_reg(R_G5); // Method Oop Register when calling interpreter @@ -3147,7 +3177,7 @@ // Number of stack slots between incoming argument block and the start of // a new frame. The PROLOG must add this many slots to the stack. The - // EPILOG must remove this many slots. + // EPILOG must remove this many slots. 
in_preserve_stack_slots(0); // Number of outgoing stack slots killed above the out_preserve_stack_slots @@ -3170,7 +3200,7 @@ // Body of function which returns an OptoRegs array locating // arguments either in registers or in stack slots for calling // java - calling_convention %{ + calling_convention %{ (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing); %} @@ -3191,17 +3221,17 @@ c_return_value %{ assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); #ifdef _LP64 - static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; - static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; - static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; - static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; + static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; + static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; + static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; + static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; #else // !_LP64 - static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num }; - static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num }; - static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num }; - static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num }; + static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num }; + static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num }; + static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num }; + static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num }; #endif - return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], + return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], (is_outgoing?lo_out:lo_in)[ideal_reg] ); %} @@ -3209,17 +3239,17 @@ return_value %{ assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); #ifdef _LP64 - static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; - static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; - static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; - static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; + static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; + static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, 
OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; + static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; + static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; #else // !_LP64 - static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num }; - static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; - static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num }; - static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; + static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num }; + static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; + static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num }; + static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; #endif - return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], + return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], (is_outgoing?lo_out:lo_in)[ideal_reg] ); %} @@ -3236,7 +3266,7 @@ ins_attrib ins_pc_relative(0); // Required PC Relative flag ins_attrib ins_short_branch(0); // Required flag: is this instruction a // non-matching short branch variant of some - // long branch? + // long branch? //----------OPERANDS----------------------------------------------------------- // Operand definitions must precede instruction definitions for correct parsing @@ -3251,7 +3281,7 @@ op_cost(0); // formats are generated automatically for constants and base registers - format %{ %} + format %{ %} interface(CONST_INTER); %} @@ -3379,7 +3409,7 @@ op_cost(5); // formats are generated automatically for constants and base registers - format %{ %} + format %{ %} interface(CONST_INTER); %} @@ -3388,7 +3418,7 @@ match(ConP); op_cost(0); - format %{ %} + format %{ %} interface(CONST_INTER); %} @@ -3397,7 +3427,7 @@ match(ConP); op_cost(0); - format %{ %} + format %{ %} interface(CONST_INTER); %} @@ -3406,7 +3436,28 @@ match(ConP); // formats are generated automatically for constants and base registers - format %{ %} + format %{ %} + interface(CONST_INTER); +%} + +// Pointer Immediate +operand immN() +%{ + match(ConN); + + op_cost(10); + format %{ %} + interface(CONST_INTER); +%} + +// NULL Pointer Immediate +operand immN0() +%{ + predicate(n->get_narrowcon() == 0); + match(ConN); + + op_cost(0); + format %{ %} interface(CONST_INTER); %} @@ -3414,7 +3465,7 @@ match(ConL); op_cost(40); // formats are generated automatically for constants and base registers - format %{ %} + format %{ %} interface(CONST_INTER); %} @@ -3423,7 +3474,7 @@ match(ConL); op_cost(0); // formats are generated automatically for constants and base registers - format %{ %} + format %{ %} interface(CONST_INTER); %} @@ -3460,7 +3511,7 @@ #ifdef _LP64 // on 64-bit architectures this comparision is faster predicate(jlong_cast(n->getd()) == 0); -#else +#else predicate((n->getd() == 0) && (fpclass(n->getd()) == FP_PZERO)); #endif match(ConD); @@ -3674,6 +3725,14 @@ interface(REG_INTER); %} 
+operand iRegN() %{ + constraint(ALLOC_IN_RC(int_reg)); + match(RegN); + + format %{ %} + interface(REG_INTER); +%} + // Long Register operand iRegL() %{ constraint(ALLOC_IN_RC(long_reg)); @@ -3707,6 +3766,14 @@ interface(REG_INTER); %} +operand g3RegL() %{ + constraint(ALLOC_IN_RC(g3_regL)); + match(iRegL); + + format %{ %} + interface(REG_INTER); +%} + // Int Register safe // This is 64bit safe operand iRegIsafe() %{ @@ -3889,7 +3956,7 @@ // we have no particularly good way to embed oops in // single instructions. -// Indirect with Register Index +// Indirect with Register Index operand indIndex(iRegP addr, iRegX index) %{ constraint(ALLOC_IN_RC(ptr_reg)); match(AddP addr index); @@ -5007,8 +5074,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "LDF $src,$dst\t! stkI to regF" %} - opcode(Assembler::ldf_op3); - ins_encode(form3_mem_reg(src, dst)); + opcode(Assembler::ldf_op3); + ins_encode(simple_form3_mem_reg(src, dst)); ins_pipe(floadF_stk); %} @@ -5018,8 +5085,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "LDDF $src,$dst\t! stkL to regD" %} - opcode(Assembler::lddf_op3); - ins_encode(form3_mem_reg(src, dst)); + opcode(Assembler::lddf_op3); + ins_encode(simple_form3_mem_reg(src, dst)); ins_pipe(floadD_stk); %} @@ -5029,8 +5096,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "STF $src,$dst\t! regF to stkI" %} - opcode(Assembler::stf_op3); - ins_encode(form3_mem_reg(dst, src)); + opcode(Assembler::stf_op3); + ins_encode(simple_form3_mem_reg(dst, src)); ins_pipe(fstoreF_stk_reg); %} @@ -5041,7 +5108,7 @@ size(4); format %{ "STDF $src,$dst\t! regD to stkL" %} opcode(Assembler::stdf_op3); - ins_encode(form3_mem_reg(dst, src)); + ins_encode(simple_form3_mem_reg(dst, src)); ins_pipe(fstoreD_stk_reg); %} @@ -5052,7 +5119,7 @@ format %{ "STW $src,$dst.hi\t! long\n\t" "STW R_G0,$dst.lo" %} opcode(Assembler::stw_op3); - ins_encode(form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0)); + ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0)); ins_pipe(lstoreI_stk_reg); %} @@ -5062,8 +5129,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "STX $src,$dst\t! regL to stkD" %} - opcode(Assembler::stx_op3); - ins_encode( form3_mem_reg( dst, src ) ); + opcode(Assembler::stx_op3); + ins_encode(simple_form3_mem_reg( dst, src ) ); ins_pipe(istore_stk_reg); %} @@ -5076,8 +5143,8 @@ size(4); format %{ "LDUW $src,$dst\t!stk" %} - opcode(Assembler::lduw_op3); - ins_encode( form3_mem_reg( src, dst ) ); + opcode(Assembler::lduw_op3); + ins_encode(simple_form3_mem_reg( src, dst ) ); ins_pipe(iload_mem); %} @@ -5088,8 +5155,8 @@ size(4); format %{ "STW $src,$dst\t!stk" %} - opcode(Assembler::stw_op3); - ins_encode( form3_mem_reg( dst, src ) ); + opcode(Assembler::stw_op3); + ins_encode(simple_form3_mem_reg( dst, src ) ); ins_pipe(istore_mem_reg); %} @@ -5100,8 +5167,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "LDX $src,$dst\t! long" %} - opcode(Assembler::ldx_op3); - ins_encode( form3_mem_reg( src, dst ) ); + opcode(Assembler::ldx_op3); + ins_encode(simple_form3_mem_reg( src, dst ) ); ins_pipe(iload_mem); %} @@ -5112,8 +5179,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "STX $src,$dst\t! 
long" %} - opcode(Assembler::stx_op3); - ins_encode( form3_mem_reg( dst, src ) ); + opcode(Assembler::stx_op3); + ins_encode(simple_form3_mem_reg( dst, src ) ); ins_pipe(istore_mem_reg); %} @@ -5124,8 +5191,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "LDX $src,$dst\t!ptr" %} - opcode(Assembler::ldx_op3); - ins_encode( form3_mem_reg( src, dst ) ); + opcode(Assembler::ldx_op3); + ins_encode(simple_form3_mem_reg( src, dst ) ); ins_pipe(iload_mem); %} @@ -5135,8 +5202,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "STX $src,$dst\t!ptr" %} - opcode(Assembler::stx_op3); - ins_encode( form3_mem_reg( dst, src ) ); + opcode(Assembler::stx_op3); + ins_encode(simple_form3_mem_reg( dst, src ) ); ins_pipe(istore_mem_reg); %} #else // _LP64 @@ -5145,8 +5212,8 @@ match(Set dst src); ins_cost(MEMORY_REF_COST); format %{ "LDUW $src,$dst\t!ptr" %} - opcode(Assembler::lduw_op3, Assembler::ldst_op); - ins_encode( form3_mem_reg( src, dst ) ); + opcode(Assembler::lduw_op3, Assembler::ldst_op); + ins_encode(simple_form3_mem_reg( src, dst ) ); ins_pipe(iload_mem); %} @@ -5155,8 +5222,8 @@ match(Set dst src); ins_cost(MEMORY_REF_COST); format %{ "STW $src,$dst\t!ptr" %} - opcode(Assembler::stw_op3, Assembler::ldst_op); - ins_encode( form3_mem_reg( dst, src ) ); + opcode(Assembler::stw_op3, Assembler::ldst_op); + ins_encode(simple_form3_mem_reg( dst, src ) ); ins_pipe(istore_mem_reg); %} #endif // _LP64 @@ -5167,7 +5234,7 @@ ins_cost(0); format %{ "NOP ! Alu Pipeline" %} - opcode(Assembler::or_op3, Assembler::arith_op); + opcode(Assembler::or_op3, Assembler::arith_op); ins_encode( form2_nop() ); ins_pipe(ialu_nop_A0); %} @@ -5177,7 +5244,7 @@ ins_cost(0); format %{ "NOP ! Alu Pipeline" %} - opcode(Assembler::or_op3, Assembler::arith_op); + opcode(Assembler::or_op3, Assembler::arith_op); ins_encode( form2_nop() ); ins_pipe(ialu_nop_A1); %} @@ -5217,9 +5284,9 @@ ins_cost(MEMORY_REF_COST); size(4); - format %{ "LDSB $mem,$dst" %} - opcode(Assembler::ldsb_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + format %{ "LDSB $mem,$dst" %} + opcode(Assembler::ldsb_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(iload_mask_mem); %} @@ -5229,9 +5296,9 @@ ins_cost(MEMORY_REF_COST); size(4); - format %{ "LDUB $mem,$dst" %} - opcode(Assembler::ldub_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + format %{ "LDUB $mem,$dst" %} + opcode(Assembler::ldub_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(iload_mask_mem); %} @@ -5241,9 +5308,9 @@ ins_cost(MEMORY_REF_COST); size(4); - format %{ "LDUB $mem,$dst" %} - opcode(Assembler::ldub_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + format %{ "LDUB $mem,$dst" %} + opcode(Assembler::ldub_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(iload_mask_mem); %} @@ -5254,8 +5321,8 @@ size(4); format %{ "LDUH $mem,$dst" %} - opcode(Assembler::lduh_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + opcode(Assembler::lduh_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(iload_mask_mem); %} @@ -5265,9 +5332,9 @@ ins_cost(MEMORY_REF_COST); size(4); - format %{ "LDUH $mem,$dst" %} - opcode(Assembler::lduh_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + format %{ "LDUH $mem,$dst" %} + opcode(Assembler::lduh_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(iload_mask_mem); %} @@ -5277,9 +5344,9 @@ ins_cost(MEMORY_REF_COST); size(4); - format %{ "LDUW $mem,$dst" %} - opcode(Assembler::lduw_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + format %{ "LDUW $mem,$dst" %} + opcode(Assembler::lduw_op3); + 
ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(iload_mem); %} @@ -5289,8 +5356,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "LDX $mem,$dst\t! long" %} - opcode(Assembler::ldx_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + opcode(Assembler::ldx_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(iload_mem); %} @@ -5304,8 +5371,8 @@ "\tLDUW $mem ,$dst\n" "\tSLLX #32, $dst, $dst\n" "\tOR $dst, R_O7, $dst" %} - opcode(Assembler::lduw_op3); - ins_encode( form3_mem_reg_long_unaligned_marshal( mem, dst )); + opcode(Assembler::lduw_op3); + ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst )); ins_pipe(iload_mem); %} @@ -5315,8 +5382,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "LDDF $mem,$dst\t! packed8B" %} - opcode(Assembler::lddf_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + opcode(Assembler::lddf_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(floadD_mem); %} @@ -5326,8 +5393,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "LDDF $mem,$dst\t! packed4C" %} - opcode(Assembler::lddf_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + opcode(Assembler::lddf_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(floadD_mem); %} @@ -5337,8 +5404,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "LDDF $mem,$dst\t! packed4S" %} - opcode(Assembler::lddf_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + opcode(Assembler::lddf_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(floadD_mem); %} @@ -5348,8 +5415,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "LDDF $mem,$dst\t! packed2I" %} - opcode(Assembler::lddf_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + opcode(Assembler::lddf_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(floadD_mem); %} @@ -5360,8 +5427,8 @@ size(4); format %{ "LDUW $mem,$dst\t! range" %} - opcode(Assembler::lduw_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + opcode(Assembler::lduw_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(iload_mem); %} @@ -5372,8 +5439,8 @@ size(4); format %{ "LDF $mem,$dst\t! for fitos/fitod" %} - opcode(Assembler::ldf_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + opcode(Assembler::ldf_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(floadF_mem); %} @@ -5385,15 +5452,35 @@ #ifndef _LP64 format %{ "LDUW $mem,$dst\t! ptr" %} - opcode(Assembler::lduw_op3, 0, REGP_OP); + opcode(Assembler::lduw_op3, 0, REGP_OP); #else format %{ "LDX $mem,$dst\t! ptr" %} - opcode(Assembler::ldx_op3, 0, REGP_OP); + opcode(Assembler::ldx_op3, 0, REGP_OP); #endif ins_encode( form3_mem_reg( mem, dst ) ); ins_pipe(iload_mem); %} +// Load Compressed Pointer +instruct loadN(iRegN dst, memory mem) %{ + match(Set dst (LoadN mem)); + ins_cost(MEMORY_REF_COST); + size(4); + + format %{ "LDUW $mem,$dst\t! compressed ptr" %} + ins_encode %{ + Register base = as_Register($mem$$base); + Register index = as_Register($mem$$index); + Register dst = $dst$$Register; + if (index != G0) { + __ lduw(base, index, dst); + } else { + __ lduw(base, $mem$$disp, dst); + } + %} + ins_pipe(iload_mem); +%} + // Load Klass Pointer instruct loadKlass(iRegP dst, memory mem) %{ match(Set dst (LoadKlass mem)); @@ -5402,24 +5489,45 @@ #ifndef _LP64 format %{ "LDUW $mem,$dst\t! klass ptr" %} - opcode(Assembler::lduw_op3, 0, REGP_OP); + opcode(Assembler::lduw_op3, 0, REGP_OP); #else format %{ "LDX $mem,$dst\t! 
klass ptr" %} - opcode(Assembler::ldx_op3, 0, REGP_OP); + opcode(Assembler::ldx_op3, 0, REGP_OP); #endif ins_encode( form3_mem_reg( mem, dst ) ); ins_pipe(iload_mem); %} +// Load narrow Klass Pointer +instruct loadNKlass(iRegN dst, memory mem) %{ + match(Set dst (LoadNKlass mem)); + ins_cost(MEMORY_REF_COST); + size(4); + + format %{ "LDUW $mem,$dst\t! compressed klass ptr" %} + + ins_encode %{ + Register base = as_Register($mem$$base); + Register index = as_Register($mem$$index); + Register dst = $dst$$Register; + if (index != G0) { + __ lduw(base, index, dst); + } else { + __ lduw(base, $mem$$disp, dst); + } + %} + ins_pipe(iload_mem); +%} + // Load Short (16bit signed) instruct loadS(iRegI dst, memory mem) %{ match(Set dst (LoadS mem)); ins_cost(MEMORY_REF_COST); size(4); - format %{ "LDSH $mem,$dst" %} - opcode(Assembler::ldsh_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + format %{ "LDSH $mem,$dst" %} + opcode(Assembler::ldsh_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(iload_mask_mem); %} @@ -5430,8 +5538,8 @@ size(4); format %{ "LDDF $mem,$dst" %} - opcode(Assembler::lddf_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + opcode(Assembler::lddf_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(floadD_mem); %} @@ -5442,7 +5550,7 @@ size(8); format %{ "LDF $mem ,$dst.hi\t! misaligned double\n" "\tLDF $mem+4,$dst.lo\t!" %} - opcode(Assembler::ldf_op3); + opcode(Assembler::ldf_op3); ins_encode( form3_mem_reg_double_unaligned( mem, dst )); ins_pipe(iload_mem); %} @@ -5454,8 +5562,8 @@ size(4); format %{ "LDF $mem,$dst" %} - opcode(Assembler::ldf_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + opcode(Assembler::ldf_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(floadF_mem); %} @@ -5510,6 +5618,26 @@ ins_pipe(loadConP_poll); %} +instruct loadConN0(iRegN dst, immN0 src) %{ + match(Set dst src); + + size(4); + format %{ "CLR $dst\t! compressed NULL ptr" %} + ins_encode( SetNull( dst ) ); + ins_pipe(ialu_imm); +%} + +instruct loadConN(iRegN dst, immN src) %{ + match(Set dst src); + ins_cost(DEFAULT_COST * 3/2); + format %{ "SET $src,$dst\t! compressed ptr" %} + ins_encode %{ + Register dst = $dst$$Register; + __ set_narrow_oop((jobject)$src$$constant, dst); + %} + ins_pipe(ialu_hi_lo_reg); +%} + instruct loadConL(iRegL dst, immL src, o7RegL tmp) %{ // %%% maybe this should work like loadConD match(Set dst src); @@ -5550,7 +5678,7 @@ #endif format %{ "SETHI hi(&$src),$tmp\t!get float $src from table\n\t" - "LDF [$tmp+lo(&$src)],$dst" %} + "LDF [$tmp+lo(&$src)],$dst" %} ins_encode( LdImmF(src, dst, tmp) ); ins_pipe(loadConFD); %} @@ -5566,7 +5694,7 @@ #endif format %{ "SETHI hi(&$src),$tmp\t!get double $src from table\n\t" - "LDDF [$tmp+lo(&$src)],$dst" %} + "LDDF [$tmp+lo(&$src)],$dst" %} ins_encode( LdImmD(src, dst, tmp) ); ins_pipe(loadConFD); %} @@ -5578,8 +5706,8 @@ match( PrefetchRead mem ); ins_cost(MEMORY_REF_COST); - format %{ "PREFETCH $mem,0\t! Prefetch read-many" %} - opcode(Assembler::prefetch_op3); + format %{ "PREFETCH $mem,0\t! Prefetch read-many" %} + opcode(Assembler::prefetch_op3); ins_encode( form3_mem_prefetch_read( mem ) ); ins_pipe(iload_mem); %} @@ -5588,8 +5716,8 @@ match( PrefetchWrite mem ); ins_cost(MEMORY_REF_COST); - format %{ "PREFETCH $mem,2\t! Prefetch write-many (and read)" %} - opcode(Assembler::prefetch_op3); + format %{ "PREFETCH $mem,2\t! 
Prefetch write-many (and read)" %} + opcode(Assembler::prefetch_op3); ins_encode( form3_mem_prefetch_write( mem ) ); ins_pipe(iload_mem); %} @@ -5603,8 +5731,8 @@ size(4); format %{ "STB $src,$mem\t! byte" %} - opcode(Assembler::stb_op3); - ins_encode( form3_mem_reg( mem, src ) ); + opcode(Assembler::stb_op3); + ins_encode(simple_form3_mem_reg( mem, src ) ); ins_pipe(istore_mem_reg); %} @@ -5614,8 +5742,8 @@ size(4); format %{ "STB $src,$mem\t! byte" %} - opcode(Assembler::stb_op3); - ins_encode( form3_mem_reg( mem, R_G0 ) ); + opcode(Assembler::stb_op3); + ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); ins_pipe(istore_mem_zero); %} @@ -5625,8 +5753,8 @@ size(4); format %{ "STB $src,$mem\t! CMS card-mark byte 0" %} - opcode(Assembler::stb_op3); - ins_encode( form3_mem_reg( mem, R_G0 ) ); + opcode(Assembler::stb_op3); + ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); ins_pipe(istore_mem_zero); %} @@ -5637,8 +5765,8 @@ size(4); format %{ "STH $src,$mem\t! short" %} - opcode(Assembler::sth_op3); - ins_encode( form3_mem_reg( mem, src ) ); + opcode(Assembler::sth_op3); + ins_encode(simple_form3_mem_reg( mem, src ) ); ins_pipe(istore_mem_reg); %} @@ -5648,8 +5776,8 @@ size(4); format %{ "STH $src,$mem\t! short" %} - opcode(Assembler::sth_op3); - ins_encode( form3_mem_reg( mem, R_G0 ) ); + opcode(Assembler::sth_op3); + ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); ins_pipe(istore_mem_zero); %} @@ -5660,8 +5788,8 @@ size(4); format %{ "STW $src,$mem" %} - opcode(Assembler::stw_op3); - ins_encode( form3_mem_reg( mem, src ) ); + opcode(Assembler::stw_op3); + ins_encode(simple_form3_mem_reg( mem, src ) ); ins_pipe(istore_mem_reg); %} @@ -5671,8 +5799,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "STX $src,$mem\t! long" %} - opcode(Assembler::stx_op3); - ins_encode( form3_mem_reg( mem, src ) ); + opcode(Assembler::stx_op3); + ins_encode(simple_form3_mem_reg( mem, src ) ); ins_pipe(istore_mem_reg); %} @@ -5682,8 +5810,8 @@ size(4); format %{ "STW $src,$mem" %} - opcode(Assembler::stw_op3); - ins_encode( form3_mem_reg( mem, R_G0 ) ); + opcode(Assembler::stw_op3); + ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); ins_pipe(istore_mem_zero); %} @@ -5693,8 +5821,8 @@ size(4); format %{ "STX $src,$mem" %} - opcode(Assembler::stx_op3); - ins_encode( form3_mem_reg( mem, R_G0 ) ); + opcode(Assembler::stx_op3); + ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); ins_pipe(istore_mem_zero); %} @@ -5705,8 +5833,8 @@ size(4); format %{ "STF $src,$mem\t! after fstoi/fdtoi" %} - opcode(Assembler::stf_op3); - ins_encode( form3_mem_reg( mem, src ) ); + opcode(Assembler::stf_op3); + ins_encode(simple_form3_mem_reg( mem, src ) ); ins_pipe(fstoreF_mem_reg); %} @@ -5718,10 +5846,10 @@ #ifndef _LP64 format %{ "STW $src,$dst\t! ptr" %} - opcode(Assembler::stw_op3, 0, REGP_OP); + opcode(Assembler::stw_op3, 0, REGP_OP); #else format %{ "STX $src,$dst\t! ptr" %} - opcode(Assembler::stx_op3, 0, REGP_OP); + opcode(Assembler::stx_op3, 0, REGP_OP); #endif ins_encode( form3_mem_reg( dst, src ) ); ins_pipe(istore_mem_spORreg); @@ -5734,15 +5862,53 @@ #ifndef _LP64 format %{ "STW $src,$dst\t! ptr" %} - opcode(Assembler::stw_op3, 0, REGP_OP); + opcode(Assembler::stw_op3, 0, REGP_OP); #else format %{ "STX $src,$dst\t! 
ptr" %} - opcode(Assembler::stx_op3, 0, REGP_OP); + opcode(Assembler::stx_op3, 0, REGP_OP); #endif ins_encode( form3_mem_reg( dst, R_G0 ) ); ins_pipe(istore_mem_zero); %} +// Store Compressed Pointer +instruct storeN(memory dst, iRegN src) %{ + match(Set dst (StoreN dst src)); + ins_cost(MEMORY_REF_COST); + size(4); + + format %{ "STW $src,$dst\t! compressed ptr" %} + ins_encode %{ + Register base = as_Register($dst$$base); + Register index = as_Register($dst$$index); + Register src = $src$$Register; + if (index != G0) { + __ stw(src, base, index); + } else { + __ stw(src, base, $dst$$disp); + } + %} + ins_pipe(istore_mem_spORreg); +%} + +instruct storeN0(memory dst, immN0 src) %{ + match(Set dst (StoreN dst src)); + ins_cost(MEMORY_REF_COST); + size(4); + + format %{ "STW $src,$dst\t! compressed ptr" %} + ins_encode %{ + Register base = as_Register($dst$$base); + Register index = as_Register($dst$$index); + if (index != G0) { + __ stw(0, base, index); + } else { + __ stw(0, base, $dst$$disp); + } + %} + ins_pipe(istore_mem_zero); +%} + // Store Double instruct storeD( memory mem, regD src) %{ match(Set mem (StoreD mem src)); @@ -5750,8 +5916,8 @@ size(4); format %{ "STDF $src,$mem" %} - opcode(Assembler::stdf_op3); - ins_encode( form3_mem_reg( mem, src ) ); + opcode(Assembler::stdf_op3); + ins_encode(simple_form3_mem_reg( mem, src ) ); ins_pipe(fstoreD_mem_reg); %} @@ -5761,8 +5927,8 @@ size(4); format %{ "STX $src,$mem" %} - opcode(Assembler::stx_op3); - ins_encode( form3_mem_reg( mem, R_G0 ) ); + opcode(Assembler::stx_op3); + ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); ins_pipe(fstoreD_mem_zero); %} @@ -5773,8 +5939,8 @@ size(4); format %{ "STF $src,$mem" %} - opcode(Assembler::stf_op3); - ins_encode( form3_mem_reg( mem, src ) ); + opcode(Assembler::stf_op3); + ins_encode(simple_form3_mem_reg( mem, src ) ); ins_pipe(fstoreF_mem_reg); %} @@ -5784,8 +5950,8 @@ size(4); format %{ "STW $src,$mem\t! storeF0" %} - opcode(Assembler::stw_op3); - ins_encode( form3_mem_reg( mem, R_G0 ) ); + opcode(Assembler::stw_op3); + ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); ins_pipe(fstoreF_mem_zero); %} @@ -5795,19 +5961,63 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "STDF $src,$mem\t! 
packed8B" %} - opcode(Assembler::stdf_op3); - ins_encode( form3_mem_reg( mem, src ) ); + opcode(Assembler::stdf_op3); + ins_encode(simple_form3_mem_reg( mem, src ) ); ins_pipe(fstoreD_mem_reg); %} +// Convert oop pointer into compressed form +instruct encodeHeapOop(iRegN dst, iRegP src) %{ + predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull); + match(Set dst (EncodeP src)); + format %{ "encode_heap_oop $src, $dst" %} + ins_encode %{ + __ encode_heap_oop($src$$Register, $dst$$Register); + %} + ins_pipe(ialu_reg); +%} + +instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{ + predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull); + match(Set dst (EncodeP src)); + format %{ "encode_heap_oop_not_null $src, $dst" %} + ins_encode %{ + __ encode_heap_oop_not_null($src$$Register, $dst$$Register); + %} + ins_pipe(ialu_reg); +%} + +instruct decodeHeapOop(iRegP dst, iRegN src) %{ + predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull && + n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant); + match(Set dst (DecodeN src)); + format %{ "decode_heap_oop $src, $dst" %} + ins_encode %{ + __ decode_heap_oop($src$$Register, $dst$$Register); + %} + ins_pipe(ialu_reg); +%} + +instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{ + predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull || + n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant); + match(Set dst (DecodeN src)); + format %{ "decode_heap_oop_not_null $src, $dst" %} + ins_encode %{ + __ decode_heap_oop_not_null($src$$Register, $dst$$Register); + %} + ins_pipe(ialu_reg); +%} + + // Store Zero into Aligned Packed Bytes instruct storeA8B0(memory mem, immI0 zero) %{ match(Set mem (Store8B mem zero)); ins_cost(MEMORY_REF_COST); size(4); format %{ "STX $zero,$mem\t! packed8B" %} - opcode(Assembler::stx_op3); - ins_encode( form3_mem_reg( mem, R_G0 ) ); + opcode(Assembler::stx_op3); + ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); ins_pipe(fstoreD_mem_zero); %} @@ -5817,8 +6027,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "STDF $src,$mem\t! packed4C" %} - opcode(Assembler::stdf_op3); - ins_encode( form3_mem_reg( mem, src ) ); + opcode(Assembler::stdf_op3); + ins_encode(simple_form3_mem_reg( mem, src ) ); ins_pipe(fstoreD_mem_reg); %} @@ -5828,8 +6038,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "STX $zero,$mem\t! packed4C" %} - opcode(Assembler::stx_op3); - ins_encode( form3_mem_reg( mem, R_G0 ) ); + opcode(Assembler::stx_op3); + ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); ins_pipe(fstoreD_mem_zero); %} @@ -5839,8 +6049,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "STDF $src,$mem\t! packed2I" %} - opcode(Assembler::stdf_op3); - ins_encode( form3_mem_reg( mem, src ) ); + opcode(Assembler::stdf_op3); + ins_encode(simple_form3_mem_reg( mem, src ) ); ins_pipe(fstoreD_mem_reg); %} @@ -5850,8 +6060,8 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "STX $zero,$mem\t! 
packed2I" %} - opcode(Assembler::stx_op3); - ins_encode( form3_mem_reg( mem, R_G0 ) ); + opcode(Assembler::stx_op3); + ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); ins_pipe(fstoreD_mem_zero); %} @@ -5964,8 +6174,8 @@ match(Set stkSlot src); // chain rule ins_cost(MEMORY_REF_COST); format %{ "STDF $src,$stkSlot\t!stk" %} - opcode(Assembler::stdf_op3); - ins_encode(form3_mem_reg(stkSlot, src)); + opcode(Assembler::stdf_op3); + ins_encode(simple_form3_mem_reg(stkSlot, src)); ins_pipe(fstoreD_stk_reg); %} @@ -5974,8 +6184,8 @@ match(Set dst stkSlot); // chain rule ins_cost(MEMORY_REF_COST); format %{ "LDDF $stkSlot,$dst\t!stk" %} - opcode(Assembler::lddf_op3); - ins_encode(form3_mem_reg(stkSlot, dst)); + opcode(Assembler::lddf_op3); + ins_encode(simple_form3_mem_reg(stkSlot, dst)); ins_pipe(floadD_stk); %} @@ -5984,8 +6194,8 @@ match(Set stkSlot src); // chain rule ins_cost(MEMORY_REF_COST); format %{ "STF $src,$stkSlot\t!stk" %} - opcode(Assembler::stf_op3); - ins_encode(form3_mem_reg(stkSlot, src)); + opcode(Assembler::stf_op3); + ins_encode(simple_form3_mem_reg(stkSlot, src)); ins_pipe(fstoreF_stk_reg); %} @@ -6025,7 +6235,7 @@ ins_pipe(ialu_imm); %} -instruct cmovII_U_reg(cmpOp cmp, flagsRegU icc, iRegI dst, iRegI src) %{ +instruct cmovII_U_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{ match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); ins_cost(150); size(4); @@ -6034,7 +6244,7 @@ ins_pipe(ialu_reg); %} -instruct cmovII_U_imm(cmpOp cmp, flagsRegU icc, iRegI dst, immI11 src) %{ +instruct cmovII_U_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{ match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); ins_cost(140); size(4); @@ -6061,6 +6271,34 @@ ins_pipe(ialu_imm); %} +// Conditional move for RegN. Only cmov(reg,reg). +instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{ + match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src))); + ins_cost(150); + format %{ "MOV$cmp $pcc,$src,$dst" %} + ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); + ins_pipe(ialu_reg); +%} + +// This instruction also works with CmpN so we don't need cmovNN_reg. +instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{ + match(Set dst (CMoveN (Binary cmp icc) (Binary dst src))); + ins_cost(150); + size(4); + format %{ "MOV$cmp $icc,$src,$dst" %} + ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); + ins_pipe(ialu_reg); +%} + +instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{ + match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src))); + ins_cost(150); + size(4); + format %{ "MOV$cmp $fcc,$src,$dst" %} + ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); + ins_pipe(ialu_reg); +%} + // Conditional move instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{ match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src))); @@ -6078,6 +6316,7 @@ ins_pipe(ialu_imm); %} +// This instruction also works with CmpN so we don't need cmovPN_reg. 
instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{ match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); ins_cost(150); @@ -6129,7 +6368,7 @@ instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{ match(Set dst (CMoveF (Binary cmp icc) (Binary dst src))); ins_cost(150); - + size(4); format %{ "FMOVS$cmp $icc,$src,$dst" %} opcode(0x101); @@ -6137,7 +6376,7 @@ ins_pipe(int_conditional_float_move); %} -// Conditional move, +// Conditional move, instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{ match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src))); ins_cost(150); @@ -6170,7 +6409,7 @@ ins_pipe(int_conditional_double_move); %} -// Conditional move, +// Conditional move, instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{ match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src))); ins_cost(150); @@ -6228,7 +6467,7 @@ // for this guy. instruct tlsLoadP(g2RegP dst) %{ match(Set dst (ThreadLocal)); - + size(0); ins_cost(0); format %{ "# TLS is in G2" %} @@ -6281,7 +6520,7 @@ size(4); format %{ "ADD $src1,$src2,$dst" %} - opcode(Assembler::add_op3, Assembler::arith_op); + opcode(Assembler::add_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6292,7 +6531,7 @@ size(4); format %{ "ADD $src1,$src2,$dst" %} - opcode(Assembler::add_op3, Assembler::arith_op); + opcode(Assembler::add_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6303,7 +6542,7 @@ size(4); format %{ "ADD $src1,$src2,$dst" %} - opcode(Assembler::add_op3, Assembler::arith_op); + opcode(Assembler::add_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6314,7 +6553,7 @@ size(4); format %{ "ADD $src1,$src2,$dst\t! long" %} - opcode(Assembler::add_op3, Assembler::arith_op); + opcode(Assembler::add_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6324,7 +6563,7 @@ size(4); format %{ "ADD $src1,$con,$dst" %} - opcode(Assembler::add_op3, Assembler::arith_op); + opcode(Assembler::add_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6342,10 +6581,10 @@ #ifndef _LP64 size(4); format %{ "LDUW $mem,$dst\t! ptr" %} - opcode(Assembler::lduw_op3, 0, REGP_OP); + opcode(Assembler::lduw_op3, 0, REGP_OP); #else format %{ "LDX $mem,$dst\t! ptr" %} - opcode(Assembler::ldx_op3, 0, REGP_OP); + opcode(Assembler::ldx_op3, 0, REGP_OP); #endif ins_encode( form3_mem_reg( mem, dst ) ); ins_pipe(iload_mem); @@ -6357,46 +6596,37 @@ ins_cost(MEMORY_REF_COST); size(4); format %{ "LDX $mem,$dst\t! long" %} - opcode(Assembler::ldx_op3); - ins_encode( form3_mem_reg( mem, dst ) ); + opcode(Assembler::ldx_op3); + ins_encode(simple_form3_mem_reg( mem, dst ) ); ins_pipe(iload_mem); %} instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{ match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval))); effect( KILL newval ); - format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t" + format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t" "CMP R_G3,$oldval\t\t! 
See if we made progress" %} ins_encode( enc_cas(heap_top_ptr,oldval,newval) ); ins_pipe( long_memory_op ); %} -instruct storeLConditional_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ - match(Set res (StoreLConditional mem_ptr (Binary oldval newval))); - effect( USE mem_ptr, KILL ccr, KILL tmp1); - // Marshal the register pairs into V9 64-bit registers, then do the compare-and-swap - format %{ - "MOV $newval,R_O7\n\t" - "CASXA [$mem_ptr],$oldval,R_O7\t! If $oldval==[$mem_ptr] Then store R_O7 into [$mem_ptr], set R_O7=[$mem_ptr] in any case\n\t" - "CMP $oldval,R_O7\t\t! See if we made progress\n\t" - "MOV 1,$res\n\t" - "MOVne xcc,R_G0,$res" - %} - ins_encode( enc_casx(mem_ptr, oldval, newval), - enc_lflags_ne_to_boolean(res) ); +// Conditional-store of an int value. +instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{ + match(Set icc (StoreIConditional mem_ptr (Binary oldval newval))); + effect( KILL newval ); + format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t" + "CMP $oldval,$newval\t\t! See if we made progress" %} + ins_encode( enc_cas(mem_ptr,oldval,newval) ); ins_pipe( long_memory_op ); %} -instruct storeLConditional_flags(iRegP mem_ptr, iRegL oldval, iRegL newval, flagsRegL xcc, o7RegI tmp1, immI0 zero) %{ - match(Set xcc (CmpI (StoreLConditional mem_ptr (Binary oldval newval)) zero)); - effect( USE mem_ptr, KILL tmp1); - // Marshal the register pairs into V9 64-bit registers, then do the compare-and-swap - format %{ - "MOV $newval,R_O7\n\t" - "CASXA [$mem_ptr],$oldval,R_O7\t! If $oldval==[$mem_ptr] Then store R_O7 into [$mem_ptr], set R_O7=[$mem_ptr] in any case\n\t" - "CMP $oldval,R_O7\t\t! See if we made progress" - %} - ins_encode( enc_casx(mem_ptr, oldval, newval)); +// Conditional-store of a long value. +instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{ + match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval))); + effect( KILL newval ); + format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t" + "CMP $oldval,$newval\t\t! See if we made progress" %} + ins_encode( enc_cas(mem_ptr,oldval,newval) ); ins_pipe( long_memory_op ); %} @@ -6407,7 +6637,7 @@ effect( USE mem_ptr, KILL ccr, KILL tmp1); format %{ "MOV $newval,O7\n\t" - "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" + "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" "CMP $oldval,O7\t\t! See if we made progress\n\t" "MOV 1,$res\n\t" "MOVne xcc,R_G0,$res" @@ -6423,7 +6653,7 @@ effect( USE mem_ptr, KILL ccr, KILL tmp1); format %{ "MOV $newval,O7\n\t" - "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" + "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" "CMP $oldval,O7\t\t! 
See if we made progress\n\t" "MOV 1,$res\n\t" "MOVne icc,R_G0,$res" @@ -6436,27 +6666,35 @@ instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); effect( USE mem_ptr, KILL ccr, KILL tmp1); -#ifdef _LP64 format %{ "MOV $newval,O7\n\t" - "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" + "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" "CMP $oldval,O7\t\t! See if we made progress\n\t" "MOV 1,$res\n\t" "MOVne xcc,R_G0,$res" %} +#ifdef _LP64 ins_encode( enc_casx(mem_ptr, oldval, newval), enc_lflags_ne_to_boolean(res) ); #else + ins_encode( enc_casi(mem_ptr, oldval, newval), + enc_iflags_ne_to_boolean(res) ); +#endif + ins_pipe( long_memory_op ); +%} + +instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ + match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval))); + effect( USE mem_ptr, KILL ccr, KILL tmp1); format %{ "MOV $newval,O7\n\t" - "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" + "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" "CMP $oldval,O7\t\t! See if we made progress\n\t" "MOV 1,$res\n\t" "MOVne icc,R_G0,$res" %} ins_encode( enc_casi(mem_ptr, oldval, newval), enc_iflags_ne_to_boolean(res) ); -#endif ins_pipe( long_memory_op ); %} @@ -6468,7 +6706,7 @@ size(4); format %{ "SUB $src1,$src2,$dst" %} - opcode(Assembler::sub_op3, Assembler::arith_op); + opcode(Assembler::sub_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6479,7 +6717,7 @@ size(4); format %{ "SUB $src1,$src2,$dst" %} - opcode(Assembler::sub_op3, Assembler::arith_op); + opcode(Assembler::sub_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6489,7 +6727,7 @@ size(4); format %{ "NEG $src2,$dst" %} - opcode(Assembler::sub_op3, Assembler::arith_op); + opcode(Assembler::sub_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); ins_pipe(ialu_zero_reg); %} @@ -6500,7 +6738,7 @@ size(4); format %{ "SUB $src1,$src2,$dst\t! long" %} - opcode(Assembler::sub_op3, Assembler::arith_op); + opcode(Assembler::sub_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6511,7 +6749,7 @@ size(4); format %{ "SUB $src1,$con,$dst\t! long" %} - opcode(Assembler::sub_op3, Assembler::arith_op); + opcode(Assembler::sub_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6522,7 +6760,7 @@ size(4); format %{ "NEG $src2,$dst\t! 
long" %} - opcode(Assembler::sub_op3, Assembler::arith_op); + opcode(Assembler::sub_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); ins_pipe(ialu_zero_reg); %} @@ -6535,7 +6773,7 @@ size(4); format %{ "MULX $src1,$src2,$dst" %} - opcode(Assembler::mulx_op3, Assembler::arith_op); + opcode(Assembler::mulx_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(imul_reg_reg); %} @@ -6546,7 +6784,7 @@ size(4); format %{ "MULX $src1,$src2,$dst" %} - opcode(Assembler::mulx_op3, Assembler::arith_op); + opcode(Assembler::mulx_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); ins_pipe(imul_reg_imm); %} @@ -6556,7 +6794,7 @@ ins_cost(DEFAULT_COST * 5); size(4); format %{ "MULX $src1,$src2,$dst\t! long" %} - opcode(Assembler::mulx_op3, Assembler::arith_op); + opcode(Assembler::mulx_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(mulL_reg_reg); %} @@ -6567,7 +6805,7 @@ ins_cost(DEFAULT_COST * 5); size(4); format %{ "MULX $src1,$src2,$dst" %} - opcode(Assembler::mulx_op3, Assembler::arith_op); + opcode(Assembler::mulx_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); ins_pipe(mulL_reg_imm); %} @@ -6579,7 +6817,7 @@ ins_cost((2+71)*DEFAULT_COST); format %{ "SRA $src2,0,$src2\n\t" - "SRA $src1,0,$src1\n\t" + "SRA $src1,0,$src1\n\t" "SDIVX $src1,$src2,$dst" %} ins_encode( idiv_reg( src1, src2, dst ) ); ins_pipe(sdiv_reg_reg); @@ -6629,7 +6867,7 @@ instruct sra_reg_2( iRegI dst, iRegI src ) %{ effect( DEF dst, USE src ); format %{ "SRA $src,2,$dst\t! Used in div-by-10" %} - opcode(Assembler::sra_op3, Assembler::arith_op); + opcode(Assembler::sra_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6651,7 +6889,7 @@ %} %} -// Register Long Division +// Register Long Division instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ match(Set dst (DivL src1 src2)); ins_cost(DEFAULT_COST*71); @@ -6683,7 +6921,7 @@ ins_encode( irem_reg(src1, src2, dst, temp) ); ins_pipe(sdiv_reg_reg); %} - + // Immediate Remainder instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{ match(Set dst (ModI src1 src2)); @@ -6718,7 +6956,7 @@ effect(DEF dst, USE src1, USE src2); size(4); format %{ "MULX $src1,$src2,$dst\t! long" %} - opcode(Assembler::mulx_op3, Assembler::arith_op); + opcode(Assembler::mulx_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(mulL_reg_reg); %} @@ -6728,7 +6966,7 @@ effect(DEF dst, USE src1, USE src2); size(4); format %{ "MULX $src1,$src2,$dst" %} - opcode(Assembler::mulx_op3, Assembler::arith_op); + opcode(Assembler::mulx_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); ins_pipe(mulL_reg_imm); %} @@ -6737,7 +6975,7 @@ effect(DEF dst, USE src1, USE src2); size(4); format %{ "SUB $src1,$src2,$dst\t! long" %} - opcode(Assembler::sub_op3, Assembler::arith_op); + opcode(Assembler::sub_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6746,7 +6984,7 @@ effect(DEF dst, USE src1, USE src2); size(4); format %{ "SUB $src1,$src2,$dst\t! 
long" %} - opcode(Assembler::sub_op3, Assembler::arith_op); + opcode(Assembler::sub_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6764,7 +7002,7 @@ %} %} -// Register Long Remainder +// Register Long Remainder instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ match(Set dst (ModL src1 src2)); ins_cost(DEFAULT_COST*(71 + 6 + 1)); @@ -6784,7 +7022,7 @@ size(4); format %{ "SLL $src1,$src2,$dst" %} - opcode(Assembler::sll_op3, Assembler::arith_op); + opcode(Assembler::sll_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6795,7 +7033,7 @@ size(4); format %{ "SLL $src1,$src2,$dst" %} - opcode(Assembler::sll_op3, Assembler::arith_op); + opcode(Assembler::sll_op3, Assembler::arith_op); ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6806,7 +7044,7 @@ size(4); format %{ "SLLX $src1,$src2,$dst" %} - opcode(Assembler::sllx_op3, Assembler::arith_op); + opcode(Assembler::sllx_op3, Assembler::arith_op); ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6817,7 +7055,7 @@ size(4); format %{ "SLLX $src1,$src2,$dst" %} - opcode(Assembler::sllx_op3, Assembler::arith_op); + opcode(Assembler::sllx_op3, Assembler::arith_op); ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6827,7 +7065,7 @@ match(Set dst (RShiftI src1 src2)); size(4); format %{ "SRA $src1,$src2,$dst" %} - opcode(Assembler::sra_op3, Assembler::arith_op); + opcode(Assembler::sra_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6838,7 +7076,7 @@ size(4); format %{ "SRA $src1,$src2,$dst" %} - opcode(Assembler::sra_op3, Assembler::arith_op); + opcode(Assembler::sra_op3, Assembler::arith_op); ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6849,7 +7087,7 @@ size(4); format %{ "SRAX $src1,$src2,$dst" %} - opcode(Assembler::srax_op3, Assembler::arith_op); + opcode(Assembler::srax_op3, Assembler::arith_op); ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6860,7 +7098,7 @@ size(4); format %{ "SRAX $src1,$src2,$dst" %} - opcode(Assembler::srax_op3, Assembler::arith_op); + opcode(Assembler::srax_op3, Assembler::arith_op); ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6871,7 +7109,7 @@ size(4); format %{ "SRL $src1,$src2,$dst" %} - opcode(Assembler::srl_op3, Assembler::arith_op); + opcode(Assembler::srl_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6882,7 +7120,7 @@ size(4); format %{ "SRL $src1,$src2,$dst" %} - opcode(Assembler::srl_op3, Assembler::arith_op); + opcode(Assembler::srl_op3, Assembler::arith_op); ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6893,7 +7131,7 @@ size(4); format %{ "SRLX $src1,$src2,$dst" %} - opcode(Assembler::srlx_op3, Assembler::arith_op); + opcode(Assembler::srlx_op3, Assembler::arith_op); ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -6904,7 +7142,7 @@ size(4); format %{ "SRLX $src1,$src2,$dst" %} - opcode(Assembler::srlx_op3, Assembler::arith_op); + opcode(Assembler::srlx_op3, Assembler::arith_op); ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6915,7 +7153,7 @@ match(Set dst (URShiftL (CastP2X src1) src2)); size(4); format %{ "SRLX 
$src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %} - opcode(Assembler::srlx_op3, Assembler::arith_op); + opcode(Assembler::srlx_op3, Assembler::arith_op); ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -6924,7 +7162,7 @@ match(Set dst (URShiftI (CastP2X src1) src2)); size(4); format %{ "SRL $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %} - opcode(Assembler::srl_op3, Assembler::arith_op); + opcode(Assembler::srl_op3, Assembler::arith_op); ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -7044,7 +7282,7 @@ size(4); format %{ "FNEGs $src,$dst" %} - opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf); + opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf); ins_encode(form3_opf_rs2F_rdF(src, dst)); ins_pipe(faddF_reg); %} @@ -7085,7 +7323,7 @@ size(4); format %{ "AND $src1,$src2,$dst" %} - opcode(Assembler::and_op3, Assembler::arith_op); + opcode(Assembler::and_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -7096,7 +7334,7 @@ size(4); format %{ "AND $src1,$src2,$dst" %} - opcode(Assembler::and_op3, Assembler::arith_op); + opcode(Assembler::and_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -7108,7 +7346,7 @@ ins_cost(DEFAULT_COST); size(4); format %{ "AND $src1,$src2,$dst\t! long" %} - opcode(Assembler::and_op3, Assembler::arith_op); + opcode(Assembler::and_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -7119,7 +7357,7 @@ ins_cost(DEFAULT_COST); size(4); format %{ "AND $src1,$con,$dst\t! long" %} - opcode(Assembler::and_op3, Assembler::arith_op); + opcode(Assembler::and_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -7131,7 +7369,7 @@ size(4); format %{ "OR $src1,$src2,$dst" %} - opcode(Assembler::or_op3, Assembler::arith_op); + opcode(Assembler::or_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -7142,7 +7380,7 @@ size(4); format %{ "OR $src1,$src2,$dst" %} - opcode(Assembler::or_op3, Assembler::arith_op); + opcode(Assembler::or_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -7154,7 +7392,7 @@ ins_cost(DEFAULT_COST); size(4); format %{ "OR $src1,$src2,$dst\t! long" %} - opcode(Assembler::or_op3, Assembler::arith_op); + opcode(Assembler::or_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -7166,11 +7404,39 @@ ins_cost(DEFAULT_COST); size(4); format %{ "OR $src1,$con,$dst\t! long" %} - opcode(Assembler::or_op3, Assembler::arith_op); + opcode(Assembler::or_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); ins_pipe(ialu_reg_imm); %} +#ifndef _LP64 + +// Use sp_ptr_RegP to match G2 (TLS register) without spilling. +instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{ + match(Set dst (OrI src1 (CastP2X src2))); + + size(4); + format %{ "OR $src1,$src2,$dst" %} + opcode(Assembler::or_op3, Assembler::arith_op); + ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); + ins_pipe(ialu_reg_reg); +%} + +#else + +instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{ + match(Set dst (OrL src1 (CastP2X src2))); + + ins_cost(DEFAULT_COST); + size(4); + format %{ "OR $src1,$src2,$dst\t! 
long" %} + opcode(Assembler::or_op3, Assembler::arith_op); + ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); + ins_pipe(ialu_reg_reg); +%} + +#endif + // Xor Instructions // Register Xor instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ @@ -7178,7 +7444,7 @@ size(4); format %{ "XOR $src1,$src2,$dst" %} - opcode(Assembler::xor_op3, Assembler::arith_op); + opcode(Assembler::xor_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -7189,7 +7455,7 @@ size(4); format %{ "XOR $src1,$src2,$dst" %} - opcode(Assembler::xor_op3, Assembler::arith_op); + opcode(Assembler::xor_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -7201,7 +7467,7 @@ ins_cost(DEFAULT_COST); size(4); format %{ "XOR $src1,$src2,$dst\t! long" %} - opcode(Assembler::xor_op3, Assembler::arith_op); + opcode(Assembler::xor_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -7212,7 +7478,7 @@ ins_cost(DEFAULT_COST); size(4); format %{ "XOR $src1,$con,$dst\t! long" %} - opcode(Assembler::xor_op3, Assembler::arith_op); + opcode(Assembler::xor_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); ins_pipe(ialu_reg_imm); %} @@ -7225,7 +7491,7 @@ effect( KILL ccr ); ins_cost(DEFAULT_COST*2); format %{ "CMP R_G0,$src\n\t" - "ADDX R_G0,0,$dst" %} + "ADDX R_G0,0,$dst" %} ins_encode( enc_to_bool( src, dst ) ); ins_pipe(ialu_reg_ialu); %} @@ -7236,7 +7502,7 @@ effect( KILL ccr ); ins_cost(DEFAULT_COST*2); format %{ "CMP R_G0,$src\n\t" - "ADDX R_G0,0,$dst" %} + "ADDX R_G0,0,$dst" %} ins_encode( enc_to_bool( src, dst ) ); ins_pipe(ialu_reg_ialu); %} @@ -7256,9 +7522,9 @@ effect( KILL ccr ); ins_cost(DEFAULT_COST*4); format %{ "CMP $p,$q\n\t" - "MOV #0,$dst\n\t" - "BLT,a .+8\n\t" - "MOV #-1,$dst" %} + "MOV #0,$dst\n\t" + "BLT,a .+8\n\t" + "MOV #-1,$dst" %} ins_encode( enc_ltmask(p,q,dst) ); ins_pipe(ialu_reg_reg_ialu); %} @@ -7269,8 +7535,8 @@ ins_cost(DEFAULT_COST*3); format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t" - "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" - "MOVl $tmp,$p\t! p' < 0 ? p'+y : p'" %} + "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" + "MOVl $tmp,$p\t! p' < 0 ? p'+y : p'" %} ins_encode( enc_cadd_cmpLTMask(p, q, y, tmp) ); ins_pipe( cadd_cmpltmask ); %} @@ -7281,8 +7547,8 @@ ins_cost(DEFAULT_COST*3); format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t" - "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" - "MOVl $tmp,$p\t! p' < 0 ? p'+y : p'" %} + "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" + "MOVl $tmp,$p\t! p' < 0 ? p'+y : p'" %} ins_encode( enc_cadd_cmpLTMask(p, q, y, tmp) ); ins_pipe( cadd_cmpltmask ); %} @@ -7294,7 +7560,7 @@ match(Set dst (ConvD2F src)); size(4); format %{ "FDTOS $src,$dst" %} - opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf); + opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf); ins_encode(form3_opf_rs2D_rdF(src, dst)); ins_pipe(fcvtD2F); %} @@ -7305,10 +7571,10 @@ instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{ effect(DEF dst, USE src, KILL fcc0); format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" - "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" - "FDTOI $src,$dst\t! convert in delay slot\n\t" - "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" - "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" + "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" + "FDTOI $src,$dst\t! convert in delay slot\n\t" + "FITOS $dst,$dst\t! 
change NaN/max-int to valid float\n\t" + "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" "skip:" %} ins_encode(form_d2i_helper(src,dst)); ins_pipe(fcvtD2I); @@ -7329,10 +7595,10 @@ instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{ effect(DEF dst, USE src, KILL fcc0); format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" - "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" - "FDTOX $src,$dst\t! convert in delay slot\n\t" - "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" - "FSUBd $dst,$dst,$dst\t! cleared only if nan\n" + "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" + "FDTOX $src,$dst\t! convert in delay slot\n\t" + "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" + "FSUBd $dst,$dst,$dst\t! cleared only if nan\n" "skip:" %} ins_encode(form_d2l_helper(src,dst)); ins_pipe(fcvtD2L); @@ -7363,10 +7629,10 @@ instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{ effect(DEF dst, USE src, KILL fcc0); format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" - "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" - "FSTOI $src,$dst\t! convert in delay slot\n\t" - "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" - "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" + "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" + "FSTOI $src,$dst\t! convert in delay slot\n\t" + "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" + "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" "skip:" %} ins_encode(form_f2i_helper(src,dst)); ins_pipe(fcvtF2I); @@ -7386,10 +7652,10 @@ instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{ effect(DEF dst, USE src, KILL fcc0); format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" - "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" - "FSTOX $src,$dst\t! convert in delay slot\n\t" - "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" - "FSUBd $dst,$dst,$dst\t! cleared only if nan\n" + "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" + "FSTOX $src,$dst\t! convert in delay slot\n\t" + "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" + "FSUBd $dst,$dst,$dst\t! cleared only if nan\n" "skip:" %} ins_encode(form_f2l_helper(src,dst)); ins_pipe(fcvtF2L); @@ -7431,8 +7697,8 @@ size(8); format %{ "LDF $mem,$dst\n\t" "FITOD $dst,$dst" %} - opcode(Assembler::ldf_op3, Assembler::fitod_opf); - ins_encode( form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); + opcode(Assembler::ldf_op3, Assembler::fitod_opf); + ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); ins_pipe(floadF_mem); %} @@ -7461,8 +7727,8 @@ size(8); format %{ "LDF $mem,$dst\n\t" "FITOS $dst,$dst" %} - opcode(Assembler::ldf_op3, Assembler::fitos_opf); - ins_encode( form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); + opcode(Assembler::ldf_op3, Assembler::fitos_opf); + ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); ins_pipe(floadF_mem); %} @@ -7471,7 +7737,7 @@ match(Set dst (ConvI2L src)); size(4); format %{ "SRA $src,0,$dst\t! int->long" %} - opcode(Assembler::sra_op3, Assembler::arith_op); + opcode(Assembler::sra_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -7481,7 +7747,7 @@ match(Set dst (AndL (ConvI2L src) mask) ); size(4); format %{ "SRL $src,0,$dst\t! 
zero-extend int to long" %} - opcode(Assembler::srl_op3, Assembler::arith_op); + opcode(Assembler::srl_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -7491,7 +7757,7 @@ match(Set dst (AndL src mask) ); size(4); format %{ "SRL $src,0,$dst\t! zero-extend long" %} - opcode(Assembler::srl_op3, Assembler::arith_op); + opcode(Assembler::srl_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); ins_pipe(ialu_reg_reg); %} @@ -7503,8 +7769,8 @@ size(4); format %{ "LDUW $src,$dst\t! MoveF2I" %} - opcode(Assembler::lduw_op3); - ins_encode( form3_mem_reg( src, dst ) ); + opcode(Assembler::lduw_op3); + ins_encode(simple_form3_mem_reg( src, dst ) ); ins_pipe(iload_mem); %} @@ -7515,8 +7781,8 @@ size(4); format %{ "LDF $src,$dst\t! MoveI2F" %} - opcode(Assembler::ldf_op3); - ins_encode(form3_mem_reg(src, dst)); + opcode(Assembler::ldf_op3); + ins_encode(simple_form3_mem_reg(src, dst)); ins_pipe(floadF_stk); %} @@ -7527,8 +7793,8 @@ size(4); format %{ "LDX $src,$dst\t! MoveD2L" %} - opcode(Assembler::ldx_op3); - ins_encode( form3_mem_reg( src, dst ) ); + opcode(Assembler::ldx_op3); + ins_encode(simple_form3_mem_reg( src, dst ) ); ins_pipe(iload_mem); %} @@ -7539,8 +7805,8 @@ size(4); format %{ "LDDF $src,$dst\t! MoveL2D" %} - opcode(Assembler::lddf_op3); - ins_encode(form3_mem_reg(src, dst)); + opcode(Assembler::lddf_op3); + ins_encode(simple_form3_mem_reg(src, dst)); ins_pipe(floadD_stk); %} @@ -7551,8 +7817,8 @@ size(4); format %{ "STF $src,$dst\t!MoveF2I" %} - opcode(Assembler::stf_op3); - ins_encode(form3_mem_reg(dst, src)); + opcode(Assembler::stf_op3); + ins_encode(simple_form3_mem_reg(dst, src)); ins_pipe(fstoreF_stk_reg); %} @@ -7563,8 +7829,8 @@ size(4); format %{ "STW $src,$dst\t!MoveI2F" %} - opcode(Assembler::stw_op3); - ins_encode( form3_mem_reg( dst, src ) ); + opcode(Assembler::stw_op3); + ins_encode(simple_form3_mem_reg( dst, src ) ); ins_pipe(istore_mem_reg); %} @@ -7575,8 +7841,8 @@ size(4); format %{ "STDF $src,$dst\t!MoveD2L" %} - opcode(Assembler::stdf_op3); - ins_encode(form3_mem_reg(dst, src)); + opcode(Assembler::stdf_op3); + ins_encode(simple_form3_mem_reg(dst, src)); ins_pipe(fstoreD_stk_reg); %} @@ -7587,15 +7853,15 @@ size(4); format %{ "STX $src,$dst\t!MoveL2D" %} - opcode(Assembler::stx_op3); - ins_encode( form3_mem_reg( dst, src ) ); + opcode(Assembler::stx_op3); + ins_encode(simple_form3_mem_reg( dst, src ) ); ins_pipe(istore_mem_reg); %} //----------- // Long to Double conversion using V8 opcodes. -// Still useful because cheetah traps and becomes +// Still useful because cheetah traps and becomes // amazingly slow for some common numbers. // Magic constant, 0x43300000 @@ -7622,7 +7888,7 @@ size(8); format %{ "FMOVS $src1.hi,$dst.hi\n\t" "FMOVS $src2.lo,$dst.lo" %} - opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf); + opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf); ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst)); ins_pipe(faddD_reg_reg); %} @@ -7725,7 +7991,7 @@ //----------- // Long to Float conversion using V8 opcodes. -// Still useful because cheetah traps and becomes +// Still useful because cheetah traps and becomes // amazingly slow for some common numbers. 
// Long to Float conversion using fast fxtof @@ -7807,7 +8073,7 @@ size(8); #endif format %{ "SETHI hi(&Repl8($src)),$tmp\t!get Repl8B($src) from table\n\t" - "LDDF [$tmp+lo(&Repl8($src))],$dst" %} + "LDDF [$tmp+lo(&Repl8($src))],$dst" %} ins_encode( LdReplImmI(src, dst, tmp, (8), (1)) ); ins_pipe(loadConFD); %} @@ -7843,7 +8109,7 @@ size(8); #endif format %{ "SETHI hi(&Repl4($src)),$tmp\t!get Repl4C($src) from table\n\t" - "LDDF [$tmp+lo(&Repl4($src))],$dst" %} + "LDDF [$tmp+lo(&Repl4($src))],$dst" %} ins_encode( LdReplImmI(src, dst, tmp, (4), (2)) ); ins_pipe(loadConFD); %} @@ -7879,7 +8145,7 @@ size(8); #endif format %{ "SETHI hi(&Repl4($src)),$tmp\t!get Repl4S($src) from table\n\t" - "LDDF [$tmp+lo(&Repl4($src))],$dst" %} + "LDDF [$tmp+lo(&Repl4($src))],$dst" %} ins_encode( LdReplImmI(src, dst, tmp, (4), (2)) ); ins_pipe(loadConFD); %} @@ -7913,7 +8179,7 @@ size(8); #endif format %{ "SETHI hi(&Repl2($src)),$tmp\t!get Repl2I($src) from table\n\t" - "LDDF [$tmp+lo(&Repl2($src))],$dst" %} + "LDDF [$tmp+lo(&Repl2($src))],$dst" %} ins_encode( LdReplImmI(src, dst, tmp, (2), (4)) ); ins_pipe(loadConFD); %} @@ -7927,7 +8193,7 @@ size(4); format %{ "CMP $op1,$op2" %} - opcode(Assembler::subcc_op3, Assembler::arith_op); + opcode(Assembler::subcc_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); ins_pipe(ialu_cconly_reg_reg); %} @@ -7937,7 +8203,7 @@ size(4); format %{ "CMP $op1,$op2\t! unsigned" %} - opcode(Assembler::subcc_op3, Assembler::arith_op); + opcode(Assembler::subcc_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); ins_pipe(ialu_cconly_reg_reg); %} @@ -7948,7 +8214,7 @@ size(4); format %{ "CMP $op1,$op2" %} - opcode(Assembler::subcc_op3, Assembler::arith_op); + opcode(Assembler::subcc_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); ins_pipe(ialu_cconly_reg_imm); %} @@ -7958,7 +8224,7 @@ size(4); format %{ "BTST $op2,$op1" %} - opcode(Assembler::andcc_op3, Assembler::arith_op); + opcode(Assembler::andcc_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); ins_pipe(ialu_cconly_reg_reg_zero); %} @@ -7968,7 +8234,7 @@ size(4); format %{ "BTST $op2,$op1" %} - opcode(Assembler::andcc_op3, Assembler::arith_op); + opcode(Assembler::andcc_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); ins_pipe(ialu_cconly_reg_imm_zero); %} @@ -7979,7 +8245,7 @@ size(4); format %{ "CMP $op1,$op2\t\t! long" %} - opcode(Assembler::subcc_op3, Assembler::arith_op); + opcode(Assembler::subcc_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); ins_pipe(ialu_cconly_reg_reg); %} @@ -7990,7 +8256,7 @@ size(4); format %{ "CMP $op1,$con\t\t! long" %} - opcode(Assembler::subcc_op3, Assembler::arith_op); + opcode(Assembler::subcc_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); ins_pipe(ialu_cconly_reg_reg); %} @@ -8001,7 +8267,7 @@ size(4); format %{ "BTST $op1,$op2\t\t! long" %} - opcode(Assembler::andcc_op3, Assembler::arith_op); + opcode(Assembler::andcc_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); ins_pipe(ialu_cconly_reg_reg); %} @@ -8013,7 +8279,7 @@ size(4); format %{ "BTST $op1,$con\t\t! long" %} - opcode(Assembler::andcc_op3, Assembler::arith_op); + opcode(Assembler::andcc_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); ins_pipe(ialu_cconly_reg_reg); %} @@ -8023,7 +8289,7 @@ size(4); format %{ "CMP $op1,$op2\t! 
unsigned" %} - opcode(Assembler::subcc_op3, Assembler::arith_op); + opcode(Assembler::subcc_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); ins_pipe(ialu_cconly_reg_imm); %} @@ -8034,7 +8300,7 @@ size(4); format %{ "CMP $op1,$op2\t! ptr" %} - opcode(Assembler::subcc_op3, Assembler::arith_op); + opcode(Assembler::subcc_op3, Assembler::arith_op); ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); ins_pipe(ialu_cconly_reg_reg); %} @@ -8044,7 +8310,28 @@ size(4); format %{ "CMP $op1,$op2\t! ptr" %} - opcode(Assembler::subcc_op3, Assembler::arith_op); + opcode(Assembler::subcc_op3, Assembler::arith_op); + ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); + ins_pipe(ialu_cconly_reg_imm); +%} + +// Compare Narrow oops +instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{ + match(Set icc (CmpN op1 op2)); + + size(4); + format %{ "CMP $op1,$op2\t! compressed ptr" %} + opcode(Assembler::subcc_op3, Assembler::arith_op); + ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); + ins_pipe(ialu_cconly_reg_reg); +%} + +instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{ + match(Set icc (CmpN op1 op2)); + + size(4); + format %{ "CMP $op1,$op2\t! compressed ptr" %} + opcode(Assembler::subcc_op3, Assembler::arith_op); ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); ins_pipe(ialu_cconly_reg_imm); %} @@ -8102,7 +8389,7 @@ size(4); format %{ "FCMPs $fcc,$src1,$src2" %} - opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf); + opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf); ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) ); ins_pipe(faddF_fcc_reg_reg_zero); %} @@ -8112,7 +8399,7 @@ size(4); format %{ "FCMPd $fcc,$src1,$src2" %} - opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf); + opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf); ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) ); ins_pipe(faddD_fcc_reg_reg_zero); %} @@ -8148,7 +8435,7 @@ match(Jump switch_val); ins_cost(350); - + format %{ "SETHI [hi(table_base)],O7\n\t" "ADD O7, lo(table_base), O7\n\t" "LD [O7+$switch_val], O7\n\t" @@ -8168,7 +8455,7 @@ ins_cost(BRANCH_COST); format %{ "BA $labl" %} // Prim = bits 24-22, Secnd = bits 31-30, Tert = cond - opcode(Assembler::br_op2, Assembler::branch_op, Assembler::always); + opcode(Assembler::br_op2, Assembler::branch_op, Assembler::always); ins_encode( enc_ba( labl ) ); ins_pc_relative(1); ins_pipe(br); @@ -8188,9 +8475,9 @@ ins_pipe(br_cc); %} -// Branch-on-register tests all 64 bits. We assume that values +// Branch-on-register tests all 64 bits. We assume that values // in 64-bit registers always remains zero or sign extended -// unless our code munges the high bits. Interrupts can chop +// unless our code munges the high bits. Interrupts can chop // the high order bits to zero or sign at any time. instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{ match(If cmp (CmpI op1 zero)); @@ -8306,13 +8593,13 @@ // NE test is negated from that. // Due to a shortcoming in the ADLC, it mixes up expressions like: -// (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the -// difference between 'Y' and '0L'. The tree-matches for the CmpI sections -// are collapsed internally in the ADLC's dfa-gen code. The match for -// (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the -// foo match ends up with the wrong leaf. One fix is to not match both -// reg-reg and reg-zero forms of long-compare. 
This is unfortunate because -// both forms beat the trinary form of long-compare and both are very useful +// (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the +// difference between 'Y' and '0L'. The tree-matches for the CmpI sections +// are collapsed internally in the ADLC's dfa-gen code. The match for +// (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the +// foo match ends up with the wrong leaf. One fix is to not match both +// reg-reg and reg-zero forms of long-compare. This is unfortunate because +// both forms beat the trinary form of long-compare and both are very useful // on Intel which has so few registers. instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{ @@ -8379,6 +8666,14 @@ ins_pipe(ialu_imm); %} +instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{ + match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src))); + ins_cost(150); + format %{ "MOV$cmp $xcc,$src,$dst" %} + ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) ); + ins_pipe(ialu_reg); +%} + instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{ match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src))); ins_cost(150); @@ -8418,7 +8713,7 @@ instruct safePoint_poll(iRegP poll) %{ match(SafePoint poll); effect(USE poll); - + size(4); #ifdef _LP64 format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %} @@ -8454,7 +8749,7 @@ ins_cost(CALL_COST); format %{ "SET (empty),R_G5\n\t" - "CALL,dynamic ; NOP ==> " %} + "CALL,dynamic ; NOP ==> " %} ins_encode( Java_Dynamic_Call( meth ), call_epilog ); ins_pc_relative(1); ins_pipe(call); @@ -8467,7 +8762,7 @@ ins_cost(CALL_COST); format %{ "CALL,runtime" %} ins_encode( Java_To_Runtime( meth ), - call_epilog, adjust_long_from_native_call ); + call_epilog, adjust_long_from_native_call ); ins_pc_relative(1); ins_pipe(simple_call); %} @@ -8479,8 +8774,8 @@ ins_cost(CALL_COST); format %{ "CALL,runtime leaf" %} ins_encode( Java_To_Runtime( meth ), - call_epilog, - adjust_long_from_native_call ); + call_epilog, + adjust_long_from_native_call ); ins_pc_relative(1); ins_pipe(simple_call); %} @@ -8492,8 +8787,8 @@ ins_cost(CALL_COST); format %{ "CALL,runtime leaf nofp" %} ins_encode( Java_To_Runtime( meth ), - call_epilog, - adjust_long_from_native_call ); + call_epilog, + adjust_long_from_native_call ); ins_pc_relative(1); ins_pipe(simple_call); %} @@ -8534,9 +8829,9 @@ match( TailJump jump_target ex_oop ); ins_cost(CALL_COST); format %{ "! discard R_O7\n\t" - "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %} + "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %} ins_encode(form_jmpl_set_exception_pc(jump_target)); - // opcode(Assembler::jmpl_op3, Assembler::arith_op); + // opcode(Assembler::jmpl_op3, Assembler::arith_op); // The hack duplicates the exception oop into G3, so that CreateEx can use it there. // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() ); ins_pipe(tail_call); @@ -8558,7 +8853,7 @@ %} -// Rethrow exception: +// Rethrow exception: // The exception oop will come in the first argument position. // Then JUMP (not call) to the rethrow stub code. instruct RethrowException() @@ -8609,6 +8904,7 @@ ins_pipe(partial_subtype_check_pipe); %} + // ============================================================================ // inlined locking and unlocking @@ -8618,7 +8914,7 @@ effect(KILL scratch, TEMP scratch2); ins_cost(100); - size(4*112); // conservative overestimation ... + size(4*112); // conservative overestimation ... 
format %{ "FASTLOCK $object, $box; KILL $scratch, $scratch2, $box" %} ins_encode( Fast_Lock(object, box, scratch, scratch2) ); ins_pipe(long_memory_op); @@ -8647,12 +8943,13 @@ " BRge loop\t\t! Clearing loop\n" " STX G0,[$base+$temp]\t! delay slot" %} ins_encode( enc_Clear_Array(cnt, base, temp) ); - ins_pipe(long_memory_op); + ins_pipe(long_memory_op); %} -instruct string_compare(o0RegP str1, o1RegP str2, g3RegP tmp1, g4RegP tmp2, notemp_iRegI result, flagsReg ccr) %{ +instruct string_compare(o0RegP str1, o1RegP str2, g3RegP tmp1, g4RegP tmp2, notemp_iRegI result, + o7RegI tmp3, flagsReg ccr) %{ match(Set result (StrComp str1 str2)); - effect(USE_KILL str1, USE_KILL str2, KILL tmp1, KILL tmp2, KILL ccr); + effect(USE_KILL str1, USE_KILL str2, KILL tmp1, KILL tmp2, KILL ccr, KILL tmp3); ins_cost(300); format %{ "String Compare $str1,$str2 -> $result" %} ins_encode( enc_String_Compare(str1, str2, tmp1, tmp2, result) ); @@ -8661,13 +8958,13 @@ // ============================================================================ //------------Bytes reverse-------------------------------------------------- - + instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{ match(Set dst (ReverseBytesI src)); effect(DEF dst, USE src); - // Op cost is artificially doubled to make sure that load or store - // instructions are preferred over this one which requires a spill + // Op cost is artificially doubled to make sure that load or store + // instructions are preferred over this one which requires a spill // onto a stack slot. ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); size(8); @@ -8681,13 +8978,13 @@ match(Set dst (ReverseBytesL src)); effect(DEF dst, USE src); - // Op cost is artificially doubled to make sure that load or store - // instructions are preferred over this one which requires a spill + // Op cost is artificially doubled to make sure that load or store + // instructions are preferred over this one which requires a spill // onto a stack slot. ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); size(8); format %{ "LDXA $src, $dst\t!asi=primary_little" %} - + opcode(Assembler::ldxa_op3); ins_encode( form3_mem_reg_little(src, dst) ); ins_pipe( iload_mem ); @@ -8748,7 +9045,7 @@ //----------PEEPHOLE RULES----------------------------------------------------- // These must follow all instruction definitions as they use the names // defined in the instructions definitions. -// +// // peepmatch ( root_instr_name [preceeding_instruction]* ); // // peepconstraint %{ @@ -8761,16 +9058,16 @@ // // in the replacement instruction's match rule // // ---------VM FLAGS--------------------------------------------------------- -// +// // All peephole optimizations can be turned off using -XX:-OptoPeephole -// +// // Each peephole rule is given an identifying number starting with zero and // increasing by one in the order seen by the parser. An individual peephole // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=# // on the command-line. 
-// +// // ---------CURRENT LIMITATIONS---------------------------------------------- -// +// // Only match adjacent instructions in same basic block // Only equality constraints // Only constraints between operands, not (0.dest_reg == EAX_enc) @@ -8782,34 +9079,34 @@ // instruct movI(eRegI dst, eRegI src) %{ // match(Set dst (CopyI src)); // %} -// +// // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{ // match(Set dst (AddI dst src)); // effect(KILL cr); // %} -// +// // // Change (inc mov) to lea // peephole %{ // // increment preceeded by register-register move // peepmatch ( incI_eReg movI ); -// // require that the destination register of the increment +// // require that the destination register of the increment // // match the destination register of the move // peepconstraint ( 0.dst == 1.dst ); // // construct a replacement instruction that sets // // the destination to ( move's source register + one ) // peepreplace ( incI_eReg_immI1( 0.dst 1.src 0.src ) ); // %} -// +// // // Change load of spilled value to only a spill // instruct storeI(memory mem, eRegI src) %{ // match(Set mem (StoreI mem src)); // %} -// +// // instruct loadI(eRegI dst, memory mem) %{ // match(Set dst (LoadI mem)); // %} -// +// // peephole %{ // peepmatch ( loadI storeI ); // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem ); --- old/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp 2009-08-01 04:07:33.170155793 +0100 +++ new/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp 2009-08-01 04:07:33.076491781 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)stubGenerator_sparc.cpp 1.236 07/10/05 19:05:32 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -130,6 +127,7 @@ // setup thread register __ ld_ptr(thread.as_address(), G2_thread); + __ reinit_heapbase(); #ifdef ASSERT // make sure we have no pending exceptions @@ -255,7 +253,7 @@ // we can no longer assert of change of SP. // store result depending on type - // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE + // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE // is treated as T_INT) { const Register addr = result .as_in().as_register(); const Register type = result_type.as_in().as_register(); @@ -266,29 +264,29 @@ __ delayed()->cmp(type, T_LONG); __ br(Assembler::equal, false, Assembler::pn, is_long); __ delayed()->nop(); - // store int result + // store int result __ st(O0, addr, G0); __ BIND(exit); __ ret(); __ delayed()->restore(); - + __ BIND(is_object); __ ba(false, exit); __ delayed()->st_ptr(O0, addr, G0); - + __ BIND(is_float); __ ba(false, exit); __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0); - + __ BIND(is_double); __ ba(false, exit); __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0); - + __ BIND(is_long); #ifdef _LP64 __ ba(false, exit); - __ delayed()->st_long(O0, addr, G0); // store entire long + __ delayed()->st_long(O0, addr, G0); // store entire long #else #if defined(COMPILER2) // All return values are where we want them, except for Longs. 
C2 returns @@ -300,11 +298,11 @@ // first which would move g1 -> O0/O1 and destroy the exception we were throwing. __ ba(false, exit); - __ delayed()->stx(G1, addr, G0); // store entire long + __ delayed()->stx(G1, addr, G0); // store entire long #else - __ st(O1, addr, BytesPerInt); + __ st(O1, addr, BytesPerInt); __ ba(false, exit); - __ delayed()->st(O0, addr, G0); + __ delayed()->st(O0, addr, G0); #endif /* COMPILER2 */ #endif /* _LP64 */ } @@ -356,7 +354,7 @@ // The pending exception in Thread is converted into a Java-level exception // // Contract with Java-level exception handler: O0 = exception - // O1 = throwing pc + // O1 = throwing pc address generate_forward_exception() { StubCodeMark mark(this, "StubRoutines", "forward_exception"); @@ -495,7 +493,7 @@ __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); // we use O7 linkage so that forward_exception_entry has the issuing PC __ delayed()->restore(); - + RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false); return stub->entry_point(); } @@ -529,7 +527,7 @@ __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 ); __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 ); - // use add to put 20..32 in F20..F32 + // use add to put 20..32 in F20..F32 for (i = 20; i < 32; i += 2) { __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2), as_FloatRegister(i)); } @@ -537,9 +535,9 @@ // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's for ( i = 0; i < 8; ++i ) { if (i < 6) { - __ set( i, as_iRegister(i)); - __ set(16 + i, as_oRegister(i)); - __ set(24 + i, as_gRegister(i)); + __ set( i, as_iRegister(i)); + __ set(16 + i, as_oRegister(i)); + __ set(24 + i, as_gRegister(i)); } __ set( 8 + i, as_lRegister(i)); } @@ -577,7 +575,7 @@ } // Helper functions for v8 atomic operations. - // + // void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) { if (mark_oop_reg == noreg) { address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(); @@ -607,7 +605,7 @@ // stub is only invoked from Atomic::add(). We do not // want to use call_VM, because _last_java_sp and such // must already be set. 
- // + // // Save the regs and make space for a C call __ save(SP, -96, SP); __ save_all_globals_into_locals(); @@ -659,7 +657,7 @@ __ mov(O0, O3); // scratch copy of exchange value __ ld(O1, 0, O2); // observe the previous value // try to replace O2 with O3 - __ cas_under_lock(O1, O2, O3, + __ cas_under_lock(O1, O2, O3, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false); __ cmp(O2, O3); __ br(Assembler::notEqual, false, Assembler::pn, retry); @@ -676,7 +674,7 @@ const Register& lock_reg = O2; const Register& lock_ptr_reg = O3; const Register& yield_reg = O4; - + Label retry; Label dontyield; @@ -806,7 +804,7 @@ __ st(value_reg, O1, 0); // %%% only for RMO and PSO - __ membar(Assembler::StoreStore); + __ membar(Assembler::StoreStore); generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield); @@ -825,8 +823,8 @@ StubCodeMark mark(this, "StubRoutines", "fence"); address start = __ pc(); - __ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore | - Assembler::StoreLoad | Assembler::StoreStore)); + __ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore | + Assembler::StoreLoad | Assembler::StoreStore)); __ retl(false); __ delayed()->nop(); @@ -866,7 +864,7 @@ for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) { __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize); } - + address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access); BLOCK_COMMENT("call handle_unsafe_access"); __ call(entry_point, relocInfo::runtime_call_type); @@ -899,6 +897,7 @@ // super: O2, argument, not changed // raddr: O7, blown by call address generate_partial_subtype_check() { + __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", "partial_subtype_check"); address start = __ pc(); Label loop, miss; @@ -917,7 +916,7 @@ #if defined(COMPILER2) && !defined(_LP64) // Do not use a 'save' because it blows the 64-bit O registers. - __ add(SP,-4*wordSize,SP); // Make space for 4 temps + __ add(SP,-4*wordSize,SP); // Make space for 4 temps (stack must be 2 words aligned) __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize); __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize); __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize); @@ -937,26 +936,52 @@ Register L2_super = L2; Register L3_index = L3; +#ifdef _LP64 + Register L4_ooptmp = L4; + + if (UseCompressedOops) { + // this must be under UseCompressedOops check, as we rely upon fact + // that L4 not clobbered in C2 on 32-bit platforms, where we do explicit save + // on stack, see several lines above + __ encode_heap_oop(Rsuper, L4_ooptmp); + } +#endif + inc_counter_np(SharedRuntime::_partial_subtype_ctr, L0, L1); __ ld_ptr( Rsub, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), L3 ); - __ lduw(L3,arrayOopDesc::length_offset_in_bytes(),L0_ary_len); - __ add(L3,arrayOopDesc::base_offset_in_bytes(T_OBJECT),L1_ary_ptr); + __ lduw(L3,arrayOopDesc::length_offset_in_bytes(),L0_ary_len); + __ add(L3,arrayOopDesc::base_offset_in_bytes(T_OBJECT),L1_ary_ptr); __ clr(L3_index); // zero index // Load a little early; will load 1 off the end of the array. // Ok for now; revisit if we have other uses of this routine. 
- __ ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early - __ align(CodeEntryAlignment); + if (UseCompressedOops) { + __ lduw(L1_ary_ptr,0,L2_super);// Will load a little early + } else { + __ ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early + } + assert(heapOopSize != 0, "heapOopSize should be initialized"); // The scan loop __ BIND(loop); - __ add(L1_ary_ptr,wordSize,L1_ary_ptr); // Bump by OOP size - __ cmp(L3_index,L0_ary_len); + __ add(L1_ary_ptr, heapOopSize, L1_ary_ptr); // Bump by OOP size + __ cmp(L3_index,L0_ary_len); __ br(Assembler::equal,false,Assembler::pn,miss); __ delayed()->inc(L3_index); // Bump index - __ subcc(L2_super,Rsuper,Rret); // Check for match; zero in Rret for a hit - __ brx( Assembler::notEqual, false, Assembler::pt, loop ); - __ delayed()->ld_ptr(L1_ary_ptr,0,L2_super); // Will load a little early + + if (UseCompressedOops) { +#ifdef _LP64 + __ subcc(L2_super,L4_ooptmp,Rret); // Check for match; zero in Rret for a hit + __ br( Assembler::notEqual, false, Assembler::pt, loop ); + __ delayed()->lduw(L1_ary_ptr,0,L2_super);// Will load a little early +#else + ShouldNotReachHere(); +#endif + } else { + __ subcc(L2_super,Rsuper,Rret); // Check for match; zero in Rret for a hit + __ brx( Assembler::notEqual, false, Assembler::pt, loop ); + __ delayed()->ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early + } // Got a hit; report success; set cache. Cache load doesn't // happen here; for speed it is directly emitted by the compiler. @@ -967,10 +992,10 @@ __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1); __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2); __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3); - __ retl(); // Result in Rret is zero; flags set to Z + __ retl(); // Result in Rret is zero; flags set to Z __ delayed()->add(SP,4*wordSize,SP); #else - __ ret(); // Result in Rret is zero; flags set to Z + __ ret(); // Result in Rret is zero; flags set to Z __ delayed()->restore(); #endif @@ -983,10 +1008,10 @@ __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1); __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2); __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3); - __ retl(); // Result in Rret is != 0; flags set to NZ + __ retl(); // Result in Rret is != 0; flags set to NZ __ delayed()->add(SP,4*wordSize,SP); #else - __ ret(); // Result in Rret is != 0; flags set to NZ + __ ret(); // Result in Rret is != 0; flags set to NZ __ delayed()->restore(); #endif @@ -998,11 +1023,11 @@ // address generate_verify_oop_subroutine() { StubCodeMark mark(this, "StubRoutines", "verify_oop_stub"); - + address start = __ pc(); - + __ verify_oop_subroutine(); - + return start; } @@ -1085,32 +1110,32 @@ // The input registers are overwritten. // void gen_write_ref_array_pre_barrier(Register addr, Register count) { -#if 0 // G1 only BarrierSet* bs = Universe::heap()->barrier_set(); if (bs->has_write_ref_pre_barrier()) { assert(bs->has_write_ref_array_pre_opt(), - "Else unsupported barrier set."); + "Else unsupported barrier set."); - assert(addr->is_global() && count->is_global(), - "If not, then we have to fix this code to handle more " - "general cases."); - // Get some new fresh output registers. __ save_frame(0); // Save the necessary global regs... will be used after. 
- __ mov(addr, L0); - __ mov(count, L1); - - __ mov(addr, O0); + if (addr->is_global()) { + __ mov(addr, L0); + } + if (count->is_global()) { + __ mov(count, L1); + } + __ mov(addr->after_save(), O0); // Get the count into O1 __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)); - __ delayed()->mov(count, O1); - __ mov(L0, addr); - __ mov(L1, count); + __ delayed()->mov(count->after_save(), O1); + if (addr->is_global()) { + __ mov(L0, addr); + } + if (count->is_global()) { + __ mov(L1, count); + } __ restore(); } -#endif // 0 } - // // Generate post-write barrier for array. // @@ -1122,26 +1147,21 @@ // The input registers are overwritten. // void gen_write_ref_array_post_barrier(Register addr, Register count, - Register tmp) { + Register tmp) { BarrierSet* bs = Universe::heap()->barrier_set(); switch (bs->kind()) { -#if 0 // G1 - only case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: { - assert(addr->is_global() && count->is_global(), - "If not, then we have to fix this code to handle more " - "general cases."); // Get some new fresh output registers. __ save_frame(0); - __ mov(addr, O0); + __ mov(addr->after_save(), O0); __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)); - __ delayed()->mov(count, O1); + __ delayed()->mov(count->after_save(), O1); __ restore(); } break; -#endif // 0 G1 - only case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: { @@ -1151,8 +1171,8 @@ Label L_loop; - __ sll_ptr(count, LogBytesPerOop, count); - __ sub(count, BytesPerOop, count); + __ sll_ptr(count, LogBytesPerHeapOop, count); + __ sub(count, BytesPerHeapOop, count); __ add(count, addr, count); // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.) __ srl_ptr(addr, CardTableModRefBS::card_shift, addr); @@ -1168,13 +1188,12 @@ } break; - case BarrierSet::ModRef: + case BarrierSet::ModRef: break; - default : + default : ShouldNotReachHere(); - - } + } } @@ -1183,11 +1202,11 @@ // Inputs: // from - source arrays // to - destination array aligned to 8-bytes - // count - elements count to copy >= the count equivalent to 16 bytes + // count - elements count to copy >= the count equivalent to 16 bytes // count_dec - elements count's decrement equivalent to 16 bytes // L_copy_bytes - copy exit label // - void copy_16_bytes_forward_with_shift(Register from, Register to, + void copy_16_bytes_forward_with_shift(Register from, Register to, Register count, int count_dec, Label& L_copy_bytes) { Label L_loop, L_aligned_copy, L_copy_last_bytes; @@ -1203,7 +1222,7 @@ __ mov(64, right_shift); __ sub(right_shift, left_shift, right_shift); - // + // // Load 2 aligned 8-bytes chunks and use one from previous iteration // to form 2 aligned 8-bytes chunks to store. // @@ -1214,7 +1233,7 @@ __ align(16); __ BIND(L_loop); __ ldx(from, 0, O4); - __ deccc(count, count_dec); // Can we do next iteration after this one? + __ deccc(count, count_dec); // Can we do next iteration after this one? 
__ ldx(from, 8, G4); __ inc(to, 16); __ inc(from, 16); @@ -1255,13 +1274,13 @@ // Inputs: // end_from - source arrays end address // end_to - destination array end address aligned to 8-bytes - // count - elements count to copy >= the count equivalent to 16 bytes + // count - elements count to copy >= the count equivalent to 16 bytes // count_dec - elements count's decrement equivalent to 16 bytes // L_aligned_copy - aligned copy exit label // L_copy_bytes - copy exit label // - void copy_16_bytes_backward_with_shift(Register end_from, Register end_to, - Register count, int count_dec, + void copy_16_bytes_backward_with_shift(Register end_from, Register end_to, + Register count, int count_dec, Label& L_aligned_copy, Label& L_copy_bytes) { Label L_loop, L_copy_last_bytes; @@ -1277,7 +1296,7 @@ __ mov(64, right_shift); __ sub(right_shift, left_shift, right_shift); - // + // // Load 2 aligned 8-bytes chunks and use one from previous iteration // to form 2 aligned 8-bytes chunks to store. // @@ -1286,7 +1305,7 @@ __ align(16); __ BIND(L_loop); __ ldx(end_from, -8, O4); - __ deccc(count, count_dec); // Can we do next iteration after this one? + __ deccc(count, count_dec); // Can we do next iteration after this one? __ ldx(end_from, -16, G4); __ dec(end_to, 16); __ dec(end_from, 16); @@ -1504,7 +1523,7 @@ // The compare above (count >= 11) guarantes 'count' >= 16 bytes. // Also jump over aligned copy after the copy with shift completed. - copy_16_bytes_backward_with_shift(end_from, end_to, count, 16, + copy_16_bytes_backward_with_shift(end_from, end_to, count, 16, L_aligned_copy, L_copy_byte); } // copy 4 elements (16 bytes) at a time @@ -1743,7 +1762,7 @@ // The compare above (count >= 11) guarantes 'count' >= 16 bytes. // Also jump over aligned copy after the copy with shift completed. - copy_16_bytes_backward_with_shift(end_from, end_to, count, 8, + copy_16_bytes_backward_with_shift(end_from, end_to, count, 8, L_aligned_copy, L_copy_2_bytes); } // copy 4 elements (16 bytes) at a time @@ -1780,8 +1799,8 @@ } // - // Generate core code for disjoint int copy (and oop copy on 32-bit). - // If "aligned" is true, the "from" and "to" addresses are assumed + // Generate core code for disjoint int copy (and oop copy on 32-bit). + // If "aligned" is true, the "from" and "to" addresses are assumed // to be heapword aligned. // // Arguments: @@ -1834,11 +1853,11 @@ __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy); __ delayed()->ld(from, 0, O3); - // + // // Load 2 aligned 8-bytes chunks and use one from previous iteration // to form 2 aligned 8-bytes chunks to store. // - // copy_16_bytes_forward_with_shift() is not used here since this + // copy_16_bytes_forward_with_shift() is not used here since this // code is more optimal. // copy with shift 4 elements (16 bytes) at a time @@ -1965,7 +1984,7 @@ __ delayed()->dec(count, 4); // The cmp at the start guaranty cnt >= 4 // copy with shift 4 elements (16 bytes) at a time - // + // // Load 2 aligned 8-bytes chunks and use one from previous iteration // to form 2 aligned 8-bytes chunks to store. // @@ -2054,8 +2073,8 @@ } // - // Generate core code for disjoint long copy (and oop copy on 64-bit). - // "aligned" is ignored, because we must make the stronger + // Generate core code for disjoint long copy (and oop copy on 64-bit). + // "aligned" is ignored, because we must make the stronger // assumption that both addresses are always 64-bit aligned. // // Arguments: @@ -2096,8 +2115,8 @@ } // - // Generate stub for disjoint long copy. 
- // "aligned" is ignored, because we must make the stronger + // Generate stub for disjoint long copy. + // "aligned" is ignored, because we must make the stronger // assumption that both addresses are always 64-bit aligned. // // Arguments for generated stub: @@ -2127,7 +2146,7 @@ // // Generate core code for conjoint long copy (and oop copy on 64-bit). - // "aligned" is ignored, because we must make the stronger + // "aligned" is ignored, because we must make the stronger // assumption that both addresses are always 64-bit aligned. // // Arguments: @@ -2166,8 +2185,8 @@ __ BIND(L_exit); } - // Generate stub for conjoint long copy. - // "aligned" is ignored, because we must make the stronger + // Generate stub for conjoint long copy. + // "aligned" is ignored, because we must make the stronger // assumption that both addresses are always 64-bit aligned. // // Arguments for generated stub: @@ -2229,7 +2248,12 @@ __ mov(count, G5); gen_write_ref_array_pre_barrier(G1, G5); #ifdef _LP64 - generate_disjoint_long_copy_core(aligned); + assert_clean_int(count, O3); // Make sure 'count' is clean int. + if (UseCompressedOops) { + generate_disjoint_int_copy_core(aligned); + } else { + generate_disjoint_long_copy_core(aligned); + } #else generate_disjoint_int_copy_core(aligned); #endif @@ -2277,10 +2301,14 @@ StubRoutines::arrayof_oop_disjoint_arraycopy() : disjoint_oop_copy_entry; - array_overlap_test(nooverlap_target, LogBytesPerWord); + array_overlap_test(nooverlap_target, LogBytesPerHeapOop); #ifdef _LP64 - generate_conjoint_long_copy_core(aligned); + if (UseCompressedOops) { + generate_conjoint_int_copy_core(aligned); + } else { + generate_conjoint_long_copy_core(aligned); + } #else generate_conjoint_int_copy_core(aligned); #endif @@ -2380,10 +2408,7 @@ StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); - int klass_off = oopDesc::klass_offset_in_bytes(); - - gen_write_ref_array_pre_barrier(G1, G5); - + gen_write_ref_array_pre_barrier(O1, O2); #ifdef ASSERT // We sometimes save a frame (see partial_subtype_check below). 
@@ -2398,7 +2423,7 @@ { Label L; __ mov(O3, G1); // spill: overlap test smashes O3 __ mov(O4, G4); // spill: overlap test smashes O4 - array_overlap_test(L, LogBytesPerWord); + array_overlap_test(L, LogBytesPerHeapOop); __ stop("checkcast_copy within a single array"); __ bind(L); __ mov(G1, O3); @@ -2432,18 +2457,18 @@ __ bind(store_element); // deccc(G1_remain); // decrement the count (hoisted) - __ st_ptr(G3_oop, O1_to, O5_offset); // store the oop - __ inc(O5_offset, wordSize); // step to next offset + __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop + __ inc(O5_offset, heapOopSize); // step to next offset __ brx(Assembler::zero, true, Assembler::pt, do_card_marks); __ delayed()->set(0, O0); // return -1 on success // ======== loop entry is here ======== __ bind(load_element); - __ ld_ptr(O0_from, O5_offset, G3_oop); // load the oop + __ load_heap_oop(O0_from, O5_offset, G3_oop); // load the oop __ br_null(G3_oop, true, Assembler::pt, store_element); __ delayed()->deccc(G1_remain); // decrement the count - __ ld_ptr(G3_oop, klass_off, G4_klass); // query the object klass + __ load_klass(G3_oop, G4_klass); // query the object klass generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super, // branch to this on success: @@ -2548,13 +2573,13 @@ // Note: This next instruction may be in the delay slot of a branch: __ add(length, src_pos, end_pos); // src_pos + length - __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length); + __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length); __ cmp(end_pos, array_length); __ br(Assembler::greater, false, Assembler::pn, L_failed); // if (dst_pos + length > arrayOop(dst)->length() ) FAIL; __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length - __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length); + __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length); __ cmp(end_pos, array_length); __ br(Assembler::greater, false, Assembler::pn, L_failed); @@ -2611,9 +2636,9 @@ //assert_clean_int(length, G1); //----------------------------------------------------------------------- - // Assembler stubs will be used for this call to arraycopy + // Assembler stubs will be used for this call to arraycopy // if the following conditions are met: - // + // // (1) src and dst must not be null. // (2) src_pos must not be negative. // (3) dst_pos must not be negative. @@ -2645,17 +2670,23 @@ BLOCK_COMMENT("arraycopy argument klass checks"); // get src->klass() - __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass); + if (UseCompressedOops) { + __ delayed()->nop(); // ??? not good + __ load_klass(src, G3_src_klass); + } else { + __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass); + } #ifdef ASSERT // assert(src->klass() != NULL); BLOCK_COMMENT("assert klasses not null"); { Label L_a, L_b; __ br_notnull(G3_src_klass, false, Assembler::pt, L_b); // it is broken if klass is NULL - __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass); + __ delayed()->nop(); __ bind(L_a); __ stop("broken null klass"); __ bind(L_b); + __ load_klass(dst, G4_dst_klass); __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also __ delayed()->mov(G0, G4_dst_klass); // scribble the temp BLOCK_COMMENT("assert done"); @@ -2676,12 +2707,19 @@ // Load 32-bits signed value. Use br() instruction with it to check icc. 
__ lduw(G3_src_klass, lh_offset, G5_lh); + if (UseCompressedOops) { + __ load_klass(dst, G4_dst_klass); + } // Handle objArrays completely differently... juint objArray_lh = Klass::array_layout_helper(T_OBJECT); __ set(objArray_lh, O5_temp); __ cmp(G5_lh, O5_temp); __ br(Assembler::equal, false, Assembler::pt, L_objArray); - __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass); + if (UseCompressedOops) { + __ delayed()->nop(); + } else { + __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass); + } // if (src->klass() != dst->klass()) return -1; __ cmp(G3_src_klass, G4_dst_klass); @@ -2765,7 +2803,7 @@ __ delayed()->signx(length, count); // length // objArrayKlass - __ BIND(L_objArray); + __ BIND(L_objArray); // live at this point: G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length Label L_plain_copy, L_checkcast_copy; @@ -2780,8 +2818,8 @@ __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset - __ sll_ptr(src_pos, LogBytesPerOop, src_pos); - __ sll_ptr(dst_pos, LogBytesPerOop, dst_pos); + __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos); + __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos); __ add(src, src_pos, from); // src_addr __ add(dst, dst_pos, to); // dst_addr __ BIND(L_plain_copy); @@ -2804,8 +2842,8 @@ // Marshal the base address arguments now, freeing registers. __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset - __ sll_ptr(src_pos, LogBytesPerOop, src_pos); - __ sll_ptr(dst_pos, LogBytesPerOop, dst_pos); + __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos); + __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos); __ add(src, src_pos, from); // src_addr __ add(dst, dst_pos, to); // dst_addr __ signx(length, count); // length (reloaded) @@ -2884,7 +2922,7 @@ // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp. StubRoutines::_forward_exception_entry = generate_forward_exception(); - StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); + StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); StubRoutines::_catch_exception_entry = generate_catch_exception(); //------------------------------------------------------------------------------------------------------------------------ @@ -2924,7 +2962,7 @@ generate_handler_for_unsafe_access(); // support for verify_oop (must happen after universe_init) - StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine(); + StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine(); // arraycopy stubs used by compilers generate_arraycopy_stubs(); @@ -2975,11 +3013,11 @@ if (at_header) { while ((intptr_t)(__ pc()) % icache_line_size != 0) { - __ emit_data(0, relocInfo::none); + __ emit_data(0, relocInfo::none); } } else { while ((intptr_t)(__ pc()) % icache_half_line_size != 0) { - __ nop(); + __ nop(); } } } --- old/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp 2009-08-01 04:07:34.237438468 +0100 +++ new/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp 2009-08-01 04:07:34.142143837 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)templateInterpreter_sparc.cpp 1.2 07/09/25 17:07:42 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. 
+ * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -102,9 +99,9 @@ // happened __ empty_expression_stack(); // load exception object - __ call_VM(Oexception, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::throw_ClassCastException), + __ call_VM(Oexception, + CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_ClassCastException), Otos_i); __ should_not_reach_here(); return entry; @@ -115,7 +112,7 @@ address entry = __ pc(); // expression stack must be empty before entering the VM if an exception happened __ empty_expression_stack(); - // convention: expect aberrant index in register G3_scratch, then shuffle the + // convention: expect aberrant index in register G3_scratch, then shuffle the // index to G4_scratch for the VM call __ mov(G3_scratch, G4_scratch); __ set((intptr_t)name, G3_scratch); @@ -140,12 +137,12 @@ Label cont; address entry = __ pc(); -#if !defined(_LP64) && defined(COMPILER2) +#if !defined(_LP64) && defined(COMPILER2) // All return values are where we want them, except for Longs. C2 returns // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1. // Since the interpreter will return longs in G1 and O0/O1 in the 32bit // build even if we are returning from interpreted we just do a little - // stupid shuffing. + // stupid shuffing. // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to // do this here. Unfortunately if we did a rethrow we'd see an machepilog node // first which would move g1 -> O0/O1 and destroy the exception we were throwing. @@ -163,11 +160,11 @@ // We remove that possible adjustment here. // All interpreter local registers are untouched. Any result is passed back // in the O0/O1 or float registers. Before continuing, the arguments must be - // popped from the java expression stack; i.e., Lesp must be adjusted. + // popped from the java expression stack; i.e., Lesp must be adjusted. __ mov(Llast_SP, SP); // Remove any adapter added stack space. - + const Register cache = G3_scratch; const Register size = G1_scratch; __ get_cache_and_index_at_bcp(cache, G1_scratch, 1); @@ -181,7 +178,7 @@ return entry; } - + address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) { address entry = __ pc(); __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache @@ -194,7 +191,7 @@ __ delayed()->nop(); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception)); __ should_not_reach_here(); - __ bind(L); + __ bind(L); } __ dispatch_next(state, step); return entry; @@ -216,7 +213,7 @@ case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i); break; // cannot use and3, 0xFFFF too big as immediate value! 
case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i); break; case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i); break; - case T_LONG : + case T_LONG : #ifndef _LP64 __ mov(O1, Itos_l2); // move other half of long #endif // ifdef or no ifdef, fall through to the T_INT case @@ -224,7 +221,7 @@ case T_VOID : /* nothing to do */ break; case T_FLOAT : assert(F0 == Ftos_f, "fix this code" ); break; case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" ); break; - case T_OBJECT : + case T_OBJECT : __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i); __ verify_oop(Itos_i); break; @@ -297,7 +294,7 @@ // Allocate monitor and lock method (asm interpreter) // ebx - methodOop -// +// void InterpreterGenerator::lock_method(void) { const Address access_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); __ ld(access_flags, O0); @@ -420,7 +417,7 @@ // that arguments and non-argument locals are in a contigously // addressable memory block => non-argument locals must be // allocated in the caller's frame. - // + // // 2) Create a new stack frame and register window: // The new stack frame must provide space for the standard // register save area, the maximum java expression stack size, @@ -468,7 +465,7 @@ assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP); __ sll(Glocals_size, Interpreter::logStackElementSize(), Otmp1); __ add(Gargs, Otmp1, Gargs); - + if (native_call) { __ calc_mem_param_words( Glocals_size, Gframe_size ); __ add( Gframe_size, extra_space, Gframe_size); @@ -538,7 +535,7 @@ if (ProfileInterpreter) { #ifdef FAST_DISPATCH - // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since + // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since // they both use I2. assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive"); #endif // FAST_DISPATCH @@ -594,7 +591,10 @@ address entry = __ pc(); Label slow_path; - if ( UseFastAccessorMethods) { + + // XXX: for compressed oops pointer loading and decoding doesn't fit in + // delay slot and damages G1 + if ( UseFastAccessorMethods && !UseCompressedOops ) { // Check if we need to reach a safepoint and generate full interpreter // frame if so. Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); @@ -609,14 +609,14 @@ __ brx(Assembler::zero, false, Assembler::pn, slow_path); __ delayed()->nop(); - + // read first instruction word and extract bytecode @ 1 and index @ 2 // get first 4 bytes of the bytecodes (big endian!) __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), G1_scratch); __ ld(Address(G1_scratch, 0, in_bytes(constMethodOopDesc::codes_offset())), G1_scratch); // move index @ 2 far left then to the right most two bytes. 
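The XXX comment above captures the constraint behind disabling UseFastAccessorMethods under compressed oops: a SPARC branch delay slot holds exactly one instruction, and loading a reference field now takes a 32-bit load plus a decode (shift and add against the heap base), which both overflows the slot and clobbers G1, so the fast-accessor entry simply falls back to the normal path. A rough C-level picture of the extra dependent step (the struct layout, base, and shift are illustrative):

    #include <cstdint>

    struct ObjNarrow { uint32_t field; };   // compressed: 4-byte heap offset
    struct ObjWide   { void*    field; };   // uncompressed: raw 64-bit pointer

    // One operation: this is what used to fit in the delay slot.
    static void* load_wide(const ObjWide* o) { return o->field; }

    // Load plus decode: the extra shift/add is what no longer fits.
    static void* load_narrow(const ObjNarrow* o, uintptr_t heap_base, int shift) {
      return (void*)(heap_base + ((uintptr_t)o->field << shift));
    }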
- __ sll(G1_scratch, 2*BitsPerByte, G1_scratch); + __ sll(G1_scratch, 2*BitsPerByte, G1_scratch); __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words( ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch); @@ -636,7 +636,7 @@ __ cmp(G1_scratch, Bytecodes::_getfield); __ br(Assembler::notEqual, false, Assembler::pn, slow_path); __ delayed()->nop(); - + // Get the type and return field offset from the constant pool cache __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch); __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch); @@ -756,7 +756,7 @@ // Note: checking for negative value instead of overflow // so we have a 'sticky' overflow test (may be of // importance as soon as we have true MT/MP) - Label invocation_counter_overflow; + Label invocation_counter_overflow; Label Lcontinue; if (inc_counter) { generate_counter_incr(&invocation_counter_overflow, NULL, NULL); @@ -792,7 +792,7 @@ // start execution __ verify_thread(); - // JVMTI support + // JVMTI support __ notify_method_entry(); // native call @@ -829,7 +829,7 @@ __ add(mirror, O2); // Calculate current frame size - __ sub(SP, FP, O3); // Calculate negative of current frame size + __ sub(SP, FP, O3); // Calculate negative of current frame size __ save(SP, O3, SP); // Allocate an identical sized frame // Note I7 has leftover trash. Slow signature handler will fill it in @@ -843,7 +843,7 @@ __ mov(I1, Llocals); __ mov(I2, Lscratch2); // save the address of the mirror - + // ONLY Lmethod and Llocals are valid here! @@ -922,12 +922,12 @@ // flush the windows now. We don't care about the current (protection) frame // only the outer frames - __ flush_windows(); + __ flush_windows(); // mark windows as flushed Address flags(G2_thread, - 0, - in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset())); + 0, + in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset())); __ set(JavaFrameAnchor::flushed, G3_scratch); __ st(G3_scratch, flags); @@ -946,7 +946,7 @@ #endif // ASSERT __ set(_thread_in_native, G3_scratch); __ st(G3_scratch, thread_state); - + // Call the jni method, using the delay slot to set the JNIEnv* argument. __ save_thread(L7_thread_cache); // save Gthread __ callr(O0, 0); @@ -956,9 +956,10 @@ // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD __ restore_thread(L7_thread_cache); // restore G2_thread + __ reinit_heapbase(); // must we block? - + // Block, if necessary, before resuming in _thread_in_Java state. // In order for GC to work, don't clear the last_Java_sp until after blocking. { Label no_block; @@ -973,7 +974,7 @@ // didn't see any synchronization is progress, and escapes. __ set(_thread_in_native_trans, G3_scratch); __ st(G3_scratch, thread_state); - if(os::is_MP()) { + if(os::is_MP()) { if (UseMembar) { // Force this write out before the read below __ membar(Assembler::StoreLoad); @@ -1002,8 +1003,8 @@ // use a leaf call to leave the last_Java_frame setup undisturbed. 
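Two details of the native-call epilogue above are worth spelling out: reinit_heapbase() reloads the register that caches the compressed-oop heap base, since the JNI code that just ran is free to clobber it, and the StoreLoad barrier forces the store of _thread_in_native_trans to become visible before the following read of the safepoint state, so the VM thread cannot miss the transition. A rough std::atomic analogue of that store-then-load ordering (the state and flag variables are illustrative, not the VM's):

    #include <atomic>

    enum ThreadState { _thread_in_native, _thread_in_native_trans, _thread_in_Java };

    static std::atomic<int>  thread_state{(int)_thread_in_native};
    static std::atomic<bool> safepoint_pending{false};

    static bool transition_from_native() {
      // Store the new state, then read the safepoint flag; seq_cst on both
      // provides the StoreLoad ordering that the explicit membar supplies.
      thread_state.store((int)_thread_in_native_trans, std::memory_order_seq_cst);
      return safepoint_pending.load(std::memory_order_seq_cst);
    }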
save_native_result(); __ call_VM_leaf(L7_thread_cache, - CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), - G2_thread); + CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), + G2_thread); // Restore any method result value restore_native_result(); @@ -1015,7 +1016,7 @@ __ reset_last_Java_frame(); // Move the result handler address - __ mov(Lscratch, G3_scratch); + __ mov(Lscratch, G3_scratch); // return possible result to the outer frame #ifndef __LP64 __ mov(O0, I0); @@ -1025,10 +1026,10 @@ #endif /* __LP64 */ // Move result handler to expected register - __ mov(G3_scratch, Lscratch); + __ mov(G3_scratch, Lscratch); // Back in normal (native) interpreter frame. State is thread_in_native_trans - // switch to thread_in_Java. + // switch to thread_in_Java. __ set(_thread_in_Java, G3_scratch); __ st(G3_scratch, thread_state); @@ -1077,8 +1078,8 @@ __ bind(L); } - // JVMTI support (preserves thread register) - __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI); + // JVMTI support (preserves thread register) + __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI); if (synchronized) { // save and restore any potential method result value around the unlocking operation @@ -1094,7 +1095,7 @@ // C2 expects long results in G1 we can't tell if we're returning to interpreted // or compiled so just be safe. - + __ sllx(O0, 32, G1); // Shift bits into high G1 __ srl (O1, 0, O1); // Zero extend O1 __ or3 (O1, G1, G1); // OR 64 bits into G1 @@ -1104,7 +1105,7 @@ // dispose of return address and remove activation #ifdef ASSERT { - Label ok; + Label ok; __ cmp(I5_savedSP, FP); __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok); __ delayed()->nop(); @@ -1115,7 +1116,7 @@ #endif if (TraceJumps) { // Move target to register that is recordable - __ mov(Lscratch, G3_scratch); + __ mov(Lscratch, G3_scratch); __ JMP(G3_scratch, 0); } else { __ jmp(Lscratch, 0); @@ -1211,7 +1212,7 @@ __ sll( O1, Interpreter::logStackElementSize(), O1 ); __ sub( Llocals, O2, O2 ); __ sub( Llocals, O1, O1 ); - + __ bind( clear_loop ); __ inc( O2, wordSize ); @@ -1235,7 +1236,7 @@ // Note: checking for negative value instead of overflow // so we have a 'sticky' overflow test (may be of // importance as soon as we have true MT/MP) - Label invocation_counter_overflow; + Label invocation_counter_overflow; Label profile_method; Label profile_method_continue; Label Lcontinue; @@ -1275,7 +1276,7 @@ __ verify_thread(); - // jvmti support + // jvmti support __ notify_method_entry(); // start executing instructions @@ -1295,7 +1296,7 @@ #endif __ set_method_data_pointer(); - + __ ba(false, profile_method_continue); __ delayed()->nop(); } @@ -1323,7 +1324,7 @@ // // When control flow reaches any of the entry types for the interpreter // the following holds -> -// +// // C2 Calling Conventions: // // The entry code below assumes that the following registers are set @@ -1433,16 +1434,16 @@ // frame extension) and monitor_size for monitors. Basically we need to calculate // this exactly like generate_fixed_frame/generate_compute_interpreter_state. // - // + // // The big complicating thing here is that we must ensure that the stack stays properly // aligned. This would be even uglier if monitor size wasn't modulo what the stack // needs to be aligned for). We are given that the sp (fp) is already aligned by // the caller so we must ensure that it is properly aligned for our callee. 
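The alignment discussion above boils down to one idiom: every component of the interpreter frame (vm locals, locals, expression stack) is rounded up to a multiple of WordsPerLong before being summed, so the resulting SP keeps the alignment the caller guaranteed. As a self-contained sketch (alignment assumed to be a power of two):

    #include <cstddef>
    #include <cassert>

    // Round x up to the next multiple of align (align must be a power of two).
    static size_t round_to(size_t x, size_t align) {
      assert((align & (align - 1)) == 0);
      return (x + align - 1) & ~(align - 1);
    }

    // e.g. locals_size = round_to(callee_extra_locals * stack_element_words, 2 /* WordsPerLong */);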
// - const int rounded_vm_local_words = + const int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong); // callee_locals and max_stack are counts, not the size in frame. - const int locals_size = + const int locals_size = round_to(callee_extra_locals * Interpreter::stackElementWords(), WordsPerLong); const int max_stack_words = max_stack * Interpreter::stackElementWords(); return (round_to((max_stack_words @@ -1508,7 +1509,7 @@ // glance this would seem to mess up that frame. However Deoptimization::fetch_unroll_info_helper() // will after it calculates all of the frame's on_stack_size()'s will then figure out the // amount to adjust the caller of the initial (oldest) frame and the calculation will all - // add up. It does seem like it simpler to account for the adjustment here (and remove the + // add up. It does seem like it simpler to account for the adjustment here (and remove the // callee... parameters here). However this would mean that this routine would have to take // the caller frame as input so we could adjust its sp (and set it's interpreter_sp_adjustment) // and run the calling loop in the reverse order. This would also would appear to mean making @@ -1541,7 +1542,7 @@ intptr_t* monitors = montop - monitor_size; // preallocate stack space - intptr_t* esp = monitors - 1 - + intptr_t* esp = monitors - 1 - (tempcount * Interpreter::stackElementWords()) - popframe_extra_args; @@ -1564,7 +1565,7 @@ // adjacent to the register window save area. // // Compiled frames do not allocate a varargs area which is why this if - // statement is needed. + // statement is needed. // if (caller->is_compiled_frame()) { locals = fp + frame::register_save_words + local_words - 1; @@ -1657,7 +1658,7 @@ // Entry point in previous activation (i.e., if the caller was interpreted) Interpreter::_rethrow_exception_entry = __ pc(); // O0: exception - + // entry point for exceptions thrown within interpreter code Interpreter::_throw_exception_entry = __ pc(); __ verify_thread(); @@ -1665,15 +1666,15 @@ // O0: exception, i.e. Oexception // Lbcp: exception bcx __ verify_oop(Oexception); - - - // expression stack must be empty before entering the VM in case of an exception + + + // expression stack must be empty before entering the VM in case of an exception __ empty_expression_stack(); // find exception handler address and preserve exception oop // call C routine to find handler and jump to it __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception); __ push_ptr(O1); // push exception for exception handler bytecodes - + __ JMP(O0, 0); // jump to exception handler (may be remove activation entry!) 
__ delayed()->nop(); @@ -1689,7 +1690,7 @@ // in current activation // tos: exception // Lbcp: exception bcp - + // // JVMTI PopFrame support // @@ -1857,11 +1858,11 @@ void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) { assert(t->is_valid() && t->tos_in() == vtos, "illegal template"); Label L; - aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop(); + aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop(); fep = __ pc(); __ push_f(); __ ba(false, L); __ delayed()->nop(); dep = __ pc(); __ push_d(); __ ba(false, L); __ delayed()->nop(); lep = __ pc(); __ push_l(); __ ba(false, L); __ delayed()->nop(); - iep = __ pc(); __ push_i(); + iep = __ pc(); __ push_i(); bep = cep = sep = iep; // there aren't any vep = __ pc(); __ bind(L); // fall through generate_and_dispatch(t); @@ -1870,7 +1871,7 @@ // -------------------------------------------------------------------------------- -InterpreterGenerator::InterpreterGenerator(StubQueue* code) +InterpreterGenerator::InterpreterGenerator(StubQueue* code) : TemplateInterpreterGenerator(code) { generate_all(); // down here so it can be "virtual" } @@ -1890,7 +1891,7 @@ __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch); __ mov(Lscratch, O7); // restore return address __ pop(state); - __ retl(); + __ retl(); __ delayed()->nop(); return entry; @@ -1899,7 +1900,7 @@ // helpers for generate_and_dispatch -void TemplateInterpreterGenerator::count_bytecode() { +void TemplateInterpreterGenerator::count_bytecode() { Address c(G3_scratch, (address)&BytecodeCounter::_counter_value); __ load_contents(c, G4_scratch); __ inc(G4_scratch); @@ -1907,7 +1908,7 @@ } -void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { +void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { Address bucket( G3_scratch, (address) &BytecodeHistogram::_counters[t->bytecode()] ); __ load_contents(bucket, G4_scratch); __ inc(G4_scratch); @@ -1915,7 +1916,7 @@ } -void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { +void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { address index_addr = (address)&BytecodePairHistogram::_index; Address index(G3_scratch, index_addr); @@ -1923,7 +1924,7 @@ Address counters(G3_scratch, counters_addr); // get index, shift out old bytecode, bring in new bytecode, and store it - // _index = (_index >> log2_number_of_codes) | + // _index = (_index >> log2_number_of_codes) | // (bytecode << log2_number_of_codes); --- old/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp 2009-08-01 04:07:35.243241300 +0100 +++ new/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp 2009-08-01 04:07:35.143564984 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)templateTable_sparc.cpp 1.262 07/08/29 13:42:19 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ #include "incls/_precompiled.incl" @@ -31,6 +28,79 @@ #ifndef CC_INTERP #define __ _masm-> +// Misc helpers + +// Do an oop store like *(base + index + offset) = val +// index can be noreg, +static void do_oop_store(InterpreterMacroAssembler* _masm, + Register base, + Register index, + int offset, + Register val, + Register tmp, + BarrierSet::Name barrier, + bool precise) { + assert(tmp != val && tmp != base && tmp != index, "register collision"); + assert(index == noreg || offset == 0, "only one offset"); + switch (barrier) { +#ifndef SERIALGC + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: + { + __ g1_write_barrier_pre( base, index, offset, tmp, /*preserve_o_regs*/true); + if (index == noreg ) { + assert(Assembler::is_simm13(offset), "fix this code"); + __ store_heap_oop(val, base, offset); + } else { + __ store_heap_oop(val, base, index); + } + + // No need for post barrier if storing NULL + if (val != G0) { + if (precise) { + if (index == noreg) { + __ add(base, offset, base); + } else { + __ add(base, index, base); + } + } + __ g1_write_barrier_post(base, val, tmp); + } + } + break; +#endif // SERIALGC + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: + { + if (index == noreg ) { + assert(Assembler::is_simm13(offset), "fix this code"); + __ store_heap_oop(val, base, offset); + } else { + __ store_heap_oop(val, base, index); + } + // No need for post barrier if storing NULL + if (val != G0) { + if (precise) { + if (index == noreg) { + __ add(base, offset, base); + } else { + __ add(base, index, base); + } + } + __ card_write_barrier_post(base, val, tmp); + } + } + break; + case BarrierSet::ModRef: + case BarrierSet::Other: + ShouldNotReachHere(); + break; + default : + ShouldNotReachHere(); + + } +} + //---------------------------------------------------------------------------------------------------- // Platform-dependent initialization @@ -56,7 +126,7 @@ } //---------------------------------------------------------------------------------------------------- -// Miscelaneous helper routines +// Miscelaneous helper routines Address TemplateTable::at_bcp(int offset) { @@ -168,7 +238,7 @@ } -// %%%%% Should factore most snippet templates across platforms +// %%%%% Should factore most snippet templates across platforms void TemplateTable::bipush() { transition(vtos, itos); @@ -206,7 +276,7 @@ __ delayed()->nop(); __ cmp(O2, JVM_CONSTANT_UnresolvedClassInError); // unresolved class in error state - __ brx(Assembler::equal, true, Assembler::pn, call_ldc); + __ brx(Assembler::equal, true, Assembler::pn, call_ldc); __ delayed()->nop(); __ cmp(O2, JVM_CONSTANT_Class); // need to call vm to get java mirror of the class @@ -465,8 +535,8 @@ transition(itos, atos); // Otos_i: index // tos: array - __ index_check(O2, Otos_i, LogBytesPerWord, G3_scratch, O3); - __ ld_ptr(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i); + __ index_check(O2, Otos_i, UseCompressedOops ? 
2 : LogBytesPerWord, G3_scratch, O3); + __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i); __ verify_oop(Otos_i); } @@ -733,26 +803,27 @@ Label store_ok, is_null, done; transition(vtos, vtos); __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i); - __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2); // get index + __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2); // get index __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3); // get array // Otos_i: val // O2: index // O3: array __ verify_oop(Otos_i); - __ index_check_without_pop(O3, O2, LogBytesPerWord, G3_scratch, O1); - + __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1); + // do array store check - check for NULL value first __ br_null( Otos_i, false, Assembler::pn, is_null ); - __ delayed()-> - ld_ptr(O3, oopDesc::klass_offset_in_bytes(), O4); // get array klass + __ delayed()->nop(); + + __ load_klass(O3, O4); // get array klass + __ load_klass(Otos_i, O5); // get value klass // do fast instanceof cache test - __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), O5); // get value klass __ ld_ptr(O4, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(), O4); - + assert(Otos_i == O0, "just checking"); - + // Otos_i: value // O1: addr - offset // O2: index @@ -760,6 +831,8 @@ // O4: array element klass // O5: value klass + // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); + // Generate a fast subtype check. Branch to store_ok if no // failure. Throw if failure. __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok ); @@ -769,18 +842,14 @@ // Store is OK. __ bind(store_ok); - __ st_ptr(Otos_i, O1, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); - // Quote from rememberedSet.hpp: For objArrays, the precise card - // corresponding to the pointer store is dirtied so we don't need to - // scavenge the entire array. - Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); - __ add(element, O1); // address the element precisely - __ store_check(G3_scratch, O1); + do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true); + __ ba(false,done); __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value) __ bind(is_null); - __ st_ptr(Otos_i, element); + do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true); + __ profile_null_seen(G3_scratch); __ inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value) __ bind(done); @@ -904,7 +973,7 @@ __ store_ptr_and_tag(2, Otos_l1, Otos_l2); // put a at 2 // stack: ..., c, a, c, c (b in reg) __ store_ptr_and_tag(1, G3_scratch, G4_scratch); // put b at 1 - // stack: ..., c, a, b, c + // stack: ..., c, a, b, c } @@ -974,7 +1043,7 @@ switch (op) { case add: __ add(O1, Otos_i, Otos_i); break; case sub: __ sub(O1, Otos_i, Otos_i); break; - // %%%%% Mul may not exist: better to call .mul? + // %%%%% Mul may not exist: better to call .mul? case mul: __ smul(O1, Otos_i, Otos_i); break; case _and: __ and3(O1, Otos_i, Otos_i); break; case _or: __ or3(O1, Otos_i, Otos_i); break; @@ -1010,9 +1079,9 @@ void TemplateTable::idiv() { - // %%%%% Later: ForSPARC/V7 call .sdiv library routine, + // %%%%% Later: ForSPARC/V7 call .sdiv library routine, // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe. 
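The do_oop_store helper introduced near the top of templateTable_sparc.cpp (and used by aastore above) is the interpreter's single funnel for reference stores: it emits the possibly compressed store_heap_oop and then whichever write barrier the active BarrierSet requires, either a G1 pre/post pair or a card-table dirtying, and it skips the post barrier entirely when the stored value is NULL. A minimal card-table post barrier in plain C++, assuming 512-byte cards and a dirty value of 0 (both are the usual HotSpot choices, but treat them as assumptions here):

    #include <cstdint>
    #include <cstddef>

    static const int     kCardShift = 9;        // 512-byte cards (assumed)
    static uint8_t       card_table[1 << 20];   // enough cards for a 512 MB example heap
    static const uint8_t kDirtyCard = 0;        // assumed encoding

    // Post barrier: after a reference store into 'field', dirty the card covering it
    // so the next GC scans that 512-byte window for old-to-young pointers.
    static void card_write_barrier_post(const void* heap_base, const void* field) {
      size_t offset = (const char*)field - (const char*)heap_base;
      card_table[offset >> kCardShift] = kDirtyCard;
    }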
- + transition(itos, itos); __ pop_i(O1); // get 1st op @@ -1027,7 +1096,7 @@ __ bind(neg); Label ok; - __ tst(Otos_i); + __ tst(Otos_i); __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch ); const int min_int = 0x80000000; @@ -1035,7 +1104,7 @@ __ cmp(Otos_i, -1); __ br(Assembler::notEqual, false, Assembler::pt, regular); #ifdef _LP64 - // Don't put set in delay slot + // Don't put set in delay slot // Set will turn into multiple instructions in 64 bit mode __ delayed()->nop(); __ set(min_int, G4_scratch); @@ -1056,7 +1125,7 @@ void TemplateTable::irem() { transition(itos, itos); __ mov(Otos_i, O2); // save divisor - idiv(); // %%%% Hack: exploits fact that idiv leaves dividend in O1 + idiv(); // %%%% Hack: exploits fact that idiv leaves dividend in O1 __ smul(Otos_i, O2, Otos_i); __ sub(O1, Otos_i, Otos_i); } @@ -1111,7 +1180,7 @@ void TemplateTable::lshl() { - transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra + transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra __ pop_l(O2); // shift value in O2, O3 #ifdef _LP64 @@ -1123,7 +1192,7 @@ void TemplateTable::lshr() { - transition(itos, ltos); // %%%% see lshl comment + transition(itos, ltos); // %%%% see lshl comment __ pop_l(O2); // shift value in O2, O3 #ifdef _LP64 @@ -1136,7 +1205,7 @@ void TemplateTable::lushr() { - transition(itos, ltos); // %%%% see lshl comment + transition(itos, ltos); // %%%% see lshl comment __ pop_l(O2); // shift value in O2, O3 #ifdef _LP64 @@ -1154,7 +1223,7 @@ case sub: __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break; case mul: __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break; case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break; - case rem: + case rem: assert(Ftos_f == F0, "just checking"); #ifdef _LP64 // LP64 calling conventions use F1, F3 for passing 2 floats @@ -1181,7 +1250,7 @@ case sub: __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break; case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break; case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break; - case rem: + case rem: #ifdef _LP64 // Pass arguments in D0, D2 __ fmov(FloatRegisterImpl::D, Ftos_f, F2 ); @@ -1253,7 +1322,7 @@ void TemplateTable::convert() { -// %%%%% Factor this first part accross platforms +// %%%%% Factor this first part accross platforms #ifdef ASSERT TosState tos_in = ilgl; TosState tos_out = ilgl; @@ -1296,45 +1365,45 @@ transition(tos_in, tos_out); #endif - + // Conversion Label done; switch (bytecode()) { - case Bytecodes::_i2l: + case Bytecodes::_i2l: #ifdef _LP64 // Sign extend the 32 bits __ sra ( Otos_i, 0, Otos_l ); #else - __ addcc(Otos_i, 0, Otos_l2); - __ br(Assembler::greaterEqual, true, Assembler::pt, done); - __ delayed()->clr(Otos_l1); - __ set(~0, Otos_l1); + __ addcc(Otos_i, 0, Otos_l2); + __ br(Assembler::greaterEqual, true, Assembler::pt, done); + __ delayed()->clr(Otos_l1); + __ set(~0, Otos_l1); #endif break; - + case Bytecodes::_i2f: __ st(Otos_i, __ d_tmp ); __ ldf(FloatRegisterImpl::S, __ d_tmp, F0); __ fitof(FloatRegisterImpl::S, F0, Ftos_f); break; - + case Bytecodes::_i2d: __ st(Otos_i, __ d_tmp); __ ldf(FloatRegisterImpl::S, __ d_tmp, F0); __ fitof(FloatRegisterImpl::D, F0, Ftos_f); break; - - case Bytecodes::_i2b: - __ sll(Otos_i, 24, Otos_i); - __ sra(Otos_i, 24, Otos_i); + + case Bytecodes::_i2b: + __ sll(Otos_i, 24, Otos_i); + __ sra(Otos_i, 24, 
Otos_i); break; - + case Bytecodes::_i2c: - __ sll(Otos_i, 16, Otos_i); - __ srl(Otos_i, 16, Otos_i); + __ sll(Otos_i, 16, Otos_i); + __ srl(Otos_i, 16, Otos_i); break; - - case Bytecodes::_i2s: + + case Bytecodes::_i2s: __ sll(Otos_i, 16, Otos_i); __ sra(Otos_i, 16, Otos_i); break; @@ -1347,7 +1416,7 @@ __ sra(Otos_l, 0, Otos_i); #endif break; - + case Bytecodes::_l2f: case Bytecodes::_l2d: __ st_long(Otos_l, __ d_tmp); @@ -1355,9 +1424,9 @@ if (VM_Version::v9_instructions_work()) { if (bytecode() == Bytecodes::_l2f) { - __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f); + __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f); } else { - __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d); + __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d); } } else { __ call_VM_leaf( @@ -1368,7 +1437,7 @@ ); } break; - + case Bytecodes::_f2i: { Label isNaN; // result must be 0 if value is NaN; test by comparing value to itself @@ -1386,8 +1455,8 @@ __ bind(isNaN); } break; - - case Bytecodes::_f2l: + + case Bytecodes::_f2l: // must uncache tos __ push_f(); #ifdef _LP64 @@ -1398,7 +1467,7 @@ __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l)); break; - case Bytecodes::_f2d: + case Bytecodes::_f2d: __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f); break; @@ -1418,7 +1487,7 @@ ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i) : CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); break; - + case Bytecodes::_d2f: if (VM_Version::v9_instructions_work()) { __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f); @@ -1431,7 +1500,7 @@ __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f)); } break; - + default: ShouldNotReachHere(); } __ bind(done); @@ -1473,7 +1542,7 @@ const Register O1_disp = O1; if (is_wide) __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC); else __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC); - + // Handle all the JSR stuff here, then exit. // It's much shorter and cleaner than intermingling with the // non-JSR normal-branch stuff occuring below. @@ -1482,7 +1551,7 @@ __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch); __ sub(Lbcp, G3_scratch, G3_scratch); __ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 
5 : 3), Otos_i); - + // Bump Lbcp to target of JSR __ add(Lbcp, O1_disp, Lbcp); // Push returnAddress for "ret" on stack @@ -1505,7 +1574,7 @@ __ br( Assembler::positive, false, Assembler::pn, Lforward ); // Bump bytecode pointer by displacement (take the branch) __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr - + // Update Backedge branch separately from invocations const Register G4_invoke_ctr = G4; __ increment_backedge_counter(G4_invoke_ctr, G1_scratch); @@ -1519,7 +1588,7 @@ __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch); } } - + __ bind(Lforward); } else // Bump bytecode pointer by displacement (take the branch) @@ -1527,7 +1596,7 @@ // continue with bytecode @ target // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above, - // %%%%% and changing dispatch_next to dispatch_only + // %%%%% and changing dispatch_next to dispatch_only __ dispatch_next(vtos); } @@ -1626,8 +1695,8 @@ __ add(Lbcp, BytesPerInt, O1); __ and3(O1, -BytesPerInt, O1); // load lo, hi - __ ld(O1, 1 * BytesPerInt, O2); // Low Byte - __ ld(O1, 2 * BytesPerInt, O3); // High Byte + __ ld(O1, 1 * BytesPerInt, O2); // Low Byte + __ ld(O1, 2 * BytesPerInt, O3); // High Byte #ifdef _LP64 // Sign extend the 32 bits __ sra ( Otos_i, 0, Otos_i ); @@ -1659,7 +1728,7 @@ void TemplateTable::lookupswitch() { transition(itos, itos); __ stop("lookupswitch bytecode should have been rewritten"); -} +} void TemplateTable::fast_linearswitch() { transition(itos, vtos); @@ -1673,7 +1742,7 @@ __ add(O1, 2 * BytesPerInt, O3); // set first pair addr __ ba(false, loop_entry); __ delayed()->add(O3, O2, O2); // counter now points past last pair - + // table search __ bind(loop); __ cmp(O4, Otos_i); @@ -1685,7 +1754,7 @@ __ cmp(O2, O3); __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop); __ delayed()->ld(O3, 0, O4); - + // default case __ ld(O1, 0, O4); // get default offset if (ProfileInterpreter) { @@ -1701,7 +1770,7 @@ __ sub(O3, 2*BytesPerInt, O3); __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs __ profile_switch_case(O3, O1, O2, G3_scratch); - + __ bind(continue_execution); } __ add(Lbcp, O4, Lbcp); @@ -1735,19 +1804,19 @@ // // (i.e., if key is within array, i is the correct index) // return i; // } - + // register allocation assert(Otos_i == O0, "alias checking"); const Register Rkey = Otos_i; // already set (tosca) const Register Rarray = O1; const Register Ri = O2; const Register Rj = O3; - const Register Rh = O4; + const Register Rh = O4; const Register Rscratch = O5; - + const int log_entry_size = 3; const int entry_size = 1 << log_entry_size; - + Label found; // Find Array start __ add(Lbcp, 3 * BytesPerInt, Rarray); @@ -1760,7 +1829,7 @@ __ ba(false, entry); __ delayed()->ld( Rarray, -BytesPerInt, Rj); // (Rj is already in the native byte-ordering.) - + // binary search loop { Label loop; __ bind( loop ); @@ -1783,10 +1852,10 @@ Label end_of_if; __ br( Assembler::less, true, Assembler::pt, end_of_if ); __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh - __ mov( Rh, Ri ); // else i = h + __ mov( Rh, Ri ); // else i = h __ bind(end_of_if); // } } - + // while (i+1 < j) __ bind( entry ); __ add( Ri, 1, Rscratch ); @@ -1794,12 +1863,12 @@ __ br( Assembler::less, true, Assembler::pt, loop ); __ delayed()->add( Ri, Rj, Rh ); // start h = i + j >> 1; } - + // end of binary search, result index is i (must check again!) 
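The register comments above implement the lookupswitch binary search spelled out in the pseudocode earlier in this hunk. The same loop over (match, offset) pairs in plain C++ (the pair layout and signature are illustrative):

    #include <cstdint>

    struct Pair { int32_t match; int32_t offset; };

    // Returns the index i where the search settles; the caller re-checks
    // array[i].match against the key, exactly as the stub does.
    static int lookupswitch_search(const Pair* array, int n, int32_t key) {
      int i = 0, j = n;
      while (i + 1 < j) {
        int h = (i + j) >> 1;              // i <= h < j
        if (key < array[h].match) j = h;   // key is below array[h]
        else                      i = h;   // key is in array[h .. j)
      }
      return i;
    }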
Label default_case; Label continue_execution; if (ProfileInterpreter) { - __ mov( Ri, Rh ); // Save index in i for profiling + __ mov( Ri, Rh ); // Save index in i for profiling } __ sll( Ri, log_entry_size, Ri ); __ ld( Rarray, Ri, Rscratch ); @@ -1807,7 +1876,7 @@ __ cmp( Rkey, Rscratch ); __ br( Assembler::notEqual, true, Assembler::pn, default_case ); __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j - + // entry found -> j = offset __ inc( Ri, BytesPerInt ); __ profile_switch_case(Rh, Rj, Rscratch, Rkey); @@ -1818,7 +1887,7 @@ __ ba(false, continue_execution); __ delayed()->nop(); } - + __ bind(default_case); // fall through (if not profiling) __ profile_switch_default(Ri); @@ -1836,7 +1905,7 @@ assert(state == vtos, "only valid state"); __ mov(G0, G3_scratch); __ access_local_ptr(G3_scratch, Otos_i); - __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), O2); + __ load_klass(Otos_i, O2); __ set(JVM_ACC_HAS_FINALIZER, G3); __ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2); __ andcc(G3, O2, G0); @@ -1865,9 +1934,9 @@ // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of // memory barrier (i.e., it's not sufficient that the interpreter does not // reorder volatile references, the hardware also must not reorder them). -// +// // According to the new Java Memory Model (JMM): -// (1) All volatiles are serialized wrt to each other. +// (1) All volatiles are serialized wrt to each other. // ALSO reads & writes act as aquire & release, so: // (2) A read cannot let unrelated NON-volatile memory refs that happen after // the read float up to before the read. It's OK for non-volatile memory refs @@ -1921,7 +1990,7 @@ default : ShouldNotReachHere(); break; } // first time invocation - must resolve first - __ call_VM(noreg, entry, O1); + __ call_VM(noreg, entry, O1); // Update registers with resolved info __ get_cache_and_index_at_bcp(Rcache, index, 1); __ bind(resolved); @@ -1951,7 +2020,7 @@ const int flags_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()); // access constant pool cache fields - const int index_offset = in_bytes(cp_base_offset + + const int index_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()); if (is_invokevfinal) { @@ -1996,7 +2065,7 @@ bool has_tos) { ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); - if (JvmtiExport::can_post_field_access()) { + if (JvmtiExport::can_post_field_access()) { // Check to see if a field access watch has been set before we take // the time to call into the VM. Label Label1; @@ -2016,7 +2085,7 @@ } else { if (has_tos) { // save object pointer before call_VM() clobbers it - __ mov(Otos_i, Lscratch); + __ push_ptr(Otos_i); // put object on tos where GC wants it. 
} else { // Load top of stack (do not pop the value off the stack); __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i); @@ -2028,7 +2097,7 @@ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), Otos_i, Rcache); if (!is_static && has_tos) { - __ mov(Lscratch, Otos_i); // restore object pointer + __ pop_ptr(Otos_i); // restore object pointer __ verify_oop(Otos_i); } __ get_cache_and_index_at_bcp(Rcache, index, 1); @@ -2057,7 +2126,7 @@ } Label exit; - + Assembler::Membar_mask_bits membar_bits = Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); @@ -2081,7 +2150,7 @@ __ delayed() ->cmp(Rflags, itos); // atos - __ ld_ptr(Rclass, Roffset, Otos_i); + __ load_heap_oop(Rclass, Roffset, Otos_i); __ verify_oop(Otos_i); __ push(atos); if (!is_static) { @@ -2174,7 +2243,7 @@ __ delayed() ->tst(Lscratch); // ftos - __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f); + __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f); __ push(ftos); if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch); @@ -2231,7 +2300,7 @@ Label exit; - Assembler::Membar_mask_bits membar_bits = + Assembler::Membar_mask_bits membar_bits = Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); if (__ membar_has_effect(membar_bits)) { // Get volatile flag @@ -2240,34 +2309,34 @@ } switch (bytecode()) { - case Bytecodes::_fast_bgetfield: + case Bytecodes::_fast_bgetfield: __ ldsb(Otos_i, Roffset, Otos_i); break; - case Bytecodes::_fast_cgetfield: + case Bytecodes::_fast_cgetfield: __ lduh(Otos_i, Roffset, Otos_i); break; - case Bytecodes::_fast_sgetfield: + case Bytecodes::_fast_sgetfield: __ ldsh(Otos_i, Roffset, Otos_i); break; - case Bytecodes::_fast_igetfield: + case Bytecodes::_fast_igetfield: __ ld(Otos_i, Roffset, Otos_i); break; - case Bytecodes::_fast_lgetfield: + case Bytecodes::_fast_lgetfield: __ ld_long(Otos_i, Roffset, Otos_l); break; - case Bytecodes::_fast_fgetfield: - __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f); + case Bytecodes::_fast_fgetfield: + __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f); break; - case Bytecodes::_fast_dgetfield: - __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d); + case Bytecodes::_fast_dgetfield: + __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d); break; case Bytecodes::_fast_agetfield: - __ ld_ptr(Otos_i, Roffset, Otos_i); + __ load_heap_oop(Otos_i, Roffset, Otos_i); break; default: ShouldNotReachHere(); } - + if (__ membar_has_effect(membar_bits)) { __ btst(Lscratch, Rflags); __ br(Assembler::zero, false, Assembler::pt, exit); @@ -2282,7 +2351,7 @@ } void TemplateTable::jvmti_post_fast_field_mod() { - if (JvmtiExport::can_post_field_modification()) { + if (JvmtiExport::can_post_field_modification()) { // Check to see if a field modification watch has been set before we take // the time to call into the VM. 
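In the fast get-field paths above, the LoadLoad|LoadStore barrier emitted after a volatile read is acquire semantics: later loads and stores must not float above the volatile load. The btst on the flags register simply skips the barrier when the field was not declared volatile. A rough C++ analogue of the two paths (the holder type is illustrative):

    #include <atomic>
    #include <cstdint>

    struct Holder { std::atomic<int32_t> value; };

    static int32_t get_volatile(const Holder& h) {
      return h.value.load(std::memory_order_acquire);   // load, then LoadLoad|LoadStore
    }

    static int32_t get_plain(const Holder& h) {
      return h.value.load(std::memory_order_relaxed);   // no barrier on the fast path
    }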
Label done; @@ -2417,7 +2486,7 @@ jvmti_post_field_mod(Rcache, index, is_static); load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); - Assembler::Membar_mask_bits read_bits = + Assembler::Membar_mask_bits read_bits = Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; @@ -2425,10 +2494,10 @@ if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); __ and3(Rflags, Lscratch, Lscratch); - + if (__ membar_has_effect(read_bits)) { __ tst(Lscratch); - __ br(Assembler::zero, false, Assembler::pt, notVolatile); + __ br(Assembler::zero, false, Assembler::pt, notVolatile); __ delayed()->nop(); volatile_barrier(read_bits); __ bind(notVolatile); @@ -2451,31 +2520,32 @@ // atos __ pop_ptr(); __ verify_oop(Otos_i); - __ st_ptr(Otos_i, Rclass, Roffset); - __ store_check(G1_scratch, Rclass, Roffset); + + do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); + __ ba(false, checkVolatile); __ delayed()->tst(Lscratch); - - __ bind(notObj); - // cmp(Rflags, itos ); + __ bind(notObj); + + // cmp(Rflags, itos ); __ br(Assembler::notEqual, false, Assembler::pt, notInt); - __ delayed() ->cmp(Rflags, btos ); + __ delayed() ->cmp(Rflags, btos ); // itos __ pop_i(); __ st(Otos_i, Rclass, Roffset); __ ba(false, checkVolatile); __ delayed()->tst(Lscratch); - + __ bind(notInt); - + } else { // putfield with int type most likely, check that first __ cmp(Rflags, itos ); __ br(Assembler::notEqual, false, Assembler::pt, notInt); __ delayed() ->cmp(Rflags, atos ); - + // itos __ pop_i(); pop_and_check_object(Rclass); @@ -2483,23 +2553,24 @@ patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch); __ ba(false, checkVolatile); __ delayed()->tst(Lscratch); - + __ bind(notInt); - // cmp(Rflags, atos ); + // cmp(Rflags, atos ); __ br(Assembler::notEqual, false, Assembler::pt, notObj); - __ delayed() ->cmp(Rflags, btos ); - + __ delayed() ->cmp(Rflags, btos ); + // atos __ pop_ptr(); pop_and_check_object(Rclass); __ verify_oop(Otos_i); - __ st_ptr(Otos_i, Rclass, Roffset); - __ store_check(G1_scratch, Rclass, Roffset); + + do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); + patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch); __ ba(false, checkVolatile); __ delayed()->tst(Lscratch); - - __ bind(notObj); + + __ bind(notObj); } // cmp(Rflags, btos ); @@ -2522,7 +2593,7 @@ __ br(Assembler::notEqual, false, Assembler::pt, notLong); __ delayed() ->cmp(Rflags, ctos ); - // ltos + // ltos __ pop_l(); if (!is_static) pop_and_check_object(Rclass); __ st_long(Otos_l, Rclass, Roffset); @@ -2534,9 +2605,9 @@ __ bind(notLong); - // cmp(Rflags, ctos ); + // cmp(Rflags, ctos ); __ br(Assembler::notEqual, false, Assembler::pt, notChar); - __ delayed() ->cmp(Rflags, stos ); + __ delayed() ->cmp(Rflags, stos ); // ctos (char) __ pop_i(); @@ -2548,10 +2619,10 @@ __ ba(false, checkVolatile); __ delayed()->tst(Lscratch); - __ bind(notChar); - // cmp(Rflags, stos ); + __ bind(notChar); + // cmp(Rflags, stos ); __ br(Assembler::notEqual, false, Assembler::pt, notShort); - __ delayed() ->cmp(Rflags, ftos ); + __ delayed() ->cmp(Rflags, ftos ); // stos (char) __ pop_i(); @@ -2563,7 +2634,7 @@ __ ba(false, checkVolatile); __ delayed()->tst(Lscratch); - __ bind(notShort); + __ bind(notShort); // cmp(Rflags, ftos ); __ br(Assembler::notZero, false, Assembler::pt, notFloat); __ 
delayed()->nop(); @@ -2571,7 +2642,7 @@ // ftos __ pop_f(); if (!is_static) pop_and_check_object(Rclass); - __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); + __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch); } @@ -2593,7 +2664,7 @@ if (__ membar_has_effect(write_bits)) { // __ tst(Lscratch); in delay slot - __ br(Assembler::zero, false, Assembler::pt, exit); + __ br(Assembler::zero, false, Assembler::pt, exit); __ delayed()->nop(); volatile_barrier(Assembler::StoreLoad); __ bind(exit); @@ -2624,7 +2695,7 @@ __ and3(Rflags, Lscratch, Lscratch); if (__ membar_has_effect(read_bits)) { __ tst(Lscratch); - __ br(Assembler::zero, false, Assembler::pt, notVolatile); + __ br(Assembler::zero, false, Assembler::pt, notVolatile); __ delayed()->nop(); volatile_barrier(read_bits); __ bind(notVolatile); @@ -2642,14 +2713,13 @@ case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break; case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break; case Bytecodes::_fast_fputfield: - __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); - break; - case Bytecodes::_fast_dputfield: - __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); + __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); + break; + case Bytecodes::_fast_dputfield: + __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); break; case Bytecodes::_fast_aputfield: - __ st_ptr(Otos_i, Rclass, Roffset); - __ store_check(G1_scratch, Rclass, Roffset); + do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); break; default: ShouldNotReachHere(); @@ -2657,7 +2727,7 @@ if (__ membar_has_effect(write_bits)) { __ tst(Lscratch); - __ br(Assembler::zero, false, Assembler::pt, exit); + __ br(Assembler::zero, false, Assembler::pt, exit); __ delayed()->nop(); volatile_barrier(Assembler::StoreLoad); __ bind(exit); @@ -2690,17 +2760,17 @@ __ verify_oop(Rreceiver); __ null_check(Rreceiver); - if (state == atos) { - __ ld_ptr(Rreceiver, Roffset, Otos_i); + if (state == atos) { + __ load_heap_oop(Rreceiver, Roffset, Otos_i); } else if (state == itos) { - __ ld (Rreceiver, Roffset, Otos_i) ; + __ ld (Rreceiver, Roffset, Otos_i) ; } else if (state == ftos) { - __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f); + __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f); } else { ShouldNotReachHere(); } - Assembler::Membar_mask_bits membar_bits = + Assembler::Membar_mask_bits membar_bits = Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); if (__ membar_has_effect(membar_bits)) { @@ -2711,7 +2781,7 @@ Label notVolatile; __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); __ btst(Rflags, Lscratch); - __ br(Assembler::zero, false, Assembler::pt, notVolatile); + __ br(Assembler::zero, false, Assembler::pt, notVolatile); __ delayed()->nop(); volatile_barrier(membar_bits); __ bind(notVolatile); @@ -2765,10 +2835,10 @@ load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true); __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore - // Check for vfinal + // Check for vfinal __ set((1 << ConstantPoolCacheEntry::vfinalMethod), G4_scratch); __ btst(Rret, G4_scratch); - __ br(Assembler::zero, false, Assembler::pt, notFinal); + __ br(Assembler::zero, false, Assembler::pt, notFinal); __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp); @@ -2785,15 +2855,15 @@ // get return 
address Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); __ load_address(table); - __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type + __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type // Make sure we don't need to mask Rret for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); __ sll(Rret, LogBytesPerWord, Rret); - __ ld_ptr(Rtemp, Rret, Rret); // get return address + __ ld_ptr(Rtemp, Rret, Rret); // get return address // get receiver klass __ null_check(O0, oopDesc::klass_offset_in_bytes()); - __ ld_ptr(Address(O0, 0, oopDesc::klass_offset_in_bytes()), Rrecv); + __ load_klass(O0, Rrecv); __ verify_oop(Rrecv); __ profile_virtual_call(Rrecv, O4); @@ -2818,8 +2888,8 @@ // Load receiver from stack slot __ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch); __ load_receiver(G4_scratch, O0); - - // receiver NULL check + + // receiver NULL check __ null_check(O0); __ profile_final_call(O4); @@ -2827,11 +2897,11 @@ // get return address Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); __ load_address(table); - __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type + __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type // Make sure we don't need to mask Rret for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); __ sll(Rret, LogBytesPerWord, Rret); - __ ld_ptr(Rtemp, Rret, Rret); // get return address + __ ld_ptr(Rtemp, Rret, Rret); // get return address // do the call @@ -2861,11 +2931,11 @@ // get return address Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); __ load_address(table); - __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type + __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type // Make sure we don't need to mask Rret for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); __ sll(Rret, LogBytesPerWord, Rret); - __ ld_ptr(Rtemp, Rret, Rret); // get return address + __ ld_ptr(Rtemp, Rret, Rret); // get return address // do the call __ call_from_interpreter(Rscratch, Gargs, Rret); @@ -2888,11 +2958,11 @@ // get return address Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); __ load_address(table); - __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type + __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type // Make sure we don't need to mask Rret for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); __ sll(Rret, LogBytesPerWord, Rret); - __ ld_ptr(Rtemp, Rret, Rret); // get return address + __ ld_ptr(Rtemp, Rret, Rret); // get return address // do the call __ call_from_interpreter(Rscratch, Gargs, Rret); @@ -2910,10 +2980,10 @@ Label notFinal; - // Check for vfinal + // Check for vfinal __ set((1 << ConstantPoolCacheEntry::vfinalMethod), Rscratch); __ btst(Rflags, Rscratch); - __ br(Assembler::zero, false, Assembler::pt, notFinal); + __ br(Assembler::zero, false, Assembler::pt, notFinal); __ delayed()->nop(); __ profile_final_call(O4); @@ -2953,15 +3023,15 @@ // get return address Address table(Rscratch, (address)Interpreter::return_5_addrs_by_index_table()); __ load_address(table); - __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type + __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type // Make sure we don't need to mask Rret for tosBits after the above 
shift ConstantPoolCacheEntry::verify_tosBits(); __ sll(Rret, LogBytesPerWord, Rret); - __ ld_ptr(Rscratch, Rret, Rret); // get return address + __ ld_ptr(Rscratch, Rret, Rret); // get return address // get receiver klass __ null_check(O0, oopDesc::klass_offset_in_bytes()); - __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), RklassOop); + __ load_klass(O0, RklassOop); __ verify_oop(RklassOop); // Special case of invokeinterface called for virtual method of @@ -2971,7 +3041,7 @@ Label notMethod; __ set((1 << ConstantPoolCacheEntry::methodInterface), Rscratch); __ btst(Rflags, Rscratch); - __ br(Assembler::zero, false, Assembler::pt, notMethod); + __ br(Assembler::zero, false, Assembler::pt, notMethod); __ delayed()->nop(); invokeinterface_object_method(RklassOop, Rinterface, Rret, Rflags); @@ -3004,7 +3074,7 @@ __ bind(search); - __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp); + __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp); { Label ok; @@ -3092,12 +3162,12 @@ // get instance_size in instanceKlass (already aligned) //__ ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset); - + // make sure klass does not have has_finalizer, or is abstract, or interface or java/lang/Class __ btst(Klass::_lh_instance_slow_path_bit, Roffset); __ br(Assembler::notZero, false, Assembler::pn, slow_case); __ delayed()->nop(); - + // allocate the instance // 1) Try to allocate in the TLAB // 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden @@ -3112,12 +3182,12 @@ Register RtopAddr = G3_scratch, RtlabWasteLimitValue = G3_scratch; Register RnewTopValue = G1_scratch; Register RendValue = Rscratch; - Register RfreeValue = RnewTopValue; - + Register RfreeValue = RnewTopValue; + // check if we can allocate in the TLAB __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RalocatedObject __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue); - __ add(RoldTopValue, Roffset, RnewTopValue); + __ add(RoldTopValue, Roffset, RnewTopValue); // if there is enough space, we do not CAS and do not clear __ cmp(RnewTopValue, RendValue); @@ -3133,7 +3203,7 @@ if (allow_shared_alloc) { // Check if tlab should be discarded (refill_waste_limit >= free) __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue); - __ sub(RendValue, RoldTopValue, RfreeValue); + __ sub(RendValue, RoldTopValue, RfreeValue); #ifdef _LP64 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue); #else @@ -3150,7 +3220,7 @@ // No allocation in the shared eden. __ br(Assembler::always, false, Assembler::pt, slow_case); __ delayed()->nop(); - } + } } // Allocation in the shared Eden @@ -3168,7 +3238,7 @@ __ ld_ptr(RendValue, 0, RendValue); __ ld_ptr(RtopAddr, 0, RoldTopValue); __ add(RoldTopValue, Roffset, RnewTopValue); - + // RnewTopValue contains the top address after the new object // has been allocated. 
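The allocation sequence above is the usual two-level fast path for _new: first a bump-pointer allocation in the thread-local allocation buffer, which needs no atomics because the TLAB belongs to one thread, then, if the TLAB cannot hold the object and is not worth discarding, a CAS on the shared eden top that retries until it wins or eden is exhausted. A compact sketch of both levels (the structures and names are illustrative):

    #include <atomic>
    #include <cstddef>

    struct Tlab { char* top; char* end; };
    static std::atomic<char*> eden_top{nullptr};   // shared bump pointer

    static void* tlab_allocate(Tlab& t, size_t size) {
      char* obj = t.top;
      if (obj + size > t.end) return nullptr;      // does not fit: refill or go shared
      t.top = obj + size;                          // private bump, no CAS needed
      return obj;
    }

    static void* eden_allocate(char* eden_end, size_t size) {
      char* obj = eden_top.load(std::memory_order_relaxed);
      while (obj + size <= eden_end) {
        // On failure 'obj' is refreshed with the current top and we retry.
        if (eden_top.compare_exchange_weak(obj, obj + size)) return obj;
      }
      return nullptr;                              // eden exhausted: take the slow case
    }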
__ cmp(RnewTopValue, RendValue); @@ -3224,14 +3294,15 @@ __ set((intptr_t)markOopDesc::prototype(), G4_scratch); } __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark - __ st_ptr(RinstanceKlass, RallocatedObject, oopDesc::klass_offset_in_bytes()); // klass + __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed + __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms) { SkipIfEqual skip_if( _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero); // Trigger dtrace event __ push(atos); - __ call_VM_leaf(noreg, + __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0); __ pop(atos); } @@ -3280,7 +3351,7 @@ __ delayed()->nop(); // Get value klass in RobjKlass - __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass + __ load_klass(Otos_i, RobjKlass); // get value klass // Get constant pool tag __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned); @@ -3298,13 +3369,14 @@ __ pop_ptr(Otos_i, G3_scratch); // restore receiver __ br(Assembler::always, false, Assembler::pt, resolved); - __ delayed()->ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass + __ delayed()->nop(); // Extract target class from constant pool __ bind(quicked); __ add(Roffset, sizeof(constantPoolOopDesc), Roffset); __ ld_ptr(Lscratch, Roffset, RspecifiedKlass); __ bind(resolved); + __ load_klass(Otos_i, RobjKlass); // get value klass // Generate a fast subtype check. Branch to cast_ok if no // failure. Throw exception if failure. @@ -3337,7 +3409,7 @@ __ delayed()->nop(); // Get value klass in RobjKlass - __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass + __ load_klass(Otos_i, RobjKlass); // get value klass // Get constant pool tag __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned); @@ -3355,7 +3427,7 @@ __ pop_ptr(Otos_i, G3_scratch); // restore receiver __ br(Assembler::always, false, Assembler::pt, resolved); - __ delayed()->ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass + __ delayed()->nop(); // Extract target class from constant pool @@ -3364,6 +3436,7 @@ __ get_constant_pool(Lscratch); __ ld_ptr(Lscratch, Roffset, RspecifiedKlass); __ bind(resolved); + __ load_klass(Otos_i, RobjKlass); // get value klass // Generate a fast subtype check. Branch to cast_ok if no // failure. Return 0 if failure. @@ -3384,9 +3457,9 @@ void TemplateTable::_breakpoint() { // Note: We get here even if we are single stepping.. - // jbug inists on setting breakpoints at every bytecode - // even if we are in single step mode. - + // jbug inists on setting breakpoints at every bytecode + // even if we are in single step mode. + transition(vtos, vtos); // get the unpatched byte code __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp); @@ -3407,7 +3480,7 @@ transition(atos, vtos); // This works because exception is cached in Otos_i which is same as O0, - // which is same as what throw_exception_entry_expects + // which is same as what throw_exception_entry_expects assert(Otos_i == Oexception, "see explanation above"); __ verify_oop(Otos_i); @@ -3454,7 +3527,7 @@ __ bind( loop ); - __ verify_oop(O4); // verify each monitor's oop + __ verify_oop(O4); // verify each monitor's oop __ tst(O4); // is this entry unused? 
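The header initialization above shows the 64-bit object layout under compressed oops: the mark word stays a full 8 bytes, the klass reference shrinks to a 32-bit narrow pointer, and store_klass_gap(G0, ...) zeroes the 4-byte hole left behind (which field layout may later reuse). The klass is written last, as the comment notes, so a concurrent collector never sees a non-null klass on an otherwise uninitialized object. A sketch of the two layouts (field names are illustrative):

    #include <cstdint>

    struct HeaderWide {        // -XX:-UseCompressedOops
      uint64_t mark;           // 8-byte mark word
      uint64_t klass;          // 8-byte klass pointer
    };

    struct HeaderNarrow {      // -XX:+UseCompressedOops
      uint64_t mark;           // 8-byte mark word
      uint32_t narrow_klass;   // 4-byte compressed klass pointer
      uint32_t klass_gap;      // zeroed by store_klass_gap; may later hold a field
    };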
if (VM_Version::v9_instructions_work()) __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1); @@ -3462,7 +3535,7 @@ Label L; __ br( Assembler::zero, true, Assembler::pn, L ); __ delayed()->mov(O3, O1); // rememeber this one if match - __ bind(L); + __ bind(L); } __ cmp(O4, O0); // check if current entry is for same object @@ -3490,10 +3563,10 @@ __ bind(allocated); } - // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly. + // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly. // The object has already been poped from the stack, so the expression stack looks correct. __ inc(Lbcp); - + __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object __ lock_object(O1, O0); @@ -3501,7 +3574,7 @@ __ generate_stack_overflow_check(0); // The bcp has already been incremented. Just need to dispatch to next instruction. - __ dispatch_next(vtos); + __ dispatch_next(vtos); } @@ -3518,11 +3591,11 @@ __ ba(false, entry ); // use Lscratch to hold monitor elem to check, start with most recent monitor, // By using a local it survives the call to the C routine. - __ delayed()->mov( Lmonitors, Lscratch ); + __ delayed()->mov( Lmonitors, Lscratch ); __ bind( loop ); - __ verify_oop(O4); // verify each monitor's oop + __ verify_oop(O4); // verify each monitor's oop __ cmp(O4, O0); // check if current entry is for desired object __ brx( Assembler::equal, true, Assembler::pt, found ); __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit @@ -3550,7 +3623,7 @@ void TemplateTable::wide() { transition(vtos, vtos); __ ldub(Lbcp, 1, G3_scratch);// get next bc - __ sll(G3_scratch, LogBytesPerWord, G3_scratch); + __ sll(G3_scratch, LogBytesPerWord, G3_scratch); Address ep(G4_scratch, (address)Interpreter::_wentry_point); __ load_address(ep); __ ld_ptr(ep.base(), G3_scratch, G3_scratch); @@ -3569,7 +3642,7 @@ __ ldub( Lbcp, 3, Lscratch); __ sll( Lscratch, Interpreter::logStackElementSize(), Lscratch); // Lesp points past last_dim, so set to O1 to first_dim address - __ add( Lesp, Lscratch, O1); + __ add( Lesp, Lscratch, O1); call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1); __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack } --- old/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp 2009-08-01 04:07:36.385760919 +0100 +++ new/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp 2009-08-01 04:07:36.295227342 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vm_version_sparc.cpp 1.56 07/07/02 18:40:59 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,12 @@ int VM_Version::_features = VM_Version::unknown_m; const char* VM_Version::_features_str = ""; +bool VM_Version::is_niagara1_plus() { + // This is a placeholder until the real test is determined. + return is_niagara1() && + (os::processor_count() > maximum_niagara1_processor_count()); +} + void VM_Version::initialize() { _features = determine_features(); PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes(); @@ -61,6 +67,15 @@ if (FLAG_IS_DEFAULT(UseInlineCaches)) { UseInlineCaches = false; } +#ifdef _LP64 + // Single issue niagara1 is slower for CompressedOops + // but niagaras after that it's fine. 
+ if (!is_niagara1_plus()) { + if (FLAG_IS_DEFAULT(UseCompressedOops)) { + FLAG_SET_ERGO(bool, UseCompressedOops, false); + } + } +#endif // _LP64 #ifdef COMPILER2 // Indirect branch is the same cost as direct if (FLAG_IS_DEFAULT(UseJumpTables)) { @@ -163,3 +178,13 @@ void VM_Version::revert() { _features = saved_features; } + +unsigned int VM_Version::calc_parallel_worker_threads() { + unsigned int result; + if (is_niagara1_plus()) { + result = nof_parallel_worker_threads(5, 16, 8); + } else { + result = nof_parallel_worker_threads(5, 8, 8); + } + return result; +} --- old/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp 2009-08-01 04:07:37.236668013 +0100 +++ new/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp 2009-08-01 04:07:37.161140297 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vm_version_sparc.hpp 1.33 07/10/04 10:49:21 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -67,6 +67,11 @@ static bool is_niagara1(int features) { return (features & niagara1_m) == niagara1_m; } + static int maximum_niagara1_processor_count() { return 32; } + // Returns true if the platform is in the niagara line and + // newer than the niagara1. + static bool is_niagara1_plus(); + public: // Initialization static void initialize(); @@ -132,4 +137,7 @@ // Override the Abstract_VM_Version implementation. static uint page_size_count() { return is_sun4v() ? 4 : 2; } + + // Calculates the number of parallel threads + static unsigned int calc_parallel_worker_threads(); }; --- old/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp 2009-08-01 04:07:38.026181914 +0100 +++ new/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp 2009-08-01 04:07:37.947890359 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)vtableStubs_sparc.cpp 1.58 07/07/19 12:19:09 JVM" -#endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,7 +60,7 @@ // get receiver klass address npe_addr = __ pc(); - __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch); + __ load_klass(O0, G3_scratch); // set methodOop (in case of interpreted method), and destination address int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); @@ -134,7 +131,7 @@ // get receiver klass (also an implicit null-check) address npe_addr = __ pc(); - __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_klassOop); + __ load_klass(O0, G3_klassOop); __ verify_oop(G3_klassOop); // Push a new window to get some temp registers. This chops the head of all @@ -244,11 +241,16 @@ else { const int slop = 2*BytesPerInstWord; // sethi;add (needed for long offsets) if (is_vtable_stub) { - const int basic = 5*BytesPerInstWord; // ld;ld;ld,jmp,nop + // ld;ld;ld,jmp,nop + const int basic = 5*BytesPerInstWord + + // shift;add for load_klass + (UseCompressedOops ? 
2*BytesPerInstWord : 0); return basic + slop; } else { - // save, ld, ld, sll, and, add, add, ld, cmp, br, add, ld, add, sethi, add, ld, ld, jmp, restore, sethi, jmpl, restore - const int basic = (22 LP64_ONLY(+ 12)) * BytesPerInstWord; // worst case extra 6 bytes for each sethi in 64-bit mode + // save, ld, ld, sll, and, add, add, ld, cmp, br, add, ld, add, ld, ld, jmp, restore, sethi, jmpl, restore + const int basic = (22 LP64_ONLY(+ 12)) * BytesPerInstWord + + // shift;add for load_klass + (UseCompressedOops ? 2*BytesPerInstWord : 0); return (basic + slop); } } --- old/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp 2009-08-01 04:07:38.963904070 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp 2009-08-01 04:07:38.885777775 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)c1_CodeStubs_x86.cpp 1.101 07/09/17 09:25:57 JVM" -#endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -46,11 +43,12 @@ __ comisd(input()->as_xmm_double_reg(), ExternalAddress((address)&double_zero)); } else { - __ pushl(rax); + LP64_ONLY(ShouldNotReachHere()); + __ push(rax); __ ftst(); __ fnstsw_ax(); __ sahf(); - __ popl(rax); + __ pop(rax); } Label NaN, do_return; @@ -59,12 +57,12 @@ // input is > 0 -> return maxInt // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff - __ decrement(result()->as_register()); + __ decrement(result()->as_register()); __ jmpb(do_return); // input is NaN -> return 0 __ bind(NaN); - __ xorl(result()->as_register(), result()->as_register()); + __ xorptr(result()->as_register(), result()->as_register()); __ bind(do_return); __ jmp(_continuation); @@ -88,7 +86,7 @@ bool throw_index_out_of_bounds_exception) : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception) , _index(index) -{ +{ _info = info == NULL ? 
NULL : new CodeEmitInfo(info); } @@ -127,7 +125,7 @@ // Implementation of NewInstanceStub NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) { - _result = result; + _result = result; _klass = klass; _klass_reg = klass_reg; _info = new CodeEmitInfo(info); @@ -142,7 +140,7 @@ void NewInstanceStub::emit_code(LIR_Assembler* ce) { assert(__ rsp_offset() == 0, "frame size should be fixed"); __ bind(_entry); - __ movl(rdx, _klass_reg->as_register()); + __ movptr(rdx, _klass_reg->as_register()); __ call(RuntimeAddress(Runtime1::entry_for(_stub_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -176,7 +174,7 @@ // Implementation of NewObjectArrayStub -NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) { +NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) { _klass_reg = klass_reg; _result = result; _length = length; @@ -243,13 +241,13 @@ } -// Implementation of patching: +// Implementation of patching: // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes) // - Replace original code with a call to the stub // At Runtime: -// - call to stub, jump to runtime +// - call to stub, jump to runtime // - in runtime: preserve all registers (rspecially objects, i.e., source and destination object) -// - in runtime: after initializing class, restore original code, reexecute instruction +// - in runtime: after initializing class, restore original code, reexecute instruction int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size; @@ -309,10 +307,10 @@ assert(_obj != noreg, "must be a valid register"); Register tmp = rax; if (_obj == tmp) tmp = rbx; - __ pushl(tmp); + __ push(tmp); __ get_thread(tmp); - __ cmpl(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc))); - __ popl(tmp); + __ cmpptr(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc))); + __ pop(tmp); __ jcc(Assembler::notEqual, call_patch); // access_field patches may execute the patched code before it's @@ -386,7 +384,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { assert(__ rsp_offset() == 0, "frame size should be fixed"); - + __ bind(_entry); // pass the object on stack because all registers must be preserved if (_obj->is_cpu_register()) { @@ -437,7 +435,7 @@ VMReg r_1 = args[i].first(); if (r_1->is_stack()) { int st_off = r_1->reg2stack() * wordSize; - __ movl (Address(rsp, st_off), r[i]); + __ movptr (Address(rsp, st_off), r[i]); } else { assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg "); } @@ -452,11 +450,56 @@ ce->add_call_info_here(info()); #ifndef PRODUCT - __ increment(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt)); + __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt)); #endif - + + __ jmp(_continuation); +} + +///////////////////////////////////////////////////////////////////////////// +#ifndef SERIALGC + +void G1PreBarrierStub::emit_code(LIR_Assembler* ce) { + + // At this point we know that marking is in progress + + __ bind(_entry); + assert(pre_val()->is_register(), "Precondition."); + + Register pre_val_reg = pre_val()->as_register(); + + ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false); + + __ cmpptr(pre_val_reg, (int32_t) NULL_WORD); + __ jcc(Assembler::equal, _continuation); + 
ce->store_parameter(pre_val()->as_register(), 0); + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id))); + __ jmp(_continuation); + +} + +jbyte* G1PostBarrierStub::_byte_map_base = NULL; + +jbyte* G1PostBarrierStub::byte_map_base_slow() { + BarrierSet* bs = Universe::heap()->barrier_set(); + assert(bs->is_a(BarrierSet::G1SATBCTLogging), + "Must be if we're using this."); + return ((G1SATBCardTableModRefBS*)bs)->byte_map_base; +} + +void G1PostBarrierStub::emit_code(LIR_Assembler* ce) { + __ bind(_entry); + assert(addr()->is_register(), "Precondition."); + assert(new_val()->is_register(), "Precondition."); + Register new_val_reg = new_val()->as_register(); + __ cmpptr(new_val_reg, (int32_t) NULL_WORD); + __ jcc(Assembler::equal, _continuation); + ce->store_parameter(addr()->as_register(), 0); + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id))); __ jmp(_continuation); } +#endif // SERIALGC +///////////////////////////////////////////////////////////////////////////// #undef __ --- old/hotspot/src/cpu/x86/vm/c1_Defs_x86.hpp 2009-08-01 04:07:39.849523594 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_Defs_x86.hpp 2009-08-01 04:07:39.771812943 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)c1_Defs_x86.hpp 1.22 07/05/05 17:04:12 JVM" -#endif /* - * Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
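The G1PreBarrierStub and G1PostBarrierStub added above both test their value against NULL and jump straight to the continuation when it is null, calling into the runtime only otherwise. As a rough model of what those two barrier halves accomplish (simplified invented structures, not the VM's real SATB queues or card table):

#include <cstdint>
#include <vector>

// Simplified model of the two G1 write-barrier halves; names are invented.
struct HeapModel {
  std::vector<void*>   satb_queue;   // snapshot-at-the-beginning log
  std::vector<uint8_t> card_table;   // one byte per 512-byte card
  uintptr_t            heap_base;
  bool                 marking_in_progress;
};

// Pre-barrier: while marking, log the value about to be overwritten.
void pre_barrier(HeapModel& h, void** field) {
  if (!h.marking_in_progress) return;
  void* pre_val = *field;
  if (pre_val == nullptr) return;      // the stub's null check
  h.satb_queue.push_back(pre_val);     // the stub's runtime call
}

// Post-barrier: after the store, dirty the card covering the updated field.
void post_barrier(HeapModel& h, void** field, void* new_val) {
  if (new_val == nullptr) return;      // the stub's null check
  size_t card = ((uintptr_t)field - h.heap_base) >> 9;     // 512-byte cards
  if (card < h.card_table.size()) h.card_table[card] = 0;  // 0 == dirty here
}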
- * + * */ // native word offsets from memory address (little endian) @@ -39,31 +36,38 @@ // registers enum { - pd_nof_cpu_regs_frame_map = 8, // number of registers used during code emission - pd_nof_fpu_regs_frame_map = 8, // number of registers used during code emission - pd_nof_xmm_regs_frame_map = 8, // number of registers used during code emission - pd_nof_caller_save_cpu_regs_frame_map = 6, // number of registers killed by calls - pd_nof_caller_save_fpu_regs_frame_map = 8, // number of registers killed by calls - pd_nof_caller_save_xmm_regs_frame_map = 8, // number of registers killed by calls - - pd_nof_cpu_regs_reg_alloc = 6, // number of registers that are visible to register allocator + pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers, // number of registers used during code emission + pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers, // number of registers used during code emission + pd_nof_xmm_regs_frame_map = XMMRegisterImpl::number_of_registers, // number of registers used during code emission + +#ifdef _LP64 + #define UNALLOCATED 4 // rsp, rbp, r15, r10 +#else + #define UNALLOCATED 2 // rsp, rbp +#endif // LP64 + + pd_nof_caller_save_cpu_regs_frame_map = pd_nof_cpu_regs_frame_map - UNALLOCATED, // number of registers killed by calls + pd_nof_caller_save_fpu_regs_frame_map = pd_nof_fpu_regs_frame_map, // number of registers killed by calls + pd_nof_caller_save_xmm_regs_frame_map = pd_nof_xmm_regs_frame_map, // number of registers killed by calls + + pd_nof_cpu_regs_reg_alloc = pd_nof_caller_save_cpu_regs_frame_map, // number of registers that are visible to register allocator pd_nof_fpu_regs_reg_alloc = 6, // number of registers that are visible to register allocator - pd_nof_cpu_regs_linearscan = 8, // number of registers visible to linear scan - pd_nof_fpu_regs_linearscan = 8, // number of registers visible to linear scan - pd_nof_xmm_regs_linearscan = 8, // number of registers visible to linear scan + pd_nof_cpu_regs_linearscan = pd_nof_cpu_regs_frame_map, // number of registers visible to linear scan + pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map, // number of registers visible to linear scan + pd_nof_xmm_regs_linearscan = pd_nof_xmm_regs_frame_map, // number of registers visible to linear scan pd_first_cpu_reg = 0, - pd_last_cpu_reg = 5, + pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11), pd_first_byte_reg = 2, pd_last_byte_reg = 5, pd_first_fpu_reg = pd_nof_cpu_regs_frame_map, pd_last_fpu_reg = pd_first_fpu_reg + 7, pd_first_xmm_reg = pd_nof_cpu_regs_frame_map + pd_nof_fpu_regs_frame_map, - pd_last_xmm_reg = pd_first_xmm_reg + 7 + pd_last_xmm_reg = pd_first_xmm_reg + pd_nof_xmm_regs_frame_map - 1 }; -// encoding of float value in debug info: +// encoding of float value in debug info: enum { pd_float_saved_as_double = true }; --- old/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp 2009-08-01 04:07:40.680583900 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp 2009-08-01 04:07:40.607458462 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)c1_FrameMap_x86.cpp 1.86 07/09/17 09:25:57 JVM" -#endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
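The register-count changes above derive the frame-map totals from the register declarations and let the allocator see everything except a small fixed set that can never be allocated (rsp and rbp on 32-bit; additionally r10 and r15 on 64-bit). A trivial sketch of that arithmetic, with the 64-bit numbers assumed:

#include <cstdio>

// Assumed x86_64 counts, for illustration only.
enum {
  total_cpu_regs     = 16,
  unallocated_regs   = 4,   // rsp, rbp, r15, r10 as in the hunk above
  caller_save_regs   = total_cpu_regs - unallocated_regs,  // 12
  regs_for_allocator = caller_save_regs
};

int main() {
  std::printf("allocator sees %d of %d registers\n",
              regs_for_allocator, total_cpu_regs);
  return 0;
}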
* * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ # include "incls/_precompiled.incl" @@ -42,10 +39,15 @@ opr = LIR_OprFact::address(new LIR_Address(rsp_opr, st_off, type)); } else if (r_1->is_Register()) { Register reg = r_1->as_Register(); - if (r_2->is_Register()) { + if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) { Register reg2 = r_2->as_Register(); +#ifdef _LP64 + assert(reg2 == reg, "must be same register"); + opr = as_long_opr(reg); +#else opr = as_long_opr(reg2, reg); - } else if (type == T_OBJECT) { +#endif // _LP64 + } else if (type == T_OBJECT || type == T_ARRAY) { opr = as_oop_opr(reg); } else { opr = as_opr(reg); @@ -91,18 +93,39 @@ LIR_Opr FrameMap::rdx_oop_opr; LIR_Opr FrameMap::rcx_oop_opr; -LIR_Opr FrameMap::rax_rdx_long_opr; -LIR_Opr FrameMap::rbx_rcx_long_opr; +LIR_Opr FrameMap::long0_opr; +LIR_Opr FrameMap::long1_opr; LIR_Opr FrameMap::fpu0_float_opr; LIR_Opr FrameMap::fpu0_double_opr; LIR_Opr FrameMap::xmm0_float_opr; LIR_Opr FrameMap::xmm0_double_opr; +#ifdef _LP64 + +LIR_Opr FrameMap::r8_opr; +LIR_Opr FrameMap::r9_opr; +LIR_Opr FrameMap::r10_opr; +LIR_Opr FrameMap::r11_opr; +LIR_Opr FrameMap::r12_opr; +LIR_Opr FrameMap::r13_opr; +LIR_Opr FrameMap::r14_opr; +LIR_Opr FrameMap::r15_opr; + +// r10 and r15 can never contain oops since they aren't available to +// the allocator +LIR_Opr FrameMap::r8_oop_opr; +LIR_Opr FrameMap::r9_oop_opr; +LIR_Opr FrameMap::r11_oop_opr; +LIR_Opr FrameMap::r12_oop_opr; +LIR_Opr FrameMap::r13_oop_opr; +LIR_Opr FrameMap::r14_oop_opr; +#endif // _LP64 + LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, }; LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, }; LIR_Opr FrameMap::_caller_save_xmm_regs[] = { 0, }; -XMMRegister FrameMap::_xmm_regs [8] = { 0, }; +XMMRegister FrameMap::_xmm_regs [] = { 0, }; XMMRegister FrameMap::nr2xmmreg(int rnr) { assert(_init_done, "tables not initialized"); @@ -116,18 +139,39 @@ void FrameMap::init() { if (_init_done) return; - assert(nof_cpu_regs == 8, "wrong number of CPU registers"); - map_register(0, rsi); rsi_opr = LIR_OprFact::single_cpu(0); rsi_oop_opr = LIR_OprFact::single_cpu_oop(0); - map_register(1, rdi); rdi_opr = LIR_OprFact::single_cpu(1); rdi_oop_opr = LIR_OprFact::single_cpu_oop(1); - map_register(2, rbx); rbx_opr = LIR_OprFact::single_cpu(2); rbx_oop_opr = LIR_OprFact::single_cpu_oop(2); - map_register(3, rax); rax_opr = LIR_OprFact::single_cpu(3); rax_oop_opr = LIR_OprFact::single_cpu_oop(3); - map_register(4, rdx); rdx_opr = LIR_OprFact::single_cpu(4); rdx_oop_opr = LIR_OprFact::single_cpu_oop(4); - map_register(5, rcx); rcx_opr = LIR_OprFact::single_cpu(5); rcx_oop_opr = LIR_OprFact::single_cpu_oop(5); - map_register(6, rsp); rsp_opr = LIR_OprFact::single_cpu(6); - map_register(7, rbp); rbp_opr = LIR_OprFact::single_cpu(7); - - rax_rdx_long_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/); - rbx_rcx_long_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/); + assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers"); + map_register(0, rsi); rsi_opr = LIR_OprFact::single_cpu(0); + map_register(1, rdi); rdi_opr = LIR_OprFact::single_cpu(1); + map_register(2, rbx); rbx_opr = LIR_OprFact::single_cpu(2); + map_register(3, rax); rax_opr = LIR_OprFact::single_cpu(3); + map_register(4, rdx); rdx_opr = LIR_OprFact::single_cpu(4); + 
map_register(5, rcx); rcx_opr = LIR_OprFact::single_cpu(5); + +#ifndef _LP64 + // The unallocatable registers are at the end + map_register(6, rsp); + map_register(7, rbp); +#else + map_register( 6, r8); r8_opr = LIR_OprFact::single_cpu(6); + map_register( 7, r9); r9_opr = LIR_OprFact::single_cpu(7); + map_register( 8, r11); r11_opr = LIR_OprFact::single_cpu(8); + map_register( 9, r12); r12_opr = LIR_OprFact::single_cpu(9); + map_register(10, r13); r13_opr = LIR_OprFact::single_cpu(10); + map_register(11, r14); r14_opr = LIR_OprFact::single_cpu(11); + // The unallocatable registers are at the end + map_register(12, r10); r10_opr = LIR_OprFact::single_cpu(12); + map_register(13, r15); r15_opr = LIR_OprFact::single_cpu(13); + map_register(14, rsp); + map_register(15, rbp); +#endif // _LP64 + +#ifdef _LP64 + long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 3 /*eax*/); + long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 2 /*ebx*/); +#else + long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/); + long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/); +#endif // _LP64 fpu0_float_opr = LIR_OprFact::single_fpu(0); fpu0_double_opr = LIR_OprFact::double_fpu(0); xmm0_float_opr = LIR_OprFact::single_xmm(0); @@ -140,6 +184,15 @@ _caller_save_cpu_regs[4] = rdx_opr; _caller_save_cpu_regs[5] = rcx_opr; +#ifdef _LP64 + _caller_save_cpu_regs[6] = r8_opr; + _caller_save_cpu_regs[7] = r9_opr; + _caller_save_cpu_regs[8] = r11_opr; + _caller_save_cpu_regs[9] = r12_opr; + _caller_save_cpu_regs[10] = r13_opr; + _caller_save_cpu_regs[11] = r14_opr; +#endif // _LP64 + _xmm_regs[0] = xmm0; _xmm_regs[1] = xmm1; @@ -150,18 +203,51 @@ _xmm_regs[6] = xmm6; _xmm_regs[7] = xmm7; +#ifdef _LP64 + _xmm_regs[8] = xmm8; + _xmm_regs[9] = xmm9; + _xmm_regs[10] = xmm10; + _xmm_regs[11] = xmm11; + _xmm_regs[12] = xmm12; + _xmm_regs[13] = xmm13; + _xmm_regs[14] = xmm14; + _xmm_regs[15] = xmm15; +#endif // _LP64 + for (int i = 0; i < 8; i++) { _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i); + } + + for (int i = 0; i < nof_caller_save_xmm_regs ; i++) { _caller_save_xmm_regs[i] = LIR_OprFact::single_xmm(i); } _init_done = true; + rsi_oop_opr = as_oop_opr(rsi); + rdi_oop_opr = as_oop_opr(rdi); + rbx_oop_opr = as_oop_opr(rbx); + rax_oop_opr = as_oop_opr(rax); + rdx_oop_opr = as_oop_opr(rdx); + rcx_oop_opr = as_oop_opr(rcx); + + rsp_opr = as_pointer_opr(rsp); + rbp_opr = as_pointer_opr(rbp); + +#ifdef _LP64 + r8_oop_opr = as_oop_opr(r8); + r9_oop_opr = as_oop_opr(r9); + r11_oop_opr = as_oop_opr(r11); + r12_oop_opr = as_oop_opr(r12); + r13_oop_opr = as_oop_opr(r13); + r14_oop_opr = as_oop_opr(r14); +#endif // _LP64 + VMRegPair regs; BasicType sig_bt = T_OBJECT; SharedRuntime::java_calling_convention(&sig_bt, ®s, 1, true); receiver_opr = as_oop_opr(regs.first()->as_Register()); - assert(receiver_opr == rcx_oop_opr, "rcvr ought to be rcx"); + } @@ -173,7 +259,7 @@ // ----------------mapping----------------------- -// all mapping is based on rbp, addressing, except for simple leaf methods where we access +// all mapping is based on rbp, addressing, except for simple leaf methods where we access // the locals rsp based (and no frame is built) @@ -182,11 +268,11 @@ // +----------+ // | ret addr | <- TOS // +----------+ -// | args | +// | args | // | ...... | // Frame for standard methods -// +// // | .........| <- TOS // | locals | // +----------+ @@ -205,7 +291,7 @@ // framesize + // stack0 stack0 0 <- VMReg // | | | -// ...........|..............|.............| +// ...........|..............|.............| // 0 1 2 3 x x 4 5 6 ... 
| <- local indices // ^ ^ sp() ( x x indicate link // | | and return addr) --- old/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp 2009-08-01 04:07:41.666951184 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp 2009-08-01 04:07:41.579418878 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)c1_FrameMap_x86.hpp 1.58 07/07/02 16:50:31 JVM" -#endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // On i486 the frame looks as follows: @@ -41,8 +38,13 @@ nof_xmm_regs = pd_nof_xmm_regs_frame_map, nof_caller_save_xmm_regs = pd_nof_caller_save_xmm_regs_frame_map, first_available_sp_in_frame = 0, +#ifndef _LP64 frame_pad_in_bytes = 8, nof_reg_args = 2 +#else + frame_pad_in_bytes = 16, + nof_reg_args = 6 +#endif // _LP64 }; private: @@ -52,7 +54,7 @@ public: static LIR_Opr receiver_opr; - + static LIR_Opr rsi_opr; static LIR_Opr rdi_opr; static LIR_Opr rbx_opr; @@ -68,17 +70,49 @@ static LIR_Opr rax_oop_opr; static LIR_Opr rdx_oop_opr; static LIR_Opr rcx_oop_opr; +#ifdef _LP64 + + static LIR_Opr r8_opr; + static LIR_Opr r9_opr; + static LIR_Opr r10_opr; + static LIR_Opr r11_opr; + static LIR_Opr r12_opr; + static LIR_Opr r13_opr; + static LIR_Opr r14_opr; + static LIR_Opr r15_opr; + + static LIR_Opr r8_oop_opr; + static LIR_Opr r9_oop_opr; + + static LIR_Opr r11_oop_opr; + static LIR_Opr r12_oop_opr; + static LIR_Opr r13_oop_opr; + static LIR_Opr r14_oop_opr; + +#endif // _LP64 - static LIR_Opr rax_rdx_long_opr; - static LIR_Opr rbx_rcx_long_opr; + static LIR_Opr long0_opr; + static LIR_Opr long1_opr; static LIR_Opr fpu0_float_opr; static LIR_Opr fpu0_double_opr; static LIR_Opr xmm0_float_opr; static LIR_Opr xmm0_double_opr; +#ifdef _LP64 + static LIR_Opr as_long_opr(Register r) { + return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r)); + } + static LIR_Opr as_pointer_opr(Register r) { + return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r)); + } +#else static LIR_Opr as_long_opr(Register r, Register r2) { return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r2)); } + static LIR_Opr as_pointer_opr(Register r) { + return LIR_OprFact::single_cpu(cpu_reg2rnr(r)); + } +#endif // _LP64 // VMReg name for spilled physical FPU stack slot n static VMReg fpu_regname (int n); --- old/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp 2009-08-01 04:07:42.585979400 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp 2009-08-01 04:07:42.491817284 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)c1_LIRAssembler_x86.cpp 1.168 07/09/17 09:25:57 JVM" -#endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
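The as_long_opr/as_pointer_opr split above captures the main LP64 difference in the frame map: a Java long or a pointer needs an eax/edx-style register pair on 32-bit, but fits a single register on 64-bit, so both "halves" of the operand descriptor name the same register. A small sketch of that idea with invented types:

#include <cstdio>

// Invented stand-in for a two-register operand descriptor.
struct DoubleCpuOperand {
  int lo_rnr;  // register number holding the low word
  int hi_rnr;  // register number holding the high word (same as lo on 64-bit)
};

DoubleCpuOperand as_long_operand(int rnr_lo, int rnr_hi, bool lp64) {
  if (lp64) {
    return DoubleCpuOperand{ rnr_lo, rnr_lo };  // one 64-bit register, twice
  }
  return DoubleCpuOperand{ rnr_lo, rnr_hi };    // genuine hi/lo pair
}

int main() {
  DoubleCpuOperand pair32 = as_long_operand(3 /*rax*/, 4 /*rdx*/, false);
  DoubleCpuOperand one64  = as_long_operand(3 /*rax*/, -1, true);
  std::printf("32-bit: lo=%d hi=%d   64-bit: lo=%d hi=%d\n",
              pair32.lo_rnr, pair32.hi_rnr, one64.lo_rnr, one64.hi_rnr);
  return 0;
}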
- * + * */ # include "incls/_precompiled.incl" @@ -116,7 +113,7 @@ LIR_Opr LIR_Assembler::receiverOpr() { - return FrameMap::rcx_oop_opr; + return FrameMap::receiver_opr; } LIR_Opr LIR_Assembler::incomingReceiverOpr() { @@ -124,7 +121,7 @@ } LIR_Opr LIR_Assembler::osrBufferPointer() { - return FrameMap::rcx_opr; + return FrameMap::as_pointer_opr(receiverOpr()->as_register()); } //--------------fpu register translations----------------------- @@ -184,7 +181,7 @@ if (opr->is_single_cpu()) { __ push_reg(opr->as_register()); } else if (opr->is_double_cpu()) { - __ push_reg(opr->as_register_hi()); + NOT_LP64(__ push_reg(opr->as_register_hi())); __ push_reg(opr->as_register_lo()); } else if (opr->is_stack()) { __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix())); @@ -205,31 +202,45 @@ void LIR_Assembler::pop(LIR_Opr opr) { if (opr->is_single_cpu()) { - __ pop(opr->as_register()); + __ pop_reg(opr->as_register()); } else { ShouldNotReachHere(); } } +bool LIR_Assembler::is_literal_address(LIR_Address* addr) { + return addr->base()->is_illegal() && addr->index()->is_illegal(); +} + //------------------------------------------- + Address LIR_Assembler::as_Address(LIR_Address* addr) { + return as_Address(addr, rscratch1); +} + +Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) { if (addr->base()->is_illegal()) { assert(addr->index()->is_illegal(), "must be illegal too"); - //return Address(addr->disp(), relocInfo::none); - // hack for now since this should really return an AddressLiteral - // which will have to await 64bit c1 changes. - return Address(noreg, addr->disp()); + AddressLiteral laddr((address)addr->disp(), relocInfo::none); + if (! __ reachable(laddr)) { + __ movptr(tmp, laddr.addr()); + Address res(tmp, 0); + return res; + } else { + return __ as_Address(laddr); + } } - Register base = addr->base()->as_register(); + Register base = addr->base()->as_pointer_register(); if (addr->index()->is_illegal()) { return Address( base, addr->disp()); - } else if (addr->index()->is_single_cpu()) { - Register index = addr->index()->as_register(); + } else if (addr->index()->is_cpu_register()) { + Register index = addr->index()->as_pointer_register(); return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp()); } else if (addr->index()->is_constant()) { - int addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp(); + intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp(); + assert(Assembler::is_simm32(addr_offset), "must be"); return Address(base, addr_offset); } else { @@ -287,7 +298,7 @@ // All other registers are dead at this point and the locals will be // copied into place by code emitted in the IR. 
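The as_Address change above only uses a literal address directly when it is "reachable"; otherwise it first materializes the full 64-bit address into a scratch register. The underlying constraint is that an x86 memory operand's displacement is a signed 32-bit field; a hedged sketch of that test, glossing over details such as RIP-relative anchoring:

#include <cstdint>

// Can 'value' be encoded as a sign-extended 32-bit immediate/displacement?
bool fits_simm32(int64_t value) {
  return value == (int64_t)(int32_t)value;
}

// Roughly what a reachability test must establish: the target is expressible
// as a 32-bit displacement from wherever the code will reference it.
bool reachable_as_disp32(uint64_t target, uint64_t anchor) {
  return fits_simm32((int64_t)(target - anchor));
}
// When this fails, load the 64-bit address into a scratch register and address
// memory as (scratch + 0), which is what the fallback above does.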
- Register OSR_buf = osrBufferPointer()->as_register(); + Register OSR_buf = osrBufferPointer()->as_pointer_register(); { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); int monitor_offset = BytesPerWord * method()->max_locals() + (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1); @@ -297,16 +308,16 @@ // verify the interpreter's monitor has a non-null object { Label L; - __ cmpl(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); + __ cmpptr(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); __ jcc(Assembler::notZero, L); __ stop("locked object is NULL"); __ bind(L); } #endif - __ movl(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes())); - __ movl(frame_map()->address_for_monitor_lock(i), rbx); - __ movl(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes())); - __ movl(frame_map()->address_for_monitor_object(i), rbx); + __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes())); + __ movptr(frame_map()->address_for_monitor_lock(i), rbx); + __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes())); + __ movptr(frame_map()->address_for_monitor_object(i), rbx); } } } @@ -316,10 +327,11 @@ int LIR_Assembler::check_icache() { Register receiver = FrameMap::receiver_opr->as_register(); Register ic_klass = IC_Klass; + const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); if (!VerifyOops) { // insert some nops so that the verified entry point is aligned on CodeEntryAlignment - while ((__ offset() + 9) % CodeEntryAlignment != 0) { + while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) { __ nop(); } } @@ -350,7 +362,7 @@ // and cannot block => no GC can happen // The slow case (MonitorAccessStub) uses the first two stack slots // ([esp+0] and [esp+4]), therefore we store the exception at [esp+8] - __ movl (Address(rsp, 2*wordSize), exception); + __ movptr (Address(rsp, 2*wordSize), exception); } Register obj_reg = obj_opr->as_register(); @@ -363,7 +375,7 @@ lock_reg = new_hdr; // compute pointer to BasicLock Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no); - __ leal(lock_reg, lock_addr); + __ lea(lock_reg, lock_addr); // unlock object MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no); // _slow_case_stubs->append(slow_case); @@ -388,14 +400,18 @@ if (exception->is_valid()) { // restore exception - __ movl (exception, Address(rsp, 2 * wordSize)); + __ movptr (exception, Address(rsp, 2 * wordSize)); } } // This specifies the rsp decrement needed to build the frame int LIR_Assembler::initial_frame_size_in_bytes() { // if rounding, must let FrameMap know! 
- return (frame_map()->framesize() - 2) * BytesPerWord; // subtract two words to account for return address and link + + // The frame_map records size in slots (32bit word) + + // subtract two words to account for return address and link + return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size; } @@ -454,7 +470,7 @@ // unwind activation and forward exception to caller // rax,: exception - __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); + __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); assert(code_offset() - offset <= exception_handler_size, "overflow"); @@ -498,43 +514,43 @@ // This is the fast version of java.lang.String.compare; it has not // OSR-entry and therefore, we generate a slow version for OSR's void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) { - __ movl (rbx, rcx); // receiver is in rcx - __ movl (rax, arg1->as_register()); + __ movptr (rbx, rcx); // receiver is in rcx + __ movptr (rax, arg1->as_register()); // Get addresses of first characters from both Strings - __ movl (rsi, Address(rax, java_lang_String::value_offset_in_bytes())); - __ movl (rcx, Address(rax, java_lang_String::offset_offset_in_bytes())); - __ leal (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); + __ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes())); + __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes())); + __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); // rbx, may be NULL add_debug_info_for_null_check_here(info); - __ movl (rdi, Address(rbx, java_lang_String::value_offset_in_bytes())); - __ movl (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes())); - __ leal (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); + __ movptr (rdi, Address(rbx, java_lang_String::value_offset_in_bytes())); + __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes())); + __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); // compute minimum length (in rax) and difference of lengths (on top of stack) if (VM_Version::supports_cmov()) { - __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); - __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes())); - __ movl (rcx, rbx); - __ subl (rbx, rax); // subtract lengths - __ pushl(rbx); // result - __ cmovl(Assembler::lessEqual, rax, rcx); + __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); + __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes())); + __ mov (rcx, rbx); + __ subptr (rbx, rax); // subtract lengths + __ push (rbx); // result + __ cmov (Assembler::lessEqual, rax, rcx); } else { Label L; - __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); - __ movl (rcx, Address(rax, java_lang_String::count_offset_in_bytes())); - __ movl (rax, rbx); - __ subl (rbx, rcx); - __ pushl(rbx); - __ jcc (Assembler::lessEqual, L); - __ movl (rax, rcx); + __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); + __ movl (rcx, Address(rax, java_lang_String::count_offset_in_bytes())); + __ mov (rax, rbx); + __ subptr (rbx, rcx); + __ push (rbx); + __ jcc (Assembler::lessEqual, L); + __ mov (rax, rcx); __ bind (L); } // is minimum length 0? 
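emit_string_compare above implements the usual String.compareTo contract: compare characters up to the shorter length and return the first difference, otherwise return the difference of the lengths. A plain C++ rendering of that result contract (the assembly additionally stashes the length difference on the stack and negates an index to walk the character arrays, which is omitted here):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Same outcome as the fast path above: the first differing character decides,
// otherwise the length difference decides.
int compare_strings(const uint16_t* a, size_t a_len,
                    const uint16_t* b, size_t b_len) {
  size_t min_len = std::min(a_len, b_len);
  for (size_t i = 0; i < min_len; ++i) {
    if (a[i] != b[i]) {
      return (int)a[i] - (int)b[i];
    }
  }
  return (int)a_len - (int)b_len;
}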
Label noLoop, haveResult; - __ testl (rax, rax); + __ testptr (rax, rax); __ jcc (Assembler::zero, noLoop); // compare first characters @@ -549,9 +565,9 @@ // set rsi.edi to the end of the arrays (arrays have same length) // negate the index - __ leal(rsi, Address(rsi, rax, Address::times_2, type2aelembytes[T_CHAR])); - __ leal(rdi, Address(rdi, rax, Address::times_2, type2aelembytes[T_CHAR])); - __ negl(rax); + __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR))); + __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR))); + __ negptr(rax); // compare the strings in a loop @@ -568,12 +584,12 @@ // strings are equal up to min length __ bind(noLoop); - __ popl(rax); + __ pop(rax); return_op(LIR_OprFact::illegalOpr); __ bind(haveResult); // leave instruction is going to discard the TOS value - __ movl (rax, rcx); // result of call is in rax, + __ mov (rax, rcx); // result of call is in rax, } @@ -592,6 +608,11 @@ // the poll sets the condition code, but no data registers AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()), relocInfo::poll_return_type); + + // NOTE: the requires that the polling page be reachable else the reloc + // goes to the movq that loads the address and not the faulting instruction + // which breaks the signal handler code + __ test32(rax, polling_page); __ ret(0); @@ -609,17 +630,22 @@ } int offset = __ offset(); + + // NOTE: the requires that the polling page be reachable else the reloc + // goes to the movq that loads the address and not the faulting instruction + // which breaks the signal handler code + __ test32(rax, polling_page); return offset; } void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { - if (from_reg != to_reg) __ movl(to_reg, from_reg); + if (from_reg != to_reg) __ mov(to_reg, from_reg); } void LIR_Assembler::swap_reg(Register a, Register b) { - __ xchgl(a, b); + __ xchgptr(a, b); } @@ -637,8 +663,12 @@ case T_LONG: { assert(patch_code == lir_patch_none, "no patching handled here"); - __ movl(dest->as_register_lo(), c->as_jint_lo()); - __ movl(dest->as_register_hi(), c->as_jint_hi()); +#ifdef _LP64 + __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong()); +#else + __ movptr(dest->as_register_lo(), c->as_jint_lo()); + __ movptr(dest->as_register_hi(), c->as_jint_hi()); +#endif // _LP64 break; } @@ -717,10 +747,15 @@ case T_LONG: // fall through case T_DOUBLE: - __ movl(frame_map()->address_for_slot(dest->double_stack_ix(), - lo_word_offset_in_bytes), c->as_jint_lo_bits()); - __ movl(frame_map()->address_for_slot(dest->double_stack_ix(), - hi_word_offset_in_bytes), c->as_jint_hi_bits()); +#ifdef _LP64 + __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), + lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits()); +#else + __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), + lo_word_offset_in_bytes), c->as_jint_lo_bits()); + __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), + hi_word_offset_in_bytes), c->as_jint_hi_bits()); +#endif // _LP64 break; default: @@ -734,7 +769,7 @@ LIR_Const* c = src->as_constant_ptr(); LIR_Address* addr = dest->as_address_ptr(); - if (info != NULL) add_debug_info_for_null_check_here(info); + int null_check_here = code_offset(); switch (type) { case T_INT: // fall through case T_FLOAT: @@ -744,16 +779,33 @@ case T_OBJECT: // fall through case T_ARRAY: if (c->as_jobject() == NULL) { - __ movl(as_Address(addr), NULL_WORD); + __ movptr(as_Address(addr), (int32_t)NULL_WORD); } else { - __ 
movoop(as_Address(addr), c->as_jobject()); + if (is_literal_address(addr)) { + ShouldNotReachHere(); + __ movoop(as_Address(addr, noreg), c->as_jobject()); + } else { + __ movoop(as_Address(addr), c->as_jobject()); + } } break; case T_LONG: // fall through case T_DOUBLE: - __ movl(as_Address_hi(addr), c->as_jint_hi_bits()); - __ movl(as_Address_lo(addr), c->as_jint_lo_bits()); +#ifdef _LP64 + if (is_literal_address(addr)) { + ShouldNotReachHere(); + __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits()); + } else { + __ movptr(r10, (intptr_t)c->as_jlong_bits()); + null_check_here = code_offset(); + __ movptr(as_Address_lo(addr), r10); + } +#else + // Always reachable in 32bit so this doesn't produce useless move literal + __ movptr(as_Address_hi(addr), c->as_jint_hi_bits()); + __ movptr(as_Address_lo(addr), c->as_jint_lo_bits()); +#endif // _LP64 break; case T_BOOLEAN: // fall through @@ -769,6 +821,10 @@ default: ShouldNotReachHere(); }; + + if (info != NULL) { + add_debug_info_for_null_check(null_check_here, info); + } } @@ -778,6 +834,13 @@ // move between cpu-registers if (dest->is_single_cpu()) { +#ifdef _LP64 + if (src->type() == T_LONG) { + // Can do LONG -> OBJECT + move_regs(src->as_register_lo(), dest->as_register()); + return; + } +#endif assert(src->is_single_cpu(), "must match"); if (src->type() == T_OBJECT) { __ verify_oop(src->as_register()); @@ -785,13 +848,27 @@ move_regs(src->as_register(), dest->as_register()); } else if (dest->is_double_cpu()) { +#ifdef _LP64 + if (src->type() == T_OBJECT || src->type() == T_ARRAY) { + // Surprising to me but we can see move of a long to t_object + __ verify_oop(src->as_register()); + move_regs(src->as_register(), dest->as_register_lo()); + return; + } +#endif assert(src->is_double_cpu(), "must match"); Register f_lo = src->as_register_lo(); Register f_hi = src->as_register_hi(); Register t_lo = dest->as_register_lo(); Register t_hi = dest->as_register_hi(); +#ifdef _LP64 + assert(f_hi == f_lo, "must be same"); + assert(t_hi == t_lo, "must be same"); + move_regs(f_lo, t_lo); +#else assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation"); + if (f_lo == t_hi && f_hi == t_lo) { swap_reg(f_lo, f_hi); } else if (f_hi == t_lo) { @@ -803,6 +880,7 @@ move_regs(f_lo, t_lo); move_regs(f_hi, t_hi); } +#endif // LP64 // special moves from fpu-register to xmm-register // necessary for method results @@ -844,14 +922,16 @@ Address dst = frame_map()->address_for_slot(dest->single_stack_ix()); if (type == T_OBJECT || type == T_ARRAY) { __ verify_oop(src->as_register()); + __ movptr (dst, src->as_register()); + } else { + __ movl (dst, src->as_register()); } - __ movl (dst, src->as_register()); } else if (src->is_double_cpu()) { Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes); Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes); - __ movl (dstLO, src->as_register_lo()); - __ movl (dstHI, src->as_register_hi()); + __ movptr (dstLO, src->as_register_lo()); + NOT_LP64(__ movptr (dstHI, src->as_register_hi())); } else if (src->is_single_xmm()) { Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix()); @@ -888,6 +968,8 @@ } if (patch_code != lir_patch_none) { patch = new PatchingStub(_masm, PatchingStub::access_field_id); + Address toa = as_Address(to_addr); + assert(toa.disp() != 0, "must have"); } if (info != NULL) { add_debug_info_for_null_check_here(info); @@ -921,6 +1003,10 @@ case T_ADDRESS: // fall through case 
T_ARRAY: // fall through case T_OBJECT: // fall through +#ifdef _LP64 + __ movptr(as_Address(to_addr), src->as_register()); + break; +#endif // _LP64 case T_INT: __ movl(as_Address(to_addr), src->as_register()); break; @@ -928,6 +1014,9 @@ case T_LONG: { Register from_lo = src->as_register_lo(); Register from_hi = src->as_register_hi(); +#ifdef _LP64 + __ movptr(as_Address_lo(to_addr), from_lo); +#else Register base = to_addr->base()->as_register(); Register index = noreg; if (to_addr->index()->is_register()) { @@ -953,6 +1042,7 @@ } __ movl(as_Address_hi(to_addr), from_hi); } +#endif // _LP64 break; } @@ -985,16 +1075,18 @@ assert(dest->is_register(), "should not call otherwise"); if (dest->is_single_cpu()) { - __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); if (type == T_ARRAY || type == T_OBJECT) { + __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); __ verify_oop(dest->as_register()); + } else { + __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); } } else if (dest->is_double_cpu()) { Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes); Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes); - __ movl(dest->as_register_hi(), src_addr_HI); - __ movl(dest->as_register_lo(), src_addr_LO); + __ movptr(dest->as_register_lo(), src_addr_LO); + NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI)); } else if (dest->is_single_xmm()) { Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); @@ -1022,15 +1114,25 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { if (src->is_single_stack()) { - __ pushl(frame_map()->address_for_slot(src ->single_stack_ix())); - __ popl (frame_map()->address_for_slot(dest->single_stack_ix())); + if (type == T_OBJECT || type == T_ARRAY) { + __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix())); + __ popptr (frame_map()->address_for_slot(dest->single_stack_ix())); + } else { + __ pushl(frame_map()->address_for_slot(src ->single_stack_ix())); + __ popl (frame_map()->address_for_slot(dest->single_stack_ix())); + } } else if (src->is_double_stack()) { +#ifdef _LP64 + __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix())); + __ popptr (frame_map()->address_for_slot(dest->double_stack_ix())); +#else __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0)); - // push and pop the part at src + 4, adding 4 for the previous push - __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 4 + 4)); - __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 4 + 4)); + // push and pop the part at src + wordSize, adding wordSize for the previous push + __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize)); + __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize)); __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0)); +#endif // _LP64 } else { ShouldNotReachHere(); @@ -1055,7 +1157,7 @@ // so blow away the value of to_rinfo before loading a // partial word into it. Do it here so that it precedes // the potential patch point below. 
- __ xorl(dest->as_register(), dest->as_register()); + __ xorptr(dest->as_register(), dest->as_register()); } break; } @@ -1063,6 +1165,7 @@ PatchingStub* patch = NULL; if (patch_code != lir_patch_none) { patch = new PatchingStub(_masm, PatchingStub::access_field_id); + assert(from_addr.disp() != 0, "must have"); } if (info != NULL) { add_debug_info_for_null_check_here(info); @@ -1094,13 +1197,21 @@ case T_ADDRESS: // fall through case T_OBJECT: // fall through case T_ARRAY: // fall through +#ifdef _LP64 + __ movptr(dest->as_register(), from_addr); + break; +#endif // _L64 case T_INT: - __ movl(dest->as_register(), from_addr); + // %%% could this be a movl? this is safer but longer instruction + __ movl2ptr(dest->as_register(), from_addr); break; case T_LONG: { Register to_lo = dest->as_register_lo(); Register to_hi = dest->as_register_hi(); +#ifdef _LP64 + __ movptr(to_lo, as_Address_lo(addr)); +#else Register base = addr->base()->as_register(); Register index = noreg; if (addr->index()->is_register()) { @@ -1112,7 +1223,7 @@ // array access so this code will never have to deal with // patches or null checks. assert(info == NULL && patch == NULL, "must be"); - __ leal(to_hi, as_Address(addr)); + __ lea(to_hi, as_Address(addr)); __ movl(to_lo, Address(to_hi, 0)); __ movl(to_hi, Address(to_hi, BytesPerWord)); } else if (base == to_lo || index == to_lo) { @@ -1135,6 +1246,7 @@ } __ movl(to_hi, as_Address_hi(addr)); } +#endif // _LP64 break; } @@ -1143,12 +1255,13 @@ Register dest_reg = dest->as_register(); assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { - __ movsxb(dest_reg, from_addr); + __ movsbl(dest_reg, from_addr); } else { __ movb(dest_reg, from_addr); __ shll(dest_reg, 24); __ sarl(dest_reg, 24); } + // These are unsigned so the zero extension on 64bit is just what we need break; } @@ -1156,22 +1269,26 @@ Register dest_reg = dest->as_register(); assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { - __ movzxw(dest_reg, from_addr); + __ movzwl(dest_reg, from_addr); } else { __ movw(dest_reg, from_addr); } + // This is unsigned so the zero extension on 64bit is just what we need + // __ movl2ptr(dest_reg, dest_reg); break; } case T_SHORT: { Register dest_reg = dest->as_register(); if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { - __ movsxw(dest_reg, from_addr); + __ movswl(dest_reg, from_addr); } else { __ movw(dest_reg, from_addr); __ shll(dest_reg, 16); __ sarl(dest_reg, 16); } + // Might not be needed in 64bit but certainly doesn't hurt (except for code size) + __ movl2ptr(dest_reg, dest_reg); break; } @@ -1201,7 +1318,7 @@ __ prefetcht0(from_addr); break; case 2: __ prefetcht2(from_addr); break; - default: + default: ShouldNotReachHere(); break; } } else if (VM_Version::supports_3dnow()) { @@ -1224,7 +1341,7 @@ __ prefetcht2(from_addr); break; case 3: __ prefetchw(from_addr); break; - default: + default: ShouldNotReachHere(); break; } } else if (VM_Version::supports_3dnow()) { @@ -1235,7 +1352,7 @@ NEEDS_CLEANUP; // This could be static? 
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const { - int elem_size = type2aelembytes[type]; + int elem_size = type2aelembytes(type); switch (elem_size) { case 1: return Address::times_1; case 2: return Address::times_2; @@ -1308,10 +1425,14 @@ LIR_Opr dest = op->result_opr(); switch (op->bytecode()) { - case Bytecodes::_i2l: + case Bytecodes::_i2l: +#ifdef _LP64 + __ movl2ptr(dest->as_register_lo(), src->as_register()); +#else move_regs(src->as_register(), dest->as_register_lo()); move_regs(src->as_register(), dest->as_register_hi()); __ sarl(dest->as_register_hi(), 31); +#endif // LP64 break; case Bytecodes::_l2i: @@ -1349,9 +1470,9 @@ case Bytecodes::_i2f: case Bytecodes::_i2d: if (dest->is_single_xmm()) { - __ cvtsi2ss(dest->as_xmm_float_reg(), src->as_register()); + __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register()); } else if (dest->is_double_xmm()) { - __ cvtsi2sd(dest->as_xmm_double_reg(), src->as_register()); + __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register()); } else { assert(dest->fpu() == 0, "result must be on TOS"); __ movl(Address(rsp, 0), src->as_register()); @@ -1362,9 +1483,9 @@ case Bytecodes::_f2i: case Bytecodes::_d2i: if (src->is_single_xmm()) { - __ cvttss2si(dest->as_register(), src->as_xmm_float_reg()); + __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg()); } else if (src->is_double_xmm()) { - __ cvttsd2si(dest->as_register(), src->as_xmm_double_reg()); + __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg()); } else { assert(src->fpu() == 0, "input must be on TOS"); __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc())); @@ -1385,17 +1506,17 @@ assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)"); assert(dest->fpu() == 0, "result must be on TOS"); - __ movl(Address(rsp, 0), src->as_register_lo()); - __ movl(Address(rsp, BytesPerWord), src->as_register_hi()); + __ movptr(Address(rsp, 0), src->as_register_lo()); + NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi())); __ fild_d(Address(rsp, 0)); // float result is rounded later through spilling break; - case Bytecodes::_f2l: - case Bytecodes::_d2l: + case Bytecodes::_f2l: + case Bytecodes::_d2l: assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)"); assert(src->fpu() == 0, "input must be on TOS"); - assert(dest == FrameMap::rax_rdx_long_opr, "runtime stub places result in these registers"); + assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers"); // instruction sequence too long to inline it here { @@ -1442,7 +1563,7 @@ } else if (len == tmp3) { // everything is ok } else { - __ movl(tmp3, len); + __ mov(tmp3, len); } __ allocate_array(op->obj()->as_register(), len, @@ -1469,31 +1590,32 @@ CodeStub* stub = op->stub(); Label done; - __ cmpl(value, 0); + __ cmpptr(value, (int32_t)NULL_WORD); __ jcc(Assembler::equal, done); add_debug_info_for_null_check_here(op->info_for_exception()); - __ movl(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes())); - __ movl(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes())); + __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes())); + __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes())); // get instance klass - __ movl(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); + __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); // get 
super_check_offset __ movl(Rtmp1, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes())); // See if we get an immediate positive hit - __ cmpl(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1)); + __ cmpptr(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1)); __ jcc(Assembler::equal, done); // check for immediate negative hit __ cmpl(Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()); __ jcc(Assembler::notEqual, *stub->entry()); // check for self - __ cmpl(klass_RInfo, k_RInfo); + __ cmpptr(klass_RInfo, k_RInfo); __ jcc(Assembler::equal, done); - __ pushl(klass_RInfo); - __ pushl(k_RInfo); + __ push(klass_RInfo); + __ push(k_RInfo); __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); - __ popl(klass_RInfo); - __ popl(k_RInfo); + __ pop(klass_RInfo); + __ pop(k_RInfo); + // result is a boolean __ cmpl(k_RInfo, 0); __ jcc(Assembler::equal, *stub->entry()); __ bind(done); @@ -1524,10 +1646,14 @@ if (!k->is_loaded()) { jobject2reg_with_patching(k_RInfo, op->info_for_patch()); } else { +#ifdef _LP64 + __ movoop(k_RInfo, k->encoding()); +#else k_RInfo = noreg; +#endif // _LP64 } assert(obj != k_RInfo, "must be different"); - __ cmpl(obj, 0); + __ cmpptr(obj, (int32_t)NULL_WORD); if (op->profiled_method() != NULL) { ciMethod* method = op->profiled_method(); int bci = op->profiled_bci(); @@ -1559,9 +1685,13 @@ // get object classo // not a safepoint as obj null check happens earlier if (k->is_loaded()) { +#ifdef _LP64 + __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); +#else __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding()); +#endif // _LP64 } else { - __ cmpl(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); + __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); } __ jcc(Assembler::notEqual, *stub->entry()); @@ -1569,24 +1699,37 @@ } else { // get object class // not a safepoint as obj null check happens earlier - __ movl(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); + __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); if (k->is_loaded()) { // See if we get an immediate positive hit +#ifdef _LP64 + __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); +#else __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding()); +#endif // _LP64 if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) { __ jcc(Assembler::notEqual, *stub->entry()); } else { // See if we get an immediate positive hit __ jcc(Assembler::equal, done); // check for self +#ifdef _LP64 + __ cmpptr(klass_RInfo, k_RInfo); +#else __ cmpoop(klass_RInfo, k->encoding()); +#endif // _LP64 __ jcc(Assembler::equal, done); - __ pushl(klass_RInfo); + __ push(klass_RInfo); +#ifdef _LP64 + __ push(k_RInfo); +#else __ pushoop(k->encoding()); +#endif // _LP64 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); - __ popl(klass_RInfo); - __ popl(klass_RInfo); + __ pop(klass_RInfo); + __ pop(klass_RInfo); + // result is a boolean __ cmpl(klass_RInfo, 0); __ jcc(Assembler::equal, *stub->entry()); } @@ -1594,20 +1737,21 @@ } else { __ movl(Rtmp1, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes())); // See if we get an immediate positive hit - __ cmpl(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1)); + __ cmpptr(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1)); __ jcc(Assembler::equal, done); // check for immediate negative hit __ 
cmpl(Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()); __ jcc(Assembler::notEqual, *stub->entry()); // check for self - __ cmpl(klass_RInfo, k_RInfo); + __ cmpptr(klass_RInfo, k_RInfo); __ jcc(Assembler::equal, done); - __ pushl(klass_RInfo); - __ pushl(k_RInfo); + __ push(klass_RInfo); + __ push(k_RInfo); __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); - __ popl(klass_RInfo); - __ popl(k_RInfo); + __ pop(klass_RInfo); + __ pop(k_RInfo); + // result is a boolean __ cmpl(k_RInfo, 0); __ jcc(Assembler::equal, *stub->entry()); __ bind(done); @@ -1615,7 +1759,7 @@ } if (dst != obj) { - __ movl(dst, obj); + __ mov(dst, obj); } } else if (code == lir_instanceof) { Register obj = op->object()->as_register(); @@ -1635,29 +1779,33 @@ // so let's do it before loading the class if (!k->is_loaded()) { jobject2reg_with_patching(k_RInfo, op->info_for_patch()); + } else { + LP64_ONLY(__ movoop(k_RInfo, k->encoding())); } assert(obj != k_RInfo, "must be different"); __ verify_oop(obj); if (op->fast_check()) { - __ cmpl(obj, 0); + __ cmpptr(obj, (int32_t)NULL_WORD); __ jcc(Assembler::equal, zero); // get object class // not a safepoint as obj null check happens earlier - if (k->is_loaded()) { - __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding()); + if (LP64_ONLY(false &&) k->is_loaded()) { + NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding())); k_RInfo = noreg; } else { - __ cmpl(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); + __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); } __ jcc(Assembler::equal, one); } else { // get object class // not a safepoint as obj null check happens earlier - __ cmpl(obj, 0); + __ cmpptr(obj, (int32_t)NULL_WORD); __ jcc(Assembler::equal, zero); - __ movl(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); + __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); + +#ifndef _LP64 if (k->is_loaded()) { // See if we get an immediate positive hit __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding()); @@ -1666,40 +1814,43 @@ // check for self __ cmpoop(klass_RInfo, k->encoding()); __ jcc(Assembler::equal, one); - __ pushl(klass_RInfo); + __ push(klass_RInfo); __ pushoop(k->encoding()); __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); - __ popl(klass_RInfo); - __ popl(dst); + __ pop(klass_RInfo); + __ pop(dst); __ jmp(done); } } else { +#else + { // YUCK +#endif // LP64 assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers"); __ movl(dst, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes())); // See if we get an immediate positive hit - __ cmpl(k_RInfo, Address(klass_RInfo, dst, Address::times_1)); + __ cmpptr(k_RInfo, Address(klass_RInfo, dst, Address::times_1)); __ jcc(Assembler::equal, one); // check for immediate negative hit __ cmpl(dst, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()); __ jcc(Assembler::notEqual, zero); // check for self - __ cmpl(klass_RInfo, k_RInfo); + __ cmpptr(klass_RInfo, k_RInfo); __ jcc(Assembler::equal, one); - __ pushl(klass_RInfo); - __ pushl(k_RInfo); + __ push(klass_RInfo); + __ push(k_RInfo); __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); - __ popl(klass_RInfo); - __ popl(dst); + __ pop(klass_RInfo); + __ pop(dst); __ jmp(done); } } __ bind(zero); - __ xorl(dst, dst); + __ xorptr(dst, dst); __ jmp(done); __ bind(one); - __ movl(dst, 1); + __ movptr(dst, 
1); __ bind(done); } else { ShouldNotReachHere(); @@ -1709,8 +1860,7 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { - if (op->code() == lir_cas_long) { - assert(VM_Version::supports_cx8(), "wrong machine"); + if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) { assert(op->cmp_value()->as_register_lo() == rax, "wrong register"); assert(op->cmp_value()->as_register_hi() == rdx, "wrong register"); assert(op->new_value()->as_register_lo() == rbx, "wrong register"); @@ -1719,10 +1869,11 @@ if (os::is_MP()) { __ lock(); } - __ cmpxchg8(Address(addr, 0)); + NOT_LP64(__ cmpxchg8(Address(addr, 0))); - } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { - Register addr = op->addr()->as_register(); + } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) { + NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");) + Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); Register newval = op->new_value()->as_register(); Register cmpval = op->cmp_value()->as_register(); assert(cmpval == rax, "wrong register"); @@ -1733,7 +1884,28 @@ if (os::is_MP()) { __ lock(); } - __ cmpxchg(newval, Address(addr, 0)); + if ( op->code() == lir_cas_obj) { + __ cmpxchgptr(newval, Address(addr, 0)); + } else if (op->code() == lir_cas_int) { + __ cmpxchgl(newval, Address(addr, 0)); + } else { + LP64_ONLY(__ cmpxchgq(newval, Address(addr, 0))); + } +#ifdef _LP64 + } else if (op->code() == lir_cas_long) { + Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); + Register newval = op->new_value()->as_register_lo(); + Register cmpval = op->cmp_value()->as_register_lo(); + assert(cmpval == rax, "wrong register"); + assert(newval != NULL, "new val must be register"); + assert(cmpval != newval, "cmp and new values must be in different registers"); + assert(cmpval != addr, "cmp and addr must be in different registers"); + assert(newval != addr, "new value and addr must be in different registers"); + if (os::is_MP()) { + __ lock(); + } + __ cmpxchgq(newval, Address(addr, 0)); +#endif // _LP64 } else { Unimplemented(); } @@ -1768,17 +1940,17 @@ // optimized version that does not require a branch if (opr2->is_single_cpu()) { assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move"); - __ cmovl(ncond, result->as_register(), opr2->as_register()); + __ cmov(ncond, result->as_register(), opr2->as_register()); } else if (opr2->is_double_cpu()) { assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); - __ cmovl(ncond, result->as_register_lo(), opr2->as_register_lo()); - __ cmovl(ncond, result->as_register_hi(), opr2->as_register_hi()); + __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo()); + NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());) } else if (opr2->is_single_stack()) { __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix())); } else if (opr2->is_double_stack()) { - __ cmovl(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes)); - __ cmovl(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), 
hi_word_offset_in_bytes)); + __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes)); + NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));) } else { ShouldNotReachHere(); } @@ -1854,23 +2026,28 @@ // cpu register - cpu register Register rreg_lo = right->as_register_lo(); Register rreg_hi = right->as_register_hi(); - assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi); + NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi)); + LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo)); switch (code) { case lir_add: - __ addl(lreg_lo, rreg_lo); - __ adcl(lreg_hi, rreg_hi); + __ addptr(lreg_lo, rreg_lo); + NOT_LP64(__ adcl(lreg_hi, rreg_hi)); break; case lir_sub: - __ subl(lreg_lo, rreg_lo); - __ sbbl(lreg_hi, rreg_hi); + __ subptr(lreg_lo, rreg_lo); + NOT_LP64(__ sbbl(lreg_hi, rreg_hi)); break; case lir_mul: +#ifdef _LP64 + __ imulq(lreg_lo, rreg_lo); +#else assert(lreg_lo == rax && lreg_hi == rdx, "must be"); __ imull(lreg_hi, rreg_lo); __ imull(rreg_hi, lreg_lo); __ addl (rreg_hi, lreg_hi); __ mull (rreg_lo); __ addl (lreg_hi, rreg_hi); +#endif // _LP64 break; default: ShouldNotReachHere(); @@ -1878,20 +2055,35 @@ } else if (right->is_constant()) { // cpu register - constant +#ifdef _LP64 + jlong c = right->as_constant_ptr()->as_jlong_bits(); + __ movptr(r10, (intptr_t) c); + switch (code) { + case lir_add: + __ addptr(lreg_lo, r10); + break; + case lir_sub: + __ subptr(lreg_lo, r10); + break; + default: + ShouldNotReachHere(); + } +#else jint c_lo = right->as_constant_ptr()->as_jint_lo(); jint c_hi = right->as_constant_ptr()->as_jint_hi(); switch (code) { case lir_add: - __ addl(lreg_lo, c_lo); + __ addptr(lreg_lo, c_lo); __ adcl(lreg_hi, c_hi); break; case lir_sub: - __ subl(lreg_lo, c_lo); + __ subptr(lreg_lo, c_lo); __ sbbl(lreg_hi, c_hi); break; default: ShouldNotReachHere(); } +#endif // _LP64 } else { ShouldNotReachHere(); @@ -1970,7 +2162,7 @@ } } else if (left->is_single_fpu()) { - assert(dest->is_single_fpu(), "fpu stack allocation required"); + assert(dest->is_single_fpu(), "fpu stack allocation required"); if (right->is_single_fpu()) { arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack); @@ -2003,7 +2195,7 @@ } } else if (left->is_double_fpu()) { - assert(dest->is_double_fpu(), "fpu stack allocation required"); + assert(dest->is_double_fpu(), "fpu stack allocation required"); if (code == lir_mul_strictfp || code == lir_div_strictfp) { // Double values require special handling for strictfp mul/div on x86 @@ -2068,11 +2260,11 @@ jint c = right->as_constant_ptr()->as_jint(); switch (code) { case lir_add: { - __ increment(laddr, c); + __ incrementl(laddr, c); break; } case lir_sub: { - __ decrement(laddr, c); + __ decrementl(laddr, c); break; } default: ShouldNotReachHere(); @@ -2214,9 +2406,9 @@ } else { Register rright = right->as_register(); switch (code) { - case lir_logic_and: __ andl (reg, rright); break; - case lir_logic_or : __ orl (reg, rright); break; - case lir_logic_xor: __ xorl (reg, rright); break; + case lir_logic_and: __ andptr (reg, rright); break; + case lir_logic_or : __ orptr (reg, rright); break; + case lir_logic_xor: __ xorptr (reg, rright); break; default: ShouldNotReachHere(); } } @@ -2225,6 +2417,21 @@ Register l_lo = left->as_register_lo(); Register l_hi = left->as_register_hi(); if (right->is_constant()) { +#ifdef _LP64 + __ 
mov64(rscratch1, right->as_constant_ptr()->as_jlong()); + switch (code) { + case lir_logic_and: + __ andq(l_lo, rscratch1); + break; + case lir_logic_or: + __ orq(l_lo, rscratch1); + break; + case lir_logic_xor: + __ xorq(l_lo, rscratch1); + break; + default: ShouldNotReachHere(); + } +#else int r_lo = right->as_constant_ptr()->as_jint_lo(); int r_hi = right->as_constant_ptr()->as_jint_hi(); switch (code) { @@ -2242,22 +2449,23 @@ break; default: ShouldNotReachHere(); } +#endif // _LP64 } else { Register r_lo = right->as_register_lo(); Register r_hi = right->as_register_hi(); assert(l_lo != r_hi, "overwriting registers"); switch (code) { case lir_logic_and: - __ andl(l_lo, r_lo); - __ andl(l_hi, r_hi); + __ andptr(l_lo, r_lo); + NOT_LP64(__ andptr(l_hi, r_hi);) break; case lir_logic_or: - __ orl(l_lo, r_lo); - __ orl(l_hi, r_hi); + __ orptr(l_lo, r_lo); + NOT_LP64(__ orptr(l_hi, r_hi);) break; case lir_logic_xor: - __ xorl(l_lo, r_lo); - __ xorl(l_hi, r_hi); + __ xorptr(l_lo, r_lo); + NOT_LP64(__ xorptr(l_hi, r_hi);) break; default: ShouldNotReachHere(); } @@ -2266,6 +2474,9 @@ Register dst_lo = dst->as_register_lo(); Register dst_hi = dst->as_register_hi(); +#ifdef _LP64 + move_regs(l_lo, dst_lo); +#else if (dst_lo == l_hi) { assert(dst_hi != l_lo, "overwriting registers"); move_regs(l_hi, dst_hi); @@ -2275,6 +2486,7 @@ move_regs(l_lo, dst_lo); move_regs(l_hi, dst_hi); } +#endif // _LP64 } } @@ -2309,7 +2521,7 @@ move_regs(lreg, dreg); } else if (code == lir_irem) { Label done; - __ movl(dreg, lreg); + __ mov(dreg, lreg); __ andl(dreg, 0x80000000 | (divisor - 1)); __ jcc(Assembler::positive, done); __ decrement(dreg); @@ -2343,21 +2555,36 @@ Register reg1 = opr1->as_register(); if (opr2->is_single_cpu()) { // cpu register - cpu register - __ cmpl(reg1, opr2->as_register()); + if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { + __ cmpptr(reg1, opr2->as_register()); + } else { + assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?"); + __ cmpl(reg1, opr2->as_register()); + } } else if (opr2->is_stack()) { // cpu register - stack - __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); + if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { + __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); + } else { + __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); + } } else if (opr2->is_constant()) { // cpu register - constant LIR_Const* c = opr2->as_constant_ptr(); if (c->type() == T_INT) { __ cmpl(reg1, c->as_jint()); - } else if (c->type() == T_OBJECT) { + } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { + // In 64bit oops are single register jobject o = c->as_jobject(); if (o == NULL) { - __ cmpl(reg1, NULL_WORD); + __ cmpptr(reg1, (int32_t)NULL_WORD); } else { +#ifdef _LP64 + __ movoop(rscratch1, o); + __ cmpptr(reg1, rscratch1); +#else __ cmpoop(reg1, c->as_jobject()); +#endif // _LP64 } } else { ShouldNotReachHere(); @@ -2376,6 +2603,9 @@ Register xlo = opr1->as_register_lo(); Register xhi = opr1->as_register_hi(); if (opr2->is_double_cpu()) { +#ifdef _LP64 + __ cmpptr(xlo, opr2->as_register_lo()); +#else // cpu register - cpu register Register ylo = opr2->as_register_lo(); Register yhi = opr2->as_register_hi(); @@ -2384,11 +2614,16 @@ if (condition == lir_cond_equal || condition == lir_cond_notEqual) { __ orl(xhi, xlo); } +#endif // _LP64 } else if (opr2->is_constant()) { // cpu register - constant 0 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); +#ifdef _LP64 + __ cmpptr(xlo, 
(int32_t)opr2->as_jlong()); +#else assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case"); __ orl(xhi, xlo); +#endif // _LP64 } else { ShouldNotReachHere(); } @@ -2441,16 +2676,28 @@ __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); } else if (opr1->is_address() && opr2->is_constant()) { + LIR_Const* c = opr2->as_constant_ptr(); +#ifdef _LP64 + if (c->type() == T_OBJECT || c->type() == T_ARRAY) { + assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse"); + __ movoop(rscratch1, c->as_jobject()); + } +#endif // LP64 if (op->info() != NULL) { add_debug_info_for_null_check_here(op->info()); } // special case: address - constant LIR_Address* addr = opr1->as_address_ptr(); - LIR_Const* c = opr2->as_constant_ptr(); if (c->type() == T_INT) { __ cmpl(as_Address(addr), c->as_jint()); - } else if (c->type() == T_OBJECT) { + } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { +#ifdef _LP64 + // %%% Make this explode if addr isn't reachable until we figure out a + // better strategy by giving noreg as the temp for as_Address + __ cmpptr(rscratch1, as_Address(addr, noreg)); +#else __ cmpoop(as_Address(addr), c->as_jobject()); +#endif // _LP64 } else { ShouldNotReachHere(); } @@ -2479,11 +2726,27 @@ } } else { assert(code == lir_cmp_l2i, "check"); +#ifdef _LP64 + Register dest = dst->as_register(); + __ xorptr(dest, dest); + Label high, done; + __ cmpptr(left->as_register_lo(), right->as_register_lo()); + __ jcc(Assembler::equal, done); + __ jcc(Assembler::greater, high); + __ decrement(dest); + __ jmp(done); + __ bind(high); + __ increment(dest); + + __ bind(done); + +#else __ lcmp2int(left->as_register_hi(), left->as_register_lo(), right->as_register_hi(), right->as_register_lo()); move_regs(left->as_register_hi(), dst->as_register()); +#endif // _LP64 } } @@ -2554,7 +2817,8 @@ __ movoop(rbx, (jobject)NULL); // must be set to -1 at code generation time assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP"); - __ jump(RuntimeAddress((address)-1)); + // On 64bit this will die since it will take a movq & jmp, must be only a jmp + __ jump(RuntimeAddress(__ pc())); assert(__ offset() - start <= call_stub_size, "stub too big") __ end_a_stub(); @@ -2619,6 +2883,14 @@ Register lo = left->as_register_lo(); Register hi = left->as_register_hi(); assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX"); +#ifdef _LP64 + switch (code) { + case lir_shl: __ shlptr(lo); break; + case lir_shr: __ sarptr(lo); break; + case lir_ushr: __ shrptr(lo); break; + default: ShouldNotReachHere(); + } +#else switch (code) { case lir_shl: __ lshl(hi, lo); break; @@ -2626,6 +2898,7 @@ case lir_ushr: __ lshr(hi, lo, false); break; default: ShouldNotReachHere(); } +#endif // LP64 } else { ShouldNotReachHere(); } @@ -2646,7 +2919,21 @@ default: ShouldNotReachHere(); } } else if (dest->is_double_cpu()) { +#ifndef _LP64 Unimplemented(); +#else + // first move left into dest so that left is not destroyed by the shift + Register value = dest->as_register_lo(); + count = count & 0x1F; // Java spec + + move_regs(left->as_register_lo(), value); + switch (code) { + case lir_shl: __ shlptr(value, count); break; + case lir_shr: __ sarptr(value, count); break; + case lir_ushr: __ shrptr(value, count); break; + default: ShouldNotReachHere(); + } +#endif // _LP64 } else { ShouldNotReachHere(); } @@ -2657,7 +2944,7 @@ assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); int 
offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); - __ movl (Address(rsp, offset_from_rsp_in_bytes), r); + __ movptr (Address(rsp, offset_from_rsp_in_bytes), r); } @@ -2665,7 +2952,7 @@ assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); - __ movl (Address(rsp, offset_from_rsp_in_bytes), c); + __ movptr (Address(rsp, offset_from_rsp_in_bytes), c); } @@ -2713,27 +3000,52 @@ // these are just temporary placements until we need to reload store_parameter(src_pos, 3); store_parameter(src, 4); - assert(src == rcx && src_pos == rdx, "mismatch in calling convention"); + NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");) - // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint - __ pushl(length); - __ pushl(dst_pos); - __ pushl(dst); - __ pushl(src_pos); - __ pushl(src); address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); + + // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint +#ifdef _LP64 + // The arguments are in java calling convention so we can trivially shift them to C + // convention + assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4); + __ mov(c_rarg0, j_rarg0); + assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4); + __ mov(c_rarg1, j_rarg1); + assert_different_registers(c_rarg2, j_rarg3, j_rarg4); + __ mov(c_rarg2, j_rarg2); + assert_different_registers(c_rarg3, j_rarg4); + __ mov(c_rarg3, j_rarg3); +#ifdef _WIN64 + // Allocate abi space for args but be sure to keep stack aligned + __ subptr(rsp, 6*wordSize); + store_parameter(j_rarg4, 4); + __ call(RuntimeAddress(entry)); + __ addptr(rsp, 6*wordSize); +#else + __ mov(c_rarg4, j_rarg4); + __ call(RuntimeAddress(entry)); +#endif // _WIN64 +#else + __ push(length); + __ push(dst_pos); + __ push(dst); + __ push(src_pos); + __ push(src); __ call_VM_leaf(entry, 5); // removes pushed parameter from the stack +#endif // _LP64 + __ cmpl(rax, 0); __ jcc(Assembler::equal, *stub->continuation()); // Reload values from the stack so they are where the stub // expects them. 
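
The movl-to-movptr substitutions around store_parameter() and in the reload sequence below exist because each outgoing slot is one machine word: on LP64 a 32-bit load keeps only the low half of a 64-bit value. A minimal standalone sketch of that truncation, using nothing beyond the C++ standard library (the value in the slot is invented for illustration):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // A pointer-sized value parked in a one-word spill slot, the way the
      // arraycopy path keeps its arguments on the stack.
      uintptr_t slot = reinterpret_cast<uintptr_t>(&slot);

      uint32_t  with_movl   = static_cast<uint32_t>(slot); // 32-bit reload: high half lost on LP64
      uintptr_t with_movptr = slot;                        // pointer-width reload: value intact

      printf("movl-style reload:   %#llx\n", (unsigned long long)with_movl);
      printf("movptr-style reload: %#llx\n", (unsigned long long)with_movptr);
      return 0;
    }
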
- __ movl (dst, Address(rsp, 0*BytesPerWord)); - __ movl (dst_pos, Address(rsp, 1*BytesPerWord)); - __ movl (length, Address(rsp, 2*BytesPerWord)); - __ movl (src_pos, Address(rsp, 3*BytesPerWord)); - __ movl (src, Address(rsp, 4*BytesPerWord)); + __ movptr (dst, Address(rsp, 0*BytesPerWord)); + __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); + __ movptr (length, Address(rsp, 2*BytesPerWord)); + __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); + __ movptr (src, Address(rsp, 4*BytesPerWord)); __ jmp(*stub->entry()); __ bind(*stub->continuation()); @@ -2742,7 +3054,7 @@ assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); - int elem_size = type2aelembytes[basic_type]; + int elem_size = type2aelembytes(basic_type); int shift_amount; Address::ScaleFactor scale; @@ -2772,13 +3084,15 @@ Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); + // length and pos's are all sign extended at this point on 64bit + // test for NULL if (flags & LIR_OpArrayCopy::src_null_check) { - __ testl(src, src); + __ testptr(src, src); __ jcc(Assembler::zero, *stub->entry()); } if (flags & LIR_OpArrayCopy::dst_null_check) { - __ testl(dst, dst); + __ testptr(dst, dst); __ jcc(Assembler::zero, *stub->entry()); } @@ -2797,19 +3111,19 @@ } if (flags & LIR_OpArrayCopy::src_range_check) { - __ leal(tmp, Address(src_pos, length, Address::times_1, 0)); + __ lea(tmp, Address(src_pos, length, Address::times_1, 0)); __ cmpl(tmp, src_length_addr); __ jcc(Assembler::above, *stub->entry()); } if (flags & LIR_OpArrayCopy::dst_range_check) { - __ leal(tmp, Address(dst_pos, length, Address::times_1, 0)); + __ lea(tmp, Address(dst_pos, length, Address::times_1, 0)); __ cmpl(tmp, dst_length_addr); __ jcc(Assembler::above, *stub->entry()); } if (flags & LIR_OpArrayCopy::type_check) { - __ movl(tmp, src_klass_addr); - __ cmpl(tmp, dst_klass_addr); + __ movptr(tmp, src_klass_addr); + __ cmpptr(tmp, dst_klass_addr); __ jcc(Assembler::notEqual, *stub->entry()); } @@ -2825,14 +3139,14 @@ Label known_ok, halt; __ movoop(tmp, default_type->encoding()); if (basic_type != T_OBJECT) { - __ cmpl(tmp, dst_klass_addr); + __ cmpptr(tmp, dst_klass_addr); __ jcc(Assembler::notEqual, halt); - __ cmpl(tmp, src_klass_addr); + __ cmpptr(tmp, src_klass_addr); __ jcc(Assembler::equal, known_ok); } else { - __ cmpl(tmp, dst_klass_addr); + __ cmpptr(tmp, dst_klass_addr); __ jcc(Assembler::equal, known_ok); - __ cmpl(src, dst); + __ cmpptr(src, dst); __ jcc(Assembler::equal, known_ok); } __ bind(halt); @@ -2841,14 +3155,24 @@ } #endif - __ leal(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); - store_parameter(tmp, 0); - __ leal(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); - store_parameter(tmp, 1); if (shift_amount > 0 && basic_type != T_OBJECT) { - __ shll(length, shift_amount); + __ shlptr(length, shift_amount); } + +#ifdef _LP64 + assert_different_registers(c_rarg0, dst, dst_pos, length); + __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + assert_different_registers(c_rarg1, length); + __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + __ mov(c_rarg2, length); + +#else + __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + store_parameter(tmp, 0); + __ lea(tmp, Address(dst, dst_pos, 
scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + store_parameter(tmp, 1); store_parameter(length, 2); +#endif // _LP64 if (basic_type == T_OBJECT) { __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 0); } else { @@ -2948,13 +3272,13 @@ } } } else { - __ movl(recv, Address(recv, oopDesc::klass_offset_in_bytes())); + __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes())); Label update_done; uint i; for (i = 0; i < VirtualCallData::row_limit(); i++) { Label next_test; // See if the receiver is receiver[n]. - __ cmpl(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)))); + __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)))); __ jcc(Assembler::notEqual, next_test); Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); __ addl(data_addr, DataLayout::counter_increment); @@ -2966,9 +3290,9 @@ for (i = 0; i < VirtualCallData::row_limit(); i++) { Label next_test; Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); - __ cmpl(recv_addr, NULL_WORD); + __ cmpptr(recv_addr, (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, next_test); - __ movl(recv_addr, recv); + __ movptr(recv_addr, recv); __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment); if (i < (VirtualCallData::row_limit() - 1)) { __ jmp(update_done); @@ -2988,7 +3312,7 @@ void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { - __ leal(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no)); + __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no)); } @@ -3004,6 +3328,11 @@ } else if (left->is_double_cpu()) { Register lo = left->as_register_lo(); +#ifdef _LP64 + Register dst = dest->as_register_lo(); + __ movptr(dst, lo); + __ negptr(dst); +#else Register hi = left->as_register_hi(); __ lneg(hi, lo); if (dest->as_register_lo() == hi) { @@ -3014,6 +3343,7 @@ move_regs(lo, dest->as_register_lo()); move_regs(hi, dest->as_register_hi()); } +#endif // _LP64 } else if (dest->is_single_xmm()) { if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) { @@ -3042,8 +3372,9 @@ void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) { assert(addr->is_address() && dest->is_register(), "check"); - Register reg = dest->as_register(); - __ leal(dest->as_register(), as_Address(addr->as_address_ptr())); + Register reg; + reg = dest->as_pointer_register(); + __ lea(reg, as_Address(addr->as_address_ptr())); } @@ -3066,9 +3397,13 @@ if (src->is_double_xmm()) { if (dest->is_double_cpu()) { - __ movd(dest->as_register_lo(), src->as_xmm_double_reg()); +#ifdef _LP64 + __ movdq(dest->as_register_lo(), src->as_xmm_double_reg()); +#else + __ movdl(dest->as_register_lo(), src->as_xmm_double_reg()); __ psrlq(src->as_xmm_double_reg(), 32); - __ movd(dest->as_register_hi(), src->as_xmm_double_reg()); + __ movdl(dest->as_register_hi(), src->as_xmm_double_reg()); +#endif // _LP64 } else if (dest->is_double_stack()) { __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg()); } else if (dest->is_address()) { @@ -3112,7 +3447,8 @@ void LIR_Assembler::membar() { - __ membar(); + // QQQ sparc TSO uses this, + __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad)); } void LIR_Assembler::membar_acquire() { @@ -3127,7 +3463,12 @@ void LIR_Assembler::get_thread(LIR_Opr result_reg) { assert(result_reg->is_register(), "check"); 
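
The get_thread() change that follows pins the current JavaThread in r15 on 64-bit, so reading the thread becomes a single register move instead of a call. A rough portable analogue of that idea, using a hypothetical thread-local pointer purely for illustration:

    #include <cstdio>

    struct JavaThread { int id; };

    // Stand-in for "the current thread lives in r15": a thread-local pointer
    // that is read directly rather than fetched through a get_thread() helper.
    static thread_local JavaThread* current_thread = nullptr;

    int main() {
      JavaThread me{1};
      current_thread = &me;                                   // set up on thread attach
      printf("current thread id: %d\n", current_thread->id);  // the "mov dst, r15" analogue
      return 0;
    }
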
+#ifdef _LP64 + // __ get_thread(result_reg->as_register_lo()); + __ mov(result_reg->as_register(), r15_thread); +#else __ get_thread(result_reg->as_register()); +#endif // _LP64 } --- old/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp 2009-08-01 04:07:43.863710685 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp 2009-08-01 04:07:43.785858338 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)c1_LIRAssembler_x86.hpp 1.20 07/05/05 17:04:12 JVM" -#endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ private: @@ -39,13 +36,20 @@ address float_constant(float f); address double_constant(double d); + bool is_literal_address(LIR_Address* addr); + + // When we need to use something other than rscratch1 use this + // method. + Address as_Address(LIR_Address* addr, Register tmp); + + public: void store_parameter(Register r, int offset_from_esp_in_words); void store_parameter(jint c, int offset_from_esp_in_words); void store_parameter(jobject c, int offset_from_esp_in_words); - enum { call_stub_size = 15, + enum { call_stub_size = NOT_LP64(15) LP64_ONLY(28), exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175), - deopt_handler_size = 10 + deopt_handler_size = NOT_LP64(10) LP64_ONLY(17) }; --- old/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp 2009-08-01 04:07:44.728088421 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp 2009-08-01 04:07:44.641748978 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)c1_LIRGenerator_x86.cpp 1.16 07/09/17 09:25:58 JVM" -#endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ # include "incls/_precompiled.incl" @@ -80,10 +77,10 @@ switch (type->tag()) { case intTag: opr = FrameMap::rax_opr; break; case objectTag: opr = FrameMap::rax_oop_opr; break; - case longTag: opr = FrameMap::rax_rdx_long_opr; break; + case longTag: opr = FrameMap::long0_opr; break; case floatTag: opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr : FrameMap::fpu0_float_opr; break; case doubleTag: opr = UseSSE >= 2 ? 
FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break; - + case addressTag: default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr; } @@ -120,12 +117,14 @@ bool LIRGenerator::can_inline_as_constant(Value v) const { + if (v->type()->tag() == longTag) return false; return v->type()->tag() != objectTag || (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object()); } bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { + if (c->type() == T_LONG) return false; return c->type() != T_OBJECT || c->as_jobject() == NULL; } @@ -154,10 +153,17 @@ LIR_Address* addr; if (index_opr->is_constant()) { - int elem_size = type2aelembytes[type]; + int elem_size = type2aelembytes(type); addr = new LIR_Address(array_opr, offset_in_bytes + index_opr->as_jint() * elem_size, type); } else { +#ifdef _LP64 + if (index_opr->type() == T_INT) { + LIR_Opr tmp = new_register(T_LONG); + __ convert(Bytecodes::_i2l, index_opr, tmp); + index_opr = tmp; + } +#endif // _LP64 addr = new LIR_Address(array_opr, index_opr, LIR_Address::scale(type), @@ -167,7 +173,7 @@ // This store will need a precise card mark, so go ahead and // compute the full adddres instead of computing once for the // store and again for the card mark. - LIR_Opr tmp = new_register(T_INT); + LIR_Opr tmp = new_pointer_register(); __ leal(LIR_OprFact::address(addr), tmp); return new LIR_Address(tmp, 0, type); } else { @@ -177,9 +183,8 @@ void LIRGenerator::increment_counter(address counter, int step) { - LIR_Opr temp = new_register(T_INT); - LIR_Opr pointer = new_register(T_INT); - __ move(LIR_OprFact::intConst((int)counter), pointer); + LIR_Opr pointer = new_pointer_register(); + __ move(LIR_OprFact::intptrConst(counter), pointer); LIR_Address* addr = new LIR_Address(pointer, 0, T_INT); increment_counter(addr, step); } @@ -291,14 +296,16 @@ LIR_Opr tmp1 = new_register(objectType); LIR_Opr tmp2 = new_register(objectType); LIR_Opr tmp3 = new_register(objectType); - + CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info); __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info); } if (obj_store) { + // Needs GC write barriers. + pre_barrier(LIR_OprFact::address(array_addr), false, NULL); __ move(value.result(), array_addr, null_check_info); - // Seems to be a precise + // Seems to be a precise post_barrier(LIR_OprFact::address(array_addr), value.result()); } else { __ move(value.result(), array_addr, null_check_info); @@ -353,7 +360,7 @@ value.load_item(); LIR_Opr reg = rlock(x); __ negate(value.result(), reg); - + set_result(x, round_item(reg)); } @@ -388,7 +395,7 @@ must_load_right = UseSSE < 2 && (c->is_one_double() || c->is_zero_double()); } } - + if (must_load_both) { // frem and drem destroy also right operand, so move it to a new register right.set_destroys_register(); @@ -416,7 +423,7 @@ } __ move(right.result(), fpu1); // order of left and right operand is important! 
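
The operand-order warning above reflects that the remainder is taken between two fixed fpu stack positions and that Java's frem/drem keeps the sign of the dividend, the same contract as C's fmod. A quick standalone check, independent of the patch itself:

    #include <cmath>
    #include <cstdio>

    int main() {
      double dividend = 7.5, divisor = 2.0;

      // Java's frem/drem behaves like fmod: the result carries the dividend's sign.
      printf("fmod(dividend, divisor) = %g\n", std::fmod(dividend, divisor)); // 1.5

      // Swapping the operands changes the answer, which is why the generator is
      // careful about which value ends up in fpu0 and which in fpu1.
      printf("fmod(divisor, dividend) = %g\n", std::fmod(divisor, dividend)); // 2
      return 0;
    }
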
__ move(left.result(), fpu0); - __ rem (fpu0, fpu1, fpu0); + __ rem (fpu0, fpu1, fpu0); __ move(fpu0, reg); } else { @@ -484,7 +491,7 @@ left.load_item(); right.load_item(); - LIR_Opr reg = FrameMap::rax_rdx_long_opr; + LIR_Opr reg = FrameMap::long0_opr; arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL); LIR_Opr result = rlock_result(x); __ move(reg, result); @@ -691,16 +698,16 @@ LIRItem obj (x->argument_at(0), this); // AtomicLong object LIRItem cmp_value (x->argument_at(1), this); // value to compare with field LIRItem new_value (x->argument_at(2), this); // replace field with new_value if it matches cmp_value - + // compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction - cmp_value.load_item_force(FrameMap::rax_rdx_long_opr); - + cmp_value.load_item_force(FrameMap::long0_opr); + // new value must be in rcx,ebx (hi,lo) - new_value.load_item_force(FrameMap::rbx_rcx_long_opr); - + new_value.load_item_force(FrameMap::long1_opr); + // object pointer register is overwritten with field address obj.load_item(); - + // generate compare-and-swap; produces zero condition if swap occurs int value_offset = sun_misc_AtomicLongCSImpl::value_offset(); LIR_Opr addr = obj.result(); @@ -708,7 +715,7 @@ LIR_Opr t1 = LIR_OprFact::illegalOpr; // no temp needed LIR_Opr t2 = LIR_OprFact::illegalOpr; // no temp needed __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2); - + // generate conditional move of boolean result LIR_Opr result = rlock_result(x); __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result); @@ -723,7 +730,10 @@ LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp assert(obj.type()->tag() == objectTag, "invalid type"); - assert(offset.type()->tag() == intTag, "invalid type"); + + // In 64bit the type can be long, sparc doesn't have this assert + // assert(offset.type()->tag() == intTag, "invalid type"); + assert(cmp.type()->tag() == type->tag(), "invalid type"); assert(val.type()->tag() == type->tag(), "invalid type"); @@ -738,8 +748,8 @@ cmp.load_item_force(FrameMap::rax_opr); val.load_item(); } else if (type == longType) { - cmp.load_item_force(FrameMap::rax_rdx_long_opr); - val.load_item_force(FrameMap::rbx_rcx_long_opr); + cmp.load_item_force(FrameMap::long0_opr); + val.load_item_force(FrameMap::long1_opr); } else { ShouldNotReachHere(); } @@ -748,10 +758,13 @@ __ move(obj.result(), addr); __ add(addr, offset.result(), addr); - + if (type == objectType) { // Write-barrier needed for Object fields. + // Do the pre-write barrier, if any. + pre_barrier(addr, false, NULL); + } LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience - if (type == objectType) + if (type == objectType) __ cas_obj(addr, cmp.result(), val.result(), ill, ill); else if (type == intType) __ cas_int(addr, cmp.result(), val.result(), ill, ill); @@ -760,12 +773,12 @@ else { ShouldNotReachHere(); } - + // generate conditional move of boolean result LIR_Opr result = rlock_result(x); __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result); if (type == objectType) { // Write-barrier needed for Object fields. 
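
The compareAndSwap intrinsics above force the expected value into rax, emit a locked cmpxchg, and then materialize a Java boolean from the resulting condition flags with a conditional move. The same contract expressed as a plain sketch with std::atomic (not the VM's own code path):

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<long> field{42};
      long expected    = 42;
      long replacement = 99;

      // One atomic compare-and-exchange; the boolean is what the generated
      // cmove derives from the zero flag afterwards.
      bool swapped = field.compare_exchange_strong(expected, replacement);

      printf("swapped=%d field=%ld\n", (int)swapped, field.load());
      return 0;
    }
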
- // Seems to be precise + // Seems to be precise post_barrier(addr, val.result()); } } @@ -836,12 +849,33 @@ // operands for arraycopy must use fixed registers, otherwise // LinearScan will fail allocation (because arraycopy always needs a // call) + +#ifndef _LP64 src.load_item_force (FrameMap::rcx_oop_opr); src_pos.load_item_force (FrameMap::rdx_opr); dst.load_item_force (FrameMap::rax_oop_opr); dst_pos.load_item_force (FrameMap::rbx_opr); length.load_item_force (FrameMap::rdi_opr); LIR_Opr tmp = (FrameMap::rsi_opr); +#else + + // The java calling convention will give us enough registers + // so that on the stub side the args will be perfect already. + // On the other slow/special case side we call C and the arg + // positions are not similar enough to pick one as the best. + // Also because the java calling convention is a "shifted" version + // of the C convention we can process the java args trivially into C + // args without worry of overwriting during the xfer + + src.load_item_force (FrameMap::as_oop_opr(j_rarg0)); + src_pos.load_item_force (FrameMap::as_opr(j_rarg1)); + dst.load_item_force (FrameMap::as_oop_opr(j_rarg2)); + dst_pos.load_item_force (FrameMap::as_opr(j_rarg3)); + length.load_item_force (FrameMap::as_opr(j_rarg4)); + + LIR_Opr tmp = FrameMap::as_opr(j_rarg5); +#endif // LP64 + set_no_result(x); int flags; @@ -860,9 +894,9 @@ case T_FLOAT: return FrameMap::fpu0_float_opr; case T_DOUBLE: return FrameMap::fpu0_double_opr; case T_INT: return FrameMap::rax_opr; - case T_LONG: return FrameMap::rax_rdx_long_opr; + case T_LONG: return FrameMap::long0_opr; default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr; - } + } } void LIRGenerator::do_Convert(Convert* x) { @@ -959,7 +993,7 @@ LIR_Opr klass_reg = FrameMap::rdx_oop_opr; LIR_Opr len = length.result(); BasicType elem_type = x->elt_type(); - + __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg); CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info); @@ -1164,9 +1198,13 @@ LIR_Opr LIRGenerator::getThreadPointer() { +#ifdef _LP64 + return FrameMap::as_pointer_opr(r15_thread); +#else LIR_Opr result = new_register(T_INT); __ get_thread(result); return result; +#endif // } void LIRGenerator::trace_block_entry(BlockBegin* block) { @@ -1253,6 +1291,8 @@ LIR_Address* addr = new LIR_Address(src, offset, type); bool is_obj = (type == T_ARRAY || type == T_OBJECT); if (is_obj) { + // Do the pre-write barrier, if any. + pre_barrier(LIR_OprFact::address(addr), false, NULL); __ move(data, addr); assert(src->is_register(), "must be register"); // Seems to be a precise address --- old/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.hpp 2009-08-01 04:07:45.670806012 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.hpp 2009-08-01 04:07:45.584345248 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)c1_LinearScan_x86.hpp 1.10 07/07/02 16:50:32 JVM" -#endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,22 +19,33 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ inline bool LinearScan::is_processed_reg_num(int reg_num) { +#ifndef _LP64 // rsp and rbp (numbers 6 ancd 7) are ignored assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below"); assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below"); assert(reg_num >= 0, "invalid reg_num"); return reg_num < 6 || reg_num > 7; +#else + // rsp and rbp, r10, r15 (numbers 6 ancd 7) are ignored + assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below"); + assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below"); + assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below"); + assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below"); + assert(reg_num >= 0, "invalid reg_num"); + + return reg_num < 12 || reg_num > 15; +#endif // _LP64 } inline int LinearScan::num_physical_regs(BasicType type) { // Intel requires two cpu registers for long, // but requires only one fpu register for double - if (type == T_LONG) { + if (LP64_ONLY(false &&) type == T_LONG) { return 2; } return 1; --- old/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp 2009-08-01 04:07:46.500621313 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp 2009-08-01 04:07:46.423325331 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)c1_MacroAssembler_x86.cpp 1.60 07/09/17 09:25:58 JVM" -#endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,25 +19,24 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" #include "incls/_c1_MacroAssembler_x86.cpp.incl" int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) { - const int aligned_mask = 3; + const int aligned_mask = BytesPerWord -1; const int hdr_offset = oopDesc::mark_offset_in_bytes(); assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction"); assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different"); - assert(BytesPerWord == 4, "adjust aligned_mask and code"); Label done; int null_check_offset = -1; verify_oop(obj); // save object being locked into the BasicObjectLock - movl(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj); + movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj); if (UseBiasedLocking) { assert(scratch != noreg, "should have scratch register at this point"); @@ -50,16 +46,16 @@ } // Load object header - movl(hdr, Address(obj, hdr_offset)); + movptr(hdr, Address(obj, hdr_offset)); // and mark it as unlocked - orl(hdr, markOopDesc::unlocked_value); + orptr(hdr, markOopDesc::unlocked_value); // save unlocked object header into the displaced header location on the stack - movl(Address(disp_hdr, 0), hdr); + movptr(Address(disp_hdr, 0), hdr); // test if object header is still the same (i.e. unlocked), and if so, store the // displaced header address in the object header - if it is not the same, get the // object header instead if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg! 
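
lock_object's fast path reads the mark word, sets the unlocked bit, saves that value as the displaced header, and then tries to swing the mark to the displaced header's stack address with the locked cmpxchg emitted just below. A simplified stand-alone model of that handshake; the bit layout here is only a stand-in, not HotSpot's real mark word encoding:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    int main() {
      std::atomic<uintptr_t> mark{1};        // object mark word, low bit = "unlocked"
      uintptr_t displaced_header = 0;        // the BasicObjectLock slot on the stack

      uintptr_t hdr = mark.load() | 1;       // force the unlocked pattern we expect to see
      displaced_header = hdr;                // save it as the displaced header

      // cmpxchg: install the slot address only if the object is still unlocked;
      // any other mark value sends the real code to the slow case.
      uintptr_t expected = hdr;
      bool fast_path = mark.compare_exchange_strong(
          expected, reinterpret_cast<uintptr_t>(&displaced_header));

      printf("fast-path lock acquired: %s\n", fast_path ? "yes" : "no");
      return 0;
    }
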
- cmpxchg(disp_hdr, Address(obj, hdr_offset)); + cmpxchgptr(disp_hdr, Address(obj, hdr_offset)); // if the object header was the same, we're done if (PrintBiasedLockingStatistics) { cond_inc32(Assembler::equal, @@ -79,11 +75,11 @@ // // assuming both the stack pointer and page_size have their least // significant 2 bits cleared and page_size is a power of 2 - subl(hdr, rsp); - andl(hdr, aligned_mask - os::vm_page_size()); + subptr(hdr, rsp); + andptr(hdr, aligned_mask - os::vm_page_size()); // for recursive locking, the result is zero => save it in the displaced header // location (NULL in the displaced hdr location indicates recursive locking) - movl(Address(disp_hdr, 0), hdr); + movptr(Address(disp_hdr, 0), hdr); // otherwise we don't care about the result and handle locking via runtime call jcc(Assembler::notZero, slow_case); // done @@ -93,35 +89,34 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) { - const int aligned_mask = 3; + const int aligned_mask = BytesPerWord -1; const int hdr_offset = oopDesc::mark_offset_in_bytes(); assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction"); assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different"); - assert(BytesPerWord == 4, "adjust aligned_mask and code"); Label done; if (UseBiasedLocking) { // load object - movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes())); + movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes())); biased_locking_exit(obj, hdr, done); } // load displaced header - movl(hdr, Address(disp_hdr, 0)); + movptr(hdr, Address(disp_hdr, 0)); // if the loaded hdr is NULL we had recursive locking - testl(hdr, hdr); + testptr(hdr, hdr); // if we had recursive locking, we are done jcc(Assembler::zero, done); if (!UseBiasedLocking) { // load object - movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes())); + movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes())); } verify_oop(obj); // test if object header is pointing to the displaced header, and if so, restore // the displaced header in the object - if the object header is not pointing to // the displaced header, get the object header instead if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg! 
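
When that cmpxchg fails, lock_object decides whether the failure was a recursive lock by checking that the mark it found is a word-aligned address within one page of rsp; the aligned_mask change from 3 to BytesPerWord - 1 keeps that test correct for 8-byte words. The arithmetic checked in isolation, with invented addresses:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t page_size    = 4096;                 // os::vm_page_size() stand-in
      const uintptr_t aligned_mask = sizeof(void*) - 1;    // BytesPerWord - 1

      uintptr_t rsp = 0x7ffff7fd8000;                      // pretend stack pointer
      uintptr_t hdr = rsp + 0x40;                          // mark pointing into our own frame

      // subptr(hdr, rsp); andptr(hdr, aligned_mask - page_size);
      // A zero result means the difference is word aligned and below one page.
      uintptr_t test = (hdr - rsp) & (aligned_mask - page_size);

      printf("recursive lock: %s\n", test == 0 ? "yes" : "no");
      return 0;
    }
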
- cmpxchg(hdr, Address(obj, hdr_offset)); + cmpxchgptr(hdr, Address(obj, hdr_offset)); // if the object header was not pointing to the displaced header, // we do unlocking via runtime call jcc(Assembler::notEqual, slow_case); @@ -144,13 +139,14 @@ assert_different_registers(obj, klass, len); if (UseBiasedLocking && !len->is_valid()) { assert_different_registers(obj, klass, len, t1, t2); - movl(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); - movl(Address(obj, oopDesc::mark_offset_in_bytes()), t1); + movptr(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); + movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1); } else { - movl(Address(obj, oopDesc::mark_offset_in_bytes ()), (int)markOopDesc::prototype()); + // This assumes that all prototype bits fit in an int32_t + movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype()); } - movl(Address(obj, oopDesc::klass_offset_in_bytes()), klass); + movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass); if (len->is_valid()) { movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len); } @@ -163,25 +159,27 @@ assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different"); assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord"); Register index = len_in_bytes; - subl(index, hdr_size_in_bytes); + // index is positive and ptr sized + subptr(index, hdr_size_in_bytes); jcc(Assembler::zero, done); // initialize topmost word, divide index by 2, check if odd and test if zero // note: for the remaining code to work, index must be a multiple of BytesPerWord #ifdef ASSERT { Label L; - testl(index, BytesPerWord - 1); + testptr(index, BytesPerWord - 1); jcc(Assembler::zero, L); stop("index is not a multiple of BytesPerWord"); bind(L); } #endif - xorl(t1, t1); // use _zero reg to clear memory (shorter code) + xorptr(t1, t1); // use _zero reg to clear memory (shorter code) if (UseIncDec) { - shrl(index, 3); // divide by 8 and set carry flag if bit 2 was set + shrptr(index, 3); // divide by 8/16 and set carry flag if bit 2 was set } else { - shrl(index, 2); // use 2 instructions to avoid partial flag stall - shrl(index, 1); + shrptr(index, 2); // use 2 instructions to avoid partial flag stall + shrptr(index, 1); } +#ifndef _LP64 // index could have been not a multiple of 8 (i.e., bit 2 was set) { Label even; // note: if index was a multiple of 8, than it cannot @@ -189,16 +187,17 @@ // => if it is even, we don't need to check for 0 again jcc(Assembler::carryClear, even); // clear topmost word (no jump needed if conditional assignment would work here) - movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1); + movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1); // index could be 0 now, need to check again jcc(Assembler::zero, done); bind(even); } +#endif // !_LP64 // initialize remaining object fields: rdx is a multiple of 2 now { Label loop; bind(loop); - movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1); - movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1); + movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1); + NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);) decrement(index); jcc(Assembler::notZero, 
loop); } @@ -221,7 +220,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) { assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "con_size_in_bytes is not multiple of alignment"); - const int hdr_size_in_bytes = oopDesc::header_size_in_bytes(); + const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes(); initialize_header(obj, klass, noreg, t1, t2); @@ -230,36 +229,36 @@ const Register index = t2; const int threshold = 6 * BytesPerWord; // approximate break even point for code size (see comments below) if (var_size_in_bytes != noreg) { - movl(index, var_size_in_bytes); + mov(index, var_size_in_bytes); initialize_body(obj, index, hdr_size_in_bytes, t1_zero); } else if (con_size_in_bytes <= threshold) { // use explicit null stores // code size = 2 + 3*n bytes (n = number of fields to clear) - xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code) - for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord) - movl(Address(obj, i), t1_zero); + xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code) + for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord) + movptr(Address(obj, i), t1_zero); } else if (con_size_in_bytes > hdr_size_in_bytes) { // use loop to null out the fields // code size = 16 bytes for even n (n = number of fields to clear) // initialize last object field first if odd number of fields - xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code) - movl(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3); + xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code) + movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3); // initialize last object field if constant size is odd - if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0) - movl(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero); + if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0) + movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero); // initialize remaining object fields: rdx is a multiple of 2 { Label loop; bind(loop); - movl(Address(obj, index, Address::times_8, - hdr_size_in_bytes - (1*BytesPerWord)), t1_zero); - movl(Address(obj, index, Address::times_8, - hdr_size_in_bytes - (2*BytesPerWord)), t1_zero); + movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)), + t1_zero); + NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)), + t1_zero);) decrement(index); jcc(Assembler::notZero, loop); } } - if (DTraceAllocProbes) { + if (DTraceAllocProbes) { assert(obj == rax, "must be"); call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id))); } @@ -272,17 +271,17 @@ assert_different_registers(obj, len, t1, t2, klass); // determine alignment mask - assert(BytesPerWord == 4, "must be a multiple of 2 for masking code to work"); + assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work"); // check for negative or excessive length - cmpl(len, max_array_allocation_length); + cmpptr(len, (int32_t)max_array_allocation_length); jcc(Assembler::above, slow_case); const Register arr_size = t2; // okay to be the same // align object end - movl(arr_size, header_size * BytesPerWord + MinObjAlignmentInBytesMask); - leal(arr_size, Address(arr_size, len, f)); - andl(arr_size, ~MinObjAlignmentInBytesMask); + movptr(arr_size, (int32_t)header_size * BytesPerWord + 
MinObjAlignmentInBytesMask); + lea(arr_size, Address(arr_size, len, f)); + andptr(arr_size, ~MinObjAlignmentInBytesMask); try_allocate(obj, arr_size, 0, t1, t2, slow_case); @@ -292,7 +291,7 @@ const Register len_zero = len; initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero); - if (DTraceAllocProbes) { + if (DTraceAllocProbes) { assert(obj == rax, "must be"); call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id))); } @@ -304,16 +303,17 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) { verify_oop(receiver); - // explicit NULL check not needed since load from [klass_offset] causes a trap + // explicit NULL check not needed since load from [klass_offset] causes a trap // check against inline cache assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check"); int start_offset = offset(); - cmpl(iCache, Address(receiver, oopDesc::klass_offset_in_bytes())); + cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes())); // if icache check fails, then jump to runtime routine // Note: RECEIVER must still contain the receiver! jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub())); - assert(offset() - start_offset == 9, "check alignment in emit_method_entry"); + const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); + assert(offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry"); } @@ -367,7 +367,7 @@ void C1_MacroAssembler::verify_not_null_oop(Register r) { if (!VerifyOops) return; Label not_null; - testl(r, r); + testptr(r, r); jcc(Assembler::notZero, not_null); stop("non-null oop required"); bind(not_null); @@ -376,12 +376,12 @@ void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) { #ifdef ASSERT - if (inv_rax) movl(rax, 0xDEAD); - if (inv_rbx) movl(rbx, 0xDEAD); - if (inv_rcx) movl(rcx, 0xDEAD); - if (inv_rdx) movl(rdx, 0xDEAD); - if (inv_rsi) movl(rsi, 0xDEAD); - if (inv_rdi) movl(rdi, 0xDEAD); + if (inv_rax) movptr(rax, 0xDEAD); + if (inv_rbx) movptr(rbx, 0xDEAD); + if (inv_rcx) movptr(rcx, 0xDEAD); + if (inv_rdx) movptr(rdx, 0xDEAD); + if (inv_rsi) movptr(rsi, 0xDEAD); + if (inv_rdi) movptr(rdi, 0xDEAD); #endif } --- old/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.hpp 2009-08-01 04:07:47.475984490 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.hpp 2009-08-01 04:07:47.373004259 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)c1_MacroAssembler_x86.hpp 1.36 07/07/02 16:50:32 JVM" -#endif /* - * Copyright 1999-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ // C1_MacroAssembler contains high-level macros for C1 @@ -91,22 +88,23 @@ // slow_case : exit to slow case implementation if fast allocation fails void allocate_array(Register obj, Register len, Register t, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case); - int rsp_offset() const { return _rsp_offset; } + int rsp_offset() const { return _rsp_offset; } void set_rsp_offset(int n) { _rsp_offset = n; } // Note: NEVER push values directly, but only through following push_xxx functions; // This helps us to track the rsp changes compared to the entry rsp (->_rsp_offset) - void push_jint (jint i) { _rsp_offset++; pushl(i); } + void push_jint (jint i) { _rsp_offset++; push(i); } void push_oop (jobject o) { _rsp_offset++; pushoop(o); } - void push_addr (Address a) { _rsp_offset++; pushl(a); } - void push_reg (Register r) { _rsp_offset++; pushl(r); } - void pop (Register r) { _rsp_offset--; popl (r); assert(_rsp_offset >= 0, "stack offset underflow"); } + // Seems to always be in wordSize + void push_addr (Address a) { _rsp_offset++; pushptr(a); } + void push_reg (Register r) { _rsp_offset++; push(r); } + void pop_reg (Register r) { _rsp_offset--; pop(r); assert(_rsp_offset >= 0, "stack offset underflow"); } void dec_stack (int nof_words) { _rsp_offset -= nof_words; assert(_rsp_offset >= 0, "stack offset underflow"); - addl(rsp, wordSize * nof_words); + addptr(rsp, wordSize * nof_words); } void dec_stack_after_call (int nof_words) { --- old/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2009-08-01 04:07:48.324724475 +0100 +++ new/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2009-08-01 04:07:48.231605389 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)c1_Runtime1_x86.cpp 1.197 07/09/17 09:25:58 JVM" -#endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ #include "incls/_precompiled.incl" @@ -33,52 +30,58 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) { // setup registers - const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions) + const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions) assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different"); assert(oop_result1 != thread && oop_result2 != thread, "registers must be different"); assert(args_size >= 0, "illegal args_size"); +#ifdef _LP64 + mov(c_rarg0, thread); + set_num_rt_args(0); // Nothing on stack +#else set_num_rt_args(1 + args_size); // push java thread (becomes first argument of C function) get_thread(thread); - pushl(thread); + push(thread); +#endif // _LP64 set_last_Java_frame(thread, noreg, rbp, NULL); + // do the call call(RuntimeAddress(entry)); int call_offset = offset(); // verify callee-saved register #ifdef ASSERT guarantee(thread != rax, "change this code"); - pushl(rax); + push(rax); { Label L; get_thread(rax); - cmpl(thread, rax); + cmpptr(thread, rax); jcc(Assembler::equal, L); int3(); stop("StubAssembler::call_RT: rdi not callee saved?"); bind(L); } - popl(rax); + pop(rax); #endif reset_last_Java_frame(thread, true, false); // discard thread and arguments - addl(rsp, (1 + args_size)*BytesPerWord); + NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord)); // check for pending exceptions { Label L; - cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); + cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); jcc(Assembler::equal, L); // exception pending => remove activation and forward to exception handler - movl(rax, Address(thread, Thread::pending_exception_offset())); + movptr(rax, Address(thread, Thread::pending_exception_offset())); // make sure that the vm_results are cleared if (oop_result1->is_valid()) { - movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); + movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD); } if (oop_result2->is_valid()) { - movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); + movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD); } if (frame_size() == no_frame_size) { leave(); @@ -92,13 +95,13 @@ } // get oop results if there are any and reset the values in the thread if (oop_result1->is_valid()) { - movl(oop_result1, Address(thread, JavaThread::vm_result_offset())); - movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); + movptr(oop_result1, Address(thread, JavaThread::vm_result_offset())); + movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD); verify_oop(oop_result1); } if (oop_result2->is_valid()) { - movl(oop_result2, Address(thread, JavaThread::vm_result_2_offset())); - movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); + movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset())); + movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD); verify_oop(oop_result2); } return call_offset; @@ -106,22 +109,58 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) { - pushl(arg1); +#ifdef _LP64 + mov(c_rarg1, arg1); +#else + push(arg1); +#endif // _LP64 return call_RT(oop_result1, oop_result2, entry, 1); } int StubAssembler::call_RT(Register oop_result1, Register 
oop_result2, address entry, Register arg1, Register arg2) { - pushl(arg2); - pushl(arg1); +#ifdef _LP64 + if (c_rarg1 == arg2) { + if (c_rarg2 == arg1) { + xchgq(arg1, arg2); + } else { + mov(c_rarg2, arg2); + mov(c_rarg1, arg1); + } + } else { + mov(c_rarg1, arg1); + mov(c_rarg2, arg2); + } +#else + push(arg2); + push(arg1); +#endif // _LP64 return call_RT(oop_result1, oop_result2, entry, 2); } int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) { - pushl(arg3); - pushl(arg2); - pushl(arg1); +#ifdef _LP64 + // if there is any conflict use the stack + if (arg1 == c_rarg2 || arg1 == c_rarg3 || + arg2 == c_rarg1 || arg1 == c_rarg3 || + arg3 == c_rarg1 || arg1 == c_rarg2) { + push(arg3); + push(arg2); + push(arg1); + pop(c_rarg1); + pop(c_rarg2); + pop(c_rarg3); + } else { + mov(c_rarg1, arg1); + mov(c_rarg2, arg2); + mov(c_rarg3, arg3); + } +#else + push(arg3); + push(arg2); + push(arg1); +#endif // _LP64 return call_RT(oop_result1, oop_result2, entry, 3); } @@ -157,7 +196,7 @@ // + 3: argument with offset 1 // + 4: ... - __ movl(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord)); + __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord)); } @@ -173,8 +212,8 @@ #define __ sasm-> -const int float_regs_as_doubles_size_in_words = 16; -const int xmm_regs_as_doubles_size_in_words = 16; +const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2; +const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2; // Stack layout for saving/restoring all the registers needed during a runtime // call (this includes deoptimization) @@ -183,29 +222,61 @@ // but the code in save_live_registers will take the argument count into // account. // +#ifdef _LP64 + #define SLOT2(x) x, + #define SLOT_PER_WORD 2 +#else + #define SLOT2(x) + #define SLOT_PER_WORD 1 +#endif // _LP64 + enum reg_save_layout { - dummy1, - dummy2, + // 64bit needs to keep stack 16 byte aligned. 
So we add some alignment dummies to make that + // happen and will assert if the stack size we create is misaligned +#ifdef _LP64 + align_dummy_0, align_dummy_1, +#endif // _LP64 + dummy1, SLOT2(dummy1H) // 0, 4 + dummy2, SLOT2(dummy2H) // 8, 12 // Two temps to be used as needed by users of save/restore callee registers - temp_2_off, - temp_1_off, - xmm_regs_as_doubles_off, - float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_words, - fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_words, - fpu_state_end_off = fpu_state_off + FPUStateSizeInWords, - marker = fpu_state_end_off, - extra_space_offset, + temp_2_off, SLOT2(temp_2H_off) // 16, 20 + temp_1_off, SLOT2(temp_1H_off) // 24, 28 + xmm_regs_as_doubles_off, // 32 + float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160 + fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224 + // fpu_state_end_off is exclusive + fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352 + marker = fpu_state_end_off, SLOT2(markerH) // 352, 356 + extra_space_offset, // 360 +#ifdef _LP64 + r15_off = extra_space_offset, r15H_off, // 360, 364 + r14_off, r14H_off, // 368, 372 + r13_off, r13H_off, // 376, 380 + r12_off, r12H_off, // 384, 388 + r11_off, r11H_off, // 392, 396 + r10_off, r10H_off, // 400, 404 + r9_off, r9H_off, // 408, 412 + r8_off, r8H_off, // 416, 420 + rdi_off, rdiH_off, // 424, 428 +#else rdi_off = extra_space_offset, - rsi_off, - rbp_off, - rsp_off, - rbx_off, - rdx_off, - rcx_off, - rax_off, - saved_rbp_off, - return_off, - reg_save_frame_size, // As noted: neglects any parameters to runtime +#endif // _LP64 + rsi_off, SLOT2(rsiH_off) // 432, 436 + rbp_off, SLOT2(rbpH_off) // 440, 444 + rsp_off, SLOT2(rspH_off) // 448, 452 + rbx_off, SLOT2(rbxH_off) // 456, 460 + rdx_off, SLOT2(rdxH_off) // 464, 468 + rcx_off, SLOT2(rcxH_off) // 472, 476 + rax_off, SLOT2(raxH_off) // 480, 484 + saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492 + return_off, SLOT2(returnH_off) // 496, 500 + reg_save_frame_size, // As noted: neglects any parameters to runtime // 504 + +#ifdef _WIN64 + c_rarg0_off = rcx_off, +#else + c_rarg0_off = rdi_off, +#endif // WIN64 // equates @@ -232,18 +303,49 @@ static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, bool save_fpu_registers = true) { - int frame_size = reg_save_frame_size + num_rt_args; // args + thread - sasm->set_frame_size(frame_size); + + // In 64bit all the args are in regs so there are no additional stack slots + LP64_ONLY(num_rt_args = 0); + LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");) + int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread + sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word ); // record saved value locations in an OopMap // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread - OopMap* map = new OopMap(frame_size, 0); + OopMap* map = new OopMap(frame_size_in_slots, 0); map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), 
rsi->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg()); +#ifdef _LP64 + map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg()); + map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg()); + + // This is stupid but needed. + map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next()); + + map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next()); + map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next()); +#endif // _LP64 if (save_fpu_registers) { if (UseSSE < 2) { @@ -291,30 +393,31 @@ bool save_fpu_registers = true) { __ block_comment("save_live_registers"); - int frame_size = reg_save_frame_size + num_rt_args; // args + thread + // 64bit passes the args in regs to the c++ runtime + int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread // frame_size = round_to(frame_size, 4); - sasm->set_frame_size(frame_size); + sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word ); - __ pushad(); // integer registers + __ pusha(); // integer registers // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset"); - __ subl(rsp, extra_space_offset * wordSize); + __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); #ifdef ASSERT - __ movl(Address(rsp, marker * wordSize), 0xfeedbeef); + __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); #endif if (save_fpu_registers) { if (UseSSE < 2) { // save FPU stack - __ fnsave(Address(rsp, fpu_state_off * wordSize)); + __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); __ fwait(); #ifdef ASSERT Label ok; - __ cmpw(Address(rsp, fpu_state_off * wordSize), StubRoutines::fpu_cntrl_wrd_std()); + __ cmpw(Address(rsp, fpu_state_off * 
VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); __ jccb(Assembler::equal, ok); __ stop("corrupted control word detected"); __ bind(ok); @@ -324,49 +427,59 @@ // since fstp_d can cause FPU stack underflow exceptions. Write it // into the on stack copy and then reload that to make sure that the // current and future values are correct. - __ movw(Address(rsp, fpu_state_off * wordSize), StubRoutines::fpu_cntrl_wrd_std()); - __ frstor(Address(rsp, fpu_state_off * wordSize)); + __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); + __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); - // Save the FPU registers in de-opt-able form - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 0)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 8)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 16)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 24)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 32)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 40)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 48)); - __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 56)); + // Save the FPU registers in de-opt-able form + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48)); + __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56)); } if (UseSSE >= 2) { // save XMM registers // XMM registers can contain float or double values, but this is not known here, // so always save them as doubles. - // note that float values are _not_ converted automatically, so for float values + // note that float values are _not_ converted automatically, so for float values // the second word contains only garbage data. 
- __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 0), xmm0); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 8), xmm1); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 16), xmm2); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 24), xmm3); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 32), xmm4); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 40), xmm5); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 48), xmm6); - __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 56), xmm7); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7); +#ifdef _LP64 + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14); + __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15); +#endif // _LP64 } else if (UseSSE == 1) { // save XMM registers as float because double not supported without SSE2 - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 0), xmm0); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 8), xmm1); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 16), xmm2); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 24), xmm3); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 32), xmm4); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 40), xmm5); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 48), xmm6); - __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 56), xmm7); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6); + __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7); } } // FPU stack must be empty 
now - __ verify_FPU(0, "save_live_registers"); + __ verify_FPU(0, "save_live_registers"); return generate_oop_map(sasm, num_rt_args, save_fpu_registers); } @@ -376,49 +489,59 @@ if (restore_fpu_registers) { if (UseSSE >= 2) { // restore XMM registers - __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * wordSize + 0)); - __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * wordSize + 8)); - __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * wordSize + 16)); - __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * wordSize + 24)); - __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * wordSize + 32)); - __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * wordSize + 40)); - __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * wordSize + 48)); - __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * wordSize + 56)); + __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0)); + __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8)); + __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16)); + __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24)); + __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32)); + __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40)); + __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48)); + __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56)); +#ifdef _LP64 + __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64)); + __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72)); + __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80)); + __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88)); + __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96)); + __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104)); + __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112)); + __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120)); +#endif // _LP64 } else if (UseSSE == 1) { // restore XMM registers - __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * wordSize + 0)); - __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * wordSize + 8)); - __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * wordSize + 16)); - __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * wordSize + 24)); - __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * wordSize + 32)); - __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * wordSize + 40)); - __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * wordSize + 48)); - __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * wordSize + 56)); + __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0)); + __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8)); + __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16)); + __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24)); + __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32)); + __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40)); + __ 
movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48)); + __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56)); } if (UseSSE < 2) { - __ frstor(Address(rsp, fpu_state_off * wordSize)); + __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); } else { // check that FPU stack is really empty - __ verify_FPU(0, "restore_live_registers"); + __ verify_FPU(0, "restore_live_registers"); } } else { // check that FPU stack is really empty - __ verify_FPU(0, "restore_live_registers"); + __ verify_FPU(0, "restore_live_registers"); } #ifdef ASSERT { Label ok; - __ cmpl(Address(rsp, marker * wordSize), 0xfeedbeef); + __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); __ jcc(Assembler::equal, ok); __ stop("bad offsets in frame"); __ bind(ok); } -#endif +#endif // ASSERT - __ addl(rsp, extra_space_offset * wordSize); + __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); } @@ -426,7 +549,7 @@ __ block_comment("restore_live_registers"); restore_fpu(sasm, restore_fpu_registers); - __ popad(); + __ popa(); } @@ -435,14 +558,35 @@ restore_fpu(sasm, restore_fpu_registers); - __ popl(rdi); - __ popl(rsi); - __ popl(rbp); - __ popl(rbx); // skip this value - __ popl(rbx); - __ popl(rdx); - __ popl(rcx); - __ addl(rsp, 4); +#ifdef _LP64 + __ movptr(r15, Address(rsp, 0)); + __ movptr(r14, Address(rsp, wordSize)); + __ movptr(r13, Address(rsp, 2 * wordSize)); + __ movptr(r12, Address(rsp, 3 * wordSize)); + __ movptr(r11, Address(rsp, 4 * wordSize)); + __ movptr(r10, Address(rsp, 5 * wordSize)); + __ movptr(r9, Address(rsp, 6 * wordSize)); + __ movptr(r8, Address(rsp, 7 * wordSize)); + __ movptr(rdi, Address(rsp, 8 * wordSize)); + __ movptr(rsi, Address(rsp, 9 * wordSize)); + __ movptr(rbp, Address(rsp, 10 * wordSize)); + // skip rsp + __ movptr(rbx, Address(rsp, 12 * wordSize)); + __ movptr(rdx, Address(rsp, 13 * wordSize)); + __ movptr(rcx, Address(rsp, 14 * wordSize)); + + __ addptr(rsp, 16 * wordSize); +#else + + __ pop(rdi); + __ pop(rsi); + __ pop(rbp); + __ pop(rbx); // skip this value + __ pop(rbx); + __ pop(rdx); + __ pop(rcx); + __ addptr(rsp, BytesPerWord); +#endif // _LP64 } @@ -468,10 +612,13 @@ // load argument for exception that is passed as an argument into the stub if (has_argument) { - __ movl(temp_reg, Address(rbp, 2*BytesPerWord)); - __ pushl(temp_reg); +#ifdef _LP64 + __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord)); +#else + __ movptr(temp_reg, Address(rbp, 2*BytesPerWord)); + __ push(temp_reg); +#endif // _LP64 } - int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1); OopMapSet* oop_maps = new OopMapSet(); @@ -489,7 +636,7 @@ const Register exception_pc = rdx; // other registers used in this stub const Register real_return_addr = rbx; - const Register thread = rdi; + const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); __ block_comment("generate_handle_exception"); @@ -506,19 +653,19 @@ __ verify_not_null_oop(exception_oop); // load address of JavaThread object for thread-local data - __ get_thread(thread); + NOT_LP64(__ get_thread(thread);) #ifdef ASSERT - // check that fields in JavaThread for exception oop and issuing pc are + // check that fields in JavaThread for exception oop and issuing pc are // empty before writing to them Label oop_empty; - __ cmpl(Address(thread, JavaThread::exception_oop_offset()), 0); + __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD); __ jcc(Assembler::equal, oop_empty); __ 
stop("exception oop already set"); __ bind(oop_empty); Label pc_empty; - __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0); + __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); __ jcc(Assembler::equal, pc_empty); __ stop("exception pc already set"); __ bind(pc_empty); @@ -526,17 +673,17 @@ // save exception oop and issuing pc into JavaThread // (exception handler will load it from here) - __ movl(Address(thread, JavaThread::exception_oop_offset()), exception_oop); - __ movl(Address(thread, JavaThread::exception_pc_offset()), exception_pc); + __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop); + __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); // save real return address (pc that called this stub) - __ movl(real_return_addr, Address(rbp, 1*BytesPerWord)); - __ movl(Address(rsp, temp_1_off * BytesPerWord), real_return_addr); + __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord)); + __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr); // patch throwing pc into return address (has bci & oop map) - __ movl(Address(rbp, 1*BytesPerWord), exception_pc); + __ movptr(Address(rbp, 1*BytesPerWord), exception_pc); - // compute the exception handler. + // compute the exception handler. // the exception oop and the throwing pc are read from the fields in JavaThread int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); oop_maps->add_gc_map(call_offset, oop_map); @@ -551,12 +698,12 @@ // Do we have an exception handler in the nmethod? Label no_handler; Label done; - __ testl(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, no_handler); // exception handler found // patch the return address -> the stub will directly return to the exception handler - __ movl(Address(rbp, 1*BytesPerWord), rax); + __ movptr(Address(rbp, 1*BytesPerWord), rax); // restore registers restore_live_registers(sasm, save_fpu_registers); @@ -566,23 +713,23 @@ __ ret(0); __ bind(no_handler); - // no exception handler found in this method, so the exception is + // no exception handler found in this method, so the exception is // forwarded to the caller (using the unwind code of the nmethod) // there is no need to restore the registers // restore the real return address that was saved before the RT-call - __ movl(real_return_addr, Address(rsp, temp_1_off * BytesPerWord)); - __ movl(Address(rbp, 1*BytesPerWord), real_return_addr); + __ movptr(real_return_addr, Address(rsp, temp_1_off * VMRegImpl::stack_slot_size)); + __ movptr(Address(rbp, 1*BytesPerWord), real_return_addr); // load address of JavaThread object for thread-local data - __ get_thread(thread); + NOT_LP64(__ get_thread(thread);) // restore exception oop into rax, (convention for unwind code) - __ movl(exception_oop, Address(thread, JavaThread::exception_oop_offset())); + __ movptr(exception_oop, Address(thread, JavaThread::exception_oop_offset())); // clear exception fields in JavaThread because they are no longer needed // (fields must be cleared because they are processed by GC otherwise) - __ movl(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD); - __ movl(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD); + __ movptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD); + __ movptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); // pop the stub frame off __ leave(); @@ -598,22 +745,22 @@ // other registers used in this stub const 
Register exception_pc = rdx; const Register handler_addr = rbx; - const Register thread = rdi; + const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // verify that only rax, is valid at this time __ invalidate_registers(false, true, true, true, true, true); #ifdef ASSERT // check that fields in JavaThread for exception oop and issuing pc are empty - __ get_thread(thread); + NOT_LP64(__ get_thread(thread);) Label oop_empty; - __ cmpl(Address(thread, JavaThread::exception_oop_offset()), 0); + __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0); __ jcc(Assembler::equal, oop_empty); __ stop("exception oop must be empty"); __ bind(oop_empty); Label pc_empty; - __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0); + __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); __ jcc(Assembler::equal, pc_empty); __ stop("exception pc must be empty"); __ bind(pc_empty); @@ -623,14 +770,14 @@ __ empty_FPU_stack(); // leave activation of nmethod - __ leave(); + __ leave(); // store return address (is on top of stack after leave) - __ movl(exception_pc, Address(rsp, 0)); + __ movptr(exception_pc, Address(rsp, 0)); __ verify_oop(exception_oop); // save exception oop from rax, to stack before call - __ pushl(exception_oop); + __ push(exception_oop); // search the exception handler address of the caller (using the return address) __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), exception_pc); @@ -638,19 +785,19 @@ // only rax, is valid at this time, all other registers have been destroyed by the call __ invalidate_registers(false, true, true, true, true, true); - + // move result of call into correct register - __ movl(handler_addr, rax); + __ movptr(handler_addr, rax); // restore exception oop in rax, (required convention of exception handler) - __ popl(exception_oop); + __ pop(exception_oop); __ verify_oop(exception_oop); - // get throwing pc (= return address). + // get throwing pc (= return address). // rdx has been destroyed by the call, so it must be set again - // the pop is also necessary to simulate the effect of a ret(0) - __ popl(exception_pc); + // the pop is also necessary to simulate the effect of a ret(0) + __ pop(exception_pc); // verify that that there is really a valid exception in rax, __ verify_not_null_oop(exception_oop); @@ -669,7 +816,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { - // use the maximum number of runtime-arguments here because it is difficult to + // use the maximum number of runtime-arguments here because it is difficult to // distinguish each RT-Call. // Note: This number affects also the RT-Call in generate_handle_exception because // the oop-map is shared for all calls. 
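[Editor's aside - illustrative sketch, not part of the patch] Nearly every hunk in c1_Runtime1_x86.cpp follows the same recipe: 32-bit-only instructions on pointer values (movl/pushl/popl/cmpl) become pointer-sized wrappers (movptr/push/pop/cmpptr), and genuinely divergent 32/64-bit code is selected with the NOT_LP64/LP64_ONLY macros. Assuming the standard HotSpot definitions of those macros, the thread-argument idiom used by the runtime-call hunks (including the one that follows) amounts to:

    #ifdef _LP64
      #define LP64_ONLY(code) code
      #define NOT_LP64(code)
    #else
      #define LP64_ONLY(code)
      #define NOT_LP64(code) code
    #endif

    // 32-bit: the thread lives in callee-saved rdi and is pushed as the first stack argument.
    // 64-bit: the thread is permanently cached in r15 and is passed in the first C argument register.
    const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread);)
    NOT_LP64(__ push(thread);)
    LP64_ONLY(__ mov(c_rarg0, thread);)

[end aside]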
@@ -680,12 +827,18 @@ OopMap* oop_map = save_live_registers(sasm, num_rt_args); - __ pushl(rax); // push dummy +#ifdef _LP64 + const Register thread = r15_thread; + // No need to worry about dummy + __ mov(c_rarg0, thread); +#else + __ push(rax); // push dummy const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions) // push java thread (becomes first argument of C function) __ get_thread(thread); - __ pushl(thread); + __ push(thread); +#endif // _LP64 __ set_last_Java_frame(thread, noreg, rbp, NULL); // do the call __ call(RuntimeAddress(target)); @@ -694,27 +847,29 @@ // verify callee-saved register #ifdef ASSERT guarantee(thread != rax, "change this code"); - __ pushl(rax); + __ push(rax); { Label L; __ get_thread(rax); - __ cmpl(thread, rax); + __ cmpptr(thread, rax); __ jcc(Assembler::equal, L); - __ stop("StubAssembler::call_RT: rdi not callee saved?"); + __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?"); __ bind(L); } - __ popl(rax); + __ pop(rax); #endif __ reset_last_Java_frame(thread, true, false); - __ popl(rcx); // discard thread arg - __ popl(rcx); // discard dummy +#ifndef _LP64 + __ pop(rcx); // discard thread arg + __ pop(rcx); // discard dummy +#endif // _LP64 // check for pending exceptions { Label L; - __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); + __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); // exception pending => remove activation and forward to exception handler - __ testl(rax, rax); // have we deoptimized? + __ testptr(rax, rax); // have we deoptimized? __ jump_cc(Assembler::equal, RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); @@ -722,38 +877,38 @@ // JavaThread, so copy and clear pending exception. 
// load and clear pending exception - __ movl(rax, Address(thread, Thread::pending_exception_offset())); - __ movl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); + __ movptr(rax, Address(thread, Thread::pending_exception_offset())); + __ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); - // check that there is really a valid exception + // check that there is really a valid exception __ verify_not_null_oop(rax); // load throwing pc: this is the return address of the stub - __ movl(rdx, Address(rsp, return_off * BytesPerWord)); + __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size)); #ifdef ASSERT // check that fields in JavaThread for exception oop and issuing pc are empty Label oop_empty; - __ cmpoop(Address(thread, JavaThread::exception_oop_offset()), 0); + __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::equal, oop_empty); __ stop("exception oop must be empty"); __ bind(oop_empty); Label pc_empty; - __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0); + __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::equal, pc_empty); __ stop("exception pc must be empty"); __ bind(pc_empty); #endif // store exception oop and throwing pc to JavaThread - __ movl(Address(thread, JavaThread::exception_oop_offset()), rax); - __ movl(Address(thread, JavaThread::exception_pc_offset()), rdx); + __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax); + __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx); restore_live_registers(sasm); __ leave(); - __ addl(rsp, 4); // remove return address from stack + __ addptr(rsp, BytesPerWord); // remove return address from stack // Forward the exception directly to deopt blob. We can blow no // registers and must leave throwing pc on the stack. A patch may @@ -770,7 +925,7 @@ Label reexecuteEntry, cont; - __ testl(rax, rax); // have we deoptimized? + __ testptr(rax, rax); // have we deoptimized? __ jcc(Assembler::equal, cont); // no // Will reexecute. Proper return address is already on the stack we just restore @@ -809,21 +964,21 @@ // dispatch to the handler if found. Otherwise unwind and // dispatch to the callers exception handler. 
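[Editor's aside - not part of the patch] A recurring detail throughout these hunks is the explicit (int32_t) cast on NULL_WORD in cmpptr/movptr calls against memory. As I read it, on LP64 NULL_WORD is a 64-bit value and the memory forms of these instructions only encode a sign-extended 32-bit immediate, so the cast selects that encoding; it is correct only because the value involved is zero. A minimal sketch of the pattern as it appears above:

    Label L;
    // test a 64-bit thread field against NULL using a sign-extended 32-bit immediate
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);       // nothing pending
    // clear a 64-bit oop field the same way
    __ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);

[end aside]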
- const Register thread = rdi; + const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); const Register exception_oop = rax; const Register exception_pc = rdx; // load pending exception oop into rax, - __ movl(exception_oop, Address(thread, Thread::pending_exception_offset())); + __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); // clear pending exception - __ movl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); + __ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); // load issuing PC (the return address for this stub) into rdx - __ movl(exception_pc, Address(rbp, 1*BytesPerWord)); + __ movptr(exception_pc, Address(rbp, 1*BytesPerWord)); // make sure that the vm_results are cleared (may be unnecessary) - __ movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); - __ movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); + __ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD); + __ movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD); // verify that that there is really a valid exception in rax, __ verify_not_null_oop(exception_oop); @@ -851,7 +1006,7 @@ assert(id == fast_new_instance_init_check_id, "bad StubID"); __ set_info("fast new_instance init check", dont_gc_arguments); } - + if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && UseTLAB && FastTLABRefill) { Label slow_path; @@ -859,9 +1014,9 @@ Register t1 = rbx; Register t2 = rsi; assert_different_registers(klass, obj, obj_size, t1, t2); - - __ pushl(rdi); - __ pushl(rbx); + + __ push(rdi); + __ push(rbx); if (id == fast_new_instance_init_check_id) { // make sure the klass is initialized @@ -889,33 +1044,33 @@ // refilling the TLAB or allocating directly from eden. Label retry_tlab, try_eden; __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass) - + __ bind(retry_tlab); - // get the instance size + // get the instance size (size is postive so movl is fine for 64bit) __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path); __ initialize_object(obj, klass, obj_size, 0, t1, t2); __ verify_oop(obj); - __ popl(rbx); - __ popl(rdi); + __ pop(rbx); + __ pop(rdi); __ ret(0); __ bind(try_eden); - // get the instance size + // get the instance size (size is postive so movl is fine for 64bit) __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); __ eden_allocate(obj, obj_size, 0, t1, slow_path); __ initialize_object(obj, klass, obj_size, 0, t1, t2); __ verify_oop(obj); - __ popl(rbx); - __ popl(rdi); + __ pop(rbx); + __ pop(rdi); __ ret(0); __ bind(slow_path); - __ popl(rbx); - __ popl(rdi); + __ pop(rbx); + __ pop(rdi); } - + __ enter(); OopMap* map = save_live_registers(sasm, 2); int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass); @@ -951,7 +1106,7 @@ case new_type_array_id: case new_object_array_id: - { + { Register length = rbx; // Incoming Register klass = rdx; // Incoming Register obj = rax; // Result @@ -995,19 +1150,21 @@ // refilling the TLAB or allocating directly from eden. 
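[Editor's aside - illustrative sketch, not part of the patch] Both allocation paths in the next hunk (retry_tlab and try_eden) derive the array size from the klass layout helper before calling tlab_allocate/eden_allocate. In C-like terms, the shlptr/shrptr/andptr/addptr sequence that follows computes roughly this (identifiers follow the diff; the layout_helper() accessor stands in for the explicit Address load):

    int    lh       = klass->layout_helper();                    // loaded with movl; always positive here
    size_t body     = (size_t)length << (lh & 0x1F);             // length << log2(element size)
    size_t hdr      = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
    size_t arr_size = (body + hdr + MinObjAlignmentInBytesMask)
                      & ~(size_t)MinObjAlignmentInBytesMask;     // align up

[end aside]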
Label retry_tlab, try_eden; __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx, & rdx - + __ bind(retry_tlab); // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) + // since size is postive movl does right thing on 64bit __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); + // since size is postive movl does right thing on 64bit __ movl(arr_size, length); assert(t1 == rcx, "fixed register usage"); - __ shll(arr_size /* by t1=rcx, mod 32 */); - __ shrl(t1, Klass::_lh_header_size_shift); - __ andl(t1, Klass::_lh_header_size_mask); - __ addl(arr_size, t1); - __ addl(arr_size, MinObjAlignmentInBytesMask); // align up - __ andl(arr_size, ~MinObjAlignmentInBytesMask); + __ shlptr(arr_size /* by t1=rcx, mod 32 */); + __ shrptr(t1, Klass::_lh_header_size_shift); + __ andptr(t1, Klass::_lh_header_size_mask); + __ addptr(arr_size, t1); + __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up + __ andptr(arr_size, ~MinObjAlignmentInBytesMask); __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size @@ -1015,24 +1172,26 @@ __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte))); assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); - __ andl(t1, Klass::_lh_header_size_mask); - __ subl(arr_size, t1); // body length - __ addl(t1, obj); // body start + __ andptr(t1, Klass::_lh_header_size_mask); + __ subptr(arr_size, t1); // body length + __ addptr(t1, obj); // body start __ initialize_body(t1, arr_size, 0, t2); __ verify_oop(obj); __ ret(0); __ bind(try_eden); // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) + // since size is postive movl does right thing on 64bit __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); + // since size is postive movl does right thing on 64bit __ movl(arr_size, length); assert(t1 == rcx, "fixed register usage"); - __ shll(arr_size /* by t1=rcx, mod 32 */); - __ shrl(t1, Klass::_lh_header_size_shift); - __ andl(t1, Klass::_lh_header_size_mask); - __ addl(arr_size, t1); - __ addl(arr_size, MinObjAlignmentInBytesMask); // align up - __ andl(arr_size, ~MinObjAlignmentInBytesMask); + __ shlptr(arr_size /* by t1=rcx, mod 32 */); + __ shrptr(t1, Klass::_lh_header_size_shift); + __ andptr(t1, Klass::_lh_header_size_mask); + __ addptr(arr_size, t1); + __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up + __ andptr(arr_size, ~MinObjAlignmentInBytesMask); __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size @@ -1040,9 +1199,9 @@ __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte))); assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); - __ andl(t1, Klass::_lh_header_size_mask); - __ subl(arr_size, t1); // body length - __ addl(t1, obj); // body start + __ andptr(t1, Klass::_lh_header_size_mask); + __ subptr(arr_size, t1); // body length + __ addptr(t1, obj); // body start __ initialize_body(t1, arr_size, 0, t2); __ verify_oop(obj); __ ret(0); @@ -1091,16 +1250,24 @@ case register_finalizer_id: { __ set_info("register_finalizer", dont_gc_arguments); - + + // This is called via call_runtime so 
the arguments + // will be place in C abi locations + +#ifdef _LP64 + __ verify_oop(c_rarg0); + __ mov(rax, c_rarg0); +#else // The object is passed on the stack and we haven't pushed a // frame yet so it's one work away from top of stack. - __ movl(rax, Address(rsp, 1 * BytesPerWord)); + __ movptr(rax, Address(rsp, 1 * BytesPerWord)); __ verify_oop(rax); +#endif // _LP64 // load the klass and check the has finalizer flag Label register_finalizer; Register t = rsi; - __ movl(t, Address(rax, oopDesc::klass_offset_in_bytes())); + __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes())); __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); __ testl(t, JVM_ACC_HAS_FINALIZER); __ jcc(Assembler::notZero, register_finalizer); @@ -1188,46 +1355,49 @@ case slow_subtype_check_id: { enum layout { - rax_off, - rcx_off, - rsi_off, - rdi_off, - saved_rbp_off, - return_off, - sub_off, - super_off, + rax_off, SLOT2(raxH_off) + rcx_off, SLOT2(rcxH_off) + rsi_off, SLOT2(rsiH_off) + rdi_off, SLOT2(rdiH_off) + // saved_rbp_off, SLOT2(saved_rbpH_off) + return_off, SLOT2(returnH_off) + sub_off, SLOT2(subH_off) + super_off, SLOT2(superH_off) framesize }; - + __ set_info("slow_subtype_check", dont_gc_arguments); - __ pushl(rdi); - __ pushl(rsi); - __ pushl(rcx); - __ pushl(rax); - __ movl(rsi, Address(rsp, (super_off - 1) * BytesPerWord)); // super - __ movl(rax, Address(rsp, (sub_off - 1) * BytesPerWord)); // sub - - __ movl(rdi,Address(rsi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())); - __ movl(rcx,Address(rdi,arrayOopDesc::length_offset_in_bytes())); - __ addl(rdi,arrayOopDesc::base_offset_in_bytes(T_OBJECT)); + __ push(rdi); + __ push(rsi); + __ push(rcx); + __ push(rax); + + // This is called by pushing args and not with C abi + __ movptr(rsi, Address(rsp, (super_off) * VMRegImpl::stack_slot_size)); // super + __ movptr(rax, Address(rsp, (sub_off ) * VMRegImpl::stack_slot_size)); // sub + + __ movptr(rdi,Address(rsi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())); + // since size is postive movl does right thing on 64bit + __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); + __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); Label miss; __ repne_scan(); __ jcc(Assembler::notEqual, miss); - __ movl(Address(rsi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax); - __ movl(Address(rsp, (super_off - 1) * BytesPerWord), 1); // result - __ popl(rax); - __ popl(rcx); - __ popl(rsi); - __ popl(rdi); + __ movptr(Address(rsi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax); + __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 1); // result + __ pop(rax); + __ pop(rcx); + __ pop(rsi); + __ pop(rdi); __ ret(0); __ bind(miss); - __ movl(Address(rsp, (super_off - 1) * BytesPerWord), 0); // result - __ popl(rax); - __ popl(rcx); - __ popl(rsi); - __ popl(rdi); + __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 0); // result + __ pop(rax); + __ pop(rcx); + __ pop(rsi); + __ pop(rdi); __ ret(0); } break; @@ -1240,11 +1410,13 @@ StubFrame f(sasm, "monitorenter", dont_gc_arguments); OopMap* map = save_live_registers(sasm, 3, save_fpu_registers); + // Called with store_parameter and not C abi + f.load_argument(1, rax); // rax,: object f.load_argument(0, rbx); // rbx,: lock address int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx); - + oop_maps = new OopMapSet(); oop_maps->add_gc_map(call_offset, map); 
restore_live_registers(sasm, save_fpu_registers); @@ -1255,12 +1427,14 @@ save_fpu_registers = false; // fall through case monitorexit_id: - { + { StubFrame f(sasm, "monitorexit", dont_gc_arguments); OopMap* map = save_live_registers(sasm, 2, save_fpu_registers); + // Called with store_parameter and not C abi + f.load_argument(0, rax); // rax,: lock address - + // note: really a leaf routine but must setup last java sp // => use call_RT for now (speed can be improved by // doing last java sp setup manually) @@ -1279,7 +1453,7 @@ oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); } break; - + case load_klass_patching_id: { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments); // we should set up register map @@ -1307,9 +1481,9 @@ // the live registers get saved. save_live_registers(sasm, 1); - __ pushl(rax); + __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax)); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc))); - __ popl(rax); + NOT_LP64(__ pop(rax)); restore_live_registers(sasm); } @@ -1319,18 +1493,19 @@ { // rax, and rdx are destroyed, but should be free since the result is returned there // preserve rsi,ecx - __ pushl(rsi); - __ pushl(rcx); - + __ push(rsi); + __ push(rcx); + LP64_ONLY(__ push(rdx);) + // check for NaN Label return0, do_return, return_min_jlong, do_convert; - - Address value_high_word(rsp, 8); - Address value_low_word(rsp, 4); - Address result_high_word(rsp, 16); - Address result_low_word(rsp, 12); - - __ subl(rsp, 20); + + Address value_high_word(rsp, wordSize + 4); + Address value_low_word(rsp, wordSize); + Address result_high_word(rsp, 3*wordSize + 4); + Address result_low_word(rsp, 3*wordSize); + + __ subptr(rsp, 32); // more than enough on 32bit __ fst_d(value_low_word); __ movl(rax, value_high_word); __ andl(rax, 0x7ff00000); @@ -1340,10 +1515,10 @@ __ andl(rax, 0xfffff); __ orl(rax, value_low_word); __ jcc(Assembler::notZero, return0); - + __ bind(do_convert); __ fnstcw(Address(rsp, 0)); - __ movzxw(rax, Address(rsp, 0)); + __ movzwl(rax, Address(rsp, 0)); __ orl(rax, 0xc00); __ movw(Address(rsp, 2), rax); __ fldcw(Address(rsp, 2)); @@ -1351,9 +1526,11 @@ __ fistp_d(result_low_word); __ fldcw(Address(rsp, 0)); __ fwait(); - __ movl(rax, result_low_word); + // This gets the entire long in rax on 64bit + __ movptr(rax, result_low_word); + // testing of high bits __ movl(rdx, result_high_word); - __ movl(rcx, rax); + __ mov(rcx, rax); // What the heck is the point of the next instruction??? 
__ xorl(rcx, 0x0); __ movl(rsi, 0x80000000); @@ -1363,34 +1540,212 @@ __ fldz(); __ fcomp_d(value_low_word); __ fnstsw_ax(); +#ifdef _LP64 + __ testl(rax, 0x4100); // ZF & CF == 0 + __ jcc(Assembler::equal, return_min_jlong); +#else __ sahf(); __ jcc(Assembler::above, return_min_jlong); +#endif // _LP64 // return max_jlong +#ifndef _LP64 __ movl(rdx, 0x7fffffff); __ movl(rax, 0xffffffff); +#else + __ mov64(rax, CONST64(0x7fffffffffffffff)); +#endif // _LP64 __ jmp(do_return); - + __ bind(return_min_jlong); +#ifndef _LP64 __ movl(rdx, 0x80000000); __ xorl(rax, rax); +#else + __ mov64(rax, CONST64(0x8000000000000000)); +#endif // _LP64 __ jmp(do_return); - + __ bind(return0); __ fpop(); - __ xorl(rdx,rdx); - __ xorl(rax,rax); - +#ifndef _LP64 + __ xorptr(rdx,rdx); + __ xorptr(rax,rax); +#else + __ xorptr(rax, rax); +#endif // _LP64 + __ bind(do_return); - __ addl(rsp, 20); - __ popl(rcx); - __ popl(rsi); + __ addptr(rsp, 32); + LP64_ONLY(__ pop(rdx);) + __ pop(rcx); + __ pop(rsi); __ ret(0); } break; - + +#ifndef SERIALGC + case g1_pre_barrier_slow_id: + { + StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments); + // arg0 : previous value of memory + + BarrierSet* bs = Universe::heap()->barrier_set(); + if (bs->kind() != BarrierSet::G1SATBCTLogging) { + __ movptr(rax, (int)id); + __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); + __ should_not_reach_here(); + break; + } + + __ push(rax); + __ push(rdx); + + const Register pre_val = rax; + const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); + const Register tmp = rdx; + + NOT_LP64(__ get_thread(thread);) + + Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() + + PtrQueue::byte_offset_of_active())); + + Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() + + PtrQueue::byte_offset_of_index())); + Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() + + PtrQueue::byte_offset_of_buf())); + + + Label done; + Label runtime; + + // Can we store original value in the thread's buffer? + + LP64_ONLY(__ movslq(tmp, queue_index);) +#ifdef _LP64 + __ cmpq(tmp, 0); +#else + __ cmpl(queue_index, 0); +#endif + __ jcc(Assembler::equal, runtime); +#ifdef _LP64 + __ subq(tmp, wordSize); + __ movl(queue_index, tmp); + __ addq(tmp, buffer); +#else + __ subl(queue_index, wordSize); + __ movl(tmp, buffer); + __ addl(tmp, queue_index); +#endif + + // prev_val (rax) + f.load_argument(0, pre_val); + __ movptr(Address(tmp, 0), pre_val); + __ jmp(done); + + __ bind(runtime); + // load the pre-value + __ push(rcx); + f.load_argument(0, rcx); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread); + __ pop(rcx); + + __ bind(done); + __ pop(rdx); + __ pop(rax); + } + break; + + case g1_post_barrier_slow_id: + { + StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments); + + + // arg0: store_address + Address store_addr(rbp, 2*BytesPerWord); + + BarrierSet* bs = Universe::heap()->barrier_set(); + CardTableModRefBS* ct = (CardTableModRefBS*)bs; + Label done; + Label runtime; + + // At this point we know new_value is non-NULL and the new_value crosses regsion. 
+ // Must check to see if card is already dirty + + const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); + + Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + + PtrQueue::byte_offset_of_index())); + Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + + PtrQueue::byte_offset_of_buf())); + + __ push(rax); + __ push(rdx); + + NOT_LP64(__ get_thread(thread);) + ExternalAddress cardtable((address)ct->byte_map_base); + assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + + const Register card_addr = rdx; +#ifdef _LP64 + const Register tmp = rscratch1; + f.load_argument(0, card_addr); + __ shrq(card_addr, CardTableModRefBS::card_shift); + __ lea(tmp, cardtable); + // get the address of the card + __ addq(card_addr, tmp); +#else + const Register card_index = rdx; + f.load_argument(0, card_index); + __ shrl(card_index, CardTableModRefBS::card_shift); + + Address index(noreg, card_index, Address::times_1); + __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index))); +#endif + + __ cmpb(Address(card_addr, 0), 0); + __ jcc(Assembler::equal, done); + + // storing region crossing non-NULL, card is clean. + // dirty card and log. + + __ movb(Address(card_addr, 0), 0); + + __ cmpl(queue_index, 0); + __ jcc(Assembler::equal, runtime); + __ subl(queue_index, wordSize); + + const Register buffer_addr = rbx; + __ push(rbx); + + __ movptr(buffer_addr, buffer); + +#ifdef _LP64 + __ movslq(rscratch1, queue_index); + __ addptr(buffer_addr, rscratch1); +#else + __ addptr(buffer_addr, queue_index); +#endif + __ movptr(Address(buffer_addr, 0), card_addr); + + __ pop(rbx); + __ jmp(done); + + __ bind(runtime); + NOT_LP64(__ push(rcx);) + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread); + NOT_LP64(__ pop(rcx);) + + __ bind(done); + __ pop(rdx); + __ pop(rax); + + } + break; +#endif // !SERIALGC + default: { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments); - __ movl(rax, (int)id); + __ movptr(rax, (int)id); __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); __ should_not_reach_here(); } @@ -1400,4 +1755,3 @@ } #undef __ - --- old/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp 2009-08-01 04:07:49.457462821 +0100 +++ new/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp 2009-08-01 04:07:49.362407908 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)cppInterpreter_x86.cpp 1.2 07/09/17 09:58:27 JVM" -#endif /* - * Copyright 2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2007-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -36,8 +33,8 @@ // frames look natural in the debugger is a plus. extern "C" void RecursiveInterpreterActivation(interpreterState istate ) { - // - ShouldNotReachHere(); + // + ShouldNotReachHere(); } @@ -47,6 +44,14 @@ Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to unsynchronized // c++ interpreter entry point this holds that entry point label. +// default registers for state and sender_sp +// state and sender_sp are the same on 32bit because we have no choice. 
+// state could be rsi on 64bit but it is an arg reg and not callee save +// so r13 is better choice. + +const Register state = NOT_LP64(rsi) LP64_ONLY(r13); +const Register sender_sp_on_entry = NOT_LP64(rsi) LP64_ONLY(r13); + // NEEDED for JVMTI? // address AbstractInterpreter::_remove_activation_preserving_args_entry; @@ -86,12 +91,11 @@ // implementation bool CppInterpreter::contains(address pc) { return (_code->contains(pc) || - pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation)); + pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation)); } address CppInterpreterGenerator::generate_result_handler_for(BasicType type) { - const Register state = rsi; // current activation object, valid on entry address entry = __ pc(); switch (type) { case T_BOOLEAN: __ c2bool(rax); break; @@ -101,19 +105,22 @@ case T_VOID : // fall thru case T_LONG : // fall thru case T_INT : /* nothing to do */ break; + case T_DOUBLE : case T_FLOAT : - { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); - __ popl(t); // remove return address first - __ pop_dtos_to_rsp(); + { + const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); + __ pop(t); // remove return address first // Must return a result for interpreter or compiler. In SSE // mode, results are returned in xmm0 and the FPU stack must // be empty. if (type == T_FLOAT && UseSSE >= 1) { +#ifndef _LP64 // Load ST0 __ fld_d(Address(rsp, 0)); // Store as float and empty fpu stack __ fstp_s(Address(rsp, 0)); +#endif // !_LP64 // and reload __ movflt(xmm0, Address(rsp, 0)); } else if (type == T_DOUBLE && UseSSE >= 2 ) { @@ -123,13 +130,13 @@ __ fld_d(Address(rsp, 0)); } // and pop the temp - __ addl(rsp, 2 * wordSize); - __ pushl(t); // restore return address + __ addptr(rsp, 2 * wordSize); + __ push(t); // restore return address } break; case T_OBJECT : // retrieve result from frame - __ movl(rax, STATE(_oop_temp)); + __ movptr(rax, STATE(_oop_temp)); // and verify it __ verify_oop(rax); break; @@ -149,61 +156,61 @@ address entry = __ pc(); const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); - __ popl(t); // remove return address first + __ pop(t); // remove return address first switch (type) { - case T_VOID: + case T_VOID: break; - case T_BOOLEAN: + case T_BOOLEAN: #ifdef EXTEND - __ c2bool(rax); + __ c2bool(rax); #endif - __ pushl(rax); + __ push(rax); break; - case T_CHAR : + case T_CHAR : #ifdef EXTEND - __ andl(rax, 0xFFFF); + __ andl(rax, 0xFFFF); #endif - __ pushl(rax); + __ push(rax); break; - case T_BYTE : + case T_BYTE : #ifdef EXTEND - __ sign_extend_byte (rax); + __ sign_extend_byte (rax); #endif - __ pushl(rax); + __ push(rax); break; - case T_SHORT : + case T_SHORT : #ifdef EXTEND - __ sign_extend_short(rax); + __ sign_extend_short(rax); #endif - __ pushl(rax); + __ push(rax); break; - case T_LONG : - __ pushl(rdx); - __ pushl(rax); + case T_LONG : + __ push(rdx); // pushes useless junk on 64bit + __ push(rax); break; - case T_INT : - __ pushl(rax); + case T_INT : + __ push(rax); break; case T_FLOAT : - // Result is in ST(0) + // Result is in ST(0)/xmm0 + __ subptr(rsp, wordSize); if ( UseSSE < 1) { - __ push(ftos); // and save it + __ fstp_s(Address(rsp, 0)); } else { - __ subl(rsp, wordSize); __ movflt(Address(rsp, 0), xmm0); } break; case T_DOUBLE : + __ subptr(rsp, 2*wordSize); if ( UseSSE < 2 ) { - __ push(dtos); // put ST0 on java stack + __ fstp_d(Address(rsp, 0)); } else { - __ subl(rsp, 2*wordSize); __ movdbl(Address(rsp, 0), xmm0); } break; case T_OBJECT : __ 
verify_oop(rax); // verify it - __ pushl(rax); + __ push(rax); break; default : ShouldNotReachHere(); } @@ -215,7 +222,7 @@ // A result is in the java expression stack of the interpreted method that has just // returned. Place this result on the java expression stack of the caller. // - // The current interpreter activation in rsi is for the method just returning its + // The current interpreter activation in rsi/r13 is for the method just returning its // result. So we know that the result of this method is on the top of the current // execution stack (which is pre-pushed) and will be return to the top of the caller // stack. The top of the callers stack is the bottom of the locals of the current @@ -225,54 +232,53 @@ // of the calling activation. This enable this routine to leave the return address // to the frame manager on the stack and do a vanilla return. // - // On entry: rsi - interpreter state of activation returning a (potential) result - // On Return: rsi - unchanged + // On entry: rsi/r13 - interpreter state of activation returning a (potential) result + // On Return: rsi/r13 - unchanged // rax - new stack top for caller activation (i.e. activation in _prev_link) // // Can destroy rdx, rcx. // address entry = __ pc(); - const Register state = rsi; // current activation object, valid on entry const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); switch (type) { - case T_VOID: - __ movl(rax, STATE(_locals)); // pop parameters get new stack value - __ addl(rax, wordSize); // account for prepush before we return + case T_VOID: + __ movptr(rax, STATE(_locals)); // pop parameters get new stack value + __ addptr(rax, wordSize); // account for prepush before we return break; case T_FLOAT : - case T_BOOLEAN: - case T_CHAR : - case T_BYTE : - case T_SHORT : - case T_INT : + case T_BOOLEAN: + case T_CHAR : + case T_BYTE : + case T_SHORT : + case T_INT : // 1 word result - __ movl(rdx, STATE(_stack)); - __ movl(rax, STATE(_locals)); // address for result + __ movptr(rdx, STATE(_stack)); + __ movptr(rax, STATE(_locals)); // address for result __ movl(rdx, Address(rdx, wordSize)); // get result - __ movl(Address(rax, 0), rdx); // and store it + __ movptr(Address(rax, 0), rdx); // and store it break; - case T_LONG : + case T_LONG : case T_DOUBLE : // return top two words on current expression stack to caller's expression stack // The caller's expression stack is adjacent to the current frame manager's intepretState // except we allocated one extra word for this intepretState so we won't overwrite it // when we return a two word result. 
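
Note on the hunks above and throughout the rest of this change: the hard-coded rsi activation pointer is replaced by a register chosen per word size (rsi on 32-bit, r13 on 64-bit, since rsi is an argument register and not callee-saved in the AMD64 calling convention), and the 32-bit-only forms (pushl/popl/movl/addl on pointer values) give way to the width-agnostic MacroAssembler forms push/pop/movptr/addptr. A minimal sketch of how the LP64_ONLY/NOT_LP64 selection macros behave; the real definitions live in HotSpot's global definitions headers and may differ in detail:

    #ifdef _LP64
      #define LP64_ONLY(code) code
      #define NOT_LP64(code)
    #else
      #define LP64_ONLY(code)
      #define NOT_LP64(code) code
    #endif

    // With these definitions,
    //   const Register state = NOT_LP64(rsi) LP64_ONLY(r13);
    // becomes  const Register state = rsi;  on 32-bit builds
    // and      const Register state = r13;  on 64-bit builds.
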
- __ movl(rax, STATE(_locals)); // address for result - __ movl(rcx, STATE(_stack)); - __ subl(rax, wordSize); // need addition word besides locals[0] - __ movl(rdx, Address(rcx, 2*wordSize)); // get result word - __ movl(Address(rax, wordSize), rdx); // and store it - __ movl(rdx, Address(rcx, wordSize)); // get result word - __ movl(Address(rax, 0), rdx); // and store it + __ movptr(rax, STATE(_locals)); // address for result + __ movptr(rcx, STATE(_stack)); + __ subptr(rax, wordSize); // need addition word besides locals[0] + __ movptr(rdx, Address(rcx, 2*wordSize)); // get result word (junk in 64bit) + __ movptr(Address(rax, wordSize), rdx); // and store it + __ movptr(rdx, Address(rcx, wordSize)); // get result word + __ movptr(Address(rax, 0), rdx); // and store it break; case T_OBJECT : - __ movl(rdx, STATE(_stack)); - __ movl(rax, STATE(_locals)); // address for result - __ movl(rdx, Address(rdx, wordSize)); // get result + __ movptr(rdx, STATE(_stack)); + __ movptr(rax, STATE(_locals)); // address for result + __ movptr(rdx, Address(rdx, wordSize)); // get result __ verify_oop(rdx); // verify it - __ movl(Address(rax, 0), rdx); // and store it + __ movptr(Address(rax, 0), rdx); // and store it break; default : ShouldNotReachHere(); } @@ -288,31 +294,29 @@ // frame manager execept in this situation the caller is native code (c1/c2/call_stub) // and so rather than return result onto caller's java expression stack we return the // result in the expected location based on the native abi. - // On entry: rsi - interpreter state of activation returning a (potential) result - // On Return: rsi - unchanged + // On entry: rsi/r13 - interpreter state of activation returning a (potential) result + // On Return: rsi/r13 - unchanged // Other registers changed [rax/rdx/ST(0) as needed for the result returned] address entry = __ pc(); - const Register state = rsi; // current activation object, valid on entry switch (type) { - case T_VOID: + case T_VOID: break; - case T_BOOLEAN: - case T_CHAR : - case T_BYTE : - case T_SHORT : - case T_INT : - __ movl(rdx, STATE(_stack)); // get top of stack + case T_BOOLEAN: + case T_CHAR : + case T_BYTE : + case T_SHORT : + case T_INT : + __ movptr(rdx, STATE(_stack)); // get top of stack __ movl(rax, Address(rdx, wordSize)); // get result word 1 break; - case T_LONG : - __ movl(rdx, STATE(_stack)); // get top of stack - __ movl(rax, Address(rdx, wordSize)); // get result low word - __ movl(rdx, Address(rdx, 2*wordSize)); // get result high word - break; + case T_LONG : + __ movptr(rdx, STATE(_stack)); // get top of stack + __ movptr(rax, Address(rdx, wordSize)); // get result low word + NOT_LP64(__ movl(rdx, Address(rdx, 2*wordSize));) // get result high word break; case T_FLOAT : - __ movl(rdx, STATE(_stack)); // get top of stack + __ movptr(rdx, STATE(_stack)); // get top of stack if ( UseSSE >= 1) { __ movflt(xmm0, Address(rdx, wordSize)); } else { @@ -320,7 +324,7 @@ } break; case T_DOUBLE : - __ movl(rdx, STATE(_stack)); // get top of stack + __ movptr(rdx, STATE(_stack)); // get top of stack if ( UseSSE > 1) { __ movdbl(xmm0, Address(rdx, wordSize)); } else { @@ -328,8 +332,8 @@ } break; case T_OBJECT : - __ movl(rdx, STATE(_stack)); // get top of stack - __ movl(rax, Address(rdx, wordSize)); // get result word 1 + __ movptr(rdx, STATE(_stack)); // get top of stack + __ movptr(rax, Address(rdx, wordSize)); // get result word 1 __ verify_oop(rax); // verify it break; default : ShouldNotReachHere(); @@ -360,12 +364,12 @@ } else { ret = 
unctrap_frame_manager_entry; // re-execute the bytecode ( e.g. uncommon trap) } - assert(ret != NULL, "Not initialized"); + assert(ret != NULL, "Not initialized"); return ret; } // C++ Interpreter -void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state, +void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state, const Register locals, const Register sender_sp, bool native) { @@ -411,57 +415,61 @@ if (!native) { #ifdef PRODUCT - __ subl(rsp, 2*wordSize); + __ subptr(rsp, 2*wordSize); #else /* PRODUCT */ - __ pushl((int)NULL); - __ pushl(state); // make it look like a real argument + __ push((int32_t)NULL_WORD); + __ push(state); // make it look like a real argument #endif /* PRODUCT */ } // Now that we are assure of space for stack result, setup typical linkage - __ pushl(rax); + __ push(rax); __ enter(); - __ movl(rax, state); // save current state + __ mov(rax, state); // save current state - __ leal(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter))); - __ movl(state, rsp); + __ lea(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter))); + __ mov(state, rsp); - // rsi == state/locals rax == prevstate + // rsi/r13 == state/locals rax == prevstate // initialize the "shadow" frame so that use since C++ interpreter not directly // recursive. Simpler to recurse but we can't trim expression stack as we call // new methods. - __ movl(STATE(_locals), locals); // state->_locals = locals() - __ movl(STATE(_self_link), state); // point to self - __ movl(STATE(_prev_link), rax); // state->_link = state on entry (NULL or previous state) - __ movl(STATE(_sender_sp), sender_sp); // state->_sender_sp = sender_sp + __ movptr(STATE(_locals), locals); // state->_locals = locals() + __ movptr(STATE(_self_link), state); // point to self + __ movptr(STATE(_prev_link), rax); // state->_link = state on entry (NULL or previous state) + __ movptr(STATE(_sender_sp), sender_sp); // state->_sender_sp = sender_sp +#ifdef _LP64 + __ movptr(STATE(_thread), r15_thread); // state->_bcp = codes() +#else __ get_thread(rax); // get vm's javathread* - __ movl(STATE(_thread), rax); // state->_bcp = codes() - __ movl(rdx, Address(rbx, methodOopDesc::const_offset())); // get constantMethodOop - __ leal(rdx, Address(rdx, constMethodOopDesc::codes_offset())); // get code base + __ movptr(STATE(_thread), rax); // state->_bcp = codes() +#endif // _LP64 + __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); // get constantMethodOop + __ lea(rdx, Address(rdx, constMethodOopDesc::codes_offset())); // get code base if (native) { - __ movl(STATE(_bcp), (intptr_t)NULL); // state->_bcp = NULL + __ movptr(STATE(_bcp), (int32_t)NULL_WORD); // state->_bcp = NULL } else { - __ movl(STATE(_bcp), rdx); // state->_bcp = codes() + __ movptr(STATE(_bcp), rdx); // state->_bcp = codes() } - __ xorl(rdx, rdx); - __ movl(STATE(_oop_temp), rdx); // state->_oop_temp = NULL (only really needed for native) - __ movl(STATE(_mdx), rdx); // state->_mdx = NULL - __ movl(rdx, Address(rbx, methodOopDesc::constants_offset())); - __ movl(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes())); - __ movl(STATE(_constants), rdx); // state->_constants = constants() - - __ movl(STATE(_method), rbx); // state->_method = method() - __ movl(STATE(_msg), (int) BytecodeInterpreter::method_entry); // state->_msg = initial method entry - __ movl(STATE(_result._to_call._callee), (int) NULL); // state->_result._to_call._callee_callee = NULL + __ xorptr(rdx, rdx); + __ movptr(STATE(_oop_temp), rdx); 
// state->_oop_temp = NULL (only really needed for native) + __ movptr(STATE(_mdx), rdx); // state->_mdx = NULL + __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset())); + __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes())); + __ movptr(STATE(_constants), rdx); // state->_constants = constants() + + __ movptr(STATE(_method), rbx); // state->_method = method() + __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry); // state->_msg = initial method entry + __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD); // state->_result._to_call._callee_callee = NULL - __ movl(STATE(_monitor_base), rsp); // set monitor block bottom (grows down) this would point to entry [0] - // entries run from -1..x where &monitor[x] == + __ movptr(STATE(_monitor_base), rsp); // set monitor block bottom (grows down) this would point to entry [0] + // entries run from -1..x where &monitor[x] == - { + { // Must not attempt to lock method until we enter interpreter as gc won't be able to find the // initial frame. However we allocate a free monitor so we don't have to shuffle the expression stack // immediately. @@ -482,36 +490,44 @@ const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); __ movl(rax, access_flags); __ testl(rax, JVM_ACC_STATIC); - __ movl(rax, Address(locals, 0)); // get receiver (assume this is frequent case) + __ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case) __ jcc(Assembler::zero, done); - __ movl(rax, Address(rbx, methodOopDesc::constants_offset())); - __ movl(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes())); - __ movl(rax, Address(rax, mirror_offset)); + __ movptr(rax, Address(rbx, methodOopDesc::constants_offset())); + __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes())); + __ movptr(rax, Address(rax, mirror_offset)); __ bind(done); // add space for monitor & lock - __ subl(rsp, entry_size); // add space for a monitor entry - __ movl(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object + __ subptr(rsp, entry_size); // add space for a monitor entry + __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object __ bind(not_synced); } - __ movl(STATE(_stack_base), rsp); // set expression stack base ( == &monitors[-count]) + __ movptr(STATE(_stack_base), rsp); // set expression stack base ( == &monitors[-count]) if (native) { - __ movl(STATE(_stack), rsp); // set current expression stack tos - __ movl(STATE(_stack_limit), rsp); + __ movptr(STATE(_stack), rsp); // set current expression stack tos + __ movptr(STATE(_stack_limit), rsp); } else { - __ subl(rsp, wordSize); // pre-push stack - __ movl(STATE(_stack), rsp); // set current expression stack tos + __ subptr(rsp, wordSize); // pre-push stack + __ movptr(STATE(_stack), rsp); // set current expression stack tos // compute full expression stack limit const Address size_of_stack (rbx, methodOopDesc::max_stack_offset()); __ load_unsigned_word(rdx, size_of_stack); // get size of expression stack in words - __ negl(rdx); // so we can subtract in next step + __ negptr(rdx); // so we can subtract in next step // Allocate expression stack - __ leal(rsp, Address(rsp, rdx, Address::times_4)); - __ movl(STATE(_stack_limit), rsp); + __ lea(rsp, Address(rsp, rdx, Address::times_ptr)); + __ movptr(STATE(_stack_limit), rsp); } +#ifdef _LP64 + // Make sure stack is properly aligned and sized for the abi + __ subptr(rsp, 
frame::arg_reg_save_area_bytes); // windows + __ andptr(rsp, -16); // must be 16 byte boundry (see amd64 ABI) +#endif // _LP64 + + + } // Helpers for commoning out cases in the various type of method entries. @@ -531,16 +547,16 @@ const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset()); if (ProfileInterpreter) { // %%% Merge this into methodDataOop - __ increment(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset())); + __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset())); } // Update standard invocation counters - __ movl(rax, backedge_counter); // load backedge counter + __ movl(rax, backedge_counter); // load backedge counter __ increment(rcx, InvocationCounter::count_increment); __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits - __ movl(invocation_counter, rcx); // save invocation count - __ addl(rcx, rax); // add both counters + __ movl(invocation_counter, rcx); // save invocation count + __ addl(rcx, rax); // add both counters // profile_method is non-null only for interpreted method so // profile_method != NULL == !native_call @@ -555,7 +571,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) { // C++ interpreter on entry - // rsi - new interpreter state pointer + // rsi/r13 - new interpreter state pointer // rbp - interpreter frame pointer // rbx - method @@ -566,7 +582,7 @@ // rsp - sender_sp // C++ interpreter only - // rsi - previous interpreter state pointer + // rsi/r13 - previous interpreter state pointer const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset()); @@ -574,16 +590,14 @@ // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp). // The call returns the address of the verified entry point for the method or NULL // if the compilation did not complete (either went background or bailed out). - __ movl(rax, (int)false); + __ movptr(rax, (int32_t)false); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax); - // for c++ interpreter can rsi really be munged? - __ leal(rsi, Address(rbp, -sizeof(BytecodeInterpreter))); // restore state - __ movl(rbx, Address(rsi, byte_offset_of(BytecodeInterpreter, _method))); // restore method - __ movl(rdi, Address(rsi, byte_offset_of(BytecodeInterpreter, _locals))); // get locals pointer + // for c++ interpreter can rsi really be munged? + __ lea(state, Address(rbp, -sizeof(BytecodeInterpreter))); // restore state + __ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method))); // restore method + __ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals))); // get locals pointer - // Preserve invariant that rsi/rdi contain bcp/locals of sender frame - // and jump to the interpreted entry. __ jmp(*do_continue, relocInfo::none); } @@ -600,7 +614,7 @@ // rbx,: methodOop // C++ Interpreter - // rsi: previous interpreter frame state object + // rsi/r13: previous interpreter frame state object // rdi: &locals[0] // rcx: # of locals // rdx: number of additional locals this frame needs (what we must check) @@ -610,7 +624,7 @@ // rax, // NOTE: since the additional locals are also always pushed (wasn't obvious in - // generate_method_entry) so the guard should work for them too. + // generate_method_entry) so the guard should work for them too. 
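
The compute_interpreter_state hunk above fills in the freshly allocated BytecodeInterpreter ("shadow frame") through the STATE(...) accessor, and on 64-bit additionally reserves frame::arg_reg_save_area_bytes and rounds rsp down to a 16-byte boundary for the AMD64 ABI. A plain-C++ restatement of what those stores amount to, using a reduced placeholder struct rather than the real BytecodeInterpreter layout, just to make the field semantics easier to follow:

    #include <cstdint>

    // Placeholder shape only; the real fields live in BytecodeInterpreter.
    struct InterpState {
      intptr_t*   _locals;
      InterpState* _self_link;
      InterpState* _prev_link;
      intptr_t*   _sender_sp;
      void*       _thread;
      uint8_t*    _bcp;
      int         _msg;
    };

    static void init_state(InterpState* state, intptr_t* locals, InterpState* prev,
                           intptr_t* sender_sp, void* thread, uint8_t* codes,
                           bool native, int method_entry_msg) {
      state->_locals    = locals;              // movptr(STATE(_locals), locals)
      state->_self_link = state;               // points to self
      state->_prev_link = prev;                // NULL or previous activation
      state->_sender_sp = sender_sp;
      state->_thread    = thread;              // r15_thread on LP64
      state->_bcp       = native ? nullptr : codes;
      state->_msg       = method_entry_msg;    // BytecodeInterpreter::method_entry
    }
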
// // monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp @@ -631,11 +645,11 @@ // save rsi == caller's bytecode ptr (c++ previous interp. state) // QQQ problem here?? rsi overload???? - __ pushl(rsi); + __ push(state); - const Register thread = rsi; + const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rsi); - __ get_thread(thread); + NOT_LP64(__ get_thread(thread)); const Address stack_base(thread, Thread::stack_base_offset()); const Address stack_size(thread, Thread::stack_size_offset()); @@ -646,26 +660,26 @@ // Any additional monitors need a check when moving the expression stack const one_monitor = frame::interpreter_frame_monitor_size() * wordSize; __ load_unsigned_word(rax, size_of_stack); // get size of expression stack in words - __ leal(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor)); - __ leal(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size)); + __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor)); + __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size)); #ifdef ASSERT Label stack_base_okay, stack_size_okay; // verify that thread stack base is non-zero - __ cmpl(stack_base, 0); + __ cmpptr(stack_base, (int32_t)0); __ jcc(Assembler::notEqual, stack_base_okay); __ stop("stack base is zero"); __ bind(stack_base_okay); // verify that thread stack size is non-zero - __ cmpl(stack_size, 0); + __ cmpptr(stack_size, (int32_t)0); __ jcc(Assembler::notEqual, stack_size_okay); __ stop("stack size is zero"); __ bind(stack_size_okay); #endif // Add stack base to locals and subtract stack size - __ addl(rax, stack_base); - __ subl(rax, stack_size); + __ addptr(rax, stack_base); + __ subptr(rax, stack_size); // We should have a magic number here for the size of the c++ interpreter frame. // We can't actually tell this ahead of time. The debug version size is around 3k @@ -677,39 +691,40 @@ (StackRedPages+StackYellowPages); // Only need this if we are stack banging which is temporary while // we're debugging. - __ addl(rax, slop + 2*max_pages * page_size); + __ addptr(rax, slop + 2*max_pages * page_size); // check against the current stack bottom - __ cmpl(rsp, rax); + __ cmpptr(rsp, rax); __ jcc(Assembler::above, after_frame_check_pop); - __ popl(rsi); // get saved bcp / (c++ prev state ). + __ pop(state); // get c++ prev state. // throw exception return address becomes throwing pc __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError)); // all done with frame size check __ bind(after_frame_check_pop); - __ popl(rsi); + __ pop(state); __ bind(after_frame_check); } // Find preallocated monitor and lock method (C++ interpreter) // rbx - methodOop -// +// void InterpreterGenerator::lock_method(void) { - // assumes state == rsi == pointer to current interpreterState - // minimally destroys rax, rdx, rdi + // assumes state == rsi/r13 == pointer to current interpreterState + // minimally destroys rax, rdx|c_rarg1, rdi // // synchronize method - const Register state = rsi; const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; const Address access_flags (rbx, methodOopDesc::access_flags_offset()); + const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1); + // find initial monitor i.e. 
monitors[-1] - __ movl(rdx, STATE(_monitor_base)); // get monitor bottom limit - __ subl(rdx, entry_size); // point to initial monitor + __ movptr(monitor, STATE(_monitor_base)); // get monitor bottom limit + __ subptr(monitor, entry_size); // point to initial monitor #ifdef ASSERT { Label L; @@ -724,35 +739,34 @@ { Label done; const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); __ movl(rax, access_flags); - __ movl(rdi, STATE(_locals)); // prepare to get receiver (assume common case) + __ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case) __ testl(rax, JVM_ACC_STATIC); - __ movl(rax, Address(rdi, 0)); // get receiver (assume this is frequent case) + __ movptr(rax, Address(rdi, 0)); // get receiver (assume this is frequent case) __ jcc(Assembler::zero, done); - __ movl(rax, Address(rbx, methodOopDesc::constants_offset())); - __ movl(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes())); - __ movl(rax, Address(rax, mirror_offset)); + __ movptr(rax, Address(rbx, methodOopDesc::constants_offset())); + __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes())); + __ movptr(rax, Address(rax, mirror_offset)); __ bind(done); } #ifdef ASSERT { Label L; - __ cmpl(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // correct object? + __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes())); // correct object? __ jcc(Assembler::equal, L); __ stop("wrong synchronization lobject"); __ bind(L); } #endif // ASSERT - // can destroy rax, rdx, rcx, and (via call_VM) rdi! - __ lock_object(rdx); + // can destroy rax, rdx|c_rarg1, rcx, and (via call_VM) rdi! + __ lock_object(monitor); } // Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry address InterpreterGenerator::generate_accessor_entry(void) { - // rbx,: methodOop - // rcx: receiver (preserve for slow entry into asm interpreter) + // rbx: methodOop - // rsi: senderSP must preserved for slow path, set SP to it on fast path + // rsi/r13: senderSP must preserved for slow path, set SP to it on fast path Label xreturn_path; @@ -775,21 +789,21 @@ // these conditions first and use slow path if necessary. // rbx,: method // rcx: receiver - __ movl(rax, Address(rsp, wordSize)); + __ movptr(rax, Address(rsp, wordSize)); // check if local 0 != NULL and read field - __ testl(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, slow_path); - __ movl(rdi, Address(rbx, methodOopDesc::constants_offset())); + __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset())); // read first instruction word and extract bytecode @ 1 and index @ 2 - __ movl(rdx, Address(rbx, methodOopDesc::const_offset())); + __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset())); // Shift codes right to get the index on the right. 
// The bytecode fetched looks like <0xb4><0x2a> __ shrl(rdx, 2*BitsPerByte); __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); - __ movl(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes())); + __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes())); // rax,: local 0 // rbx,: method @@ -797,30 +811,30 @@ // rcx: scratch // rdx: constant pool cache index // rdi: constant pool cache - // rsi: sender sp + // rsi/r13: sender sp // check if getfield has been resolved and read constant pool cache entry // check the validity of the cache entry by testing whether _indices field // contains Bytecode::_getfield in b1 byte. assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below"); - __ movl(rcx, - Address(rdi, - rdx, - Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); + __ movl(rcx, + Address(rdi, + rdx, + Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); __ shrl(rcx, 2*BitsPerByte); __ andl(rcx, 0xFF); __ cmpl(rcx, Bytecodes::_getfield); __ jcc(Assembler::notEqual, slow_path); // Note: constant pool entry is not valid before bytecode is resolved - __ movl(rcx, - Address(rdi, - rdx, - Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())); - __ movl(rdx, - Address(rdi, - rdx, - Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())); + __ movptr(rcx, + Address(rdi, + rdx, + Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())); + __ movl(rdx, + Address(rdi, + rdx, + Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())); Label notByte, notShort, notChar; const Address field_address (rax, rcx, Address::times_1); @@ -831,6 +845,16 @@ __ shrl(rdx, ConstantPoolCacheEntry::tosBits); // Make sure we don't need to mask rdx for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); +#ifdef _LP64 + Label notObj; + __ cmpl(rdx, atos); + __ jcc(Assembler::notEqual, notObj); + // atos + __ movptr(rax, field_address); + __ jmp(xreturn_path); + + __ bind(notObj); +#endif // _LP64 __ cmpl(rdx, btos); __ jcc(Assembler::notEqual, notByte); __ load_signed_byte(rax, field_address); @@ -851,8 +875,10 @@ __ bind(notChar); #ifdef ASSERT Label okay; +#ifndef _LP64 __ cmpl(rdx, atos); __ jcc(Assembler::equal, okay); +#endif // _LP64 __ cmpl(rdx, itos); __ jcc(Assembler::equal, okay); __ stop("what type is this?"); @@ -864,8 +890,8 @@ __ bind(xreturn_path); // _ireturn/_areturn - __ popl(rdi); // get return address - __ movl(rsp, rsi); // set sp to sender sp + __ pop(rdi); // get return address + __ mov(rsp, sender_sp_on_entry); // set sp to sender sp __ jmp(rdi); // generate a vanilla interpreter entry as the slow path @@ -897,8 +923,8 @@ // rbx: methodOop // rcx: receiver (unused) - // rsi: previous interpreter state (if called from C++ interpreter) must preserve - // in any case. If called via c1/c2/call_stub rsi is junk (to use) but harmless + // rsi/r13: previous interpreter state (if called from C++ interpreter) must preserve + // in any case. If called via c1/c2/call_stub rsi/r13 is junk (to use) but harmless // to save/restore. 
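
The accessor fast path above decodes the rewritten "aload_0; getfield #index" prefix straight from the method's bytecodes and then validates the constant-pool-cache entry before trusting it. A small sketch of the resolution test that the shrl/andl/cmpl sequence performs on the _indices word; the byte position is inferred from the code above, not taken from the headers:

    #include <cstdint>

    // True when byte "b1" (bits 16..23) of the cache entry's _indices word
    // already records Bytecodes::_getfield, i.e. the getfield is resolved.
    static inline bool getfield_resolved(uint32_t indices_word, uint8_t getfield_code) {
      return ((indices_word >> 16) & 0xFFu) == getfield_code;   // shrl 2*BitsPerByte; andl 0xFF
    }
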
address entry_point = __ pc(); @@ -907,8 +933,7 @@ const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset()); const Address access_flags (rbx, methodOopDesc::access_flags_offset()); - // rsi == state/locals rdi == prevstate - const Register state = rsi; + // rsi/r13 == state/locals rdi == prevstate const Register locals = rdi; // get parameter size (always needed) @@ -916,33 +941,38 @@ // rbx: methodOop // rcx: size of parameters - __ popl(rax); // get return address + __ pop(rax); // get return address // for natives the size of locals is zero - // compute beginning of parameters /locals - __ leal(locals, Address(rsp, rcx, Address::times_4, -wordSize)); + // compute beginning of parameters /locals + __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize)); // initialize fixed part of activation frame // Assumes rax = return address // allocate and initialize new interpreterState and method expression stack - // IN(locals) -> locals + // IN(locals) -> locals // IN(state) -> previous frame manager state (NULL from stub/c1/c2) // destroys rax, rcx, rdx // OUT (state) -> new interpreterState // OUT(rsp) -> bottom of methods expression stack // save sender_sp - __ movl(rcx, rsi); + __ mov(rcx, sender_sp_on_entry); // start with NULL previous state - __ movl(state, 0); + __ movptr(state, (int32_t)NULL_WORD); generate_compute_interpreter_state(state, locals, rcx, true); #ifdef ASSERT { Label L; - __ movl(rax, STATE(_stack_base)); - __ cmpl(rax, rsp); + __ movptr(rax, STATE(_stack_base)); +#ifdef _LP64 + // duplicate the alignment rsp got after setting stack_base + __ subptr(rax, frame::arg_reg_save_area_bytes); // windows + __ andptr(rax, -16); // must be 16 byte boundry (see amd64 ABI) +#endif // _LP64 + __ cmpptr(rax, rsp); __ jcc(Assembler::equal, L); __ stop("broken stack frame setup in interpreter"); __ bind(L); @@ -951,14 +981,15 @@ if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count - __ movl(rax, STATE(_thread)); // get thread + const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax); + NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread // Since at this point in the method invocation the exception handler // would try to exit the monitor of synchronized methods which hasn't // been entered yet, we set the thread local variable // _do_not_unlock_if_synchronized to true. The remove_activation will // check this flag. 
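
For the native entry above, the incoming Java parameters are already on the stack, so the locals pointer is derived directly from rsp and the parameter count via lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize)). The same computation in plain C++, in word units, just to make the addressing explicit:

    #include <cstdint>

    // locals[0] (the first parameter) is the highest-addressed parameter slot.
    static inline intptr_t* locals_base(intptr_t* sp, int parameter_count) {
      return sp + parameter_count - 1;   // "-1" corresponds to the -wordSize displacement
    }
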
- const Address do_not_unlock_if_synchronized(rax, + const Address do_not_unlock_if_synchronized(unlock_thread, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); __ movbool(do_not_unlock_if_synchronized, true); @@ -994,7 +1025,7 @@ bang_stack_shadow_pages(true); // reset the _do_not_unlock_if_synchronized flag - __ movl(rax, STATE(_thread)); // get thread + NOT_LP64(__ movl(rax, STATE(_thread));) // get thread __ movbool(do_not_unlock_if_synchronized, false); @@ -1025,62 +1056,81 @@ // work registers const Register method = rbx; - const Register thread = rdi; - const Register t = rcx; + const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi); + const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1 // allocate space for parameters - __ movl(method, STATE(_method)); + __ movptr(method, STATE(_method)); __ verify_oop(method); __ load_unsigned_word(t, Address(method, methodOopDesc::size_of_parameters_offset())); __ shll(t, 2); - __ addl(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror - __ subl(rsp, t); - __ andl(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics +#ifdef _LP64 + __ subptr(rsp, t); + __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows + __ andptr(rsp, -16); // must be 16 byte boundry (see amd64 ABI) +#else + __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror + __ subptr(rsp, t); + __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics +#endif // _LP64 // get signature handler Label pending_exception_present; { Label L; - __ movl(t, Address(method, methodOopDesc::signature_handler_offset())); - __ testl(t, t); + __ movptr(t, Address(method, methodOopDesc::signature_handler_offset())); + __ testptr(t, t); __ jcc(Assembler::notZero, L); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method, false); - __ movl(method, STATE(_method)); - __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); + __ movptr(method, STATE(_method)); + __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, pending_exception_present); __ verify_oop(method); - __ movl(t, Address(method, methodOopDesc::signature_handler_offset())); + __ movptr(t, Address(method, methodOopDesc::signature_handler_offset())); __ bind(L); } #ifdef ASSERT { Label L; - __ pushl(t); + __ push(t); __ get_thread(t); // get vm's javathread* - __ cmpl(t, STATE(_thread)); + __ cmpptr(t, STATE(_thread)); __ jcc(Assembler::equal, L); __ int3(); __ bind(L); - __ popl(t); + __ pop(t); } -#endif // +#endif // + const Register from_ptr = InterpreterRuntime::SignatureHandlerGenerator::from(); // call signature handler - assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code"); assert(InterpreterRuntime::SignatureHandlerGenerator::to () == rsp, "adjust this code"); - assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t , "adjust this code"); + // The generated handlers do not touch RBX (the method oop). - // However, large signatures cannot be cached and are generated + // However, large signatures cannot be cached and are generated // each time here. The slow-path generator will blow RBX // sometime, so we must reload it after the call. 
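
The native-call setup above sizes the outgoing C argument area from the parameter count; on the 32-bit path it adds two extra slots for the JNIEnv* and the possible mirror handle of a static method, then rounds rsp down so SSE code sees a 16-byte-aligned stack, while the 64-bit path handles the Windows register save area and the alignment separately. A small sketch of that arithmetic, under the assumption of one stack slot per parameter word:

    #include <cstddef>
    #include <cstdint>

    static inline size_t native_arg_area_bytes(int parameter_slots, size_t word_size) {
      return (size_t)(parameter_slots + 2) * word_size;   // +2: JNIEnv* and mirror handle
    }

    static inline uintptr_t align_down(uintptr_t sp, uintptr_t alignment) {
      return sp & ~(alignment - 1);                        // e.g. andptr(rsp, -16)
    }
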
- __ movl(rdi, STATE(_locals)); // get the from pointer + __ movptr(from_ptr, STATE(_locals)); // get the from pointer __ call(t); - __ movl(method, STATE(_method)); + __ movptr(method, STATE(_method)); __ verify_oop(method); // result handler is in rax // set result handler - __ movl(STATE(_result_handler), rax); + __ movptr(STATE(_result_handler), rax); + + + // get native function entry point + { Label L; + __ movptr(rax, Address(method, methodOopDesc::native_function_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::notZero, L); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); + __ movptr(method, STATE(_method)); + __ verify_oop(method); + __ movptr(rax, Address(method, methodOopDesc::native_function_offset())); + __ bind(L); + } // pass mirror handle if static call { Label L; @@ -1089,57 +1139,55 @@ __ testl(t, JVM_ACC_STATIC); __ jcc(Assembler::zero, L); // get mirror - __ movl(t, Address(method, methodOopDesc:: constants_offset())); - __ movl(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes())); - __ movl(t, Address(t, mirror_offset)); + __ movptr(t, Address(method, methodOopDesc:: constants_offset())); + __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes())); + __ movptr(t, Address(t, mirror_offset)); // copy mirror into activation object - __ movl(STATE(_oop_temp), t); + __ movptr(STATE(_oop_temp), t); // pass handle to mirror - __ leal(t, STATE(_oop_temp)); - __ movl(Address(rsp, wordSize), t); +#ifdef _LP64 + __ lea(c_rarg1, STATE(_oop_temp)); +#else + __ lea(t, STATE(_oop_temp)); + __ movptr(Address(rsp, wordSize), t); +#endif // _LP64 __ bind(L); } #ifdef ASSERT { Label L; - __ pushl(t); + __ push(t); __ get_thread(t); // get vm's javathread* - __ cmpl(t, STATE(_thread)); + __ cmpptr(t, STATE(_thread)); __ jcc(Assembler::equal, L); __ int3(); __ bind(L); - __ popl(t); - } -#endif // - - // get native function entry point - { Label L; - __ movl(rax, Address(method, methodOopDesc::native_function_offset())); - __ testl(rax, rax); - __ jcc(Assembler::notZero, L); - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); - __ movl(method, STATE(_method)); - __ verify_oop(method); - __ movl(rax, Address(method, methodOopDesc::native_function_offset())); - __ bind(L); + __ pop(t); } +#endif // // pass JNIEnv - __ movl(thread, STATE(_thread)); // get thread - __ leal(t, Address(thread, JavaThread::jni_environment_offset())); - __ movl(Address(rsp, 0), t); +#ifdef _LP64 + __ lea(c_rarg0, Address(thread, JavaThread::jni_environment_offset())); +#else + __ movptr(thread, STATE(_thread)); // get thread + __ lea(t, Address(thread, JavaThread::jni_environment_offset())); + + __ movptr(Address(rsp, 0), t); +#endif // _LP64 + #ifdef ASSERT { Label L; - __ pushl(t); + __ push(t); __ get_thread(t); // get vm's javathread* - __ cmpl(t, STATE(_thread)); + __ cmpptr(t, STATE(_thread)); __ jcc(Assembler::equal, L); __ int3(); __ bind(L); - __ popl(t); + __ pop(t); } -#endif // +#endif // #ifdef ASSERT { Label L; @@ -1157,13 +1205,13 @@ __ set_last_Java_frame(thread, noreg, rbp, __ pc()); - __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native); + __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native); __ call(rax); // result potentially in rdx:rax or ST0 - __ movl(method, STATE(_method)); - __ movl(thread, STATE(_thread)); // get thread + __ movptr(method, STATE(_method)); + NOT_LP64(__ movptr(thread, STATE(_thread));) // 
get thread // The potential result is in ST(0) & rdx:rax // With C++ interpreter we leave any possible result in ST(0) until we are in result handler and then @@ -1173,7 +1221,7 @@ // It is safe to do these pushes because state is _thread_in_native and return address will be found // via _last_native_pc and not via _last_jave_sp - // Must save the value of ST(0) since it could be destroyed before we get to result handler + // Must save the value of ST(0)/xmm0 since it could be destroyed before we get to result handler { Label Lpush, Lskip; ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT)); ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE)); @@ -1182,11 +1230,20 @@ __ cmpptr(STATE(_result_handler), double_handler.addr()); __ jcc(Assembler::notEqual, Lskip); __ bind(Lpush); - __ push(dtos); + __ subptr(rsp, 2*wordSize); + if ( UseSSE < 2 ) { + __ fstp_d(Address(rsp, 0)); + } else { + __ movdbl(Address(rsp, 0), xmm0); + } __ bind(Lskip); } - __ push(ltos); // save rax:rdx for potential use by result handler. + // save rax:rdx for potential use by result handler. + __ push(rax); +#ifndef _LP64 + __ push(rdx); +#endif // _LP64 // Either restore the MXCSR register after returning from the JNI Call // or verify that it wasn't changed. @@ -1195,20 +1252,22 @@ __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std())); } else if (CheckJNICalls ) { - __ call(RuntimeAddress(StubRoutines::i486::verify_mxcsr_entry())); + __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); } } +#ifndef _LP64 // Either restore the x87 floating pointer control word after returning // from the JNI call or verify that it wasn't changed. if (CheckJNICalls) { - __ call(RuntimeAddress(StubRoutines::i486::verify_fpu_cntrl_wrd_entry())); + __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry())); } +#endif // _LP64 // change thread state __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans); - if(os::is_MP()) { + if(os::is_MP()) { // Write serialization page so VM thread can do a pseudo remote membar. // We use the current thread pointer to calculate a thread specific // offset to write to within the page. This minimizes bus traffic @@ -1223,7 +1282,7 @@ SafepointSynchronize::_not_synchronized); // threads running native code and they are expected to self-suspend - // when leaving the _thread_in_native state. We need to check for + // when leaving the _thread_in_native state. We need to check for // pending suspend requests here. Label L; __ jcc(Assembler::notEqual, L); @@ -1234,17 +1293,16 @@ // Don't use call_VM as it will see a possible pending exception and forward it // and never return here preventing us from clearing _last_native_pc down below. // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are - // preserved and correspond to the bcp/locals pointers. So we do a runtime call - // by hand. + // preserved and correspond to the bcp/locals pointers. 
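
The sequence above follows the usual native-call state protocol: publish _thread_in_native before the call, move to _thread_in_native_trans afterwards, make that store visible (serialization page or membar on MP), and only then test the safepoint state and suspend flags before completing the return toward Java. A condensed sketch of that ordering with placeholder names, not the VM's actual types:

    #include <atomic>

    enum State { in_Java, in_native, in_native_trans };

    void native_call_transition(std::atomic<State>& thread_state,
                                void (*native_entry)(),
                                bool (*safepoint_or_suspend_pending)(),
                                void (*check_special_condition)()) {
      thread_state.store(in_native);          // movl(thread_state, _thread_in_native)
      native_entry();                         // __ call(rax)
      thread_state.store(in_native_trans);    // movl(thread_state, _thread_in_native_trans)
      std::atomic_thread_fence(std::memory_order_seq_cst);  // serialization page / membar
      if (safepoint_or_suspend_pending()) {
        check_special_condition();            // check_special_condition_for_native_trans
      }
      // the generated code then restores method/thread and continues back toward Java
    }
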
// - __ pushl(thread); - __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, - JavaThread::check_special_condition_for_native_trans))); + + ((MacroAssembler*)_masm)->call_VM_leaf(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), + thread); __ increment(rsp, wordSize); - __ movl(method, STATE(_method)); + __ movptr(method, STATE(_method)); __ verify_oop(method); - __ movl(thread, STATE(_thread)); // get thread + __ movptr(thread, STATE(_thread)); // get thread __ bind(Continue); } @@ -1255,8 +1313,8 @@ __ reset_last_Java_frame(thread, true, true); // reset handle block - __ movl(t, Address(thread, JavaThread::active_handles_offset())); - __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD); + __ movptr(t, Address(thread, JavaThread::active_handles_offset())); + __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); // If result was an oop then unbox and save it in the frame { Label L; @@ -1264,15 +1322,21 @@ ExternalAddress oop_handler(AbstractInterpreter::result_handler(T_OBJECT)); __ cmpptr(STATE(_result_handler), oop_handler.addr()); __ jcc(Assembler::notEqual, no_oop); - __ pop(ltos); - __ testl(rax, rax); +#ifndef _LP64 + __ pop(rdx); +#endif // _LP64 + __ pop(rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, store_result); // unbox - __ movl(rax, Address(rax, 0)); + __ movptr(rax, Address(rax, 0)); __ bind(store_result); - __ movl(STATE(_oop_temp), rax); + __ movptr(STATE(_oop_temp), rax); // keep stack depth as expected by pushing oop which will eventually be discarded - __ push(ltos); + __ push(rax); +#ifndef _LP64 + __ push(rdx); +#endif // _LP64 __ bind(no_oop); } @@ -1281,9 +1345,9 @@ __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled); __ jcc(Assembler::notEqual, no_reguard); - __ pushad(); + __ pusha(); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); - __ popad(); + __ popa(); __ bind(no_reguard); } @@ -1295,27 +1359,27 @@ // Seems that the answer to above is no this is wrong. The old code would see the exception // and forward it before doing the unlocking and notifying jvmdi that method has exited. // This seems wrong need to investigate the spec. - + // handle exceptions (exception handling will handle unlocking!) { Label L; - __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); + __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::zero, L); __ bind(pending_exception_present); // There are potential results on the stack (rax/rdx, ST(0)) we ignore these and simply - // return and let caller deal with exception. This skips the unlocking here which + // return and let caller deal with exception. This skips the unlocking here which // seems wrong but seems to be what asm interpreter did. Can't find this in the spec. // Note: must preverve method in rbx // // remove activation - __ movl(t, STATE(_sender_sp)); + __ movptr(t, STATE(_sender_sp)); __ leave(); // remove frame anchor - __ popl(rdi); // get return address - __ movl(state, STATE(_prev_link)); // get previous state for return - __ movl(rsp, t); // set sp to sender sp - __ pushl(rdi); // [ush throwing pc + __ pop(rdi); // get return address + __ movptr(state, STATE(_prev_link)); // get previous state for return + __ mov(rsp, t); // set sp to sender sp + __ push(rdi); // push throwing pc // The skips unlocking!! This seems to be what asm interpreter does but seems // very wrong. 
Not clear if this violates the spec. __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); @@ -1329,26 +1393,27 @@ __ jcc(Assembler::zero, L); // the code below should be shared with interpreter macro assembler implementation { Label unlock; + const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1); // BasicObjectLock will be first in list, since this is a synchronized method. However, need - // to check that the object has not been unlocked by an explicit monitorexit bytecode. - __ movl(rdx, STATE(_monitor_base)); - __ subl(rdx, frame::interpreter_frame_monitor_size() * wordSize); // address of initial monitor + // to check that the object has not been unlocked by an explicit monitorexit bytecode. + __ movptr(monitor, STATE(_monitor_base)); + __ subptr(monitor, frame::interpreter_frame_monitor_size() * wordSize); // address of initial monitor - __ movl(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); - __ testl(t, t); + __ movptr(t, Address(monitor, BasicObjectLock::obj_offset_in_bytes())); + __ testptr(t, t); __ jcc(Assembler::notZero, unlock); - + // Entry already unlocked, need to throw exception __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); __ should_not_reach_here(); - - __ bind(unlock); - __ unlock_object(rdx); + + __ bind(unlock); + __ unlock_object(monitor); // unlock can blow rbx so restore it for path that needs it below - __ movl(method, STATE(_method)); + __ movptr(method, STATE(_method)); } __ bind(L); - } + } // jvmti support // Note: This must happen _after_ handling/throwing any exceptions since @@ -1358,18 +1423,21 @@ __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI); // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result - __ pop(ltos); // restore rax/rdx floating result if present still on stack - __ movl(t, STATE(_result_handler)); // get result handler +#ifndef _LP64 + __ pop(rdx); +#endif // _LP64 + __ pop(rax); + __ movptr(t, STATE(_result_handler)); // get result handler __ call(t); // call result handler to convert to tosca form // remove activation - __ movl(t, STATE(_sender_sp)); + __ movptr(t, STATE(_sender_sp)); __ leave(); // remove frame anchor - __ popl(rdi); // get return address - __ movl(state, STATE(_prev_link)); // get previous state for return (if c++ interpreter was caller) - __ movl(rsp, t); // set sp to sender sp + __ pop(rdi); // get return address + __ movptr(state, STATE(_prev_link)); // get previous state for return (if c++ interpreter was caller) + __ mov(rsp, t); // set sp to sender sp __ jmp(rdi); // invocation counter overflow @@ -1385,7 +1453,6 @@ // Generate entries that will put a result type index into rcx void CppInterpreterGenerator::generate_deopt_handling() { - const Register state = rsi; Label return_from_deopt_common; // Generate entries that will put a result type index into rcx @@ -1437,7 +1504,7 @@ __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_VOID)); - // Deopt return common + // Deopt return common // an index is present in rcx that lets us move any possible result being // return to the interpreter's stack // @@ -1452,51 +1519,50 @@ // __ bind(return_from_deopt_common); - __ leal(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); + __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); // setup rsp so we can push the "result" as needed. 
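
The deopt-return path above (like the normal return path later) indexes a table of converter stubs by the result-type index in rcx and calls through it (movptr rcx, _tosca_to_stack[rcx]; call rcx). In plain C++ the dispatch amounts to the following; the element type here is a guessed shape, not the declared type of _tosca_to_stack:

    typedef void (*converter_stub)();

    // Calls the stub that copies a tosca-form result onto the Java expression stack.
    static inline void convert_result(converter_stub const* tosca_to_stack, int result_index) {
      tosca_to_stack[result_index]();
    }
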
- __ movl(rsp, STATE(_stack)); // trim stack (is prepushed) - __ addl(rsp, wordSize); // undo prepush + __ movptr(rsp, STATE(_stack)); // trim stack (is prepushed) + __ addptr(rsp, wordSize); // undo prepush ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack); - // Address index(noreg, rcx, Address::times_4); - __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_4))); - // __ movl(rcx, Address(noreg, rcx, Address::times_4, int(AbstractInterpreter::_tosca_to_stack))); + // Address index(noreg, rcx, Address::times_ptr); + __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr))); + // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack))); __ call(rcx); // call result converter __ movl(STATE(_msg), (int)BytecodeInterpreter::deopt_resume); - __ leal(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present) - __ movl(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed, + __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present) + __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed, // result if any on stack already ) - __ movl(rsp, STATE(_stack_limit)); // restore expression stack to full depth + __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth } // Generate the code to handle a more_monitors message from the c++ interpreter void CppInterpreterGenerator::generate_more_monitors() { - const Register state = rsi; Label entry, loop; const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; - // 1. compute new pointers // rsp: old expression stack top - __ movl(rdx, STATE(_stack_base)); // rdx: old expression stack bottom - __ subl(rsp, entry_size); // move expression stack top limit - __ subl(STATE(_stack), entry_size); // update interpreter stack top - __ movl(STATE(_stack_limit), rsp); // inform interpreter - __ subl(rdx, entry_size); // move expression stack bottom - __ movl(STATE(_stack_base), rdx); // inform interpreter - __ movl(rcx, STATE(_stack)); // set start value for copy loop + // 1. compute new pointers // rsp: old expression stack top + __ movptr(rdx, STATE(_stack_base)); // rdx: old expression stack bottom + __ subptr(rsp, entry_size); // move expression stack top limit + __ subptr(STATE(_stack), entry_size); // update interpreter stack top + __ subptr(STATE(_stack_limit), entry_size); // inform interpreter + __ subptr(rdx, entry_size); // move expression stack bottom + __ movptr(STATE(_stack_base), rdx); // inform interpreter + __ movptr(rcx, STATE(_stack)); // set start value for copy loop __ jmp(entry); // 2. move expression stack contents __ bind(loop); - __ movl(rbx, Address(rcx, entry_size)); // load expression stack word from old location - __ movl(Address(rcx, 0), rbx); // and store it at new location - __ addl(rcx, wordSize); // advance to next word + __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location + __ movptr(Address(rcx, 0), rbx); // and store it at new location + __ addptr(rcx, wordSize); // advance to next word __ bind(entry); - __ cmpl(rcx, rdx); // check if bottom reached - __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word + __ cmpptr(rcx, rdx); // check if bottom reached + __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word // now zero the slot so we can find it. 
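
generate_more_monitors above opens a new BasicObjectLock slot by sliding the whole expression stack down by one monitor entry and then zeroing the freed slot's object field so the interpreter can find it. The copy loop, restated in C++ over word-sized elements as a sketch of its effect, not of the generated instructions:

    #include <cstddef>
    #include <cstdint>

    // Stack grows down; new_top and new_base are the already-adjusted bounds.
    static void slide_stack_for_monitor(intptr_t* new_top, intptr_t* new_base,
                                        size_t entry_words) {
      for (intptr_t* p = new_top; p != new_base; ++p) {
        *p = *(p + entry_words);   // load from old location, store at new location
      }
    }
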
- __ movl(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int) NULL); + __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD); __ movl(STATE(_msg), (int)BytecodeInterpreter::got_monitors); } @@ -1520,7 +1586,7 @@ // // rbx: methodOop // rcx: receiver - unused (retrieved from stack as needed) -// rsi: previous frame manager state (NULL from the call_stub/c1/c2) +// rsi/r13: previous frame manager state (NULL from the call_stub/c1/c2) // // // Stack layout at entry @@ -1542,7 +1608,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) { // rbx: methodOop - // rsi: sender sp + // rsi/r13: sender sp // Because we redispatch "recursive" interpreter entries thru this same entry point // the "input" register usage is a little strange and not what you expect coming @@ -1560,30 +1626,29 @@ address entry_point = __ pc(); - // Fast accessor methods share this entry point. + // Fast accessor methods share this entry point. // This works because frame manager is in the same codelet if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path); Label dispatch_entry_2; - __ movl(rcx, rsi); - __ movl(rsi, 0); // no current activation + __ movptr(rcx, sender_sp_on_entry); + __ movptr(state, (int32_t)NULL_WORD); // no current activation __ jmp(dispatch_entry_2); - const Register state = rsi; // current activation object, valid on entry - const Register locals = rdi; + const Register locals = rdi; Label re_dispatch; __ bind(re_dispatch); // save sender sp (doesn't include return address - __ leal(rcx, Address(rsp, wordSize)); + __ lea(rcx, Address(rsp, wordSize)); __ bind(dispatch_entry_2); // save sender sp - __ pushl(rcx); + __ push(rcx); const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset()); const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset()); @@ -1600,7 +1665,7 @@ // rcx: size of parameters __ load_unsigned_word(rdx, size_of_locals); // get size of locals in words - __ subl(rdx, rcx); // rdx = no. of additional locals + __ subptr(rdx, rcx); // rdx = no. of additional locals // see if we've got enough room on the stack for locals plus overhead. 
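
In the normal entry above only the locals that are not parameters need extra space, since the parameters are already on the stack; the frame manager computes that count and later pushes the same number of NULL words to initialize them. The arithmetic, with a concrete example:

    // rdx in the code above: number of extra local slots to allocate and null out.
    static inline int additional_locals(int size_of_locals, int size_of_parameters) {
      return size_of_locals - size_of_parameters;
    }
    // e.g. a method with 5 locals and 3 parameters pushes 2 NULL_WORD slots.
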
generate_stack_overflow_check(); // C++ @@ -1612,32 +1677,32 @@ // compute beginning of parameters (rdi) - __ leal(locals, Address(rsp, rcx, Address::times_4, wordSize)); + __ lea(locals, Address(rsp, rcx, Address::times_ptr, wordSize)); // save sender's sp // __ movl(rcx, rsp); // get sender's sp - __ popl(rcx); + __ pop(rcx); // get return address - __ popl(rax); + __ pop(rax); // rdx - # of additional locals // allocate space for locals // explicitly initialize locals { Label exit, loop; - __ testl(rdx, rdx); + __ testl(rdx, rdx); // (32bit ok) __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 __ bind(loop); - __ pushl((int)NULL); // initialize local variables + __ push((int32_t)NULL_WORD); // initialize local variables __ decrement(rdx); // until everything initialized __ jcc(Assembler::greater, loop); __ bind(exit); } - + // Assumes rax = return address // allocate and initialize new interpreterState and method expression stack @@ -1667,17 +1732,21 @@ __ bind(call_interpreter_2); { - const Register thread = rcx; + const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread); - __ pushl(state); // push arg to interpreter - __ movl(thread, STATE(_thread)); +#ifdef _LP64 + __ mov(c_rarg0, state); +#else + __ push(state); // push arg to interpreter + __ movptr(thread, STATE(_thread)); +#endif // _LP64 // We can setup the frame anchor with everything we want at this point // as we are thread_in_Java and no safepoints can occur until we go to // vm mode. We do have to clear flags on return from vm but that is it // - __ movl(Address(thread, JavaThread::last_Java_fp_offset()), rbp); - __ movl(Address(thread, JavaThread::last_Java_sp_offset()), rsp); + __ movptr(Address(thread, JavaThread::last_Java_fp_offset()), rbp); + __ movptr(Address(thread, JavaThread::last_Java_sp_offset()), rsp); // Call the interpreter @@ -1685,19 +1754,19 @@ RuntimeAddress checking(CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks)); __ call(JvmtiExport::can_post_interpreter_events() ? checking : normal); - __ popl(rax); // discard parameter to run + NOT_LP64(__ pop(rax);) // discard parameter to run // // state is preserved since it is callee saved // // reset_last_Java_frame - __ movl(thread, STATE(_thread)); + NOT_LP64(__ movl(thread, STATE(_thread));) __ reset_last_Java_frame(thread, true, true); } // examine msg from interpreter to determine next action - + __ movl(rdx, STATE(_msg)); // Get new message Label call_method; @@ -1706,15 +1775,15 @@ Label bad_msg; Label do_OSR; - __ cmpl(rdx, (int)BytecodeInterpreter::call_method); + __ cmpl(rdx, (int32_t)BytecodeInterpreter::call_method); __ jcc(Assembler::equal, call_method); - __ cmpl(rdx, (int)BytecodeInterpreter::return_from_method); + __ cmpl(rdx, (int32_t)BytecodeInterpreter::return_from_method); __ jcc(Assembler::equal, return_from_interpreted_method); - __ cmpl(rdx, (int)BytecodeInterpreter::do_osr); + __ cmpl(rdx, (int32_t)BytecodeInterpreter::do_osr); __ jcc(Assembler::equal, do_OSR); - __ cmpl(rdx, (int)BytecodeInterpreter::throwing_exception); + __ cmpl(rdx, (int32_t)BytecodeInterpreter::throwing_exception); __ jcc(Assembler::equal, throw_exception); - __ cmpl(rdx, (int)BytecodeInterpreter::more_monitors); + __ cmpl(rdx, (int32_t)BytecodeInterpreter::more_monitors); __ jcc(Assembler::notEqual, bad_msg); // Allocate more monitor space, shuffle expression stack.... @@ -1727,8 +1796,8 @@ unctrap_frame_manager_entry = __ pc(); // // Load the registers we need. 
- __ leal(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); - __ movl(rsp, STATE(_stack_limit)); // restore expression stack to full depth + __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); + __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth __ jmp(call_interpreter_2); @@ -1760,13 +1829,17 @@ Label unwind_and_forward; // restore state pointer. - __ leal(state, Address(rbp, -sizeof(BytecodeInterpreter))); + __ lea(state, Address(rbp, -sizeof(BytecodeInterpreter))); - __ movl(rbx, STATE(_method)); // get method + __ movptr(rbx, STATE(_method)); // get method +#ifdef _LP64 + __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); +#else __ movl(rcx, STATE(_thread)); // get thread // Store exception with interpreter will expect it - __ movl(Address(rcx, Thread::pending_exception_offset()), rax); + __ movptr(Address(rcx, Thread::pending_exception_offset()), rax); +#endif // _LP64 // is current frame vanilla or native? @@ -1775,23 +1848,23 @@ __ jcc(Assembler::zero, return_with_exception); // vanilla interpreted frame, handle directly // We drop thru to unwind a native interpreted frame with a pending exception - // We jump here for the initial interpreter frame with exception pending + // We jump here for the initial interpreter frame with exception pending // We unwind the current acivation and forward it to our caller. __ bind(unwind_and_forward); - + // unwind rbp, return stack to unextended value and re-push return address - __ movl(rcx, STATE(_sender_sp)); + __ movptr(rcx, STATE(_sender_sp)); __ leave(); - __ popl(rdx); - __ movl(rsp, rcx); - __ pushl(rdx); + __ pop(rdx); + __ mov(rsp, rcx); + __ push(rdx); __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); // Return point from a call which returns a result in the native abi // (c1/c2/jni-native). This result must be processed onto the java - // expression stack. + // expression stack. // // A pending exception may be present in which case there is no result present @@ -1804,9 +1877,9 @@ // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases if (UseSSE < 2) { - __ leal(state, Address(rbp, -sizeof(BytecodeInterpreter))); - __ movl(rbx, STATE(_result._to_call._callee)); // get method just executed - __ movl(rcx, Address(rbx, methodOopDesc::result_index_offset())); + __ lea(state, Address(rbp, -sizeof(BytecodeInterpreter))); + __ movptr(rbx, STATE(_result._to_call._callee)); // get method just executed + __ movl(rcx, Address(rbx, methodOopDesc::result_index_offset())); __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index __ jcc(Assembler::equal, do_float); __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index @@ -1835,10 +1908,12 @@ __ jmp(done_conv); } +#if 0 // emit a sentinel we can test for when converting an interpreter // entry point to a compiled entry point. __ a_long(Interpreter::return_sentinel); __ a_long((int)compiled_entry); +#endif // Return point to interpreter from compiled/native method @@ -1851,33 +1926,37 @@ // calling convention left it (i.e. params may or may not be present) // Copy the result from tosca and place it on java expression stack. 
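
Several entries above (the unctrap/frame-manager entries and the return points from compiled or native code) recover the activation's interpreterState from the frame pointer rather than trusting rsi/r13, because the callee may not have preserved it: the state always sits sizeof(BytecodeInterpreter) bytes below rbp. A sketch with a placeholder struct standing in for the real BytecodeInterpreter:

    #include <cstdint>

    struct InterpreterStateStub { intptr_t fields[32]; };   // placeholder layout only

    static inline InterpreterStateStub* state_from_fp(char* fp) {
      // lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)))
      return reinterpret_cast<InterpreterStateStub*>(fp - sizeof(InterpreterStateStub));
    }
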
-  // Restore rsi as compiled code may not preserve it
+  // Restore rsi/r13 as compiled code may not preserve it
-  __ leal(state, Address(rbp, -sizeof(BytecodeInterpreter)));
+  __ lea(state, Address(rbp, -sizeof(BytecodeInterpreter)));
   // restore stack to what we had when we left (in case i2c extended it)
-  __ movl(rsp, STATE(_stack));
-  __ leal(rsp, Address(rsp, wordSize));
+  __ movptr(rsp, STATE(_stack));
+  __ lea(rsp, Address(rsp, wordSize));
   // If there is a pending exception then we don't really have a result to process
-  __ movl(rcx, STATE(_thread)); // get thread
-  __ cmpl(Address(rcx, Thread::pending_exception_offset()), (int)NULL);
-  __ jcc(Assembler::notZero, return_with_exception);
+#ifdef _LP64
+  __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+#else
+  __ movptr(rcx, STATE(_thread)); // get thread
+  __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+#endif // _LP64
+  __ jcc(Assembler::notZero, return_with_exception);
-  // get method just executed
-  __ movl(rbx, STATE(_result._to_call._callee));
+  // get method just executed
+  __ movptr(rbx, STATE(_result._to_call._callee));
   // callee left args on top of expression stack, remove them
   __ load_unsigned_word(rcx, Address(rbx, methodOopDesc::size_of_parameters_offset()));
-  __ leal(rsp, Address(rsp, rcx, Address::times_4));
+  __ lea(rsp, Address(rsp, rcx, Address::times_ptr));
-  __ movl(rcx, Address(rbx, methodOopDesc::result_index_offset()));
+  __ movl(rcx, Address(rbx, methodOopDesc::result_index_offset()));
   ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
-  // Address index(noreg, rax, Address::times_4);
-  __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_4)));
-  // __ movl(rcx, Address(noreg, rcx, Address::times_4, int(AbstractInterpreter::_tosca_to_stack)));
+  // Address index(noreg, rax, Address::times_ptr);
+  __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
+  // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
   __ call(rcx); // call result converter
   __ jmp(resume_interpreter);
@@ -1887,28 +1966,28 @@
   __ bind(return_with_exception);
   // Exception present, empty stack
-  __ movl(rsp, STATE(_stack_base));
+  __ movptr(rsp, STATE(_stack_base));
   __ jmp(resume_interpreter);
   // Return from interpreted method we return result appropriate to the caller (i.e. "recursive"
   // interpreter call, or native) and unwind this interpreter activation.
   // All monitors should be unlocked.
-  __ bind(return_from_interpreted_method);
+  __ bind(return_from_interpreted_method);
   Label return_to_initial_caller;
-  __ movl(rbx, STATE(_method)); // get method just executed
-  __ cmpl(STATE(_prev_link), (int)NULL); // returning from "recursive" interpreter call?
+  __ movptr(rbx, STATE(_method)); // get method just executed
+  __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call?
__ movl(rax, Address(rbx, methodOopDesc::result_index_offset())); // get result type index __ jcc(Assembler::equal, return_to_initial_caller); // back to native code (call_stub/c1/c2) // Copy result to callers java stack ExternalAddress stack_to_stack((address)CppInterpreter::_stack_to_stack); - // Address index(noreg, rax, Address::times_4); + // Address index(noreg, rax, Address::times_ptr); - __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_4))); - // __ movl(rax, Address(noreg, rax, Address::times_4, int(AbstractInterpreter::_stack_to_stack))); + __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_ptr))); + // __ movl(rax, Address(noreg, rax, Address::times_ptr, int(AbstractInterpreter::_stack_to_stack))); __ call(rax); // call result converter Label unwind_recursive_activation; @@ -1918,12 +1997,12 @@ // result converter left rax pointing to top of the java stack for method we are returning // to. Now all we must do is unwind the state from the completed call - __ movl(state, STATE(_prev_link)); // unwind state + __ movptr(state, STATE(_prev_link)); // unwind state __ leave(); // pop the frame - __ movl(rsp, rax); // unwind stack to remove args + __ mov(rsp, rax); // unwind stack to remove args // Resume the interpreter. The current frame contains the current interpreter - // state object. + // state object. // __ bind(resume_interpreter); @@ -1931,25 +2010,25 @@ // state == interpreterState object for method we are resuming __ movl(STATE(_msg), (int)BytecodeInterpreter::method_resume); - __ leal(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present) - __ movl(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed, + __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present) + __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed, // result if any on stack already ) - __ movl(rsp, STATE(_stack_limit)); // restore expression stack to full depth - __ jmp(call_interpreter_2); // No need to bang + __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth + __ jmp(call_interpreter_2); // No need to bang - // interpreter returning to native code (call_stub/c1/c2) + // interpreter returning to native code (call_stub/c1/c2) // convert result and unwind initial activation // rax - result index __ bind(return_to_initial_caller); ExternalAddress stack_to_native((address)CppInterpreter::_stack_to_native_abi); - // Address index(noreg, rax, Address::times_4); + // Address index(noreg, rax, Address::times_ptr); - __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_4))); + __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_ptr))); __ call(rax); // call result converter Label unwind_initial_activation; - __ bind(unwind_initial_activation); + __ bind(unwind_initial_activation); // RETURN TO CALL_STUB/C1/C2 code (result if any in rax/rdx ST(0)) @@ -1967,11 +2046,11 @@ // return restoring the stack to the original sender_sp value - __ movl(rcx, STATE(_sender_sp)); + __ movptr(rcx, STATE(_sender_sp)); __ leave(); - __ popl(rdi); // get return address + __ pop(rdi); // get return address // set stack to sender's sp - __ movl(rsp, rcx); + __ mov(rsp, rcx); __ jmp(rdi); // return to call_stub // OSR request, adjust return address to make current frame into adapter frame @@ -1985,17 +2064,16 @@ // it or is it callstub/compiled? 
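The return paths above share one pattern: the method's result-type index selects an entry in a table of generated converter stubs (CppInterpreter::_tosca_to_stack, _stack_to_stack, _stack_to_native_abi) and the code calls through it. A hedged sketch of that dispatch shape, with toy types rather than the real stub tables:

  // Toy model: converters are plain function pointers indexed by result type.
  typedef void (*converter_fn)();

  void call_result_converter(converter_fn* table, int result_type_index) {
    // Same shape as: movptr(rax, ArrayAddress(table, index)); call(rax);
    table[result_type_index]();
  }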
// Move buffer to the expected parameter location - __ movl(rcx, STATE(_result._osr._osr_buf)); + __ movptr(rcx, STATE(_result._osr._osr_buf)); - __ movl(rax, STATE(_result._osr._osr_entry)); + __ movptr(rax, STATE(_result._osr._osr_entry)); - __ cmpl(STATE(_prev_link), (int)NULL); // returning from "recursive" interpreter call? + __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call? __ jcc(Assembler::equal, remove_initial_frame); // back to native code (call_stub/c1/c2) - // __ movl(state, STATE(_prev_link)); // unwind state - __ movl(rsi, STATE(_sender_sp)); // get sender's sp in expected register + __ movptr(sender_sp_on_entry, STATE(_sender_sp)); // get sender's sp in expected register __ leave(); // pop the frame - __ movl(rsp, rsi); // trim any stack expansion + __ mov(rsp, sender_sp_on_entry); // trim any stack expansion // We know we are calling compiled so push specialized return @@ -2009,14 +2087,14 @@ __ bind(remove_initial_frame); - __ movl(rdx, STATE(_sender_sp)); + __ movptr(rdx, STATE(_sender_sp)); __ leave(); // get real return - __ popl(rsi); + __ pop(rsi); // set stack to sender's sp - __ movl(rsp, rdx); + __ mov(rsp, rdx); // repush real return - __ pushl(rsi); + __ push(rsi); // Enter OSR nmethod __ jmp(rax); @@ -2031,10 +2109,10 @@ // stack points to next free location and not top element on expression stack // method expects sp to be pointing to topmost element - __ movl(rsp, STATE(_stack)); // pop args to c++ interpreter, set sp to java stack top - __ leal(rsp, Address(rsp, wordSize)); + __ movptr(rsp, STATE(_stack)); // pop args to c++ interpreter, set sp to java stack top + __ lea(rsp, Address(rsp, wordSize)); - __ movl(rbx, STATE(_result._to_call._callee)); // get method to execute + __ movptr(rbx, STATE(_result._to_call._callee)); // get method to execute // don't need a return address if reinvoking interpreter @@ -2050,13 +2128,13 @@ __ cmpptr(STATE(_result._to_call._callee_entry_point), entry.addr()); // returning to interpreter? __ jcc(Assembler::equal, re_dispatch); // yes - __ popl(rax); // pop dummy address + __ pop(rax); // pop dummy address // get specialized entry - __ movl(rax, STATE(_result._to_call._callee_entry_point)); + __ movptr(rax, STATE(_result._to_call._callee_entry_point)); // set sender SP - __ movl(rsi, rsp); + __ mov(sender_sp_on_entry, rsp); // method uses specialized entry, push a return so we look like call stub setup // this path will handle fact that result is returned in registers and not @@ -2076,14 +2154,14 @@ Label unwind_initial_with_pending_exception; __ bind(throw_exception); - __ cmpl(STATE(_prev_link), (int)NULL); // returning from recursive interpreter call? + __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from recursive interpreter call? __ jcc(Assembler::equal, unwind_initial_with_pending_exception); // no, back to native code (call_stub/c1/c2) - __ movl(rax, STATE(_locals)); // pop parameters get new stack value - __ addl(rax, wordSize); // account for prepush before we return + __ movptr(rax, STATE(_locals)); // pop parameters get new stack value + __ addptr(rax, wordSize); // account for prepush before we return __ jmp(unwind_recursive_activation); __ bind(unwind_initial_with_pending_exception); - + // We will unwind the current (initial) interpreter frame and forward // the exception to the caller. We must put the exception in the // expected register and clear pending exception and then forward. 
@@ -2099,7 +2177,7 @@ bool synchronized = false; address entry_point = NULL; - switch (kind) { + switch (kind) { case Interpreter::zerolocals : break; case Interpreter::zerolocals_synchronized: synchronized = true; break; case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break; @@ -2143,7 +2221,7 @@ // total static overhead size. Account for interpreter state object, return // address, saved rbp and 2 words for a "static long no_params() method" issue. - const int overhead_size = sizeof(BytecodeInterpreter)/wordSize + + const int overhead_size = sizeof(BytecodeInterpreter)/wordSize + ( frame::sender_sp_offset - frame::link_offset) + 2; const int method_stack = (method->max_locals() + method->max_stack()) * @@ -2183,9 +2261,9 @@ to_fill->_mdx = NULL; to_fill->_stack = stack; if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) { - to_fill->_msg = deopt_resume2; + to_fill->_msg = deopt_resume2; } else { - to_fill->_msg = method_resume; + to_fill->_msg = method_resume; } to_fill->_result._to_call._bcp_advance = 0; to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone @@ -2213,9 +2291,9 @@ // See generate_compute_interpreter_state. to_fill->_stack_limit = stack_base - (method->max_stack() + 1); to_fill->_monitor_base = (BasicObjectLock*) monitor_base; - + to_fill->_self_link = to_fill; - assert(stack >= to_fill->_stack_limit && stack < to_fill->_stack_base, + assert(stack >= to_fill->_stack_limit && stack < to_fill->_stack_base, "Stack top out of range"); } @@ -2240,7 +2318,7 @@ // NOTE: tempcount is the current size of the java expression stack. For top most // frames we will allocate a full sized expression stack and not the curback // version that non-top frames have. - + // Calculate the amount our frame will be adjust by the callee. For top frame // this is zero. @@ -2253,7 +2331,7 @@ int monitor_size = sizeof(BasicObjectLock) * moncount; // First calculate the frame size without any java expression stack - int short_frame_size = size_activation_helper(extra_locals_size, + int short_frame_size = size_activation_helper(extra_locals_size, monitor_size); // Now with full size expression stack @@ -2296,8 +2374,8 @@ // adjust the stack?? HMMM QQQ // if (caller->is_interpreted_frame()) { - // locals must agree with the caller because it will be used to set the - // caller's tos when we return. + // locals must agree with the caller because it will be used to set the + // caller's tos when we return. interpreterState prev = caller->get_interpreterState(); // stack() is prepushed. 
locals = prev->stack() + method->size_of_parameters(); @@ -2314,16 +2392,16 @@ intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size); /* +1 because stack is always prepushed */ intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord); - - BytecodeInterpreter::layout_interpreterState(cur_state, + + BytecodeInterpreter::layout_interpreterState(cur_state, caller, interpreter_frame, - method, - locals, - stack, - stack_base, - monitor_base, + method, + locals, + stack, + stack_base, + monitor_base, frame_bottom, is_top_frame); --- old/hotspot/src/cpu/x86/vm/disassembler_x86.hpp 2009-08-01 04:07:50.702331781 +0100 +++ new/hotspot/src/cpu/x86/vm/disassembler_x86.hpp 2009-08-01 04:07:50.633233951 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)disassembler_x86.hpp 1.22 07/05/05 17:04:15 JVM" -#endif /* - * Copyright 1997-1999 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,28 +19,13 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ -// The disassembler prints out intel 386 code annotated -// with Java specific information. - -class Disassembler { -#ifndef PRODUCT - private: - typedef address (*decode_func)(address start, DisassemblerEnv* env); - // points the library. - static void* _library; - // points to the decode function. - static decode_func _decode_instruction; - // tries to load library and return whether it succedded. - static bool load_library(); - // decodes one instruction and return the start of the next instruction. - static address decode_instruction(address start, DisassemblerEnv* env); -#endif - public: - static void decode(CodeBlob *cb, outputStream* st = NULL) PRODUCT_RETURN; - static void decode(nmethod* nm, outputStream* st = NULL) PRODUCT_RETURN; - static void decode(u_char* begin, u_char* end, outputStream* st = NULL) PRODUCT_RETURN; -}; + static int pd_instruction_alignment() { + return 1; + } + static const char* pd_cpu_opts() { + return ""; + } --- old/hotspot/src/cpu/x86/vm/dump_x86_32.cpp 2009-08-01 04:07:51.555774201 +0100 +++ new/hotspot/src/cpu/x86/vm/dump_x86_32.cpp 2009-08-01 04:07:51.460660626 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)dump_x86_32.cpp 1.14 07/09/17 09:25:59 JVM" -#endif /* - * Copyright 2004-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2004-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ # include "incls/_precompiled.incl" @@ -35,8 +32,8 @@ // This method will be called (as any other Klass virtual method) with // the Klass itself as the first argument. Example: // -// oop obj; -// int size = obj->klass()->klass_part()->oop_size(this); +// oop obj; +// int size = obj->klass()->klass_part()->oop_size(this); // // for which the virtual method call is Klass::oop_size(); // @@ -94,35 +91,34 @@ // are on the stack, except that the "this" pointer is in rcx. 
#else // Expecting to be called with Unix conventions -- the arguments - // are on the stack, including the "this" pointer. + // are on the stack, including the "this" pointer. #endif // In addition, rax was set (above) to the offset of the method in the // table. #ifdef WIN32 - __ pushl(rcx); // save "this" + __ push(rcx); // save "this" #endif - __ movl(rcx, rax); - __ shrl(rcx, 8); // isolate vtable identifier. - __ shll(rcx, LogBytesPerWord); + __ mov(rcx, rax); + __ shrptr(rcx, 8); // isolate vtable identifier. + __ shlptr(rcx, LogBytesPerWord); Address index(noreg, rcx, Address::times_1); ExternalAddress vtbl((address)vtbl_list); __ movptr(rdx, ArrayAddress(vtbl, index)); // get correct vtable address. #ifdef WIN32 - __ popl(rcx); // restore "this" + __ pop(rcx); // restore "this" #else - __ movl(rcx, Address(rsp, 4)); // fetch "this" + __ movptr(rcx, Address(rsp, BytesPerWord)); // fetch "this" #endif - __ movl(Address(rcx, 0), rdx); // update vtable pointer. + __ movptr(Address(rcx, 0), rdx); // update vtable pointer. - __ andl(rax, 0x00ff); // isolate vtable method index - __ shll(rax, LogBytesPerWord); - __ addl(rax, rdx); // address of real method pointer. - __ jmp(Address(rax, 0)); // get real method pointer. + __ andptr(rax, 0x00ff); // isolate vtable method index + __ shlptr(rax, LogBytesPerWord); + __ addptr(rax, rdx); // address of real method pointer. + __ jmp(Address(rax, 0)); // get real method pointer. __ flush(); *mc_top = (char*)__ pc(); } - --- old/hotspot/src/cpu/x86/vm/dump_x86_64.cpp 2009-08-01 04:07:52.442079924 +0100 +++ new/hotspot/src/cpu/x86/vm/dump_x86_64.cpp 2009-08-01 04:07:52.353927678 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)dump_x86_64.cpp 1.13 07/09/17 09:25:59 JVM" -#endif /* - * Copyright 2004-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2004-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ # include "incls/_precompiled.incl" @@ -35,8 +32,8 @@ // This method will be called (as any other Klass virtual method) with // the Klass itself as the first argument. Example: // -// oop obj; -// int size = obj->klass()->klass_part()->oop_size(this); +// oop obj; +// int size = obj->klass()->klass_part()->oop_size(this); // // for which the virtual method call is Klass::oop_size(); // @@ -93,26 +90,25 @@ // are on the stack and the "this" pointer is in c_rarg0. In addition, rax // was set (above) to the offset of the method in the table. - __ pushq(c_rarg1); // save & free register - __ pushq(c_rarg0); // save "this" - __ movq(c_rarg0, rax); - __ shrq(c_rarg0, 8); // isolate vtable identifier. - __ shlq(c_rarg0, LogBytesPerWord); - __ lea(c_rarg1, ExternalAddress((address)vtbl_list)); // ptr to correct vtable list. - __ addq(c_rarg1, c_rarg0); // ptr to list entry. - __ movq(c_rarg1, Address(c_rarg1, 0)); // get correct vtable address. - __ popq(c_rarg0); // restore "this" - __ movq(Address(c_rarg0, 0), c_rarg1); // update vtable pointer. - - __ andq(rax, 0x00ff); // isolate vtable method index - __ shlq(rax, LogBytesPerWord); - __ addq(rax, c_rarg1); // address of real method pointer. - __ popq(c_rarg1); // restore register. - __ movq(rax, Address(rax, 0)); // get real method pointer. 
- __ jmp(rax); // jump to the real method. + __ push(c_rarg1); // save & free register + __ push(c_rarg0); // save "this" + __ mov(c_rarg0, rax); + __ shrptr(c_rarg0, 8); // isolate vtable identifier. + __ shlptr(c_rarg0, LogBytesPerWord); + __ lea(c_rarg1, ExternalAddress((address)vtbl_list)); // ptr to correct vtable list. + __ addptr(c_rarg1, c_rarg0); // ptr to list entry. + __ movptr(c_rarg1, Address(c_rarg1, 0)); // get correct vtable address. + __ pop(c_rarg0); // restore "this" + __ movptr(Address(c_rarg0, 0), c_rarg1); // update vtable pointer. + + __ andptr(rax, 0x00ff); // isolate vtable method index + __ shlptr(rax, LogBytesPerWord); + __ addptr(rax, c_rarg1); // address of real method pointer. + __ pop(c_rarg1); // restore register. + __ movptr(rax, Address(rax, 0)); // get real method pointer. + __ jmp(rax); // jump to the real method. __ flush(); *mc_top = (char*)__ pc(); } - --- old/hotspot/src/cpu/x86/vm/frame_x86.cpp 2009-08-01 04:07:53.322534112 +0100 +++ new/hotspot/src/cpu/x86/vm/frame_x86.cpp 2009-08-01 04:07:53.249085470 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)frame_x86.cpp 1.219 07/09/17 09:36:42 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ # include "incls/_precompiled.incl" @@ -40,47 +37,190 @@ address sp = (address)_sp; address fp = (address)_fp; address unextended_sp = (address)_unextended_sp; - bool sp_safe = (sp != NULL && - (sp <= thread->stack_base()) && - (sp >= thread->stack_base() - thread->stack_size())); - bool unextended_sp_safe = (unextended_sp != NULL && - (unextended_sp <= thread->stack_base()) && - (unextended_sp >= thread->stack_base() - thread->stack_size())); - bool fp_safe = (fp != NULL && - (fp <= thread->stack_base()) && - (fp >= thread->stack_base() - thread->stack_size())); - if (sp_safe && unextended_sp_safe && fp_safe) { + // sp must be within the stack + bool sp_safe = (sp <= thread->stack_base()) && + (sp >= thread->stack_base() - thread->stack_size()); + + if (!sp_safe) { + return false; + } + + // unextended sp must be within the stack and above or equal sp + bool unextended_sp_safe = (unextended_sp <= thread->stack_base()) && + (unextended_sp >= sp); + + if (!unextended_sp_safe) { + return false; + } + + // an fp must be within the stack and above (but not equal) sp + bool fp_safe = (fp <= thread->stack_base()) && (fp > sp); + + // We know sp/unextended_sp are safe only fp is questionable here + + // If the current frame is known to the code cache then we can attempt to + // to construct the sender and do some validation of it. This goes a long way + // toward eliminating issues when we get in frame construction code + + if (_cb != NULL ) { + + // First check if frame is complete and tester is reliable // Unfortunately we can only check frame complete for runtime stubs and nmethod // other generic buffer blobs are more problematic so we just assume they are // ok. adapter blobs never have a frame complete and are never ok. 
- if (_cb != NULL && !_cb->is_frame_complete_at(_pc)) { + + if (!_cb->is_frame_complete_at(_pc)) { if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) { return false; } } + // Entry frame checks + if (is_entry_frame()) { + // an entry frame must have a valid fp. + + if (!fp_safe) return false; + + // Validate the JavaCallWrapper an entry frame must have + + address jcw = (address)entry_frame_call_wrapper(); + + bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > fp); + + return jcw_safe; + + } + + intptr_t* sender_sp = NULL; + address sender_pc = NULL; + + if (is_interpreted_frame()) { + // fp must be safe + if (!fp_safe) { + return false; + } + + sender_pc = (address) this->fp()[return_addr_offset]; + sender_sp = (intptr_t*) addr_at(sender_sp_offset); + + } else { + // must be some sort of compiled/runtime frame + // fp does not have to be safe (although it could be check for c1?) + + sender_sp = _unextended_sp + _cb->frame_size(); + // On Intel the return_address is always the word on the stack + sender_pc = (address) *(sender_sp-1); + } + + // We must always be able to find a recognizable pc + CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); + if (sender_pc == NULL || sender_blob == NULL) { + return false; + } + + + // If the potential sender is the interpreter then we can do some more checking + if (Interpreter::contains(sender_pc)) { + + // ebp is always saved in a recognizable place in any code we generate. However + // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved ebp + // is really a frame pointer. + + intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset); + bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp); + + if (!saved_fp_safe) { + return false; + } + + // construct the potential sender + + frame sender(sender_sp, saved_fp, sender_pc); + + return sender.is_interpreted_frame_valid(thread); + + } + + // Could just be some random pointer within the codeBlob + + if (!sender_blob->instructions_contains(sender_pc)) return false; + + // We should never be able to see an adapter if the current frame is something from code cache + + if ( sender_blob->is_adapter_blob()) { + return false; + } + + // Could be the call_stub + + if (StubRoutines::returns_to_call_stub(sender_pc)) { + intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset); + bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp); + + if (!saved_fp_safe) { + return false; + } + + // construct the potential sender + + frame sender(sender_sp, saved_fp, sender_pc); + + // Validate the JavaCallWrapper an entry frame must have + address jcw = (address)sender.entry_frame_call_wrapper(); + + bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > (address)sender.fp()); + + return jcw_safe; + } + + // If the frame size is 0 something is bad because every nmethod has a non-zero frame size + // because the return address counts against the callee's frame. + + if (sender_blob->frame_size() == 0) { + assert(!sender_blob->is_nmethod(), "should count return address at least"); + return false; + } + + // We should never be able to see anything here except an nmethod. If something in the + // code cache (current frame) is called by an entity within the code cache that entity + // should not be anything but the call stub (already covered), the interpreter (already covered) + // or an nmethod. 
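The new frame::safe_for_sender logic above repeats one bounds test for sp, unextended_sp, fp, the saved fp and the JavaCallWrapper: the pointer must lie within the thread's stack and strictly above some lower bound, since the stack grows down. A small sketch of that predicate, using a toy stack descriptor instead of JavaThread:

  #include <cstddef>

  struct ToyStack { const char* base; size_t size; };  // stand-in for the thread's stack bounds

  // "p is plausibly on this stack and strictly above low" -- the shape of the
  // sp/unextended_sp/fp/jcw checks added in frame::safe_for_sender.
  bool within_stack_above(const ToyStack& s, const char* p, const char* low) {
    return p <= s.base && p >= s.base - s.size && p > low;
  }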
+ + assert(sender_blob->is_nmethod(), "Impossible call chain"); + + // Could put some more validation for the potential non-interpreted sender + // frame we'd create by calling sender if I could think of any. Wait for next crash in forte... + + // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb + + // We've validated the potential sender that would be created return true; } - // Note: fp == NULL is not really a prerequisite for this to be safe to - // walk for c2. However we've modified the code such that if we get - // a failure with fp != NULL that we then try with FP == NULL. - // This is basically to mimic what a last_frame would look like if - // c2 had generated it. - if (sp_safe && unextended_sp_safe && fp == NULL) { - // frame must be complete if fp == NULL as fp == NULL is only sensible - // if we are looking at a nmethod and frame complete assures us of that. - if (_cb != NULL && _cb->is_frame_complete_at(_pc) && _cb->is_compiled_by_c2()) { - return true; - } + + // Must be native-compiled frame. Since sender will try and use fp to find + // linkages it must be safe + + if (!fp_safe) { + return false; } - return false; + + // Will the pc we fetch be non-zero (which we'll find at the oldest frame) + + if ( (address) this->fp()[return_addr_offset] == NULL) return false; + + + // could try and do some more potential verification of native frame if we could think of some... + + return true; + } -void frame::patch_pc(Thread* thread, address pc) { +void frame::patch_pc(Thread* thread, address pc) { if (TracePcPatching) { - tty->print_cr("patch_pc at address 0x%x [0x%x -> 0x%x] ", &((address *)sp())[-1], ((address *)sp())[-1], pc); + tty->print_cr("patch_pc at address" INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "] ", + &((address *)sp())[-1], ((address *)sp())[-1], pc); } - ((address *)sp())[-1] = pc; + ((address *)sp())[-1] = pc; _cb = CodeCache::find_blob(pc); if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) { address orig = (((nmethod*)_cb)->get_original_pc(this)); @@ -173,11 +313,11 @@ // frame of that chunk as the sender JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); assert(!entry_frame_is_first(), "next Java fp must be non zero"); - assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack"); - map->clear(); + assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack"); + map->clear(); assert(map->include_argument_oops(), "should be set by clear"); if (jfa->last_Java_pc() != NULL ) { - frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc()); + frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc()); return fr; } frame fr(jfa->last_Java_sp(), jfa->last_Java_fp()); @@ -222,7 +362,7 @@ assert(map != NULL, "map must be set"); const bool c1_compiled = _cb->is_compiled_by_c1(); - // frame owned by optimizing compiler + // frame owned by optimizing compiler intptr_t* sender_sp = NULL; assert(_cb->frame_size() >= 0, "must have non-zero frame size"); @@ -233,7 +373,7 @@ // This is the saved value of ebp which may or may not really be an fp. // it is only an fp if the sender is an interpreter frame (or c1?) - + intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset); if (map->update_map()) { @@ -263,7 +403,7 @@ return frame(sender_sp, saved_fp, sender_pc); } -frame frame::sender(RegisterMap* map) const { +frame frame::sender(RegisterMap* map) const { // Default is we done have to follow them. 
The sender_for_xxx will // update it accordingly map->set_include_argument_oops(false); @@ -273,7 +413,7 @@ assert(_cb == CodeCache::find_blob(pc()),"Must be the same"); if (_cb != NULL) { - return sender_for_compiled_frame(map); + return sender_for_compiled_frame(map); } // Must be native-compiled frame, i.e. the marshaling code for native // methods that exists in the core system. @@ -285,7 +425,7 @@ assert(is_interpreted_frame(), "must be interpreter frame"); methodOop method = interpreter_frame_method(); // When unpacking an optimized frame the frame pointer is - // adjusted with: + // adjusted with: int diff = (method->max_locals() - method->size_of_parameters()) * Interpreter::stackElementWords(); return _fp == (fp - diff); @@ -295,7 +435,7 @@ // nothing done here now } -bool frame::is_interpreted_frame_valid() const { +bool frame::is_interpreted_frame_valid(JavaThread* thread) const { // QQQ #ifdef CC_INTERP #else @@ -315,9 +455,45 @@ if (fp() <= sp()) { // this attempts to deal with unsigned comparison above return false; } - if (fp() - sp() > 4096) { // stack frames shouldn't be large. + + // do some validation of frame elements + + // first the method + + methodOop m = *interpreter_frame_method_addr(); + + // validate the method we'd find in this potential sender + if (!Universe::heap()->is_valid_method(m)) return false; + + // stack frames shouldn't be much larger than max_stack elements + + if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) { + return false; + } + + // validate bci/bcx + + intptr_t bcx = interpreter_frame_bcx(); + if (m->validate_bci_from_bcx(bcx) < 0) { return false; } + + // validate constantPoolCacheOop + + constantPoolCacheOop cp = *interpreter_frame_cache_addr(); + + if (cp == NULL || + !Space::is_aligned(cp) || + !Universe::heap()->is_permanent((void*)cp)) return false; + + // validate locals + + address locals = (address) *interpreter_frame_locals_addr(); + + if (locals > thread->stack_base() || locals < (address) fp()) return false; + + // We'd have to be pretty unlucky to be mislead at this point + #endif // CC_INTERP return true; } @@ -353,7 +529,7 @@ } switch (type) { - case T_OBJECT : + case T_OBJECT : case T_ARRAY : { oop obj; if (method->is_native()) { @@ -374,18 +550,18 @@ case T_BYTE : value_result->b = *(jbyte*)tos_addr; break; case T_CHAR : value_result->c = *(jchar*)tos_addr; break; case T_SHORT : value_result->s = *(jshort*)tos_addr; break; - case T_INT : value_result->i = *(jint*)tos_addr; break; + case T_INT : value_result->i = *(jint*)tos_addr; break; case T_LONG : value_result->j = *(jlong*)tos_addr; break; case T_FLOAT : { #ifdef AMD64 value_result->f = *(jfloat*)tos_addr; #else if (method->is_native()) { - jdouble d = *(jdouble*)tos_addr; // Result was in ST0 so need to convert to jfloat - value_result->f = (jfloat)d; + jdouble d = *(jdouble*)tos_addr; // Result was in ST0 so need to convert to jfloat + value_result->f = (jfloat)d; } else { - value_result->f = *(jfloat*)tos_addr; - } + value_result->f = *(jfloat*)tos_addr; + } #endif // AMD64 break; } @@ -398,7 +574,7 @@ } -intptr_t* frame::interpreter_frame_tos_at(jint offset) const { +intptr_t* frame::interpreter_frame_tos_at(jint offset) const { int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize); return &interpreter_frame_tos_address()[index]; } --- old/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp 2009-08-01 04:07:54.256315242 +0100 +++ new/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp 2009-08-01 04:07:54.182452053 +0100 @@ -1,8 +1,5 @@ -#ifdef 
USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)frame_x86.inline.hpp 1.76 07/09/17 09:35:34 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,14 +19,14 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // Inline functions for Intel frames: // Constructors: -inline frame::frame() { +inline frame::frame() { _pc = NULL; _sp = NULL; _unextended_sp = NULL; @@ -38,7 +35,7 @@ _deopt_state = unknown; } -inline frame:: frame(intptr_t* sp, intptr_t* fp, address pc) { +inline frame:: frame(intptr_t* sp, intptr_t* fp, address pc) { _sp = sp; _unextended_sp = sp; _fp = fp; @@ -54,7 +51,7 @@ } } -inline frame:: frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) { +inline frame:: frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) { _sp = sp; _unextended_sp = unextended_sp; _fp = fp; @@ -75,15 +72,20 @@ _unextended_sp = sp; _fp = fp; _pc = (address)(sp[-1]); - assert(_pc != NULL, "no pc?"); + + // Here's a sticky one. This constructor can be called via AsyncGetCallTrace + // when last_Java_sp is non-null but the pc fetched is junk. If we are truly + // unlucky the junk value could be to a zombied method and we'll die on the + // find_blob call. This is also why we can have no asserts on the validity + // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler + // -> pd_last_frame should use a specialized version of pd_last_frame which could + // call a specilaized frame constructor instead of this one. + // Then we could use the assert below. However this assert is of somewhat dubious + // value. + // assert(_pc != NULL, "no pc?"); + _cb = CodeCache::find_blob(_pc); - // In case of native stubs, the pc retreived here might be - // wrong. (the _last_native_pc will have the right value) - // So do not put add any asserts on the _pc here. - - // QQQ The above comment is wrong and has been wrong for years. This constructor - // should (and MUST) not be called in that situation. In the native situation - // the pc should be supplied to the constructor. + _deopt_state = not_deoptimized; if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) { _pc = (((nmethod*)_cb)->get_original_pc(this)); @@ -109,14 +111,14 @@ // frame. inline intptr_t* frame::id(void) const { return unextended_sp(); } -// Relationals on frames based +// Relationals on frames based // Return true if the frame is younger (more recent activation) than the frame represented by id inline bool frame::is_younger(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id"); - return this->id() < id ; } + return this->id() < id ; } // Return true if the frame is older (less recent activation) than the frame represented by id inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id"); - return this->id() > id ; } + return this->id() > id ; } @@ -140,7 +142,7 @@ return ((interpreterState)addr_at( -sizeof(BytecodeInterpreter)/wordSize )); } -inline intptr_t* frame::sender_sp() const { +inline intptr_t* frame::sender_sp() const { // Hmm this seems awfully expensive QQQ, is this really called with interpreted frames? 
if (is_interpreted_frame()) { assert(false, "should never happen"); @@ -150,14 +152,14 @@ } } -inline intptr_t** frame::interpreter_frame_locals_addr() const { +inline intptr_t** frame::interpreter_frame_locals_addr() const { assert(is_interpreted_frame(), "must be interpreted"); return &(get_interpreterState()->_locals); } inline intptr_t* frame::interpreter_frame_bcx_addr() const { assert(is_interpreted_frame(), "must be interpreted"); - return (jint*) &(get_interpreterState()->_bcp); + return (intptr_t*) &(get_interpreterState()->_bcp); } @@ -170,14 +172,14 @@ // Method -inline methodOop* frame::interpreter_frame_method_addr() const { +inline methodOop* frame::interpreter_frame_method_addr() const { assert(is_interpreted_frame(), "must be interpreted"); return &(get_interpreterState()->_method); } inline intptr_t* frame::interpreter_frame_mdx_addr() const { assert(is_interpreted_frame(), "must be interpreted"); - return (jint*) &(get_interpreterState()->_mdx); + return (intptr_t*) &(get_interpreterState()->_mdx); } // top of expression stack @@ -189,12 +191,12 @@ #else /* asm interpreter */ inline intptr_t* frame::sender_sp() const { return addr_at( sender_sp_offset); } -inline intptr_t** frame::interpreter_frame_locals_addr() const { - return (intptr_t**)addr_at(interpreter_frame_locals_offset); +inline intptr_t** frame::interpreter_frame_locals_addr() const { + return (intptr_t**)addr_at(interpreter_frame_locals_offset); } inline intptr_t* frame::interpreter_frame_last_sp() const { - return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset); + return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset); } inline intptr_t* frame::interpreter_frame_bcx_addr() const { @@ -216,7 +218,7 @@ // Method -inline methodOop* frame::interpreter_frame_method_addr() const { +inline methodOop* frame::interpreter_frame_method_addr() const { return (methodOop*)addr_at(interpreter_frame_method_offset); } @@ -248,7 +250,7 @@ inline intptr_t* frame::interpreter_frame_expression_stack() const { intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end(); - return monitor_end-1; + return monitor_end-1; } @@ -257,15 +259,15 @@ // Entry frames -inline JavaCallWrapper* frame::entry_frame_call_wrapper() const { - return (JavaCallWrapper*)at(entry_frame_call_wrapper_offset); +inline JavaCallWrapper* frame::entry_frame_call_wrapper() const { + return (JavaCallWrapper*)at(entry_frame_call_wrapper_offset); } // Compiled frames inline int frame::local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) { - return (nof_args - local_index + (local_index < nof_args ? 1: -1)); + return (nof_args - local_index + (local_index < nof_args ? 1: -1)); } inline int frame::monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) { @@ -282,11 +284,10 @@ -inline oop frame::saved_oop_result(RegisterMap* map) const { +inline oop frame::saved_oop_result(RegisterMap* map) const { return *((oop*) map->location(rax->as_VMReg())); } inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) { *((oop*) map->location(rax->as_VMReg())) = obj; } - --- old/hotspot/src/cpu/x86/vm/icache_x86.cpp 2009-08-01 04:07:55.141416384 +0100 +++ new/hotspot/src/cpu/x86/vm/icache_x86.cpp 2009-08-01 04:07:55.072314014 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)icache_x86.cpp 1.22 07/09/17 09:33:47 JVM" -#endif /* - * Copyright 1997-2004 Sun Microsystems, Inc. All Rights Reserved. 
+ * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -51,7 +48,7 @@ __ bind(flush_line); __ clflush(Address(addr, 0)); - __ addq(addr, ICache::line_size); + __ addptr(addr, ICache::line_size); __ decrementl(lines); __ jcc(Assembler::notZero, flush_line); @@ -63,7 +60,7 @@ const Address magic(rsp, 3*wordSize); __ lock(); __ addl(Address(rsp, 0), 0); #endif // AMD64 - __ movl(rax, magic); // Handshake with caller to make sure it happened! + __ movptr(rax, magic); // Handshake with caller to make sure it happened! __ ret(0); // Must be set here so StubCodeMark destructor can call the flush stub. --- old/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp 2009-08-01 04:07:55.981556688 +0100 +++ new/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp 2009-08-01 04:07:55.895198088 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)interp_masm_x86_32.cpp 1.172 07/09/17 09:26:18 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -32,8 +29,8 @@ // Implementation of InterpreterMacroAssembler #ifdef CC_INTERP void InterpreterMacroAssembler::get_method(Register reg) { - movl(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize))); - movl(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method))); + movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize))); + movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method))); } #endif // CC_INTERP @@ -56,7 +53,7 @@ // when jvm built with ASSERTs. #ifdef ASSERT { Label L; - cmpl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); jcc(Assembler::equal, L); stop("InterpreterMacroAssembler::call_VM_leaf_base: last_sp != NULL"); bind(L); @@ -82,7 +79,7 @@ ) { #ifdef ASSERT { Label L; - cmpl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); jcc(Assembler::equal, L); stop("InterpreterMacroAssembler::call_VM_base: last_sp != NULL"); bind(L); @@ -115,7 +112,7 @@ testl(pop_cond, JavaThread::popframe_pending_bit); jcc(Assembler::zero, L); testl(pop_cond, JavaThread::popframe_processing_bit); - jcc(Assembler::notZero, L); + jcc(Assembler::notZero, L); // Call Interpreter::remove_activation_preserving_args_entry() to get the // address of the same-named entrypoint in the generated interpreter code. 
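The check_and_handle_popframe code above only removes the activation when the popframe request is pending and not already being processed. Restated as a boolean, with placeholder bit values rather than HotSpot's actual constants:

  enum { popframe_pending_bit = 1, popframe_processing_bit = 2 };  // placeholder values

  bool should_remove_activation(int popframe_condition) {
    return (popframe_condition & popframe_pending_bit) != 0 &&
           (popframe_condition & popframe_processing_bit) == 0;
  }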
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry)); @@ -135,10 +132,11 @@ const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset() + in_ByteSize(wordSize)); switch (state) { - case atos: movl(rax, oop_addr); - movl(oop_addr, NULL_WORD); + case atos: movptr(rax, oop_addr); + movptr(oop_addr, (int32_t)NULL_WORD); verify_oop(rax, state); break; - case ltos: movl(rdx, val_addr1); // fall through + case ltos: + movl(rdx, val_addr1); // fall through case btos: // fall through case ctos: // fall through case stos: // fall through @@ -149,9 +147,9 @@ default : ShouldNotReachHere(); } // Clean up tos value in the thread object - movl(tos_addr, (int) ilgl); - movl(val_addr, NULL_WORD); - movl(val_addr1, NULL_WORD); + movl(tos_addr, (int32_t) ilgl); + movptr(val_addr, (int32_t)NULL_WORD); + NOT_LP64(movl(val_addr1, (int32_t)NULL_WORD)); } @@ -159,8 +157,8 @@ if (JvmtiExport::can_force_early_return()) { Label L; Register tmp = java_thread; - movl(tmp, Address(tmp, JavaThread::jvmti_thread_state_offset())); - testl(tmp, tmp); + movptr(tmp, Address(tmp, JavaThread::jvmti_thread_state_offset())); + testptr(tmp, tmp); jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit; // Initiate earlyret handling only if it is not already being processed. @@ -173,7 +171,7 @@ // Call Interpreter::remove_activation_early_entry() to get the address of the // same-named entrypoint in the generated interpreter code. get_thread(java_thread); - movl(tmp, Address(java_thread, JavaThread::jvmti_thread_state_offset())); + movptr(tmp, Address(java_thread, JavaThread::jvmti_thread_state_offset())); pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset())); call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1); jmp(rax); @@ -186,7 +184,7 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) { assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode"); movl(reg, Address(rsi, bcp_offset)); - bswap(reg); + bswapl(reg); shrl(reg, 16); } @@ -195,9 +193,9 @@ assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); assert(cache != index, "must use different registers"); load_unsigned_word(index, Address(rsi, bcp_offset)); - movl(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); + movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below"); - shll(index, 2); // convert from field index to ConstantPoolCacheEntry index + shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index } @@ -209,10 +207,10 @@ // convert from field index to ConstantPoolCacheEntry index // and from word offset to byte offset shll(tmp, 2 + LogBytesPerWord); - movl(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); + movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); // skip past the header - addl(cache, in_bytes(constantPoolCacheOopDesc::base_offset())); - addl(cache, tmp); // construct pointer to cache entry + addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset())); + addptr(cache, tmp); // construct pointer to cache entry } @@ -235,22 +233,22 @@ // if the super-klass is an interface or exceptionally deep in the Java // hierarchy and we have to scan the secondary superclass list the hard way. 
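gen_subtype_check above follows the usual strategy: try the one-element secondary-super cache and the self comparison first, and only then scan the secondary supers list "the hard way", updating the cache on a hit. A simplified C++ restatement with toy types (it omits the primary-super/check-offset fast path handled by the first compare):

  #include <vector>

  struct ToyKlass {
    ToyKlass* secondary_super_cache = nullptr;   // last secondary super found
    std::vector<ToyKlass*> secondary_supers;     // interfaces / deep superclasses
  };

  bool is_subtype_of(ToyKlass* sub, ToyKlass* super) {
    if (sub->secondary_super_cache == super) return true;   // immediate positive hit
    if (sub == super) return true;                           // self check
    for (ToyKlass* s : sub->secondary_supers) {              // linear scan, as in repne_scan
      if (s == super) {
        sub->secondary_super_cache = super;                  // missed in cache: update it
        return true;
      }
    }
    return false;
  }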
// See if we get an immediate positive hit - cmpl( rax, Address(Rsub_klass,rcx,Address::times_1) ); + cmpptr( rax, Address(Rsub_klass,rcx,Address::times_1) ); jcc( Assembler::equal,ok_is_subtype ); // Check for immediate negative hit cmpl( rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() ); jcc( Assembler::notEqual, not_subtype ); // Check for self - cmpl( Rsub_klass, rax ); + cmpptr( Rsub_klass, rax ); jcc( Assembler::equal, ok_is_subtype ); // Now do a linear scan of the secondary super-klass chain. - movl( rdi, Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()) ); + movptr( rdi, Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()) ); // EDI holds the objArrayOop of secondary supers. movl( rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));// Load the array length // Skip to start of data; also clear Z flag incase ECX is zero - addl( rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT) ); + addptr( rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT) ); // Scan ECX words at [EDI] for occurance of EAX // Set NZ/Z based on last compare repne_scan(); @@ -258,7 +256,7 @@ // Not equal? jcc( Assembler::notEqual, not_subtype ); // Must be equal but missed in cache. Update cache. - movl( Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax ); + movptr( Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax ); jmp( ok_is_subtype ); bind(not_subtype); @@ -279,7 +277,6 @@ fld_d(Address(rsp, 0)); } } -#endif // CC_INTERP // Java Expression Stack @@ -287,11 +284,11 @@ void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) { if (TaggedStackInterpreter) { Label okay; - cmpl(Address(rsp, wordSize), (int)t); + cmpptr(Address(rsp, wordSize), (int32_t)t); jcc(Assembler::equal, okay); // Also compare if the stack value is zero, then the tag might // not have been set coming from deopt. 
- cmpl(Address(rsp, 0), 0); + cmpptr(Address(rsp, 0), 0); jcc(Assembler::equal, okay); stop("Java Expression stack tag value is bad"); bind(okay); @@ -301,43 +298,43 @@ void InterpreterMacroAssembler::pop_ptr(Register r) { debug_only(verify_stack_tag(frame::TagReference)); - popl(r); - if (TaggedStackInterpreter) addl(rsp, 1 * wordSize); + pop(r); + if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) { - popl(r); + pop(r); // Tag may not be reference for jsr, can be returnAddress - if (TaggedStackInterpreter) popl(tag); + if (TaggedStackInterpreter) pop(tag); } void InterpreterMacroAssembler::pop_i(Register r) { debug_only(verify_stack_tag(frame::TagValue)); - popl(r); - if (TaggedStackInterpreter) addl(rsp, 1 * wordSize); + pop(r); + if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_l(Register lo, Register hi) { debug_only(verify_stack_tag(frame::TagValue)); - popl(lo); - if (TaggedStackInterpreter) addl(rsp, 1 * wordSize); + pop(lo); + if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize); debug_only(verify_stack_tag(frame::TagValue)); - popl(hi); - if (TaggedStackInterpreter) addl(rsp, 1 * wordSize); + pop(hi); + if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_f() { debug_only(verify_stack_tag(frame::TagValue)); fld_s(Address(rsp, 0)); - addl(rsp, 1 * wordSize); - if (TaggedStackInterpreter) addl(rsp, 1 * wordSize); + addptr(rsp, 1 * wordSize); + if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_d() { // Write double to stack contiguously and load into ST0 pop_dtos_to_rsp(); fld_d(Address(rsp, 0)); - addl(rsp, 2 * wordSize); + addptr(rsp, 2 * wordSize); } @@ -347,31 +344,31 @@ if (TaggedStackInterpreter) { // Pop double value into scratch registers debug_only(verify_stack_tag(frame::TagValue)); - popl(rax); - addl(rsp, 1* wordSize); + pop(rax); + addptr(rsp, 1* wordSize); debug_only(verify_stack_tag(frame::TagValue)); - popl(rdx); - addl(rsp, 1* wordSize); - pushl(rdx); - pushl(rax); + pop(rdx); + addptr(rsp, 1* wordSize); + push(rdx); + push(rax); } } void InterpreterMacroAssembler::pop_ftos_to_rsp() { if (TaggedStackInterpreter) { debug_only(verify_stack_tag(frame::TagValue)); - popl(rax); - addl(rsp, 1 * wordSize); - pushl(rax); // ftos is at rsp + pop(rax); + addptr(rsp, 1 * wordSize); + push(rax); // ftos is at rsp } } void InterpreterMacroAssembler::pop(TosState state) { switch (state) { case atos: pop_ptr(rax); break; - case btos: // fall through - case ctos: // fall through - case stos: // fall through + case btos: // fall through + case ctos: // fall through + case stos: // fall through case itos: pop_i(rax); break; case ltos: pop_l(rax, rdx); break; case ftos: pop_f(); break; @@ -383,31 +380,31 @@ } void InterpreterMacroAssembler::push_ptr(Register r) { - if (TaggedStackInterpreter) pushl(frame::TagReference); - pushl(r); + if (TaggedStackInterpreter) push(frame::TagReference); + push(r); } void InterpreterMacroAssembler::push_ptr(Register r, Register tag) { - if (TaggedStackInterpreter) pushl(tag); // tag first - pushl(r); + if (TaggedStackInterpreter) push(tag); // tag first + push(r); } void InterpreterMacroAssembler::push_i(Register r) { - if (TaggedStackInterpreter) pushl(frame::TagValue); - pushl(r); + if (TaggedStackInterpreter) push(frame::TagValue); + push(r); } void InterpreterMacroAssembler::push_l(Register lo, Register hi) { - if (TaggedStackInterpreter) 
pushl(frame::TagValue); - pushl(hi); - if (TaggedStackInterpreter) pushl(frame::TagValue); - pushl(lo); + if (TaggedStackInterpreter) push(frame::TagValue); + push(hi); + if (TaggedStackInterpreter) push(frame::TagValue); + push(lo); } void InterpreterMacroAssembler::push_f() { - if (TaggedStackInterpreter) pushl(frame::TagValue); + if (TaggedStackInterpreter) push(frame::TagValue); // Do not schedule for no AGI! Never write beyond rsp! - subl(rsp, 1 * wordSize); + subptr(rsp, 1 * wordSize); fstp_s(Address(rsp, 0)); } @@ -418,8 +415,8 @@ // high // tag // low - pushl(frame::TagValue); - subl(rsp, 3 * wordSize); + push(frame::TagValue); + subptr(rsp, 3 * wordSize); fstp_d(Address(rsp, 0)); // move high word up to slot n-1 movl(r, Address(rsp, 1*wordSize)); @@ -428,7 +425,7 @@ movl(Address(rsp, 1*wordSize), frame::TagValue); } else { // Do not schedule for no AGI! Never write beyond rsp! - subl(rsp, 2 * wordSize); + subptr(rsp, 2 * wordSize); fstp_d(Address(rsp, 0)); } } @@ -438,9 +435,9 @@ verify_oop(rax, state); switch (state) { case atos: push_ptr(rax); break; - case btos: // fall through - case ctos: // fall through - case stos: // fall through + case btos: // fall through + case ctos: // fall through + case stos: // fall through case itos: push_i(rax); break; case ltos: push_l(rax, rdx); break; case ftos: push_f(); break; @@ -450,22 +447,21 @@ } } -#ifndef CC_INTERP // Tagged stack helpers for swap and dup void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val, Register tag) { - movl(val, Address(rsp, Interpreter::expr_offset_in_bytes(n))); + movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n))); if (TaggedStackInterpreter) { - movl(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n))); + movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n))); } } void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val, Register tag) { - movl(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val); + movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val); if (TaggedStackInterpreter) { - movl(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag); + movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag); } } @@ -474,10 +470,10 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) { if (TaggedStackInterpreter) { if (tag == frame::TagCategory2) { - movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int)frame::TagValue); - movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int)frame::TagValue); + movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)frame::TagValue); + movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)frame::TagValue); } else { - movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int)tag); + movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag); } } } @@ -485,13 +481,13 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) { if (TaggedStackInterpreter) { if (tag == frame::TagCategory2) { - movl(Address(rdi, idx, Interpreter::stackElementScale(), - Interpreter::local_tag_offset_in_bytes(1)), (int)frame::TagValue); - movl(Address(rdi, idx, Interpreter::stackElementScale(), - Interpreter::local_tag_offset_in_bytes(0)), (int)frame::TagValue); + movptr(Address(rdi, idx, Interpreter::stackElementScale(), + Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue); + movptr(Address(rdi, idx, Interpreter::stackElementScale(), + 
Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue); } else { - movl(Address(rdi, idx, Interpreter::stackElementScale(), - Interpreter::local_tag_offset_in_bytes(0)), (int)tag); + movptr(Address(rdi, idx, Interpreter::stackElementScale(), + Interpreter::local_tag_offset_in_bytes(0)), (int32_t)tag); } } } @@ -499,7 +495,7 @@ void InterpreterMacroAssembler::tag_local(Register tag, Register idx) { if (TaggedStackInterpreter) { // can only be TagValue or TagReference - movl(Address(rdi, idx, Interpreter::stackElementScale(), + movptr(Address(rdi, idx, Interpreter::stackElementScale(), Interpreter::local_tag_offset_in_bytes(0)), tag); } } @@ -508,10 +504,10 @@ void InterpreterMacroAssembler::tag_local(Register tag, int n) { if (TaggedStackInterpreter) { // can only be TagValue or TagReference - movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), tag); + movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), tag); } } - + #ifdef ASSERT void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) { if (TaggedStackInterpreter) { @@ -519,17 +515,17 @@ if (tag == frame::TagCategory2) { Label nbl; t = frame::TagValue; // change to what is stored in locals - cmpl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int)t); + cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t); jcc(Assembler::equal, nbl); stop("Local tag is bad for long/double"); bind(nbl); } Label notBad; - cmpl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int)t); + cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t); jcc(Assembler::equal, notBad); // Also compare if the local value is zero, then the tag might // not have been set coming from deopt. - cmpl(Address(rdi, Interpreter::local_offset_in_bytes(n)), 0); + cmpptr(Address(rdi, Interpreter::local_offset_in_bytes(n)), 0); jcc(Assembler::equal, notBad); stop("Local tag is bad"); bind(notBad); @@ -542,24 +538,24 @@ if (tag == frame::TagCategory2) { Label nbl; t = frame::TagValue; // change to what is stored in locals - cmpl(Address(rdi, idx, Interpreter::stackElementScale(), - Interpreter::local_tag_offset_in_bytes(1)), (int)t); + cmpptr(Address(rdi, idx, Interpreter::stackElementScale(), + Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t); jcc(Assembler::equal, nbl); stop("Local tag is bad for long/double"); bind(nbl); } Label notBad; cmpl(Address(rdi, idx, Interpreter::stackElementScale(), - Interpreter::local_tag_offset_in_bytes(0)), (int)t); + Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t); jcc(Assembler::equal, notBad); // Also compare if the local value is zero, then the tag might // not have been set coming from deopt. 
- cmpl(Address(rdi, idx, Interpreter::stackElementScale(), + cmpptr(Address(rdi, idx, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0)), 0); jcc(Assembler::equal, notBad); stop("Local tag is bad"); bind(notBad); - + } } #endif // ASSERT @@ -570,22 +566,22 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1) { - pushl(arg_1); + push(arg_1); MacroAssembler::call_VM_leaf_base(entry_point, 1); } void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2) { - pushl(arg_2); - pushl(arg_1); + push(arg_2); + push(arg_1); MacroAssembler::call_VM_leaf_base(entry_point, 2); } void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) { - pushl(arg_3); - pushl(arg_2); - pushl(arg_1); + push(arg_3); + push(arg_2); + push(arg_1); MacroAssembler::call_VM_leaf_base(entry_point, 3); } @@ -594,9 +590,9 @@ // in this thread in which case we must call the i2i entry void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { // set sender sp - leal(rsi, Address(rsp, wordSize)); + lea(rsi, Address(rsp, wordSize)); // record last_sp - movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), rsi); + movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), rsi); if (JvmtiExport::can_post_interpreter_events()) { Label run_compiled_code; @@ -632,16 +628,16 @@ verify_FPU(1, state); if (VerifyActivationFrameSize) { Label L; - movl(rcx, rbp); - subl(rcx, rsp); + mov(rcx, rbp); + subptr(rcx, rsp); int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize; - cmpl(rcx, min_frame_size); + cmpptr(rcx, min_frame_size); jcc(Assembler::greaterEqual, L); stop("broken stack frame"); bind(L); } if (verifyoop) verify_oop(rax, state); - Address index(noreg, rbx, Address::times_4); + Address index(noreg, rbx, Address::times_ptr); ExternalAddress tbl((address)table); ArrayAddress dispatch(tbl, index); jump(dispatch); @@ -695,7 +691,7 @@ bool install_monitor_exception, bool notify_jvmdi) { // Note: Registers rax, rdx and FPU ST(0) may be in use for the result - // check if synchronized method + // check if synchronized method Label unlocked, unlock, no_unlock; get_thread(rcx); @@ -703,33 +699,33 @@ in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); movbool(rbx, do_not_unlock_if_synchronized); - movl(rdi,rbx); + mov(rdi,rbx); movbool(do_not_unlock_if_synchronized, false); // reset the flag - movl(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); // get method access flags + movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); // get method access flags movl(rcx, Address(rbx, methodOopDesc::access_flags_offset())); testl(rcx, JVM_ACC_SYNCHRONIZED); jcc(Assembler::zero, unlocked); - + // Don't unlock anything if the _do_not_unlock_if_synchronized flag // is set. - movl(rcx,rdi); + mov(rcx,rdi); testbool(rcx); jcc(Assembler::notZero, no_unlock); // unlock monitor push(state); // save result - + // BasicObjectLock will be first in list, since this is a synchronized method. However, need - // to check that the object has not been unlocked by an explicit monitorexit bytecode. + // to check that the object has not been unlocked by an explicit monitorexit bytecode. 
const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock)); - leal (rdx, monitor); // address of first monitor - - movl (rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); - testl (rax, rax); - jcc (Assembler::notZero, unlock); - + lea (rdx, monitor); // address of first monitor + + movptr (rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); + testptr(rax, rax); + jcc (Assembler::notZero, unlock); + pop(state); if (throw_monitor_exception) { empty_FPU_stack(); // remove possible return value from FPU-stack, otherwise stack could overflow @@ -738,7 +734,7 @@ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); should_not_reach_here(); } else { - // Monitor already unlocked during a stack unroll. + // Monitor already unlocked during a stack unroll. // If requested, install an illegal_monitor_state_exception. // Continue with stack unrolling. if (install_monitor_exception) { @@ -748,12 +744,12 @@ jmp(unlocked); } - bind(unlock); - unlock_object(rdx); + bind(unlock); + unlock_object(rdx); pop(state); - // Check that for block-structured locking (i.e., that all locked objects has been unlocked) - bind(unlocked); + // Check that for block-structured locking (i.e., that all locked objects has been unlocked) + bind(unlocked); // rax, rdx: Might contain return value @@ -763,19 +759,19 @@ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize); - + bind(restart); - movl(rcx, monitor_block_top); // points to current entry, starting with top-most entry - leal(rbx, monitor_block_bot); // points to word before bottom of monitor block + movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry + lea(rbx, monitor_block_bot); // points to word before bottom of monitor block jmp(entry); - + // Entry already locked, need to throw exception - bind(exception); + bind(exception); if (throw_monitor_exception) { empty_FPU_stack(); // remove possible return value from FPU-stack, otherwise stack could overflow - // Throw exception + // Throw exception call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); should_not_reach_here(); } else { @@ -783,10 +779,10 @@ // Unlock does not block, so don't have to worry about the frame push(state); - movl(rdx, rcx); + mov(rdx, rcx); unlock_object(rdx); pop(state); - + if (install_monitor_exception) { empty_FPU_stack(); // remove possible return value from FPU-stack, otherwise stack could overflow call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception)); @@ -794,16 +790,16 @@ jmp(restart); } - + bind(loop); - cmpl(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); // check if current entry is used + cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used jcc(Assembler::notEqual, exception); - - addl(rcx, entry_size); // otherwise advance to next entry + + addptr(rcx, entry_size); // otherwise advance to next entry bind(entry); - cmpl(rcx, rbx); // check if bottom reached - jcc(Assembler::notEqual, loop); // if not at bottom then check this entry - } + cmpptr(rcx, rbx); // check if bottom reached + jcc(Assembler::notEqual, loop); // if not at bottom then check 
this entry + } bind(no_unlock); @@ -815,22 +811,22 @@ } // remove activation - movl(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp + movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp leave(); // remove frame anchor - popl(ret_addr); // get return address - movl(rsp, rbx); // set sp to sender sp + pop(ret_addr); // get return address + mov(rsp, rbx); // set sp to sender sp if (UseSSE) { // float and double are returned in xmm register in SSE-mode if (state == ftos && UseSSE >= 1) { - subl(rsp, wordSize); + subptr(rsp, wordSize); fstp_s(Address(rsp, 0)); movflt(xmm0, Address(rsp, 0)); - addl(rsp, wordSize); + addptr(rsp, wordSize); } else if (state == dtos && UseSSE >= 2) { - subl(rsp, 2*wordSize); + subptr(rsp, 2*wordSize); fstp_d(Address(rsp, 0)); movdbl(xmm0, Address(rsp, 0)); - addl(rsp, 2*wordSize); + addptr(rsp, 2*wordSize); } } } @@ -856,13 +852,13 @@ const int obj_offset = BasicObjectLock::obj_offset_in_bytes(); const int lock_offset = BasicObjectLock::lock_offset_in_bytes (); - const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes(); + const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes(); Label slow_case; - + // Load object pointer into obj_reg %rcx - movl(obj_reg, Address(lock_reg, obj_offset)); - + movptr(obj_reg, Address(lock_reg, obj_offset)); + if (UseBiasedLocking) { // Note: we use noreg for the temporary register since it's hard // to come up with a free register on all incoming code paths @@ -870,19 +866,19 @@ } // Load immediate 1 into swap_reg %rax, - movl(swap_reg, 1); + movptr(swap_reg, (int32_t)1); // Load (object->mark() | 1) into swap_reg %rax, - orl(swap_reg, Address(obj_reg, 0)); + orptr(swap_reg, Address(obj_reg, 0)); // Save (object->mark() | 1) into BasicLock's displaced header - movl(Address(lock_reg, mark_offset), swap_reg); + movptr(Address(lock_reg, mark_offset), swap_reg); assert(lock_offset == 0, "displached header must be first word in BasicObjectLock"); if (os::is_MP()) { lock(); - } - cmpxchg(lock_reg, Address(obj_reg, 0)); + } + cmpxchgptr(lock_reg, Address(obj_reg, 0)); if (PrintBiasedLockingStatistics) { cond_inc32(Assembler::zero, ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); @@ -893,16 +889,16 @@ // 1) (mark & 3) == 0, and // 2) rsp <= mark < mark + os::pagesize() // - // These 3 tests can be done by evaluating the following + // These 3 tests can be done by evaluating the following // expression: ((mark - rsp) & (3 - os::vm_page_size())), // assuming both stack pointer and pagesize have their // least significant 2 bits clear. 
// NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg - subl(swap_reg, rsp); - andl(swap_reg, 3 - os::vm_page_size()); + subptr(swap_reg, rsp); + andptr(swap_reg, 3 - os::vm_page_size()); // Save the test result, for recursive case, the result is zero - movl(Address(lock_reg, mark_offset), swap_reg); + movptr(Address(lock_reg, mark_offset), swap_reg); if (PrintBiasedLockingStatistics) { cond_inc32(Assembler::zero, @@ -916,7 +912,7 @@ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); bind(done); - } + } } @@ -924,7 +920,7 @@ // // Argument: rdx : Points to BasicObjectLock structure for lock // Throw an IllegalMonitorException if object is not locked by current thread -// +// // Uses: rax, rbx, rcx, rdx void InterpreterMacroAssembler::unlock_object(Register lock_reg) { assert(lock_reg == rdx, "The argument is only for looks. It must be rdx"); @@ -942,36 +938,36 @@ // Convert from BasicObjectLock structure to object and BasicLock structure // Store the BasicLock address into %rax, - leal(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); + lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); // Load oop into obj_reg(%rcx) - movl(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes ())); + movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes ())); // Free entry - movl(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); + movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); if (UseBiasedLocking) { biased_locking_exit(obj_reg, header_reg, done); } // Load the old header from BasicLock structure - movl(header_reg, Address(swap_reg, BasicLock::displaced_header_offset_in_bytes())); + movptr(header_reg, Address(swap_reg, BasicLock::displaced_header_offset_in_bytes())); // Test for recursion - testl(header_reg, header_reg); + testptr(header_reg, header_reg); // zero for recursive case jcc(Assembler::zero, done); - + // Atomic swap back the old header if (os::is_MP()) lock(); - cmpxchg(header_reg, Address(obj_reg, 0)); + cmpxchgptr(header_reg, Address(obj_reg, 0)); // zero for recursive case jcc(Assembler::zero, done); // Call the runtime routine for slow case. - movl(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj + movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); bind(done); @@ -986,8 +982,8 @@ // Test ImethodDataPtr. If it is null, continue at the specified label void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) { assert(ProfileInterpreter, "must be profiling interpreter"); - movl(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize)); - testl(mdp, mdp); + movptr(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize)); + testptr(mdp, mdp); jcc(Assembler::zero, zero_continue); } @@ -996,13 +992,13 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { assert(ProfileInterpreter, "must be profiling interpreter"); Label zero_continue; - pushl(rax); - pushl(rbx); + push(rax); + push(rbx); get_method(rbx); // Test MDO to avoid the call if it is NULL. 
- movl(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); - testl(rax, rax); + movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); + testptr(rax, rax); jcc(Assembler::zero, zero_continue); // rbx,: method @@ -1010,53 +1006,55 @@ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi); // rax,: mdi - movl(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); - testl(rbx, rbx); + movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); + testptr(rbx, rbx); jcc(Assembler::zero, zero_continue); - addl(rbx, in_bytes(methodDataOopDesc::data_offset())); - addl(rbx, rax); - movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx); + addptr(rbx, in_bytes(methodDataOopDesc::data_offset())); + addptr(rbx, rax); + movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx); bind(zero_continue); - popl(rbx); - popl(rax); + pop(rbx); + pop(rax); } void InterpreterMacroAssembler::verify_method_data_pointer() { assert(ProfileInterpreter, "must be profiling interpreter"); #ifdef ASSERT Label verify_continue; - pushl(rax); - pushl(rbx); - pushl(rcx); - pushl(rdx); + push(rax); + push(rbx); + push(rcx); + push(rdx); test_method_data_pointer(rcx, verify_continue); // If mdp is zero, continue get_method(rbx); // If the mdp is valid, it will point to a DataLayout header which is // consistent with the bcp. The converse is highly probable also. load_unsigned_word(rdx, Address(rcx, in_bytes(DataLayout::bci_offset()))); - addl(rdx, Address(rbx, methodOopDesc::const_offset())); - leal(rdx, Address(rdx, constMethodOopDesc::codes_offset())); - cmpl(rdx, rsi); + addptr(rdx, Address(rbx, methodOopDesc::const_offset())); + lea(rdx, Address(rdx, constMethodOopDesc::codes_offset())); + cmpptr(rdx, rsi); jcc(Assembler::equal, verify_continue); // rbx,: method // rsi: bcp // rcx: mdp call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), rbx, rsi, rcx); bind(verify_continue); - popl(rdx); - popl(rcx); - popl(rbx); - popl(rax); + pop(rdx); + pop(rcx); + pop(rbx); + pop(rax); #endif // ASSERT } void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int constant, Register value) { + // %%% this seems to be used to store counter data which is surely 32bits + // however 64bit side stores 64 bits which seems wrong assert(ProfileInterpreter, "must be profiling interpreter"); Address data(mdp_in, constant); - movl(data, value); + movptr(data, value); } @@ -1076,6 +1074,7 @@ assert( DataLayout::counter_increment==1, "flow-free idiom only works with 1" ); assert(ProfileInterpreter, "must be profiling interpreter"); + // %%% 64bit treats this as 64 bit which seems unlikely if (decrement) { // Decrement the register. Set condition codes. 
addl(data, -DataLayout::counter_increment); @@ -1122,11 +1121,11 @@ Label& not_equal_continue) { assert(ProfileInterpreter, "must be profiling interpreter"); if (test_value_out == noreg) { - cmpl(value, Address(mdp_in, offset)); + cmpptr(value, Address(mdp_in, offset)); } else { // Put the test value into a register, so caller can use it: - movl(test_value_out, Address(mdp_in, offset)); - cmpl(test_value_out, value); + movptr(test_value_out, Address(mdp_in, offset)); + cmpptr(test_value_out, value); } jcc(Assembler::notEqual, not_equal_continue); } @@ -1135,31 +1134,31 @@ void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp) { assert(ProfileInterpreter, "must be profiling interpreter"); Address disp_address(mdp_in, offset_of_disp); - addl(mdp_in,disp_address); - movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); + addptr(mdp_in,disp_address); + movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp) { assert(ProfileInterpreter, "must be profiling interpreter"); Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp); - addl(mdp_in, disp_address); - movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); + addptr(mdp_in, disp_address); + movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) { assert(ProfileInterpreter, "must be profiling interpreter"); - addl(mdp_in, constant); - movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); + addptr(mdp_in, constant); + movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) { assert(ProfileInterpreter, "must be profiling interpreter"); - pushl(return_bci); // save/restore across call_VM + push(return_bci); // save/restore across call_VM call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci); - popl(return_bci); + pop(return_bci); } @@ -1175,6 +1174,8 @@ // We inline increment_mdp_data_at to return bumped_count in a register //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset())); Address data(mdp, in_bytes(JumpData::taken_offset())); + + // %%% 64bit treats these cells as 64 bit but they seem to be 32 bit movl(bumped_count,data); assert( DataLayout::counter_increment==1, "flow-free idiom only works with 1" ); addl(bumped_count, DataLayout::counter_increment); @@ -1253,7 +1254,7 @@ record_klass_in_profile(receiver, mdp, reg2); // The method data pointer needs to be updated to reflect the new target. - update_mdp_by_constant(mdp, + update_mdp_by_constant(mdp, in_bytes(VirtualCallData:: virtual_call_data_size())); bind(profile_continue); @@ -1292,7 +1293,7 @@ if (row == start_row) { // Failed the equality check on receiver[n]... Test for null. - testl(reg2, reg2); + testptr(reg2, reg2); if (start_row == last_row) { // The only thing left to do is handle the null case. 
jcc(Assembler::notZero, done); @@ -1318,7 +1319,7 @@ int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row)); set_mdp_data_at(mdp, recvr_offset, receiver); int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row)); - movl(reg2, DataLayout::counter_increment); + movptr(reg2, (int32_t)DataLayout::counter_increment); set_mdp_data_at(mdp, count_offset, reg2); jmp(done); } @@ -1456,13 +1457,15 @@ // If no method data exists, go to profile_continue. test_method_data_pointer(mdp, profile_continue); - // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes() - movl(reg2, in_bytes(MultiBranchData::per_case_size())); - imull(index, reg2); - addl(index, in_bytes(MultiBranchData::case_array_offset())); + // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes() + movptr(reg2, (int32_t)in_bytes(MultiBranchData::per_case_size())); + // index is positive and so should have correct value if this code were + // used on 64bits + imulptr(index, reg2); + addptr(index, in_bytes(MultiBranchData::case_array_offset())); // Update the case count - increment_mdp_data_at(mdp, index, in_bytes(MultiBranchData::relative_count_offset())); + increment_mdp_data_at(mdp, index, in_bytes(MultiBranchData::relative_count_offset())); // The method data pointer needs to be updated. update_mdp_by_offset(mdp, index, in_bytes(MultiBranchData::relative_displacement_offset())); @@ -1487,7 +1490,7 @@ #endif /* CC_INTERP */ - + void InterpreterMacroAssembler::notify_method_entry() { // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to // track stack depth. If it is possible to enter interp_only_mode we add @@ -1509,9 +1512,18 @@ call_VM_leaf( CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), rcx, rbx); } + + // RedefineClasses() tracing support for obsolete method entry + if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { + get_thread(rcx); + get_method(rbx); + call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), + rcx, rbx); + } } - + void InterpreterMacroAssembler::notify_method_exit( TosState state, NotifyMethodExitMode mode) { // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to @@ -1519,7 +1531,7 @@ // the code to check if the event should be sent. if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) { Label L; - // Note: frame::interpreter_frame_result has a dependency on how the + // Note: frame::interpreter_frame_result has a dependency on how the // method result is saved across the call to post_method_exit. If this // is changed then the interpreter_frame_result implementation will // need to be updated too. @@ -1538,12 +1550,12 @@ { SkipIfEqual skip_if(this, &DTraceMethodProbes, 0); - push(state); + NOT_CC_INTERP(push(state)); get_thread(rbx); get_method(rcx); call_VM_leaf( CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), rbx, rcx); - pop(state); + NOT_CC_INTERP(pop(state)); } } --- old/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp 2009-08-01 04:07:57.004393752 +0100 +++ new/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp 2009-08-01 04:07:56.926628240 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)interp_masm_x86_32.hpp 1.88 07/08/29 13:42:13 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // This file specializes the assember with interpreter-specific macros @@ -68,15 +65,15 @@ #else - void save_bcp() { movl(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), rsi); } - void restore_bcp() { movl(rsi, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize)); } - void restore_locals() { movl(rdi, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); } + void save_bcp() { movptr(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), rsi); } + void restore_bcp() { movptr(rsi, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize)); } + void restore_locals() { movptr(rdi, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); } // Helpers for runtime call arguments/results - void get_method(Register reg) { movl(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); } - void get_constant_pool(Register reg) { get_method(reg); movl(reg, Address(reg, methodOopDesc::constants_offset())); } - void get_constant_pool_cache(Register reg) { get_constant_pool(reg); movl(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); } - void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movl(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes())); + void get_method(Register reg) { movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); } + void get_constant_pool(Register reg) { get_method(reg); movptr(reg, Address(reg, methodOopDesc::constants_offset())); } + void get_constant_pool_cache(Register reg) { get_constant_pool(reg); movptr(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); } + void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes())); } void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset); void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset); @@ -85,8 +82,6 @@ // Expression stack void f2ieee(); // truncate ftos to 32bits void d2ieee(); // truncate dtos to 64bits -#endif // CC_INTERP - void pop_ptr(Register r = rax); void pop_ptr(Register r, Register tag); @@ -107,14 +102,25 @@ void pop(TosState state); // transition vtos -> state void push(TosState state); // transition state -> vtos + void pop(Register r ) { ((MacroAssembler*)this)->pop(r); } + + void push(Register r ) { ((MacroAssembler*)this)->push(r); } + void push(int32_t imm ) { ((MacroAssembler*)this)->push(imm); } + + // These are dummies to prevent surprise implicit conversions to Register + void pop(void* v ); // Add unimplemented ambiguous method + void push(void* v ); // Add unimplemented ambiguous method + DEBUG_ONLY(void verify_stack_tag(frame::Tag t);) +#endif // CC_INTERP + #ifndef CC_INTERP - void empty_expression_stack() { - movl(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize)); + void empty_expression_stack() { + movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize)); // NULL last_sp until next java call - movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); } // Tagged 
stack helpers for swap and dup @@ -161,16 +167,16 @@ // // Removes the current activation (incl. unlocking of monitors) // and sets up the return address. This code is also used for - // exception unwindwing. In that case, we do not want to throw - // IllegalMonitorStateExceptions, since that might get us into an + // exception unwindwing. In that case, we do not want to throw + // IllegalMonitorStateExceptions, since that might get us into an // infinite rethrow exception loop. // Additionally this code is used for popFrame and earlyReturn. // In popFrame case we want to skip throwing an exception, // installing an exception, and notifying jvmdi. // In earlyReturn case we only want to skip throwing an exception // and installing an exception. - void remove_activation(TosState state, Register ret_addr, - bool throw_monitor_exception = true, + void remove_activation(TosState state, Register ret_addr, + bool throw_monitor_exception = true, bool install_monitor_exception = true, bool notify_jvmdi = true); #endif /* !CC_INTERP */ @@ -179,7 +185,7 @@ void verify_oop(Register reg, TosState state = atos); // only if +VerifyOops && state == atos #ifndef CC_INTERP void verify_FPU(int stack_depth, TosState state = ftos); // only if +VerifyFPU && (state == ftos || state == dtos) - + #endif /* !CC_INTERP */ // Object locking @@ -234,6 +240,5 @@ // support for jvmti void notify_method_entry(); void notify_method_exit(TosState state, NotifyMethodExitMode mode); - -}; +}; --- old/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp 2009-08-01 04:07:57.939400258 +0100 +++ new/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp 2009-08-01 04:07:57.852903376 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)interp_masm_x86_64.cpp 1.48 07/09/17 09:26:04 JVM" -#endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -31,6 +28,15 @@ // Implementation of InterpreterMacroAssembler +#ifdef CC_INTERP +void InterpreterMacroAssembler::get_method(Register reg) { + movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize))); + movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method))); +} +#endif // CC_INTERP + +#ifndef CC_INTERP + void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point, int number_of_arguments) { // interpreter specific @@ -38,11 +44,16 @@ // Note: No need to save/restore bcp & locals (r13 & r14) pointer // since these are callee saved registers and no blocking/ // GC can happen in leaf calls. + // Further Note: DO NOT save/restore bcp/locals. If a caller has + // already saved them so that it can use esi/edi as temporaries + // then a save/restore here will DESTROY the copy the caller + // saved! There used to be a save_bcp() that only happened in + // the ASSERT path (no restore_bcp). Which caused bizarre failures + // when jvm built with ASSERTs. 
#ifdef ASSERT - save_bcp(); - { + { Label L; - cmpq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD); + cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); jcc(Assembler::equal, L); stop("InterpreterMacroAssembler::call_VM_leaf_base:" " last_sp != NULL"); @@ -52,24 +63,9 @@ // super call MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); // interpreter specific -#ifdef ASSERT - { - Label L; - cmpq(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize)); - jcc(Assembler::equal, L); - stop("InterpreterMacroAssembler::call_VM_leaf_base:" - " r13 not callee saved?"); - bind(L); - } - { - Label L; - cmpq(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); - jcc(Assembler::equal, L); - stop("InterpreterMacroAssembler::call_VM_leaf_base:" - " r14 not callee saved?"); - bind(L); - } -#endif + // Used to ASSERT that r13/r14 were equal to frame's bcp/locals + // but since they may not have been saved (and we don't want to + // save thme here (see note above) the assert is invalid. } void InterpreterMacroAssembler::call_VM_base(Register oop_result, @@ -87,9 +83,9 @@ // assert(java_thread == noreg , "not expecting a precomputed java thread"); save_bcp(); #ifdef ASSERT - { + { Label L; - cmpq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD); + cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); jcc(Assembler::equal, L); stop("InterpreterMacroAssembler::call_VM_leaf_base:" " last_sp != NULL"); @@ -119,7 +115,7 @@ testl(c_rarg0, JavaThread::popframe_pending_bit); jcc(Assembler::zero, L); testl(c_rarg0, JavaThread::popframe_processing_bit); - jcc(Assembler::notZero, L); + jcc(Assembler::notZero, L); // Call Interpreter::remove_activation_preserving_args_entry() to get the // address of the same-named entrypoint in the generated interpreter code. call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry)); @@ -130,15 +126,15 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) { - movq(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); + movptr(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset()); const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset()); const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset()); switch (state) { - case atos: movq(rax, oop_addr); - movptr(oop_addr, NULL_WORD); + case atos: movptr(rax, oop_addr); + movptr(oop_addr, (int32_t)NULL_WORD); verify_oop(rax, state); break; - case ltos: movq(rax, val_addr); break; + case ltos: movptr(rax, val_addr); break; case btos: // fall through case ctos: // fall through case stos: // fall through @@ -150,15 +146,15 @@ } // Clean up tos value in the thread object movl(tos_addr, (int) ilgl); - movl(val_addr, (int) NULL_WORD); + movl(val_addr, (int32_t) NULL_WORD); } void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) { if (JvmtiExport::can_force_early_return()) { Label L; - movq(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); - testq(c_rarg0, c_rarg0); + movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); + testptr(c_rarg0, c_rarg0); jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit; // Initiate earlyret handling only if it is not already being processed. 
@@ -170,7 +166,7 @@ // Call Interpreter::remove_activation_early_entry() to get the address of the // same-named entrypoint in the generated interpreter code. - movq(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); + movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_tos_offset())); call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), c_rarg0); jmp(rax); @@ -190,12 +186,12 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, - Register index, + Register index, int bcp_offset) { assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); assert(cache != index, "must use different registers"); load_unsigned_word(index, Address(r13, bcp_offset)); - movq(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); + movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); // convert from field index to ConstantPoolCacheEntry index shll(index, 2); @@ -212,10 +208,10 @@ // convert from field index to ConstantPoolCacheEntry index // and from word offset to byte offset shll(tmp, 2 + LogBytesPerWord); - movq(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); + movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); // skip past the header - addq(cache, in_bytes(constantPoolCacheOopDesc::base_offset())); - addq(cache, tmp); // construct pointer to cache entry + addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset())); + addptr(cache, tmp); // construct pointer to cache entry } @@ -236,13 +232,13 @@ assert(Rsub_klass != rcx, "rcx holds 2ndary super array length"); assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr"); - Label not_subtype, loop; + Label not_subtype, not_subtype_pop, loop; // Profile the not-null value's klass. profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, rdi // Load the super-klass's check offset into rcx - movl(rcx, Address(rax, sizeof(oopDesc) + + movl(rcx, Address(rax, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes())); // Load from the sub-klass's super-class display list, or a 1-word // cache of the secondary superclass list, or a failing value with a @@ -250,39 +246,56 @@ // exceptionally deep in the Java hierarchy and we have to scan the // secondary superclass list the hard way. See if we get an // immediate positive hit - cmpq(rax, Address(Rsub_klass, rcx, Address::times_1)); + cmpptr(rax, Address(Rsub_klass, rcx, Address::times_1)); jcc(Assembler::equal,ok_is_subtype); // Check for immediate negative hit cmpl(rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()); jcc( Assembler::notEqual, not_subtype ); // Check for self - cmpq(Rsub_klass, rax); + cmpptr(Rsub_klass, rax); jcc(Assembler::equal, ok_is_subtype); // Now do a linear scan of the secondary super-klass chain. - movq(rdi, Address(Rsub_klass, sizeof(oopDesc) + - Klass::secondary_supers_offset_in_bytes())); + movptr(rdi, Address(Rsub_klass, sizeof(oopDesc) + + Klass::secondary_supers_offset_in_bytes())); // rdi holds the objArrayOop of secondary supers. 
// Load the array length - movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); + movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); // Skip to start of data; also clear Z flag incase rcx is zero - addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); + addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); // Scan rcx words at [rdi] for occurance of rax // Set NZ/Z based on last compare - repne_scan(); - // Not equal? - jcc(Assembler::notEqual, not_subtype); + + // this part is kind tricky, as values in supers array could be 32 or 64 bit wide + // and we store values in objArrays always encoded, thus we need to encode value + // before repne + if (UseCompressedOops) { + push(rax); + encode_heap_oop(rax); + repne_scanl(); + // Not equal? + jcc(Assembler::notEqual, not_subtype_pop); + // restore heap oop here for movq + pop(rax); + } else { + repne_scan(); + jcc(Assembler::notEqual, not_subtype); + } // Must be equal but missed in cache. Update cache. - movq(Address(Rsub_klass, sizeof(oopDesc) + + movptr(Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax); jmp(ok_is_subtype); + bind(not_subtype_pop); + // restore heap oop here for miss + if (UseCompressedOops) pop(rax); bind(not_subtype); profile_typecheck_failed(rcx); // blows rcx } + // Java Expression Stack #ifdef ASSERT @@ -294,17 +307,17 @@ if (t == frame::TagCategory2) { tag = frame::TagValue; Label hokay; - cmpq(Address(rsp, 3*wordSize), (int)tag); + cmpptr(Address(rsp, 3*wordSize), (int32_t)tag); jcc(Assembler::equal, hokay); stop("Java Expression stack tag high value is bad"); bind(hokay); } Label okay; - cmpq(Address(rsp, wordSize), (int)tag); + cmpptr(Address(rsp, wordSize), (int32_t)tag); jcc(Assembler::equal, okay); // Also compare if the stack value is zero, then the tag might // not have been set coming from deopt. 
- cmpq(Address(rsp, 0), 0); + cmpptr(Address(rsp, 0), 0); jcc(Assembler::equal, okay); stop("Java Expression stack tag value is bad"); bind(okay); @@ -314,83 +327,83 @@ void InterpreterMacroAssembler::pop_ptr(Register r) { debug_only(verify_stack_tag(frame::TagReference)); - popq(r); - if (TaggedStackInterpreter) addq(rsp, 1 * wordSize); + pop(r); + if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) { - popq(r); - if (TaggedStackInterpreter) popq(tag); + pop(r); + if (TaggedStackInterpreter) pop(tag); } void InterpreterMacroAssembler::pop_i(Register r) { - // XXX can't use popq currently, upper half non clean + // XXX can't use pop currently, upper half non clean debug_only(verify_stack_tag(frame::TagValue)); movl(r, Address(rsp, 0)); - addq(rsp, wordSize); - if (TaggedStackInterpreter) addq(rsp, 1 * wordSize); + addptr(rsp, wordSize); + if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_l(Register r) { debug_only(verify_stack_tag(frame::TagCategory2)); movq(r, Address(rsp, 0)); - addq(rsp, 2 * Interpreter::stackElementSize()); + addptr(rsp, 2 * Interpreter::stackElementSize()); } void InterpreterMacroAssembler::pop_f(XMMRegister r) { debug_only(verify_stack_tag(frame::TagValue)); movflt(r, Address(rsp, 0)); - addq(rsp, wordSize); - if (TaggedStackInterpreter) addq(rsp, 1 * wordSize); + addptr(rsp, wordSize); + if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_d(XMMRegister r) { debug_only(verify_stack_tag(frame::TagCategory2)); movdbl(r, Address(rsp, 0)); - addq(rsp, 2 * Interpreter::stackElementSize()); + addptr(rsp, 2 * Interpreter::stackElementSize()); } void InterpreterMacroAssembler::push_ptr(Register r) { - if (TaggedStackInterpreter) pushq(frame::TagReference); - pushq(r); + if (TaggedStackInterpreter) push(frame::TagReference); + push(r); } void InterpreterMacroAssembler::push_ptr(Register r, Register tag) { - if (TaggedStackInterpreter) pushq(tag); - pushq(r); + if (TaggedStackInterpreter) push(tag); + push(r); } void InterpreterMacroAssembler::push_i(Register r) { - if (TaggedStackInterpreter) pushq(frame::TagValue); - pushq(r); + if (TaggedStackInterpreter) push(frame::TagValue); + push(r); } void InterpreterMacroAssembler::push_l(Register r) { if (TaggedStackInterpreter) { - pushq(frame::TagValue); - subq(rsp, 1 * wordSize); - pushq(frame::TagValue); - subq(rsp, 1 * wordSize); + push(frame::TagValue); + subptr(rsp, 1 * wordSize); + push(frame::TagValue); + subptr(rsp, 1 * wordSize); } else { - subq(rsp, 2 * wordSize); + subptr(rsp, 2 * wordSize); } movq(Address(rsp, 0), r); } void InterpreterMacroAssembler::push_f(XMMRegister r) { - if (TaggedStackInterpreter) pushq(frame::TagValue); - subq(rsp, wordSize); + if (TaggedStackInterpreter) push(frame::TagValue); + subptr(rsp, wordSize); movflt(Address(rsp, 0), r); } void InterpreterMacroAssembler::push_d(XMMRegister r) { if (TaggedStackInterpreter) { - pushq(frame::TagValue); - subq(rsp, 1 * wordSize); - pushq(frame::TagValue); - subq(rsp, 1 * wordSize); + push(frame::TagValue); + subptr(rsp, 1 * wordSize); + push(frame::TagValue); + subptr(rsp, 1 * wordSize); } else { - subq(rsp, 2 * wordSize); + subptr(rsp, 2 * wordSize); } movdbl(Address(rsp, 0), r); } @@ -398,9 +411,9 @@ void InterpreterMacroAssembler::pop(TosState state) { switch (state) { case atos: pop_ptr(); break; - case btos: - case ctos: - case stos: + case btos: + case ctos: + case stos: case itos: pop_i(); break; 
case ltos: pop_l(); break; case ftos: pop_f(); break; @@ -415,9 +428,9 @@ verify_oop(rax, state); switch (state) { case atos: push_ptr(); break; - case btos: - case ctos: - case stos: + case btos: + case ctos: + case stos: case itos: push_i(); break; case ltos: push_l(); break; case ftos: push_f(); break; @@ -428,20 +441,22 @@ } + + // Tagged stack helpers for swap and dup void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val, Register tag) { - movq(val, Address(rsp, Interpreter::expr_offset_in_bytes(n))); + movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n))); if (TaggedStackInterpreter) { - movq(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n))); + movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n))); } } void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val, Register tag) { - movq(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val); + movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val); if (TaggedStackInterpreter) { - movq(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag); + movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag); } } @@ -450,12 +465,12 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) { if (TaggedStackInterpreter) { if (tag == frame::TagCategory2) { - mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), - (intptr_t)frame::TagValue); - mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), - (intptr_t)frame::TagValue); + movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), + (int32_t)frame::TagValue); + movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), + (int32_t)frame::TagValue); } else { - mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (intptr_t)tag); + movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag); } } } @@ -463,13 +478,13 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) { if (TaggedStackInterpreter) { if (tag == frame::TagCategory2) { - mov64(Address(r14, idx, Address::times_8, - Interpreter::local_tag_offset_in_bytes(1)), (intptr_t)frame::TagValue); - mov64(Address(r14, idx, Address::times_8, - Interpreter::local_tag_offset_in_bytes(0)), (intptr_t)frame::TagValue); + movptr(Address(r14, idx, Address::times_8, + Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue); + movptr(Address(r14, idx, Address::times_8, + Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue); } else { - mov64(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), - (intptr_t)tag); + movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), + (int32_t)tag); } } } @@ -477,7 +492,7 @@ void InterpreterMacroAssembler::tag_local(Register tag, Register idx) { if (TaggedStackInterpreter) { // can only be TagValue or TagReference - movq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag); + movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag); } } @@ -485,10 +500,10 @@ void InterpreterMacroAssembler::tag_local(Register tag, int n) { if (TaggedStackInterpreter) { // can only be TagValue or TagReference - movq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag); + movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag); } } - + #ifdef ASSERT void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) { if (TaggedStackInterpreter) { @@ -496,17 +511,17 @@ if (tag == 
frame::TagCategory2) { Label nbl; t = frame::TagValue; // change to what is stored in locals - cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int)t); + cmpptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t); jcc(Assembler::equal, nbl); stop("Local tag is bad for long/double"); bind(nbl); } Label notBad; - cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int)t); + cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t); jcc(Assembler::equal, notBad); // Also compare if the local value is zero, then the tag might // not have been set coming from deopt. - cmpq(Address(r14, Interpreter::local_offset_in_bytes(n)), 0); + cmpptr(Address(r14, Interpreter::local_offset_in_bytes(n)), 0); jcc(Assembler::equal, notBad); stop("Local tag is bad"); bind(notBad); @@ -519,17 +534,17 @@ if (tag == frame::TagCategory2) { Label nbl; t = frame::TagValue; // change to what is stored in locals - cmpq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int)t); + cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t); jcc(Assembler::equal, nbl); stop("Local tag is bad for long/double"); bind(nbl); } Label notBad; - cmpq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int)t); + cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t); jcc(Assembler::equal, notBad); // Also compare if the local value is zero, then the tag might // not have been set coming from deopt. - cmpq(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0); + cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0); jcc(Assembler::equal, notBad); stop("Local tag is bad"); bind(notBad); @@ -546,7 +561,7 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1) { if (c_rarg0 != arg_1) { - movq(c_rarg0, arg_1); + mov(c_rarg0, arg_1); } MacroAssembler::call_VM_leaf_base(entry_point, 1); } @@ -558,10 +573,10 @@ assert(c_rarg0 != arg_2, "smashed argument"); assert(c_rarg1 != arg_1, "smashed argument"); if (c_rarg0 != arg_1) { - movq(c_rarg0, arg_1); + mov(c_rarg0, arg_1); } if (c_rarg1 != arg_2) { - movq(c_rarg1, arg_2); + mov(c_rarg1, arg_2); } MacroAssembler::call_VM_leaf_base(entry_point, 2); } @@ -577,13 +592,13 @@ assert(c_rarg2 != arg_1, "smashed argument"); assert(c_rarg2 != arg_2, "smashed argument"); if (c_rarg0 != arg_1) { - movq(c_rarg0, arg_1); + mov(c_rarg0, arg_1); } if (c_rarg1 != arg_2) { - movq(c_rarg1, arg_2); + mov(c_rarg1, arg_2); } if (c_rarg2 != arg_3) { - movq(c_rarg2, arg_3); + mov(c_rarg2, arg_3); } MacroAssembler::call_VM_leaf_base(entry_point, 3); } @@ -592,9 +607,9 @@ // in this thread in which case we must call the i2i entry void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { // set sender sp - leaq(r13, Address(rsp, wordSize)); + lea(r13, Address(rsp, wordSize)); // record last_sp - movq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13); + movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13); if (JvmtiExport::can_post_interpreter_events()) { Label run_compiled_code; @@ -625,18 +640,18 @@ dispatch_next(state, step); } -void InterpreterMacroAssembler::dispatch_base(TosState state, +void InterpreterMacroAssembler::dispatch_base(TosState state, address* table, bool verifyoop) { verify_FPU(1, state); if (VerifyActivationFrameSize) { Label L; - 
movq(rcx, rbp); - subq(rcx, rsp); - int min_frame_size = + mov(rcx, rbp); + subptr(rcx, rsp); + int32_t min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize; - cmpq(rcx, min_frame_size); + cmpptr(rcx, (int32_t)min_frame_size); jcc(Assembler::greaterEqual, L); stop("broken stack frame"); bind(L); @@ -665,7 +680,7 @@ // load next bytecode (load before advancing r13 to prevent AGI) load_unsigned_byte(rbx, Address(r13, step)); // advance r13 - incrementq(r13, step); + increment(r13, step); dispatch_base(state, Interpreter::dispatch_table(state)); } @@ -705,36 +720,36 @@ movbool(do_not_unlock_if_synchronized, false); // reset the flag // get method access flags - movq(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); + movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); movl(rcx, Address(rbx, methodOopDesc::access_flags_offset())); testl(rcx, JVM_ACC_SYNCHRONIZED); jcc(Assembler::zero, unlocked); - + // Don't unlock anything if the _do_not_unlock_if_synchronized flag // is set. testbool(rdx); jcc(Assembler::notZero, no_unlock); - + // unlock monitor push(state); // save result - + // BasicObjectLock will be first in list, since this is a // synchronized method. However, need to check that the object has // not been unlocked by an explicit monitorexit bytecode. - const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * + const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int) sizeof(BasicObjectLock)); // We use c_rarg1 so that if we go slow path it will be the correct // register for unlock_object to pass to VM directly - leaq(c_rarg1, monitor); // address of first monitor - - movq(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); - testq(rax, rax); + lea(c_rarg1, monitor); // address of first monitor + + movptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); + testptr(rax, rax); jcc(Assembler::notZero, unlock); - + pop(state); if (throw_monitor_exception) { // Entry already unlocked, need to throw exception - call_VM(noreg, CAST_FROM_FN_PTR(address, + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); should_not_reach_here(); } else { @@ -742,19 +757,19 @@ // install an illegal_monitor_state_exception. Continue with // stack unrolling. 
if (install_monitor_exception) { - call_VM(noreg, CAST_FROM_FN_PTR(address, + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception)); } jmp(unlocked); } - bind(unlock); - unlock_object(c_rarg1); + bind(unlock); + unlock_object(c_rarg1); pop(state); // Check that for block-structured locking (i.e., that all locked // objects has been unlocked) - bind(unlocked); + bind(unlocked); // rax: Might contain return value @@ -766,22 +781,22 @@ rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); const Address monitor_block_bot( rbp, frame::interpreter_frame_initial_sp_offset * wordSize); - + bind(restart); // We use c_rarg1 so that if we go slow path it will be the correct // register for unlock_object to pass to VM directly - movq(c_rarg1, monitor_block_top); // points to current entry, starting + movptr(c_rarg1, monitor_block_top); // points to current entry, starting // with top-most entry - leaq(rbx, monitor_block_bot); // points to word before bottom of + lea(rbx, monitor_block_bot); // points to word before bottom of // monitor block jmp(entry); - + // Entry already locked, need to throw exception - bind(exception); + bind(exception); if (throw_monitor_exception) { - // Throw exception - MacroAssembler::call_VM(noreg, + // Throw exception + MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime:: throw_illegal_monitor_state_exception)); should_not_reach_here(); @@ -793,26 +808,26 @@ push(state); unlock_object(c_rarg1); pop(state); - + if (install_monitor_exception) { - call_VM(noreg, CAST_FROM_FN_PTR(address, + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime:: new_illegal_monitor_state_exception)); } jmp(restart); } - + bind(loop); // check if current entry is used - cmpq(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), (int) NULL); + cmpptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL); jcc(Assembler::notEqual, exception); - - addq(c_rarg1, entry_size); // otherwise advance to next entry + + addptr(c_rarg1, entry_size); // otherwise advance to next entry bind(entry); - cmpq(c_rarg1, rbx); // check if bottom reached + cmpptr(c_rarg1, rbx); // check if bottom reached jcc(Assembler::notEqual, loop); // if not at bottom then check this entry - } + } bind(no_unlock); @@ -825,18 +840,20 @@ // remove activation // get sender sp - movq(rbx, - Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); + movptr(rbx, + Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); leave(); // remove frame anchor - popq(ret_addr); // get return address - movq(rsp, rbx); // set sp to sender sp + pop(ret_addr); // get return address + mov(rsp, rbx); // set sp to sender sp } +#endif // C_INTERP + // Lock object // // Args: // c_rarg1: BasicObjectLock to be used for locking -// +// // Kills: // rax // c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs) @@ -845,7 +862,7 @@ assert(lock_reg == c_rarg1, "The argument is only for looks. 
It must be c_rarg1"); if (UseHeavyMonitors) { - call_VM(noreg, + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); } else { @@ -856,13 +873,13 @@ const int obj_offset = BasicObjectLock::obj_offset_in_bytes(); const int lock_offset = BasicObjectLock::lock_offset_in_bytes (); - const int mark_offset = lock_offset + - BasicLock::displaced_header_offset_in_bytes(); + const int mark_offset = lock_offset + + BasicLock::displaced_header_offset_in_bytes(); Label slow_case; - + // Load object pointer into obj_reg %c_rarg3 - movq(obj_reg, Address(lock_reg, obj_offset)); + movptr(obj_reg, Address(lock_reg, obj_offset)); if (UseBiasedLocking) { biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, done, &slow_case); @@ -872,16 +889,16 @@ movl(swap_reg, 1); // Load (object->mark() | 1) into swap_reg %rax - orq(swap_reg, Address(obj_reg, 0)); + orptr(swap_reg, Address(obj_reg, 0)); // Save (object->mark() | 1) into BasicLock's displaced header - movq(Address(lock_reg, mark_offset), swap_reg); + movptr(Address(lock_reg, mark_offset), swap_reg); - assert(lock_offset == 0, + assert(lock_offset == 0, "displached header must be first word in BasicObjectLock"); if (os::is_MP()) lock(); - cmpxchgq(lock_reg, Address(obj_reg, 0)); + cmpxchgptr(lock_reg, Address(obj_reg, 0)); if (PrintBiasedLockingStatistics) { cond_inc32(Assembler::zero, ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); @@ -892,16 +909,16 @@ // 1) (mark & 7) == 0, and // 2) rsp <= mark < mark + os::pagesize() // - // These 3 tests can be done by evaluating the following + // These 3 tests can be done by evaluating the following // expression: ((mark - rsp) & (7 - os::vm_page_size())), // assuming both stack pointer and pagesize have their // least significant 3 bits clear. // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg - subq(swap_reg, rsp); - andq(swap_reg, 7 - os::vm_page_size()); + subptr(swap_reg, rsp); + andptr(swap_reg, 7 - os::vm_page_size()); // Save the test result, for recursive case, the result is zero - movq(Address(lock_reg, mark_offset), swap_reg); + movptr(Address(lock_reg, mark_offset), swap_reg); if (PrintBiasedLockingStatistics) { cond_inc32(Assembler::zero, @@ -912,12 +929,12 @@ bind(slow_case); // Call the runtime routine for slow case - call_VM(noreg, + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); bind(done); - } + } } @@ -927,7 +944,7 @@ // // Args: // c_rarg1: BasicObjectLock for lock -// +// // Kills: // rax // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs) @@ -936,7 +953,7 @@ assert(lock_reg == c_rarg1, "The argument is only for looks. 
It must be rarg1"); if (UseHeavyMonitors) { - call_VM(noreg, + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); } else { @@ -950,39 +967,39 @@ // Convert from BasicObjectLock structure to object and BasicLock // structure Store the BasicLock address into %rax - leaq(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); + lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); // Load oop into obj_reg(%c_rarg3) - movq(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); + movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // Free entry - movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); + movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); if (UseBiasedLocking) { biased_locking_exit(obj_reg, header_reg, done); } // Load the old header from BasicLock structure - movq(header_reg, Address(swap_reg, - BasicLock::displaced_header_offset_in_bytes())); + movptr(header_reg, Address(swap_reg, + BasicLock::displaced_header_offset_in_bytes())); // Test for recursion - testq(header_reg, header_reg); + testptr(header_reg, header_reg); // zero for recursive case jcc(Assembler::zero, done); - + // Atomic swap back the old header if (os::is_MP()) lock(); - cmpxchgq(header_reg, Address(obj_reg, 0)); + cmpxchgptr(header_reg, Address(obj_reg, 0)); // zero for recursive case jcc(Assembler::zero, done); // Call the runtime routine for slow case. - movq(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), + movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj - call_VM(noreg, + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); @@ -992,12 +1009,13 @@ } } +#ifndef CC_INTERP -void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, +void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) { assert(ProfileInterpreter, "must be profiling interpreter"); - movq(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize)); - testq(mdp, mdp); + movptr(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize)); + testptr(mdp, mdp); jcc(Assembler::zero, zero_continue); } @@ -1006,13 +1024,13 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { assert(ProfileInterpreter, "must be profiling interpreter"); Label zero_continue; - pushq(rax); - pushq(rbx); + push(rax); + push(rbx); get_method(rbx); // Test MDO to avoid the call if it is NULL. 
- movq(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); - testq(rax, rax); + movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); + testptr(rax, rax); jcc(Assembler::zero, zero_continue); // rbx: method @@ -1020,36 +1038,36 @@ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13); // rax: mdi - movq(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); - testq(rbx, rbx); + movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); + testptr(rbx, rbx); jcc(Assembler::zero, zero_continue); - addq(rbx, in_bytes(methodDataOopDesc::data_offset())); - addq(rbx, rax); - movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx); + addptr(rbx, in_bytes(methodDataOopDesc::data_offset())); + addptr(rbx, rax); + movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx); bind(zero_continue); - popq(rbx); - popq(rax); + pop(rbx); + pop(rax); } void InterpreterMacroAssembler::verify_method_data_pointer() { assert(ProfileInterpreter, "must be profiling interpreter"); #ifdef ASSERT Label verify_continue; - pushq(rax); - pushq(rbx); - pushq(c_rarg3); - pushq(c_rarg2); + push(rax); + push(rbx); + push(c_rarg3); + push(c_rarg2); test_method_data_pointer(c_rarg3, verify_continue); // If mdp is zero, continue get_method(rbx); // If the mdp is valid, it will point to a DataLayout header which is // consistent with the bcp. The converse is highly probable also. - load_unsigned_word(c_rarg2, + load_unsigned_word(c_rarg2, Address(c_rarg3, in_bytes(DataLayout::bci_offset()))); - addq(c_rarg2, Address(rbx, methodOopDesc::const_offset())); - leaq(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset())); - cmpq(c_rarg2, r13); + addptr(c_rarg2, Address(rbx, methodOopDesc::const_offset())); + lea(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset())); + cmpptr(c_rarg2, r13); jcc(Assembler::equal, verify_continue); // rbx: method // r13: bcp @@ -1057,20 +1075,20 @@ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), rbx, r13, c_rarg3); bind(verify_continue); - popq(c_rarg2); - popq(c_rarg3); - popq(rbx); - popq(rax); + pop(c_rarg2); + pop(c_rarg3); + pop(rbx); + pop(rax); #endif // ASSERT } -void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, - int constant, +void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, + int constant, Register value) { assert(ProfileInterpreter, "must be profiling interpreter"); Address data(mdp_in, constant); - movq(data, value); + movptr(data, value); } @@ -1086,22 +1104,24 @@ void InterpreterMacroAssembler::increment_mdp_data_at(Address data, bool decrement) { assert(ProfileInterpreter, "must be profiling interpreter"); + // %%% this does 64bit counters at best it is wasting space + // at worst it is a rare bug when counters overflow if (decrement) { // Decrement the register. Set condition codes. - addq(data, -DataLayout::counter_increment); + addptr(data, (int32_t) -DataLayout::counter_increment); // If the decrement causes the counter to overflow, stay negative Label L; jcc(Assembler::negative, L); - addq(data, DataLayout::counter_increment); + addptr(data, (int32_t) DataLayout::counter_increment); bind(L); } else { assert(DataLayout::counter_increment == 1, "flow-free idiom only works with 1"); // Increment the register. Set carry flag. - addq(data, DataLayout::counter_increment); + addptr(data, DataLayout::counter_increment); // If the increment causes the counter to overflow, pull back by 1. 
- sbbq(data, 0); + sbbptr(data, (int32_t)0); } } @@ -1115,7 +1135,7 @@ increment_mdp_data_at(data, decrement); } -void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, +void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, int flag_byte_constant) { assert(ProfileInterpreter, "must be profiling interpreter"); int header_offset = in_bytes(DataLayout::header_offset()); @@ -1133,11 +1153,11 @@ Label& not_equal_continue) { assert(ProfileInterpreter, "must be profiling interpreter"); if (test_value_out == noreg) { - cmpq(value, Address(mdp_in, offset)); + cmpptr(value, Address(mdp_in, offset)); } else { // Put the test value into a register, so caller can use it: - movq(test_value_out, Address(mdp_in, offset)); - cmpq(test_value_out, value); + movptr(test_value_out, Address(mdp_in, offset)); + cmpptr(test_value_out, value); } jcc(Assembler::notEqual, not_equal_continue); } @@ -1147,40 +1167,40 @@ int offset_of_disp) { assert(ProfileInterpreter, "must be profiling interpreter"); Address disp_address(mdp_in, offset_of_disp); - addq(mdp_in, disp_address); - movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); + addptr(mdp_in, disp_address); + movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } -void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, +void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp) { assert(ProfileInterpreter, "must be profiling interpreter"); Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp); - addq(mdp_in, disp_address); - movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); + addptr(mdp_in, disp_address); + movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) { assert(ProfileInterpreter, "must be profiling interpreter"); - addq(mdp_in, constant); - movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); + addptr(mdp_in, constant); + movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) { assert(ProfileInterpreter, "must be profiling interpreter"); - pushq(return_bci); // save/restore across call_VM - call_VM(noreg, + push(return_bci); // save/restore across call_VM + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci); - popq(return_bci); + pop(return_bci); } -void InterpreterMacroAssembler::profile_taken_branch(Register mdp, +void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bumped_count) { if (ProfileInterpreter) { Label profile_continue; @@ -1193,12 +1213,12 @@ // We inline increment_mdp_data_at to return bumped_count in a register //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset())); Address data(mdp, in_bytes(JumpData::taken_offset())); - movq(bumped_count, data); + movptr(bumped_count, data); assert(DataLayout::counter_increment == 1, "flow-free idiom only works with 1"); - addq(bumped_count, DataLayout::counter_increment); - sbbq(bumped_count, 0); - movq(data, bumped_count); // Store back out + addptr(bumped_count, DataLayout::counter_increment); + sbbptr(bumped_count, 0); + movptr(data, bumped_count); // Store back out // The method data pointer needs to be updated to reflect the new target. 
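[Editor's note - not part of the patch] The add/sbb pairs above (addptr followed by sbbptr(..., 0), and the inlined bumped_count variant in profile_taken_branch) are a branch-free saturating increment: the add sets the carry flag only when the profile counter wraps past its maximum, and the subtract-with-borrow then pulls it back, so the counter sticks at all-ones instead of rolling over to zero. A hedged C++ equivalent, with the branch made explicit for readability:

    #include <cstdint>

    // Sketch only; the generated code is branch-free because sbb consumes the carry flag.
    uintptr_t saturating_bump(uintptr_t counter) {
      uintptr_t sum = counter + 1;   // addptr(data, DataLayout::counter_increment)
      if (sum < counter) {           // carry set: the counter was already at its maximum
        sum -= 1;                    // sbbptr(data, 0) subtracts the borrow, staying at max
      }
      return sum;                    // saturates at (uintptr_t)-1 rather than wrapping to 0
    }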
update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset())); @@ -1253,7 +1273,7 @@ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); // The method data pointer needs to be updated to reflect the new target. - update_mdp_by_constant(mdp, + update_mdp_by_constant(mdp, in_bytes(VirtualCallData:: virtual_call_data_size())); bind(profile_continue); @@ -1277,7 +1297,7 @@ record_klass_in_profile(receiver, mdp, reg2); // The method data pointer needs to be updated to reflect the new target. - update_mdp_by_constant(mdp, + update_mdp_by_constant(mdp, in_bytes(VirtualCallData:: virtual_call_data_size())); bind(profile_continue); @@ -1326,7 +1346,7 @@ if (test_for_null_also) { // Failed the equality check on receiver[n]... Test for null. - testq(reg2, reg2); + testptr(reg2, reg2); if (start_row == last_row) { // The only thing left to do is handle the null case. jcc(Assembler::notZero, done); @@ -1391,7 +1411,7 @@ bind (done); } -void InterpreterMacroAssembler::profile_ret(Register return_bci, +void InterpreterMacroAssembler::profile_ret(Register return_bci, Register mdp) { if (ProfileInterpreter) { Label profile_continue; @@ -1407,7 +1427,7 @@ Label next_test; // See if return_bci is equal to bci[n]: - test_mdp_data_at(mdp, + test_mdp_data_at(mdp, in_bytes(RetData::bci_offset(row)), return_bci, noreg, next_test); @@ -1416,7 +1436,7 @@ increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row))); // The method data pointer needs to be updated to reflect the new target. - update_mdp_by_offset(mdp, + update_mdp_by_offset(mdp, in_bytes(RetData::bci_displacement_offset(row))); jmp(profile_continue); bind(next_test); @@ -1497,11 +1517,11 @@ test_method_data_pointer(mdp, profile_continue); // Update the default case count - increment_mdp_data_at(mdp, + increment_mdp_data_at(mdp, in_bytes(MultiBranchData::default_count_offset())); // The method data pointer needs to be updated. - update_mdp_by_offset(mdp, + update_mdp_by_offset(mdp, in_bytes(MultiBranchData:: default_displacement_offset())); @@ -1522,17 +1542,17 @@ // Build the base (index * per_case_size_in_bytes()) + // case_array_offset_in_bytes() movl(reg2, in_bytes(MultiBranchData::per_case_size())); - imulq(index, reg2); // XXX l ? - addq(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ? + imulptr(index, reg2); // XXX l ? + addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ? // Update the case count - increment_mdp_data_at(mdp, - index, - in_bytes(MultiBranchData::relative_count_offset())); + increment_mdp_data_at(mdp, + index, + in_bytes(MultiBranchData::relative_count_offset())); // The method data pointer needs to be updated. - update_mdp_by_offset(mdp, - index, + update_mdp_by_offset(mdp, + index, in_bytes(MultiBranchData:: relative_displacement_offset())); @@ -1541,6 +1561,7 @@ } + void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) { if (state == atos) { MacroAssembler::verify_oop(reg); @@ -1549,8 +1570,9 @@ void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { } +#endif // !CC_INTERP + - void InterpreterMacroAssembler::notify_method_entry() { // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to // track stack depth. 
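[Editor's note - not part of the patch] The profile_ret changes earlier in this hunk unroll a small search over the RetData rows: compare return_bci against each recorded bci, bump the matching row's count, and advance the method data pointer by that row's displacement. A hedged scalar sketch of what the generated code does, with a hypothetical row struct standing in for the real RetData/DataLayout offsets:

    #include <cstdint>

    // Hypothetical layout for illustration; the real fields are addressed via byte offsets.
    struct RetDataRow { int bci; intptr_t count; intptr_t displacement; };

    void profile_ret_sketch(int return_bci, RetDataRow* rows, int nrows, intptr_t& mdp) {
      for (int row = 0; row < nrows; row++) {
        if (rows[row].bci == return_bci) {   // test_mdp_data_at(mdp, bci_offset(row), ...)
          rows[row].count++;                 // increment_mdp_data_at(mdp, bci_count_offset(row))
          mdp += rows[row].displacement;     // update_mdp_by_offset(mdp, bci_displacement_offset(row))
          return;
        }
      }
      // No row matched: the real code instead calls into the runtime (update_mdp_for_ret).
    }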
If it is possible to enter interp_only_mode we add @@ -1560,7 +1582,7 @@ movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset())); testl(rdx, rdx); jcc(Assembler::zero, L); - call_VM(noreg, CAST_FROM_FN_PTR(address, + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry)); bind(L); } @@ -1571,9 +1593,17 @@ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), r15_thread, c_rarg1); } + + // RedefineClasses() tracing support for obsolete method entry + if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { + get_method(c_rarg1); + call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), + r15_thread, c_rarg1); + } } - + void InterpreterMacroAssembler::notify_method_exit( TosState state, NotifyMethodExitMode mode) { // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to @@ -1581,26 +1611,29 @@ // the code to check if the event should be sent. if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) { Label L; - // Note: frame::interpreter_frame_result has a dependency on how the + // Note: frame::interpreter_frame_result has a dependency on how the // method result is saved across the call to post_method_exit. If this // is changed then the interpreter_frame_result implementation will // need to be updated too. - push(state); + + // For c++ interpreter the result is always stored at a known location in the frame + // template interpreter will leave it on the top of the stack. + NOT_CC_INTERP(push(state);) movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset())); testl(rdx, rdx); jcc(Assembler::zero, L); - call_VM(noreg, + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit)); bind(L); - pop(state); + NOT_CC_INTERP(pop(state)); } { SkipIfEqual skip(this, &DTraceMethodProbes, false); - push(state); + NOT_CC_INTERP(push(state)); get_method(c_rarg1); call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), r15_thread, c_rarg1); - pop(state); + NOT_CC_INTERP(pop(state)); } } --- old/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp 2009-08-01 04:07:59.060249294 +0100 +++ new/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp 2009-08-01 04:07:58.976288902 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)interp_masm_x86_64.hpp 1.23 07/07/02 16:50:28 JVM" -#endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,14 +19,14 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ // This file specializes the assember with interpreter-specific macros -class InterpreterMacroAssembler - : public MacroAssembler { +class InterpreterMacroAssembler: public MacroAssembler { +#ifndef CC_INTERP protected: // Interpreter specific version of call_VM_base virtual void call_VM_leaf_base(address entry_point, @@ -47,60 +44,62 @@ // base routine for all dispatches void dispatch_base(TosState state, address* table, bool verifyoop = true); +#endif // CC_INTERP public: - InterpreterMacroAssembler(CodeBuffer* code) - : MacroAssembler(code) - {} + InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {} void load_earlyret_value(TosState state); +#ifdef CC_INTERP + void save_bcp() { /* not needed in c++ interpreter and harmless */ } + void restore_bcp() { /* not needed in c++ interpreter and harmless */ } + + // Helpers for runtime call arguments/results + void get_method(Register reg); + +#else + // Interpreter-specific registers - void save_bcp() - { - movq(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), r13); + void save_bcp() { + movptr(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), r13); } - void restore_bcp() - { - movq(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize)); + void restore_bcp() { + movptr(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize)); } - void restore_locals() - { - movq(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); + void restore_locals() { + movptr(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); } // Helpers for runtime call arguments/results - void get_method(Register reg) - { - movq(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); + void get_method(Register reg) { + movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); } - void get_constant_pool(Register reg) - { + void get_constant_pool(Register reg) { get_method(reg); - movq(reg, Address(reg, methodOopDesc::constants_offset())); + movptr(reg, Address(reg, methodOopDesc::constants_offset())); } - void get_constant_pool_cache(Register reg) - { + void get_constant_pool_cache(Register reg) { get_constant_pool(reg); - movq(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); + movptr(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); } - void get_cpool_and_tags(Register cpool, Register tags) - { + void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); - movq(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes())); + movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes())); } void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset); - void get_cache_and_index_at_bcp(Register cache, Register index, + void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset); - void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, + void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset); + void pop_ptr(Register r = rax); void pop_i(Register r = rax); void pop_l(Register r = rax); @@ -112,15 +111,23 @@ void push_f(XMMRegister r = xmm0); void push_d(XMMRegister r = xmm0); + void pop(Register r ) { ((MacroAssembler*)this)->pop(r); } + + void push(Register r ) { ((MacroAssembler*)this)->push(r); } + void push(int32_t imm ) { ((MacroAssembler*)this)->push(imm); } + void pop(TosState state); // transition vtos -> state void push(TosState state); // transition state -> vtos // Tagged stack support, pop and 
push both tag and value. void pop_ptr(Register r, Register tag); void push_ptr(Register r, Register tag); +#endif // CC_INTERP DEBUG_ONLY(void verify_stack_tag(frame::Tag t);) +#ifndef CC_INTERP + // Tagged stack helpers for swap and dup void load_ptr_and_tag(int n, Register val, Register tag); void store_ptr_and_tag(int n, Register val, Register tag); @@ -136,19 +143,19 @@ void verify_local_tag(frame::Tag tag, Register idx); #endif // ASSERT + void empty_expression_stack() - { - movq(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * - wordSize)); + { + movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize)); // NULL last_sp until next java call - movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); } // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls void super_call_VM_leaf(address entry_point); void super_call_VM_leaf(address entry_point, Register arg_1); void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); - void super_call_VM_leaf(address entry_point, + void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); // Generate a subtype check: branch to ok_is_subtype if sub_klass is @@ -176,23 +183,26 @@ // // Removes the current activation (incl. unlocking of monitors) // and sets up the return address. This code is also used for - // exception unwindwing. In that case, we do not want to throw - // IllegalMonitorStateExceptions, since that might get us into an + // exception unwindwing. In that case, we do not want to throw + // IllegalMonitorStateExceptions, since that might get us into an // infinite rethrow exception loop. // Additionally this code is used for popFrame and earlyReturn. // In popFrame case we want to skip throwing an exception, // installing an exception, and notifying jvmdi. // In earlyReturn case we only want to skip throwing an exception // and installing an exception. - void remove_activation(TosState state, Register ret_addr, - bool throw_monitor_exception = true, + void remove_activation(TosState state, Register ret_addr, + bool throw_monitor_exception = true, bool install_monitor_exception = true, bool notify_jvmdi = true); +#endif // CC_INTERP // Object locking void lock_object (Register lock_reg); void unlock_object(Register lock_reg); +#ifndef CC_INTERP + // Interpreter profiling operations void set_method_data_pointer_for_bcp(); void test_method_data_pointer(Register mdp, Label& zero_continue); @@ -240,10 +250,11 @@ // only if +VerifyFPU && (state == ftos || state == dtos) void verify_FPU(int stack_depth, TosState state = ftos); +#endif // !CC_INTERP + typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode; // support for jvmti/dtrace void notify_method_entry(); void notify_method_exit(TosState state, NotifyMethodExitMode mode); }; - --- old/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp 2009-08-01 04:08:00.541192532 +0100 +++ new/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp 2009-08-01 04:08:00.451823941 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)interpreterRT_x86_32.cpp 1.58 07/09/17 09:26:03 JVM" -#endif /* - * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -37,8 +34,8 @@ move(offset(), jni_offset() + 1); } -void InterpreterRuntime::SignatureHandlerGenerator::pass_long() { - move(offset(), jni_offset() + 2); +void InterpreterRuntime::SignatureHandlerGenerator::pass_long() { + move(offset(), jni_offset() + 2); move(offset() + 1, jni_offset() + 1); } @@ -53,13 +50,13 @@ void InterpreterRuntime::SignatureHandlerGenerator::box(int from_offset, int to_offset) { - __ leal(temp(), Address(from(), Interpreter::local_offset_in_bytes(from_offset))); - __ cmpl(Address(from(), Interpreter::local_offset_in_bytes(from_offset)), 0); // do not use temp() to avoid AGI + __ lea(temp(), Address(from(), Interpreter::local_offset_in_bytes(from_offset))); + __ cmpptr(Address(from(), Interpreter::local_offset_in_bytes(from_offset)), (int32_t)NULL_WORD); // do not use temp() to avoid AGI Label L; __ jcc(Assembler::notZero, L); - __ movl(temp(), 0); + __ movptr(temp(), ((int32_t)NULL_WORD)); __ bind(L); - __ movl(Address(to(), to_offset * wordSize), temp()); + __ movptr(Address(to(), to_offset * wordSize), temp()); } @@ -96,21 +93,21 @@ } #endif // ASSERT - virtual void pass_int() { - *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); + virtual void pass_int() { + *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); debug_only(verify_tag(frame::TagValue)); _from -= Interpreter::stackElementSize(); } - virtual void pass_long() { + virtual void pass_long() { _to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1)); _to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0)); debug_only(verify_tag(frame::TagValue)); - _to += 2; + _to += 2; _from -= 2*Interpreter::stackElementSize(); } - virtual void pass_object() { + virtual void pass_object() { // pass address of from intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0)); *_to++ = (*(intptr_t*)from_addr == 0) ? NULL : from_addr; @@ -119,7 +116,7 @@ } public: - SlowSignatureHandler(methodHandle method, address from, intptr_t* to) : + SlowSignatureHandler(methodHandle method, address from, intptr_t* to) : NativeSignatureIterator(method) { _from = from; _to = to + (is_static() ? 2 : 1); --- old/hotspot/src/cpu/x86/vm/interpreterRT_x86_64.cpp 2009-08-01 04:08:01.374605711 +0100 +++ new/hotspot/src/cpu/x86/vm/interpreterRT_x86_64.cpp 2009-08-01 04:08:01.291893370 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)interpreterRT_x86_64.cpp 1.27 07/09/17 09:26:03 JVM" -#endif /* - * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ #include "incls/_precompiled.incl" @@ -96,49 +93,49 @@ #ifdef _WIN64 switch (_num_args) { case 0: - __ movq(c_rarg1, src); + __ movptr(c_rarg1, src); _num_args++; break; case 1: - __ movq(c_rarg2, src); + __ movptr(c_rarg2, src); _num_args++; break; case 2: - __ movq(c_rarg3, src); + __ movptr(c_rarg3, src); _num_args++; break; case 3: default: - __ movq(rax, src); - __ movq(Address(to(), _stack_offset), rax); + __ movptr(rax, src); + __ movptr(Address(to(), _stack_offset), rax); _stack_offset += wordSize; break; } #else switch (_num_int_args) { case 0: - __ movq(c_rarg1, src); + __ movptr(c_rarg1, src); _num_int_args++; break; case 1: - __ movq(c_rarg2, src); + __ movptr(c_rarg2, src); _num_int_args++; break; case 2: - __ movq(c_rarg3, src); + __ movptr(c_rarg3, src); _num_int_args++; break; case 3: - __ movq(c_rarg4, src); + __ movptr(c_rarg4, src); _num_int_args++; break; case 4: - __ movq(c_rarg5, src); + __ movptr(c_rarg5, src); _num_int_args++; break; default: - __ movq(rax, src); - __ movq(Address(to(), _stack_offset), rax); + __ movptr(rax, src); + __ movptr(Address(to(), _stack_offset), rax); _stack_offset += wordSize; break; } @@ -174,16 +171,16 @@ if (_num_args < Argument::n_float_register_parameters_c-1) { __ movdbl(as_XMMRegister(++_num_args), src); } else { - __ movq(rax, src); - __ movq(Address(to(), _stack_offset), rax); + __ movptr(rax, src); + __ movptr(Address(to(), _stack_offset), rax); _stack_offset += wordSize; } #else if (_num_fp_args < Argument::n_float_register_parameters_c) { __ movdbl(as_XMMRegister(_num_fp_args++), src); } else { - __ movq(rax, src); - __ movq(Address(to(), _stack_offset), rax); + __ movptr(rax, src); + __ movptr(Address(to(), _stack_offset), rax); _stack_offset += wordSize; } #endif @@ -196,29 +193,29 @@ switch (_num_args) { case 0: assert(offset() == 0, "argument register 1 can only be (non-null) receiver"); - __ leaq(c_rarg1, src); + __ lea(c_rarg1, src); _num_args++; break; case 1: - __ leaq(rax, src); + __ lea(rax, src); __ xorl(c_rarg2, c_rarg2); - __ cmpq(src, 0); - __ cmovq(Assembler::notEqual, c_rarg2, rax); + __ cmpptr(src, 0); + __ cmov(Assembler::notEqual, c_rarg2, rax); _num_args++; break; case 2: - __ leaq(rax, src); + __ lea(rax, src); __ xorl(c_rarg3, c_rarg3); - __ cmpq(src, 0); - __ cmovq(Assembler::notEqual, c_rarg3, rax); + __ cmpptr(src, 0); + __ cmov(Assembler::notEqual, c_rarg3, rax); _num_args++; break; default: - __ leaq(rax, src); + __ lea(rax, src); __ xorl(temp(), temp()); - __ cmpq(src, 0); - __ cmovq(Assembler::notEqual, temp(), rax); - __ movq(Address(to(), _stack_offset), temp()); + __ cmpptr(src, 0); + __ cmov(Assembler::notEqual, temp(), rax); + __ movptr(Address(to(), _stack_offset), temp()); _stack_offset += wordSize; break; } @@ -226,43 +223,43 @@ switch (_num_int_args) { case 0: assert(offset() == 0, "argument register 1 can only be (non-null) receiver"); - __ leaq(c_rarg1, src); + __ lea(c_rarg1, src); _num_int_args++; break; case 1: - __ leaq(rax, src); + __ lea(rax, src); __ xorl(c_rarg2, c_rarg2); - __ cmpq(src, 0); - __ cmovq(Assembler::notEqual, c_rarg2, rax); + __ cmpptr(src, 0); + __ cmov(Assembler::notEqual, c_rarg2, rax); _num_int_args++; break; case 2: - __ leaq(rax, src); + __ lea(rax, src); __ xorl(c_rarg3, c_rarg3); - __ cmpq(src, 0); - __ cmovq(Assembler::notEqual, c_rarg3, rax); + __ cmpptr(src, 0); + __ cmov(Assembler::notEqual, c_rarg3, rax); _num_int_args++; break; case 3: - __ leaq(rax, src); + __ lea(rax, src); __ xorl(c_rarg4, c_rarg4); - __ cmpq(src, 0); - __ 
cmovq(Assembler::notEqual, c_rarg4, rax); + __ cmpptr(src, 0); + __ cmov(Assembler::notEqual, c_rarg4, rax); _num_int_args++; break; case 4: - __ leaq(rax, src); + __ lea(rax, src); __ xorl(c_rarg5, c_rarg5); - __ cmpq(src, 0); - __ cmovq(Assembler::notEqual, c_rarg5, rax); + __ cmpptr(src, 0); + __ cmov(Assembler::notEqual, c_rarg5, rax); _num_int_args++; break; default: - __ leaq(rax, src); + __ lea(rax, src); __ xorl(temp(), temp()); - __ cmpq(src, 0); - __ cmovq(Assembler::notEqual, temp(), rax); - __ movq(Address(to(), _stack_offset), temp()); + __ cmpptr(src, 0); + __ cmov(Assembler::notEqual, temp(), rax); + __ movptr(Address(to(), _stack_offset), temp()); _stack_offset += wordSize; break; } @@ -287,7 +284,7 @@ #ifdef _WIN64 -class SlowSignatureHandler +class SlowSignatureHandler : public NativeSignatureIterator { private: address _from; @@ -305,7 +302,7 @@ virtual void pass_int() { - jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); + jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); debug_only(verify_tag(frame::TagValue)); _from -= Interpreter::stackElementSize(); @@ -318,7 +315,7 @@ } virtual void pass_long() - { + { intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1)); debug_only(verify_tag(frame::TagValue)); _from -= 2*Interpreter::stackElementSize(); @@ -346,7 +343,7 @@ virtual void pass_float() { - jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); + jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); debug_only(verify_tag(frame::TagValue)); _from -= Interpreter::stackElementSize(); @@ -375,7 +372,7 @@ } public: - SlowSignatureHandler(methodHandle method, address from, intptr_t* to) + SlowSignatureHandler(methodHandle method, address from, intptr_t* to) : NativeSignatureIterator(method) { _from = from; @@ -389,7 +386,7 @@ } }; #else -class SlowSignatureHandler +class SlowSignatureHandler : public NativeSignatureIterator { private: address _from; @@ -409,7 +406,7 @@ virtual void pass_int() { - jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); + jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); debug_only(verify_tag(frame::TagValue)); _from -= Interpreter::stackElementSize(); @@ -422,7 +419,7 @@ } virtual void pass_long() - { + { intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1)); debug_only(verify_tag(frame::TagValue)); _from -= 2*Interpreter::stackElementSize(); @@ -478,7 +475,7 @@ } public: - SlowSignatureHandler(methodHandle method, address from, intptr_t* to) + SlowSignatureHandler(methodHandle method, address from, intptr_t* to) : NativeSignatureIterator(method) { _from = from; @@ -495,7 +492,7 @@ #endif -IRT_ENTRY(address, +IRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(JavaThread* thread, methodOopDesc* method, intptr_t* from, --- old/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp 2009-08-01 04:08:02.265743112 +0100 +++ new/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp 2009-08-01 04:08:02.187537758 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)interpreter_x86_32.cpp 1.378 07/09/17 09:26:02 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -41,7 +38,7 @@ // rcx: temporary // rdi: pointer to locals // rsp: end of copied parameters area - __ movl(rcx, rsp); + __ mov(rcx, rsp); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), rbx, rdi, rcx); __ ret(0); return entry; @@ -78,8 +75,8 @@ // Code: _return // _return // return w/o popping parameters - __ popl(rax); - __ movl(rsp, rsi); + __ pop(rax); + __ mov(rsp, rsi); __ jmp(rax); __ bind(slow_path); @@ -127,7 +124,7 @@ // } // Universe::is_jdk12x_version() always returns false since // the JDK version is not yet determined when this method is called. - // This method is called during interpreter_init() whereas + // This method is called during interpreter_init() whereas // JDK version is only determined when universe2_init() is called. // Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are @@ -138,7 +135,7 @@ __ pushl(Address(rsp, 3*wordSize)); // push hi (and note rsp -= wordSize) __ pushl(Address(rsp, 2*wordSize)); // push lo __ fld_d(Address(rsp, 0)); // get double in ST0 - __ addl(rsp, 2*wordSize); + __ addptr(rsp, 2*wordSize); } else { __ fld_d(Address(rsp, 1*wordSize)); } @@ -152,7 +149,7 @@ case Interpreter::java_lang_math_tan : __ trigfunc('t'); break; - case Interpreter::java_lang_math_sqrt: + case Interpreter::java_lang_math_sqrt: __ fsqrt(); break; case Interpreter::java_lang_math_abs: @@ -170,24 +167,24 @@ __ push_fTOS(); __ pop_fTOS(); break; - default : + default : ShouldNotReachHere(); } // return double result in xmm0 for interpreter and compilers. if (UseSSE >= 2) { - __ subl(rsp, 2*wordSize); + __ subptr(rsp, 2*wordSize); __ fstp_d(Address(rsp, 0)); __ movdbl(xmm0, Address(rsp, 0)); - __ addl(rsp, 2*wordSize); + __ addptr(rsp, 2*wordSize); } // done, result in FPU ST(0) or XMM0 - __ popl(rdi); // get return address - __ movl(rsp, rsi); // set sp to sender sp + __ pop(rdi); // get return address + __ mov(rsp, rsi); // set sp to sender sp __ jmp(rdi); - return entry_point; + return entry_point; } @@ -205,10 +202,10 @@ // abstract method entry // remove return address. Not really needed, since exception handling throws away expression stack - __ popl(rbx); + __ pop(rbx); // adjust stack to what a normal return would do - __ movl(rsp, rsi); + __ mov(rsp, rsi); // throw exception __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); // the call_VM checks for exception, so we should never return here. @@ -225,7 +222,7 @@ int callee_param_count, int callee_locals, bool is_top_frame) { - return layout_activation(method, + return layout_activation(method, tempcount, popframe_extra_args, moncount, @@ -251,5 +248,3 @@ assert(f->is_interpreted_frame(), "must be interpreted"); } - - --- old/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp 2009-08-01 04:08:03.177265172 +0100 +++ new/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp 2009-08-01 04:08:03.090503034 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)interpreter_x86_64.cpp 1.68 07/09/17 09:26:03 JVM" -#endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -38,9 +35,9 @@ // rbx: method // r14: pointer to locals // c_rarg3: first stack arg - wordSize - __ movq(c_rarg3, rsp); + __ mov(c_rarg3, rsp); // adjust rsp - __ subq(rsp, 4 * wordSize); + __ subptr(rsp, 4 * wordSize); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), @@ -73,13 +70,13 @@ case 0: __ movl(rscratch1, Address(rbx, methodOopDesc::access_flags_offset())); __ testl(rscratch1, JVM_ACC_STATIC); - __ cmovq(Assembler::zero, c_rarg1, Address(rsp, 0)); + __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0)); break; case 1: - __ movq(c_rarg2, Address(rsp, wordSize)); + __ movptr(c_rarg2, Address(rsp, wordSize)); break; case 2: - __ movq(c_rarg3, Address(rsp, 2 * wordSize)); + __ movptr(c_rarg3, Address(rsp, 2 * wordSize)); break; default: break; @@ -104,7 +101,7 @@ // restore rsp - __ addq(rsp, 4 * wordSize); + __ addptr(rsp, 4 * wordSize); __ ret(0); @@ -117,9 +114,9 @@ // rbx: method // r14: pointer to locals // c_rarg3: first stack arg - wordSize - __ movq(c_rarg3, rsp); + __ mov(c_rarg3, rsp); // adjust rsp - __ subq(rsp, 14 * wordSize); + __ subptr(rsp, 14 * wordSize); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), @@ -145,7 +142,7 @@ const XMMRegister r = as_XMMRegister(i); Label d, done; - + __ testl(c_rarg3, 1 << i); __ jcc(Assembler::notZero, d); __ movflt(r, Address(rsp, (6 + i) * wordSize)); @@ -158,15 +155,15 @@ // Now handle integrals. Only do c_rarg1 if not static. __ movl(c_rarg3, Address(rbx, methodOopDesc::access_flags_offset())); __ testl(c_rarg3, JVM_ACC_STATIC); - __ cmovq(Assembler::zero, c_rarg1, Address(rsp, 0)); + __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0)); - __ movq(c_rarg2, Address(rsp, wordSize)); - __ movq(c_rarg3, Address(rsp, 2 * wordSize)); - __ movq(c_rarg4, Address(rsp, 3 * wordSize)); - __ movq(c_rarg5, Address(rsp, 4 * wordSize)); + __ movptr(c_rarg2, Address(rsp, wordSize)); + __ movptr(c_rarg3, Address(rsp, 2 * wordSize)); + __ movptr(c_rarg4, Address(rsp, 3 * wordSize)); + __ movptr(c_rarg5, Address(rsp, 4 * wordSize)); // restore rsp - __ addq(rsp, 14 * wordSize); + __ addptr(rsp, 14 * wordSize); __ ret(0); @@ -179,14 +176,13 @@ // Various method entries // -address InterpreterGenerator::generate_math_entry( - AbstractInterpreter::MethodKind kind) { - // rbx: methodOop +address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) { - if (!InlineIntrinsics) return NULL; // Generate a vanilla entry + // rbx,: methodOop + // rcx: scratrch + // r13: sender sp - assert(kind == Interpreter::java_lang_math_sqrt, - "Other intrinsics are not special"); + if (!InlineIntrinsics) return NULL; // Generate a vanilla entry address entry_point = __ pc(); @@ -200,6 +196,11 @@ // in order to avoid monotonicity bugs when switching // from interpreter to compiler in the middle of some // computation) + // + // stack: [ ret adr ] <-- rsp + // [ lo(arg) ] + // [ hi(arg) ] + // // Note: For JDK 1.2 StrictMath doesn't exist and Math.sin/cos/sqrt are // native methods. Interpreter::method_kind(...) does a check for @@ -221,13 +222,49 @@ // Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are // java methods. Interpreter::method_kind(...) 
will select // this entry point for the corresponding methods in JDK 1.3. - __ sqrtsd(xmm0, Address(rsp, wordSize)); + // get argument - __ popq(rax); - __ movq(rsp, r13); + if (kind == Interpreter::java_lang_math_sqrt) { + __ sqrtsd(xmm0, Address(rsp, wordSize)); + } else { + __ fld_d(Address(rsp, wordSize)); + switch (kind) { + case Interpreter::java_lang_math_sin : + __ trigfunc('s'); + break; + case Interpreter::java_lang_math_cos : + __ trigfunc('c'); + break; + case Interpreter::java_lang_math_tan : + __ trigfunc('t'); + break; + case Interpreter::java_lang_math_abs: + __ fabs(); + break; + case Interpreter::java_lang_math_log: + __ flog(); + break; + case Interpreter::java_lang_math_log10: + __ flog10(); + break; + default : + ShouldNotReachHere(); + } + + // return double result in xmm0 for interpreter and compilers. + __ subptr(rsp, 2*wordSize); + // Round to 64bit precision + __ fstp_d(Address(rsp, 0)); + __ movdbl(xmm0, Address(rsp, 0)); + __ addptr(rsp, 2*wordSize); + } + + + __ pop(rax); + __ mov(rsp, r13); __ jmp(rax); - return entry_point; + return entry_point; } @@ -242,10 +279,10 @@ // abstract method entry // remove return address. Not really needed, since exception // handling throws away expression stack - __ popq(rbx); + __ pop(rbx); // adjust stack to what a normal return would do - __ movq(rsp, r13); + __ mov(rsp, r13); // throw exception __ call_VM(noreg, CAST_FROM_FN_PTR(address, @@ -268,7 +305,7 @@ } address entry_point = __ pc(); - + // If we need a safepoint check, generate full interpreter entry. Label slow_path; __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), @@ -279,8 +316,8 @@ // Code: _return // _return // return w/o popping parameters - __ popq(rax); - __ movq(rsp, r13); + __ pop(rax); + __ mov(rsp, r13); __ jmp(rax); __ bind(slow_path); @@ -289,148 +326,6 @@ } -// Call an accessor method (assuming it is resolved, otherwise drop -// into vanilla (slow path) entry -address InterpreterGenerator::generate_accessor_entry(void) { - // rbx: methodOop - - // r13: senderSP must preserver for slow path, set SP to it on fast path - - address entry_point = __ pc(); - Label xreturn_path; - - // do fastpath for resolved accessor methods - if (UseFastAccessorMethods) { - // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites - // thereof; parameter size = 1 - // Note: We can only use this code if the getfield has been resolved - // and if we don't have a null-pointer exception => check for - // these conditions first and use slow path if necessary. - Label slow_path; - // If we need a safepoint check, generate full interpreter entry. - __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), - SafepointSynchronize::_not_synchronized); - - __ jcc(Assembler::notEqual, slow_path); - // rbx: method - __ movq(rax, Address(rsp, wordSize)); - - // check if local 0 != NULL and read field - __ testq(rax, rax); - __ jcc(Assembler::zero, slow_path); - - __ movq(rdi, Address(rbx, methodOopDesc::constants_offset())); - // read first instruction word and extract bytecode @ 1 and index @ 2 - __ movq(rdx, Address(rbx, methodOopDesc::const_offset())); - __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset())); - // Shift codes right to get the index on the right. 
- // The bytecode fetched looks like <0xb4><0x2a> - __ shrl(rdx, 2 * BitsPerByte); - __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); - __ movq(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes())); - - // rax: local 0 - // rbx: method - // rdx: constant pool cache index - // rdi: constant pool cache - - // check if getfield has been resolved and read constant pool cache entry - // check the validity of the cache entry by testing whether _indices field - // contains Bytecode::_getfield in b1 byte. - assert(in_words(ConstantPoolCacheEntry::size()) == 4, - "adjust shift below"); - __ movl(rcx, - Address(rdi, - rdx, - Address::times_8, - constantPoolCacheOopDesc::base_offset() + - ConstantPoolCacheEntry::indices_offset())); - __ shrl(rcx, 2 * BitsPerByte); - __ andl(rcx, 0xFF); - __ cmpl(rcx, Bytecodes::_getfield); - __ jcc(Assembler::notEqual, slow_path); - - // Note: constant pool entry is not valid before bytecode is resolved - __ movq(rcx, - Address(rdi, - rdx, - Address::times_8, - constantPoolCacheOopDesc::base_offset() + - ConstantPoolCacheEntry::f2_offset())); - // edx: flags - __ movl(rdx, - Address(rdi, - rdx, - Address::times_8, - constantPoolCacheOopDesc::base_offset() + - ConstantPoolCacheEntry::flags_offset())); - - Label notObj, notInt, notByte, notShort; - const Address field_address(rax, rcx, Address::times_1); - - // Need to differentiate between igetfield, agetfield, bgetfield etc. - // because they are different sizes. - // Use the type from the constant pool cache - __ shrl(rdx, ConstantPoolCacheEntry::tosBits); - // Make sure we don't need to mask edx for tosBits after the above shift - ConstantPoolCacheEntry::verify_tosBits(); - - __ cmpl(rdx, atos); - __ jcc(Assembler::notEqual, notObj); - // atos - __ movq(rax, field_address); - __ jmp(xreturn_path); - - __ bind(notObj); - __ cmpl(rdx, itos); - __ jcc(Assembler::notEqual, notInt); - // itos - __ movl(rax, field_address); - __ jmp(xreturn_path); - - __ bind(notInt); - __ cmpl(rdx, btos); - __ jcc(Assembler::notEqual, notByte); - // btos - __ load_signed_byte(rax, field_address); - __ jmp(xreturn_path); - - __ bind(notByte); - __ cmpl(rdx, stos); - __ jcc(Assembler::notEqual, notShort); - // stos - __ load_signed_word(rax, field_address); - __ jmp(xreturn_path); - - __ bind(notShort); -#ifdef ASSERT - Label okay; - __ cmpl(rdx, ctos); - __ jcc(Assembler::equal, okay); - __ stop("what type is this?"); - __ bind(okay); -#endif - // ctos - __ load_unsigned_word(rax, field_address); - - __ bind(xreturn_path); - - // _ireturn/_areturn - __ popq(rdi); - __ movq(rsp, r13); - __ jmp(rdi); - __ ret(0); - - // generate a vanilla interpreter entry as the slow path - __ bind(slow_path); - (void) generate_normal_entry(false); - } else { - (void) generate_normal_entry(false); - } - - return entry_point; -} - // This method tells the deoptimizer how big an interpreted frame must be: int AbstractInterpreter::size_activation(methodOop method, int tempcount, @@ -460,5 +355,3 @@ assert(f->is_interpreted_frame(), "must be interpreted"); } - - --- old/hotspot/src/cpu/x86/vm/jniFastGetField_x86_32.cpp 2009-08-01 04:08:04.089057119 +0100 +++ new/hotspot/src/cpu/x86/vm/jniFastGetField_x86_32.cpp 2009-08-01 04:08:04.007139157 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)jniFastGetField_x86_32.cpp 1.13 07/09/17 09:26:02 JVM" -#endif /* - * Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2004-2008 Sun Microsystems, Inc. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ # include "incls/_precompiled.incl" @@ -75,25 +72,25 @@ __ testb (rcx, 1); __ jcc (Assembler::notZero, slow); if (os::is_MP()) { - __ movl (rax, rcx); - __ andl (rax, 1); // rax, must end up 0 - __ movl (rdx, Address(rsp, rax, Address::times_1, 2*wordSize)); + __ mov(rax, rcx); + __ andptr(rax, 1); // rax, must end up 0 + __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize)); // obj, notice rax, is 0. // rdx is data dependent on rcx. } else { - __ movl (rdx, Address(rsp, 2*wordSize)); // obj + __ movptr (rdx, Address(rsp, 2*wordSize)); // obj } - __ movl (rax, Address(rsp, 3*wordSize)); // jfieldID - __ movl (rdx, Address(rdx, 0)); // *obj - __ shrl (rax, 2); // offset + __ movptr(rax, Address(rsp, 3*wordSize)); // jfieldID + __ movptr(rdx, Address(rdx, 0)); // *obj + __ shrptr (rax, 2); // offset assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); speculative_load_pclist[count] = __ pc(); switch (type) { - case T_BOOLEAN: __ movzxb (rax, Address(rdx, rax, Address::times_1)); break; - case T_BYTE: __ movsxb (rax, Address(rdx, rax, Address::times_1)); break; - case T_CHAR: __ movzxw (rax, Address(rdx, rax, Address::times_1)); break; - case T_SHORT: __ movsxw (rax, Address(rdx, rax, Address::times_1)); break; + case T_BOOLEAN: __ movzbl (rax, Address(rdx, rax, Address::times_1)); break; + case T_BYTE: __ movsbl (rax, Address(rdx, rax, Address::times_1)); break; + case T_CHAR: __ movzwl (rax, Address(rdx, rax, Address::times_1)); break; + case T_SHORT: __ movswl (rax, Address(rdx, rax, Address::times_1)); break; case T_INT: __ movl (rax, Address(rdx, rax, Address::times_1)); break; default: ShouldNotReachHere(); } @@ -101,8 +98,8 @@ Address ca1; if (os::is_MP()) { __ lea(rdx, counter); - __ xorl(rdx, rax); - __ xorl(rdx, rax); + __ xorptr(rdx, rax); + __ xorptr(rdx, rax); __ cmp32(rcx, Address(rdx, 0)); // ca1 is the same as ca because // rax, ^ counter_addr ^ rax, = address @@ -187,35 +184,37 @@ ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr()); - __ pushl (rsi); + __ push (rsi); __ mov32 (rcx, counter); __ testb (rcx, 1); __ jcc (Assembler::notZero, slow); if (os::is_MP()) { - __ movl (rax, rcx); - __ andl (rax, 1); // rax, must end up 0 - __ movl (rdx, Address(rsp, rax, Address::times_1, 3*wordSize)); + __ mov(rax, rcx); + __ andptr(rax, 1); // rax, must end up 0 + __ movptr(rdx, Address(rsp, rax, Address::times_1, 3*wordSize)); // obj, notice rax, is 0. // rdx is data dependent on rcx. 
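[Editor's note - not part of the patch] The mov/andptr addressing before the object load and the paired xorptr instructions after the field load exist only to manufacture data dependencies: x ^ v ^ v == x leaves the address unchanged, but forces the second read of the safepoint counter to wait for the speculatively loaded value, so the recheck cannot be executed ahead of the load. The sketch below illustrates the retry protocol with invented names; a C++ compiler would be free to fold the xor pair away, which is why the real code is hand-written assembly.

    #include <cstdint>

    static volatile uint32_t safepoint_counter = 0;   // stand-in for the real HotSpot counter

    bool fast_get_int(const int* field_addr, int* result) {
      uint32_t before = safepoint_counter;
      if (before & 1) return false;                   // odd: safepoint in progress, go slow

      int value = *field_addr;                        // speculative field load

      // counter_addr ^ value ^ value == counter_addr, but the re-read now depends on 'value'.
      uintptr_t addr = (uintptr_t)&safepoint_counter;
      addr ^= (uintptr_t)(uint32_t)value;
      addr ^= (uintptr_t)(uint32_t)value;

      if (*(volatile uint32_t*)addr != before) return false;   // counter moved: retry slowly
      *result = value;
      return true;
    }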
} else { - __ movl (rdx, Address(rsp, 3*wordSize)); // obj + __ movptr(rdx, Address(rsp, 3*wordSize)); // obj } - __ movl (rsi, Address(rsp, 4*wordSize)); // jfieldID - __ movl (rdx, Address(rdx, 0)); // *obj - __ shrl (rsi, 2); // offset + __ movptr(rsi, Address(rsp, 4*wordSize)); // jfieldID + __ movptr(rdx, Address(rdx, 0)); // *obj + __ shrptr(rsi, 2); // offset assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small"); speculative_load_pclist[count++] = __ pc(); - __ movl (rax, Address(rdx, rsi, Address::times_1)); + __ movptr(rax, Address(rdx, rsi, Address::times_1)); +#ifndef _LP64 speculative_load_pclist[count] = __ pc(); - __ movl (rdx, Address(rdx, rsi, Address::times_1, 4)); + __ movl(rdx, Address(rdx, rsi, Address::times_1, 4)); +#endif // _LP64 if (os::is_MP()) { - __ lea (rsi, counter); - __ xorl (rsi, rdx); - __ xorl (rsi, rax); - __ xorl (rsi, rdx); - __ xorl (rsi, rax); + __ lea(rsi, counter); + __ xorptr(rsi, rdx); + __ xorptr(rsi, rax); + __ xorptr(rsi, rdx); + __ xorptr(rsi, rax); __ cmp32(rcx, Address(rsi, 0)); // ca1 is the same as ca because // rax, ^ rdx ^ counter_addr ^ rax, ^ rdx = address @@ -225,7 +224,7 @@ } __ jcc (Assembler::notEqual, slow); - __ popl (rsi); + __ pop (rsi); #ifndef _WINDOWS __ ret (0); @@ -237,7 +236,7 @@ slowcase_entry_pclist[count-1] = __ pc(); slowcase_entry_pclist[count++] = __ pc(); __ bind (slow); - __ popl (rsi); + __ pop (rsi); address slow_case_addr = jni_GetLongField_addr();; // tail call __ jump (ExternalAddress(slow_case_addr)); @@ -279,23 +278,28 @@ __ testb (rcx, 1); __ jcc (Assembler::notZero, slow); if (os::is_MP()) { - __ movl (rax, rcx); - __ andl (rax, 1); // rax, must end up 0 - __ movl (rdx, Address(rsp, rax, Address::times_1, 2*wordSize)); + __ mov(rax, rcx); + __ andptr(rax, 1); // rax, must end up 0 + __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize)); // obj, notice rax, is 0. // rdx is data dependent on rcx. } else { - __ movl (rdx, Address(rsp, 2*wordSize)); // obj + __ movptr(rdx, Address(rsp, 2*wordSize)); // obj } - __ movl (rax, Address(rsp, 3*wordSize)); // jfieldID - __ movl (rdx, Address(rdx, 0)); // *obj - __ shrl (rax, 2); // offset + __ movptr(rax, Address(rsp, 3*wordSize)); // jfieldID + __ movptr(rdx, Address(rdx, 0)); // *obj + __ shrptr(rax, 2); // offset assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); speculative_load_pclist[count] = __ pc(); switch (type) { +#ifndef _LP64 case T_FLOAT: __ fld_s (Address(rdx, rax, Address::times_1)); break; case T_DOUBLE: __ fld_d (Address(rdx, rax, Address::times_1)); break; +#else + case T_FLOAT: __ movflt (xmm0, Address(robj, roffset, Address::times_1)); break; + case T_DOUBLE: __ movdbl (xmm0, Address(robj, roffset, Address::times_1)); break; +#endif // _LP64 default: ShouldNotReachHere(); } @@ -304,8 +308,9 @@ __ fst_s (Address(rsp, -4)); __ lea(rdx, counter); __ movl (rax, Address(rsp, -4)); - __ xorl(rdx, rax); - __ xorl(rdx, rax); + // garbage hi-order bits on 64bit are harmless. 
+ __ xorptr(rdx, rax); + __ xorptr(rdx, rax); __ cmp32(rcx, Address(rdx, 0)); // rax, ^ counter_addr ^ rax, = address // ca1 is data dependent on the field @@ -357,4 +362,3 @@ address JNI_FastGetField::generate_fast_get_double_field() { return generate_fast_get_float_field0(T_DOUBLE); } - --- old/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp 2009-08-01 04:08:04.993462817 +0100 +++ new/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp 2009-08-01 04:08:04.910068387 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)jniFastGetField_x86_64.cpp 1.14 07/09/17 09:26:02 JVM" -#endif /* - * Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2004-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ # include "incls/_precompiled.incl" @@ -70,18 +67,18 @@ ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr()); __ mov32 (rcounter, counter); - __ movq (robj, c_rarg1); + __ mov (robj, c_rarg1); __ testb (rcounter, 1); __ jcc (Assembler::notZero, slow); if (os::is_MP()) { - __ xorq (robj, rcounter); - __ xorq (robj, rcounter); // obj, since + __ xorptr(robj, rcounter); + __ xorptr(robj, rcounter); // obj, since // robj ^ rcounter ^ rcounter == robj // robj is data dependent on rcounter. } - __ movq (robj, Address(robj, 0)); // *obj - __ movq (roffset, c_rarg2); - __ shrq (roffset, 2); // offset + __ movptr(robj, Address(robj, 0)); // *obj + __ mov (roffset, c_rarg2); + __ shrptr(roffset, 2); // offset assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); speculative_load_pclist[count] = __ pc(); @@ -98,8 +95,8 @@ if (os::is_MP()) { __ lea(rcounter_addr, counter); // ca is data dependent on rax. - __ xorq (rcounter_addr, rax); - __ xorq (rcounter_addr, rax); + __ xorptr(rcounter_addr, rax); + __ xorptr(rcounter_addr, rax); __ cmpl (rcounter, Address(rcounter_addr, 0)); } else { __ cmp32 (rcounter, counter); @@ -168,18 +165,18 @@ ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr()); __ mov32 (rcounter, counter); - __ movq (robj, c_rarg1); + __ mov (robj, c_rarg1); __ testb (rcounter, 1); __ jcc (Assembler::notZero, slow); if (os::is_MP()) { - __ xorq (robj, rcounter); - __ xorq (robj, rcounter); // obj, since + __ xorptr(robj, rcounter); + __ xorptr(robj, rcounter); // obj, since // robj ^ rcounter ^ rcounter == robj // robj is data dependent on rcounter. } - __ movq (robj, Address(robj, 0)); // *obj - __ movq (roffset, c_rarg2); - __ shrq (roffset, 2); // offset + __ movptr(robj, Address(robj, 0)); // *obj + __ mov (roffset, c_rarg2); + __ shrptr(roffset, 2); // offset assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); speculative_load_pclist[count] = __ pc(); @@ -193,8 +190,8 @@ __ lea(rcounter_addr, counter); __ movdq (rax, xmm0); // counter address is data dependent on xmm0. 
- __ xorq (rcounter_addr, rax); - __ xorq (rcounter_addr, rax); + __ xorptr(rcounter_addr, rax); + __ xorptr(rcounter_addr, rax); __ cmpl (rcounter, Address(rcounter_addr, 0)); } else { __ cmp32 (rcounter, counter); @@ -225,4 +222,3 @@ address JNI_FastGetField::generate_fast_get_double_field() { return generate_fast_get_float_field0(T_DOUBLE); } - --- old/hotspot/src/cpu/x86/vm/nativeInst_x86.cpp 2009-08-01 04:08:05.892677962 +0100 +++ new/hotspot/src/cpu/x86/vm/nativeInst_x86.cpp 2009-08-01 04:08:05.806683336 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)nativeInst_x86.cpp 1.76 07/09/17 09:29:18 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ # include "incls/_precompiled.incl" @@ -51,7 +48,7 @@ // on x86, nyi. return return_address() + displacement(); } - + void NativeCall::print() { tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT, instruction_address(), destination()); @@ -74,10 +71,10 @@ // the jmp's with the first 4 byte of the new instruction. void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) { assert(Patching_lock->is_locked() || - SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); + SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); assert (instr_addr != NULL, "illegal address for code patching"); - NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call + NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call if (os::is_MP()) { guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned"); } @@ -102,14 +99,14 @@ n_call->wrote(4); // Patch bytes 0-3 - *(jint*)instr_addr = *(jint *)code_buffer; + *(jint*)instr_addr = *(jint *)code_buffer; n_call->wrote(0); #ifdef ASSERT // verify patching for ( int i = 0; i < instruction_size; i++) { - address ptr = (address)((intptr_t)code_buffer + i); + address ptr = (address)((intptr_t)code_buffer + i); int a_byte = (*ptr) & 0xFF; assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed"); } @@ -136,7 +133,7 @@ // Make sure patching code is locked. No two threads can patch at the same // time but one may be executing this code. assert(Patching_lock->is_locked() || - SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); + SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); // Both C1 and C2 should now be generating code which aligns the patched address // to be within a single cache line except that C1 does not do the alignment on // uniprocessor systems. @@ -149,7 +146,7 @@ // Simple case: The destination lies within a single cache line. set_destination(dest); } else if ((uintptr_t)instruction_address() / cache_line_size == - ((uintptr_t)instruction_address()+1) / cache_line_size) { + ((uintptr_t)instruction_address()+1) / cache_line_size) { // Tricky case: The instruction prefix lies within a single cache line. intptr_t disp = dest - return_address(); #ifdef AMD64 @@ -192,7 +189,7 @@ *(short*)instruction_address() = *(short*)patch_disp; // Invalidate. Opteron requires a flush after every write. 
wrote(0); - + debug_only(verify()); guarantee(destination() == dest, "patch succeeded"); } else { @@ -226,49 +223,150 @@ //------------------------------------------------------------------- -#ifndef AMD64 +int NativeMovRegMem::instruction_start() const { + int off = 0; + u_char instr_0 = ubyte_at(off); + + // First check to see if we have a (prefixed or not) xor + if ( instr_0 >= instruction_prefix_wide_lo && // 0x40 + instr_0 <= instruction_prefix_wide_hi) { // 0x4f + off++; + instr_0 = ubyte_at(off); + } + + if (instr_0 == instruction_code_xor) { + off += 2; + instr_0 = ubyte_at(off); + } -void NativeMovRegMem::copy_instruction_to(address new_instruction_address) { - int inst_size = instruction_size; + // Now look for the real instruction and the many prefix/size specifiers. - // See if there's an instruction size prefix override. - if ( *(address(this)) == instruction_operandsize_prefix && - *(address(this)+1) != instruction_code_xmm_code ) { // Not SSE instr - inst_size += 1; + if (instr_0 == instruction_operandsize_prefix ) { // 0x66 + off++; // Not SSE instructions + instr_0 = ubyte_at(off); } - if ( *(address(this)) == instruction_extended_prefix ) inst_size += 1; - for (int i = 0; i < instruction_size; i++) { - *(new_instruction_address + i) = *(address(this) + i); + if ( instr_0 == instruction_code_xmm_ss_prefix || // 0xf3 + instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2 + off++; + instr_0 = ubyte_at(off); } + + if ( instr_0 >= instruction_prefix_wide_lo && // 0x40 + instr_0 <= instruction_prefix_wide_hi) { // 0x4f + off++; + instr_0 = ubyte_at(off); + } + + + if (instr_0 == instruction_extended_prefix ) { // 0x0f + off++; + } + + return off; +} + +address NativeMovRegMem::instruction_address() const { + return addr_at(instruction_start()); +} + +address NativeMovRegMem::next_instruction_address() const { + address ret = instruction_address() + instruction_size; + u_char instr_0 = *(u_char*) instruction_address(); + switch (instr_0) { + case instruction_operandsize_prefix: + + fatal("should have skipped instruction_operandsize_prefix"); + break; + + case instruction_extended_prefix: + fatal("should have skipped instruction_extended_prefix"); + break; + + case instruction_code_mem2reg_movslq: // 0x63 + case instruction_code_mem2reg_movzxb: // 0xB6 + case instruction_code_mem2reg_movsxb: // 0xBE + case instruction_code_mem2reg_movzxw: // 0xB7 + case instruction_code_mem2reg_movsxw: // 0xBF + case instruction_code_reg2mem: // 0x89 (q/l) + case instruction_code_mem2reg: // 0x8B (q/l) + case instruction_code_reg2memb: // 0x88 + case instruction_code_mem2regb: // 0x8a + + case instruction_code_float_s: // 0xd9 fld_s a + case instruction_code_float_d: // 0xdd fld_d a + + case instruction_code_xmm_load: // 0x10 + case instruction_code_xmm_store: // 0x11 + case instruction_code_xmm_lpd: // 0x12 + { + // If there is an SIB then instruction is longer than expected + u_char mod_rm = *(u_char*)(instruction_address() + 1); + if ((mod_rm & 7) == 0x4) { + ret++; + } + } + case instruction_code_xor: + fatal("should have skipped xor lead in"); + break; + + default: + fatal("not a NativeMovRegMem"); + } + return ret; + +} + +int NativeMovRegMem::offset() const{ + int off = data_offset + instruction_start(); + u_char mod_rm = *(u_char*)(instruction_address() + 1); + // nnnn(r12|rsp) isn't coded as simple mod/rm since that is + // the encoding to use an SIB byte. 
Which will have the nnnn + // field off by one byte + if ((mod_rm & 7) == 0x4) { + off++; + } + return int_at(off); +} + +void NativeMovRegMem::set_offset(int x) { + int off = data_offset + instruction_start(); + u_char mod_rm = *(u_char*)(instruction_address() + 1); + // nnnn(r12|rsp) isn't coded as simple mod/rm since that is + // the encoding to use an SIB byte. Which will have the nnnn + // field off by one byte + if ((mod_rm & 7) == 0x4) { + off++; + } + set_int_at(off, x); } void NativeMovRegMem::verify() { // make sure code pattern is actually a mov [reg+offset], reg instruction u_char test_byte = *(u_char*)instruction_address(); - if ( ! ( (test_byte == instruction_code_reg2memb) - || (test_byte == instruction_code_mem2regb) - || (test_byte == instruction_code_mem2regl) - || (test_byte == instruction_code_reg2meml) - || (test_byte == instruction_code_mem2reg_movzxb ) - || (test_byte == instruction_code_mem2reg_movzxw ) - || (test_byte == instruction_code_mem2reg_movsxb ) - || (test_byte == instruction_code_mem2reg_movsxw ) - || (test_byte == instruction_code_float_s) - || (test_byte == instruction_code_float_d) - || (test_byte == instruction_code_long_volatile) ) ) - { - u_char byte1 = ((u_char*)instruction_address())[1]; - u_char byte2 = ((u_char*)instruction_address())[2]; - if ((test_byte != instruction_code_xmm_ss_prefix && - test_byte != instruction_code_xmm_sd_prefix && - test_byte != instruction_operandsize_prefix) || - byte1 != instruction_code_xmm_code || - (byte2 != instruction_code_xmm_load && - byte2 != instruction_code_xmm_lpd && - byte2 != instruction_code_xmm_store)) { - fatal ("not a mov [reg+offs], reg instruction"); - } + switch (test_byte) { + case instruction_code_reg2memb: // 0x88 movb a, r + case instruction_code_reg2mem: // 0x89 movl a, r (can be movq in 64bit) + case instruction_code_mem2regb: // 0x8a movb r, a + case instruction_code_mem2reg: // 0x8b movl r, a (can be movq in 64bit) + break; + + case instruction_code_mem2reg_movslq: // 0x63 movsql r, a + case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb) + case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw) + case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb) + case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw) + break; + + case instruction_code_float_s: // 0xd9 fld_s a + case instruction_code_float_d: // 0xdd fld_d a + case instruction_code_xmm_load: // 0x10 movsd xmm, a + case instruction_code_xmm_store: // 0x11 movsd a, xmm + case instruction_code_xmm_lpd: // 0x12 movlpd xmm, a + break; + + default: + fatal ("not a mov [reg+offs], reg instruction"); } } @@ -282,7 +380,14 @@ void NativeLoadAddress::verify() { // make sure code pattern is actually a mov [reg+offset], reg instruction u_char test_byte = *(u_char*)instruction_address(); - if ( ! (test_byte == instruction_code) ) { +#ifdef _LP64 + if ( (test_byte == instruction_prefix_wide || + test_byte == instruction_prefix_wide_extended) ) { + test_byte = *(u_char*)(instruction_address() + 1); + } +#endif // _LP64 + if ( ! 
((test_byte == lea_instruction_code) + LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) { fatal ("not a lea reg, [reg+offs] instruction"); } } @@ -292,13 +397,11 @@ tty->print_cr("0x%x: lea [reg + %x], reg", instruction_address(), offset()); } -#endif // !AMD64 - //-------------------------------------------------------------------------------- void NativeJump::verify() { if (*(u_char*)instruction_address() != instruction_code) { - fatal("not a jump instruction"); + fatal("not a jump instruction"); } } @@ -342,9 +445,9 @@ // Then patches the last byte and then atomically patches the first word (4-bytes), // thus inserting the desired jump // This code is mt-safe with the following conditions: entry point is 4 byte aligned, -// entry point is in same cache line as unverified entry point, and the instruction being +// entry point is in same cache line as unverified entry point, and the instruction being // patched is >= 5 byte (size of patch). -// +// // In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit. // In C1 the restriction is enforced by CodeEmitter::method_entry // @@ -383,7 +486,7 @@ n_jump->wrote(4); // Patch bytes 0-3 (from jump instruction) - *(int32_t*)verified_entry = *(int32_t *)code_buffer; + *(int32_t*)verified_entry = *(int32_t *)code_buffer; // Invalidate. Opteron requires a flush after every write. n_jump->wrote(0); @@ -392,7 +495,7 @@ void NativePopReg::insert(address code_pos, Register reg) { assert(reg->encoding() < 8, "no space for REX"); assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update"); - *code_pos = (u_char)(instruction_code | reg->encoding()); + *code_pos = (u_char)(instruction_code | reg->encoding()); ICache::invalidate_range(code_pos, instruction_size); } @@ -405,7 +508,7 @@ void NativeGeneralJump::verify() { assert(((NativeInstruction *)this)->is_jump() || - ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction"); + ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction"); } @@ -436,7 +539,7 @@ patch[1] = 0xFE; // jmp to self patch[2] = 0xEB; patch[3] = 0xFE; - + // First patch dummy jmp in place *(int32_t*)instr_addr = *(int32_t *)patch; n_jump->wrote(0); @@ -447,14 +550,14 @@ n_jump->wrote(4); // Patch bytes 0-3 - *(jint*)instr_addr = *(jint *)code_buffer; + *(jint*)instr_addr = *(jint *)code_buffer; n_jump->wrote(0); #ifdef ASSERT // verify patching for ( int i = 0; i < instruction_size; i++) { - address ptr = (address)((intptr_t)code_buffer + i); + address ptr = (address)((intptr_t)code_buffer + i); int a_byte = (*ptr) & 0xFF; assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed"); } @@ -464,14 +567,18 @@ -address NativeGeneralJump::jump_destination() const { +address NativeGeneralJump::jump_destination() const { int op_code = ubyte_at(0); bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F); int offset = (op_code == 0x0F) ? 2 : 1; int length = offset + ((is_rel32off) ? 
4 : 1); - - if (is_rel32off) + + if (is_rel32off) return addr_at(0) + length + int_at(offset); else return addr_at(0) + length + sbyte_at(offset); } + +bool NativeInstruction::is_dtrace_trap() { + return (*(int32_t*)this & 0xff) == 0xcc; +} --- old/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp 2009-08-01 04:08:06.814420320 +0100 +++ new/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp 2009-08-01 04:08:06.727850457 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)nativeInst_x86.hpp 1.81 07/09/17 09:28:55 JVM" -#endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // We have interfaces for the following instructions: @@ -35,7 +32,7 @@ // - - NativeJump // - - NativeIllegalOpCode // - - NativeGeneralJump -// - - NativeReturn +// - - NativeReturn // - - NativeReturnX (return with argument) // - - NativePushConst // - - NativeTstRegMem @@ -53,6 +50,7 @@ }; bool is_nop() { return ubyte_at(0) == nop_instruction_code; } + bool is_dtrace_trap(); inline bool is_call(); inline bool is_illegal(); inline bool is_return(); @@ -66,7 +64,7 @@ s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); } u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); } - + jint int_at(int offset) const { return *(jint*) addr_at(offset); } intptr_t ptr_at(int offset) const { return *(intptr_t*) addr_at(offset); } @@ -86,7 +84,7 @@ public: // unit test stuff - static void test() {} // override for testing + static void test() {} // override for testing inline friend NativeInstruction* nativeInstruction_at(address address); }; @@ -106,11 +104,11 @@ class NativeCall: public NativeInstruction { public: enum Intel_specific_constants { - instruction_code = 0xE8, - instruction_size = 5, - instruction_offset = 0, - displacement_offset = 1, - return_address_offset = 5 + instruction_code = 0xE8, + instruction_size = 5, + instruction_offset = 0, + displacement_offset = 1, + return_address_offset = 5 }; enum { cache_line_size = BytesPerWord }; // conservative estimate! @@ -134,7 +132,7 @@ void verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); } void verify(); void print(); - + // Creation inline friend NativeCall* nativeCall_at(address address); inline friend NativeCall* nativeCall_before(address return_address); @@ -155,7 +153,7 @@ // MT-safe patching of a call instruction. 
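The NativeCall constants just above pin down the x86 call layout this file relies on: opcode 0xE8, five bytes total, and a rel32 displacement starting at byte 1 that is measured from the return address. A minimal standalone sketch of that decoding, independent of the HotSpot helpers and purely illustrative:

#include <cstdint>
#include <cstring>

// Decode the target of a 5-byte x86 call (0xE8 + little-endian rel32).
// The displacement is relative to the return address, i.e. the byte after
// the call instruction (displacement_offset == 1, return_address_offset == 5).
static unsigned char* call_destination(unsigned char* call_addr) {
  int32_t disp;
  std::memcpy(&disp, call_addr + 1, sizeof(disp));
  return call_addr + 5 + disp;
}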
static void insert(address code_pos, address entry); - static void replace_mt_safe(address instr_addr, address code_buffer); + static void replace_mt_safe(address instr_addr, address code_buffer); }; inline NativeCall* nativeCall_at(address address) { @@ -186,12 +184,12 @@ #endif // AMD64 public: enum Intel_specific_constants { - instruction_code = 0xB8, - instruction_size = 1 + rex_size + wordSize, - instruction_offset = 0, - data_offset = 1 + rex_size, - next_instruction_offset = instruction_size, - register_mask = 0x07 + instruction_code = 0xB8, + instruction_size = 1 + rex_size + wordSize, + instruction_offset = 0, + data_offset = 1 + rex_size, + next_instruction_offset = instruction_size, + register_mask = 0x07 }; address instruction_address() const { return addr_at(instruction_offset); } @@ -201,7 +199,7 @@ void verify(); void print(); - + // unit test stuff static void test() {} @@ -237,39 +235,41 @@ } }; -#ifndef AMD64 - // An interface for accessing/manipulating native moves of the form: -// mov[b/w/l] [reg + offset], reg (instruction_code_reg2mem) -// mov[b/w/l] reg, [reg+offset] (instruction_code_mem2reg -// mov[s/z]x[w/b] [reg + offset], reg +// mov[b/w/l/q] [reg + offset], reg (instruction_code_reg2mem) +// mov[b/w/l/q] reg, [reg+offset] (instruction_code_mem2reg +// mov[s/z]x[w/b/q] [reg + offset], reg // fld_s [reg+offset] // fld_d [reg+offset] -// fstp_s [reg + offset] -// fstp_d [reg + offset] +// fstp_s [reg + offset] +// fstp_d [reg + offset] +// mov_literal64 scratch, ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch) // // Warning: These routines must be able to handle any instruction sequences // that are generated as a result of the load/store byte,word,long // macros. For example: The load_unsigned_byte instruction generates // an xor reg,reg inst prior to generating the movb instruction. This -// class must skip the xor instruction. +// class must skip the xor instruction. 
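The skipping this warning asks for is exactly what the new NativeMovRegMem::instruction_start() in the .cpp hunk above does. A simplified, self-contained sketch of the same prefix walk, with the enum values written out as literal bytes (illustrative only, not the HotSpot code):

// Walk past the optional lead-in bytes in front of a reg/mem move and return
// the offset of the real opcode: a REX prefix (0x40-0x4F), an xor reg,reg
// idiom (0x33 plus its ModRM byte), the operand-size prefix (0x66), the SSE
// scalar prefixes (0xF2/0xF3), a second possible REX, then the 0x0F escape.
static int skip_to_opcode(const unsigned char* insn) {
  int off = 0;
  if (insn[off] >= 0x40 && insn[off] <= 0x4F) off++;       // REX prefix
  if (insn[off] == 0x33)                      off += 2;    // xor lead-in + ModRM
  if (insn[off] == 0x66)                      off++;       // operand-size prefix
  if (insn[off] == 0xF2 || insn[off] == 0xF3) off++;       // movsd/movss prefixes
  if (insn[off] >= 0x40 && insn[off] <= 0x4F) off++;       // REX after a prefix
  if (insn[off] == 0x0F)                      off++;       // two-byte opcode escape
  return off;
}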
class NativeMovRegMem: public NativeInstruction { public: enum Intel_specific_constants { - instruction_code_xor = 0x33, - instruction_extended_prefix = 0x0F, + instruction_prefix_wide_lo = Assembler::REX, + instruction_prefix_wide_hi = Assembler::REX_WRXB, + instruction_code_xor = 0x33, + instruction_extended_prefix = 0x0F, + instruction_code_mem2reg_movslq = 0x63, instruction_code_mem2reg_movzxb = 0xB6, instruction_code_mem2reg_movsxb = 0xBE, instruction_code_mem2reg_movzxw = 0xB7, instruction_code_mem2reg_movsxw = 0xBF, instruction_operandsize_prefix = 0x66, - instruction_code_reg2meml = 0x89, - instruction_code_mem2regl = 0x8b, - instruction_code_reg2memb = 0x88, - instruction_code_mem2regb = 0x8a, - instruction_code_float_s = 0xd9, - instruction_code_float_d = 0xdd, + instruction_code_reg2mem = 0x89, + instruction_code_mem2reg = 0x8b, + instruction_code_reg2memb = 0x88, + instruction_code_mem2regb = 0x8a, + instruction_code_float_s = 0xd9, + instruction_code_float_d = 0xdd, instruction_code_long_volatile = 0xdf, instruction_code_xmm_ss_prefix = 0xf3, instruction_code_xmm_sd_prefix = 0xf2, @@ -277,80 +277,25 @@ instruction_code_xmm_load = 0x10, instruction_code_xmm_store = 0x11, instruction_code_xmm_lpd = 0x12, - - instruction_size = 4, - instruction_offset = 0, - data_offset = 2, - next_instruction_offset = 4 + + instruction_size = 4, + instruction_offset = 0, + data_offset = 2, + next_instruction_offset = 4 }; - address instruction_address() const { - if (*addr_at(instruction_offset) == instruction_operandsize_prefix && - *addr_at(instruction_offset+1) != instruction_code_xmm_code) { - return addr_at(instruction_offset+1); // Not SSE instructions - } - else if (*addr_at(instruction_offset) == instruction_extended_prefix) { - return addr_at(instruction_offset+1); - } - else if (*addr_at(instruction_offset) == instruction_code_xor) { - return addr_at(instruction_offset+2); - } - else return addr_at(instruction_offset); - } + // helper + int instruction_start() const; - address next_instruction_address() const { - switch (*addr_at(instruction_offset)) { - case instruction_operandsize_prefix: - if (*addr_at(instruction_offset+1) == instruction_code_xmm_code) - return instruction_address() + instruction_size; // SSE instructions - case instruction_extended_prefix: - return instruction_address() + instruction_size + 1; - case instruction_code_reg2meml: - case instruction_code_mem2regl: - case instruction_code_reg2memb: - case instruction_code_mem2regb: - case instruction_code_xor: - return instruction_address() + instruction_size + 2; - default: - return instruction_address() + instruction_size; - } - } - int offset() const{ - if (*addr_at(instruction_offset) == instruction_operandsize_prefix && - *addr_at(instruction_offset+1) != instruction_code_xmm_code) { - return int_at(data_offset+1); // Not SSE instructions - } - else if (*addr_at(instruction_offset) == instruction_extended_prefix) { - return int_at(data_offset+1); - } - else if (*addr_at(instruction_offset) == instruction_code_xor || - *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix || - *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix || - *addr_at(instruction_offset) == instruction_operandsize_prefix) { - return int_at(data_offset+2); - } - else return int_at(data_offset); - } + address instruction_address() const; - void set_offset(int x) { - if (*addr_at(instruction_offset) == instruction_operandsize_prefix && - *addr_at(instruction_offset+1) != instruction_code_xmm_code) { - 
set_int_at(data_offset+1, x); // Not SSE instructions - } - else if (*addr_at(instruction_offset) == instruction_extended_prefix) { - set_int_at(data_offset+1, x); - } - else if (*addr_at(instruction_offset) == instruction_code_xor || - *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix || - *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix || - *addr_at(instruction_offset) == instruction_operandsize_prefix) { - set_int_at(data_offset+2, x); - } - else set_int_at(data_offset, x); - } + address next_instruction_address() const; + + int offset() const; + + void set_offset(int x); void add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); } - void copy_instruction_to(address new_instruction_address); void verify(); void print (); @@ -387,9 +332,19 @@ // leal reg, [reg + offset] class NativeLoadAddress: public NativeMovRegMem { +#ifdef AMD64 + static const bool has_rex = true; + static const int rex_size = 1; +#else + static const bool has_rex = false; + static const int rex_size = 0; +#endif // AMD64 public: enum Intel_specific_constants { - instruction_code = 0x8D + instruction_prefix_wide = Assembler::REX_W, + instruction_prefix_wide_extended = Assembler::REX_WB, + lea_instruction_code = 0x8D, + mov64_instruction_code = 0xB8 }; void verify(); @@ -408,40 +363,39 @@ } }; -#endif // AMD64 - // jump rel32off class NativeJump: public NativeInstruction { public: enum Intel_specific_constants { - instruction_code = 0xe9, - instruction_size = 5, - instruction_offset = 0, - data_offset = 1, - next_instruction_offset = 5 + instruction_code = 0xe9, + instruction_size = 5, + instruction_offset = 0, + data_offset = 1, + next_instruction_offset = 5 }; address instruction_address() const { return addr_at(instruction_offset); } - address next_instruction_address() const { return addr_at(next_instruction_offset); } + address next_instruction_address() const { return addr_at(next_instruction_offset); } address jump_destination() const { address dest = (int_at(data_offset)+next_instruction_address()); -#ifdef AMD64 // What is this about? + // 32bit used to encode unresolved jmp as jmp -1 + // 64bit can't produce this so it used jump to self. + // Now 32bit and 64bit use jump to self as the unresolved address + // which the inline cache code (and relocs) know about + // return -1 if jump to self dest = (dest == (address) this) ? 
(address) -1 : dest; -#endif // AMD64 return dest; } void set_jump_destination(address dest) { intptr_t val = dest - next_instruction_address(); -#ifdef AMD64 - if (dest == (address) -1) { // can't encode jump to -1 + if (dest == (address) -1) { val = -5; // jump to self - } else { - assert((labs(val) & 0xFFFFFFFF00000000) == 0, - "must be 32bit offset"); } +#ifdef AMD64 + assert((labs(val) & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1"); #endif // AMD64 set_int_at(data_offset, (jint)val); } @@ -483,33 +437,33 @@ instruction_size = 5 }; - address instruction_address() const { return addr_at(0); } - address jump_destination() const; + address instruction_address() const { return addr_at(0); } + address jump_destination() const; // Creation inline friend NativeGeneralJump* nativeGeneralJump_at(address address); // Insertion of native general jump instruction static void insert_unconditional(address code_pos, address entry); - static void replace_mt_safe(address instr_addr, address code_buffer); + static void replace_mt_safe(address instr_addr, address code_buffer); void verify(); }; inline NativeGeneralJump* nativeGeneralJump_at(address address) { NativeGeneralJump* jump = (NativeGeneralJump*)(address); - debug_only(jump->verify();) + debug_only(jump->verify();) return jump; } class NativePopReg : public NativeInstruction { public: enum Intel_specific_constants { - instruction_code = 0x58, - instruction_size = 1, - instruction_offset = 0, - data_offset = 1, - next_instruction_offset = 1 + instruction_code = 0x58, + instruction_size = 1, + instruction_offset = 0, + data_offset = 1, + next_instruction_offset = 1 }; // Insert a pop instruction @@ -520,10 +474,10 @@ class NativeIllegalInstruction: public NativeInstruction { public: enum Intel_specific_constants { - instruction_code = 0x0B0F, // Real byte order is: 0x0F, 0x0B - instruction_size = 2, - instruction_offset = 0, - next_instruction_offset = 2 + instruction_code = 0x0B0F, // Real byte order is: 0x0F, 0x0B + instruction_size = 2, + instruction_offset = 0, + next_instruction_offset = 2 }; // Insert illegal opcode as specific address @@ -534,10 +488,10 @@ class NativeReturn: public NativeInstruction { public: enum Intel_specific_constants { - instruction_code = 0xC3, - instruction_size = 1, - instruction_offset = 0, - next_instruction_offset = 1 + instruction_code = 0xC3, + instruction_size = 1, + instruction_offset = 0, + next_instruction_offset = 1 }; }; @@ -545,10 +499,10 @@ class NativeReturnX: public NativeInstruction { public: enum Intel_specific_constants { - instruction_code = 0xC2, - instruction_size = 2, - instruction_offset = 0, - next_instruction_offset = 2 + instruction_code = 0xC2, + instruction_size = 2, + instruction_offset = 0, + next_instruction_offset = 2 }; }; @@ -560,21 +514,25 @@ }; }; -inline bool NativeInstruction::is_illegal() { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; } -inline bool NativeInstruction::is_call() { return ubyte_at(0) == NativeCall::instruction_code; } -inline bool NativeInstruction::is_return() { return ubyte_at(0) == NativeReturn::instruction_code || - ubyte_at(0) == NativeReturnX::instruction_code; } +inline bool NativeInstruction::is_illegal() { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; } +inline bool NativeInstruction::is_call() { return ubyte_at(0) == NativeCall::instruction_code; } +inline bool NativeInstruction::is_return() { return ubyte_at(0) == NativeReturn::instruction_code || + 
ubyte_at(0) == NativeReturnX::instruction_code; } inline bool NativeInstruction::is_jump() { return ubyte_at(0) == NativeJump::instruction_code || ubyte_at(0) == 0xEB; /* short jump */ } inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ || (ubyte_at(0) & 0xF0) == 0x70; /* short jump */ } inline bool NativeInstruction::is_safepoint_poll() { #ifdef AMD64 - return ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl && - ubyte_at(1) == 0x05 && // 00 rax 101 - ((intptr_t) addr_at(6)) + int_at(2) == (intptr_t) os::get_polling_page(); + if ( ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl && + ubyte_at(1) == 0x05 ) { // 00 rax 101 + address fault = addr_at(6) + int_at(2); + return os::is_poll_address(fault); + } else { + return false; + } #else - return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2regl || + return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg || ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) && (ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */ (os::is_poll_address((address)int_at(2))); @@ -589,4 +547,3 @@ return false; #endif // AMD64 } - --- old/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp 2009-08-01 04:08:07.707244843 +0100 +++ new/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp 2009-08-01 04:08:07.624366146 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)register_definitions_x86.cpp 1.13 07/09/17 09:28:17 JVM" -#endif /* - * Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,12 +19,9 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ -// make sure the defines don't screw up the declarations later on in this file -#define DONT_USE_REGISTER_DEFINES - #include "incls/_precompiled.incl" #include "incls/_register_definitions_x86.cpp.incl" @@ -109,6 +103,7 @@ REGISTER_DEFINITION(Register, rscratch1); REGISTER_DEFINITION(Register, rscratch2); +REGISTER_DEFINITION(Register, r12_heapbase); REGISTER_DEFINITION(Register, r15_thread); #endif // AMD64 --- old/hotspot/src/cpu/x86/vm/relocInfo_x86.cpp 2009-08-01 04:08:08.537430716 +0100 +++ new/hotspot/src/cpu/x86/vm/relocInfo_x86.cpp 2009-08-01 04:08:08.460542525 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)relocInfo_x86.cpp 1.19 07/09/17 09:28:01 JVM" -#endif /* - * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ # include "incls/_precompiled.incl" @@ -33,11 +30,15 @@ #ifdef AMD64 x += o; typedef Assembler::WhichOperand WhichOperand; - WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm64, call32 + WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm, call32, narrow oop assert(which == Assembler::disp32_operand || - which == Assembler::imm64_operand, "format unpacks ok"); - if (which == Assembler::imm64_operand) { + which == Assembler::narrow_oop_operand || + which == Assembler::imm_operand, "format unpacks ok"); + if (which == Assembler::imm_operand) { *pd_address_in_code() = x; + } else if (which == Assembler::narrow_oop_operand) { + address disp = Assembler::locate_operand(addr(), which); + *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x); } else { // Note: Use runtime_call_type relocations for call32_operand. address ip = addr(); @@ -80,11 +81,16 @@ nativeCall_at(addr())->set_destination(x); } else if (ni->is_jump()) { NativeJump* nj = nativeJump_at(addr()); -#ifdef AMD64 + + // Unresolved jumps are recognized by a destination of -1 + // However 64bit can't actually produce such an address + // and encodes a jump to self but jump_destination will + // return a -1 as the signal. We must not relocate this + // jmp or the ic code will not see it as unresolved. + if (nj->jump_destination() == (address) -1) { - x = (address) -1; // retain jump to self + x = addr(); // jump to self } -#endif // AMD64 nj->set_jump_destination(x); } else if (ni->is_cond_jump()) { // %%%% kludge this, for now, until we get a jump_destination method @@ -105,19 +111,19 @@ // we must parse the instruction a bit to find the embedded word. assert(is_data(), "must be a DataRelocation"); typedef Assembler::WhichOperand WhichOperand; - WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm64/imm32 + WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm/imm32 #ifdef AMD64 assert(which == Assembler::disp32_operand || which == Assembler::call32_operand || - which == Assembler::imm64_operand, "format unpacks ok"); - if (which != Assembler::imm64_operand) { + which == Assembler::imm_operand, "format unpacks ok"); + if (which != Assembler::imm_operand) { // The "address" in the code is a displacement can't return it as // and address* since it is really a jint* ShouldNotReachHere(); return NULL; } #else - assert(which == Assembler::disp32_operand || which == Assembler::imm32_operand, "format unpacks ok"); + assert(which == Assembler::disp32_operand || which == Assembler::imm_operand, "format unpacks ok"); #endif // AMD64 return (address*) Assembler::locate_operand(addr(), which); } @@ -130,11 +136,11 @@ // we must parse the instruction a bit to find the embedded word. 
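Both the data relocations in this file and the poll relocations added further down manipulate the same disp32 shape: the 32-bit field embedded in the instruction holds the target minus the address of the next instruction, so moving the code means recomputing that delta. A small illustrative helper under that assumption (not the HotSpot API):

#include <cstdint>
#include <cassert>

// Compute the rel32/disp32 value to embed so that an instruction ending at
// next_instruction reaches target. Even on 64-bit the delta must still fit
// in 32 bits, which is what the jump patching code asserts.
static int32_t rip_relative_disp(const unsigned char* next_instruction,
                                 const unsigned char* target) {
  intptr_t delta = (intptr_t)(target - next_instruction);
  assert(delta == (int32_t)delta && "target out of rel32 range");
  return (int32_t)delta;
}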
assert(is_data(), "must be a DataRelocation"); typedef Assembler::WhichOperand WhichOperand; - WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm64/imm32 + WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm/imm32 assert(which == Assembler::disp32_operand || which == Assembler::call32_operand || - which == Assembler::imm64_operand, "format unpacks ok"); - if (which != Assembler::imm64_operand) { + which == Assembler::imm_operand, "format unpacks ok"); + if (which != Assembler::imm_operand) { address ip = addr(); address disp = Assembler::locate_operand(ip, which); address next_ip = Assembler::locate_next_instruction(ip); @@ -168,3 +174,44 @@ NativeInstruction* ni = nativeInstruction_at(x); *(short*)ni->addr_at(0) = instrs[0]; } + +void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { +#ifdef _LP64 + typedef Assembler::WhichOperand WhichOperand; + WhichOperand which = (WhichOperand) format(); + // This format is imm but it is really disp32 + which = Assembler::disp32_operand; + address orig_addr = old_addr_for(addr(), src, dest); + NativeInstruction* oni = nativeInstruction_at(orig_addr); + int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which); + // This poll_addr is incorrect by the size of the instruction it is irrelevant + intptr_t poll_addr = (intptr_t)oni + *orig_disp; + + NativeInstruction* ni = nativeInstruction_at(addr()); + intptr_t new_disp = poll_addr - (intptr_t) ni; + + int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which); + * disp = (int32_t)new_disp; + +#endif // _LP64 +} + +void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { +#ifdef _LP64 + typedef Assembler::WhichOperand WhichOperand; + WhichOperand which = (WhichOperand) format(); + // This format is imm but it is really disp32 + which = Assembler::disp32_operand; + address orig_addr = old_addr_for(addr(), src, dest); + NativeInstruction* oni = nativeInstruction_at(orig_addr); + int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which); + // This poll_addr is incorrect by the size of the instruction it is irrelevant + intptr_t poll_addr = (intptr_t)oni + *orig_disp; + + NativeInstruction* ni = nativeInstruction_at(addr()); + intptr_t new_disp = poll_addr - (intptr_t) ni; + + int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which); + * disp = (int32_t)new_disp; +#endif // _LP64 +} --- old/hotspot/src/cpu/x86/vm/relocInfo_x86.hpp 2009-08-01 04:08:09.414597955 +0100 +++ new/hotspot/src/cpu/x86/vm/relocInfo_x86.hpp 2009-08-01 04:08:09.340986857 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)relocInfo_x86.hpp 1.18 07/05/05 17:04:19 JVM" -#endif /* - * Copyright 1997-1999 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // machine-dependent parts of class relocInfo @@ -32,5 +29,10 @@ offset_unit = 1, // Encodes Assembler::disp32_operand vs. Assembler::imm32_operand. +#ifndef AMD64 format_width = 1 +#else + // vs Assembler::narrow_oop_operand. 
+ format_width = 2 +#endif }; --- old/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp 2009-08-01 04:08:10.214154961 +0100 +++ new/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp 2009-08-01 04:08:10.135882580 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)runtime_x86_32.cpp 1.113 07/09/17 09:26:02 JVM" -#endif /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ @@ -36,7 +33,7 @@ //------------------------------generate_exception_blob--------------------------- // creates exception blob at the end // Using exception blob, this code is jumped from a compiled method. -// +// // Given an exception pc at a call we call into the runtime for the // handler in this method. This handler might merely restore state // (i.e. callee save registers) unwind the frame and jump to the @@ -44,7 +41,7 @@ // for the nmethod. // // This code is entered with a jmp. -// +// // Arguments: // rax,: exception oop // rdx: exception pc @@ -53,56 +50,56 @@ // rax,: exception oop // rdx: exception pc in caller or ??? // destination: exception handler of caller -// +// // Note: the exception pc MUST be at a call (precise debug information) // Only register rax, rdx, rcx are not callee saved. // void OptoRuntime::generate_exception_blob() { - // Capture info about frame layout - enum layout { - thread_off, // last_java_sp + // Capture info about frame layout + enum layout { + thread_off, // last_java_sp // The frame sender code expects that rbp will be in the "natural" place and // will override any oopMap setting for it. We must therefore force the layout // so that it agrees with the frame sender code. - rbp_off, + rbp_off, return_off, // slot for return address - framesize + framesize }; // allocate space for the code ResourceMark rm; - // setup code generation tools + // setup code generation tools CodeBuffer buffer("exception_blob", 512, 512); MacroAssembler* masm = new MacroAssembler(&buffer); OopMapSet *oop_maps = new OopMapSet(); - address start = __ pc(); + address start = __ pc(); - __ pushl(rdx); - __ subl(rsp, return_off * wordSize); // Prolog! + __ push(rdx); + __ subptr(rsp, return_off * wordSize); // Prolog! // rbp, location is implicitly known - __ movl(Address(rsp,rbp_off *wordSize),rbp); - + __ movptr(Address(rsp,rbp_off *wordSize), rbp); + // Store exception in Thread object. We cannot pass any arguments to the // handle_exception call, since we do not want to make any assumption // about the size of the frame where the exception happened in. __ get_thread(rcx); - __ movl(Address(rcx, JavaThread::exception_oop_offset()), rax); - __ movl(Address(rcx, JavaThread::exception_pc_offset()), rdx); + __ movptr(Address(rcx, JavaThread::exception_oop_offset()), rax); + __ movptr(Address(rcx, JavaThread::exception_pc_offset()), rdx); // This call does all the hard work. It checks if an exception handler - // exists in the method. + // exists in the method. // If so, it returns the handler address. - // If not, it prepares for stack-unwinding, restoring the callee-save + // If not, it prepares for stack-unwinding, restoring the callee-save // registers of the frame being removed. 
- // - __ movl(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument + // + __ movptr(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument __ set_last_Java_frame(rcx, noreg, noreg, NULL); - + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C))); // No registers to map, rbp is known implicitly @@ -111,34 +108,34 @@ __ reset_last_Java_frame(rcx, false, false); // Restore callee-saved registers - __ movl(rbp, Address(rsp, rbp_off * wordSize)); + __ movptr(rbp, Address(rsp, rbp_off * wordSize)); - __ addl(rsp, return_off * wordSize); // Epilog! - __ popl(rdx); // Exception pc + __ addptr(rsp, return_off * wordSize); // Epilog! + __ pop(rdx); // Exception pc // rax,: exception handler for given - + // We have a handler in rax, (could be deopt blob) // rdx - throwing pc, deopt blob will need it. - __ pushl(rax); + __ push(rax); // rcx contains handler address __ get_thread(rcx); // TLS // Get the exception - __ movl(rax, Address(rcx, JavaThread::exception_oop_offset())); + __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset())); // Get the exception pc in case we are deoptimized - __ movl(rdx, Address(rcx, JavaThread::exception_pc_offset())); + __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset())); #ifdef ASSERT - __ movl(Address(rcx, JavaThread::exception_handler_pc_offset()), 0); - __ movl(Address(rcx, JavaThread::exception_pc_offset()), 0); + __ movptr(Address(rcx, JavaThread::exception_handler_pc_offset()), (int32_t)NULL_WORD); + __ movptr(Address(rcx, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); #endif // Clear the exception oop so GC no longer processes it as a root. - __ movl(Address(rcx, JavaThread::exception_oop_offset()), 0); + __ movptr(Address(rcx, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD); - __ popl(rcx); + __ pop(rcx); // rax,: exception oop // rcx: exception handler @@ -147,7 +144,7 @@ // ------------- // make sure all code is generated - masm->flush(); + masm->flush(); - _exception_blob = ExceptionBlob::create(&buffer, oop_maps, framesize); + _exception_blob = ExceptionBlob::create(&buffer, oop_maps, framesize); } --- old/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp 2009-08-01 04:08:11.147273506 +0100 +++ new/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp 2009-08-01 04:08:11.053611104 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)sharedRuntime_x86_32.cpp 1.56 07/09/17 09:26:01 JVM" -#endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ #include "incls/_precompiled.incl" @@ -45,7 +42,7 @@ class RegisterSaver { enum { FPU_regs_live = 8 /*for the FPU stack*/+8/*eight more for XMM registers*/ }; // Capture info about frame layout - enum layout { + enum layout { fpu_state_off = 0, fpu_state_end = fpu_state_off+FPUStateSizeInWords-1, st0_off, st0H_off, @@ -55,34 +52,34 @@ st4_off, st4H_off, st5_off, st5H_off, st6_off, st6H_off, - st7_off, st7H_off, + st7_off, st7H_off, - xmm0_off, xmm0H_off, - xmm1_off, xmm1H_off, - xmm2_off, xmm2H_off, - xmm3_off, xmm3H_off, - xmm4_off, xmm4H_off, - xmm5_off, xmm5H_off, - xmm6_off, xmm6H_off, - xmm7_off, xmm7H_off, + xmm0_off, xmm0H_off, + xmm1_off, xmm1H_off, + xmm2_off, xmm2H_off, + xmm3_off, xmm3H_off, + xmm4_off, xmm4H_off, + xmm5_off, xmm5H_off, + xmm6_off, xmm6H_off, + xmm7_off, xmm7H_off, flags_off, - rdi_off, + rdi_off, rsi_off, ignore_off, // extra copy of rbp, rsp_off, rbx_off, rdx_off, rcx_off, - rax_off, + rax_off, // The frame sender code expects that rbp will be in the "natural" place and // will override any oopMap setting for it. We must therefore force the layout // so that it agrees with the frame sender code. - rbp_off, + rbp_off, return_off, // slot for return address - reg_save_size }; + reg_save_size }; - - public: + + public: static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool verify_fpu = true); @@ -118,15 +115,15 @@ assert(FPUStateSizeInWords == 27, "update stack layout"); - // save registers, fpu state, and flags + // save registers, fpu state, and flags // We assume caller has already has return address slot on the stack // We push epb twice in this sequence because we want the real rbp, - // to be under the return like a normal enter and we want to use pushad + // to be under the return like a normal enter and we want to use pusha // We push by hand instead of pusing push __ enter(); - __ pushad(); - __ pushfd(); - __ subl(rsp,FPU_regs_live*sizeof(jdouble)); // Push FPU registers space + __ pusha(); + __ pushf(); + __ subptr(rsp,FPU_regs_live*sizeof(jdouble)); // Push FPU registers space __ push_FPU_state(); // Save FPU state & init if (verify_fpu) { @@ -159,7 +156,7 @@ __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); } - // Save the FPU registers in de-opt-able form + // Save the FPU registers in de-opt-able form __ fstp_d(Address(rsp, st0_off*wordSize)); // st(0) __ fstp_d(Address(rsp, st1_off*wordSize)); // st(1) @@ -196,7 +193,7 @@ // debug-info recordings, as well as let GC find all oops. OopMapSet *oop_maps = new OopMapSet(); - OopMap* map = new OopMap( frame_words, 0 ); + OopMap* map = new OopMap( frame_words, 0 ); #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words) @@ -273,12 +270,12 @@ __ movdbl(xmm7,Address(rsp,xmm7_off*wordSize)); } __ pop_FPU_state(); - __ addl(rsp,FPU_regs_live*sizeof(jdouble)); // Pop FPU registers + __ addptr(rsp, FPU_regs_live*sizeof(jdouble)); // Pop FPU registers - __ popfd(); - __ popad(); + __ popf(); + __ popa(); // Get the rbp, described implicitly by the frame sender code (no oopMap) - __ popl(rbp); + __ pop(rbp); } @@ -289,9 +286,9 @@ // caller of the deoptee has been extracted into the vframeArray // and will be stuffed into the c2i adapter we create for later // restoration so only result registers need to be restored here. 
- // + // - __ frstor(Address(rsp, 0)); // Restore fpu state + __ frstor(Address(rsp, 0)); // Restore fpu state // Recover XMM & FPU state if( UseSSE == 1 ) { @@ -299,23 +296,23 @@ } else if( UseSSE >= 2 ) { __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize)); } - __ movl(rax, Address(rsp, rax_off*wordSize)); - __ movl(rdx, Address(rsp, rdx_off*wordSize)); + __ movptr(rax, Address(rsp, rax_off*wordSize)); + __ movptr(rdx, Address(rsp, rdx_off*wordSize)); // Pop all of the register save are off the stack except the return address - __ addl(rsp, return_off * wordSize); + __ addptr(rsp, return_off * wordSize); } // The java_calling_convention describes stack locations as ideal slots on // a frame with no abi restrictions. Since we must observe abi restrictions // (like the placement of the register window) the slots must be biased by // the following value. -static int reg2offset_in(VMReg r) { +static int reg2offset_in(VMReg r) { // Account for saved rbp, and return address // This should really be in_preserve_stack_slots return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size; } -static int reg2offset_out(VMReg r) { +static int reg2offset_out(VMReg r) { return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; } @@ -328,7 +325,7 @@ // VMRegImpl::stack0 refers to the first slot 0(sp). // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher. Register // up to RegisterImpl::number_of_registers) are the 32-bit -// integer registers. +// integer registers. // Pass first two oop/int args in registers ECX and EDX. // Pass first two float/double args in registers XMM0 and XMM1. @@ -341,7 +338,7 @@ // --------------------------------------------------------------------------- -// The compiled Java calling convention. +// The compiled Java calling convention. // Pass first two oop/int args in registers ECX and EDX. // Pass first two float/double args in registers XMM0 and XMM1. // Doubles have precedence, so if you pass a mix of floats and doubles @@ -351,12 +348,12 @@ int total_args_passed, int is_outgoing) { uint stack = 0; // Starting stack position for args on stack - - + + // Pass first two oop/int args in registers ECX and EDX. uint reg_arg0 = 9999; uint reg_arg1 = 9999; - + // Pass first two float/double args in registers XMM0 and XMM1. // Doubles have precedence, so if you pass a mix of floats and doubles // the doubles will grab the registers before the floats will. @@ -370,7 +367,7 @@ uint fargs = (UseSSE>=2) ? 2 : UseSSE; uint freg_arg0 = 9999+fargs; uint freg_arg1 = 9999+fargs; - + // Pass doubles & longs aligned on the stack. First count stack slots for doubles int i; for( i = 0; i < total_args_passed; i++) { @@ -385,7 +382,7 @@ } } int dstack = 0; // Separate counter for placing doubles - + // Now pick where all else goes. 
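As a rough picture of the register assignment the comments above describe (first two oop/int args in ECX and EDX, first two float/double args in XMM0 and XMM1, everything else on the stack), here is a toy model; it deliberately leaves out the double-over-float precedence and the long/double stack alignment that the real loop handles:

// Toy placement for the 32-bit compiled Java calling convention sketched in
// the comments above; not the real SharedRuntime logic.
enum ArgHome { in_ECX, in_EDX, in_XMM0, in_XMM1, on_stack };

static ArgHome place_int_or_oop(int int_args_seen) {
  if (int_args_seen == 0) return in_ECX;
  if (int_args_seen == 1) return in_EDX;
  return on_stack;
}

static ArgHome place_float_or_double(int fp_args_seen) {
  if (fp_args_seen == 0) return in_XMM0;
  if (fp_args_seen == 1) return in_XMM1;
  return on_stack;
}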
for( i = 0; i < total_args_passed; i++) { // From the type and the argument number (count) compute the location @@ -419,13 +416,13 @@ regs[i].set1(VMRegImpl::stack2reg(stack++)); } break; - case T_LONG: - assert(sig_bt[i+1] == T_VOID, "missing Half" ); + case T_LONG: + assert(sig_bt[i+1] == T_VOID, "missing Half" ); regs[i].set2(VMRegImpl::stack2reg(dstack)); dstack += 2; break; case T_DOUBLE: - assert(sig_bt[i+1] == T_VOID, "missing Half" ); + assert(sig_bt[i+1] == T_VOID, "missing Half" ); if( freg_arg0 == (uint)i ) { regs[i].set2(xmm0->as_VMReg()); } else if( freg_arg1 == (uint)i ) { @@ -451,22 +448,22 @@ static void patch_callers_callsite(MacroAssembler *masm) { Label L; __ verify_oop(rbx); - __ cmpl(Address(rbx, in_bytes(methodOopDesc::code_offset())), NULL_WORD); + __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); // Schedule the branch target address early. // Call into the VM to patch the caller, then jump to compiled callee // rax, isn't live so capture return address while we easily can - __ movl(rax, Address(rsp, 0)); - __ pushad(); - __ pushfd(); + __ movptr(rax, Address(rsp, 0)); + __ pusha(); + __ pushf(); if (UseSSE == 1) { - __ subl(rsp, 2*wordSize); + __ subptr(rsp, 2*wordSize); __ movflt(Address(rsp, 0), xmm0); __ movflt(Address(rsp, wordSize), xmm1); } if (UseSSE >= 2) { - __ subl(rsp, 4*wordSize); + __ subptr(rsp, 4*wordSize); __ movdbl(Address(rsp, 0), xmm0); __ movdbl(Address(rsp, 2*wordSize), xmm1); } @@ -480,26 +477,26 @@ #endif /* COMPILER2 */ // VM needs caller's callsite - __ pushl(rax); + __ push(rax); // VM needs target method - __ pushl(rbx); + __ push(rbx); __ verify_oop(rbx); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite))); - __ addl(rsp, 2*wordSize); + __ addptr(rsp, 2*wordSize); if (UseSSE == 1) { __ movflt(xmm0, Address(rsp, 0)); __ movflt(xmm1, Address(rsp, wordSize)); - __ addl(rsp, 2*wordSize); + __ addptr(rsp, 2*wordSize); } if (UseSSE >= 2) { __ movdbl(xmm0, Address(rsp, 0)); __ movdbl(xmm1, Address(rsp, 2*wordSize)); - __ addl(rsp, 4*wordSize); + __ addptr(rsp, 4*wordSize); } - __ popfd(); - __ popad(); + __ popf(); + __ popa(); __ bind(L); } @@ -509,13 +506,13 @@ if (TaggedStackInterpreter) { int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0); if (sig == T_OBJECT || sig == T_ARRAY) { - __ movl(Address(rsp, tag_offset), frame::TagReference); + __ movptr(Address(rsp, tag_offset), frame::TagReference); } else if (sig == T_LONG || sig == T_DOUBLE) { int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1); - __ movl(Address(rsp, next_tag_offset), frame::TagValue); - __ movl(Address(rsp, tag_offset), frame::TagValue); + __ movptr(Address(rsp, next_tag_offset), frame::TagValue); + __ movptr(Address(rsp, tag_offset), frame::TagValue); } else { - __ movl(Address(rsp, tag_offset), frame::TagValue); + __ movptr(Address(rsp, tag_offset), frame::TagValue); } } } @@ -564,12 +561,12 @@ int extraspace = total_args_passed * Interpreter::stackElementSize(); // Get return address - __ popl(rax); + __ pop(rax); // set senderSP value - __ movl(rsi, rsp); + __ movptr(rsi, rsp); - __ subl(rsp, extraspace); + __ subptr(rsp, extraspace); // Now write the args into the outgoing interpreter space for (int i = 0; i < total_args_passed; i++) { @@ -580,6 +577,8 @@ // st_off points to lowest address on stack. 
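The slot addressing described here reduces to one line: argument i of total_args_passed lands at an offset that shrinks as i grows, so the last argument sits nearest the top of the extra space the adapter just reserved. A trivial model of that formula (element_size stands in for Interpreter::stackElementSize()):

// st_off for argument i in the c2i adapter's freshly reserved interpreter area.
static int interpreter_arg_offset(int i, int total_args_passed, int element_size) {
  return ((total_args_passed - 1) - i) * element_size;
}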
int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize(); + int next_off = st_off - Interpreter::stackElementSize(); + // Say 4 args: // i st_off // 0 12 T_LONG @@ -593,24 +592,31 @@ continue; } - if (r_1->is_stack()) { + if (r_1->is_stack()) { // memory to memory use fpu stack top int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace; if (!r_2->is_valid()) { __ movl(rdi, Address(rsp, ld_off)); - __ movl(Address(rsp, st_off), rdi); + __ movptr(Address(rsp, st_off), rdi); tag_stack(masm, sig_bt[i], st_off); } else { // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW // st_off == MSW, st_off-wordSize == LSW - int next_off = st_off - Interpreter::stackElementSize(); - __ movl(rdi, Address(rsp, ld_off)); - __ movl(Address(rsp, next_off), rdi); - __ movl(rdi, Address(rsp, ld_off + wordSize)); - __ movl(Address(rsp, st_off), rdi); + __ movptr(rdi, Address(rsp, ld_off)); + __ movptr(Address(rsp, next_off), rdi); +#ifndef _LP64 + __ movptr(rdi, Address(rsp, ld_off + wordSize)); + __ movptr(Address(rsp, st_off), rdi); +#else +#ifdef ASSERT + // Overwrite the unused slot with known junk + __ mov64(rax, CONST64(0xdeadffffdeadaaaa)); + __ movptr(Address(rsp, st_off), rax); +#endif /* ASSERT */ +#endif // _LP64 tag_stack(masm, sig_bt[i], next_off); } } else if (r_1->is_Register()) { @@ -620,7 +626,22 @@ tag_stack(masm, sig_bt[i], st_off); } else { // long/double in gpr - ShouldNotReachHere(); + NOT_LP64(ShouldNotReachHere()); + // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG + // T_DOUBLE and T_LONG use two slots in the interpreter + if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { + // long/double in gpr +#ifdef ASSERT + // Overwrite the unused slot with known junk + LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab))); + __ movptr(Address(rsp, st_off), rax); +#endif /* ASSERT */ + __ movptr(Address(rsp, next_off), r); + tag_stack(masm, sig_bt[i], next_off); + } else { + __ movptr(Address(rsp, st_off), r); + tag_stack(masm, sig_bt[i], st_off); + } } } else { assert(r_1->is_XMMRegister(), ""); @@ -635,9 +656,9 @@ } // Schedule the branch target address early. - __ movl(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset()))); + __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset()))); // And repush original return address - __ pushl(rax); + __ push(rax); __ jmp(rcx); } @@ -648,11 +669,11 @@ int next_val_off = ld_off - Interpreter::stackElementSize(); if (TaggedStackInterpreter) { // use tag slot temporarily for MSW - __ movl(rsi, Address(saved_sp, ld_off)); - __ movl(Address(saved_sp, next_val_off+wordSize), rsi); + __ movptr(rsi, Address(saved_sp, ld_off)); + __ movptr(Address(saved_sp, next_val_off+wordSize), rsi); __ movdbl(r, Address(saved_sp, next_val_off)); // restore tag - __ movl(Address(saved_sp, next_val_off+wordSize), frame::TagValue); + __ movptr(Address(saved_sp, next_val_off+wordSize), frame::TagValue); } else { __ movdbl(r, Address(saved_sp, next_val_off)); } @@ -672,7 +693,7 @@ // .word Interpreter::return_sentinel // .word address_of_compiled_return_point // return_entry_point: blah_blah_blah - // + // // So we can find the appropriate return point by loading up the word // just prior to the current return address we have on the stack. // @@ -688,7 +709,7 @@ // code goes non-entrant while we get args ready. 
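The #ifdef _LP64 blocks and the LP64_ONLY / NOT_LP64 wrappers used throughout these adapter hunks follow the usual pattern of expanding to their argument on one word size and to nothing on the other. The sketch below shows that shape; the definitions are written from memory for illustration and are not quoted from the HotSpot sources:

#include <cstdio>

// Word-size conditionals in the style this patch relies on: code wrapped in
// LP64_ONLY survives only in 64-bit builds, NOT_LP64 only in 32-bit builds.
#ifdef _LP64
  #define LP64_ONLY(code) code
  #define NOT_LP64(code)
#else
  #define LP64_ONLY(code)
  #define NOT_LP64(code) code
#endif

int main() {
  LP64_ONLY(std::printf("64-bit: a long/double argument moves as one 8-byte slot\n"));
  NOT_LP64(std::printf("32-bit: a long/double argument moves as two 4-byte halves\n"));
  return 0;
}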
// Pick up the return address - __ movl(rax, Address(rsp, 0)); + __ movptr(rax, Address(rsp, 0)); // If UseSSE >= 2 then no cleanup is needed on the return to the // interpreter so skip fixing up the return entry point unless @@ -699,10 +720,10 @@ // cleanup than if the interpreter returned to the call stub. ExternalAddress stub_return_address(StubRoutines::_call_stub_return_address); - __ cmp32(rax, stub_return_address.addr()); + __ cmpptr(rax, stub_return_address.addr()); __ jcc(Assembler::notEqual, chk_int); - assert(StubRoutines::i486::get_call_stub_compiled_return() != NULL, "must be set"); - __ lea(rax, ExternalAddress(StubRoutines::i486::get_call_stub_compiled_return())); + assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set"); + __ lea(rax, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return())); __ jmp(skip); // It must be the interpreter since we never get here via a c2i (unlike Azul) @@ -711,13 +732,13 @@ #ifdef ASSERT { Label ok; - __ cmpl(Address(rax, -8), Interpreter::return_sentinel); + __ cmpl(Address(rax, -2*wordSize), Interpreter::return_sentinel); __ jcc(Assembler::equal, ok); __ int3(); __ bind(ok); } #endif // ASSERT - __ movl(rax, Address(rax, -4)); + __ movptr(rax, Address(rax, -wordSize)); __ bind(skip); } @@ -726,7 +747,7 @@ // Must preserve original SP for loading incoming arguments because // we need to align the outgoing SP for compiled code. - __ movl(rdi, rsp); + __ movptr(rdi, rsp); // Cut-out for having no stack args. Since up to 2 int/oop args are passed // in registers, we will occasionally have no stack args. @@ -740,24 +761,24 @@ comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord; // Round up to miminum stack alignment, in wordSize comp_words_on_stack = round_to(comp_words_on_stack, 2); - __ subl(rsp, comp_words_on_stack * wordSize); + __ subptr(rsp, comp_words_on_stack * wordSize); } // Align the outgoing SP - __ andl(rsp, -(StackAlignmentInBytes)); + __ andptr(rsp, -(StackAlignmentInBytes)); // push the return address on the stack (note that pushing, rather // than storing it, yields the correct frame alignment for the callee) - __ pushl(rax); + __ push(rax); // Put saved SP in another register const Register saved_sp = rax; - __ movl(saved_sp, rdi); + __ movptr(saved_sp, rdi); // Will jump to the compiled code just as if compiled code was doing it. // Pre-load the register-jump target early, to schedule it better. - __ movl(rdi, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset()))); + __ movptr(rdi, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset()))); // Now generate the shuffle code. Pick up all register args and move the // rest through the floating point stack top. @@ -769,8 +790,8 @@ continue; } - // Pick up 0, 1 or 2 words from SP+offset. - + // Pick up 0, 1 or 2 words from SP+offset. + assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?"); // Load in argument order going down. @@ -778,7 +799,7 @@ // Point to interpreter value (vs. 
tag) int next_off = ld_off - Interpreter::stackElementSize(); // - // + // // VMReg r_1 = regs[i].first(); VMReg r_2 = regs[i].second(); @@ -786,18 +807,18 @@ assert(!r_2->is_valid(), ""); continue; } - if (r_1->is_stack()) { + if (r_1->is_stack()) { // Convert stack slot to an SP offset (+ wordSize to account for return address ) int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize; // We can use rsi as a temp here because compiled code doesn't need rsi as an input // and if we end up going thru a c2i because of a miss a reasonable value of rsi - // we be generated. + // we be generated. if (!r_2->is_valid()) { // __ fld_s(Address(saved_sp, ld_off)); // __ fstp_s(Address(rsp, st_off)); __ movl(rsi, Address(saved_sp, ld_off)); - __ movl(Address(rsp, st_off), rsi); + __ movptr(Address(rsp, st_off), rsi); } else { // Interpreter local[n] == MSW, local[n+1] == LSW however locals // are accessed as negative so LSW is at LOW address @@ -806,20 +827,44 @@ // st_off is LSW (i.e. reg.first()) // __ fld_d(Address(saved_sp, next_off)); // __ fstp_d(Address(rsp, st_off)); - __ movl(rsi, Address(saved_sp, next_off)); - __ movl(Address(rsp, st_off), rsi); - __ movl(rsi, Address(saved_sp, ld_off)); - __ movl(Address(rsp, st_off + wordSize), rsi); + // + // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE + // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case + // So we must adjust where to pick up the data to match the interpreter. + // + // Interpreter local[n] == MSW, local[n+1] == LSW however locals + // are accessed as negative so LSW is at LOW address + + // ld_off is MSW so get LSW + const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)? + next_off : ld_off; + __ movptr(rsi, Address(saved_sp, offset)); + __ movptr(Address(rsp, st_off), rsi); +#ifndef _LP64 + __ movptr(rsi, Address(saved_sp, ld_off)); + __ movptr(Address(rsp, st_off + wordSize), rsi); +#endif // _LP64 } } else if (r_1->is_Register()) { // Register argument Register r = r_1->as_Register(); assert(r != rax, "must be different"); if (r_2->is_valid()) { + // + // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE + // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case + // So we must adjust where to pick up the data to match the interpreter. + + const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)? + next_off : ld_off; + + // this can be a misaligned move + __ movptr(r, Address(saved_sp, offset)); +#ifndef _LP64 assert(r_2->as_Register() != rax, "need another temporary register"); // Remember r_1 is low address (and LSB on x86) // So r_2 gets loaded from high address regardless of the platform - __ movl(r_2->as_Register(), Address(saved_sp, ld_off)); - __ movl(r, Address(saved_sp, next_off)); + __ movptr(r_2->as_Register(), Address(saved_sp, ld_off)); +#endif // _LP64 } else { __ movl(r, Address(saved_sp, ld_off)); } @@ -844,13 +889,13 @@ // and the vm will find there should this case occur. __ get_thread(rax); - __ movl(Address(rax, JavaThread::callee_target_offset()), rbx); + __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx); // move methodOop to rax, in case we end up in an c2i adapter. // the c2i adapters expect methodOop in rax, (c2) because c2's // resolve stubs return the result (the method) in rax,. - // I'd love to fix this. - __ movl(rax, rbx); + // I'd love to fix this. 
+ __ mov(rax, rbx); __ jmp(rdi); } @@ -886,16 +931,16 @@ Label missed; __ verify_oop(holder); - __ movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); + __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); __ verify_oop(temp); - - __ cmpl(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset())); - __ movl(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset())); + + __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset())); + __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset())); __ jcc(Assembler::notEqual, missed); // Method might have been compiled since the call site was patched to // interpreted if that is the case treat it as a miss so we can get // the call site corrected. - __ cmpl(Address(rbx, in_bytes(methodOopDesc::code_offset())), NULL_WORD); + __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD); __ jcc(Assembler::equal, skip_fixup); __ bind(missed); @@ -910,11 +955,11 @@ return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry); } -int SharedRuntime::c_calling_convention(const BasicType *sig_bt, +int SharedRuntime::c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) { // We return the amount of VMRegImpl stack slots we need to reserve for all -// the arguments NOT counting out_preserve_stack_slots. +// the arguments NOT counting out_preserve_stack_slots. uint stack = 0; // All arguments on stack @@ -936,7 +981,7 @@ case T_DOUBLE: // The stack numbering is reversed from Java // Since C arguments do not get reversed, the ordering for // doubles on the stack must be opposite the Java convention - assert(sig_bt[i+1] == T_VOID, "missing Half" ); + assert(sig_bt[i+1] == T_VOID, "missing Half" ); regs[i].set2(VMRegImpl::stack2reg(stack)); stack += 2; break; @@ -956,22 +1001,25 @@ // stack to stack // __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5); // __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS); - __ movl(rax, Address(rbp, reg2offset_in(src.first()))); - __ movl(Address(rsp, reg2offset_out(dst.first())), rax); + __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first()))); + __ movptr(Address(rsp, reg2offset_out(dst.first())), rax); } else { // stack to reg - __ movl(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()))); + __ movl2ptr(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()))); } } else if (dst.first()->is_stack()) { // reg to stack - __ movl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register()); + // no need to sign extend on 64bit + __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register()); } else { - __ movl(dst.first()->as_Register(), src.first()->as_Register()); + if (dst.first() != src.first()) { + __ mov(dst.first()->as_Register(), src.first()->as_Register()); + } } } // An oop arg. Must pass a handle not the oop itself -static void object_move(MacroAssembler* masm, +static void object_move(MacroAssembler* masm, OopMap* map, int oop_handle_offset, int framesize_in_slots, @@ -980,7 +1028,7 @@ bool is_receiver, int* receiver_offset) { - // Because of the calling conventions we know that src can be a + // Because of the calling conventions we know that src can be a // register or a stack location. dst can only be a stack location. 
assert(dst.first()->is_stack(), "must be stack"); @@ -990,12 +1038,12 @@ // Oop is already on the stack as an argument Register rHandle = rax; Label nil; - __ xorl(rHandle, rHandle); - __ cmpl(Address(rbp, reg2offset_in(src.first())), NULL_WORD); + __ xorptr(rHandle, rHandle); + __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD); __ jcc(Assembler::equal, nil); - __ leal(rHandle, Address(rbp, reg2offset_in(src.first()))); + __ lea(rHandle, Address(rbp, reg2offset_in(src.first()))); __ bind(nil); - __ movl(Address(rsp, reg2offset_out(dst.first())), rHandle); + __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle); int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); @@ -1010,15 +1058,15 @@ int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset; int offset = oop_slot*VMRegImpl::stack_slot_size; Label skip; - __ movl(Address(rsp, offset), rOop); + __ movptr(Address(rsp, offset), rOop); map->set_oop(VMRegImpl::stack2reg(oop_slot)); - __ xorl(rHandle, rHandle); - __ cmpl(rOop, NULL_WORD); + __ xorptr(rHandle, rHandle); + __ cmpptr(rOop, (int32_t)NULL_WORD); __ jcc(Assembler::equal, skip); - __ leal(rHandle, Address(rsp, offset)); + __ lea(rHandle, Address(rsp, offset)); __ bind(skip); // Store the handle parameter - __ movl(Address(rsp, reg2offset_out(dst.first())), rHandle); + __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle); if (is_receiver) { *receiver_offset = offset; } @@ -1036,7 +1084,7 @@ if (src.first()->is_stack()) { __ movl(rax, Address(rbp, reg2offset_in(src.first()))); - __ movl(Address(rsp, reg2offset_out(dst.first())), rax); + __ movptr(Address(rsp, reg2offset_out(dst.first())), rax); } else { // reg to stack __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister()); @@ -1053,10 +1101,10 @@ if (src.first()->is_stack() && dst.first()->is_stack()) { assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack"); - __ movl(rax, Address(rbp, reg2offset_in(src.first()))); - __ movl(rbx, Address(rbp, reg2offset_in(src.second()))); - __ movl(Address(rsp, reg2offset_out(dst.first())), rax); - __ movl(Address(rsp, reg2offset_out(dst.second())), rbx); + __ movptr(rax, Address(rbp, reg2offset_in(src.first()))); + NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second())))); + __ movptr(Address(rsp, reg2offset_out(dst.first())), rax); + NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx)); } else { ShouldNotReachHere(); } @@ -1077,10 +1125,10 @@ if (src.first()->is_stack()) { // source is all stack - __ movl(rax, Address(rbp, reg2offset_in(src.first()))); - __ movl(rbx, Address(rbp, reg2offset_in(src.second()))); - __ movl(Address(rsp, reg2offset_out(dst.first())), rax); - __ movl(Address(rsp, reg2offset_out(dst.second())), rbx); + __ movptr(rax, Address(rbp, reg2offset_in(src.first()))); + NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second())))); + __ movptr(Address(rsp, reg2offset_out(dst.first())), rax); + NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx)); } else { // reg to stack // No worries about stack alignment @@ -1101,11 +1149,11 @@ break; case T_VOID: break; case T_LONG: - __ movl(Address(rbp, -wordSize), rax); - __ movl(Address(rbp, -2*wordSize), rdx); + __ movptr(Address(rbp, -wordSize), rax); + NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx)); break; default: { - __ movl(Address(rbp, 
-wordSize), rax); + __ movptr(Address(rbp, -wordSize), rax); } } } @@ -1121,12 +1169,12 @@ __ fld_d(Address(rbp, -2*wordSize)); break; case T_LONG: - __ movl(rax, Address(rbp, -wordSize)); - __ movl(rdx, Address(rbp, -2*wordSize)); + __ movptr(rax, Address(rbp, -wordSize)); + NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize))); break; case T_VOID: break; default: { - __ movl(rax, Address(rbp, -wordSize)); + __ movptr(rax, Address(rbp, -wordSize)); } } } @@ -1177,12 +1225,12 @@ // Now figure out where the args must be stored and how much stack space // they require (neglecting out_preserve_stack_slots but space for storing // the 1st six register arguments). It's weird see int_stk_helper. - // + // int out_arg_slots; out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args); // Compute framesize for the wrapper. We need to handlize all oops in - // registers a max of 2 on x86. + // registers a max of 2 on x86. // Calculate the total number of stack slots we will need. @@ -1226,7 +1274,7 @@ // FP-> | | // |---------------------| // | 2 slots for moves | - // |---------------------| + // |---------------------| // | lock box (if sync) | // |---------------------| <- lock_slot_offset (-lock_slot_rbp_offset) // | klass (if static) | @@ -1236,7 +1284,7 @@ // | outbound memory | // | based arguments | // | | - // |---------------------| + // |---------------------| // | | // SP-> | out_preserved_slots | // @@ -1249,7 +1297,7 @@ // **************************************************************************** - // Now compute actual number of stack words we need rounding to make + // Now compute actual number of stack words we need rounding to make // stack properly aligned. stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word); @@ -1271,7 +1319,7 @@ __ verify_oop(receiver); - __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes())); + __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes())); __ jcc(Assembler::equal, hit); __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); @@ -1294,23 +1342,23 @@ Label slowCase; Register receiver = rcx; Register result = rax; - __ movl(result, Address(receiver, oopDesc::mark_offset_in_bytes())); - + __ movptr(result, Address(receiver, oopDesc::mark_offset_in_bytes())); + // check if locked - __ testl (result, markOopDesc::unlocked_value); + __ testptr(result, markOopDesc::unlocked_value); __ jcc (Assembler::zero, slowCase); - + if (UseBiasedLocking) { // Check if biased and fall through to runtime if so - __ testl (result, markOopDesc::biased_lock_bit_in_place); + __ testptr(result, markOopDesc::biased_lock_bit_in_place); __ jcc (Assembler::notZero, slowCase); } // get hash - __ andl (result, markOopDesc::hash_mask_in_place); + __ andptr(result, markOopDesc::hash_mask_in_place); // test if hashCode exists __ jcc (Assembler::zero, slowCase); - __ shrl (result, markOopDesc::hash_shift); + __ shrptr(result, markOopDesc::hash_shift); __ ret(0); __ bind (slowCase); } @@ -1318,10 +1366,10 @@ // The instruction at the verified entry point must be 5 bytes or longer // because it can be patched on the fly by make_non_entrant. The stack bang - // instruction fits that requirement. + // instruction fits that requirement. // Generate stack overflow check - + if (UseStackBanging) { __ bang_stack_with_offset(StackShadowPages*os::vm_page_size()); } else { @@ -1332,7 +1380,7 @@ // Generate a new frame for the wrapper. 
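A hedged sketch of the hashCode fast path generated above: when the mark word is unlocked, unbiased and already carries a hash, the identity hash is masked and shifted straight out of it, otherwise the slow case runs. The bit positions and mask below are placeholder assumptions, not the real markOop layout.

    #include <cstdint>

    const uintptr_t UNLOCKED_BIT       = 0x1;   // assumption: "unlocked" pattern bit
    const uintptr_t BIASED_BIT         = 0x4;   // assumption: biased-locking bit
    const uintptr_t HASH_SHIFT         = 7;     // assumption
    const uintptr_t HASH_MASK_IN_PLACE = ((uintptr_t)0x1ffffff) << HASH_SHIFT; // assumption

    // Returns true and fills *hash when the fast path applies; false means slow case.
    bool fast_identity_hash(uintptr_t mark, uintptr_t* hash) {
      if ((mark & UNLOCKED_BIT) == 0) return false;   // locked -> slow case
      if (mark & BIASED_BIT)          return false;   // biased -> slow case
      uintptr_t h = mark & HASH_MASK_IN_PLACE;
      if (h == 0)                     return false;   // no hash installed yet
      *hash = h >> HASH_SHIFT;
      return true;
    }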
__ enter(); // -2 because return address is already present and so is saved rbp, - __ subl(rsp, stack_size - 2*wordSize); + __ subptr(rsp, stack_size - 2*wordSize); // Frame is now completed as far a size and linkage. @@ -1379,7 +1427,7 @@ // them. // ----------------- - // The Grand Shuffle + // The Grand Shuffle // // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* // and, if static, the class mirror instead of a receiver. This pretty much @@ -1401,7 +1449,7 @@ // the oops in the caller's frame. Since we are sure to have // more args than the caller doubling is enough to make // sure we can capture all the incoming oop args from the - // caller. + // caller. // OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); @@ -1428,7 +1476,7 @@ break; case T_DOUBLE: - assert( i + 1 < total_in_args && + assert( i + 1 < total_in_args && in_sig_bt[i + 1] == T_VOID && out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); double_move(masm, in_regs[i], out_regs[c_arg]); @@ -1453,13 +1501,13 @@ __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror())); // Now handlize the static class mirror it's known not-null. - __ movl(Address(rsp, klass_offset), oop_handle_reg); + __ movptr(Address(rsp, klass_offset), oop_handle_reg); map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); // Now get the handle - __ leal(oop_handle_reg, Address(rsp, klass_offset)); + __ lea(oop_handle_reg, Address(rsp, klass_offset)); // store the klass handle as second argument - __ movl(Address(rsp, wordSize), oop_handle_reg); + __ movptr(Address(rsp, wordSize), oop_handle_reg); } // Change state to native (we save the return address in the thread, since it might not @@ -1476,16 +1524,24 @@ // We have all of the arguments setup at this point. We must not touch any register // argument registers at this point (what if we save/restore them there are no oop? 
- { + { SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0); __ movoop(rax, JNIHandles::make_local(method())); __ call_VM_leaf( - CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), + CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), + thread, rax); + } + + // RedefineClasses() tracing support for obsolete method entry + if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { + __ movoop(rax, JNIHandles::make_local(method())); + __ call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), thread, rax); } - // These are register definitions we need for locking/unlocking + // These are register definitions we need for locking/unlocking const Register swap_reg = rax; // Must use rax, for cmpxchg instruction const Register obj_reg = rcx; // Will contain the oop const Register lock_reg = rdx; // Address of compiler lock object (BasicLock) @@ -1500,14 +1556,14 @@ const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes(); // Get the handle (the 2nd argument) - __ movl(oop_handle_reg, Address(rsp, wordSize)); + __ movptr(oop_handle_reg, Address(rsp, wordSize)); // Get address of the box - __ leal(lock_reg, Address(rbp, lock_slot_rbp_offset)); + __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset)); - // Load the oop from the handle - __ movl(obj_reg, Address(oop_handle_reg, 0)); + // Load the oop from the handle + __ movptr(obj_reg, Address(oop_handle_reg, 0)); if (UseBiasedLocking) { // Note that oop_handle_reg is trashed during this call @@ -1515,21 +1571,21 @@ } // Load immediate 1 into swap_reg %rax, - __ movl(swap_reg, 1); + __ movptr(swap_reg, 1); // Load (object->mark() | 1) into swap_reg %rax, - __ orl(swap_reg, Address(obj_reg, 0)); + __ orptr(swap_reg, Address(obj_reg, 0)); // Save (object->mark() | 1) into BasicLock's displaced header - __ movl(Address(lock_reg, mark_word_offset), swap_reg); + __ movptr(Address(lock_reg, mark_word_offset), swap_reg); if (os::is_MP()) { __ lock(); } // src -> dest iff dest == rax, else rax, <- dest - // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg) - __ cmpxchg(lock_reg, Address(obj_reg, 0)); + // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg) + __ cmpxchgptr(lock_reg, Address(obj_reg, 0)); __ jcc(Assembler::equal, lock_done); // Test if the oopMark is an obvious stack pointer, i.e., @@ -1541,18 +1597,18 @@ // least significant 2 bits clear. 
// NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg - __ subl(swap_reg, rsp); - __ andl(swap_reg, 3 - os::vm_page_size()); + __ subptr(swap_reg, rsp); + __ andptr(swap_reg, 3 - os::vm_page_size()); // Save the test result, for recursive case, the result is zero - __ movl(Address(lock_reg, mark_word_offset), swap_reg); + __ movptr(Address(lock_reg, mark_word_offset), swap_reg); __ jcc(Assembler::notEqual, slow_path_lock); // Slow path will re-enter here __ bind(lock_done); if (UseBiasedLocking) { // Re-fetch oop_handle_reg as we trashed it above - __ movl(oop_handle_reg, Address(rsp, wordSize)); + __ movptr(oop_handle_reg, Address(rsp, wordSize)); } } @@ -1562,11 +1618,11 @@ // get JNIEnv* which is first argument to native - __ leal(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset()))); - __ movl(Address(rsp, 0), rdx); + __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset()))); + __ movptr(Address(rsp, 0), rdx); // Now set thread in native - __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native); + __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native); __ call(RuntimeAddress(method->native_function())); @@ -1575,10 +1631,10 @@ // and continue to do SP relative addressing but we instead switch to FP // relative addressing. - // Unpack native results. + // Unpack native results. switch (ret_type) { case T_BOOLEAN: __ c2bool(rax); break; - case T_CHAR : __ andl(rax, 0xFFFF); break; + case T_CHAR : __ andptr(rax, 0xFFFF); break; case T_BYTE : __ sign_extend_byte (rax); break; case T_SHORT : __ sign_extend_short(rax); break; case T_INT : /* nothing to do */ break; @@ -1601,11 +1657,14 @@ // VM thread changes sync state to synchronizing and suspends threads for GC. // Thread A is resumed to finish this native method, but doesn't block here since it // didn't see any synchronization is progress, and escapes. - __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans); + __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans); - if(os::is_MP()) { + if(os::is_MP()) { if (UseMembar) { - __ membar(); // Force this write out before the read below + // Force this write out before the read below + __ membar(Assembler::Membar_mask_bits( + Assembler::LoadLoad | Assembler::LoadStore | + Assembler::StoreLoad | Assembler::StoreStore)); } else { // Write serialization page so VM thread can do a pseudo remote membar. // We use the current thread pointer to calculate a thread specific @@ -1616,7 +1675,7 @@ } if (AlwaysRestoreFPU) { - // Make sure the control word is correct. + // Make sure the control word is correct. __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); } @@ -1639,7 +1698,7 @@ // by hand. // save_native_result(masm, ret_type, stack_slots); - __ pushl(thread); + __ push(thread); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); __ increment(rsp, wordSize); @@ -1662,7 +1721,7 @@ // Handle possible exception (will unlock if necessary) - // native result if any is live + // native result if any is live // Unlock Label slow_path_unlock; @@ -1672,7 +1731,7 @@ Label done; // Get locked oop from the handle we passed to jni - __ movl(obj_reg, Address(oop_handle_reg, 0)); + __ movptr(obj_reg, Address(oop_handle_reg, 0)); if (UseBiasedLocking) { __ biased_locking_exit(obj_reg, rbx, done); @@ -1680,7 +1739,7 @@ // Simple recursive lock? 
- __ cmpl(Address(rbp, lock_slot_rbp_offset), NULL_WORD); + __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD); __ jcc(Assembler::equal, done); // Must save rax, if if it is live now because cmpxchg must use it @@ -1689,10 +1748,10 @@ } // get old displaced header - __ movl(rbx, Address(rbp, lock_slot_rbp_offset)); + __ movptr(rbx, Address(rbp, lock_slot_rbp_offset)); // get address of the stack lock - __ leal(rax, Address(rbp, lock_slot_rbp_offset)); + __ lea(rax, Address(rbp, lock_slot_rbp_offset)); // Atomic swap old header if oop still contains the stack lock if (os::is_MP()) { @@ -1700,8 +1759,8 @@ } // src -> dest iff dest == rax, else rax, <- dest - // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg) - __ cmpxchg(rbx, Address(obj_reg, 0)); + // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg) + __ cmpxchgptr(rbx, Address(obj_reg, 0)); __ jcc(Assembler::notEqual, slow_path_unlock); // slow path re-enters here @@ -1714,13 +1773,13 @@ } - { + { SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0); // Tell dtrace about this method exit save_native_result(masm, ret_type, stack_slots); __ movoop(rax, JNIHandles::make_local(method())); __ call_VM_leaf( - CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), + CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), thread, rax); restore_native_result(masm, ret_type, stack_slots); } @@ -1732,20 +1791,20 @@ // Unpack oop result if (ret_type == T_OBJECT || ret_type == T_ARRAY) { Label L; - __ cmpl(rax, NULL_WORD); + __ cmpptr(rax, (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); - __ movl(rax, Address(rax, 0)); + __ movptr(rax, Address(rax, 0)); __ bind(L); __ verify_oop(rax); } // reset handle block - __ movl(rcx, Address(thread, JavaThread::active_handles_offset())); + __ movptr(rcx, Address(thread, JavaThread::active_handles_offset())); - __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), 0); + __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); // Any exception pending? - __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD); + __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, exception_pending); @@ -1785,15 +1844,15 @@ // has last_Java_frame setup. 
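A compact model of the stack-lock fast path this wrapper emits around the native call: the BasicLock box on the stack holds the displaced mark word, a compare-and-swap installs the box address into the object header, a failed CAS is re-checked for the recursive case, and unlock CASes the displaced header back. std::atomic stands in for the generated cmpxchg and the page size is an assumption; this is a sketch, not the VM's monitor code.

    #include <atomic>
    #include <cstdint>

    struct FakeObject   { std::atomic<uintptr_t> mark; };
    struct BasicLockBox { uintptr_t displaced_header; };

    const uintptr_t PAGE_SIZE = 4096;   // assumption for the recursive-lock test

    bool fast_lock(FakeObject* obj, BasicLockBox* box, uintptr_t stack_pointer) {
      uintptr_t mark = obj->mark.load() | 1;       // mark word with the "unlocked" bit set
      box->displaced_header = mark;                // save displaced header in the box
      if (obj->mark.compare_exchange_strong(mark, (uintptr_t)box))
        return true;                               // we now own the lock
      // CAS failed: 'mark' holds the current header. If it is an address in our own
      // stack page (low two bits clear), this thread already owns it: recursive lock.
      if (((mark - stack_pointer) & (3 - PAGE_SIZE)) == 0) {
        box->displaced_header = 0;                 // zero marks the recursive case
        return true;
      }
      return false;                                // slow path (inflate / contended)
    }

    bool fast_unlock(FakeObject* obj, BasicLockBox* box) {
      if (box->displaced_header == 0) return true; // recursive exit, nothing to restore
      uintptr_t expected = (uintptr_t)box;
      return obj->mark.compare_exchange_strong(expected, box->displaced_header);
    }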
No exceptions so do vanilla call not call_VM // args are (oop obj, BasicLock* lock, JavaThread* thread) - __ pushl(thread); - __ pushl(lock_reg); - __ pushl(obj_reg); + __ push(thread); + __ push(lock_reg); + __ push(obj_reg); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C))); - __ addl(rsp, 3*wordSize); + __ addptr(rsp, 3*wordSize); #ifdef ASSERT { Label L; - __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD); + __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD); __ jcc(Assembler::equal, L); __ stop("no pending exception allowed on exit from monitorenter"); __ bind(L); @@ -1813,29 +1872,29 @@ } // Save pending exception around call to VM (which contains an EXCEPTION_MARK) - __ pushl(Address(thread, in_bytes(Thread::pending_exception_offset()))); - __ movl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD); + __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset()))); + __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD); // should be a peal // +wordSize because of the push above - __ leal(rax, Address(rbp, lock_slot_rbp_offset)); - __ pushl(rax); + __ lea(rax, Address(rbp, lock_slot_rbp_offset)); + __ push(rax); - __ pushl(obj_reg); + __ push(obj_reg); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C))); - __ addl(rsp, 2*wordSize); + __ addptr(rsp, 2*wordSize); #ifdef ASSERT { Label L; - __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD); + __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); __ stop("no pending exception allowed on exit complete_monitor_unlocking_C"); __ bind(L); } #endif /* ASSERT */ - __ popl(Address(thread, in_bytes(Thread::pending_exception_offset()))); + __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset()))); if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) { restore_native_result(masm, ret_type, stack_slots); @@ -1883,6 +1942,381 @@ } +#ifdef HAVE_DTRACE_H +// --------------------------------------------------------------------------- +// Generate a dtrace nmethod for a given signature. The method takes arguments +// in the Java compiled code convention, marshals them to the native +// abi and then leaves nops at the position you would expect to call a native +// function. When the probe is enabled the nops are replaced with a trap +// instruction that dtrace inserts and the trace will cause a notification +// to dtrace. +// +// The probes are only able to take primitive types and java/lang/String as +// arguments. No other java types are allowed. Strings are converted to utf8 +// strings so that from dtrace point of view java strings are converted to C +// strings. There is an arbitrary fixed limit on the total space that a method +// can use for converting the strings. (256 chars per string in the signature). +// So any java string larger then this is truncated. + +nmethod *SharedRuntime::generate_dtrace_nmethod( + MacroAssembler *masm, methodHandle method) { + + // generate_dtrace_nmethod is guarded by a mutex so we are sure to + // be single threaded in this method. + assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be"); + + // Fill in the signature array, for the calling-convention call. 
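A plain-C++ restatement of the signature rewrite described above for dtrace probes, covering only the reference types (primitives keep their own type): java/lang/String becomes a C string pointer, the small box classes collapse to an int, Long and Double become a two-slot long, and any other object is passed as NULL. Enum names are stand-ins for the VM's BasicType values.

    #include <string>

    enum ProbeArgType { PROBE_INT, PROBE_LONG, PROBE_UTF8_PTR, PROBE_NULL_OBJECT };

    ProbeArgType rewrite_object_arg(const std::string& klass) {
      if (klass == "java/lang/String")  return PROBE_UTF8_PTR;
      if (klass == "java/lang/Long" || klass == "java/lang/Double") return PROBE_LONG;
      if (klass == "java/lang/Boolean" || klass == "java/lang/Character" ||
          klass == "java/lang/Byte"    || klass == "java/lang/Short"     ||
          klass == "java/lang/Integer" || klass == "java/lang/Float")
        return PROBE_INT;
      return PROBE_NULL_OBJECT;   // any other object is not representable to dtrace
    }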
+ int total_args_passed = method->size_of_parameters(); + + BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed); + VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed); + + // The signature we are going to use for the trap that dtrace will see + // java/lang/String is converted. We drop "this" and any other object + // is converted to NULL. (A one-slot java/lang/Long object reference + // is converted to a two-slot long, which is why we double the allocation). + BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2); + VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2); + + int i=0; + int total_strings = 0; + int first_arg_to_pass = 0; + int total_c_args = 0; + + if( !method->is_static() ) { // Pass in receiver first + in_sig_bt[i++] = T_OBJECT; + first_arg_to_pass = 1; + } + + // We need to convert the java args to where a native (non-jni) function + // would expect them. To figure out where they go we convert the java + // signature to a C signature. + + SignatureStream ss(method->signature()); + for ( ; !ss.at_return_type(); ss.next()) { + BasicType bt = ss.type(); + in_sig_bt[i++] = bt; // Collect remaining bits of signature + out_sig_bt[total_c_args++] = bt; + if( bt == T_OBJECT) { + symbolOop s = ss.as_symbol_or_null(); + if (s == vmSymbols::java_lang_String()) { + total_strings++; + out_sig_bt[total_c_args-1] = T_ADDRESS; + } else if (s == vmSymbols::java_lang_Boolean() || + s == vmSymbols::java_lang_Character() || + s == vmSymbols::java_lang_Byte() || + s == vmSymbols::java_lang_Short() || + s == vmSymbols::java_lang_Integer() || + s == vmSymbols::java_lang_Float()) { + out_sig_bt[total_c_args-1] = T_INT; + } else if (s == vmSymbols::java_lang_Long() || + s == vmSymbols::java_lang_Double()) { + out_sig_bt[total_c_args-1] = T_LONG; + out_sig_bt[total_c_args++] = T_VOID; + } + } else if ( bt == T_LONG || bt == T_DOUBLE ) { + in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots + out_sig_bt[total_c_args++] = T_VOID; + } + } + + assert(i==total_args_passed, "validly parsed signature"); + + // Now get the compiled-Java layout as input arguments + int comp_args_on_stack; + comp_args_on_stack = SharedRuntime::java_calling_convention( + in_sig_bt, in_regs, total_args_passed, false); + + // Now figure out where the args must be stored and how much stack space + // they require (neglecting out_preserve_stack_slots). + + int out_arg_slots; + out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args); + + // Calculate the total number of stack slots we will need. + + // First count the abi requirement plus all of the outgoing args + int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; + + // Now space for the string(s) we must convert + + int* string_locs = NEW_RESOURCE_ARRAY(int, total_strings + 1); + for (i = 0; i < total_strings ; i++) { + string_locs[i] = stack_slots; + stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size; + } + + // + 2 for return address (which we own) and saved rbp, + + stack_slots += 2; + + // Ok The space we have allocated will look like: + // + // + // FP-> | | + // |---------------------| + // | string[n] | + // |---------------------| <- string_locs[n] + // | string[n-1] | + // |---------------------| <- string_locs[n-1] + // | ... | + // | ... 
| + // |---------------------| <- string_locs[1] + // | string[0] | + // |---------------------| <- string_locs[0] + // | outbound memory | + // | based arguments | + // | | + // |---------------------| + // | | + // SP-> | out_preserved_slots | + // + // + + // Now compute actual number of stack words we need rounding to make + // stack properly aligned. + stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word); + + int stack_size = stack_slots * VMRegImpl::stack_slot_size; + + intptr_t start = (intptr_t)__ pc(); + + // First thing make an ic check to see if we should even be here + + // We are free to use all registers as temps without saving them and + // restoring them except rbp. rbp, is the only callee save register + // as far as the interpreter and the compiler(s) are concerned. + + const Register ic_reg = rax; + const Register receiver = rcx; + Label hit; + Label exception_pending; + + + __ verify_oop(receiver); + __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes())); + __ jcc(Assembler::equal, hit); + + __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); + + // verified entry must be aligned for code patching. + // and the first 5 bytes must be in the same cache line + // if we align at 8 then we will be sure 5 bytes are in the same line + __ align(8); + + __ bind(hit); + + int vep_offset = ((intptr_t)__ pc()) - start; + + + // The instruction at the verified entry point must be 5 bytes or longer + // because it can be patched on the fly by make_non_entrant. The stack bang + // instruction fits that requirement. + + // Generate stack overflow check + + + if (UseStackBanging) { + if (stack_size <= StackShadowPages*os::vm_page_size()) { + __ bang_stack_with_offset(StackShadowPages*os::vm_page_size()); + } else { + __ movl(rax, stack_size); + __ bang_stack_size(rax, rbx); + } + } else { + // need a 5 byte instruction to allow MT safe patching to non-entrant + __ fat_nop(); + } + + assert(((int)__ pc() - start - vep_offset) >= 5, + "valid size for make_non_entrant"); + + // Generate a new frame for the wrapper. + __ enter(); + + // -2 because return address is already present and so is saved rbp, + if (stack_size - 2*wordSize != 0) { + __ subl(rsp, stack_size - 2*wordSize); + } + + // Frame is now completed as far a size and linkage. + + int frame_complete = ((intptr_t)__ pc()) - start; + + // First thing we do store all the args as if we are doing the call. + // Since the C calling convention is stack based that ensures that + // all the Java register args are stored before we need to convert any + // string we might have. + + int sid = 0; + int c_arg, j_arg; + int string_reg = 0; + + for (j_arg = first_arg_to_pass, c_arg = 0 ; + j_arg < total_args_passed ; j_arg++, c_arg++ ) { + + VMRegPair src = in_regs[j_arg]; + VMRegPair dst = out_regs[c_arg]; + assert(dst.first()->is_stack() || in_sig_bt[j_arg] == T_VOID, + "stack based abi assumed"); + + switch (in_sig_bt[j_arg]) { + + case T_ARRAY: + case T_OBJECT: + if (out_sig_bt[c_arg] == T_ADDRESS) { + // Any register based arg for a java string after the first + // will be destroyed by the call to get_utf so we store + // the original value in the location the utf string address + // will eventually be stored. 
+ if (src.first()->is_reg()) { + if (string_reg++ != 0) { + simple_move32(masm, src, dst); + } + } + } else if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) { + // need to unbox a one-word value + Register in_reg = rax; + if ( src.first()->is_reg() ) { + in_reg = src.first()->as_Register(); + } else { + simple_move32(masm, src, in_reg->as_VMReg()); + } + Label skipUnbox; + __ movl(Address(rsp, reg2offset_out(dst.first())), NULL_WORD); + if ( out_sig_bt[c_arg] == T_LONG ) { + __ movl(Address(rsp, reg2offset_out(dst.second())), NULL_WORD); + } + __ testl(in_reg, in_reg); + __ jcc(Assembler::zero, skipUnbox); + assert(dst.first()->is_stack() && + (!dst.second()->is_valid() || dst.second()->is_stack()), + "value(s) must go into stack slots"); + + BasicType bt = out_sig_bt[c_arg]; + int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt); + if ( bt == T_LONG ) { + __ movl(rbx, Address(in_reg, + box_offset + VMRegImpl::stack_slot_size)); + __ movl(Address(rsp, reg2offset_out(dst.second())), rbx); + } + __ movl(in_reg, Address(in_reg, box_offset)); + __ movl(Address(rsp, reg2offset_out(dst.first())), in_reg); + __ bind(skipUnbox); + } else { + // Convert the arg to NULL + __ movl(Address(rsp, reg2offset_out(dst.first())), NULL_WORD); + } + if (out_sig_bt[c_arg] == T_LONG) { + assert(out_sig_bt[c_arg+1] == T_VOID, "must be"); + ++c_arg; // Move over the T_VOID To keep the loop indices in sync + } + break; + + case T_VOID: + break; + + case T_FLOAT: + float_move(masm, src, dst); + break; + + case T_DOUBLE: + assert( j_arg + 1 < total_args_passed && + in_sig_bt[j_arg + 1] == T_VOID, "bad arg list"); + double_move(masm, src, dst); + break; + + case T_LONG : + long_move(masm, src, dst); + break; + + case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); + + default: + simple_move32(masm, src, dst); + } + } + + // Now we must convert any string we have to utf8 + // + + for (sid = 0, j_arg = first_arg_to_pass, c_arg = 0 ; + sid < total_strings ; j_arg++, c_arg++ ) { + + if (out_sig_bt[c_arg] == T_ADDRESS) { + + Address utf8_addr = Address( + rsp, string_locs[sid++] * VMRegImpl::stack_slot_size); + __ leal(rax, utf8_addr); + + // The first string we find might still be in the original java arg + // register + VMReg orig_loc = in_regs[j_arg].first(); + Register string_oop; + + // This is where the argument will eventually reside + Address dest = Address(rsp, reg2offset_out(out_regs[c_arg].first())); + + if (sid == 1 && orig_loc->is_reg()) { + string_oop = orig_loc->as_Register(); + assert(string_oop != rax, "smashed arg"); + } else { + + if (orig_loc->is_reg()) { + // Get the copy of the jls object + __ movl(rcx, dest); + } else { + // arg is still in the original location + __ movl(rcx, Address(rbp, reg2offset_in(orig_loc))); + } + string_oop = rcx; + + } + Label nullString; + __ movl(dest, NULL_WORD); + __ testl(string_oop, string_oop); + __ jcc(Assembler::zero, nullString); + + // Now we can store the address of the utf string as the argument + __ movl(dest, rax); + + // And do the conversion + __ call_VM_leaf(CAST_FROM_FN_PTR( + address, SharedRuntime::get_utf), string_oop, rax); + __ bind(nullString); + } + + if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) { + assert(out_sig_bt[c_arg+1] == T_VOID, "must be"); + ++c_arg; // Move over the T_VOID To keep the loop indices in sync + } + } + + + // Ok now we are done. 
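A sketch of the per-string handling in the loop above: each string argument gets a fixed-size scratch buffer in the frame, the argument slot receives either NULL (for a null String) or the buffer address, and the characters are copied with truncation. The buffer size reflects the per-string cap stated earlier; the copy helper is illustrative, where the real code calls SharedRuntime::get_utf.

    #include <cstring>

    const int MAX_DTRACE_STRING_SIZE = 256;   // per-string cap mentioned above (assumption)

    const char* convert_string_arg(const char* java_utf8, char* scratch /* 256 bytes */) {
      if (java_utf8 == nullptr) return nullptr;            // null String -> NULL argument
      strncpy(scratch, java_utf8, MAX_DTRACE_STRING_SIZE - 1);
      scratch[MAX_DTRACE_STRING_SIZE - 1] = '\0';          // longer strings are truncated
      return scratch;                                      // argument slot gets buffer address
    }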
Need to place the nop that dtrace wants in order to + // patch in the trap + + int patch_offset = ((intptr_t)__ pc()) - start; + + __ nop(); + + + // Return + + __ leave(); + __ ret(0); + + __ flush(); + + nmethod *nm = nmethod::new_dtrace_nmethod( + method, masm->code(), vep_offset, patch_offset, frame_complete, + stack_slots / VMRegImpl::slots_per_word); + return nm; + +} + +#endif // HAVE_DTRACE_H + // this function returns the adjust size (in number of words) to a c2i adapter // activation for use during deoptimization int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) { @@ -1913,8 +2347,8 @@ // ------------- // This code enters when returning to a de-optimized nmethod. A return // address has been pushed on the the stack, and return values are in - // registers. - // If we are doing a normal deopt then we were called from the patched + // registers. + // If we are doing a normal deopt then we were called from the patched // nmethod from the point we returned to the nmethod. So the return // address on the stack is wrong by NativeCall::instruction_size // We will adjust the value to it looks like we have the original return @@ -1948,7 +2382,7 @@ map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words); // Normal deoptimization - __ pushl(Deoptimization::Unpack_deopt); + __ push(Deoptimization::Unpack_deopt); __ jmp(cont); int reexecute_offset = __ pc() - start; @@ -1959,9 +2393,9 @@ // No need to update map as each call to save_live_registers will produce identical oopmap (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words); - __ pushl(Deoptimization::Unpack_reexecute); + __ push(Deoptimization::Unpack_reexecute); __ jmp(cont); - + int exception_offset = __ pc() - start; // Prolog for exception case @@ -1972,8 +2406,8 @@ // unpack_with_exception_in_tls entry point. __ get_thread(rdi); - __ movl(Address(rdi, JavaThread::exception_pc_offset()), rdx); - __ movl(Address(rdi, JavaThread::exception_oop_offset()), rax); + __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx); + __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax); int exception_in_tls_offset = __ pc() - start; @@ -1986,9 +2420,9 @@ // args are on the stack, no return address) // make room on stack for the return address - // It will be patched later with the throwing pc. The correct value is not + // It will be patched later with the throwing pc. The correct value is not // available now because loading it from memory would destroy registers. - __ pushl(0); + __ push(0); // Save everything in sight. @@ -1998,24 +2432,24 @@ // Now it is safe to overwrite any register // store the correct deoptimization type - __ pushl(Deoptimization::Unpack_exception); + __ push(Deoptimization::Unpack_exception); - // load throwing pc from JavaThread and patch it as the return address + // load throwing pc from JavaThread and patch it as the return address // of the current frame. 
Then clear the field in JavaThread __ get_thread(rdi); - __ movl(rdx, Address(rdi, JavaThread::exception_pc_offset())); - __ movl(Address(rbp, wordSize), rdx); - __ movl(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD); + __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset())); + __ movptr(Address(rbp, wordSize), rdx); + __ movptr(Address(rdi, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); #ifdef ASSERT // verify that there is really an exception oop in JavaThread - __ movl(rax, Address(rdi, JavaThread::exception_oop_offset())); + __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset())); __ verify_oop(rax); // verify that there is no pending exception Label no_pending_exception; - __ movl(rax, Address(rdi, Thread::pending_exception_offset())); - __ testl(rax, rax); + __ movptr(rax, Address(rdi, Thread::pending_exception_offset())); + __ testptr(rax, rax); __ jcc(Assembler::zero, no_pending_exception); __ stop("must not have pending exception here"); __ bind(no_pending_exception); @@ -2028,9 +2462,9 @@ // Call C code. Need thread and this frame, but NOT official VM entry - // crud. We cannot block on this call, no GC can happen. + // crud. We cannot block on this call, no GC can happen. __ get_thread(rcx); - __ pushl(rcx); + __ push(rcx); // fetch_unroll_info needs to call last_java_frame() __ set_last_Java_frame(rcx, noreg, noreg, NULL); @@ -2042,35 +2476,35 @@ oop_maps->add_gc_map( __ pc()-start, map); // Discard arg to fetch_unroll_info - __ popl(rcx); + __ pop(rcx); __ get_thread(rcx); __ reset_last_Java_frame(rcx, false, false); // Load UnrollBlock into EDI - __ movl(rdi, rax); + __ mov(rdi, rax); // Move the unpack kind to a safe place in the UnrollBlock because // we are very short of registers Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()); // retrieve the deopt kind from where we left it. - __ popl(rax); + __ pop(rax); __ movl(unpack_kind, rax); // save the unpack_kind value Label noException; __ cmpl(rax, Deoptimization::Unpack_exception); // Was exception pending? __ jcc(Assembler::notEqual, noException); - __ movl(rax, Address(rcx, JavaThread::exception_oop_offset())); - __ movl(rdx, Address(rcx, JavaThread::exception_pc_offset())); - __ movl(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD); - __ movl(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD); + __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset())); + __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset())); + __ movptr(Address(rcx, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD); + __ movptr(Address(rcx, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); __ verify_oop(rax); // Overwrite the result registers with the exception results. - __ movl(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax); - __ movl(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx); + __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax); + __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx); __ bind(noException); @@ -2083,35 +2517,35 @@ // All of the register save area has been popped of the stack. Only the // return address remains. - // Pop all the frames we must move/replace. - // + // Pop all the frames we must move/replace. + // // Frame picture (youngest to oldest) // 1: self-frame (no frame link) // 2: deopting frame (no frame link) - // 3: caller of deopting frame (could be compiled/interpreted). - // + // 3: caller of deopting frame (could be compiled/interpreted). 
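A model of the frame-unrolling loop that follows: given the UnrollBlock's arrays of frame sizes and return pcs, one skeletal interpreter frame is pushed per entry (return pc, saved rbp, then the rest of the frame body). This sketch only computes the resulting sp/fp values instead of building real frames, and assumes the two arrays have equal length.

    #include <cstdint>
    #include <vector>

    struct SkeletalFrame { intptr_t pc; intptr_t fp; intptr_t sp; };

    std::vector<SkeletalFrame> unroll(intptr_t sp, const std::vector<intptr_t>& sizes,
                                      const std::vector<intptr_t>& pcs, int word_size) {
      std::vector<SkeletalFrame> frames;
      for (size_t i = 0; i < sizes.size(); i++) {
        intptr_t body = sizes[i] - 2 * word_size;  // pc and saved fp are pushed by hand
        sp -= word_size;                           // push return address (pcs[i])
        sp -= word_size;                           // push caller's fp (enter)
        intptr_t fp = sp;                          // new frame pointer
        sp -= body;                                // allocate locals / expression stack
        frames.push_back({ pcs[i], fp, sp });
      }
      return frames;
    }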
+ // // Note: by leaving the return address of self-frame on the stack // and using the size of frame 2 to adjust the stack // when we are done the return to frame 3 will still be on the stack. // Pop deoptimized frame - __ addl(rsp,Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes())); + __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes())); // sp should be pointing at the return address to the caller (3) // Stack bang to make sure there's enough room for these interpreter frames. if (UseStackBanging) { - __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); + __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); __ bang_stack_size(rbx, rcx); } // Load array of frame pcs into ECX - __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); + __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); - __ popl(rsi); // trash the old pc + __ pop(rsi); // trash the old pc // Load array of frame sizes into ESI - __ movl(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes())); + __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes())); Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes()); @@ -2119,7 +2553,7 @@ __ movl(counter, rbx); // Pick up the initial fp we should save - __ movl(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes())); + __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes())); // Now adjust the caller's stack to make up for the extra locals // but record the original sp so that we can save it in the skeletal interpreter @@ -2127,55 +2561,56 @@ // value and not the "real" sp value. Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes()); - __ movl(sp_temp, rsp); - __ subl(rsp, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); + __ movptr(sp_temp, rsp); + __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); + __ subptr(rsp, rbx); // Push interpreter frames in a loop Label loop; __ bind(loop); - __ movl(rbx, Address(rsi, 0)); // Load frame size + __ movptr(rbx, Address(rsi, 0)); // Load frame size #ifdef CC_INTERP - __ subl(rbx, 4*wordSize); // we'll push pc and ebp by hand and + __ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and #ifdef ASSERT - __ pushl(0xDEADDEAD); // Make a recognizable pattern - __ pushl(0xDEADDEAD); + __ push(0xDEADDEAD); // Make a recognizable pattern + __ push(0xDEADDEAD); #else /* ASSERT */ - __ subl(rsp, 2*wordSize); // skip the "static long no_param" + __ subptr(rsp, 2*wordSize); // skip the "static long no_param" #endif /* ASSERT */ #else /* CC_INTERP */ - __ subl(rbx, 2*wordSize); // we'll push pc and rbp, by hand + __ subptr(rbx, 2*wordSize); // we'll push pc and rbp, by hand #endif /* CC_INTERP */ - __ pushl(Address(rcx, 0)); // save return address + __ pushptr(Address(rcx, 0)); // save return address __ enter(); // save old & set new rbp, - __ subl(rsp, rbx); // Prolog! - __ movl(rbx, sp_temp); // sender's sp + __ subptr(rsp, rbx); // Prolog! 
+ __ movptr(rbx, sp_temp); // sender's sp #ifdef CC_INTERP - __ movl(Address(rbp, - -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))), - rbx); // Make it walkable + __ movptr(Address(rbp, + -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))), + rbx); // Make it walkable #else /* CC_INTERP */ - // This value is corrected by layout_activation_impl - __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD ); - __ movl(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable + // This value is corrected by layout_activation_impl + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD ); + __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable #endif /* CC_INTERP */ - __ movl(sp_temp, rsp); // pass to next frame - __ addl(rsi, 4); // Bump array pointer (sizes) - __ addl(rcx, 4); // Bump array pointer (pcs) - __ decrement(counter); // decrement counter + __ movptr(sp_temp, rsp); // pass to next frame + __ addptr(rsi, wordSize); // Bump array pointer (sizes) + __ addptr(rcx, wordSize); // Bump array pointer (pcs) + __ decrementl(counter); // decrement counter __ jcc(Assembler::notZero, loop); - __ pushl(Address(rcx, 0)); // save final return address + __ pushptr(Address(rcx, 0)); // save final return address // Re-push self-frame __ enter(); // save old & set new rbp, // Return address and rbp, are in place // We'll push additional args later. Just allocate a full sized - // register save area - __ subl(rsp, (frame_size_in_words-additional_words - 2) * wordSize); + // register save area + __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize); // Restore frame locals after moving the frame - __ movl(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax); - __ movl(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx); + __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax); + __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx); __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize)); // Pop float stack and store in local if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0); if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0); @@ -2184,7 +2619,7 @@ __ pushl(unpack_kind); // get the unpack_kind value __ get_thread(rcx); - __ pushl(rcx); + __ push(rcx); // set last_Java_sp, last_Java_fp __ set_last_Java_frame(rcx, noreg, rbp, NULL); @@ -2197,14 +2632,14 @@ oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 )); // rax, contains the return result type - __ pushl(rax); + __ push(rax); __ get_thread(rcx); __ reset_last_Java_frame(rcx, false, false); // Collect return values - __ movl(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize)); - __ movl(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize)); + __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize)); + __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize)); // Clear floating point stack before returning to interpreter __ empty_FPU_stack(); @@ -2233,7 +2668,7 @@ // Jump to interpreter __ ret(0); - + // ------------- // make sure all code is generated masm->flush(); @@ -2253,7 +2688,7 @@ MacroAssembler* masm = new MacroAssembler(&buffer); enum frame_layout { - 
arg0_off, // thread sp + 0 // Arg location for + arg0_off, // thread sp + 0 // Arg location for arg1_off, // unloaded_class_index sp + 1 // calling C // The frame sender code expects that rbp will be in the "natural" place and // will override any oopMap setting for it. We must therefore force the layout @@ -2262,15 +2697,15 @@ return_off, // slot for return address sp + 3 framesize }; - + address start = __ pc(); // Push self-frame. - __ subl(rsp, return_off*wordSize); // Epilog! + __ subptr(rsp, return_off*wordSize); // Epilog! // rbp, is an implicitly saved callee saved register (i.e. the calling // convention will save restore it in prolog/epilog) Other than that // there are no callee save registers no that adapter frames are gone. - __ movl(Address(rsp, rbp_off*wordSize),rbp); + __ movptr(Address(rsp, rbp_off*wordSize), rbp); // Clear the floating point exception stack __ empty_FPU_stack(); @@ -2282,8 +2717,8 @@ // Call C code. Need thread but NOT official VM entry // crud. We cannot block on this call, no GC can happen. Call should // capture callee-saved registers as well as return values. - __ movl(Address(rsp, arg0_off*wordSize),rdx); - // argument already in ECX + __ movptr(Address(rsp, arg0_off*wordSize), rdx); + // argument already in ECX __ movl(Address(rsp, arg1_off*wordSize),rcx); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap))); @@ -2299,26 +2734,27 @@ __ reset_last_Java_frame(rcx, false, false); // Load UnrollBlock into EDI - __ movl(rdi, rax); + __ movptr(rdi, rax); - // Pop all the frames we must move/replace. - // + // Pop all the frames we must move/replace. + // // Frame picture (youngest to oldest) // 1: self-frame (no frame link) // 2: deopting frame (no frame link) // 3: caller of deopting frame (could be compiled/interpreted). // Pop self-frame. We have no frame, and must rely only on EAX and ESP. - __ addl(rsp,(framesize-1)*wordSize); // Epilog! + __ addptr(rsp,(framesize-1)*wordSize); // Epilog! // Pop deoptimized frame - __ addl(rsp,Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes())); + __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes())); + __ addptr(rsp, rcx); // sp should be pointing at the return address to the caller (3) - + // Stack bang to make sure there's enough room for these interpreter frames. 
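A rough sketch of what the stack bang referred to here does: before the interpreter frames are materialized, every page of the region they will occupy is touched so the stack is grown (or faults) up front rather than in the middle of frame construction. A volatile store per page models the generated writes; the page size is an assumption.

    #include <cstddef>

    const size_t PAGE_SIZE = 4096;   // assumption: os::vm_page_size()

    void bang_stack(char* sp, size_t total_frame_bytes) {
      for (size_t off = PAGE_SIZE; off <= total_frame_bytes; off += PAGE_SIZE) {
        *(volatile char*)(sp - off) = 0;   // one store per page, like bang_stack_size
      }
    }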
if (UseStackBanging) { - __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); + __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); __ bang_stack_size(rbx, rcx); } @@ -2326,10 +2762,10 @@ // Load array of frame pcs into ECX __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); - __ popl(rsi); // trash the pc + __ pop(rsi); // trash the pc // Load array of frame sizes into ESI - __ movl(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes())); + __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes())); Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes()); @@ -2337,7 +2773,7 @@ __ movl(counter, rbx); // Pick up the initial fp we should save - __ movl(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes())); + __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes())); // Now adjust the caller's stack to make up for the extra locals // but record the original sp so that we can save it in the skeletal interpreter @@ -2345,47 +2781,48 @@ // value and not the "real" sp value. Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes()); - __ movl(sp_temp, rsp); - __ subl(rsp, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); + __ movptr(sp_temp, rsp); + __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); + __ subptr(rsp, rbx); // Push interpreter frames in a loop Label loop; __ bind(loop); - __ movl(rbx, Address(rsi, 0)); // Load frame size + __ movptr(rbx, Address(rsi, 0)); // Load frame size #ifdef CC_INTERP - __ subl(rbx, 4*wordSize); // we'll push pc and ebp by hand and + __ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and #ifdef ASSERT - __ pushl(0xDEADDEAD); // Make a recognizable pattern - __ pushl(0xDEADDEAD); // (parm to RecursiveInterpreter...) + __ push(0xDEADDEAD); // Make a recognizable pattern + __ push(0xDEADDEAD); // (parm to RecursiveInterpreter...) #else /* ASSERT */ - __ subl(rsp, 2*wordSize); // skip the "static long no_param" + __ subptr(rsp, 2*wordSize); // skip the "static long no_param" #endif /* ASSERT */ #else /* CC_INTERP */ - __ subl(rbx, 2*wordSize); // we'll push pc and rbp, by hand + __ subptr(rbx, 2*wordSize); // we'll push pc and rbp, by hand #endif /* CC_INTERP */ - __ pushl(Address(rcx, 0)); // save return address + __ pushptr(Address(rcx, 0)); // save return address __ enter(); // save old & set new rbp, - __ subl(rsp, rbx); // Prolog! - __ movl(rbx, sp_temp); // sender's sp + __ subptr(rsp, rbx); // Prolog! 
+ __ movptr(rbx, sp_temp); // sender's sp #ifdef CC_INTERP - __ movl(Address(rbp, - -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))), - rbx); // Make it walkable + __ movptr(Address(rbp, + -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))), + rbx); // Make it walkable #else /* CC_INTERP */ - // This value is corrected by layout_activation_impl - __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD ); - __ movl(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable + // This value is corrected by layout_activation_impl + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD ); + __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable #endif /* CC_INTERP */ - __ movl(sp_temp, rsp); // pass to next frame - __ addl(rsi, 4); // Bump array pointer (sizes) - __ addl(rcx, 4); // Bump array pointer (pcs) - __ decrement(counter); // decrement counter + __ movptr(sp_temp, rsp); // pass to next frame + __ addptr(rsi, wordSize); // Bump array pointer (sizes) + __ addptr(rcx, wordSize); // Bump array pointer (pcs) + __ decrementl(counter); // decrement counter __ jcc(Assembler::notZero, loop); - __ pushl(Address(rcx, 0)); // save final return address + __ pushptr(Address(rcx, 0)); // save final return address // Re-push self-frame __ enter(); // save old & set new rbp, - __ subl(rsp, (framesize-2) * wordSize); // Prolog! + __ subptr(rsp, (framesize-2) * wordSize); // Prolog! // set last_Java_sp, last_Java_fp @@ -2395,8 +2832,8 @@ // Call C code. Need thread but NOT official VM entry // crud. We cannot block on this call, no GC can happen. Call should // restore return values to their stack-slots with the new SP. - __ movl(Address(rsp,arg0_off*wordSize),rdi); - __ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap); + __ movptr(Address(rsp,arg0_off*wordSize),rdi); + __ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames))); // Set an oopmap for the call site oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) ); @@ -2420,30 +2857,30 @@ //------------------------------generate_handler_blob------ // -// Generate a special Compile2Runtime blob that saves all registers, +// Generate a special Compile2Runtime blob that saves all registers, // setup oopmap, and calls safepoint code to stop the compiled code for // a safepoint. 
// static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) { // Account for thread arg in our frame - const int additional_words = 1; + const int additional_words = 1; int frame_size_in_words; - assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); + assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); ResourceMark rm; OopMapSet *oop_maps = new OopMapSet(); OopMap* map; // allocate space for the code - // setup code generation tools + // setup code generation tools CodeBuffer buffer("handler_blob", 1024, 512); MacroAssembler* masm = new MacroAssembler(&buffer); - + const Register java_thread = rdi; // callee-saved for VC++ - address start = __ pc(); - address call_pc = NULL; + address start = __ pc(); + address call_pc = NULL; // If cause_return is true we are at a poll_return and there is // the return address on the stack to the caller on the nmethod @@ -2452,23 +2889,23 @@ // Otherwise we push space for a return address that the safepoint // handler will install later to make the stack walking sensible. if( !cause_return ) - __ pushl(rbx); // Make room for return address (or push it again) + __ push(rbx); // Make room for return address (or push it again) map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false); - + // The following is basically a call_VM. However, we need the precise // address of the call in order to generate an oopmap. Hence, we do all the // work ourselves. // Push thread argument and setup last_Java_sp __ get_thread(java_thread); - __ pushl(java_thread); + __ push(java_thread); __ set_last_Java_frame(java_thread, noreg, noreg, NULL); - // if this was not a poll_return then we need to correct the return address now. + // if this was not a poll_return then we need to correct the return address now. if( !cause_return ) { - __ movl(rax, Address(java_thread, JavaThread::saved_exception_pc_offset())); - __ movl(Address(rbp, wordSize), rax); + __ movptr(rax, Address(java_thread, JavaThread::saved_exception_pc_offset())); + __ movptr(Address(rbp, wordSize), rax); } // do the call @@ -2482,7 +2919,7 @@ oop_maps->add_gc_map( __ pc() - start, map); // Discard arg - __ popl(rcx); + __ pop(rcx); Label noException; @@ -2490,7 +2927,7 @@ __ get_thread(java_thread); __ reset_last_Java_frame(java_thread, false, false); - __ cmpl(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD); + __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::equal, noException); // Exception pending @@ -2501,16 +2938,16 @@ __ bind(noException); - // Normal exit, register restoring and exit + // Normal exit, register restoring and exit RegisterSaver::restore_live_registers(masm); __ ret(0); - + // make sure all code is generated - masm->flush(); + masm->flush(); // Fill-out other meta info - return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words); + return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words); } // @@ -2522,7 +2959,7 @@ // must do any gc of the args. 
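A rough outline, in ordinary C++ with stand-in types, of the resolve-blob control flow sketched around here: the resolver runs with all live registers saved, a pending exception diverts to the forward-exception stub, and otherwise the resolved method and the entry point to jump to are handed back. This is only a control-flow sketch under those assumptions, not the generated stub.

    struct FakeThread { void* pending_exception; void* vm_result; };

    typedef void* (*resolver_fn)(FakeThread*);   // returns the code entry to jump to

    void* resolve_call(FakeThread* thread, resolver_fn destination,
                       void** method_out /* what ends up in rbx */) {
      void* entry = destination(thread);         // runs with live registers saved
      if (thread->pending_exception != nullptr) {
        return nullptr;                          // caller forwards to the exception stub
      }
      *method_out = thread->vm_result;           // resolved methodOop
      return entry;                              // jump target for the real call
    }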
// static RuntimeStub* generate_resolve_blob(address destination, const char* name) { - assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); + assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); // allocate space for the code ResourceMark rm; @@ -2531,13 +2968,13 @@ MacroAssembler* masm = new MacroAssembler(&buffer); int frame_size_words; - enum frame_layout { + enum frame_layout { thread_off, extra_words }; - + OopMapSet *oop_maps = new OopMapSet(); OopMap* map = NULL; - + int start = __ offset(); map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words); @@ -2547,7 +2984,7 @@ const Register thread = rdi; __ get_thread(rdi); - __ pushl(thread); + __ push(thread); __ set_last_Java_frame(thread, noreg, rbp, NULL); __ call(RuntimeAddress(destination)); @@ -2561,20 +2998,20 @@ // rax, contains the address we are going to jump to assuming no exception got installed - __ addl(rsp, wordSize); + __ addptr(rsp, wordSize); // clear last_Java_sp __ reset_last_Java_frame(thread, true, false); // check for pending exceptions Label pending; - __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); + __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, pending); // get the returned methodOop - __ movl(rbx, Address(thread, JavaThread::vm_result_offset())); - __ movl(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx); + __ movptr(rbx, Address(thread, JavaThread::vm_result_offset())); + __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx); - __ movl(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax); + __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax); RegisterSaver::restore_live_registers(masm); @@ -2591,13 +3028,13 @@ // exception pending => remove activation and forward to exception handler __ get_thread(thread); - __ movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); - __ movl(rax, Address(thread, Thread::pending_exception_offset())); + __ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD); + __ movptr(rax, Address(thread, Thread::pending_exception_offset())); __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); // ------------- // make sure all code is generated - masm->flush(); + masm->flush(); // return the blob // frame_size_words or bytes?? @@ -2622,13 +3059,13 @@ "resolve_static_call"); _polling_page_safepoint_handler_blob = - generate_handler_blob(CAST_FROM_FN_PTR(address, + generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), false); _polling_page_return_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), true); - + generate_deopt_blob(); #ifdef COMPILER2 generate_uncommon_trap_blob(); --- old/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp 2009-08-01 04:08:12.432979500 +0100 +++ new/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp 2009-08-01 04:08:12.342236421 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)sharedRuntime_x86_64.cpp 1.44 07/09/17 09:26:01 JVM" -#endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -58,7 +55,7 @@ rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, rbp_off2, return_off, return_off2, - framesize + framesize }; }; @@ -66,7 +63,7 @@ // Capture info about frame layout. Layout offsets are in jint // units because compiler frame slots are jints. #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off - enum layout { + enum layout { fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area xmm_off = fpu_state_off + 160/BytesPerInt, // offset in fxsave save area DEF_XMM_OFFS(0), @@ -114,7 +111,7 @@ reg_save_size // size in compiler stack slots }; - public: + public: static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words); static void restore_live_registers(MacroAssembler* masm); @@ -123,6 +120,7 @@ // values on its own static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; } + static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; } static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; } static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; } static int return_offset_in_bytes(void) { return BytesPerInt * return_off; } @@ -155,7 +153,7 @@ __ push_CPU_state(); // Push a multiple of 16 bytes if (frame::arg_reg_save_area_bytes != 0) { // Allocate argument register save area - __ subq(rsp, frame::arg_reg_save_area_bytes); + __ subptr(rsp, frame::arg_reg_save_area_bytes); } // Set an oopmap for the call site. This oopmap will map all @@ -200,7 +198,7 @@ // %%% These should all be a waste but we'll keep things as they were for now if (true) { - map->set_callee_saved(VMRegImpl::stack2reg( raxH_off + additional_frame_slots), + map->set_callee_saved(VMRegImpl::stack2reg( raxH_off + additional_frame_slots), rax->as_VMReg()->next()); map->set_callee_saved(VMRegImpl::stack2reg( rcxH_off + additional_frame_slots), rcx->as_VMReg()->next()); @@ -269,12 +267,12 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) { if (frame::arg_reg_save_area_bytes != 0) { // Pop arg register save area - __ addq(rsp, frame::arg_reg_save_area_bytes); + __ addptr(rsp, frame::arg_reg_save_area_bytes); } // Recover CPU state __ pop_CPU_state(); // Get the rbp described implicitly by the calling convention (no oopMap) - __ popq(rbp); + __ pop(rbp); } void RegisterSaver::restore_result_registers(MacroAssembler* masm) { @@ -288,22 +286,24 @@ // Restore fp result register __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes())); // Restore integer result register - __ movq(rax, Address(rsp, rax_offset_in_bytes())); + __ movptr(rax, Address(rsp, rax_offset_in_bytes())); + __ movptr(rdx, Address(rsp, rdx_offset_in_bytes())); + // Pop all of the register save are off the stack except the return address - __ addq(rsp, return_offset_in_bytes()); + __ addptr(rsp, return_offset_in_bytes()); } // The java_calling_convention describes stack locations as ideal slots on // a frame with no abi restrictions. Since we must observe abi restrictions // (like the placement of the register window) the slots must be biased by // the following value. 
-static int reg2offset_in(VMReg r) { +static int reg2offset_in(VMReg r) { // Account for saved rbp and return address // This should really be in_preserve_stack_slots return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size; } -static int reg2offset_out(VMReg r) { +static int reg2offset_out(VMReg r) { return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; } @@ -316,7 +316,7 @@ // VMRegImpl::stack0 refers to the first slot 0(sp). // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher. Register // up to RegisterImpl::number_of_registers) are the 64-bit -// integer registers. +// integer registers. // Note: the INPUTS in sig_bt are in units of Java argument words, which are // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit @@ -334,7 +334,7 @@ int is_outgoing) { // Create the mapping between argument positions and - // registers. + // registers. static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = { j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5 }; @@ -410,18 +410,18 @@ static void patch_callers_callsite(MacroAssembler *masm) { Label L; __ verify_oop(rbx); - __ cmpq(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int)NULL_WORD); + __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); // Save the current stack pointer - __ movq(r13, rsp); + __ mov(r13, rsp); // Schedule the branch target address early. // Call into the VM to patch the caller, then jump to compiled callee // rax isn't live so capture return address while we easily can - __ movq(rax, Address(rsp, 0)); + __ movptr(rax, Address(rsp, 0)); // align stack so push_CPU_state doesn't fault - __ andq(rsp, -(StackAlignmentInBytes)); + __ andptr(rsp, -(StackAlignmentInBytes)); __ push_CPU_state(); @@ -433,20 +433,20 @@ // Allocate argument register save area if (frame::arg_reg_save_area_bytes != 0) { - __ subq(rsp, frame::arg_reg_save_area_bytes); + __ subptr(rsp, frame::arg_reg_save_area_bytes); } - __ movq(c_rarg0, rbx); - __ movq(c_rarg1, rax); + __ mov(c_rarg0, rbx); + __ mov(c_rarg1, rax); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite))); // De-allocate argument register save area if (frame::arg_reg_save_area_bytes != 0) { - __ addq(rsp, frame::arg_reg_save_area_bytes); + __ addptr(rsp, frame::arg_reg_save_area_bytes); } __ pop_CPU_state(); // restore sp - __ movq(rsp, r13); + __ mov(rsp, r13); __ bind(L); } @@ -455,13 +455,13 @@ if (TaggedStackInterpreter) { int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0); if (sig == T_OBJECT || sig == T_ARRAY) { - __ mov64(Address(rsp, tag_offset), frame::TagReference); + __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagReference); } else if (sig == T_LONG || sig == T_DOUBLE) { int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1); - __ mov64(Address(rsp, next_tag_offset), frame::TagValue); - __ mov64(Address(rsp, tag_offset), frame::TagValue); + __ movptr(Address(rsp, next_tag_offset), (int32_t) frame::TagValue); + __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue); } else { - __ mov64(Address(rsp, tag_offset), frame::TagValue); + __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue); } } } @@ -482,8 +482,8 @@ __ bind(skip_fixup); - // Since all args are passed on the stack, total_args_passed * - // Interpreter::stackElementSize is the space we need. 
Plus 1 because + // Since all args are passed on the stack, total_args_passed * + // Interpreter::stackElementSize is the space we need. Plus 1 because // we also account for the return address location since // we store it first rather than hold it in rax across all the shuffling @@ -493,15 +493,15 @@ extraspace = round_to(extraspace, 2*wordSize); // Get return address - __ popq(rax); + __ pop(rax); // set senderSP value - __ movq(r13, rsp); + __ mov(r13, rsp); - __ subq(rsp, extraspace); + __ subptr(rsp, extraspace); // Store the return address in the expected location - __ movq(Address(rsp, 0), rax); + __ movptr(Address(rsp, 0), rax); // Now write the args into the outgoing interpreter space for (int i = 0; i < total_args_passed; i++) { @@ -511,7 +511,7 @@ } // offset to start parameters - int st_off = (total_args_passed - i) * Interpreter::stackElementSize() + + int st_off = (total_args_passed - i) * Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes(); int next_off = st_off - Interpreter::stackElementSize(); @@ -534,13 +534,13 @@ assert(!r_2->is_valid(), ""); continue; } - if (r_1->is_stack()) { + if (r_1->is_stack()) { // memory to memory use rax int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace; if (!r_2->is_valid()) { // sign extend?? __ movl(rax, Address(rsp, ld_off)); - __ movq(Address(rsp, st_off), rax); + __ movptr(Address(rsp, st_off), rax); tag_stack(masm, sig_bt[i], st_off); } else { @@ -556,7 +556,7 @@ #ifdef ASSERT // Overwrite the unused slot with known junk __ mov64(rax, CONST64(0xdeadffffdeadaaaa)); - __ movq(Address(rsp, st_off), rax); + __ movptr(Address(rsp, st_off), rax); #endif /* ASSERT */ tag_stack(masm, sig_bt[i], next_off); } else { @@ -579,12 +579,12 @@ #ifdef ASSERT // Overwrite the unused slot with known junk __ mov64(rax, CONST64(0xdeadffffdeadaaab)); - __ movq(Address(rsp, st_off), rax); + __ movptr(Address(rsp, st_off), rax); #endif /* ASSERT */ __ movq(Address(rsp, next_off), r); tag_stack(masm, sig_bt[i], next_off); } else { - __ movq(Address(rsp, st_off), r); + __ movptr(Address(rsp, st_off), r); tag_stack(masm, sig_bt[i], st_off); } } @@ -598,7 +598,7 @@ #ifdef ASSERT // Overwrite the unused slot with known junk __ mov64(rax, CONST64(0xdeadffffdeadaaac)); - __ movq(Address(rsp, st_off), rax); + __ movptr(Address(rsp, st_off), rax); #endif /* ASSERT */ __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister()); tag_stack(masm, sig_bt[i], next_off); @@ -607,7 +607,7 @@ } // Schedule the branch target address early. - __ movq(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset()))); + __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset()))); __ jmp(rcx); } @@ -634,7 +634,7 @@ // save code can segv when fxsave instructions find improperly // aligned stack pointer. - __ movq(rax, Address(rsp, 0)); + __ movptr(rax, Address(rsp, 0)); // Cut-out for having no stack args. Since up to 2 int/oop args are passed // in registers, we will occasionally have no stack args. @@ -643,25 +643,25 @@ // Sig words on the stack are greater-than VMRegImpl::stack0. Those in // registers are below. By subtracting stack0, we either get a negative // number (all values in registers) or the maximum stack slot accessed. - + // Convert 4-byte c2 stack slots to words. 
comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord; // Round up to miminum stack alignment, in wordSize comp_words_on_stack = round_to(comp_words_on_stack, 2); - __ subq(rsp, comp_words_on_stack * wordSize); + __ subptr(rsp, comp_words_on_stack * wordSize); } // Ensure compiled code always sees stack at proper alignment - __ andq(rsp, -16); + __ andptr(rsp, -16); // push the return address and misalign the stack that youngest frame always sees // as far as the placement of the call instruction - __ pushq(rax); - + __ push(rax); + // Will jump to the compiled code just as if compiled code was doing it. // Pre-load the register-jump target early, to schedule it better. - __ movq(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset()))); + __ movptr(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset()))); // Now generate the shuffle code. Pick up all register args and move the // rest through the floating point stack top. @@ -673,8 +673,8 @@ continue; } - // Pick up 0, 1 or 2 words from SP+offset. - + // Pick up 0, 1 or 2 words from SP+offset. + assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?"); // Load in argument order going down. @@ -686,7 +686,7 @@ // Point to interpreter value (vs. tag) int next_off = ld_off - Interpreter::stackElementSize(); // - // + // // VMReg r_1 = regs[i].first(); VMReg r_2 = regs[i].second(); @@ -694,13 +694,13 @@ assert(!r_2->is_valid(), ""); continue; } - if (r_1->is_stack()) { + if (r_1->is_stack()) { // Convert stack slot to an SP offset (+ wordSize to account for return address ) int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize; if (!r_2->is_valid()) { // sign extend??? __ movl(rax, Address(r13, ld_off)); - __ movq(Address(rsp, st_off), rax); + __ movptr(Address(rsp, st_off), rax); } else { // // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE @@ -711,7 +711,7 @@ // are accessed as negative so LSW is at LOW address // ld_off is MSW so get LSW - const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)? + const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)? next_off : ld_off; __ movq(rax, Address(r13, offset)); // st_off is LSW (i.e. reg.first()) @@ -726,7 +726,7 @@ // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case // So we must adjust where to pick up the data to match the interpreter. - const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)? + const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)? next_off : ld_off; // this can be a misaligned move @@ -754,12 +754,12 @@ // is possible. So we stash the desired callee in the thread // and the vm will find there should this case occur. 
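The i2c adapter sizes its outgoing area by converting 4-byte compiler stack slots into machine words and rounding up to an even word count, which together with andptr(rsp, -16) keeps the stack 16-byte aligned for the compiled callee. A worked version of that arithmetic, with LP64 constants assumed, is sketched below.

#include <cassert>
#include <iostream>

// Assumed LP64 constants: 4-byte VM stack slots, 8-byte machine words.
const int stack_slot_size    = 4;   // VMRegImpl::stack_slot_size
const int word_size          = 8;   // wordSize
const int log_bytes_per_word = 3;   // LogBytesPerWord

// Round x up to a multiple of align (align must be a power of two).
int round_to(int x, int align) { return (x + align - 1) & ~(align - 1); }

int main() {
  int comp_args_on_stack = 5;        // example: five 32-bit slots spill to memory
  int words = round_to(comp_args_on_stack * stack_slot_size, word_size)
              >> log_bytes_per_word; // slots -> whole words
  words = round_to(words, 2);        // even word count preserves 16-byte alignment
  assert(words == 4);
  std::cout << "outgoing area: " << words << " words\n";
  return 0;
}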
- __ movq(Address(r15_thread, JavaThread::callee_target_offset()), rbx); + __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx); // put methodOop where a c2i would expect should we end up there // only needed becaus eof c2 resolve stubs return methodOop as a result in - // rax - __ movq(rax, rbx); + // rax + __ mov(rax, rbx); __ jmp(r11); } @@ -792,11 +792,11 @@ { __ verify_oop(holder); - __ movq(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); + __ load_klass(temp, receiver); __ verify_oop(temp); - - __ cmpq(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset())); - __ movq(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset())); + + __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset())); + __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset())); __ jcc(Assembler::equal, ok); __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); @@ -804,7 +804,7 @@ // Method might have been compiled since the call site was patched to // interpreted if that is the case treat it as a miss so we can get // the call site corrected. - __ cmpq(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int)NULL_WORD); + __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD); __ jcc(Assembler::equal, skip_fixup); __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); } @@ -817,11 +817,11 @@ return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry); } -int SharedRuntime::c_calling_convention(const BasicType *sig_bt, +int SharedRuntime::c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) { // We return the amount of VMRegImpl stack slots we need to reserve for all -// the arguments NOT counting out_preserve_stack_slots. +// the arguments NOT counting out_preserve_stack_slots. // NOTE: These arrays will have to change when c1 is ported #ifdef _WIN64 @@ -919,13 +919,13 @@ } } #ifdef _WIN64 - // windows abi requires that we always allocate enough stack space + // windows abi requires that we always allocate enough stack space // for 4 64bit registers to be stored down. if (stk_args < 8) { stk_args = 8; } #endif // _WIN64 - + return stk_args; } @@ -959,7 +959,7 @@ // An oop arg. 
Must pass a handle not the oop itself -static void object_move(MacroAssembler* masm, +static void object_move(MacroAssembler* masm, OopMap* map, int oop_handle_offset, int framesize_in_slots, @@ -983,10 +983,10 @@ *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; } - __ cmpq(Address(rbp, reg2offset_in(src.first())), (int)NULL_WORD); - __ leaq(rHandle, Address(rbp, reg2offset_in(src.first()))); + __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD); + __ lea(rHandle, Address(rbp, reg2offset_in(src.first()))); // conditionally move a NULL - __ cmovq(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first()))); + __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first()))); } else { // Oop is in an a register we must store it to the space we reserve @@ -1014,20 +1014,20 @@ map->set_oop(VMRegImpl::stack2reg(oop_slot)); // Store oop in handle area, may be NULL - __ movq(Address(rsp, offset), rOop); + __ movptr(Address(rsp, offset), rOop); if (is_receiver) { *receiver_offset = offset; } - __ cmpq(rOop, (int)NULL); - __ leaq(rHandle, Address(rsp, offset)); + __ cmpptr(rOop, (int32_t)NULL_WORD); + __ lea(rHandle, Address(rsp, offset)); // conditionally move a NULL from the handle area where it was just stored - __ cmovq(Assembler::equal, rHandle, Address(rsp, offset)); + __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset)); } // If arg is on the stack then place it otherwise it is already in correct reg. if (dst.first()->is_stack()) { - __ movq(Address(rsp, reg2offset_out(dst.first())), rHandle); + __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle); } } @@ -1042,7 +1042,7 @@ if (src.first()->is_stack()) { if (dst.first()->is_stack()) { __ movl(rax, Address(rbp, reg2offset_in(src.first()))); - __ movq(Address(rsp, reg2offset_out(dst.first())), rax); + __ movptr(Address(rsp, reg2offset_out(dst.first())), rax); } else { // stack to reg assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters"); @@ -1071,7 +1071,7 @@ if (src.is_single_phys_reg() ) { if (dst.is_single_phys_reg()) { if (dst.first() != src.first()) { - __ movq(dst.first()->as_Register(), src.first()->as_Register()); + __ mov(dst.first()->as_Register(), src.first()->as_Register()); } } else { assert(dst.is_single_reg(), "not a stack pair"); @@ -1127,7 +1127,7 @@ break; case T_VOID: break; default: { - __ movq(Address(rbp, -wordSize), rax); + __ movptr(Address(rbp, -wordSize), rax); } } } @@ -1144,7 +1144,7 @@ break; case T_VOID: break; default: { - __ movq(rax, Address(rbp, -wordSize)); + __ movptr(rax, Address(rbp, -wordSize)); } } } @@ -1152,9 +1152,9 @@ static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { for ( int i = first_arg ; i < arg_count ; i++ ) { if (args[i].first()->is_Register()) { - __ pushq(args[i].first()->as_Register()); + __ push(args[i].first()->as_Register()); } else if (args[i].first()->is_XMMRegister()) { - __ subq(rsp, 2*wordSize); + __ subptr(rsp, 2*wordSize); __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister()); } } @@ -1163,10 +1163,10 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) { if (args[i].first()->is_Register()) { - __ popq(args[i].first()->as_Register()); + __ pop(args[i].first()->as_Register()); } else if (args[i].first()->is_XMMRegister()) { __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0)); - __ addq(rsp, 
2*wordSize); + __ addptr(rsp, 2*wordSize); } } } @@ -1186,7 +1186,7 @@ BasicType ret_type) { // Native nmethod wrappers never take possesion of the oop arguments. // So the caller will gc the arguments. The only thing we need an - // oopMap for is if the call is static + // oopMap for is if the call is static // // An OopMap for lock (and class if static) OopMapSet *oop_maps = new OopMapSet(); @@ -1218,12 +1218,12 @@ // Now figure out where the args must be stored and how much stack space // they require. - // + // int out_arg_slots; out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args); // Compute framesize for the wrapper. We need to handlize all oops in - // incoming registers + // incoming registers // Calculate the total number of stack slots we will need. @@ -1267,7 +1267,7 @@ // FP-> | | // |---------------------| // | 2 slots for moves | - // |---------------------| + // |---------------------| // | lock box (if sync) | // |---------------------| <- lock_slot_offset // | klass (if static) | @@ -1277,14 +1277,14 @@ // | outbound memory | // | based arguments | // | | - // |---------------------| + // |---------------------| // | | // SP-> | out_preserved_slots | // // - // Now compute actual number of stack words we need rounding to make + // Now compute actual number of stack words we need rounding to make // stack properly aligned. stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word); @@ -1300,21 +1300,26 @@ const Register ic_reg = rax; const Register receiver = j_rarg0; + const Register tmp = rdx; Label ok; Label exception_pending; __ verify_oop(receiver); - __ cmpq(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes())); + __ push(tmp); // spill (any other registers free here???) + __ load_klass(tmp, receiver); + __ cmpq(ic_reg, tmp); __ jcc(Assembler::equal, ok); + __ pop(tmp); __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); + __ bind(ok); + __ pop(tmp); + // Verified entry point must be aligned __ align(8); - __ bind(ok); - int vep_offset = ((intptr_t)__ pc()) - start; // The instruction at the verified entry point must be 5 bytes or longer @@ -1322,7 +1327,7 @@ // instruction fits that requirement. // Generate stack overflow check - + if (UseStackBanging) { __ bang_stack_with_offset(StackShadowPages*os::vm_page_size()); } else { @@ -1333,7 +1338,7 @@ // Generate a new frame for the wrapper. __ enter(); // -2 because return address is already present and so is saved rbp - __ subq(rsp, stack_size - 2*wordSize); + __ subptr(rsp, stack_size - 2*wordSize); // Frame is now completed as far as size and linkage. @@ -1342,9 +1347,9 @@ #ifdef ASSERT { Label L; - __ movq(rax, rsp); - __ andq(rax, -16); // must be 16 byte boundry (see amd64 ABI) - __ cmpq(rax, rsp); + __ mov(rax, rsp); + __ andptr(rax, -16); // must be 16 byte boundry (see amd64 ABI) + __ cmpptr(rax, rsp); __ jcc(Assembler::equal, L); __ stop("improperly aligned stack"); __ bind(L); @@ -1366,9 +1371,9 @@ // them. // ----------------- - // The Grand Shuffle + // The Grand Shuffle - // The Java calling convention is either equal (linux) or denser (win64) than the + // The Java calling convention is either equal (linux) or denser (win64) than the // c calling convention. However the because of the jni_env argument the c calling // convention always has at least one more (and two for static) arguments than Java. // Therefore if we move the args from java -> c backwards then we will never have @@ -1383,7 +1388,7 @@ // the oops in the caller's frame. 
Since we are sure to have // more args than the caller doubling is enough to make // sure we can capture all the incoming oop args from the - // caller. + // caller. // OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); @@ -1403,7 +1408,7 @@ for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) { freg_destroyed[f] = false; } - + #endif /* ASSERT */ @@ -1436,7 +1441,7 @@ break; case T_DOUBLE: - assert( i + 1 < total_in_args && + assert( i + 1 < total_in_args && in_sig_bt[i + 1] == T_VOID && out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); double_move(masm, in_regs[i], out_regs[c_arg]); @@ -1465,13 +1470,13 @@ __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror())); // Now handlize the static class mirror it's known not-null. - __ movq(Address(rsp, klass_offset), oop_handle_reg); + __ movptr(Address(rsp, klass_offset), oop_handle_reg); map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); // Now get the handle - __ leaq(oop_handle_reg, Address(rsp, klass_offset)); + __ lea(oop_handle_reg, Address(rsp, klass_offset)); // store the klass handle as second argument - __ movq(c_rarg1, oop_handle_reg); + __ movptr(c_rarg1, oop_handle_reg); // and protect the arg if we must spill c_arg--; } @@ -1496,7 +1501,18 @@ save_args(masm, total_c_args, c_arg, out_regs); __ movoop(c_rarg1, JNIHandles::make_local(method())); __ call_VM_leaf( - CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), + CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), + r15_thread, c_rarg1); + restore_args(masm, total_c_args, c_arg, out_regs); + } + + // RedefineClasses() tracing support for obsolete method entry + if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { + // protect the args we've loaded + save_args(masm, total_c_args, c_arg, out_regs); + __ movoop(c_rarg1, JNIHandles::make_local(method())); + __ call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), r15_thread, c_rarg1); restore_args(masm, total_c_args, c_arg, out_regs); } @@ -1519,14 +1535,14 @@ const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes(); // Get the handle (the 2nd argument) - __ movq(oop_handle_reg, c_rarg1); + __ mov(oop_handle_reg, c_rarg1); // Get address of the box - __ leaq(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size)); + __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size)); - // Load the oop from the handle - __ movq(obj_reg, Address(oop_handle_reg, 0)); + // Load the oop from the handle + __ movptr(obj_reg, Address(oop_handle_reg, 0)); if (UseBiasedLocking) { __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock); @@ -1536,17 +1552,17 @@ __ movl(swap_reg, 1); // Load (object->mark() | 1) into swap_reg %rax - __ orq(swap_reg, Address(obj_reg, 0)); + __ orptr(swap_reg, Address(obj_reg, 0)); // Save (object->mark() | 1) into BasicLock's displaced header - __ movq(Address(lock_reg, mark_word_offset), swap_reg); + __ movptr(Address(lock_reg, mark_word_offset), swap_reg); if (os::is_MP()) { __ lock(); } // src -> dest iff dest == rax else rax <- dest - __ cmpxchgq(lock_reg, Address(obj_reg, 0)); + __ cmpxchgptr(lock_reg, Address(obj_reg, 0)); __ jcc(Assembler::equal, lock_done); // Hmm should this move to the slow path code area??? @@ -1560,11 +1576,11 @@ // least significant 2 bits clear. 
// NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg - __ subq(swap_reg, rsp); - __ andq(swap_reg, 3 - os::vm_page_size()); + __ subptr(swap_reg, rsp); + __ andptr(swap_reg, 3 - os::vm_page_size()); // Save the test result, for recursive case, the result is zero - __ movq(Address(lock_reg, mark_word_offset), swap_reg); + __ movptr(Address(lock_reg, mark_word_offset), swap_reg); __ jcc(Assembler::notEqual, slow_path_lock); // Slow path will re-enter here @@ -1578,25 +1594,25 @@ // get JNIEnv* which is first argument to native - __ leaq(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset()))); + __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset()))); // Now set thread in native - __ mov64(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native); + __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native); __ call(RuntimeAddress(method->native_function())); // Either restore the MXCSR register after returning from the JNI Call // or verify that it wasn't changed. if (RestoreMXCSROnJNICalls) { - __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std())); + __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std())); } else if (CheckJNICalls ) { - __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry()))); + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry()))); } - // Unpack native results. + // Unpack native results. switch (ret_type) { case T_BOOLEAN: __ c2bool(rax); break; case T_CHAR : __ movzwl(rax, rax); break; @@ -1622,13 +1638,13 @@ // VM thread changes sync state to synchronizing and suspends threads for GC. // Thread A is resumed to finish this native method, but doesn't block here since it // didn't see any synchronization is progress, and escapes. - __ mov64(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans); + __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans); - if(os::is_MP()) { + if(os::is_MP()) { if (UseMembar) { // Force this write out before the read below __ membar(Assembler::Membar_mask_bits( - Assembler::LoadLoad | Assembler::LoadStore | + Assembler::LoadLoad | Assembler::LoadStore | Assembler::StoreLoad | Assembler::StoreStore)); } else { // Write serialization page so VM thread can do a pseudo remote membar. @@ -1660,12 +1676,13 @@ // by hand. 
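After the native call returns, the thread first publishes _thread_in_native_trans and only then re-reads the safepoint/suspend state, with either a StoreLoad membar or a write to the serialization page in between so the VM thread cannot miss the transition. Below is a minimal sketch of that publish-then-fence ordering, using invented names and std::atomic rather than the VM's state machinery.

#include <atomic>
#include <iostream>

enum ThreadState { in_java, in_native, in_native_trans };  // illustrative subset

std::atomic<int> thread_state{in_java};

void return_from_native() {
  thread_state.store(in_native_trans, std::memory_order_relaxed);  // publish the state
  // StoreLoad barrier before re-reading the safepoint state; this is the ordering
  // the membar / serialization-page code above exists to provide
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

int main() {
  thread_state.store(in_native, std::memory_order_relaxed);
  return_from_native();
  std::cout << "thread_state = " << thread_state.load() << "\n";
  return 0;
}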
// save_native_result(masm, ret_type, stack_slots); - __ movq(c_rarg0, r15_thread); - __ movq(r12, rsp); // remember sp - __ subq(rsp, frame::arg_reg_save_area_bytes); // windows - __ andq(rsp, -16); // align stack as required by ABI + __ mov(c_rarg0, r15_thread); + __ mov(r12, rsp); // remember sp + __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows + __ andptr(rsp, -16); // align stack as required by ABI __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); - __ movq(rsp, r12); // restore sp + __ mov(rsp, r12); // restore sp + __ reinit_heapbase(); // Restore any method result value restore_native_result(masm, ret_type, stack_slots); __ bind(Continue); @@ -1680,7 +1697,7 @@ __ jcc(Assembler::equal, reguard); __ bind(reguard_done); - // native result if any is live + // native result if any is live // Unlock Label unlock_done; @@ -1688,7 +1705,7 @@ if (method->is_synchronized()) { // Get locked oop from the handle we passed to jni - __ movq(obj_reg, Address(oop_handle_reg, 0)); + __ movptr(obj_reg, Address(oop_handle_reg, 0)); Label done; @@ -1698,7 +1715,7 @@ // Simple recursive lock? - __ cmpq(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int)NULL_WORD); + __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD); __ jcc(Assembler::equal, done); // Must save rax if if it is live now because cmpxchg must use it @@ -1708,15 +1725,15 @@ // get address of the stack lock - __ leaq(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size)); + __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size)); // get old displaced header - __ movq(old_hdr, Address(rax, 0)); + __ movptr(old_hdr, Address(rax, 0)); // Atomic swap old header if oop still contains the stack lock if (os::is_MP()) { __ lock(); } - __ cmpxchgq(old_hdr, Address(obj_reg, 0)); + __ cmpxchgptr(old_hdr, Address(obj_reg, 0)); __ jcc(Assembler::notEqual, slow_path_unlock); // slow path re-enters here @@ -1728,13 +1745,12 @@ __ bind(done); } - { SkipIfEqual skip(masm, &DTraceMethodProbes, false); save_native_result(masm, ret_type, stack_slots); __ movoop(c_rarg1, JNIHandles::make_local(method())); __ call_VM_leaf( - CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), + CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), r15_thread, c_rarg1); restore_native_result(masm, ret_type, stack_slots); } @@ -1744,23 +1760,23 @@ // Unpack oop result if (ret_type == T_OBJECT || ret_type == T_ARRAY) { Label L; - __ testq(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, L); - __ movq(rax, Address(rax, 0)); + __ movptr(rax, Address(rax, 0)); __ bind(L); __ verify_oop(rax); } // reset handle block - __ movq(rcx, Address(r15_thread, JavaThread::active_handles_offset())); - __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int)NULL_WORD); + __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset())); + __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); // pop our frame __ leave(); // Any exception pending? 
- __ cmpq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD); + __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, exception_pending); // Return @@ -1788,9 +1804,9 @@ // protect the args we've loaded save_args(masm, total_c_args, c_arg, out_regs); - __ movq(c_rarg0, obj_reg); - __ movq(c_rarg1, lock_reg); - __ movq(c_rarg2, r15_thread); + __ mov(c_rarg0, obj_reg); + __ mov(c_rarg1, lock_reg); + __ mov(c_rarg2, r15_thread); // Not a leaf but we have last_Java_frame setup as we want __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3); @@ -1798,7 +1814,7 @@ #ifdef ASSERT { Label L; - __ cmpq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD); + __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); __ stop("no pending exception allowed on exit from monitorenter"); __ bind(L); @@ -1818,31 +1834,32 @@ save_native_result(masm, ret_type, stack_slots); } - __ leaq(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size)); + __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size)); - __ movq(c_rarg0, obj_reg); - __ movq(r12, rsp); // remember sp - __ subq(rsp, frame::arg_reg_save_area_bytes); // windows - __ andq(rsp, -16); // align stack as required by ABI + __ mov(c_rarg0, obj_reg); + __ mov(r12, rsp); // remember sp + __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows + __ andptr(rsp, -16); // align stack as required by ABI // Save pending exception around call to VM (which contains an EXCEPTION_MARK) // NOTE that obj_reg == rbx currently - __ movq(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset()))); - __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD); + __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset()))); + __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C))); - __ movq(rsp, r12); // restore sp + __ mov(rsp, r12); // restore sp + __ reinit_heapbase(); #ifdef ASSERT { Label L; - __ cmpq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD); + __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD); __ jcc(Assembler::equal, L); __ stop("no pending exception allowed on exit complete_monitor_unlocking_C"); __ bind(L); } #endif /* ASSERT */ - __ movq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx); + __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx); if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) { restore_native_result(masm, ret_type, stack_slots); @@ -1851,17 +1868,18 @@ // END Slow path unlock - } // synchronized - + } // synchronized + // SLOW PATH Reguard the stack if needed __ bind(reguard); save_native_result(masm, ret_type, stack_slots); - __ movq(r12, rsp); // remember sp - __ subq(rsp, frame::arg_reg_save_area_bytes); // windows - __ andq(rsp, -16); // align stack as required by ABI + __ mov(r12, rsp); // remember sp + __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows + __ andptr(rsp, -16); // align stack as required by ABI __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); - __ movq(rsp, r12); // restore sp 
+ __ mov(rsp, r12); // restore sp + __ reinit_heapbase(); restore_native_result(masm, ret_type, stack_slots); // and continue __ jmp(reguard_done); @@ -1882,6 +1900,630 @@ } +#ifdef HAVE_DTRACE_H +// --------------------------------------------------------------------------- +// Generate a dtrace nmethod for a given signature. The method takes arguments +// in the Java compiled code convention, marshals them to the native +// abi and then leaves nops at the position you would expect to call a native +// function. When the probe is enabled the nops are replaced with a trap +// instruction that dtrace inserts and the trace will cause a notification +// to dtrace. +// +// The probes are only able to take primitive types and java/lang/String as +// arguments. No other java types are allowed. Strings are converted to utf8 +// strings so that from dtrace point of view java strings are converted to C +// strings. There is an arbitrary fixed limit on the total space that a method +// can use for converting the strings. (256 chars per string in the signature). +// So any java string larger then this is truncated. + +static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 }; +static bool offsets_initialized = false; + + +nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm, + methodHandle method) { + + + // generate_dtrace_nmethod is guarded by a mutex so we are sure to + // be single threaded in this method. + assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be"); + + if (!offsets_initialized) { + fp_offset[c_rarg0->as_VMReg()->value()] = -1 * wordSize; + fp_offset[c_rarg1->as_VMReg()->value()] = -2 * wordSize; + fp_offset[c_rarg2->as_VMReg()->value()] = -3 * wordSize; + fp_offset[c_rarg3->as_VMReg()->value()] = -4 * wordSize; + fp_offset[c_rarg4->as_VMReg()->value()] = -5 * wordSize; + fp_offset[c_rarg5->as_VMReg()->value()] = -6 * wordSize; + + fp_offset[c_farg0->as_VMReg()->value()] = -7 * wordSize; + fp_offset[c_farg1->as_VMReg()->value()] = -8 * wordSize; + fp_offset[c_farg2->as_VMReg()->value()] = -9 * wordSize; + fp_offset[c_farg3->as_VMReg()->value()] = -10 * wordSize; + fp_offset[c_farg4->as_VMReg()->value()] = -11 * wordSize; + fp_offset[c_farg5->as_VMReg()->value()] = -12 * wordSize; + fp_offset[c_farg6->as_VMReg()->value()] = -13 * wordSize; + fp_offset[c_farg7->as_VMReg()->value()] = -14 * wordSize; + + offsets_initialized = true; + } + // Fill in the signature array, for the calling-convention call. + int total_args_passed = method->size_of_parameters(); + + BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed); + VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed); + + // The signature we are going to use for the trap that dtrace will see + // java/lang/String is converted. We drop "this" and any other object + // is converted to NULL. (A one-slot java/lang/Long object reference + // is converted to a two-slot long, which is why we double the allocation). + BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2); + VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2); + + int i=0; + int total_strings = 0; + int first_arg_to_pass = 0; + int total_c_args = 0; + + // Skip the receiver as dtrace doesn't want to see it + if( !method->is_static() ) { + in_sig_bt[i++] = T_OBJECT; + first_arg_to_pass = 1; + } + + // We need to convert the java args to where a native (non-jni) function + // would expect them. 
To figure out where they go we convert the java + // signature to a C signature. + + SignatureStream ss(method->signature()); + for ( ; !ss.at_return_type(); ss.next()) { + BasicType bt = ss.type(); + in_sig_bt[i++] = bt; // Collect remaining bits of signature + out_sig_bt[total_c_args++] = bt; + if( bt == T_OBJECT) { + symbolOop s = ss.as_symbol_or_null(); + if (s == vmSymbols::java_lang_String()) { + total_strings++; + out_sig_bt[total_c_args-1] = T_ADDRESS; + } else if (s == vmSymbols::java_lang_Boolean() || + s == vmSymbols::java_lang_Character() || + s == vmSymbols::java_lang_Byte() || + s == vmSymbols::java_lang_Short() || + s == vmSymbols::java_lang_Integer() || + s == vmSymbols::java_lang_Float()) { + out_sig_bt[total_c_args-1] = T_INT; + } else if (s == vmSymbols::java_lang_Long() || + s == vmSymbols::java_lang_Double()) { + out_sig_bt[total_c_args-1] = T_LONG; + out_sig_bt[total_c_args++] = T_VOID; + } + } else if ( bt == T_LONG || bt == T_DOUBLE ) { + in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots + // We convert double to long + out_sig_bt[total_c_args-1] = T_LONG; + out_sig_bt[total_c_args++] = T_VOID; + } else if ( bt == T_FLOAT) { + // We convert float to int + out_sig_bt[total_c_args-1] = T_INT; + } + } + + assert(i==total_args_passed, "validly parsed signature"); + + // Now get the compiled-Java layout as input arguments + int comp_args_on_stack; + comp_args_on_stack = SharedRuntime::java_calling_convention( + in_sig_bt, in_regs, total_args_passed, false); + + // Now figure out where the args must be stored and how much stack space + // they require (neglecting out_preserve_stack_slots but space for storing + // the 1st six register arguments). It's weird see int_stk_helper. + + int out_arg_slots; + out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args); + + // Calculate the total number of stack slots we will need. + + // First count the abi requirement plus all of the outgoing args + int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; + + // Now space for the string(s) we must convert + int* string_locs = NEW_RESOURCE_ARRAY(int, total_strings + 1); + for (i = 0; i < total_strings ; i++) { + string_locs[i] = stack_slots; + stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size; + } + + // Plus the temps we might need to juggle register args + // regs take two slots each + stack_slots += (Argument::n_int_register_parameters_c + + Argument::n_float_register_parameters_c) * 2; + + + // + 4 for return address (which we own) and saved rbp, + + stack_slots += 4; + + // Ok The space we have allocated will look like: + // + // + // FP-> | | + // |---------------------| + // | string[n] | + // |---------------------| <- string_locs[n] + // | string[n-1] | + // |---------------------| <- string_locs[n-1] + // | ... | + // | ... | + // |---------------------| <- string_locs[1] + // | string[0] | + // |---------------------| <- string_locs[0] + // | outbound memory | + // | based arguments | + // | | + // |---------------------| + // | | + // SP-> | out_preserved_slots | + // + // + + // Now compute actual number of stack words we need rounding to make + // stack properly aligned. 
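The signature loop above rewrites the Java signature into the flat form the probe sees: java/lang/String becomes an address, float and the small box types collapse to int, Long/Double (boxed or primitive) become a long followed by a filler slot, and any other object type is left as-is and later passed as NULL. A standalone sketch of that mapping is below; plain strings stand in for vmSymbols and BasicType.

#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> dtrace_signature(const std::vector<std::string>& java_sig) {
  std::vector<std::string> out;
  for (const std::string& t : java_sig) {
    if (t == "java/lang/String") {
      out.push_back("T_ADDRESS");                        // converted to a utf8 char*
    } else if (t == "long" || t == "double" ||
               t == "java/lang/Long" || t == "java/lang/Double") {
      out.push_back("T_LONG");                           // two slots: value + filler
      out.push_back("T_VOID");
    } else if (t == "float" || t == "java/lang/Integer" ||
               t == "java/lang/Boolean" || t == "java/lang/Short") {
      out.push_back("T_INT");
    } else {
      out.push_back(t);  // e.g. int, or an unsupported oop that becomes NULL later
    }
  }
  return out;
}

int main() {
  for (const std::string& s : dtrace_signature({"java/lang/String", "double", "int"}))
    std::cout << s << " ";
  std::cout << "\n";
  return 0;
}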
+ stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word); + + int stack_size = stack_slots * VMRegImpl::stack_slot_size; + + intptr_t start = (intptr_t)__ pc(); + + // First thing make an ic check to see if we should even be here + + // We are free to use all registers as temps without saving them and + // restoring them except rbp. rbp, is the only callee save register + // as far as the interpreter and the compiler(s) are concerned. + + const Register ic_reg = rax; + const Register receiver = rcx; + Label hit; + Label exception_pending; + + + __ verify_oop(receiver); + __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes())); + __ jcc(Assembler::equal, hit); + + __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); + + // verified entry must be aligned for code patching. + // and the first 5 bytes must be in the same cache line + // if we align at 8 then we will be sure 5 bytes are in the same line + __ align(8); + + __ bind(hit); + + int vep_offset = ((intptr_t)__ pc()) - start; + + + // The instruction at the verified entry point must be 5 bytes or longer + // because it can be patched on the fly by make_non_entrant. The stack bang + // instruction fits that requirement. + + // Generate stack overflow check + + if (UseStackBanging) { + if (stack_size <= StackShadowPages*os::vm_page_size()) { + __ bang_stack_with_offset(StackShadowPages*os::vm_page_size()); + } else { + __ movl(rax, stack_size); + __ bang_stack_size(rax, rbx); + } + } else { + // need a 5 byte instruction to allow MT safe patching to non-entrant + __ fat_nop(); + } + + assert(((uintptr_t)__ pc() - start - vep_offset) >= 5, + "valid size for make_non_entrant"); + + // Generate a new frame for the wrapper. + __ enter(); + + // -4 because return address is already present and so is saved rbp, + if (stack_size - 2*wordSize != 0) { + __ subq(rsp, stack_size - 2*wordSize); + } + + // Frame is now completed as far a size and linkage. + + int frame_complete = ((intptr_t)__ pc()) - start; + + int c_arg, j_arg; + + // State of input register args + + bool live[ConcreteRegisterImpl::number_of_registers]; + + live[j_rarg0->as_VMReg()->value()] = false; + live[j_rarg1->as_VMReg()->value()] = false; + live[j_rarg2->as_VMReg()->value()] = false; + live[j_rarg3->as_VMReg()->value()] = false; + live[j_rarg4->as_VMReg()->value()] = false; + live[j_rarg5->as_VMReg()->value()] = false; + + live[j_farg0->as_VMReg()->value()] = false; + live[j_farg1->as_VMReg()->value()] = false; + live[j_farg2->as_VMReg()->value()] = false; + live[j_farg3->as_VMReg()->value()] = false; + live[j_farg4->as_VMReg()->value()] = false; + live[j_farg5->as_VMReg()->value()] = false; + live[j_farg6->as_VMReg()->value()] = false; + live[j_farg7->as_VMReg()->value()] = false; + + + bool rax_is_zero = false; + + // All args (except strings) destined for the stack are moved first + for (j_arg = first_arg_to_pass, c_arg = 0 ; + j_arg < total_args_passed ; j_arg++, c_arg++ ) { + VMRegPair src = in_regs[j_arg]; + VMRegPair dst = out_regs[c_arg]; + + // Get the real reg value or a dummy (rsp) + + int src_reg = src.first()->is_reg() ? 
+ src.first()->value() : + rsp->as_VMReg()->value(); + + bool useless = in_sig_bt[j_arg] == T_ARRAY || + (in_sig_bt[j_arg] == T_OBJECT && + out_sig_bt[c_arg] != T_INT && + out_sig_bt[c_arg] != T_ADDRESS && + out_sig_bt[c_arg] != T_LONG); + + live[src_reg] = !useless; + + if (dst.first()->is_stack()) { + + // Even though a string arg in a register is still live after this loop + // after the string conversion loop (next) it will be dead so we take + // advantage of that now for simpler code to manage live. + + live[src_reg] = false; + switch (in_sig_bt[j_arg]) { + + case T_ARRAY: + case T_OBJECT: + { + Address stack_dst(rsp, reg2offset_out(dst.first())); + + if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) { + // need to unbox a one-word value + Register in_reg = rax; + if ( src.first()->is_reg() ) { + in_reg = src.first()->as_Register(); + } else { + __ movq(rax, Address(rbp, reg2offset_in(src.first()))); + rax_is_zero = false; + } + Label skipUnbox; + __ movptr(Address(rsp, reg2offset_out(dst.first())), + (int32_t)NULL_WORD); + __ testq(in_reg, in_reg); + __ jcc(Assembler::zero, skipUnbox); + + BasicType bt = out_sig_bt[c_arg]; + int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt); + Address src1(in_reg, box_offset); + if ( bt == T_LONG ) { + __ movq(in_reg, src1); + __ movq(stack_dst, in_reg); + assert(out_sig_bt[c_arg+1] == T_VOID, "must be"); + ++c_arg; // skip over T_VOID to keep the loop indices in sync + } else { + __ movl(in_reg, src1); + __ movl(stack_dst, in_reg); + } + + __ bind(skipUnbox); + } else if (out_sig_bt[c_arg] != T_ADDRESS) { + // Convert the arg to NULL + if (!rax_is_zero) { + __ xorq(rax, rax); + rax_is_zero = true; + } + __ movq(stack_dst, rax); + } + } + break; + + case T_VOID: + break; + + case T_FLOAT: + // This does the right thing since we know it is destined for the + // stack + float_move(masm, src, dst); + break; + + case T_DOUBLE: + // This does the right thing since we know it is destined for the + // stack + double_move(masm, src, dst); + break; + + case T_LONG : + long_move(masm, src, dst); + break; + + case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); + + default: + move32_64(masm, src, dst); + } + } + + } + + // If we have any strings we must store any register based arg to the stack + // This includes any still live xmm registers too. 
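Each string argument gets a fixed-size scratch slot in the frame (the comment earlier limits it to 256 chars per string), so the utf8 conversion can truncate but never overrun the wrapper's stack. A hedged stand-in for that bounded conversion, with the limit assumed below:

#include <cstring>
#include <iostream>

const std::size_t max_dtrace_string_size = 256;  // assumed per-string scratch size

// Stand-in for the real conversion: a bounded copy into the per-argument
// scratch slot reserved in the wrapper's frame, always NUL-terminated.
void convert_into_slot(const char* java_chars, char* slot) {
  std::strncpy(slot, java_chars, max_dtrace_string_size - 1);
  slot[max_dtrace_string_size - 1] = '\0';
}

int main() {
  char slot[max_dtrace_string_size];
  convert_into_slot("hello from a java string", slot);
  std::cout << slot << "\n";
  return 0;
}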
+ + int sid = 0; + + if (total_strings > 0 ) { + for (j_arg = first_arg_to_pass, c_arg = 0 ; + j_arg < total_args_passed ; j_arg++, c_arg++ ) { + VMRegPair src = in_regs[j_arg]; + VMRegPair dst = out_regs[c_arg]; + + if (src.first()->is_reg()) { + Address src_tmp(rbp, fp_offset[src.first()->value()]); + + // string oops were left untouched by the previous loop even if the + // eventual (converted) arg is destined for the stack so park them + // away now (except for first) + + if (out_sig_bt[c_arg] == T_ADDRESS) { + Address utf8_addr = Address( + rsp, string_locs[sid++] * VMRegImpl::stack_slot_size); + if (sid != 1) { + // The first string arg won't be killed until after the utf8 + // conversion + __ movq(utf8_addr, src.first()->as_Register()); + } + } else if (dst.first()->is_reg()) { + if (in_sig_bt[j_arg] == T_FLOAT || in_sig_bt[j_arg] == T_DOUBLE) { + + // Convert the xmm register to an int and store it in the reserved + // location for the eventual c register arg + XMMRegister f = src.first()->as_XMMRegister(); + if (in_sig_bt[j_arg] == T_FLOAT) { + __ movflt(src_tmp, f); + } else { + __ movdbl(src_tmp, f); + } + } else { + // If the arg is an oop type we don't support don't bother to store + // it remember string was handled above. + bool useless = in_sig_bt[j_arg] == T_ARRAY || + (in_sig_bt[j_arg] == T_OBJECT && + out_sig_bt[c_arg] != T_INT && + out_sig_bt[c_arg] != T_LONG); + + if (!useless) { + __ movq(src_tmp, src.first()->as_Register()); + } + } + } + } + if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) { + assert(out_sig_bt[c_arg+1] == T_VOID, "must be"); + ++c_arg; // skip over T_VOID to keep the loop indices in sync + } + } + + // Now that the volatile registers are safe, convert all the strings + sid = 0; + + for (j_arg = first_arg_to_pass, c_arg = 0 ; + j_arg < total_args_passed ; j_arg++, c_arg++ ) { + if (out_sig_bt[c_arg] == T_ADDRESS) { + // It's a string + Address utf8_addr = Address( + rsp, string_locs[sid++] * VMRegImpl::stack_slot_size); + // The first string we find might still be in the original java arg + // register + + VMReg src = in_regs[j_arg].first(); + + // We will need to eventually save the final argument to the trap + // in the von-volatile location dedicated to src. This is the offset + // from fp we will use. + int src_off = src->is_reg() ? 
+ fp_offset[src->value()] : reg2offset_in(src); + + // This is where the argument will eventually reside + VMRegPair dst = out_regs[c_arg]; + + if (src->is_reg()) { + if (sid == 1) { + __ movq(c_rarg0, src->as_Register()); + } else { + __ movq(c_rarg0, utf8_addr); + } + } else { + // arg is still in the original location + __ movq(c_rarg0, Address(rbp, reg2offset_in(src))); + } + Label done, convert; + + // see if the oop is NULL + __ testq(c_rarg0, c_rarg0); + __ jcc(Assembler::notEqual, convert); + + if (dst.first()->is_reg()) { + // Save the ptr to utf string in the origina src loc or the tmp + // dedicated to it + __ movq(Address(rbp, src_off), c_rarg0); + } else { + __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg0); + } + __ jmp(done); + + __ bind(convert); + + __ lea(c_rarg1, utf8_addr); + if (dst.first()->is_reg()) { + __ movq(Address(rbp, src_off), c_rarg1); + } else { + __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg1); + } + // And do the conversion + __ call(RuntimeAddress( + CAST_FROM_FN_PTR(address, SharedRuntime::get_utf))); + + __ bind(done); + } + if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) { + assert(out_sig_bt[c_arg+1] == T_VOID, "must be"); + ++c_arg; // skip over T_VOID to keep the loop indices in sync + } + } + // The get_utf call killed all the c_arg registers + live[c_rarg0->as_VMReg()->value()] = false; + live[c_rarg1->as_VMReg()->value()] = false; + live[c_rarg2->as_VMReg()->value()] = false; + live[c_rarg3->as_VMReg()->value()] = false; + live[c_rarg4->as_VMReg()->value()] = false; + live[c_rarg5->as_VMReg()->value()] = false; + + live[c_farg0->as_VMReg()->value()] = false; + live[c_farg1->as_VMReg()->value()] = false; + live[c_farg2->as_VMReg()->value()] = false; + live[c_farg3->as_VMReg()->value()] = false; + live[c_farg4->as_VMReg()->value()] = false; + live[c_farg5->as_VMReg()->value()] = false; + live[c_farg6->as_VMReg()->value()] = false; + live[c_farg7->as_VMReg()->value()] = false; + } + + // Now we can finally move the register args to their desired locations + + rax_is_zero = false; + + for (j_arg = first_arg_to_pass, c_arg = 0 ; + j_arg < total_args_passed ; j_arg++, c_arg++ ) { + + VMRegPair src = in_regs[j_arg]; + VMRegPair dst = out_regs[c_arg]; + + // Only need to look for args destined for the interger registers (since we + // convert float/double args to look like int/long outbound) + if (dst.first()->is_reg()) { + Register r = dst.first()->as_Register(); + + // Check if the java arg is unsupported and thereofre useless + bool useless = in_sig_bt[j_arg] == T_ARRAY || + (in_sig_bt[j_arg] == T_OBJECT && + out_sig_bt[c_arg] != T_INT && + out_sig_bt[c_arg] != T_ADDRESS && + out_sig_bt[c_arg] != T_LONG); + + + // If we're going to kill an existing arg save it first + if (live[dst.first()->value()]) { + // you can't kill yourself + if (src.first() != dst.first()) { + __ movq(Address(rbp, fp_offset[dst.first()->value()]), r); + } + } + if (src.first()->is_reg()) { + if (live[src.first()->value()] ) { + if (in_sig_bt[j_arg] == T_FLOAT) { + __ movdl(r, src.first()->as_XMMRegister()); + } else if (in_sig_bt[j_arg] == T_DOUBLE) { + __ movdq(r, src.first()->as_XMMRegister()); + } else if (r != src.first()->as_Register()) { + if (!useless) { + __ movq(r, src.first()->as_Register()); + } + } + } else { + // If the arg is an oop type we don't support don't bother to store + // it + if (!useless) { + if (in_sig_bt[j_arg] == T_DOUBLE || + in_sig_bt[j_arg] == T_LONG || + in_sig_bt[j_arg] == T_OBJECT ) { + __ movq(r, 
Address(rbp, fp_offset[src.first()->value()])); + } else { + __ movl(r, Address(rbp, fp_offset[src.first()->value()])); + } + } + } + live[src.first()->value()] = false; + } else if (!useless) { + // full sized move even for int should be ok + __ movq(r, Address(rbp, reg2offset_in(src.first()))); + } + + // At this point r has the original java arg in the final location + // (assuming it wasn't useless). If the java arg was an oop + // we have a bit more to do + + if (in_sig_bt[j_arg] == T_ARRAY || in_sig_bt[j_arg] == T_OBJECT ) { + if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) { + // need to unbox a one-word value + Label skip; + __ testq(r, r); + __ jcc(Assembler::equal, skip); + BasicType bt = out_sig_bt[c_arg]; + int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt); + Address src1(r, box_offset); + if ( bt == T_LONG ) { + __ movq(r, src1); + } else { + __ movl(r, src1); + } + __ bind(skip); + + } else if (out_sig_bt[c_arg] != T_ADDRESS) { + // Convert the arg to NULL + __ xorq(r, r); + } + } + + // dst can longer be holding an input value + live[dst.first()->value()] = false; + } + if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) { + assert(out_sig_bt[c_arg+1] == T_VOID, "must be"); + ++c_arg; // skip over T_VOID to keep the loop indices in sync + } + } + + + // Ok now we are done. Need to place the nop that dtrace wants in order to + // patch in the trap + int patch_offset = ((intptr_t)__ pc()) - start; + + __ nop(); + + + // Return + + __ leave(); + __ ret(0); + + __ flush(); + + nmethod *nm = nmethod::new_dtrace_nmethod( + method, masm->code(), vep_offset, patch_offset, frame_complete, + stack_slots / VMRegImpl::slots_per_word); + return nm; + +} + +#endif // HAVE_DTRACE_H + // this function returns the adjust size (in number of words) to a c2i adapter // activation for use during deoptimization int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) { @@ -1908,8 +2550,8 @@ // ------------- // This code enters when returning to a de-optimized nmethod. A return // address has been pushed on the the stack, and return values are in - // registers. - // If we are doing a normal deopt then we were called from the patched + // registers. + // If we are doing a normal deopt then we were called from the patched // nmethod from the point we returned to the nmethod. So the return // address on the stack is wrong by NativeCall::instruction_size // We will adjust the value so it looks like we have the original return @@ -1944,26 +2586,80 @@ map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); // Normal deoptimization. Save exec mode for unpack_frames. - __ movl(r12, Deoptimization::Unpack_deopt); // callee-saved + __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved __ jmp(cont); - + + int reexecute_offset = __ pc() - start; + + // Reexecute case + // return address is the pc describes what bci to do re-execute at + + // No need to update map as each call to save_live_registers will produce identical oopmap + (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); + + __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved + __ jmp(cont); + int exception_offset = __ pc() - start; // Prolog for exception case - // Push throwing pc as return address - __ pushq(rdx); + // all registers are dead at this entry point, except for rax, and + // rdx which contain the exception oop and exception pc + // respectively. 
Set them in TLS and fall thru to the + // unpack_with_exception_in_tls entry point. + + __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx); + __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax); + + int exception_in_tls_offset = __ pc() - start; + + // new implementation because exception oop is now passed in JavaThread + + // Prolog for exception case + // All registers must be preserved because they might be used by LinearScan + // Exceptiop oop and throwing PC are passed in JavaThread + // tos: stack at point of call to method that threw the exception (i.e. only + // args are on the stack, no return address) + + // make room on stack for the return address + // It will be patched later with the throwing pc. The correct value is not + // available now because loading it from memory would destroy registers. + __ push(0); // Save everything in sight. map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); + // Now it is safe to overwrite any register + // Deopt during an exception. Save exec mode for unpack_frames. - __ movl(r12, Deoptimization::Unpack_exception); // callee-saved + __ movl(r14, Deoptimization::Unpack_exception); // callee-saved + + // load throwing pc from JavaThread and patch it as the return address + // of the current frame. Then clear the field in JavaThread + + __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset())); + __ movptr(Address(rbp, wordSize), rdx); + __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); + +#ifdef ASSERT + // verify that there is really an exception oop in JavaThread + __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset())); + __ verify_oop(rax); + + // verify that there is no pending exception + Label no_pending_exception; + __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::zero, no_pending_exception); + __ stop("must not have pending exception here"); + __ bind(no_pending_exception); +#endif __ bind(cont); // Call C code. Need thread and this frame, but NOT official VM entry - // crud. We cannot block on this call, no GC can happen. + // crud. We cannot block on this call, no GC can happen. // // UnrollBlock* fetch_unroll_info(JavaThread* thread) @@ -1972,15 +2668,15 @@ __ set_last_Java_frame(noreg, noreg, NULL); #ifdef ASSERT { Label L; - __ cmpq(Address(r15_thread, + __ cmpptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), - 0); + (int32_t)0); __ jcc(Assembler::equal, L); __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared"); __ bind(L); } #endif // ASSERT - __ movq(c_rarg0, r15_thread); + __ mov(c_rarg0, r15_thread); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info))); // Need to have an oopmap that tells fetch_unroll_info where to @@ -1990,7 +2686,25 @@ __ reset_last_Java_frame(false, false); // Load UnrollBlock* into rdi - __ movq(rdi, rax); + __ mov(rdi, rax); + + Label noException; + __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending? 
+ __ jcc(Assembler::notEqual, noException); + __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset())); + // QQQ this is useless it was NULL above + __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset())); + __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD); + __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); + + __ verify_oop(rax); + + // Overwrite the result registers with the exception results. + __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax); + // I think this is useless + __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx); + + __ bind(noException); // Only register save data is on the stack. // Now restore the result registers. Everything else is either dead @@ -2000,43 +2714,43 @@ // All of the register save area has been popped of the stack. Only the // return address remains. - // Pop all the frames we must move/replace. - // + // Pop all the frames we must move/replace. + // // Frame picture (youngest to oldest) // 1: self-frame (no frame link) // 2: deopting frame (no frame link) // 3: caller of deopting frame (could be compiled/interpreted). - // + // // Note: by leaving the return address of self-frame on the stack // and using the size of frame 2 to adjust the stack // when we are done the return to frame 3 will still be on the stack. // Pop deoptimized frame __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes())); - __ addq(rsp, rcx); + __ addptr(rsp, rcx); // rsp should be pointing at the return address to the caller (3) // Stack bang to make sure there's enough room for these interpreter frames. if (UseStackBanging) { - __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); + __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); __ bang_stack_size(rbx, rcx); } // Load address of array of frame pcs into rcx - __ movq(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); + __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); // Trash the old pc - __ addq(rsp, wordSize); + __ addptr(rsp, wordSize); // Load address of array of frame sizes into rsi - __ movq(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes())); + __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes())); // Load counter into rdx __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); // Pick up the initial fp we should save - __ movq(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes())); + __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes())); // Now adjust the caller's stack to make up for the extra locals // but record the original sp so that we can save it in the skeletal interpreter @@ -2045,41 +2759,56 @@ const Register sender_sp = r8; - __ movq(sender_sp, rsp); + __ mov(sender_sp, rsp); __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock:: caller_adjustment_offset_in_bytes())); - __ subq(rsp, rbx); + __ subptr(rsp, rbx); // Push interpreter frames in a loop Label loop; __ bind(loop); - __ movq(rbx, Address(rsi, 0)); // Load frame size - __ subq(rbx, 2*wordSize); // We'll push pc and ebp by hand - __ pushq(Address(rcx, 0)); // Save return address + __ movptr(rbx, Address(rsi, 0)); // Load frame size +#ifdef CC_INTERP + __ subptr(rbx, 
4*wordSize); // we'll push pc and ebp by hand and +#ifdef ASSERT + __ push(0xDEADDEAD); // Make a recognizable pattern + __ push(0xDEADDEAD); +#else /* ASSERT */ + __ subptr(rsp, 2*wordSize); // skip the "static long no_param" +#endif /* ASSERT */ +#else + __ subptr(rbx, 2*wordSize); // We'll push pc and ebp by hand +#endif // CC_INTERP + __ pushptr(Address(rcx, 0)); // Save return address __ enter(); // Save old & set new ebp - __ subq(rsp, rbx); // Prolog - __ movq(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), - sender_sp); // Make it walkable - // This value is corrected by layout_activation_impl - __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD ); - __ movq(sender_sp, rsp); // Pass sender_sp to next frame - __ addq(rsi, wordSize); // Bump array pointer (sizes) - __ addq(rcx, wordSize); // Bump array pointer (pcs) + __ subptr(rsp, rbx); // Prolog +#ifdef CC_INTERP + __ movptr(Address(rbp, + -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))), + sender_sp); // Make it walkable +#else /* CC_INTERP */ + // This value is corrected by layout_activation_impl + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD ); + __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable +#endif /* CC_INTERP */ + __ mov(sender_sp, rsp); // Pass sender_sp to next frame + __ addptr(rsi, wordSize); // Bump array pointer (sizes) + __ addptr(rcx, wordSize); // Bump array pointer (pcs) __ decrementl(rdx); // Decrement counter __ jcc(Assembler::notZero, loop); - __ pushq(Address(rcx, 0)); // Save final return address + __ pushptr(Address(rcx, 0)); // Save final return address // Re-push self-frame __ enter(); // Save old & set new ebp // Allocate a full sized register save area. // Return address and rbp are in place, so we allocate two less words. - __ subq(rsp, (frame_size_in_words - 2) * wordSize); + __ subptr(rsp, (frame_size_in_words - 2) * wordSize); // Restore frame locals after moving the frame __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0); - __ movq(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax); + __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax); // Call C code. Need thread but NOT official VM entry // crud. We cannot block on this call, no GC can happen. Call should @@ -2090,8 +2819,8 @@ // Use rbp because the frames look interpreted now __ set_last_Java_frame(noreg, rbp, NULL); - __ movq(c_rarg0, r15_thread); - __ movl(c_rarg1, r12); // second arg: exec_mode + __ mov(c_rarg0, r15_thread); + __ movl(c_rarg1, r14); // second arg: exec_mode __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames))); // Set an oopmap for the call site @@ -2102,18 +2831,21 @@ // Collect return values __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes())); - __ movq(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes())); + __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes())); + // I think this is useless (throwing pc?) + __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes())); // Pop self-frame. 
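To restate the frame-pushing loop just generated: fetch_unroll_info hands back an UnrollBlock carrying a frame count, a caller adjustment, and parallel arrays of frame sizes and frame pcs, and the blob walks those arrays pushing one skeletal interpreter frame per entry. A rough, purely illustrative C++ model of that walk follows; UnrollBlockModel and the arithmetic are stand-ins, not the VM's types.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for Deoptimization::UnrollBlock; the fields are
// modeled loosely on the offsets the assembly above reads through rdi.
struct UnrollBlockModel {
  int              number_of_frames;
  const intptr_t*  frame_sizes;        // bytes occupied by each skeletal frame
  const uintptr_t* frame_pcs;          // return pc pushed for each frame
  intptr_t         caller_adjustment;  // extra bytes added below the caller
};

// Estimate the stack the rebuilt frames will occupy. Each frame's size
// already accounts for the pc and saved rbp that the loop pushes by hand,
// which is why the real code subtracts 2*wordSize before the subptr(rsp, rbx).
static intptr_t simulated_stack_use(const UnrollBlockModel& ub) {
  intptr_t total = ub.caller_adjustment;
  for (int i = 0; i < ub.number_of_frames; i++) {
    total += ub.frame_sizes[i];
  }
  return total;
}

int main() {
  const intptr_t  sizes[] = { 96, 128 };
  const uintptr_t pcs[]   = { 0x1000, 0x2000 };
  UnrollBlockModel ub = { 2, sizes, pcs, 16 };
  std::printf("frames need ~%ld bytes of stack\n", (long)simulated_stack_use(ub));
  return 0;
}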
__ leave(); // Epilog // Jump to interpreter __ ret(0); - + // Make sure all code is generated masm->flush(); - _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 0, frame_size_in_words); + _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words); + _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); } #ifdef COMPILER2 @@ -2126,15 +2858,15 @@ MacroAssembler* masm = new MacroAssembler(&buffer); assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned"); - + address start = __ pc(); // Push self-frame. We get here with a return address on the // stack, so rsp is 8-byte aligned until we allocate our frame. - __ subq(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog! + __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog! // No callee saved registers. rbp is assumed implicitly saved - __ movq(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp); + __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp); // compiler left unloaded_class_index in j_rarg0 move to where the // runtime expects it. @@ -2149,7 +2881,7 @@ // // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index); - __ movq(c_rarg0, r15_thread); + __ mov(c_rarg0, r15_thread); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap))); // Set an oopmap for the call site @@ -2163,54 +2895,54 @@ __ reset_last_Java_frame(false, false); // Load UnrollBlock* into rdi - __ movq(rdi, rax); + __ mov(rdi, rax); - // Pop all the frames we must move/replace. - // + // Pop all the frames we must move/replace. + // // Frame picture (youngest to oldest) // 1: self-frame (no frame link) // 2: deopting frame (no frame link) // 3: caller of deopting frame (could be compiled/interpreted). // Pop self-frame. We have no frame, and must rely only on rax and rsp. - __ addq(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog! + __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog! // Pop deoptimized frame (int) __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock:: size_of_deoptimized_frame_offset_in_bytes())); - __ addq(rsp, rcx); + __ addptr(rsp, rcx); // rsp should be pointing at the return address to the caller (3) // Stack bang to make sure there's enough room for these interpreter frames. 
if (UseStackBanging) { - __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); + __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); __ bang_stack_size(rbx, rcx); } // Load address of array of frame pcs into rcx (address*) - __ movq(rcx, - Address(rdi, - Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); + __ movptr(rcx, + Address(rdi, + Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); // Trash the return pc - __ addq(rsp, wordSize); + __ addptr(rsp, wordSize); // Load address of array of frame sizes into rsi (intptr_t*) - __ movq(rsi, Address(rdi, - Deoptimization::UnrollBlock:: - frame_sizes_offset_in_bytes())); + __ movptr(rsi, Address(rdi, + Deoptimization::UnrollBlock:: + frame_sizes_offset_in_bytes())); // Counter - __ movl(rdx, Address(rdi, + __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock:: number_of_frames_offset_in_bytes())); // (int) // Pick up the initial fp we should save - __ movq(rbp, - Address(rdi, - Deoptimization::UnrollBlock::initial_fp_offset_in_bytes())); + __ movptr(rbp, + Address(rdi, + Deoptimization::UnrollBlock::initial_fp_offset_in_bytes())); // Now adjust the caller's stack to make up for the extra locals but // record the original sp so that we can save it in the skeletal @@ -2219,34 +2951,34 @@ const Register sender_sp = r8; - __ movq(sender_sp, rsp); - __ movl(rbx, Address(rdi, + __ mov(sender_sp, rsp); + __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock:: caller_adjustment_offset_in_bytes())); // (int) - __ subq(rsp, rbx); + __ subptr(rsp, rbx); // Push interpreter frames in a loop Label loop; __ bind(loop); - __ movq(rbx, Address(rsi, 0)); // Load frame size - __ subq(rbx, 2 * wordSize); // We'll push pc and rbp by hand - __ pushq(Address(rcx, 0)); // Save return address - __ enter(); // Save old & set new rbp - __ subq(rsp, rbx); // Prolog - __ movq(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), - sender_sp); // Make it walkable - // This value is corrected by layout_activation_impl - __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD ); - __ movq(sender_sp, rsp); // Pass sender_sp to next frame - __ addq(rsi, wordSize); // Bump array pointer (sizes) - __ addq(rcx, wordSize); // Bump array pointer (pcs) - __ decrementl(rdx); // Decrement counter + __ movptr(rbx, Address(rsi, 0)); // Load frame size + __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand + __ pushptr(Address(rcx, 0)); // Save return address + __ enter(); // Save old & set new rbp + __ subptr(rsp, rbx); // Prolog + __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), + sender_sp); // Make it walkable + // This value is corrected by layout_activation_impl + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD ); + __ mov(sender_sp, rsp); // Pass sender_sp to next frame + __ addptr(rsi, wordSize); // Bump array pointer (sizes) + __ addptr(rcx, wordSize); // Bump array pointer (pcs) + __ decrementl(rdx); // Decrement counter __ jcc(Assembler::notZero, loop); - __ pushq(Address(rcx, 0)); // Save final return address + __ pushptr(Address(rcx, 0)); // Save final return address // Re-push self-frame __ enter(); // Save old & set new rbp - __ subq(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt); + __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt); // Prolog // Use rbp because the frames look interpreted now @@ -2259,7 
+2991,7 @@ // // BasicType unpack_frames(JavaThread* thread, int exec_mode); - __ movq(c_rarg0, r15_thread); + __ mov(c_rarg0, r15_thread); __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames))); @@ -2277,7 +3009,7 @@ // Make sure all code is generated masm->flush(); - _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, + _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1); } #endif // COMPILER2 @@ -2285,12 +3017,12 @@ //------------------------------generate_handler_blob------ // -// Generate a special Compile2Runtime blob that saves all registers, +// Generate a special Compile2Runtime blob that saves all registers, // and setup oopmap. // static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) { assert(StubRoutines::forward_exception_entry() != NULL, - "must be generated before"); + "must be generated before"); ResourceMark rm; OopMapSet *oop_maps = new OopMapSet(); @@ -2299,19 +3031,19 @@ // Allocate space for the code. Setup code generation tools. CodeBuffer buffer("handler_blob", 2048, 1024); MacroAssembler* masm = new MacroAssembler(&buffer); - - address start = __ pc(); - address call_pc = NULL; + + address start = __ pc(); + address call_pc = NULL; int frame_size_in_words; // Make room for return address (or push it again) if (!cause_return) { - __ pushq(rbx); + __ push(rbx); } - // Save registers, fpu state, and flags + // Save registers, fpu state, and flags map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); - + // The following is basically a call_VM. However, we need the precise // address of the call in order to generate an oopmap. Hence, we do all the // work outselves. @@ -2322,13 +3054,13 @@ // sees an invalid pc. if (!cause_return) { - // overwrite the dummy value we pushed on entry - __ movq(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset())); - __ movq(Address(rbp, wordSize), c_rarg0); + // overwrite the dummy value we pushed on entry + __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset())); + __ movptr(Address(rbp, wordSize), c_rarg0); } // Do the call - __ movq(c_rarg0, r15_thread); + __ mov(c_rarg0, r15_thread); __ call(RuntimeAddress(call_ptr)); // Set an oopmap for the call site. This oopmap will map all @@ -2342,7 +3074,7 @@ __ reset_last_Java_frame(false, false); - __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD); + __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::equal, noException); // Exception pending @@ -2360,22 +3092,22 @@ __ ret(0); // Make sure all code is generated - masm->flush(); + masm->flush(); // Fill-out other meta info - return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words); + return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words); } // // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss -// +// // Generate a stub that calls into vm to find out the proper destination // of a java call. All the argument registers are live at this point // but since this is generic code we don't know what they are and the caller // must do any gc of the args. 
// static RuntimeStub* generate_resolve_blob(address destination, const char* name) { - assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); + assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); // allocate space for the code ResourceMark rm; @@ -2384,10 +3116,10 @@ MacroAssembler* masm = new MacroAssembler(&buffer); int frame_size_in_words; - + OopMapSet *oop_maps = new OopMapSet(); OopMap* map = NULL; - + int start = __ offset(); map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); @@ -2396,7 +3128,7 @@ __ set_last_Java_frame(noreg, noreg, NULL); - __ movq(c_rarg0, r15_thread); + __ mov(c_rarg0, r15_thread); __ call(RuntimeAddress(destination)); @@ -2413,14 +3145,14 @@ __ reset_last_Java_frame(false, false); // check for pending exceptions Label pending; - __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD); + __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, pending); // get the returned methodOop - __ movq(rbx, Address(r15_thread, JavaThread::vm_result_offset())); - __ movq(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx); + __ movptr(rbx, Address(r15_thread, JavaThread::vm_result_offset())); + __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx); - __ movq(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax); + __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax); RegisterSaver::restore_live_registers(masm); @@ -2438,12 +3170,12 @@ __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD); - __ movq(rax, Address(r15_thread, Thread::pending_exception_offset())); + __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset())); __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); // ------------- // make sure all code is generated - masm->flush(); + masm->flush(); // return the blob // frame_size_words or bytes?? @@ -2466,13 +3198,13 @@ _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call"); _polling_page_safepoint_handler_blob = - generate_handler_blob(CAST_FROM_FN_PTR(address, + generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), false); _polling_page_return_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), true); - + generate_deopt_blob(); #ifdef COMPILER2 @@ -2488,7 +3220,7 @@ // creates exception blob at the end // Using exception blob, this code is jumped from a compiled method. // (see emit_exception_handler in x86_64.ad file) -// +// // Given an exception pc at a call we call into the runtime for the // handler in this method. This handler might merely restore state // (i.e. callee save registers) unwind the frame and jump to the @@ -2496,7 +3228,7 @@ // for the nmethod. // // This code is entered with a jmp. -// +// // Arguments: // rax: exception oop // rdx: exception pc @@ -2505,7 +3237,7 @@ // rax: exception oop // rdx: exception pc in caller or ??? // destination: exception handler of caller -// +// // Note: the exception pc MUST be at a call (precise debug information) // Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved. 
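The pending-exception idiom used throughout these blobs (call into the runtime, then compare Thread::pending_exception_offset() against NULL_WORD and branch to the forward-exception stub) is easy to miss in the assembler form. A hedged, non-VM sketch of the control flow it implements; the thread type and the resolve call are hypothetical placeholders:

#include <cstdio>

// Hypothetical thread/exception shapes; only the check-and-forward flow is the point.
struct FakeThread {
  void* pending_exception;   // analogous to the field read via Thread::pending_exception_offset()
};

static void* resolve_call(FakeThread* thread) {
  // stands in for the call through RuntimeAddress(destination)
  (void)thread;
  return nullptr;            // pretend resolution failed and left an exception behind
}

// Mirrors: call, reset_last_Java_frame, cmpptr(pending_exception, NULL_WORD),
// and a jump to forward_exception_entry when the field is non-null.
static bool call_and_check(FakeThread* thread) {
  resolve_call(thread);
  if (thread->pending_exception != nullptr) {
    return false;            // the blob jumps to StubRoutines::forward_exception_entry() here
  }
  return true;
}

int main() {
  FakeThread t = { nullptr };
  t.pending_exception = &t;  // simulate a pending exception
  std::printf(call_and_check(&t) ? "ok\n" : "forwarding exception\n");
  return 0;
}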
// @@ -2519,16 +3251,16 @@ // Allocate space for the code ResourceMark rm; - // Setup code generation tools + // Setup code generation tools CodeBuffer buffer("exception_blob", 2048, 1024); MacroAssembler* masm = new MacroAssembler(&buffer); - address start = __ pc(); + address start = __ pc(); // Exception pc is 'return address' for stack walker - __ pushq(rdx); - __ subq(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog + __ push(rdx); + __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog // Save callee-saved registers. See x86_64.ad. @@ -2536,25 +3268,25 @@ // convention will save restore it in prolog/epilog) Other than that // there are no callee save registers now that adapter frames are gone. - __ movq(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp); - + __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp); + // Store exception in Thread object. We cannot pass any arguments to the // handle_exception call, since we do not want to make any assumption // about the size of the frame where the exception happened in. // c_rarg0 is either rdi (Linux) or rcx (Windows). - __ movq(Address(r15_thread, JavaThread::exception_oop_offset()),rax); - __ movq(Address(r15_thread, JavaThread::exception_pc_offset()), rdx); + __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax); + __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx); // This call does all the hard work. It checks if an exception handler - // exists in the method. + // exists in the method. // If so, it returns the handler address. - // If not, it prepares for stack-unwinding, restoring the callee-save + // If not, it prepares for stack-unwinding, restoring the callee-save // registers of the frame being removed. // // address OptoRuntime::handle_exception_C(JavaThread* thread) __ set_last_Java_frame(noreg, noreg, NULL); - __ movq(c_rarg0, r15_thread); + __ mov(c_rarg0, r15_thread); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C))); // Set an oopmap for the call site. This oopmap will only be used if we @@ -2575,23 +3307,23 @@ // convention will save restore it in prolog/epilog) Other than that // there are no callee save registers no that adapter frames are gone. - __ movq(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt)); + __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt)); - __ addq(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog - __ popq(rdx); // No need for exception pc anymore + __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog + __ pop(rdx); // No need for exception pc anymore // rax: exception handler // We have a handler in rax (could be deopt blob). - __ movq(r8, rax); + __ mov(r8, rax); // Get the exception oop - __ movq(rax, Address(r15_thread, JavaThread::exception_oop_offset())); + __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset())); // Get the exception pc in case we are deoptimized - __ movq(rdx, Address(r15_thread, JavaThread::exception_pc_offset())); + __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset())); #ifdef ASSERT __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD); - __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD); + __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD); #endif // Clear the exception oop so GC no longer processes it as a root. 
__ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD); @@ -2604,7 +3336,7 @@ __ jmp(r8); // Make sure all code is generated - masm->flush(); + masm->flush(); // Set exception blob _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1); --- old/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp 2009-08-01 04:08:13.702519917 +0100 +++ new/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp 2009-08-01 04:08:13.607646711 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)stubGenerator_x86_32.cpp 1.96 07/11/08 08:17:08 JVM" -#endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -33,6 +30,7 @@ // see the comment in stubRoutines.hpp #define __ _masm-> +#define a__ ((Assembler*)_masm)-> #ifdef PRODUCT #define BLOCK_COMMENT(str) /* nothing */ @@ -70,7 +68,7 @@ #define inc_counter_np(counter) (0) #else void inc_counter_np_(int& counter) { - __ increment(ExternalAddress((address)&counter)); + __ incrementl(ExternalAddress((address)&counter)); } #define inc_counter_np(counter) \ BLOCK_COMMENT("inc_counter " #counter); \ @@ -97,11 +95,11 @@ // [ argument word n ] // ... // -N [ argument word 1 ] - // -7 [ Possible padding for stack alignment ] - // -6 [ Possible padding for stack alignment ] - // -5 [ Possible padding for stack alignment ] + // -7 [ Possible padding for stack alignment ] + // -6 [ Possible padding for stack alignment ] + // -5 [ Possible padding for stack alignment ] // -4 [ mxcsr save ] <--- rsp_after_call - // -3 [ saved rbx, ] + // -3 [ saved rbx, ] // -2 [ saved rsi ] // -1 [ saved rdi ] // 0 [ saved rbp, ] <--- rbp, @@ -124,7 +122,7 @@ assert(frame::entry_frame_call_wrapper_offset == 2, "adjust this code"); bool sse_save = false; const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_catch_exception()! 
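Most of the mechanical churn in both files is the same change: width-specific instructions (movl/movq, addl/addq, pushl/popq, cmpl/cmpq) become pointer-width forms (movptr, addptr, push, cmpptr) so one stub source assembles correctly for 32-bit and 64-bit targets, with constants cast to int32_t where a sign-extended immediate is required. The C++ analogue, shown only to illustrate the idea, is writing address arithmetic in terms of uintptr_t rather than int; the same trick underlies the stack alignment done just below in the 32-bit call_stub.

#include <cstdint>
#include <cstdio>

// Pointer-width version: uintptr_t tracks the platform word size, the same
// way the movptr/addptr/andptr macros pick 32-bit or 64-bit encodings.
// An 'int'-typed version of this would silently truncate on LP64.
static uintptr_t align_down(uintptr_t sp, uintptr_t alignment) {
  return sp & ~(alignment - 1);   // e.g. rsp &= -StackAlignmentInBytes
}

int main() {
  uintptr_t sp = 0x7ffd1234;
  std::printf("aligned: %#zx (word size %zu bytes)\n",
              (size_t)align_down(sp, 16), sizeof(uintptr_t));
  return 0;
}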
- const int locals_count_in_bytes (4*wordSize); + const int locals_count_in_bytes (4*wordSize); const Address mxcsr_save (rbp, -4 * wordSize); const Address saved_rbx (rbp, -3 * wordSize); const Address saved_rsi (rbp, -2 * wordSize); @@ -139,17 +137,17 @@ sse_save = UseSSE > 0; // stub code - __ enter(); - __ movl(rcx, parameter_size); // parameter counter - __ shll(rcx, Interpreter::logStackElementSize()); // convert parameter count to bytes - __ addl(rcx, locals_count_in_bytes); // reserve space for register saves - __ subl(rsp, rcx); - __ andl(rsp, -(StackAlignmentInBytes)); // Align stack + __ enter(); + __ movptr(rcx, parameter_size); // parameter counter + __ shlptr(rcx, Interpreter::logStackElementSize()); // convert parameter count to bytes + __ addptr(rcx, locals_count_in_bytes); // reserve space for register saves + __ subptr(rsp, rcx); + __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack // save rdi, rsi, & rbx, according to C calling conventions - __ movl(saved_rdi, rdi); - __ movl(saved_rsi, rsi); - __ movl(saved_rbx, rbx); + __ movptr(saved_rdi, rdi); + __ movptr(saved_rsi, rsi); + __ movptr(saved_rbx, rbx); // save and initialize %mxcsr if (sse_save) { Label skip_ldmx; @@ -169,8 +167,8 @@ #ifdef ASSERT // make sure we have no pending exceptions { Label L; - __ movl(rcx, thread); - __ cmpl(Address(rcx, Thread::pending_exception_offset()), NULL_WORD); + __ movptr(rcx, thread); + __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); __ stop("StubRoutines::call_stub: entered with pending exception"); __ bind(L); @@ -192,30 +190,30 @@ // source is rdx[rcx: N-1..0] // dest is rsp[rbx: 0..N-1] - __ movl(rdx, parameters); // parameter pointer - __ xorl(rbx, rbx); + __ movptr(rdx, parameters); // parameter pointer + __ xorptr(rbx, rbx); __ BIND(loop); if (TaggedStackInterpreter) { - __ movl(rax, Address(rdx, rcx, Interpreter::stackElementScale(), + __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -2*wordSize)); // get tag - __ movl(Address(rsp, rbx, Interpreter::stackElementScale(), + __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(), Interpreter::expr_tag_offset_in_bytes(0)), rax); // store tag } // get parameter - __ movl(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize)); - __ movl(Address(rsp, rbx, Interpreter::stackElementScale(), + __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize)); + __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(), Interpreter::expr_offset_in_bytes(0)), rax); // store parameter __ increment(rbx); - __ decrement(rcx); + __ decrement(rcx); __ jcc(Assembler::notZero, loop); // call Java function __ BIND(parameters_done); - __ movl(rbx, method); // get methodOop - __ movl(rax, entry_point); // get entry_point - __ movl(rsi, rsp); // set sender sp + __ movptr(rbx, method); // get methodOop + __ movptr(rax, entry_point); // get entry_point + __ mov(rsi, rsp); // set sender sp BLOCK_COMMENT("call Java function"); __ call(rax); @@ -228,7 +226,7 @@ // store result depending on type // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) - __ movl(rdi, result); + __ movptr(rdi, result); Label is_long, is_float, is_double, exit; __ movl(rsi, result_type); __ cmpl(rsi, T_LONG); @@ -246,7 +244,7 @@ __ verify_FPU(0, "generate_call_stub"); // pop parameters - __ leal(rsp, rsp_after_call); + __ lea(rsp, rsp_after_call); // restore %mxcsr if (sse_save) { @@ -254,13 +252,13 @@ } // restore rdi, rsi and rbx, - 
__ movl(rbx, saved_rbx); - __ movl(rsi, saved_rsi); - __ movl(rdi, saved_rdi); - __ addl(rsp, 4*wordSize); + __ movptr(rbx, saved_rbx); + __ movptr(rsi, saved_rsi); + __ movptr(rdi, saved_rdi); + __ addptr(rsp, 4*wordSize); // return - __ popl(rbp); + __ pop(rbp); __ ret(0); // handle return types different from T_INT @@ -294,7 +292,7 @@ // return above that handles interpreter returns. BLOCK_COMMENT("call_stub_compiled_return:"); - StubRoutines::i486::set_call_stub_compiled_return( __ pc()); + StubRoutines::x86::set_call_stub_compiled_return( __ pc()); #ifdef COMPILER2 if (UseSSE >= 2) { @@ -340,12 +338,12 @@ address start = __ pc(); // get thread directly - __ movl(rcx, thread); + __ movptr(rcx, thread); #ifdef ASSERT // verify that threads correspond { Label L; __ get_thread(rbx); - __ cmpl(rbx, rcx); + __ cmpptr(rbx, rcx); __ jcc(Assembler::equal, L); __ stop("StubRoutines::catch_exception: threads must correspond"); __ bind(L); @@ -353,7 +351,7 @@ #endif // set pending exception __ verify_oop(rax); - __ movl(Address(rcx, Thread::pending_exception_offset()), rax ); + __ movptr(Address(rcx, Thread::pending_exception_offset()), rax ); __ lea(Address(rcx, Thread::exception_file_offset ()), ExternalAddress((address)__FILE__)); __ movl(Address(rcx, Thread::exception_line_offset ()), __LINE__ ); @@ -364,7 +362,7 @@ return start; } - + //------------------------------------------------------------------------------------------------------------------------ // Continuation point for runtime calls returning with a pending exception. // The pending exception check happened in the runtime or native call stub. @@ -392,7 +390,7 @@ // make sure this code is only executed if there is a pending exception { Label L; __ get_thread(rcx); - __ cmpl(Address(rcx, Thread::pending_exception_offset()), NULL_WORD); + __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, L); __ stop("StubRoutines::forward exception: no pending exception (1)"); __ bind(L); @@ -400,21 +398,21 @@ #endif // compute exception handler into rbx, - __ movl(rax, Address(rsp, 0)); + __ movptr(rax, Address(rsp, 0)); BLOCK_COMMENT("call exception_handler_for_return_address"); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rax); - __ movl(rbx, rax); + __ mov(rbx, rax); // setup rax, & rdx, remove return address & clear pending exception __ get_thread(rcx); - __ popl(rdx); - __ movl(rax, Address(rcx, Thread::pending_exception_offset())); - __ movl(Address(rcx, Thread::pending_exception_offset()), NULL_WORD); + __ pop(rdx); + __ movptr(rax, Address(rcx, Thread::pending_exception_offset())); + __ movptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD); #ifdef ASSERT // make sure exception is set { Label L; - __ testl(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::notEqual, L); __ stop("StubRoutines::forward exception: no pending exception (2)"); __ bind(L); @@ -430,17 +428,17 @@ return start; } - + //---------------------------------------------------------------------------------------------------- // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest) - // + // // xchg exists as far back as 8086, lock needed for MP only // Stack layout immediately after call: // // 0 [ret addr ] <--- rsp // 1 [ ex ] - // 2 [ dest ] + // 2 [ dest ] // // Result: *dest <- ex, return (old *dest) // @@ -450,13 +448,13 @@ StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); address start = __ pc(); - __ 
pushl(rdx); + __ push(rdx); Address exchange(rsp, 2 * wordSize); Address dest_addr(rsp, 3 * wordSize); __ movl(rax, exchange); - __ movl(rdx, dest_addr); - __ xchg(rax, Address(rdx, 0)); - __ popl(rdx); + __ movptr(rdx, dest_addr); + __ xchgl(rax, Address(rdx, 0)); + __ pop(rdx); __ ret(0); return start; @@ -464,8 +462,8 @@ //---------------------------------------------------------------------------------------------------- // Support for void verify_mxcsr() - // - // This routine is used with -Xcheck:jni to verify that native + // + // This routine is used with -Xcheck:jni to verify that native // JNI code does not return to Java code without restoring the // MXCSR register to our expected state. @@ -479,28 +477,28 @@ if (CheckJNICalls && UseSSE > 0 ) { Label ok_ret; ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); - __ pushl(rax); - __ subl(rsp, wordSize); // allocate a temp location + __ push(rax); + __ subptr(rsp, wordSize); // allocate a temp location __ stmxcsr(mxcsr_save); __ movl(rax, mxcsr_save); - __ andl(rax, MXCSR_MASK); + __ andl(rax, MXCSR_MASK); __ cmp32(rax, mxcsr_std); __ jcc(Assembler::equal, ok_ret); - + __ warn("MXCSR changed by native JNI code."); __ ldmxcsr(mxcsr_std); __ bind(ok_ret); - __ addl(rsp, wordSize); - __ popl(rax); + __ addptr(rsp, wordSize); + __ pop(rax); } __ ret(0); return start; } - + //--------------------------------------------------------------------------- // Support for void verify_fpu_cntrl_wrd() @@ -517,8 +515,8 @@ if (CheckJNICalls) { Label ok_ret; - __ pushl(rax); - __ subl(rsp, wordSize); // allocate a temp location + __ push(rax); + __ subptr(rsp, wordSize); // allocate a temp location __ fnstcw(fpu_cntrl_wrd_save); __ movl(rax, fpu_cntrl_wrd_save); __ andl(rax, FPU_CNTRL_WRD_MASK); @@ -531,8 +529,8 @@ __ fldcw(fpu_std); __ bind(ok_ret); - __ addl(rsp, wordSize); - __ popl(rax); + __ addptr(rsp, wordSize); + __ pop(rax); } __ ret(0); @@ -541,7 +539,7 @@ } //--------------------------------------------------------------------------- - // Wrapper for slow-case handling of double-to-integer conversion + // Wrapper for slow-case handling of double-to-integer conversion // d2i or f2i fast case failed either because it is nan or because // of under/overflow. 
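For readers wondering why a slow path exists for d2i/f2i at all: the hardware conversion on x86 produces the "integer indefinite" value for NaN and out-of-range inputs, while the Java language requires NaN to convert to 0 and overflow to saturate at the int range. The sketch below shows those required semantics in plain C++; it is the behavior the wrapper's runtime call ultimately provides, not the VM's actual implementation.

#include <cstdio>
#include <cmath>
#include <climits>

// Java semantics for (int)d: NaN -> 0, out-of-range values saturate,
// in-range values truncate toward zero.
static int java_d2i(double d) {
  if (std::isnan(d))        return 0;
  if (d >= (double)INT_MAX) return INT_MAX;
  if (d <= (double)INT_MIN) return INT_MIN;
  return (int)d;
}

int main() {
  std::printf("%d %d %d\n", java_d2i(NAN), java_d2i(1e18), java_d2i(-3.7));
  return 0;
}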
// Input: FPU TOS: float value @@ -554,34 +552,34 @@ // Capture info about frame layout enum layout { FPUState_off = 0, rbp_off = FPUStateSizeInWords, - rdi_off, + rdi_off, rsi_off, rcx_off, rbx_off, saved_argument_off, saved_argument_off2, // 2nd half of double - framesize + framesize }; assert(FPUStateSizeInWords == 27, "update stack layout"); // Save outgoing argument to stack across push_FPU_state() - __ subl(rsp, wordSize * 2); + __ subptr(rsp, wordSize * 2); __ fstp_d(Address(rsp, 0)); // Save CPU & FPU state - __ pushl(rbx); - __ pushl(rcx); - __ pushl(rsi); - __ pushl(rdi); - __ pushl(rbp); + __ push(rbx); + __ push(rcx); + __ push(rsi); + __ push(rdi); + __ push(rbp); __ push_FPU_state(); - // push_FPU_state() resets the FP top of stack + // push_FPU_state() resets the FP top of stack // Load original double into FP top of stack __ fld_d(Address(rsp, saved_argument_off * wordSize)); // Store double into stack as outgoing argument - __ subl(rsp, wordSize*2); + __ subptr(rsp, wordSize*2); __ fst_d(Address(rsp, 0)); // Prepare FPU for doing math in C-land @@ -595,12 +593,12 @@ // Restore CPU & FPU state __ pop_FPU_state(); - __ popl(rbp); - __ popl(rdi); - __ popl(rsi); - __ popl(rcx); - __ popl(rbx); - __ addl(rsp, wordSize * 2); + __ pop(rbp); + __ pop(rdi); + __ pop(rsi); + __ pop(rcx); + __ pop(rbx); + __ addptr(rsp, wordSize * 2); __ ret(0); @@ -616,13 +614,13 @@ StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); address start = __ pc(); - __ pushl(0); // hole for return address-to-be - __ pushad(); // push registers + __ push(0); // hole for return address-to-be + __ pusha(); // push registers Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); BLOCK_COMMENT("call handle_unsafe_access"); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); - __ movl(next_pc, rax); // stuff next address - __ popad(); + __ movptr(next_pc, rax); // stuff next address + __ popa(); __ ret(0); // jump to next address return start; @@ -631,11 +629,11 @@ //---------------------------------------------------------------------------------------------------- // Non-destructive plausibility checks for oops - + address generate_verify_oop() { StubCodeMark mark(this, "StubRoutines", "verify_oop"); address start = __ pc(); - + // Incoming arguments on stack after saving rax,: // // [tos ]: saved rdx @@ -644,64 +642,64 @@ // [tos + 3]: char* error message // [tos + 4]: oop object to verify // [tos + 5]: saved rax, - saved by caller and bashed - + Label exit, error; - __ pushfd(); - __ increment(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); - __ pushl(rdx); // save rdx + __ pushf(); + __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); + __ push(rdx); // save rdx // make sure object is 'reasonable' - __ movl(rax, Address(rsp, 4 * wordSize)); // get object - __ testl(rax, rax); + __ movptr(rax, Address(rsp, 4 * wordSize)); // get object + __ testptr(rax, rax); __ jcc(Assembler::zero, exit); // if obj is NULL it is ok - + // Check if the oop is in the right area of memory const int oop_mask = Universe::verify_oop_mask(); const int oop_bits = Universe::verify_oop_bits(); - __ movl(rdx, rax); - __ andl(rdx, oop_mask); - __ cmpl(rdx, oop_bits); + __ mov(rdx, rax); + __ andptr(rdx, oop_mask); + __ cmpptr(rdx, oop_bits); __ jcc(Assembler::notZero, error); // make sure klass is 'reasonable' - __ movl(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass - __ testl(rax, rax); + __ movptr(rax, 
Address(rax, oopDesc::klass_offset_in_bytes())); // get klass + __ testptr(rax, rax); __ jcc(Assembler::zero, error); // if klass is NULL it is broken // Check if the klass is in the right area of memory const int klass_mask = Universe::verify_klass_mask(); const int klass_bits = Universe::verify_klass_bits(); - __ movl(rdx, rax); - __ andl(rdx, klass_mask); - __ cmpl(rdx, klass_bits); + __ mov(rdx, rax); + __ andptr(rdx, klass_mask); + __ cmpptr(rdx, klass_bits); __ jcc(Assembler::notZero, error); // make sure klass' klass is 'reasonable' - __ movl(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass' klass - __ testl(rax, rax); + __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass' klass + __ testptr(rax, rax); __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken - __ movl(rdx, rax); - __ andl(rdx, klass_mask); - __ cmpl(rdx, klass_bits); + __ mov(rdx, rax); + __ andptr(rdx, klass_mask); + __ cmpptr(rdx, klass_bits); __ jcc(Assembler::notZero, error); // if klass not in right area // of memory it is broken too. // return if everything seems ok __ bind(exit); - __ movl(rax, Address(rsp, 5 * wordSize)); // get saved rax, back - __ popl(rdx); // restore rdx - __ popfd(); // restore EFLAGS + __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back + __ pop(rdx); // restore rdx + __ popf(); // restore EFLAGS __ ret(3 * wordSize); // pop arguments // handle errors __ bind(error); - __ movl(rax, Address(rsp, 5 * wordSize)); // get saved rax, back - __ popl(rdx); // get saved rdx back - __ popfd(); // get saved EFLAGS off stack -- will be ignored - __ pushad(); // push registers (eip = return address & msg are already pushed) + __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back + __ pop(rdx); // get saved rdx back + __ popf(); // get saved EFLAGS off stack -- will be ignored + __ pusha(); // push registers (eip = return address & msg are already pushed) BLOCK_COMMENT("call MacroAssembler::debug"); - __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug))); - __ popad(); + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32))); + __ popa(); __ ret(3 * wordSize); // pop arguments return start; } @@ -714,29 +712,27 @@ // end - element count void gen_write_ref_array_pre_barrier(Register start, Register count) { assert_different_registers(start, count); -#if 0 // G1 only BarrierSet* bs = Universe::heap()->barrier_set(); switch (bs->kind()) { case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: { - __ pushad(); // push registers - __ pushl(count); - __ pushl(start); - __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)); - __ addl(esp, wordSize * 2); - __ popad(); + __ pusha(); // push registers + __ push(count); + __ push(start); + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre))); + __ addptr(rsp, 2*wordSize); + __ popa(); } break; case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: - case BarrierSet::ModRef: + case BarrierSet::ModRef: break; - default : + default : ShouldNotReachHere(); - + } -#endif // 0 - G1 only } @@ -752,20 +748,18 @@ BarrierSet* bs = Universe::heap()->barrier_set(); assert_different_registers(start, count); switch (bs->kind()) { -#if 0 // G1 only case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: { - __ pushad(); // push registers - __ pushl(count); - __ pushl(start); - __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, 
BarrierSet::static_write_ref_array_post)); - __ addl(esp, wordSize * 2); - __ popad(); + __ pusha(); // push registers + __ push(count); + __ push(start); + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post))); + __ addptr(rsp, 2*wordSize); + __ popa(); } break; -#endif // 0 G1 only case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: @@ -777,26 +771,89 @@ const Register end = count; // elements count; end == start+count-1 assert_different_registers(start, end); - __ leal(end, Address(start, count, Address::times_4, -4)); - __ shrl(start, CardTableModRefBS::card_shift); - __ shrl(end, CardTableModRefBS::card_shift); - __ subl(end, start); // end --> count + __ lea(end, Address(start, count, Address::times_ptr, -wordSize)); + __ shrptr(start, CardTableModRefBS::card_shift); + __ shrptr(end, CardTableModRefBS::card_shift); + __ subptr(end, start); // end --> count __ BIND(L_loop); - ExternalAddress base((address)ct->byte_map_base); - Address index(start, count, Address::times_1, 0); - __ movbyte(ArrayAddress(base, index), 0); + intptr_t disp = (intptr_t) ct->byte_map_base; + Address cardtable(start, count, Address::times_1, disp); + __ movb(cardtable, 0); __ decrement(count); __ jcc(Assembler::greaterEqual, L_loop); } break; - case BarrierSet::ModRef: + case BarrierSet::ModRef: break; - default : + default : ShouldNotReachHere(); - + } } + + // Copy 64 bytes chunks + // + // Inputs: + // from - source array address + // to_from - destination array address - from + // qword_count - 8-bytes element count, negative + // + void xmm_copy_forward(Register from, Register to_from, Register qword_count) { + assert( UseSSE >= 2, "supported cpu only" ); + Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit; + // Copy 64-byte chunks + __ jmpb(L_copy_64_bytes); + __ align(16); + __ BIND(L_copy_64_bytes_loop); + + if(UseUnalignedLoadStores) { + __ movdqu(xmm0, Address(from, 0)); + __ movdqu(Address(from, to_from, Address::times_1, 0), xmm0); + __ movdqu(xmm1, Address(from, 16)); + __ movdqu(Address(from, to_from, Address::times_1, 16), xmm1); + __ movdqu(xmm2, Address(from, 32)); + __ movdqu(Address(from, to_from, Address::times_1, 32), xmm2); + __ movdqu(xmm3, Address(from, 48)); + __ movdqu(Address(from, to_from, Address::times_1, 48), xmm3); + + } else { + __ movq(xmm0, Address(from, 0)); + __ movq(Address(from, to_from, Address::times_1, 0), xmm0); + __ movq(xmm1, Address(from, 8)); + __ movq(Address(from, to_from, Address::times_1, 8), xmm1); + __ movq(xmm2, Address(from, 16)); + __ movq(Address(from, to_from, Address::times_1, 16), xmm2); + __ movq(xmm3, Address(from, 24)); + __ movq(Address(from, to_from, Address::times_1, 24), xmm3); + __ movq(xmm4, Address(from, 32)); + __ movq(Address(from, to_from, Address::times_1, 32), xmm4); + __ movq(xmm5, Address(from, 40)); + __ movq(Address(from, to_from, Address::times_1, 40), xmm5); + __ movq(xmm6, Address(from, 48)); + __ movq(Address(from, to_from, Address::times_1, 48), xmm6); + __ movq(xmm7, Address(from, 56)); + __ movq(Address(from, to_from, Address::times_1, 56), xmm7); + } + + __ addl(from, 64); + __ BIND(L_copy_64_bytes); + __ subl(qword_count, 8); + __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop); + __ addl(qword_count, 8); + __ jccb(Assembler::zero, L_exit); + // + // length is too short, just copy qwords + // + __ BIND(L_copy_8_bytes); + __ movq(xmm0, Address(from, 0)); + __ movq(Address(from, to_from, Address::times_1), xmm0); + __ addl(from, 8); + __ 
decrement(qword_count); + __ jcc(Assembler::greater, L_copy_8_bytes); + __ BIND(L_exit); + } + // Copy 64 bytes chunks // // Inputs: @@ -805,6 +862,7 @@ // qword_count - 8-bytes element count, negative // void mmx_copy_forward(Register from, Register to_from, Register qword_count) { + assert( VM_Version::supports_mmx(), "supported cpu only" ); Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit; // Copy 64-byte chunks __ jmpb(L_copy_64_bytes); @@ -826,7 +884,7 @@ __ movq(Address(from, to_from, Address::times_1, 40), mmx5); __ movq(Address(from, to_from, Address::times_1, 48), mmx6); __ movq(Address(from, to_from, Address::times_1, 56), mmx7); - __ addl(from, 64); + __ addptr(from, 64); __ BIND(L_copy_64_bytes); __ subl(qword_count, 8); __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop); @@ -838,15 +896,15 @@ __ BIND(L_copy_8_bytes); __ movq(mmx0, Address(from, 0)); __ movq(Address(from, to_from, Address::times_1), mmx0); - __ addl(from, 8); + __ addptr(from, 8); __ decrement(qword_count); __ jcc(Assembler::greater, L_copy_8_bytes); __ BIND(L_exit); __ emms(); } - address generate_disjoint_copy(BasicType t, bool aligned, - Address::ScaleFactor sf, + address generate_disjoint_copy(BasicType t, bool aligned, + Address::ScaleFactor sf, address* entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); @@ -855,7 +913,7 @@ Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte; Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes; - int shift = Address::times_4 - sf; + int shift = Address::times_ptr - sf; const Register from = rsi; // source array address const Register to = rdi; // destination array address @@ -864,25 +922,25 @@ const Register saved_to = rdx; // saved destination array address __ enter(); // required for proper stackwalking of RuntimeStub frame - __ pushl(rsi); - __ pushl(rdi); - __ movl(from , Address(rsp, 12+ 4)); - __ movl(to , Address(rsp, 12+ 8)); + __ push(rsi); + __ push(rdi); + __ movptr(from , Address(rsp, 12+ 4)); + __ movptr(to , Address(rsp, 12+ 8)); __ movl(count, Address(rsp, 12+ 12)); if (t == T_OBJECT) { __ testl(count, count); __ jcc(Assembler::zero, L_0_count); gen_write_ref_array_pre_barrier(to, count); - __ movl(saved_to, to); // save 'to' + __ mov(saved_to, to); // save 'to' } *entry = __ pc(); // Entry point from conjoint arraycopy stub. 
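The new xmm_copy_forward helper above streams 64 bytes per iteration through XMM registers (movdqu pairs when UseUnalignedLoadStores is set, movq pairs otherwise) and finishes any remainder 8 bytes at a time, mirroring the existing MMX loop. A small user-level analogue with SSE2 intrinsics, assuming a plain byte buffer rather than the stub's biased to_from addressing:

#include <emmintrin.h>   // SSE2: __m128i, _mm_loadu_si128, _mm_storeu_si128
#include <cstddef>
#include <cstring>
#include <cstdio>

// Copy 'bytes' bytes forward, 64 bytes per main-loop iteration, then an
// 8-byte tail loop; 'bytes' is assumed to be a multiple of 8, as in the stub.
static void copy_forward_64(const char* from, char* to, size_t bytes) {
  size_t i = 0;
  for (; i + 64 <= bytes; i += 64) {
    __m128i a = _mm_loadu_si128((const __m128i*)(from + i +  0));
    __m128i b = _mm_loadu_si128((const __m128i*)(from + i + 16));
    __m128i c = _mm_loadu_si128((const __m128i*)(from + i + 32));
    __m128i d = _mm_loadu_si128((const __m128i*)(from + i + 48));
    _mm_storeu_si128((__m128i*)(to + i +  0), a);
    _mm_storeu_si128((__m128i*)(to + i + 16), b);
    _mm_storeu_si128((__m128i*)(to + i + 32), c);
    _mm_storeu_si128((__m128i*)(to + i + 48), d);
  }
  for (; i < bytes; i += 8) {          // qword tail, mirrors L_copy_8_bytes
    std::memcpy(to + i, from + i, 8);
  }
}

int main() {
  char src[128], dst[128];
  for (int i = 0; i < 128; i++) src[i] = (char)i;
  copy_forward_64(src, dst, sizeof src);
  std::printf("%s\n", std::memcmp(src, dst, sizeof src) == 0 ? "copied" : "mismatch");
  return 0;
}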
BLOCK_COMMENT("Entry:"); - __ subl(to, from); // to --> to_from + __ subptr(to, from); // to --> to_from __ cmpl(count, 2< to_from + __ subptr(to, from); // to --> to_from if (VM_Version::supports_mmx()) { - mmx_copy_forward(from, to_from, count); + if (UseXMMForArrayCopy) { + xmm_copy_forward(from, to_from, count); + } else { + mmx_copy_forward(from, to_from, count); + } } else { __ jmpb(L_copy_8_bytes); __ align(16); __ BIND(L_copy_8_bytes_loop); __ fild_d(Address(from, 0)); __ fistp_d(Address(from, to_from, Address::times_1)); - __ addl(from, 8); + __ addptr(from, 8); __ BIND(L_copy_8_bytes); __ decrement(count); __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); } inc_copy_counter_np(T_LONG); __ leave(); // required for proper stackwalking of RuntimeStub frame - __ xorl(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ ret(0); return start; } - address generate_conjoint_long_copy(address nooverlap_target, + address generate_conjoint_long_copy(address nooverlap_target, address* entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); @@ -1181,20 +1256,20 @@ const Register end_from = rax; // source array end address __ enter(); // required for proper stackwalking of RuntimeStub frame - __ movl(from , Address(rsp, 8+0)); // from - __ movl(to , Address(rsp, 8+4)); // to - __ movl(count, Address(rsp, 8+8)); // count + __ movptr(from , Address(rsp, 8+0)); // from + __ movptr(to , Address(rsp, 8+4)); // to + __ movl2ptr(count, Address(rsp, 8+8)); // count *entry = __ pc(); // Entry point from generic arraycopy stub. BLOCK_COMMENT("Entry:"); // arrays overlap test - __ cmpl(to, from); + __ cmpptr(to, from); RuntimeAddress nooverlap(nooverlap_target); __ jump_cc(Assembler::belowEqual, nooverlap); - __ leal(end_from, Address(from, count, Address::times_8, 0)); - __ cmpl(to, end_from); - __ movl(from, Address(rsp, 8)); // from + __ lea(end_from, Address(from, count, Address::times_8, 0)); + __ cmpptr(to, end_from); + __ movptr(from, Address(rsp, 8)); // from __ jump_cc(Assembler::aboveEqual, nooverlap); __ jmpb(L_copy_8_bytes); @@ -1202,8 +1277,13 @@ __ align(16); __ BIND(L_copy_8_bytes_loop); if (VM_Version::supports_mmx()) { - __ movq(mmx0, Address(from, count, Address::times_8)); - __ movq(Address(to, count, Address::times_8), mmx0); + if (UseXMMForArrayCopy) { + __ movq(xmm0, Address(from, count, Address::times_8)); + __ movq(Address(to, count, Address::times_8), xmm0); + } else { + __ movq(mmx0, Address(from, count, Address::times_8)); + __ movq(Address(to, count, Address::times_8), mmx0); + } } else { __ fild_d(Address(from, count, Address::times_8)); __ fistp_d(Address(to, count, Address::times_8)); @@ -1212,12 +1292,12 @@ __ decrement(count); __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); - if (VM_Version::supports_mmx()) { + if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) { __ emms(); } inc_copy_counter_np(T_LONG); __ leave(); // required for proper stackwalking of RuntimeStub frame - __ xorl(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ ret(0); return start; } @@ -1254,14 +1334,14 @@ Address super_cache_addr( sub_klass, sc_offset); // if the pointers are equal, we are done (e.g., String[] elements) - __ cmpl(sub_klass, super_klass_addr); + __ cmpptr(sub_klass, super_klass_addr); __ jcc(Assembler::equal, L_success); // check the supertype display: - __ movl(temp, super_check_offset_addr); + __ movl2ptr(temp, super_check_offset_addr); Address super_check_addr(sub_klass, temp, Address::times_1, 0); - 
__ movl(temp, super_check_addr); // load displayed supertype - __ cmpl(temp, super_klass_addr); // test the super type + __ movptr(temp, super_check_addr); // load displayed supertype + __ cmpptr(temp, super_klass_addr); // test the super type __ jcc(Assembler::equal, L_success); // if it was a primary super, we can just fail immediately @@ -1274,31 +1354,31 @@ { // The repne_scan instruction uses fixed registers, which we must spill. // (We need a couple more temps in any case.) - __ pushl(rax); - __ pushl(rcx); - __ pushl(rdi); + __ push(rax); + __ push(rcx); + __ push(rdi); assert_different_registers(sub_klass, rax, rcx, rdi); - __ movl(rdi, secondary_supers_addr); + __ movptr(rdi, secondary_supers_addr); // Load the array length. - __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); + __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); // Skip to start of data. - __ addl(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); + __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); // Scan rcx words at [edi] for occurance of rax, // Set NZ/Z based on last compare - __ movl(rax, super_klass_addr); + __ movptr(rax, super_klass_addr); __ repne_scan(); // Unspill the temp. registers: - __ popl(rdi); - __ popl(rcx); - __ popl(rax); + __ pop(rdi); + __ pop(rcx); + __ pop(rax); } __ jcc(Assembler::notEqual, L_failure); // Success. Cache the super we found and proceed in triumph. - __ movl(temp, super_klass_addr); // note: rax, is dead - __ movl(super_cache_addr, temp); + __ movptr(temp, super_klass_addr); // note: rax, is dead + __ movptr(super_cache_addr, temp); if (!fall_through_on_success) __ jmp(L_success); @@ -1341,9 +1421,9 @@ __ enter(); // required for proper stackwalking of RuntimeStub frame - __ pushl(rsi); - __ pushl(rdi); - __ pushl(rbx); + __ push(rsi); + __ push(rdi); + __ push(rbx); Address from_arg(rsp, 16+ 4); // from Address to_arg(rsp, 16+ 8); // to @@ -1352,43 +1432,43 @@ Address ckval_arg(rsp, 16+20); // super_klass // Load up: - __ movl(from, from_arg); - __ movl(to, to_arg); - __ movl(length, length_arg); + __ movptr(from, from_arg); + __ movptr(to, to_arg); + __ movl2ptr(length, length_arg); *entry = __ pc(); // Entry point from generic arraycopy stub. BLOCK_COMMENT("Entry:"); //--------------------------------------------------------------- - // Assembler stub will be used for this call to arraycopy + // Assembler stub will be used for this call to arraycopy // if the two arrays are subtypes of Object[] but the // destination array type is not equal to or a supertype // of the source type. Each element must be separately // checked. // Loop-invariant addresses. They are exclusive end pointers. - Address end_from_addr(from, length, Address::times_4, 0); - Address end_to_addr(to, length, Address::times_4, 0); + Address end_from_addr(from, length, Address::times_ptr, 0); + Address end_to_addr(to, length, Address::times_ptr, 0); Register end_from = from; // re-use Register end_to = to; // re-use Register count = length; // re-use // Loop-variant addresses. They assume post-incremented count < 0. - Address from_element_addr(end_from, count, Address::times_4, 0); - Address to_element_addr(end_to, count, Address::times_4, 0); + Address from_element_addr(end_from, count, Address::times_ptr, 0); + Address to_element_addr(end_to, count, Address::times_ptr, 0); Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes()); // Copy from low to high addresses, indexed from the end of each array. 
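generate_type_check above follows the usual two-level subtype test: compare against the supertype recorded at the super-check offset (the common display hit), and only on a miss spill registers and repne_scan the secondary-supers array, caching a positive result in the sub-klass's super-cache slot. A deliberately simplified C++ model of that logic, with hypothetical klass structures in place of HotSpot's real Klass layout:

#include <cstdio>

// Hypothetical klass shape: one "display/cache" slot plus a null-terminated
// secondary-supers list scanned linearly, loosely echoing the repne_scan path.
struct KlassModel {
  const KlassModel*        cached_super;   // plays the role of the display / super cache
  const KlassModel* const* secondaries;    // null-terminated array of further supertypes
};

static bool is_subtype_of(KlassModel* sub, const KlassModel* super) {
  if (sub == super)               return true;   // identical types (e.g. String[] elements)
  if (sub->cached_super == super) return true;   // display / cache hit, the fast case
  if (sub->secondaries != nullptr) {
    for (const KlassModel* const* p = sub->secondaries; *p != nullptr; p++) {
      if (*p == super) {
        sub->cached_super = super;               // cache the positive result
        return true;
      }
    }
  }
  return false;                                  // slow scan failed as well
}

int main() {
  KlassModel object = { nullptr, nullptr };
  const KlassModel* secs[] = { &object, nullptr };
  KlassModel string = { nullptr, secs };
  std::printf("%d %d\n", is_subtype_of(&string, &object), is_subtype_of(&string, &string));
  return 0;
}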
- __ leal(end_from, end_from_addr); - __ leal(end_to, end_to_addr); gen_write_ref_array_pre_barrier(to, count); + __ lea(end_from, end_from_addr); + __ lea(end_to, end_to_addr); assert(length == count, ""); // else fix next line: - __ negl(count); // negate and test the length + __ negptr(count); // negate and test the length __ jccb(Assembler::notZero, L_load_element); // Empty array: Nothing to do. - __ xorl(rax, rax); // return 0 on (trivial) success + __ xorptr(rax, rax); // return 0 on (trivial) success __ jmp(L_done); // ======== begin loop ======== @@ -1397,22 +1477,22 @@ // for (count = -count; count != 0; count++) // Base pointers src, dst are biased by 8*count,to last element. __ align(16); - + __ BIND(L_store_element); - __ movl(to_element_addr, elem); // store the oop + __ movptr(to_element_addr, elem); // store the oop __ increment(count); // increment the count toward zero __ jccb(Assembler::zero, L_do_card_marks); // ======== loop entry is here ======== __ BIND(L_load_element); - __ movl(elem, from_element_addr); // load the oop - __ testl(elem, elem); + __ movptr(elem, from_element_addr); // load the oop + __ testptr(elem, elem); __ jccb(Assembler::zero, L_store_element); // (Could do a trick here: Remember last successful non-null // element stored and make a quick oop equality check on it.) - __ movl(elem_klass, elem_klass_addr); // query the object klass + __ movptr(elem_klass, elem_klass_addr); // query the object klass generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp, &L_store_element, NULL); // (On fall-through, we have failed the element type check.) @@ -1423,25 +1503,25 @@ // Emit GC store barriers for the oops we have copied (length_arg + count), // and report their number to the caller. __ addl(count, length_arg); // transfers = (length - remaining) - __ movl(rax, count); // save the value - __ notl(rax); // report (-1^K) to caller - __ movl(to, to_arg); // reload + __ movl2ptr(rax, count); // save the value + __ notptr(rax); // report (-1^K) to caller + __ movptr(to, to_arg); // reload assert_different_registers(to, count, rax); gen_write_ref_array_post_barrier(to, count); __ jmpb(L_done); // Come here on success only. __ BIND(L_do_card_marks); - __ movl(count, length_arg); - __ movl(to, to_arg); // reload + __ movl2ptr(count, length_arg); + __ movptr(to, to_arg); // reload gen_write_ref_array_post_barrier(to, count); - __ xorl(rax, rax); // return 0 on success + __ xorptr(rax, rax); // return 0 on success // Common exit point (success or failure). __ BIND(L_done); - __ popl(rbx); - __ popl(rdi); - __ popl(rsi); + __ pop(rbx); + __ pop(rdi); + __ pop(rsi); inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -1466,10 +1546,10 @@ // Examines the alignment of the operands and dispatches // to a long, int, short, or byte copy loop. 
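In plain terms, the checked-copy loop generated above does this per element: load the oop, store it unconditionally if it is null, otherwise require it to pass the type check against the destination's element klass before storing; on the first failure the stub card-marks what it did transfer and reports the count as the bitwise complement (-1 ^ K). A hypothetical C++ rendering of that contract; the element type, the assignability predicate, and the barrier comments are stand-ins, not the VM's code.

#include <cstddef>
#include <cstdio>

struct Obj { int type_id; };                       // hypothetical "oop"
typedef bool (*assignable_fn)(const Obj* elem);    // stands in for the klass check

// Returns 0 on full success, or ~K (i.e. -1 ^ K) after transferring K elements,
// matching the convention the stub reports to its caller.
static int checkcast_copy(Obj* const* src, Obj** dst, size_t len, assignable_fn ok) {
  size_t copied = 0;
  for (; copied < len; copied++) {
    Obj* e = src[copied];
    if (e != nullptr && !ok(e)) {
      // the stub still emits the post-barrier for the 'copied' elements here
      return ~(int)copied;
    }
    dst[copied] = e;           // store the element (null or type-checked)
  }
  // post-barrier covers all 'len' elements, then success
  return 0;
}

static bool only_type_7(const Obj* e) { return e->type_id == 7; }

int main() {
  Obj a = {7}, b = {9};
  Obj* src[] = { &a, nullptr, &b };
  Obj* dst[3] = {};
  std::printf("%d\n", checkcast_copy(src, dst, 3, only_type_7));  // prints -3, i.e. ~2
  return 0;
}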
// - address generate_unsafe_copy(const char *name, + address generate_unsafe_copy(const char *name, address byte_copy_entry, - address short_copy_entry, - address int_copy_entry, + address short_copy_entry, + address int_copy_entry, address long_copy_entry) { Label L_long_aligned, L_int_aligned, L_short_aligned; @@ -1483,24 +1563,24 @@ const Register count = rcx; // elements count __ enter(); // required for proper stackwalking of RuntimeStub frame - __ pushl(rsi); - __ pushl(rdi); + __ push(rsi); + __ push(rdi); Address from_arg(rsp, 12+ 4); // from Address to_arg(rsp, 12+ 8); // to Address count_arg(rsp, 12+12); // byte count // Load up: - __ movl(from , from_arg); - __ movl(to , to_arg); - __ movl(count, count_arg); + __ movptr(from , from_arg); + __ movptr(to , to_arg); + __ movl2ptr(count, count_arg); // bump this on entry, not on exit: inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); const Register bits = rsi; - __ movl(bits, from); - __ orl(bits, to); - __ orl(bits, count); + __ mov(bits, from); + __ orptr(bits, to); + __ orptr(bits, count); __ testl(bits, BytesPerLong-1); __ jccb(Assembler::zero, L_long_aligned); @@ -1512,20 +1592,20 @@ __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); __ BIND(L_short_aligned); - __ shrl(count, LogBytesPerShort); // size => short_count + __ shrptr(count, LogBytesPerShort); // size => short_count __ movl(count_arg, count); // update 'count' __ jump(RuntimeAddress(short_copy_entry)); __ BIND(L_int_aligned); - __ shrl(count, LogBytesPerInt); // size => int_count + __ shrptr(count, LogBytesPerInt); // size => int_count __ movl(count_arg, count); // update 'count' __ jump(RuntimeAddress(int_copy_entry)); __ BIND(L_long_aligned); - __ shrl(count, LogBytesPerLong); // size => qword_count + __ shrptr(count, LogBytesPerLong); // size => qword_count __ movl(count_arg, count); // update 'count' - __ popl(rdi); // Do pops here since jlong_arraycopy stub does not do it. - __ popl(rsi); + __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it. + __ pop(rsi); __ jump(RuntimeAddress(long_copy_entry)); return start; @@ -1572,10 +1652,10 @@ // rax, == 0 - success // rax, == -1^K - failure, where K is partial transfer count // - address generate_generic_copy(const char *name, + address generate_generic_copy(const char *name, address entry_jbyte_arraycopy, - address entry_jshort_arraycopy, - address entry_jint_arraycopy, + address entry_jshort_arraycopy, + address entry_jint_arraycopy, address entry_oop_arraycopy, address entry_jlong_arraycopy, address entry_checkcast_arraycopy) { @@ -1598,8 +1678,8 @@ address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame - __ pushl(rsi); - __ pushl(rdi); + __ push(rsi); + __ push(rdi); // bump this on entry, not on exit: inc_counter_np(SharedRuntime::_generic_array_copy_ctr); @@ -1612,9 +1692,9 @@ Address LENGTH (rsp, 12+20); //----------------------------------------------------------------------- - // Assembler stub will be used for this call to arraycopy + // Assembler stub will be used for this call to arraycopy // if the following conditions are met: - // + // // (1) src and dst must not be null. // (2) src_pos must not be negative. // (3) dst_pos must not be negative. @@ -1623,7 +1703,7 @@ // (6) src and dst should be arrays. // (7) src_pos + length must not exceed length of src. // (8) dst_pos + length must not exceed length of dst. 
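A compact C++ rendering of checks (1)-(8) above; arrayOopSketch and its length field are stand-ins for the VM's array oop layout, and the klass/layout-helper tests of (5) and (6) are left to the stub itself:

#include <cstddef>

struct arrayOopSketch { int length; };

// Returns 0 when the fast arraycopy path may be used, -1 when the caller
// must fall back to the slow path -- the same screening the stub performs.
static int generic_copy_checks(const arrayOopSketch* src, int src_pos,
                               const arrayOopSketch* dst, int dst_pos, int length) {
  if (src == NULL || dst == NULL)                 return -1;  // (1)
  if (src_pos < 0 || dst_pos < 0 || length < 0)   return -1;  // (2) (3) (4)
  // (5), (6): klass identity / objArray layout-helper tests done by the stub.
  if ((long long)src_pos + length > src->length)  return -1;  // (7)
  if ((long long)dst_pos + length > dst->length)  return -1;  // (8)
  return 0;   // fast path may proceed
}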
- // + // const Register src = rax; // source array oop const Register src_pos = rsi; @@ -1632,27 +1712,27 @@ const Register length = rcx; // transfer count // if (src == NULL) return -1; - __ movl(src, SRC); // src oop - __ testl(src, src); + __ movptr(src, SRC); // src oop + __ testptr(src, src); __ jccb(Assembler::zero, L_failed_0); // if (src_pos < 0) return -1; - __ movl(src_pos, SRC_POS); // src_pos + __ movl2ptr(src_pos, SRC_POS); // src_pos __ testl(src_pos, src_pos); __ jccb(Assembler::negative, L_failed_0); // if (dst == NULL) return -1; - __ movl(dst, DST); // dst oop - __ testl(dst, dst); + __ movptr(dst, DST); // dst oop + __ testptr(dst, dst); __ jccb(Assembler::zero, L_failed_0); // if (dst_pos < 0) return -1; - __ movl(dst_pos, DST_POS); // dst_pos + __ movl2ptr(dst_pos, DST_POS); // dst_pos __ testl(dst_pos, dst_pos); __ jccb(Assembler::negative, L_failed_0); // if (length < 0) return -1; - __ movl(length, LENGTH); // length + __ movl2ptr(length, LENGTH); // length __ testl(length, length); __ jccb(Assembler::negative, L_failed_0); @@ -1660,18 +1740,18 @@ Address src_klass_addr(src, oopDesc::klass_offset_in_bytes()); Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes()); const Register rcx_src_klass = rcx; // array klass - __ movl(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes())); + __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes())); #ifdef ASSERT // assert(src->klass() != NULL); BLOCK_COMMENT("assert klasses not null"); { Label L1, L2; - __ testl(rcx_src_klass, rcx_src_klass); + __ testptr(rcx_src_klass, rcx_src_klass); __ jccb(Assembler::notZero, L2); // it is broken if klass is NULL __ bind(L1); __ stop("broken null klass"); __ bind(L2); - __ cmpl(dst_klass_addr, 0); + __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD); __ jccb(Assembler::equal, L1); // this would be broken also BLOCK_COMMENT("assert done"); } @@ -1695,7 +1775,7 @@ __ jcc(Assembler::equal, L_objArray); // if (src->klass() != dst->klass()) return -1; - __ cmpl(rcx_src_klass, dst_klass_addr); + __ cmpptr(rcx_src_klass, dst_klass_addr); __ jccb(Assembler::notEqual, L_failed_0); const Register rcx_lh = rcx; // layout helper @@ -1729,12 +1809,12 @@ const Register dst_array = dst; // dst array offset const Register rdi_elsize = rdi; // log2 element size - __ movl(rsi_offset, rcx_lh); - __ shrl(rsi_offset, Klass::_lh_header_size_shift); - __ andl(rsi_offset, Klass::_lh_header_size_mask); // array_offset - __ addl(src_array, rsi_offset); // src array offset - __ addl(dst_array, rsi_offset); // dst array offset - __ andl(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize + __ mov(rsi_offset, rcx_lh); + __ shrptr(rsi_offset, Klass::_lh_header_size_shift); + __ andptr(rsi_offset, Klass::_lh_header_size_mask); // array_offset + __ addptr(src_array, rsi_offset); // src array offset + __ addptr(dst_array, rsi_offset); // dst array offset + __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize // next registers should be set before the jump to corresponding stub const Register from = src; // source array address @@ -1746,17 +1826,17 @@ #define COUNT Address(rsp, 12+12) // Only for oop arraycopy BLOCK_COMMENT("scale indexes to element size"); - __ movl(rsi, SRC_POS); // src_pos - __ shll(rsi); // src_pos << rcx (log2 elsize) + __ movl2ptr(rsi, SRC_POS); // src_pos + __ shlptr(rsi); // src_pos << rcx (log2 elsize) assert(src_array == from, ""); - __ addl(from, rsi); // from = src_array + SRC_POS << log2 elsize - __ movl(rdi, DST_POS); // dst_pos - __ 
shll(rdi); // dst_pos << rcx (log2 elsize) + __ addptr(from, rsi); // from = src_array + SRC_POS << log2 elsize + __ movl2ptr(rdi, DST_POS); // dst_pos + __ shlptr(rdi); // dst_pos << rcx (log2 elsize) assert(dst_array == to, ""); - __ addl(to, rdi); // to = dst_array + DST_POS << log2 elsize - __ movl(FROM, from); // src_addr - __ movl(rdi_elsize, rcx_lh); // log2 elsize - __ movl(count, LENGTH); // elements count + __ addptr(to, rdi); // to = dst_array + DST_POS << log2 elsize + __ movptr(FROM, from); // src_addr + __ mov(rdi_elsize, rcx_lh); // log2 elsize + __ movl2ptr(count, LENGTH); // elements count BLOCK_COMMENT("choose copy loop based on element size"); __ cmpl(rdi_elsize, 0); @@ -1770,15 +1850,15 @@ __ cmpl(rdi_elsize, LogBytesPerLong); __ jccb(Assembler::notEqual, L_failed); #endif - __ popl(rdi); // Do pops here since jlong_arraycopy stub does not do it. - __ popl(rsi); + __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it. + __ pop(rsi); __ jump(RuntimeAddress(entry_jlong_arraycopy)); __ BIND(L_failed); - __ xorl(rax, rax); - __ notl(rax); // return -1 - __ popl(rdi); - __ popl(rsi); + __ xorptr(rax, rax); + __ notptr(rax); // return -1 + __ pop(rdi); + __ pop(rsi); __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -1788,7 +1868,7 @@ Label L_plain_copy, L_checkcast_copy; // test array classes for subtyping - __ cmpl(rcx_src_klass, dst_klass_addr); // usual case is exact equality + __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality __ jccb(Assembler::notEqual, L_checkcast_copy); // Identically typed arrays can be copied without element-wise checks. @@ -1796,15 +1876,15 @@ arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); __ BIND(L_plain_copy); - __ movl(count, LENGTH); // elements count - __ movl(src_pos, SRC_POS); // reload src_pos - __ leal(from, Address(src, src_pos, Address::times_4, - arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr - __ movl(dst_pos, DST_POS); // reload dst_pos - __ leal(to, Address(dst, dst_pos, Address::times_4, - arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr - __ movl(FROM, from); // src_addr - __ movl(TO, to); // dst_addr + __ movl2ptr(count, LENGTH); // elements count + __ movl2ptr(src_pos, SRC_POS); // reload src_pos + __ lea(from, Address(src, src_pos, Address::times_ptr, + arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr + __ movl2ptr(dst_pos, DST_POS); // reload dst_pos + __ lea(to, Address(dst, dst_pos, Address::times_ptr, + arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr + __ movptr(FROM, from); // src_addr + __ movptr(TO, to); // dst_addr __ movl(COUNT, count); // count __ jump(RuntimeAddress(entry_oop_arraycopy)); @@ -1824,37 +1904,37 @@ Address dst_klass_lh_addr(rsi_dst_klass, lh_offset); // Before looking at dst.length, make sure dst is also an objArray. - __ movl(rsi_dst_klass, dst_klass_addr); + __ movptr(rsi_dst_klass, dst_klass_addr); __ cmpl(dst_klass_lh_addr, objArray_lh); __ jccb(Assembler::notEqual, L_failed); // It is safe to examine both src.length and dst.length. - __ movl(src_pos, SRC_POS); // reload rsi + __ movl2ptr(src_pos, SRC_POS); // reload rsi arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); // (Now src_pos and dst_pos are killed, but not src and dst.) // We'll need this temp (don't forget to pop it after the type check). 
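The index scaling just shown boils down to the following C++; the shift and mask constants are placeholders, not the real Klass::_lh_* values:

#include <cstdint>

static const int lh_header_size_shift      = 8;     // placeholder value
static const int lh_header_size_mask       = 0xFF;  // placeholder value
static const int lh_log2_element_size_mask = 0x7F;  // placeholder value

// from/to = array base + header size + (position << log2(element size)),
// which is what the addptr/shlptr sequence above computes.
static void element_addresses(char* src_array, int src_pos,
                              char* dst_array, int dst_pos,
                              int layout_helper,
                              char** from, char** to, int* log2_elsize) {
  int header = (layout_helper >> lh_header_size_shift) & lh_header_size_mask;
  int elsize = layout_helper & lh_log2_element_size_mask;   // log2 of element size
  *from = src_array + header + ((intptr_t)src_pos << elsize);
  *to   = dst_array + header + ((intptr_t)dst_pos << elsize);
  *log2_elsize = elsize;
}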
- __ pushl(rbx); + __ push(rbx); Register rbx_src_klass = rbx; - __ movl(rbx_src_klass, rcx_src_klass); // spill away from rcx - __ movl(rsi_dst_klass, dst_klass_addr); + __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx + __ movptr(rsi_dst_klass, dst_klass_addr); Address super_check_offset_addr(rsi_dst_klass, sco_offset); Label L_fail_array_check; generate_type_check(rbx_src_klass, super_check_offset_addr, dst_klass_addr, rdi_temp, NULL, &L_fail_array_check); // (On fall-through, we have passed the array type check.) - __ popl(rbx); + __ pop(rbx); __ jmp(L_plain_copy); __ BIND(L_fail_array_check); // Reshuffle arguments so we can call checkcast_arraycopy: // match initial saves for checkcast_arraycopy - // pushl(rsi); // already done; see above - // pushl(rdi); // already done; see above - // pushl(rbx); // already done; see above + // push(rsi); // already done; see above + // push(rdi); // already done; see above + // push(rbx); // already done; see above // Marshal outgoing arguments now, freeing registers. Address from_arg(rsp, 16+ 4); // from @@ -1869,24 +1949,24 @@ // push rbx, changed the incoming offsets (why not just use rbp,??) // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, ""); - __ movl(rbx, Address(rsi_dst_klass, ek_offset)); - __ movl(length, LENGTH_arg); // reload elements count - __ movl(src_pos, SRC_POS_arg); // reload src_pos - __ movl(dst_pos, DST_POS_arg); // reload dst_pos + __ movptr(rbx, Address(rsi_dst_klass, ek_offset)); + __ movl2ptr(length, LENGTH_arg); // reload elements count + __ movl2ptr(src_pos, SRC_POS_arg); // reload src_pos + __ movl2ptr(dst_pos, DST_POS_arg); // reload dst_pos - __ movl(ckval_arg, rbx); // destination element type + __ movptr(ckval_arg, rbx); // destination element type __ movl(rbx, Address(rbx, sco_offset)); __ movl(ckoff_arg, rbx); // corresponding class check offset __ movl(length_arg, length); // outgoing length argument - __ leal(from, Address(src, src_pos, Address::times_4, + __ lea(from, Address(src, src_pos, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); - __ movl(from_arg, from); + __ movptr(from_arg, from); - __ leal(to, Address(dst, dst_pos, Address::times_4, + __ lea(to, Address(dst, dst_pos, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); - __ movl(to_arg, to); + __ movptr(to_arg, to); __ jump(RuntimeAddress(entry_checkcast_arraycopy)); } @@ -1902,58 +1982,58 @@ address entry_jlong_arraycopy; address entry_checkcast_arraycopy; - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = - generate_disjoint_copy(T_BYTE, true, Address::times_1, &entry, + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = + generate_disjoint_copy(T_BYTE, true, Address::times_1, &entry, "arrayof_jbyte_disjoint_arraycopy"); - StubRoutines::_arrayof_jbyte_arraycopy = - generate_conjoint_copy(T_BYTE, true, Address::times_1, entry, + StubRoutines::_arrayof_jbyte_arraycopy = + generate_conjoint_copy(T_BYTE, true, Address::times_1, entry, NULL, "arrayof_jbyte_arraycopy"); - StubRoutines::_jbyte_disjoint_arraycopy = - generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry, + StubRoutines::_jbyte_disjoint_arraycopy = + generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry, "jbyte_disjoint_arraycopy"); - StubRoutines::_jbyte_arraycopy = + StubRoutines::_jbyte_arraycopy = generate_conjoint_copy(T_BYTE, false, Address::times_1, entry, &entry_jbyte_arraycopy, "jbyte_arraycopy"); - StubRoutines::_arrayof_jshort_disjoint_arraycopy = + StubRoutines::_arrayof_jshort_disjoint_arraycopy = 
generate_disjoint_copy(T_SHORT, true, Address::times_2, &entry, "arrayof_jshort_disjoint_arraycopy"); - StubRoutines::_arrayof_jshort_arraycopy = + StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_copy(T_SHORT, true, Address::times_2, entry, NULL, "arrayof_jshort_arraycopy"); - StubRoutines::_jshort_disjoint_arraycopy = + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry, "jshort_disjoint_arraycopy"); - StubRoutines::_jshort_arraycopy = + StubRoutines::_jshort_arraycopy = generate_conjoint_copy(T_SHORT, false, Address::times_2, entry, &entry_jshort_arraycopy, "jshort_arraycopy"); // Next arrays are always aligned on 4 bytes at least. - StubRoutines::_jint_disjoint_arraycopy = - generate_disjoint_copy(T_INT, true, Address::times_4, &entry, + StubRoutines::_jint_disjoint_arraycopy = + generate_disjoint_copy(T_INT, true, Address::times_4, &entry, "jint_disjoint_arraycopy"); - StubRoutines::_jint_arraycopy = + StubRoutines::_jint_arraycopy = generate_conjoint_copy(T_INT, true, Address::times_4, entry, &entry_jint_arraycopy, "jint_arraycopy"); - StubRoutines::_oop_disjoint_arraycopy = - generate_disjoint_copy(T_OBJECT, true, Address::times_4, &entry, + StubRoutines::_oop_disjoint_arraycopy = + generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, "oop_disjoint_arraycopy"); - StubRoutines::_oop_arraycopy = - generate_conjoint_copy(T_OBJECT, true, Address::times_4, entry, + StubRoutines::_oop_arraycopy = + generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, &entry_oop_arraycopy, "oop_arraycopy"); - StubRoutines::_jlong_disjoint_arraycopy = + StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy"); - StubRoutines::_jlong_arraycopy = - generate_conjoint_long_copy(entry, &entry_jlong_arraycopy, + StubRoutines::_jlong_arraycopy = + generate_conjoint_long_copy(entry, &entry_jlong_arraycopy, "jlong_arraycopy"); - StubRoutines::_arrayof_jint_disjoint_arraycopy = + StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; - StubRoutines::_arrayof_oop_disjoint_arraycopy = + StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; - StubRoutines::_arrayof_jlong_disjoint_arraycopy = + StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; @@ -1965,14 +2045,14 @@ &entry_checkcast_arraycopy); StubRoutines::_unsafe_arraycopy = - generate_unsafe_copy("unsafe_arraycopy", + generate_unsafe_copy("unsafe_arraycopy", entry_jbyte_arraycopy, entry_jshort_arraycopy, entry_jint_arraycopy, entry_jlong_arraycopy); StubRoutines::_generic_arraycopy = - generate_generic_copy("generic_arraycopy", + generate_generic_copy("generic_arraycopy", entry_jbyte_arraycopy, entry_jshort_arraycopy, entry_jint_arraycopy, @@ -1987,8 +2067,8 @@ // the compilers are responsible for supplying a continuation point // if they expect all registers to be preserved. enum layout { - thread_off, // last_java_sp - rbp_off, // callee saved register + thread_off, // last_java_sp + rbp_off, // callee saved register ret_pc, framesize }; @@ -2001,8 +2081,8 @@ //------------------------------------------------------------------------------------------------------------------------ // Continuation point for throwing of implicit exceptions that are not handled in // the current activation. 
Fabricates an exception oop and initiates normal - // exception dispatching in this frame. - // + // exception dispatching in this frame. + // // Previously the compiler (c2) allowed for callee save registers on Java calls. // This is no longer true after adapter frames were removed but could possibly // be brought back in the future if the interpreter code was reworked and it @@ -2021,7 +2101,7 @@ // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are // either at call sites or otherwise assume that stack unwinding will be initiated, // so caller saved registers were assumed volatile in the compiler. - address generate_throw_exception(const char* name, address runtime_entry, + address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) { int insts_size = 256; @@ -2040,21 +2120,21 @@ Register java_thread = rbx; __ get_thread(java_thread); if (restore_saved_exception_pc) { - __ movl(rax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset()))); - __ pushl(rax); + __ movptr(rax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset()))); + __ push(rax); } - + __ enter(); // required for proper stackwalking of RuntimeStub frame // pc and rbp, already pushed - __ subl(rsp, (framesize-2) * wordSize); // prolog + __ subptr(rsp, (framesize-2) * wordSize); // prolog // Frame is now completed as far as size and linkage. int frame_complete = __ pc() - start; // push java thread (becomes first argument of C function) - __ movl(Address(rsp, thread_off * wordSize), java_thread); + __ movptr(Address(rsp, thread_off * wordSize), java_thread); // Set up last_Java_sp and last_Java_fp __ set_last_Java_frame(java_thread, rsp, rbp, NULL); @@ -2063,9 +2143,9 @@ BLOCK_COMMENT("call runtime_entry"); __ call(RuntimeAddress(runtime_entry)); // Generate oop map - OopMap* map = new OopMap(framesize, 0); + OopMap* map = new OopMap(framesize, 0); oop_maps->add_gc_map(__ pc() - start, map); - + // restore the thread (cannot use the pushed argument since arguments // may be overwritten by C code generated by an optimizing compiler); // however can use the register value directly if it is callee saved. 
@@ -2078,7 +2158,7 @@ // check for pending exceptions #ifdef ASSERT Label L; - __ cmpl(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD); + __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, L); __ should_not_reach_here(); __ bind(L); @@ -2116,7 +2196,7 @@ //--------------------------------------------------------------------------- // Initialization - + void generate_initial() { // Generates all stubs and initializes the entry points @@ -2128,7 +2208,7 @@ StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); - // is referenced by megamorphic call + // is referenced by megamorphic call StubRoutines::_catch_exception_entry = generate_catch_exception(); // These are currently used by Solaris/Intel @@ -2140,8 +2220,8 @@ // platform dependent create_control_words(); - StubRoutines::i486::_verify_mxcsr_entry = generate_verify_mxcsr(); - StubRoutines::i486::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd(); + StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); + StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd(); StubRoutines::_d2i_wrapper = generate_d2i_wrapper(T_INT, CAST_FROM_FN_PTR(address, SharedRuntime::d2i)); StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG, @@ -2151,7 +2231,7 @@ void generate_all() { // Generates all stubs and initializes the entry points - + // These entry points require SharedInfo::stack0 to be set up in non-core builds // and need to be relocatable, so they each fabricate a RuntimeStub internally. StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false); @@ -2162,10 +2242,10 @@ StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false); //------------------------------------------------------------------------------------------------------------------------ - // entry points that are platform specific + // entry points that are platform specific // support for verify_oop (must happen after universe_init) - StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); + StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); // arraycopy stubs used by compilers generate_arraycopy_stubs(); @@ -2173,7 +2253,7 @@ public: - StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { + StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { if (all) { generate_all(); } else { --- old/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp 2009-08-01 04:08:14.988778474 +0100 +++ new/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp 2009-08-01 04:08:14.885663880 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)stubGenerator_x86_64.cpp 1.49 07/10/05 19:12:48 JVM" -#endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ #include "incls/_precompiled.incl" @@ -33,6 +30,8 @@ // see the comment in stubRoutines.hpp #define __ _masm-> +#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) +#define a__ ((Assembler*)_masm)-> #ifdef PRODUCT #define BLOCK_COMMENT(str) /* nothing */ @@ -161,7 +160,7 @@ entry_point_off = 6, parameters_off = 7, parameter_size_off = 8, - thread_off = 9 + thread_off = 9 #else rsp_after_call_off = -12, mxcsr_off = rsp_after_call_off, @@ -189,7 +188,7 @@ "adjust this code"); StubCodeMark mark(this, "StubRoutines", "call_stub"); address start = __ pc(); - + // same as in generate_catch_exception()! const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); @@ -212,32 +211,32 @@ // stub code __ enter(); - __ subq(rsp, -rsp_after_call_off * wordSize); + __ subptr(rsp, -rsp_after_call_off * wordSize); // save register parameters #ifndef _WIN64 - __ movq(parameters, c_rarg5); // parameters - __ movq(entry_point, c_rarg4); // entry_point + __ movptr(parameters, c_rarg5); // parameters + __ movptr(entry_point, c_rarg4); // entry_point #endif - __ movq(method, c_rarg3); // method - __ movl(result_type, c_rarg2); // result type - __ movq(result, c_rarg1); // result - __ movq(call_wrapper, c_rarg0); // call wrapper + __ movptr(method, c_rarg3); // method + __ movl(result_type, c_rarg2); // result type + __ movptr(result, c_rarg1); // result + __ movptr(call_wrapper, c_rarg0); // call wrapper // save regs belonging to calling function - __ movq(rbx_save, rbx); - __ movq(r12_save, r12); - __ movq(r13_save, r13); - __ movq(r14_save, r14); - __ movq(r15_save, r15); + __ movptr(rbx_save, rbx); + __ movptr(r12_save, r12); + __ movptr(r13_save, r13); + __ movptr(r14_save, r14); + __ movptr(r15_save, r15); #ifdef _WIN64 const Address rdi_save(rbp, rdi_off * wordSize); const Address rsi_save(rbp, rsi_off * wordSize); - __ movq(rsi_save, rsi); - __ movq(rdi_save, rdi); + __ movptr(rsi_save, rsi); + __ movptr(rdi_save, rdi); #else const Address mxcsr_save(rbp, mxcsr_off * wordSize); { @@ -245,7 +244,7 @@ __ stmxcsr(mxcsr_save); __ movl(rax, mxcsr_save); __ andl(rax, MXCSR_MASK); // Only check control and mask bits - ExternalAddress mxcsr_std(StubRoutines::amd64::mxcsr_std()); + ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std()); __ cmp32(rax, mxcsr_std); __ jcc(Assembler::equal, skip_ldmx); __ ldmxcsr(mxcsr_std); @@ -254,13 +253,14 @@ #endif // Load up thread register - __ movq(r15_thread, thread); + __ movptr(r15_thread, thread); + __ reinit_heapbase(); #ifdef ASSERT // make sure we have no pending exceptions - { + { Label L; - __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD); + __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); __ stop("StubRoutines::call_stub: entered with pending exception"); __ bind(L); @@ -275,25 +275,25 @@ __ jcc(Assembler::zero, parameters_done); Label loop; - __ movq(c_rarg2, parameters); // parameter pointer - __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1 + __ movptr(c_rarg2, parameters); // parameter pointer + __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1 __ BIND(loop); if (TaggedStackInterpreter) { - __ movq(rax, Address(c_rarg2, 0)); // get tag - __ addq(c_rarg2, wordSize); // advance to next tag - __ pushq(rax); // pass tag - } - __ movq(rax, Address(c_rarg2, 0)); // get parameter - __ addq(c_rarg2, wordSize); // advance to next parameter - __ decrementl(c_rarg1); // decrement counter - __ pushq(rax); // 
pass parameter + __ movl(rax, Address(c_rarg2, 0)); // get tag + __ addptr(c_rarg2, wordSize); // advance to next tag + __ push(rax); // pass tag + } + __ movptr(rax, Address(c_rarg2, 0));// get parameter + __ addptr(c_rarg2, wordSize); // advance to next parameter + __ decrementl(c_rarg1); // decrement counter + __ push(rax); // pass parameter __ jcc(Assembler::notZero, loop); // call Java function __ BIND(parameters_done); - __ movq(rbx, method); // get methodOop - __ movq(c_rarg1, entry_point); // get entry_point - __ movq(r13, rsp); // set sender sp + __ movptr(rbx, method); // get methodOop + __ movptr(c_rarg1, entry_point); // get entry_point + __ mov(r13, rsp); // set sender sp BLOCK_COMMENT("call Java function"); __ call(c_rarg1); @@ -302,7 +302,7 @@ // store result depending on type (everything that is not // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) - __ movq(c_rarg0, result); + __ movptr(c_rarg0, result); Label is_long, is_float, is_double, exit; __ movl(c_rarg1, result_type); __ cmpl(c_rarg1, T_OBJECT); @@ -320,16 +320,16 @@ __ BIND(exit); // pop parameters - __ leaq(rsp, rsp_after_call); + __ lea(rsp, rsp_after_call); #ifdef ASSERT // verify that threads correspond - { + { Label L, S; - __ cmpq(r15_thread, thread); + __ cmpptr(r15_thread, thread); __ jcc(Assembler::notEqual, S); __ get_thread(rbx); - __ cmpq(r15_thread, rbx); + __ cmpptr(r15_thread, rbx); __ jcc(Assembler::equal, L); __ bind(S); __ jcc(Assembler::equal, L); @@ -339,24 +339,24 @@ #endif // restore regs belonging to calling function - __ movq(r15, r15_save); - __ movq(r14, r14_save); - __ movq(r13, r13_save); - __ movq(r12, r12_save); - __ movq(rbx, rbx_save); + __ movptr(r15, r15_save); + __ movptr(r14, r14_save); + __ movptr(r13, r13_save); + __ movptr(r12, r12_save); + __ movptr(rbx, rbx_save); #ifdef _WIN64 - __ movq(rdi, rdi_save); - __ movq(rsi, rsi_save); + __ movptr(rdi, rdi_save); + __ movptr(rsi, rsi_save); #else __ ldmxcsr(mxcsr_save); #endif // restore rsp - __ addq(rsp, -rsp_after_call_off * wordSize); + __ addptr(rsp, -rsp_after_call_off * wordSize); // return - __ popq(rbp); + __ pop(rbp); __ ret(0); // handle return types different from T_INT @@ -397,12 +397,12 @@ #ifdef ASSERT // verify that threads correspond - { + { Label L, S; - __ cmpq(r15_thread, thread); + __ cmpptr(r15_thread, thread); __ jcc(Assembler::notEqual, S); __ get_thread(rbx); - __ cmpq(r15_thread, rbx); + __ cmpptr(r15_thread, rbx); __ jcc(Assembler::equal, L); __ bind(S); __ stop("StubRoutines::catch_exception: threads must correspond"); @@ -413,9 +413,9 @@ // set pending exception __ verify_oop(rax); - __ movq(Address(r15_thread, Thread::pending_exception_offset()), rax); + __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); __ lea(rscratch1, ExternalAddress((address)__FILE__)); - __ movq(Address(r15_thread, Thread::exception_file_offset()), rscratch1); + __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1); __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); // complete return to VM @@ -425,7 +425,7 @@ return start; } - + // Continuation point for runtime calls returning with a pending // exception. The pending exception check happened in the runtime // or native call stub. 
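What the result-type dispatch above amounts to, sketched in C++ (the enum values and the union are illustrative; as the comment in the stub says, anything that is not T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is stored as a 32-bit int):

#include <cstdint>

enum ResultTypeSketch { RT_INT, RT_OBJECT, RT_LONG, RT_FLOAT, RT_DOUBLE };

union JavaValueSketch { int32_t i; int64_t j; float f; double d; };

static void store_result(void* result_slot, ResultTypeSketch type, JavaValueSketch v) {
  switch (type) {
    case RT_OBJECT:                                        // stored as a full word,
    case RT_LONG:   *(int64_t*)result_slot = v.j; break;   // same handling as T_LONG
    case RT_FLOAT:  *(float*)  result_slot = v.f; break;
    case RT_DOUBLE: *(double*) result_slot = v.d; break;
    default:        *(int32_t*)result_slot = v.i; break;   // everything else: T_INT
  }
}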
The pending exception in Thread is @@ -452,9 +452,9 @@ #ifdef ASSERT // make sure this code is only executed if there is a pending exception - { + { Label L; - __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL); + __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL); __ jcc(Assembler::notEqual, L); __ stop("StubRoutines::forward exception: no pending exception (1)"); __ bind(L); @@ -462,23 +462,23 @@ #endif // compute exception handler into rbx - __ movq(c_rarg0, Address(rsp, 0)); + __ movptr(c_rarg0, Address(rsp, 0)); BLOCK_COMMENT("call exception_handler_for_return_address"); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0); - __ movq(rbx, rax); + __ mov(rbx, rax); // setup rax & rdx, remove return address & clear pending exception - __ popq(rdx); - __ movq(rax, Address(r15_thread, Thread::pending_exception_offset())); + __ pop(rdx); + __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset())); __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD); #ifdef ASSERT // make sure exception is set - { + { Label L; - __ testq(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::notEqual, L); __ stop("StubRoutines::forward exception: no pending exception (2)"); __ bind(L); @@ -496,11 +496,11 @@ } // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest) - // + // // Arguments : // c_rarg0: exchange_value // c_rarg0: dest - // + // // Result: // *dest <- ex, return (orig *dest) address generate_atomic_xchg() { @@ -515,19 +515,19 @@ } // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) - // + // // Arguments : // c_rarg0: exchange_value // c_rarg1: dest - // + // // Result: // *dest <- ex, return (orig *dest) address generate_atomic_xchg_ptr() { StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr"); address start = __ pc(); - __ movq(rax, c_rarg0); // Copy to eax we need a return value anyhow - __ xchgq(rax, Address(c_rarg1, 0)); // automatic LOCK + __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow + __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK __ ret(0); return start; @@ -535,17 +535,17 @@ // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest, // jint compare_value) - // + // // Arguments : // c_rarg0: exchange_value // c_rarg1: dest // c_rarg2: compare_value - // + // // Result: - // if ( compare_value == *dest ) { + // if ( compare_value == *dest ) { // *dest = exchange_value // return compare_value; - // else + // else // return *dest; address generate_atomic_cmpxchg() { StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); @@ -553,25 +553,25 @@ __ movl(rax, c_rarg2); if ( os::is_MP() ) __ lock(); - __ cmpxchgl(c_rarg0, Address(c_rarg1, 0)); + __ cmpxchgl(c_rarg0, Address(c_rarg1, 0)); __ ret(0); return start; } - // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value, + // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value, // volatile jlong* dest, // jlong compare_value) // Arguments : // c_rarg0: exchange_value // c_rarg1: dest // c_rarg2: compare_value - // + // // Result: - // if ( compare_value == *dest ) { + // if ( compare_value == *dest ) { // *dest = exchange_value // return compare_value; - // else + // else // return *dest; address generate_atomic_cmpxchg_long() { StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); @@ 
-579,18 +579,18 @@ __ movq(rax, c_rarg2); if ( os::is_MP() ) __ lock(); - __ cmpxchgq(c_rarg0, Address(c_rarg1, 0)); + __ cmpxchgq(c_rarg0, Address(c_rarg1, 0)); __ ret(0); return start; } // Support for jint atomic::add(jint add_value, volatile jint* dest) - // + // // Arguments : // c_rarg0: add_value // c_rarg1: dest - // + // // Result: // *dest += add_value // return *dest; @@ -598,9 +598,9 @@ StubCodeMark mark(this, "StubRoutines", "atomic_add"); address start = __ pc(); - __ movl(rax, c_rarg0); + __ movl(rax, c_rarg0); if ( os::is_MP() ) __ lock(); - __ xaddl(Address(c_rarg1, 0), c_rarg0); + __ xaddl(Address(c_rarg1, 0), c_rarg0); __ addl(rax, c_rarg0); __ ret(0); @@ -608,11 +608,11 @@ } // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) - // + // // Arguments : // c_rarg0: add_value // c_rarg1: dest - // + // // Result: // *dest += add_value // return *dest; @@ -620,19 +620,19 @@ StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr"); address start = __ pc(); - __ movq(rax, c_rarg0); // Copy to eax we need a return value anyhow + __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow if ( os::is_MP() ) __ lock(); - __ xaddl(Address(c_rarg1, 0), c_rarg0); - __ addl(rax, c_rarg0); + __ xaddptr(Address(c_rarg1, 0), c_rarg0); + __ addptr(rax, c_rarg0); __ ret(0); return start; } // Support for intptr_t OrderAccess::fence() - // + // // Arguments : - // + // // Result: address generate_orderaccess_fence() { StubCodeMark mark(this, "StubRoutines", "orderaccess_fence"); @@ -655,19 +655,19 @@ const Address older_fp(rax, 0); address start = __ pc(); - __ enter(); - __ movq(rax, old_fp); // callers fp - __ movq(rax, older_fp); // the frame for ps() - __ popq(rbp); + __ enter(); + __ movptr(rax, old_fp); // callers fp + __ movptr(rax, older_fp); // the frame for ps() + __ pop(rbp); __ ret(0); return start; } - + //---------------------------------------------------------------------------------------------------- // Support for void verify_mxcsr() - // - // This routine is used with -Xcheck:jni to verify that native + // + // This routine is used with -Xcheck:jni to verify that native // JNI code does not return to Java code without restoring the // MXCSR register to our expected state. 
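The contracts spelled out in the comments for the atomic stubs above, expressed with standard C++11 atomics rather than the lock-prefixed instructions the stubs emit (a semantic sketch only):

#include <atomic>
#include <cstdint>

// cmpxchg: install exchange_value only if *dest still holds compare_value;
// return compare_value on success, otherwise the value actually found.
static int32_t atomic_cmpxchg_sketch(int32_t exchange_value,
                                     std::atomic<int32_t>* dest,
                                     int32_t compare_value) {
  int32_t expected = compare_value;
  dest->compare_exchange_strong(expected, exchange_value);
  return expected;     // unchanged on success, updated to *dest on failure
}

// add: return the *new* value of *dest, which is why the stub follows the
// lock xadd (which yields the old value) with one more plain add.
static int32_t atomic_add_sketch(int32_t add_value, std::atomic<int32_t>* dest) {
  return dest->fetch_add(add_value) + add_value;
}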
@@ -679,21 +679,21 @@ if (CheckJNICalls) { Label ok_ret; - __ pushq(rax); - __ subq(rsp, wordSize); // allocate a temp location + __ push(rax); + __ subptr(rsp, wordSize); // allocate a temp location __ stmxcsr(mxcsr_save); __ movl(rax, mxcsr_save); __ andl(rax, MXCSR_MASK); // Only check control and mask bits - __ cmpl(rax, *(int *)(StubRoutines::amd64::mxcsr_std())); + __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std())); __ jcc(Assembler::equal, ok_ret); - + __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall"); - __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std())); + __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std())); __ bind(ok_ret); - __ addq(rsp, wordSize); - __ popq(rax); + __ addptr(rsp, wordSize); + __ pop(rax); } __ ret(0); @@ -709,10 +709,10 @@ Label L; - __ pushq(rax); - __ pushq(c_rarg3); - __ pushq(c_rarg2); - __ pushq(c_rarg1); + __ push(rax); + __ push(c_rarg3); + __ push(c_rarg2); + __ push(c_rarg1); __ movl(rax, 0x7f800000); __ xorl(c_rarg3, c_rarg3); @@ -727,12 +727,12 @@ __ cmovl(Assembler::positive, c_rarg3, rax); __ bind(L); - __ movq(inout, c_rarg3); + __ movptr(inout, c_rarg3); - __ popq(c_rarg1); - __ popq(c_rarg2); - __ popq(c_rarg3); - __ popq(rax); + __ pop(c_rarg1); + __ pop(c_rarg2); + __ pop(c_rarg3); + __ pop(rax); __ ret(0); @@ -746,10 +746,10 @@ Label L; - __ pushq(rax); - __ pushq(c_rarg3); - __ pushq(c_rarg2); - __ pushq(c_rarg1); + __ push(rax); + __ push(c_rarg3); + __ push(c_rarg2); + __ push(c_rarg1); __ movl(rax, 0x7f800000); __ xorl(c_rarg3, c_rarg3); @@ -761,15 +761,15 @@ __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong __ mov64(c_rarg3, 0x8000000000000000); __ mov64(rax, 0x7fffffffffffffff); - __ cmovq(Assembler::positive, c_rarg3, rax); + __ cmov(Assembler::positive, c_rarg3, rax); __ bind(L); - __ movq(inout, c_rarg3); + __ movptr(inout, c_rarg3); - __ popq(c_rarg1); - __ popq(c_rarg2); - __ popq(c_rarg3); - __ popq(rax); + __ pop(c_rarg1); + __ pop(c_rarg2); + __ pop(c_rarg3); + __ pop(rax); __ ret(0); @@ -784,19 +784,19 @@ Label L; - __ pushq(rax); - __ pushq(c_rarg3); - __ pushq(c_rarg2); - __ pushq(c_rarg1); - __ pushq(c_rarg0); + __ push(rax); + __ push(c_rarg3); + __ push(c_rarg2); + __ push(c_rarg1); + __ push(c_rarg0); __ movl(rax, 0x7ff00000); __ movq(c_rarg2, inout); __ movl(c_rarg3, c_rarg2); - __ movq(c_rarg1, c_rarg2); - __ movq(c_rarg0, c_rarg2); + __ mov(c_rarg1, c_rarg2); + __ mov(c_rarg0, c_rarg2); __ negl(c_rarg3); - __ shrq(c_rarg1, 0x20); + __ shrptr(c_rarg1, 0x20); __ orl(c_rarg3, c_rarg2); __ andl(c_rarg1, 0x7fffffff); __ xorl(c_rarg2, c_rarg2); @@ -804,19 +804,19 @@ __ orl(c_rarg1, c_rarg3); __ cmpl(rax, c_rarg1); __ jcc(Assembler::negative, L); // NaN -> 0 - __ testq(c_rarg0, c_rarg0); // signed ? min_jint : max_jint + __ testptr(c_rarg0, c_rarg0); // signed ? 
min_jint : max_jint __ movl(c_rarg2, 0x80000000); __ movl(rax, 0x7fffffff); - __ cmovl(Assembler::positive, c_rarg2, rax); - + __ cmov(Assembler::positive, c_rarg2, rax); + __ bind(L); - __ movq(inout, c_rarg2); + __ movptr(inout, c_rarg2); - __ popq(c_rarg0); - __ popq(c_rarg1); - __ popq(c_rarg2); - __ popq(c_rarg3); - __ popq(rax); + __ pop(c_rarg0); + __ pop(c_rarg1); + __ pop(c_rarg2); + __ pop(c_rarg3); + __ pop(rax); __ ret(0); @@ -831,19 +831,19 @@ Label L; - __ pushq(rax); - __ pushq(c_rarg3); - __ pushq(c_rarg2); - __ pushq(c_rarg1); - __ pushq(c_rarg0); + __ push(rax); + __ push(c_rarg3); + __ push(c_rarg2); + __ push(c_rarg1); + __ push(c_rarg0); __ movl(rax, 0x7ff00000); __ movq(c_rarg2, inout); __ movl(c_rarg3, c_rarg2); - __ movq(c_rarg1, c_rarg2); - __ movq(c_rarg0, c_rarg2); + __ mov(c_rarg1, c_rarg2); + __ mov(c_rarg0, c_rarg2); __ negl(c_rarg3); - __ shrq(c_rarg1, 0x20); + __ shrptr(c_rarg1, 0x20); __ orl(c_rarg3, c_rarg2); __ andl(c_rarg1, 0x7fffffff); __ xorl(c_rarg2, c_rarg2); @@ -855,15 +855,15 @@ __ mov64(c_rarg2, 0x8000000000000000); __ mov64(rax, 0x7fffffffffffffff); __ cmovq(Assembler::positive, c_rarg2, rax); - + __ bind(L); __ movq(inout, c_rarg2); - __ popq(c_rarg0); - __ popq(c_rarg1); - __ popq(c_rarg2); - __ popq(c_rarg3); - __ popq(rax); + __ pop(c_rarg0); + __ pop(c_rarg1); + __ pop(c_rarg2); + __ pop(c_rarg3); + __ pop(rax); __ ret(0); @@ -890,17 +890,17 @@ StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); address start = __ pc(); - __ pushq(0); // hole for return address-to-be - __ pushaq(); // push registers + __ push(0); // hole for return address-to-be + __ pusha(); // push registers Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); - __ subq(rsp, frame::arg_reg_save_area_bytes); + __ subptr(rsp, frame::arg_reg_save_area_bytes); BLOCK_COMMENT("call handle_unsafe_access"); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); - __ addq(rsp, frame::arg_reg_save_area_bytes); + __ addptr(rsp, frame::arg_reg_save_area_bytes); - __ movq(next_pc, rax); // stuff next address - __ popaq(); + __ movptr(next_pc, rax); // stuff next address + __ popa(); __ ret(0); // jump to next address return start; @@ -910,102 +910,124 @@ // // Arguments: // all args on stack! - // + // // Stack after saving c_rarg3: // [tos + 0]: saved c_rarg3 // [tos + 1]: saved c_rarg2 - // [tos + 2]: saved flags - // [tos + 3]: return address - // * [tos + 4]: error message (char*) - // * [tos + 5]: object to verify (oop) - // * [tos + 6]: saved rax - saved by caller and bashed + // [tos + 2]: saved r12 (several TemplateTable methods use it) + // [tos + 3]: saved flags + // [tos + 4]: return address + // * [tos + 5]: error message (char*) + // * [tos + 6]: object to verify (oop) + // * [tos + 7]: saved rax - saved by caller and bashed // * = popped on exit address generate_verify_oop() { StubCodeMark mark(this, "StubRoutines", "verify_oop"); address start = __ pc(); - + Label exit, error; - __ pushfq(); + __ pushf(); __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); + __ push(r12); + // save c_rarg2 and c_rarg3 - __ pushq(c_rarg2); - __ pushq(c_rarg3); + __ push(c_rarg2); + __ push(c_rarg3); + + enum { + // After previous pushes. + oop_to_verify = 6 * wordSize, + saved_rax = 7 * wordSize, + + // Before the call to MacroAssembler::debug(), see below. 
+ return_addr = 16 * wordSize, + error_msg = 17 * wordSize + }; // get object - __ movq(rax, Address(rsp, 5 * wordSize)); + __ movptr(rax, Address(rsp, oop_to_verify)); // make sure object is 'reasonable' - __ testq(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, exit); // if obj is NULL it is OK // Check if the oop is in the right area of memory - __ movq(c_rarg2, rax); + __ movptr(c_rarg2, rax); __ movptr(c_rarg3, (int64_t) Universe::verify_oop_mask()); - __ andq(c_rarg2, c_rarg3); + __ andptr(c_rarg2, c_rarg3); __ movptr(c_rarg3, (int64_t) Universe::verify_oop_bits()); - __ cmpq(c_rarg2, c_rarg3); + __ cmpptr(c_rarg2, c_rarg3); __ jcc(Assembler::notZero, error); + // set r12 to heapbase for load_klass() + __ reinit_heapbase(); + // make sure klass is 'reasonable' - __ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass - __ testq(rax, rax); + __ load_klass(rax, rax); // get klass + __ testptr(rax, rax); __ jcc(Assembler::zero, error); // if klass is NULL it is broken // Check if the klass is in the right area of memory - __ movq(c_rarg2, rax); + __ mov(c_rarg2, rax); __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask()); - __ andq(c_rarg2, c_rarg3); + __ andptr(c_rarg2, c_rarg3); __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits()); - __ cmpq(c_rarg2, c_rarg3); + __ cmpptr(c_rarg2, c_rarg3); __ jcc(Assembler::notZero, error); // make sure klass' klass is 'reasonable' - __ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes())); - __ testq(rax, rax); + __ load_klass(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken // Check if the klass' klass is in the right area of memory __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask()); - __ andq(rax, c_rarg3); + __ andptr(rax, c_rarg3); __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits()); - __ cmpq(rax, c_rarg3); + __ cmpptr(rax, c_rarg3); __ jcc(Assembler::notZero, error); // return if everything seems ok __ bind(exit); - __ movq(rax, Address(rsp, 6 * wordSize)); // get saved rax back - __ popq(c_rarg3); // restore c_rarg3 - __ popq(c_rarg2); // restore c_rarg2 - __ popfq(); // restore flags + __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back + __ pop(c_rarg3); // restore c_rarg3 + __ pop(c_rarg2); // restore c_rarg2 + __ pop(r12); // restore r12 + __ popf(); // restore flags __ ret(3 * wordSize); // pop caller saved stuff // handle errors __ bind(error); - __ movq(rax, Address(rsp, 6 * wordSize)); // get saved rax back - __ popq(c_rarg3); // get saved c_rarg3 back - __ popq(c_rarg2); // get saved c_rarg2 back - __ popfq(); // get saved flags off stack -- + __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back + __ pop(c_rarg3); // get saved c_rarg3 back + __ pop(c_rarg2); // get saved c_rarg2 back + __ pop(r12); // get saved r12 back + __ popf(); // get saved flags off stack -- // will be ignored - __ pushaq(); // push registers + __ pusha(); // push registers // (rip is already // already pushed) - // debug(char* msg, int64_t regs[]) + // debug(char* msg, int64_t pc, int64_t regs[]) // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and // pushed all the registers, so now the stack looks like: // [tos + 0] 16 saved registers // [tos + 16] return address - // [tos + 17] error message (char*) - - __ movq(c_rarg0, Address(rsp, 17 * wordSize)); // pass address of error message - __ movq(c_rarg1, rsp); // pass address of regs on stack - __ movq(r12, rsp); // remember rsp - __ subq(rsp, 
frame::arg_reg_save_area_bytes);// windows - __ andq(rsp, -16); // align stack as required by ABI + // * [tos + 17] error message (char*) + // * [tos + 18] object to verify (oop) + // * [tos + 19] saved rax - saved by caller and bashed + // * = popped on exit + + __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message + __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address + __ movq(c_rarg2, rsp); // pass address of regs on stack + __ mov(r12, rsp); // remember rsp + __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows + __ andptr(rsp, -16); // align stack as required by ABI BLOCK_COMMENT("call MacroAssembler::debug"); - __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug))); - __ movq(rsp, r12); // restore rsp - __ popaq(); // pop registers - __ ret(3 * wordSize); // pop caller saved stuff + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); + __ mov(rsp, r12); // restore rsp + __ popa(); // pop registers (includes r12) + __ ret(3 * wordSize); // pop caller saved stuff return start; } @@ -1038,7 +1060,7 @@ assert_different_registers(Rtmp, Rint); __ movslq(Rtmp, Rint); __ cmpq(Rtmp, Rint); - __ jccb(Assembler::equal, L); + __ jcc(Assembler::equal, L); __ stop("high 32-bits of int value are not 0"); __ bind(L); #endif @@ -1067,16 +1089,16 @@ const Register count = c_rarg2; const Register end_from = rax; - __ cmpq(to, from); - __ leaq(end_from, Address(from, count, sf, 0)); + __ cmpptr(to, from); + __ lea(end_from, Address(from, count, sf, 0)); if (NOLp == NULL) { ExternalAddress no_overlap(no_overlap_target); __ jump_cc(Assembler::belowEqual, no_overlap); - __ cmpq(to, end_from); + __ cmpptr(to, end_from); __ jump_cc(Assembler::aboveEqual, no_overlap); } else { __ jcc(Assembler::belowEqual, (*NOLp)); - __ cmpq(to, end_from); + __ cmpptr(to, end_from); __ jcc(Assembler::aboveEqual, (*NOLp)); } } @@ -1098,19 +1120,19 @@ assert(nargs == 3 || nargs == 4, "else fix"); #ifdef _WIN64 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, - "unexpected argument registers"); + "unexpected argument registers"); if (nargs >= 4) - __ movq(rax, r9); // r9 is also saved_rdi - __ movq(saved_rdi, rdi); - __ movq(saved_rsi, rsi); - __ movq(rdi, rcx); // c_rarg0 - __ movq(rsi, rdx); // c_rarg1 - __ movq(rdx, r8); // c_rarg2 + __ mov(rax, r9); // r9 is also saved_rdi + __ movptr(saved_rdi, rdi); + __ movptr(saved_rsi, rsi); + __ mov(rdi, rcx); // c_rarg0 + __ mov(rsi, rdx); // c_rarg1 + __ mov(rdx, r8); // c_rarg2 if (nargs >= 4) - __ movq(rcx, rax); // c_rarg3 (via rax) + __ mov(rcx, rax); // c_rarg3 (via rax) #else assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, - "unexpected argument registers"); + "unexpected argument registers"); #endif } @@ -1118,8 +1140,8 @@ const Register saved_rdi = r9; const Register saved_rsi = r10; #ifdef _WIN64 - __ movq(rdi, saved_rdi); - __ movq(rsi, saved_rsi); + __ movptr(rdi, saved_rdi); + __ movptr(rsi, saved_rsi); #endif } @@ -1131,30 +1153,37 @@ // Destroy no registers! 
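The array_overlap_test logic above, in C++ terms: the conjoint stub may branch to the disjoint (forward-copy) entry whenever the destination does not start inside the source range, assuming the scale factor turns the count into bytes:

#include <cstddef>

// True when a simple low-to-high copy is safe: either dst precedes src, or
// dst starts at or after the end of the source range -- the two jump_cc
// tests emitted by the stub.
static bool no_overlap(const char* from, const char* to, size_t byte_count) {
  const char* end_from = from + byte_count;
  return (to <= from) || (to >= end_from);
}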
// void gen_write_ref_array_pre_barrier(Register addr, Register count) { -#if 0 // G1 - only - assert_different_registers(addr, c_rarg1); - assert_different_registers(count, c_rarg0); BarrierSet* bs = Universe::heap()->barrier_set(); switch (bs->kind()) { case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: { - __ pushaq(); // push registers - __ movq(c_rarg0, addr); - __ movq(c_rarg1, count); - __ call(RuntimeAddress(BarrierSet::static_write_ref_array_pre)); - __ popaq(); + __ pusha(); // push registers + if (count == c_rarg0) { + if (addr == c_rarg1) { + // exactly backwards!! + __ xchgptr(c_rarg1, c_rarg0); + } else { + __ movptr(c_rarg1, count); + __ movptr(c_rarg0, addr); + } + + } else { + __ movptr(c_rarg0, addr); + __ movptr(c_rarg1, count); + } + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre))); + __ popa(); } break; case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: - case BarrierSet::ModRef: + case BarrierSet::ModRef: break; - default : + default: ShouldNotReachHere(); - + } -#endif // 0 G1 - only } // @@ -1171,24 +1200,22 @@ assert_different_registers(start, end, scratch); BarrierSet* bs = Universe::heap()->barrier_set(); switch (bs->kind()) { -#if 0 // G1 - only case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: { - __ pushaq(); // push registers (overkill) + __ pusha(); // push registers (overkill) // must compute element count unless barrier set interface is changed (other platforms supply count) assert_different_registers(start, end, scratch); - __ leaq(scratch, Address(end, wordSize)); - __ subq(scratch, start); - __ shrq(scratch, LogBytesPerWord); - __ movq(c_rarg0, start); - __ movq(c_rarg1, scratch); - __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)); - __ popaq(); + __ lea(scratch, Address(end, wordSize)); + __ subptr(scratch, start); + __ shrptr(scratch, LogBytesPerWord); + __ mov(c_rarg0, start); + __ mov(c_rarg1, scratch); + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post))); + __ popa(); } break; -#endif // 0 G1 - only case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: { @@ -1197,20 +1224,33 @@ Label L_loop; - __ shrq(start, CardTableModRefBS::card_shift); - __ shrq(end, CardTableModRefBS::card_shift); - __ subq(end, start); // number of bytes to copy - - const Register count = end; // 'end' register contains bytes count now - __ lea(scratch, ExternalAddress((address)ct->byte_map_base)); - __ addq(start, scratch); + __ shrptr(start, CardTableModRefBS::card_shift); + __ shrptr(end, CardTableModRefBS::card_shift); + __ subptr(end, start); // number of bytes to copy + + intptr_t disp = (intptr_t) ct->byte_map_base; + if (__ is_simm32(disp)) { + Address cardtable(noreg, noreg, Address::no_scale, disp); + __ lea(scratch, cardtable); + } else { + ExternalAddress cardtable((address)disp); + __ lea(scratch, cardtable); + } + + const Register count = end; // 'end' register contains bytes count now + __ addptr(start, scratch); __ BIND(L_loop); __ movb(Address(start, count, Address::times_1), 0); - __ decrementq(count); + __ decrement(count); __ jcc(Assembler::greaterEqual, L_loop); } - } - } + break; + default: + ShouldNotReachHere(); + + } + } + // Copy big chunks forward // @@ -1222,25 +1262,33 @@ // L_copy_32_bytes - entry label // L_copy_8_bytes - exit label // - void copy_32_bytes_forward(Register end_from, Register end_to, - Register qword_count, Register to, + void 
copy_32_bytes_forward(Register end_from, Register end_to, + Register qword_count, Register to, Label& L_copy_32_bytes, Label& L_copy_8_bytes) { DEBUG_ONLY(__ stop("enter at entry label, not here")); Label L_loop; __ align(16); __ BIND(L_loop); - __ movq(to, Address(end_from, qword_count, Address::times_8, -24)); - __ movq(Address(end_to, qword_count, Address::times_8, -24), to); - __ movq(to, Address(end_from, qword_count, Address::times_8, -16)); - __ movq(Address(end_to, qword_count, Address::times_8, -16), to); - __ movq(to, Address(end_from, qword_count, Address::times_8, - 8)); - __ movq(Address(end_to, qword_count, Address::times_8, - 8), to); - __ movq(to, Address(end_from, qword_count, Address::times_8, - 0)); - __ movq(Address(end_to, qword_count, Address::times_8, - 0), to); + if(UseUnalignedLoadStores) { + __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); + __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); + __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8)); + __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1); + + } else { + __ movq(to, Address(end_from, qword_count, Address::times_8, -24)); + __ movq(Address(end_to, qword_count, Address::times_8, -24), to); + __ movq(to, Address(end_from, qword_count, Address::times_8, -16)); + __ movq(Address(end_to, qword_count, Address::times_8, -16), to); + __ movq(to, Address(end_from, qword_count, Address::times_8, - 8)); + __ movq(Address(end_to, qword_count, Address::times_8, - 8), to); + __ movq(to, Address(end_from, qword_count, Address::times_8, - 0)); + __ movq(Address(end_to, qword_count, Address::times_8, - 0), to); + } __ BIND(L_copy_32_bytes); - __ addq(qword_count, 4); + __ addptr(qword_count, 4); __ jcc(Assembler::lessEqual, L_loop); - __ subq(qword_count, 4); + __ subptr(qword_count, 4); __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords } @@ -1255,25 +1303,33 @@ // L_copy_32_bytes - entry label // L_copy_8_bytes - exit label // - void copy_32_bytes_backward(Register from, Register dest, - Register qword_count, Register to, + void copy_32_bytes_backward(Register from, Register dest, + Register qword_count, Register to, Label& L_copy_32_bytes, Label& L_copy_8_bytes) { DEBUG_ONLY(__ stop("enter at entry label, not here")); Label L_loop; __ align(16); __ BIND(L_loop); - __ movq(to, Address(from, qword_count, Address::times_8, 24)); - __ movq(Address(dest, qword_count, Address::times_8, 24), to); - __ movq(to, Address(from, qword_count, Address::times_8, 16)); - __ movq(Address(dest, qword_count, Address::times_8, 16), to); - __ movq(to, Address(from, qword_count, Address::times_8, 8)); - __ movq(Address(dest, qword_count, Address::times_8, 8), to); - __ movq(to, Address(from, qword_count, Address::times_8, 0)); - __ movq(Address(dest, qword_count, Address::times_8, 0), to); + if(UseUnalignedLoadStores) { + __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16)); + __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0); + __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); + __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); + + } else { + __ movq(to, Address(from, qword_count, Address::times_8, 24)); + __ movq(Address(dest, qword_count, Address::times_8, 24), to); + __ movq(to, Address(from, qword_count, Address::times_8, 16)); + __ movq(Address(dest, qword_count, Address::times_8, 16), to); + __ movq(to, Address(from, qword_count, Address::times_8, 8)); + __ movq(Address(dest, 
qword_count, Address::times_8, 8), to); + __ movq(to, Address(from, qword_count, Address::times_8, 0)); + __ movq(Address(dest, qword_count, Address::times_8, 0), to); + } __ BIND(L_copy_32_bytes); - __ subq(qword_count, 4); + __ subptr(qword_count, 4); __ jcc(Assembler::greaterEqual, L_loop); - __ addq(qword_count, 4); + __ addptr(qword_count, 4); __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords } @@ -1325,45 +1381,45 @@ // r9 and r10 may be used to save non-volatile registers // 'from', 'to' and 'count' are now valid - __ movq(byte_count, count); - __ shrq(count, 3); // count => qword_count + __ movptr(byte_count, count); + __ shrptr(count, 3); // count => qword_count // Copy from low to high addresses. Use 'to' as scratch. - __ leaq(end_from, Address(from, qword_count, Address::times_8, -8)); - __ leaq(end_to, Address(to, qword_count, Address::times_8, -8)); - __ negq(qword_count); // make the count negative + __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); + __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); + __ negptr(qword_count); // make the count negative __ jmp(L_copy_32_bytes); // Copy trailing qwords __ BIND(L_copy_8_bytes); __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); - __ incrementq(qword_count); + __ increment(qword_count); __ jcc(Assembler::notZero, L_copy_8_bytes); // Check for and copy trailing dword __ BIND(L_copy_4_bytes); - __ testq(byte_count, 4); + __ testl(byte_count, 4); __ jccb(Assembler::zero, L_copy_2_bytes); __ movl(rax, Address(end_from, 8)); __ movl(Address(end_to, 8), rax); - __ addq(end_from, 4); - __ addq(end_to, 4); + __ addptr(end_from, 4); + __ addptr(end_to, 4); // Check for and copy trailing word __ BIND(L_copy_2_bytes); - __ testq(byte_count, 2); + __ testl(byte_count, 2); __ jccb(Assembler::zero, L_copy_byte); __ movw(rax, Address(end_from, 8)); __ movw(Address(end_to, 8), rax); - __ addq(end_from, 2); - __ addq(end_to, 2); + __ addptr(end_from, 2); + __ addptr(end_to, 2); // Check for and copy trailing byte __ BIND(L_copy_byte); - __ testq(byte_count, 1); + __ testl(byte_count, 1); __ jccb(Assembler::zero, L_exit); __ movb(rax, Address(end_from, 8)); __ movb(Address(end_to, 8), rax); @@ -1371,7 +1427,7 @@ __ BIND(L_exit); inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -1421,28 +1477,28 @@ // r9 and r10 may be used to save non-volatile registers // 'from', 'to' and 'count' are now valid - __ movq(byte_count, count); - __ shrq(count, 3); // count => qword_count + __ movptr(byte_count, count); + __ shrptr(count, 3); // count => qword_count // Copy from high to low addresses. 
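The disjoint copies above all use the same indexing trick, which reads in C++ as follows: point just past the last qword of each array, make the count negative, and run a single counter up to zero (the trailing dword/word/byte are handled separately afterwards):

#include <cstdint>
#include <cstddef>

static void disjoint_qword_copy(const int64_t* from, int64_t* to, size_t qword_count) {
  const int64_t* end_from = from + qword_count;   // one past the last qword
  int64_t*       end_to   = to   + qword_count;
  ptrdiff_t i = -(ptrdiff_t)qword_count;          // negptr: negative index, counts up to 0
  while (i != 0) {
    end_to[i] = end_from[i];                      // the movq load/store pair
    i++;                                          // incrementq(qword_count)
  }
}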
// Check for and copy trailing byte - __ testq(byte_count, 1); + __ testl(byte_count, 1); __ jcc(Assembler::zero, L_copy_2_bytes); __ movb(rax, Address(from, byte_count, Address::times_1, -1)); __ movb(Address(to, byte_count, Address::times_1, -1), rax); - __ decrementq(byte_count); // Adjust for possible trailing word + __ decrement(byte_count); // Adjust for possible trailing word // Check for and copy trailing word __ BIND(L_copy_2_bytes); - __ testq(byte_count, 2); + __ testl(byte_count, 2); __ jcc(Assembler::zero, L_copy_4_bytes); __ movw(rax, Address(from, byte_count, Address::times_1, -2)); __ movw(Address(to, byte_count, Address::times_1, -2), rax); // Check for and copy trailing dword __ BIND(L_copy_4_bytes); - __ testq(byte_count, 4); + __ testl(byte_count, 4); __ jcc(Assembler::zero, L_copy_32_bytes); __ movl(rax, Address(from, qword_count, Address::times_8)); __ movl(Address(to, qword_count, Address::times_8), rax); @@ -1452,12 +1508,12 @@ __ BIND(L_copy_8_bytes); __ movq(rax, Address(from, qword_count, Address::times_8, -8)); __ movq(Address(to, qword_count, Address::times_8, -8), rax); - __ decrementq(qword_count); + __ decrement(qword_count); __ jcc(Assembler::notZero, L_copy_8_bytes); inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -1466,7 +1522,7 @@ inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -1519,20 +1575,20 @@ // r9 and r10 may be used to save non-volatile registers // 'from', 'to' and 'count' are now valid - __ movq(word_count, count); - __ shrq(count, 2); // count => qword_count + __ movptr(word_count, count); + __ shrptr(count, 2); // count => qword_count // Copy from low to high addresses. Use 'to' as scratch. 
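After the qword loop, the byte-copy stubs above peel off a trailing dword, word and byte by testing the low bits of the original byte count; the switch from testq to testl is safe because only the low 32 bits matter for those tests. A hedged plain-C++ sketch of the same tail logic (names are illustrative):

    #include <string.h>
    #include <stddef.h>
    #include <stdint.h>

    // Illustrative forward byte-copy tail: qwords first, then dword/word/byte
    // driven by bits 2, 1 and 0 of the byte count, like the jbyte stubs above.
    static void byte_copy_with_tail(const uint8_t* from, uint8_t* to,
                                    size_t byte_count) {
      size_t qwords = byte_count >> 3;               // count => qword_count
      memcpy(to, from, qwords * 8);                  // stands in for the 32-byte/8-byte loops
      size_t off = qwords * 8;
      if (byte_count & 4) { memcpy(to + off, from + off, 4); off += 4; }  // trailing dword
      if (byte_count & 2) { memcpy(to + off, from + off, 2); off += 2; }  // trailing word
      if (byte_count & 1) { to[off] = from[off]; }                        // trailing byte
    }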
- __ leaq(end_from, Address(from, qword_count, Address::times_8, -8)); - __ leaq(end_to, Address(to, qword_count, Address::times_8, -8)); - __ negq(qword_count); + __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); + __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); + __ negptr(qword_count); __ jmp(L_copy_32_bytes); // Copy trailing qwords __ BIND(L_copy_8_bytes); __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); - __ incrementq(qword_count); + __ increment(qword_count); __ jcc(Assembler::notZero, L_copy_8_bytes); // Original 'dest' is trashed, so we can't use it as a @@ -1540,17 +1596,17 @@ // Check for and copy trailing dword __ BIND(L_copy_4_bytes); - __ testq(word_count, 2); + __ testl(word_count, 2); __ jccb(Assembler::zero, L_copy_2_bytes); __ movl(rax, Address(end_from, 8)); __ movl(Address(end_to, 8), rax); - __ addq(end_from, 4); - __ addq(end_to, 4); + __ addptr(end_from, 4); + __ addptr(end_to, 4); // Check for and copy trailing word __ BIND(L_copy_2_bytes); - __ testq(word_count, 1); + __ testl(word_count, 1); __ jccb(Assembler::zero, L_exit); __ movw(rax, Address(end_from, 8)); __ movw(Address(end_to, 8), rax); @@ -1558,7 +1614,7 @@ __ BIND(L_exit); inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -1608,20 +1664,20 @@ // r9 and r10 may be used to save non-volatile registers // 'from', 'to' and 'count' are now valid - __ movq(word_count, count); - __ shrq(count, 2); // count => qword_count + __ movptr(word_count, count); + __ shrptr(count, 2); // count => qword_count // Copy from high to low addresses. Use 'to' as scratch. // Check for and copy trailing word - __ testq(word_count, 1); + __ testl(word_count, 1); __ jccb(Assembler::zero, L_copy_4_bytes); __ movw(rax, Address(from, word_count, Address::times_2, -2)); __ movw(Address(to, word_count, Address::times_2, -2), rax); // Check for and copy trailing dword __ BIND(L_copy_4_bytes); - __ testq(word_count, 2); + __ testl(word_count, 2); __ jcc(Assembler::zero, L_copy_32_bytes); __ movl(rax, Address(from, qword_count, Address::times_8)); __ movl(Address(to, qword_count, Address::times_8), rax); @@ -1631,12 +1687,12 @@ __ BIND(L_copy_8_bytes); __ movq(rax, Address(from, qword_count, Address::times_8, -8)); __ movq(Address(to, qword_count, Address::times_8, -8), rax); - __ decrementq(qword_count); + __ decrement(qword_count); __ jcc(Assembler::notZero, L_copy_8_bytes); inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -1645,7 +1701,7 @@ inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -1655,6 +1711,7 @@ // Arguments: // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary // ignored + // is_oop - true => oop array, so generate store check code // name - stub name string // // Inputs: @@ -1668,9 +1725,9 @@ // // Side Effects: // disjoint_int_copy_entry is set to the no-overlap entry point - // used by generate_conjoint_int_copy(). + // used by generate_conjoint_int_oop_copy(). 
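Most of the mechanical changes in these hunks swap q-suffixed instructions (movq/addq/leaq/negq) for ptr-suffixed MacroAssembler calls (movptr/addptr/lea/negptr) so the same stub source can emit either 32-bit or 64-bit pointer operations. A rough sketch of the idea, not the actual HotSpot macro bodies:

    #include <stdint.h>

    // Illustration only: a pointer-width abstraction similar in spirit to the
    // movptr/addptr family. In HotSpot these are MacroAssembler members that
    // select the 32-bit or 64-bit instruction form; here we just model the
    // width choice with a typedef.
    #ifdef _LP64
    typedef int64_t word_t;   // pointer-sized ops become 64-bit (movq/addq)
    #else
    typedef int32_t word_t;   // pointer-sized ops become 32-bit (movl/addl)
    #endif

    static inline void mov_ptr(word_t* dst, word_t src) { *dst = src; }
    static inline void add_ptr(word_t* dst, word_t imm) { *dst += imm; }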
// - address generate_disjoint_int_copy(bool aligned, const char *name) { + address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1683,47 +1740,62 @@ const Register qword_count = count; const Register end_from = from; // source array end address const Register end_to = to; // destination array end address + const Register saved_to = r11; // saved destination array address // End pointers are inclusive, and if count is not zero they point // to the last unit copied: end_to[0] := end_from[0] __ enter(); // required for proper stackwalking of RuntimeStub frame assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. - disjoint_int_copy_entry = __ pc(); + (is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc(); + + if (is_oop) { + // no registers are destroyed by this call + gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2); + } + BLOCK_COMMENT("Entry:"); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers + if (is_oop) { + __ movq(saved_to, to); + } + // 'from', 'to' and 'count' are now valid - __ movq(dword_count, count); - __ shrq(count, 1); // count => qword_count + __ movptr(dword_count, count); + __ shrptr(count, 1); // count => qword_count // Copy from low to high addresses. Use 'to' as scratch. - __ leaq(end_from, Address(from, qword_count, Address::times_8, -8)); - __ leaq(end_to, Address(to, qword_count, Address::times_8, -8)); - __ negq(qword_count); + __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); + __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); + __ negptr(qword_count); __ jmp(L_copy_32_bytes); // Copy trailing qwords __ BIND(L_copy_8_bytes); __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); - __ incrementq(qword_count); + __ increment(qword_count); __ jcc(Assembler::notZero, L_copy_8_bytes); // Check for and copy trailing dword __ BIND(L_copy_4_bytes); - __ testq(dword_count, 1); // Only byte test since the value is 0 or 1 + __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 __ jccb(Assembler::zero, L_exit); __ movl(rax, Address(end_from, 8)); __ movl(Address(end_to, 8), rax); __ BIND(L_exit); + if (is_oop) { + __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4)); + gen_write_ref_array_post_barrier(saved_to, end_to, rax); + } inc_counter_np(SharedRuntime::_jint_array_copy_ctr); restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -1737,6 +1809,7 @@ // Arguments: // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary // ignored + // is_oop - true => oop array, so generate store check code // name - stub name string // // Inputs: @@ -1748,12 +1821,12 @@ // the hardware handle it. The two dwords within qwords that span // cache line boundaries will still be loaded and stored atomicly. 
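When is_oop is true, the stub above brackets the copy with gen_write_ref_array_pre_barrier / gen_write_ref_array_post_barrier over the destination range. Assuming a simple card-table collector, the post barrier amounts to dirtying every card spanned by the copied oops; the following is a hedged, toy sketch of that idea, not the VM's barrier code (card size, table size and the dirty value are assumptions of the sketch).

    #include <stdint.h>
    #include <stddef.h>

    // Toy card-table post barrier over a copied oop range.
    static const size_t kCardShift = 9;      // 512-byte cards (typical, assumed)
    static uint8_t card_table[1 << 20];      // toy table, index = addr >> kCardShift

    static void write_ref_array_post_barrier(void* dest_start,
                                             void* dest_end_inclusive) {
      uintptr_t start = (uintptr_t)dest_start         >> kCardShift;
      uintptr_t end   = (uintptr_t)dest_end_inclusive >> kCardShift;
      for (uintptr_t card = start; card <= end; card++) {
        card_table[card % (1 << 20)] = 0;    // mark dirty (0 is the dirty value here)
      }
    }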
// - address generate_conjoint_int_copy(bool aligned, const char *name) { + address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); - Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes; + Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit; const Register from = rdi; // source array address const Register to = rsi; // destination array address const Register count = rdx; // elements count @@ -1763,22 +1836,29 @@ __ enter(); // required for proper stackwalking of RuntimeStub frame assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. - int_copy_entry = __ pc(); + if (is_oop) { + // no registers are destroyed by this call + gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2); + } + + (is_oop ? oop_copy_entry : int_copy_entry) = __ pc(); BLOCK_COMMENT("Entry:"); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - array_overlap_test(disjoint_int_copy_entry, Address::times_4); + array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry, + Address::times_4); setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers + assert_clean_int(count, rax); // Make sure 'count' is clean int. // 'from', 'to' and 'count' are now valid - __ movq(dword_count, count); - __ shrq(count, 1); // count => qword_count + __ movptr(dword_count, count); + __ shrptr(count, 1); // count => qword_count // Copy from high to low addresses. Use 'to' as scratch. // Check for and copy trailing dword - __ testq(dword_count, 1); + __ testl(dword_count, 1); __ jcc(Assembler::zero, L_copy_32_bytes); __ movl(rax, Address(from, dword_count, Address::times_4, -4)); __ movl(Address(to, dword_count, Address::times_4, -4), rax); @@ -1788,21 +1868,30 @@ __ BIND(L_copy_8_bytes); __ movq(rax, Address(from, qword_count, Address::times_8, -8)); __ movq(Address(to, qword_count, Address::times_8, -8), rax); - __ decrementq(qword_count); + __ decrement(qword_count); __ jcc(Assembler::notZero, L_copy_8_bytes); inc_counter_np(SharedRuntime::_jint_array_copy_ctr); + if (is_oop) { + __ jmp(L_exit); + } restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); // Copy in 32-bytes chunks copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); - inc_counter_np(SharedRuntime::_jint_array_copy_ctr); + inc_counter_np(SharedRuntime::_jint_array_copy_ctr); + __ bind(L_exit); + if (is_oop) { + Register end_to = rdx; + __ leaq(end_to, Address(to, dword_count, Address::times_4, -4)); + gen_write_ref_array_post_barrier(to, end_to, rax); + } restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -1820,7 +1909,7 @@ // c_rarg1 - destination array address // c_rarg2 - element count, treated as ssize_t, can be zero // - // Side Effects: + // Side Effects: // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the // no-overlap entry point used by generate_conjoint_long_oop_copy(). // @@ -1859,16 +1948,16 @@ // 'from', 'to' and 'qword_count' are now valid // Copy from low to high addresses. Use 'to' as scratch. 
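The conjoint entry points start with array_overlap_test(...), which branches to the disjoint (forward-copying) stub whenever copying forward is safe and only falls through to the backward loop for genuinely overlapping moves. A minimal sketch of that dispatch decision in illustrative C++ (not the stub code):

    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>

    // Forward copy is safe unless the destination starts inside the source
    // range; this mirrors what array_overlap_test decides before the backward
    // loop is ever reached.
    static void conjoint_copy(const void* from, void* to, size_t byte_count) {
      uintptr_t f = (uintptr_t)from, t = (uintptr_t)to;
      if (t <= f || t >= f + byte_count) {
        memcpy(to, from, byte_count);    // "disjoint" fast path: copy low -> high
      } else {
        memmove(to, from, byte_count);   // overlapping: copy high -> low
      }
    }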
- __ leaq(end_from, Address(from, qword_count, Address::times_8, -8)); - __ leaq(end_to, Address(to, qword_count, Address::times_8, -8)); - __ negq(qword_count); + __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); + __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); + __ negptr(qword_count); __ jmp(L_copy_32_bytes); // Copy trailing qwords __ BIND(L_copy_8_bytes); __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); - __ incrementq(qword_count); + __ increment(qword_count); __ jcc(Assembler::notZero, L_copy_8_bytes); if (is_oop) { @@ -1876,7 +1965,7 @@ } else { inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); } @@ -1892,7 +1981,7 @@ inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); } restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -1926,11 +2015,14 @@ address disjoint_copy_entry = NULL; if (is_oop) { + assert(!UseCompressedOops, "shouldn't be called for compressed oops"); disjoint_copy_entry = disjoint_oop_copy_entry; oop_copy_entry = __ pc(); + array_overlap_test(disjoint_oop_copy_entry, Address::times_8); } else { disjoint_copy_entry = disjoint_long_copy_entry; long_copy_entry = __ pc(); + array_overlap_test(disjoint_long_copy_entry, Address::times_8); } BLOCK_COMMENT("Entry:"); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) @@ -1943,20 +2035,18 @@ if (is_oop) { // Save to and count for store barrier - __ movq(saved_count, qword_count); + __ movptr(saved_count, qword_count); // No registers are destroyed by this call gen_write_ref_array_pre_barrier(to, saved_count); } - // Copy from high to low addresses. Use rcx as scratch. 
- __ jmp(L_copy_32_bytes); // Copy trailing qwords __ BIND(L_copy_8_bytes); __ movq(rax, Address(from, qword_count, Address::times_8, -8)); __ movq(Address(to, qword_count, Address::times_8, -8), rax); - __ decrementq(qword_count); + __ decrement(qword_count); __ jcc(Assembler::notZero, L_copy_8_bytes); if (is_oop) { @@ -1964,7 +2054,7 @@ } else { inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); } @@ -1974,14 +2064,14 @@ if (is_oop) { __ BIND(L_exit); - __ leaq(rcx, Address(to, saved_count, Address::times_8, -8)); + __ lea(rcx, Address(to, saved_count, Address::times_8, -8)); gen_write_ref_array_post_barrier(to, rcx, rax); inc_counter_np(SharedRuntime::_oop_array_copy_ctr); } else { inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); } restore_arg_regs(); - __ xorq(rax, rax); // return 0 + __ xorptr(rax, rax); // return 0 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -2010,12 +2100,12 @@ Address super_cache_addr( sub_klass, sc_offset); // if the pointers are equal, we are done (e.g., String[] elements) - __ cmpq(super_klass, sub_klass); + __ cmpptr(super_klass, sub_klass); __ jcc(Assembler::equal, L_success); // check the supertype display: Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0); - __ cmpq(super_klass, super_check_addr); // test the super type + __ cmpptr(super_klass, super_check_addr); // test the super type __ jcc(Assembler::equal, L_success); // if it was a primary super, we can just fail immediately @@ -2028,31 +2118,38 @@ // This code is rarely used, so simplicity is a virtue here. inc_counter_np(SharedRuntime::_partial_subtype_ctr); { - __ pushq(rax); - __ pushq(rcx); - __ pushq(rdi); + __ push(rax); + __ push(rcx); + __ push(rdi); assert_different_registers(sub_klass, super_klass, rax, rcx, rdi); - __ movq(rdi, secondary_supers_addr); + __ movptr(rdi, secondary_supers_addr); // Load the array length. - __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); + __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); // Skip to start of data. - __ addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); + __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); // Scan rcx words at [rdi] for occurance of rax // Set NZ/Z based on last compare - __ movq(rax, super_klass); - __ repne_scan(); + __ movptr(rax, super_klass); + if (UseCompressedOops) { + // Compare against compressed form. Don't need to uncompress because + // looks like orig rax is restored in popq below. + __ encode_heap_oop(rax); + __ repne_scanl(); + } else { + __ repne_scan(); + } // Unspill the temp. registers: - __ popq(rdi); - __ popq(rcx); - __ popq(rax); + __ pop(rdi); + __ pop(rcx); + __ pop(rax); __ jcc(Assembler::notEqual, L_miss); } // Success. Cache the super we found and proceed in triumph. - __ movq(super_cache_addr, super_klass); // note: rax is dead + __ movptr(super_cache_addr, super_klass); // note: rax is dead __ jmp(L_success); // Fall through on failure! 
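generate_type_check above first compares the klass pointers, then probes the supertype display at super_check_offset, and only falls back to a linear repne scan of the secondary-supers array (comparing the compressed form when UseCompressedOops is on). A hedged C++ sketch of that fast/slow structure; the simplified klass and array layouts are assumptions of the sketch, not HotSpot's real layout.

    #include <stdint.h>

    // Simplified klass layout for illustration only.
    struct Klass {
      Klass*  primary_display_entry;   // stands in for the super_check_offset slot
      int     secondary_count;
      Klass** secondary_supers;        // stands in for the secondary-supers array
      Klass*  secondary_super_cache;
    };

    static bool is_subtype_of(Klass* sub, Klass* super) {
      if (sub == super) return true;                        // exact match (e.g. String[] elements)
      if (sub->primary_display_entry == super) return true; // supertype display hit
      // Slow path: scan the secondary supers (the repne_scan loop above).
      for (int i = 0; i < sub->secondary_count; i++) {
        if (sub->secondary_supers[i] == super) {
          sub->secondary_super_cache = super;               // cache the hit, as the stub does
          return true;
        }
      }
      return false;
    }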
@@ -2099,7 +2196,7 @@ const Register r11_klass = r11; // oop._klass //--------------------------------------------------------------- - // Assembler stub will be used for this call to arraycopy + // Assembler stub will be used for this call to arraycopy // if the two arrays are subtypes of Object[] but the // destination array type is not equal to or a supertype // of the source type. Each element must be separately @@ -2118,7 +2215,7 @@ // caller guarantees that the arrays really are different // otherwise, we would have to make conjoint checks { Label L; - array_overlap_test(L, Address::times_8); + array_overlap_test(L, TIMES_OOP); __ stop("checkcast_copy within a single array"); __ bind(L); } @@ -2132,16 +2229,16 @@ saved_rip_offset, saved_rarg0_offset }; - __ subq(rsp, saved_rbp_offset * wordSize); - __ movq(Address(rsp, saved_r13_offset * wordSize), r13); - __ movq(Address(rsp, saved_r14_offset * wordSize), r14); + __ subptr(rsp, saved_rbp_offset * wordSize); + __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); + __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); setup_arg_regs(4); // from => rdi, to => rsi, length => rdx // ckoff => rcx, ckval => r8 // r9 and r10 may be used to save non-volatile registers #ifdef _WIN64 // last argument (#4) is on stack on Win64 const int ckval_offset = saved_rarg0_offset + 4; - __ movq(ckval, Address(rsp, ckval_offset * wordSize)); + __ movptr(ckval, Address(rsp, ckval_offset * wordSize)); #endif // check that int operands are properly extended to size_t @@ -2163,25 +2260,24 @@ #endif //ASSERT // Loop-invariant addresses. They are exclusive end pointers. - Address end_from_addr(from, length, Address::times_8, 0); - Address end_to_addr(to, length, Address::times_8, 0); + Address end_from_addr(from, length, TIMES_OOP, 0); + Address end_to_addr(to, length, TIMES_OOP, 0); // Loop-variant addresses. They assume post-incremented count < 0. - Address from_element_addr(end_from, count, Address::times_8, 0); - Address to_element_addr(end_to, count, Address::times_8, 0); - Address oop_klass_addr(rax_oop, oopDesc::klass_offset_in_bytes()); + Address from_element_addr(end_from, count, TIMES_OOP, 0); + Address to_element_addr(end_to, count, TIMES_OOP, 0); gen_write_ref_array_pre_barrier(to, count); // Copy from low to high addresses, indexed from the end of each array. - __ leaq(end_from, end_from_addr); - __ leaq(end_to, end_to_addr); - __ movq(r14_length, length); // save a copy of the length - assert(length == count, ""); // else fix next line: - __ negq(count); // negate and test the length + __ lea(end_from, end_from_addr); + __ lea(end_to, end_to_addr); + __ movptr(r14_length, length); // save a copy of the length + assert(length == count, ""); // else fix next line: + __ negptr(count); // negate and test the length __ jcc(Assembler::notZero, L_load_element); // Empty array: Nothing to do. - __ xorq(rax, rax); // return 0 on (trivial) success + __ xorptr(rax, rax); // return 0 on (trivial) success __ jmp(L_done); // ======== begin loop ======== @@ -2190,19 +2286,19 @@ // for (count = -count; count != 0; count++) // Base pointers src, dst are biased by 8*(count-1),to last element. 
__ align(16); - + __ BIND(L_store_element); - __ movq(to_element_addr, rax_oop); // store the oop - __ incrementq(count); // increment the count toward zero + __ store_heap_oop(to_element_addr, rax_oop); // store the oop + __ increment(count); // increment the count toward zero __ jcc(Assembler::zero, L_do_card_marks); // ======== loop entry is here ======== __ BIND(L_load_element); - __ movq(rax_oop, from_element_addr); // load the oop - __ testq(rax_oop, rax_oop); + __ load_heap_oop(rax_oop, from_element_addr); // load the oop + __ testptr(rax_oop, rax_oop); __ jcc(Assembler::zero, L_store_element); - __ movq(r11_klass, oop_klass_addr); // query the object klass + __ load_klass(r11_klass, rax_oop);// query the object klass generate_type_check(r11_klass, ckoff, ckval, L_store_element); // ======== end loop ======== @@ -2211,23 +2307,23 @@ // Emit GC store barriers for the oops we have copied (r14 + rdx), // and report their number to the caller. assert_different_registers(rax, r14_length, count, to, end_to, rcx); - __ leaq(end_to, to_element_addr); - gen_write_ref_array_post_barrier(to, end_to, rcx); - __ movq(rax, r14_length); // original oops - __ addq(rax, count); // K = (original - remaining) oops - __ notq(rax); // report (-1^K) to caller + __ lea(end_to, to_element_addr); + gen_write_ref_array_post_barrier(to, end_to, rscratch1); + __ movptr(rax, r14_length); // original oops + __ addptr(rax, count); // K = (original - remaining) oops + __ notptr(rax); // report (-1^K) to caller __ jmp(L_done); // Come here on success only. __ BIND(L_do_card_marks); - __ addq(end_to, -wordSize); // make an inclusive end pointer - gen_write_ref_array_post_barrier(to, end_to, rcx); - __ xorq(rax, rax); // return 0 on success + __ addptr(end_to, -wordSize); // make an inclusive end pointer + gen_write_ref_array_post_barrier(to, end_to, rscratch1); + __ xorptr(rax, rax); // return 0 on success // Common exit point (success or failure). 
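The checkcast copy loop stores each element with store_heap_oop, skips the type check for nulls, and on a failed check reports the number of elements already copied as its one's complement ("-1^K"). A hedged sketch of the loop's control flow; element_ok stands in for generate_type_check and is an assumption of the sketch.

    #include <stddef.h>
    #include <stdint.h>

    typedef void* oop;
    struct Klass;

    // Returns 0 on full success, or ~K after K elements if a check fails,
    // mirroring the result reported to the caller above. 'element_ok' plays
    // the role of generate_type_check.
    static intptr_t checkcast_copy(oop* from, oop* to, size_t length,
                                   Klass* dest_elem_klass,
                                   bool (*element_ok)(oop elem, Klass* dest_elem_klass)) {
      for (size_t i = 0; i < length; i++) {
        oop x = from[i];                     // load_heap_oop
        if (x != NULL && !element_ok(x, dest_elem_klass)) {
          // the stub also card-marks the i elements copied so far before returning
          return ~(intptr_t)i;               // report -1^K, K = elements copied
        }
        to[i] = x;                           // store_heap_oop
      }
      return 0;                              // full success; card marks follow
    }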
__ BIND(L_done); - __ movq(r13, Address(rsp, saved_r13_offset * wordSize)); - __ movq(r14, Address(rsp, saved_r14_offset * wordSize)); + __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); + __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); restore_arg_regs(); __ leave(); // required for proper stackwalking of RuntimeStub frame @@ -2270,9 +2366,9 @@ // bump this on entry, not on exit: inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); - __ movq(bits, from); - __ orq(bits, to); - __ orq(bits, size); + __ mov(bits, from); + __ orptr(bits, to); + __ orptr(bits, size); __ testb(bits, BytesPerLong-1); __ jccb(Assembler::zero, L_long_aligned); @@ -2284,15 +2380,15 @@ __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); __ BIND(L_short_aligned); - __ shrq(size, LogBytesPerShort); // size => short_count + __ shrptr(size, LogBytesPerShort); // size => short_count __ jump(RuntimeAddress(short_copy_entry)); __ BIND(L_int_aligned); - __ shrq(size, LogBytesPerInt); // size => int_count + __ shrptr(size, LogBytesPerInt); // size => int_count __ jump(RuntimeAddress(int_copy_entry)); __ BIND(L_long_aligned); - __ shrq(size, LogBytesPerLong); // size => qword_count + __ shrptr(size, LogBytesPerLong); // size => qword_count __ jump(RuntimeAddress(long_copy_entry)); return start; @@ -2386,9 +2482,9 @@ inc_counter_np(SharedRuntime::_generic_array_copy_ctr); //----------------------------------------------------------------------- - // Assembler stub will be used for this call to arraycopy + // Assembler stub will be used for this call to arraycopy // if the following conditions are met: - // + // // (1) src and dst must not be null. // (2) src_pos must not be negative. // (3) dst_pos must not be negative. @@ -2397,10 +2493,10 @@ // (6) src and dst should be arrays. // (7) src_pos + length must not exceed length of src. // (8) dst_pos + length must not exceed length of dst. 
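generate_unsafe_copy ORs the two addresses and the byte size together, so a single alignment test on the combined bits picks the widest element width that all three values share, and the size is then shifted down to an element count. A minimal sketch of the same dispatch (the helper itself is illustrative and assumes non-overlapping ranges):

    #include <stdint.h>
    #include <stddef.h>

    // Pick the element width for a raw "unsafe" copy the way the stub does:
    // OR from, to and size together and test the low bits of the result.
    static void unsafe_copy(void* to, const void* from, size_t size) {
      uintptr_t bits = (uintptr_t)from | (uintptr_t)to | (uintptr_t)size;
      if ((bits & 7) == 0) {
        uint64_t* d = (uint64_t*)to; const uint64_t* s = (const uint64_t*)from;
        for (size_t i = 0; i < (size >> 3); i++) d[i] = s[i];   // long_copy_entry path
      } else if ((bits & 3) == 0) {
        uint32_t* d = (uint32_t*)to; const uint32_t* s = (const uint32_t*)from;
        for (size_t i = 0; i < (size >> 2); i++) d[i] = s[i];   // int_copy_entry path
      } else if ((bits & 1) == 0) {
        uint16_t* d = (uint16_t*)to; const uint16_t* s = (const uint16_t*)from;
        for (size_t i = 0; i < (size >> 1); i++) d[i] = s[i];   // short_copy_entry path
      } else {
        uint8_t* d = (uint8_t*)to; const uint8_t* s = (const uint8_t*)from;
        for (size_t i = 0; i < size; i++) d[i] = s[i];          // byte_copy_entry path
      }
    }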
- // + // // if (src == NULL) return -1; - __ testq(src, src); // src oop + __ testptr(src, src); // src oop size_t j1off = __ offset(); __ jccb(Assembler::zero, L_failed_0); @@ -2409,7 +2505,7 @@ __ jccb(Assembler::negative, L_failed_0); // if (dst == NULL) return -1; - __ testq(dst, dst); // dst oop + __ testptr(dst, dst); // dst oop __ jccb(Assembler::zero, L_failed_0); // if (dst_pos < 0) return -1; @@ -2428,25 +2524,25 @@ // registers used as temp const Register r11_length = r11; // elements count to copy const Register r10_src_klass = r10; // array klass + const Register r9_dst_klass = r9; // dest array klass // if (length < 0) return -1; __ movl(r11_length, C_RARG4); // length (elements count, 32-bits value) __ testl(r11_length, r11_length); __ jccb(Assembler::negative, L_failed_0); - Address src_klass_addr(src, oopDesc::klass_offset_in_bytes()); - Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes()); - __ movq(r10_src_klass, src_klass_addr); + __ load_klass(r10_src_klass, src); #ifdef ASSERT // assert(src->klass() != NULL); BLOCK_COMMENT("assert klasses not null"); { Label L1, L2; - __ testq(r10_src_klass, r10_src_klass); + __ testptr(r10_src_klass, r10_src_klass); __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL __ bind(L1); __ stop("broken null klass"); __ bind(L2); - __ cmpq(dst_klass_addr, 0); + __ load_klass(r9_dst_klass, dst); + __ cmpq(r9_dst_klass, 0); __ jcc(Assembler::equal, L1); // this would be broken also BLOCK_COMMENT("assert done"); } @@ -2473,7 +2569,8 @@ __ jcc(Assembler::equal, L_objArray); // if (src->klass() != dst->klass()) return -1; - __ cmpq(r10_src_klass, dst_klass_addr); + __ load_klass(r9_dst_klass, dst); + __ cmpq(r10_src_klass, r9_dst_klass); __ jcc(Assembler::notEqual, L_failed); // if (!src->is_Array()) return -1; @@ -2504,9 +2601,9 @@ __ movl(r10_offset, rax_lh); __ shrl(r10_offset, Klass::_lh_header_size_shift); - __ andq(r10_offset, Klass::_lh_header_size_mask); // array_offset - __ addq(src, r10_offset); // src array offset - __ addq(dst, r10_offset); // dst array offset + __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset + __ addptr(src, r10_offset); // src array offset + __ addptr(dst, r10_offset); // dst array offset BLOCK_COMMENT("choose copy loop based on element size"); __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize @@ -2521,25 +2618,25 @@ __ BIND(L_copy_bytes); __ cmpl(rax_elsize, 0); __ jccb(Assembler::notEqual, L_copy_shorts); - __ leaq(from, Address(src, src_pos, Address::times_1, 0));// src_addr - __ leaq(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr - __ movslq(count, r11_length); // length + __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr + __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr + __ movl2ptr(count, r11_length); // length __ jump(RuntimeAddress(byte_copy_entry)); __ BIND(L_copy_shorts); __ cmpl(rax_elsize, LogBytesPerShort); __ jccb(Assembler::notEqual, L_copy_ints); - __ leaq(from, Address(src, src_pos, Address::times_2, 0));// src_addr - __ leaq(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr - __ movslq(count, r11_length); // length + __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr + __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr + __ movl2ptr(count, r11_length); // length __ jump(RuntimeAddress(short_copy_entry)); __ BIND(L_copy_ints); __ cmpl(rax_elsize, LogBytesPerInt); __ jccb(Assembler::notEqual, L_copy_longs); - __ leaq(from, Address(src, 
src_pos, Address::times_4, 0));// src_addr - __ leaq(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr - __ movslq(count, r11_length); // length + __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr + __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr + __ movl2ptr(count, r11_length); // length __ jump(RuntimeAddress(int_copy_entry)); __ BIND(L_copy_longs); @@ -2551,9 +2648,9 @@ __ bind(L); } #endif - __ leaq(from, Address(src, src_pos, Address::times_8, 0));// src_addr - __ leaq(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr - __ movslq(count, r11_length); // length + __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr + __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr + __ movl2ptr(count, r11_length); // length __ jump(RuntimeAddress(long_copy_entry)); // objArrayKlass @@ -2562,18 +2659,19 @@ Label L_plain_copy, L_checkcast_copy; // test array classes for subtyping - __ cmpq(r10_src_klass, dst_klass_addr); // usual case is exact equality + __ load_klass(r9_dst_klass, dst); + __ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality __ jcc(Assembler::notEqual, L_checkcast_copy); // Identically typed arrays can be copied without element-wise checks. arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, r10, L_failed); - __ leaq(from, Address(src, src_pos, Address::times_8, + __ lea(from, Address(src, src_pos, TIMES_OOP, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr - __ leaq(to, Address(dst, dst_pos, Address::times_8, + __ lea(to, Address(dst, dst_pos, TIMES_OOP, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr - __ movslq(count, r11_length); // length + __ movl2ptr(count, r11_length); // length __ BIND(L_plain_copy); __ jump(RuntimeAddress(oop_copy_entry)); @@ -2582,7 +2680,7 @@ { // assert(r11_length == C_RARG4); // will reload from here Register r11_dst_klass = r11; - __ movq(r11_dst_klass, dst_klass_addr); + __ load_klass(r11_dst_klass, dst); // Before looking at dst.length, make sure dst is also an objArray. __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh); @@ -2596,13 +2694,13 @@ __ movl(r11_length, C_RARG4); // reload arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, rax, L_failed); - __ movl(r11_dst_klass, dst_klass_addr); // reload + __ load_klass(r11_dst_klass, dst); // reload #endif // Marshal the base address arguments now, freeing registers. - __ leaq(from, Address(src, src_pos, Address::times_8, + __ lea(from, Address(src, src_pos, TIMES_OOP, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); - __ leaq(to, Address(dst, dst_pos, Address::times_8, + __ lea(to, Address(dst, dst_pos, TIMES_OOP, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); __ movl(count, C_RARG4); // length (reloaded) Register sco_temp = c_rarg3; // this register is free now @@ -2620,19 +2718,19 @@ // Fetch destination element klass from the objArrayKlass header. 
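The generic arraycopy stub decodes the klass layout helper once to get the array header size and the log2 element size, forms the raw source and destination addresses, and tail-calls the matching primitive copy stub; object arrays instead take the plain or checkcast oop path shown above. A hedged sketch of that address arithmetic; the concrete bit positions of the layout-helper fields are assumptions of the sketch, named after the shift/mask constants used above.

    #include <stdint.h>
    #include <stddef.h>

    struct ArrayCopyArgs {
      uint8_t* src_addr;      // first byte to copy from
      uint8_t* dst_addr;      // first byte to copy to
      size_t   elem_size;     // 1, 2, 4 or 8 bytes
    };

    // Illustration of the layout-helper decode in the generic arraycopy stub.
    static ArrayCopyArgs decode_and_bias(uint8_t* src, int src_pos,
                                         uint8_t* dst, int dst_pos,
                                         int layout_helper) {
      const int header_size_shift = 16;     // assumed: Klass::_lh_header_size_shift
      const int header_size_mask  = 0xFF;   // assumed: Klass::_lh_header_size_mask
      const int log2_elem_mask    = 0xFF;   // assumed: _lh_log2_element_size_mask

      int header_bytes   = (layout_helper >> header_size_shift) & header_size_mask;
      int log2_elem_size = layout_helper & log2_elem_mask;

      ArrayCopyArgs a;
      a.elem_size = (size_t)1 << log2_elem_size;
      a.src_addr  = src + header_bytes + ((size_t)src_pos << log2_elem_size);
      a.dst_addr  = dst + header_bytes + ((size_t)dst_pos << log2_elem_size);
      return a;
    }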
int ek_offset = (klassOopDesc::header_size() * HeapWordSize + objArrayKlass::element_klass_offset_in_bytes()); - __ movq(r11_dst_klass, Address(r11_dst_klass, ek_offset)); + __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); assert_clean_int(sco_temp, rax); // the checkcast_copy loop needs two extra arguments: assert(c_rarg3 == sco_temp, "#3 already in place"); - __ movq(C_RARG4, r11_dst_klass); // dst.klass.element_klass + __ movptr(C_RARG4, r11_dst_klass); // dst.klass.element_klass __ jump(RuntimeAddress(checkcast_copy_entry)); } __ BIND(L_failed); - __ xorq(rax, rax); - __ notq(rax); // return -1 + __ xorptr(rax, rax); + __ notptr(rax); // return -1 __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); @@ -2651,14 +2749,20 @@ StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy"); - StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy"); + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy"); + StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy"); StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy"); StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy"); - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy"); - StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy"); + + if (UseCompressedOops) { + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy"); + StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy"); + } else { + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy"); + StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy"); + } StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy"); StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy"); @@ -2729,18 +2833,18 @@ // thread-local storage and also sets up last_Java_sp slightly // differently than the real call_VM if (restore_saved_exception_pc) { - __ movq(rax, - Address(r15_thread, - in_bytes(JavaThread::saved_exception_pc_offset()))); - __ pushq(rax); + __ movptr(rax, + Address(r15_thread, + in_bytes(JavaThread::saved_exception_pc_offset()))); + __ push(rax); } - + __ enter(); // required for proper stackwalking of RuntimeStub frame assert(is_even(framesize/2), "sp not 16-byte aligned"); // return address and rbp are already in place - __ subq(rsp, (framesize-4) << LogBytesPerInt); // prolog + __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog int frame_complete = __ pc() - start; @@ -2748,7 +2852,7 @@ __ set_last_Java_frame(rsp, rbp, NULL); // Call runtime - __ movq(c_rarg0, r15_thread); + __ movptr(c_rarg0, r15_thread); BLOCK_COMMENT("call runtime_entry"); __ call(RuntimeAddress(runtime_entry)); @@ -2764,8 +2868,8 @@ // check for pending exceptions #ifdef ASSERT Label L; - __ cmpq(Address(r15_thread, 
Thread::pending_exception_offset()), - (int) NULL); + __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), + (int32_t) NULL_WORD); __ jcc(Assembler::notEqual, L); __ should_not_reach_here(); __ bind(L); @@ -2775,9 +2879,9 @@ // codeBlob framesize is in words (not VMRegImpl::slot_size) RuntimeStub* stub = - RuntimeStub::new_runtime_stub(name, + RuntimeStub::new_runtime_stub(name, &code, - frame_complete, + frame_complete, (framesize >> (LogBytesPerWord - LogBytesPerInt)), oop_maps, false); return stub->entry_point(); @@ -2788,7 +2892,7 @@ // Generates all stubs and initializes the entry points // This platform-specific stub is needed by generate_call_stub() - StubRoutines::amd64::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80); + StubRoutines::x86::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80); // entry points that exist in all platforms Note: This is code // that could be shared among different platforms - however the @@ -2798,11 +2902,11 @@ StubRoutines::_forward_exception_entry = generate_forward_exception(); - StubRoutines::_call_stub_entry = + StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); - // is referenced by megamorphic call - StubRoutines::_catch_exception_entry = generate_catch_exception(); + // is referenced by megamorphic call + StubRoutines::_catch_exception_entry = generate_catch_exception(); // atomic calls StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); @@ -2817,27 +2921,27 @@ generate_handler_for_unsafe_access(); // platform dependent - StubRoutines::amd64::_get_previous_fp_entry = generate_get_previous_fp(); + StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); - StubRoutines::amd64::_verify_mxcsr_entry = generate_verify_mxcsr(); + StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); } void generate_all() { // Generates all stubs and initializes the entry points - + // These entry points require SharedInfo::stack0 to be set up in // non-core builds and need to be relocatable, so they each // fabricate a RuntimeStub internally. 
StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", - CAST_FROM_FN_PTR(address, + CAST_FROM_FN_PTR(address, SharedRuntime:: throw_AbstractMethodError), false); StubRoutines::_throw_IncompatibleClassChangeError_entry = generate_throw_exception("IncompatibleClassChangeError throw_exception", - CAST_FROM_FN_PTR(address, + CAST_FROM_FN_PTR(address, SharedRuntime:: throw_IncompatibleClassChangeError), false); @@ -2850,8 +2954,8 @@ true); StubRoutines::_throw_NullPointerException_entry = - generate_throw_exception("NullPointerException throw_exception", - CAST_FROM_FN_PTR(address, + generate_throw_exception("NullPointerException throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime:: throw_NullPointerException), true); @@ -2865,21 +2969,21 @@ StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, + CAST_FROM_FN_PTR(address, SharedRuntime:: throw_StackOverflowError), false); - // entry points that are platform specific - StubRoutines::amd64::_f2i_fixup = generate_f2i_fixup(); - StubRoutines::amd64::_f2l_fixup = generate_f2l_fixup(); - StubRoutines::amd64::_d2i_fixup = generate_d2i_fixup(); - StubRoutines::amd64::_d2l_fixup = generate_d2l_fixup(); - - StubRoutines::amd64::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); - StubRoutines::amd64::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); - StubRoutines::amd64::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); - StubRoutines::amd64::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); + // entry points that are platform specific + StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); + StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); + StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); + StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); + + StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); + StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); + StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); + StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); // support for verify_oop (must happen after universe_init) StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); @@ -2889,7 +2993,7 @@ } public: - StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { + StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { if (all) { generate_all(); } else { --- old/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.cpp 2009-08-01 04:08:16.258568759 +0100 +++ new/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.cpp 2009-08-01 04:08:16.182951786 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)stubRoutines_x86_32.cpp 1.72 07/09/17 09:26:00 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ #include "incls/_precompiled.incl" @@ -31,6 +28,6 @@ // Implementation of the platform-specific part of StubRoutines - for // a description of how to extend it, see the stubRoutines.hpp file. -address StubRoutines::i486::_verify_mxcsr_entry = NULL; -address StubRoutines::i486::_verify_fpu_cntrl_wrd_entry= NULL; -address StubRoutines::i486::_call_stub_compiled_return = NULL; +address StubRoutines::x86::_verify_mxcsr_entry = NULL; +address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry= NULL; +address StubRoutines::x86::_call_stub_compiled_return = NULL; --- old/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp 2009-08-01 04:08:17.089540366 +0100 +++ new/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp 2009-08-01 04:08:17.011908069 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)stubRoutines_x86_32.hpp 1.71 07/05/05 17:04:20 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // This file holds the platform specific parts of the StubRoutines @@ -30,11 +27,11 @@ // extend it. enum platform_dependent_constants { - code_size1 = 9000, // simply increase if too small (assembler will crash if too small) - code_size2 = 22000 // simply increase if too small (assembler will crash if too small) + code_size1 = 9000, // simply increase if too small (assembler will crash if too small) + code_size2 = 22000 // simply increase if too small (assembler will crash if too small) }; -class i486 { +class x86 { friend class StubGenerator; friend class VMStructs; @@ -43,7 +40,7 @@ // need to adjust the return back to the call stub to a specialized // piece of code that can handle compiled results and cleaning the fpu // stack. The variable holds that location. - static address _call_stub_compiled_return; + static address _call_stub_compiled_return; static address _verify_mxcsr_entry; static address _verify_fpu_cntrl_wrd_entry; static jint _mxcsr_std; @@ -56,5 +53,5 @@ static void set_call_stub_compiled_return(address ret) { _call_stub_compiled_return = ret; } }; - static bool returns_to_call_stub(address return_pc) { return (return_pc == _call_stub_return_address) || - return_pc == i486::get_call_stub_compiled_return(); } + static bool returns_to_call_stub(address return_pc) { return (return_pc == _call_stub_return_address) || + return_pc == x86::get_call_stub_compiled_return(); } --- old/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.cpp 2009-08-01 04:08:17.936634161 +0100 +++ new/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.cpp 2009-08-01 04:08:17.864253457 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)stubRoutines_x86_64.cpp 1.13 07/09/17 09:26:00 JVM" -#endif /* - * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ #include "incls/_precompiled.incl" @@ -31,17 +28,16 @@ // Implementation of the platform-specific part of StubRoutines - for // a description of how to extend it, see the stubRoutines.hpp file. -address StubRoutines::amd64::_get_previous_fp_entry = NULL; - -address StubRoutines::amd64::_verify_mxcsr_entry = NULL; +address StubRoutines::x86::_get_previous_fp_entry = NULL; -address StubRoutines::amd64::_f2i_fixup = NULL; -address StubRoutines::amd64::_f2l_fixup = NULL; -address StubRoutines::amd64::_d2i_fixup = NULL; -address StubRoutines::amd64::_d2l_fixup = NULL; -address StubRoutines::amd64::_float_sign_mask = NULL; -address StubRoutines::amd64::_float_sign_flip = NULL; -address StubRoutines::amd64::_double_sign_mask = NULL; -address StubRoutines::amd64::_double_sign_flip = NULL; -address StubRoutines::amd64::_mxcsr_std = NULL; +address StubRoutines::x86::_verify_mxcsr_entry = NULL; +address StubRoutines::x86::_f2i_fixup = NULL; +address StubRoutines::x86::_f2l_fixup = NULL; +address StubRoutines::x86::_d2i_fixup = NULL; +address StubRoutines::x86::_d2l_fixup = NULL; +address StubRoutines::x86::_float_sign_mask = NULL; +address StubRoutines::x86::_float_sign_flip = NULL; +address StubRoutines::x86::_double_sign_mask = NULL; +address StubRoutines::x86::_double_sign_flip = NULL; +address StubRoutines::x86::_mxcsr_std = NULL; --- old/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp 2009-08-01 04:08:18.902259943 +0100 +++ new/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp 2009-08-01 04:08:18.824969874 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)stubRoutines_x86_64.hpp 1.13 07/05/05 17:04:08 JVM" -#endif /* - * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ // This file holds the platform specific parts of the StubRoutines @@ -31,19 +28,19 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; } -enum platform_dependent_constants +enum platform_dependent_constants { - code_size1 = 9000, // simply increase if too small (assembler will + code_size1 = 19000, // simply increase if too small (assembler will // crash if too small) code_size2 = 22000 // simply increase if too small (assembler will // crash if too small) }; -class amd64 { +class x86 { friend class StubGenerator; private: - static address _get_previous_fp_entry; + static address _get_previous_fp_entry; static address _verify_mxcsr_entry; static address _f2i_fixup; @@ -65,15 +62,15 @@ } static address verify_mxcsr_entry() - { - return _verify_mxcsr_entry; + { + return _verify_mxcsr_entry; } static address f2i_fixup() { return _f2i_fixup; } - + static address f2l_fixup() { return _f2l_fixup; @@ -83,7 +80,7 @@ { return _d2i_fixup; } - + static address d2l_fixup() { return _d2l_fixup; @@ -100,18 +97,17 @@ } static address double_sign_mask() - { - return _double_sign_mask; + { + return _double_sign_mask; } - static address double_sign_flip() - { + static address double_sign_flip() + { return _double_sign_flip; } - static address mxcsr_std() - { + static address mxcsr_std() + { return _mxcsr_std; } }; - --- old/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp 2009-08-01 04:08:19.773544465 +0100 +++ new/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp 2009-08-01 04:08:19.678807421 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)templateInterpreter_x86_32.cpp 1.2 07/09/17 09:23:04 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -43,19 +40,19 @@ // Note: There should be a minimal interpreter frame set up when stack // overflow occurs since we check explicitly for it now. 
- // + // #ifdef ASSERT { Label L; - __ leal(rax, Address(rbp, + __ lea(rax, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize)); - __ cmpl(rax, rsp); // rax, = maximal rsp for current rbp, + __ cmpptr(rax, rsp); // rax, = maximal rsp for current rbp, // (stack grows negative) __ jcc(Assembler::aboveEqual, L); // check if frame is complete __ stop ("interpreter frame not set up"); __ bind(L); } #endif // ASSERT - // Restore bcp under the assumption that the current frame is still + // Restore bcp under the assumption that the current frame is still // interpreted __ restore_bcp(); @@ -83,14 +80,14 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() { address entry = __ pc(); // object is at TOS - __ popl(rax); + __ pop(rax); // expression stack must be empty before entering the VM if an exception // happened __ empty_expression_stack(); __ empty_FPU_stack(); - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::throw_ClassCastException), + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_ClassCastException), rax); return entry; } @@ -100,7 +97,7 @@ address entry = __ pc(); if (pass_oop) { // object is at TOS - __ popl(rbx); + __ pop(rbx); } // expression stack must be empty before entering the VM if an exception happened __ empty_expression_stack(); @@ -113,7 +110,7 @@ if (message != NULL) { __ lea(rbx, ExternalAddress((address)message)); } else { - __ movl(rbx, NULL_WORD); + __ movptr(rbx, (int32_t)NULL_WORD); } __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx); } @@ -126,7 +123,7 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state) { address entry = __ pc(); // NULL last_sp until next java call - __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); __ dispatch_next(state); return entry; } @@ -163,32 +160,32 @@ // In SSE mode, interpreter returns FP results in xmm0 but they need // to end up back on the FPU so it can operate on them. if (state == ftos && UseSSE >= 1) { - __ subl(rsp, wordSize); + __ subptr(rsp, wordSize); __ movflt(Address(rsp, 0), xmm0); __ fld_s(Address(rsp, 0)); - __ addl(rsp, wordSize); + __ addptr(rsp, wordSize); } else if (state == dtos && UseSSE >= 2) { - __ subl(rsp, 2*wordSize); + __ subptr(rsp, 2*wordSize); __ movdbl(Address(rsp, 0), xmm0); __ fld_d(Address(rsp, 0)); - __ addl(rsp, 2*wordSize); + __ addptr(rsp, 2*wordSize); } __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 
1 : 0, "generate_return_entry_for in interpreter"); // Restore stack bottom in case i2c adjusted stack - __ movl(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); + __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); // and NULL it as marker that rsp is now tos until next java call - __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); __ restore_bcp(); __ restore_locals(); __ get_cache_and_index_at_bcp(rbx, rcx, 1); - __ movl(rbx, Address(rbx, rcx, - Address::times_4, constantPoolCacheOopDesc::base_offset() + + __ movl(rbx, Address(rbx, rcx, + Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())); - __ andl(rbx, 0xFF); - __ leal(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); + __ andptr(rbx, 0xFF); + __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); __ dispatch_next(state, step); return entry; } @@ -199,30 +196,30 @@ // In SSE mode, FP results are in xmm0 if (state == ftos && UseSSE > 0) { - __ subl(rsp, wordSize); + __ subptr(rsp, wordSize); __ movflt(Address(rsp, 0), xmm0); __ fld_s(Address(rsp, 0)); - __ addl(rsp, wordSize); + __ addptr(rsp, wordSize); } else if (state == dtos && UseSSE >= 2) { - __ subl(rsp, 2*wordSize); + __ subptr(rsp, 2*wordSize); __ movdbl(Address(rsp, 0), xmm0); __ fld_d(Address(rsp, 0)); - __ addl(rsp, 2*wordSize); + __ addptr(rsp, 2*wordSize); } __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter"); // The stack is not extended by deopt but we must NULL last_sp as this // entry is like a "return". - __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); __ restore_bcp(); __ restore_locals(); // handle exceptions { Label L; const Register thread = rcx; __ get_thread(thread); - __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); - __ jcc(Assembler::zero, L); + __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); + __ jcc(Assembler::zero, L); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception)); __ should_not_reach_here(); __ bind(L); @@ -257,14 +254,14 @@ address entry = __ pc(); switch (type) { case T_BOOLEAN: __ c2bool(rax); break; - case T_CHAR : __ andl(rax, 0xFFFF); break; + case T_CHAR : __ andptr(rax, 0xFFFF); break; case T_BYTE : __ sign_extend_byte (rax); break; case T_SHORT : __ sign_extend_short(rax); break; case T_INT : /* nothing to do */ break; case T_DOUBLE : case T_FLOAT : { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); - __ popl(t); // remove return address first + __ pop(t); // remove return address first __ pop_dtos_to_rsp(); // Must return a result for interpreter or compiler. 
In SSE // mode, results are returned in xmm0 and the FPU stack must @@ -283,13 +280,13 @@ __ fld_d(Address(rsp, 0)); } // and pop the temp - __ addl(rsp, 2 * wordSize); - __ pushl(t); // restore return address + __ addptr(rsp, 2 * wordSize); + __ push(t); // restore return address } break; case T_OBJECT : // retrieve result from frame - __ movl(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); + __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); // and verify it __ verify_oop(rax); break; @@ -325,16 +322,16 @@ const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset()); if (ProfileInterpreter) { // %%% Merge this into methodDataOop - __ increment(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset())); + __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset())); } // Update standard invocation counters - __ movl(rax, backedge_counter); // load backedge counter + __ movl(rax, backedge_counter); // load backedge counter - __ increment(rcx, InvocationCounter::count_increment); + __ incrementl(rcx, InvocationCounter::count_increment); __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits - __ movl(invocation_counter, rcx); // save invocation count - __ addl(rcx, rax); // add both counters + __ movl(invocation_counter, rcx); // save invocation count + __ addl(rcx, rax); // add both counters // profile_method is non-null only for interpreted method so // profile_method != NULL == !native_call @@ -347,7 +344,7 @@ __ jcc(Assembler::less, *profile_method_continue); // if no method data exists, go to profile_method - __ test_method_data_pointer(rax, *profile_method); + __ test_method_data_pointer(rax, *profile_method); } __ cmp32(rcx, @@ -385,13 +382,13 @@ // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp). // The call returns the address of the verified entry point for the method or NULL // if the compilation did not complete (either went background or bailed out). - __ movl(rax, (int)false); + __ movptr(rax, (int32_t)false); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax); - __ movl(rbx, Address(rbp, method_offset)); // restore methodOop + __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop - // Preserve invariant that rsi/rdi contain bcp/locals of sender frame - // and jump to the interpreted entry. + // Preserve invariant that rsi/rdi contain bcp/locals of sender frame + // and jump to the interpreted entry. __ jmp(*do_continue, relocInfo::none); } @@ -411,7 +408,7 @@ // rax, // NOTE: since the additional locals are also always pushed (wasn't obvious in - // generate_method_entry) so the guard should work for them too. + // generate_method_entry) so the guard should work for them too. 
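The generate_counter_incr changes above keep the same bookkeeping: bump the method's invocation counter, add in the masked backedge counter, and compare the sum against the compile threshold; crossing it routes to frequency_counter_overflow, which may hand the method to the compiler. A small toy model of that check (field names, the status-bit layout and the threshold are illustrative, not HotSpot's actual values):

    #include <stdint.h>

    // Toy model of the interpreter's counter check; the real counters pack
    // status bits in the low bits, hence the count_mask_value masking above.
    struct MethodCounters {
      uint32_t invocation_count;
      uint32_t backedge_count;
    };

    static const uint32_t kCountMask        = ~0x7u;       // stand-in for count_mask_value
    static const uint32_t kCompileThreshold = 10000u << 3; // illustrative threshold

    // Returns true if the caller should trigger frequency_counter_overflow.
    static bool counter_overflow(MethodCounters* m) {
      m->invocation_count += (1u << 3);                     // count_increment skips status bits
      uint32_t sum = m->invocation_count + (m->backedge_count & kCountMask);
      return sum >= kCompileThreshold;
    }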
// // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp @@ -436,7 +433,7 @@ Label after_frame_check_pop; - __ pushl(rsi); + __ push(rsi); const Register thread = rsi; @@ -446,50 +443,50 @@ const Address stack_size(thread, Thread::stack_size_offset()); // locals + overhead, in bytes - __ leal(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size)); + __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size)); #ifdef ASSERT Label stack_base_okay, stack_size_okay; // verify that thread stack base is non-zero - __ cmpl(stack_base, 0); + __ cmpptr(stack_base, (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, stack_base_okay); __ stop("stack base is zero"); __ bind(stack_base_okay); // verify that thread stack size is non-zero - __ cmpl(stack_size, 0); + __ cmpptr(stack_size, 0); __ jcc(Assembler::notEqual, stack_size_okay); __ stop("stack size is zero"); __ bind(stack_size_okay); #endif // Add stack base to locals and subtract stack size - __ addl(rax, stack_base); - __ subl(rax, stack_size); + __ addptr(rax, stack_base); + __ subptr(rax, stack_size); // Use the maximum number of pages we might bang. const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages : (StackRedPages+StackYellowPages); - __ addl(rax, max_pages * page_size); + __ addptr(rax, max_pages * page_size); // check against the current stack bottom - __ cmpl(rsp, rax); + __ cmpptr(rsp, rax); __ jcc(Assembler::above, after_frame_check_pop); - __ popl(rsi); // get saved bcp / (c++ prev state ). + __ pop(rsi); // get saved bcp / (c++ prev state ). - __ popl(rax); // get return address + __ pop(rax); // get return address __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry())); // all done with frame size check __ bind(after_frame_check_pop); - __ popl(rsi); + __ pop(rsi); __ bind(after_frame_check); } // Allocate monitor and lock method (asm interpreter) // rbx, - methodOop -// +// void InterpreterGenerator::lock_method(void) { // synchronize method const Address access_flags (rbx, methodOopDesc::access_flags_offset()); @@ -510,19 +507,19 @@ const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); __ movl(rax, access_flags); __ testl(rax, JVM_ACC_STATIC); - __ movl(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case) + __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case) __ jcc(Assembler::zero, done); - __ movl(rax, Address(rbx, methodOopDesc::constants_offset())); - __ movl(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes())); - __ movl(rax, Address(rax, mirror_offset)); + __ movptr(rax, Address(rbx, methodOopDesc::constants_offset())); + __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes())); + __ movptr(rax, Address(rax, mirror_offset)); __ bind(done); } // add space for monitor & lock - __ subl(rsp, entry_size); // add space for a monitor entry - __ movl(monitor_block_top, rsp); // set new monitor block top - __ movl(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object - __ movl(rdx, rsp); // object address - __ lock_object(rdx); + __ subptr(rsp, entry_size); // add space for a monitor entry + __ movptr(monitor_block_top, rsp); // set new monitor block top + __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object + __ mov(rdx, rsp); // object 
address + __ lock_object(rdx); } // @@ -531,38 +528,38 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { // initialize fixed part of activation frame - __ pushl(rax); // save return address + __ push(rax); // save return address __ enter(); // save old & set new rbp, - __ pushl(rsi); // set sender sp - __ pushl(NULL_WORD); // leave last_sp as null - __ movl(rsi, Address(rbx,methodOopDesc::const_offset())); // get constMethodOop - __ leal(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase - __ pushl(rbx); // save methodOop + __ push(rsi); // set sender sp + __ push((int32_t)NULL_WORD); // leave last_sp as null + __ movptr(rsi, Address(rbx,methodOopDesc::const_offset())); // get constMethodOop + __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase + __ push(rbx); // save methodOop if (ProfileInterpreter) { Label method_data_continue; - __ movl(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); - __ testl(rdx, rdx); + __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); + __ testptr(rdx, rdx); __ jcc(Assembler::zero, method_data_continue); - __ addl(rdx, in_bytes(methodDataOopDesc::data_offset())); + __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset())); __ bind(method_data_continue); - __ pushl(rdx); // set the mdp (method data pointer) + __ push(rdx); // set the mdp (method data pointer) } else { - __ pushl(0); + __ push(0); } - __ movl(rdx, Address(rbx, methodOopDesc::constants_offset())); - __ movl(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes())); - __ pushl(rdx); // set constant pool cache - __ pushl(rdi); // set locals pointer + __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset())); + __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes())); + __ push(rdx); // set constant pool cache + __ push(rdi); // set locals pointer if (native_call) { - __ pushl(0); // no bcp + __ push(0); // no bcp } else { - __ pushl(rsi); // set bcp + __ push(rsi); // set bcp } - __ pushl(0); // reserve word for pointer to expression stack bottom - __ movl(Address(rsp, 0), rsp); // set expression stack bottom + __ push(0); // reserve word for pointer to expression stack bottom + __ movptr(Address(rsp, 0), rsp); // set expression stack bottom } // End of helpers @@ -601,21 +598,21 @@ // these conditions first and use slow path if necessary. // rbx,: method // rcx: receiver - __ movl(rax, Address(rsp, wordSize)); + __ movptr(rax, Address(rsp, wordSize)); // check if local 0 != NULL and read field - __ testl(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, slow_path); - __ movl(rdi, Address(rbx, methodOopDesc::constants_offset())); + __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset())); // read first instruction word and extract bytecode @ 1 and index @ 2 - __ movl(rdx, Address(rbx, methodOopDesc::const_offset())); + __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset())); // Shift codes right to get the index on the right. 
// The bytecode fetched looks like <0xb4><0x2a> __ shrl(rdx, 2*BitsPerByte); __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); - __ movl(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes())); + __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes())); // rax,: local 0 // rbx,: method @@ -629,24 +626,24 @@ // check the validity of the cache entry by testing whether _indices field // contains Bytecode::_getfield in b1 byte. assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below"); - __ movl(rcx, - Address(rdi, - rdx, - Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); + __ movl(rcx, + Address(rdi, + rdx, + Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); __ shrl(rcx, 2*BitsPerByte); __ andl(rcx, 0xFF); __ cmpl(rcx, Bytecodes::_getfield); __ jcc(Assembler::notEqual, slow_path); // Note: constant pool entry is not valid before bytecode is resolved - __ movl(rcx, - Address(rdi, - rdx, - Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())); - __ movl(rdx, - Address(rdi, - rdx, - Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())); + __ movptr(rcx, + Address(rdi, + rdx, + Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())); + __ movl(rdx, + Address(rdi, + rdx, + Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())); Label notByte, notShort, notChar; const Address field_address (rax, rcx, Address::times_1); @@ -685,13 +682,14 @@ __ bind(okay); #endif // ASSERT // All the rest are a 32 bit wordsize - __ movl(rax, field_address); + // This is ok for now. 
Since fast accessors should be going away + __ movptr(rax, field_address); __ bind(xreturn_path); // _ireturn/_areturn - __ popl(rdi); // get return address - __ movl(rsp, rsi); // set sp to sender sp + __ pop(rdi); // get return address + __ mov(rsp, rsi); // set sp to sender sp __ jmp(rdi); // generate a vanilla interpreter entry as the slow path @@ -729,24 +727,24 @@ // native calls don't need the stack size check since they have no expression stack // and the arguments are already on the stack and we only add a handful of words - // to the stack + // to the stack // rbx,: methodOop // rcx: size of parameters // rsi: sender sp - __ popl(rax); // get return address + __ pop(rax); // get return address // for natives the size of locals is zero // compute beginning of parameters (rdi) - __ leal(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); + __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); // add 2 zero-initialized slots for native calls // NULL result handler - __ pushl(NULL_WORD); + __ push((int32_t)NULL_WORD); // NULL oop temp (mirror or jni oop result) - __ pushl(NULL_WORD); + __ push((int32_t)NULL_WORD); if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count // initialize fixed part of activation frame @@ -821,8 +819,8 @@ { Label L; const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); - __ movl(rax, monitor_block_top); - __ cmpl(rax, rsp); + __ movptr(rax, monitor_block_top); + __ cmpptr(rax, rsp); __ jcc(Assembler::equal, L); __ stop("broken stack frame setup in interpreter"); __ bind(L); @@ -835,25 +833,25 @@ // work registers const Register method = rbx; const Register thread = rdi; - const Register t = rcx; + const Register t = rcx; // allocate space for parameters __ get_method(method); __ verify_oop(method); __ load_unsigned_word(t, Address(method, methodOopDesc::size_of_parameters_offset())); - __ shll(t, Interpreter::logStackElementSize()); - __ addl(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror - __ subl(rsp, t); - __ andl(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics + __ shlptr(t, Interpreter::logStackElementSize()); + __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror + __ subptr(rsp, t); + __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics // get signature handler { Label L; - __ movl(t, Address(method, methodOopDesc::signature_handler_offset())); - __ testl(t, t); + __ movptr(t, Address(method, methodOopDesc::signature_handler_offset())); + __ testptr(t, t); __ jcc(Assembler::notZero, L); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); __ get_method(method); - __ movl(t, Address(method, methodOopDesc::signature_handler_offset())); + __ movptr(t, Address(method, methodOopDesc::signature_handler_offset())); __ bind(L); } @@ -862,7 +860,7 @@ assert(InterpreterRuntime::SignatureHandlerGenerator::to () == rsp, "adjust this code"); assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t , "adjust this code"); // The generated handlers do not touch RBX (the method oop). - // However, large signatures cannot be cached and are generated + // However, large signatures cannot be cached and are generated // each time here. The slow-path generator will blow RBX // sometime, so we must reload it after the call. 
__ call(t); @@ -870,7 +868,7 @@ // result handler is in rax, // set result handler - __ movl(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax); + __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax); // pass mirror handle if static call { Label L; @@ -879,34 +877,34 @@ __ testl(t, JVM_ACC_STATIC); __ jcc(Assembler::zero, L); // get mirror - __ movl(t, Address(method, methodOopDesc:: constants_offset())); - __ movl(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes())); - __ movl(t, Address(t, mirror_offset)); + __ movptr(t, Address(method, methodOopDesc:: constants_offset())); + __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes())); + __ movptr(t, Address(t, mirror_offset)); // copy mirror into activation frame - __ movl(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t); + __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t); // pass handle to mirror - __ leal(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); - __ movl(Address(rsp, wordSize), t); + __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); + __ movptr(Address(rsp, wordSize), t); __ bind(L); } // get native function entry point { Label L; - __ movl(rax, Address(method, methodOopDesc::native_function_offset())); + __ movptr(rax, Address(method, methodOopDesc::native_function_offset())); ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); - __ cmp32(rax, unsatisfied.addr()); + __ cmpptr(rax, unsatisfied.addr()); __ jcc(Assembler::notEqual, L); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); __ get_method(method); __ verify_oop(method); - __ movl(rax, Address(method, methodOopDesc::native_function_offset())); + __ movptr(rax, Address(method, methodOopDesc::native_function_offset())); __ bind(L); } // pass JNIEnv __ get_thread(thread); - __ leal(t, Address(thread, JavaThread::jni_environment_offset())); - __ movl(Address(rsp, 0), t); + __ lea(t, Address(thread, JavaThread::jni_environment_offset())); + __ movptr(Address(rsp, 0), t); // set_last_Java_frame_before_call // It is enough that the pc() @@ -924,8 +922,8 @@ } #endif - // Change state to native - __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native); + // Change state to native + __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native); __ call(rax); // result potentially in rdx:rax or ST0 @@ -937,14 +935,14 @@ __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std())); } else if (CheckJNICalls ) { - __ call(RuntimeAddress(StubRoutines::i486::verify_mxcsr_entry())); + __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); } } // Either restore the x87 floating pointer control word after returning // from the JNI call or verify that it wasn't changed. 
if (CheckJNICalls) { - __ call(RuntimeAddress(StubRoutines::i486::verify_fpu_cntrl_wrd_entry())); + __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry())); } // save potential result in ST(0) & rdx:rax @@ -961,14 +959,14 @@ Label push_double; ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT)); ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE)); - __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize), + __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize), float_handler.addr()); __ jcc(Assembler::equal, push_double); - __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize), + __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize), double_handler.addr()); __ jcc(Assembler::notEqual, L); __ bind(push_double); - __ push(dtos); + __ push(dtos); __ bind(L); } __ push(ltos); @@ -976,9 +974,12 @@ // change thread state __ get_thread(thread); __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans); - if(os::is_MP()) { + if(os::is_MP()) { if (UseMembar) { - __ membar(); // Force this write out before the read below + // Force this write out before the read below + __ membar(Assembler::Membar_mask_bits( + Assembler::LoadLoad | Assembler::LoadStore | + Assembler::StoreLoad | Assembler::StoreStore)); } else { // Write serialization page so VM thread can do a pseudo remote membar. // We use the current thread pointer to calculate a thread specific @@ -989,7 +990,7 @@ } if (AlwaysRestoreFPU) { - // Make sure the control word is correct. + // Make sure the control word is correct. __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); } @@ -1011,7 +1012,7 @@ // preserved and correspond to the bcp/locals pointers. So we do a runtime call // by hand. 
// - __ pushl(thread); + __ push(thread); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); __ increment(rsp, wordSize); @@ -1026,24 +1027,24 @@ __ reset_last_Java_frame(thread, true, true); // reset handle block - __ movl(t, Address(thread, JavaThread::active_handles_offset())); - __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), 0); + __ movptr(t, Address(thread, JavaThread::active_handles_offset())); + __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); // If result was an oop then unbox and save it in the frame { Label L; Label no_oop, store_result; ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT)); - __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), + __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), handler.addr()); __ jcc(Assembler::notEqual, no_oop); - __ cmpl(Address(rsp, 0), NULL_WORD); + __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD); __ pop(ltos); - __ testl(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, store_result); // unbox - __ movl(rax, Address(rax, 0)); + __ movptr(rax, Address(rax, 0)); __ bind(store_result); - __ movl(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax); + __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax); // keep stack depth as expected by pushing oop which will eventually be discarded __ push(ltos); __ bind(no_oop); @@ -1054,24 +1055,24 @@ __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled); __ jcc(Assembler::notEqual, no_reguard); - __ pushad(); + __ pusha(); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); - __ popad(); + __ popa(); __ bind(no_reguard); } - // restore rsi to have legal interpreter frame, + // restore rsi to have legal interpreter frame, // i.e., bci == 0 <=> rsi == code_base() // Can't call_VM until bcp is within reasonable. __ get_method(method); // method is junk from thread_in_native to now. __ verify_oop(method); - __ movl(rsi, Address(method,methodOopDesc::const_offset())); // get constMethodOop - __ leal(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase + __ movptr(rsi, Address(method,methodOopDesc::const_offset())); // get constMethodOop + __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase // handle exceptions (exception handling will handle unlocking!) { Label L; - __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); + __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::zero, L); // Note: At some point we may want to unify this with the code used in call_VM_base(); // i.e., we should use the StubRoutines::forward_exception code. For now this @@ -1089,24 +1090,24 @@ // the code below should be shared with interpreter macro assembler implementation { Label unlock; // BasicObjectLock will be first in list, since this is a synchronized method. However, need - // to check that the object has not been unlocked by an explicit monitorexit bytecode. + // to check that the object has not been unlocked by an explicit monitorexit bytecode. 
const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock)); - __ leal(rdx, monitor); // address of first monitor + __ lea(rdx, monitor); // address of first monitor - __ movl(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); - __ testl(t, t); + __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); + __ testptr(t, t); __ jcc(Assembler::notZero, unlock); - + // Entry already unlocked, need to throw exception __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); __ should_not_reach_here(); - - __ bind(unlock); - __ unlock_object(rdx); + + __ bind(unlock); + __ unlock_object(rdx); } __ bind(L); - } + } // jvmti/dtrace support // Note: This must happen _after_ handling/throwing any exceptions since @@ -1117,14 +1118,14 @@ // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result __ pop(ltos); - __ movl(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); + __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); __ call(t); // remove activation - __ movl(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp + __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp __ leave(); // remove frame anchor - __ popl(rdi); // get return address - __ movl(rsp, t); // set sp to sender sp + __ pop(rdi); // get return address + __ mov(rsp, t); // set sp to sender sp __ jmp(rdi); if (inc_counter) { @@ -1168,10 +1169,10 @@ generate_stack_overflow_check(); // get return address - __ popl(rax); + __ pop(rax); // compute beginning of parameters (rdi) - __ leal(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); + __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); // rdx - # of additional locals // allocate space for locals @@ -1181,8 +1182,10 @@ __ testl(rdx, rdx); __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 __ bind(loop); - if (TaggedStackInterpreter) __ pushl(NULL_WORD); // push tag - __ pushl(NULL_WORD); // initialize local variables + if (TaggedStackInterpreter) { + __ push((int32_t)NULL_WORD); // push tag + } + __ push((int32_t)NULL_WORD); // initialize local variables __ decrement(rdx); // until everything initialized __ jcc(Assembler::greater, loop); __ bind(exit); @@ -1265,8 +1268,8 @@ { Label L; const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); - __ movl(rax, monitor_block_top); - __ cmpl(rax, rsp); + __ movptr(rax, monitor_block_top); + __ cmpptr(rax, rsp); __ jcc(Assembler::equal, L); __ stop("broken stack frame setup in interpreter"); __ bind(L); @@ -1275,7 +1278,7 @@ // jvmti support __ notify_method_entry(); - + __ dispatch_next(vtos); // invocation counter overflow @@ -1286,12 +1289,12 @@ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi, true); - __ movl(rbx, Address(rbp, method_offset)); // restore methodOop - __ movl(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); - __ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax); + __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop + __ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); + __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax); __ test_method_data_pointer(rax, 
profile_method_continue); - __ addl(rax, in_bytes(methodDataOopDesc::data_offset())); - __ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax); + __ addptr(rax, in_bytes(methodDataOopDesc::data_offset())); + __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax); __ jmp(profile_method_continue); } // Handle overflow of counter and compile method @@ -1304,7 +1307,7 @@ //------------------------------------------------------------------------------------------------------------------------ // Entry points -// +// // Here we generate the various kind of entries into the interpreter. // The two main entry type are generic bytecode methods and native call method. // These both come in synchronized and non-synchronized versions but the @@ -1359,7 +1362,7 @@ bool synchronized = false; address entry_point = NULL; - switch (kind) { + switch (kind) { case Interpreter::zerolocals : break; case Interpreter::zerolocals_synchronized: synchronized = true; break; case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break; @@ -1420,7 +1423,7 @@ // as determined by a previous call to this method. // It is also guaranteed to be walkable even though it is in a skeletal state // NOTE: return size is in words not bytes - + // fixed size of an interpreter frame: int max_locals = method->max_locals() * Interpreter::stackElementWords(); int extra_locals = (method->max_locals() - method->size_of_parameters()) * @@ -1432,10 +1435,10 @@ // Since the callee parameters already account for the callee's params we only need to account for // the extra locals. - + int size = overhead + - ((callee_locals - callee_param_count)*Interpreter::stackElementWords()) + - (moncount*frame::interpreter_frame_monitor_size()) + + ((callee_locals - callee_param_count)*Interpreter::stackElementWords()) + + (moncount*frame::interpreter_frame_monitor_size()) + tempcount*Interpreter::stackElementWords() + popframe_extra_args; if (interpreter_frame != NULL) { @@ -1455,9 +1458,9 @@ BasicObjectLock* monbot = montop - moncount; interpreter_frame->interpreter_frame_set_monitor_end(monbot); - // Set last_sp - intptr_t* rsp = (intptr_t*) monbot - - tempcount*Interpreter::stackElementWords() - + // Set last_sp + intptr_t* rsp = (intptr_t*) monbot - + tempcount*Interpreter::stackElementWords() - popframe_extra_args; interpreter_frame->interpreter_frame_set_last_sp(rsp); @@ -1465,11 +1468,11 @@ // value for sender_sp that allows walking the stack but isn't // truly correct. Correct the value here. - if (extra_locals != 0 && + if (extra_locals != 0 && interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) { interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals); } - *interpreter_frame->interpreter_frame_cache_addr() = + *interpreter_frame->interpreter_frame_cache_addr() = method->constants()->cache(); } return size; @@ -1484,15 +1487,15 @@ Interpreter::_rethrow_exception_entry = __ pc(); // Restore sp to interpreter_frame_last_sp even though we are going - // to empty the expression stack for the exception processing. - __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + // to empty the expression stack for the exception processing. 
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); // rax,: exception // rdx: return address/pc that threw exception __ restore_bcp(); // rsi points to call/send __ restore_locals(); // Entry point for exceptions thrown within interpreter code - Interpreter::_throw_exception_entry = __ pc(); + Interpreter::_throw_exception_entry = __ pc(); // expression stack is undefined here // rax,: exception // rsi: exception bcp @@ -1547,25 +1550,25 @@ // deoptimization blob's unpack entry because of the presence of // adapter frames in C2. Label caller_not_deoptimized; - __ movl(rdx, Address(rbp, frame::return_addr_offset * wordSize)); + __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize)); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx); __ testl(rax, rax); __ jcc(Assembler::notZero, caller_not_deoptimized); // Compute size of arguments for saving when returning to deoptimized caller __ get_method(rax); - __ verify_oop(rax); + __ verify_oop(rax); __ load_unsigned_word(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset()))); - __ shll(rax, Interpreter::logStackElementSize()); + __ shlptr(rax, Interpreter::logStackElementSize()); __ restore_locals(); - __ subl(rdi, rax); - __ addl(rdi, wordSize); + __ subptr(rdi, rax); + __ addptr(rdi, wordSize); // Save these arguments __ get_thread(rcx); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), rcx, rax, rdi); - __ remove_activation(vtos, rdx, - /* throw_monitor_exception */ false, + __ remove_activation(vtos, rdx, + /* throw_monitor_exception */ false, /* install_monitor_exception */ false, /* notify_jvmdi */ false); @@ -1579,8 +1582,8 @@ __ bind(caller_not_deoptimized); } - __ remove_activation(vtos, rdx, - /* throw_monitor_exception */ false, + __ remove_activation(vtos, rdx, + /* throw_monitor_exception */ false, /* install_monitor_exception */ false, /* notify_jvmdi */ false); @@ -1595,8 +1598,8 @@ // maintain this kind of invariant all the time we call a small // fixup routine to move the mutated arguments onto the top of our // expression stack if necessary. 
- __ movl(rax, rsp); - __ movl(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); + __ mov(rax, rsp); + __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ get_thread(rcx); // PC must point into interpreter here __ set_last_Java_frame(rcx, noreg, rbp, __ pc()); @@ -1604,8 +1607,8 @@ __ get_thread(rcx); __ reset_last_Java_frame(rcx, true, true); // Restore the last_sp and null it out - __ movl(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); - __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); __ restore_bcp(); __ restore_locals(); @@ -1623,17 +1626,17 @@ // end of PopFrame support Interpreter::_remove_activation_entry = __ pc(); - + // preserve exception over this code sequence __ pop_ptr(rax); __ get_thread(rcx); - __ movl(Address(rcx, JavaThread::vm_result_offset()), rax); + __ movptr(Address(rcx, JavaThread::vm_result_offset()), rax); // remove the activation (without doing throws on illegalMonitorExceptions) __ remove_activation(vtos, rdx, false, true, false); // restore exception __ get_thread(rcx); - __ movl(rax, Address(rcx, JavaThread::vm_result_offset())); - __ movl(Address(rcx, JavaThread::vm_result_offset()), NULL_WORD); + __ movptr(rax, Address(rcx, JavaThread::vm_result_offset())); + __ movptr(Address(rcx, JavaThread::vm_result_offset()), (int32_t)NULL_WORD); __ verify_oop(rax); // Inbetween activations - previous activation type unknown yet @@ -1644,12 +1647,12 @@ // rdx: return address/pc that threw exception // rsp: expression stack of caller // rbp,: rbp, of caller - __ pushl(rax); // save exception - __ pushl(rdx); // save return address + __ push(rax); // save exception + __ push(rdx); // save return address __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx); - __ movl(rbx, rax); // save exception handler - __ popl(rdx); // restore return address - __ popl(rax); // restore exception + __ mov(rbx, rax); // save exception handler + __ pop(rdx); // restore return address + __ pop(rax); // restore exception // Note that an "issuing PC" is actually the next PC after the call __ jmp(rbx); // jump to exception handler of caller } @@ -1659,7 +1662,7 @@ // JVMTI ForceEarlyReturn support // address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { - address entry = __ pc(); + address entry = __ pc(); __ restore_bcp(); __ restore_locals(); @@ -1668,7 +1671,7 @@ __ load_earlyret_value(state); __ get_thread(rcx); - __ movl(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset())); + __ movptr(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset())); const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset()); // Clear the earlyret state @@ -1693,7 +1696,7 @@ dep = __ pc(); __ push(dtos); __ jmp(L); lep = __ pc(); __ push(ltos); __ jmp(L); aep = __ pc(); __ push(atos); __ jmp(L); - bep = cep = sep = // fall through + bep = cep = sep = // fall through iep = __ pc(); __ push(itos); // fall through vep = __ pc(); __ bind(L); // fall through generate_and_dispatch(t); @@ -1706,7 +1709,7 @@ -InterpreterGenerator::InterpreterGenerator(StubQueue* code) +InterpreterGenerator::InterpreterGenerator(StubQueue* code) : TemplateInterpreterGenerator(code) { generate_all(); // down here so it can be "virtual" } @@ 
-1719,12 +1722,12 @@ address entry = __ pc(); // prepare expression stack - __ popl(rcx); // pop return address so expression stack is 'pure' + __ pop(rcx); // pop return address so expression stack is 'pure' __ push(state); // save tosca // pass tosca registers as arguments & call tracer __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx); - __ movl(rcx, rax); // make sure return address is not destroyed by pop(state) + __ mov(rcx, rax); // make sure return address is not destroyed by pop(state) __ pop(state); // restore tosca // return @@ -1734,23 +1737,23 @@ } -void TemplateInterpreterGenerator::count_bytecode() { - __ increment(ExternalAddress((address) &BytecodeCounter::_counter_value)); +void TemplateInterpreterGenerator::count_bytecode() { + __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value)); } -void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { - __ increment(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()])); +void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { + __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()])); } -void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { - __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx); +void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { + __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx); __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes); __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes); ExternalAddress table((address) BytecodePairHistogram::_counters); Address index(noreg, rbx, Address::times_4); - __ increment(ArrayAddress(table, index)); + __ incrementl(ArrayAddress(table, index)); } --- old/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp 2009-08-01 04:08:20.858940364 +0100 +++ new/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp 2009-08-01 04:08:20.764896127 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)templateInterpreter_x86_64.cpp 1.2 07/09/17 09:23:13 JVM" -#endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ #include "incls/_precompiled.incl" @@ -30,6 +27,8 @@ #define __ _masm-> +#ifndef CC_INTERP + const int method_offset = frame::interpreter_frame_method_offset * wordSize; const int bci_offset = frame::interpreter_frame_bcx_offset * wordSize; const int locals_offset = frame::interpreter_frame_locals_offset * wordSize; @@ -42,11 +41,11 @@ #ifdef ASSERT { Label L; - __ leaq(rax, Address(rbp, - frame::interpreter_frame_monitor_block_top_offset * - wordSize)); - __ cmpq(rax, rsp); // rax = maximal rsp for current rbp (stack - // grows negative) + __ lea(rax, Address(rbp, + frame::interpreter_frame_monitor_block_top_offset * + wordSize)); + __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack + // grows negative) __ jcc(Assembler::aboveEqual, L); // check if frame is complete __ stop ("interpreter frame not set up"); __ bind(L); @@ -87,7 +86,7 @@ address entry = __ pc(); // object is at TOS - __ popq(c_rarg1); + __ pop(c_rarg1); // expression stack must be empty before entering the VM if an // exception happened @@ -107,7 +106,7 @@ address entry = __ pc(); if (pass_oop) { // object is at TOS - __ popq(c_rarg2); + __ pop(c_rarg2); } // expression stack must be empty before entering the VM if an // exception happened @@ -140,7 +139,7 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state) { address entry = __ pc(); // NULL last_sp until next java call - __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); __ dispatch_next(state); return entry; } @@ -156,12 +155,13 @@ address entry = __ pc(); // Restore stack bottom in case i2c adjusted stack - __ movq(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); + __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); // and NULL it as marker that esp is now tos until next java call - __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); __ restore_bcp(); __ restore_locals(); + __ get_cache_and_index_at_bcp(rbx, rcx, 1); __ movl(rbx, Address(rbx, rcx, Address::times_8, @@ -169,7 +169,7 @@ 3 * wordSize)); __ andl(rbx, 0xFF); if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter. 
- __ leaq(rsp, Address(rsp, rbx, Address::times_8)); + __ lea(rsp, Address(rsp, rbx, Address::times_8)); __ dispatch_next(state, step); return entry; } @@ -179,13 +179,13 @@ int step) { address entry = __ pc(); // NULL last_sp until next java call - __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); __ restore_bcp(); __ restore_locals(); // handle exceptions { Label L; - __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL); + __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); __ jcc(Assembler::zero, L); __ call_VM(noreg, CAST_FROM_FN_PTR(address, @@ -232,9 +232,9 @@ case T_VOID : /* nothing to do */ break; case T_FLOAT : /* nothing to do */ break; case T_DOUBLE : /* nothing to do */ break; - case T_OBJECT : + case T_OBJECT : // retrieve result from frame - __ movq(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); + __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); // and verify it __ verify_oop(rax); break; @@ -303,7 +303,7 @@ __ jcc(Assembler::less, *profile_method_continue); // if no method data exists, go to profile_method - __ test_method_data_pointer(rax, *profile_method); + __ test_method_data_pointer(rax, *profile_method); } __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); @@ -339,7 +339,7 @@ InterpreterRuntime::frequency_counter_overflow), c_rarg1); - __ movq(rbx, Address(rbp, method_offset)); // restore methodOop + __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop // Preserve invariant that r13/r14 contain bcp/locals of sender frame // and jump to the interpreted entry. __ jmp(*do_continue, relocInfo::none); @@ -388,36 +388,36 @@ const Address stack_size(r15_thread, Thread::stack_size_offset()); // locals + overhead, in bytes - __ movq(rax, rdx); - __ shll(rax, Interpreter::logStackElementSize()); // 2 slots per parameter. - __ addq(rax, overhead_size); + __ mov(rax, rdx); + __ shlptr(rax, Interpreter::logStackElementSize()); // 2 slots per parameter. 
+ __ addptr(rax, overhead_size); #ifdef ASSERT Label stack_base_okay, stack_size_okay; // verify that thread stack base is non-zero - __ cmpq(stack_base, 0); + __ cmpptr(stack_base, (int32_t)NULL_WORD); __ jcc(Assembler::notEqual, stack_base_okay); __ stop("stack base is zero"); __ bind(stack_base_okay); // verify that thread stack size is non-zero - __ cmpq(stack_size, 0); + __ cmpptr(stack_size, 0); __ jcc(Assembler::notEqual, stack_size_okay); __ stop("stack size is zero"); __ bind(stack_size_okay); #endif // Add stack base to locals and subtract stack size - __ addq(rax, stack_base); - __ subq(rax, stack_size); + __ addptr(rax, stack_base); + __ subptr(rax, stack_size); // add in the red and yellow zone sizes - __ addq(rax, (StackRedPages + StackYellowPages) * page_size); + __ addptr(rax, (StackRedPages + StackYellowPages) * page_size); // check against the current stack bottom - __ cmpq(rsp, rax); + __ cmpptr(rsp, rax); __ jcc(Assembler::above, after_frame_check); - __ popq(rax); // get return address + __ pop(rax); // get return address __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry())); // all done with frame size check @@ -429,7 +429,7 @@ // Args: // rbx: methodOop // r14: locals -// +// // Kills: // rax // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs) @@ -461,17 +461,17 @@ __ movl(rax, access_flags); __ testl(rax, JVM_ACC_STATIC); // get receiver (assume this is frequent case) - __ movq(rax, Address(r14, Interpreter::local_offset_in_bytes(0))); + __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0))); __ jcc(Assembler::zero, done); - __ movq(rax, Address(rbx, methodOopDesc::constants_offset())); - __ movq(rax, Address(rax, - constantPoolOopDesc::pool_holder_offset_in_bytes())); - __ movq(rax, Address(rax, mirror_offset)); + __ movptr(rax, Address(rbx, methodOopDesc::constants_offset())); + __ movptr(rax, Address(rax, + constantPoolOopDesc::pool_holder_offset_in_bytes())); + __ movptr(rax, Address(rax, mirror_offset)); #ifdef ASSERT { Label L; - __ testq(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::notZero, L); __ stop("synchronization object is NULL"); __ bind(L); @@ -482,11 +482,11 @@ } // add space for monitor & lock - __ subq(rsp, entry_size); // add space for a monitor entry - __ movq(monitor_block_top, rsp); // set new monitor block top + __ subptr(rsp, entry_size); // add space for a monitor entry + __ movptr(monitor_block_top, rsp); // set new monitor block top // store object - __ movq(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); - __ movq(c_rarg1, rsp); // object address + __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); + __ movptr(c_rarg1, rsp); // object address __ lock_object(c_rarg1); } @@ -501,40 +501,187 @@ // rdx: cp cache void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { // initialize fixed part of activation frame - __ pushq(rax); // save return address + __ push(rax); // save return address __ enter(); // save old & set new rbp - __ pushq(r13); // set sender sp - __ pushq((int)NULL_WORD); // leave last_sp as null - __ movq(r13, Address(rbx, methodOopDesc::const_offset())); // get constMethodOop - __ leaq(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase - __ pushq(rbx); // save methodOop + __ push(r13); // set sender sp + __ push((int)NULL_WORD); // leave last_sp as null + __ movptr(r13, Address(rbx, methodOopDesc::const_offset())); // get constMethodOop + __ lea(r13, Address(r13, constMethodOopDesc::codes_offset())); // get 
codebase + __ push(rbx); // save methodOop if (ProfileInterpreter) { Label method_data_continue; - __ movq(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); - __ testq(rdx, rdx); + __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); + __ testptr(rdx, rdx); __ jcc(Assembler::zero, method_data_continue); - __ addq(rdx, in_bytes(methodDataOopDesc::data_offset())); + __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset())); __ bind(method_data_continue); - __ pushq(rdx); // set the mdp (method data pointer) + __ push(rdx); // set the mdp (method data pointer) } else { - __ pushq(0); + __ push(0); } - __ movq(rdx, Address(rbx, methodOopDesc::constants_offset())); - __ movq(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes())); - __ pushq(rdx); // set constant pool cache - __ pushq(r14); // set locals pointer + __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset())); + __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes())); + __ push(rdx); // set constant pool cache + __ push(r14); // set locals pointer if (native_call) { - __ pushq(0); // no bcp + __ push(0); // no bcp } else { - __ pushq(r13); // set bcp + __ push(r13); // set bcp } - __ pushq(0); // reserve word for pointer to expression stack bottom - __ movq(Address(rsp, 0), rsp); // set expression stack bottom + __ push(0); // reserve word for pointer to expression stack bottom + __ movptr(Address(rsp, 0), rsp); // set expression stack bottom } // End of helpers +// Various method entries +//------------------------------------------------------------------------------------------------------------------------ +// +// + +// Call an accessor method (assuming it is resolved, otherwise drop +// into vanilla (slow path) entry +address InterpreterGenerator::generate_accessor_entry(void) { + // rbx: methodOop + + // r13: senderSP must preserver for slow path, set SP to it on fast path + + address entry_point = __ pc(); + Label xreturn_path; + + // do fastpath for resolved accessor methods + if (UseFastAccessorMethods) { + // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites + // thereof; parameter size = 1 + // Note: We can only use this code if the getfield has been resolved + // and if we don't have a null-pointer exception => check for + // these conditions first and use slow path if necessary. + Label slow_path; + // If we need a safepoint check, generate full interpreter entry. + __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), + SafepointSynchronize::_not_synchronized); + + __ jcc(Assembler::notEqual, slow_path); + // rbx: method + __ movptr(rax, Address(rsp, wordSize)); + + // check if local 0 != NULL and read field + __ testptr(rax, rax); + __ jcc(Assembler::zero, slow_path); + + __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset())); + // read first instruction word and extract bytecode @ 1 and index @ 2 + __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); + __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset())); + // Shift codes right to get the index on the right. 
+ // The bytecode fetched looks like <0xb4><0x2a> + __ shrl(rdx, 2 * BitsPerByte); + __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); + __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes())); + + // rax: local 0 + // rbx: method + // rdx: constant pool cache index + // rdi: constant pool cache + + // check if getfield has been resolved and read constant pool cache entry + // check the validity of the cache entry by testing whether _indices field + // contains Bytecode::_getfield in b1 byte. + assert(in_words(ConstantPoolCacheEntry::size()) == 4, + "adjust shift below"); + __ movl(rcx, + Address(rdi, + rdx, + Address::times_8, + constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::indices_offset())); + __ shrl(rcx, 2 * BitsPerByte); + __ andl(rcx, 0xFF); + __ cmpl(rcx, Bytecodes::_getfield); + __ jcc(Assembler::notEqual, slow_path); + + // Note: constant pool entry is not valid before bytecode is resolved + __ movptr(rcx, + Address(rdi, + rdx, + Address::times_8, + constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::f2_offset())); + // edx: flags + __ movl(rdx, + Address(rdi, + rdx, + Address::times_8, + constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::flags_offset())); + + Label notObj, notInt, notByte, notShort; + const Address field_address(rax, rcx, Address::times_1); + + // Need to differentiate between igetfield, agetfield, bgetfield etc. + // because they are different sizes. + // Use the type from the constant pool cache + __ shrl(rdx, ConstantPoolCacheEntry::tosBits); + // Make sure we don't need to mask edx for tosBits after the above shift + ConstantPoolCacheEntry::verify_tosBits(); + + __ cmpl(rdx, atos); + __ jcc(Assembler::notEqual, notObj); + // atos + __ load_heap_oop(rax, field_address); + __ jmp(xreturn_path); + + __ bind(notObj); + __ cmpl(rdx, itos); + __ jcc(Assembler::notEqual, notInt); + // itos + __ movl(rax, field_address); + __ jmp(xreturn_path); + + __ bind(notInt); + __ cmpl(rdx, btos); + __ jcc(Assembler::notEqual, notByte); + // btos + __ load_signed_byte(rax, field_address); + __ jmp(xreturn_path); + + __ bind(notByte); + __ cmpl(rdx, stos); + __ jcc(Assembler::notEqual, notShort); + // stos + __ load_signed_word(rax, field_address); + __ jmp(xreturn_path); + + __ bind(notShort); +#ifdef ASSERT + Label okay; + __ cmpl(rdx, ctos); + __ jcc(Assembler::equal, okay); + __ stop("what type is this?"); + __ bind(okay); +#endif + // ctos + __ load_unsigned_word(rax, field_address); + + __ bind(xreturn_path); + + // _ireturn/_areturn + __ pop(rdi); + __ mov(rsp, r13); + __ jmp(rdi); + __ ret(0); + + // generate a vanilla interpreter entry as the slow path + __ bind(slow_path); + (void) generate_normal_entry(false); + } else { + (void) generate_normal_entry(false); + } + + return entry_point; +} + // Interpreter stub for calling a native method. (asm interpreter) // This sets up a somewhat different looking stack for calling the // native method than the typical interpreter frame setup. @@ -564,20 +711,20 @@ // rbx: methodOop // rcx: size of parameters // r13: sender sp - __ popq(rax); // get return address + __ pop(rax); // get return address // for natives the size of locals is zero // compute beginning of parameters (r14) if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter. 
- __ leaq(r14, Address(rsp, rcx, Address::times_8, -wordSize)); + __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize)); // add 2 zero-initialized slots for native calls // initialize result_handler slot - __ pushq((int) NULL); - // slot for oop temp + __ push((int) NULL_WORD); + // slot for oop temp // (static native method holder mirror/jni oop result) - __ pushq((int) NULL); + __ push((int) NULL_WORD); if (inc_counter) { __ movl(rcx, invocation_counter); // (pre-)fetch invocation count @@ -654,8 +801,8 @@ Label L; const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); - __ movq(rax, monitor_block_top); - __ cmpq(rax, rsp); + __ movptr(rax, monitor_block_top); + __ cmpptr(rax, rsp); __ jcc(Assembler::equal, L); __ stop("broken stack frame setup in interpreter"); __ bind(L); @@ -667,7 +814,7 @@ // work registers const Register method = rbx; - const Register t = r12; + const Register t = r11; // allocate space for parameters __ get_method(method); @@ -677,22 +824,22 @@ methodOopDesc::size_of_parameters_offset())); __ shll(t, Interpreter::logStackElementSize()); - __ subq(rsp, t); - __ subq(rsp, frame::arg_reg_save_area_bytes); // windows - __ andq(rsp, -16); // must be 16 byte boundry (see amd64 ABI) + __ subptr(rsp, t); + __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows + __ andptr(rsp, -16); // must be 16 byte boundry (see amd64 ABI) // get signature handler { Label L; - __ movq(t, Address(method, methodOopDesc::signature_handler_offset())); - __ testq(t, t); + __ movptr(t, Address(method, methodOopDesc::signature_handler_offset())); + __ testptr(t, t); __ jcc(Assembler::notZero, L); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); __ get_method(method); - __ movq(t, Address(method, methodOopDesc::signature_handler_offset())); + __ movptr(t, Address(method, methodOopDesc::signature_handler_offset())); __ bind(L); } @@ -714,9 +861,9 @@ // result handler is in rax // set result handler - __ movq(Address(rbp, - (frame::interpreter_frame_result_handler_offset) * wordSize), - rax); + __ movptr(Address(rbp, + (frame::interpreter_frame_result_handler_offset) * wordSize), + rax); // pass mirror handle if static call { @@ -727,25 +874,25 @@ __ testl(t, JVM_ACC_STATIC); __ jcc(Assembler::zero, L); // get mirror - __ movq(t, Address(method, methodOopDesc::constants_offset())); - __ movq(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes())); - __ movq(t, Address(t, mirror_offset)); + __ movptr(t, Address(method, methodOopDesc::constants_offset())); + __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes())); + __ movptr(t, Address(t, mirror_offset)); // copy mirror into activation frame - __ movq(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), + __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t); // pass handle to mirror - __ leaq(c_rarg1, - Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); + __ lea(c_rarg1, + Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); __ bind(L); } // get native function entry point { Label L; - __ movq(rax, Address(method, methodOopDesc::native_function_offset())); + __ movptr(rax, Address(method, methodOopDesc::native_function_offset())); ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); __ movptr(rscratch2, unsatisfied.addr()); - __ cmpq(rax, rscratch2); + __ cmpptr(rax, rscratch2); __ 
jcc(Assembler::notEqual, L); __ call_VM(noreg, CAST_FROM_FN_PTR(address, @@ -753,12 +900,12 @@ method); __ get_method(method); __ verify_oop(method); - __ movq(rax, Address(method, methodOopDesc::native_function_offset())); + __ movptr(rax, Address(method, methodOopDesc::native_function_offset())); __ bind(L); } // pass JNIEnv - __ leaq(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset())); + __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset())); // It is enough that the pc() points into the right code // segment. It does not have to be the correct return pc. @@ -776,7 +923,7 @@ } #endif - // Change state to native + // Change state to native __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native); @@ -785,19 +932,19 @@ __ call(rax); // result potentially in rax or xmm0 - // Depending on runtime options, either restore the MXCSR - // register after returning from the JNI Call or verify that + // Depending on runtime options, either restore the MXCSR + // register after returning from the JNI Call or verify that // it wasn't changed during -Xcheck:jni. if (RestoreMXCSROnJNICalls) { - __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std())); + __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std())); } else if (CheckJNICalls) { - __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry()))); + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry()))); } // NOTE: The order of these pushes is known to frame::interpreter_frame_result // in order to extract the result of a method call. If the order of these - // pushes change or anything else is added to the stack then the code in + // pushes change or anything else is added to the stack then the code in // interpreter_frame_result must also change. __ push(dtos); @@ -811,7 +958,7 @@ if (UseMembar) { // Force this write out before the read below __ membar(Assembler::Membar_mask_bits( - Assembler::LoadLoad | Assembler::LoadStore | + Assembler::LoadLoad | Assembler::LoadStore | Assembler::StoreLoad | Assembler::StoreStore)); } else { // Write serialization page so VM thread can do a pseudo remote membar. @@ -841,12 +988,13 @@ // preserved and correspond to the bcp/locals pointers. So we do a // runtime call by hand. 
// - __ movq(c_rarg0, r15_thread); - __ movq(r12, rsp); // remember sp - __ subq(rsp, frame::arg_reg_save_area_bytes); // windows - __ andq(rsp, -16); // align stack as required by ABI + __ mov(c_rarg0, r15_thread); + __ mov(r12, rsp); // remember sp + __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows + __ andptr(rsp, -16); // align stack as required by ABI __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); - __ movq(rsp, r12); // restore sp + __ mov(rsp, r12); // restore sp + __ reinit_heapbase(); __ bind(Continue); } @@ -857,8 +1005,8 @@ __ reset_last_Java_frame(true, true); // reset handle block - __ movq(t, Address(r15_thread, JavaThread::active_handles_offset())); - __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD); + __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset())); + __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); // If result is an oop unbox and store it in frame where gc will see it // and result handler will pick it up @@ -866,15 +1014,15 @@ { Label no_oop, store_result; __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT))); - __ cmpq(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); + __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); __ jcc(Assembler::notEqual, no_oop); // retrieve result __ pop(ltos); - __ testq(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, store_result); - __ movq(rax, Address(rax, 0)); + __ movptr(rax, Address(rax, 0)); __ bind(store_result); - __ movq(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax); + __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax); // keep stack depth as expected by pushing oop which will eventually be discarde __ push(ltos); __ bind(no_oop); @@ -887,18 +1035,19 @@ JavaThread::stack_guard_yellow_disabled); __ jcc(Assembler::notEqual, no_reguard); - __ pushaq(); // XXX only save smashed registers - __ movq(r12, rsp); // remember sp - __ subq(rsp, frame::arg_reg_save_area_bytes); // windows - __ andq(rsp, -16); // align stack as required by ABI + __ pusha(); // XXX only save smashed registers + __ mov(r12, rsp); // remember sp + __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows + __ andptr(rsp, -16); // align stack as required by ABI __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); - __ movq(rsp, r12); // restore sp - __ popaq(); // XXX only restore smashed registers + __ mov(rsp, r12); // restore sp + __ popa(); // XXX only restore smashed registers + __ reinit_heapbase(); __ bind(no_reguard); } - + // The method register is junk from after the thread_in_native transition // until here. Also can't call_VM until the bcp has been // restored. Need bcp for throwing exception below so get it now. @@ -907,12 +1056,12 @@ // restore r13 to have legal interpreter frame, i.e., bci == 0 <=> // r13 == code_base() - __ movq(r13, Address(method, methodOopDesc::const_offset())); // get constMethodOop - __ leaq(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase + __ movptr(r13, Address(method, methodOopDesc::const_offset())); // get constMethodOop + __ lea(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase // handle exceptions (exception handling will handle unlocking!) 
{ Label L; - __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL); + __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); __ jcc(Assembler::zero, L); // Note: At some point we may want to unify this with the code // used in call_VM_base(); i.e., we should use the @@ -943,18 +1092,18 @@ wordSize - sizeof(BasicObjectLock))); // monitor expect in c_rarg1 for slow unlock path - __ leaq(c_rarg1, monitor); // address of first monitor + __ lea(c_rarg1, monitor); // address of first monitor - __ movq(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); - __ testq(t, t); + __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); + __ testptr(t, t); __ jcc(Assembler::notZero, unlock); - + // Entry already unlocked, need to throw exception __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); __ should_not_reach_here(); - + __ bind(unlock); __ unlock_object(c_rarg1); } @@ -974,17 +1123,17 @@ __ pop(ltos); __ pop(dtos); - __ movq(t, Address(rbp, - (frame::interpreter_frame_result_handler_offset) * wordSize)); + __ movptr(t, Address(rbp, + (frame::interpreter_frame_result_handler_offset) * wordSize)); __ call(t); // remove activation - __ movq(t, Address(rbp, - frame::interpreter_frame_sender_sp_offset * - wordSize)); // get sender sp + __ movptr(t, Address(rbp, + frame::interpreter_frame_sender_sp_offset * + wordSize)); // get sender sp __ leave(); // remove frame anchor - __ popq(rdi); // get return address - __ movq(rsp, t); // set sp to sender sp + __ pop(rdi); // get return address + __ mov(rsp, t); // set sp to sender sp __ jmp(rdi); if (inc_counter) { @@ -1033,11 +1182,11 @@ generate_stack_overflow_check(); // get return address - __ popq(rax); + __ pop(rax); // compute beginning of parameters (r14) if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter. 
- __ leaq(r14, Address(rsp, rcx, Address::times_8, -wordSize)); + __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize)); // rdx - # of additional locals // allocate space for locals @@ -1047,8 +1196,8 @@ __ testl(rdx, rdx); __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 __ bind(loop); - if (TaggedStackInterpreter) __ pushq((int) NULL); // push tag - __ pushq((int) NULL); // initialize local variables + if (TaggedStackInterpreter) __ push((int) NULL_WORD); // push tag + __ push((int) NULL_WORD); // initialize local variables __ decrementl(rdx); // until everything initialized __ jcc(Assembler::greater, loop); __ bind(exit); @@ -1138,8 +1287,8 @@ Label L; const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); - __ movq(rax, monitor_block_top); - __ cmpq(rax, rsp); + __ movptr(rax, monitor_block_top); + __ cmpptr(rax, rsp); __ jcc(Assembler::equal, L); __ stop("broken stack frame setup in interpreter"); __ bind(L); @@ -1148,7 +1297,7 @@ // jvmti support __ notify_method_entry(); - + __ dispatch_next(vtos); // invocation counter overflow @@ -1161,14 +1310,14 @@ CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), r13, true); - __ movq(rbx, Address(rbp, method_offset)); // restore methodOop - __ movq(rax, Address(rbx, - in_bytes(methodOopDesc::method_data_offset()))); - __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), - rax); + __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop + __ movptr(rax, Address(rbx, + in_bytes(methodOopDesc::method_data_offset()))); + __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), + rax); __ test_method_data_pointer(rax, profile_method_continue); - __ addq(rax, in_bytes(methodDataOopDesc::data_offset())); - __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), + __ addptr(rax, in_bytes(methodDataOopDesc::data_offset())); + __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax); __ jmp(profile_method_continue); } @@ -1181,7 +1330,7 @@ } // Entry points -// +// // Here we generate the various kind of entries into the interpreter. // The two main entry type are generic bytecode methods and native // call method. These both come in synchronized and non-synchronized @@ -1305,7 +1454,7 @@ // on the transistion) Since the callee parameters already account // for the callee's params we only need to account for the extra // locals. - int size = overhead + + int size = overhead + (callee_locals - callee_param_count)*Interpreter::stackElementWords() + moncount * frame::interpreter_frame_monitor_size() + tempcount* Interpreter::stackElementWords() + popframe_extra_args; @@ -1329,8 +1478,8 @@ interpreter_frame->interpreter_frame_set_monitor_end(monbot); // Set last_sp - intptr_t* esp = (intptr_t*) monbot - - tempcount*Interpreter::stackElementWords() - + intptr_t* esp = (intptr_t*) monbot - + tempcount*Interpreter::stackElementWords() - popframe_extra_args; interpreter_frame->interpreter_frame_set_last_sp(esp); @@ -1358,18 +1507,19 @@ Interpreter::_rethrow_exception_entry = __ pc(); // Restore sp to interpreter_frame_last_sp even though we are going // to empty the expression stack for the exception processing. 
- __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); // rax: exception // rdx: return address/pc that threw exception __ restore_bcp(); // r13 points to call/send __ restore_locals(); + __ reinit_heapbase(); // restore r12 as heapbase. // Entry point for exceptions thrown within interpreter code Interpreter::_throw_exception_entry = __ pc(); // expression stack is undefined here // rax: exception // r13: exception bcp __ verify_oop(rax); - __ movq(c_rarg1, rax); + __ mov(c_rarg1, rax); // expression stack must be empty before entering the VM in case of // an exception @@ -1424,7 +1574,7 @@ // deoptimization blob's unpack entry because of the presence of // adapter frames in C2. Label caller_not_deoptimized; - __ movq(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize)); + __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize)); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), c_rarg1); __ testl(rax, rax); @@ -1437,8 +1587,8 @@ size_of_parameters_offset()))); __ shll(rax, Interpreter::logStackElementSize()); __ restore_locals(); // XXX do we need this? - __ subq(r14, rax); - __ addq(r14, wordSize); + __ subptr(r14, rax); + __ addptr(r14, wordSize); // Save these arguments __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization:: @@ -1446,13 +1596,13 @@ r15_thread, rax, r14); __ remove_activation(vtos, rdx, - /* throw_monitor_exception */ false, - /* install_monitor_exception */ false, - /* notify_jvmdi */ false); + /* throw_monitor_exception */ false, + /* install_monitor_exception */ false, + /* notify_jvmdi */ false); // Inform deoptimization that it is responsible for restoring // these arguments - __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), + __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit); // Continue in deoptimization handler @@ -1477,15 +1627,15 @@ // maintain this kind of invariant all the time we call a small // fixup routine to move the mutated arguments onto the top of our // expression stack if necessary. - __ movq(c_rarg1, rsp); - __ movq(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); + __ mov(c_rarg1, rsp); + __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); // PC must point into interpreter here __ set_last_Java_frame(noreg, rbp, __ pc()); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2); __ reset_last_Java_frame(true, true); // Restore the last_sp and null it out - __ movq(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); - __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); + __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); + __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); __ restore_bcp(); // XXX do we need this? __ restore_locals(); // XXX do we need this? 
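Nearly every change in the interpreter hunks above is the same mechanical rewrite: the 64-bit-only movq/subq/andq/pushq/popq forms become the pointer-width MacroAssembler forms (movptr, mov, subptr, andptr, push, pop) so the 32-bit and 64-bit ports can share this source. A minimal sketch of how those wrappers expand, assuming the usual LP64_ONLY/NOT_LP64 pattern (the exact bodies are an illustration, not part of this patch):

    // Pointer-width wrappers: 64-bit instruction on LP64, 32-bit form otherwise.
    void MacroAssembler::movptr(Register dst, Address src) {
      LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
    }
    void MacroAssembler::subptr(Register dst, int32_t imm32) {
      LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
    }
    void MacroAssembler::andptr(Register dst, int32_t imm32) {
      LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
    }

The added reinit_heapbase() calls are the other recurring change: on 64-bit, r12 is reserved as the compressed-oops heap base, so any sequence that borrows it as scratch (here, to remember rsp around an ABI-aligned runtime call) has to reload it afterwards.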
@@ -1503,15 +1653,15 @@ // end of PopFrame support Interpreter::_remove_activation_entry = __ pc(); - + // preserve exception over this code sequence __ pop_ptr(rax); - __ movq(Address(r15_thread, JavaThread::vm_result_offset()), rax); + __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax); // remove the activation (without doing throws on illegalMonitorExceptions) __ remove_activation(vtos, rdx, false, true, false); // restore exception - __ movq(rax, Address(r15_thread, JavaThread::vm_result_offset())); - __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD); + __ movptr(rax, Address(r15_thread, JavaThread::vm_result_offset())); + __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD); __ verify_oop(rax); // In between activations - previous activation type unknown yet @@ -1522,14 +1672,14 @@ // rdx: return address/pc that threw exception // rsp: expression stack of caller // rbp: ebp of caller - __ pushq(rax); // save exception - __ pushq(rdx); // save return address + __ push(rax); // save exception + __ push(rdx); // save return address __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx); - __ movq(rbx, rax); // save exception handler - __ popq(rdx); // restore return address - __ popq(rax); // restore exception + __ mov(rbx, rax); // save exception handler + __ pop(rdx); // restore return address + __ pop(rax); // restore exception // Note that an "issuing PC" is actually the next PC after the call __ jmp(rbx); // jump to exception // handler of caller @@ -1540,14 +1690,14 @@ // JVMTI ForceEarlyReturn support // address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { - address entry = __ pc(); + address entry = __ pc(); __ restore_bcp(); __ restore_locals(); __ empty_expression_stack(); __ load_earlyret_value(state); - __ movq(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); + __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset()); // Clear the earlyret state @@ -1609,21 +1759,21 @@ address entry = __ pc(); __ push(state); - __ pushq(c_rarg0); - __ pushq(c_rarg1); - __ pushq(c_rarg2); - __ pushq(c_rarg3); - __ movq(c_rarg2, rax); // Pass itos + __ push(c_rarg0); + __ push(c_rarg1); + __ push(c_rarg2); + __ push(c_rarg3); + __ mov(c_rarg2, rax); // Pass itos #ifdef _WIN64 - __ movflt(xmm3, xmm0); // Pass ftos + __ movflt(xmm3, xmm0); // Pass ftos #endif __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3); - __ popq(c_rarg3); - __ popq(c_rarg2); - __ popq(c_rarg1); - __ popq(c_rarg0); + __ pop(c_rarg3); + __ pop(c_rarg2); + __ pop(c_rarg1); + __ pop(c_rarg0); __ pop(state); __ ret(0); // return from result handler @@ -1657,10 +1807,11 @@ assert(Interpreter::trace_code(t->tos_in()) != NULL, "entry must have been generated"); - __ movq(r12, rsp); // remember sp - __ andq(rsp, -16); // align stack as required by ABI + __ mov(r12, rsp); // remember sp + __ andptr(rsp, -16); // align stack as required by ABI __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in()))); - __ movq(rsp, r12); // restore sp + __ mov(rsp, r12); // restore sp + __ reinit_heapbase(); } @@ -1673,3 +1824,4 @@ __ bind(L); } #endif // !PRODUCT +#endif // ! 
CC_INTERP --- old/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp 2009-08-01 04:08:22.001646456 +0100 +++ new/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp 2009-08-01 04:08:21.887751241 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)templateTable_x86_32.cpp 1.323 07/09/17 09:26:00 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -42,7 +39,7 @@ // Address computation // local variables -static inline Address iaddress(int n) { +static inline Address iaddress(int n) { return Address(rdi, Interpreter::local_offset_in_bytes(n)); } @@ -52,18 +49,18 @@ static inline Address daddress(int n) { return laddress(n); } static inline Address aaddress(int n) { return iaddress(n); } -static inline Address iaddress(Register r) { +static inline Address iaddress(Register r) { return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::value_offset_in_bytes()); } -static inline Address laddress(Register r) { +static inline Address laddress(Register r) { return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1)); } -static inline Address haddress(Register r) { +static inline Address haddress(Register r) { return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0)); } static inline Address faddress(Register r) { return iaddress(r); }; -static inline Address daddress(Register r) { +static inline Address daddress(Register r) { assert(!TaggedStackInterpreter, "This doesn't work"); return laddress(r); }; @@ -110,6 +107,78 @@ //---------------------------------------------------------------------------------------------------- // Miscelaneous helper routines +// Store an oop (or NULL) at the address described by obj. 
+// If val == noreg this means store a NULL + +static void do_oop_store(InterpreterMacroAssembler* _masm, + Address obj, + Register val, + BarrierSet::Name barrier, + bool precise) { + assert(val == noreg || val == rax, "parameter is just for looks"); + switch (barrier) { +#ifndef SERIALGC + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: + { + // flatten object address if needed + // We do it regardless of precise because we need the registers + if (obj.index() == noreg && obj.disp() == 0) { + if (obj.base() != rdx) { + __ movl(rdx, obj.base()); + } + } else { + __ leal(rdx, obj); + } + __ get_thread(rcx); + __ save_bcp(); + __ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg); + + // Do the actual store + // noreg means NULL + if (val == noreg) { + __ movl(Address(rdx, 0), NULL_WORD); + // No post barrier for NULL + } else { + __ movl(Address(rdx, 0), val); + __ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi); + } + __ restore_bcp(); + + } + break; +#endif // SERIALGC + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: + { + if (val == noreg) { + __ movl(obj, NULL_WORD); + } else { + __ movl(obj, val); + // flatten object address if needed + if (!precise || (obj.index() == noreg && obj.disp() == 0)) { + __ store_check(obj.base()); + } else { + __ leal(rdx, obj); + __ store_check(rdx); + } + } + } + break; + case BarrierSet::ModRef: + case BarrierSet::Other: + if (val == noreg) { + __ movl(obj, NULL_WORD); + } else { + __ movl(obj, val); + } + break; + default : + ShouldNotReachHere(); + + } +} + Address TemplateTable::at_bcp(int offset) { assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); return Address(rsi, offset); @@ -119,19 +188,21 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc, Register scratch, bool load_bc_into_scratch/*=true*/) { - + if (!RewriteBytecodes) return; // the pair bytecodes have already done the load. 
- if (load_bc_into_scratch) __ movl(bc, bytecode); + if (load_bc_into_scratch) { + __ movl(bc, bytecode); + } Label patch_done; if (JvmtiExport::can_post_breakpoint()) { Label fast_patch; // if a breakpoint is present we can't rewrite the stream directly - __ movzxb(scratch, at_bcp(0)); + __ movzbl(scratch, at_bcp(0)); __ cmpl(scratch, Bytecodes::_breakpoint); __ jcc(Assembler::notEqual, fast_patch); __ get_method(scratch); - // Let breakpoint table handling rewrite to quicker bytecode + // Let breakpoint table handling rewrite to quicker bytecode __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc); #ifndef ASSERT __ jmpb(patch_done); @@ -172,16 +243,16 @@ void TemplateTable::aconst_null() { transition(vtos, atos); - __ xorl(rax, rax); + __ xorptr(rax, rax); } void TemplateTable::iconst(int value) { transition(vtos, itos); if (value == 0) { - __ xorl(rax, rax); + __ xorptr(rax, rax); } else { - __ movl(rax, value); + __ movptr(rax, value); } } @@ -189,12 +260,12 @@ void TemplateTable::lconst(int value) { transition(vtos, ltos); if (value == 0) { - __ xorl(rax, rax); + __ xorptr(rax, rax); } else { - __ movl(rax, value); + __ movptr(rax, value); } assert(value >= 0, "check this code"); - __ xorl(rdx, rdx); + __ xorptr(rdx, rdx); } @@ -226,7 +297,7 @@ void TemplateTable::sipush() { transition(vtos, itos); __ load_unsigned_word(rax, at_bcp(1)); - __ bswap(rax); + __ bswapl(rax); __ sarl(rax, 16); } @@ -244,7 +315,7 @@ const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; // get type - __ xorl(rdx, rdx); + __ xorptr(rdx, rdx); __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset)); // unresolved string - get the resolved string @@ -274,7 +345,7 @@ __ cmpl(rdx, JVM_CONSTANT_Float); __ jccb(Assembler::notEqual, notFloat); // ftos - __ fld_s( Address(rcx, rbx, Address::times_4, base_offset)); + __ fld_s( Address(rcx, rbx, Address::times_ptr, base_offset)); __ push(ftos); __ jmp(Done); @@ -291,13 +362,14 @@ #endif Label isOop; // atos and itos - __ movl(rax, Address(rcx, rbx, Address::times_4, base_offset)); // String is only oop type we will see here __ cmpl(rdx, JVM_CONSTANT_String); __ jccb(Assembler::equal, isOop); + __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset)); __ push(itos); __ jmp(Done); __ bind(isOop); + __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset)); __ push(atos); if (VerifyOops) { @@ -319,14 +391,14 @@ __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double); __ jccb(Assembler::notEqual, Long); // dtos - __ fld_d( Address(rcx, rbx, Address::times_4, base_offset)); + __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset)); __ push(dtos); __ jmpb(Done); __ bind(Long); // ltos - __ movl(rax, Address(rcx, rbx, Address::times_4, base_offset + 0 * wordSize)); - __ movl(rdx, Address(rcx, rbx, Address::times_4, base_offset + 1 * wordSize)); + __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize)); + NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize))); __ push(ltos); @@ -336,13 +408,13 @@ void TemplateTable::locals_index(Register reg, int offset) { __ load_unsigned_byte(reg, at_bcp(offset)); - __ negl(reg); + __ negptr(reg); } void TemplateTable::iload() { transition(vtos, itos); - if (RewriteFrequentPairs) { + if (RewriteFrequentPairs) { Label rewrite, done; // get next byte @@ -390,7 +462,7 @@ __ movl(rax, iaddress(rbx)); debug_only(__ verify_local_tag(frame::TagValue, rbx)); } 
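The most substantive addition in this file is the do_oop_store() helper near the top, which funnels every reference store through the active collector's write barrier: G1 gets its SATB pre-barrier and remembered-set post-barrier, the other collectors get a card-table dirty. A rough, self-contained C++ illustration of the default card-table case (the 512-byte card size, the 0 "dirty" value and the helper name are assumptions of the sketch, not the VM's API):

    #include <cstddef>
    #include <cstdint>

    // Sketch of the CardTableModRef path of do_oop_store.
    static inline void card_table_oop_store(void** field, void* new_value,
                                            volatile int8_t* card_table_base) {
      *field = new_value;                            // the store itself
      size_t card = (uintptr_t)field >> 9;           // which 512-byte card covers the field?
      card_table_base[card] = 0;                     // dirty it so the GC rescans that card
    }

Storing NULL takes a shortcut in the generated code: the post barrier is skipped, since a null field can never introduce a reference the collector has to track.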
- + void TemplateTable::fast_iload() { transition(vtos, itos); locals_index(rbx); @@ -402,8 +474,8 @@ void TemplateTable::lload() { transition(vtos, ltos); locals_index(rbx); - __ movl(rax, laddress(rbx)); - __ movl(rdx, haddress(rbx)); + __ movptr(rax, laddress(rbx)); + NOT_LP64(__ movl(rdx, haddress(rbx))); debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); } @@ -424,10 +496,10 @@ // float instruction into ST0 __ movl(rax, laddress(rbx)); __ movl(rdx, haddress(rbx)); - __ pushl(rdx); // push hi first - __ pushl(rax); + __ push(rdx); // push hi first + __ push(rax); __ fld_d(Address(rsp, 0)); - __ addl(rsp, 2*wordSize); + __ addptr(rsp, 2*wordSize); debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); } else { __ fld_d(daddress(rbx)); @@ -438,16 +510,16 @@ void TemplateTable::aload() { transition(vtos, atos); locals_index(rbx); - __ movl(rax, iaddress(rbx)); + __ movptr(rax, aaddress(rbx)); debug_only(__ verify_local_tag(frame::TagReference, rbx)); } void TemplateTable::locals_index_wide(Register reg) { __ movl(reg, at_bcp(2)); - __ bswap(reg); + __ bswapl(reg); __ shrl(reg, 16); - __ negl(reg); + __ negptr(reg); } @@ -462,8 +534,8 @@ void TemplateTable::wide_lload() { transition(vtos, ltos); locals_index_wide(rbx); - __ movl(rax, laddress(rbx)); - __ movl(rdx, haddress(rbx)); + __ movptr(rax, laddress(rbx)); + NOT_LP64(__ movl(rdx, haddress(rbx))); debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); } @@ -484,8 +556,8 @@ // float instruction into ST0 __ movl(rax, laddress(rbx)); __ movl(rdx, haddress(rbx)); - __ pushl(rdx); // push hi first - __ pushl(rax); + __ push(rdx); // push hi first + __ push(rax); __ fld_d(Address(rsp, 0)); __ addl(rsp, 2*wordSize); debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); @@ -498,7 +570,7 @@ void TemplateTable::wide_aload() { transition(vtos, atos); locals_index_wide(rbx); - __ movl(rax, iaddress(rbx)); + __ movptr(rax, aaddress(rbx)); debug_only(__ verify_local_tag(frame::TagReference, rbx)); } @@ -512,12 +584,13 @@ // destroys rbx, // check array __ null_check(array, arrayOopDesc::length_offset_in_bytes()); + LP64_ONLY(__ movslq(index, index)); // check index __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes())); if (index != rbx) { // ??? 
convention: move aberrant index into rbx, for exception message assert(rbx != array, "different registers"); - __ movl(rbx, index); + __ mov(rbx, index); } __ jump_cc(Assembler::aboveEqual, ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry)); @@ -538,10 +611,10 @@ // rax,: index // rdx: array index_check(rdx, rax); - __ movl(rbx, rax); + __ mov(rbx, rax); // rbx,: index - __ movl(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize)); - __ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)); + __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize)); + NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize))); } @@ -568,7 +641,7 @@ // rdx: array index_check(rdx, rax); // kills rbx, // rax,: index - __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); + __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); } @@ -579,7 +652,7 @@ // rax,: index // can do better code for P5 - fix this at some point __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE))); - __ movl(rax, rbx); + __ mov(rax, rbx); } @@ -590,7 +663,7 @@ // rax,: index // can do better code for P5 - may want to improve this at some point __ load_unsigned_word(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); - __ movl(rax, rbx); + __ mov(rax, rbx); } // iload followed by caload frequent pair @@ -605,7 +678,7 @@ index_check(rdx, rax); // rax,: index __ load_unsigned_word(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); - __ movl(rax, rbx); + __ mov(rax, rbx); } void TemplateTable::saload() { @@ -615,7 +688,7 @@ // rax,: index // can do better code for P5 - may want to improve this at some point __ load_signed_word(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT))); - __ movl(rax, rbx); + __ mov(rax, rbx); } @@ -628,8 +701,8 @@ void TemplateTable::lload(int n) { transition(vtos, ltos); - __ movl(rax, laddress(n)); - __ movl(rdx, haddress(n)); + __ movptr(rax, laddress(n)); + NOT_LP64(__ movptr(rdx, haddress(n))); debug_only(__ verify_local_tag(frame::TagCategory2, n)); } @@ -648,10 +721,10 @@ // float instruction into ST0 __ movl(rax, laddress(n)); __ movl(rdx, haddress(n)); - __ pushl(rdx); // push hi first - __ pushl(rax); + __ push(rdx); // push hi first + __ push(rax); __ fld_d(Address(rsp, 0)); - __ addl(rsp, 2*wordSize); // reset rsp + __ addptr(rsp, 2*wordSize); // reset rsp debug_only(__ verify_local_tag(frame::TagCategory2, n)); } else { __ fld_d(daddress(n)); @@ -661,7 +734,7 @@ void TemplateTable::aload(int n) { transition(vtos, atos); - __ movl(rax, aaddress(n)); + __ movptr(rax, aaddress(n)); debug_only(__ verify_local_tag(frame::TagReference, n)); } @@ -675,7 +748,7 @@ // _aload_0, _fast_fgetfield // // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0 - // bytecode checks if the next bytecode is either _fast_igetfield, + // bytecode checks if the next bytecode is either _fast_igetfield, // _fast_agetfield or _fast_fgetfield and then rewrites the // current bytecode into a pair bytecode; otherwise it rewrites the current // bytecode into _fast_aload_0 that doesn't do the pair check anymore. 
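index_check(), used by the array templates in the hunks above, folds both failure modes into one unsigned comparison: cmpl against the array length followed by jump_cc(aboveEqual) into the shared ArrayIndexOutOfBoundsException entry, so a negative index fails the same test as an oversized one. A sketch of that check (the throw helper is a stand-in for the shared stub, not a VM function):

    #include <cstdint>

    void throw_array_index_out_of_bounds(int32_t index);   // stand-in declaration

    // Sketch of the generated bounds check.  The length load itself doubles as
    // the null check: reading the length field of a NULL array faults, and the
    // signal handler turns the fault into a NullPointerException.
    static void index_check_sketch(int32_t length, int32_t index) {
      if ((uint32_t)index >= (uint32_t)length)    // unsigned compare also rejects index < 0
        throw_array_index_out_of_bounds(index);
    }

The new LP64_ONLY movslq simply sign-extends the 32-bit index before it is used to form a 64-bit address.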
@@ -743,8 +816,8 @@ void TemplateTable::lstore() { transition(ltos, vtos); locals_index(rbx); - __ movl(laddress(rbx), rax); - __ movl(haddress(rbx), rdx); + __ movptr(laddress(rbx), rax); + NOT_LP64(__ movptr(haddress(rbx), rdx)); __ tag_local(frame::TagCategory2, rbx); } @@ -762,12 +835,12 @@ locals_index(rbx); if (TaggedStackInterpreter) { // Store double on stack and reload into locals nonadjacently - __ subl(rsp, 2 * wordSize); + __ subptr(rsp, 2 * wordSize); __ fstp_d(Address(rsp, 0)); - __ popl(rax); - __ popl(rdx); - __ movl(laddress(rbx), rax); - __ movl(haddress(rbx), rdx); + __ pop(rax); + __ pop(rdx); + __ movptr(laddress(rbx), rax); + __ movptr(haddress(rbx), rdx); __ tag_local(frame::TagCategory2, rbx); } else { __ fstp_d(daddress(rbx)); @@ -779,7 +852,7 @@ transition(vtos, vtos); __ pop_ptr(rax, rdx); // will need to pop tag too locals_index(rbx); - __ movl(aaddress(rbx), rax); + __ movptr(aaddress(rbx), rax); __ tag_local(rdx, rbx); // need to store same tag in local may be returnAddr } @@ -797,8 +870,8 @@ transition(vtos, vtos); __ pop_l(rax, rdx); locals_index_wide(rbx); - __ movl(laddress(rbx), rax); - __ movl(haddress(rbx), rdx); + __ movptr(laddress(rbx), rax); + NOT_LP64(__ movl(haddress(rbx), rdx)); __ tag_local(frame::TagCategory2, rbx); } @@ -817,7 +890,7 @@ transition(vtos, vtos); __ pop_ptr(rax, rdx); locals_index_wide(rbx); - __ movl(aaddress(rbx), rax); + __ movptr(aaddress(rbx), rax); __ tag_local(rdx, rbx); } @@ -841,8 +914,8 @@ // rdx: high(value) index_check(rcx, rbx); // prefer index in rbx, // rbx,: index - __ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax); - __ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx); + __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax); + NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx)); } @@ -872,21 +945,23 @@ Label is_null, ok_is_subtype, done; transition(vtos, vtos); // stack: ..., array, index, value - __ movl(rax, at_tos()); // Value + __ movptr(rax, at_tos()); // Value __ movl(rcx, at_tos_p1()); // Index - __ movl(rdx, at_tos_p2()); // Array + __ movptr(rdx, at_tos_p2()); // Array + + Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); index_check_without_pop(rdx, rcx); // kills rbx, // do array store check - check for NULL value first - __ testl(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, is_null); // Move subklass into EBX - __ movl(rbx, Address(rax, oopDesc::klass_offset_in_bytes())); + __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes())); // Move superklass into EAX - __ movl(rax, Address(rdx, oopDesc::klass_offset_in_bytes())); - __ movl(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes())); - // Compress array+index*4+12 into a single register. Frees ECX. - __ leal(rdx, Address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); + __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes())); + __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes())); + // Compress array+index*wordSize+12 into a single register. Frees ECX. + __ lea(rdx, element_address); // Generate subtype check. Blows ECX. Resets EDI to locals. // Superklass in EAX. Subklass in EBX. 
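aastore is reworked across the hunk above and the next one: the subtype check stays, but the store itself now goes through do_oop_store instead of a bare movl plus store_check, so every collector sees the right barrier. In outline, with stand-in helper names rather than the VM's exact API:

    // Sketch of the logic the aastore template generates.
    void aastore_sketch(objArrayOop array, int index, oop value) {
      index_check(array, index);                        // bounds + implicit null check on the array
      if (value != NULL &&
          !is_subtype_of(klass_of(value), element_klass_of(array)))
        throw_array_store_exception();                  // value does not fit this array
      // store plus write barrier; a NULL store takes the profile_null_seen path
      // and needs no post barrier
      do_oop_store(element_address(array, index), value);
    }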
@@ -898,19 +973,24 @@ // Come here on success __ bind(ok_is_subtype); - __ movl(rax, at_rsp()); // Value - __ movl(Address(rdx, 0), rax); - __ store_check(rdx); - __ jmpb(done); + + // Get the value to store + __ movptr(rax, at_rsp()); + // and store it with appropriate barrier + do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true); + + __ jmp(done); // Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx] __ bind(is_null); __ profile_null_seen(rbx); - __ movl(Address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), rax); + + // Store NULL, (noreg means NULL to do_oop_store) + do_oop_store(_masm, element_address, noreg, _bs->kind(), true); // Pop stack arguments __ bind(done); - __ addl(rsp, 3 * Interpreter::stackElementSize()); + __ addptr(rsp, 3 * Interpreter::stackElementSize()); } @@ -950,8 +1030,8 @@ void TemplateTable::lstore(int n) { transition(ltos, vtos); - __ movl(laddress(n), rax); - __ movl(haddress(n), rdx); + __ movptr(laddress(n), rax); + NOT_LP64(__ movptr(haddress(n), rdx)); __ tag_local(frame::TagCategory2, n); } @@ -966,10 +1046,10 @@ void TemplateTable::dstore(int n) { transition(dtos, vtos); if (TaggedStackInterpreter) { - __ subl(rsp, 2 * wordSize); + __ subptr(rsp, 2 * wordSize); __ fstp_d(Address(rsp, 0)); - __ popl(rax); - __ popl(rdx); + __ pop(rax); + __ pop(rdx); __ movl(laddress(n), rax); __ movl(haddress(n), rdx); __ tag_local(frame::TagCategory2, n); @@ -982,20 +1062,20 @@ void TemplateTable::astore(int n) { transition(vtos, vtos); __ pop_ptr(rax, rdx); - __ movl(aaddress(n), rax); + __ movptr(aaddress(n), rax); __ tag_local(rdx, n); } void TemplateTable::pop() { transition(vtos, vtos); - __ addl(rsp, Interpreter::stackElementSize()); + __ addptr(rsp, Interpreter::stackElementSize()); } void TemplateTable::pop2() { transition(vtos, vtos); - __ addl(rsp, 2*Interpreter::stackElementSize()); + __ addptr(rsp, 2*Interpreter::stackElementSize()); } @@ -1102,14 +1182,14 @@ transition(itos, itos); switch (op) { case add : __ pop_i(rdx); __ addl (rax, rdx); break; - case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break; + case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break; case mul : __ pop_i(rdx); __ imull(rax, rdx); break; case _and : __ pop_i(rdx); __ andl (rax, rdx); break; case _or : __ pop_i(rdx); __ orl (rax, rdx); break; case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break; - case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr. - case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr. - case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr. + case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr. + case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr. + case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr. 
default : ShouldNotReachHere(); } } @@ -1121,7 +1201,7 @@ switch (op) { case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break; case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx); - __ movl(rax, rbx); __ movl(rdx, rcx); break; + __ mov(rax, rbx); __ mov(rdx, rcx); break; case _and: __ andl(rax, rbx); __ andl(rdx, rcx); break; case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break; case _xor: __ xorl(rax, rbx); __ xorl(rdx, rcx); break; @@ -1132,7 +1212,7 @@ void TemplateTable::idiv() { transition(itos, itos); - __ movl(rcx, rax); + __ mov(rcx, rax); __ pop_i(rax); // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If // they are not equal, one could do a normal division (no correction @@ -1144,52 +1224,52 @@ void TemplateTable::irem() { transition(itos, itos); - __ movl(rcx, rax); + __ mov(rcx, rax); __ pop_i(rax); // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If // they are not equal, one could do a normal division (no correction // needed), which may speed up this implementation for the common case. // (see also JVM spec., p.243 & p.271) __ corrected_idivl(rcx); - __ movl(rax, rdx); + __ mov(rax, rdx); } void TemplateTable::lmul() { transition(ltos, ltos); __ pop_l(rbx, rcx); - __ pushl(rcx); __ pushl(rbx); - __ pushl(rdx); __ pushl(rax); + __ push(rcx); __ push(rbx); + __ push(rdx); __ push(rax); __ lmul(2 * wordSize, 0); - __ addl(rsp, 4 * wordSize); // take off temporaries + __ addptr(rsp, 4 * wordSize); // take off temporaries } void TemplateTable::ldiv() { transition(ltos, ltos); __ pop_l(rbx, rcx); - __ pushl(rcx); __ pushl(rbx); - __ pushl(rdx); __ pushl(rax); + __ push(rcx); __ push(rbx); + __ push(rdx); __ push(rax); // check if y = 0 __ orl(rax, rdx); __ jump_cc(Assembler::zero, ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv)); - __ addl(rsp, 4 * wordSize); // take off temporaries + __ addptr(rsp, 4 * wordSize); // take off temporaries } void TemplateTable::lrem() { transition(ltos, ltos); __ pop_l(rbx, rcx); - __ pushl(rcx); __ pushl(rbx); - __ pushl(rdx); __ pushl(rax); + __ push(rcx); __ push(rbx); + __ push(rdx); __ push(rax); // check if y = 0 __ orl(rax, rdx); __ jump_cc(Assembler::zero, ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem)); - __ addl(rsp, 4 * wordSize); + __ addptr(rsp, 4 * wordSize); } @@ -1203,7 +1283,7 @@ void TemplateTable::lshr() { transition(itos, ltos); - __ movl(rcx, rax); // get shift count + __ mov(rcx, rax); // get shift count __ pop_l(rax, rdx); // get shift value __ lshr(rdx, rax, true); } @@ -1211,7 +1291,7 @@ void TemplateTable::lushr() { transition(itos, ltos); - __ movl(rcx, rax); // get shift count + __ mov(rcx, rax); // get shift count __ pop_l(rax, rdx); // get shift value __ lshr(rdx, rax); } @@ -1229,14 +1309,14 @@ default : ShouldNotReachHere(); } __ f2ieee(); - __ popl(rax); // pop float thing off + __ pop(rax); // pop float thing off } void TemplateTable::dop2(Operation op) { transition(dtos, dtos); __ pop_dtos_to_rsp(); // pop dtos into rsp - + switch (op) { case add: __ fadd_d (at_rsp()); break; case sub: __ fsubr_d(at_rsp()); break; @@ -1283,8 +1363,8 @@ } __ d2ieee(); // Pop double precision number from rsp. 
- __ popl(rax); - __ popl(rdx); + __ pop(rax); + __ pop(rdx); } @@ -1324,7 +1404,7 @@ transition(vtos, vtos); __ movl(rdx, at_bcp(4)); // get constant locals_index_wide(rbx); - __ bswap(rdx); // swap bytes & sign-extend constant + __ bswapl(rdx); // swap bytes & sign-extend constant __ sarl(rdx, 16); __ addl(iaddress(rbx), rdx); // Note: should probably use only one movl to get both @@ -1378,62 +1458,65 @@ #endif // ASSERT // Conversion - // (Note: use pushl(rcx)/popl(rcx) for 1/2-word stack-ptr manipulation) + // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation) switch (bytecode()) { case Bytecodes::_i2l: __ extend_sign(rdx, rax); break; case Bytecodes::_i2f: - __ pushl(rax); // store int on tos + __ push(rax); // store int on tos __ fild_s(at_rsp()); // load int to ST0 __ f2ieee(); // truncate to float size - __ popl(rcx); // adjust rsp + __ pop(rcx); // adjust rsp break; case Bytecodes::_i2d: - __ pushl(rax); // add one slot for d2ieee() - __ pushl(rax); // store int on tos + __ push(rax); // add one slot for d2ieee() + __ push(rax); // store int on tos __ fild_s(at_rsp()); // load int to ST0 __ d2ieee(); // truncate to double size - __ popl(rcx); // adjust rsp - __ popl(rcx); + __ pop(rcx); // adjust rsp + __ pop(rcx); break; case Bytecodes::_i2b: __ shll(rax, 24); // truncate upper 24 bits __ sarl(rax, 24); // and sign-extend byte + LP64_ONLY(__ movsbl(rax, rax)); break; case Bytecodes::_i2c: __ andl(rax, 0xFFFF); // truncate upper 16 bits + LP64_ONLY(__ movzwl(rax, rax)); break; case Bytecodes::_i2s: __ shll(rax, 16); // truncate upper 16 bits __ sarl(rax, 16); // and sign-extend short + LP64_ONLY(__ movswl(rax, rax)); break; case Bytecodes::_l2i: /* nothing to do */ break; case Bytecodes::_l2f: - __ pushl(rdx); // store long on tos - __ pushl(rax); + __ push(rdx); // store long on tos + __ push(rax); __ fild_d(at_rsp()); // load long to ST0 __ f2ieee(); // truncate to float size - __ popl(rcx); // adjust rsp - __ popl(rcx); + __ pop(rcx); // adjust rsp + __ pop(rcx); break; case Bytecodes::_l2d: - __ pushl(rdx); // store long on tos - __ pushl(rax); + __ push(rdx); // store long on tos + __ push(rax); __ fild_d(at_rsp()); // load long to ST0 __ d2ieee(); // truncate to double size - __ popl(rcx); // adjust rsp - __ popl(rcx); + __ pop(rcx); // adjust rsp + __ pop(rcx); break; case Bytecodes::_f2i: - __ pushl(rcx); // reserve space for argument + __ push(rcx); // reserve space for argument __ fstp_s(at_rsp()); // pass float argument on stack __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1); break; case Bytecodes::_f2l: - __ pushl(rcx); // reserve space for argument + __ push(rcx); // reserve space for argument __ fstp_s(at_rsp()); // pass float argument on stack __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1); break; @@ -1441,21 +1524,21 @@ /* nothing to do */ break; case Bytecodes::_d2i: - __ pushl(rcx); // reserve space for argument - __ pushl(rcx); + __ push(rcx); // reserve space for argument + __ push(rcx); __ fstp_d(at_rsp()); // pass double argument on stack __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2); break; case Bytecodes::_d2l: - __ pushl(rcx); // reserve space for argument - __ pushl(rcx); + __ push(rcx); // reserve space for argument + __ push(rcx); __ fstp_d(at_rsp()); // pass double argument on stack __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2); break; case Bytecodes::_d2f: - __ pushl(rcx); // reserve space for f2ieee() + __ push(rcx); // reserve space for f2ieee() __ 
f2ieee(); // truncate to float size - __ popl(rcx); // adjust rsp + __ pop(rcx); // adjust rsp break; default : ShouldNotReachHere(); @@ -1468,7 +1551,7 @@ // y = rdx:rax __ pop_l(rbx, rcx); // get x = rcx:rbx __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y) - __ movl(rax, rcx); + __ mov(rax, rcx); } @@ -1479,9 +1562,9 @@ } else { __ pop_dtos_to_rsp(); __ fld_d(at_rsp()); - __ popl(rdx); + __ pop(rdx); } - __ popl(rcx); + __ pop(rcx); __ fcmp2int(rax, unordered_result < 0); } @@ -1496,8 +1579,10 @@ // Load up EDX with the branch displacement __ movl(rdx, at_bcp(1)); - __ bswap(rdx); + __ bswapl(rdx); if (!is_wide) __ sarl(rdx, 16); + LP64_ONLY(__ movslq(rdx, rdx)); + // Handle all the JSR stuff here, then exit. // It's much shorter and cleaner than intermingling with the @@ -1507,10 +1592,10 @@ __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0)); // compute return address as bci in rax, - __ leal(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset()))); - __ subl(rax, Address(rcx, methodOopDesc::const_offset())); - // Adjust the bcp in ESI by the displacement in EDX - __ addl(rsi, rdx); + __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset()))); + __ subptr(rax, Address(rcx, methodOopDesc::const_offset())); + // Adjust the bcp in RSI by the displacement in EDX + __ addptr(rsi, rdx); // Push return address __ push_i(rax); // jsr returns vtos @@ -1520,8 +1605,8 @@ // Normal (non-jsr) branch handling - // Adjust the bcp in ESI by the displacement in EDX - __ addl(rsi, rdx); + // Adjust the bcp in RSI by the displacement in EDX + __ addptr(rsi, rdx); assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters"); Label backedge_counter_overflow; @@ -1538,9 +1623,9 @@ __ testl(rdx, rdx); // check if forward or backward branch __ jcc(Assembler::positive, dispatch); // count only if backward branch - // increment counter + // increment counter __ movl(rax, Address(rcx, be_offset)); // load backedge counter - __ increment(rax, InvocationCounter::count_increment); // increment counter + __ incrementl(rax, InvocationCounter::count_increment); // increment counter __ movl(Address(rcx, be_offset), rax); // store counter __ movl(rax, Address(rcx, inv_offset)); // load invocation counter @@ -1562,13 +1647,13 @@ ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); __ jcc(Assembler::below, dispatch); - // When ProfileInterpreter is on, the backedge_count comes from the - // methodDataOop, which value does not get reset on the call to + // When ProfileInterpreter is on, the backedge_count comes from the + // methodDataOop, which value does not get reset on the call to // frequency_counter_overflow(). To avoid excessive calls to the overflow - // routine while the method is being compiled, add a second test to make + // routine while the method is being compiled, add a second test to make // sure the overflow function is called only once every overflow_frequency. 
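The ProfileInterpreter case described just above needs a throttle: the methodDataOop backedge counter is not reset by frequency_counter_overflow(), so without a second test the overflow routine would be re-entered on every backward branch while the compile is still pending. The test that follows amounts to (overflow_frequency is 1024 in the code below):

    // Sketch of the throttle implemented by the andptr/jcc pair below: only
    // take the overflow path once every overflow_frequency (1024) overflows.
    if ((mdo_backedge_count & (overflow_frequency - 1)) == 0)
      goto backedge_counter_overflow;   // -> InterpreterRuntime::frequency_counter_overflow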
const int overflow_frequency = 1024; - __ andl(rbx, overflow_frequency-1); + __ andptr(rbx, overflow_frequency-1); __ jcc(Assembler::zero, backedge_counter_overflow); } @@ -1599,14 +1684,14 @@ __ bind(profile_method); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi); __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode - __ movl(rcx, Address(rbp, method_offset)); - __ movl(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset()))); - __ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx); + __ movptr(rcx, Address(rbp, method_offset)); + __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset()))); + __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx); __ test_method_data_pointer(rcx, dispatch); // offset non-null mdp by MDO::data_offset() + IR::profile_method() - __ addl(rcx, in_bytes(methodDataOopDesc::data_offset())); - __ addl(rcx, rax); - __ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx); + __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset())); + __ addptr(rcx, rax); + __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx); __ jmp(dispatch); } @@ -1614,8 +1699,8 @@ // invocation counter overflow __ bind(backedge_counter_overflow); - __ negl(rdx); - __ addl(rdx, rsi); // branch bcp + __ negptr(rdx); + __ addptr(rdx, rsi); // branch bcp call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx); __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode @@ -1624,30 +1709,30 @@ // rdx: scratch // rdi: locals pointer // rsi: bcp - __ testl(rax, rax); // test result + __ testptr(rax, rax); // test result __ jcc(Assembler::zero, dispatch); // no osr if null // nmethod may have been invalidated (VM may block upon call_VM return) __ movl(rcx, Address(rax, nmethod::entry_bci_offset())); __ cmpl(rcx, InvalidOSREntryBci); __ jcc(Assembler::equal, dispatch); - - // We have the address of an on stack replacement routine in rax, + + // We have the address of an on stack replacement routine in rax, // We need to prepare to execute the OSR method. First we must // migrate the locals and monitors off of the stack. - __ movl(rsi, rax); // save the nmethod + __ mov(rbx, rax); // save the nmethod const Register thread = rcx; __ get_thread(thread); call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); // rax, is OSR buffer, move it to expected parameter location - __ movl(rcx, rax); + __ mov(rcx, rax); // pop the interpreter frame - __ movl(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp + __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp __ leave(); // remove frame anchor - __ popl(rdi); // get return address - __ movl(rsp, rdx); // set sp to sender sp + __ pop(rdi); // get return address + __ mov(rsp, rdx); // set sp to sender sp Label skip; @@ -1666,32 +1751,32 @@ __ jcc(Assembler::notEqual, chkint); // yes adjust to the specialized call stub return. - assert(StubRoutines::i486::get_call_stub_compiled_return() != NULL, "must be set"); - __ lea(rdi, ExternalAddress(StubRoutines::i486::get_call_stub_compiled_return())); + assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set"); + __ lea(rdi, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return())); __ jmp(skip); __ bind(chkint); // Are we returning to the interpreter? 
Look for sentinel - __ cmpl(Address(rdi, -8), Interpreter::return_sentinel); + __ cmpl(Address(rdi, -2*wordSize), Interpreter::return_sentinel); __ jcc(Assembler::notEqual, skip); // Adjust to compiled return back to interpreter - __ movl(rdi, Address(rdi, -4)); + __ movptr(rdi, Address(rdi, -wordSize)); __ bind(skip); // Align stack pointer for compiled code (note that caller is // responsible for undoing this fixup by remembering the old SP // in an rbp,-relative location) - __ andl(rsp, -(StackAlignmentInBytes)); + __ andptr(rsp, -(StackAlignmentInBytes)); // push the (possibly adjusted) return address - __ pushl(rdi); + __ push(rdi); // and begin the OSR nmethod - __ jmp(Address(rsi, nmethod::osr_entry_point_offset())); + __ jmp(Address(rbx, nmethod::osr_entry_point_offset())); } } } @@ -1726,7 +1811,7 @@ transition(atos, vtos); // assume branch is more often taken than not (loops use backward branches) Label not_taken; - __ testl(rax, rax); + __ testptr(rax, rax); __ jcc(j_not(cc), not_taken); branch(false, false); __ bind(not_taken); @@ -1739,7 +1824,7 @@ // assume branch is more often taken than not (loops use backward branches) Label not_taken; __ pop_ptr(rdx); - __ cmpl(rdx, rax); + __ cmpptr(rdx, rax); __ jcc(j_not(cc), not_taken); branch(false, false); __ bind(not_taken); @@ -1750,12 +1835,12 @@ void TemplateTable::ret() { transition(vtos, vtos); locals_index(rbx); - __ movl(rbx, iaddress(rbx)); // get return bci, compute return bcp + __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp __ profile_ret(rbx, rcx); __ get_method(rax); - __ movl(rsi, Address(rax, methodOopDesc::const_offset())); - __ leal(rsi, Address(rsi, rbx, Address::times_1, - constMethodOopDesc::codes_offset())); + __ movptr(rsi, Address(rax, methodOopDesc::const_offset())); + __ lea(rsi, Address(rsi, rbx, Address::times_1, + constMethodOopDesc::codes_offset())); __ dispatch_next(vtos); } @@ -1763,11 +1848,11 @@ void TemplateTable::wide_ret() { transition(vtos, vtos); locals_index_wide(rbx); - __ movl(rbx, iaddress(rbx)); // get return bci, compute return bcp + __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp __ profile_ret(rbx, rcx); __ get_method(rax); - __ movl(rsi, Address(rax, methodOopDesc::const_offset())); - __ leal(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset())); + __ movptr(rsi, Address(rax, methodOopDesc::const_offset())); + __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset())); __ dispatch_next(vtos); } @@ -1776,13 +1861,13 @@ Label default_case, continue_execution; transition(itos, vtos); // align rsi - __ leal(rbx, at_bcp(wordSize)); - __ andl(rbx, -wordSize); + __ lea(rbx, at_bcp(wordSize)); + __ andptr(rbx, -wordSize); // load lo & hi __ movl(rcx, Address(rbx, 1 * wordSize)); __ movl(rdx, Address(rbx, 2 * wordSize)); - __ bswap(rcx); - __ bswap(rdx); + __ bswapl(rcx); + __ bswapl(rdx); // check against lo & hi __ cmpl(rax, rcx); __ jccb(Assembler::less, default_case); @@ -1790,13 +1875,13 @@ __ jccb(Assembler::greater, default_case); // lookup dispatch offset __ subl(rax, rcx); - __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * wordSize)); + __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt)); __ profile_switch_case(rax, rbx, rcx); // continue execution __ bind(continue_execution); - __ bswap(rdx); + __ bswapl(rdx); __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1)); - __ addl(rsi, rdx); + __ addptr(rsi, rdx); __ dispatch_only(vtos); // handle default __ 
bind(default_case); @@ -1814,22 +1899,22 @@ void TemplateTable::fast_linearswitch() { transition(itos, vtos); - Label loop_entry, loop, found, continue_execution; - // bswap rax, so we can avoid bswapping the table entries - __ bswap(rax); + Label loop_entry, loop, found, continue_execution; + // bswapl rax, so we can avoid bswapping the table entries + __ bswapl(rax); // align rsi - __ leal(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below) - __ andl(rbx, -wordSize); + __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below) + __ andptr(rbx, -wordSize); // set counter - __ movl(rcx, Address(rbx, wordSize)); - __ bswap(rcx); + __ movl(rcx, Address(rbx, wordSize)); + __ bswapl(rcx); __ jmpb(loop_entry); // table search __ bind(loop); __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize)); __ jccb(Assembler::equal, found); __ bind(loop_entry); - __ decrement(rcx); + __ decrementl(rcx); __ jcc(Assembler::greaterEqual, loop); // default case __ profile_switch_default(rax); @@ -1840,10 +1925,10 @@ __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize)); __ profile_switch_case(rcx, rax, rbx); // continue execution - __ bind(continue_execution); - __ bswap(rdx); + __ bind(continue_execution); + __ bswapl(rdx); __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1)); - __ addl(rsi, rdx); + __ addptr(rsi, rdx); __ dispatch_only(vtos); } @@ -1885,13 +1970,13 @@ // setup array __ save_bcp(); - __ leal(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below) - __ andl(array, -wordSize); + __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below) + __ andptr(array, -wordSize); // initialize i & j __ xorl(i, i); // i = 0; - __ movl(j, Address(array, -wordSize)); // j = length(array); - // Convert j into native byteordering - __ bswap(j); + __ movl(j, Address(array, -wordSize)); // j = length(array); + // Convert j into native byteordering + __ bswapl(j); // and start Label entry; __ jmp(entry); @@ -1909,19 +1994,19 @@ // } // Convert array[h].match to native byte-ordering before compare __ movl(temp, Address(array, h, Address::times_8, 0*wordSize)); - __ bswap(temp); + __ bswapl(temp); __ cmpl(key, temp); if (VM_Version::supports_cmov()) { __ cmovl(Assembler::less , j, h); // j = h if (key < array[h].fast_match()) __ cmovl(Assembler::greaterEqual, i, h); // i = h if (key >= array[h].fast_match()) } else { Label set_i, end_of_if; - __ jccb(Assembler::greaterEqual, set_i); // { - __ movl(j, h); // j = h; - __ jmp(end_of_if); // } - __ bind(set_i); // else { - __ movl(i, h); // i = h; - __ bind(end_of_if); // } + __ jccb(Assembler::greaterEqual, set_i); // { + __ mov(j, h); // j = h; + __ jmp(end_of_if); // } + __ bind(set_i); // else { + __ mov(i, h); // i = h; + __ bind(end_of_if); // } } // while (i+1 < j) __ bind(entry); @@ -1934,30 +2019,32 @@ Label default_case; // Convert array[i].match to native byte-ordering before compare __ movl(temp, Address(array, i, Address::times_8, 0*wordSize)); - __ bswap(temp); + __ bswapl(temp); __ cmpl(key, temp); __ jcc(Assembler::notEqual, default_case); // entry found -> j = offset __ movl(j , Address(array, i, Address::times_8, 1*wordSize)); __ profile_switch_case(i, key, array); - __ bswap(j); + __ bswapl(j); + LP64_ONLY(__ movslq(j, j)); __ restore_bcp(); __ restore_locals(); // restore rdi __ load_unsigned_byte(rbx, Address(rsi, j, 
Address::times_1)); - - __ addl(rsi, j); + + __ addptr(rsi, j); __ dispatch_only(vtos); // default case -> j = default offset __ bind(default_case); __ profile_switch_default(i); __ movl(j, Address(array, -2*wordSize)); - __ bswap(j); + __ bswapl(j); + LP64_ONLY(__ movslq(j, j)); __ restore_bcp(); __ restore_locals(); // restore rdi __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1)); - __ addl(rsi, j); + __ addptr(rsi, j); __ dispatch_only(vtos); } @@ -1968,8 +2055,8 @@ if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { assert(state == vtos, "only valid state"); - __ movl(rax, aaddress(0)); - __ movl(rdi, Address(rax, oopDesc::klass_offset_in_bytes())); + __ movptr(rax, aaddress(0)); + __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes())); __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); __ testl(rdi, JVM_ACC_HAS_FINALIZER); Label skip_register_finalizer; @@ -1991,9 +2078,9 @@ // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of // memory barrier (i.e., it's not sufficient that the interpreter does not // reorder volatile references, the hardware also must not reorder them). -// +// // According to the new Java Memory Model (JMM): -// (1) All volatiles are serialized wrt to each other. +// (1) All volatiles are serialized wrt to each other. // ALSO reads & writes act as aquire & release, so: // (2) A read cannot let unrelated NON-volatile memory refs that happen after // the read float up to before the read. It's OK for non-volatile memory refs @@ -2010,10 +2097,10 @@ // requirement (1) but miss the volatile-store-volatile-load case. This final // case is placed after volatile-stores although it could just as well go // before volatile-loads. -void TemplateTable::volatile_barrier( ) { +void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) { // Helper function to insert a is-volatile test and memory barrier - if( !os::is_MP() ) return; // Not needed on single CPU - __ membar(); + if( !os::is_MP() ) return; // Not needed on single CPU + __ membar(order_constraint); } void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) { @@ -2026,10 +2113,13 @@ const int shift_count = (1 + byte_no)*BitsPerByte; Label resolved; __ get_cache_and_index_at_bcp(Rcache, index, 1); - __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); + __ movl(temp, Address(Rcache, + index, + Address::times_ptr, + constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); __ shrl(temp, shift_count); - // have we resolved this bytecode? - __ andl(temp, 0xFF); + // have we resolved this bytecode? 
+ __ andptr(temp, 0xFF); __ cmpl(temp, (int)bytecode()); __ jcc(Assembler::equal, resolved); @@ -2046,7 +2136,7 @@ case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; default : ShouldNotReachHere(); break; } - __ movl(temp, (int)bytecode()); + __ movl(temp, (int)bytecode()); __ call_VM(noreg, entry, temp); // Update registers with resolved info __ get_cache_and_index_at_bcp(Rcache, index, 1); @@ -2065,16 +2155,16 @@ ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); // Field offset - __ movl(off, Address(cache, index, Address::times_4, - in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()))); - // Flags - __ movl(flags, Address(cache, index, Address::times_4, + __ movptr(off, Address(cache, index, Address::times_ptr, + in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()))); + // Flags + __ movl(flags, Address(cache, index, Address::times_ptr, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()))); // klass overwrite register if (is_static) { - __ movl(obj, Address(cache, index, Address::times_4, - in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset()))); + __ movptr(obj, Address(cache, index, Address::times_ptr, + in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset()))); } } @@ -2107,12 +2197,11 @@ resolve_cache_and_index(byte_no, cache, index); - assert(wordSize == 4, "adjust code below"); - __ movl(method, Address(cache, index, Address::times_4, method_offset)); + __ movptr(method, Address(cache, index, Address::times_ptr, method_offset)); if (itable_index != noreg) { - __ movl(itable_index, Address(cache, index, Address::times_4, index_offset)); + __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset)); } - __ movl(flags , Address(cache, index, Address::times_4, flags_offset )); + __ movl(flags , Address(cache, index, Address::times_ptr, flags_offset )); } @@ -2132,11 +2221,11 @@ __ jcc(Assembler::zero, L1); // cache entry pointer - __ addl(cache, in_bytes(constantPoolCacheOopDesc::base_offset())); + __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset())); __ shll(index, LogBytesPerWord); - __ addl(cache, index); + __ addptr(cache, index); if (is_static) { - __ movl(rax, 0); // NULL object reference + __ xorptr(rax, rax); // NULL object reference } else { __ pop(atos); // Get the object __ verify_oop(rax); @@ -2148,7 +2237,7 @@ rax, cache); __ get_cache_and_index_at_bcp(cache, index, 1); __ bind(L1); - } + } } void TemplateTable::pop_and_check_object(Register r) { @@ -2180,10 +2269,10 @@ __ shrl(flags, ConstantPoolCacheEntry::tosBits); assert(btos == 0, "change code, btos != 0"); // btos - __ andl(flags, 0x0f); + __ andptr(flags, 0x0f); __ jcc(Assembler::notZero, notByte); - __ load_signed_byte(rax, lo ); + __ load_signed_byte(rax, lo ); __ push(btos); // Rewrite bytecode to be faster if (!is_static) { @@ -2221,7 +2310,7 @@ __ cmpl(flags, ctos ); __ jcc(Assembler::notEqual, notChar); - __ load_unsigned_word(rax, lo ); + __ load_unsigned_word(rax, lo ); __ push(ctos); if (!is_static) { patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx); @@ -2247,11 +2336,11 @@ // Generate code as if volatile. There just aren't enough registers to // save that information and this code is faster than the test. 
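volatile_barrier() now takes the ordering mask it must enforce (see the signature change above), so each call site can request only what the JMM discussion there actually requires. On x86 that distinction is cheap to exploit: only the store-load ordering needs a real fence, so the volatile store paths below end with, in sketch form:

    // The call the volatile putfield/fast_storefield paths below use, annotated:
    volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |    // volatile store ; volatile load
                                                 Assembler::StoreStore));  // volatile store ; later store
    // Volatile loads get no explicit fence here: ordinary x86 loads already
    // provide the LoadLoad/LoadStore (acquire) orderings.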
- __ fild_d(lo); // Must load atomically - __ subl(rsp,2*wordSize); // Make space for store + __ fild_d(lo); // Must load atomically + __ subptr(rsp,2*wordSize); // Make space for store __ fistp_d(Address(rsp,0)); - __ popl(rax); - __ popl(rdx); + __ pop(rax); + __ pop(rdx); __ push(ltos); // Don't rewrite to _fast_lgetfield for potential volatile case. @@ -2282,7 +2371,7 @@ __ jmpb(Done); __ bind(notDouble); - + __ stop("Bad state"); __ bind(Done); @@ -2322,16 +2411,16 @@ if (is_static) { // Life is simple. Null out the object pointer. - __ xorl(rbx, rbx); + __ xorptr(rbx, rbx); } else { // Life is harder. The stack holds the value on top, followed by the object. // We don't know the size of the value, though; it could be one or two words // depending on its type. As a result, we must find the type to determine where // the object is. Label two_word, valsize_known; - __ movl(rcx, Address(rax, rdx, Address::times_4, in_bytes(cp_base_offset + + __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()))); - __ movl(rbx, rsp); + __ mov(rbx, rsp); __ shrl(rcx, ConstantPoolCacheEntry::tosBits); // Make sure we don't need to mask rcx for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); @@ -2339,22 +2428,22 @@ __ jccb(Assembler::equal, two_word); __ cmpl(rcx, dtos); __ jccb(Assembler::equal, two_word); - __ addl(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos) + __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos) __ jmpb(valsize_known); __ bind(two_word); - __ addl(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue - + __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue + __ bind(valsize_known); // setup object pointer - __ movl(rbx, Address(rbx, 0)); + __ movptr(rbx, Address(rbx, 0)); } // cache entry pointer - __ addl(rax, in_bytes(cp_base_offset)); + __ addptr(rax, in_bytes(cp_base_offset)); __ shll(rdx, LogBytesPerWord); - __ addl(rax, rdx); + __ addptr(rax, rdx); // object (tos) - __ movl(rcx, rsp); + __ mov(rcx, rsp); // rbx,: object pointer set up above (NULL if static) // rax,: cache entry pointer // rcx: jvalue object on the stack @@ -2420,7 +2509,7 @@ patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx); } __ jmp(Done); - + __ bind(notInt); // atos __ cmpl(flags, atos ); @@ -2429,11 +2518,12 @@ __ pop(atos); if (!is_static) pop_and_check_object(obj); - __ movl(lo, rax ); - __ store_check(obj, lo); // Need to mark card + do_oop_store(_masm, lo, rax, _bs->kind(), false); + if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx); } + __ jmp(Done); __ bind(notObj); @@ -2473,14 +2563,16 @@ __ pop(ltos); // overwrites rdx, do this after testing volatile. 
if (!is_static) pop_and_check_object(obj); - + // Replace with real volatile test - __ pushl(rdx); - __ pushl(rax); // Must update atomically with FIST - __ fild_d(Address(rsp,0)); // So load into FPU register - __ fistp_d(lo); // and put into memory atomically - __ addl(rsp,2*wordSize); - volatile_barrier(); + __ push(rdx); + __ push(rax); // Must update atomically with FIST + __ fild_d(Address(rsp,0)); // So load into FPU register + __ fistp_d(lo); // and put into memory atomically + __ addptr(rsp, 2*wordSize); + // volatile_barrier(); + volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | + Assembler::StoreStore)); // Don't rewrite volatile version __ jmp(notVolatile); @@ -2488,8 +2580,8 @@ __ pop(ltos); // overwrites rdx if (!is_static) pop_and_check_object(obj); - __ movl(hi, rdx); - __ movl(lo, rax); + NOT_LP64(__ movptr(hi, rdx)); + __ movptr(lo, rax); if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx); } @@ -2522,7 +2614,7 @@ __ jmp(Done); __ bind(notDouble); - + __ stop("Bad state"); __ bind(Done); @@ -2530,7 +2622,8 @@ // Check for volatile store __ testl(rdx, rdx); __ jcc(Assembler::zero, notVolatile); - volatile_barrier( ); + volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | + Assembler::StoreStore)); __ bind(notVolatile); } @@ -2555,10 +2648,10 @@ __ pop_ptr(rbx); // copy the object pointer from tos __ verify_oop(rbx); __ push_ptr(rbx); // put the object pointer back on tos - __ subl(rsp, sizeof(jvalue)); // add space for a jvalue object - __ movl(rcx, rsp); + __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object + __ mov(rcx, rsp); __ push_ptr(rbx); // save object pointer so we can steal rbx, - __ movl(rbx, 0); + __ xorptr(rbx, rbx); const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize); const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize); switch (bytecode()) { // load values into the jvalue object @@ -2566,21 +2659,28 @@ case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break; case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break; case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break; - case Bytecodes::_fast_lputfield: __ movl(hi_value, rdx); __ movl(lo_value, rax); break; + case Bytecodes::_fast_lputfield: + NOT_LP64(__ movptr(hi_value, rdx)); + __ movptr(lo_value, rax); + break; + // need to call fld_s() after fstp_s() to restore the value for below case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break; + // need to call fld_d() after fstp_d() to restore the value for below case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break; + // since rcx is not an object we don't call store_check() here - case Bytecodes::_fast_aputfield: __ movl(lo_value, rax); break; + case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break; + default: ShouldNotReachHere(); } __ pop_ptr(rbx); // restore copy of object pointer // Save rax, and sometimes rdx because call_VM() will clobber them, // then use them for JVM/DI purposes - __ pushl(rax); - if (bytecode() == Bytecodes::_fast_lputfield) __ pushl(rdx); + __ push(rax); + if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx); // access constant pool cache entry __ get_cache_entry_pointer_at_bcp(rax, rdx, 1); __ verify_oop(rbx); @@ -2588,9 +2688,9 @@ // rax,: cache entry pointer // rcx: jvalue object on the stack __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx); - if (bytecode() == Bytecodes::_fast_lputfield) __ 
popl(rdx); // restore high value - __ popl(rax); // restore lower value - __ addl(rsp, sizeof(jvalue)); // release jvalue object space + if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value + __ pop(rax); // restore lower value + __ addptr(rsp, sizeof(jvalue)); // release jvalue object space __ bind(L2); } } @@ -2606,12 +2706,12 @@ __ get_cache_and_index_at_bcp(rcx, rbx, 1); // test for volatile with rdx but rdx is tos register for lputfield. - if (bytecode() == Bytecodes::_fast_lputfield) __ pushl(rdx); - __ movl(rdx, Address(rcx, rbx, Address::times_4, in_bytes(base + + if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx); + __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::flags_offset()))); // replace index with field offset from cache entry - __ movl(rbx, Address(rcx, rbx, Address::times_4, in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); + __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO). // volatile_barrier( ); @@ -2623,7 +2723,7 @@ __ testl(rdx, rdx); __ jcc(Assembler::zero, notVolatile); - if (bytecode() == Bytecodes::_fast_lputfield) __ popl(rdx); + if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // Get object from stack pop_and_check_object(rcx); @@ -2638,22 +2738,30 @@ case Bytecodes::_fast_sputfield: // fall through case Bytecodes::_fast_cputfield: __ movw(lo, rax); break; case Bytecodes::_fast_iputfield: __ movl(lo, rax); break; - case Bytecodes::_fast_lputfield: __ movl(hi, rdx); __ movl(lo, rax); break; + case Bytecodes::_fast_lputfield: + NOT_LP64(__ movptr(hi, rdx)); + __ movptr(lo, rax); + break; case Bytecodes::_fast_fputfield: __ fstp_s(lo); break; case Bytecodes::_fast_dputfield: __ fstp_d(lo); break; - case Bytecodes::_fast_aputfield: __ movl(lo, rax); __ store_check(rcx, lo); break; + case Bytecodes::_fast_aputfield: { + do_oop_store(_masm, lo, rax, _bs->kind(), false); + break; + } default: ShouldNotReachHere(); } Label done; - volatile_barrier( ); - __ jmpb(done); + volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | + Assembler::StoreStore)); + // Barriers are so large that short branch doesn't reach! + __ jmp(done); // Same code as above, but don't need rdx to test for volatile. 
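Note (illustration, not part of the patch): the volatile_barrier(StoreLoad | StoreStore) calls introduced above only cost a real instruction for the StoreLoad half on x86; StoreStore ordering comes for free from the memory model. A sketch of the usual StoreLoad idiom, assuming GCC-style inline asm (the actual membar emission lives in the assembler, not here):

    // StoreLoad fence on IA-32: either mfence (SSE2) or a locked RMW on the
    // stack; both drain the store buffer before later loads may execute.
    static inline void store_load_fence() {
    #if defined(__SSE2__)
        __asm__ __volatile__("mfence" ::: "memory");
    #else
        __asm__ __volatile__("lock; addl $0, (%%esp)" ::: "cc", "memory");
    #endif
    }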
__ bind(notVolatile); - if (bytecode() == Bytecodes::_fast_lputfield) __ popl(rdx); + if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // Get object from stack pop_and_check_object(rcx); @@ -2664,10 +2772,16 @@ case Bytecodes::_fast_sputfield: // fall through case Bytecodes::_fast_cputfield: __ movw(lo, rax); break; case Bytecodes::_fast_iputfield: __ movl(lo, rax); break; - case Bytecodes::_fast_lputfield: __ movl(hi, rdx); __ movl(lo, rax); break; + case Bytecodes::_fast_lputfield: + NOT_LP64(__ movptr(hi, rdx)); + __ movptr(lo, rax); + break; case Bytecodes::_fast_fputfield: __ fstp_s(lo); break; case Bytecodes::_fast_dputfield: __ fstp_d(lo); break; - case Bytecodes::_fast_aputfield: __ movl(lo, rax); __ store_check(rcx, lo); break; + case Bytecodes::_fast_aputfield: { + do_oop_store(_masm, lo, rax, _bs->kind(), false); + break; + } default: ShouldNotReachHere(); } @@ -2700,7 +2814,10 @@ // access constant pool cache __ get_cache_and_index_at_bcp(rcx, rbx, 1); // replace index with field offset from cache entry - __ movl(rbx, Address(rcx, rbx, Address::times_4, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()))); + __ movptr(rbx, Address(rcx, + rbx, + Address::times_ptr, + in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()))); // rax,: object @@ -2712,14 +2829,14 @@ // access field switch (bytecode()) { - case Bytecodes::_fast_bgetfield: __ movsxb(rax, lo ); break; + case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break; case Bytecodes::_fast_sgetfield: __ load_signed_word(rax, lo ); break; case Bytecodes::_fast_cgetfield: __ load_unsigned_word(rax, lo ); break; case Bytecodes::_fast_igetfield: __ movl(rax, lo); break; case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break; case Bytecodes::_fast_fgetfield: __ fld_s(lo); break; case Bytecodes::_fast_dgetfield: __ fld_d(lo); break; - case Bytecodes::_fast_agetfield: __ movl(rax, lo); __ verify_oop(rax); break; + case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break; default: ShouldNotReachHere(); } @@ -2731,11 +2848,14 @@ void TemplateTable::fast_xaccess(TosState state) { transition(vtos, state); // get receiver - __ movl(rax, aaddress(0)); + __ movptr(rax, aaddress(0)); debug_only(__ verify_local_tag(frame::TagReference, 0)); // access constant pool cache __ get_cache_and_index_at_bcp(rcx, rdx, 2); - __ movl(rbx, Address(rcx, rdx, Address::times_4, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()))); + __ movptr(rbx, Address(rcx, + rdx, + Address::times_ptr, + in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()))); // make sure exception is reported in correct bcp range (getfield is next instruction) __ increment(rsi); __ null_check(rax); @@ -2743,7 +2863,7 @@ if (state == itos) { __ movl(rax, lo); } else if (state == atos) { - __ movl(rax, lo); + __ movptr(rax, lo); __ verify_oop(rax); } else if (state == ftos) { __ fld_s(lo); @@ -2758,7 +2878,7 @@ //---------------------------------------------------------------------------------------------------- // Calls -void TemplateTable::count_calls(Register method, Register temp) { +void TemplateTable::count_calls(Register method, Register temp) { // implemented elsewhere ShouldNotReachHere(); } @@ -2774,7 +2894,7 @@ const bool save_flags = is_invokeinterface || is_invokevirtual; // setup registers & access constant pool cache const Register recv = rcx; - const Register flags = rdx; + 
const Register flags = rdx; assert_different_registers(method, index, recv, flags); // save 'interpreter return address' @@ -2787,7 +2907,7 @@ __ movl(recv, flags); __ andl(recv, 0xFF); // recv count is 0 based? - __ movl(recv, Address(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1))); + __ movptr(recv, Address(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1))); __ verify_oop(recv); } @@ -2797,7 +2917,7 @@ } if (save_flags) { - __ movl(rsi, flags); + __ mov(rsi, flags); } // compute return type @@ -2805,20 +2925,19 @@ // Make sure we don't need to mask flags for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); // load return address - { const int table = - is_invokeinterface - ? (int)Interpreter::return_5_addrs_by_index_table() - : (int)Interpreter::return_3_addrs_by_index_table(); - __ movl(flags, Address(noreg, flags, Address::times_4, table)); + { + ExternalAddress table(is_invokeinterface ? (address)Interpreter::return_5_addrs_by_index_table() : + (address)Interpreter::return_3_addrs_by_index_table()); + __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))); } // push return address - __ pushl(flags); + __ push(flags); // Restore flag value from the constant pool cache, and restore rsi // for later null checks. rsi is the bytecode pointer if (save_flags) { - __ movl(flags, rsi); + __ mov(flags, rsi); __ restore_bcp(); } } @@ -2855,16 +2974,16 @@ // get receiver klass __ null_check(recv, oopDesc::klass_offset_in_bytes()); // Keep recv in rcx for callee expects it there - __ movl(rax, Address(recv, oopDesc::klass_offset_in_bytes())); + __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes())); __ verify_oop(rax); // profile this call __ profile_virtual_call(rax, rdi, rdx); // get target methodOop & entry point - const int base = instanceKlass::vtable_start_offset() * wordSize; + const int base = instanceKlass::vtable_start_offset() * wordSize; assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below"); - __ movl(method, Address(rax, index, Address::times_4, base + vtableEntry::method_offset_in_bytes())); + __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes())); __ jump_from_interpreted(method, rdx); } @@ -2874,8 +2993,8 @@ prepare_invoke(rbx, noreg, byte_no, bytecode()); // rbx,: index - // rcx: receiver - // rdx: flags + // rcx: receiver + // rdx: flags invokevirtual_helper(rbx, rcx, rdx); } @@ -2910,10 +3029,10 @@ void TemplateTable::invokeinterface(int byte_no) { transition(vtos, vtos); prepare_invoke(rax, rbx, byte_no, bytecode()); - + // rax,: Interface // rbx,: index - // rcx: receiver + // rcx: receiver // rdx: flags // Special case of invokeinterface called for virtual method of @@ -2930,42 +3049,42 @@ // Get receiver klass into rdx - also a null check __ restore_locals(); // restore rdi - __ movl(rdx, Address(rcx, oopDesc::klass_offset_in_bytes())); + __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes())); __ verify_oop(rdx); // profile this call __ profile_virtual_call(rdx, rsi, rdi); - __ movl(rdi, rdx); // Save klassOop in rdi + __ mov(rdi, rdx); // Save klassOop in rdi // Compute start of first itableOffsetEntry (which is at the end of the vtable) - const int base = instanceKlass::vtable_start_offset() * wordSize; - assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below"); + const int base = instanceKlass::vtable_start_offset() * wordSize; + 
assert(vtableEntry::size() * wordSize == (1 << (int)Address::times_ptr), "adjust the scaling in the code below"); __ movl(rsi, Address(rdx, instanceKlass::vtable_length_offset() * wordSize)); // Get length of vtable - __ leal(rdx, Address(rdx, rsi, Address::times_4, base)); + __ lea(rdx, Address(rdx, rsi, Address::times_4, base)); if (HeapWordsPerLong > 1) { // Round up to align_object_offset boundary __ round_to(rdx, BytesPerLong); } Label entry, search, interface_ok; - - __ jmpb(entry); + + __ jmpb(entry); __ bind(search); - __ addl(rdx, itableOffsetEntry::size() * wordSize); - + __ addptr(rdx, itableOffsetEntry::size() * wordSize); + __ bind(entry); // Check that the entry is non-null. A null entry means that the receiver // class doesn't implement the interface, and wasn't the same as the // receiver class checked when the interface was resolved. - __ pushl(rdx); - __ movl(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); - __ testl(rdx, rdx); + __ push(rdx); + __ movptr(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); + __ testptr(rdx, rdx); __ jcc(Assembler::notZero, interface_ok); // throw exception - __ popl(rdx); // pop saved register first. - __ popl(rbx); // pop return address (pushed by prepare_invoke) + __ pop(rdx); // pop saved register first. + __ pop(rbx); // pop return address (pushed by prepare_invoke) __ restore_bcp(); // rsi must be correct for exception handler (was destroyed) __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) __ call_VM(noreg, CAST_FROM_FN_PTR(address, @@ -2974,15 +3093,15 @@ __ should_not_reach_here(); __ bind(interface_ok); - __ popl(rdx); + __ pop(rdx); - __ cmpl(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); + __ cmpptr(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); __ jcc(Assembler::notEqual, search); - - __ movl(rdx, Address(rdx, itableOffsetEntry::offset_offset_in_bytes())); - __ addl(rdx, rdi); // Add offset to klassOop - assert(itableMethodEntry::size() * wordSize == 4, "adjust the scaling in the code below"); - __ movl(rbx, Address(rdx, rbx, Address::times_4)); + + __ movl(rdx, Address(rdx, itableOffsetEntry::offset_offset_in_bytes())); + __ addptr(rdx, rdi); // Add offset to klassOop + assert(itableMethodEntry::size() * wordSize == (1 << (int)Address::times_ptr), "adjust the scaling in the code below"); + __ movptr(rbx, Address(rdx, rbx, Address::times_ptr)); // rbx,: methodOop to call // rcx: receiver // Check for abstract method error @@ -2990,14 +3109,14 @@ // interpreter entry point and a conditional jump to it in case of a null // method. { Label L; - __ testl(rbx, rbx); + __ testptr(rbx, rbx); __ jcc(Assembler::notZero, L); // throw exception - // note: must restore interpreter registers to canonical - // state for exception handling to work correctly! - __ popl(rbx); // pop return address (pushed by prepare_invoke) - __ restore_bcp(); // rsi must be correct for exception handler (was destroyed) - __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) + // note: must restore interpreter registers to canonical + // state for exception handling to work correctly! 
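Note (illustration, not part of the patch): the invokeinterface loop above walks the itableOffsetEntry records that sit after the vtable until it finds the resolved interface (a null entry means the class does not implement it), then indexes that interface's method block with the itable index. A hypothetical-type sketch of the same search; the struct and names below are illustrative, not the real HotSpot layouts:

    // Scan the offset table; 'offset_in_bytes' locates this interface's block
    // of method entries relative to the receiver's klass.
    struct ItableOffsetEntryLite { const void* interface_klass; int offset_in_bytes; };

    const void* const* find_interface_method(const char* klass_base,
                                             const ItableOffsetEntryLite* itable,
                                             const void* interface_klass,
                                             int itable_index) {
        for (const ItableOffsetEntryLite* e = itable; ; ++e) {
            if (e->interface_klass == nullptr)
                return nullptr;                          // real code throws ICCE here
            if (e->interface_klass == interface_klass) {
                const void* const* methods =
                    reinterpret_cast<const void* const*>(klass_base + e->offset_in_bytes);
                return &methods[itable_index];           // may be null -> AbstractMethodError
            }
        }
    }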
+ __ pop(rbx); // pop return address (pushed by prepare_invoke) + __ restore_bcp(); // rsi must be correct for exception handler (was destroyed) + __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); // the call_VM checks for exception, so we should never return here. __ should_not_reach_here(); @@ -3022,14 +3141,12 @@ Label initialize_object; // including clearing the fields Label allocate_shared; - ExternalAddress heap_top((address)Universe::heap()->top_addr()); - __ get_cpool_and_tags(rcx, rax); // get instanceKlass - __ movl(rcx, Address(rcx, rdx, Address::times_4, sizeof(constantPoolOopDesc))); - __ pushl(rcx); // save the contexts of klass for initializing the header + __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc))); + __ push(rcx); // save the contexts of klass for initializing the header - // make sure the class we're about to instantiate has been resolved. + // make sure the class we're about to instantiate has been resolved. // Note: slow_case does a pop of stack, which is why we loaded class/pushed above const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class); @@ -3046,7 +3163,7 @@ __ testl(rdx, Klass::_lh_instance_slow_path_bit); __ jcc(Assembler::notZero, slow_case); - // + // // Allocate the instance // 1) Try to allocate in the TLAB // 2) if fail and the object is large allocate in the shared Eden @@ -3060,11 +3177,11 @@ const Register thread = rcx; __ get_thread(thread); - __ movl(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset()))); - __ leal(rbx, Address(rax, rdx, Address::times_1)); - __ cmpl(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset()))); + __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset()))); + __ lea(rbx, Address(rax, rdx, Address::times_1)); + __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset()))); __ jcc(Assembler::above, allow_shared_alloc ? 
allocate_shared : slow_case); - __ movl(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx); + __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx); if (ZeroTLAB) { // the fields have been already cleared __ jmp(initialize_header); @@ -3080,11 +3197,13 @@ if (allow_shared_alloc) { __ bind(allocate_shared); + ExternalAddress heap_top((address)Universe::heap()->top_addr()); + Label retry; __ bind(retry); - __ mov32(rax, heap_top); - __ leal(rbx, Address(rax, rdx, Address::times_1)); - __ cmp32(rbx, ExternalAddress((address)Universe::heap()->end_addr())); + __ movptr(rax, heap_top); + __ lea(rbx, Address(rax, rdx, Address::times_1)); + __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr())); __ jcc(Assembler::above, slow_case); // Compare rax, with the top addr, and if still equal, store the new @@ -3094,10 +3213,9 @@ // rax,: object begin // rbx,: object end // rdx: instance size in bytes - if (os::is_MP()) __ lock(); - __ cmpxchgptr(rbx, heap_top); + __ locked_cmpxchgptr(rbx, heap_top); - // if someone beat us on the allocation, try again, otherwise continue + // if someone beat us on the allocation, try again, otherwise continue __ jcc(Assembler::notEqual, retry); } @@ -3127,8 +3245,8 @@ // initialize remaining object fields: rdx was a multiple of 8 { Label loop; __ bind(loop); - __ movl(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx); - __ movl(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx); + __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx); + NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx)); __ decrement(rdx); __ jcc(Assembler::notZero, loop); } @@ -3136,15 +3254,15 @@ // initialize object header only. __ bind(initialize_header); if (UseBiasedLocking) { - __ popl(rcx); // get saved klass back in the register. - __ movl(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); - __ movl(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx); + __ pop(rcx); // get saved klass back in the register. + __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); + __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx); } else { - __ movl(Address(rax, oopDesc::mark_offset_in_bytes ()), - (int)markOopDesc::prototype()); // header - __ popl(rcx); // get saved klass back in the register. + __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), + (int32_t)markOopDesc::prototype()); // header + __ pop(rcx); // get saved klass back in the register. } - __ movl(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass + __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass { SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0); @@ -3160,7 +3278,7 @@ // slow case __ bind(slow_case); - __ popl(rcx); // restore stack pointer to what it was when we came in. + __ pop(rcx); // restore stack pointer to what it was when we came in. 
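Note (illustration, not part of the patch): the allocate_shared path above is a bump-pointer allocation with a compare-and-swap retry against the heap top, which is what locked_cmpxchgptr against heap_top implements. A minimal standalone sketch of the same retry structure, with std::atomic standing in for the real heap-top word:

    #include <atomic>
    #include <cstddef>

    // Returns the old top (the new object's start) on success, or nullptr to
    // signal that the caller should fall back to the slow path.
    char* cas_allocate(std::atomic<char*>& top, char* end, std::size_t size) {
        char* obj = top.load(std::memory_order_relaxed);
        for (;;) {
            char* new_top = obj + size;
            if (new_top > end)
                return nullptr;                      // exhausted: take the slow path
            // On failure 'obj' is refreshed with the value someone else
            // published, mirroring the cmpxchg retry loop above.
            if (top.compare_exchange_weak(obj, new_top))
                return obj;
        }
    }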
__ get_constant_pool(rax); __ get_unsigned_2_byte_index_at_bcp(rdx, 1); call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx); @@ -3197,7 +3315,7 @@ void TemplateTable::checkcast() { transition(atos, atos); Label done, is_null, ok_is_subtype, quicked, resolved; - __ testl(rax, rax); // Object is in EAX + __ testptr(rax, rax); // Object is in EAX __ jcc(Assembler::zero, is_null); // Get cpool & tags index @@ -3214,24 +3332,24 @@ // Get superklass in EAX and subklass in EBX __ bind(quicked); - __ movl(rdx, rax); // Save object in EDX; EAX needed for subtype check - __ movl(rax, Address(rcx, rbx, Address::times_4, sizeof(constantPoolOopDesc))); + __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check + __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc))); __ bind(resolved); - __ movl(rbx, Address(rdx, oopDesc::klass_offset_in_bytes())); + __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes())); // Generate subtype check. Blows ECX. Resets EDI. Object in EDX. // Superklass in EAX. Subklass in EBX. __ gen_subtype_check( rbx, ok_is_subtype ); // Come here on failure - __ pushl(rdx); + __ push(rdx); // object is at TOS __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry)); // Come here on success __ bind(ok_is_subtype); - __ movl(rax,rdx); // Restore object in EDX + __ mov(rax,rdx); // Restore object in EDX // Collect counts on whether this check-cast sees NULLs a lot or not. if (ProfileInterpreter) { @@ -3248,7 +3366,7 @@ void TemplateTable::instanceof() { transition(atos, itos); Label done, is_null, ok_is_subtype, quicked, resolved; - __ testl(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, is_null); // Get cpool & tags index @@ -3261,13 +3379,13 @@ __ push(atos); call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) ); __ pop_ptr(rdx); - __ movl(rdx, Address(rdx, oopDesc::klass_offset_in_bytes())); + __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes())); __ jmp(resolved); // Get superklass in EAX and subklass in EDX __ bind(quicked); - __ movl(rdx, Address(rax, oopDesc::klass_offset_in_bytes())); - __ movl(rax, Address(rcx, rbx, Address::times_4, sizeof(constantPoolOopDesc))); + __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes())); + __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc))); __ bind(resolved); @@ -3299,17 +3417,17 @@ //---------------------------------------------------------------------------------------------------- // Breakpoints void TemplateTable::_breakpoint() { - + // Note: We get here even if we are single stepping.. - // jbug inists on setting breakpoints at every bytecode - // even if we are in single step mode. - + // jbug inists on setting breakpoints at every bytecode + // even if we are in single step mode. 
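Note (illustration, not part of the patch): the checkcast/instanceof hunks above hand the real work to gen_subtype_check(), which is defined elsewhere. As a rough sketch of the kind of test it performs, assuming a display-of-primary-supers plus secondary-supers layout; all field names below are hypothetical:

    // Subtype test sketch: a fixed-depth display answers superclass checks with
    // one load and compare; anything deeper falls back to scanning the
    // secondary supers list (interfaces, very deep hierarchies).
    struct KlassLite {
        static const int kDisplayDepth = 8;
        const KlassLite* primary_supers[kDisplayDepth];
        int depth;                                   // this klass's depth in the display
        const KlassLite* const* secondary_supers;
        int secondary_count;
    };

    bool is_subtype_of(const KlassLite* sub, const KlassLite* super) {
        if (super->depth < KlassLite::kDisplayDepth)
            return sub->primary_supers[super->depth] == super;   // fast path
        for (int i = 0; i < sub->secondary_count; i++)
            if (sub->secondary_supers[i] == super)
                return true;                                     // slow path hit
        return false;
    }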
+ transition(vtos, vtos); // get the unpatched byte code __ get_method(rcx); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi); - __ movl(rbx, rax); + __ mov(rbx, rax); // post the breakpoint event __ get_method(rcx); @@ -3317,7 +3435,7 @@ // complete the execution of original bytecode __ dispatch_only_normal(vtos); -} +} //---------------------------------------------------------------------------------------------------- @@ -3365,63 +3483,63 @@ // find a free slot in the monitor block (result in rdx) { Label entry, loop, exit; - __ movl(rcx, monitor_block_top); // points to current entry, starting with top-most entry - __ leal(rbx, monitor_block_bot); // points to word before bottom of monitor block + __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry + __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block __ jmpb(entry); __ bind(loop); - __ cmpl(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); // check if current entry is used + __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used // TODO - need new func here - kbt if (VM_Version::supports_cmov()) { - __ cmovl(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx + __ cmov(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx } else { Label L; __ jccb(Assembler::notEqual, L); - __ movl(rdx, rcx); // if not used then remember entry in rdx + __ mov(rdx, rcx); // if not used then remember entry in rdx __ bind(L); } - __ cmpl(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object - __ jccb(Assembler::equal, exit); // if same object then stop searching - __ addl(rcx, entry_size); // otherwise advance to next entry + __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object + __ jccb(Assembler::equal, exit); // if same object then stop searching + __ addptr(rcx, entry_size); // otherwise advance to next entry __ bind(entry); - __ cmpl(rcx, rbx); // check if bottom reached + __ cmpptr(rcx, rbx); // check if bottom reached __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry __ bind(exit); } - __ testl(rdx, rdx); // check if a slot has been found - __ jccb(Assembler::notZero, allocated); // if found, continue with that one + __ testptr(rdx, rdx); // check if a slot has been found + __ jccb(Assembler::notZero, allocated); // if found, continue with that one // allocate one if there's no free slot { Label entry, loop; // 1. compute new pointers // rsp: old expression stack top - __ movl(rdx, monitor_block_bot); // rdx: old expression stack bottom - __ subl(rsp, entry_size); // move expression stack top - __ subl(rdx, entry_size); // move expression stack bottom - __ movl(rcx, rsp); // set start value for copy loop - __ movl(monitor_block_bot, rdx); // set new monitor block top + __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom + __ subptr(rsp, entry_size); // move expression stack top + __ subptr(rdx, entry_size); // move expression stack bottom + __ mov(rcx, rsp); // set start value for copy loop + __ movptr(monitor_block_bot, rdx); // set new monitor block top __ jmp(entry); // 2. 
move expression stack contents __ bind(loop); - __ movl(rbx, Address(rcx, entry_size)); // load expression stack word from old location - __ movl(Address(rcx, 0), rbx); // and store it at new location - __ addl(rcx, wordSize); // advance to next word + __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location + __ movptr(Address(rcx, 0), rbx); // and store it at new location + __ addptr(rcx, wordSize); // advance to next word __ bind(entry); - __ cmpl(rcx, rdx); // check if bottom reached + __ cmpptr(rcx, rdx); // check if bottom reached __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word } - + // call run-time routine // rdx: points to monitor entry __ bind(allocated); - // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly. + // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly. // The object has already been poped from the stack, so the expression stack looks correct. __ increment(rsi); - __ movl(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object - __ lock_object(rdx); + __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object + __ lock_object(rdx); // check to make sure this monitor doesn't cause stack overflow after locking __ save_bcp(); // in case of exception @@ -3445,16 +3563,16 @@ // find matching slot { Label entry, loop; - __ movl(rdx, monitor_block_top); // points to current entry, starting with top-most entry - __ leal(rbx, monitor_block_bot); // points to word before bottom of monitor block + __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry + __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block __ jmpb(entry); __ bind(loop); - __ cmpl(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object + __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object __ jcc(Assembler::equal, found); // if same object then stop searching - __ addl(rdx, entry_size); // otherwise advance to next entry + __ addptr(rdx, entry_size); // otherwise advance to next entry __ bind(entry); - __ cmpl(rdx, rbx); // check if bottom reached + __ cmpptr(rdx, rbx); // check if bottom reached __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry } @@ -3466,9 +3584,9 @@ // call run-time routine // rcx: points to monitor entry __ bind(found); - __ push_ptr(rax); // make sure object is on stack (contract with oopMaps) - __ unlock_object(rdx); - __ pop_ptr(rax); // discard object + __ push_ptr(rax); // make sure object is on stack (contract with oopMaps) + __ unlock_object(rdx); + __ pop_ptr(rax); // discard object __ bind(end); } @@ -3479,7 +3597,8 @@ void TemplateTable::wide() { transition(vtos, vtos); __ load_unsigned_byte(rbx, at_bcp(1)); - __ jmp(Address(noreg, rbx, Address::times_4, int(Interpreter::_wentry_point))); + ExternalAddress wtable((address)Interpreter::_wentry_point); + __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr))); // Note: the rsi increment step is part of the individual wide bytecode implementations } @@ -3493,10 +3612,10 @@ // last dim is on top of stack; we want address of first one: // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordsize // the latter wordSize to point to the beginning of the array. 
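Note (illustration, not part of the patch): looking back at the monitorenter hunk above, the generated loop scans the frame's monitor block from the top-most entry toward the bottom, remembering an unused slot and stopping early once it sees an entry for the same object; a null result means the block has to be grown by sliding the expression stack down. A hypothetical-type sketch of that scan:

    // BasicObjectLock is reduced to the only field the scan looks at.
    struct BasicObjectLockLite { void* obj; };

    BasicObjectLockLite* find_monitor_slot(BasicObjectLockLite* top,     // current top entry
                                           BasicObjectLockLite* bottom,  // one past the last entry
                                           void* obj) {
        BasicObjectLockLite* free_slot = nullptr;
        for (BasicObjectLockLite* cur = top; cur != bottom; ++cur) {
            if (cur->obj == nullptr)
                free_slot = cur;        // candidate slot for the new lock
            if (cur->obj == obj)
                break;                  // same object: no need to scan further
        }
        return free_slot;               // null -> grow the monitor block first
    }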
- __ leal( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize)); + __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize)); call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax, __ load_unsigned_byte(rbx, at_bcp(3)); - __ leal(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts + __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts } #endif /* !CC_INTERP */ --- old/hotspot/src/cpu/x86/vm/templateTable_x86_32.hpp 2009-08-01 04:08:23.370630020 +0100 +++ new/hotspot/src/cpu/x86/vm/templateTable_x86_32.hpp 2009-08-01 04:08:23.298840262 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)templateTable_x86_32.hpp 1.20 07/05/05 17:04:20 JVM" -#endif /* - * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,14 +19,14 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ static void prepare_invoke(Register method, Register index, int byte_no, Bytecodes::Code code); static void invokevirtual_helper(Register index, Register recv, Register flags); - static void volatile_barrier( ); + static void volatile_barrier(Assembler::Membar_mask_bits order_constraint ); // Helpers static void index_check(Register array, Register index); --- old/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp 2009-08-01 04:08:24.266647100 +0100 +++ new/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp 2009-08-01 04:08:24.163188394 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)templateTable_x86_64.cpp 1.58 07/09/17 09:25:59 JVM" -#endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,12 +19,14 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ #include "incls/_precompiled.incl" #include "incls/_templateTable_x86_64.cpp.incl" +#ifndef CC_INTERP + #define __ _masm-> // Platform-dependent initialization @@ -38,19 +37,19 @@ // Address computation: local variables -static inline Address iaddress(int n) { +static inline Address iaddress(int n) { return Address(r14, Interpreter::local_offset_in_bytes(n)); } -static inline Address laddress(int n) { - return iaddress(n + 1); +static inline Address laddress(int n) { + return iaddress(n + 1); } static inline Address faddress(int n) { return iaddress(n); } -static inline Address daddress(int n) { +static inline Address daddress(int n) { return laddress(n); } @@ -59,7 +58,7 @@ } static inline Address iaddress(Register r) { - return Address(r14, r, Address::times_8, Interpreter::value_offset_in_bytes()); + return Address(r14, r, Address::times_8, Interpreter::value_offset_in_bytes()); } static inline Address laddress(Register r) { @@ -74,12 +73,12 @@ return laddress(r); } -static inline Address aaddress(Register r) { +static inline Address aaddress(Register r) { return iaddress(r); } static inline Address at_rsp() { - return Address(rsp, 0); + return Address(rsp, 0); } // At top of Java expression stack which may be different than esp(). It @@ -87,15 +86,15 @@ static inline Address at_tos () { return Address(rsp, Interpreter::expr_offset_in_bytes(0)); } - + static inline Address at_tos_p1() { return Address(rsp, Interpreter::expr_offset_in_bytes(1)); } - + static inline Address at_tos_p2() { return Address(rsp, Interpreter::expr_offset_in_bytes(2)); } - + static inline Address at_tos_p3() { return Address(rsp, Interpreter::expr_offset_in_bytes(3)); } @@ -116,6 +115,69 @@ // Miscelaneous helper routines +// Store an oop (or NULL) at the address described by obj. 
+// If val == noreg this means store a NULL + +static void do_oop_store(InterpreterMacroAssembler* _masm, + Address obj, + Register val, + BarrierSet::Name barrier, + bool precise) { + assert(val == noreg || val == rax, "parameter is just for looks"); + switch (barrier) { +#ifndef SERIALGC + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: + { + // flatten object address if needed + if (obj.index() == noreg && obj.disp() == 0) { + if (obj.base() != rdx) { + __ movq(rdx, obj.base()); + } + } else { + __ leaq(rdx, obj); + } + __ g1_write_barrier_pre(rdx, r8, rbx, val != noreg); + if (val == noreg) { + __ store_heap_oop(Address(rdx, 0), NULL_WORD); + } else { + __ store_heap_oop(Address(rdx, 0), val); + __ g1_write_barrier_post(rdx, val, r8, rbx); + } + + } + break; +#endif // SERIALGC + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: + { + if (val == noreg) { + __ store_heap_oop(obj, NULL_WORD); + } else { + __ store_heap_oop(obj, val); + // flatten object address if needed + if (!precise || (obj.index() == noreg && obj.disp() == 0)) { + __ store_check(obj.base()); + } else { + __ leaq(rdx, obj); + __ store_check(rdx); + } + } + } + break; + case BarrierSet::ModRef: + case BarrierSet::Other: + if (val == noreg) { + __ store_heap_oop(obj, NULL_WORD); + } else { + __ store_heap_oop(obj, val); + } + break; + default : + ShouldNotReachHere(); + + } +} Address TemplateTable::at_bcp(int offset) { assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); @@ -140,9 +202,9 @@ __ cmpl(scratch, Bytecodes::_breakpoint); __ jcc(Assembler::notEqual, fast_patch); __ get_method(scratch); - // Let breakpoint table handling rewrite to quicker bytecode - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, + // Let breakpoint table handling rewrite to quicker bytecode + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc); #ifndef ASSERT @@ -275,7 +337,7 @@ __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass); __ jccb(Assembler::equal, call_ldc); - // unresolved class in error state - call into runtime to throw the error + // unresolved class in error state - call into runtime to throw the error // from the first resolution attempt __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError); __ jccb(Assembler::equal, call_ldc); @@ -301,7 +363,7 @@ __ bind(notFloat); #ifdef ASSERT - { + { Label L; __ cmpl(rdx, JVM_CONSTANT_Integer); __ jcc(Assembler::equal, L); @@ -320,7 +382,7 @@ __ jmp(Done); __ bind(isOop); - __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset)); + __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset)); __ push_ptr(rax); if (VerifyOops) { @@ -340,7 +402,7 @@ const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; // get type - __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), + __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double); __ jccb(Assembler::notEqual, Long); // dtos @@ -358,19 +420,19 @@ void TemplateTable::locals_index(Register reg, int offset) { __ load_unsigned_byte(reg, at_bcp(offset)); - __ negq(reg); - if (TaggedStackInterpreter) __ shlq(reg, 1); // index = index*2 + __ negptr(reg); + if (TaggedStackInterpreter) __ shlptr(reg, 1); // index = index*2 } void TemplateTable::iload() { transition(vtos, itos); - if (RewriteFrequentPairs) { + if (RewriteFrequentPairs) { Label rewrite, done; const Register bc = c_rarg3; assert(rbx != bc, "register damaged"); // get next byte - __ load_unsigned_byte(rbx, + __ load_unsigned_byte(rbx, 
at_bcp(Bytecodes::length_for(Bytecodes::_iload))); // if _iload, wait to rewrite to iload2. We only want to rewrite the // last two iloads in a pair. Comparing against fast_iload means that @@ -414,7 +476,7 @@ __ movl(rax, iaddress(rbx)); debug_only(__ verify_local_tag(frame::TagValue, rbx)); } - + void TemplateTable::fast_iload() { transition(vtos, itos); locals_index(rbx); @@ -446,7 +508,7 @@ void TemplateTable::aload() { transition(vtos, atos); locals_index(rbx); - __ movq(rax, aaddress(rbx)); + __ movptr(rax, aaddress(rbx)); debug_only(__ verify_local_tag(frame::TagReference, rbx)); } @@ -454,8 +516,8 @@ __ movl(reg, at_bcp(2)); __ bswapl(reg); __ shrl(reg, 16); - __ negq(reg); - if (TaggedStackInterpreter) __ shlq(reg, 1); // index = index*2 + __ negptr(reg); + if (TaggedStackInterpreter) __ shlptr(reg, 1); // index = index*2 } void TemplateTable::wide_iload() { @@ -489,7 +551,7 @@ void TemplateTable::wide_aload() { transition(vtos, atos); locals_index_wide(rbx); - __ movq(rax, aaddress(rbx)); + __ movptr(rax, aaddress(rbx)); debug_only(__ verify_local_tag(frame::TagReference, rbx)); } @@ -498,7 +560,7 @@ // check array __ null_check(array, arrayOopDesc::length_offset_in_bytes()); // sign extend index for use by indexed load - __ movslq(index, index); + __ movl2ptr(index, index); // check index __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes())); if (index != rbx) { @@ -516,7 +578,7 @@ // eax: index // rdx: array index_check(rdx, rax); // kills rbx - __ movl(rax, Address(rdx, rax, + __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT))); } @@ -527,8 +589,8 @@ // eax: index // rdx: array index_check(rdx, rax); // kills rbx - __ movq(rax, Address(rdx, rbx, - Address::times_8, + __ movq(rax, Address(rdx, rbx, + Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG))); } @@ -538,7 +600,7 @@ // eax: index // rdx: array index_check(rdx, rax); // kills rbx - __ movflt(xmm0, Address(rdx, rax, + __ movflt(xmm0, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT))); } @@ -549,7 +611,7 @@ // eax: index // rdx: array index_check(rdx, rax); // kills rbx - __ movdbl(xmm0, Address(rdx, rax, + __ movdbl(xmm0, Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE))); } @@ -560,9 +622,9 @@ // eax: index // rdx: array index_check(rdx, rax); // kills rbx - __ movq(rax, Address(rdx, rax, - Address::times_8, - arrayOopDesc::base_offset_in_bytes(T_OBJECT))); + __ load_heap_oop(rax, Address(rdx, rax, + UseCompressedOops ? 
Address::times_4 : Address::times_8, + arrayOopDesc::base_offset_in_bytes(T_OBJECT))); } void TemplateTable::baload() { @@ -571,9 +633,9 @@ // eax: index // rdx: array index_check(rdx, rax); // kills rbx - __ load_signed_byte(rax, - Address(rdx, rax, - Address::times_1, + __ load_signed_byte(rax, + Address(rdx, rax, + Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE))); } @@ -583,8 +645,8 @@ // eax: index // rdx: array index_check(rdx, rax); // kills rbx - __ load_unsigned_word(rax, - Address(rdx, rax, + __ load_unsigned_word(rax, + Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); } @@ -601,8 +663,8 @@ // rdx: array __ pop_ptr(rdx); index_check(rdx, rax); // kills rbx - __ load_unsigned_word(rax, - Address(rdx, rax, + __ load_unsigned_word(rax, + Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); } @@ -614,7 +676,7 @@ // rdx: array index_check(rdx, rax); // kills rbx __ load_signed_word(rax, - Address(rdx, rax, + Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT))); } @@ -645,7 +707,7 @@ void TemplateTable::aload(int n) { transition(vtos, atos); - __ movq(rax, aaddress(n)); + __ movptr(rax, aaddress(n)); debug_only(__ verify_local_tag(frame::TagReference, n)); } @@ -677,7 +739,7 @@ const Register bc = c_rarg3; assert(rbx != bc, "register damaged"); // get next byte - __ load_unsigned_byte(rbx, + __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0))); // do actual aload_0 @@ -688,23 +750,23 @@ __ jcc(Assembler::equal, done); // if _igetfield then reqrite to _fast_iaccess_0 - assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == - Bytecodes::_aload_0, + assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == + Bytecodes::_aload_0, "fix bytecode definition"); __ cmpl(rbx, Bytecodes::_fast_igetfield); __ movl(bc, Bytecodes::_fast_iaccess_0); __ jccb(Assembler::equal, rewrite); // if _agetfield then reqrite to _fast_aaccess_0 - assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == - Bytecodes::_aload_0, + assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == + Bytecodes::_aload_0, "fix bytecode definition"); __ cmpl(rbx, Bytecodes::_fast_agetfield); __ movl(bc, Bytecodes::_fast_aaccess_0); __ jccb(Assembler::equal, rewrite); // if _fgetfield then reqrite to _fast_faccess_0 - assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == + assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); __ cmpl(rbx, Bytecodes::_fast_fgetfield); @@ -712,7 +774,7 @@ __ jccb(Assembler::equal, rewrite); // else rewrite to _fast_aload0 - assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == + assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition"); __ movl(bc, Bytecodes::_fast_aload_0); @@ -760,7 +822,7 @@ transition(vtos, vtos); __ pop_ptr(rax, rdx); // will need to pop tag too locals_index(rbx); - __ movq(aaddress(rbx), rax); + __ movptr(aaddress(rbx), rax); __ tag_local(rdx, rbx); // store tag from stack, might be returnAddr } @@ -800,7 +862,7 @@ transition(vtos, vtos); __ pop_ptr(rax, rdx); // will need to pop tag too locals_index_wide(rbx); - __ movq(aaddress(rbx), rax); + __ movptr(aaddress(rbx), rax); __ tag_local(rdx, rbx); // store tag from stack, might be returnAddr } @@ -812,7 +874,7 @@ // ebx: index // rdx: array index_check(rdx, rbx); // prefer index in ebx - __ movl(Address(rdx, rbx, + __ movl(Address(rdx, rbx, Address::times_4, 
arrayOopDesc::base_offset_in_bytes(T_INT)), rax); @@ -826,8 +888,8 @@ // ebx: index // rdx: array index_check(rdx, rbx); // prefer index in ebx - __ movq(Address(rdx, rbx, - Address::times_8, + __ movq(Address(rdx, rbx, + Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG)), rax); } @@ -840,8 +902,8 @@ // ebx: index // rdx: array index_check(rdx, rbx); // prefer index in ebx - __ movflt(Address(rdx, rbx, - Address::times_4, + __ movflt(Address(rdx, rbx, + Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)), xmm0); } @@ -854,8 +916,8 @@ // ebx: index // rdx: array index_check(rdx, rbx); // prefer index in ebx - __ movdbl(Address(rdx, rbx, - Address::times_8, + __ movdbl(Address(rdx, rbx, + Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)), xmm0); } @@ -864,25 +926,28 @@ Label is_null, ok_is_subtype, done; transition(vtos, vtos); // stack: ..., array, index, value - __ movq(rax, at_tos()); // value + __ movptr(rax, at_tos()); // value __ movl(rcx, at_tos_p1()); // index - __ movq(rdx, at_tos_p2()); // array + __ movptr(rdx, at_tos_p2()); // array + + Address element_address(rdx, rcx, + UseCompressedOops? Address::times_4 : Address::times_8, + arrayOopDesc::base_offset_in_bytes(T_OBJECT)); + index_check(rdx, rcx); // kills rbx // do array store check - check for NULL value first - __ testq(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, is_null); // Move subklass into rbx - __ movq(rbx, Address(rax, oopDesc::klass_offset_in_bytes())); + __ load_klass(rbx, rax); // Move superklass into rax - __ movq(rax, Address(rdx, oopDesc::klass_offset_in_bytes())); - __ movq(rax, Address(rax, - sizeof(oopDesc) + - objArrayKlass::element_klass_offset_in_bytes())); - // Compress array + index*8 + 12 into a single register. Frees rcx. - __ leaq(rdx, Address(rdx, rcx, - Address::times_8, - arrayOopDesc::base_offset_in_bytes(T_OBJECT))); + __ load_klass(rax, rdx); + __ movptr(rax, Address(rax, + sizeof(oopDesc) + + objArrayKlass::element_klass_offset_in_bytes())); + // Compress array + index*oopSize + 12 into a single register. Frees rcx. + __ lea(rdx, element_address); // Generate subtype check. Blows rcx, rdi // Superklass in rax. Subklass in rbx. @@ -894,22 +959,23 @@ // Come here on success __ bind(ok_is_subtype); - __ movq(rax, at_tos()); // Value - __ movq(Address(rdx, 0), rax); - __ store_check(rdx); + + // Get the value we will store + __ movptr(rax, at_tos()); + // Now store using the appropriate barrier + do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true); __ jmp(done); // Have a NULL in rax, rdx=array, ecx=index. 
Store NULL at ary[idx] __ bind(is_null); __ profile_null_seen(rbx); - __ movq(Address(rdx, rcx, - Address::times_8, - arrayOopDesc::base_offset_in_bytes(T_OBJECT)), - rax); + + // Store a NULL + do_oop_store(_masm, element_address, noreg, _bs->kind(), true); // Pop stack arguments __ bind(done); - __ addq(rsp, 3 * Interpreter::stackElementSize()); + __ addptr(rsp, 3 * Interpreter::stackElementSize()); } void TemplateTable::bastore() { @@ -920,9 +986,9 @@ // ebx: index // rdx: array index_check(rdx, rbx); // prefer index in ebx - __ movb(Address(rdx, rbx, - Address::times_1, - arrayOopDesc::base_offset_in_bytes(T_BYTE)), + __ movb(Address(rdx, rbx, + Address::times_1, + arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax); } @@ -934,7 +1000,7 @@ // ebx: index // rdx: array index_check(rdx, rbx); // prefer index in ebx - __ movw(Address(rdx, rbx, + __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax); @@ -971,18 +1037,18 @@ void TemplateTable::astore(int n) { transition(vtos, vtos); __ pop_ptr(rax, rdx); - __ movq(aaddress(n), rax); + __ movptr(aaddress(n), rax); __ tag_local(rdx, n); } void TemplateTable::pop() { transition(vtos, vtos); - __ addq(rsp, Interpreter::stackElementSize()); + __ addptr(rsp, Interpreter::stackElementSize()); } void TemplateTable::pop2() { transition(vtos, vtos); - __ addq(rsp, 2 * Interpreter::stackElementSize()); + __ addptr(rsp, 2 * Interpreter::stackElementSize()); } void TemplateTable::dup() { @@ -1093,11 +1159,11 @@ void TemplateTable::lop2(Operation op) { transition(ltos, ltos); switch (op) { - case add : __ pop_l(rdx); __ addq (rax, rdx); break; - case sub : __ movq(rdx, rax); __ pop_l(rax); __ subq (rax, rdx); break; - case _and : __ pop_l(rdx); __ andq (rax, rdx); break; - case _or : __ pop_l(rdx); __ orq (rax, rdx); break; - case _xor : __ pop_l(rdx); __ xorq (rax, rdx); break; + case add : __ pop_l(rdx); __ addptr (rax, rdx); break; + case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr (rax, rdx); break; + case _and : __ pop_l(rdx); __ andptr (rax, rdx); break; + case _or : __ pop_l(rdx); __ orptr (rax, rdx); break; + case _xor : __ pop_l(rdx); __ xorptr (rax, rdx); break; default : ShouldNotReachHere(); } } @@ -1133,7 +1199,7 @@ void TemplateTable::ldiv() { transition(ltos, ltos); - __ movq(rcx, rax); + __ mov(rcx, rax); __ pop_l(rax); // generate explicit div0 check __ testq(rcx, rcx); @@ -1148,7 +1214,7 @@ void TemplateTable::lrem() { transition(ltos, ltos); - __ movq(rcx, rax); + __ mov(rcx, rax); __ pop_l(rax); __ testq(rcx, rcx); __ jump_cc(Assembler::zero, @@ -1158,7 +1224,7 @@ // needed), which may speed up this implementation for the common case. 
// (see also JVM spec., p.243 & p.271) __ corrected_idivq(rcx); // kills rbx - __ movq(rax, rdx); + __ mov(rax, rdx); } void TemplateTable::lshl() { @@ -1187,7 +1253,7 @@ switch (op) { case add: __ addss(xmm0, at_rsp()); - __ addq(rsp, Interpreter::stackElementSize()); + __ addptr(rsp, Interpreter::stackElementSize()); break; case sub: __ movflt(xmm1, xmm0); @@ -1196,7 +1262,7 @@ break; case mul: __ mulss(xmm0, at_rsp()); - __ addq(rsp, Interpreter::stackElementSize()); + __ addptr(rsp, Interpreter::stackElementSize()); break; case div: __ movflt(xmm1, xmm0); @@ -1219,16 +1285,16 @@ switch (op) { case add: __ addsd(xmm0, at_rsp()); - __ addq(rsp, 2 * Interpreter::stackElementSize()); + __ addptr(rsp, 2 * Interpreter::stackElementSize()); break; case sub: __ movdbl(xmm1, xmm0); - __ pop_d(xmm0); + __ pop_d(xmm0); __ subsd(xmm0, xmm1); break; case mul: __ mulsd(xmm0, at_rsp()); - __ addq(rsp, 2 * Interpreter::stackElementSize()); + __ addptr(rsp, 2 * Interpreter::stackElementSize()); break; case div: __ movdbl(xmm1, xmm0); @@ -1273,13 +1339,13 @@ void TemplateTable::fneg() { transition(ftos, ftos); - static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000); + static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000); __ xorps(xmm0, ExternalAddress((address) float_signflip)); } void TemplateTable::dneg() { transition(dtos, dtos); - static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000); + static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000); __ xorpd(xmm0, ExternalAddress((address) double_signflip)); } @@ -1476,7 +1542,7 @@ __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx // holds bumped taken count - const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + + const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset(); const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset(); @@ -1489,7 +1555,7 @@ if (!is_wide) { __ sarl(rdx, 16); } - __ movslq(rdx, rdx); + __ movl2ptr(rdx, rdx); // Handle all the JSR stuff here, then exit. // It's much shorter and cleaner than intermingling with the non-JSR @@ -1499,11 +1565,11 @@ __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0)); // compute return address as bci in rax - __ leaq(rax, at_bcp((is_wide ? 5 : 3) - + __ lea(rax, at_bcp((is_wide ? 
5 : 3) - in_bytes(constMethodOopDesc::codes_offset()))); - __ subq(rax, Address(rcx, methodOopDesc::const_offset())); + __ subptr(rax, Address(rcx, methodOopDesc::const_offset())); // Adjust the bcp in r13 by the displacement in rdx - __ addq(r13, rdx); + __ addptr(r13, rdx); // jsr returns atos that is not an oop __ push_i(rax); __ dispatch_only(vtos); @@ -1512,10 +1578,10 @@ // Normal (non-jsr) branch handling - // Adjust the bcp in r13 by the displacement in rdx - __ addq(r13, rdx); + // Adjust the bcp in r13 by the displacement in rdx + __ addptr(r13, rdx); - assert(UseLoopCounter || !UseOnStackReplacement, + assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters"); Label backedge_counter_overflow; Label profile_method; @@ -1531,7 +1597,7 @@ __ testl(rdx, rdx); // check if forward or backward branch __ jcc(Assembler::positive, dispatch); // count only if backward branch - // increment counter + // increment counter __ movl(rax, Address(rcx, be_offset)); // load backedge counter __ incrementl(rax, InvocationCounter::count_increment); // increment // counter @@ -1563,7 +1629,7 @@ // being compiled, add a second test to make sure the overflow // function is called only once every overflow_frequency. const int overflow_frequency = 1024; - __ andl(rbx, overflow_frequency - 1); + __ andl(rbx, overflow_frequency - 1); __ jcc(Assembler::zero, backedge_counter_overflow); } @@ -1593,32 +1659,32 @@ if (ProfileInterpreter) { // Out-of-line code to allocate method data oop. __ bind(profile_method); - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), r13); __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode - __ movq(rcx, Address(rbp, method_offset)); - __ movq(rcx, Address(rcx, - in_bytes(methodOopDesc::method_data_offset()))); - __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), - rcx); + __ movptr(rcx, Address(rbp, method_offset)); + __ movptr(rcx, Address(rcx, + in_bytes(methodOopDesc::method_data_offset()))); + __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), + rcx); __ test_method_data_pointer(rcx, dispatch); // offset non-null mdp by MDO::data_offset() + IR::profile_method() - __ addq(rcx, in_bytes(methodDataOopDesc::data_offset())); - __ addq(rcx, rax); - __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), - rcx); + __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset())); + __ addptr(rcx, rax); + __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), + rcx); __ jmp(dispatch); } if (UseOnStackReplacement) { // invocation counter overflow __ bind(backedge_counter_overflow); - __ negq(rdx); - __ addq(rdx, r13); // branch bcp + __ negptr(rdx); + __ addptr(rdx, r13); // branch bcp // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp) __ call_VM(noreg, - CAST_FROM_FN_PTR(address, + CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx); __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode @@ -1628,23 +1694,23 @@ // rdx: scratch // r14: locals pointer // r13: bcp - __ testq(rax, rax); // test result + __ testptr(rax, rax); // test result __ jcc(Assembler::zero, dispatch); // no osr if null // nmethod may have been invalidated (VM may block upon call_VM return) __ movl(rcx, Address(rax, nmethod::entry_bci_offset())); __ cmpl(rcx, InvalidOSREntryBci); __ jcc(Assembler::equal, dispatch); - - // We have the address of an 
on stack replacement routine in eax + + // We have the address of an on stack replacement routine in eax // We need to prepare to execute the OSR method. First we must // migrate the locals and monitors off of the stack. - __ movq(r13, rax); // save the nmethod + __ mov(r13, rax); // save the nmethod call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); // eax is OSR buffer, move it to expected parameter location - __ movq(j_rarg0, rax); + __ mov(j_rarg0, rax); // We use j_rarg definitions here so that registers don't conflict as parameter // registers change across platforms as we are in the midst of a calling @@ -1654,18 +1720,18 @@ const Register sender_sp = j_rarg1; // pop the interpreter frame - __ movq(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp + __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp __ leave(); // remove frame anchor - __ popq(retaddr); // get return address - __ movq(rsp, sender_sp); // set sp to sender sp + __ pop(retaddr); // get return address + __ mov(rsp, sender_sp); // set sp to sender sp // Ensure compiled code always sees stack at proper alignment - __ andq(rsp, -(StackAlignmentInBytes)); + __ andptr(rsp, -(StackAlignmentInBytes)); // unlike x86 we need no specialized return from compiled code // to the interpreter or the call stub. // push the return address - __ pushq(retaddr); + __ push(retaddr); // and begin the OSR nmethod __ jmp(Address(r13, nmethod::osr_entry_point_offset())); @@ -1701,7 +1767,7 @@ transition(atos, vtos); // assume branch is more often taken than not (loops use backward branches) Label not_taken; - __ testq(rax, rax); + __ testptr(rax, rax); __ jcc(j_not(cc), not_taken); branch(false, false); __ bind(not_taken); @@ -1713,7 +1779,7 @@ // assume branch is more often taken than not (loops use backward branches) Label not_taken; __ pop_ptr(rdx); - __ cmpq(rdx, rax); + __ cmpptr(rdx, rax); __ jcc(j_not(cc), not_taken); branch(false, false); __ bind(not_taken); @@ -1723,23 +1789,23 @@ void TemplateTable::ret() { transition(vtos, vtos); locals_index(rbx); - __ movq(rbx, aaddress(rbx)); // get return bci, compute return bcp + __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp __ profile_ret(rbx, rcx); __ get_method(rax); - __ movq(r13, Address(rax, methodOopDesc::const_offset())); - __ leaq(r13, Address(r13, rbx, Address::times_1, - constMethodOopDesc::codes_offset())); + __ movptr(r13, Address(rax, methodOopDesc::const_offset())); + __ lea(r13, Address(r13, rbx, Address::times_1, + constMethodOopDesc::codes_offset())); __ dispatch_next(vtos); } void TemplateTable::wide_ret() { transition(vtos, vtos); locals_index_wide(rbx); - __ movq(rbx, aaddress(rbx)); // get return bci, compute return bcp + __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp __ profile_ret(rbx, rcx); __ get_method(rax); - __ movq(r13, Address(rax, methodOopDesc::const_offset())); - __ leaq(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset())); + __ movptr(r13, Address(rax, methodOopDesc::const_offset())); + __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset())); __ dispatch_next(vtos); } @@ -1747,8 +1813,8 @@ Label default_case, continue_execution; transition(itos, vtos); // align r13 - __ leaq(rbx, at_bcp(BytesPerInt)); - __ andq(rbx, -BytesPerInt); + __ lea(rbx, at_bcp(BytesPerInt)); + __ andptr(rbx, -BytesPerInt); // load lo & hi __ movl(rcx, 
Address(rbx, BytesPerInt)); __ movl(rdx, Address(rbx, 2 * BytesPerInt)); @@ -1766,9 +1832,9 @@ // continue execution __ bind(continue_execution); __ bswapl(rdx); - __ movslq(rdx, rdx); + __ movl2ptr(rdx, rdx); __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1)); - __ addq(r13, rdx); + __ addptr(r13, rdx); __ dispatch_only(vtos); // handle default __ bind(default_case); @@ -1784,16 +1850,16 @@ void TemplateTable::fast_linearswitch() { transition(itos, vtos); - Label loop_entry, loop, found, continue_execution; + Label loop_entry, loop, found, continue_execution; // bswap rax so we can avoid bswapping the table entries __ bswapl(rax); // align r13 - __ leaq(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of - // this instruction (change offsets - // below) - __ andq(rbx, -BytesPerInt); + __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of + // this instruction (change offsets + // below) + __ andptr(rbx, -BytesPerInt); // set counter - __ movl(rcx, Address(rbx, BytesPerInt)); + __ movl(rcx, Address(rbx, BytesPerInt)); __ bswapl(rcx); __ jmpb(loop_entry); // table search @@ -1812,11 +1878,11 @@ __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt)); __ profile_switch_case(rcx, rax, rbx); // continue execution - __ bind(continue_execution); + __ bind(continue_execution); __ bswapl(rdx); - __ movslq(rdx, rdx); + __ movl2ptr(rdx, rdx); __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1)); - __ addq(r13, rdx); + __ addptr(r13, rdx); __ dispatch_only(vtos); } @@ -1856,17 +1922,17 @@ const Register temp = rsi; // Find array start - __ leaq(array, at_bcp(3 * BytesPerInt)); // btw: should be able to - // get rid of this - // instruction (change - // offsets below) - __ andq(array, -BytesPerInt); + __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to + // get rid of this + // instruction (change + // offsets below) + __ andptr(array, -BytesPerInt); // Initialize i & j __ xorl(i, i); // i = 0; - __ movl(j, Address(array, -BytesPerInt)); // j = length(array); + __ movl(j, Address(array, -BytesPerInt)); // j = length(array); - // Convert j into native byteordering + // Convert j into native byteordering __ bswapl(j); // And start @@ -1874,7 +1940,7 @@ __ jmp(entry); // binary search loop - { + { Label loop; __ bind(loop); // int h = (i + j) >> 1; @@ -1912,9 +1978,9 @@ __ movl(j , Address(array, i, Address::times_8, BytesPerInt)); __ profile_switch_case(i, key, array); __ bswapl(j); - __ movslq(j, j); + __ movl2ptr(j, j); __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1)); - __ addq(r13, j); + __ addptr(r13, j); __ dispatch_only(vtos); // default case -> j = default offset @@ -1922,22 +1988,22 @@ __ profile_switch_default(i); __ movl(j, Address(array, -2 * BytesPerInt)); __ bswapl(j); - __ movslq(j, j); + __ movl2ptr(j, j); __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1)); - __ addq(r13, j); + __ addptr(r13, j); __ dispatch_only(vtos); } void TemplateTable::_return(TosState state) { transition(state, state); - assert(_desc->calls_vm(), + assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { assert(state == vtos, "only valid state"); - __ movq(c_rarg1, aaddress(0)); - __ movq(rdi, Address(c_rarg1, oopDesc::klass_offset_in_bytes())); + __ movptr(c_rarg1, aaddress(0)); + __ load_klass(rdi, c_rarg1); __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); __ testl(rdi, 
JVM_ACC_HAS_FINALIZER); Label skip_register_finalizer; @@ -1959,7 +2025,7 @@ // without some kind of memory barrier (i.e., it's not sufficient that // the interpreter does not reorder volatile references, the hardware // also must not reorder them). -// +// // According to the new Java Memory Model (JMM): // (1) All volatiles are serialized wrt to each other. ALSO reads & // writes act as aquire & release, so: @@ -1980,7 +2046,7 @@ // volatile-store-volatile-load case. This final case is placed after // volatile-stores although it could just as well go before // volatile-loads. -void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits +void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) { // Helper function to insert a is-volatile test and memory barrier if (os::is_MP()) { // Not needed on single CPU @@ -1999,12 +2065,12 @@ const int shift_count = (1 + byte_no) * BitsPerByte; Label resolved; __ get_cache_and_index_at_bcp(Rcache, index, 1); - __ movl(temp, Address(Rcache, - index, Address::times_8, - constantPoolCacheOopDesc::base_offset() + + __ movl(temp, Address(Rcache, + index, Address::times_8, + constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); __ shrl(temp, shift_count); - // have we resolved this bytecode? + // have we resolved this bytecode? __ andl(temp, 0xFF); __ cmpl(temp, (int) bytecode()); __ jcc(Assembler::equal, resolved); @@ -2021,7 +2087,7 @@ case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial: case Bytecodes::_invokestatic: - case Bytecodes::_invokeinterface: + case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; default: @@ -2047,19 +2113,19 @@ ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); // Field offset - __ movq(off, Address(cache, index, Address::times_8, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::f2_offset()))); - // Flags + __ movptr(off, Address(cache, index, Address::times_8, + in_bytes(cp_base_offset + + ConstantPoolCacheEntry::f2_offset()))); + // Flags __ movl(flags, Address(cache, index, Address::times_8, - in_bytes(cp_base_offset + + in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()))); // klass overwrite register if (is_static) { - __ movq(obj, Address(cache, index, Address::times_8, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::f1_offset()))); + __ movptr(obj, Address(cache, index, Address::times_8, + in_bytes(cp_base_offset + + ConstantPoolCacheEntry::f1_offset()))); } } @@ -2091,9 +2157,9 @@ resolve_cache_and_index(byte_no, cache, index); assert(wordSize == 8, "adjust code below"); - __ movq(method, Address(cache, index, Address::times_8, method_offset)); + __ movptr(method, Address(cache, index, Address::times_8, method_offset)); if (itable_index != noreg) { - __ movq(itable_index, + __ movptr(itable_index, Address(cache, index, Address::times_8, index_offset)); } __ movl(flags , Address(cache, index, Address::times_8, flags_offset)); @@ -2119,24 +2185,24 @@ __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1); // cache entry pointer - __ addq(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset())); + __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset())); __ shll(c_rarg3, LogBytesPerWord); - __ addq(c_rarg2, c_rarg3); + __ addptr(c_rarg2, c_rarg3); if (is_static) { __ xorl(c_rarg1, c_rarg1); // NULL object reference } else { - __ movq(c_rarg1, at_tos()); // get object pointer without popping it + __ movptr(c_rarg1, at_tos()); // get object 
pointer without popping it __ verify_oop(c_rarg1); } // c_rarg1: object pointer or NULL // c_rarg2: cache entry pointer // c_rarg3: jvalue object on the stack - __ call_VM(noreg, CAST_FROM_FN_PTR(address, + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2, c_rarg3); __ get_cache_and_index_at_bcp(cache, index, 1); __ bind(L1); - } + } } void TemplateTable::pop_and_check_object(Register r) { @@ -2161,12 +2227,12 @@ if (!is_static) { // obj is on the stack - pop_and_check_object(obj); + pop_and_check_object(obj); } const Address field(obj, off, Address::times_1); - Label Done, notByte, notInt, notShort, notChar, + Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; __ shrl(flags, ConstantPoolCacheEntry::tosBits); @@ -2187,7 +2253,7 @@ __ cmpl(flags, atos); __ jcc(Assembler::notEqual, notObj); // atos - __ movq(rax, field); + __ load_heap_oop(rax, field); __ push(atos); if (!is_static) { patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx); @@ -2314,33 +2380,33 @@ // the object. We don't know the size of the value, though; it // could be one or two words depending on its type. As a result, // we must find the type to determine where the object is. - __ movl(c_rarg3, Address(c_rarg2, rscratch1, - Address::times_8, + __ movl(c_rarg3, Address(c_rarg2, rscratch1, + Address::times_8, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()))); __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits); // Make sure we don't need to mask rcx for tosBits after the // above shift ConstantPoolCacheEntry::verify_tosBits(); - __ movq(c_rarg1, at_tos_p1()); // initially assume a one word jvalue + __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue __ cmpl(c_rarg3, ltos); - __ cmovq(Assembler::equal, - c_rarg1, at_tos_p2()); // ltos (two word jvalue) + __ cmovptr(Assembler::equal, + c_rarg1, at_tos_p2()); // ltos (two word jvalue) __ cmpl(c_rarg3, dtos); - __ cmovq(Assembler::equal, - c_rarg1, at_tos_p2()); // dtos (two word jvalue) + __ cmovptr(Assembler::equal, + c_rarg1, at_tos_p2()); // dtos (two word jvalue) } // cache entry pointer - __ addq(c_rarg2, in_bytes(cp_base_offset)); + __ addptr(c_rarg2, in_bytes(cp_base_offset)); __ shll(rscratch1, LogBytesPerWord); - __ addq(c_rarg2, rscratch1); + __ addptr(c_rarg2, rscratch1); // object (tos) - __ movq(c_rarg3, rsp); + __ mov(c_rarg3, rsp); // c_rarg1: object pointer set up above (NULL if static) // c_rarg2: cache entry pointer // c_rarg3: jvalue object on the stack - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), c_rarg1, c_rarg2, c_rarg3); __ get_cache_and_index_at_bcp(cache, index, 1); @@ -2374,7 +2440,7 @@ // field address const Address field(obj, off, Address::times_1); - Label notByte, notInt, notShort, notChar, + Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; __ shrl(flags, ConstantPoolCacheEntry::tosBits); @@ -2397,8 +2463,10 @@ // atos __ pop(atos); if (!is_static) pop_and_check_object(obj); - __ movq(field, rax); - __ store_check(obj, field); // Need to mark card + + // Store into the field + do_oop_store(_masm, field, rax, _bs->kind(), false); + if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx); } @@ -2513,12 +2581,12 @@ __ pop_ptr(rbx); // copy the object pointer from tos __ verify_oop(rbx); __ push_ptr(rbx); // put the object pointer back on tos - __ subq(rsp, sizeof(jvalue)); // add space for a 
jvalue object - __ movq(c_rarg3, rsp); + __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object + __ mov(c_rarg3, rsp); const Address field(c_rarg3, 0); switch (bytecode()) { // load values into the jvalue object - case Bytecodes::_fast_aputfield: // fall through + case Bytecodes::_fast_aputfield: __ movq(field, rax); break; case Bytecodes::_fast_lputfield: __ movq(field, rax); break; case Bytecodes::_fast_iputfield: __ movl(field, rax); break; case Bytecodes::_fast_bputfield: __ movb(field, rax); break; @@ -2532,7 +2600,7 @@ // Save rax because call_VM() will clobber it, then use it for // JVMTI purposes - __ pushq(rax); + __ push(rax); // access constant pool cache entry __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1); __ verify_oop(rbx); @@ -2540,11 +2608,11 @@ // c_rarg2: cache entry pointer // c_rarg3: jvalue object on the stack __ call_VM(noreg, - CAST_FROM_FN_PTR(address, + CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3); - __ popq(rax); // restore lower value - __ addq(rsp, sizeof(jvalue)); // release jvalue object space + __ pop(rax); // restore lower value + __ addptr(rsp, sizeof(jvalue)); // release jvalue object space __ bind(L2); } } @@ -2560,13 +2628,13 @@ __ get_cache_and_index_at_bcp(rcx, rbx, 1); // test for volatile with rdx - __ movl(rdx, Address(rcx, rbx, Address::times_8, + __ movl(rdx, Address(rcx, rbx, Address::times_8, in_bytes(base + ConstantPoolCacheEntry::flags_offset()))); // replace index with field offset from cache entry - __ movq(rbx, Address(rcx, rbx, Address::times_8, - in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); + __ movptr(rbx, Address(rcx, rbx, Address::times_8, + in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); // [jk] not needed currently // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | @@ -2584,9 +2652,8 @@ // access field switch (bytecode()) { - case Bytecodes::_fast_aputfield: - __ movq(field, rax); - __ store_check(rcx, field); + case Bytecodes::_fast_aputfield: + do_oop_store(_masm, field, rax, _bs->kind(), false); break; case Bytecodes::_fast_lputfield: __ movq(field, rax); @@ -2606,7 +2673,7 @@ __ movflt(field, xmm0); break; case Bytecodes::_fast_dputfield: - __ movdbl(field, xmm0); + __ movdbl(field, xmm0); break; default: ShouldNotReachHere(); @@ -2634,16 +2701,17 @@ __ jcc(Assembler::zero, L1); // access constant pool cache entry __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1); - __ movq(r12, rax); // save object pointer before call_VM() clobbers it __ verify_oop(rax); - __ movq(c_rarg1, rax); + __ mov(r12, rax); // save object pointer before call_VM() clobbers it + __ mov(c_rarg1, rax); // c_rarg1: object pointer copied above // c_rarg2: cache entry pointer - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2); - __ movq(rax, r12); // restore object pointer + __ mov(rax, r12); // restore object pointer + __ reinit_heapbase(); __ bind(L1); } @@ -2652,15 +2720,15 @@ // replace index with field offset from cache entry // [jk] not needed currently // if (os::is_MP()) { - // __ movl(rdx, Address(rcx, rbx, Address::times_8, + // __ movl(rdx, Address(rcx, rbx, Address::times_8, // in_bytes(constantPoolCacheOopDesc::base_offset() + // ConstantPoolCacheEntry::flags_offset()))); // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); // __ andl(rdx, 0x1); // } - __ movq(rbx, Address(rcx, rbx, Address::times_8, - 
in_bytes(constantPoolCacheOopDesc::base_offset() + - ConstantPoolCacheEntry::f2_offset()))); + __ movptr(rbx, Address(rcx, rbx, Address::times_8, + in_bytes(constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::f2_offset()))); // rax: object __ verify_oop(rax); @@ -2670,7 +2738,7 @@ // access field switch (bytecode()) { case Bytecodes::_fast_agetfield: - __ movq(rax, field); + __ load_heap_oop(rax, field); __ verify_oop(rax); break; case Bytecodes::_fast_lgetfield: @@ -2679,7 +2747,7 @@ case Bytecodes::_fast_igetfield: __ movl(rax, field); break; - case Bytecodes::_fast_bgetfield: + case Bytecodes::_fast_bgetfield: __ movsbl(rax, field); break; case Bytecodes::_fast_sgetfield: @@ -2698,7 +2766,7 @@ ShouldNotReachHere(); } // [jk] not needed currently - // if (os::is_MP()) { + // if (os::is_MP()) { // Label notVolatile; // __ testl(rdx, rdx); // __ jcc(Assembler::zero, notVolatile); @@ -2711,24 +2779,24 @@ transition(vtos, state); // get receiver - __ movq(rax, aaddress(0)); + __ movptr(rax, aaddress(0)); debug_only(__ verify_local_tag(frame::TagReference, 0)); // access constant pool cache __ get_cache_and_index_at_bcp(rcx, rdx, 2); - __ movq(rbx, - Address(rcx, rdx, Address::times_8, - in_bytes(constantPoolCacheOopDesc::base_offset() + - ConstantPoolCacheEntry::f2_offset()))); + __ movptr(rbx, + Address(rcx, rdx, Address::times_8, + in_bytes(constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::f2_offset()))); // make sure exception is reported in correct bcp range (getfield is // next instruction) - __ incrementq(r13); + __ increment(r13); __ null_check(rax); switch (state) { - case itos: + case itos: __ movl(rax, Address(rax, rbx, Address::times_1)); break; case atos: - __ movq(rax, Address(rax, rbx, Address::times_1)); + __ load_heap_oop(rax, Address(rax, rbx, Address::times_1)); __ verify_oop(rax); break; case ftos: @@ -2741,7 +2809,7 @@ // [jk] not needed currently // if (os::is_MP()) { // Label notVolatile; - // __ movl(rdx, Address(rcx, rdx, Address::times_8, + // __ movl(rdx, Address(rcx, rdx, Address::times_8, // in_bytes(constantPoolCacheOopDesc::base_offset() + // ConstantPoolCacheEntry::flags_offset()))); // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); @@ -2751,7 +2819,7 @@ // __ bind(notVolatile); // } - __ decrementq(r13); + __ decrement(r13); } @@ -2759,14 +2827,14 @@ //----------------------------------------------------------------------------- // Calls -void TemplateTable::count_calls(Register method, Register temp) { +void TemplateTable::count_calls(Register method, Register temp) { // implemented elsewhere ShouldNotReachHere(); } -void TemplateTable::prepare_invoke(Register method, - Register index, - int byte_no, +void TemplateTable::prepare_invoke(Register method, + Register index, + int byte_no, Bytecodes::Code code) { // determine flags const bool is_invokeinterface = code == Bytecodes::_invokeinterface; @@ -2777,7 +2845,7 @@ const bool save_flags = is_invokeinterface || is_invokevirtual; // setup registers & access constant pool cache const Register recv = rcx; - const Register flags = rdx; + const Register flags = rdx; assert_different_registers(method, index, recv, flags); // save 'interpreter return address' @@ -2790,7 +2858,8 @@ __ movl(recv, flags); __ andl(recv, 0xFF); if (TaggedStackInterpreter) __ shll(recv, 1); // index*2 - __ movq(recv, Address(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1))); + __ movptr(recv, Address(rsp, recv, Address::times_8, + -Interpreter::expr_offset_in_bytes(1))); __ 
verify_oop(recv); } @@ -2808,15 +2877,15 @@ // Make sure we don't need to mask flags for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); // load return address - { + { ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table()); ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table()); __ lea(rscratch1, (is_invokeinterface ? return_5 : return_3)); - __ movq(flags, Address(rscratch1, flags, Address::times_8)); + __ movptr(flags, Address(rscratch1, flags, Address::times_8)); } // push return address - __ pushq(flags); + __ push(flags); // Restore flag field from the constant pool cache, and restore esi // for later null checks. r13 is the bytecode pointer @@ -2857,7 +2926,7 @@ // get receiver klass __ null_check(recv, oopDesc::klass_offset_in_bytes()); - __ movq(rax, Address(recv, oopDesc::klass_offset_in_bytes())); + __ load_klass(rax, recv); __ verify_oop(rax); @@ -2865,13 +2934,13 @@ __ profile_virtual_call(rax, r14, rdx); // get target methodOop & entry point - const int base = instanceKlass::vtable_start_offset() * wordSize; - assert(vtableEntry::size() * wordSize == 8, + const int base = instanceKlass::vtable_start_offset() * wordSize; + assert(vtableEntry::size() * wordSize == 8, "adjust the scaling in the code below"); - __ movq(method, Address(rax, index, - Address::times_8, - base + vtableEntry::method_offset_in_bytes())); - __ movq(rdx, Address(method, methodOopDesc::interpreter_entry_offset())); + __ movptr(method, Address(rax, index, + Address::times_8, + base + vtableEntry::method_offset_in_bytes())); + __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset())); __ jump_from_interpreted(method, rdx); } @@ -2881,8 +2950,8 @@ prepare_invoke(rbx, noreg, byte_no, bytecode()); // rbx: index - // rcx: receiver - // rdx: flags + // rcx: receiver + // rdx: flags invokevirtual_helper(rbx, rcx, rdx); } @@ -2915,10 +2984,10 @@ void TemplateTable::invokeinterface(int byte_no) { transition(vtos, vtos); prepare_invoke(rax, rbx, byte_no, bytecode()); - + // rax: Interface // rbx: index - // rcx: receiver + // rcx: receiver // rdx: flags // Special case of invokeinterface called for virtual method of @@ -2935,48 +3004,48 @@ // Get receiver klass into rdx - also a null check __ restore_locals(); // restore r14 - __ movq(rdx, Address(rcx, oopDesc::klass_offset_in_bytes())); + __ load_klass(rdx, rcx); __ verify_oop(rdx); // profile this call __ profile_virtual_call(rdx, r13, r14); - __ movq(r14, rdx); // Save klassOop in r14 + __ mov(r14, rdx); // Save klassOop in r14 // Compute start of first itableOffsetEntry (which is at the end of // the vtable) const int base = instanceKlass::vtable_start_offset() * wordSize; // Get length of vtable - assert(vtableEntry::size() * wordSize == 8, + assert(vtableEntry::size() * wordSize == 8, "adjust the scaling in the code below"); - __ movl(r13, Address(rdx, + __ movl(r13, Address(rdx, instanceKlass::vtable_length_offset() * wordSize)); - __ leaq(rdx, Address(rdx, r13, Address::times_8, base)); - + __ lea(rdx, Address(rdx, r13, Address::times_8, base)); + if (HeapWordsPerLong > 1) { // Round up to align_object_offset boundary - __ round_to_q(rdx, BytesPerLong); + __ round_to(rdx, BytesPerLong); } Label entry, search, interface_ok; - - __ jmpb(entry); + + __ jmpb(entry); __ bind(search); - __ addq(rdx, itableOffsetEntry::size() * wordSize); - - __ bind(entry); + __ addptr(rdx, itableOffsetEntry::size() * wordSize); + + __ bind(entry); // Check that the entry is non-null. 
A null entry means that the // receiver class doesn't implement the interface, and wasn't the // same as the receiver class checked when the interface was // resolved. - __ pushq(rdx); - __ movq(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); - __ testq(rdx, rdx); + __ push(rdx); + __ movptr(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); + __ testptr(rdx, rdx); __ jcc(Assembler::notZero, interface_ok); // throw exception - __ popq(rdx); // pop saved register first. - __ popq(rbx); // pop return address (pushed by prepare_invoke) + __ pop(rdx); // pop saved register first. + __ pop(rbx); // pop return address (pushed by prepare_invoke) __ restore_bcp(); // r13 must be correct for exception handler (was // destroyed) __ restore_locals(); // make sure locals pointer is correct as well @@ -2987,44 +3056,44 @@ __ should_not_reach_here(); __ bind(interface_ok); - __ popq(rdx); + __ pop(rdx); - __ cmpq(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); + __ cmpptr(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); __ jcc(Assembler::notEqual, search); - + __ movl(rdx, Address(rdx, itableOffsetEntry::offset_offset_in_bytes())); - __ addq(rdx, r14); // Add offset to klassOop + __ addptr(rdx, r14); // Add offset to klassOop assert(itableMethodEntry::size() * wordSize == 8, "adjust the scaling in the code below"); - __ movq(rbx, Address(rdx, rbx, Address::times_8)); + __ movptr(rbx, Address(rdx, rbx, Address::times_8)); // rbx: methodOop to call // rcx: receiver // Check for abstract method error // Note: This should be done more efficiently via a // throw_abstract_method_error interpreter entry point and a // conditional jump to it in case of a null method. - { + { Label L; - __ testq(rbx, rbx); + __ testptr(rbx, rbx); __ jcc(Assembler::notZero, L); // throw exception // note: must restore interpreter registers to canonical // state for exception handling to work correctly! - __ popq(rbx); // pop return address (pushed by prepare_invoke) + __ pop(rbx); // pop return address (pushed by prepare_invoke) __ restore_bcp(); // r13 must be correct for exception handler // (was destroyed) __ restore_locals(); // make sure locals pointer is correct as // well (was destroyed) - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); // the call_VM checks for exception, so we should never return here. __ should_not_reach_here(); __ bind(L); } - __ movq(rcx, Address(rbx, methodOopDesc::interpreter_entry_offset())); + __ movptr(rcx, Address(rbx, methodOopDesc::interpreter_entry_offset())); // do the call // rcx: receiver @@ -3043,13 +3112,11 @@ Label initialize_header; Label initialize_object; // including clearing the fields Label allocate_shared; - ExternalAddress top((address)Universe::heap()->top_addr()); - ExternalAddress end((address)Universe::heap()->end_addr()); __ get_cpool_and_tags(rsi, rax); // get instanceKlass - __ movq(rsi, Address(rsi, rdx, - Address::times_8, sizeof(constantPoolOopDesc))); + __ movptr(rsi, Address(rsi, rdx, + Address::times_8, sizeof(constantPoolOopDesc))); // make sure the class we're about to instantiate has been // resolved. 
Note: slow_case does a pop of stack, which is why we @@ -3061,15 +3128,15 @@ // make sure klass is initialized & doesn't have finalizer // make sure klass is fully initialized - __ cmpl(Address(rsi, - instanceKlass::init_state_offset_in_bytes() + - sizeof(oopDesc)), + __ cmpl(Address(rsi, + instanceKlass::init_state_offset_in_bytes() + + sizeof(oopDesc)), instanceKlass::fully_initialized); __ jcc(Assembler::notEqual, slow_case); // get instance_size in instanceKlass (scaled to a count of bytes) - __ movl(rdx, - Address(rsi, + __ movl(rdx, + Address(rsi, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); // test to see if it has a finalizer or is malformed in some way __ testl(rdx, Klass::_lh_instance_slow_path_bit); @@ -3085,11 +3152,11 @@ Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode; if (UseTLAB) { - __ movq(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); - __ leaq(rbx, Address(rax, rdx, Address::times_1)); - __ cmpq(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); + __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); + __ lea(rbx, Address(rax, rdx, Address::times_1)); + __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case); - __ movq(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx); + __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx); if (ZeroTLAB) { // the fields have been already cleared __ jmp(initialize_header); @@ -3105,18 +3172,21 @@ if (allow_shared_alloc) { __ bind(allocate_shared); + ExternalAddress top((address)Universe::heap()->top_addr()); + ExternalAddress end((address)Universe::heap()->end_addr()); + const Register RtopAddr = rscratch1; const Register RendAddr = rscratch2; __ lea(RtopAddr, top); __ lea(RendAddr, end); - __ movq(rax, Address(RtopAddr, 0)); + __ movptr(rax, Address(RtopAddr, 0)); // For retries rax gets set by cmpxchgq Label retry; __ bind(retry); - __ leaq(rbx, Address(rax, rdx, Address::times_1)); - __ cmpq(rbx, Address(RendAddr, 0)); + __ lea(rbx, Address(rax, rdx, Address::times_1)); + __ cmpptr(rbx, Address(RendAddr, 0)); __ jcc(Assembler::above, slow_case); // Compare rax with the top addr, and if still equal, store the new @@ -3129,9 +3199,9 @@ if (os::is_MP()) { __ lock(); } - __ cmpxchgq(rbx, Address(RtopAddr, 0)); + __ cmpxchgptr(rbx, Address(RtopAddr, 0)); - // if someone beat us on the allocation, try again, otherwise continue + // if someone beat us on the allocation, try again, otherwise continue __ jcc(Assembler::notEqual, retry); } @@ -3145,12 +3215,12 @@ // Initialize object fields __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop - { + { Label loop; __ bind(loop); __ movq(Address(rax, rdx, Address::times_8, - sizeof(oopDesc) - oopSize), - rcx); + sizeof(oopDesc) - oopSize), + rcx); __ decrementl(rdx); __ jcc(Assembler::notZero, loop); } @@ -3158,13 +3228,15 @@ // initialize object header only. 
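The allocate_shared path above is lock-free bump-pointer allocation: compute obj + size from the current top pointer, bail out to the slow path if that passes the end of the region, and retry the lock; cmpxchgptr whenever another thread wins the race. As a rough model of that protocol only (hypothetical names, a user-level sketch rather than HotSpot's Universe/heap code), the loop amounts to:

#include <atomic>
#include <cstddef>

// Hypothetical stand-in for a contiguous eden region (top_addr()/end_addr() analogue).
struct EdenRegion {
  std::atomic<char*> top;   // next free byte
  char*              end;   // exclusive limit
};

// Bump-pointer allocation with a CAS retry loop, mirroring the
// lock; cmpxchgptr(rbx, Address(RtopAddr, 0)) sequence above: recompute
// obj + size from the freshly observed top and retry until the CAS
// succeeds or the region is exhausted.
char* try_allocate(EdenRegion& eden, std::size_t size_in_bytes) {
  char* obj = eden.top.load(std::memory_order_relaxed);
  for (;;) {
    char* new_top = obj + size_in_bytes;
    if (new_top > eden.end) {
      return nullptr;                       // take the slow path (GC / VM call)
    }
    // On failure, compare_exchange_weak refreshes 'obj' with the value
    // another thread installed, so the next iteration recomputes new_top.
    if (eden.top.compare_exchange_weak(obj, new_top,
                                       std::memory_order_acq_rel,
                                       std::memory_order_relaxed)) {
      return obj;                           // caller zeroes fields, sets header
    }
  }
}

int main() {
  static char buffer[1024];
  EdenRegion eden;
  eden.top.store(buffer);
  eden.end = buffer + sizeof(buffer);
  char* a = try_allocate(eden, 64);
  char* b = try_allocate(eden, 64);
  return (a != nullptr && b == a + 64) ? 0 : 1;
}

The TLAB path earlier in the same method exists precisely to keep this shared CAS off the common allocation path.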
__ bind(initialize_header); if (UseBiasedLocking) { - __ movq(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); - __ movq(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1); + __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); + __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1); } else { - __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), + __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), (intptr_t) markOopDesc::prototype()); // header (address 0x1) } - __ movq(Address(rax, oopDesc::klass_offset_in_bytes()), rsi); // klass + __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) + __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops + __ store_klass(rax, rsi); // store klass last __ jmp(done); } @@ -3201,7 +3273,7 @@ __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1); __ get_constant_pool(c_rarg1); __ movl(c_rarg3, rax); - call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), c_rarg1, c_rarg2, c_rarg3); } @@ -3214,34 +3286,34 @@ void TemplateTable::checkcast() { transition(atos, atos); Label done, is_null, ok_is_subtype, quicked, resolved; - __ testq(rax, rax); // object is in rax + __ testptr(rax, rax); // object is in rax __ jcc(Assembler::zero, is_null); // Get cpool & tags index __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index // See if bytecode has already been quicked - __ cmpb(Address(rdx, rbx, - Address::times_1, + __ cmpb(Address(rdx, rbx, + Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class); __ jcc(Assembler::equal, quicked); - - __ movq(r12, rcx); // save rcx XXX __ push(atos); // save receiver for result, and for GC + __ mov(r12, rcx); // save rcx XXX call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); - __ pop_ptr(rdx); // restore receiver __ movq(rcx, r12); // restore rcx XXX + __ reinit_heapbase(); + __ pop_ptr(rdx); // restore receiver __ jmpb(resolved); // Get superklass in rax and subklass in rbx __ bind(quicked); - __ movq(rdx, rax); // Save object in rdx; rax needed for subtype check - __ movq(rax, Address(rcx, rbx, + __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check + __ movptr(rax, Address(rcx, rbx, Address::times_8, sizeof(constantPoolOopDesc))); __ bind(resolved); - __ movq(rbx, Address(rdx, oopDesc::klass_offset_in_bytes())); + __ load_klass(rbx, rdx); // Generate subtype check. Blows rcx, rdi. Object in rdx. // Superklass in rax. Subklass in rbx. @@ -3254,7 +3326,7 @@ // Come here on success __ bind(ok_is_subtype); - __ movq(rax, rdx); // Restore object in rdx + __ mov(rax, rdx); // Restore object in rdx // Collect counts on whether this check-cast sees NULLs a lot or not. 
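The switch from a raw movq of the klass field to store_klass_gap/store_klass and load_klass above is what makes this code compressed-oops clean: with compressed oops the klass reference occupies only 32 bits of its header word, encoded as a shifted offset from a base, and the remaining 4 bytes (the klass gap) must be zeroed explicitly. The sketch below models only that encoding, with made-up types and assumed base/shift values, not HotSpot's actual narrow-oop implementation:

#include <cassert>
#include <cstdint>

// Illustrative compressed-pointer codec: narrow value = (address - base) >> shift.
struct CompressedOops {
  uintptr_t base;   // assumed heap base
  unsigned  shift;  // assumed log2(object alignment), typically 3

  uint32_t encode(uintptr_t p) const {
    assert(p == 0 || p >= base);
    return p == 0 ? 0u : (uint32_t)((p - base) >> shift);
  }
  uintptr_t decode(uint32_t n) const {
    return n == 0 ? 0 : base + ((uintptr_t)n << shift);
  }
};

// Illustrative header layout: store_klass() fills narrow_klass,
// store_klass_gap() clears the padding next to it.
struct ObjHeader {
  uintptr_t mark;          // mark word
  uint32_t  narrow_klass;  // 32-bit encoded klass reference
  uint32_t  klass_gap;     // must be zeroed under compressed oops
};

int main() {
  CompressedOops co{0x700000000ull, 3};   // assumed values, for illustration only
  uintptr_t klass_addr = co.base + 0x1000;
  uint32_t  narrow     = co.encode(klass_addr);
  assert(co.decode(narrow) == klass_addr);
  return 0;
}

This is also why checkcast and instanceof now go through load_klass instead of reading oopDesc::klass_offset_in_bytes() directly: the 64-bit load would pick up the gap bytes as well.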
if (ProfileInterpreter) { @@ -3270,7 +3342,7 @@ void TemplateTable::instanceof() { transition(atos, itos); Label done, is_null, ok_is_subtype, quicked, resolved; - __ testq(rax, rax); + __ testptr(rax, rax); __ jcc(Assembler::zero, is_null); // Get cpool & tags index @@ -3278,24 +3350,25 @@ __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index // See if bytecode has already been quicked __ cmpb(Address(rdx, rbx, - Address::times_1, + Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class); __ jcc(Assembler::equal, quicked); - __ movq(r12, rcx); // save rcx __ push(atos); // save receiver for result, and for GC + __ mov(r12, rcx); // save rcx call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); - __ pop_ptr(rdx); // restore receiver - __ movq(rdx, Address(rdx, oopDesc::klass_offset_in_bytes())); __ movq(rcx, r12); // restore rcx + __ reinit_heapbase(); + __ pop_ptr(rdx); // restore receiver + __ load_klass(rdx, rdx); __ jmpb(resolved); // Get superklass in rax and subklass in rdx __ bind(quicked); - __ movq(rdx, Address(rax, oopDesc::klass_offset_in_bytes())); - __ movq(rax, Address(rcx, rbx, - Address::times_8, sizeof(constantPoolOopDesc))); + __ load_klass(rdx, rax); + __ movptr(rax, Address(rcx, rbx, + Address::times_8, sizeof(constantPoolOopDesc))); __ bind(resolved); @@ -3327,28 +3400,28 @@ // Breakpoints void TemplateTable::_breakpoint() { // Note: We get here even if we are single stepping.. - // jbug inists on setting breakpoints at every bytecode - // even if we are in single step mode. - + // jbug inists on setting breakpoints at every bytecode + // even if we are in single step mode. + transition(vtos, vtos); // get the unpatched byte code __ get_method(c_rarg1); - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), c_rarg1, r13); - __ movq(rbx, rax); + __ mov(rbx, rax); // post the breakpoint event __ get_method(c_rarg1); - __ call_VM(noreg, + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), c_rarg1, r13); // complete the execution of original bytecode __ dispatch_only_normal(vtos); -} +} //----------------------------------------------------------------------------- // Exceptions @@ -3394,58 +3467,58 @@ __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL // find a free slot in the monitor block (result in c_rarg1) - { + { Label entry, loop, exit; - __ movq(c_rarg3, monitor_block_top); // points to current entry, + __ movptr(c_rarg3, monitor_block_top); // points to current entry, // starting with top-most entry - __ leaq(c_rarg2, monitor_block_bot); // points to word before bottom + __ lea(c_rarg2, monitor_block_bot); // points to word before bottom // of monitor block __ jmpb(entry); __ bind(loop); // check if current entry is used - __ cmpq(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int) NULL); + __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD); // if not used then remember entry in c_rarg1 - __ cmovq(Assembler::equal, c_rarg1, c_rarg3); + __ cmov(Assembler::equal, c_rarg1, c_rarg3); // check if current entry is for same object - __ cmpq(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes())); + __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes())); // if same object then stop searching __ jccb(Assembler::equal, exit); // otherwise advance to next entry - __ addq(c_rarg3, entry_size); + __ addptr(c_rarg3, 
entry_size); __ bind(entry); // check if bottom reached - __ cmpq(c_rarg3, c_rarg2); + __ cmpptr(c_rarg3, c_rarg2); // if not at bottom then check this entry __ jcc(Assembler::notEqual, loop); __ bind(exit); } - __ testq(c_rarg1, c_rarg1); // check if a slot has been found + __ testptr(c_rarg1, c_rarg1); // check if a slot has been found __ jcc(Assembler::notZero, allocated); // if found, continue with that one // allocate one if there's no free slot - { + { Label entry, loop; - // 1. compute new pointers // rsp: old expression stack top - __ movq(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom - __ subq(rsp, entry_size); // move expression stack top - __ subq(c_rarg1, entry_size); // move expression stack bottom - __ movq(c_rarg3, rsp); // set start value for copy loop - __ movq(monitor_block_bot, c_rarg1); // set new monitor block bottom + // 1. compute new pointers // rsp: old expression stack top + __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom + __ subptr(rsp, entry_size); // move expression stack top + __ subptr(c_rarg1, entry_size); // move expression stack bottom + __ mov(c_rarg3, rsp); // set start value for copy loop + __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom __ jmp(entry); // 2. move expression stack contents __ bind(loop); - __ movq(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack - // word from old location - __ movq(Address(c_rarg3, 0), c_rarg2); // and store it at new location - __ addq(c_rarg3, wordSize); // advance to next word + __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack + // word from old location + __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location + __ addptr(c_rarg3, wordSize); // advance to next word __ bind(entry); - __ cmpq(c_rarg3, c_rarg1); // check if bottom reached + __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached __ jcc(Assembler::notEqual, loop); // if not at bottom then // copy next word } - + // call run-time routine // c_rarg1: points to monitor entry __ bind(allocated); @@ -3454,10 +3527,10 @@ // handling for async. exceptions work correctly. // The object has already been poped from the stack, so the // expression stack looks correct. 
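The monitorenter sequence above scans the frame's monitor block for a slot: remember the most recent unused entry, stop early if the receiver already has an entry, and otherwise grow the block by sliding the expression stack down one entry. Abstracting away the frame layout, the search logic amounts to the following (BasicObjectLock here is a hypothetical stand-in for the VM's runtime type):

#include <cstddef>

struct BasicObjectLock {
  void* obj;   // nullptr means the slot is unused
};

// Walk the monitor block from top toward bottom, as the loop above does
// with addptr(c_rarg3, entry_size); return a free slot if one exists,
// or nullptr to signal that the block must be grown.
BasicObjectLock* find_free_monitor_slot(BasicObjectLock* top,
                                        BasicObjectLock* bottom,
                                        void* receiver) {
  BasicObjectLock* free_slot = nullptr;
  for (BasicObjectLock* cur = top; cur != bottom; ++cur) {
    if (cur->obj == nullptr) free_slot = cur;  // remember an unused entry (the cmov)
    if (cur->obj == receiver) break;           // same object: stop searching
  }
  return free_slot;  // nullptr -> caller allocates a new entry
}

int main() {
  BasicObjectLock block[3] = { {nullptr}, {nullptr}, {nullptr} };
  int dummy = 0;
  BasicObjectLock* slot = find_free_monitor_slot(block, block + 3, &dummy);
  return slot != nullptr ? 0 : 1;
}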
- __ incrementq(r13); + __ increment(r13); - // store object - __ movq(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax); + // store object + __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax); __ lock_object(c_rarg1); // check to make sure this monitor doesn't cause stack overflow after locking @@ -3485,39 +3558,39 @@ Label found; // find matching slot - { + { Label entry, loop; - __ movq(c_rarg1, monitor_block_top); // points to current entry, + __ movptr(c_rarg1, monitor_block_top); // points to current entry, // starting with top-most entry - __ leaq(c_rarg2, monitor_block_bot); // points to word before bottom + __ lea(c_rarg2, monitor_block_bot); // points to word before bottom // of monitor block __ jmpb(entry); __ bind(loop); // check if current entry is for same object - __ cmpq(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); + __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); // if same object then stop searching __ jcc(Assembler::equal, found); // otherwise advance to next entry - __ addq(c_rarg1, entry_size); + __ addptr(c_rarg1, entry_size); __ bind(entry); // check if bottom reached - __ cmpq(c_rarg1, c_rarg2); + __ cmpptr(c_rarg1, c_rarg2); // if not at bottom then check this entry __ jcc(Assembler::notEqual, loop); } // error handling. Unlocking was not block-structured - __ call_VM(noreg, CAST_FROM_FN_PTR(address, + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); __ should_not_reach_here(); // call run-time routine // rsi: points to monitor entry __ bind(found); - __ push_ptr(rax); // make sure object is on stack (contract with oopMaps) - __ unlock_object(c_rarg1); - __ pop_ptr(rax); // discard object + __ push_ptr(rax); // make sure object is on stack (contract with oopMaps) + __ unlock_object(c_rarg1); + __ pop_ptr(rax); // discard object } @@ -3539,11 +3612,12 @@ // last dim is on top of stack; we want address of first one: // first_addr = last_addr + (ndims - 1) * wordSize if (TaggedStackInterpreter) __ shll(rax, 1); // index*2 - __ leaq(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize)); - call_VM(rax, + __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize)); + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), c_rarg1); __ load_unsigned_byte(rbx, at_bcp(3)); if (TaggedStackInterpreter) __ shll(rbx, 1); // index*2 - __ leaq(rsp, Address(rsp, rbx, Address::times_8)); + __ lea(rsp, Address(rsp, rbx, Address::times_8)); } +#endif // !CC_INTERP --- old/hotspot/src/cpu/x86/vm/vm_version_x86_32.cpp 2009-08-01 04:08:25.566570045 +0100 +++ new/hotspot/src/cpu/x86/vm/vm_version_x86_32.cpp 2009-08-01 04:08:25.471742389 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)vm_version_x86_32.cpp 1.71 07/09/20 10:42:56 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ # include "incls/_precompiled.incl" @@ -70,23 +67,23 @@ // // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info); // - __ pushl(rbp); - __ movl(rbp, Address(rsp, 8)); // cpuid_info address - __ pushl(rbx); - __ pushl(rsi); - __ pushfd(); // preserve rbx, and flags - __ popl(rax); - __ pushl(rax); - __ movl(rcx, rax); + __ push(rbp); + __ movptr(rbp, Address(rsp, 8)); // cpuid_info address + __ push(rbx); + __ push(rsi); + __ pushf(); // preserve rbx, and flags + __ pop(rax); + __ push(rax); + __ mov(rcx, rax); // // if we are unable to change the AC flag, we have a 386 // __ xorl(rax, EFL_AC); - __ pushl(rax); - __ popfd(); - __ pushfd(); - __ popl(rax); - __ cmpl(rax, rcx); + __ push(rax); + __ popf(); + __ pushf(); + __ pop(rax); + __ cmpptr(rax, rcx); __ jccb(Assembler::notEqual, detect_486); __ movl(rax, CPU_FAMILY_386); @@ -98,13 +95,13 @@ // not support the "cpuid" instruction. // __ bind(detect_486); - __ movl(rax, rcx); + __ mov(rax, rcx); __ xorl(rax, EFL_ID); - __ pushl(rax); - __ popfd(); - __ pushfd(); - __ popl(rax); - __ cmpl(rcx, rax); + __ push(rax); + __ popf(); + __ pushf(); + __ pop(rax); + __ cmpptr(rcx, rax); __ jccb(Assembler::notEqual, detect_586); __ bind(cpu486); @@ -116,13 +113,13 @@ // at this point, we have a chip which supports the "cpuid" instruction // __ bind(detect_586); - __ xorl(rax, rax); + __ xorptr(rax, rax); __ cpuid(); - __ orl(rax, rax); - __ jcc(Assembler::equal, cpu486); // if cpuid doesn't support an input - // value of at least 1, we give up and - // assume a 486 - __ leal(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); + __ orptr(rax, rax); + __ jcc(Assembler::equal, cpu486); // if cpuid doesn't support an input + // value of at least 1, we give up and + // assume a 486 + __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -137,13 +134,13 @@ __ movl(rax, 4); // and rcx already set to 0x0 __ xorl(rcx, rcx); __ cpuid(); - __ pushl(rax); + __ push(rax); __ andl(rax, 0x1f); // Determine if valid cache parameters used __ orl(rax, rax); // rax,[4:0] == 0 indicates invalid cache - __ popl(rax); + __ pop(rax); __ jccb(Assembler::equal, std_cpuid1); - __ leal(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset()))); + __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -155,7 +152,7 @@ __ bind(std_cpuid1); __ movl(rax, 1); __ cpuid(); - __ leal(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset()))); + __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -174,7 +171,7 @@ // __ movl(rax, 0x80000008); __ cpuid(); - __ leal(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset()))); + __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -186,7 +183,7 @@ __ bind(ext_cpuid5); __ movl(rax, 0x80000005); __ cpuid(); - __ leal(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset()))); + __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -198,7 +195,7 @@ __ bind(ext_cpuid1); __ movl(rax, 0x80000001); __ cpuid(); - __ leal(rsi, Address(rbp, 
in_bytes(VM_Version::ext_cpuid1_offset()))); + __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -208,10 +205,10 @@ // return // __ bind(done); - __ popfd(); - __ popl(rsi); - __ popl(rbx); - __ popl(rbp); + __ popf(); + __ pop(rsi); + __ pop(rbx); + __ pop(rbp); __ ret(0); # undef __ @@ -245,9 +242,11 @@ _supports_cx8 = supports_cmpxchg8(); // if the OS doesn't support SSE, we can't use this feature even if the HW does if( !os::supports_sse()) - _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4|CPU_SSE4A); - if (UseSSE < 4) - _cpuFeatures &= ~CPU_SSE4; + _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2); + if (UseSSE < 4) { + _cpuFeatures &= ~CPU_SSE4_1; + _cpuFeatures &= ~CPU_SSE4_2; + } if (UseSSE < 3) { _cpuFeatures &= ~CPU_SSE3; _cpuFeatures &= ~CPU_SSSE3; @@ -264,7 +263,7 @@ } char buf[256]; - jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", cores_per_cpu(), threads_per_core(), cpu_family(), _model, _stepping, (supports_cmov() ? ", cmov" : ""), @@ -275,7 +274,8 @@ (supports_sse2() ? ", sse2" : ""), (supports_sse3() ? ", sse3" : ""), (supports_ssse3()? ", ssse3": ""), - (supports_sse4() ? ", sse4" : ""), + (supports_sse4_1() ? ", sse4.1" : ""), + (supports_sse4_2() ? ", sse4.2" : ""), (supports_mmx_ext() ? ", mmxext" : ""), (supports_3dnow() ? ", 3dnow" : ""), (supports_3dnow2() ? ", 3dnowext" : ""), @@ -288,7 +288,7 @@ // older Pentiums which do not support it. if( UseSSE > 4 ) UseSSE=4; if( UseSSE < 0 ) UseSSE=0; - if( !supports_sse4() ) // Drop to 3 if no SSE4 support + if( !supports_sse4_1() ) // Drop to 3 if no SSE4 support UseSSE = MIN2((intx)3,UseSSE); if( !supports_sse3() ) // Drop to 2 if no SSE3 support UseSSE = MIN2((intx)2,UseSSE); @@ -310,6 +310,10 @@ // Use it on new AMD cpus starting from Opteron. UseAddressNop = true; } + if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) { + // Use it on new AMD cpus starting from Opteron. + UseNewLongLShift = true; + } if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) { if( supports_sse4a() ) { UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron @@ -324,6 +328,20 @@ UseXmmRegToRegMoveAll = false; } } + if( FLAG_IS_DEFAULT(UseXmmI2F) ) { + if( supports_sse4a() ) { + UseXmmI2F = true; + } else { + UseXmmI2F = false; + } + } + if( FLAG_IS_DEFAULT(UseXmmI2D) ) { + if( supports_sse4a() ) { + UseXmmI2D = true; + } else { + UseXmmI2D = false; + } + } } if( is_intel() ) { // Intel cpus specific settings @@ -352,7 +370,7 @@ // For new Intel cpus do the next optimization: // don't align the beginning of a loop if there are enough instructions // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp) - // in current fetch line (OptoLoopAlignment) or the padding + // in current fetch line (OptoLoopAlignment) or the padding // is big (> MaxLoopPad). // Set MaxLoopPad to 11 for new Intel cpus to reduce number of // generated NOP instructions. 
11 is the largest size of one @@ -360,6 +378,14 @@ MaxLoopPad = 11; } #endif // COMPILER2 + if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) { + UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus + } + if( supports_sse4_2() && supports_ht() ) { // Newest Intel cpus + if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) { + UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus + } + } } } @@ -390,7 +416,7 @@ AllocatePrefetchDistance = allocate_prefetch_distance(); AllocatePrefetchStyle = allocate_prefetch_style(); - if( AllocatePrefetchStyle == 2 && is_intel() && + if( AllocatePrefetchStyle == 2 && is_intel() && cpu_family() == 6 && supports_sse3() ) { // watermark prefetching on Core AllocatePrefetchDistance = 320; } @@ -398,7 +424,7 @@ #ifndef PRODUCT if (PrintMiscellaneous && Verbose) { - tty->print_cr("Logical CPUs per package: %u", + tty->print_cr("Logical CPUs per core: %u", logical_processors_per_package()); tty->print_cr("UseSSE=%d",UseSSE); tty->print("Allocation: "); --- old/hotspot/src/cpu/x86/vm/vm_version_x86_32.hpp 2009-08-01 04:08:26.634514829 +0100 +++ new/hotspot/src/cpu/x86/vm/vm_version_x86_32.hpp 2009-08-01 04:08:26.552049342 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)vm_version_x86_32.hpp 1.32 07/07/02 16:50:39 JVM" -#endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ class VM_Version: public Abstract_VM_Version { @@ -71,9 +68,9 @@ cmpxchg16: 1, : 4, dca : 1, - : 4, - popcnt : 1, - : 8; + sse4_1 : 1, + sse4_2 : 1, + : 11; } bits; }; @@ -165,8 +162,8 @@ static int _cpu; static int _model; static int _stepping; - static int _cpuFeatures; // features returned by the "cpuid" instruction - // 0 if this instruction is not available + static int _cpuFeatures; // features returned by the "cpuid" instruction + // 0 if this instruction is not available static const char* _features_str; enum { @@ -180,8 +177,9 @@ CPU_SSE2 = (1 << 7), CPU_SSE3 = (1 << 8), // sse3 comes from cpuid 1 (ECX) CPU_SSSE3= (1 << 9), - CPU_SSE4 = (1 <<10), - CPU_SSE4A= (1 <<11) + CPU_SSE4A= (1 <<10), + CPU_SSE4_1 = (1 << 11), + CPU_SSE4_2 = (1 << 12) } cpuFeatureFlags; // cpuid information block. 
All info derived from executing cpuid with @@ -243,22 +241,14 @@ static CpuidInfo _cpuid_info; // Extractors and predicates - static bool is_extended_cpu_family() { - const uint32_t Extended_Cpu_Family = 0xf; - return _cpuid_info.std_cpuid1_rax.bits.family == Extended_Cpu_Family; - } static uint32_t extended_cpu_family() { uint32_t result = _cpuid_info.std_cpuid1_rax.bits.family; - if (is_extended_cpu_family()) { - result += _cpuid_info.std_cpuid1_rax.bits.ext_family; - } + result += _cpuid_info.std_cpuid1_rax.bits.ext_family; return result; } static uint32_t extended_cpu_model() { uint32_t result = _cpuid_info.std_cpuid1_rax.bits.model; - if (is_extended_cpu_family()) { - result |= _cpuid_info.std_cpuid1_rax.bits.ext_model << 4; - } + result |= _cpuid_info.std_cpuid1_rax.bits.ext_model << 4; return result; } static uint32_t cpu_stepping() { @@ -275,13 +265,13 @@ result |= CPU_CX8; if (_cpuid_info.std_cpuid1_rdx.bits.cmov != 0) result |= CPU_CMOV; - if (_cpuid_info.std_cpuid1_rdx.bits.fxsr != 0 || is_amd() && + if (_cpuid_info.std_cpuid1_rdx.bits.fxsr != 0 || is_amd() && _cpuid_info.ext_cpuid1_rdx.bits.fxsr != 0) result |= CPU_FXSR; // HT flag is set for multi-core processors also. if (threads_per_core() > 1) result |= CPU_HT; - if (_cpuid_info.std_cpuid1_rdx.bits.mmx != 0 || is_amd() && + if (_cpuid_info.std_cpuid1_rdx.bits.mmx != 0 || is_amd() && _cpuid_info.ext_cpuid1_rdx.bits.mmx != 0) result |= CPU_MMX; if (is_amd() && _cpuid_info.ext_cpuid1_rdx.bits.tdnow != 0) @@ -296,6 +286,10 @@ result |= CPU_SSSE3; if (is_amd() && _cpuid_info.ext_cpuid1_rcx.bits.sse4a != 0) result |= CPU_SSE4A; + if (_cpuid_info.std_cpuid1_rcx.bits.sse4_1 != 0) + result |= CPU_SSE4_1; + if (_cpuid_info.std_cpuid1_rcx.bits.sse4_2 != 0) + result |= CPU_SSE4_2; return result; } @@ -339,26 +333,26 @@ static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA' static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG' - static uint cores_per_cpu() { + static uint cores_per_cpu() { uint result = 1; if (is_intel()) { result = (_cpuid_info.dcp_cpuid4_rax.bits.cores_per_cpu + 1); } else if (is_amd()) { result = (_cpuid_info.ext_cpuid8_rcx.bits.cores_per_cpu + 1); } - return result; + return result; } - static uint threads_per_core() { + static uint threads_per_core() { uint result = 1; if (_cpuid_info.std_cpuid1_rdx.bits.ht != 0) { - result = _cpuid_info.std_cpuid1_rbx.bits.threads_per_cpu / + result = _cpuid_info.std_cpuid1_rbx.bits.threads_per_cpu / cores_per_cpu(); } - return result; + return result; } - static intx L1_data_cache_line_size() { + static intx L1_data_cache_line_size() { intx result = 0; if (is_intel()) { result = (_cpuid_info.dcp_cpuid4_rbx.bits.L1_line_size + 1); @@ -367,7 +361,7 @@ } if (result < 32) // not defined ? 
result = 32; // 32 bytes by default on x86 - return result; + return result; } // @@ -383,7 +377,8 @@ static bool supports_sse2() { return (_cpuFeatures & CPU_SSE2) != 0; } static bool supports_sse3() { return (_cpuFeatures & CPU_SSE3) != 0; } static bool supports_ssse3() { return (_cpuFeatures & CPU_SSSE3)!= 0; } - static bool supports_sse4() { return (_cpuFeatures & CPU_SSE4) != 0; } + static bool supports_sse4_1() { return (_cpuFeatures & CPU_SSE4_1) != 0; } + static bool supports_sse4_2() { return (_cpuFeatures & CPU_SSE4_2) != 0; } // // AMD features // @@ -412,7 +407,7 @@ // Athlon - 128 / prefetchnta // Opteron - 256 / prefetchnta // Core - 256 / prefetchnta - // It will be used only when AllocatePrefetchStyle > 0 + // It will be used only when AllocatePrefetchStyle > 0 intx count = AllocatePrefetchDistance; if (count < 0) { // default ? @@ -438,7 +433,7 @@ assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive"); // Return 0 if AllocatePrefetchDistance was not defined or // prefetch instruction is not supported. - return (AllocatePrefetchDistance > 0 && + return (AllocatePrefetchDistance > 0 && (supports_3dnow() || supports_sse())) ? AllocatePrefetchStyle : 0; } }; --- old/hotspot/src/cpu/x86/vm/vm_version_x86_64.cpp 2009-08-01 04:08:27.604254639 +0100 +++ new/hotspot/src/cpu/x86/vm/vm_version_x86_64.cpp 2009-08-01 04:08:27.513107402 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)vm_version_x86_64.cpp 1.25 07/09/20 10:42:57 JVM" -#endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ # include "incls/_precompiled.incl" @@ -63,17 +60,17 @@ // // rcx and rdx are first and second argument registers on windows - __ pushq(rbp); - __ movq(rbp, c_rarg0); // cpuid_info address - __ pushq(rbx); - __ pushq(rsi); + __ push(rbp); + __ mov(rbp, c_rarg0); // cpuid_info address + __ push(rbx); + __ push(rsi); // // we have a chip which supports the "cpuid" instruction // __ xorl(rax, rax); __ cpuid(); - __ leaq(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); + __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -88,13 +85,13 @@ __ movl(rax, 4); __ xorl(rcx, rcx); // L1 cache __ cpuid(); - __ pushq(rax); + __ push(rax); __ andl(rax, 0x1f); // Determine if valid cache parameters used __ orl(rax, rax); // eax[4:0] == 0 indicates invalid cache - __ popq(rax); + __ pop(rax); __ jccb(Assembler::equal, std_cpuid1); - __ leaq(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset()))); + __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -106,7 +103,7 @@ __ bind(std_cpuid1); __ movl(rax, 1); __ cpuid(); - __ leaq(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset()))); + __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -125,7 +122,7 @@ // __ movl(rax, 0x80000008); __ cpuid(); - __ leaq(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset()))); + __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -137,7 +134,7 @@ __ bind(ext_cpuid5); __ movl(rax, 0x80000005); __ cpuid(); - __ leaq(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset()))); + __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -149,7 +146,7 @@ __ bind(ext_cpuid1); __ movl(rax, 0x80000001); __ cpuid(); - __ leaq(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset()))); + __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset()))); __ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 8), rcx); @@ -159,9 +156,9 @@ // return // __ bind(done); - __ popq(rsi); - __ popq(rbx); - __ popq(rbp); + __ pop(rsi); + __ pop(rbx); + __ pop(rbp); __ ret(0); # undef __ @@ -189,8 +186,10 @@ if (!VM_Version::supports_sse2()) { vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported"); } - if (UseSSE < 4) - _cpuFeatures &= ~CPU_SSE4; + if (UseSSE < 4) { + _cpuFeatures &= ~CPU_SSE4_1; + _cpuFeatures &= ~CPU_SSE4_2; + } if (UseSSE < 3) { _cpuFeatures &= ~CPU_SSE3; _cpuFeatures &= ~CPU_SSSE3; @@ -207,7 +206,7 @@ } char buf[256]; - jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", cores_per_cpu(), threads_per_core(), cpu_family(), _model, _stepping, (supports_cmov() ? ", cmov" : ""), @@ -218,7 +217,8 @@ (supports_sse2() ? ", sse2" : ""), (supports_sse3() ? ", sse3" : ""), (supports_ssse3()? ", ssse3": ""), - (supports_sse4() ? ", sse4" : ""), + (supports_sse4_1() ? 
", sse4.1" : ""), + (supports_sse4_2() ? ", sse4.2" : ""), (supports_mmx_ext() ? ", mmxext" : ""), (supports_3dnow() ? ", 3dnow" : ""), (supports_3dnow2() ? ", 3dnowext" : ""), @@ -231,7 +231,7 @@ // older Pentiums which do not support it. if( UseSSE > 4 ) UseSSE=4; if( UseSSE < 0 ) UseSSE=0; - if( !supports_sse4() ) // Drop to 3 if no SSE4 support + if( !supports_sse4_1() ) // Drop to 3 if no SSE4 support UseSSE = MIN2((intx)3,UseSSE); if( !supports_sse3() ) // Drop to 2 if no SSE3 support UseSSE = MIN2((intx)2,UseSSE); @@ -250,7 +250,7 @@ if( is_amd() ) { // AMD cpus specific settings if( FLAG_IS_DEFAULT(UseAddressNop) ) { - // Use it on all AMD cpus starting from Opteron (don't need + // Use it on all AMD cpus starting from Opteron (don't need // a cpu check since only Opteron and new cpus support 64-bits mode). UseAddressNop = true; } @@ -268,6 +268,20 @@ UseXmmRegToRegMoveAll = false; } } + if( FLAG_IS_DEFAULT(UseXmmI2F) ) { + if( supports_sse4a() ) { + UseXmmI2F = true; + } else { + UseXmmI2F = false; + } + } + if( FLAG_IS_DEFAULT(UseXmmI2D) ) { + if( supports_sse4a() ) { + UseXmmI2D = true; + } else { + UseXmmI2D = false; + } + } } if( is_intel() ) { // Intel cpus specific settings @@ -295,7 +309,7 @@ // For new Intel cpus do the next optimization: // don't align the beginning of a loop if there are enough instructions // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp) - // in current fetch line (OptoLoopAlignment) or the padding + // in current fetch line (OptoLoopAlignment) or the padding // is big (> MaxLoopPad). // Set MaxLoopPad to 11 for new Intel cpus to reduce number of // generated NOP instructions. 11 is the largest size of one @@ -303,6 +317,14 @@ MaxLoopPad = 11; } #endif // COMPILER2 + if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) { + UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus + } + if( supports_sse4_2() && supports_ht() ) { // Newest Intel cpus + if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) { + UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus + } + } } } @@ -331,7 +353,7 @@ AllocatePrefetchDistance = allocate_prefetch_distance(); AllocatePrefetchStyle = allocate_prefetch_style(); - if( AllocatePrefetchStyle == 2 && is_intel() && + if( AllocatePrefetchStyle == 2 && is_intel() && cpu_family() == 6 && supports_sse3() ) { // watermark prefetching on Core AllocatePrefetchDistance = 384; } @@ -344,7 +366,7 @@ #ifndef PRODUCT if (PrintMiscellaneous && Verbose) { - tty->print_cr("Logical CPUs per package: %u", + tty->print_cr("Logical CPUs per core: %u", logical_processors_per_package()); tty->print_cr("UseSSE=%d",UseSSE); tty->print("Allocation: "); --- old/hotspot/src/cpu/x86/vm/vm_version_x86_64.hpp 2009-08-01 04:08:28.547092609 +0100 +++ new/hotspot/src/cpu/x86/vm/vm_version_x86_64.hpp 2009-08-01 04:08:28.468620008 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)vm_version_x86_64.hpp 1.15 07/05/05 17:04:09 JVM" -#endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ class VM_Version : public Abstract_VM_Version { @@ -71,9 +68,9 @@ cmpxchg16: 1, : 4, dca : 1, - : 4, - popcnt : 1, - : 8; + sse4_1 : 1, + sse4_2 : 1, + : 11; } bits; }; @@ -165,8 +162,8 @@ static int _cpu; static int _model; static int _stepping; - static int _cpuFeatures; // features returned by the "cpuid" instruction - // 0 if this instruction is not available + static int _cpuFeatures; // features returned by the "cpuid" instruction + // 0 if this instruction is not available static const char* _features_str; enum { @@ -180,8 +177,9 @@ CPU_SSE2 = (1 << 7), CPU_SSE3 = (1 << 8), CPU_SSSE3= (1 << 9), - CPU_SSE4 = (1 <<10), - CPU_SSE4A= (1 <<11) + CPU_SSE4A= (1 <<10), + CPU_SSE4_1 = (1 << 11), + CPU_SSE4_2 = (1 << 12) } cpuFeatureFlags; // cpuid information block. All info derived from executing cpuid with @@ -243,22 +241,14 @@ static CpuidInfo _cpuid_info; // Extractors and predicates - static bool is_extended_cpu_family() { - const uint32_t Extended_Cpu_Family = 0xf; - return _cpuid_info.std_cpuid1_eax.bits.family == Extended_Cpu_Family; - } static uint32_t extended_cpu_family() { uint32_t result = _cpuid_info.std_cpuid1_eax.bits.family; - if (is_extended_cpu_family()) { - result += _cpuid_info.std_cpuid1_eax.bits.ext_family; - } + result += _cpuid_info.std_cpuid1_eax.bits.ext_family; return result; } static uint32_t extended_cpu_model() { uint32_t result = _cpuid_info.std_cpuid1_eax.bits.model; - if (is_extended_cpu_family()) { - result |= _cpuid_info.std_cpuid1_eax.bits.ext_model << 4; - } + result |= _cpuid_info.std_cpuid1_eax.bits.ext_model << 4; return result; } static uint32_t cpu_stepping() { @@ -275,13 +265,13 @@ result |= CPU_CX8; if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0) result |= CPU_CMOV; - if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || is_amd() && + if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || is_amd() && _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0) result |= CPU_FXSR; // HT flag is set for multi-core processors also. 
if (threads_per_core() > 1) result |= CPU_HT; - if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || is_amd() && + if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || is_amd() && _cpuid_info.ext_cpuid1_edx.bits.mmx != 0) result |= CPU_MMX; if (is_amd() && _cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) @@ -296,6 +286,10 @@ result |= CPU_SSSE3; if (is_amd() && _cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0) result |= CPU_SSE4A; + if (_cpuid_info.std_cpuid1_ecx.bits.sse4_1 != 0) + result |= CPU_SSE4_1; + if (_cpuid_info.std_cpuid1_ecx.bits.sse4_2 != 0) + result |= CPU_SSE4_2; return result; } @@ -339,26 +333,26 @@ static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA' static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG' - static uint cores_per_cpu() { + static uint cores_per_cpu() { uint result = 1; if (is_intel()) { result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1); } else if (is_amd()) { result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1); } - return result; + return result; } - static uint threads_per_core() { + static uint threads_per_core() { uint result = 1; if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) { - result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu / + result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu / cores_per_cpu(); } - return result; + return result; } - static intx L1_data_cache_line_size() { + static intx L1_data_cache_line_size() { intx result = 0; if (is_intel()) { result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1); @@ -367,7 +361,7 @@ } if (result < 32) // not defined ? result = 32; // 32 bytes by default for other x64 - return result; + return result; } // @@ -383,7 +377,8 @@ static bool supports_sse2() { return (_cpuFeatures & CPU_SSE2) != 0; } static bool supports_sse3() { return (_cpuFeatures & CPU_SSE3) != 0; } static bool supports_ssse3() { return (_cpuFeatures & CPU_SSSE3)!= 0; } - static bool supports_sse4() { return (_cpuFeatures & CPU_SSE4) != 0; } + static bool supports_sse4_1() { return (_cpuFeatures & CPU_SSE4_1) != 0; } + static bool supports_sse4_2() { return (_cpuFeatures & CPU_SSE4_2) != 0; } // // AMD features // @@ -408,7 +403,7 @@ // Pentium 4 - 512 / prefetchnta // Opteron - 256 / prefetchnta // Core - 256 / prefetchnta - // It will be used only when AllocatePrefetchStyle > 0 + // It will be used only when AllocatePrefetchStyle > 0 intx count = AllocatePrefetchDistance; if (count < 0) { // default ? --- old/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp 2009-08-01 04:08:29.858881469 +0100 +++ new/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp 2009-08-01 04:08:29.532375057 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)vtableStubs_x86_32.cpp 1.56 07/10/08 13:01:12 JVM" -#endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
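The new sse4_1/sse4_2 bit-field members and the CPU_SSE4_1/CPU_SSE4_2 flags above correspond to CPUID leaf 1, ECX bits 19 and 20, which is why the old single CPU_SSE4 flag is split in two. As a reference point only, here is a minimal standalone query of those bits; this assumes a GCC/Clang toolchain and is not HotSpot code:

    #include <cpuid.h>   // __get_cpuid, GCC/Clang helper
    #include <cstdio>

    int main() {
      unsigned int eax, ebx, ecx, edx;
      if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {   // CPUID leaf 1
        int sse4_1 = (ecx >> 19) & 1;                 // CPUID.01H:ECX.SSE4_1 (bit 19)
        int sse4_2 = (ecx >> 20) & 1;                 // CPUID.01H:ECX.SSE4_2 (bit 20)
        std::printf("sse4.1=%d sse4.2=%d\n", sse4_1, sse4_2);
      }
      return 0;
    }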
- * + * */ #include "incls/_precompiled.incl" @@ -52,7 +49,7 @@ #ifndef PRODUCT if (CountCompiledCalls) { - __ increment(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); + __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); } #endif /* PRODUCT */ @@ -61,11 +58,11 @@ // get receiver klass address npe_addr = __ pc(); - __ movl(rax, Address(rcx, oopDesc::klass_offset_in_bytes())); + __ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes())); // compute entry offset (in words) int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); #ifndef PRODUCT - if (DebugVtables) { + if (DebugVtables) { Label L; // check offset vs vtable length __ cmpl(Address(rax, instanceKlass::vtable_length_offset()*wordSize), vtable_index*vtableEntry::size()); @@ -79,18 +76,18 @@ const Register method = rbx; // load methodOop and target address - __ movl(method, Address(rax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes())); + __ movptr(method, Address(rax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes())); if (DebugVtables) { Label L; - __ cmpl(method, NULL_WORD); + __ cmpptr(method, (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); - __ cmpl(Address(method, methodOopDesc::from_compiled_offset()), NULL_WORD); + __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notZero, L); __ stop("Vtable entry is NULL"); __ bind(L); } - // rax,: receiver klass + // rax,: receiver klass // method (rbx): methodOop // rcx: receiver address ame_addr = __ pc(); @@ -102,7 +99,7 @@ } -VtableStub* VtableStubs::create_itable_stub(int vtable_index) { +VtableStub* VtableStubs::create_itable_stub(int vtable_index) { // Note well: pd_code_size_limit is the absolute minimum we can get away with. If you // add code here, bump the code stub size returned by pd_code_size_limit! 
const int i486_code_length = VtableStub::pd_code_size_limit(false); @@ -110,76 +107,76 @@ ResourceMark rm; CodeBuffer cb(s->entry_point(), i486_code_length); MacroAssembler* masm = new MacroAssembler(&cb); - + // Entry arguments: // rax,: Interface // rcx: Receiver - + #ifndef PRODUCT if (CountCompiledCalls) { - __ increment(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); + __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); } #endif /* PRODUCT */ // get receiver (need to skip return address on top of stack) - + assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx"); - + // get receiver klass (also an implicit null-check) address npe_addr = __ pc(); - __ movl(rbx, Address(rcx, oopDesc::klass_offset_in_bytes())); + __ movptr(rbx, Address(rcx, oopDesc::klass_offset_in_bytes())); - __ movl(rsi, rbx); // Save klass in free register + __ mov(rsi, rbx); // Save klass in free register // Most registers are in use, so save a few - __ pushl(rdx); - // compute itable entry offset (in words) - const int base = instanceKlass::vtable_start_offset() * wordSize; + __ push(rdx); + // compute itable entry offset (in words) + const int base = instanceKlass::vtable_start_offset() * wordSize; assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below"); __ movl(rdx, Address(rbx, instanceKlass::vtable_length_offset() * wordSize)); // Get length of vtable - __ leal(rbx, Address(rbx, rdx, Address::times_4, base)); + __ lea(rbx, Address(rbx, rdx, Address::times_ptr, base)); if (HeapWordsPerLong > 1) { // Round up to align_object_offset boundary __ round_to(rbx, BytesPerLong); } Label hit, next, entry, throw_icce; - + __ jmpb(entry); __ bind(next); - __ addl(rbx, itableOffsetEntry::size() * wordSize); - + __ addptr(rbx, itableOffsetEntry::size() * wordSize); + __ bind(entry); // If the entry is NULL then we've reached the end of the table // without finding the expected interface, so throw an exception - __ movl(rdx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes())); - __ testl(rdx, rdx); + __ movptr(rdx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes())); + __ testptr(rdx, rdx); __ jcc(Assembler::zero, throw_icce); - __ cmpl(rax, rdx); - __ jcc(Assembler::notEqual, next); - + __ cmpptr(rax, rdx); + __ jcc(Assembler::notEqual, next); + // We found a hit, move offset into rbx, __ movl(rdx, Address(rbx, itableOffsetEntry::offset_offset_in_bytes())); - // Compute itableMethodEntry. + // Compute itableMethodEntry. const int method_offset = (itableMethodEntry::size() * wordSize * vtable_index) + itableMethodEntry::method_offset_in_bytes(); - - // Get methodOop and entrypoint for compiler + + // Get methodOop and entrypoint for compiler const Register method = rbx; - __ movl(method, Address(rsi, rdx, Address::times_1, method_offset)); + __ movptr(method, Address(rsi, rdx, Address::times_1, method_offset)); // Restore saved register, before possible trap. 
- __ popl(rdx); + __ pop(rdx); // method (rbx): methodOop // rcx: receiver - + #ifdef ASSERT if (DebugVtables) { Label L1; - __ cmpl(method, NULL_WORD); + __ cmpptr(method, (int32_t)NULL_WORD); __ jcc(Assembler::equal, L1); - __ cmpl(Address(method, methodOopDesc::from_compiled_offset()), NULL_WORD); + __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notZero, L1); __ stop("methodOop is null"); __ bind(L1); @@ -191,9 +188,9 @@ __ bind(throw_icce); // Restore saved register - __ popl(rdx); + __ pop(rdx); __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry())); - + masm->flush(); guarantee(__ pc() <= s->code_end(), "overflowed buffer"); --- old/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp 2009-08-01 04:08:31.206534754 +0100 +++ new/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp 2009-08-01 04:08:31.137071887 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)vtableStubs_x86_64.cpp 1.24 07/10/08 13:01:14 JVM" -#endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -34,8 +31,8 @@ #define __ masm-> #ifndef PRODUCT -extern "C" void bad_compiled_vtable_index(JavaThread* thread, - oop receiver, +extern "C" void bad_compiled_vtable_index(JavaThread* thread, + oop receiver, int index); #endif @@ -59,7 +56,7 @@ // get receiver klass address npe_addr = __ pc(); - __ movq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes())); + __ load_klass(rax, j_rarg0); // compute entry offset (in words) int entry_offset = @@ -82,14 +79,14 @@ // load methodOop and target address const Register method = rbx; - __ movq(method, Address(rax, - entry_offset * wordSize + - vtableEntry::method_offset_in_bytes())); + __ movptr(method, Address(rax, + entry_offset * wordSize + + vtableEntry::method_offset_in_bytes())); if (DebugVtables) { Label L; - __ cmpq(method, (int)NULL); + __ cmpptr(method, (int32_t)NULL_WORD); __ jcc(Assembler::equal, L); - __ cmpq(Address(method, methodOopDesc::from_compiled_offset()), (int)NULL_WORD); + __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notZero, L); __ stop("Vtable entry is NULL"); __ bind(L); @@ -121,7 +118,7 @@ __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); } #endif - + // Entry arguments: // rax: Interface // j_rarg0: Receiver @@ -129,19 +126,19 @@ // Free registers (non-args) are rax (interface), rbx // get receiver (need to skip return address on top of stack) - + assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0"); // get receiver klass (also an implicit null-check) address npe_addr = __ pc(); - __ movq(rbx, Address(j_rarg0, oopDesc::klass_offset_in_bytes())); + __ load_klass(rbx, j_rarg0); // If we take a trap while this arg is on the stack we will not // be able to walk the stack properly. This is not an issue except // when there are mistakes in this assembly code that could generate // a spurious fault. Ask me how I know... 
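Both the 32-bit and 64-bit itable stubs in this change perform the same search: walk the itableOffsetEntry records that follow the vtable until the expected interface is found (a NULL interface means the class does not implement it, so control reaches the IncompatibleClassChangeError stub), then use that entry's stored offset to reach the itableMethodEntry for the selected method. A simplified model of the loop, using stand-in types rather than the real HotSpot classes:

    struct OffsetEntry { void* interface; int offset; };   // stand-in for itableOffsetEntry

    // Returns the methodOop slot, or NULL to signal the throw_icce path.
    void* itable_lookup(OffsetEntry* first, char* klass_base,
                        void* expected_interface, int method_slot_in_bytes) {
      for (OffsetEntry* e = first; ; e++) {
        if (e->interface == 0) return 0;                    // end of table: ICCE
        if (e->interface == expected_interface)
          return *(void**)(klass_base + e->offset + method_slot_in_bytes);
      }
    }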
- __ pushq(j_rarg1); // Most registers are in use, so save one + __ push(j_rarg1); // Most registers are in use, so save one // compute itable entry offset (in words) const int base = instanceKlass::vtable_start_offset() * wordSize; @@ -150,27 +147,27 @@ // Get length of vtable __ movl(j_rarg1, Address(rbx, instanceKlass::vtable_length_offset() * wordSize)); - __ leaq(rbx, Address(rbx, j_rarg1, Address::times_8, base)); + __ lea(rbx, Address(rbx, j_rarg1, Address::times_8, base)); if (HeapWordsPerLong > 1) { // Round up to align_object_offset boundary - __ round_to_q(rbx, BytesPerLong); + __ round_to(rbx, BytesPerLong); } Label hit, next, entry, throw_icce; __ jmpb(entry); __ bind(next); - __ addq(rbx, itableOffsetEntry::size() * wordSize); + __ addptr(rbx, itableOffsetEntry::size() * wordSize); __ bind(entry); // If the entry is NULL then we've reached the end of the table // without finding the expected interface, so throw an exception - __ movq(j_rarg1, Address(rbx, itableOffsetEntry::interface_offset_in_bytes())); - __ testq(j_rarg1, j_rarg1); + __ movptr(j_rarg1, Address(rbx, itableOffsetEntry::interface_offset_in_bytes())); + __ testptr(j_rarg1, j_rarg1); __ jcc(Assembler::zero, throw_icce); - __ cmpq(rax, j_rarg1); + __ cmpptr(rax, j_rarg1); __ jccb(Assembler::notEqual, next); // We found a hit, move offset into j_rarg1 @@ -184,13 +181,13 @@ // Get methodOop and entrypoint for compiler // Get klass pointer again - __ movq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes())); + __ load_klass(rax, j_rarg0); const Register method = rbx; - __ movq(method, Address(rax, j_rarg1, Address::times_1, method_offset)); + __ movptr(method, Address(rax, j_rarg1, Address::times_1, method_offset)); // Restore saved register, before possible trap. - __ popq(j_rarg1); + __ pop(j_rarg1); // method (rbx): methodOop // j_rarg0: receiver @@ -199,9 +196,9 @@ #ifdef ASSERT if (DebugVtables) { Label L2; - __ cmpq(method, (int)NULL); + __ cmpptr(method, (int32_t)NULL_WORD); __ jcc(Assembler::equal, L2); - __ cmpq(Address(method, methodOopDesc::from_compiled_offset()), (int)NULL_WORD); + __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD); __ jcc(Assembler::notZero, L2); __ stop("compiler entrypoint is null"); __ bind(L2); @@ -212,12 +209,12 @@ // j_rarg0: receiver address ame_addr = __ pc(); __ jmp(Address(method, methodOopDesc::from_compiled_offset())); - + __ bind(throw_icce); // Restore saved register - __ popq(j_rarg1); + __ pop(j_rarg1); __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry())); - + __ flush(); guarantee(__ pc() <= s->code_end(), "overflowed buffer"); @@ -229,10 +226,12 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) { if (is_vtable_stub) { // Vtable stub size - return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0); + return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) + + (UseCompressedOops ? 16 : 0); // 1 leaq can be 3 bytes + 1 long } else { // Itable stub size - return (DebugVtables ? 636 : 72) + (CountCompiledCalls ? 13 : 0); + return (DebugVtables ? 636 : 72) + (CountCompiledCalls ? 13 : 0) + + (UseCompressedOops ? 32 : 0); // 2 leaqs } } --- old/hotspot/src/cpu/x86/vm/x86_32.ad 2009-08-01 04:08:32.234770002 +0100 +++ new/hotspot/src/cpu/x86/vm/x86_32.ad 2009-08-01 04:08:32.078886007 +0100 @@ -1,5 +1,5 @@ // -// Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. 
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // CA 95054 USA or visit www.sun.com if you need additional information or // have any questions. -// +// // // X86 Architecture Description File @@ -32,31 +32,31 @@ register %{ //----------Architecture Description Register Definitions---------------------- // General Registers -// "reg_def" name ( register save type, C convention save type, +// "reg_def" name ( register save type, C convention save type, // ideal register type, encoding ); // Register Save Types: -// +// // NS = No-Save: The register allocator assumes that these registers // can be used without saving upon entry to the method, & // that they do not need to be saved at call sites. -// +// // SOC = Save-On-Call: The register allocator assumes that these registers // can be used without saving upon entry to the method, // but that they must be saved at call sites. -// +// // SOE = Save-On-Entry: The register allocator assumes that these registers // must be saved before using them upon entry to the // method, but they do not need to be saved at call // sites. -// +// // AS = Always-Save: The register allocator assumes that these registers // must be saved before using them upon entry to the // method, & that they must be saved at call sites. // -// Ideal Register Type is used to determine how to save & restore a +// Ideal Register Type is used to determine how to save & restore a // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. -// +// // The encoding number is the actual bit-pattern placed into the opcodes. // General Registers @@ -68,7 +68,7 @@ reg_def ECX(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()); reg_def ESI(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()); reg_def EDI(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()); -// now that adapter frames are gone EBP is always saved and restored by the prolog/epilog code +// now that adapter frames are gone EBP is always saved and restored by the prolog/epilog code reg_def EBP(NS, SOE, Op_RegI, 5, rbp->as_VMReg()); reg_def EDX(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()); reg_def EAX(SOC, SOC, Op_RegI, 0, rax->as_VMReg()); @@ -126,10 +126,10 @@ reg_def XMM7a( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()); reg_def XMM7b( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next()); -// Specify priority of register selection within phases of register -// allocation. Highest priority is first. A useful heuristic is to -// give registers a low priority when they are required by machine -// instructions, like EAX and EDX. Registers which are used as +// Specify priority of register selection within phases of register +// allocation. Highest priority is first. A useful heuristic is to +// give registers a low priority when they are required by machine +// instructions, like EAX and EDX. Registers which are used as // pairs must fall on an even boundry (witness the FPR#L's in this list). // For the Intel integer registers, the equivalent Long pairs are // EDX:EAX, EBX:ECX, and EDI:EBP. 
@@ -138,13 +138,13 @@ FPR3L, FPR3H, FPR4L, FPR4H, FPR5L, FPR5H, FPR6L, FPR6H, FPR7L, FPR7H ); -alloc_class chunk1( XMM0a, XMM0b, - XMM1a, XMM1b, - XMM2a, XMM2b, - XMM3a, XMM3b, - XMM4a, XMM4b, - XMM5a, XMM5b, - XMM6a, XMM6b, +alloc_class chunk1( XMM0a, XMM0b, + XMM1a, XMM1b, + XMM2a, XMM2b, + XMM3a, XMM3b, + XMM4a, XMM4b, + XMM5a, XMM5b, + XMM6a, XMM6b, XMM7a, XMM7b, EFLAGS); @@ -155,7 +155,7 @@ // 2) reg_class compiler_method_oop_reg ( /* as def'd in frame section */ ) // 2) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ ) // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ ) -// +// // Class for all registers reg_class any_reg(EAX, EDX, EBP, EDI, ESI, ECX, EBX, ESP); // Class for general registers @@ -212,9 +212,9 @@ reg_class xmm_reg(XMM0a, XMM1a, XMM2a, XMM3a, XMM4a, XMM5a, XMM6a, XMM7a); // make a double register class for SSE2 registers -reg_class xdb_reg(XMM0a,XMM0b, XMM1a,XMM1b, XMM2a,XMM2b, XMM3a,XMM3b, +reg_class xdb_reg(XMM0a,XMM0b, XMM1a,XMM1b, XMM2a,XMM2b, XMM3a,XMM3b, XMM4a,XMM4b, XMM5a,XMM5b, XMM6a,XMM6b, XMM7a,XMM7b ); - + reg_class dbl_reg( FPR1L,FPR1H, FPR2L,FPR2H, FPR3L,FPR3H, FPR4L,FPR4H, FPR5L,FPR5H, FPR6L,FPR6H, FPR7L,FPR7H ); @@ -222,7 +222,7 @@ reg_class flt_reg0( FPR1L ); reg_class dbl_reg0( FPR1L,FPR1H ); reg_class dbl_reg1( FPR2L,FPR2H ); -reg_class dbl_notreg0( FPR2L,FPR2H, FPR3L,FPR3H, FPR4L,FPR4H, +reg_class dbl_notreg0( FPR2L,FPR2H, FPR3L,FPR3H, FPR4L,FPR4H, FPR5L,FPR5H, FPR6L,FPR6H, FPR7L,FPR7H ); // XMM6 and XMM7 could be used as temporary registers for long, float and @@ -236,7 +236,7 @@ // This is a block of C++ code which provides values, functions, and // definitions necessary in the rest of the architecture description source %{ -#define RELOC_IMM32 Assembler::imm32_operand +#define RELOC_IMM32 Assembler::imm_operand #define RELOC_DISP32 Assembler::disp32_operand #define __ _masm. @@ -263,10 +263,10 @@ static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment) // Static initialization during VM startup. -static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF)); -static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF)); -static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000)); -static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000)); +static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF)); +static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF)); +static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000)); +static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000)); // !!!!! 
Special hack to get all type of calls to specify the byte offset // from the start of the call to the point where the return address @@ -344,7 +344,7 @@ // EMIT_OPCODE() w/ relocation information void emit_opcode(CodeBuffer &cbuf, int code, relocInfo::relocType reloc, int offset = 0) { cbuf.relocate(cbuf.inst_mark() + offset, reloc); - emit_opcode(cbuf, code); + emit_opcode(cbuf, code); } // EMIT_D8() @@ -366,7 +366,7 @@ } // emit 32 bit value and construct relocation entry from relocInfo::relocType -void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc, +void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc, int format) { cbuf.relocate(cbuf.inst_mark(), reloc, format); @@ -375,7 +375,7 @@ } // emit 32 bit value and construct relocation entry from RelocationHolder -void emit_d32_reloc(CodeBuffer &cbuf, int d32, RelocationHolder const& rspec, +void emit_d32_reloc(CodeBuffer &cbuf, int d32, RelocationHolder const& rspec, int format) { #ifdef ASSERT if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) { @@ -408,11 +408,11 @@ if ((index == 0x4) && (scale == 0) && (base != ESP_enc)) { // If no displacement, mode is 0x0; unless base is [EBP] - if ( (displace == 0) && (base != EBP_enc) ) { + if ( (displace == 0) && (base != EBP_enc) ) { emit_rm(cbuf, 0x0, reg_encoding, base); } else { // If 8-bit displacement, mode 0x1 - if ((displace >= -128) && (displace <= 127) + if ((displace >= -128) && (displace <= 127) && !(displace_is_oop) ) { emit_rm(cbuf, 0x1, reg_encoding, base); emit_d8(cbuf, displace); @@ -495,8 +495,8 @@ void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { Compile* C = ra_->C; if( C->in_24_bit_fp_mode() ) { - tty->print("FLDCW 24 bit fpu control word"); - tty->print_cr(""); tty->print("\t"); + st->print("FLDCW 24 bit fpu control word"); + st->print_cr(""); st->print("\t"); } int framesize = C->frame_slots() << LogBytesPerInt; @@ -510,22 +510,22 @@ // stack. But the stack safety zone should account for that. // See bugs 4446381, 4468289, 4497237. 
if (C->need_stack_bang(framesize)) { - tty->print_cr("# stack bang"); tty->print("\t"); + st->print_cr("# stack bang"); st->print("\t"); } - tty->print_cr("PUSHL EBP"); tty->print("\t"); + st->print_cr("PUSHL EBP"); st->print("\t"); if( VerifyStackAtCalls ) { // Majik cookie to verify stack depth - tty->print("PUSH 0xBADB100D\t# Majik cookie for stack depth check"); - tty->print_cr(""); tty->print("\t"); - framesize -= wordSize; + st->print("PUSH 0xBADB100D\t# Majik cookie for stack depth check"); + st->print_cr(""); st->print("\t"); + framesize -= wordSize; } if ((C->in_24_bit_fp_mode() || VerifyStackAtCalls ) && framesize < 128 ) { if (framesize) { - tty->print("SUB ESP,%d\t# Create frame",framesize); + st->print("SUB ESP,%d\t# Create frame",framesize); } } else { - tty->print("SUB ESP,%d\t# Create frame",framesize); + st->print("SUB ESP,%d\t# Create frame",framesize); } } #endif @@ -573,7 +573,7 @@ if( VerifyStackAtCalls ) { // Majik cookie to verify stack depth emit_opcode(cbuf, 0x68); // push 0xbadb100d emit_d32(cbuf, 0xbadb100d); - framesize -= wordSize; + framesize -= wordSize; } if ((C->in_24_bit_fp_mode() || VerifyStackAtCalls ) && framesize < 128 ) { @@ -584,20 +584,20 @@ } } else { emit_opcode(cbuf, 0x81); // sub SP,#framesize - emit_rm(cbuf, 0x3, 0x05, ESP_enc); + emit_rm(cbuf, 0x3, 0x05, ESP_enc); emit_d32(cbuf, framesize); } C->set_frame_complete(cbuf.code_end() - cbuf.code_begin()); -#ifdef ASSERT - if (VerifyStackAtCalls) { +#ifdef ASSERT + if (VerifyStackAtCalls) { Label L; MacroAssembler masm(&cbuf); - masm.pushl(rax); - masm.movl(rax, rsp); - masm.andl(rax, StackAlignmentInBytes-1); - masm.cmpl(rax, StackAlignmentInBytes-wordSize); - masm.popl(rax); + masm.push(rax); + masm.mov(rax, rsp); + masm.andptr(rax, StackAlignmentInBytes-1); + masm.cmpptr(rax, StackAlignmentInBytes-wordSize); + masm.pop(rax); masm.jcc(Assembler::equal, L); masm.stop("Stack is not properly aligned!"); masm.bind(L); @@ -725,18 +725,19 @@ return rc_xmm; } -static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size ) { +static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset, int reg, + int opcode, const char *op_str, int size, outputStream* st ) { if( cbuf ) { emit_opcode (*cbuf, opcode ); encode_RegMem(*cbuf, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, false); #ifndef PRODUCT - } else if( !do_size ) { - if( size != 0 ) tty->print("\n\t"); + } else if( !do_size ) { + if( size != 0 ) st->print("\n\t"); if( opcode == 0x8B || opcode == 0x89 ) { // MOV - if( is_load ) tty->print("%s %s,[ESP + #%d]",op_str,Matcher::regName[reg],offset); - else tty->print("%s [ESP + #%d],%s",op_str,offset,Matcher::regName[reg]); + if( is_load ) st->print("%s %s,[ESP + #%d]",op_str,Matcher::regName[reg],offset); + else st->print("%s [ESP + #%d],%s",op_str,offset,Matcher::regName[reg]); } else { // FLD, FST, PUSH, POP - tty->print("%s [ESP + #%d]",op_str,offset); + st->print("%s [ESP + #%d]",op_str,offset); } #endif } @@ -745,8 +746,8 @@ } // Helper for XMM registers. Extra opcode bits, limited syntax. -static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load, - int offset, int reg_lo, int reg_hi, int size ) { +static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load, + int offset, int reg_lo, int reg_hi, int size, outputStream* st ) { if( cbuf ) { if( reg_lo+1 == reg_hi ) { // double move? if( is_load && !UseXmmLoadAndClearUpper ) @@ -763,19 +764,19 @@ emit_opcode(*cbuf, is_load ? 
0x10 : 0x11 ); encode_RegMem(*cbuf, Matcher::_regEncode[reg_lo], ESP_enc, 0x4, 0, offset, false); #ifndef PRODUCT - } else if( !do_size ) { - if( size != 0 ) tty->print("\n\t"); + } else if( !do_size ) { + if( size != 0 ) st->print("\n\t"); if( reg_lo+1 == reg_hi ) { // double move? - if( is_load ) tty->print("%s %s,[ESP + #%d]", + if( is_load ) st->print("%s %s,[ESP + #%d]", UseXmmLoadAndClearUpper ? "MOVSD " : "MOVLPD", - Matcher::regName[reg_lo], offset); - else tty->print("MOVSD [ESP + #%d],%s", - offset, Matcher::regName[reg_lo]); + Matcher::regName[reg_lo], offset); + else st->print("MOVSD [ESP + #%d],%s", + offset, Matcher::regName[reg_lo]); } else { - if( is_load ) tty->print("MOVSS %s,[ESP + #%d]", - Matcher::regName[reg_lo], offset); - else tty->print("MOVSS [ESP + #%d],%s", - offset, Matcher::regName[reg_lo]); + if( is_load ) st->print("MOVSS %s,[ESP + #%d]", + Matcher::regName[reg_lo], offset); + else st->print("MOVSS [ESP + #%d],%s", + offset, Matcher::regName[reg_lo]); } #endif } @@ -784,8 +785,8 @@ } -static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo, - int src_hi, int dst_hi, int size ) { +static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo, + int src_hi, int dst_hi, int size, outputStream* st ) { if( UseXmmRegToRegMoveAll ) {//Use movaps,movapd to move between xmm registers if( cbuf ) { if( (src_lo+1 == src_hi && dst_lo+1 == dst_hi) ) { @@ -795,12 +796,12 @@ emit_opcode(*cbuf, 0x28 ); emit_rm (*cbuf, 0x3, Matcher::_regEncode[dst_lo], Matcher::_regEncode[src_lo] ); #ifndef PRODUCT - } else if( !do_size ) { - if( size != 0 ) tty->print("\n\t"); + } else if( !do_size ) { + if( size != 0 ) st->print("\n\t"); if( src_lo+1 == src_hi && dst_lo+1 == dst_hi ) { // double move? - tty->print("MOVAPD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]); + st->print("MOVAPD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]); } else { - tty->print("MOVAPS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]); + st->print("MOVAPS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]); } #endif } @@ -812,12 +813,12 @@ emit_opcode(*cbuf, 0x10 ); emit_rm (*cbuf, 0x3, Matcher::_regEncode[dst_lo], Matcher::_regEncode[src_lo] ); #ifndef PRODUCT - } else if( !do_size ) { - if( size != 0 ) tty->print("\n\t"); + } else if( !do_size ) { + if( size != 0 ) st->print("\n\t"); if( src_lo+1 == src_hi && dst_lo+1 == dst_hi ) { // double move? 
- tty->print("MOVSD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]); + st->print("MOVSD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]); } else { - tty->print("MOVSS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]); + st->print("MOVSS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]); } #endif } @@ -825,28 +826,29 @@ } } -static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size ) { +static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size, outputStream* st ) { if( cbuf ) { emit_opcode(*cbuf, 0x8B ); emit_rm (*cbuf, 0x3, Matcher::_regEncode[dst], Matcher::_regEncode[src] ); #ifndef PRODUCT - } else if( !do_size ) { - if( size != 0 ) tty->print("\n\t"); - tty->print("MOV %s,%s",Matcher::regName[dst],Matcher::regName[src]); + } else if( !do_size ) { + if( size != 0 ) st->print("\n\t"); + st->print("MOV %s,%s",Matcher::regName[dst],Matcher::regName[src]); #endif } return size+2; } -static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi, int offset, int size ) { +static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi, + int offset, int size, outputStream* st ) { if( src_lo != FPR1L_num ) { // Move value to top of FP stack, if not already there if( cbuf ) { emit_opcode( *cbuf, 0xD9 ); // FLD (i.e., push it) emit_d8( *cbuf, 0xC0-1+Matcher::_regEncode[src_lo] ); #ifndef PRODUCT - } else if( !do_size ) { - if( size != 0 ) tty->print("\n\t"); - tty->print("FLD %s",Matcher::regName[src_lo]); + } else if( !do_size ) { + if( size != 0 ) st->print("\n\t"); + st->print("FLD %s",Matcher::regName[src_lo]); #endif } size += 2; @@ -864,11 +866,11 @@ assert( !OptoReg::is_valid(src_hi) && !OptoReg::is_valid(dst_hi), "no non-adjacent float-stores" ); } - return impl_helper(cbuf,do_size,false,offset,st_op,op,op_str,size); + return impl_helper(cbuf,do_size,false,offset,st_op,op,op_str,size, st); } uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const { - // Get registers to move + // Get registers to move OptoReg::Name src_second = ra_->get_reg_second(in(1)); OptoReg::Name src_first = ra_->get_reg_first(in(1)); OptoReg::Name dst_second = ra_->get_reg_second(this ); @@ -884,7 +886,7 @@ // Generate spill code! 
int size = 0; - if( src_first == dst_first && src_second == dst_second ) + if( src_first == dst_first && src_second == dst_second ) return size; // Self copy, no move // -------------------------------------- @@ -892,16 +894,16 @@ if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) { if( src_second == dst_first ) { // overlapping stack copy ranges assert( src_second_rc == rc_stack && dst_second_rc == rc_stack, "we only expect a stk-stk copy here" ); - size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size); - size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size); + size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st); + size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st); src_second_rc = dst_second_rc = rc_bad; // flag as already moved the second bits } // move low bits - size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),ESI_num,0xFF,"PUSH ",size); - size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),EAX_num,0x8F,"POP ",size); + size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),ESI_num,0xFF,"PUSH ",size, st); + size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),EAX_num,0x8F,"POP ",size, st); if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) { // mov second bits - size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size); - size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size); + size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st); + size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st); } return size; } @@ -909,22 +911,22 @@ // -------------------------------------- // Check for integer reg-reg copy if( src_first_rc == rc_int && dst_first_rc == rc_int ) - size = impl_mov_helper(cbuf,do_size,src_first,dst_first,size); - + size = impl_mov_helper(cbuf,do_size,src_first,dst_first,size, st); + // Check for integer store if( src_first_rc == rc_int && dst_first_rc == rc_stack ) - size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first,0x89,"MOV ",size); + size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first,0x89,"MOV ",size, st); // Check for integer load if( dst_first_rc == rc_int && src_first_rc == rc_stack ) - size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size); + size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size, st); // -------------------------------------- // Check for float reg-reg copy if( src_first_rc == rc_float && dst_first_rc == rc_float ) { assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad) || (src_first+1 == src_second && dst_first+1 == dst_second), "no non-adjacent float-moves" ); - if( cbuf ) { + if( cbuf ) { // Note the mucking with the register encode to compensate for the 0/1 // indexing issue mentioned in a comment in the reg_def sections @@ -940,18 +942,18 @@ emit_d8 (*cbuf, 0xD0+Matcher::_regEncode[dst_first]-1 ); } #ifndef PRODUCT - } else if( !do_size ) { - if( size != 0 ) st->print("\n\t"); + } else if( !do_size ) { + if( size != 0 ) st->print("\n\t"); if( src_first != FPR1L_num ) st->print("FLD %s\n\tFSTP %s",Matcher::regName[src_first],Matcher::regName[dst_first]); else st->print( "FST %s", Matcher::regName[dst_first]); #endif } return size + 
((src_first != FPR1L_num) ? 2+2 : 2); } - + // Check for float store if( src_first_rc == rc_float && dst_first_rc == rc_stack ) { - return impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,ra_->reg2offset(dst_first),size); + return impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,ra_->reg2offset(dst_first),size, st); } // Check for float load @@ -973,8 +975,8 @@ emit_opcode (*cbuf, 0xDD ); // FSTP ST(i) emit_d8 (*cbuf, 0xD8+Matcher::_regEncode[dst_first] ); #ifndef PRODUCT - } else if( !do_size ) { - if( size != 0 ) st->print("\n\t"); + } else if( !do_size ) { + if( size != 0 ) st->print("\n\t"); st->print("%s ST,[ESP + #%d]\n\tFSTP %s",op_str, offset,Matcher::regName[dst_first]); #endif } @@ -985,42 +987,42 @@ // Check for xmm reg-reg copy if( src_first_rc == rc_xmm && dst_first_rc == rc_xmm ) { assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad) || - (src_first+1 == src_second && dst_first+1 == dst_second), + (src_first+1 == src_second && dst_first+1 == dst_second), "no non-adjacent float-moves" ); - return impl_movx_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size); + return impl_movx_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st); } // Check for xmm store if( src_first_rc == rc_xmm && dst_first_rc == rc_stack ) { - return impl_x_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first, src_second, size); + return impl_x_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first, src_second, size, st); } // Check for float xmm load if( dst_first_rc == rc_xmm && src_first_rc == rc_stack ) { - return impl_x_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first, dst_second, size); + return impl_x_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first, dst_second, size, st); } // Copy from float reg to xmm reg if( dst_first_rc == rc_xmm && src_first_rc == rc_float ) { // copy to the top of stack from floating point reg - // and use LEA to preserve flags + // and use LEA to preserve flags if( cbuf ) { emit_opcode(*cbuf,0x8D); // LEA ESP,[ESP-8] emit_rm(*cbuf, 0x1, ESP_enc, 0x04); emit_rm(*cbuf, 0x0, 0x04, ESP_enc); emit_d8(*cbuf,0xF8); #ifndef PRODUCT - } else if( !do_size ) { - if( size != 0 ) st->print("\n\t"); + } else if( !do_size ) { + if( size != 0 ) st->print("\n\t"); st->print("LEA ESP,[ESP-8]"); #endif } size += 4; - size = impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,0,size); + size = impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,0,size, st); // Copy from the temp memory to the xmm reg. - size = impl_x_helper(cbuf,do_size,true ,0,dst_first, dst_second, size); + size = impl_x_helper(cbuf,do_size,true ,0,dst_first, dst_second, size, st); if( cbuf ) { emit_opcode(*cbuf,0x8D); // LEA ESP,[ESP+8] @@ -1028,8 +1030,8 @@ emit_rm(*cbuf, 0x0, 0x04, ESP_enc); emit_d8(*cbuf,0x08); #ifndef PRODUCT - } else if( !do_size ) { - if( size != 0 ) st->print("\n\t"); + } else if( !do_size ) { + if( size != 0 ) st->print("\n\t"); st->print("LEA ESP,[ESP+8]"); #endif } @@ -1041,21 +1043,21 @@ // -------------------------------------------------------------------- // Check for second bits still needing moving. 
- if( src_second == dst_second ) + if( src_second == dst_second ) return size; // Self copy; no move assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" ); // Check for second word int-int move if( src_second_rc == rc_int && dst_second_rc == rc_int ) - return impl_mov_helper(cbuf,do_size,src_second,dst_second,size); + return impl_mov_helper(cbuf,do_size,src_second,dst_second,size, st); // Check for second word integer store if( src_second_rc == rc_int && dst_second_rc == rc_stack ) - return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size); + return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st); // Check for second word integer load if( dst_second_rc == rc_int && src_second_rc == rc_stack ) - return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size); + return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st); Unimplemented(); @@ -1150,7 +1152,8 @@ __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM32); // static stub relocation also tags the methodOop in the code-stream. __ movoop(rbx, (jobject)NULL); // method is zapped till fixup time - __ jump(RuntimeAddress((address)-1)); + // This is recognized as unresolved by relocs/nativeInst/ic code + __ jump(RuntimeAddress(__ pc())); __ end_a_stub(); // Update current stubs pointer and restore code_end. @@ -1181,7 +1184,7 @@ #ifdef ASSERT uint code_size = cbuf.code_size(); #endif - masm.cmpl(rax, Address(rcx, oopDesc::klass_offset_in_bytes())); + masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes())); masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub())); /* WARNING these NOPs are critical so that verified entry point is properly @@ -1235,7 +1238,7 @@ return 5 + NativeJump::instruction_size; // pushl(); jmp; } -// Emit deopt handler code. +// Emit deopt handler code. int emit_deopt_handler(CodeBuffer& cbuf) { // Note that the code buffer's inst_mark is always relative to insts. @@ -1317,7 +1320,11 @@ // // NOTE: If the platform does not provide any short branch variants, then // this method should return false for offset 0. -bool Matcher::is_short_branch_offset(int offset) { +bool Matcher::is_short_branch_offset(int rule, int offset) { + // the short version of jmpConUCF2 contains multiple branches, + // making the reach slightly less + if (rule == jmpConUCF2_rule) + return (-126 <= offset && offset <= 125); return (-128 <= offset && offset <= 127); } @@ -1343,9 +1350,9 @@ // register first, so they would do better to copy the constant from stack. const bool Matcher::rematerialize_float_constants = true; -// If CPU can load and store mis-aligned doubles directly then no fixup is -// needed. Else we split the double into 2 integer pieces and move it -// piece-by-piece. Only happens when passing doubles into C code as the +// If CPU can load and store mis-aligned doubles directly then no fixup is +// needed. Else we split the double into 2 integer pieces and move it +// piece-by-piece. Only happens when passing doubles into C code as the // Java calling convention forces doubles to be aligned. 
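The rule argument added to Matcher::is_short_branch_offset above exists because a rel8 branch encodes a signed 8-bit displacement, giving a single short branch a reach of -128..127 bytes, while the short form of jmpConUCF2 emits an extra two-byte branch inside the same pseudo-instruction, so the bound is conservatively tightened by that amount. Expressed as a check (a sketch; the boolean stands in for the rule comparison):

    bool fits_short_branch(bool is_jmpConUCF2, int offset) {
      int slack = is_jmpConUCF2 ? 2 : 0;        // size of the additional short branch
      return (-128 + slack) <= offset && offset <= (127 - slack);
    }

This reproduces the -126..125 window the patch uses for jmpConUCF2 and the usual -128..127 window for everything else.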
const bool Matcher::misaligned_doubles_ok = true; @@ -1354,9 +1361,9 @@ // Get the memory operand from the node uint numopnds = node->num_opnds(); // Virtual call for number of operands uint skipped = node->oper_input_base(); // Sum of leaves skipped so far - assert( idx >= skipped, "idx too low in pd_implicit_null_fixup" ); + assert( idx >= skipped, "idx too low in pd_implicit_null_fixup" ); uint opcnt = 1; // First operand - uint num_edges = node->_opnds[1]->num_edges(); // leaves for first operand + uint num_edges = node->_opnds[1]->num_edges(); // leaves for first operand while( idx >= skipped+num_edges ) { skipped += num_edges; opcnt++; // Bump operand count @@ -1410,8 +1417,8 @@ const bool Matcher::int_in_long = false; // Return whether or not this register is ever used as an argument. This -// function is used on startup to build the trampoline stubs in generateOptoStub. -// Registers not mentioned will be killed by the VM call in the trampoline, and +// function is used on startup to build the trampoline stubs in generateOptoStub. +// Registers not mentioned will be killed by the VM call in the trampoline, and // arguments in those registers not be available to the callee. bool Matcher::can_be_java_arg( int reg ) { if( reg == ECX_num || reg == EDX_num ) return true; @@ -1519,7 +1526,7 @@ // rdx: remainder (= rax, irem reg) 0 // // Code sequnce: - // + // // 81 F8 00 00 00 80 cmp rax,80000000h // 0F 85 0B 00 00 00 jne normal_case // 33 D2 xor rdx,edx @@ -1530,7 +1537,7 @@ // F7 F9 idiv rax,ecx // done: // - emit_opcode(cbuf,0x81); emit_d8(cbuf,0xF8); + emit_opcode(cbuf,0x81); emit_d8(cbuf,0xF8); emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00); emit_opcode(cbuf,0x00); emit_d8(cbuf,0x80); // cmp rax,80000000h emit_opcode(cbuf,0x0F); emit_d8(cbuf,0x85); @@ -1627,7 +1634,7 @@ enc_class bswap_long_bytes(eRegL dst) %{ // BSWAP int destlo = $dst$$reg; - int desthi = HIGH_FROM_LOW(destlo); + int desthi = HIGH_FROM_LOW(destlo); // bswap lo emit_opcode(cbuf, 0x0F); emit_cc(cbuf, 0xC8, destlo); @@ -1687,20 +1694,20 @@ // Compare super with sub directly, since super is not in its own SSA. // The compiler used to emit this test, but we fold it in here, // to allow platform-specific tweaking on sparc. - __ cmpl(Reax, Resi); + __ cmpptr(Reax, Resi); __ jcc(Assembler::equal, hit); #ifndef PRODUCT - __ increment(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr)); + __ incrementl(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr)); #endif //PRODUCT - __ movl(Redi,Address(Resi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())); + __ movptr(Redi,Address(Resi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())); __ movl(Recx,Address(Redi,arrayOopDesc::length_offset_in_bytes())); - __ addl(Redi,arrayOopDesc::base_offset_in_bytes(T_OBJECT)); + __ addptr(Redi,arrayOopDesc::base_offset_in_bytes(T_OBJECT)); __ repne_scan(); __ jcc(Assembler::notEqual, miss); - __ movl(Address(Resi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()),Reax); + __ movptr(Address(Resi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()),Reax); __ bind(hit); if( $primary ) - __ xorl(Redi,Redi); + __ xorptr(Redi,Redi); __ bind(miss); %} @@ -1749,15 +1756,15 @@ // optimizer if the C function is a pure function. 
__ ffree(0); } else if (rt == T_FLOAT) { - __ leal(rsp, Address(rsp, -4)); + __ lea(rsp, Address(rsp, -4)); __ fstp_s(Address(rsp, 0)); __ movflt(xmm0, Address(rsp, 0)); - __ leal(rsp, Address(rsp, 4)); + __ lea(rsp, Address(rsp, 4)); } else if (rt == T_DOUBLE) { - __ leal(rsp, Address(rsp, -8)); + __ lea(rsp, Address(rsp, -8)); __ fstp_d(Address(rsp, 0)); __ movdbl(xmm0, Address(rsp, 0)); - __ leal(rsp, Address(rsp, 8)); + __ lea(rsp, Address(rsp, 8)); } } %} @@ -1781,21 +1788,21 @@ enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine - // who we intended to call. + // who we intended to call. cbuf.set_inst_mark(); - $$$emit8$primary; - if ( !_method ) { + $$$emit8$primary; + if ( !_method ) { emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4), runtime_call_Relocation::spec(), RELOC_IMM32 ); - } else if(_optimized_virtual) { + } else if(_optimized_virtual) { emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4), opt_virtual_call_Relocation::spec(), RELOC_IMM32 ); - } else { + } else { emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4), static_call_Relocation::spec(), RELOC_IMM32 ); } if( _method ) { // Emit stub for static call - emit_java_to_interp(cbuf); + emit_java_to_interp(cbuf); } %} @@ -1829,28 +1836,28 @@ enc_class Xor_Reg (eRegI dst) %{ emit_opcode(cbuf, 0x33); - emit_rm(cbuf, 0x3, $dst$$reg, $dst$$reg); + emit_rm(cbuf, 0x3, $dst$$reg, $dst$$reg); %} -// Following encoding is no longer used, but may be restored if calling +// Following encoding is no longer used, but may be restored if calling // convention changes significantly. // Became: Xor_Reg(EBP), Java_To_Runtime( labl ) -// +// // enc_class Java_Interpreter_Call (label labl) %{ // JAVA INTERPRETER CALL // // int ic_reg = Matcher::inline_cache_reg(); // // int ic_encode = Matcher::_regEncode[ic_reg]; // // int imo_reg = Matcher::interpreter_method_oop_reg(); // // int imo_encode = Matcher::_regEncode[imo_reg]; -// +// // // // Interpreter expects method_oop in EBX, currently a callee-saved register, // // // so we load it immediately before the call // // emit_opcode(cbuf, 0x8B); // MOV imo_reg,ic_reg # method_oop // // emit_rm(cbuf, 0x03, imo_encode, ic_encode ); // R/M byte -// +// // // xor rbp,ebp // emit_opcode(cbuf, 0x33); -// emit_rm(cbuf, 0x3, EBP_enc, EBP_enc); -// +// emit_rm(cbuf, 0x3, EBP_enc, EBP_enc); +// // // CALL to interpreter. // cbuf.set_inst_mark(); // $$$emit8$primary; @@ -1886,7 +1893,7 @@ if (src_con == 0) { // xor dst, dst emit_opcode(cbuf, 0x33); - emit_rm(cbuf, 0x3, dst_enc, dst_enc); + emit_rm(cbuf, 0x3, dst_enc, dst_enc); } else { emit_opcode(cbuf, $primary + dst_enc); emit_d32(cbuf, src_con); @@ -1901,7 +1908,7 @@ if (src_con == 0) { // xor dst, dst emit_opcode(cbuf, 0x33); - emit_rm(cbuf, 0x3, dst_enc, dst_enc); + emit_rm(cbuf, 0x3, dst_enc, dst_enc); } else { emit_opcode(cbuf, $primary + dst_enc); emit_d32(cbuf, src_con); @@ -1985,7 +1992,7 @@ %} enc_class MovL2XD_reg(regXD dst, eRegL src, regXD tmp) %{ - { // MOVD $dst,$src.lo + { // MOVD $dst,$src.lo emit_opcode(cbuf,0x66); emit_opcode(cbuf,0x0F); emit_opcode(cbuf,0x6E); @@ -2110,7 +2117,7 @@ // Cmp-xchg long value. // Note: we need to swap rbx, and rcx before and after the // cmpxchg8 instruction because the instruction uses - // rcx as the high order word of the new value to store but + // rcx as the high order word of the new value to store but // our register encoding uses rbx,. 
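The rbx/rcx swap described above is forced by the instruction's fixed register contract: CMPXCHG8B m64 compares EDX:EAX with the 8-byte memory operand and, on a match, stores ECX:EBX into it, setting ZF accordingly. A C-level model of that contract (the real instruction is atomic under a LOCK prefix; this sketch is not):

    #include <cstdint>

    bool cmpxchg8b_model(uint64_t* m64, uint64_t edx_eax /* compare value */,
                         uint64_t ecx_ebx /* new value */) {
      if (*m64 == edx_eax) { *m64 = ecx_ebx; return true; }  // ZF = 1
      return false;   // ZF = 0; on hardware EDX:EAX is reloaded from *m64
    }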
enc_class enc_cmpxchg8(eSIRegP mem_ptr) %{ @@ -2131,7 +2138,7 @@ enc_class enc_cmpxchg(eSIRegP mem_ptr) %{ // [Lock] - if( os::is_MP() ) + if( os::is_MP() ) emit_opcode(cbuf,0xF0); // CMPXCHG [Eptr] @@ -2178,7 +2185,7 @@ assert( !$mem->disp_is_oop(), "Cannot add 4 to oop" ); encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, false/*disp_is_oop*/); %} - + enc_class move_long_small_shift( eRegL dst, immI_1_31 cnt ) %{ int r1, r2; if( $tertiary == 0xA4 ) { r1 = $dst$$reg; r2 = HIGH_FROM_LOW($dst$$reg); } @@ -2193,7 +2200,7 @@ %} enc_class move_long_big_shift_sign( eRegL dst, immI_32_63 cnt ) %{ - emit_opcode( cbuf, 0x8B ); // Move + emit_opcode( cbuf, 0x8B ); // Move emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg)); emit_d8(cbuf,$primary); emit_rm(cbuf, 0x3, $secondary, $dst$$reg); @@ -2233,7 +2240,7 @@ %} // !!!!! Special Custom Code used by MemMove, and stack access instructions !!!!! - // + // // Clone of RegMem except the RM-byte's reg/opcode field is an ADLC-time constant // and it never needs relocation information. // Frequently used to move data between FPU's Stack Top and memory. @@ -2291,7 +2298,7 @@ emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg); %} - enc_class enc_FP_store(memory mem, regD src) %{ + enc_class enc_FP_store(memory mem, regD src) %{ // If src is FPR1, we can just FST to store it. // Else we need to FLD it to FPR1, then FSTP to store/pop it. int reg_encoding = 0x2; // Just store @@ -2320,7 +2327,7 @@ // SETLT $dst emit_opcode(cbuf,0x0F); emit_opcode(cbuf,0x9C); - emit_rm( cbuf, 0x3, 0x4, $dst$$reg ); + emit_rm( cbuf, 0x3, 0x4, $dst$$reg ); %} enc_class enc_cmpLTP(ncxRegI p, ncxRegI q, ncxRegI y, eCXRegI tmp) %{ // cadd_cmpLT @@ -2377,7 +2384,7 @@ emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg ); // CLR $dst.lo emit_opcode(cbuf, 0x33); - emit_rm(cbuf, 0x3, $dst$$reg, $dst$$reg); + emit_rm(cbuf, 0x3, $dst$$reg, $dst$$reg); // small: // SHLD $dst.hi,$dst.lo,$shift emit_opcode(cbuf,0x0F); @@ -2401,7 +2408,7 @@ emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg) ); // CLR $dst.hi emit_opcode(cbuf, 0x33); - emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($dst$$reg)); + emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($dst$$reg)); // small: // SHRD $dst.lo,$dst.hi,$shift emit_opcode(cbuf,0x0F); @@ -2539,7 +2546,7 @@ %} // Push FPU's double to a stack-slot, and pop FPU-stack - enc_class Pop_Mem_Reg_D( stackSlotD dst, regD src ) %{ + enc_class Pop_Mem_Reg_D( stackSlotD dst, regD src ) %{ int pop = 0x02; if ($src$$reg != FPR1L_enc) { emit_opcode( cbuf, 0xD9 ); // FLD ST(i-1) @@ -2550,7 +2557,7 @@ %} // Push FPU's double to a FPU-stack-slot, and pop FPU-stack - enc_class Pop_Reg_Reg_D( regD dst, regF src ) %{ + enc_class Pop_Reg_Reg_D( regD dst, regF src ) %{ int pop = 0xD0 - 1; // -1 since we skip FLD if ($src$$reg != FPR1L_enc) { emit_opcode( cbuf, 0xD9 ); // FLD ST(src-1) @@ -2806,7 +2813,7 @@ // // test rax,0x0400 // emit_opcode( cbuf, 0xA9 ); // emit_d32 ( cbuf, 0x00000400 ); - // + // // jz exit (no unordered comparison) emit_opcode( cbuf, 0x74 ); emit_d8 ( cbuf, 0x02 ); @@ -2819,7 +2826,7 @@ enc_class cmpF_P6_fixup() %{ // Fixup the integer flags in case comparison involved a NaN - // + // // JNP exit (no unordered comparison, P-flag is set by NaN) emit_opcode( cbuf, 0x7B ); emit_d8 ( cbuf, 0x03 ); @@ -2876,7 +2883,7 @@ emit_d32( cbuf, 1 ); %} - + // XMM version of CmpF_Result. Because the XMM compare // instructions set the EFLAGS directly. It becomes simpler than // the float version above. 
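The EFLAGS settings the XMM compare relies on are: an unordered result (a NaN operand) sets ZF, PF and CF; greater clears all three; less sets only CF; equal sets only ZF. That is why the sequence below tests parity first, then equal, then above. A scalar model of the resulting three-way value, with the NaN result left as a parameter since the bytecode flavour (fcmpl vs fcmpg) decides its sign:

    int three_way_compare(float a, float b, int nan_result) {
      if (a != a || b != b) return nan_result;   // unordered: parity path
      if (a == b) return 0;                      // equal path
      return (a < b) ? -1 : 1;                   // below / above paths
    }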
@@ -2888,10 +2895,10 @@ __ jccb(Assembler::equal, done); __ jccb(Assembler::above, inc); __ bind(nan); - __ decrement(as_Register($dst$$reg)); + __ decrement(as_Register($dst$$reg)); // NO L qqq __ jmpb(done); __ bind(inc); - __ increment(as_Register($dst$$reg)); + __ increment(as_Register($dst$$reg)); // NO L qqq __ bind(done); %} @@ -2985,7 +2992,7 @@ // MOV $tmp,$src.lo encode_Copy( cbuf, $tmp$$reg, $src$$reg ); // IMUL $tmp,EDX - emit_opcode( cbuf, 0x0F ); + emit_opcode( cbuf, 0x0F ); emit_opcode( cbuf, 0xAF ); emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($dst$$reg) ); // MOV EDX,$src.hi @@ -3039,7 +3046,7 @@ // Restore stack emit_opcode(cbuf, 0x83); // add SP, #framesize emit_rm(cbuf, 0x3, 0x00, ESP_enc); - emit_d8(cbuf, 4*4); + emit_d8(cbuf, 4*4); %} enc_class long_mod( eRegL src1, eRegL src2 ) %{ @@ -3058,7 +3065,7 @@ // Restore stack emit_opcode(cbuf, 0x83); // add SP, #framesize emit_rm(cbuf, 0x3, 0x00, ESP_enc); - emit_d8(cbuf, 4*4); + emit_d8(cbuf, 4*4); %} enc_class long_cmp_flags0( eRegL src, eRegI tmp ) %{ @@ -3105,7 +3112,7 @@ emit_opcode( cbuf, 0x1B ); emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src$$reg) ); %} - + // Sniff, sniff... smells like Gnu Superoptimizer enc_class neg_long( eRegL dst ) %{ emit_opcode(cbuf,0xF7); // NEG hi @@ -3158,43 +3165,43 @@ enc_class mov_i2x(regXD dst, eRegI src) %{ MacroAssembler _masm(&cbuf); - __ movd(as_XMMRegister($dst$$reg), as_Register($src$$reg)); + __ movdl(as_XMMRegister($dst$$reg), as_Register($src$$reg)); %} - // Because the transitions from emitted code to the runtime - // monitorenter/exit helper stubs are so slow it's critical that - // we inline both the stack-locking fast-path and the inflated fast path. + // Because the transitions from emitted code to the runtime + // monitorenter/exit helper stubs are so slow it's critical that + // we inline both the stack-locking fast-path and the inflated fast path. // // See also: cmpFastLock and cmpFastUnlock. - // - // What follows is a specialized inline transliteration of the code - // in slow_enter() and slow_exit(). If we're concerned about I$ bloat - // another option would be to emit TrySlowEnter and TrySlowExit methods - // at startup-time. These methods would accept arguments as - // (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure - // indications in the icc.ZFlag. Fast_Lock and Fast_Unlock would simply - // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit. + // + // What follows is a specialized inline transliteration of the code + // in slow_enter() and slow_exit(). If we're concerned about I$ bloat + // another option would be to emit TrySlowEnter and TrySlowExit methods + // at startup-time. These methods would accept arguments as + // (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure + // indications in the icc.ZFlag. Fast_Lock and Fast_Unlock would simply + // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit. // In practice, however, the # of lock sites is bounded and is usually small. // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer // if the processor uses simple bimodal branch predictors keyed by EIP // Since the helper routines would be called from multiple synchronization - // sites. + // sites. // - // An even better approach would be write "MonitorEnter()" and "MonitorExit()" - // in java - using j.u.c and unsafe - and just bind the lock and unlock sites - // to those specialized methods. 
That'd give us a mostly platform-independent - // implementation that the JITs could optimize and inline at their pleasure. - // Done correctly, the only time we'd need to cross to native could would be - // to park() or unpark() threads. We'd also need a few more unsafe operators - // to (a) prevent compiler-JIT reordering of non-volatile accesses, and - // (b) explicit barriers or fence operations. + // An even better approach would be write "MonitorEnter()" and "MonitorExit()" + // in java - using j.u.c and unsafe - and just bind the lock and unlock sites + // to those specialized methods. That'd give us a mostly platform-independent + // implementation that the JITs could optimize and inline at their pleasure. + // Done correctly, the only time we'd need to cross to native could would be + // to park() or unpark() threads. We'd also need a few more unsafe operators + // to (a) prevent compiler-JIT reordering of non-volatile accesses, and + // (b) explicit barriers or fence operations. // - // TODO: + // TODO: // - // * Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr). - // This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals. - // Given TLAB allocation, Self is usually manifested in a register, so passing it into + // * Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr). + // This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals. + // Given TLAB allocation, Self is usually manifested in a register, so passing it into // the lock operators would typically be faster than reifying Self. // // * Ideally I'd define the primitives as: @@ -3202,55 +3209,55 @@ // fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED // Unfortunately ADLC bugs prevent us from expressing the ideal form. // Instead, we're stuck with a rather awkward and brittle register assignments below. - // Furthermore the register assignments are overconstrained, possibly resulting in - // sub-optimal code near the synchronization site. + // Furthermore the register assignments are overconstrained, possibly resulting in + // sub-optimal code near the synchronization site. // // * Eliminate the sp-proximity tests and just use "== Self" tests instead. // Alternately, use a better sp-proximity test. - // + // // * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value. // Either one is sufficient to uniquely identify a thread. - // TODO: eliminate use of sp in _owner and use get_thread(tr) instead. + // TODO: eliminate use of sp in _owner and use get_thread(tr) instead. // // * Intrinsify notify() and notifyAll() for the common cases where the - // object is locked by the calling thread but the waitlist is empty. - // avoid the expensive JNI call to JVM_Notify() and JVM_NotifyAll(). + // object is locked by the calling thread but the waitlist is empty. + // avoid the expensive JNI call to JVM_Notify() and JVM_NotifyAll(). // // * use jccb and jmpb instead of jcc and jmp to improve code density. - // But beware of excessive branch density on AMD Opterons. + // But beware of excessive branch density on AMD Opterons. // // * Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success // or failure of the fast-path. If the fast-path fails then we pass // control to the slow-path, typically in C. In Fast_Lock and // Fast_Unlock we often branch to DONE_LABEL, just to find that C2 - // will emit a conditional branch immediately after the node. 
+ // will emit a conditional branch immediately after the node. // So we have branches to branches and lots of ICC.ZF games. // Instead, it might be better to have C2 pass a "FailureLabel" // into Fast_Lock and Fast_Unlock. In the case of success, control // will drop through the node. ICC.ZF is undefined at exit. - // In the case of failure, the node will branch directly to the - // FailureLabel + // In the case of failure, the node will branch directly to the + // FailureLabel + - // obj: object to lock // box: on-stack box address (displaced header location) - KILLED // rax,: tmp -- KILLED // scr: tmp -- KILLED enc_class Fast_Lock( eRegP obj, eRegP box, eAXRegI tmp, eRegP scr ) %{ - + Register objReg = as_Register($obj$$reg); Register boxReg = as_Register($box$$reg); Register tmpReg = as_Register($tmp$$reg); Register scrReg = as_Register($scr$$reg); // Ensure the register assignents are disjoint - guarantee (objReg != boxReg, "") ; - guarantee (objReg != tmpReg, "") ; - guarantee (objReg != scrReg, "") ; - guarantee (boxReg != tmpReg, "") ; - guarantee (boxReg != scrReg, "") ; - guarantee (tmpReg == as_Register(EAX_enc), "") ; - + guarantee (objReg != boxReg, "") ; + guarantee (objReg != tmpReg, "") ; + guarantee (objReg != scrReg, "") ; + guarantee (boxReg != tmpReg, "") ; + guarantee (boxReg != scrReg, "") ; + guarantee (tmpReg == as_Register(EAX_enc), "") ; + MacroAssembler masm(&cbuf); if (_counters != NULL) { @@ -3259,8 +3266,8 @@ if (EmitSync & 1) { // set box->dhw = unused_mark (3) // Force all sync thru slow-path: slow_enter() and slow_exit() - masm.movl (Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())) ; - masm.cmpl (rsp, 0) ; + masm.movptr (Address(boxReg, 0), int32_t(markOopDesc::unused_mark())) ; + masm.cmpptr (rsp, (int32_t)0) ; } else if (EmitSync & 2) { Label DONE_LABEL ; @@ -3269,16 +3276,16 @@ masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters); } - masm.movl (tmpReg, Address(objReg, 0)) ; // fetch markword - masm.orl (tmpReg, 0x1); - masm.movl (Address(boxReg, 0), tmpReg); // Anticipate successful CAS + masm.movptr(tmpReg, Address(objReg, 0)) ; // fetch markword + masm.orptr (tmpReg, 0x1); + masm.movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS if (os::is_MP()) { masm.lock(); } - masm.cmpxchg(boxReg, Address(objReg, 0)); // Updates tmpReg + masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg masm.jcc(Assembler::equal, DONE_LABEL); // Recursive locking - masm.subl(tmpReg, rsp); - masm.andl(tmpReg, 0xFFFFF003 ); - masm.movl(Address(boxReg, 0), tmpReg); + masm.subptr(tmpReg, rsp); + masm.andptr(tmpReg, (int32_t) 0xFFFFF003 ); + masm.movptr(Address(boxReg, 0), tmpReg); masm.bind(DONE_LABEL) ; } else { // Possible cases that we'll encounter in fast_lock @@ -3299,26 +3306,26 @@ // -- by other // - Label IsInflated, DONE_LABEL, PopDone ; + Label IsInflated, DONE_LABEL, PopDone ; // TODO: optimize away redundant LDs of obj->mark and improve the markword triage - // order to reduce the number of conditional branches in the most common cases. + // order to reduce the number of conditional branches in the most common cases. // Beware -- there's a subtle invariant that fetch of the markword // at [FETCH], below, will never observe a biased encoding (*101b). // If this invariant is not held we risk exclusion (safety) failure. 
- if (UseBiasedLocking) { + if (UseBiasedLocking && !UseOptoBiasInlining) { masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters); } - masm.movl (tmpReg, Address(objReg, 0)) ; // [FETCH] - masm.testl (tmpReg, 0x02) ; // Inflated v (Stack-locked or neutral) + masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH] + masm.testptr(tmpReg, 0x02) ; // Inflated v (Stack-locked or neutral) masm.jccb (Assembler::notZero, IsInflated) ; // Attempt stack-locking ... - masm.orl (tmpReg, 0x1); - masm.movl (Address(boxReg, 0), tmpReg); // Anticipate successful CAS + masm.orptr (tmpReg, 0x1); + masm.movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS if (os::is_MP()) { masm.lock(); } - masm.cmpxchg(boxReg, Address(objReg, 0)); // Updates tmpReg + masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg if (_counters != NULL) { masm.cond_inc32(Assembler::equal, ExternalAddress((address)_counters->fast_path_entry_count_addr())); @@ -3326,9 +3333,9 @@ masm.jccb (Assembler::equal, DONE_LABEL); // Recursive locking - masm.subl(tmpReg, rsp); - masm.andl(tmpReg, 0xFFFFF003 ); - masm.movl(Address(boxReg, 0), tmpReg); + masm.subptr(tmpReg, rsp); + masm.andptr(tmpReg, 0xFFFFF003 ); + masm.movptr(Address(boxReg, 0), tmpReg); if (_counters != NULL) { masm.cond_inc32(Assembler::equal, ExternalAddress((address)_counters->fast_path_entry_count_addr())); @@ -3337,76 +3344,73 @@ masm.bind (IsInflated) ; - // The object is inflated. + // The object is inflated. // // TODO-FIXME: eliminate the ugly use of manifest constants: - // Use markOopDesc::monitor_value instead of "2". - // use markOop::unused_mark() instead of "3". - // The tmpReg value is an objectMonitor reference ORed with - // markOopDesc::monitor_value (2). We can either convert tmpReg to an - // objectmonitor pointer by masking off the "2" bit or we can just - // use tmpReg as an objectmonitor pointer but bias the objectmonitor + // Use markOopDesc::monitor_value instead of "2". + // use markOop::unused_mark() instead of "3". + // The tmpReg value is an objectMonitor reference ORed with + // markOopDesc::monitor_value (2). We can either convert tmpReg to an + // objectmonitor pointer by masking off the "2" bit or we can just + // use tmpReg as an objectmonitor pointer but bias the objectmonitor // field offsets with "-2" to compensate for and annul the low-order tag bit. // - // I use the latter as it avoids AGI stalls. - // As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]" - // instead of "mov r, [tmpReg+OFFSETOF(Owner)]". + // I use the latter as it avoids AGI stalls. + // As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]" + // instead of "mov r, [tmpReg+OFFSETOF(Owner)]". // #define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2) - // boxReg refers to the on-stack BasicLock in the current frame. + // boxReg refers to the on-stack BasicLock in the current frame. // We'd like to write: // set box->_displaced_header = markOop::unused_mark(). Any non-0 value suffices. // This is convenient but results a ST-before-CAS penalty. The following CAS suffers - // additional latency as we have another ST in the store buffer that must drain. + // additional latency as we have another ST in the store buffer that must drain. 
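Condensing the fast_lock control flow above into plain C++: fetch the header, branch off the monitor bit (0x2), try to install the BasicLock address with a CAS after setting the low "unlocked" bit in the anticipated displaced header, and on failure test whether the existing header points into our own stack, which indicates a recursive stack lock. The sketch below uses hypothetical reduced types and mirrors the branch structure only, not the real markOop layout.

#include <atomic>
#include <cstdint>

struct Obj { std::atomic<uintptr_t> mark; };   // stand-in for the object header
struct Box { uintptr_t displaced_header; };    // stand-in for the BasicLock

enum LockResult { FAST_LOCKED, RECURSIVE, INFLATED, SLOW_PATH };

// 'sp' plays the role of rsp in the encoding.
LockResult fast_lock_sketch(Obj* obj, Box* box, uintptr_t sp) {
  uintptr_t mark = obj->mark.load(std::memory_order_relaxed);   // [FETCH]
  if (mark & 0x2) return INFLATED;        // monitor bit set: inflated path

  // Attempt stack-locking: anticipate success by storing the (unlocked)
  // header into the box, then CAS the box address into the object header.
  uintptr_t displaced = mark | 0x1;
  box->displaced_header = displaced;
  if (obj->mark.compare_exchange_strong(displaced,
                                        reinterpret_cast<uintptr_t>(box))) {
    return FAST_LOCKED;
  }

  // CAS failed: 'displaced' now holds the current header. If it points into
  // our own stack (within roughly a page), this is a recursive stack lock;
  // record that with a zero displaced header, mirroring
  //   sub tmp, rsp ; and tmp, 0xFFFFF003 ; mov [box], tmp
  uintptr_t delta = (displaced - sp) & 0xFFFFF003u;
  box->displaced_header = delta;
  return delta == 0 ? RECURSIVE : SLOW_PATH;
}

int main() {
  Obj o; o.mark.store(0x1);                           // neutral, unlocked header
  Box b{};
  uintptr_t sp = reinterpret_cast<uintptr_t>(&b);
  return fast_lock_sketch(&o, &b, sp) == FAST_LOCKED ? 0 : 1;
}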
if (EmitSync & 8192) { - masm.movl (Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty + masm.movptr(Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty masm.get_thread (scrReg) ; - masm.movl (boxReg, tmpReg); // consider: LEA box, [tmp-2] - masm.movl (tmpReg, 0); // consider: xor vs mov + masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2] + masm.movptr(tmpReg, 0); // consider: xor vs mov if (os::is_MP()) { masm.lock(); } - masm.cmpxchg (scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; + masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; } else if ((EmitSync & 128) == 0) { // avoid ST-before-CAS - masm.movl (scrReg, boxReg) ; - masm.movl (boxReg, tmpReg); // consider: LEA box, [tmp-2] + masm.movptr(scrReg, boxReg) ; + masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2] // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) { - // prefetchw [eax + Offset(_owner)-2] - masm.emit_raw (0x0F) ; - masm.emit_raw (0x0D) ; - masm.emit_raw (0x48) ; - masm.emit_raw (ObjectMonitor::owner_offset_in_bytes()-2) ; + // prefetchw [eax + Offset(_owner)-2] + masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2)); } - if ((EmitSync & 64) == 0) { + if ((EmitSync & 64) == 0) { // Optimistic form: consider XORL tmpReg,tmpReg - masm.movl (tmpReg, 0 ) ; + masm.movptr(tmpReg, 0 ) ; } else { // Can suffer RTS->RTO upgrades on shared or cold $ lines // Test-And-CAS instead of CAS - masm.movl (tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner - masm.testl (tmpReg, tmpReg) ; // Locked ? + masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner + masm.testptr(tmpReg, tmpReg) ; // Locked ? masm.jccb (Assembler::notZero, DONE_LABEL) ; } - // Appears unlocked - try to swing _owner from null to non-null. + // Appears unlocked - try to swing _owner from null to non-null. // Ideally, I'd manifest "Self" with get_thread and then attempt - // to CAS the register containing Self into m->Owner. + // to CAS the register containing Self into m->Owner. // But we don't have enough registers, so instead we can either try to CAS // rsp or the address of the box (in scr) into &m->owner. If the CAS succeeds - // we later store "Self" into m->Owner. Transiently storing a stack address - // (rsp or the address of the box) into m->owner is harmless. - // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand. + // we later store "Self" into m->Owner. Transiently storing a stack address + // (rsp or the address of the box) into m->owner is harmless. + // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand. 
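For the inflated case, the path described here tries to swing ObjectMonitor::_owner from NULL to an identity (rsp, the box address, or Self, depending on the EmitSync variant), with EAX holding zero as the implicit cmpxchg comparand; the cmpxchgptr itself follows just below. A rough C++ equivalent of that ownership CAS, using a hypothetical one-field monitor:

#include <atomic>

// Hypothetical, much-reduced ObjectMonitor: only the field this path touches.
struct Monitor { std::atomic<void*> owner{nullptr}; };

// Returns true if we became the owner. 'identity' plays the role of rsp or
// the box address that the encoding installs before storing the real Self.
bool try_enter_inflated(Monitor* m, void* identity) {
  void* expected = nullptr;              // tmpReg == 0, the cmpxchg comparand
  return m->owner.compare_exchange_strong(expected, identity);
}

int main() {
  Monitor m;
  int self;                              // any distinct address will do
  bool first  = try_enter_inflated(&m, &self);   // succeeds: owner was NULL
  bool second = try_enter_inflated(&m, &self);   // fails: already owned
  return (first && !second) ? 0 : 1;
}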
if (os::is_MP()) { masm.lock(); } - masm.cmpxchg (scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; - masm.movl (Address(scrReg, 0), 3) ; // box->_displaced_header = 3 + masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; + masm.movptr(Address(scrReg, 0), 3) ; // box->_displaced_header = 3 masm.jccb (Assembler::notZero, DONE_LABEL) ; masm.get_thread (scrReg) ; // beware: clobbers ICCs - masm.movl (Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg) ; - masm.xorl (boxReg, boxReg) ; // set icc.ZFlag = 1 to indicate success + masm.movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg) ; + masm.xorptr(boxReg, boxReg) ; // set icc.ZFlag = 1 to indicate success // If the CAS fails we can either retry or pass control to the slow-path. // We use the latter tactic. @@ -3416,37 +3420,34 @@ // Invariant: m->_recursions should already be 0, so we don't need to explicitly set it. // Intentional fall-through into DONE_LABEL ... } else { - masm.movl (Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty - masm.movl (boxReg, tmpReg) ; + masm.movptr(Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty + masm.movptr(boxReg, tmpReg) ; // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) { - // prefetchw [eax + Offset(_owner)-2] - masm.emit_raw (0x0F) ; - masm.emit_raw (0x0D) ; - masm.emit_raw (0x48) ; - masm.emit_raw (ObjectMonitor::owner_offset_in_bytes()-2) ; + // prefetchw [eax + Offset(_owner)-2] + masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2)); } - if ((EmitSync & 64) == 0) { + if ((EmitSync & 64) == 0) { // Optimistic form - masm.xorl (tmpReg, tmpReg) ; + masm.xorptr (tmpReg, tmpReg) ; } else { // Can suffer RTS->RTO upgrades on shared or cold $ lines - masm.movl (tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner - masm.testl (tmpReg, tmpReg) ; // Locked ? + masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner + masm.testptr(tmpReg, tmpReg) ; // Locked ? masm.jccb (Assembler::notZero, DONE_LABEL) ; } - // Appears unlocked - try to swing _owner from null to non-null. - // Use either "Self" (in scr) or rsp as thread identity in _owner. - // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand. - masm.get_thread (scrReg) ; - if (os::is_MP()) { masm.lock(); } - masm.cmpxchg (scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; + // Appears unlocked - try to swing _owner from null to non-null. + // Use either "Self" (in scr) or rsp as thread identity in _owner. + // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand. + masm.get_thread (scrReg) ; + if (os::is_MP()) { masm.lock(); } + masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; - // If the CAS fails we can either retry or pass control to the slow-path. - // We use the latter tactic. + // If the CAS fails we can either retry or pass control to the slow-path. + // We use the latter tactic. // Pass the CAS result in the icc.ZFlag into DONE_LABEL // If the CAS was successful ... // Self has acquired the lock @@ -3457,48 +3458,48 @@ // DONE_LABEL is a hot target - we'd really like to place it at the // start of cache line by padding with NOPs. // See the AMD and Intel software optimization manuals for the - // most efficient "long" NOP encodings. 
- // Unfortunately none of our alignment mechanisms suffice. - masm.bind(DONE_LABEL); + // most efficient "long" NOP encodings. + // Unfortunately none of our alignment mechanisms suffice. + masm.bind(DONE_LABEL); // Avoid branch-to-branch on AMD processors // This appears to be superstition. - if (EmitSync & 32) masm.nop() ; - + if (EmitSync & 32) masm.nop() ; + - // At DONE_LABEL the icc ZFlag is set as follows ... + // At DONE_LABEL the icc ZFlag is set as follows ... // Fast_Unlock uses the same protocol. // ZFlag == 1 -> Success // ZFlag == 0 -> Failure - force control through the slow-path } %} - - // obj: object to unlock + + // obj: object to unlock // box: box address (displaced header location), killed. Must be EAX. - // rbx,: killed tmp; cannot be obj nor box. + // rbx,: killed tmp; cannot be obj nor box. // // Some commentary on balanced locking: // - // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites. + // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites. // Methods that don't have provably balanced locking are forced to run in the - // interpreter - such methods won't be compiled to use fast_lock and fast_unlock. + // interpreter - such methods won't be compiled to use fast_lock and fast_unlock. // The interpreter provides two properties: - // I1: At return-time the interpreter automatically and quietly unlocks any + // I1: At return-time the interpreter automatically and quietly unlocks any // objects acquired the current activation (frame). Recall that the - // interpreter maintains an on-stack list of locks currently held by - // a frame. + // interpreter maintains an on-stack list of locks currently held by + // a frame. // I2: If a method attempts to unlock an object that is not held by the - // the frame the interpreter throws IMSX. + // the frame the interpreter throws IMSX. // // Lets say A(), which has provably balanced locking, acquires O and then calls B(). - // B() doesn't have provably balanced locking so it runs in the interpreter. - // Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O - // is still locked by A(). + // B() doesn't have provably balanced locking so it runs in the interpreter. + // Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O + // is still locked by A(). // - // The only other source of unbalanced locking would be JNI. The "Java Native Interface: + // The only other source of unbalanced locking would be JNI. The "Java Native Interface: // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter // should not be unlocked by "normal" java-level locking and vice-versa. The specification - // doesn't specify what will occur if a program engages in such mixed-mode locking, however. + // doesn't specify what will occur if a program engages in such mixed-mode locking, however. enc_class Fast_Unlock( nabxRegP obj, eAXRegP box, eRegP tmp) %{ @@ -3506,124 +3507,121 @@ Register boxReg = as_Register($box$$reg); Register tmpReg = as_Register($tmp$$reg); - guarantee (objReg != boxReg, "") ; - guarantee (objReg != tmpReg, "") ; - guarantee (boxReg != tmpReg, "") ; - guarantee (boxReg == as_Register(EAX_enc), "") ; + guarantee (objReg != boxReg, "") ; + guarantee (objReg != tmpReg, "") ; + guarantee (boxReg != tmpReg, "") ; + guarantee (boxReg == as_Register(EAX_enc), "") ; MacroAssembler masm(&cbuf); - if (EmitSync & 4) { + if (EmitSync & 4) { // Disable - inhibit all inlining. 
Force control through the slow-path - masm.cmpl (rsp, 0) ; + masm.cmpptr (rsp, 0) ; } else if (EmitSync & 8) { - Label DONE_LABEL ; + Label DONE_LABEL ; if (UseBiasedLocking) { masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL); } // classic stack-locking code ... - masm.movl (tmpReg, Address(boxReg, 0)) ; - masm.testl (tmpReg, tmpReg) ; + masm.movptr(tmpReg, Address(boxReg, 0)) ; + masm.testptr(tmpReg, tmpReg) ; masm.jcc (Assembler::zero, DONE_LABEL) ; if (os::is_MP()) { masm.lock(); } - masm.cmpxchg(tmpReg, Address(objReg, 0)); // Uses EAX which is box + masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses EAX which is box masm.bind(DONE_LABEL); } else { - Label DONE_LABEL, Stacked, CheckSucc, Inflated ; + Label DONE_LABEL, Stacked, CheckSucc, Inflated ; // Critically, the biased locking test must have precedence over - // and appear before the (box->dhw == 0) recursive stack-lock test. - if (UseBiasedLocking) { + // and appear before the (box->dhw == 0) recursive stack-lock test. + if (UseBiasedLocking && !UseOptoBiasInlining) { masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL); } - masm.cmpl (Address(boxReg, 0), 0) ; // Examine the displaced header - masm.movl (tmpReg, Address(objReg, 0)) ; // Examine the object's markword + masm.cmpptr(Address(boxReg, 0), 0) ; // Examine the displaced header + masm.movptr(tmpReg, Address(objReg, 0)) ; // Examine the object's markword masm.jccb (Assembler::zero, DONE_LABEL) ; // 0 indicates recursive stack-lock - masm.testl (tmpReg, 0x02) ; // Inflated? + masm.testptr(tmpReg, 0x02) ; // Inflated? masm.jccb (Assembler::zero, Stacked) ; - masm.bind (Inflated) ; + masm.bind (Inflated) ; // It's inflated. - // Despite our balanced locking property we still check that m->_owner == Self - // as java routines or native JNI code called by this thread might - // have released the lock. + // Despite our balanced locking property we still check that m->_owner == Self + // as java routines or native JNI code called by this thread might + // have released the lock. // Refer to the comments in synchronizer.cpp for how we might encode extra - // state in _succ so we can avoid fetching EntryList|cxq. - // + // state in _succ so we can avoid fetching EntryList|cxq. + // // I'd like to add more cases in fast_lock() and fast_unlock() -- // such as recursive enter and exit -- but we have to be wary of - // I$ bloat, T$ effects and BP$ effects. + // I$ bloat, T$ effects and BP$ effects. // // If there's no contention try a 1-0 exit. That is, exit without // a costly MEMBAR or CAS. See synchronizer.cpp for details on how - // we detect and recover from the race that the 1-0 exit admits. + // we detect and recover from the race that the 1-0 exit admits. // // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier // before it STs null into _owner, releasing the lock. Updates // to data protected by the critical section must be visible before // we drop the lock (and thus before any other thread could acquire - // the lock and observe the fields protected by the lock). + // the lock and observe the fields protected by the lock). // IA32's memory-model is SPO, so STs are ordered with respect to - // each other and there's no need for an explicit barrier (fence). - // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html. + // each other and there's no need for an explicit barrier (fence). + // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html. 
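The unlock path above triages on two loads: a zero displaced header in the box means a recursive stack lock (nothing to restore), the 0x2 bit in the object's markword means the monitor is inflated, and otherwise the saved header is put back with a CAS that uses the box address as the expected value. A compressed sketch of that decision tree, with the same simplified types as before:

#include <atomic>
#include <cstdint>

struct Obj { std::atomic<uintptr_t> mark; };
struct Box { uintptr_t displaced_header; };

enum UnlockResult { RECURSIVE_EXIT, INFLATED_EXIT, FAST_UNLOCKED, SLOW_PATH };

UnlockResult fast_unlock_sketch(Obj* obj, Box* box) {
  uintptr_t displaced = box->displaced_header;             // examine the box
  uintptr_t mark      = obj->mark.load(std::memory_order_relaxed);
  if (displaced == 0) return RECURSIVE_EXIT;   // 0 indicates recursive stack-lock
  if (mark & 0x2)     return INFLATED_EXIT;    // monitor bit: inflated path

  // Stack-locked by us: put the displaced header back, but only if the
  // header still points at our box (mirrors the cmpxchg with EAX == box).
  uintptr_t expected = reinterpret_cast<uintptr_t>(box);
  return obj->mark.compare_exchange_strong(expected, displaced)
             ? FAST_UNLOCKED : SLOW_PATH;
}

int main() {
  Box b{0x1};                                             // saved unlocked header
  Obj o; o.mark.store(reinterpret_cast<uintptr_t>(&b));   // stack-locked by us
  return fast_unlock_sketch(&o, &b) == FAST_UNLOCKED ? 0 : 1;
}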
- masm.get_thread (boxReg) ; + masm.get_thread (boxReg) ; if ((EmitSync & 4096) && VM_Version::supports_3dnow() && os::is_MP()) { - // prefetchw [ebx + Offset(_owner)-2] - masm.emit_raw (0x0F) ; - masm.emit_raw (0x0D) ; - masm.emit_raw (0x4B) ; - masm.emit_raw (ObjectMonitor::owner_offset_in_bytes()-2) ; + // prefetchw [ebx + Offset(_owner)-2] + masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2)); } - + // Note that we could employ various encoding schemes to reduce - // the number of loads below (currently 4) to just 2 or 3. + // the number of loads below (currently 4) to just 2 or 3. // Refer to the comments in synchronizer.cpp. // In practice the chain of fetches doesn't seem to impact performance, however. - if ((EmitSync & 65536) == 0 && (EmitSync & 256)) { + if ((EmitSync & 65536) == 0 && (EmitSync & 256)) { // Attempt to reduce branch density - AMD's branch predictor. - masm.xorl (boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; - masm.orl (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ; - masm.orl (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; - masm.orl (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; + masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; + masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ; + masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; + masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; masm.jccb (Assembler::notZero, DONE_LABEL) ; - masm.movl (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ; + masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ; masm.jmpb (DONE_LABEL) ; } else { - masm.xorl (boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; - masm.orl (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ; + masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; + masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ; masm.jccb (Assembler::notZero, DONE_LABEL) ; - masm.movl (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; - masm.orl (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; + masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; + masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; masm.jccb (Assembler::notZero, CheckSucc) ; - masm.movl (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ; + masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ; masm.jmpb (DONE_LABEL) ; } // The Following code fragment (EmitSync & 65536) improves the performance of - // contended applications and contended synchronization microbenchmarks. + // contended applications and contended synchronization microbenchmarks. // Unfortunately the emission of the code - even though not executed - causes regressions // in scimark and jetstream, evidently because of $ effects. Replacing the code - // with an equal number of never-executed NOPs results in the same regression. - // We leave it off by default. + // with an equal number of never-executed NOPs results in the same regression. + // We leave it off by default. 
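The EmitSync & 256 variant above folds the whole "can we do a cheap exit?" question into one branch by XOR/OR-ing _owner (against the thread identity), _recursions, _EntryList and _cxq into a single register. Schematically, assuming a hypothetical flattened monitor layout:

#include <cstdint>

// Hypothetical flattened monitor; only the fields the combined test reads.
struct MonitorFields {
  uintptr_t owner;
  uintptr_t recursions;
  uintptr_t entry_list;
  uintptr_t cxq;
};

// True when the current thread owns the monitor exactly once and nobody is
// queued, i.e. the release-without-CAS exit is allowed. One OR-chain and a
// single branch, the shape the encoding aims for on simple branch predictors.
bool can_do_1_0_exit(const MonitorFields& m, uintptr_t self) {
  return ((m.owner ^ self) | m.recursions | m.entry_list | m.cxq) == 0;
}

int main() {
  MonitorFields m{0x1000, 0, 0, 0};
  return can_do_1_0_exit(m, 0x1000) ? 0 : 1;
}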
- if ((EmitSync & 65536) != 0) { - Label LSuccess, LGoSlowPath ; + if ((EmitSync & 65536) != 0) { + Label LSuccess, LGoSlowPath ; masm.bind (CheckSucc) ; // Optional pre-test ... it's safe to elide this if ((EmitSync & 16) == 0) { - masm.cmpl (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ; + masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ; masm.jccb (Assembler::zero, LGoSlowPath) ; } // We have a classic Dekker-style idiom: // ST m->_owner = 0 ; MEMBAR; LD m->_succ // There are a number of ways to implement the barrier: - // (1) lock:andl &m->_owner, 0 + // (1) lock:andl &m->_owner, 0 // is fast, but mask doesn't currently support the "ANDL M,IMM32" form. // LOCK: ANDL [ebx+Offset(_Owner)-2], 0 // Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8 @@ -3631,10 +3629,10 @@ // In older IA32 processors MFENCE is slower than lock:add or xchg // particularly if the write-buffer is full as might be the case if // if stores closely precede the fence or fence-equivalent instruction. - // In more modern implementations MFENCE appears faster, however. + // In more modern implementations MFENCE appears faster, however. // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack // The $lines underlying the top-of-stack should be in M-state. - // The locked add instruction is serializing, of course. + // The locked add instruction is serializing, of course. // (4) Use xchg, which is serializing // mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0. @@ -3643,73 +3641,71 @@ // we just stored into _owner, it's likely that the $line // remains in M-state for the lock:orl. // - // We currently use (3), although it's likely that switching to (2) + // We currently use (3), although it's likely that switching to (2) // is correct for the future. - masm.movl (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ; + masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ; if (os::is_MP()) { if (VM_Version::supports_sse2() && 1 == FenceInstruction) { - masm.emit_raw (0x0F) ; // MFENCE ... - masm.emit_raw (0xAE) ; - masm.emit_raw (0xF0) ; + masm.mfence(); } else { - masm.lock () ; masm.addl (Address(rsp, 0), 0) ; + masm.lock () ; masm.addptr(Address(rsp, 0), 0) ; } } // Ratify _succ remains non-null - masm.cmpl (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ; + masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ; masm.jccb (Assembler::notZero, LSuccess) ; - masm.xorl (boxReg, boxReg) ; // box is really EAX + masm.xorptr(boxReg, boxReg) ; // box is really EAX if (os::is_MP()) { masm.lock(); } - masm.cmpxchg(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)); + masm.cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)); masm.jccb (Assembler::notEqual, LSuccess) ; // Since we're low on registers we installed rsp as a placeholding in _owner. // Now install Self over rsp. This is safe as we're transitioning from // non-null to non=null masm.get_thread (boxReg) ; - masm.movl (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg) ; + masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg) ; // Intentional fall-through into LGoSlowPath ... 
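The comment above describes the classic ST _owner = 0; MEMBAR; LD _succ release idiom and several ways to realize the StoreLoad barrier (a locked ADD to the top of stack, MFENCE, XCHG, and so on). In portable C++ the same shape is a release store followed by a full fence before the successor re-check; on IA32, compilers typically lower such a fence to one of the instruction forms listed above. This is a sketch of the shape only, not the actual exit protocol:

#include <atomic>
#include <cstdint>

struct MonitorSketch {
  std::atomic<uintptr_t> owner{0};
  std::atomic<uintptr_t> succ{0};
};

// 1-0 exit shape: drop ownership, then make sure the store is visible before
// we look at _succ (the Dekker-style ST; MEMBAR; LD from the comment above).
// Returns true if a successor was already named and we can simply leave.
bool one_zero_exit(MonitorSketch* m) {
  m->owner.store(0, std::memory_order_release);          // ST m->_owner = 0
  std::atomic_thread_fence(std::memory_order_seq_cst);   // StoreLoad barrier
  return m->succ.load(std::memory_order_relaxed) != 0;   // LD m->_succ
}

int main() {
  MonitorSketch m;
  m.owner.store(1);
  m.succ.store(1);
  return one_zero_exit(&m) ? 0 : 1;
}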
masm.bind (LGoSlowPath) ; - masm.orl (boxReg, 1) ; // set ICC.ZF=0 to indicate failure + masm.orptr(boxReg, 1) ; // set ICC.ZF=0 to indicate failure masm.jmpb (DONE_LABEL) ; masm.bind (LSuccess) ; - masm.xorl (boxReg, boxReg) ; // set ICC.ZF=1 to indicate success + masm.xorptr(boxReg, boxReg) ; // set ICC.ZF=1 to indicate success masm.jmpb (DONE_LABEL) ; } - + masm.bind (Stacked) ; - // It's not inflated and it's not recursively stack-locked and it's not biased. - // It must be stack-locked. + // It's not inflated and it's not recursively stack-locked and it's not biased. + // It must be stack-locked. // Try to reset the header to displaced header. // The "box" value on the stack is stable, so we can reload // and be assured we observe the same value as above. - masm.movl (tmpReg, Address(boxReg, 0)) ; + masm.movptr(tmpReg, Address(boxReg, 0)) ; if (os::is_MP()) { masm.lock(); } - masm.cmpxchg(tmpReg, Address(objReg, 0)); // Uses EAX which is box + masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses EAX which is box // Intention fall-thru into DONE_LABEL - + // DONE_LABEL is a hot target - we'd really like to place it at the // start of cache line by padding with NOPs. // See the AMD and Intel software optimization manuals for the - // most efficient "long" NOP encodings. - // Unfortunately none of our alignment mechanisms suffice. + // most efficient "long" NOP encodings. + // Unfortunately none of our alignment mechanisms suffice. if ((EmitSync & 65536) == 0) { - masm.bind (CheckSucc) ; + masm.bind (CheckSucc) ; } masm.bind(DONE_LABEL); // Avoid branch to branch on AMD processors - if (EmitSync & 32768) { masm.nop() ; } + if (EmitSync & 32768) { masm.nop() ; } } %} enc_class enc_String_Compare() %{ - Label ECX_GOOD_LABEL, LENGTH_DIFF_LABEL, - POP_LABEL, DONE_LABEL, CONT_LABEL, + Label ECX_GOOD_LABEL, LENGTH_DIFF_LABEL, + POP_LABEL, DONE_LABEL, CONT_LABEL, WHILE_HEAD_LABEL; MacroAssembler masm(&cbuf); @@ -3720,12 +3716,12 @@ int count_offset = java_lang_String::count_offset_in_bytes(); int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR); - masm.movl(rax, Address(rsi, value_offset)); + masm.movptr(rax, Address(rsi, value_offset)); masm.movl(rcx, Address(rsi, offset_offset)); - masm.leal(rax, Address(rax, rcx, Address::times_2, base_offset)); - masm.movl(rbx, Address(rdi, value_offset)); + masm.lea(rax, Address(rax, rcx, Address::times_2, base_offset)); + masm.movptr(rbx, Address(rdi, value_offset)); masm.movl(rcx, Address(rdi, offset_offset)); - masm.leal(rbx, Address(rbx, rcx, Address::times_2, base_offset)); + masm.lea(rbx, Address(rbx, rcx, Address::times_2, base_offset)); // Compute the minimum of the string lengths(rsi) and the // difference of the string lengths (stack) @@ -3736,41 +3732,41 @@ masm.movl(rsi, Address(rsi, count_offset)); masm.movl(rcx, rdi); masm.subl(rdi, rsi); - masm.pushl(rdi); + masm.push(rdi); masm.cmovl(Assembler::lessEqual, rsi, rcx); } else { masm.movl(rdi, Address(rdi, count_offset)); masm.movl(rcx, Address(rsi, count_offset)); masm.movl(rsi, rdi); masm.subl(rdi, rcx); - masm.pushl(rdi); + masm.push(rdi); masm.jcc(Assembler::lessEqual, ECX_GOOD_LABEL); masm.movl(rsi, rcx); // rsi holds min, rcx is unused } - + // Is the minimum length zero? 
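Stripped of register assignments, enc_String_Compare (which continues below) implements the usual compareTo contract: save the length difference, walk the first min(len1, len2) characters, return the difference of the first mismatching pair, and otherwise return the saved length difference. A plain C++ restatement over UTF-16 data; the real encoding additionally short-circuits when both strings start at the same address and have equal length.

#include <cstdint>

int string_compare_sketch(const uint16_t* a, int alen,
                          const uint16_t* b, int blen) {
  int len_diff = alen - blen;              // pushed onto the stack in the encoding
  int min_len  = len_diff <= 0 ? alen : blen;
  for (int i = 0; i < min_len; ++i) {
    int d = int(a[i]) - int(b[i]);
    if (d != 0) return d;                  // first mismatching character decides
  }
  return len_diff;                         // equal prefixes: compare lengths
}

int main() {
  uint16_t x[] = {'a', 'b', 'c'};
  uint16_t y[] = {'a', 'b'};
  return string_compare_sketch(x, 3, y, 2) > 0 ? 0 : 1;
}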
masm.bind(ECX_GOOD_LABEL); masm.testl(rsi, rsi); masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL); - + // Load first characters masm.load_unsigned_word(rcx, Address(rbx, 0)); masm.load_unsigned_word(rdi, Address(rax, 0)); - + // Compare first characters masm.subl(rcx, rdi); masm.jcc(Assembler::notZero, POP_LABEL); - masm.decrement(rsi); + masm.decrementl(rsi); masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL); { // Check after comparing first character to see if strings are equivalent Label LSkip2; // Check if the strings start at same location - masm.cmpl(rbx,rax); + masm.cmpptr(rbx,rax); masm.jcc(Assembler::notEqual, LSkip2); - + // Check if the length difference is zero (from stack) masm.cmpl(Address(rsp, 0), 0x0); masm.jcc(Assembler::equal, LENGTH_DIFF_LABEL); @@ -3780,8 +3776,8 @@ } // Shift rax, and rbx, to the end of the arrays, negate min - masm.leal(rax, Address(rax, rsi, Address::times_2, 2)); - masm.leal(rbx, Address(rbx, rsi, Address::times_2, 2)); + masm.lea(rax, Address(rax, rsi, Address::times_2, 2)); + masm.lea(rbx, Address(rbx, rsi, Address::times_2, 2)); masm.negl(rsi); // Compare the rest of the characters @@ -3790,34 +3786,106 @@ masm.load_unsigned_word(rdi, Address(rax, rsi, Address::times_2, 0)); masm.subl(rcx, rdi); masm.jcc(Assembler::notZero, POP_LABEL); - masm.increment(rsi); + masm.incrementl(rsi); masm.jcc(Assembler::notZero, WHILE_HEAD_LABEL); - + // Strings are equal up to min length. Return the length difference. masm.bind(LENGTH_DIFF_LABEL); - masm.popl(rcx); + masm.pop(rcx); masm.jmp(DONE_LABEL); // Discard the stored length difference masm.bind(POP_LABEL); - masm.addl(rsp, 4); + masm.addptr(rsp, 4); // That's it masm.bind(DONE_LABEL); %} + enc_class enc_Array_Equals(eDIRegP ary1, eSIRegP ary2, eAXRegI tmp1, eBXRegI tmp2, eCXRegI result) %{ + Label TRUE_LABEL, FALSE_LABEL, DONE_LABEL, COMPARE_LOOP_HDR, COMPARE_LOOP; + MacroAssembler masm(&cbuf); + + Register ary1Reg = as_Register($ary1$$reg); + Register ary2Reg = as_Register($ary2$$reg); + Register tmp1Reg = as_Register($tmp1$$reg); + Register tmp2Reg = as_Register($tmp2$$reg); + Register resultReg = as_Register($result$$reg); + + int length_offset = arrayOopDesc::length_offset_in_bytes(); + int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR); + + // Check the input args + masm.cmpl(ary1Reg, ary2Reg); + masm.jcc(Assembler::equal, TRUE_LABEL); + masm.testl(ary1Reg, ary1Reg); + masm.jcc(Assembler::zero, FALSE_LABEL); + masm.testl(ary2Reg, ary2Reg); + masm.jcc(Assembler::zero, FALSE_LABEL); + + // Check the lengths + masm.movl(tmp2Reg, Address(ary1Reg, length_offset)); + masm.movl(resultReg, Address(ary2Reg, length_offset)); + masm.cmpl(tmp2Reg, resultReg); + masm.jcc(Assembler::notEqual, FALSE_LABEL); + masm.testl(resultReg, resultReg); + masm.jcc(Assembler::zero, TRUE_LABEL); + + // Get the number of 4 byte vectors to compare + masm.shrl(resultReg, 1); + + // Check for odd-length arrays + masm.andl(tmp2Reg, 1); + masm.testl(tmp2Reg, tmp2Reg); + masm.jcc(Assembler::zero, COMPARE_LOOP_HDR); + + // Compare 2-byte "tail" at end of arrays + masm.load_unsigned_word(tmp1Reg, Address(ary1Reg, resultReg, Address::times_4, base_offset)); + masm.load_unsigned_word(tmp2Reg, Address(ary2Reg, resultReg, Address::times_4, base_offset)); + masm.cmpl(tmp1Reg, tmp2Reg); + masm.jcc(Assembler::notEqual, FALSE_LABEL); + masm.testl(resultReg, resultReg); + masm.jcc(Assembler::zero, TRUE_LABEL); + + // Setup compare loop + masm.bind(COMPARE_LOOP_HDR); + // Shift tmp1Reg and tmp2Reg to the last 4-byte boundary of the arrays + 
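enc_Array_Equals (concluded just below) checks the references and lengths, compares a possible odd 2-byte tail, and then walks the arrays four bytes (two chars) at a time. The same shape in portable C++; equal lengths are assumed here because the encoding has already rejected mismatched lengths by the time the loop runs.

#include <cstdint>
#include <cstring>

// Sketch of the char[] equality walk; 'len' is the element count shared by
// both arrays, and the 4-byte strides mirror the movl/cmpl loop above/below.
bool char_arrays_equal_sketch(const uint16_t* a, const uint16_t* b, int len) {
  if (a == b)    return true;
  if (!a || !b)  return false;

  int vectors = len >> 1;                  // number of 4-byte chunks
  if (len & 1) {                           // odd length: compare the 2-byte tail
    if (a[len - 1] != b[len - 1]) return false;
  }
  for (int i = 0; i < vectors; ++i) {      // 4 bytes (two chars) per iteration
    uint32_t va, vb;
    std::memcpy(&va, a + 2 * i, 4);
    std::memcpy(&vb, b + 2 * i, 4);
    if (va != vb) return false;
  }
  return true;
}

int main() {
  uint16_t x[] = {'j', 'd', 'k', '7'};
  uint16_t y[] = {'j', 'd', 'k', '7'};
  return char_arrays_equal_sketch(x, y, 4) ? 0 : 1;
}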
masm.leal(tmp1Reg, Address(ary1Reg, resultReg, Address::times_4, base_offset)); + masm.leal(tmp2Reg, Address(ary2Reg, resultReg, Address::times_4, base_offset)); + masm.negl(resultReg); + + // 4-byte-wide compare loop + masm.bind(COMPARE_LOOP); + masm.movl(ary1Reg, Address(tmp1Reg, resultReg, Address::times_4, 0)); + masm.movl(ary2Reg, Address(tmp2Reg, resultReg, Address::times_4, 0)); + masm.cmpl(ary1Reg, ary2Reg); + masm.jcc(Assembler::notEqual, FALSE_LABEL); + masm.increment(resultReg); + masm.jcc(Assembler::notZero, COMPARE_LOOP); + + masm.bind(TRUE_LABEL); + masm.movl(resultReg, 1); // return true + masm.jmp(DONE_LABEL); + + masm.bind(FALSE_LABEL); + masm.xorl(resultReg, resultReg); // return false + + // That's it + masm.bind(DONE_LABEL); + %} + enc_class enc_pop_rdx() %{ emit_opcode(cbuf,0x5A); %} - + enc_class enc_rethrow() %{ cbuf.set_inst_mark(); emit_opcode(cbuf, 0xE9); // jmp entry - emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.code_end())-4, + emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.code_end())-4, runtime_call_Relocation::spec(), RELOC_IMM32 ); %} - + // Convert a double to an int. Java semantics require we do complex // manglelations in the corner cases. So we set the rounding mode to // 'zero', store the darned double down as an int, and reset the @@ -3846,8 +3914,8 @@ // Restore the rounding mode; mask the exception emit_opcode(cbuf,0xD9); // FLDCW std/24-bit mode emit_opcode(cbuf,0x2D); - emit_d32( cbuf, Compile::current()->in_24_bit_fp_mode() - ? (int)StubRoutines::addr_fpu_cntrl_wrd_24() + emit_d32( cbuf, Compile::current()->in_24_bit_fp_mode() + ? (int)StubRoutines::addr_fpu_cntrl_wrd_24() : (int)StubRoutines::addr_fpu_cntrl_wrd_std()); // Load the converted int; adjust CPU stack @@ -3882,8 +3950,8 @@ // Restore the rounding mode; mask the exception emit_opcode(cbuf,0xD9); // FLDCW std/24-bit mode emit_opcode(cbuf,0x2D); - emit_d32( cbuf, Compile::current()->in_24_bit_fp_mode() - ? (int)StubRoutines::addr_fpu_cntrl_wrd_24() + emit_d32( cbuf, Compile::current()->in_24_bit_fp_mode() + ? (int)StubRoutines::addr_fpu_cntrl_wrd_24() : (int)StubRoutines::addr_fpu_cntrl_wrd_std()); // Load the converted int; adjust CPU stack @@ -3894,7 +3962,7 @@ emit_d32 (cbuf,0x80000000); // 0x80000000 emit_opcode(cbuf,0x75); // JNE around_slow_call emit_d8 (cbuf,0x07+4); // Size of slow_call - emit_opcode(cbuf,0x85); // TEST EAX,EAX + emit_opcode(cbuf,0x85); // TEST EAX,EAX emit_opcode(cbuf,0xC0); // 2/rax,/rax, emit_opcode(cbuf,0x75); // JNE around_slow_call emit_d8 (cbuf,0x07); // Size of slow_call @@ -3918,10 +3986,10 @@ emit_opcode (cbuf, 0x0F ); emit_opcode (cbuf, 0x11 ); encode_RegMem(cbuf, $src$$reg, ESP_enc, 0x4, 0, 0, false); - + emit_opcode(cbuf,0xD9 ); // FLD_S [ESP] encode_RegMem(cbuf, 0x0, ESP_enc, 0x4, 0, 0, false); - + emit_opcode(cbuf,0xD9); // FLDCW trunc emit_opcode(cbuf,0x2D); emit_d32(cbuf,(int)StubRoutines::addr_fpu_cntrl_wrd_trunc()); @@ -3938,7 +4006,7 @@ emit_d32( cbuf, Compile::current()->in_24_bit_fp_mode() ? 
(int)StubRoutines::addr_fpu_cntrl_wrd_24() : (int)StubRoutines::addr_fpu_cntrl_wrd_std()); - + // Load the converted int; adjust CPU stack emit_opcode(cbuf,0x58); // POP EAX @@ -3966,10 +4034,10 @@ emit_opcode (cbuf, 0x0F ); emit_opcode (cbuf, 0x11 ); encode_RegMem(cbuf, $src$$reg, ESP_enc, 0x4, 0, 0, false); - + emit_opcode(cbuf,0xD9 ); // FLD_S [ESP] encode_RegMem(cbuf, 0x0, ESP_enc, 0x4, 0, 0, false); - + emit_opcode(cbuf,0x83); // ADD ESP,4 emit_opcode(cbuf,0xC4); emit_d8(cbuf,0x04); @@ -3980,21 +4048,21 @@ emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); // Carry on here... %} - + enc_class XD2L_encoding( regXD src ) %{ // Allocate a word emit_opcode(cbuf,0x83); // SUB ESP,8 emit_opcode(cbuf,0xEC); emit_d8(cbuf,0x08); - + emit_opcode (cbuf, 0xF2 ); // MOVSD [ESP], src emit_opcode (cbuf, 0x0F ); emit_opcode (cbuf, 0x11 ); encode_RegMem(cbuf, $src$$reg, ESP_enc, 0x4, 0, 0, false); - + emit_opcode(cbuf,0xDD ); // FLD_D [ESP] encode_RegMem(cbuf, 0x0, ESP_enc, 0x4, 0, 0, false); - + emit_opcode(cbuf,0xD9); // FLDCW trunc emit_opcode(cbuf,0x2D); emit_d32(cbuf,(int)StubRoutines::addr_fpu_cntrl_wrd_trunc()); @@ -4035,15 +4103,15 @@ emit_opcode(cbuf,0x83); // SUB ESP,8 emit_opcode(cbuf,0xEC); emit_d8(cbuf,0x08); - + emit_opcode (cbuf, 0xF2 ); // MOVSD [ESP], src emit_opcode (cbuf, 0x0F ); emit_opcode (cbuf, 0x11 ); encode_RegMem(cbuf, $src$$reg, ESP_enc, 0x4, 0, 0, false); - + emit_opcode(cbuf,0xDD ); // FLD_D [ESP] encode_RegMem(cbuf, 0x0, ESP_enc, 0x4, 0, 0, false); - + emit_opcode(cbuf,0x83); // ADD ESP,8 emit_opcode(cbuf,0xC4); emit_d8(cbuf,0x08); @@ -4145,7 +4213,7 @@ emit_rm(cbuf, 0x0, $dst$$reg, 0x5); emit_d32(cbuf, (int)signmask_address); %} - + enc_class AbsXD_encoding(regXD dst) %{ address signmask_address=(address)double_signmask_pool; // andpd:\tANDPD $dst,[signconst] @@ -4164,7 +4232,7 @@ emit_rm(cbuf, 0x0, $dst$$reg, 0x5); emit_d32(cbuf, (int)signmask_address); %} - + enc_class NegXD_encoding(regXD dst) %{ address signmask_address=(address)double_signflip_pool; // andpd:\tXORPD $dst,[signconst] @@ -4190,7 +4258,7 @@ %} enc_class FAddP_reg_ST( eRegF src2 ) %{ - // FADDP src2,ST /* DE C0+i */ + // FADDP src2,ST /* DE C0+i */ emit_opcode(cbuf, 0xDE); emit_opcode(cbuf, 0xC0 + $src2$$reg); %} @@ -4212,7 +4280,7 @@ emit_opcode(cbuf, 0xD8); emit_opcode(cbuf, 0xC0 + $src1$$reg); - // FMUL ST,src2 /* D8 C*+i */ + // FMUL ST,src2 /* D8 C*+i */ emit_opcode(cbuf, 0xD8); emit_opcode(cbuf, 0xC8 + $src2$$reg); %} @@ -4224,7 +4292,7 @@ emit_opcode(cbuf, 0xD8); emit_opcode(cbuf, 0xC0 + $src1$$reg); - // FMULP src2,ST /* DE C8+i */ + // FMULP src2,ST /* DE C8+i */ emit_opcode(cbuf, 0xDE); emit_opcode(cbuf, 0xC8 + $src2$$reg); %} @@ -4243,7 +4311,8 @@ enc_class enc_membar_volatile %{ MacroAssembler masm(&cbuf); - masm.membar(); + masm.membar(Assembler::Membar_mask_bits(Assembler::StoreLoad | + Assembler::StoreStore)); %} // Atomically load the volatile long @@ -4364,7 +4433,7 @@ %} enc_class enc_storeLX_reg_volatile( memory mem, eRegL src, regXD tmp, regXD tmp2) %{ - { // MOVD $tmp,$src.lo + { // MOVD $tmp,$src.lo emit_opcode(cbuf,0x66); emit_opcode(cbuf,0x0F); emit_opcode(cbuf,0x6E); @@ -4418,10 +4487,10 @@ // | (to get allocators register number // G Owned by | | v add OptoReg::stack0()) // r CALLER | | -// o | +--------+ pad to even-align allocators stack-slot +// o | +--------+ pad to even-align allocators stack-slot // w V | pad0 | numbers; owned by CALLER // t -----------+--------+----> Matcher::_in_arg_limit, unaligned 
-// h ^ | in | 5 +// h ^ | in | 5 // | | args | 4 Holes in incoming args owned by SELF // | | | | 3 // | | +--------+ @@ -4434,14 +4503,14 @@ // | | pad2 | 2 pad to align old SP // | +--------+ 1 // | | locks | 0 -// | +--------+----> OptoReg::stack0(), even aligned +// | +--------+----> OptoReg::stack0(), even aligned // | | pad1 | 11 pad to align new SP // | +--------+ // | | | 10 // | | spills | 9 spills // V | | 8 (pad0 slot for callee) // -----------+--------+----> Matcher::_out_arg_limit, unaligned -// ^ | out | 7 +// ^ | out | 7 // | | args | 6 Holes in outgoing args owned by CALLEE // Owned by +--------+ // CALLEE | new out| 6 Empty on Intel, window on Sparc @@ -4449,17 +4518,17 @@ // | SP-+--------+----> Matcher::_new_SP, even aligned // | | | // -// Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is +// Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is // known from SELF's arguments and the Java calling convention. // Region 6-7 is determined per call site. -// Note 2: If the calling convention leaves holes in the incoming argument +// Note 2: If the calling convention leaves holes in the incoming argument // area, those holes are owned by SELF. Holes in the outgoing area // are owned by the CALLEE. Holes should not be nessecary in the // incoming area, as the Java calling convention is completely under // the control of the AD file. Doubles can be sorted and packed to // avoid holes. Holes in the outgoing arguments may be nessecary for // varargs C calling conventions. -// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is +// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is // even aligned with pad0 as needed. // Region 6 is even aligned. Region 6-7 is NOT even aligned; // region 6-11 is even aligned; it may be padded out more so that @@ -4469,20 +4538,20 @@ // What direction does stack grow in (assumed to be same for C & Java) stack_direction(TOWARDS_LOW); - // These three registers define part of the calling convention + // These three registers define part of the calling convention // between compiled code and the interpreter. - inline_cache_reg(EAX); // Inline Cache Register + inline_cache_reg(EAX); // Inline Cache Register interpreter_method_oop_reg(EBX); // Method Oop Register when calling interpreter // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset] - cisc_spilling_operand_name(indOffset32); + cisc_spilling_operand_name(indOffset32); // Number of stack slots consumed by locking an object sync_stack_slots(1); // Compiled code's Frame Pointer frame_pointer(ESP); - // Interpreter stores its frame pointer in a register which is + // Interpreter stores its frame pointer in a register which is // stored to the stack by I2CAdaptors. // I2CAdaptors convert from interpreted java to compiled java. interpreter_frame_pointer(EBP); @@ -4491,7 +4560,7 @@ // Alignment size in bytes (128-bit -> 16 bytes) stack_alignment(StackAlignmentInBytes); - // Number of stack slots between incoming argument block and the start of + // Number of stack slots between incoming argument block and the start of // a new frame. The PROLOG must add this many slots to the stack. The // EPILOG must remove this many slots. Intel needs one slot for // return address and one for rbp, (must save rbp) @@ -4507,7 +4576,7 @@ // stack slot. // Ret Addr is on stack in slot 0 if no locks or verification or alignment. 
// Otherwise, it is above the locks and verification slot and alignment word - return_addr(STACK - 1 + + return_addr(STACK - 1 + round_to(1+VerifyStackAtCalls+ Compile::current()->fixed_slots(), (StackAlignmentInBytes/wordSize))); @@ -4518,7 +4587,7 @@ // offsets are based on outgoing arguments, i.e. a CALLER setting up // arguments for a CALLEE. Incoming stack arguments are // automatically biased by the preserve_stack_slots field above. - calling_convention %{ + calling_convention %{ // No difference between ingoing/outgoing just pass false SharedRuntime::java_calling_convention(sig_bt, regs, length, false); %} @@ -4530,7 +4599,7 @@ // offsets are based on outgoing arguments, i.e. a CALLER setting up // arguments for a CALLEE. Incoming stack arguments are // automatically biased by the preserve_stack_slots field above. - c_calling_convention %{ + c_calling_convention %{ // This is obviously always outgoing (void) SharedRuntime::c_calling_convention(sig_bt, regs, length); %} @@ -4538,8 +4607,8 @@ // Location of C & interpreter return values c_return_value %{ assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); - static int lo[Op_RegL+1] = { 0, 0, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num }; - static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num }; + static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num }; + static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num }; // in SSE2+ mode we want to keep the FPU stack clean so pretend // that C functions return float and double results in XMM0. @@ -4554,8 +4623,8 @@ // Location of return values return_value %{ assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); - static int lo[Op_RegL+1] = { 0, 0, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num }; - static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num }; + static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num }; + static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num }; if( ideal_reg == Op_RegD && UseSSE>=2 ) return OptoRegPair(XMM0b_num,XMM0a_num); if( ideal_reg == Op_RegF && UseSSE>=1 ) @@ -4682,6 +4751,33 @@ interface(CONST_INTER); %} +operand immI_1() %{ + predicate( n->get_int() == 1 ); + match(ConI); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + +operand immI_2() %{ + predicate( n->get_int() == 2 ); + match(ConI); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + +operand immI_3() %{ + predicate( n->get_int() == 3 ); + match(ConI); + + op_cost(0); + format %{ %} + interface(CONST_INTER); +%} + // Pointer Immediate operand immP() %{ match(ConP); @@ -4720,6 +4816,16 @@ interface(CONST_INTER); %} +// Long Immediate zero +operand immL_M1() %{ + predicate( n->get_long() == -1L ); + match(ConL); + op_cost(0); + + format %{ %} + interface(CONST_INTER); +%} + // Long immediate from 0 to 127. // Used for a shorter form of long mul by 10. 
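The return_value tables above pair EAX with EDX for Op_RegL because a 64-bit Java long comes back from a call split across two 32-bit registers on IA32. A trivial sketch of that split and reassembly; nothing here is HotSpot API, just the arithmetic behind the lo/hi table entries.

#include <cassert>
#include <cstdint>

// How a jlong maps onto the EDX:EAX pair named by the lo[]/hi[] tables above.
struct RegPair { uint32_t eax; uint32_t edx; };

RegPair split_jlong(int64_t v) {
  RegPair p;
  p.eax = static_cast<uint32_t>(v);                           // low word  -> EAX
  p.edx = static_cast<uint32_t>(static_cast<uint64_t>(v) >> 32); // high word -> EDX
  return p;
}

int64_t join_jlong(RegPair p) {
  return static_cast<int64_t>((static_cast<uint64_t>(p.edx) << 32) | p.eax);
}

int main() {
  int64_t v = -1234567890123LL;
  assert(join_jlong(split_jlong(v)) == v);
  return 0;
}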
operand immL_127() %{ @@ -4763,7 +4869,7 @@ interface(CONST_INTER); %} -// Double Immediate +// Double Immediate operand immD1() %{ predicate( UseSSE<=1 && n->getd() == 1.0 ); match(ConD); @@ -4986,12 +5092,12 @@ %} // // This operand was used by cmpFastUnlock, but conflicted with 'object' reg -// // +// // operand eSIRegI(xRegI reg) %{ constraint(ALLOC_IN_RC(esi_reg)); match(reg); match(eRegI); - + format %{ "ESI" %} interface(REG_INTER); %} @@ -5172,6 +5278,15 @@ interface(REG_INTER); %} +operand eFlagsRegUCF() %{ + constraint(ALLOC_IN_RC(int_flags)); + match(RegFlags); + predicate(false); + + format %{ "EFLAGS_U_CF" %} + interface(REG_INTER); +%} + // Condition Code Register used by long compare operand flagsReg_long_LTGE() %{ constraint(ALLOC_IN_RC(int_flags)); @@ -5387,7 +5502,7 @@ // // Indirect Memory Times Scale Plus Offset Operand // operand indScaleOffset(immP off, eRegI ireg, immI2 scale) %{ // match(AddP off (LShiftI ireg scale)); -// +// // op_cost(10); // format %{"[$off + $ireg << $scale]" %} // interface(MEMORY_INTER) %{ @@ -5398,7 +5513,7 @@ // %} // %} -// Indirect Memory Times Scale Plus Index Register +// Indirect Memory Times Scale Plus Index Register operand indIndexScale(eRegP reg, eRegI ireg, immI2 scale) %{ match(AddP reg (LShiftI ireg scale)); @@ -5427,7 +5542,7 @@ %} //----------Load Long Memory Operands------------------------------------------ -// The load-long idiom will use it's address expression again after loading +// The load-long idiom will use it's address expression again after loading // the first word of the long. If the load-long destination overlaps with // registers used in the addressing expression, the 2nd half will be loaded // from a clobbered address. Fix this by requiring that load-long use @@ -5599,7 +5714,7 @@ %} %} -// Indirect Memory Times Scale Plus Index Register +// Indirect Memory Times Scale Plus Index Register operand indIndexScale_win95_safe(eRegP_no_EBP reg, eRegI ireg, immI2 scale) %{ match(AddP reg (LShiftI ireg scale)); @@ -5649,12 +5764,12 @@ format %{ "" %} interface(COND_INTER) %{ - equal(0x4); - not_equal(0x5); - less(0xC); - greater_equal(0xD); - less_equal(0xE); - greater(0xF); + equal(0x4, "e"); + not_equal(0x5, "ne"); + less(0xC, "l"); + greater_equal(0xD, "ge"); + less_equal(0xE, "le"); + greater(0xF, "g"); %} %} @@ -5666,12 +5781,47 @@ format %{ "" %} interface(COND_INTER) %{ - equal(0x4); - not_equal(0x5); - less(0x2); - greater_equal(0x3); - less_equal(0x6); - greater(0x7); + equal(0x4, "e"); + not_equal(0x5, "ne"); + less(0x2, "b"); + greater_equal(0x3, "nb"); + less_equal(0x6, "be"); + greater(0x7, "nbe"); + %} +%} + +// Floating comparisons that don't require any fixup for the unordered case +operand cmpOpUCF() %{ + match(Bool); + predicate(n->as_Bool()->_test._test == BoolTest::lt || + n->as_Bool()->_test._test == BoolTest::ge || + n->as_Bool()->_test._test == BoolTest::le || + n->as_Bool()->_test._test == BoolTest::gt); + format %{ "" %} + interface(COND_INTER) %{ + equal(0x4, "e"); + not_equal(0x5, "ne"); + less(0x2, "b"); + greater_equal(0x3, "nb"); + less_equal(0x6, "be"); + greater(0x7, "nbe"); + %} +%} + + +// Floating comparisons that can be fixed up with extra conditional jumps +operand cmpOpUCF2() %{ + match(Bool); + predicate(n->as_Bool()->_test._test == BoolTest::ne || + n->as_Bool()->_test._test == BoolTest::eq); + format %{ "" %} + interface(COND_INTER) %{ + equal(0x4, "e"); + not_equal(0x5, "ne"); + less(0x2, "b"); + greater_equal(0x3, "nb"); + less_equal(0x6, "be"); + greater(0x7, "nbe"); %} %} 
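The condition-code interfaces above now carry both the numeric nibble and the jcc suffix (0x4/"e", 0x5/"ne", 0x2/"b", and so on). The nibble is what gets folded into the opcode: a short Jcc is 0x70+cc, a near Jcc is 0x0F 0x80+cc, and SETcc is 0x0F 0x90+cc. A small table-driven sketch of that relationship:

#include <cstdint>
#include <cstdio>

// The signed-compare nibbles from cmpOp and the unsigned ones from cmpOpU,
// paired with the mnemonic suffix the new interface strings spell out.
struct CC { uint8_t nibble; const char* suffix; };

static const CC signed_cc[]   = { {0x4,"e"}, {0x5,"ne"}, {0xC,"l"},
                                  {0xD,"ge"}, {0xE,"le"}, {0xF,"g"} };
static const CC unsigned_cc[] = { {0x4,"e"}, {0x5,"ne"}, {0x2,"b"},
                                  {0x3,"nb"}, {0x6,"be"}, {0x7,"nbe"} };

// The nibble plugs straight into the opcode bytes.
uint8_t  jcc_short(uint8_t cc) { return 0x70 + cc; }    // Jcc rel8
uint16_t jcc_near (uint8_t cc) { return 0x0F80 + cc; }  // 0F 8x: Jcc rel32
uint16_t setcc    (uint8_t cc) { return 0x0F90 + cc; }  // 0F 9x: SETcc r/m8

int main() {
  for (const CC& c : signed_cc)
    std::printf("j%-3s short=0x%02X near=0x%04X set=0x%04X\n",
                c.suffix, jcc_short(c.nibble), jcc_near(c.nibble), setcc(c.nibble));
  (void)unsigned_cc;  // same opcode arithmetic, different nibbles
  return 0;
}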
@@ -5696,12 +5846,12 @@ format %{ "" %} interface(COND_INTER) %{ - equal(0x4); - not_equal(0x5); - less(0xF); - greater_equal(0xE); - less_equal(0xD); - greater(0xC); + equal(0x4, "e"); + not_equal(0x5, "ne"); + less(0xF, "g"); + greater_equal(0xE, "le"); + less_equal(0xD, "ge"); + greater(0xC, "l"); %} %} @@ -5715,7 +5865,7 @@ opclass memory(direct, indirect, indOffset8, indOffset32, indOffset32X, indIndexOffset, indIndex, indIndexScale, indIndexScaleOffset); -// Long memory operations are encoded in 2 instructions and a +4 offset. +// Long memory operations are encoded in 2 instructions and a +4 offset. // This means some kind of offset is always required and you cannot use // an oop as the offset (done when working on static globals). opclass long_memory(direct, indirect, indOffset8, indOffset32, indIndexOffset, @@ -5744,11 +5894,11 @@ // Generic P2/P3 pipeline // 3 decoders, only D0 handles big operands; a "bundle" is the limit of // 3 instructions decoded per cycle. -// 2 load/store ops per cycle, 1 branch, 1 FPU, +// 2 load/store ops per cycle, 1 branch, 1 FPU, // 2 ALU op, only ALU0 handles mul/div instructions. -resources( D0, D1, D2, DECODE = D0 | D1 | D2, - MS0, MS1, MEM = MS0 | MS1, - BR, FPU, +resources( D0, D1, D2, DECODE = D0 | D1 | D2, + MS0, MS1, MEM = MS0 | MS1, + BR, FPU, ALU0, ALU1, ALU = ALU0 | ALU1 ); //----------PIPELINE DESCRIPTION----------------------------------------------- @@ -5865,7 +6015,7 @@ %{ single_instruction; mem : S3(read); - D0 : S0; // big decoder only + D0 : S0; // big decoder only MEM : S3; // any mem %} @@ -6196,21 +6346,21 @@ %} //----------INSTRUCTIONS------------------------------------------------------- -// -// match -- States which machine-independent subtree may be replaced +// +// match -- States which machine-independent subtree may be replaced // by this instruction. // ins_cost -- The estimated cost of this instruction is used by instruction -// selection to identify a minimum cost tree of machine -// instructions that matches a tree of machine-independent +// selection to identify a minimum cost tree of machine +// instructions that matches a tree of machine-independent // instructions. // format -- A string providing the disassembly for this instruction. -// The value of an instruction's operand may be inserted +// The value of an instruction's operand may be inserted // by referring to it with a '$' prefix. -// opcode -- Three instruction opcodes may be provided. These are referred +// opcode -- Three instruction opcodes may be provided. These are referred // to within an encode class as $primary, $secondary, and $tertiary -// respectively. The primary opcode is commonly used to -// indicate the type of machine instruction, while secondary -// and tertiary are often used for prefix options or addressing +// respectively. The primary opcode is commonly used to +// indicate the type of machine instruction, while secondary +// and tertiary are often used for prefix options or addressing // modes. // ins_encode -- A list of encode classes with parameters. 
The encode class // name must have been defined in an 'enc_class' specification @@ -6225,15 +6375,15 @@ ins_encode( OpcP, OpcSReg(dst) ); ins_pipe( ialu_reg ); %} - + instruct bytes_reverse_long(eRegL dst) %{ match(Set dst (ReverseBytesL dst)); format %{ "BSWAP $dst.lo\n\t" - "BSWAP $dst.hi\n\t" + "BSWAP $dst.hi\n\t" "XCHG $dst.lo $dst.hi" %} - ins_cost(125); + ins_cost(125); ins_encode( bswap_long_bytes(dst) ); ins_pipe( ialu_reg_reg); %} @@ -6285,7 +6435,7 @@ ins_pipe( ialu_reg_mem ); %} -// Load Long. Cannot clobber address while loading, so restrict address +// Load Long. Cannot clobber address while loading, so restrict address // register to ESI instruct loadL(eRegL dst, load_long_memory mem) %{ predicate(!((LoadLNode*)n)->require_atomic_access()); @@ -6300,7 +6450,7 @@ %} // Volatile Load Long. Must be atomic, so do 64-bit FILD -// then store it down to the stack and reload on the int +// then store it down to the stack and reload on the int // side. instruct loadL_volatile(stackSlotL dst, memory mem) %{ predicate(UseSSE<=1 && ((LoadLNode*)n)->require_atomic_access()); @@ -6554,7 +6704,7 @@ instruct loadConI0(eRegI dst, immI0 src, eFlagsReg cr) %{ match(Set dst src); effect(KILL cr); - + ins_cost(50); format %{ "XOR $dst,$dst" %} opcode(0x33); /* + rd */ @@ -6660,7 +6810,7 @@ format %{ "MOV $dst,$src" %} opcode(0x8B); ins_encode( OpcP, RegMem(dst,src)); - ins_pipe( ialu_reg_mem ); + ins_pipe( ialu_reg_mem ); %} instruct loadSSL(eRegL dst, stackSlotL src) %{ @@ -6682,7 +6832,7 @@ format %{ "MOV $dst,$src" %} opcode(0x8B); ins_encode( OpcP, RegMem(dst,src)); - ins_pipe( ialu_reg_mem ); + ins_pipe( ialu_reg_mem ); %} // Load Stack Slot @@ -6695,7 +6845,7 @@ opcode(0xD9); /* D9 /0, FLD m32real */ ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src), Pop_Reg_F(dst) ); - ins_pipe( fpu_reg_mem ); + ins_pipe( fpu_reg_mem ); %} // Load Stack Slot @@ -6708,7 +6858,7 @@ opcode(0xDD); /* DD /0, FLD m64real */ ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src), Pop_Reg_D(dst) ); - ins_pipe( fpu_reg_mem ); + ins_pipe( fpu_reg_mem ); %} // Prefetch instructions. 
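
The loadL_volatile comment above is the key constraint: two independent 32-bit MOVs can be torn by a concurrent writer, so the volatile form goes through a single 8-byte FILD and a stack round-trip instead. A rough standalone C++ analogue of the guarantee (illustrative; how std::atomic maps to FILD/FISTP or LOCK CMPXCHG8B on IA-32 is compiler-dependent):

    #include <atomic>
    #include <cstdint>

    // The 8 bytes must be read as one indivisible access, never as two
    // separate 32-bit loads that a concurrent store could interleave with.
    int64_t read_volatile_long(const std::atomic<int64_t>& field) {
      return field.load(std::memory_order_seq_cst);
    }

    int main() {
      std::atomic<int64_t> field(0x123456789abcdef0LL);
      return read_volatile_long(field) == 0x123456789abcdef0LL ? 0 : 1;
    }
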
@@ -6832,7 +6982,7 @@ format %{ "MOV8 $mem,$src" %} opcode(0x88); ins_encode( OpcP, RegMem( src, mem ) ); - ins_pipe( ialu_mem_reg ); + ins_pipe( ialu_mem_reg ); %} // Store Char/Short @@ -6843,7 +6993,7 @@ format %{ "MOV16 $mem,$src" %} opcode(0x89, 0x66); ins_encode( OpcS, OpcP, RegMem( src, mem ) ); - ins_pipe( ialu_mem_reg ); + ins_pipe( ialu_mem_reg ); %} // Store Integer @@ -6854,7 +7004,7 @@ format %{ "MOV $mem,$src" %} opcode(0x89); ins_encode( OpcP, RegMem( src, mem ) ); - ins_pipe( ialu_mem_reg ); + ins_pipe( ialu_mem_reg ); %} // Store Long @@ -6923,7 +7073,7 @@ format %{ "MOV $mem,$src" %} opcode(0x89); ins_encode( OpcP, RegMem( src, mem ) ); - ins_pipe( ialu_mem_reg ); + ins_pipe( ialu_mem_reg ); %} // Store Integer Immediate @@ -6934,7 +7084,7 @@ format %{ "MOV $mem,$src" %} opcode(0xC7); /* C7 /0 */ ins_encode( OpcP, RMopc_Mem(0x00,mem), Con32( src )); - ins_pipe( ialu_mem_imm ); + ins_pipe( ialu_mem_imm ); %} // Store Short/Char Immediate @@ -6946,7 +7096,7 @@ format %{ "MOV16 $mem,$src" %} opcode(0xC7); /* C7 /0 Same as 32 store immediate with prefix */ ins_encode( SizePrefix, OpcP, RMopc_Mem(0x00,mem), Con16( src )); - ins_pipe( ialu_mem_imm ); + ins_pipe( ialu_mem_imm ); %} // Store Pointer Immediate; null pointers or constant oops that do not @@ -6958,7 +7108,7 @@ format %{ "MOV $mem,$src" %} opcode(0xC7); /* C7 /0 */ ins_encode( OpcP, RMopc_Mem(0x00,mem), Con32( src )); - ins_pipe( ialu_mem_imm ); + ins_pipe( ialu_mem_imm ); %} // Store Byte Immediate @@ -6969,7 +7119,7 @@ format %{ "MOV8 $mem,$src" %} opcode(0xC6); /* C6 /0 */ ins_encode( OpcP, RMopc_Mem(0x00,mem), Con8or32( src )); - ins_pipe( ialu_mem_imm ); + ins_pipe( ialu_mem_imm ); %} // Store Aligned Packed Byte XMM register to memory @@ -7010,7 +7160,7 @@ format %{ "MOV8 $mem,$src\t! 
CMS card-mark imm0" %} opcode(0xC6); /* C6 /0 */ ins_encode( OpcP, RMopc_Mem(0x00,mem), Con8or32( src )); - ins_pipe( ialu_mem_imm ); + ins_pipe( ialu_mem_imm ); %} // Store Double @@ -7257,7 +7407,7 @@ ins_pipe( pipe_cmov_reg ); %} -instruct cmovI_regU( eRegI dst, eRegI src, eFlagsRegU cr, cmpOpU cop ) %{ +instruct cmovI_regU( cmpOpU cop, eFlagsRegU cr, eRegI dst, eRegI src ) %{ predicate(VM_Version::supports_cmov() ); match(Set dst (CMoveI (Binary cop cr) (Binary dst src))); ins_cost(200); @@ -7267,6 +7417,15 @@ ins_pipe( pipe_cmov_reg ); %} +instruct cmovI_regUCF( cmpOpUCF cop, eFlagsRegUCF cr, eRegI dst, eRegI src ) %{ + predicate(VM_Version::supports_cmov() ); + match(Set dst (CMoveI (Binary cop cr) (Binary dst src))); + ins_cost(200); + expand %{ + cmovI_regU(cop, cr, dst, src); + %} +%} + // Conditional move instruct cmovI_mem(cmpOp cop, eFlagsReg cr, eRegI dst, memory src) %{ predicate(VM_Version::supports_cmov() ); @@ -7279,7 +7438,7 @@ %} // Conditional move -instruct cmovI_memu(cmpOpU cop, eFlagsRegU cr, eRegI dst, memory src) %{ +instruct cmovI_memU(cmpOpU cop, eFlagsRegU cr, eRegI dst, memory src) %{ predicate(VM_Version::supports_cmov() ); match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src)))); ins_cost(250); @@ -7289,6 +7448,15 @@ ins_pipe( pipe_cmov_mem ); %} +instruct cmovI_memUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegI dst, memory src) %{ + predicate(VM_Version::supports_cmov() ); + match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src)))); + ins_cost(250); + expand %{ + cmovI_memU(cop, cr, dst, src); + %} +%} + // Conditional move instruct cmovP_reg(eRegP dst, eRegP src, eFlagsReg cr, cmpOp cop ) %{ predicate(VM_Version::supports_cmov() ); @@ -7316,7 +7484,7 @@ %} // Conditional move -instruct cmovP_regU(eRegP dst, eRegP src, eFlagsRegU cr, cmpOpU cop ) %{ +instruct cmovP_regU(cmpOpU cop, eFlagsRegU cr, eRegP dst, eRegP src ) %{ predicate(VM_Version::supports_cmov() ); match(Set dst (CMoveP (Binary cop cr) (Binary dst src))); ins_cost(200); @@ -7326,6 +7494,15 @@ ins_pipe( pipe_cmov_reg ); %} +instruct cmovP_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegP dst, eRegP src ) %{ + predicate(VM_Version::supports_cmov() ); + match(Set dst (CMoveP (Binary cop cr) (Binary dst src))); + ins_cost(200); + expand %{ + cmovP_regU(cop, cr, dst, src); + %} +%} + // DISABLED: Requires the ADLC to emit a bottom_type call that // correctly meets the two pointer arguments; one is an incoming // register but the other is a memory operand. 
ALSO appears to @@ -7455,6 +7632,15 @@ ins_pipe( pipe_slow ); %} +instruct fcmovX_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, regX dst, regX src) %{ + predicate (UseSSE>=1); + match(Set dst (CMoveF (Binary cop cr) (Binary dst src))); + ins_cost(200); + expand %{ + fcmovX_regU(cop, cr, dst, src); + %} +%} + // unsigned version instruct fcmovXD_regU(cmpOpU cop, eFlagsRegU cr, regXD dst, regXD src) %{ predicate (UseSSE>=2); @@ -7473,6 +7659,15 @@ ins_pipe( pipe_slow ); %} +instruct fcmovXD_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, regXD dst, regXD src) %{ + predicate (UseSSE>=2); + match(Set dst (CMoveD (Binary cop cr) (Binary dst src))); + ins_cost(200); + expand %{ + fcmovXD_regU(cop, cr, dst, src); + %} +%} + instruct cmovL_reg(cmpOp cop, eFlagsReg cr, eRegL dst, eRegL src) %{ predicate(VM_Version::supports_cmov() ); match(Set dst (CMoveL (Binary cop cr) (Binary dst src))); @@ -7480,7 +7675,7 @@ format %{ "CMOV$cop $dst.lo,$src.lo\n\t" "CMOV$cop $dst.hi,$src.hi" %} opcode(0x0F,0x40); - ins_encode( enc_cmov(cop), RegReg_Lo2( dst, src ), enc_cmov(cop), RegReg_Hi2( dst, src ) ); + ins_encode( enc_cmov(cop), RegReg_Lo2( dst, src ), enc_cmov(cop), RegReg_Hi2( dst, src ) ); ins_pipe( pipe_cmov_reg_long ); %} @@ -7491,10 +7686,19 @@ format %{ "CMOV$cop $dst.lo,$src.lo\n\t" "CMOV$cop $dst.hi,$src.hi" %} opcode(0x0F,0x40); - ins_encode( enc_cmov(cop), RegReg_Lo2( dst, src ), enc_cmov(cop), RegReg_Hi2( dst, src ) ); + ins_encode( enc_cmov(cop), RegReg_Lo2( dst, src ), enc_cmov(cop), RegReg_Hi2( dst, src ) ); ins_pipe( pipe_cmov_reg_long ); %} +instruct cmovL_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegL dst, eRegL src) %{ + predicate(VM_Version::supports_cmov() ); + match(Set dst (CMoveL (Binary cop cr) (Binary dst src))); + ins_cost(200); + expand %{ + cmovL_regU(cop, cr, dst, src); + %} +%} + //----------Arithmetic Instructions-------------------------------------------- //----------Addition Instructions---------------------------------------------- // Integer Addition Instructions @@ -7726,33 +7930,36 @@ ins_pipe( pipe_cmpxchg ); %} -// Conditional-store of a long value -// Returns a boolean value (0/1) on success. Implemented with a CMPXCHG8 on Intel. -// mem_ptr can actually be in either ESI or EDI -instruct storeLConditional( eRegI res, eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{ - match(Set res (StoreLConditional mem_ptr (Binary oldval newval))); - effect(KILL cr); - // EDX:EAX is killed if there is contention, but then it's also unused. - // In the common case of no contention, EDX:EAX holds the new oop address. - format %{ "CMPXCHG8 [$mem_ptr],$newval\t# If EDX:EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" - "MOV $res,0\n\t" - "JNE,s fail\n\t" - "MOV $res,1\n" - "fail:" %} - ins_encode( enc_cmpxchg8(mem_ptr), - enc_flags_ne_to_boolean(res) ); +// Conditional-store of an int value. +// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel. +instruct storeIConditional( memory mem, eAXRegI oldval, eRegI newval, eFlagsReg cr ) %{ + match(Set cr (StoreIConditional mem (Binary oldval newval))); + effect(KILL oldval); + format %{ "CMPXCHG $mem,$newval\t# If EAX==$mem Then store $newval into $mem" %} + ins_encode( lock_prefix, Opcode(0x0F), Opcode(0xB1), RegMem(newval, mem) ); ins_pipe( pipe_cmpxchg ); %} -// Conditional-store of a long value -// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG8 on Intel. 
-// mem_ptr can actually be in either ESI or EDI -instruct storeLConditional_flags( eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr, immI0 zero ) %{ - match(Set cr (CmpI (StoreLConditional mem_ptr (Binary oldval newval)) zero)); - // EDX:EAX is killed if there is contention, but then it's also unused. - // In the common case of no contention, EDX:EAX holds the new oop address. - format %{ "CMPXCHG8 [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" %} - ins_encode( enc_cmpxchg8(mem_ptr) ); +// Conditional-store of a long value. +// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG8 on Intel. +instruct storeLConditional( memory mem, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{ + match(Set cr (StoreLConditional mem (Binary oldval newval))); + effect(KILL oldval); + format %{ "XCHG EBX,ECX\t# correct order for CMPXCHG8 instruction\n\t" + "CMPXCHG8 $mem,ECX:EBX\t# If EDX:EAX==$mem Then store ECX:EBX into $mem\n\t" + "XCHG EBX,ECX" + %} + ins_encode %{ + // Note: we need to swap rbx, and rcx before and after the + // cmpxchg8 instruction because the instruction uses + // rcx as the high order word of the new value to store but + // our register encoding uses rbx. + __ xchgl(as_Register(EBX_enc), as_Register(ECX_enc)); + if( os::is_MP() ) + __ lock(); + __ cmpxchg8(Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp)); + __ xchgl(as_Register(EBX_enc), as_Register(ECX_enc)); + %} ins_pipe( pipe_cmpxchg ); %} @@ -7979,7 +8186,7 @@ effect(KILL flags); ins_cost(300); - format %{ "MUL $dst,$src1" %} + format %{ "MUL $dst,$src1" %} ins_encode( long_uint_multiply(dst, src1) ); ins_pipe( ialu_reg_reg_alu0 ); @@ -8219,6 +8426,7 @@ ins_pipe( ialu_reg ); %} + // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24. // This idiom is used by the compiler for the i2b bytecode. 
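
The rewritten storeLConditional above is built around the CMPXCHG8B register contract: EDX:EAX carries the expected value, ECX:EBX the replacement, and ZF reports success, which is why the encoding brackets the instruction with XCHG EBX,ECX (the adlc operand pair keeps the new value's high word in EBX). The same compare-and-swap contract modelled in standalone C++ with the GCC/Clang builtin rather than hand-written assembly (illustrative only):

    #include <cstdint>

    // Returns true (and stores newval) only if *mem still holds expected;
    // on IA-32 this builtin typically lowers to LOCK CMPXCHG8B.
    bool cas64(volatile int64_t* mem, int64_t expected, int64_t newval) {
      return __sync_bool_compare_and_swap(mem, expected, newval);
    }

    int main() {
      int64_t v = 1;
      bool ok    = cas64(&v, 1, 2);   // succeeds, v becomes 2
      bool stale = cas64(&v, 1, 3);   // fails, v stays 2
      return (ok && !stale && v == 2) ? 0 : 1;
    }
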
instruct i2b(eRegI dst, xRegI src, immI_24 twentyfour, eFlagsReg cr) %{ @@ -8336,6 +8544,18 @@ ins_pipe( ialu_reg_reg ); %} +instruct orI_eReg_castP2X(eRegI dst, eRegP src, eFlagsReg cr) %{ + match(Set dst (OrI dst (CastP2X src))); + effect(KILL cr); + + size(2); + format %{ "OR $dst,$src" %} + opcode(0x0B); + ins_encode( OpcP, RegReg( dst, src) ); + ins_pipe( ialu_reg_reg ); +%} + + // Or Register with Immediate instruct orI_eReg_imm(eRegI dst, immI src, eFlagsReg cr) %{ match(Set dst (OrI dst src)); @@ -8385,8 +8605,8 @@ ins_pipe( ialu_mem_imm ); %} -// ROL/ROR -// ROL expand +// ROL/ROR +// ROL expand instruct rolI_eReg_imm1(eRegI dst, immI1 shift, eFlagsReg cr) %{ effect(USE_DEF dst, USE shift, KILL cr); @@ -8398,7 +8618,7 @@ instruct rolI_eReg_imm8(eRegI dst, immI8 shift, eFlagsReg cr) %{ effect(USE_DEF dst, USE shift, KILL cr); - + format %{ "ROL $dst, $shift" %} opcode(0xC1, 0x0); /*Opcode /C1 /0 */ ins_encode( RegOpcImm(dst, shift) ); @@ -8452,7 +8672,7 @@ %} %} -// ROR expand +// ROR expand instruct rorI_eReg_imm1(eRegI dst, immI1 shift, eFlagsReg cr) %{ effect(USE_DEF dst, USE shift, KILL cr); @@ -8490,7 +8710,7 @@ %} %} -// ROR 32bit by immI8 once +// ROR 32bit by immI8 once instruct rorI_eReg_i8(eRegI dst, immI8 rshift, immI8 lshift, eFlagsReg cr) %{ predicate( 0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f)); match(Set dst ( OrI (URShiftI dst rshift) (LShiftI dst lshift))); @@ -8500,7 +8720,7 @@ %} %} -// ROR 32bit var by var once +// ROR 32bit var by var once instruct rorI_eReg_Var_C0(ncxRegI dst, eCXRegI shift, immI0 zero, eFlagsReg cr) %{ match(Set dst ( OrI (URShiftI dst shift) (LShiftI dst (SubI zero shift)))); @@ -8509,7 +8729,7 @@ %} %} -// ROR 32bit var by var once +// ROR 32bit var by var once instruct rorI_eReg_Var_C32(ncxRegI dst, eCXRegI shift, immI_32 c32, eFlagsReg cr) %{ match(Set dst ( OrI (URShiftI dst shift) (LShiftI dst (SubI c32 shift)))); @@ -8531,6 +8751,18 @@ ins_pipe( ialu_reg_reg ); %} +// Xor Register with Immediate -1 +instruct xorI_eReg_im1(eRegI dst, immI_M1 imm) %{ + match(Set dst (XorI dst imm)); + + size(2); + format %{ "NOT $dst" %} + ins_encode %{ + __ notl($dst$$Register); + %} + ins_pipe( ialu_reg ); +%} + // Xor Register with Immediate instruct xorI_eReg_imm(eRegI dst, immI src, eFlagsReg cr) %{ match(Set dst (XorI dst src)); @@ -8590,7 +8822,7 @@ instruct ci2b( eRegI dst, eRegI src, eFlagsReg cr ) %{ effect( USE_DEF dst, USE src, KILL cr ); - + size(4); format %{ "NEG $dst\n\t" "ADC $dst,$src" %} @@ -8671,7 +8903,7 @@ "SBB ECX,ECX\n\t" "AND ECX,$y\n\t" "ADD $p,ECX" %} - ins_encode( enc_cmpLTP(p,q,y,tmp) ); + ins_encode( enc_cmpLTP(p,q,y,tmp) ); ins_pipe( pipe_cmplt ); %} @@ -8685,7 +8917,7 @@ "SBB ECX,ECX\n\t" "AND ECX,$y\n\t" "ADD $p,ECX" %} - ins_encode( enc_cmpLTP_mem(p,q,y,tmp) ); + ins_encode( enc_cmpLTP_mem(p,q,y,tmp) ); %} */ @@ -8848,6 +9080,18 @@ ins_pipe( ialu_reg_reg_long ); %} +// Xor Long Register with Immediate -1 +instruct xorl_eReg_im1(eRegL dst, immL_M1 imm) %{ + match(Set dst (XorL dst imm)); + format %{ "NOT $dst.lo\n\t" + "NOT $dst.hi" %} + ins_encode %{ + __ notl($dst$$Register); + __ notl(HIGH_FROM_LOW($dst$$Register)); + %} + ins_pipe( ialu_reg_long ); +%} + // Xor Long Register with Immediate instruct xorl_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{ match(Set dst (XorL dst src)); @@ -8871,6 +9115,63 @@ ins_pipe( ialu_reg_long_mem ); %} +// Shift Left Long by 1 +instruct shlL_eReg_1(eRegL dst, immI_1 cnt, eFlagsReg cr) %{ + predicate(UseNewLongLShift); + match(Set dst (LShiftL dst cnt)); + 
effect(KILL cr); + ins_cost(100); + format %{ "ADD $dst.lo,$dst.lo\n\t" + "ADC $dst.hi,$dst.hi" %} + ins_encode %{ + __ addl($dst$$Register,$dst$$Register); + __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register)); + %} + ins_pipe( ialu_reg_long ); +%} + +// Shift Left Long by 2 +instruct shlL_eReg_2(eRegL dst, immI_2 cnt, eFlagsReg cr) %{ + predicate(UseNewLongLShift); + match(Set dst (LShiftL dst cnt)); + effect(KILL cr); + ins_cost(100); + format %{ "ADD $dst.lo,$dst.lo\n\t" + "ADC $dst.hi,$dst.hi\n\t" + "ADD $dst.lo,$dst.lo\n\t" + "ADC $dst.hi,$dst.hi" %} + ins_encode %{ + __ addl($dst$$Register,$dst$$Register); + __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register)); + __ addl($dst$$Register,$dst$$Register); + __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register)); + %} + ins_pipe( ialu_reg_long ); +%} + +// Shift Left Long by 3 +instruct shlL_eReg_3(eRegL dst, immI_3 cnt, eFlagsReg cr) %{ + predicate(UseNewLongLShift); + match(Set dst (LShiftL dst cnt)); + effect(KILL cr); + ins_cost(100); + format %{ "ADD $dst.lo,$dst.lo\n\t" + "ADC $dst.hi,$dst.hi\n\t" + "ADD $dst.lo,$dst.lo\n\t" + "ADC $dst.hi,$dst.hi\n\t" + "ADD $dst.lo,$dst.lo\n\t" + "ADC $dst.hi,$dst.hi" %} + ins_encode %{ + __ addl($dst$$Register,$dst$$Register); + __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register)); + __ addl($dst$$Register,$dst$$Register); + __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register)); + __ addl($dst$$Register,$dst$$Register); + __ adcl(HIGH_FROM_LOW($dst$$Register),HIGH_FROM_LOW($dst$$Register)); + %} + ins_pipe( ialu_reg_long ); +%} + // Shift Left Long by 1-31 instruct shlL_eReg_1_31(eRegL dst, immI_1_31 cnt, eFlagsReg cr) %{ match(Set dst (LShiftL dst cnt)); @@ -9019,6 +9320,18 @@ ins_pipe( pipe_slow ); %} +instruct cmpD_cc_P6CF(eFlagsRegUCF cr, regD src1, regD src2) %{ + predicate(VM_Version::supports_cmov() && UseSSE <=1); + match(Set cr (CmpD src1 src2)); + ins_cost(150); + format %{ "FLD $src1\n\t" + "FUCOMIP ST,$src2 // P6 instruction" %} + opcode(0xDF, 0x05); /* DF E8+i or DF /5 */ + ins_encode( Push_Reg_D(src1), + OpcP, RegOpc(src2)); + ins_pipe( pipe_slow ); +%} + // Compare & branch instruct cmpD_cc(eFlagsRegU cr, regD src1, regD src2, eAXRegI rax) %{ predicate(UseSSE<=1); @@ -9046,7 +9359,7 @@ effect(KILL cr, KILL rax); ins_cost(280); format %{ "FTSTD $dst,$src1" %} - opcode(0xE4, 0xD9); + opcode(0xE4, 0xD9); ins_encode( Push_Reg_D(src1), OpcS, OpcP, PopFPU, CmpF_Result(dst)); @@ -9083,6 +9396,16 @@ ins_pipe( pipe_slow ); %} +instruct cmpXD_ccCF(eFlagsRegUCF cr, regXD dst, regXD src) %{ + predicate(UseSSE>=2); + match(Set cr (CmpD dst src)); + ins_cost(100); + format %{ "COMISD $dst,$src" %} + opcode(0x66, 0x0F, 0x2F); + ins_encode(OpcP, OpcS, Opcode(tertiary), RegReg(dst, src)); + ins_pipe( pipe_slow ); +%} + // float compare and set condition codes in EFLAGS by XMM regs instruct cmpXD_ccmem(eFlagsRegU cr, regXD dst, memory src, eAXRegI rax) %{ predicate(UseSSE>=2); @@ -9099,6 +9422,16 @@ ins_pipe( pipe_slow ); %} +instruct cmpXD_ccmemCF(eFlagsRegUCF cr, regXD dst, memory src) %{ + predicate(UseSSE>=2); + match(Set cr (CmpD dst (LoadD src))); + ins_cost(100); + format %{ "COMISD $dst,$src" %} + opcode(0x66, 0x0F, 0x2F); + ins_encode(OpcP, OpcS, Opcode(tertiary), RegMem(dst, src)); + ins_pipe( pipe_slow ); +%} + // Compare into -1,0,1 in XMM instruct cmpXD_reg(eRegI dst, regXD src1, regXD src2, eFlagsReg cr) %{ predicate(UseSSE>=2); @@ -9116,7 +9449,7 @@ "exit:" %} opcode(0x66, 0x0F, 0x2F); - 
ins_encode(Xor_Reg(dst), OpcP, OpcS, Opcode(tertiary), RegReg(src1, src2), + ins_encode(Xor_Reg(dst), OpcP, OpcS, Opcode(tertiary), RegReg(src1, src2), CmpX_Result(dst)); ins_pipe( pipe_slow ); %} @@ -9165,7 +9498,7 @@ format %{ "FLD $src2\n\t" "DSUB ST,$src1\n\t" "FSTP_D $dst\t# D-round" %} - opcode(0xD8, 0x5); + opcode(0xD8, 0x5); ins_encode( Push_Reg_D(src2), OpcP, RegOpc(src1), Pop_Mem_D(dst) ); ins_pipe( fpu_mem_reg_reg ); @@ -9289,7 +9622,7 @@ ins_cost(125); format %{ "FLD1\n\t" "DADDp $dst,ST" %} - opcode(0xDE, 0x00); + opcode(0xDE, 0x00); ins_encode( LdImmD(src), OpcP, RegOpc(dst) ); ins_pipe( fpu_reg ); @@ -9400,7 +9733,7 @@ predicate(UseSSE>=2); match(Set dst (DivD dst src)); format %{ "DIVSD $dst,$src" %} - opcode(0xF2, 0x0F, 0x5E); + opcode(0xF2, 0x0F, 0x5E); ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x5E), RegReg(dst, src)); ins_pipe( pipe_slow ); %} @@ -9434,14 +9767,14 @@ ins_pipe( fpu_reg_reg ); %} -// Strict FP instruction biases argument before multiply then +// Strict FP instruction biases argument before multiply then // biases result to avoid double rounding of subnormals. -// +// // scale arg1 by multiplying arg1 by 2^(-15360) // load arg2 // multiply scaled arg1 by arg2 // rescale product by 2^(15360) -// +// instruct strictfp_mulD_reg(regDPR1 dst, regnotDPR1 src) %{ predicate( UseSSE<=1 && Compile::current()->has_method() && Compile::current()->method()->is_strict() ); match(Set dst (MulD dst src)); @@ -9486,7 +9819,7 @@ ins_pipe( fpu_reg_mem ); %} -// +// // Cisc-alternate to reg-reg multiply instruct mulD_reg_mem_cisc(regD dst, regD src, memory mem) %{ predicate( UseSSE<=1 ); @@ -9504,8 +9837,8 @@ // MACRO3 -- addD a mulD -// This instruction is a '2-address' instruction in that the result goes -// back to src2. This eliminates a move from the macro; possibly the +// This instruction is a '2-address' instruction in that the result goes +// back to src2. This eliminates a move from the macro; possibly the // register allocator will have to add it back (and maybe not). instruct addD_mulD_reg(regD src2, regD src1, regD src0) %{ predicate( UseSSE<=1 ); @@ -9550,14 +9883,14 @@ ins_pipe( fpu_reg_reg ); %} -// Strict FP instruction biases argument before division then +// Strict FP instruction biases argument before division then // biases result, to avoid double rounding of subnormals. 
-// +// // scale dividend by multiplying dividend by 2^(-15360) // load divisor // divide scaled dividend by divisor // rescale quotient by 2^(15360) -// +// instruct strictfp_divD_reg(regDPR1 dst, regnotDPR1 src) %{ predicate (UseSSE<=1); match(Set dst (DivD dst src)); @@ -9678,7 +10011,7 @@ match(Set dst(TanD src)); format %{ "DTAN $dst" %} ins_encode( Opcode(0xD9), Opcode(0xF2), // fptan - Opcode(0xDD), Opcode(0xD8)); // fstp st + Opcode(0xDD), Opcode(0xD8)); // fstp st ins_pipe( pipe_slow ); %} @@ -9986,6 +10319,18 @@ ins_pipe( pipe_slow ); %} +instruct cmpF_cc_P6CF(eFlagsRegUCF cr, regF src1, regF src2) %{ + predicate(VM_Version::supports_cmov() && UseSSE == 0); + match(Set cr (CmpF src1 src2)); + ins_cost(100); + format %{ "FLD $src1\n\t" + "FUCOMIP ST,$src2 // P6 instruction" %} + opcode(0xDF, 0x05); /* DF E8+i or DF /5 */ + ins_encode( Push_Reg_D(src1), + OpcP, RegOpc(src2)); + ins_pipe( pipe_slow ); +%} + // Compare & branch instruct cmpF_cc(eFlagsRegU cr, regF src1, regF src2, eAXRegI rax) %{ @@ -10014,7 +10359,7 @@ effect(KILL cr, KILL rax); ins_cost(280); format %{ "FTSTF $dst,$src1" %} - opcode(0xE4, 0xD9); + opcode(0xE4, 0xD9); ins_encode( Push_Reg_D(src1), OpcS, OpcP, PopFPU, CmpF_Result(dst)); @@ -10051,6 +10396,16 @@ ins_pipe( pipe_slow ); %} +instruct cmpX_ccCF(eFlagsRegUCF cr, regX dst, regX src) %{ + predicate(UseSSE>=1); + match(Set cr (CmpF dst src)); + ins_cost(100); + format %{ "COMISS $dst,$src" %} + opcode(0x0F, 0x2F); + ins_encode(OpcP, OpcS, RegReg(dst, src)); + ins_pipe( pipe_slow ); +%} + // float compare and set condition codes in EFLAGS by XMM regs instruct cmpX_ccmem(eFlagsRegU cr, regX dst, memory src, eAXRegI rax) %{ predicate(UseSSE>=1); @@ -10067,6 +10422,16 @@ ins_pipe( pipe_slow ); %} +instruct cmpX_ccmemCF(eFlagsRegUCF cr, regX dst, memory src) %{ + predicate(UseSSE>=1); + match(Set cr (CmpF dst (LoadF src))); + ins_cost(100); + format %{ "COMISS $dst,$src" %} + opcode(0x0F, 0x2F); + ins_encode(OpcP, OpcS, RegMem(dst, src)); + ins_pipe( pipe_slow ); +%} + // Compare into -1,0,1 in XMM instruct cmpX_reg(eRegI dst, regX src1, regX src2, eFlagsReg cr) %{ predicate(UseSSE>=1); @@ -10121,7 +10486,7 @@ Pop_Mem_F(dst) ); ins_pipe( fpu_mem_reg_reg ); %} -// +// // This instruction does not round to 24-bits instruct subF_reg(regF dst, regF src) %{ predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr()); @@ -10146,7 +10511,7 @@ Pop_Mem_F(dst) ); ins_pipe( fpu_mem_reg_reg ); %} -// +// // This instruction does not round to 24-bits instruct addF_reg(regF dst, regF src) %{ predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr()); @@ -10345,7 +10710,7 @@ Pop_Mem_F(dst) ); ins_pipe( fpu_mem_reg_mem ); %} -// +// // Cisc-alternate to addF_reg // This instruction does not round to 24-bits instruct addF_reg_mem(regF dst, memory src) %{ @@ -10416,7 +10781,7 @@ Pop_Mem_F(dst)); ins_pipe( fpu_mem_reg_con ); %} -// +// // This instruction does not round to 24-bits instruct addF_reg_imm(regF dst, regF src1, immF src2) %{ predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr()); @@ -10445,7 +10810,7 @@ Pop_Mem_F(dst) ); ins_pipe( fpu_mem_reg_reg ); %} -// +// // This instruction does not round to 24-bits instruct mulF_reg(regF dst, regF src1, regF src2) %{ predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr()); @@ -10477,7 +10842,7 @@ Pop_Mem_F(dst) ); ins_pipe( fpu_mem_reg_mem ); %} -// +// // This instruction does not round to 24-bits // Cisc-alternate to reg-reg multiply instruct mulF_reg_mem(regF dst, regF src1, memory src2) %{ 
@@ -10518,7 +10883,7 @@ Pop_Mem_F(dst)); ins_pipe( fpu_mem_reg_con ); %} -// +// // This instruction does not round to 24-bits instruct mulF_reg_imm(regF dst, regF src1, immF src2) %{ predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr()); @@ -10533,7 +10898,7 @@ %} -// +// // MACRO1 -- subsume unshared load into mulF // This instruction does not round to 24-bits instruct mulF_reg_load1(regF dst, regF src, memory mem1 ) %{ @@ -10549,7 +10914,7 @@ Pop_Reg_F(dst) ); ins_pipe( fpu_reg_reg_mem ); %} -// +// // MACRO2 -- addF a mulF which subsumed an unshared load // This instruction does not round to 24-bits instruct addF_mulF_reg_load1(regF dst, memory mem1, regF src1, regF src2) %{ @@ -10568,7 +10933,7 @@ Pop_Reg_F(dst) ); ins_pipe( fpu_reg_mem_reg_reg ); %} - + // MACRO3 -- addF a mulF // This instruction does not round to 24-bits. It is a '2-address' // instruction in that the result goes back to src2. This eliminates @@ -10617,7 +10982,7 @@ Pop_Mem_F(dst) ); ins_pipe( fpu_mem_reg_reg ); %} -// +// // This instruction does not round to 24-bits instruct divF_reg(regF dst, regF src) %{ predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr()); @@ -10644,7 +11009,7 @@ Pop_Mem_F(dst)); ins_pipe( pipe_slow ); %} -// +// // This instruction does not round to 24-bits instruct modF_reg(regF dst, regF src, eAXRegI rax, eFlagsReg cr) %{ predicate( UseSSE==0 && !Compile::current()->select_24_bit_instr()); @@ -10733,7 +11098,7 @@ predicate(UseSSE>=2); match(Set dst (ConvD2F src)); format %{ "CVTSD2SS $dst,$src\t# F-round" %} - opcode(0xF2, 0x0F, 0x5A); + opcode(0xF2, 0x0F, 0x5A); ins_encode( OpcP, OpcS, Opcode(tertiary), RegReg(dst, src)); ins_pipe( pipe_slow ); %} @@ -10772,7 +11137,7 @@ predicate(UseSSE>=2); match(Set dst (ConvF2D src)); format %{ "CVTSS2SD $dst,$src\t# D-round" %} - opcode(0xF3, 0x0F, 0x5A); + opcode(0xF3, 0x0F, 0x5A); ins_encode( OpcP, OpcS, Opcode(tertiary), RegReg(dst, src)); ins_pipe( pipe_slow ); %} @@ -10789,7 +11154,7 @@ "FLDCW std/24-bit mode\n\t" "POP EAX\n\t" "CMP EAX,0x80000000\n\t" - "JNE,s fast\n\t" + "JNE,s fast\n\t" "FLD_D $src\n\t" "CALL d2i_wrapper\n" "fast:" %} @@ -10802,7 +11167,7 @@ predicate(UseSSE>=2); match(Set dst (ConvD2I src)); effect( KILL tmp, KILL cr ); - format %{ "CVTTSD2SI $dst, $src\n\t" + format %{ "CVTTSD2SI $dst, $src\n\t" "CMP $dst,0x80000000\n\t" "JNE,s fast\n\t" "SUB ESP, 8\n\t" @@ -10895,7 +11260,7 @@ predicate(UseSSE>=1); match(Set dst (ConvF2I src)); effect( KILL tmp, KILL cr ); - format %{ "CVTTSS2SI $dst, $src\n\t" + format %{ "CVTTSS2SI $dst, $src\n\t" "CMP $dst,0x80000000\n\t" "JNE,s fast\n\t" "SUB ESP, 4\n\t" @@ -10970,10 +11335,10 @@ %} instruct convI2XD_reg(regXD dst, eRegI src) %{ - predicate( UseSSE>=2 ); + predicate( UseSSE>=2 && !UseXmmI2D ); match(Set dst (ConvI2D src)); format %{ "CVTSI2SD $dst,$src" %} - opcode(0xF2, 0x0F, 0x2A); + opcode(0xF2, 0x0F, 0x2A); ins_encode( OpcP, OpcS, Opcode(tertiary), RegReg(dst, src)); ins_pipe( pipe_slow ); %} @@ -10982,11 +11347,25 @@ predicate( UseSSE>=2 ); match(Set dst (ConvI2D (LoadI mem))); format %{ "CVTSI2SD $dst,$mem" %} - opcode(0xF2, 0x0F, 0x2A); + opcode(0xF2, 0x0F, 0x2A); ins_encode( OpcP, OpcS, Opcode(tertiary), RegMem(dst, mem)); ins_pipe( pipe_slow ); %} +instruct convXI2XD_reg(regXD dst, eRegI src) +%{ + predicate( UseSSE>=2 && UseXmmI2D ); + match(Set dst (ConvI2D src)); + + format %{ "MOVD $dst,$src\n\t" + "CVTDQ2PD $dst,$dst\t# i2d" %} + ins_encode %{ + __ movdl($dst$$XMMRegister, $src$$Register); + __ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister); + %} 
+ ins_pipe(pipe_slow); // XXX +%} + instruct convI2D_mem(regD dst, memory mem) %{ predicate( UseSSE<=1 && !Compile::current()->select_24_bit_instr()); match(Set dst (ConvI2D (LoadI mem))); @@ -11022,7 +11401,7 @@ Pop_Mem_F(dst)); ins_pipe( fpu_mem_mem ); %} - + // In 24-bit mode, force exponent rounding by storing back out instruct convI2F_SSF_mem(stackSlotF dst, memory mem) %{ predicate( UseSSE==0 && Compile::current()->select_24_bit_instr()); @@ -11035,7 +11414,7 @@ Pop_Mem_F(dst)); ins_pipe( fpu_mem_mem ); %} - + // This instruction does not round to 24-bits instruct convI2F_reg(regF dst, stackSlotI src) %{ predicate( UseSSE==0 && !Compile::current()->select_24_bit_instr()); @@ -11062,7 +11441,7 @@ // Convert an int to a float in xmm; no rounding step needed. instruct convI2X_reg(regX dst, eRegI src) %{ - predicate(UseSSE>=1); + predicate( UseSSE==1 || UseSSE>=2 && !UseXmmI2F ); match(Set dst (ConvI2F src)); format %{ "CVTSI2SS $dst, $src" %} @@ -11071,6 +11450,20 @@ ins_pipe( pipe_slow ); %} + instruct convXI2X_reg(regX dst, eRegI src) +%{ + predicate( UseSSE>=2 && UseXmmI2F ); + match(Set dst (ConvI2F src)); + + format %{ "MOVD $dst,$src\n\t" + "CVTDQ2PS $dst,$dst\t# i2f" %} + ins_encode %{ + __ movdl($dst$$XMMRegister, $src$$Register); + __ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister); + %} + ins_pipe(pipe_slow); // XXX +%} + instruct convI2L_reg( eRegL dst, eRegI src, eFlagsReg cr) %{ match(Set dst (ConvI2L src)); effect(KILL cr); @@ -11176,7 +11569,7 @@ format %{ "MOV $dst,$src\t# MoveF2I_stack_reg" %} opcode(0x8B); ins_encode( OpcP, RegMem(dst,src)); - ins_pipe( ialu_reg_mem ); + ins_pipe( ialu_reg_mem ); %} instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{ @@ -11208,7 +11601,7 @@ ins_cost(85); format %{ "MOVD $dst,$src\t# MoveF2I_reg_reg_sse" %} ins_encode( MovX2I_reg(dst, src)); - ins_pipe( pipe_slow ); + ins_pipe( pipe_slow ); %} instruct MoveI2F_reg_stack(stackSlotF dst, eRegI src) %{ @@ -11234,7 +11627,7 @@ opcode(0xD9); /* D9 /0, FLD m32real */ ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src), Pop_Reg_F(dst) ); - ins_pipe( fpu_reg_mem ); + ins_pipe( fpu_reg_mem ); %} instruct MoveI2F_stack_reg_sse(regX dst, stackSlotI src) %{ @@ -11256,7 +11649,7 @@ ins_cost(85); format %{ "MOVD $dst,$src\t# MoveI2F_reg_reg_sse" %} ins_encode( MovI2X_reg(dst, src) ); - ins_pipe( pipe_slow ); + ins_pipe( pipe_slow ); %} instruct MoveD2L_stack_reg(eRegL dst, stackSlotD src) %{ @@ -11268,7 +11661,7 @@ "MOV $dst.hi,$src+4\t# MoveD2L_stack_reg" %} opcode(0x8B, 0x8B); ins_encode( OpcP, RegMem(dst,src), OpcS, RegMem_Hi(dst,src)); - ins_pipe( ialu_mem_long_reg ); + ins_pipe( ialu_mem_long_reg ); %} instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{ @@ -11329,7 +11722,7 @@ opcode(0xDD); /* DD /0, FLD m64real */ ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src), Pop_Reg_D(dst) ); - ins_pipe( fpu_reg_mem ); + ins_pipe( fpu_reg_mem ); %} @@ -11537,6 +11930,17 @@ ins_pipe( pipe_slow ); %} +// fast array equals +instruct array_equals(eDIRegP ary1, eSIRegP ary2, eAXRegI tmp1, eBXRegI tmp2, eCXRegI result, eFlagsReg cr) %{ + match(Set result (AryEq ary1 ary2)); + effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL cr); + //ins_cost(300); + + format %{ "Array Equals $ary1,$ary2 -> $result // KILL EAX, EBX" %} + ins_encode( enc_Array_Equals(ary1, ary2, tmp1, tmp2, result) ); + ins_pipe( pipe_slow ); +%} + //----------Control Flow Instructions------------------------------------------ // Signed compare Instructions instruct compI_eReg(eFlagsReg cr, eRegI op1, eRegI op2) %{ @@ -11561,7 +11965,7 
@@ // Cisc-spilled version of cmpI_eReg instruct compI_eReg_mem(eFlagsReg cr, eRegI op1, memory op2) %{ match(Set cr (CmpI op1 (LoadI op2))); - + format %{ "CMP $op1,$op2" %} ins_cost(500); opcode(0x3B); /* Opcode 3B /r */ @@ -11620,7 +12024,7 @@ // // Cisc-spilled version of cmpU_eReg instruct compU_eReg_mem(eFlagsRegU cr, eRegI op1, memory op2) %{ match(Set cr (CmpU op1 (LoadI op2))); - + format %{ "CMPu $op1,$op2" %} ins_cost(500); opcode(0x3B); /* Opcode 3B /r */ @@ -11631,7 +12035,7 @@ // // Cisc-spilled version of cmpU_eReg //instruct compU_mem_eReg(eFlagsRegU cr, memory op1, eRegI op2) %{ // match(Set cr (CmpU (LoadI op1) op2)); -// +// // format %{ "CMPu $op1,$op2" %} // ins_cost(500); // opcode(0x39); /* Opcode 39 /r */ @@ -11669,7 +12073,7 @@ // // Cisc-spilled version of cmpP_eReg instruct compP_eReg_mem(eFlagsRegU cr, eRegP op1, memory op2) %{ match(Set cr (CmpP op1 (LoadP op2))); - + format %{ "CMPu $op1,$op2" %} ins_cost(500); opcode(0x3B); /* Opcode 3B /r */ @@ -11680,7 +12084,7 @@ // // Cisc-spilled version of cmpP_eReg //instruct compP_mem_eReg(eFlagsRegU cr, memory op1, eRegP op2) %{ // match(Set cr (CmpP (LoadP op1) op2)); -// +// // format %{ "CMPu $op1,$op2" %} // ins_cost(500); // opcode(0x39); /* Opcode 39 /r */ @@ -11693,7 +12097,7 @@ instruct compP_mem_eReg( eFlagsRegU cr, eRegP op1, memory op2 ) %{ predicate( !n->in(2)->in(2)->bottom_type()->isa_oop_ptr() ); match(Set cr (CmpP op1 (LoadP op2))); - + format %{ "CMPu $op1,$op2" %} opcode(0x3B); /* Opcode 3B /r */ ins_encode( OpcP, RegMem( op1, op2) ); @@ -11717,7 +12121,7 @@ // since any compare to a zero should be eq/neq. instruct testP_Reg_mem( eFlagsReg cr, memory op, immI0 zero ) %{ match(Set cr (CmpP (LoadP op) zero)); - + format %{ "TEST $op,0xFFFFFFFF" %} ins_cost(500); opcode(0xF7); /* Opcode F7 /0 */ @@ -11879,6 +12283,19 @@ ins_pc_relative(1); %} +instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{ + match(CountedLoopEnd cop cmp); + effect(USE labl); + + ins_cost(200); + format %{ "J$cop,u $labl\t# Loop end" %} + size(6); + opcode(0x0F, 0x80); + ins_encode( Jcc( cop, labl) ); + ins_pipe( pipe_jcc ); + ins_pc_relative(1); +%} + // Jump Direct Conditional - using unsigned comparison instruct jmpConU(cmpOpU cop, eFlagsRegU cmp, label labl) %{ match(If cop cmp); @@ -11888,8 +12305,63 @@ format %{ "J$cop,u $labl" %} size(6); opcode(0x0F, 0x80); - ins_encode( Jcc( cop, labl) ); - ins_pipe( pipe_jcc ); + ins_encode(Jcc(cop, labl)); + ins_pipe(pipe_jcc); + ins_pc_relative(1); +%} + +instruct jmpConUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{ + match(If cop cmp); + effect(USE labl); + + ins_cost(200); + format %{ "J$cop,u $labl" %} + size(6); + opcode(0x0F, 0x80); + ins_encode(Jcc(cop, labl)); + ins_pipe(pipe_jcc); + ins_pc_relative(1); +%} + +instruct jmpConUCF2(cmpOpUCF2 cop, eFlagsRegUCF cmp, label labl) %{ + match(If cop cmp); + effect(USE labl); + + ins_cost(200); + format %{ $$template + if ($cop$$cmpcode == Assembler::notEqual) { + $$emit$$"JP,u $labl\n\t" + $$emit$$"J$cop,u $labl" + } else { + $$emit$$"JP,u done\n\t" + $$emit$$"J$cop,u $labl\n\t" + $$emit$$"done:" + } + %} + size(12); + opcode(0x0F, 0x80); + ins_encode %{ + Label* l = $labl$$label; + $$$emit8$primary; + emit_cc(cbuf, $secondary, Assembler::parity); + int parity_disp = -1; + bool ok = false; + if ($cop$$cmpcode == Assembler::notEqual) { + // the two jumps 6 bytes apart so the jump distances are too + parity_disp = l ? 
(l->loc_pos() - (cbuf.code_size() + 4)) : 0; + } else if ($cop$$cmpcode == Assembler::equal) { + parity_disp = 6; + ok = true; + } else { + ShouldNotReachHere(); + } + emit_d32(cbuf, parity_disp); + $$$emit8$primary; + emit_cc(cbuf, $secondary, $cop$$cmpcode); + int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0; + emit_d32(cbuf, disp); + %} + ins_pipe(pipe_jcc); ins_pc_relative(1); %} @@ -11942,7 +12414,7 @@ // ============================================================================ // Branch Instructions -- short offset versions -// +// // These instructions are used to replace jumps of a long offset (the default // match) with jumps of a shorter offset. These instructions are all tagged // with the ins_short_branch attribute, which causes the ADLC to suppress the @@ -11988,7 +12460,7 @@ effect(USE labl); ins_cost(300); - format %{ "J$cop,s $labl" %} + format %{ "J$cop,s $labl\t# Loop end" %} size(2); opcode(0x70); ins_encode( JccShort( cop, labl) ); @@ -12003,7 +12475,21 @@ effect(USE labl); ins_cost(300); - format %{ "J$cop,us $labl" %} + format %{ "J$cop,us $labl\t# Loop end" %} + size(2); + opcode(0x70); + ins_encode( JccShort( cop, labl) ); + ins_pipe( pipe_jcc ); + ins_pc_relative(1); + ins_short_branch(1); +%} + +instruct jmpLoopEndUCF_short(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{ + match(CountedLoopEnd cop cmp); + effect(USE labl); + + ins_cost(300); + format %{ "J$cop,us $labl\t# Loop end" %} size(2); opcode(0x70); ins_encode( JccShort( cop, labl) ); @@ -12027,6 +12513,60 @@ ins_short_branch(1); %} +instruct jmpConUCF_short(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{ + match(If cop cmp); + effect(USE labl); + + ins_cost(300); + format %{ "J$cop,us $labl" %} + size(2); + opcode(0x70); + ins_encode( JccShort( cop, labl) ); + ins_pipe( pipe_jcc ); + ins_pc_relative(1); + ins_short_branch(1); +%} + +instruct jmpConUCF2_short(cmpOpUCF2 cop, eFlagsRegUCF cmp, label labl) %{ + match(If cop cmp); + effect(USE labl); + + ins_cost(300); + format %{ $$template + if ($cop$$cmpcode == Assembler::notEqual) { + $$emit$$"JP,u,s $labl\n\t" + $$emit$$"J$cop,u,s $labl" + } else { + $$emit$$"JP,u,s done\n\t" + $$emit$$"J$cop,u,s $labl\n\t" + $$emit$$"done:" + } + %} + size(4); + opcode(0x70); + ins_encode %{ + Label* l = $labl$$label; + emit_cc(cbuf, $primary, Assembler::parity); + int parity_disp = -1; + if ($cop$$cmpcode == Assembler::notEqual) { + parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0; + } else if ($cop$$cmpcode == Assembler::equal) { + parity_disp = 2; + } else { + ShouldNotReachHere(); + } + emit_d8(cbuf, parity_disp); + emit_cc(cbuf, $primary, $cop$$cmpcode); + int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0; + emit_d8(cbuf, disp); + assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); + assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp"); + %} + ins_pipe(pipe_jcc); + ins_pc_relative(1); + ins_short_branch(1); +%} + // ============================================================================ // Long Compare // @@ -12039,13 +12579,13 @@ // NE test is negated from that. // Due to a shortcoming in the ADLC, it mixes up expressions like: -// (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the -// difference between 'Y' and '0L'. The tree-matches for the CmpI sections -// are collapsed internally in the ADLC's dfa-gen code. The match for -// (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the -// foo match ends up with the wrong leaf. 
One fix is to not match both -// reg-reg and reg-zero forms of long-compare. This is unfortunate because -// both forms beat the trinary form of long-compare and both are very useful +// (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the +// difference between 'Y' and '0L'. The tree-matches for the CmpI sections +// are collapsed internally in the ADLC's dfa-gen code. The match for +// (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the +// foo match ends up with the wrong leaf. One fix is to not match both +// reg-reg and reg-zero forms of long-compare. This is unfortunate because +// both forms beat the trinary form of long-compare and both are very useful // on Intel which has so few registers. // Manifest a CmpL result in an integer register. Very painful. @@ -12067,7 +12607,7 @@ "done:" %} ins_encode %{ Label p_one, m_one, done; - __ xorl($dst$$Register, $dst$$Register); + __ xorptr($dst$$Register, $dst$$Register); __ cmpl(HIGH_FROM_LOW($src1$$Register), HIGH_FROM_LOW($src2$$Register)); __ jccb(Assembler::less, m_one); __ jccb(Assembler::greater, p_one); @@ -12075,10 +12615,10 @@ __ jccb(Assembler::below, m_one); __ jccb(Assembler::equal, done); __ bind(p_one); - __ increment($dst$$Register); + __ incrementl($dst$$Register); __ jmpb(done); __ bind(m_one); - __ decrement($dst$$Register); + __ decrementl($dst$$Register); __ bind(done); %} ins_pipe( pipe_slow ); @@ -12087,7 +12627,7 @@ //====== // Manifest a CmpL result in the normal flags. Only good for LT or GE // compares. Can be used for LE or GT compares by reversing arguments. -// NOT GOOD FOR EQ/NE tests. +// NOT GOOD FOR EQ/NE tests. instruct cmpL_zero_flags_LTGE( flagsReg_long_LTGE flags, eRegL src, immL0 zero ) %{ match( Set flags (CmpL src zero )); ins_cost(100); @@ -12130,7 +12670,7 @@ format %{ "CMOV$cmp $dst.lo,$src.lo\n\t" "CMOV$cmp $dst.hi,$src.hi" %} opcode(0x0F,0x40); - ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) ); + ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) ); ins_pipe( pipe_cmov_reg_long ); %} @@ -12141,7 +12681,7 @@ format %{ "CMOV$cmp $dst.lo,$src.lo\n\t" "CMOV$cmp $dst.hi,$src.hi" %} opcode(0x0F,0x40); - ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) ); + ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) ); ins_pipe( pipe_cmov_reg_long ); %} @@ -12214,9 +12754,9 @@ fcmovX_regS(cmp,flags,dst,src); %} %} - + //====== -// Manifest a CmpL result in the normal flags. Only good for EQ/NE compares. +// Manifest a CmpL result in the normal flags. Only good for EQ/NE compares. instruct cmpL_zero_flags_EQNE( flagsReg_long_EQNE flags, eRegL src, immL0 zero, eRegI tmp ) %{ match( Set flags (CmpL src zero )); effect(TEMP tmp); @@ -12227,7 +12767,7 @@ ins_pipe( ialu_reg_reg_long ); %} -// Manifest a CmpL result in the normal flags. Only good for EQ/NE compares. +// Manifest a CmpL result in the normal flags. Only good for EQ/NE compares. 
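
The cmpL3 sequence above, and the LTGE/EQNE/LEGT flag-only variants around it, all reduce a 64-bit compare to a signed compare of the high words plus an unsigned compare of the low words. A standalone C++ restatement of that reduction (illustrative only):

    #include <cstdint>

    // -1, 0 or 1, computed the way the IA-32 code does it: signed compare of
    // the high halves first, unsigned compare of the low halves only when the
    // high halves are equal.
    int cmp_long_via_halves(int64_t a, int64_t b) {
      int32_t  ah = int32_t(uint64_t(a) >> 32), bh = int32_t(uint64_t(b) >> 32);
      uint32_t al = uint32_t(a),                bl = uint32_t(b);
      if (ah != bh) return ah < bh ? -1 : 1;
      if (al != bl) return al < bl ? -1 : 1;
      return 0;
    }

    int main() {
      return (cmp_long_via_halves(-1, 1)        == -1 &&
              cmp_long_via_halves(1LL << 40, 1) ==  1 &&
              cmp_long_via_halves(7, 7)         ==  0) ? 0 : 1;
    }
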
instruct cmpL_reg_flags_EQNE( flagsReg_long_EQNE flags, eRegL src1, eRegL src2 ) %{ match( Set flags (CmpL src1 src2 )); ins_cost(200+300); @@ -12258,7 +12798,7 @@ format %{ "CMOV$cmp $dst.lo,$src.lo\n\t" "CMOV$cmp $dst.hi,$src.hi" %} opcode(0x0F,0x40); - ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) ); + ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) ); ins_pipe( pipe_cmov_reg_long ); %} @@ -12269,7 +12809,7 @@ format %{ "CMOV$cmp $dst.lo,$src.lo\n\t" "CMOV$cmp $dst.hi,$src.hi" %} opcode(0x0F,0x40); - ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) ); + ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) ); ins_pipe( pipe_cmov_reg_long ); %} @@ -12342,7 +12882,7 @@ fcmovX_regS(cmp,flags,dst,src); %} %} - + //====== // Manifest a CmpL result in the normal flags. Only good for LE or GT compares. // Same as cmpL_reg_flags_LEGT except must negate src @@ -12359,7 +12899,7 @@ // Manifest a CmpL result in the normal flags. Only good for LE or GT compares. // Same as cmpL_reg_flags_LTGE except operands swapped. Swapping operands -// requires a commuted test to get the same result. +// requires a commuted test to get the same result. instruct cmpL_reg_flags_LEGT( flagsReg_long_LEGT flags, eRegL src1, eRegL src2, eRegI tmp ) %{ match( Set flags (CmpL src1 src2 )); effect( TEMP tmp ); @@ -12391,7 +12931,7 @@ format %{ "CMOV$cmp $dst.lo,$src.lo\n\t" "CMOV$cmp $dst.hi,$src.hi" %} opcode(0x0F,0x40); - ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) ); + ins_encode( enc_cmov(cmp), RegReg_Lo2( dst, src ), enc_cmov(cmp), RegReg_Hi2( dst, src ) ); ins_pipe( pipe_cmov_reg_long ); %} @@ -12402,7 +12942,7 @@ format %{ "CMOV$cmp $dst.lo,$src.lo\n\t" "CMOV$cmp $dst.hi,$src.hi+4" %} opcode(0x0F,0x40); - ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) ); + ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) ); ins_pipe( pipe_cmov_reg_long ); %} @@ -12570,7 +13110,7 @@ instruct Ret() %{ match(Return); format %{ "RET" %} - opcode(0xC3); + opcode(0xC3); ins_encode(OpcP); ins_pipe( pipe_jmp ); %} @@ -12617,7 +13157,7 @@ %} -// Rethrow exception: +// Rethrow exception: // The exception oop will come in the first argument position. // Then JUMP (not call) to the rethrow stub code. instruct RethrowException() @@ -12662,7 +13202,7 @@ effect(KILL cr); // TODO-FIXME: we currently poll at offset 0 of the safepoint polling page. - // On SPARC that might be acceptable as we can generate the address with + // On SPARC that might be acceptable as we can generate the address with // just a sethi, saving an or. By polling at offset 0 we can end up // putting additional pressure on the index-0 in the D$. Because of // alignment (just like the situation at hand) the lower indices tend @@ -12671,7 +13211,7 @@ format %{ "TSTL #polladdr,EAX\t! Safepoint: poll for GC" %} ins_cost(125); - size(6) ; + size(6) ; ins_encode( Safepoint_Poll() ); ins_pipe( ialu_reg_mem ); %} @@ -12679,7 +13219,7 @@ //----------PEEPHOLE RULES----------------------------------------------------- // These must follow all instruction definitions as they use the names // defined in the instructions definitions. 
-// +// // peepmatch ( root_instr_name [preceeding_instruction]* ); // // peepconstraint %{ @@ -12692,16 +13232,16 @@ // // in the replacement instruction's match rule // // ---------VM FLAGS--------------------------------------------------------- -// +// // All peephole optimizations can be turned off using -XX:-OptoPeephole -// +// // Each peephole rule is given an identifying number starting with zero and // increasing by one in the order seen by the parser. An individual peephole // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=# // on the command-line. -// +// // ---------CURRENT LIMITATIONS---------------------------------------------- -// +// // Only match adjacent instructions in same basic block // Only equality constraints // Only constraints between operands, not (0.dest_reg == EAX_enc) @@ -12713,45 +13253,45 @@ // instruct movI(eRegI dst, eRegI src) %{ // match(Set dst (CopyI src)); // %} -// +// // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{ // match(Set dst (AddI dst src)); // effect(KILL cr); // %} -// +// // // Change (inc mov) to lea // peephole %{ // // increment preceeded by register-register move // peepmatch ( incI_eReg movI ); -// // require that the destination register of the increment +// // require that the destination register of the increment // // match the destination register of the move // peepconstraint ( 0.dst == 1.dst ); // // construct a replacement instruction that sets // // the destination to ( move's source register + one ) // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) ); // %} -// -// Implementation no longer uses movX instructions since +// +// Implementation no longer uses movX instructions since // machine-independent system no longer uses CopyX nodes. -// +// // peephole %{ // peepmatch ( incI_eReg movI ); // peepconstraint ( 0.dst == 1.dst ); // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) ); // %} -// +// // peephole %{ // peepmatch ( decI_eReg movI ); // peepconstraint ( 0.dst == 1.dst ); // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) ); // %} -// +// // peephole %{ // peepmatch ( addI_eReg_imm movI ); // peepconstraint ( 0.dst == 1.dst ); // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) ); // %} -// +// // peephole %{ // peepmatch ( addP_eReg_imm movP ); // peepconstraint ( 0.dst == 1.dst ); @@ -12762,11 +13302,11 @@ // instruct storeI(memory mem, eRegI src) %{ // match(Set mem (StoreI mem src)); // %} -// +// // instruct loadI(eRegI dst, memory mem) %{ // match(Set dst (LoadI mem)); // %} -// +// peephole %{ peepmatch ( loadI storeI ); peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem ); @@ -12776,6 +13316,3 @@ //----------SMARTSPILL RULES--------------------------------------------------- // These must follow all instruction definitions as they use the names // defined in the instructions definitions. - - - --- old/hotspot/src/cpu/x86/vm/x86_64.ad 2009-08-01 04:08:34.178333687 +0100 +++ new/hotspot/src/cpu/x86/vm/x86_64.ad 2009-08-01 04:08:34.052752392 +0100 @@ -1,5 +1,5 @@ // -// Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // CA 95054 USA or visit www.sun.com if you need additional information or // have any questions. 
-// +// // // AMD64 Architecture Description File @@ -32,31 +32,31 @@ register %{ //----------Architecture Description Register Definitions---------------------- // General Registers -// "reg_def" name ( register save type, C convention save type, +// "reg_def" name ( register save type, C convention save type, // ideal register type, encoding ); // Register Save Types: -// +// // NS = No-Save: The register allocator assumes that these registers // can be used without saving upon entry to the method, & // that they do not need to be saved at call sites. -// +// // SOC = Save-On-Call: The register allocator assumes that these registers // can be used without saving upon entry to the method, // but that they must be saved at call sites. -// +// // SOE = Save-On-Entry: The register allocator assumes that these registers // must be saved before using them upon entry to the // method, but they do not need to be saved at call // sites. -// +// // AS = Always-Save: The register allocator assumes that these registers // must be saved before using them upon entry to the // method, & that they must be saved at call sites. // -// Ideal Register Type is used to determine how to save & restore a +// Ideal Register Type is used to determine how to save & restore a // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. -// +// // The encoding number is the actual bit-pattern placed into the opcodes. // General Registers @@ -227,9 +227,9 @@ reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad()); -// Specify priority of register selection within phases of register -// allocation. Highest priority is first. A useful heuristic is to -// give registers a low priority when they are required by machine +// Specify priority of register selection within phases of register +// allocation. Highest priority is first. A useful heuristic is to +// give registers a low priority when they are required by machine // instructions, like EAX and EDX on I486, and choose no-save registers // before save-on-call, & save-on-call before save-on-entry. Registers // which participate in fixed calling sequences should come last. 
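
The four save types documented above reduce to two independent questions the register allocator asks about each register. A trivial C++ restatement (the function names are illustrative, not taken from the allocator):

    // NS  = never needs saving;        SOC = save around call sites only;
    // SOE = save once on method entry; AS  = both.
    enum SaveType { NS, SOC, SOE, AS };

    // Must the register be preserved across a call site?
    bool must_save_at_calls(SaveType t) { return t == SOC || t == AS; }

    // Must the register be preserved before its first use in the method?
    bool must_save_on_entry(SaveType t) { return t == SOE || t == AS; }

    int main() {
      return (must_save_at_calls(SOC) && must_save_on_entry(SOE)) ? 0 : 1;
    }
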
@@ -253,16 +253,16 @@ RSP, RSP_H); // XXX probably use 8-15 first on Linux -alloc_class chunk1(XMM0, XMM0_H, - XMM1, XMM1_H, - XMM2, XMM2_H, - XMM3, XMM3_H, - XMM4, XMM4_H, - XMM5, XMM5_H, - XMM6, XMM6_H, - XMM7, XMM7_H, - XMM8, XMM8_H, - XMM9, XMM9_H, +alloc_class chunk1(XMM0, XMM0_H, + XMM1, XMM1_H, + XMM2, XMM2_H, + XMM3, XMM3_H, + XMM4, XMM4_H, + XMM5, XMM5_H, + XMM6, XMM6_H, + XMM7, XMM7_H, + XMM8, XMM8_H, + XMM9, XMM9_H, XMM10, XMM10_H, XMM11, XMM11_H, XMM12, XMM12_H, @@ -280,7 +280,7 @@ // 2) reg_class compiler_method_oop_reg ( /* as def'd in frame section */ ) // 2) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ ) // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ ) -// +// // Class for all pointer registers (including RSP) reg_class any_reg(RAX, RAX_H, @@ -312,7 +312,6 @@ R9, R9_H, R10, R10_H, R11, R11_H, - R12, R12_H, R13, R13_H, R14, R14_H); @@ -358,7 +357,7 @@ R12, R12_H, R13, R13_H, R14, R14_H); - + // Singleton class for RAX pointer register reg_class ptr_rax_reg(RAX, RAX_H); @@ -392,7 +391,6 @@ R9, R9_H, R10, R10_H, R11, R11_H, - R12, R12_H, R13, R13_H, R14, R14_H); @@ -406,7 +404,6 @@ R9, R9_H, R10, R10_H, R11, R11_H, - R12, R12_H, R13, R13_H, R14, R14_H); @@ -421,7 +418,6 @@ R9, R9_H, R10, R10_H, R11, R11_H, - R12, R12_H, R13, R13_H, R14, R14_H); @@ -436,7 +432,6 @@ R9, R9_H, R10, R10_H, R11, R11_H, - R12, R12_H, R13, R13_H, R14, R14_H); @@ -449,6 +444,9 @@ // Singleton class for RDX long register reg_class long_rdx_reg(RDX, RDX_H); +// Singleton class for R12 long register +reg_class long_r12_reg(R12, R12_H); + // Class for all int registers (except RSP) reg_class int_reg(RAX, RDX, @@ -461,7 +459,6 @@ R9, R10, R11, - R12, R13, R14); @@ -476,13 +473,12 @@ R9, R10, R11, - R12, R13, R14); // Class for all int registers except RAX, RDX (and RSP) reg_class int_no_rax_rdx_reg(RBP, - RDI + RDI, RSI, RCX, RBX, @@ -490,7 +486,6 @@ R9, R10, R11, - R12, R13, R14); @@ -557,7 +552,7 @@ // This is a block of C++ code which provides values, functions, and // definitions necessary in the rest of the architecture description source %{ -#define RELOC_IMM64 Assembler::imm64_operand +#define RELOC_IMM64 Assembler::imm_operand #define RELOC_DISP32 Assembler::disp32_operand #define __ _masm. 
@@ -637,11 +632,11 @@ } // EMIT_OPCODE() w/ relocation information -void emit_opcode(CodeBuffer &cbuf, +void emit_opcode(CodeBuffer &cbuf, int code, relocInfo::relocType reloc, int offset, int format) { cbuf.relocate(cbuf.inst_mark() + offset, reloc, format); - emit_opcode(cbuf, code); + emit_opcode(cbuf, code); } // EMIT_D8() @@ -673,8 +668,8 @@ } // emit 32 bit value and construct relocation entry from relocInfo::relocType -void emit_d32_reloc(CodeBuffer& cbuf, - int d32, +void emit_d32_reloc(CodeBuffer& cbuf, + int d32, relocInfo::relocType reloc, int format) { @@ -686,13 +681,13 @@ } // emit 32 bit value and construct relocation entry from RelocationHolder -void emit_d32_reloc(CodeBuffer& cbuf, - int d32, +void emit_d32_reloc(CodeBuffer& cbuf, + int d32, RelocationHolder const& rspec, int format) { #ifdef ASSERT - if (rspec.reloc()->type() == relocInfo::oop_type && + if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) { assert(oop((intptr_t)d32)->is_oop() && oop((intptr_t)d32)->is_perm(), "cannot embed non-perm oops in code"); } @@ -712,7 +707,7 @@ // emit 64 bit value and construct relocation entry from relocInfo::relocType -void emit_d64_reloc(CodeBuffer& cbuf, +void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, relocInfo::relocType reloc, int format) @@ -724,13 +719,13 @@ } // emit 64 bit value and construct relocation entry from RelocationHolder -void emit_d64_reloc(CodeBuffer& cbuf, +void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec, int format) { #ifdef ASSERT - if (rspec.reloc()->type() == relocInfo::oop_type && + if (rspec.reloc()->type() == relocInfo::oop_type && d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) { assert(oop(d64)->is_oop() && oop(d64)->is_perm(), "cannot embed non-perm oops in code"); @@ -758,9 +753,9 @@ } // rRegI ereg, memory mem) %{ // emit_reg_mem -void encode_RegMem(CodeBuffer &cbuf, - int reg, - int base, int index, int scale, int disp, bool disp_is_oop) +void encode_RegMem(CodeBuffer &cbuf, + int reg, + int base, int index, int scale, int disp, bool disp_is_oop) { assert(!disp_is_oop, "cannot have disp"); int regenc = reg & 7; @@ -770,13 +765,13 @@ // There is no index & no scale, use form without SIB byte if (index == 0x4 && scale == 0 && base != RSP_enc && base != R12_enc) { // If no displacement, mode is 0x0; unless base is [RBP] or [R13] - if (disp == 0 && base != RBP_enc && base != R13_enc) { + if (disp == 0 && base != RBP_enc && base != R13_enc) { emit_rm(cbuf, 0x0, regenc, baseenc); // * } else if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) { // If 8-bit displacement, mode 0x1 emit_rm(cbuf, 0x1, regenc, baseenc); // * emit_d8(cbuf, disp); - } else { + } else { // If 32-bit displacement if (base == -1) { // Special flag for absolute address emit_rm(cbuf, 0x0, regenc, 0x5); // * @@ -795,7 +790,7 @@ } } } - } else { + } else { // Else, encode with the SIB byte // If no displacement, mode is 0x0; unless base is [RBP] or [R13] if (disp == 0 && base != RBP_enc && base != R13_enc) { @@ -827,7 +822,7 @@ } } -void encode_copy(CodeBuffer &cbuf, int dstenc, int srcenc) +void encode_copy(CodeBuffer &cbuf, int dstenc, int srcenc) { if (dstenc != srcenc) { if (dstenc < 8) { @@ -844,7 +839,7 @@ } dstenc -= 8; } - + emit_opcode(cbuf, 0x8B); emit_rm(cbuf, 0x3, dstenc, srcenc); } @@ -880,7 +875,7 @@ // use several kilobytes of stack. But the stack safety zone should // account for that. See bugs 4446381, 4468289, 4497237. 
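
The special cases that encode_RegMem checks for above come straight from the IA-32/AMD64 ModRM rules: r/m encoding 4 (RSP, and R12 under REX) can only address memory through a SIB byte, and mod=00 with base encoding 5 (RBP/R13) does not mean a plain [base] access (it selects the absolute/RIP-relative disp32 forms instead), so those bases always carry an explicit displacement. A compact standalone C++ sketch of just those two rules (not the HotSpot emitter; REX prefixes and index registers are left out):

    #include <cstdint>
    #include <vector>

    // Emit ModRM (+ optional SIB and displacement) for [base + disp], no index.
    // reg and base are 3-bit register encodings.
    void emit_reg_mem(std::vector<uint8_t>& out, int reg, int base, int32_t disp) {
      bool need_sib  = (base & 7) == 4;              // RSP/R12: base field selects SIB
      bool need_disp = disp != 0 || (base & 7) == 5; // RBP/R13: mod=00 would drop the base
      int  mod       = !need_disp ? 0 : (disp >= -128 && disp < 128 ? 1 : 2);
      out.push_back(uint8_t(mod << 6 | (reg & 7) << 3 | (need_sib ? 4 : base & 7)));
      if (need_sib)
        out.push_back(uint8_t((4 << 3) | (base & 7)));       // scale=1, no index
      if (mod == 1)
        out.push_back(uint8_t(disp));
      else if (mod == 2)
        for (int i = 0; i < 4; ++i) out.push_back(uint8_t(uint32_t(disp) >> (8 * i)));
    }

    int main() {
      std::vector<uint8_t> out;
      emit_reg_mem(out, 0 /*eax*/, 5 /*ebp*/, 0);   // forced disp8: 0x45 0x00
      return (out.size() == 2 && out[0] == 0x45 && out[1] == 0x00) ? 0 : 1;
    }
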
if (C->need_stack_bang(framesize)) { - st->print_cr("# stack bang"); st->print("\t"); + st->print_cr("# stack bang"); st->print("\t"); need_nop = false; } st->print_cr("pushq rbp"); st->print("\t"); @@ -889,7 +884,7 @@ // Majik cookie to verify stack depth st->print_cr("pushq 0xffffffffbadb100d" "\t# Majik cookie for stack depth check"); - st->print("\t"); + st->print("\t"); framesize -= wordSize; // Remove 2 for cookie need_nop = false; } @@ -937,7 +932,7 @@ // restored correctly and we can correct the stack. emit_opcode(cbuf, 0x50 | RBP_enc); - if (VerifyStackAtCalls) { + if (VerifyStackAtCalls) { // Majik cookie to verify stack depth emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d emit_d32(cbuf, 0xbadb100d); @@ -951,27 +946,27 @@ emit_opcode(cbuf, 0x83); // sub SP,#framesize emit_rm(cbuf, 0x3, 0x05, RSP_enc); emit_d8(cbuf, framesize); - if (need_nop) { + if (need_nop) { emit_opcode(cbuf, 0x90); // nop } } else { emit_opcode(cbuf, 0x81); // sub SP,#framesize - emit_rm(cbuf, 0x3, 0x05, RSP_enc); + emit_rm(cbuf, 0x3, 0x05, RSP_enc); emit_d32(cbuf, framesize); } } C->set_frame_complete(cbuf.code_end() - cbuf.code_begin()); -#ifdef ASSERT - if (VerifyStackAtCalls) { +#ifdef ASSERT + if (VerifyStackAtCalls) { Label L; MacroAssembler masm(&cbuf); - masm.pushq(rax); - masm.movq(rax, rsp); - masm.andq(rax, StackAlignmentInBytes-1); - masm.cmpq(rax, StackAlignmentInBytes-wordSize); - masm.popq(rax); + masm.push(rax); + masm.mov(rax, rsp); + masm.andptr(rax, StackAlignmentInBytes-1); + masm.cmpptr(rax, StackAlignmentInBytes-wordSize); + masm.pop(rax); masm.jcc(Assembler::equal, L); masm.stop("Stack is not properly aligned!"); masm.bind(L); @@ -1051,7 +1046,7 @@ emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5 // cbuf.inst_mark() is beginning of instruction emit_d32_reloc(cbuf, os::get_polling_page()); -// relocInfo::poll_return_type, +// relocInfo::poll_return_type, } } @@ -1066,7 +1061,7 @@ uint size = 0; - if (do_polling() && C->is_method_compilation()) { + if (do_polling() && C->is_method_compilation()) { size += 6; } @@ -1084,7 +1079,7 @@ return size; } -int MachEpilogNode::reloc() const +int MachEpilogNode::reloc() const { return 2; // a large enough number } @@ -1102,13 +1097,13 @@ //============================================================================= enum RC { - rc_bad, - rc_int, + rc_bad, + rc_int, rc_float, - rc_stack + rc_stack }; -static enum RC rc_class(OptoReg::Name reg) +static enum RC rc_class(OptoReg::Name reg) { if( !OptoReg::is_valid(reg) ) return rc_bad; @@ -1122,13 +1117,13 @@ return rc_float; } -uint MachSpillCopyNode::implementation(CodeBuffer* cbuf, - PhaseRegAlloc* ra_, +uint MachSpillCopyNode::implementation(CodeBuffer* cbuf, + PhaseRegAlloc* ra_, bool do_size, outputStream* st) const { - // Get registers to move + // Get registers to move OptoReg::Name src_second = ra_->get_reg_second(in(1)); OptoReg::Name src_first = ra_->get_reg_first(in(1)); OptoReg::Name dst_second = ra_->get_reg_second(this); @@ -1165,12 +1160,12 @@ #ifndef PRODUCT } else if (!do_size) { st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t" - "popq [rsp + #%d]", + "popq [rsp + #%d]", src_offset, dst_offset); #endif } - return + return 3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) + 3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 
1 : 4)); } else { @@ -1188,17 +1183,17 @@ emit_opcode(*cbuf, 0xF8); emit_opcode(*cbuf, 0x8B); - encode_RegMem(*cbuf, - RAX_enc, - RSP_enc, 0x4, 0, src_offset, + encode_RegMem(*cbuf, + RAX_enc, + RSP_enc, 0x4, 0, src_offset, false); emit_opcode(*cbuf, 0x89); - encode_RegMem(*cbuf, - RAX_enc, - RSP_enc, 0x4, 0, dst_offset, + encode_RegMem(*cbuf, + RAX_enc, + RSP_enc, 0x4, 0, dst_offset, false); - + emit_opcode(*cbuf, Assembler::REX_W); emit_opcode(*cbuf, 0x8B); emit_opcode(*cbuf, 0x44); @@ -1215,7 +1210,7 @@ dst_offset); #endif } - return + return 5 + // movq 3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) + // movl 3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4)) + // movl @@ -1235,17 +1230,17 @@ } emit_opcode(*cbuf, 0x8B); encode_RegMem(*cbuf, - Matcher::_regEncode[dst_first], + Matcher::_regEncode[dst_first], RSP_enc, 0x4, 0, offset, false); #ifndef PRODUCT } else if (!do_size) { - st->print("movq %s, [rsp + #%d]\t# spill", + st->print("movq %s, [rsp + #%d]\t# spill", Matcher::regName[dst_first], offset); #endif } - return + return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX } else { // 32-bit @@ -1258,17 +1253,17 @@ } emit_opcode(*cbuf, 0x8B); encode_RegMem(*cbuf, - Matcher::_regEncode[dst_first], + Matcher::_regEncode[dst_first], RSP_enc, 0x4, 0, offset, false); #ifndef PRODUCT } else if (!do_size) { - st->print("movl %s, [rsp + #%d]\t# spill", + st->print("movl %s, [rsp + #%d]\t# spill", Matcher::regName[dst_first], offset); #endif } - return + return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + ((Matcher::_regEncode[dst_first] < 8) ? 3 @@ -1287,19 +1282,19 @@ } emit_opcode(*cbuf, 0x0F); emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12); - encode_RegMem(*cbuf, - Matcher::_regEncode[dst_first], + encode_RegMem(*cbuf, + Matcher::_regEncode[dst_first], RSP_enc, 0x4, 0, offset, false); #ifndef PRODUCT } else if (!do_size) { - st->print("%s %s, [rsp + #%d]\t# spill", + st->print("%s %s, [rsp + #%d]\t# spill", UseXmmLoadAndClearUpper ? "movsd " : "movlpd", Matcher::regName[dst_first], offset); #endif } - return + return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + ((Matcher::_regEncode[dst_first] < 8) ? 5 @@ -1316,18 +1311,18 @@ } emit_opcode(*cbuf, 0x0F); emit_opcode(*cbuf, 0x10); - encode_RegMem(*cbuf, - Matcher::_regEncode[dst_first], + encode_RegMem(*cbuf, + Matcher::_regEncode[dst_first], RSP_enc, 0x4, 0, offset, false); #ifndef PRODUCT } else if (!do_size) { - st->print("movss %s, [rsp + #%d]\t# spill", + st->print("movss %s, [rsp + #%d]\t# spill", Matcher::regName[dst_first], offset); #endif } - return + return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + ((Matcher::_regEncode[dst_first] < 8) ? 
5 @@ -1349,13 +1344,13 @@ emit_opcode(*cbuf, Assembler::REX_WR); } emit_opcode(*cbuf, 0x89); - encode_RegMem(*cbuf, - Matcher::_regEncode[src_first], + encode_RegMem(*cbuf, + Matcher::_regEncode[src_first], RSP_enc, 0x4, 0, offset, false); #ifndef PRODUCT } else if (!do_size) { - st->print("movq [rsp + #%d], %s\t# spill", + st->print("movq [rsp + #%d], %s\t# spill", offset, Matcher::regName[src_first]); #endif @@ -1371,18 +1366,18 @@ emit_opcode(*cbuf, Assembler::REX_R); } emit_opcode(*cbuf, 0x89); - encode_RegMem(*cbuf, - Matcher::_regEncode[src_first], + encode_RegMem(*cbuf, + Matcher::_regEncode[src_first], RSP_enc, 0x4, 0, offset, false); #ifndef PRODUCT } else if (!do_size) { - st->print("movl [rsp + #%d], %s\t# spill", + st->print("movl [rsp + #%d], %s\t# spill", offset, Matcher::regName[src_first]); #endif } - return + return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + ((Matcher::_regEncode[src_first] < 8) ? 3 @@ -1441,12 +1436,12 @@ Matcher::_regEncode[src_first] & 7); #ifndef PRODUCT } else if (!do_size) { - st->print("movl %s, %s\t# spill", + st->print("movl %s, %s\t# spill", Matcher::regName[dst_first], Matcher::regName[src_first]); #endif } - return + return (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8) ? 2 : 3; // REX @@ -1478,7 +1473,7 @@ Matcher::_regEncode[src_first] & 7); #ifndef PRODUCT } else if (!do_size) { - st->print("movdq %s, %s\t# spill", + st->print("movdq %s, %s\t# spill", Matcher::regName[dst_first], Matcher::regName[src_first]); #endif @@ -1508,12 +1503,12 @@ Matcher::_regEncode[src_first] & 7); #ifndef PRODUCT } else if (!do_size) { - st->print("movdl %s, %s\t# spill", + st->print("movdl %s, %s\t# spill", Matcher::regName[dst_first], Matcher::regName[src_first]); #endif } - return + return (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8) ? 4 : 5; // REX @@ -1534,18 +1529,18 @@ } emit_opcode(*cbuf, 0x0F); emit_opcode(*cbuf, 0x11); - encode_RegMem(*cbuf, - Matcher::_regEncode[src_first], + encode_RegMem(*cbuf, + Matcher::_regEncode[src_first], RSP_enc, 0x4, 0, offset, false); #ifndef PRODUCT } else if (!do_size) { - st->print("movsd [rsp + #%d], %s\t# spill", + st->print("movsd [rsp + #%d], %s\t# spill", offset, Matcher::regName[src_first]); #endif } - return + return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + ((Matcher::_regEncode[src_first] < 8) ? 5 @@ -1562,18 +1557,18 @@ } emit_opcode(*cbuf, 0x0F); emit_opcode(*cbuf, 0x11); - encode_RegMem(*cbuf, - Matcher::_regEncode[src_first], + encode_RegMem(*cbuf, + Matcher::_regEncode[src_first], RSP_enc, 0x4, 0, offset, false); #ifndef PRODUCT } else if (!do_size) { - st->print("movss [rsp + #%d], %s\t# spill", + st->print("movss [rsp + #%d], %s\t# spill", offset, Matcher::regName[src_first]); #endif } - return + return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + ((Matcher::_regEncode[src_first] < 8) ? 5 @@ -1606,7 +1601,7 @@ Matcher::_regEncode[src_first] & 7); #ifndef PRODUCT } else if (!do_size) { - st->print("movdq %s, %s\t# spill", + st->print("movdq %s, %s\t# spill", Matcher::regName[dst_first], Matcher::regName[src_first]); #endif @@ -1636,12 +1631,12 @@ Matcher::_regEncode[src_first] & 7); #ifndef PRODUCT } else if (!do_size) { - st->print("movdl %s, %s\t# spill", + st->print("movdl %s, %s\t# spill", Matcher::regName[dst_first], Matcher::regName[src_first]); #endif } - return + return (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8) ? 
4 : 5; // REX @@ -1671,13 +1666,13 @@ Matcher::_regEncode[src_first] & 7); #ifndef PRODUCT } else if (!do_size) { - st->print("%s %s, %s\t# spill", + st->print("%s %s, %s\t# spill", UseXmmRegToRegMoveAll ? "movapd" : "movsd ", Matcher::regName[dst_first], Matcher::regName[src_first]); #endif } - return + return (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8) ? 4 : 5; // REX @@ -1686,7 +1681,7 @@ assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); if (cbuf) { - if (!UseXmmRegToRegMoveAll) + if (!UseXmmRegToRegMoveAll) emit_opcode(*cbuf, 0xF3); if (Matcher::_regEncode[dst_first] < 8) { if (Matcher::_regEncode[src_first] >= 8) { @@ -1706,19 +1701,19 @@ Matcher::_regEncode[src_first] & 7); #ifndef PRODUCT } else if (!do_size) { - st->print("%s %s, %s\t# spill", + st->print("%s %s, %s\t# spill", UseXmmRegToRegMoveAll ? "movaps" : "movss ", Matcher::regName[dst_first], Matcher::regName[src_first]); #endif } - return + return (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8) ? (UseXmmRegToRegMoveAll ? 3 : 4) : (UseXmmRegToRegMoveAll ? 4 : 5); // REX } } - } + } assert(0," foo "); Unimplemented(); @@ -1727,18 +1722,18 @@ } #ifndef PRODUCT -void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const +void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const { implementation(NULL, ra_, false, st); } #endif -void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const +void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { implementation(&cbuf, ra_, false, NULL); } -uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const +uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const { return implementation(NULL, ra_, true, NULL); } @@ -1751,7 +1746,7 @@ } #endif -void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const +void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const { MacroAssembler _masm(&cbuf); __ nop(_count); @@ -1769,7 +1764,7 @@ { int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); int reg = ra_->get_reg_first(this); - st->print("leaq %s, [rsp + #%d]\t# box lock", + st->print("leaq %s, [rsp + #%d]\t# box lock", Matcher::regName[reg], offset); } #endif @@ -1822,6 +1817,7 @@ __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64); // static stub relocation also tags the methodOop in the code-stream. __ movoop(rbx, (jobject) NULL); // method is zapped till fixup time + // This is recognized as unresolved by relocs/nativeinst/ic code __ jump(RuntimeAddress(__ pc())); // Update current stubs pointer and restore code_end. 
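The spill-copy hunks above repeat the same size bookkeeping: the emitted length depends on whether the stack offset needs no displacement byte, a disp8, or a disp32, plus an extra byte of REX prefix when a high (r8-r15) register is involved. A small sketch of that accounting, not the MachSpillCopyNode code itself:

static int disp_size(int offset) {             // displacement bytes in the ModRM/SIB form
  return (offset == 0) ? 0 : (offset < 0x80 ? 1 : 4);
}

static int movq_reg_stack_size(int offset) {   // REX.W + opcode + ModRM + SIB, then the displacement
  return 4 + disp_size(offset);
}

static int movl_reg_stack_size(int reg_enc, int offset) {
  // the 32-bit form only pays for a REX prefix when a high register is used
  return ((reg_enc < 8) ? 3 : 4) + disp_size(offset);
}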
@@ -1835,7 +1831,7 @@ } // relocation entries for call stub, compiled java to interpretor -uint reloc_java_to_interp() +uint reloc_java_to_interp() { return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call } @@ -1844,8 +1840,14 @@ #ifndef PRODUCT void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const { - st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t" - "# Inline cache check", oopDesc::klass_offset_in_bytes()); + if (UseCompressedOops) { + st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes()); + st->print_cr("leaq rscratch1, [r12_heapbase, r, Address::times_8, 0]"); + st->print_cr("cmpq rax, rscratch1\t # Inline cache check"); + } else { + st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t" + "# Inline cache check", oopDesc::klass_offset_in_bytes()); + } st->print_cr("\tjne SharedRuntime::_ic_miss_stub"); st->print_cr("\tnop"); if (!OptoBreakpoint) { @@ -1860,7 +1862,12 @@ #ifdef ASSERT uint code_size = cbuf.code_size(); #endif - masm.cmpq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes())); + if (UseCompressedOops) { + masm.load_klass(rscratch1, j_rarg0); + masm.cmpptr(rax, rscratch1); + } else { + masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes())); + } masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub())); @@ -1871,15 +1878,23 @@ // Leave space for int3 nops_cnt += 1; } + if (UseCompressedOops) { + // ??? divisible by 4 is aligned? + nops_cnt += 1; + } masm.nop(nops_cnt); - assert(cbuf.code_size() - code_size == size(ra_), + assert(cbuf.code_size() - code_size == size(ra_), "checking code size of inline cache node"); } uint MachUEPNode::size(PhaseRegAlloc* ra_) const { - return OptoBreakpoint ? 11 : 12; + if (UseCompressedOops) { + return OptoBreakpoint ? 19 : 20; + } else { + return OptoBreakpoint ? 11 : 12; + } } @@ -1892,8 +1907,8 @@ return NativeJump::instruction_size; } -// Emit exception handler code. -int emit_exception_handler(CodeBuffer& cbuf) +// Emit exception handler code. +int emit_exception_handler(CodeBuffer& cbuf) { // Note that the code buffer's inst_mark is always relative to insts. @@ -1915,8 +1930,8 @@ return 15; } -// Emit deopt handler code. -int emit_deopt_handler(CodeBuffer& cbuf) +// Emit deopt handler code. +int emit_deopt_handler(CodeBuffer& cbuf) { // Note that the code buffer's inst_mark is always relative to insts. 
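The MachUEPNode changes above grow the fixed-size unverified entry point when compressed oops are on, because the inline-cache check now needs a load_klass plus decode before the compare (and an extra nop for padding, per the "divisible by 4" comment). A sketch of just the size accounting taken from that hunk:

static int mach_uep_size(bool use_compressed_oops, bool opto_breakpoint) {
  // compressed oops: movl + lea + cmpq replaces the single cmpq against the klass field
  if (use_compressed_oops) {
    return opto_breakpoint ? 19 : 20;
  }
  return opto_breakpoint ? 11 : 12;
}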
@@ -1935,7 +1950,7 @@ __ call(next, relocInfo::none); // reloc none is fine since it is a disp32 __ bind(next); // adjust it so it matches "the_pc" - __ subq(Address(rsp, 0), __ offset() - offset); + __ subptr(Address(rsp, 0), __ offset() - offset); __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow"); __ end_a_stub(); @@ -1948,9 +1963,9 @@ address double_address = __ double_constant(x); cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift emit_d32_reloc(cbuf, - (int) (double_address - cbuf.code_end() - 4), - internal_word_Relocation::spec(double_address), - RELOC_DISP32); + (int) (double_address - cbuf.code_end() - 4), + internal_word_Relocation::spec(double_address), + RELOC_DISP32); } static void emit_float_constant(CodeBuffer& cbuf, float x) { @@ -1958,10 +1973,10 @@ MacroAssembler _masm(&cbuf); address float_address = __ float_constant(x); cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift - emit_d32_reloc(cbuf, - (int) (float_address - cbuf.code_end() - 4), - internal_word_Relocation::spec(float_address), - RELOC_DISP32); + emit_d32_reloc(cbuf, + (int) (float_address - cbuf.code_end() - 4), + internal_word_Relocation::spec(float_address), + RELOC_DISP32); } @@ -1989,9 +2004,12 @@ // // NOTE: If the platform does not provide any short branch variants, then // this method should return false for offset 0. -bool Matcher::is_short_branch_offset(int offset) -{ - return -0x80 <= offset && offset < 0x80; +bool Matcher::is_short_branch_offset(int rule, int offset) { + // the short version of jmpConUCF2 contains multiple branches, + // making the reach slightly less + if (rule == jmpConUCF2_rule) + return (-126 <= offset && offset <= 125); + return (-128 <= offset && offset <= 127); } const bool Matcher::isSimpleConstant64(jlong value) { @@ -2046,12 +2064,13 @@ bool Matcher::can_be_java_arg(int reg) { return - reg == RDI_num || reg == RDI_H_num || - reg == RSI_num || reg == RSI_H_num || + reg == RDI_num || reg == RDI_H_num || + reg == RSI_num || reg == RSI_H_num || reg == RDX_num || reg == RDX_H_num || - reg == RCX_num || reg == RCX_H_num || - reg == R8_num || reg == R8_H_num || - reg == R9_num || reg == R9_H_num || + reg == RCX_num || reg == RCX_H_num || + reg == R8_num || reg == R8_H_num || + reg == R9_num || reg == R9_H_num || + reg == R12_num || reg == R12_H_num || reg == XMM0_num || reg == XMM0_H_num || reg == XMM1_num || reg == XMM1_H_num || reg == XMM2_num || reg == XMM2_H_num || @@ -2087,6 +2106,17 @@ return LONG_RDX_REG_mask; } +static Address build_address(int b, int i, int s, int d) { + Register index = as_Register(i); + Address::ScaleFactor scale = (Address::ScaleFactor)s; + if (index == rsp) { + index = noreg; + scale = Address::no_scale; + } + Address addr(as_Register(b), index, scale, d); + return addr; +} + %} //----------ENCODING BLOCK----------------------------------------------------- @@ -2125,19 +2155,19 @@ // automagically // Emit primary opcode - enc_class OpcP + enc_class OpcP %{ emit_opcode(cbuf, $primary); %} // Emit secondary opcode - enc_class OpcS + enc_class OpcS %{ emit_opcode(cbuf, $secondary); %} // Emit tertiary opcode - enc_class OpcT + enc_class OpcT %{ emit_opcode(cbuf, $tertiary); %} @@ -2170,7 +2200,7 @@ emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7); %} - enc_class cmpfp_fixup() + enc_class cmpfp_fixup() %{ // jnp,s exit emit_opcode(cbuf, 0x7B); @@ -2243,7 +2273,7 @@ // rdx: remainder (= rax irem reg) 0 // // Code sequnce: - // + // // 
0: 3d 00 00 00 80 cmp $0x80000000,%eax // 5: 75 07/08 jne e // 7: 33 d2 xor %edx,%edx @@ -2252,7 +2282,7 @@ // 9: 83 f9 ff cmp $0xffffffffffffffff,$div // c: 74 03/04 je 11 // 000000000000000e : - // e: 99 cltd + // e: 99 cltd // [div >= 8 -> offset + 1] // [REX_B] // f: f7 f9 idiv $div @@ -2260,7 +2290,7 @@ // cmp $0x80000000,%eax emit_opcode(cbuf, 0x3d); - emit_d8(cbuf, 0x00); + emit_d8(cbuf, 0x00); emit_d8(cbuf, 0x00); emit_d8(cbuf, 0x00); emit_d8(cbuf, 0x80); @@ -2286,7 +2316,7 @@ emit_d8(cbuf, $div$$reg < 8 ? 0x03 : 0x04); // - // cltd + // cltd emit_opcode(cbuf, 0x99); // idivl (note: must be emitted by the user of this rule) @@ -2307,16 +2337,16 @@ // rdx: remainder (= rax irem reg) 0 // // Code sequnce: - // + // // 0: 48 ba 00 00 00 00 00 mov $0x8000000000000000,%rdx - // 7: 00 00 80 + // 7: 00 00 80 // a: 48 39 d0 cmp %rdx,%rax // d: 75 08 jne 17 // f: 33 d2 xor %edx,%edx // 11: 48 83 f9 ff cmp $0xffffffffffffffff,$div // 15: 74 05 je 1c // 0000000000000017 : - // 17: 48 99 cqto + // 17: 48 99 cqto // 19: 48 f7 f9 idiv $div // 000000000000001c : @@ -2356,7 +2386,7 @@ emit_d8(cbuf, 0x05); // - // cqto + // cqto emit_opcode(cbuf, Assembler::REX_W); emit_opcode(cbuf, 0x99); @@ -2365,7 +2395,7 @@ %} // Opcde enc_class for 8/32 bit immediate instructions with sign-extension - enc_class OpcSE(immI imm) + enc_class OpcSE(immI imm) %{ // Emit primary opcode and set sign-extend bit // Check for 8-bit immediate, and set sign extend bit in opcode @@ -2438,7 +2468,7 @@ %} enc_class LblShort(label labl) - %{ + %{ // JMP, CALL Label* l = $labl$$label; int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0; @@ -2545,32 +2575,47 @@ Register Rrax = as_Register(RAX_enc); // super class Register Rrcx = as_Register(RCX_enc); // killed Register Rrsi = as_Register(RSI_enc); // sub class - Label hit, miss; + Label hit, miss, cmiss; MacroAssembler _masm(&cbuf); // Compare super with sub directly, since super is not in its own SSA. // The compiler used to emit this test, but we fold it in here, // to allow platform-specific tweaking on sparc. 
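The idiv/ldiv code sequences shown above guard the one dividend/divisor pair that hardware idiv cannot handle. A minimal sketch of the semantics those cmp/jne/xor sequences implement for the 32-bit case (the 64-bit case is the same with LONG_MIN):

#include <climits>

struct DivMod { int quot; int rem; };

static DivMod java_idiv(int dividend, int divisor) {
  // Hardware idiv raises #DE for INT_MIN / -1, so the generated code returns
  // (INT_MIN, 0) directly for that pair before falling through to idiv.
  if (dividend == INT_MIN && divisor == -1) {
    return DivMod{ INT_MIN, 0 };
  }
  return DivMod{ dividend / divisor, dividend % divisor };
}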
- __ cmpq(Rrax, Rrsi); + __ cmpptr(Rrax, Rrsi); __ jcc(Assembler::equal, hit); #ifndef PRODUCT __ lea(Rrcx, ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr)); __ incrementl(Address(Rrcx, 0)); #endif //PRODUCT - __ movq(Rrdi, Address(Rrsi, + __ movptr(Rrdi, Address(Rrsi, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())); __ movl(Rrcx, Address(Rrdi, arrayOopDesc::length_offset_in_bytes())); - __ addq(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); - __ repne_scan(); - __ jcc(Assembler::notEqual, miss); - __ movq(Address(Rrsi, - sizeof(oopDesc) + - Klass::secondary_super_cache_offset_in_bytes()), - Rrax); + __ addptr(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); + if (UseCompressedOops) { + __ encode_heap_oop(Rrax); + __ repne_scanl(); + __ jcc(Assembler::notEqual, cmiss); + __ decode_heap_oop(Rrax); + __ movptr(Address(Rrsi, + sizeof(oopDesc) + + Klass::secondary_super_cache_offset_in_bytes()), + Rrax); + __ jmp(hit); + __ bind(cmiss); + __ decode_heap_oop(Rrax); + __ jmp(miss); + } else { + __ repne_scan(); + __ jcc(Assembler::notEqual, miss); + __ movptr(Address(Rrsi, + sizeof(oopDesc) + + Klass::secondary_super_cache_offset_in_bytes()), + Rrax); + } __ bind(hit); if ($primary) { - __ xorq(Rrdi, Rrdi); + __ xorptr(Rrdi, Rrdi); } __ bind(miss); %} @@ -2596,7 +2641,7 @@ cbuf.set_inst_mark(); $$$emit8$primary; - if (!_method) { + if (!_method) { emit_d32_reloc(cbuf, (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4), runtime_call_Relocation::spec(), @@ -2604,9 +2649,9 @@ } else if (_optimized_virtual) { emit_d32_reloc(cbuf, (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4), - opt_virtual_call_Relocation::spec(), + opt_virtual_call_Relocation::spec(), RELOC_DISP32); - } else { + } else { emit_d32_reloc(cbuf, (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4), static_call_Relocation::spec(), @@ -2614,7 +2659,7 @@ } if (_method) { // Emit stub for static call - emit_java_to_interp(cbuf); + emit_java_to_interp(cbuf); } %} @@ -2629,7 +2674,7 @@ // movq rax, -1 emit_opcode(cbuf, Assembler::REX_W); emit_opcode(cbuf, 0xB8 | RAX_enc); - emit_d64_reloc(cbuf, + emit_d64_reloc(cbuf, (int64_t) Universe::non_oop_word(), oop_Relocation::spec_for_immediate(), RELOC_IMM64); address virtual_call_oop_addr = cbuf.inst_mark(); @@ -2637,7 +2682,7 @@ // who we intended to call. 
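The Java call encodings above pick a relocation type based on what is known about the callee. A sketch of that three-way choice, with placeholder names rather than the real HotSpot relocation classes:

enum RelocKind { RUNTIME_CALL, OPT_VIRTUAL_CALL, STATIC_CALL };

static RelocKind reloc_for_call(bool has_method, bool optimized_virtual) {
  if (!has_method)       return RUNTIME_CALL;      // call into the runtime, no Java method
  if (optimized_virtual) return OPT_VIRTUAL_CALL;  // devirtualized virtual call site
  return STATIC_CALL;                              // plain static/special call (stub emitted too)
}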
cbuf.set_inst_mark(); $$$emit8$primary; - emit_d32_reloc(cbuf, + emit_d32_reloc(cbuf, (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4), virtual_call_Relocation::spec(virtual_call_oop_addr), RELOC_DISP32); @@ -2838,7 +2883,7 @@ } dstenc -= 8; } - + emit_opcode(cbuf, 0x8B); emit_rm(cbuf, 0x3, dstenc, srcenc); %} @@ -2847,7 +2892,7 @@ %{ int srcenc = $src$$reg; int dstenc = $dst$$reg; - + if (dstenc != srcenc) { if (dstenc < 8) { if (srcenc < 8) { @@ -3174,7 +3219,7 @@ } %} - enc_class reg_mem(rRegI ereg, memory mem) + enc_class reg_mem(rRegI ereg, memory mem) %{ // High registers handle in encode_RegMem int reg = $ereg$$reg; @@ -3323,7 +3368,7 @@ int src1enc = $src1$$reg; int src2enc = $src2$$reg; int dstenc = $dst$$reg; - + // cmpq $src1, $src2 if (src1enc < 8) { if (src2enc < 8) { @@ -3395,7 +3440,7 @@ // subq rsp,#8 emit_opcode(cbuf, Assembler::REX_W); - emit_opcode(cbuf, 0x83); + emit_opcode(cbuf, 0x83); emit_rm(cbuf, 0x3, 0x5, RSP_enc); emit_d8(cbuf, 0x8); @@ -3462,15 +3507,15 @@ // obj: object to lock // box: box address (header location) -- killed // tmp: rax -- killed - // scr: rbx -- killed + // scr: rbx -- killed // - // What follows is a direct transliteration of fast_lock() and fast_unlock() - // from i486.ad. See that file for comments. + // What follows is a direct transliteration of fast_lock() and fast_unlock() + // from i486.ad. See that file for comments. // TODO: where possible switch from movq (r, 0) to movl(r,0) and - // use the shorter encoding. (Movl clears the high-order 32-bits). - + // use the shorter encoding. (Movl clears the high-order 32-bits). + - enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr) + enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr) %{ Register objReg = as_Register((int)$obj$$reg); Register boxReg = as_Register((int)$box$$reg); @@ -3479,179 +3524,183 @@ MacroAssembler masm(&cbuf); // Verify uniqueness of register assignments -- necessary but not sufficient - assert (objReg != boxReg && objReg != tmpReg && - objReg != scrReg && tmpReg != scrReg, "invariant") ; + assert (objReg != boxReg && objReg != tmpReg && + objReg != scrReg && tmpReg != scrReg, "invariant") ; if (_counters != NULL) { masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr())); } - if (EmitSync & 1) { - masm.movptr (Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())) ; - masm.cmpq (rsp, 0) ; + if (EmitSync & 1) { + // Without cast to int32_t a movptr will destroy r10 which is typically obj + masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ; + masm.cmpptr(rsp, (int32_t)NULL_WORD) ; } else - if (EmitSync & 2) { + if (EmitSync & 2) { Label DONE_LABEL; if (UseBiasedLocking) { // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument. masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters); } - masm.movl(tmpReg, 0x1); - masm.orq(tmpReg, Address(objReg, 0)); - masm.movq(Address(boxReg, 0), tmpReg); + // QQQ was movl... 
+ masm.movptr(tmpReg, 0x1); + masm.orptr(tmpReg, Address(objReg, 0)); + masm.movptr(Address(boxReg, 0), tmpReg); if (os::is_MP()) { masm.lock(); } - masm.cmpxchgq(boxReg, Address(objReg, 0)); // Updates tmpReg + masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg masm.jcc(Assembler::equal, DONE_LABEL); - + // Recursive locking - masm.subq(tmpReg, rsp); - masm.andq(tmpReg, 7 - os::vm_page_size()); - masm.movq(Address(boxReg, 0), tmpReg); + masm.subptr(tmpReg, rsp); + masm.andptr(tmpReg, 7 - os::vm_page_size()); + masm.movptr(Address(boxReg, 0), tmpReg); - masm.bind(DONE_LABEL); + masm.bind(DONE_LABEL); masm.nop(); // avoid branch to branch - } else { - Label DONE_LABEL, IsInflated, Egress; + } else { + Label DONE_LABEL, IsInflated, Egress; - masm.movq (tmpReg, Address(objReg, 0)) ; - masm.testq (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased + masm.movptr(tmpReg, Address(objReg, 0)) ; + masm.testl (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased masm.jcc (Assembler::notZero, IsInflated) ; // it's stack-locked, biased or neutral - // TODO: optimize markword triage order to reduce the number of - // conditional branches in the most common cases. + // TODO: optimize markword triage order to reduce the number of + // conditional branches in the most common cases. // Beware -- there's a subtle invariant that fetch of the markword - // at [FETCH], below, will never observe a biased encoding (*101b). + // at [FETCH], below, will never observe a biased encoding (*101b). // If this invariant is not held we'll suffer exclusion (safety) failure. - - if (UseBiasedLocking) { + + if (UseBiasedLocking && !UseOptoBiasInlining) { masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters); - masm.movq (tmpReg, Address(objReg, 0)) ; // [FETCH] + masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH] } - masm.orq (tmpReg, 1) ; - masm.movq (Address(boxReg, 0), tmpReg) ; + // was q will it destroy high? + masm.orl (tmpReg, 1) ; + masm.movptr(Address(boxReg, 0), tmpReg) ; if (os::is_MP()) { masm.lock(); } - masm.cmpxchgq(boxReg, Address(objReg, 0)); // Updates tmpReg + masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg if (_counters != NULL) { masm.cond_inc32(Assembler::equal, ExternalAddress((address) _counters->fast_path_entry_count_addr())); } masm.jcc (Assembler::equal, DONE_LABEL); - + // Recursive locking - masm.subq (tmpReg, rsp); - masm.andq (tmpReg, 7 - os::vm_page_size()); - masm.movq (Address(boxReg, 0), tmpReg); + masm.subptr(tmpReg, rsp); + masm.andptr(tmpReg, 7 - os::vm_page_size()); + masm.movptr(Address(boxReg, 0), tmpReg); if (_counters != NULL) { masm.cond_inc32(Assembler::equal, ExternalAddress((address) _counters->fast_path_entry_count_addr())); } - masm.jmp (DONE_LABEL) ; + masm.jmp (DONE_LABEL) ; - masm.bind (IsInflated) ; + masm.bind (IsInflated) ; // It's inflated - + // TODO: someday avoid the ST-before-CAS penalty by - // relocating (deferring) the following ST. + // relocating (deferring) the following ST. // We should also think about trying a CAS without having // fetched _owner. If the CAS is successful we may - // avoid an RTO->RTS upgrade on the $line. - masm.movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())) ; - - masm.movq (boxReg, tmpReg) ; - masm.movq (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; - masm.testq (tmpReg, tmpReg) ; - masm.jcc (Assembler::notZero, DONE_LABEL) ; + // avoid an RTO->RTS upgrade on the $line. 
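The Fast_Lock recursion test above (subptr of rsp, then andptr with 7 - page size) checks whether the mark word fetched before the failed CAS is an address inside the locking thread's own stack; a zero result becomes the displaced header and marks the lock as recursively held. A standalone sketch of that predicate, with the page size passed in as an assumed parameter:

#include <cstdint>

static bool looks_like_recursive_stack_lock(uintptr_t displaced_mark,
                                            uintptr_t rsp,
                                            uintptr_t vm_page_size) {
  // zero means the old mark points into this thread's stack => recursive enter
  return ((displaced_mark - rsp) & (7 - vm_page_size)) == 0;
}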
+ // Without cast to int32_t a movptr will destroy r10 which is typically obj + masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ; + + masm.mov (boxReg, tmpReg) ; + masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; + masm.testptr(tmpReg, tmpReg) ; + masm.jcc (Assembler::notZero, DONE_LABEL) ; // It's inflated and appears unlocked if (os::is_MP()) { masm.lock(); } - masm.cmpxchgq(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; + masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // Intentional fall-through into DONE_LABEL ... - - masm.bind (DONE_LABEL) ; + + masm.bind (DONE_LABEL) ; masm.nop () ; // avoid jmp to jmp } %} - // obj: object to unlock + // obj: object to unlock // box: box address (displaced header location), killed // RBX: killed tmp; cannot be obj nor box enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp) %{ - + Register objReg = as_Register($obj$$reg); Register boxReg = as_Register($box$$reg); Register tmpReg = as_Register($tmp$$reg); MacroAssembler masm(&cbuf); if (EmitSync & 4) { - masm.cmpq (rsp, 0) ; + masm.cmpptr(rsp, 0) ; } else - if (EmitSync & 8) { + if (EmitSync & 8) { Label DONE_LABEL; if (UseBiasedLocking) { masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL); } - // Check whether the displaced header is 0 + // Check whether the displaced header is 0 //(=> recursive unlock) - masm.movq(tmpReg, Address(boxReg, 0)); - masm.testq(tmpReg, tmpReg); + masm.movptr(tmpReg, Address(boxReg, 0)); + masm.testptr(tmpReg, tmpReg); masm.jcc(Assembler::zero, DONE_LABEL); // If not recursive lock, reset the header to displaced header if (os::is_MP()) { masm.lock(); } - masm.cmpxchgq(tmpReg, Address(objReg, 0)); // Uses RAX which is box + masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box masm.bind(DONE_LABEL); masm.nop(); // avoid branch to branch - } else { - Label DONE_LABEL, Stacked, CheckSucc ; + } else { + Label DONE_LABEL, Stacked, CheckSucc ; - if (UseBiasedLocking) { + if (UseBiasedLocking && !UseOptoBiasInlining) { masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL); } - masm.movq (tmpReg, Address(objReg, 0)) ; - masm.cmpq (Address(boxReg, 0), (int)NULL_WORD) ; + masm.movptr(tmpReg, Address(objReg, 0)) ; + masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ; masm.jcc (Assembler::zero, DONE_LABEL) ; - masm.testq (tmpReg, 0x02) ; + masm.testl (tmpReg, 0x02) ; masm.jcc (Assembler::zero, Stacked) ; // It's inflated - masm.movq (boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; - masm.xorq (boxReg, r15_thread) ; - masm.orq (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ; + masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; + masm.xorptr(boxReg, r15_thread) ; + masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ; masm.jcc (Assembler::notZero, DONE_LABEL) ; - masm.movq (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; - masm.orq (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; + masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; + masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; masm.jcc (Assembler::notZero, CheckSucc) ; - masm.mov64 (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int)NULL_WORD) ; + masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ; 
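The new (int32_t) casts in the lock/unlock encodings above follow from an x86-64 instruction-set limit: there is no store of a full 64-bit immediate to memory, so without the cast a movptr of an intptr_t constant would have to route through a scratch register (the r10-clobbering the added comment warns about), while a sign-extended 32-bit immediate can be stored directly. A sketch of the fits-in-imm32 test that makes the cast safe for values like unused_mark and NULL_WORD:

#include <cstdint>

static bool fits_simm32(intptr_t value) {
  // true when the value survives a round-trip through a sign-extended int32_t
  return value == static_cast<intptr_t>(static_cast<int32_t>(value));
}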
masm.jmp (DONE_LABEL) ; if ((EmitSync & 65536) == 0) { Label LSuccess, LGoSlowPath ; masm.bind (CheckSucc) ; - masm.cmpq (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int)NULL_WORD) ; + masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ; masm.jcc (Assembler::zero, LGoSlowPath) ; // I'd much rather use lock:andl m->_owner, 0 as it's faster than the // the explicit ST;MEMBAR combination, but masm doesn't currently support // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc // are all faster when the write buffer is populated. - masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int)NULL_WORD) ; + masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ; if (os::is_MP()) { - masm.lock () ; masm.addq (Address(rsp, 0), 0) ; + masm.lock () ; masm.addl (Address(rsp, 0), 0) ; } - masm.cmpq (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int)NULL_WORD) ; + masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ; masm.jcc (Assembler::notZero, LSuccess) ; - masm.movptr (boxReg, (int)NULL_WORD) ; // box is really EAX + masm.movptr (boxReg, (int32_t)NULL_WORD) ; // box is really EAX if (os::is_MP()) { masm.lock(); } - masm.cmpxchgq (r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)); + masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)); masm.jcc (Assembler::notEqual, LSuccess) ; // Intentional fall-through into slow-path @@ -3665,15 +3714,15 @@ } masm.bind (Stacked) ; - masm.movq (tmpReg, Address (boxReg, 0)) ; // re-fetch + masm.movptr(tmpReg, Address (boxReg, 0)) ; // re-fetch if (os::is_MP()) { masm.lock(); } - masm.cmpxchgq(tmpReg, Address(objReg, 0)); // Uses RAX which is box + masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box - if (EmitSync & 65536) { - masm.bind (CheckSucc) ; + if (EmitSync & 65536) { + masm.bind (CheckSucc) ; } masm.bind(DONE_LABEL); - if (EmitSync & 32768) { + if (EmitSync & 32768) { masm.nop(); // avoid branch to branch } } @@ -3681,9 +3730,9 @@ enc_class enc_String_Compare() %{ - Label RCX_GOOD_LABEL, LENGTH_DIFF_LABEL, - POP_LABEL, DONE_LABEL, CONT_LABEL, - WHILE_HEAD_LABEL; + Label RCX_GOOD_LABEL, LENGTH_DIFF_LABEL, + POP_LABEL, DONE_LABEL, CONT_LABEL, + WHILE_HEAD_LABEL; MacroAssembler masm(&cbuf); // Get the first character position in both strings @@ -3693,12 +3742,12 @@ int count_offset = java_lang_String::count_offset_in_bytes(); int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR); - masm.movq(rax, Address(rsi, value_offset)); + masm.load_heap_oop(rax, Address(rsi, value_offset)); masm.movl(rcx, Address(rsi, offset_offset)); - masm.leaq(rax, Address(rax, rcx, Address::times_2, base_offset)); - masm.movq(rbx, Address(rdi, value_offset)); + masm.lea(rax, Address(rax, rcx, Address::times_2, base_offset)); + masm.load_heap_oop(rbx, Address(rdi, value_offset)); masm.movl(rcx, Address(rdi, offset_offset)); - masm.leaq(rbx, Address(rbx, rcx, Address::times_2, base_offset)); + masm.lea(rbx, Address(rbx, rcx, Address::times_2, base_offset)); // Compute the minimum of the string lengths(rsi) and the // difference of the string lengths (stack) @@ -3707,18 +3756,18 @@ masm.movl(rsi, Address(rsi, count_offset)); masm.movl(rcx, rdi); masm.subl(rdi, rsi); - masm.pushq(rdi); - masm.cmovl(Assembler::lessEqual, rsi, rcx); - + masm.push(rdi); + masm.cmov(Assembler::lessEqual, rsi, rcx); + // Is the minimum length zero? 
masm.bind(RCX_GOOD_LABEL); masm.testl(rsi, rsi); masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL); - + // Load first characters masm.load_unsigned_word(rcx, Address(rbx, 0)); masm.load_unsigned_word(rdi, Address(rax, 0)); - + // Compare first characters masm.subl(rcx, rdi); masm.jcc(Assembler::notZero, POP_LABEL); @@ -3729,9 +3778,9 @@ // Check after comparing first character to see if strings are equivalent Label LSkip2; // Check if the strings start at same location - masm.cmpq(rbx, rax); + masm.cmpptr(rbx, rax); masm.jcc(Assembler::notEqual, LSkip2); - + // Check if the length difference is zero (from stack) masm.cmpl(Address(rsp, 0), 0x0); masm.jcc(Assembler::equal, LENGTH_DIFF_LABEL); @@ -3741,9 +3790,9 @@ } // Shift RAX and RBX to the end of the arrays, negate min - masm.leaq(rax, Address(rax, rsi, Address::times_2, 2)); - masm.leaq(rbx, Address(rbx, rsi, Address::times_2, 2)); - masm.negq(rsi); + masm.lea(rax, Address(rax, rsi, Address::times_2, 2)); + masm.lea(rbx, Address(rbx, rsi, Address::times_2, 2)); + masm.negptr(rsi); // Compare the rest of the characters masm.bind(WHILE_HEAD_LABEL); @@ -3751,28 +3800,100 @@ masm.load_unsigned_word(rdi, Address(rax, rsi, Address::times_2, 0)); masm.subl(rcx, rdi); masm.jcc(Assembler::notZero, POP_LABEL); - masm.incrementq(rsi); + masm.increment(rsi); masm.jcc(Assembler::notZero, WHILE_HEAD_LABEL); - + // Strings are equal up to min length. Return the length difference. masm.bind(LENGTH_DIFF_LABEL); - masm.popq(rcx); + masm.pop(rcx); masm.jmp(DONE_LABEL); // Discard the stored length difference masm.bind(POP_LABEL); - masm.addq(rsp, 8); + masm.addptr(rsp, 8); // That's it masm.bind(DONE_LABEL); %} + enc_class enc_Array_Equals(rdi_RegP ary1, rsi_RegP ary2, rax_RegI tmp1, rbx_RegI tmp2, rcx_RegI result) %{ + Label TRUE_LABEL, FALSE_LABEL, DONE_LABEL, COMPARE_LOOP_HDR, COMPARE_LOOP; + MacroAssembler masm(&cbuf); + + Register ary1Reg = as_Register($ary1$$reg); + Register ary2Reg = as_Register($ary2$$reg); + Register tmp1Reg = as_Register($tmp1$$reg); + Register tmp2Reg = as_Register($tmp2$$reg); + Register resultReg = as_Register($result$$reg); + + int length_offset = arrayOopDesc::length_offset_in_bytes(); + int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR); + + // Check the input args + masm.cmpq(ary1Reg, ary2Reg); + masm.jcc(Assembler::equal, TRUE_LABEL); + masm.testq(ary1Reg, ary1Reg); + masm.jcc(Assembler::zero, FALSE_LABEL); + masm.testq(ary2Reg, ary2Reg); + masm.jcc(Assembler::zero, FALSE_LABEL); + + // Check the lengths + masm.movl(tmp2Reg, Address(ary1Reg, length_offset)); + masm.movl(resultReg, Address(ary2Reg, length_offset)); + masm.cmpl(tmp2Reg, resultReg); + masm.jcc(Assembler::notEqual, FALSE_LABEL); + masm.testl(resultReg, resultReg); + masm.jcc(Assembler::zero, TRUE_LABEL); + + // Get the number of 4 byte vectors to compare + masm.shrl(resultReg, 1); + + // Check for odd-length arrays + masm.andl(tmp2Reg, 1); + masm.testl(tmp2Reg, tmp2Reg); + masm.jcc(Assembler::zero, COMPARE_LOOP_HDR); + + // Compare 2-byte "tail" at end of arrays + masm.load_unsigned_word(tmp1Reg, Address(ary1Reg, resultReg, Address::times_4, base_offset)); + masm.load_unsigned_word(tmp2Reg, Address(ary2Reg, resultReg, Address::times_4, base_offset)); + masm.cmpl(tmp1Reg, tmp2Reg); + masm.jcc(Assembler::notEqual, FALSE_LABEL); + masm.testl(resultReg, resultReg); + masm.jcc(Assembler::zero, TRUE_LABEL); + + // Setup compare loop + masm.bind(COMPARE_LOOP_HDR); + // Shift tmp1Reg and tmp2Reg to the last 4-byte boundary of the arrays + masm.leaq(tmp1Reg, 
Address(ary1Reg, resultReg, Address::times_4, base_offset)); + masm.leaq(tmp2Reg, Address(ary2Reg, resultReg, Address::times_4, base_offset)); + masm.negq(resultReg); + + // 4-byte-wide compare loop + masm.bind(COMPARE_LOOP); + masm.movl(ary1Reg, Address(tmp1Reg, resultReg, Address::times_4, 0)); + masm.movl(ary2Reg, Address(tmp2Reg, resultReg, Address::times_4, 0)); + masm.cmpl(ary1Reg, ary2Reg); + masm.jcc(Assembler::notEqual, FALSE_LABEL); + masm.incrementq(resultReg); + masm.jcc(Assembler::notZero, COMPARE_LOOP); + + masm.bind(TRUE_LABEL); + masm.movl(resultReg, 1); // return true + masm.jmp(DONE_LABEL); + + masm.bind(FALSE_LABEL); + masm.xorl(resultReg, resultReg); // return false + + // That's it + masm.bind(DONE_LABEL); + %} + enc_class enc_rethrow() %{ cbuf.set_inst_mark(); emit_opcode(cbuf, 0xE9); // jmp entry - emit_d32_reloc(cbuf, - (int) (OptoRuntime::rethrow_stub() - cbuf.code_end() - 4), + emit_d32_reloc(cbuf, + (int) (OptoRuntime::rethrow_stub() - cbuf.code_end() - 4), runtime_call_Relocation::spec(), RELOC_DISP32); %} @@ -3780,7 +3901,7 @@ enc_class absF_encoding(regF dst) %{ int dstenc = $dst$$reg; - address signmask_address = (address) StubRoutines::amd64::float_sign_mask(); + address signmask_address = (address) StubRoutines::x86::float_sign_mask(); cbuf.set_inst_mark(); if (dstenc >= 8) { @@ -3797,7 +3918,7 @@ enc_class absD_encoding(regD dst) %{ int dstenc = $dst$$reg; - address signmask_address = (address) StubRoutines::amd64::double_sign_mask(); + address signmask_address = (address) StubRoutines::x86::double_sign_mask(); cbuf.set_inst_mark(); emit_opcode(cbuf, 0x66); @@ -3815,7 +3936,7 @@ enc_class negF_encoding(regF dst) %{ int dstenc = $dst$$reg; - address signflip_address = (address) StubRoutines::amd64::float_sign_flip(); + address signflip_address = (address) StubRoutines::x86::float_sign_flip(); cbuf.set_inst_mark(); if (dstenc >= 8) { @@ -3832,7 +3953,7 @@ enc_class negD_encoding(regD dst) %{ int dstenc = $dst$$reg; - address signflip_address = (address) StubRoutines::amd64::double_sign_flip(); + address signflip_address = (address) StubRoutines::x86::double_sign_flip(); cbuf.set_inst_mark(); emit_opcode(cbuf, 0x66); @@ -3889,8 +4010,8 @@ cbuf.set_inst_mark(); emit_opcode(cbuf, 0xE8); emit_d32_reloc(cbuf, - (int) - (StubRoutines::amd64::f2i_fixup() - cbuf.code_end() - 4), + (int) + (StubRoutines::x86::f2i_fixup() - cbuf.code_end() - 4), runtime_call_Relocation::spec(), RELOC_DISP32); @@ -3907,7 +4028,7 @@ %{ int dstenc = $dst$$reg; int srcenc = $src$$reg; - address const_address = (address) StubRoutines::amd64::double_sign_flip(); + address const_address = (address) StubRoutines::x86::double_sign_flip(); // cmpq $dst, [0x8000000000000000] cbuf.set_inst_mark(); @@ -3947,8 +4068,8 @@ cbuf.set_inst_mark(); emit_opcode(cbuf, 0xE8); emit_d32_reloc(cbuf, - (int) - (StubRoutines::amd64::f2l_fixup() - cbuf.code_end() - 4), + (int) + (StubRoutines::x86::f2l_fixup() - cbuf.code_end() - 4), runtime_call_Relocation::spec(), RELOC_DISP32); @@ -4003,8 +4124,8 @@ cbuf.set_inst_mark(); emit_opcode(cbuf, 0xE8); emit_d32_reloc(cbuf, - (int) - (StubRoutines::amd64::d2i_fixup() - cbuf.code_end() - 4), + (int) + (StubRoutines::x86::d2i_fixup() - cbuf.code_end() - 4), runtime_call_Relocation::spec(), RELOC_DISP32); @@ -4021,7 +4142,7 @@ %{ int dstenc = $dst$$reg; int srcenc = $src$$reg; - address const_address = (address) StubRoutines::amd64::double_sign_flip(); + address const_address = (address) StubRoutines::x86::double_sign_flip(); // cmpq $dst, [0x8000000000000000] 
cbuf.set_inst_mark(); @@ -4061,8 +4182,8 @@ cbuf.set_inst_mark(); emit_opcode(cbuf, 0xE8); emit_d32_reloc(cbuf, - (int) - (StubRoutines::amd64::d2l_fixup() - cbuf.code_end() - 4), + (int) + (StubRoutines::x86::d2l_fixup() - cbuf.code_end() - 4), runtime_call_Relocation::spec(), RELOC_DISP32); @@ -4081,7 +4202,7 @@ // emits code don't forget to the remove the "size(0)" line in // membar_acquire() // MacroAssembler masm(&cbuf); - // masm.membar(Assembler::Membar_mask_bits(Assembler::LoadStore | + // masm.membar(Assembler::Membar_mask_bits(Assembler::LoadStore | // Assembler::LoadLoad)); %} @@ -4115,11 +4236,12 @@ emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5 // cbuf.inst_mark() is beginning of instruction emit_d32_reloc(cbuf, os::get_polling_page()); -// relocInfo::poll_type, +// relocInfo::poll_type, %} %} + //----------FRAME-------------------------------------------------------------- // Definition of frame structure and management information. // @@ -4127,10 +4249,10 @@ // | (to get allocators register number // G Owned by | | v add OptoReg::stack0()) // r CALLER | | -// o | +--------+ pad to even-align allocators stack-slot +// o | +--------+ pad to even-align allocators stack-slot // w V | pad0 | numbers; owned by CALLER // t -----------+--------+----> Matcher::_in_arg_limit, unaligned -// h ^ | in | 5 +// h ^ | in | 5 // | | args | 4 Holes in incoming args owned by SELF // | | | | 3 // | | +--------+ @@ -4143,14 +4265,14 @@ // | | pad2 | 2 pad to align old SP // | +--------+ 1 // | | locks | 0 -// | +--------+----> OptoReg::stack0(), even aligned +// | +--------+----> OptoReg::stack0(), even aligned // | | pad1 | 11 pad to align new SP // | +--------+ // | | | 10 // | | spills | 9 spills // V | | 8 (pad0 slot for callee) // -----------+--------+----> Matcher::_out_arg_limit, unaligned -// ^ | out | 7 +// ^ | out | 7 // | | args | 6 Holes in outgoing args owned by CALLEE // Owned by +--------+ // CALLEE | new out| 6 Empty on Intel, window on Sparc @@ -4158,17 +4280,17 @@ // | SP-+--------+----> Matcher::_new_SP, even aligned // | | | // -// Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is +// Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is // known from SELF's arguments and the Java calling convention. // Region 6-7 is determined per call site. -// Note 2: If the calling convention leaves holes in the incoming argument +// Note 2: If the calling convention leaves holes in the incoming argument // area, those holes are owned by SELF. Holes in the outgoing area // are owned by the CALLEE. Holes should not be nessecary in the // incoming area, as the Java calling convention is completely under // the control of the AD file. Doubles can be sorted and packed to // avoid holes. Holes in the outgoing arguments may be nessecary for // varargs C calling conventions. -// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is +// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is // even aligned with pad0 as needed. // Region 6 is even aligned. Region 6-7 is NOT even aligned; // region 6-11 is even aligned; it may be padded out more so that @@ -4182,7 +4304,7 @@ // What direction does stack grow in (assumed to be same for C & Java) stack_direction(TOWARDS_LOW); - // These three registers define part of the calling convention + // These three registers define part of the calling convention // between compiled code and the interpreter. 
inline_cache_reg(RAX); // Inline Cache Register interpreter_method_oop_reg(RBX); // Method Oop Register when @@ -4190,7 +4312,7 @@ // Optional: name the operand used by cisc-spilling to access // [stack_pointer + offset] - cisc_spilling_operand_name(indOffset32); + cisc_spilling_operand_name(indOffset32); // Number of stack slots consumed by locking an object sync_stack_slots(2); @@ -4198,7 +4320,7 @@ // Compiled code's Frame Pointer frame_pointer(RSP); - // Interpreter stores its frame pointer in a register which is + // Interpreter stores its frame pointer in a register which is // stored to the stack by I2CAdaptors. // I2CAdaptors convert from interpreted java to compiled java. interpreter_frame_pointer(RBP); @@ -4206,7 +4328,7 @@ // Stack alignment requirement stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes) - // Number of stack slots between incoming argument block and the start of + // Number of stack slots between incoming argument block and the start of // a new frame. The PROLOG must add this many slots to the stack. The // EPILOG must remove this many slots. amd64 needs two slots for // return address. @@ -4253,8 +4375,9 @@ "only return normal values"); static const int lo[Op_RegL + 1] = { - 0, - 0, + 0, + 0, + RAX_num, // Op_RegN RAX_num, // Op_RegI RAX_num, // Op_RegP XMM0_num, // Op_RegF @@ -4264,13 +4387,14 @@ static const int hi[Op_RegL + 1] = { 0, 0, + OptoReg::Bad, // Op_RegN OptoReg::Bad, // Op_RegI RAX_H_num, // Op_RegP OptoReg::Bad, // Op_RegF XMM0_H_num, // Op_RegD RAX_H_num // Op_RegL }; - + assert(ARRAY_SIZE(hi) == _last_machine_leaf - 1, "missing type"); return OptoRegPair(hi[ideal_reg], lo[ideal_reg]); %} %} @@ -4417,9 +4541,25 @@ interface(CONST_INTER); %} -// Unsigned 31-bit Pointer Immediate -// Can be used in both 32-bit signed and 32-bit unsigned insns. -// Works for nulls and markOops; not for relocatable (oop) pointers. +// Pointer Immediate +operand immN() %{ + match(ConN); + + op_cost(10); + format %{ %} + interface(CONST_INTER); +%} + +// NULL Pointer Immediate +operand immN0() %{ + predicate(n->get_narrowcon() == 0); + match(ConN); + + op_cost(5); + format %{ %} + interface(CONST_INTER); +%} + operand immP31() %{ predicate(!n->as_Type()->type()->isa_oopptr() @@ -4431,6 +4571,7 @@ interface(CONST_INTER); %} + // Long Immediate operand immL() %{ @@ -4643,7 +4784,7 @@ // Register Operands // Integer Register -operand rRegI() +operand rRegI() %{ constraint(ALLOC_IN_RC(int_reg)); match(RegI); @@ -4767,6 +4908,23 @@ interface(REG_INTER); %} + +operand r12RegL() %{ + constraint(ALLOC_IN_RC(long_r12_reg)); + match(RegL); + + format %{ %} + interface(REG_INTER); +%} + +operand rRegN() %{ + constraint(ALLOC_IN_RC(int_reg)); + match(RegN); + + format %{ %} + interface(REG_INTER); +%} + // Question: Why is r15_RegP (the read-only TLS register) a match for rRegP? // Answer: Operand match rules govern the DFA as it processes instruction inputs. // It's fine for an instruction input which expects rRegP to match a r15_RegP. 
@@ -4822,6 +4980,18 @@ interface(REG_INTER); %} +// Special Registers +// Return a compressed pointer value +operand rax_RegN() +%{ + constraint(ALLOC_IN_RC(int_rax_reg)); + match(RegN); + match(rRegN); + + format %{ %} + interface(REG_INTER); +%} + // Used in AtomicAdd operand rbx_RegP() %{ @@ -4912,7 +5082,7 @@ constraint(ALLOC_IN_RC(long_no_rcx_reg)); match(RegL); match(rRegL); - + format %{ %} interface(REG_INTER); %} @@ -4967,6 +5137,15 @@ interface(REG_INTER); %} +operand rFlagsRegUCF() %{ + constraint(ALLOC_IN_RC(int_flags)); + match(RegFlags); + predicate(false); + + format %{ "RFLAGS_U_CF" %} + interface(REG_INTER); +%} + // Float register operands operand regF() %{ @@ -5080,7 +5259,7 @@ %} %} -// Indirect Memory Times Scale Plus Index Register +// Indirect Memory Times Scale Plus Index Register operand indIndexScale(any_RegP reg, rRegL lreg, immI2 scale) %{ constraint(ALLOC_IN_RC(ptr_reg)); @@ -5112,6 +5291,21 @@ %} %} +// Indirect Narrow Oop Plus Offset Operand +operand indNarrowOopOffset(rRegN src, immL32 off) %{ + constraint(ALLOC_IN_RC(ptr_reg)); + match(AddP (DecodeN src) off); + + op_cost(10); + format %{"[R12 + $src << 3 + $off] (compressed oop addressing)" %} + interface(MEMORY_INTER) %{ + base(0xc); // R12 + index($src); + scale(0x3); + disp($off); + %} +%} + // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale) %{ @@ -5133,7 +5327,7 @@ // Stack Slot Operand - This operand is used for loading and storing temporary // values on the stack where a match requires a value to // flow through memory. -operand stackSlotP(sRegP reg) +operand stackSlotP(sRegP reg) %{ constraint(ALLOC_IN_RC(stack_slots)); // No match rule because this operand is only generated in matching @@ -5223,12 +5417,12 @@ format %{ "" %} interface(COND_INTER) %{ - equal(0x4); - not_equal(0x5); - less(0xC); - greater_equal(0xD); - less_equal(0xE); - greater(0xF); + equal(0x4, "e"); + not_equal(0x5, "ne"); + less(0xC, "l"); + greater_equal(0xD, "ge"); + less_equal(0xE, "le"); + greater(0xF, "g"); %} %} @@ -5241,12 +5435,48 @@ format %{ "" %} interface(COND_INTER) %{ - equal(0x4); - not_equal(0x5); - less(0x2); - greater_equal(0x3); - less_equal(0x6); - greater(0x7); + equal(0x4, "e"); + not_equal(0x5, "ne"); + less(0x2, "b"); + greater_equal(0x3, "nb"); + less_equal(0x6, "be"); + greater(0x7, "nbe"); + %} +%} + + +// Floating comparisons that don't require any fixup for the unordered case +operand cmpOpUCF() %{ + match(Bool); + predicate(n->as_Bool()->_test._test == BoolTest::lt || + n->as_Bool()->_test._test == BoolTest::ge || + n->as_Bool()->_test._test == BoolTest::le || + n->as_Bool()->_test._test == BoolTest::gt); + format %{ "" %} + interface(COND_INTER) %{ + equal(0x4, "e"); + not_equal(0x5, "ne"); + less(0x2, "b"); + greater_equal(0x3, "nb"); + less_equal(0x6, "be"); + greater(0x7, "nbe"); + %} +%} + + +// Floating comparisons that can be fixed up with extra conditional jumps +operand cmpOpUCF2() %{ + match(Bool); + predicate(n->as_Bool()->_test._test == BoolTest::ne || + n->as_Bool()->_test._test == BoolTest::eq); + format %{ "" %} + interface(COND_INTER) %{ + equal(0x4, "e"); + not_equal(0x5, "ne"); + less(0x2, "b"); + greater_equal(0x3, "nb"); + less_equal(0x6, "be"); + greater(0x7, "nbe"); %} %} @@ -5258,8 +5488,9 @@ // multiple operand types with the same basic encoding and format. The classic // case of this is memory operands. 
-opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex, - indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset); +opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex, + indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset, + indNarrowOopOffset); //----------PIPELINE----------------------------------------------------------- // Rules which define the behavior of the target architectures pipeline. @@ -5283,11 +5514,11 @@ // Generic P2/P3 pipeline // 3 decoders, only D0 handles big operands; a "bundle" is the limit of // 3 instructions decoded per cycle. -// 2 load/store ops per cycle, 1 branch, 1 FPU, +// 2 load/store ops per cycle, 1 branch, 1 FPU, // 3 ALU op, only ALU0 handles mul instructions. -resources( D0, D1, D2, DECODE = D0 | D1 | D2, - MS0, MS1, MS2, MEM = MS0 | MS1 | MS2, - BR, FPU, +resources( D0, D1, D2, DECODE = D0 | D1 | D2, + MS0, MS1, MS2, MEM = MS0 | MS1 | MS2, + BR, FPU, ALU0, ALU1, ALU2, ALU = ALU0 | ALU1 | ALU2); //----------PIPELINE DESCRIPTION----------------------------------------------- @@ -5313,7 +5544,7 @@ single_instruction; dst : S4(write); dst : S3(read); - DECODE : S0; // any decoder + DECODE : S0; // any decoder ALU : S3; // any alu %} @@ -5323,7 +5554,7 @@ instruction_count(2); dst : S4(write); dst : S3(read); - DECODE : S0(2); // any 2 decoders + DECODE : S0(2); // any 2 decoders ALU : S3(2); // both alus %} @@ -5333,7 +5564,7 @@ single_instruction; dst : S4(write); dst : S3(read); - D0 : S0; // big decoder only + D0 : S0; // big decoder only ALU : S3; // any alu %} @@ -5343,7 +5574,7 @@ instruction_count(2); dst : S4(write); dst : S3(read); - D0 : S0(2); // big decoder only; twice + D0 : S0(2); // big decoder only; twice ALU : S3(2); // any 2 alus %} @@ -5353,7 +5584,7 @@ single_instruction; dst : S4(write); src : S3(read); - DECODE : S0; // any decoder + DECODE : S0; // any decoder ALU : S3; // any alu %} @@ -5363,7 +5594,7 @@ instruction_count(2); dst : S4(write); src : S3(read); - DECODE : S0(2); // any 2 decoders + DECODE : S0(2); // any 2 decoders ALU : S3(2); // both alus %} @@ -5373,7 +5604,7 @@ single_instruction; dst : S4(write); src : S3(read); - D0 : S0; // big decoder only + D0 : S0; // big decoder only ALU : S3; // any alu %} @@ -5383,7 +5614,7 @@ instruction_count(2); dst : S4(write); src : S3(read); - D0 : S0(2); // big decoder only; twice + D0 : S0(2); // big decoder only; twice ALU : S3(2); // both alus %} @@ -5393,7 +5624,7 @@ single_instruction; dst : S5(write); mem : S3(read); - D0 : S0; // big decoder only + D0 : S0; // big decoder only ALU : S4; // any alu MEM : S3; // any mem %} @@ -5403,7 +5634,7 @@ %{ single_instruction; mem : S3(read); - D0 : S0; // big decoder only + D0 : S0; // big decoder only MEM : S3; // any mem %} @@ -5413,7 +5644,7 @@ single_instruction; mem : S3(read); src : S5(read); - D0 : S0; // big decoder only + D0 : S0; // big decoder only ALU : S4; // any alu MEM : S3; %} @@ -5424,9 +5655,9 @@ // instruction_count(2); // mem : S3(read); // src : S5(read); -// D0 : S0(2); // big decoder only; twice +// D0 : S0(2); // big decoder only; twice // ALU : S4(2); // any 2 alus -// MEM : S3(2); // Both mems +// MEM : S3(2); // Both mems // %} // Integer Store to Memory @@ -5434,7 +5665,7 @@ %{ single_instruction; mem : S3(read); - D0 : S0; // big decoder only + D0 : S0; // big decoder only ALU : S4; // any alu MEM : S3; %} @@ -5445,7 +5676,7 @@ single_instruction; dst : S4(write); src : S3(read); - D0 : S0; // Big decoder only + D0 : S0; // Big decoder only ALU0 : S3; 
// only alu0 %} @@ -5455,7 +5686,7 @@ single_instruction; dst : S5(write); mem : S3(read); - D0 : S0; // big decoder only + D0 : S0; // big decoder only ALU0 : S4; // ALU0 only MEM : S3; // any mem %} @@ -5467,7 +5698,7 @@ cr : S4(write); src1 : S3(read); src2 : S3(read); - DECODE : S0; // any decoder + DECODE : S0; // any decoder ALU : S3; // any alu %} @@ -5477,7 +5708,7 @@ single_instruction; cr : S4(write); src1 : S3(read); - DECODE : S0; // any decoder + DECODE : S0; // any decoder ALU : S3; // any alu %} @@ -5488,7 +5719,7 @@ cr : S4(write); src1 : S3(read); src2 : S3(read); - D0 : S0; // big decoder only + D0 : S0; // big decoder only ALU : S4; // any alu MEM : S3; %} @@ -5500,7 +5731,7 @@ y : S4(read); q : S3(read); p : S3(read); - DECODE : S0(4); // any decoder + DECODE : S0(4); // any decoder %} // Conditional move reg-reg @@ -5510,7 +5741,7 @@ dst : S4(write); src : S3(read); cr : S3(read); - DECODE : S0; // any decoder + DECODE : S0; // any decoder %} // Conditional move reg-mem @@ -5520,7 +5751,7 @@ dst : S4(write); src : S3(read); cr : S3(read); - DECODE : S0; // any decoder + DECODE : S0; // any decoder MEM : S3; %} @@ -5531,7 +5762,7 @@ dst : S4(write); src : S3(read); cr : S3(read); - DECODE : S0(2); // any 2 decoders + DECODE : S0(2); // any 2 decoders %} // XXX @@ -5542,7 +5773,7 @@ // dst : S4(write); // src : S3(read); // cr : S3(read); -// DECODE : S0; // any decoder +// DECODE : S0; // any decoder // %} // Float reg-reg operation @@ -5550,7 +5781,7 @@ %{ instruction_count(2); dst : S3(read); - DECODE : S0(2); // any 2 decoders + DECODE : S0(2); // any 2 decoders FPU : S3; %} @@ -5560,7 +5791,7 @@ instruction_count(2); dst : S4(write); src : S3(read); - DECODE : S0(2); // any 2 decoders + DECODE : S0(2); // any 2 decoders FPU : S3; %} @@ -5571,7 +5802,7 @@ dst : S4(write); src1 : S3(read); src2 : S3(read); - DECODE : S0(3); // any 3 decoders + DECODE : S0(3); // any 3 decoders FPU : S3(2); %} @@ -5583,7 +5814,7 @@ src1 : S3(read); src2 : S3(read); src3 : S3(read); - DECODE : S0(4); // any 3 decoders + DECODE : S0(4); // any 3 decoders FPU : S3(2); %} @@ -5595,7 +5826,7 @@ src1 : S3(read); src2 : S3(read); src3 : S3(read); - DECODE : S1(3); // any 3 decoders + DECODE : S1(3); // any 3 decoders D0 : S0; // Big decoder only FPU : S3(2); MEM : S3; @@ -5607,7 +5838,7 @@ instruction_count(2); dst : S5(write); mem : S3(read); - D0 : S0; // big decoder only + D0 : S0; // big decoder only DECODE : S1; // any decoder for FPU POP FPU : S4; MEM : S3; // any mem @@ -5620,7 +5851,7 @@ dst : S5(write); src1 : S3(read); mem : S3(read); - D0 : S0; // big decoder only + D0 : S0; // big decoder only DECODE : S1(2); // any decoder for FPU POP FPU : S4; MEM : S3; // any mem @@ -5633,7 +5864,7 @@ src : S5(read); mem : S3(read); DECODE : S0; // any decoder for FPU PUSH - D0 : S1; // big decoder only + D0 : S1; // big decoder only FPU : S4; MEM : S3; // any mem %} @@ -5645,7 +5876,7 @@ src2 : S3(read); mem : S3(read); DECODE : S0(2); // any decoder for FPU PUSH - D0 : S1; // big decoder only + D0 : S1; // big decoder only FPU : S4; MEM : S3; // any mem %} @@ -5657,7 +5888,7 @@ src2 : S3(read); mem : S4(read); DECODE : S0; // any decoder for FPU PUSH - D0 : S0(2); // big decoder only + D0 : S0(2); // big decoder only FPU : S4; MEM : S3(2); // any mem %} @@ -5667,7 +5898,7 @@ instruction_count(2); src1 : S3(read); dst : S4(read); - D0 : S0(2); // big decoder only + D0 : S0(2); // big decoder only MEM : S3(2); // any mem %} @@ -5677,7 +5908,7 @@ src1 : S3(read); src2 : S3(read); dst : S4(read); 
- D0 : S0(3); // big decoder only + D0 : S0(3); // big decoder only FPU : S4; MEM : S3(3); // any mem %} @@ -5688,7 +5919,7 @@ src1 : S4(read); mem : S4(read); DECODE : S0; // any decoder for FPU PUSH - D0 : S0(2); // big decoder only + D0 : S0(2); // big decoder only FPU : S4; MEM : S3(2); // any mem %} @@ -5698,7 +5929,7 @@ %{ instruction_count(2); dst : S5(write); - D0 : S0; // big decoder only for the load + D0 : S0; // big decoder only for the load DECODE : S1; // any decoder for FPU POP FPU : S4; MEM : S3; // any mem @@ -5710,7 +5941,7 @@ instruction_count(3); dst : S5(write); src : S3(read); - D0 : S0; // big decoder only for the load + D0 : S0; // big decoder only for the load DECODE : S1(2); // any decoder for FPU POP FPU : S4; MEM : S3; // any mem @@ -5746,7 +5977,7 @@ %} // Generic big/slow expanded idiom -pipe_class pipe_slow() +pipe_class pipe_slow() %{ instruction_count(10); multiple_bundles; force_serialization; fixed_latency(100); @@ -5761,7 +5992,7 @@ %} // Define the class for the Nop node -define +define %{ MachNop = empty; %} @@ -5769,21 +6000,21 @@ %} //----------INSTRUCTIONS------------------------------------------------------- -// -// match -- States which machine-independent subtree may be replaced +// +// match -- States which machine-independent subtree may be replaced // by this instruction. // ins_cost -- The estimated cost of this instruction is used by instruction -// selection to identify a minimum cost tree of machine -// instructions that matches a tree of machine-independent +// selection to identify a minimum cost tree of machine +// instructions that matches a tree of machine-independent // instructions. // format -- A string providing the disassembly for this instruction. -// The value of an instruction's operand may be inserted +// The value of an instruction's operand may be inserted // by referring to it with a '$' prefix. -// opcode -- Three instruction opcodes may be provided. These are referred +// opcode -- Three instruction opcodes may be provided. These are referred // to within an encode class as $primary, $secondary, and $tertiary -// rrspectively. The primary opcode is commonly used to -// indicate the type of machine instruction, while secondary -// and tertiary are often used for prefix options or addressing +// rrspectively. The primary opcode is commonly used to +// indicate the type of machine instruction, while secondary +// and tertiary are often used for prefix options or addressing // modes. // ins_encode -- A list of encode classes with parameters. 
The encode class // name must have been defined in an 'enc_class' specification @@ -5866,7 +6097,7 @@ // %} // Load Char (16 bit UNsigned) -instruct loadC(rRegI dst, memory mem) +instruct loadC(rRegI dst, memory mem) %{ match(Set dst (LoadC mem)); @@ -5878,7 +6109,7 @@ %} // Load Char (16 bit UNsigned) into long -// instruct loadC2L(rRegL dst, memory mem) +// instruct loadC2L(rRegL dst, memory mem) // %{ // match(Set dst (ConvI2L (LoadC mem))); @@ -5937,6 +6168,22 @@ ins_pipe(ialu_reg_mem); // XXX %} +// Load Compressed Pointer +instruct loadN(rRegN dst, memory mem) +%{ + match(Set dst (LoadN mem)); + + ins_cost(125); // XXX + format %{ "movl $dst, $mem\t# compressed ptr" %} + ins_encode %{ + Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp); + Register dst = as_Register($dst$$reg); + __ movl(dst, addr); + %} + ins_pipe(ialu_reg_mem); // XXX +%} + + // Load Klass Pointer instruct loadKlass(rRegP dst, memory mem) %{ @@ -5949,6 +6196,21 @@ ins_pipe(ialu_reg_mem); // XXX %} +// Load narrow Klass Pointer +instruct loadNKlass(rRegN dst, memory mem) +%{ + match(Set dst (LoadNKlass mem)); + + ins_cost(125); // XXX + format %{ "movl $dst, $mem\t# compressed klass ptr" %} + ins_encode %{ + Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp); + Register dst = as_Register($dst$$reg); + __ movl(dst, addr); + %} + ins_pipe(ialu_reg_mem); // XXX +%} + // Load Float instruct loadF(regF dst, memory mem) %{ @@ -6032,7 +6294,7 @@ %} // Load Effective Address -instruct leaP8(rRegP dst, indOffset8 mem) +instruct leaP8(rRegP dst, indOffset8 mem) %{ match(Set dst mem); @@ -6111,7 +6373,7 @@ %{ match(Set dst src); effect(KILL cr); - + ins_cost(50); format %{ "xorl $dst, $dst\t# int" %} opcode(0x33); /* + rd */ @@ -6133,7 +6395,7 @@ %{ match(Set dst src); effect(KILL cr); - + ins_cost(50); format %{ "xorl $dst, $dst\t# long" %} opcode(0x33); /* + rd */ @@ -6174,7 +6436,7 @@ %{ match(Set dst src); effect(KILL cr); - + ins_cost(50); format %{ "xorl $dst, $dst\t# ptr" %} opcode(0x33); /* + rd */ @@ -6186,7 +6448,7 @@ %{ match(Set dst src); effect(KILL cr); - + ins_cost(60); format %{ "movl $dst, $src\t# ptr (positive 32-bit)" %} ins_encode(load_immP31(dst, src)); @@ -6203,6 +6465,34 @@ ins_pipe(pipe_slow); %} +instruct loadConN0(rRegN dst, immN0 src, rFlagsReg cr) %{ + match(Set dst src); + effect(KILL cr); + format %{ "xorq $dst, $src\t# compressed NULL ptr" %} + ins_encode %{ + Register dst = $dst$$Register; + __ xorq(dst, dst); + %} + ins_pipe(ialu_reg); +%} + +instruct loadConN(rRegN dst, immN src) %{ + match(Set dst src); + + ins_cost(125); + format %{ "movl $dst, $src\t# compressed ptr" %} + ins_encode %{ + address con = (address)$src$$constant; + Register dst = $dst$$Register; + if (con == NULL) { + ShouldNotReachHere(); + } else { + __ set_narrow_oop(dst, (jobject)$src$$constant); + } + %} + ins_pipe(ialu_reg_fat); // XXX +%} + instruct loadConF0(regF dst, immF0 src) %{ match(Set dst src); @@ -6215,7 +6505,7 @@ %} // Use the same format since predicate() can not be used here. 
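The compressed-pointer loads above (loadN, loadNKlass, loadConN) work on a 32-bit narrow value: movl fetches it, and on x86_64 a 32-bit register write zero-extends into the full 64-bit register, so nothing extra is needed before the value is decoded; a compressed NULL is simply a zeroed register. The EncodeP/DecodeN instructs a little further down convert between the narrow and full forms. A rough sketch of the scheme, assuming 8-byte object alignment (shift of 3) and taking the heap base, which the generated code keeps in R12, as an explicit parameter:

#include <cstdint>
#include <cstring>

const int kOopShift = 3;  // assumed 8-byte object alignment

// movl dst, [mem]: the upper 32 bits of dst become zero automatically.
uint32_t load_narrow(const void* field_addr) {
  uint32_t narrow;
  std::memcpy(&narrow, field_addr, sizeof(narrow));
  return narrow;
}

// Roughly what encode_heap_oop / decode_heap_oop compute; the *_not_null
// variants may skip the NULL checks because the matcher has already proven
// the value non-NULL.
uint32_t encode_heap_oop(uint64_t oop, uint64_t heap_base) {
  if (oop == 0) return 0;
  return static_cast<uint32_t>((oop - heap_base) >> kOopShift);
}

uint64_t decode_heap_oop(uint32_t narrow, uint64_t heap_base) {
  if (narrow == 0) return 0;
  return heap_base + (static_cast<uint64_t>(narrow) << kOopShift);
}

This is also why the indNarrowOopOffset operand defined earlier can fold the decode into a single [R12 + narrow*8 + disp] address instead of materializing the full oop first.
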
-instruct loadConD(regD dst, immD src) +instruct loadConD(regD dst, immD src) %{ match(Set dst src); ins_cost(125); @@ -6411,7 +6701,7 @@ %} // Store Integer -instruct storeI(memory mem, rRegI src) +instruct storeI(memory mem, rRegI src) %{ match(Set mem (StoreI mem src)); @@ -6458,6 +6748,21 @@ ins_pipe(ialu_mem_imm); %} +// Store Compressed Pointer +instruct storeN(memory mem, rRegN src) +%{ + match(Set mem (StoreN mem src)); + + ins_cost(125); // XXX + format %{ "movl $mem, $src\t# compressed ptr" %} + ins_encode %{ + Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp); + Register src = as_Register($src$$reg); + __ movl(addr, src); + %} + ins_pipe(ialu_mem_reg); +%} + // Store Integer Immediate instruct storeImmI(memory mem, immI src) %{ @@ -6504,7 +6809,7 @@ format %{ "movb $mem, $src\t# byte" %} opcode(0xC6); /* C6 /0 */ ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src)); - ins_pipe(ialu_mem_imm); + ins_pipe(ialu_mem_imm); %} // Store Aligned Packed Byte XMM register to memory @@ -6543,7 +6848,7 @@ format %{ "movb $mem, $src\t# CMS card-mark byte 0" %} opcode(0xC6); /* C6 /0 */ ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src)); - ins_pipe(ialu_mem_imm); + ins_pipe(ialu_mem_imm); %} // Store Aligned Packed Single Float XMM register to memory @@ -6672,7 +6977,7 @@ match(Set dst (ReverseBytesL dst)); format %{ "bswapq $dst" %} - + opcode(0x0F, 0xC8); /* Opcode 0F /C8 */ ins_encode( REX_reg_wide(dst), OpcP, opc2_reg(dst) ); ins_pipe( ialu_reg); @@ -6683,7 +6988,7 @@ format %{ "bswap_movl $dst, $src" %} opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */ - ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src), REX_reg(dst), OpcS, opc3_reg(dst)); + ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src), REX_reg(dst), OpcS, opc3_reg(dst)); ins_pipe( ialu_reg_mem ); %} @@ -6692,7 +6997,7 @@ format %{ "bswap_movq $dst, $src" %} opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */ - ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src), REX_reg_wide(dst), OpcS, opc3_reg(dst)); + ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src), REX_reg_wide(dst), OpcS, opc3_reg(dst)); ins_pipe( ialu_reg_mem ); %} @@ -6701,7 +7006,7 @@ format %{ "movl_bswap $dst, $src" %} opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */ - ins_encode( REX_reg(src), OpcP, opc2_reg(src), REX_reg_mem(src, dst), OpcT, reg_mem(src, dst) ); + ins_encode( REX_reg(src), OpcP, opc2_reg(src), REX_reg_mem(src, dst), OpcT, reg_mem(src, dst) ); ins_pipe( ialu_mem_reg ); %} @@ -6710,7 +7015,7 @@ format %{ "movq_bswap $dst, $src" %} opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */ - ins_encode( REX_reg_wide(src), OpcP, opc2_reg(src), REX_reg_mem_wide(src, dst), OpcT, reg_mem(src, dst) ); + ins_encode( REX_reg_wide(src), OpcP, opc2_reg(src), REX_reg_mem_wide(src, dst), OpcT, reg_mem(src, dst) ); ins_pipe( ialu_mem_reg ); %} @@ -6728,7 +7033,7 @@ ins_pipe(empty); %} -instruct membar_acquire_lock() +instruct membar_acquire_lock() %{ match(MemBarAcquire); predicate(Matcher::prior_fast_lock(n)); @@ -6805,6 +7110,68 @@ ins_pipe(ialu_reg_reg); // XXX %} + +// Convert oop pointer into compressed form +instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{ + predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull); + match(Set dst (EncodeP src)); + effect(KILL cr); + format %{ "encode_heap_oop $dst,$src" %} + ins_encode %{ + Register s = $src$$Register; + Register d = $dst$$Register; + if (s != d) { + __ movq(d, s); + } + __ encode_heap_oop(d); + %} + 
ins_pipe(ialu_reg_long); +%} + +instruct encodeHeapOop_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{ + predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull); + match(Set dst (EncodeP src)); + effect(KILL cr); + format %{ "encode_heap_oop_not_null $dst,$src" %} + ins_encode %{ + Register s = $src$$Register; + Register d = $dst$$Register; + __ encode_heap_oop_not_null(d, s); + %} + ins_pipe(ialu_reg_long); +%} + +instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{ + predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull && + n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant); + match(Set dst (DecodeN src)); + effect(KILL cr); + format %{ "decode_heap_oop $dst,$src" %} + ins_encode %{ + Register s = $src$$Register; + Register d = $dst$$Register; + if (s != d) { + __ movq(d, s); + } + __ decode_heap_oop(d); + %} + ins_pipe(ialu_reg_long); +%} + +instruct decodeHeapOop_not_null(rRegP dst, rRegN src) %{ + predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull || + n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant); + match(Set dst (DecodeN src)); + format %{ "decode_heap_oop_not_null $dst,$src" %} + ins_encode %{ + Register s = $src$$Register; + Register d = $dst$$Register; + __ decode_heap_oop_not_null(d, s); + %} + ins_pipe(ialu_reg_long); +%} + + //----------Conditional Move--------------------------------------------------- // Jump // dummy instruction for generating temp registers @@ -6857,8 +7224,7 @@ ins_pipe(pipe_cmov_reg); %} -instruct cmovI_regU(rRegI dst, rRegI src, rFlagsRegU cr, cmpOpU cop) -%{ +instruct cmovI_regU(cmpOpU cop, rFlagsRegU cr, rRegI dst, rRegI src) %{ match(Set dst (CMoveI (Binary cop cr) (Binary dst src))); ins_cost(200); // XXX @@ -6868,9 +7234,16 @@ ins_pipe(pipe_cmov_reg); %} +instruct cmovI_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, rRegI src) %{ + match(Set dst (CMoveI (Binary cop cr) (Binary dst src))); + ins_cost(200); + expand %{ + cmovI_regU(cop, cr, dst, src); + %} +%} + // Conditional move -instruct cmovI_mem(cmpOp cop, rFlagsReg cr, rRegI dst, memory src) -%{ +instruct cmovI_mem(cmpOp cop, rFlagsReg cr, rRegI dst, memory src) %{ match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src)))); ins_cost(250); // XXX @@ -6892,37 +7265,85 @@ ins_pipe(pipe_cmov_mem); %} +instruct cmovI_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, memory src) %{ + match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src)))); + ins_cost(250); + expand %{ + cmovI_memU(cop, cr, dst, src); + %} +%} + // Conditional move -instruct cmovP_reg(rRegP dst, rRegP src, rFlagsReg cr, cmpOp cop) +instruct cmovN_reg(rRegN dst, rRegN src, rFlagsReg cr, cmpOp cop) %{ - match(Set dst (CMoveP (Binary cop cr) (Binary dst src))); + match(Set dst (CMoveN (Binary cop cr) (Binary dst src))); ins_cost(200); // XXX - format %{ "cmovq$cop $dst, $src\t# signed, ptr" %} + format %{ "cmovl$cop $dst, $src\t# signed, compressed ptr" %} opcode(0x0F, 0x40); - ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src)); - ins_pipe(pipe_cmov_reg); // XXX + ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src)); + ins_pipe(pipe_cmov_reg); %} // Conditional move -instruct cmovP_regU(rRegP dst, rRegP src, rFlagsRegU cr, cmpOpU cop) +instruct cmovN_regU(cmpOpU cop, rFlagsRegU cr, rRegN dst, rRegN src) %{ - match(Set dst (CMoveP (Binary cop cr) (Binary dst src))); + match(Set dst (CMoveN (Binary cop cr) (Binary dst src))); ins_cost(200); // XXX - format %{ "cmovq$cop $dst, $src\t# unsigned, ptr" %} + format %{ 
"cmovl$cop $dst, $src\t# unsigned, compressed ptr" %} opcode(0x0F, 0x40); - ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src)); - ins_pipe(pipe_cmov_reg); // XXX + ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src)); + ins_pipe(pipe_cmov_reg); %} -// DISABLED: Requires the ADLC to emit a bottom_type call that -// correctly meets the two pointer arguments; one is an incoming -// register but the other is a memory operand. ALSO appears to -// be buggy with implicit null checks. -// -//// Conditional move -//instruct cmovP_mem(cmpOp cop, rFlagsReg cr, rRegP dst, memory src) +instruct cmovN_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegN dst, rRegN src) %{ + match(Set dst (CMoveN (Binary cop cr) (Binary dst src))); + ins_cost(200); + expand %{ + cmovN_regU(cop, cr, dst, src); + %} +%} + +// Conditional move +instruct cmovP_reg(rRegP dst, rRegP src, rFlagsReg cr, cmpOp cop) +%{ + match(Set dst (CMoveP (Binary cop cr) (Binary dst src))); + + ins_cost(200); // XXX + format %{ "cmovq$cop $dst, $src\t# signed, ptr" %} + opcode(0x0F, 0x40); + ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src)); + ins_pipe(pipe_cmov_reg); // XXX +%} + +// Conditional move +instruct cmovP_regU(cmpOpU cop, rFlagsRegU cr, rRegP dst, rRegP src) +%{ + match(Set dst (CMoveP (Binary cop cr) (Binary dst src))); + + ins_cost(200); // XXX + format %{ "cmovq$cop $dst, $src\t# unsigned, ptr" %} + opcode(0x0F, 0x40); + ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src)); + ins_pipe(pipe_cmov_reg); // XXX +%} + +instruct cmovP_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegP dst, rRegP src) %{ + match(Set dst (CMoveP (Binary cop cr) (Binary dst src))); + ins_cost(200); + expand %{ + cmovP_regU(cop, cr, dst, src); + %} +%} + +// DISABLED: Requires the ADLC to emit a bottom_type call that +// correctly meets the two pointer arguments; one is an incoming +// register but the other is a memory operand. ALSO appears to +// be buggy with implicit null checks. 
+// +//// Conditional move +//instruct cmovP_mem(cmpOp cop, rFlagsReg cr, rRegP dst, memory src) //%{ // match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src)))); // ins_cost(250); @@ -6976,6 +7397,14 @@ ins_pipe(pipe_cmov_reg); // XXX %} +instruct cmovL_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, rRegL src) %{ + match(Set dst (CMoveL (Binary cop cr) (Binary dst src))); + ins_cost(200); + expand %{ + cmovL_regU(cop, cr, dst, src); + %} +%} + instruct cmovL_memU(cmpOpU cop, rFlagsRegU cr, rRegL dst, memory src) %{ match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src)))); @@ -6987,13 +7416,21 @@ ins_pipe(pipe_cmov_mem); // XXX %} +instruct cmovL_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, memory src) %{ + match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src)))); + ins_cost(200); + expand %{ + cmovL_memU(cop, cr, dst, src); + %} +%} + instruct cmovF_reg(cmpOp cop, rFlagsReg cr, regF dst, regF src) %{ match(Set dst (CMoveF (Binary cop cr) (Binary dst src))); ins_cost(200); // XXX format %{ "jn$cop skip\t# signed cmove float\n\t" - "movss $dst, $src\n" + "movss $dst, $src\n" "skip:" %} ins_encode(enc_cmovf_branch(cop, dst, src)); ins_pipe(pipe_slow); @@ -7005,7 +7442,7 @@ // ins_cost(200); // XXX // format %{ "jn$cop skip\t# signed cmove float\n\t" -// "movss $dst, $src\n" +// "movss $dst, $src\n" // "skip:" %} // ins_encode(enc_cmovf_mem_branch(cop, dst, src)); // ins_pipe(pipe_slow); @@ -7017,19 +7454,27 @@ ins_cost(200); // XXX format %{ "jn$cop skip\t# unsigned cmove float\n\t" - "movss $dst, $src\n" + "movss $dst, $src\n" "skip:" %} ins_encode(enc_cmovf_branch(cop, dst, src)); ins_pipe(pipe_slow); %} +instruct cmovF_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regF dst, regF src) %{ + match(Set dst (CMoveF (Binary cop cr) (Binary dst src))); + ins_cost(200); + expand %{ + cmovF_regU(cop, cr, dst, src); + %} +%} + instruct cmovD_reg(cmpOp cop, rFlagsReg cr, regD dst, regD src) %{ match(Set dst (CMoveD (Binary cop cr) (Binary dst src))); ins_cost(200); // XXX format %{ "jn$cop skip\t# signed cmove double\n\t" - "movsd $dst, $src\n" + "movsd $dst, $src\n" "skip:" %} ins_encode(enc_cmovd_branch(cop, dst, src)); ins_pipe(pipe_slow); @@ -7041,12 +7486,20 @@ ins_cost(200); // XXX format %{ "jn$cop skip\t# unsigned cmove double\n\t" - "movsd $dst, $src\n" + "movsd $dst, $src\n" "skip:" %} ins_encode(enc_cmovd_branch(cop, dst, src)); ins_pipe(pipe_slow); %} +instruct cmovD_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regD dst, regD src) %{ + match(Set dst (CMoveD (Binary cop cr) (Binary dst src))); + ins_cost(200); + expand %{ + cmovD_regU(cop, cr, dst, src); + %} +%} + //----------Arithmetic Instructions-------------------------------------------- //----------Addition Instructions---------------------------------------------- @@ -7134,7 +7587,7 @@ %} // XXX why does that use AddI -instruct decI_rReg(rRegI dst, immI_M1 src, rFlagsReg cr) +instruct decI_rReg(rRegI dst, immI_M1 src, rFlagsReg cr) %{ predicate(UseIncDec); match(Set dst (AddI dst src)); @@ -7256,7 +7709,7 @@ %} // XXX why does that use AddL -instruct decL_rReg(rRegL dst, immL_M1 src, rFlagsReg cr) +instruct decL_rReg(rRegL dst, immL_M1 src, rFlagsReg cr) %{ predicate(UseIncDec); match(Set dst (AddL dst src)); @@ -7387,70 +7840,57 @@ // Used during allocation of the shared heap. // Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel. 
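In other words, storePConditional is the CAS step of bump-the-pointer allocation: the caller loads the current heap top, computes the new top, and only wins if no other thread moved the top in between. An illustrative version of that loop in portable C++ (the names and the slow-path policy here are assumptions, not HotSpot's code):

#include <atomic>
#include <cstddef>

// Bump-pointer allocation retried with a CAS on the shared heap top.
char* allocate(std::atomic<char*>& heap_top, char* heap_end, std::size_t size) {
  char* old_top = heap_top.load();
  do {
    if (old_top + size > heap_end) {
      return nullptr;                      // out of space: take the slow path / GC
    }
  } while (!heap_top.compare_exchange_weak(old_top, old_top + size));
  return old_top;                          // the freshly reserved block
}
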
-instruct storePConditional(memory heap_top_ptr, - rax_RegP oldval, rRegP newval, +instruct storePConditional(memory heap_top_ptr, + rax_RegP oldval, rRegP newval, rFlagsReg cr) %{ match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval))); - + format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) " "If rax == $heap_top_ptr then store $newval into $heap_top_ptr" %} opcode(0x0F, 0xB1); - ins_encode(lock_prefix, - REX_reg_mem_wide(newval, heap_top_ptr), - OpcP, OpcS, + ins_encode(lock_prefix, + REX_reg_mem_wide(newval, heap_top_ptr), + OpcP, OpcS, reg_mem(newval, heap_top_ptr)); ins_pipe(pipe_cmpxchg); %} -// Conditional-store of a long value -// Returns a boolean value (0/1) on success. Implemented with a -// CMPXCHG8 on Intel. mem_ptr can actually be in either RSI or RDI - -instruct storeLConditional(rRegI res, - memory mem_ptr, - rax_RegL oldval, rRegL newval, - rFlagsReg cr) +// Conditional-store of an int value. +// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG. +instruct storeIConditional(memory mem, rax_RegI oldval, rRegI newval, rFlagsReg cr) %{ - match(Set res (StoreLConditional mem_ptr (Binary oldval newval))); - effect(KILL cr); + match(Set cr (StoreIConditional mem (Binary oldval newval))); + effect(KILL oldval); - format %{ "cmpxchgq $mem_ptr, $newval\t# (long) " - "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" - "sete $res\n\t" - "movzbl $res, $res" %} + format %{ "cmpxchgl $mem, $newval\t# If rax == $mem then store $newval into $mem" %} opcode(0x0F, 0xB1); - ins_encode(lock_prefix, - REX_reg_mem_wide(newval, mem_ptr), - OpcP, OpcS, - reg_mem(newval, mem_ptr), - REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete - REX_reg_breg(res, res), // movzbl - Opcode(0xF), Opcode(0xB6), reg_reg(res, res)); + ins_encode(lock_prefix, + REX_reg_mem(newval, mem), + OpcP, OpcS, + reg_mem(newval, mem)); ins_pipe(pipe_cmpxchg); %} -// Conditional-store of a long value -// ZF flag is set on success, reset otherwise. Implemented with a -// CMPXCHG8 on Intel. mem_ptr can actually be in either RSI or RDI -instruct storeLConditional_flags(memory mem_ptr, - rax_RegL oldval, rRegL newval, - rFlagsReg cr, - immI0 zero) +// Conditional-store of a long value. +// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG. 
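Both conditional stores and the CompareAndSwap instructs below lean on the same cmpxchg contract: rax holds the expected old value, and lock cmpxchg either installs the new value and sets ZF, or loads the current value into rax and clears ZF, which is why these rules mark oldval as killed. A minimal software model of that contract, ignoring the atomicity that the lock prefix supplies:

#include <cstdint>

// Model of "lock cmpxchgq [mem], newval" with the expected value in RAX.
// The return value stands in for ZF.
bool cmpxchg_model(uint64_t* mem, uint64_t& rax, uint64_t newval) {
  if (*mem == rax) {
    *mem = newval;      // success: memory updated, ZF = 1
    return true;
  }
  rax = *mem;           // failure: RAX receives the current value, ZF = 0
  return false;
}

The CompareAndSwap variants then append the sete/movzbl pair visible in their formats to turn ZF into the 0/1 result the IR expects.
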
+instruct storeLConditional(memory mem, rax_RegL oldval, rRegL newval, rFlagsReg cr) %{ - match(Set cr (CmpI (StoreLConditional mem_ptr (Binary oldval newval)) zero)); + match(Set cr (StoreLConditional mem (Binary oldval newval))); + effect(KILL oldval); - format %{ "cmpxchgq $mem_ptr, $newval\t# (long) " - "If rax == $mem_ptr then store $newval into $mem_ptr" %} + format %{ "cmpxchgq $mem, $newval\t# If rax == $mem then store $newval into $mem" %} opcode(0x0F, 0xB1); - ins_encode(lock_prefix, - REX_reg_mem_wide(newval, mem_ptr), - OpcP, OpcS, - reg_mem(newval, mem_ptr)); + ins_encode(lock_prefix, + REX_reg_mem_wide(newval, mem), + OpcP, OpcS, + reg_mem(newval, mem)); ins_pipe(pipe_cmpxchg); %} -instruct compareAndSwapP(rRegI res, + +// XXX No flag versions for CompareAndSwap{P,I,L} because matcher can't match them +instruct compareAndSwapP(rRegI res, memory mem_ptr, rax_RegP oldval, rRegP newval, rFlagsReg cr) @@ -7463,9 +7903,9 @@ "sete $res\n\t" "movzbl $res, $res" %} opcode(0x0F, 0xB1); - ins_encode(lock_prefix, - REX_reg_mem_wide(newval, mem_ptr), - OpcP, OpcS, + ins_encode(lock_prefix, + REX_reg_mem_wide(newval, mem_ptr), + OpcP, OpcS, reg_mem(newval, mem_ptr), REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete REX_reg_breg(res, res), // movzbl @@ -7473,8 +7913,7 @@ ins_pipe( pipe_cmpxchg ); %} -// XXX No flag versions for CompareAndSwap{P,I,L} because matcher can't match them -instruct compareAndSwapL(rRegI res, +instruct compareAndSwapL(rRegI res, memory mem_ptr, rax_RegL oldval, rRegL newval, rFlagsReg cr) @@ -7487,9 +7926,9 @@ "sete $res\n\t" "movzbl $res, $res" %} opcode(0x0F, 0xB1); - ins_encode(lock_prefix, - REX_reg_mem_wide(newval, mem_ptr), - OpcP, OpcS, + ins_encode(lock_prefix, + REX_reg_mem_wide(newval, mem_ptr), + OpcP, OpcS, reg_mem(newval, mem_ptr), REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete REX_reg_breg(res, res), // movzbl @@ -7497,7 +7936,7 @@ ins_pipe( pipe_cmpxchg ); %} -instruct compareAndSwapI(rRegI res, +instruct compareAndSwapI(rRegI res, memory mem_ptr, rax_RegI oldval, rRegI newval, rFlagsReg cr) @@ -7510,9 +7949,9 @@ "sete $res\n\t" "movzbl $res, $res" %} opcode(0x0F, 0xB1); - ins_encode(lock_prefix, - REX_reg_mem(newval, mem_ptr), - OpcP, OpcS, + ins_encode(lock_prefix, + REX_reg_mem(newval, mem_ptr), + OpcP, OpcS, reg_mem(newval, mem_ptr), REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete REX_reg_breg(res, res), // movzbl @@ -7521,6 +7960,28 @@ %} +instruct compareAndSwapN(rRegI res, + memory mem_ptr, + rax_RegN oldval, rRegN newval, + rFlagsReg cr) %{ + match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval))); + effect(KILL cr, KILL oldval); + + format %{ "cmpxchgl $mem_ptr,$newval\t# " + "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" + "sete $res\n\t" + "movzbl $res, $res" %} + opcode(0x0F, 0xB1); + ins_encode(lock_prefix, + REX_reg_mem(newval, mem_ptr), + OpcP, OpcS, + reg_mem(newval, mem_ptr), + REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete + REX_reg_breg(res, res), // movzbl + Opcode(0xF), Opcode(0xB6), reg_reg(res, res)); + ins_pipe( pipe_cmpxchg ); +%} + //----------Subtraction Instructions------------------------------------------- // Integer Subtraction Instructions @@ -7723,7 +8184,7 @@ ins_cost(300); format %{ "imull $dst, $src, $imm\t# int" %} opcode(0x69); /* 69 /r id */ - ins_encode(REX_reg_reg(dst, src), + ins_encode(REX_reg_reg(dst, src), OpcSE(imm), reg_reg(dst, src), Con8or32(imm)); ins_pipe(ialu_reg_reg_alu0); %} @@ -7748,7 +8209,7 @@ ins_cost(300); format %{ 
"imull $dst, $src, $imm\t# int" %} opcode(0x69); /* 69 /r id */ - ins_encode(REX_reg_mem(dst, src), + ins_encode(REX_reg_mem(dst, src), OpcSE(imm), reg_mem(dst, src), Con8or32(imm)); ins_pipe(ialu_reg_mem_alu0); %} @@ -7773,7 +8234,7 @@ ins_cost(300); format %{ "imulq $dst, $src, $imm\t# long" %} opcode(0x69); /* 69 /r id */ - ins_encode(REX_reg_reg_wide(dst, src), + ins_encode(REX_reg_reg_wide(dst, src), OpcSE(imm), reg_reg(dst, src), Con8or32(imm)); ins_pipe(ialu_reg_reg_alu0); %} @@ -7798,11 +8259,23 @@ ins_cost(300); format %{ "imulq $dst, $src, $imm\t# long" %} opcode(0x69); /* 69 /r id */ - ins_encode(REX_reg_mem_wide(dst, src), + ins_encode(REX_reg_mem_wide(dst, src), OpcSE(imm), reg_mem(dst, src), Con8or32(imm)); ins_pipe(ialu_reg_mem_alu0); %} +instruct mulHiL_rReg(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr) +%{ + match(Set dst (MulHiL src rax)); + effect(USE_KILL rax, KILL cr); + + ins_cost(300); + format %{ "imulq RDX:RAX, RAX, $src\t# mulhi" %} + opcode(0xF7, 0x5); /* Opcode F7 /5 */ + ins_encode(REX_reg_wide(src), OpcP, reg_opc(src)); + ins_pipe(ialu_reg_reg_alu0); +%} + instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div, rFlagsReg cr) %{ @@ -7816,7 +8289,7 @@ "cmpl $div, -1\n\t" "je,s done\n" "normal: cdql\n\t" - "idivl $div\n" + "idivl $div\n" "done:" %} opcode(0xF7, 0x7); /* Opcode F7 /7 */ ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div)); @@ -7837,7 +8310,7 @@ "cmpq $div, -1\n\t" "je,s done\n" "normal: cdqq\n\t" - "idivq $div\n" + "idivq $div\n" "done:" %} opcode(0xF7, 0x7); /* Opcode F7 /7 */ ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div)); @@ -7858,7 +8331,7 @@ "cmpl $div, -1\n\t" "je,s done\n" "normal: cdql\n\t" - "idivl $div\n" + "idivl $div\n" "done:" %} opcode(0xF7, 0x7); /* Opcode F7 /7 */ ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div)); @@ -7880,7 +8353,7 @@ "cmpq $div, -1\n\t" "je,s done\n" "normal: cdqq\n\t" - "idivq $div\n" + "idivq $div\n" "done:" %} opcode(0xF7, 0x7); /* Opcode F7 /7 */ ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div)); @@ -7961,7 +8434,7 @@ "cmpl $div, -1\n\t" "je,s done\n" "normal: cdql\n\t" - "idivl $div\n" + "idivl $div\n" "done:" %} opcode(0xF7, 0x7); /* Opcode F7 /7 */ ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div)); @@ -7982,7 +8455,7 @@ "cmpq $div, -1\n\t" "je,s done\n" "normal: cdqq\n\t" - "idivq $div\n" + "idivq $div\n" "done:" %} opcode(0xF7, 0x7); /* Opcode F7 /7 */ ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div)); @@ -8135,7 +8608,7 @@ %} // Logical shift right by one -instruct shrI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr) +instruct shrI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr) %{ match(Set dst (URShiftI dst shift)); effect(KILL cr); @@ -8251,7 +8724,7 @@ format %{ "salq $dst, $shift" %} opcode(0xC1, 0x4); /* C1 /4 ib */ - ins_encode(REX_mem_wide(dst), OpcP, + ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift)); ins_pipe(ialu_mem_imm); %} @@ -8324,7 +8797,7 @@ format %{ "sarq $dst, $shift" %} opcode(0xC1, 0x7); /* C1 /7 ib */ - ins_encode(REX_mem_wide(dst), OpcP, + ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift)); ins_pipe(ialu_mem_imm); %} @@ -8354,7 +8827,7 @@ %} // Logical shift right by one -instruct shrL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr) +instruct shrL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr) %{ match(Set dst (URShiftL dst shift)); effect(KILL cr); @@ -8389,6 +8862,7 @@ ins_pipe(ialu_reg); %} + // Logical Shift Right by 8-bit 
immediate instruct shrL_mem_imm(memory dst, immI8 shift, rFlagsReg cr) %{ @@ -8464,7 +8938,7 @@ instruct rolI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr) %{ effect(USE_DEF dst, USE shift, KILL cr); - + format %{ "roll $dst, $shift" %} opcode(0xC1, 0x0); /* Opcode C1 /0 ib */ ins_encode( reg_opc_imm(dst, shift) ); @@ -8476,7 +8950,7 @@ effect(USE_DEF dst, USE shift, KILL cr); format %{ "roll $dst, $shift" %} - opcode(0xD3, 0x0); /* Opcode D3 /0 */ + opcode(0xD3, 0x0); /* Opcode D3 /0 */ ins_encode(REX_reg(dst), OpcP, reg_opc(dst)); ins_pipe(ialu_reg_reg); %} @@ -8486,7 +8960,7 @@ instruct rolI_rReg_i1(rRegI dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr) %{ match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift))); - + expand %{ rolI_rReg_imm1(dst, cr); %} @@ -8523,7 +8997,7 @@ %} %} -// ROR expand +// ROR expand instruct rorI_rReg_imm1(rRegI dst, rFlagsReg cr) %{ effect(USE_DEF dst, KILL cr); @@ -8553,7 +9027,7 @@ ins_encode(REX_reg(dst), OpcP, reg_opc(dst)); ins_pipe(ialu_reg_reg); %} -// end of ROR expand +// end of ROR expand // Rotate Right by one instruct rorI_rReg_i1(rRegI dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr) @@ -8609,7 +9083,7 @@ instruct rolL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr) %{ effect(USE_DEF dst, USE shift, KILL cr); - + format %{ "rolq $dst, $shift" %} opcode(0xC1, 0x0); /* Opcode C1 /0 ib */ ins_encode( reg_opc_imm_wide(dst, shift) ); @@ -8621,7 +9095,7 @@ effect(USE_DEF dst, USE shift, KILL cr); format %{ "rolq $dst, $shift" %} - opcode(0xD3, 0x0); /* Opcode D3 /0 */ + opcode(0xD3, 0x0); /* Opcode D3 /0 */ ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst)); ins_pipe(ialu_reg_reg); %} @@ -8631,7 +9105,7 @@ instruct rolL_rReg_i1(rRegL dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr) %{ match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift))); - + expand %{ rolL_rReg_imm1(dst, cr); %} @@ -8668,7 +9142,7 @@ %} %} -// ROR expand +// ROR expand instruct rorL_rReg_imm1(rRegL dst, rFlagsReg cr) %{ effect(USE_DEF dst, KILL cr); @@ -8698,7 +9172,7 @@ ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst)); ins_pipe(ialu_reg_reg); %} -// end of ROR expand +// end of ROR expand // Rotate Right by one instruct rorL_rReg_i1(rRegL dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr) @@ -8849,7 +9323,7 @@ ins_cost(125); format %{ "andl $dst, $src\t# int" %} opcode(0x81, 0x4); /* Opcode 81 /4 id */ - ins_encode(REX_mem(dst), OpcSE(src), + ins_encode(REX_mem(dst), OpcSE(src), RM_opc_mem(secondary, dst), Con8or32(src)); ins_pipe(ialu_mem_imm); %} @@ -8932,6 +9406,17 @@ ins_pipe(ialu_reg_reg); %} +// Xor Register with Immediate -1 +instruct xorI_rReg_im1(rRegI dst, immI_M1 imm) %{ + match(Set dst (XorI dst imm)); + + format %{ "not $dst" %} + ins_encode %{ + __ notl($dst$$Register); + %} + ins_pipe(ialu_reg); +%} + // Xor Register with Immediate instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr) %{ @@ -9069,7 +9554,7 @@ ins_cost(125); format %{ "andq $dst, $src\t# long" %} opcode(0x81, 0x4); /* Opcode 81 /4 id */ - ins_encode(REX_mem_wide(dst), OpcSE(src), + ins_encode(REX_mem_wide(dst), OpcSE(src), RM_opc_mem(secondary, dst), Con8or32(src)); ins_pipe(ialu_mem_imm); %} @@ -9087,6 +9572,18 @@ ins_pipe(ialu_reg_reg); %} +// Use any_RegP to match R15 (TLS register) without spilling. 
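The new xorI_rReg_im1 and xorL_rReg_im1 rules above rely on the identity x ^ -1 == ~x, so an xor against an all-ones immediate can be emitted as the shorter notl/notq; and since not leaves EFLAGS alone, no rFlagsReg effect is needed. A one-line illustration of the identity (for clarity only, not HotSpot code):

#include <cstdint>

int32_t xor_all_ones(int32_t x) { return x ^ -1; }  // what XorI (dst, immI_M1) computes
int32_t bitwise_not(int32_t x)  { return ~x; }      // what notl emits; same result
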
+instruct orL_rReg_castP2X(rRegL dst, any_RegP src, rFlagsReg cr) %{ + match(Set dst (OrL dst (CastP2X src))); + effect(KILL cr); + + format %{ "orq $dst, $src\t# long" %} + opcode(0x0B); + ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src)); + ins_pipe(ialu_reg_reg); +%} + + // Or Register with Immediate instruct orL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr) %{ @@ -9152,6 +9649,17 @@ ins_pipe(ialu_reg_reg); %} +// Xor Register with Immediate -1 +instruct xorL_rReg_im1(rRegL dst, immL_M1 imm) %{ + match(Set dst (XorL dst imm)); + + format %{ "notq $dst" %} + ins_encode %{ + __ notq($dst$$Register); + %} + ins_pipe(ialu_reg); +%} + // Xor Register with Immediate instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr) %{ @@ -9243,9 +9751,9 @@ ins_cost(400); // XXX format %{ "cmpl $p, $q\t# cmpLTMask\n\t" - "setlt $dst\n\t" + "setlt $dst\n\t" "movzbl $dst, $dst\n\t" - "negl $dst" %} + "negl $dst" %} ins_encode(REX_reg_reg(p, q), opc_reg_reg(0x3B, p, q), // cmpl setLT_reg(dst), REX_reg_breg(dst, dst), // movzbl @@ -9268,7 +9776,7 @@ instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, - rRegI tmp, + rRegI tmp, rFlagsReg cr) %{ match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); @@ -9276,31 +9784,31 @@ ins_cost(400); // XXX format %{ "subl $p, $q\t# cadd_cmpLTMask1\n\t" - "sbbl $tmp, $tmp\n\t" - "andl $tmp, $y\n\t" - "addl $p, $tmp" %} - ins_encode(enc_cmpLTP(p, q, y, tmp)); + "sbbl $tmp, $tmp\n\t" + "andl $tmp, $y\n\t" + "addl $p, $tmp" %} + ins_encode(enc_cmpLTP(p, q, y, tmp)); ins_pipe(pipe_cmplt); %} /* If I enable this, I encourage spilling in the inner loop of compress. -instruct cadd_cmpLTMask_mem( rRegI p, rRegI q, memory y, rRegI tmp, rFlagsReg cr ) +instruct cadd_cmpLTMask_mem( rRegI p, rRegI q, memory y, rRegI tmp, rFlagsReg cr ) %{ match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q))); effect( TEMP tmp, KILL cr ); ins_cost(400); format %{ "SUB $p,$q\n\t" - "SBB RCX,RCX\n\t" - "AND RCX,$y\n\t" - "ADD $p,RCX" %} - ins_encode( enc_cmpLTP_mem(p,q,y,tmp) ); + "SBB RCX,RCX\n\t" + "AND RCX,$y\n\t" + "ADD $p,RCX" %} + ins_encode( enc_cmpLTP_mem(p,q,y,tmp) ); %} */ //---------- FP Instructions------------------------------------------------ -instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2) +instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2) %{ match(Set cr (CmpF src1 src2)); @@ -9317,7 +9825,18 @@ ins_pipe(pipe_slow); %} -instruct cmpF_cc_mem(rFlagsRegU cr, regF src1, memory src2) +instruct cmpF_cc_reg_CF(rFlagsRegUCF cr, regF src1, regF src2) %{ + match(Set cr (CmpF src1 src2)); + + ins_cost(145); + format %{ "ucomiss $src1, $src2" %} + ins_encode %{ + __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister); + %} + ins_pipe(pipe_slow); +%} + +instruct cmpF_cc_mem(rFlagsRegU cr, regF src1, memory src2) %{ match(Set cr (CmpF src1 (LoadF src2))); @@ -9334,6 +9853,16 @@ ins_pipe(pipe_slow); %} +instruct cmpF_cc_memCF(rFlagsRegUCF cr, regF src1, memory src2) %{ + match(Set cr (CmpF src1 (LoadF src2))); + + ins_cost(100); + format %{ "ucomiss $src1, $src2" %} + opcode(0x0F, 0x2E); + ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2)); + ins_pipe(pipe_slow); +%} + instruct cmpF_cc_imm(rFlagsRegU cr, regF src1, immF src2) %{ match(Set cr (CmpF src1 src2)); @@ -9351,7 +9880,17 @@ ins_pipe(pipe_slow); %} -instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2) +instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src1, immF src2) %{ + match(Set cr (CmpF src1 src2)); + + ins_cost(100); + format %{ "ucomiss $src1, $src2" %} + opcode(0x0F, 0x2E); + 
ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2)); + ins_pipe(pipe_slow); +%} + +instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2) %{ match(Set cr (CmpD src1 src2)); @@ -9368,7 +9907,18 @@ ins_pipe(pipe_slow); %} -instruct cmpD_cc_mem(rFlagsRegU cr, regD src1, memory src2) +instruct cmpD_cc_reg_CF(rFlagsRegUCF cr, regD src1, regD src2) %{ + match(Set cr (CmpD src1 src2)); + + ins_cost(100); + format %{ "ucomisd $src1, $src2 test" %} + ins_encode %{ + __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister); + %} + ins_pipe(pipe_slow); +%} + +instruct cmpD_cc_mem(rFlagsRegU cr, regD src1, memory src2) %{ match(Set cr (CmpD src1 (LoadD src2))); @@ -9385,7 +9935,17 @@ ins_pipe(pipe_slow); %} -instruct cmpD_cc_imm(rFlagsRegU cr, regD src1, immD src2) +instruct cmpD_cc_memCF(rFlagsRegUCF cr, regD src1, memory src2) %{ + match(Set cr (CmpD src1 (LoadD src2))); + + ins_cost(100); + format %{ "ucomisd $src1, $src2" %} + opcode(0x66, 0x0F, 0x2E); + ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2)); + ins_pipe(pipe_slow); +%} + +instruct cmpD_cc_imm(rFlagsRegU cr, regD src1, immD src2) %{ match(Set cr (CmpD src1 src2)); @@ -9402,6 +9962,16 @@ ins_pipe(pipe_slow); %} +instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src1, immD src2) %{ + match(Set cr (CmpD src1 src2)); + + ins_cost(100); + format %{ "ucomisd $src1, [$src2]" %} + opcode(0x66, 0x0F, 0x2E); + ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2)); + ins_pipe(pipe_slow); +%} + // Compare into -1,0,1 instruct cmpF_reg(rRegI dst, regF src1, regF src2, rFlagsReg cr) %{ @@ -9919,7 +10489,7 @@ match(Set dst (TanD dst)); format %{ "dtan $dst\n\t" %} - ins_encode( Push_SrcXD(dst), + ins_encode( Push_SrcXD(dst), Opcode(0xD9), Opcode(0xF2), //fptan Opcode(0xDD), Opcode(0xD8), //fstp st Push_ResultXD(dst) ); @@ -9931,10 +10501,10 @@ match(Set dst (Log10D dst)); // fldlg2 ; push log_10(2) on the FPU stack; full 80-bit number // fyl2x ; compute log_10(2) * log_2(x) - format %{ "fldlg2\t\t\t#Log10\n\t" + format %{ "fldlg2\t\t\t#Log10\n\t" "fyl2x\t\t\t# Q=Log10*Log_2(x)\n\t" %} - ins_encode(Opcode(0xD9), Opcode(0xEC), // fldlg2 + ins_encode(Opcode(0xD9), Opcode(0xEC), // fldlg2 Push_SrcXD(dst), Opcode(0xD9), Opcode(0xF1), // fyl2x Push_ResultXD(dst)); @@ -10020,7 +10590,7 @@ %} // XXX do mem variants -instruct convF2I_reg_reg(rRegI dst, regF src, rFlagsReg cr) +instruct convF2I_reg_reg(rRegI dst, regF src, rFlagsReg cr) %{ match(Set dst (ConvF2I src)); effect(KILL cr); @@ -10039,7 +10609,7 @@ ins_pipe(pipe_slow); %} -instruct convF2L_reg_reg(rRegL dst, regF src, rFlagsReg cr) +instruct convF2L_reg_reg(rRegL dst, regF src, rFlagsReg cr) %{ match(Set dst (ConvF2L src)); effect(KILL cr); @@ -10058,7 +10628,7 @@ ins_pipe(pipe_slow); %} -instruct convD2I_reg_reg(rRegI dst, regD src, rFlagsReg cr) +instruct convD2I_reg_reg(rRegI dst, regD src, rFlagsReg cr) %{ match(Set dst (ConvD2I src)); effect(KILL cr); @@ -10077,7 +10647,7 @@ ins_pipe(pipe_slow); %} -instruct convD2L_reg_reg(rRegL dst, regD src, rFlagsReg cr) +instruct convD2L_reg_reg(rRegL dst, regD src, rFlagsReg cr) %{ match(Set dst (ConvD2L src)); effect(KILL cr); @@ -10096,8 +10666,9 @@ ins_pipe(pipe_slow); %} -instruct convI2F_reg_reg(regF dst, rRegI src) +instruct convI2F_reg_reg(regF dst, rRegI src) %{ + predicate(!UseXmmI2F); match(Set dst (ConvI2F src)); format %{ "cvtsi2ssl $dst, $src\t# i2f" %} @@ -10106,7 +10677,7 @@ ins_pipe(pipe_slow); // XXX %} -instruct convI2F_reg_mem(regF dst, memory src) +instruct convI2F_reg_mem(regF 
dst, memory src) %{ match(Set dst (ConvI2F (LoadI src))); @@ -10116,8 +10687,9 @@ ins_pipe(pipe_slow); // XXX %} -instruct convI2D_reg_reg(regD dst, rRegI src) +instruct convI2D_reg_reg(regD dst, rRegI src) %{ + predicate(!UseXmmI2D); match(Set dst (ConvI2D src)); format %{ "cvtsi2sdl $dst, $src\t# i2d" %} @@ -10126,7 +10698,7 @@ ins_pipe(pipe_slow); // XXX %} -instruct convI2D_reg_mem(regD dst, memory src) +instruct convI2D_reg_mem(regD dst, memory src) %{ match(Set dst (ConvI2D (LoadI src))); @@ -10136,7 +10708,35 @@ ins_pipe(pipe_slow); // XXX %} -instruct convL2F_reg_reg(regF dst, rRegL src) +instruct convXI2F_reg(regF dst, rRegI src) +%{ + predicate(UseXmmI2F); + match(Set dst (ConvI2F src)); + + format %{ "movdl $dst, $src\n\t" + "cvtdq2psl $dst, $dst\t# i2f" %} + ins_encode %{ + __ movdl($dst$$XMMRegister, $src$$Register); + __ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister); + %} + ins_pipe(pipe_slow); // XXX +%} + +instruct convXI2D_reg(regD dst, rRegI src) +%{ + predicate(UseXmmI2D); + match(Set dst (ConvI2D src)); + + format %{ "movdl $dst, $src\n\t" + "cvtdq2pdl $dst, $dst\t# i2d" %} + ins_encode %{ + __ movdl($dst$$XMMRegister, $src$$Register); + __ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister); + %} + ins_pipe(pipe_slow); // XXX +%} + +instruct convL2F_reg_reg(regF dst, rRegL src) %{ match(Set dst (ConvL2F src)); @@ -10146,7 +10746,7 @@ ins_pipe(pipe_slow); // XXX %} -instruct convL2F_reg_mem(regF dst, memory src) +instruct convL2F_reg_mem(regF dst, memory src) %{ match(Set dst (ConvL2F (LoadL src))); @@ -10156,7 +10756,7 @@ ins_pipe(pipe_slow); // XXX %} -instruct convL2D_reg_reg(regD dst, rRegL src) +instruct convL2D_reg_reg(regD dst, rRegL src) %{ match(Set dst (ConvL2D src)); @@ -10166,7 +10766,7 @@ ins_pipe(pipe_slow); // XXX %} -instruct convL2D_reg_mem(regD dst, memory src) +instruct convL2D_reg_mem(regD dst, memory src) %{ match(Set dst (ConvL2D (LoadL src))); @@ -10244,7 +10844,7 @@ ins_pipe(ialu_reg_reg); %} -instruct convL2I_reg_reg(rRegI dst, rRegL src) +instruct convL2I_reg_reg(rRegI dst, rRegL src) %{ match(Set dst (ConvL2I src)); @@ -10523,14 +11123,14 @@ // ======================================================================= // fast clearing of an array -instruct rep_stos(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy, +instruct rep_stos(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy, rFlagsReg cr) %{ match(Set dummy (ClearArray cnt base)); effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr); format %{ "xorl rax, rax\t# ClearArray:\n\t" - "rep stosq\t# Store rax to *rdi++ while rcx--" %} + "rep stosq\t# Store rax to *rdi++ while rcx--" %} ins_encode(opc_reg_reg(0x33, RAX, RAX), // xorl %eax, %eax Opcode(0xF3), Opcode(0x48), Opcode(0xAB)); // rep REX_W stos ins_pipe(pipe_slow); @@ -10548,6 +11148,18 @@ ins_pipe( pipe_slow ); %} +// fast array equals +instruct array_equals(rdi_RegP ary1, rsi_RegP ary2, rax_RegI tmp1, + rbx_RegI tmp2, rcx_RegI result, rFlagsReg cr) %{ + match(Set result (AryEq ary1 ary2)); + effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL cr); + //ins_cost(300); + + format %{ "Array Equals $ary1,$ary2 -> $result // KILL RAX, RBX" %} + ins_encode( enc_Array_Equals(ary1, ary2, tmp1, tmp2, result) ); + ins_pipe( pipe_slow ); +%} + //----------Control Flow Instructions------------------------------------------ // Signed compare Instructions @@ -10576,7 +11188,7 @@ instruct compI_rReg_mem(rFlagsReg cr, rRegI op1, memory op2) %{ match(Set cr (CmpI op1 (LoadI op2))); - + ins_cost(500); // XXX format %{ 
"cmpl $op1, $op2" %} opcode(0x3B); /* Opcode 3B /r */ @@ -10639,7 +11251,7 @@ instruct compU_rReg_mem(rFlagsRegU cr, rRegI op1, memory op2) %{ match(Set cr (CmpU op1 (LoadI op2))); - + ins_cost(500); // XXX format %{ "cmpl $op1, $op2\t# unsigned" %} opcode(0x3B); /* Opcode 3B /r */ @@ -10648,10 +11260,10 @@ %} // // // Cisc-spilled version of cmpU_rReg -// //instruct compU_mem_rReg(rFlagsRegU cr, memory op1, rRegI op2) +// //instruct compU_mem_rReg(rFlagsRegU cr, memory op1, rRegI op2) // //%{ // // match(Set cr (CmpU (LoadI op1) op2)); -// // +// // // // format %{ "CMPu $op1,$op2" %} // // ins_cost(500); // // opcode(0x39); /* Opcode 39 /r */ @@ -10681,7 +11293,7 @@ instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2) %{ match(Set cr (CmpP op1 (LoadP op2))); - + ins_cost(500); // XXX format %{ "cmpq $op1, $op2\t# ptr" %} opcode(0x3B); /* Opcode 3B /r */ @@ -10690,10 +11302,10 @@ %} // // // Cisc-spilled version of cmpP_rReg -// //instruct compP_mem_rReg(rFlagsRegU cr, memory op1, rRegP op2) +// //instruct compP_mem_rReg(rFlagsRegU cr, memory op1, rRegP op2) // //%{ // // match(Set cr (CmpP (LoadP op1) op2)); -// // +// // // // format %{ "CMPu $op1,$op2" %} // // ins_cost(500); // // opcode(0x39); /* Opcode 39 /r */ @@ -10708,7 +11320,7 @@ %{ predicate(!n->in(2)->in(2)->bottom_type()->isa_oop_ptr()); match(Set cr (CmpP op1 (LoadP op2))); - + format %{ "cmpq $op1, $op2\t# raw ptr" %} opcode(0x3B); /* Opcode 3B /r */ ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2)); @@ -10732,7 +11344,7 @@ instruct testP_reg_mem(rFlagsReg cr, memory op, immP0 zero) %{ match(Set cr (CmpP (LoadP op) zero)); - + ins_cost(500); // XXX format %{ "testq $op, 0xffffffffffffffff\t# ptr" %} opcode(0xF7); /* Opcode F7 /0 */ @@ -10741,6 +11353,50 @@ ins_pipe(ialu_cr_reg_imm); %} + +instruct compN_rReg(rFlagsRegU cr, rRegN op1, rRegN op2) +%{ + match(Set cr (CmpN op1 op2)); + + format %{ "cmpl $op1, $op2\t# compressed ptr" %} + ins_encode %{ __ cmpl(as_Register($op1$$reg), as_Register($op2$$reg)); %} + ins_pipe(ialu_cr_reg_reg); +%} + +instruct compN_rReg_mem(rFlagsRegU cr, rRegN src, memory mem) +%{ + match(Set cr (CmpN src (LoadN mem))); + + ins_cost(500); // XXX + format %{ "cmpl $src, mem\t# compressed ptr" %} + ins_encode %{ + Address adr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp); + __ cmpl(as_Register($src$$reg), adr); + %} + ins_pipe(ialu_cr_reg_mem); +%} + +instruct testN_reg(rFlagsReg cr, rRegN src, immN0 zero) %{ + match(Set cr (CmpN src zero)); + + format %{ "testl $src, $src\t# compressed ptr" %} + ins_encode %{ __ testl($src$$Register, $src$$Register); %} + ins_pipe(ialu_cr_reg_imm); +%} + +instruct testN_reg_mem(rFlagsReg cr, memory mem, immN0 zero) +%{ + match(Set cr (CmpN (LoadN mem) zero)); + + ins_cost(500); // XXX + format %{ "testl $mem, 0xffffffff\t# compressed ptr" %} + ins_encode %{ + Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp); + __ cmpl(addr, (int)0xFFFFFFFF); + %} + ins_pipe(ialu_cr_reg_mem); +%} + // Yanked all unsigned pointer compare operations. // Pointer compares are done with CmpP which is already unsigned. 
@@ -10767,7 +11423,7 @@ instruct compL_rReg_mem(rFlagsReg cr, rRegL op1, memory op2) %{ match(Set cr (CmpL op1 (LoadL op2))); - + ins_cost(500); // XXX format %{ "cmpq $op1, $op2" %} opcode(0x3B); /* Opcode 3B /r */ @@ -10826,7 +11482,7 @@ //----------Max and Min-------------------------------------------------------- // Min Instructions -instruct cmovI_reg_g(rRegI dst, rRegI src, rFlagsReg cr) +instruct cmovI_reg_g(rRegI dst, rRegI src, rFlagsReg cr) %{ effect(USE_DEF dst, USE src, USE cr); @@ -10849,7 +11505,7 @@ %} %} -instruct cmovI_reg_l(rRegI dst, rRegI src, rFlagsReg cr) +instruct cmovI_reg_l(rRegI dst, rRegI src, rFlagsReg cr) %{ effect(USE_DEF dst, USE src, USE cr); @@ -10921,8 +11577,7 @@ %} // Jump Direct Conditional - Label defines a relative address from Jcc+1 -instruct jmpLoopEndU(cmpOpU cop, rFlagsRegU cmp, label labl) -%{ +instruct jmpLoopEndU(cmpOpU cop, rFlagsRegU cmp, label labl) %{ match(CountedLoopEnd cop cmp); effect(USE labl); @@ -10935,14 +11590,39 @@ ins_pc_relative(1); %} +instruct jmpLoopEndUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{ + match(CountedLoopEnd cop cmp); + effect(USE labl); + + ins_cost(200); + format %{ "j$cop,u $labl\t# loop end" %} + size(6); + opcode(0x0F, 0x80); + ins_encode(Jcc(cop, labl)); + ins_pipe(pipe_jcc); + ins_pc_relative(1); +%} + // Jump Direct Conditional - using unsigned comparison -instruct jmpConU(cmpOpU cop, rFlagsRegU cmp, label labl) -%{ +instruct jmpConU(cmpOpU cop, rFlagsRegU cmp, label labl) %{ match(If cop cmp); effect(USE labl); ins_cost(300); - format %{ "j$cop,u $labl" %} + format %{ "j$cop,u $labl" %} + size(6); + opcode(0x0F, 0x80); + ins_encode(Jcc(cop, labl)); + ins_pipe(pipe_jcc); + ins_pc_relative(1); +%} + +instruct jmpConUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{ + match(If cop cmp); + effect(USE labl); + + ins_cost(200); + format %{ "j$cop,u $labl" %} size(6); opcode(0x0F, 0x80); ins_encode(Jcc(cop, labl)); @@ -10950,6 +11630,46 @@ ins_pc_relative(1); %} +instruct jmpConUCF2(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{ + match(If cop cmp); + effect(USE labl); + + ins_cost(200); + format %{ $$template + if ($cop$$cmpcode == Assembler::notEqual) { + $$emit$$"jp,u $labl\n\t" + $$emit$$"j$cop,u $labl" + } else { + $$emit$$"jp,u done\n\t" + $$emit$$"j$cop,u $labl\n\t" + $$emit$$"done:" + } + %} + size(12); + opcode(0x0F, 0x80); + ins_encode %{ + Label* l = $labl$$label; + $$$emit8$primary; + emit_cc(cbuf, $secondary, Assembler::parity); + int parity_disp = -1; + if ($cop$$cmpcode == Assembler::notEqual) { + // the two jumps 6 bytes apart so the jump distances are too + parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0; + } else if ($cop$$cmpcode == Assembler::equal) { + parity_disp = 6; + } else { + ShouldNotReachHere(); + } + emit_d32(cbuf, parity_disp); + $$$emit8$primary; + emit_cc(cbuf, $secondary, $cop$$cmpcode); + int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0; + emit_d32(cbuf, disp); + %} + ins_pipe(pipe_jcc); + ins_pc_relative(1); +%} + // ============================================================================ // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary // superklass array for an instance of the superklass. 
Set a hidden @@ -10982,12 +11702,13 @@ ins_pipe(pipe_slow); %} -instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr, +instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr, rsi_RegP sub, rax_RegP super, rcx_RegI rcx, immP0 zero, rdi_RegP result) %{ match(Set cr (CmpP (PartialSubtypeCheck sub super) zero)); + predicate(!UseCompressedOops); // decoding oop kills condition codes effect(KILL rcx, KILL result); ins_cost(1000); @@ -11008,7 +11729,7 @@ // ============================================================================ // Branch Instructions -- short offset versions -// +// // These instructions are used to replace jumps of a long offset (the default // match) with jumps of a shorter offset. These instructions are all tagged // with the ins_short_branch attribute, which causes the ADLC to suppress the @@ -11019,8 +11740,7 @@ // specific code section of the file. // Jump Direct - Label defines a relative address from JMP+1 -instruct jmpDir_short(label labl) -%{ +instruct jmpDir_short(label labl) %{ match(Goto); effect(USE labl); @@ -11035,8 +11755,7 @@ %} // Jump Direct Conditional - Label defines a relative address from Jcc+1 -instruct jmpCon_short(cmpOp cop, rFlagsReg cr, label labl) -%{ +instruct jmpCon_short(cmpOp cop, rFlagsReg cr, label labl) %{ match(If cop cr); effect(USE labl); @@ -11051,13 +11770,12 @@ %} // Jump Direct Conditional - Label defines a relative address from Jcc+1 -instruct jmpLoopEnd_short(cmpOp cop, rFlagsReg cr, label labl) -%{ +instruct jmpLoopEnd_short(cmpOp cop, rFlagsReg cr, label labl) %{ match(CountedLoopEnd cop cr); effect(USE labl); ins_cost(300); - format %{ "j$cop,s $labl" %} + format %{ "j$cop,s $labl\t# loop end" %} size(2); opcode(0x70); ins_encode(JccShort(cop, labl)); @@ -11067,13 +11785,26 @@ %} // Jump Direct Conditional - Label defines a relative address from Jcc+1 -instruct jmpLoopEndU_short(cmpOpU cop, rFlagsRegU cmp, label labl) -%{ +instruct jmpLoopEndU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{ match(CountedLoopEnd cop cmp); effect(USE labl); ins_cost(300); - format %{ "j$cop,us $labl" %} + format %{ "j$cop,us $labl\t# loop end" %} + size(2); + opcode(0x70); + ins_encode(JccShort(cop, labl)); + ins_pipe(pipe_jcc); + ins_pc_relative(1); + ins_short_branch(1); +%} + +instruct jmpLoopEndUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{ + match(CountedLoopEnd cop cmp); + effect(USE labl); + + ins_cost(300); + format %{ "j$cop,us $labl\t# loop end" %} size(2); opcode(0x70); ins_encode(JccShort(cop, labl)); @@ -11083,8 +11814,7 @@ %} // Jump Direct Conditional - using unsigned comparison -instruct jmpConU_short(cmpOpU cop, rFlagsRegU cmp, label labl) -%{ +instruct jmpConU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{ match(If cop cmp); effect(USE labl); @@ -11098,10 +11828,64 @@ ins_short_branch(1); %} +instruct jmpConUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{ + match(If cop cmp); + effect(USE labl); + + ins_cost(300); + format %{ "j$cop,us $labl" %} + size(2); + opcode(0x70); + ins_encode(JccShort(cop, labl)); + ins_pipe(pipe_jcc); + ins_pc_relative(1); + ins_short_branch(1); +%} + +instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{ + match(If cop cmp); + effect(USE labl); + + ins_cost(300); + format %{ $$template + if ($cop$$cmpcode == Assembler::notEqual) { + $$emit$$"jp,u,s $labl\n\t" + $$emit$$"j$cop,u,s $labl" + } else { + $$emit$$"jp,u,s done\n\t" + $$emit$$"j$cop,u,s $labl\n\t" + $$emit$$"done:" + } + %} + size(4); + opcode(0x70); + ins_encode %{ + Label* l = $labl$$label; + emit_cc(cbuf, 
$primary, Assembler::parity); + int parity_disp = -1; + if ($cop$$cmpcode == Assembler::notEqual) { + parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0; + } else if ($cop$$cmpcode == Assembler::equal) { + parity_disp = 2; + } else { + ShouldNotReachHere(); + } + emit_d8(cbuf, parity_disp); + emit_cc(cbuf, $primary, $cop$$cmpcode); + int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0; + emit_d8(cbuf, disp); + assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); + assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp"); + %} + ins_pipe(pipe_jcc); + ins_pc_relative(1); + ins_short_branch(1); +%} + // ============================================================================ // inlined locking and unlocking -instruct cmpFastLock(rFlagsReg cr, +instruct cmpFastLock(rFlagsReg cr, rRegP object, rRegP box, rax_RegI tmp, rRegP scr) %{ match(Set cr (FastLock object box)); @@ -11172,7 +11956,7 @@ ins_cost(300); format %{ "movq rax, #Universe::non_oop_word()\n\t" - "call,dynamic " %} + "call,dynamic " %} opcode(0xE8); /* E8 cd */ ins_encode(Java_Dynamic_Call(meth), call_epilog); ins_pipe(pipe_slow); @@ -11224,14 +12008,14 @@ // Return Instruction // Remove the return address & jump to it. -// Notice: We always emit a nop after a ret to make sure there is room +// Notice: We always emit a nop after a ret to make sure there is room // for safepoint patching instruct Ret() %{ match(Return); - + format %{ "ret" %} - opcode(0xC3); + opcode(0xC3); ins_encode(OpcP); ins_pipe(pipe_jmp); %} @@ -11259,7 +12043,7 @@ ins_cost(300); format %{ "popq rdx\t# pop return address\n\t" - "jmp $jump_target" %} + "jmp $jump_target" %} opcode(0xFF, 0x4); /* Opcode FF /4 */ ins_encode(Opcode(0x5a), // popq rdx REX_reg(jump_target), OpcP, reg_opc(jump_target)); @@ -11280,7 +12064,7 @@ ins_pipe(empty); %} -// Rethrow exception: +// Rethrow exception: // The exception oop will come in the first argument position. // Then JUMP (not call) to the rethrow stub code. instruct RethrowException() @@ -11297,7 +12081,7 @@ //----------PEEPHOLE RULES----------------------------------------------------- // These must follow all instruction definitions as they use the names // defined in the instructions definitions. -// +// // peepmatch ( root_instr_name [precerding_instruction]* ); // // peepconstraint %{ @@ -11310,16 +12094,16 @@ // // in the replacement instruction's match rule // // ---------VM FLAGS--------------------------------------------------------- -// +// // All peephole optimizations can be turned off using -XX:-OptoPeephole -// +// // Each peephole rule is given an identifying number starting with zero and // increasing by one in the order seen by the parser. An individual peephole // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=# // on the command-line. 
-// +// // ---------CURRENT LIMITATIONS---------------------------------------------- -// +// // Only match adjacent instructions in same basic block // Only equality constraints // Only constraints between operands, not (0.dest_reg == RAX_enc) @@ -11328,31 +12112,31 @@ // ---------EXAMPLE---------------------------------------------------------- // // // pertinent parts of existing instructions in architecture description -// instruct movI(rRegI dst, rRegI src) +// instruct movI(rRegI dst, rRegI src) // %{ // match(Set dst (CopyI src)); // %} -// -// instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr) +// +// instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr) // %{ // match(Set dst (AddI dst src)); // effect(KILL cr); // %} -// +// // // Change (inc mov) to lea // peephole %{ // // increment preceeded by register-register move // peepmatch ( incI_rReg movI ); -// // require that the destination register of the increment +// // require that the destination register of the increment // // match the destination register of the move // peepconstraint ( 0.dst == 1.dst ); // // construct a replacement instruction that sets // // the destination to ( move's source register + one ) // peepreplace ( leaI_rReg_immI( 0.dst 1.src 0.src ) ); // %} -// +// -// Implementation no longer uses movX instructions since +// Implementation no longer uses movX instructions since // machine-independent system no longer uses CopyX nodes. // // peephole @@ -11405,16 +12189,16 @@ // %} // // Change load of spilled value to only a spill -// instruct storeI(memory mem, rRegI src) +// instruct storeI(memory mem, rRegI src) // %{ // match(Set mem (StoreI mem src)); // %} -// -// instruct loadI(rRegI dst, memory mem) +// +// instruct loadI(rRegI dst, memory mem) // %{ // match(Set dst (LoadI mem)); // %} -// +// peephole %{ --- old/hotspot/src/os/linux/launcher/java.c 2009-08-01 04:08:36.032694815 +0100 +++ new/hotspot/src/os/linux/launcher/java.c 2009-08-01 04:08:35.940246775 +0100 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1110,7 +1110,7 @@ if (propname) { jclass cls; jmethodID mid; - NULL_CHECK0 (cls = (*env)->FindClass(env, "java/lang/System")); + NULL_CHECK0 (cls = FindBootStrapClass(env, "java/lang/System")); NULL_CHECK0 (mid = (*env)->GetStaticMethodID( env, cls, "getProperty", @@ -1125,7 +1125,7 @@ static jboolean isEncodingSupported(JNIEnv *env, jstring enc) { jclass cls; jmethodID mid; - NULL_CHECK0 (cls = (*env)->FindClass(env, "java/nio/charset/Charset")); + NULL_CHECK0 (cls = FindBootStrapClass(env, "java/nio/charset/Charset")); NULL_CHECK0 (mid = (*env)->GetStaticMethodID( env, cls, "isSupported", @@ -1161,21 +1161,21 @@ #else if (isEncodingSupported(env, enc) == JNI_TRUE) { #endif - NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String")); - NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", - "([BLjava/lang/String;)V")); - str = (*env)->NewObject(env, cls, mid, ary, enc); - } else { - /*If the encoding specified in sun.jnu.encoding is not - endorsed by "Charset.isSupported" we have to fall back + NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String")); + NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", + "([BLjava/lang/String;)V")); + str = (*env)->NewObject(env, cls, mid, ary, enc); + } else { + /*If the encoding specified in sun.jnu.encoding is not + endorsed by "Charset.isSupported" we have to fall back to use String(byte[]) explicitly here without specifying - the encoding name, in which the StringCoding class will - pickup the iso-8859-1 as the fallback converter for us. - */ - NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String")); - NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", - "([B)V")); - str = (*env)->NewObject(env, cls, mid, ary); + the encoding name, in which the StringCoding class will + pickup the iso-8859-1 as the fallback converter for us. + */ + NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String")); + NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", + "([B)V")); + str = (*env)->NewObject(env, cls, mid, ary); } (*env)->DeleteLocalRef(env, ary); return str; @@ -1195,7 +1195,7 @@ jarray ary; int i; - NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String")); + NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String")); NULL_CHECK0(ary = (*env)->NewObjectArray(env, strc, cls, 0)); for (i = 0; i < strc; i++) { jstring str = NewPlatformString(env, *strv++); @@ -1224,6 +1224,7 @@ c = *t++; *s++ = (c == '.') ? '/' : c; } while (c != '\0'); + // use the application class loader for main-class cls = (*env)->FindClass(env, buf); free(buf); @@ -1250,7 +1251,7 @@ jobject jar, man, attr; jstring str, result = 0; - NULL_CHECK0(cls = (*env)->FindClass(env, "java/util/jar/JarFile")); + NULL_CHECK0(cls = FindBootStrapClass(env, "java/util/jar/JarFile")); NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", "(Ljava/lang/String;)V")); NULL_CHECK0(str = NewPlatformString(env, jarname)); @@ -1471,7 +1472,7 @@ jclass ver; jmethodID print; - NULL_CHECK(ver = (*env)->FindClass(env, "sun/misc/Version")); + NULL_CHECK(ver = FindBootStrapClass(env, "sun/misc/Version")); NULL_CHECK(print = (*env)->GetStaticMethodID(env, ver, "print", "()V")); (*env)->CallStaticVoidMethod(env, ver, print); --- old/hotspot/src/os/linux/launcher/java.h 2009-08-01 04:08:36.985152859 +0100 +++ new/hotspot/src/os/linux/launcher/java.h 2009-08-01 04:08:36.910947414 +0100 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. 
All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -100,5 +100,15 @@ * Make launcher spit debug output. */ extern jboolean _launcher_debug; +/* + * This allows for finding classes from the VM's bootstrap class loader + * directly, FindClass uses the application class loader internally, this will + * cause unnecessary searching of the classpath for the required classes. + */ +typedef jclass (JNICALL FindClassFromBootLoader_t(JNIEnv *env, + const char *name, + jboolean throwError)); + +jclass FindBootStrapClass(JNIEnv *env, const char *classname); #endif /* _JAVA_H_ */ --- old/hotspot/src/os/linux/launcher/java_md.c 2009-08-01 04:08:37.853325156 +0100 +++ new/hotspot/src/os/linux/launcher/java_md.c 2009-08-01 04:08:37.764422195 +0100 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1826,3 +1826,23 @@ { return(borrowed_unsetenv(name)); } +/* + * The implementation for finding classes from the bootstrap + * class loader, refer to java.h + */ +static FindClassFromBootLoader_t *findBootClass = NULL; + +jclass +FindBootStrapClass(JNIEnv *env, const char* classname) +{ + if (findBootClass == NULL) { + findBootClass = (FindClassFromBootLoader_t *)dlsym(RTLD_DEFAULT, + "JVM_FindClassFromBootLoader"); + if (findBootClass == NULL) { + fprintf(stderr, "Error: could load method JVM_FindClassFromBootLoader"); + return NULL; + } + } + return findBootClass(env, classname, JNI_FALSE); +} + --- old/hotspot/src/os/linux/vm/attachListener_linux.cpp 2009-08-01 04:08:38.777168661 +0100 +++ new/hotspot/src/os/linux/vm/attachListener_linux.cpp 2009-08-01 04:08:38.690263689 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)attachListener_linux.cpp 1.14 07/05/05 17:04:34 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -235,7 +235,7 @@ // where is the protocol version (1), is the command // name ("load", "datadump", ...), and is an argument int expected_str_count = 2 + AttachOperation::arg_count_max; - int max_len = (strlen(ver_str) + 1) + (AttachOperation::name_length_max + 1) + + const int max_len = (sizeof(ver_str) + 1) + (AttachOperation::name_length_max + 1) + AttachOperation::arg_count_max*(AttachOperation::arg_length_max + 1); char buf[max_len]; --- old/hotspot/src/os/linux/vm/globals_linux.hpp 2009-08-01 04:08:39.653449443 +0100 +++ new/hotspot/src/os/linux/vm/globals_linux.hpp 2009-08-01 04:08:39.564727474 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)globals_linux.hpp 1.12 07/05/05 17:04:35 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,5 +41,6 @@ // platforms, but they may have different default values on other platforms. 
// define_pd_global(bool, UseLargePages, false); +define_pd_global(bool, UseLargePagesIndividualAllocation, false); define_pd_global(bool, UseOSErrorReporting, false); define_pd_global(bool, UseThreadPriorities, true) ; --- old/hotspot/src/os/linux/vm/hpi_linux.hpp 2009-08-01 04:08:40.479407317 +0100 +++ new/hotspot/src/os/linux/vm/hpi_linux.hpp 2009-08-01 04:08:40.395652273 +0100 @@ -73,6 +73,10 @@ RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, (unsigned int) flags)); } +inline int hpi::raw_send(int fd, char *buf, int nBytes, int flags) { + return send(fd, buf, nBytes, flags); +} + inline int hpi::timeout(int fd, long timeout) { julong prevtime,newtime; struct timeval t; --- old/hotspot/src/os/linux/vm/os_linux.cpp 2009-08-01 04:08:42.173231404 +0100 +++ new/hotspot/src/os/linux/vm/os_linux.cpp 2009-08-01 04:08:42.067673364 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)os_linux.cpp 1.259 08/11/24 12:20:22 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -97,6 +97,9 @@ static int SR_signum = SIGUSR2; sigset_t SR_sigset; +/* Used to protect dlsym() calls */ +static pthread_mutex_t dl_mutex; + //////////////////////////////////////////////////////////////////////////////// // utility functions @@ -217,8 +220,8 @@ // a single processor and elide locking (see is_MP() call). static bool unsafe_chroot_detected = false; static const char *unstable_chroot_error = "/proc file system not found.\n" - "Java may be unstable running multithreaded in a chroot " - "environment on Linux when /proc filesystem is not mounted."; + "Java may be unstable running multithreaded in a chroot " + "environment on Linux when /proc filesystem is not mounted."; void os::Linux::initialize_system_info() { _processor_count = sysconf(_SC_NPROCESSORS_CONF); @@ -547,26 +550,25 @@ if (n > 0) { char *str = (char *)malloc(n); confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n); - // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells // us "NPTL-0.29" even we are running with LinuxThreads. Check if this - // is the case. LinuxThreads has a hard limit on max number of threads. + // is the case. LinuxThreads has a hard limit on max number of threads. // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value. // On the other hand, NPTL does not have such a limit, sysconf() // will return -1 and errno is not changed. Check if it is really NPTL. if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 && - strstr(str, "NPTL") && + strstr(str, "NPTL") && sysconf(_SC_THREAD_THREADS_MAX) > 0) { free(str); os::Linux::set_libpthread_version("linuxthreads"); } else { os::Linux::set_libpthread_version(str); - } + } } else { // glibc before 2.3.2 only has LinuxThreads. os::Linux::set_libpthread_version("linuxthreads"); } - + if (strstr(libpthread_version(), "NPTL")) { os::Linux::set_is_NPTL(); } else { @@ -1262,19 +1264,24 @@ return (1000 * 1000); } -jlong os::timeofday() { +// For now, we say that linux does not support vtime. I have no idea +// whether it can actually be made to (DLD, 9/13/05). 
+ +bool os::supports_vtime() { return false; } +bool os::enable_vtime() { return false; } +bool os::vtime_enabled() { return false; } +double os::elapsedVTime() { + // better than nothing, but not much + return elapsedTime(); +} + +jlong os::javaTimeMillis() { timeval time; int status = gettimeofday(&time, NULL); assert(status != -1, "linux error"); return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000); } -// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis -// _use_global_time is only set if CacheTimeMillis is true -jlong os::javaTimeMillis() { - return (_use_global_time ? read_global_time() : timeofday()); -} - #ifndef CLOCK_MONOTONIC #define CLOCK_MONOTONIC (1) #endif @@ -1421,6 +1428,10 @@ return buf; } +struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { + return localtime_r(clock, res); +} + //////////////////////////////////////////////////////////////////////////////// // runtime exit support @@ -1503,6 +1514,54 @@ const char* os::get_temp_directory() { return "/tmp/"; } +static bool file_exists(const char* filename) { + struct stat statbuf; + if (filename == NULL || strlen(filename) == 0) { + return false; + } + return os::stat(filename, &statbuf) == 0; +} + +void os::dll_build_name(char* buffer, size_t buflen, + const char* pname, const char* fname) { + // Copied from libhpi + const size_t pnamelen = pname ? strlen(pname) : 0; + + // Quietly truncate on buffer overflow. Should be an error. + if (pnamelen + strlen(fname) + 10 > (size_t) buflen) { + *buffer = '\0'; + return; + } + + if (pnamelen == 0) { + snprintf(buffer, buflen, "lib%s.so", fname); + } else if (strchr(pname, *os::path_separator()) != NULL) { + int n; + char** pelements = split_path(pname, &n); + for (int i = 0 ; i < n ; i++) { + // Really shouldn't be NULL, but check can't hurt + if (pelements[i] == NULL || strlen(pelements[i]) == 0) { + continue; // skip the empty path values + } + snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname); + if (file_exists(buffer)) { + break; + } + } + // release the storage + for (int i = 0 ; i < n ; i++) { + if (pelements[i] != NULL) { + FREE_C_HEAP_ARRAY(char, pelements[i]); + } + } + if (pelements != NULL) { + FREE_C_HEAP_ARRAY(char*, pelements); + } + } else { + snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); + } +} + const char* os::get_current_directory(char *buf, int buflen) { return getcwd(buf, buflen); } @@ -1752,7 +1811,17 @@ return NULL; } - +/* + * glibc-2.0 libdl is not MT safe. If you are building with any glibc, + * chances are you might want to run the generated bits against glibc-2.0 + * libdl.so, so always use locking for any version of glibc. 
+ */ +void* os::dll_lookup(void* handle, const char* name) { + pthread_mutex_lock(&dl_mutex); + void* res = dlsym(handle, name); + pthread_mutex_unlock(&dl_mutex); + return res; +} bool _print_ascii_file(const char* filename, outputStream* st) { @@ -2235,20 +2304,44 @@ } void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } -void os::free_memory(char *addr, size_t bytes) { } -void os::numa_make_global(char *addr, size_t bytes) { } -void os::numa_make_local(char *addr, size_t bytes) { } -bool os::numa_topology_changed() { return false; } -size_t os::numa_get_groups_num() { return 1; } -int os::numa_get_group_id() { return 0; } -size_t os::numa_get_leaf_groups(int *ids, size_t size) { - if (size > 0) { - ids[0] = 0; - return 1; + +void os::free_memory(char *addr, size_t bytes) { + uncommit_memory(addr, bytes); +} + +void os::numa_make_global(char *addr, size_t bytes) { + Linux::numa_interleave_memory(addr, bytes); +} + +void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { + Linux::numa_tonode_memory(addr, bytes, lgrp_hint); +} + +bool os::numa_topology_changed() { return false; } + +size_t os::numa_get_groups_num() { + int max_node = Linux::numa_max_node(); + return max_node > 0 ? max_node + 1 : 1; +} + +int os::numa_get_group_id() { + int cpu_id = Linux::sched_getcpu(); + if (cpu_id != -1) { + int lgrp_id = Linux::get_node_by_cpu(cpu_id); + if (lgrp_id != -1) { + return lgrp_id; + } } return 0; } +size_t os::numa_get_leaf_groups(int *ids, size_t size) { + for (size_t i = 0; i < size; i++) { + ids[i] = i; + } + return size; +} + bool os::get_page_info(char *start, page_info* info) { return false; } @@ -2257,6 +2350,95 @@ return end; } +extern "C" void numa_warn(int number, char *where, ...) { } +extern "C" void numa_error(char *where) { } + +bool os::Linux::libnuma_init() { + // sched_getcpu() should be in libc. + set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, + dlsym(RTLD_DEFAULT, "sched_getcpu"))); + + if (sched_getcpu() != -1) { // Does it work? + void *handle = dlopen("libnuma.so.1", RTLD_LAZY); + if (handle != NULL) { + set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t, + dlsym(handle, "numa_node_to_cpus"))); + set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t, + dlsym(handle, "numa_max_node"))); + set_numa_available(CAST_TO_FN_PTR(numa_available_func_t, + dlsym(handle, "numa_available"))); + set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t, + dlsym(handle, "numa_tonode_memory"))); + set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t, + dlsym(handle, "numa_interleave_memory"))); + + + if (numa_available() != -1) { + set_numa_all_nodes((unsigned long*)dlsym(handle, "numa_all_nodes")); + // Create a cpu -> node mapping + _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray(0, true); + rebuild_cpu_to_node_map(); + return true; + } + } + } + return false; +} + +// rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id. +// The table is later used in get_node_by_cpu(). +void os::Linux::rebuild_cpu_to_node_map() { + const size_t NCPUS = 32768; // Since the buffer size computation is very obscure + // in libnuma (possible values are starting from 16, + // and continuing up with every other power of 2, but less + // than the maximum number of CPUs supported by kernel), and + // is a subject to change (in libnuma version 2 the requirements + // are more reasonable) we'll just hardcode the number they use + // in the library. 
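      // (Editorial worked example, not part of the patch.)  numa_node_to_cpus()
      // fills cpu_map with one bit per cpu: bit k of word j is set when cpu
      // number (j * BitsPerCLong + k) belongs to the node being queried.  On an
      // LP64 platform BitsPerCLong is 64, so if node 1 reports cpu_map[0] == 0x3,
      // the loop below records cpus 0 and 1 as belonging to node 1.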
+ const size_t BitsPerCLong = sizeof(long) * CHAR_BIT; + + size_t cpu_num = os::active_processor_count(); + size_t cpu_map_size = NCPUS / BitsPerCLong; + size_t cpu_map_valid_size = + MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size); + + cpu_to_node()->clear(); + cpu_to_node()->at_grow(cpu_num - 1); + size_t node_num = numa_get_groups_num(); + + unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size); + for (size_t i = 0; i < node_num; i++) { + if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) { + for (size_t j = 0; j < cpu_map_valid_size; j++) { + if (cpu_map[j] != 0) { + for (size_t k = 0; k < BitsPerCLong; k++) { + if (cpu_map[j] & (1UL << k)) { + cpu_to_node()->at_put(j * BitsPerCLong + k, i); + } + } + } + } + } + } + FREE_C_HEAP_ARRAY(unsigned long, cpu_map); +} + +int os::Linux::get_node_by_cpu(int cpu_id) { + if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) { + return cpu_to_node()->at(cpu_id); + } + return -1; +} + +GrowableArray* os::Linux::_cpu_to_node; +os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu; +os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus; +os::Linux::numa_max_node_func_t os::Linux::_numa_max_node; +os::Linux::numa_available_func_t os::Linux::_numa_available; +os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory; +os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory; +unsigned long* os::Linux::_numa_all_nodes; + bool os::uncommit_memory(char* addr, size_t size) { return ::mmap(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, @@ -2334,8 +2516,20 @@ return ::mprotect(bottom, size, prot) == 0; } -bool os::protect_memory(char* addr, size_t size) { - return linux_mprotect(addr, size, PROT_READ); +// Set protections specified +bool os::protect_memory(char* addr, size_t bytes, ProtType prot, + bool is_committed) { + unsigned int p = 0; + switch (prot) { + case MEM_PROT_NONE: p = PROT_NONE; break; + case MEM_PROT_READ: p = PROT_READ; break; + case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break; + case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break; + default: + ShouldNotReachHere(); + } + // is_committed is unused. + return linux_mprotect(addr, bytes, p); } bool os::guard_memory(char* addr, size_t size) { @@ -2343,7 +2537,7 @@ } bool os::unguard_memory(char* addr, size_t size) { - return linux_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC); + return linux_mprotect(addr, size, PROT_READ|PROT_WRITE); } // Large page support @@ -2487,6 +2681,10 @@ return false; } +bool os::can_execute_large_page_memory() { + return false; +} + // Reserve memory at an arbitrary address, only if that area is // available (and not reserved for something else). @@ -3485,6 +3683,7 @@ Linux::clock_init(); initial_time_count = os::elapsed_counter(); + pthread_mutex_init(&dl_mutex, NULL); } // To install functions for atexit system call @@ -3555,6 +3754,20 @@ Linux::is_floating_stack() ? "floating stack" : "fixed stack"); } + if (UseNUMA) { + if (!Linux::libnuma_init()) { + UseNUMA = false; + } else { + if ((Linux::numa_max_node() < 1)) { + // There's only one node(they start from 0), disable NUMA. + UseNUMA = false; + } + } + if (!UseNUMA && ForceNUMA) { + UseNUMA = true; + } + } + if (MaxFDLimit) { // set the number of file descriptors to max. print out error // if getrlimit/setrlimit fails but continue regardless. 
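/*
 * Editorial sketch, not part of this patch: os::protect_memory() above maps
 * the VM's ProtType values onto mprotect(2) flags.  The snippet below shows
 * the underlying mechanism in plain C -- toggling a region between readable
 * and inaccessible -- using hypothetical helper names.  addr must be
 * page-aligned; protection always applies to whole pages.
 */
#include <sys/mman.h>

static int make_region_readable(void* addr, size_t bytes)   { return mprotect(addr, bytes, PROT_READ); }
static int make_region_unreadable(void* addr, size_t bytes) { return mprotect(addr, bytes, PROT_NONE); }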
@@ -3616,8 +3829,9 @@ // Mark the polling page as readable void os::make_polling_page_readable(void) { - if( !protect_memory((char *)_polling_page, Linux::page_size()) ) + if( !linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) { fatal("Could not enable polling page"); + } }; int os::active_processor_count() { @@ -4542,9 +4756,9 @@ // doesn't block SIGINT et al. int os::fork_and_exec(char* cmd) { const char * argv[4] = {"sh", "-c", cmd, NULL}; - - // fork() in LinuxThreads/NPTL is not async-safe. It needs to run - // pthread_atfork handlers and reset pthread library. All we need is a + + // fork() in LinuxThreads/NPTL is not async-safe. It needs to run + // pthread_atfork handlers and reset pthread library. All we need is a // separate process to execve. Make a direct syscall to fork process. // On IA64 there's no fork syscall, we have to use fork() and hope for // the best... --- old/hotspot/src/os/linux/vm/os_linux.hpp 2009-08-01 04:08:43.411694664 +0100 +++ new/hotspot/src/os/linux/vm/os_linux.hpp 2009-08-01 04:08:43.330944712 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)os_linux.hpp 1.72 08/11/24 12:20:24 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -62,6 +62,8 @@ static bool _is_NPTL; static bool _supports_fast_thread_cpu_time; + static GrowableArray* _cpu_to_node; + protected: static julong _physical_memory; @@ -82,8 +84,9 @@ static void set_is_LinuxThreads() { _is_NPTL = false; } static void set_is_floating_stack() { _is_floating_stack = true; } + static void rebuild_cpu_to_node_map(); + static GrowableArray* cpu_to_node() { return _cpu_to_node; } public: - static void init_thread_fpu_state(); static int get_fpu_control_word(); static void set_fpu_control_word(int fpu_control); @@ -146,6 +149,7 @@ static bool is_floating_stack() { return _is_floating_stack; } static void libpthread_init(); + static bool libnuma_init(); // Minimum stack size a thread can be created with (allowing // the VM to completely create the thread and enter user code) @@ -232,6 +236,46 @@ #undef SR_SUSPENDED }; + +private: + typedef int (*sched_getcpu_func_t)(void); + typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen); + typedef int (*numa_max_node_func_t)(void); + typedef int (*numa_available_func_t)(void); + typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node); + typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask); + + static sched_getcpu_func_t _sched_getcpu; + static numa_node_to_cpus_func_t _numa_node_to_cpus; + static numa_max_node_func_t _numa_max_node; + static numa_available_func_t _numa_available; + static numa_tonode_memory_func_t _numa_tonode_memory; + static numa_interleave_memory_func_t _numa_interleave_memory; + static unsigned long* _numa_all_nodes; + + static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; } + static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; } + static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; } + static void set_numa_available(numa_available_func_t func) { _numa_available = func; } + static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; } + static void 
set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; } + static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; } +public: + static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; } + static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) { + return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1; + } + static int numa_max_node() { return _numa_max_node != NULL ? _numa_max_node() : -1; } + static int numa_available() { return _numa_available != NULL ? _numa_available() : -1; } + static int numa_tonode_memory(void *start, size_t size, int node) { + return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1; + } + static void numa_interleave_memory(void *start, size_t size) { + if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) { + _numa_interleave_memory(start, size, _numa_all_nodes); + } + } + static int get_node_by_cpu(int cpu_id); }; --- old/hotspot/src/os/linux/vm/os_linux.inline.hpp 2009-08-01 04:08:44.329595380 +0100 +++ new/hotspot/src/os/linux/vm/os_linux.inline.hpp 2009-08-01 04:08:44.251326181 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)os_linux.inline.hpp 1.31 07/06/29 04:01:54 JVM" #endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -123,3 +123,6 @@ RESTARTABLE(_cmd, _result); \ return _result; \ } while(false) + +inline bool os::numa_has_static_binding() { return true; } +inline bool os::numa_has_group_homing() { return false; } --- old/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp 2009-08-01 04:08:45.214690842 +0100 +++ new/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp 2009-08-01 04:08:45.128559054 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)generateJvmOffsets.cpp 1.33 07/05/05 17:04:38 JVM" #endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -199,7 +199,7 @@ printf("\n"); GEN_VALUE(OFFSET_HeapBlockHeader_used, offset_of(HeapBlock::Header, _used)); - GEN_OFFS(oopDesc, _klass); + GEN_OFFS(oopDesc, _metadata); printf("\n"); GEN_VALUE(AccessFlags_NATIVE, JVM_ACC_NATIVE); --- old/hotspot/src/os/solaris/dtrace/jhelper.d 2009-08-01 04:08:46.053507482 +0100 +++ new/hotspot/src/os/solaris/dtrace/jhelper.d 2009-08-01 04:08:45.973491341 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)jhelper.d 1.25 07/05/05 17:04:38" #endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -49,6 +49,7 @@ extern pointer __1cJCodeCacheF_heap_; extern pointer __1cIUniverseP_methodKlassObj_; extern pointer __1cIUniverseO_collectedHeap_; +extern pointer __1cIUniverseK_heap_base_; extern pointer __1cHnmethodG__vtbl_; extern pointer __1cKBufferBlobG__vtbl_; @@ -110,7 +111,7 @@ copyin_offset(OFFSET_constantPoolOopDesc_pool_holder); copyin_offset(OFFSET_HeapBlockHeader_used); - copyin_offset(OFFSET_oopDesc_klass); + copyin_offset(OFFSET_oopDesc_metadata); copyin_offset(OFFSET_symbolOopDesc_length); copyin_offset(OFFSET_symbolOopDesc_body); @@ -153,6 +154,7 @@ this->Universe_methodKlassOop = copyin_ptr(&``__1cIUniverseP_methodKlassObj_); this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_); + this->Universe_heap_base = copyin_ptr(&``__1cIUniverseK_heap_base_); /* Reading volatile values */ this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address + @@ -296,10 +298,27 @@ dtrace:helper:ustack: /!this->done && this->vtbl == this->BufferBlob_vtbl && +this->Universe_heap_base == NULL && this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/ { MARK_LINE; - this->klass = copyin_ptr(this->methodOopPtr + OFFSET_oopDesc_klass); + this->klass = copyin_ptr(this->methodOopPtr + OFFSET_oopDesc_metadata); + this->methodOop = this->klass == this->Universe_methodKlassOop; + this->done = !this->methodOop; +} + +dtrace:helper:ustack: +/!this->done && this->vtbl == this->BufferBlob_vtbl && +this->Universe_heap_base != NULL && +this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/ +{ + MARK_LINE; + /* + * Read compressed pointer and decode heap oop, same as oop.inline.hpp + */ + this->cklass = copyin_uint32(this->methodOopPtr + OFFSET_oopDesc_metadata); + this->klass = (uint64_t)((uintptr_t)this->Universe_heap_base + + ((uintptr_t)this->cklass << 3)); this->methodOop = this->klass == this->Universe_methodKlassOop; this->done = !this->methodOop; } --- old/hotspot/src/os/solaris/dtrace/libjvm_db.c 2009-08-01 04:08:46.884462435 +0100 +++ new/hotspot/src/os/solaris/dtrace/libjvm_db.c 2009-08-01 04:08:46.798136049 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)libjvm_db.c 1.29 07/05/05 17:04:38 JVM" #endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -151,9 +151,11 @@ uint64_t Universe_methodKlassObj_address; uint64_t CodeCache_heap_address; + uint64_t Universe_heap_base_address; /* Volatiles */ uint64_t Universe_methodKlassObj; + uint64_t Universe_heap_base; uint64_t CodeCache_low; uint64_t CodeCache_high; uint64_t CodeCache_segmap_low; @@ -169,7 +171,6 @@ Frame_t curr_fr; }; - static int read_string(struct ps_prochandle *P, char *buf, /* caller's buffer */ @@ -188,6 +189,14 @@ return -1; } +static int read_compressed_pointer(jvm_agent_t* J, uint64_t base, uint32_t *ptr) { + int err = -1; + uint32_t ptr32; + err = ps_pread(J->P, base, &ptr32, sizeof(uint32_t)); + *ptr = ptr32; + return err; +} + static int read_pointer(jvm_agent_t* J, uint64_t base, uint64_t* ptr) { int err = -1; uint32_t ptr32; @@ -273,6 +282,9 @@ if (strcmp("_methodKlassObj", vmp->fieldName) == 0) { J->Universe_methodKlassObj_address = vmp->address; } + if (strcmp("_heap_base", vmp->fieldName) == 0) { + J->Universe_heap_base_address = vmp->address; + } } CHECK_FAIL(err); @@ -295,6 +307,8 @@ err = read_pointer(J, J->Universe_methodKlassObj_address, &J->Universe_methodKlassObj); CHECK_FAIL(err); + err = read_pointer(J, J->Universe_heap_base_address, &J->Universe_heap_base); + CHECK_FAIL(err); err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low, &J->CodeCache_low); CHECK_FAIL(err); @@ -447,7 +461,17 @@ static int is_methodOop(jvm_agent_t* J, uint64_t methodOopPtr) { uint64_t klass; int err; - err = read_pointer(J, methodOopPtr + OFFSET_oopDesc_klass, &klass); + // If heap_base is nonnull, this was a compressed oop. + if (J->Universe_heap_base != NULL) { + uint32_t cklass; + err = read_compressed_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata, + &cklass); + // decode heap oop, same as oop.inline.hpp + klass = (uint64_t)((uintptr_t)J->Universe_heap_base + + ((uintptr_t)cklass << 3)); + } else { + err = read_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata, &klass); + } if (err != PS_OK) goto fail; return klass == J->Universe_methodKlassObj; --- old/hotspot/src/os/solaris/launcher/java.c 2009-08-01 04:08:47.912670881 +0100 +++ new/hotspot/src/os/solaris/launcher/java.c 2009-08-01 04:08:47.820739226 +0100 @@ -1,5 +1,5 @@ /* - * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1110,7 +1110,7 @@ if (propname) { jclass cls; jmethodID mid; - NULL_CHECK0 (cls = (*env)->FindClass(env, "java/lang/System")); + NULL_CHECK0 (cls = FindBootStrapClass(env, "java/lang/System")); NULL_CHECK0 (mid = (*env)->GetStaticMethodID( env, cls, "getProperty", @@ -1125,7 +1125,7 @@ static jboolean isEncodingSupported(JNIEnv *env, jstring enc) { jclass cls; jmethodID mid; - NULL_CHECK0 (cls = (*env)->FindClass(env, "java/nio/charset/Charset")); + NULL_CHECK0 (cls = FindBootStrapClass(env, "java/nio/charset/Charset")); NULL_CHECK0 (mid = (*env)->GetStaticMethodID( env, cls, "isSupported", @@ -1161,21 +1161,21 @@ #else if (isEncodingSupported(env, enc) == JNI_TRUE) { #endif - NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String")); - NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", - "([BLjava/lang/String;)V")); - str = (*env)->NewObject(env, cls, mid, ary, enc); - } else { - /*If the encoding specified in sun.jnu.encoding is not - endorsed by "Charset.isSupported" we have to fall back + NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String")); + NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", + "([BLjava/lang/String;)V")); + str = (*env)->NewObject(env, cls, mid, ary, enc); + } else { + /*If the encoding specified in sun.jnu.encoding is not + endorsed by "Charset.isSupported" we have to fall back to use String(byte[]) explicitly here without specifying - the encoding name, in which the StringCoding class will - pickup the iso-8859-1 as the fallback converter for us. - */ - NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String")); - NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", - "([B)V")); - str = (*env)->NewObject(env, cls, mid, ary); + the encoding name, in which the StringCoding class will + pickup the iso-8859-1 as the fallback converter for us. + */ + NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String")); + NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", + "([B)V")); + str = (*env)->NewObject(env, cls, mid, ary); } (*env)->DeleteLocalRef(env, ary); return str; @@ -1195,7 +1195,7 @@ jarray ary; int i; - NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String")); + NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String")); NULL_CHECK0(ary = (*env)->NewObjectArray(env, strc, cls, 0)); for (i = 0; i < strc; i++) { jstring str = NewPlatformString(env, *strv++); @@ -1224,6 +1224,7 @@ c = *t++; *s++ = (c == '.') ? '/' : c; } while (c != '\0'); + // use the application class loader for the main-class cls = (*env)->FindClass(env, buf); free(buf); @@ -1250,7 +1251,7 @@ jobject jar, man, attr; jstring str, result = 0; - NULL_CHECK0(cls = (*env)->FindClass(env, "java/util/jar/JarFile")); + NULL_CHECK0(cls = FindBootStrapClass(env, "java/util/jar/JarFile")); NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "", "(Ljava/lang/String;)V")); NULL_CHECK0(str = NewPlatformString(env, jarname)); @@ -1471,7 +1472,7 @@ jclass ver; jmethodID print; - NULL_CHECK(ver = (*env)->FindClass(env, "sun/misc/Version")); + NULL_CHECK(ver = FindBootStrapClass(env, "sun/misc/Version")); NULL_CHECK(print = (*env)->GetStaticMethodID(env, ver, "print", "()V")); (*env)->CallStaticVoidMethod(env, ver, print); --- old/hotspot/src/os/solaris/launcher/java.h 2009-08-01 04:08:48.876758155 +0100 +++ new/hotspot/src/os/solaris/launcher/java.h 2009-08-01 04:08:48.803467809 +0100 @@ -1,5 +1,5 @@ /* - * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved. 
+ * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -101,4 +101,15 @@ */ extern jboolean _launcher_debug; +/* + * This allows for finding classes from the VM's bootstrap class loader + * directly, FindClass uses the application class loader internally, this will + * cause unnecessary searching of the classpath for the required classes. + */ +typedef jclass (JNICALL FindClassFromBootLoader_t(JNIEnv *env, + const char *name, + jboolean throwError)); + +jclass FindBootStrapClass(JNIEnv *env, const char *classname); + #endif /* _JAVA_H_ */ --- old/hotspot/src/os/solaris/launcher/java_md.c 2009-08-01 04:08:49.709534857 +0100 +++ new/hotspot/src/os/solaris/launcher/java_md.c 2009-08-01 04:08:49.614546879 +0100 @@ -1,5 +1,5 @@ /* - * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1826,3 +1826,24 @@ { return(borrowed_unsetenv(name)); } + +/* + * The implementation for finding classes from the bootstrap + * class loader, refer to java.h + */ +static FindClassFromBootLoader_t *findBootClass = NULL; + +jclass +FindBootStrapClass(JNIEnv *env, const char* classname) +{ + if (findBootClass == NULL) { + findBootClass = (FindClassFromBootLoader_t *)dlsym(RTLD_DEFAULT, + "JVM_FindClassFromBootLoader"); + if (findBootClass == NULL) { + fprintf(stderr, "Error: could not load method JVM_FindClassFromBootLoader"); + return NULL; + } + } + return findBootClass(env, classname, JNI_FALSE); +} + --- old/hotspot/src/os/solaris/vm/globals_solaris.hpp 2009-08-01 04:08:50.648022999 +0100 +++ new/hotspot/src/os/solaris/vm/globals_solaris.hpp 2009-08-01 04:08:50.571003611 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)globals_solaris.hpp 1.12 07/05/05 17:04:40 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,6 +47,7 @@ // platforms, but they may have different default values on other platforms. // define_pd_global(bool, UseLargePages, true); +define_pd_global(bool, UseLargePagesIndividualAllocation, false); define_pd_global(bool, UseOSErrorReporting, false); define_pd_global(bool, UseThreadPriorities, false); --- old/hotspot/src/os/solaris/vm/hpi_solaris.hpp 2009-08-01 04:08:51.492523565 +0100 +++ new/hotspot/src/os/solaris/vm/hpi_solaris.hpp 2009-08-01 04:08:51.414565571 +0100 @@ -74,7 +74,11 @@ INTERRUPTIBLE_RETURN_INT(::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted); } -// As both poll and select can be interrupted by signals, we have to be +inline int hpi::raw_send(int fd, char *buf, int nBytes, int flags) { + RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags)); +} + +// As both poll and select can be interrupted by signals, we have to be // prepared to restart the system call after updating the timeout, unless // a poll() is done with timeout == -1, in which case we repeat with this // "wait forever" value. 
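/*
 * Editorial sketch, not part of this patch: hpi::raw_send() above relies on
 * the RESTARTABLE_RETURN_INT macro, which (as its name suggests) retries the
 * system call for as long as it fails with EINTR.  A plain-C equivalent,
 * with a hypothetical helper name:
 */
#include <errno.h>
#include <sys/socket.h>

static int send_restartable(int fd, const char* buf, int nBytes, int flags) {
  int res;
  do {
    /* retry if a signal interrupted the call before any data was sent */
    res = (int)send(fd, buf, (size_t)nBytes, flags);
  } while (res == -1 && errno == EINTR);
  return res;
}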
--- old/hotspot/src/os/solaris/vm/osThread_solaris.cpp 2009-08-01 04:08:52.330705909 +0100 +++ new/hotspot/src/os/solaris/vm/osThread_solaris.cpp 2009-08-01 04:08:52.252619431 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)osThread_solaris.cpp 1.59 07/06/29 04:03:46 JVM" #endif /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -72,15 +72,15 @@ static intptr_t compare_and_exchange_current_callback ( intptr_t callback, intptr_t *addr, intptr_t compare_value, Mutex *sync) { if (VM_Version::supports_compare_and_exchange()) { - return Atomic::cmpxchg_ptr(callback, addr, compare_value); + return Atomic::cmpxchg_ptr(callback, addr, compare_value); } else { - MutexLockerEx(sync, Mutex::_no_safepoint_check_flag); - if (*addr == compare_value) { - *addr = callback; - return compare_value; - } else { - return callback; - } + MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag); + if (*addr == compare_value) { + *addr = callback; + return compare_value; + } else { + return callback; + } } } @@ -89,7 +89,7 @@ if (VM_Version::supports_compare_and_exchange()) { return Atomic::xchg_ptr(callback, addr); } else { - MutexLockerEx(sync, Mutex::_no_safepoint_check_flag); + MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag); intptr_t cb = *addr; *addr = callback; return cb; --- old/hotspot/src/os/solaris/vm/os_solaris.cpp 2009-08-01 04:08:53.212850057 +0100 +++ new/hotspot/src/os/solaris/vm/os_solaris.cpp 2009-08-01 04:08:53.091356346 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)os_solaris.cpp 1.402 07/10/04 10:49:26 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -125,6 +125,13 @@ # define MADV_ACCESS_MANY 8 /* many processes to access heavily */ #endif +#ifndef LGRP_RSRC_CPU +# define LGRP_RSRC_CPU 0 /* CPU resources */ +#endif +#ifndef LGRP_RSRC_MEM +# define LGRP_RSRC_MEM 1 /* memory resources */ +#endif + // Some more macros from sys/mman.h that are not present in Solaris 8. #ifndef MAX_MEMINFO_CNT @@ -319,6 +326,10 @@ return (size_t)(base - bottom); } +struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { + return localtime_r(clock, res); +} + // interruptible infrastructure // setup_interruptible saves the thread state before going into an @@ -458,16 +469,14 @@ int online_cpus = sysconf(_SC_NPROCESSORS_ONLN); pid_t pid = getpid(); psetid_t pset = PS_NONE; - // Are we running in a processor set? + // Are we running in a processor set or is there any processor set around? if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) { - if (pset != PS_NONE) { - uint_t pset_cpus; - // Query number of cpus in processor set - if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) { - assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check"); - _processors_online = pset_cpus; - return pset_cpus; - } + uint_t pset_cpus; + // Query the number of cpus available to us. 
+ if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) { + assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check"); + _processors_online = pset_cpus; + return pset_cpus; } } // Otherwise return number of online cpus @@ -1636,16 +1645,24 @@ // getTimeNanos is guaranteed to not move backward on Solaris inline hrtime_t getTimeNanos() { if (VM_Version::supports_cx8()) { - bool retry = false; - hrtime_t newtime = gethrtime(); - hrtime_t oldmaxtime = max_hrtime; - hrtime_t retmaxtime = oldmaxtime; - while ((newtime > retmaxtime) && (retry == false || retmaxtime != oldmaxtime)) { - oldmaxtime = retmaxtime; - retmaxtime = Atomic::cmpxchg(newtime, (volatile jlong *)&max_hrtime, oldmaxtime); - retry = true; - } - return (newtime > retmaxtime) ? newtime : retmaxtime; + const hrtime_t now = gethrtime(); + const hrtime_t prev = max_hrtime; + if (now <= prev) return prev; // same or retrograde time; + const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev); + assert(obsv >= prev, "invariant"); // Monotonicity + // If the CAS succeeded then we're done and return "now". + // If the CAS failed and the observed value "obs" is >= now then + // we should return "obs". If the CAS failed and now > obs > prv then + // some other thread raced this thread and installed a new value, in which case + // we could either (a) retry the entire operation, (b) retry trying to install now + // or (c) just return obs. We use (c). No loop is required although in some cases + // we might discard a higher "now" value in deference to a slightly lower but freshly + // installed obs value. That's entirely benign -- it admits no new orderings compared + // to (a) or (b) -- and greatly reduces coherence traffic. + // We might also condition (c) on the magnitude of the delta between obs and now. + // Avoiding excessive CAS operations to hot RW locations is critical. + // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate + return (prev == obsv) ? now : obsv ; } else { return oldgetTimeNanos(); } @@ -1687,6 +1704,40 @@ } } +bool os::supports_vtime() { return true; } + +bool os::enable_vtime() { + int fd = open("/proc/self/ctl", O_WRONLY); + if (fd == -1) + return false; + + long cmd[] = { PCSET, PR_MSACCT }; + int res = write(fd, cmd, sizeof(long) * 2); + close(fd); + if (res != sizeof(long) * 2) + return false; + + return true; +} + +bool os::vtime_enabled() { + int fd = open("/proc/self/status", O_RDONLY); + if (fd == -1) + return false; + + pstatus_t status; + int res = read(fd, (void*) &status, sizeof(pstatus_t)); + close(fd); + if (res != sizeof(pstatus_t)) + return false; + + return status.pr_flags & PR_MSACCT; +} + +double os::elapsedVTime() { + return (double)gethrvtime() / (double)hrtime_hz; +} + // Used internally for comparisons only // getTimeMillis guaranteed to not move backwards on Solaris jlong getTimeMillis() { @@ -1694,19 +1745,14 @@ return (jlong)(nanotime / NANOSECS_PER_MILLISECS); } -jlong os::timeofday() { +// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis +jlong os::javaTimeMillis() { timeval t; if (gettimeofday( &t, NULL) == -1) - fatal1("timeofday: gettimeofday (%s)", strerror(errno)); + fatal1("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)); return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000; } -// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis -// _use_global_time is only set if CacheTimeMillis is true -jlong os::javaTimeMillis() { - return (_use_global_time ? 
read_global_time() : timeofday()); -} - jlong os::javaTimeNanos() { return (jlong)getTimeNanos(); } @@ -1784,6 +1830,54 @@ const char* os::get_temp_directory() { return "/tmp/"; } +static bool file_exists(const char* filename) { + struct stat statbuf; + if (filename == NULL || strlen(filename) == 0) { + return false; + } + return os::stat(filename, &statbuf) == 0; +} + +void os::dll_build_name(char* buffer, size_t buflen, + const char* pname, const char* fname) { + // Copied from libhpi + const size_t pnamelen = pname ? strlen(pname) : 0; + + // Quietly truncate on buffer overflow. Should be an error. + if (pnamelen + strlen(fname) + 10 > (size_t) buflen) { + *buffer = '\0'; + return; + } + + if (pnamelen == 0) { + snprintf(buffer, buflen, "lib%s.so", fname); + } else if (strchr(pname, *os::path_separator()) != NULL) { + int n; + char** pelements = split_path(pname, &n); + for (int i = 0 ; i < n ; i++) { + // really shouldn't be NULL but what the heck, check can't hurt + if (pelements[i] == NULL || strlen(pelements[i]) == 0) { + continue; // skip the empty path values + } + snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname); + if (file_exists(buffer)) { + break; + } + } + // release the storage + for (int i = 0 ; i < n ; i++) { + if (pelements[i] != NULL) { + FREE_C_HEAP_ARRAY(char, pelements[i]); + } + } + if (pelements != NULL) { + FREE_C_HEAP_ARRAY(char*, pelements); + } + } else { + snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); + } +} + const char* os::get_current_directory(char *buf, int buflen) { return getcwd(buf, buflen); } @@ -2035,6 +2129,9 @@ return NULL; } +void* os::dll_lookup(void* handle, const char* name) { + return dlsym(handle, name); +} bool _print_ascii_file(const char* filename, outputStream* st) { @@ -2610,7 +2707,7 @@ } // Tell the OS to make the range local to the first-touching LWP -void os::numa_make_local(char *addr, size_t bytes) { +void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) { debug_only(warning("MADV_ACCESS_LWP failed.")); @@ -2648,16 +2745,27 @@ return 1; } if (!r) { + // That's a leaf node. assert (bottom <= cur, "Sanity check"); - ids[bottom++] = ids[cur]; + // Check if the node has memory + if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur], + NULL, 0, LGRP_RSRC_MEM) > 0) { + ids[bottom++] = ids[cur]; + } } top += r; cur++; } + if (bottom == 0) { + // Handle a situation, when the OS reports no memory available. + // Assume UMA architecture. + ids[0] = 0; + return 1; + } return bottom; } -// Detect the topology change. Typically happens during CPU pluggin-unplugging. +// Detect the topology change. Typically happens during CPU plugging-unplugging. bool os::numa_topology_changed() { int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie()); if (is_stale != -1 && is_stale) { @@ -2672,11 +2780,20 @@ // Get the group id of the current LWP. int os::numa_get_group_id() { - int lgrp_id = os::Solaris::lgrp_home(P_LWPID, P_MYID); + int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID); if (lgrp_id == -1) { return 0; } - return lgrp_id; + const int size = os::numa_get_groups_num(); + int *ids = (int*)alloca(size * sizeof(int)); + + // Get the ids of all lgroups with memory; r is the count. 
+ int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id, + (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM); + if (r <= 0) { + return 0; + } + return ids[os::random() % r]; } // Request information about the page. @@ -2788,16 +2905,15 @@ return b; } -char* -os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { - char* addr = NULL; - int flags; - - flags = MAP_PRIVATE | MAP_NORESERVE; - if (requested_addr != NULL) { - flags |= MAP_FIXED; - addr = requested_addr; - } else if (has_map_align && alignment_hint > (size_t) vm_page_size()) { +char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) { + char* addr = requested_addr; + int flags = MAP_PRIVATE | MAP_NORESERVE; + + assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap"); + + if (fixed) { + flags |= MAP_FIXED; + } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) { flags |= MAP_ALIGN; addr = (char*) alignment_hint; } @@ -2805,11 +2921,14 @@ // Map uncommitted pages PROT_NONE so we fail early if we touch an // uncommitted page. Otherwise, the read/write might succeed if we // have enough swap space to back the physical page. - addr = Solaris::mmap_chunk(addr, bytes, flags, PROT_NONE); + return mmap_chunk(addr, bytes, flags, PROT_NONE); +} - guarantee(requested_addr == NULL || requested_addr == addr, - "OS failed to return requested mmap address."); +char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { + char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL)); + guarantee(requested_addr == NULL || requested_addr == addr, + "OS failed to return requested mmap address."); return addr; } @@ -2835,6 +2954,31 @@ // in one of the methods further up the call chain. See bug 5044738. assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block"); + // Since snv_84, Solaris attempts to honor the address hint - see 5003415. + // Give it a try, if the kernel honors the hint we can return immediately. + char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false); + volatile int err = errno; + if (addr == requested_addr) { + return addr; + } else if (addr != NULL) { + unmap_memory(addr, bytes); + } + + if (PrintMiscellaneous && Verbose) { + char buf[256]; + buf[0] = '\0'; + if (addr == NULL) { + jio_snprintf(buf, sizeof(buf), ": %s", strerror(err)); + } + warning("attempt_reserve_memory_at: couldn't reserve %d bytes at " + PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT + "%s", bytes, requested_addr, addr, buf); + } + + // Address hint method didn't work. Fall back to the old method. + // In theory, once SNV becomes our oldest supported platform, this + // code will no longer be needed. + // // Repeatedly allocate blocks until the block is allocated at the // right spot. Give up after max_tries. int i; @@ -2925,10 +3069,23 @@ return retVal == 0; } -// Protect memory (make it read-only. (Used to pass readonly pages through +// Protect memory (Used to pass readonly pages through // JNI GetArrayElements with empty arrays.) -bool os::protect_memory(char* addr, size_t bytes) { - return solaris_mprotect(addr, bytes, PROT_READ); +// Also, used for serialization page and for compressed oops null pointer +// checking. 
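// (Editorial note, illustrative only -- not part of this patch.)  A caller
// that wants a read-only page passes MEM_PROT_READ here, which the switch
// below translates into mprotect()'s PROT_READ; MEM_PROT_NONE makes the
// range inaccessible altogether.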
+bool os::protect_memory(char* addr, size_t bytes, ProtType prot, + bool is_committed) { + unsigned int p = 0; + switch (prot) { + case MEM_PROT_NONE: p = PROT_NONE; break; + case MEM_PROT_READ: p = PROT_READ; break; + case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break; + case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break; + default: + ShouldNotReachHere(); + } + // is_committed is unused. + return solaris_mprotect(addr, bytes, p); } // guard_memory and unguard_memory only happens within stack guard pages. @@ -2939,7 +3096,7 @@ } bool os::unguard_memory(char* addr, size_t bytes) { - return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE|PROT_EXEC); + return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE); } // Large page support @@ -3070,6 +3227,8 @@ if (UseISM) { // ISM disables MPSS to be compatible with old JDK behavior UseMPSS = false; + _page_sizes[0] = _large_page_size; + _page_sizes[1] = vm_page_size(); } UseMPSS = UseMPSS && @@ -3159,6 +3318,10 @@ return UseISM ? false : true; } +bool os::can_execute_large_page_memory() { + return UseISM ? false : true; +} + static int os_sleep(jlong millis, bool interruptible) { const jlong limit = INT_MAX; jlong prevtime; @@ -3639,12 +3802,11 @@ iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms; int maxClamped = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim); iaInfo->ia_upri = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio); - iaInfo->ia_uprilim = IA_NOCHANGE; - iaInfo->ia_nice = IA_NOCHANGE; - iaInfo->ia_mode = IA_NOCHANGE; - if (ThreadPriorityVerbose) { - tty->print_cr ("IA: [%d...%d] %d->%d\n", - iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri); + iaInfo->ia_uprilim = IA_NOCHANGE; + iaInfo->ia_mode = IA_NOCHANGE; + if (ThreadPriorityVerbose) { + tty->print_cr ("IA: [%d...%d] %d->%d\n", + iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri); } } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms; @@ -4328,6 +4490,7 @@ os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini; os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root; os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children; +os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources; os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps; os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale; os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0; @@ -4366,61 +4529,52 @@ // threads. Calling thr_setprio is meaningless in this case. 
// bool isT2_libthread() { - int i, rslt; - static prheader_t * lwpArray = NULL; + static prheader_t * lwpArray = NULL; static int lwpSize = 0; static int lwpFile = -1; lwpstatus_t * that; - int aslwpcount; char lwpName [128]; bool isT2 = false; #define ADR(x) ((uintptr_t)(x)) #define LWPINDEX(ary,ix) ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1)))) - aslwpcount = 0; - lwpSize = 16*1024; - lwpArray = ( prheader_t *)NEW_C_HEAP_ARRAY (char, lwpSize); - lwpFile = open ("/proc/self/lstatus", O_RDONLY, 0); - if (lwpArray == NULL) { - if ( ThreadPriorityVerbose ) warning ("Couldn't allocate T2 Check array\n"); - return(isT2); - } + lwpFile = open("/proc/self/lstatus", O_RDONLY, 0); if (lwpFile < 0) { - if ( ThreadPriorityVerbose ) warning ("Couldn't open /proc/self/lstatus\n"); - return(isT2); + if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n"); + return false; } + lwpSize = 16*1024; for (;;) { lseek (lwpFile, 0, SEEK_SET); - rslt = read (lwpFile, lwpArray, lwpSize); + lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize); + if (read(lwpFile, lwpArray, lwpSize) < 0) { + if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n"); + break; + } if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) { + // We got a good snapshot - now iterate over the list. + int aslwpcount = 0; + for (int i = 0; i < lwpArray->pr_nent; i++ ) { + that = LWPINDEX(lwpArray,i); + if (that->pr_flags & PR_ASLWP) { + aslwpcount++; + } + } + if (aslwpcount == 0) isT2 = true; break; } - FREE_C_HEAP_ARRAY(char, lwpArray); lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize; - lwpArray = ( prheader_t *)NEW_C_HEAP_ARRAY (char, lwpSize); - if (lwpArray == NULL) { - if ( ThreadPriorityVerbose ) warning ("Couldn't allocate T2 Check array\n"); - return(isT2); - } + FREE_C_HEAP_ARRAY(char, lwpArray); // retry. } - // We got a good snapshot - now iterate over the list. 
- for (i = 0; i < lwpArray->pr_nent; i++ ) { - that = LWPINDEX(lwpArray,i); - if (that->pr_flags & PR_ASLWP) { - aslwpcount++; - } - } - if ( aslwpcount == 0 ) isT2 = true; - FREE_C_HEAP_ARRAY(char, lwpArray); close (lwpFile); - if ( ThreadPriorityVerbose ) { - if ( isT2 ) tty->print_cr("We are running with a T2 libthread\n"); + if (ThreadPriorityVerbose) { + if (isT2) tty->print_cr("We are running with a T2 libthread\n"); else tty->print_cr("We are not running with a T2 libthread\n"); } - return (isT2); + return isT2; } @@ -4531,23 +4685,24 @@ } } -void os::Solaris::liblgrp_init() { - void *handle = dlopen("liblgrp.so", RTLD_LAZY); +bool os::Solaris::liblgrp_init() { + void *handle = dlopen("liblgrp.so.1", RTLD_LAZY); if (handle != NULL) { os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home"))); os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init"))); os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini"))); os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root"))); os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children"))); + os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources"))); os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps"))); os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t, dlsym(handle, "lgrp_cookie_stale"))); lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER); set_lgrp_cookie(c); - } else { - warning("your OS does not support NUMA"); + return true; } + return false; } void os::Solaris::misc_sym_init() { @@ -4716,9 +4871,25 @@ vm_page_size())); Solaris::libthread_init(); + if (UseNUMA) { - Solaris::liblgrp_init(); + if (!Solaris::liblgrp_init()) { + UseNUMA = false; + } else { + size_t lgrp_limit = os::numa_get_groups_num(); + int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit); + size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit); + FREE_C_HEAP_ARRAY(int, lgrp_ids); + if (lgrp_num < 2) { + // There's only one locality group, disable NUMA. + UseNUMA = false; + } + } + if (!UseNUMA && ForceNUMA) { + UseNUMA = true; + } } + Solaris::misc_sym_init(); Solaris::signal_sets_init(); Solaris::init_signal_mem(); --- old/hotspot/src/os/solaris/vm/os_solaris.hpp 2009-08-01 04:08:54.480275224 +0100 +++ new/hotspot/src/os/solaris/vm/os_solaris.hpp 2009-08-01 04:08:54.404932505 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)os_solaris.hpp 1.121 07/06/29 04:05:00 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
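liblgrp_init() now binds liblgrp.so.1 lazily and reports failure to its caller instead of printing a warning, which lets os::init_2() simply clear UseNUMA when the library is missing or fewer than two locality groups exist. A hedged sketch of that optional-binding pattern is below; only lgrp_nlgrps is resolved for brevity, and bind_liblgrp() and the globals are illustrative names, not HotSpot's.

#include <dlfcn.h>
#include <stdint.h>

typedef uintptr_t lgrp_cookie_t;
typedef int (*lgrp_nlgrps_func_t)(lgrp_cookie_t cookie);

static lgrp_nlgrps_func_t g_lgrp_nlgrps = NULL;

// Bind the NUMA entry points at runtime; returning false lets the caller
// fall back to non-NUMA behavior instead of warning or aborting.
static bool bind_liblgrp() {
  void* handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle == NULL) return false;              // no liblgrp: no NUMA support
  g_lgrp_nlgrps = (lgrp_nlgrps_func_t) dlsym(handle, "lgrp_nlgrps");
  return g_lgrp_nlgrps != NULL;
}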
* * This code is free software; you can redistribute it and/or modify it @@ -69,6 +69,7 @@ typedef uintptr_t lgrp_cookie_t; typedef id_t lgrp_id_t; + typedef int lgrp_rsrc_t; typedef enum lgrp_view { LGRP_VIEW_CALLER, /* what's available to the caller */ LGRP_VIEW_OS /* what's available to operating system */ @@ -80,6 +81,9 @@ typedef lgrp_id_t (*lgrp_root_func_t)(lgrp_cookie_t cookie); typedef int (*lgrp_children_func_t)(lgrp_cookie_t cookie, lgrp_id_t parent, lgrp_id_t *lgrp_array, uint_t lgrp_array_size); + typedef int (*lgrp_resources_func_t)(lgrp_cookie_t cookie, lgrp_id_t lgrp, + lgrp_id_t *lgrp_array, uint_t lgrp_array_size, + lgrp_rsrc_t type); typedef int (*lgrp_nlgrps_func_t)(lgrp_cookie_t cookie); typedef int (*lgrp_cookie_stale_func_t)(lgrp_cookie_t cookie); typedef int (*meminfo_func_t)(const uint64_t inaddr[], int addr_count, @@ -91,6 +95,7 @@ static lgrp_fini_func_t _lgrp_fini; static lgrp_root_func_t _lgrp_root; static lgrp_children_func_t _lgrp_children; + static lgrp_resources_func_t _lgrp_resources; static lgrp_nlgrps_func_t _lgrp_nlgrps; static lgrp_cookie_stale_func_t _lgrp_cookie_stale; static lgrp_cookie_t _lgrp_cookie; @@ -112,7 +117,6 @@ static int (*get_libjsig_version)(); static void save_preinstalled_handler(int, struct sigaction&); static void check_signal_handler(int sig); - // For overridable signals static int _SIGinterrupt; // user-overridable INTERRUPT_SIGNAL static int _SIGasync; // user-overridable ASYNC_SIGNAL @@ -159,6 +163,7 @@ static int get_dev_zero_fd() { return _dev_zero_fd; } static void set_dev_zero_fd(int fd) { _dev_zero_fd = fd; } static char* mmap_chunk(char *addr, size_t size, int flags, int prot); + static char* anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed); static bool mpss_sanity_check(bool warn, size_t * page_size); static bool ism_sanity_check (bool warn, size_t * page_size); @@ -172,9 +177,9 @@ static address _main_stack_base; public: - static void libthread_init(); - static void synchronization_init(); - static void liblgrp_init(); + static void libthread_init(); + static void synchronization_init(); + static bool liblgrp_init(); // Load miscellaneous symbols. static void misc_sym_init(); // This boolean allows users to forward their own non-matching signals @@ -255,8 +260,9 @@ static void set_lgrp_init(lgrp_init_func_t func) { _lgrp_init = func; } static void set_lgrp_fini(lgrp_fini_func_t func) { _lgrp_fini = func; } static void set_lgrp_root(lgrp_root_func_t func) { _lgrp_root = func; } - static void set_lgrp_children(lgrp_children_func_t func) { _lgrp_children = func; } - static void set_lgrp_nlgrps(lgrp_nlgrps_func_t func) { _lgrp_nlgrps = func; } + static void set_lgrp_children(lgrp_children_func_t func) { _lgrp_children = func; } + static void set_lgrp_resources(lgrp_resources_func_t func) { _lgrp_resources = func; } + static void set_lgrp_nlgrps(lgrp_nlgrps_func_t func) { _lgrp_nlgrps = func; } static void set_lgrp_cookie_stale(lgrp_cookie_stale_func_t func) { _lgrp_cookie_stale = func; } static void set_lgrp_cookie(lgrp_cookie_t cookie) { _lgrp_cookie = cookie; } @@ -268,6 +274,12 @@ lgrp_id_t *lgrp_array, uint_t lgrp_array_size) { return _lgrp_children != NULL ? _lgrp_children(cookie, parent, lgrp_array, lgrp_array_size) : -1; } + static int lgrp_resources(lgrp_cookie_t cookie, lgrp_id_t lgrp, + lgrp_id_t *lgrp_array, uint_t lgrp_array_size, + lgrp_rsrc_t type) { + return _lgrp_resources != NULL ? 
_lgrp_resources(cookie, lgrp, lgrp_array, lgrp_array_size, type) : -1; + } + static int lgrp_nlgrps(lgrp_cookie_t cookie) { return _lgrp_nlgrps != NULL ? _lgrp_nlgrps(cookie) : -1; } static int lgrp_cookie_stale(lgrp_cookie_t cookie) { return _lgrp_cookie_stale != NULL ? _lgrp_cookie_stale(cookie) : -1; --- old/hotspot/src/os/solaris/vm/os_solaris.inline.hpp 2009-08-01 04:08:55.407327940 +0100 +++ new/hotspot/src/os/solaris/vm/os_solaris.inline.hpp 2009-08-01 04:08:55.325282367 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)os_solaris.inline.hpp 1.63 07/06/29 04:05:41 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -208,3 +208,5 @@ return _result; \ } while(false) +inline bool os::numa_has_static_binding() { return false; } +inline bool os::numa_has_group_homing() { return true; } --- old/hotspot/src/os/windows/vm/globals_windows.hpp 2009-08-01 04:08:56.251377384 +0100 +++ new/hotspot/src/os/windows/vm/globals_windows.hpp 2009-08-01 04:08:56.181818778 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)globals_windows.hpp 1.11 07/05/29 13:32:14 JVM" -#endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // @@ -40,7 +37,6 @@ // platforms, but they may have different default values on other platforms. // define_pd_global(bool, UseLargePages, false); +define_pd_global(bool, UseLargePagesIndividualAllocation, true); define_pd_global(bool, UseOSErrorReporting, false); // for now. -define_pd_global(bool, UseThreadPriorities, true) ; - - +define_pd_global(bool, UseThreadPriorities, true) ; --- old/hotspot/src/os/windows/vm/hpi_windows.hpp 2009-08-01 04:08:57.125719405 +0100 +++ new/hotspot/src/os/windows/vm/hpi_windows.hpp 2009-08-01 04:08:57.043549179 +0100 @@ -1,6 +1,3 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)hpi_windows.hpp 1.17 07/05/05 17:04:44 JVM" -#endif /* * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // Win32 delegates these to the HPI. 
Solaris provides its own @@ -73,26 +70,26 @@ (int fd, struct sockaddr *him, int len), ("fd = %d, him = %p, len = %d", fd, him, len), (fd, him, len)); - + HPIDECL(accept, "accept", _socket, Accept, int, "%d", (int fd, struct sockaddr *him, int *len), ("fd = %d, him = %p, len = %p", fd, him, len), (fd, him, len)); - + HPIDECL(sendto, "sendto", _socket, SendTo, int, "%d", (int fd, char *buf, int len, int flags, struct sockaddr *to, int tolen), ("fd = %d, buf = %p, len = %d, flags = %d, to = %p, tolen = %d", fd, buf, len, flags, to, tolen), (fd, buf, len, flags, to, tolen)); - + HPIDECL(recvfrom, "recvfrom", _socket, RecvFrom, int, "%d", (int fd, char *buf, int nbytes, int flags, struct sockaddr *from, int *fromlen), ("fd = %d, buf = %p, len = %d, flags = %d, frm = %p, frmlen = %d", fd, buf, nbytes, flags, from, fromlen), (fd, buf, nbytes, flags, from, fromlen)); - + HPIDECL(recv, "recv", _socket, Recv, int, "%d", (int fd, char *buf, int nBytes, int flags), ("fd = %d, buf = %p, nBytes = %d, flags = %d", @@ -105,6 +102,10 @@ fd, buf, nBytes, flags), (fd, buf, nBytes, flags)); +inline int hpi::raw_send(int fd, char *buf, int nBytes, int flags) { + return send(fd, buf, nBytes, flags); +} + HPIDECL(timeout, "timeout", _socket, Timeout, int, "%d", (int fd, long timeout), ("fd = %d, timeout = %ld", fd, timeout), @@ -116,20 +117,20 @@ ("%s", name), (name)); -HPIDECL(socket_shutdown, "socket_shutdown", _socket, SocketShutdown, +HPIDECL(socket_shutdown, "socket_shutdown", _socket, SocketShutdown, int, "%d", (int fd, int howto), ("fd = %d, howto = %d", fd, howto), (fd, howto)); -HPIDECL(bind, "bind", _socket, Bind, +HPIDECL(bind, "bind", _socket, Bind, int, "%d", (int fd, struct sockaddr *him, int len), ("fd = %d, him = %p, len = %d", fd, him, len), (fd, him, len)); -HPIDECL(get_sock_name, "get_sock_name", _socket, GetSocketName, +HPIDECL(get_sock_name, "get_sock_name", _socket, GetSocketName, int, "%d", (int fd, struct sockaddr *him, int *len), ("fd = %d, him = %p, len = %p", @@ -142,7 +143,7 @@ hostname, namelen), (hostname, namelen)); -HPIDECL(get_host_by_addr, "get_host_by_addr", _socket, GetHostByAddr, +HPIDECL(get_host_by_addr, "get_host_by_addr", _socket, GetHostByAddr, struct hostent *, "(struct hostent *)%p", (const char* name, int len, int type), ("name = %p, len = %d, type = %d", @@ -161,10 +162,9 @@ fd, level, optname, optval, optlen), (fd, level, optname, optval, optlen)); -HPIDECL(get_proto_by_name, "get_proto_by_name", _socket, GetProtoByName, +HPIDECL(get_proto_by_name, "get_proto_by_name", _socket, GetProtoByName, struct protoent *, "(struct protoent *)%p", (char* name), ("name = %p", name), (name)); - --- old/hotspot/src/os/windows/vm/os_windows.cpp 2009-08-01 04:08:58.003098245 +0100 +++ new/hotspot/src/os/windows/vm/os_windows.cpp 2009-08-01 04:08:57.900568645 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)os_windows.cpp 1.535 07/11/15 10:56:43 JVM" -#endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ #ifdef _WIN64 @@ -62,7 +59,7 @@ #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) // For DLL loading/load error detection -// Values of PE COFF +// Values of PE COFF #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c #define IMAGE_FILE_SIGNATURE_LENGTH 4 @@ -118,7 +115,7 @@ static inline double fileTimeAsDouble(FILETIME* time) { const double high = (double) ((unsigned int) ~0); const double split = 10000000.0; - double result = (time->dwLowDateTime / split) + + double result = (time->dwLowDateTime / split) + time->dwHighDateTime * (high/split); return result; } @@ -158,8 +155,8 @@ if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) { os::jvm_path(home_dir, sizeof(home_dir)); - // Found the full path to jvm[_g].dll. - // Now cut the path to /jre if we can. + // Found the full path to jvm[_g].dll. + // Now cut the path to /jre if we can. *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */ pslash = strrchr(home_dir, '\\'); if (pslash != NULL) { @@ -184,7 +181,7 @@ Arguments::set_dll_dir(dll_path); if (!set_boot_path('\\', ';')) - return; + return; } /* library_path */ @@ -196,7 +193,7 @@ * * 1. The directory from which application is loaded. * 2. The current directory - * 3. The system wide Java Extensions directory (Java only) + * 3. The system wide Java Extensions directory (Java only) * 4. System directory (GetSystemDirectory) * 5. Windows directory (GetWindowsDirectory) * 6. The PATH environment variable @@ -206,8 +203,8 @@ char tmp[MAX_PATH]; char *path_str = ::getenv("PATH"); - library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + - sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10); + library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + + sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10); library_path[0] = '\0'; @@ -229,7 +226,7 @@ GetWindowsDirectory(tmp, sizeof(tmp)); strcat(library_path, ";"); strcat(library_path, tmp); - + if (path_str) { strcat(library_path, ";"); strcat(library_path, path_str); @@ -241,11 +238,11 @@ /* Default extensions directory */ { - char path[MAX_PATH]; - char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; - GetWindowsDirectory(path, MAX_PATH); - sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, - path, PACKAGE_DIR, EXT_DIR); + char path[MAX_PATH]; + char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; + GetWindowsDirectory(path, MAX_PATH); + sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, + path, PACKAGE_DIR, EXT_DIR); Arguments::set_ext_dirs(buf); } #undef EXT_DIR @@ -264,7 +261,7 @@ #ifndef _WIN64 SetUnhandledExceptionFilter(Handle_FLT_Exception); -#endif +#endif // Done return; @@ -292,7 +289,7 @@ // os::current_stack_base() // // Returns the base of the stack, which is the stack's -// starting address. This function must be called +// starting address. This function must be called // while running on the stack of the thread being queried. 
address os::current_stack_base() { @@ -330,6 +327,14 @@ return sz; } +struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { + const struct tm* time_struct_ptr = localtime(clock); + if (time_struct_ptr != NULL) { + *res = *time_struct_ptr; + return res; + } + return NULL; +} LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); @@ -360,7 +365,7 @@ thread->run(); } else { - // Install a win32 structured exception handler around every thread created + // Install a win32 structured exception handler around every thread created // by VM, so VM can genrate error dump when an exception occurred in non- // Java thread (e.g. VM thread). __try { @@ -421,15 +426,15 @@ &thread_h, THREAD_ALL_ACCESS, false, 0)) { fatal("DuplicateHandle failed\n"); } - OSThread* osthread = create_os_thread(thread, thread_h, - (int)current_thread_id()); + OSThread* osthread = create_os_thread(thread, thread_h, + (int)current_thread_id()); if (osthread == NULL) { return false; } // Initial thread state is RUNNABLE osthread->set_state(RUNNABLE); - + thread->set_osthread(osthread); return true; } @@ -440,7 +445,7 @@ #endif if (_starting_thread == NULL) { _starting_thread = create_os_thread(thread, main_thread, main_thread_id); - if (_starting_thread == NULL) { + if (_starting_thread == NULL) { return false; } } @@ -472,7 +477,7 @@ osthread->set_interrupted(false); thread->set_osthread(osthread); - + if (stack_size == 0) { switch (thr_type) { case os::java_thread: @@ -545,7 +550,7 @@ delete osthread; return NULL; } - + Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); // Store info on the Win32 thread into the OSThread @@ -584,7 +589,7 @@ jlong os::elapsed_counter() { - LARGE_INTEGER count; + LARGE_INTEGER count; if (has_performance_count) { QueryPerformanceCounter(&count); return as_long(count) - initial_performance_count; @@ -689,7 +694,7 @@ // Windows format: // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. // Java format: -// Java standards require the number of milliseconds since 1/1/1970 +// Java standards require the number of milliseconds since 1/1/1970 // Constant offset - calculated using offset() static jlong _offset = 116444736000000000; @@ -705,13 +710,13 @@ if (_has_calculated_offset) return _calculated_offset; SYSTEMTIME java_origin; java_origin.wYear = 1970; - java_origin.wMonth = 1; - java_origin.wDayOfWeek = 0; // ignored - java_origin.wDay = 1; - java_origin.wHour = 0; - java_origin.wMinute = 0; - java_origin.wSecond = 0; - java_origin.wMilliseconds = 0; + java_origin.wMonth = 1; + java_origin.wDayOfWeek = 0; // ignored + java_origin.wDay = 1; + java_origin.wHour = 0; + java_origin.wMinute = 0; + java_origin.wSecond = 0; + java_origin.wMilliseconds = 0; FILETIME jot; if (!SystemTimeToFileTime(&java_origin, &jot)) { fatal1("Error = %d\nWindows error", GetLastError()); @@ -735,35 +740,39 @@ FILETIME java_to_windows_time(jlong l) { jlong a = (l * 10000) + offset(); FILETIME result; - result.dwHighDateTime = high(a); + result.dwHighDateTime = high(a); result.dwLowDateTime = low(a); return result; } -jlong os::timeofday() { - FILETIME wt; - GetSystemTimeAsFileTime(&wt); - return windows_to_java_time(wt); -} +// For now, we say that Windows does not support vtime. I have no idea +// whether it can actually be made to (DLD, 9/13/05). 
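The time code above converts between Windows FILETIME values (100-nanosecond ticks since January 1, 1601) and Java milliseconds since January 1, 1970 using the 116444736000000000 offset constant. A minimal sketch of the FILETIME-to-Java-milliseconds direction, which the reworked javaTimeMillis() below relies on, is given here; the helper names are illustrative only.

#include <windows.h>

// FILETIME counts 100 ns intervals since Jan 1, 1601; Java wants
// milliseconds since Jan 1, 1970. 116444736000000000 is the distance
// between the two epochs in 100 ns units (the same constant as above).
static long long filetime_to_java_millis(const FILETIME& ft) {
  long long t = ((long long) ft.dwHighDateTime << 32) | ft.dwLowDateTime;
  return (t - 116444736000000000LL) / 10000;     // 100 ns ticks -> ms
}

static long long current_java_millis() {
  FILETIME wt;
  GetSystemTimeAsFileTime(&wt);
  return filetime_to_java_millis(wt);
}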
+bool os::supports_vtime() { return false; } +bool os::enable_vtime() { return false; } +bool os::vtime_enabled() { return false; } +double os::elapsedVTime() { + // better than nothing, but not much + return elapsedTime(); +} -// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis -// _use_global_time is only set if CacheTimeMillis is true jlong os::javaTimeMillis() { if (UseFakeTimers) { return fake_time++; } else { - return (_use_global_time ? read_global_time() : timeofday()); + FILETIME wt; + GetSystemTimeAsFileTime(&wt); + return windows_to_java_time(wt); } } #define NANOS_PER_SEC CONST64(1000000000) #define NANOS_PER_MILLISEC 1000000 jlong os::javaTimeNanos() { - if (!has_performance_count) { + if (!has_performance_count) { return javaTimeMillis() * NANOS_PER_MILLISEC; // the best we can do. } else { - LARGE_INTEGER current_count; + LARGE_INTEGER current_count; QueryPerformanceCounter(¤t_count); double current = as_long(current_count); double freq = performance_frequency; @@ -773,14 +782,14 @@ } void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { - if (!has_performance_count) { + if (!has_performance_count) { // javaTimeMillis() doesn't have much percision, // but it is not going to wrap -- so all 64 bits - info_ptr->max_value = ALL_64_BITS; + info_ptr->max_value = ALL_64_BITS; // this is a wall clock timer, so may skip - info_ptr->may_skip_backward = true; - info_ptr->may_skip_forward = true; + info_ptr->may_skip_backward = true; + info_ptr->may_skip_forward = true; } else { jlong freq = performance_frequency; if (freq < NANOS_PER_SEC) { @@ -799,8 +808,8 @@ } // using a counter, so no skipping - info_ptr->may_skip_backward = false; - info_ptr->may_skip_forward = false; + info_ptr->may_skip_backward = false; + info_ptr->may_skip_forward = false; } info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time } @@ -815,7 +824,7 @@ bool os::getTimesSecs(double* process_real_time, double* process_user_time, - double* process_system_time) { + double* process_system_time) { HANDLE h_process = GetCurrentProcess(); FILETIME create_time, exit_time, kernel_time, user_time; BOOL result = GetProcessTimes(h_process, @@ -838,7 +847,7 @@ } } -void os::shutdown() { +void os::shutdown() { // allow PerfMemory to attempt cleanup of any persistent resources perfMemory_exit(); @@ -866,7 +875,7 @@ } // Directory routines copied from src/win32/native/java/io/dirent_md.c -// * dirent_md.c 1.15 00/02/02 +// * dirent_md.c 1.15 00/02/02 // // The declarations for DIR and struct dirent are in jvm_win32.h. @@ -876,14 +885,14 @@ DIR * os::opendir(const char *dirname) { - assert(dirname != NULL, "just checking"); // hotspot change + assert(dirname != NULL, "just checking"); // hotspot change DIR *dirp = (DIR *)malloc(sizeof(DIR)); - DWORD fattr; // hotspot change + DWORD fattr; // hotspot change char alt_dirname[4] = { 0, 0, 0, 0 }; if (dirp == 0) { - errno = ENOMEM; - return 0; + errno = ENOMEM; + return 0; } /* @@ -892,52 +901,52 @@ * prepend the current drive name. 
*/ if (dirname[1] == '\0' && dirname[0] == '\\') { - alt_dirname[0] = _getdrive() + 'A' - 1; - alt_dirname[1] = ':'; - alt_dirname[2] = '\\'; - alt_dirname[3] = '\0'; - dirname = alt_dirname; + alt_dirname[0] = _getdrive() + 'A' - 1; + alt_dirname[1] = ':'; + alt_dirname[2] = '\\'; + alt_dirname[3] = '\0'; + dirname = alt_dirname; } dirp->path = (char *)malloc(strlen(dirname) + 5); if (dirp->path == 0) { - free(dirp); - errno = ENOMEM; - return 0; + free(dirp); + errno = ENOMEM; + return 0; } strcpy(dirp->path, dirname); fattr = GetFileAttributes(dirp->path); if (fattr == 0xffffffff) { - free(dirp->path); - free(dirp); - errno = ENOENT; - return 0; + free(dirp->path); + free(dirp); + errno = ENOENT; + return 0; } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) { - free(dirp->path); - free(dirp); - errno = ENOTDIR; - return 0; + free(dirp->path); + free(dirp); + errno = ENOTDIR; + return 0; } /* Append "*.*", or possibly "\\*.*", to path */ if (dirp->path[1] == ':' - && (dirp->path[2] == '\0' - || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) { - /* No '\\' needed for cases like "Z:" or "Z:\" */ - strcat(dirp->path, "*.*"); + && (dirp->path[2] == '\0' + || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) { + /* No '\\' needed for cases like "Z:" or "Z:\" */ + strcat(dirp->path, "*.*"); } else { - strcat(dirp->path, "\\*.*"); + strcat(dirp->path, "\\*.*"); } dirp->handle = FindFirstFile(dirp->path, &dirp->find_data); if (dirp->handle == INVALID_HANDLE_VALUE) { if (GetLastError() != ERROR_FILE_NOT_FOUND) { - free(dirp->path); - free(dirp); - errno = EACCES; - return 0; - } + free(dirp->path); + free(dirp); + errno = EACCES; + return 0; + } } return dirp; } @@ -947,20 +956,20 @@ struct dirent * os::readdir(DIR *dirp, dirent *dbuf) { - assert(dirp != NULL, "just checking"); // hotspot change + assert(dirp != NULL, "just checking"); // hotspot change if (dirp->handle == INVALID_HANDLE_VALUE) { - return 0; + return 0; } strcpy(dirp->dirent.d_name, dirp->find_data.cFileName); if (!FindNextFile(dirp->handle, &dirp->find_data)) { - if (GetLastError() == ERROR_INVALID_HANDLE) { - errno = EBADF; - return 0; - } - FindClose(dirp->handle); - dirp->handle = INVALID_HANDLE_VALUE; + if (GetLastError() == ERROR_INVALID_HANDLE) { + errno = EBADF; + return 0; + } + FindClose(dirp->handle); + dirp->handle = INVALID_HANDLE_VALUE; } return &dirp->dirent; @@ -969,13 +978,13 @@ int os::closedir(DIR *dirp) { - assert(dirp != NULL, "just checking"); // hotspot change + assert(dirp != NULL, "just checking"); // hotspot change if (dirp->handle != INVALID_HANDLE_VALUE) { - if (!FindClose(dirp->handle)) { - errno = EBADF; - return -1; - } - dirp->handle = INVALID_HANDLE_VALUE; + if (!FindClose(dirp->handle)) { + errno = EBADF; + return -1; + } + dirp->handle = INVALID_HANDLE_VALUE; } free(dirp->path); free(dirp); @@ -995,6 +1004,63 @@ } } +static bool file_exists(const char* filename) { + if (filename == NULL || strlen(filename) == 0) { + return false; + } + return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES; +} + +void os::dll_build_name(char *buffer, size_t buflen, + const char* pname, const char* fname) { + // Copied from libhpi + const size_t pnamelen = pname ? strlen(pname) : 0; + const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0; + + // Quietly truncates on buffer overflow. Should be an error. 
+ if (pnamelen + strlen(fname) + 10 > buflen) { + *buffer = '\0'; + return; + } + + if (pnamelen == 0) { + jio_snprintf(buffer, buflen, "%s.dll", fname); + } else if (c == ':' || c == '\\') { + jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname); + } else if (strchr(pname, *os::path_separator()) != NULL) { + int n; + char** pelements = split_path(pname, &n); + for (int i = 0 ; i < n ; i++) { + char* path = pelements[i]; + // Really shouldn't be NULL, but check can't hurt + size_t plen = (path == NULL) ? 0 : strlen(path); + if (plen == 0) { + continue; // skip the empty path values + } + const char lastchar = path[plen - 1]; + if (lastchar == ':' || lastchar == '\\') { + jio_snprintf(buffer, buflen, "%s%s.dll", path, fname); + } else { + jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname); + } + if (file_exists(buffer)) { + break; + } + } + // release the storage + for (int i = 0 ; i < n ; i++) { + if (pelements[i] != NULL) { + FREE_C_HEAP_ARRAY(char, pelements[i]); + } + } + if (pelements != NULL) { + FREE_C_HEAP_ARRAY(char*, pelements); + } + } else { + jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname); + } +} + // Needs to be in os specific directory because windows requires another // header file const char* os::get_current_directory(char *buf, int buflen) { @@ -1008,7 +1074,7 @@ // PSAPI functions, for Windows NT, 2000, XP -// psapi.h doesn't come with Visual Studio 6; it can be downloaded as Platform +// psapi.h doesn't come with Visual Studio 6; it can be downloaded as Platform // SDK from Microsoft. Here are the definitions copied from psapi.h typedef struct _MODULEINFO { LPVOID lpBaseOfDll; @@ -1093,12 +1159,12 @@ // Enumerate all modules for a given process ID // -// Notice that Windows 95/98/Me and Windows NT/2000/XP have +// Notice that Windows 95/98/Me and Windows NT/2000/XP have // different API for doing this. We use PSAPI.DLL on NT based // Windows and ToolHelp on 95/98/Me. // Callback function that is called by enumerate_modules() on -// every DLL module. +// every DLL module. 
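The dll_build_name() added above probes each element of a PATH-like list until a constructed "<dir>\<name>.dll" actually exists on disk. Below is a simplified sketch of that probe loop using the C runtime instead of HotSpot's split_path()/jio_snprintf helpers; it ignores the special handling of trailing ':' or '\' separators, and build_dll_name() is an illustrative name.

#include <windows.h>
#include <stdio.h>
#include <string.h>

// Probe each ';'-separated directory in 'search' for <dir>\<fname>.dll and
// leave the first existing candidate in 'buffer' (empty string if none).
static void build_dll_name(char* buffer, size_t buflen,
                           const char* search, const char* fname) {
  buffer[0] = '\0';
  char paths[4096];
  strncpy(paths, search, sizeof(paths) - 1);
  paths[sizeof(paths) - 1] = '\0';
  char* ctx = NULL;
  for (char* dir = strtok_s(paths, ";", &ctx); dir != NULL;
       dir = strtok_s(NULL, ";", &ctx)) {
    _snprintf(buffer, buflen, "%s\\%s.dll", dir, fname);
    buffer[buflen - 1] = '\0';                   // _snprintf may not terminate
    if (GetFileAttributes(buffer) != INVALID_FILE_ATTRIBUTES) {
      return;                                    // found an existing file
    }
  }
  buffer[0] = '\0';                              // nothing found
}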
// Input parameters: // int pid, // char* module_file_name, @@ -1107,7 +1173,7 @@ // void* param typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *); -// enumerate_modules for Windows NT, using PSAPI +// enumerate_modules for Windows NT, using PSAPI static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param) { HANDLE hProcess ; @@ -1124,7 +1190,7 @@ if (hProcess == NULL) return 0; DWORD size_needed; - if (!_EnumProcessModules(hProcess, modules, + if (!_EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) { CloseHandle( hProcess ); return 0; @@ -1148,7 +1214,7 @@ } // Invoke callback function - result = func(pid, filename, (address)modinfo.lpBaseOfDll, + result = func(pid, filename, (address)modinfo.lpBaseOfDll, modinfo.SizeOfImage, param); if (result) break; } @@ -1179,7 +1245,7 @@ while( not_done ) { // invoke the callback - result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr, + result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr, modentry.modBaseSize, param); if (result) break; @@ -1207,12 +1273,12 @@ address base_addr; }; -static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr, +static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr, unsigned size, void * param) { struct _modinfo *pmod = (struct _modinfo *)param; if (!pmod) return -1; - if (base_addr <= pmod->addr && + if (base_addr <= pmod->addr && base_addr+size > pmod->addr) { // if a buffer is provided, copy path name to the buffer if (pmod->full_path) { @@ -1228,7 +1294,7 @@ int buflen, int* offset) { // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always // return the full path to the DLL file, sometimes it returns path -// to the corresponding PDB file (debug info); sometimes it only +// to the corresponding PDB file (debug info); sometimes it only // returns partial path, which makes life painful. struct _modinfo mi; @@ -1250,7 +1316,7 @@ bool os::dll_address_to_function_name(address addr, char *buf, int buflen, int *offset) { // Unimplemented on Windows - in order to use SymGetSymFromAddr(), - // we need to initialize imagehlp/dbghelp, then load symbol table + // we need to initialize imagehlp/dbghelp, then load symbol table // for every module. That's too much work to do after a fatal error. // For an example on how to implement this function, see 1.4.2. 
if (offset) *offset = -1; @@ -1258,8 +1324,12 @@ return false; } +void* os::dll_lookup(void* handle, const char* name) { + return GetProcAddress((HMODULE)handle, name); +} + // save the start and end address of jvm.dll into param[0] and param[1] -static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr, +static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr, unsigned size, void * param) { if (!param) return -1; @@ -1288,7 +1358,7 @@ } // print module info; param is outputStream* -static int _print_module(int pid, char* fname, address base, +static int _print_module(int pid, char* fname, address base, unsigned size, void* param) { if (!param) return -1; @@ -1299,23 +1369,23 @@ return 0; } -// Loads .dll/.so and +// Loads .dll/.so and // in case of error it checks if .dll/.so was built for the // same architecture as Hotspot is running on void * os::dll_load(const char *name, char *ebuf, int ebuflen) { void * result = LoadLibrary(name); - if (result != NULL) + if (result != NULL) { return result; } - + long errcode = GetLastError(); if (errcode == ERROR_MOD_NOT_FOUND) { strncpy(ebuf, "Can't find dependent libraries", ebuflen-1); - ebuf[ebuflen-1]='\0'; + ebuf[ebuflen-1]='\0'; return NULL; - } + } // Parsing dll below // If we can read dll-info and find that dll was built @@ -1326,17 +1396,17 @@ // Read system error message into ebuf // It may or may not be overwritten below (in the for loop and just above) getLastErrorString(ebuf, (size_t) ebuflen); - ebuf[ebuflen-1]='\0'; + ebuf[ebuflen-1]='\0'; int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0); - if (file_descriptor<0) + if (file_descriptor<0) { return NULL; } uint32_t signature_offset; - uint16_t lib_arch=0; + uint16_t lib_arch=0; bool failed_to_get_lib_arch= - ( + ( //Go to position 3c in the dll (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0) || @@ -1344,15 +1414,15 @@ (sizeof(signature_offset)!= (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset)))) || - //Go to COFF File Header in dll + //Go to COFF File Header in dll //that is located after"signature" (4 bytes long) - (os::seek_to_file_offset(file_descriptor, - signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0) + (os::seek_to_file_offset(file_descriptor, + signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0) || - //Read field that contains code of architecture + //Read field that contains code of architecture // that dll was build for (sizeof(lib_arch)!= - (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch)))) + (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch)))) ); ::close(file_descriptor); @@ -1365,21 +1435,21 @@ typedef struct { uint16_t arch_code; - char* arch_name; + char* arch_name; } arch_t; - static const arch_t arch_array[]={ + static const arch_t arch_array[]={ {IMAGE_FILE_MACHINE_I386, (char*)"IA 32"}, {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"}, {IMAGE_FILE_MACHINE_IA64, (char*)"IA 64"} }; #if (defined _M_IA64) - static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64; + static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64; #elif (defined _M_AMD64) - static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64; + static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64; #elif (defined _M_IX86) - static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386; - #else + static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386; + #else #error Method os::dll_load requires that one of following \ is defined :_M_IA64,_M_AMD64 or _M_IX86 #endif @@ -1391,18 +1461,18 @@ 
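The architecture check in os::dll_load() above reads two fields of the PE image: the 32-bit value at file offset 0x3c (IMAGE_FILE_PTR_TO_SIGNATURE) gives the position of the "PE\0\0" signature, and the 16-bit COFF machine field follows immediately after that 4-byte signature. A stand-alone sketch of that read using plain CRT file I/O is shown below; pe_machine_type() is an illustrative name.

#include <stdio.h>
#include <stdint.h>

// Return the COFF machine code of a PE file (e.g. 0x014c for IA-32,
// 0x8664 for AMD64), or 0 if the file cannot be parsed.
static uint16_t pe_machine_type(const char* path) {
  FILE* f = fopen(path, "rb");
  if (f == NULL) return 0;
  uint32_t sig_offset = 0;
  uint16_t machine = 0;
  // Offset 0x3c holds the file offset of the "PE\0\0" signature.
  if (fseek(f, 0x3c, SEEK_SET) == 0 &&
      fread(&sig_offset, sizeof(sig_offset), 1, f) == 1 &&
      // The machine field is the first 16-bit word after the 4-byte signature.
      fseek(f, sig_offset + 4, SEEK_SET) == 0 &&
      fread(&machine, sizeof(machine), 1, f) == 1) {
    fclose(f);
    return machine;
  }
  fclose(f);
  return 0;
}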
char *running_arch_str=NULL,*lib_arch_str=NULL; for (unsigned int i=0;iprint("OS:"); - - OSVERSIONINFOEX osvi; - ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); - osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); +// function pointer to Windows API "GetNativeSystemInfo". +typedef void (WINAPI *GetNativeSystemInfo_func_type)(LPSYSTEM_INFO); +static GetNativeSystemInfo_func_type _GetNativeSystemInfo; - if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { - st->print_cr("N/A"); - return; - } +void os::print_os_info(outputStream* st) { + st->print("OS:"); - int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion; + OSVERSIONINFOEX osvi; + ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); + osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); - if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) { - switch (os_vers) { - case 3051: st->print(" Windows NT 3.51"); break; - case 4000: st->print(" Windows NT 4.0"); break; - case 5000: st->print(" Windows 2000"); break; - case 5001: st->print(" Windows XP"); break; - case 5002: st->print(" Windows Server 2003 family"); break; - case 6000: st->print(" Windows Vista"); break; - default: // future windows, print out its major and minor versions - st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); - } - } else { - switch (os_vers) { - case 4000: st->print(" Windows 95"); break; - case 4010: st->print(" Windows 98"); break; - case 4090: st->print(" Windows Me"); break; - default: // future windows, print out its major and minor versions - st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); - } - } + if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { + st->print_cr("N/A"); + return; + } - st->print(" Build %d", osvi.dwBuildNumber); - st->print(" %s", osvi.szCSDVersion); // service pack - st->cr(); + int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion; + if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) { + switch (os_vers) { + case 3051: st->print(" Windows NT 3.51"); break; + case 4000: st->print(" Windows NT 4.0"); break; + case 5000: st->print(" Windows 2000"); break; + case 5001: st->print(" Windows XP"); break; + case 5002: + case 6000: + case 6001: { + // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could + // find out whether we are running on 64 bit processor or not. + SYSTEM_INFO si; + ZeroMemory(&si, sizeof(SYSTEM_INFO)); + // Check to see if _GetNativeSystemInfo has been initialized. 
+ if (_GetNativeSystemInfo == NULL) { + HMODULE hKernel32 = GetModuleHandle(TEXT("kernel32.dll")); + _GetNativeSystemInfo = + CAST_TO_FN_PTR(GetNativeSystemInfo_func_type, + GetProcAddress(hKernel32, + "GetNativeSystemInfo")); + if (_GetNativeSystemInfo == NULL) + GetSystemInfo(&si); + } else { + _GetNativeSystemInfo(&si); + } + if (os_vers == 5002) { + if (osvi.wProductType == VER_NT_WORKSTATION && + si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) + st->print(" Windows XP x64 Edition"); + else + st->print(" Windows Server 2003 family"); + } else if (os_vers == 6000) { + if (osvi.wProductType == VER_NT_WORKSTATION) + st->print(" Windows Vista"); + else + st->print(" Windows Server 2008"); + if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) + st->print(" , 64 bit"); + } else { // os_vers == 6001 + if (osvi.wProductType == VER_NT_WORKSTATION) { + st->print(" Windows 7"); + } else { + // Unrecognized windows, print out its major and minor versions + st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); + } + if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) + st->print(" , 64 bit"); + } + break; + } + default: // future windows, print out its major and minor versions + st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); + } + } else { + switch (os_vers) { + case 4000: st->print(" Windows 95"); break; + case 4010: st->print(" Windows 98"); break; + case 4090: st->print(" Windows Me"); break; + default: // future windows, print out its major and minor versions + st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); + } + } + st->print(" Build %d", osvi.dwBuildNumber); + st->print(" %s", osvi.szCSDVersion); // service pack + st->cr(); } void os::print_memory_info(outputStream* st) { @@ -1493,7 +1607,7 @@ st->print("siginfo:"); st->print(" ExceptionCode=0x%x", er->ExceptionCode); - if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && + if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && er->NumberParameters >= 2) { switch (er->ExceptionInformation[0]) { case 0: st->print(", reading address"); break; @@ -1569,7 +1683,7 @@ static void UserHandler(int sig, void *siginfo, void *context) { os::signal_notify(sig); - // We need to reinstate the signal handler each time... + // We need to reinstate the signal handler each time... os::signal(sig, (void*)UserHandler); } @@ -1644,7 +1758,7 @@ void os::signal_init_pd() { // Initialize signal structures memset((void*)pending_signals, 0, sizeof(pending_signals)); - + sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL); // Programs embedding the VM do not want it to attempt to receive @@ -1660,7 +1774,7 @@ // (=ReduceSignalUsage) is specified. This means, for example, that // the CTRL-BREAK thread dump mechanism is also disabled in this // case. See bugs 4323062, 4345157, and related bugs. 
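print_os_info() above resolves GetNativeSystemInfo at runtime because the call does not exist on older Windows releases, and degrades to GetSystemInfo() when the lookup fails; that is how a 32-bit VM can still tell it is running on a 64-bit edition of XP/2003. The sketch below isolates that resolve-once pattern; native_system_info() is an illustrative wrapper name.

#include <windows.h>

typedef void (WINAPI *GetNativeSystemInfo_fn)(LPSYSTEM_INFO);

// Fill 'si' with the native system information, falling back to
// GetSystemInfo() on Windows versions that lack GetNativeSystemInfo.
static void native_system_info(SYSTEM_INFO* si) {
  static GetNativeSystemInfo_fn fn = NULL;
  if (fn == NULL) {
    HMODULE k32 = GetModuleHandle(TEXT("kernel32.dll"));
    fn = (GetNativeSystemInfo_fn) GetProcAddress(k32, "GetNativeSystemInfo");
  }
  ZeroMemory(si, sizeof(SYSTEM_INFO));
  if (fn != NULL) {
    fn(si);               // reports the real (possibly 64-bit) architecture
  } else {
    GetSystemInfo(si);    // fallback for pre-XP/2003 systems
  }
}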
- + if (!ReduceSignalUsage) { // Add a CTRL-C handler SetConsoleCtrlHandler(consoleHandler, TRUE); @@ -1673,7 +1787,7 @@ Atomic::inc(&pending_signals[signal_number]); ret = ::ReleaseSemaphore(sig_sem, 1, NULL); assert(ret != 0, "ReleaseSemaphore() failed"); -} +} static int check_pending_signals(bool wait_for_signal) { DWORD ret; @@ -1733,15 +1847,15 @@ #ifdef _M_IA64 thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->StIIP); // Set pc to handler - exceptionInfo->ContextRecord->StIIP = (DWORD64)handler; + exceptionInfo->ContextRecord->StIIP = (DWORD64)handler; #elif _M_AMD64 thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Rip); // Set pc to handler - exceptionInfo->ContextRecord->Rip = (DWORD64)handler; + exceptionInfo->ContextRecord->Rip = (DWORD64)handler; #else thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Eip); // Set pc to handler - exceptionInfo->ContextRecord->Eip = (LONG)handler; + exceptionInfo->ContextRecord->Eip = (LONG)handler; #endif // Continue the execution @@ -1875,9 +1989,9 @@ /* On Windows, the mxcsr control bits are non-volatile across calls See also CR 6192333 - If EXCEPTION_FLT_* happened after some native method modified + If EXCEPTION_FLT_* happened after some native method modified mxcsr - it is not a jvm fault. - However should we decide to restore of mxcsr after a faulty + However should we decide to restore of mxcsr after a faulty native method we can uncomment following code jint MxCsr = INITIAL_MXCSR; // we can't use StubRoutines::addr_mxcsr_std() @@ -1913,7 +2027,7 @@ //----------------------------------------------------------------------------- LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { - if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; + if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; #ifdef _M_IA64 address pc = (address) exceptionInfo->ContextRecord->StIIP; @@ -1933,10 +2047,10 @@ PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; address addr = (address) exceptionRecord->ExceptionInformation[1]; - + if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { int page_size = os::vm_page_size(); - + // Make sure the pc and the faulting address are sane. // // If an instruction spans a page boundary, and the page containing @@ -1945,7 +2059,7 @@ // different - we still want to unguard the 2nd page in this case. // // 15 bytes seems to be a (very) safe value for max instruction size. 
- bool pc_is_near_addr = + bool pc_is_near_addr = (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); bool instr_spans_page_boundary = (align_size_down((intptr_t) pc ^ (intptr_t) addr, @@ -1958,12 +2072,13 @@ // In conservative mode, don't unguard unless the address is in the VM if (UnguardOnExecutionViolation > 0 && addr != last_addr && (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { - - // Unguard and retry + + // Set memory to RWX and retry address page_start = (address) align_size_down((intptr_t) addr, (intptr_t) page_size); - bool res = os::unguard_memory((char*) page_start, page_size); - + bool res = os::protect_memory((char*) page_start, page_size, + os::MEM_PROT_RWX); + if (PrintMiscellaneous && Verbose) { char buf[256]; jio_snprintf(buf, sizeof(buf), "Execution protection violation " @@ -1975,12 +2090,12 @@ // Set last_addr so if we fault again at the same address, we don't // end up in an endless loop. - // + // // There are two potential complications here. Two threads trapping // at the same address at the same time could cause one of the // threads to think it already unguarded, and abort the VM. Likely // very rare. - // + // // The other race involves two threads alternately trapping at // different addresses and failing to unguard the page, resulting in // an endless loop. This condition is probably even more unlikely @@ -2032,26 +2147,26 @@ if (os::uses_stack_guard_pages()) { #ifdef _M_IA64 // - // If it's a legal stack address continue, Windows will map it in. + // If it's a legal stack address continue, Windows will map it in. // PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; address addr = (address) exceptionRecord->ExceptionInformation[1]; - if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) - return EXCEPTION_CONTINUE_EXECUTION; + if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) + return EXCEPTION_CONTINUE_EXECUTION; // The register save area is the same size as the memory stack // and starts at the page just above the start of the memory stack. // If we get a fault in this area, we've run out of register // stack. If we are in java, try throwing a stack overflow exception. - if (addr > thread->stack_base() && + if (addr > thread->stack_base() && addr <= (thread->stack_base()+thread->stack_size()) ) { - char buf[256]; - jio_snprintf(buf, sizeof(buf), - "Register stack overflow, addr:%p, stack_base:%p\n", - addr, thread->stack_base() ); - tty->print_raw_cr(buf); - // If not in java code, return and hope for the best. - return in_java ? Handle_Exception(exceptionInfo, + char buf[256]; + jio_snprintf(buf, sizeof(buf), + "Register stack overflow, addr:%p, stack_base:%p\n", + addr, thread->stack_base() ); + tty->print_raw_cr(buf); + // If not in java code, return and hope for the best. + return in_java ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) : EXCEPTION_CONTINUE_EXECUTION; } @@ -2061,12 +2176,12 @@ // zone page for us. Note: must call disable_stack_yellow_zone to // update the enabled status, even if the zone contains only one page. thread->disable_stack_yellow_zone(); - // If not in java code, return and hope for the best. - return in_java ? Handle_Exception(exceptionInfo, + // If not in java code, return and hope for the best. + return in_java ? 
Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) : EXCEPTION_CONTINUE_EXECUTION; } else { - // Fatal red zone violation. + // Fatal red zone violation. thread->disable_stack_red_zone(); tty->print_raw_cr("An unrecoverable stack overflow has occurred."); report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, @@ -2082,28 +2197,28 @@ } else { // Can only return and hope for the best. Further stack growth will // result in an ACCESS_VIOLATION. - return EXCEPTION_CONTINUE_EXECUTION; + return EXCEPTION_CONTINUE_EXECUTION; } } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { // Either stack overflow or null pointer exception. if (in_java) { PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; - address addr = (address) exceptionRecord->ExceptionInformation[1]; - address stack_end = thread->stack_base() - thread->stack_size(); - if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { - // Stack overflow. - assert(!os::uses_stack_guard_pages(), - "should be caught by red zone code above."); - return Handle_Exception(exceptionInfo, - SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); - } - // - // Check for safepoint polling and implicit null - // We only expect null pointers in the stubs (vtable) - // the rest are checked explicitly now. - // - CodeBlob* cb = CodeCache::find_blob(pc); - if (cb != NULL) { + address addr = (address) exceptionRecord->ExceptionInformation[1]; + address stack_end = thread->stack_base() - thread->stack_size(); + if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { + // Stack overflow. + assert(!os::uses_stack_guard_pages(), + "should be caught by red zone code above."); + return Handle_Exception(exceptionInfo, + SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); + } + // + // Check for safepoint polling and implicit null + // We only expect null pointers in the stubs (vtable) + // the rest are checked explicitly now. + // + CodeBlob* cb = CodeCache::find_blob(pc); + if (cb != NULL) { if (os::is_poll_address(addr)) { address stub = SharedRuntime::get_poll_stub(pc); return Handle_Exception(exceptionInfo, stub); @@ -2112,32 +2227,32 @@ { #ifdef _WIN64 // - // If it's a legal stack address map the entire region in + // If it's a legal stack address map the entire region in // PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; - address addr = (address) exceptionRecord->ExceptionInformation[1]; - if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) { - addr = (address)((uintptr_t)addr & + address addr = (address) exceptionRecord->ExceptionInformation[1]; + if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) { + addr = (address)((uintptr_t)addr & (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); os::commit_memory( (char *)addr, thread->stack_base() - addr ); - return EXCEPTION_CONTINUE_EXECUTION; + return EXCEPTION_CONTINUE_EXECUTION; } - else + else #endif { - // Null pointer exception. + // Null pointer exception. #ifdef _M_IA64 // We catch register stack overflows in compiled code by doing // an explicit compare and executing a st8(G0, G0) if the // BSP enters into our guard area. We test for the overflow - // condition and fall into the normal null pointer exception + // condition and fall into the normal null pointer exception // code if BSP hasn't overflowed. 
if ( in_java ) { if(thread->register_stack_overflow()) { - assert((address)exceptionInfo->ContextRecord->IntS3 == - thread->register_stack_limit(), + assert((address)exceptionInfo->ContextRecord->IntS3 == + thread->register_stack_limit(), "GR7 doesn't contain register_stack_limit"); - // Disable the yellow zone which sets the state that + // Disable the yellow zone which sets the state that // we've got a stack overflow problem. if (thread->stack_yellow_zone_enabled()) { thread->disable_stack_yellow_zone(); @@ -2146,27 +2261,22 @@ thread->disable_register_stack_guard(); // Update GR7 with the new limit so we can continue running // compiled code. - exceptionInfo->ContextRecord->IntS3 = + exceptionInfo->ContextRecord->IntS3 = (ULONGLONG)thread->register_stack_limit(); - return Handle_Exception(exceptionInfo, + return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); } else { - // - // Check for implicit null - // We only expect null pointers in the stubs (vtable) - // the rest are checked explicitly now. - // - CodeBlob* cb = CodeCache::find_blob(pc); - if (cb != NULL) { - if (VtableStubs::stub_containing(pc) != NULL) { - if (((uintptr_t)addr) < os::vm_page_size() ) { - // an access to the first page of VM--assume it is a null pointer - return Handle_Exception(exceptionInfo, - SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL)); - } - } - } - } + // + // Check for implicit null + // We only expect null pointers in the stubs (vtable) + // the rest are checked explicitly now. + // + if (((uintptr_t)addr) < os::vm_page_size() ) { + // an access to the first page of VM--assume it is a null pointer + address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); + if (stub != NULL) return Handle_Exception(exceptionInfo, stub); + } + } } // in_java // IA64 doesn't use implicit null checking yet. 
So we shouldn't @@ -2178,17 +2288,17 @@ #else /* !IA64 */ // Windows 98 reports faulting addresses incorrectly - if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) || + if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) || !os::win32::is_nt()) { - return Handle_Exception(exceptionInfo, - SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL)); - } + address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); + if (stub != NULL) return Handle_Exception(exceptionInfo, stub); + } report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, exceptionInfo->ContextRecord); return EXCEPTION_CONTINUE_SEARCH; #endif } - } + } } #ifdef _WIN64 @@ -2226,11 +2336,11 @@ case EXCEPTION_INT_OVERFLOW: return Handle_IDiv_Exception(exceptionInfo); - + } // switch } #ifndef _WIN64 - if ((thread->thread_state() == _thread_in_Java) || + if ((thread->thread_state() == _thread_in_Java) || (thread->thread_state() == _thread_in_native) ) { LONG result=Handle_FLT_Exception(exceptionInfo); @@ -2239,7 +2349,7 @@ #endif //_WIN64 } - if (exception_code != EXCEPTION_BREAKPOINT) { + if (exception_code != EXCEPTION_BREAKPOINT) { #ifndef _WIN64 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, exceptionInfo->ContextRecord); @@ -2338,10 +2448,10 @@ // // Note about Windows 2003: although the API supports committing large page // memory on a page-by-page basis and VirtualAlloc() returns success under this -// scenario, I found through experiment it only uses large page if the entire +// scenario, I found through experiment it only uses large page if the entire // memory region is reserved and committed in a single VirtualAlloc() call. // This makes Windows large page support more or less like Solaris ISM, in -// that the entire heap must be committed upfront. This probably will change +// that the entire heap must be committed upfront. This probably will change // in the future, if so the code below needs to be revisited. #ifndef MEM_LARGE_PAGES @@ -2349,10 +2459,10 @@ #endif // GetLargePageMinimum is only available on Windows 2003. The other functions -// are available on NT but not on Windows 98/Me. We have to resolve them at +// are available on NT but not on Windows 98/Me. We have to resolve them at // runtime. 
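Besides resolving GetLargePageMinimum and the token APIs at runtime (see the typedefs and request_lock_memory_privilege() just below), using MEM_LARGE_PAGES requires the process token to hold the lock-memory privilege. The sketch that follows shows the standard direct-call sequence for enabling SeLockMemoryPrivilege; it is a generic Windows reference, not HotSpot's exact routine, which goes through the dynamically resolved function pointers instead.

#include <windows.h>

// Enable SeLockMemoryPrivilege on the current process token; large page
// allocations fail without it. Returns false if the privilege could not
// actually be granted (e.g. the user lacks the "Lock pages in memory" right).
static bool enable_lock_memory_privilege() {
  HANDLE token;
  if (!OpenProcessToken(GetCurrentProcess(),
                        TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token)) {
    return false;
  }
  TOKEN_PRIVILEGES tp;
  tp.PrivilegeCount = 1;
  tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
  if (!LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME,
                            &tp.Privileges[0].Luid)) {
    CloseHandle(token);
    return false;
  }
  BOOL ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, NULL, NULL);
  // AdjustTokenPrivileges can return TRUE without granting the privilege.
  bool granted = ok && GetLastError() != ERROR_NOT_ALL_ASSIGNED;
  CloseHandle(token);
  return granted;
}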
typedef SIZE_T (WINAPI *GetLargePageMinimum_func_type) (void); -typedef BOOL (WINAPI *AdjustTokenPrivileges_func_type) +typedef BOOL (WINAPI *AdjustTokenPrivileges_func_type) (HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); typedef BOOL (WINAPI *OpenProcessToken_func_type) (HANDLE, DWORD, PHANDLE); typedef BOOL (WINAPI *LookupPrivilegeValue_func_type) (LPCTSTR, LPCTSTR, PLUID); @@ -2392,7 +2502,7 @@ } static bool request_lock_memory_privilege() { - _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, + _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, os::current_process_id()); LUID luid; @@ -2526,12 +2636,111 @@ return false; } +bool os::can_execute_large_page_memory() { + return true; +} + char* os::reserve_memory_special(size_t bytes) { - DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; - char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_READWRITE); - return res; + + if (UseLargePagesIndividualAllocation) { + if (TracePageSizes && Verbose) { + tty->print_cr("Reserving large pages individually."); + } + char * p_buf; + // first reserve enough address space in advance since we want to be + // able to break a single contiguous virtual address range into multiple + // large page commits but WS2003 does not allow reserving large page space + // so we just use 4K pages for reserve, this gives us a legal contiguous + // address space. then we will deallocate that reservation, and re alloc + // using large pages + const size_t size_of_reserve = bytes + _large_page_size; + if (bytes > size_of_reserve) { + // Overflowed. + warning("Individually allocated large pages failed, " + "use -XX:-UseLargePagesIndividualAllocation to turn off"); + return NULL; + } + p_buf = (char *) VirtualAlloc(NULL, + size_of_reserve, // size of Reserve + MEM_RESERVE, + PAGE_EXECUTE_READWRITE); + // If reservation failed, return NULL + if (p_buf == NULL) return NULL; + + release_memory(p_buf, bytes + _large_page_size); + // round up to page boundary. If the size_of_reserve did not + // overflow and the reservation did not fail, this align up + // should not overflow. + p_buf = (char *) align_size_up((size_t)p_buf, _large_page_size); + + // now go through and allocate one page at a time until all bytes are + // allocated + size_t bytes_remaining = align_size_up(bytes, _large_page_size); + // An overflow of align_size_up() would have been caught above + // in the calculation of size_of_reserve. + char * next_alloc_addr = p_buf; + +#ifdef ASSERT + // Variable for the failure injection + long ran_num = os::random(); + size_t fail_after = ran_num % bytes; +#endif + + while (bytes_remaining) { + size_t bytes_to_rq = MIN2(bytes_remaining, _large_page_size); + // Note allocate and commit + char * p_new; + +#ifdef ASSERT + bool inject_error = LargePagesIndividualAllocationInjectError && + (bytes_remaining <= fail_after); +#else + const bool inject_error = false; +#endif + + if (inject_error) { + p_new = NULL; + } else { + p_new = (char *) VirtualAlloc(next_alloc_addr, + bytes_to_rq, + MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, + PAGE_EXECUTE_READWRITE); + } + + if (p_new == NULL) { + // Free any allocated pages + if (next_alloc_addr > p_buf) { + // Some memory was committed so release it. 
+ size_t bytes_to_release = bytes - bytes_remaining; + release_memory(p_buf, bytes_to_release); + } +#ifdef ASSERT + if (UseLargePagesIndividualAllocation && + LargePagesIndividualAllocationInjectError) { + if (TracePageSizes && Verbose) { + tty->print_cr("Reserving large pages individually failed."); + } + } +#endif + return NULL; + } + bytes_remaining -= bytes_to_rq; + next_alloc_addr += bytes_to_rq; + } + + return p_buf; + + } else { + // normal policy just allocate it all at once + DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; + char * res = (char *)VirtualAlloc(NULL, + bytes, + flag, + PAGE_EXECUTE_READWRITE); + return res; + } } - + bool os::release_memory_special(char* base, size_t bytes) { return release_memory(base, bytes); } @@ -2569,25 +2778,49 @@ return VirtualFree(addr, 0, MEM_RELEASE) != 0; } -bool os::protect_memory(char* addr, size_t bytes) { +// Set protections specified +bool os::protect_memory(char* addr, size_t bytes, ProtType prot, + bool is_committed) { + unsigned int p = 0; + switch (prot) { + case MEM_PROT_NONE: p = PAGE_NOACCESS; break; + case MEM_PROT_READ: p = PAGE_READONLY; break; + case MEM_PROT_RW: p = PAGE_READWRITE; break; + case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; + default: + ShouldNotReachHere(); + } + DWORD old_status; - return VirtualProtect(addr, bytes, PAGE_READONLY, &old_status) != 0; + + // Strange enough, but on Win32 one can change protection only for committed + // memory, not a big deal anyway, as bytes less or equal than 64K + if (!is_committed && !commit_memory(addr, bytes)) { + fatal("cannot commit protection page"); + } + // One cannot use os::guard_memory() here, as on Win32 guard page + // have different (one-shot) semantics, from MSDN on PAGE_GUARD: + // + // Pages in the region become guard pages. Any attempt to access a guard page + // causes the system to raise a STATUS_GUARD_PAGE exception and turn off + // the guard page status. Guard pages thus act as a one-time access alarm. + return VirtualProtect(addr, bytes, p, &old_status) != 0; } bool os::guard_memory(char* addr, size_t bytes) { DWORD old_status; - return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE | PAGE_GUARD, &old_status) != 0; + return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0; } bool os::unguard_memory(char* addr, size_t bytes) { DWORD old_status; - return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &old_status) != 0; + return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0; } void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } void os::free_memory(char *addr, size_t bytes) { } void os::numa_make_global(char *addr, size_t bytes) { } -void os::numa_make_local(char *addr, size_t bytes) { } +void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { } bool os::numa_topology_changed() { return false; } size_t os::numa_get_groups_num() { return 1; } int os::numa_get_group_id() { return 0; } @@ -2626,7 +2859,7 @@ assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back } -size_t os::read(int fd, void *buf, unsigned int nBytes) { +size_t os::read(int fd, void *buf, unsigned int nBytes) { return ::read(fd, buf, nBytes); } @@ -2634,18 +2867,18 @@ // The default timer resolution seems to be 10 milliseconds. // (Where is this written down?) 
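The Windows os::protect_memory() above mirrors the Solaris version: a switch maps ProtType onto PAGE_* constants, with the extra wrinkle that VirtualProtect only operates on committed memory, so merely reserved regions are committed first. A stand-alone sketch of that logic is below; as before, the ProtKind enum and set_protection() name are illustrative.

#include <windows.h>

enum ProtKind { prot_none, prot_read, prot_rw, prot_rwx };

// Map the abstract protection onto PAGE_* flags and apply it.
// VirtualProtect requires committed pages, hence the MEM_COMMIT step
// for regions that are only reserved so far.
static bool set_protection(char* addr, size_t bytes, ProtKind kind,
                           bool is_committed) {
  DWORD p;
  switch (kind) {
    case prot_none: p = PAGE_NOACCESS;          break;
    case prot_read: p = PAGE_READONLY;          break;
    case prot_rw:   p = PAGE_READWRITE;         break;
    case prot_rwx:  p = PAGE_EXECUTE_READWRITE; break;
    default:        return false;
  }
  if (!is_committed &&
      VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
    return false;
  }
  DWORD old_status;
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}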
// If someone wants to sleep for only a fraction of the default, - // then we set the timer resolution down to 1 millisecond for + // then we set the timer resolution down to 1 millisecond for // the duration of their interval. - // We carefully set the resolution back, since otherwise we + // We carefully set the resolution back, since otherwise we // seem to incur an overhead (3%?) that we don't need. // CONSIDER: if ms is small, say 3, then we should run with a high resolution time. // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod(). // Alternatively, we could compute the relative error (503/500 = .6%) and only use - // timeBeginPeriod() if the relative error exceeded some threshold. + // timeBeginPeriod() if the relative error exceeded some threshold. // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and // to decreased efficiency related to increased timer "tick" rates. We want to minimize // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high - // resolution timers running. + // resolution timers running. private: jlong resolution; public: @@ -2677,7 +2910,7 @@ OSThread* osthread = thread->osthread(); OSThreadWaitState osts(osthread, false /* not Object.wait() */); int result; - if (interruptable) { + if (interruptable) { assert(thread->is_Java_thread(), "must be java thread"); JavaThread *jt = (JavaThread *) thread; ThreadBlockInVM tbivm(jt); @@ -2687,7 +2920,7 @@ // java_suspend_self() via check_and_wait_while_suspended() HANDLE events[1]; - events[0] = osthread->interrupt_event(); + events[0] = osthread->interrupt_event(); HighResolutionInterval *phri=NULL; if(!ForceTimeHighResolution) phri = new HighResolutionInterval( ms ); @@ -2702,7 +2935,7 @@ // were we externally suspended while we were waiting? jt->check_and_wait_while_suspended(); - } else { + } else { assert(!thread->is_Java_thread(), "must not be java thread"); Sleep((long) ms); result = OS_TIMEOUT; @@ -2717,37 +2950,37 @@ } } -typedef BOOL (WINAPI * STTSignature)(void) ; +typedef BOOL (WINAPI * STTSignature)(void) ; -os::YieldResult os::NakedYield() { +os::YieldResult os::NakedYield() { // Use either SwitchToThread() or Sleep(0) - // Consider passing back the return value from SwitchToThread(). + // Consider passing back the return value from SwitchToThread(). // We use GetProcAddress() as ancient Win9X versions of windows doen't support SwitchToThread. - // In that case we revert to Sleep(0). - static volatile STTSignature stt = (STTSignature) 1 ; + // In that case we revert to Sleep(0). + static volatile STTSignature stt = (STTSignature) 1 ; if (stt == ((STTSignature) 1)) { - stt = (STTSignature) ::GetProcAddress (LoadLibrary ("Kernel32.dll"), "SwitchToThread") ; + stt = (STTSignature) ::GetProcAddress (LoadLibrary ("Kernel32.dll"), "SwitchToThread") ; // It's OK if threads race during initialization as the operation above is idempotent. } - if (stt != NULL) { - return (*stt)() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ; - } else { - Sleep (0) ; + if (stt != NULL) { + return (*stt)() ? 
os::YIELD_SWITCHED : os::YIELD_NONEREADY ; + } else { + Sleep (0) ; } - return os::YIELD_UNKNOWN ; + return os::YIELD_UNKNOWN ; } -void os::yield() { os::NakedYield(); } +void os::yield() { os::NakedYield(); } void os::yield_all(int attempts) { - // Yields to all threads, including threads with lower priorities + // Yields to all threads, including threads with lower priorities Sleep(1); } // Win32 only gives you access to seven real priorities at a time, // so we compress Java's ten down to seven. It would be better -// if we dynamically adjusted relative priorities. +// if we dynamically adjusted relative priorities. int os::java_to_os_priority[MaxPriority + 1] = { THREAD_PRIORITY_IDLE, // 0 Entry should never be used @@ -2826,11 +3059,11 @@ OrderAccess::release(); SetEvent(osthread->interrupt_event()); // For JSR166: unpark after setting status - if (thread->is_Java_thread()) + if (thread->is_Java_thread()) ((JavaThread*)thread)->parker()->unpark(); - ParkEvent * ev = thread->_ParkEvent ; - if (ev != NULL) ev->unpark() ; + ParkEvent * ev = thread->_ParkEvent ; + if (ev != NULL) ev->unpark() ; } @@ -2853,7 +3086,7 @@ // Get's a pc (hint) for a running thread. Currently used only for profiling. ExtendedPC os::get_thread_pc(Thread* thread) { CONTEXT context; - context.ContextFlags = CONTEXT_CONTROL; + context.ContextFlags = CONTEXT_CONTROL; HANDLE handle = thread->osthread()->thread_handle(); #ifdef _M_IA64 assert(0, "Fix get_thread_pc"); @@ -2893,6 +3126,7 @@ volatile intx os::win32::_os_thread_count = 0; bool os::win32::_is_nt = false; +bool os::win32::_is_windows_2003 = false; void os::win32::initialize_system_info() { @@ -2915,11 +3149,19 @@ GetVersionEx(&oi); switch(oi.dwPlatformId) { case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break; - case VER_PLATFORM_WIN32_NT: _is_nt = true; break; + case VER_PLATFORM_WIN32_NT: + _is_nt = true; + { + int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion; + if (os_vers == 5002) { + _is_windows_2003 = true; + } + } + break; default: fatal("Unknown platform"); - } + } - _default_stack_size = os::current_stack_size(); + _default_stack_size = os::current_stack_size(); assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size"); assert((_default_stack_size & (_vm_page_size - 1)) == 0, "stack size not a multiple of page size"); @@ -2928,10 +3170,10 @@ // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is // known to deadlock the system, if the VM issues to thread operations with - // a too high frequency, e.g., such as changing the priorities. + // a too high frequency, e.g., such as changing the priorities. // The 6000 seems to work well - no deadlocks has been notices on the test // programs that we have seen experience this problem. - if (!os::win32::is_nt()) { + if (!os::win32::is_nt()) { StarvationMonitorInterval = 6000; } } @@ -3009,19 +3251,23 @@ // For better scalability on MP systems (must be called after initialize_system_info) #ifndef PRODUCT - if (is_MP()) { + if (is_MP()) { NoYieldsInMicrolock = true; } #endif + // This may be overridden later when argument processing is done. 
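// A small sketch of the version test added to initialize_system_info() above, which the
// ergonomic flag default below relies on: major*1000 + minor encodes 5.2 as 5002, the
// value reported by Windows Server 2003 (XP x64 reports the same numbers). Function name
// is hypothetical; this is only an illustration of the encoding, not HotSpot's code.

#include <windows.h>

static bool looks_like_windows_2003() {
  OSVERSIONINFO oi;
  oi.dwOSVersionInfoSize = sizeof(oi);
  if (!GetVersionEx(&oi)) return false;
  int os_vers = (int)(oi.dwMajorVersion * 1000 + oi.dwMinorVersion);   // 5.2 -> 5002
  return oi.dwPlatformId == VER_PLATFORM_WIN32_NT && os_vers == 5002;
}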
+ FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, + os::win32::is_windows_2003()); + // Initialize main_process and main_thread main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle - if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, + if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, &main_thread, THREAD_ALL_ACCESS, false, 0)) { fatal("DuplicateHandle failed\n"); } main_thread_id = (int) GetCurrentThreadId(); } - + // To install functions for atexit processing extern "C" { static void perfMemory_exit_helper() { @@ -3098,7 +3344,7 @@ jint hpi_result = hpi::initialize(); if (hpi_result != JNI_OK) { return hpi_result; } - // If stack_commit_size is 0, windows will reserve the default size, + // If stack_commit_size is 0, windows will reserve the default size, // but only commit a small portion of it. size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size()); size_t default_reserve_size = os::win32::default_stack_size(); @@ -3115,14 +3361,14 @@ // reserved by thread stacks. assert(actual_reserve_size != 0, "Must have a stack"); - // Calculate the thread limit when we should start doing Virtual Memory + // Calculate the thread limit when we should start doing Virtual Memory // banging. Currently when the threads will have used all but 200Mb of space. // - // TODO: consider performing a similar calculation for commit size instead + // TODO: consider performing a similar calculation for commit size instead // as reserve size, since on a 64-bit platform we'll run into that more - // often than running out of virtual memory space. We can use the + // often than running out of virtual memory space. We can use the // lower value of the two calculations as the os_thread_limit. - size_t max_address_space = ((size_t)1 << (BitsPerOop - 1)) - (200 * K * K); + size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); // at exit methods are called in the reverse order of their registration. @@ -3155,6 +3401,10 @@ // initialize thread priority policy prio_init(); + if (UseNUMA && !ForceNUMA) { + UseNUMA = false; // Currently unsupported. + } + return JNI_OK; } @@ -3183,8 +3433,8 @@ hpi::native_path(strcpy(pathbuf, path)); int ret = ::stat(pathbuf, sbuf); if (sbuf != NULL && UseUTCFileTimestamp) { - // Fix for 6539723. st_mtime returned from stat() is dependent on - // the system timezone and so can return different values for the + // Fix for 6539723. st_mtime returned from stat() is dependent on + // the system timezone and so can return different values for the // same file if/when daylight savings time changes. This adjustment // makes sure the same timestamp is returned regardless of the TZ. // @@ -3195,16 +3445,16 @@ // and // http://msdn.microsoft.com/library/default.asp?url= // /library/en-us/sysinfo/base/settimezoneinformation.asp - // + // // NOTE: there is a insidious bug here: If the timezone is changed // after the call to stat() but before 'GetTimeZoneInformation()', then // the adjustment we do here will be wrong and we'll return the wrong - // value (which will likely end up creating an invalid class data - // archive). Absent a better API for this, or some time zone locking + // value (which will likely end up creating an invalid class data + // archive). Absent a better API for this, or some time zone locking // mechanism, we'll have to live with this risk. 
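// A minimal sketch of the adjustment the code below performs for bug 6539723: add the
// currently active zone bias (minutes) back onto the stat() mtime (seconds) so the value
// does not shift when the timezone or DST setting changes. Helper name is hypothetical.

#include <windows.h>
#include <time.h>

static time_t utc_stable_mtime(time_t st_mtime_from_stat) {
  TIME_ZONE_INFORMATION tz;
  DWORD tzid = GetTimeZoneInformation(&tz);
  LONG daylight_bias =
      (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
  return st_mtime_from_stat + (tz.Bias + daylight_bias) * 60;   // minutes -> seconds
}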
TIME_ZONE_INFORMATION tz; DWORD tzid = GetTimeZoneInformation(&tz); - int daylightBias = + int daylightBias = (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; sbuf->st_mtime += (tz.Bias + daylightBias) * 60; } @@ -3377,7 +3627,7 @@ // unless it comes from a PE image (which the shared archive is not.) // Even VirtualProtect refuses to give execute access to mapped memory // that was not previously executable. - // + // // Instead, stick the executable region in anonymous memory. Yuck. // Penalty is that ~4 pages will not be shareable - in the future // we might consider DLLizing the shared archive with a proper PE @@ -3393,7 +3643,7 @@ CloseHandle(hFile); return NULL; } - + DWORD bytes_read; OVERLAPPED overlapped; overlapped.Offset = (DWORD)file_offset; @@ -3422,7 +3672,7 @@ CloseHandle(hFile); return NULL; } - + DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, (DWORD)bytes, addr); @@ -3435,7 +3685,7 @@ CloseHandle(hFile); return NULL; } - + if (CloseHandle(hMap) == 0) { if (PrintMiscellaneous && Verbose) { DWORD err = GetLastError(); @@ -3490,7 +3740,7 @@ // code may be able to access an address that is no longer mapped. return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, - allow_exec); + allow_exec); } @@ -3525,48 +3775,48 @@ Sleep(100); } } else { - jio_fprintf(stderr, + jio_fprintf(stderr, "Could not open pause file '%s', continuing immediately.\n", filename); } } -// An Event wraps a win32 "CreateEvent" kernel handle. +// An Event wraps a win32 "CreateEvent" kernel handle. // // We have a number of choices regarding "CreateEvent" win32 handle leakage: -// +// // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle -// field, and call CloseHandle() on the win32 event handle. Unpark() would -// need to be modified to tolerate finding a NULL (invalid) win32 event handle. -// In addition, an unpark() operation might fetch the handle field, but the -// event could recycle between the fetch and the SetEvent() operation. -// SetEvent() would either fail because the handle was invalid, or inadvertently work, -// as the win32 handle value had been recycled. In an ideal world calling SetEvent() -// on an stale but recycled handle would be harmless, but in practice this might +// field, and call CloseHandle() on the win32 event handle. Unpark() would +// need to be modified to tolerate finding a NULL (invalid) win32 event handle. +// In addition, an unpark() operation might fetch the handle field, but the +// event could recycle between the fetch and the SetEvent() operation. +// SetEvent() would either fail because the handle was invalid, or inadvertently work, +// as the win32 handle value had been recycled. In an ideal world calling SetEvent() +// on an stale but recycled handle would be harmless, but in practice this might // confuse other non-Sun code, so it's not a viable approach. -// +// // 2: Once a win32 event handle is associated with an Event, it remains associated // with the Event. The event handle is never closed. This could be construed -// as handle leakage, but only up to the maximum # of threads that have been extant +// as handle leakage, but only up to the maximum # of threads that have been extant // at any one time. This shouldn't be an issue, as windows platforms typically // permit a process to have hundreds of thousands of open handles. 
-// +// // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList // and release unused handles. // -// 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. +// 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. // It's not clear, however, that we wouldn't be trading one type of leak for another. // -// 5. Use an RCU-like mechanism (Read-Copy Update). -// Or perhaps something similar to Maged Michael's "Hazard pointers". +// 5. Use an RCU-like mechanism (Read-Copy Update). +// Or perhaps something similar to Maged Michael's "Hazard pointers". // -// We use (2). +// We use (2). // // TODO-FIXME: // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks // to recover from (or at least detect) the dreaded Windows 841176 bug. // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent -// into a single win32 CreateEvent() handle. +// into a single win32 CreateEvent() handle. // // _Event transitions in park() // -1 => -1 : illegal @@ -3579,109 +3829,109 @@ // 1 : signaled - thread is running or ready // // Another possible encoding of _Event would be -// with explicit "PARKED" and "SIGNALED" bits. +// with explicit "PARKED" and "SIGNALED" bits. int os::PlatformEvent::park (jlong Millis) { - guarantee (_ParkHandle != NULL , "Invariant") ; + guarantee (_ParkHandle != NULL , "Invariant") ; guarantee (Millis > 0 , "Invariant") ; int v ; // CONSIDER: defer assigning a CreateEvent() handle to the Event until - // the initial park() operation. + // the initial park() operation. - for (;;) { - v = _Event ; - if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; + for (;;) { + v = _Event ; + if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; } - guarantee ((v == 0) || (v == 1), "invariant") ; - if (v != 0) return OS_OK ; + guarantee ((v == 0) || (v == 1), "invariant") ; + if (v != 0) return OS_OK ; // Do this the hard way by blocking ... // TODO: consider a brief spin here, gated on the success of recent - // spin attempts by this thread. - // + // spin attempts by this thread. + // // We decompose long timeouts into series of shorter timed waits. - // Evidently large timo values passed in WaitForSingleObject() are problematic on some - // versions of Windows. See EventWait() for details. This may be superstition. Or not. + // Evidently large timo values passed in WaitForSingleObject() are problematic on some + // versions of Windows. See EventWait() for details. This may be superstition. Or not. // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend - // to happen early in the wait interval. Specifically, after a spurious wakeup (rv == + // to happen early in the wait interval. Specifically, after a spurious wakeup (rv == // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate - // for the already waited time. This policy does not admit any new outcomes. + // for the already waited time. This policy does not admit any new outcomes. // In the future, however, we might want to track the accumulated wait time and - // adjust Millis accordingly if we encounter a spurious wakeup. + // adjust Millis accordingly if we encounter a spurious wakeup. 
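// A standalone sketch of the timeout decomposition used in the loop below: never pass
// more than a bounded value to WaitForSingleObject(), trust WAIT_TIMEOUT to account for
// the elapsed chunk, and stop early on any other return. Names are hypothetical; the
// bound mirrors MAXTIMEOUT below.

#include <windows.h>

static DWORD chunked_wait(HANDLE event, long long millis) {
  const long long MAX_CHUNK = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (millis > 0) {
    DWORD chunk = (DWORD)(millis > MAX_CHUNK ? MAX_CHUNK : millis);
    rv = WaitForSingleObject(event, chunk);
    if (rv != WAIT_TIMEOUT) break;      // signaled (or failed): stop waiting early
    millis -= chunk;
  }
  return rv;
}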
const int MAXTIMEOUT = 0x10000000 ; - DWORD rv = WAIT_TIMEOUT ; - while (_Event < 0 && Millis > 0) { + DWORD rv = WAIT_TIMEOUT ; + while (_Event < 0 && Millis > 0) { DWORD prd = Millis ; // set prd = MAX (Millis, MAXTIMEOUT) if (Millis > MAXTIMEOUT) { - prd = MAXTIMEOUT ; - } - rv = ::WaitForSingleObject (_ParkHandle, prd) ; + prd = MAXTIMEOUT ; + } + rv = ::WaitForSingleObject (_ParkHandle, prd) ; assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ; if (rv == WAIT_TIMEOUT) { - Millis -= prd ; + Millis -= prd ; } } - v = _Event ; - _Event = 0 ; - OrderAccess::fence() ; + v = _Event ; + _Event = 0 ; + OrderAccess::fence() ; // If we encounter a nearly simultanous timeout expiry and unpark() - // we return OS_OK indicating we awoke via unpark(). - // Implementor's license -- returning OS_TIMEOUT would be equally valid, however. - return (v >= 0) ? OS_OK : OS_TIMEOUT ; + // we return OS_OK indicating we awoke via unpark(). + // Implementor's license -- returning OS_TIMEOUT would be equally valid, however. + return (v >= 0) ? OS_OK : OS_TIMEOUT ; } void os::PlatformEvent::park () { - guarantee (_ParkHandle != NULL, "Invariant") ; + guarantee (_ParkHandle != NULL, "Invariant") ; // Invariant: Only the thread associated with the Event/PlatformEvent - // may call park(). - int v ; - for (;;) { - v = _Event ; - if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; + // may call park(). + int v ; + for (;;) { + v = _Event ; + if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; } - guarantee ((v == 0) || (v == 1), "invariant") ; - if (v != 0) return ; + guarantee ((v == 0) || (v == 1), "invariant") ; + if (v != 0) return ; // Do this the hard way by blocking ... // TODO: consider a brief spin here, gated on the success of recent - // spin attempts by this thread. - while (_Event < 0) { - DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ; + // spin attempts by this thread. + while (_Event < 0) { + DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ; assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ; } - // Usually we'll find _Event == 0 at this point, but as + // Usually we'll find _Event == 0 at this point, but as // an optional optimization we clear it, just in case can - // multiple unpark() operations drove _Event up to 1. - _Event = 0 ; + // multiple unpark() operations drove _Event up to 1. + _Event = 0 ; OrderAccess::fence() ; guarantee (_Event >= 0, "invariant") ; } void os::PlatformEvent::unpark() { - guarantee (_ParkHandle != NULL, "Invariant") ; - int v ; - for (;;) { - v = _Event ; // Increment _Event if it's < 1. - if (v > 0) { - // If it's already signaled just return. + guarantee (_ParkHandle != NULL, "Invariant") ; + int v ; + for (;;) { + v = _Event ; // Increment _Event if it's < 1. + if (v > 0) { + // If it's already signaled just return. // The LD of _Event could have reordered or be satisfied // by a read-aside from this processor's write buffer. // To avoid problems execute a barrier and then // ratify the value. A degenerate CAS() would also work. - // Viz., CAS (v+0, &_Event, v) == v). - OrderAccess::fence() ; - if (_Event == v) return ; - continue ; + // Viz., CAS (v+0, &_Event, v) == v). 
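// A minimal sketch of the "barrier then ratify" pattern described in the comment above,
// written with std::atomic purely as a stand-in for HotSpot's Atomic/OrderAccess
// primitives: only trust the fast-path read if it still holds after a full fence.

#include <atomic>

static std::atomic<int> g_event(0);   // illustrative counterpart of _Event

static bool already_signaled() {
  int v = g_event.load(std::memory_order_relaxed);          // possibly stale read
  if (v > 0) {
    std::atomic_thread_fence(std::memory_order_seq_cst);
    if (g_event.load(std::memory_order_relaxed) == v) {     // ratify the value
      return true;
    }
  }
  return false;
}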
+ OrderAccess::fence() ; + if (_Event == v) return ; + continue ; } - if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ; + if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ; } if (v < 0) { - ::SetEvent (_ParkHandle) ; + ::SetEvent (_ParkHandle) ; } } @@ -3698,10 +3948,10 @@ void Parker::park(bool isAbsolute, jlong time) { - guarantee (_ParkEvent != NULL, "invariant") ; + guarantee (_ParkEvent != NULL, "invariant") ; // First, demultiplex/decode time arguments if (time < 0) { // don't wait - return; + return; } else if (time == 0) { time = INFINITE; @@ -3715,14 +3965,14 @@ time /= 1000000; // Must coarsen from nanos to millis if (time == 0) // Wait for the minimal time unit if zero time = 1; - } + } JavaThread* thread = (JavaThread*)(Thread::current()); assert(thread->is_Java_thread(), "Must be JavaThread"); JavaThread *jt = (JavaThread *)thread; // Don't wait if interrupted or already triggered - if (Thread::is_interrupted(thread, false) || + if (Thread::is_interrupted(thread, false) || WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) { ResetEvent(_ParkEvent); return; @@ -3731,10 +3981,10 @@ ThreadBlockInVM tbivm(jt); OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); jt->set_suspend_equivalent(); - + WaitForSingleObject(_ParkEvent, time); ResetEvent(_ParkEvent); - + // If externally suspended while waiting, re-suspend if (jt->handle_special_suspend_equivalent_condition()) { jt->java_suspend_self(); @@ -3743,7 +3993,7 @@ } void Parker::unpark() { - guarantee (_ParkEvent != NULL, "invariant") ; + guarantee (_ParkEvent != NULL, "invariant") ; SetEvent(_ParkEvent); } @@ -3824,20 +4074,20 @@ // Nothing yet return false; } -#endif +#endif LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { DWORD exception_code = e->ExceptionRecord->ExceptionCode; - + if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) { JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow(); PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; address addr = (address) exceptionRecord->ExceptionInformation[1]; - if (os::is_memory_serialize_page(thread, addr)) + if (os::is_memory_serialize_page(thread, addr)) return EXCEPTION_CONTINUE_EXECUTION; } - + return EXCEPTION_CONTINUE_SEARCH; } @@ -3845,16 +4095,16 @@ { long errval; - if ((errval = GetLastError()) != 0) + if ((errval = GetLastError()) != 0) { /* DOS error */ size_t n = (size_t)FormatMessage( FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, + NULL, errval, - 0, - buf, - (DWORD)len, + 0, + buf, + (DWORD)len, NULL); if (n > 3) { /* Drop final '.', CR, LF */ @@ -3866,7 +4116,7 @@ return (int)n; } - if (errno != 0) + if (errno != 0) { /* C runtime error that has no corresponding DOS error code */ const char *s = strerror(errno); --- old/hotspot/src/os/windows/vm/os_windows.hpp 2009-08-01 04:08:59.161165953 +0100 +++ new/hotspot/src/os/windows/vm/os_windows.hpp 2009-08-01 04:08:59.078346677 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)os_windows.hpp 1.55 07/05/05 17:04:46 JVM" -#endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ // Win32_OS defines the interface to windows operating systems @@ -36,12 +33,13 @@ static int _processor_level; static julong _physical_memory; static size_t _default_stack_size; - static bool _is_nt; + static bool _is_nt; + static bool _is_windows_2003; public: // Windows-specific interface: static void initialize_system_info(); - static void setmode_streams(); + static void setmode_streams(); // Processor info as provided by NT static int processor_type() { return _processor_type; } @@ -63,6 +61,9 @@ // Tells whether the platform is NT or Windown95 static bool is_nt() { return _is_nt; } + // Tells whether the platform is Windows 2003 + static bool is_windows_2003() { return _is_windows_2003; } + // Returns the byte size of a virtual memory page static int vm_page_size() { return _vm_page_size; } @@ -89,7 +90,7 @@ private: double CachePad [4] ; // increase odds that _Event is sole occupant of cache line volatile int _Event ; - HANDLE _ParkHandle ; + HANDLE _ParkHandle ; public: // TODO-FIXME: make dtor private ~PlatformEvent() { guarantee (0, "invariant") ; } @@ -97,8 +98,8 @@ public: PlatformEvent() { _Event = 0 ; - _ParkHandle = CreateEvent (NULL, false, false, NULL) ; - guarantee (_ParkHandle != NULL, "invariant") ; + _ParkHandle = CreateEvent (NULL, false, false, NULL) ; + guarantee (_ParkHandle != NULL, "invariant") ; } // Exercise caution using reset() and fired() - they may require MEMBARs @@ -111,19 +112,15 @@ -class PlatformParker : public CHeapObj { +class PlatformParker : public CHeapObj { protected: - HANDLE _ParkEvent ; + HANDLE _ParkEvent ; public: - ~PlatformParker () { guarantee (0, "invariant") ; } - PlatformParker () { - _ParkEvent = CreateEvent (NULL, true, false, NULL) ; - guarantee (_ParkEvent != NULL, "invariant") ; + ~PlatformParker () { guarantee (0, "invariant") ; } + PlatformParker () { + _ParkEvent = CreateEvent (NULL, true, false, NULL) ; + guarantee (_ParkEvent != NULL, "invariant") ; } -} ; - - - - +} ; --- old/hotspot/src/os/windows/vm/os_windows.inline.hpp 2009-08-01 04:09:00.422446390 +0100 +++ new/hotspot/src/os/windows/vm/os_windows.inline.hpp 2009-08-01 04:09:00.345043354 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)os_windows.inline.hpp 1.44 07/05/05 17:04:43 JVM" -#endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ inline const char* os::file_separator() { return "\\"; } @@ -72,3 +69,6 @@ *((int *)(sp - (pages * vm_page_size()))) = 0; } } + +inline bool os::numa_has_static_binding() { return true; } +inline bool os::numa_has_group_homing() { return false; } --- old/hotspot/src/os_cpu/linux_x86/vm/bytes_linux_x86.inline.hpp 2009-08-01 04:09:01.377194439 +0100 +++ new/hotspot/src/os_cpu/linux_x86/vm/bytes_linux_x86.inline.hpp 2009-08-01 04:09:01.302308093 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)bytes_linux_x86.inline.hpp 1.12 07/09/17 09:21:07 JVM" -#endif /* - * Copyright 1999-2001 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include @@ -54,7 +51,7 @@ __asm__ __volatile__ ( "bswap %0" :"=r" (ret) // output : register 0 => ret - :"0" (x) // input : x => register 0 + :"0" (x) // input : x => register 0 :"0" // clobbered register ); return ret; @@ -63,7 +60,18 @@ #ifdef AMD64 inline u8 Bytes::swap_u8(u8 x) { +#ifdef SPARC_WORKS + // workaround for SunStudio12 CR6615391 + __asm__ __volatile__ ( + "bswapq %0" + :"=r" (x) // output : register 0 => x + :"0" (x) // input : x => register 0 + :"0" // clobbered register + ); + return x; +#else return bswap_64(x); +#endif } #else // Helper function for swap_u8 @@ -75,4 +83,3 @@ return swap_u8_base(*(u4*)&x, *(((u4*)&x)+1)); } #endif // !AMD64 - --- old/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.ad 2009-08-01 04:09:02.227183983 +0100 +++ new/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.ad 2009-08-01 04:09:02.145657723 +0100 @@ -1,5 +1,5 @@ // -// Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // CA 95054 USA or visit www.sun.com if you need additional information or // have any questions. -// +// // // X86 Linux Architecture Description File @@ -55,32 +55,32 @@ enc_class linux_tlsencode (eRegP dst) %{ Register dstReg = as_Register($dst$$reg); - MacroAssembler* masm = new MacroAssembler(&cbuf); + MacroAssembler* masm = new MacroAssembler(&cbuf); masm->get_thread(dstReg); %} enc_class linux_breakpoint %{ - MacroAssembler* masm = new MacroAssembler(&cbuf); + MacroAssembler* masm = new MacroAssembler(&cbuf); masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); %} enc_class call_epilog %{ - if( VerifyStackAtCalls ) { + if( VerifyStackAtCalls ) { // Check that stack depth is unchanged: find majik cookie on stack int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word)); if(framesize >= 128) { - emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood - emit_d8(cbuf,0xBC); - emit_d8(cbuf,0x24); - emit_d32(cbuf,framesize); // Find majik cookie from ESP - emit_d32(cbuf, 0xbadb100d); + emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood + emit_d8(cbuf,0xBC); + emit_d8(cbuf,0x24); + emit_d32(cbuf,framesize); // Find majik cookie from ESP + emit_d32(cbuf, 0xbadb100d); } else { - emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood - emit_d8(cbuf,0x7C); - emit_d8(cbuf,0x24); - emit_d8(cbuf,framesize); // Find majik cookie from ESP - emit_d32(cbuf, 0xbadb100d); + emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood + emit_d8(cbuf,0x7C); + emit_d8(cbuf,0x24); + emit_d8(cbuf,framesize); // Find majik cookie from ESP + emit_d32(cbuf, 0xbadb100d); } // jmp EQ around INT3 // QQQ TODO @@ -103,16 +103,16 @@ // This name is KNOWN by the ADLC and cannot be changed. // The ADLC forces a 'TypeRawPtr::BOTTOM' output type // for this guy. 
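// Referring back to the Bytes::swap_u8() change earlier in this hunk (the SunStudio
// bswapq workaround): a compiler-neutral sketch of what the asm variants compute, namely
// swapping each 32-bit half and exchanging the halves. Helper names are hypothetical.

#include <stdint.h>

static inline uint32_t swap_u4_sketch(uint32_t x) {
  return (x >> 24) | ((x >> 8) & 0x0000FF00u) |
         ((x << 8) & 0x00FF0000u) | (x << 24);
}

static inline uint64_t swap_u8_sketch(uint64_t x) {
  uint32_t lo = (uint32_t) x;
  uint32_t hi = (uint32_t)(x >> 32);
  return ((uint64_t) swap_u4_sketch(lo) << 32) | swap_u4_sketch(hi);
}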
-instruct tlsLoadP(eAXRegP dst, eFlagsReg cr) %{ +instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{ match(Set dst (ThreadLocal)); effect(DEF dst, KILL cr); - format %{ "MOV EAX, Thread::current()" %} + format %{ "MOV $dst, Thread::current()" %} ins_encode( linux_tlsencode(dst) ); ins_pipe( ialu_reg_fat ); %} -instruct TLS(eAXRegP dst) %{ +instruct TLS(eRegP dst) %{ match(Set dst (ThreadLocal)); expand %{ --- old/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp 2009-08-01 04:09:03.028240417 +0100 +++ new/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp 2009-08-01 04:09:02.941663893 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)os_linux_x86.cpp 1.98 07/11/15 11:29:19 JVM" -#endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // do not include precompiled header file @@ -65,8 +62,14 @@ #endif // AMD64 address os::current_stack_pointer() { +#ifdef SPARC_WORKS + register void *esp; + __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp)); + return (address) ((char*)esp + sizeof(long)*2); +#else register void *esp __asm__ (SPELL_REG_SP); return (address) esp; +#endif } char* os::non_memory_address_word() { @@ -96,7 +99,7 @@ // For Forte Analyzer AsyncGetCallTrace profiling support - thread // is currently interrupted by SIGPROF. // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal -// frames. Currently we don't do that on Linux, so it's the same as +// frames. Currently we don't do that on Linux, so it's the same as // os::fetch_frame_from_context(). ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { @@ -142,14 +145,19 @@ } intptr_t* _get_previous_fp() { +#ifdef SPARC_WORKS + register intptr_t **ebp; + __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp)); +#else register intptr_t **ebp __asm__ (SPELL_REG_FP); +#endif return (intptr_t*) *ebp; // we want what it points to. } frame os::current_frame() { intptr_t* fp = _get_previous_fp(); - frame myframe((intptr_t*)os::current_stack_pointer(), + frame myframe((intptr_t*)os::current_stack_pointer(), (intptr_t*)fp, CAST_FROM_FN_PTR(address, os::current_frame)); if (os::is_first_C_frame(&myframe)) { @@ -167,14 +175,14 @@ trap_page_fault = 0xE }; -extern "C" void Fetch32PFI () ; -extern "C" void Fetch32Resume () ; +extern "C" void Fetch32PFI () ; +extern "C" void Fetch32Resume () ; #ifdef AMD64 extern "C" void FetchNPFI () ; extern "C" void FetchNResume () ; #endif // AMD64 -extern "C" int +extern "C" int JVM_handle_linux_signal(int sig, siginfo_t* info, void* ucVoid, @@ -188,8 +196,8 @@ // Note: it's not uncommon that JNI code uses signal/sigset to install // then restore certain signal handler (e.g. to temporarily block SIGPIPE, // or have a SIGILL handler when detecting CPU type). When that happens, - // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To - // avoid unnecessary crash when libjsig is not preloaded, try handle signals + // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. 
To + // avoid unnecessary crash when libjsig is not preloaded, try handle signals // that do not require siginfo/ucontext first. if (sig == SIGPIPE || sig == SIGXFSZ) { @@ -199,7 +207,7 @@ } else { if (PrintMiscellaneous && (WizardMode || Verbose)) { char buf[64]; - warning("Ignoring %s - see bugs 4229104 or 646499219", + warning("Ignoring %s - see bugs 4229104 or 646499219", os::exception_name(sig, buf, sizeof(buf))); } return true; @@ -236,15 +244,15 @@ if (info != NULL && uc != NULL && thread != NULL) { pc = (address) os::Linux::ucontext_get_pc(uc); - if (pc == (address) Fetch32PFI) { - uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ; - return 1 ; + if (pc == (address) Fetch32PFI) { + uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ; + return 1 ; } #ifdef AMD64 - if (pc == (address) FetchNPFI) { - uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ; - return 1 ; - } + if (pc == (address) FetchNPFI) { + uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ; + return 1 ; + } #endif // AMD64 // Handle ALL stack overflow variations here @@ -296,24 +304,24 @@ if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) { stub = SharedRuntime::get_poll_stub(pc); } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) { - // BugId 4454115: A read from a MappedByteBuffer can fault - // here if the underlying file has been truncated. - // Do not crash the VM in such a case. + // BugId 4454115: A read from a MappedByteBuffer can fault + // here if the underlying file has been truncated. + // Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob_unsafe(pc); nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL; if (nm != NULL && nm->has_unsafe_access()) { - stub = StubRoutines::handler_for_unsafe_access(); - } + stub = StubRoutines::handler_for_unsafe_access(); + } } else #ifdef AMD64 - if (sig == SIGFPE && + if (sig == SIGFPE && (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) { - stub = + stub = SharedRuntime:: continuation_for_implicit_exception(thread, - pc, + pc, SharedRuntime:: IMPLICIT_DIVIDE_BY_ZERO); #else @@ -351,7 +359,7 @@ } else if (thread->thread_state() == _thread_in_vm && sig == SIGBUS && /* info->si_code == BUS_OBJERR && */ thread->doing_unsafe_access()) { - stub = StubRoutines::handler_for_unsafe_access(); + stub = StubRoutines::handler_for_unsafe_access(); } // jni_fast_GetField can trap at certain pc's if a GC kicks in @@ -400,24 +408,25 @@ // different - we still want to unguard the 2nd page in this case. // // 15 bytes seems to be a (very) safe value for max instruction size. 
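// A small sketch of the predicate evaluated below: could a single instruction starting at
// 'pc' have touched 'addr' on the next page? It must lie within a conservative maximum
// instruction length (15 bytes on x86) and the two addresses must differ above the
// page-offset bits. Names are hypothetical; it assumes addr >= pc, as the original does.

#include <stdint.h>

static bool instr_may_span_pages(uintptr_t pc, uintptr_t addr, uintptr_t page_size) {
  const uintptr_t max_instr_len = 15;
  bool near_enough     = (addr - pc) < max_instr_len;
  bool different_pages = ((pc ^ addr) & ~(page_size - 1)) != 0;
  return near_enough && different_pages;
}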
- bool pc_is_near_addr = + bool pc_is_near_addr = (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); bool instr_spans_page_boundary = (align_size_down((intptr_t) pc ^ (intptr_t) addr, (intptr_t) page_size) > 0); - + if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { static volatile address last_addr = (address) os::non_memory_address_word(); - + // In conservative mode, don't unguard unless the address is in the VM if (addr != last_addr && (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { - - // Unguard and retry + + // Set memory to RWX and retry address page_start = (address) align_size_down((intptr_t) addr, (intptr_t) page_size); - bool res = os::unguard_memory((char*) page_start, page_size); + bool res = os::protect_memory((char*) page_start, page_size, + os::MEM_PROT_RWX); if (PrintMiscellaneous && Verbose) { char buf[256]; @@ -429,23 +438,23 @@ } stub = pc; - // Set last_addr so if we fault again at the same address, we don't end - // up in an endless loop. - // - // There are two potential complications here. Two threads trapping at - // the same address at the same time could cause one of the threads to - // think it already unguarded, and abort the VM. Likely very rare. - // - // The other race involves two threads alternately trapping at - // different addresses and failing to unguard the page, resulting in - // an endless loop. This condition is probably even more unlikely than - // the first. - // - // Although both cases could be avoided by using locks or thread local - // last_addr, these solutions are unnecessary complication: this - // handler is a best-effort safety net, not a complete solution. It is - // disabled by default and should only be used as a workaround in case - // we missed any no-execute-unsafe VM code. + // Set last_addr so if we fault again at the same address, we don't end + // up in an endless loop. + // + // There are two potential complications here. Two threads trapping at + // the same address at the same time could cause one of the threads to + // think it already unguarded, and abort the VM. Likely very rare. + // + // The other race involves two threads alternately trapping at + // different addresses and failing to unguard the page, resulting in + // an endless loop. This condition is probably even more unlikely than + // the first. + // + // Although both cases could be avoided by using locks or thread local + // last_addr, these solutions are unnecessary complication: this + // handler is a best-effort safety net, not a complete solution. It is + // disabled by default and should only be used as a workaround in case + // we missed any no-execute-unsafe VM code. last_addr = addr; } @@ -563,10 +572,12 @@ #else size_t os::Linux::min_stack_allowed = (48 DEBUG_ONLY(+4))*K; +#ifdef __GNUC__ #define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;}) +#endif // Test if pthread library can support variable thread stack size. LinuxThreads -// in fixed stack mode allocates 2M fixed slot for each thread. LinuxThreads +// in fixed stack mode allocates 2M fixed slot for each thread. LinuxThreads // in floating stack mode and NPTL support variable stack size. bool os::Linux::supports_variable_stack_size() { if (os::Linux::is_NPTL()) { @@ -575,26 +586,30 @@ } else { // Note: We can't control default stack size when creating a thread. 
- // If we use non-default stack size (pthread_attr_setstacksize), both - // floating stack and non-floating stack LinuxThreads will return the + // If we use non-default stack size (pthread_attr_setstacksize), both + // floating stack and non-floating stack LinuxThreads will return the // same value. This makes it impossible to implement this function by // detecting thread stack size directly. - // + // // An alternative approach is to check %gs. Fixed-stack LinuxThreads // do not use %gs, so its value is 0. Floating-stack LinuxThreads use // %gs (either as LDT selector or GDT selector, depending on kernel) - // to access thread specific data. + // to access thread specific data. // - // Note that %gs is a reserved glibc register since early 2001, so - // applications are not allowed to change its value (Ulrich Drepper from - // Redhat confirmed that all known offenders have been modified to use - // either %fs or TSD). In the worst case scenario, when VM is embedded in - // a native application that plays with %gs, we might see non-zero %gs - // even LinuxThreads is running in fixed stack mode. As the result, we'll - // return true and skip _thread_safety_check(), so we may not be able to + // Note that %gs is a reserved glibc register since early 2001, so + // applications are not allowed to change its value (Ulrich Drepper from + // Red Hat confirmed that all known offenders have been modified to use + // either %fs or TSD). In the worst case scenario, when VM is embedded in + // a native application that plays with %gs, we might see non-zero %gs + // even LinuxThreads is running in fixed stack mode. As the result, we'll + // return true and skip _thread_safety_check(), so we may not be able to // detect stack-heap collisions. But otherwise it's harmless. // +#ifdef __GNUC__ return (GET_GS() != 0); +#else + return false; +#endif } } #endif // AMD64 @@ -608,14 +623,14 @@ size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K); #endif // AMD64 return s; -} - +} + size_t os::Linux::default_guard_size(os::ThreadType thr_type) { // Creating guard page is very expensive. Java thread has HotSpot // guard page, only enable glibc guard page for non-Java threads. return (thr_type == java_thread ? 0 : page_size()); } - + // Java thread: // // Low memory addresses --- old/hotspot/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp 2009-08-01 04:09:04.023907930 +0100 +++ new/hotspot/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp 2009-08-01 04:09:03.946173091 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)assembler_solaris_sparc.cpp 1.11 07/05/05 17:04:53 JVM" #endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,18 +31,6 @@ #include // For trap numbers #include // For V8 compatibility -bool MacroAssembler::needs_explicit_null_check(intptr_t offset) { - // The first page of virtual addresses is unmapped on SPARC. - // Thus, any access the VM makes through a null pointer with an offset of - // less than 4K will get a recognizable SIGSEGV, which the signal handler - // will transform into a NullPointerException. - // (Actually, the first 64K or so is unmapped, but it's simpler - // to depend only on the first 4K or so.) 
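// A restatement of the predicate in the function being removed here, as a minimal sketch:
// an access through a null pointer only produces a recognizable SIGSEGV if the offset
// stays inside the unmapped low page, so only larger offsets need an explicit null check.
// Name is hypothetical and the page size is passed in rather than queried from os::.

#include <stdint.h>

static bool needs_explicit_null_check_sketch(intptr_t offset, intptr_t page_size) {
  bool offset_in_first_page = 0 <= offset && offset < page_size;
  return !offset_in_first_page;
}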
- - bool offset_in_first_page = 0 <= offset && offset < os::vm_page_size(); - return !offset_in_first_page; -} - void MacroAssembler::read_ccr_trap(Register ccr_save) { // Execute a trap to get the PSR, mask and shift // to get the condition codes. --- old/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.s 2009-08-01 04:09:04.858954296 +0100 +++ new/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.s 2009-08-01 04:09:04.782377591 +0100 @@ -1,5 +1,5 @@ !! -!! Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. +!! Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. !! DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. !! !! This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,9 @@ !! by the .il "call", in some cases optimizing the code, completely eliding it, !! or by moving the code from the "call site". - + !! ASM better know we may use G6 for our own purposes + .register %g6, #ignore + .globl SafeFetch32 .align 32 .global Fetch32PFI, Fetch32Resume @@ -106,6 +108,7 @@ .globl _raw_thread_id .align 32 _raw_thread_id: + .register %g7, #scratch retl mov %g7, %o0 --- old/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp 2009-08-01 04:09:05.715519212 +0100 +++ new/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp 2009-08-01 04:09:05.628426893 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)thread_solaris_sparc.cpp 1.16 07/05/05 17:04:53 JVM" #endif /* - * Copyright 2003-2004 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,17 +53,6 @@ // even if isInJava == true. It should be more reliable than // ucontext info. if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { -#if 0 - // This sanity check may not be needed with the new frame - // walking code. Remove it for now. - if (!jt->frame_anchor()->post_Java_state_is_pc() - && frame::next_younger_sp_or_null(last_Java_sp(), - jt->frame_anchor()->post_Java_sp()) == NULL) { - // the anchor contains an SP, but the frame is not walkable - // because post_Java_sp isn't valid relative to last_Java_sp - return false; - } -#endif *fr_addr = jt->pd_last_frame(); return true; } @@ -80,23 +69,59 @@ return false; } + frame ret_frame(ret_sp, frame::unpatchable, addr.pc()); + // we were running Java code when SIGPROF came in if (isInJava) { + + + // If the frame we got is safe then it is most certainly valid + if (ret_frame.safe_for_sender(jt)) { + *fr_addr = ret_frame; + return true; + } + + // If it isn't safe then we can try several things to try and get + // a good starting point. + // + // On sparc the frames are almost certainly walkable in the sense + // of sp/fp linkages. However because of recycling of windows if + // a piece of code does multiple save's where the initial save creates + // a real frame with a return pc and the succeeding save's are used to + // simply get free registers and have no real pc then the pc linkage on these + // "inner" temporary frames will be bogus. + // Since there is in general only a nesting level like + // this one deep in general we'll try and unwind such an "inner" frame + // here ourselves and see if it makes sense + + frame unwind_frame(ret_frame.fp(), frame::unpatchable, addr.pc()); + + if (unwind_frame.safe_for_sender(jt)) { + *fr_addr = unwind_frame; + return true; + } + + // Well that didn't work. 
Most likely we're toast on this tick + // The previous code would try this. I think it is dubious in light + // of changes to safe_for_sender and the unwind trick above but + // if it gets us a safe frame who wants to argue. + // If we have a last_Java_sp, then the SIGPROF signal caught us // right when we were transitioning from _thread_in_Java to a new // JavaThreadState. We use last_Java_sp instead of the sp from // the ucontext since it should be more reliable. + if (jt->has_last_Java_frame()) { ret_sp = jt->last_Java_sp(); + frame ret_frame2(ret_sp, frame::unpatchable, addr.pc()); + if (ret_frame2.safe_for_sender(jt)) { + *fr_addr = ret_frame2; + return true; + } } - // Implied else: we don't have a last_Java_sp so we use what we - // got from the ucontext. - frame ret_frame(ret_sp, frame::unpatchable, addr.pc()); - if (!ret_frame.safe_for_sender(jt)) { - // nothing else to try if the frame isn't good - return false; - } + // This is the best we can do. We will only be able to decode the top frame + *fr_addr = ret_frame; return true; } @@ -108,17 +133,13 @@ if (jt->has_last_Java_frame()) { assert(!jt->frame_anchor()->walkable(), "case covered above"); - if (jt->thread_state() == _thread_in_native) { - frame ret_frame(jt->last_Java_sp(), frame::unpatchable, addr.pc()); - if (!ret_frame.safe_for_sender(jt)) { - // nothing else to try if the frame isn't good - return false; - } - *fr_addr = ret_frame; - return true; - } + frame ret_frame(jt->last_Java_sp(), frame::unpatchable, addr.pc()); + *fr_addr = ret_frame; + return true; } - // nothing else to try - return false; + // nothing else to try but what we found initially + + *fr_addr = ret_frame; + return true; } --- old/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp 2009-08-01 04:09:06.695984976 +0100 +++ new/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp 2009-08-01 04:09:06.606078087 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)os_solaris_x86.cpp 1.122 07/09/17 09:16:14 JVM" -#endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // do not include precompiled header file @@ -93,10 +90,10 @@ // // Validate a ucontext retrieved from walking a uc_link of a ucontext. // There are issues with libthread giving out uc_links for different threads -// on the same uc_link chain and bad or circular links. +// on the same uc_link chain and bad or circular links. // bool os::Solaris::valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect) { - if (valid >= suspect || + if (valid >= suspect || valid->uc_stack.ss_flags != suspect->uc_stack.ss_flags || valid->uc_stack.ss_sp != suspect->uc_stack.ss_sp || valid->uc_stack.ss_size != suspect->uc_stack.ss_size) { @@ -175,7 +172,7 @@ return os::fetch_frame_from_context(luc, ret_sp, ret_fp); } -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ExtendedPC epc; @@ -206,16 +203,17 @@ return frame(fr->sender_sp(), fr->link(), fr->sender_pc()); } -extern "C" intptr_t *_get_previous_fp(); // in .il file. 
+extern "C" intptr_t *_get_current_fp(); // in .il file frame os::current_frame() { - intptr_t* fp = _get_previous_fp(); - frame myframe((intptr_t*)os::current_stack_pointer(), + intptr_t* fp = _get_current_fp(); // it's inlined so want current fp + frame myframe((intptr_t*)os::current_stack_pointer(), (intptr_t*)fp, CAST_FROM_FN_PTR(address, os::current_frame)); if (os::is_first_C_frame(&myframe)) { // stack is not walkable - return frame(NULL, NULL, NULL); + frame ret; // This will be a null useless frame + return ret; } else { return os::get_sender_for_C_frame(&myframe); } @@ -232,7 +230,7 @@ intptr_t* sp; assert(ProfileVM && thread->is_VM_thread(), "just checking"); - + ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]); _addr = new_addr; } @@ -245,13 +243,13 @@ return (err); if (*flags == TRS_LWPID) { sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(), - *lwp); + *lwp); if ((lwpfd = open(lwpstatusfile, O_RDONLY)) < 0) { perror("thr_mutator_status: open lwpstatus"); return (EINVAL); } if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) != - sizeof (lwpstatus_t)) { + sizeof (lwpstatus_t)) { perror("thr_mutator_status: read lwpstatus"); (void) close(lwpfd); return (EINVAL); @@ -278,7 +276,7 @@ return; } // looking for _sse_hw in libc.so, if it does not exist or - // the value (int) is 0, OS has no support for SSE + // the value (int) is 0, OS has no support for SSE int *sse_hwp; void *h; @@ -331,7 +329,7 @@ extern "C" int JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized); -extern "C" void Fetch32PFI () ; +extern "C" void Fetch32PFI () ; extern "C" void Fetch32Resume () ; #ifdef AMD64 extern "C" void FetchNPFI () ; @@ -373,10 +371,10 @@ if (os::Solaris::signal_handlers_are_installed) { if (t != NULL ){ if(t->is_Java_thread()) { - thread = (JavaThread*)t; + thread = (JavaThread*)t; } else if(t->is_VM_thread()){ - vmthread = (VMThread *)t; + vmthread = (VMThread *)t; } } } @@ -387,8 +385,8 @@ if(thread){ OSThread::InterruptArguments args(thread, uc); thread->osthread()->do_interrupt_callbacks_at_interrupt(&args); - return true; - } + return true; + } else if(vmthread){ OSThread::InterruptArguments args(vmthread, uc); vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args); @@ -420,14 +418,14 @@ pc = (address) uc->uc_mcontext.gregs[REG_PC]; // SafeFetch32() support - if (pc == (address) Fetch32PFI) { - uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ; - return true ; + if (pc == (address) Fetch32PFI) { + uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ; + return true ; } #ifdef AMD64 - if (pc == (address) FetchNPFI) { - uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ; - return true ; + if (pc == (address) FetchNPFI) { + uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ; + return true ; } #endif // AMD64 @@ -435,20 +433,20 @@ if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) { address addr = (address) info->si_addr; if (thread->in_stack_yellow_zone(addr)) { - thread->disable_stack_yellow_zone(); - if (thread->thread_state() == _thread_in_Java) { - // Throw a stack overflow exception. Guard pages will be reenabled - // while unwinding the stack. - stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); - } else { - // Thread was in the vm or native code. Return and try to finish. - return true; - } + thread->disable_stack_yellow_zone(); + if (thread->thread_state() == _thread_in_Java) { + // Throw a stack overflow exception. 
Guard pages will be reenabled + // while unwinding the stack. + stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); + } else { + // Thread was in the vm or native code. Return and try to finish. + return true; + } } else if (thread->in_stack_red_zone(addr)) { - // Fatal red zone violation. Disable the guard pages and fall through - // to handle_unexpected_exception way down below. - thread->disable_stack_red_zone(); - tty->print_raw_cr("An irrecoverable stack overflow has occurred."); + // Fatal red zone violation. Disable the guard pages and fall through + // to handle_unexpected_exception way down below. + thread->disable_stack_red_zone(); + tty->print_raw_cr("An irrecoverable stack overflow has occurred."); } } @@ -473,42 +471,42 @@ stub = StubRoutines::handler_for_unsafe_access(); } } - else + else if (sig == SIGFPE && info->si_code == FPE_INTDIV) { - // integer divide by zero + // integer divide by zero stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO); } #ifndef AMD64 else if (sig == SIGFPE && info->si_code == FPE_FLTDIV) { - // floating-point divide by zero + // floating-point divide by zero stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO); } else if (sig == SIGFPE && info->si_code == FPE_FLTINV) { - // The encoding of D2I in i486.ad can cause an exception prior - // to the fist instruction if there was an invalid operation - // pending. We want to dismiss that exception. From the win_32 - // side it also seems that if it really was the fist causing - // the exception that we do the d2i by hand with different - // rounding. Seems kind of weird. QQQ TODO - // Note that we take the exception at the NEXT floating point instruction. - if (pc[0] == 0xDB) { - assert(pc[0] == 0xDB, "not a FIST opcode"); - assert(pc[1] == 0x14, "not a FIST opcode"); - assert(pc[2] == 0x24, "not a FIST opcode"); - return true; - } else { - assert(pc[-3] == 0xDB, "not an flt invalid opcode"); - assert(pc[-2] == 0x14, "not an flt invalid opcode"); - assert(pc[-1] == 0x24, "not an flt invalid opcode"); - } + // The encoding of D2I in i486.ad can cause an exception prior + // to the fist instruction if there was an invalid operation + // pending. We want to dismiss that exception. From the win_32 + // side it also seems that if it really was the fist causing + // the exception that we do the d2i by hand with different + // rounding. Seems kind of weird. QQQ TODO + // Note that we take the exception at the NEXT floating point instruction. + if (pc[0] == 0xDB) { + assert(pc[0] == 0xDB, "not a FIST opcode"); + assert(pc[1] == 0x14, "not a FIST opcode"); + assert(pc[2] == 0x24, "not a FIST opcode"); + return true; + } else { + assert(pc[-3] == 0xDB, "not an flt invalid opcode"); + assert(pc[-2] == 0x14, "not an flt invalid opcode"); + assert(pc[-1] == 0x24, "not an flt invalid opcode"); + } } else if (sig == SIGFPE ) { tty->print_cr("caught SIGFPE, info 0x%x.", info->si_code); } #endif // !AMD64 - // QQQ It doesn't seem that we need to do this on x86 because we should be able - // to return properly from the handler without this extra stuff on the back side. + // QQQ It doesn't seem that we need to do this on x86 because we should be able + // to return properly from the handler without this extra stuff on the back side. 
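// A compact sketch of the byte test in the FPE_FLTINV branch above: is the FIST encoding
// that D2I emits (0xDB 0x14 0x24, a fist store through [esp], taken from the asserts
// above) sitting at the fault pc, or did it execute just before it? Name is hypothetical.

#include <stdint.h>

static bool fault_is_pending_fist(const uint8_t* pc) {
  bool at_pc     = pc[0]  == 0xDB && pc[1]  == 0x14 && pc[2]  == 0x24;
  bool just_past = pc[-3] == 0xDB && pc[-2] == 0x14 && pc[-1] == 0x24;
  return at_pc || just_past;
}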
else if (sig == SIGSEGV && info->si_code > 0 && !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) { // Determination of interpreter/vtable stub/compiled code null exception @@ -541,7 +539,7 @@ // // Preventative code for future versions of Solaris which may // enable execution protection when running the 32-bit VM on AMD64. - // + // // This should be kept as the last step in the triage. We don't // have a dedicated trap number for a no-execute fault, so be // conservative and allow other handlers the first shot. @@ -564,25 +562,26 @@ // different - we still want to unguard the 2nd page in this case. // // 15 bytes seems to be a (very) safe value for max instruction size. - bool pc_is_near_addr = + bool pc_is_near_addr = (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); bool instr_spans_page_boundary = (align_size_down((intptr_t) pc ^ (intptr_t) addr, (intptr_t) page_size) > 0); - + if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { static volatile address last_addr = (address) os::non_memory_address_word(); - + // In conservative mode, don't unguard unless the address is in the VM if (addr != last_addr && (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { - - // Unguard and retry + + // Make memory rwx and retry address page_start = (address) align_size_down((intptr_t) addr, (intptr_t) page_size); - bool res = os::unguard_memory((char*) page_start, page_size); - + bool res = os::protect_memory((char*) page_start, page_size, + os::MEM_PROT_RWX); + if (PrintMiscellaneous && Verbose) { char buf[256]; jio_snprintf(buf, sizeof(buf), "Execution protection violation " @@ -593,23 +592,23 @@ } stub = pc; - // Set last_addr so if we fault again at the same address, we don't end - // up in an endless loop. - // - // There are two potential complications here. Two threads trapping at - // the same address at the same time could cause one of the threads to - // think it already unguarded, and abort the VM. Likely very rare. - // - // The other race involves two threads alternately trapping at - // different addresses and failing to unguard the page, resulting in - // an endless loop. This condition is probably even more unlikely than - // the first. - // - // Although both cases could be avoided by using locks or thread local - // last_addr, these solutions are unnecessary complication: this - // handler is a best-effort safety net, not a complete solution. It is - // disabled by default and should only be used as a workaround in case - // we missed any no-execute-unsafe VM code. + // Set last_addr so if we fault again at the same address, we don't end + // up in an endless loop. + // + // There are two potential complications here. Two threads trapping at + // the same address at the same time could cause one of the threads to + // think it already unguarded, and abort the VM. Likely very rare. + // + // The other race involves two threads alternately trapping at + // different addresses and failing to unguard the page, resulting in + // an endless loop. This condition is probably even more unlikely than + // the first. + // + // Although both cases could be avoided by using locks or thread local + // last_addr, these solutions are unnecessary complication: this + // handler is a best-effort safety net, not a complete solution. It is + // disabled by default and should only be used as a workaround in case + // we missed any no-execute-unsafe VM code. 
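Note the retry path above now calls os::protect_memory(page_start, page_size, os::MEM_PROT_RWX) instead of os::unguard_memory, so the page is made executable again rather than merely readable and writable. On a POSIX system the operation amounts to the following; make_page_rwx is an illustrative helper name, not a HotSpot function.

#include <sys/mman.h>
#include <unistd.h>
#include <stdint.h>

// Round the faulting address down to its page and make that page readable,
// writable and executable, so the faulting instruction can be retried.
bool make_page_rwx(void* fault_addr) {
  long page_size = sysconf(_SC_PAGESIZE);
  uintptr_t page_start = (uintptr_t)fault_addr & ~(uintptr_t)(page_size - 1);
  return mprotect((void*)page_start, (size_t)page_size,
                  PROT_READ | PROT_WRITE | PROT_EXEC) == 0;
}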
last_addr = addr; } @@ -644,8 +643,8 @@ // infinite loop if some other move to fs caused the GP fault. Note that // this loop counter is ultimately a heuristic as it is possible for // more than one thread to generate this fault at a time in an MP system. - // In the case of the loop count being exceeded or if the poll fails - // just fall through to a fatal error. + // In the case of the loop count being exceeded or if the poll fails + // just fall through to a fatal error. // If there is some other source of T_GPFLT traps and the text at EIP is // unreadable this code will loop infinitely until the stack is exausted. // The key to diagnosis in this case is to look for the bottom signal handler @@ -653,28 +652,28 @@ if(! IgnoreLibthreadGPFault) { if (sig == SIGSEGV && uc->uc_mcontext.gregs[TRAPNO] == T_GPFLT) { - const unsigned char *p = - (unsigned const char *) uc->uc_mcontext.gregs[EIP]; + const unsigned char *p = + (unsigned const char *) uc->uc_mcontext.gregs[EIP]; // Expected instruction? if(p[0] == movlfs[0] && p[1] == movlfs[1]) { - Atomic::inc(&ldtr_refresh); + Atomic::inc(&ldtr_refresh); - // Infinite loop? + // Infinite loop? - if(ldtr_refresh < ((2 << 16) / PAGESIZE)) { + if(ldtr_refresh < ((2 << 16) / PAGESIZE)) { - // No, force scheduling to get a fresh view of the LDTR + // No, force scheduling to get a fresh view of the LDTR - if(poll(NULL, 0, 10) == 0) { + if(poll(NULL, 0, 10) == 0) { - // Retry the move + // Retry the move - return false; - } - } + return false; + } + } } } } @@ -690,7 +689,7 @@ sigaction(sig, (struct sigaction *)0, &oldAct); if (oldAct.sa_sigaction != signalHandler) { void* sighand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction) - : CAST_FROM_FN_PTR(void*, oldAct.sa_handler); + : CAST_FROM_FN_PTR(void*, oldAct.sa_handler); warning("Unexpected Signal %d occured under user-defined signal handler %#lx", sig, (long)sighand); } } @@ -879,4 +878,3 @@ _solaris_raw_setup_fpu(fpu_cntrl); } #endif // AMD64 - --- old/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad 2009-08-01 04:09:07.651733190 +0100 +++ new/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad 2009-08-01 04:09:07.564665796 +0100 @@ -1,5 +1,5 @@ // -// Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // CA 95054 USA or visit www.sun.com if you need additional information or // have any questions. 
-// +// // // X86 Solaris Architecture Description File @@ -55,39 +55,39 @@ enc_class solaris_tlsencode (eRegP dst) %{ Register dstReg = as_Register($dst$$reg); - MacroAssembler* masm = new MacroAssembler(&cbuf); + MacroAssembler* masm = new MacroAssembler(&cbuf); masm->get_thread(dstReg); %} enc_class solaris_breakpoint %{ - MacroAssembler* masm = new MacroAssembler(&cbuf); + MacroAssembler* masm = new MacroAssembler(&cbuf); // Really need to fix this - masm->pushl(rax); - masm->pushl(rcx); - masm->pushl(rdx); + masm->push(rax); + masm->push(rcx); + masm->push(rdx); masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); - masm->popl(rdx); - masm->popl(rcx); - masm->popl(rax); + masm->pop(rdx); + masm->pop(rcx); + masm->pop(rax); %} enc_class call_epilog %{ - if( VerifyStackAtCalls ) { + if( VerifyStackAtCalls ) { // Check that stack depth is unchanged: find majik cookie on stack int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word)); if(framesize >= 128) { - emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood - emit_d8(cbuf,0xBC); - emit_d8(cbuf,0x24); - emit_d32(cbuf,framesize); // Find majik cookie from ESP - emit_d32(cbuf, 0xbadb100d); + emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood + emit_d8(cbuf,0xBC); + emit_d8(cbuf,0x24); + emit_d32(cbuf,framesize); // Find majik cookie from ESP + emit_d32(cbuf, 0xbadb100d); } else { - emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood - emit_d8(cbuf,0x7C); - emit_d8(cbuf,0x24); - emit_d8(cbuf,framesize); // Find majik cookie from ESP - emit_d32(cbuf, 0xbadb100d); + emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood + emit_d8(cbuf,0x7C); + emit_d8(cbuf,0x24); + emit_d8(cbuf,framesize); // Find majik cookie from ESP + emit_d32(cbuf, 0xbadb100d); } // jmp EQ around INT3 // QQQ TODO @@ -110,16 +110,16 @@ // This name is KNOWN by the ADLC and cannot be changed. // The ADLC forces a 'TypeRawPtr::BOTTOM' output type // for this guy. -instruct tlsLoadP(eAXRegP dst, eFlagsReg cr) %{ +instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{ match(Set dst (ThreadLocal)); effect(DEF dst, KILL cr); - format %{ "MOV EAX, Thread::current()" %} + format %{ "MOV $dst, Thread::current()" %} ins_encode( solaris_tlsencode(dst) ); ins_pipe( ialu_reg_fat ); %} -instruct TLS(eAXRegP dst) %{ +instruct TLS(eRegP dst) %{ match(Set dst (ThreadLocal)); expand %{ --- old/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.il 2009-08-01 04:09:08.476858624 +0100 +++ new/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.il 2009-08-01 04:09:08.392068779 +0100 @@ -37,10 +37,10 @@ movl %gs:0, %eax .end - // Get callers fp - .inline _get_previous_fp,0 + // Get current fp + .inline _get_current_fp,0 + .volatile movl %ebp, %eax - movl %eax, %eax .end // Support for jint Atomic::add(jint inc, volatile jint* dest) --- old/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.il 2009-08-01 04:09:09.294718719 +0100 +++ new/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.il 2009-08-01 04:09:09.211735927 +0100 @@ -30,10 +30,10 @@ movq %fs:0, %rax .end - // Get the frame pointer from previous frame. - .inline _get_previous_fp,0 + // Get the frame pointer from current frame. 
+ .inline _get_current_fp,0 + .volatile movq %rbp, %rax - movq %rax, %rax .end // Support for jint Atomic::add(jint add_value, volatile jint* dest) --- old/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp 2009-08-01 04:09:10.108603792 +0100 +++ new/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp 2009-08-01 04:09:10.031410981 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)thread_solaris_x86.cpp 1.15 07/09/17 10:00:20 JVM" -#endif /* - * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -35,49 +32,53 @@ assert(Thread::current() == this, "caller must be current thread"); assert(this->is_Java_thread(), "must be JavaThread"); - JavaThread* jt = (JavaThread *)this; - // If we have a last_Java_frame, then we should use it even if - // isInJava == true. It should be more reliable than ucontext info. + // last_Java_frame is always walkable and safe use it if we have it + if (jt->has_last_Java_frame()) { *fr_addr = jt->pd_last_frame(); return true; } - // At this point, we don't have a last_Java_frame, so - // we try to glean some information out of the ucontext - // if we were running Java code when SIGPROF came in. - if (isInJava) { - ucontext_t* uc = (ucontext_t*) ucontext; - - intptr_t* ret_fp; - intptr_t* ret_sp; - ExtendedPC addr = os::Solaris::fetch_frame_from_ucontext(this, uc, - &ret_sp, &ret_fp); - if (addr.pc() == NULL || ret_sp == NULL ) { - // ucontext wasn't useful - return false; - } + ucontext_t* uc = (ucontext_t*) ucontext; + + // We always want to use the initial frame we create from the ucontext as + // it certainly signals where we currently are. However that frame may not + // be safe for calling sender. In that case if we have a last_Java_frame + // then the forte walker will switch to that frame as the virtual sender + // for the frame we create here which is not sender safe. + + intptr_t* ret_fp; + intptr_t* ret_sp; + ExtendedPC addr = os::Solaris::fetch_frame_from_ucontext(this, uc, &ret_sp, &ret_fp); + + // Something would really have to be screwed up to get a NULL pc + + if (addr.pc() == NULL ) { + assert(false, "NULL pc from signal handler!"); + return false; + + } + + // If sp and fp are nonsense just leave them out + + if ((address)ret_sp >= jt->stack_base() || + (address)ret_sp < jt->stack_base() - jt->stack_size() ) { - frame ret_frame(ret_sp, ret_fp, addr.pc()); - if (!ret_frame.safe_for_sender(jt)) { -#ifdef COMPILER2 - frame ret_frame2(ret_sp, NULL, addr.pc()); - if (!ret_frame2.safe_for_sender(jt)) { - // nothing else to try if the frame isn't good - return false; - } - ret_frame = ret_frame2; -#else - // nothing else to try if the frame isn't good - return false; -#endif /* COMPILER2 */ + ret_sp = NULL; + ret_fp = NULL; + } else { + + // sp is reasonable is fp reasonable? 
+ if ( (address)ret_fp >= jt->stack_base() || ret_fp < ret_sp) { + ret_fp = NULL; } - *fr_addr = ret_frame; - return true; } - // nothing else to try - return false; + frame ret_frame(ret_sp, ret_fp, addr.pc()); + + *fr_addr = ret_frame; + return true; + } --- old/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp 2009-08-01 04:09:11.024729565 +0100 +++ new/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp 2009-08-01 04:09:10.943982975 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)os_windows_x86.cpp 1.32 07/09/17 09:11:33 JVM" -#endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // do not include precompiled header file @@ -50,9 +47,9 @@ #ifndef AMD64 // We store the current thread in this wrapperthread location // and determine how far away this address is from the structured - // execption pointer that FS:[0] points to. This get_thread + // exception pointer that FS:[0] points to. This get_thread // code can then get the thread pointer via FS. - // + // // Warning: This routine must NEVER be inlined since we'd end up with // multiple offsets. // @@ -68,7 +65,7 @@ ThreadLocalStorage::set_thread_ptr_offset(thread_ptr_offset); } #ifdef ASSERT - // Verify that the offset hasn't changed since we initally captured + // Verify that the offset hasn't changed since we initally captured // it. This might happen if we accidentally ended up with an // inlined version of this routine. else { @@ -78,7 +75,7 @@ sub eax, dword ptr FS:[0H]; mov test_thread_ptr_offset, eax }; - assert(test_thread_ptr_offset == ThreadLocalStorage::get_thread_ptr_offset(), + assert(test_thread_ptr_offset == ThreadLocalStorage::get_thread_ptr_offset(), "thread pointer offset from SEH changed"); } #endif // ASSERT @@ -107,13 +104,13 @@ ep.ExceptionRecord = ExceptionRecord; ep.ContextRecord = ContextRecord; - + result = topLevelExceptionFilter(&ep); // We better only get a CONTINUE_EXECUTION from our handler // since we don't have unwind information registered. - guarantee( result == EXCEPTION_CONTINUE_EXECUTION, + guarantee( result == EXCEPTION_CONTINUE_EXECUTION, "Unexpected result from topLevelExceptionFilter"); return(ExceptionContinueExecution); @@ -123,7 +120,7 @@ // Structure containing the Windows Data Structures required // to register our Code Cache exception handler. // We put these in the CodeCache since the API requires -// all addresses in these structures are relative to the Code +// all addresses in these structures are relative to the Code // area registered with RtlAddFunctionTable. typedef struct { char ExceptionHandlerInstr[16]; // jmp HandleExceptionFromCodeCache @@ -137,8 +134,8 @@ // to our topLevelExceptionFilter when we take an exception in our // dynamically generated code. 
// -// Arguments: low and high are the address of the full reserved -// codeCache area +// Arguments: low and high are the address of the full reserved +// codeCache area // bool os::register_code_area(char *low, char *high) { #ifdef AMD64 @@ -160,27 +157,27 @@ masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache)); masm->flush(); - // Create an Unwind Structure specifying no unwind info + // Create an Unwind Structure specifying no unwind info // other than an Exception Handler punwind = &pDCD->unw; punwind->Version = 1; - punwind->Flags = UNW_FLAG_EHANDLER; + punwind->Flags = UNW_FLAG_EHANDLER; punwind->SizeOfProlog = 0; punwind->CountOfCodes = 0; punwind->FrameRegister = 0; punwind->FrameOffset = 0; - punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) - + punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) - (char*)low; punwind->ExceptionData[0] = 0; // This structure describes the covered dynamic code area. - // Addresses are relative to the beginning on the code cache area + // Addresses are relative to the beginning on the code cache area prt = &pDCD->rt; - prt->BeginAddress = 0; + prt->BeginAddress = 0; prt->EndAddress = (ULONG)(high - low); prt->UnwindData = ((char *)punwind - low); - guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low), + guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low), "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable"); #endif // AMD64 @@ -321,7 +318,7 @@ cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap; -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ExtendedPC epc; @@ -372,14 +369,14 @@ // apparently _asm not supported on windows amd64 typedef intptr_t* get_fp_func (); get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*, - StubRoutines::amd64::get_previous_fp_entry()); + StubRoutines::x86::get_previous_fp_entry()); if (func == NULL) return frame(NULL, NULL, NULL); intptr_t* fp = (*func)(); #else intptr_t* fp = _get_previous_fp(); #endif // AMD64 - frame myframe((intptr_t*)os::current_stack_pointer(), + frame myframe((intptr_t*)os::current_stack_pointer(), (intptr_t*)fp, CAST_FROM_FN_PTR(address, os::current_frame)); if (os::is_first_C_frame(&myframe)) { @@ -441,32 +438,32 @@ } extern "C" int SafeFetch32 (int * adr, int Err) { - int rv = Err ; - _try { - rv = *((volatile int *) adr) ; + int rv = Err ; + _try { + rv = *((volatile int *) adr) ; } __except(EXCEPTION_EXECUTE_HANDLER) { - } - return rv ; + } + return rv ; } extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t Err) { - intptr_t rv = Err ; - _try { - rv = *((volatile intptr_t *) adr) ; + intptr_t rv = Err ; + _try { + rv = *((volatile intptr_t *) adr) ; } __except(EXCEPTION_EXECUTE_HANDLER) { - } - return rv ; + } + return rv ; } -extern "C" int SpinPause () { +extern "C" int SpinPause () { #ifdef AMD64 return 0 ; #else // pause == rep:nop - // On systems that don't support pause a rep:nop + // On systems that don't support pause a rep:nop // is executed as a nop. The rep: prefix is ignored. 
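SafeFetch32 and SafeFetchN above are the Windows SEH flavor of a guarded load: the __except clause swallows the access violation and the error value is returned. The Solaris handler earlier in this patch reaches the same goal differently, by recognizing a fault at Fetch32PFI and rewriting the saved PC to Fetch32Resume. As a rough, portable-looking sketch of the same "probe a possibly unmapped address" idea, using sigsetjmp rather than either mechanism (probe_env, probe_segv and safe_fetch32_sketch are the editor's names):

#include <csetjmp>
#include <csignal>

static sigjmp_buf probe_env;                     // per-probe state, illustrative only

static void probe_segv(int) { siglongjmp(probe_env, 1); }   // abandon the faulting load

// Read *adr, returning err_value instead of crashing if the address is unmapped.
int safe_fetch32_sketch(int* adr, int err_value) {
  struct sigaction sa = {}, old = {};
  sa.sa_handler = probe_segv;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGSEGV, &sa, &old);
  int v = err_value;
  if (sigsetjmp(probe_env, 1) == 0) {
    v = *(volatile int*)adr;                     // may fault; the handler jumps back here
  }
  sigaction(SIGSEGV, &old, 0);                   // restore the previous handler
  return v;
}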
- _asm { - pause ; + _asm { + pause ; }; return 1 ; #endif // AMD64 @@ -479,4 +476,3 @@ __asm fldcw fpu_cntrl_word; #endif // !AMD64 } - --- old/hotspot/src/share/tools/MakeDeps/Database.java 2009-08-01 04:09:12.028629865 +0100 +++ new/hotspot/src/share/tools/MakeDeps/Database.java 2009-08-01 04:09:11.947408368 +0100 @@ -36,6 +36,7 @@ private FileList outerFiles; private FileList indivIncludes; private FileList grandInclude; // the results for the grand include file + private HashMap platformDepFiles; private long threshold; private int nOuterFiles; private int nPrecompiledFiles; @@ -57,6 +58,7 @@ outerFiles = new FileList("outerFiles", plat); indivIncludes = new FileList("IndivIncludes", plat); grandInclude = new FileList(plat.getGIFileTemplate().nameOfList(), plat); + platformDepFiles = new HashMap(); threshold = t; nOuterFiles = 0; @@ -209,6 +211,10 @@ FileList p = allFiles.listForFile(includer); p.setPlatformDependentInclude(pdName.dirPreStemSuff()); + // Record the implicit include of this file so that the + // dependencies for precompiled headers can mention it. + platformDepFiles.put(newIncluder, includer); + // Add an implicit dependency on platform // specific file for the generic file @@ -408,6 +414,12 @@ for (Iterator iter = grandInclude.iterator(); iter.hasNext(); ) { FileList list = (FileList) iter.next(); gd.println(list.getName() + " \\"); + String platformDep = platformDepFiles.get(list.getName()); + if (platformDep != null) { + // make sure changes to the platform dependent file will + // cause regeneration of the pch file. + gd.println(platformDep + " \\"); + } } gd.println(); gd.println(); --- old/hotspot/src/share/vm/adlc/adlparse.cpp 2009-08-01 04:09:13.785472610 +0100 +++ new/hotspot/src/share/vm/adlc/adlparse.cpp 2009-08-01 04:09:13.678575943 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)adlparse.cpp 1.205 07/05/05 17:05:00 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,7 +36,6 @@ _globalNames(archDesc.globalNames()) { _AD._syntax_errs = _AD._semantic_errs = 0; // No errors so far this file _AD._warnings = 0; // No warnings either - _linenum = 0; // Will increment to first line _curline = _ptr = NULL; // No pointers into buffer yet _preproc_depth = 0; @@ -79,7 +78,7 @@ } if (!_AD._quiet_mode) fprintf(stderr,"-----------------------------------------------------------------------------\n"); - _AD._TotalLines += _linenum-1; // -1 for overshoot in "nextline" routine + _AD._TotalLines += linenum()-1; // -1 for overshoot in "nextline" routine // Write out information we have stored // // UNIXism == fsync(stderr); @@ -112,6 +111,7 @@ else if (!strcmp(ident, "pipeline")) pipe_parse(); else if (!strcmp(ident, "definitions")) definitions_parse(); else if (!strcmp(ident, "peephole")) peep_parse(); + else if (!strcmp(ident, "#line")) preproc_line(); else if (!strcmp(ident, "#define")) preproc_define(); else if (!strcmp(ident, "#undef")) preproc_undef(); else { @@ -151,7 +151,7 @@ if( (ident = get_unique_ident(_globalNames,"instruction")) == NULL ) return; instr = new InstructForm(ident); // Create new instruction form - instr->_linenum = _linenum; + instr->_linenum = linenum(); _globalNames.Insert(ident, instr); // Add name to the name table // Debugging Stuff if (_AD._adl_debug > 1) @@ -407,7 +407,7 @@ if( (ident = get_unique_ident(_globalNames,"operand")) == NULL ) return; oper = new OperandForm(ident); // Create new operand form - oper->_linenum = _linenum; + oper->_linenum = linenum(); _globalNames.Insert(ident, oper); // Add name to the name table // Debugging Stuff @@ -777,7 +777,7 @@ // Create the RegisterForm for the architecture description. RegisterForm *regBlock = new RegisterForm(); // Build new Source object - regBlock->_linenum = _linenum; + regBlock->_linenum = linenum(); _AD.addForm(regBlock); skipws(); // Skip leading whitespace @@ -790,9 +790,11 @@ parse_err(SYNERR, "missing identifier inside register block.\n"); return; } - if (strcmp(token,"reg_def")==0) { reg_def_parse(); } - if (strcmp(token,"reg_class")==0) { reg_class_parse(); } - if (strcmp(token,"alloc_class")==0) { alloc_class_parse(); } + if (strcmp(token,"reg_def")==0) { reg_def_parse(); } + else if (strcmp(token,"reg_class")==0) { reg_class_parse(); } + else if (strcmp(token,"alloc_class")==0) { alloc_class_parse(); } + else if (strcmp(token,"#define")==0) { preproc_define(); } + else { parse_err(SYNERR, "bad token %s inside register block.\n", token); break; } skipws(); } } @@ -850,7 +852,7 @@ } EncClass *encoding = _AD._encode->add_EncClass(ec_name); - encoding->_linenum = _linenum; + encoding->_linenum = linenum(); skipws(); // Skip leading whitespace // Check for optional parameter list @@ -907,11 +909,7 @@ skipws_no_preproc(); // Skip leading whitespace // Prepend location descriptor, for debugging; cf. 
ADLParser::find_cpp_block if (_AD._adlocation_debug) { - const char* file = _AD._ADL_file._name; - int line = _linenum; - char* location = (char *)malloc(strlen(file) + 100); - sprintf(location, "#line %d \"%s\"\n", line, file); - encoding->add_code(location); + encoding->add_code(get_line_string()); } // Collect the parts of the encode description @@ -952,6 +950,10 @@ skipws(); + if (_AD._adlocation_debug) { + encoding->add_code(end_line_marker()); + } + // Debug Stuff if (_AD._adl_debug > 1) fprintf(stderr,"EncodingClass Form: %s\n", ec_name); } @@ -2353,7 +2355,11 @@ return; } RegDef *regDef = _AD._register->getRegDef(rname); - reg_class->addReg(regDef); // add regDef to regClass + if (!regDef) { + parse_err(SEMERR, "unknown identifier %s inside reg_class list.\n", rname); + } else { + reg_class->addReg(regDef); // add regDef to regClass + } // Check for ',' and position to next token. skipws(); @@ -2750,7 +2756,8 @@ char *rule = NULL; // String representation of predicate skipws(); // Skip leading whitespace - if ( (rule = get_paren_expr("pred expression")) == NULL ) { + int line = linenum(); + if ( (rule = get_paren_expr("pred expression", true)) == NULL ) { parse_err(SYNERR, "incorrect or missing expression for 'predicate'\n"); return NULL; } @@ -2779,7 +2786,7 @@ assert(_AD._encode->encClass(ec_name) == NULL, "shouldn't already exist"); EncClass *encoding = _AD._encode->add_EncClass(ec_name); - encoding->_linenum = _linenum; + encoding->_linenum = linenum(); // synthesize the arguments list for the enc_class from the // arguments to the instruct definition. @@ -2855,7 +2862,7 @@ skipws(); InsEncode *encrule = new InsEncode(); // Encode class for instruction - encrule->_linenum = _linenum; + encrule->_linenum = linenum(); char *ec_name = NULL; // String representation of encode rule // identifier is optional. 
while (_curchar != ')') { @@ -3206,7 +3213,13 @@ char *greater_equal; char *less_equal; char *greater; - + const char *equal_format = "eq"; + const char *not_equal_format = "ne"; + const char *less_format = "lt"; + const char *greater_equal_format = "ge"; + const char *less_equal_format = "le"; + const char *greater_format = "gt"; + if (_curchar != '%') { parse_err(SYNERR, "Missing '%{' for 'cond_interface' block.\n"); return NULL; @@ -3225,22 +3238,22 @@ return NULL; } if ( strcmp(field,"equal") == 0 ) { - equal = interface_field_parse(); + equal = interface_field_parse(&equal_format); } else if ( strcmp(field,"not_equal") == 0 ) { - not_equal = interface_field_parse(); + not_equal = interface_field_parse(¬_equal_format); } else if ( strcmp(field,"less") == 0 ) { - less = interface_field_parse(); + less = interface_field_parse(&less_format); } else if ( strcmp(field,"greater_equal") == 0 ) { - greater_equal = interface_field_parse(); + greater_equal = interface_field_parse(&greater_equal_format); } else if ( strcmp(field,"less_equal") == 0 ) { - less_equal = interface_field_parse(); + less_equal = interface_field_parse(&less_equal_format); } else if ( strcmp(field,"greater") == 0 ) { - greater = interface_field_parse(); + greater = interface_field_parse(&greater_format); } else { parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%}' ending interface.\n"); @@ -3255,14 +3268,18 @@ next_char(); // Skip '}' // Construct desired object and return - Interface *inter = new CondInterface(equal, not_equal, less, greater_equal, - less_equal, greater); + Interface *inter = new CondInterface(equal, equal_format, + not_equal, not_equal_format, + less, less_format, + greater_equal, greater_equal_format, + less_equal, less_equal_format, + greater, greater_format); return inter; } //------------------------------interface_field_parse-------------------------- -char *ADLParser::interface_field_parse(void) { +char *ADLParser::interface_field_parse(const char ** format) { char *iface_field = NULL; // Get interface field @@ -3283,6 +3300,32 @@ return NULL; } skipws(); + if (format != NULL && _curchar == ',') { + next_char(); + skipws(); + if (_curchar != '"') { + parse_err(SYNERR, "Missing '\"' in field format .\n"); + return NULL; + } + next_char(); + char *start = _ptr; // Record start of the next string + while ((_curchar != '"') && (_curchar != '%') && (_curchar != '\n')) { + if (_curchar == '\\') next_char(); // superquote + if (_curchar == '\n') parse_err(SYNERR, "newline in string"); // unimplemented! 
+ next_char(); + } + if (_curchar != '"') { + parse_err(SYNERR, "Missing '\"' at end of field format .\n"); + return NULL; + } + // If a string was found, terminate it and record in FormatRule + if ( start != _ptr ) { + *_ptr = '\0'; // Terminate the string + *format = start; + } + next_char(); + skipws(); + } if (_curchar != ')') { parse_err(SYNERR, "Missing ')' after interface field.\n"); return NULL; @@ -3345,6 +3388,12 @@ next_char(); // Move past the '{' skipws(); + if (_curchar == '$') { + char* ident = get_rep_var_ident(); + if (strcmp(ident, "$$template") == 0) return template_parse(); + parse_err(SYNERR, "Unknown \"%s\" directive in format", ident); + return NULL; + } // Check for the opening '"' inside the format description if ( _curchar == '"' ) { next_char(); // Move past the initial '"' @@ -3369,8 +3418,13 @@ // Check if there is a string to pass through to output char *start = _ptr; // Record start of the next string while ((_curchar != '$') && (_curchar != '"') && (_curchar != '%') && (_curchar != '\n')) { - if (_curchar == '\\') next_char(); // superquote - if (_curchar == '\n') parse_err(SYNERR, "newline in string"); // unimplemented! + if (_curchar == '\\') { + next_char(); // superquote + if ((_curchar == '$') || (_curchar == '%')) + // hack to avoid % escapes and warnings about undefined \ escapes + *(_ptr-1) = _curchar; + } + if (_curchar == '\n') parse_err(SYNERR, "newline in string"); // unimplemented! next_char(); } // If a string was found, terminate it and record in FormatRule @@ -3436,6 +3490,131 @@ } +//------------------------------template_parse----------------------------------- +FormatRule* ADLParser::template_parse(void) { + char *desc = NULL; + FormatRule *format = (new FormatRule(desc)); + + skipws(); + while ( (_curchar != '%') && (*(_ptr+1) != '}') ) { + + // (1) + // Check if there is a string to pass through to output + char *start = _ptr; // Record start of the next string + while ((_curchar != '$') && ((_curchar != '%') || (*(_ptr+1) != '}')) ) { + // If at the start of a comment, skip past it + if( (_curchar == '/') && ((*(_ptr+1) == '/') || (*(_ptr+1) == '*')) ) { + skipws_no_preproc(); + } else { + // ELSE advance to the next character, or start of the next line + next_char_or_line(); + } + } + // If a string was found, terminate it and record in EncClass + if ( start != _ptr ) { + *_ptr = '\0'; // Terminate the string + // Add flag to _strings list indicating we should check _rep_vars + format->_strings.addName(NameList::_signal2); + format->_strings.addName(start); + } + + // (2) + // If we are at a replacement variable, + // copy it and record in EncClass + if ( _curchar == '$' ) { + // Found replacement Variable + char *rep_var = get_rep_var_ident_dup(); + if (strcmp(rep_var, "$emit") == 0) { + // switch to normal format parsing + next_char(); + next_char(); + skipws(); + // Check for the opening '"' inside the format description + if ( _curchar == '"' ) { + next_char(); // Move past the initial '"' + if( _curchar == '"' ) { // Handle empty format string case + *_ptr = '\0'; // Terminate empty string + format->_strings.addName(_ptr); + } + + // Collect the parts of the format description + // (1) strings that are passed through to tty->print + // (2) replacement/substitution variable, preceeded by a '$' + // (3) multi-token ANSIY C style strings + while ( true ) { + if ( _curchar == '%' || _curchar == '\n' ) { + parse_err(SYNERR, "missing '\"' at end of format block"); + return NULL; + } + + // (1) + // Check if there is a string to pass 
through to output + char *start = _ptr; // Record start of the next string + while ((_curchar != '$') && (_curchar != '"') && (_curchar != '%') && (_curchar != '\n')) { + if (_curchar == '\\') next_char(); // superquote + if (_curchar == '\n') parse_err(SYNERR, "newline in string"); // unimplemented! + next_char(); + } + // If a string was found, terminate it and record in FormatRule + if ( start != _ptr ) { + *_ptr = '\0'; // Terminate the string + format->_strings.addName(start); + } + + // (2) + // If we are at a replacement variable, + // copy it and record in FormatRule + if ( _curchar == '$' ) { + next_char(); // Move past the '$' + char* rep_var = get_ident(); // Nil terminate the variable name + rep_var = strdup(rep_var);// Copy the string + *_ptr = _curchar; // and replace Nil with original character + format->_rep_vars.addName(rep_var); + // Add flag to _strings list indicating we should check _rep_vars + format->_strings.addName(NameList::_signal); + } + + // (3) + // Allow very long strings to be broken up, + // using the ANSI C syntax "foo\n" "bar" + if ( _curchar == '"') { + next_char(); // Move past the '"' + skipws(); // Skip white space before next string token + if ( _curchar != '"') { + break; + } else { + // Found one. Skip both " and the whitespace in between. + next_char(); + } + } + } // end while part of format description + } + } else { + // Add flag to _strings list indicating we should check _rep_vars + format->_rep_vars.addName(rep_var); + // Add flag to _strings list indicating we should check _rep_vars + format->_strings.addName(NameList::_signal3); + } + } // end while part of format description + } + + skipws(); + // Past format description, at '%' + if ( _curchar != '%' || *(_ptr+1) != '}' ) { + parse_err(SYNERR, "missing '%}' at end of format block"); + return NULL; + } + next_char(); // Move past the '%' + next_char(); // Move past the '}' + + // Debug Stuff + if (_AD._adl_debug > 1) fprintf(stderr,"Format Rule: %s\n", desc); + + skipws(); + return format; +} + + //------------------------------effect_parse----------------------------------- void ADLParser::effect_parse(InstructForm *instr) { char* desc = NULL; @@ -3779,8 +3958,7 @@ next_char(); // Skip block delimiter skipws_no_preproc(); // Skip leading whitespace cppBlock = _ptr; // Point to start of expression - const char* file = _AD._ADL_file._name; - int line = _linenum; + int line = linenum(); next = _ptr + 1; while(((_curchar != '%') || (*next != '}')) && (_curchar != '\0')) { next_char_or_line(); @@ -3795,15 +3973,16 @@ _curchar = *_ptr; // Maintain invariant // Prepend location descriptor, for debugging. 
- char* location = (char *)malloc(strlen(file) + 100); - *location = '\0'; - if (_AD._adlocation_debug) - sprintf(location, "#line %d \"%s\"\n", line, file); - char* result = (char *)malloc(strlen(location) + strlen(cppBlock) + 1); - strcpy(result, location); - strcat(result, cppBlock); - cppBlock = result; - free(location); + if (_AD._adlocation_debug) { + char* location = get_line_string(line); + char* end_loc = end_line_marker(); + char* result = (char *)malloc(strlen(location) + strlen(cppBlock) + strlen(end_loc) + 1); + strcpy(result, location); + strcat(result, cppBlock); + strcat(result, end_loc); + cppBlock = result; + free(location); + } } return cppBlock; @@ -3873,13 +4052,26 @@ // Helper function around get_expr // Sets _curchar to '(' so that get_paren_expr will search for a matching ')' -char *ADLParser::get_paren_expr(const char *description) { +char *ADLParser::get_paren_expr(const char *description, bool include_location) { + int line = linenum(); if (_curchar != '(') // Escape if not valid starting position return NULL; next_char(); // Skip the required initial paren. char *token2 = get_expr(description, ")"); if (_curchar == ')') - next_char(); // Skip required final paren. + next_char(); // Skip required final paren. + int junk = 0; + if (include_location && _AD._adlocation_debug && !is_int_token(token2, junk)) { + // Prepend location descriptor, for debugging. + char* location = get_line_string(line); + char* end_loc = end_line_marker(); + char* result = (char *)malloc(strlen(location) + strlen(token2) + strlen(end_loc) + 1); + strcpy(result, location); + strcat(result, token2); + strcat(result, end_loc); + token2 = result; + free(location); + } return token2; } @@ -3919,10 +4111,16 @@ if (do_preproc && start != NULL) { const char* def = _AD.get_preproc_def(start); if (def != NULL && strcmp(def, start)) { - const char* def2 = _AD.get_preproc_def(def); - if (def2 != NULL && strcmp(def2, def)) { - parse_err(SYNERR, "unimplemented: using %s defined as %s => %s", - start, def, def2); + const char* def1 = def; + const char* def2 = _AD.get_preproc_def(def1); + // implement up to 2 levels of #define + if (def2 != NULL && strcmp(def2, def1)) { + def = def2; + const char* def3 = _AD.get_preproc_def(def2); + if (def3 != NULL && strcmp(def3, def2) && strcmp(def3, def1)) { + parse_err(SYNERR, "unimplemented: using %s defined as %s => %s => %s", + start, def1, def2, def3); + } } start = strdup(def); } @@ -4268,6 +4466,35 @@ } +//-------------------------------preproc_line---------------------------------- +// A "#line" keyword has been seen, so parse the rest of the line. +void ADLParser::preproc_line(void) { + int line = get_int(); + skipws_no_preproc(); + const char* file = NULL; + if (_curchar == '"') { + next_char(); // Move past the initial '"' + file = _ptr; + while (true) { + if (_curchar == '\n') { + parse_err(SYNERR, "missing '\"' at end of #line directive"); + return; + } + if (_curchar == '"') { + *_ptr = '\0'; // Terminate the string + next_char(); + skipws_no_preproc(); + break; + } + next_char(); + } + } + ensure_end_of_line(); + if (file != NULL) + _AD._ADL_file._name = file; + _buf.set_linenum(line); +} + //------------------------------preproc_define--------------------------------- // A "#define" keyword has been seen, so parse the rest of the line. 
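The new preproc_line() above lets the parser consume a #line N "file" directive and resynchronize its notion of the current .ad file and line. It pairs with get_line_string() and end_line_marker(), used elsewhere in this change to emit such directives into generated code so that C++ compiler diagnostics in the generated ad_*.cpp files point back at the .ad source. A minimal sketch of the emitting side; emit_user_block and its parameters are the editor's illustration, not adlc's API.

#include <cstdio>

// Copy a user code block into generated output, bracketed by #line markers:
// the leading marker maps diagnostics back to the .ad source, and the trailing
// "#line 999999" is the placeholder that, per the end_line_marker() comment in
// adlparse.hpp, is fixed up by later (awk) postprocessing.
void emit_user_block(FILE* out, const char* adl_file, int adl_line,
                     const char* user_code) {
  fprintf(out, "\n#line %d \"%s\"\n", adl_line, adl_file);
  fputs(user_code, out);
  fprintf(out, "\n#line 999999\n");
}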
void ADLParser::preproc_define(void) { @@ -4300,11 +4527,11 @@ va_start(args, fmt); if (flag == 1) - _AD._syntax_errs += _AD.emit_msg(0, flag, _linenum, fmt, args); + _AD._syntax_errs += _AD.emit_msg(0, flag, linenum(), fmt, args); else if (flag == 2) - _AD._semantic_errs += _AD.emit_msg(0, flag, _linenum, fmt, args); + _AD._semantic_errs += _AD.emit_msg(0, flag, linenum(), fmt, args); else - _AD._warnings += _AD.emit_msg(0, flag, _linenum, fmt, args); + _AD._warnings += _AD.emit_msg(0, flag, linenum(), fmt, args); int error_char = _curchar; char* error_ptr = _ptr+1; @@ -4331,6 +4558,7 @@ // A preprocessor directive has been encountered. Be sure it has fallen at // the begining of a line, or else report an error. void ADLParser::ensure_start_of_line(void) { + if (_curchar == '\n') { next_line(); return; } assert( _ptr >= _curline && _ptr < _curline+strlen(_curline), "Must be able to find which line we are in" ); @@ -4499,6 +4727,7 @@ //---------------------------next_char----------------------------------------- void ADLParser::next_char() { + if (_curchar == '\n') parse_err(WARN, "must call next_line!"); _curchar = *++_ptr; // if ( _curchar == '\n' ) { // next_line(); @@ -4518,7 +4747,19 @@ //---------------------------next_line----------------------------------------- void ADLParser::next_line() { - _curline = _buf.get_line(); _linenum++; + _curline = _buf.get_line(); + _curchar = ' '; +} + +//------------------------get_line_string-------------------------------------- +// Prepended location descriptor, for debugging. +// Must return a malloced string (that can be freed if desired). +char* ADLParser::get_line_string(int linenum) { + const char* file = _AD._ADL_file._name; + int line = linenum ? linenum : this->linenum(); + char* location = (char *)malloc(strlen(file) + 100); + sprintf(location, "\n#line %d \"%s\"\n", line, file); + return location; } //-------------------------is_literal_constant--------------------------------- @@ -4559,6 +4800,66 @@ return true; } +static const char* skip_expr_ws(const char* str) { + const char * cp = str; + while (cp[0]) { + if (cp[0] <= ' ') { + ++cp; + } else if (cp[0] == '#') { + ++cp; + while (cp[0] == ' ') ++cp; + assert(0 == strncmp(cp, "line", 4), "must be a #line directive"); + const char* eol = strchr(cp, '\n'); + assert(eol != NULL, "must find end of line"); + if (eol == NULL) eol = cp + strlen(cp); + cp = eol; + } else { + break; + } + } + return cp; +} + +//-----------------------equivalent_expressions-------------------------------- +bool ADLParser::equivalent_expressions(const char* str1, const char* str2) { + if (str1 == str2) + return true; + else if (str1 == NULL || str2 == NULL) + return false; + const char* cp1 = str1; + const char* cp2 = str2; + char in_quote = '\0'; + while (cp1[0] && cp2[0]) { + if (!in_quote) { + // skip spaces and/or cpp directives + const char* cp1a = skip_expr_ws(cp1); + const char* cp2a = skip_expr_ws(cp2); + if (cp1a > cp1 && cp2a > cp2) { + cp1 = cp1a; cp2 = cp2a; + continue; + } + if (cp1a > cp1 || cp2a > cp2) break; // fail + } + // match one non-space char + if (cp1[0] != cp2[0]) break; // fail + char ch = cp1[0]; + cp1++; cp2++; + // watch for quotes + if (in_quote && ch == '\\') { + if (cp1[0] != cp2[0]) break; // fail + if (!cp1[0]) break; + cp1++; cp2++; + } + if (in_quote && ch == in_quote) { + in_quote = '\0'; + } else if (!in_quote && (ch == '"' || ch == '\'')) { + in_quote = ch; + } + } + return (!cp1[0] && !cp2[0]); +} + + 
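equivalent_expressions() above compares two expression strings while skipping, on both sides, whitespace and any embedded #line markers, so two copies of the same predicate captured from different .ad locations still compare equal; later hunks in this patch (archDesc.cpp and formssel.cpp) switch predicate comparison over to it. A small usage sketch, with file names and line numbers invented for illustration:

#include "adlparse.hpp"   // assumes the adlc headers are on the include path

bool demo_equivalent_predicates() {
  // Two captures of the same predicate, each carrying its own leading #line
  // marker and trailing reset marker, plus slightly different spacing.
  const char* p1 = "\n#line 120 \"example1.ad\"\n n->get_int() == 16\n#line 999999\n";
  const char* p2 = "\n#line 385 \"example2.ad\"\n  n->get_int()  ==  16\n#line 999999\n";
  // strcmp() would call these different; the marker-aware compare does not.
  return ADLParser::equivalent_expressions(p1, p2);   // expected: true
}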
//-------------------------------trim------------------------------------------ void ADLParser::trim(char* &token) { while (*token <= ' ') token++; --- old/hotspot/src/share/vm/adlc/adlparse.hpp 2009-08-01 04:09:14.982797710 +0100 +++ new/hotspot/src/share/vm/adlc/adlparse.hpp 2009-08-01 04:09:14.898263941 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)adlparse.hpp 1.79 07/05/05 17:05:00 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,7 +73,6 @@ protected: char *_curline; // Start of current line char *_ptr; // Pointer into current location in File Buffer - int _linenum; // Count of line numbers seen so far char _curchar; // Current character from buffer FormDict &_globalNames; // Global names @@ -97,6 +96,7 @@ void pipe_parse(void); // Parse pipeline section void definitions_parse(void); // Parse definitions section void peep_parse(void); // Parse peephole rule definitions + void preproc_line(void); // Parse a #line statement void preproc_define(void); // Parse a #define statement void preproc_undef(void); // Parse an #undef statement @@ -163,9 +163,10 @@ Interface *interface_parse(); // Parse operand interface rule Interface *mem_interface_parse(); // Parse memory interface rule Interface *cond_interface_parse(); // Parse conditional interface rule - char *interface_field_parse();// Parse field contents + char *interface_field_parse(const char** format = NULL);// Parse field contents FormatRule *format_parse(void); // Parse format rule + FormatRule *template_parse(void); // Parse format rule void effect_parse(InstructForm *instr); // Parse effect rule ExpandRule *expand_parse(InstructForm *instr); // Parse expand rule RewriteRule *rewrite_parse(void); // Parse rewrite rule @@ -229,7 +230,7 @@ void get_effectlist(FormDict &effects, FormDict &operands); // Parse effect-operand pairs // Return the contents of a parenthesized expression. // Requires initial '(' and consumes final ')', which is replaced by '\0'. - char *get_paren_expr(const char *description); + char *get_paren_expr(const char *description, bool include_location = false); // Return expression up to next stop-char, which terminator replaces. // Does not require initial '('. Does not consume final stop-char. // Final stop-char is left in _curchar, but is also is replaced by '\0'. @@ -237,6 +238,11 @@ char *find_cpp_block(const char *description); // Parse a C++ code block // Issue parser error message & go to EOL void parse_err(int flag, const char *fmt, ...); + // Create a location marker for this file and line. + char *get_line_string(int linenum = 0); + // Return a location marker which tells the C preprocessor to + // forget the previous location marker. (Requires awk postprocessing.) 
+ char *end_line_marker() { return (char*)"\n#line 999999\n"; } // Return pointer to current character inline char cur_char(void); @@ -266,11 +272,12 @@ void parse(void); // Do the parsing & build forms lists - int getlines( ) { return _linenum; } + int linenum() { return _buf.linenum(); } static bool is_literal_constant(const char *hex_string); static bool is_hex_digit(char digit); static bool is_int_token(const char* token, int& intval); + static bool equivalent_expressions(const char* str1, const char* str2); static void trim(char* &token); // trim leading & trailing spaces }; --- old/hotspot/src/share/vm/adlc/archDesc.cpp 2009-08-01 04:09:15.898707506 +0100 +++ new/hotspot/src/share/vm/adlc/archDesc.cpp 2009-08-01 04:09:15.804239806 +0100 @@ -1,5 +1,5 @@ // -// Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -137,11 +137,11 @@ if ((res == _resultStr) || (res && _resultStr && !strcmp(res, _resultStr))) { if ((lch == _lchild) || (lch && _lchild && !strcmp(lch, _lchild))) { if ((rch == _rchild) || (rch && _rchild && !strcmp(rch, _rchild))) { - char * predStr = get_pred(); - char * prStr = pr?pr->_pred:NULL; - if ((prStr == predStr) || (prStr && predStr && !strcmp(prStr, predStr))) { - return true; - } + char * predStr = get_pred(); + char * prStr = pr?pr->_pred:NULL; + if (ADLParser::equivalent_expressions(prStr, predStr)) { + return true; + } } } } @@ -211,9 +211,9 @@ // Initialize I/O Files _ADL_file._name = NULL; _ADL_file._fp = NULL; // Machine dependent output files - _DFA_file._name = "dfa_i486.cpp"; _DFA_file._fp = NULL; - _HPP_file._name = "ad_i486.hpp"; _HPP_file._fp = NULL; - _CPP_file._name = "ad_i486.cpp"; _CPP_file._fp = NULL; + _DFA_file._name = NULL; _DFA_file._fp = NULL; + _HPP_file._name = NULL; _HPP_file._fp = NULL; + _CPP_file._name = NULL; _CPP_file._fp = NULL; _bug_file._name = "bugs.out"; _bug_file._fp = NULL; // Initialize Register & Pipeline Form Pointers @@ -866,6 +866,7 @@ Form *form = (Form*)_globalNames[result]; assert( form, "Result operand must be defined"); OperandForm *oper = form->is_operand(); + if (oper == NULL) form->dump(); assert( oper, "Result must be an OperandForm"); return reg_mask( *oper ); } @@ -907,6 +908,7 @@ switch( last_char ) { case 'I': return "TypeInt::INT"; case 'P': return "TypePtr::BOTTOM"; + case 'N': return "TypeNarrowOop::BOTTOM"; case 'F': return "Type::FLOAT"; case 'D': return "Type::DOUBLE"; case 'L': return "TypeLong::LONG"; @@ -943,7 +945,7 @@ // Create InstructForm and assign type for each ideal instruction. 
for ( int j = _last_machine_leaf+1; j < _last_opcode; ++j) { char *ident = (char *)NodeClassNames[j]; - if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") || + if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") || !strcmp(ident, "ConN") || !strcmp(ident, "ConF") || !strcmp(ident, "ConD") || !strcmp(ident, "ConL") || !strcmp(ident, "Con" ) || !strcmp(ident, "Bool") ) { @@ -1108,6 +1110,7 @@ if ( strcmp(idealName,"CmpI") == 0 || strcmp(idealName,"CmpU") == 0 || strcmp(idealName,"CmpP") == 0 + || strcmp(idealName,"CmpN") == 0 || strcmp(idealName,"CmpL") == 0 || strcmp(idealName,"CmpD") == 0 || strcmp(idealName,"CmpF") == 0 --- old/hotspot/src/share/vm/adlc/dfa.cpp 2009-08-01 04:09:16.842612825 +0100 +++ new/hotspot/src/share/vm/adlc/dfa.cpp 2009-08-01 04:09:16.757052287 +0100 @@ -461,7 +461,7 @@ class dfa_shared_preds { - enum { count = 2 }; + enum { count = 4 }; static bool _found[count]; static const char* _type [count]; @@ -482,12 +482,15 @@ char c = *prev; switch( c ) { case ' ': + case '\n': return dfa_shared_preds::valid_loc(pred, prev); case '!': case '(': case '<': case '=': return true; + case '"': // such as: #line 10 "myfile.ad"\n mypredicate + return true; case '|': if( prev != pred && *(prev-1) == '|' ) return true; case '&': @@ -567,10 +570,14 @@ } }; // shared predicates, _var and _pred entry should be the same length -bool dfa_shared_preds::_found[dfa_shared_preds::count] = { false, false }; -const char* dfa_shared_preds::_type[dfa_shared_preds::count] = { "int", "bool" }; -const char* dfa_shared_preds::_var [dfa_shared_preds::count] = { "_n_get_int__", "Compile__current____select_24_bit_instr__" }; -const char* dfa_shared_preds::_pred[dfa_shared_preds::count] = { "n->get_int()", "Compile::current()->select_24_bit_instr()" }; +bool dfa_shared_preds::_found[dfa_shared_preds::count] + = { false, false, false, false }; +const char* dfa_shared_preds::_type[dfa_shared_preds::count] + = { "int", "jlong", "intptr_t", "bool" }; +const char* dfa_shared_preds::_var [dfa_shared_preds::count] + = { "_n_get_int__", "_n_get_long__", "_n_get_intptr_t__", "Compile__current____select_24_bit_instr__" }; +const char* dfa_shared_preds::_pred[dfa_shared_preds::count] + = { "n->get_int()", "n->get_long()", "n->get_intptr_t()", "Compile::current()->select_24_bit_instr()" }; void ArchDesc::gen_dfa_state_body(FILE* fp, Dict &minimize, ProductionState &status, Dict &operands_chained_from, int i) { --- old/hotspot/src/share/vm/adlc/dict2.cpp 2009-08-01 04:09:17.742156756 +0100 +++ new/hotspot/src/share/vm/adlc/dict2.cpp 2009-08-01 04:09:17.649715956 +0100 @@ -319,9 +319,12 @@ return strcmp((const char *)k1,(const char *)k2); } -// Slimey cheap key comparator. +// Cheap key comparator. int cmpkey(const void *key1, const void *key2) { - return (int)((intptr_t)key1 - (intptr_t)key2); + if (key1 == key2) return 0; + intptr_t delta = (intptr_t)key1 - (intptr_t)key2; + if (delta > 0) return 1; + return -1; } //============================================================================= --- old/hotspot/src/share/vm/adlc/filebuff.cpp 2009-08-01 04:09:18.602982848 +0100 +++ new/hotspot/src/share/vm/adlc/filebuff.cpp 2009-08-01 04:09:18.513693411 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)filebuff.cpp 1.30 07/05/05 17:05:01 JVM" #endif /* - * Copyright 1997-2002 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
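The dict2.cpp hunk above replaces cmpkey()'s truncating pointer subtraction with a genuine three-way compare. On an LP64 host the old form narrowed a 64-bit difference to int, which can flip the sign or collapse it to zero and so mis-order or falsely equate keys. A self-contained demonstration; the key addresses are fabricated for illustration and assume a 64-bit host.

#include <cstdint>
#include <cstdio>

static int cmp_truncating(const void* k1, const void* k2) {   // old form
  return (int)((intptr_t)k1 - (intptr_t)k2);
}

static int cmp_three_way(const void* k1, const void* k2) {    // new form
  if (k1 == k2) return 0;
  intptr_t delta = (intptr_t)k1 - (intptr_t)k2;
  return delta > 0 ? 1 : -1;
}

int main() {
  // Keys exactly 4 GiB apart: the low 32 bits of the difference are zero, so
  // the truncating compare reports "equal" even though k1 sorts before k2.
  const void* k1 = (const void*)(uintptr_t)0x100000000ULL;
  const void* k2 = (const void*)(uintptr_t)0x200000000ULL;
  printf("truncating=%d  three_way=%d\n", cmp_truncating(k1, k2), cmp_three_way(k1, k2));
  return 0;
}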
* * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,7 @@ exit(1); // Exit on seek error } _filepos = ftell(_fp->_fp); // Reset current file position + _linenum = 0; _bigbuf = new char[_bufferSize]; // Create buffer to hold text for parser if( !_bigbuf ) { @@ -79,6 +80,7 @@ // Check for end of file & return NULL if (_bufeol >= _bufmax) return NULL; + _linenum++; retval = ++_bufeol; // return character following end of previous line if (*retval == '\0') return NULL; // Check for EOF sentinal // Search for newline character which must end each line --- old/hotspot/src/share/vm/adlc/filebuff.hpp 2009-08-01 04:09:19.508134401 +0100 +++ new/hotspot/src/share/vm/adlc/filebuff.hpp 2009-08-01 04:09:19.429940757 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)filebuff.hpp 1.27 07/05/05 17:05:01 JVM" #endif /* - * Copyright 1997-2004 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,6 +54,7 @@ int _err; // Error flag for file seek/read operations long _filepos; // Current offset from start of file + int _linenum; ArchDesc& _AD; // Reference to Architecture Description @@ -69,6 +70,8 @@ // This returns a pointer to the start of the current line in the buffer, // and increments bufeol and filepos to point at the end of that line. char *get_line(void); + int linenum() const { return _linenum; } + void set_linenum(int line) { _linenum = line; } // This converts a pointer into the buffer to a file offset. It only works // when the pointer is valid (i.e. just obtained from getline()). --- old/hotspot/src/share/vm/adlc/forms.cpp 2009-08-01 04:09:20.350578912 +0100 +++ new/hotspot/src/share/vm/adlc/forms.cpp 2009-08-01 04:09:20.266079384 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)forms.cpp 1.161 07/05/05 17:04:59 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,9 @@ //------------------------------NameList--------------------------------------- // reserved user-defined string -const char *NameList::_signal = "$$SIGNAL$$"; +const char *NameList::_signal = "$$SIGNAL$$"; +const char *NameList::_signal2 = "$$SIGNAL2$$"; +const char *NameList::_signal3 = "$$SIGNAL3$$"; // Constructor and Destructor NameList::NameList() : _cur(0), _max(4), _iter(0), _justReset(true) { @@ -214,6 +216,7 @@ if (strcmp(name,"ConI")==0) return Form::idealI; if (strcmp(name,"ConP")==0) return Form::idealP; + if (strcmp(name,"ConN")==0) return Form::idealN; if (strcmp(name,"ConL")==0) return Form::idealL; if (strcmp(name,"ConF")==0) return Form::idealF; if (strcmp(name,"ConD")==0) return Form::idealD; @@ -254,11 +257,13 @@ if( strcmp(opType,"LoadF")==0 ) return Form::idealF; if( strcmp(opType,"LoadI")==0 ) return Form::idealI; if( strcmp(opType,"LoadKlass")==0 ) return Form::idealP; + if( strcmp(opType,"LoadNKlass")==0 ) return Form::idealN; if( strcmp(opType,"LoadL")==0 ) return Form::idealL; if( strcmp(opType,"LoadL_unaligned")==0 ) return Form::idealL; if( strcmp(opType,"LoadPLocked")==0 ) return Form::idealP; if( strcmp(opType,"LoadLLocked")==0 ) return Form::idealL; if( strcmp(opType,"LoadP")==0 ) return Form::idealP; + if( strcmp(opType,"LoadN")==0 ) return Form::idealN; if( strcmp(opType,"LoadRange")==0 ) return Form::idealI; if( strcmp(opType,"LoadS")==0 ) return Form::idealS; if( strcmp(opType,"Load16B")==0 ) return Form::idealB; @@ -289,6 +294,7 @@ if( strcmp(opType,"StoreI")==0) return Form::idealI; if( strcmp(opType,"StoreL")==0) return Form::idealL; if( strcmp(opType,"StoreP")==0) return Form::idealP; + if( strcmp(opType,"StoreN")==0) return Form::idealN; if( strcmp(opType,"Store16B")==0) return Form::idealB; if( strcmp(opType,"Store8B")==0) return Form::idealB; if( strcmp(opType,"Store4B")==0) return Form::idealB; --- old/hotspot/src/share/vm/adlc/forms.hpp 2009-08-01 04:09:21.213915045 +0100 +++ new/hotspot/src/share/vm/adlc/forms.hpp 2009-08-01 04:09:21.127983579 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)forms.hpp 1.150 07/05/05 17:05:00 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -171,7 +171,8 @@ idealD = 5, // Double type idealB = 6, // Byte type idealC = 7, // Char type - idealS = 8 // String type + idealS = 8, // String type + idealN = 9 // Narrow oop types }; // Convert ideal name to a DataType, return DataType::none if not a 'ConX' Form::DataType ideal_to_const_type(const char *ideal_type_name) const; @@ -331,6 +332,8 @@ public: static const char *_signal; // reserved user-defined string + static const char *_signal2; // reserved user-defined string + static const char *_signal3; // reserved user-defined string enum { Not_in_list = -1 }; void addName(const char *name); --- old/hotspot/src/share/vm/adlc/formssel.cpp 2009-08-01 04:09:22.065486581 +0100 +++ new/hotspot/src/share/vm/adlc/formssel.cpp 2009-08-01 04:09:21.969909634 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)formssel.cpp 1.185 07/09/28 10:23:26 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -729,7 +729,11 @@ if( _matrule && _matrule->_rChild && (!strcmp(_matrule->_rChild->_opType,"CastPP") || // new result type !strcmp(_matrule->_rChild->_opType,"CastX2P") || // new result type - !strcmp(_matrule->_rChild->_opType,"CreateEx") || // type of exception + !strcmp(_matrule->_rChild->_opType,"DecodeN") || + !strcmp(_matrule->_rChild->_opType,"EncodeP") || + !strcmp(_matrule->_rChild->_opType,"LoadN") || + !strcmp(_matrule->_rChild->_opType,"LoadNKlass") || + !strcmp(_matrule->_rChild->_opType,"CreateEx") || // type of exception !strcmp(_matrule->_rChild->_opType,"CheckCastPP")) ) return true; else if ( is_ideal_load() == Form::idealP ) return true; else if ( is_ideal_store() != Form::none ) return true; @@ -1101,10 +1105,7 @@ } if( pred1 != NULL && pred2 != NULL ) { // compare the predicates - const char *str1 = pred1->_pred; - const char *str2 = pred2->_pred; - if( (str1 == NULL && str2 == NULL) - || (str1 != NULL && str2 != NULL && strcmp(str1,str2) == 0) ) { + if (ADLParser::equivalent_expressions(pred1->_pred, pred2->_pred)) { return true; } } @@ -1573,10 +1574,10 @@ return Opcode::NOT_AN_OPCODE; } -void Opcode::print_opcode(FILE *fp, Opcode::opcode_type desired_opcode) { +bool Opcode::print_opcode(FILE *fp, Opcode::opcode_type desired_opcode) { // Default values previously provided by MachNode::primary()... - const char *description = "default_opcode()"; - const char *value = "-1"; + const char *description = NULL; + const char *value = NULL; // Check if user provided any opcode definitions if( this != NULL ) { // Update 'value' if user provided a definition in the instruction @@ -1598,7 +1599,10 @@ break; } } - fprintf(fp, "(%s /*%s*/)", value, description); + if (value != NULL) { + fprintf(fp, "(%s /*%s*/)", value, description); + } + return value != NULL; } void Opcode::dump() { @@ -2104,6 +2108,7 @@ if (strcmp(name,"RegF")==0) size = 1; if (strcmp(name,"RegD")==0) size = 2; if (strcmp(name,"RegL")==0) size = 2; + if (strcmp(name,"RegN")==0) size = 1; if (strcmp(name,"RegP")==0) size = globalAD->get_preproc_def("_LP64") ? 
2 : 1; if (size == 0) return false; return size == reg_class->size(); @@ -2368,11 +2373,12 @@ void OperandForm::format_constant(FILE *fp, uint const_index, uint const_type) { switch(const_type) { - case Form::idealI: fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break; - case Form::idealP: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break; - case Form::idealL: fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break; - case Form::idealF: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break; - case Form::idealD: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break; + case Form::idealI: fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break; + case Form::idealP: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break; + case Form::idealN: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break; + case Form::idealL: fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break; + case Form::idealF: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break; + case Form::idealD: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break; default: assert( false, "ShouldNotReachHere()"); } @@ -2607,14 +2613,19 @@ } //------------------------------CondInterface---------------------------------- -CondInterface::CondInterface(char *equal, char *not_equal, - char *less, char *greater_equal, - char *less_equal, char *greater) - : Interface("COND_INTER"), - _equal(equal), _not_equal(not_equal), - _less(less), _greater_equal(greater_equal), - _less_equal(less_equal), _greater(greater) { - // +CondInterface::CondInterface(const char* equal, const char* equal_format, + const char* not_equal, const char* not_equal_format, + const char* less, const char* less_format, + const char* greater_equal, const char* greater_equal_format, + const char* less_equal, const char* less_equal_format, + const char* greater, const char* greater_format) + : Interface("COND_INTER"), + _equal(equal), _equal_format(equal_format), + _not_equal(not_equal), _not_equal_format(not_equal_format), + _less(less), _less_format(less_format), + _greater_equal(greater_equal), _greater_equal_format(greater_equal_format), + _less_equal(less_equal), _less_equal_format(less_equal_format), + _greater(greater), _greater_format(greater_format) { } CondInterface::~CondInterface() { // not owner of any character arrays @@ -3303,18 +3314,18 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const { static const char *needs_ideal_memory_list[] = { - "StoreI","StoreL","StoreP","StoreD","StoreF" , + "StoreI","StoreL","StoreP","StoreN","StoreD","StoreF" , "StoreB","StoreC","Store" ,"StoreFP", - "LoadI" ,"LoadL", "LoadP" ,"LoadD" ,"LoadF" , - "LoadB" ,"LoadC" ,"LoadS" ,"Load" , + "LoadI" ,"LoadL", "LoadP" ,"LoadN", "LoadD" ,"LoadF" , + "LoadB" ,"LoadC" ,"LoadS" ,"Load" , "Store4I","Store2I","Store2L","Store2D","Store4F","Store2F","Store16B", "Store8B","Store4B","Store8C","Store4C","Store2C", "Load4I" ,"Load2I" ,"Load2L" ,"Load2D" ,"Load4F" ,"Load2F" ,"Load16B" , "Load8B" ,"Load4B" ,"Load8C" ,"Load4C" ,"Load2C" ,"Load8S", "Load4S","Load2S", - "LoadRange", "LoadKlass", "LoadL_unaligned", "LoadD_unaligned", + "LoadRange", "LoadKlass", "LoadNKlass", "LoadL_unaligned", "LoadD_unaligned", "LoadPLocked", "LoadLLocked", - "StorePConditional", "StoreLConditional", - "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", + "StorePConditional", "StoreIConditional", "StoreLConditional", + "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN", "StoreCM", "ClearArray" }; @@ 
-3712,12 +3723,13 @@ const char *opType = NULL; if (!base_operand(position, globals, result, name, opType)) { position = 0; - if( base_operand(position, globals, result, name, opType) && - (strcmp(opType,"RegI")==0 || - strcmp(opType,"RegP")==0 || - strcmp(opType,"RegL")==0 || - strcmp(opType,"RegF")==0 || - strcmp(opType,"RegD")==0 || + if( base_operand(position, globals, result, name, opType) && + (strcmp(opType,"RegI")==0 || + strcmp(opType,"RegP")==0 || + strcmp(opType,"RegN")==0 || + strcmp(opType,"RegL")==0 || + strcmp(opType,"RegF")==0 || + strcmp(opType,"RegD")==0 || strcmp(opType,"Reg" )==0) ) { return 1; } @@ -3764,6 +3776,10 @@ int MatchRule::is_ideal_copy() const { if( _rChild ) { const char *opType = _rChild->_opType; +#if 1 + if( strcmp(opType,"CastIP")==0 ) + return 1; +#else if( strcmp(opType,"CastII")==0 ) return 1; // Do not treat *CastPP this way, because it @@ -3783,6 +3799,7 @@ // return 1; //if( strcmp(opType,"CastP2X")==0 ) // return 1; +#endif } if( is_chain_rule(_AD.globalNames()) && _lChild && strncmp(_lChild->_opType,"stackSlot",9)==0 ) @@ -3821,6 +3838,8 @@ strcmp(opType,"ConvL2D")==0 || strcmp(opType,"ConvL2F")==0 || strcmp(opType,"ConvL2I")==0 || + strcmp(opType,"DecodeN")==0 || + strcmp(opType,"EncodeP")==0 || strcmp(opType,"RoundDouble")==0 || strcmp(opType,"RoundFloat")==0 || strcmp(opType,"ReverseBytesI")==0 || --- old/hotspot/src/share/vm/adlc/formssel.hpp 2009-08-01 04:09:23.131595264 +0100 +++ new/hotspot/src/share/vm/adlc/formssel.hpp 2009-08-01 04:09:23.043271229 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)formssel.hpp 1.77 07/07/19 19:08:28 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -400,7 +400,7 @@ void output(FILE *fp); // --------------------------- FILE *output_routines - void print_opcode(FILE *fp, Opcode::opcode_type desired_opcode); + bool print_opcode(FILE *fp, Opcode::opcode_type desired_opcode); }; //------------------------------InsEncode-------------------------------------- @@ -782,10 +782,20 @@ const char *_greater_equal; const char *_less_equal; const char *_greater; - + const char *_equal_format; + const char *_not_equal_format; + const char *_less_format; + const char *_greater_equal_format; + const char *_less_equal_format; + const char *_greater_format; + // Public Methods - CondInterface(char *equal, char *not_equal, char *less, char *greater_equal, - char *less_equal, char *greater); + CondInterface(const char* equal, const char* equal_format, + const char* not_equal, const char* not_equal_format, + const char* less, const char* less_format, + const char* greater_equal, const char* greater_equal_format, + const char* less_equal, const char* less_equal_format, + const char* greater, const char* greater_format); ~CondInterface(); void dump(); --- old/hotspot/src/share/vm/adlc/output_c.cpp 2009-08-01 04:09:24.040144379 +0100 +++ new/hotspot/src/share/vm/adlc/output_c.cpp 2009-08-01 04:09:23.930641908 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)output_c.cpp 1.185 07/07/02 16:50:40 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 * * This code is free software; you can redistribute it and/or modify it @@ -1549,6 +1549,18 @@ // Build a mapping from operand index to input edges fprintf(fp," unsigned idx0 = oper_input_base();\n"); + + // The order in which inputs are added to a node is very + // strange. Store nodes get a memory input before Expand is + // called and all other nodes get it afterwards so + // oper_input_base is wrong during expansion. This code adjusts + // it so that expansion will work correctly. + bool missing_memory_edge = node->_matrule->needs_ideal_memory_edge(_globalNames) && + node->is_ideal_store() == Form::none; + if (missing_memory_edge) { + fprintf(fp," idx0--; // Adjust base because memory edge hasn't been inserted yet\n"); + } + for( i = 0; i < node->num_opnds(); i++ ) { fprintf(fp," unsigned idx%d = idx%d + num%d;\n", i+1,i,i); @@ -1603,11 +1615,14 @@ int node_mem_op = node->memory_operand(_globalNames); assert( node_mem_op != InstructForm::NO_MEMORY_OPERAND, "expand rule member needs memory but top-level inst doesn't have any" ); - // Copy memory edge - fprintf(fp," n%d->add_req(_in[1]);\t// Add memory edge\n", cnt); + if (!missing_memory_edge) { + // Copy memory edge + fprintf(fp," n%d->add_req(_in[1]);\t// Add memory edge\n", cnt); + } } // Iterate over the new instruction's operands + int prev_pos = -1; for( expand_instr->reset(); (opid = expand_instr->iter()) != NULL; ) { // Use 'parameter' at current position in list of new instruction's formals // instead of 'opid' when looking up info internal to new_inst @@ -1631,7 +1646,19 @@ // ins = (InstructForm *) _globalNames[new_id]; exp_pos = node->operand_position_format(opid); assert(exp_pos != -1, "Bad expand rule"); - + if (prev_pos > exp_pos && expand_instruction->_matrule != NULL) { + // For the add_req calls below to work correctly they need + // to be added in the same order that a match would add them. + // This means that they would need to be in the order of + // the components list instead of the formal parameters. + // This is a sort of hidden invariant that previously + // wasn't checked and could lead to incorrectly + // constructed nodes. 
+ syntax_err(node->_linenum, "For expand in %s to work, parameter declaration order in %s must follow matchrule\n", + node->_ident, new_inst->_ident); + } + prev_pos = exp_pos; + new_pos = new_inst->operand_position(parameter,Component::USE); if (new_pos != -1) { // Copy the operand from the ExpandNode to the new node @@ -2295,7 +2322,12 @@ _processing_noninput = false; // A replacement variable, originally '$' if ( Opcode::as_opcode_type(rep_var) != Opcode::NOT_AN_OPCODE ) { - _inst._opcode->print_opcode(_fp, Opcode::as_opcode_type(rep_var) ); + if (!_inst._opcode->print_opcode(_fp, Opcode::as_opcode_type(rep_var) )) { + // Missing opcode + _AD.syntax_err( _inst._linenum, + "Missing $%s opcode definition in %s, used by encoding %s\n", + rep_var, _inst._ident, _encoding._name); + } } else { // Lookup its position in parameter list @@ -2337,7 +2369,13 @@ else if( Opcode::as_opcode_type(inst_rep_var) != Opcode::NOT_AN_OPCODE ) { // else check if "primary", "secondary", "tertiary" assert( _constant_status == LITERAL_ACCESSED, "Must be processing a literal constant parameter"); - _inst._opcode->print_opcode(_fp, Opcode::as_opcode_type(inst_rep_var) ); + if (!_inst._opcode->print_opcode(_fp, Opcode::as_opcode_type(inst_rep_var) )) { + // Missing opcode + _AD.syntax_err( _inst._linenum, + "Missing $%s opcode definition in %s\n", + rep_var, _inst._ident); + + } _constant_status = LITERAL_OUTPUT; } else if((_AD.get_registers() != NULL ) && (_AD.get_registers()->getRegDef(inst_rep_var) != NULL)) { @@ -2366,6 +2404,8 @@ fprintf(fp,"uint %sNode::size(PhaseRegAlloc *ra_) const {\n", inst._ident); + fprintf(fp, " assert(VerifyOops || MachNode::size(ra_) <= %s, \"bad fixed size\");\n", inst._size); + //(2) // Print the size fprintf(fp, " return (VerifyOops ? MachNode::size(ra_) : %s);\n", inst._size); @@ -3429,6 +3469,8 @@ fprintf(fp, "_leaf->get_int()"); } else if ( (strcmp(optype,"ConP") == 0) ) { fprintf(fp, "_leaf->bottom_type()->is_ptr()"); + } else if ( (strcmp(optype,"ConN") == 0) ) { + fprintf(fp, "_leaf->bottom_type()->is_narrowoop()"); } else if ( (strcmp(optype,"ConF") == 0) ) { fprintf(fp, "_leaf->getf()"); } else if ( (strcmp(optype,"ConD") == 0) ) { --- old/hotspot/src/share/vm/adlc/output_h.cpp 2009-08-01 04:09:25.131436010 +0100 +++ new/hotspot/src/share/vm/adlc/output_h.cpp 2009-08-01 04:09:25.036102959 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)output_h.cpp 1.180 07/09/28 10:23:25 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -206,6 +206,10 @@ if (i > 0) fprintf(fp,", "); fprintf(fp," const TypePtr *_c%d;\n", i); } + else if (!strcmp(type, "ConN")) { + if (i > 0) fprintf(fp,", "); + fprintf(fp," const TypeNarrowOop *_c%d;\n", i); + } else if (!strcmp(type, "ConL")) { if (i > 0) fprintf(fp,", "); fprintf(fp," jlong _c%d;\n", i); @@ -238,6 +242,10 @@ fprintf(fp," const TypePtr *_c%d;\n", i); i++; } + else if (!strcmp(comp->base_type(globals), "ConN")) { + fprintf(fp," const TypePtr *_c%d;\n", i); + i++; + } else if (!strcmp(comp->base_type(globals), "ConL")) { fprintf(fp," jlong _c%d;\n", i); i++; @@ -283,6 +291,7 @@ fprintf(fp,is_ideal_bool ? 
"BoolTest::mask c%d" : "int32 c%d", i); break; } + case Form::idealN : { fprintf(fp,"const TypeNarrowOop *c%d", i); break; } case Form::idealP : { fprintf(fp,"const TypePtr *c%d", i); break; } case Form::idealL : { fprintf(fp,"jlong c%d", i); break; } case Form::idealF : { fprintf(fp,"jfloat c%d", i); break; } @@ -305,6 +314,11 @@ fprintf(fp,"const TypePtr *c%d", i); i++; } + else if (!strcmp(comp->base_type(globals), "ConN")) { + if (i > 0) fprintf(fp,", "); + fprintf(fp,"const TypePtr *c%d", i); + i++; + } else if (!strcmp(comp->base_type(globals), "ConL")) { if (i > 0) fprintf(fp,", "); fprintf(fp,"jlong c%d", i); @@ -344,17 +358,19 @@ // --------------------------------------------------------------------------- // Generate the format rule for condition codes -static void defineCCodeDump(FILE *fp, int i) { - fprintf(fp, " if( _c%d == BoolTest::eq ) st->print(\"eq\");\n",i); - fprintf(fp, " else if( _c%d == BoolTest::ne ) st->print(\"ne\");\n",i); - fprintf(fp, " else if( _c%d == BoolTest::le ) st->print(\"le\");\n",i); - fprintf(fp, " else if( _c%d == BoolTest::ge ) st->print(\"ge\");\n",i); - fprintf(fp, " else if( _c%d == BoolTest::lt ) st->print(\"lt\");\n",i); - fprintf(fp, " else if( _c%d == BoolTest::gt ) st->print(\"gt\");\n",i); +static void defineCCodeDump(OperandForm* oper, FILE *fp, int i) { + assert(oper != NULL, "what"); + CondInterface* cond = oper->_interface->is_CondInterface(); + fprintf(fp, " if( _c%d == BoolTest::eq ) st->print(\"%s\");\n",i,cond->_equal_format); + fprintf(fp, " else if( _c%d == BoolTest::ne ) st->print(\"%s\");\n",i,cond->_not_equal_format); + fprintf(fp, " else if( _c%d == BoolTest::le ) st->print(\"%s\");\n",i,cond->_less_equal_format); + fprintf(fp, " else if( _c%d == BoolTest::ge ) st->print(\"%s\");\n",i,cond->_greater_equal_format); + fprintf(fp, " else if( _c%d == BoolTest::lt ) st->print(\"%s\");\n",i,cond->_less_format); + fprintf(fp, " else if( _c%d == BoolTest::gt ) st->print(\"%s\");\n",i,cond->_greater_format); } // Output code that dumps constant values, increment "i" if type is constant -static uint dump_spec_constant(FILE *fp, const char *ideal_type, uint i) { +static uint dump_spec_constant(FILE *fp, const char *ideal_type, uint i, OperandForm* oper) { if (!strcmp(ideal_type, "ConI")) { fprintf(fp," st->print(\"#%%d\", _c%d);\n", i); ++i; @@ -363,6 +379,10 @@ fprintf(fp," _c%d->dump_on(st);\n", i); ++i; } + else if (!strcmp(ideal_type, "ConN")) { + fprintf(fp," _c%d->dump_on(st);\n", i); + ++i; + } else if (!strcmp(ideal_type, "ConL")) { fprintf(fp," st->print(\"#\" INT64_FORMAT, _c%d);\n", i); ++i; @@ -376,7 +396,7 @@ ++i; } else if (!strcmp(ideal_type, "Bool")) { - defineCCodeDump(fp,i); + defineCCodeDump(oper, fp,i); ++i; } @@ -420,8 +440,13 @@ // Replacement variable const char *rep_var = oper._format->_rep_vars.iter(); // Check that it is a local name, and an operand - OperandForm *op = oper._localNames[rep_var]->is_operand(); - assert( op, "replacement variable was not found in local names"); + const Form* form = oper._localNames[rep_var]; + if (form == NULL) { + globalAD->syntax_err(oper._linenum, + "\'%s\' not found in format for %s\n", rep_var, oper._ident); + assert(form, "replacement variable was not found in local names"); + } + OperandForm *op = form->is_operand(); // Get index if register or constant if ( op->_matrule && op->_matrule->is_base_register(globals) ) { idx = oper.register_position( globals, rep_var); @@ -456,7 +481,7 @@ } // ALWAYS! Provide a special case output for condition codes. 
if( oper.is_ideal_bool() ) { - defineCCodeDump(fp,0); + defineCCodeDump(&oper, fp,0); } fprintf(fp,"}\n"); @@ -486,9 +511,14 @@ } else { // Replacement variable const char *rep_var = oper._format->_rep_vars.iter(); - // Check that it is a local name, and an operand - OperandForm *op = oper._localNames[rep_var]->is_operand(); - assert( op, "replacement variable was not found in local names"); + // Check that it is a local name, and an operand + const Form* form = oper._localNames[rep_var]; + if (form == NULL) { + globalAD->syntax_err(oper._linenum, + "\'%s\' not found in format for %s\n", rep_var, oper._ident); + assert(form, "replacement variable was not found in local names"); + } + OperandForm *op = form->is_operand(); // Get index if register or constant if ( op->_matrule && op->_matrule->is_base_register(globals) ) { idx = oper.register_position( globals, rep_var); @@ -524,7 +554,7 @@ } // ALWAYS! Provide a special case output for condition codes. if( oper.is_ideal_bool() ) { - defineCCodeDump(fp,0); + defineCCodeDump(&oper, fp,0); } fprintf(fp, "}\n"); fprintf(fp, "#endif\n"); @@ -558,10 +588,53 @@ while( (string = inst._format->_strings.iter()) != NULL ) { fprintf(fp," "); // Check if this is a standard string or a replacement variable - if( string != NameList::_signal ) // Normal string. Pass through. + if( string == NameList::_signal ) { // Replacement variable + const char* rep_var = inst._format->_rep_vars.iter(); + inst.rep_var_format( fp, rep_var); + } else if( string == NameList::_signal3 ) { // Replacement variable in raw text + const char* rep_var = inst._format->_rep_vars.iter(); + const Form *form = inst._localNames[rep_var]; + if (form == NULL) { + fprintf(stderr, "unknown replacement variable in format statement: '%s'\n", rep_var); + assert(false, "ShouldNotReachHere()"); + } + OpClassForm *opc = form->is_opclass(); + assert( opc, "replacement variable was not found in local names"); + // Lookup the index position of the replacement variable + int idx = inst.operand_position_format(rep_var); + if ( idx == -1 ) { + assert( strcmp(opc->_ident,"label")==0, "Unimplemented"); + assert( false, "ShouldNotReachHere()"); + } + + if (inst.is_noninput_operand(idx)) { + assert( false, "ShouldNotReachHere()"); + } else { + // Output the format call for this operand + fprintf(fp,"opnd_array(%d)",idx); + } + rep_var = inst._format->_rep_vars.iter(); + inst._format->_strings.iter(); + if ( strcmp(rep_var,"$constant") == 0 && opc->is_operand()) { + Form::DataType constant_type = form->is_operand()->is_base_constant(globals); + if ( constant_type == Form::idealD ) { + fprintf(fp,"->constantD()"); + } else if ( constant_type == Form::idealF ) { + fprintf(fp,"->constantF()"); + } else if ( constant_type == Form::idealL ) { + fprintf(fp,"->constantL()"); + } else { + fprintf(fp,"->constant()"); + } + } else if ( strcmp(rep_var,"$cmpcode") == 0) { + fprintf(fp,"->ccode()"); + } else { + assert( false, "ShouldNotReachHere()"); + } + } else if( string == NameList::_signal2 ) // Raw program text + fputs(inst._format->_strings.iter(), fp); + else fprintf(fp,"st->print(\"%s\");\n", string); - else // Replacement variable - inst.rep_var_format( fp, inst._format->_rep_vars.iter() ); } // Done with all format strings } // Done generating the user-defined portion of the format @@ -1166,7 +1239,7 @@ if( type != NULL ) { Form::DataType data_type = oper->is_base_constant(_globalNames); // Check if we are an ideal pointer type - if( data_type == Form::idealP ) { + if( data_type == Form::idealP || 
data_type == Form::idealN ) { // Return the ideal type we already have: fprintf(fp," return _c0;"); } else { @@ -1294,6 +1367,16 @@ fprintf(fp, " return _c0->isa_oop_ptr();"); fprintf(fp, " }\n"); } + else if (!strcmp(oper->ideal_type(_globalNames), "ConN")) { + // Access the locally stored constant + fprintf(fp," virtual intptr_t constant() const {"); + fprintf(fp, " return _c0->make_oopptr()->get_con();"); + fprintf(fp, " }\n"); + // Generate query to determine if this pointer is an oop + fprintf(fp," virtual bool constant_is_oop() const {"); + fprintf(fp, " return _c0->make_oopptr()->isa_oop_ptr();"); + fprintf(fp, " }\n"); + } else if (!strcmp(oper->ideal_type(_globalNames), "ConL")) { fprintf(fp," virtual intptr_t constant() const {"); // We don't support addressing modes with > 4Gig offsets. @@ -1369,7 +1452,7 @@ oper->_components.reset(); if ((comp = oper->_components.iter()) == NULL) { assert(num_consts == 1, "Bad component list detected.\n"); - i = dump_spec_constant( fp, type, i ); + i = dump_spec_constant( fp, type, i, oper ); // Check that type actually matched assert( i != 0, "Non-constant operand lacks component list."); } // end if NULL @@ -1379,7 +1462,7 @@ oper->_components.reset(); while((comp = oper->_components.iter()) != NULL) { type = comp->base_type(_globalNames); - i = dump_spec_constant( fp, type, i ); + i = dump_spec_constant( fp, type, i, NULL ); } } // finish line (3) @@ -1751,6 +1834,7 @@ fprintf(fp," return TypeInt::make(opnd_array(1)->constant());\n"); break; case Form::idealP: + case Form::idealN: fprintf(fp," return opnd_array(1)->type();\n",result); break; case Form::idealD: @@ -1812,6 +1896,19 @@ fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveP\n", offset, offset+1, offset+1); } + else if( instr->_matrule && instr->_matrule->_rChild && !strcmp(instr->_matrule->_rChild->_opType,"CMoveN") ) { + int offset = 1; + // Special special hack to see if the Cmp? has been incorporated in the conditional move + MatchNode *rl = instr->_matrule->_rChild->_lChild; + if( rl && !strcmp(rl->_opType, "Binary") ) { + MatchNode *rlr = rl->_rChild; + if (rlr && strncmp(rlr->_opType, "Cmp", 3) == 0) + offset = 2; + } + // Special hack for ideal CMoveN; ideal type depends on inputs + fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n", + offset, offset+1, offset+1); + } else if( instr->needs_base_oop_edge(_globalNames) ) { // Special hack for ideal AddP. Bottom type is an oop IFF it has a // legal base-pointer input. Otherwise it is NOT an oop. --- old/hotspot/src/share/vm/asm/assembler.cpp 2009-08-01 04:09:26.154021726 +0100 +++ new/hotspot/src/share/vm/asm/assembler.cpp 2009-08-01 04:09:26.068765194 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)assembler.cpp 1.41 07/05/05 17:05:03 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -249,6 +249,21 @@ } } +bool MacroAssembler::needs_explicit_null_check(intptr_t offset) { + // Exception handler checks the nmethod's implicit null checks table + // only when this method returns false. 
+ if (UseCompressedOops) { + // The first page after heap_base is unmapped and + // the 'offset' is equal to [heap_base + offset] for + // narrow oop implicit null checks. + uintptr_t heap_base = (uintptr_t)Universe::heap_base(); + if ((uintptr_t)offset >= heap_base) { + // Normalize offset for the next check. + offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1)); + } + } + return offset < 0 || os::vm_page_size() <= offset; +} #ifndef PRODUCT void Label::print_instructions(MacroAssembler* masm) const { --- old/hotspot/src/share/vm/asm/codeBuffer.cpp 2009-08-01 04:09:26.944386251 +0100 +++ new/hotspot/src/share/vm/asm/codeBuffer.cpp 2009-08-01 04:09:26.858403750 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)codeBuffer.cpp 1.100 07/05/05 17:05:03 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -284,8 +284,10 @@ // Need to return a pc, doesn't matter what it is since it will be // replaced during resolution later. - // (Don't return NULL or badAddress, since branches shouldn't overflow.) - return base; + // Don't return NULL or badAddress, since branches shouldn't overflow. + // Don't return base either because that could overflow displacements + // for shorter branches. It will get checked when bound. + return branch_pc; } } @@ -950,6 +952,7 @@ if (_comments != NULL) { CodeComment* c = _comments->find(offset); while (c && c->offset() == offset) { + stream->bol(); stream->print(" ;; "); stream->print_cr(c->comment()); c = c->next(); --- old/hotspot/src/share/vm/asm/codeBuffer.hpp 2009-08-01 04:09:27.806861461 +0100 +++ new/hotspot/src/share/vm/asm/codeBuffer.hpp 2009-08-01 04:09:27.728984437 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)codeBuffer.hpp 1.63 07/05/17 15:49:26 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,7 @@ Verified_Entry, Frame_Complete, // Offset in the code where the frame setup is (for forte stackwalks) is complete OSR_Entry, + Dtrace_trap = OSR_Entry, // dtrace probes can never have an OSR entry so reuse it Exceptions, // Offset where exception handler lives Deopt, // Offset where deopt handler lives max_Entries }; --- old/hotspot/src/share/vm/c1/c1_CodeStubs.hpp 2009-08-01 04:09:28.650574866 +0100 +++ new/hotspot/src/share/vm/c1/c1_CodeStubs.hpp 2009-08-01 04:09:28.575482107 +0100 @@ -486,3 +486,80 @@ #endif // PRODUCT }; +////////////////////////////////////////////////////////////////////////////////////////// +#ifndef SERIALGC + +// Code stubs for Garbage-First barriers. 
+class G1PreBarrierStub: public CodeStub { + private: + LIR_Opr _addr; + LIR_Opr _pre_val; + LIR_PatchCode _patch_code; + CodeEmitInfo* _info; + + public: + // pre_val (a temporary register) must be a register; + // addr (the address of the field to be read) must be a LIR_Address + G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) : + _addr(addr), _pre_val(pre_val), _patch_code(patch_code), _info(info) + { + assert(_pre_val->is_register(), "should be temporary register"); + assert(_addr->is_address(), "should be the address of the field"); + } + + LIR_Opr addr() const { return _addr; } + LIR_Opr pre_val() const { return _pre_val; } + LIR_PatchCode patch_code() const { return _patch_code; } + CodeEmitInfo* info() const { return _info; } + + virtual void emit_code(LIR_Assembler* e); + virtual void visit(LIR_OpVisitState* visitor) { + // don't pass in the code emit info since it's processed in the fast + // path + if (_info != NULL) + visitor->do_slow_case(_info); + else + visitor->do_slow_case(); + visitor->do_input(_addr); + visitor->do_temp(_pre_val); + } +#ifndef PRODUCT + virtual void print_name(outputStream* out) const { out->print("G1PreBarrierStub"); } +#endif // PRODUCT +}; + +class G1PostBarrierStub: public CodeStub { + private: + LIR_Opr _addr; + LIR_Opr _new_val; + + static jbyte* _byte_map_base; + static jbyte* byte_map_base_slow(); + static jbyte* byte_map_base() { + if (_byte_map_base == NULL) { + _byte_map_base = byte_map_base_slow(); + } + return _byte_map_base; + } + + public: + // addr (the address of the object head) and new_val must be registers. + G1PostBarrierStub(LIR_Opr addr, LIR_Opr new_val): _addr(addr), _new_val(new_val) { } + + LIR_Opr addr() const { return _addr; } + LIR_Opr new_val() const { return _new_val; } + + virtual void emit_code(LIR_Assembler* e); + virtual void visit(LIR_OpVisitState* visitor) { + // don't pass in the code emit info since it's processed in the fast path + visitor->do_slow_case(); + visitor->do_input(_addr); + visitor->do_input(_new_val); + } +#ifndef PRODUCT + virtual void print_name(outputStream* out) const { out->print("G1PostBarrierStub"); } +#endif // PRODUCT +}; + +#endif // SERIALGC +////////////////////////////////////////////////////////////////////////////////////////// --- old/hotspot/src/share/vm/c1/c1_FrameMap.cpp 2009-08-01 04:09:29.466442264 +0100 +++ new/hotspot/src/share/vm/c1/c1_FrameMap.cpp 2009-08-01 04:09:29.381069538 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_FrameMap.cpp 1.38 07/06/18 14:25:23 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -281,7 +281,7 @@ ByteSize FrameMap::sp_offset_for_monitor_base(const int index) const { int end_of_spills = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) + _num_spills * spill_slot_size_in_bytes; - int offset = round_to(end_of_spills, HeapWordSize) + index * sizeof(BasicObjectLock); + int offset = (int) round_to(end_of_spills, HeapWordSize) + index * sizeof(BasicObjectLock); return in_ByteSize(offset); } --- old/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp 2009-08-01 04:09:30.807205696 +0100 +++ new/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp 2009-08-01 04:09:30.693189444 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_GraphBuilder.cpp 1.256 07/06/18 14:25:23 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -679,21 +679,6 @@ } -void GraphBuilder::kill_field(ciField* field) { - if (UseLocalValueNumbering) { - vmap()->kill_field(field); - } -} - - -void GraphBuilder::kill_array(Value value) { - if (UseLocalValueNumbering) { - vmap()->kill_array(value->type()); - } - _memory->store_value(value); -} - - void GraphBuilder::kill_all() { if (UseLocalValueNumbering) { vmap()->kill_all(); @@ -990,8 +975,8 @@ length = append(new ArrayLength(array, lock_stack())); } StoreIndexed* result = new StoreIndexed(array, index, length, type, value, lock_stack()); - kill_array(value); // invalidate all CSEs that are memory accesses of the same type append(result); + _memory->store_value(value); } @@ -1481,9 +1466,6 @@ case Bytecodes::_putstatic: { Value val = pop(type); append(new StoreField(append(obj), offset, field, val, true, lock_stack(), state_copy, is_loaded, is_initialized)); - if (UseLocalValueNumbering) { - vmap()->kill_field(field); // invalidate all CSEs that are memory accesses - } } break; case Bytecodes::_getfield : @@ -1506,7 +1488,6 @@ if (is_loaded) store = _memory->store(store); if (store != NULL) { append(store); - kill_field(field); // invalidate all CSEs that are accesses of this field } } break; @@ -1903,6 +1884,8 @@ assert(i2->bci() != -1, "should already be linked"); return i2; } + ValueNumberingEffects vne(vmap()); + i1->visit(&vne); } if (i1->as_Phi() == NULL && i1->as_Local() == NULL) { @@ -1929,14 +1912,8 @@ assert(_last == i1, "adjust code below"); StateSplit* s = i1->as_StateSplit(); if (s != NULL && i1->as_BlockEnd() == NULL) { - // Continue CSE across certain intrinsics - Intrinsic* intrinsic = s->as_Intrinsic(); - if (UseLocalValueNumbering) { - if (intrinsic == NULL || !intrinsic->preserves_state()) { - vmap()->kill_all(); // for now, hopefully we need this only for calls eventually - } - } if (EliminateFieldAccess) { + Intrinsic* intrinsic = s->as_Intrinsic(); if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) { _memory->kill(); } --- old/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp 2009-08-01 04:09:31.901133825 +0100 +++ new/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp 2009-08-01 04:09:31.817399957 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_GraphBuilder.hpp 1.75 07/05/17 15:49:37 JVM" #endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -286,8 +286,6 @@ Dependencies* dependency_recorder() const; // = compilation()->dependencies() bool direct_compare(ciKlass* k); - void kill_field(ciField* field); - void kill_array(Value value); void kill_all(); ValueStack* lock_stack(); --- old/hotspot/src/share/vm/c1/c1_IR.cpp 2009-08-01 04:09:32.783188706 +0100 +++ new/hotspot/src/share/vm/c1/c1_IR.cpp 2009-08-01 04:09:32.689097120 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_IR.cpp 1.161 08/11/07 15:47:10 JVM" #endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- old/hotspot/src/share/vm/c1/c1_LIR.cpp 2009-08-01 04:09:33.730222975 +0100 +++ new/hotspot/src/share/vm/c1/c1_LIR.cpp 2009-08-01 04:09:33.640479008 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_LIR.cpp 1.119 07/06/18 14:25:24 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ return FrameMap::cpu_rnr2reg(cpu_regnrHi()); } -#ifdef IA32 +#if defined(X86) XMMRegister LIR_OprDesc::as_xmm_float_reg() const { return FrameMap::nr2xmmreg(xmm_regnr()); @@ -51,7 +51,7 @@ return FrameMap::nr2xmmreg(xmm_regnrLo()); } -#endif +#endif // X86 #ifdef SPARC @@ -84,7 +84,7 @@ case floatTag : return LIR_OprFact::floatConst(type->as_FloatConstant()->value()); case longTag : return LIR_OprFact::longConst(type->as_LongConstant()->value()); case doubleTag : return LIR_OprFact::doubleConst(type->as_DoubleConstant()->value()); - default: ShouldNotReachHere(); + default: ShouldNotReachHere(); return LIR_OprFact::intConst(-1); } } @@ -97,7 +97,7 @@ case floatTag: return LIR_OprFact::floatConst(0.0); case longTag: return LIR_OprFact::longConst(0); case doubleTag: return LIR_OprFact::doubleConst(0.0); - default: ShouldNotReachHere(); + default: ShouldNotReachHere(); return LIR_OprFact::intConst(-1); } return illegalOpr; } @@ -108,7 +108,7 @@ LIR_Address::Scale LIR_Address::scale(BasicType type) { - int elem_size = type2aelembytes[type]; + int elem_size = type2aelembytes(type); switch (elem_size) { case 1: return LIR_Address::times_1; case 2: return LIR_Address::times_2; @@ -165,6 +165,7 @@ default: ShouldNotReachHere(); + return '?'; } } @@ -1377,7 +1378,7 @@ } else if (is_double_cpu()) { out->print(as_register_hi()->name()); out->print(as_register_lo()->name()); -#ifdef IA32 +#if defined(X86) } else if (is_single_xmm()) { out->print(as_xmm_float_reg()->name()); } else if (is_double_xmm()) { --- old/hotspot/src/share/vm/c1/c1_LIR.hpp 2009-08-01 04:09:34.675565187 +0100 +++ new/hotspot/src/share/vm/c1/c1_LIR.hpp 2009-08-01 04:09:34.581361616 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_LIR.hpp 1.134 07/06/18 14:25:24 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -138,6 +138,13 @@ return as_jint_hi(); } } + jlong as_jlong_bits() const { + if (type() == T_DOUBLE) { + return jlong_cast(_value.get_jdouble()); + } else { + return as_jlong(); + } + } virtual void print_value_on(outputStream* out) const PRODUCT_RETURN; @@ -305,6 +312,7 @@ default: ShouldNotReachHere(); + return single_size; } } @@ -420,12 +428,12 @@ return as_register(); } -#ifdef IA32 +#ifdef X86 XMMRegister as_xmm_float_reg() const; XMMRegister as_xmm_double_reg() const; // for compatibility with RInfo int fpu () const { return lo_reg_half(); } -#endif +#endif // X86 #ifdef SPARC FloatRegister as_float_reg () const; @@ -506,14 +514,14 @@ , _type(type) , _disp(disp) { verify(); } -#ifdef IA32 - LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type): +#ifdef X86 + LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type): _base(base) , _index(index) , _scale(scale) , _type(type) , _disp(disp) { verify(); } -#endif +#endif // X86 LIR_Opr base() const { return _base; } LIR_Opr index() const { return _index; } @@ -538,31 +546,93 @@ static LIR_Opr illegalOpr; - static LIR_Opr single_cpu(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::int_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); } - static LIR_Opr single_cpu_oop(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); } - static LIR_Opr double_cpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::long_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); } - - static LIR_Opr single_fpu(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size); } + static LIR_Opr single_cpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::int_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); } + static LIR_Opr single_cpu_oop(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); } + static LIR_Opr double_cpu(int reg1, int reg2) { + LP64_ONLY(assert(reg1 == reg2, "must be identical")); + return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) | + (reg2 << LIR_OprDesc::reg2_shift) | + LIR_OprDesc::long_type | + LIR_OprDesc::cpu_register | + LIR_OprDesc::double_size); + } + + static LIR_Opr single_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | + LIR_OprDesc::float_type | + LIR_OprDesc::fpu_register | + LIR_OprDesc::single_size); } #ifdef SPARC - static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); } -#endif -#ifdef IA32 - static LIR_Opr double_fpu(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | (reg << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); } - static LIR_Opr single_xmm(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size | LIR_OprDesc::is_xmm_mask); } - static LIR_Opr double_xmm(int reg) { return (LIR_Opr)((reg << 
LIR_OprDesc::reg1_shift) | (reg << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size | LIR_OprDesc::is_xmm_mask); } -#endif + static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) | + (reg2 << LIR_OprDesc::reg2_shift) | + LIR_OprDesc::double_type | + LIR_OprDesc::fpu_register | + LIR_OprDesc::double_size); } +#endif +#ifdef X86 + static LIR_Opr double_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | + (reg << LIR_OprDesc::reg2_shift) | + LIR_OprDesc::double_type | + LIR_OprDesc::fpu_register | + LIR_OprDesc::double_size); } + + static LIR_Opr single_xmm(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | + LIR_OprDesc::float_type | + LIR_OprDesc::fpu_register | + LIR_OprDesc::single_size | + LIR_OprDesc::is_xmm_mask); } + static LIR_Opr double_xmm(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | + (reg << LIR_OprDesc::reg2_shift) | + LIR_OprDesc::double_type | + LIR_OprDesc::fpu_register | + LIR_OprDesc::double_size | + LIR_OprDesc::is_xmm_mask); } +#endif // X86 static LIR_Opr virtual_register(int index, BasicType type) { LIR_Opr res; switch (type) { case T_OBJECT: // fall through - case T_ARRAY: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break; - case T_INT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::int_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break; - case T_LONG: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::long_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size | LIR_OprDesc::virtual_mask); break; - case T_FLOAT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::float_type | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break; - case T_DOUBLE: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size | LIR_OprDesc::virtual_mask); break; + case T_ARRAY: + res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | + LIR_OprDesc::object_type | + LIR_OprDesc::cpu_register | + LIR_OprDesc::single_size | + LIR_OprDesc::virtual_mask); + break; + + case T_INT: + res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | + LIR_OprDesc::int_type | + LIR_OprDesc::cpu_register | + LIR_OprDesc::single_size | + LIR_OprDesc::virtual_mask); + break; + + case T_LONG: + res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | + LIR_OprDesc::long_type | + LIR_OprDesc::cpu_register | + LIR_OprDesc::double_size | + LIR_OprDesc::virtual_mask); + break; + + case T_FLOAT: + res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | + LIR_OprDesc::float_type | + LIR_OprDesc::fpu_register | + LIR_OprDesc::single_size | + LIR_OprDesc::virtual_mask); + break; + + case + T_DOUBLE: res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | + LIR_OprDesc::double_type | + LIR_OprDesc::fpu_register | + LIR_OprDesc::double_size | + LIR_OprDesc::virtual_mask); + break; default: ShouldNotReachHere(); res = illegalOpr; } @@ -575,8 +645,8 @@ // old-style calculation; check if old and new method are equal LIR_OprDesc::OprType t = as_OprType(type); - LIR_Opr old_res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | t | - ((type == T_FLOAT || type == T_DOUBLE) ? 
LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) | + LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t | + ((type == T_FLOAT || type == T_DOUBLE) ? LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) | LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask); assert(res == old_res, "old and new method not equal"); #endif @@ -591,11 +661,39 @@ LIR_Opr res; switch (type) { case T_OBJECT: // fall through - case T_ARRAY: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::object_type | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break; - case T_INT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::int_type | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break; - case T_LONG: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::long_type | LIR_OprDesc::stack_value | LIR_OprDesc::double_size); break; - case T_FLOAT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::float_type | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break; - case T_DOUBLE: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::double_type | LIR_OprDesc::stack_value | LIR_OprDesc::double_size); break; + case T_ARRAY: + res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | + LIR_OprDesc::object_type | + LIR_OprDesc::stack_value | + LIR_OprDesc::single_size); + break; + + case T_INT: + res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | + LIR_OprDesc::int_type | + LIR_OprDesc::stack_value | + LIR_OprDesc::single_size); + break; + + case T_LONG: + res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | + LIR_OprDesc::long_type | + LIR_OprDesc::stack_value | + LIR_OprDesc::double_size); + break; + + case T_FLOAT: + res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | + LIR_OprDesc::float_type | + LIR_OprDesc::stack_value | + LIR_OprDesc::single_size); + break; + case T_DOUBLE: + res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | + LIR_OprDesc::double_type | + LIR_OprDesc::stack_value | + LIR_OprDesc::double_size); + break; default: ShouldNotReachHere(); res = illegalOpr; } @@ -604,7 +702,10 @@ assert(index >= 0, "index must be positive"); assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big"); - LIR_Opr old_res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::stack_value | as_OprType(type) | LIR_OprDesc::size_for(type)); + LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | + LIR_OprDesc::stack_value | + as_OprType(type) | + LIR_OprDesc::size_for(type)); assert(res == old_res, "old and new method not equal"); #endif --- old/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp 2009-08-01 04:09:35.649186255 +0100 +++ new/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp 2009-08-01 04:09:35.563961016 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_LIRAssembler.cpp 1.135 07/07/02 16:50:41 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -77,6 +77,7 @@ LIR_Assembler::LIR_Assembler(Compilation* c): _compilation(c) , _masm(c->masm()) + , _bs(Universe::heap()->barrier_set()) , _frame_map(c->frame_map()) , _current_block(NULL) , _pending_non_safepoint(NULL) @@ -218,7 +219,7 @@ #endif /* PRODUCT */ assert(block->lir() != NULL, "must have LIR"); - IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed")); + X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed")); #ifndef PRODUCT if (CommentedAssembly) { @@ -230,7 +231,7 @@ emit_lir_list(block->lir()); - IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed")); + X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed")); } @@ -437,7 +438,7 @@ break; default: ShouldNotReachHere(); } -#if defined(IA32) && defined(TIERED) +#if defined(X86) && defined(TIERED) // C2 leave fpu stack dirty clean it if (UseSSE < 2) { int i; @@ -448,7 +449,7 @@ ffree(0); } } -#endif // IA32 && TIERED +#endif // X86 && TIERED } --- old/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp 2009-08-01 04:09:36.567412319 +0100 +++ new/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp 2009-08-01 04:09:36.481601819 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_LIRAssembler.hpp 1.116 07/05/05 17:05:08 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,11 +27,13 @@ class Compilation; class ScopeValue; +class BarrierSet; class LIR_Assembler: public CompilationResourceObj { private: C1_MacroAssembler* _masm; CodeStubList* _slow_case_stubs; + BarrierSet* _bs; Compilation* _compilation; FrameMap* _frame_map; @@ -78,9 +80,9 @@ void emit_stubs(CodeStubList* stub_list); // addresses - static Address as_Address(LIR_Address* addr); - static Address as_Address_lo(LIR_Address* addr); - static Address as_Address_hi(LIR_Address* addr); + Address as_Address(LIR_Address* addr); + Address as_Address_lo(LIR_Address* addr); + Address as_Address_hi(LIR_Address* addr); // debug information void add_call_info(int pc_offset, CodeEmitInfo* cinfo); --- old/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp 2009-08-01 04:09:37.463074410 +0100 +++ new/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp 2009-08-01 04:09:37.360125905 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_LIRGenerator.cpp 1.24 08/11/07 15:47:10 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -288,16 +288,7 @@ void LIRGenerator::init() { - BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); - CardTableModRefBS* ct = (CardTableModRefBS*)bs; - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); - -#ifdef _LP64 - _card_table_base = new LIR_Const((jlong)ct->byte_map_base); -#else - _card_table_base = new LIR_Const((jint)ct->byte_map_base); -#endif + _bs = Universe::heap()->barrier_set(); } @@ -1242,8 +1233,37 @@ // Various barriers +void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) { + // Do the pre-write barrier, if any. 
+ switch (_bs->kind()) { +#ifndef SERIALGC + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: + G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info); + break; +#endif // SERIALGC + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: + // No pre barriers + break; + case BarrierSet::ModRef: + case BarrierSet::Other: + // No pre barriers + break; + default : + ShouldNotReachHere(); + + } +} + void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) { - switch (Universe::heap()->barrier_set()->kind()) { + switch (_bs->kind()) { +#ifndef SERIALGC + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: + G1SATBCardTableModRef_post_barrier(addr, new_val); + break; +#endif // SERIALGC case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: CardTableModRef_post_barrier(addr, new_val); @@ -1257,11 +1277,120 @@ } } +//////////////////////////////////////////////////////////////////////// +#ifndef SERIALGC + +void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) { + if (G1DisablePreBarrier) return; + + // First we test whether marking is in progress. + BasicType flag_type; + if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { + flag_type = T_INT; + } else { + guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, + "Assumption"); + flag_type = T_BYTE; + } + LIR_Opr thrd = getThreadPointer(); + LIR_Address* mark_active_flag_addr = + new LIR_Address(thrd, + in_bytes(JavaThread::satb_mark_queue_offset() + + PtrQueue::byte_offset_of_active()), + flag_type); + // Read the marking-in-progress flag. + LIR_Opr flag_val = new_register(T_INT); + __ load(mark_active_flag_addr, flag_val); + + LabelObj* start_store = new LabelObj(); + + LIR_PatchCode pre_val_patch_code = + patch ? lir_patch_normal : lir_patch_none; + + LIR_Opr pre_val = new_register(T_OBJECT); + + __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0)); + if (!addr_opr->is_address()) { + assert(addr_opr->is_register(), "must be"); + addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, 0, T_OBJECT)); + } + CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, + info); + __ branch(lir_cond_notEqual, T_INT, slow); + __ branch_destination(slow->continuation()); +} + +void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) { + if (G1DisablePostBarrier) return; + + // If the "new_val" is a constant NULL, no barrier is necessary. 
+ if (new_val->is_constant() && + new_val->as_constant_ptr()->as_jobject() == NULL) return; + + if (!new_val->is_register()) { + LIR_Opr new_val_reg = new_pointer_register(); + if (new_val->is_constant()) { + __ move(new_val, new_val_reg); + } else { + __ leal(new_val, new_val_reg); + } + new_val = new_val_reg; + } + assert(new_val->is_register(), "must be a register at this point"); + + if (addr->is_address()) { + LIR_Address* address = addr->as_address_ptr(); + LIR_Opr ptr = new_pointer_register(); + if (!address->index()->is_valid() && address->disp() == 0) { + __ move(address->base(), ptr); + } else { + assert(address->disp() != max_jint, "lea doesn't support patched addresses!"); + __ leal(addr, ptr); + } + addr = ptr; + } + assert(addr->is_register(), "must be a register at this point"); + + LIR_Opr xor_res = new_pointer_register(); + LIR_Opr xor_shift_res = new_pointer_register(); + + if (TwoOperandLIRForm ) { + __ move(addr, xor_res); + __ logical_xor(xor_res, new_val, xor_res); + __ move(xor_res, xor_shift_res); + __ unsigned_shift_right(xor_shift_res, + LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes), + xor_shift_res, + LIR_OprDesc::illegalOpr()); + } else { + __ logical_xor(addr, new_val, xor_res); + __ unsigned_shift_right(xor_res, + LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes), + xor_shift_res, + LIR_OprDesc::illegalOpr()); + } + + if (!new_val->is_register()) { + LIR_Opr new_val_reg = new_pointer_register(); + __ leal(new_val, new_val_reg); + new_val = new_val_reg; + } + assert(new_val->is_register(), "must be a register at this point"); + + __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD)); + + CodeStub* slow = new G1PostBarrierStub(addr, new_val); + __ branch(lir_cond_notEqual, T_INT, slow); + __ branch_destination(slow->continuation()); +} + +#endif // SERIALGC +//////////////////////////////////////////////////////////////////////// + void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) { - BarrierSet* bs = Universe::heap()->barrier_set(); - assert(sizeof(*((CardTableModRefBS*)bs)->byte_map_base) == sizeof(jbyte), "adjust this code"); - LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)bs)->byte_map_base); + assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code"); + LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base); if (addr->is_address()) { LIR_Address* address = addr->as_address_ptr(); LIR_Opr ptr = new_register(T_OBJECT); @@ -1391,6 +1520,13 @@ __ membar_release(); } + if (is_oop) { + // Do the pre-write barrier, if any. + pre_barrier(LIR_OprFact::address(address), + needs_patching, + (info ? 
new CodeEmitInfo(info) : NULL)); + } + if (is_volatile) { assert(!needs_patching && x->is_loaded(), "how do we know it's volatile if it's not loaded"); @@ -1401,7 +1537,12 @@ } if (is_oop) { +#ifdef PRECISE_CARDMARK + // Precise cardmarks don't work + post_barrier(LIR_OprFact::address(address), value.result()); +#else post_barrier(object.result(), value.result()); +#endif // PRECISE_CARDMARK } if (is_volatile && os::is_MP()) { @@ -1720,7 +1861,7 @@ assert(log2_scale == 0, "must not have a scale"); addr = new LIR_Address(base_op, index_op->as_jint(), dst_type); } else { -#ifdef IA32 +#ifdef X86 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type); #else if (index_op->is_illegal() || log2_scale == 0) { --- old/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp 2009-08-01 04:09:38.505970984 +0100 +++ new/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp 2009-08-01 04:09:38.422495280 +0100 @@ -148,6 +148,7 @@ // only the classes below belong in the same file class LIRGenerator: public InstructionVisitor, public BlockClosure { + private: Compilation* _compilation; ciMethod* _method; // method that we are compiling @@ -157,6 +158,7 @@ Values _instruction_for_operand; BitMap2D _vreg_flags; // flags which can be set on a per-vreg basis LIR_List* _lir; + BarrierSet* _bs; LIRGenerator* gen() { return this; @@ -177,8 +179,6 @@ LIR_OprList _reg_for_constants; Values _unpinned_constants; - LIR_Const* _card_table_base; - friend class PhiResolver; // unified bailout support @@ -199,8 +199,6 @@ LIR_Opr load_constant(Constant* x); LIR_Opr load_constant(LIR_Const* constant); - LIR_Const* card_table_base() const { return _card_table_base; } - void set_result(Value x, LIR_Opr opr) { assert(opr->is_valid(), "must set to valid value"); assert(x->operand()->is_illegal(), "operand should never change"); @@ -256,12 +254,17 @@ // generic interface + void pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info); void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); // specific implementations + // pre barriers + + void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info); // post barriers + void G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); void CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); --- old/hotspot/src/share/vm/c1/c1_LinearScan.cpp 2009-08-01 04:09:39.451664683 +0100 +++ new/hotspot/src/share/vm/c1/c1_LinearScan.cpp 2009-08-01 04:09:39.330773577 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_LinearScan.cpp 1.14 07/08/14 16:07:30 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -83,7 +83,7 @@ , _scope_value_cache(0) // initialized later with correct length , _interval_in_loop(0, 0) // initialized later with correct length , _cached_blocks(*ir->linear_scan_order()) -#ifdef IA32 +#ifdef X86 , _fpu_stack_allocator(NULL) #endif { @@ -119,7 +119,7 @@ return opr->cpu_regnr(); } else if (opr->is_double_cpu()) { return opr->cpu_regnrLo(); -#ifdef IA32 +#ifdef X86 } else if (opr->is_single_xmm()) { return opr->fpu_regnr() + pd_first_xmm_reg; } else if (opr->is_double_xmm()) { @@ -131,6 +131,7 @@ return opr->fpu_regnrLo() + pd_first_fpu_reg; } else { ShouldNotReachHere(); + return -1; } } @@ -143,7 +144,7 @@ return -1; } else if (opr->is_double_cpu()) { return opr->cpu_regnrHi(); -#ifdef IA32 +#ifdef X86 } else if (opr->is_single_xmm()) { return -1; } else if (opr->is_double_xmm()) { @@ -155,6 +156,7 @@ return opr->fpu_regnrHi() + pd_first_fpu_reg; } else { ShouldNotReachHere(); + return -1; } } @@ -1066,7 +1068,7 @@ } -#ifdef IA32 +#ifdef X86 if (op->code() == lir_cmove) { // conditional moves can handle stack operands assert(op->result_opr()->is_register(), "result must always be in a register"); @@ -1131,7 +1133,7 @@ } } } -#endif // IA32 +#endif // X86 // all other operands require a register return mustHaveRegister; @@ -1264,7 +1266,7 @@ // virtual fpu operands. Otherwise no allocation for fpu registers is // perfomed and so the temp ranges would be useless if (has_fpu_registers()) { -#ifdef IA32 +#ifdef X86 if (UseSSE < 2) { #endif for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) { @@ -1273,7 +1275,7 @@ assert(reg_numHi(opr) == -1, "missing addition of range for hi-register"); caller_save_registers[num_caller_save_registers++] = reg_num(opr); } -#ifdef IA32 +#ifdef X86 } if (UseSSE > 0) { for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) { @@ -1302,8 +1304,8 @@ // Update intervals for registers live at the end of this block; BitMap live = block->live_out(); - int size = live.size(); - for (int number = live.get_next_one_offset(0, size); number < size; number = live.get_next_one_offset(number + 1, size)) { + int size = (int)live.size(); + for (int number = (int)live.get_next_one_offset(0, size); number < size; number = (int)live.get_next_one_offset(number + 1, size)) { assert(live.at(number), "should not stop here otherwise"); assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds"); TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2)); @@ -1657,7 +1659,7 @@ const BitMap live_at_edge = to_block->live_in(); // visit all registers where the live_at_edge bit is set - for (int r = live_at_edge.get_next_one_offset(0, size); r < size; r = live_at_edge.get_next_one_offset(r + 1, size)) { + for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) { assert(r < num_regs, "live information set for not exisiting interval"); assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge"); @@ -1827,7 +1829,7 @@ // visit all registers where the live_in bit is set int size = live_set_size(); - for (int r = block->live_in().get_next_one_offset(0, size); r < size; r = block->live_in().get_next_one_offset(r + 1, size)) { + for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) { resolve_exception_entry(block, r, move_resolver); } @@ -1901,7 +1903,7 @@ // 
visit all registers where the live_in bit is set BlockBegin* block = handler->entry_block(); int size = live_set_size(); - for (int r = block->live_in().get_next_one_offset(0, size); r < size; r = block->live_in().get_next_one_offset(r + 1, size)) { + for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) { resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver); } @@ -2035,19 +2037,19 @@ assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even"); } -#ifdef SPARC #ifdef _LP64 return LIR_OprFact::double_cpu(assigned_reg, assigned_reg); #else +#ifdef SPARC return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg); -#endif #else return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi); -#endif +#endif // SPARC +#endif // LP64 } case T_FLOAT: { -#ifdef IA32 +#ifdef X86 if (UseSSE >= 1) { assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register"); assert(interval->assigned_regHi() == any_reg, "must not have hi register"); @@ -2061,7 +2063,7 @@ } case T_DOUBLE: { -#ifdef IA32 +#ifdef X86 if (UseSSE >= 2) { assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register"); assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)"); @@ -2125,7 +2127,7 @@ LIR_Opr res = operand_for_interval(interval); -#ifdef IA32 +#ifdef X86 // new semantic for is_last_use: not only set on definite end of interval, // but also before hole // This may still miss some cases (e.g. for dead values), but it is not necessary that the @@ -2478,6 +2480,7 @@ default: ShouldNotReachHere(); + return -1; } } @@ -2518,7 +2521,7 @@ scope_values->append(sv); return 1; -#ifdef IA32 +#ifdef X86 } else if (opr->is_single_xmm()) { VMReg rname = opr->as_xmm_float_reg()->as_VMReg(); LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname)); @@ -2528,7 +2531,7 @@ #endif } else if (opr->is_single_fpu()) { -#ifdef IA32 +#ifdef X86 // the exact location of fpu stack values is only known // during fpu stack allocation, so the stack allocator object // must be present @@ -2551,12 +2554,23 @@ ScopeValue* second; if (opr->is_double_stack()) { +#ifdef _LP64 + Location loc1; + Location::Type loc_type = opr->type() == T_LONG ? Location::lng : Location::dbl; + if (!frame_map()->locations_for_slot(opr->double_stack_ix(), loc_type, &loc1, NULL)) { + bailout("too large frame"); + } + // Does this reverse on x86 vs. sparc? + first = new LocationValue(loc1); + second = &_int_0_scope_value; +#else Location loc1, loc2; if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) { bailout("too large frame"); } first = new LocationValue(loc1); second = new LocationValue(loc2); +#endif // _LP64 } else if (opr->is_double_cpu()) { #ifdef _LP64 @@ -2576,9 +2590,10 @@ first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first)); second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second)); -#endif +#endif //_LP64 -#ifdef IA32 + +#ifdef X86 } else if (opr->is_double_xmm()) { assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation"); VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg(); @@ -2592,13 +2607,13 @@ } else if (opr->is_double_fpu()) { // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of - // the double as float registers in the native ordering. 
On IA32, + // the double as float registers in the native ordering. On X86, // fpu_regnrLo is a FPU stack slot whose VMReg represents // the low-order word of the double and fpu_regnrLo + 1 is the // name for the other half. *first and *second must represent the // least and most significant words, respectively. -#ifdef IA32 +#ifdef X86 // the exact location of fpu stack values is only known // during fpu stack allocation, so the stack allocator object // must be present @@ -2868,7 +2883,6 @@ op->verify(); #endif -#ifndef _LP64 // remove useless moves if (op->code() == lir_move) { assert(op->as_Op1() != NULL, "move must be LIR_Op1"); @@ -2882,7 +2896,6 @@ has_dead = true; } } -#endif } if (has_dead) { @@ -3195,7 +3208,7 @@ BitMap live_at_edge = block->live_in(); // visit all registers where the live_at_edge bit is set - for (int r = live_at_edge.get_next_one_offset(0, size); r < size; r = live_at_edge.get_next_one_offset(r + 1, size)) { + for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) { TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id())); Value value = gen()->instruction_for_vreg(r); @@ -3441,7 +3454,7 @@ state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL); } -#ifdef IA32 +#ifdef X86 for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) { state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL); } @@ -4360,7 +4373,7 @@ opr = LIR_OprFact::single_cpu(assigned_reg()); } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) { opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg); -#ifdef IA32 +#ifdef X86 } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) { opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg); #endif @@ -5438,7 +5451,7 @@ } bool LinearScanWalker::no_allocation_possible(Interval* cur) { -#ifdef IA32 +#ifdef X86 // fast calculation of intervals that can never get a register because the // the next instruction is a call that blocks all registers // Note: this does not work if callee-saved registers are available (e.g. on Sparc) --- old/hotspot/src/share/vm/c1/c1_LinearScan.hpp 2009-08-01 04:09:40.667955707 +0100 +++ new/hotspot/src/share/vm/c1/c1_LinearScan.hpp 2009-08-01 04:09:40.582156038 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_LinearScan.hpp 1.13 07/08/14 16:07:30 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -180,7 +180,7 @@ bool is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); } // handling of fpu stack allocation (platform dependent, needed for debug information generation) -#ifdef IA32 +#ifdef X86 FpuStackAllocator* _fpu_stack_allocator; bool use_fpu_stack_allocation() const { return UseSSE < 2 && has_fpu_registers(); } #else --- old/hotspot/src/share/vm/c1/c1_Runtime1.cpp 2009-08-01 04:09:42.377325632 +0100 +++ new/hotspot/src/share/vm/c1/c1_Runtime1.cpp 2009-08-01 04:09:42.288819474 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_Runtime1.cpp 1.245 08/11/07 15:47:09 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -171,6 +171,8 @@ switch (id) { // These stubs don't need to have an oopmap case dtrace_object_alloc_id: + case g1_pre_barrier_slow_id: + case g1_post_barrier_slow_id: case slow_subtype_check_id: case fpu2long_stub_id: case unwind_exception_id: @@ -339,21 +341,6 @@ assert(oop(klass)->is_klass(), "not a class"); assert(rank >= 1, "rank must be nonzero"); -#ifdef _LP64 -// In 64 bit mode, the sizes are stored in the top 32 bits -// of each 64 bit stack entry. -// dims is actually an intptr_t * because the arguments -// are pushed onto a 64 bit stack. -// We must create an array of jints to pass to multi_allocate. -// We reuse the current stack because it will be popped -// after this bytecode is completed. - if ( rank > 1 ) { - int index; - for ( index = 1; index < rank; index++ ) { // First size is ok - dims[index] = dims[index*2]; - } - } -#endif oop obj = arrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK); thread->set_vm_result(obj); JRT_END @@ -1084,6 +1071,43 @@ JRT_END +// Array copy return codes. +enum { + ac_failed = -1, // arraycopy failed + ac_ok = 0 // arraycopy succeeded +}; + + +template int obj_arraycopy_work(oopDesc* src, T* src_addr, + oopDesc* dst, T* dst_addr, + int length) { + + // For performance reasons, we assume we are using a card marking write + // barrier. The assert will fail if this is not the case. + // Note that we use the non-virtual inlineable variant of write_ref_array. + BarrierSet* bs = Universe::heap()->barrier_set(); + assert(bs->has_write_ref_array_opt(), + "Barrier set must have ref array opt"); + if (src == dst) { + // same object, no check + Copy::conjoint_oops_atomic(src_addr, dst_addr, length); + bs->write_ref_array(MemRegion((HeapWord*)dst_addr, + (HeapWord*)(dst_addr + length))); + return ac_ok; + } else { + klassOop bound = objArrayKlass::cast(dst->klass())->element_klass(); + klassOop stype = objArrayKlass::cast(src->klass())->element_klass(); + if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) { + // Elements are guaranteed to be subtypes, so no check necessary + Copy::conjoint_oops_atomic(src_addr, dst_addr, length); + bs->write_ref_array(MemRegion((HeapWord*)dst_addr, + (HeapWord*)(dst_addr + length))); + return ac_ok; + } + } + return ac_failed; +} + // fast and direct copy of arrays; returning -1, means that an exception may be thrown // and we did not copy anything JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length)) @@ -1091,11 +1115,6 @@ _generic_arraycopy_cnt++; // Slow-path oop array copy #endif - enum { - ac_failed = -1, // arraycopy failed - ac_ok = 0 // arraycopy succeeded - }; - if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed; if (!dst->is_array() || !src->is_array()) return ac_failed; if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed; @@ -1115,30 +1134,14 @@ memmove(dst_addr, src_addr, length << l2es); return ac_ok; } else if (src->is_objArray() && dst->is_objArray()) { - oop* src_addr = objArrayOop(src)->obj_at_addr(src_pos); - oop* dst_addr = objArrayOop(dst)->obj_at_addr(dst_pos); - // For performance reasons, we assume we are using a card marking write - // barrier. The assert will fail if this is not the case. - // Note that we use the non-virtual inlineable variant of write_ref_array. 
- BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->has_write_ref_array_opt(), - "Barrier set must have ref array opt"); - if (src == dst) { - // same object, no check - Copy::conjoint_oops_atomic(src_addr, dst_addr, length); - bs->write_ref_array(MemRegion((HeapWord*)dst_addr, - (HeapWord*)(dst_addr + length))); - return ac_ok; + if (UseCompressedOops) { // will need for tiered + narrowOop *src_addr = objArrayOop(src)->obj_at_addr(src_pos); + narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr(dst_pos); + return obj_arraycopy_work(src, src_addr, dst, dst_addr, length); } else { - klassOop bound = objArrayKlass::cast(dst->klass())->element_klass(); - klassOop stype = objArrayKlass::cast(src->klass())->element_klass(); - if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) { - // Elements are guaranteed to be subtypes, so no check necessary - Copy::conjoint_oops_atomic(src_addr, dst_addr, length); - bs->write_ref_array(MemRegion((HeapWord*)dst_addr, - (HeapWord*)(dst_addr + length))); - return ac_ok; - } + oop *src_addr = objArrayOop(src)->obj_at_addr(src_pos); + oop *dst_addr = objArrayOop(dst)->obj_at_addr(dst_pos); + return obj_arraycopy_work(src, src_addr, dst, dst_addr, length); } } return ac_failed; --- old/hotspot/src/share/vm/c1/c1_Runtime1.hpp 2009-08-01 04:09:43.323705994 +0100 +++ new/hotspot/src/share/vm/c1/c1_Runtime1.hpp 2009-08-01 04:09:43.249966238 +0100 @@ -59,6 +59,8 @@ stub(access_field_patching) \ stub(load_klass_patching) \ stub(jvmti_exception_throw) \ + stub(g1_pre_barrier_slow) \ + stub(g1_post_barrier_slow) \ stub(fpu2long_stub) \ stub(counter_overflow) \ last_entry(number_of_ids) --- old/hotspot/src/share/vm/c1/c1_ValueMap.hpp 2009-08-01 04:09:44.212433086 +0100 +++ new/hotspot/src/share/vm/c1/c1_ValueMap.hpp 2009-08-01 04:09:44.126210641 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c1_ValueMap.hpp 1.22 07/05/05 17:05:07 JVM" #endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
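
The arraycopy hunk above replaces the open-coded oop copy with a helper templated on the element width, instantiated for plain oop* and, when UseCompressedOops is set, for 32-bit narrowOop elements. A condensed sketch of its shape, assuming a card-marking collector with 512-byte cards and a biased card-table base; the types, the kCardShift constant, and the use of memmove instead of the atomic oop copy are simplifications for illustration:

#include <cstdint>
#include <cstring>

static const int kCardShift = 9;   // assumption: 512-byte cards

template <typename T>
static int arraycopy_oops_sketch(T* src, T* dst, int length,
                                 bool dst_elem_accepts_src,      // result of the element subtype check
                                 uint8_t* card_table_base) {     // biased: card for address A is card_table_base[A >> kCardShift]
  if (src != dst && !dst_elem_accepts_src) {
    return -1;                                 // ac_failed: caller falls back to the checked copy
  }
  memmove(dst, src, length * sizeof(T));       // bulk copy, no per-element store checks
  // Dirty every card spanned by the destination range in one pass; this is what the
  // non-virtual write_ref_array() call over the destination MemRegion amounts to.
  uintptr_t first = (uintptr_t)dst >> kCardShift;
  uintptr_t last  = ((uintptr_t)(dst + length) - 1) >> kCardShift;
  memset(card_table_base + first, 0 /* dirty */, last - first + 1);
  return 0;                                    // ac_ok
}
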
* * This code is free software; you can redistribute it and/or modify it @@ -136,53 +136,77 @@ virtual void kill_array(ValueType* type) = 0; // visitor functions - void do_StoreField (StoreField* x) { kill_field(x->field()); }; - void do_StoreIndexed (StoreIndexed* x) { kill_array(x->type()); }; - void do_MonitorEnter (MonitorEnter* x) { kill_memory(); }; - void do_MonitorExit (MonitorExit* x) { kill_memory(); }; - void do_Invoke (Invoke* x) { kill_memory(); }; - void do_UnsafePutRaw (UnsafePutRaw* x) { kill_memory(); }; - void do_UnsafePutObject(UnsafePutObject* x) { kill_memory(); }; - void do_Intrinsic (Intrinsic* x) { if (!x->preserves_state()) kill_memory(); }; - - void do_Phi (Phi* x) { /* nothing to do */ }; - void do_Local (Local* x) { /* nothing to do */ }; - void do_Constant (Constant* x) { /* nothing to do */ }; - void do_LoadField (LoadField* x) { /* nothing to do */ }; - void do_ArrayLength (ArrayLength* x) { /* nothing to do */ }; - void do_LoadIndexed (LoadIndexed* x) { /* nothing to do */ }; - void do_NegateOp (NegateOp* x) { /* nothing to do */ }; - void do_ArithmeticOp (ArithmeticOp* x) { /* nothing to do */ }; - void do_ShiftOp (ShiftOp* x) { /* nothing to do */ }; - void do_LogicOp (LogicOp* x) { /* nothing to do */ }; - void do_CompareOp (CompareOp* x) { /* nothing to do */ }; - void do_IfOp (IfOp* x) { /* nothing to do */ }; - void do_Convert (Convert* x) { /* nothing to do */ }; - void do_NullCheck (NullCheck* x) { /* nothing to do */ }; - void do_NewInstance (NewInstance* x) { /* nothing to do */ }; - void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ }; - void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ }; - void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ }; - void do_CheckCast (CheckCast* x) { /* nothing to do */ }; - void do_InstanceOf (InstanceOf* x) { /* nothing to do */ }; - void do_BlockBegin (BlockBegin* x) { /* nothing to do */ }; - void do_Goto (Goto* x) { /* nothing to do */ }; - void do_If (If* x) { /* nothing to do */ }; - void do_IfInstanceOf (IfInstanceOf* x) { /* nothing to do */ }; - void do_TableSwitch (TableSwitch* x) { /* nothing to do */ }; - void do_LookupSwitch (LookupSwitch* x) { /* nothing to do */ }; - void do_Return (Return* x) { /* nothing to do */ }; - void do_Throw (Throw* x) { /* nothing to do */ }; - void do_Base (Base* x) { /* nothing to do */ }; - void do_OsrEntry (OsrEntry* x) { /* nothing to do */ }; - void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ }; - void do_RoundFP (RoundFP* x) { /* nothing to do */ }; - void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ }; - void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ }; - void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ }; - void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }; - void do_ProfileCall (ProfileCall* x) { /* nothing to do */ }; - void do_ProfileCounter (ProfileCounter* x) { /* nothing to do */ }; + void do_StoreField (StoreField* x) { + if (!x->is_initialized()) { + kill_memory(); + } else { + kill_field(x->field()); + } + } + void do_StoreIndexed (StoreIndexed* x) { kill_array(x->type()); } + void do_MonitorEnter (MonitorEnter* x) { kill_memory(); } + void do_MonitorExit (MonitorExit* x) { kill_memory(); } + void do_Invoke (Invoke* x) { kill_memory(); } + void do_UnsafePutRaw (UnsafePutRaw* x) { kill_memory(); } + void do_UnsafePutObject(UnsafePutObject* x) { kill_memory(); } + void do_Intrinsic (Intrinsic* x) { if (!x->preserves_state()) 
kill_memory(); } + + void do_Phi (Phi* x) { /* nothing to do */ } + void do_Local (Local* x) { /* nothing to do */ } + void do_Constant (Constant* x) { /* nothing to do */ } + void do_LoadField (LoadField* x) { + if (!x->is_initialized()) { + kill_memory(); + } + } + void do_ArrayLength (ArrayLength* x) { /* nothing to do */ } + void do_LoadIndexed (LoadIndexed* x) { /* nothing to do */ } + void do_NegateOp (NegateOp* x) { /* nothing to do */ } + void do_ArithmeticOp (ArithmeticOp* x) { /* nothing to do */ } + void do_ShiftOp (ShiftOp* x) { /* nothing to do */ } + void do_LogicOp (LogicOp* x) { /* nothing to do */ } + void do_CompareOp (CompareOp* x) { /* nothing to do */ } + void do_IfOp (IfOp* x) { /* nothing to do */ } + void do_Convert (Convert* x) { /* nothing to do */ } + void do_NullCheck (NullCheck* x) { /* nothing to do */ } + void do_NewInstance (NewInstance* x) { /* nothing to do */ } + void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ } + void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ } + void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ } + void do_CheckCast (CheckCast* x) { /* nothing to do */ } + void do_InstanceOf (InstanceOf* x) { /* nothing to do */ } + void do_BlockBegin (BlockBegin* x) { /* nothing to do */ } + void do_Goto (Goto* x) { /* nothing to do */ } + void do_If (If* x) { /* nothing to do */ } + void do_IfInstanceOf (IfInstanceOf* x) { /* nothing to do */ } + void do_TableSwitch (TableSwitch* x) { /* nothing to do */ } + void do_LookupSwitch (LookupSwitch* x) { /* nothing to do */ } + void do_Return (Return* x) { /* nothing to do */ } + void do_Throw (Throw* x) { /* nothing to do */ } + void do_Base (Base* x) { /* nothing to do */ } + void do_OsrEntry (OsrEntry* x) { /* nothing to do */ } + void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ } + void do_RoundFP (RoundFP* x) { /* nothing to do */ } + void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ } + void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ } + void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ } + void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ } + void do_ProfileCall (ProfileCall* x) { /* nothing to do */ } + void do_ProfileCounter (ProfileCounter* x) { /* nothing to do */ } +}; + + +class ValueNumberingEffects: public ValueNumberingVisitor { + private: + ValueMap* _map; + + public: + // implementation for abstract methods of ValueNumberingVisitor + void kill_memory() { _map->kill_memory(); } + void kill_field(ciField* field) { _map->kill_field(field); } + void kill_array(ValueType* type) { _map->kill_array(type); } + + ValueNumberingEffects(ValueMap* map): _map(map) {} }; --- old/hotspot/src/share/vm/c1/c1_globals.hpp 2009-08-01 04:09:45.122736680 +0100 +++ new/hotspot/src/share/vm/c1/c1_globals.hpp 2009-08-01 04:09:45.038739639 +0100 @@ -216,9 +216,6 @@ develop(bool, UseFastLocking, true, \ "Use fast inlined locking code") \ \ - product(bool, FastTLABRefill, true, \ - "Use fast TLAB refill code") \ - \ develop(bool, UseSlowPath, false, \ "For debugging: test slow cases by always using them") \ \ --- old/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp 2009-08-01 04:09:46.023055981 +0100 +++ new/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp 2009-08-01 04:09:45.946298139 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)bcEscapeAnalyzer.cpp 1.7 07/05/17 15:49:50 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. 
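
The visitor rewrite above also changes what a field access is allowed to preserve: a load or store whose holder class is not yet initialized may trigger that class's <clinit>, which can write anything, so value numbering must forget all cached values rather than just the one field. A stand-alone sketch of the decision, with hypothetical Field and ValueMapSketch types in place of the c1 classes:

struct Field { /* identity of a Java field */ };

struct ValueMapSketch {
  void kill_memory()      { /* drop every cached load */ }
  void kill_field(Field*) { /* drop cached loads of this field only */ }
};

// Mirrors the new do_StoreField: an access to an uninitialized holder is treated as
// an arbitrary side effect; an access to an initialized holder only kills that field.
static void on_store_field_sketch(ValueMapSketch* map, Field* f, bool holder_initialized) {
  if (!holder_initialized) {
    map->kill_memory();
  } else {
    map->kill_field(f);
  }
}
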
+ * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -107,7 +107,7 @@ }; void BCEscapeAnalyzer::set_returned(ArgumentMap vars) { - for (int i = 0; i <= _arg_size; i++) { + for (int i = 0; i < _arg_size; i++) { if (vars.contains(i)) _arg_returned.set_bit(i); } @@ -115,10 +115,9 @@ _return_allocated = _return_allocated && vars.contains_allocated() && !(vars.contains_unknown() || vars.contains_vars()); } - // return true if any element of vars is an argument bool BCEscapeAnalyzer::is_argument(ArgumentMap vars) { - for (int i = 0; i <= _arg_size; i++) { + for (int i = 0; i < _arg_size; i++) { if (vars.contains(i)) return true; } @@ -129,7 +128,7 @@ bool BCEscapeAnalyzer::is_arg_stack(ArgumentMap vars){ if (_conservative) return true; - for (int i = 0; i <= _arg_size; i++) { + for (int i = 0; i < _arg_size; i++) { if (vars.contains(i) && _arg_stack.at(i)) return true; } @@ -137,12 +136,13 @@ } void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, BitMap &bm) { - for (int i = 0; i <= _arg_size; i++) { + for (int i = 0; i < _arg_size; i++) { if (vars.contains(i)) { bm.clear_bit(i); } } } + void BCEscapeAnalyzer::set_method_escape(ArgumentMap vars) { clear_bits(vars, _arg_local); } @@ -158,6 +158,17 @@ clear_bits(vars, _dirty); } +void BCEscapeAnalyzer::set_modified(ArgumentMap vars, int offs, int size) { + + for (int i = 0; i < _arg_size; i++) { + if (vars.contains(i)) { + set_arg_modified(i, offs, size); + } + } + if (vars.contains_unknown()) + _unknown_modified = true; +} + bool BCEscapeAnalyzer::is_recursive_call(ciMethod* callee) { for (BCEscapeAnalyzer* scope = this; scope != NULL; scope = scope->_parent) { if (scope->method() == callee) { @@ -167,6 +178,40 @@ return false; } +bool BCEscapeAnalyzer::is_arg_modified(int arg, int offset, int size_in_bytes) { + if (offset == OFFSET_ANY) + return _arg_modified[arg] != 0; + assert(arg >= 0 && arg < _arg_size, "must be an argument."); + bool modified = false; + int l = offset / HeapWordSize; + int h = round_to(offset + size_in_bytes, HeapWordSize) / HeapWordSize; + if (l > ARG_OFFSET_MAX) + l = ARG_OFFSET_MAX; + if (h > ARG_OFFSET_MAX+1) + h = ARG_OFFSET_MAX + 1; + for (int i = l; i < h; i++) { + modified = modified || (_arg_modified[arg] & (1 << i)) != 0; + } + return modified; +} + +void BCEscapeAnalyzer::set_arg_modified(int arg, int offset, int size_in_bytes) { + if (offset == OFFSET_ANY) { + _arg_modified[arg] = (uint) -1; + return; + } + assert(arg >= 0 && arg < _arg_size, "must be an argument."); + int l = offset / HeapWordSize; + int h = round_to(offset + size_in_bytes, HeapWordSize) / HeapWordSize; + if (l > ARG_OFFSET_MAX) + l = ARG_OFFSET_MAX; + if (h > ARG_OFFSET_MAX+1) + h = ARG_OFFSET_MAX + 1; + for (int i = l; i < h; i++) { + _arg_modified[arg] |= (1 << i); + } +} + void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod* target, ciKlass* holder) { int i; @@ -175,7 +220,14 @@ ciInstanceKlass* calling_klass = method()->holder(); ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder); ciInstanceKlass* actual_recv = callee_holder; - + + // some methods are obviously bindable without any type checks so + // convert them directly to an invokespecial. 
+ if (target->is_loaded() && !target->is_abstract() && + target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) { + code = Bytecodes::_invokespecial; + } + // compute size of arguments int arg_size = target->arg_size(); if (!target->is_loaded() && code == Bytecodes::_invokestatic) { @@ -200,6 +252,7 @@ for (i = 0; i < arg_size; i++) { set_method_escape(state.raw_pop()); } + _unknown_modified = true; // assume the worst since we don't analyze the called method return; } @@ -227,6 +280,11 @@ ArgumentMap arg = state.raw_pop(); if (!is_argument(arg)) continue; + for (int j = 0; j < _arg_size; j++) { + if (arg.contains(j)) { + _arg_modified[j] |= analyzer._arg_modified[i]; + } + } if (!is_arg_stack(arg)) { // arguments have already been recognized as escaping } else if (analyzer.is_arg_stack(i) && !analyzer.is_arg_returned(i)) { @@ -236,6 +294,7 @@ set_global_escape(arg); } } + _unknown_modified = _unknown_modified || analyzer.has_non_arg_side_affects(); // record dependencies if at least one parameter retained stack-allocatable if (must_record_dependencies) { @@ -253,8 +312,10 @@ ArgumentMap arg = state.raw_pop(); if (!is_argument(arg)) continue; + set_modified(arg, OFFSET_ANY, type2size[T_INT]*HeapWordSize); set_global_escape(arg); } + _unknown_modified = true; // assume the worst since we don't know the called method } } @@ -424,6 +485,7 @@ state.spop(); ArgumentMap arr = state.apop(); set_method_escape(arr); + set_modified(arr, OFFSET_ANY, type2size[T_INT]*HeapWordSize); break; } case Bytecodes::_lastore: @@ -433,6 +495,7 @@ state.spop(); ArgumentMap arr = state.apop(); set_method_escape(arr); + set_modified(arr, OFFSET_ANY, type2size[T_LONG]*HeapWordSize); break; } case Bytecodes::_aastore: @@ -440,6 +503,7 @@ set_global_escape(state.apop()); state.spop(); ArgumentMap arr = state.apop(); + set_modified(arr, OFFSET_ANY, type2size[T_OBJECT]*HeapWordSize); break; } case Bytecodes::_pop: @@ -765,6 +829,7 @@ if (s.cur_bc() != Bytecodes::_putstatic) { ArgumentMap p = state.apop(); set_method_escape(p); + set_modified(p, will_link ? 
field->offset() : OFFSET_ANY, type2size[field_type]*HeapWordSize); } } break; @@ -875,7 +940,7 @@ } void BCEscapeAnalyzer::merge_block_states(StateInfo *blockstates, ciBlock *dest, StateInfo *s_state) { - StateInfo *d_state = blockstates+dest->index(); + StateInfo *d_state = blockstates + dest->index(); int nlocals = _method->max_locals(); // exceptions may cause transfer of control to handlers in the middle of a @@ -919,6 +984,7 @@ } for (int i = 0; i < s_state->_stack_height; i++) { ArgumentMap t; + //extra_vars |= !d_state->_vars[i] & s_state->_vars[i]; t.clear(); t = s_state->_stack[i]; t.set_difference(d_state->_stack[i]); @@ -936,7 +1002,7 @@ int datacount = (numblocks + 1) * (stkSize + numLocals); int datasize = datacount * sizeof(ArgumentMap); - StateInfo *blockstates = (StateInfo *) arena->Amalloc(_methodBlocks->num_blocks() * sizeof(StateInfo)); + StateInfo *blockstates = (StateInfo *) arena->Amalloc(numblocks * sizeof(StateInfo)); ArgumentMap *statedata = (ArgumentMap *) arena->Amalloc(datasize); for (int i = 0; i < datacount; i++) ::new ((void*)&statedata[i]) ArgumentMap(); ArgumentMap *dp = statedata; @@ -964,33 +1030,35 @@ ArgumentMap allVars; // all oop arguments to method ciSignature* sig = method()->signature(); int j = 0; + ciBlock* first_blk = _methodBlocks->block_containing(0); + int fb_i = first_blk->index(); if (!method()->is_static()) { // record information for "this" - blockstates[0]._vars[j].set(j); + blockstates[fb_i]._vars[j].set(j); allVars.add(j); j++; } for (int i = 0; i < sig->count(); i++) { ciType* t = sig->type_at(i); if (!t->is_primitive_type()) { - blockstates[0]._vars[j].set(j); + blockstates[fb_i]._vars[j].set(j); allVars.add(j); } j += t->size(); } - blockstates[0]._initialized = true; + blockstates[fb_i]._initialized = true; assert(j == _arg_size, "just checking"); ArgumentMap unknown_map; unknown_map.add_unknown(); - worklist.push(_methodBlocks->block_containing(0)); + worklist.push(first_blk); while(worklist.length() > 0) { ciBlock *blk = worklist.pop(); - StateInfo *blkState = blockstates+blk->index(); + StateInfo *blkState = blockstates + blk->index(); if (blk->is_handler() || blk->is_ret_target()) { // for an exception handler or a target of a ret instruction, we assume the worst case, - // that any variable or stack slot could contain any argument + // that any variable could contain any argument for (int i = 0; i < numLocals; i++) { state._vars[i] = allVars; } @@ -1000,6 +1068,7 @@ state._stack_height = blkState->_stack_height; } for (int i = 0; i < state._stack_height; i++) { +// ??? should this be unknown_map ??? 
state._stack[i] = allVars; } } else { @@ -1056,6 +1125,7 @@ vmIntrinsics::ID iid = method()->intrinsic_id(); if (iid == vmIntrinsics::_getClass || + iid == vmIntrinsics::_fillInStackTrace || iid == vmIntrinsics::_hashCode) return iid; else @@ -1063,12 +1133,16 @@ } bool BCEscapeAnalyzer::compute_escape_for_intrinsic(vmIntrinsics::ID iid) { - ArgumentMap empty; - empty.clear(); + ArgumentMap arg; + arg.clear(); switch (iid) { case vmIntrinsics::_getClass: _return_local = false; break; + case vmIntrinsics::_fillInStackTrace: + arg.set(0); // 'this' + set_returned(arg); + break; case vmIntrinsics::_hashCode: // initialized state is correct break; @@ -1112,15 +1186,21 @@ _return_allocated = true; } _allocated_escapes = false; + _unknown_modified = false; } void BCEscapeAnalyzer::clear_escape_info() { ciSignature* sig = method()->signature(); int arg_count = sig->count(); ArgumentMap var; + if (!method()->is_static()) { + arg_count++; // allow for "this" + } for (int i = 0; i < arg_count; i++) { + set_arg_modified(i, OFFSET_ANY, 4); var.clear(); var.set(i); + set_modified(var, OFFSET_ANY, 4); set_global_escape(var); } _arg_local.clear(); @@ -1129,6 +1209,7 @@ _return_local = false; _return_allocated = false; _allocated_escapes = true; + _unknown_modified = true; } @@ -1176,8 +1257,14 @@ initialize(); - // do not scan method if it has no object parameters - if (_arg_local.is_empty()) { + // Do not scan method if it has no object parameters and + // does not returns an object (_return_allocated is set in initialize()). + if (_arg_local.is_empty() && !_return_allocated) { + // Clear all info since method's bytecode was not analysed and + // set pessimistic escape information. + clear_escape_info(); + methodData()->set_eflag(methodDataOopDesc::allocated_escapes); + methodData()->set_eflag(methodDataOopDesc::unknown_modified); methodData()->set_eflag(methodDataOopDesc::estimated); return; } @@ -1188,36 +1275,8 @@ success = do_analysis(); } - // dump result of bytecode analysis -#ifndef PRODUCT - if (BCEATraceLevel >= 3) { - tty->print("[EA] estimated escape information for"); - if (iid != vmIntrinsics::_none) - tty->print(" intrinsic"); - method()->print_short_name(); - tty->print_cr(has_dependencies() ? 
" (not stored)" : ""); - tty->print(" non-escaping args: "); - _arg_local.print_on(tty); - tty->print(" stack-allocatable args: "); - _arg_stack.print_on(tty); - if (_return_local) { - tty->print(" returned args: "); - _arg_returned.print_on(tty); - } else if (is_return_allocated()) { - tty->print_cr(" allocated return values"); - } else { - tty->print_cr(" non-local return values"); - } - tty->cr(); - tty->print(" flags: "); - if (_return_allocated) - tty->print(" return_allocated"); - tty->cr(); - } - -#endif - // don't store interprocedural escape information if it introduces dependencies - // or if method data is empty + // don't store interprocedural escape information if it introduces + // dependencies or if method data is empty // if (!has_dependencies() && !methodData()->is_empty()) { for (i = 0; i < _arg_size; i++) { @@ -1231,10 +1290,20 @@ if (_arg_returned.at(i)) { methodData()->set_arg_returned(i); } + methodData()->set_arg_modified(i, _arg_modified[i]); } if (_return_local) { methodData()->set_eflag(methodDataOopDesc::return_local); } + if (_return_allocated) { + methodData()->set_eflag(methodDataOopDesc::return_allocated); + } + if (_allocated_escapes) { + methodData()->set_eflag(methodDataOopDesc::allocated_escapes); + } + if (_unknown_modified) { + methodData()->set_eflag(methodDataOopDesc::unknown_modified); + } methodData()->set_eflag(methodDataOopDesc::estimated); } } @@ -1247,29 +1316,50 @@ _arg_local.at_put(i, methodData()->is_arg_local(i)); _arg_stack.at_put(i, methodData()->is_arg_stack(i)); _arg_returned.at_put(i, methodData()->is_arg_returned(i)); + _arg_modified[i] = methodData()->arg_modified(i); } _return_local = methodData()->eflag_set(methodDataOopDesc::return_local); + _return_allocated = methodData()->eflag_set(methodDataOopDesc::return_allocated); + _allocated_escapes = methodData()->eflag_set(methodDataOopDesc::allocated_escapes); + _unknown_modified = methodData()->eflag_set(methodDataOopDesc::unknown_modified); + +} - // dump result of loaded escape information #ifndef PRODUCT - if (BCEATraceLevel >= 4) { - tty->print(" non-escaping args: "); - _arg_local.print_on(tty); - tty->print(" stack-allocatable args: "); - _arg_stack.print_on(tty); - if (_return_local) { - tty->print(" returned args: "); - _arg_returned.print_on(tty); - } else { - tty->print_cr(" non-local return values"); - } - tty->print(" modified args: "); - tty->cr(); +void BCEscapeAnalyzer::dump() { + tty->print("[EA] estimated escape information for"); + method()->print_short_name(); + tty->print_cr(has_dependencies() ? 
" (not stored)" : ""); + tty->print(" non-escaping args: "); + _arg_local.print_on(tty); + tty->print(" stack-allocatable args: "); + _arg_stack.print_on(tty); + if (_return_local) { + tty->print(" returned args: "); + _arg_returned.print_on(tty); + } else if (is_return_allocated()) { + tty->print_cr(" return allocated value"); + } else { + tty->print_cr(" return non-local value"); } -#endif - + tty->print(" modified args: "); + for (int i = 0; i < _arg_size; i++) { + if (_arg_modified[i] == 0) + tty->print(" 0"); + else + tty->print(" 0x%x", _arg_modified[i]); + } + tty->cr(); + tty->print(" flags: "); + if (_return_allocated) + tty->print(" return_allocated"); + if (_allocated_escapes) + tty->print(" allocated_escapes"); + if (_unknown_modified) + tty->print(" unknown_modified"); + tty->cr(); } - +#endif BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent) : _conservative(method == NULL || !EstimateArgEscape) @@ -1284,6 +1374,7 @@ , _return_local(false) , _return_allocated(false) , _allocated_escapes(false) + , _unknown_modified(false) , _dependencies() , _parent(parent) , _level(parent == NULL ? 0 : parent->level() + 1) { @@ -1293,6 +1384,8 @@ _arg_returned.clear(); _dirty.clear(); Arena* arena = CURRENT_ENV->arena(); + _arg_modified = (uint *) arena->Amalloc(_arg_size * sizeof(uint)); + Copy::zero_to_bytes(_arg_modified, _arg_size * sizeof(uint)); if (methodData() == NULL) return; @@ -1310,6 +1403,12 @@ compute_escape_info(); methodData()->update_escape_info(); } +#ifndef PRODUCT + if (BCEATraceLevel >= 3) { + // dump escape information + dump(); + } +#endif } } --- old/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp 2009-08-01 04:09:47.005305885 +0100 +++ new/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp 2009-08-01 04:09:46.922646754 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)bcEscapeAnalyzer.hpp 1.6 07/05/05 17:05:11 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -49,10 +49,13 @@ BitMap _arg_stack; BitMap _arg_returned; BitMap _dirty; + enum{ ARG_OFFSET_MAX = 31}; + uint *_arg_modified; bool _return_local; - bool _allocated_escapes; bool _return_allocated; + bool _allocated_escapes; + bool _unknown_modified; ciObjectList _dependencies; @@ -83,6 +86,7 @@ void set_method_escape(ArgumentMap vars); void set_global_escape(ArgumentMap vars); void set_dirty(ArgumentMap vars); + void set_modified(ArgumentMap vars, int offs, int size); bool is_recursive_call(ciMethod* callee); void add_dependence(ciKlass *klass, ciMethod *meth); @@ -143,6 +147,18 @@ return !_conservative && _return_allocated && !_allocated_escapes; } + // Tracking of argument modification + + enum {OFFSET_ANY = -1}; + bool is_arg_modified(int arg, int offset, int size_in_bytes); + void set_arg_modified(int arg, int offset, int size_in_bytes); + bool has_non_arg_side_affects() { return _unknown_modified; } + // Copy dependencies from this analysis into "deps" void copy_dependencies(Dependencies *deps); + +#ifndef PRODUCT + // dump escape information + void dump(); +#endif }; --- old/hotspot/src/share/vm/ci/ciEnv.cpp 2009-08-01 04:09:47.844265838 +0100 +++ new/hotspot/src/share/vm/ci/ciEnv.cpp 2009-08-01 04:09:47.755918069 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciEnv.cpp 1.128 07/05/17 15:49:53 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. 
All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -487,11 +487,16 @@ } else if (tag.is_double()) { return ciConstant((jdouble)cpool->double_at(index)); } else if (tag.is_string() || tag.is_unresolved_string()) { - oop string = cpool->string_at(index, THREAD); - if (HAS_PENDING_EXCEPTION) { - CLEAR_PENDING_EXCEPTION; - record_out_of_memory_failure(); - return ciConstant(); + oop string = NULL; + if (cpool->is_pseudo_string_at(index)) { + string = cpool->pseudo_string_at(index); + } else { + string = cpool->string_at(index, THREAD); + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + record_out_of_memory_failure(); + return ciConstant(); + } } ciObject* constant = get_object(string); assert (constant->is_instance(), "must be an instance, or not? "); --- old/hotspot/src/share/vm/ci/ciField.hpp 2009-08-01 04:09:48.823855307 +0100 +++ new/hotspot/src/share/vm/ci/ciField.hpp 2009-08-01 04:09:48.738476532 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciField.hpp 1.23 07/09/28 10:23:24 JVM" #endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -105,7 +105,7 @@ BasicType layout_type() { return type2field[(_type == NULL) ? T_OBJECT : _type->basic_type()]; } // How big is this field in memory? - int size_in_bytes() { return type2aelembytes[layout_type()]; } + int size_in_bytes() { return type2aelembytes(layout_type()); } // What is the offset of this field? int offset() { --- old/hotspot/src/share/vm/ci/ciInstanceKlass.cpp 2009-08-01 04:09:49.688168590 +0100 +++ new/hotspot/src/share/vm/ci/ciInstanceKlass.cpp 2009-08-01 04:09:49.606608652 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciInstanceKlass.cpp 1.45 07/09/28 10:23:23 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,9 @@ // ciInstanceKlass::ciInstanceKlass // // Loaded instance klass. -ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) : ciKlass(h_k) { +ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) : + ciKlass(h_k), _non_static_fields(NULL) +{ assert(get_Klass()->oop_is_instance(), "wrong type"); instanceKlass* ik = get_instanceKlass(); @@ -49,6 +51,7 @@ // Next line must follow and use the result of the previous line: _is_linked = _is_initialized || ik->is_linked(); _nonstatic_field_size = ik->nonstatic_field_size(); + _has_nonstatic_fields = ik->has_nonstatic_fields(); _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields: _nof_implementors = ik->nof_implementors(); @@ -94,6 +97,7 @@ _is_initialized = false; _is_linked = false; _nonstatic_field_size = -1; + _has_nonstatic_fields = false; _nonstatic_fields = NULL; _nof_implementors = -1; _loader = loader; @@ -202,7 +206,7 @@ assert(offset >= 0 && offset < layout_helper(), "offset must be tame"); #endif - if (offset < (instanceOopDesc::header_size() * wordSize)) { + if (offset < instanceOopDesc::base_offset_in_bytes()) { // All header offsets belong properly to java/lang/Object. 
return CURRENT_ENV->Object_klass(); } @@ -211,7 +215,8 @@ for (;;) { assert(self->is_loaded(), "must be loaded to have size"); ciInstanceKlass* super = self->super(); - if (super == NULL || !super->contains_field_offset(offset)) { + if (super == NULL || super->nof_nonstatic_fields() == 0 || + !super->contains_field_offset(offset)) { return self; } else { self = super; // return super->get_canonical_holder(offset) @@ -338,6 +343,37 @@ return field; } +// ------------------------------------------------------------------ +// ciInstanceKlass::non_static_fields. + +class NonStaticFieldFiller: public FieldClosure { + GrowableArray* _arr; + ciEnv* _curEnv; +public: + NonStaticFieldFiller(ciEnv* curEnv, GrowableArray* arr) : + _curEnv(curEnv), _arr(arr) + {} + void do_field(fieldDescriptor* fd) { + ciField* field = new (_curEnv->arena()) ciField(fd); + _arr->append(field); + } +}; + +GrowableArray* ciInstanceKlass::non_static_fields() { + if (_non_static_fields == NULL) { + VM_ENTRY_MARK; + ciEnv* curEnv = ciEnv::current(); + instanceKlass* ik = get_instanceKlass(); + int max_n_fields = ik->fields()->length()/instanceKlass::next_offset; + + _non_static_fields = + new (curEnv->arena()) GrowableArray(max_n_fields); + NonStaticFieldFiller filler(curEnv, _non_static_fields); + ik->do_nonstatic_fields(&filler); + } + return _non_static_fields; +} + static int sort_field_by_offset(ciField** a, ciField** b) { return (*a)->offset_in_bytes() - (*b)->offset_in_bytes(); // (no worries about 32-bit overflow...) @@ -351,31 +387,28 @@ if (_nonstatic_fields != NULL) return _nonstatic_fields->length(); - // Size in bytes of my fields, including inherited fields. - // About equal to size_helper() - sizeof(oopDesc). - int fsize = nonstatic_field_size() * wordSize; - if (fsize == 0) { // easy shortcut + if (!has_nonstatic_fields()) { Arena* arena = CURRENT_ENV->arena(); _nonstatic_fields = new (arena) GrowableArray(arena, 0, 0, NULL); return 0; } assert(!is_java_lang_Object(), "bootstrap OK"); + // Size in bytes of my fields, including inherited fields. + int fsize = nonstatic_field_size() * heapOopSize; + ciInstanceKlass* super = this->super(); - int super_fsize = 0; - int super_flen = 0; GrowableArray* super_fields = NULL; - if (super != NULL) { - super_fsize = super->nonstatic_field_size() * wordSize; - super_flen = super->nof_nonstatic_fields(); + if (super != NULL && super->has_nonstatic_fields()) { + int super_fsize = super->nonstatic_field_size() * heapOopSize; + int super_flen = super->nof_nonstatic_fields(); super_fields = super->_nonstatic_fields; assert(super_flen == 0 || super_fields != NULL, "first get nof_fields"); - } - - // See if I am no larger than my super; if so, I can use his fields. - if (fsize == super_fsize) { - _nonstatic_fields = super_fields; - return super_fields->length(); + // See if I am no larger than my super; if so, I can use his fields. + if (fsize == super_fsize) { + _nonstatic_fields = super_fields; + return super_fields->length(); + } } GrowableArray* fields = NULL; @@ -395,11 +428,11 @@ // (In principle, they could mix with superclass fields.) fields->sort(sort_field_by_offset); #ifdef ASSERT - int last_offset = sizeof(oopDesc); + int last_offset = instanceOopDesc::base_offset_in_bytes(); for (int i = 0; i < fields->length(); i++) { ciField* field = fields->at(i); int offset = field->offset_in_bytes(); - int size = (field->_type == NULL) ? oopSize : field->size_in_bytes(); + int size = (field->_type == NULL) ? 
heapOopSize : field->size_in_bytes(); assert(last_offset <= offset, "no field overlap"); if (last_offset > (int)sizeof(oopDesc)) assert((offset - last_offset) < BytesPerLong, "no big holes"); @@ -408,7 +441,7 @@ // This is a minor inefficiency classFileParser.cpp. last_offset = offset + size; } - assert(last_offset <= (int)sizeof(oopDesc) + fsize, "no overflow"); + assert(last_offset <= (int)instanceOopDesc::base_offset_in_bytes() + fsize, "no overflow"); #endif _nonstatic_fields = fields; --- old/hotspot/src/share/vm/ci/ciInstanceKlass.hpp 2009-08-01 04:09:50.603340440 +0100 +++ new/hotspot/src/share/vm/ci/ciInstanceKlass.hpp 2009-08-01 04:09:50.513626232 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciInstanceKlass.hpp 1.36 07/09/28 10:23:23 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,18 +38,20 @@ friend class ciBytecodeStream; private: - bool _is_shared; - jobject _loader; jobject _protection_domain; + bool _is_shared; bool _is_initialized; bool _is_linked; bool _has_finalizer; bool _has_subklass; + bool _has_nonstatic_fields; + ciFlags _flags; jint _nonstatic_field_size; - + jint _nonstatic_oop_map_size; + // Lazy fields get filled in only upon request. ciInstanceKlass* _super; ciInstance* _java_mirror; @@ -61,6 +63,8 @@ ciInstanceKlass* _implementors[implementors_limit]; jint _nof_implementors; + GrowableArray* _non_static_fields; + protected: ciInstanceKlass(KlassHandle h_k); ciInstanceKlass(ciSymbol* name, jobject loader, jobject protection_domain); @@ -132,6 +136,12 @@ jint nonstatic_field_size() { assert(is_loaded(), "must be loaded"); return _nonstatic_field_size; } + jint has_nonstatic_fields() { + assert(is_loaded(), "must be loaded"); + return _has_nonstatic_fields; } + jint nonstatic_oop_map_size() { + assert(is_loaded(), "must be loaded"); + return _nonstatic_oop_map_size; } ciInstanceKlass* super(); jint nof_implementors() { assert(is_loaded(), "must be loaded"); @@ -141,6 +151,9 @@ ciInstanceKlass* get_canonical_holder(int offset); ciField* get_field_by_offset(int field_offset, bool is_static); + + GrowableArray* non_static_fields(); + // total number of nonstatic fields (including inherited): int nof_nonstatic_fields() { if (_nonstatic_fields == NULL) @@ -158,8 +171,7 @@ bool has_finalizable_subclass(); bool contains_field_offset(int offset) { - return (offset/wordSize) >= instanceOopDesc::header_size() - && (offset/wordSize)-instanceOopDesc::header_size() < nonstatic_field_size(); + return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size()); } // Get the instance of java.lang.Class corresponding to --- old/hotspot/src/share/vm/ci/ciMethod.cpp 2009-08-01 04:09:51.491377696 +0100 +++ new/hotspot/src/share/vm/ci/ciMethod.cpp 2009-08-01 04:09:51.405450890 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciMethod.cpp 1.109 07/09/28 10:23:23 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
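
The ciInstanceKlass changes above stop assuming that the object header is a whole number of machine words: field sizes are now counted in heapOopSize units and offsets are tested against instanceOopDesc::base_offset_in_bytes(), which matters once compressed oops shrink the header on 64-bit. A small sketch of the offset test that contains_field_offset() now delegates to; the header size and oop size are passed in here as assumptions rather than read from the VM:

// Returns true if 'offset' lands inside this klass's own nonstatic field area.
// header_bytes plays the role of instanceOopDesc::base_offset_in_bytes() (for example
// 12 with compressed oops on 64-bit, 16 without) and heap_oop_size that of heapOopSize.
static bool contains_field_offset_sketch(int offset,
                                         int nonstatic_field_size_heap_words,
                                         int header_bytes, int heap_oop_size) {
  int fields_end = header_bytes + nonstatic_field_size_heap_words * heap_oop_size;
  return offset >= header_bytes && offset < fields_end;
}
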
* * This code is free software; you can redistribute it and/or modify it @@ -149,7 +149,7 @@ memcpy(_code, me->code_base(), code_size()); // Revert any breakpoint bytecodes in ci's copy - if (_is_compilable && me->number_of_breakpoints() > 0) { + if (me->number_of_breakpoints() > 0) { BreakpointInfo* bp = instanceKlass::cast(me->method_holder())->breakpoints(); for (; bp != NULL; bp = bp->next()) { if (bp->match(me)) { @@ -881,7 +881,7 @@ (TieredCompilation && code->compiler() != NULL && code->compiler()->is_c1())) { return 0; } - return code->code_size(); + return code->code_end() - code->verified_entry_point(); ) } --- old/hotspot/src/share/vm/ci/ciMethodBlocks.cpp 2009-08-01 04:09:52.398014339 +0100 +++ new/hotspot/src/share/vm/ci/ciMethodBlocks.cpp 2009-08-01 04:09:52.304994629 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciMethodBlocks.cpp 1.6 07/09/28 10:23:22 JVM" #endif /* - * Copyright 2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,7 +52,7 @@ // first half. Returns the range beginning at bci. ciBlock *ciMethodBlocks::split_block_at(int bci) { ciBlock *former_block = block_containing(bci); - ciBlock *new_block = new(_arena) ciBlock(_method, _num_blocks++, this, former_block->start_bci()); + ciBlock *new_block = new(_arena) ciBlock(_method, _num_blocks++, former_block->start_bci()); _blocks->append(new_block); assert(former_block != NULL, "must not be NULL"); new_block->set_limit_bci(bci); @@ -70,6 +70,14 @@ break; } } + // Move an exception handler information if needed. + if (former_block->is_handler()) { + int ex_start = former_block->ex_start_bci(); + int ex_end = former_block->ex_limit_bci(); + new_block->set_exception_range(ex_start, ex_end); + // Clear information in former_block. + former_block->clear_exception_handler(); + } return former_block; } @@ -78,7 +86,7 @@ if (cb == NULL ) { // This is our first time visiting this bytecode. Create // a fresh block and assign it this starting point. - ciBlock *nb = new(_arena) ciBlock(_method, _num_blocks++, this, bci); + ciBlock *nb = new(_arena) ciBlock(_method, _num_blocks++, bci); _blocks->append(nb); _bci_to_block[bci] = nb; return nb; @@ -93,6 +101,11 @@ } } +ciBlock *ciMethodBlocks::make_dummy_block() { + ciBlock *dum = new(_arena) ciBlock(_method, -1, 0); + return dum; +} + void ciMethodBlocks::do_analysis() { ciBytecodeStream s(_method); ciBlock *cur_block = block_containing(0); @@ -105,7 +118,7 @@ // one and end the old one. assert(cur_block != NULL, "must always have a current block"); ciBlock *new_block = block_containing(bci); - if (new_block == NULL) { + if (new_block == NULL || new_block == cur_block) { // We have not marked this bci as the start of a new block. // Keep interpreting the current_range. 
_bci_to_block[bci] = cur_block; @@ -248,7 +261,7 @@ Copy::zero_to_words((HeapWord*) _bci_to_block, b2bsize / sizeof(HeapWord)); // create initial block covering the entire method - ciBlock *b = new(arena) ciBlock(_method, _num_blocks++, this, 0); + ciBlock *b = new(arena) ciBlock(_method, _num_blocks++, 0); _blocks->append(b); _bci_to_block[0] = b; @@ -257,14 +270,39 @@ for(ciExceptionHandlerStream str(meth); !str.is_done(); str.next()) { ciExceptionHandler* handler = str.handler(); ciBlock *eb = make_block_at(handler->handler_bci()); - eb->set_handler(); + // + // Several exception handlers can have the same handler_bci: + // + // try { + // if (a.foo(b) < 0) { + // return a.error(); + // } + // return CoderResult.UNDERFLOW; + // } finally { + // a.position(b); + // } + // + // The try block above is divided into 2 exception blocks + // separated by 'areturn' bci. + // int ex_start = handler->start(); int ex_end = handler->limit(); - eb->set_exception_range(ex_start, ex_end); // ensure a block at the start of exception range and start of following code (void) make_block_at(ex_start); if (ex_end < _code_size) (void) make_block_at(ex_end); + + if (eb->is_handler()) { + // Extend old handler exception range to cover additional range. + int old_ex_start = eb->ex_start_bci(); + int old_ex_end = eb->ex_limit_bci(); + if (ex_start > old_ex_start) + ex_start = old_ex_start; + if (ex_end < old_ex_end) + ex_end = old_ex_end; + eb->clear_exception_handler(); // Reset exception information + } + eb->set_exception_range(ex_start, ex_end); } } @@ -305,7 +343,7 @@ #endif -ciBlock::ciBlock(ciMethod *method, int index, ciMethodBlocks *mb, int start_bci) : +ciBlock::ciBlock(ciMethod *method, int index, int start_bci) : #ifndef PRODUCT _method(method), #endif @@ -315,9 +353,10 @@ void ciBlock::set_exception_range(int start_bci, int limit_bci) { assert(limit_bci >= start_bci, "valid range"); - assert(is_handler(), "must be handler"); + assert(!is_handler() && _ex_start_bci == -1 && _ex_limit_bci == -1, "must not be handler"); _ex_start_bci = start_bci; _ex_limit_bci = limit_bci; + set_handler(); } #ifndef PRODUCT --- old/hotspot/src/share/vm/ci/ciMethodBlocks.hpp 2009-08-01 04:09:53.338927871 +0100 +++ new/hotspot/src/share/vm/ci/ciMethodBlocks.hpp 2009-08-01 04:09:53.247021173 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciMethodBlocks.hpp 1.5 07/05/05 17:05:14 JVM" #endif /* - * Copyright 2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
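
The ciMethodBlocks change above deals with several exception-table entries sharing one handler bci, as in the try/finally example quoted in the comment: instead of overwriting the handler block's range, the code widens it so the block ends up covering a single span from the earliest start to the latest limit of all entries. In isolation, the widening step looks like this (plain ints stand in for bci values):

// Widen a new [ex_start, ex_end) handler range to also cover [old_ex_start, old_ex_end).
static void widen_exception_range(int* ex_start, int* ex_end,
                                  int old_ex_start, int old_ex_end) {
  if (*ex_start > old_ex_start) *ex_start = old_ex_start;   // keep the earliest start bci
  if (*ex_end   < old_ex_end)   *ex_end   = old_ex_end;     // keep the latest limit bci
}
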
* * This code is free software; you can redistribute it and/or modify it @@ -51,6 +51,8 @@ int num_blocks() { return _num_blocks;} void clear_processed(); + ciBlock *make_dummy_block(); // a block not associated with a bci + #ifndef PRODUCT void dump(); #endif @@ -84,7 +86,7 @@ fall_through_bci = -1 }; - ciBlock(ciMethod *method, int index, ciMethodBlocks *mb, int start_bci); + ciBlock(ciMethod *method, int index, int start_bci); int start_bci() const { return _start_bci; } int limit_bci() const { return _limit_bci; } int control_bci() const { return _control_bci; } @@ -97,7 +99,6 @@ int ex_limit_bci() const { return _ex_limit_bci; } bool contains(int bci) const { return start_bci() <= bci && bci < limit_bci(); } - // flag handling bool processed() const { return (_flags & Processed) != 0; } bool is_handler() const { return (_flags & Handler) != 0; } @@ -113,9 +114,10 @@ void set_does_jsr() { _flags |= DoesJsr; } void clear_does_jsr() { _flags &= ~DoesJsr; } void set_does_ret() { _flags |= DoesRet; } - void clear_does_ret() { _flags |= DoesRet; } + void clear_does_ret() { _flags &= ~DoesRet; } void set_is_ret_target() { _flags |= RetTarget; } void set_has_handler() { _flags |= HasHandler; } + void clear_exception_handler() { _flags &= ~Handler; _ex_start_bci = -1; _ex_limit_bci = -1; } #ifndef PRODUCT ciMethod *method() const { return _method; } void dump(); --- old/hotspot/src/share/vm/ci/ciMethodData.cpp 2009-08-01 04:09:54.256529764 +0100 +++ new/hotspot/src/share/vm/ci/ciMethodData.cpp 2009-08-01 04:09:54.167260329 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciMethodData.cpp 1.29 07/09/28 10:23:22 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,8 @@ // Set an initial hint. Don't use set_hint_di() because // first_di() may be out of bounds if data_size is 0. _hint_di = first_di(); + // Initialize the escape information (to "don't know."); + _eflags = _arg_local = _arg_stack = _arg_returned = 0; } // ------------------------------------------------------------------ @@ -62,6 +64,8 @@ // Set an initial hint. Don't use set_hint_di() because // first_di() may be out of bounds if data_size is 0. _hint_di = first_di(); + // Initialize the escape information (to "don't know."); + _eflags = _arg_local = _arg_stack = _arg_returned = 0; } void ciMethodData::load_data() { @@ -145,6 +149,8 @@ return new ciBranchData(data_layout); case DataLayout::multi_branch_data_tag: return new ciMultiBranchData(data_layout); + case DataLayout::arg_info_data_tag: + return new ciArgInfoData(data_layout); }; } @@ -175,6 +181,9 @@ _saw_free_extra_data = true; // observed an empty slot (common case) return NULL; } + if (dp->tag() == DataLayout::arg_info_data_tag) { + break; // ArgInfoData is at the end of extra data section. + } if (dp->bci() == bci) { assert(dp->tag() == DataLayout::bit_data_tag, "sane"); return new ciBitData(dp); @@ -220,8 +229,14 @@ void ciMethodData::clear_escape_info() { VM_ENTRY_MARK; methodDataOop mdo = get_methodDataOop(); - if (mdo != NULL) + if (mdo != NULL) { mdo->clear_escape_info(); + ArgInfoData *aid = arg_info(); + int arg_count = (aid == NULL) ? 
0 : aid->number_of_args(); + for (int i = 0; i < arg_count; i++) { + set_arg_modified(i, 0); + } + } _eflags = _arg_local = _arg_stack = _arg_returned = 0; } @@ -234,6 +249,10 @@ mdo->set_arg_local(_arg_local); mdo->set_arg_stack(_arg_stack); mdo->set_arg_returned(_arg_returned); + int arg_count = mdo->method()->size_of_parameters(); + for (int i = 0; i < arg_count; i++) { + mdo->set_arg_modified(i, arg_modified(i)); + } } } @@ -265,6 +284,14 @@ set_nth_bit(_arg_returned, i); } +void ciMethodData::set_arg_modified(int arg, uint val) { + ArgInfoData *aid = arg_info(); + if (aid == NULL) + return; + assert(arg >= 0 && arg < aid->number_of_args(), "valid argument number"); + aid->set_arg_modified(arg, val); +} + bool ciMethodData::is_arg_local(int i) const { return is_set_nth_bit(_arg_local, i); } @@ -277,6 +304,14 @@ return is_set_nth_bit(_arg_returned, i); } +uint ciMethodData::arg_modified(int arg) const { + ArgInfoData *aid = arg_info(); + if (aid == NULL) + return 0; + assert(arg >= 0 && arg < aid->number_of_args(), "valid argument number"); + return aid->arg_modified(arg); +} + ByteSize ciMethodData::offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data) { // Get offset within methodDataOop of the data array ByteSize data_offset = methodDataOopDesc::data_offset(); @@ -290,6 +325,18 @@ return in_ByteSize(offset); } +ciArgInfoData *ciMethodData::arg_info() const { + // Should be last, have to skip all traps. + DataLayout* dp = data_layout_at(data_size()); + DataLayout* end = data_layout_at(data_size() + extra_data_size()); + for (; dp < end; dp = methodDataOopDesc::next_extra(dp)) { + if (dp->tag() == DataLayout::arg_info_data_tag) + return new ciArgInfoData(dp); + } + return NULL; +} + + // Implementation of the print method. void ciMethodData::print_impl(outputStream* st) { ciObject::print_impl(st); @@ -308,6 +355,22 @@ st->fill_to(6); data->print_data_on(st); } + st->print_cr("--- Extra data:"); + DataLayout* dp = data_layout_at(data_size()); + DataLayout* end = data_layout_at(data_size() + extra_data_size()); + for (; dp < end; dp = methodDataOopDesc::next_extra(dp)) { + if (dp->tag() == DataLayout::no_tag) continue; + if (dp->tag() == DataLayout::bit_data_tag) { + data = new BitData(dp); + } else { + assert(dp->tag() == DataLayout::arg_info_data_tag, "must be BitData or ArgInfo"); + data = new ciArgInfoData(dp); + dp = end; // ArgInfoData is at the end of extra data section. + } + st->print("%d", dp_to_di(data->dp())); + st->fill_to(6); + data->print_data_on(st); + } } void ciReceiverTypeData::print_receiver_data_on(outputStream* st) { --- old/hotspot/src/share/vm/ci/ciMethodData.hpp 2009-08-01 04:09:55.194469985 +0100 +++ new/hotspot/src/share/vm/ci/ciMethodData.hpp 2009-08-01 04:09:55.116924738 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciMethodData.hpp 1.28 07/09/28 10:23:22 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ class ciBranchData; class ciArrayData; class ciMultiBranchData; +class ciArgInfoData; typedef ProfileData ciProfileData; @@ -124,6 +125,11 @@ ciMultiBranchData(DataLayout* layout) : MultiBranchData(layout) {}; }; +class ciArgInfoData : public ArgInfoData { +public: + ciArgInfoData(DataLayout* layout) : ArgInfoData(layout) {}; +}; + // ciMethodData // // This class represents a methodDataOop in the HotSpot virtual @@ -166,9 +172,9 @@ ciMethodData(); // Accessors - int data_size() { return _data_size; } - int extra_data_size() { return _extra_data_size; } - intptr_t * data() { return _data; } + int data_size() const { return _data_size; } + int extra_data_size() const { return _extra_data_size; } + intptr_t * data() const { return _data; } methodDataOop get_methodDataOop() const { if (handle() == NULL) return NULL; @@ -181,7 +187,7 @@ void print_impl(outputStream* st); - DataLayout* data_layout_at(int data_index) { + DataLayout* data_layout_at(int data_index) const { assert(data_index % sizeof(intptr_t) == 0, "unaligned"); return (DataLayout*) (((address)_data) + data_index); } @@ -210,6 +216,8 @@ // What is the index of the first data entry? int first_di() { return 0; } + ciArgInfoData *arg_info() const; + public: bool is_method_data() { return true; } bool is_empty() { return _state == empty_state; } @@ -273,10 +281,12 @@ void set_arg_local(int i); void set_arg_stack(int i); void set_arg_returned(int i); + void set_arg_modified(int arg, uint val); bool is_arg_local(int i) const; bool is_arg_stack(int i) const; bool is_arg_returned(int i) const; + uint arg_modified(int arg) const; // Code generation helper ByteSize offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data); --- old/hotspot/src/share/vm/ci/ciObjArray.cpp 2009-08-01 04:09:56.088664020 +0100 +++ new/hotspot/src/share/vm/ci/ciObjArray.cpp 2009-08-01 04:09:56.019557849 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)ciObjArray.cpp 1.1 07/10/23 13:12:37 JVM" -#endif /* - * Copyright 1999-2001 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" --- old/hotspot/src/share/vm/ci/ciObjectFactory.cpp 2009-08-01 04:09:57.746790918 +0100 +++ new/hotspot/src/share/vm/ci/ciObjectFactory.cpp 2009-08-01 04:09:57.652411966 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciObjectFactory.cpp 1.39 07/05/17 15:50:05 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -124,7 +124,7 @@ for (int i = T_BOOLEAN; i <= T_CONFLICT; i++) { BasicType t = (BasicType)i; - if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY) { + if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY && t != T_NARROWOOP) { ciType::_basic_types[t] = new (_arena) ciType(t); init_ident_of(ciType::_basic_types[t]); } --- old/hotspot/src/share/vm/ci/ciTypeFlow.cpp 2009-08-01 04:09:58.655129315 +0100 +++ new/hotspot/src/share/vm/ci/ciTypeFlow.cpp 2009-08-01 04:09:58.561892759 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciTypeFlow.cpp 1.47 07/09/28 10:23:20 JVM" #endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -341,8 +341,10 @@ } _trap_bci = -1; _trap_index = 0; + _def_locals.clear(); } + // ------------------------------------------------------------------ // ciTypeFlow::get_start_state // @@ -738,7 +740,7 @@ void ciTypeFlow::StateVector::do_new(ciBytecodeStream* str) { bool will_link; ciKlass* klass = str->get_klass(will_link); - if (!will_link) { + if (!will_link || str->is_unresolved_klass()) { trap(str, klass, str->get_klass_index()); } else { push_object(klass); @@ -1271,7 +1273,9 @@ } case Bytecodes::_iinc: { - check_int(local(str->get_index())); + int lnum = str->get_index(); + check_int(local(lnum)); + store_to_local(lnum); break; } case Bytecodes::_iload: load_local_int(str->get_index()); break; @@ -1509,6 +1513,46 @@ } #endif + +// ------------------------------------------------------------------ +// ciTypeFlow::SuccIter::next +// +void ciTypeFlow::SuccIter::next() { + int succ_ct = _pred->successors()->length(); + int next = _index + 1; + if (next < succ_ct) { + _index = next; + _succ = _pred->successors()->at(next); + return; + } + for (int i = next - succ_ct; i < _pred->exceptions()->length(); i++) { + // Do not compile any code for unloaded exception types. + // Following compiler passes are responsible for doing this also. + ciInstanceKlass* exception_klass = _pred->exc_klasses()->at(i); + if (exception_klass->is_loaded()) { + _index = next; + _succ = _pred->exceptions()->at(i); + return; + } + next++; + } + _index = -1; + _succ = NULL; +} + +// ------------------------------------------------------------------ +// ciTypeFlow::SuccIter::set_succ +// +void ciTypeFlow::SuccIter::set_succ(Block* succ) { + int succ_ct = _pred->successors()->length(); + if (_index < succ_ct) { + _pred->successors()->at_put(_index, succ); + } else { + int idx = _index - succ_ct; + _pred->exceptions()->at_put(idx, succ); + } +} + // ciTypeFlow::Block // // A basic block. @@ -1529,10 +1573,11 @@ _jsrs = new_jsrs; _next = NULL; _on_work_list = false; - _pre_order = -1; assert(!has_pre_order(), ""); - _private_copy = false; + _backedge_copy = false; + _exception_entry = false; _trap_bci = -1; _trap_index = 0; + df_init(); if (CITraceTypeFlow) { tty->print_cr(">> Created new block"); @@ -1544,55 +1589,13 @@ } // ------------------------------------------------------------------ -// ciTypeFlow::Block::clone_loop_head -// -ciTypeFlow::Block* -ciTypeFlow::Block::clone_loop_head(ciTypeFlow* analyzer, - int branch_bci, - ciTypeFlow::Block* target, - ciTypeFlow::JsrSet* jsrs) { - // Loop optimizations are not performed on Tier1 compiles. Do nothing. 
- if (analyzer->env()->comp_level() < CompLevel_full_optimization) { - return target; - } - - // The current block ends with a branch. - // - // If the target block appears to be the test-clause of a for loop, and - // it is not too large, and it has not yet been cloned, clone it. - // The pre-existing copy becomes the private clone used only by - // the initial iteration of the loop. (We know we are simulating - // the initial iteration right now, since we have never calculated - // successors before for this block.) - - if (branch_bci <= start() - && (target->limit() - target->start()) <= CICloneLoopTestLimit - && target->private_copy_count() == 0) { - // Setting the private_copy bit ensures that the target block cannot be - // reached by any other paths, such as fall-in from the loop body. - // The private copy will be accessible only on successor lists - // created up to this point. - target->set_private_copy(true); - if (CITraceTypeFlow) { - tty->print(">> Cloning a test-clause block "); - print_value_on(tty); - tty->cr(); - } - // If the target is the current block, then later on a new copy of the - // target block will be created when its bytecodes are reached by - // an alternate path. (This is the case for loops with the loop - // head at the bci-wise bottom of the loop, as with pre-1.4.2 javac.) - // - // Otherwise, duplicate the target block now and use it immediately. - // (The case for loops with the loop head at the bci-wise top of the - // loop, as with 1.4.2 javac.) - // - // In either case, the new copy of the block will remain public. - if (target != this) { - target = analyzer->block_at(branch_bci, jsrs); - } - } - return target; +// ciTypeFlow::Block::df_init +void ciTypeFlow::Block::df_init() { + _pre_order = -1; assert(!has_pre_order(), ""); + _post_order = -1; assert(!has_post_order(), ""); + _loop = NULL; + _irreducible_entry = false; + _rpo_next = NULL; } // ------------------------------------------------------------------ @@ -1645,31 +1648,23 @@ case Bytecodes::_if_icmpgt: case Bytecodes::_if_icmple: case Bytecodes::_if_acmpeq: case Bytecodes::_if_acmpne: case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: - // Our successors are the branch target and the next bci. - branch_bci = str->get_dest(); - clone_loop_head(analyzer, branch_bci, this, jsrs); - _successors = - new (arena) GrowableArray(arena, 2, 0, NULL); - assert(_successors->length() == IF_NOT_TAKEN, ""); - _successors->append(analyzer->block_at(next_bci, jsrs)); - assert(_successors->length() == IF_TAKEN, ""); - _successors->append(analyzer->block_at(branch_bci, jsrs)); - break; - + // Our successors are the branch target and the next bci. + branch_bci = str->get_dest(); + _successors = + new (arena) GrowableArray(arena, 2, 0, NULL); + assert(_successors->length() == IF_NOT_TAKEN, ""); + _successors->append(analyzer->block_at(next_bci, jsrs)); + assert(_successors->length() == IF_TAKEN, ""); + _successors->append(analyzer->block_at(branch_bci, jsrs)); + break; + case Bytecodes::_goto: - branch_bci = str->get_dest(); - _successors = - new (arena) GrowableArray(arena, 1, 0, NULL); - assert(_successors->length() == GOTO_TARGET, ""); - target = analyzer->block_at(branch_bci, jsrs); - // If the target block has not been visited yet, and looks like - // a two-way branch, attempt to clone it if it is a loop head. 
- if (target->_successors != NULL - && target->_successors->length() == (IF_TAKEN + 1)) { - target = clone_loop_head(analyzer, branch_bci, target, jsrs); - } - _successors->append(target); - break; + branch_bci = str->get_dest(); + _successors = + new (arena) GrowableArray(arena, 1, 0, NULL); + assert(_successors->length() == GOTO_TARGET, ""); + _successors->append(analyzer->block_at(branch_bci, jsrs)); + break; case Bytecodes::_jsr: branch_bci = str->get_dest(); @@ -1804,65 +1799,60 @@ } // ------------------------------------------------------------------ -// ciTypeFlow::Block::is_simpler_than -// -// A relation used to order our work list. We work on a block earlier -// if it has a smaller jsr stack or it occurs earlier in the program -// text. -// -// Note: maybe we should redo this functionality to make blocks -// which correspond to exceptions lower priority. -bool ciTypeFlow::Block::is_simpler_than(ciTypeFlow::Block* other) { - if (other == NULL) { - return true; - } else { - int size1 = _jsrs->size(); - int size2 = other->_jsrs->size(); - if (size1 < size2) { - return true; - } else if (size2 < size1) { - return false; - } else { -#if 0 - if (size1 > 0) { - int r1 = _jsrs->record_at(0)->return_address(); - int r2 = _jsrs->record_at(0)->return_address(); - if (r1 < r2) { - return true; - } else if (r2 < r1) { - return false; - } else { - int e1 = _jsrs->record_at(0)->return_address(); - int e2 = _jsrs->record_at(0)->return_address(); - if (e1 < e2) { - return true; - } else if (e2 < e1) { - return false; - } - } +// ciTypeFlow::Block::set_backedge_copy +// Use this only to make a pre-existing public block into a backedge copy. +void ciTypeFlow::Block::set_backedge_copy(bool z) { + assert(z || (z == is_backedge_copy()), "cannot make a backedge copy public"); + _backedge_copy = z; +} + +// ------------------------------------------------------------------ +// ciTypeFlow::Block::is_clonable_exit +// +// At most 2 normal successors, one of which continues looping, +// and all exceptional successors must exit. +bool ciTypeFlow::Block::is_clonable_exit(ciTypeFlow::Loop* lp) { + int normal_cnt = 0; + int in_loop_cnt = 0; + for (SuccIter iter(this); !iter.done(); iter.next()) { + Block* succ = iter.succ(); + if (iter.is_normal_ctrl()) { + if (++normal_cnt > 2) return false; + if (lp->contains(succ->loop())) { + if (++in_loop_cnt > 1) return false; } -#endif - return (start() <= other->start()); + } else { + if (lp->contains(succ->loop())) return false; } } + return in_loop_cnt == 1; } // ------------------------------------------------------------------ -// ciTypeFlow::Block::set_private_copy -// Use this only to make a pre-existing public block into a private copy. 
-void ciTypeFlow::Block::set_private_copy(bool z) { - assert(z || (z == is_private_copy()), "cannot make a private copy public"); - _private_copy = z; +// ciTypeFlow::Block::looping_succ +// +ciTypeFlow::Block* ciTypeFlow::Block::looping_succ(ciTypeFlow::Loop* lp) { + assert(successors()->length() <= 2, "at most 2 normal successors"); + for (SuccIter iter(this); !iter.done(); iter.next()) { + Block* succ = iter.succ(); + if (lp->contains(succ->loop())) { + return succ; + } + } + return NULL; } #ifndef PRODUCT // ------------------------------------------------------------------ // ciTypeFlow::Block::print_value_on void ciTypeFlow::Block::print_value_on(outputStream* st) const { - if (has_pre_order()) st->print("#%-2d ", pre_order()); + if (has_pre_order()) st->print("#%-2d ", pre_order()); + if (has_rpo()) st->print("rpo#%-2d ", rpo()); st->print("[%d - %d)", start(), limit()); + if (is_loop_head()) st->print(" lphd"); + if (is_irreducible_entry()) st->print(" irred"); if (_jsrs->size() > 0) { st->print("/"); _jsrs->print_on(st); } - if (is_private_copy()) st->print("/private_copy"); + if (is_backedge_copy()) st->print("/backedge_copy"); } // ------------------------------------------------------------------ @@ -1874,6 +1864,16 @@ st->print_cr(" ==================================================== "); st->print (" "); print_value_on(st); + st->print(" Stored locals: "); def_locals()->print_on(st, outer()->method()->max_locals()); tty->cr(); + if (loop() && loop()->parent() != NULL) { + st->print(" loops:"); + Loop* lp = loop(); + do { + st->print(" %d<-%d", lp->head()->pre_order(),lp->tail()->pre_order()); + if (lp->is_irreducible()) st->print("(ir)"); + lp = lp->parent(); + } while (lp->parent() != NULL); + } st->cr(); _state->print_on(st); if (_successors == NULL) { @@ -1910,6 +1910,21 @@ } #endif +#ifndef PRODUCT +// ------------------------------------------------------------------ +// ciTypeFlow::LocalSet::print_on +void ciTypeFlow::LocalSet::print_on(outputStream* st, int limit) const { + st->print("{"); + for (int i = 0; i < max; i++) { + if (test(i)) st->print(" %d", i); + } + if (limit > max) { + st->print(" %d..%d ", max, limit); + } + st->print(" }"); +} +#endif + // ciTypeFlow // // This is a pass over the bytecodes which computes the following: @@ -1925,12 +1940,11 @@ _max_locals = method->max_locals(); _max_stack = method->max_stack(); _code_size = method->code_size(); + _has_irreducible_entry = false; _osr_bci = osr_bci; _failure_reason = NULL; assert(start_bci() >= 0 && start_bci() < code_size() , "correct osr_bci argument"); - _work_list = NULL; - _next_pre_order = 0; _ciblock_count = _methodBlocks->num_blocks(); _idx_to_blocklist = NEW_ARENA_ARRAY(arena(), GrowableArray*, _ciblock_count); @@ -1952,12 +1966,6 @@ _work_list = next_block->next(); next_block->set_next(NULL); next_block->set_on_work_list(false); - if (!next_block->has_pre_order()) { - // Assign "pre_order" as each new block is taken from the work list. - // This number may be used by following phases to order block visits. - assert(!have_block_count(), "must not have mapped blocks yet") - next_block->set_pre_order(_next_pre_order++); - } return next_block; } @@ -1965,30 +1973,37 @@ // ciTypeFlow::add_to_work_list // // Add a basic block to our work list. 
+// List is sorted by decreasing postorder sort (same as increasing RPO) void ciTypeFlow::add_to_work_list(ciTypeFlow::Block* block) { assert(!block->is_on_work_list(), "must not already be on work list"); if (CITraceTypeFlow) { - tty->print(">> Adding block%s ", block->has_pre_order() ? " (again)" : ""); + tty->print(">> Adding block "); block->print_value_on(tty); tty->print_cr(" to the work list : "); } block->set_on_work_list(true); - if (block->is_simpler_than(_work_list)) { + + // decreasing post order sort + + Block* prev = NULL; + Block* current = _work_list; + int po = block->post_order(); + while (current != NULL) { + if (!current->has_post_order() || po > current->post_order()) + break; + prev = current; + current = current->next(); + } + if (prev == NULL) { block->set_next(_work_list); _work_list = block; } else { - Block *temp = _work_list; - while (!block->is_simpler_than(temp->next())) { - if (CITraceTypeFlow) { - tty->print("."); - } - temp = temp->next(); - } - block->set_next(temp->next()); - temp->set_next(block); + block->set_next(current); + prev->set_next(block); } + if (CITraceTypeFlow) { tty->cr(); } @@ -2011,7 +2026,7 @@ assert(ciblk->start_bci() == bci, "bad ciBlock boundaries"); Block* block = get_block_for(ciblk->index(), jsrs, option); - assert(block == NULL? (option == no_create): block->is_private_copy() == (option == create_private_copy), "create option consistent with result"); + assert(block == NULL? (option == no_create): block->is_backedge_copy() == (option == create_backedge_copy), "create option consistent with result"); if (CITraceTypeFlow) { if (block != NULL) { @@ -2075,9 +2090,10 @@ } if (block->meet_exception(exception_klass, state)) { - // Block was modified. Add it to the work list. - if (!block->is_on_work_list()) { - add_to_work_list(block); + // Block was modified and has PO. Add it to the work list. + if (block->has_post_order() && + !block->is_on_work_list()) { + add_to_work_list(block); } } } @@ -2094,9 +2110,10 @@ for (int i = 0; i < len; i++) { Block* block = successors->at(i); if (block->meet(state)) { - // Block was modified. Add it to the work list. - if (!block->is_on_work_list()) { - add_to_work_list(block); + // Block was modified and has PO. Add it to the work list. 
+ if (block->has_post_order() && + !block->is_on_work_list()) { + add_to_work_list(block); } } } @@ -2136,6 +2153,110 @@ return true; } +// ------------------------------------------------------------------ +// ciTypeFlow::clone_loop_heads +// +// Clone the loop heads +bool ciTypeFlow::clone_loop_heads(Loop* lp, StateVector* temp_vector, JsrSet* temp_set) { + bool rslt = false; + for (PreorderLoops iter(loop_tree_root()); !iter.done(); iter.next()) { + lp = iter.current(); + Block* head = lp->head(); + if (lp == loop_tree_root() || + lp->is_irreducible() || + !head->is_clonable_exit(lp)) + continue; + + // check not already cloned + if (head->backedge_copy_count() != 0) + continue; + + // check _no_ shared head below us + Loop* ch; + for (ch = lp->child(); ch != NULL && ch->head() != head; ch = ch->sibling()); + if (ch != NULL) + continue; + + // Clone head + Block* new_head = head->looping_succ(lp); + Block* clone = clone_loop_head(lp, temp_vector, temp_set); + // Update lp's info + clone->set_loop(lp); + lp->set_head(new_head); + lp->set_tail(clone); + // And move original head into outer loop + head->set_loop(lp->parent()); + + rslt = true; + } + return rslt; +} + +// ------------------------------------------------------------------ +// ciTypeFlow::clone_loop_head +// +// Clone lp's head and replace tail's successors with clone. +// +// | +// v +// head <-> body +// | +// v +// exit +// +// new_head +// +// | +// v +// head ----------\ +// | | +// | v +// | clone <-> body +// | | +// | /--/ +// | | +// v v +// exit +// +ciTypeFlow::Block* ciTypeFlow::clone_loop_head(Loop* lp, StateVector* temp_vector, JsrSet* temp_set) { + Block* head = lp->head(); + Block* tail = lp->tail(); + if (CITraceTypeFlow) { + tty->print(">> Requesting clone of loop head "); head->print_value_on(tty); + tty->print(" for predecessor "); tail->print_value_on(tty); + tty->cr(); + } + Block* clone = block_at(head->start(), head->jsrs(), create_backedge_copy); + assert(clone->backedge_copy_count() == 1, "one backedge copy for all back edges"); + + assert(!clone->has_pre_order(), "just created"); + clone->set_next_pre_order(); + + // Insert clone after (orig) tail in reverse post order + clone->set_rpo_next(tail->rpo_next()); + tail->set_rpo_next(clone); + + // tail->head becomes tail->clone + for (SuccIter iter(tail); !iter.done(); iter.next()) { + if (iter.succ() == head) { + iter.set_succ(clone); + } + } + flow_block(tail, temp_vector, temp_set); + if (head == tail) { + // For self-loops, clone->head becomes clone->clone + flow_block(clone, temp_vector, temp_set); + for (SuccIter iter(clone); !iter.done(); iter.next()) { + if (iter.succ() == head) { + iter.set_succ(clone); + break; + } + } + } + flow_block(clone, temp_vector, temp_set); + + return clone; +} // ------------------------------------------------------------------ // ciTypeFlow::flow_block @@ -2162,11 +2283,14 @@ // Grab the state from the current block. block->copy_state_into(state); + state->def_locals()->clear(); GrowableArray* exceptions = block->exceptions(); GrowableArray* exc_klasses = block->exc_klasses(); bool has_exceptions = exceptions->length() > 0; + bool exceptions_used = false; + ciBytecodeStream str(method()); str.reset_to_bci(start); Bytecodes::Code code; @@ -2175,6 +2299,7 @@ // Check for exceptional control flow from this point. if (has_exceptions && can_trap(str)) { flow_exceptions(exceptions, exc_klasses, state); + exceptions_used = true; } // Apply the effects of the current bytecode to our state. 
bool res = state->apply_one_bytecode(&str); @@ -2192,9 +2317,14 @@ block->print_on(tty); } + // Save set of locals defined in this block + block->def_locals()->add(state->def_locals()); + // Record (no) successors. block->successors(&str, state, jsrs); + assert(!has_exceptions || exceptions_used, "Not removing exceptions"); + // Discontinue interpretation of this Block. return; } @@ -2205,6 +2335,7 @@ // Check for exceptional control flow from this point. if (has_exceptions && can_trap(str)) { flow_exceptions(exceptions, exc_klasses, state); + exceptions_used = true; } // Fix the JsrSet to reflect effect of the bytecode. @@ -2221,11 +2352,306 @@ successors = block->successors(&str, NULL, NULL); } + // Save set of locals defined in this block + block->def_locals()->add(state->def_locals()); + + // Remove untaken exception paths + if (!exceptions_used) + exceptions->clear(); + // Pass our state to successors. flow_successors(successors, state); } // ------------------------------------------------------------------ +// ciTypeFlow::PostOrderLoops::next +// +// Advance to next loop tree using a postorder, left-to-right traversal. +void ciTypeFlow::PostorderLoops::next() { + assert(!done(), "must not be done."); + if (_current->sibling() != NULL) { + _current = _current->sibling(); + while (_current->child() != NULL) { + _current = _current->child(); + } + } else { + _current = _current->parent(); + } +} + +// ------------------------------------------------------------------ +// ciTypeFlow::PreOrderLoops::next +// +// Advance to next loop tree using a preorder, left-to-right traversal. +void ciTypeFlow::PreorderLoops::next() { + assert(!done(), "must not be done."); + if (_current->child() != NULL) { + _current = _current->child(); + } else if (_current->sibling() != NULL) { + _current = _current->sibling(); + } else { + while (_current != _root && _current->sibling() == NULL) { + _current = _current->parent(); + } + if (_current == _root) { + _current = NULL; + assert(done(), "must be done."); + } else { + assert(_current->sibling() != NULL, "must be more to do"); + _current = _current->sibling(); + } + } +} + +// ------------------------------------------------------------------ +// ciTypeFlow::Loop::sorted_merge +// +// Merge the branch lp into this branch, sorting on the loop head +// pre_orders. Returns the leaf of the merged branch. +// Child and sibling pointers will be setup later. +// Sort is (looking from leaf towards the root) +// descending on primary key: loop head's pre_order, and +// ascending on secondary key: loop tail's pre_order. 
+ciTypeFlow::Loop* ciTypeFlow::Loop::sorted_merge(Loop* lp) { + Loop* leaf = this; + Loop* prev = NULL; + Loop* current = leaf; + while (lp != NULL) { + int lp_pre_order = lp->head()->pre_order(); + // Find insertion point for "lp" + while (current != NULL) { + if (current == lp) + return leaf; // Already in list + if (current->head()->pre_order() < lp_pre_order) + break; + if (current->head()->pre_order() == lp_pre_order && + current->tail()->pre_order() > lp->tail()->pre_order()) { + break; + } + prev = current; + current = current->parent(); + } + Loop* next_lp = lp->parent(); // Save future list of items to insert + // Insert lp before current + lp->set_parent(current); + if (prev != NULL) { + prev->set_parent(lp); + } else { + leaf = lp; + } + prev = lp; // Inserted item is new prev[ious] + lp = next_lp; // Next item to insert + } + return leaf; +} + +// ------------------------------------------------------------------ +// ciTypeFlow::build_loop_tree +// +// Incrementally build loop tree. +void ciTypeFlow::build_loop_tree(Block* blk) { + assert(!blk->is_post_visited(), "precondition"); + Loop* innermost = NULL; // merge of loop tree branches over all successors + + for (SuccIter iter(blk); !iter.done(); iter.next()) { + Loop* lp = NULL; + Block* succ = iter.succ(); + if (!succ->is_post_visited()) { + // Found backedge since predecessor post visited, but successor is not + assert(succ->pre_order() <= blk->pre_order(), "should be backedge"); + + // Create a LoopNode to mark this loop. + lp = new (arena()) Loop(succ, blk); + if (succ->loop() == NULL) + succ->set_loop(lp); + // succ->loop will be updated to innermost loop on a later call, when blk==succ + + } else { // Nested loop + lp = succ->loop(); + + // If succ is loop head, find outer loop. + while (lp != NULL && lp->head() == succ) { + lp = lp->parent(); + } + if (lp == NULL) { + // Infinite loop, its parent is the root + lp = loop_tree_root(); + } + } + + // Check for irreducible loop. + // Successor has already been visited. If the successor's loop head + // has already been post-visited, then this is another entry into the loop. + while (lp->head()->is_post_visited() && lp != loop_tree_root()) { + _has_irreducible_entry = true; + lp->set_irreducible(succ); + if (!succ->is_on_work_list()) { + // Assume irreducible entries need more data flow + add_to_work_list(succ); + } + lp = lp->parent(); + assert(lp != NULL, "nested loop must have parent by now"); + } + + // Merge loop tree branch for all successors. + innermost = innermost == NULL ?
lp : innermost->sorted_merge(lp); + + } // end loop + + if (innermost == NULL) { + assert(blk->successors()->length() == 0, "CFG exit"); + blk->set_loop(loop_tree_root()); + } else if (innermost->head() == blk) { + // If loop header, complete the tree pointers + if (blk->loop() != innermost) { +#if ASSERT + assert(blk->loop()->head() == innermost->head(), "same head"); + Loop* dl; + for (dl = innermost; dl != NULL && dl != blk->loop(); dl = dl->parent()); + assert(dl == blk->loop(), "blk->loop() already in innermost list"); +#endif + blk->set_loop(innermost); + } + innermost->def_locals()->add(blk->def_locals()); + Loop* l = innermost; + Loop* p = l->parent(); + while (p && l->head() == blk) { + l->set_sibling(p->child()); // Put self on parents 'next child' + p->set_child(l); // Make self the first child of parent + p->def_locals()->add(l->def_locals()); + l = p; // Walk up the parent chain + p = l->parent(); + } + } else { + blk->set_loop(innermost); + innermost->def_locals()->add(blk->def_locals()); + } +} + +// ------------------------------------------------------------------ +// ciTypeFlow::Loop::contains +// +// Returns true if lp is nested loop. +bool ciTypeFlow::Loop::contains(ciTypeFlow::Loop* lp) const { + assert(lp != NULL, ""); + if (this == lp || head() == lp->head()) return true; + int depth1 = depth(); + int depth2 = lp->depth(); + if (depth1 > depth2) + return false; + while (depth1 < depth2) { + depth2--; + lp = lp->parent(); + } + return this == lp; +} + +// ------------------------------------------------------------------ +// ciTypeFlow::Loop::depth +// +// Loop depth +int ciTypeFlow::Loop::depth() const { + int dp = 0; + for (Loop* lp = this->parent(); lp != NULL; lp = lp->parent()) + dp++; + return dp; +} + +#ifndef PRODUCT +// ------------------------------------------------------------------ +// ciTypeFlow::Loop::print +void ciTypeFlow::Loop::print(outputStream* st, int indent) const { + for (int i = 0; i < indent; i++) st->print(" "); + st->print("%d<-%d %s", + is_root() ? 0 : this->head()->pre_order(), + is_root() ? 0 : this->tail()->pre_order(), + is_irreducible()?" irr":""); + st->print(" defs: "); + def_locals()->print_on(st, _head->outer()->method()->max_locals()); + st->cr(); + for (Loop* ch = child(); ch != NULL; ch = ch->sibling()) + ch->print(st, indent+2); +} +#endif + +// ------------------------------------------------------------------ +// ciTypeFlow::df_flow_types +// +// Perform the depth first type flow analysis. Helper for flow_types. 
+void ciTypeFlow::df_flow_types(Block* start, + bool do_flow, + StateVector* temp_vector, + JsrSet* temp_set) { + int dft_len = 100; + GrowableArray stk(arena(), dft_len, 0, NULL); + + ciBlock* dummy = _methodBlocks->make_dummy_block(); + JsrSet* root_set = new JsrSet(NULL, 0); + Block* root_head = new (arena()) Block(this, dummy, root_set); + Block* root_tail = new (arena()) Block(this, dummy, root_set); + root_head->set_pre_order(0); + root_head->set_post_order(0); + root_tail->set_pre_order(max_jint); + root_tail->set_post_order(max_jint); + set_loop_tree_root(new (arena()) Loop(root_head, root_tail)); + + stk.push(start); + + _next_pre_order = 0; // initialize pre_order counter + _rpo_list = NULL; + int next_po = 0; // initialize post_order counter + + // Compute RPO and the control flow graph + int size; + while ((size = stk.length()) > 0) { + Block* blk = stk.top(); // Leave node on stack + if (!blk->is_visited()) { + // forward arc in graph + assert (!blk->has_pre_order(), ""); + blk->set_next_pre_order(); + + if (_next_pre_order >= MaxNodeLimit / 2) { + // Too many basic blocks. Bail out. + // This can happen when try/finally constructs are nested to depth N, + // and there is O(2**N) cloning of jsr bodies. See bug 4697245! + // "MaxNodeLimit / 2" is used because probably the parser will + // generate at least twice that many nodes and bail out. + record_failure("too many basic blocks"); + return; + } + if (do_flow) { + flow_block(blk, temp_vector, temp_set); + if (failing()) return; // Watch for bailouts. + } + } else if (!blk->is_post_visited()) { + // cross or back arc + for (SuccIter iter(blk); !iter.done(); iter.next()) { + Block* succ = iter.succ(); + if (!succ->is_visited()) { + stk.push(succ); + } + } + if (stk.length() == size) { + // There were no additional children, post visit node now + stk.pop(); // Remove node from stack + + build_loop_tree(blk); + blk->set_post_order(next_po++); // Assign post order + prepend_to_rpo_list(blk); + assert(blk->is_post_visited(), ""); + + if (blk->is_loop_head() && !blk->is_on_work_list()) { + // Assume loop heads need more data flow + add_to_work_list(blk); + } + } + } else { + stk.pop(); // Remove post-visited node from stack + } + } +} + +// ------------------------------------------------------------------ // ciTypeFlow::flow_types // // Perform the type flow analysis, creating and cloning Blocks as @@ -2236,92 +2662,94 @@ JsrSet* temp_set = new JsrSet(NULL, 16); // Create the method entry block. - Block* block = block_at(start_bci(), temp_set); - block->set_pre_order(_next_pre_order++); - assert(block->is_start(), "start block must have order #0"); + Block* start = block_at(start_bci(), temp_set); // Load the initial state into it. const StateVector* start_state = get_start_state(); if (failing()) return; - block->meet(start_state); - add_to_work_list(block); + start->meet(start_state); - // Trickle away. - while (!work_list_empty()) { - Block* block = work_list_next(); - flow_block(block, temp_vector, temp_set); + // Depth first visit + df_flow_types(start, true /*do flow*/, temp_vector, temp_set); + if (failing()) return; + assert(_rpo_list == start, "must be start"); - // NodeCountCutoff is the number of nodes at which the parser - // will bail out. Probably if we already have lots of BBs, - // the parser will generate at least twice that many nodes and bail out. - // Therefore, this is a conservatively large limit at which to - // bail out in the pre-parse typeflow pass. 
- int block_limit = MaxNodeLimit / 2; - - if (_next_pre_order >= block_limit) { - // Too many basic blocks. Bail out. - // - // This can happen when try/finally constructs are nested to depth N, - // and there is O(2**N) cloning of jsr bodies. See bug 4697245! - record_failure("too many basic blocks"); - return; + // Any loops found? + if (loop_tree_root()->child() != NULL && + env()->comp_level() >= CompLevel_full_optimization) { + // Loop optimizations are not performed on Tier1 compiles. + + bool changed = clone_loop_heads(loop_tree_root(), temp_vector, temp_set); + + // If some loop heads were cloned, recompute postorder and loop tree + if (changed) { + loop_tree_root()->set_child(NULL); + for (Block* blk = _rpo_list; blk != NULL;) { + Block* next = blk->rpo_next(); + blk->df_init(); + blk = next; + } + df_flow_types(start, false /*no flow*/, temp_vector, temp_set); } + } - // Watch for bailouts. - if (failing()) return; + if (CITraceTypeFlow) { + tty->print_cr("\nLoop tree"); + loop_tree_root()->print(); + } + + // Continue flow analysis until fixed point reached + + debug_only(int max_block = _next_pre_order;) + + while (!work_list_empty()) { + Block* blk = work_list_next(); + assert (blk->has_post_order(), "post order assigned above"); + + flow_block(blk, temp_vector, temp_set); + + assert (max_block == _next_pre_order, "no new blocks"); + assert (!failing(), "no more bailouts"); } } // ------------------------------------------------------------------ // ciTypeFlow::map_blocks // -// Create the block map, which indexes blocks in pre_order. +// Create the block map, which indexes blocks in reverse post-order. void ciTypeFlow::map_blocks() { assert(_block_map == NULL, "single initialization"); - int pre_order_limit = _next_pre_order; - _block_map = NEW_ARENA_ARRAY(arena(), Block*, pre_order_limit); - assert(pre_order_limit == block_count(), ""); - int po; - for (po = 0; po < pre_order_limit; po++) { - debug_only(_block_map[po] = NULL); - } - ciMethodBlocks *mblks = _methodBlocks; - ciBlock* current = NULL; - int limit_bci = code_size(); - for (int bci = 0; bci < limit_bci; bci++) { - ciBlock* ciblk = mblks->block_containing(bci); - if (ciblk != NULL && ciblk != current) { - current = ciblk; - int curidx = ciblk->index(); - int block_count = (_idx_to_blocklist[curidx] == NULL) ? 0 : _idx_to_blocklist[curidx]->length(); - for (int i = 0; i < block_count; i++) { - Block* block = _idx_to_blocklist[curidx]->at(i); - if (!block->has_pre_order()) continue; - int po = block->pre_order(); - assert(_block_map[po] == NULL, "unique ref to block"); - assert(0 <= po && po < pre_order_limit, ""); - _block_map[po] = block; - } - } - } - for (po = 0; po < pre_order_limit; po++) { - assert(_block_map[po] != NULL, "must not drop any blocks"); - Block* block = _block_map[po]; + int block_ct = _next_pre_order; + _block_map = NEW_ARENA_ARRAY(arena(), Block*, block_ct); + assert(block_ct == block_count(), ""); + + Block* blk = _rpo_list; + for (int m = 0; m < block_ct; m++) { + int rpo = blk->rpo(); + assert(rpo == m, "should be sequential"); + _block_map[rpo] = blk; + blk = blk->rpo_next(); + } + assert(blk == NULL, "should be done"); + + for (int j = 0; j < block_ct; j++) { + assert(_block_map[j] != NULL, "must not drop any blocks"); + Block* block = _block_map[j]; // Remove dead blocks from successor lists: for (int e = 0; e <= 1; e++) { GrowableArray* l = e? 
block->exceptions(): block->successors(); - for (int i = 0; i < l->length(); i++) { - Block* s = l->at(i); - if (!s->has_pre_order()) { - if (CITraceTypeFlow) { - tty->print("Removing dead %s successor of #%d: ", (e? "exceptional": "normal"), block->pre_order()); - s->print_value_on(tty); - tty->cr(); - } - l->remove(s); - --i; - } + for (int k = 0; k < l->length(); k++) { + Block* s = l->at(k); + if (!s->has_post_order()) { + if (CITraceTypeFlow) { + tty->print("Removing dead %s successor of #%d: ", (e? "exceptional": "normal"), block->pre_order()); + s->print_value_on(tty); + tty->cr(); + } + l->remove(s); + --k; + } } } } @@ -2332,7 +2760,7 @@ // // Find a block with this ciBlock which has a compatible JsrSet. // If no such block exists, create it, unless the option is no_create. -// If the option is create_private_copy, always create a fresh private copy. +// If the option is create_backedge_copy, always create a fresh backedge copy. ciTypeFlow::Block* ciTypeFlow::get_block_for(int ciBlockIndex, ciTypeFlow::JsrSet* jsrs, CreateOption option) { Arena* a = arena(); GrowableArray* blocks = _idx_to_blocklist[ciBlockIndex]; @@ -2345,12 +2773,12 @@ _idx_to_blocklist[ciBlockIndex] = blocks; } - if (option != create_private_copy) { + if (option != create_backedge_copy) { int len = blocks->length(); for (int i = 0; i < len; i++) { Block* block = blocks->at(i); - if (!block->is_private_copy() && block->is_compatible_with(jsrs)) { - return block; + if (!block->is_backedge_copy() && block->is_compatible_with(jsrs)) { + return block; } } } @@ -2360,15 +2788,15 @@ // We did not find a compatible block. Create one. Block* new_block = new (a) Block(this, _methodBlocks->block(ciBlockIndex), jsrs); - if (option == create_private_copy) new_block->set_private_copy(true); + if (option == create_backedge_copy) new_block->set_backedge_copy(true); blocks->append(new_block); return new_block; } // ------------------------------------------------------------------ -// ciTypeFlow::private_copy_count +// ciTypeFlow::backedge_copy_count // -int ciTypeFlow::private_copy_count(int ciBlockIndex, ciTypeFlow::JsrSet* jsrs) const { +int ciTypeFlow::backedge_copy_count(int ciBlockIndex, ciTypeFlow::JsrSet* jsrs) const { GrowableArray* blocks = _idx_to_blocklist[ciBlockIndex]; if (blocks == NULL) { @@ -2379,7 +2807,7 @@ int len = blocks->length(); for (int i = 0; i < len; i++) { Block* block = blocks->at(i); - if (block->is_private_copy() && block->is_compatible_with(jsrs)) { + if (block->is_backedge_copy() && block->is_compatible_with(jsrs)) { count++; } } @@ -2408,10 +2836,12 @@ if (failing()) { return; } + + map_blocks(); + if (CIPrintTypeFlow || CITraceTypeFlow) { - print_on(tty); + rpo_print_on(tty); } - map_blocks(); } // ------------------------------------------------------------------ @@ -2469,4 +2899,19 @@ st->print_cr("********************************************************"); st->cr(); } + +void ciTypeFlow::rpo_print_on(outputStream* st) const { + st->print_cr("********************************************************"); + st->print ("TypeFlow for "); + method()->name()->print_symbol_on(st); + int limit_bci = code_size(); + st->print_cr(" %d bytes", limit_bci); + for (Block* blk = _rpo_list; blk != NULL; blk = blk->rpo_next()) { + blk->print_on(st); + st->print_cr("--------------------------------------------------------"); + st->cr(); + } + st->print_cr("********************************************************"); + st->cr(); +} #endif --- old/hotspot/src/share/vm/ci/ciTypeFlow.hpp 2009-08-01 
04:10:00.047195771 +0100 +++ new/hotspot/src/share/vm/ci/ciTypeFlow.hpp 2009-08-01 04:09:59.962143196 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ciTypeFlow.hpp 1.26 08/11/24 12:20:59 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,11 +37,13 @@ int _max_locals; int _max_stack; int _code_size; + bool _has_irreducible_entry; const char* _failure_reason; public: class StateVector; + class Loop; class Block; // Build a type flow analyzer @@ -58,6 +60,7 @@ int max_stack() const { return _max_stack; } int max_cells() const { return _max_locals + _max_stack; } int code_size() const { return _code_size; } + bool has_irreducible_entry() const { return _has_irreducible_entry; } // Represents information about an "active" jsr call. This // class represents a call to the routine at some entry address @@ -128,6 +131,19 @@ void print_on(outputStream* st) const PRODUCT_RETURN; }; + class LocalSet VALUE_OBJ_CLASS_SPEC { + private: + enum Constants { max = 63 }; + uint64_t _bits; + public: + LocalSet() : _bits(0) {} + void add(uint32_t i) { if (i < (uint32_t)max) _bits |= (1LL << i); } + void add(LocalSet* ls) { _bits |= ls->_bits; } + bool test(uint32_t i) const { return i < (uint32_t)max ? (_bits>>i)&1U : true; } + void clear() { _bits = 0; } + void print_on(outputStream* st, int limit) const PRODUCT_RETURN; + }; + // Used as a combined index for locals and temps enum Cell { Cell_0, Cell_max = INT_MAX @@ -145,6 +161,8 @@ int _trap_bci; int _trap_index; + LocalSet _def_locals; // For entire block + static ciType* type_meet_internal(ciType* t1, ciType* t2, ciTypeFlow* analyzer); public: @@ -184,6 +202,9 @@ int monitor_count() const { return _monitor_count; } void set_monitor_count(int mc) { _monitor_count = mc; } + LocalSet* def_locals() { return &_def_locals; } + const LocalSet* def_locals() const { return &_def_locals; } + static Cell start_cell() { return (Cell)0; } static Cell next_cell(Cell c) { return (Cell)(((int)c) + 1); } Cell limit_cell() const { @@ -253,6 +274,10 @@ return type->basic_type() == T_DOUBLE; } + void store_to_local(int lnum) { + _def_locals.add((uint) lnum); + } + void push_translate(ciType* type); void push_int() { @@ -361,6 +386,7 @@ "must be reference type or return address"); overwrite_local_double_long(index); set_type_at(local(index), type); + store_to_local(index); } void load_local_double(int index) { @@ -379,6 +405,8 @@ overwrite_local_double_long(index); set_type_at(local(index), type); set_type_at(local(index+1), type2); + store_to_local(index); + store_to_local(index+1); } void load_local_float(int index) { @@ -391,6 +419,7 @@ assert(is_float(type), "must be float type"); overwrite_local_double_long(index); set_type_at(local(index), type); + store_to_local(index); } void load_local_int(int index) { @@ -403,6 +432,7 @@ assert(is_int(type), "must be int type"); overwrite_local_double_long(index); set_type_at(local(index), type); + store_to_local(index); } void load_local_long(int index) { @@ -421,6 +451,8 @@ overwrite_local_double_long(index); set_type_at(local(index), type); set_type_at(local(index+1), type2); + store_to_local(index); + store_to_local(index+1); } // Stop interpretation of this path with a trap. 
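Editor's sketch (not part of the patch; the names LocalSetSketch and max_tracked are illustrative): the LocalSet added to ciTypeFlow.hpp above tracks at most 63 local slots in a single 64-bit mask, and any higher-numbered local is conservatively reported as defined. A minimal standalone C++ rendering of that behavior, assuming nothing beyond what the added class shows:

  #include <cstdint>
  #include <cassert>

  // Illustrative only: mirrors the saturating semantics of ciTypeFlow::LocalSet.
  struct LocalSetSketch {
    enum { max_tracked = 63 };        // same limit as LocalSet's Constants::max
    uint64_t bits;
    LocalSetSketch() : bits(0) {}
    void add(uint32_t i)                   { if (i < (uint32_t)max_tracked) bits |= (uint64_t(1) << i); }
    void add(const LocalSetSketch& other)  { bits |= other.bits; }
    bool test(uint32_t i) const            { return i < (uint32_t)max_tracked ? ((bits >> i) & 1) != 0 : true; }
    void clear()                           { bits = 0; }
  };

  int main() {
    LocalSetSketch defs;
    defs.add(2);             // e.g. an istore_2 in the block records local 2
    assert(defs.test(2));    // tracked precisely
    assert(!defs.test(5));   // no store to local 5 in this block
    assert(defs.test(100));  // beyond the tracked range: conservatively "may be stored to"
    return 0;
  }

This is why is_invariant_local() can only prove invariance for low-numbered locals; anything past the mask width is treated as potentially modified.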
@@ -453,13 +485,31 @@ }; // Parameter for "find_block" calls: - // Describes the difference between a public and private copy. + // Describes the difference between a public and backedge copy. enum CreateOption { create_public_copy, - create_private_copy, + create_backedge_copy, no_create }; + // Successor iterator + class SuccIter : public StackObj { + private: + Block* _pred; + int _index; + Block* _succ; + public: + SuccIter() : _pred(NULL), _index(-1), _succ(NULL) {} + SuccIter(Block* pred) : _pred(pred), _index(-1), _succ(NULL) { next(); } + int index() { return _index; } + Block* pred() { return _pred; } // Return predecessor + bool done() { return _index < 0; } // Finished? + Block* succ() { return _succ; } // Return current successor + void next(); // Advance + void set_succ(Block* succ); // Update current successor + bool is_normal_ctrl() { return index() < _pred->successors()->length(); } + }; + // A basic block class Block : public ResourceObj { private: @@ -473,15 +523,24 @@ int _trap_bci; int _trap_index; - // A reasonable approximation to pre-order, provided.to the client. + // pre_order, assigned at first visit. Used as block ID and "visited" tag int _pre_order; - // Has this block been cloned for some special purpose? - bool _private_copy; + // A post-order, used to compute the reverse post order (RPO) provided to the client + int _post_order; // used to compute rpo + + // Has this block been cloned for a loop backedge? + bool _backedge_copy; // A pointer used for our internal work list - Block* _next; - bool _on_work_list; + Block* _next; + bool _on_work_list; // on the work list + Block* _rpo_next; // Reverse post order list + + // Loop info + Loop* _loop; // nearest loop + bool _irreducible_entry; // entry to irreducible loop + bool _exception_entry; // entry to exception handler ciBlock* ciblock() const { return _ciblock; } StateVector* state() const { return _state; } @@ -507,10 +566,11 @@ int start() const { return _ciblock->start_bci(); } int limit() const { return _ciblock->limit_bci(); } int control() const { return _ciblock->control_bci(); } + JsrSet* jsrs() const { return _jsrs; } - bool is_private_copy() const { return _private_copy; } - void set_private_copy(bool z); - int private_copy_count() const { return outer()->private_copy_count(ciblock()->index(), _jsrs); } + bool is_backedge_copy() const { return _backedge_copy; } + void set_backedge_copy(bool z); + int backedge_copy_count() const { return outer()->backedge_copy_count(ciblock()->index(), _jsrs); } // access to entry state int stack_size() const { return _state->stack_size(); } @@ -518,6 +578,20 @@ ciType* local_type_at(int i) const { return _state->local_type_at(i); } ciType* stack_type_at(int i) const { return _state->stack_type_at(i); } + // Data flow on locals + bool is_invariant_local(uint v) const { + assert(is_loop_head(), "only loop heads"); + // Find outermost loop with same loop head + Loop* lp = loop(); + while (lp->parent() != NULL) { + if (lp->parent()->head() != lp->head()) break; + lp = lp->parent(); + } + return !lp->def_locals()->test(v); + } + LocalSet* def_locals() { return _state->def_locals(); } + const LocalSet* def_locals() const { return _state->def_locals(); } + // Get the successors for this Block. GrowableArray* successors(ciBytecodeStream* str, StateVector* state, @@ -527,13 +601,6 @@ return _successors; } - // Helper function for "successors" when making private copies of - // loop heads for C2. 
- Block * clone_loop_head(ciTypeFlow* analyzer, - int branch_bci, - Block* target, - JsrSet* jsrs); - // Get the exceptional successors for this Block. GrowableArray* exceptions() { if (_exceptions == NULL) { @@ -587,17 +654,126 @@ bool is_on_work_list() const { return _on_work_list; } bool has_pre_order() const { return _pre_order >= 0; } - void set_pre_order(int po) { assert(!has_pre_order() && po >= 0, ""); _pre_order = po; } + void set_pre_order(int po) { assert(!has_pre_order(), ""); _pre_order = po; } int pre_order() const { assert(has_pre_order(), ""); return _pre_order; } + void set_next_pre_order() { set_pre_order(outer()->inc_next_pre_order()); } bool is_start() const { return _pre_order == outer()->start_block_num(); } - // A ranking used in determining order within the work list. - bool is_simpler_than(Block* other); + // Reverse post order + void df_init(); + bool has_post_order() const { return _post_order >= 0; } + void set_post_order(int po) { assert(!has_post_order() && po >= 0, ""); _post_order = po; } + void reset_post_order(int o){ _post_order = o; } + int post_order() const { assert(has_post_order(), ""); return _post_order; } + + bool has_rpo() const { return has_post_order() && outer()->have_block_count(); } + int rpo() const { assert(has_rpo(), ""); return outer()->block_count() - post_order() - 1; } + void set_rpo_next(Block* b) { _rpo_next = b; } + Block* rpo_next() { return _rpo_next; } + + // Loops + Loop* loop() const { return _loop; } + void set_loop(Loop* lp) { _loop = lp; } + bool is_loop_head() const { return _loop && _loop->head() == this; } + void set_irreducible_entry(bool c) { _irreducible_entry = c; } + bool is_irreducible_entry() const { return _irreducible_entry; } + bool is_visited() const { return has_pre_order(); } + bool is_post_visited() const { return has_post_order(); } + bool is_clonable_exit(Loop* lp); + Block* looping_succ(Loop* lp); // Successor inside of loop + bool is_single_entry_loop_head() const { + if (!is_loop_head()) return false; + for (Loop* lp = loop(); lp != NULL && lp->head() == this; lp = lp->parent()) + if (lp->is_irreducible()) return false; + return true; + } void print_value_on(outputStream* st) const PRODUCT_RETURN; void print_on(outputStream* st) const PRODUCT_RETURN; }; + // Loop + class Loop : public ResourceObj { + private: + Loop* _parent; + Loop* _sibling; // List of siblings, null terminated + Loop* _child; // Head of child list threaded thru sibling pointer + Block* _head; // Head of loop + Block* _tail; // Tail of loop + bool _irreducible; + LocalSet _def_locals; + + public: + Loop(Block* head, Block* tail) : + _head(head), _tail(tail), + _parent(NULL), _sibling(NULL), _child(NULL), + _irreducible(false), _def_locals() {} + + Loop* parent() const { return _parent; } + Loop* sibling() const { return _sibling; } + Loop* child() const { return _child; } + Block* head() const { return _head; } + Block* tail() const { return _tail; } + void set_parent(Loop* p) { _parent = p; } + void set_sibling(Loop* s) { _sibling = s; } + void set_child(Loop* c) { _child = c; } + void set_head(Block* hd) { _head = hd; } + void set_tail(Block* tl) { _tail = tl; } + + int depth() const; // nesting depth + + // Returns true if lp is a nested loop or us. 
+ bool contains(Loop* lp) const; + bool contains(Block* blk) const { return contains(blk->loop()); } + + // Data flow on locals + LocalSet* def_locals() { return &_def_locals; } + const LocalSet* def_locals() const { return &_def_locals; } + + // Merge the branch lp into this branch, sorting on the loop head + // pre_orders. Returns the new branch. + Loop* sorted_merge(Loop* lp); + + // Mark non-single entry to loop + void set_irreducible(Block* entry) { + _irreducible = true; + entry->set_irreducible_entry(true); + } + bool is_irreducible() const { return _irreducible; } + + bool is_root() const { return _tail->pre_order() == max_jint; } + + void print(outputStream* st = tty, int indent = 0) const PRODUCT_RETURN; + }; + + // Postorder iteration over the loop tree. + class PostorderLoops : public StackObj { + private: + Loop* _root; + Loop* _current; + public: + PostorderLoops(Loop* root) : _root(root), _current(root) { + while (_current->child() != NULL) { + _current = _current->child(); + } + } + bool done() { return _current == NULL; } // Finished iterating? + void next(); // Advance to next loop + Loop* current() { return _current; } // Return current loop. + }; + + // Preorder iteration over the loop tree. + class PreorderLoops : public StackObj { + private: + Loop* _root; + Loop* _current; + public: + PreorderLoops(Loop* root) : _root(root), _current(root) {} + bool done() { return _current == NULL; } // Finished iterating? + void next(); // Advance to next loop + Loop* current() { return _current; } // Return current loop. + }; + // Standard indexes of successors, for various bytecodes. enum { FALL_THROUGH = 0, // normal control @@ -622,6 +798,12 @@ // Tells if a given instruction is able to generate an exception edge. bool can_trap(ciBytecodeStream& str); + // Clone the loop heads. Returns true if any cloning occurred. + bool clone_loop_heads(Loop* lp, StateVector* temp_vector, JsrSet* temp_set); + + // Clone lp's head and replace tail's successors with clone. + Block* clone_loop_head(Loop* lp, StateVector* temp_vector, JsrSet* temp_set); + public: // Return the block beginning at bci which has a JsrSet compatible // with jsrs. @@ -630,8 +812,8 @@ // block factory Block* get_block_for(int ciBlockIndex, JsrSet* jsrs, CreateOption option = create_public_copy); - // How many of the blocks have the private_copy bit set? - int private_copy_count(int ciBlockIndex, JsrSet* jsrs) const; + // How many of the blocks have the backedge_copy bit set? + int backedge_copy_count(int ciBlockIndex, JsrSet* jsrs) const; // Return an existing block containing bci which has a JsrSet compatible // with jsrs, or NULL if there is none. @@ -654,11 +836,18 @@ return _block_map[po]; } Block* start_block() const { return pre_order_at(start_block_num()); } int start_block_num() const { return 0; } + Block* rpo_at(int rpo) const { assert(0 <= rpo && rpo < block_count(), "out of bounds"); + return _block_map[rpo]; } + int next_pre_order() { return _next_pre_order; } + int inc_next_pre_order() { return _next_pre_order++; } private: // A work list used during flow analysis. Block* _work_list; + // List of blocks in reverse post order + Block* _rpo_list; + // Next Block::_pre_order. After mapping, doubles as block_count. int _next_pre_order; @@ -671,6 +860,15 @@ // Add a basic block to our work list. void add_to_work_list(Block* block); + // Prepend a basic block to rpo list. 
+ void prepend_to_rpo_list(Block* blk) { + blk->set_rpo_next(_rpo_list); + _rpo_list = blk; + } + + // Root of the loop tree + Loop* _loop_tree_root; + // State used for make_jsr_record int _jsr_count; GrowableArray* _jsr_records; @@ -680,6 +878,9 @@ // does not already exist. JsrRecord* make_jsr_record(int entry_address, int return_address); + void set_loop_tree_root(Loop* ltr) { _loop_tree_root = ltr; } + Loop* loop_tree_root() { return _loop_tree_root; } + private: // Get the initial state for start_bci: const StateVector* get_start_state(); @@ -706,6 +907,15 @@ // necessary. void flow_types(); + // Perform the depth first type flow analysis. Helper for flow_types. + void df_flow_types(Block* start, + bool do_flow, + StateVector* temp_vector, + JsrSet* temp_set); + + // Incrementally build loop tree. + void build_loop_tree(Block* blk); + // Create the block map, which indexes blocks in pre_order. void map_blocks(); @@ -714,4 +924,6 @@ void do_flow(); void print_on(outputStream* st) const PRODUCT_RETURN; + + void rpo_print_on(outputStream* st) const PRODUCT_RETURN; }; --- old/hotspot/src/share/vm/classfile/classFileParser.cpp 2009-08-01 04:10:01.082959655 +0100 +++ new/hotspot/src/share/vm/classfile/classFileParser.cpp 2009-08-01 04:10:00.972987965 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)classFileParser.cpp 1.280 07/07/09 11:19:49 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ #define JAVA_CLASSFILE_MAGIC 0xCAFEBABE #define JAVA_MIN_SUPPORTED_VERSION 45 -#define JAVA_MAX_SUPPORTED_VERSION 50 +#define JAVA_MAX_SUPPORTED_VERSION 51 #define JAVA_MAX_SUPPORTED_MINOR_VERSION 0 // Used for two backward compatibility reasons: @@ -47,7 +47,8 @@ // Used for backward compatibility reasons: // - to check for javac bug fixes that happened after 1.5 -#define JAVA_6_VERSION 50 +// - also used as the max version when running in jdk6 +#define JAVA_6_VERSION 50 void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int length, TRAPS) { @@ -170,11 +171,23 @@ // Got utf8 string, guarantee utf8_length+1 bytes, set stream position forward. 
cfs->guarantee_more(utf8_length+1, CHECK); // utf8 string, tag/access_flags cfs->skip_u1_fast(utf8_length); + // Before storing the symbol, make sure it's legal if (_need_verify) { verify_legal_utf8((unsigned char*)utf8_buffer, utf8_length, CHECK); } + if (AnonymousClasses && has_cp_patch_at(index)) { + Handle patch = clear_cp_patch_at(index); + guarantee_property(java_lang_String::is_instance(patch()), + "Illegal utf8 patch at %d in class file %s", + index, CHECK); + char* str = java_lang_String::as_utf8_string(patch()); + // (could use java_lang_String::as_symbol instead, but might as well batch them) + utf8_buffer = (u1*) str; + utf8_length = (int) strlen(str); + } + unsigned int hash; symbolOop result = SymbolTable::lookup_only((char*)utf8_buffer, utf8_length, hash); if (result == NULL) { @@ -247,9 +260,9 @@ int klass_ref_index = cp->klass_ref_index_at(index); int name_and_type_ref_index = cp->name_and_type_ref_index_at(index); check_property(valid_cp_range(klass_ref_index, length) && - cp->tag_at(klass_ref_index).is_klass_reference(), - "Invalid constant pool index %u in class file %s", - klass_ref_index, + is_klass_reference(cp, klass_ref_index), + "Invalid constant pool index %u in class file %s", + klass_ref_index, CHECK_(nullHandle)); check_property(valid_cp_range(name_and_type_ref_index, length) && cp->tag_at(name_and_type_ref_index).is_name_and_type(), @@ -328,16 +341,46 @@ } // end of switch } // end of for + if (_cp_patches != NULL) { + // need to treat this_class specially... + assert(AnonymousClasses, ""); + int this_class_index; + { + cfs->guarantee_more(8, CHECK_(nullHandle)); // flags, this_class, super_class, infs_len + u1* mark = cfs->current(); + u2 flags = cfs->get_u2_fast(); + this_class_index = cfs->get_u2_fast(); + cfs->set_current(mark); // revert to mark + } + + for (index = 1; index < length; index++) { // Index 0 is unused + if (has_cp_patch_at(index)) { + guarantee_property(index != this_class_index, + "Illegal constant pool patch to self at %d in class file %s", + index, CHECK_(nullHandle)); + patch_constant_pool(cp, index, cp_patch_at(index), CHECK_(nullHandle)); + } + } + // Ensure that all the patches have been used. + for (index = 0; index < _cp_patches->length(); index++) { + guarantee_property(!has_cp_patch_at(index), + "Unused constant pool patch at %d in class file %s", + index, CHECK_(nullHandle)); + } + } + if (!_need_verify) { return cp; } // second verification pass - checks the strings are of the right format. + // but not yet to the other entries for (index = 1; index < length; index++) { jbyte tag = cp->tag_at(index).value(); switch (tag) { case JVM_CONSTANT_UnresolvedClass: { symbolHandle class_name(THREAD, cp->unresolved_klass_at(index)); + // check the name, even if _cp_patches will overwrite it verify_legal_class_name(class_name, CHECK_(nullHandle)); break; } @@ -380,6 +423,73 @@ } +void ClassFileParser::patch_constant_pool(constantPoolHandle cp, int index, Handle patch, TRAPS) { + assert(AnonymousClasses, ""); + BasicType patch_type = T_VOID; + switch (cp->tag_at(index).value()) { + + case JVM_CONSTANT_UnresolvedClass : + // Patching a class means pre-resolving it. + // The name in the constant pool is ignored. 
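For context, a sketch of how a caller might build the constant-pool patch array consumed above (hypothetical code, not taken from this patch: the helper name, the GrowableArray<Handle> element type and the three-argument GrowableArray constructor are assumptions). Index 0 must stay unpatched and unpatched slots stay null handles, which is exactly what has_cp_patch_at() tests; the patches are only consulted when the AnonymousClasses flag is on, as the asserts above show:

    // Build a patch array with one slot per constant-pool index.
    static GrowableArray<Handle>* make_cp_patches(int cp_length,
                                                  int patched_index,
                                                  Handle replacement) {
      GrowableArray<Handle>* patches =
          new GrowableArray<Handle>(cp_length, cp_length, Handle());  // all slots null
      patches->at_put(patched_index, replacement);  // e.g. a java.lang.Class mirror
      return patches;
    }

The resulting array would then be handed to the new parseClassFile overload that takes a cp_patches argument.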
+ if (patch->klass() == SystemDictionary::class_klass()) { // %%% java_lang_Class::is_instance + guarantee_property(!java_lang_Class::is_primitive(patch()), + "Illegal class patch at %d in class file %s", + index, CHECK); + cp->klass_at_put(index, java_lang_Class::as_klassOop(patch())); + } else { + guarantee_property(java_lang_String::is_instance(patch()), + "Illegal class patch at %d in class file %s", + index, CHECK); + symbolHandle name = java_lang_String::as_symbol(patch(), CHECK); + cp->unresolved_klass_at_put(index, name()); + } + break; + + case JVM_CONSTANT_UnresolvedString : + // Patching a string means pre-resolving it. + // The spelling in the constant pool is ignored. + // The constant reference may be any object whatever. + // If it is not a real interned string, the constant is referred + // to as a "pseudo-string", and must be presented to the CP + // explicitly, because it may require scavenging. + cp->pseudo_string_at_put(index, patch()); + break; + + case JVM_CONSTANT_Integer : patch_type = T_INT; goto patch_prim; + case JVM_CONSTANT_Float : patch_type = T_FLOAT; goto patch_prim; + case JVM_CONSTANT_Long : patch_type = T_LONG; goto patch_prim; + case JVM_CONSTANT_Double : patch_type = T_DOUBLE; goto patch_prim; + patch_prim: + { + jvalue value; + BasicType value_type = java_lang_boxing_object::get_value(patch(), &value); + guarantee_property(value_type == patch_type, + "Illegal primitive patch at %d in class file %s", + index, CHECK); + switch (value_type) { + case T_INT: cp->int_at_put(index, value.i); break; + case T_FLOAT: cp->float_at_put(index, value.f); break; + case T_LONG: cp->long_at_put(index, value.j); break; + case T_DOUBLE: cp->double_at_put(index, value.d); break; + default: assert(false, ""); + } + } + break; + + default: + // %%% TODO: put method handles into CONSTANT_InterfaceMethodref, etc. + guarantee_property(!has_cp_patch_at(index), + "Illegal unexpected patch at %d in class file %s", + index, CHECK); + return; + } + + // On fall-through, mark the patch as used. + clear_cp_patch_at(index); +} + + + class NameSigHash: public ResourceObj { public: symbolOop _name; // name @@ -450,25 +560,33 @@ int index; for (index = 0; index < length; index++) { u2 interface_index = cfs->get_u2(CHECK_(nullHandle)); + KlassHandle interf; check_property( - valid_cp_range(interface_index, cp->length()) && - cp->tag_at(interface_index).is_unresolved_klass(), - "Interface name has bad constant pool index %u in class file %s", + valid_cp_range(interface_index, cp->length()) && + is_klass_reference(cp, interface_index), + "Interface name has bad constant pool index %u in class file %s", interface_index, CHECK_(nullHandle)); - symbolHandle unresolved_klass (THREAD, cp->klass_name_at(interface_index)); + if (cp->tag_at(interface_index).is_klass()) { + interf = KlassHandle(THREAD, cp->resolved_klass_at(interface_index)); + } else { + symbolHandle unresolved_klass (THREAD, cp->klass_name_at(interface_index)); + + // Don't need to check legal name because it's checked when parsing constant pool. + // But need to make sure it's not an array type. 
+ guarantee_property(unresolved_klass->byte_at(0) != JVM_SIGNATURE_ARRAY, + "Bad interface name in class file %s", CHECK_(nullHandle)); + + vmtimer->suspend(); // do not count recursive loading twice + // Call resolve_super so classcircularity is checked + klassOop k = SystemDictionary::resolve_super_or_fail(class_name, + unresolved_klass, class_loader, protection_domain, + false, CHECK_(nullHandle)); + interf = KlassHandle(THREAD, k); + vmtimer->resume(); - // Don't need to check legal name because it's checked when parsing constant pool. - // But need to make sure it's not an array type. - guarantee_property(unresolved_klass->byte_at(0) != JVM_SIGNATURE_ARRAY, - "Bad interface name in class file %s", CHECK_(nullHandle)); - - vmtimer->suspend(); // do not count recursive loading twice - // Call resolve_super so classcircularity is checked - klassOop k = SystemDictionary::resolve_super_or_fail(class_name, - unresolved_klass, class_loader, protection_domain, - false, CHECK_(nullHandle)); - KlassHandle interf (THREAD, k); - vmtimer->resume(); + if (LinkWellKnownClasses) // my super type is well known to me + cp->klass_at_put(interface_index, interf()); // eagerly resolve + } if (!Klass::cast(interf())->is_interface()) { THROW_MSG_(vmSymbols::java_lang_IncompatibleClassChangeError(), "Implementing class", nullHandle); @@ -878,9 +996,8 @@ guarantee_property(handler_pc < code_length, "Illegal exception table handler in class file %s", CHECK_(nullHandle)); if (catch_type_index != 0) { - guarantee_property(valid_cp_range(catch_type_index, cp->length()) && - (cp->tag_at(catch_type_index).is_klass() || - cp->tag_at(catch_type_index).is_unresolved_klass()), + guarantee_property(valid_cp_range(catch_type_index, cp->length()) && + is_klass_reference(cp, catch_type_index), "Catch type in exception table has bad constant type in class file %s", CHECK_(nullHandle)); } } @@ -1119,8 +1236,8 @@ } else if (tag == ITEM_Object) { u2 class_index = u2_array[i2++] = cfs->get_u2(CHECK); guarantee_property(valid_cp_range(class_index, cp->length()) && - cp->tag_at(class_index).is_unresolved_klass(), - "Bad class index %u in StackMap in class file %s", + is_klass_reference(cp, class_index), + "Bad class index %u in StackMap in class file %s", class_index, CHECK); } else if (tag == ITEM_Uninitialized) { u2 offset = u2_array[i2++] = cfs->get_u2(CHECK); @@ -1185,8 +1302,8 @@ checked_exception = cfs->get_u2_fast(); check_property( valid_cp_range(checked_exception, cp->length()) && - cp->tag_at(checked_exception).is_klass_reference(), - "Exception name has bad type at constant pool %u in class file %s", + is_klass_reference(cp, checked_exception), + "Exception name has bad type at constant pool %u in class file %s", checked_exception, CHECK_NULL); } } @@ -1362,16 +1479,25 @@ // Parse additional attributes in code attribute cfs->guarantee_more(2, CHECK_(nullHandle)); // code_attributes_count u2 code_attributes_count = cfs->get_u2_fast(); - unsigned int calculated_attribute_length = sizeof(max_stack) + - sizeof(max_locals) + - sizeof(code_length) + - code_length + - sizeof(exception_table_length) + - sizeof(code_attributes_count) + - exception_table_length*(sizeof(u2) /* start_pc */+ - sizeof(u2) /* end_pc */ + - sizeof(u2) /* handler_pc */ + - sizeof(u2) /* catch_type_index */); + + unsigned int calculated_attribute_length = 0; + + if (_major_version > 45 || (_major_version == 45 && _minor_version > 2)) { + calculated_attribute_length = + sizeof(max_stack) + sizeof(max_locals) + sizeof(code_length); + } else { + // 
max_stack, locals and length are smaller in pre-version 45.2 classes + calculated_attribute_length = sizeof(u1) + sizeof(u1) + sizeof(u2); + } + calculated_attribute_length += + code_length + + sizeof(exception_table_length) + + sizeof(code_attributes_count) + + exception_table_length * + ( sizeof(u2) + // start_pc + sizeof(u2) + // end_pc + sizeof(u2) + // handler_pc + sizeof(u2) ); // catch_type_index while (code_attributes_count--) { cfs->guarantee_more(6, CHECK_(nullHandle)); // code_attribute_name_index, code_attribute_length @@ -1909,18 +2035,18 @@ // Inner class index u2 inner_class_info_index = cfs->get_u2_fast(); check_property( - inner_class_info_index == 0 || - (valid_cp_range(inner_class_info_index, cp_size) && - cp->tag_at(inner_class_info_index).is_klass_reference()), - "inner_class_info_index %u has bad constant type in class file %s", + inner_class_info_index == 0 || + (valid_cp_range(inner_class_info_index, cp_size) && + is_klass_reference(cp, inner_class_info_index)), + "inner_class_info_index %u has bad constant type in class file %s", inner_class_info_index, CHECK_0); // Outer class index u2 outer_class_info_index = cfs->get_u2_fast(); check_property( outer_class_info_index == 0 || (valid_cp_range(outer_class_info_index, cp_size) && - cp->tag_at(outer_class_info_index).is_klass_reference()), - "outer_class_info_index %u has bad constant type in class file %s", + is_klass_reference(cp, outer_class_info_index)), + "outer_class_info_index %u has bad constant type in class file %s", outer_class_info_index, CHECK_0); // Inner class name u2 inner_name_index = cfs->get_u2_fast(); @@ -2081,7 +2207,7 @@ } // Validate the constant pool indices and types if (!cp->is_within_bounds(class_index) || - !cp->tag_at(class_index).is_klass_reference()) { + !is_klass_reference(cp, class_index)) { classfile_parse_error("Invalid or out-of-bounds class index in EnclosingMethod attribute in class file %s", CHECK); } if (method_index != 0 && @@ -2335,13 +2461,14 @@ // Incrementing next_nonstatic_oop_offset here advances the // location where the real java fields are placed. const int extra = java_lang_Class::number_of_fake_oop_fields; - (*next_nonstatic_oop_offset_ptr) += (extra * wordSize); + (*next_nonstatic_oop_offset_ptr) += (extra * heapOopSize); } -instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name, - Handle class_loader, - Handle protection_domain, +instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name, + Handle class_loader, + Handle protection_domain, + GrowableArray* cp_patches, symbolHandle& parsed_name, TRAPS) { // So that JVMTI can cache class file in the state before retransformable agents @@ -2373,6 +2500,7 @@ } } + _cp_patches = cp_patches; instanceKlassHandle nullHandle; @@ -2503,14 +2631,22 @@ CHECK_(nullHandle)); } else { check_property(valid_cp_range(super_class_index, cp_size) && - cp->tag_at(super_class_index).is_unresolved_klass(), - "Invalid superclass index %u in class file %s", + is_klass_reference(cp, super_class_index), + "Invalid superclass index %u in class file %s", super_class_index, CHECK_(nullHandle)); // The class name should be legal because it is checked when parsing constant pool. // However, make sure it is not an array type. 
+ bool is_array = false; + if (cp->tag_at(super_class_index).is_klass()) { + super_klass = instanceKlassHandle(THREAD, cp->resolved_klass_at(super_class_index)); + if (_need_verify) + is_array = super_klass->oop_is_array(); + } else if (_need_verify) { + is_array = (cp->unresolved_klass_at(super_class_index)->byte_at(0) == JVM_SIGNATURE_ARRAY); + } if (_need_verify) { - guarantee_property(cp->unresolved_klass_at(super_class_index)->byte_at(0) != JVM_SIGNATURE_ARRAY, + guarantee_property(!is_array, "Bad superclass name in class file %s", CHECK_(nullHandle)); } } @@ -2550,7 +2686,7 @@ objArrayHandle methods_default_annotations(THREAD, methods_default_annotations_oop); // We check super class after class file is parsed and format is checked - if (super_class_index > 0) { + if (super_class_index > 0 && super_klass.is_null()) { symbolHandle sk (THREAD, cp->klass_name_at(super_class_index)); if (access_flags.is_interface()) { // Before attempting to resolve the superclass, check for class format @@ -2567,6 +2703,10 @@ CHECK_(nullHandle)); KlassHandle kh (THREAD, k); super_klass = instanceKlassHandle(THREAD, kh()); + if (LinkWellKnownClasses) // my super class is well known to me + cp->klass_at_put(super_class_index, super_klass()); // eagerly resolve + } + if (super_klass.not_null()) { if (super_klass->is_interface()) { ResourceMark rm(THREAD); Exceptions::fthrow( @@ -2637,14 +2777,14 @@ int next_nonstatic_field_offset; // Calculate the starting byte offsets - next_static_oop_offset = (instanceKlass::header_size() + - align_object_offset(vtable_size) + - align_object_offset(itable_size)) * wordSize; - next_static_double_offset = next_static_oop_offset + - (fac.static_oop_count * oopSize); - if ( fac.static_double_count && - (Universe::field_type_should_be_aligned(T_DOUBLE) || - Universe::field_type_should_be_aligned(T_LONG)) ) { + next_static_oop_offset = (instanceKlass::header_size() + + align_object_offset(vtable_size) + + align_object_offset(itable_size)) * wordSize; + next_static_double_offset = next_static_oop_offset + + (fac.static_oop_count * heapOopSize); + if ( fac.static_double_count && + (Universe::field_type_should_be_aligned(T_DOUBLE) || + Universe::field_type_should_be_aligned(T_LONG)) ) { next_static_double_offset = align_size_up(next_static_double_offset, BytesPerLong); } @@ -2655,11 +2795,11 @@ next_static_byte_offset = next_static_short_offset + (fac.static_short_count * BytesPerShort); next_static_type_offset = align_size_up((next_static_byte_offset + - fac.static_byte_count ), wordSize ); - static_field_size = (next_static_type_offset - - next_static_oop_offset) / wordSize; - first_nonstatic_field_offset = (instanceOopDesc::header_size() + - nonstatic_field_size) * wordSize; + fac.static_byte_count ), wordSize ); + static_field_size = (next_static_type_offset - + next_static_oop_offset) / wordSize; + first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() + + nonstatic_field_size * heapOopSize; next_nonstatic_field_offset = first_nonstatic_field_offset; // Add fake fields for java.lang.Class instances (also see below) @@ -2681,6 +2821,14 @@ int nonstatic_byte_count = fac.nonstatic_byte_count; int nonstatic_oop_count = fac.nonstatic_oop_count; + bool super_has_nonstatic_fields = + (super_klass() != NULL && super_klass->has_nonstatic_fields()); + bool has_nonstatic_fields = super_has_nonstatic_fields || + ((nonstatic_double_count + nonstatic_word_count + + nonstatic_short_count + nonstatic_byte_count + + nonstatic_oop_count) != 0); + + // Prepare list of oops for 
oop maps generation. u2* nonstatic_oop_offsets; u2* nonstatic_oop_length; @@ -2697,7 +2845,7 @@ java_lang_Class_fix_post(&next_nonstatic_field_offset); nonstatic_oop_offsets[0] = (u2)first_nonstatic_field_offset; int fake_oop_count = (( next_nonstatic_field_offset - - first_nonstatic_field_offset ) / oopSize); + first_nonstatic_field_offset ) / heapOopSize); nonstatic_oop_length [0] = (u2)fake_oop_count; nonstatic_oop_map_count = 1; nonstatic_oop_count -= fake_oop_count; @@ -2708,8 +2856,8 @@ #ifndef PRODUCT if( PrintCompactFieldsSavings ) { - next_nonstatic_double_offset = next_nonstatic_field_offset + - (nonstatic_oop_count * oopSize); + next_nonstatic_double_offset = next_nonstatic_field_offset + + (nonstatic_oop_count * heapOopSize); if ( nonstatic_double_count > 0 ) { next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong); } @@ -2720,9 +2868,9 @@ next_nonstatic_byte_offset = next_nonstatic_short_offset + (nonstatic_short_count * BytesPerShort); next_nonstatic_type_offset = align_size_up((next_nonstatic_byte_offset + - nonstatic_byte_count ), wordSize ); - orig_nonstatic_field_size = nonstatic_field_size + - ((next_nonstatic_type_offset - first_nonstatic_field_offset)/wordSize); + nonstatic_byte_count ), heapOopSize ); + orig_nonstatic_field_size = nonstatic_field_size + + ((next_nonstatic_type_offset - first_nonstatic_field_offset)/heapOopSize); } #endif bool compact_fields = CompactFields; @@ -2743,7 +2891,15 @@ class_name() == vmSymbols::java_lang_ref_SoftReference() || class_name() == vmSymbols::java_lang_StackTraceElement() || class_name() == vmSymbols::java_lang_String() || - class_name() == vmSymbols::java_lang_Throwable()) ) { + class_name() == vmSymbols::java_lang_Throwable() || + class_name() == vmSymbols::java_lang_Boolean() || + class_name() == vmSymbols::java_lang_Character() || + class_name() == vmSymbols::java_lang_Float() || + class_name() == vmSymbols::java_lang_Double() || + class_name() == vmSymbols::java_lang_Byte() || + class_name() == vmSymbols::java_lang_Short() || + class_name() == vmSymbols::java_lang_Integer() || + class_name() == vmSymbols::java_lang_Long())) { allocation_style = 0; // Allocate oops first compact_fields = false; // Don't compact fields } @@ -2751,8 +2907,8 @@ if( allocation_style == 0 ) { // Fields order: oops, longs/doubles, ints, shorts/chars, bytes next_nonstatic_oop_offset = next_nonstatic_field_offset; - next_nonstatic_double_offset = next_nonstatic_oop_offset + - (nonstatic_oop_count * oopSize); + next_nonstatic_double_offset = next_nonstatic_oop_offset + + (nonstatic_oop_count * heapOopSize); } else if( allocation_style == 1 ) { // Fields order: longs/doubles, ints, shorts/chars, bytes, oops next_nonstatic_double_offset = next_nonstatic_field_offset; @@ -2798,12 +2954,12 @@ } // Allocate oop field in the gap if there are no other fields for that. 
nonstatic_oop_space_offset = offset; - if( length >= oopSize && nonstatic_oop_count > 0 && + if( length >= heapOopSize && nonstatic_oop_count > 0 && allocation_style != 0 ) { // when oop fields not first nonstatic_oop_count -= 1; nonstatic_oop_space_count = 1; // Only one will fit - length -= oopSize; - offset += oopSize; + length -= heapOopSize; + offset += heapOopSize; } } } @@ -2821,14 +2977,13 @@ } else { // allocation_style == 1 next_nonstatic_oop_offset = next_nonstatic_byte_offset + nonstatic_byte_count; if( nonstatic_oop_count > 0 ) { - notaligned_offset = next_nonstatic_oop_offset; - next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, oopSize); + next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize); } - notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * oopSize); + notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize); } - next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize ); + next_nonstatic_type_offset = align_size_up(notaligned_offset, heapOopSize ); nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset - - first_nonstatic_field_offset)/wordSize); + - first_nonstatic_field_offset)/heapOopSize); // Iterate over fields again and compute correct offsets. // The field allocation type was temporarily stored in the offset slot. @@ -2840,7 +2995,7 @@ switch (atype) { case STATIC_OOP: real_offset = next_static_oop_offset; - next_static_oop_offset += oopSize; + next_static_oop_offset += heapOopSize; break; case STATIC_BYTE: real_offset = next_static_byte_offset; @@ -2862,16 +3017,16 @@ case NONSTATIC_OOP: if( nonstatic_oop_space_count > 0 ) { real_offset = nonstatic_oop_space_offset; - nonstatic_oop_space_offset += oopSize; + nonstatic_oop_space_offset += heapOopSize; nonstatic_oop_space_count -= 1; } else { real_offset = next_nonstatic_oop_offset; - next_nonstatic_oop_offset += oopSize; + next_nonstatic_oop_offset += heapOopSize; } // Update oop maps if( nonstatic_oop_map_count > 0 && - nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == - (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * oopSize) ) { + nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == + (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * heapOopSize) ) { // Extend current oop map nonstatic_oop_length[nonstatic_oop_map_count - 1] += 1; } else { @@ -2929,9 +3084,10 @@ // Size of instances int instance_size; + next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize ); instance_size = align_object_size(next_nonstatic_type_offset / wordSize); - assert(instance_size == align_object_size(instanceOopDesc::header_size() + nonstatic_field_size), "consistent layout helper value"); + assert(instance_size == align_object_size(align_size_up((instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize), wordSize) / wordSize), "consistent layout helper value"); // Size of non-static oop map blocks (in words) allocated at end of klass int nonstatic_oop_map_size = compute_oop_map_size(super_klass, nonstatic_oop_map_count, first_nonstatic_oop_offset); @@ -2964,7 +3120,8 @@ //this_klass->set_super(super_klass()); this_klass->set_class_loader(class_loader()); this_klass->set_nonstatic_field_size(nonstatic_field_size); - this_klass->set_static_oop_field_size(fac.static_oop_count); + this_klass->set_has_nonstatic_fields(has_nonstatic_fields); + this_klass->set_static_oop_field_size(fac.static_oop_count); 
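A worked example of why the layout arithmetic above switches from oopSize/wordSize to heapOopSize (typical 64-bit HotSpot values; the concrete numbers are illustrative, not taken from this patch):

    // 64-bit VM with UseCompressedOops:    wordSize == 8, heapOopSize == 4
    // 64-bit VM without compressed oops:   wordSize == 8, heapOopSize == 8
    //
    // For a class with three nonstatic oop fields and no inherited fields:
    //   first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes()
    //                                  + 0 * heapOopSize;    // 12 or 16 bytes
    //   next_nonstatic_oop_offset    = first_nonstatic_field_offset
    //                                  + 3 * heapOopSize;    // 24 or 40 bytes
    //
    // Using oopSize (a full machine word on 64-bit) for the stride would
    // mis-place every compressed oop field, hence the blanket replacement.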
cp->set_pool_holder(this_klass()); this_klass->set_constants(cp()); this_klass->set_local_interfaces(local_interfaces()); @@ -2976,6 +3133,8 @@ this_klass->set_method_ordering(method_ordering()); this_klass->set_initial_method_idnum(methods->length()); this_klass->set_name(cp->klass_name_at(this_class_index)); + if (LinkWellKnownClasses) // I am well known to myself + cp->klass_at_put(this_class_index, this_klass()); // eagerly resolve this_klass->set_protection_domain(protection_domain()); this_klass->set_fields_annotations(fields_annotations()); this_klass->set_methods_annotations(methods_annotations()); @@ -3088,13 +3247,15 @@ #ifndef PRODUCT if( PrintCompactFieldsSavings ) { if( nonstatic_field_size < orig_nonstatic_field_size ) { - tty->print("[Saved %d of %3d words in %s]\n", - orig_nonstatic_field_size - nonstatic_field_size, - orig_nonstatic_field_size, this_klass->external_name()); + tty->print("[Saved %d of %d bytes in %s]\n", + (orig_nonstatic_field_size - nonstatic_field_size)*heapOopSize, + orig_nonstatic_field_size*heapOopSize, + this_klass->external_name()); } else if( nonstatic_field_size > orig_nonstatic_field_size ) { - tty->print("[Wasted %d over %3d words in %s]\n", - nonstatic_field_size - orig_nonstatic_field_size, - orig_nonstatic_field_size, this_klass->external_name()); + tty->print("[Wasted %d over %d bytes in %s]\n", + (nonstatic_field_size - orig_nonstatic_field_size)*heapOopSize, + orig_nonstatic_field_size*heapOopSize, + this_klass->external_name()); } } #endif @@ -3122,7 +3283,7 @@ OopMapBlock* first_map = super->start_of_nonstatic_oop_maps(); OopMapBlock* last_map = first_map + map_size - 1; - int next_offset = last_map->offset() + (last_map->length() * oopSize); + int next_offset = last_map->offset() + (last_map->length() * heapOopSize); if (next_offset == first_nonstatic_oop_offset) { // There is no gap bettwen superklass's last oop field and first // local oop field, merge maps. @@ -3482,9 +3643,11 @@ } bool ClassFileParser::is_supported_version(u2 major, u2 minor) { - return (major >= JAVA_MIN_SUPPORTED_VERSION) && - (major <= JAVA_MAX_SUPPORTED_VERSION) && - ((major != JAVA_MAX_SUPPORTED_VERSION) || + u2 max_version = JDK_Version::is_gte_jdk17x_version() ? + JAVA_MAX_SUPPORTED_VERSION : JAVA_6_VERSION; + return (major >= JAVA_MIN_SUPPORTED_VERSION) && + (major <= max_version) && + ((major != max_version) || (minor <= JAVA_MAX_SUPPORTED_MINOR_VERSION)); } --- old/hotspot/src/share/vm/classfile/classFileParser.hpp 2009-08-01 04:10:02.274858351 +0100 +++ new/hotspot/src/share/vm/classfile/classFileParser.hpp 2009-08-01 04:10:02.190968815 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)classFileParser.hpp 1.85 07/07/09 11:19:50 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ u2 _major_version; u2 _minor_version; symbolHandle _class_name; + GrowableArray* _cp_patches; // overrides for CP entries bool _has_finalizer; bool _has_empty_finalizer; @@ -206,6 +207,35 @@ char* skip_over_field_name(char* name, bool slash_ok, unsigned int length); char* skip_over_field_signature(char* signature, bool void_ok, unsigned int length, TRAPS); + bool has_cp_patch_at(int index) { + assert(AnonymousClasses, ""); + assert(index >= 0, "oob"); + return (_cp_patches != NULL + && index < _cp_patches->length() + && _cp_patches->adr_at(index)->not_null()); + } + Handle cp_patch_at(int index) { + assert(has_cp_patch_at(index), "oob"); + return _cp_patches->at(index); + } + Handle clear_cp_patch_at(int index) { + Handle patch = cp_patch_at(index); + _cp_patches->at_put(index, Handle()); + assert(!has_cp_patch_at(index), ""); + return patch; + } + void patch_constant_pool(constantPoolHandle cp, int index, Handle patch, TRAPS); + + // Wrapper for constantTag.is_klass_[or_]reference. + // In older versions of the VM, klassOops cannot sneak into early phases of + // constant pool construction, but in later versions they can. + // %%% Let's phase out the old is_klass_reference. + bool is_klass_reference(constantPoolHandle cp, int index) { + return ((LinkWellKnownClasses || AnonymousClasses) + ? cp->tag_at(index).is_klass_or_reference() + : cp->tag_at(index).is_klass_reference()); + } + public: // Constructor ClassFileParser(ClassFileStream* st) { set_stream(st); } @@ -221,6 +251,14 @@ Handle class_loader, Handle protection_domain, symbolHandle& parsed_name, + TRAPS) { + return parseClassFile(name, class_loader, protection_domain, NULL, parsed_name, THREAD); + } + instanceKlassHandle parseClassFile(symbolHandle name, + Handle class_loader, + Handle protection_domain, + GrowableArray* cp_patches, + symbolHandle& parsed_name, TRAPS); // Verifier checks --- old/hotspot/src/share/vm/classfile/dictionary.cpp 2009-08-01 04:10:03.160141526 +0100 +++ new/hotspot/src/share/vm/classfile/dictionary.cpp 2009-08-01 04:10:03.066271819 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)dictionary.cpp 1.28 08/11/24 12:21:02 JVM" #endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- old/hotspot/src/share/vm/classfile/javaClasses.cpp 2009-08-01 04:10:04.022354920 +0100 +++ new/hotspot/src/share/vm/classfile/javaClasses.cpp 2009-08-01 04:10:03.927646680 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)javaClasses.cpp 1.250 08/01/17 09:41:13 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,25 +28,29 @@ # include "incls/_precompiled.incl" # include "incls/_javaClasses.cpp.incl" -// Helpful macro for computing field offsets at run time rather than hardcoding them -#define COMPUTE_OFFSET(klass_name_as_C_str, dest_offset, klass_oop, name_symbol, signature_symbol) \ -{ \ - fieldDescriptor fd; \ - instanceKlass* ik = instanceKlass::cast(klass_oop); \ - if (!ik->find_local_field(name_symbol, signature_symbol, &fd)) { \ - fatal("Invalid layout of " klass_name_as_C_str); \ - } \ - dest_offset = fd.offset(); \ +// Helpful routine for computing field offsets at run time rather than hardcoding them +static void +compute_offset(int &dest_offset, + klassOop klass_oop, symbolOop name_symbol, symbolOop signature_symbol) { + fieldDescriptor fd; + instanceKlass* ik = instanceKlass::cast(klass_oop); + if (!ik->find_local_field(name_symbol, signature_symbol, &fd)) { + ResourceMark rm; + tty->print_cr("Invalid layout of %s at %s", ik->external_name(), name_symbol->as_C_string()); + fatal("Invalid layout of preloaded class"); + } + dest_offset = fd.offset(); } // Same as above but for "optional" offsets that might not be present in certain JDK versions -#define COMPUTE_OPTIONAL_OFFSET(klass_name_as_C_str, dest_offset, klass_oop, name_symbol, signature_symbol) \ -{ \ - fieldDescriptor fd; \ - instanceKlass* ik = instanceKlass::cast(klass_oop); \ - if (ik->find_local_field(name_symbol, signature_symbol, &fd)) { \ - dest_offset = fd.offset(); \ - } \ +static void +compute_optional_offset(int& dest_offset, + klassOop klass_oop, symbolOop name_symbol, symbolOop signature_symbol) { + fieldDescriptor fd; + instanceKlass* ik = instanceKlass::cast(klass_oop); + if (ik->find_local_field(name_symbol, signature_symbol, &fd)) { + dest_offset = fd.offset(); + } } Handle java_lang_String::basic_create(int length, bool tenured, TRAPS) { @@ -146,7 +150,7 @@ jstring js = NULL; { JavaThread* thread = (JavaThread*)THREAD; assert(thread->is_Java_thread(), "must be java thread"); - HandleMark hm(thread); + HandleMark hm(thread); ThreadToNativeFromVM ttn(thread); js = (_to_java_string_fn)(thread->jni_environment(), str); } @@ -174,7 +178,7 @@ JNIEnv *env = thread->jni_environment(); jstring js = (jstring) JNIHandles::make_local(env, java_string()); bool is_copy; - HandleMark hm(thread); + HandleMark hm(thread); ThreadToNativeFromVM ttn(thread); native_platform_string = (_to_platform_string_fn)(env, js, &is_copy); assert(is_copy == JNI_TRUE, "is_copy value changed"); @@ -447,8 +451,8 @@ klassOop k = SystemDictionary::class_klass(); // The classRedefinedCount field is only present starting in 1.5, - // so don't go fatal. - COMPUTE_OPTIONAL_OFFSET("java.lang.Class", classRedefinedCount_offset, + // so don't go fatal. 
+ compute_optional_offset(classRedefinedCount_offset, k, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature()); } @@ -502,37 +506,33 @@ assert(_group_offset == 0, "offsets should be initialized only once"); klassOop k = SystemDictionary::thread_klass(); - COMPUTE_OFFSET("java.lang.Thread", _name_offset, k, vmSymbols::name_name(), vmSymbols::char_array_signature()); - COMPUTE_OFFSET("java.lang.Thread", _group_offset, k, vmSymbols::group_name(), vmSymbols::threadgroup_signature()); - COMPUTE_OFFSET("java.lang.Thread", _contextClassLoader_offset, k, vmSymbols::contextClassLoader_name(), vmSymbols::classloader_signature()); - COMPUTE_OFFSET("java.lang.Thread", _inheritedAccessControlContext_offset, k, vmSymbols::inheritedAccessControlContext_name(), vmSymbols::accesscontrolcontext_signature()); - COMPUTE_OFFSET("java.lang.Thread", _priority_offset, k, vmSymbols::priority_name(), vmSymbols::int_signature()); - COMPUTE_OFFSET("java.lang.Thread", _daemon_offset, k, vmSymbols::daemon_name(), vmSymbols::bool_signature()); - COMPUTE_OFFSET("java.lang.Thread", _eetop_offset, k, vmSymbols::eetop_name(), vmSymbols::long_signature()); - COMPUTE_OFFSET("java.lang.Thread", _stillborn_offset, k, vmSymbols::stillborn_name(), vmSymbols::bool_signature()); - // The stackSize field is only present starting in 1.4, so don't go fatal. - COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _stackSize_offset, k, vmSymbols::stackSize_name(), vmSymbols::long_signature()); - // The tid and thread_status fields are only present starting in 1.5, so don't go fatal. - COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _tid_offset, k, vmSymbols::thread_id_name(), vmSymbols::long_signature()); - COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _thread_status_offset, k, vmSymbols::thread_status_name(), vmSymbols::int_signature()); - // The parkBlocker field is only present starting in 1.6, so don't go fatal. - COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _park_blocker_offset, k, vmSymbols::park_blocker_name(), vmSymbols::object_signature()); - COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _park_event_offset, k, vmSymbols::park_event_name(), + compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::char_array_signature()); + compute_offset(_group_offset, k, vmSymbols::group_name(), vmSymbols::threadgroup_signature()); + compute_offset(_contextClassLoader_offset, k, vmSymbols::contextClassLoader_name(), vmSymbols::classloader_signature()); + compute_offset(_inheritedAccessControlContext_offset, k, vmSymbols::inheritedAccessControlContext_name(), vmSymbols::accesscontrolcontext_signature()); + compute_offset(_priority_offset, k, vmSymbols::priority_name(), vmSymbols::int_signature()); + compute_offset(_daemon_offset, k, vmSymbols::daemon_name(), vmSymbols::bool_signature()); + compute_offset(_eetop_offset, k, vmSymbols::eetop_name(), vmSymbols::long_signature()); + compute_offset(_stillborn_offset, k, vmSymbols::stillborn_name(), vmSymbols::bool_signature()); + // The stackSize field is only present starting in 1.4, so don't go fatal. + compute_optional_offset(_stackSize_offset, k, vmSymbols::stackSize_name(), vmSymbols::long_signature()); + // The tid and thread_status fields are only present starting in 1.5, so don't go fatal. + compute_optional_offset(_tid_offset, k, vmSymbols::thread_id_name(), vmSymbols::long_signature()); + compute_optional_offset(_thread_status_offset, k, vmSymbols::thread_status_name(), vmSymbols::int_signature()); + // The parkBlocker field is only present starting in 1.6, so don't go fatal. 
+ compute_optional_offset(_park_blocker_offset, k, vmSymbols::park_blocker_name(), vmSymbols::object_signature()); + compute_optional_offset(_park_event_offset, k, vmSymbols::park_event_name(), vmSymbols::long_signature()); } JavaThread* java_lang_Thread::thread(oop java_thread) { - return (JavaThread*) java_thread->obj_field(_eetop_offset); + return (JavaThread*)java_thread->address_field(_eetop_offset); } void java_lang_Thread::set_thread(oop java_thread, JavaThread* thread) { - // We are storing a JavaThread* (malloc'ed data) into a long field in the thread - // object. The store has to be 64-bit wide so we use a pointer store, but we - // cannot call oopDesc::obj_field_put since it includes a write barrier! - oop* addr = java_thread->obj_field_addr(_eetop_offset); - *addr = (oop) thread; + java_thread->address_field_put(_eetop_offset, (address)thread); } @@ -652,8 +652,8 @@ } oop java_lang_Thread::park_blocker(oop java_thread) { - assert(JDK_Version::supports_thread_park_blocker() && _park_blocker_offset != 0, - "Must support parkBlocker field"); + assert(JDK_Version::current().supports_thread_park_blocker() && + _park_blocker_offset != 0, "Must support parkBlocker field"); if (_park_blocker_offset > 0) { return java_thread->obj_field(_park_blocker_offset); @@ -766,16 +766,16 @@ klassOop k = SystemDictionary::threadGroup_klass(); - COMPUTE_OFFSET("java.lang.ThreadGroup", _parent_offset, k, vmSymbols::parent_name(), vmSymbols::threadgroup_signature()); - COMPUTE_OFFSET("java.lang.ThreadGroup", _name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature()); - COMPUTE_OFFSET("java.lang.ThreadGroup", _threads_offset, k, vmSymbols::threads_name(), vmSymbols::thread_array_signature()); - COMPUTE_OFFSET("java.lang.ThreadGroup", _groups_offset, k, vmSymbols::groups_name(), vmSymbols::threadgroup_array_signature()); - COMPUTE_OFFSET("java.lang.ThreadGroup", _maxPriority_offset, k, vmSymbols::maxPriority_name(), vmSymbols::int_signature()); - COMPUTE_OFFSET("java.lang.ThreadGroup", _destroyed_offset, k, vmSymbols::destroyed_name(), vmSymbols::bool_signature()); - COMPUTE_OFFSET("java.lang.ThreadGroup", _daemon_offset, k, vmSymbols::daemon_name(), vmSymbols::bool_signature()); - COMPUTE_OFFSET("java.lang.ThreadGroup", _vmAllowSuspension_offset, k, vmSymbols::vmAllowSuspension_name(), vmSymbols::bool_signature()); - COMPUTE_OFFSET("java.lang.ThreadGroup", _nthreads_offset, k, vmSymbols::nthreads_name(), vmSymbols::int_signature()); - COMPUTE_OFFSET("java.lang.ThreadGroup", _ngroups_offset, k, vmSymbols::ngroups_name(), vmSymbols::int_signature()); + compute_offset(_parent_offset, k, vmSymbols::parent_name(), vmSymbols::threadgroup_signature()); + compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature()); + compute_offset(_threads_offset, k, vmSymbols::threads_name(), vmSymbols::thread_array_signature()); + compute_offset(_groups_offset, k, vmSymbols::groups_name(), vmSymbols::threadgroup_array_signature()); + compute_offset(_maxPriority_offset, k, vmSymbols::maxPriority_name(), vmSymbols::int_signature()); + compute_offset(_destroyed_offset, k, vmSymbols::destroyed_name(), vmSymbols::bool_signature()); + compute_offset(_daemon_offset, k, vmSymbols::daemon_name(), vmSymbols::bool_signature()); + compute_offset(_vmAllowSuspension_offset, k, vmSymbols::vmAllowSuspension_name(), vmSymbols::bool_signature()); + compute_offset(_nthreads_offset, k, vmSymbols::nthreads_name(), vmSymbols::int_signature()); + compute_offset(_ngroups_offset, k, vmSymbols::ngroups_name(), 
vmSymbols::int_signature()); } oop java_lang_Throwable::backtrace(oop throwable) { @@ -1018,7 +1018,6 @@ typeArrayOop _bcis; int _index; bool _dirty; - bool _done; No_Safepoint_Verifier _nsv; public: @@ -1032,20 +1031,18 @@ }; // constructor for new backtrace - BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL) { + BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _dirty(false) { expand(CHECK); _backtrace = _head; _index = 0; - _dirty = false; - _done = false; } void flush() { if (_dirty && _methods != NULL) { BarrierSet* bs = Universe::heap()->barrier_set(); assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); - bs->write_ref_array(MemRegion((HeapWord*)_methods->obj_at_addr(0), - _methods->length() * HeapWordsPerOop)); + bs->write_ref_array(MemRegion((HeapWord*)_methods->base(), + _methods->array_size())); _dirty = false; } } @@ -1089,8 +1086,9 @@ method = mhandle(); } - // _methods->obj_at_put(_index, method); - *_methods->obj_at_addr(_index) = method; + _methods->obj_at_put(_index, method); + // bad for UseCompressedOops + // *_methods->obj_at_addr(_index) = method; _bcis->ushort_at_put(_index, bci); _index++; _dirty = true; @@ -1370,6 +1368,7 @@ // Allocate java.lang.StackTraceElement instance klassOop k = SystemDictionary::stackTraceElement_klass(); + assert(k != NULL, "must be loaded in 1.4+"); instanceKlassHandle ik (THREAD, k); if (ik->should_be_initialized()) { ik->initialize(CHECK_0); @@ -1407,7 +1406,7 @@ void java_lang_reflect_AccessibleObject::compute_offsets() { klassOop k = SystemDictionary::reflect_accessible_object_klass(); - COMPUTE_OFFSET("java.lang.reflect.AccessibleObject", override_offset, k, vmSymbols::override_name(), vmSymbols::bool_signature()); + compute_offset(override_offset, k, vmSymbols::override_name(), vmSymbols::bool_signature()); } jboolean java_lang_reflect_AccessibleObject::override(oop reflect) { @@ -1422,22 +1421,22 @@ void java_lang_reflect_Method::compute_offsets() { klassOop k = SystemDictionary::reflect_method_klass(); - COMPUTE_OFFSET("java.lang.reflect.Method", clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature()); - COMPUTE_OFFSET("java.lang.reflect.Method", name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature()); - COMPUTE_OFFSET("java.lang.reflect.Method", returnType_offset, k, vmSymbols::returnType_name(), vmSymbols::class_signature()); - COMPUTE_OFFSET("java.lang.reflect.Method", parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature()); - COMPUTE_OFFSET("java.lang.reflect.Method", exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature()); - COMPUTE_OFFSET("java.lang.reflect.Method", slot_offset, k, vmSymbols::slot_name(), vmSymbols::int_signature()); - COMPUTE_OFFSET("java.lang.reflect.Method", modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature()); + compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature()); + compute_offset(name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature()); + compute_offset(returnType_offset, k, vmSymbols::returnType_name(), vmSymbols::class_signature()); + compute_offset(parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature()); + compute_offset(exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature()); + compute_offset(slot_offset, k, vmSymbols::slot_name(), vmSymbols::int_signature()); + 
compute_offset(modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature()); // The generic signature and annotations fields are only present in 1.5 signature_offset = -1; annotations_offset = -1; parameter_annotations_offset = -1; annotation_default_offset = -1; - COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature()); - COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", annotations_offset, k, vmSymbols::annotations_name(), vmSymbols::byte_array_signature()); - COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature()); - COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", annotation_default_offset, k, vmSymbols::annotation_default_name(), vmSymbols::byte_array_signature()); + compute_optional_offset(signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature()); + compute_optional_offset(annotations_offset, k, vmSymbols::annotations_name(), vmSymbols::byte_array_signature()); + compute_optional_offset(parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature()); + compute_optional_offset(annotation_default_offset, k, vmSymbols::annotation_default_name(), vmSymbols::byte_array_signature()); } Handle java_lang_reflect_Method::create(TRAPS) { @@ -1585,18 +1584,18 @@ void java_lang_reflect_Constructor::compute_offsets() { klassOop k = SystemDictionary::reflect_constructor_klass(); - COMPUTE_OFFSET("java.lang.reflect.Constructor", clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature()); - COMPUTE_OFFSET("java.lang.reflect.Constructor", parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature()); - COMPUTE_OFFSET("java.lang.reflect.Constructor", exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature()); - COMPUTE_OFFSET("java.lang.reflect.Constructor", slot_offset, k, vmSymbols::slot_name(), vmSymbols::int_signature()); - COMPUTE_OFFSET("java.lang.reflect.Constructor", modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature()); + compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature()); + compute_offset(parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature()); + compute_offset(exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature()); + compute_offset(slot_offset, k, vmSymbols::slot_name(), vmSymbols::int_signature()); + compute_offset(modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature()); // The generic signature and annotations fields are only present in 1.5 signature_offset = -1; annotations_offset = -1; parameter_annotations_offset = -1; - COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Constructor", signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature()); - COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Constructor", annotations_offset, k, vmSymbols::annotations_name(), vmSymbols::byte_array_signature()); - COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Constructor", parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature()); + compute_optional_offset(signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature()); + compute_optional_offset(annotations_offset, k, vmSymbols::annotations_name(), 
vmSymbols::byte_array_signature()); + compute_optional_offset(parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature()); } Handle java_lang_reflect_Constructor::create(TRAPS) { @@ -1709,16 +1708,16 @@ void java_lang_reflect_Field::compute_offsets() { klassOop k = SystemDictionary::reflect_field_klass(); - COMPUTE_OFFSET("java.lang.reflect.Field", clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature()); - COMPUTE_OFFSET("java.lang.reflect.Field", name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature()); - COMPUTE_OFFSET("java.lang.reflect.Field", type_offset, k, vmSymbols::type_name(), vmSymbols::class_signature()); - COMPUTE_OFFSET("java.lang.reflect.Field", slot_offset, k, vmSymbols::slot_name(), vmSymbols::int_signature()); - COMPUTE_OFFSET("java.lang.reflect.Field", modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature()); + compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature()); + compute_offset(name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature()); + compute_offset(type_offset, k, vmSymbols::type_name(), vmSymbols::class_signature()); + compute_offset(slot_offset, k, vmSymbols::slot_name(), vmSymbols::int_signature()); + compute_offset(modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature()); // The generic signature and annotations fields are only present in 1.5 signature_offset = -1; annotations_offset = -1; - COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Field", signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature()); - COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Field", annotations_offset, k, vmSymbols::annotations_name(), vmSymbols::byte_array_signature()); + compute_optional_offset(signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature()); + compute_optional_offset(annotations_offset, k, vmSymbols::annotations_name(), vmSymbols::byte_array_signature()); } Handle java_lang_reflect_Field::create(TRAPS) { @@ -1818,7 +1817,7 @@ klassOop k = SystemDictionary::reflect_constant_pool_klass(); // This null test can be removed post beta if (k != NULL) { - COMPUTE_OFFSET("sun.reflect.ConstantPool", _cp_oop_offset, k, vmSymbols::constantPoolOop_name(), vmSymbols::object_signature()); + compute_offset(_cp_oop_offset, k, vmSymbols::constantPoolOop_name(), vmSymbols::object_signature()); } } @@ -1848,52 +1847,47 @@ klassOop k = SystemDictionary::reflect_unsafe_static_field_accessor_impl_klass(); // This null test can be removed post beta if (k != NULL) { - COMPUTE_OFFSET("sun.reflect.UnsafeStaticFieldAccessorImpl", _base_offset, k, + compute_offset(_base_offset, k, vmSymbols::base_name(), vmSymbols::object_signature()); } } -oop java_lang_boxing_object::initialize_and_allocate(klassOop k, TRAPS) { - instanceKlassHandle h (THREAD, k); - if (!h->is_initialized()) h->initialize(CHECK_0); - return h->allocate_instance(THREAD); +oop java_lang_boxing_object::initialize_and_allocate(BasicType type, TRAPS) { + klassOop k = SystemDictionary::box_klass(type); + if (k == NULL) return NULL; + instanceKlassHandle h (THREAD, k); + if (!h->is_initialized()) h->initialize(CHECK_0); + return h->allocate_instance(THREAD); } oop java_lang_boxing_object::create(BasicType type, jvalue* value, TRAPS) { - oop box; + oop box = initialize_and_allocate(type, CHECK_0); + if (box == NULL) return NULL; switch (type) { case T_BOOLEAN: - box = initialize_and_allocate(SystemDictionary::boolean_klass(), CHECK_0); 
box->bool_field_put(value_offset, value->z); break; case T_CHAR: - box = initialize_and_allocate(SystemDictionary::char_klass(), CHECK_0); box->char_field_put(value_offset, value->c); break; case T_FLOAT: - box = initialize_and_allocate(SystemDictionary::float_klass(), CHECK_0); box->float_field_put(value_offset, value->f); break; case T_DOUBLE: - box = initialize_and_allocate(SystemDictionary::double_klass(), CHECK_0); - box->double_field_put(value_offset, value->d); + box->double_field_put(long_value_offset, value->d); break; case T_BYTE: - box = initialize_and_allocate(SystemDictionary::byte_klass(), CHECK_0); box->byte_field_put(value_offset, value->b); break; case T_SHORT: - box = initialize_and_allocate(SystemDictionary::short_klass(), CHECK_0); box->short_field_put(value_offset, value->s); break; case T_INT: - box = initialize_and_allocate(SystemDictionary::int_klass(), CHECK_0); box->int_field_put(value_offset, value->i); break; case T_LONG: - box = initialize_and_allocate(SystemDictionary::long_klass(), CHECK_0); - box->long_field_put(value_offset, value->j); + box->long_field_put(long_value_offset, value->j); break; default: return NULL; @@ -1902,116 +1896,108 @@ } +BasicType java_lang_boxing_object::basic_type(oop box) { + if (box == NULL) return T_ILLEGAL; + BasicType type = SystemDictionary::box_klass_type(box->klass()); + if (type == T_OBJECT) // 'unknown' value returned by SD::bkt + return T_ILLEGAL; + return type; +} + + BasicType java_lang_boxing_object::get_value(oop box, jvalue* value) { - klassOop k = box->klass(); - if (k == SystemDictionary::boolean_klass()) { + BasicType type = SystemDictionary::box_klass_type(box->klass()); + switch (type) { + case T_BOOLEAN: value->z = box->bool_field(value_offset); - return T_BOOLEAN; - } - if (k == SystemDictionary::char_klass()) { + break; + case T_CHAR: value->c = box->char_field(value_offset); - return T_CHAR; - } - if (k == SystemDictionary::float_klass()) { + break; + case T_FLOAT: value->f = box->float_field(value_offset); - return T_FLOAT; - } - if (k == SystemDictionary::double_klass()) { - value->d = box->double_field(value_offset); - return T_DOUBLE; - } - if (k == SystemDictionary::byte_klass()) { + break; + case T_DOUBLE: + value->d = box->double_field(long_value_offset); + break; + case T_BYTE: value->b = box->byte_field(value_offset); - return T_BYTE; - } - if (k == SystemDictionary::short_klass()) { + break; + case T_SHORT: value->s = box->short_field(value_offset); - return T_SHORT; - } - if (k == SystemDictionary::int_klass()) { + break; + case T_INT: value->i = box->int_field(value_offset); - return T_INT; - } - if (k == SystemDictionary::long_klass()) { - value->j = box->long_field(value_offset); - return T_LONG; - } - return T_ILLEGAL; + break; + case T_LONG: + value->j = box->long_field(long_value_offset); + break; + default: + return T_ILLEGAL; + } // end switch + return type; } BasicType java_lang_boxing_object::set_value(oop box, jvalue* value) { - klassOop k = box->klass(); - if (k == SystemDictionary::boolean_klass()) { + BasicType type = SystemDictionary::box_klass_type(box->klass()); + switch (type) { + case T_BOOLEAN: box->bool_field_put(value_offset, value->z); - return T_BOOLEAN; - } - if (k == SystemDictionary::char_klass()) { + break; + case T_CHAR: box->char_field_put(value_offset, value->c); - return T_CHAR; - } - if (k == SystemDictionary::float_klass()) { + break; + case T_FLOAT: box->float_field_put(value_offset, value->f); - return T_FLOAT; - } - if (k == SystemDictionary::double_klass()) { 
- box->double_field_put(value_offset, value->d); - return T_DOUBLE; - } - if (k == SystemDictionary::byte_klass()) { + break; + case T_DOUBLE: + box->double_field_put(long_value_offset, value->d); + break; + case T_BYTE: box->byte_field_put(value_offset, value->b); - return T_BYTE; - } - if (k == SystemDictionary::short_klass()) { + break; + case T_SHORT: box->short_field_put(value_offset, value->s); - return T_SHORT; - } - if (k == SystemDictionary::int_klass()) { + break; + case T_INT: box->int_field_put(value_offset, value->i); - return T_INT; - } - if (k == SystemDictionary::long_klass()) { - box->long_field_put(value_offset, value->j); - return T_LONG; - } - return T_ILLEGAL; + break; + case T_LONG: + box->long_field_put(long_value_offset, value->j); + break; + default: + return T_ILLEGAL; + } // end switch + return type; } // Support for java_lang_ref_Reference - -void java_lang_ref_Reference::set_referent(oop ref, oop value) { - ref->obj_field_put(referent_offset, value); -} - -oop* java_lang_ref_Reference::referent_addr(oop ref) { - return ref->obj_field_addr(referent_offset); -} - -void java_lang_ref_Reference::set_next(oop ref, oop value) { - ref->obj_field_put(next_offset, value); -} - -oop* java_lang_ref_Reference::next_addr(oop ref) { - return ref->obj_field_addr(next_offset); -} - -void java_lang_ref_Reference::set_discovered(oop ref, oop value) { - ref->obj_field_put(discovered_offset, value); -} - -oop* java_lang_ref_Reference::discovered_addr(oop ref) { - return ref->obj_field_addr(discovered_offset); +oop java_lang_ref_Reference::pending_list_lock() { + instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass()); + char *addr = (((char *)ik->start_of_static_fields()) + static_lock_offset); + if (UseCompressedOops) { + return oopDesc::load_decode_heap_oop((narrowOop *)addr); + } else { + return oopDesc::load_decode_heap_oop((oop*)addr); + } } -oop* java_lang_ref_Reference::pending_list_lock_addr() { +HeapWord *java_lang_ref_Reference::pending_list_addr() { instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass()); - return (oop*)(((char *)ik->start_of_static_fields()) + static_lock_offset); + char *addr = (((char *)ik->start_of_static_fields()) + static_pending_offset); + // XXX This might not be HeapWord aligned, almost rather be char *. 
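The Reference accessors rewritten here (pending_list_lock above and pending_list just below) repeat the same load-and-decode pattern for a static oop field. Purely as a contextual sketch, the shared pattern factored into a hypothetical helper (the function name is invented; every call it makes appears verbatim in the patched code around it):

    static oop load_static_oop_field(instanceKlass* ik, int static_offset) {
      char* addr = ((char*)ik->start_of_static_fields()) + static_offset;
      if (UseCompressedOops) {
        return oopDesc::load_decode_heap_oop((narrowOop*)addr);  // 32-bit heap oop
      } else {
        return oopDesc::load_decode_heap_oop((oop*)addr);        // full-width oop
      }
    }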
+ return (HeapWord*)addr; } -oop* java_lang_ref_Reference::pending_list_addr() { - instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass()); - return (oop *)(((char *)ik->start_of_static_fields()) + static_pending_offset); +oop java_lang_ref_Reference::pending_list() { + char *addr = (char *)pending_list_addr(); + if (UseCompressedOops) { + return oopDesc::load_decode_heap_oop((narrowOop *)addr); + } else { + return oopDesc::load_decode_heap_oop((oop*)addr); + } } @@ -2180,6 +2166,7 @@ int java_lang_reflect_Field::signature_offset; int java_lang_reflect_Field::annotations_offset; int java_lang_boxing_object::value_offset; +int java_lang_boxing_object::long_value_offset; int java_lang_ref_Reference::referent_offset; int java_lang_ref_Reference::queue_offset; int java_lang_ref_Reference::next_offset; @@ -2260,7 +2247,8 @@ void java_nio_Buffer::compute_offsets() { klassOop k = SystemDictionary::java_nio_Buffer_klass(); - COMPUTE_OFFSET("java.nio.Buffer", _limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature()); + assert(k != NULL, "must be loaded in 1.4+"); + compute_offset(_limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature()); } // Support for intrinsification of sun.misc.AtomicLongCSImpl.attemptUpdate @@ -2274,7 +2262,7 @@ klassOop k = SystemDictionary::sun_misc_AtomicLongCSImpl_klass(); // If this class is not present, its value field offset won't be referenced. if (k != NULL) { - COMPUTE_OFFSET("sun.misc.AtomicLongCSImpl", _value_offset, k, vmSymbols::value_name(), vmSymbols::long_signature()); + compute_offset(_value_offset, k, vmSymbols::value_name(), vmSymbols::long_signature()); } } @@ -2284,7 +2272,7 @@ assert(JDK_Version::is_gte_jdk16x_version(), "Must be JDK 1.6 or later"); SystemDictionary::load_abstract_ownable_synchronizer_klass(CHECK); klassOop k = SystemDictionary::abstract_ownable_synchronizer_klass(); - COMPUTE_OFFSET("java.util.concurrent.locks.AbstractOwnableSynchronizer", _owner_offset, k, + compute_offset(_owner_offset, k, vmSymbols::exclusive_owner_thread_name(), vmSymbols::thread_signature()); } @@ -2297,8 +2285,8 @@ // Invoked before SystemDictionary::initialize, so pre-loaded classes // are not available to determine the offset_of_static_fields. 
void JavaClasses::compute_hard_coded_offsets() { - const int x = wordSize; - const int header = instanceOopDesc::header_size_in_bytes(); + const int x = heapOopSize; + const int header = instanceOopDesc::base_offset_in_bytes(); // Do the String Class java_lang_String::value_offset = java_lang_String::hc_value_offset * x + header; @@ -2321,7 +2309,8 @@ java_lang_Throwable::stackTrace_offset = java_lang_Throwable::hc_stackTrace_offset * x + header; // java_lang_boxing_object - java_lang_boxing_object::value_offset = java_lang_boxing_object::hc_value_offset * x + header; + java_lang_boxing_object::value_offset = java_lang_boxing_object::hc_value_offset + header; + java_lang_boxing_object::long_value_offset = align_size_up((java_lang_boxing_object::hc_value_offset + header), BytesPerLong); // java_lang_ref_Reference: java_lang_ref_Reference::referent_offset = java_lang_ref_Reference::hc_referent_offset * x + header; @@ -2335,7 +2324,7 @@ java_lang_ref_Reference::number_of_fake_oop_fields = 1; // java_lang_ref_SoftReference Class - java_lang_ref_SoftReference::timestamp_offset = java_lang_ref_SoftReference::hc_timestamp_offset * x + header; + java_lang_ref_SoftReference::timestamp_offset = align_size_up((java_lang_ref_SoftReference::hc_timestamp_offset * x + header), BytesPerLong); // Don't multiply static fields because they are always in wordSize units java_lang_ref_SoftReference::static_clock_offset = java_lang_ref_SoftReference::hc_static_clock_offset * x; @@ -2444,6 +2433,36 @@ } +bool JavaClasses::check_constant(const char *klass_name, int hardcoded_constant, const char *field_name, const char* field_sig) { + EXCEPTION_MARK; + fieldDescriptor fd; + symbolHandle klass_sym = oopFactory::new_symbol_handle(klass_name, CATCH); + klassOop k = SystemDictionary::resolve_or_fail(klass_sym, true, CATCH); + instanceKlassHandle h_klass (THREAD, k); + symbolHandle f_name = oopFactory::new_symbol_handle(field_name, CATCH); + symbolHandle f_sig = oopFactory::new_symbol_handle(field_sig, CATCH); + if (!h_klass->find_local_field(f_name(), f_sig(), &fd)) { + tty->print_cr("Static field %s.%s not found", klass_name, field_name); + return false; + } + if (!fd.is_static() || !fd.has_initial_value()) { + tty->print_cr("Static field %s.%s appears to be non-constant", klass_name, field_name); + return false; + } + if (!fd.initial_value_tag().is_int()) { + tty->print_cr("Static field %s.%s is not an int", klass_name, field_name); + return false; + } + jint field_value = fd.int_initial_value(); + if (field_value == hardcoded_constant) { + return true; + } else { + tty->print_cr("Constant value of static field %s.%s is hardcoded as %d but should really be %d.", klass_name, field_name, hardcoded_constant, field_value); + return false; + } +} + + // Check the hard-coded field offsets of all the classes in this file void JavaClasses::check_offsets() { @@ -2452,9 +2471,15 @@ #define CHECK_OFFSET(klass_name, cpp_klass_name, field_name, field_sig) \ valid &= check_offset(klass_name, cpp_klass_name :: field_name ## _offset, #field_name, field_sig) +#define CHECK_LONG_OFFSET(klass_name, cpp_klass_name, field_name, field_sig) \ + valid &= check_offset(klass_name, cpp_klass_name :: long_ ## field_name ## _offset, #field_name, field_sig) + #define CHECK_STATIC_OFFSET(klass_name, cpp_klass_name, field_name, field_sig) \ valid &= check_static_offset(klass_name, cpp_klass_name :: static_ ## field_name ## _offset, #field_name, field_sig) +#define CHECK_CONSTANT(klass_name, cpp_klass_name, field_name, field_sig) \ + valid &= 
check_constant(klass_name, cpp_klass_name :: field_name, #field_name, field_sig) + // java.lang.String CHECK_OFFSET("java/lang/String", java_lang_String, value, "[C"); @@ -2481,11 +2506,11 @@ CHECK_OFFSET("java/lang/Boolean", java_lang_boxing_object, value, "Z"); CHECK_OFFSET("java/lang/Character", java_lang_boxing_object, value, "C"); CHECK_OFFSET("java/lang/Float", java_lang_boxing_object, value, "F"); - CHECK_OFFSET("java/lang/Double", java_lang_boxing_object, value, "D"); + CHECK_LONG_OFFSET("java/lang/Double", java_lang_boxing_object, value, "D"); CHECK_OFFSET("java/lang/Byte", java_lang_boxing_object, value, "B"); CHECK_OFFSET("java/lang/Short", java_lang_boxing_object, value, "S"); CHECK_OFFSET("java/lang/Integer", java_lang_boxing_object, value, "I"); - CHECK_OFFSET("java/lang/Long", java_lang_boxing_object, value, "J"); + CHECK_LONG_OFFSET("java/lang/Long", java_lang_boxing_object, value, "J"); // java.lang.ClassLoader --- old/hotspot/src/share/vm/classfile/javaClasses.hpp 2009-08-01 04:10:05.099289980 +0100 +++ new/hotspot/src/share/vm/classfile/javaClasses.hpp 2009-08-01 04:10:05.007579922 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)javaClasses.hpp 1.158 08/01/17 09:41:12 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,9 +48,9 @@ private: enum { hc_value_offset = 0, - hc_offset_offset = 1, - hc_count_offset = 2, - hc_hash_offset = 3 + hc_offset_offset = 1 + //hc_count_offset = 2 -- not a word-scaled offset + //hc_hash_offset = 3 -- not a word-scaled offset }; static int value_offset; @@ -152,9 +152,12 @@ // Conversion static klassOop as_klassOop(oop java_class); // Testing - static bool is_primitive(oop java_class); - static BasicType primitive_type(oop java_class); - static oop primitive_mirror(BasicType t); + static bool is_instance(oop obj) { + return obj != NULL && obj->klass() == SystemDictionary::class_klass(); + } + static bool is_primitive(oop java_class); + static BasicType primitive_type(oop java_class); + static oop primitive_mirror(BasicType t); // JVM_NewInstance support static methodOop resolved_constructor(oop java_class); static void set_resolved_constructor(oop java_class, methodOop constructor); @@ -652,17 +655,24 @@ enum { hc_value_offset = 0 }; - static int value_offset; + static int value_offset; + static int long_value_offset; - static oop initialize_and_allocate(klassOop klass, TRAPS); + static oop initialize_and_allocate(BasicType type, TRAPS); public: // Allocation. Returns a boxed value, or NULL for invalid type. static oop create(BasicType type, jvalue* value, TRAPS); // Accessors. Returns the basic type being boxed, or T_ILLEGAL for invalid oop. static BasicType get_value(oop box, jvalue* value); static BasicType set_value(oop box, jvalue* value); - - static int value_offset_in_bytes() { return value_offset; } + static BasicType basic_type(oop box); + static bool is_instance(oop box) { return basic_type(box) != T_ILLEGAL; } + static bool is_instance(oop box, BasicType type) { return basic_type(box) == type; } + + static int value_offset_in_bytes(BasicType type) { + return ( type == T_LONG || type == T_DOUBLE ) ? 
long_value_offset : + value_offset; + } // Debugging friend class JavaClasses; @@ -694,24 +704,47 @@ static int number_of_fake_oop_fields; // Accessors - static oop referent(oop ref) { return *referent_addr(ref); } - static void set_referent(oop ref, oop value); - static oop* referent_addr(oop ref); - - static oop next(oop ref) { return *next_addr(ref); } - static void set_next(oop ref, oop value); - static oop* next_addr(oop ref); - - static oop discovered(oop ref) { return *discovered_addr(ref); } - static void set_discovered(oop ref, oop value); - static oop* discovered_addr(oop ref); - + static oop referent(oop ref) { + return ref->obj_field(referent_offset); + } + static void set_referent(oop ref, oop value) { + ref->obj_field_put(referent_offset, value); + } + static void set_referent_raw(oop ref, oop value) { + ref->obj_field_raw_put(referent_offset, value); + } + static HeapWord* referent_addr(oop ref) { + return ref->obj_field_addr(referent_offset); + } + static oop next(oop ref) { + return ref->obj_field(next_offset); + } + static void set_next(oop ref, oop value) { + ref->obj_field_put(next_offset, value); + } + static void set_next_raw(oop ref, oop value) { + ref->obj_field_raw_put(next_offset, value); + } + static HeapWord* next_addr(oop ref) { + return ref->obj_field_addr(next_offset); + } + static oop discovered(oop ref) { + return ref->obj_field(discovered_offset); + } + static void set_discovered(oop ref, oop value) { + ref->obj_field_put(discovered_offset, value); + } + static void set_discovered_raw(oop ref, oop value) { + ref->obj_field_raw_put(discovered_offset, value); + } + static HeapWord* discovered_addr(oop ref) { + return ref->obj_field_addr(discovered_offset); + } // Accessors for statics - static oop pending_list_lock() { return *pending_list_lock_addr(); } - static oop pending_list() { return *pending_list_addr(); } + static oop pending_list_lock(); + static oop pending_list(); - static oop* pending_list_lock_addr(); - static oop* pending_list_addr(); + static HeapWord* pending_list_addr(); }; @@ -721,7 +754,7 @@ public: enum { // The timestamp is a long field and may need to be adjusted for alignment. - hc_timestamp_offset = align_object_offset_(hc_discovered_offset + 1) + hc_timestamp_offset = hc_discovered_offset + 1 }; enum { hc_static_clock_offset = 0 @@ -901,6 +934,7 @@ private: static bool check_offset(const char *klass_name, int offset, const char *field_name, const char* field_sig) PRODUCT_RETURN0; static bool check_static_offset(const char *klass_name, int hardcoded_offset, const char *field_name, const char* field_sig) PRODUCT_RETURN0; + static bool check_constant(const char *klass_name, int constant, const char *field_name, const char* field_sig) PRODUCT_RETURN0; public: static void compute_hard_coded_offsets(); static void compute_offsets(); --- old/hotspot/src/share/vm/classfile/systemDictionary.cpp 2009-08-01 04:10:06.037673223 +0100 +++ new/hotspot/src/share/vm/classfile/systemDictionary.cpp 2009-08-01 04:10:05.941129398 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)systemDictionary.cpp 1.367 08/01/17 09:41:11 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,71 +40,9 @@ oop SystemDictionary::_system_loader_lock_obj = NULL; -klassOop SystemDictionary::_object_klass = NULL; -klassOop SystemDictionary::_string_klass = NULL; -klassOop SystemDictionary::_class_klass = NULL; -klassOop SystemDictionary::_cloneable_klass = NULL; -klassOop SystemDictionary::_classloader_klass = NULL; -klassOop SystemDictionary::_serializable_klass = NULL; -klassOop SystemDictionary::_system_klass = NULL; - -klassOop SystemDictionary::_throwable_klass = NULL; -klassOop SystemDictionary::_error_klass = NULL; -klassOop SystemDictionary::_threaddeath_klass = NULL; -klassOop SystemDictionary::_exception_klass = NULL; -klassOop SystemDictionary::_runtime_exception_klass = NULL; -klassOop SystemDictionary::_classNotFoundException_klass = NULL; -klassOop SystemDictionary::_noClassDefFoundError_klass = NULL; -klassOop SystemDictionary::_linkageError_klass = NULL; -klassOop SystemDictionary::_classCastException_klass = NULL; -klassOop SystemDictionary::_arrayStoreException_klass = NULL; -klassOop SystemDictionary::_virtualMachineError_klass = NULL; -klassOop SystemDictionary::_outOfMemoryError_klass = NULL; -klassOop SystemDictionary::_StackOverflowError_klass = NULL; -klassOop SystemDictionary::_illegalMonitorStateException_klass = NULL; -klassOop SystemDictionary::_protectionDomain_klass = NULL; -klassOop SystemDictionary::_AccessControlContext_klass = NULL; - -klassOop SystemDictionary::_reference_klass = NULL; -klassOop SystemDictionary::_soft_reference_klass = NULL; -klassOop SystemDictionary::_weak_reference_klass = NULL; -klassOop SystemDictionary::_final_reference_klass = NULL; -klassOop SystemDictionary::_phantom_reference_klass = NULL; -klassOop SystemDictionary::_finalizer_klass = NULL; - -klassOop SystemDictionary::_thread_klass = NULL; -klassOop SystemDictionary::_threadGroup_klass = NULL; -klassOop SystemDictionary::_properties_klass = NULL; -klassOop SystemDictionary::_reflect_accessible_object_klass = NULL; -klassOop SystemDictionary::_reflect_field_klass = NULL; -klassOop SystemDictionary::_reflect_method_klass = NULL; -klassOop SystemDictionary::_reflect_constructor_klass = NULL; -klassOop SystemDictionary::_reflect_magic_klass = NULL; -klassOop SystemDictionary::_reflect_method_accessor_klass = NULL; -klassOop SystemDictionary::_reflect_constructor_accessor_klass = NULL; -klassOop SystemDictionary::_reflect_delegating_classloader_klass = NULL; -klassOop SystemDictionary::_reflect_constant_pool_klass = NULL; -klassOop SystemDictionary::_reflect_unsafe_static_field_accessor_impl_klass = NULL; - -klassOop SystemDictionary::_vector_klass = NULL; -klassOop SystemDictionary::_hashtable_klass = NULL; -klassOop SystemDictionary::_stringBuffer_klass = NULL; - -klassOop SystemDictionary::_stackTraceElement_klass = NULL; - -klassOop SystemDictionary::_java_nio_Buffer_klass = NULL; - -klassOop SystemDictionary::_sun_misc_AtomicLongCSImpl_klass = NULL; -klassOop SystemDictionary::_sun_jkernel_DownloadManager_klass = NULL; - -klassOop SystemDictionary::_boolean_klass = NULL; -klassOop SystemDictionary::_char_klass = NULL; -klassOop SystemDictionary::_float_klass = NULL; -klassOop SystemDictionary::_double_klass = NULL; -klassOop SystemDictionary::_byte_klass = NULL; -klassOop SystemDictionary::_short_klass = NULL; -klassOop SystemDictionary::_int_klass = NULL; -klassOop SystemDictionary::_long_klass = NULL; +klassOop SystemDictionary::_well_known_klasses[SystemDictionary::WKID_LIMIT] + = { NULL 
/*, NULL...*/ };
+
 klassOop SystemDictionary::_box_klasses[T_VOID+1] = { NULL /*, NULL...*/ };
 oop SystemDictionary::_java_system_loader = NULL;
@@ -124,10 +62,10 @@
 }
 void SystemDictionary::compute_java_system_loader(TRAPS) {
-  KlassHandle system_klass(THREAD, _classloader_klass);
+  KlassHandle system_klass(THREAD, WK_KLASS(classloader_klass));
   JavaValue result(T_OBJECT);
-  JavaCalls::call_static(&result,
-                         KlassHandle(THREAD, _classloader_klass),
+  JavaCalls::call_static(&result,
+                         KlassHandle(THREAD, WK_KLASS(classloader_klass)),
                          vmSymbolHandles::getSystemClassLoader_name(),
                          vmSymbolHandles::void_classloader_signature(),
                          CHECK);
@@ -295,6 +233,15 @@
                                                  bool is_superclass,
                                                  TRAPS) {
+  // Try to get one of the well-known klasses.
+  // They are trusted, and do not participate in circularities.
+  if (LinkWellKnownClasses) {
+    klassOop k = find_well_known_klass(class_name());
+    if (k != NULL) {
+      return k;
+    }
+  }
+
   // Double-check, if child class is already loaded, just return super-class,interface
   // Don't add a placedholder if already loaded, i.e. already in system dictionary
   // Make sure there's a placeholder for the *child* before resolving.
@@ -922,6 +869,15 @@
                                                TRAPS) {
   klassOop k = NULL;
   assert(class_name() != NULL, "class name must be non NULL");
+
+  // Try to get one of the well-known klasses.
+  if (LinkWellKnownClasses) {
+    k = find_well_known_klass(class_name());
+    if (k != NULL) {
+      return k;
+    }
+  }
+
   if (FieldType::is_array(class_name())) {
     // The name refers to an array. Parse the name.
     jint dimension;
@@ -945,6 +901,38 @@
   return k;
 }
+// Quick range check for names of well-known classes:
+static symbolOop wk_klass_name_limits[2] = {NULL, NULL};
+
+#ifndef PRODUCT
+static int find_wkk_calls, find_wkk_probes, find_wkk_wins;
+// counts for "hello world": 3983, 1616, 1075
+// => 60% hit after limit guard, 25% total win rate
+#endif
+
+klassOop SystemDictionary::find_well_known_klass(symbolOop class_name) {
+  // A bounds-check on class_name will quickly get a negative result.
+  NOT_PRODUCT(find_wkk_calls++);
+  if (class_name >= wk_klass_name_limits[0] &&
+      class_name <= wk_klass_name_limits[1]) {
+    NOT_PRODUCT(find_wkk_probes++);
+    vmSymbols::SID sid = vmSymbols::find_sid(class_name);
+    if (sid != vmSymbols::NO_SID) {
+      klassOop k = NULL;
+      switch (sid) {
+        #define WK_KLASS_CASE(name, symbol, ignore_option) \
+        case vmSymbols::VM_SYMBOL_ENUM_NAME(symbol): \
+          k = WK_KLASS(name); break;
+        WK_KLASSES_DO(WK_KLASS_CASE)
+        #undef WK_KLASS_CASE
+      }
+      NOT_PRODUCT(if (k != NULL) find_wkk_wins++);
+      return k;
+    }
+  }
+  return NULL;
+}
+
 // Note: this method is much like resolve_from_stream, but
 // updates no supplemental data structures.
 // TODO consolidate the two methods with a helper routine?
@@ -952,6 +940,8 @@
                                        Handle class_loader,
                                        Handle protection_domain,
                                        ClassFileStream* st,
+                                       KlassHandle host_klass,
+                                       GrowableArray<Handle>* cp_patches,
                                        TRAPS) {
   symbolHandle parsed_name;
@@ -968,10 +958,10 @@
   instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
                                                              class_loader,
                                                              protection_domain,
+                                                             cp_patches,
                                                              parsed_name,
                                                              THREAD);
-
   // We don't redefine the class, so we just need to clean up whether there
   // was an error or not (don't want to modify any system dictionary
   // data structures).
@@ -988,6 +978,30 @@
     }
   }
+  if (host_klass.not_null() && k.not_null()) {
+    assert(AnonymousClasses, "");
+    // If it's anonymous, initialize it now, since nobody else will.
+ k->set_host_klass(host_klass()); + + { + MutexLocker mu_r(Compile_lock, THREAD); + + // Add to class hierarchy, initialize vtables, and do possible + // deoptimizations. + add_to_hierarchy(k, CHECK_NULL); // No exception, but can block + + // But, do not add to system dictionary. + } + + k->eager_initialize(THREAD); + + // notify jvmti + if (JvmtiExport::should_post_class_load()) { + assert(THREAD->is_Java_thread(), "thread->is_Java_thread()"); + JvmtiExport::post_class_load((JavaThread *) THREAD, k()); + } + } + return k(); } @@ -1246,7 +1260,7 @@ if (obj == NULL) { return nk; } Handle h_obj(THREAD, obj); - char* new_class_name = java_lang_String::as_platform_dependent_str(h_obj, + char* new_class_name = java_lang_String::as_platform_dependent_str(h_obj, CHECK_(nk)); // lock the loader @@ -1687,71 +1701,13 @@ void SystemDictionary::preloaded_oops_do(OopClosure* f) { - f->do_oop((oop*) &_string_klass); - f->do_oop((oop*) &_object_klass); - f->do_oop((oop*) &_class_klass); - f->do_oop((oop*) &_cloneable_klass); - f->do_oop((oop*) &_classloader_klass); - f->do_oop((oop*) &_serializable_klass); - f->do_oop((oop*) &_system_klass); - - f->do_oop((oop*) &_throwable_klass); - f->do_oop((oop*) &_error_klass); - f->do_oop((oop*) &_threaddeath_klass); - f->do_oop((oop*) &_exception_klass); - f->do_oop((oop*) &_runtime_exception_klass); - f->do_oop((oop*) &_classNotFoundException_klass); - f->do_oop((oop*) &_noClassDefFoundError_klass); - f->do_oop((oop*) &_linkageError_klass); - f->do_oop((oop*) &_classCastException_klass); - f->do_oop((oop*) &_arrayStoreException_klass); - f->do_oop((oop*) &_virtualMachineError_klass); - f->do_oop((oop*) &_outOfMemoryError_klass); - f->do_oop((oop*) &_StackOverflowError_klass); - f->do_oop((oop*) &_illegalMonitorStateException_klass); - f->do_oop((oop*) &_protectionDomain_klass); - f->do_oop((oop*) &_AccessControlContext_klass); - - f->do_oop((oop*) &_reference_klass); - f->do_oop((oop*) &_soft_reference_klass); - f->do_oop((oop*) &_weak_reference_klass); - f->do_oop((oop*) &_final_reference_klass); - f->do_oop((oop*) &_phantom_reference_klass); - f->do_oop((oop*) &_finalizer_klass); - - f->do_oop((oop*) &_thread_klass); - f->do_oop((oop*) &_threadGroup_klass); - f->do_oop((oop*) &_properties_klass); - f->do_oop((oop*) &_reflect_accessible_object_klass); - f->do_oop((oop*) &_reflect_field_klass); - f->do_oop((oop*) &_reflect_method_klass); - f->do_oop((oop*) &_reflect_constructor_klass); - f->do_oop((oop*) &_reflect_magic_klass); - f->do_oop((oop*) &_reflect_method_accessor_klass); - f->do_oop((oop*) &_reflect_constructor_accessor_klass); - f->do_oop((oop*) &_reflect_delegating_classloader_klass); - f->do_oop((oop*) &_reflect_constant_pool_klass); - f->do_oop((oop*) &_reflect_unsafe_static_field_accessor_impl_klass); - - f->do_oop((oop*) &_stringBuffer_klass); - f->do_oop((oop*) &_vector_klass); - f->do_oop((oop*) &_hashtable_klass); - - f->do_oop((oop*) &_stackTraceElement_klass); - - f->do_oop((oop*) &_java_nio_Buffer_klass); - - f->do_oop((oop*) &_sun_misc_AtomicLongCSImpl_klass); - f->do_oop((oop*) &_sun_jkernel_DownloadManager_klass); - - f->do_oop((oop*) &_boolean_klass); - f->do_oop((oop*) &_char_klass); - f->do_oop((oop*) &_float_klass); - f->do_oop((oop*) &_double_klass); - f->do_oop((oop*) &_byte_klass); - f->do_oop((oop*) &_short_klass); - f->do_oop((oop*) &_int_klass); - f->do_oop((oop*) &_long_klass); + f->do_oop((oop*) &wk_klass_name_limits[0]); + f->do_oop((oop*) &wk_klass_name_limits[1]); + + for (int k = (int)FIRST_WKID; k < (int)WKID_LIMIT; k++) 
{ + f->do_oop((oop*) &_well_known_klasses[k]); + } + { for (int i = 0; i < T_VOID+1; i++) { if (_box_klasses[i] != NULL) { @@ -1844,14 +1800,72 @@ initialize_preloaded_classes(CHECK); } +// Compact table of directions on the initialization of klasses: +static const short wk_init_info[] = { + #define WK_KLASS_INIT_INFO(name, symbol, option) \ + ( ((int)vmSymbols::VM_SYMBOL_ENUM_NAME(symbol) \ + << SystemDictionary::CEIL_LG_OPTION_LIMIT) \ + | (int)SystemDictionary::option ), + WK_KLASSES_DO(WK_KLASS_INIT_INFO) + #undef WK_KLASS_INIT_INFO + 0 +}; + +bool SystemDictionary::initialize_wk_klass(WKID id, int init_opt, TRAPS) { + assert(id >= (int)FIRST_WKID && id < (int)WKID_LIMIT, "oob"); + int info = wk_init_info[id - FIRST_WKID]; + int sid = (info >> CEIL_LG_OPTION_LIMIT); + symbolHandle symbol = vmSymbolHandles::symbol_handle_at((vmSymbols::SID)sid); + klassOop* klassp = &_well_known_klasses[id]; + bool must_load = (init_opt < SystemDictionary::Opt); + bool try_load = true; + if (init_opt == SystemDictionary::Opt_Kernel) { +#ifndef KERNEL + try_load = false; +#endif //KERNEL + } + if ((*klassp) == NULL && try_load) { + if (must_load) { + (*klassp) = resolve_or_fail(symbol, true, CHECK_0); // load required class + } else { + (*klassp) = resolve_or_null(symbol, CHECK_0); // load optional klass + } + } + return ((*klassp) != NULL); +} + +void SystemDictionary::initialize_wk_klasses_until(WKID limit_id, WKID &start_id, TRAPS) { + assert((int)start_id <= (int)limit_id, "IDs are out of order!"); + for (int id = (int)start_id; id < (int)limit_id; id++) { + assert(id >= (int)FIRST_WKID && id < (int)WKID_LIMIT, "oob"); + int info = wk_init_info[id - FIRST_WKID]; + int sid = (info >> CEIL_LG_OPTION_LIMIT); + int opt = (info & right_n_bits(CEIL_LG_OPTION_LIMIT)); + + initialize_wk_klass((WKID)id, opt, CHECK); + + // Update limits, so find_well_known_klass can be very fast: + symbolOop s = vmSymbols::symbol_at((vmSymbols::SID)sid); + if (wk_klass_name_limits[1] == NULL) { + wk_klass_name_limits[0] = wk_klass_name_limits[1] = s; + } else if (wk_klass_name_limits[1] < s) { + wk_klass_name_limits[1] = s; + } else if (wk_klass_name_limits[0] > s) { + wk_klass_name_limits[0] = s; + } + } +} + void SystemDictionary::initialize_preloaded_classes(TRAPS) { - assert(_object_klass == NULL, "preloaded classes should only be initialized once"); + assert(WK_KLASS(object_klass) == NULL, "preloaded classes should only be initialized once"); // Preload commonly used klasses - _object_klass = resolve_or_fail(vmSymbolHandles::java_lang_Object(), true, CHECK); - _string_klass = resolve_or_fail(vmSymbolHandles::java_lang_String(), true, CHECK); - _class_klass = resolve_or_fail(vmSymbolHandles::java_lang_Class(), true, CHECK); - debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(_class_klass)); + WKID scan = FIRST_WKID; + // first do Object, String, Class + initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(class_klass), scan, CHECK); + + debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(WK_KLASS(class_klass))); + // Fixup mirrors for classes loaded before java.lang.Class. 
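A standalone sketch (not part of the patch) of the wk_init_info encoding above: each entry packs a vmSymbols SID into the high bits and the InitOption into the low CEIL_LG_OPTION_LIMIT bits, and initialize_wk_klass/initialize_wk_klasses_until unpack them the same way. The SID value 57 below is a made-up placeholder.

    #include <cassert>

    enum { CEIL_LG_OPTION_LIMIT = 4 };  // same constant as in systemDictionary.hpp
    enum DemoOption { Pre, Opt, Opt_Only_JDK14NewRef, Opt_Only_JDK15, Opt_Kernel, OPTION_LIMIT };

    // Stand-in for the VM's right_n_bits macro.
    static int right_n_bits(int n) { return (1 << n) - 1; }

    int main() {
      const int demo_sid = 57;  // hypothetical vmSymbols::SID value
      short info = (short)((demo_sid << CEIL_LG_OPTION_LIMIT) | (int)Opt);

      int sid = (info >> CEIL_LG_OPTION_LIMIT);               // recovered symbol id
      int opt = (info & right_n_bits(CEIL_LG_OPTION_LIMIT));  // recovered InitOption
      assert(sid == demo_sid && opt == (int)Opt);
      assert(OPTION_LIMIT <= (1 << CEIL_LG_OPTION_LIMIT));    // why 4 option bits suffice
      return 0;
    }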
// These calls iterate over the objects currently in the perm gen // so calling them at this point is matters (not before when there @@ -1860,100 +1874,37 @@ Universe::initialize_basic_type_mirrors(CHECK); Universe::fixup_mirrors(CHECK); - _cloneable_klass = resolve_or_fail(vmSymbolHandles::java_lang_Cloneable(), true, CHECK); - _classloader_klass = resolve_or_fail(vmSymbolHandles::java_lang_ClassLoader(), true, CHECK); - _serializable_klass = resolve_or_fail(vmSymbolHandles::java_io_Serializable(), true, CHECK); - _system_klass = resolve_or_fail(vmSymbolHandles::java_lang_System(), true, CHECK); - - _throwable_klass = resolve_or_fail(vmSymbolHandles::java_lang_Throwable(), true, CHECK); - _error_klass = resolve_or_fail(vmSymbolHandles::java_lang_Error(), true, CHECK); - _threaddeath_klass = resolve_or_fail(vmSymbolHandles::java_lang_ThreadDeath(), true, CHECK); - _exception_klass = resolve_or_fail(vmSymbolHandles::java_lang_Exception(), true, CHECK); - _runtime_exception_klass = resolve_or_fail(vmSymbolHandles::java_lang_RuntimeException(), true, CHECK); - _protectionDomain_klass = resolve_or_fail(vmSymbolHandles::java_security_ProtectionDomain(), true, CHECK); - _AccessControlContext_klass = resolve_or_fail(vmSymbolHandles::java_security_AccessControlContext(), true, CHECK); - _classNotFoundException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ClassNotFoundException(), true, CHECK); - _noClassDefFoundError_klass = resolve_or_fail(vmSymbolHandles::java_lang_NoClassDefFoundError(), true, CHECK); - _linkageError_klass = resolve_or_fail(vmSymbolHandles::java_lang_LinkageError(), true, CHECK); - _classCastException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ClassCastException(), true, CHECK); - _arrayStoreException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ArrayStoreException(), true, CHECK); - _virtualMachineError_klass = resolve_or_fail(vmSymbolHandles::java_lang_VirtualMachineError(), true, CHECK); - _outOfMemoryError_klass = resolve_or_fail(vmSymbolHandles::java_lang_OutOfMemoryError(), true, CHECK); - _StackOverflowError_klass = resolve_or_fail(vmSymbolHandles::java_lang_StackOverflowError(), true, CHECK); - _illegalMonitorStateException_klass = resolve_or_fail(vmSymbolHandles::java_lang_IllegalMonitorStateException(), true, CHECK); + // do a bunch more: + initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(reference_klass), scan, CHECK); // Preload ref klasses and set reference types - _reference_klass = resolve_or_fail(vmSymbolHandles::java_lang_ref_Reference(), true, CHECK); - instanceKlass::cast(_reference_klass)->set_reference_type(REF_OTHER); - instanceRefKlass::update_nonstatic_oop_maps(_reference_klass); - - _soft_reference_klass = resolve_or_fail(vmSymbolHandles::java_lang_ref_SoftReference(), true, CHECK); - instanceKlass::cast(_soft_reference_klass)->set_reference_type(REF_SOFT); - _weak_reference_klass = resolve_or_fail(vmSymbolHandles::java_lang_ref_WeakReference(), true, CHECK); - instanceKlass::cast(_weak_reference_klass)->set_reference_type(REF_WEAK); - _final_reference_klass = resolve_or_fail(vmSymbolHandles::java_lang_ref_FinalReference(), true, CHECK); - instanceKlass::cast(_final_reference_klass)->set_reference_type(REF_FINAL); - _phantom_reference_klass = resolve_or_fail(vmSymbolHandles::java_lang_ref_PhantomReference(), true, CHECK); - instanceKlass::cast(_phantom_reference_klass)->set_reference_type(REF_PHANTOM); - _finalizer_klass = resolve_or_fail(vmSymbolHandles::java_lang_ref_Finalizer(), true, CHECK); - - _thread_klass = 
resolve_or_fail(vmSymbolHandles::java_lang_Thread(), true, CHECK); - _threadGroup_klass = resolve_or_fail(vmSymbolHandles::java_lang_ThreadGroup(), true, CHECK); - _properties_klass = resolve_or_fail(vmSymbolHandles::java_util_Properties(), true, CHECK); - _reflect_accessible_object_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_AccessibleObject(), true, CHECK); - _reflect_field_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Field(), true, CHECK); - _reflect_method_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Method(), true, CHECK); - _reflect_constructor_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Constructor(), true, CHECK); - // Universe::is_gte_jdk14x_version() is not set up by this point. - // It's okay if these turn out to be NULL in non-1.4 JDKs. - _reflect_magic_klass = resolve_or_null(vmSymbolHandles::sun_reflect_MagicAccessorImpl(), CHECK); - _reflect_method_accessor_klass = resolve_or_null(vmSymbolHandles::sun_reflect_MethodAccessorImpl(), CHECK); - _reflect_constructor_accessor_klass = resolve_or_null(vmSymbolHandles::sun_reflect_ConstructorAccessorImpl(), CHECK); - _reflect_delegating_classloader_klass = resolve_or_null(vmSymbolHandles::sun_reflect_DelegatingClassLoader(), CHECK); - _reflect_constant_pool_klass = resolve_or_null(vmSymbolHandles::sun_reflect_ConstantPool(), CHECK); - _reflect_unsafe_static_field_accessor_impl_klass = resolve_or_null(vmSymbolHandles::sun_reflect_UnsafeStaticFieldAccessorImpl(), CHECK); - - _vector_klass = resolve_or_fail(vmSymbolHandles::java_util_Vector(), true, CHECK); - _hashtable_klass = resolve_or_fail(vmSymbolHandles::java_util_Hashtable(), true, CHECK); - _stringBuffer_klass = resolve_or_fail(vmSymbolHandles::java_lang_StringBuffer(), true, CHECK); - - // It's NULL in non-1.4 JDKs. - _stackTraceElement_klass = resolve_or_null(vmSymbolHandles::java_lang_StackTraceElement(), CHECK); - - // Universe::is_gte_jdk14x_version() is not set up by this point. - // It's okay if this turns out to be NULL in non-1.4 JDKs. - _java_nio_Buffer_klass = resolve_or_null(vmSymbolHandles::java_nio_Buffer(), CHECK); + instanceKlass::cast(WK_KLASS(reference_klass))->set_reference_type(REF_OTHER); + instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(reference_klass)); + + initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(phantom_reference_klass), scan, CHECK); + instanceKlass::cast(WK_KLASS(soft_reference_klass))->set_reference_type(REF_SOFT); + instanceKlass::cast(WK_KLASS(weak_reference_klass))->set_reference_type(REF_WEAK); + instanceKlass::cast(WK_KLASS(final_reference_klass))->set_reference_type(REF_FINAL); + instanceKlass::cast(WK_KLASS(phantom_reference_klass))->set_reference_type(REF_PHANTOM); + + initialize_wk_klasses_until(WKID_LIMIT, scan, CHECK); + + _box_klasses[T_BOOLEAN] = WK_KLASS(boolean_klass); + _box_klasses[T_CHAR] = WK_KLASS(char_klass); + _box_klasses[T_FLOAT] = WK_KLASS(float_klass); + _box_klasses[T_DOUBLE] = WK_KLASS(double_klass); + _box_klasses[T_BYTE] = WK_KLASS(byte_klass); + _box_klasses[T_SHORT] = WK_KLASS(short_klass); + _box_klasses[T_INT] = WK_KLASS(int_klass); + _box_klasses[T_LONG] = WK_KLASS(long_klass); + //_box_klasses[T_OBJECT] = WK_KLASS(object_klass); + //_box_klasses[T_ARRAY] = WK_KLASS(object_klass); - // If this class isn't present, it won't be referenced. 
- _sun_misc_AtomicLongCSImpl_klass = resolve_or_null(vmSymbolHandles::sun_misc_AtomicLongCSImpl(), CHECK); #ifdef KERNEL - _sun_jkernel_DownloadManager_klass = resolve_or_null(vmSymbolHandles::sun_jkernel_DownloadManager(), CHECK); - if (_sun_jkernel_DownloadManager_klass == NULL) { + if (sun_jkernel_DownloadManager_klass() == NULL) { warning("Cannot find sun/jkernel/DownloadManager"); } #endif // KERNEL - - // Preload boxing klasses - _boolean_klass = resolve_or_fail(vmSymbolHandles::java_lang_Boolean(), true, CHECK); - _char_klass = resolve_or_fail(vmSymbolHandles::java_lang_Character(), true, CHECK); - _float_klass = resolve_or_fail(vmSymbolHandles::java_lang_Float(), true, CHECK); - _double_klass = resolve_or_fail(vmSymbolHandles::java_lang_Double(), true, CHECK); - _byte_klass = resolve_or_fail(vmSymbolHandles::java_lang_Byte(), true, CHECK); - _short_klass = resolve_or_fail(vmSymbolHandles::java_lang_Short(), true, CHECK); - _int_klass = resolve_or_fail(vmSymbolHandles::java_lang_Integer(), true, CHECK); - _long_klass = resolve_or_fail(vmSymbolHandles::java_lang_Long(), true, CHECK); - - _box_klasses[T_BOOLEAN] = _boolean_klass; - _box_klasses[T_CHAR] = _char_klass; - _box_klasses[T_FLOAT] = _float_klass; - _box_klasses[T_DOUBLE] = _double_klass; - _box_klasses[T_BYTE] = _byte_klass; - _box_klasses[T_SHORT] = _short_klass; - _box_klasses[T_INT] = _int_klass; - _box_klasses[T_LONG] = _long_klass; - //_box_klasses[T_OBJECT] = _object_klass; - //_box_klasses[T_ARRAY] = _object_klass; - { // Compute whether we should use loadClass or loadClassInternal when loading classes. methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature()); _has_loadClassInternal = (method != NULL); --- old/hotspot/src/share/vm/classfile/systemDictionary.hpp 2009-08-01 04:10:07.056705469 +0100 +++ new/hotspot/src/share/vm/classfile/systemDictionary.hpp 2009-08-01 04:10:06.974907421 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)systemDictionary.hpp 1.156 07/08/09 09:11:58 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -67,12 +67,133 @@ class HashtableBucket; class ResolutionErrorTable; +// Certain classes are preloaded, such as java.lang.Object and java.lang.String. +// They are all "well-known", in the sense that no class loader is allowed +// to provide a different definition. +// +// These klasses must all have names defined in vmSymbols. + +#define WK_KLASS_ENUM_NAME(kname) kname##_knum + +// Each well-known class has a short klass name (like object_klass), +// a vmSymbol name (like java_lang_Object), and a flag word +// that makes some minor distinctions, like whether the klass +// is preloaded, optional, release-specific, etc. +// The order of these definitions is significant; it is the order in which +// preloading is actually performed by initialize_preloaded_classes. 
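To make the comment above concrete, here is a minimal standalone sketch (not part of the patch) of the same X-macro technique that WK_KLASSES_DO below relies on: a single list is expanded once into an enum of ids and once into accessors. The three demo entries are placeholders, not the real list.

    #include <cstdio>

    #define DEMO_KLASSES_DO(do_klass)                          \
      do_klass(object_klass,          "java/lang/Object", Pre) \
      do_klass(string_klass,          "java/lang/String", Pre) \
      do_klass(java_nio_Buffer_klass, "java/nio/Buffer",  Opt)

    // Expansion 1: an enum of ids, in list order.
    enum DemoWKID {
      NO_WKID = 0,
    #define DEMO_ENUM(name, ignore_symbol, ignore_option) name##_knum,
      DEMO_KLASSES_DO(DEMO_ENUM)
    #undef DEMO_ENUM
      WKID_LIMIT,
      FIRST_WKID = NO_WKID + 1
    };

    // Expansion 2: one accessor per entry, generated from the same list.
    #define DEMO_DECLARE(name, symbol, ignore_option) \
      static const char* name() { return symbol; }
    DEMO_KLASSES_DO(DEMO_DECLARE)
    #undef DEMO_DECLARE

    int main() {
      std::printf("%d entries: %s, %s, %s\n",
                  (int)WKID_LIMIT - (int)FIRST_WKID,
                  object_klass(), string_klass(), java_nio_Buffer_klass());
      return 0;
    }

Adding another well-known class then means adding a single line to the list, which is the point of keeping the definitions in one ordered table.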
+ +#define WK_KLASSES_DO(template) \ + /* well-known classes */ \ + template(object_klass, java_lang_Object, Pre) \ + template(string_klass, java_lang_String, Pre) \ + template(class_klass, java_lang_Class, Pre) \ + template(cloneable_klass, java_lang_Cloneable, Pre) \ + template(classloader_klass, java_lang_ClassLoader, Pre) \ + template(serializable_klass, java_io_Serializable, Pre) \ + template(system_klass, java_lang_System, Pre) \ + template(throwable_klass, java_lang_Throwable, Pre) \ + template(error_klass, java_lang_Error, Pre) \ + template(threaddeath_klass, java_lang_ThreadDeath, Pre) \ + template(exception_klass, java_lang_Exception, Pre) \ + template(runtime_exception_klass, java_lang_RuntimeException, Pre) \ + template(protectionDomain_klass, java_security_ProtectionDomain, Pre) \ + template(AccessControlContext_klass, java_security_AccessControlContext, Pre) \ + template(classNotFoundException_klass, java_lang_ClassNotFoundException, Pre) \ + template(noClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre) \ + template(linkageError_klass, java_lang_LinkageError, Pre) \ + template(ClassCastException_klass, java_lang_ClassCastException, Pre) \ + template(ArrayStoreException_klass, java_lang_ArrayStoreException, Pre) \ + template(virtualMachineError_klass, java_lang_VirtualMachineError, Pre) \ + template(OutOfMemoryError_klass, java_lang_OutOfMemoryError, Pre) \ + template(StackOverflowError_klass, java_lang_StackOverflowError, Pre) \ + template(IllegalMonitorStateException_klass, java_lang_IllegalMonitorStateException, Pre) \ + template(reference_klass, java_lang_ref_Reference, Pre) \ + \ + /* Preload ref klasses and set reference types */ \ + template(soft_reference_klass, java_lang_ref_SoftReference, Pre) \ + template(weak_reference_klass, java_lang_ref_WeakReference, Pre) \ + template(final_reference_klass, java_lang_ref_FinalReference, Pre) \ + template(phantom_reference_klass, java_lang_ref_PhantomReference, Pre) \ + template(finalizer_klass, java_lang_ref_Finalizer, Pre) \ + \ + template(thread_klass, java_lang_Thread, Pre) \ + template(threadGroup_klass, java_lang_ThreadGroup, Pre) \ + template(properties_klass, java_util_Properties, Pre) \ + template(reflect_accessible_object_klass, java_lang_reflect_AccessibleObject, Pre) \ + template(reflect_field_klass, java_lang_reflect_Field, Pre) \ + template(reflect_method_klass, java_lang_reflect_Method, Pre) \ + template(reflect_constructor_klass, java_lang_reflect_Constructor, Pre) \ + \ + /* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \ + /* Universe::is_gte_jdk14x_version() is not set up by this point. */ \ + /* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \ + template(reflect_magic_klass, sun_reflect_MagicAccessorImpl, Opt) \ + template(reflect_method_accessor_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \ + template(reflect_constructor_accessor_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \ + template(reflect_delegating_classloader_klass, sun_reflect_DelegatingClassLoader, Opt) \ + template(reflect_constant_pool_klass, sun_reflect_ConstantPool, Opt_Only_JDK15) \ + template(reflect_unsafe_static_field_accessor_impl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \ + \ + template(vector_klass, java_util_Vector, Pre) \ + template(hashtable_klass, java_util_Hashtable, Pre) \ + template(stringBuffer_klass, java_lang_StringBuffer, Pre) \ + \ + /* It's NULL in non-1.4 JDKs. 
*/ \ + template(stackTraceElement_klass, java_lang_StackTraceElement, Opt) \ + /* Universe::is_gte_jdk14x_version() is not set up by this point. */ \ + /* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \ + template(java_nio_Buffer_klass, java_nio_Buffer, Opt) \ + \ + /* If this class isn't present, it won't be referenced. */ \ + template(sun_misc_AtomicLongCSImpl_klass, sun_misc_AtomicLongCSImpl, Opt) \ + \ + template(sun_jkernel_DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \ + \ + /* Preload boxing klasses */ \ + template(boolean_klass, java_lang_Boolean, Pre) \ + template(char_klass, java_lang_Character, Pre) \ + template(float_klass, java_lang_Float, Pre) \ + template(double_klass, java_lang_Double, Pre) \ + template(byte_klass, java_lang_Byte, Pre) \ + template(short_klass, java_lang_Short, Pre) \ + template(int_klass, java_lang_Integer, Pre) \ + template(long_klass, java_lang_Long, Pre) \ + /*end*/ + + class SystemDictionary : AllStatic { friend class VMStructs; friend class CompactingPermGenGen; NOT_PRODUCT(friend class instanceKlassKlass;) public: + enum WKID { + NO_WKID = 0, + + #define WK_KLASS_ENUM(name, ignore_s, ignore_o) WK_KLASS_ENUM_NAME(name), + WK_KLASSES_DO(WK_KLASS_ENUM) + #undef WK_KLASS_ENUM + + WKID_LIMIT, + + FIRST_WKID = NO_WKID + 1 + }; + + enum InitOption { + Pre, // preloaded; error if not present + + // Order is significant. Options before this point require resolve_or_fail. + // Options after this point will use resolve_or_null instead. + + Opt, // preload tried; NULL if not present + Opt_Only_JDK14NewRef, // preload tried; use only with NewReflection + Opt_Only_JDK15, // preload tried; use only with JDK1.5+ + Opt_Kernel, // preload tried only #ifdef KERNEL + OPTION_LIMIT, + CEIL_LG_OPTION_LIMIT = 4 // OPTION_LIMIT <= (1<* cp_patches, TRAPS); // Resolve from stream (called by jni_DefineClass and JVM_DefineClass) @@ -126,6 +257,9 @@ Handle protection_domain, TRAPS); + // If the given name is known to vmSymbols, return the well-know klass: + static klassOop find_well_known_klass(symbolOop class_name); + // Lookup an instance or array class that has already been loaded // either into the given class loader, or else into another class // loader that is constrained (via loader constraints) to produce @@ -238,85 +372,34 @@ return k; } -public: - static klassOop object_klass() { return check_klass(_object_klass); } - static klassOop string_klass() { return check_klass(_string_klass); } - static klassOop class_klass() { return check_klass(_class_klass); } - static klassOop cloneable_klass() { return check_klass(_cloneable_klass); } - static klassOop classloader_klass() { return check_klass(_classloader_klass); } - static klassOop serializable_klass() { return check_klass(_serializable_klass); } - static klassOop system_klass() { return check_klass(_system_klass); } - - static klassOop throwable_klass() { return check_klass(_throwable_klass); } - static klassOop error_klass() { return check_klass(_error_klass); } - static klassOop threaddeath_klass() { return check_klass(_threaddeath_klass); } - static klassOop exception_klass() { return check_klass(_exception_klass); } - static klassOop runtime_exception_klass() { return check_klass(_runtime_exception_klass); } - static klassOop classNotFoundException_klass() { return check_klass(_classNotFoundException_klass); } - static klassOop noClassDefFoundError_klass() { return check_klass(_noClassDefFoundError_klass); } - static klassOop linkageError_klass() { return 
check_klass(_linkageError_klass); } - static klassOop ClassCastException_klass() { return check_klass(_classCastException_klass); } - static klassOop ArrayStoreException_klass() { return check_klass(_arrayStoreException_klass); } - static klassOop virtualMachineError_klass() { return check_klass(_virtualMachineError_klass); } - static klassOop OutOfMemoryError_klass() { return check_klass(_outOfMemoryError_klass); } - static klassOop StackOverflowError_klass() { return check_klass(_StackOverflowError_klass); } - static klassOop IllegalMonitorStateException_klass() { return check_klass(_illegalMonitorStateException_klass); } - static klassOop protectionDomain_klass() { return check_klass(_protectionDomain_klass); } - static klassOop AccessControlContext_klass() { return check_klass(_AccessControlContext_klass); } - static klassOop reference_klass() { return check_klass(_reference_klass); } - static klassOop soft_reference_klass() { return check_klass(_soft_reference_klass); } - static klassOop weak_reference_klass() { return check_klass(_weak_reference_klass); } - static klassOop final_reference_klass() { return check_klass(_final_reference_klass); } - static klassOop phantom_reference_klass() { return check_klass(_phantom_reference_klass); } - static klassOop finalizer_klass() { return check_klass(_finalizer_klass); } - - static klassOop thread_klass() { return check_klass(_thread_klass); } - static klassOop threadGroup_klass() { return check_klass(_threadGroup_klass); } - static klassOop properties_klass() { return check_klass(_properties_klass); } - static klassOop reflect_accessible_object_klass() { return check_klass(_reflect_accessible_object_klass); } - static klassOop reflect_field_klass() { return check_klass(_reflect_field_klass); } - static klassOop reflect_method_klass() { return check_klass(_reflect_method_klass); } - static klassOop reflect_constructor_klass() { return check_klass(_reflect_constructor_klass); } - static klassOop reflect_method_accessor_klass() { - assert(JDK_Version::is_gte_jdk14x_version() && UseNewReflection, "JDK 1.4 only"); - return check_klass(_reflect_method_accessor_klass); + static klassOop check_klass_Pre(klassOop k) { return check_klass(k); } + static klassOop check_klass_Opt(klassOop k) { return k; } + static klassOop check_klass_Opt_Kernel(klassOop k) { return k; } //== Opt + static klassOop check_klass_Opt_Only_JDK15(klassOop k) { + assert(JDK_Version::is_gte_jdk15x_version(), "JDK 1.5 only"); + return k; } - static klassOop reflect_constructor_accessor_klass() { + static klassOop check_klass_Opt_Only_JDK14NewRef(klassOop k) { assert(JDK_Version::is_gte_jdk14x_version() && UseNewReflection, "JDK 1.4 only"); - return check_klass(_reflect_constructor_accessor_klass); - } - // NOTE: needed too early in bootstrapping process to have checks based on JDK version - static klassOop reflect_magic_klass() { return _reflect_magic_klass; } - static klassOop reflect_delegating_classloader_klass() { return _reflect_delegating_classloader_klass; } - static klassOop reflect_constant_pool_klass() { - assert(JDK_Version::is_gte_jdk15x_version(), "JDK 1.5 only"); - return _reflect_constant_pool_klass; + // despite the optional loading, if you use this it must be present: + return check_klass(k); } - static klassOop reflect_unsafe_static_field_accessor_impl_klass() { - assert(JDK_Version::is_gte_jdk15x_version(), "JDK 1.5 only"); - return _reflect_unsafe_static_field_accessor_impl_klass; + + static bool initialize_wk_klass(WKID id, int init_opt, TRAPS); + static void 
initialize_wk_klasses_until(WKID limit_id, WKID &start_id, TRAPS); + static void initialize_wk_klasses_through(WKID end_id, WKID &start_id, TRAPS) { + int limit = (int)end_id + 1; + initialize_wk_klasses_until((WKID) limit, start_id, THREAD); } - static klassOop vector_klass() { return check_klass(_vector_klass); } - static klassOop hashtable_klass() { return check_klass(_hashtable_klass); } - static klassOop stringBuffer_klass() { return check_klass(_stringBuffer_klass); } - static klassOop stackTraceElement_klass() { return check_klass(_stackTraceElement_klass); } - - static klassOop java_nio_Buffer_klass() { return check_klass(_java_nio_Buffer_klass); } - - static klassOop sun_misc_AtomicLongCSImpl_klass() { return _sun_misc_AtomicLongCSImpl_klass; } - - // To support incremental JRE downloads (KERNEL JRE). Null if not present. - static klassOop sun_jkernel_DownloadManager_klass() { return _sun_jkernel_DownloadManager_klass; } - - static klassOop boolean_klass() { return check_klass(_boolean_klass); } - static klassOop char_klass() { return check_klass(_char_klass); } - static klassOop float_klass() { return check_klass(_float_klass); } - static klassOop double_klass() { return check_klass(_double_klass); } - static klassOop byte_klass() { return check_klass(_byte_klass); } - static klassOop short_klass() { return check_klass(_short_klass); } - static klassOop int_klass() { return check_klass(_int_klass); } - static klassOop long_klass() { return check_klass(_long_klass); } +public: + #define WK_KLASS_DECLARE(name, ignore_symbol, option) \ + static klassOop name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); } + WK_KLASSES_DO(WK_KLASS_DECLARE); + #undef WK_KLASS_DECLARE + + // Local definition for direct access to the private array: + #define WK_KLASS(name) _well_known_klasses[SystemDictionary::WK_KLASS_ENUM_NAME(name)] static klassOop box_klass(BasicType t) { assert((uint)t < T_VOID+1, "range check"); @@ -338,9 +421,9 @@ // Tells whether ClassLoader.checkPackageAccess is present static bool has_checkPackageAccess() { return _has_checkPackageAccess; } - static bool class_klass_loaded() { return _class_klass != NULL; } - static bool cloneable_klass_loaded() { return _cloneable_klass != NULL; } - + static bool class_klass_loaded() { return WK_KLASS(class_klass) != NULL; } + static bool cloneable_klass_loaded() { return WK_KLASS(cloneable_klass) != NULL; } + // Returns default system loader static oop java_system_loader(); @@ -501,80 +584,12 @@ instanceKlassHandle k, Handle loader, TRAPS); // Variables holding commonly used klasses (preloaded) - static klassOop _object_klass; - static klassOop _string_klass; - static klassOop _class_klass; - static klassOop _cloneable_klass; - static klassOop _classloader_klass; - static klassOop _serializable_klass; - static klassOop _system_klass; - - static klassOop _throwable_klass; - static klassOop _error_klass; - static klassOop _threaddeath_klass; - static klassOop _exception_klass; - static klassOop _runtime_exception_klass; - static klassOop _classNotFoundException_klass; - static klassOop _noClassDefFoundError_klass; - static klassOop _linkageError_klass; - static klassOop _classCastException_klass; - static klassOop _arrayStoreException_klass; - static klassOop _virtualMachineError_klass; - static klassOop _outOfMemoryError_klass; - static klassOop _StackOverflowError_klass; - static klassOop _illegalMonitorStateException_klass; - static klassOop _protectionDomain_klass; - static klassOop 
_AccessControlContext_klass; - static klassOop _reference_klass; - static klassOop _soft_reference_klass; - static klassOop _weak_reference_klass; - static klassOop _final_reference_klass; - static klassOop _phantom_reference_klass; - static klassOop _finalizer_klass; - - static klassOop _thread_klass; - static klassOop _threadGroup_klass; - static klassOop _properties_klass; - static klassOop _reflect_accessible_object_klass; - static klassOop _reflect_field_klass; - static klassOop _reflect_method_klass; - static klassOop _reflect_constructor_klass; - // 1.4 reflection implementation - static klassOop _reflect_magic_klass; - static klassOop _reflect_method_accessor_klass; - static klassOop _reflect_constructor_accessor_klass; - static klassOop _reflect_delegating_classloader_klass; - // 1.5 annotations implementation - static klassOop _reflect_constant_pool_klass; - static klassOop _reflect_unsafe_static_field_accessor_impl_klass; - - static klassOop _stringBuffer_klass; - static klassOop _vector_klass; - static klassOop _hashtable_klass; - - static klassOop _stackTraceElement_klass; - - static klassOop _java_nio_Buffer_klass; - - static klassOop _sun_misc_AtomicLongCSImpl_klass; - - // KERNEL JRE support. - static klassOop _sun_jkernel_DownloadManager_klass; + static klassOop _well_known_klasses[]; // Lazily loaded klasses static volatile klassOop _abstract_ownable_synchronizer_klass; - // Box klasses - static klassOop _boolean_klass; - static klassOop _char_klass; - static klassOop _float_klass; - static klassOop _double_klass; - static klassOop _byte_klass; - static klassOop _short_klass; - static klassOop _int_klass; - static klassOop _long_klass; - - // table of same + // table of box klasses (int_klass, etc.) static klassOop _box_klasses[T_VOID+1]; static oop _java_system_loader; --- old/hotspot/src/share/vm/classfile/verifier.cpp 2009-08-01 04:10:08.019875664 +0100 +++ new/hotspot/src/share/vm/classfile/verifier.cpp 2009-08-01 04:10:07.925776829 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)verifier.cpp 1.113 07/05/23 10:53:19 JVM" #endif /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1208,7 +1208,7 @@ case Bytecodes::_arraylength : type = current_frame.pop_stack( VerificationType::reference_check(), CHECK_VERIFY(this)); - if (!type.is_array()) { + if (!(type.is_null() || type.is_array())) { verify_error(bci, bad_type_msg, "arraylength"); } current_frame.push_stack( @@ -1603,7 +1603,11 @@ types = (1 << JVM_CONSTANT_Double) | (1 << JVM_CONSTANT_Long); verify_cp_type(index, cp, types, CHECK_VERIFY(this)); } - if (tag.is_string() || tag.is_unresolved_string()) { + if (tag.is_string() && cp->is_pseudo_string_at(index)) { + current_frame->push_stack( + VerificationType::reference_type( + vmSymbols::java_lang_Object()), CHECK_VERIFY(this)); + } else if (tag.is_string() || tag.is_unresolved_string()) { current_frame->push_stack( VerificationType::reference_type( vmSymbols::java_lang_String()), CHECK_VERIFY(this)); --- old/hotspot/src/share/vm/classfile/vmSymbols.cpp 2009-08-01 04:10:08.926754470 +0100 +++ new/hotspot/src/share/vm/classfile/vmSymbols.cpp 2009-08-01 04:10:08.849381896 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vmSymbols.cpp 1.29 07/07/19 19:08:29 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. 
+ * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -321,6 +321,11 @@ const int neg = JVM_ACC_SYNCHRONIZED; return (flags & (req | neg)) == req; } +inline bool match_F_RNY(jshort flags) { + const int req = JVM_ACC_NATIVE | JVM_ACC_SYNCHRONIZED; + const int neg = JVM_ACC_STATIC; + return (flags & (req | neg)) == req; +} // These are for forming case labels: #define ID3(x, y, z) (( jint)(z) + \ @@ -362,6 +367,7 @@ case F_RN: fname = "native "; break; case F_SN: fname = "native static "; break; case F_S: fname = "static "; break; + case F_RNY:fname = "native synchronized "; break; } const char* kptr = strrchr(kname, '/'); if (kptr != NULL) kname = kptr + 1; @@ -488,7 +494,7 @@ if (PrintMiscellaneous && (WizardMode || Verbose)) { tty->print_cr("*** misidentified method; %s(%d) should be %s(%d):", declared_name, declared_id, actual_name, actual_id); - m->print_short_name(tty); + mh()->print_short_name(tty); tty->cr(); } } --- old/hotspot/src/share/vm/classfile/vmSymbols.hpp 2009-08-01 04:10:09.769358849 +0100 +++ new/hotspot/src/share/vm/classfile/vmSymbols.hpp 2009-08-01 04:10:09.683168093 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vmSymbols.hpp 1.166 07/11/01 16:55:02 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,6 +52,7 @@ template(java_lang_Object, "java/lang/Object") \ template(java_lang_Class, "java/lang/Class") \ template(java_lang_String, "java/lang/String") \ + template(java_lang_StringValue, "java/lang/StringValue") \ template(java_lang_Thread, "java/lang/Thread") \ template(java_lang_ThreadGroup, "java/lang/ThreadGroup") \ template(java_lang_Cloneable, "java/lang/Cloneable") \ @@ -286,6 +287,7 @@ template(cache_field_name, "cache") \ template(value_name, "value") \ template(frontCacheEnabled_name, "frontCacheEnabled") \ + template(stringCacheEnabled_name, "stringCacheEnabled") \ \ /* non-intrinsic name/signature pairs: */ \ template(register_method_name, "register") \ @@ -567,6 +569,10 @@ do_name( copyOfRange_name, "copyOfRange") \ do_signature(copyOfRange_signature, "([Ljava/lang/Object;IILjava/lang/Class;)[Ljava/lang/Object;") \ \ + do_intrinsic(_equalsC, java_util_Arrays, equals_name, equalsC_signature, F_S) \ + do_name( equals_name, "equals") \ + do_signature(equalsC_signature, "([C[C)Z") \ + \ do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \ /* (symbols invoke_name and invoke_signature defined above) */ \ \ @@ -587,6 +593,8 @@ do_name( attemptUpdate_name, "attemptUpdate") \ do_signature(attemptUpdate_signature, "(JJ)Z") \ \ + do_intrinsic(_fillInStackTrace, java_lang_Throwable, fillInStackTrace_name, void_throwable_signature, F_RNY) \ + \ /* support for sun.misc.Unsafe */ \ do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \ \ @@ -874,7 +882,8 @@ F_R, // !static !synchronized (R="regular") F_S, // static !synchronized F_RN, // !static native !synchronized - F_SN // static native !synchronized + F_SN, // static native !synchronized + F_RNY // !static native synchronized }; public: --- old/hotspot/src/share/vm/code/codeCache.hpp 2009-08-01 04:10:10.741789013 +0100 +++ new/hotspot/src/share/vm/code/codeCache.hpp 2009-08-01 
04:10:10.656532281 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)codeCache.hpp 1.68 07/09/01 18:01:02 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,10 +73,25 @@ // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know // what you are doing) static CodeBlob* find_blob_unsafe(void* start) { - CodeBlob* result = (CodeBlob*)_heap->find_start(start); - assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob"); - return result; - } + CodeBlob* result = (CodeBlob*)_heap->find_start(start); + // this assert is too strong because the heap code will return the + // heapblock containing start. That block can often be larger than + // the codeBlob itself. If you look up an address that is within + // the heapblock but not in the codeBlob you will assert. + // + // Most things will not lookup such bad addresses. However + // AsyncGetCallTrace can see intermediate frames and get that kind + // of invalid address and so can a developer using hsfind. + // + // The more correct answer is to return NULL if blob_contains() returns + // false. + // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob"); + + if (result != NULL && !result->blob_contains((address)start)) { + result = NULL; + } + return result; + } // Iteration static CodeBlob* first(); --- old/hotspot/src/share/vm/code/debugInfo.cpp 2009-08-01 04:10:11.509485811 +0100 +++ new/hotspot/src/share/vm/code/debugInfo.cpp 2009-08-01 04:10:11.427740091 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)debugInfo.cpp 1.35 07/07/27 16:12:09 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,7 +50,8 @@ } #endif ObjectValue* result = new ObjectValue(id); - _obj_pool->append(result); + // Cache the object since an object field could reference it. + _obj_pool->push(result); result->read_object(this); return result; } @@ -59,9 +60,9 @@ int id = read_int(); assert(_obj_pool != NULL, "object pool does not exist"); for (int i = _obj_pool->length() - 1; i >= 0; i--) { - ObjectValue* sv = (ObjectValue*) _obj_pool->at(i); - if (sv->id() == id) { - return sv; + ObjectValue* ov = (ObjectValue*) _obj_pool->at(i); + if (ov->id() == id) { + return ov; } } ShouldNotReachHere(); --- old/hotspot/src/share/vm/code/dependencies.cpp 2009-08-01 04:10:12.394694431 +0100 +++ new/hotspot/src/share/vm/code/dependencies.cpp 2009-08-01 04:10:12.304081141 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)dependencies.cpp 1.18 08/02/29 12:46:18 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1502,9 +1502,12 @@ // fall through: _change_type = Change_new_sub; case Change_new_sub: - _klass = instanceKlass::cast(_klass)->super(); - if (_klass != NULL) { - return true; + // 6598190: brackets workaround Sun Studio C++ compiler bug 6629277 + { + _klass = instanceKlass::cast(_klass)->super(); + if (_klass != NULL) { + return true; + } } // else set up _ti_limit and fall through: _ti_limit = (_ti_base == NULL) ? 0 : _ti_base->length(); --- old/hotspot/src/share/vm/code/location.cpp 2009-08-01 04:10:13.327906345 +0100 +++ new/hotspot/src/share/vm/code/location.cpp 2009-08-01 04:10:13.259388913 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)location.cpp 1.40 07/05/05 17:05:21 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ #include "incls/_location.cpp.incl" void Location::print_on(outputStream* st) const { - if(type() == invalid && !legal_offset_in_bytes(offset() * BytesPerInt)) { + if(type() == invalid) { // product of Location::invalid_loc() or Location::Location(). switch (where()) { case on_stack: st->print("empty"); break; @@ -45,6 +45,7 @@ switch (type()) { case normal: break; case oop: st->print(",oop"); break; + case narrowoop: st->print(",narrowoop"); break; case int_in_long: st->print(",int"); break; case lng: st->print(",long"); break; case float_in_dbl: st->print(",float"); break; @@ -56,18 +57,18 @@ Location::Location(DebugInfoReadStream* stream) { - _value = (uint16_t) stream->read_int(); + _value = (juint) stream->read_int(); } void Location::write_on(DebugInfoWriteStream* stream) { - stream->write_int(_value & 0x0000FFFF); + stream->write_int(_value); } // Valid argument to Location::new_stk_loc()? bool Location::legal_offset_in_bytes(int offset_in_bytes) { if ((offset_in_bytes % BytesPerInt) != 0) return false; - return (offset_in_bytes / BytesPerInt) < (OFFSET_MASK >> OFFSET_SHIFT); + return (juint)(offset_in_bytes / BytesPerInt) < (OFFSET_MASK >> OFFSET_SHIFT); } --- old/hotspot/src/share/vm/code/location.hpp 2009-08-01 04:10:14.129569693 +0100 +++ new/hotspot/src/share/vm/code/location.hpp 2009-08-01 04:10:14.037135765 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)location.hpp 1.47 07/05/05 17:05:21 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,10 +31,10 @@ // // Encoding: // -// bits: -// Where: [15] -// Type: [14..12] -// Offset: [11..0] +// bits (use low bits for best compression): +// Type: [3..0] +// Where: [4] +// Offset: [31..5] class Location VALUE_OBJ_CLASS_SPEC { friend class VMStructs; @@ -45,6 +45,7 @@ }; enum Type { + invalid, // Invalid location normal, // Ints, floats, double halves oop, // Oop (please GC me!) int_in_long, // Integer held in long register @@ -52,21 +53,21 @@ float_in_dbl, // Float held in double register dbl, // Double held in one register addr, // JSR return address - invalid // Invalid location + narrowoop // Narrow Oop (please GC me!) 
}; private: enum { - OFFSET_MASK = (jchar) 0x0FFF, - OFFSET_SHIFT = 0, - TYPE_MASK = (jchar) 0x7000, - TYPE_SHIFT = 12, - WHERE_MASK = (jchar) 0x8000, - WHERE_SHIFT = 15 + TYPE_MASK = (juint) 0x0F, + TYPE_SHIFT = 0, + WHERE_MASK = (juint) 0x10, + WHERE_SHIFT = 4, + OFFSET_MASK = (juint) 0xFFFFFFE0, + OFFSET_SHIFT = 5 }; - - uint16_t _value; + + juint _value; // Create a bit-packed Location Location(Where where_, Type type_, unsigned offset_) { @@ -77,9 +78,9 @@ } inline void set(Where where_, Type type_, unsigned offset_) { - _value = (uint16_t) ((where_ << WHERE_SHIFT) | - (type_ << TYPE_SHIFT) | - ((offset_ << OFFSET_SHIFT) & OFFSET_MASK)); + _value = (juint) ((where_ << WHERE_SHIFT) | + (type_ << TYPE_SHIFT) | + ((offset_ << OFFSET_SHIFT) & OFFSET_MASK)); } public: @@ -89,7 +90,7 @@ // Register location Factory static Location new_reg_loc( Type t, VMReg reg ) { return Location(in_register, t, reg->value()); } // Default constructor - Location() { set(on_stack,invalid,(unsigned) -1); } + Location() { set(on_stack,invalid,0); } // Bit field accessors Where where() const { return (Where) ((_value & WHERE_MASK) >> WHERE_SHIFT);} --- old/hotspot/src/share/vm/code/nmethod.cpp 2009-08-01 04:10:14.992702074 +0100 +++ new/hotspot/src/share/vm/code/nmethod.cpp 2009-08-01 04:10:14.906708855 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)nmethod.cpp 1.371 08/02/29 12:46:11 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,6 @@ #ifdef DTRACE_ENABLED - // Only bother with this argument setup if dtrace is available HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load, @@ -441,7 +440,6 @@ { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); - const int dummy = -1; // Flag to force proper "operator new" CodeOffsets offsets; offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); @@ -464,6 +462,41 @@ return nm; } +#ifdef HAVE_DTRACE_H +nmethod* nmethod::new_dtrace_nmethod(methodHandle method, + CodeBuffer *code_buffer, + int vep_offset, + int trap_offset, + int frame_complete, + int frame_size) { + // create nmethod + nmethod* nm = NULL; + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + int nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); + CodeOffsets offsets; + offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); + offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset); + offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); + + nm = new (nmethod_size) nmethod(method(), nmethod_size, &offsets, code_buffer, frame_size); + + NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm)); + if (PrintAssembly && nm != NULL) + Disassembler::decode(nm); + } + // verify nmethod + debug_only(if (nm) nm->verify();) // might block + + if (nm != NULL) { + nm->log_new_nmethod(); + } + + return nm; +} + +#endif // def HAVE_DTRACE_H + nmethod* nmethod::new_nmethod(methodHandle method, int compile_id, int entry_bci, @@ -561,6 +594,9 @@ _exception_offset = 0; _deoptimize_offset = 0; _orig_pc_offset = 0; +#ifdef HAVE_DTRACE_H + _trap_offset = 0; +#endif // def HAVE_DTRACE_H _stub_offset = data_offset(); _consts_offset = data_offset(); _scopes_data_offset = data_offset(); 
@@ -618,6 +654,90 @@ Events::log("Create nmethod " INTPTR_FORMAT, this); } +// For dtrace wrappers +#ifdef HAVE_DTRACE_H +nmethod::nmethod( + methodOop method, + int nmethod_size, + CodeOffsets* offsets, + CodeBuffer* code_buffer, + int frame_size) + : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod), + nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL), + _compiled_synchronized_native_basic_lock_owner_sp_offset(in_ByteSize(-1)), + _compiled_synchronized_native_basic_lock_sp_offset(in_ByteSize(-1)) +{ + { + debug_only(No_Safepoint_Verifier nsv;) + assert_locked_or_safepoint(CodeCache_lock); + + NOT_PRODUCT(_has_debug_info = false; ) + _method = method; + _entry_bci = InvocationEntryBci; + _link = NULL; + _compiler = NULL; + // We have no exception handler or deopt handler make the + // values something that will never match a pc like the nmethod vtable entry + _exception_offset = 0; + _deoptimize_offset = 0; + _trap_offset = offsets->value(CodeOffsets::Dtrace_trap); + _orig_pc_offset = 0; + _stub_offset = data_offset(); + _consts_offset = data_offset(); + _scopes_data_offset = data_offset(); + _scopes_pcs_offset = _scopes_data_offset; + _dependencies_offset = _scopes_pcs_offset; + _handler_table_offset = _dependencies_offset; + _nul_chk_table_offset = _handler_table_offset; + _nmethod_end_offset = _nul_chk_table_offset; + _compile_id = 0; // default + _comp_level = CompLevel_none; + _entry_point = instructions_begin(); + _verified_entry_point = instructions_begin() + offsets->value(CodeOffsets::Verified_Entry); + _osr_entry_point = NULL; + _exception_cache = NULL; + _pc_desc_cache.reset_to(NULL); + + flags.clear(); + flags.state = alive; + _markedForDeoptimization = 0; + + _lock_count = 0; + _stack_traversal_mark = 0; + + code_buffer->copy_oops_to(this); + debug_only(check_store();) + CodeCache::commit(this); + VTune::create_nmethod(this); + } + + if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) { + ttyLocker ttyl; // keep the following output all in one block + // This output goes directly to the tty, not the compiler log. + // To enable tools to match it up with the compilation activity, + // be sure to tag this tty output with the compile ID. 
+ if (xtty != NULL) { + xtty->begin_head("print_dtrace_nmethod"); + xtty->method(_method); + xtty->stamp(); + xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this); + } + // print the header part first + print(); + // then print the requested information + if (PrintNMethods) { + print_code(); + } + if (PrintRelocations) { + print_relocations(); + } + if (xtty != NULL) { + xtty->tail("print_dtrace_nmethod"); + } + } + Events::log("Create nmethod " INTPTR_FORMAT, this); +} +#endif // def HAVE_DTRACE_H void* nmethod::operator new(size_t size, int nmethod_size) { // Always leave some room in the CodeCache for I2C/C2I adapters @@ -661,6 +781,9 @@ _link = NULL; _compiler = compiler; _orig_pc_offset = orig_pc_offset; +#ifdef HAVE_DTRACE_H + _trap_offset = 0; +#endif // def HAVE_DTRACE_H _stub_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->stubs()->start()); // Exception handler and deopt handler are in the stub section @@ -710,7 +833,9 @@ " entry points must be same for static methods and vice versa"); } - bool printnmethods = PrintNMethods || CompilerOracle::has_option_string(_method, "PrintNMethods"); + bool printnmethods = PrintNMethods + || CompilerOracle::should_print(_method) + || CompilerOracle::has_option_string(_method, "PrintNMethods"); if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) { print_nmethod(printnmethods); } @@ -801,7 +926,6 @@ } -#ifndef PRODUCT void nmethod::print_nmethod(bool printmethod) { ttyLocker ttyl; // keep the following output all in one block if (xtty != NULL) { @@ -834,7 +958,6 @@ xtty->tail("print_nmethod"); } } -#endif void nmethod::set_version(int v) { @@ -1230,11 +1353,7 @@ return false; } } - if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) { - // Cannot do this test if verification of the UseParallelOldGC - // code using the PSMarkSweep code is being done. - assert(unloading_occurred, "Inconsistency in unloading"); - } + assert(unloading_occurred, "Inconsistency in unloading"); make_unloaded(is_alive, obj); return true; } @@ -1873,6 +1992,7 @@ } } +#endif // PRODUCT // Printing operations @@ -1887,7 +2007,6 @@ } else if (is_compiled_by_c2()) { tty->print("(c2) "); } else { - assert(is_native_method(), "Who else?"); tty->print("(nm) "); } @@ -1951,6 +2070,14 @@ oops_size()); } +void nmethod::print_code() { + HandleMark hm; + ResourceMark m; + Disassembler::decode(this); +} + + +#ifndef PRODUCT void nmethod::print_scopes() { // Find the first pc desc for all scopes in the code and print it. @@ -1982,13 +2109,6 @@ } -void nmethod::print_code() { - HandleMark hm; - ResourceMark m; - Disassembler().decode(this); -} - - void nmethod::print_relocations() { ResourceMark m; // in case methods get printed via the debugger tty->print_cr("relocations:"); @@ -2024,6 +2144,7 @@ } } +#endif // PRODUCT const char* nmethod::reloc_string_for(u_char* begin, u_char* end) { RelocIterator iter(this, begin, end); @@ -2058,7 +2179,6 @@ return have_one ? 
"other" : NULL; } - // Return a the last scope in (begin..end] ScopeDesc* nmethod::scope_desc_in(address begin, address end) { PcDesc* p = pc_desc_near(begin+1); @@ -2081,29 +2201,26 @@ address pc = base + om->offset(); if (pc > begin) { if (pc <= end) { - st->fill_to(column); - if (st == tty) { - st->print("; OopMap "); - om->print(); - tty->cr(); - } else { - st->print_cr("; OopMap #%d offset:%d", i, om->offset()); - } + st->move_to(column); + st->print("; "); + om->print_on(st); } break; } } } - ScopeDesc* sd = scope_desc_in(begin, end); + + // Print any debug info present at this pc. + ScopeDesc* sd = scope_desc_in(begin, end); if (sd != NULL) { - st->fill_to(column); + st->move_to(column); if (sd->bci() == SynchronizationEntryBCI) { st->print(";*synchronization entry"); } else { if (sd->method().is_null()) { - tty->print("method is NULL"); + st->print("method is NULL"); } else if (sd->method()->is_native()) { - tty->print("method is native"); + st->print("method is native"); } else { address bcp = sd->method()->bcp_from(sd->bci()); Bytecodes::Code bc = Bytecodes::java_code_at(bcp); @@ -2140,13 +2257,13 @@ } } } - st->cr(); + // Print all scopes for (;sd != NULL; sd = sd->sender()) { - st->fill_to(column); + st->move_to(column); st->print("; -"); if (sd->method().is_null()) { - tty->print("method is NULL"); + st->print("method is NULL"); } else { sd->method()->print_short_name(st); } @@ -2164,17 +2281,19 @@ const char* str = reloc_string_for(begin, end); if (str != NULL) { if (sd != NULL) st->cr(); - st->fill_to(column); + st->move_to(column); st->print("; {%s}", str); } int cont_offset = ImplicitExceptionTable(this).at(begin - instructions_begin()); if (cont_offset != 0) { - st->fill_to(column); + st->move_to(column); st->print("; implicit exception: dispatches to " INTPTR_FORMAT, instructions_begin() + cont_offset); } } +#ifndef PRODUCT + void nmethod::print_value_on(outputStream* st) const { print_on(st, "nmethod"); } --- old/hotspot/src/share/vm/code/nmethod.hpp 2009-08-01 04:10:15.947172427 +0100 +++ new/hotspot/src/share/vm/code/nmethod.hpp 2009-08-01 04:10:15.874476878 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)nmethod.hpp 1.171 07/09/01 18:01:02 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -143,6 +143,9 @@ int _exception_offset; // All deoptee's will resume execution at this location described by this offset int _deoptimize_offset; +#ifdef HAVE_DTRACE_H + int _trap_offset; +#endif // def HAVE_DTRACE_H int _stub_offset; int _consts_offset; int _scopes_data_offset; @@ -214,6 +217,15 @@ ByteSize basic_lock_sp_offset, /* synchronized natives only */ OopMapSet* oop_maps); +#ifdef HAVE_DTRACE_H + // For native wrappers + nmethod(methodOop method, + int nmethod_size, + CodeOffsets* offsets, + CodeBuffer *code_buffer, + int frame_size); +#endif // def HAVE_DTRACE_H + // Creation support nmethod(methodOop method, int nmethod_size, @@ -275,6 +287,22 @@ ByteSize basic_lock_sp_offset, OopMapSet* oop_maps); +#ifdef HAVE_DTRACE_H + // The method we generate for a dtrace probe has to look + // like an nmethod as far as the rest of the system is concerned + // which is somewhat unfortunate. 
+ static nmethod* new_dtrace_nmethod(methodHandle method, + CodeBuffer *code_buffer, + int vep_offset, + int trap_offset, + int frame_complete, + int frame_size); + + int trap_offset() const { return _trap_offset; } + address trap_address() const { return code_begin() + _trap_offset; } + +#endif // def HAVE_DTRACE_H + // accessors methodOop method() const { return _method; } AbstractCompiler* compiler() const { return _compiler; } @@ -488,8 +516,8 @@ void verify_interrupt_point(address interrupt_point); // printing support - void print() const PRODUCT_RETURN; - void print_code() PRODUCT_RETURN; + void print() const; + void print_code(); void print_relocations() PRODUCT_RETURN; void print_pcs() PRODUCT_RETURN; void print_scopes() PRODUCT_RETURN; @@ -498,7 +526,7 @@ void print_calls(outputStream* st) PRODUCT_RETURN; void print_handler_table() PRODUCT_RETURN; void print_nul_chk_table() PRODUCT_RETURN; - void print_nmethod(bool print_code) PRODUCT_RETURN; + void print_nmethod(bool print_code); void print_on(outputStream* st, const char* title) const; @@ -508,7 +536,7 @@ void log_state_change(int state) const; // Prints a comment for one native instruction (reloc info, pc desc) - void print_code_comment_on(outputStream* st, int column, address begin, address end) PRODUCT_RETURN; + void print_code_comment_on(outputStream* st, int column, address begin, address end); static void print_statistics() PRODUCT_RETURN; // Compiler task identification. Note that all OSR methods --- old/hotspot/src/share/vm/code/relocInfo.hpp 2009-08-01 04:10:16.955605479 +0100 +++ new/hotspot/src/share/vm/code/relocInfo.hpp 2009-08-01 04:10:16.861827577 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)relocInfo.hpp 1.87 07/06/19 09:08:11 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1203,11 +1203,13 @@ class poll_Relocation : public Relocation { bool is_data() { return true; } relocInfo::relocType type() { return relocInfo::poll_type; } + void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest); }; class poll_return_Relocation : public Relocation { bool is_data() { return true; } relocInfo::relocType type() { return relocInfo::poll_return_type; } + void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest); }; --- old/hotspot/src/share/vm/code/scopeDesc.cpp 2009-08-01 04:10:17.858290611 +0100 +++ new/hotspot/src/share/vm/code/scopeDesc.cpp 2009-08-01 04:10:17.787087170 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)scopeDesc.cpp 1.58 07/07/27 16:13:17 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -94,7 +94,9 @@ DebugInfoReadStream* stream = new DebugInfoReadStream(_code, decode_offset, result); int length = stream->read_int(); for (int index = 0; index < length; index++) { - result->push(ScopeValue::read_from(stream)); + // Objects values are pushed to 'result' array during read so that + // object's fields could reference it (OBJECT_ID_CODE). 
+ (void)ScopeValue::read_from(stream); } assert(result->length() == length, "inconsistent debug information"); return result; --- old/hotspot/src/share/vm/code/vmreg.cpp 2009-08-01 04:10:18.629485422 +0100 +++ new/hotspot/src/share/vm/code/vmreg.cpp 2009-08-01 04:10:18.560865278 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vmreg.cpp 1.35 07/05/05 17:05:22 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,16 +39,14 @@ // Register names const char *VMRegImpl::regName[ConcreteRegisterImpl::number_of_registers]; -void VMRegImpl::print() { -#ifndef PRODUCT +void VMRegImpl::print_on(outputStream* st) const { if( is_reg() ) { assert( VMRegImpl::regName[value()], "" ); - tty->print("%s",VMRegImpl::regName[value()]); + st->print("%s",VMRegImpl::regName[value()]); } else if (is_stack()) { int stk = value() - stack0->value(); - tty->print("[%d]", stk*4); + st->print("[%d]", stk*4); } else { - tty->print("BAD!"); + st->print("BAD!"); } -#endif // PRODUCT } --- old/hotspot/src/share/vm/code/vmreg.hpp 2009-08-01 04:10:19.461076856 +0100 +++ new/hotspot/src/share/vm/code/vmreg.hpp 2009-08-01 04:10:19.379371648 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vmreg.hpp 1.37 07/05/05 17:05:22 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,9 +69,9 @@ } } static VMReg Bad() { return (VMReg) (intptr_t) BAD; } - bool is_valid() { return ((intptr_t) this) != BAD; } - bool is_stack() { return (intptr_t) this >= (intptr_t) stack0; } - bool is_reg() { return is_valid() && !is_stack(); } + bool is_valid() const { return ((intptr_t) this) != BAD; } + bool is_stack() const { return (intptr_t) this >= (intptr_t) stack0; } + bool is_reg() const { return is_valid() && !is_stack(); } // A concrete register is a value that returns true for is_reg() and is // also a register you could use in the assembler. On machines with @@ -99,7 +99,8 @@ intptr_t value() const {return (intptr_t) this; } - void print(); + void print_on(outputStream* st) const; + void print() const { print_on(tty); } // bias a stack slot. 
// Typically used to adjust a virtual frame slots by amounts that are offset by @@ -158,22 +159,22 @@ _first = ptr; } // Return true if single register, even if the pair is really just adjacent stack slots - bool is_single_reg() { + bool is_single_reg() const { return (_first->is_valid()) && (_first->value() + 1 == _second->value()); } // Return true if single stack based "register" where the slot alignment matches input alignment - bool is_adjacent_on_stack(int alignment) { + bool is_adjacent_on_stack(int alignment) const { return (_first->is_stack() && (_first->value() + 1 == _second->value()) && ((_first->value() & (alignment-1)) == 0)); } // Return true if single stack based "register" where the slot alignment matches input alignment - bool is_adjacent_aligned_on_stack(int alignment) { + bool is_adjacent_aligned_on_stack(int alignment) const { return (_first->is_stack() && (_first->value() + 1 == _second->value()) && ((_first->value() & (alignment-1)) == 0)); } // Return true if single register but adjacent stack slots do not count - bool is_single_phys_reg() { + bool is_single_phys_reg() const { return (_first->is_reg() && (_first->value() + 1 == _second->value())); } --- old/hotspot/src/share/vm/compiler/methodLiveness.cpp 2009-08-01 04:10:20.324475765 +0100 +++ new/hotspot/src/share/vm/compiler/methodLiveness.cpp 2009-08-01 04:10:20.238975767 +0100 @@ -79,8 +79,9 @@ BitCounter() : _count(0) {} // Callback when bit in map is set - virtual void do_bit(size_t offset) { + virtual bool do_bit(size_t offset) { _count++; + return true; } int count() { @@ -470,7 +471,7 @@ bci = 0; } - MethodLivenessResult answer(NULL,0); + MethodLivenessResult answer((uintptr_t*)NULL,0); if (_block_count > 0) { if (TimeLivenessAnalysis) _time_total.start(); --- old/hotspot/src/share/vm/compiler/methodLiveness.hpp 2009-08-01 04:10:21.240460416 +0100 +++ new/hotspot/src/share/vm/compiler/methodLiveness.hpp 2009-08-01 04:10:21.165844711 +0100 @@ -32,7 +32,7 @@ bool _is_valid; public: - MethodLivenessResult(uintptr_t* map, idx_t size_in_bits) + MethodLivenessResult(BitMap::bm_word_t* map, idx_t size_in_bits) : BitMap(map, size_in_bits) , _is_valid(false) {} --- old/hotspot/src/share/vm/compiler/oopMap.cpp 2009-08-01 04:10:22.049427366 +0100 +++ new/hotspot/src/share/vm/compiler/oopMap.cpp 2009-08-01 04:10:21.963222916 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)oopMap.cpp 1.153 07/09/28 10:23:20 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -172,11 +172,8 @@ } -void OopMap::set_dead(VMReg reg) { - // At this time, we only need dead entries in our OopMap when ZapDeadCompiledLocals is active. 
- if (ZapDeadCompiledLocals) { - set_xxx(reg, OopMapValue::dead_value, VMRegImpl::Bad()); - } +void OopMap::set_narrowoop(VMReg reg) { + set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad()); } @@ -194,10 +191,6 @@ } } -void OopMap::set_stack_obj(VMReg reg) { - set_xxx(reg, OopMapValue::stack_obj, VMRegImpl::Bad()); -} - // OopMapSet OopMapSet::OopMapSet() { @@ -308,7 +301,9 @@ } class DoNothingClosure: public OopClosure { -public: void do_oop(oop* p) {} + public: + void do_oop(oop* p) {} + void do_oop(narrowOop* p) {} }; static DoNothingClosure do_nothing; @@ -352,24 +347,22 @@ void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) { // add derived oops to a table - all_do(fr, reg_map, f, add_derived_oop, &do_nothing, &do_nothing); + all_do(fr, reg_map, f, add_derived_oop, &do_nothing); } void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map, OopClosure* oop_fn, void derived_oop_fn(oop*, oop*), - OopClosure* value_fn, OopClosure* dead_fn) { + OopClosure* value_fn) { CodeBlob* cb = fr->cb(); - { - assert(cb != NULL, "no codeblob"); - } + assert(cb != NULL, "no codeblob"); NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);) OopMapSet* maps = cb->oop_maps(); - OopMap* map = cb->oop_map_for_return_address(fr->pc()); - assert(map != NULL, " no ptr map found"); - + OopMap* map = cb->oop_map_for_return_address(fr->pc()); + assert(map != NULL, "no ptr map found"); + // handle derived pointers first (otherwise base pointer may be // changed before derived pointer offset has been collected) OopMapValue omv; @@ -396,8 +389,8 @@ } } - // We want dead, value and oop oop_types - int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::dead_value; + // We want coop, value and oop oop_types + int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::narrowoop_value; { for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) { omv = oms.current(); @@ -405,11 +398,14 @@ if ( loc != NULL ) { if ( omv.type() == OopMapValue::oop_value ) { #ifdef ASSERT - if (COMPILER2_PRESENT(!DoEscapeAnalysis &&) !Universe::heap()->is_in_or_null(*loc)) { + if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) || + !Universe::heap()->is_in_or_null(*loc)) { tty->print_cr("# Found non oop pointer. Dumping state at failure"); // try to dump out some helpful debugging information trace_codeblob_maps(fr, reg_map); omv.print(); + tty->print_cr("register r"); + omv.reg()->print(); tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc); // do the real assert. assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer"); @@ -418,23 +414,21 @@ oop_fn->do_oop(loc); } else if ( omv.type() == OopMapValue::value_value ) { value_fn->do_oop(loc); - } else if ( omv.type() == OopMapValue::dead_value ) { - dead_fn->do_oop(loc); + } else if ( omv.type() == OopMapValue::narrowoop_value ) { + narrowOop *nl = (narrowOop*)loc; +#ifndef VM_LITTLE_ENDIAN + if (!omv.reg()->is_stack()) { + // compressed oops in registers only take up 4 bytes of an + // 8 byte register but they are in the wrong part of the + // word so adjust loc to point at the right place. 
+ nl = (narrowOop*)((address)nl + 4); + } +#endif + oop_fn->do_oop(nl); } } } } - -#ifdef COMPILER2 - if (DoEscapeAnalysis) { - for (OopMapStream oms(map, OopMapValue::stack_obj); !oms.is_done(); oms.next()) { - omv = oms.current(); - assert(omv.is_stack_loc(), "should refer to stack location"); - oop loc = (oop) fr->oopmapreg_to_location(omv.reg(),reg_map); - oop_fn->do_oop(&loc); - } - } -#endif // COMPILER2 } @@ -508,28 +502,30 @@ #endif // COMPILER2 } +#endif //PRODUCT + +// Printing code is present in product build for -XX:+PrintAssembly. -void print_register_type(OopMapValue::oop_types x, VMReg optional) { +static +void print_register_type(OopMapValue::oop_types x, VMReg optional, + outputStream* st) { switch( x ) { case OopMapValue::oop_value: - tty->print("Oop"); + st->print("Oop"); break; case OopMapValue::value_value: - tty->print("Value" ); + st->print("Value" ); break; - case OopMapValue::dead_value: - tty->print("Dead" ); + case OopMapValue::narrowoop_value: + tty->print("NarrowOop" ); break; case OopMapValue::callee_saved_value: - tty->print("Callers_" ); - optional->print(); + st->print("Callers_" ); + optional->print_on(st); break; case OopMapValue::derived_oop_value: - tty->print("Derived_oop_" ); - optional->print(); - break; - case OopMapValue::stack_obj: - tty->print("Stack"); + st->print("Derived_oop_" ); + optional->print_on(st); break; default: ShouldNotReachHere(); @@ -537,20 +533,22 @@ } -void OopMapValue::print() const { - reg()->print(); - tty->print("="); - print_register_type(type(),content_reg()); - tty->print(" "); +void OopMapValue::print_on(outputStream* st) const { + reg()->print_on(st); + st->print("="); + print_register_type(type(),content_reg(),st); + st->print(" "); } void OopMap::print_on(outputStream* st) const { OopMapValue omv; + st->print("OopMap{"); for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) { omv = oms.current(); omv.print_on(st); } + st->print("off=%d}", (int) offset()); } @@ -561,12 +559,12 @@ for( i = 0; i < len; i++) { OopMap* m = at(i); - st->print_cr("OopMap #%d offset:%p",i,m->offset()); + st->print_cr("#%d ",i); m->print_on(st); - st->print_cr("\n"); + st->cr(); } } -#endif // !PRODUCT + //------------------------------DerivedPointerTable--------------------------- --- old/hotspot/src/share/vm/compiler/oopMap.hpp 2009-08-01 04:10:22.980364270 +0100 +++ new/hotspot/src/share/vm/compiler/oopMap.hpp 2009-08-01 04:10:22.905765761 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)oopMap.hpp 1.81 07/09/28 10:23:19 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -49,7 +49,7 @@ public: // Constants - enum { type_bits = 6, + enum { type_bits = 5, register_bits = BitsPerShort - type_bits }; enum { type_shift = 0, @@ -64,10 +64,9 @@ unused_value =0, // powers of 2, for masking OopMapStream oop_value = 1, value_value = 2, - dead_value = 4, + narrowoop_value = 4, callee_saved_value = 8, - derived_oop_value= 16, - stack_obj = 32 }; + derived_oop_value= 16 }; // Constructors OopMapValue () { set_value(0); set_content_reg(VMRegImpl::Bad()); } @@ -91,19 +90,17 @@ } // Querying - bool is_oop() { return mask_bits(value(), type_mask_in_place) == oop_value; } - bool is_value() { return mask_bits(value(), type_mask_in_place) == value_value; } - bool is_dead() { return mask_bits(value(), type_mask_in_place) == dead_value; } - bool is_callee_saved() { return mask_bits(value(), type_mask_in_place) == callee_saved_value; } - bool is_derived_oop() { return mask_bits(value(), type_mask_in_place) == derived_oop_value; } - bool is_stack_obj() { return mask_bits(value(), type_mask_in_place) == stack_obj; } - - void set_oop() { set_value((value() & register_mask_in_place) | oop_value); } - void set_value() { set_value((value() & register_mask_in_place) | value_value); } - void set_dead() { set_value((value() & register_mask_in_place) | dead_value); } - void set_callee_saved() { set_value((value() & register_mask_in_place) | callee_saved_value); } - void set_derived_oop() { set_value((value() & register_mask_in_place) | derived_oop_value); } - void set_stack_obj() { set_value((value() & register_mask_in_place) | stack_obj); } + bool is_oop() { return mask_bits(value(), type_mask_in_place) == oop_value; } + bool is_value() { return mask_bits(value(), type_mask_in_place) == value_value; } + bool is_narrowoop() { return mask_bits(value(), type_mask_in_place) == narrowoop_value; } + bool is_callee_saved() { return mask_bits(value(), type_mask_in_place) == callee_saved_value; } + bool is_derived_oop() { return mask_bits(value(), type_mask_in_place) == derived_oop_value; } + + void set_oop() { set_value((value() & register_mask_in_place) | oop_value); } + void set_value() { set_value((value() & register_mask_in_place) | value_value); } + void set_narrowoop() { set_value((value() & register_mask_in_place) | narrowoop_value); } + void set_callee_saved() { set_value((value() & register_mask_in_place) | callee_saved_value); } + void set_derived_oop() { set_value((value() & register_mask_in_place) | derived_oop_value); } VMReg reg() const { return VMRegImpl::as_VMReg(mask_bits(value(), register_mask_in_place) >> register_shift); } oop_types type() const { return (oop_types)mask_bits(value(), type_mask_in_place); } @@ -132,7 +129,8 @@ return reg()->reg2stack(); } - void print( ) const PRODUCT_RETURN; + void print_on(outputStream* st) const; + void print() const { print_on(tty); } }; @@ -178,10 +176,10 @@ // slots to hold 4-byte values like ints and floats in the LP64 build. 
void set_oop ( VMReg local); void set_value( VMReg local); + void set_narrowoop(VMReg local); void set_dead ( VMReg local); void set_callee_saved( VMReg local, VMReg caller_machine_register ); void set_derived_oop ( VMReg local, VMReg derived_from_local_register ); - void set_stack_obj( VMReg local); void set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional); int heap_size() const; @@ -195,7 +193,7 @@ } // Printing - void print_on(outputStream* st) const PRODUCT_RETURN; + void print_on(outputStream* st) const; void print() const { print_on(tty); } }; @@ -246,11 +244,11 @@ // Iterates through frame for a compiled method for dead ones and values, too static void all_do(const frame* fr, const RegisterMap* reg_map, OopClosure* oop_fn, - void derived_oop_fn(oop* base, oop* derived), - OopClosure* value_fn, OopClosure* dead_fn); + void derived_oop_fn(oop* base, oop* derived), + OopClosure* value_fn); // Printing - void print_on(outputStream* st) const PRODUCT_RETURN; + void print_on(outputStream* st) const; void print() const { print_on(tty); } }; --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp 2009-08-01 04:10:23.945521370 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp 2009-08-01 04:10:23.852916615 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)binaryTreeDictionary.cpp 1.37 07/05/05 17:05:43 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,8 +74,15 @@ TreeList* TreeList::as_TreeList(HeapWord* addr, size_t size) { TreeChunk* tc = (TreeChunk*) addr; assert(size >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk"); - assert(tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL, - "Space should be clear"); + // The space in the heap will have been mangled initially but + // is not remangled when a free chunk is returned to the free list + // (since it is used to maintain the chunk on the free list). + assert((ZapUnusedHeapArea && + SpaceMangler::is_mangled((HeapWord*) tc->size_addr()) && + SpaceMangler::is_mangled((HeapWord*) tc->prev_addr()) && + SpaceMangler::is_mangled((HeapWord*) tc->next_addr())) || + (tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL), + "Space should be clear or mangled"); tc->setSize(size); tc->linkPrev(NULL); tc->linkNext(NULL); @@ -1074,85 +1081,56 @@ // for each list in the tree. Also print some summary // information. 
class printTreeCensusClosure : public AscendTreeCensusClosure { + int _print_line; size_t _totalFree; - AllocationStats _totals; - size_t _count; - + FreeList _total; + public: printTreeCensusClosure() { + _print_line = 0; _totalFree = 0; - _count = 0; - _totals.initialize(); } - AllocationStats* totals() { return &_totals; } - size_t count() { return _count; } - void increment_count_by(size_t v) { _count += v; } + FreeList* total() { return &_total; } size_t totalFree() { return _totalFree; } - void increment_totalFree_by(size_t v) { _totalFree += v; } void do_list(FreeList* fl) { - bool nl = false; // "maybe this is not needed" isNearLargestChunk(fl->head()); - - gclog_or_tty->print("%c %4d\t\t" "%7d\t" "%7d\t" - "%7d\t" "%7d\t" "%7d\t" "%7d\t" - "%7d\t" "%7d\t" "%7d\t" - "%7d\t" "\n", - " n"[nl], fl->size(), fl->bfrSurp(), fl->surplus(), - fl->desired(), fl->prevSweep(), fl->beforeSweep(), fl->count(), - fl->coalBirths(), fl->coalDeaths(), fl->splitBirths(), - fl->splitDeaths()); - - increment_totalFree_by(fl->count() * fl->size()); - increment_count_by(fl->count()); - totals()->set_bfrSurp(totals()->bfrSurp() + fl->bfrSurp()); - totals()->set_surplus(totals()->splitDeaths() + fl->surplus()); - totals()->set_prevSweep(totals()->prevSweep() + fl->prevSweep()); - totals()->set_beforeSweep(totals()->beforeSweep() + fl->beforeSweep()); - totals()->set_coalBirths(totals()->coalBirths() + fl->coalBirths()); - totals()->set_coalDeaths(totals()->coalDeaths() + fl->coalDeaths()); - totals()->set_splitBirths(totals()->splitBirths() + fl->splitBirths()); - totals()->set_splitDeaths(totals()->splitDeaths() + fl->splitDeaths()); + if (++_print_line >= 40) { + FreeList::print_labels_on(gclog_or_tty, "size"); + _print_line = 0; + } + fl->print_on(gclog_or_tty); + _totalFree += fl->count() * fl->size() ; + total()->set_count( total()->count() + fl->count() ); + total()->set_bfrSurp( total()->bfrSurp() + fl->bfrSurp() ); + total()->set_surplus( total()->splitDeaths() + fl->surplus() ); + total()->set_desired( total()->desired() + fl->desired() ); + total()->set_prevSweep( total()->prevSweep() + fl->prevSweep() ); + total()->set_beforeSweep(total()->beforeSweep() + fl->beforeSweep()); + total()->set_coalBirths( total()->coalBirths() + fl->coalBirths() ); + total()->set_coalDeaths( total()->coalDeaths() + fl->coalDeaths() ); + total()->set_splitBirths(total()->splitBirths() + fl->splitBirths()); + total()->set_splitDeaths(total()->splitDeaths() + fl->splitDeaths()); } }; void BinaryTreeDictionary::printDictCensus(void) const { gclog_or_tty->print("\nBinaryTree\n"); - gclog_or_tty->print( - "%4s\t\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" - "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" "\n", - "size", "bfrsurp", "surplus", "desired", "prvSwep", "bfrSwep", - "count", "cBirths", "cDeaths", "sBirths", "sDeaths"); - + FreeList::print_labels_on(gclog_or_tty, "size"); printTreeCensusClosure ptc; ptc.do_tree(root()); + FreeList* total = ptc.total(); + FreeList::print_labels_on(gclog_or_tty, " "); + total->print_on(gclog_or_tty, "TOTAL\t"); gclog_or_tty->print( - "\t\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" - "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" "\n", - "bfrsurp", "surplus", "prvSwep", "bfrSwep", - "count", "cBirths", "cDeaths", "sBirths", "sDeaths"); - gclog_or_tty->print( - "%s\t\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" - "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" "\n", - "totl", - ptc.totals()->bfrSurp(), - ptc.totals()->surplus(), - ptc.totals()->prevSweep(), - ptc.totals()->beforeSweep(), - ptc.count(), - 
ptc.totals()->coalBirths(), - ptc.totals()->coalDeaths(), - ptc.totals()->splitBirths(), - ptc.totals()->splitDeaths()); - gclog_or_tty->print("totalFree(words): %7d growth: %8.5f deficit: %8.5f\n", + "totalFree(words): " SIZE_FORMAT_W(16) + " growth: %8.5f deficit: %8.5f\n", ptc.totalFree(), - (double)(ptc.totals()->splitBirths()+ptc.totals()->coalBirths() - -ptc.totals()->splitDeaths()-ptc.totals()->coalDeaths()) - /(ptc.totals()->prevSweep() != 0 ? - (double)ptc.totals()->prevSweep() : 1.0), - (double)(ptc.totals()->desired() - ptc.count()) - /(ptc.totals()->desired() != 0 ? - (double)ptc.totals()->desired() : 1.0)); + (double)(total->splitBirths() + total->coalBirths() + - total->splitDeaths() - total->coalDeaths()) + /(total->prevSweep() != 0 ? (double)total->prevSweep() : 1.0), + (double)(total->desired() - total->count()) + /(total->desired() != 0 ? (double)total->desired() : 1.0)); } // Verify the following tree invariants: --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp 2009-08-01 04:10:24.856342060 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp 2009-08-01 04:10:24.787215687 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)binaryTreeDictionary.hpp 1.27 08/04/09 19:19:11 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp 2009-08-01 04:10:25.690852777 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp 2009-08-01 04:10:25.608696756 +0100 @@ -32,22 +32,34 @@ class CMSBitMap; class CMSMarkStack; class CMSCollector; -template class GenericTaskQueue; -typedef GenericTaskQueue OopTaskQueue; -template class GenericTaskQueueSet; -typedef GenericTaskQueueSet OopTaskQueueSet; class MarkFromRootsClosure; class Par_MarkFromRootsClosure; +// Decode the oop and call do_oop on it. +#define DO_OOP_WORK_DEFN \ + void do_oop(oop obj); \ + template inline void do_oop_work(T* p) { \ + T heap_oop = oopDesc::load_heap_oop(p); \ + if (!oopDesc::is_null(heap_oop)) { \ + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \ + do_oop(obj); \ + } \ + } + class MarkRefsIntoClosure: public OopsInGenClosure { - const MemRegion _span; - CMSBitMap* _bitMap; - const bool _should_do_nmethods; + private: + const MemRegion _span; + CMSBitMap* _bitMap; + const bool _should_do_nmethods; + protected: + DO_OOP_WORK_DEFN public: MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap, bool should_do_nmethods); - void do_oop(oop* p); - void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop(p); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); } bool do_header() { return true; } virtual const bool do_nmethods() const { return _should_do_nmethods; @@ -60,15 +72,20 @@ // A variant of the above used in certain kinds of CMS // marking verification. 
class MarkRefsIntoVerifyClosure: public OopsInGenClosure { - const MemRegion _span; - CMSBitMap* _verification_bm; - CMSBitMap* _cms_bm; - const bool _should_do_nmethods; + private: + const MemRegion _span; + CMSBitMap* _verification_bm; + CMSBitMap* _cms_bm; + const bool _should_do_nmethods; + protected: + DO_OOP_WORK_DEFN public: MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm, bool should_do_nmethods); - void do_oop(oop* p); - void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop(p); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } bool do_header() { return true; } virtual const bool do_nmethods() const { return _should_do_nmethods; @@ -78,37 +95,40 @@ } }; - // The non-parallel version (the parallel version appears further below). class PushAndMarkClosure: public OopClosure { - CMSCollector* _collector; - MemRegion _span; - CMSBitMap* _bit_map; - CMSBitMap* _mod_union_table; - CMSMarkStack* _mark_stack; - CMSMarkStack* _revisit_stack; - bool _concurrent_precleaning; - bool const _should_remember_klasses; + private: + CMSCollector* _collector; + MemRegion _span; + CMSBitMap* _bit_map; + CMSBitMap* _mod_union_table; + CMSMarkStack* _mark_stack; + CMSMarkStack* _revisit_stack; + bool _concurrent_precleaning; + bool const _should_remember_klasses; + protected: + DO_OOP_WORK_DEFN public: PushAndMarkClosure(CMSCollector* collector, MemRegion span, ReferenceProcessor* rp, CMSBitMap* bit_map, CMSBitMap* mod_union_table, - CMSMarkStack* mark_stack, - CMSMarkStack* revisit_stack, - bool concurrent_precleaning); - - void do_oop(oop* p); - void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop(p); } + CMSMarkStack* mark_stack, + CMSMarkStack* revisit_stack, + bool concurrent_precleaning); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); } bool do_header() { return true; } Prefetch::style prefetch_style() { return Prefetch::do_read; } - const bool should_remember_klasses() const { + virtual const bool should_remember_klasses() const { return _should_remember_klasses; } - void remember_klass(Klass* k); + virtual void remember_klass(Klass* k); }; // In the parallel case, the revisit stack, the bit map and the @@ -118,12 +138,15 @@ // used in the non-parallel case above is here replaced with // an OopTaskQueue structure to allow efficient work stealing. 
class Par_PushAndMarkClosure: public OopClosure { - CMSCollector* _collector; - MemRegion _span; - CMSBitMap* _bit_map; - OopTaskQueue* _work_queue; - CMSMarkStack* _revisit_stack; - bool const _should_remember_klasses; + private: + CMSCollector* _collector; + MemRegion _span; + CMSBitMap* _bit_map; + OopTaskQueue* _work_queue; + CMSMarkStack* _revisit_stack; + bool const _should_remember_klasses; + protected: + DO_OOP_WORK_DEFN public: Par_PushAndMarkClosure(CMSCollector* collector, MemRegion span, @@ -131,43 +154,48 @@ CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack); - - void do_oop(oop* p); - void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop(p); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } bool do_header() { return true; } Prefetch::style prefetch_style() { return Prefetch::do_read; } - const bool should_remember_klasses() const { + virtual const bool should_remember_klasses() const { return _should_remember_klasses; } - void remember_klass(Klass* k); + virtual void remember_klass(Klass* k); }; - // The non-parallel version (the parallel version appears further below). class MarkRefsIntoAndScanClosure: public OopsInGenClosure { - MemRegion _span; - CMSBitMap* _bit_map; - CMSMarkStack* _mark_stack; - PushAndMarkClosure _pushAndMarkClosure; - CMSCollector* _collector; - bool _yield; + private: + MemRegion _span; + CMSBitMap* _bit_map; + CMSMarkStack* _mark_stack; + PushAndMarkClosure _pushAndMarkClosure; + CMSCollector* _collector; + Mutex* _freelistLock; + bool _yield; // Whether closure is being used for concurrent precleaning - bool _concurrent_precleaning; - Mutex* _freelistLock; + bool _concurrent_precleaning; + protected: + DO_OOP_WORK_DEFN public: MarkRefsIntoAndScanClosure(MemRegion span, ReferenceProcessor* rp, CMSBitMap* bit_map, CMSBitMap* mod_union_table, - CMSMarkStack* mark_stack, - CMSMarkStack* revisit_stack, + CMSMarkStack* mark_stack, + CMSMarkStack* revisit_stack, CMSCollector* collector, bool should_yield, bool concurrent_precleaning); - void do_oop(oop* p); - void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop(p); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } bool do_header() { return true; } virtual const bool do_nmethods() const { return true; } Prefetch::style prefetch_style() { @@ -188,11 +216,14 @@ // sycnhronized. An OopTaskQueue structure, supporting efficient // workstealing, replaces a CMSMarkStack for storing grey objects. 
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure { - MemRegion _span; - CMSBitMap* _bit_map; - OopTaskQueue* _work_queue; - const uint _low_water_mark; - Par_PushAndMarkClosure _par_pushAndMarkClosure; + private: + MemRegion _span; + CMSBitMap* _bit_map; + OopTaskQueue* _work_queue; + const uint _low_water_mark; + Par_PushAndMarkClosure _par_pushAndMarkClosure; + protected: + DO_OOP_WORK_DEFN public: Par_MarkRefsIntoAndScanClosure(CMSCollector* collector, MemRegion span, @@ -200,8 +231,10 @@ CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack); - void do_oop(oop* p); - void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop(p); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } bool do_header() { return true; } virtual const bool do_nmethods() const { return true; } Prefetch::style prefetch_style() { @@ -214,28 +247,34 @@ // following the first checkpoint. Its use is buried in // the closure MarkFromRootsClosure. class PushOrMarkClosure: public OopClosure { - CMSCollector* _collector; - MemRegion _span; - CMSBitMap* _bitMap; - CMSMarkStack* _markStack; - CMSMarkStack* _revisitStack; - HeapWord* const _finger; - MarkFromRootsClosure* const _parent; - bool const _should_remember_klasses; + private: + CMSCollector* _collector; + MemRegion _span; + CMSBitMap* _bitMap; + CMSMarkStack* _markStack; + CMSMarkStack* _revisitStack; + HeapWord* const _finger; + MarkFromRootsClosure* const + _parent; + bool const _should_remember_klasses; + protected: + DO_OOP_WORK_DEFN public: PushOrMarkClosure(CMSCollector* cms_collector, MemRegion span, CMSBitMap* bitMap, - CMSMarkStack* markStack, - CMSMarkStack* revisitStack, - HeapWord* finger, + CMSMarkStack* markStack, + CMSMarkStack* revisitStack, + HeapWord* finger, MarkFromRootsClosure* parent); - void do_oop(oop* p); - void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop(p); } - const bool should_remember_klasses() const { + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); } + virtual const bool should_remember_klasses() const { return _should_remember_klasses; } - void remember_klass(Klass* k); + virtual void remember_klass(Klass* k); // Deal with a stack overflow condition void handle_stack_overflow(HeapWord* lost); private: @@ -247,6 +286,7 @@ // following the first checkpoint. Its use is buried in // the closure Par_MarkFromRootsClosure. 
class Par_PushOrMarkClosure: public OopClosure { + private: CMSCollector* _collector; MemRegion _whole_span; MemRegion _span; // local chunk @@ -256,24 +296,29 @@ CMSMarkStack* _revisit_stack; HeapWord* const _finger; HeapWord** const _global_finger_addr; - Par_MarkFromRootsClosure* const _parent; - bool const _should_remember_klasses; + Par_MarkFromRootsClosure* const + _parent; + bool const _should_remember_klasses; + protected: + DO_OOP_WORK_DEFN public: Par_PushOrMarkClosure(CMSCollector* cms_collector, - MemRegion span, - CMSBitMap* bit_map, - OopTaskQueue* work_queue, - CMSMarkStack* mark_stack, - CMSMarkStack* revisit_stack, - HeapWord* finger, - HeapWord** global_finger_addr, - Par_MarkFromRootsClosure* parent); - void do_oop(oop* p); - void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop(p); } - const bool should_remember_klasses() const { + MemRegion span, + CMSBitMap* bit_map, + OopTaskQueue* work_queue, + CMSMarkStack* mark_stack, + CMSMarkStack* revisit_stack, + HeapWord* finger, + HeapWord** global_finger_addr, + Par_MarkFromRootsClosure* parent); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } + virtual const bool should_remember_klasses() const { return _should_remember_klasses; } - void remember_klass(Klass* k); + virtual void remember_klass(Klass* k); // Deal with a stack overflow condition void handle_stack_overflow(HeapWord* lost); private: @@ -283,31 +328,44 @@ // For objects in CMS generation, this closure marks // given objects (transitively) as being reachable/live. // This is currently used during the (weak) reference object -// processing phase of the CMS final checkpoint step. +// processing phase of the CMS final checkpoint step, as +// well as during the concurrent precleaning of the discovered +// reference lists. 
class CMSKeepAliveClosure: public OopClosure { + private: CMSCollector* _collector; const MemRegion _span; CMSMarkStack* _mark_stack; CMSBitMap* _bit_map; + bool _concurrent_precleaning; + protected: + DO_OOP_WORK_DEFN public: CMSKeepAliveClosure(CMSCollector* collector, MemRegion span, - CMSBitMap* bit_map, CMSMarkStack* mark_stack): + CMSBitMap* bit_map, CMSMarkStack* mark_stack, + bool cpc): _collector(collector), _span(span), _bit_map(bit_map), - _mark_stack(mark_stack) { - assert(!_span.is_empty(), "Empty span could spell trouble"); - } - - void do_oop(oop* p); - void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop(p); } + _mark_stack(mark_stack), + _concurrent_precleaning(cpc) { + assert(!_span.is_empty(), "Empty span could spell trouble"); + } + bool concurrent_precleaning() const { return _concurrent_precleaning; } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); } }; class CMSInnerParMarkAndPushClosure: public OopClosure { + private: CMSCollector* _collector; MemRegion _span; OopTaskQueue* _work_queue; CMSBitMap* _bit_map; + protected: + DO_OOP_WORK_DEFN public: CMSInnerParMarkAndPushClosure(CMSCollector* collector, MemRegion span, CMSBitMap* bit_map, @@ -316,24 +374,32 @@ _span(span), _bit_map(bit_map), _work_queue(work_queue) { } - void do_oop(oop* p); - void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop(p); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } }; // A parallel (MT) version of the above, used when // reference processing is parallel; the only difference // is in the do_oop method. 
class CMSParKeepAliveClosure: public OopClosure { + private: CMSCollector* _collector; MemRegion _span; OopTaskQueue* _work_queue; CMSBitMap* _bit_map; - CMSInnerParMarkAndPushClosure _mark_and_push; + CMSInnerParMarkAndPushClosure + _mark_and_push; const uint _low_water_mark; void trim_queue(uint max); + protected: + DO_OOP_WORK_DEFN public: CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue); - void do_oop(oop* p); - void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop(p); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); } }; --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp 2009-08-01 04:10:26.625334989 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp 2009-08-01 04:10:26.551310450 +0100 @@ -47,51 +47,11 @@ bool lock_owned = lock->owned_by_self(); if (lock_owned) { MutexUnlocker mul(lock); - return mem_allocate_work(size); + return mem_allocate_in_gen(size, _gen); } else { - return mem_allocate_work(size); + return mem_allocate_in_gen(size, _gen); } } - -HeapWord* CMSPermGen::mem_allocate_work(size_t size) { - assert(!_gen->freelistLock()->owned_by_self(), "Potetntial deadlock"); - - MutexLocker ml(Heap_lock); - HeapWord* obj = NULL; - - obj = _gen->allocate(size, false); - // Since we want to minimize pause times, we will prefer - // expanding the perm gen rather than doing a stop-world - // collection to satisfy the allocation request. - if (obj == NULL) { - // Try to expand the perm gen and allocate space. - obj = _gen->expand_and_allocate(size, false, false); - if (obj == NULL) { - // Let's see if a normal stop-world full collection will - // free up enough space. - SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full); - obj = _gen->allocate(size, false); - if (obj == NULL) { - // The collection above may have shrunk the space, so try - // to expand again and allocate space. - obj = _gen->expand_and_allocate(size, false, false); - } - if (obj == NULL) { - // We have not been able to allocate space despite a - // full stop-world collection. We now make a last-ditch collection - // attempt (in which soft refs are all aggressively freed) - // that will try to reclaim as much space as possible. - SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection); - obj = _gen->allocate(size, false); - if (obj == NULL) { - // Expand generation in case it was shrunk following the collection. - obj = _gen->expand_and_allocate(size, false, false); - } - } - } - } - return obj; -} void CMSPermGen::compute_new_size() { _gen->compute_new_size(); --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp 2009-08-01 04:10:27.452299187 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp 2009-08-01 04:10:27.383665684 +0100 @@ -32,7 +32,6 @@ class CMSPermGen: public PermGen { friend class VMStructs; - HeapWord* mem_allocate_work(size_t size); protected: // The "generation" view. 
ConcurrentMarkSweepGeneration* _gen; --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp 2009-08-01 04:10:28.324217575 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp 2009-08-01 04:10:28.229759048 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)compactibleFreeListSpace.cpp 1.144 08/09/06 09:20:55 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,7 +57,7 @@ _collector(NULL) { _bt.set_space(this); - initialize(mr, true); + initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle); // We have all of "mr", all of which we place in the dictionary // as one big chunk. We'll need to decide here which of several // possible alternative dictionary implementations to use. For @@ -180,7 +180,7 @@ assert(q->forwardee() == NULL, "should be forwarded to NULL"); } - debug_only(MarkSweep::register_live_oop(q, adjusted_size)); + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size)); compact_top += adjusted_size; // we need to update the offset table so that the beginnings of objects can be @@ -793,7 +793,7 @@ } -HeapWord* CompactibleFreeListSpace::block_start(const void* p) const { +HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const { NOT_PRODUCT(verify_objects_initialized()); return _bt.block_start(p); } @@ -808,28 +808,30 @@ // This must be volatile, or else there is a danger that the compiler // will compile the code below into a sometimes-infinite loop, by keeping // the value read the first time in a register. - oop o = (oop)p; - volatile oop* second_word_addr = o->klass_addr(); while (true) { - klassOop k = (klassOop)(*second_word_addr); // We must do this until we get a consistent view of the object. - if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) { - FreeChunk* fc = (FreeChunk*)p; - volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr()); - size_t res = (*sz_addr); - klassOop k2 = (klassOop)(*second_word_addr); // Read to confirm. - if (k == k2) { + if (FreeChunk::indicatesFreeChunk(p)) { + volatile FreeChunk* fc = (volatile FreeChunk*)p; + size_t res = fc->size(); + // If the object is still a free chunk, return the size, else it + // has been allocated so try again. + if (FreeChunk::indicatesFreeChunk(p)) { + assert(res != 0, "Block size should not be 0"); + return res; + } + } else { + // must read from what 'p' points to in each loop. 
+ klassOop k = ((volatile oopDesc*)p)->klass_or_null(); + if (k != NULL) { + assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop."); + oop o = (oop)p; + assert(o->is_parsable(), "Should be parsable"); + assert(o->is_oop(true /* ignore mark word */), "Should be an oop."); + size_t res = o->size_given_klass(k->klass_part()); + res = adjustObjectSize(res); assert(res != 0, "Block size should not be 0"); return res; } - } else if (k != NULL) { - assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop."); - assert(o->is_parsable(), "Should be parsable"); - assert(o->is_oop(true /* ignore mark word */), "Should be an oop."); - size_t res = o->size_given_klass(k->klass_part()); - res = adjustObjectSize(res); - assert(res != 0, "Block size should not be 0"); - return res; } } } @@ -848,31 +850,31 @@ // This must be volatile, or else there is a danger that the compiler // will compile the code below into a sometimes-infinite loop, by keeping // the value read the first time in a register. - oop o = (oop)p; - volatile oop* second_word_addr = o->klass_addr(); DEBUG_ONLY(uint loops = 0;) while (true) { - klassOop k = (klassOop)(*second_word_addr); // We must do this until we get a consistent view of the object. - if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) { - FreeChunk* fc = (FreeChunk*)p; - volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr()); - size_t res = (*sz_addr); - klassOop k2 = (klassOop)(*second_word_addr); // Read to confirm. - if (k == k2) { + if (FreeChunk::indicatesFreeChunk(p)) { + volatile FreeChunk* fc = (volatile FreeChunk*)p; + size_t res = fc->size(); + if (FreeChunk::indicatesFreeChunk(p)) { assert(res != 0, "Block size should not be 0"); assert(loops == 0, "Should be 0"); return res; } - } else if (k != NULL && o->is_parsable()) { - assert(k->is_oop(), "Should really be klass oop."); - assert(o->is_oop(), "Should be an oop"); - size_t res = o->size_given_klass(k->klass_part()); - res = adjustObjectSize(res); - assert(res != 0, "Block size should not be 0"); - return res; } else { - return c->block_size_if_printezis_bits(p); + // must read from what 'p' points to in each loop. + klassOop k = ((volatile oopDesc*)p)->klass_or_null(); + if (k != NULL && ((oopDesc*)p)->is_parsable()) { + assert(k->is_oop(), "Should really be klass oop."); + oop o = (oop)p; + assert(o->is_oop(), "Should be an oop"); + size_t res = o->size_given_klass(k->klass_part()); + res = adjustObjectSize(res); + assert(res != 0, "Block size should not be 0"); + return res; + } else { + return c->block_size_if_printezis_bits(p); + } } assert(loops == 0, "Can loop at most once"); DEBUG_ONLY(loops++;) @@ -910,9 +912,8 @@ // and those objects (if garbage) may have been modified to hold // live range information. 
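[Editor's note] The rewritten block_size loops above are lock-free reads racing against concurrent allocation: a block can flip from free chunk to freshly allocated object between two reads. The code therefore checks the free-chunk indicator, reads the size, and re-checks the indicator before trusting that size; if the check fails it falls back to reading klass_or_null() and, if that is still NULL, loops. A toy model of the read/re-check validation is below, using std::atomic as a stand-in for the volatile reads in the patch.

```cpp
#include <atomic>
#include <cstddef>
#include <optional>

// Model of the "read, then re-check" protocol in block_size(): a concurrent
// allocator may convert a free chunk into an object between the two reads,
// in which case the size read in between must be discarded.
struct Block {
  std::atomic<bool>   is_free;    // stands in for FreeChunk::indicatesFreeChunk(p)
  std::atomic<size_t> free_size;  // only meaningful while is_free is true
};

std::optional<size_t> size_if_still_free(const Block* b) {
  if (b->is_free.load(std::memory_order_acquire)) {
    size_t sz = b->free_size.load(std::memory_order_acquire);
    // Re-check: if the block is still a free chunk, the size read above is
    // consistent and can be returned to the caller.
    if (b->is_free.load(std::memory_order_acquire)) {
      return sz;
    }
  }
  return std::nullopt;  // caller retries via the object (klass_or_null) path
}
```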
// assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary"); - klassOop k = oop(p)->klass(); - intptr_t ki = (intptr_t)k; - if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false; + if (FreeChunk::indicatesFreeChunk(p)) return false; + klassOop k = oop(p)->klass_or_null(); if (k != NULL) { // Ignore mark word because it may have been used to // chain together promoted objects (the last one @@ -1030,7 +1031,7 @@ FreeChunk* fc = (FreeChunk*)res; fc->markNotFree(); assert(!fc->isFree(), "shouldn't be marked free"); - assert(oop(fc)->klass() == NULL, "should look uninitialized"); + assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized"); // Verify that the block offset table shows this to // be a single block, but not one which is unallocated. _bt.verify_single_block(res, size); @@ -1214,7 +1215,7 @@ return fc; } -oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) { +oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) { assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); assert_locked(); @@ -1838,7 +1839,7 @@ guarantee(false, "NYI"); } -bool CompactibleFreeListSpace::linearAllocationWouldFail() { +bool CompactibleFreeListSpace::linearAllocationWouldFail() const { return _smallLinearAllocBlock._word_size == 0; } @@ -1909,6 +1910,13 @@ } } +// Support for concurrent collection policy decisions. +bool CompactibleFreeListSpace::should_concurrent_collect() const { + // In the future we might want to add in frgamentation stats -- + // including erosion of the "mountain" into this decision as well. + return !adaptive_freelists() && linearAllocationWouldFail(); +} + // Support for compaction void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { @@ -2016,11 +2024,11 @@ } } -void CompactibleFreeListSpace::endSweepFLCensus(int sweepCt) { +void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) { setFLSurplus(); setFLHints(); if (PrintGC && PrintFLSCensus > 0) { - printFLCensus(sweepCt); + printFLCensus(sweep_count); } clearFLCensus(); assert_locked(); @@ -2112,7 +2120,6 @@ splitBirth(to2); } - void CompactibleFreeListSpace::print() const { tty->print(" CompactibleFreeListSpace"); Space::print(); @@ -2126,6 +2133,7 @@ } class VerifyAllBlksClosure: public BlkClosure { + private: const CompactibleFreeListSpace* _sp; const MemRegion _span; @@ -2133,7 +2141,7 @@ VerifyAllBlksClosure(const CompactibleFreeListSpace* sp, MemRegion span) : _sp(sp), _span(span) { } - size_t do_blk(HeapWord* addr) { + virtual size_t do_blk(HeapWord* addr) { size_t res; if (_sp->block_is_obj(addr)) { oop p = oop(addr); @@ -2156,12 +2164,54 @@ }; class VerifyAllOopsClosure: public OopClosure { + private: const CMSCollector* _collector; const CompactibleFreeListSpace* _sp; const MemRegion _span; const bool _past_remark; const CMSBitMap* _bit_map; + protected: + void do_oop(void* p, oop obj) { + if (_span.contains(obj)) { // the interior oop points into CMS heap + if (!_span.contains(p)) { // reference from outside CMS heap + // Should be a valid object; the first disjunct below allows + // us to sidestep an assertion in block_is_obj() that insists + // that p be in _sp. Note that several generations (and spaces) + // are spanned by _span (CMS heap) above. 
+ guarantee(!_sp->is_in_reserved(obj) || + _sp->block_is_obj((HeapWord*)obj), + "Should be an object"); + guarantee(obj->is_oop(), "Should be an oop"); + obj->verify(); + if (_past_remark) { + // Remark has been completed, the object should be marked + _bit_map->isMarked((HeapWord*)obj); + } + } else { // reference within CMS heap + if (_past_remark) { + // Remark has been completed -- so the referent should have + // been marked, if referring object is. + if (_bit_map->isMarked(_collector->block_start(p))) { + guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?"); + } + } + } + } else if (_sp->is_in_reserved(p)) { + // the reference is from FLS, and points out of FLS + guarantee(obj->is_oop(), "Should be an oop"); + obj->verify(); + } + } + + template void do_oop_work(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + do_oop(p, obj); + } + } + public: VerifyAllOopsClosure(const CMSCollector* collector, const CompactibleFreeListSpace* sp, MemRegion span, @@ -2169,40 +2219,8 @@ OopClosure(), _collector(collector), _sp(sp), _span(span), _past_remark(past_remark), _bit_map(bit_map) { } - void do_oop(oop* ptr) { - oop p = *ptr; - if (p != NULL) { - if (_span.contains(p)) { // the interior oop points into CMS heap - if (!_span.contains(ptr)) { // reference from outside CMS heap - // Should be a valid object; the first disjunct below allows - // us to sidestep an assertion in block_is_obj() that insists - // that p be in _sp. Note that several generations (and spaces) - // are spanned by _span (CMS heap) above. - guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p), - "Should be an object"); - guarantee(p->is_oop(), "Should be an oop"); - p->verify(); - if (_past_remark) { - // Remark has been completed, the object should be marked - _bit_map->isMarked((HeapWord*)p); - } - } - else { // reference within CMS heap - if (_past_remark) { - // Remark has been completed -- so the referent should have - // been marked, if referring object is. 
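[Editor's note] The templated do_oop_work added to VerifyAllOopsClosure loads the (possibly narrow) heap word, skips encoded nulls, decodes, and then applies the same checks as the old oop*-only version. The substantive invariant being verified is that once remark has completed, a marked object may only reference marked objects. A small self-contained model of that remark-consistency check follows; the graph types are generic, not HotSpot structures.

```cpp
#include <cassert>
#include <unordered_set>
#include <vector>

struct Obj { std::vector<Obj*> refs; };

// Once remark is complete, a marked (live) object must not reference an
// unmarked object; otherwise the sweep would free something reachable.
void verify_marking(const std::unordered_set<Obj*>& marked) {
  for (Obj* from : marked) {
    for (Obj* to : from->refs) {
      assert(to == nullptr || marked.count(to) != 0);
    }
  }
}
```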
- if (_bit_map->isMarked(_collector->block_start(ptr))) { - guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?"); - } - } - } - } else if (_sp->is_in_reserved(ptr)) { - // the reference is from FLS, and points out of FLS - guarantee(p->is_oop(), "Should be an oop"); - p->verify(); - } - } - } + virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); } + virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); } }; void CompactibleFreeListSpace::verify(bool ignored) const { @@ -2271,9 +2289,9 @@ } void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const { - guarantee(size % 2 == 0, "Odd slots should be empty"); - for (FreeChunk* fc = _indexedFreeList[size].head(); fc != NULL; - fc = fc->next()) { + FreeChunk* fc = _indexedFreeList[size].head(); + guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty"); + for (; fc != NULL; fc = fc->next()) { guarantee(fc->size() == size, "Size inconsistency"); guarantee(fc->isFree(), "!free?"); guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list"); @@ -2296,59 +2314,37 @@ } #endif -void CompactibleFreeListSpace::printFLCensus(int sweepCt) const { +void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const { assert_lock_strong(&_freelistLock); - ssize_t bfrSurp = 0; - ssize_t surplus = 0; - ssize_t desired = 0; - ssize_t prevSweep = 0; - ssize_t beforeSweep = 0; - ssize_t count = 0; - ssize_t coalBirths = 0; - ssize_t coalDeaths = 0; - ssize_t splitBirths = 0; - ssize_t splitDeaths = 0; - gclog_or_tty->print("end sweep# %d\n", sweepCt); - gclog_or_tty->print("%4s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" - "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t" - "%7s\t" "\n", - "size", "bfrsurp", "surplus", "desired", "prvSwep", - "bfrSwep", "count", "cBirths", "cDeaths", "sBirths", - "sDeaths"); - + FreeList total; + gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count); + FreeList::print_labels_on(gclog_or_tty, "size"); size_t totalFree = 0; for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { - const FreeList *fl = &_indexedFreeList[i]; - totalFree += fl->count() * fl->size(); - - gclog_or_tty->print("%4d\t" "%7d\t" "%7d\t" "%7d\t" - "%7d\t" "%7d\t" "%7d\t" "%7d\t" - "%7d\t" "%7d\t" "%7d\t" "\n", - fl->size(), fl->bfrSurp(), fl->surplus(), fl->desired(), - fl->prevSweep(), fl->beforeSweep(), fl->count(), fl->coalBirths(), - fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths()); - bfrSurp += fl->bfrSurp(); - surplus += fl->surplus(); - desired += fl->desired(); - prevSweep += fl->prevSweep(); - beforeSweep += fl->beforeSweep(); - count += fl->count(); - coalBirths += fl->coalBirths(); - coalDeaths += fl->coalDeaths(); - splitBirths += fl->splitBirths(); - splitDeaths += fl->splitDeaths(); - } - gclog_or_tty->print("%4s\t" - "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" - "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" "\n", - "totl", - bfrSurp, surplus, desired, prevSweep, beforeSweep, - count, coalBirths, coalDeaths, splitBirths, splitDeaths); - gclog_or_tty->print_cr("Total free in indexed lists %d words", totalFree); + const FreeList *fl = &_indexedFreeList[i]; + totalFree += fl->count() * fl->size(); + if (i % (40*IndexSetStride) == 0) { + FreeList::print_labels_on(gclog_or_tty, "size"); + } + fl->print_on(gclog_or_tty); + total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() ); + total.set_surplus( total.surplus() + fl->surplus() ); + total.set_desired( total.desired() + fl->desired() ); + total.set_prevSweep( total.prevSweep() + fl->prevSweep() ); + 
total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep()); + total.set_count( total.count() + fl->count() ); + total.set_coalBirths( total.coalBirths() + fl->coalBirths() ); + total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() ); + total.set_splitBirths(total.splitBirths() + fl->splitBirths()); + total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths()); + } + total.print_on(gclog_or_tty, "TOTAL"); + gclog_or_tty->print_cr("Total free in indexed lists " + SIZE_FORMAT " words", totalFree); gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n", - (double)(splitBirths+coalBirths-splitDeaths-coalDeaths)/ - (prevSweep != 0 ? (double)prevSweep : 1.0), - (double)(desired - count)/(desired != 0 ? (double)desired : 1.0)); + (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/ + (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0), + (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0)); _dictionary->printDictCensus(); } @@ -2601,7 +2597,7 @@ } res->markNotFree(); assert(!res->isFree(), "shouldn't be marked free"); - assert(oop(res)->klass() == NULL, "should look uninitialized"); + assert(oop(res)->klass_or_null() == NULL, "should look uninitialized"); // mangle a just allocated object with a distinct pattern. debug_only(res->mangleAllocated(word_sz)); return (HeapWord*)res; @@ -2799,10 +2795,9 @@ size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size; assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect"); assert(n_tasks == 0 || - ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) && - (used_region().start() + n_tasks*task_size >= used_region().end())), - "n_tasks calculation incorrect"); - + ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) && + (used_region().start() + n_tasks*task_size >= used_region().end())), + "n_tasks calculation incorrect"); SequentialSubTasksDone* pst = conc_par_seq_tasks(); assert(!pst->valid(), "Clobbering existing data?"); pst->set_par_threads(n_threads); --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp 2009-08-01 04:10:29.409738714 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp 2009-08-01 04:10:29.333041647 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)compactibleFreeListSpace.hpp 1.91 07/05/05 17:05:45 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -421,7 +421,7 @@ // chunk exists, return NULL. 
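[Editor's note] The rewritten printFLCensus accumulates the per-list statistics into a running total and then prints two ratios. Spelled out below with the same divide-by-zero guards; these are standalone helpers whose parameter names only mirror the FreeList accessors.

```cpp
#include <cstdio>

// growth: net chunk births since the previous sweep, relative to the
// population recorded at that sweep.
double growth(long split_births, long coal_births,
              long split_deaths, long coal_deaths, long prev_sweep) {
  double denom = (prev_sweep != 0) ? (double)prev_sweep : 1.0;
  return (double)(split_births + coal_births - split_deaths - coal_deaths) / denom;
}

// deficit: how far the current chunk count falls short of the desired count.
double deficit(long desired, long count) {
  double denom = (desired != 0) ? (double)desired : 1.0;
  return (double)(desired - count) / denom;
}

int main() {
  // Illustrative numbers: 120 chunks were born and 80 died since the previous
  // sweep's population of 400; 500 chunks are desired but only 450 exist.
  std::printf("growth: %8.5f deficit: %8.5f\n",
              growth(70, 50, 50, 30, 400), deficit(500, 450));  // 0.10000 0.10000
  return 0;
}
```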
FreeChunk* find_chunk_at_end(); - bool adaptive_freelists() { return _adaptive_freelists; } + bool adaptive_freelists() const { return _adaptive_freelists; } void set_collector(CMSCollector* collector) { _collector = collector; } @@ -505,7 +505,7 @@ void blk_iterate(BlkClosure* cl); void blk_iterate_careful(BlkClosureCareful* cl); - HeapWord* block_start(const void* p) const; + HeapWord* block_start_const(const void* p) const; HeapWord* block_start_careful(const void* p) const; size_t block_size(const HeapWord* p) const; size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const; @@ -543,7 +543,7 @@ HeapWord* allocate(size_t size); HeapWord* par_allocate(size_t size); - oop promote(oop obj, size_t obj_size, oop* ref); + oop promote(oop obj, size_t obj_size); void gc_prologue(); void gc_epilogue(); @@ -569,7 +569,7 @@ FreeChunk* allocateScratch(size_t size); // returns true if either the small or large linear allocation buffer is empty. - bool linearAllocationWouldFail(); + bool linearAllocationWouldFail() const; // Adjust the chunk for the minimum size. This version is called in // most cases in CompactibleFreeListSpace methods. @@ -588,6 +588,9 @@ void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size, bool coalesced); + // Support for decisions regarding concurrent collection policy + bool should_concurrent_collect() const; + // Support for compaction void prepare_for_compaction(CompactPoint* cp); void adjust_pointers(); @@ -625,7 +628,7 @@ // coalescing of chunks during the sweep of garbage. // Print the statistics for the free lists. - void printFLCensus(int sweepCt) const; + void printFLCensus(size_t sweep_count) const; // Statistics functions // Initialize census for lists before the sweep. @@ -638,12 +641,11 @@ // Clear the census for each of the free lists. void clearFLCensus(); // Perform functions for the census after the end of the sweep. - void endSweepFLCensus(int sweepCt); + void endSweepFLCensus(size_t sweep_count); // Return true if the count of free chunks is greater // than the desired number of free chunks. bool coalOverPopulated(size_t size); - // Record (for each size): // // split-births = #chunks added due to splits in (prev-sweep-end, --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp 2009-08-01 04:10:30.991927267 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp 2009-08-01 04:10:30.860699040 +0100 @@ -193,7 +193,8 @@ // depends on this property. debug_only( FreeChunk* junk = NULL; - assert(junk->prev_addr() == (void*)(oop(junk)->klass_addr()), + assert(UseCompressedOops || + junk->prev_addr() == (void*)(oop(junk)->klass_addr()), "Offset of FreeChunk::_prev within FreeChunk must match" " that of OopDesc::_klass within OopDesc"); ) @@ -228,6 +229,34 @@ assert(_dilatation_factor >= 1.0, "from previous assert"); } + +// The field "_initiating_occupancy" represents the occupancy percentage +// at which we trigger a new collection cycle. Unless explicitly specified +// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it +// is calculated by: +// +// Let "f" be MinHeapFreeRatio in +// +// _intiating_occupancy = 100-f + +// f * (CMSTrigger[Perm]Ratio/100) +// where CMSTrigger[Perm]Ratio is the argument "tr" below. +// +// That is, if we assume the heap is at its desired maximum occupancy at the +// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free +// space be allocated before initiating a new collection cycle. 
+// +void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) { + assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments"); + if (io >= 0) { + _initiating_occupancy = (double)io / 100.0; + } else { + _initiating_occupancy = ((100 - MinHeapFreeRatio) + + (double)(tr * MinHeapFreeRatio) / 100.0) + / 100.0; + } +} + + void ConcurrentMarkSweepGeneration::ref_processor_init() { assert(collector() != NULL, "no collector"); collector()->ref_processor_init(); @@ -512,6 +541,7 @@ _survivor_chunk_capacity(0), // -- ditto -- _survivor_chunk_index(0), // -- ditto -- _ser_pmc_preclean_ovflw(0), + _ser_kac_preclean_ovflw(0), _ser_pmc_remark_ovflw(0), _par_pmc_remark_ovflw(0), _ser_kac_ovflw(0), @@ -526,8 +556,8 @@ _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"), _completed_initialization(false), _collector_policy(cp), - _unload_classes(false), - _unloaded_classes_last_cycle(false), + _should_unload_classes(false), + _concurrent_cycles_since_last_unload(0), _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding) { if (ExplicitGCInvokesConcurrentAndUnloadsClasses) { @@ -643,26 +673,11 @@ } } - // "initiatingOccupancy" is the occupancy ratio at which we trigger - // a new collection cycle. Unless explicitly specified via - // CMSTriggerRatio, it is calculated by: - // Let "f" be MinHeapFreeRatio in - // - // intiatingOccupancy = 100-f + - // f * (CMSTriggerRatio/100) - // That is, if we assume the heap is at its desired maximum occupancy at the - // end of a collection, we let CMSTriggerRatio of the (purported) free - // space be allocated before initiating a new collection cycle. - if (CMSInitiatingOccupancyFraction > 0) { - _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0; - } else { - _initiatingOccupancy = ((100 - MinHeapFreeRatio) + - (double)(CMSTriggerRatio * - MinHeapFreeRatio) / 100.0) - / 100.0; - } + _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio); + _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio); + // Clip CMSBootstrapOccupancy between 0 and 100. - _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy))) + _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy))) /(double)100; _full_gcs_since_conc_gc = 0; @@ -1029,7 +1044,7 @@ // mark end of object } // check that oop looks uninitialized - assert(oop(start)->klass() == NULL, "_klass should be NULL"); + assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL"); } void CMSCollector::promoted(bool par, HeapWord* start, @@ -1214,7 +1229,7 @@ return NULL; } -oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) { +oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) { assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); // allocate, copy and if necessary update promoinfo -- // delegate to underlying space. 
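[Editor's note] init_initiating_occupancy implements the formula documented in the comment block above: an explicit io value wins, otherwise the occupancy is derived from MinHeapFreeRatio (f) and the trigger ratio (tr). A runnable restatement with a worked example is below; the f and tr values used are illustrative, not asserted defaults.

```cpp
#include <cassert>
#include <cstdio>

// Same computation as init_initiating_occupancy(): "io" wins when explicitly
// set (>= 0); otherwise the threshold is derived from f and tr.
double initiating_occupancy(long io, long tr, long min_heap_free_ratio) {
  assert(io <= 100 && tr >= 0 && tr <= 100);
  if (io >= 0) {
    return (double)io / 100.0;
  }
  double f = (double)min_heap_free_ratio;
  return ((100.0 - f) + f * (double)tr / 100.0) / 100.0;
}

int main() {
  // With, say, MinHeapFreeRatio = 40 and a trigger ratio of 80, a cycle starts
  // once the generation is (100 - 40 + 40*0.80) / 100 = 92% full.
  std::printf("%.2f\n", initiating_occupancy(-1, 80, 40));  // prints 0.92
  // An explicit initiating occupancy fraction of 65 overrides the formula.
  std::printf("%.2f\n", initiating_occupancy(65, 80, 40));  // prints 0.65
  return 0;
}
```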
@@ -1226,7 +1241,7 @@ } #endif // #ifndef PRODUCT - oop res = _cmsSpace->promote(obj, obj_size, ref); + oop res = _cmsSpace->promote(obj, obj_size); if (res == NULL) { // expand and retry size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords @@ -1237,7 +1252,7 @@ assert(next_gen() == NULL, "assumption, based upon which no attempt " "is made to pass on a possibly failing " "promotion to next generation"); - res = _cmsSpace->promote(obj, obj_size, ref); + res = _cmsSpace->promote(obj, obj_size); } if (res != NULL) { // See comment in allocate() about when objects should @@ -1299,25 +1314,34 @@ } } oop obj = oop(obj_ptr); - assert(obj->klass() == NULL, "Object should be uninitialized here."); + assert(obj->klass_or_null() == NULL, "Object should be uninitialized here."); // Otherwise, copy the object. Here we must be careful to insert the // klass pointer last, since this marks the block as an allocated object. + // Except with compressed oops it's the mark word. HeapWord* old_ptr = (HeapWord*)old; if (word_sz > (size_t)oopDesc::header_size()) { Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(), obj_ptr + oopDesc::header_size(), word_sz - oopDesc::header_size()); } + + if (UseCompressedOops) { + // Copy gap missed by (aligned) header size calculation above + obj->set_klass_gap(old->klass_gap()); + } + // Restore the mark word copied above. obj->set_mark(m); - // Now we can track the promoted object, if necessary. We take care + + // Now we can track the promoted object, if necessary. We take care // To delay the transition from uninitialized to full object // (i.e., insertion of klass pointer) until after, so that it // atomically becomes a promoted object. if (promoInfo->tracking()) { promoInfo->track((PromotedObject*)obj, old->klass()); } - // Finally, install the klass pointer. + + // Finally, install the klass pointer (this should be volatile). obj->set_klass(old->klass()); assert(old->is_oop(), "Will dereference klass ptr below"); @@ -1414,7 +1438,8 @@ gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate()); gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate()); gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy()); - gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy()); + gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy()); + gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy()); } // ------------------------------------------------------------------ @@ -1447,22 +1472,36 @@ // old gen want a collection cycle started. Each may use // an appropriate criterion for making this decision. // XXX We need to make sure that the gen expansion - // criterion dovetails well with this. - if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) { + // criterion dovetails well with this. XXX NEED TO FIX THIS + if (_cmsGen->should_concurrent_collect()) { if (Verbose && PrintGCDetails) { gclog_or_tty->print_cr("CMS old gen initiated"); } return true; } - if (cms_should_unload_classes() && - _permGen->shouldConcurrentCollect(initiatingOccupancy())) { - if (Verbose && PrintGCDetails) { - gclog_or_tty->print_cr("CMS perm gen initiated"); + // We start a collection if we believe an incremental collection may fail; + // this is not likely to be productive in practice because it's probably too + // late anyway. 
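[Editor's note] The par_promote changes above are about publication order: the object body is copied first, the compressed-oops klass gap is copied when UseCompressedOops is set, the mark word is restored, and the klass pointer is installed last, so concurrent readers that test klass_or_null() see either "still uninitialized" or a fully formed object, never a half-copied one. A minimal model of that publish-last protocol is sketched below; FakeObject and its fields are hypothetical.

```cpp
#include <atomic>
#include <cstring>

// Readers treat a null klass as "not yet an object", so the klass store must
// be the final, releasing write (mirrors the ordering in par_promote above).
struct FakeObject {
  std::atomic<const void*> klass{nullptr};  // null => block looks uninitialized
  long mark;
  char body[48];
};

void promote_copy(FakeObject* to, const FakeObject* from) {
  std::memcpy(to->body, from->body, sizeof(to->body));  // copy payload first
  to->mark = from->mark;                                // restore header word
  // Publish: only now does the block become a parsable object for readers
  // that poll klass_or_null(), such as the block_size() loop shown earlier.
  to->klass.store(from->klass.load(std::memory_order_relaxed),
                  std::memory_order_release);
}
```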
+ GenCollectedHeap* gch = GenCollectedHeap::heap(); + assert(gch->collector_policy()->is_two_generation_policy(), + "You may want to check the correctness of the following"); + if (gch->incremental_collection_will_fail()) { + if (PrintGCDetails && Verbose) { + gclog_or_tty->print("CMSCollector: collect because incremental collection will fail "); } return true; } + if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) { + bool res = update_should_unload_classes(); + if (res) { + if (Verbose && PrintGCDetails) { + gclog_or_tty->print_cr("CMS perm gen initiated"); + } + return true; + } + } return false; } @@ -1472,32 +1511,36 @@ _permGen->clear_expansion_cause(); } -bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect( - double initiatingOccupancy) { - // We should be conservative in starting a collection cycle. To - // start too eagerly runs the risk of collecting too often in the - // extreme. To collect too rarely falls back on full collections, - // which works, even if not optimum in terms of concurrent work. - // As a work around for too eagerly collecting, use the flag - // UseCMSInitiatingOccupancyOnly. This also has the advantage of - // giving the user an easily understandable way of controlling the - // collections. - // We want to start a new collection cycle if any of the following - // conditions hold: - // . our current occupancy exceeds the initiating occupancy, or - // . we recently needed to expand and have not since that expansion, - // collected, or - // . we are not using adaptive free lists and linear allocation is - // going to fail, or - // . (for old gen) incremental collection has already failed or - // may soon fail in the near future as we may not be able to absorb - // promotions. - assert_lock_strong(freelistLock()); +// We should be conservative in starting a collection cycle. To +// start too eagerly runs the risk of collecting too often in the +// extreme. To collect too rarely falls back on full collections, +// which works, even if not optimum in terms of concurrent work. +// As a work around for too eagerly collecting, use the flag +// UseCMSInitiatingOccupancyOnly. This also has the advantage of +// giving the user an easily understandable way of controlling the +// collections. +// We want to start a new collection cycle if any of the following +// conditions hold: +// . our current occupancy exceeds the configured initiating occupancy +// for this generation, or +// . we recently needed to expand this space and have not, since that +// expansion, done a collection of this generation, or +// . the underlying space believes that it may be a good idea to initiate +// a concurrent collection (this may be based on criteria such as the +// following: the space uses linear allocation and linear allocation is +// going to fail, or there is believed to be excessive fragmentation in +// the generation, etc... or ... +// [.(currently done by CMSCollector::shouldConcurrentCollect() only for +// the case of the old generation, not the perm generation; see CR 6543076): +// we may be approaching a point at which allocation requests may fail because +// we will be out of sufficient free space given allocation rate estimates.] 
+bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const { - if (occupancy() > initiatingOccupancy) { + assert_lock_strong(freelistLock()); + if (occupancy() > initiating_occupancy()) { if (PrintGCDetails && Verbose) { gclog_or_tty->print(" %s: collect because of occupancy %f / %f ", - short_name(), occupancy(), initiatingOccupancy); + short_name(), occupancy(), initiating_occupancy()); } return true; } @@ -1511,21 +1554,10 @@ } return true; } - GenCollectedHeap* gch = GenCollectedHeap::heap(); - assert(gch->collector_policy()->is_two_generation_policy(), - "You may want to check the correctness of the following"); - if (gch->incremental_collection_will_fail()) { + if (_cmsSpace->should_concurrent_collect()) { if (PrintGCDetails && Verbose) { - gclog_or_tty->print(" %s: collect because incremental collection will fail ", - short_name()); - } - return true; - } - if (!_cmsSpace->adaptive_freelists() && - _cmsSpace->linearAllocationWouldFail()) { - if (PrintGCDetails && Verbose) { - gclog_or_tty->print(" %s: collect because of linAB ", - short_name()); + gclog_or_tty->print(" %s: collect because cmsSpace says so ", + short_name()); } return true; } @@ -1932,6 +1964,7 @@ ref_processor()->set_enqueuing_is_done(false); ref_processor()->enable_discovery(); + ref_processor()->setup_policy(clear_all_soft_refs); // If an asynchronous collection finishes, the _modUnionTable is // all clear. If we are assuming the collection from an asynchronous // collection, clear the _modUnionTable. @@ -1971,8 +2004,9 @@ "Should have been NULL'd before baton was passed"); reset(false /* == !asynch */); _cmsGen->reset_after_compaction(); + _concurrent_cycles_since_last_unload = 0; - if (verifying() && !cms_should_unload_classes()) { + if (verifying() && !should_unload_classes()) { perm_gen_verify_bit_map()->clear_all(); } @@ -2099,6 +2133,7 @@ { bool safepoint_check = Mutex::_no_safepoint_check_flag; MutexLockerEx hl(Heap_lock, safepoint_check); + FreelistLocker fll(this); MutexLockerEx x(CGC_lock, safepoint_check); if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) { // The foreground collector is active or we're @@ -2113,13 +2148,9 @@ // a new cycle. clear_expansion_cause(); } - _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle - // This controls class unloading in response to an explicit gc request. - // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then - // we will unload classes even if CMSClassUnloadingEnabled is not set. - // See CR 6541037 and related CRs. - _unload_classes = _full_gc_requested // ... for this cycle - && ExplicitGCInvokesConcurrentAndUnloadsClasses; + // Decide if we want to enable class unloading as part of the + // ensuing concurrent GC cycle. + update_should_unload_classes(); _full_gc_requested = false; // acks all outstanding full gc requests // Signal that we are about to start a collection gch->increment_total_full_collections(); // ... starting a collection cycle @@ -2357,6 +2388,9 @@ Universe::verify(true); } + // Snapshot the soft reference policy to be used in this collection cycle. 
+ ref_processor()->setup_policy(clear_all_soft_refs); + bool init_mark_was_synchronous = false; // until proven otherwise while (_collectorState != Idling) { if (TraceCMSState) { @@ -2735,13 +2769,14 @@ public: VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {} - void do_bit(size_t offset) { + bool do_bit(size_t offset) { HeapWord* addr = _marks->offsetToHeapWord(offset); if (!_marks->isMarked(addr)) { oop(addr)->print(); gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr); _failed = true; } + return true; } bool failed() { return _failed; } @@ -3048,21 +3083,62 @@ } #endif // PRODUCT +// Decide if we want to enable class unloading as part of the +// ensuing concurrent GC cycle. We will collect the perm gen and +// unload classes if it's the case that: +// (1) an explicit gc request has been made and the flag +// ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR +// (2) (a) class unloading is enabled at the command line, and +// (b) (i) perm gen threshold has been crossed, or +// (ii) old gen is getting really full, or +// (iii) the previous N CMS collections did not collect the +// perm gen +// NOTE: Provided there is no change in the state of the heap between +// calls to this method, it should have idempotent results. Moreover, +// its results should be monotonically increasing (i.e. going from 0 to 1, +// but not 1 to 0) between successive calls between which the heap was +// not collected. For the implementation below, it must thus rely on +// the property that concurrent_cycles_since_last_unload() +// will not decrease unless a collection cycle happened and that +// _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are +// themselves also monotonic in that sense. See check_monotonicity() +// below. 
+bool CMSCollector::update_should_unload_classes() { + _should_unload_classes = false; + // Condition 1 above + if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) { + _should_unload_classes = true; + } else if (CMSClassUnloadingEnabled) { // Condition 2.a above + // Disjuncts 2.b.(i,ii,iii) above + _should_unload_classes = (concurrent_cycles_since_last_unload() >= + CMSClassUnloadingMaxInterval) + || _permGen->should_concurrent_collect() + || _cmsGen->is_too_full(); + } + return _should_unload_classes; +} + +bool ConcurrentMarkSweepGeneration::is_too_full() const { + bool res = should_concurrent_collect(); + res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0); + return res; +} + void CMSCollector::setup_cms_unloading_and_verification_state() { const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC || VerifyBeforeExit; const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; - if (cms_should_unload_classes()) { // Should unload classes this cycle + if (should_unload_classes()) { // Should unload classes this cycle remove_root_scanning_option(rso); // Shrink the root set appropriately set_verifying(should_verify); // Set verification state for this cycle return; // Nothing else needs to be done at this time } // Not unloading classes this cycle - assert(!cms_should_unload_classes(), "Inconsitency!"); - if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) { + assert(!should_unload_classes(), "Inconsitency!"); + if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) { // We were not verifying, or we _were_ unloading classes in the last cycle, // AND some verification options are enabled this cycle; in this case, // we must make sure that the deadness map is allocated if not already so, @@ -3128,31 +3204,16 @@ // YSR: All of this generation expansion/shrinking stuff is an exact copy of // OneContigSpaceCardGeneration, which makes me wonder if we should move this // to CardGeneration and share it... +bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) { + return CardGeneration::expand(bytes, expand_bytes); +} + void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause) { - assert_locked_or_safepoint(Heap_lock); - size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes); - size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); - bool success = false; - if (aligned_expand_bytes > aligned_bytes) { - success = grow_by(aligned_expand_bytes); - } - if (!success) { - success = grow_by(aligned_bytes); - } - if (!success) { - size_t remaining_bytes = _virtual_space.uncommitted_size(); - if (remaining_bytes > 0) { - success = grow_by(remaining_bytes); - } - } - if (GC_locker::is_active()) { - if (PrintGC && Verbose) { - gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead"); - } - } + bool success = expand(bytes, expand_bytes); + // remember why we expanded; this information is used // by shouldConcurrentCollect() when making decisions on whether to start // a new CMS cycle. 
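[Editor's note] update_should_unload_classes() above encodes the policy described in the preceding comment block: unload on an explicit GC request when ExplicitGCInvokesConcurrentAndUnloadsClasses is set, or, when class unloading is enabled, whenever the perm gen wants collecting, the old gen is too full, or too many cycles have passed without unloading. A condensed, side-effect-free restatement follows; the plain parameters stand in for the collector and generation queries.

```cpp
// Condensed form of the class-unloading decision documented above.
bool should_unload_classes(bool full_gc_requested,
                           bool explicit_gc_unloads_classes,
                           bool class_unloading_enabled,
                           long cycles_since_last_unload,
                           long max_interval,
                           bool perm_gen_wants_collection,
                           bool old_gen_is_too_full) {
  if (full_gc_requested && explicit_gc_unloads_classes) {   // condition (1)
    return true;
  }
  if (class_unloading_enabled) {                            // condition (2)(a)
    return cycles_since_last_unload >= max_interval         // (2)(b)(iii)
        || perm_gen_wants_collection                        // (2)(b)(i)
        || old_gen_is_too_full;                             // (2)(b)(ii)
  }
  return false;
}
```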
@@ -3647,9 +3708,9 @@ bool result() { return _result; } void reset(HeapWord* ra) { - assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)"); + assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)"); assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)"); - assert(ra < _perm_space->end(), "ra too large"); + assert(ra < _perm_space->end(), "ra too large"); _restart_addr = _global_finger = ra; _term.reset_for_reuse(); } @@ -3779,7 +3840,7 @@ MutexLockerEx ml(ovflw_stk->par_lock(), Mutex::_no_safepoint_check_flag); // Grab up to 1/4 the size of the work queue - size_t num = MIN2((size_t)work_q->max_elems()/4, + size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, (size_t)ParGCDesiredObjsFromOverflowList); num = MIN2(num, ovflw_stk->length()); for (int i = (int) num; i > 0; i--) { @@ -3803,10 +3864,10 @@ // Align down to a card boundary for the start of 0th task // for this space. aligned_start = - (HeapWord*)align_size_down((uintptr_t)_restart_addr, - CardTableModRefBS::card_size); + (HeapWord*)align_size_down((uintptr_t)_restart_addr, + CardTableModRefBS::card_size); } - + size_t chunk_size = sp->marking_task_size(); while (!pst->is_task_claimed(/* reference */ nth_task)) { // Having claimed the nth task in this space, @@ -3829,13 +3890,13 @@ if (!span.is_empty()) { // Non-null task HeapWord* prev_obj; assert(!span.contains(_restart_addr) || nth_task == 0, - "Inconsistency"); + "Inconsistency"); if (nth_task == 0) { // For the 0th task, we'll not need to compute a block_start. if (span.contains(_restart_addr)) { // In the case of a restart because of stack overflow, // we might additionally skip a chunk prefix. - prev_obj = _restart_addr; + prev_obj = _restart_addr; } else { prev_obj = span.start(); } @@ -3860,7 +3921,7 @@ // scanning, but that appears unavoidable, short of // locking the free list locks; see bug 6324141. break; - } + } } } if (prev_obj < span.end()) { @@ -3890,13 +3951,15 @@ } class Par_ConcMarkingClosure: public OopClosure { + private: CMSCollector* _collector; MemRegion _span; CMSBitMap* _bit_map; CMSMarkStack* _overflow_stack; CMSMarkStack* _revisit_stack; // XXXXXX Check proper use OopTaskQueue* _work_queue; - + protected: + DO_OOP_WORK_DEFN public: Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue, CMSBitMap* bit_map, CMSMarkStack* overflow_stack): @@ -3905,8 +3968,8 @@ _work_queue(work_queue), _bit_map(bit_map), _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc. - - void do_oop(oop* p); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); void trim_queue(size_t max); void handle_stack_overflow(HeapWord* lost); }; @@ -3916,12 +3979,10 @@ // that are in these stolen objects being scanned must // already have been initialized (else they would not have // been published), so we do not need to check for -// uninitialized objects before pushing here. -void Par_ConcMarkingClosure::do_oop(oop* p) { - oop this_oop = *p; - assert(this_oop->is_oop_or_null(true), - "expected an oop or NULL"); - HeapWord* addr = (HeapWord*)this_oop; +// uninitialized objects before pushing here. 
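[Editor's note] The overflow-list refill sites in this patch change the batch size from a quarter of the work queue's capacity to a quarter of its remaining capacity, so a nearly full queue can no longer be overrun by a refill from the shared overflow list. A small sketch of the new sizing rule with an illustrative calculation:

```cpp
#include <algorithm>
#include <cstddef>

// Never grab more from the shared overflow list than a quarter of the space
// still free in this thread's work queue, and never more than the configured
// batch size (ParGCDesiredObjsFromOverflowList in the patch).
size_t overflow_take_count(size_t max_elems, size_t cur_size, size_t desired_batch) {
  return std::min((max_elems - cur_size) / 4, desired_batch);
}

// Example: with a 16384-entry queue already holding 15360 objects, the old
// rule (max_elems/4 = 4096) could overflow the queue; the new rule takes at
// most (16384 - 15360) / 4 = 256 objects.
```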
+void Par_ConcMarkingClosure::do_oop(oop obj) { + assert(obj->is_oop_or_null(true), "expected an oop or NULL"); + HeapWord* addr = (HeapWord*)obj; // Check if oop points into the CMS generation // and is not marked if (_span.contains(addr) && !_bit_map->isMarked(addr)) { @@ -3940,7 +4001,7 @@ } ) if (simulate_overflow || - !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) { + !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { // stack overflow if (PrintCMSStatistics != 0) { gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " @@ -3957,6 +4018,9 @@ } } +void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); } +void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); } + void Par_ConcMarkingClosure::trim_queue(size_t max) { while (_work_queue->size() > max) { oop new_oop; @@ -4056,8 +4120,8 @@ // // Tony 2006.06.29 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount && - ConcurrentMarkSweepThread::should_yield() && - !CMSCollector::foregroundGCIsActive(); ++i) { + ConcurrentMarkSweepThread::should_yield() && + !CMSCollector::foregroundGCIsActive(); ++i) { os::sleep(Thread::current(), 1, false); ConcurrentMarkSweepThread::acknowledge_yield_request(); } @@ -4332,10 +4396,10 @@ CMSPrecleanRefsYieldClosure yield_cl(this); assert(rp->span().equals(_span), "Spans should be equal"); CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, - &_markStack); + &_markStack, true /* preclean */); CMSDrainMarkingStackClosure complete_trace(this, - _span, &_markBitMap, &_markStack, - &keep_alive); + _span, &_markBitMap, &_markStack, + &keep_alive, true /* preclean */); // We don't want this step to interfere with a young // collection because we don't want to take CPU @@ -4534,11 +4598,11 @@ if (!dirtyRegion.is_empty()) { assert(numDirtyCards > 0, "consistency check"); HeapWord* stop_point = NULL; + stopTimer(); + CMSTokenSyncWithLocks ts(true, gen->freelistLock(), + bitMapLock()); + startTimer(); { - stopTimer(); - CMSTokenSyncWithLocks ts(true, gen->freelistLock(), - bitMapLock()); - startTimer(); verify_work_stacks_empty(); verify_overflow_empty(); sample_eden(); @@ -4555,10 +4619,6 @@ assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) || (_collectorState == AbortablePreclean && should_abort_preclean()), "Unparsable objects should only be in perm gen."); - - stopTimer(); - CMSTokenSyncWithLocks ts(true, bitMapLock()); - startTimer(); _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end())); if (should_abort_preclean()) { break; // out of preclean loop @@ -4614,8 +4674,11 @@ startTimer(); sample_eden(); // Get and clear dirty region from card table - dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean( - MemRegion(nextAddr, endAddr)); + dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset( + MemRegion(nextAddr, endAddr), + true, + CardTableModRefBS::precleaned_card_val()); + assert(dirtyRegion.start() >= nextAddr, "returned region inconsistent?"); } @@ -4723,7 +4786,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); - if (cms_should_unload_classes()) { + if (should_unload_classes()) { CodeCache::gc_prologue(); } assert(haveFreelistLocks(), "must have free list locks"); @@ -4783,7 +4846,7 @@ verify_work_stacks_empty(); verify_overflow_empty(); - if (cms_should_unload_classes()) { + if (should_unload_classes()) { CodeCache::gc_epilogue(); } @@ -4793,17 +4856,19 @@ // recurrence of that condition. 
assert(_markStack.isEmpty(), "No grey objects"); size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw + - _ser_kac_ovflw; + _ser_kac_ovflw + _ser_kac_preclean_ovflw; if (ser_ovflw > 0) { if (PrintCMSStatistics != 0) { gclog_or_tty->print_cr("Marking stack overflow (benign) " - "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")", + "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT + ", kac_preclean="SIZE_FORMAT")", _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, - _ser_kac_ovflw); + _ser_kac_ovflw, _ser_kac_preclean_ovflw); } _markStack.expand(); _ser_pmc_remark_ovflw = 0; _ser_pmc_preclean_ovflw = 0; + _ser_kac_preclean_ovflw = 0; _ser_kac_ovflw = 0; } if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) { @@ -5132,13 +5197,12 @@ NOT_PRODUCT(int num_steals = 0;) oop obj_to_scan; CMSBitMap* bm = &(_collector->_markBitMap); - size_t num_from_overflow_list = - MIN2((size_t)work_q->max_elems()/4, - (size_t)ParGCDesiredObjsFromOverflowList); while (true) { // Completely finish any left over work from (an) earlier round(s) cl->trim_queue(0); + size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, + (size_t)ParGCDesiredObjsFromOverflowList); // Now check if there's any work in the overflow list if (_collector->par_take_from_overflow_list(num_from_overflow_list, work_q)) { @@ -5383,8 +5447,8 @@ &mrias_cl); { TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty); - // Iterate over the dirty cards, marking them precleaned, and - // setting the corresponding bits in the mod union table. + // Iterate over the dirty cards, setting the corresponding bits in the + // mod union table. { ModUnionClosure modUnionClosure(&_modUnionTable); _ct->ct_bs()->dirty_card_iterate( @@ -5482,8 +5546,9 @@ _term(total_workers, task_queues) { assert(_collector->_span.equals(_span) && !_span.is_empty(), - "Inconsistency in _span"); + "Inconsistency in _span"); } + OopTaskQueueSet* task_queues() { return _task_queues; } OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } @@ -5549,13 +5614,12 @@ OopTaskQueue* work_q = work_queue(i); NOT_PRODUCT(int num_steals = 0;) oop obj_to_scan; - size_t num_from_overflow_list = - MIN2((size_t)work_q->max_elems()/4, - (size_t)ParGCDesiredObjsFromOverflowList); while (true) { // Completely finish any left over work from (an) earlier round(s) drain->trim_queue(0); + size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, + (size_t)ParGCDesiredObjsFromOverflowList); // Now check if there's any work in the overflow list if (_collector->par_take_from_overflow_list(num_from_overflow_list, work_q)) { @@ -5615,40 +5679,29 @@ ResourceMark rm; HandleMark hm; - ReferencePolicy* soft_ref_policy; - assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete"); + ReferenceProcessor* rp = ref_processor(); + assert(rp->span().equals(_span), "Spans should be equal"); + assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete"); // Process weak references. 
- if (clear_all_soft_refs) { - soft_ref_policy = new AlwaysClearPolicy(); - } else { -#ifdef COMPILER2 - soft_ref_policy = new LRUMaxHeapPolicy(); -#else - soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - } + rp->setup_policy(clear_all_soft_refs); verify_work_stacks_empty(); - ReferenceProcessor* rp = ref_processor(); - assert(rp->span().equals(_span), "Spans should be equal"); CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, - &_markStack); + &_markStack, false /* !preclean */); CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, _span, &_markBitMap, &_markStack, - &cmsKeepAliveClosure); + &cmsKeepAliveClosure, false /* !preclean */); { TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); if (rp->processing_is_mt()) { CMSRefProcTaskExecutor task_executor(*this); - rp->process_discovered_references(soft_ref_policy, - &_is_alive_closure, + rp->process_discovered_references(&_is_alive_closure, &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure, &task_executor); } else { - rp->process_discovered_references(soft_ref_policy, - &_is_alive_closure, + rp->process_discovered_references(&_is_alive_closure, &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure, NULL); @@ -5656,7 +5709,7 @@ verify_work_stacks_empty(); } - if (cms_should_unload_classes()) { + if (should_unload_classes()) { { TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty); @@ -5759,14 +5812,20 @@ // this cycle, we preserve the perm gen object "deadness" information // in the perm_gen_verify_bit_map. In order to do that we traverse // all blocks in perm gen and mark all dead objects. - if (verifying() && !cms_should_unload_classes()) { - CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(), - bitMapLock()); + if (verifying() && !should_unload_classes()) { assert(perm_gen_verify_bit_map()->sizeInBits() != 0, "Should have already been allocated"); MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(), markBitMap(), perm_gen_verify_bit_map()); - _permGen->cmsSpace()->blk_iterate(&mdo); + if (asynch) { + CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(), + bitMapLock()); + _permGen->cmsSpace()->blk_iterate(&mdo); + } else { + // In the case of synchronous sweep, we already have + // the requisite locks/tokens. + _permGen->cmsSpace()->blk_iterate(&mdo); + } } if (asynch) { @@ -5780,7 +5839,7 @@ } // Now repeat for perm gen - if (cms_should_unload_classes()) { + if (should_unload_classes()) { CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(), bitMapLock()); sweepWork(_permGen, asynch); @@ -5802,7 +5861,7 @@ // already have needed locks sweepWork(_cmsGen, asynch); - if (cms_should_unload_classes()) { + if (should_unload_classes()) { sweepWork(_permGen, asynch); } // Update heap occupancy information which is used as @@ -5964,6 +6023,11 @@ } gen->cmsSpace()->sweep_completed(); gen->cmsSpace()->endSweepFLCensus(sweepCount()); + if (should_unload_classes()) { // unloaded classes this cycle, + _concurrent_cycles_since_last_unload = 0; // ... reset count + } else { // did not unload classes, + _concurrent_cycles_since_last_unload++; // ... 
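[Editor's note] The deleted block above hand-picked a soft-reference policy (always clear, or an LRU-style policy depending on the compiler configuration); the patch replaces it with rp->setup_policy(clear_all_soft_refs) so one policy snapshot is taken per cycle and reused for discovery and processing. The sketch below models only the selection logic; the ReferencePolicy interface shown is hypothetical, not the HotSpot one.

```cpp
#include <memory>

// Hypothetical policy interface used only to illustrate the selection.
struct ReferencePolicy {
  virtual bool should_clear(long idle_ms) = 0;
  virtual ~ReferencePolicy() {}
};
struct AlwaysClearPolicy : ReferencePolicy {
  bool should_clear(long) override { return true; }
};
struct LRUPolicy : ReferencePolicy {
  long max_idle_ms;
  explicit LRUPolicy(long ms) : max_idle_ms(ms) {}
  bool should_clear(long idle_ms) override { return idle_ms > max_idle_ms; }
};

// setup_policy(clear_all_soft_refs) centralizes what the deleted code did by
// hand: clear everything on a "clear all" request, otherwise keep softly
// reachable objects that were used recently enough.
std::unique_ptr<ReferencePolicy> setup_policy(bool clear_all_soft_refs, long budget_ms) {
  if (clear_all_soft_refs) return std::make_unique<AlwaysClearPolicy>();
  return std::make_unique<LRUPolicy>(budget_ms);
}
```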
increment count + } } // Reset CMS data structures (for now just the marking bit map) @@ -6008,13 +6072,13 @@ } icms_wait(); - // See the comment in coordinator_yield() - for (unsigned i = 0; i < CMSYieldSleepCount && - ConcurrentMarkSweepThread::should_yield() && - !CMSCollector::foregroundGCIsActive(); ++i) { - os::sleep(Thread::current(), 1, false); - ConcurrentMarkSweepThread::acknowledge_yield_request(); - } + // See the comment in coordinator_yield() + for (unsigned i = 0; i < CMSYieldSleepCount && + ConcurrentMarkSweepThread::should_yield() && + !CMSCollector::foregroundGCIsActive(); ++i) { + os::sleep(Thread::current(), 1, false); + ConcurrentMarkSweepThread::acknowledge_yield_request(); + } ConcurrentMarkSweepThread::synchronize(true); bitMapLock()->lock_without_safepoint_check(); @@ -6092,8 +6156,8 @@ #endif size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const { - assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1), - "missing Printezis mark?"); + assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1), + "missing Printezis mark?"); HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); size_t size = pointer_delta(nextOneAddr + 1, addr); assert(size == CompactibleFreeListSpace::adjustObjectSize(size), @@ -6122,7 +6186,7 @@ HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const { size_t sz = 0; oop p = (oop)addr; - if (p->klass() != NULL && p->is_parsable()) { + if (p->klass_or_null() != NULL && p->is_parsable()) { sz = CompactibleFreeListSpace::adjustObjectSize(p->size()); } else { sz = block_size_using_printezis_bits(addr); @@ -6144,7 +6208,7 @@ // bit vector itself. That is done by a separate call CMSBitMap::allocate() // further below. CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name): - _bm(NULL,0), + _bm(), _shifter(shifter), _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL) { @@ -6169,7 +6233,7 @@ } assert(_virtual_space.committed_size() == brs.size(), "didn't reserve backing store for all of CMS bit map?"); - _bm.set_map((uintptr_t*)_virtual_space.low()); + _bm.set_map((BitMap::bm_word_t*)_virtual_space.low()); assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= _bmWordSize, "inconsistency in bit map sizing"); _bm.set_size(_bmWordSize >> _shifter); @@ -6324,19 +6388,19 @@ assert(_bitMap->covers(_span), "_bitMap/_span mismatch"); } -void MarkRefsIntoClosure::do_oop(oop* p) { +void MarkRefsIntoClosure::do_oop(oop obj) { // if p points into _span, then mark corresponding bit in _markBitMap - oop thisOop = *p; - if (thisOop != NULL) { - assert(thisOop->is_oop(), "expected an oop"); - HeapWord* addr = (HeapWord*)thisOop; - if (_span.contains(addr)) { - // this should be made more efficient - _bitMap->mark(addr); - } + assert(obj->is_oop(), "expected an oop"); + HeapWord* addr = (HeapWord*)obj; + if (_span.contains(addr)) { + // this should be made more efficient + _bitMap->mark(addr); } } +void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); } +void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); } + // A variant of the above, used for CMS marking verification. 
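[Editor's note] block_size_using_printezis_bits (context above) recovers a block's size from the mark bitmap alone: a "Printezis-marked" block has its first two words marked and an extra mark on its last word, so the size is the distance from the block start to one word past the next mark found after addr + 2. A toy, runnable model of that lookup:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Toy model of block_size_using_printezis_bits(): the collector marks the
// first two words of an unparsable block and the word just before its end,
// so the block length can be recovered from the bitmap alone.
size_t printezis_block_size(const std::vector<bool>& mark_bits, size_t addr) {
  assert(mark_bits[addr] && mark_bits[addr + 1] && "missing Printezis mark?");
  size_t next = addr + 2;
  while (next < mark_bits.size() && !mark_bits[next]) next++;  // getNextMarkedWordAddress
  return next + 1 - addr;  // block extends one word past the trailing mark
}

int main() {
  std::vector<bool> bits(32, false);
  bits[4] = bits[5] = true;  // Printezis pair at the block start
  bits[11] = true;           // mark on the block's last word
  return printezis_block_size(bits, 4) == 8 ? 0 : 1;  // an 8-word block
}
```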
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure( MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm, @@ -6349,23 +6413,23 @@ assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch"); } -void MarkRefsIntoVerifyClosure::do_oop(oop* p) { +void MarkRefsIntoVerifyClosure::do_oop(oop obj) { // if p points into _span, then mark corresponding bit in _markBitMap - oop this_oop = *p; - if (this_oop != NULL) { - assert(this_oop->is_oop(), "expected an oop"); - HeapWord* addr = (HeapWord*)this_oop; - if (_span.contains(addr)) { - _verification_bm->mark(addr); - if (!_cms_bm->isMarked(addr)) { - oop(addr)->print(); - gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr); - fatal("... aborting"); - } + assert(obj->is_oop(), "expected an oop"); + HeapWord* addr = (HeapWord*)obj; + if (_span.contains(addr)) { + _verification_bm->mark(addr); + if (!_cms_bm->isMarked(addr)) { + oop(addr)->print(); + gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr); + fatal("... aborting"); } } } +void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } +void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } + ////////////////////////////////////////////////// // MarkRefsIntoAndScanClosure ////////////////////////////////////////////////// @@ -6400,13 +6464,13 @@ // The marks are made in the marking bit map and the marking stack is // used for keeping the (newly) grey objects during the scan. // The parallel version (Par_...) appears further below. -void MarkRefsIntoAndScanClosure::do_oop(oop* p) { - oop this_oop = *p; - if (this_oop != NULL) { - assert(this_oop->is_oop(), "expected an oop"); - HeapWord* addr = (HeapWord*)this_oop; - assert(_mark_stack->isEmpty(), "post-condition (eager drainage)"); - assert(_collector->overflow_list_is_empty(), "should be empty"); +void MarkRefsIntoAndScanClosure::do_oop(oop obj) { + if (obj != NULL) { + assert(obj->is_oop(), "expected an oop"); + HeapWord* addr = (HeapWord*)obj; + assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)"); + assert(_collector->overflow_list_is_empty(), + "overflow list should be empty"); if (_span.contains(addr) && !_bit_map->isMarked(addr)) { // mark bit map (object is now grey) @@ -6414,7 +6478,7 @@ // push on marking stack (stack should be empty), and drain the // stack by applying this closure to the oops in the oops popped // from the stack (i.e. 
blacken the grey objects) - bool res = _mark_stack->push(this_oop); + bool res = _mark_stack->push(obj); assert(res, "Should have space to push on empty stack"); do { oop new_oop = _mark_stack->pop(); @@ -6450,6 +6514,9 @@ } } +void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } +void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } + void MarkRefsIntoAndScanClosure::do_yield_work() { assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "CMS thread should hold CMS token"); @@ -6468,10 +6535,12 @@ _collector->icms_wait(); // See the comment in coordinator_yield() - for (unsigned i = 0; i < CMSYieldSleepCount && - ConcurrentMarkSweepThread::should_yield() && - !CMSCollector::foregroundGCIsActive(); ++i) { - os::sleep(Thread::current(), 1, false); + for (unsigned i = 0; + i < CMSYieldSleepCount && + ConcurrentMarkSweepThread::should_yield() && + !CMSCollector::foregroundGCIsActive(); + ++i) { + os::sleep(Thread::current(), 1, false); ConcurrentMarkSweepThread::acknowledge_yield_request(); } @@ -6507,13 +6576,12 @@ // the scan phase whence they are also available for stealing by parallel // threads. Since the marking bit map is shared, updates are // synchronized (via CAS). -void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { - oop this_oop = *p; - if (this_oop != NULL) { +void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) { + if (obj != NULL) { // Ignore mark word because this could be an already marked oop // that may be chained at the end of the overflow list. - assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop"); - HeapWord* addr = (HeapWord*)this_oop; + assert(obj->is_oop(true), "expected an oop"); + HeapWord* addr = (HeapWord*)obj; if (_span.contains(addr) && !_bit_map->isMarked(addr)) { // mark bit map (object will become grey): @@ -6527,7 +6595,7 @@ // queue to an appropriate length by applying this closure to // the oops in the oops popped from the stack (i.e. blacken the // grey objects) - bool res = _work_queue->push(this_oop); + bool res = _work_queue->push(obj); assert(res, "Low water mark should be less than capacity?"); trim_queue(_low_water_mark); } // Else, another thread claimed the object @@ -6535,6 +6603,9 @@ } } +void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } +void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } + // This closure is used to rescan the marked objects on the dirty cards // in the mod union table and the card table proper. size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m( @@ -6552,7 +6623,7 @@ } if (_bitMap->isMarked(addr)) { // it's marked; is it potentially uninitialized? - if (p->klass() != NULL) { + if (p->klass_or_null() != NULL) { if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) { // Signal precleaning to redirty the card since // the klass pointer is already installed. @@ -6565,11 +6636,8 @@ if (p->is_objArray()) { // objArrays are precisely marked; restrict scanning // to dirty cards only. 
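
In the parallel variant above (Par_MarkRefsIntoAndScanClosure), the mark bit map is shared between worker threads, so an object is claimed with an atomic mark; only the claiming thread pushes it onto its work queue and then trims the queue back to a low-water mark so that other workers can steal. A reduced, standard-C++ sketch of that step, with a std::atomic word and a std::deque standing in for CMSBitMap and the oop task queue:

#include <atomic>
#include <cstddef>
#include <deque>

struct ParMarkSketch {
  std::atomic<unsigned long> bits{0};     // stand-in for one word of the shared mark bit map
  std::deque<const void*> queue;          // stand-in for the per-thread work queue
  size_t low_water_mark = 4;

  // Returns true only for the thread that actually set the bit (it claims the object).
  bool par_mark(size_t bit) {
    unsigned long mask = 1UL << bit;
    unsigned long old = bits.fetch_or(mask, std::memory_order_acq_rel);
    return (old & mask) == 0;
  }

  void do_oop(const void* obj, size_t bit) {
    if (par_mark(bit)) {                  // first thread to mark it owns the push
      queue.push_back(obj);
      trim_queue(low_water_mark);         // keep the queue short; leave some for stealers
    }                                     // else another thread claimed the object
  }

  void trim_queue(size_t max) {
    while (queue.size() > max) {
      const void* o = queue.back();
      queue.pop_back();
      (void)o;                            // the real collector scans o's fields here
    }
  }
};
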
- size = p->oop_iterate(_scanningClosure, mr); - assert(size == CompactibleFreeListSpace::adjustObjectSize(size), - "adjustObjectSize should be the identity for array sizes, " - "which are necessarily larger than minimum object size of " - "two heap words"); + size = CompactibleFreeListSpace::adjustObjectSize( + p->oop_iterate(_scanningClosure, mr)); } else { // A non-array may have been imprecisely marked; we need // to scan object in its entirety. @@ -6603,7 +6671,7 @@ } } else { // Either a not yet marked object or an uninitialized object - if (p->klass() == NULL || !p->is_parsable()) { + if (p->klass_or_null() == NULL || !p->is_parsable()) { // An uninitialized object, skip to the next card, since // we may not be able to read its P-bits yet. assert(size == 0, "Initial value"); @@ -6637,9 +6705,9 @@ // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && - ConcurrentMarkSweepThread::should_yield() && - !CMSCollector::foregroundGCIsActive(); ++i) { - os::sleep(Thread::current(), 1, false); + ConcurrentMarkSweepThread::should_yield() && + !CMSCollector::foregroundGCIsActive(); ++i) { + os::sleep(Thread::current(), 1, false); ConcurrentMarkSweepThread::acknowledge_yield_request(); } @@ -6660,7 +6728,7 @@ HeapWord* addr = (HeapWord*)p; DEBUG_ONLY(_collector->verify_work_stacks_empty();) assert(!_span.contains(addr), "we are scanning the survivor spaces"); - assert(p->klass() != NULL, "object should be initializd"); + assert(p->klass_or_null() != NULL, "object should be initializd"); assert(p->is_parsable(), "must be parsable."); // an initialized object; ignore mark word in verification below // since we are running concurrent with mutators @@ -6803,10 +6871,10 @@ // Should revisit to see if this should be restructured for // greater efficiency. -void MarkFromRootsClosure::do_bit(size_t offset) { +bool MarkFromRootsClosure::do_bit(size_t offset) { if (_skipBits > 0) { _skipBits--; - return; + return true; } // convert offset into a HeapWord* HeapWord* addr = _bitMap->startWord() + offset; @@ -6818,7 +6886,7 @@ assert(_skipBits == 0, "tautology"); _skipBits = 2; // skip next two marked bits ("Printezis-marks") oop p = oop(addr); - if (p->klass() == NULL || !p->is_parsable()) { + if (p->klass_or_null() == NULL || !p->is_parsable()) { DEBUG_ONLY(if (!_verifying) {) // We re-dirty the cards on which this object lies and increase // the _threshold so that we'll come back to scan this object @@ -6838,16 +6906,17 @@ if (_threshold < end_card_addr) { _threshold = end_card_addr; } - if (p->klass() != NULL) { + if (p->klass_or_null() != NULL) { // Redirty the range of cards... _mut->mark_range(redirty_range); } // ...else the setting of klass will dirty the card anyway. } DEBUG_ONLY(}) - return; + return true; } } scanOopsInOop(addr); + return true; } // We take a break if we've been at this for a while, @@ -6888,13 +6957,13 @@ assert(_markStack->isEmpty(), "should drain stack to limit stack usage"); // convert ptr to an oop preparatory to scanning - oop this_oop = oop(ptr); + oop obj = oop(ptr); // Ignore mark word in verification below, since we // may be running concurrent with mutators. 
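
MarkFromRootsClosure::do_bit, like the other do_bit closures in this change, now returns a bool rather than void, so the underlying bit-map iteration can stop early when a closure returns false; all the implementations shown here return true ("keep going"). A minimal sketch of that contract, with a plain std::vector<bool> in place of CMSBitMap:

#include <cstddef>
#include <vector>

struct BitClosureSketch {
  virtual bool do_bit(size_t offset) = 0;   // false == "stop iterating"
  virtual ~BitClosureSketch() {}
};

// Visit every set bit in order; honour an early-out request from the closure.
inline bool iterate(const std::vector<bool>& bits, BitClosureSketch* cl) {
  for (size_t i = 0; i < bits.size(); ++i) {
    if (bits[i] && !cl->do_bit(i)) {
      return false;                         // closure asked us to stop
    }
  }
  return true;                              // walked the whole map
}
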
- assert(this_oop->is_oop(true), "should be an oop"); + assert(obj->is_oop(true), "should be an oop"); assert(_finger <= ptr, "_finger runneth ahead"); // advance the finger to right end of this object - _finger = ptr + this_oop->size(); + _finger = ptr + obj->size(); assert(_finger > ptr, "we just incremented it above"); // On large heaps, it may take us some time to get through // the marking phase (especially if running iCMS). During @@ -6940,7 +7009,7 @@ _span, _bitMap, _markStack, _revisitStack, _finger, this); - bool res = _markStack->push(this_oop); + bool res = _markStack->push(obj); assert(res, "Empty non-zero size stack should have space for single push"); while (!_markStack->isEmpty()) { oop new_oop = _markStack->pop(); @@ -6981,10 +7050,10 @@ // Should revisit to see if this should be restructured for // greater efficiency. -void Par_MarkFromRootsClosure::do_bit(size_t offset) { +bool Par_MarkFromRootsClosure::do_bit(size_t offset) { if (_skip_bits > 0) { _skip_bits--; - return; + return true; } // convert offset into a HeapWord* HeapWord* addr = _bit_map->startWord() + offset; @@ -6996,13 +7065,14 @@ assert(_skip_bits == 0, "tautology"); _skip_bits = 2; // skip next two marked bits ("Printezis-marks") oop p = oop(addr); - if (p->klass() == NULL || !p->is_parsable()) { + if (p->klass_or_null() == NULL || !p->is_parsable()) { // in the case of Clean-on-Enter optimization, redirty card // and avoid clearing card by increasing the threshold. - return; + return true; } } scan_oops_in_oop(addr); + return true; } void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) { @@ -7012,13 +7082,13 @@ assert(_work_queue->size() == 0, "should drain stack to limit stack usage"); // convert ptr to an oop preparatory to scanning - oop this_oop = oop(ptr); + oop obj = oop(ptr); // Ignore mark word in verification below, since we // may be running concurrent with mutators. - assert(this_oop->is_oop(true), "should be an oop"); + assert(obj->is_oop(true), "should be an oop"); assert(_finger <= ptr, "_finger runneth ahead"); // advance the finger to right end of this object - _finger = ptr + this_oop->size(); + _finger = ptr + obj->size(); assert(_finger > ptr, "we just incremented it above"); // On large heaps, it may take us some time to get through // the marking phase (especially if running iCMS). During @@ -7066,7 +7136,7 @@ _revisit_stack, _finger, gfa, this); - bool res = _work_queue->push(this_oop); // overflow could occur here + bool res = _work_queue->push(obj); // overflow could occur here assert(res, "Will hold once we use workqueues"); while (true) { oop new_oop; @@ -7125,7 +7195,7 @@ // Should revisit to see if this should be restructured for // greater efficiency. 
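
scanOopsInOop and its parallel counterpart advance the finger past the current object before pushing it, and then drain the stack (or work queue) completely, so stack usage stays bounded and the finger never runs behind the scan. A simplified standalone sketch of that discipline; it omits the span filtering and mark-bit checks of the real code, so it assumes an acyclic object graph:

#include <cstddef>
#include <vector>

struct Obj {
  size_t size_in_words;
  std::vector<const Obj*> fields;           // outgoing references
};

struct FingerScanSketch {
  size_t finger = 0;                        // word index of the scan frontier
  std::vector<const Obj*> mark_stack;

  void scan_oops_in_oop(const Obj* p, size_t addr_in_words) {
    // advance the finger to the right end of this object *first*
    finger = addr_in_words + p->size_in_words;
    mark_stack.push_back(p);
    while (!mark_stack.empty()) {           // blacken grey objects eagerly
      const Obj* grey = mark_stack.back();
      mark_stack.pop_back();
      for (const Obj* f : grey->fields) {   // real code pushes only newly marked oops
        if (f != nullptr) mark_stack.push_back(f);
      }
    }
  }
};
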
-void MarkFromRootsVerifyClosure::do_bit(size_t offset) { +bool MarkFromRootsVerifyClosure::do_bit(size_t offset) { // convert offset into a HeapWord* HeapWord* addr = _verification_bm->startWord() + offset; assert(_verification_bm->endWord() && addr < _verification_bm->endWord(), @@ -7136,15 +7206,15 @@ assert(_mark_stack->isEmpty(), "should drain stack to limit stack usage"); // convert addr to an oop preparatory to scanning - oop this_oop = oop(addr); - assert(this_oop->is_oop(), "should be an oop"); + oop obj = oop(addr); + assert(obj->is_oop(), "should be an oop"); assert(_finger <= addr, "_finger runneth ahead"); // advance the finger to right end of this object - _finger = addr + this_oop->size(); + _finger = addr + obj->size(); assert(_finger > addr, "we just incremented it above"); // Note: the finger doesn't advance while we drain // the stack below. - bool res = _mark_stack->push(this_oop); + bool res = _mark_stack->push(obj); assert(res, "Empty non-zero size stack should have space for single push"); while (!_mark_stack->isEmpty()) { oop new_oop = _mark_stack->pop(); @@ -7153,6 +7223,7 @@ new_oop->oop_iterate(&_pam_verify_closure); } assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition"); + return true; } PushAndMarkVerifyClosure::PushAndMarkVerifyClosure( @@ -7167,6 +7238,8 @@ _mark_stack(mark_stack) { } +void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); } +void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); } // Upon stack overflow, we discard (part of) the stack, // remembering the least address amongst those discarded @@ -7179,20 +7252,20 @@ _mark_stack->expand(); // expand the stack if possible } -void PushAndMarkVerifyClosure::do_oop(oop* p) { - oop this_oop = *p; - assert(this_oop->is_oop_or_null(), "expected an oop or NULL"); - HeapWord* addr = (HeapWord*)this_oop; +void PushAndMarkVerifyClosure::do_oop(oop obj) { + assert(obj->is_oop_or_null(), "expected an oop or NULL"); + HeapWord* addr = (HeapWord*)obj; if (_span.contains(addr) && !_verification_bm->isMarked(addr)) { // Oop lies in _span and isn't yet grey or black _verification_bm->mark(addr); // now grey if (!_cms_bm->isMarked(addr)) { oop(addr)->print(); - gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr); + gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", + addr); fatal("... aborting"); } - if (!_mark_stack->push(this_oop)) { // stack overflow + if (!_mark_stack->push(obj)) { // stack overflow if (PrintCMSStatistics != 0) { gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity()); @@ -7219,7 +7292,7 @@ _revisitStack(revisitStack), _finger(finger), _parent(parent), - _should_remember_klasses(collector->cms_should_unload_classes()) + _should_remember_klasses(collector->should_unload_classes()) { } Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector, @@ -7242,11 +7315,11 @@ _finger(finger), _global_finger_addr(global_finger_addr), _parent(parent), - _should_remember_klasses(collector->cms_should_unload_classes()) + _should_remember_klasses(collector->should_unload_classes()) { } // Assumes thread-safe access by callers, who are -// responsible for mutual exclusion. +// responsible for mutual exclusion. 
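
When the verification mark stack overflows, handle_stack_overflow above records the least address among the discarded entries via CMSCollector::lower_restart_addr (the hunk that follows), so marking can later be restarted from that point. In isolation the bookkeeping reduces to a guarded minimum; a tiny sketch with plain integer addresses:

#include <algorithm>
#include <cstdint>

struct RestartSketch {
  uintptr_t restart_addr = 0;               // 0 plays the role of NULL here

  // Called (under the callers' mutual exclusion) with the least address just lost.
  void lower_restart_addr(uintptr_t lost) {
    if (restart_addr == 0) {
      restart_addr = lost;
    } else {
      restart_addr = std::min(restart_addr, lost);
    }
  }
};
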
void CMSCollector::lower_restart_addr(HeapWord* low) { assert(_span.contains(low), "Out of bounds addr"); if (_restart_addr == NULL) { @@ -7282,12 +7355,10 @@ _overflow_stack->expand(); // expand the stack if possible } - -void PushOrMarkClosure::do_oop(oop* p) { - oop thisOop = *p; +void PushOrMarkClosure::do_oop(oop obj) { // Ignore mark word because we are running concurrent with mutators. - assert(thisOop->is_oop_or_null(true), "expected an oop or NULL"); - HeapWord* addr = (HeapWord*)thisOop; + assert(obj->is_oop_or_null(true), "expected an oop or NULL"); + HeapWord* addr = (HeapWord*)obj; if (_span.contains(addr) && !_bitMap->isMarked(addr)) { // Oop lies in _span and isn't yet grey or black _bitMap->mark(addr); // now grey @@ -7303,7 +7374,7 @@ simulate_overflow = true; } ) - if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow + if (simulate_overflow || !_markStack->push(obj)) { // stack overflow if (PrintCMSStatistics != 0) { gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity()); @@ -7319,11 +7390,13 @@ } } -void Par_PushOrMarkClosure::do_oop(oop* p) { - oop this_oop = *p; +void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); } +void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); } + +void Par_PushOrMarkClosure::do_oop(oop obj) { // Ignore mark word because we are running concurrent with mutators. - assert(this_oop->is_oop_or_null(true), "expected an oop or NULL"); - HeapWord* addr = (HeapWord*)this_oop; + assert(obj->is_oop_or_null(true), "expected an oop or NULL"); + HeapWord* addr = (HeapWord*)obj; if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) { // Oop lies in _span and isn't yet grey or black // We read the global_finger (volatile read) strictly after marking oop @@ -7352,7 +7425,7 @@ } ) if (simulate_overflow || - !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) { + !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { // stack overflow if (PrintCMSStatistics != 0) { gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " @@ -7369,6 +7442,8 @@ } } +void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } +void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, MemRegion span, @@ -7386,23 +7461,22 @@ _mark_stack(mark_stack), _revisit_stack(revisit_stack), _concurrent_precleaning(concurrent_precleaning), - _should_remember_klasses(collector->cms_should_unload_classes()) + _should_remember_klasses(collector->should_unload_classes()) { assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); } // Grey object rescan during pre-cleaning and second checkpoint phases -- // the non-parallel version (the parallel version appears further below.) -void PushAndMarkClosure::do_oop(oop* p) { - oop this_oop = *p; - // Ignore mark word verification. If during concurrent precleaning +void PushAndMarkClosure::do_oop(oop obj) { + // Ignore mark word verification. If during concurrent precleaning, // the object monitor may be locked. If during the checkpoint // phases, the object may already have been reached by a different // path and may be at the end of the global overflow list (so // the mark word may be NULL). 
- assert(this_oop->is_oop_or_null(true/* ignore mark word */), + assert(obj->is_oop_or_null(true /* ignore mark word */), "expected an oop or NULL"); - HeapWord* addr = (HeapWord*)this_oop; + HeapWord* addr = (HeapWord*)obj; // Check if oop points into the CMS generation // and is not marked if (_span.contains(addr) && !_bit_map->isMarked(addr)) { @@ -7417,32 +7491,32 @@ simulate_overflow = true; } ) - if (simulate_overflow || !_mark_stack->push(this_oop)) { + if (simulate_overflow || !_mark_stack->push(obj)) { if (_concurrent_precleaning) { // During precleaning we can just dirty the appropriate card(s) // in the mod union table, thus ensuring that the object remains - // in the grey set and continue. In the case of object arrays + // in the grey set and continue. In the case of object arrays // we need to dirty all of the cards that the object spans, // since the rescan of object arrays will be limited to the // dirty cards. // Note that no one can be intefering with us in this action // of dirtying the mod union table, so no locking or atomics // are required. - if (this_oop->is_objArray()) { - size_t sz = this_oop->size(); + if (obj->is_objArray()) { + size_t sz = obj->size(); HeapWord* end_card_addr = (HeapWord*)round_to( - (intptr_t)(addr+sz), CardTableModRefBS::card_size); + (intptr_t)(addr+sz), CardTableModRefBS::card_size); MemRegion redirty_range = MemRegion(addr, end_card_addr); assert(!redirty_range.is_empty(), "Arithmetical tautology"); _mod_union_table->mark_range(redirty_range); } else { _mod_union_table->mark(addr); - } + } _collector->_ser_pmc_preclean_ovflw++; } else { // During the remark phase, we need to remember this oop // in the overflow list. - _collector->push_on_overflow_list(this_oop); + _collector->push_on_overflow_list(obj); _collector->_ser_pmc_remark_ovflw++; } } @@ -7461,15 +7535,17 @@ _bit_map(bit_map), _work_queue(work_queue), _revisit_stack(revisit_stack), - _should_remember_klasses(collector->cms_should_unload_classes()) + _should_remember_klasses(collector->should_unload_classes()) { assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); } +void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); } +void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); } + // Grey object rescan during second checkpoint phase -- // the parallel version. -void Par_PushAndMarkClosure::do_oop(oop* p) { - oop this_oop = *p; +void Par_PushAndMarkClosure::do_oop(oop obj) { // In the assert below, we ignore the mark word because // this oop may point to an already visited object that is // on the overflow stack (in which case the mark word has @@ -7481,10 +7557,10 @@ // value, by the time we get to examined this failing assert in // the debugger, is_oop_or_null(false) may subsequently start // to hold. - assert(this_oop->is_oop_or_null(true), + assert(obj->is_oop_or_null(true), "expected an oop or NULL"); - HeapWord* addr = (HeapWord*)this_oop; - // Check if oop points into the CMS generation + HeapWord* addr = (HeapWord*)obj; + // Check if oop points into the CMS generation // and is not marked if (_span.contains(addr) && !_bit_map->isMarked(addr)) { // a white object ... 
@@ -7501,14 +7577,17 @@ simulate_overflow = true; } ) - if (simulate_overflow || !_work_queue->push(this_oop)) { - _collector->par_push_on_overflow_list(this_oop); + if (simulate_overflow || !_work_queue->push(obj)) { + _collector->par_push_on_overflow_list(obj); _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS } } // Else, some other thread got there first } } +void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } +void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } + void PushAndMarkClosure::remember_klass(Klass* k) { if (!_revisit_stack->push(oop(k))) { fatal("Revisit stack overflowed in PushAndMarkClosure"); @@ -7982,8 +8061,8 @@ "alignment problem"); #ifdef DEBUG - if (oop(addr)->klass() != NULL && - ( !_collector->cms_should_unload_classes() + if (oop(addr)->klass_or_null() != NULL && + ( !_collector->should_unload_classes() || oop(addr)->is_parsable())) { // Ignore mark word because we are running concurrent with mutators assert(oop(addr)->is_oop(true), "live block should be an oop"); @@ -7995,8 +8074,8 @@ } else { // This should be an initialized object that's alive. - assert(oop(addr)->klass() != NULL && - (!_collector->cms_should_unload_classes() + assert(oop(addr)->klass_or_null() != NULL && + (!_collector->should_unload_classes() || oop(addr)->is_parsable()), "Should be an initialized object"); // Ignore mark word because we are running concurrent with mutators @@ -8202,9 +8281,8 @@ } // CMSKeepAliveClosure: the serial version -void CMSKeepAliveClosure::do_oop(oop* p) { - oop this_oop = *p; - HeapWord* addr = (HeapWord*)this_oop; +void CMSKeepAliveClosure::do_oop(oop obj) { + HeapWord* addr = (HeapWord*)obj; if (_span.contains(addr) && !_bit_map->isMarked(addr)) { _bit_map->mark(addr); @@ -8216,26 +8294,49 @@ simulate_overflow = true; } ) - if (simulate_overflow || !_mark_stack->push(this_oop)) { - _collector->push_on_overflow_list(this_oop); - _collector->_ser_kac_ovflw++; + if (simulate_overflow || !_mark_stack->push(obj)) { + if (_concurrent_precleaning) { + // We dirty the overflown object and let the remark + // phase deal with it. + assert(_collector->overflow_list_is_empty(), "Error"); + // In the case of object arrays, we need to dirty all of + // the cards that the object spans. No locking or atomics + // are needed since no one else can be mutating the mod union + // table. + if (obj->is_objArray()) { + size_t sz = obj->size(); + HeapWord* end_card_addr = + (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size); + MemRegion redirty_range = MemRegion(addr, end_card_addr); + assert(!redirty_range.is_empty(), "Arithmetical tautology"); + _collector->_modUnionTable.mark_range(redirty_range); + } else { + _collector->_modUnionTable.mark(addr); + } + _collector->_ser_kac_preclean_ovflw++; + } else { + _collector->push_on_overflow_list(obj); + _collector->_ser_kac_ovflw++; + } } } } +void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); } +void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); } + // CMSParKeepAliveClosure: a parallel version of the above. // The work queues are private to each closure (thread), // but (may be) available for stealing by other threads. 
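
Both PushAndMarkClosure (during precleaning) and CMSKeepAliveClosure above fall back, on a stack or queue overflow, to dirtying the mod union table cards that the overflowed object spans: the end address is rounded up to a card boundary so that every card covering an object array gets redirtied. A standalone sketch of that range computation, with a fixed 512-byte card (HotSpot's usual default) standing in for CardTableModRefBS::card_size and byte addresses standing in for HeapWord*:

#include <cstddef>
#include <cstdint>

constexpr size_t kCardSize = 512;           // bytes per card (assumed default)

struct Range { uintptr_t start, end; };

inline Range redirty_range(uintptr_t obj_addr, size_t obj_size_bytes) {
  uintptr_t end = obj_addr + obj_size_bytes;
  uintptr_t end_card = (end + kCardSize - 1) & ~(uintptr_t)(kCardSize - 1); // round_to
  return Range{obj_addr, end_card};         // never empty: end_card > obj_addr
}
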
-void CMSParKeepAliveClosure::do_oop(oop* p) { - oop this_oop = *p; - HeapWord* addr = (HeapWord*)this_oop; +void CMSParKeepAliveClosure::do_oop(oop obj) { + HeapWord* addr = (HeapWord*)obj; if (_span.contains(addr) && !_bit_map->isMarked(addr)) { // In general, during recursive tracing, several threads // may be concurrently getting here; the first one to // "tag" it, claims it. - if (_bit_map->par_mark(addr)) { - bool res = _work_queue->push(this_oop); + if (_bit_map->par_mark(addr)) { + bool res = _work_queue->push(obj); assert(res, "Low water mark should be much less than capacity"); // Do a recursive trim in the hope that this will keep // stack usage lower, but leave some oops for potential stealers @@ -8244,6 +8345,9 @@ } } +void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); } +void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); } + void CMSParKeepAliveClosure::trim_queue(uint max) { while (_work_queue->size() > max) { oop new_oop; @@ -8259,9 +8363,8 @@ } } -void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { - oop this_oop = *p; - HeapWord* addr = (HeapWord*)this_oop; +void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { + HeapWord* addr = (HeapWord*)obj; if (_span.contains(addr) && !_bit_map->isMarked(addr)) { if (_bit_map->par_mark(addr)) { @@ -8273,14 +8376,17 @@ simulate_overflow = true; } ) - if (simulate_overflow || !_work_queue->push(this_oop)) { - _collector->par_push_on_overflow_list(this_oop); + if (simulate_overflow || !_work_queue->push(obj)) { + _collector->par_push_on_overflow_list(obj); _collector->_par_kac_ovflw++; } } // Else another thread got there already } } +void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } +void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } + ////////////////////////////////////////////////////////////////// // CMSExpansionCause ///////////////////////////// ////////////////////////////////////////////////////////////////// @@ -8308,15 +8414,17 @@ void CMSDrainMarkingStackClosure::do_void() { // the max number to take from overflow list at a time const size_t num = _mark_stack->capacity()/4; + assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(), + "Overflow list should be NULL during concurrent phases"); while (!_mark_stack->isEmpty() || // if stack is empty, check the overflow list _collector->take_from_overflow_list(num, _mark_stack)) { - oop this_oop = _mark_stack->pop(); - HeapWord* addr = (HeapWord*)this_oop; + oop obj = _mark_stack->pop(); + HeapWord* addr = (HeapWord*)obj; assert(_span.contains(addr), "Should be within span"); assert(_bit_map->isMarked(addr), "Should be marked"); - assert(this_oop->is_oop(), "Should be an oop"); - this_oop->oop_iterate(_keep_alive); + assert(obj->is_oop(), "Should be an oop"); + obj->oop_iterate(_keep_alive); } } @@ -8801,7 +8909,7 @@ // Transfer some number of overflown objects to usual marking // stack. Return true if some objects were transferred. 
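
CMSDrainMarkingStackClosure::do_void above alternates between popping the marking stack and refilling it from the overflow list, a quarter of the stack's capacity at a time, until both are empty. Reduced to plain C++ containers, the control flow looks like this:

#include <cstddef>
#include <vector>

struct DrainSketch {
  std::vector<const void*> mark_stack;      // bounded in the real collector
  std::vector<const void*> overflow_list;
  size_t capacity = 64;

  // Move up to `num` overflowed entries back onto the stack; true if any moved.
  bool take_from_overflow_list(size_t num) {
    bool moved = false;
    while (num-- > 0 && !overflow_list.empty()) {
      mark_stack.push_back(overflow_list.back());
      overflow_list.pop_back();
      moved = true;
    }
    return moved;
  }

  void do_void() {
    const size_t num = capacity / 4;
    while (!mark_stack.empty() || take_from_overflow_list(num)) {
      const void* obj = mark_stack.back();
      mark_stack.pop_back();
      (void)obj;                            // obj->oop_iterate(_keep_alive) in the real code
    }
  }
};
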
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() { - size_t num = MIN2((size_t)_mark_stack->capacity()/4, + size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4, (size_t)ParGCDesiredObjsFromOverflowList); bool res = _collector->take_from_overflow_list(num, _mark_stack); --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp 2009-08-01 04:10:32.661355034 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp 2009-08-01 04:10:32.575428221 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)concurrentMarkSweepGeneration.hpp 1.163 08/09/25 13:47:54 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -447,12 +447,13 @@ CMSBitMap* bit_map): _span(span), _bit_map(bit_map) { - assert(!span.is_empty(), "Empty span could spell trouble"); - } + assert(!span.is_empty(), "Empty span could spell trouble"); + } void do_object(oop obj) { assert(false, "not to be invoked"); } + bool do_object_b(oop obj); }; @@ -536,13 +537,16 @@ // In support of ExplicitGCInvokesConcurrent static bool _full_gc_requested; unsigned int _collection_count_start; + // Should we unload classes this concurrent cycle? - // Set in response to a concurrent full gc request. - bool _unload_classes; - bool _unloaded_classes_last_cycle; + bool _should_unload_classes; + unsigned int _concurrent_cycles_since_last_unload; + unsigned int concurrent_cycles_since_last_unload() const { + return _concurrent_cycles_since_last_unload; + } // Did we (allow) unload classes in the previous concurrent cycle? - bool cms_unloaded_classes_last_cycle() const { - return _unloaded_classes_last_cycle || CMSClassUnloadingEnabled; + bool unloaded_classes_last_cycle() const { + return concurrent_cycles_since_last_unload() == 0; } // Verification support @@ -591,6 +595,7 @@ size_t _ser_pmc_preclean_ovflw; size_t _ser_pmc_remark_ovflw; size_t _par_pmc_remark_ovflw; + size_t _ser_kac_preclean_ovflw; size_t _ser_kac_ovflw; size_t _par_kac_ovflw; NOT_PRODUCT(size_t _num_par_pushes;) @@ -652,8 +657,6 @@ // number of full gc's since the last concurrent gc. uint _full_gcs_since_conc_gc; - // if occupancy exceeds this, start a new gc cycle - double _initiatingOccupancy; // occupancy used for bootstrapping stats double _bootstrap_occupancy; @@ -826,7 +829,6 @@ Mutex* bitMapLock() const { return _markBitMap.lock(); } static CollectorState abstract_state() { return _collectorState; } - double initiatingOccupancy() const { return _initiatingOccupancy; } bool should_abort_preclean() const; // Whether preclean should be aborted. size_t get_eden_used() const; @@ -850,11 +852,10 @@ // In support of ExplicitGCInvokesConcurrent static void request_full_gc(unsigned int full_gc_count); // Should we unload classes in a particular concurrent cycle? 
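
The revised sizing at the top of this hunk takes at most a quarter of the stack's remaining room (capacity minus current length) rather than a quarter of its total capacity, so the transfer can no longer overflow the stack; it is still capped by ParGCDesiredObjsFromOverflowList. A one-function illustration with made-up numbers:

#include <algorithm>
#include <cstddef>

inline size_t objs_to_take(size_t capacity, size_t length, size_t desired_batch) {
  return std::min((capacity - length) / 4, desired_batch);
}
// e.g. objs_to_take(4096, 4000, 128) == 24, where the old capacity/4 rule would have
// asked for 1024 entries with only 96 slots actually free.
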
- bool cms_should_unload_classes() const { - assert(!_unload_classes || ExplicitGCInvokesConcurrentAndUnloadsClasses, - "Inconsistency; see CR 6541037"); - return _unload_classes || CMSClassUnloadingEnabled; + bool should_unload_classes() const { + return _should_unload_classes; } + bool update_should_unload_classes(); void direct_allocated(HeapWord* start, size_t size); @@ -1023,6 +1024,10 @@ _incremental_collection_failed = false; } + // accessors + void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;} + CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; } + private: // For parallel young-gen GC support. CMSParGCThreadState** _par_gc_thread_states; @@ -1030,10 +1035,6 @@ // Reason generation was expanded CMSExpansionCause::Cause _expansion_cause; - // accessors - void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;} - CMSExpansionCause::Cause expansion_cause() { return _expansion_cause; } - // In support of MinChunkSize being larger than min object size const double _dilatation_factor; @@ -1046,11 +1047,11 @@ CollectionTypes _debug_collection_type; + // Fraction of current occupancy at which to start a CMS collection which + // will collect this generation (at least). + double _initiating_occupancy; + protected: - // Grow generation by specified size (returns false if unable to grow) - bool grow_by(size_t bytes); - // Grow generation to reserved size. - bool grow_to_reserved(); // Shrink generation by specified size (returns false if unable to shrink) virtual void shrink_by(size_t bytes); @@ -1061,6 +1062,10 @@ // space. size_t max_available() const; + // getter and initializer for _initiating_occupancy field. + double initiating_occupancy() const { return _initiating_occupancy; } + void init_initiating_occupancy(intx io, intx tr); + public: ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, int level, CardTableRS* ct, @@ -1098,13 +1103,18 @@ // Override virtual void ref_processor_init(); + // Grow generation by specified size (returns false if unable to grow) + bool grow_by(size_t bytes); + // Grow generation to reserved size. 
+ bool grow_to_reserved(); + void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; } // Space enquiries size_t capacity() const; size_t used() const; size_t free() const; - double occupancy() { return ((double)used())/((double)capacity()); } + double occupancy() const { return ((double)used())/((double)capacity()); } size_t contiguous_available() const; size_t unsafe_max_alloc_nogc() const; @@ -1132,7 +1142,7 @@ // Allocation support HeapWord* allocate(size_t size, bool tlab); HeapWord* have_lock_and_allocate(size_t size, bool tlab); - oop promote(oop obj, size_t obj_size, oop* ref); + oop promote(oop obj, size_t obj_size); HeapWord* par_allocate(size_t size, bool tlab) { return allocate(size, tlab); } @@ -1159,8 +1169,8 @@ bool younger_handles_promotion_failure) const; bool should_collect(bool full, size_t size, bool tlab); - // XXXPERM - bool shouldConcurrentCollect(double initiatingOccupancy); // XXXPERM + virtual bool should_concurrent_collect() const; + virtual bool is_too_full() const; void collect(bool full, bool clear_all_soft_refs, size_t size, @@ -1188,6 +1198,7 @@ // Allocation failure void expand(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause); + virtual bool expand(size_t bytes, size_t expand_bytes); void shrink(size_t bytes); HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz); bool expand_and_ensure_spooling_space(PromotionInfo* promo); @@ -1295,9 +1306,8 @@ // This closure is used to check that a certain set of oops is empty. class FalseClosure: public OopClosure { public: - void do_oop(oop* p) { - guarantee(false, "Should be an empty set"); - } + void do_oop(oop* p) { guarantee(false, "Should be an empty set"); } + void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); } }; // This closure is used to do concurrent marking from the roots @@ -1321,7 +1331,7 @@ CMSMarkStack* markStack, CMSMarkStack* revisitStack, bool should_yield, bool verifying = false); - void do_bit(size_t offset); + bool do_bit(size_t offset); void reset(HeapWord* addr); inline void do_yield_check(); @@ -1357,7 +1367,7 @@ CMSMarkStack* overflow_stack, CMSMarkStack* revisit_stack, bool should_yield); - void do_bit(size_t offset); + bool do_bit(size_t offset); inline void do_yield_check(); private: @@ -1374,6 +1384,12 @@ CMSBitMap* _verification_bm; CMSBitMap* _cms_bm; CMSMarkStack* _mark_stack; + protected: + void do_oop(oop p); + template inline void do_oop_work(T *p) { + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + do_oop(obj); + } public: PushAndMarkVerifyClosure(CMSCollector* cms_collector, MemRegion span, @@ -1381,6 +1397,7 @@ CMSBitMap* cms_bm, CMSMarkStack* mark_stack); void do_oop(oop* p); + void do_oop(narrowOop* p); // Deal with a stack overflow condition void handle_stack_overflow(HeapWord* lost); }; @@ -1398,7 +1415,7 @@ CMSBitMap* verification_bm, CMSBitMap* cms_bm, CMSMarkStack* mark_stack); - void do_bit(size_t offset); + bool do_bit(size_t offset); void reset(HeapWord* addr); }; @@ -1407,8 +1424,9 @@ // "empty" (i.e. the bit vector doesn't have any 1-bits). class FalseBitMapClosure: public BitMapClosure { public: - void do_bit(size_t offset) { - guarantee(false, "Should not have a 1 bit"); + bool do_bit(size_t offset) { + guarantee(false, "Should not have a 1 bit"); + return true; } }; @@ -1735,21 +1753,30 @@ // work-routine/closure used to complete transitive // marking of objects as live after a certain point // in which an initial set has been completely accumulated. 
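
The new _initiating_occupancy field, its initiating_occupancy() accessor and init_initiating_occupancy(intx io, intx tr) declared above drive the occupancy-based trigger that should_concurrent_collect() consults, with occupancy() now a const used/capacity ratio. The defining .cpp side is not part of this excerpt, so the io < 0 branch below is a plausible reconstruction (a default derived from MinHeapFreeRatio and the trigger ratio), not a quotation of the change:

#include <cstdio>

double init_initiating_occupancy(long io, long tr, long min_heap_free_ratio) {
  if (io >= 0) {
    return io / 100.0;                      // explicit CMSInitiatingOccupancyFraction
  }
  // assumed default: leave at least the configured free ratio, scaled by the trigger ratio
  return ((100 - min_heap_free_ratio) +
          (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
}

bool occupancy_triggers_collection(double used, double capacity, double initiating) {
  return (used / capacity) >= initiating;   // the occupancy part of should_concurrent_collect
}

int main() {
  // e.g. with MinHeapFreeRatio = 40 and a trigger ratio of 80 this yields 0.92
  double occ = init_initiating_occupancy(-1, 80, 40);
  std::printf("trigger at %.0f%% occupancy\n", occ * 100.0);
  return 0;
}
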
+// This closure is currently used both during the final +// remark stop-world phase, as well as during the concurrent +// precleaning of the discovered reference lists. class CMSDrainMarkingStackClosure: public VoidClosure { CMSCollector* _collector; MemRegion _span; CMSMarkStack* _mark_stack; CMSBitMap* _bit_map; CMSKeepAliveClosure* _keep_alive; + bool _concurrent_precleaning; public: CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span, CMSBitMap* bit_map, CMSMarkStack* mark_stack, - CMSKeepAliveClosure* keep_alive): + CMSKeepAliveClosure* keep_alive, + bool cpc): _collector(collector), _span(span), _bit_map(bit_map), _mark_stack(mark_stack), - _keep_alive(keep_alive) { } + _keep_alive(keep_alive), + _concurrent_precleaning(cpc) { + assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(), + "Mismatch"); + } void do_void(); }; --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp 2009-08-01 04:10:33.634174481 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp 2009-08-01 04:10:33.556727800 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)concurrentMarkSweepGeneration.inline.hpp 1.47 07/05/17 15:52:12 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -268,9 +268,9 @@ && _cmsGen->cmsSpace()->block_is_obj(addr)) || (_permGen->cmsSpace()->is_in_reserved(addr) - && _permGen->cmsSpace()->block_is_obj(addr)), - "must be object"); - return cms_should_unload_classes() && + && _permGen->cmsSpace()->block_is_obj(addr)), + "must be object"); + return should_unload_classes() && _collectorState == Sweeping && !_markBitMap.isMarked(addr); } --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp 2009-08-01 04:10:34.482807202 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp 2009-08-01 04:10:34.405617231 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)freeBlockDictionary.hpp 1.32 07/05/05 17:05:47 JVM" #endif /* - * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,89 +25,6 @@ * */ -// -// Free block maintenance for Concurrent Mark Sweep Generation -// -// The main data structure for free blocks are -// . an indexed array of small free blocks, and -// . a dictionary of large free blocks -// - -// No virtuals in FreeChunk (don't want any vtables). - -// A FreeChunk is merely a chunk that can be in a doubly linked list -// and has a size field. NOTE: FreeChunks are distinguished from allocated -// objects in two ways (by the sweeper). The second word (prev) has the -// LSB set to indicate a free chunk; allocated objects' klass() pointers -// don't have their LSB set. The corresponding bit in the CMSBitMap is -// set when the chunk is allocated. There are also blocks that "look free" -// but are not part of the free list and should not be coalesced into larger -// free blocks. These free blocks have their two LSB's set. 
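
The relocated FreeChunk comment above describes the low-bit tagging scheme: prev pointers are always aligned, so bit 0 of the chunk's second word marks it as free, and bits 0 and 1 together mark a block that "looks free" but must not be coalesced. The removed class that follows implements exactly that; here is a compact standalone demonstration of the same trick with stand-in types:

#include <cassert>
#include <cstdint>

struct TaggedPrev {
  uintptr_t raw = 0;                                  // prev pointer plus two tag bits

  bool is_free() const       { return (raw & 0x1) == 0x1; }
  bool cant_coalesce() const { return (raw & 0x3) == 0x3; }
  void* prev() const         { return reinterpret_cast<void*>(raw & ~uintptr_t(0x3)); }

  void link_prev(void* p)    { raw = reinterpret_cast<uintptr_t>(p) | 0x1; } // mark free
  void dont_coalesce()       { assert(is_free()); raw |= 0x2; }              // "looks free"
  void mark_not_free()       { raw = 0; }                                    // now allocated
};

int main() {
  alignas(8) static char block[8];
  TaggedPrev t;
  t.link_prev(block);
  assert(t.is_free() && !t.cant_coalesce() && t.prev() == block);
  t.dont_coalesce();
  assert(t.cant_coalesce());
  return 0;
}
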
- -class FreeChunk VALUE_OBJ_CLASS_SPEC { - friend class VMStructs; - FreeChunk* _next; - FreeChunk* _prev; - size_t _size; - - public: - NOT_PRODUCT(static const size_t header_size();) - // Returns "true" if the "wrd", which is required to be the second word - // of a block, indicates that the block represents a free chunk. - static bool secondWordIndicatesFreeChunk(intptr_t wrd) { - return (wrd & 0x1) == 0x1; - } - bool isFree() const { - return secondWordIndicatesFreeChunk((intptr_t)_prev); - } - bool cantCoalesce() const { return (((intptr_t)_prev) & 0x3) == 0x3; } - FreeChunk* next() const { return _next; } - FreeChunk* prev() const { return (FreeChunk*)(((intptr_t)_prev) & ~(0x3)); } - debug_only(void* prev_addr() const { return (void*)&_prev; }) - - void linkAfter(FreeChunk* ptr) { - linkNext(ptr); - if (ptr != NULL) ptr->linkPrev(this); - } - void linkAfterNonNull(FreeChunk* ptr) { - assert(ptr != NULL, "precondition violation"); - linkNext(ptr); - ptr->linkPrev(this); - } - void linkNext(FreeChunk* ptr) { _next = ptr; } - void linkPrev(FreeChunk* ptr) { _prev = (FreeChunk*)((intptr_t)ptr | 0x1); } - void clearPrev() { _prev = NULL; } - void clearNext() { _next = NULL; } - void dontCoalesce() { - // the block should be free - assert(isFree(), "Should look like a free block"); - _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2); - } - void markFree() { _prev = (FreeChunk*)((intptr_t)_prev | 0x1); } - void markNotFree() { _prev = NULL; } - - size_t size() const { return _size; } - void setSize(size_t size) { _size = size; } - - // For volatile reads: - size_t* size_addr() { return &_size; } - - // Return the address past the end of this chunk - HeapWord* end() const { return ((HeapWord*) this) + _size; } - - // debugging - void verify() const PRODUCT_RETURN; - void verifyList() const PRODUCT_RETURN; - void mangleAllocated(size_t size) PRODUCT_RETURN; - void mangleFreed(size_t size) PRODUCT_RETURN; -}; - -// Alignment helpers etc. -#define numQuanta(x,y) ((x+y-1)/y) -enum AlignmentConstants { - MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment -}; - // A FreeBlockDictionary is an abstract superclass that will allow // a number of alternative implementations in the future. class FreeBlockDictionary: public CHeapObj { --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp 2009-08-01 04:10:35.336970983 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp 2009-08-01 04:10:35.268364576 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)freeChunk.cpp 1.16 07/05/05 17:05:47 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -50,15 +50,15 @@ Copy::fill_to_words(addr + hdr, size - hdr, baadbabeHeapWord); } -void FreeChunk::mangleFreed(size_t size) { +void FreeChunk::mangleFreed(size_t sz) { assert(baadbabeHeapWord != deadbeefHeapWord, "Need distinct patterns"); // mangle all but the header of a just-freed block of storage // just prior to passing it to the storage dictionary - assert(size >= MinChunkSize, "smallest size of object"); - assert(size == _size, "just checking"); + assert(sz >= MinChunkSize, "smallest size of object"); + assert(sz == size(), "just checking"); HeapWord* addr = (HeapWord*)this; size_t hdr = header_size(); - Copy::fill_to_words(addr + hdr, size - hdr, deadbeefHeapWord); + Copy::fill_to_words(addr + hdr, sz - hdr, deadbeefHeapWord); } void FreeChunk::verifyList() const { --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp 2009-08-01 04:10:36.141272224 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp 2009-08-01 04:10:36.063469459 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)freeList.cpp 1.31 07/05/05 17:05:48 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -305,3 +305,29 @@ #endif } #endif + +// Print the "label line" for free list stats. +void FreeList::print_labels_on(outputStream* st, const char* c) { + st->print("%16s\t", c); + st->print("%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t" + "%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t" "\n", + "bfrsurp", "surplus", "desired", "prvSwep", "bfrSwep", + "count", "cBirths", "cDeaths", "sBirths", "sDeaths"); +} + +// Print the AllocationStats for the given free list. If the second argument +// to the call is a non-null string, it is printed in the first column; +// otherwise, if the argument is null (the default), then the size of the +// (free list) block is printed in the first column. +void FreeList::print_on(outputStream* st, const char* c) const { + if (c != NULL) { + st->print("%16s", c); + } else { + st->print(SIZE_FORMAT_W(16), size()); + } + st->print("\t" + SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" + SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n", + bfrSurp(), surplus(), desired(), prevSweep(), beforeSweep(), + count(), coalBirths(), coalDeaths(), splitBirths(), splitDeaths()); +} --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp 2009-08-01 04:10:36.978067529 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp 2009-08-01 04:10:36.900390694 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)freeList.hpp 1.32 08/04/09 19:18:54 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,11 +42,12 @@ class FreeList VALUE_OBJ_CLASS_SPEC { friend class CompactibleFreeListSpace; friend class VMStructs; - FreeChunk* _head; // List of free chunks - FreeChunk* _tail; // Tail of list of free chunks - size_t _size; // Size in Heap words of each chunks - ssize_t _count; // Number of entries in list - size_t _hint; // next larger size list with a positive surplus + friend class printTreeCensusClosure; + FreeChunk* _head; // List of free chunks + FreeChunk* _tail; // Tail of list of free chunks + size_t _size; // Size in Heap words of each chunks + ssize_t _count; // Number of entries in list + size_t _hint; // next larger size list with a positive surplus AllocationStats _allocation_stats; // statistics for smart allocation @@ -67,10 +68,11 @@ protected: void init_statistics(); void set_count(ssize_t v) { _count = v;} - void increment_count() { _count++; } + void increment_count() { _count++; } void decrement_count() { _count--; - assert(_count >= 0, "Count should not be negative"); } + assert(_count >= 0, "Count should not be negative"); + } public: // Constructor @@ -163,6 +165,10 @@ ssize_t desired() const { return _allocation_stats.desired(); } + void set_desired(ssize_t v) { + assert_proper_lock_protection(); + _allocation_stats.set_desired(v); + } void compute_desired(float inter_sweep_current, float inter_sweep_estimate) { assert_proper_lock_protection(); @@ -302,4 +308,8 @@ // Verify that the chunk is in the list. // found. Return NULL if "fc" is not found. bool verifyChunkInFreeLists(FreeChunk* fc) const; + + // Printing support + static void print_labels_on(outputStream* st, const char* c); + void print_on(outputStream* st, const char* c = NULL) const; }; --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp 2009-08-01 04:10:37.796473013 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp 2009-08-01 04:10:37.713459679 +0100 @@ -26,6 +26,7 @@ */ #define VM_STRUCTS_CMS(nonstatic_field, \ + volatile_nonstatic_field, \ static_field) \ nonstatic_field(CompactibleFreeListSpace, _collector, CMSCollector*) \ nonstatic_field(CompactibleFreeListSpace, _bt, BlockOffsetArrayNonContigSpace) \ @@ -39,9 +40,9 @@ nonstatic_field(CMSCollector, _markBitMap, CMSBitMap) \ nonstatic_field(ConcurrentMarkSweepGeneration, _cmsSpace, CompactibleFreeListSpace*) \ static_field(ConcurrentMarkSweepThread, _collector, CMSCollector*) \ + volatile_nonstatic_field(FreeChunk, _size, size_t) \ nonstatic_field(FreeChunk, _next, FreeChunk*) \ nonstatic_field(FreeChunk, _prev, FreeChunk*) \ - nonstatic_field(FreeChunk, _size, size_t) \ nonstatic_field(LinearAllocBlock, _word_size, size_t) \ nonstatic_field(FreeList, _size, size_t) \ nonstatic_field(FreeList, _count, ssize_t) \ --- old/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep 2009-08-01 04:10:38.625580836 +0100 +++ new/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep 2009-08-01 04:10:38.550989635 +0100 @@ -1,5 +1,5 @@ // -// Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 2004-2008 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
// // This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ binaryTreeDictionary.cpp binaryTreeDictionary.hpp binaryTreeDictionary.cpp globals.hpp binaryTreeDictionary.cpp ostream.hpp +binaryTreeDictionary.cpp spaceDecorator.hpp binaryTreeDictionary.hpp freeBlockDictionary.hpp binaryTreeDictionary.hpp freeList.hpp @@ -114,6 +115,7 @@ compactibleFreeListSpace.cpp liveRange.hpp compactibleFreeListSpace.cpp oop.inline.hpp compactibleFreeListSpace.cpp resourceArea.hpp +compactibleFreeListSpace.cpp spaceDecorator.hpp compactibleFreeListSpace.cpp universe.inline.hpp compactibleFreeListSpace.cpp vmThread.hpp @@ -123,17 +125,6 @@ compactingPermGenGen.cpp concurrentMarkSweepGeneration.inline.hpp -concurrentGCThread.cpp concurrentGCThread.hpp -concurrentGCThread.cpp init.hpp -concurrentGCThread.cpp instanceRefKlass.hpp -concurrentGCThread.cpp interfaceSupport.hpp -concurrentGCThread.cpp java.hpp -concurrentGCThread.cpp javaCalls.hpp -concurrentGCThread.cpp oop.inline.hpp -concurrentGCThread.cpp systemDictionary.hpp - -concurrentGCThread.hpp thread.hpp - concurrentMarkSweepGeneration.cpp cardTableRS.hpp concurrentMarkSweepGeneration.cpp cmsAdaptiveSizePolicy.hpp concurrentMarkSweepGeneration.cpp cmsCollectorPolicy.hpp @@ -165,7 +156,7 @@ concurrentMarkSweepGeneration.cpp vmCMSOperations.hpp concurrentMarkSweepGeneration.cpp vmThread.hpp -concurrentMarkSweepGeneration.hpp bitMap.hpp +concurrentMarkSweepGeneration.hpp bitMap.inline.hpp concurrentMarkSweepGeneration.hpp freeBlockDictionary.hpp concurrentMarkSweepGeneration.hpp gSpaceCounters.hpp concurrentMarkSweepGeneration.hpp gcStats.hpp @@ -206,6 +197,7 @@ freeBlockDictionary.hpp allocation.hpp freeBlockDictionary.hpp debug.hpp +freeBlockDictionary.hpp freeChunk.hpp freeBlockDictionary.hpp globalDefinitions.hpp freeBlockDictionary.hpp memRegion.hpp freeBlockDictionary.hpp mutex.hpp @@ -214,6 +206,14 @@ freeChunk.cpp copy.hpp freeChunk.cpp freeBlockDictionary.hpp +freeChunk.hpp allocation.hpp +freeChunk.hpp debug.hpp +freeChunk.hpp globalDefinitions.hpp +freeChunk.hpp markOop.hpp +freeChunk.hpp memRegion.hpp +freeChunk.hpp mutex.hpp +freeChunk.hpp ostream.hpp + freeList.cpp freeBlockDictionary.hpp freeList.cpp freeList.hpp freeList.cpp globals.hpp --- old/hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew 2009-08-01 04:10:39.522700348 +0100 +++ new/hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew 2009-08-01 04:10:39.442119957 +0100 @@ -19,19 +19,20 @@ // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // CA 95054 USA or visit www.sun.com if you need additional information or // have any questions. 
-// +// // -asParNewGeneration.hpp adaptiveSizePolicy.hpp -asParNewGeneration.hpp parNewGeneration.hpp +asParNewGeneration.hpp adaptiveSizePolicy.hpp +asParNewGeneration.hpp parNewGeneration.hpp -asParNewGeneration.cpp asParNewGeneration.hpp -asParNewGeneration.cpp cmsAdaptiveSizePolicy.hpp +asParNewGeneration.cpp asParNewGeneration.hpp +asParNewGeneration.cpp cmsAdaptiveSizePolicy.hpp asParNewGeneration.cpp cmsGCAdaptivePolicyCounters.hpp -asParNewGeneration.cpp defNewGeneration.inline.hpp -asParNewGeneration.cpp oop.pcgc.inline.hpp -asParNewGeneration.cpp parNewGeneration.hpp +asParNewGeneration.cpp defNewGeneration.inline.hpp +asParNewGeneration.cpp oop.pcgc.inline.hpp +asParNewGeneration.cpp parNewGeneration.hpp asParNewGeneration.cpp referencePolicy.hpp +asParNewGeneration.cpp spaceDecorator.hpp parCardTableModRefBS.cpp allocation.inline.hpp parCardTableModRefBS.cpp cardTableModRefBS.hpp @@ -66,8 +67,8 @@ parNewGeneration.cpp handles.inline.hpp parNewGeneration.cpp java.hpp parNewGeneration.cpp objArrayOop.hpp -parNewGeneration.cpp oop.pcgc.inline.hpp parNewGeneration.cpp oop.inline.hpp +parNewGeneration.cpp oop.pcgc.inline.hpp parNewGeneration.cpp parGCAllocBuffer.hpp parNewGeneration.cpp parNewGeneration.hpp parNewGeneration.cpp parOopClosures.inline.hpp @@ -75,8 +76,14 @@ parNewGeneration.cpp resourceArea.hpp parNewGeneration.cpp sharedHeap.hpp parNewGeneration.cpp space.hpp +parNewGeneration.cpp spaceDecorator.hpp parNewGeneration.cpp workgroup.hpp parNewGeneration.hpp defNewGeneration.hpp parNewGeneration.hpp parGCAllocBuffer.hpp parNewGeneration.hpp taskqueue.hpp + +parOopClosures.hpp genOopClosures.hpp + +parOopClosures.inline.hpp parNewGeneration.hpp +parOopClosures.inline.hpp parOopClosures.hpp --- old/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge 2009-08-01 04:10:40.401349528 +0100 +++ new/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge 2009-08-01 04:10:40.312384864 +0100 @@ -1,5 +1,5 @@ // -// Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // CA 95054 USA or visit www.sun.com if you need additional information or // have any questions. -// +// // // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps! 
@@ -53,14 +53,15 @@ asPSOldGen.cpp oop.inline.hpp asPSOldGen.cpp parallelScavengeHeap.hpp asPSOldGen.cpp psMarkSweepDecorator.hpp -asPSOldGen.cpp asPSOldGen.hpp +asPSOldGen.cpp asPSOldGen.hpp asPSYoungGen.hpp generationCounters.hpp asPSYoungGen.hpp mutableSpace.hpp asPSYoungGen.hpp objectStartArray.hpp asPSYoungGen.hpp spaceCounters.hpp asPSYoungGen.hpp psVirtualspace.hpp -asPSYoungGen.hpp psYoungGen.hpp +asPSYoungGen.hpp psYoungGen.hpp +asPSYoungGen.hpp spaceDecorator.hpp asPSYoungGen.cpp gcUtil.hpp asPSYoungGen.cpp java.hpp @@ -68,8 +69,9 @@ asPSYoungGen.cpp parallelScavengeHeap.hpp asPSYoungGen.cpp psMarkSweepDecorator.hpp asPSYoungGen.cpp psScavenge.hpp -asPSYoungGen.cpp asPSYoungGen.hpp -asPSYoungGen.cpp psYoungGen.hpp +asPSYoungGen.cpp asPSYoungGen.hpp +asPSYoungGen.cpp psYoungGen.hpp +asPSYoungGen.cpp spaceDecorator.hpp cardTableExtension.cpp cardTableExtension.hpp cardTableExtension.cpp gcTaskManager.hpp @@ -148,7 +150,6 @@ parallelScavengeHeap.hpp psYoungGen.hpp parallelScavengeHeap.hpp ostream.hpp -parMarkBitMap.cpp bitMap.hpp parMarkBitMap.cpp bitMap.inline.hpp parMarkBitMap.cpp oop.inline.hpp parMarkBitMap.cpp os.hpp @@ -157,7 +158,6 @@ parMarkBitMap.cpp parMarkBitMap.inline.hpp parMarkBitMap.cpp psParallelCompact.hpp -parMarkBitMap.hpp bitMap.hpp parMarkBitMap.hpp bitMap.inline.hpp parMarkBitMap.hpp psVirtualspace.hpp @@ -225,6 +225,7 @@ psMarkSweep.cpp referencePolicy.hpp psMarkSweep.cpp referenceProcessor.hpp psMarkSweep.cpp safepoint.hpp +psMarkSweep.cpp spaceDecorator.hpp psMarkSweep.cpp symbolTable.hpp psMarkSweep.cpp systemDictionary.hpp psMarkSweep.cpp vmThread.hpp @@ -239,6 +240,7 @@ psMarkSweepDecorator.cpp parallelScavengeHeap.hpp psMarkSweepDecorator.cpp psMarkSweep.hpp psMarkSweepDecorator.cpp psMarkSweepDecorator.hpp +psMarkSweepDecorator.cpp spaceDecorator.hpp psMarkSweepDecorator.cpp systemDictionary.hpp psMarkSweepDecorator.hpp mutableSpace.hpp @@ -279,6 +281,7 @@ psParallelCompact.hpp objectStartArray.hpp psParallelCompact.hpp oop.hpp psParallelCompact.hpp parMarkBitMap.hpp +psParallelCompact.hpp psCompactionManager.hpp psParallelCompact.hpp sharedHeap.hpp psOldGen.cpp psAdaptiveSizePolicy.hpp @@ -289,6 +292,7 @@ psOldGen.cpp parallelScavengeHeap.hpp psOldGen.cpp psMarkSweepDecorator.hpp psOldGen.cpp psOldGen.hpp +psOldGen.cpp spaceDecorator.hpp psOldGen.hpp psGenerationCounters.hpp psOldGen.hpp mutableSpace.hpp @@ -350,6 +354,7 @@ psScavenge.cpp referencePolicy.hpp psScavenge.cpp referenceProcessor.hpp psScavenge.cpp resourceArea.hpp +psScavenge.cpp spaceDecorator.hpp psScavenge.cpp threadCritical.hpp psScavenge.cpp vmThread.hpp psScavenge.cpp vm_operations.hpp @@ -408,8 +413,8 @@ psVirtualspace.cpp os.hpp psVirtualspace.cpp os_.inline.hpp -psVirtualspace.cpp psVirtualspace.hpp -psVirtualspace.cpp virtualspace.hpp +psVirtualspace.cpp psVirtualspace.hpp +psVirtualspace.cpp virtualspace.hpp psYoungGen.cpp gcUtil.hpp psYoungGen.cpp java.hpp @@ -418,7 +423,8 @@ psYoungGen.cpp psMarkSweepDecorator.hpp psYoungGen.cpp psScavenge.hpp psYoungGen.cpp psYoungGen.hpp -psYoungGen.cpp mutableNUMASpace.hpp +psYoungGen.cpp mutableNUMASpace.hpp +psYoungGen.cpp spaceDecorator.hpp psYoungGen.hpp psGenerationCounters.hpp psYoungGen.hpp mutableSpace.hpp --- old/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared 2009-08-01 04:10:41.316572116 +0100 +++ new/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared 2009-08-01 04:10:41.232978195 +0100 @@ -1,5 +1,5 @@ // -// Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. 
+// Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -19,17 +19,48 @@ // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // CA 95054 USA or visit www.sun.com if you need additional information or // have any questions. -// +// // // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps! -gcAdaptivePolicyCounters.hpp adaptiveSizePolicy.hpp -gcAdaptivePolicyCounters.hpp gcPolicyCounters.hpp +concurrentGCThread.cpp concurrentGCThread.hpp +concurrentGCThread.cpp init.hpp +concurrentGCThread.cpp instanceRefKlass.hpp +concurrentGCThread.cpp interfaceSupport.hpp +concurrentGCThread.cpp java.hpp +concurrentGCThread.cpp javaCalls.hpp +concurrentGCThread.cpp oop.inline.hpp +concurrentGCThread.cpp systemDictionary.hpp + +concurrentGCThread.hpp thread.hpp + +coTracker.hpp globalDefinitions.hpp +coTracker.hpp numberSeq.hpp + +coTracker.cpp coTracker.hpp +coTracker.cpp os.hpp + +allocationStats.cpp allocationStats.hpp +allocationStats.cpp ostream.hpp + +allocationStats.hpp allocation.hpp +allocationStats.hpp gcUtil.hpp +allocationStats.hpp globalDefinitions.hpp + +gcAdaptivePolicyCounters.hpp adaptiveSizePolicy.hpp +gcAdaptivePolicyCounters.hpp gcPolicyCounters.hpp -gcAdaptivePolicyCounters.cpp resourceArea.hpp +gcAdaptivePolicyCounters.cpp resourceArea.hpp gcAdaptivePolicyCounters.cpp gcAdaptivePolicyCounters.hpp +gcOverheadReporter.cpp allocation.inline.hpp +gcOverheadReporter.cpp concurrentGCThread.hpp +gcOverheadReporter.cpp coTracker.hpp +gcOverheadReporter.cpp gcOverheadReporter.hpp +gcOverheadReporter.cpp ostream.hpp +gcOverheadReporter.cpp thread_.inline.hpp + gSpaceCounters.cpp generation.hpp gSpaceCounters.cpp resourceArea.hpp gSpaceCounters.cpp gSpaceCounters.hpp @@ -44,10 +75,12 @@ isGCActiveMark.hpp parallelScavengeHeap.hpp -markSweep.inline.hpp psParallelCompact.hpp +markSweep.inline.hpp psParallelCompact.hpp mutableNUMASpace.cpp mutableNUMASpace.hpp +mutableNUMASpace.cpp oop.inline.hpp mutableNUMASpace.cpp sharedHeap.hpp +mutableNUMASpace.cpp spaceDecorator.hpp mutableNUMASpace.cpp thread_.inline.hpp mutableNUMASpace.hpp mutableSpace.hpp @@ -56,6 +89,7 @@ mutableSpace.cpp mutableSpace.hpp mutableSpace.cpp oop.inline.hpp mutableSpace.cpp safepoint.hpp +mutableSpace.cpp spaceDecorator.hpp mutableSpace.cpp thread.hpp spaceCounters.cpp resourceArea.hpp @@ -65,3 +99,5 @@ spaceCounters.hpp mutableSpace.hpp spaceCounters.hpp perfData.hpp spaceCounters.hpp generationCounters.hpp + +vmGCOperations.cpp g1CollectedHeap.inline.hpp --- old/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp 2009-08-01 04:10:42.198698903 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp 2009-08-01 04:10:42.129048195 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)asParNewGeneration.cpp 1.11 07/05/05 17:05:25 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -77,8 +77,8 @@ #ifdef SHRINKS_AT_END_OF_EDEN size_t delta_in_survivor = 0; ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t space_alignment = heap->intra_generation_alignment(); - const size_t gen_alignment = heap->generation_alignment(); + const size_t space_alignment = heap->intra_heap_alignment(); + const size_t gen_alignment = heap->object_heap_alignment(); MutableSpace* space_shrinking = NULL; if (from_space()->end() > to_space()->end()) { @@ -165,10 +165,9 @@ // Grow the generation size_t change = desired_size - orig_size; assert(change % alignment == 0, "just checking"); - if (!virtual_space()->expand_by(change)) { + if (expand(change)) { return false; // Error if we fail to resize! } - size_changed = true; } else if (desired_size < orig_size) { size_t desired_change = orig_size - desired_size; @@ -225,7 +224,9 @@ // Was there a shrink of the survivor space? if (new_end < to()->end()) { MemRegion mr(to()->bottom(), new_end); - to()->initialize(mr, false /* clear */); + to()->initialize(mr, + SpaceDecorator::DontClear, + SpaceDecorator::DontMangle); } } } @@ -325,9 +326,7 @@ pointer_delta(from_start, eden_start, sizeof(char))); } -// tty->print_cr("eden_size before: " SIZE_FORMAT, eden_size); eden_size = align_size_down(eden_size, alignment); -// tty->print_cr("eden_size after: " SIZE_FORMAT, eden_size); eden_end = eden_start + eden_size; assert(eden_end >= eden_start, "addition overflowed") @@ -504,11 +503,31 @@ size_t old_from = from()->capacity(); size_t old_to = to()->capacity(); + // If not clearing the spaces, do some checking to verify that + // the spaces are already mangled. + + // Must check mangling before the spaces are reshaped. Otherwise, + // the bottom or end of one space may have moved into another + // a failure of the check may not correctly indicate which space + // is not properly mangled. + if (ZapUnusedHeapArea) { + HeapWord* limit = (HeapWord*) virtual_space()->high(); + eden()->check_mangled_unused_area(limit); + from()->check_mangled_unused_area(limit); + to()->check_mangled_unused_area(limit); + } + // The call to initialize NULL's the next compaction space - eden()->initialize(edenMR, true); + eden()->initialize(edenMR, + SpaceDecorator::Clear, + SpaceDecorator::DontMangle); eden()->set_next_compaction_space(from()); - to()->initialize(toMR , true); - from()->initialize(fromMR, false); // Note, not cleared! + to()->initialize(toMR , + SpaceDecorator::Clear, + SpaceDecorator::DontMangle); + from()->initialize(fromMR, + SpaceDecorator::DontClear, + SpaceDecorator::DontMangle); assert(from()->top() == old_from_top, "from top changed!"); --- old/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp 2009-08-01 04:10:43.109958295 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp 2009-08-01 04:10:43.024113810 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parGCAllocBuffer.cpp 1.28 07/05/29 09:44:12 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,32 +35,33 @@ _allocated(0), _wasted(0) { assert (min_size() > AlignmentReserve, "Inconsistency!"); + // arrayOopDesc::header_size depends on command line initialization. 
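The surrounding change turns FillerHeaderSize and AlignmentReserve from namespace-scope static const initializers into plain statics assigned in the constructor, because their values depend on flags that are settled only after command-line processing. A minimal sketch of that pattern; the flag name and the word counts below are hypothetical, not the real HotSpot values:

#include <cassert>
#include <cstddef>

// Hypothetical flag, settled only after command-line parsing.
static bool use_compressed_headers = false;

struct AllocBufferSizes {
  // Deliberately not 'static const': the right values are unknown at
  // static-initialization time, so they are assigned once at startup.
  static size_t filler_header_words;
  static size_t alignment_reserve_words;

  static void initialize() {
    size_t header_words = use_compressed_headers ? 2 : 3;  // illustrative numbers
    filler_header_words = header_words;
    alignment_reserve_words = (header_words > 2) ? header_words : 0;
  }
};

size_t AllocBufferSizes::filler_header_words = 0;
size_t AllocBufferSizes::alignment_reserve_words = 0;

int main() {
  use_compressed_headers = true;   // "parse" the command line first
  AllocBufferSizes::initialize();  // then compute the dependent sizes
  assert(AllocBufferSizes::filler_header_words == 2);
  return 0;
}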
+ FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT)); + AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0; } -const size_t ParGCAllocBuffer::FillerHeaderSize = - align_object_size(arrayOopDesc::header_size(T_INT)); +size_t ParGCAllocBuffer::FillerHeaderSize; // If the minimum object size is greater than MinObjAlignment, we can // end up with a shard at the end of the buffer that's smaller than // the smallest object. We can't allow that because the buffer must // look like it's full of objects when we retire it, so we make // sure we have enough space for a filler int array object. -const size_t ParGCAllocBuffer::AlignmentReserve = - oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0; +size_t ParGCAllocBuffer::AlignmentReserve; void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) { assert(!retain || end_of_gc, "Can only retain at GC end."); if (_retained) { // If the buffer had been retained shorten the previous filler object. assert(_retained_filler.end() <= _top, "INVARIANT"); - SharedHeap::fill_region_with_object(_retained_filler); + CollectedHeap::fill_with_object(_retained_filler); // Wasted space book-keeping, otherwise (normally) done in invalidate() _wasted += _retained_filler.word_size(); _retained = false; } assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained."); if (_top < _hard_end) { - SharedHeap::fill_region_with_object(MemRegion(_top, _hard_end)); + CollectedHeap::fill_with_object(_top, _hard_end); if (!retain) { invalidate(); } else { @@ -156,8 +157,8 @@ // parameter below to directly manipulate the shared array without // modifying the _next_threshold state in the BOT. void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr, - bool contig) { - SharedHeap::fill_region_with_object(mr); + bool contig) { + CollectedHeap::fill_with_object(mr); if (contig) { _bt.alloc_block(mr.start(), mr.end()); } else { @@ -173,7 +174,7 @@ "or else _true_end should be equal to _hard_end"); assert(_retained, "or else _true_end should be equal to _hard_end"); assert(_retained_filler.end() <= _top, "INVARIANT"); - SharedHeap::fill_region_with_object(_retained_filler); + CollectedHeap::fill_with_object(_retained_filler); if (_top < _hard_end) { fill_region_with_block(MemRegion(_top, _hard_end), true); } @@ -231,7 +232,7 @@ HeapWord* first_card_start = _bsa->address_for_index(first_card_index); if (first_card_start < pre_top) { HeapWord* second_card_start = - _bsa->address_for_index(first_card_index + 1); + _bsa->inc_by_region_size(first_card_start); // Ensure enough room to fill with the smallest block second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve); @@ -318,13 +319,11 @@ while (_top <= chunk_boundary) { assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve, "Consequence of last card handling above."); - MemRegion chunk_portion(chunk_boundary, _hard_end); - _bt.BlockOffsetArray::alloc_block(chunk_portion.start(), - chunk_portion.end()); - SharedHeap::fill_region_with_object(chunk_portion); - _hard_end = chunk_portion.start(); - chunk_boundary -= ChunkSizeInWords; - } + _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end); + CollectedHeap::fill_with_object(chunk_boundary, _hard_end); + _hard_end = chunk_boundary; + chunk_boundary -= ChunkSizeInWords; + } _end = _hard_end - AlignmentReserve; assert(_top <= _end, "Invariant."); // Now reset the initial filler chunk so it doesn't overlap with --- 
old/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp 2009-08-01 04:10:43.982095514 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp 2009-08-01 04:10:43.897123752 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parGCAllocBuffer.hpp 1.30 07/05/29 09:44:13 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,8 +44,8 @@ size_t _allocated; // in HeapWord units size_t _wasted; // in HeapWord units char tail[32]; - static const size_t FillerHeaderSize; - static const size_t AlignmentReserve; + static size_t FillerHeaderSize; + static size_t AlignmentReserve; public: // Initializes the buffer to be empty, but with the given "word_sz". @@ -66,9 +66,8 @@ // return NULL. HeapWord* allocate(size_t word_sz) { HeapWord* res = _top; - HeapWord* new_top = _top + word_sz; - if (new_top <= _end) { - _top = new_top; + if (pointer_delta(_end, _top) >= word_sz) { + _top = _top + word_sz; return res; } else { return NULL; @@ -78,10 +77,9 @@ // Undo the last allocation in the buffer, which is required to be of the // "obj" of the given "word_sz". void undo_allocation(HeapWord* obj, size_t word_sz) { - assert(_top - word_sz >= _bottom - && _top - word_sz == obj, - "Bad undo_allocation"); - _top = _top - word_sz; + assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo"); + assert(pointer_delta(_top, obj) == word_sz, "Bad undo"); + _top = obj; } // The total (word) size of the buffer, including both allocated and --- old/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp 2009-08-01 04:10:44.836248392 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp 2009-08-01 04:10:44.743298395 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parNewGeneration.cpp 1.102 07/09/07 09:50:43 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,13 +34,15 @@ #endif ParScanThreadState::ParScanThreadState(Space* to_space_, ParNewGeneration* gen_, - Generation* old_gen_, - int thread_num_, - ObjToScanQueueSet* work_queue_set_, + Generation* old_gen_, + int thread_num_, + ObjToScanQueueSet* work_queue_set_, + GrowableArray** overflow_stack_set_, size_t desired_plab_sz_, ParallelTaskTerminator& term_) : - _to_space(to_space_), _old_gen(old_gen_), _thread_num(thread_num_), + _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_), _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false), + _overflow_stack(overflow_stack_set_[thread_num_]), _ageTable(false), // false ==> not the global age table, no perf data. 
_to_space_alloc_buffer(desired_plab_sz_), _to_space_closure(gen_, this), _old_gen_closure(gen_, this), @@ -84,7 +86,7 @@ assert(old->is_objArray(), "must be obj array"); assert(old->is_forwarded(), "must be forwarded"); assert(Universe::heap()->is_in_reserved(old), "must be in heap."); - assert(!_old_gen->is_in(old), "must be in young generation."); + assert(!old_gen()->is_in(old), "must be in young generation."); objArrayOop obj = objArrayOop(old->forwardee()); // Process ParGCArrayScanChunk elements now @@ -107,42 +109,83 @@ // must be removed. arrayOop(old)->set_length(end); } + // process our set of indices (include header in first chunk) - oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr(start); - oop* end_addr = obj->base() + end; // obj_at_addr(end) asserts end < length - MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr); + // should make sure end is even (aligned to HeapWord in case of compressed oops) if ((HeapWord *)obj < young_old_boundary()) { // object is in to_space - obj->oop_iterate(&_to_space_closure, mr); + obj->oop_iterate_range(&_to_space_closure, start, end); } else { // object is in old generation - obj->oop_iterate(&_old_gen_closure, mr); + obj->oop_iterate_range(&_old_gen_closure, start, end); } } void ParScanThreadState::trim_queues(int max_size) { ObjToScanQueue* queue = work_queue(); - while (queue->size() > (juint)max_size) { - oop obj_to_scan; - if (queue->pop_local(obj_to_scan)) { - note_pop(); - - if ((HeapWord *)obj_to_scan < young_old_boundary()) { - if (obj_to_scan->is_objArray() && - obj_to_scan->is_forwarded() && - obj_to_scan->forwardee() != obj_to_scan) { - scan_partial_array_and_push_remainder(obj_to_scan); + do { + while (queue->size() > (juint)max_size) { + oop obj_to_scan; + if (queue->pop_local(obj_to_scan)) { + note_pop(); + if ((HeapWord *)obj_to_scan < young_old_boundary()) { + if (obj_to_scan->is_objArray() && + obj_to_scan->is_forwarded() && + obj_to_scan->forwardee() != obj_to_scan) { + scan_partial_array_and_push_remainder(obj_to_scan); + } else { + // object is in to_space + obj_to_scan->oop_iterate(&_to_space_closure); + } } else { - // object is in to_space - obj_to_scan->oop_iterate(&_to_space_closure); + // object is in old generation + obj_to_scan->oop_iterate(&_old_gen_closure); } - } else { - // object is in old generation - obj_to_scan->oop_iterate(&_old_gen_closure); } } + // For the case of compressed oops, we have a private, non-shared + // overflow stack, so we eagerly drain it so as to more evenly + // distribute load early. Note: this may be good to do in + // general rather than delay for the final stealing phase. + // If applicable, we'll transfer a set of objects over to our + // work queue, allowing them to be stolen and draining our + // private overflow stack. + } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this)); +} + +bool ParScanThreadState::take_from_overflow_stack() { + assert(ParGCUseLocalOverflow, "Else should not call"); + assert(young_gen()->overflow_list() == NULL, "Error"); + ObjToScanQueue* queue = work_queue(); + GrowableArray* of_stack = overflow_stack(); + uint num_overflow_elems = of_stack->length(); + uint num_take_elems = MIN2(MIN2((queue->max_elems() - queue->size())/4, + (juint)ParGCDesiredObjsFromOverflowList), + num_overflow_elems); + // Transfer the most recent num_take_elems from the overflow + // stack to our work queue. 
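The batch size used for this transfer is the smallest of three bounds: a quarter of the free slots left in the work queue, the ParGCDesiredObjsFromOverflowList tuning value, and the number of entries actually sitting on the private stack. A standalone sketch of that bounded drain, with std::vector and std::deque standing in for the HotSpot stack and queue types:

#include <algorithm>
#include <cassert>
#include <deque>
#include <vector>

// Move at most a bounded batch from a private overflow stack into a
// fixed-capacity work queue; returns whether anything was transferred.
static bool drain_overflow(std::vector<int>& overflow,
                           std::deque<int>& queue,
                           size_t queue_capacity,
                           size_t desired_batch) {
  size_t free_slots = queue_capacity - queue.size();
  size_t take = std::min(std::min(free_slots / 4, desired_batch),
                         overflow.size());
  for (size_t i = 0; i < take; i++) {
    queue.push_back(overflow.back());   // most recently pushed entries first
    overflow.pop_back();
  }
  return take > 0;
}

int main() {
  std::vector<int> overflow = {1, 2, 3, 4, 5};
  std::deque<int> queue;
  bool moved = drain_overflow(overflow, queue, /*capacity*/ 40, /*batch*/ 20);
  assert(moved && queue.size() == 5 && overflow.empty());
  return 0;
}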
+ for (size_t i = 0; i != num_take_elems; i++) { + oop cur = of_stack->pop(); + oop obj_to_push = cur->forwardee(); + assert(Universe::heap()->is_in_reserved(cur), "Should be in heap"); + assert(!old_gen()->is_in_reserved(cur), "Should be in young gen"); + assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap"); + if (should_be_partially_scanned(obj_to_push, cur)) { + assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned"); + obj_to_push = cur; + } + bool ok = queue->push(obj_to_push); + assert(ok, "Should have succeeded"); } + assert(young_gen()->overflow_list() == NULL, "Error"); + return num_take_elems > 0; // was something transferred? +} + +void ParScanThreadState::push_on_overflow_stack(oop p) { + assert(ParGCUseLocalOverflow, "Else should not call"); + overflow_stack()->push(p); + assert(young_gen()->overflow_list() == NULL, "Error"); } HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) { @@ -205,7 +248,7 @@ "Should contain whole object."); to_space_alloc_buffer()->undo_allocation(obj, word_sz); } else { - SharedHeap::fill_region_with_object(MemRegion(obj, word_sz)); + CollectedHeap::fill_with_object(obj, word_sz); } } @@ -215,8 +258,9 @@ ParScanThreadStateSet(int num_threads, Space& to_space, ParNewGeneration& gen, - Generation& old_gen, - ObjToScanQueueSet& queue_set, + Generation& old_gen, + ObjToScanQueueSet& queue_set, + GrowableArray** overflow_stacks_, size_t desired_plab_sz, ParallelTaskTerminator& term); inline ParScanThreadState& thread_sate(int i); @@ -238,7 +282,8 @@ ParScanThreadStateSet::ParScanThreadStateSet( int num_threads, Space& to_space, ParNewGeneration& gen, - Generation& old_gen, ObjToScanQueueSet& queue_set, + Generation& old_gen, ObjToScanQueueSet& queue_set, + GrowableArray** overflow_stack_set_, size_t desired_plab_sz, ParallelTaskTerminator& term) : ResourceArray(sizeof(ParScanThreadState), num_threads), _gen(gen), _next_gen(old_gen), _term(term), @@ -249,7 +294,7 @@ for (int i = 0; i < num_threads; ++i) { new ((ParScanThreadState*)_data + i) ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set, - desired_plab_sz, term); + overflow_stack_set_, desired_plab_sz, term); } } @@ -322,7 +367,6 @@ } } - ParScanClosure::ParScanClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state) : OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g) @@ -331,11 +375,25 @@ _boundary = _g->reserved().end(); } +void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); } +void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); } + +void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); } +void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); } + +void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); } +void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); } + +void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); } +void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); } + ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state) : ScanWeakRefClosure(g), _par_scan_state(par_scan_state) -{ -} +{} + +void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); } 
+void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); } #ifdef WIN32 #pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */ @@ -447,6 +505,7 @@ _is_alive_closure(this), _plab_stats(YoungPLABSize, PLABWeight) { + NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;) _task_queues = new ObjToScanQueueSet(ParallelGCThreads); guarantee(_task_queues != NULL, "task_queues allocation failure."); @@ -460,6 +519,17 @@ for (uint i2 = 0; i2 < ParallelGCThreads; i2++) _task_queues->queue(i2)->initialize(); + _overflow_stacks = NEW_C_HEAP_ARRAY(GrowableArray*, ParallelGCThreads); + guarantee(_overflow_stacks != NULL, "Overflow stack set allocation failure"); + for (uint i = 0; i < ParallelGCThreads; i++) { + if (ParGCUseLocalOverflow) { + _overflow_stacks[i] = new (ResourceObj::C_HEAP) GrowableArray(512, true); + guarantee(_overflow_stacks[i] != NULL, "Overflow Stack allocation failure."); + } else { + _overflow_stacks[i] = NULL; + } + } + if (UsePerfData) { EXCEPTION_MARK; ResourceMark rm; @@ -478,51 +548,66 @@ ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) : DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {} -void -// ParNewGeneration:: -ParKeepAliveClosure::do_oop(oop* p) { - // We never expect to see a null reference being processed - // as a weak reference. - assert (*p != NULL, "expected non-null ref"); - assert ((*p)->is_oop(), "expected an oop while scanning weak refs"); +template +void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) { +#ifdef ASSERT + { + assert(!oopDesc::is_null(*p), "expected non-null ref"); + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + // We never expect to see a null reference being processed + // as a weak reference. + assert(obj->is_oop(), "expected an oop while scanning weak refs"); + } +#endif // ASSERT _par_cl->do_oop_nv(p); if (Universe::heap()->is_in_reserved(p)) { - _rs->write_ref_field_gc_par(p, *p); + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + _rs->write_ref_field_gc_par(p, obj); } } +void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p) { ParKeepAliveClosure::do_oop_work(p); } +void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); } + // ParNewGeneration:: KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) : DefNewGeneration::KeepAliveClosure(cl) {} -void -// ParNewGeneration:: -KeepAliveClosure::do_oop(oop* p) { - // We never expect to see a null reference being processed - // as a weak reference. - assert (*p != NULL, "expected non-null ref"); - assert ((*p)->is_oop(), "expected an oop while scanning weak refs"); +template +void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) { +#ifdef ASSERT + { + assert(!oopDesc::is_null(*p), "expected non-null ref"); + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + // We never expect to see a null reference being processed + // as a weak reference. + assert(obj->is_oop(), "expected an oop while scanning weak refs"); + } +#endif // ASSERT _cl->do_oop_nv(p); if (Universe::heap()->is_in_reserved(p)) { - _rs->write_ref_field_gc_par(p, *p); + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + _rs->write_ref_field_gc_par(p, obj); } } -void ScanClosureWithParBarrier::do_oop(oop* p) { - oop obj = *p; - // Should we copy the obj? 
- if (obj != NULL) { +void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p) { KeepAliveClosure::do_oop_work(p); } +void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); } + +template void ScanClosureWithParBarrier::do_oop_work(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); if ((HeapWord*)obj < _boundary) { assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?"); - if (obj->is_forwarded()) { - *p = obj->forwardee(); - } else { - *p = _g->DefNewGeneration::copy_to_survivor_space(obj, p); - } + oop new_obj = obj->is_forwarded() + ? obj->forwardee() + : _g->DefNewGeneration::copy_to_survivor_space(obj); + oopDesc::encode_store_heap_oop_not_null(p, new_obj); } if (_gc_barrier) { // If p points to a younger generation, mark the card. @@ -533,6 +618,9 @@ } } +void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); } +void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); } + class ParNewRefProcTaskProxy: public AbstractGangTask { typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; public: @@ -700,14 +788,14 @@ SpecializationStats::clear(); age_table()->clear(); - to()->clear(); + to()->clear(SpaceDecorator::Mangle); gch->save_marks(); assert(workers != NULL, "Need parallel worker threads."); ParallelTaskTerminator _term(workers->total_workers(), task_queues()); ParScanThreadStateSet thread_state_set(workers->total_workers(), - *to(), *this, *_next_gen, *task_queues(), - desired_plab_sz(), _term); + *to(), *this, *_next_gen, *task_queues(), + _overflow_stacks, desired_plab_sz(), _term); ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set); int n_workers = workers->total_workers(); @@ -732,17 +820,12 @@ thread_state_set.steals(), thread_state_set.pops()+thread_state_set.steals()); } - assert(thread_state_set.pushes() == thread_state_set.pops() + thread_state_set.steals(), - "Or else the queues are leaky."); + assert(thread_state_set.pushes() == thread_state_set.pops() + + thread_state_set.steals(), + "Or else the queues are leaky."); - // For now, process discovered weak refs sequentially. -#ifdef COMPILER2 - ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy(); -#else - ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - // Process (weak) reference objects found during scavenge. + ReferenceProcessor* rp = ref_processor(); IsAliveClosure is_alive(this); ScanWeakRefClosure scan_weak_ref(this); KeepAliveClosure keep_alive(&scan_weak_ref); @@ -751,23 +834,32 @@ set_promo_failure_scan_stack_closure(&scan_without_gc_barrier); EvacuateFollowersClosureGeneral evacuate_followers(gch, _level, &scan_without_gc_barrier, &scan_with_gc_barrier); - if (ref_processor()->processing_is_mt()) { + rp->setup_policy(clear_all_soft_refs); + if (rp->processing_is_mt()) { ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); - ref_processor()->process_discovered_references( - soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, - &task_executor); + rp->process_discovered_references(&is_alive, &keep_alive, + &evacuate_followers, &task_executor); } else { thread_state_set.flush(); gch->set_par_threads(0); // 0 ==> non-parallel. 
gch->save_marks(); - ref_processor()->process_discovered_references( - soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, - NULL); + rp->process_discovered_references(&is_alive, &keep_alive, + &evacuate_followers, NULL); } if (!promotion_failed()) { // Swap the survivor spaces. - eden()->clear(); - from()->clear(); + eden()->clear(SpaceDecorator::Mangle); + from()->clear(SpaceDecorator::Mangle); + if (ZapUnusedHeapArea) { + // This is now done here because of the piece-meal mangling which + // can check for valid mangling at intermediate points in the + // collection(s). When a minor collection fails to collect + // sufficient space resizing of the young generation can occur + // an redistribute the spaces in the young generation. Mangle + // here so that unzapped regions don't get distributed to + // other spaces. + to()->mangle_unused_area(); + } swap_spaces(); assert(to()->is_empty(), "to space should be empty now"); @@ -788,6 +880,9 @@ swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. from()->set_next_compaction_space(to()); gch->set_incremental_collection_will_fail(); + + // Reset the PromotionFailureALot counters. + NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) } // set new iteration safe limit for the survivor spaces from()->set_concurrent_iteration_safe_limit(from()->top()); @@ -810,15 +905,15 @@ update_time_of_last_gc(os::javaTimeMillis()); SpecializationStats::print(); - - ref_processor()->set_enqueuing_is_done(true); - if (ref_processor()->processing_is_mt()) { + + rp->set_enqueuing_is_done(true); + if (rp->processing_is_mt()) { ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); - ref_processor()->enqueue_discovered_references(&task_executor); + rp->enqueue_discovered_references(&task_executor); } else { - ref_processor()->enqueue_discovered_references(NULL); + rp->enqueue_discovered_references(NULL); } - ref_processor()->verify_no_references_recorded(); + rp->verify_no_references_recorded(); } static int sum; @@ -959,12 +1054,19 @@ "push forwarded object"); } // Push it on one of the queues of to-be-scanned objects. - if (!par_scan_state->work_queue()->push(obj_to_push)) { + bool simulate_overflow = false; + NOT_PRODUCT( + if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) { + // simulate a stack overflow + simulate_overflow = true; + } + ) + if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) { // Add stats for overflow pushes. if (Verbose && PrintGCDetails) { gclog_or_tty->print("queue overflow!\n"); } - push_on_overflow_list(old); + push_on_overflow_list(old, par_scan_state); par_scan_state->note_overflow_push(); } par_scan_state->note_push(); @@ -1076,9 +1178,16 @@ "push forwarded object"); } // Push it on one of the queues of to-be-scanned objects. - if (!par_scan_state->work_queue()->push(obj_to_push)) { + bool simulate_overflow = false; + NOT_PRODUCT( + if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) { + // simulate a stack overflow + simulate_overflow = true; + } + ) + if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) { // Add stats for overflow pushes. 
- push_on_overflow_list(old); + push_on_overflow_list(old, par_scan_state); par_scan_state->note_overflow_push(); } par_scan_state->note_push(); @@ -1101,33 +1210,94 @@ return forward_ptr; } -void ParNewGeneration::push_on_overflow_list(oop from_space_obj) { - oop cur_overflow_list = _overflow_list; - // if the object has been forwarded to itself, then we cannot - // use the klass pointer for the linked list. Instead we have - // to allocate an oopDesc in the C-Heap and use that for the linked list. - if (from_space_obj->forwardee() == from_space_obj) { - oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1); - listhead->forward_to(from_space_obj); - from_space_obj = listhead; +#ifndef PRODUCT +// It's OK to call this multi-threaded; the worst thing +// that can happen is that we'll get a bunch of closely +// spaced simulated oveflows, but that's OK, in fact +// probably good as it would exercise the overflow code +// under contention. +bool ParNewGeneration::should_simulate_overflow() { + if (_overflow_counter-- <= 0) { // just being defensive + _overflow_counter = ParGCWorkQueueOverflowInterval; + return true; + } else { + return false; } - while (true) { - from_space_obj->set_klass_to_list_ptr(cur_overflow_list); - oop observed_overflow_list = - (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list); - if (observed_overflow_list == cur_overflow_list) break; - // Otherwise... - cur_overflow_list = observed_overflow_list; +} +#endif + +// In case we are using compressed oops, we need to be careful. +// If the object being pushed is an object array, then its length +// field keeps track of the "grey boundary" at which the next +// incremental scan will be done (see ParGCArrayScanChunk). +// When using compressed oops, this length field is kept in the +// lower 32 bits of the erstwhile klass word and cannot be used +// for the overflow chaining pointer (OCP below). As such the OCP +// would itself need to be compressed into the top 32-bits in this +// case. Unfortunately, see below, in the event that we have a +// promotion failure, the node to be pushed on the list can be +// outside of the Java heap, so the heap-based pointer compression +// would not work (we would have potential aliasing between C-heap +// and Java-heap pointers). For this reason, when using compressed +// oops, we simply use a worker-thread-local, non-shared overflow +// list in the form of a growable array, with a slightly different +// overflow stack draining strategy. If/when we start using fat +// stacks here, we can go back to using (fat) pointer chains +// (although some performance comparisons would be useful since +// single global lists have their own performance disadvantages +// as we were made painfully aware not long ago, see 6786503). +void ParNewGeneration::push_on_overflow_list(oop from_space_obj, + ParScanThreadState* par_scan_state) { + assert(is_in_reserved(from_space_obj), "Should be from this generation"); + if (ParGCUseLocalOverflow) { + // In the case of compressed oops, we use a private, not-shared + // overflow stack. + par_scan_state->push_on_overflow_stack(from_space_obj); + } else { + assert(!UseCompressedOops, "Error"); + assert(par_scan_state->overflow_stack() == NULL, "Error"); + oop cur_overflow_list = _overflow_list; + // if the object has been forwarded to itself, then we cannot + // use the klass pointer for the linked list. Instead we have + // to allocate an oopDesc in the C-Heap and use that for the linked list. 
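The global-list branch that follows is a compare-and-swap prepend: link the node to the head last observed, try to install it as the new head, and retry with the freshly observed head on failure. A minimal standalone sketch of that retry loop using std::atomic; Node and g_overflow_head are illustrative names, and unlike the real code, which chains objects through the klass word, this sketch uses an ordinary next field:

#include <atomic>
#include <cassert>

struct Node { Node* next; };

static std::atomic<Node*> g_overflow_head{nullptr};

void push_on_overflow(Node* n) {
  Node* observed = g_overflow_head.load(std::memory_order_relaxed);
  do {
    // Point the new node at the head we last saw, then try to install it.
    n->next = observed;
    // On failure, 'observed' is refreshed with the current head and we retry.
  } while (!g_overflow_head.compare_exchange_weak(observed, n,
                                                  std::memory_order_release,
                                                  std::memory_order_relaxed));
}

int main() {
  Node a{nullptr}, b{nullptr};
  push_on_overflow(&a);
  push_on_overflow(&b);
  assert(g_overflow_head.load() == &b && b.next == &a);
  return 0;
}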
+ if (from_space_obj->forwardee() == from_space_obj) { + oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1); + listhead->forward_to(from_space_obj); + from_space_obj = listhead; + } + while (true) { + from_space_obj->set_klass_to_list_ptr(cur_overflow_list); + oop observed_overflow_list = + (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list); + if (observed_overflow_list == cur_overflow_list) break; + // Otherwise... + cur_overflow_list = observed_overflow_list; + } } } bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) { + bool res; + + if (ParGCUseLocalOverflow) { + res = par_scan_state->take_from_overflow_stack(); + } else { + assert(!UseCompressedOops, "Error"); + res = take_from_overflow_list_work(par_scan_state); + } + return res; +} + +bool +ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) { ObjToScanQueue* work_q = par_scan_state->work_queue(); // How many to take? - int objsFromOverflow = MIN2(work_q->max_elems()/4, - (juint)ParGCDesiredObjsFromOverflowList); + int objsFromOverflow = MIN2((work_q->max_elems() - work_q->size())/4, + (juint)ParGCDesiredObjsFromOverflowList); + assert(par_scan_state->overflow_stack() == NULL, "Error"); + assert(!UseCompressedOops, "Error"); if (_overflow_list == NULL) return false; // Otherwise, there was something there; try claiming the list. @@ -1139,18 +1309,18 @@ // Trim off a prefix of at most objsFromOverflow items int i = 1; oop cur = prefix; - while (i < objsFromOverflow && cur->klass() != NULL) { + while (i < objsFromOverflow && cur->klass_or_null() != NULL) { i++; cur = oop(cur->klass()); } // Reattach remaining (suffix) to overflow list - if (cur->klass() != NULL) { + if (cur->klass_or_null() != NULL) { oop suffix = oop(cur->klass()); cur->set_klass_to_list_ptr(NULL); // Find last item of suffix list oop last = suffix; - while (last->klass() != NULL) { + while (last->klass_or_null() != NULL) { last = oop(last->klass()); } // Atomically prepend suffix to current overflow list @@ -1171,13 +1341,24 @@ int n = 0; while (cur != NULL) { oop obj_to_push = cur->forwardee(); - oop next = oop(cur->klass()); + oop next = oop(cur->klass_or_null()); cur->set_klass(obj_to_push->klass()); - if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) { - obj_to_push = cur; + // This may be an array object that is self-forwarded. In that case, the list pointer + // space, cur, is not in the Java heap, but rather in the C-heap and should be freed. + if (!is_in_reserved(cur)) { + // Temporary: change this to an assert. This is to mitigate risk for a change + // that has not been tested heavily (except via non-product stress options). + guarantee(!Universe::heap()->is_in_reserved(cur), "Can't be elsewhere in the heap"); + // This can become a scaling bottleneck when there is work queue overflow coincident + // with promotion failure. 
+ oopDesc* f = cur; + FREE_C_HEAP_ARRAY(oopDesc, f); + } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) { assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned"); + obj_to_push = cur; } - work_q->push(obj_to_push); + bool ok = work_q->push(obj_to_push); + assert(ok, "Should have succeeded"); cur = next; n++; } --- old/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp 2009-08-01 04:10:45.857529886 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp 2009-08-01 04:10:45.784018889 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parNewGeneration.hpp 1.48 07/05/17 15:52:44 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,24 +36,29 @@ // but they must be here to allow ParScanClosure::do_oop_work to be defined // in genOopClosures.inline.hpp. - -typedef OopTaskQueue ObjToScanQueue; -typedef OopTaskQueueSet ObjToScanQueueSet; +typedef OopTaskQueue ObjToScanQueue; +typedef OopTaskQueueSet ObjToScanQueueSet; // Enable this to get push/pop/steal stats. const int PAR_STATS_ENABLED = 0; class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure { + private: ParScanWeakRefClosure* _par_cl; + protected: + template void do_oop_work(T* p); public: ParKeepAliveClosure(ParScanWeakRefClosure* cl); - void do_oop(oop* p); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; // The state needed by thread performing parallel young-gen collection. class ParScanThreadState { friend class ParScanThreadStateSet; + private: ObjToScanQueue *_work_queue; + GrowableArray* _overflow_stack; ParGCAllocBuffer _to_space_alloc_buffer; @@ -78,6 +83,9 @@ Space* _to_space; Space* to_space() { return _to_space; } + ParNewGeneration* _young_gen; + ParNewGeneration* young_gen() const { return _young_gen; } + Generation* _old_gen; Generation* old_gen() { return _old_gen; } @@ -111,10 +119,12 @@ ParScanThreadState(Space* to_space_, ParNewGeneration* gen_, Generation* old_gen_, int thread_num_, - ObjToScanQueueSet* work_queue_set_, size_t desired_plab_sz_, + ObjToScanQueueSet* work_queue_set_, + GrowableArray** overflow_stack_set_, + size_t desired_plab_sz_, ParallelTaskTerminator& term_); -public: + public: ageTable* age_table() {return &_ageTable;} ObjToScanQueue* work_queue() { return _work_queue; } @@ -133,6 +143,11 @@ // Decrease queue size below "max_size". void trim_queues(int max_size); + // Private overflow stack usage + GrowableArray* overflow_stack() { return _overflow_stack; } + bool take_from_overflow_stack(); + void push_on_overflow_stack(oop p); + // Is new_obj a candidate for scan_partial_array_and_push_remainder method. 
inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const; @@ -198,13 +213,13 @@ double elapsed() { return os::elapsedTime() - _start; } - }; class ParNewGenTask: public AbstractGangTask { - ParNewGeneration* _gen; - Generation* _next_gen; - HeapWord* _young_old_boundary; + private: + ParNewGeneration* _gen; + Generation* _next_gen; + HeapWord* _young_old_boundary; class ParScanThreadStateSet* _state_set; public: @@ -219,35 +234,44 @@ }; class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure { + protected: + template void do_oop_work(T* p); public: KeepAliveClosure(ScanWeakRefClosure* cl); - void do_oop(oop* p); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; class EvacuateFollowersClosureGeneral: public VoidClosure { - GenCollectedHeap* _gch; - int _level; - OopsInGenClosure* _scan_cur_or_nonheap; - OopsInGenClosure* _scan_older; - public: - EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level, - OopsInGenClosure* cur, - OopsInGenClosure* older); - void do_void(); + private: + GenCollectedHeap* _gch; + int _level; + OopsInGenClosure* _scan_cur_or_nonheap; + OopsInGenClosure* _scan_older; + public: + EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level, + OopsInGenClosure* cur, + OopsInGenClosure* older); + virtual void do_void(); }; // Closure for scanning ParNewGeneration. // Same as ScanClosure, except does parallel GC barrier. class ScanClosureWithParBarrier: public ScanClosure { -public: + protected: + template void do_oop_work(T* p); + public: ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier); - void do_oop(oop* p); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; // Implements AbstractRefProcTaskExecutor for ParNew. class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor { -public: - + private: + ParNewGeneration& _generation; + ParScanThreadStateSet& _state_set; + public: ParNewRefProcTaskExecutor(ParNewGeneration& generation, ParScanThreadStateSet& state_set) : _generation(generation), _state_set(state_set) @@ -258,9 +282,6 @@ virtual void execute(EnqueueTask& task); // Switch to single threaded mode. virtual void set_single_threaded_mode(); -private: - ParNewGeneration& _generation; - ParScanThreadStateSet& _state_set; }; @@ -272,15 +293,19 @@ friend class ParNewRefProcTaskExecutor; friend class ParScanThreadStateSet; + private: // XXX use a global constant instead of 64! struct ObjToScanQueuePadded { ObjToScanQueue work_queue; char pad[64 - sizeof(ObjToScanQueue)]; // prevent false sharing }; - // The per-thread work queues, available here for stealing. + // The per-worker-thread work queues ObjToScanQueueSet* _task_queues; + // Per-worker-thread local overflow stacks + GrowableArray** _overflow_stacks; + // Desired size of survivor space plab's PLABStats _plab_stats; @@ -317,7 +342,7 @@ // the details of the policy. virtual void adjust_desired_tenuring_threshold(); -public: + public: ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level); ~ParNewGeneration() { @@ -364,13 +389,21 @@ oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state, oop obj, size_t obj_sz, markOop m); + // in support of testing overflow code + NOT_PRODUCT(int _overflow_counter;) + NOT_PRODUCT(bool should_simulate_overflow();) + + // Accessor for overflow list + oop overflow_list() { return _overflow_list; } + // Push the given (from-space) object on the global overflow list. 
- void push_on_overflow_list(oop from_space_obj); + void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state); // If the global overflow list is non-empty, move some tasks from it // onto "work_q" (which must be empty). No more than 1/4 of the // max_elems of "work_q" are moved. bool take_from_overflow_list(ParScanThreadState* par_scan_state); + bool take_from_overflow_list_work(ParScanThreadState* par_scan_state); // The task queues to be used by parallel GC threads. ObjToScanQueueSet* task_queues() { --- old/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp 2009-08-01 04:10:46.711076747 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp 2009-08-01 04:10:46.637480447 +0100 @@ -29,70 +29,77 @@ class ParScanThreadState; class ParNewGeneration; -template class GenericTaskQueueSet; -typedef GenericTaskQueueSet ObjToScanQueueSet; +typedef OopTaskQueueSet ObjToScanQueueSet; class ParallelTaskTerminator; class ParScanClosure: public OopsInGenClosure { -protected: + protected: ParScanThreadState* _par_scan_state; - ParNewGeneration* _g; - HeapWord* _boundary; - void do_oop_work(oop* p, - bool gc_barrier, - bool root_scan); - - void par_do_barrier(oop* p); - -public: + ParNewGeneration* _g; + HeapWord* _boundary; + template void inline par_do_barrier(T* p); + template void inline do_oop_work(T* p, + bool gc_barrier, + bool root_scan); + public: ParScanClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state); }; class ParScanWithBarrierClosure: public ParScanClosure { -public: - void do_oop(oop* p) { do_oop_work(p, true, false); } - void do_oop_nv(oop* p) { do_oop_work(p, true, false); } + public: ParScanWithBarrierClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state) : ParScanClosure(g, par_scan_state) {} + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p); + inline void do_oop_nv(narrowOop* p); }; class ParScanWithoutBarrierClosure: public ParScanClosure { -public: + public: ParScanWithoutBarrierClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state) : ParScanClosure(g, par_scan_state) {} - void do_oop(oop* p) { do_oop_work(p, false, false); } - void do_oop_nv(oop* p) { do_oop_work(p, false, false); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p); + inline void do_oop_nv(narrowOop* p); }; class ParRootScanWithBarrierTwoGensClosure: public ParScanClosure { -public: + public: ParRootScanWithBarrierTwoGensClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state) : ParScanClosure(g, par_scan_state) {} - void do_oop(oop* p) { do_oop_work(p, true, true); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; class ParRootScanWithoutBarrierClosure: public ParScanClosure { -public: + public: ParRootScanWithoutBarrierClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state) : ParScanClosure(g, par_scan_state) {} - void do_oop(oop* p) { do_oop_work(p, false, true); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; class ParScanWeakRefClosure: public ScanWeakRefClosure { -protected: + protected: ParScanThreadState* _par_scan_state; -public: + template inline void do_oop_work(T* p); + public: ParScanWeakRefClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state); - void do_oop(oop* p); - void do_oop_nv(oop* p); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p); + inline void do_oop_nv(narrowOop* p); }; 
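Throughout these closure changes the pattern is the same: the per-field logic moves into a single do_oop_work<T> template, and the two virtual do_oop overloads, one for full-width and one for compressed reference slots, simply forward to it. A self-contained sketch of that pattern, using hypothetical FieldVisitor/WideRef/NarrowRef names rather than the real oop and closure types:

#include <cstdint>
#include <iostream>

// Hypothetical stand-ins for the two reference widths (oop* / narrowOop*).
typedef uint64_t WideRef;     // full-width reference slot
typedef uint32_t NarrowRef;   // compressed reference slot

class FieldVisitor {
 protected:
  // One implementation, instantiated for both slot widths.
  template <class T> void do_ref_work(T* slot, bool barrier) {
    std::cout << "visiting slot of " << sizeof(T) << " bytes"
              << (barrier ? " with barrier" : "") << "\n";
  }
 public:
  virtual ~FieldVisitor() {}
  virtual void do_ref(WideRef* p)   = 0;
  virtual void do_ref(NarrowRef* p) = 0;
};

class BarrierVisitor : public FieldVisitor {
 public:
  // Both virtual entry points forward to the shared template.
  virtual void do_ref(WideRef* p)   { do_ref_work(p, true); }
  virtual void do_ref(NarrowRef* p) { do_ref_work(p, true); }
};

int main() {
  WideRef w = 0; NarrowRef n = 0;
  BarrierVisitor v;
  FieldVisitor* fv = &v;
  fv->do_ref(&w);   // dispatches to the 64-bit instantiation
  fv->do_ref(&n);   // dispatches to the 32-bit instantiation
  return 0;
}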
class ParEvacuateFollowersClosure: public VoidClosure { + private: ParScanThreadState* _par_scan_state; ParScanThreadState* par_scan_state() { return _par_scan_state; } @@ -124,8 +131,7 @@ ParallelTaskTerminator* _terminator; ParallelTaskTerminator* terminator() { return _terminator; } - -public: + public: ParEvacuateFollowersClosure( ParScanThreadState* par_scan_state_, ParScanWithoutBarrierClosure* to_space_closure_, @@ -135,5 +141,5 @@ ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_, ObjToScanQueueSet* task_queues_, ParallelTaskTerminator* terminator_); - void do_void(); + virtual void do_void(); }; --- old/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp 2009-08-01 04:10:47.589755023 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp 2009-08-01 04:10:47.507742968 +0100 @@ -25,10 +25,9 @@ * */ -inline void ParScanWeakRefClosure::do_oop(oop* p) -{ - oop obj = *p; - assert (obj != NULL, "null weak reference?"); +template inline void ParScanWeakRefClosure::do_oop_work(T* p) { + assert (!oopDesc::is_null(*p), "null weak reference?"); + oop obj = oopDesc::load_decode_heap_oop_not_null(p); // weak references are sometimes scanned twice; must check // that to-space doesn't already contain this object if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) { @@ -36,41 +35,43 @@ // ParScanClosure::do_oop_work). klassOop objK = obj->klass(); markOop m = obj->mark(); + oop new_obj; if (m->is_marked()) { // Contains forwarding pointer. - *p = ParNewGeneration::real_forwardee(obj); + new_obj = ParNewGeneration::real_forwardee(obj); } else { - size_t obj_sz = obj->size_given_klass(objK->klass_part()); - *p = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state, - obj, obj_sz, m); + size_t obj_sz = obj->size_given_klass(objK->klass_part()); + new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state, + obj, obj_sz, m); } + oopDesc::encode_store_heap_oop_not_null(p, new_obj); } } -inline void ParScanWeakRefClosure::do_oop_nv(oop* p) -{ - ParScanWeakRefClosure::do_oop(p); -} +inline void ParScanWeakRefClosure::do_oop_nv(oop* p) { ParScanWeakRefClosure::do_oop_work(p); } +inline void ParScanWeakRefClosure::do_oop_nv(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); } -inline void ParScanClosure::par_do_barrier(oop* p) { +template inline void ParScanClosure::par_do_barrier(T* p) { assert(generation()->is_in_reserved(p), "expected ref in generation"); - oop obj = *p; - assert(obj != NULL, "expected non-null object"); + assert(!oopDesc::is_null(*p), "expected non-null object"); + oop obj = oopDesc::load_decode_heap_oop_not_null(p); // If p points to a younger generation, mark the card. 
if ((HeapWord*)obj < gen_boundary()) { rs()->write_ref_field_gc_par(p, obj); } } -inline void ParScanClosure::do_oop_work(oop* p, +template +inline void ParScanClosure::do_oop_work(T* p, bool gc_barrier, bool root_scan) { - oop obj = *p; assert((!Universe::heap()->is_in_reserved(p) || - generation()->is_in_reserved(p)) - && (generation()->level() == 0 || gc_barrier), - "The gen must be right, and we must be doing the barrier " - "in older generations."); - if (obj != NULL) { + generation()->is_in_reserved(p)) + && (generation()->level() == 0 || gc_barrier), + "The gen must be right, and we must be doing the barrier " + "in older generations."); + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); if ((HeapWord*)obj < _boundary) { assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?"); // OK, we need to ensure that it is copied. @@ -81,17 +82,20 @@ // forwarded. klassOop objK = obj->klass(); markOop m = obj->mark(); + oop new_obj; if (m->is_marked()) { // Contains forwarding pointer. - *p = ParNewGeneration::real_forwardee(obj); + new_obj = ParNewGeneration::real_forwardee(obj); + oopDesc::encode_store_heap_oop_not_null(p, new_obj); } else { - size_t obj_sz = obj->size_given_klass(objK->klass_part()); - *p = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m); - if (root_scan) { - // This may have pushed an object. If we have a root - // category with a lot of roots, can't let the queue get too - // full: - (void)_par_scan_state->trim_queues(10 * ParallelGCThreads); - } + size_t obj_sz = obj->size_given_klass(objK->klass_part()); + new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m); + oopDesc::encode_store_heap_oop_not_null(p, new_obj); + if (root_scan) { + // This may have pushed an object. If we have a root + // category with a lot of roots, can't let the queue get too + // full: + (void)_par_scan_state->trim_queues(10 * ParallelGCThreads); + } } if (gc_barrier) { // Now call parent closure @@ -101,3 +105,8 @@ } } +inline void ParScanWithBarrierClosure::do_oop_nv(oop* p) { ParScanClosure::do_oop_work(p, true, false); } +inline void ParScanWithBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); } + +inline void ParScanWithoutBarrierClosure::do_oop_nv(oop* p) { ParScanClosure::do_oop_work(p, false, false); } +inline void ParScanWithoutBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); } --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp 2009-08-01 04:10:48.523223087 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp 2009-08-01 04:10:48.432544398 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)asPSYoungGen.cpp 1.24 07/10/04 10:49:36 JVM" #endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -89,7 +89,7 @@ if (eden_space()->is_empty()) { // Respect the minimum size for eden and for the young gen as a whole. 
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t eden_alignment = heap->intra_generation_alignment(); + const size_t eden_alignment = heap->intra_heap_alignment(); const size_t gen_alignment = heap->young_gen_alignment(); assert(eden_space()->capacity_in_bytes() >= eden_alignment, @@ -127,7 +127,7 @@ // to_space can be. size_t ASPSYoungGen::available_to_live() { ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t alignment = heap->intra_generation_alignment(); + const size_t alignment = heap->intra_heap_alignment(); // Include any space that is committed but is not in eden. size_t available = pointer_delta(eden_space()->bottom(), @@ -173,9 +173,20 @@ if (desired_size > orig_size) { // Grow the generation size_t change = desired_size - orig_size; + HeapWord* prev_low = (HeapWord*) virtual_space()->low(); if (!virtual_space()->expand_by(change)) { return false; } + if (ZapUnusedHeapArea) { + // Mangle newly committed space immediately because it + // can be done here more simply that after the new + // spaces have been computed. + HeapWord* new_low = (HeapWord*) virtual_space()->low(); + assert(new_low < prev_low, "Did not grow"); + + MemRegion mangle_region(new_low, prev_low); + SpaceMangler::mangle_region(mangle_region); + } size_changed = true; } else if (desired_size < orig_size) { size_t desired_change = orig_size - desired_size; @@ -218,8 +229,10 @@ // current implementation does not allow holes between the spaces // _young_generation_boundary has to be reset because it changes. // so additional verification -void ASPSYoungGen::resize_spaces(size_t requested_eden_size, - size_t requested_survivor_size) { + +void ASPSYoungGen::resize_spaces(size_t requested_eden_size, + size_t requested_survivor_size) { + assert(UseAdaptiveSizePolicy, "sanity check"); assert(requested_eden_size > 0 && requested_survivor_size > 0, "just checking"); @@ -278,23 +291,43 @@ assert(eden_start < from_start, "Cannot push into from_space"); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t alignment = heap->intra_generation_alignment(); + const size_t alignment = heap->intra_heap_alignment(); + const bool maintain_minimum = + (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size(); + bool eden_from_to_order = from_start < to_start; // Check whether from space is below to space - if (from_start < to_start) { + if (eden_from_to_order) { // Eden, from, to + if (PrintAdaptiveSizePolicy && Verbose) { gclog_or_tty->print_cr(" Eden, from, to:"); } // Set eden - // Compute how big eden can be, then adjust end. - // See comment in PSYoungGen::resize_spaces() on - // calculating eden_end. - const size_t eden_size = MIN2(requested_eden_size, - pointer_delta(from_start, - eden_start, - sizeof(char))); + // "requested_eden_size" is a goal for the size of eden + // and may not be attainable. "eden_size" below is + // calculated based on the location of from-space and + // the goal for the size of eden. from-space is + // fixed in place because it contains live data. + // The calculation is done this way to avoid 32bit + // overflow (i.e., eden_start + requested_eden_size + // may too large for representation in 32bits). + size_t eden_size; + if (maintain_minimum) { + // Only make eden larger than the requested size if + // the minimum size of the generation has to be maintained. 
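The comment above boils down to computing the available room as a pointer difference and then capping the request against it, instead of forming eden_start + requested_eden_size, which can wrap on a 32-bit VM. A small sketch of that overflow-safe arithmetic with plain char pointers and hypothetical names:

#include <algorithm>
#include <cassert>
#include <cstddef>

// Overflow-safe: measure the room first, then cap the request against it.
static size_t clamped_size(const char* start, const char* limit, size_t requested) {
  assert(start <= limit);
  size_t available = static_cast<size_t>(limit - start);  // never overflows
  return std::min(requested, available);
  // By contrast, computing 'start + requested' and comparing it to 'limit'
  // can wrap past the end of the address space when 'requested' is large.
}

int main() {
  char region[1024];
  assert(clamped_size(region, region + 1024, 4096) == 1024);
  assert(clamped_size(region, region + 1024, 256) == 256);
  return 0;
}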
+ // This could be done in general but policy at a higher + // level is determining a requested size for eden and that + // should be honored unless there is a fundamental reason. + eden_size = pointer_delta(from_start, + eden_start, + sizeof(char)); + } else { + eden_size = MIN2(requested_eden_size, + pointer_delta(from_start, eden_start, sizeof(char))); + } + eden_end = eden_start + eden_size; assert(eden_end >= eden_start, "addition overflowed") @@ -374,12 +407,14 @@ to_start = MAX2(to_start, eden_start + alignment); // Compute how big eden can be, then adjust end. - // See comment in PSYoungGen::resize_spaces() on - // calculating eden_end. - const size_t eden_size = MIN2(requested_eden_size, - pointer_delta(to_start, - eden_start, - sizeof(char))); + // See comments above on calculating eden_end. + size_t eden_size; + if (maintain_minimum) { + eden_size = pointer_delta(to_start, eden_start, sizeof(char)); + } else { + eden_size = MIN2(requested_eden_size, + pointer_delta(to_start, eden_start, sizeof(char))); + } eden_end = eden_start + eden_size; assert(eden_end >= eden_start, "addition overflowed") @@ -426,9 +461,47 @@ size_t old_from = from_space()->capacity_in_bytes(); size_t old_to = to_space()->capacity_in_bytes(); - eden_space()->initialize(edenMR, true); - to_space()->initialize(toMR , true); - from_space()->initialize(fromMR, false); // Note, not cleared! + if (ZapUnusedHeapArea) { + // NUMA is a special case because a numa space is not mangled + // in order to not prematurely bind its address to memory to + // the wrong memory (i.e., don't want the GC thread to first + // touch the memory). The survivor spaces are not numa + // spaces and are mangled. + if (UseNUMA) { + if (eden_from_to_order) { + mangle_survivors(from_space(), fromMR, to_space(), toMR); + } else { + mangle_survivors(to_space(), toMR, from_space(), fromMR); + } + } + + // If not mangling the spaces, do some checking to verify that + // the spaces are already mangled. + // The spaces should be correctly mangled at this point so + // do some checking here. Note that they are not being mangled + // in the calls to initialize(). + // Must check mangling before the spaces are reshaped. Otherwise, + // the bottom or end of one space may have moved into an area + // covered by another space and a failure of the check may + // not correctly indicate which space is not properly mangled. + + HeapWord* limit = (HeapWord*) virtual_space()->high(); + eden_space()->check_mangled_unused_area(limit); + from_space()->check_mangled_unused_area(limit); + to_space()->check_mangled_unused_area(limit); + } + // When an existing space is being initialized, it is not + // mangled because the space has been previously mangled. 
+ eden_space()->initialize(edenMR, + SpaceDecorator::Clear, + SpaceDecorator::DontMangle); + to_space()->initialize(toMR, + SpaceDecorator::Clear, + SpaceDecorator::DontMangle); + from_space()->initialize(fromMR, + SpaceDecorator::DontClear, + SpaceDecorator::DontMangle); + PSScavenge::set_young_generation_boundary(eden_space()->bottom()); assert(from_space()->top() == old_from_top, "from top changed!"); @@ -449,7 +522,6 @@ } space_invariants(); } - void ASPSYoungGen::reset_after_change() { assert_locked_or_safepoint(Heap_lock); @@ -461,7 +533,9 @@ HeapWord* eden_bottom = eden_space()->bottom(); if (new_eden_bottom != eden_bottom) { MemRegion eden_mr(new_eden_bottom, eden_space()->end()); - eden_space()->initialize(eden_mr, true); + eden_space()->initialize(eden_mr, + SpaceDecorator::Clear, + SpaceDecorator::Mangle); PSScavenge::set_young_generation_boundary(eden_space()->bottom()); } MemRegion cmr((HeapWord*)virtual_space()->low(), --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp 2009-08-01 04:10:49.432070866 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp 2009-08-01 04:10:49.353791503 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cardTableExtension.cpp 1.35 07/09/25 16:47:41 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,17 +31,16 @@ // Checks an individual oop for missing precise marks. Mark // may be either dirty or newgen. class CheckForUnmarkedOops : public OopClosure { - PSYoungGen* _young_gen; + private: + PSYoungGen* _young_gen; CardTableExtension* _card_table; - HeapWord* _unmarked_addr; - jbyte* _unmarked_card; + HeapWord* _unmarked_addr; + jbyte* _unmarked_card; - public: - CheckForUnmarkedOops( PSYoungGen* young_gen, CardTableExtension* card_table ) : - _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { } - - virtual void do_oop(oop* p) { - if (_young_gen->is_in_reserved(*p) && + protected: + template void do_oop_work(T* p) { + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + if (_young_gen->is_in_reserved(obj) && !_card_table->addr_is_marked_imprecise(p)) { // Don't overwrite the first missing card mark if (_unmarked_addr == NULL) { @@ -51,6 +50,13 @@ } } + public: + CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) : + _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { } + + virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); } + virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); } + bool has_unmarked_oop() { return _unmarked_addr != NULL; } @@ -59,7 +65,8 @@ // Checks all objects for the existance of some type of mark, // precise or imprecise, dirty or newgen. class CheckForUnmarkedObjects : public ObjectClosure { - PSYoungGen* _young_gen; + private: + PSYoungGen* _young_gen; CardTableExtension* _card_table; public: @@ -78,7 +85,7 @@ // we test for missing precise marks first. If any are found, we don't // fail unless the object head is also unmarked. 
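The verification rule described above is that a reference into the young generation is acceptable if either its own card carries a precise mark or the card covering the object header carries an imprecise one. A toy model of that rule, with integer addresses and a set of dirty card indices standing in for the real card table:

#include <cassert>
#include <set>
#include <vector>

// Toy card table: 8 "words" per card, a set of dirty card indices.
struct Cards {
  std::set<int> dirty;
  static int card_for(int addr) { return addr / 8; }
};

// Accept the object if every young-gen reference field is covered by a
// precise mark on its own card or an imprecise mark on the header's card.
static bool object_marks_ok(const Cards& c, int obj_addr,
                            const std::vector<int>& young_ref_fields) {
  for (size_t i = 0; i < young_ref_fields.size(); i++) {
    bool precise   = c.dirty.count(Cards::card_for(young_ref_fields[i])) != 0;
    bool imprecise = c.dirty.count(Cards::card_for(obj_addr)) != 0;
    if (!precise && !imprecise) return false;
  }
  return true;
}

int main() {
  Cards c;
  c.dirty.insert(Cards::card_for(100));      // header card is marked
  std::vector<int> fields(1, 120);           // field lives on another card
  assert(object_marks_ok(c, 100, fields));   // imprecise mark suffices
  return 0;
}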
virtual void do_object(oop obj) { - CheckForUnmarkedOops object_check( _young_gen, _card_table ); + CheckForUnmarkedOops object_check(_young_gen, _card_table); obj->oop_iterate(&object_check); if (object_check.has_unmarked_oop()) { assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object"); @@ -88,19 +95,25 @@ // Checks for precise marking of oops as newgen. class CheckForPreciseMarks : public OopClosure { - PSYoungGen* _young_gen; + private: + PSYoungGen* _young_gen; CardTableExtension* _card_table; - public: - CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) : - _young_gen(young_gen), _card_table(card_table) { } - - virtual void do_oop(oop* p) { - if (_young_gen->is_in_reserved(*p)) { + protected: + template void do_oop_work(T* p) { + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + if (_young_gen->is_in_reserved(obj)) { assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop"); _card_table->set_card_newgen(p); } } + + public: + CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) : + _young_gen(young_gen), _card_table(card_table) { } + + virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); } + virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); } }; // We get passed the space_top value to prevent us from traversing into @@ -656,9 +669,9 @@ HeapWord* new_end_for_commit = MIN2(cur_committed.end(), _guard_region.start()); - MemRegion new_committed = - MemRegion(new_start_aligned, new_end_for_commit); - if(!new_committed.is_empty()) { + if(new_start_aligned < new_end_for_commit) { + MemRegion new_committed = + MemRegion(new_start_aligned, new_end_for_commit); if (!os::commit_memory((char*)new_committed.start(), new_committed.byte_size())) { vm_exit_out_of_memory(new_committed.byte_size(), --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp 2009-08-01 04:10:50.349779733 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp 2009-08-01 04:10:50.267769142 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cardTableExtension.hpp 1.20 07/05/05 17:05:26 JVM" #endif /* - * Copyright 2001-2003 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -83,7 +83,7 @@ static bool card_is_verify(int value) { return value == verify_card; } // Card marking - void inline_write_ref_field_gc(oop* field, oop new_val) { + void inline_write_ref_field_gc(void* field, oop new_val) { jbyte* byte = byte_for(field); *byte = youngergen_card; } --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp 2009-08-01 04:10:51.200314629 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp 2009-08-01 04:10:51.131641151 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)generationSizer.hpp 1.17 07/05/05 17:05:27 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,10 +42,10 @@ // If the user hasn't explicitly set the number of worker // threads, set the count. 
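The last cardTableExtension.cpp hunk above commits the new card-table range only when the aligned start is strictly below the commit limit, instead of building a possibly empty MemRegion first. A small sketch of the same guard with plain pointers (commit_memory_stub is a hypothetical stand-in for the OS-level commit call):

    #include <cstddef>
    #include <iostream>

    // Hypothetical stand-in for an OS-level commit call.
    static bool commit_memory_stub(char* start, size_t bytes) {
      std::cout << "commit " << bytes << " bytes at "
                << static_cast<void*>(start) << "\n";
      return true;
    }

    // Only commit when the half-open range [start, end) is non-empty,
    // mirroring the new_start_aligned < new_end_for_commit check above.
    static bool maybe_commit(char* start, char* end) {
      if (start < end) {
        return commit_memory_stub(start, static_cast<size_t>(end - start));
      }
      return true;  // nothing to do for an empty range
    }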
- if (ParallelGCThreads == 0) { - assert(UseParallelGC, "Setting ParallelGCThreads without UseParallelGC"); - ParallelGCThreads = os::active_processor_count(); - } + assert(UseSerialGC || + !FLAG_IS_DEFAULT(ParallelGCThreads) || + (ParallelGCThreads > 0), + "ParallelGCThreads should be set before flag initialization"); // The survivor ratio's are calculated "raw", unlike the // default gc, which adds 2 to the ratio value. We need to --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp 2009-08-01 04:10:51.986135552 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp 2009-08-01 04:10:51.908778300 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parMarkBitMap.cpp 1.31 07/10/04 10:49:33 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,7 +44,7 @@ const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 : MAX2(page_sz, granularity); - ReservedSpace rs(bytes, rs_align, false); + ReservedSpace rs(bytes, rs_align, rs_align > 0); os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz, rs.base(), rs.size()); _virtual_space = new PSVirtualSpace(rs, page_sz); @@ -64,6 +64,8 @@ if (_virtual_space != NULL) { delete _virtual_space; _virtual_space = NULL; + // Release memory reserved in the space. + rs.release(); } return false; } --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp 2009-08-01 04:10:52.760875211 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp 2009-08-01 04:10:52.686581009 +0100 @@ -195,16 +195,16 @@ }; inline ParMarkBitMap::ParMarkBitMap(): - _beg_bits(NULL, 0), - _end_bits(NULL, 0) + _beg_bits(), + _end_bits() { _region_start = 0; _virtual_space = 0; } -inline ParMarkBitMap::ParMarkBitMap(MemRegion covered_region): - _beg_bits(NULL, 0), - _end_bits(NULL, 0) +inline ParMarkBitMap::ParMarkBitMap(MemRegion covered_region): + _beg_bits(), + _end_bits() { initialize(covered_region); } @@ -328,7 +328,7 @@ inline size_t ParMarkBitMap::obj_size(idx_t beg_bit) const { - const idx_t end_bit = _end_bits.find_next_one_bit(beg_bit, size()); + const idx_t end_bit = _end_bits.get_next_one_offset_inline(beg_bit, size()); assert(is_marked(beg_bit), "obj not marked"); assert(end_bit < size(), "end bit missing"); return obj_size(beg_bit, end_bit); @@ -387,13 +387,13 @@ inline ParMarkBitMap::idx_t ParMarkBitMap::find_obj_beg(idx_t beg, idx_t end) const { - return _beg_bits.find_next_one_bit(beg, end); + return _beg_bits.get_next_one_offset_inline_aligned_right(beg, end); } inline ParMarkBitMap::idx_t ParMarkBitMap::find_obj_end(idx_t beg, idx_t end) const { - return _end_bits.find_next_one_bit(beg, end); + return _end_bits.get_next_one_offset_inline_aligned_right(beg, end); } inline HeapWord* --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp 2009-08-01 04:10:53.586653097 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp 2009-08-01 04:10:53.500789813 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parallelScavengeHeap.cpp 1.95 07/10/04 10:49:31 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. 
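In the parMarkBitMap.cpp hunk above, large pages are requested only when an alignment was actually chosen (rs_align > 0), and the reserved space is now released if virtual-space setup fails. A rough sketch of the release-on-failure idea, with malloc/free standing in for the reserve/release primitives (all names illustrative):

    #include <cstdlib>

    // Illustrative stand-ins for a reserve/release pair: if a later setup
    // step fails, the earlier reservation must be released so the address
    // range is not leaked (the rs.release() added above).
    struct Reservation {
      void*  base;
      size_t size;
    };

    static bool reserve(Reservation* r, size_t bytes) {
      r->base = std::malloc(bytes);   // stands in for an OS-level reserve
      r->size = bytes;
      return r->base != 0;
    }

    static void release(Reservation* r) {
      std::free(r->base);
      r->base = 0;
      r->size = 0;
    }

    static bool setup_bitmap_storage(size_t bytes) {
      Reservation rs;
      if (!reserve(&rs, bytes)) return false;
      bool commit_ok = (bytes > 0);   // stands in for the commit/expand step
      if (!commit_ok) {
        release(&rs);                 // don't leak the reservation on failure
        return false;
      }
      return true;
    }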
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -111,8 +111,8 @@ // size than is needed or wanted for the perm gen. Use the "compound // alignment" ReservedSpace ctor to avoid having to use the same page size for // all gens. - ReservedSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size, - og_align); + ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size, + og_align); os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz, heap_rs.base(), pg_max_size); os::trace_page_sizes("ps main", og_min_size + yg_min_size, @@ -174,13 +174,13 @@ const size_t initial_promo_size = MIN2(eden_capacity, old_capacity); _size_policy = new PSAdaptiveSizePolicy(eden_capacity, - initial_promo_size, - young_gen()->to_space()->capacity_in_bytes(), - intra_generation_alignment(), - max_gc_pause_sec, - max_gc_minor_pause_sec, - GCTimeRatio - ); + initial_promo_size, + young_gen()->to_space()->capacity_in_bytes(), + intra_heap_alignment(), + max_gc_pause_sec, + max_gc_minor_pause_sec, + GCTimeRatio + ); _perm_gen = new PSPermGen(perm_rs, pg_align, @@ -213,10 +213,6 @@ PSScavenge::initialize(); if (UseParallelOldGC) { PSParallelCompact::post_initialize(); - if (VerifyParallelOldWithMarkSweep) { - // Will be used for verification of par old. - PSMarkSweep::initialize(); - } } else { PSMarkSweep::initialize(); } @@ -405,7 +401,7 @@ return result; } if (!is_tlab && - size >= (young_gen()->eden_space()->capacity_in_words() / 2)) { + size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) { result = old_gen()->allocate(size, is_tlab); if (result != NULL) { return result; @@ -593,6 +589,31 @@ full_gc_count = Universe::heap()->total_full_collections(); result = perm_gen()->allocate_permanent(size); + + if (result != NULL) { + return result; + } + + if (GC_locker::is_active_and_needs_gc()) { + // If this thread is not in a jni critical section, we stall + // the requestor until the critical section has cleared and + // GC allowed. When the critical section clears, a GC is + // initiated by the last thread exiting the critical section; so + // we retry the allocation sequence from the beginning of the loop, + // rather than causing more, now probably unnecessary, GC attempts. + JavaThread* jthr = JavaThread::current(); + if (!jthr->in_critical()) { + MutexUnlocker mul(Heap_lock); + GC_locker::stall_until_clear(); + continue; + } else { + if (CheckJNICalls) { + fatal("Possible deadlock due to allocating while" + " in jni critical section"); + } + return NULL; + } + } } if (result == NULL) { @@ -625,14 +646,20 @@ if (op.prologue_succeeded()) { assert(Universe::heap()->is_in_permanent_or_null(op.result()), "result not in heap"); - // If a NULL results is being returned, an out-of-memory - // will be thrown now. Clear the gc_time_limit_exceeded - // flag to avoid the following situation. - // gc_time_limit_exceeded is set during a collection - // the collection fails to return enough space and an OOM is thrown - // the next GC is skipped because the gc_time_limit_exceeded - // flag is set and another OOM is thrown - if (op.result() == NULL) { + // If GC was locked out during VM operation then retry allocation + // and/or stall as necessary. + if (op.gc_locked()) { + assert(op.result() == NULL, "must be NULL if gc_locked() is true"); + continue; // retry and/or stall as necessary + } + // If a NULL results is being returned, an out-of-memory + // will be thrown now. 
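The new permanent-generation allocation path above stalls a thread that is not in a JNI critical section until the GC locker clears, then retries the whole allocation loop instead of forcing extra collections. A condition-variable sketch of that control flow (the mutex, flag, and wait predicate are illustrative, not the VM's GC_locker or Heap_lock):

    #include <condition_variable>
    #include <mutex>

    static std::mutex              heap_lock;
    static std::condition_variable gc_clear_cv;
    static bool                    gc_locked = false;  // stands in for GC_locker state

    // Called by an allocating thread that is NOT inside a JNI critical
    // section: wait for critical sections to clear, then let the caller
    // retry the allocation loop from the top (the `continue` above).
    // A thread leaving the last critical section would clear gc_locked
    // and notify_all() on gc_clear_cv.
    static void stall_until_clear() {
      std::unique_lock<std::mutex> lk(heap_lock);
      gc_clear_cv.wait(lk, [] { return !gc_locked; });
    }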
Clear the gc_time_limit_exceeded + // flag to avoid the following situation. + // gc_time_limit_exceeded is set during a collection + // the collection fails to return enough space and an OOM is thrown + // the next GC is skipped because the gc_time_limit_exceeded + // flag is set and another OOM is thrown + if (op.result() == NULL) { size_policy()->set_gc_time_limit_exceeded(false); } return op.result(); @@ -909,4 +936,24 @@ // Delegate the resize to the generation. _old_gen->resize(desired_free_space); -} +} + +#ifndef PRODUCT +void ParallelScavengeHeap::record_gen_tops_before_GC() { + if (ZapUnusedHeapArea) { + young_gen()->record_spaces_top(); + old_gen()->record_spaces_top(); + perm_gen()->record_spaces_top(); + } +} + +void ParallelScavengeHeap::gen_mangle_unused_area() { + if (ZapUnusedHeapArea) { + young_gen()->eden_space()->mangle_unused_area(); + young_gen()->to_space()->mangle_unused_area(); + young_gen()->from_space()->mangle_unused_area(); + old_gen()->object_space()->mangle_unused_area(); + perm_gen()->object_space()->mangle_unused_area(); + } +} +#endif --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp 2009-08-01 04:10:54.501856626 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp 2009-08-01 04:10:54.418966181 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parallelScavengeHeap.hpp 1.62 07/10/04 10:49:30 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,9 +61,9 @@ public: ParallelScavengeHeap() : CollectedHeap() { - set_alignment(_perm_gen_alignment, intra_generation_alignment()); - set_alignment(_young_gen_alignment, intra_generation_alignment()); - set_alignment(_old_gen_alignment, intra_generation_alignment()); + set_alignment(_perm_gen_alignment, intra_heap_alignment()); + set_alignment(_young_gen_alignment, intra_heap_alignment()); + set_alignment(_old_gen_alignment, intra_heap_alignment()); } // For use by VM operations @@ -95,14 +95,14 @@ void post_initialize(); void update_counters(); - // The alignment used for the various generations. size_t perm_gen_alignment() const { return _perm_gen_alignment; } size_t young_gen_alignment() const { return _young_gen_alignment; } size_t old_gen_alignment() const { return _old_gen_alignment; } - // The alignment used for eden and survivors within the young gen. - size_t intra_generation_alignment() const { return 64 * K; } + // The alignment used for eden and survivors within the young gen + // and for boundary between young gen and old gen. + size_t intra_heap_alignment() const { return 64 * K; } size_t capacity() const; size_t used() const; @@ -172,8 +172,9 @@ size_t large_typearray_limit() { return FastAllocateSizeLimit; } bool supports_inline_contig_alloc() const { return !UseNUMA; } - HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : NULL; } - HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : NULL; } + + HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; } + HeapWord** end_addr() const { return !UseNUMA ? 
young_gen()->end_addr() : (HeapWord**)-1; } void ensure_parsability(bool retire_tlabs); void accumulate_statistics_all_tlabs(); @@ -186,6 +187,20 @@ size_t tlab_capacity(Thread* thr) const; size_t unsafe_max_tlab_alloc(Thread* thr) const; + // Can a compiler initialize a new object without store barriers? + // This permission only extends from the creation of a new object + // via a TLAB up to the first subsequent safepoint. + virtual bool can_elide_tlab_store_barriers() const { + return true; + } + + // Can a compiler elide a store barrier when it writes + // a permanent oop into the heap? Applies when the compiler + // is storing x to the heap, where x->is_perm() is true. + virtual bool can_elide_permanent_oop_store_barriers() const { + return true; + } + void oop_iterate(OopClosure* cl); void object_iterate(ObjectClosure* cl); void permanent_oop_iterate(OopClosure* cl); @@ -215,11 +230,17 @@ // Resize the old generation. The reserved space for the // generation may be expanded in preparation for the resize. void resize_old_gen(size_t desired_free_space); + + // Save the tops of the spaces in all generations + void record_gen_tops_before_GC() PRODUCT_RETURN; + + // Mangle the unused parts of all spaces in the heap + void gen_mangle_unused_area() PRODUCT_RETURN; }; inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val) { assert(is_power_of_2((intptr_t)val), "must be a power of 2"); - var = round_to(val, intra_generation_alignment()); + var = round_to(val, intra_heap_alignment()); return var; } --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp 2009-08-01 04:10:55.344927566 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp 2009-08-01 04:10:55.260305860 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)pcTasks.cpp 1.22 07/06/22 16:49:49 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -149,7 +149,7 @@ { ParallelScavengeHeap* heap = PSParallelCompact::gc_heap(); uint parallel_gc_threads = heap->gc_task_manager()->workers(); - TaskQueueSetSuper* qset = ParCompactionManager::chunk_array(); + RegionTaskQueueSet* qset = ParCompactionManager::region_array(); ParallelTaskTerminator terminator(parallel_gc_threads, qset); GCTaskQueue* q = GCTaskQueue::create(); for(uint i=0; iis_gc_active(), "called outside gc"); - NOT_PRODUCT(TraceTime tm("StealChunkCompactionTask", + NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask", PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); - // Has to drain stacks first because there may be chunks on + // Has to drain stacks first because there may be regions on // preloaded onto the stack and this thread may never have // done a draining task. Are the draining tasks needed? 
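set_alignment() above still just rounds the supplied value up to the (power-of-two) intra_heap_alignment(). For reference, a standalone version of that rounding, assuming a power-of-two alignment as the original assert does:

    #include <cassert>
    #include <cstddef>

    // Round val up to a multiple of a power-of-two alignment, as the
    // round_to(val, intra_heap_alignment()) call above does.
    static size_t round_to_alignment(size_t val, size_t alignment) {
      assert(((alignment & (alignment - 1)) == 0) &&
             "alignment must be a power of 2");
      return (val + alignment - 1) & ~(alignment - 1);
    }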
- cm->drain_chunk_stacks(); + cm->drain_region_stacks(); - size_t chunk_index = 0; + size_t region_index = 0; int random_seed = 17; // If we're the termination task, try 10 rounds of stealing before // setting the termination flag while(true) { - if (ParCompactionManager::steal(which, &random_seed, chunk_index)) { - PSParallelCompact::fill_and_update_chunk(cm, chunk_index); - cm->drain_chunk_stacks(); + if (ParCompactionManager::steal(which, &random_seed, region_index)) { + PSParallelCompact::fill_and_update_region(cm, region_index); + cm->drain_region_stacks(); } else { if (terminator()->offer_termination()) { break; @@ -251,12 +251,11 @@ } UpdateDensePrefixTask::UpdateDensePrefixTask( - PSParallelCompact::SpaceId space_id, - size_t chunk_index_start, - size_t chunk_index_end) : - _space_id(space_id), _chunk_index_start(chunk_index_start), - _chunk_index_end(chunk_index_end) -{} + PSParallelCompact::SpaceId space_id, + size_t region_index_start, + size_t region_index_end) : + _space_id(space_id), _region_index_start(region_index_start), + _region_index_end(region_index_end) {} void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) { @@ -268,8 +267,8 @@ PSParallelCompact::update_and_deadwood_in_dense_prefix(cm, _space_id, - _chunk_index_start, - _chunk_index_end); + _region_index_start, + _region_index_end); } void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) { @@ -281,7 +280,7 @@ ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); - // Process any chunks already in the compaction managers stacks. - cm->drain_chunk_stacks(); + // Process any regions already in the compaction managers stacks. + cm->drain_region_stacks(); } --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp 2009-08-01 04:10:56.208617926 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp 2009-08-01 04:10:56.135200329 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)pcTasks.hpp 1.19 07/05/05 17:05:26 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -191,18 +191,18 @@ }; // -// StealChunkCompactionTask +// StealRegionCompactionTask // // This task is used to distribute work to idle threads. 
// -class StealChunkCompactionTask : public GCTask { +class StealRegionCompactionTask : public GCTask { private: ParallelTaskTerminator* const _terminator; public: - StealChunkCompactionTask(ParallelTaskTerminator* t); + StealRegionCompactionTask(ParallelTaskTerminator* t); - char* name() { return (char *)"steal-chunk-task"; } + char* name() { return (char *)"steal-region-task"; } ParallelTaskTerminator* terminator() { return _terminator; } virtual void do_it(GCTaskManager* manager, uint which); @@ -218,15 +218,15 @@ class UpdateDensePrefixTask : public GCTask { private: PSParallelCompact::SpaceId _space_id; - size_t _chunk_index_start; - size_t _chunk_index_end; + size_t _region_index_start; + size_t _region_index_end; public: char* name() { return (char *)"update-dense_prefix-task"; } UpdateDensePrefixTask(PSParallelCompact::SpaceId space_id, - size_t chunk_index_start, - size_t chunk_index_end); + size_t region_index_start, + size_t region_index_end); virtual void do_it(GCTaskManager* manager, uint which); }; @@ -234,17 +234,17 @@ // // DrainStacksCompactionTask // -// This task processes chunks that have been added to the stacks of each +// This task processes regions that have been added to the stacks of each // compaction manager. // // Trying to use one draining thread does not work because there are no // guarantees about which task will be picked up by which thread. For example, -// if thread A gets all the preloaded chunks, thread A may not get a draining +// if thread A gets all the preloaded regions, thread A may not get a draining // task (they may all be done by other threads). // class DrainStacksCompactionTask : public GCTask { public: - char* name() { return (char *)"drain-chunk-task"; } + char* name() { return (char *)"drain-region-task"; } virtual void do_it(GCTaskManager* manager, uint which); }; --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp 2009-08-01 04:10:57.007280668 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp 2009-08-01 04:10:56.938615836 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)prefetchQueue.hpp 1.13 07/05/05 17:05:28 JVM" #endif /* - * Copyright 2002-2003 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,8 +36,8 @@ class PrefetchQueue : public CHeapObj { private: - oop* _prefetch_queue[PREFETCH_QUEUE_SIZE]; - unsigned int _prefetch_index; + void* _prefetch_queue[PREFETCH_QUEUE_SIZE]; + uint _prefetch_index; public: int length() { return PREFETCH_QUEUE_SIZE; } @@ -49,20 +49,21 @@ _prefetch_index = 0; } - inline oop* push_and_pop(oop* p) { - Prefetch::write((*p)->mark_addr(), 0); + template inline void* push_and_pop(T* p) { + oop o = oopDesc::load_decode_heap_oop_not_null(p); + Prefetch::write(o->mark_addr(), 0); // This prefetch is intended to make sure the size field of array // oops is in cache. It assumes the the object layout is // mark -> klass -> size, and that mark and klass are heapword // sized. If this should change, this prefetch will need updating! - Prefetch::write((*p)->mark_addr() + (HeapWordSize*2), 0); + Prefetch::write(o->mark_addr() + (HeapWordSize*2), 0); _prefetch_queue[_prefetch_index++] = p; _prefetch_index &= (PREFETCH_QUEUE_SIZE-1); return _prefetch_queue[_prefetch_index]; } // Stores a NULL pointer in the pop'd location. 
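The StealRegionCompactionTask::do_it loop above alternates between stealing a region index from some other worker's queue and offering termination once nothing can be stolen. A single-threaded sketch of that shape using plain containers (the queues and the "termination" here are simplifications, not the VM's lock-free task queues or ParallelTaskTerminator):

    #include <cstddef>
    #include <deque>
    #include <vector>

    typedef size_t RegionIndex;

    // Try to take a region index from some queue other than our own.
    static bool steal(std::vector<std::deque<RegionIndex> >& queues,
                      size_t self, RegionIndex& out) {
      for (size_t i = 0; i < queues.size(); ++i) {
        if (i == self || queues[i].empty()) continue;
        out = queues[i].back();
        queues[i].pop_back();
        return true;
      }
      return false;
    }

    static void compaction_worker(std::vector<std::deque<RegionIndex> >& queues,
                                  size_t self) {
      RegionIndex region;
      while (true) {
        if (steal(queues, self, region)) {
          // fill_and_update_region(region) would run here; afterwards the
          // worker drains any regions the fill pushed onto its own stack.
          (void)region;
        } else {
          break;  // stands in for terminator()->offer_termination() succeeding
        }
      }
    }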
- inline oop* pop() { + inline void* pop() { _prefetch_queue[_prefetch_index++] = NULL; _prefetch_index &= (PREFETCH_QUEUE_SIZE-1); return _prefetch_queue[_prefetch_index]; --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp 2009-08-01 04:10:57.753595899 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp 2009-08-01 04:10:57.671495516 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psCompactionManager.cpp 1.17 06/07/10 23:27:02 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,8 +32,8 @@ ParCompactionManager** ParCompactionManager::_manager_array = NULL; OopTaskQueueSet* ParCompactionManager::_stack_array = NULL; ObjectStartArray* ParCompactionManager::_start_array = NULL; -ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL; -ChunkTaskQueueSet* ParCompactionManager::_chunk_array = NULL; +ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL; +RegionTaskQueueSet* ParCompactionManager::_region_array = NULL; ParCompactionManager::ParCompactionManager() : _action(CopyAndUpdate) { @@ -49,13 +49,13 @@ // We want the overflow stack to be permanent _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray(10, true); -#ifdef USE_ChunkTaskQueueWithOverflow - chunk_stack()->initialize(); +#ifdef USE_RegionTaskQueueWithOverflow + region_stack()->initialize(); #else - chunk_stack()->initialize(); + region_stack()->initialize(); // We want the overflow stack to be permanent - _chunk_overflow_stack = + _region_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray(10, true); #endif @@ -89,18 +89,18 @@ _stack_array = new OopTaskQueueSet(parallel_gc_threads); guarantee(_stack_array != NULL, "Count not initialize promotion manager"); - _chunk_array = new ChunkTaskQueueSet(parallel_gc_threads); - guarantee(_chunk_array != NULL, "Count not initialize promotion manager"); + _region_array = new RegionTaskQueueSet(parallel_gc_threads); + guarantee(_region_array != NULL, "Count not initialize promotion manager"); // Create and register the ParCompactionManager(s) for the worker threads. 
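The PrefetchQueue above is a power-of-two ring buffer: push_and_pop prefetches the new entry and hands back the entry queued PREFETCH_QUEUE_SIZE-1 pushes earlier, whose prefetch should have completed by then. A standalone sketch of the indexing (without the actual Prefetch::write calls):

    #include <cstddef>

    // Ring buffer of pending prefetches. N must be a power of two so the
    // index can wrap with a mask, as in the PrefetchQueue above.
    template <size_t N>
    class PrefetchRing {
      void*        _slots[N];
      unsigned int _index;
     public:
      PrefetchRing() : _index(0) {
        for (size_t i = 0; i < N; ++i) _slots[i] = 0;
      }
      void* push_and_pop(void* p) {
        _slots[_index++] = p;       // the real code issues prefetches for p here
        _index &= (N - 1);          // wrap; N is a power of two
        return _slots[_index];      // oldest remaining entry (may be null)
      }
      void* pop() {
        _slots[_index++] = 0;       // store NULL in the popped location
        _index &= (N - 1);
        return _slots[_index];
      }
    };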
for(uint i=0; iregister_queue(i, _manager_array[i]->marking_stack()); -#ifdef USE_ChunkTaskQueueWithOverflow - chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()->task_queue()); +#ifdef USE_RegionTaskQueueWithOverflow + region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue()); #else - chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()); + region_array()->register_queue(i, _manager_array[i]->region_stack()); #endif } @@ -156,31 +156,31 @@ return NULL; } -// Save chunk on a stack -void ParCompactionManager::save_for_processing(size_t chunk_index) { +// Save region on a stack +void ParCompactionManager::save_for_processing(size_t region_index) { #ifdef ASSERT const ParallelCompactData& sd = PSParallelCompact::summary_data(); - ParallelCompactData::ChunkData* const chunk_ptr = sd.chunk(chunk_index); - assert(chunk_ptr->claimed(), "must be claimed"); - assert(chunk_ptr->_pushed++ == 0, "should only be pushed once"); + ParallelCompactData::RegionData* const region_ptr = sd.region(region_index); + assert(region_ptr->claimed(), "must be claimed"); + assert(region_ptr->_pushed++ == 0, "should only be pushed once"); #endif - chunk_stack_push(chunk_index); + region_stack_push(region_index); } -void ParCompactionManager::chunk_stack_push(size_t chunk_index) { +void ParCompactionManager::region_stack_push(size_t region_index) { -#ifdef USE_ChunkTaskQueueWithOverflow - chunk_stack()->save(chunk_index); +#ifdef USE_RegionTaskQueueWithOverflow + region_stack()->save(region_index); #else - if(!chunk_stack()->push(chunk_index)) { - chunk_overflow_stack()->push(chunk_index); - } + if(!region_stack()->push(region_index)) { + region_overflow_stack()->push(region_index); + } #endif } -bool ParCompactionManager::retrieve_for_processing(size_t& chunk_index) { -#ifdef USE_ChunkTaskQueueWithOverflow - return chunk_stack()->retrieve(chunk_index); +bool ParCompactionManager::retrieve_for_processing(size_t& region_index) { +#ifdef USE_RegionTaskQueueWithOverflow + return region_stack()->retrieve(region_index); #else // Should not be used in the parallel case ShouldNotReachHere(); @@ -233,14 +233,14 @@ assert(overflow_stack()->length() == 0, "Sanity"); } -void ParCompactionManager::drain_chunk_overflow_stack() { - size_t chunk_index = (size_t) -1; - while(chunk_stack()->retrieve_from_overflow(chunk_index)) { - PSParallelCompact::fill_and_update_chunk(this, chunk_index); +void ParCompactionManager::drain_region_overflow_stack() { + size_t region_index = (size_t) -1; + while(region_stack()->retrieve_from_overflow(region_index)) { + PSParallelCompact::fill_and_update_region(this, region_index); } } -void ParCompactionManager::drain_chunk_stacks() { +void ParCompactionManager::drain_region_stacks() { #ifdef ASSERT ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); @@ -252,42 +252,42 @@ #if 1 // def DO_PARALLEL - the serial code hasn't been updated do { -#ifdef USE_ChunkTaskQueueWithOverflow +#ifdef USE_RegionTaskQueueWithOverflow // Drain overflow stack first, so other threads can steal from // claimed stack while we work. 
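region_stack_push above tries the bounded, stealable task queue first and spills to a growable overflow stack only when the queue is full; the drain loops then empty the overflow first so other workers can keep stealing from the bounded part. A sketch of that idiom with std::vector standing in for both structures (not the VM's RegionTaskQueue or GrowableArray):

    #include <cstddef>
    #include <vector>

    class RegionStack {
      std::vector<size_t> _queue;     // bounded, stealable part
      std::vector<size_t> _overflow;  // unbounded, private spill area
      size_t              _capacity;
     public:
      explicit RegionStack(size_t capacity) : _capacity(capacity) {}

      void push(size_t region_index) {
        if (_queue.size() < _capacity) {
          _queue.push_back(region_index);     // normal case
        } else {
          _overflow.push_back(region_index);  // queue full: spill to overflow
        }
      }

      bool pop(size_t& region_index) {
        // Drain the overflow first so other threads can keep stealing from
        // the bounded queue while this worker works.
        std::vector<size_t>* src = !_overflow.empty() ? &_overflow : &_queue;
        if (src->empty()) return false;
        region_index = src->back();
        src->pop_back();
        return true;
      }
    };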
- size_t chunk_index = (size_t) -1; - while(chunk_stack()->retrieve_from_overflow(chunk_index)) { - PSParallelCompact::fill_and_update_chunk(this, chunk_index); + size_t region_index = (size_t) -1; + while(region_stack()->retrieve_from_overflow(region_index)) { + PSParallelCompact::fill_and_update_region(this, region_index); } - while (chunk_stack()->retrieve_from_stealable_queue(chunk_index)) { - PSParallelCompact::fill_and_update_chunk(this, chunk_index); + while (region_stack()->retrieve_from_stealable_queue(region_index)) { + PSParallelCompact::fill_and_update_region(this, region_index); } - } while (!chunk_stack()->is_empty()); + } while (!region_stack()->is_empty()); #else // Drain overflow stack first, so other threads can steal from // claimed stack while we work. - while(!chunk_overflow_stack()->is_empty()) { - size_t chunk_index = chunk_overflow_stack()->pop(); - PSParallelCompact::fill_and_update_chunk(this, chunk_index); + while(!region_overflow_stack()->is_empty()) { + size_t region_index = region_overflow_stack()->pop(); + PSParallelCompact::fill_and_update_region(this, region_index); } - size_t chunk_index = -1; + size_t region_index = -1; // obj is a reference!!! - while (chunk_stack()->pop_local(chunk_index)) { + while (region_stack()->pop_local(region_index)) { // It would be nice to assert about the type of objects we might // pop, but they can come from anywhere, unfortunately. - PSParallelCompact::fill_and_update_chunk(this, chunk_index); + PSParallelCompact::fill_and_update_region(this, region_index); } - } while((chunk_stack()->size() != 0) || - (chunk_overflow_stack()->length() != 0)); + } while((region_stack()->size() != 0) || + (region_overflow_stack()->length() != 0)); #endif -#ifdef USE_ChunkTaskQueueWithOverflow - assert(chunk_stack()->is_empty(), "Sanity"); +#ifdef USE_RegionTaskQueueWithOverflow + assert(region_stack()->is_empty(), "Sanity"); #else - assert(chunk_stack()->size() == 0, "Sanity"); - assert(chunk_overflow_stack()->length() == 0, "Sanity"); + assert(region_stack()->size() == 0, "Sanity"); + assert(region_overflow_stack()->length() == 0, "Sanity"); #endif #else oop obj; --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp 2009-08-01 04:10:58.627480968 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp 2009-08-01 04:10:58.558901212 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psCompactionManager.hpp 1.17 07/05/05 17:05:29 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,7 @@ friend class ParallelTaskTerminator; friend class ParMarkBitMap; friend class PSParallelCompact; - friend class StealChunkCompactionTask; + friend class StealRegionCompactionTask; friend class UpdateAndFillClosure; friend class RefProcTaskExecutor; @@ -75,27 +75,27 @@ // ------------------------ End don't putback if not needed private: - static ParCompactionManager** _manager_array; - static OopTaskQueueSet* _stack_array; - static ObjectStartArray* _start_array; - static ChunkTaskQueueSet* _chunk_array; - static PSOldGen* _old_gen; + static ParCompactionManager** _manager_array; + static OopTaskQueueSet* _stack_array; + static ObjectStartArray* _start_array; + static RegionTaskQueueSet* _region_array; + static PSOldGen* _old_gen; - OopTaskQueue _marking_stack; - GrowableArray* _overflow_stack; + OopTaskQueue _marking_stack; + GrowableArray* _overflow_stack; // Is there a way to reuse the _marking_stack for the - // saving empty chunks? For now just create a different + // saving empty regions? For now just create a different // type of TaskQueue. -#ifdef USE_ChunkTaskQueueWithOverflow - ChunkTaskQueueWithOverflow _chunk_stack; +#ifdef USE_RegionTaskQueueWithOverflow + RegionTaskQueueWithOverflow _region_stack; #else - ChunkTaskQueue _chunk_stack; - GrowableArray* _chunk_overflow_stack; + RegionTaskQueue _region_stack; + GrowableArray* _region_overflow_stack; #endif #if 1 // does this happen enough to need a per thread stack? - GrowableArray* _revisit_klass_stack; + GrowableArray* _revisit_klass_stack; #endif static ParMarkBitMap* _mark_bitmap; @@ -103,21 +103,22 @@ static PSOldGen* old_gen() { return _old_gen; } static ObjectStartArray* start_array() { return _start_array; } - static OopTaskQueueSet* stack_array() { return _stack_array; } + static OopTaskQueueSet* stack_array() { return _stack_array; } static void initialize(ParMarkBitMap* mbm); protected: // Array of tasks. Needed by the ParallelTaskTerminator. - static ChunkTaskQueueSet* chunk_array() { return _chunk_array; } - - OopTaskQueue* marking_stack() { return &_marking_stack; } - GrowableArray* overflow_stack() { return _overflow_stack; } -#ifdef USE_ChunkTaskQueueWithOverflow - ChunkTaskQueueWithOverflow* chunk_stack() { return &_chunk_stack; } + static RegionTaskQueueSet* region_array() { return _region_array; } + OopTaskQueue* marking_stack() { return &_marking_stack; } + GrowableArray* overflow_stack() { return _overflow_stack; } +#ifdef USE_RegionTaskQueueWithOverflow + RegionTaskQueueWithOverflow* region_stack() { return &_region_stack; } #else - ChunkTaskQueue* chunk_stack() { return &_chunk_stack; } - GrowableArray* chunk_overflow_stack() { return _chunk_overflow_stack; } + RegionTaskQueue* region_stack() { return &_region_stack; } + GrowableArray* region_overflow_stack() { + return _region_overflow_stack; + } #endif // Pushes onto the marking stack. If the marking stack is full, @@ -126,9 +127,9 @@ // Do not implement an equivalent stack_pop. Deal with the // marking stack and overflow stack directly. - // Pushes onto the chunk stack. If the chunk stack is full, - // pushes onto the chunk overflow stack. - void chunk_stack_push(size_t chunk_index); + // Pushes onto the region stack. If the region stack is full, + // pushes onto the region overflow stack. + void region_stack_push(size_t region_index); public: Action action() { return _action; } @@ -163,10 +164,10 @@ // Get a oop for scanning. If returns null, no oop were found. 
oop retrieve_for_scanning(); - // Save chunk for later processing. Must not fail. - void save_for_processing(size_t chunk_index); - // Get a chunk for processing. If returns null, no chunk were found. - bool retrieve_for_processing(size_t& chunk_index); + // Save region for later processing. Must not fail. + void save_for_processing(size_t region_index); + // Get a region for processing. If returns null, no region were found. + bool retrieve_for_processing(size_t& region_index); // Access function for compaction managers static ParCompactionManager* gc_thread_compaction_manager(int index); @@ -175,18 +176,18 @@ return stack_array()->steal(queue_num, seed, t); } - static bool steal(int queue_num, int* seed, ChunkTask& t) { - return chunk_array()->steal(queue_num, seed, t); + static bool steal(int queue_num, int* seed, RegionTask& t) { + return region_array()->steal(queue_num, seed, t); } // Process tasks remaining on any stack void drain_marking_stacks(OopClosure *blk); // Process tasks remaining on any stack - void drain_chunk_stacks(); + void drain_region_stacks(); // Process tasks remaining on any stack - void drain_chunk_overflow_stack(); + void drain_region_overflow_stack(); // Debugging support #ifdef ASSERT --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp 2009-08-01 04:10:59.489367177 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp 2009-08-01 04:10:59.400551143 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psMarkSweep.cpp 1.92 07/06/08 23:11:01 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,9 +38,7 @@ _ref_processor = new ReferenceProcessor(mr, true, // atomic_discovery false); // mt_discovery - if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) { - _counters = new CollectorCounters("PSMarkSweep", 1); - } + _counters = new CollectorCounters("PSMarkSweep", 1); } // This method contains all heap specific policy for invoking mark sweep. @@ -101,6 +99,9 @@ // Increment the invocation count heap->increment_total_collections(true /* full */); + // Save information needed to minimize mangling + heap->record_gen_tops_before_GC(); + // We need to track unique mark sweep invocations as well. _total_invocations++; @@ -174,6 +175,7 @@ COMPILER2_PRESENT(DerivedPointerTable::clear()); ref_processor()->enable_discovery(); + ref_processor()->setup_policy(clear_all_softrefs); mark_sweep_phase1(clear_all_softrefs); @@ -190,7 +192,13 @@ restore_marks(); deallocate_stacks(); - + + if (ZapUnusedHeapArea) { + // Do a complete mangle (top to end) because the usage for + // scratch does not maintain a top pointer. + young_gen->to_space()->mangle_unused_area_complete(); + } + eden_empty = young_gen->eden_space()->is_empty(); if (!eden_empty) { eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen); @@ -200,8 +208,8 @@ // input to soft ref clearing policy at the next gc. 
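Several hunks in this patch (mangle_unused_area_complete above, check_mangled_unused_area earlier) rely on the same ZapUnusedHeapArea idea: unused space is filled with a recognizable pattern when it is given back, and checked later to confirm nothing wrote into it. A toy version of that zap/verify pair (the fill byte is illustrative, not HotSpot's mangle word):

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    static const unsigned char kMangleByte = 0xAB;  // illustrative pattern

    // Fill an unused range with the pattern (the "zap"/mangle step).
    static void mangle_unused(unsigned char* from, unsigned char* to) {
      std::memset(from, kMangleByte, static_cast<size_t>(to - from));
    }

    // Later, verify the range is still untouched (the "check" step).
    static void check_mangled_unused(const unsigned char* from,
                                     const unsigned char* to) {
      for (const unsigned char* p = from; p < to; ++p) {
        assert(*p == kMangleByte && "unused area was written to unexpectedly");
      }
    }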
Universe::update_heap_info_at_gc(); - survivors_empty = young_gen->from_space()->is_empty() && - young_gen->to_space()->is_empty(); + survivors_empty = young_gen->from_space()->is_empty() && + young_gen->to_space()->is_empty(); young_gen_empty = eden_empty && survivors_empty; BarrierSet* bs = heap->barrier_set(); @@ -347,6 +355,11 @@ perm_gen->verify_object_start_array(); } + if (ZapUnusedHeapArea) { + old_gen->object_space()->check_mangled_unused_area_complete(); + perm_gen->object_space()->check_mangled_unused_area_complete(); + } + NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); if (PrintHeapAtGC) { @@ -379,7 +392,7 @@ // full GC. const size_t alignment = old_gen->virtual_space()->alignment(); const size_t eden_used = eden_space->used_in_bytes(); - const size_t promoted = (size_t)(size_policy->avg_promoted()->padded_average()); + const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average(); const size_t absorb_size = align_size_up(eden_used + promoted, alignment); const size_t eden_capacity = eden_space->capacity_in_bytes(); @@ -406,16 +419,14 @@ // Fill the unused part of the old gen. MutableSpace* const old_space = old_gen->object_space(); - MemRegion old_gen_unused(old_space->top(), old_space->end()); - - // If the unused part of the old gen cannot be filled, skip - // absorbing eden. - if (old_gen_unused.word_size() < SharedHeap::min_fill_size()) { - return false; - } + HeapWord* const unused_start = old_space->top(); + size_t const unused_words = pointer_delta(old_space->end(), unused_start); - if (!old_gen_unused.is_empty()) { - SharedHeap::fill_region_with_object(old_gen_unused); + if (unused_words > 0) { + if (unused_words < CollectedHeap::min_fill_size()) { + return false; // If the old gen cannot be filled, must give up. + } + CollectedHeap::fill_with_objects(unused_start, unused_words); } // Take the live data from eden and set both top and end in the old gen to @@ -431,9 +442,8 @@ // Update the object start array for the filler object and the data from eden. ObjectStartArray* const start_array = old_gen->start_array(); - HeapWord* const start = old_gen_unused.start(); - for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) { - start_array->allocate_block(addr); + for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) { + start_array->allocate_block(p); } // Could update the promoted average here, but it is not typically updated at @@ -507,24 +517,10 @@ follow_stack(); // Process reference objects found during marking - - // Skipping the reference processing for VerifyParallelOldWithMarkSweep - // affects the marking (makes it different). 
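absorb_live_data_from_eden above now gives up when the unused tail of the old generation is too small to hold even a minimal filler object, and otherwise plugs it with filler objects so the space stays parseable. A sketch of just that decision (the minimum filler size and fill routine are illustrative stand-ins):

    #include <cstddef>

    static const size_t kMinFillWords = 2;  // illustrative minimum filler size

    // Stand-in for CollectedHeap::fill_with_objects().
    static void fill_words(size_t* start, size_t words) {
      (void)start; (void)words;
    }

    // Returns false when the gap cannot be filled and the absorb must be
    // abandoned, mirroring the unused_words checks above.
    static bool plug_old_gen_tail(size_t* top, size_t* end) {
      const size_t unused_words = static_cast<size_t>(end - top);
      if (unused_words > 0) {
        if (unused_words < kMinFillWords) {
          return false;               // gap too small to format: give up
        }
        fill_words(top, unused_words);
      }
      return true;
    }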
{ - ReferencePolicy *soft_ref_policy; - if (clear_all_softrefs) { - soft_ref_policy = new AlwaysClearPolicy(); - } else { -#ifdef COMPILER2 - soft_ref_policy = new LRUMaxHeapPolicy(); -#else - soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - } - assert(soft_ref_policy != NULL,"No soft reference policy"); + ref_processor()->setup_policy(clear_all_softrefs); ref_processor()->process_discovered_references( - soft_ref_policy, is_alive_closure(), mark_and_push_closure(), - follow_stack_closure(), NULL); + is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL); } // Follow system dictionary roots and unload classes --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp 2009-08-01 04:11:00.968456635 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp 2009-08-01 04:11:00.890431543 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psMarkSweepDecorator.cpp 1.26 07/05/17 15:52:53 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -93,10 +93,10 @@ */ bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0); - ssize_t allowed_deadspace = 0; + size_t allowed_deadspace = 0; if (skip_dead) { - int ratio = allowed_dead_ratio(); - allowed_deadspace = (space()->capacity_in_bytes() * ratio / 100) / HeapWordSize; + const size_t ratio = allowed_dead_ratio(); + allowed_deadspace = space()->capacity_in_words() * ratio / 100; } // Fetch the current destination decorator @@ -155,23 +155,18 @@ oop(q)->forward_to(oop(compact_top)); assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark"); } else { - // Don't clear the mark since it's confuses parallel old - // verification. - if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) { - // if the object isn't moving we can just set the mark to the default - // mark and handle it specially later on. - oop(q)->init_mark(); - } + // if the object isn't moving we can just set the mark to the default + // mark and handle it specially later on. + oop(q)->init_mark(); assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL"); } // Update object start array - if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) { - if (start_array) - start_array->allocate_block(compact_top); + if (start_array) { + start_array->allocate_block(compact_top); } - debug_only(MarkSweep::register_live_oop(oop(q), size)); + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size)); compact_top += size; assert(compact_top <= dest->space()->end(), "Exceeding space in destination"); @@ -222,22 +217,17 @@ assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark"); } else { // if the object isn't moving we can just set the mark to the default - // Don't clear the mark since it's confuses parallel old - // verification. - if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) { - // mark and handle it specially later on. - oop(q)->init_mark(); - } + // mark and handle it specially later on. 
+ oop(q)->init_mark(); assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL"); } - if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) { - // Update object start array - if (start_array) - start_array->allocate_block(compact_top); - } + // Update object start array + if (start_array) { + start_array->allocate_block(compact_top); + } - debug_only(MarkSweep::register_live_oop(oop(q), sz)); + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz)); compact_top += sz; assert(compact_top <= dest->space()->end(), "Exceeding space in destination"); @@ -284,26 +274,13 @@ dest->set_compaction_top(compact_top); } -bool PSMarkSweepDecorator::insert_deadspace(ssize_t& allowed_deadspace_words, - HeapWord* q, size_t deadlength) { - allowed_deadspace_words -= deadlength; - if (allowed_deadspace_words >= 0) { - oop(q)->set_mark(markOopDesc::prototype()->set_marked()); - const size_t aligned_min_int_array_size = - align_object_size(typeArrayOopDesc::header_size(T_INT)); - if (deadlength >= aligned_min_int_array_size) { - oop(q)->set_klass(Universe::intArrayKlassObj()); - assert(((deadlength - aligned_min_int_array_size) * (HeapWordSize/sizeof(jint))) < (size_t)max_jint, - "deadspace too big for Arrayoop"); - typeArrayOop(q)->set_length((int)((deadlength - aligned_min_int_array_size) - * (HeapWordSize/sizeof(jint)))); - } else { - assert((int) deadlength == instanceOopDesc::header_size(), - "size for smallest fake dead object doesn't match"); - oop(q)->set_klass(SystemDictionary::object_klass()); - } - assert((int) deadlength == oop(q)->size(), - "make sure size for fake dead object match"); +bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words, + HeapWord* q, size_t deadlength) { + if (allowed_deadspace_words >= deadlength) { + allowed_deadspace_words -= deadlength; + CollectedHeap::fill_with_object(q, deadlength); + oop(q)->set_mark(oop(q)->mark()->set_marked()); + assert((int) deadlength == oop(q)->size(), "bad filler object size"); // Recall that we required "q == compaction_top". 
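insert_deadspace above keeps a per-space budget of dead words that may be left in place as a filler object rather than compacted away. A sketch of that budget check; zeroing the budget once a run no longer fits is this sketch's way of stopping further deadspace (the unsuccessful branch is not shown in the hunk above):

    #include <cstddef>

    static bool try_insert_deadspace(size_t& allowed_deadspace_words,
                                     size_t dead_length_words) {
      if (allowed_deadspace_words >= dead_length_words) {
        allowed_deadspace_words -= dead_length_words;
        // CollectedHeap::fill_with_object(q, deadlength) would format the
        // dead run as a dummy object here, and its header would be marked.
        return true;
      }
      // Budget exhausted in this sketch: compact all remaining dead runs.
      allowed_deadspace_words = 0;
      return false;
    }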
return true; } else { @@ -329,15 +306,11 @@ HeapWord* end = _first_dead; while (q < end) { - debug_only(MarkSweep::track_interior_pointers(oop(q))); - + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); // point all the oops to the new location size_t size = oop(q)->adjust_pointers(); - - debug_only(MarkSweep::check_interior_pointers()); - - debug_only(MarkSweep::validate_live_oop(oop(q), size)); - + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); q += size; } @@ -357,11 +330,11 @@ Prefetch::write(q, interval); if (oop(q)->is_gc_marked()) { // q is alive - debug_only(MarkSweep::track_interior_pointers(oop(q))); + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); // point all the oops to the new location size_t size = oop(q)->adjust_pointers(); - debug_only(MarkSweep::check_interior_pointers()); - debug_only(MarkSweep::validate_live_oop(oop(q), size)); + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); debug_only(prev_q = q); q += size; } else { @@ -395,7 +368,7 @@ while (q < end) { size_t size = oop(q)->size(); assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)"); - debug_only(MarkSweep::live_oop_moved_to(q, size, q)); + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); debug_only(prev_q = q); q += size; } @@ -430,7 +403,7 @@ Prefetch::write(compaction_top, copy_interval); // copy object and reinit its mark - debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top)); + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, compaction_top)); assert(q != compaction_top, "everything in this pass should be moving"); Copy::aligned_conjoint_words(q, compaction_top, size); oop(compaction_top)->init_mark(); @@ -445,5 +418,7 @@ "should point inside space"); space()->set_top(compaction_top()); - if (mangle_free_space) space()->mangle_unused_area(); + if (mangle_free_space) { + space()->mangle_unused_area(); + } } --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp 2009-08-01 04:11:01.919419707 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp 2009-08-01 04:11:01.846910083 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psMarkSweepDecorator.hpp 1.14 07/05/05 17:05:29 JVM" #endif /* - * Copyright 2001-2002 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,14 +42,16 @@ HeapWord* _first_dead; HeapWord* _end_of_live; HeapWord* _compaction_top; - unsigned int _allowed_dead_ratio; + size_t _allowed_dead_ratio; - bool insert_deadspace(ssize_t& allowed_deadspace_words, HeapWord* q, size_t word_len); + bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q, + size_t word_len); public: PSMarkSweepDecorator(MutableSpace* space, ObjectStartArray* start_array, - unsigned int allowed_dead_ratio) : - _space(space), _start_array(start_array), _allowed_dead_ratio(allowed_dead_ratio) { } + size_t allowed_dead_ratio) : + _space(space), _start_array(start_array), + _allowed_dead_ratio(allowed_dead_ratio) { } // During a compacting collection, we need to collapse objects into // spaces in a given order. 
We want to fill space A, space B, and so @@ -60,14 +62,14 @@ static PSMarkSweepDecorator* destination_decorator(); // Accessors - MutableSpace* space() { return _space; } - ObjectStartArray* start_array() { return _start_array; } + MutableSpace* space() { return _space; } + ObjectStartArray* start_array() { return _start_array; } - HeapWord* compaction_top() { return _compaction_top; } - void set_compaction_top(HeapWord* value) { _compaction_top = value; } + HeapWord* compaction_top() { return _compaction_top; } + void set_compaction_top(HeapWord* value) { _compaction_top = value; } - unsigned int allowed_dead_ratio() { return _allowed_dead_ratio; } - void set_allowed_dead_ratio(unsigned int value) { _allowed_dead_ratio = value; } + size_t allowed_dead_ratio() { return _allowed_dead_ratio; } + void set_allowed_dead_ratio(size_t value) { _allowed_dead_ratio = value; } // Work methods void adjust_pointers(); --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp 2009-08-01 04:11:02.781028591 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp 2009-08-01 04:11:02.712014964 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psOldGen.cpp 1.54 07/05/05 17:05:28 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -89,7 +89,16 @@ // MemRegion cmr((HeapWord*)virtual_space()->low(), - (HeapWord*)virtual_space()->high()); + (HeapWord*)virtual_space()->high()); + if (ZapUnusedHeapArea) { + // Mangle newly committed space immediately rather than + // waiting for the initialization of the space even though + // mangling is related to spaces. Doing it here eliminates + // the need to carry along information that a complete mangling + // (bottom to end) needs to be done. + SpaceMangler::mangle_region(cmr); + } + Universe::heap()->barrier_set()->resize_covered_region(cmr); CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set(); @@ -115,7 +124,9 @@ if (_object_space == NULL) vm_exit_during_initialization("Could not allocate an old gen space"); - object_space()->initialize(cmr, true); + object_space()->initialize(cmr, + SpaceDecorator::Clear, + SpaceDecorator::Mangle); _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio); @@ -144,9 +155,7 @@ assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); // Reset start array first. - debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {) start_array()->reset(); - debug_only(}) object_mark_sweep()->precompact(); @@ -207,10 +216,22 @@ } void PSOldGen::expand(size_t bytes) { + if (bytes == 0) { + return; + } MutexLocker x(ExpandHeap_lock); const size_t alignment = virtual_space()->alignment(); size_t aligned_bytes = align_size_up(bytes, alignment); size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment); + if (aligned_bytes == 0){ + // The alignment caused the number of bytes to wrap. An expand_by(0) will + // return true with the implication that and expansion was done when it + // was not. A call to expand implies a best effort to expand by "bytes" + // but not a guarantee. Align down to give a best effort. This is likely + // the most that the generation can expand since it has some capacity to + // start with. 
+ aligned_bytes = align_size_down(bytes, alignment); + } bool success = false; if (aligned_expand_bytes > aligned_bytes) { @@ -223,8 +244,8 @@ success = expand_to_reserved(); } - if (GC_locker::is_active()) { - if (PrintGC && Verbose) { + if (PrintGC && Verbose) { + if (success && GC_locker::is_active()) { gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead"); } } @@ -233,8 +254,24 @@ bool PSOldGen::expand_by(size_t bytes) { assert_lock_strong(ExpandHeap_lock); assert_locked_or_safepoint(Heap_lock); + if (bytes == 0) { + return true; // That's what virtual_space()->expand_by(0) would return + } bool result = virtual_space()->expand_by(bytes); if (result) { + if (ZapUnusedHeapArea) { + // We need to mangle the newly expanded area. The memregion spans + // end -> new_end, we assume that top -> end is already mangled. + // Do the mangling before post_resize() is called because + // the space is available for allocation after post_resize(); + HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high(); + assert(object_space()->end() < virtual_space_high, + "Should be true before post_resize()"); + MemRegion mangle_region(object_space()->end(), virtual_space_high); + // Note that the object space has not yet been updated to + // coincede with the new underlying virtual space. + SpaceMangler::mangle_region(mangle_region); + } post_resize(); if (UsePerfData) { _space_counters->update_capacity(); @@ -351,16 +388,7 @@ start_array()->set_covered_region(new_memregion); Universe::heap()->barrier_set()->resize_covered_region(new_memregion); - // Did we expand? HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high(); - if (object_space()->end() < virtual_space_high) { - // We need to mangle the newly expanded area. The memregion spans - // end -> new_end, we assume that top -> end is already mangled. - // This cannot be safely tested for, as allocation may be taking - // place. - MemRegion mangle_region(object_space()->end(), virtual_space_high); - object_space()->mangle_region(mangle_region); - } // ALWAYS do this last!! object_space()->set_end(virtual_space_high); @@ -465,3 +493,10 @@ VerifyObjectStartArrayClosure check( this, &_start_array ); object_iterate(&check); } + +#ifndef PRODUCT +void PSOldGen::record_spaces_top() { + assert(ZapUnusedHeapArea, "Not mangling unused space"); + object_space()->set_top_for_allocations(); +} +#endif --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp 2009-08-01 04:11:03.670222790 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp 2009-08-01 04:11:03.589162889 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psOldGen.hpp 1.37 07/05/05 17:05:30 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -188,4 +188,8 @@ // Printing support virtual const char* name() const { return _name; } + + // Debugging support + // Save the tops of all spaces for later use during mangling. 
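The expand() hunk above handles the case where rounding the requested byte count up to the alignment wraps to zero (a request near SIZE_MAX), falling back to rounding down rather than reporting a successful no-op expansion. A standalone illustration of that arithmetic:

    #include <cstddef>

    static size_t align_up(size_t bytes, size_t alignment) {
      return (bytes + alignment - 1) & ~(alignment - 1);   // may wrap to 0
    }

    static size_t align_down(size_t bytes, size_t alignment) {
      return bytes & ~(alignment - 1);
    }

    // Best-effort expansion size: if rounding up wrapped, round down instead
    // so a huge request does not silently become expand_by(0) == success.
    static size_t expansion_bytes(size_t requested, size_t alignment) {
      size_t aligned = align_up(requested, alignment);
      if (aligned == 0 && requested != 0) {
        aligned = align_down(requested, alignment);
      }
      return aligned;
    }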
+ void record_spaces_top() PRODUCT_RETURN; }; --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp 2009-08-01 04:11:04.551803577 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp 2009-08-01 04:11:04.453619151 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psParallelCompact.cpp 1.64 08/06/19 15:33:02 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,43 +31,31 @@ #include // All sizes are in HeapWords. -const size_t ParallelCompactData::Log2ChunkSize = 9; // 512 words -const size_t ParallelCompactData::ChunkSize = (size_t)1 << Log2ChunkSize; -const size_t ParallelCompactData::ChunkSizeBytes = ChunkSize << LogHeapWordSize; -const size_t ParallelCompactData::ChunkSizeOffsetMask = ChunkSize - 1; -const size_t ParallelCompactData::ChunkAddrOffsetMask = ChunkSizeBytes - 1; -const size_t ParallelCompactData::ChunkAddrMask = ~ChunkAddrOffsetMask; - -// 32-bit: 128 words covers 4 bitmap words -// 64-bit: 128 words covers 2 bitmap words -const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words -const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize; -const size_t ParallelCompactData::BlockOffsetMask = BlockSize - 1; -const size_t ParallelCompactData::BlockMask = ~BlockOffsetMask; - -const size_t ParallelCompactData::BlocksPerChunk = ChunkSize / BlockSize; - -const ParallelCompactData::ChunkData::chunk_sz_t -ParallelCompactData::ChunkData::dc_shift = 27; - -const ParallelCompactData::ChunkData::chunk_sz_t -ParallelCompactData::ChunkData::dc_mask = ~0U << dc_shift; - -const ParallelCompactData::ChunkData::chunk_sz_t -ParallelCompactData::ChunkData::dc_one = 0x1U << dc_shift; +const size_t ParallelCompactData::Log2RegionSize = 9; // 512 words +const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize; +const size_t ParallelCompactData::RegionSizeBytes = + RegionSize << LogHeapWordSize; +const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1; +const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1; +const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask; + +const ParallelCompactData::RegionData::region_sz_t +ParallelCompactData::RegionData::dc_shift = 27; + +const ParallelCompactData::RegionData::region_sz_t +ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift; + +const ParallelCompactData::RegionData::region_sz_t +ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift; -const ParallelCompactData::ChunkData::chunk_sz_t -ParallelCompactData::ChunkData::los_mask = ~dc_mask; +const ParallelCompactData::RegionData::region_sz_t +ParallelCompactData::RegionData::los_mask = ~dc_mask; -const ParallelCompactData::ChunkData::chunk_sz_t -ParallelCompactData::ChunkData::dc_claimed = 0x8U << dc_shift; +const ParallelCompactData::RegionData::region_sz_t +ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift; -const ParallelCompactData::ChunkData::chunk_sz_t -ParallelCompactData::ChunkData::dc_completed = 0xcU << dc_shift; - -#ifdef ASSERT -short ParallelCompactData::BlockData::_cur_phase = 0; -#endif +const ParallelCompactData::RegionData::region_sz_t +ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift; SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id]; 
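With the renamed constants above, regions are 512 words and all region math is shifts and masks. A small standalone illustration of the address-to-region arithmetic those constants imply (the heap word size and base address are assumptions made for this example):

    #include <cstddef>
    #include <cstdint>

    static const size_t kLog2RegionWords = 9;                      // 512 words
    static const size_t kRegionWords     = size_t(1) << kLog2RegionWords;
    static const size_t kLogHeapWordSize = 3;                      // assume 8-byte words
    static const size_t kRegionBytes     = kRegionWords << kLogHeapWordSize;

    // Map a heap address to its region index relative to a base address.
    static size_t addr_to_region_idx(uintptr_t heap_base, uintptr_t addr) {
      return (addr - heap_base) >> (kLog2RegionWords + kLogHeapWordSize);
    }

    // Align an address down to the start of its region (RegionAddrMask above).
    static uintptr_t region_align_down(uintptr_t addr) {
      return addr & ~(uintptr_t)(kRegionBytes - 1);
    }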
bool PSParallelCompact::_print_phases = false; @@ -84,14 +72,14 @@ #endif // #ifdef ASSERT #ifdef VALIDATE_MARK_SWEEP -GrowableArray* PSParallelCompact::_root_refs_stack = NULL; +GrowableArray* PSParallelCompact::_root_refs_stack = NULL; GrowableArray * PSParallelCompact::_live_oops = NULL; GrowableArray * PSParallelCompact::_live_oops_moved_to = NULL; GrowableArray* PSParallelCompact::_live_oops_size = NULL; size_t PSParallelCompact::_live_oops_index = 0; size_t PSParallelCompact::_live_oops_index_at_perm = 0; -GrowableArray* PSParallelCompact::_other_refs_stack = NULL; -GrowableArray* PSParallelCompact::_adjusted_pointers = NULL; +GrowableArray* PSParallelCompact::_other_refs_stack = NULL; +GrowableArray* PSParallelCompact::_adjusted_pointers = NULL; bool PSParallelCompact::_pointer_tracking = false; bool PSParallelCompact::_root_tracking = true; @@ -103,99 +91,78 @@ GrowableArray * PSParallelCompact::_last_gc_live_oops_size = NULL; #endif -// XXX beg - verification code; only works while we also mark in object headers -static void -verify_mark_bitmap(ParMarkBitMap& _mark_bitmap) +void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size, + HeapWord* destination) { - ParallelScavengeHeap* heap = PSParallelCompact::gc_heap(); + assert(src_region_idx != 0, "invalid src_region_idx"); + assert(partial_obj_size != 0, "invalid partial_obj_size argument"); + assert(destination != NULL, "invalid destination argument"); + + _src_region_idx = src_region_idx; + _partial_obj_size = partial_obj_size; + _destination = destination; + + // These fields may not be updated below, so make sure they're clear. + assert(_dest_region_addr == NULL, "should have been cleared"); + assert(_first_src_addr == NULL, "should have been cleared"); - PSPermGen* perm_gen = heap->perm_gen(); - PSOldGen* old_gen = heap->old_gen(); - PSYoungGen* young_gen = heap->young_gen(); + // Determine the number of destination regions for the partial object. + HeapWord* const last_word = destination + partial_obj_size - 1; + const ParallelCompactData& sd = PSParallelCompact::summary_data(); + HeapWord* const beg_region_addr = sd.region_align_down(destination); + HeapWord* const end_region_addr = sd.region_align_down(last_word); - MutableSpace* perm_space = perm_gen->object_space(); - MutableSpace* old_space = old_gen->object_space(); - MutableSpace* eden_space = young_gen->eden_space(); - MutableSpace* from_space = young_gen->from_space(); - MutableSpace* to_space = young_gen->to_space(); - - // 'from_space' here is the survivor space at the lower address. 
- if (to_space->bottom() < from_space->bottom()) { - from_space = to_space; - to_space = young_gen->from_space(); - } - - HeapWord* boundaries[12]; - unsigned int bidx = 0; - const unsigned int bidx_max = sizeof(boundaries) / sizeof(boundaries[0]); - - boundaries[0] = perm_space->bottom(); - boundaries[1] = perm_space->top(); - boundaries[2] = old_space->bottom(); - boundaries[3] = old_space->top(); - boundaries[4] = eden_space->bottom(); - boundaries[5] = eden_space->top(); - boundaries[6] = from_space->bottom(); - boundaries[7] = from_space->top(); - boundaries[8] = to_space->bottom(); - boundaries[9] = to_space->top(); - boundaries[10] = to_space->end(); - boundaries[11] = to_space->end(); - - BitMap::idx_t beg_bit = 0; - BitMap::idx_t end_bit; - BitMap::idx_t tmp_bit; - const BitMap::idx_t last_bit = _mark_bitmap.size(); - do { - HeapWord* addr = _mark_bitmap.bit_to_addr(beg_bit); - if (_mark_bitmap.is_marked(beg_bit)) { - oop obj = (oop)addr; - assert(obj->is_gc_marked(), "obj header is not marked"); - end_bit = _mark_bitmap.find_obj_end(beg_bit, last_bit); - const size_t size = _mark_bitmap.obj_size(beg_bit, end_bit); - assert(size == (size_t)obj->size(), "end bit wrong?"); - beg_bit = _mark_bitmap.find_obj_beg(beg_bit + 1, last_bit); - assert(beg_bit > end_bit, "bit set in middle of an obj"); - } else { - if (addr >= boundaries[bidx] && addr < boundaries[bidx + 1]) { - // a dead object in the current space. - oop obj = (oop)addr; - end_bit = _mark_bitmap.addr_to_bit(addr + obj->size()); - assert(!obj->is_gc_marked(), "obj marked in header, not in bitmap"); - tmp_bit = beg_bit + 1; - beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit); - assert(beg_bit == end_bit, "beg bit set in unmarked obj"); - beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit); - assert(beg_bit == end_bit, "end bit set in unmarked obj"); - } else if (addr < boundaries[bidx + 2]) { - // addr is between top in the current space and bottom in the next. - end_bit = beg_bit + pointer_delta(boundaries[bidx + 2], addr); - tmp_bit = beg_bit; - beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit); - assert(beg_bit == end_bit, "beg bit set above top"); - beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit); - assert(beg_bit == end_bit, "end bit set above top"); - bidx += 2; - } else if (bidx < bidx_max - 2) { - bidx += 2; // ??? - } else { - tmp_bit = beg_bit; - beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, last_bit); - assert(beg_bit == last_bit, "beg bit set outside heap"); - beg_bit = _mark_bitmap.find_obj_end(tmp_bit, last_bit); - assert(beg_bit == last_bit, "end bit set outside heap"); - } + if (beg_region_addr == end_region_addr) { + // One destination region. + _destination_count = 1; + if (end_region_addr == destination) { + // The destination falls on a region boundary, thus the first word of the + // partial object will be the first word copied to the destination region. + _dest_region_addr = end_region_addr; + _first_src_addr = sd.region_to_addr(src_region_idx); } - } while (beg_bit < last_bit); + } else { + // Two destination regions. When copied, the partial object will cross a + // destination region boundary, so a word somewhere within the partial + // object will be the first word copied to the second destination region. 
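Aside (illustration only, not part of the patch): SplitInfo::record() above decides between one and two destination regions by aligning the first and last destination words of the partial object down to a region boundary and comparing the results. A small standalone sketch of that decision, working in heap-word indices with 512-word regions; region_align_down and destination_regions are stand-ins, not HotSpot APIs:

    #include <cstddef>
    #include <cstdio>

    // Stand-in for ParallelCompactData::region_align_down(): round a word
    // index down to a 512-word region boundary (Log2RegionSize == 9).
    static size_t region_align_down(size_t word_index) {
      const size_t RegionSize = 512;
      return word_index & ~(RegionSize - 1);
    }

    // Returns 1 or 2: how many destination regions the copied partial object
    // touches, given its destination (as a word index) and its size in words.
    static int destination_regions(size_t destination, size_t partial_obj_size) {
      size_t last_word = destination + partial_obj_size - 1;
      return region_align_down(destination) == region_align_down(last_word) ? 1 : 2;
    }

    int main() {
      // Fits inside one 512-word region.
      printf("%d\n", destination_regions(1024, 100));   // 1
      // Starts near the end of one region and spills into the next.
      printf("%d\n", destination_regions(1500, 100));   // 2
      return 0;
    }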
+ _destination_count = 2; + _dest_region_addr = end_region_addr; + const size_t ofs = pointer_delta(end_region_addr, destination); + assert(ofs < _partial_obj_size, "sanity"); + _first_src_addr = sd.region_to_addr(src_region_idx) + ofs; + } +} + +void SplitInfo::clear() +{ + _src_region_idx = 0; + _partial_obj_size = 0; + _destination = NULL; + _destination_count = 0; + _dest_region_addr = NULL; + _first_src_addr = NULL; + assert(!is_valid(), "sanity"); +} + +#ifdef ASSERT +void SplitInfo::verify_clear() +{ + assert(_src_region_idx == 0, "not clear"); + assert(_partial_obj_size == 0, "not clear"); + assert(_destination == NULL, "not clear"); + assert(_destination_count == 0, "not clear"); + assert(_dest_region_addr == NULL, "not clear"); + assert(_first_src_addr == NULL, "not clear"); } -// XXX end - verification code; only works while we also mark in object headers +#endif // #ifdef ASSERT + #ifndef PRODUCT const char* PSParallelCompact::space_names[] = { "perm", "old ", "eden", "from", "to " }; -void PSParallelCompact::print_chunk_ranges() +void PSParallelCompact::print_region_ranges() { tty->print_cr("space bottom top end new_top"); tty->print_cr("------ ---------- ---------- ---------- ----------"); @@ -203,34 +170,34 @@ for (unsigned int id = 0; id < last_space_id; ++id) { const MutableSpace* space = _space_info[id].space(); tty->print_cr("%u %s " - SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " " - SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ", - id, space_names[id], - summary_data().addr_to_chunk_idx(space->bottom()), - summary_data().addr_to_chunk_idx(space->top()), - summary_data().addr_to_chunk_idx(space->end()), - summary_data().addr_to_chunk_idx(_space_info[id].new_top())); + SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " " + SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ", + id, space_names[id], + summary_data().addr_to_region_idx(space->bottom()), + summary_data().addr_to_region_idx(space->top()), + summary_data().addr_to_region_idx(space->end()), + summary_data().addr_to_region_idx(_space_info[id].new_top())); } } void -print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c) +print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c) { -#define CHUNK_IDX_FORMAT SIZE_FORMAT_W("7") -#define CHUNK_DATA_FORMAT SIZE_FORMAT_W("5") +#define REGION_IDX_FORMAT SIZE_FORMAT_W(7) +#define REGION_DATA_FORMAT SIZE_FORMAT_W(5) ParallelCompactData& sd = PSParallelCompact::summary_data(); - size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0; - tty->print_cr(CHUNK_IDX_FORMAT " " PTR_FORMAT " " - CHUNK_IDX_FORMAT " " PTR_FORMAT " " - CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " " - CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d", - i, c->data_location(), dci, c->destination(), - c->partial_obj_size(), c->live_obj_size(), - c->data_size(), c->source_chunk(), c->destination_count()); + size_t dci = c->destination() ? 
sd.addr_to_region_idx(c->destination()) : 0; + tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " " + REGION_IDX_FORMAT " " PTR_FORMAT " " + REGION_DATA_FORMAT " " REGION_DATA_FORMAT " " + REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d", + i, c->data_location(), dci, c->destination(), + c->partial_obj_size(), c->live_obj_size(), + c->data_size(), c->source_region(), c->destination_count()); -#undef CHUNK_IDX_FORMAT -#undef CHUNK_DATA_FORMAT +#undef REGION_IDX_FORMAT +#undef REGION_DATA_FORMAT } void @@ -239,14 +206,14 @@ HeapWord* const end_addr) { size_t total_words = 0; - size_t i = summary_data.addr_to_chunk_idx(beg_addr); - const size_t last = summary_data.addr_to_chunk_idx(end_addr); + size_t i = summary_data.addr_to_region_idx(beg_addr); + const size_t last = summary_data.addr_to_region_idx(end_addr); HeapWord* pdest = 0; while (i <= last) { - ParallelCompactData::ChunkData* c = summary_data.chunk(i); + ParallelCompactData::RegionData* c = summary_data.region(i); if (c->data_size() != 0 || c->destination() != pdest) { - print_generic_summary_chunk(i, c); + print_generic_summary_region(i, c); total_words += c->data_size(); pdest = c->destination(); } @@ -268,16 +235,16 @@ } void -print_initial_summary_chunk(size_t i, - const ParallelCompactData::ChunkData* c, - bool newline = true) -{ - tty->print(SIZE_FORMAT_W("5") " " PTR_FORMAT " " - SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " " - SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d", - i, c->destination(), - c->partial_obj_size(), c->live_obj_size(), - c->data_size(), c->source_chunk(), c->destination_count()); +print_initial_summary_region(size_t i, + const ParallelCompactData::RegionData* c, + bool newline = true) +{ + tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " " + SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " + SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d", + i, c->destination(), + c->partial_obj_size(), c->live_obj_size(), + c->data_size(), c->source_region(), c->destination_count()); if (newline) tty->cr(); } @@ -288,63 +255,64 @@ return; } - const size_t chunk_size = ParallelCompactData::ChunkSize; - HeapWord* const top_aligned_up = summary_data.chunk_align_up(space->top()); - const size_t end_chunk = summary_data.addr_to_chunk_idx(top_aligned_up); - const ParallelCompactData::ChunkData* c = summary_data.chunk(end_chunk - 1); + const size_t region_size = ParallelCompactData::RegionSize; + typedef ParallelCompactData::RegionData RegionData; + HeapWord* const top_aligned_up = summary_data.region_align_up(space->top()); + const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up); + const RegionData* c = summary_data.region(end_region - 1); HeapWord* end_addr = c->destination() + c->data_size(); const size_t live_in_space = pointer_delta(end_addr, space->bottom()); - // Print (and count) the full chunks at the beginning of the space. - size_t full_chunk_count = 0; - size_t i = summary_data.addr_to_chunk_idx(space->bottom()); - while (i < end_chunk && summary_data.chunk(i)->data_size() == chunk_size) { - print_initial_summary_chunk(i, summary_data.chunk(i)); - ++full_chunk_count; + // Print (and count) the full regions at the beginning of the space. 
+ size_t full_region_count = 0; + size_t i = summary_data.addr_to_region_idx(space->bottom()); + while (i < end_region && summary_data.region(i)->data_size() == region_size) { + print_initial_summary_region(i, summary_data.region(i)); + ++full_region_count; ++i; } - size_t live_to_right = live_in_space - full_chunk_count * chunk_size; + size_t live_to_right = live_in_space - full_region_count * region_size; double max_reclaimed_ratio = 0.0; - size_t max_reclaimed_ratio_chunk = 0; + size_t max_reclaimed_ratio_region = 0; size_t max_dead_to_right = 0; size_t max_live_to_right = 0; - // Print the 'reclaimed ratio' for chunks while there is something live in the - // chunk or to the right of it. The remaining chunks are empty (and + // Print the 'reclaimed ratio' for regions while there is something live in + // the region or to the right of it. The remaining regions are empty (and // uninteresting), and computing the ratio will result in division by 0. - while (i < end_chunk && live_to_right > 0) { - c = summary_data.chunk(i); - HeapWord* const chunk_addr = summary_data.chunk_to_addr(i); - const size_t used_to_right = pointer_delta(space->top(), chunk_addr); + while (i < end_region && live_to_right > 0) { + c = summary_data.region(i); + HeapWord* const region_addr = summary_data.region_to_addr(i); + const size_t used_to_right = pointer_delta(space->top(), region_addr); const size_t dead_to_right = used_to_right - live_to_right; const double reclaimed_ratio = double(dead_to_right) / live_to_right; if (reclaimed_ratio > max_reclaimed_ratio) { - max_reclaimed_ratio = reclaimed_ratio; - max_reclaimed_ratio_chunk = i; - max_dead_to_right = dead_to_right; - max_live_to_right = live_to_right; + max_reclaimed_ratio = reclaimed_ratio; + max_reclaimed_ratio_region = i; + max_dead_to_right = dead_to_right; + max_live_to_right = live_to_right; } - print_initial_summary_chunk(i, c, false); - tty->print_cr(" %12.10f " SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10"), - reclaimed_ratio, dead_to_right, live_to_right); + print_initial_summary_region(i, c, false); + tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10), + reclaimed_ratio, dead_to_right, live_to_right); live_to_right -= c->data_size(); ++i; } - // Any remaining chunks are empty. Print one more if there is one. - if (i < end_chunk) { - print_initial_summary_chunk(i, summary_data.chunk(i)); + // Any remaining regions are empty. Print one more if there is one. 
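Aside (illustration only, not part of the patch): the figures printed by the loop above come from simple word counts: everything between the candidate region's start and top() is used_to_right, the live portion of that is live_to_right, and the remainder is dead_to_right; the printed ratio is dead over live. A standalone sketch with stand-in values:

    #include <cstddef>
    #include <cstdio>

    // Illustrative only: the per-region figure printed above. Everything to the
    // right of the candidate region start is either live or dead; the ratio of
    // dead to live words estimates how profitable compacting from here would be.
    int main() {
      size_t space_top     = 20000;   // word index of top() (stand-in value)
      size_t region_addr   = 4096;    // start of the candidate region
      size_t live_to_right = 9000;    // live words in [region_addr, space_top)

      size_t used_to_right = space_top - region_addr;
      size_t dead_to_right = used_to_right - live_to_right;
      printf("dead=%zu live=%zu ratio=%.4f\n",
             dead_to_right, live_to_right, double(dead_to_right) / live_to_right);
      return 0;
    }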
+ if (i < end_region) { + print_initial_summary_region(i, summary_data.region(i)); } - tty->print_cr("max: " SIZE_FORMAT_W("4") " d2r=" SIZE_FORMAT_W("10") " " - "l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f", - max_reclaimed_ratio_chunk, max_dead_to_right, - max_live_to_right, max_reclaimed_ratio); + tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " " + "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f", + max_reclaimed_ratio_region, max_dead_to_right, + max_live_to_right, max_reclaimed_ratio); } void @@ -375,13 +343,9 @@ { _region_start = 0; - _chunk_vspace = 0; - _chunk_data = 0; - _chunk_count = 0; - - _block_vspace = 0; - _block_data = 0; - _block_count = 0; + _region_vspace = 0; + _region_data = 0; + _region_count = 0; } bool ParallelCompactData::initialize(MemRegion covered_region) @@ -390,18 +354,12 @@ const size_t region_size = covered_region.word_size(); DEBUG_ONLY(_region_end = _region_start + region_size;) - assert(chunk_align_down(_region_start) == _region_start, - "region start not aligned"); - assert((region_size & ChunkSizeOffsetMask) == 0, - "region size not a multiple of ChunkSize"); - - bool result = initialize_chunk_data(region_size); - - // Initialize the block data if it will be used for updating pointers, or if - // this is a debug build. - if (!UseParallelOldGCChunkPointerCalc || trueInDebug) { - result = result && initialize_block_data(region_size); - } + assert(region_align_down(_region_start) == _region_start, + "region start not aligned"); + assert((region_size & RegionSizeOffsetMask) == 0, + "region size not a multiple of RegionSize"); + + bool result = initialize_region_data(region_size); return result; } @@ -416,7 +374,7 @@ const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 : MAX2(page_sz, granularity); - ReservedSpace rs(bytes, rs_align, false); + ReservedSpace rs(bytes, rs_align, rs_align > 0); os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(), rs.size()); PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz); @@ -425,30 +383,20 @@ return vspace; } delete vspace; + // Release memory reserved in the space. 
+ rs.release(); } return 0; } -bool ParallelCompactData::initialize_chunk_data(size_t region_size) -{ - const size_t count = (region_size + ChunkSizeOffsetMask) >> Log2ChunkSize; - _chunk_vspace = create_vspace(count, sizeof(ChunkData)); - if (_chunk_vspace != 0) { - _chunk_data = (ChunkData*)_chunk_vspace->reserved_low_addr(); - _chunk_count = count; - return true; - } - return false; -} - -bool ParallelCompactData::initialize_block_data(size_t region_size) +bool ParallelCompactData::initialize_region_data(size_t region_size) { - const size_t count = (region_size + BlockOffsetMask) >> Log2BlockSize; - _block_vspace = create_vspace(count, sizeof(BlockData)); - if (_block_vspace != 0) { - _block_data = (BlockData*)_block_vspace->reserved_low_addr(); - _block_count = count; + const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize; + _region_vspace = create_vspace(count, sizeof(RegionData)); + if (_region_vspace != 0) { + _region_data = (RegionData*)_region_vspace->reserved_low_addr(); + _region_count = count; return true; } return false; @@ -456,38 +404,27 @@ void ParallelCompactData::clear() { - if (_block_data) { - memset(_block_data, 0, _block_vspace->committed_size()); - } - memset(_chunk_data, 0, _chunk_vspace->committed_size()); + memset(_region_data, 0, _region_vspace->committed_size()); } -void ParallelCompactData::clear_range(size_t beg_chunk, size_t end_chunk) { - assert(beg_chunk <= _chunk_count, "beg_chunk out of range"); - assert(end_chunk <= _chunk_count, "end_chunk out of range"); - assert(ChunkSize % BlockSize == 0, "ChunkSize not a multiple of BlockSize"); +void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) { + assert(beg_region <= _region_count, "beg_region out of range"); + assert(end_region <= _region_count, "end_region out of range"); - const size_t chunk_cnt = end_chunk - beg_chunk; - - if (_block_data) { - const size_t blocks_per_chunk = ChunkSize / BlockSize; - const size_t beg_block = beg_chunk * blocks_per_chunk; - const size_t block_cnt = chunk_cnt * blocks_per_chunk; - memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData)); - } - memset(_chunk_data + beg_chunk, 0, chunk_cnt * sizeof(ChunkData)); + const size_t region_cnt = end_region - beg_region; + memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData)); } -HeapWord* ParallelCompactData::partial_obj_end(size_t chunk_idx) const +HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const { - const ChunkData* cur_cp = chunk(chunk_idx); - const ChunkData* const end_cp = chunk(chunk_count() - 1); + const RegionData* cur_cp = region(region_idx); + const RegionData* const end_cp = region(region_count() - 1); - HeapWord* result = chunk_to_addr(chunk_idx); + HeapWord* result = region_to_addr(region_idx); if (cur_cp < end_cp) { do { result += cur_cp->partial_obj_size(); - } while (cur_cp->partial_obj_size() == ChunkSize && ++cur_cp < end_cp); + } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp); } return result; } @@ -495,195 +432,265 @@ void ParallelCompactData::add_obj(HeapWord* addr, size_t len) { const size_t obj_ofs = pointer_delta(addr, _region_start); - const size_t beg_chunk = obj_ofs >> Log2ChunkSize; - const size_t end_chunk = (obj_ofs + len - 1) >> Log2ChunkSize; + const size_t beg_region = obj_ofs >> Log2RegionSize; + const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize; DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);) DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);) - if (beg_chunk == end_chunk) { - 
// All in one chunk. - _chunk_data[beg_chunk].add_live_obj(len); + if (beg_region == end_region) { + // All in one region. + _region_data[beg_region].add_live_obj(len); return; } - // First chunk. - const size_t beg_ofs = chunk_offset(addr); - _chunk_data[beg_chunk].add_live_obj(ChunkSize - beg_ofs); + // First region. + const size_t beg_ofs = region_offset(addr); + _region_data[beg_region].add_live_obj(RegionSize - beg_ofs); klassOop klass = ((oop)addr)->klass(); - // Middle chunks--completely spanned by this object. - for (size_t chunk = beg_chunk + 1; chunk < end_chunk; ++chunk) { - _chunk_data[chunk].set_partial_obj_size(ChunkSize); - _chunk_data[chunk].set_partial_obj_addr(addr); + // Middle regions--completely spanned by this object. + for (size_t region = beg_region + 1; region < end_region; ++region) { + _region_data[region].set_partial_obj_size(RegionSize); + _region_data[region].set_partial_obj_addr(addr); } - // Last chunk. - const size_t end_ofs = chunk_offset(addr + len - 1); - _chunk_data[end_chunk].set_partial_obj_size(end_ofs + 1); - _chunk_data[end_chunk].set_partial_obj_addr(addr); + // Last region. + const size_t end_ofs = region_offset(addr + len - 1); + _region_data[end_region].set_partial_obj_size(end_ofs + 1); + _region_data[end_region].set_partial_obj_addr(addr); } void ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end) { - assert(chunk_offset(beg) == 0, "not ChunkSize aligned"); - assert(chunk_offset(end) == 0, "not ChunkSize aligned"); + assert(region_offset(beg) == 0, "not RegionSize aligned"); + assert(region_offset(end) == 0, "not RegionSize aligned"); - size_t cur_chunk = addr_to_chunk_idx(beg); - const size_t end_chunk = addr_to_chunk_idx(end); + size_t cur_region = addr_to_region_idx(beg); + const size_t end_region = addr_to_region_idx(end); HeapWord* addr = beg; - while (cur_chunk < end_chunk) { - _chunk_data[cur_chunk].set_destination(addr); - _chunk_data[cur_chunk].set_destination_count(0); - _chunk_data[cur_chunk].set_source_chunk(cur_chunk); - _chunk_data[cur_chunk].set_data_location(addr); - - // Update live_obj_size so the chunk appears completely full. - size_t live_size = ChunkSize - _chunk_data[cur_chunk].partial_obj_size(); - _chunk_data[cur_chunk].set_live_obj_size(live_size); - - ++cur_chunk; - addr += ChunkSize; + while (cur_region < end_region) { + _region_data[cur_region].set_destination(addr); + _region_data[cur_region].set_destination_count(0); + _region_data[cur_region].set_source_region(cur_region); + _region_data[cur_region].set_data_location(addr); + + // Update live_obj_size so the region appears completely full. + size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size(); + _region_data[cur_region].set_live_obj_size(live_size); + + ++cur_region; + addr += RegionSize; } } -bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end, - HeapWord* source_beg, HeapWord* source_end, - HeapWord** target_next, - HeapWord** source_next) { - // This is too strict. - // assert(chunk_offset(source_beg) == 0, "not ChunkSize aligned"); +// Find the point at which a space can be split and, if necessary, record the +// split point. +// +// If the current src region (which overflowed the destination space) doesn't +// have a partial object, the split point is at the beginning of the current src +// region (an "easy" split, no extra bookkeeping required). 
+// +// If the current src region has a partial object, the split point is in the +// region where that partial object starts (call it the split_region). If +// split_region has a partial object, then the split point is just after that +// partial object (a "hard" split where we have to record the split data and +// zero the partial_obj_size field). With a "hard" split, we know that the +// partial_obj ends within split_region because the partial object that caused +// the overflow starts in split_region. If split_region doesn't have a partial +// obj, then the split is at the beginning of split_region (another "easy" +// split). +HeapWord* +ParallelCompactData::summarize_split_space(size_t src_region, + SplitInfo& split_info, + HeapWord* destination, + HeapWord* target_end, + HeapWord** target_next) +{ + assert(destination <= target_end, "sanity"); + assert(destination + _region_data[src_region].data_size() > target_end, + "region should not fit into target space"); + + size_t split_region = src_region; + HeapWord* split_destination = destination; + size_t partial_obj_size = _region_data[src_region].partial_obj_size(); + + if (destination + partial_obj_size > target_end) { + // The split point is just after the partial object (if any) in the + // src_region that contains the start of the object that overflowed the + // destination space. + // + // Find the start of the "overflow" object and set split_region to the + // region containing it. + HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr(); + split_region = addr_to_region_idx(overflow_obj); + + // Clear the source_region field of all destination regions whose first word + // came from data after the split point (a non-null source_region field + // implies a region must be filled). + // + // An alternative to the simple loop below: clear during post_compact(), + // which uses memcpy instead of individual stores, and is easy to + // parallelize. (The downside is that it clears the entire RegionData + // object as opposed to just one field.) + // + // post_compact() would have to clear the summary data up to the highest + // address that was written during the summary phase, which would be + // + // max(top, max(new_top, clear_top)) + // + // where clear_top is a new field in SpaceInfo. Would have to set clear_top + // to destination + partial_obj_size, where both have the values passed to + // this routine. + const RegionData* const sr = region(split_region); + const size_t beg_idx = + addr_to_region_idx(region_align_up(sr->destination() + + sr->partial_obj_size())); + const size_t end_idx = + addr_to_region_idx(region_align_up(destination + partial_obj_size)); + + if (TraceParallelOldGCSummaryPhase) { + gclog_or_tty->print_cr("split: clearing source_region field in [" + SIZE_FORMAT ", " SIZE_FORMAT ")", + beg_idx, end_idx); + } + for (size_t idx = beg_idx; idx < end_idx; ++idx) { + _region_data[idx].set_source_region(0); + } + + // Set split_destination and partial_obj_size to reflect the split region. + split_destination = sr->destination(); + partial_obj_size = sr->partial_obj_size(); + } + + // The split is recorded only if a partial object extends onto the region. + if (partial_obj_size != 0) { + _region_data[split_region].set_partial_obj_size(0); + split_info.record(split_region, partial_obj_size, split_destination); + } + + // Setup the continuation addresses. 
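Aside (illustration only, not part of the patch): whichever region ends up as the split region, the continuation addresses above follow one pattern: the next source word is the split region's start plus its partial-object size, and the next target word is the split destination plus the same size; the trace code labels the split "easy" when that size is zero and "hard" otherwise. A simplified standalone sketch in heap-word indices, using stand-in names and omitting the source_region clearing done above:

    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-ins (not the HotSpot types): everything is expressed
    // in heap-word indices, with 512-word regions.
    const size_t RegionSize = 512;

    struct Split {
      const char* type;          // "easy" or "hard"
      size_t      source_next;   // first source word not yet summarized
      size_t      target_next;   // next free destination word
    };

    // Sketch of the split bookkeeping: the split region contributes
    // 'partial_obj_size' words starting at 'split_destination'; an "easy"
    // split carries no partial object and needs no extra record.
    static Split make_split(size_t split_region, size_t partial_obj_size,
                            size_t split_destination) {
      Split s;
      s.type        = partial_obj_size == 0 ? "easy" : "hard";
      s.source_next = split_region * RegionSize + partial_obj_size;
      s.target_next = split_destination + partial_obj_size;
      return s;
    }

    int main() {
      Split easy = make_split(10, 0, 4096);
      Split hard = make_split(10, 80, 4096);
      printf("%s: source_next=%zu target_next=%zu\n", easy.type, easy.source_next, easy.target_next);
      printf("%s: source_next=%zu target_next=%zu\n", hard.type, hard.source_next, hard.target_next);
      return 0;
    }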
+ *target_next = split_destination + partial_obj_size; + HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size; if (TraceParallelOldGCSummaryPhase) { - tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " " - "sb=" PTR_FORMAT " se=" PTR_FORMAT " " - "tn=" PTR_FORMAT " sn=" PTR_FORMAT, - target_beg, target_end, - source_beg, source_end, - target_next != 0 ? *target_next : (HeapWord*) 0, - source_next != 0 ? *source_next : (HeapWord*) 0); + const char * split_type = partial_obj_size == 0 ? "easy" : "hard"; + gclog_or_tty->print_cr("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT + " pos=" SIZE_FORMAT, + split_type, source_next, split_region, + partial_obj_size); + gclog_or_tty->print_cr("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT + " tn=" PTR_FORMAT, + split_type, split_destination, + addr_to_region_idx(split_destination), + *target_next); + + if (partial_obj_size != 0) { + HeapWord* const po_beg = split_info.destination(); + HeapWord* const po_end = po_beg + split_info.partial_obj_size(); + gclog_or_tty->print_cr("%s split: " + "po_beg=" PTR_FORMAT " " SIZE_FORMAT " " + "po_end=" PTR_FORMAT " " SIZE_FORMAT, + split_type, + po_beg, addr_to_region_idx(po_beg), + po_end, addr_to_region_idx(po_end)); + } } - size_t cur_chunk = addr_to_chunk_idx(source_beg); - const size_t end_chunk = addr_to_chunk_idx(chunk_align_up(source_end)); + return source_next; +} + +bool ParallelCompactData::summarize(SplitInfo& split_info, + HeapWord* source_beg, HeapWord* source_end, + HeapWord** source_next, + HeapWord* target_beg, HeapWord* target_end, + HeapWord** target_next) +{ + if (TraceParallelOldGCSummaryPhase) { + HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next; + tty->print_cr("sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT + "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT, + source_beg, source_end, source_next_val, + target_beg, target_end, *target_next); + } + + size_t cur_region = addr_to_region_idx(source_beg); + const size_t end_region = addr_to_region_idx(region_align_up(source_end)); HeapWord *dest_addr = target_beg; - while (cur_chunk < end_chunk) { - size_t words = _chunk_data[cur_chunk].data_size(); + while (cur_region < end_region) { + // The destination must be set even if the region has no data. + _region_data[cur_region].set_destination(dest_addr); -#if 1 - assert(pointer_delta(target_end, dest_addr) >= words, - "source region does not fit into target region"); -#else - // XXX - need some work on the corner cases here. If the chunk does not - // fit, then must either make sure any partial_obj from the chunk fits, or - // 'undo' the initial part of the partial_obj that is in the previous chunk. - if (dest_addr + words >= target_end) { - // Let the caller know where to continue. - *target_next = dest_addr; - *source_next = chunk_to_addr(cur_chunk); - return false; - } -#endif // #if 1 - - _chunk_data[cur_chunk].set_destination(dest_addr); - - // Set the destination_count for cur_chunk, and if necessary, update - // source_chunk for a destination chunk. The source_chunk field is updated - // if cur_chunk is the first (left-most) chunk to be copied to a destination - // chunk. - // - // The destination_count calculation is a bit subtle. A chunk that has data - // that compacts into itself does not count itself as a destination. This - // maintains the invariant that a zero count means the chunk is available - // and can be claimed and then filled. 
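Aside (illustration only, not part of the patch): the destination_count bookkeeping described above reduces to two questions about a source region's live data: does it straddle a destination region boundary, and does it land in the region it already occupies (self-compaction, which is not counted, preserving the zero-means-available invariant)? A simplified standalone sketch that ignores the split_info adjustment and the source_region updates; names and values are stand-ins:

    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-in arithmetic (word indices, 512-word regions): how
    // many destination regions a source region's live data touches, not
    // counting the source region compacting into itself.
    const size_t Log2RegionSize = 9;

    static size_t addr_to_region_idx(size_t word) { return word >> Log2RegionSize; }

    static unsigned destination_count(size_t cur_region, size_t dest_addr, size_t words) {
      size_t last_addr = dest_addr + words - 1;
      size_t dest_region_1 = addr_to_region_idx(dest_addr);
      size_t dest_region_2 = addr_to_region_idx(last_addr);
      unsigned count = (cur_region == dest_region_2) ? 0 : 1;  // self-compaction is free
      if (dest_region_1 != dest_region_2) {
        count += 1;                                            // data straddles a boundary
      }
      return count;
    }

    int main() {
      // Region 4 compacting entirely into itself (destination inside region 4): count 0.
      printf("%u\n", destination_count(4, 4 * 512 + 10, 100));
      // Region 4 copied into regions 2 and 3: count 2.
      printf("%u\n", destination_count(4, 2 * 512 + 500, 100));
      return 0;
    }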
+ size_t words = _region_data[cur_region].data_size(); if (words > 0) { - HeapWord* const last_addr = dest_addr + words - 1; - const size_t dest_chunk_1 = addr_to_chunk_idx(dest_addr); - const size_t dest_chunk_2 = addr_to_chunk_idx(last_addr); -#if 0 - // Initially assume that the destination chunks will be the same and - // adjust the value below if necessary. Under this assumption, if - // cur_chunk == dest_chunk_2, then cur_chunk will be compacted completely - // into itself. - uint destination_count = cur_chunk == dest_chunk_2 ? 0 : 1; - if (dest_chunk_1 != dest_chunk_2) { - // Destination chunks differ; adjust destination_count. - destination_count += 1; - // Data from cur_chunk will be copied to the start of dest_chunk_2. - _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk); - } else if (chunk_offset(dest_addr) == 0) { - // Data from cur_chunk will be copied to the start of the destination - // chunk. - _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk); + // If cur_region does not fit entirely into the target space, find a point + // at which the source space can be 'split' so that part is copied to the + // target space and the rest is copied elsewhere. + if (dest_addr + words > target_end) { + assert(source_next != NULL, "source_next is NULL when splitting"); + *source_next = summarize_split_space(cur_region, split_info, dest_addr, + target_end, target_next); + return false; } -#else - // Initially assume that the destination chunks will be different and + + // Compute the destination_count for cur_region, and if necessary, update + // source_region for a destination region. The source_region field is + // updated if cur_region is the first (left-most) region to be copied to a + // destination region. + // + // The destination_count calculation is a bit subtle. A region that has + // data that compacts into itself does not count itself as a destination. + // This maintains the invariant that a zero count means the region is + // available and can be claimed and then filled. + uint destination_count = 0; + if (split_info.is_split(cur_region)) { + // The current region has been split: the partial object will be copied + // to one destination space and the remaining data will be copied to + // another destination space. Adjust the initial destination_count and, + // if necessary, set the source_region field if the partial object will + // cross a destination region boundary. + destination_count = split_info.destination_count(); + if (destination_count == 2) { + size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr()); + _region_data[dest_idx].set_source_region(cur_region); + } + } + + HeapWord* const last_addr = dest_addr + words - 1; + const size_t dest_region_1 = addr_to_region_idx(dest_addr); + const size_t dest_region_2 = addr_to_region_idx(last_addr); + + // Initially assume that the destination regions will be the same and // adjust the value below if necessary. Under this assumption, if - // cur_chunk == dest_chunk2, then cur_chunk will be compacted partially - // into dest_chunk_1 and partially into itself. - uint destination_count = cur_chunk == dest_chunk_2 ? 1 : 2; - if (dest_chunk_1 != dest_chunk_2) { - // Data from cur_chunk will be copied to the start of dest_chunk_2. - _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk); - } else { - // Destination chunks are the same; adjust destination_count. - destination_count -= 1; - if (chunk_offset(dest_addr) == 0) { - // Data from cur_chunk will be copied to the start of the destination - // chunk. 
- _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk); - } + // cur_region == dest_region_2, then cur_region will be compacted + // completely into itself. + destination_count += cur_region == dest_region_2 ? 0 : 1; + if (dest_region_1 != dest_region_2) { + // Destination regions differ; adjust destination_count. + destination_count += 1; + // Data from cur_region will be copied to the start of dest_region_2. + _region_data[dest_region_2].set_source_region(cur_region); + } else if (region_offset(dest_addr) == 0) { + // Data from cur_region will be copied to the start of the destination + // region. + _region_data[dest_region_1].set_source_region(cur_region); } -#endif // #if 0 - _chunk_data[cur_chunk].set_destination_count(destination_count); - _chunk_data[cur_chunk].set_data_location(chunk_to_addr(cur_chunk)); + _region_data[cur_region].set_destination_count(destination_count); + _region_data[cur_region].set_data_location(region_to_addr(cur_region)); dest_addr += words; } - ++cur_chunk; + ++cur_region; } *target_next = dest_addr; return true; } -bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) { - HeapWord* block_addr = block_to_addr(block_index); - HeapWord* block_end_addr = block_addr + BlockSize; - size_t chunk_index = addr_to_chunk_idx(block_addr); - HeapWord* partial_obj_end_addr = partial_obj_end(chunk_index); - - // An object that ends at the end of the block, ends - // in the block (the last word of the object is to - // the left of the end). - if ((block_addr < partial_obj_end_addr) && - (partial_obj_end_addr <= block_end_addr)) { - return true; - } - - return false; -} - HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) { - HeapWord* result = NULL; - if (UseParallelOldGCChunkPointerCalc) { - result = chunk_calc_new_pointer(addr); - } else { - result = block_calc_new_pointer(addr); - } - return result; -} - -// This method is overly complicated (expensive) to be called -// for every reference. -// Try to restructure this so that a NULL is returned if -// the object is dead. But don't wast the cycles to explicitly check -// that it is dead since only live objects should be passed in. - -HeapWord* ParallelCompactData::chunk_calc_new_pointer(HeapWord* addr) { assert(addr != NULL, "Should detect NULL oop earlier"); assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap"); #ifdef ASSERT @@ -693,80 +700,36 @@ #endif assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked"); - // Chunk covering the object. - size_t chunk_index = addr_to_chunk_idx(addr); - const ChunkData* const chunk_ptr = chunk(chunk_index); - HeapWord* const chunk_addr = chunk_align_down(addr); - - assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object"); - assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check"); - - HeapWord* result = chunk_ptr->destination(); - - // If all the data in the chunk is live, then the new location of the object - // can be calculated from the destination of the chunk plus the offset of the - // object in the chunk. - if (chunk_ptr->data_size() == ChunkSize) { - result += pointer_delta(addr, chunk_addr); + // Region covering the object. 
+ size_t region_index = addr_to_region_idx(addr); + const RegionData* const region_ptr = region(region_index); + HeapWord* const region_addr = region_align_down(addr); + + assert(addr < region_addr + RegionSize, "Region does not cover object"); + assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check"); + + HeapWord* result = region_ptr->destination(); + + // If all the data in the region is live, then the new location of the object + // can be calculated from the destination of the region plus the offset of the + // object in the region. + if (region_ptr->data_size() == RegionSize) { + result += pointer_delta(addr, region_addr); return result; } // The new location of the object is - // chunk destination + - // size of the partial object extending onto the chunk + - // sizes of the live objects in the Chunk that are to the left of addr - const size_t partial_obj_size = chunk_ptr->partial_obj_size(); - HeapWord* const search_start = chunk_addr + partial_obj_size; + // region destination + + // size of the partial object extending onto the region + + // sizes of the live objects in the Region that are to the left of addr + const size_t partial_obj_size = region_ptr->partial_obj_size(); + HeapWord* const search_start = region_addr + partial_obj_size; const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap(); size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr)); result += partial_obj_size + live_to_left; - assert(result <= addr, "object cannot move to the right"); - return result; -} - -HeapWord* ParallelCompactData::block_calc_new_pointer(HeapWord* addr) { - assert(addr != NULL, "Should detect NULL oop earlier"); - assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap"); -#ifdef ASSERT - if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) { - gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr); - } -#endif - assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked"); - - // Chunk covering the object. - size_t chunk_index = addr_to_chunk_idx(addr); - const ChunkData* const chunk_ptr = chunk(chunk_index); - HeapWord* const chunk_addr = chunk_align_down(addr); - - assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object"); - assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check"); - - HeapWord* result = chunk_ptr->destination(); - - // If all the data in the chunk is live, then the new location of the object - // can be calculated from the destination of the chunk plus the offset of the - // object in the chunk. 
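Aside (illustration only, not part of the patch): the calc_new_pointer() arithmetic above has two cases: a fully live region keeps relative offsets, otherwise the new address is the region's destination plus the partial object spilling in from the left plus the live words between that partial object's end and addr. A standalone sketch in heap-word indices; in the real code live_words_to_left comes from the mark bitmap, here it is passed in directly:

    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-in (word indices, 512-word regions) for the pointer
    // calculation above.
    const size_t RegionSize = 512;

    static size_t calc_new_pointer(size_t addr,
                                   size_t region_destination,
                                   size_t region_data_size,        // live words in the region
                                   size_t partial_obj_size,        // words spilling in from the left
                                   size_t live_words_to_left) {    // live words left of addr, after the partial obj
      size_t region_addr = addr & ~(RegionSize - 1);
      if (region_data_size == RegionSize) {
        // Fully live region: objects keep their relative offsets.
        return region_destination + (addr - region_addr);
      }
      // Otherwise: destination + partial object + live data to the left of addr.
      return region_destination + partial_obj_size + live_words_to_left;
    }

    int main() {
      // Object at word 2100, region destination 1024, 40 words of partial
      // object and 17 live words before it within the region.
      printf("%zu\n", calc_new_pointer(2100, 1024, 300, 40, 17));  // 1081
      return 0;
    }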
- if (chunk_ptr->data_size() == ChunkSize) { - result += pointer_delta(addr, chunk_addr); - return result; - } - - // The new location of the object is - // chunk destination + - // block offset + - // sizes of the live objects in the Block that are to the left of addr - const size_t block_offset = addr_to_block_ptr(addr)->offset(); - HeapWord* const search_start = chunk_addr + block_offset; - - const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap(); - size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr)); - - result += block_offset + live_to_left; - assert(result <= addr, "object cannot move to the right"); - assert(result == chunk_calc_new_pointer(addr), "Should match"); + DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);) return result; } @@ -793,15 +756,14 @@ void ParallelCompactData::verify_clear() { - verify_clear(_chunk_vspace); - verify_clear(_block_vspace); + verify_clear(_region_vspace); } #endif // #ifdef ASSERT #ifdef NOT_PRODUCT -ParallelCompactData::ChunkData* debug_chunk(size_t chunk_index) { +ParallelCompactData::RegionData* debug_region(size_t region_index) { ParallelCompactData& sd = PSParallelCompact::summary_data(); - return sd.chunk(chunk_index); + return sd.region(region_index); } #endif @@ -814,46 +776,23 @@ ParallelCompactData PSParallelCompact::_summary_data; PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure; + +void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); } +bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); } + +void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); } +void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); } + PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true); PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false); -void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { -#ifdef VALIDATE_MARK_SWEEP - if (ValidateMarkSweep) { - if (!Universe::heap()->is_in_reserved(p)) { - _root_refs_stack->push(p); - } else { - _other_refs_stack->push(p); - } - } -#endif - mark_and_push(_compaction_manager, p); -} - -void PSParallelCompact::mark_and_follow(ParCompactionManager* cm, - oop* p) { - assert(Universe::heap()->is_in_reserved(p), - "we should only be traversing objects here"); - oop m = *p; - if (m != NULL && mark_bitmap()->is_unmarked(m)) { - if (mark_obj(m)) { - m->follow_contents(cm); // Follow contents of the marked object - } - } -} +void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } +void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } -// Anything associated with this variable is temporary. +void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); } -void PSParallelCompact::mark_and_push_internal(ParCompactionManager* cm, - oop* p) { - // Push marked object, contents will be followed later - oop m = *p; - if (mark_obj(m)) { - // This thread marked the object and - // owns the subsequent processing of it. 
- cm->save_for_scanning(m); - } -} +void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); } +void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); } void PSParallelCompact::post_initialize() { ParallelScavengeHeap* heap = gc_heap(); @@ -977,10 +916,17 @@ const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top)); _mark_bitmap.clear_range(beg_bit, end_bit); - const size_t beg_chunk = _summary_data.addr_to_chunk_idx(bot); - const size_t end_chunk = - _summary_data.addr_to_chunk_idx(_summary_data.chunk_align_up(max_top)); - _summary_data.clear_range(beg_chunk, end_chunk); + const size_t beg_region = _summary_data.addr_to_region_idx(bot); + const size_t end_region = + _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top)); + _summary_data.clear_range(beg_region, end_region); + + // Clear the data used to 'split' regions. + SplitInfo& split_info = _space_info[id].split_info(); + if (split_info.is_valid()) { + split_info.clear(); + } + DEBUG_ONLY(split_info.verify_clear();) } void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values) @@ -1002,7 +948,7 @@ DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;) // Increment the invocation count - heap->increment_total_collections(); + heap->increment_total_collections(true); // We need to track unique mark sweep invocations as well. _total_invocations++; @@ -1039,10 +985,11 @@ { TraceTime tm("post compact", print_phases(), true, gclog_or_tty); - // Clear the marking bitmap and summary data and update top() in each space. for (unsigned int id = perm_space_id; id < last_space_id; ++id) { + // Clear the marking bitmap, summary data and split info. clear_data_covering_space(SpaceId(id)); - _space_info[id].space()->set_top(_space_info[id].new_top()); + // Update top(). Must be done after clearing the bitmap and summary data. + _space_info[id].publish_new_top(); } MutableSpace* const eden_space = _space_info[eden_space_id].space(); @@ -1084,6 +1031,10 @@ ref_processor()->enqueue_discovered_references(NULL); + if (ZapUnusedHeapArea) { + heap->gen_mangle_unused_area(); + } + // Update time of last GC reset_millis_since_last_gc(); } @@ -1092,19 +1043,19 @@ PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id, bool maximum_compaction) { - const size_t chunk_size = ParallelCompactData::ChunkSize; + const size_t region_size = ParallelCompactData::RegionSize; const ParallelCompactData& sd = summary_data(); const MutableSpace* const space = _space_info[id].space(); - HeapWord* const top_aligned_up = sd.chunk_align_up(space->top()); - const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(space->bottom()); - const ChunkData* const end_cp = sd.addr_to_chunk_ptr(top_aligned_up); + HeapWord* const top_aligned_up = sd.region_align_up(space->top()); + const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom()); + const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up); - // Skip full chunks at the beginning of the space--they are necessarily part + // Skip full regions at the beginning of the space--they are necessarily part // of the dense prefix. 
size_t full_count = 0; - const ChunkData* cp; - for (cp = beg_cp; cp < end_cp && cp->data_size() == chunk_size; ++cp) { + const RegionData* cp; + for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) { ++full_count; } @@ -1113,7 +1064,7 @@ const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval; if (maximum_compaction || cp == end_cp || interval_ended) { _maximum_compaction_gc_num = total_invocations(); - return sd.chunk_to_addr(cp); + return sd.region_to_addr(cp); } HeapWord* const new_top = _space_info[id].new_top(); @@ -1136,52 +1087,53 @@ } // XXX - Use binary search? - HeapWord* dense_prefix = sd.chunk_to_addr(cp); - const ChunkData* full_cp = cp; - const ChunkData* const top_cp = sd.addr_to_chunk_ptr(space->top() - 1); + HeapWord* dense_prefix = sd.region_to_addr(cp); + const RegionData* full_cp = cp; + const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1); while (cp < end_cp) { - HeapWord* chunk_destination = cp->destination(); - const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination); + HeapWord* region_destination = cp->destination(); + const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination); if (TraceParallelOldGCDensePrefix && Verbose) { - tty->print_cr("c#=" SIZE_FORMAT_W("04") " dst=" PTR_FORMAT " " - "dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"), - sd.chunk(cp), chunk_destination, - dense_prefix, cur_deadwood); + tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " " + "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8), + sd.region(cp), region_destination, + dense_prefix, cur_deadwood); } if (cur_deadwood >= deadwood_goal) { - // Found the chunk that has the correct amount of deadwood to the left. - // This typically occurs after crossing a fairly sparse set of chunks, so - // iterate backwards over those sparse chunks, looking for the chunk that - // has the lowest density of live objects 'to the right.' - size_t space_to_left = sd.chunk(cp) * chunk_size; + // Found the region that has the correct amount of deadwood to the left. + // This typically occurs after crossing a fairly sparse set of regions, so + // iterate backwards over those sparse regions, looking for the region + // that has the lowest density of live objects 'to the right.' 
+ size_t space_to_left = sd.region(cp) * region_size; size_t live_to_left = space_to_left - cur_deadwood; size_t space_to_right = space_capacity - space_to_left; size_t live_to_right = space_live - live_to_left; double density_to_right = double(live_to_right) / space_to_right; while (cp > full_cp) { - --cp; - const size_t prev_chunk_live_to_right = live_to_right - cp->data_size(); - const size_t prev_chunk_space_to_right = space_to_right + chunk_size; - double prev_chunk_density_to_right = - double(prev_chunk_live_to_right) / prev_chunk_space_to_right; - if (density_to_right <= prev_chunk_density_to_right) { - return dense_prefix; - } - if (TraceParallelOldGCDensePrefix && Verbose) { - tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f " - "pc_d2r=%10.8f", sd.chunk(cp), density_to_right, - prev_chunk_density_to_right); - } - dense_prefix -= chunk_size; - live_to_right = prev_chunk_live_to_right; - space_to_right = prev_chunk_space_to_right; - density_to_right = prev_chunk_density_to_right; + --cp; + const size_t prev_region_live_to_right = live_to_right - + cp->data_size(); + const size_t prev_region_space_to_right = space_to_right + region_size; + double prev_region_density_to_right = + double(prev_region_live_to_right) / prev_region_space_to_right; + if (density_to_right <= prev_region_density_to_right) { + return dense_prefix; + } + if (TraceParallelOldGCDensePrefix && Verbose) { + tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f " + "pc_d2r=%10.8f", sd.region(cp), density_to_right, + prev_region_density_to_right); + } + dense_prefix -= region_size; + live_to_right = prev_region_live_to_right; + space_to_right = prev_region_space_to_right; + density_to_right = prev_region_density_to_right; } return dense_prefix; } - dense_prefix += chunk_size; + dense_prefix += region_size; ++cp; } @@ -1194,8 +1146,8 @@ const bool maximum_compaction, HeapWord* const addr) { - const size_t chunk_idx = summary_data().addr_to_chunk_idx(addr); - ChunkData* const cp = summary_data().chunk(chunk_idx); + const size_t region_idx = summary_data().addr_to_region_idx(addr); + RegionData* const cp = summary_data().region(region_idx); const MutableSpace* const space = _space_info[id].space(); HeapWord* const new_top = _space_info[id].new_top(); @@ -1206,16 +1158,16 @@ const size_t live_to_right = new_top - cp->destination(); const size_t dead_to_right = space->top() - addr - live_to_right; - tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W("05") " " - "spl=" SIZE_FORMAT " " - "d2l=" SIZE_FORMAT " d2l%%=%6.4f " - "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT - " ratio=%10.8f", - algorithm, addr, chunk_idx, - space_live, - dead_to_left, dead_to_left_pct, - dead_to_right, live_to_right, - double(dead_to_right) / live_to_right); + tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " " + "spl=" SIZE_FORMAT " " + "d2l=" SIZE_FORMAT " d2l%%=%6.4f " + "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT + " ratio=%10.8f", + algorithm, addr, region_idx, + space_live, + dead_to_left, dead_to_left_pct, + dead_to_right, live_to_right, + double(dead_to_right) / live_to_right); } #endif // #ifndef PRODUCT @@ -1273,52 +1225,52 @@ return MAX2(limit, 0.0); } -ParallelCompactData::ChunkData* -PSParallelCompact::first_dead_space_chunk(const ChunkData* beg, - const ChunkData* end) +ParallelCompactData::RegionData* +PSParallelCompact::first_dead_space_region(const RegionData* beg, + const RegionData* end) { - const size_t chunk_size = ParallelCompactData::ChunkSize; + const size_t region_size = 
ParallelCompactData::RegionSize; ParallelCompactData& sd = summary_data(); - size_t left = sd.chunk(beg); - size_t right = end > beg ? sd.chunk(end) - 1 : left; + size_t left = sd.region(beg); + size_t right = end > beg ? sd.region(end) - 1 : left; // Binary search. while (left < right) { // Equivalent to (left + right) / 2, but does not overflow. const size_t middle = left + (right - left) / 2; - ChunkData* const middle_ptr = sd.chunk(middle); + RegionData* const middle_ptr = sd.region(middle); HeapWord* const dest = middle_ptr->destination(); - HeapWord* const addr = sd.chunk_to_addr(middle); + HeapWord* const addr = sd.region_to_addr(middle); assert(dest != NULL, "sanity"); assert(dest <= addr, "must move left"); if (middle > left && dest < addr) { right = middle - 1; - } else if (middle < right && middle_ptr->data_size() == chunk_size) { + } else if (middle < right && middle_ptr->data_size() == region_size) { left = middle + 1; } else { return middle_ptr; } } - return sd.chunk(left); + return sd.region(left); } -ParallelCompactData::ChunkData* -PSParallelCompact::dead_wood_limit_chunk(const ChunkData* beg, - const ChunkData* end, - size_t dead_words) +ParallelCompactData::RegionData* +PSParallelCompact::dead_wood_limit_region(const RegionData* beg, + const RegionData* end, + size_t dead_words) { ParallelCompactData& sd = summary_data(); - size_t left = sd.chunk(beg); - size_t right = end > beg ? sd.chunk(end) - 1 : left; + size_t left = sd.region(beg); + size_t right = end > beg ? sd.region(end) - 1 : left; // Binary search. while (left < right) { // Equivalent to (left + right) / 2, but does not overflow. const size_t middle = left + (right - left) / 2; - ChunkData* const middle_ptr = sd.chunk(middle); + RegionData* const middle_ptr = sd.region(middle); HeapWord* const dest = middle_ptr->destination(); - HeapWord* const addr = sd.chunk_to_addr(middle); + HeapWord* const addr = sd.region_to_addr(middle); assert(dest != NULL, "sanity"); assert(dest <= addr, "must move left"); @@ -1331,16 +1283,16 @@ return middle_ptr; } } - return sd.chunk(left); + return sd.region(left); } // The result is valid during the summary phase, after the initial summarization // of each space into itself, and before final summarization. inline double -PSParallelCompact::reclaimed_ratio(const ChunkData* const cp, - HeapWord* const bottom, - HeapWord* const top, - HeapWord* const new_top) +PSParallelCompact::reclaimed_ratio(const RegionData* const cp, + HeapWord* const bottom, + HeapWord* const top, + HeapWord* const new_top) { ParallelCompactData& sd = summary_data(); @@ -1351,12 +1303,13 @@ assert(top >= new_top, "summary data problem?"); assert(new_top > bottom, "space is empty; should not be here"); assert(new_top >= cp->destination(), "sanity"); - assert(top >= sd.chunk_to_addr(cp), "sanity"); + assert(top >= sd.region_to_addr(cp), "sanity"); HeapWord* const destination = cp->destination(); const size_t dense_prefix_live = pointer_delta(destination, bottom); const size_t compacted_region_live = pointer_delta(new_top, destination); - const size_t compacted_region_used = pointer_delta(top, sd.chunk_to_addr(cp)); + const size_t compacted_region_used = pointer_delta(top, + sd.region_to_addr(cp)); const size_t reclaimable = compacted_region_used - compacted_region_live; const double divisor = dense_prefix_live + 1.25 * compacted_region_live; @@ -1364,39 +1317,47 @@ } // Return the address of the end of the dense prefix, a.k.a. the start of the -// compacted region. The address is always on a chunk boundary. 
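Aside (illustration only, not part of the patch): reclaimed_ratio() above weighs the space that would be recovered against the work of compacting. The hunk shows reclaimable = compacted_region_used - compacted_region_live and divisor = dense_prefix_live + 1.25 * compacted_region_live; the returned ratio is presumably reclaimable / divisor (the return statement lies outside this hunk), with the 1.25 factor penalizing data that must actually be copied. A standalone sketch with stand-in word counts:

    #include <cstddef>
    #include <cstdio>

    // Illustrative only (word indices as stand-ins). The quantities follow the
    // hunk above; the final ratio is assumed to be reclaimable / divisor.
    int main() {
      size_t bottom      = 0;
      size_t destination = 6000;    // cp->destination()
      size_t region_addr = 8192;    // sd.region_to_addr(cp)
      size_t new_top     = 14000;
      size_t top         = 20000;

      size_t dense_prefix_live     = destination - bottom;
      size_t compacted_region_live = new_top - destination;
      size_t compacted_region_used = top - region_addr;
      size_t reclaimable           = compacted_region_used - compacted_region_live;
      double divisor = dense_prefix_live + 1.25 * compacted_region_live;

      printf("reclaimable=%zu divisor=%.1f ratio=%.4f\n",
             reclaimable, divisor, reclaimable / divisor);
      return 0;
    }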
-// -// Completely full chunks at the left are skipped, since no compaction can occur -// in those chunks. Then the maximum amount of dead wood to allow is computed, -// based on the density (amount live / capacity) of the generation; the chunk -// with approximately that amount of dead space to the left is identified as the -// limit chunk. Chunks between the last completely full chunk and the limit -// chunk are scanned and the one that has the best (maximum) reclaimed_ratio() -// is selected. +// compacted region. The address is always on a region boundary. +// +// Completely full regions at the left are skipped, since no compaction can +// occur in those regions. Then the maximum amount of dead wood to allow is +// computed, based on the density (amount live / capacity) of the generation; +// the region with approximately that amount of dead space to the left is +// identified as the limit region. Regions between the last completely full +// region and the limit region are scanned and the one that has the best +// (maximum) reclaimed_ratio() is selected. HeapWord* PSParallelCompact::compute_dense_prefix(const SpaceId id, bool maximum_compaction) { - const size_t chunk_size = ParallelCompactData::ChunkSize; + if (ParallelOldGCSplitALot) { + if (_space_info[id].dense_prefix() != _space_info[id].space()->bottom()) { + // The value was chosen to provoke splitting a young gen space; use it. + return _space_info[id].dense_prefix(); + } + } + + const size_t region_size = ParallelCompactData::RegionSize; const ParallelCompactData& sd = summary_data(); const MutableSpace* const space = _space_info[id].space(); HeapWord* const top = space->top(); - HeapWord* const top_aligned_up = sd.chunk_align_up(top); + HeapWord* const top_aligned_up = sd.region_align_up(top); HeapWord* const new_top = _space_info[id].new_top(); - HeapWord* const new_top_aligned_up = sd.chunk_align_up(new_top); + HeapWord* const new_top_aligned_up = sd.region_align_up(new_top); HeapWord* const bottom = space->bottom(); - const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(bottom); - const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up); - const ChunkData* const new_top_cp = sd.addr_to_chunk_ptr(new_top_aligned_up); + const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom); + const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up); + const RegionData* const new_top_cp = + sd.addr_to_region_ptr(new_top_aligned_up); - // Skip full chunks at the beginning of the space--they are necessarily part + // Skip full regions at the beginning of the space--they are necessarily part // of the dense prefix. - const ChunkData* const full_cp = first_dead_space_chunk(beg_cp, new_top_cp); - assert(full_cp->destination() == sd.chunk_to_addr(full_cp) || - space->is_empty(), "no dead space allowed to the left"); - assert(full_cp->data_size() < chunk_size || full_cp == new_top_cp - 1, - "chunk must have dead space"); + const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp); + assert(full_cp->destination() == sd.region_to_addr(full_cp) || + space->is_empty(), "no dead space allowed to the left"); + assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1, + "region must have dead space"); // The gc number is saved whenever a maximum compaction is done, and used to // determine when the maximum compaction interval has expired. 
This avoids @@ -1407,7 +1368,7 @@ total_invocations() == HeapFirstMaximumCompactionCount; if (maximum_compaction || full_cp == top_cp || interval_ended) { _maximum_compaction_gc_num = total_invocations(); - return sd.chunk_to_addr(full_cp); + return sd.region_to_addr(full_cp); } const size_t space_live = pointer_delta(new_top, bottom); @@ -1433,15 +1394,15 @@ dead_wood_max, dead_wood_limit); } - // Locate the chunk with the desired amount of dead space to the left. - const ChunkData* const limit_cp = - dead_wood_limit_chunk(full_cp, top_cp, dead_wood_limit); + // Locate the region with the desired amount of dead space to the left. + const RegionData* const limit_cp = + dead_wood_limit_region(full_cp, top_cp, dead_wood_limit); - // Scan from the first chunk with dead space to the limit chunk and find the + // Scan from the first region with dead space to the limit region and find the // one with the best (largest) reclaimed ratio. double best_ratio = 0.0; - const ChunkData* best_cp = full_cp; - for (const ChunkData* cp = full_cp; cp < limit_cp; ++cp) { + const RegionData* best_cp = full_cp; + for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) { double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top); if (tmp_ratio > best_ratio) { best_cp = cp; @@ -1449,39 +1410,244 @@ } } -#if 0 - // Something to consider: if the chunk with the best ratio is 'close to' the - // first chunk w/free space, choose the first chunk with free space - // ("first-free"). The first-free chunk is usually near the start of the +#if 0 + // Something to consider: if the region with the best ratio is 'close to' the + // first region w/free space, choose the first region with free space + // ("first-free"). The first-free region is usually near the start of the // heap, which means we are copying most of the heap already, so copy a bit // more to get complete compaction. - if (pointer_delta(best_cp, full_cp, sizeof(ChunkData)) < 4) { + if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) { _maximum_compaction_gc_num = total_invocations(); best_cp = full_cp; } #endif // #if 0 - return sd.chunk_to_addr(best_cp); + return sd.region_to_addr(best_cp); +} + +#ifndef PRODUCT +void +PSParallelCompact::fill_with_live_objects(SpaceId id, HeapWord* const start, + size_t words) +{ + if (TraceParallelOldGCSummaryPhase) { + tty->print_cr("fill_with_live_objects [" PTR_FORMAT " " PTR_FORMAT ") " + SIZE_FORMAT, start, start + words, words); + } + + ObjectStartArray* const start_array = _space_info[id].start_array(); + CollectedHeap::fill_with_objects(start, words); + for (HeapWord* p = start; p < start + words; p += oop(p)->size()) { + _mark_bitmap.mark_obj(p, words); + _summary_data.add_obj(p, words); + start_array->allocate_block(p); + } +} + +void +PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start) +{ + ParallelCompactData& sd = summary_data(); + MutableSpace* space = _space_info[id].space(); + + // Find the source and destination start addresses. + HeapWord* const src_addr = sd.region_align_down(start); + HeapWord* dst_addr; + if (src_addr < start) { + dst_addr = sd.addr_to_region_ptr(src_addr)->destination(); + } else if (src_addr > space->bottom()) { + // The start (the original top() value) is aligned to a region boundary so + // the associated region does not have a destination. Compute the + // destination from the previous region. + RegionData* const cp = sd.addr_to_region_ptr(src_addr) - 1; + dst_addr = cp->destination() + cp->data_size(); + } else { + // Filling the entire space. 
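fill_with_live_objects() above depends on being able to re-walk the filled range object by object via oop(p)->size(). A toy standalone model of that pattern follows, where each filler records its own size in its first word; the names and layout are illustrative, not the HotSpot filler-object format.

#include <cassert>
#include <cstddef>
#include <vector>

// Word-addressed toy space: heap[i] holds the size (in words) of the object
// that starts at i, or 0 for interior words.
static void fill_with_fillers(std::vector<size_t>& heap, size_t start, size_t words) {
  assert(heap.size() >= start + words);
  const size_t max_filler = 64;   // assumed cap on a single filler object
  size_t p = start;
  size_t remaining = words;
  while (remaining > 0) {
    const size_t sz = remaining < max_filler ? remaining : max_filler;
    heap[p] = sz;                 // "header" stores the object size
    p += sz;
    remaining -= sz;
  }
}

// Re-walk the range object by object, as the loop above does with
// oop(p)->size(), recording every object start.
static std::vector<size_t> walk_object_starts(const std::vector<size_t>& heap,
                                              size_t start, size_t words) {
  std::vector<size_t> starts;
  for (size_t p = start; p < start + words; p += heap[p]) {
    assert(heap[p] > 0 && "every word in the range must be covered");
    starts.push_back(p);          // analogous to start_array->allocate_block(p)
  }
  return starts;
}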
+ dst_addr = space->bottom(); + } + assert(dst_addr != NULL, "sanity"); + + // Update the summary data. + bool result = _summary_data.summarize(_space_info[id].split_info(), + src_addr, space->top(), NULL, + dst_addr, space->end(), + _space_info[id].new_top_addr()); + assert(result, "should not fail: bad filler object size"); +} + +void +PSParallelCompact::provoke_split_fill_survivor(SpaceId id) +{ + if (total_invocations() % (ParallelOldGCSplitInterval * 3) != 0) { + return; + } + + MutableSpace* const space = _space_info[id].space(); + if (space->is_empty()) { + HeapWord* b = space->bottom(); + HeapWord* t = b + space->capacity_in_words() / 2; + space->set_top(t); + if (ZapUnusedHeapArea) { + space->set_top_for_allocations(); + } + + size_t obj_len = 8; + while (b + obj_len <= t) { + CollectedHeap::fill_with_object(b, obj_len); + mark_bitmap()->mark_obj(b, obj_len); + summary_data().add_obj(b, obj_len); + b += obj_len; + obj_len = (obj_len & 0x18) + 8; // 8 16 24 32 8 16 24 32 ... + } + if (b < t) { + // The loop didn't completely fill to t (top); adjust top downward. + space->set_top(b); + if (ZapUnusedHeapArea) { + space->set_top_for_allocations(); + } + } + + HeapWord** nta = _space_info[id].new_top_addr(); + bool result = summary_data().summarize(_space_info[id].split_info(), + space->bottom(), space->top(), NULL, + space->bottom(), space->end(), nta); + assert(result, "space must fit into itself"); + } +} + +void +PSParallelCompact::provoke_split(bool & max_compaction) +{ + if (total_invocations() % ParallelOldGCSplitInterval != 0) { + return; + } + + const size_t region_size = ParallelCompactData::RegionSize; + ParallelCompactData& sd = summary_data(); + + MutableSpace* const eden_space = _space_info[eden_space_id].space(); + MutableSpace* const from_space = _space_info[from_space_id].space(); + const size_t eden_live = pointer_delta(eden_space->top(), + _space_info[eden_space_id].new_top()); + const size_t from_live = pointer_delta(from_space->top(), + _space_info[from_space_id].new_top()); + + const size_t min_fill_size = CollectedHeap::min_fill_size(); + const size_t eden_free = pointer_delta(eden_space->end(), eden_space->top()); + const size_t eden_fillable = eden_free >= min_fill_size ? eden_free : 0; + const size_t from_free = pointer_delta(from_space->end(), from_space->top()); + const size_t from_fillable = from_free >= min_fill_size ? from_free : 0; + + // Choose the space to split; need at least 2 regions live (or fillable). + SpaceId id; + MutableSpace* space; + size_t live_words; + size_t fill_words; + if (eden_live + eden_fillable >= region_size * 2) { + id = eden_space_id; + space = eden_space; + live_words = eden_live; + fill_words = eden_fillable; + } else if (from_live + from_fillable >= region_size * 2) { + id = from_space_id; + space = from_space; + live_words = from_live; + fill_words = from_fillable; + } else { + return; // Give up. + } + assert(fill_words == 0 || fill_words >= min_fill_size, "sanity"); + + if (live_words < region_size * 2) { + // Fill from top() to end() w/live objects of mixed sizes. 
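The survivor-fill loop in provoke_split_fill_survivor() above varies object sizes with obj_len = (obj_len & 0x18) + 8; masking with 0x18 keeps only the bits for 8/16/24, so from a start value of 8 the sequence cycles 8, 16, 24, 32 and then wraps. A small standalone check of that recurrence (purely illustrative):

#include <cstddef>
#include <iostream>

int main() {
  size_t obj_len = 8;
  for (int i = 0; i < 8; ++i) {
    std::cout << obj_len << ' ';        // prints: 8 16 24 32 8 16 24 32
    obj_len = (obj_len & 0x18) + 8;
  }
  std::cout << '\n';
  return 0;
}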
+ HeapWord* const fill_start = space->top(); + live_words += fill_words; + + space->set_top(fill_start + fill_words); + if (ZapUnusedHeapArea) { + space->set_top_for_allocations(); + } + + HeapWord* cur_addr = fill_start; + while (fill_words > 0) { + const size_t r = (size_t)os::random() % (region_size / 2) + min_fill_size; + size_t cur_size = MIN2(align_object_size_(r), fill_words); + if (fill_words - cur_size < min_fill_size) { + cur_size = fill_words; // Avoid leaving a fragment too small to fill. + } + + CollectedHeap::fill_with_object(cur_addr, cur_size); + mark_bitmap()->mark_obj(cur_addr, cur_size); + sd.add_obj(cur_addr, cur_size); + + cur_addr += cur_size; + fill_words -= cur_size; + } + + summarize_new_objects(id, fill_start); + } + + max_compaction = false; + + // Manipulate the old gen so that it has room for about half of the live data + // in the target young gen space (live_words / 2). + id = old_space_id; + space = _space_info[id].space(); + const size_t free_at_end = space->free_in_words(); + const size_t free_target = align_object_size(live_words / 2); + const size_t dead = pointer_delta(space->top(), _space_info[id].new_top()); + + if (free_at_end >= free_target + min_fill_size) { + // Fill space above top() and set the dense prefix so everything survives. + HeapWord* const fill_start = space->top(); + const size_t fill_size = free_at_end - free_target; + space->set_top(space->top() + fill_size); + if (ZapUnusedHeapArea) { + space->set_top_for_allocations(); + } + fill_with_live_objects(id, fill_start, fill_size); + summarize_new_objects(id, fill_start); + _space_info[id].set_dense_prefix(sd.region_align_down(space->top())); + } else if (dead + free_at_end > free_target) { + // Find a dense prefix that makes the right amount of space available. 
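The fill loop in provoke_split() above must never leave a trailing fragment smaller than the minimum filler object, so it extends the final filler to absorb the remainder. A standalone sketch of that invariant with deterministic sizes; min_fill and the preferred size are parameters here only for illustration.

#include <cassert>
#include <cstddef>
#include <vector>

// Split 'total' words into filler sizes such that every piece is at least
// 'min_fill' words; the final piece absorbs any remainder that would be too
// small to fill on its own.
std::vector<size_t> split_into_fillers(size_t total, size_t min_fill, size_t preferred) {
  assert(total == 0 || total >= min_fill);
  std::vector<size_t> pieces;
  size_t remaining = total;
  while (remaining > 0) {
    size_t cur = preferred < remaining ? preferred : remaining;
    if (cur < min_fill) cur = min_fill;          // never emit a too-small piece
    if (remaining - cur != 0 && remaining - cur < min_fill) {
      cur = remaining;                           // absorb an unfillable remainder
    }
    pieces.push_back(cur);
    remaining -= cur;
  }
  return pieces;
}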
+ HeapWord* cur = sd.region_align_down(space->top()); + HeapWord* cur_destination = sd.addr_to_region_ptr(cur)->destination(); + size_t dead_to_right = pointer_delta(space->end(), cur_destination); + while (dead_to_right < free_target) { + cur -= region_size; + cur_destination = sd.addr_to_region_ptr(cur)->destination(); + dead_to_right = pointer_delta(space->end(), cur_destination); + } + _space_info[id].set_dense_prefix(cur); + } } +#endif // #ifndef PRODUCT void PSParallelCompact::summarize_spaces_quick() { for (unsigned int i = 0; i < last_space_id; ++i) { const MutableSpace* space = _space_info[i].space(); - bool result = _summary_data.summarize(space->bottom(), space->end(), - space->bottom(), space->top(), - _space_info[i].new_top_addr()); - assert(result, "should never fail"); + HeapWord** nta = _space_info[i].new_top_addr(); + bool result = _summary_data.summarize(_space_info[i].split_info(), + space->bottom(), space->top(), NULL, + space->bottom(), space->end(), nta); + assert(result, "space must fit into itself"); _space_info[i].set_dense_prefix(space->bottom()); } + +#ifndef PRODUCT + if (ParallelOldGCSplitALot) { + provoke_split_fill_survivor(to_space_id); + } +#endif // #ifndef PRODUCT } void PSParallelCompact::fill_dense_prefix_end(SpaceId id) { HeapWord* const dense_prefix_end = dense_prefix(id); - const ChunkData* chunk = _summary_data.addr_to_chunk_ptr(dense_prefix_end); + const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end); const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end); - if (dead_space_crosses_boundary(chunk, dense_prefix_bit)) { + if (dead_space_crosses_boundary(region, dense_prefix_bit)) { // Only enough dead space is filled so that any remaining dead space to the // left is larger than the minimum filler object. (The remainder is filled // during the copy/update phase.) 
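The dense-prefix search in provoke_split() above walks the candidate boundary down one region at a time until the space that compaction would free to the right of it reaches the target. A standalone sketch using word indices instead of HeapWord addresses; all names are illustrative, and a cur > 0 guard is added for safety.

#include <cassert>
#include <cstddef>
#include <vector>

// destination[i] = word index to which region i will be compacted.
// space_end     = word index one past the end of the space.
// Returns the region index to use as the dense-prefix boundary such that at
// least 'free_target' words lie between its destination and the space end.
size_t choose_boundary(const std::vector<size_t>& destination,
                       size_t space_end, size_t free_target) {
  assert(!destination.empty());
  size_t cur = destination.size() - 1;           // start at the topmost region
  while (space_end - destination[cur] < free_target && cur > 0) {
    --cur;                                       // move the boundary left
  }
  return cur;
}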
@@ -1533,8 +1699,7 @@ } #endif // #ifdef _LP64 - MemRegion region(obj_beg, obj_len); - SharedHeap::fill_region_with_object(region); + CollectedHeap::fill_with_object(obj_beg, obj_len); _mark_bitmap.mark_obj(obj_beg, obj_len); _summary_data.add_obj(obj_beg, obj_len); assert(start_array(id) != NULL, "sanity"); @@ -1543,54 +1708,99 @@ } void +PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr) +{ + RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr); + HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr); + RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up); + for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) { + cur->set_source_region(0); + } +} + +void PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction) { assert(id < last_space_id, "id out of range"); + assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom() || + ParallelOldGCSplitALot && id == old_space_id, + "should have been reset in summarize_spaces_quick()"); const MutableSpace* space = _space_info[id].space(); - HeapWord** new_top_addr = _space_info[id].new_top_addr(); + if (_space_info[id].new_top() != space->bottom()) { + HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction); + _space_info[id].set_dense_prefix(dense_prefix_end); - HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction); - _space_info[id].set_dense_prefix(dense_prefix_end); - -#ifndef PRODUCT - if (TraceParallelOldGCDensePrefix) { - print_dense_prefix_stats("ratio", id, maximum_compaction, dense_prefix_end); - HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction); - print_dense_prefix_stats("density", id, maximum_compaction, addr); - } -#endif // #ifndef PRODUCT - - // If dead space crosses the dense prefix boundary, it is (at least partially) - // filled with a dummy object, marked live and added to the summary data. - // This simplifies the copy/update phase and must be done before the final - // locations of objects are determined, to prevent leaving a fragment of dead - // space that is too small to fill with an object. - if (!maximum_compaction && dense_prefix_end != space->bottom()) { - fill_dense_prefix_end(id); +#ifndef PRODUCT + if (TraceParallelOldGCDensePrefix) { + print_dense_prefix_stats("ratio", id, maximum_compaction, + dense_prefix_end); + HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction); + print_dense_prefix_stats("density", id, maximum_compaction, addr); + } +#endif // #ifndef PRODUCT + + // Recompute the summary data, taking into account the dense prefix. If + // every last byte will be reclaimed, then the existing summary data which + // compacts everything can be left in place. + if (!maximum_compaction && dense_prefix_end != space->bottom()) { + // If dead space crosses the dense prefix boundary, it is (at least + // partially) filled with a dummy object, marked live and added to the + // summary data. This simplifies the copy/update phase and must be done + // before the final locations of objects are determined, to prevent + // leaving a fragment of dead space that is too small to fill. + fill_dense_prefix_end(id); + + // Compute the destination of each Region, and thus each object. 
+ _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end); + _summary_data.summarize(_space_info[id].split_info(), + dense_prefix_end, space->top(), NULL, + dense_prefix_end, space->end(), + _space_info[id].new_top_addr()); + } } - // Compute the destination of each Chunk, and thus each object. - _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end); - _summary_data.summarize(dense_prefix_end, space->end(), - dense_prefix_end, space->top(), - new_top_addr); - if (TraceParallelOldGCSummaryPhase) { - const size_t chunk_size = ParallelCompactData::ChunkSize; - const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end); + const size_t region_size = ParallelCompactData::RegionSize; + HeapWord* const dense_prefix_end = _space_info[id].dense_prefix(); + const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end); const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom()); - const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(*new_top_addr); + HeapWord* const new_top = _space_info[id].new_top(); + const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top); const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end); tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " " - "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " " - "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT, - id, space->capacity_in_words(), dense_prefix_end, - dp_chunk, dp_words / chunk_size, - cr_words / chunk_size, *new_top_addr); + "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " " + "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT, + id, space->capacity_in_words(), dense_prefix_end, + dp_region, dp_words / region_size, + cr_words / region_size, new_top); } } +#ifndef PRODUCT +void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id, + HeapWord* dst_beg, HeapWord* dst_end, + SpaceId src_space_id, + HeapWord* src_beg, HeapWord* src_end) +{ + if (TraceParallelOldGCSummaryPhase) { + tty->print_cr("summarizing %d [%s] into %d [%s]: " + "src=" PTR_FORMAT "-" PTR_FORMAT " " + SIZE_FORMAT "-" SIZE_FORMAT " " + "dst=" PTR_FORMAT "-" PTR_FORMAT " " + SIZE_FORMAT "-" SIZE_FORMAT, + src_space_id, space_names[src_space_id], + dst_space_id, space_names[dst_space_id], + src_beg, src_end, + _summary_data.addr_to_region_idx(src_beg), + _summary_data.addr_to_region_idx(src_end), + dst_beg, dst_end, + _summary_data.addr_to_region_idx(dst_beg), + _summary_data.addr_to_region_idx(dst_end)); + } +} +#endif // #ifndef PRODUCT + void PSParallelCompact::summary_phase(ParCompactionManager* cm, bool maximum_compaction) { @@ -1598,12 +1808,7 @@ TraceTime tm("summary phase", print_phases(), true, gclog_or_tty); // trace("2"); -#ifdef ASSERT - if (VerifyParallelOldWithMarkSweep && - (PSParallelCompact::total_invocations() % - VerifyParallelOldWithMarkSweepInterval) == 0) { - verify_mark_bitmap(_mark_bitmap); - } +#ifdef ASSERT if (TraceParallelOldGCMarkingPhase) { tty->print_cr("add_obj_count=" SIZE_FORMAT " " "add_obj_bytes=" SIZE_FORMAT, @@ -1620,7 +1825,7 @@ if (TraceParallelOldGCSummaryPhase) { tty->print_cr("summary_phase: after summarizing each space to self"); Universe::print(); - NOT_PRODUCT(print_chunk_ranges()); + NOT_PRODUCT(print_region_ranges()); if (Verbose) { NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info)); } @@ -1628,294 +1833,93 @@ // The amount of live data that will end up in old space (assuming it fits). 
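The tracing code in summarize_space() above leans on the region addressing helpers (addr_to_region_idx, region_align_up, and friends), which for a power-of-two region size reduce to shift-and-mask arithmetic. A standalone sketch over word offsets rather than HeapWord pointers; RegionSizeWords and the function names are assumptions for illustration, not the HotSpot constants.

#include <cassert>
#include <cstddef>

const size_t RegionSizeWords = 1u << 9;        // assumed 512-word regions

size_t region_align_down(size_t word_offset) {
  return word_offset & ~(RegionSizeWords - 1);
}

size_t region_align_up(size_t word_offset) {
  return region_align_down(word_offset + RegionSizeWords - 1);
}

size_t addr_to_region_idx(size_t word_offset) {
  return word_offset / RegionSizeWords;        // a shift for power-of-two sizes
}

int main() {
  assert(region_align_down(1000) == 512);
  assert(region_align_up(1000) == 1024);
  assert(region_align_up(1024) == 1024);       // already aligned stays put
  assert(addr_to_region_idx(1023) == 1);
  return 0;
}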
size_t old_space_total_live = 0; - unsigned int id; - for (id = old_space_id; id < last_space_id; ++id) { + assert(perm_space_id < old_space_id, "should not count perm data here"); + for (unsigned int id = old_space_id; id < last_space_id; ++id) { old_space_total_live += pointer_delta(_space_info[id].new_top(), _space_info[id].space()->bottom()); } - const MutableSpace* old_space = _space_info[old_space_id].space(); - if (old_space_total_live > old_space->capacity_in_words()) { + MutableSpace* const old_space = _space_info[old_space_id].space(); + const size_t old_capacity = old_space->capacity_in_words(); + if (old_space_total_live > old_capacity) { // XXX - should also try to expand maximum_compaction = true; - } else if (!UseParallelOldGCDensePrefix) { - maximum_compaction = true; } +#ifndef PRODUCT + if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) { + provoke_split(maximum_compaction); + } +#endif // #ifndef PRODUCT // Permanent and Old generations. summarize_space(perm_space_id, maximum_compaction); summarize_space(old_space_id, maximum_compaction); - // Summarize the remaining spaces (those in the young gen) into old space. If - // the live data from a space doesn't fit, the existing summarization is left - // intact, so the data is compacted down within the space itself. - HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr(); - HeapWord* const target_space_end = old_space->end(); - for (id = eden_space_id; id < last_space_id; ++id) { + // Summarize the remaining spaces in the young gen. The initial target space + // is the old gen. If a space does not fit entirely into the target, then the + // remainder is compacted into the space itself and that space becomes the new + // target. + SpaceId dst_space_id = old_space_id; + HeapWord* dst_space_end = old_space->end(); + HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr(); + for (unsigned int id = eden_space_id; id < last_space_id; ++id) { const MutableSpace* space = _space_info[id].space(); const size_t live = pointer_delta(_space_info[id].new_top(), - space->bottom()); - const size_t available = pointer_delta(target_space_end, *new_top_addr); - if (live <= available) { + space->bottom()); + const size_t available = pointer_delta(dst_space_end, *new_top_addr); + + NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end, + SpaceId(id), space->bottom(), space->top());) + if (live > 0 && live <= available) { // All the live data will fit. - if (TraceParallelOldGCSummaryPhase) { - tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT, - id, *new_top_addr); - } - _summary_data.summarize(*new_top_addr, target_space_end, - space->bottom(), space->top(), - new_top_addr); + bool done = _summary_data.summarize(_space_info[id].split_info(), + space->bottom(), space->top(), + NULL, + *new_top_addr, dst_space_end, + new_top_addr); + assert(done, "space must fit into old gen"); // Reset the new_top value for the space. _space_info[id].set_new_top(space->bottom()); - - // Clear the source_chunk field for each chunk in the space. - ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom()); - ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(space->top() - 1); - while (beg_chunk <= end_chunk) { - beg_chunk->set_source_chunk(0); - ++beg_chunk; - } + } else if (live > 0) { + // Attempt to fit part of the source space into the target space. 
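The loop above summarizes each young-gen space into a chain of targets: everything that fits goes to the current target (initially the old gen); when a space does not fit, the overflow stays behind in that space, which then becomes the target for the spaces after it. A standalone sketch of the cascade in terms of word counts only, with no summary data; the types and names are illustrative.

#include <cstddef>
#include <vector>

struct SpaceLive {
  size_t live;       // live words in this space
  size_t capacity;   // capacity of this space in words
};

// For each space, compute how many live words stay behind after filling the
// current target; a space with leftovers becomes the next target.
std::vector<size_t> leftover_per_space(const std::vector<SpaceLive>& spaces,
                                       size_t old_gen_free) {
  std::vector<size_t> leftover(spaces.size(), 0);
  size_t target_free = old_gen_free;         // free words in the current target
  for (size_t i = 0; i < spaces.size(); ++i) {
    if (spaces[i].live <= target_free) {
      target_free -= spaces[i].live;         // the whole space fits in the target
    } else {
      const size_t moved = target_free;
      leftover[i] = spaces[i].live - moved;  // remainder compacts into itself
      // This space becomes the new target; its free space is whatever the
      // leftover does not occupy.
      target_free = spaces[i].capacity - leftover[i];
    }
  }
  return leftover;
}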
+ HeapWord* next_src_addr = NULL; + bool done = _summary_data.summarize(_space_info[id].split_info(), + space->bottom(), space->top(), + &next_src_addr, + *new_top_addr, dst_space_end, + new_top_addr); + assert(!done, "space should not fit into old gen"); + assert(next_src_addr != NULL, "sanity"); + + // The source space becomes the new target, so the remainder is compacted + // within the space itself. + dst_space_id = SpaceId(id); + dst_space_end = space->end(); + new_top_addr = _space_info[id].new_top_addr(); + NOT_PRODUCT(summary_phase_msg(dst_space_id, + space->bottom(), dst_space_end, + SpaceId(id), next_src_addr, space->top());) + done = _summary_data.summarize(_space_info[id].split_info(), + next_src_addr, space->top(), + NULL, + space->bottom(), dst_space_end, + new_top_addr); + assert(done, "space must fit when compacted into itself"); + assert(*new_top_addr <= space->top(), "usage should not grow"); } } - // Fill in the block data after any changes to the chunks have - // been made. -#ifdef ASSERT - summarize_blocks(cm, perm_space_id); - summarize_blocks(cm, old_space_id); -#else - if (!UseParallelOldGCChunkPointerCalc) { - summarize_blocks(cm, perm_space_id); - summarize_blocks(cm, old_space_id); - } -#endif - if (TraceParallelOldGCSummaryPhase) { tty->print_cr("summary_phase: after final summarization"); Universe::print(); - NOT_PRODUCT(print_chunk_ranges()); + NOT_PRODUCT(print_region_ranges()); if (Verbose) { NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info)); } } } -// Fill in the BlockData. -// Iterate over the spaces and within each space iterate over -// the chunks and fill in the BlockData for each chunk. - -void PSParallelCompact::summarize_blocks(ParCompactionManager* cm, - SpaceId first_compaction_space_id) { -#if 0 - DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(1);) - for (SpaceId cur_space_id = first_compaction_space_id; - cur_space_id != last_space_id; - cur_space_id = next_compaction_space_id(cur_space_id)) { - // Iterate over the chunks in the space - size_t start_chunk_index = - _summary_data.addr_to_chunk_idx(space(cur_space_id)->bottom()); - BitBlockUpdateClosure bbu(mark_bitmap(), - cm, - start_chunk_index); - // Iterate over blocks. - for (size_t chunk_index = start_chunk_index; - chunk_index < _summary_data.chunk_count() && - _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top(); - chunk_index++) { - - // Reset the closure for the new chunk. Note that the closure - // maintains some data that does not get reset for each chunk - // so a new instance of the closure is no appropriate. - bbu.reset_chunk(chunk_index); - - // Start the iteration with the first live object. This - // may return the end of the chunk. That is acceptable since - // it will properly limit the iterations. - ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit( - _summary_data.first_live_or_end_in_chunk(chunk_index)); - - // End the iteration at the end of the chunk. - HeapWord* chunk_addr = _summary_data.chunk_to_addr(chunk_index); - HeapWord* chunk_end = chunk_addr + ParallelCompactData::ChunkSize; - ParMarkBitMap::idx_t right_offset = - mark_bitmap()->addr_to_bit(chunk_end); - - // Blocks that have not objects starting in them can be - // skipped because their data will never be used. - if (left_offset < right_offset) { - - // Iterate through the objects in the chunk. 
- ParMarkBitMap::idx_t last_offset = - mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset); - - // If last_offset is less than right_offset, then the iterations - // terminated while it was looking for an end bit. "last_offset" - // is then the offset for the last start bit. In this situation - // the "offset" field for the next block to the right (_cur_block + 1) - // will not have been update although there may be live data - // to the left of the chunk. - - size_t cur_block_plus_1 = bbu.cur_block() + 1; - HeapWord* cur_block_plus_1_addr = - _summary_data.block_to_addr(bbu.cur_block()) + - ParallelCompactData::BlockSize; - HeapWord* last_offset_addr = mark_bitmap()->bit_to_addr(last_offset); - #if 1 // This code works. The else doesn't but should. Why does it? - // The current block (cur_block()) has already been updated. - // The last block that may need to be updated is either the - // next block (current block + 1) or the block where the - // last object starts (which can be greater than the - // next block if there were no objects found in intervening - // blocks). - size_t last_block = - MAX2(bbu.cur_block() + 1, - _summary_data.addr_to_block_idx(last_offset_addr)); - #else - // The current block has already been updated. The only block - // that remains to be updated is the block where the last - // object in the chunk starts. - size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr); - #endif - assert_bit_is_start(last_offset); - assert((last_block == _summary_data.block_count()) || - (_summary_data.block(last_block)->raw_offset() == 0), - "Should not have been set"); - // Is the last block still in the current chunk? If still - // in this chunk, update the last block (the counting that - // included the current block is meant for the offset of the last - // block). If not in this chunk, do nothing. Should not - // update a block in the next chunk. - if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(), - last_block)) { - if (last_offset < right_offset) { - // The last object started in this chunk but ends beyond - // this chunk. Update the block for this last object. - assert(mark_bitmap()->is_marked(last_offset), "Should be marked"); - // No end bit was found. The closure takes care of - // the cases where - // an objects crosses over into the next block - // an objects starts and ends in the next block - // It does not handle the case where an object is - // the first object in a later block and extends - // past the end of the chunk (i.e., the closure - // only handles complete objects that are in the range - // it is given). That object is handed back here - // for any special consideration necessary. - // - // Is the first bit in the last block a start or end bit? - // - // If the partial object ends in the last block L, - // then the 1st bit in L may be an end bit. - // - // Else does the last object start in a block after the current - // block? A block AA will already have been updated if an - // object ends in the next block AA+1. An object found to end in - // the AA+1 is the trigger that updates AA. Objects are being - // counted in the current block for updaing a following - // block. An object may start in later block - // block but may extend beyond the last block in the chunk. - // Updates are only done when the end of an object has been - // found. If the last object (covered by block L) starts - // beyond the current block, then no object ends in L (otherwise - // L would be the current block). 
So the first bit in L is - // a start bit. - // - // Else the last objects start in the current block and ends - // beyond the chunk. The current block has already been - // updated and there is no later block (with an object - // starting in it) that needs to be updated. - // - if (_summary_data.partial_obj_ends_in_block(last_block)) { - _summary_data.block(last_block)->set_end_bit_offset( - bbu.live_data_left()); - } else if (last_offset_addr >= cur_block_plus_1_addr) { - // The start of the object is on a later block - // (to the right of the current block and there are no - // complete live objects to the left of this last object - // within the chunk. - // The first bit in the block is for the start of the - // last object. - _summary_data.block(last_block)->set_start_bit_offset( - bbu.live_data_left()); - } else { - // The start of the last object was found in - // the current chunk (which has already - // been updated). - assert(bbu.cur_block() == - _summary_data.addr_to_block_idx(last_offset_addr), - "Should be a block already processed"); - } -#ifdef ASSERT - // Is there enough block information to find this object? - // The destination of the chunk has not been set so the - // values returned by calc_new_pointer() and - // block_calc_new_pointer() will only be - // offsets. But they should agree. - HeapWord* moved_obj_with_chunks = - _summary_data.chunk_calc_new_pointer(last_offset_addr); - HeapWord* moved_obj_with_blocks = - _summary_data.calc_new_pointer(last_offset_addr); - assert(moved_obj_with_chunks == moved_obj_with_blocks, - "Block calculation is wrong"); -#endif - } else if (last_block < _summary_data.block_count()) { - // Iterations ended looking for a start bit (but - // did not run off the end of the block table). - _summary_data.block(last_block)->set_start_bit_offset( - bbu.live_data_left()); - } - } -#ifdef ASSERT - // Is there enough block information to find this object? - HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset); - HeapWord* moved_obj_with_chunks = - _summary_data.calc_new_pointer(left_offset_addr); - HeapWord* moved_obj_with_blocks = - _summary_data.calc_new_pointer(left_offset_addr); - assert(moved_obj_with_chunks == moved_obj_with_blocks, - "Block calculation is wrong"); -#endif - - // Is there another block after the end of this chunk? -#ifdef ASSERT - if (last_block < _summary_data.block_count()) { - // No object may have been found in a block. If that - // block is at the end of the chunk, the iteration will - // terminate without incrementing the current block so - // that the current block is not the last block in the - // chunk. That situation precludes asserting that the - // current block is the last block in the chunk. Assert - // the lesser condition that the current block does not - // exceed the chunk. 
- assert(_summary_data.block_to_addr(last_block) <= - (_summary_data.chunk_to_addr(chunk_index) + - ParallelCompactData::ChunkSize), - "Chunk and block inconsistency"); - assert(last_offset <= right_offset, "Iteration over ran end"); - } -#endif - } -#ifdef ASSERT - if (PrintGCDetails && Verbose) { - if (_summary_data.chunk(chunk_index)->partial_obj_size() == 1) { - size_t first_block = - chunk_index / ParallelCompactData::BlocksPerChunk; - gclog_or_tty->print_cr("first_block " PTR_FORMAT - " _offset " PTR_FORMAT - "_first_is_start_bit %d", - first_block, - _summary_data.block(first_block)->raw_offset(), - _summary_data.block(first_block)->first_is_start_bit()); - } - } -#endif - } - } - DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(16);) -#endif // #if 0 -} - // This method should contain all heap-specific policy for invoking a full // collection. invoke_no_policy() will only attempt to compact the heap; it // will do nothing further. If we need to bail out for policy reasons, scavenge @@ -1950,18 +1954,9 @@ } } -bool ParallelCompactData::chunk_contains(size_t chunk_index, HeapWord* addr) { - size_t addr_chunk_index = addr_to_chunk_idx(addr); - return chunk_index == addr_chunk_index; -} - -bool ParallelCompactData::chunk_contains_block(size_t chunk_index, - size_t block_index) { - size_t first_block_in_chunk = chunk_index * BlocksPerChunk; - size_t last_block_in_chunk = (chunk_index + 1) * BlocksPerChunk - 1; - - return (first_block_in_chunk <= block_index) && - (block_index <= last_block_in_chunk); +bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) { + size_t addr_region_index = addr_to_region_idx(addr); + return region_index == addr_region_index; } // This method contains no policy. You should probably @@ -1970,7 +1965,7 @@ assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(ref_processor() != NULL, "Sanity"); - if (GC_locker::is_active()) { + if (GC_locker::check_active_before_gc()) { return; } @@ -1985,6 +1980,11 @@ PSPermGen* perm_gen = heap->perm_gen(); PSAdaptiveSizePolicy* size_policy = heap->size_policy(); + if (ZapUnusedHeapArea) { + // Save information needed to minimize mangling + heap->record_gen_tops_before_GC(); + } + _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes; // Make sure data structures are sane, make the heap parsable, and do other @@ -2031,6 +2031,7 @@ COMPILER2_PRESENT(DerivedPointerTable::clear()); ref_processor()->enable_discovery(); + ref_processor()->setup_policy(maximum_heap_compaction); bool marked_for_unloading = false; @@ -2046,39 +2047,9 @@ } #endif // #ifndef PRODUCT -#ifdef ASSERT - if (VerifyParallelOldWithMarkSweep && - (PSParallelCompact::total_invocations() % - VerifyParallelOldWithMarkSweepInterval) == 0) { - gclog_or_tty->print_cr("Verify marking with mark_sweep_phase1()"); - if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("mark_sweep_phase1:"); - } - // Clear the discovered lists so that discovered objects - // don't look like they have been discovered twice. 
- ref_processor()->clear_discovered_references(); - - PSMarkSweep::allocate_stacks(); - MemRegion mr = Universe::heap()->reserved_region(); - PSMarkSweep::ref_processor()->enable_discovery(); - PSMarkSweep::mark_sweep_phase1(maximum_heap_compaction); - } -#endif - bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc; summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc); -#ifdef ASSERT - if (VerifyParallelOldWithMarkSweep && - (PSParallelCompact::total_invocations() % - VerifyParallelOldWithMarkSweepInterval) == 0) { - if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("mark_sweep_phase2:"); - } - PSMarkSweep::mark_sweep_phase2(); - } -#endif - COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity")); COMPILER2_PRESENT(DerivedPointerTable::set_active(false)); @@ -2086,28 +2057,6 @@ // needed by the compaction for filling holes in the dense prefix. adjust_roots(); -#ifdef ASSERT - if (VerifyParallelOldWithMarkSweep && - (PSParallelCompact::total_invocations() % - VerifyParallelOldWithMarkSweepInterval) == 0) { - // Do a separate verify phase so that the verify - // code can use the the forwarding pointers to - // check the new pointer calculation. The restore_marks() - // has to be done before the real compact. - vmthread_cm->set_action(ParCompactionManager::VerifyUpdate); - compact_perm(vmthread_cm); - compact_serial(vmthread_cm); - vmthread_cm->set_action(ParCompactionManager::ResetObjects); - compact_perm(vmthread_cm); - compact_serial(vmthread_cm); - vmthread_cm->set_action(ParCompactionManager::UpdateAndCopy); - - // For debugging only - PSMarkSweep::restore_marks(); - PSMarkSweep::deallocate_stacks(); - } -#endif - compaction_start.update(); // Does the perm gen always have to be done serially because // klasses are used in the update of an object? @@ -2146,24 +2095,26 @@ ((gc_cause != GCCause::_java_lang_system_gc) || UseAdaptiveSizePolicyWithSystemGC)) { // Calculate optimal free space amounts - assert(young_gen->max_size() > - young_gen->from_space()->capacity_in_bytes() + - young_gen->to_space()->capacity_in_bytes(), - "Sizes of space in young gen are out-of-bounds"); - size_t max_eden_size = young_gen->max_size() - - young_gen->from_space()->capacity_in_bytes() - - young_gen->to_space()->capacity_in_bytes(); - size_policy->compute_generation_free_space(young_gen->used_in_bytes(), - young_gen->eden_space()->used_in_bytes(), - old_gen->used_in_bytes(), - perm_gen->used_in_bytes(), - young_gen->eden_space()->capacity_in_bytes(), - old_gen->max_gen_size(), - max_eden_size, - true /* full gc*/, - gc_cause); + assert(young_gen->max_size() > + young_gen->from_space()->capacity_in_bytes() + + young_gen->to_space()->capacity_in_bytes(), + "Sizes of space in young gen are out-of-bounds"); + size_t max_eden_size = young_gen->max_size() - + young_gen->from_space()->capacity_in_bytes() - + young_gen->to_space()->capacity_in_bytes(); + size_policy->compute_generation_free_space( + young_gen->used_in_bytes(), + young_gen->eden_space()->used_in_bytes(), + old_gen->used_in_bytes(), + perm_gen->used_in_bytes(), + young_gen->eden_space()->capacity_in_bytes(), + old_gen->max_gen_size(), + max_eden_size, + true /* full gc*/, + gc_cause); - heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes()); + heap->resize_old_gen( + size_policy->calculated_old_free_size_in_bytes()); // Don't resize the young generation at an major collection. 
A // desired young generation size may have been calculated but @@ -2236,6 +2187,11 @@ perm_gen->verify_object_start_array(); } + if (ZapUnusedHeapArea) { + old_gen->object_space()->check_mangled_unused_area_complete(); + perm_gen->object_space()->check_mangled_unused_area_complete(); + } + NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); collection_exit.update(); @@ -2304,9 +2260,14 @@ // Fill the unused part of the old gen. MutableSpace* const old_space = old_gen->object_space(); - MemRegion old_gen_unused(old_space->top(), old_space->end()); - if (!old_gen_unused.is_empty()) { - SharedHeap::fill_region_with_object(old_gen_unused); + HeapWord* const unused_start = old_space->top(); + size_t const unused_words = pointer_delta(old_space->end(), unused_start); + + if (unused_words > 0) { + if (unused_words < CollectedHeap::min_fill_size()) { + return false; // If the old gen cannot be filled, must give up. + } + CollectedHeap::fill_with_objects(unused_start, unused_words); } // Take the live data from eden and set both top and end in the old gen to @@ -2322,9 +2283,8 @@ // Update the object start array for the filler object and the data from eden. ObjectStartArray* const start_array = old_gen->start_array(); - HeapWord* const start = old_gen_unused.start(); - for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) { - start_array->allocate_block(addr); + for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) { + start_array->allocate_block(p); } // Could update the promoted average here, but it is not typically updated at @@ -2350,7 +2310,7 @@ ParallelScavengeHeap* heap = gc_heap(); uint parallel_gc_threads = heap->gc_task_manager()->workers(); - TaskQueueSetSuper* qset = ParCompactionManager::chunk_array(); + TaskQueueSetSuper* qset = ParCompactionManager::region_array(); ParallelTaskTerminator terminator(parallel_gc_threads, qset); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); @@ -2392,26 +2352,14 @@ // Process reference objects found during marking { TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty); - ReferencePolicy *soft_ref_policy; - if (maximum_heap_compaction) { - soft_ref_policy = new AlwaysClearPolicy(); - } else { -#ifdef COMPILER2 - soft_ref_policy = new LRUMaxHeapPolicy(); -#else - soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - } - assert(soft_ref_policy != NULL, "No soft reference policy"); if (ref_processor()->processing_is_mt()) { RefProcTaskExecutor task_executor; ref_processor()->process_discovered_references( - soft_ref_policy, is_alive_closure(), &mark_and_push_closure, - &follow_stack_closure, &task_executor); + is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, + &task_executor); } else { ref_processor()->process_discovered_references( - soft_ref_policy, is_alive_closure(), &mark_and_push_closure, - &follow_stack_closure, NULL); + is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL); } } @@ -2488,8 +2436,9 @@ move_and_update(cm, perm_space_id); } -void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q, - uint parallel_gc_threads) { +void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q, + uint parallel_gc_threads) +{ TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty); const unsigned int task_count = MAX2(parallel_gc_threads, 1U); @@ -2497,39 +2446,40 @@ q->enqueue(new DrainStacksCompactionTask()); } - // Find all chunks that are available (can be filled immediately) and + // Find 
all regions that are available (can be filled immediately) and // distribute them to the thread stacks. The iteration is done in reverse - // order (high to low) so the chunks will be removed in ascending order. + // order (high to low) so the regions will be removed in ascending order. const ParallelCompactData& sd = PSParallelCompact::summary_data(); - size_t fillable_chunks = 0; // A count for diagnostic purposes. - unsigned int which = 0; // The worker thread number. + size_t fillable_regions = 0; // A count for diagnostic purposes. + unsigned int which = 0; // The worker thread number. for (unsigned int id = to_space_id; id > perm_space_id; --id) { SpaceInfo* const space_info = _space_info + id; MutableSpace* const space = space_info->space(); HeapWord* const new_top = space_info->new_top(); - const size_t beg_chunk = sd.addr_to_chunk_idx(space_info->dense_prefix()); - const size_t end_chunk = sd.addr_to_chunk_idx(sd.chunk_align_up(new_top)); - assert(end_chunk > 0, "perm gen cannot be empty"); + const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix()); + const size_t end_region = + sd.addr_to_region_idx(sd.region_align_up(new_top)); + assert(end_region > 0, "perm gen cannot be empty"); - for (size_t cur = end_chunk - 1; cur >= beg_chunk; --cur) { - if (sd.chunk(cur)->claim_unsafe()) { + for (size_t cur = end_region - 1; cur >= beg_region; --cur) { + if (sd.region(cur)->claim_unsafe()) { ParCompactionManager* cm = ParCompactionManager::manager_array(which); cm->save_for_processing(cur); - if (TraceParallelOldGCCompactionPhase && Verbose) { - const size_t count_mod_8 = fillable_chunks & 7; - if (count_mod_8 == 0) gclog_or_tty->print("fillable: "); - gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur); - if (count_mod_8 == 7) gclog_or_tty->cr(); - } + if (TraceParallelOldGCCompactionPhase && Verbose) { + const size_t count_mod_8 = fillable_regions & 7; + if (count_mod_8 == 0) gclog_or_tty->print("fillable: "); + gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur); + if (count_mod_8 == 7) gclog_or_tty->cr(); + } - NOT_PRODUCT(++fillable_chunks;) + NOT_PRODUCT(++fillable_regions;) - // Assign chunks to threads in round-robin fashion. + // Assign regions to threads in round-robin fashion. if (++which == task_count) { which = 0; } @@ -2538,8 +2488,8 @@ } if (TraceParallelOldGCCompactionPhase) { - if (Verbose && (fillable_chunks & 7) != 0) gclog_or_tty->cr(); - gclog_or_tty->print_cr("%u initially fillable chunks", fillable_chunks); + if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr(); + gclog_or_tty->print_cr("%u initially fillable regions", fillable_regions); } } @@ -2552,95 +2502,92 @@ ParallelCompactData& sd = PSParallelCompact::summary_data(); // Iterate over all the spaces adding tasks for updating - // chunks in the dense prefix. Assume that 1 gc thread + // regions in the dense prefix. Assume that 1 gc thread // will work on opening the gaps and the remaining gc threads // will work on the dense prefix. - SpaceId space_id = old_space_id; - while (space_id != last_space_id) { + unsigned int space_id; + for (space_id = old_space_id; space_id < last_space_id; ++ space_id) { HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix(); const MutableSpace* const space = _space_info[space_id].space(); if (dense_prefix_end == space->bottom()) { // There is no dense prefix for this space. - space_id = next_compaction_space_id(space_id); continue; } - // The dense prefix is before this chunk. 
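enqueue_region_draining_tasks() above claims fillable regions from high to low and hands them to per-worker stacks in round-robin order. A standalone sketch of that distribution; the worker count and container types are illustrative.

#include <cassert>
#include <cstddef>
#include <vector>

// Distribute region indices [beg_region, end_region) to per-worker stacks in
// round-robin order, walking from high to low as the task setup above does.
std::vector<std::vector<size_t>> distribute_round_robin(size_t beg_region,
                                                        size_t end_region,
                                                        size_t workers) {
  assert(workers > 0);
  std::vector<std::vector<size_t>> stacks(workers);
  size_t which = 0;
  for (size_t cur = end_region; cur-- > beg_region; ) {   // high to low
    stacks[which].push_back(cur);
    if (++which == workers) {
      which = 0;
    }
  }
  return stacks;
}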
- size_t chunk_index_end_dense_prefix = - sd.addr_to_chunk_idx(dense_prefix_end); - ChunkData* const dense_prefix_cp = sd.chunk(chunk_index_end_dense_prefix); + // The dense prefix is before this region. + size_t region_index_end_dense_prefix = + sd.addr_to_region_idx(dense_prefix_end); + RegionData* const dense_prefix_cp = + sd.region(region_index_end_dense_prefix); assert(dense_prefix_end == space->end() || - dense_prefix_cp->available() || - dense_prefix_cp->claimed(), - "The chunk after the dense prefix should always be ready to fill"); + dense_prefix_cp->available() || + dense_prefix_cp->claimed(), + "The region after the dense prefix should always be ready to fill"); - size_t chunk_index_start = sd.addr_to_chunk_idx(space->bottom()); + size_t region_index_start = sd.addr_to_region_idx(space->bottom()); // Is there dense prefix work? - size_t total_dense_prefix_chunks = - chunk_index_end_dense_prefix - chunk_index_start; - // How many chunks of the dense prefix should be given to + size_t total_dense_prefix_regions = + region_index_end_dense_prefix - region_index_start; + // How many regions of the dense prefix should be given to // each thread? - if (total_dense_prefix_chunks > 0) { + if (total_dense_prefix_regions > 0) { uint tasks_for_dense_prefix = 1; if (UseParallelDensePrefixUpdate) { - if (total_dense_prefix_chunks <= - (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) { - // Don't over partition. This assumes that - // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value - // so there are not many chunks to process. - tasks_for_dense_prefix = parallel_gc_threads; - } else { - // Over partition - tasks_for_dense_prefix = parallel_gc_threads * - PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING; - } + if (total_dense_prefix_regions <= + (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) { + // Don't over partition. This assumes that + // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value + // so there are not many regions to process. + tasks_for_dense_prefix = parallel_gc_threads; + } else { + // Over partition + tasks_for_dense_prefix = parallel_gc_threads * + PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING; + } } - size_t chunks_per_thread = total_dense_prefix_chunks / + size_t regions_per_thread = total_dense_prefix_regions / tasks_for_dense_prefix; - // Give each thread at least 1 chunk. - if (chunks_per_thread == 0) { - chunks_per_thread = 1; + // Give each thread at least 1 region. + if (regions_per_thread == 0) { + regions_per_thread = 1; } for (uint k = 0; k < tasks_for_dense_prefix; k++) { - if (chunk_index_start >= chunk_index_end_dense_prefix) { + if (region_index_start >= region_index_end_dense_prefix) { break; } - // chunk_index_end is not processed - size_t chunk_index_end = MIN2(chunk_index_start + chunks_per_thread, - chunk_index_end_dense_prefix); - q->enqueue(new UpdateDensePrefixTask( - space_id, - chunk_index_start, - chunk_index_end)); - chunk_index_start = chunk_index_end; + // region_index_end is not processed + size_t region_index_end = MIN2(region_index_start + regions_per_thread, + region_index_end_dense_prefix); + q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id), + region_index_start, + region_index_end)); + region_index_start = region_index_end; } } // This gets any part of the dense prefix that did not // fit evenly. 
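The dense-prefix update above is partitioned into tasks of regions_per_thread regions each, over-partitioning by a small factor when there is plenty of work so that idle threads can pick up extra tasks, and handing any uneven remainder to one final task. A standalone sketch of the partitioning arithmetic; the over-partitioning factor of 4 is an assumption for illustration.

#include <cstddef>
#include <utility>
#include <vector>

// Split [beg_region, end_region) into per-task half-open ranges, mirroring
// the dense-prefix task setup: over-partition when there is plenty of work,
// and give every task at least one region.
std::vector<std::pair<size_t, size_t>>
partition_dense_prefix(size_t beg_region, size_t end_region, size_t threads) {
  const size_t over_partition_factor = 4;          // assumed small constant
  const size_t total = end_region - beg_region;
  std::vector<std::pair<size_t, size_t>> tasks;
  if (total == 0 || threads == 0) return tasks;

  const size_t task_count = (total <= threads * over_partition_factor)
                                ? threads
                                : threads * over_partition_factor;
  size_t per_task = total / task_count;
  if (per_task == 0) per_task = 1;                 // at least one region each

  size_t start = beg_region;
  for (size_t k = 0; k < task_count && start < end_region; ++k) {
    const size_t end =
        start + per_task < end_region ? start + per_task : end_region;
    tasks.push_back(std::make_pair(start, end));
    start = end;
  }
  if (start < end_region) {                        // whatever did not fit evenly
    tasks.push_back(std::make_pair(start, end_region));
  }
  return tasks;
}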
- if (chunk_index_start < chunk_index_end_dense_prefix) { - q->enqueue(new UpdateDensePrefixTask( - space_id, - chunk_index_start, - chunk_index_end_dense_prefix)); - } - space_id = next_compaction_space_id(space_id); - } // End tasks for dense prefix + if (region_index_start < region_index_end_dense_prefix) { + q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id), + region_index_start, + region_index_end_dense_prefix)); + } + } } -void PSParallelCompact::enqueue_chunk_stealing_tasks( - GCTaskQueue* q, - ParallelTaskTerminator* terminator_ptr, +void PSParallelCompact::enqueue_region_stealing_tasks( + GCTaskQueue* q, + ParallelTaskTerminator* terminator_ptr, uint parallel_gc_threads) { TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty); - // Once a thread has drained it's stack, it should try to steal chunks from + // Once a thread has drained it's stack, it should try to steal regions from // other threads. if (parallel_gc_threads > 1) { for (uint j = 0; j < parallel_gc_threads; j++) { - q->enqueue(new StealChunkCompactionTask(terminator_ptr)); + q->enqueue(new StealRegionCompactionTask(terminator_ptr)); } } } @@ -2655,13 +2602,13 @@ PSOldGen* old_gen = heap->old_gen(); old_gen->start_array()->reset(); uint parallel_gc_threads = heap->gc_task_manager()->workers(); - TaskQueueSetSuper* qset = ParCompactionManager::chunk_array(); + TaskQueueSetSuper* qset = ParCompactionManager::region_array(); ParallelTaskTerminator terminator(parallel_gc_threads, qset); GCTaskQueue* q = GCTaskQueue::create(); - enqueue_chunk_draining_tasks(q, parallel_gc_threads); + enqueue_region_draining_tasks(q, parallel_gc_threads); enqueue_dense_prefix_tasks(q, parallel_gc_threads); - enqueue_chunk_stealing_tasks(q, &terminator, parallel_gc_threads); + enqueue_region_stealing_tasks(q, &terminator, parallel_gc_threads); { TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty); @@ -2676,10 +2623,10 @@ // We have to release the barrier tasks! WaitForBarrierGCTask::destroy(fin); -#ifdef ASSERT - // Verify that all chunks have been processed before the deferred updates. +#ifdef ASSERT + // Verify that all regions have been processed before the deferred updates. // Note that perm_space_id is skipped; this type of verification is not - // valid until the perm gen is compacted by chunks. + // valid until the perm gen is compacted by regions. for (unsigned int id = old_space_id; id < last_space_id; ++id) { verify_complete(SpaceId(id)); } @@ -2698,42 +2645,42 @@ #ifdef ASSERT void PSParallelCompact::verify_complete(SpaceId space_id) { - // All Chunks between space bottom() to new_top() should be marked as filled - // and all Chunks between new_top() and top() should be available (i.e., + // All Regions between space bottom() to new_top() should be marked as filled + // and all Regions between new_top() and top() should be available (i.e., // should have been emptied). 
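A standalone model of that invariant check over an array of region states; the enum and function names are illustrative, not the HotSpot RegionData interface.

#include <cstddef>
#include <cstdio>
#include <vector>

enum RegionState { Available, Claimed, Filled };

// Verify that every region below new_top has been filled and every region
// between new_top and top has been emptied (is available again).
bool verify_complete(const std::vector<RegionState>& regions,
                     size_t new_top_region, size_t top_region) {
  bool ok = true;
  for (size_t r = 0; r < new_top_region; ++r) {
    if (regions[r] != Filled) {
      std::fprintf(stderr, "region %zu not filled\n", r);
      ok = false;
    }
  }
  for (size_t r = new_top_region; r < top_region; ++r) {
    if (regions[r] != Available) {
      std::fprintf(stderr, "region %zu not empty\n", r);
      ok = false;
    }
  }
  return ok;
}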
ParallelCompactData& sd = summary_data(); SpaceInfo si = _space_info[space_id]; - HeapWord* new_top_addr = sd.chunk_align_up(si.new_top()); - HeapWord* old_top_addr = sd.chunk_align_up(si.space()->top()); - const size_t beg_chunk = sd.addr_to_chunk_idx(si.space()->bottom()); - const size_t new_top_chunk = sd.addr_to_chunk_idx(new_top_addr); - const size_t old_top_chunk = sd.addr_to_chunk_idx(old_top_addr); + HeapWord* new_top_addr = sd.region_align_up(si.new_top()); + HeapWord* old_top_addr = sd.region_align_up(si.space()->top()); + const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom()); + const size_t new_top_region = sd.addr_to_region_idx(new_top_addr); + const size_t old_top_region = sd.addr_to_region_idx(old_top_addr); bool issued_a_warning = false; - size_t cur_chunk; - for (cur_chunk = beg_chunk; cur_chunk < new_top_chunk; ++cur_chunk) { - const ChunkData* const c = sd.chunk(cur_chunk); + size_t cur_region; + for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) { + const RegionData* const c = sd.region(cur_region); if (!c->completed()) { - warning("chunk " SIZE_FORMAT " not filled: " - "destination_count=" SIZE_FORMAT, - cur_chunk, c->destination_count()); + warning("region " SIZE_FORMAT " not filled: " + "destination_count=" SIZE_FORMAT, + cur_region, c->destination_count()); issued_a_warning = true; } } - for (cur_chunk = new_top_chunk; cur_chunk < old_top_chunk; ++cur_chunk) { - const ChunkData* const c = sd.chunk(cur_chunk); + for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) { + const RegionData* const c = sd.region(cur_region); if (!c->available()) { - warning("chunk " SIZE_FORMAT " not empty: " - "destination_count=" SIZE_FORMAT, - cur_chunk, c->destination_count()); + warning("region " SIZE_FORMAT " not empty: " + "destination_count=" SIZE_FORMAT, + cur_region, c->destination_count()); issued_a_warning = true; } } if (issued_a_warning) { - print_chunk_ranges(); + print_region_ranges(); } } #endif // #ifdef ASSERT @@ -2753,23 +2700,6 @@ young_gen->move_and_update(cm); } -void PSParallelCompact::follow_root(ParCompactionManager* cm, oop* p) { - assert(!Universe::heap()->is_in_reserved(p), - "roots shouldn't be things within the heap"); -#ifdef VALIDATE_MARK_SWEEP - if (ValidateMarkSweep) { - guarantee(!_root_refs_stack->contains(p), "should only be in here once"); - _root_refs_stack->push(p); - } -#endif - oop m = *p; - if (m != NULL && mark_bitmap()->is_unmarked(m)) { - if (mark_obj(m)) { - m->follow_contents(cm); // Follow contents of the marked object - } - } - follow_stack(cm); -} void PSParallelCompact::follow_stack(ParCompactionManager* cm) { while(!cm->overflow_stack()->is_empty()) { @@ -2809,7 +2739,7 @@ #ifdef VALIDATE_MARK_SWEEP -void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot) { +void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) { if (!ValidateMarkSweep) return; @@ -2823,9 +2753,9 @@ if (index != -1) { int l = _root_refs_stack->length(); if (l > 0 && l - 1 != index) { - oop* last = _root_refs_stack->pop(); - assert(last != p, "should be different"); - _root_refs_stack->at_put(index, last); + void* last = _root_refs_stack->pop(); + assert(last != p, "should be different"); + _root_refs_stack->at_put(index, last); } else { _root_refs_stack->remove(p); } @@ -2834,7 +2764,7 @@ } -void PSParallelCompact::check_adjust_pointer(oop* p) { +void PSParallelCompact::check_adjust_pointer(void* p) { _adjusted_pointers->push(p); } @@ -2842,7 +2772,8 @@ class 
AdjusterTracker: public OopClosure { public: AdjusterTracker() {}; - void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); } + void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); } + void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); } }; @@ -2950,65 +2881,47 @@ } #endif //VALIDATE_MARK_SWEEP -void PSParallelCompact::adjust_pointer(oop* p, bool isroot) { - oop obj = *p; - VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL); - if (obj != NULL) { - oop new_pointer = (oop) summary_data().calc_new_pointer(obj); - assert(new_pointer != NULL || // is forwarding ptr? - obj->is_shared(), // never forwarded? - "should have a new location"); - // Just always do the update unconditionally? - if (new_pointer != NULL) { - *p = new_pointer; - assert(Universe::heap()->is_in_reserved(new_pointer), - "should be in object space"); - VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer); - } - } - VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot)); -} - -// Update interior oops in the ranges of chunks [beg_chunk, end_chunk). +// Update interior oops in the ranges of regions [beg_region, end_region). void PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm, - SpaceId space_id, - size_t beg_chunk, - size_t end_chunk) { + SpaceId space_id, + size_t beg_region, + size_t end_region) { ParallelCompactData& sd = summary_data(); ParMarkBitMap* const mbm = mark_bitmap(); - HeapWord* beg_addr = sd.chunk_to_addr(beg_chunk); - HeapWord* const end_addr = sd.chunk_to_addr(end_chunk); - assert(beg_chunk <= end_chunk, "bad chunk range"); + HeapWord* beg_addr = sd.region_to_addr(beg_region); + HeapWord* const end_addr = sd.region_to_addr(end_region); + assert(beg_region <= end_region, "bad region range"); assert(end_addr <= dense_prefix(space_id), "not in the dense prefix"); -#ifdef ASSERT - // Claim the chunks to avoid triggering an assert when they are marked as +#ifdef ASSERT + // Claim the regions to avoid triggering an assert when they are marked as // filled. - for (size_t claim_chunk = beg_chunk; claim_chunk < end_chunk; ++claim_chunk) { - assert(sd.chunk(claim_chunk)->claim_unsafe(), "claim() failed"); + for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) { + assert(sd.region(claim_region)->claim_unsafe(), "claim() failed"); } #endif // #ifdef ASSERT if (beg_addr != space(space_id)->bottom()) { // Find the first live object or block of dead space that *starts* in this - // range of chunks. If a partial object crosses onto the chunk, skip it; it - // will be marked for 'deferred update' when the object head is processed. - // If dead space crosses onto the chunk, it is also skipped; it will be - // filled when the prior chunk is processed. If neither of those apply, the - // first word in the chunk is the start of a live object or dead space. + // range of regions. If a partial object crosses onto the region, skip it; + // it will be marked for 'deferred update' when the object head is + // processed. If dead space crosses onto the region, it is also skipped; it + // will be filled when the prior region is processed. If neither of those + // apply, the first word in the region is the start of a live object or dead + // space. 
assert(beg_addr > space(space_id)->bottom(), "sanity"); - const ChunkData* const cp = sd.chunk(beg_chunk); + const RegionData* const cp = sd.region(beg_region); if (cp->partial_obj_size() != 0) { - beg_addr = sd.partial_obj_end(beg_chunk); + beg_addr = sd.partial_obj_end(beg_region); } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) { beg_addr = mbm->find_obj_beg(beg_addr, end_addr); } } if (beg_addr < end_addr) { - // A live object or block of dead space starts in this range of Chunks. + // A live object or block of dead space starts in this range of Regions. HeapWord* const dense_prefix_end = dense_prefix(space_id); // Create closures and iterate. @@ -3022,10 +2935,10 @@ } } - // Mark the chunks as filled. - ChunkData* const beg_cp = sd.chunk(beg_chunk); - ChunkData* const end_cp = sd.chunk(end_chunk); - for (ChunkData* cp = beg_cp; cp < end_cp; ++cp) { + // Mark the regions as filled. + RegionData* const beg_cp = sd.region(beg_region); + RegionData* const end_cp = sd.region(end_region); + for (RegionData* cp = beg_cp; cp < end_cp; ++cp) { cp->set_completed(); } } @@ -3057,13 +2970,13 @@ const MutableSpace* const space = space_info->space(); assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set"); HeapWord* const beg_addr = space_info->dense_prefix(); - HeapWord* const end_addr = sd.chunk_align_up(space_info->new_top()); + HeapWord* const end_addr = sd.region_align_up(space_info->new_top()); - const ChunkData* const beg_chunk = sd.addr_to_chunk_ptr(beg_addr); - const ChunkData* const end_chunk = sd.addr_to_chunk_ptr(end_addr); - const ChunkData* cur_chunk; - for (cur_chunk = beg_chunk; cur_chunk < end_chunk; ++cur_chunk) { - HeapWord* const addr = cur_chunk->deferred_obj_addr(); + const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr); + const RegionData* const end_region = sd.addr_to_region_ptr(end_addr); + const RegionData* cur_region; + for (cur_region = beg_region; cur_region < end_region; ++cur_region) { + HeapWord* const addr = cur_region->deferred_obj_addr(); if (addr != NULL) { if (start_array != NULL) { start_array->allocate_block(addr); @@ -3107,47 +3020,55 @@ return m->bit_to_addr(cur_beg); } -HeapWord* -PSParallelCompact::first_src_addr(HeapWord* const dest_addr, - size_t src_chunk_idx) -{ - ParMarkBitMap* const bitmap = mark_bitmap(); - const ParallelCompactData& sd = summary_data(); - const size_t ChunkSize = ParallelCompactData::ChunkSize; - - assert(sd.is_chunk_aligned(dest_addr), "not aligned"); - - const ChunkData* const src_chunk_ptr = sd.chunk(src_chunk_idx); - const size_t partial_obj_size = src_chunk_ptr->partial_obj_size(); - HeapWord* const src_chunk_destination = src_chunk_ptr->destination(); - - assert(dest_addr >= src_chunk_destination, "wrong src chunk"); - assert(src_chunk_ptr->data_size() > 0, "src chunk cannot be empty"); +HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr, + SpaceId src_space_id, + size_t src_region_idx) +{ + assert(summary_data().is_region_aligned(dest_addr), "not aligned"); + + const SplitInfo& split_info = _space_info[src_space_id].split_info(); + if (split_info.dest_region_addr() == dest_addr) { + // The partial object ending at the split point contains the first word to + // be copied to dest_addr. 
+ return split_info.first_src_addr(); + } - HeapWord* const src_chunk_beg = sd.chunk_to_addr(src_chunk_idx); - HeapWord* const src_chunk_end = src_chunk_beg + ChunkSize; + const ParallelCompactData& sd = summary_data(); + ParMarkBitMap* const bitmap = mark_bitmap(); + const size_t RegionSize = ParallelCompactData::RegionSize; - HeapWord* addr = src_chunk_beg; - if (dest_addr == src_chunk_destination) { - // Return the first live word in the source chunk. + assert(sd.is_region_aligned(dest_addr), "not aligned"); + const RegionData* const src_region_ptr = sd.region(src_region_idx); + const size_t partial_obj_size = src_region_ptr->partial_obj_size(); + HeapWord* const src_region_destination = src_region_ptr->destination(); + + assert(dest_addr >= src_region_destination, "wrong src region"); + assert(src_region_ptr->data_size() > 0, "src region cannot be empty"); + + HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx); + HeapWord* const src_region_end = src_region_beg + RegionSize; + + HeapWord* addr = src_region_beg; + if (dest_addr == src_region_destination) { + // Return the first live word in the source region. if (partial_obj_size == 0) { - addr = bitmap->find_obj_beg(addr, src_chunk_end); - assert(addr < src_chunk_end, "no objects start in src chunk"); + addr = bitmap->find_obj_beg(addr, src_region_end); + assert(addr < src_region_end, "no objects start in src region"); } return addr; } // Must skip some live data. - size_t words_to_skip = dest_addr - src_chunk_destination; - assert(src_chunk_ptr->data_size() > words_to_skip, "wrong src chunk"); + size_t words_to_skip = dest_addr - src_region_destination; + assert(src_region_ptr->data_size() > words_to_skip, "wrong src region"); if (partial_obj_size >= words_to_skip) { // All the live words to skip are part of the partial object. addr += words_to_skip; if (partial_obj_size == words_to_skip) { // Find the first live word past the partial object. - addr = bitmap->find_obj_beg(addr, src_chunk_end); - assert(addr < src_chunk_end, "wrong src chunk"); + addr = bitmap->find_obj_beg(addr, src_region_end); + assert(addr < src_region_end, "wrong src region"); } return addr; } @@ -3158,63 +3079,79 @@ addr += partial_obj_size; } - // Skip over live words due to objects that start in the chunk. - addr = skip_live_words(addr, src_chunk_end, words_to_skip); - assert(addr < src_chunk_end, "wrong src chunk"); + // Skip over live words due to objects that start in the region. 
+ addr = skip_live_words(addr, src_region_end, words_to_skip); + assert(addr < src_region_end, "wrong src region"); return addr; } void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm, - size_t beg_chunk, - HeapWord* end_addr) + SpaceId src_space_id, + size_t beg_region, + HeapWord* end_addr) { ParallelCompactData& sd = summary_data(); - ChunkData* const beg = sd.chunk(beg_chunk); - HeapWord* const end_addr_aligned_up = sd.chunk_align_up(end_addr); - ChunkData* const end = sd.addr_to_chunk_ptr(end_addr_aligned_up); - size_t cur_idx = beg_chunk; - for (ChunkData* cur = beg; cur < end; ++cur, ++cur_idx) { - assert(cur->data_size() > 0, "chunk must have live data"); + +#ifdef ASSERT + MutableSpace* const src_space = _space_info[src_space_id].space(); + HeapWord* const beg_addr = sd.region_to_addr(beg_region); + assert(src_space->contains(beg_addr) || beg_addr == src_space->end(), + "src_space_id does not match beg_addr"); + assert(src_space->contains(end_addr) || end_addr == src_space->end(), + "src_space_id does not match end_addr"); +#endif // #ifdef ASSERT + + RegionData* const beg = sd.region(beg_region); + RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr)); + + // Regions up to new_top() are enqueued if they become available. + HeapWord* const new_top = _space_info[src_space_id].new_top(); + RegionData* const enqueue_end = + sd.addr_to_region_ptr(sd.region_align_up(new_top)); + + for (RegionData* cur = beg; cur < end; ++cur) { + assert(cur->data_size() > 0, "region must have live data"); cur->decrement_destination_count(); - if (cur_idx <= cur->source_chunk() && cur->available() && cur->claim()) { - cm->save_for_processing(cur_idx); + if (cur < enqueue_end && cur->available() && cur->claim()) { + cm->save_for_processing(sd.region(cur)); } } } -size_t PSParallelCompact::next_src_chunk(MoveAndUpdateClosure& closure, - SpaceId& src_space_id, - HeapWord*& src_space_top, - HeapWord* end_addr) +size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure, + SpaceId& src_space_id, + HeapWord*& src_space_top, + HeapWord* end_addr) { - typedef ParallelCompactData::ChunkData ChunkData; + typedef ParallelCompactData::RegionData RegionData; ParallelCompactData& sd = PSParallelCompact::summary_data(); - const size_t chunk_size = ParallelCompactData::ChunkSize; + const size_t region_size = ParallelCompactData::RegionSize; - size_t src_chunk_idx = 0; + size_t src_region_idx = 0; - // Skip empty chunks (if any) up to the top of the space. - HeapWord* const src_aligned_up = sd.chunk_align_up(end_addr); - ChunkData* src_chunk_ptr = sd.addr_to_chunk_ptr(src_aligned_up); - HeapWord* const top_aligned_up = sd.chunk_align_up(src_space_top); - const ChunkData* const top_chunk_ptr = sd.addr_to_chunk_ptr(top_aligned_up); - while (src_chunk_ptr < top_chunk_ptr && src_chunk_ptr->data_size() == 0) { - ++src_chunk_ptr; + // Skip empty regions (if any) up to the top of the space. + HeapWord* const src_aligned_up = sd.region_align_up(end_addr); + RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up); + HeapWord* const top_aligned_up = sd.region_align_up(src_space_top); + const RegionData* const top_region_ptr = + sd.addr_to_region_ptr(top_aligned_up); + while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) { + ++src_region_ptr; } - if (src_chunk_ptr < top_chunk_ptr) { - // The next source chunk is in the current space. Update src_chunk_idx and - // the source address to match src_chunk_ptr. 
- src_chunk_idx = sd.chunk(src_chunk_ptr); - HeapWord* const src_chunk_addr = sd.chunk_to_addr(src_chunk_idx); - if (src_chunk_addr > closure.source()) { - closure.set_source(src_chunk_addr); + if (src_region_ptr < top_region_ptr) { + // The next source region is in the current space. Update src_region_idx + // and the source address to match src_region_ptr. + src_region_idx = sd.region(src_region_ptr); + HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx); + if (src_region_addr > closure.source()) { + closure.set_source(src_region_addr); } - return src_chunk_idx; + return src_region_idx; } - // Switch to a new source space and find the first non-empty chunk. + // Switch to a new source space and find the first non-empty region. unsigned int space_id = src_space_id + 1; assert(space_id < last_space_id, "not enough spaces"); @@ -3223,65 +3160,65 @@ do { MutableSpace* space = _space_info[space_id].space(); HeapWord* const bottom = space->bottom(); - const ChunkData* const bottom_cp = sd.addr_to_chunk_ptr(bottom); + const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom); // Iterate over the spaces that do not compact into themselves. if (bottom_cp->destination() != bottom) { - HeapWord* const top_aligned_up = sd.chunk_align_up(space->top()); - const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up); + HeapWord* const top_aligned_up = sd.region_align_up(space->top()); + const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up); - for (const ChunkData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) { - if (src_cp->live_obj_size() > 0) { - // Found it. - assert(src_cp->destination() == destination, - "first live obj in the space must match the destination"); - assert(src_cp->partial_obj_size() == 0, - "a space cannot begin with a partial obj"); - - src_space_id = SpaceId(space_id); - src_space_top = space->top(); - const size_t src_chunk_idx = sd.chunk(src_cp); - closure.set_source(sd.chunk_to_addr(src_chunk_idx)); - return src_chunk_idx; - } else { - assert(src_cp->data_size() == 0, "sanity"); - } + for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) { + if (src_cp->live_obj_size() > 0) { + // Found it. + assert(src_cp->destination() == destination, + "first live obj in the space must match the destination"); + assert(src_cp->partial_obj_size() == 0, + "a space cannot begin with a partial obj"); + + src_space_id = SpaceId(space_id); + src_space_top = space->top(); + const size_t src_region_idx = sd.region(src_cp); + closure.set_source(sd.region_to_addr(src_region_idx)); + return src_region_idx; + } else { + assert(src_cp->data_size() == 0, "sanity"); + } } } } while (++space_id < last_space_id); - assert(false, "no source chunk was found"); + assert(false, "no source region was found"); return 0; } -void PSParallelCompact::fill_chunk(ParCompactionManager* cm, size_t chunk_idx) +void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx) { typedef ParMarkBitMap::IterationStatus IterationStatus; - const size_t ChunkSize = ParallelCompactData::ChunkSize; + const size_t RegionSize = ParallelCompactData::RegionSize; ParMarkBitMap* const bitmap = mark_bitmap(); ParallelCompactData& sd = summary_data(); - ChunkData* const chunk_ptr = sd.chunk(chunk_idx); + RegionData* const region_ptr = sd.region(region_idx); // Get the items needed to construct the closure. 
- HeapWord* dest_addr = sd.chunk_to_addr(chunk_idx); + HeapWord* dest_addr = sd.region_to_addr(region_idx); SpaceId dest_space_id = space_id(dest_addr); ObjectStartArray* start_array = _space_info[dest_space_id].start_array(); HeapWord* new_top = _space_info[dest_space_id].new_top(); assert(dest_addr < new_top, "sanity"); - const size_t words = MIN2(pointer_delta(new_top, dest_addr), ChunkSize); + const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize); - // Get the source chunk and related info. - size_t src_chunk_idx = chunk_ptr->source_chunk(); - SpaceId src_space_id = space_id(sd.chunk_to_addr(src_chunk_idx)); + // Get the source region and related info. + size_t src_region_idx = region_ptr->source_region(); + SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx)); HeapWord* src_space_top = _space_info[src_space_id].space()->top(); MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words); - closure.set_source(first_src_addr(dest_addr, src_chunk_idx)); + closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx)); - // Adjust src_chunk_idx to prepare for decrementing destination counts (the - // destination count is not decremented when a chunk is copied to itself). - if (src_chunk_idx == chunk_idx) { - src_chunk_idx += 1; + // Adjust src_region_idx to prepare for decrementing destination counts (the + // destination count is not decremented when a region is copied to itself). + if (src_region_idx == region_idx) { + src_region_idx += 1; } if (bitmap->is_unmarked(closure.source())) { @@ -3291,32 +3228,34 @@ HeapWord* const old_src_addr = closure.source(); closure.copy_partial_obj(); if (closure.is_full()) { - decrement_destination_counts(cm, src_chunk_idx, closure.source()); - chunk_ptr->set_deferred_obj_addr(NULL); - chunk_ptr->set_completed(); + decrement_destination_counts(cm, src_space_id, src_region_idx, + closure.source()); + region_ptr->set_deferred_obj_addr(NULL); + region_ptr->set_completed(); return; } - HeapWord* const end_addr = sd.chunk_align_down(closure.source()); - if (sd.chunk_align_down(old_src_addr) != end_addr) { - // The partial object was copied from more than one source chunk. - decrement_destination_counts(cm, src_chunk_idx, end_addr); + HeapWord* const end_addr = sd.region_align_down(closure.source()); + if (sd.region_align_down(old_src_addr) != end_addr) { + // The partial object was copied from more than one source region. + decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr); - // Move to the next source chunk, possibly switching spaces as well. All + // Move to the next source region, possibly switching spaces as well. All // args except end_addr may be modified. - src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top, - end_addr); + src_region_idx = next_src_region(closure, src_space_id, src_space_top, + end_addr); } } do { HeapWord* const cur_addr = closure.source(); - HeapWord* const end_addr = MIN2(sd.chunk_align_up(cur_addr + 1), - src_space_top); + HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1), + src_space_top); IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr); if (status == ParMarkBitMap::incomplete) { - // The last obj that starts in the source chunk does not end in the chunk. + // The last obj that starts in the source region does not end in the + // region. 
assert(closure.source() < end_addr, "sanity") HeapWord* const obj_beg = closure.source(); HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(), @@ -3335,28 +3274,30 @@ if (status == ParMarkBitMap::would_overflow) { // The last object did not fit. Note that interior oop updates were - // deferred, then copy enough of the object to fill the chunk. - chunk_ptr->set_deferred_obj_addr(closure.destination()); + // deferred, then copy enough of the object to fill the region. + region_ptr->set_deferred_obj_addr(closure.destination()); status = closure.copy_until_full(); // copies from closure.source() - decrement_destination_counts(cm, src_chunk_idx, closure.source()); - chunk_ptr->set_completed(); + decrement_destination_counts(cm, src_space_id, src_region_idx, + closure.source()); + region_ptr->set_completed(); return; } if (status == ParMarkBitMap::full) { - decrement_destination_counts(cm, src_chunk_idx, closure.source()); - chunk_ptr->set_deferred_obj_addr(NULL); - chunk_ptr->set_completed(); + decrement_destination_counts(cm, src_space_id, src_region_idx, + closure.source()); + region_ptr->set_deferred_obj_addr(NULL); + region_ptr->set_completed(); return; } - decrement_destination_counts(cm, src_chunk_idx, end_addr); + decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr); - // Move to the next source chunk, possibly switching spaces as well. All + // Move to the next source region, possibly switching spaces as well. All // args except end_addr may be modified. - src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top, - end_addr); + src_region_idx = next_src_region(closure, src_space_id, src_space_top, + end_addr); } while (true); } @@ -3388,15 +3329,15 @@ } #endif - const size_t beg_chunk = sd.addr_to_chunk_idx(beg_addr); - const size_t dp_chunk = sd.addr_to_chunk_idx(dp_addr); - if (beg_chunk < dp_chunk) { - update_and_deadwood_in_dense_prefix(cm, space_id, beg_chunk, dp_chunk); + const size_t beg_region = sd.addr_to_region_idx(beg_addr); + const size_t dp_region = sd.addr_to_region_idx(dp_addr); + if (beg_region < dp_region) { + update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region); } - // The destination of the first live object that starts in the chunk is one - // past the end of the partial object entering the chunk (if any). - HeapWord* const dest_addr = sd.partial_obj_end(dp_chunk); + // The destination of the first live object that starts in the region is one + // past the end of the partial object entering the region (if any). + HeapWord* const dest_addr = sd.partial_obj_end(dp_region); HeapWord* const new_top = _space_info[space_id].new_top(); assert(new_top >= dest_addr, "bad new_top value"); const size_t words = pointer_delta(new_top, dest_addr); @@ -3430,7 +3371,7 @@ ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full() { if (source() != destination()) { - assert(source() > destination(), "must copy to the left"); + DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());) Copy::aligned_conjoint_words(source(), destination(), words_remaining()); } update_state(words_remaining()); @@ -3451,7 +3392,7 @@ // This test is necessary; if omitted, the pointer updates to a partial object // that crosses the dense prefix boundary could be overwritten. 
if (source() != destination()) { - assert(source() > destination(), "must copy to the left"); + DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());) Copy::aligned_conjoint_words(source(), destination(), words); } update_state(words); @@ -3476,7 +3417,7 @@ } if (destination() != source()) { - assert(destination() < source(), "must copy to the left"); + DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());) Copy::aligned_conjoint_words(source(), destination(), words); } @@ -3505,172 +3446,6 @@ return ParMarkBitMap::incomplete; } -BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm, - ParCompactionManager* cm, - size_t chunk_index) : - ParMarkBitMapClosure(mbm, cm), - _live_data_left(0), - _cur_block(0) { - _chunk_start = - PSParallelCompact::summary_data().chunk_to_addr(chunk_index); - _chunk_end = - PSParallelCompact::summary_data().chunk_to_addr(chunk_index) + - ParallelCompactData::ChunkSize; - _chunk_index = chunk_index; - _cur_block = - PSParallelCompact::summary_data().addr_to_block_idx(_chunk_start); -} - -bool BitBlockUpdateClosure::chunk_contains_cur_block() { - return ParallelCompactData::chunk_contains_block(_chunk_index, _cur_block); -} - -void BitBlockUpdateClosure::reset_chunk(size_t chunk_index) { - DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(7);) - ParallelCompactData& sd = PSParallelCompact::summary_data(); - _chunk_index = chunk_index; - _live_data_left = 0; - _chunk_start = sd.chunk_to_addr(chunk_index); - _chunk_end = sd.chunk_to_addr(chunk_index) + ParallelCompactData::ChunkSize; - - // The first block in this chunk - size_t first_block = sd.addr_to_block_idx(_chunk_start); - size_t partial_live_size = sd.chunk(chunk_index)->partial_obj_size(); - - // Set the offset to 0. By definition it should have that value - // but it may have been written while processing an earlier chunk. - if (partial_live_size == 0) { - // No live object extends onto the chunk. The first bit - // in the bit map for the first chunk must be a start bit. - // Although there may not be any marked bits, it is safe - // to set it as a start bit. - sd.block(first_block)->set_start_bit_offset(0); - sd.block(first_block)->set_first_is_start_bit(true); - } else if (sd.partial_obj_ends_in_block(first_block)) { - sd.block(first_block)->set_end_bit_offset(0); - sd.block(first_block)->set_first_is_start_bit(false); - } else { - // The partial object extends beyond the first block. - // There is no object starting in the first block - // so the offset and bit parity are not needed. - // Set the the bit parity to start bit so assertions - // work when not bit is found. - sd.block(first_block)->set_end_bit_offset(0); - sd.block(first_block)->set_first_is_start_bit(false); - } - _cur_block = first_block; -#ifdef ASSERT - if (sd.block(first_block)->first_is_start_bit()) { - assert(!sd.partial_obj_ends_in_block(first_block), - "Partial object cannot end in first block"); - } - - if (PrintGCDetails && Verbose) { - if (partial_live_size == 1) { - gclog_or_tty->print_cr("first_block " PTR_FORMAT - " _offset " PTR_FORMAT - " _first_is_start_bit %d", - first_block, - sd.block(first_block)->raw_offset(), - sd.block(first_block)->first_is_start_bit()); - } - } -#endif - DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(17);) -} - -// This method is called when a object has been found (both beginning -// and end of the object) in the range of iteration. This method is -// calculating the words of live data to the left of a block. 
That live -// data includes any object starting to the left of the block (i.e., -// the live-data-to-the-left of block AAA will include the full size -// of any object entering AAA). - -ParMarkBitMapClosure::IterationStatus -BitBlockUpdateClosure::do_addr(HeapWord* addr, size_t words) { - // add the size to the block data. - HeapWord* obj = addr; - ParallelCompactData& sd = PSParallelCompact::summary_data(); - - assert(bitmap()->obj_size(obj) == words, "bad size"); - assert(_chunk_start <= obj, "object is not in chunk"); - assert(obj + words <= _chunk_end, "object is not in chunk"); - - // Update the live data to the left - size_t prev_live_data_left = _live_data_left; - _live_data_left = _live_data_left + words; - - // Is this object in the current block. - size_t block_of_obj = sd.addr_to_block_idx(obj); - size_t block_of_obj_last = sd.addr_to_block_idx(obj + words - 1); - HeapWord* block_of_obj_last_addr = sd.block_to_addr(block_of_obj_last); - if (_cur_block < block_of_obj) { - - // - // No object crossed the block boundary and this object was found - // on the other side of the block boundary. Update the offset for - // the new block with the data size that does not include this object. - // - // The first bit in block_of_obj is a start bit except in the - // case where the partial object for the chunk extends into - // this block. - if (sd.partial_obj_ends_in_block(block_of_obj)) { - sd.block(block_of_obj)->set_end_bit_offset(prev_live_data_left); - } else { - sd.block(block_of_obj)->set_start_bit_offset(prev_live_data_left); - } - - // Does this object pass beyond the its block? - if (block_of_obj < block_of_obj_last) { - // Object crosses block boundary. Two blocks need to be udpated: - // the current block where the object started - // the block where the object ends - // - // The offset for blocks with no objects starting in them - // (e.g., blocks between _cur_block and block_of_obj_last) - // should not be needed. - // Note that block_of_obj_last may be in another chunk. If so, - // it should be overwritten later. This is a problem (writting - // into a block in a later chunk) for parallel execution. - assert(obj < block_of_obj_last_addr, - "Object should start in previous block"); - - // obj is crossing into block_of_obj_last so the first bit - // is and end bit. - sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left); - - _cur_block = block_of_obj_last; - } else { - // _first_is_start_bit has already been set correctly - // in the if-then-else above so don't reset it here. - _cur_block = block_of_obj; - } - } else { - // The current block only changes if the object extends beyound - // the block it starts in. - // - // The object starts in the current block. - // Does this object pass beyond the end of it? - if (block_of_obj < block_of_obj_last) { - // Object crosses block boundary. - // See note above on possible blocks between block_of_obj and - // block_of_obj_last - assert(obj < block_of_obj_last_addr, - "Object should start in previous block"); - - sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left); - - _cur_block = block_of_obj_last; - } - } - - // Return incomplete if there are more blocks to be done. - if (chunk_contains_cur_block()) { - return ParMarkBitMap::incomplete; - } - return ParMarkBitMap::complete; -} - // Verify the new location using the forwarding pointer // from MarkSweep::mark_sweep_phase2(). Set the mark_word // to the initial value. 
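The region-filling protocol in the hunks above (fill_region() copying into a destination region, then decrement_destination_counts() releasing source regions back to the compaction manager) can be summarized with a small standalone C++ sketch. All names and types below are simplified stand-ins rather than the HotSpot classes; the sketch only illustrates the destination-count/claim cycle under those assumptions:

#include <atomic>
#include <cstddef>
#include <deque>
#include <vector>

// Simplified stand-in for ParallelCompactData::RegionData.  The destination
// count is the number of regions that still need data copied out of this
// region; when it reaches zero the region is available and may be claimed.
struct RegionStub {
  std::atomic<unsigned> destination_count{0};
  std::atomic<bool>     claimed{false};

  bool available() const { return destination_count.load() == 0; }
  bool claim() {
    bool expected = false;
    return claimed.compare_exchange_strong(expected, true);
  }
};

// Analogous to decrement_destination_counts(): after data has been copied out
// of source regions [beg, end), drop each region's count and, if the region
// becomes empty, claim it and queue it for filling (the role played by
// cm->save_for_processing() in the patch).
void decrement_and_enqueue(std::vector<RegionStub>& regions,
                           std::size_t beg, std::size_t end,
                           std::deque<std::size_t>& work_queue) {
  for (std::size_t idx = beg; idx < end; ++idx) {
    regions[idx].destination_count.fetch_sub(1);
    if (regions[idx].available() && regions[idx].claim()) {
      work_queue.push_back(idx);  // a worker will later fill it and mark it completed
    }
  }
}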
@@ -3712,43 +3487,3 @@ summary_data().calc_new_pointer(Universe::intArrayKlassObj()); } -// The initial implementation of this method created a field -// _next_compaction_space_id in SpaceInfo and initialized -// that field in SpaceInfo::initialize_space_info(). That -// required that _next_compaction_space_id be declared a -// SpaceId in SpaceInfo and that would have required that -// either SpaceId be declared in a separate class or that -// it be declared in SpaceInfo. It didn't seem consistent -// to declare it in SpaceInfo (didn't really fit logically). -// Alternatively, defining a separate class to define SpaceId -// seem excessive. This implementation is simple and localizes -// the knowledge. - -PSParallelCompact::SpaceId -PSParallelCompact::next_compaction_space_id(SpaceId id) { - assert(id < last_space_id, "id out of range"); - switch (id) { - case perm_space_id : - return last_space_id; - case old_space_id : - return eden_space_id; - case eden_space_id : - return from_space_id; - case from_space_id : - return to_space_id; - case to_space_id : - return last_space_id; - default: - assert(false, "Bad space id"); - return last_space_id; - } -} - -// Here temporarily for debugging -#ifdef ASSERT - size_t ParallelCompactData::block_idx(BlockData* block) { - size_t index = pointer_delta(block, - PSParallelCompact::summary_data()._block_data, sizeof(BlockData)); - return index; - } -#endif --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp 2009-08-01 04:11:06.077077327 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp 2009-08-01 04:11:05.992221087 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psParallelCompact.hpp 1.48 07/10/04 10:49:38 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,123 @@ class MoveAndUpdateClosure; class RefProcTaskExecutor; +// The SplitInfo class holds the information needed to 'split' a source region +// so that the live data can be copied to two destination *spaces*. Normally, +// all the live data in a region is copied to a single destination space (e.g., +// everything live in a region in eden is copied entirely into the old gen). +// However, when the heap is nearly full, all the live data in eden may not fit +// into the old gen. Copying only some of the regions from eden to old gen +// requires finding a region that does not contain a partial object (i.e., no +// live object crosses the region boundary) somewhere near the last object that +// does fit into the old gen. Since it's not always possible to find such a +// region, splitting is necessary for predictable behavior. +// +// A region is always split at the end of the partial object. This avoids +// additional tests when calculating the new location of a pointer, which is a +// very hot code path. The partial object and everything to its left will be +// copied to another space (call it dest_space_1). The live data to the right +// of the partial object will be copied either within the space itself, or to a +// different destination space (distinct from dest_space_1). 
+// +// Split points are identified during the summary phase, when region +// destinations are computed: data about the split, including the +// partial_object_size, is recorded in a SplitInfo record and the +// partial_object_size field in the summary data is set to zero. The zeroing is +// possible (and necessary) since the partial object will move to a different +// destination space than anything to its right, thus the partial object should +// not affect the locations of any objects to its right. +// +// The recorded data is used during the compaction phase, but only rarely: when +// the partial object on the split region will be copied across a destination +// region boundary. This test is made once each time a region is filled, and is +// a simple address comparison, so the overhead is negligible (see +// PSParallelCompact::first_src_addr()). +// +// Notes: +// +// Only regions with partial objects are split; a region without a partial +// object does not need any extra bookkeeping. +// +// At most one region is split per space, so the amount of data required is +// constant. +// +// A region is split only when the destination space would overflow. Once that +// happens, the destination space is abandoned and no other data (even from +// other source spaces) is targeted to that destination space. Abandoning the +// destination space may leave a somewhat large unused area at the end, if a +// large object caused the overflow. +// +// Future work: +// +// More bookkeeping would be required to continue to use the destination space. +// The most general solution would allow data from regions in two different +// source spaces to be "joined" in a single destination region. At the very +// least, additional code would be required in next_src_region() to detect the +// join and skip to an out-of-order source region. If the join region was also +// the last destination region to which a split region was copied (the most +// likely case), then additional work would be needed to get fill_region() to +// stop iteration and switch to a new source region at the right point. Basic +// idea would be to use a fake value for the top of the source space. It is +// doable, if a bit tricky. +// +// A simpler (but less general) solution would fill the remainder of the +// destination region with a dummy object and continue filling the next +// destination region. + +class SplitInfo +{ +public: + // Return true if this split info is valid (i.e., if a split has been + // recorded). The very first region cannot have a partial object and thus is + // never split, so 0 is the 'invalid' value. + bool is_valid() const { return _src_region_idx > 0; } + + // Return true if this split holds data for the specified source region. + inline bool is_split(size_t source_region) const; + + // The index of the split region, the size of the partial object on that + // region and the destination of the partial object. + size_t src_region_idx() const { return _src_region_idx; } + size_t partial_obj_size() const { return _partial_obj_size; } + HeapWord* destination() const { return _destination; } + + // The destination count of the partial object referenced by this split + // (either 1 or 2). This must be added to the destination count of the + // remainder of the source region. 
+ unsigned int destination_count() const { return _destination_count; } + + // If a word within the partial object will be written to the first word of a + // destination region, this is the address of the destination region; + // otherwise this is NULL. + HeapWord* dest_region_addr() const { return _dest_region_addr; } + + // If a word within the partial object will be written to the first word of a + // destination region, this is the address of that word within the partial + // object; otherwise this is NULL. + HeapWord* first_src_addr() const { return _first_src_addr; } + + // Record the data necessary to split the region src_region_idx. + void record(size_t src_region_idx, size_t partial_obj_size, + HeapWord* destination); + + void clear(); + + DEBUG_ONLY(void verify_clear();) + +private: + size_t _src_region_idx; + size_t _partial_obj_size; + HeapWord* _destination; + unsigned int _destination_count; + HeapWord* _dest_region_addr; + HeapWord* _first_src_addr; +}; + +inline bool SplitInfo::is_split(size_t region_idx) const +{ + return _src_region_idx == region_idx && is_valid(); +} + class SpaceInfo { public: @@ -61,105 +178,103 @@ // is no start array. ObjectStartArray* start_array() const { return _start_array; } + SplitInfo& split_info() { return _split_info; } + void set_space(MutableSpace* s) { _space = s; } void set_new_top(HeapWord* addr) { _new_top = addr; } void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; } void set_dense_prefix(HeapWord* addr) { _dense_prefix = addr; } void set_start_array(ObjectStartArray* s) { _start_array = s; } + void publish_new_top() const { _space->set_top(_new_top); } + private: MutableSpace* _space; HeapWord* _new_top; HeapWord* _min_dense_prefix; HeapWord* _dense_prefix; ObjectStartArray* _start_array; + SplitInfo _split_info; }; class ParallelCompactData { public: // Sizes are in HeapWords, unless indicated otherwise. - static const size_t Log2ChunkSize; - static const size_t ChunkSize; - static const size_t ChunkSizeBytes; - - // Mask for the bits in a size_t to get an offset within a chunk. - static const size_t ChunkSizeOffsetMask; - // Mask for the bits in a pointer to get an offset within a chunk. - static const size_t ChunkAddrOffsetMask; - // Mask for the bits in a pointer to get the address of the start of a chunk. - static const size_t ChunkAddrMask; - - static const size_t Log2BlockSize; - static const size_t BlockSize; - static const size_t BlockOffsetMask; - static const size_t BlockMask; - - static const size_t BlocksPerChunk; + static const size_t Log2RegionSize; + static const size_t RegionSize; + static const size_t RegionSizeBytes; + + // Mask for the bits in a size_t to get an offset within a region. + static const size_t RegionSizeOffsetMask; + // Mask for the bits in a pointer to get an offset within a region. + static const size_t RegionAddrOffsetMask; + // Mask for the bits in a pointer to get the address of the start of a region. + static const size_t RegionAddrMask; - class ChunkData + class RegionData { public: - // Destination address of the chunk. + // Destination address of the region. HeapWord* destination() const { return _destination; } - // The first chunk containing data destined for this chunk. - size_t source_chunk() const { return _source_chunk; } + // The first region containing data destined for this region. 
+ size_t source_region() const { return _source_region; } - // The object (if any) starting in this chunk and ending in a different - // chunk that could not be updated during the main (parallel) compaction + // The object (if any) starting in this region and ending in a different + // region that could not be updated during the main (parallel) compaction // phase. This is different from _partial_obj_addr, which is an object that - // extends onto a source chunk. However, the two uses do not overlap in + // extends onto a source region. However, the two uses do not overlap in // time, so the same field is used to save space. HeapWord* deferred_obj_addr() const { return _partial_obj_addr; } - // The starting address of the partial object extending onto the chunk. + // The starting address of the partial object extending onto the region. HeapWord* partial_obj_addr() const { return _partial_obj_addr; } - // Size of the partial object extending onto the chunk (words). + // Size of the partial object extending onto the region (words). size_t partial_obj_size() const { return _partial_obj_size; } - // Size of live data that lies within this chunk due to objects that start - // in this chunk (words). This does not include the partial object - // extending onto the chunk (if any), or the part of an object that extends - // onto the next chunk (if any). + // Size of live data that lies within this region due to objects that start + // in this region (words). This does not include the partial object + // extending onto the region (if any), or the part of an object that extends + // onto the next region (if any). size_t live_obj_size() const { return _dc_and_los & los_mask; } - // Total live data that lies within the chunk (words). + // Total live data that lies within the region (words). size_t data_size() const { return partial_obj_size() + live_obj_size(); } - // The destination_count is the number of other chunks to which data from - // this chunk will be copied. At the end of the summary phase, the valid + // The destination_count is the number of other regions to which data from + // this region will be copied. At the end of the summary phase, the valid // values of destination_count are // - // 0 - data from the chunk will be compacted completely into itself, or the - // chunk is empty. The chunk can be claimed and then filled. - // 1 - data from the chunk will be compacted into 1 other chunk; some - // data from the chunk may also be compacted into the chunk itself. - // 2 - data from the chunk will be copied to 2 other chunks. + // 0 - data from the region will be compacted completely into itself, or the + // region is empty. The region can be claimed and then filled. + // 1 - data from the region will be compacted into 1 other region; some + // data from the region may also be compacted into the region itself. + // 2 - data from the region will be copied to 2 other regions. // - // During compaction as chunks are emptied, the destination_count is + // During compaction as regions are emptied, the destination_count is // decremented (atomically) and when it reaches 0, it can be claimed and // then filled. - // - // A chunk is claimed for processing by atomically changing the - // destination_count to the claimed value (dc_claimed). After a chunk has + // + // A region is claimed for processing by atomically changing the + // destination_count to the claimed value (dc_claimed). After a region has // been filled, the destination_count should be set to the completed value // (dc_completed). 
inline uint destination_count() const; inline uint destination_count_raw() const; - // The location of the java heap data that corresponds to this chunk. + // The location of the java heap data that corresponds to this region. inline HeapWord* data_location() const; - // The highest address referenced by objects in this chunk. + // The highest address referenced by objects in this region. inline HeapWord* highest_ref() const; - // Whether this chunk is available to be claimed, has been claimed, or has + // Whether this region is available to be claimed, has been claimed, or has // been completed. // - // Minor subtlety: claimed() returns true if the chunk is marked - // completed(), which is desirable since a chunk must be claimed before it + // Minor subtlety: claimed() returns true if the region is marked + // completed(), which is desirable since a region must be claimed before it // can be completed. bool available() const { return _dc_and_los < dc_one; } bool claimed() const { return _dc_and_los >= dc_claimed; } @@ -167,11 +282,11 @@ // These are not atomic. void set_destination(HeapWord* addr) { _destination = addr; } - void set_source_chunk(size_t chunk) { _source_chunk = chunk; } + void set_source_region(size_t region) { _source_region = region; } void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; } void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; } void set_partial_obj_size(size_t words) { - _partial_obj_size = (chunk_sz_t) words; + _partial_obj_size = (region_sz_t) words; } inline void set_destination_count(uint count); @@ -187,178 +302,96 @@ inline bool claim(); private: - // The type used to represent object sizes within a chunk. - typedef uint chunk_sz_t; + // The type used to represent object sizes within a region. + typedef uint region_sz_t; // Constants for manipulating the _dc_and_los field, which holds both the // destination count and live obj size. The live obj size lives at the // least significant end so no masking is necessary when adding. - static const chunk_sz_t dc_shift; // Shift amount. - static const chunk_sz_t dc_mask; // Mask for destination count. - static const chunk_sz_t dc_one; // 1, shifted appropriately. - static const chunk_sz_t dc_claimed; // Chunk has been claimed. - static const chunk_sz_t dc_completed; // Chunk has been completed. - static const chunk_sz_t los_mask; // Mask for live obj size. - - HeapWord* _destination; - size_t _source_chunk; - HeapWord* _partial_obj_addr; - chunk_sz_t _partial_obj_size; - chunk_sz_t volatile _dc_and_los; + static const region_sz_t dc_shift; // Shift amount. + static const region_sz_t dc_mask; // Mask for destination count. + static const region_sz_t dc_one; // 1, shifted appropriately. + static const region_sz_t dc_claimed; // Region has been claimed. + static const region_sz_t dc_completed; // Region has been completed. + static const region_sz_t los_mask; // Mask for live obj size. + + HeapWord* _destination; + size_t _source_region; + HeapWord* _partial_obj_addr; + region_sz_t _partial_obj_size; + region_sz_t volatile _dc_and_los; #ifdef ASSERT // These enable optimizations that are only partially implemented. Use // debug builds to prevent the code fragments from breaking. 
- HeapWord* _data_location; - HeapWord* _highest_ref; -#endif // #ifdef ASSERT + HeapWord* _data_location; + HeapWord* _highest_ref; +#endif // #ifdef ASSERT #ifdef ASSERT public: - uint _pushed; // 0 until chunk is pushed onto a worker's stack + uint _pushed; // 0 until region is pushed onto a worker's stack private: #endif }; - // 'Blocks' allow shorter sections of the bitmap to be searched. Each Block - // holds an offset, which is the amount of live data in the Chunk to the left - // of the first live object in the Block. This amount of live data will - // include any object extending into the block. The first block in - // a chunk does not include any partial object extending into the - // the chunk. - // - // The offset also encodes the - // 'parity' of the first 1 bit in the Block: a positive offset means the - // first 1 bit marks the start of an object, a negative offset means the first - // 1 bit marks the end of an object. - class BlockData - { - public: - typedef short int blk_ofs_t; - - blk_ofs_t offset() const { return _offset >= 0 ? _offset : -_offset; } - blk_ofs_t raw_offset() const { return _offset; } - void set_first_is_start_bit(bool v) { _first_is_start_bit = v; } - -#if 0 - // The need for this method was anticipated but it is - // never actually used. Do not include it for now. If - // it is needed, consider the problem of what is passed - // as "v". To avoid warning errors the method set_start_bit_offset() - // was changed to take a size_t as the parameter and to do the - // check for the possible overflow. Doing the cast in these - // methods better limits the potential problems because of - // the size of the field to this class. - void set_raw_offset(blk_ofs_t v) { _offset = v; } -#endif - void set_start_bit_offset(size_t val) { - assert(val >= 0, "sanity"); - _offset = (blk_ofs_t) val; - assert(val == (size_t) _offset, "Value is too large"); - _first_is_start_bit = true; - } - void set_end_bit_offset(size_t val) { - assert(val >= 0, "sanity"); - _offset = (blk_ofs_t) val; - assert(val == (size_t) _offset, "Value is too large"); - _offset = - _offset; - _first_is_start_bit = false; - } - bool first_is_start_bit() { - assert(_set_phase > 0, "Not initialized"); - return _first_is_start_bit; - } - bool first_is_end_bit() { - assert(_set_phase > 0, "Not initialized"); - return !_first_is_start_bit; - } - - private: - blk_ofs_t _offset; - // This is temporary until the mark_bitmap is separated into - // a start bit array and an end bit array. - bool _first_is_start_bit; -#ifdef ASSERT - short _set_phase; - static short _cur_phase; - public: - static void set_cur_phase(short v) { _cur_phase = v; } -#endif - }; - public: ParallelCompactData(); bool initialize(MemRegion covered_region); - size_t chunk_count() const { return _chunk_count; } - - // Convert chunk indices to/from ChunkData pointers. - inline ChunkData* chunk(size_t chunk_idx) const; - inline size_t chunk(const ChunkData* const chunk_ptr) const; - - // Returns true if the given address is contained within the chunk - bool chunk_contains(size_t chunk_index, HeapWord* addr); + size_t region_count() const { return _region_count; } - size_t block_count() const { return _block_count; } - inline BlockData* block(size_t n) const; + // Convert region indices to/from RegionData pointers. + inline RegionData* region(size_t region_idx) const; + inline size_t region(const RegionData* const region_ptr) const; - // Returns true if the given block is in the given chunk. 
- static bool chunk_contains_block(size_t chunk_index, size_t block_index); + // Returns true if the given address is contained within the region + bool region_contains(size_t region_index, HeapWord* addr); void add_obj(HeapWord* addr, size_t len); void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); } - // Fill in the chunks covering [beg, end) so that no data moves; i.e., the - // destination of chunk n is simply the start of chunk n. The argument beg - // must be chunk-aligned; end need not be. + // Fill in the regions covering [beg, end) so that no data moves; i.e., the + // destination of region n is simply the start of region n. The argument beg + // must be region-aligned; end need not be. void summarize_dense_prefix(HeapWord* beg, HeapWord* end); - - bool summarize(HeapWord* target_beg, HeapWord* target_end, - HeapWord* source_beg, HeapWord* source_end, - HeapWord** target_next, HeapWord** source_next = 0); + + HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info, + HeapWord* destination, HeapWord* target_end, + HeapWord** target_next); + bool summarize(SplitInfo& split_info, + HeapWord* source_beg, HeapWord* source_end, + HeapWord** source_next, + HeapWord* target_beg, HeapWord* target_end, + HeapWord** target_next); void clear(); - void clear_range(size_t beg_chunk, size_t end_chunk); + void clear_range(size_t beg_region, size_t end_region); void clear_range(HeapWord* beg, HeapWord* end) { - clear_range(addr_to_chunk_idx(beg), addr_to_chunk_idx(end)); + clear_range(addr_to_region_idx(beg), addr_to_region_idx(end)); } - // Return the number of words between addr and the start of the chunk + // Return the number of words between addr and the start of the region // containing addr. - inline size_t chunk_offset(const HeapWord* addr) const; + inline size_t region_offset(const HeapWord* addr) const; - // Convert addresses to/from a chunk index or chunk pointer. - inline size_t addr_to_chunk_idx(const HeapWord* addr) const; - inline ChunkData* addr_to_chunk_ptr(const HeapWord* addr) const; - inline HeapWord* chunk_to_addr(size_t chunk) const; - inline HeapWord* chunk_to_addr(size_t chunk, size_t offset) const; - inline HeapWord* chunk_to_addr(const ChunkData* chunk) const; - - inline HeapWord* chunk_align_down(HeapWord* addr) const; - inline HeapWord* chunk_align_up(HeapWord* addr) const; - inline bool is_chunk_aligned(HeapWord* addr) const; - - // Analogous to chunk_offset() for blocks. - size_t block_offset(const HeapWord* addr) const; - size_t addr_to_block_idx(const HeapWord* addr) const; - size_t addr_to_block_idx(const oop obj) const { - return addr_to_block_idx((HeapWord*) obj); - } - inline BlockData* addr_to_block_ptr(const HeapWord* addr) const; - inline HeapWord* block_to_addr(size_t block) const; + // Convert addresses to/from a region index or region pointer. + inline size_t addr_to_region_idx(const HeapWord* addr) const; + inline RegionData* addr_to_region_ptr(const HeapWord* addr) const; + inline HeapWord* region_to_addr(size_t region) const; + inline HeapWord* region_to_addr(size_t region, size_t offset) const; + inline HeapWord* region_to_addr(const RegionData* region) const; + + inline HeapWord* region_align_down(HeapWord* addr) const; + inline HeapWord* region_align_up(HeapWord* addr) const; + inline bool is_region_aligned(HeapWord* addr) const; // Return the address one past the end of the partial object. 
- HeapWord* partial_obj_end(size_t chunk_idx) const; + HeapWord* partial_obj_end(size_t region_idx) const; // Return the new location of the object p after the // the compaction. HeapWord* calc_new_pointer(HeapWord* addr); - // Same as calc_new_pointer() using blocks. - HeapWord* block_calc_new_pointer(HeapWord* addr); - - // Same as calc_new_pointer() using chunks. - HeapWord* chunk_calc_new_pointer(HeapWord* addr); - HeapWord* calc_new_pointer(oop p) { return calc_new_pointer((HeapWord*) p); } @@ -366,22 +399,13 @@ // Return the updated address for the given klass klassOop calc_new_klass(klassOop); - // Given a block returns true if the partial object for the - // corresponding chunk ends in the block. Returns false, otherwise - // If there is no partial object, returns false. - bool partial_obj_ends_in_block(size_t block_index); - - // Returns the block index for the block - static size_t block_idx(BlockData* block); - -#ifdef ASSERT +#ifdef ASSERT void verify_clear(const PSVirtualSpace* vspace); void verify_clear(); #endif // #ifdef ASSERT private: - bool initialize_block_data(size_t region_size); - bool initialize_chunk_data(size_t region_size); + bool initialize_region_data(size_t region_size); PSVirtualSpace* create_vspace(size_t count, size_t element_size); private: @@ -390,74 +414,70 @@ HeapWord* _region_end; #endif // #ifdef ASSERT - PSVirtualSpace* _chunk_vspace; - ChunkData* _chunk_data; - size_t _chunk_count; - - PSVirtualSpace* _block_vspace; - BlockData* _block_data; - size_t _block_count; + PSVirtualSpace* _region_vspace; + RegionData* _region_data; + size_t _region_count; }; inline uint -ParallelCompactData::ChunkData::destination_count_raw() const +ParallelCompactData::RegionData::destination_count_raw() const { return _dc_and_los & dc_mask; } inline uint -ParallelCompactData::ChunkData::destination_count() const +ParallelCompactData::RegionData::destination_count() const { return destination_count_raw() >> dc_shift; } inline void -ParallelCompactData::ChunkData::set_destination_count(uint count) +ParallelCompactData::RegionData::set_destination_count(uint count) { assert(count <= (dc_completed >> dc_shift), "count too large"); - const chunk_sz_t live_sz = (chunk_sz_t) live_obj_size(); + const region_sz_t live_sz = (region_sz_t) live_obj_size(); _dc_and_los = (count << dc_shift) | live_sz; } -inline void ParallelCompactData::ChunkData::set_live_obj_size(size_t words) +inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words) { assert(words <= los_mask, "would overflow"); - _dc_and_los = destination_count_raw() | (chunk_sz_t)words; + _dc_and_los = destination_count_raw() | (region_sz_t)words; } -inline void ParallelCompactData::ChunkData::decrement_destination_count() +inline void ParallelCompactData::RegionData::decrement_destination_count() { assert(_dc_and_los < dc_claimed, "already claimed"); assert(_dc_and_los >= dc_one, "count would go negative"); Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los); } -inline HeapWord* ParallelCompactData::ChunkData::data_location() const +inline HeapWord* ParallelCompactData::RegionData::data_location() const { DEBUG_ONLY(return _data_location;) NOT_DEBUG(return NULL;) } -inline HeapWord* ParallelCompactData::ChunkData::highest_ref() const +inline HeapWord* ParallelCompactData::RegionData::highest_ref() const { DEBUG_ONLY(return _highest_ref;) NOT_DEBUG(return NULL;) } -inline void ParallelCompactData::ChunkData::set_data_location(HeapWord* addr) +inline void 
ParallelCompactData::RegionData::set_data_location(HeapWord* addr) { DEBUG_ONLY(_data_location = addr;) } -inline void ParallelCompactData::ChunkData::set_completed() +inline void ParallelCompactData::RegionData::set_completed() { assert(claimed(), "must be claimed first"); - _dc_and_los = dc_completed | (chunk_sz_t) live_obj_size(); + _dc_and_los = dc_completed | (region_sz_t) live_obj_size(); } -// MT-unsafe claiming of a chunk. Should only be used during single threaded +// MT-unsafe claiming of a region. Should only be used during single threaded // execution. -inline bool ParallelCompactData::ChunkData::claim_unsafe() +inline bool ParallelCompactData::RegionData::claim_unsafe() { if (available()) { _dc_and_los |= dc_claimed; @@ -466,13 +486,13 @@ return false; } -inline void ParallelCompactData::ChunkData::add_live_obj(size_t words) +inline void ParallelCompactData::RegionData::add_live_obj(size_t words) { assert(words <= (size_t)los_mask - live_obj_size(), "overflow"); Atomic::add((int) words, (volatile int*) &_dc_and_los); } -inline void ParallelCompactData::ChunkData::set_highest_ref(HeapWord* addr) +inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr) { #ifdef ASSERT HeapWord* tmp = _highest_ref; @@ -482,7 +502,7 @@ #endif // #ifdef ASSERT } -inline bool ParallelCompactData::ChunkData::claim() +inline bool ParallelCompactData::RegionData::claim() { const int los = (int) live_obj_size(); const int old = Atomic::cmpxchg(dc_claimed | los, @@ -490,119 +510,85 @@ return old == los; } -inline ParallelCompactData::ChunkData* -ParallelCompactData::chunk(size_t chunk_idx) const +inline ParallelCompactData::RegionData* +ParallelCompactData::region(size_t region_idx) const { - assert(chunk_idx <= chunk_count(), "bad arg"); - return _chunk_data + chunk_idx; + assert(region_idx <= region_count(), "bad arg"); + return _region_data + region_idx; } inline size_t -ParallelCompactData::chunk(const ChunkData* const chunk_ptr) const +ParallelCompactData::region(const RegionData* const region_ptr) const { - assert(chunk_ptr >= _chunk_data, "bad arg"); - assert(chunk_ptr <= _chunk_data + chunk_count(), "bad arg"); - return pointer_delta(chunk_ptr, _chunk_data, sizeof(ChunkData)); -} - -inline ParallelCompactData::BlockData* -ParallelCompactData::block(size_t n) const { - assert(n < block_count(), "bad arg"); - return _block_data + n; + assert(region_ptr >= _region_data, "bad arg"); + assert(region_ptr <= _region_data + region_count(), "bad arg"); + return pointer_delta(region_ptr, _region_data, sizeof(RegionData)); } inline size_t -ParallelCompactData::chunk_offset(const HeapWord* addr) const +ParallelCompactData::region_offset(const HeapWord* addr) const { assert(addr >= _region_start, "bad addr"); assert(addr <= _region_end, "bad addr"); - return (size_t(addr) & ChunkAddrOffsetMask) >> LogHeapWordSize; + return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize; } inline size_t -ParallelCompactData::addr_to_chunk_idx(const HeapWord* addr) const +ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const { assert(addr >= _region_start, "bad addr"); assert(addr <= _region_end, "bad addr"); - return pointer_delta(addr, _region_start) >> Log2ChunkSize; + return pointer_delta(addr, _region_start) >> Log2RegionSize; } -inline ParallelCompactData::ChunkData* -ParallelCompactData::addr_to_chunk_ptr(const HeapWord* addr) const +inline ParallelCompactData::RegionData* +ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const { - return 
chunk(addr_to_chunk_idx(addr)); + return region(addr_to_region_idx(addr)); } inline HeapWord* -ParallelCompactData::chunk_to_addr(size_t chunk) const +ParallelCompactData::region_to_addr(size_t region) const { - assert(chunk <= _chunk_count, "chunk out of range"); - return _region_start + (chunk << Log2ChunkSize); + assert(region <= _region_count, "region out of range"); + return _region_start + (region << Log2RegionSize); } inline HeapWord* -ParallelCompactData::chunk_to_addr(const ChunkData* chunk) const +ParallelCompactData::region_to_addr(const RegionData* region) const { - return chunk_to_addr(pointer_delta(chunk, _chunk_data, sizeof(ChunkData))); + return region_to_addr(pointer_delta(region, _region_data, + sizeof(RegionData))); } inline HeapWord* -ParallelCompactData::chunk_to_addr(size_t chunk, size_t offset) const +ParallelCompactData::region_to_addr(size_t region, size_t offset) const { - assert(chunk <= _chunk_count, "chunk out of range"); - assert(offset < ChunkSize, "offset too big"); // This may be too strict. - return chunk_to_addr(chunk) + offset; + assert(region <= _region_count, "region out of range"); + assert(offset < RegionSize, "offset too big"); // This may be too strict. + return region_to_addr(region) + offset; } inline HeapWord* -ParallelCompactData::chunk_align_down(HeapWord* addr) const +ParallelCompactData::region_align_down(HeapWord* addr) const { assert(addr >= _region_start, "bad addr"); - assert(addr < _region_end + ChunkSize, "bad addr"); - return (HeapWord*)(size_t(addr) & ChunkAddrMask); + assert(addr < _region_end + RegionSize, "bad addr"); + return (HeapWord*)(size_t(addr) & RegionAddrMask); } inline HeapWord* -ParallelCompactData::chunk_align_up(HeapWord* addr) const +ParallelCompactData::region_align_up(HeapWord* addr) const { assert(addr >= _region_start, "bad addr"); assert(addr <= _region_end, "bad addr"); - return chunk_align_down(addr + ChunkSizeOffsetMask); + return region_align_down(addr + RegionSizeOffsetMask); } inline bool -ParallelCompactData::is_chunk_aligned(HeapWord* addr) const +ParallelCompactData::is_region_aligned(HeapWord* addr) const { - return chunk_offset(addr) == 0; -} - -inline size_t -ParallelCompactData::block_offset(const HeapWord* addr) const -{ - assert(addr >= _region_start, "bad addr"); - assert(addr <= _region_end, "bad addr"); - return pointer_delta(addr, _region_start) & BlockOffsetMask; -} - -inline size_t -ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const -{ - assert(addr >= _region_start, "bad addr"); - assert(addr <= _region_end, "bad addr"); - return pointer_delta(addr, _region_start) >> Log2BlockSize; -} - -inline ParallelCompactData::BlockData* -ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const -{ - return block(addr_to_block_idx(addr)); -} - -inline HeapWord* -ParallelCompactData::block_to_addr(size_t block) const -{ - assert(block < _block_count, "block out of range"); - return _region_start + (block << Log2BlockSize); + return region_offset(addr) == 0; } // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the @@ -690,41 +676,98 @@ _words_remaining -= words; } -// Closure for updating the block data during the summary phase. 
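The region_offset / addr_to_region_idx / region_align_* inlines above are plain power-of-two address arithmetic. The following is a minimal standalone sketch of that arithmetic (illustrative only, not HotSpot code: it assumes a hypothetical 64 KB region size and works in byte addresses rather than HeapWords):

// region_math_sketch.cpp -- illustrative only, not part of this patch.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Assumed values for the sketch: 64 KB regions, byte addressing.
static const uintptr_t kLog2RegionBytes = 16;
static const uintptr_t kRegionBytes     = uintptr_t(1) << kLog2RegionBytes;
static const uintptr_t kRegionAddrMask  = ~(kRegionBytes - 1);

static uintptr_t region_start;   // plays the role of _region_start

static size_t addr_to_region_idx(uintptr_t addr) {
  return (addr - region_start) >> kLog2RegionBytes;
}
static uintptr_t region_to_addr(size_t idx) {
  return region_start + (uintptr_t(idx) << kLog2RegionBytes);
}
static uintptr_t region_align_down(uintptr_t addr) {
  return addr & kRegionAddrMask;
}
static uintptr_t region_align_up(uintptr_t addr) {
  return region_align_down(addr + kRegionBytes - 1);
}

int main() {
  region_start = 0x10000000u;                        // region-aligned base
  uintptr_t addr = region_start + 3 * kRegionBytes + 40;
  assert(addr_to_region_idx(addr) == 3);
  assert(region_align_down(addr) == region_to_addr(3));
  assert(region_align_up(addr)   == region_to_addr(4));
  std::printf("addr %#lx lies in region %zu\n",
              (unsigned long)addr, addr_to_region_idx(addr));
  return 0;
}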
-class BitBlockUpdateClosure: public ParMarkBitMapClosure { - // ParallelCompactData::BlockData::blk_ofs_t _live_data_left; - size_t _live_data_left; - size_t _cur_block; - HeapWord* _chunk_start; - HeapWord* _chunk_end; - size_t _chunk_index; - - public: - BitBlockUpdateClosure(ParMarkBitMap* mbm, - ParCompactionManager* cm, - size_t chunk_index); - - size_t cur_block() { return _cur_block; } - size_t chunk_index() { return _chunk_index; } - size_t live_data_left() { return _live_data_left; } - // Returns true the first bit in the current block (cur_block) is - // a start bit. - // Returns true if the current block is within the chunk for the closure; - bool chunk_contains_cur_block(); - - // Set the chunk index and related chunk values for - // a new chunk. - void reset_chunk(size_t chunk_index); - - virtual IterationStatus do_addr(HeapWord* addr, size_t words); -}; +// The UseParallelOldGC collector is a stop-the-world garbage collector that +// does parts of the collection using parallel threads. The collection includes +// the tenured generation and the young generation. The permanent generation is +// collected at the same time as the other two generations but the permanent +// generation is collected by a single GC thread. The permanent generation is +// collected serially because of the requirement that during the processing of a +// klass AAA, any objects referenced by AAA must already have been processed. +// This requirement is enforced by a left (lower address) to right (higher +// address) sliding compaction. +// +// There are four phases of the collection. +// +// - marking phase +// - summary phase +// - compacting phase +// - clean up phase +// +// Roughly speaking these phases correspond, respectively, to +// - mark all the live objects +// - calculate the destination of each object at the end of the collection +// - move the objects to their destination +// - update some references and reinitialize some variables +// +// These phases are invoked in PSParallelCompact::invoke_no_policy(). The +// marking phase is implemented in PSParallelCompact::marking_phase() and does a +// complete marking of the heap. The summary phase is implemented in +// PSParallelCompact::summary_phase(). The move and update phase is implemented +// in PSParallelCompact::compact(). +// +// A space that is being collected is divided into regions and with each region +// is associated an object of type ParallelCompactData. Each region is of a +// fixed size and typically will contain more than 1 object and may have parts +// of objects at the front and back of the region. +// +// region -----+---------------------+---------- +// objects covered [ AAA )[ BBB )[ CCC )[ DDD ) +// +// The marking phase does a complete marking of all live objects in the heap. +// The marking also compiles the size of the data for all live objects covered +// by the region. This size includes the part of any live object spanning onto +// the region (part of AAA if it is live) from the front, all live objects +// contained in the region (BBB and/or CCC if they are live), and the part of +// any live objects covered by the region that extends off the region (part of +// DDD if it is live). The marking phase uses multiple GC threads and marking +// is done in a bit array of type ParMarkBitMap. The marking of the bit map is +// done atomically as is the accumulation of the size of the live objects +// covered by a region. +// +// The summary phase calculates the total live data to the left of each region +// XXX.
Based on that total and the bottom of the space, it can calculate the +// starting location of the live data in XXX. The summary phase calculates for +// each region XXX quantities such as +// +// - the amount of live data at the beginning of a region from an object +// entering the region. +// - the location of the first live data on the region +// - a count of the number of regions receiving live data from XXX. +// +// See ParallelCompactData for precise details. The summary phase also +// calculates the dense prefix for the compaction. The dense prefix is a +// portion at the beginning of the space that is not moved. The objects in the +// dense prefix do need to have their object references updated. See method +// summarize_dense_prefix(). +// +// The summary phase is done using 1 GC thread. +// +// The compaction phase moves objects to their new location and updates all +// references in the object. +// +// A current exception is that objects that cross a region boundary are moved +// but do not have their references updated. References are not updated because +// it cannot easily be determined if the klass pointer KKK for the object AAA +// has been updated. KKK likely resides in a region to the left of the region +// containing AAA. These AAA's have their references updated at the end in a +// clean up phase. See the method PSParallelCompact::update_deferred_objects(). +// An alternate strategy is being investigated for this deferral of updating. +// +// Compaction is done on a region basis. A region that is ready to be filled is +// put on a ready list and GC threads take regions off the list and fill them. A +// region is ready to be filled if it is empty of live objects. Such a region may +// have been initially empty (only contained dead objects) or may have had all +// its live objects copied out already. A region that compacts into itself is +// also ready for filling. The ready list is initially filled with empty +// regions and regions compacting into themselves. There is always at least 1 +// region that can be put on the ready list. The regions are atomically added +// and removed from the ready list. class PSParallelCompact : AllStatic { public: // Convenient access to type names.
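To make the summary-phase idea in the comment above concrete, here is a small self-contained sketch (illustrative assumptions only: a made-up region size and live-word counts, not the HotSpot data structures) showing how the total live data to the left of each region determines where its live data will land, and how a region that compacts into itself can be recognized:

// summary_sketch.cpp -- illustrative only, not part of this patch.
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const size_t region_words = 1024;   // assumed region size in words
  // Live words per region, as the marking phase might have accumulated them.
  std::vector<size_t> live_words = {1024, 0, 300, 1024, 10};

  // Destination (in words from the bottom of the space) of the first live
  // word in each region = total live data in the regions to its left.
  size_t live_to_left = 0;
  for (size_t i = 0; i < live_words.size(); ++i) {
    size_t dest_word = live_to_left;
    bool compacts_into_itself = (dest_word / region_words) == i;
    std::printf("region %zu: live=%4zu dest_word=%5zu%s\n",
                i, live_words[i], dest_word,
                compacts_into_itself ? "  (compacts into itself)" : "");
    live_to_left += live_words[i];
  }
  return 0;
}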
typedef ParMarkBitMap::idx_t idx_t; - typedef ParallelCompactData::ChunkData ChunkData; - typedef ParallelCompactData::BlockData BlockData; + typedef ParallelCompactData::RegionData RegionData; typedef enum { perm_space_id, old_space_id, eden_space_id, @@ -732,48 +775,51 @@ } SpaceId; public: - // In line closure decls + // Inline closure decls // - class IsAliveClosure: public BoolObjectClosure { public: - void do_object(oop p) { assert(false, "don't call"); } - bool do_object_b(oop p) { return mark_bitmap()->is_marked(p); } + virtual void do_object(oop p); + virtual bool do_object_b(oop p); }; class KeepAliveClosure: public OopClosure { + private: ParCompactionManager* _compaction_manager; + protected: + template inline void do_oop_work(T* p); public: - KeepAliveClosure(ParCompactionManager* cm) { - _compaction_manager = cm; - } - void do_oop(oop* p); + KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; - class FollowRootClosure: public OopsInGenClosure{ + // Current unused + class FollowRootClosure: public OopsInGenClosure { + private: ParCompactionManager* _compaction_manager; public: - FollowRootClosure(ParCompactionManager* cm) { - _compaction_manager = cm; - } - void do_oop(oop* p) { follow_root(_compaction_manager, p); } + FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); virtual const bool do_nmethods() const { return true; } }; class FollowStackClosure: public VoidClosure { + private: ParCompactionManager* _compaction_manager; public: - FollowStackClosure(ParCompactionManager* cm) { - _compaction_manager = cm; - } - void do_void() { follow_stack(_compaction_manager); } + FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { } + virtual void do_void(); }; class AdjustPointerClosure: public OopsInGenClosure { + private: bool _is_root; public: - AdjustPointerClosure(bool is_root) : _is_root(is_root) {} - void do_oop(oop* p) { adjust_pointer(p, _is_root); } + AdjustPointerClosure(bool is_root) : _is_root(is_root) { } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; // Closure for verifying update of pointers. 
Does not @@ -808,8 +854,6 @@ friend class instanceKlassKlass; friend class RefProcTaskProxy; - static void mark_and_push_internal(ParCompactionManager* cm, oop* p); - private: static elapsedTimer _accumulated_time; static unsigned int _total_invocations; @@ -841,9 +885,9 @@ private: // Closure accessors - static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; } + static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; } static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; } - static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; } + static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; } static void initialize_space_info(); @@ -862,10 +906,11 @@ static void follow_stack(ParCompactionManager* cm); static void follow_weak_klass_links(ParCompactionManager* cm); - static void adjust_pointer(oop* p, bool is_root); + template static inline void adjust_pointer(T* p, bool is_root); static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } - static void follow_root(ParCompactionManager* cm, oop* p); + template + static inline void follow_root(ParCompactionManager* cm, T* p); // Compute the dense prefix for the designated space. This is an experimental // implementation currently not used in production. @@ -885,56 +930,68 @@ // not reclaimed). static double dead_wood_limiter(double density, size_t min_percent); - // Find the first (left-most) chunk in the range [beg, end) that has at least + // Find the first (left-most) region in the range [beg, end) that has at least // dead_words of dead space to the left. The argument beg must be the first - // chunk in the space that is not completely live. - static ChunkData* dead_wood_limit_chunk(const ChunkData* beg, - const ChunkData* end, - size_t dead_words); + // region in the space that is not completely live. + static RegionData* dead_wood_limit_region(const RegionData* beg, + const RegionData* end, + size_t dead_words); - // Return a pointer to the first chunk in the range [beg, end) that is not + // Return a pointer to the first region in the range [beg, end) that is not // completely full. - static ChunkData* first_dead_space_chunk(const ChunkData* beg, - const ChunkData* end); + static RegionData* first_dead_space_region(const RegionData* beg, + const RegionData* end); // Return a value indicating the benefit or 'yield' if the compacted region // were to start (or equivalently if the dense prefix were to end) at the - // candidate chunk. Higher values are better. - // + // candidate region. Higher values are better. + // // The value is based on the amount of space reclaimed vs. the costs of (a) // updating references in the dense prefix plus (b) copying objects and // updating references in the compacted region. - static inline double reclaimed_ratio(const ChunkData* const candidate, - HeapWord* const bottom, - HeapWord* const top, - HeapWord* const new_top); + static inline double reclaimed_ratio(const RegionData* const candidate, + HeapWord* const bottom, + HeapWord* const top, + HeapWord* const new_top); // Compute the dense prefix for the designated space. static HeapWord* compute_dense_prefix(const SpaceId id, bool maximum_compaction); - // Return true if dead space crosses onto the specified Chunk; bit must be the - // bit index corresponding to the first word of the Chunk. 
- static inline bool dead_space_crosses_boundary(const ChunkData* chunk, - idx_t bit); + // Return true if dead space crosses onto the specified Region; bit must be + // the bit index corresponding to the first word of the Region. + static inline bool dead_space_crosses_boundary(const RegionData* region, + idx_t bit); // Summary phase utility routine to fill dead space (if any) at the dense // prefix boundary. Should only be called if the the dense prefix is // non-empty. static void fill_dense_prefix_end(SpaceId id); - static void summarize_spaces_quick(); - static void summarize_space(SpaceId id, bool maximum_compaction); - static void summary_phase(ParCompactionManager* cm, bool maximum_compaction); + // Clear the summary data source_region field for the specified addresses. + static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr); - static bool block_first_offset(size_t block_index, idx_t* block_offset_ptr); +#ifndef PRODUCT + // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot). - // Fill in the BlockData - static void summarize_blocks(ParCompactionManager* cm, - SpaceId first_compaction_space_id); + // Fill the region [start, start + words) with live object(s). Only usable + // for the old and permanent generations. + static void fill_with_live_objects(SpaceId id, HeapWord* const start, + size_t words); + // Include the new objects in the summary data. + static void summarize_new_objects(SpaceId id, HeapWord* start); + + // Add live objects to a survivor space since it's rare that both survivors + // are non-empty. + static void provoke_split_fill_survivor(SpaceId id); - // The space that is compacted after space_id. - static SpaceId next_compaction_space_id(SpaceId space_id); + // Add live objects and/or choose the dense prefix to provoke splitting. + static void provoke_split(bool & maximum_compaction); +#endif + + static void summarize_spaces_quick(); + static void summarize_space(SpaceId id, bool maximum_compaction); + static void summary_phase(ParCompactionManager* cm, bool maximum_compaction); // Adjust addresses in roots. Does not adjust addresses in heap. static void adjust_roots(); @@ -946,19 +1003,19 @@ static void compact_perm(ParCompactionManager* cm); static void compact(); - // Add available chunks to the stack and draining tasks to the task queue. - static void enqueue_chunk_draining_tasks(GCTaskQueue* q, - uint parallel_gc_threads); + // Add available regions to the stack and draining tasks to the task queue. + static void enqueue_region_draining_tasks(GCTaskQueue* q, + uint parallel_gc_threads); // Add dense prefix update tasks to the task queue. static void enqueue_dense_prefix_tasks(GCTaskQueue* q, uint parallel_gc_threads); - // Add chunk stealing tasks to the task queue. - static void enqueue_chunk_stealing_tasks( - GCTaskQueue* q, - ParallelTaskTerminator* terminator_ptr, - uint parallel_gc_threads); + // Add region stealing tasks to the task queue. 
+ static void enqueue_region_stealing_tasks( + GCTaskQueue* q, + ParallelTaskTerminator* terminator_ptr, + uint parallel_gc_threads); // For debugging only - compacts the old gen serially static void compact_serial(ParCompactionManager* cm); @@ -974,14 +1031,14 @@ protected: #ifdef VALIDATE_MARK_SWEEP - static GrowableArray* _root_refs_stack; + static GrowableArray* _root_refs_stack; static GrowableArray * _live_oops; static GrowableArray * _live_oops_moved_to; static GrowableArray* _live_oops_size; static size_t _live_oops_index; static size_t _live_oops_index_at_perm; - static GrowableArray* _other_refs_stack; - static GrowableArray* _adjusted_pointers; + static GrowableArray* _other_refs_stack; + static GrowableArray* _adjusted_pointers; static bool _pointer_tracking; static bool _root_tracking; @@ -1002,12 +1059,12 @@ public: class MarkAndPushClosure: public OopClosure { + private: ParCompactionManager* _compaction_manager; public: - MarkAndPushClosure(ParCompactionManager* cm) { - _compaction_manager = cm; - } - void do_oop(oop* p) { mark_and_push(_compaction_manager, p); } + MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); virtual const bool do_nmethods() const { return true; } }; @@ -1041,21 +1098,9 @@ // Marking support static inline bool mark_obj(oop obj); - static bool mark_obj(oop* p) { - if (*p != NULL) { - return mark_obj(*p); - } else { - return false; - } - } - static void mark_and_push(ParCompactionManager* cm, oop* p) { - // Check mark and maybe push on - // marking stack - oop m = *p; - if (m != NULL && mark_bitmap()->is_unmarked(m)) { - mark_and_push_internal(cm, p); - } - } + // Check mark and maybe push on marking stack + template static inline void mark_and_push(ParCompactionManager* cm, + T* p); // Compaction support. // Return true if p is in the range [beg_addr, end_addr). @@ -1074,71 +1119,78 @@ // Move and update the live objects in the specified space. static void move_and_update(ParCompactionManager* cm, SpaceId space_id); - // Process the end of the given chunk range in the dense prefix. + // Process the end of the given region range in the dense prefix. // This includes saving any object not updated. - static void dense_prefix_chunks_epilogue(ParCompactionManager* cm, - size_t chunk_start_index, - size_t chunk_end_index, - idx_t exiting_object_offset, - idx_t chunk_offset_start, - idx_t chunk_offset_end); + static void dense_prefix_regions_epilogue(ParCompactionManager* cm, + size_t region_start_index, + size_t region_end_index, + idx_t exiting_object_offset, + idx_t region_offset_start, + idx_t region_offset_end); - // Update a chunk in the dense prefix. For each live object - // in the chunk, update it's interior references. For each + // Update a region in the dense prefix. For each live object + // in the region, update it's interior references. For each // dead object, fill it with deadwood. Dead space at the end - // of a chunk range will be filled to the start of the next - // live object regardless of the chunk_index_end. None of the + // of a region range will be filled to the start of the next + // live object regardless of the region_index_end. None of the // objects in the dense prefix move and dead space is dead // (holds only dead objects that don't need any processing), so // dead space can be filled in any order. 
static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm, - SpaceId space_id, - size_t chunk_index_start, - size_t chunk_index_end); + SpaceId space_id, + size_t region_index_start, + size_t region_index_end); // Return the address of the count + 1st live word in the range [beg, end). static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count); // Return the address of the word to be copied to dest_addr, which must be - // aligned to a chunk boundary. + // aligned to a region boundary. static HeapWord* first_src_addr(HeapWord* const dest_addr, - size_t src_chunk_idx); + SpaceId src_space_id, + size_t src_region_idx); - // Determine the next source chunk, set closure.source() to the start of the - // new chunk return the chunk index. Parameter end_addr is the address one + // Determine the next source region, set closure.source() to the start of the + // new region return the region index. Parameter end_addr is the address one // beyond the end of source range just processed. If necessary, switch to a // new source space and set src_space_id (in-out parameter) and src_space_top // (out parameter) accordingly. - static size_t next_src_chunk(MoveAndUpdateClosure& closure, - SpaceId& src_space_id, - HeapWord*& src_space_top, - HeapWord* end_addr); - - // Decrement the destination count for each non-empty source chunk in the - // range [beg_chunk, chunk(chunk_align_up(end_addr))). + static size_t next_src_region(MoveAndUpdateClosure& closure, + SpaceId& src_space_id, + HeapWord*& src_space_top, + HeapWord* end_addr); + + // Decrement the destination count for each non-empty source region in the + // range [beg_region, region(region_align_up(end_addr))). If the destination + // count for a region goes to 0 and it needs to be filled, enqueue it. static void decrement_destination_counts(ParCompactionManager* cm, - size_t beg_chunk, - HeapWord* end_addr); - - // Fill a chunk, copying objects from one or more source chunks. - static void fill_chunk(ParCompactionManager* cm, size_t chunk_idx); - static void fill_and_update_chunk(ParCompactionManager* cm, size_t chunk) { - fill_chunk(cm, chunk); + SpaceId src_space_id, + size_t beg_region, + HeapWord* end_addr); + + // Fill a region, copying objects from one or more source regions. + static void fill_region(ParCompactionManager* cm, size_t region_idx); + static void fill_and_update_region(ParCompactionManager* cm, size_t region) { + fill_region(cm, region); } // Update the deferred objects in the space. static void update_deferred_objects(ParCompactionManager* cm, SpaceId id); // Mark pointer and follow contents. 
- static void mark_and_follow(ParCompactionManager* cm, oop* p); + template + static inline void mark_and_follow(ParCompactionManager* cm, T* p); static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; } static ParallelCompactData& summary_data() { return _summary_data; } - static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); } - static inline void adjust_pointer(oop* p, - HeapWord* beg_addr, - HeapWord* end_addr); + static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); } + static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); } + + template + static inline void adjust_pointer(T* p, + HeapWord* beg_addr, + HeapWord* end_addr); // Reference Processing static ReferenceProcessor* const ref_processor() { return _ref_processor; } @@ -1150,8 +1202,8 @@ static jlong millis_since_last_gc(); #ifdef VALIDATE_MARK_SWEEP - static void track_adjusted_pointer(oop* p, oop newobj, bool isroot); - static void check_adjust_pointer(oop* p); // Adjust this pointer + static void track_adjusted_pointer(void* p, bool isroot); + static void check_adjust_pointer(void* p); static void track_interior_pointers(oop obj); static void check_interior_pointers(); @@ -1175,20 +1227,26 @@ #ifndef PRODUCT // Debugging support. static const char* space_names[last_space_id]; - static void print_chunk_ranges(); + static void print_region_ranges(); static void print_dense_prefix_stats(const char* const algorithm, - const SpaceId id, - const bool maximum_compaction, - HeapWord* const addr); -#endif // #ifndef PRODUCT - -#ifdef ASSERT - // Verify that all the chunks have been emptied. + const SpaceId id, + const bool maximum_compaction, + HeapWord* const addr); + static void summary_phase_msg(SpaceId dst_space_id, + HeapWord* dst_beg, HeapWord* dst_end, + SpaceId src_space_id, + HeapWord* src_beg, HeapWord* src_end); +#endif // #ifndef PRODUCT + +#ifdef ASSERT + // Sanity check the new location of a word in the heap. + static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr); + // Verify that all the regions have been emptied. 
static void verify_complete(SpaceId space_id); #endif // #ifdef ASSERT }; -bool PSParallelCompact::mark_obj(oop obj) { +inline bool PSParallelCompact::mark_obj(oop obj) { const int obj_size = obj->size(); if (mark_bitmap()->mark_obj(obj, obj_size)) { _summary_data.add_obj(obj, obj_size); @@ -1198,30 +1256,111 @@ } } -inline bool PSParallelCompact::print_phases() -{ +template +inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) { + assert(!Universe::heap()->is_in_reserved(p), + "roots shouldn't be things within the heap"); +#ifdef VALIDATE_MARK_SWEEP + if (ValidateMarkSweep) { + guarantee(!_root_refs_stack->contains(p), "should only be in here once"); + _root_refs_stack->push(p); + } +#endif + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + if (mark_bitmap()->is_unmarked(obj)) { + if (mark_obj(obj)) { + obj->follow_contents(cm); + } + } + } + follow_stack(cm); +} + +template +inline void PSParallelCompact::mark_and_follow(ParCompactionManager* cm, + T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + if (mark_bitmap()->is_unmarked(obj)) { + if (mark_obj(obj)) { + obj->follow_contents(cm); + } + } + } +} + +template +inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + if (mark_bitmap()->is_unmarked(obj)) { + if (mark_obj(obj)) { + // This thread marked the object and owns the subsequent processing of it. + cm->save_for_scanning(obj); + } + } + } +} + +template +inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + oop new_obj = (oop)summary_data().calc_new_pointer(obj); + assert(new_obj != NULL || // is forwarding ptr? + obj->is_shared(), // never forwarded? + "should be forwarded"); + // Just always do the update unconditionally? 
+ if (new_obj != NULL) { + assert(Universe::heap()->is_in_reserved(new_obj), + "should be in object space"); + oopDesc::encode_store_heap_oop_not_null(p, new_obj); + } + } + VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot)); +} + +template +inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) { +#ifdef VALIDATE_MARK_SWEEP + if (ValidateMarkSweep) { + if (!Universe::heap()->is_in_reserved(p)) { + _root_refs_stack->push(p); + } else { + _other_refs_stack->push(p); + } + } +#endif + mark_and_push(_compaction_manager, p); +} + +inline bool PSParallelCompact::print_phases() { return _print_phases; } -inline double PSParallelCompact::normal_distribution(double density) -{ +inline double PSParallelCompact::normal_distribution(double density) { assert(_dwl_initialized, "uninitialized"); const double squared_term = (density - _dwl_mean) / _dwl_std_dev; return _dwl_first_term * exp(-0.5 * squared_term * squared_term); } inline bool -PSParallelCompact::dead_space_crosses_boundary(const ChunkData* chunk, - idx_t bit) +PSParallelCompact::dead_space_crosses_boundary(const RegionData* region, + idx_t bit) { - assert(bit > 0, "cannot call this for the first bit/chunk"); - assert(_summary_data.chunk_to_addr(chunk) == _mark_bitmap.bit_to_addr(bit), - "sanity check"); + assert(bit > 0, "cannot call this for the first bit/region"); + assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit), + "sanity check"); // Dead space crosses the boundary if (1) a partial object does not extend - // onto the chunk, (2) an object does not start at the beginning of the chunk, - // and (3) an object does not end at the end of the prior chunk. - return chunk->partial_obj_size() == 0 && + // onto the region, (2) an object does not start at the beginning of the + // region, and (3) an object does not end at the end of the prior region. 
+ return region->partial_obj_size() == 0 && !_mark_bitmap.is_obj_beg(bit) && !_mark_bitmap.is_obj_end(bit - 1); } @@ -1260,14 +1399,24 @@ return ((HeapWord*) k) >= dense_prefix(perm_space_id); } -inline void PSParallelCompact::adjust_pointer(oop* p, - HeapWord* beg_addr, - HeapWord* end_addr) { - if (is_in(p, beg_addr, end_addr)) { +template +inline void PSParallelCompact::adjust_pointer(T* p, + HeapWord* beg_addr, + HeapWord* end_addr) { + if (is_in((HeapWord*)p, beg_addr, end_addr)) { adjust_pointer(p); } } +#ifdef ASSERT +inline void +PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr) +{ + assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr), + "must move left or to a different space"); +} +#endif // ASSERT + class MoveAndUpdateClosure: public ParMarkBitMapClosure { public: inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm, @@ -1335,37 +1484,34 @@ inline void do_addr(HeapWord* addr); }; -inline void UpdateOnlyClosure::do_addr(HeapWord* addr) { +inline void UpdateOnlyClosure::do_addr(HeapWord* addr) +{ _start_array->allocate_block(addr); oop(addr)->update_contents(compaction_manager()); } -class FillClosure: public ParMarkBitMapClosure { +class FillClosure: public ParMarkBitMapClosure +{ public: - FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id): + FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) : ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm), - _space_id(space_id), _start_array(PSParallelCompact::start_array(space_id)) { - assert(_space_id == PSParallelCompact::perm_space_id || - _space_id == PSParallelCompact::old_space_id, - "cannot use FillClosure in the young gen"); - assert(bitmap() != NULL, "need a bitmap"); - assert(_start_array != NULL, "need a start array"); - } - - void fill_region(HeapWord* addr, size_t size) { - MemRegion region(addr, size); - SharedHeap::fill_region_with_object(region); - _start_array->allocate_block(addr); + assert(space_id == PSParallelCompact::perm_space_id || + space_id == PSParallelCompact::old_space_id, + "cannot use FillClosure in the young gen"); } virtual IterationStatus do_addr(HeapWord* addr, size_t size) { - fill_region(addr, size); + CollectedHeap::fill_with_objects(addr, size); + HeapWord* const end = addr + size; + do { + _start_array->allocate_block(addr); + addr += oop(addr)->size(); + } while (addr < end); return ParMarkBitMap::incomplete; } private: - const PSParallelCompact::SpaceId _space_id; - ObjectStartArray* const _start_array; + ObjectStartArray* const _start_array; }; --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp 2009-08-01 04:11:07.160018186 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp 2009-08-01 04:11:07.081158582 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psPermGen.cpp 1.28 07/05/05 17:05:30 JVM" #endif /* - * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -126,8 +126,6 @@ void PSPermGen::precompact() { // Reset start array first. 
- debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {) _start_array.reset(); - debug_only(}) object_mark_sweep()->precompact(); } --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp 2009-08-01 04:11:08.028559027 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp 2009-08-01 04:11:07.941549106 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psPromotionLAB.cpp 1.17 07/05/05 17:05:30 JVM" #endif /* - * Copyright 2002-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "incls/_precompiled.incl" #include "incls/_psPromotionLAB.cpp.incl" -const size_t PSPromotionLAB::filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT)); +size_t PSPromotionLAB::filler_header_size; // This is the shared initialization code. It sets up the basic pointers, // and allows enough extra space for a filler object. We call a virtual @@ -44,6 +44,10 @@ set_end(end); set_top(bottom); + // Initialize after VM starts up because header_size depends on compressed + // oops. + filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT)); + // We can be initialized to a zero size! if (free() > 0) { if (ZapUnusedHeapArea) { --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp 2009-08-01 04:11:08.883274947 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp 2009-08-01 04:11:08.803775410 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psPromotionLAB.hpp 1.13 07/05/05 17:05:30 JVM" #endif /* - * Copyright 2002 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ class PSPromotionLAB : public CHeapObj { protected: - static const size_t filler_header_size; + static size_t filler_header_size; enum LabState { needs_flush, --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp 2009-08-01 04:11:09.721464285 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp 2009-08-01 04:11:09.632156763 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psPromotionManager.cpp 1.30 07/09/25 16:47:41 JVM" #endif /* - * Copyright 2002-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -185,7 +185,7 @@ claimed_stack_depth()->initialize(); queue_size = claimed_stack_depth()->max_elems(); // We want the overflow stack to be permanent - _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray(10, true); + _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray(10, true); _overflow_stack_breadth = NULL; } else { claimed_stack_breadth()->initialize(); @@ -243,6 +243,7 @@ #endif // PS_PM_STATS } + void PSPromotionManager::drain_stacks_depth(bool totally_drain) { assert(depth_first(), "invariant"); assert(overflow_stack_depth() != NULL, "invariant"); @@ -257,13 +258,15 @@ #endif /* ASSERT */ do { - oop* p; + StarTask p; // Drain overflow stack first, so other threads can steal from // claimed stack while we work. while(!overflow_stack_depth()->is_empty()) { - p = overflow_stack_depth()->pop(); - process_popped_location_depth(p); + // linux compiler wants different overloaded operator= in taskqueue to + // assign to p that the other compilers don't like. + StarTask ptr = overflow_stack_depth()->pop(); + process_popped_location_depth(ptr); } if (totally_drain) { @@ -368,7 +371,7 @@ // oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) { - assert(PSScavenge::should_scavenge(o), "Sanity"); + assert(PSScavenge::should_scavenge(&o), "Sanity"); oop new_obj = NULL; @@ -499,26 +502,15 @@ // We lost, someone else "owns" this object guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed."); - // Unallocate the space used. NOTE! We may have directly allocated - // the object. If so, we cannot deallocate it, so we have to test! + // Try to deallocate the space. If it was directly allocated we cannot + // deallocate it, so we have to test. If the deallocation fails, + // overwrite with a filler object. if (new_obj_is_tenured) { if (!_old_lab.unallocate_object(new_obj)) { - // The promotion lab failed to unallocate the object. - // We need to overwrite the object with a filler that - // contains no interior pointers. - MemRegion mr((HeapWord*)new_obj, new_obj_size); - // Clean this up and move to oopFactory (see bug 4718422) - SharedHeap::fill_region_with_object(mr); - } - } else { - if (!_young_lab.unallocate_object(new_obj)) { - // The promotion lab failed to unallocate the object. - // We need to overwrite the object with a filler that - // contains no interior pointers. - MemRegion mr((HeapWord*)new_obj, new_obj_size); - // Clean this up and move to oopFactory (see bug 4718422) - SharedHeap::fill_region_with_object(mr); + CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size); } + } else if (!_young_lab.unallocate_object(new_obj)) { + CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size); } // don't update this before the unallocation! @@ -533,16 +525,30 @@ // This code must come after the CAS test, or it will print incorrect // information. if (TraceScavenge) { - gclog_or_tty->print_cr("{%s %s 0x%x -> 0x%x (%d)}", - PSScavenge::should_scavenge(new_obj) ? "copying" : "tenuring", + gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}", + PSScavenge::should_scavenge(&new_obj) ? 
"copying" : "tenuring", new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size()); - } #endif return new_obj; } +template void PSPromotionManager::process_array_chunk_work( + oop obj, + int start, int end) { + assert(start < end, "invariant"); + T* const base = (T*)objArrayOop(obj)->base(); + T* p = base + start; + T* const chunk_end = base + end; + while (p < chunk_end) { + if (PSScavenge::should_scavenge(p)) { + claim_or_forward_depth(p); + } + ++p; + } +} + void PSPromotionManager::process_array_chunk(oop old) { assert(PSChunkLargeArrays, "invariant"); assert(old->is_objArray(), "invariant"); @@ -572,15 +578,10 @@ arrayOop(old)->set_length(actual_length); } - assert(start < end, "invariant"); - oop* const base = objArrayOop(obj)->base(); - oop* p = base + start; - oop* const chunk_end = base + end; - while (p < chunk_end) { - if (PSScavenge::should_scavenge(*p)) { - claim_or_forward_depth(p); - } - ++p; + if (UseCompressedOops) { + process_array_chunk_work(obj, start, end); + } else { + process_array_chunk_work(obj, start, end); } } --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp 2009-08-01 04:11:10.610912222 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp 2009-08-01 04:11:10.533615707 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psPromotionManager.hpp 1.20 07/09/25 16:47:42 JVM" #endif /* - * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,8 +45,6 @@ class PSOldGen; class ParCompactionManager; -#define PS_CHUNKED_ARRAY_OOP_MASK 1 - #define PS_PM_STATS 0 class PSPromotionManager : public CHeapObj { @@ -83,7 +81,7 @@ PrefetchQueue _prefetch_queue; OopStarTaskQueue _claimed_stack_depth; - GrowableArray* _overflow_stack_depth; + GrowableArray* _overflow_stack_depth; OopTaskQueue _claimed_stack_breadth; GrowableArray* _overflow_stack_breadth; @@ -95,13 +93,15 @@ uint _min_array_size_for_chunking; // Accessors - static PSOldGen* old_gen() { return _old_gen; } - static MutableSpace* young_space() { return _young_space; } + static PSOldGen* old_gen() { return _old_gen; } + static MutableSpace* young_space() { return _young_space; } inline static PSPromotionManager* manager_array(int index); + template inline void claim_or_forward_internal_depth(T* p); + template inline void claim_or_forward_internal_breadth(T* p); - GrowableArray* overflow_stack_depth() { return _overflow_stack_depth; } - GrowableArray* overflow_stack_breadth() { return _overflow_stack_breadth; } + GrowableArray* overflow_stack_depth() { return _overflow_stack_depth; } + GrowableArray* overflow_stack_breadth() { return _overflow_stack_breadth; } // On the task queues we push reference locations as well as // partially-scanned arrays (in the latter case, we push an oop to @@ -119,27 +119,37 @@ // (oop). We do all the necessary casting in the mask / unmask // methods to avoid sprinkling the rest of the code with more casts. 
- bool is_oop_masked(oop* p) { - return ((intptr_t) p & PS_CHUNKED_ARRAY_OOP_MASK) == PS_CHUNKED_ARRAY_OOP_MASK; + // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any + // future masks) can't conflict with COMPRESSED_OOP_MASK +#define PS_CHUNKED_ARRAY_OOP_MASK 0x2 + + bool is_oop_masked(StarTask p) { + // If something is marked chunked it's always treated like wide oop* + return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) == + PS_CHUNKED_ARRAY_OOP_MASK; } oop* mask_chunked_array_oop(oop obj) { assert(!is_oop_masked((oop*) obj), "invariant"); - oop* ret = (oop*) ((intptr_t) obj | PS_CHUNKED_ARRAY_OOP_MASK); + oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK); assert(is_oop_masked(ret), "invariant"); return ret; } - oop unmask_chunked_array_oop(oop* p) { + oop unmask_chunked_array_oop(StarTask p) { assert(is_oop_masked(p), "invariant"); - oop ret = oop((intptr_t) p & ~PS_CHUNKED_ARRAY_OOP_MASK); + assert(!p.is_narrow(), "chunked array oops cannot be narrow"); + oop *chunk = (oop*)p; // cast p to oop (uses conversion operator) + oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK)); assert(!is_oop_masked((oop*) ret), "invariant"); return ret; } + template void process_array_chunk_work(oop obj, + int start, int end); void process_array_chunk(oop old); - void push_depth(oop* p) { + template void push_depth(T* p) { assert(depth_first(), "pre-condition"); #if PS_PM_STATS @@ -178,7 +188,7 @@ } protected: - static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; } + static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; } static OopTaskQueueSet* stack_array_breadth() { return _stack_array_breadth; } public: @@ -230,6 +240,7 @@ drain_stacks_breadth(totally_drain); } } + public: void drain_stacks_cond_depth() { if (claimed_stack_depth()->size() > _target_stack_size) { drain_stacks_depth(false); @@ -259,15 +270,11 @@ return _depth_first; } - inline void process_popped_location_depth(oop* p); + inline void process_popped_location_depth(StarTask p); inline void flush_prefetch_queue(); - - inline void claim_or_forward_depth(oop* p); - inline void claim_or_forward_internal_depth(oop* p); - - inline void claim_or_forward_breadth(oop* p); - inline void claim_or_forward_internal_breadth(oop* p); + template inline void claim_or_forward_depth(T* p); + template inline void claim_or_forward_breadth(T* p); #if PS_PM_STATS void increment_steals(oop* p = NULL) { --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp 2009-08-01 04:11:11.504960794 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp 2009-08-01 04:11:11.420963080 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psPromotionManager.inline.hpp 1.19 07/09/25 16:47:42 JVM" #endif /* - * Copyright 2002-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,64 +31,68 @@ return _manager_array[index]; } -inline void PSPromotionManager::claim_or_forward_internal_depth(oop* p) { - if (p != NULL) { - oop o = *p; +template +inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) { + if (p != NULL) { // XXX: error if p != NULL here + oop o = oopDesc::load_decode_heap_oop_not_null(p); if (o->is_forwarded()) { o = o->forwardee(); - // Card mark if (PSScavenge::is_obj_in_young((HeapWord*) o)) { PSScavenge::card_table()->inline_write_ref_field_gc(p, o); } - *p = o; + oopDesc::encode_store_heap_oop_not_null(p, o); } else { push_depth(p); } } } -inline void PSPromotionManager::claim_or_forward_internal_breadth(oop* p) { - if (p != NULL) { - oop o = *p; +template +inline void PSPromotionManager::claim_or_forward_internal_breadth(T* p) { + if (p != NULL) { // XXX: error if p != NULL here + oop o = oopDesc::load_decode_heap_oop_not_null(p); if (o->is_forwarded()) { o = o->forwardee(); } else { o = copy_to_survivor_space(o, false); } - // Card mark if (PSScavenge::is_obj_in_young((HeapWord*) o)) { PSScavenge::card_table()->inline_write_ref_field_gc(p, o); } - *p = o; + oopDesc::encode_store_heap_oop_not_null(p, o); } } inline void PSPromotionManager::flush_prefetch_queue() { assert(!depth_first(), "invariant"); - for (int i=0; i<_prefetch_queue.length(); i++) { - claim_or_forward_internal_breadth(_prefetch_queue.pop()); + for (int i = 0; i < _prefetch_queue.length(); i++) { + claim_or_forward_internal_breadth((oop*)_prefetch_queue.pop()); } } -inline void PSPromotionManager::claim_or_forward_depth(oop* p) { +template +inline void PSPromotionManager::claim_or_forward_depth(T* p) { assert(depth_first(), "invariant"); - assert(PSScavenge::should_scavenge(*p, true), "revisiting object?"); - assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); + assert(PSScavenge::should_scavenge(p, true), "revisiting object?"); + assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, + "Sanity"); assert(Universe::heap()->is_in(p), "pointer outside heap"); claim_or_forward_internal_depth(p); } -inline void PSPromotionManager::claim_or_forward_breadth(oop* p) { +template +inline void PSPromotionManager::claim_or_forward_breadth(T* p) { assert(!depth_first(), "invariant"); - assert(PSScavenge::should_scavenge(*p, true), "revisiting object?"); - assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); + assert(PSScavenge::should_scavenge(p, true), "revisiting object?"); + assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, + "Sanity"); assert(Universe::heap()->is_in(p), "pointer outside heap"); if (UsePrefetchQueue) { - claim_or_forward_internal_breadth(_prefetch_queue.push_and_pop(p)); + claim_or_forward_internal_breadth((T*)_prefetch_queue.push_and_pop(p)); } else { // This option is used for testing. 
The use of the prefetch // queue can delay the processing of the objects and thus @@ -109,12 +113,16 @@ } } -inline void PSPromotionManager::process_popped_location_depth(oop* p) { +inline void PSPromotionManager::process_popped_location_depth(StarTask p) { if (is_oop_masked(p)) { assert(PSChunkLargeArrays, "invariant"); oop const old = unmask_chunked_array_oop(p); process_array_chunk(old); } else { - PSScavenge::copy_and_push_safe_barrier(this, p); + if (p.is_narrow()) { + PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p); + } else { + PSScavenge::copy_and_push_safe_barrier(this, (oop*)p); + } } } --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp 2009-08-01 04:11:12.453380570 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp 2009-08-01 04:11:12.377632966 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psScavenge.cpp 1.99 07/09/07 09:53:34 JVM" #endif /* - * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,16 +68,18 @@ assert(_promotion_manager != NULL, "Sanity"); } - void do_oop(oop* p) { - assert (*p != NULL, "expected non-null ref"); - assert ((*p)->is_oop(), "expected an oop while scanning weak refs"); - - oop obj = oop(*p); + template void do_oop_work(T* p) { + assert (!oopDesc::is_null(*p), "expected non-null ref"); + assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(), + "expected an oop while scanning weak refs"); + // Weak refs may be visited more than once. - if (PSScavenge::should_scavenge(obj, _to_space)) { + if (PSScavenge::should_scavenge(p, _to_space)) { PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); } } + virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); } + virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); } }; class PSEvacuateFollowersClosure: public VoidClosure { @@ -86,7 +88,7 @@ public: PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {} - void do_void() { + virtual void do_void() { assert(_promotion_manager != NULL, "Sanity"); _promotion_manager->drain_stacks(true); guarantee(_promotion_manager->stacks_empty(), @@ -266,6 +268,11 @@ young_gen->eden_space()->accumulate_statistics(); } + if (ZapUnusedHeapArea) { + // Save information needed to minimize mangling + heap->record_gen_tops_before_GC(); + } + if (PrintHeapAtGC) { Universe::print_heap_before_gc(); } @@ -315,8 +322,8 @@ if (!ScavengeWithObjectsInToSpace) { assert(young_gen->to_space()->is_empty(), - "Attempt to scavenge with live objects in to_space"); - young_gen->to_space()->clear(); + "Attempt to scavenge with live objects in to_space"); + young_gen->to_space()->clear(SpaceDecorator::Mangle); } else if (ZapUnusedHeapArea) { young_gen->to_space()->mangle_unused_area(); } @@ -326,7 +333,8 @@ COMPILER2_PRESENT(DerivedPointerTable::clear()); reference_processor()->enable_discovery(); - + reference_processor()->setup_policy(false); + // We track how much was promoted to the next generation for // the AdaptiveSizePolicy. 
size_t old_gen_used_before = old_gen->used_in_bytes(); @@ -390,24 +398,16 @@ // Process reference objects discovered during scavenge { -#ifdef COMPILER2 - ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy(); -#else - ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - + reference_processor()->setup_policy(false); // not always_clear PSKeepAliveClosure keep_alive(promotion_manager); PSEvacuateFollowersClosure evac_followers(promotion_manager); - assert(soft_ref_policy != NULL,"No soft reference policy"); if (reference_processor()->processing_is_mt()) { PSRefProcTaskExecutor task_executor; reference_processor()->process_discovered_references( - soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers, - &task_executor); + &_is_alive_closure, &keep_alive, &evac_followers, &task_executor); } else { reference_processor()->process_discovered_references( - soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers, - NULL); + &_is_alive_closure, &keep_alive, &evac_followers, NULL); } } @@ -438,8 +438,10 @@ if (!promotion_failure_occurred) { // Swap the survivor spaces. - young_gen->eden_space()->clear(); - young_gen->from_space()->clear(); + + + young_gen->eden_space()->clear(SpaceDecorator::Mangle); + young_gen->from_space()->clear(SpaceDecorator::Mangle); young_gen->swap_spaces(); size_t survived = young_gen->from_space()->used_in_bytes(); @@ -601,6 +603,12 @@ Universe::print_heap_after_gc(); } + if (ZapUnusedHeapArea) { + young_gen->eden_space()->check_mangled_unused_area_complete(); + young_gen->from_space()->check_mangled_unused_area_complete(); + young_gen->to_space()->check_mangled_unused_area_complete(); + } + scavenge_exit.update(); if (PrintGCTaskTimeStamps) { --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp 2009-08-01 04:11:13.399879480 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp 2009-08-01 04:11:13.312346912 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psScavenge.hpp 1.46 07/05/05 17:05:30 JVM" #endif /* - * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -119,16 +119,16 @@ // If an attempt to promote fails, this method is invoked static void oop_promotion_failed(oop obj, markOop obj_mark); - static inline bool should_scavenge(oop p); + template static inline bool should_scavenge(T* p); // These call should_scavenge() above and, if it returns true, also check that // the object was not newly copied into to_space. The version with the bool // argument is a convenience wrapper that fetches the to_space pointer from // the heap and calls the other version (if the arg is true). 
- static inline bool should_scavenge(oop p, MutableSpace* to_space); - static inline bool should_scavenge(oop p, bool check_to_space); + template static inline bool should_scavenge(T* p, MutableSpace* to_space); + template static inline bool should_scavenge(T* p, bool check_to_space); - inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, oop* p); + template inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p); // Is an object in the young generation // This assumes that the HeapWord argument is in the heap, --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp 2009-08-01 04:11:14.235841199 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp 2009-08-01 04:11:14.163666806 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psScavenge.inline.hpp 1.18 07/05/05 17:05:29 JVM" #endif /* - * Copyright 2002-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,28 +25,33 @@ * */ - inline void PSScavenge::save_to_space_top_before_gc() { ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); _to_space_top_before_gc = heap->young_gen()->to_space()->top(); } -inline bool PSScavenge::should_scavenge(oop p) { - return p == NULL ? false : PSScavenge::is_obj_in_young((HeapWord*) p); +template inline bool PSScavenge::should_scavenge(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (oopDesc::is_null(heap_oop)) return false; + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + return PSScavenge::is_obj_in_young((HeapWord*)obj); } -inline bool PSScavenge::should_scavenge(oop p, MutableSpace* to_space) { +template +inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) { if (should_scavenge(p)) { + oop obj = oopDesc::load_decode_heap_oop_not_null(p); // Skip objects copied to to_space since the scavenge started. - HeapWord* const addr = (HeapWord*) p; + HeapWord* const addr = (HeapWord*)obj; return addr < to_space_top_before_gc() || addr >= to_space->end(); } return false; } -inline bool PSScavenge::should_scavenge(oop p, bool check_to_space) { +template +inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) { if (check_to_space) { - ParallelScavengeHeap* heap = (ParallelScavengeHeap*) Universe::heap(); + ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); return should_scavenge(p, heap->young_gen()->to_space()); } return should_scavenge(p); @@ -55,24 +60,23 @@ // Attempt to "claim" oop at p via CAS, push the new obj if successful // This version tests the oop* to make sure it is within the heap before // attempting marking. +template inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm, - oop* p) { - assert(should_scavenge(*p, true), "revisiting object?"); + T* p) { + assert(should_scavenge(p, true), "revisiting object?"); - oop o = *p; - if (o->is_forwarded()) { - *p = o->forwardee(); - } else { - *p = pm->copy_to_survivor_space(o, pm->depth_first()); - } - - // We cannot mark without test, as some code passes us pointers + oop o = oopDesc::load_decode_heap_oop_not_null(p); + oop new_obj = o->is_forwarded() + ? 
o->forwardee() + : pm->copy_to_survivor_space(o, pm->depth_first()); + oopDesc::encode_store_heap_oop_not_null(p, new_obj); + + // We cannot mark without test, as some code passes us pointers // that are outside the heap. - if ((!PSScavenge::is_obj_in_young((HeapWord*) p)) && + if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) && Universe::heap()->is_in_reserved(p)) { - o = *p; - if (PSScavenge::is_obj_in_young((HeapWord*) o)) { - card_table()->inline_write_ref_field_gc(p, o); + if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) { + card_table()->inline_write_ref_field_gc(p, new_obj); } } } --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp 2009-08-01 04:11:15.104726997 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp 2009-08-01 04:11:15.020498383 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psTasks.cpp 1.29 07/09/25 16:47:43 JVM" #endif /* - * Copyright 2002-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,15 +37,17 @@ private: PSPromotionManager* _promotion_manager; - public: - PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { } - - virtual void do_oop(oop* p) { - if (PSScavenge::should_scavenge(*p)) { + protected: + template void do_oop_work(T *p) { + if (PSScavenge::should_scavenge(p)) { // We never card mark roots, maybe call a func without test? PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); } } + public: + PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { } + void do_oop(oop* p) { PSScavengeRootsClosure::do_oop_work(p); } + void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); } }; void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) { @@ -138,7 +140,7 @@ int random_seed = 17; if (pm->depth_first()) { while(true) { - oop* p; + StarTask p; if (PSPromotionManager::steal_depth(which, &random_seed, p)) { #if PS_PM_STATS pm->increment_steals(p); @@ -167,8 +169,7 @@ } } } - guarantee(pm->stacks_empty(), - "stacks should be empty at this point"); + guarantee(pm->stacks_empty(), "stacks should be empty at this point"); } // --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp 2009-08-01 04:11:15.917156385 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp 2009-08-01 04:11:15.835583133 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psVirtualspace.cpp 1.16 07/05/05 17:05:31 JVM" #endif /* - * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,13 +74,8 @@ void PSVirtualSpace::release() { DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this)); - if (reserved_low_addr() != NULL) { - if (special()) { - os::release_memory_special(reserved_low_addr(), reserved_size()); - } else { - (void)os::release_memory(reserved_low_addr(), reserved_size()); - } - } + // This may not release memory it didn't reserve. + // Use rs.release() to release the underlying memory instead. 
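With release() reduced to bookkeeping, unmapping is left to the ReservedSpace that backs the virtual space, as the new comment indicates. A rough sketch of the intended split, with rs, reserved_byte_size and alignment as placeholder names:

    ReservedSpace rs(reserved_byte_size);            // placeholder: whoever owns the mapping
    PSVirtualSpace* vs = new PSVirtualSpace(rs, alignment);
    // ... commit, use and uncommit pages through vs ...
    vs->release();                                   // forgets the boundaries, does not unmap
    rs.release();                                    // actually returns the reserved memory to the OS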
_reserved_low_addr = _reserved_high_addr = NULL; _committed_low_addr = _committed_high_addr = NULL; _special = false; --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp 2009-08-01 04:11:16.761386549 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp 2009-08-01 04:11:16.676123313 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psYoungGen.cpp 1.68 07/10/04 10:49:36 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) { assert(_init_gen_size != 0, "Should have a finite size"); _virtual_space = new PSVirtualSpace(rs, alignment); - if (!_virtual_space->expand_by(_init_gen_size)) { + if (!virtual_space()->expand_by(_init_gen_size)) { vm_exit_during_initialization("Could not reserve enough space for " "object heap"); } @@ -52,13 +52,20 @@ void PSYoungGen::initialize_work() { - _reserved = MemRegion((HeapWord*)_virtual_space->low_boundary(), - (HeapWord*)_virtual_space->high_boundary()); + _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(), + (HeapWord*)virtual_space()->high_boundary()); - MemRegion cmr((HeapWord*)_virtual_space->low(), - (HeapWord*)_virtual_space->high()); + MemRegion cmr((HeapWord*)virtual_space()->low(), + (HeapWord*)virtual_space()->high()); Universe::heap()->barrier_set()->resize_covered_region(cmr); + if (ZapUnusedHeapArea) { + // Mangle newly committed space immediately because it + // can be done here more simply that after the new + // spaces have been computed. + SpaceMangler::mangle_region(cmr); + } + if (UseNUMA) { _eden_space = new MutableNUMASpace(); } else { @@ -91,8 +98,8 @@ // Compute maximum space sizes for performance counters ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - size_t alignment = heap->intra_generation_alignment(); - size_t size = _virtual_space->reserved_size(); + size_t alignment = heap->intra_heap_alignment(); + size_t size = virtual_space()->reserved_size(); size_t max_survivor_size; size_t max_eden_size; @@ -144,8 +151,8 @@ assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); // Compute sizes - size_t alignment = heap->intra_generation_alignment(); - size_t size = _virtual_space->committed_size(); + size_t alignment = heap->intra_heap_alignment(); + size_t size = virtual_space()->committed_size(); size_t survivor_size = size / InitialSurvivorRatio; survivor_size = align_size_down(survivor_size, alignment); @@ -167,18 +174,18 @@ } void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) { - assert(eden_size < _virtual_space->committed_size(), "just checking"); + assert(eden_size < virtual_space()->committed_size(), "just checking"); assert(eden_size > 0 && survivor_size > 0, "just checking"); // Initial layout is Eden, to, from. After swapping survivor spaces, // that leaves us with Eden, from, to, which is step one in our two // step resize-with-live-data procedure. 
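A small worked example of the boundary arithmetic computed just below, with invented sizes (eden_size = 6*M, survivor_size = 1*M) to make the Eden, to, from ordering concrete:

    char* eden_start = virtual_space()->low();   // eden = [eden_start, to_start)
    char* to_start   = eden_start + 6*M;         // to   = [to_start,   from_start)
    char* from_start = to_start   + 1*M;         // from = [from_start, from_end)
    char* from_end   = from_start + 1*M;         // from_end == virtual_space()->high()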
- char *eden_start = _virtual_space->low(); + char *eden_start = virtual_space()->low(); char *to_start = eden_start + eden_size; char *from_start = to_start + survivor_size; char *from_end = from_start + survivor_size; - assert(from_end == _virtual_space->high(), "just checking"); + assert(from_end == virtual_space()->high(), "just checking"); assert(is_object_aligned((intptr_t)eden_start), "checking alignment"); assert(is_object_aligned((intptr_t)to_start), "checking alignment"); assert(is_object_aligned((intptr_t)from_start), "checking alignment"); @@ -187,15 +194,15 @@ MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start); MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end); - eden_space()->initialize(eden_mr, true); - to_space()->initialize(to_mr , true); - from_space()->initialize(from_mr, true); + eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea); + to_space()->initialize(to_mr , true, ZapUnusedHeapArea); + from_space()->initialize(from_mr, true, ZapUnusedHeapArea); } #ifndef PRODUCT void PSYoungGen::space_invariants() { ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t alignment = heap->intra_generation_alignment(); + const size_t alignment = heap->intra_heap_alignment(); // Currently, our eden size cannot shrink to zero guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small"); @@ -210,7 +217,7 @@ char* to_start = (char*)to_space()->bottom(); char* to_end = (char*)to_space()->end(); - guarantee(eden_start >= _virtual_space->low(), "eden bottom"); + guarantee(eden_start >= virtual_space()->low(), "eden bottom"); guarantee(eden_start < eden_end, "eden space consistency"); guarantee(from_start < from_end, "from space consistency"); guarantee(to_start < to_end, "to space consistency"); @@ -220,29 +227,29 @@ // Eden, from, to guarantee(eden_end <= from_start, "eden/from boundary"); guarantee(from_end <= to_start, "from/to boundary"); - guarantee(to_end <= _virtual_space->high(), "to end"); + guarantee(to_end <= virtual_space()->high(), "to end"); } else { // Eden, to, from guarantee(eden_end <= to_start, "eden/to boundary"); guarantee(to_end <= from_start, "to/from boundary"); - guarantee(from_end <= _virtual_space->high(), "from end"); + guarantee(from_end <= virtual_space()->high(), "from end"); } // More checks that the virtual space is consistent with the spaces - assert(_virtual_space->committed_size() >= + assert(virtual_space()->committed_size() >= (eden_space()->capacity_in_bytes() + to_space()->capacity_in_bytes() + from_space()->capacity_in_bytes()), "Committed size is inconsistent"); - assert(_virtual_space->committed_size() <= _virtual_space->reserved_size(), + assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(), "Space invariant"); char* eden_top = (char*)eden_space()->top(); char* from_top = (char*)from_space()->top(); char* to_top = (char*)to_space()->top(); - assert(eden_top <= _virtual_space->high(), "eden top"); - assert(from_top <= _virtual_space->high(), "from top"); - assert(to_top <= _virtual_space->high(), "to top"); + assert(eden_top <= virtual_space()->high(), "eden top"); + assert(from_top <= virtual_space()->high(), "from top"); + assert(to_top <= virtual_space()->high(), "to top"); - _virtual_space->verify(); + virtual_space()->verify(); } #endif @@ -268,8 +275,8 @@ bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) { - const size_t alignment = _virtual_space->alignment(); - size_t orig_size = _virtual_space->committed_size(); + 
const size_t alignment = virtual_space()->alignment(); + size_t orig_size = virtual_space()->committed_size(); bool size_changed = false; // There used to be this guarantee there. @@ -291,10 +298,18 @@ // Grow the generation size_t change = desired_size - orig_size; assert(change % alignment == 0, "just checking"); - if (!_virtual_space->expand_by(change)) { + HeapWord* prev_high = (HeapWord*) virtual_space()->high(); + if (!virtual_space()->expand_by(change)) { return false; // Error if we fail to resize! } - + if (ZapUnusedHeapArea) { + // Mangle newly committed space immediately because it + // can be done here more simply that after the new + // spaces have been computed. + HeapWord* new_high = (HeapWord*) virtual_space()->high(); + MemRegion mangle_region(prev_high, new_high); + SpaceMangler::mangle_region(mangle_region); + } size_changed = true; } else if (desired_size < orig_size) { size_t desired_change = orig_size - desired_size; @@ -324,19 +339,95 @@ post_resize(); if (Verbose && PrintGC) { - size_t current_size = _virtual_space->committed_size(); + size_t current_size = virtual_space()->committed_size(); gclog_or_tty->print_cr("PSYoung generation size changed: " SIZE_FORMAT "K->" SIZE_FORMAT "K", orig_size/K, current_size/K); } } - guarantee(eden_plus_survivors <= _virtual_space->committed_size() || - _virtual_space->committed_size() == max_size(), "Sanity"); + guarantee(eden_plus_survivors <= virtual_space()->committed_size() || + virtual_space()->committed_size() == max_size(), "Sanity"); return true; } +#ifndef PRODUCT +// In the numa case eden is not mangled so a survivor space +// moving into a region previously occupied by a survivor +// may find an unmangled region. Also in the PS case eden +// to-space and from-space may not touch (i.e., there may be +// gaps between them due to movement while resizing the +// spaces). Those gaps must be mangled. +void PSYoungGen::mangle_survivors(MutableSpace* s1, + MemRegion s1MR, + MutableSpace* s2, + MemRegion s2MR) { + // Check eden and gap between eden and from-space, in deciding + // what to mangle in from-space. Check the gap between from-space + // and to-space when deciding what to mangle. + // + // +--------+ +----+ +---+ + // | eden | |s1 | |s2 | + // +--------+ +----+ +---+ + // +-------+ +-----+ + // |s1MR | |s2MR | + // +-------+ +-----+ + // All of survivor-space is properly mangled so find the + // upper bound on the mangling for any portion above current s1. + HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end()); + MemRegion delta1_left; + if (s1MR.start() < delta_end) { + delta1_left = MemRegion(s1MR.start(), delta_end); + s1->mangle_region(delta1_left); + } + // Find any portion to the right of the current s1. + HeapWord* delta_start = MAX2(s1->end(), s1MR.start()); + MemRegion delta1_right; + if (delta_start < s1MR.end()) { + delta1_right = MemRegion(delta_start, s1MR.end()); + s1->mangle_region(delta1_right); + } + + // Similarly for the second survivor space except that + // any of the new region that overlaps with the current + // region of the first survivor space has already been + // mangled. 
+ delta_end = MIN2(s2->bottom(), s2MR.end()); + delta_start = MAX2(s2MR.start(), s1->end()); + MemRegion delta2_left; + if (s2MR.start() < delta_end) { + delta2_left = MemRegion(s2MR.start(), delta_end); + s2->mangle_region(delta2_left); + } + delta_start = MAX2(s2->end(), s2MR.start()); + MemRegion delta2_right; + if (delta_start < s2MR.end()) { + s2->mangle_region(delta2_right); + } + + if (TraceZapUnusedHeapArea) { + // s1 + gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") " + "New region: [" PTR_FORMAT ", " PTR_FORMAT ")", + s1->bottom(), s1->end(), s1MR.start(), s1MR.end()); + gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", " + PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")", + delta1_left.start(), delta1_left.end(), delta1_right.start(), + delta1_right.end()); + + // s2 + gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") " + "New region: [" PTR_FORMAT ", " PTR_FORMAT ")", + s2->bottom(), s2->end(), s2MR.start(), s2MR.end()); + gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", " + PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")", + delta2_left.start(), delta2_left.end(), delta2_right.start(), + delta2_right.end()); + } + +} +#endif // NOT PRODUCT void PSYoungGen::resize_spaces(size_t requested_eden_size, size_t requested_survivor_size) { @@ -395,13 +486,15 @@ char* to_end = (char*)to_space()->end(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t alignment = heap->intra_generation_alignment(); - const bool maintain_minimum = + const size_t alignment = heap->intra_heap_alignment(); + const bool maintain_minimum = (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size(); + bool eden_from_to_order = from_start < to_start; // Check whether from space is below to space - if (from_start < to_start) { + if (eden_from_to_order) { // Eden, from, to + eden_from_to_order = true; if (PrintAdaptiveSizePolicy && Verbose) { gclog_or_tty->print_cr(" Eden, from, to:"); } @@ -438,9 +531,9 @@ // extra calculations. // First calculate an optimal to-space - to_end = (char*)_virtual_space->high(); - to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, - sizeof(char)); + to_end = (char*)virtual_space()->high(); + to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, + sizeof(char)); // Does the optimal to-space overlap from-space? if (to_start < (char*)from_space()->end()) { @@ -494,9 +587,9 @@ // to space as if we were able to resize from space, even though from // space is not modified. // Giving eden priority was tried and gave poorer performance. - to_end = (char*)pointer_delta(_virtual_space->high(), - (char*)requested_survivor_size, - sizeof(char)); + to_end = (char*)pointer_delta(virtual_space()->high(), + (char*)requested_survivor_size, + sizeof(char)); to_end = MIN2(to_end, from_start); to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, sizeof(char)); @@ -563,9 +656,45 @@ size_t old_from = from_space()->capacity_in_bytes(); size_t old_to = to_space()->capacity_in_bytes(); - eden_space()->initialize(edenMR, true); - to_space()->initialize(toMR , true); - from_space()->initialize(fromMR, false); // Note, not cleared! + if (ZapUnusedHeapArea) { + // NUMA is a special case because a numa space is not mangled + // in order to not prematurely bind its address to memory to + // the wrong memory (i.e., don't want the GC thread to first + // touch the memory). 
The survivor spaces are not numa + // spaces and are mangled. + if (UseNUMA) { + if (eden_from_to_order) { + mangle_survivors(from_space(), fromMR, to_space(), toMR); + } else { + mangle_survivors(to_space(), toMR, from_space(), fromMR); + } + } + + // If not mangling the spaces, do some checking to verify that + // the spaces are already mangled. + // The spaces should be correctly mangled at this point so + // do some checking here. Note that they are not being mangled + // in the calls to initialize(). + // Must check mangling before the spaces are reshaped. Otherwise, + // the bottom or end of one space may have moved into an area + // covered by another space and a failure of the check may + // not correctly indicate which space is not properly mangled. + HeapWord* limit = (HeapWord*) virtual_space()->high(); + eden_space()->check_mangled_unused_area(limit); + from_space()->check_mangled_unused_area(limit); + to_space()->check_mangled_unused_area(limit); + } + // When an existing space is being initialized, it is not + // mangled because the space has been previously mangled. + eden_space()->initialize(edenMR, + SpaceDecorator::Clear, + SpaceDecorator::DontMangle); + to_space()->initialize(toMR, + SpaceDecorator::Clear, + SpaceDecorator::DontMangle); + from_space()->initialize(fromMR, + SpaceDecorator::DontClear, + SpaceDecorator::DontMangle); assert(from_space()->top() == old_from_top, "from top changed!"); @@ -674,7 +803,7 @@ st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", capacity_in_bytes()/K, used_in_bytes()/K); } - _virtual_space->print_space_boundaries_on(st); + virtual_space()->print_space_boundaries_on(st); st->print(" eden"); eden_space()->print_on(st); st->print(" from"); from_space()->print_on(st); st->print(" to "); to_space()->print_on(st); @@ -711,7 +840,7 @@ size_t PSYoungGen::available_to_live() { size_t delta_in_survivor = 0; ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t space_alignment = heap->intra_generation_alignment(); + const size_t space_alignment = heap->intra_heap_alignment(); const size_t gen_alignment = heap->young_gen_alignment(); MutableSpace* space_shrinking = NULL; @@ -777,7 +906,9 @@ // Was there a shrink of the survivor space? if (new_end < space_shrinking->end()) { MemRegion mr(space_shrinking->bottom(), new_end); - space_shrinking->initialize(mr, false /* clear */); + space_shrinking->initialize(mr, + SpaceDecorator::DontClear, + SpaceDecorator::Mangle); } } @@ -812,3 +943,12 @@ from_space()->verify(allow_dirty); to_space()->verify(allow_dirty); } + +#ifndef PRODUCT +void PSYoungGen::record_spaces_top() { + assert(ZapUnusedHeapArea, "Not mangling unused space"); + eden_space()->set_top_for_allocations(); + from_space()->set_top_for_allocations(); + to_space()->set_top_for_allocations(); +} +#endif --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp 2009-08-01 04:11:17.775163293 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp 2009-08-01 04:11:17.694907914 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)psYoungGen.hpp 1.48 07/05/05 17:05:31 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -182,4 +182,12 @@ // Space boundary invariant checker void space_invariants() PRODUCT_RETURN; + + // Helper for mangling survivor spaces. + void mangle_survivors(MutableSpace* s1, + MemRegion s1MR, + MutableSpace* s2, + MemRegion s2MR) PRODUCT_RETURN; + + void record_spaces_top() PRODUCT_RETURN; }; --- old/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp 2009-08-01 04:11:18.600125954 +0100 +++ new/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp 2009-08-01 04:11:18.531245557 +0100 @@ -72,6 +72,9 @@ GCCauseSetter gccs(heap, _gc_cause); _result = heap->failed_permanent_mem_allocate(_size); + if (_result == NULL && GC_locker::is_active_and_needs_gc()) { + set_gc_locked(); + } notify_gc_end(); } --- old/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp 2009-08-01 04:11:19.477367504 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp 2009-08-01 04:11:19.398855847 +0100 @@ -70,6 +70,12 @@ } } +void ageTable::merge_par(ageTable* subTable) { + for (int i = 0; i < table_size; i++) { + Atomic::add_ptr(subTable->sizes[i], &sizes[i]); + } +} + int ageTable::compute_tenuring_threshold(size_t survivor_capacity) { size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100); size_t total = 0; --- old/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp 2009-08-01 04:11:20.323419962 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp 2009-08-01 04:11:20.250465451 +0100 @@ -59,6 +59,7 @@ // Merge another age table with the current one. Used // for parallel young generation gc. void merge(ageTable* subTable); + void merge_par(ageTable* subTable); // calculate new tenuring threshold based on age information int compute_tenuring_threshold(size_t survivor_capacity); --- old/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp 2009-08-01 04:11:21.197220703 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp 2009-08-01 04:11:21.111189693 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)gcUtil.hpp 1.19 07/05/05 17:05:32 JVM" #endif /* - * Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,12 @@ _average(0.0), _sample_count(0), _weight(weight), _last_sample(0.0) { } + void clear() { + _average = 0; + _sample_count = 0; + _last_sample = 0; + } + // Accessors float average() const { return _average; } unsigned weight() const { return _weight; } @@ -118,6 +124,12 @@ float deviation() const { return _deviation; } unsigned padding() const { return _padding; } + void clear() { + AdaptiveWeightedAverage::clear(); + _padded_avg = 0; + _deviation = 0; + } + // Override void sample(float new_sample); }; --- old/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp 2009-08-01 04:11:22.025095837 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.cpp 2009-08-01 04:11:21.956350363 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)immutableSpace.cpp 1.13 07/05/05 17:05:34 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -69,7 +69,7 @@ #endif -void ImmutableSpace::verify(bool allow_dirty) const { +void ImmutableSpace::verify(bool allow_dirty) { HeapWord* p = bottom(); HeapWord* t = end(); HeapWord* prev_p = NULL; --- old/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp 2009-08-01 04:11:22.843455262 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp 2009-08-01 04:11:22.774700361 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)immutableSpace.hpp 1.14 07/05/05 17:05:33 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,7 +53,8 @@ size_t capacity_in_bytes() const { return capacity_in_words() * HeapWordSize; } // Size computations. Sizes are in heapwords. - size_t capacity_in_words() const { return pointer_delta(end(), bottom()); } + size_t capacity_in_words() const { return pointer_delta(end(), bottom()); } + virtual size_t capacity_in_words(Thread*) const { return capacity_in_words(); } // Iteration. virtual void oop_iterate(OopClosure* cl); @@ -62,5 +63,5 @@ // Debugging virtual void print() const PRODUCT_RETURN; virtual void print_short() const PRODUCT_RETURN; - virtual void verify(bool allow_dirty) const; + virtual void verify(bool allow_dirty); }; --- old/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp 2009-08-01 04:11:23.671122296 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp 2009-08-01 04:11:23.593605331 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)markSweep.cpp 1.196 07/05/05 17:05:35 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,16 +39,16 @@ ReferenceProcessor* MarkSweep::_ref_processor = NULL; #ifdef VALIDATE_MARK_SWEEP -GrowableArray* MarkSweep::_root_refs_stack = NULL; +GrowableArray* MarkSweep::_root_refs_stack = NULL; GrowableArray * MarkSweep::_live_oops = NULL; GrowableArray * MarkSweep::_live_oops_moved_to = NULL; GrowableArray* MarkSweep::_live_oops_size = NULL; size_t MarkSweep::_live_oops_index = 0; size_t MarkSweep::_live_oops_index_at_perm = 0; -GrowableArray* MarkSweep::_other_refs_stack = NULL; -GrowableArray* MarkSweep::_adjusted_pointers = NULL; -bool MarkSweep::_pointer_tracking = false; -bool MarkSweep::_root_tracking = true; +GrowableArray* MarkSweep::_other_refs_stack = NULL; +GrowableArray* MarkSweep::_adjusted_pointers = NULL; +bool MarkSweep::_pointer_tracking = false; +bool MarkSweep::_root_tracking = true; GrowableArray* MarkSweep::_cur_gc_live_oops = NULL; GrowableArray* MarkSweep::_cur_gc_live_oops_moved_to = NULL; @@ -62,7 +62,6 @@ _revisit_klass_stack->push(k); } - void MarkSweep::follow_weak_klass_links() { // All klasses on the revisit stack are marked at this point. // Update and follow all subklass, sibling and implementor links. 
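The closure rewrites in this file (and in psTasks.cpp above) all follow the same pattern: the two virtual do_oop entry points forward to a single templated worker that handles both uncompressed and compressed references. A minimal sketch of that pattern, using a made-up ExampleClosure name:

    class ExampleClosure : public OopClosure {          // illustrative only
     protected:
      template <class T> void do_oop_work(T* p) {       // T is oop or narrowOop
        T heap_oop = oopDesc::load_heap_oop(p);
        if (!oopDesc::is_null(heap_oop)) {
          oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
          // ... mark, push or adjust obj here ...
        }
      }
     public:
      virtual void do_oop(oop* p)       { do_oop_work(p); }
      virtual void do_oop(narrowOop* p) { do_oop_work(p); }
    };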
@@ -72,44 +71,15 @@ follow_stack(); } +MarkSweep::FollowRootClosure MarkSweep::follow_root_closure; -void MarkSweep::mark_and_follow(oop* p) { - assert(Universe::heap()->is_in_reserved(p), - "we should only be traversing objects here"); - oop m = *p; - if (m != NULL && !m->mark()->is_marked()) { - mark_object(m); - m->follow_contents(); // Follow contents of the marked object - } -} - -void MarkSweep::_mark_and_push(oop* p) { - // Push marked object, contents will be followed later - oop m = *p; - mark_object(m); - _marking_stack->push(m); -} +void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); } +void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); } MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure; -void MarkSweep::follow_root(oop* p) { - assert(!Universe::heap()->is_in_reserved(p), - "roots shouldn't be things within the heap"); -#ifdef VALIDATE_MARK_SWEEP - if (ValidateMarkSweep) { - guarantee(!_root_refs_stack->contains(p), "should only be in here once"); - _root_refs_stack->push(p); - } -#endif - oop m = *p; - if (m != NULL && !m->mark()->is_marked()) { - mark_object(m); - m->follow_contents(); // Follow contents of the marked object - } - follow_stack(); -} - -MarkSweep::FollowRootClosure MarkSweep::follow_root_closure; +void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(p); } +void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); } void MarkSweep::follow_stack() { while (!_marking_stack->is_empty()) { @@ -121,6 +91,7 @@ MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure; +void MarkSweep::FollowStackClosure::do_void() { follow_stack(); } // We preserve the mark which should be replaced at the end and the location that it // will go. Note that the object that this markOop belongs to isn't currently at that @@ -145,6 +116,9 @@ MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true); MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false); +void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } +void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } + void MarkSweep::adjust_marks() { assert(_preserved_oop_stack == NULL || _preserved_oop_stack->length() == _preserved_mark_stack->length(), @@ -190,7 +164,7 @@ #ifdef VALIDATE_MARK_SWEEP -void MarkSweep::track_adjusted_pointer(oop* p, oop newobj, bool isroot) { +void MarkSweep::track_adjusted_pointer(void* p, bool isroot) { if (!ValidateMarkSweep) return; @@ -204,9 +178,9 @@ if (index != -1) { int l = _root_refs_stack->length(); if (l > 0 && l - 1 != index) { - oop* last = _root_refs_stack->pop(); - assert(last != p, "should be different"); - _root_refs_stack->at_put(index, last); + void* last = _root_refs_stack->pop(); + assert(last != p, "should be different"); + _root_refs_stack->at_put(index, last); } else { _root_refs_stack->remove(p); } @@ -214,19 +188,17 @@ } } - -void MarkSweep::check_adjust_pointer(oop* p) { +void MarkSweep::check_adjust_pointer(void* p) { _adjusted_pointers->push(p); } - class AdjusterTracker: public OopClosure { public: - AdjusterTracker() {}; - void do_oop(oop* o) { MarkSweep::check_adjust_pointer(o); } + AdjusterTracker() {} + void do_oop(oop* o) { MarkSweep::check_adjust_pointer(o); } + void do_oop(narrowOop* o) { MarkSweep::check_adjust_pointer(o); } }; - void MarkSweep::track_interior_pointers(oop obj) { if (ValidateMarkSweep) { _adjusted_pointers->clear(); @@ -237,7 +209,6 @@ } } - void 
MarkSweep::check_interior_pointers() { if (ValidateMarkSweep) { _pointer_tracking = false; @@ -245,7 +216,6 @@ } } - void MarkSweep::reset_live_oop_tracking(bool at_perm) { if (ValidateMarkSweep) { guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops"); @@ -253,7 +223,6 @@ } } - void MarkSweep::register_live_oop(oop p, size_t size) { if (ValidateMarkSweep) { _live_oops->push(p); @@ -286,7 +255,6 @@ } } - void MarkSweep::compaction_complete() { if (RecordMarkSweepCompaction) { GrowableArray* _tmp_live_oops = _cur_gc_live_oops; @@ -302,7 +270,6 @@ } } - void MarkSweep::print_new_location_of_heap_address(HeapWord* q) { if (!RecordMarkSweepCompaction) { tty->print_cr("Requires RecordMarkSweepCompaction to be enabled"); @@ -321,7 +288,7 @@ HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i); size_t offset = (q - old_oop); tty->print_cr("Address " PTR_FORMAT, q); - tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset); + tty->print_cr(" Was in oop " PTR_FORMAT ", size " SIZE_FORMAT ", at offset " SIZE_FORMAT, old_oop, sz, offset); tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset); return; } @@ -331,23 +298,16 @@ } #endif //VALIDATE_MARK_SWEEP -MarkSweep::IsAliveClosure MarkSweep::is_alive; +MarkSweep::IsAliveClosure MarkSweep::is_alive; -void MarkSweep::KeepAliveClosure::do_oop(oop* p) { -#ifdef VALIDATE_MARK_SWEEP - if (ValidateMarkSweep) { - if (!Universe::heap()->is_in_reserved(p)) { - _root_refs_stack->push(p); - } else { - _other_refs_stack->push(p); - } - } -#endif - mark_and_push(p); -} +void MarkSweep::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); } +bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); } MarkSweep::KeepAliveClosure MarkSweep::keep_alive; +void MarkSweep::KeepAliveClosure::do_oop(oop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); } +void MarkSweep::KeepAliveClosure::do_oop(narrowOop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); } + void marksweep_init() { /* empty */ } #ifndef PRODUCT --- old/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp 2009-08-01 04:11:24.570213216 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp 2009-08-01 04:11:24.488847716 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)markSweep.hpp 1.67 07/05/17 15:52:55 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -49,55 +49,59 @@ #define VALIDATE_MARK_SWEEP_ONLY(code) #endif - // declared at end class PreservedMark; class MarkSweep : AllStatic { // - // In line closure decls + // Inline closure decls // - - class FollowRootClosure: public OopsInGenClosure{ + class FollowRootClosure: public OopsInGenClosure { public: - void do_oop(oop* p) { follow_root(p); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); virtual const bool do_nmethods() const { return true; } }; class MarkAndPushClosure: public OopClosure { public: - void do_oop(oop* p) { mark_and_push(p); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); virtual const bool do_nmethods() const { return true; } }; class FollowStackClosure: public VoidClosure { public: - void do_void() { follow_stack(); } + virtual void do_void(); }; class AdjustPointerClosure: public OopsInGenClosure { + private: bool _is_root; public: AdjustPointerClosure(bool is_root) : _is_root(is_root) {} - void do_oop(oop* p) { _adjust_pointer(p, _is_root); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; // Used for java/lang/ref handling class IsAliveClosure: public BoolObjectClosure { public: - void do_object(oop p) { assert(false, "don't call"); } - bool do_object_b(oop p) { return p->is_gc_marked(); } + virtual void do_object(oop p); + virtual bool do_object_b(oop p); }; class KeepAliveClosure: public OopClosure { + protected: + template void do_oop_work(T* p); public: - void do_oop(oop* p); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; // // Friend decls // - friend class AdjustPointerClosure; friend class KeepAliveClosure; friend class VM_MarkSweep; @@ -123,14 +127,14 @@ static ReferenceProcessor* _ref_processor; #ifdef VALIDATE_MARK_SWEEP - static GrowableArray* _root_refs_stack; + static GrowableArray* _root_refs_stack; static GrowableArray * _live_oops; static GrowableArray * _live_oops_moved_to; static GrowableArray* _live_oops_size; static size_t _live_oops_index; static size_t _live_oops_index_at_perm; - static GrowableArray* _other_refs_stack; - static GrowableArray* _adjusted_pointers; + static GrowableArray* _other_refs_stack; + static GrowableArray* _adjusted_pointers; static bool _pointer_tracking; static bool _root_tracking; @@ -149,9 +153,8 @@ static GrowableArray* _last_gc_live_oops_size; #endif - // Non public closures - static IsAliveClosure is_alive; + static IsAliveClosure is_alive; static KeepAliveClosure keep_alive; // Class unloading. Update subklass/sibling/implementor links at end of marking phase. @@ -162,9 +165,9 @@ public: // Public closures - static FollowRootClosure follow_root_closure; - static MarkAndPushClosure mark_and_push_closure; - static FollowStackClosure follow_stack_closure; + static FollowRootClosure follow_root_closure; + static MarkAndPushClosure mark_and_push_closure; + static FollowStackClosure follow_stack_closure; static AdjustPointerClosure adjust_root_pointer_closure; static AdjustPointerClosure adjust_pointer_closure; @@ -173,39 +176,29 @@ // Call backs for marking static void mark_object(oop obj); - static void follow_root(oop* p); // Mark pointer and follow contents. Empty marking - - // stack afterwards. - - static void mark_and_follow(oop* p); // Mark pointer and follow contents. - static void _mark_and_push(oop* p); // Mark pointer and push obj on - // marking stack. 
- - - static void mark_and_push(oop* p) { // Check mark and maybe push on - // marking stack - // assert(Universe::is_reserved_heap((oop)p), "we should only be traversing objects here"); - oop m = *p; - if (m != NULL && !m->mark()->is_marked()) { - _mark_and_push(p); - } - } - - static void follow_stack(); // Empty marking stack. - - - static void preserve_mark(oop p, markOop mark); // Save the mark word so it can be restored later - static void adjust_marks(); // Adjust the pointers in the preserved marks table - static void restore_marks(); // Restore the marks that we saved in preserve_mark - - static void _adjust_pointer(oop* p, bool isroot); - - static void adjust_root_pointer(oop* p) { _adjust_pointer(p, true); } - static void adjust_pointer(oop* p) { _adjust_pointer(p, false); } + // Mark pointer and follow contents. Empty marking stack afterwards. + template static inline void follow_root(T* p); + // Mark pointer and follow contents. + template static inline void mark_and_follow(T* p); + // Check mark and maybe push on marking stack + template static inline void mark_and_push(T* p); + + static void follow_stack(); // Empty marking stack. + + static void preserve_mark(oop p, markOop mark); + // Save the mark word so it can be restored later + static void adjust_marks(); // Adjust the pointers in the preserved marks table + static void restore_marks(); // Restore the marks that we saved in preserve_mark + + template static inline void adjust_pointer(T* p, bool isroot); + + static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } + static void adjust_pointer(oop* p) { adjust_pointer(p, false); } + static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); } #ifdef VALIDATE_MARK_SWEEP - static void track_adjusted_pointer(oop* p, oop newobj, bool isroot); - static void check_adjust_pointer(oop* p); // Adjust this pointer + static void track_adjusted_pointer(void* p, bool isroot); + static void check_adjust_pointer(void* p); static void track_interior_pointers(oop obj); static void check_interior_pointers(); @@ -226,7 +219,6 @@ static void revisit_weak_klass_link(Klass* k); // Update subklass/sibling/implementor links at end of marking. }; - class PreservedMark VALUE_OBJ_CLASS_SPEC { private: oop _obj; --- old/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp 2009-08-01 04:11:25.442105743 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp 2009-08-01 04:11:25.361009141 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)markSweep.inline.hpp 1.17 07/05/29 09:44:12 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,35 +25,7 @@ * */ -inline void MarkSweep::_adjust_pointer(oop* p, bool isroot) { - oop obj = *p; - VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL); - if (obj != NULL) { - oop new_pointer = oop(obj->mark()->decode_pointer()); - assert(new_pointer != NULL || // is forwarding ptr? - obj->mark() == markOopDesc::prototype() || // not gc marked? - (UseBiasedLocking && obj->mark()->has_bias_pattern()) || // not gc marked? - obj->is_shared(), // never forwarded? 
- "should contain a forwarding pointer"); - if (new_pointer != NULL) { - *p = new_pointer; - assert(Universe::heap()->is_in_reserved(new_pointer), - "should be in object space"); - VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer); - } - } - VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot)); -} - inline void MarkSweep::mark_object(oop obj) { - -#ifndef SERIALGC - if (UseParallelOldGC && VerifyParallelOldWithMarkSweep) { - assert(PSParallelCompact::mark_bitmap()->is_marked(obj), - "Should be marked in the marking bitmap"); - } -#endif // SERIALGC - // some marks may contain information we need to preserve so we store them away // and overwrite the mark. We'll restore it at the end of markSweep. markOop mark = obj->mark(); @@ -63,3 +35,80 @@ preserve_mark(obj, mark); } } + +template inline void MarkSweep::follow_root(T* p) { + assert(!Universe::heap()->is_in_reserved(p), + "roots shouldn't be things within the heap"); +#ifdef VALIDATE_MARK_SWEEP + if (ValidateMarkSweep) { + guarantee(!_root_refs_stack->contains(p), "should only be in here once"); + _root_refs_stack->push(p); + } +#endif + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + if (!obj->mark()->is_marked()) { + mark_object(obj); + obj->follow_contents(); + } + } + follow_stack(); +} + +template inline void MarkSweep::mark_and_follow(T* p) { +// assert(Universe::heap()->is_in_reserved(p), "should be in object space"); + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + if (!obj->mark()->is_marked()) { + mark_object(obj); + obj->follow_contents(); + } + } +} + +template inline void MarkSweep::mark_and_push(T* p) { +// assert(Universe::heap()->is_in_reserved(p), "should be in object space"); + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + if (!obj->mark()->is_marked()) { + mark_object(obj); + _marking_stack->push(obj); + } + } +} + +template inline void MarkSweep::adjust_pointer(T* p, bool isroot) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + oop new_obj = oop(obj->mark()->decode_pointer()); + assert(new_obj != NULL || // is forwarding ptr? + obj->mark() == markOopDesc::prototype() || // not gc marked? + (UseBiasedLocking && obj->mark()->has_bias_pattern()) || + // not gc marked? + obj->is_shared(), // never forwarded? + "should be forwarded"); + if (new_obj != NULL) { + assert(Universe::heap()->is_in_reserved(new_obj), + "should be in object space"); + oopDesc::encode_store_heap_oop_not_null(p, new_obj); + } + } + VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot)); +} + +template inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) { +#ifdef VALIDATE_MARK_SWEEP + if (ValidateMarkSweep) { + if (!Universe::heap()->is_in_reserved(p)) { + _root_refs_stack->push(p); + } else { + _other_refs_stack->push(p); + } + } +#endif + mark_and_push(p); +} --- old/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp 2009-08-01 04:11:26.313920755 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp 2009-08-01 04:11:26.229072240 +0100 @@ -3,7 +3,7 @@ #endif /* - * Copyright 2006-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,17 +45,31 @@ delete lgrp_spaces(); } +#ifndef PRODUCT void MutableNUMASpace::mangle_unused_area() { - for (int i = 0; i < lgrp_spaces()->length(); i++) { - LGRPSpace *ls = lgrp_spaces()->at(i); - MutableSpace *s = ls->space(); - HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom()); - if (top < s->end()) { - ls->add_invalid_region(MemRegion(top, s->end())); - } - s->mangle_unused_area(); - } + // This method should do nothing. + // It can be called on a numa space during a full compaction. +} +void MutableNUMASpace::mangle_unused_area_complete() { + // This method should do nothing. + // It can be called on a numa space during a full compaction. +} +void MutableNUMASpace::mangle_region(MemRegion mr) { + // This method should do nothing because numa spaces are not mangled. +} +void MutableNUMASpace::set_top_for_allocations(HeapWord* v) { + assert(false, "Do not mangle MutableNUMASpace's"); +} +void MutableNUMASpace::set_top_for_allocations() { + // This method should do nothing. +} +void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) { + // This method should do nothing. } +void MutableNUMASpace::check_mangled_unused_area_complete() { + // This method should do nothing. +} +#endif // NOT_PRODUCT // There may be unallocated holes in the middle chunks // that should be filled with dead objects to ensure parseability. @@ -63,42 +77,49 @@ for (int i = 0; i < lgrp_spaces()->length(); i++) { LGRPSpace *ls = lgrp_spaces()->at(i); MutableSpace *s = ls->space(); - if (!s->contains(top())) { + if (s->top() < top()) { // For all spaces preceeding the one containing top() if (s->free_in_words() > 0) { - SharedHeap::fill_region_with_object(MemRegion(s->top(), s->end())); - size_t area_touched_words = pointer_delta(s->end(), s->top(), sizeof(HeapWordSize)); + size_t area_touched_words = pointer_delta(s->end(), s->top()); + CollectedHeap::fill_with_object(s->top(), area_touched_words); #ifndef ASSERT if (!ZapUnusedHeapArea) { area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)), area_touched_words); } #endif - MemRegion invalid; - HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size()); - HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), - os::vm_page_size()); - if (crossing_start != crossing_end) { - // If object header crossed a small page boundary we mark the area - // as invalid rounding it to a page_size(). - HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom()); - HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()), - s->end()); - invalid = MemRegion(start, end); - } + if (!os::numa_has_static_binding()) { + MemRegion invalid; + HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size()); + HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), + os::vm_page_size()); + if (crossing_start != crossing_end) { + // If object header crossed a small page boundary we mark the area + // as invalid rounding it to a page_size(). 
+ HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom()); + HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()), + s->end()); + invalid = MemRegion(start, end); + } - ls->add_invalid_region(invalid); - s->set_top(s->end()); + ls->add_invalid_region(invalid); + } } } else { + if (!os::numa_has_static_binding()) { #ifdef ASSERT - MemRegion invalid(s->top(), s->end()); - ls->add_invalid_region(invalid); -#else - if (ZapUnusedHeapArea) { MemRegion invalid(s->top(), s->end()); ls->add_invalid_region(invalid); - } else break; +#else + if (ZapUnusedHeapArea) { + MemRegion invalid(s->top(), s->end()); + ls->add_invalid_region(invalid); + } else { + return; + } #endif + } else { + return; + } } } } @@ -123,7 +144,20 @@ size_t MutableNUMASpace::tlab_capacity(Thread *thr) const { guarantee(thr != NULL, "No thread"); int lgrp_id = thr->lgrp_id(); - assert(lgrp_id != -1, "No lgrp_id set"); + if (lgrp_id == -1) { + // This case can occur after the topology of the system has + // changed. Thread can change their location, the new home + // group will be determined during the first allocation + // attempt. For now we can safely assume that all spaces + // have equal size because the whole space will be reinitialized. + if (lgrp_spaces()->length() > 0) { + return capacity_in_bytes() / lgrp_spaces()->length(); + } else { + assert(false, "There should be at least one locality group"); + return 0; + } + } + // That's the normal case, where we know the locality group of the thread. int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); if (i == -1) { return 0; @@ -132,9 +166,17 @@ } size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const { + // Please see the comments for tlab_capacity(). guarantee(thr != NULL, "No thread"); int lgrp_id = thr->lgrp_id(); - assert(lgrp_id != -1, "No lgrp_id set"); + if (lgrp_id == -1) { + if (lgrp_spaces()->length() > 0) { + return free_in_bytes() / lgrp_spaces()->length(); + } else { + assert(false, "There should be at least one locality group"); + return 0; + } + } int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); if (i == -1) { return 0; @@ -142,6 +184,25 @@ return lgrp_spaces()->at(i)->space()->free_in_bytes(); } + +size_t MutableNUMASpace::capacity_in_words(Thread* thr) const { + guarantee(thr != NULL, "No thread"); + int lgrp_id = thr->lgrp_id(); + if (lgrp_id == -1) { + if (lgrp_spaces()->length() > 0) { + return capacity_in_words() / lgrp_spaces()->length(); + } else { + assert(false, "There should be at least one locality group"); + return 0; + } + } + int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); + if (i == -1) { + return 0; + } + return lgrp_spaces()->at(i)->space()->capacity_in_words(); +} + // Check if the NUMA topology has changed. Add and remove spaces if needed. // The update can be forced by setting the force parameter equal to true. bool MutableNUMASpace::update_layout(bool force) { @@ -197,7 +258,7 @@ } // Bias region towards the first-touching lgrp. Set the right page sizes. 
-void MutableNUMASpace::bias_region(MemRegion mr) { +void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) { HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size()); HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size()); if (end > start) { @@ -205,9 +266,13 @@ assert((intptr_t)aligned_region.start() % page_size() == 0 && (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment"); assert(region().contains(aligned_region), "Sanity"); - os::free_memory((char*)aligned_region.start(), aligned_region.byte_size()); + // First we tell the OS which page size we want in the given range. The underlying + // large page can be broken down if we require small pages. os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size()); - os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size()); + // Then we uncommit the pages in the range. + os::free_memory((char*)aligned_region.start(), aligned_region.byte_size()); + // And make them local/first-touch biased. + os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id); } } @@ -228,24 +293,37 @@ void MutableNUMASpace::update() { if (update_layout(false)) { // If the topology has changed, make all chunks zero-sized. + // And clear the alloc-rate statistics. + // In future we may want to handle this more gracefully in order + // to avoid the reallocation of the pages as much as possible. for (int i = 0; i < lgrp_spaces()->length(); i++) { - MutableSpace *s = lgrp_spaces()->at(i)->space(); + LGRPSpace *ls = lgrp_spaces()->at(i); + MutableSpace *s = ls->space(); s->set_end(s->bottom()); s->set_top(s->bottom()); + ls->clear_alloc_rate(); } - initialize(region(), true); + // A NUMA space is never mangled + initialize(region(), + SpaceDecorator::Clear, + SpaceDecorator::DontMangle); } else { bool should_initialize = false; - for (int i = 0; i < lgrp_spaces()->length(); i++) { - if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) { - should_initialize = true; - break; + if (!os::numa_has_static_binding()) { + for (int i = 0; i < lgrp_spaces()->length(); i++) { + if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) { + should_initialize = true; + break; + } } } if (should_initialize || (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) { - initialize(region(), true); + // A NUMA space is never mangled + initialize(region(), + SpaceDecorator::Clear, + SpaceDecorator::DontMangle); } } @@ -316,6 +394,8 @@ } // Produce a new chunk size. page_size() aligned. +// This function is expected to be called on sequence of i's from 0 to +// lgrp_spaces()->length(). 
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) { size_t pages_available = base_space_size(); for (int j = 0; j < i; j++) { @@ -330,16 +410,27 @@ size_t chunk_size = 0; if (alloc_rate > 0) { LGRPSpace *ls = lgrp_spaces()->at(i); - chunk_size = (size_t)(ls->alloc_rate()->average() * pages_available / alloc_rate) * page_size(); + chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size(); } chunk_size = MAX2(chunk_size, page_size()); if (limit > 0) { limit = round_down(limit, page_size()); if (chunk_size > current_chunk_size(i)) { - chunk_size = MIN2((off_t)chunk_size, (off_t)current_chunk_size(i) + (off_t)limit); + size_t upper_bound = pages_available * page_size(); + if (upper_bound > limit && + current_chunk_size(i) < upper_bound - limit) { + // The resulting upper bound should not exceed the available + // amount of memory (pages_available * page_size()). + upper_bound = current_chunk_size(i) + limit; + } + chunk_size = MIN2(chunk_size, upper_bound); } else { - chunk_size = MAX2((off_t)chunk_size, (off_t)current_chunk_size(i) - (off_t)limit); + size_t lower_bound = page_size(); + if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow. + lower_bound = current_chunk_size(i) - limit; + } + chunk_size = MAX2(chunk_size, lower_bound); } } assert(chunk_size <= pages_available * page_size(), "Chunk size out of range"); @@ -436,14 +527,17 @@ } } -void MutableNUMASpace::initialize(MemRegion mr, bool clear_space) { +void MutableNUMASpace::initialize(MemRegion mr, + bool clear_space, + bool mangle_space) { assert(clear_space, "Reallocation will destory data!"); assert(lgrp_spaces()->length() > 0, "There should be at least one space"); MemRegion old_region = region(), new_region; set_bottom(mr.start()); set_end(mr.end()); - MutableSpace::set_top(bottom()); + // Must always clear the space + clear(SpaceDecorator::DontMangle); // Compute chunk sizes size_t prev_page_size = page_size(); @@ -475,8 +569,8 @@ intersection = MemRegion(new_region.start(), new_region.start()); } select_tails(new_region, intersection, &bottom_region, &top_region); - bias_region(bottom_region); - bias_region(top_region); + bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id()); + bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id()); } // Check if the space layout has changed significantly? @@ -548,21 +642,34 @@ intersection = MemRegion(new_region.start(), new_region.start()); } - MemRegion invalid_region = ls->invalid_region().intersection(new_region); - if (!invalid_region.is_empty()) { - merge_regions(new_region, &intersection, &invalid_region); - free_region(invalid_region); + if (!os::numa_has_static_binding()) { + MemRegion invalid_region = ls->invalid_region().intersection(new_region); + // Invalid region is a range of memory that could've possibly + // been allocated on the other node. That's relevant only on Solaris where + // there is no static memory binding. + if (!invalid_region.is_empty()) { + merge_regions(new_region, &intersection, &invalid_region); + free_region(invalid_region); + ls->set_invalid_region(MemRegion()); + } } + select_tails(new_region, intersection, &bottom_region, &top_region); - free_region(bottom_region); - free_region(top_region); - // If we clear the region, we would mangle it in debug. That would cause page - // allocation in a different place. Hence setting the top directly. 
- s->initialize(new_region, false); - s->set_top(s->bottom()); + if (!os::numa_has_static_binding()) { + // If that's a system with the first-touch policy then it's enough + // to free the pages. + free_region(bottom_region); + free_region(top_region); + } else { + // In a system with static binding we have to change the bias whenever + // we reshape the heap. + bias_region(bottom_region, ls->lgrp_id()); + bias_region(top_region, ls->lgrp_id()); + } - ls->set_invalid_region(MemRegion()); + // Clear space (set top = bottom) but never mangle. + s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle); set_adaptation_cycles(samples_count()); } @@ -572,13 +679,29 @@ // Mark the the holes in chunks below the top() as invalid. void MutableNUMASpace::set_top(HeapWord* value) { bool found_top = false; - for (int i = 0; i < lgrp_spaces()->length(); i++) { + for (int i = 0; i < lgrp_spaces()->length();) { LGRPSpace *ls = lgrp_spaces()->at(i); MutableSpace *s = ls->space(); HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom()); if (s->contains(value)) { - if (top < value && top < s->end()) { + // Check if setting the chunk's top to a given value would create a hole less than + // a minimal object; assuming that's not the last chunk in which case we don't care. + if (i < lgrp_spaces()->length() - 1) { + size_t remainder = pointer_delta(s->end(), value); + const size_t min_fill_size = CollectedHeap::min_fill_size(); + if (remainder < min_fill_size && remainder > 0) { + // Add a minimum size filler object; it will cross the chunk boundary. + CollectedHeap::fill_with_object(value, min_fill_size); + value += min_fill_size; + assert(!s->contains(value), "Should be in the next chunk"); + // Restart the loop from the same chunk, since the value has moved + // to the next one. + continue; + } + } + + if (!os::numa_has_static_binding() && top < value && top < s->end()) { ls->add_invalid_region(MemRegion(top, value)); } s->set_top(value); @@ -587,28 +710,43 @@ if (found_top) { s->set_top(s->bottom()); } else { - if (top < s->end()) { - ls->add_invalid_region(MemRegion(top, s->end())); - } - s->set_top(s->end()); + if (!os::numa_has_static_binding() && top < s->end()) { + ls->add_invalid_region(MemRegion(top, s->end())); + } + s->set_top(s->end()); } } + i++; } MutableSpace::set_top(value); } -void MutableNUMASpace::clear() { +void MutableNUMASpace::clear(bool mangle_space) { MutableSpace::set_top(bottom()); for (int i = 0; i < lgrp_spaces()->length(); i++) { - lgrp_spaces()->at(i)->space()->clear(); + // Never mangle NUMA spaces because the mangling will + // bind the memory to a possibly unwanted lgroup. + lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle); } } +/* + Linux supports static memory binding, therefore the most part of the + logic dealing with the possible invalid page allocation is effectively + disabled. Besides there is no notion of the home node in Linux. A + thread is allowed to migrate freely. Although the scheduler is rather + reluctant to move threads between the nodes. We check for the current + node every allocation. And with a high probability a thread stays on + the same node for some time allowing local access to recently allocated + objects. 
+ */ + HeapWord* MutableNUMASpace::allocate(size_t size) { - int lgrp_id = Thread::current()->lgrp_id(); - if (lgrp_id == -1) { + Thread* thr = Thread::current(); + int lgrp_id = thr->lgrp_id(); + if (lgrp_id == -1 || !os::numa_has_group_homing()) { lgrp_id = os::numa_get_group_id(); - Thread::current()->set_lgrp_id(lgrp_id); + thr->set_lgrp_id(lgrp_id); } int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); @@ -619,34 +757,41 @@ i = os::random() % lgrp_spaces()->length(); } - MutableSpace *s = lgrp_spaces()->at(i)->space(); + LGRPSpace* ls = lgrp_spaces()->at(i); + MutableSpace *s = ls->space(); HeapWord *p = s->allocate(size); - if (p != NULL && s->free_in_words() < (size_t)oopDesc::header_size()) { - s->set_top(s->top() - size); - p = NULL; + if (p != NULL) { + size_t remainder = s->free_in_words(); + if (remainder < (size_t)oopDesc::header_size() && remainder > 0) { + s->set_top(s->top() - size); + p = NULL; + } } if (p != NULL) { if (top() < s->top()) { // Keep _top updated. MutableSpace::set_top(s->top()); } } - // Make the page allocation happen here. - if (p != NULL) { + // Make the page allocation happen here if there is no static binding.. + if (p != NULL && !os::numa_has_static_binding()) { for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) { *(int*)i = 0; } } - + if (p == NULL) { + ls->set_allocation_failed(); + } return p; } // This version is lock-free. HeapWord* MutableNUMASpace::cas_allocate(size_t size) { - int lgrp_id = Thread::current()->lgrp_id(); - if (lgrp_id == -1) { + Thread* thr = Thread::current(); + int lgrp_id = thr->lgrp_id(); + if (lgrp_id == -1 || !os::numa_has_group_homing()) { lgrp_id = os::numa_get_group_id(); - Thread::current()->set_lgrp_id(lgrp_id); + thr->set_lgrp_id(lgrp_id); } int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); @@ -655,13 +800,19 @@ if (i == -1) { i = os::random() % lgrp_spaces()->length(); } - MutableSpace *s = lgrp_spaces()->at(i)->space(); + LGRPSpace *ls = lgrp_spaces()->at(i); + MutableSpace *s = ls->space(); HeapWord *p = s->cas_allocate(size); - if (p != NULL && s->free_in_words() < (size_t)oopDesc::header_size()) { - if (s->cas_deallocate(p, size)) { - // We were the last to allocate and created a fragment less than - // a minimal object. - p = NULL; + if (p != NULL) { + size_t remainder = pointer_delta(s->end(), p + size); + if (remainder < (size_t)oopDesc::header_size() && remainder > 0) { + if (s->cas_deallocate(p, size)) { + // We were the last to allocate and created a fragment less than + // a minimal object. + p = NULL; + } else { + guarantee(false, "Deallocation should always succeed"); + } } } if (p != NULL) { @@ -673,12 +824,15 @@ } } - // Make the page allocation happen here. - if (p != NULL) { + // Make the page allocation happen here if there is no static binding. 
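When the platform has no static memory binding (first-touch placement), the allocation path above touches one word per page of the new block so those pages get committed on the allocating thread's node. A standalone illustration of that touch loop, assuming 'p' is word-aligned and 'page_size' is the VM page size (generic types, not HotSpot's):

#include <cstddef>
#include <cstdint>

// Store into every page covered by [p, p + bytes) so a first-touch NUMA
// policy places those pages on the current thread's node.
void touch_pages(void* p, size_t bytes, size_t page_size) {
  char* cur = static_cast<char*>(p);
  char* end = cur + bytes;
  for (; cur < end; cur += page_size) {
    *reinterpret_cast<intptr_t*>(cur) = 0;   // the store itself is what matters
  }
}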
+ if (p != NULL && !os::numa_has_static_binding() ) { for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) { *(int*)i = 0; } } + if (p == NULL) { + ls->set_allocation_failed(); + } return p; } @@ -702,6 +856,9 @@ st->print(" lgrp %d", ls->lgrp_id()); ls->space()->print_on(st); if (NUMAStats) { + for (int i = 0; i < lgrp_spaces()->length(); i++) { + lgrp_spaces()->at(i)->accumulate_statistics(page_size()); + } st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n", ls->space_stats()->_local_space / K, ls->space_stats()->_remote_space / K, @@ -713,10 +870,12 @@ } } -void MutableNUMASpace::verify(bool allow_dirty) const { - for (int i = 0; i < lgrp_spaces()->length(); i++) { - lgrp_spaces()->at(i)->space()->verify(allow_dirty); - } +void MutableNUMASpace::verify(bool allow_dirty) { + // This can be called after setting an arbitary value to the space's top, + // so an object can cross the chunk boundary. We ensure the parsablity + // of the space and just walk the objects in linear fashion. + ensure_parsability(); + MutableSpace::verify(allow_dirty); } // Scan pages and gather stats about page placement and size. --- old/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp 2009-08-01 04:11:27.266985808 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp 2009-08-01 04:11:27.186896811 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)mutableNUMASpace.hpp 1.8 07/05/05 17:05:34 JVM" #endif /* - * Copyright 2006-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,6 +63,7 @@ MutableSpace* _space; MemRegion _invalid_region; AdaptiveWeightedAverage *_alloc_rate; + bool _allocation_failed; struct SpaceStats { size_t _local_space, _remote_space, _unbiased_space, _uncommited_space; @@ -84,7 +85,7 @@ char* last_page_scanned() { return _last_page_scanned; } void set_last_page_scanned(char* p) { _last_page_scanned = p; } public: - LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL) { + LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL), _allocation_failed(false) { _space = new MutableSpace(); _alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight); } @@ -106,8 +107,21 @@ return *(int*)lgrp_id_value == p->lgrp_id(); } + // Report a failed allocation. + void set_allocation_failed() { _allocation_failed = true; } + void sample() { - alloc_rate()->sample(space()->used_in_bytes()); + // If there was a failed allocation make allocation rate equal + // to the size of the whole chunk. This ensures the progress of + // the adaptation process. + size_t alloc_rate_sample; + if (_allocation_failed) { + alloc_rate_sample = space()->capacity_in_bytes(); + _allocation_failed = false; + } else { + alloc_rate_sample = space()->used_in_bytes(); + } + alloc_rate()->sample(alloc_rate_sample); } MemRegion invalid_region() const { return _invalid_region; } @@ -115,6 +129,7 @@ int lgrp_id() const { return _lgrp_id; } MutableSpace* space() const { return _space; } AdaptiveWeightedAverage* alloc_rate() const { return _alloc_rate; } + void clear_alloc_rate() { _alloc_rate->clear(); } SpaceStats* space_stats() { return &_space_stats; } void clear_space_stats() { _space_stats = SpaceStats(); } @@ -142,8 +157,8 @@ // Check if the NUMA topology has changed. Add and remove spaces if needed. 
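cas_allocate() above computes the would-be remainder from the space end (rather than the racy free_in_words()) and undoes the allocation if it would strand a fragment smaller than a minimal object; the undo must succeed because nothing else can fit in that sliver, hence the guarantee. A toy version of the same idea on a bump-pointer arena with std::atomic (word offsets, no real object headers):

#include <atomic>
#include <cstddef>
#include <cstdint>

struct Arena {
  std::atomic<size_t> top{0};   // next free word offset
  size_t end = 0;               // capacity in words
  size_t min_obj = 2;           // smallest representable object, in words

  // Returns the start offset of the allocation, or SIZE_MAX on failure.
  size_t cas_allocate(size_t size) {
    size_t old_top = top.load();
    do {
      if (old_top + size > end) return SIZE_MAX;            // out of space
    } while (!top.compare_exchange_weak(old_top, old_top + size));

    size_t remainder = end - (old_top + size);
    if (remainder > 0 && remainder < min_obj) {
      // We would leave an unparsable sliver; take the allocation back.
      size_t expected = old_top + size;
      if (top.compare_exchange_strong(expected, old_top)) {
        return SIZE_MAX;        // we were the last allocator, undo succeeded
      }
      // No other allocation can fit in the sliver, so the undo cannot fail;
      // the patch expresses that with guarantee(false, ...).
    }
    return old_top;
  }
};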
// The update can be forced by setting the force parameter equal to true. bool update_layout(bool force); - // Bias region towards the first-touching lgrp. - void bias_region(MemRegion mr); + // Bias region towards the lgrp. + void bias_region(MemRegion mr, int lgrp_id); // Free pages in a given region. void free_region(MemRegion mr); // Get current chunk size. @@ -174,17 +189,27 @@ MutableNUMASpace(); virtual ~MutableNUMASpace(); // Space initialization. - virtual void initialize(MemRegion mr, bool clear_space); + virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); // Update space layout if necessary. Do all adaptive resizing job. virtual void update(); // Update allocation rate averages. virtual void accumulate_statistics(); - virtual void clear(); - virtual void mangle_unused_area(); + virtual void clear(bool mangle_space); + virtual void mangle_unused_area() PRODUCT_RETURN; + virtual void mangle_unused_area_complete() PRODUCT_RETURN; + virtual void mangle_region(MemRegion mr) PRODUCT_RETURN; + virtual void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN; + virtual void check_mangled_unused_area_complete() PRODUCT_RETURN; + virtual void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN; + virtual void set_top_for_allocations() PRODUCT_RETURN; + virtual void ensure_parsability(); virtual size_t used_in_words() const; virtual size_t free_in_words() const; + + using MutableSpace::capacity_in_words; + virtual size_t capacity_in_words(Thread* thr) const; virtual size_t tlab_capacity(Thread* thr) const; virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; @@ -195,7 +220,7 @@ // Debugging virtual void print_on(outputStream* st) const; virtual void print_short_on(outputStream* st) const; - virtual void verify(bool allow_dirty) const; + virtual void verify(bool allow_dirty); virtual void set_top(HeapWord* value); }; --- old/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp 2009-08-01 04:11:28.182188489 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp 2009-08-01 04:11:28.093735579 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)mutableSpace.cpp 1.22 07/05/05 17:05:35 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
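The sampling policy introduced above (sample() in LGRPSpace) treats a failed allocation as if the whole chunk had been consumed, so the adaptive resizer keeps growing a chunk that is persistently full. Assuming AdaptiveWeightedAverage behaves like an ordinary exponentially weighted moving average, which is an assumption here rather than something stated in the patch, the policy looks roughly like this:

#include <cstddef>

// Stand-in for AdaptiveWeightedAverage: new samples blend in with weight w%.
class WeightedAverage {
  double   _avg = 0.0;
  unsigned _weight;                      // e.g. NUMAChunkResizeWeight
 public:
  explicit WeightedAverage(unsigned w) : _weight(w) {}
  void sample(double v) { _avg = (_weight * v + (100 - _weight) * _avg) / 100.0; }
  double average() const { return _avg; }
};

// A failed allocation is sampled as the full chunk capacity so the average
// (and therefore the chunk) grows on the next adaptation cycle.
void sample_alloc_rate(WeightedAverage& rate, size_t used_bytes,
                       size_t capacity_bytes, bool& allocation_failed) {
  size_t s = allocation_failed ? capacity_bytes : used_bytes;
  allocation_failed = false;
  rate.sample(static_cast<double>(s));
}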
* * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,17 @@ # include "incls/_precompiled.incl" # include "incls/_mutableSpace.cpp.incl" -void MutableSpace::initialize(MemRegion mr, bool clear_space) { +MutableSpace::MutableSpace(): ImmutableSpace(), _top(NULL) { + _mangler = new MutableSpaceMangler(this); +} + +MutableSpace::~MutableSpace() { + delete _mangler; +} + +void MutableSpace::initialize(MemRegion mr, + bool clear_space, + bool mangle_space) { HeapWord* bottom = mr.start(); HeapWord* end = mr.end(); @@ -37,14 +47,51 @@ set_bottom(bottom); set_end(end); - if (clear_space) clear(); + if (clear_space) { + clear(mangle_space); + } } -void MutableSpace::clear() { +void MutableSpace::clear(bool mangle_space) { set_top(bottom()); - if (ZapUnusedHeapArea) mangle_unused_area(); + if (ZapUnusedHeapArea && mangle_space) { + mangle_unused_area(); + } } +#ifndef PRODUCT +void MutableSpace::check_mangled_unused_area(HeapWord* limit) { + mangler()->check_mangled_unused_area(limit); +} + +void MutableSpace::check_mangled_unused_area_complete() { + mangler()->check_mangled_unused_area_complete(); +} + +// Mangle only the unused space that has not previously +// been mangled and that has not been allocated since being +// mangled. +void MutableSpace::mangle_unused_area() { + mangler()->mangle_unused_area(); +} + +void MutableSpace::mangle_unused_area_complete() { + mangler()->mangle_unused_area_complete(); +} + +void MutableSpace::mangle_region(MemRegion mr) { + SpaceMangler::mangle_region(mr); +} + +void MutableSpace::set_top_for_allocations(HeapWord* v) { + mangler()->set_top_for_allocations(v); +} + +void MutableSpace::set_top_for_allocations() { + mangler()->set_top_for_allocations(top()); +} +#endif + // This version requires locking. */ HeapWord* MutableSpace::allocate(size_t size) { assert(Heap_lock->owned_by_self() || @@ -121,7 +168,7 @@ bottom(), top(), end()); } -void MutableSpace::verify(bool allow_dirty) const { +void MutableSpace::verify(bool allow_dirty) { HeapWord* p = bottom(); HeapWord* t = top(); HeapWord* prev_p = NULL; --- old/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp 2009-08-01 04:11:29.002869308 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp 2009-08-01 04:11:28.921775501 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)mutableSpace.hpp 1.22 07/05/05 17:05:35 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,14 +33,23 @@ // Invariant: (ImmutableSpace +) bottom() <= top() <= end() // top() is inclusive and end() is exclusive. 
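The MutableSpace changes above route all debug-time mangling through a MutableSpaceMangler, which remembers how much of the unused area has already been mangled (that is what set_top_for_allocations() and check_mangled_unused_area() are for) so the same words are not rewritten on every GC. The underlying operation is simply filling the unused tail with a recognizable junk pattern; a standalone sketch (the pattern value is illustrative):

#include <cstddef>
#include <cstdint>

// Fill the words in [top, end) with a junk pattern so that code which reads
// supposedly unallocated heap memory trips over an obviously bad value.
void mangle_unused(intptr_t* top, intptr_t* end,
                   intptr_t pattern = 0xBAADBABE) {
  for (intptr_t* p = top; p < end; ++p) {
    *p = pattern;
  }
}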
+class MutableSpaceMangler; + class MutableSpace: public ImmutableSpace { friend class VMStructs; + + // Helper for mangling unused space in debug builds + MutableSpaceMangler* _mangler; + protected: HeapWord* _top; + MutableSpaceMangler* mangler() { return _mangler; } + public: - virtual ~MutableSpace() {} - MutableSpace() { _top = NULL; } + virtual ~MutableSpace(); + MutableSpace(); + // Accessors HeapWord* top() const { return _top; } virtual void set_top(HeapWord* value) { _top = value; } @@ -55,21 +64,30 @@ MemRegion used_region() { return MemRegion(bottom(), top()); } // Initialization - virtual void initialize(MemRegion mr, bool clear_space); - virtual void clear(); + virtual void initialize(MemRegion mr, + bool clear_space, + bool mangle_space); + virtual void clear(bool mangle_space); + // Does the usual initialization but optionally resets top to bottom. +#if 0 // MANGLE_SPACE + void initialize(MemRegion mr, bool clear_space, bool reset_top); +#endif virtual void update() { } virtual void accumulate_statistics() { } - // Overwrites the unused portion of this space. Note that some collectors - // may use this "scratch" space during collections. - virtual void mangle_unused_area() { - mangle_region(MemRegion(_top, _end)); - } + // Methods used in mangling. See descriptions under SpaceMangler. + virtual void mangle_unused_area() PRODUCT_RETURN; + virtual void mangle_unused_area_complete() PRODUCT_RETURN; + virtual void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN; + virtual void check_mangled_unused_area_complete() PRODUCT_RETURN; + virtual void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN; + + // Used to save the space's current top for later use during mangling. + virtual void set_top_for_allocations() PRODUCT_RETURN; + virtual void ensure_parsability() { } - void mangle_region(MemRegion mr) { - debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord)); - } + virtual void mangle_region(MemRegion mr) PRODUCT_RETURN; // Boolean querries. bool is_empty() const { return used_in_words() == 0; } @@ -101,5 +119,5 @@ virtual void print_on(outputStream* st) const; virtual void print_short() const; virtual void print_short_on(outputStream* st) const; - virtual void verify(bool allow_dirty) const; + virtual void verify(bool allow_dirty); }; --- old/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp 2009-08-01 04:11:30.557042171 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp 2009-08-01 04:11:30.481318749 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vmGCOperations.cpp 1.21 07/05/29 09:44:12 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,6 +77,7 @@ // If the GC count has changed someone beat us to the collection // Get the Heap_lock after the pending_list_lock. Heap_lock->lock(); + // Check invocations if (skip_operation()) { // skip collection @@ -85,6 +86,8 @@ _prologue_succeeded = false; } else { _prologue_succeeded = true; + SharedHeap* sh = SharedHeap::heap(); + if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true; } return _prologue_succeeded; } @@ -93,6 +96,8 @@ void VM_GC_Operation::doit_epilogue() { assert(Thread::current()->is_Java_thread(), "just checking"); // Release the Heap_lock first. 
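MutableSpace itself is a plain bump-pointer space: the invariant bottom() <= top() <= end() holds at all times, allocation advances top, and clear() simply resets top to bottom (optionally mangling what is left behind). A toy standalone equivalent of that interface:

#include <cstddef>

// Minimal bump-pointer space mirroring the MutableSpace invariant
// bottom() <= top() <= end().
class BumpSpace {
  char* _bottom;
  char* _top;
  char* _end;
 public:
  BumpSpace(char* bottom, char* end) : _bottom(bottom), _top(bottom), _end(end) {}
  void* allocate(size_t bytes) {
    if (static_cast<size_t>(_end - _top) < bytes) return nullptr;  // space is full
    void* result = _top;
    _top += bytes;
    return result;
  }
  size_t used() const { return static_cast<size_t>(_top - _bottom); }
  size_t free() const { return static_cast<size_t>(_end - _top); }
  void clear() { _top = _bottom; }   // reset top to bottom, as clear() does
};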
+ SharedHeap* sh = SharedHeap::heap(); + if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false; Heap_lock->unlock(); release_and_notify_pending_list_lock(); } @@ -146,4 +151,34 @@ GCCauseSetter gccs(gch, _gc_cause); gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level); notify_gc_end(); -} +} + +void VM_GenCollectForPermanentAllocation::doit() { + JvmtiGCForAllocationMarker jgcm; + notify_gc_begin(true); + SharedHeap* heap = (SharedHeap*)Universe::heap(); + GCCauseSetter gccs(heap, _gc_cause); + switch (heap->kind()) { + case (CollectedHeap::GenCollectedHeap): { + GenCollectedHeap* gch = (GenCollectedHeap*)heap; + gch->do_full_collection(gch->must_clear_all_soft_refs(), + gch->n_gens() - 1); + break; + } +#ifndef SERIALGC + case (CollectedHeap::G1CollectedHeap): { + G1CollectedHeap* g1h = (G1CollectedHeap*)heap; + g1h->do_full_collection(_gc_cause == GCCause::_last_ditch_collection); + break; + } +#endif // SERIALGC + default: + ShouldNotReachHere(); + } + _res = heap->perm_gen()->allocate(_size, false); + assert(heap->is_in_reserved_or_null(_res), "result not in heap"); + if (_res == NULL && GC_locker::is_active_and_needs_gc()) { + set_gc_locked(); + } + notify_gc_end(); +} --- old/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp 2009-08-01 04:11:31.393399500 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp 2009-08-01 04:11:31.301931687 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vmGCOperations.hpp 1.14 07/05/29 09:44:12 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,6 +46,7 @@ // is specified; and also the attach "inspectheap" operation // // VM_GenCollectForAllocation +// VM_GenCollectForPermanentAllocation // VM_ParallelGCFailedAllocation // VM_ParallelGCFailedPermanentAllocation // - this operation is invoked when allocation is failed; @@ -169,3 +170,23 @@ virtual VMOp_Type type() const { return VMOp_GenCollectFull; } virtual void doit(); }; + +class VM_GenCollectForPermanentAllocation: public VM_GC_Operation { + private: + HeapWord* _res; + size_t _size; // size of object to be allocated + public: + VM_GenCollectForPermanentAllocation(size_t size, + unsigned int gc_count_before, + unsigned int full_gc_count_before, + GCCause::Cause gc_cause) + : VM_GC_Operation(gc_count_before, full_gc_count_before, true), + _size(size) { + _res = NULL; + _gc_cause = gc_cause; + } + ~VM_GenCollectForPermanentAllocation() {} + virtual VMOp_Type type() const { return VMOp_GenCollectForPermanentAllocation; } + virtual void doit(); + HeapWord* result() const { return _res; } +}; --- old/hotspot/src/share/vm/gc_interface/collectedHeap.cpp 2009-08-01 04:11:32.253858621 +0100 +++ new/hotspot/src/share/vm/gc_interface/collectedHeap.cpp 2009-08-01 04:11:32.172092046 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)collectedHeap.cpp 1.24 07/07/19 19:08:26 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,13 +33,21 @@ int CollectedHeap::_fire_out_of_memory_count = 0; #endif +size_t CollectedHeap::_filler_array_max_size = 0; + // Memory state functions. 
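VM_GenCollectForPermanentAllocation::doit() above runs a full collection first and only then retries the permanent-generation allocation, noting a GC-locker stall if the retry still fails. A hedged sketch of that control flow; the helper names below are placeholders invented for the illustration, not HotSpot API:

#include <cstddef>

// Hypothetical heap interface standing in for the collected heap.
struct Heap {
  virtual void  do_full_collection() = 0;
  virtual void* allocate_permanent(size_t size) = 0;   // placeholder name
  virtual bool  gc_locker_needs_gc() const = 0;        // placeholder name
  virtual ~Heap() = default;
};

// Collect, then retry the allocation; record whether the GC locker kept the
// collection from freeing anything (the operation would then be retried later).
void* collect_then_allocate(Heap& heap, size_t size, bool& gc_locked) {
  heap.do_full_collection();
  void* result = heap.allocate_permanent(size);
  gc_locked = (result == nullptr) && heap.gc_locker_needs_gc();
  return result;
}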
-CollectedHeap::CollectedHeap() : - _reserved(), _barrier_set(NULL), _is_gc_active(false), - _total_collections(0), _total_full_collections(0), - _max_heap_capacity(0), - _gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) { +CollectedHeap::CollectedHeap() +{ + const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT)); + const size_t elements_per_word = HeapWordSize / sizeof(jint); + _filler_array_max_size = align_object_size(filler_array_hdr_size() + + max_len * elements_per_word); + + _barrier_set = NULL; + _is_gc_active = false; + _total_collections = _total_full_collections = 0; + _gc_cause = _gc_lastcause = GCCause::_no_gc; NOT_PRODUCT(_promotion_failure_alot_count = 0;) NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;) @@ -132,6 +140,94 @@ return obj; } +size_t CollectedHeap::filler_array_hdr_size() { + return size_t(arrayOopDesc::header_size(T_INT)); +} + +size_t CollectedHeap::filler_array_min_size() { + return align_object_size(filler_array_hdr_size()); +} + +size_t CollectedHeap::filler_array_max_size() { + return _filler_array_max_size; +} + +#ifdef ASSERT +void CollectedHeap::fill_args_check(HeapWord* start, size_t words) +{ + assert(words >= min_fill_size(), "too small to fill"); + assert(words % MinObjAlignment == 0, "unaligned size"); + assert(Universe::heap()->is_in_reserved(start), "not in heap"); + assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap"); +} + +void CollectedHeap::zap_filler_array(HeapWord* start, size_t words) +{ + if (ZapFillerObjects) { + Copy::fill_to_words(start + filler_array_hdr_size(), + words - filler_array_hdr_size(), 0XDEAFBABE); + } +} +#endif // ASSERT + +void +CollectedHeap::fill_with_array(HeapWord* start, size_t words) +{ + assert(words >= filler_array_min_size(), "too small for an array"); + assert(words <= filler_array_max_size(), "too big for a single object"); + + const size_t payload_size = words - filler_array_hdr_size(); + const size_t len = payload_size * HeapWordSize / sizeof(jint); + + // Set the length first for concurrent GC. + ((arrayOop)start)->set_length((int)len); + post_allocation_setup_common(Universe::intArrayKlassObj(), start, words); + DEBUG_ONLY(zap_filler_array(start, words);) +} + +void +CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words) +{ + assert(words <= filler_array_max_size(), "too big for a single object"); + + if (words >= filler_array_min_size()) { + fill_with_array(start, words); + } else if (words > 0) { + assert(words == min_fill_size(), "unaligned size"); + post_allocation_setup_common(SystemDictionary::object_klass(), start, + words); + } +} + +void CollectedHeap::fill_with_object(HeapWord* start, size_t words) +{ + DEBUG_ONLY(fill_args_check(start, words);) + HandleMark hm; // Free handles before leaving. + fill_with_object_impl(start, words); +} + +void CollectedHeap::fill_with_objects(HeapWord* start, size_t words) +{ + DEBUG_ONLY(fill_args_check(start, words);) + HandleMark hm; // Free handles before leaving. + +#ifdef LP64 + // A single array can fill ~8G, so multiple objects are needed only in 64-bit. + // First fill with arrays, ensuring that any remaining space is big enough to + // fill. The remainder is filled with a single object. + const size_t min = min_fill_size(); + const size_t max = filler_array_max_size(); + while (words > max) { + const size_t cur = words - max >= min ? 
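fill_with_array() above sizes an int[] filler from a word count: subtract the array header, convert the remaining payload words into jint elements, and write the length before the header is published so a concurrent collector never sees an unsized array. The arithmetic in isolation (header and word sizes are parameters here, not HotSpot constants):

#include <cstddef>

// Convert a filler region of 'words' heap words into the length of an int[]
// that exactly covers it. 'hdr_words' is the array header size in words and
// 'bytes_per_word' the heap word size (8 on LP64).
size_t filler_array_length(size_t words, size_t hdr_words, size_t bytes_per_word) {
  // Caller guarantees filler_array_min_size() <= words <= filler_array_max_size().
  size_t payload_words = words - hdr_words;
  return payload_words * bytes_per_word / sizeof(int);   // jint elements
}

For example, with an assumed 2-word header on LP64, a 6-word filler becomes an int[8]: (6 - 2) * 8 / 4 = 8.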
max : max - min; + fill_with_array(start, cur); + start += cur; + words -= cur; + } +#endif + + fill_with_object_impl(start, words); +} + oop CollectedHeap::new_store_barrier(oop new_obj) { // %%% This needs refactoring. (It was imported from the server compiler.) guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported"); @@ -142,13 +238,6 @@ return new_obj; } -bool CollectedHeap::can_elide_permanent_oop_store_barriers() const { - // %%% This needs refactoring. (It was gating logic from the server compiler.) - guarantee(kind() < CollectedHeap::G1CollectedHeap, ""); - return !UseConcMarkSweepGC; -} - - HeapWord* CollectedHeap::allocate_new_tlab(size_t size) { guarantee(false, "thread-local allocation buffers not supported"); return NULL; --- old/hotspot/src/share/vm/gc_interface/collectedHeap.hpp 2009-08-01 04:11:33.205539320 +0100 +++ new/hotspot/src/share/vm/gc_interface/collectedHeap.hpp 2009-08-01 04:11:33.112288603 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)collectedHeap.hpp 1.58 07/09/07 10:56:50 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,13 +50,15 @@ static int _fire_out_of_memory_count; #endif + // Used for filler objects (static, but initialized in ctor). + static size_t _filler_array_max_size; + protected: MemRegion _reserved; BarrierSet* _barrier_set; bool _is_gc_active; unsigned int _total_collections; // ... started unsigned int _total_full_collections; // ... started - size_t _max_heap_capacity; NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;) NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;) @@ -123,6 +125,21 @@ // Clears an allocated object. inline static void init_obj(HeapWord* obj, size_t size); + // Filler object utilities. + static inline size_t filler_array_hdr_size(); + static inline size_t filler_array_min_size(); + static inline size_t filler_array_max_size(); + + DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);) + DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);) + + // Fill with a single array; caller must ensure filler_array_min_size() <= + // words <= filler_array_max_size(). + static inline void fill_with_array(HeapWord* start, size_t words); + + // Fill with a single object (either an int array or a java.lang.Object). + static inline void fill_with_object_impl(HeapWord* start, size_t words); + // Verification functions virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size) PRODUCT_RETURN; @@ -152,10 +169,7 @@ virtual void post_initialize() = 0; MemRegion reserved_region() const { return _reserved; } - - // Return the number of bytes currently reserved, committed, and used, - // respectively, for holding objects. - size_t reserved_obj_bytes() const { return _reserved.byte_size(); } + address base() const { return (address)reserved_region().start(); } // Future cleanup here. The following functions should specify bytes or // heapwords as part of their signature. @@ -301,6 +315,27 @@ // The boundary between a "large" and "small" array of primitives, in words. virtual size_t large_typearray_limit() = 0; + // Utilities for turning raw memory into filler objects. + // + // min_fill_size() is the smallest region that can be filled. 
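The LP64 loop above has one subtle step: when the remaining words exceed the largest single filler array, it takes a full-size chunk only if doing so leaves at least min_fill_size() behind; otherwise it takes max - min so the tail stays large enough to fill. A standalone sketch that exercises the same splitting rule:

#include <cstddef>
#include <vector>

// Split 'words' into filler chunks of at most 'max' words each, keeping the
// final chunk at least 'min' words (assuming the original request was >= min).
std::vector<size_t> split_filler(size_t words, size_t min, size_t max) {
  std::vector<size_t> chunks;
  while (words > max) {
    // Take a full 'max' chunk unless that would leave a remainder smaller
    // than 'min'; in that case take slightly less so the tail stays fillable.
    size_t cur = (words - max >= min) ? max : max - min;
    chunks.push_back(cur);
    words -= cur;
  }
  if (words > 0) chunks.push_back(words);
  return chunks;
}

// With min = 2 and max = 10, a 21-word region splits into 10 + 8 + 3: the
// second chunk is shortened to 8 so the final remainder (3) is still >= min.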
+ // fill_with_objects() can fill arbitrary-sized regions of the heap using + // multiple objects. fill_with_object() is for regions known to be smaller + // than the largest array of integers; it uses a single object to fill the + // region and has slightly less overhead. + static size_t min_fill_size() { + return size_t(align_object_size(oopDesc::header_size())); + } + + static void fill_with_objects(HeapWord* start, size_t words); + + static void fill_with_object(HeapWord* start, size_t words); + static void fill_with_object(MemRegion region) { + fill_with_object(region.start(), region.word_size()); + } + static void fill_with_object(HeapWord* start, HeapWord* end) { + fill_with_object(start, pointer_delta(end, start)); + } + // Some heaps may offer a contiguous region for shared non-blocking // allocation, via inlined code (by exporting the address of the top and // end fields defining the extent of the contiguous allocation region.) @@ -371,10 +406,8 @@ // Can a compiler initialize a new object without store barriers? // This permission only extends from the creation of a new object // via a TLAB up to the first subsequent safepoint. - virtual bool can_elide_tlab_store_barriers() const { - guarantee(kind() < CollectedHeap::G1CollectedHeap, "else change or refactor this"); - return true; - } + virtual bool can_elide_tlab_store_barriers() const = 0; + // If a compiler is eliding store barriers for TLAB-allocated objects, // there is probably a corresponding slow path which can produce // an object allocated anywhere. The compiler's runtime support @@ -386,12 +419,10 @@ // Can a compiler elide a store barrier when it writes // a permanent oop into the heap? Applies when the compiler // is storing x to the heap, where x->is_perm() is true. - virtual bool can_elide_permanent_oop_store_barriers() const; - + virtual bool can_elide_permanent_oop_store_barriers() const = 0; + // Does this heap support heap inspection (+PrintClassHistogram?) - virtual bool supports_heap_inspection() const { - return false; // Until RFE 5023697 is implemented - } + virtual bool supports_heap_inspection() const = 0; // Perform a collection of the heap; intended for use in implementing // "System.gc". This probably implies as full a collection as the --- old/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp 2009-08-01 04:11:34.680517940 +0100 +++ new/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp 2009-08-01 04:11:34.598200546 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)collectedHeap.inline.hpp 1.50 07/09/07 10:56:50 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
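The fill_with_object()/fill_with_objects() entry points declared above exist so collectors can plug dead gaps with dummy objects and keep the heap parsable: heap walkers advance from object to object by reading each object's size, so an unformatted hole derails the walk. A toy model of why that matters (not HotSpot's object layout):

#include <cstddef>

// Pretend every object starts with a header giving its total size in words.
struct ObjHeader { size_t size_in_words; };

// Walk [start, end) object by object. If a gap has no header (no filler was
// installed), size_in_words is garbage and the walk goes off the rails.
size_t count_objects(ObjHeader* start, ObjHeader* end) {
  size_t n = 0;
  ObjHeader* p = start;
  while (p < end) {
    ++n;
    p = reinterpret_cast<ObjHeader*>(
        reinterpret_cast<size_t*>(p) + p->size_in_words);
  }
  return n;
}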
* * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,6 @@ void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass, HeapWord* objPtr, size_t size) { - oop obj = (oop)objPtr; assert(obj != NULL, "NULL object pointer"); @@ -47,9 +46,6 @@ // May be bootstrapping obj->set_mark(markOopDesc::prototype()); } - - // support low memory notifications (no-op if not enabled) - LowMemoryDetector::detect_low_memory_for_collected_pools(); } void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass, @@ -64,8 +60,14 @@ obj->set_klass(klass()); assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL, "missing blueprint"); - - // support for JVMTI VMObjectAlloc event (no-op if not enabled) +} + +// Support for jvmti and dtrace +inline void post_allocation_notify(KlassHandle klass, oop obj) { + // support low memory notifications (no-op if not enabled) + LowMemoryDetector::detect_low_memory_for_collected_pools(); + + // support for JVMTI VMObjectAlloc event (no-op if not enabled) JvmtiExport::vm_object_alloc_event_collector(obj); if (DTraceAllocProbes) { @@ -82,18 +84,23 @@ post_allocation_setup_common(klass, obj, size); assert(Universe::is_bootstrapping() || !((oop)obj)->blueprint()->oop_is_array(), "must not be an array"); -} + // notify jvmti and dtrace + post_allocation_notify(klass, (oop)obj); +} void CollectedHeap::post_allocation_setup_array(KlassHandle klass, HeapWord* obj, size_t size, int length) { - // Set array length before posting jvmti object alloc event - // in post_allocation_setup_common() + // Set array length before setting the _klass field + // in post_allocation_setup_common() because the klass field + // indicates that the object is parsable by concurrent GC. assert(length >= 0, "length should be non-negative"); ((arrayOop)obj)->set_length(length); post_allocation_setup_common(klass, obj, size); assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array"); + // notify jvmti and dtrace (must be after length is set for dtrace) + post_allocation_notify(klass, (oop)obj); } HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) { @@ -117,11 +124,11 @@ return result; } } - bool gc_overhead_limit_was_exceeded; - result = Universe::heap()->mem_allocate(size, - is_noref, - false, - &gc_overhead_limit_was_exceeded); + bool gc_overhead_limit_was_exceeded = false; + result = Universe::heap()->mem_allocate(size, + is_noref, + false, + &gc_overhead_limit_was_exceeded); if (result != NULL) { NOT_PRODUCT(Universe::heap()-> check_for_non_bad_heap_word_value(result, size)); @@ -220,6 +227,7 @@ assert(obj != NULL, "cannot initialize NULL object"); const size_t hs = oopDesc::header_size(); assert(size >= hs, "unexpected object size"); + ((oop)obj)->set_klass_gap(0); Copy::fill_to_aligned_words(obj + hs, size - hs); } --- old/hotspot/src/share/vm/gc_interface/gcCause.hpp 2009-08-01 04:11:35.545248092 +0100 +++ new/hotspot/src/share/vm/gc_interface/gcCause.hpp 2009-08-01 04:11:35.476699078 +0100 @@ -63,6 +63,8 @@ _old_generation_too_full_to_scavenge, _adaptive_size_policy, + _g1_inc_collection_pause, + _last_ditch_collection, _last_gc_cause }; @@ -71,12 +73,14 @@ return (cause == GCCause::_java_lang_system_gc || cause == GCCause::_jvmti_force_gc); } + inline static bool is_serviceability_requested_gc(GCCause::Cause cause) { return (cause == GCCause::_jvmti_force_gc || cause == GCCause::_heap_inspection || cause == GCCause::_heap_dump); } + // Return a string describing the GCCause. 
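init_obj() above now also clears the klass gap and then zeroes only the object body, starting after the header words that post-allocation setup will fill in with the mark and klass. The body-clearing step in isolation (generic word arithmetic, not HotSpot's oopDesc layout):

#include <cstddef>
#include <cstring>

// Zero a freshly allocated object's payload, leaving its 'hdr_words' header
// words (mark word, klass, and on some layouts a klass gap) untouched.
void init_obj_body(void* obj, size_t total_words, size_t hdr_words) {
  size_t* words = static_cast<size_t*>(obj);
  std::memset(words + hdr_words, 0, (total_words - hdr_words) * sizeof(size_t));
}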
static const char* to_string(GCCause::Cause cause); // Return true if the GCCause is for a full collection. --- old/hotspot/src/share/vm/includeDB_compiler1 2009-08-01 04:11:36.387636288 +0100 +++ new/hotspot/src/share/vm/includeDB_compiler1 2009-08-01 04:11:36.305366531 +0100 @@ -1,5 +1,5 @@ // -// Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,9 @@ c1_CFGPrinter.hpp c1_Compilation.hpp c1_CFGPrinter.hpp c1_Instruction.hpp +cardTableModRefBS.cpp c1_LIR.hpp +cardTableModRefBS.cpp c1_LIRGenerator.hpp + c1_Canonicalizer.cpp c1_Canonicalizer.hpp c1_Canonicalizer.cpp c1_InstructionPrinter.hpp c1_Canonicalizer.cpp ciArray.hpp @@ -55,6 +58,7 @@ c1_CodeStubs_.cpp c1_LIRAssembler.hpp c1_CodeStubs_.cpp c1_MacroAssembler.hpp c1_CodeStubs_.cpp c1_Runtime1.hpp +c1_CodeStubs_.cpp g1SATBCardTableModRefBS.hpp c1_CodeStubs_.cpp nativeInst_.hpp c1_CodeStubs_.cpp sharedRuntime.hpp c1_CodeStubs_.cpp vmreg_.inline.hpp @@ -141,6 +145,7 @@ c1_globals_.hpp globalDefinitions.hpp c1_globals_.hpp macros.hpp +c1_GraphBuilder.cpp bitMap.inline.hpp c1_GraphBuilder.cpp bytecode.hpp c1_GraphBuilder.cpp c1_CFGPrinter.hpp c1_GraphBuilder.cpp c1_Canonicalizer.hpp @@ -158,6 +163,7 @@ c1_GraphBuilder.hpp ciMethodData.hpp c1_GraphBuilder.hpp ciStreams.hpp +c1_IR.cpp bitMap.inline.hpp c1_IR.cpp c1_Compilation.hpp c1_IR.cpp c1_FrameMap.hpp c1_IR.cpp c1_GraphBuilder.hpp @@ -232,20 +238,22 @@ c1_LIRAssembler_.hpp generate_platform_dependent_include -c1_LIRGenerator.cpp c1_Compilation.hpp -c1_LIRGenerator.cpp c1_FrameMap.hpp -c1_LIRGenerator.cpp c1_Instruction.hpp -c1_LIRGenerator.cpp c1_LIRAssembler.hpp -c1_LIRGenerator.cpp c1_LIRGenerator.hpp -c1_LIRGenerator.cpp c1_ValueStack.hpp -c1_LIRGenerator.cpp ciArrayKlass.hpp -c1_LIRGenerator.cpp ciInstance.hpp -c1_LIRGenerator.cpp sharedRuntime.hpp - -c1_LIRGenerator.hpp c1_Instruction.hpp -c1_LIRGenerator.hpp c1_LIR.hpp -c1_LIRGenerator.hpp ciMethodData.hpp -c1_LIRGenerator.hpp sizes.hpp +c1_LIRGenerator.cpp bitMap.inline.hpp +c1_LIRGenerator.cpp c1_Compilation.hpp +c1_LIRGenerator.cpp c1_FrameMap.hpp +c1_LIRGenerator.cpp c1_Instruction.hpp +c1_LIRGenerator.cpp c1_LIRAssembler.hpp +c1_LIRGenerator.cpp c1_LIRGenerator.hpp +c1_LIRGenerator.cpp c1_ValueStack.hpp +c1_LIRGenerator.cpp ciArrayKlass.hpp +c1_LIRGenerator.cpp ciInstance.hpp +c1_LIRGenerator.cpp heapRegion.hpp +c1_LIRGenerator.cpp sharedRuntime.hpp + +c1_LIRGenerator.hpp c1_Instruction.hpp +c1_LIRGenerator.hpp c1_LIR.hpp +c1_LIRGenerator.hpp ciMethodData.hpp +c1_LIRGenerator.hpp sizes.hpp c1_LIRGenerator_.cpp c1_Compilation.hpp c1_LIRGenerator_.cpp c1_FrameMap.hpp @@ -258,7 +266,9 @@ c1_LIRGenerator_.cpp ciObjArrayKlass.hpp c1_LIRGenerator_.cpp ciTypeArrayKlass.hpp c1_LIRGenerator_.cpp sharedRuntime.hpp +c1_LIRGenerator_.cpp vmreg_.inline.hpp +c1_LinearScan.cpp bitMap.inline.hpp c1_LinearScan.cpp c1_CFGPrinter.hpp c1_LinearScan.cpp c1_Compilation.hpp c1_LinearScan.cpp c1_FrameMap.hpp @@ -275,13 +285,14 @@ c1_LinearScan.hpp c1_LIR.hpp c1_LinearScan.hpp c1_LIRGenerator.hpp +c1_LinearScan_.cpp bitMap.inline.hpp c1_LinearScan_.cpp c1_Instruction.hpp c1_LinearScan_.cpp c1_LinearScan.hpp c1_LinearScan_.hpp generate_platform_dependent_include c1_MacroAssembler.hpp assembler.hpp -c1_MacroAssembler.hpp assembler_.inline.hpp +c1_MacroAssembler.hpp assembler_.inline.hpp c1_MacroAssembler_.cpp arrayOop.hpp 
c1_MacroAssembler_.cpp biasedLocking.hpp @@ -297,6 +308,7 @@ c1_MacroAssembler_.hpp generate_platform_dependent_include +c1_Optimizer.cpp bitMap.inline.hpp c1_Optimizer.cpp c1_Canonicalizer.hpp c1_Optimizer.cpp c1_Optimizer.hpp c1_Optimizer.cpp c1_ValueMap.hpp @@ -323,7 +335,7 @@ c1_Runtime1.cpp compilationPolicy.hpp c1_Runtime1.cpp compiledIC.hpp c1_Runtime1.cpp copy.hpp -c1_Runtime1.cpp disassembler_.hpp +c1_Runtime1.cpp disassembler.hpp c1_Runtime1.cpp events.hpp c1_Runtime1.cpp interfaceSupport.hpp c1_Runtime1.cpp interpreter.hpp @@ -362,6 +374,7 @@ c1_Runtime1_.cpp vframeArray.hpp c1_Runtime1_.cpp vmreg_.inline.hpp +c1_ValueMap.cpp bitMap.inline.hpp c1_ValueMap.cpp c1_Canonicalizer.hpp c1_ValueMap.cpp c1_IR.hpp c1_ValueMap.cpp c1_ValueMap.hpp @@ -432,4 +445,3 @@ top.hpp c1_globals.hpp vmStructs.hpp c1_Runtime1.hpp - --- old/hotspot/src/share/vm/includeDB_compiler2 2009-08-01 04:11:37.354316555 +0100 +++ new/hotspot/src/share/vm/includeDB_compiler2 2009-08-01 04:11:37.268831827 +0100 @@ -1,5 +1,5 @@ // -// Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -19,14 +19,14 @@ // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // CA 95054 USA or visit www.sun.com if you need additional information or // have any questions. -// +// // ad_.cpp adGlobals_.hpp ad_.cpp ad_.hpp ad_.cpp allocation.inline.hpp ad_.cpp assembler.hpp -ad_.cpp assembler_.inline.hpp +ad_.cpp assembler_.inline.hpp ad_.cpp biasedLocking.hpp ad_.cpp cfgnode.hpp ad_.cpp collectedHeap.inline.hpp @@ -164,6 +164,7 @@ callGenerator.hpp type.hpp callnode.cpp callnode.hpp +callnode.cpp bcEscapeAnalyzer.hpp callnode.cpp escape.hpp callnode.cpp locknode.hpp callnode.cpp machnode.hpp @@ -176,7 +177,6 @@ callnode.cpp runtime.hpp callnode.hpp connode.hpp -callnode.hpp escape.hpp callnode.hpp mulnode.hpp callnode.hpp multnode.hpp callnode.hpp opcodes.hpp @@ -347,7 +347,6 @@ connode.cpp allocation.inline.hpp connode.cpp compile.hpp connode.cpp connode.hpp -connode.cpp escape.hpp connode.cpp machnode.hpp connode.cpp matcher.hpp connode.cpp memnode.hpp @@ -410,6 +409,7 @@ escape.cpp allocation.hpp escape.cpp bcEscapeAnalyzer.hpp +escape.cpp c2compiler.hpp escape.cpp callnode.hpp escape.cpp cfgnode.hpp escape.cpp compile.hpp @@ -461,10 +461,13 @@ graphKit.cpp addnode.hpp graphKit.cpp barrierSet.hpp graphKit.cpp cardTableModRefBS.hpp +graphKit.cpp g1SATBCardTableModRefBS.hpp graphKit.cpp collectedHeap.hpp graphKit.cpp compileLog.hpp graphKit.cpp deoptimization.hpp graphKit.cpp graphKit.hpp +graphKit.cpp heapRegion.hpp +graphKit.cpp idealKit.hpp graphKit.cpp locknode.hpp graphKit.cpp machnode.hpp graphKit.cpp parse.hpp @@ -484,6 +487,7 @@ idealKit.cpp callnode.hpp idealKit.cpp cfgnode.hpp idealKit.cpp idealKit.hpp +idealKit.cpp runtime.hpp idealKit.hpp connode.hpp idealKit.hpp mulnode.hpp @@ -582,6 +586,7 @@ loopTransform.cpp addnode.hpp loopTransform.cpp allocation.inline.hpp loopTransform.cpp connode.hpp +loopTransform.cpp compileLog.hpp loopTransform.cpp divnode.hpp loopTransform.cpp loopnode.hpp loopTransform.cpp mulnode.hpp @@ -597,6 +602,7 @@ loopnode.cpp allocation.inline.hpp loopnode.cpp callnode.hpp loopnode.cpp ciMethodData.hpp +loopnode.cpp compileLog.hpp loopnode.cpp connode.hpp loopnode.cpp divnode.hpp loopnode.cpp loopnode.hpp @@ -843,7 +849,6 @@ phaseX.cpp callnode.hpp phaseX.cpp 
cfgnode.hpp phaseX.cpp connode.hpp -phaseX.cpp escape.hpp phaseX.cpp loopnode.hpp phaseX.cpp machnode.hpp phaseX.cpp opcodes.hpp @@ -916,9 +921,11 @@ runtime.cpp connode.hpp runtime.cpp copy.hpp runtime.cpp fprofiler.hpp +runtime.cpp g1SATBCardTableModRefBS.hpp runtime.cpp gcLocker.inline.hpp runtime.cpp graphKit.hpp runtime.cpp handles.inline.hpp +runtime.cpp heapRegion.hpp runtime.cpp icBuffer.hpp runtime.cpp interfaceSupport.hpp runtime.cpp interpreter.hpp @@ -958,7 +965,7 @@ runtime_.cpp adGlobals_.hpp runtime_.cpp ad_.hpp runtime_.cpp assembler.hpp -runtime_.cpp assembler_.inline.hpp +runtime_.cpp assembler_.inline.hpp runtime_.cpp globalDefinitions.hpp runtime_.cpp interfaceSupport.hpp runtime_.cpp interpreter.hpp @@ -990,6 +997,7 @@ subnode.cpp addnode.hpp subnode.cpp allocation.inline.hpp +subnode.cpp callnode.hpp subnode.cpp cfgnode.hpp subnode.cpp compileLog.hpp subnode.cpp connode.hpp @@ -1084,9 +1092,10 @@ idealGraphPrinter.hpp vectset.hpp idealGraphPrinter.hpp growableArray.hpp idealGraphPrinter.hpp ostream.hpp +idealGraphPrinter.hpp xmlstream.hpp idealGraphPrinter.cpp idealGraphPrinter.hpp -idealGraphPrinter.cpp chaitin.hpp +idealGraphPrinter.cpp chaitin.hpp idealGraphPrinter.cpp machnode.hpp idealGraphPrinter.cpp parse.hpp idealGraphPrinter.cpp threadCritical.hpp @@ -1098,4 +1107,4 @@ parse1.cpp idealGraphPrinter.hpp matcher.cpp idealGraphPrinter.hpp loopnode.cpp idealGraphPrinter.hpp -chaitin.cpp idealGraphPrinter.hpp +chaitin.cpp idealGraphPrinter.hpp --- old/hotspot/src/share/vm/includeDB_core 2009-08-01 04:11:38.361507626 +0100 +++ new/hotspot/src/share/vm/includeDB_core 2009-08-01 04:11:38.247072209 +0100 @@ -1,5 +1,5 @@ // -// Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, // CA 95054 USA or visit www.sun.com if you need additional information or // have any questions. -// +// // // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps! @@ -46,13 +46,13 @@ // as dependencies. Header files named H.inline.hpp generally contain // bodies for inline functions declared in H.hpp. // -// NOTE: Files that use the token "generate_platform_dependent_include" +// NOTE: Files that use the token "generate_platform_dependent_include" // are expected to contain macro references like , , ... and // makedeps has a dependency on these platform files looking like: -// foo_.trailing_string +// foo_.trailing_string // (where "trailing_string" can be any legal filename strings but typically // is "hpp" or "inline.hpp"). -// +// // The dependency in makedeps (and enforced) is that an underscore // will precedure the macro invocation. 
Note that this restriction // is only enforced on filenames that have the dependency token @@ -148,12 +148,6 @@ allocation.inline.hpp os.hpp -allocationStats.cpp allocationStats.hpp - -allocationStats.hpp allocation.hpp -allocationStats.hpp gcUtil.hpp -allocationStats.hpp globalDefinitions.hpp - aprofiler.cpp aprofiler.hpp aprofiler.cpp collectedHeap.inline.hpp aprofiler.cpp oop.inline.hpp @@ -184,6 +178,7 @@ arguments.cpp universe.inline.hpp arguments.cpp vm_version_.hpp +arguments.hpp java.hpp arguments.hpp perfData.hpp arguments.hpp top.hpp @@ -197,7 +192,6 @@ arrayKlass.cpp arrayKlass.hpp arrayKlass.cpp arrayKlassKlass.hpp arrayKlass.cpp arrayOop.hpp -arrayKlass.cpp collectedHeap.hpp arrayKlass.cpp collectedHeap.inline.hpp arrayKlass.cpp gcLocker.hpp arrayKlass.cpp instanceKlass.hpp @@ -217,6 +211,7 @@ arrayKlassKlass.cpp arrayKlassKlass.hpp arrayKlassKlass.cpp handles.inline.hpp arrayKlassKlass.cpp javaClasses.hpp +arrayKlassKlass.cpp markSweep.inline.hpp arrayKlassKlass.cpp oop.inline.hpp arrayKlassKlass.hpp arrayKlass.hpp @@ -233,7 +228,7 @@ assembler.cpp assembler.hpp assembler.cpp assembler.inline.hpp -assembler.cpp assembler_.inline.hpp +assembler.cpp assembler_.inline.hpp assembler.cpp codeBuffer.hpp assembler.cpp icache.hpp assembler.cpp os.hpp @@ -250,32 +245,32 @@ assembler.inline.hpp assembler.hpp assembler.inline.hpp codeBuffer.hpp -assembler.inline.hpp disassembler_.hpp +assembler.inline.hpp disassembler.hpp assembler.inline.hpp threadLocalStorage.hpp -assembler_.cpp assembler_.inline.hpp -assembler_.cpp biasedLocking.hpp -assembler_.cpp cardTableModRefBS.hpp -assembler_.cpp collectedHeap.hpp -assembler_.cpp interfaceSupport.hpp -assembler_.cpp interpreter.hpp -assembler_.cpp objectMonitor.hpp -assembler_.cpp os.hpp -assembler_.cpp resourceArea.hpp -assembler_.cpp sharedRuntime.hpp -assembler_.cpp stubRoutines.hpp - -assembler_.hpp generate_platform_dependent_include - -assembler_.inline.hpp assembler.inline.hpp -assembler_.inline.hpp codeBuffer.hpp -assembler_.inline.hpp codeCache.hpp -assembler_.inline.hpp handles.inline.hpp - -assembler_.cpp assembler.hpp -assembler_.cpp assembler_.inline.hpp -assembler_.cpp os.hpp -assembler_.cpp threadLocalStorage.hpp +assembler_.cpp assembler_.inline.hpp +assembler_.cpp biasedLocking.hpp +assembler_.cpp cardTableModRefBS.hpp +assembler_.cpp collectedHeap.inline.hpp +assembler_.cpp interfaceSupport.hpp +assembler_.cpp interpreter.hpp +assembler_.cpp objectMonitor.hpp +assembler_.cpp os.hpp +assembler_.cpp resourceArea.hpp +assembler_.cpp sharedRuntime.hpp +assembler_.cpp stubRoutines.hpp + +assembler_.hpp generate_platform_dependent_include + +assembler_.inline.hpp assembler.inline.hpp +assembler_.inline.hpp codeBuffer.hpp +assembler_.inline.hpp codeCache.hpp +assembler_.inline.hpp handles.inline.hpp + +assembler_.cpp assembler.hpp +assembler_.cpp assembler_.inline.hpp +assembler_.cpp os.hpp +assembler_.cpp threadLocalStorage.hpp atomic.cpp atomic.hpp atomic.cpp atomic_.inline.hpp @@ -293,6 +288,10 @@ attachListener.hpp debug.hpp attachListener.hpp ostream.hpp +barrierSet.cpp barrierSet.hpp +barrierSet.cpp collectedHeap.hpp +barrierSet.cpp universe.hpp + barrierSet.hpp memRegion.hpp barrierSet.hpp oopsHierarchy.hpp @@ -300,7 +299,7 @@ barrierSet.inline.hpp cardTableModRefBS.hpp bcEscapeAnalyzer.cpp bcEscapeAnalyzer.hpp -bcEscapeAnalyzer.cpp bitMap.hpp +bcEscapeAnalyzer.cpp bitMap.inline.hpp bcEscapeAnalyzer.cpp bytecode.hpp bcEscapeAnalyzer.cpp ciConstant.hpp bcEscapeAnalyzer.cpp ciField.hpp @@ -325,21 +324,19 @@ biasedLocking.hpp 
growableArray.hpp biasedLocking.hpp handles.hpp -bitMap.cpp bitMap.hpp +bitMap.cpp allocation.inline.hpp bitMap.cpp bitMap.inline.hpp bitMap.cpp copy.hpp bitMap.cpp os_.inline.hpp bitMap.hpp allocation.hpp -bitMap.hpp ostream.hpp bitMap.hpp top.hpp bitMap.inline.hpp atomic.hpp bitMap.inline.hpp bitMap.hpp -blockOffsetTable.cpp blockOffsetTable.hpp blockOffsetTable.cpp blockOffsetTable.inline.hpp -blockOffsetTable.cpp collectedHeap.hpp +blockOffsetTable.cpp collectedHeap.inline.hpp blockOffsetTable.cpp iterator.hpp blockOffsetTable.cpp java.hpp blockOffsetTable.cpp oop.inline.hpp @@ -651,6 +648,7 @@ ciMethod.cpp abstractCompiler.hpp ciMethod.cpp allocation.inline.hpp ciMethod.cpp bcEscapeAnalyzer.hpp +ciMethod.cpp bitMap.inline.hpp ciMethod.cpp ciCallProfile.hpp ciMethod.cpp ciExceptionHandler.hpp ciMethod.cpp ciInstanceKlass.hpp @@ -725,6 +723,11 @@ ciObjArray.cpp ciUtilities.hpp ciObjArray.cpp objArrayOop.hpp +ciObjArray.cpp ciObjArray.hpp +ciObjArray.cpp ciNullObject.hpp +ciObjArray.cpp ciUtilities.hpp +ciObjArray.cpp objArrayOop.hpp + ciObjArrayKlass.cpp ciInstanceKlass.hpp ciObjArrayKlass.cpp ciObjArrayKlass.hpp ciObjArrayKlass.cpp ciObjArrayKlassKlass.hpp @@ -952,7 +955,7 @@ codeBlob.cpp bytecode.hpp codeBlob.cpp codeBlob.hpp codeBlob.cpp codeCache.hpp -codeBlob.cpp disassembler_.hpp +codeBlob.cpp disassembler.hpp codeBlob.cpp forte.hpp codeBlob.cpp handles.inline.hpp codeBlob.cpp heap.hpp @@ -974,7 +977,7 @@ codeBuffer.cpp codeBuffer.hpp codeBuffer.cpp copy.hpp -codeBuffer.cpp disassembler_.hpp +codeBuffer.cpp disassembler.hpp codeBuffer.hpp assembler.hpp codeBuffer.hpp oopRecorder.hpp @@ -996,6 +999,7 @@ codeCache.cpp mutexLocker.hpp codeCache.cpp nmethod.hpp codeCache.cpp objArrayOop.hpp +codeCache.cpp oop.inline.hpp codeCache.cpp pcDesc.hpp codeCache.cpp resourceArea.hpp @@ -1130,7 +1134,7 @@ compiledICHolderKlass.cpp compiledICHolderKlass.hpp compiledICHolderKlass.cpp handles.inline.hpp compiledICHolderKlass.cpp javaClasses.hpp -compiledICHolderKlass.cpp markSweep.hpp +compiledICHolderKlass.cpp markSweep.inline.hpp compiledICHolderKlass.cpp oop.inline.hpp compiledICHolderKlass.cpp oop.inline2.hpp compiledICHolderKlass.cpp permGen.hpp @@ -1198,6 +1202,7 @@ constMethodKlass.cpp gcLocker.hpp constMethodKlass.cpp handles.inline.hpp constMethodKlass.cpp interpreter.hpp +constMethodKlass.cpp markSweep.inline.hpp constMethodKlass.cpp oop.inline.hpp constMethodKlass.cpp oop.inline2.hpp constMethodKlass.cpp resourceArea.hpp @@ -1216,6 +1221,8 @@ constantPoolKlass.cpp constantPoolKlass.hpp constantPoolKlass.cpp constantPoolOop.hpp constantPoolKlass.cpp handles.inline.hpp +constantPoolKlass.cpp javaClasses.hpp +constantPoolKlass.cpp markSweep.inline.hpp constantPoolKlass.cpp oop.inline.hpp constantPoolKlass.cpp oop.inline2.hpp constantPoolKlass.cpp oopFactory.hpp @@ -1267,7 +1274,8 @@ cpCacheKlass.cpp constantPoolOop.hpp cpCacheKlass.cpp cpCacheKlass.hpp cpCacheKlass.cpp handles.inline.hpp -cpCacheKlass.cpp markSweep.hpp +cpCacheKlass.cpp javaClasses.hpp +cpCacheKlass.cpp markSweep.inline.hpp cpCacheKlass.cpp oop.inline.hpp cpCacheKlass.cpp permGen.hpp @@ -1279,7 +1287,6 @@ cpCacheOop.cpp handles.inline.hpp cpCacheOop.cpp interpreter.hpp cpCacheOop.cpp jvmtiRedefineClassesTrace.hpp -cpCacheOop.cpp markSweep.hpp cpCacheOop.cpp markSweep.inline.hpp cpCacheOop.cpp objArrayOop.hpp cpCacheOop.cpp oop.inline.hpp @@ -1329,7 +1336,7 @@ debug.cpp collectedHeap.hpp debug.cpp compileBroker.hpp debug.cpp defaultStream.hpp -debug.cpp disassembler_.hpp +debug.cpp disassembler.hpp debug.cpp events.hpp 
debug.cpp frame.hpp debug.cpp heapDumper.hpp @@ -1391,7 +1398,6 @@ defNewGeneration.cpp collectorCounters.hpp defNewGeneration.cpp copy.hpp -defNewGeneration.cpp defNewGeneration.hpp defNewGeneration.cpp defNewGeneration.inline.hpp defNewGeneration.cpp gcLocker.inline.hpp defNewGeneration.cpp gcPolicyCounters.hpp @@ -1403,8 +1409,8 @@ defNewGeneration.cpp java.hpp defNewGeneration.cpp oop.inline.hpp defNewGeneration.cpp referencePolicy.hpp -defNewGeneration.cpp space.hpp defNewGeneration.cpp space.inline.hpp +defNewGeneration.cpp spaceDecorator.hpp defNewGeneration.cpp thread_.inline.hpp defNewGeneration.hpp ageTable.hpp @@ -1412,6 +1418,7 @@ defNewGeneration.hpp generation.inline.hpp defNewGeneration.hpp generationCounters.hpp +defNewGeneration.inline.hpp cardTableRS.hpp defNewGeneration.inline.hpp defNewGeneration.hpp defNewGeneration.inline.hpp space.hpp @@ -1448,7 +1455,7 @@ deoptimization.hpp frame.inline.hpp depChecker_.cpp depChecker_.hpp -depChecker_.cpp disassembler_.hpp +depChecker_.cpp disassembler.hpp depChecker_.cpp hpi.hpp dependencies.cpp ciArrayKlass.hpp @@ -1478,21 +1485,21 @@ dictionary.hpp oop.hpp dictionary.hpp systemDictionary.hpp -disassemblerEnv.hpp globals.hpp +disassembler_.hpp generate_platform_dependent_include -disassembler_.cpp cardTableModRefBS.hpp -disassembler_.cpp codeCache.hpp -disassembler_.cpp collectedHeap.hpp -disassembler_.cpp depChecker_.hpp -disassembler_.cpp disassembler_.hpp -disassembler_.cpp fprofiler.hpp -disassembler_.cpp handles.inline.hpp -disassembler_.cpp hpi.hpp -disassembler_.cpp stubCodeGenerator.hpp -disassembler_.cpp stubRoutines.hpp +disassembler.cpp cardTableModRefBS.hpp +disassembler.cpp codeCache.hpp +disassembler.cpp collectedHeap.hpp +disassembler.cpp depChecker_.hpp +disassembler.cpp disassembler.hpp +disassembler.cpp fprofiler.hpp +disassembler.cpp handles.inline.hpp +disassembler.cpp hpi.hpp +disassembler.cpp stubCodeGenerator.hpp +disassembler.cpp stubRoutines.hpp -disassembler_.hpp disassemblerEnv.hpp -disassembler_.hpp os_.inline.hpp +disassembler.hpp globals.hpp +disassembler.hpp os_.inline.hpp dtraceAttacher.cpp codeCache.hpp dtraceAttacher.cpp deoptimization.hpp @@ -1501,6 +1508,30 @@ dtraceAttacher.cpp vmThread.hpp dtraceAttacher.cpp vm_operations.hpp +dtraceJSDT.cpp allocation.hpp +dtraceJSDT.cpp codeBlob.hpp +dtraceJSDT.cpp dtraceJSDT.hpp +dtraceJSDT.cpp exceptions.hpp +dtraceJSDT.cpp globalDefinitions.hpp +dtraceJSDT.cpp javaClasses.hpp +dtraceJSDT.cpp jniHandles.hpp +dtraceJSDT.cpp jvm.h +dtraceJSDT.cpp os.hpp +dtraceJSDT.cpp utf8.hpp + +dtraceJSDT.hpp nativeInst_.hpp +dtraceJSDT.hpp nmethod.hpp + +dtraceJSDT_.cpp allocation.hpp +dtraceJSDT_.cpp codeBlob.hpp +dtraceJSDT_.cpp dtraceJSDT.hpp +dtraceJSDT_.cpp globalDefinitions.hpp +dtraceJSDT_.cpp javaClasses.hpp +dtraceJSDT_.cpp jniHandles.hpp +dtraceJSDT_.cpp jvm.h +dtraceJSDT_.cpp os.hpp +dtraceJSDT_.cpp signature.hpp + // dump is jck optional, put cpp deps in includeDB_features events.cpp allocation.inline.hpp @@ -1642,6 +1673,7 @@ gcLocker.cpp gcLocker.inline.hpp gcLocker.cpp sharedHeap.hpp +gcLocker.cpp resourceArea.hpp gcLocker.hpp collectedHeap.hpp gcLocker.hpp genCollectedHeap.hpp @@ -1731,7 +1763,7 @@ genRemSet.hpp oop.hpp -generateOopMap.cpp bitMap.hpp +generateOopMap.cpp bitMap.inline.hpp generateOopMap.cpp bytecodeStream.hpp generateOopMap.cpp generateOopMap.hpp generateOopMap.cpp handles.inline.hpp @@ -1763,6 +1795,7 @@ generation.cpp java.hpp generation.cpp oop.hpp generation.cpp oop.inline.hpp +generation.cpp spaceDecorator.hpp generation.cpp 
space.inline.hpp generation.hpp allocation.hpp @@ -1779,6 +1812,8 @@ generation.inline.hpp generation.hpp generation.inline.hpp space.hpp +genOopClosures.hpp oop.hpp + generationSpec.cpp compactPermGen.hpp generationSpec.cpp defNewGeneration.hpp generationSpec.cpp filemap.hpp @@ -1897,7 +1932,7 @@ hpi_imported.h jni.h -icBuffer.cpp assembler_.inline.hpp +icBuffer.cpp assembler_.inline.hpp icBuffer.cpp collectedHeap.inline.hpp icBuffer.cpp compiledIC.hpp icBuffer.cpp icBuffer.hpp @@ -1918,7 +1953,7 @@ icBuffer.hpp stubs.hpp icBuffer_.cpp assembler.hpp -icBuffer_.cpp assembler_.inline.hpp +icBuffer_.cpp assembler_.inline.hpp icBuffer_.cpp bytecodes.hpp icBuffer_.cpp collectedHeap.inline.hpp icBuffer_.cpp icBuffer.hpp @@ -1933,14 +1968,14 @@ icache.hpp allocation.hpp icache.hpp stubCodeGenerator.hpp -icache_.cpp assembler_.inline.hpp +icache_.cpp assembler_.inline.hpp icache_.cpp icache.hpp icache_.hpp generate_platform_dependent_include init.cpp bytecodes.hpp init.cpp collectedHeap.hpp -init.cpp handles.inline.hpp +init.cpp handles.inline.hpp init.cpp icBuffer.hpp init.cpp icache.hpp init.cpp init.hpp @@ -1962,6 +1997,7 @@ instanceKlass.cpp jvmti.h instanceKlass.cpp jvmtiExport.hpp instanceKlass.cpp jvmtiRedefineClassesTrace.hpp +instanceKlass.cpp markSweep.inline.hpp instanceKlass.cpp methodOop.hpp instanceKlass.cpp mutexLocker.hpp instanceKlass.cpp objArrayKlassKlass.hpp @@ -1997,6 +2033,7 @@ instanceKlassKlass.cpp instanceRefKlass.hpp instanceKlassKlass.cpp javaClasses.hpp instanceKlassKlass.cpp jvmtiExport.hpp +instanceKlassKlass.cpp markSweep.inline.hpp instanceKlassKlass.cpp objArrayKlassKlass.hpp instanceKlassKlass.cpp objArrayOop.hpp instanceKlassKlass.cpp oop.inline.hpp @@ -2018,7 +2055,7 @@ instanceRefKlass.cpp genOopClosures.inline.hpp instanceRefKlass.cpp instanceRefKlass.hpp instanceRefKlass.cpp javaClasses.hpp -instanceRefKlass.cpp markSweep.hpp +instanceRefKlass.cpp markSweep.inline.hpp instanceRefKlass.cpp oop.inline.hpp instanceRefKlass.cpp preserveException.hpp instanceRefKlass.cpp systemDictionary.hpp @@ -2056,6 +2093,7 @@ interp_masm_.cpp interpreterRuntime.hpp interp_masm_.cpp interpreter.hpp interp_masm_.cpp jvmtiExport.hpp +interp_masm_.cpp jvmtiRedefineClassesTrace.hpp interp_masm_.cpp jvmtiThreadState.hpp interp_masm_.cpp markOop.hpp interp_masm_.cpp methodDataOop.hpp @@ -2064,7 +2102,7 @@ interp_masm_.cpp synchronizer.hpp interp_masm_.cpp thread_.inline.hpp -interp_masm_.hpp assembler_.inline.hpp +interp_masm_.hpp assembler_.inline.hpp interp_masm_.hpp invocationCounter.hpp interpreter.cpp allocation.inline.hpp @@ -2188,6 +2226,11 @@ invocationCounter.hpp exceptions.hpp invocationCounter.hpp handles.hpp +intHisto.cpp intHisto.hpp + +intHisto.hpp allocation.hpp +intHisto.hpp growableArray.hpp + iterator.cpp iterator.hpp iterator.cpp oop.inline.hpp @@ -2261,6 +2304,7 @@ javaCalls.cpp interfaceSupport.hpp javaCalls.cpp interpreter.hpp javaCalls.cpp javaCalls.hpp +javaCalls.cpp jniCheck.hpp javaCalls.cpp linkResolver.hpp javaCalls.cpp mutexLocker.hpp javaCalls.cpp nmethod.hpp @@ -2371,7 +2415,7 @@ jniFastGetField.hpp allocation.hpp jniFastGetField.hpp jvm_misc.hpp -jniFastGetField_.cpp assembler_.inline.hpp +jniFastGetField_.cpp assembler_.inline.hpp jniFastGetField_.cpp jniFastGetField.hpp jniFastGetField_.cpp jvm_misc.hpp jniFastGetField_.cpp resourceArea.hpp @@ -2402,6 +2446,7 @@ jvm.cpp collectedHeap.inline.hpp jvm.cpp copy.hpp jvm.cpp defaultStream.hpp +jvm.cpp dtraceJSDT.hpp jvm.cpp events.hpp jvm.cpp handles.inline.hpp jvm.cpp histogram.hpp @@ -2498,7 +2543,7 @@ 
klassKlass.cpp instanceOop.hpp klassKlass.cpp klassKlass.hpp klassKlass.cpp klassOop.hpp -klassKlass.cpp markSweep.hpp +klassKlass.cpp markSweep.inline.hpp klassKlass.cpp methodKlass.hpp klassKlass.cpp objArrayKlass.hpp klassKlass.cpp oop.inline.hpp @@ -2525,7 +2570,7 @@ klassVtable.cpp jvmtiRedefineClassesTrace.hpp klassVtable.cpp klassOop.hpp klassVtable.cpp klassVtable.hpp -klassVtable.cpp markSweep.hpp +klassVtable.cpp markSweep.inline.hpp klassVtable.cpp methodOop.hpp klassVtable.cpp objArrayOop.hpp klassVtable.cpp oop.inline.hpp @@ -2638,6 +2683,9 @@ markOop.inline.hpp markOop.hpp markSweep.cpp compileBroker.hpp + +markSweep.hpp collectedHeap.hpp + memRegion.cpp globals.hpp memRegion.cpp memRegion.hpp @@ -2737,7 +2785,7 @@ methodDataKlass.cpp gcLocker.hpp methodDataKlass.cpp handles.inline.hpp methodDataKlass.cpp klassOop.hpp -methodDataKlass.cpp markSweep.hpp +methodDataKlass.cpp markSweep.inline.hpp methodDataKlass.cpp methodDataKlass.hpp methodDataKlass.cpp methodDataOop.hpp methodDataKlass.cpp oop.inline.hpp @@ -2752,7 +2800,6 @@ methodDataOop.cpp deoptimization.hpp methodDataOop.cpp handles.inline.hpp methodDataOop.cpp linkResolver.hpp -methodDataOop.cpp markSweep.hpp methodDataOop.cpp markSweep.inline.hpp methodDataOop.cpp methodDataOop.hpp methodDataOop.cpp oop.inline.hpp @@ -2770,7 +2817,7 @@ methodKlass.cpp interpreter.hpp methodKlass.cpp javaClasses.hpp methodKlass.cpp klassOop.hpp -methodKlass.cpp markSweep.hpp +methodKlass.cpp markSweep.inline.hpp methodKlass.cpp methodDataOop.hpp methodKlass.cpp methodKlass.hpp methodKlass.cpp oop.inline.hpp @@ -2784,6 +2831,7 @@ methodKlass.hpp methodOop.hpp methodLiveness.cpp allocation.inline.hpp +methodLiveness.cpp bitMap.inline.hpp methodLiveness.cpp bytecode.hpp methodLiveness.cpp bytecodes.hpp methodLiveness.cpp ciMethod.hpp @@ -2871,7 +2919,7 @@ mutex_.inline.hpp os_.inline.hpp mutex_.inline.hpp thread_.inline.hpp -nativeInst_.cpp assembler_.inline.hpp +nativeInst_.cpp assembler_.inline.hpp nativeInst_.cpp handles.hpp nativeInst_.cpp nativeInst_.hpp nativeInst_.cpp oop.hpp @@ -2915,7 +2963,7 @@ nmethod.cpp compileLog.hpp nmethod.cpp compiledIC.hpp nmethod.cpp compilerOracle.hpp -nmethod.cpp disassembler_.hpp +nmethod.cpp disassembler.hpp nmethod.cpp dtrace.hpp nmethod.cpp events.hpp nmethod.cpp jvmtiRedefineClassesTrace.hpp @@ -2930,6 +2978,11 @@ nmethod.hpp codeBlob.hpp nmethod.hpp pcDesc.hpp +numberSeq.cpp debug.hpp +numberSeq.cpp numberSeq.hpp +numberSeq.cpp globalDefinitions.hpp +numberSeq.cpp allocation.inline.hpp + objArrayKlass.cpp collectedHeap.inline.hpp objArrayKlass.cpp copy.hpp objArrayKlass.cpp genOopClosures.inline.hpp @@ -2947,6 +3000,7 @@ objArrayKlass.cpp universe.inline.hpp objArrayKlass.cpp vmSymbols.hpp + objArrayKlass.hpp arrayKlass.hpp objArrayKlass.hpp instanceKlass.hpp objArrayKlass.hpp specialized_oop_closures.hpp @@ -2954,6 +3008,7 @@ objArrayKlassKlass.cpp collectedHeap.inline.hpp objArrayKlassKlass.cpp instanceKlass.hpp objArrayKlassKlass.cpp javaClasses.hpp +objArrayKlassKlass.cpp markSweep.inline.hpp objArrayKlassKlass.cpp objArrayKlassKlass.hpp objArrayKlassKlass.cpp oop.inline.hpp objArrayKlassKlass.cpp oop.inline2.hpp @@ -2962,6 +3017,7 @@ objArrayKlassKlass.hpp arrayKlassKlass.hpp objArrayKlassKlass.hpp objArrayKlass.hpp +objArrayOop.cpp objArrayKlass.hpp objArrayOop.cpp objArrayOop.hpp objArrayOop.cpp oop.inline.hpp @@ -3011,7 +3067,6 @@ oop.inline.hpp klass.hpp oop.inline.hpp klassOop.hpp oop.inline.hpp markOop.inline.hpp -oop.inline.hpp markSweep.hpp oop.inline.hpp markSweep.inline.hpp 
oop.inline.hpp oop.hpp oop.inline.hpp os.hpp @@ -3067,11 +3122,13 @@ oopMap.cpp signature.hpp oopMap.hpp allocation.hpp +oopMapCache.cpp jvmtiRedefineClassesTrace.hpp oopMap.hpp compressedStream.hpp oopMap.hpp growableArray.hpp oopMap.hpp vmreg.hpp oopMapCache.cpp allocation.inline.hpp +oopMapCache.cpp jvmtiRedefineClassesTrace.hpp oopMapCache.cpp handles.inline.hpp oopMapCache.cpp jvmtiRedefineClassesTrace.hpp oopMapCache.cpp oop.inline.hpp @@ -3137,7 +3194,7 @@ os_.cpp allocation.inline.hpp os_.cpp arguments.hpp -os_.cpp assembler_.inline.hpp +os_.cpp assembler_.inline.hpp os_.cpp classLoader.hpp os_.cpp events.hpp os_.cpp extendedPC.hpp @@ -3171,7 +3228,7 @@ os_.cpp allocation.inline.hpp os_.cpp arguments.hpp -os_.cpp assembler_.inline.hpp +os_.cpp assembler_.inline.hpp os_.cpp attachListener.hpp os_.cpp classLoader.hpp os_.cpp compileBroker.hpp @@ -3180,6 +3237,7 @@ os_.cpp extendedPC.hpp os_.cpp filemap.hpp os_.cpp globals.hpp +os_.cpp growableArray.hpp os_.cpp hpi.hpp os_.cpp icBuffer.hpp os_.cpp interfaceSupport.hpp @@ -3229,7 +3287,7 @@ osThread.hpp objectMonitor.hpp osThread.hpp top.hpp -osThread_.cpp assembler_.inline.hpp +osThread_.cpp assembler_.inline.hpp osThread_.cpp atomic.hpp osThread_.cpp handles.inline.hpp osThread_.cpp mutexLocker.hpp @@ -3321,6 +3379,10 @@ permGen.cpp oop.inline.hpp permGen.cpp permGen.hpp permGen.cpp universe.hpp +permGen.cpp gcLocker.hpp +permGen.cpp gcLocker.inline.hpp +permGen.cpp vmGCOperations.hpp +permGen.cpp vmThread.hpp permGen.hpp gcCause.hpp permGen.hpp generation.hpp @@ -3364,8 +3426,6 @@ referencePolicy.cpp referencePolicy.hpp referencePolicy.cpp universe.hpp -referencePolicy.hpp oop.hpp - referenceProcessor.cpp collectedHeap.hpp referenceProcessor.cpp collectedHeap.inline.hpp referenceProcessor.cpp java.hpp @@ -3377,6 +3437,7 @@ referenceProcessor.cpp systemDictionary.hpp referenceProcessor.hpp instanceRefKlass.hpp +referenceProcessor.hpp referencePolicy.hpp reflection.cpp arguments.hpp reflection.cpp handles.inline.hpp @@ -3438,7 +3499,7 @@ register_definitions_.cpp register.hpp register_definitions_.cpp register_.hpp -relocInfo.cpp assembler_.inline.hpp +relocInfo.cpp assembler_.inline.hpp relocInfo.cpp compiledIC.hpp relocInfo.cpp copy.hpp relocInfo.cpp nativeInst_.hpp @@ -3451,8 +3512,9 @@ relocInfo.hpp top.hpp relocInfo_.cpp assembler.inline.hpp -relocInfo_.cpp assembler_.inline.hpp +relocInfo_.cpp assembler_.inline.hpp relocInfo_.cpp nativeInst_.hpp +relocInfo_.cpp oop.inline.hpp relocInfo_.cpp relocInfo.hpp relocInfo_.cpp safepoint.hpp @@ -3609,6 +3671,7 @@ sharedRuntime.cpp interpreter.hpp sharedRuntime.cpp javaCalls.hpp sharedRuntime.cpp jvmtiExport.hpp +sharedRuntime.cpp jvmtiRedefineClassesTrace.hpp sharedRuntime.cpp nativeInst_.hpp sharedRuntime.cpp nativeLookup.hpp sharedRuntime.cpp oop.inline.hpp @@ -3633,11 +3696,12 @@ sharedRuntime.hpp threadLocalStorage.hpp sharedRuntime_.cpp assembler.hpp -sharedRuntime_.cpp assembler_.inline.hpp +sharedRuntime_.cpp assembler_.inline.hpp sharedRuntime_.cpp compiledICHolderOop.hpp sharedRuntime_.cpp debugInfoRec.hpp sharedRuntime_.cpp icBuffer.hpp sharedRuntime_.cpp interpreter.hpp +sharedRuntime_.cpp jvmtiRedefineClassesTrace.hpp sharedRuntime_.cpp sharedRuntime.hpp sharedRuntime_.cpp vframeArray.hpp sharedRuntime_.cpp vmreg_.inline.hpp @@ -3682,6 +3746,7 @@ space.cpp safepoint.hpp space.cpp space.hpp space.cpp space.inline.hpp +space.cpp spaceDecorator.hpp space.cpp systemDictionary.hpp space.cpp universe.inline.hpp space.cpp vmSymbols.hpp @@ -3704,9 +3769,18 @@ space.inline.hpp 
space.hpp space.inline.hpp universe.hpp +spaceDecorator.hpp globalDefinitions.hpp +spaceDecorator.hpp mutableSpace.hpp +spaceDecorator.hpp space.hpp + +spaceDecorator.cpp copy.hpp +spaceDecorator.cpp spaceDecorator.hpp + specialized_oop_closures.cpp ostream.hpp specialized_oop_closures.cpp specialized_oop_closures.hpp +specialized_oop_closures.hpp atomic.hpp + stackMapFrame.cpp globalDefinitions.hpp stackMapFrame.cpp handles.inline.hpp stackMapFrame.cpp oop.inline.hpp @@ -3768,8 +3842,8 @@ statSampler.hpp perfData.hpp statSampler.hpp task.hpp -stubCodeGenerator.cpp assembler_.inline.hpp -stubCodeGenerator.cpp disassembler_.hpp +stubCodeGenerator.cpp assembler_.inline.hpp +stubCodeGenerator.cpp disassembler.hpp stubCodeGenerator.cpp forte.hpp stubCodeGenerator.cpp oop.inline.hpp stubCodeGenerator.cpp stubCodeGenerator.hpp @@ -3779,7 +3853,7 @@ stubCodeGenerator.hpp assembler.hpp stubGenerator_.cpp assembler.hpp -stubGenerator_.cpp assembler_.inline.hpp +stubGenerator_.cpp assembler_.inline.hpp stubGenerator_.cpp frame.inline.hpp stubGenerator_.cpp handles.inline.hpp stubGenerator_.cpp instanceOop.hpp @@ -3949,7 +4023,6 @@ taskqueue.hpp allocation.hpp taskqueue.hpp allocation.inline.hpp -taskqueue.hpp debug.hpp taskqueue.hpp mutex.hpp taskqueue.hpp orderAccess_.inline.hpp @@ -3987,6 +4060,7 @@ templateInterpreterGenerator_.hpp generate_platform_dependent_include +templateTable.cpp collectedHeap.hpp templateTable.cpp templateTable.hpp templateTable.cpp timer.hpp @@ -4491,6 +4565,7 @@ vm_operations.cpp compilerOracle.hpp vm_operations.cpp deoptimization.hpp vm_operations.cpp interfaceSupport.hpp +vm_operations.cpp isGCActiveMark.hpp vm_operations.cpp resourceArea.hpp vm_operations.cpp threadService.hpp vm_operations.cpp thread_.inline.hpp @@ -4511,7 +4586,7 @@ vm_version.hpp allocation.hpp vm_version.hpp ostream.hpp -vm_version_.cpp assembler_.inline.hpp +vm_version_.cpp assembler_.inline.hpp vm_version_.cpp java.hpp vm_version_.cpp os_.inline.hpp vm_version_.cpp resourceArea.hpp @@ -4536,12 +4611,13 @@ vmreg_.hpp generate_platform_dependent_include vtableStubs.cpp allocation.inline.hpp -vtableStubs.cpp disassembler_.hpp +vtableStubs.cpp disassembler.hpp vtableStubs.cpp forte.hpp vtableStubs.cpp handles.inline.hpp vtableStubs.cpp instanceKlass.hpp vtableStubs.cpp jvmtiExport.hpp vtableStubs.cpp klassVtable.hpp +vtableStubs.cpp oop.inline.hpp vtableStubs.cpp mutexLocker.hpp vtableStubs.cpp resourceArea.hpp vtableStubs.cpp sharedRuntime.hpp @@ -4551,7 +4627,7 @@ vtableStubs.hpp allocation.hpp vtableStubs_.cpp assembler.hpp -vtableStubs_.cpp assembler_.inline.hpp +vtableStubs_.cpp assembler_.inline.hpp vtableStubs_.cpp instanceKlass.hpp vtableStubs_.cpp interp_masm_.hpp vtableStubs_.cpp klassVtable.hpp --- old/hotspot/src/share/vm/includeDB_features 2009-08-01 04:11:39.602936406 +0100 +++ new/hotspot/src/share/vm/includeDB_features 2009-08-01 04:11:39.533843474 +0100 @@ -1,5 +1,5 @@ // -// Copyright 2007 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 2007-2008 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
// // This code is free software; you can redistribute it and/or modify it @@ -51,12 +51,13 @@ dump.cpp oopFactory.hpp dump.cpp resourceArea.hpp dump.cpp signature.hpp +dump.cpp spaceDecorator.hpp dump.cpp symbolTable.hpp dump.cpp systemDictionary.hpp dump.cpp vmThread.hpp dump.cpp vm_operations.hpp -dump_.cpp assembler_.inline.hpp +dump_.cpp assembler_.inline.hpp dump_.cpp compactingPermGenGen.hpp forte.cpp collectedHeap.inline.hpp @@ -98,6 +99,7 @@ heapDumper.cpp reflectionUtils.hpp heapDumper.cpp symbolTable.hpp heapDumper.cpp systemDictionary.hpp +heapDumper.cpp threadService.hpp heapDumper.cpp universe.hpp heapDumper.cpp vframe.hpp heapDumper.cpp vmGCOperations.hpp @@ -113,6 +115,8 @@ heapInspection.cpp os.hpp heapInspection.cpp resourceArea.hpp +javaCalls.cpp jniCheck.hpp + jniCheck.cpp fieldDescriptor.hpp jniCheck.cpp handles.hpp jniCheck.cpp instanceKlass.hpp --- old/hotspot/src/share/vm/includeDB_gc 2009-08-01 04:11:40.421643215 +0100 +++ new/hotspot/src/share/vm/includeDB_gc 2009-08-01 04:11:40.353155093 +0100 @@ -28,21 +28,22 @@ collectedHeap.cpp collectedHeap.inline.hpp collectedHeap.cpp init.hpp collectedHeap.cpp oop.inline.hpp +collectedHeap.cpp systemDictionary.hpp collectedHeap.cpp thread_.inline.hpp collectedHeap.hpp allocation.hpp collectedHeap.hpp barrierSet.hpp collectedHeap.hpp gcCause.hpp collectedHeap.hpp handles.hpp -collectedHeap.hpp perfData.hpp +collectedHeap.hpp perfData.hpp collectedHeap.hpp safepoint.hpp collectedHeap.inline.hpp arrayOop.hpp collectedHeap.inline.hpp collectedHeap.hpp collectedHeap.inline.hpp copy.hpp collectedHeap.inline.hpp jvmtiExport.hpp -collectedHeap.inline.hpp lowMemoryDetector.hpp -collectedHeap.inline.hpp sharedRuntime.hpp +collectedHeap.inline.hpp lowMemoryDetector.hpp +collectedHeap.inline.hpp sharedRuntime.hpp collectedHeap.inline.hpp thread.hpp collectedHeap.inline.hpp threadLocalAllocBuffer.inline.hpp collectedHeap.inline.hpp universe.hpp --- old/hotspot/src/share/vm/includeDB_gc_parallel 2009-08-01 04:11:41.224429678 +0100 +++ new/hotspot/src/share/vm/includeDB_gc_parallel 2009-08-01 04:11:41.147263515 +0100 @@ -21,11 +21,21 @@ // have any questions. 
// +assembler_.cpp g1SATBCardTableModRefBS.hpp +assembler_.cpp g1CollectedHeap.inline.hpp +assembler_.cpp heapRegion.hpp + collectorPolicy.cpp cmsAdaptiveSizePolicy.hpp collectorPolicy.cpp cmsGCAdaptivePolicyCounters.hpp compiledICHolderKlass.cpp oop.pcgc.inline.hpp +constantPoolKlass.cpp cardTableRS.hpp +constantPoolKlass.cpp oop.pcgc.inline.hpp +constantPoolKlass.cpp psPromotionManager.inline.hpp +constantPoolKlass.cpp psScavenge.inline.hpp +constantPoolKlass.cpp parOopClosures.inline.hpp + genCollectedHeap.cpp concurrentMarkSweepThread.hpp genCollectedHeap.cpp vmCMSOperations.hpp @@ -37,6 +47,9 @@ heapInspection.cpp parallelScavengeHeap.hpp +instanceKlass.cpp heapRegionSeq.inline.hpp +instanceKlass.cpp g1CollectedHeap.inline.hpp +instanceKlass.cpp g1OopClosures.inline.hpp instanceKlass.cpp oop.pcgc.inline.hpp instanceKlass.cpp psPromotionManager.inline.hpp instanceKlass.cpp psScavenge.inline.hpp @@ -48,6 +61,9 @@ instanceKlassKlass.cpp psScavenge.inline.hpp instanceKlassKlass.cpp parOopClosures.inline.hpp +instanceRefKlass.cpp heapRegionSeq.inline.hpp +instanceRefKlass.cpp g1CollectedHeap.inline.hpp +instanceRefKlass.cpp g1OopClosures.inline.hpp instanceRefKlass.cpp oop.pcgc.inline.hpp instanceRefKlass.cpp psPromotionManager.inline.hpp instanceRefKlass.cpp oop.pcgc.inline.hpp @@ -71,6 +87,7 @@ memoryService.cpp cmsPermGen.hpp memoryService.cpp concurrentMarkSweepGeneration.hpp +memoryService.cpp g1CollectedHeap.inline.hpp memoryService.cpp parNewGeneration.hpp memoryService.cpp parallelScavengeHeap.hpp memoryService.cpp psMemoryPool.hpp @@ -81,6 +98,9 @@ methodDataKlass.cpp oop.pcgc.inline.hpp methodDataKlass.cpp psScavenge.inline.hpp +objArrayKlass.cpp heapRegionSeq.inline.hpp +objArrayKlass.cpp g1CollectedHeap.inline.hpp +objArrayKlass.cpp g1OopClosures.inline.hpp objArrayKlass.cpp oop.pcgc.inline.hpp objArrayKlass.cpp psPromotionManager.inline.hpp objArrayKlass.cpp psScavenge.inline.hpp @@ -123,6 +143,9 @@ thread.cpp concurrentMarkSweepThread.hpp thread.cpp pcTasks.hpp +thread.hpp dirtyCardQueue.hpp +thread.hpp satbQueue.hpp + universe.cpp parallelScavengeHeap.hpp universe.cpp cmsCollectorPolicy.hpp universe.cpp cmsAdaptiveSizePolicy.hpp --- old/hotspot/src/share/vm/includeDB_jvmti 2009-08-01 04:11:42.140557394 +0100 +++ new/hotspot/src/share/vm/includeDB_jvmti 2009-08-01 04:11:42.050337967 +0100 @@ -209,6 +209,7 @@ jvmtiManageCapabilities.hpp allocation.hpp jvmtiManageCapabilities.hpp jvmti.h +jvmtiRedefineClasses.cpp bitMap.inline.hpp jvmtiRedefineClasses.cpp codeCache.hpp jvmtiRedefineClasses.cpp deoptimization.hpp jvmtiRedefineClasses.cpp gcLocker.hpp --- old/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp 2009-08-01 04:11:43.063125074 +0100 +++ new/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp 2009-08-01 04:11:42.967441099 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)bytecodeInterpreter.cpp 1.31 07/08/29 13:42:24 JVM" -#endif /* - * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ @@ -40,7 +37,7 @@ */ #undef USELABELS #ifdef __GNUC__ -/* +/* ASSERT signifies debugging. 
It is much easier to step thru bytecodes if we don't use the computed goto approach. */ @@ -59,7 +56,7 @@ #endif /* - * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next + * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next * opcode before going back to the top of the while loop, rather then having * the top of the while loop handle it. This provides a better opportunity * for instruction scheduling. Some compilers just do this prefetch @@ -100,7 +97,7 @@ Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \ } \ RESET_LAST_JAVA_FRAME(); \ - CACHE_STATE(); + CACHE_STATE(); // Normal throw of a java error #define VM_JAVA_ERROR(name, msg) \ @@ -130,7 +127,7 @@ incremented. JvmtiExport::at_single_stepping_point() may cause a breakpoint opcode to get inserted at the current PC to allow the debugger to coalesce single-step events. - + As a result if we call at_single_stepping_point() we refetch opcode to get the current opcode. This will override any other prefetching that might have occurred. @@ -156,7 +153,7 @@ } \ } #else -#define DEBUGGER_SINGLE_STEP_NOTIFY() +#define DEBUGGER_SINGLE_STEP_NOTIFY() #endif /* @@ -315,7 +312,7 @@ /* * Macros for caching and flushing the interpreter state. Some local * variables need to be flushed out to the frame before we do certain - * things (like pushing frames or becomming gc safe) and some need to + * things (like pushing frames or becomming gc safe) and some need to * be recached later (like after popping a frame). We could use one * macro to cache or decache everything, but this would be less then * optimal because we don't always need to cache or decache everything @@ -335,8 +332,8 @@ #define CACHE_CP() cp = istate->constants(); #define CACHE_LOCALS() locals = istate->locals(); #undef CACHE_FRAME -#define CACHE_FRAME() - +#define CACHE_FRAME() + /* * CHECK_NULL - Macro for throwing a NullPointerException if the object * passed is a null ref. @@ -406,7 +403,7 @@ #endif // In order to simplify some tests based on switches set at runtime - // we invoke the interpreter a single time after switches are enabled + // we invoke the interpreter a single time after switches are enabled // and set simpler to to test variables rather than method calls or complex // boolean expressions. 
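[Editor's note, not part of the patch] The hunks above are mostly whitespace and comment cleanups in the portable C++ interpreter, and the comments they touch describe its two dispatch strategies: a plain switch loop that prefetches the next opcode (PREFETCH_OPCCODE) and GCC computed-goto dispatch through a label table (USELABELS). The stand-alone sketch below illustrates both idioms with entirely hypothetical opcodes and is only an editor's aid for readers unfamiliar with the technique; it does not reuse any HotSpot code.

/* Minimal sketch: switch dispatch with opcode prefetch vs. computed-goto
   dispatch (GNU labels-as-values, guarded by __GNUC__ as in the source). */
#include <stdio.h>

enum { OP_PUSH1, OP_ADD, OP_PRINT, OP_HALT };   /* hypothetical opcodes */

static void run_switch(const unsigned char* pc) {
  int stack[16]; int sp = -1;
  unsigned char opcode = *pc;                   /* prefetch first opcode */
  for (;;) {
    switch (opcode) {
    case OP_PUSH1: stack[++sp] = 1;               opcode = *++pc; continue;
    case OP_ADD:   stack[sp-1] += stack[sp]; --sp; opcode = *++pc; continue;
    case OP_PRINT: printf("%d\n", stack[sp]);      opcode = *++pc; continue;
    case OP_HALT:  return;
    }
  }
}

#ifdef __GNUC__
static void run_goto(const unsigned char* pc) {
  int stack[16]; int sp = -1;
  /* one label per opcode; dispatch jumps straight to the next handler */
  static void* const labels[] = { &&l_push1, &&l_add, &&l_print, &&l_halt };
#define DISPATCH() goto *labels[*pc++]
  DISPATCH();
 l_push1: stack[++sp] = 1;                 DISPATCH();
 l_add:   stack[sp-1] += stack[sp]; --sp;  DISPATCH();
 l_print: printf("%d\n", stack[sp]);       DISPATCH();
 l_halt:  return;
#undef DISPATCH
}
#endif

int main(void) {
  const unsigned char code[] = { OP_PUSH1, OP_PUSH1, OP_ADD, OP_PRINT, OP_HALT };
  run_switch(code);        /* prints 2 */
#ifdef __GNUC__
  run_goto(code);          /* prints 2 */
#endif
  return 0;
}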
@@ -458,7 +455,7 @@ #endif #ifdef USELABELS - const static void* const opclabels_data[256] = { + const static void* const opclabels_data[256] = { /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0, /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4, /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0, @@ -521,16 +518,16 @@ /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit, /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull, -/* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_fast_igetfield, -/* 0xCC */ &&opc_fastagetfield,&&opc_fast_aload_0, &&opc_fast_iaccess_0, &&opc__fast_aaccess_0, +/* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default, +/* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, -/* 0xD0 */ &&opc_fast_linearswitch, &&opc_fast_binaryswitch, &&opc_return_register_finalizer, &&opc_default, +/* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, -/* 0xE4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, +/* 0xE4 */ &&opc_default, &&opc_return_register_finalizer, &&opc_default, &&opc_default, /* 0xE8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, @@ -556,7 +553,7 @@ /* QQQ this should be a stack method so we don't know actual direction */ assert(istate->msg() == initialize || topOfStack >= istate->stack_limit() && - topOfStack < istate->stack_base(), + topOfStack < istate->stack_base(), "Stack top out of range"); switch (istate->msg()) { @@ -663,7 +660,7 @@ // fails we will go in to the runtime to revoke the object's bias. // Note that we first construct the presumed unbiased header so we // don't accidentally blow away another thread's valid bias. - intptr_t unbiased = (intptr_t) mark & (markOopDesc::biased_lock_mask_in_place | + intptr_t unbiased = (intptr_t) mark & (markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); if (Atomic::cmpxchg_ptr((intptr_t)THREAD | unbiased, (intptr_t*) rcvr->mark_addr(), unbiased) != unbiased) { @@ -728,9 +725,9 @@ #ifdef VM_JVMTI if (_jvmti_interp_events) { // Whenever JVMTI puts a thread in interp_only_mode, method - // entry/exit events are sent for that thread to track stack depth. + // entry/exit events are sent for that thread to track stack depth. if (THREAD->is_interp_only_mode()) { - CALL_VM(InterpreterRuntime::post_method_entry(THREAD), + CALL_VM(InterpreterRuntime::post_method_entry(THREAD), handle_exception); } } @@ -769,7 +766,7 @@ if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) { goto handle_Pop_Frame; } - + if (THREAD->has_pending_exception()) goto handle_exception; // Update the pc by the saved amount of the invoke bytecode size UPDATE_PC(istate->bcp_advance()); @@ -787,13 +784,13 @@ // Returned from an opcode that has completed. 
The stack has // the result all we need to do is skip across the bytecode // and continue (assuming there is no exception pending) - // + // // compute continuation length // // Note: it is possible to deopt at a return_register_finalizer opcode // because this requires entering the vm to do the registering. While the // opcode is complete we can't advance because there are no more opcodes - // much like trying to deopt at a poll return. In that has we simply + // much like trying to deopt at a poll return. In that has we simply // get out of here // if ( Bytecodes::code_at(pc, METHOD) == Bytecodes::_return_register_finalizer) { @@ -982,12 +979,12 @@ opcode = pc[1]; switch(opcode) { case Bytecodes::_aload: - SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0); + SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0); UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); case Bytecodes::_iload: case Bytecodes::_fload: - SET_STACK_SLOT(LOCALS_SLOT(reg), 0); + SET_STACK_SLOT(LOCALS_SLOT(reg), 0); UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); case Bytecodes::_lload: @@ -1016,7 +1013,7 @@ UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2); case Bytecodes::_iinc: { - int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4); + int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4); // Be nice to see what this generates.... QQQ SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg); UPDATE_PC_AND_CONTINUE(6); @@ -1061,15 +1058,15 @@ /* stack pop, dup, and insert opcodes */ - + CASE(_pop): /* Discard the top item on the stack */ UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); - + CASE(_pop2): /* Discard the top 2 items on the stack */ UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); - + CASE(_dup): /* Duplicate the top item on the stack */ dup(topOfStack); UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); @@ -1101,7 +1098,7 @@ /* Perform various binary integer operations */ -#undef OPC_INT_BINARY +#undef OPC_INT_BINARY #define OPC_INT_BINARY(opcname, opname, test) \ CASE(_i##opcname): \ if (test && (STACK_INT(-1) == 0)) { \ @@ -1141,7 +1138,7 @@ /* Perform various binary floating number operations */ /* On some machine/platforms/compilers div zero check can be implicit */ -#undef OPC_FLOAT_BINARY +#undef OPC_FLOAT_BINARY #define OPC_FLOAT_BINARY(opcname, opname) \ CASE(_d##opcname): { \ SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \ @@ -1162,8 +1159,8 @@ OPC_FLOAT_BINARY(div, Div); OPC_FLOAT_BINARY(rem, Rem); - /* Shift operations - * Shift left int and long: ishl, lshl + /* Shift operations + * Shift left int and long: ishl, lshl * Logical shift right int and long w/zero extension: iushr, lushr * Arithmetic shift right int and long w/sign extension: ishr, lshr */ @@ -1182,13 +1179,13 @@ -2); \ UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ } - + OPC_SHIFT_BINARY(shl, Shl); OPC_SHIFT_BINARY(shr, Shr); OPC_SHIFT_BINARY(ushr, Ushr); - /* Increment local variable by constant */ - CASE(_iinc): + /* Increment local variable by constant */ + CASE(_iinc): { // locals[pc[1]].j.i += (jbyte)(pc[2]); SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]); @@ -1198,22 +1195,22 @@ /* negate the value on the top of the stack */ CASE(_ineg): - SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1); + SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1); UPDATE_PC_AND_CONTINUE(1); CASE(_fneg): - SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1); + SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1); UPDATE_PC_AND_CONTINUE(1); CASE(_lneg): { - SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1); + SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1); UPDATE_PC_AND_CONTINUE(1); } CASE(_dneg): { - SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1); + 
SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1); UPDATE_PC_AND_CONTINUE(1); } @@ -1250,7 +1247,7 @@ SET_STACK_INT(r, 0); UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); } - + CASE(_l2f): /* convert top of stack long to float */ { jlong r = STACK_LONG(-1); @@ -1268,7 +1265,7 @@ } CASE(_f2i): /* Convert top of stack float to int */ - SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1); + SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1); UPDATE_PC_AND_CONTINUE(1); CASE(_f2l): /* convert top of stack float to long */ @@ -1433,8 +1430,8 @@ CASE(_fcmpl): CASE(_fcmpg): { - SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2), - STACK_FLOAT(-1), + SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2), + STACK_FLOAT(-1), (opcode == Bytecodes::_fcmpl ? -1 : 1)), -2); UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); @@ -1446,7 +1443,7 @@ int r = VMdoubleCompare(STACK_DOUBLE(-3), STACK_DOUBLE(-1), (opcode == Bytecodes::_dcmpl ? -1 : 1)); - MORE_STACK(-4); // Pop + MORE_STACK(-4); // Pop SET_STACK_INT(r, 0); UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); } @@ -1498,7 +1495,7 @@ /* Array access byte-codes */ /* Every array access byte-code starts out like this */ -// arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff); +// arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff); #define ARRAY_INTRO(arrayOff) \ arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \ jint index = STACK_INT(arrayOff + 1); \ @@ -1508,7 +1505,7 @@ sprintf(message, "%d", index); \ VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \ message); \ - } + } /* 32-bit loads. These handle conversion from < 32-bit types */ #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ @@ -1692,13 +1689,13 @@ ConstantPoolCacheEntry* cache; index = Bytes::get_native_u2(pc+1); - // QQQ Need to make this as inlined as possible. Probably need to - // split all the bytecode cases out so c++ compiler has a chance + // QQQ Need to make this as inlined as possible. Probably need to + // split all the bytecode cases out so c++ compiler has a chance // for constant prop to fold everything possible away. cache = cp->entry_at(index); if (!cache->is_resolved((Bytecodes::Code)opcode)) { - CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), + CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), handle_exception); cache = cp->entry_at(index); } @@ -1707,7 +1704,7 @@ if (_jvmti_interp_events) { int *count_addr; oop obj; - // Check to see if a field modification watch has been set + // Check to see if a field modification watch has been set // before we take the time to call into the VM. count_addr = (int *)JvmtiExport::get_field_access_count_addr(); if ( *count_addr > 0 ) { @@ -1716,8 +1713,8 @@ } else { obj = (oop) STACK_OBJECT(-1); } - CALL_VM(InterpreterRuntime::post_field_access(THREAD, - obj, + CALL_VM(InterpreterRuntime::post_field_access(THREAD, + obj, cache), handle_exception); } @@ -1789,7 +1786,7 @@ u2 index = Bytes::get_native_u2(pc+1); ConstantPoolCacheEntry* cache = cp->entry_at(index); if (!cache->is_resolved((Bytecodes::Code)opcode)) { - CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), + CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), handle_exception); cache = cp->entry_at(index); } @@ -1798,7 +1795,7 @@ if (_jvmti_interp_events) { int *count_addr; oop obj; - // Check to see if a field modification watch has been set + // Check to see if a field modification watch has been set // before we take the time to call into the VM. 
count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); if ( *count_addr > 0 ) { @@ -1814,9 +1811,9 @@ } CALL_VM(InterpreterRuntime::post_field_modification(THREAD, - obj, - cache, - (jvalue *)STACK_SLOT(-1)), + obj, + cache, + (jvalue *)STACK_SLOT(-1)), handle_exception); } } @@ -1887,7 +1884,7 @@ } UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); - } + } CASE(_new): { u2 index = Bytes::get_Java_u2(pc+1); @@ -1934,6 +1931,7 @@ } else { result->set_mark(markOopDesc::prototype()); } + result->set_klass_gap(0); result->set_klass(k_entry); SET_STACK_OBJECT(result, 0); UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); @@ -1961,7 +1959,7 @@ jint size = STACK_INT(-1); // stack grows down, dimensions are up! jint *dimarray = - (jint*)&topOfStack[dims * Interpreter::stackElementWords()+ + (jint*)&topOfStack[dims * Interpreter::stackElementWords()+ Interpreter::stackElementWords()-1]; //adjust pointer to start of stack element CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), @@ -1988,9 +1986,9 @@ // Check for compatibilty. This check must not GC!! // Seems way more expensive now that we must dispatch // - if (objKlassOop != klassOf && + if (objKlassOop != klassOf && !objKlassOop->klass_part()->is_subtype_of(klassOf)) { - ResourceMark rm(THREAD); + ResourceMark rm(THREAD); const char* objName = Klass::cast(objKlassOop)->external_name(); const char* klassName = Klass::cast(klassOf)->external_name(); char* message = SharedRuntime::generate_class_cast_message( @@ -2064,7 +2062,7 @@ case JVM_CONSTANT_UnresolvedString: case JVM_CONSTANT_UnresolvedClass: - case JVM_CONSTANT_UnresolvedClassInError: + case JVM_CONSTANT_UnresolvedClassInError: CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); SET_STACK_OBJECT(THREAD->vm_result(), 0); THREAD->set_vm_result(NULL); @@ -2113,7 +2111,7 @@ ConstantPoolCacheEntry* cache = cp->entry_at(index); if (!cache->is_resolved((Bytecodes::Code)opcode)) { - CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), + CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), handle_exception); cache = cp->entry_at(index); } @@ -2155,7 +2153,7 @@ // get receiver int parms = cache->parameter_size(); oop rcvr = STACK_OBJECT(-parms); - CHECK_NULL(rcvr); + CHECK_NULL(rcvr); instanceKlass* int2 = (instanceKlass*) rcvr->klass()->klass_part(); itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); int i; @@ -2174,7 +2172,7 @@ if (callee == NULL) { VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), ""); } - + istate->set_callee(callee); istate->set_callee_entry_point(callee->from_interpreted_entry()); #ifdef VM_JVMTI @@ -2196,11 +2194,11 @@ // out so c++ compiler has a chance for constant prop to fold everything possible away. 
if (!cache->is_resolved((Bytecodes::Code)opcode)) { - CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), + CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), handle_exception); cache = cp->entry_at(index); } - + istate->set_msg(call_method); { methodOop callee; @@ -2212,7 +2210,7 @@ int parms = cache->parameter_size(); // this works but needs a resourcemark and seems to create a vtable on every call: // methodOop callee = rcvr->klass()->klass_part()->vtable()->method_at(cache->f2()); - // + // // this fails with an assert // instanceKlass* rcvrKlass = instanceKlass::cast(STACK_OBJECT(-parms)->klass()); // but this works @@ -2225,7 +2223,7 @@ } a find on rcvr->klass()->klass_part() reports: - {type array char}{type array class} + {type array char}{type array class} - klass: {other class} but using instanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure @@ -2324,9 +2322,9 @@ CASE(_breakpoint): { Bytecodes::Code original_bytecode; - DECACHE_STATE(); - SET_LAST_JAVA_FRAME(); - original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, + DECACHE_STATE(); + SET_LAST_JAVA_FRAME(); + original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, METHOD, pc); RESET_LAST_JAVA_FRAME(); CACHE_STATE(); @@ -2345,9 +2343,9 @@ } /* switch(opc) */ - + #ifdef USELABELS - check_for_exception: + check_for_exception: #endif { if (!THREAD->has_pending_exception()) { @@ -2367,7 +2365,7 @@ HandleMarkCleaner __hmc(THREAD); Handle except_oop(THREAD, THREAD->pending_exception()); - // Prevent any subsequent HandleMarkCleaner in the VM + // Prevent any subsequent HandleMarkCleaner in the VM // from freeing the except_oop handle. HandleMark __hm(THREAD); @@ -2376,7 +2374,7 @@ intptr_t continuation_bci; // expression stack is emptied topOfStack = istate->stack_base() - Interpreter::stackElementWords(); - CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), + CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), handle_exception); except_oop = (oop) THREAD->vm_result(); @@ -2414,7 +2412,7 @@ THREAD->set_pending_exception(except_oop(), NULL, 0); goto handle_return; } /* handle_exception: */ - + // Return from an interpreter invocation with the result of the interpretation @@ -2484,7 +2482,7 @@ // we are at the initial entry then we should throw an exception. 
// It is not clear the template based interpreter does this // correctly - + BasicObjectLock* base = istate->monitor_base(); BasicObjectLock* end = (BasicObjectLock*) istate->stack_base(); bool method_unlock_needed = METHOD->is_synchronized(); @@ -2506,7 +2504,7 @@ end->set_obj(lockee); { // Prevent any HandleMarkCleaner from freeing our live handles - HandleMark __hm(THREAD); + HandleMark __hm(THREAD); CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); } } @@ -2515,7 +2513,7 @@ if (illegal_state_oop() == NULL && !suppress_error) { { // Prevent any HandleMarkCleaner from freeing our live handles - HandleMark __hm(THREAD); + HandleMark __hm(THREAD); CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); } assert(THREAD->has_pending_exception(), "Lost our exception!"); @@ -2532,7 +2530,7 @@ if (illegal_state_oop() == NULL && !suppress_error) { { // Prevent any HandleMarkCleaner from freeing our live handles - HandleMark __hm(THREAD); + HandleMark __hm(THREAD); CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); } assert(THREAD->has_pending_exception(), "Lost our exception!"); @@ -2567,7 +2565,7 @@ base->set_obj(rcvr); { // Prevent any HandleMarkCleaner from freeing our live handles - HandleMark __hm(THREAD); + HandleMark __hm(THREAD); CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); } if (THREAD->has_pending_exception()) { @@ -2608,11 +2606,11 @@ #ifdef VM_JVMTI if (_jvmti_interp_events) { // Whenever JVMTI puts a thread in interp_only_mode, method - // entry/exit events are sent for that thread to track stack depth. + // entry/exit events are sent for that thread to track stack depth. if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) { { // Prevent any HandleMarkCleaner from freeing our live handles - HandleMark __hm(THREAD); + HandleMark __hm(THREAD); CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD)); } } @@ -2628,7 +2626,7 @@ if (illegal_state_oop() != NULL || original_exception() != NULL) { // inform the frame manager we have no result istate->set_msg(throwing_exception); - if (illegal_state_oop() != NULL) + if (illegal_state_oop() != NULL) THREAD->set_pending_exception(illegal_state_oop(), NULL, 0); else THREAD->set_pending_exception(original_exception(), NULL, 0); @@ -2681,8 +2679,8 @@ // interpreter initialization. All other instances should be created by // the frame manager. 
BytecodeInterpreter::BytecodeInterpreter(messages msg) { - if (msg != initialize) ShouldNotReachHere(); - _msg = msg; + if (msg != initialize) ShouldNotReachHere(); + _msg = msg; _self_link = this; _prev_link = NULL; } @@ -2745,26 +2743,26 @@ *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; } -void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, +void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, int offset) { tag_stack(tos, frame::TagValue, offset); *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; } -void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, +void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, int offset) { tag_stack(tos, frame::TagValue, offset); *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; } -void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, +void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, int offset) { tag_stack(tos, frame::TagReference, offset); *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; } // needs to be platform dep for the 32 bit platforms. -void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, +void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, int offset) { tag_stack(tos, frame::TagValue, offset); tag_stack(tos, frame::TagValue, offset-1); @@ -2779,7 +2777,7 @@ ((VMJavaVal64*)addr)->d); } -void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, +void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, int offset) { tag_stack(tos, frame::TagValue, offset); ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; @@ -2787,7 +2785,7 @@ ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; } -void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, +void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, address addr, int offset) { tag_stack(tos, frame::TagValue, offset); ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; @@ -3002,7 +3000,7 @@ default: return("BAD MSG"); } } -void +void BytecodeInterpreter::print() { tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread); tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp); @@ -3041,7 +3039,7 @@ extern "C" { void PI(uintptr_t arg) { - ((BytecodeInterpreter*)arg)->print(); + ((BytecodeInterpreter*)arg)->print(); } } #endif // PRODUCT --- old/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xml 2009-08-01 04:11:44.085855353 +0100 +++ new/hotspot/src/share/vm/interpreter/bytecodeInterpreterWithChecks.xml 2009-08-01 04:11:44.008926646 +0100 @@ -1,6 +1,6 @@ + @@ -33,7 +34,6 @@ - --- old/hotspot/src/share/vm/interpreter/bytecodeStream.cpp 2009-08-01 04:11:45.699349260 +0100 +++ new/hotspot/src/share/vm/interpreter/bytecodeStream.cpp 2009-08-01 04:11:45.630877370 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)bytecodeStream.cpp 1.47 07/06/20 14:52:27 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,8 +31,9 @@ Bytecodes::Code RawBytecodeStream::raw_next_special(Bytecodes::Code code) { assert(!is_last_bytecode(), "should have been checked"); // set next bytecode position - address bcp = RawBytecodeStream::bcp(); - int l = Bytecodes::raw_special_length_at(bcp); + address bcp = RawBytecodeStream::bcp(); + address end = method()->code_base() + end_bci(); + int l = Bytecodes::raw_special_length_at(bcp, end); if (l <= 0 || (_bci + l) > _end_bci) { code = Bytecodes::_illegal; } else { @@ -42,8 +43,12 @@ _is_wide = false; // check for special (uncommon) cases if (code == Bytecodes::_wide) { - code = (Bytecodes::Code)bcp[1]; - _is_wide = true; + if (bcp + 1 >= end) { + code = Bytecodes::_illegal; + } else { + code = (Bytecodes::Code)bcp[1]; + _is_wide = true; + } } } _code = code; --- old/hotspot/src/share/vm/interpreter/bytecodes.cpp 2009-08-01 04:11:46.531388156 +0100 +++ new/hotspot/src/share/vm/interpreter/bytecodes.cpp 2009-08-01 04:11:46.451506826 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)bytecodes.cpp 1.97 07/06/20 14:52:27 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,13 +57,19 @@ return method->orig_bytecode_at(method->bci_from(bcp)); } -int Bytecodes::special_length_at(address bcp) { +int Bytecodes::special_length_at(address bcp, address end) { Code code = code_at(bcp); switch (code) { case _wide: + if (end != NULL && bcp + 1 >= end) { + return -1; // don't read past end of code buffer + } return wide_length_for(cast(*(bcp + 1))); case _tableswitch: { address aligned_bcp = (address)round_to((intptr_t)bcp + 1, jintSize); + if (end != NULL && aligned_bcp + 3*jintSize >= end) { + return -1; // don't read past end of code buffer + } jlong lo = (jint)Bytes::get_Java_u4(aligned_bcp + 1*jintSize); jlong hi = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize); jlong len = (aligned_bcp - bcp) + (3 + hi - lo + 1)*jintSize; @@ -76,6 +82,9 @@ case _fast_binaryswitch: // fall through case _fast_linearswitch: { address aligned_bcp = (address)round_to((intptr_t)bcp + 1, jintSize); + if (end != NULL && aligned_bcp + 2*jintSize >= end) { + return -1; // don't read past end of code buffer + } jlong npairs = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize); jlong len = (aligned_bcp - bcp) + (2 + 2*npairs)*jintSize; // only return len if it can be represented as a positive int; @@ -93,14 +102,17 @@ // verifier when reading in bytecode to verify. Other mechanisms that // run at runtime (such as generateOopMaps) need to iterate over the code // and don't expect to see breakpoints: they want to see the instruction -// which was replaces so that they can get the correct length and find +// which was replaced so that they can get the correct length and find // the next bytecode. -int Bytecodes::raw_special_length_at(address bcp) { +// +// 'end' indicates the end of the code buffer, which we should not try to read +// past. 
+int Bytecodes::raw_special_length_at(address bcp, address end) { Code code = code_or_bp_at(bcp); if (code == _breakpoint) { return 1; } else { - return special_length_at(bcp); + return special_length_at(bcp, end); } } --- old/hotspot/src/share/vm/interpreter/bytecodes.hpp 2009-08-01 04:11:47.415007203 +0100 +++ new/hotspot/src/share/vm/interpreter/bytecodes.hpp 2009-08-01 04:11:47.337991805 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)bytecodes.hpp 1.79 07/06/20 14:52:28 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -343,8 +343,10 @@ const char* wf = wide_format(code); return (wf == NULL) ? 0 : (int)strlen(wf); } - static int special_length_at(address bcp); - static int raw_special_length_at(address bcp); + // if 'end' is provided, it indicates the end of the code buffer which + // should not be read past when parsing. + static int special_length_at(address bcp, address end = NULL); + static int raw_special_length_at(address bcp, address end = NULL); static int length_at (address bcp) { int l = length_for(code_at(bcp)); return l > 0 ? l : special_length_at(bcp); } static int java_length_at (address bcp) { int l = length_for(java_code_at(bcp)); return l > 0 ? l : special_length_at(bcp); } static bool is_java_code (Code code) { return 0 <= code && code < number_of_java_codes; } --- old/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp 2009-08-01 04:11:48.265618236 +0100 +++ new/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp 2009-08-01 04:11:48.183076820 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)interpreterRuntime.hpp 1.143 07/05/05 17:05:38 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,10 @@ static methodOop method(JavaThread *thread) { return last_frame(thread).interpreter_frame_method(); } static address bcp(JavaThread *thread) { return last_frame(thread).interpreter_frame_bcp(); } static void set_bcp_and_mdp(address bcp, JavaThread*thread); - static Bytecodes::Code code(JavaThread *thread) { return Bytecodes::code_at(bcp(thread)); } + static Bytecodes::Code code(JavaThread *thread) { + // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272) + return Bytecodes::code_at(bcp(thread), method(thread)); + } static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); } static int one_byte_index(JavaThread *thread) { return bcp(thread)[1]; } static int two_byte_index(JavaThread *thread) { return Bytes::get_Java_u2(bcp(thread) + 1); } --- old/hotspot/src/share/vm/interpreter/oopMapCache.cpp 2009-08-01 04:11:49.120191860 +0100 +++ new/hotspot/src/share/vm/interpreter/oopMapCache.cpp 2009-08-01 04:11:49.029553861 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)oopMapCache.cpp 1.86 08/11/24 12:22:25 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -600,7 +600,7 @@ entry->fill(method, bci); entry_for->resource_copy(entry); assert(!entry_for->is_empty(), "A non-empty oop map should be returned"); - return; + return; } } --- old/hotspot/src/share/vm/interpreter/templateTable.cpp 2009-08-01 04:11:49.943660530 +0100 +++ new/hotspot/src/share/vm/interpreter/templateTable.cpp 2009-08-01 04:11:49.858263086 +0100 @@ -175,6 +175,7 @@ Template* TemplateTable::_desc; InterpreterMacroAssembler* TemplateTable::_masm; +BarrierSet* TemplateTable::_bs; void TemplateTable::def(Bytecodes::Code code, int flags, TosState in, TosState out, void (*gen)(), char filler) { @@ -247,6 +248,8 @@ // Initialize table TraceTime timer("TemplateTable initialization", TraceStartupTime); + _bs = Universe::heap()->barrier_set(); + // For better readability const char _ = ' '; const int ____ = 0; --- old/hotspot/src/share/vm/interpreter/templateTable.hpp 2009-08-01 04:11:50.805944641 +0100 +++ new/hotspot/src/share/vm/interpreter/templateTable.hpp 2009-08-01 04:11:50.717672540 +0100 @@ -85,6 +85,7 @@ static Template* _desc; // the current template to be generated static Bytecodes::Code bytecode() { return _desc->bytecode(); } + static BarrierSet* _bs; // Cache the barrier set. public: //%note templates_1 static InterpreterMacroAssembler* _masm; // the assembler used when generating templates --- old/hotspot/src/share/vm/libadt/dict.cpp 2009-08-01 04:11:51.688264697 +0100 +++ new/hotspot/src/share/vm/libadt/dict.cpp 2009-08-01 04:11:51.606184680 +0100 @@ -349,9 +349,12 @@ return strcmp((const char *)k1,(const char *)k2); } -// Slimey cheap key comparator. +// Cheap key comparator. int32 cmpkey(const void *key1, const void *key2) { - return (int32)((intptr_t)key1 - (intptr_t)key2); + if (key1 == key2) return 0; + intptr_t delta = (intptr_t)key1 - (intptr_t)key2; + if (delta > 0) return 1; + return -1; } //============================================================================= --- old/hotspot/src/share/vm/memory/allocation.hpp 2009-08-01 04:11:52.600162257 +0100 +++ new/hotspot/src/share/vm/memory/allocation.hpp 2009-08-01 04:11:52.518667948 +0100 @@ -341,6 +341,12 @@ DEBUG_ONLY(((ResourceObj *)res)->_allocation = RESOURCE_AREA;) return res; } + void* operator new(size_t size, void* where, allocation_type type) { + void* res = where; + // Set allocation type in the resource object + DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;) + return res; + } void operator delete(void* p); }; --- old/hotspot/src/share/vm/memory/barrierSet.hpp 2009-08-01 04:11:53.448121552 +0100 +++ new/hotspot/src/share/vm/memory/barrierSet.hpp 2009-08-01 04:11:53.372783093 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)barrierSet.hpp 1.18 07/05/05 17:05:43 JVM" #endif /* - * Copyright 2000-2002 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,6 +35,8 @@ ModRef, CardTableModRef, CardTableExtension, + G1SATBCT, + G1SATBCTLogging, Other, Uninit }; @@ -45,29 +47,32 @@ public: + BarrierSet() { _kind = Uninit; } // To get around prohibition on RTTI. - virtual BarrierSet::Name kind() { return _kind; } + BarrierSet::Name kind() { return _kind; } virtual bool is_a(BarrierSet::Name bsn) = 0; // These operations indicate what kind of barriers the BarrierSet has. 
virtual bool has_read_ref_barrier() = 0; virtual bool has_read_prim_barrier() = 0; virtual bool has_write_ref_barrier() = 0; + virtual bool has_write_ref_pre_barrier() = 0; virtual bool has_write_prim_barrier() = 0; // These functions indicate whether a particular access of the given // kinds requires a barrier. - virtual bool read_ref_needs_barrier(oop* field) = 0; + virtual bool read_ref_needs_barrier(void* field) = 0; virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0; - virtual bool write_ref_needs_barrier(oop* field, oop new_val) = 0; - virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes, juint val1, juint val2) = 0; + virtual bool write_ref_needs_barrier(void* field, oop new_val) = 0; + virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes, + juint val1, juint val2) = 0; // The first four operations provide a direct implementation of the // barrier set. An interpreter loop, for example, could call these // directly, as appropriate. // Invoke the barrier, if any, necessary when reading the given ref field. - virtual void read_ref_field(oop* field) = 0; + virtual void read_ref_field(void* field) = 0; // Invoke the barrier, if any, necessary when reading the given primitive // "field" of "bytes" bytes in "obj". @@ -78,9 +83,16 @@ // (For efficiency reasons, this operation is specialized for certain // barrier types. Semantically, it should be thought of as a call to the // virtual "_work" function below, which must implement the barrier.) - inline void write_ref_field(oop* field, oop new_val); + // First the pre-write versions... + inline void write_ref_field_pre(void* field, oop new_val); protected: - virtual void write_ref_field_work(oop* field, oop new_val) = 0; + virtual void write_ref_field_pre_work(void* field, oop new_val) {}; +public: + + // ...then the post-write version. + inline void write_ref_field(void* field, oop new_val); +protected: + virtual void write_ref_field_work(void* field, oop new_val) = 0; public: // Invoke the barrier, if any, necessary when writing the "bytes"-byte @@ -95,6 +107,7 @@ // the particular barrier. virtual bool has_read_ref_array_opt() = 0; virtual bool has_read_prim_array_opt() = 0; + virtual bool has_write_ref_array_pre_opt() { return true; } virtual bool has_write_ref_array_opt() = 0; virtual bool has_write_prim_array_opt() = 0; @@ -106,8 +119,14 @@ // barrier for an array whose elements are all in the given memory region. virtual void read_ref_array(MemRegion mr) = 0; virtual void read_prim_array(MemRegion mr) = 0; - + + virtual void write_ref_array_pre(MemRegion mr) {} inline void write_ref_array(MemRegion mr); + + // Static versions, suitable for calling from generated code. + static void static_write_ref_array_pre(HeapWord* start, size_t count); + static void static_write_ref_array_post(HeapWord* start, size_t count); + protected: virtual void write_ref_array_work(MemRegion mr) = 0; public: @@ -123,33 +142,6 @@ virtual void write_region_work(MemRegion mr) = 0; public: - // The remaining sets of operations are called by compilers or other code - // generators to insert barriers into generated code. There may be - // several such code generators; the signatures of these - // barrier-generating functions may differ from generator to generator. - // There will be a set of four function signatures for each code - // generator, which accomplish the generation of barriers of the four - // kinds listed above. 
- -#ifdef TBD - // Generates code to invoke the barrier, if any, necessary when reading - // the ref field at "offset" in "obj". - virtual void gen_read_ref_field() = 0; - - // Generates code to invoke the barrier, if any, necessary when reading - // the primitive field of "bytes" bytes at offset" in "obj". - virtual void gen_read_prim_field() = 0; - - // Generates code to invoke the barrier, if any, necessary when writing - // "new_val" into the ref field at "offset" in "obj". - virtual void gen_write_ref_field() = 0; - - // Generates code to invoke the barrier, if any, necessary when writing - // the "bytes"-byte value "new_val" into the primitive field at "offset" - // in "obj". - virtual void gen_write_prim_field() = 0; -#endif - // Some barrier sets create tables whose elements correspond to parts of // the heap; the CardTableModRefBS is an example. Such barrier sets will // normally reserve space for such tables, and commit parts of the table --- old/hotspot/src/share/vm/memory/barrierSet.inline.hpp 2009-08-01 04:11:54.376254616 +0100 +++ new/hotspot/src/share/vm/memory/barrierSet.inline.hpp 2009-08-01 04:11:54.303924706 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)barrierSet.inline.hpp 1.12 07/05/05 17:05:43 JVM" #endif /* - * Copyright 2001-2002 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,15 @@ // performance-critical calls when when the barrier is the most common // card-table kind. -void BarrierSet::write_ref_field(oop* field, oop new_val) { +void BarrierSet::write_ref_field_pre(void* field, oop new_val) { + if (kind() == CardTableModRef) { + ((CardTableModRefBS*)this)->inline_write_ref_field_pre(field, new_val); + } else { + write_ref_field_pre_work(field, new_val); + } +} + +void BarrierSet::write_ref_field(void* field, oop new_val) { if (kind() == CardTableModRef) { ((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val); } else { --- old/hotspot/src/share/vm/memory/blockOffsetTable.cpp 2009-08-01 04:11:55.184839386 +0100 +++ new/hotspot/src/share/vm/memory/blockOffsetTable.cpp 2009-08-01 04:11:55.103467282 +0100 @@ -187,7 +187,7 @@ "Offset card has an unexpected value"); size_t start_card_for_region = start_card; u_char offset = max_jubyte; - for (int i = 0; i <= N_powers-1; i++) { + for (int i = 0; i < N_powers; i++) { // -1 so that the the card with the actual offset is counted. Another -1 // so that the reach ends in this region and not at the start // of the next. --- old/hotspot/src/share/vm/memory/blockOffsetTable.hpp 2009-08-01 04:11:56.088385595 +0100 +++ new/hotspot/src/share/vm/memory/blockOffsetTable.hpp 2009-08-01 04:11:56.012669887 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)blockOffsetTable.hpp 1.57 07/05/05 17:05:43 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -202,6 +202,12 @@ // "index" in "_offset_array". HeapWord* address_for_index(size_t index) const; + // Return the address "p" incremented by the size of + // a region. This method does not align the address + // returned to the start of a region. It is a simple + // primitive. 
+ HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; } + // Shared space support void serialize(SerializeOopClosure* soc, HeapWord* start, HeapWord* end); }; @@ -211,6 +217,7 @@ ////////////////////////////////////////////////////////////////////////// class BlockOffsetArray: public BlockOffsetTable { friend class VMStructs; + friend class G1BlockOffsetArray; // temp. until we restructure and cleanup protected: // The following enums are used by do_block_internal() below enum Action { --- old/hotspot/src/share/vm/memory/cardTableModRefBS.cpp 2009-08-01 04:11:56.879572097 +0100 +++ new/hotspot/src/share/vm/memory/cardTableModRefBS.cpp 2009-08-01 04:11:56.802128570 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cardTableModRefBS.cpp 1.60 07/12/05 23:34:34 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -199,8 +199,10 @@ assert(_whole_heap.contains(new_region), "attempt to cover area not in reserved area"); debug_only(verify_guard();) - int ind = find_covering_region_by_base(new_region.start()); - MemRegion old_region = _covered[ind]; + // collided is true if the expansion would push into another committed region + debug_only(bool collided = false;) + int const ind = find_covering_region_by_base(new_region.start()); + MemRegion const old_region = _covered[ind]; assert(old_region.start() == new_region.start(), "just checking"); if (new_region.word_size() != old_region.word_size()) { // Commit new or uncommit old pages, if necessary. @@ -208,22 +210,46 @@ // Extend the end of this _commited region // to cover the end of any lower _committed regions. // This forms overlapping regions, but never interior regions. - HeapWord* max_prev_end = largest_prev_committed_end(ind); + HeapWord* const max_prev_end = largest_prev_committed_end(ind); if (max_prev_end > cur_committed.end()) { cur_committed.set_end(max_prev_end); } // Align the end up to a page size (starts are already aligned). - jbyte* new_end = byte_after(new_region.last()); + jbyte* const new_end = byte_after(new_region.last()); HeapWord* new_end_aligned = - (HeapWord*)align_size_up((uintptr_t)new_end, _page_size); + (HeapWord*) align_size_up((uintptr_t)new_end, _page_size); assert(new_end_aligned >= (HeapWord*) new_end, "align up, but less"); + int ri = 0; + for (ri = 0; ri < _cur_covered_regions; ri++) { + if (ri != ind) { + if (_committed[ri].contains(new_end_aligned)) { + assert((new_end_aligned >= _committed[ri].start()) && + (_committed[ri].start() > _committed[ind].start()), + "New end of committed region is inconsistent"); + new_end_aligned = _committed[ri].start(); + assert(new_end_aligned > _committed[ind].start(), + "New end of committed region is before start"); + debug_only(collided = true;) + // Should only collide with 1 region + break; + } + } + } +#ifdef ASSERT + for (++ri; ri < _cur_covered_regions; ri++) { + assert(!_committed[ri].contains(new_end_aligned), + "New end of committed region is in a second committed region"); + } +#endif // The guard page is always committed and should not be committed over. - HeapWord* new_end_for_commit = MIN2(new_end_aligned, _guard_region.start()); + HeapWord* const new_end_for_commit = MIN2(new_end_aligned, + _guard_region.start()); + if (new_end_for_commit > cur_committed.end()) { // Must commit new pages. 
- MemRegion new_committed = - MemRegion(cur_committed.end(), new_end_for_commit); + MemRegion const new_committed = + MemRegion(cur_committed.end(), new_end_for_commit); assert(!new_committed.is_empty(), "Region should not be empty here"); if (!os::commit_memory((char*)new_committed.start(), @@ -236,15 +262,17 @@ // the cur_committed region may include the guard region. } else if (new_end_aligned < cur_committed.end()) { // Must uncommit pages. - MemRegion uncommit_region = + MemRegion const uncommit_region = committed_unique_to_self(ind, MemRegion(new_end_aligned, cur_committed.end())); if (!uncommit_region.is_empty()) { if (!os::uncommit_memory((char*)uncommit_region.start(), - uncommit_region.byte_size())) { - // Do better than this for Merlin - vm_exit_out_of_memory(uncommit_region.byte_size(), - "card table contraction"); + uncommit_region.byte_size())) { + assert(false, "Card table contraction failed"); + // The call failed so don't change the end of the + // committed region. This is better than taking the + // VM down. + new_end_aligned = _committed[ind].end(); } } } @@ -258,10 +286,27 @@ } else { entry = byte_after(old_region.last()); } - assert(index_for(new_region.last()) < (int) _guard_index, + assert(index_for(new_region.last()) < _guard_index, "The guard card will be overwritten"); - jbyte* end = byte_after(new_region.last()); + // This line commented out cleans the newly expanded region and + // not the aligned up expanded region. + // jbyte* const end = byte_after(new_region.last()); + jbyte* const end = (jbyte*) new_end_for_commit; + assert((end >= byte_after(new_region.last())) || collided, + "Expect to be beyond new region unless impacting another region"); // do nothing if we resized downward. +#ifdef ASSERT + for (int ri = 0; ri < _cur_covered_regions; ri++) { + if (ri != ind) { + // The end of the new committed region should not + // be in any existing region unless it matches + // the start of the next region. + assert(!_committed[ri].contains(end) || + (_committed[ri].start() == (HeapWord*) end), + "Overlapping committed regions"); + } + } +#endif if (entry < end) { memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte))); } @@ -297,10 +342,65 @@ // Note that these versions are precise! The scanning code has to handle the // fact that the write barrier may be either precise or imprecise. -void CardTableModRefBS::write_ref_field_work(oop* field, oop newVal) { +void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) { inline_write_ref_field(field, newVal); } +/* + Claimed and deferred bits are used together in G1 during the evacuation + pause. These bits can have the following state transitions: + 1. The claimed bit can be put over any other card state. Except that + the "dirty -> dirty and claimed" transition is checked for in + G1 code and is not used. + 2. Deferred bit can be set only if the previous state of the card + was either clean or claimed. mark_card_deferred() is wait-free. + We do not care if the operation is be successful because if + it does not it will only result in duplicate entry in the update + buffer because of the "cache-miss". So it's not worth spinning. 
+ */ + + +bool CardTableModRefBS::claim_card(size_t card_index) { + jbyte val = _byte_map[card_index]; + assert(val != dirty_card_val(), "Shouldn't claim a dirty card"); + while (val == clean_card_val() || + (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) { + jbyte new_val = val; + if (val == clean_card_val()) { + new_val = (jbyte)claimed_card_val(); + } else { + new_val = val | (jbyte)claimed_card_val(); + } + jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val); + if (res == val) { + return true; + } + val = res; + } + return false; +} + +bool CardTableModRefBS::mark_card_deferred(size_t card_index) { + jbyte val = _byte_map[card_index]; + // It's already processed + if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) { + return false; + } + // Cached bit can be installed either on a clean card or on a claimed card. + jbyte new_val = val; + if (val == clean_card_val()) { + new_val = (jbyte)deferred_card_val(); + } else { + if (val & claimed_card_val()) { + new_val = val | (jbyte)deferred_card_val(); + } + } + if (new_val != val) { + Atomic::cmpxchg(new_val, &_byte_map[card_index], val); + } + return true; +} + void CardTableModRefBS::non_clean_card_iterate(Space* sp, MemRegion mr, @@ -401,7 +501,7 @@ } } -void CardTableModRefBS::invalidate(MemRegion mr) { +void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) { for (int i = 0; i < _cur_covered_regions; i++) { MemRegion mri = mr.intersection(_covered[i]); if (!mri.is_empty()) dirty_MemRegion(mri); @@ -429,11 +529,15 @@ } } +void CardTableModRefBS::dirty(MemRegion mr) { + jbyte* first = byte_for(mr.start()); + jbyte* last = byte_after(mr.last()); + memset(first, dirty_card, last-first); +} + // NOTES: // (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate() // iterates over dirty cards ranges in increasing address order. -// (2) Unlike, e.g., dirty_card_range_after_preclean() below, -// this method does not make the dirty cards prelceaned. void CardTableModRefBS::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) { for (int i = 0; i < _cur_covered_regions; i++) { @@ -459,7 +563,9 @@ } } -MemRegion CardTableModRefBS::dirty_card_range_after_preclean(MemRegion mr) { +MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr, + bool reset, + int reset_val) { for (int i = 0; i < _cur_covered_regions; i++) { MemRegion mri = mr.intersection(_covered[i]); if (!mri.is_empty()) { @@ -476,8 +582,10 @@ dirty_cards++, next_entry++); MemRegion cur_cards(addr_for(cur_entry), dirty_cards*card_size_in_words); - for (size_t i = 0; i < dirty_cards; i++) { - cur_entry[i] = precleaned_card; + if (reset) { + for (size_t i = 0; i < dirty_cards; i++) { + cur_entry[i] = reset_val; + } } return cur_cards; } --- old/hotspot/src/share/vm/memory/cardTableModRefBS.hpp 2009-08-01 04:11:57.758686369 +0100 +++ new/hotspot/src/share/vm/memory/cardTableModRefBS.hpp 2009-08-01 04:11:57.679301619 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cardTableModRefBS.hpp 1.53 07/10/04 10:49:32 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,10 +55,15 @@ enum CardValues { clean_card = -1, + // The mask contains zeros in places for all other values. 
+ clean_card_mask = clean_card - 31, + dirty_card = 0, precleaned_card = 1, - last_card = 4, - CT_MR_BS_last_reserved = 10 + claimed_card = 2, + deferred_card = 4, + last_card = 8, + CT_MR_BS_last_reserved = 16 }; // dirty and precleaned are equivalent wrt younger_refs_iter. @@ -153,17 +158,6 @@ return byte_for(p) + 1; } - // Mapping from card marking array entry to address of first word - HeapWord* addr_for(const jbyte* p) const { - assert(p >= _byte_map && p < _byte_map + _byte_map_size, - "out of bounds access to card marking array"); - size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte)); - HeapWord* result = (HeapWord*) (delta << card_shift); - assert(_whole_heap.contains(result), - "out of bounds accessor from card marking array"); - return result; - } - // Iterate over the portion of the card-table which covers the given // region mr in the given space and apply cl to any dirty sub-regions // of mr. cl and dcto_cl must either be the same closure or cl must @@ -266,17 +260,25 @@ card_size_in_words = card_size / sizeof(HeapWord) }; + static int clean_card_val() { return clean_card; } + static int clean_card_mask_val() { return clean_card_mask; } + static int dirty_card_val() { return dirty_card; } + static int claimed_card_val() { return claimed_card; } + static int precleaned_card_val() { return precleaned_card; } + static int deferred_card_val() { return deferred_card; } + // For RTTI simulation. - BarrierSet::Name kind() { return BarrierSet::CardTableModRef; } bool is_a(BarrierSet::Name bsn) { - return bsn == BarrierSet::CardTableModRef || bsn == BarrierSet::ModRef; + return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn); } CardTableModRefBS(MemRegion whole_heap, int max_covered_regions); // *** Barrier set functions. - inline bool write_ref_needs_barrier(oop* field, oop new_val) { + bool has_write_ref_pre_barrier() { return false; } + + inline bool write_ref_needs_barrier(void* field, oop new_val) { // Note that this assumes the perm gen is the highest generation // in the address space return new_val != NULL && !new_val->is_perm(); @@ -288,7 +290,7 @@ // these functions here for performance. protected: void write_ref_field_work(oop obj, size_t offset, oop newVal); - void write_ref_field_work(oop* field, oop newVal); + void write_ref_field_work(void* field, oop newVal); public: bool has_write_ref_array_opt() { return true; } @@ -318,11 +320,41 @@ // *** Card-table-barrier-specific things. - inline void inline_write_ref_field(oop* field, oop newVal) { + inline void inline_write_ref_field_pre(void* field, oop newVal) {} + + inline void inline_write_ref_field(void* field, oop newVal) { jbyte* byte = byte_for(field); *byte = dirty_card; } + // These are used by G1, when it uses the card table as a temporary data + // structure for card claiming. 
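[Editor's note] The new card values above are chosen so the claim/deferred tests that follow stay cheap: clean_card is -1 (all bits set) while every other value is a small non-negative constant, so clean_card_mask (clean_card - 31 == -32) carries exactly the high bits that only a clean card has here, and a single and/compare can tell a claimed or deferred card from a clean one. A small self-contained check of that arithmetic; the type and helper names are stand-ins, not HotSpot's:

#include <cassert>
#include <cstdint>

typedef int8_t jbyte_t;   // stand-in for HotSpot's jbyte

const int clean_card      = -1;
const int clean_card_mask = clean_card - 31;   // == -32, the high bits of clean
const int dirty_card      = 0;
const int precleaned_card = 1;
const int claimed_card    = 2;
const int deferred_card   = 4;

// Claimed test: high bits must be zero (not clean) and the claimed bit set.
bool is_card_claimed(jbyte_t v) {
  return (v & (clean_card_mask | claimed_card)) == claimed_card;
}

// Deferred test: same idea with the deferred bit.
bool is_card_deferred(jbyte_t v) {
  return (v & (clean_card_mask | deferred_card)) == deferred_card;
}

int main() {
  assert(!is_card_claimed((jbyte_t)clean_card));    // clean never reads as claimed
  assert( is_card_claimed((jbyte_t)claimed_card));
  assert( is_card_claimed((jbyte_t)(claimed_card | deferred_card)));
  assert(!is_card_deferred((jbyte_t)clean_card));
  assert(!is_card_deferred((jbyte_t)claimed_card));
  assert( is_card_deferred((jbyte_t)deferred_card));
  assert( is_card_deferred((jbyte_t)(claimed_card | deferred_card)));
  return 0;
}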
+ bool is_card_dirty(size_t card_index) { + return _byte_map[card_index] == dirty_card_val(); + } + + void mark_card_dirty(size_t card_index) { + _byte_map[card_index] = dirty_card_val(); + } + + bool is_card_claimed(size_t card_index) { + jbyte val = _byte_map[card_index]; + return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val(); + } + + bool claim_card(size_t card_index); + + bool is_card_clean(size_t card_index) { + return _byte_map[card_index] == clean_card_val(); + } + + bool is_card_deferred(size_t card_index) { + jbyte val = _byte_map[card_index]; + return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val(); + } + + bool mark_card_deferred(size_t card_index); + // Card marking array base (adjusted for heap low boundary) // This would be the 0th element of _byte_map, if the heap started at 0x0. // But since the heap starts at some higher address, this points to somewhere @@ -347,8 +379,9 @@ } // ModRefBS functions. - void invalidate(MemRegion mr); + virtual void invalidate(MemRegion mr, bool whole_heap = false); void clear(MemRegion mr); + void dirty(MemRegion mr); void mod_oop_in_space_iterate(Space* sp, OopClosure* cl, bool clear = false, bool before_save_marks = false); @@ -378,25 +411,50 @@ static uintx ct_max_alignment_constraint(); - // Apply closure cl to the dirty cards lying completely - // within MemRegion mr, setting the cards to precleaned. - void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl); + // Apply closure "cl" to the dirty cards containing some part of + // MemRegion "mr". + void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl); // Return the MemRegion corresponding to the first maximal run - // of dirty cards lying completely within MemRegion mr, after - // marking those cards precleaned. - MemRegion dirty_card_range_after_preclean(MemRegion mr); + // of dirty cards lying completely within MemRegion mr. + // If reset is "true", then sets those card table entries to the given + // value. + MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset, + int reset_val); // Set all the dirty cards in the given region to precleaned state. void preclean_dirty_cards(MemRegion mr); + // Provide read-only access to the card table array. + const jbyte* byte_for_const(const void* p) const { + return byte_for(p); + } + const jbyte* byte_after_const(const void* p) const { + return byte_after(p); + } + + // Mapping from card marking array entry to address of first word + HeapWord* addr_for(const jbyte* p) const { + assert(p >= _byte_map && p < _byte_map + _byte_map_size, + "out of bounds access to card marking array"); + size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte)); + HeapWord* result = (HeapWord*) (delta << card_shift); + assert(_whole_heap.contains(result), + "out of bounds accessor from card marking array"); + return result; + } + // Mapping from address to card marking array index. 
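[Editor's note] addr_for() above and index_for() just below are inverses of each other: a card index is the heap offset shifted right by card_shift, and addr_for() shifts it back to the first word the card covers. A toy version of that arithmetic follows; the 512-byte card size (card_shift == 9) is an assumption for illustration, not something this patch states:

#include <cassert>
#include <cstddef>
#include <cstdint>

const int    card_shift = 9;                          // assumed: 512-byte cards
const size_t card_size  = size_t(1) << card_shift;

// Card index for an address within the covered heap.
size_t index_for(uintptr_t heap_start, uintptr_t addr) {
  return (addr - heap_start) >> card_shift;
}

// First heap address covered by a given card (inverse of index_for).
uintptr_t addr_for(uintptr_t heap_start, size_t card_index) {
  return heap_start + (card_index << card_shift);
}

int main() {
  const uintptr_t heap_start = 0x100000;
  uintptr_t field_addr = heap_start + 3 * card_size + 40;   // somewhere in card 3
  size_t idx = index_for(heap_start, field_addr);
  assert(idx == 3);
  assert(addr_for(heap_start, idx) == heap_start + 3 * card_size);
  return 0;
}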
- int index_for(void* p) { + size_t index_for(void* p) { assert(_whole_heap.contains(p), "out of bounds access to card marking array"); return byte_for(p) - _byte_map; } + const jbyte* byte_for_index(const size_t card_index) const { + return _byte_map + card_index; + } + void verify(); void verify_guard(); @@ -405,6 +463,7 @@ static size_t par_chunk_heapword_alignment() { return CardsPerStrideChunk * card_size_in_words; } + }; class CardTableRS; --- old/hotspot/src/share/vm/memory/cardTableRS.cpp 2009-08-01 04:11:58.719304036 +0100 +++ new/hotspot/src/share/vm/memory/cardTableRS.cpp 2009-08-01 04:11:58.643266229 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cardTableRS.cpp 1.45 07/05/25 12:54:50 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,11 +29,26 @@ # include "incls/_cardTableRS.cpp.incl" CardTableRS::CardTableRS(MemRegion whole_heap, - int max_covered_regions) : - GenRemSet(&_ct_bs), - _ct_bs(whole_heap, max_covered_regions), - _cur_youngergen_card_val(youngergenP1_card) + int max_covered_regions) : + GenRemSet(), + _cur_youngergen_card_val(youngergenP1_card), + _regions_to_iterate(max_covered_regions - 1) { +#ifndef SERIALGC + if (UseG1GC) { + if (G1RSBarrierUseQueue) { + _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap, + max_covered_regions); + } else { + _ct_bs = new G1SATBCardTableModRefBS(whole_heap, max_covered_regions); + } + } else { + _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions); + } +#else + _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions); +#endif + set_bs(_ct_bs); _last_cur_val_in_gen = new jbyte[GenCollectedHeap::max_gens + 1]; if (_last_cur_val_in_gen == NULL) { vm_exit_during_initialization("Could not last_cur_val_in_gen array."); @@ -41,20 +56,19 @@ for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) { _last_cur_val_in_gen[i] = clean_card_val(); } - _ct_bs.set_CTRS(this); + _ct_bs->set_CTRS(this); } void CardTableRS::resize_covered_region(MemRegion new_region) { - _ct_bs.resize_covered_region(new_region); + _ct_bs->resize_covered_region(new_region); } jbyte CardTableRS::find_unused_youngergenP_card_value() { - GenCollectedHeap* gch = GenCollectedHeap::heap(); for (jbyte v = youngergenP1_card; v < cur_youngergen_and_prev_nonclean_card; v++) { bool seen = false; - for (int g = 0; g < gch->n_gens()+1; g++) { + for (int g = 0; g < _regions_to_iterate; g++) { if (_last_cur_val_in_gen[g] == v) { seen = true; break; @@ -194,7 +208,7 @@ // prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card // cur-younger-gen ==> cur_younger_gen // cur_youngergen_and_prev_nonclean_card ==> no change. 
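[Editor's note] The transition table quoted above (for the parallel barrier write_ref_field_gc_par(), which follows) can be read as a small pure function. The sketch below models only the three transitions visible in the comment; how a clean or dirty card is first promoted to the current younger-gen value is elided, and the enum names are stand-ins rather than HotSpot's constants:

#include <cassert>

enum CardState {
  prev_younger_gen,                  // value left over from the previous collection
  cur_younger_gen,                   // already marked for the current collection
  cur_youngergen_and_prev_nonclean
};

CardState next_state(CardState current) {
  switch (current) {
    case prev_younger_gen:
      // Non-clean before this collection too: record both facts.
      return cur_youngergen_and_prev_nonclean;
    case cur_younger_gen:
    case cur_youngergen_and_prev_nonclean:
      return current;                // already up to date: no change
  }
  return current;                    // unreachable; keeps compilers quiet
}

int main() {
  assert(next_state(prev_younger_gen) == cur_youngergen_and_prev_nonclean);
  assert(next_state(cur_younger_gen) == cur_younger_gen);
  assert(next_state(cur_youngergen_and_prev_nonclean) ==
         cur_youngergen_and_prev_nonclean);
  return 0;
}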
-void CardTableRS::write_ref_field_gc_par(oop* field, oop new_val) { +void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) { jbyte* entry = ct_bs()->byte_for(field); do { jbyte entry_val = *entry; @@ -222,13 +236,13 @@ } while (true); } -void CardTableRS::younger_refs_in_space_iterate(Space* sp, - OopsInGenClosure* cl) { - DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, _ct_bs.precision(), +void CardTableRS::younger_refs_in_space_iterate(Space* sp, + OopsInGenClosure* cl) { + DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, _ct_bs->precision(), cl->gen_boundary()); ClearNoncleanCardWrapper clear_cl(dcto_cl, this); - _ct_bs.non_clean_card_iterate(sp, sp->used_region_at_save_marks(), + _ct_bs->non_clean_card_iterate(sp, sp->used_region_at_save_marks(), dcto_cl, &clear_cl, false); } @@ -293,28 +307,36 @@ class VerifyCleanCardClosure: public OopClosure { - HeapWord* boundary; - HeapWord* begin; HeapWord* end; -public: - void do_oop(oop* p) { +private: + HeapWord* _boundary; + HeapWord* _begin; + HeapWord* _end; +protected: + template void do_oop_work(T* p) { HeapWord* jp = (HeapWord*)p; - if (jp >= begin && jp < end) { - guarantee(*p == NULL || (HeapWord*)p < boundary - || (HeapWord*)(*p) >= boundary, - "pointer on clean card crosses boundary"); + if (jp >= _begin && jp < _end) { + oop obj = oopDesc::load_decode_heap_oop(p); + guarantee(obj == NULL || + (HeapWord*)p < _boundary || + (HeapWord*)obj >= _boundary, + "pointer on clean card crosses boundary"); } } - VerifyCleanCardClosure(HeapWord* b, HeapWord* _begin, HeapWord* _end) : - boundary(b), begin(_begin), end(_end) {} +public: + VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) : + _boundary(b), _begin(begin), _end(end) {} + virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); } + virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); } }; class VerifyCTSpaceClosure: public SpaceClosure { +private: CardTableRS* _ct; HeapWord* _boundary; public: VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) : _ct(ct), _boundary(boundary) {} - void do_space(Space* s) { _ct->verify_space(s, _boundary); } + virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); } }; class VerifyCTGenClosure: public GenCollectedHeap::GenClosure { @@ -544,7 +566,7 @@ if (ch->kind() == CollectedHeap::GenCollectedHeap) { GenCollectedHeap::heap()->generation_iterate(&blk, false); - _ct_bs.verify(); + _ct_bs->verify(); // If the old gen collections also collect perm, then we are only // interested in perm-to-young pointers, not perm-to-old pointers. @@ -559,10 +581,16 @@ } -void CardTableRS::verify_empty(MemRegion mr) { +void CardTableRS::verify_aligned_region_empty(MemRegion mr) { if (!mr.is_empty()) { jbyte* cur_entry = byte_for(mr.start()); jbyte* limit = byte_after(mr.last()); + // The region mr may not start on a card boundary so + // the first card may reflect a write to the space + // just prior to mr. + if (!is_aligned(mr.start())) { + cur_entry++; + } for (;cur_entry < limit; cur_entry++) { guarantee(*cur_entry == CardTableModRefBS::clean_card, "Unexpected dirty card found"); --- old/hotspot/src/share/vm/memory/cardTableRS.hpp 2009-08-01 04:12:00.019313677 +0100 +++ new/hotspot/src/share/vm/memory/cardTableRS.hpp 2009-08-01 04:11:59.574842083 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cardTableRS.hpp 1.29 07/05/05 17:05:44 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,7 +47,7 @@ return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv); } - CardTableModRefBSForCTRS _ct_bs; + CardTableModRefBSForCTRS* _ct_bs; virtual void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl); @@ -76,6 +76,8 @@ jbyte _cur_youngergen_card_val; + int _regions_to_iterate; + jbyte cur_youngergen_card_val() { return _cur_youngergen_card_val; } @@ -99,7 +101,7 @@ CardTableRS* as_CardTableRS() { return this; } - CardTableModRefBS* ct_bs() { return &_ct_bs; } + CardTableModRefBS* ct_bs() { return _ct_bs; } // Override. void prepare_for_younger_refs_iterate(bool parallel); @@ -109,41 +111,43 @@ // closure application. void younger_refs_iterate(Generation* g, OopsInGenClosure* blk); - void inline_write_ref_field_gc(oop* field, oop new_val) { - jbyte* byte = _ct_bs.byte_for(field); + void inline_write_ref_field_gc(void* field, oop new_val) { + jbyte* byte = _ct_bs->byte_for(field); *byte = youngergen_card; } - void write_ref_field_gc_work(oop* field, oop new_val) { + void write_ref_field_gc_work(void* field, oop new_val) { inline_write_ref_field_gc(field, new_val); } // Override. Might want to devirtualize this in the same fashion as // above. Ensures that the value of the card for field says that it's // a younger card in the current collection. - virtual void write_ref_field_gc_par(oop* field, oop new_val); + virtual void write_ref_field_gc_par(void* field, oop new_val); void resize_covered_region(MemRegion new_region); bool is_aligned(HeapWord* addr) { - return _ct_bs.is_card_aligned(addr); + return _ct_bs->is_card_aligned(addr); } void verify(); - void verify_empty(MemRegion mr); + void verify_aligned_region_empty(MemRegion mr); - void clear(MemRegion mr) { _ct_bs.clear(mr); } - void clear_into_younger(Generation* gen, bool clear_perm); + void clear(MemRegion mr) { _ct_bs->clear(mr); } + void clear_into_younger(Generation* gen, bool clear_perm); - void invalidate(MemRegion mr) { _ct_bs.invalidate(mr); } + void invalidate(MemRegion mr, bool whole_heap = false) { + _ct_bs->invalidate(mr, whole_heap); + } void invalidate_or_clear(Generation* gen, bool younger, bool perm); static uintx ct_max_alignment_constraint() { return CardTableModRefBS::ct_max_alignment_constraint(); } - jbyte* byte_for(void* p) { return _ct_bs.byte_for(p); } - jbyte* byte_after(void* p) { return _ct_bs.byte_after(p); } - HeapWord* addr_for(jbyte* p) { return _ct_bs.addr_for(p); } + jbyte* byte_for(void* p) { return _ct_bs->byte_for(p); } + jbyte* byte_after(void* p) { return _ct_bs->byte_after(p); } + HeapWord* addr_for(jbyte* p) { return _ct_bs->addr_for(p); } bool is_prev_nonclean_card_val(jbyte v) { return --- old/hotspot/src/share/vm/memory/collectorPolicy.cpp 2009-08-01 04:12:00.932315567 +0100 +++ new/hotspot/src/share/vm/memory/collectorPolicy.cpp 2009-08-01 04:12:00.842575907 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)collectorPolicy.cpp 1.90 07/10/04 10:49:37 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,11 +34,11 @@ if (PermSize > MaxPermSize) { MaxPermSize = PermSize; } - PermSize = align_size_down(PermSize, min_alignment()); + PermSize = MAX2(min_alignment(), align_size_down_(PermSize, min_alignment())); MaxPermSize = align_size_up(MaxPermSize, max_alignment()); - MinPermHeapExpansion = align_size_down(MinPermHeapExpansion, min_alignment()); - MaxPermHeapExpansion = align_size_down(MaxPermHeapExpansion, min_alignment()); + MinPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MinPermHeapExpansion, min_alignment())); + MaxPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MaxPermHeapExpansion, min_alignment())); MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment()); @@ -58,47 +58,49 @@ void CollectorPolicy::initialize_size_info() { // User inputs from -mx and ms are aligned - _initial_heap_byte_size = align_size_up(Arguments::initial_heap_size(), - min_alignment()); - _min_heap_byte_size = align_size_up(Arguments::min_heap_size(), - min_alignment()); - _max_heap_byte_size = align_size_up(MaxHeapSize, max_alignment()); - - // Check validity of heap parameters from launcher - if (_initial_heap_byte_size == 0) { - _initial_heap_byte_size = NewSize + OldSize; - } else { - Universe::check_alignment(_initial_heap_byte_size, min_alignment(), - "initial heap"); + set_initial_heap_byte_size(Arguments::initial_heap_size()); + if (initial_heap_byte_size() == 0) { + set_initial_heap_byte_size(NewSize + OldSize); } - if (_min_heap_byte_size == 0) { - _min_heap_byte_size = NewSize + OldSize; - } else { - Universe::check_alignment(_min_heap_byte_size, min_alignment(), - "initial heap"); + set_initial_heap_byte_size(align_size_up(_initial_heap_byte_size, + min_alignment())); + + set_min_heap_byte_size(Arguments::min_heap_size()); + if (min_heap_byte_size() == 0) { + set_min_heap_byte_size(NewSize + OldSize); } + set_min_heap_byte_size(align_size_up(_min_heap_byte_size, + min_alignment())); + + set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment())); // Check heap parameter properties - if (_initial_heap_byte_size < M) { + if (initial_heap_byte_size() < M) { vm_exit_during_initialization("Too small initial heap"); } // Check heap parameter properties - if (_min_heap_byte_size < M) { + if (min_heap_byte_size() < M) { vm_exit_during_initialization("Too small minimum heap"); } - if (_initial_heap_byte_size <= NewSize) { + if (initial_heap_byte_size() <= NewSize) { // make sure there is at least some room in old space vm_exit_during_initialization("Too small initial heap for new size specified"); } - if (_max_heap_byte_size < _min_heap_byte_size) { + if (max_heap_byte_size() < min_heap_byte_size()) { vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified"); } - if (_initial_heap_byte_size < _min_heap_byte_size) { + if (initial_heap_byte_size() < min_heap_byte_size()) { vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified"); } - if (_max_heap_byte_size < _initial_heap_byte_size) { + if (max_heap_byte_size() < initial_heap_byte_size()) { vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); } + + if (PrintGCDetails && Verbose) { + gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap " + SIZE_FORMAT " Maximum heap " SIZE_FORMAT, + min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size()); + } } void CollectorPolicy::initialize_perm_generation(PermGen::Name pgnm) { @@ 
-118,8 +120,6 @@ int max_covered_regions) { switch (rem_set_name()) { case GenRemSet::CardTable: { - if (barrier_set_name() != BarrierSet::CardTableModRef) - vm_exit_during_initialization("Mismatch between RS and BS."); CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions); return res; } @@ -131,10 +131,26 @@ // GenCollectorPolicy methods. +size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { + size_t x = base_size / (NewRatio+1); + size_t new_gen_size = x > min_alignment() ? + align_size_down(x, min_alignment()) : + min_alignment(); + return new_gen_size; +} + +size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size, + size_t maximum_size) { + size_t alignment = min_alignment(); + size_t max_minus = maximum_size - alignment; + return desired_size < max_minus ? desired_size : max_minus; +} + + void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size, - size_t init_promo_size, - size_t init_survivor_size) { - double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0; + size_t init_promo_size, + size_t init_survivor_size) { + const double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0; _size_policy = new AdaptiveSizePolicy(init_eden_size, init_promo_size, init_survivor_size, @@ -213,74 +229,260 @@ assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment"); } +// Values set on the command line win over any ergonomically +// set command line parameters. +// Ergonomic choice of parameters are done before this +// method is called. Values for command line parameters such as NewSize +// and MaxNewSize feed those ergonomic choices into this method. +// This method makes the final generation sizings consistent with +// themselves and with overall heap sizings. +// In the absence of explicitly set command line flags, policies +// such as the use of NewRatio are used to size the generation. void GenCollectorPolicy::initialize_size_info() { CollectorPolicy::initialize_size_info(); - // Minimum sizes of the generations may be different than - // the initial sizes. - if (!FLAG_IS_DEFAULT(NewSize)) { - _min_gen0_size = NewSize; + // min_alignment() is used for alignment within a generation. + // There is additional alignment done down stream for some + // collectors that sometimes causes unwanted rounding up of + // generations sizes. + + // Determine maximum size of gen0 + + size_t max_new_size = 0; + if (FLAG_IS_CMDLINE(MaxNewSize)) { + if (MaxNewSize < min_alignment()) { + max_new_size = min_alignment(); + } else if (MaxNewSize >= max_heap_byte_size()) { + max_new_size = align_size_down(max_heap_byte_size() - min_alignment(), + min_alignment()); + warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or " + "greater than the entire heap (" SIZE_FORMAT "k). A " + "new generation size of " SIZE_FORMAT "k will be used.", + MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K); + } else { + max_new_size = align_size_down(MaxNewSize, min_alignment()); + } + + // The case for FLAG_IS_ERGO(MaxNewSize) could be treated + // specially at this point to just use an ergonomically set + // MaxNewSize to set max_new_size. For cases with small + // heaps such a policy often did not work because the MaxNewSize + // was larger than the entire heap. The interpretation given + // to ergonomically set flags is that the flags are set + // by different collectors for their own special needs but + // are not allowed to badly shape the heap. 
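[Editor's note] scale_by_NewRatio_aligned(), introduced above, is the arithmetic heart of this sizing: the young generation gets base_size / (NewRatio + 1), aligned down to the minimum alignment but never below one alignment unit. A standalone model, assuming an illustrative 64K minimum alignment:

#include <cassert>
#include <cstddef>

size_t align_down(size_t size, size_t alignment) {
  return size - (size % alignment);
}

// Mirrors scale_by_NewRatio_aligned(): NewRatio slice, aligned, floored.
size_t scale_by_new_ratio_aligned(size_t base_size, size_t new_ratio,
                                  size_t min_alignment) {
  size_t x = base_size / (new_ratio + 1);
  return x > min_alignment ? align_down(x, min_alignment) : min_alignment;
}

int main() {
  const size_t K = 1024, M = 1024 * K;
  const size_t min_alignment = 64 * K;       // illustrative value only
  // 256M heap with NewRatio=2: roughly a third of the heap, aligned down.
  size_t gen0 = scale_by_new_ratio_aligned(256 * M, 2, min_alignment);
  assert(gen0 % min_alignment == 0 && gen0 <= 256 * M / 3);
  // A tiny heap still yields at least one alignment unit for gen0.
  assert(scale_by_new_ratio_aligned(100 * K, 8, min_alignment) == min_alignment);
  return 0;
}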
This allows the + // different collectors to decide what's best for themselves + // without having to factor in the overall heap shape. It + // can be the case in the future that the collectors would + // only make "wise" ergonomics choices and this policy could + // just accept those choices. The choices currently made are + // not always "wise". } else { - _min_gen0_size = align_size_down(_min_heap_byte_size / (NewRatio+1), - min_alignment()); - // We bound the minimum size by NewSize below (since it historically + max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size()); + // Bound the maximum size by NewSize below (since it historically // would have been NewSize and because the NewRatio calculation could // yield a size that is too small) and bound it by MaxNewSize above. - // This is not always best. The NewSize calculated by CMS (which has - // a fixed minimum of 16m) can sometimes be "too" large. Consider - // the case where -Xmx32m. The CMS calculated NewSize would be about - // half the entire heap which seems too large. But the counter - // example is seen when the client defaults for NewRatio are used. - // An initial young generation size of 640k was observed - // with -Xmx128m -XX:MaxNewSize=32m when NewSize was not used - // as a lower bound as with - // _min_gen0_size = MIN2(_min_gen0_size, MaxNewSize); - // and 640k seemed too small a young generation. - _min_gen0_size = MIN2(MAX2(_min_gen0_size, NewSize), MaxNewSize); - } - - // Parameters are valid, compute area sizes. - size_t max_new_size = align_size_down(_max_heap_byte_size / (NewRatio+1), - min_alignment()); - max_new_size = MIN2(MAX2(max_new_size, _min_gen0_size), MaxNewSize); - - // desired_new_size is used to set the initial size. The - // initial size must be greater than the minimum size. - size_t desired_new_size = - align_size_down(_initial_heap_byte_size / (NewRatio+1), - min_alignment()); + // Ergonomics plays here by previously calculating the desired + // NewSize and MaxNewSize. + max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize); + } + assert(max_new_size > 0, "All paths should set max_new_size"); + + // Given the maximum gen0 size, determine the initial and + // minimum sizes. + + if (max_heap_byte_size() == min_heap_byte_size()) { + // The maximum and minimum heap sizes are the same so + // the generations minimum and initial must be the + // same as its maximum. + set_min_gen0_size(max_new_size); + set_initial_gen0_size(max_new_size); + set_max_gen0_size(max_new_size); + } else { + size_t desired_new_size = 0; + if (!FLAG_IS_DEFAULT(NewSize)) { + // If NewSize is set ergonomically (for example by cms), it + // would make sense to use it. If it is used, also use it + // to set the initial size. Although there is no reason + // the minimum size and the initial size have to be the same, + // the current implementation gets into trouble during the calculation + // of the tenured generation sizes if they are different. + // Note that this makes the initial size and the minimum size + // generally small compared to the NewRatio calculation. + _min_gen0_size = NewSize; + desired_new_size = NewSize; + max_new_size = MAX2(max_new_size, NewSize); + } else { + // For the case where NewSize is the default, use NewRatio + // to size the minimum and initial generation sizes. + // Use the default NewSize as the floor for these values. If + // NewRatio is overly large, the resulting sizes can be too + // small. 
+ _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()), + NewSize); + desired_new_size = + MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()), + NewSize); + } - size_t new_size = MIN2(MAX2(desired_new_size, _min_gen0_size), max_new_size); + assert(_min_gen0_size > 0, "Sanity check"); + set_initial_gen0_size(desired_new_size); + set_max_gen0_size(max_new_size); + + // At this point the desirable initial and minimum sizes have been + // determined without regard to the maximum sizes. + + // Bound the sizes by the corresponding overall heap sizes. + set_min_gen0_size( + bound_minus_alignment(_min_gen0_size, min_heap_byte_size())); + set_initial_gen0_size( + bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size())); + set_max_gen0_size( + bound_minus_alignment(_max_gen0_size, max_heap_byte_size())); + + // At this point all three sizes have been checked against the + // maximum sizes but have not been checked for consistency + // among the three. + + // Final check min <= initial <= max + set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size)); + set_initial_gen0_size( + MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size)); + set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size)); + } + + if (PrintGCDetails && Verbose) { + gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 " + SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, + min_gen0_size(), initial_gen0_size(), max_gen0_size()); + } +} - _initial_gen0_size = new_size; - _max_gen0_size = max_new_size; +// Call this method during the sizing of the gen1 to make +// adjustments to gen0 because of gen1 sizing policy. gen0 initially has +// the most freedom in sizing because it is done before the +// policy for gen1 is applied. Once gen1 policies have been applied, +// there may be conflicts in the shape of the heap and this method +// is used to make the needed adjustments. The application of the +// policies could be more sophisticated (iterative for example) but +// keeping it simple also seems a worthwhile goal. +bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr, + size_t* gen1_size_ptr, + size_t heap_size, + size_t min_gen0_size) { + bool result = false; + if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) { + if (((*gen0_size_ptr + OldSize) > heap_size) && + (heap_size - min_gen0_size) >= min_alignment()) { + // Adjust gen0 down to accomodate OldSize + *gen0_size_ptr = heap_size - min_gen0_size; + *gen0_size_ptr = + MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()), + min_alignment()); + assert(*gen0_size_ptr > 0, "Min gen0 is too large"); + result = true; + } else { + *gen1_size_ptr = heap_size - *gen0_size_ptr; + *gen1_size_ptr = + MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()), + min_alignment()); + } + } + return result; } +// Minimum sizes of the generations may be different than +// the initial sizes. An inconsistently is permitted here +// in the total size that can be specified explicitly by +// command line specification of OldSize and NewSize and +// also a command line specification of -Xms. Issue a warning +// but allow the values to pass. + void TwoGenerationCollectorPolicy::initialize_size_info() { GenCollectorPolicy::initialize_size_info(); - - // Minimum sizes of the generations may be different than - // the initial sizes. 
An inconsistently is permitted here - // in the total size that can be specified explicitly by - // command line specification of OldSize and NewSize and - // also a command line specification of -Xms. Issue a warning - // but allow the values to pass. - if (!FLAG_IS_DEFAULT(OldSize)) { - _min_gen1_size = OldSize; + + // At this point the minimum, initial and maximum sizes + // of the overall heap and of gen0 have been determined. + // The maximum gen1 size can be determined from the maximum gen0 + // and maximum heap size since not explicit flags exits + // for setting the gen1 maximum. + _max_gen1_size = max_heap_byte_size() - _max_gen0_size; + _max_gen1_size = + MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()), + min_alignment()); + // If no explicit command line flag has been set for the + // gen1 size, use what is left for gen1. + if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) { + // The user has not specified any value or ergonomics + // has chosen a value (which may or may not be consistent + // with the overall heap size). In either case make + // the minimum, maximum and initial sizes consistent + // with the gen0 sizes and the overall heap sizes. + assert(min_heap_byte_size() > _min_gen0_size, + "gen0 has an unexpected minimum size"); + set_min_gen1_size(min_heap_byte_size() - min_gen0_size()); + set_min_gen1_size( + MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()), + min_alignment())); + set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size()); + set_initial_gen1_size( + MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()), + min_alignment())); + + } else { + // It's been explicitly set on the command line. Use the + // OldSize and then determine the consequences. + set_min_gen1_size(OldSize); + set_initial_gen1_size(OldSize); + + // If the user has explicitly set an OldSize that is inconsistent + // with other command line flags, issue a warning. // The generation minimums and the overall heap mimimum should // be within one heap alignment. - if ((_min_gen1_size + _min_gen0_size + max_alignment()) < - _min_heap_byte_size) { + if ((_min_gen1_size + _min_gen0_size + min_alignment()) < + min_heap_byte_size()) { warning("Inconsistency between minimum heap size and minimum " - "generation sizes: using min heap = " SIZE_FORMAT, - _min_heap_byte_size); + "generation sizes: using minimum heap = " SIZE_FORMAT, + min_heap_byte_size()); + } + if ((OldSize > _max_gen1_size)) { + warning("Inconsistency between maximum heap size and maximum " + "generation sizes: using maximum heap = " SIZE_FORMAT + " -XX:OldSize flag is being ignored", + max_heap_byte_size()); + } + // If there is an inconsistency between the OldSize and the minimum and/or + // initial size of gen0, since OldSize was explicitly set, OldSize wins. 
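[Editor's note] When OldSize has been set explicitly and conflicts with the gen0 sizes, adjust_gen0_sizes() (defined earlier in this file's hunk and called just below) shrinks gen0 so the explicit OldSize wins; otherwise gen1 simply takes whatever is left. A loose standalone model of that reconciliation, with simplified alignment handling and illustrative sizes rather than the routine's exact parameters:

#include <cassert>
#include <cstddef>

size_t align_down(size_t size, size_t alignment) {
  return size - (size % alignment);
}

// Returns true if gen0 had to be shrunk (the OldSize-wins case above).
bool adjust_gen_sizes(size_t* gen0, size_t* gen1, size_t heap_size,
                      size_t old_size, size_t min_alignment) {
  if (*gen0 + *gen1 <= heap_size) {
    return false;                                   // already consistent
  }
  if (*gen0 + old_size > heap_size && heap_size > old_size) {
    // The explicitly requested old size wins: gen0 gets what is left.
    *gen0 = align_down(heap_size - old_size, min_alignment);
    if (*gen0 < min_alignment) *gen0 = min_alignment;
    return true;
  }
  // Otherwise keep gen0 and recompute gen1 as the remainder.
  *gen1 = align_down(heap_size - *gen0, min_alignment);
  if (*gen1 < min_alignment) *gen1 = min_alignment;
  return false;
}

int main() {
  const size_t K = 1024, M = 1024 * K;
  size_t gen0 = 48 * M, gen1 = 48 * M;
  // 64M heap with OldSize=48M: gen0 is pushed down to the remaining 16M.
  bool shrunk = adjust_gen_sizes(&gen0, &gen1, 64 * M, 48 * M, 64 * K);
  assert(shrunk && gen0 == 16 * M);
  return 0;
}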
+ if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, + min_heap_byte_size(), OldSize)) { + if (PrintGCDetails && Verbose) { + gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 " + SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, + min_gen0_size(), initial_gen0_size(), max_gen0_size()); + } + } + // Initial size + if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size, + initial_heap_byte_size(), OldSize)) { + if (PrintGCDetails && Verbose) { + gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 " + SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, + min_gen0_size(), initial_gen0_size(), max_gen0_size()); + } } - } else { - _min_gen1_size = _min_heap_byte_size - _min_gen0_size; } + // Enforce the maximum gen1 size. + set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size)); - _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size; - _max_gen1_size = _max_heap_byte_size - _max_gen0_size; + // Check that min gen1 <= initial gen1 <= max gen1 + set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size)); + set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size)); + + if (PrintGCDetails && Verbose) { + gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 " + SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT, + min_gen1_size(), initial_gen1_size(), max_gen1_size()); + } } HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size, --- old/hotspot/src/share/vm/memory/collectorPolicy.hpp 2009-08-01 04:12:01.855607824 +0100 +++ new/hotspot/src/share/vm/memory/collectorPolicy.hpp 2009-08-01 04:12:01.781784822 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)collectorPolicy.hpp 1.41 07/05/29 09:44:14 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,10 +42,12 @@ // Forward declarations. class GenCollectorPolicy; class TwoGenerationCollectorPolicy; +class AdaptiveSizePolicy; #ifndef SERIALGC class ConcurrentMarkSweepPolicy; +class G1CollectorPolicy; #endif // SERIALGC -class AdaptiveSizePolicy; + class GCPolicyCounters; class PermanentGenerationSpec; class MarkSweepPolicy; @@ -58,7 +60,7 @@ // Requires that the concrete subclass sets the alignment constraints // before calling. virtual void initialize_flags(); - virtual void initialize_size_info() = 0; + virtual void initialize_size_info(); // Initialize "_permanent_generation" to a spec for the given kind of // Perm Gen. void initialize_perm_generation(PermGen::Name pgnm); @@ -85,34 +87,41 @@ size_t max_alignment() { return _max_alignment; } size_t initial_heap_byte_size() { return _initial_heap_byte_size; } + void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; } size_t max_heap_byte_size() { return _max_heap_byte_size; } + void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; } size_t min_heap_byte_size() { return _min_heap_byte_size; } + void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; } enum Name { CollectorPolicyKind, TwoGenerationCollectorPolicyKind, - TrainPolicyKind, ConcurrentMarkSweepPolicyKind, - ASConcurrentMarkSweepPolicyKind + ASConcurrentMarkSweepPolicyKind, + G1CollectorPolicyKind }; // Identification methods. 
- virtual GenCollectorPolicy* as_generation_policy() { return NULL; } + virtual GenCollectorPolicy* as_generation_policy() { return NULL; } virtual TwoGenerationCollectorPolicy* as_two_generation_policy() { return NULL; } virtual MarkSweepPolicy* as_mark_sweep_policy() { return NULL; } #ifndef SERIALGC virtual ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return NULL; } -#endif // SERIALGC + virtual G1CollectorPolicy* as_g1_policy() { return NULL; } +#endif // SERIALGC // Note that these are not virtual. bool is_generation_policy() { return as_generation_policy() != NULL; } bool is_two_generation_policy() { return as_two_generation_policy() != NULL; } bool is_mark_sweep_policy() { return as_mark_sweep_policy() != NULL; } #ifndef SERIALGC bool is_concurrent_mark_sweep_policy() { return as_concurrent_mark_sweep_policy() != NULL; } + bool is_g1_policy() { return as_g1_policy() != NULL; } #else // SERIALGC bool is_concurrent_mark_sweep_policy() { return false; } + bool is_g1_policy() { return false; } #endif // SERIALGC + virtual PermanentGenerationSpec *permanent_generation() { assert(_permanent_generation != NULL, "Sanity check"); return _permanent_generation; @@ -185,8 +194,24 @@ // compute max heap alignment size_t compute_max_alignment(); + // Scale the base_size by NewRation according to + // result = base_size / (NewRatio + 1) + // and align by min_alignment() + size_t scale_by_NewRatio_aligned(size_t base_size); + + // Bound the value by the given maximum minus the + // min_alignment. + size_t bound_minus_alignment(size_t desired_size, size_t maximum_size); public: + // Accessors + size_t min_gen0_size() { return _min_gen0_size; } + void set_min_gen0_size(size_t v) { _min_gen0_size = v; } + size_t initial_gen0_size() { return _initial_gen0_size; } + void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; } + size_t max_gen0_size() { return _max_gen0_size; } + void set_max_gen0_size(size_t v) { _max_gen0_size = v; } + virtual int number_of_generations() = 0; virtual GenerationSpec **generations() { @@ -239,6 +264,14 @@ void initialize_generations() { ShouldNotReachHere(); } public: + // Accessors + size_t min_gen1_size() { return _min_gen1_size; } + void set_min_gen1_size(size_t v) { _min_gen1_size = v; } + size_t initial_gen1_size() { return _initial_gen1_size; } + void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; } + size_t max_gen1_size() { return _max_gen1_size; } + void set_max_gen1_size(size_t v) { _max_gen1_size = v; } + // Inherited methods TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; } @@ -249,6 +282,10 @@ virtual CollectorPolicy::Name kind() { return CollectorPolicy::TwoGenerationCollectorPolicyKind; } + + // Returns true is gen0 sizes were adjusted + bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr, + size_t heap_size, size_t min_gen1_size); }; class MarkSweepPolicy : public TwoGenerationCollectorPolicy { --- old/hotspot/src/share/vm/memory/compactingPermGenGen.cpp 2009-08-01 04:12:02.776010672 +0100 +++ new/hotspot/src/share/vm/memory/compactingPermGenGen.cpp 2009-08-01 04:12:02.700433848 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)compactingPermGenGen.cpp 1.22 08/11/24 12:22:45 JVM" #endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -52,9 +52,9 @@ // to prevent visiting any object twice. class RecursiveAdjustSharedObjectClosure : public OopClosure { -public: - void do_oop(oop* o) { - oop obj = *o; + protected: + template inline void do_oop_work(T* p) { + oop obj = oopDesc::load_decode_heap_oop_not_null(p); if (obj->is_shared_readwrite()) { if (obj->mark()->is_marked()) { obj->init_mark(); // Don't revisit this object. @@ -74,7 +74,10 @@ } } } - }; + } + public: + virtual void do_oop(oop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); } + virtual void do_oop(narrowOop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); } }; @@ -89,9 +92,9 @@ // as doing so can cause hash codes to be computed, destroying // forwarding pointers. class TraversePlaceholdersClosure : public OopClosure { - public: - void do_oop(oop* o) { - oop obj = *o; + protected: + template inline void do_oop_work(T* p) { + oop obj = oopDesc::load_decode_heap_oop_not_null(p); if (obj->klass() == Universe::symbolKlassObj() && obj->is_shared_readonly()) { symbolHandle sym((symbolOop) obj); @@ -102,6 +105,10 @@ } } } + public: + virtual void do_oop(oop* p) { TraversePlaceholdersClosure::do_oop_work(p); } + virtual void do_oop(narrowOop* p) { TraversePlaceholdersClosure::do_oop_work(p); } + }; @@ -417,28 +424,6 @@ } - -bool CompactingPermGenGen::grow_by(size_t bytes) { - // Don't allow _virtual_size to expand into shared spaces. - size_t max_bytes = _virtual_space.uncommitted_size() - _shared_space_size; - if (bytes > _shared_space_size) { - bytes = _shared_space_size; - } - return OneContigSpaceCardGeneration::grow_by(bytes); -} - - -void CompactingPermGenGen::grow_to_reserved() { - // Don't allow _virtual_size to expand into shared spaces. - if (_virtual_space.uncommitted_size() > _shared_space_size) { - size_t remaining_bytes = - _virtual_space.uncommitted_size() - _shared_space_size; - bool success = OneContigSpaceCardGeneration::grow_by(remaining_bytes); - DEBUG_ONLY(if (!success) warning("grow to reserved failed");) - } -} - - // No young generation references, clear this generation's main space's // card table entries. Do NOT clear the card table entries for the // read-only space (always clear) or the read-write space (valuable --- old/hotspot/src/share/vm/memory/compactingPermGenGen.hpp 2009-08-01 04:12:03.631463832 +0100 +++ new/hotspot/src/share/vm/memory/compactingPermGenGen.hpp 2009-08-01 04:12:03.562731237 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)compactingPermGenGen.hpp 1.22 07/08/31 18:41:29 JVM" #endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -103,7 +103,7 @@ enum { vtbl_list_size = 16, // number of entries in the shared space vtable list. - num_virtuals = 100 // number of virtual methods in Klass (or + num_virtuals = 200 // number of virtual methods in Klass (or // subclass) objects, or greater. 
}; @@ -186,8 +186,6 @@ void compact(); void post_compact(); size_t contiguous_available() const; - bool grow_by(size_t bytes); - void grow_to_reserved(); void clear_remembered_set(); void invalidate_remembered_set(); --- old/hotspot/src/share/vm/memory/defNewGeneration.cpp 2009-08-01 04:12:04.444262909 +0100 +++ new/hotspot/src/share/vm/memory/defNewGeneration.cpp 2009-08-01 04:12:04.359211125 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)defNewGeneration.cpp 1.73 07/05/22 17:24:57 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,31 +50,9 @@ _rs = (CardTableRS*)rs; } -void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { - // We never expect to see a null reference being processed - // as a weak reference. - assert (*p != NULL, "expected non-null ref"); - assert ((*p)->is_oop(), "expected an oop while scanning weak refs"); - - _cl->do_oop_nv(p); - - // Card marking is trickier for weak refs. - // This oop is a 'next' field which was filled in while we - // were discovering weak references. While we might not need - // to take a special action to keep this reference alive, we - // will need to dirty a card as the field was modified. - // - // Alternatively, we could create a method which iterates through - // each generation, allowing them in turn to examine the modified - // field. - // - // We could check that p is also in an older generation, but - // dirty cards in the youngest gen are never scanned, so the - // extra check probably isn't worthwhile. - if (Universe::heap()->is_in_reserved(p)) { - _rs->inline_write_ref_field_gc(p, *p); - } -} +void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } +void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } + DefNewGeneration::FastKeepAliveClosure:: FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) : @@ -82,19 +60,8 @@ _boundary = g->reserved().end(); } -void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { - assert (*p != NULL, "expected non-null ref"); - assert ((*p)->is_oop(), "expected an oop while scanning weak refs"); - - _cl->do_oop_nv(p); - - // Optimized for Defnew generation if it's the youngest generation: - // we set a younger_gen card if we have an older->youngest - // generation pointer. 
- if (((HeapWord*)(*p) < _boundary) && Universe::heap()->is_in_reserved(p)) { - _rs->inline_write_ref_field_gc(p, *p); - } -} +void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } +void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } DefNewGeneration::EvacuateFollowersClosure:: EvacuateFollowersClosure(GenCollectedHeap* gch, int level, @@ -135,13 +102,19 @@ _boundary = _g->reserved().end(); } -FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : +void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); } +void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); } + +FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier) { assert(_g->level() == 0, "Optimized for youngest generation"); _boundary = _g->reserved().end(); } +void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); } +void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); } + ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) : OopClosure(g->ref_processor()), _g(g) { @@ -149,6 +122,11 @@ _boundary = _g->reserved().end(); } +void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); } +void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); } + +void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); } +void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); } DefNewGeneration::DefNewGeneration(ReservedSpace rs, size_t initial_size, @@ -197,15 +175,25 @@ _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space, _gen_counters); - compute_space_boundaries(0); + compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle); update_counters(); _next_gen = NULL; _tenuring_threshold = MaxTenuringThreshold; _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize; } -void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) { - uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); +void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size, + bool clear_space, + bool mangle_space) { + uintx alignment = + GenCollectedHeap::heap()->collector_policy()->min_alignment(); + + // If the spaces are being cleared (only done at heap initialization + // currently), the survivor spaces need not be empty. + // Otherwise, no care is taken for used areas in the survivor spaces + // so check. + assert(clear_space || (to()->is_empty() && from()->is_empty()), + "Initialization of the survivor spaces assumes these are empty"); // Compute sizes uintx size = _virtual_space.committed_size(); @@ -239,16 +227,41 @@ MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start); MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end); - eden()->initialize(edenMR, (minimum_eden_size == 0)); - // If minumum_eden_size != 0, we will not have cleared any + // A minimum eden size implies that there is a part of eden that + // is being used and that affects the initialization of any + // newly formed eden. + bool live_in_eden = minimum_eden_size > 0; + + // If not clearing the spaces, do some checking to verify that + // the space are already mangled. + if (!clear_space) { + // Must check mangling before the spaces are reshaped. 
Otherwise, + // the bottom or end of one space may have moved into another + // a failure of the check may not correctly indicate which space + // is not properly mangled. + if (ZapUnusedHeapArea) { + HeapWord* limit = (HeapWord*) _virtual_space.high(); + eden()->check_mangled_unused_area(limit); + from()->check_mangled_unused_area(limit); + to()->check_mangled_unused_area(limit); + } + } + + // Reset the spaces for their new regions. + eden()->initialize(edenMR, + clear_space && !live_in_eden, + SpaceDecorator::Mangle); + // If clear_space and live_in_eden, we will not have cleared any // portion of eden above its top. This can cause newly // expanded space not to be mangled if using ZapUnusedHeapArea. // We explicitly do such mangling here. - if (ZapUnusedHeapArea && (minimum_eden_size != 0)) { + if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) { eden()->mangle_unused_area(); } - from()->initialize(fromMR, true); - to()->initialize(toMR , true); + from()->initialize(fromMR, clear_space, mangle_space); + to()->initialize(toMR, clear_space, mangle_space); + + // Set next compaction spaces. eden()->set_next_compaction_space(from()); // The to-space is normally empty before a compaction so need // not be considered. The exception is during promotion @@ -275,7 +288,16 @@ bool DefNewGeneration::expand(size_t bytes) { MutexLocker x(ExpandHeap_lock); + HeapWord* prev_high = (HeapWord*) _virtual_space.high(); bool success = _virtual_space.expand_by(bytes); + if (success && ZapUnusedHeapArea) { + // Mangle newly committed space immediately because it + // can be done here more simply that after the new + // spaces have been computed. + HeapWord* new_high = (HeapWord*) _virtual_space.high(); + MemRegion mangle_region(prev_high, new_high); + SpaceMangler::mangle_region(mangle_region); + } // Do not attempt an expand-to-the reserve size. The // request should properly observe the maximum size of @@ -287,7 +309,8 @@ // value. if (GC_locker::is_active()) { if (PrintGC && Verbose) { - gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead"); + gclog_or_tty->print_cr("Garbage collection disabled, " + "expanded heap instead"); } } @@ -351,16 +374,24 @@ changed = true; } if (changed) { - compute_space_boundaries(eden()->used()); - MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high()); + // The spaces have already been mangled at this point but + // may not have been cleared (set top = bottom) and should be. + // Mangling was done when the heap was being expanded. 
+ compute_space_boundaries(eden()->used(), + SpaceDecorator::Clear, + SpaceDecorator::DontMangle); + MemRegion cmr((HeapWord*)_virtual_space.low(), + (HeapWord*)_virtual_space.high()); Universe::heap()->barrier_set()->resize_covered_region(cmr); if (Verbose && PrintGC) { size_t new_size_after = _virtual_space.committed_size(); size_t eden_size_after = eden()->capacity(); size_t survivor_size_after = from()->capacity(); - gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" - SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", - new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K); + gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" + SIZE_FORMAT "K [eden=" + SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", + new_size_before/K, new_size_after/K, + eden_size_after/K, survivor_size_after/K); if (WizardMode) { gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]", thread_increase_size/K, threads_count); @@ -505,21 +536,13 @@ ScanWeakRefClosure scan_weak_ref(this); age_table()->clear(); - to()->clear(); + to()->clear(SpaceDecorator::Mangle); gch->rem_set()->prepare_for_younger_refs_iterate(false); assert(gch->no_allocs_since_save_marks(0), "save marks have not been newly set."); - // Weak refs. - // FIXME: Are these storage leaks, or are they resource objects? -#ifdef COMPILER2 - ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy(); -#else - ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - // Not very pretty. CollectorPolicy* cp = gch->collector_policy(); @@ -546,12 +569,24 @@ evacuate_followers.do_void(); FastKeepAliveClosure keep_alive(this, &scan_weak_ref); - ref_processor()->process_discovered_references( - soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL); + ReferenceProcessor* rp = ref_processor(); + rp->setup_policy(clear_all_soft_refs); + rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, + NULL); if (!promotion_failed()) { // Swap the survivor spaces. - eden()->clear(); - from()->clear(); + eden()->clear(SpaceDecorator::Mangle); + from()->clear(SpaceDecorator::Mangle); + if (ZapUnusedHeapArea) { + // This is now done here because of the piece-meal mangling which + // can check for valid mangling at intermediate points in the + // collection(s). When a minor collection fails to collect + // sufficient space resizing of the young generation can occur + // an redistribute the spaces in the young generation. Mangle + // here so that unzapped regions don't get distributed to + // other spaces. + to()->mangle_unused_area(); + } swap_spaces(); assert(to()->is_empty(), "to space should be empty now"); @@ -659,7 +694,7 @@ } } -oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) { +oop DefNewGeneration::copy_to_survivor_space(oop old) { assert(is_in_reserved(old) && !old->is_forwarded(), "shouldn't be scavenging this oop"); size_t s = old->size(); @@ -672,7 +707,7 @@ // Otherwise try allocating obj tenured if (obj == NULL) { - obj = _next_gen->promote(old, s, from); + obj = _next_gen->promote(old, s); if (obj == NULL) { if (!HandlePromotionFailure) { // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag @@ -778,6 +813,15 @@ } } +void DefNewGeneration::reset_scratch() { + // If contributing scratch in to_space, mangle all of + // to_space if ZapUnusedHeapArea. This is needed because + // top is not maintained while using to-space as scratch. 
+ if (ZapUnusedHeapArea) { + to()->mangle_unused_area_complete(); + } +} + bool DefNewGeneration::collection_attempt_is_safe() { if (!to()->is_empty()) { return false; @@ -830,12 +874,26 @@ set_should_allocate_from_space(); } } - + + if (ZapUnusedHeapArea) { + eden()->check_mangled_unused_area_complete(); + from()->check_mangled_unused_area_complete(); + to()->check_mangled_unused_area_complete(); + } + // update the generation and space performance counters update_counters(); gch->collector_policy()->counters()->update_counters(); } +void DefNewGeneration::record_spaces_top() { + assert(ZapUnusedHeapArea, "Not mangling unused space"); + eden()->set_top_for_allocations(); + to()->set_top_for_allocations(); + from()->set_top_for_allocations(); +} + + void DefNewGeneration::update_counters() { if (UsePerfData) { _eden_counters->update_all(); @@ -865,3 +923,69 @@ const char* DefNewGeneration::name() const { return "def new generation"; } + +// Moved from inline file as they are not called inline +CompactibleSpace* DefNewGeneration::first_compaction_space() const { + return eden(); +} + +HeapWord* DefNewGeneration::allocate(size_t word_size, + bool is_tlab) { + // This is the slow-path allocation for the DefNewGeneration. + // Most allocations are fast-path in compiled code. + // We try to allocate from the eden. If that works, we are happy. + // Note that since DefNewGeneration supports lock-free allocation, we + // have to use it here, as well. + HeapWord* result = eden()->par_allocate(word_size); + if (result != NULL) { + return result; + } + do { + HeapWord* old_limit = eden()->soft_end(); + if (old_limit < eden()->end()) { + // Tell the next generation we reached a limit. + HeapWord* new_limit = + next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size); + if (new_limit != NULL) { + Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit); + } else { + assert(eden()->soft_end() == eden()->end(), + "invalid state after allocation_limit_reached returned null"); + } + } else { + // The allocation failed and the soft limit is equal to the hard limit, + // there are no reasons to do an attempt to allocate + assert(old_limit == eden()->end(), "sanity check"); + break; + } + // Try to allocate until succeeded or the soft limit can't be adjusted + result = eden()->par_allocate(word_size); + } while (result == NULL); + + // If the eden is full and the last collection bailed out, we are running + // out of heap space, and we try to allocate the from-space, too. + // allocate_from_space can't be inlined because that would introduce a + // circular dependency at compile time. + if (result == NULL) { + result = allocate_from_space(word_size); + } + return result; +} + +HeapWord* DefNewGeneration::par_allocate(size_t word_size, + bool is_tlab) { + return eden()->par_allocate(word_size); +} + +void DefNewGeneration::gc_prologue(bool full) { + // Ensure that _end and _soft_end are the same in eden space. + eden()->set_soft_end(eden()->end()); +} + +size_t DefNewGeneration::tlab_capacity() const { + return eden()->capacity(); +} + +size_t DefNewGeneration::unsafe_max_tlab_alloc() const { + return unsafe_max_alloc_nogc(); +} --- old/hotspot/src/share/vm/memory/defNewGeneration.hpp 2009-08-01 04:12:05.411089694 +0100 +++ new/hotspot/src/share/vm/memory/defNewGeneration.hpp 2009-08-01 04:12:05.320845893 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)defNewGeneration.hpp 1.40 07/05/17 15:54:44 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. 
+ * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ class EdenSpace; class ContiguousSpace; +class ScanClosure; // DefNewGeneration is a young generation containing eden, from- and // to-space. @@ -158,17 +159,21 @@ protected: ScanWeakRefClosure* _cl; CardTableRS* _rs; + template void do_oop_work(T* p); public: KeepAliveClosure(ScanWeakRefClosure* cl); - void do_oop(oop* p); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; class FastKeepAliveClosure: public KeepAliveClosure { protected: HeapWord* _boundary; + template void do_oop_work(T* p); public: FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl); - void do_oop(oop* p); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; class EvacuateFollowersClosure: public VoidClosure { @@ -209,7 +214,7 @@ ContiguousSpace* from() const { return _from_space; } ContiguousSpace* to() const { return _to_space; } - inline CompactibleSpace* first_compaction_space() const; + virtual CompactibleSpace* first_compaction_space() const; // Space enquiries size_t capacity() const; @@ -229,8 +234,8 @@ // Thread-local allocation buffers bool supports_tlab_allocation() const { return true; } - inline size_t tlab_capacity() const; - inline size_t unsafe_max_tlab_alloc() const; + size_t tlab_capacity() const; + size_t unsafe_max_tlab_alloc() const; // Grow the generation by the specified number of bytes. // The size of bytes is assumed to be properly aligned. @@ -268,15 +273,18 @@ return result; } - inline HeapWord* allocate(size_t word_size, bool is_tlab); + HeapWord* allocate(size_t word_size, bool is_tlab); HeapWord* allocate_from_space(size_t word_size); - inline HeapWord* par_allocate(size_t word_size, bool is_tlab); + HeapWord* par_allocate(size_t word_size, bool is_tlab); // Prologue & Epilogue - inline virtual void gc_prologue(bool full); + virtual void gc_prologue(bool full); virtual void gc_epilogue(bool full); + // Save the tops for eden, from, and to + virtual void record_spaces_top(); + // Doesn't require additional work during GC prologue and epilogue virtual bool performs_in_place_marking() const { return false; } @@ -297,8 +305,11 @@ // For non-youngest collection, the DefNewGeneration can contribute // "to-space". - void contribute_scratch(ScratchBlock*& list, Generation* requestor, - size_t max_alloc_words); + virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor, + size_t max_alloc_words); + + // Reset for contribution of "to-space". + virtual void reset_scratch(); // GC support virtual void compute_new_size(); @@ -310,7 +321,7 @@ bool is_tlab, bool parallel = false); - oop copy_to_survivor_space(oop old, oop* from); + oop copy_to_survivor_space(oop old); int tenuring_threshold() { return _tenuring_threshold; } // Performance Counter support @@ -329,7 +340,12 @@ void verify(bool allow_dirty); protected: - void compute_space_boundaries(uintx minimum_eden_size); + // If clear_space is true, clear the survivor spaces. Eden is + // cleared if the minimum size of eden is 0. If mangle_space + // is true, also mangle the space in debug mode. 
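The closure declarations being changed in this header follow a common pattern for supporting both full-width and compressed object pointers: one templated worker shared by two virtual do_oop entry points. A self-contained sketch of that shape, with toy pointer types standing in for HotSpot's oop and narrowOop:

    #include <cstdint>
    #include <iostream>

    typedef uint64_t oop;        // stand-in for a full-width object pointer
    typedef uint32_t narrowOop;  // stand-in for a compressed object pointer

    class OopClosure {
     public:
      virtual ~OopClosure() {}
      virtual void do_oop(oop* p) = 0;
      virtual void do_oop(narrowOop* p) = 0;
    };

    class CountingClosure : public OopClosure {
      int _count;
      // One worker handles both widths; the virtual overloads just forward.
      template <class T> void do_oop_work(T* p) {
        if (*p != 0) _count++;
      }
     public:
      CountingClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { do_oop_work(p); }
      virtual void do_oop(narrowOop* p) { do_oop_work(p); }
      int count() const { return _count; }
    };

    int main() {
      oop wide = 0x1000;
      narrowOop narrow = 0x20;
      CountingClosure cl;
      cl.do_oop(&wide);
      cl.do_oop(&narrow);
      std::cout << cl.count() << "\n";  // prints 2
      return 0;
    }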
+ void compute_space_boundaries(uintx minimum_eden_size, + bool clear_space, + bool mangle_space); // Scavenge support void swap_spaces(); }; --- old/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp 2009-08-01 04:12:06.329094651 +0100 +++ new/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp 2009-08-01 04:12:06.257101986 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)defNewGeneration.inline.hpp 1.18 07/05/05 17:05:46 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,67 +25,60 @@ * */ -CompactibleSpace* DefNewGeneration::first_compaction_space() const { - return eden(); -} +// Methods of protected closure types -HeapWord* DefNewGeneration::allocate(size_t word_size, - bool is_tlab) { - // This is the slow-path allocation for the DefNewGeneration. - // Most allocations are fast-path in compiled code. - // We try to allocate from the eden. If that works, we are happy. - // Note that since DefNewGeneration supports lock-free allocation, we - // have to use it here, as well. - HeapWord* result = eden()->par_allocate(word_size); - if (result != NULL) { - return result; - } - do { - HeapWord* old_limit = eden()->soft_end(); - if (old_limit < eden()->end()) { - // Tell the next generation we reached a limit. - HeapWord* new_limit = - next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size); - if (new_limit != NULL) { - Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit); - } else { - assert(eden()->soft_end() == eden()->end(), - "invalid state after allocation_limit_reached returned null"); - } - } else { - // The allocation failed and the soft limit is equal to the hard limit, - // there are no reasons to do an attempt to allocate - assert(old_limit == eden()->end(), "sanity check"); - break; - } - // Try to allocate until succeeded or the soft limit can't be adjusted - result = eden()->par_allocate(word_size); - } while (result == NULL); - - // If the eden is full and the last collection bailed out, we are running - // out of heap space, and we try to allocate the from-space, too. - // allocate_from_space can't be inlined because that would introduce a - // circular dependency at compile time. - if (result == NULL) { - result = allocate_from_space(word_size); +template +inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) { +#ifdef ASSERT + { + // We never expect to see a null reference being processed + // as a weak reference. + assert (!oopDesc::is_null(*p), "expected non-null ref"); + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + assert (obj->is_oop(), "expected an oop while scanning weak refs"); } - return result; -} +#endif // ASSERT -HeapWord* DefNewGeneration::par_allocate(size_t word_size, - bool is_tlab) { - return eden()->par_allocate(word_size); -} + _cl->do_oop_nv(p); -void DefNewGeneration::gc_prologue(bool full) { - // Ensure that _end and _soft_end are the same in eden space. - eden()->set_soft_end(eden()->end()); + // Card marking is trickier for weak refs. + // This oop is a 'next' field which was filled in while we + // were discovering weak references. While we might not need + // to take a special action to keep this reference alive, we + // will need to dirty a card as the field was modified. 
+ // + // Alternatively, we could create a method which iterates through + // each generation, allowing them in turn to examine the modified + // field. + // + // We could check that p is also in an older generation, but + // dirty cards in the youngest gen are never scanned, so the + // extra check probably isn't worthwhile. + if (Universe::heap()->is_in_reserved(p)) { + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + _rs->inline_write_ref_field_gc(p, obj); + } } -size_t DefNewGeneration::tlab_capacity() const { - return eden()->capacity(); -} +template +inline void DefNewGeneration::FastKeepAliveClosure::do_oop_work(T* p) { +#ifdef ASSERT + { + // We never expect to see a null reference being processed + // as a weak reference. + assert (!oopDesc::is_null(*p), "expected non-null ref"); + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + assert (obj->is_oop(), "expected an oop while scanning weak refs"); + } +#endif // ASSERT -size_t DefNewGeneration::unsafe_max_tlab_alloc() const { - return unsafe_max_alloc_nogc(); + _cl->do_oop_nv(p); + + // Optimized for Defnew generation if it's the youngest generation: + // we set a younger_gen card if we have an older->youngest + // generation pointer. + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + if (((HeapWord*)obj < _boundary) && Universe::heap()->is_in_reserved(p)) { + _rs->inline_write_ref_field_gc(p, obj); + } } --- old/hotspot/src/share/vm/memory/dump.cpp 2009-08-01 04:12:07.228341287 +0100 +++ new/hotspot/src/share/vm/memory/dump.cpp 2009-08-01 04:12:07.140019979 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)dump.cpp 1.33 07/05/23 10:53:38 JVM" #endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,9 +63,9 @@ hash_offset = java_lang_String::hash_offset_in_bytes(); } - void do_oop(oop* pobj) { - if (pobj != NULL) { - oop obj = *pobj; + void do_oop(oop* p) { + if (p != NULL) { + oop obj = *p; if (obj->klass() == SystemDictionary::string_klass()) { int hash; @@ -82,6 +82,7 @@ } } } + void do_oop(narrowOop* p) { ShouldNotReachHere(); } }; @@ -124,9 +125,8 @@ class MarkObjectsOopClosure : public OopClosure { public: - void do_oop(oop* pobj) { - mark_object(*pobj); - } + void do_oop(oop* p) { mark_object(*p); } + void do_oop(narrowOop* p) { ShouldNotReachHere(); } }; @@ -139,6 +139,7 @@ mark_object(obj); } } + void do_oop(narrowOop* pobj) { ShouldNotReachHere(); } }; @@ -557,6 +558,7 @@ } } } + void do_oop(narrowOop* pobj) { ShouldNotReachHere(); } }; @@ -646,7 +648,7 @@ class ClearSpaceClosure : public SpaceClosure { public: void do_space(Space* s) { - s->clear(); + s->clear(SpaceDecorator::Mangle); } }; @@ -693,6 +695,8 @@ ++top; } + void do_oop(narrowOop* pobj) { ShouldNotReachHere(); } + void do_int(int* p) { check_space(); *top = (oop)(intptr_t)*p; @@ -817,6 +821,40 @@ // across the space while doing this, as that causes the vtables to be // patched, undoing our useful work. Instead, iterate to make a list, // then use the list to do the fixing. +// +// Our constructed vtables: +// Dump time: +// 1. init_self_patching_vtbl_list: table of pointers to current virtual method addrs +// 2. generate_vtable_methods: create jump table, appended to above vtbl_list +// 3. 
PatchKlassVtables: for Klass list, patch the vtable entry to point to jump table +// rather than to current vtbl +// Table layout: NOTE FIXED SIZE +// 1. vtbl pointers +// 2. #Klass X #virtual methods per Klass +// 1 entry for each, in the order: +// Klass1:method1 entry, Klass1:method2 entry, ... Klass1:method entry +// Klass2:method1 entry, Klass2:method2 entry, ... Klass2:method entry +// ... +// Klass:method1 entry, Klass:method2 entry, +// ... Klass:method entry +// Sample entry: (Sparc): +// save(sp, -256, sp) +// ba,pt common_code +// mov XXX, %L0 %L0 gets: Klass index <<8 + method index (note: max method index 255) +// +// Restore time: +// 1. initialize_oops: reserve space for table +// 2. init_self_patching_vtbl_list: update pointers to NEW virtual method addrs in text +// +// Execution time: +// First virtual method call for any object of these Klass types: +// 1. object->klass->klass_part +// 2. vtable entry for that klass_part points to the jump table entries +// 3. branches to common_code with %O0/klass_part, %L0: Klass index <<8 + method index +// 4. common_code: +// Get address of new vtbl pointer for this Klass from updated table +// Update new vtbl pointer in the Klass: future virtual calls go direct +// Jump to method, using new vtbl pointer and method index class PatchKlassVtables: public ObjectClosure { private: @@ -1198,11 +1236,13 @@ _ro_space->set_saved_mark(); mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false); _rw_space->set_saved_mark(); - mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(), - md_top - _md_vs->low(), SharedMiscDataSize, + mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(), + pointer_delta(md_top, _md_vs->low(), sizeof(char)), + SharedMiscDataSize, false, false); - mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(), - mc_top - _mc_vs->low(), SharedMiscCodeSize, + mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(), + pointer_delta(mc_top, _mc_vs->low(), sizeof(char)), + SharedMiscCodeSize, true, true); // Pass 2 - write data. @@ -1210,11 +1250,13 @@ mapinfo->write_header(); mapinfo->write_space(CompactingPermGenGen::ro, _ro_space, true); mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false); - mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(), - md_top - _md_vs->low(), SharedMiscDataSize, + mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(), + pointer_delta(md_top, _md_vs->low(), sizeof(char)), + SharedMiscDataSize, false, false); - mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(), - mc_top - _mc_vs->low(), SharedMiscCodeSize, + mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(), + pointer_delta(mc_top, _mc_vs->low(), sizeof(char)), + SharedMiscCodeSize, true, true); mapinfo->close(); --- old/hotspot/src/share/vm/memory/gcLocker.cpp 2009-08-01 04:12:08.138530825 +0100 +++ new/hotspot/src/share/vm/memory/gcLocker.cpp 2009-08-01 04:12:08.064368545 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)gcLocker.cpp 1.52 07/05/17 15:54:45 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
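The vtable-patching comment block above describes jump-table entries that pack a class index and a method index (at most 255) into one small integer before branching to common fix-up code. The packing itself is just shifts and masks; a trivial sketch, purely illustrative since the real table lives in generated stub code:

    #include <cassert>

    // Class index in the high bits, method index in the low byte.
    static int pack(int klass_index, int method_index) {
      assert(method_index <= 255);
      return (klass_index << 8) | method_index;
    }
    static int klass_of(int packed)  { return packed >> 8; }
    static int method_of(int packed) { return packed & 0xff; }

    int main() {
      int packed = pack(7, 42);
      assert(klass_of(packed) == 7);
      assert(method_of(packed) == 42);
      return 0;
    }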
* * This code is free software; you can redistribute it and/or modify it @@ -35,6 +35,12 @@ void GC_locker::stall_until_clear() { assert(!JavaThread::current()->in_critical(), "Would deadlock"); + if (PrintJNIGCStalls && PrintGCDetails) { + ResourceMark rm; // JavaThread::name() allocates to convert to UTF8 + gclog_or_tty->print_cr( + "Allocation failed. Thread \"%s\" is stalled by JNI critical section.", + JavaThread::current()->name()); + } MutexLocker ml(JNICritical_lock); // Wait for _needs_gc to be cleared while (GC_locker::needs_gc()) { --- old/hotspot/src/share/vm/memory/gcLocker.hpp 2009-08-01 04:12:08.911353534 +0100 +++ new/hotspot/src/share/vm/memory/gcLocker.hpp 2009-08-01 04:12:08.842635061 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)gcLocker.hpp 1.60 07/05/17 15:54:47 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -187,7 +187,9 @@ Thread *_thread; public: #ifdef ASSERT - No_Safepoint_Verifier(bool activated = true, bool verifygc = true ) : No_GC_Verifier(verifygc) { + No_Safepoint_Verifier(bool activated = true, bool verifygc = true ) : + No_GC_Verifier(verifygc), + _activated(activated) { _thread = Thread::current(); if (_activated) { _thread->_allow_allocation_count++; --- old/hotspot/src/share/vm/memory/genCollectedHeap.cpp 2009-08-01 04:12:09.751080974 +0100 +++ new/hotspot/src/share/vm/memory/genCollectedHeap.cpp 2009-08-01 04:12:09.674178932 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)genCollectedHeap.cpp 1.190 07/06/15 16:44:02 JVM" #endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -225,9 +225,9 @@ *_total_reserved = total_reserved; *_n_covered_regions = n_covered_regions; - *heap_rs = ReservedSpace(total_reserved, alignment, - UseLargePages, heap_address); - + *heap_rs = ReservedHeapSpace(total_reserved, alignment, + UseLargePages, heap_address); + return heap_address; } @@ -468,6 +468,11 @@ _gens[i]->stat_record()->invocations++; _gens[i]->stat_record()->accumulated_time.start(); + // Must be done anew before each collection because + // a previous collection will do mangling and will + // change top of some spaces. 
+ record_gen_tops_before_GC(); + if (PrintGC && Verbose) { gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT, i, @@ -523,8 +528,9 @@ if (rp->discovery_is_atomic()) { rp->verify_no_references_recorded(); rp->enable_discovery(); + rp->setup_policy(clear_all_soft_refs); } else { - // collect() will enable discovery as appropriate + // collect() below will enable discovery as appropriate } _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab); if (!rp->enqueuing_is_done()) { @@ -627,6 +633,7 @@ void do_oop(oop* p) { assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm."); } + void do_oop(narrowOop* p) { ShouldNotReachHere(); } }; static AssertIsPermClosure assert_is_perm_closure; @@ -1060,6 +1067,12 @@ return res; } +void GenCollectedHeap::release_scratch() { + for (int i = 0; i < _n_gens; i++) { + _gens[i]->reset_scratch(); + } +} + size_t GenCollectedHeap::large_typearray_limit() { return gen_policy()->large_typearray_limit(); } @@ -1287,6 +1300,24 @@ always_do_update_barrier = UseConcMarkSweepGC; }; +#ifndef PRODUCT +class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure { + private: + public: + void do_generation(Generation* gen) { + gen->record_spaces_top(); + } +}; + +void GenCollectedHeap::record_gen_tops_before_GC() { + if (ZapUnusedHeapArea) { + GenGCSaveTopsBeforeGCClosure blk; + generation_iterate(&blk, false); // not old-to-young. + perm_gen()->record_spaces_top(); + } +} +#endif // not PRODUCT + class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure { public: void do_generation(Generation* gen) { @@ -1302,9 +1333,8 @@ } oop GenCollectedHeap::handle_failed_promotion(Generation* gen, - oop obj, - size_t obj_size, - oop* ref) { + oop obj, + size_t obj_size) { assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); HeapWord* result = NULL; --- old/hotspot/src/share/vm/memory/genCollectedHeap.hpp 2009-08-01 04:12:10.636073236 +0100 +++ new/hotspot/src/share/vm/memory/genCollectedHeap.hpp 2009-08-01 04:12:10.566872036 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)genCollectedHeap.hpp 1.106 07/07/22 22:36:34 JVM" #endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,6 +38,7 @@ friend class CMSCollector; friend class GenMarkSweep; friend class VM_GenCollectForAllocation; + friend class VM_GenCollectForPermanentAllocation; friend class VM_GenCollectFull; friend class VM_GenCollectFullConcurrent; friend class VM_GC_HeapInspection; @@ -254,13 +255,31 @@ virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; virtual HeapWord* allocate_new_tlab(size_t size); - // The "requestor" generation is performing some garbage collection + // Can a compiler initialize a new object without store barriers? + // This permission only extends from the creation of a new object + // via a TLAB up to the first subsequent safepoint. + virtual bool can_elide_tlab_store_barriers() const { + return true; + } + + // Can a compiler elide a store barrier when it writes + // a permanent oop into the heap? Applies when the compiler + // is storing x to the heap, where x->is_perm() is true. + virtual bool can_elide_permanent_oop_store_barriers() const { + // CMS needs to see all, even intra-generational, ref updates. 
+ return !UseConcMarkSweepGC; + } + + // The "requestor" generation is performing some garbage collection // action for which it would be useful to have scratch space. The // requestor promises to allocate no more than "max_alloc_words" in any // older generation (via promotion say.) Any blocks of space that can // be provided are returned as a list of ScratchBlocks, sorted by // decreasing size. ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words); + // Allow each generation to reset any scratch space that it has + // contributed as it needs. + void release_scratch(); size_t large_typearray_limit(); @@ -454,9 +473,8 @@ // Otherwise, try expand-and-allocate for obj in each generation starting at // gen; return the new location of obj if successful. Otherwise, return NULL. oop handle_failed_promotion(Generation* gen, - oop obj, - size_t obj_size, - oop* ref); + oop obj, + size_t obj_size); private: // Accessor for memory state verification support @@ -485,6 +503,9 @@ bool should_do_concurrent_full_gc(GCCause::Cause cause); void collect_mostly_concurrent(GCCause::Cause cause); + // Save the tops of the spaces in all generations + void record_gen_tops_before_GC() PRODUCT_RETURN; + protected: virtual void gc_prologue(bool full); virtual void gc_epilogue(bool full); --- old/hotspot/src/share/vm/memory/genMarkSweep.cpp 2009-08-01 04:12:11.507673628 +0100 +++ new/hotspot/src/share/vm/memory/genMarkSweep.cpp 2009-08-01 04:12:11.424212915 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)genMarkSweep.cpp 1.40 07/05/17 15:54:55 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,8 +34,9 @@ // hook up weak ref data so it can be used during Mark-Sweep assert(ref_processor() == NULL, "no stomping"); - _ref_processor = rp; assert(rp != NULL, "should be non-NULL"); + _ref_processor = rp; + rp->setup_policy(clear_all_softrefs); TraceTime t1("Full GC", PrintGC && !PrintGCDetails, true, gclog_or_tty); @@ -76,8 +77,7 @@ VALIDATE_MARK_SWEEP_ONLY( if (ValidateMarkSweep) { - guarantee(_root_refs_stack->length() == 0, - "should be empty by now"); + guarantee(_root_refs_stack->length() == 0, "should be empty by now"); } ) @@ -168,9 +168,9 @@ #ifdef VALIDATE_MARK_SWEEP if (ValidateMarkSweep) { - _root_refs_stack = new (ResourceObj::C_HEAP) GrowableArray(100, true); - _other_refs_stack = new (ResourceObj::C_HEAP) GrowableArray(100, true); - _adjusted_pointers = new (ResourceObj::C_HEAP) GrowableArray(100, true); + _root_refs_stack = new (ResourceObj::C_HEAP) GrowableArray(100, true); + _other_refs_stack = new (ResourceObj::C_HEAP) GrowableArray(100, true); + _adjusted_pointers = new (ResourceObj::C_HEAP) GrowableArray(100, true); _live_oops = new (ResourceObj::C_HEAP) GrowableArray(100, true); _live_oops_moved_to = new (ResourceObj::C_HEAP) GrowableArray(100, true); _live_oops_size = new (ResourceObj::C_HEAP) GrowableArray(100, true); @@ -194,6 +194,12 @@ void GenMarkSweep::deallocate_stacks() { + + if (!UseG1GC) { + GenCollectedHeap* gch = GenCollectedHeap::heap(); + gch->release_scratch(); + } + if (_preserved_oop_stack) { delete _preserved_mark_stack; _preserved_mark_stack = NULL; @@ -243,20 +249,9 @@ // Process reference objects found during marking { - ReferencePolicy *soft_ref_policy; - if (clear_all_softrefs) { - soft_ref_policy = new AlwaysClearPolicy(); 
- } else { -#ifdef COMPILER2 - soft_ref_policy = new LRUMaxHeapPolicy(); -#else - soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - } - assert(soft_ref_policy != NULL,"No soft reference policy"); + ref_processor()->setup_policy(clear_all_softrefs); ref_processor()->process_discovered_references( - soft_ref_policy, &is_alive, &keep_alive, - &follow_stack_closure, NULL); + &is_alive, &keep_alive, &follow_stack_closure, NULL); } // Follow system dictionary roots and unload classes --- old/hotspot/src/share/vm/memory/genMarkSweep.hpp 2009-08-01 04:12:12.425771668 +0100 +++ new/hotspot/src/share/vm/memory/genMarkSweep.hpp 2009-08-01 04:12:12.354629993 +0100 @@ -27,6 +27,7 @@ class GenMarkSweep : public MarkSweep { friend class VM_MarkSweep; + friend class G1MarkSweep; public: static void invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs); --- old/hotspot/src/share/vm/memory/genOopClosures.hpp 2009-08-01 04:12:13.271304262 +0100 +++ new/hotspot/src/share/vm/memory/genOopClosures.hpp 2009-08-01 04:12:13.193326783 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)genOopClosures.hpp 1.64 07/05/29 09:44:15 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,11 @@ class CardTableModRefBS; class DefNewGeneration; +template class GenericTaskQueue; +typedef GenericTaskQueue OopTaskQueue; +template class GenericTaskQueueSet; +typedef GenericTaskQueueSet OopTaskQueueSet; + // Closure for iterating roots from a particular generation // Note: all classes deriving from this MUST call this do_barrier // method at the end of their own do_oop method! @@ -38,13 +43,13 @@ class OopsInGenClosure : public OopClosure { private: - Generation* _orig_gen; // generation originally set in ctor - Generation* _gen; // generation being scanned + Generation* _orig_gen; // generation originally set in ctor + Generation* _gen; // generation being scanned protected: // Some subtypes need access. - HeapWord* _gen_boundary; // start of generation - CardTableRS* _rs; // remembered set + HeapWord* _gen_boundary; // start of generation + CardTableRS* _rs; // remembered set // For assertions Generation* generation() { return _gen; } @@ -52,7 +57,10 @@ // Derived classes that modify oops so that they might be old-to-young // pointers must call the method below. - void do_barrier(oop* p); + template void do_barrier(T* p); + + // Version for use by closures that may be called in parallel code. + void par_do_barrier(oop* p); public: OopsInGenClosure() : OopClosure(NULL), @@ -78,14 +86,17 @@ // This closure will perform barrier store calls for ALL // pointers in scanned oops. class ScanClosure: public OopsInGenClosure { -protected: + protected: DefNewGeneration* _g; - HeapWord* _boundary; - bool _gc_barrier; -public: + HeapWord* _boundary; + bool _gc_barrier; + template inline void do_oop_work(T* p); + public: ScanClosure(DefNewGeneration* g, bool gc_barrier); - void do_oop(oop* p); - void do_oop_nv(oop* p); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p); + inline void do_oop_nv(narrowOop* p); bool do_header() { return false; } Prefetch::style prefetch_style() { return Prefetch::do_write; @@ -98,14 +109,17 @@ // pointers into the DefNewGeneration. 
This is less // precise, but faster, than a ScanClosure class FastScanClosure: public OopsInGenClosure { -protected: + protected: DefNewGeneration* _g; - HeapWord* _boundary; - bool _gc_barrier; -public: + HeapWord* _boundary; + bool _gc_barrier; + template inline void do_oop_work(T* p); + public: FastScanClosure(DefNewGeneration* g, bool gc_barrier); - void do_oop(oop* p); - void do_oop_nv(oop* p); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p); + inline void do_oop_nv(narrowOop* p); bool do_header() { return false; } Prefetch::style prefetch_style() { return Prefetch::do_write; @@ -113,19 +127,27 @@ }; class FilteringClosure: public OopClosure { - HeapWord* _boundary; + private: + HeapWord* _boundary; OopClosure* _cl; -public: + protected: + template inline void do_oop_work(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + if ((HeapWord*)obj < _boundary) { + _cl->do_oop(p); + } + } + } + public: FilteringClosure(HeapWord* boundary, OopClosure* cl) : OopClosure(cl->_ref_processor), _boundary(boundary), _cl(cl) {} - void do_oop(oop* p); - void do_oop_nv(oop* p) { - oop obj = *p; - if ((HeapWord*)obj < _boundary && obj != NULL) { - _cl->do_oop(p); - } - } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p) { FilteringClosure::do_oop_work(p); } + inline void do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); } bool do_header() { return false; } }; @@ -134,19 +156,26 @@ // OopsInGenClosure -- weak references are processed all // at once, with no notion of which generation they were in. class ScanWeakRefClosure: public OopClosure { -protected: - DefNewGeneration* _g; - HeapWord* _boundary; -public: + protected: + DefNewGeneration* _g; + HeapWord* _boundary; + template inline void do_oop_work(T* p); + public: ScanWeakRefClosure(DefNewGeneration* g); - void do_oop(oop* p); - void do_oop_nv(oop* p); + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); + inline void do_oop_nv(oop* p); + inline void do_oop_nv(narrowOop* p); }; class VerifyOopClosure: public OopClosure { -public: - void do_oop(oop* p) { - guarantee((*p)->is_oop_or_null(), "invalid oop"); + protected: + template inline void do_oop_work(T* p) { + oop obj = oopDesc::load_decode_heap_oop(p); + guarantee(obj->is_oop_or_null(), "invalid oop"); } + public: + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); static VerifyOopClosure verify_oop; }; --- old/hotspot/src/share/vm/memory/genOopClosures.inline.hpp 2009-08-01 04:12:14.131808656 +0100 +++ new/hotspot/src/share/vm/memory/genOopClosures.inline.hpp 2009-08-01 04:12:14.047835227 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)genOopClosures.inline.hpp 1.40 07/05/29 09:44:15 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -41,29 +41,38 @@ } } -inline void OopsInGenClosure::do_barrier(oop* p) { +template inline void OopsInGenClosure::do_barrier(T* p) { assert(generation()->is_in_reserved(p), "expected ref in generation"); - oop obj = *p; - assert(obj != NULL, "expected non-null object"); + assert(!oopDesc::is_null(*p), "expected non-null object"); + oop obj = oopDesc::load_decode_heap_oop_not_null(p); // If p points to a younger generation, mark the card. if ((HeapWord*)obj < _gen_boundary) { _rs->inline_write_ref_field_gc(p, obj); } } -// NOTE! Any changes made here should also be made -// in FastScanClosure::do_oop(); -inline void ScanClosure::do_oop(oop* p) { +inline void OopsInGenClosure::par_do_barrier(oop* p) { + assert(generation()->is_in_reserved(p), "expected ref in generation"); oop obj = *p; + assert(obj != NULL, "expected non-null object"); + // If p points to a younger generation, mark the card. + if ((HeapWord*)obj < gen_boundary()) { + rs()->write_ref_field_gc_par(p, obj); + } +} + +// NOTE! Any changes made here should also be made +// in FastScanClosure::do_oop_work() +template inline void ScanClosure::do_oop_work(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); // Should we copy the obj? - if (obj != NULL) { + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); if ((HeapWord*)obj < _boundary) { assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?"); - if (obj->is_forwarded()) { - *p = obj->forwardee(); - } else { - *p = _g->copy_to_survivor_space(obj, p); - } + oop new_obj = obj->is_forwarded() ? obj->forwardee() + : _g->copy_to_survivor_space(obj); + oopDesc::encode_store_heap_oop_not_null(p, new_obj); } if (_gc_barrier) { // Now call parent closure @@ -72,23 +81,21 @@ } } -inline void ScanClosure::do_oop_nv(oop* p) { - ScanClosure::do_oop(p); -} +inline void ScanClosure::do_oop_nv(oop* p) { ScanClosure::do_oop_work(p); } +inline void ScanClosure::do_oop_nv(narrowOop* p) { ScanClosure::do_oop_work(p); } // NOTE! Any changes made here should also be made -// in ScanClosure::do_oop(); -inline void FastScanClosure::do_oop(oop* p) { - oop obj = *p; +// in ScanClosure::do_oop_work() +template inline void FastScanClosure::do_oop_work(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); // Should we copy the obj? - if (obj != NULL) { + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); if ((HeapWord*)obj < _boundary) { assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?"); - if (obj->is_forwarded()) { - *p = obj->forwardee(); - } else { - *p = _g->copy_to_survivor_space(obj, p); - } + oop new_obj = obj->is_forwarded() ? obj->forwardee() + : _g->copy_to_survivor_space(obj); + oopDesc::encode_store_heap_oop_not_null(p, new_obj); if (_gc_barrier) { // Now call parent closure do_barrier(p); @@ -97,26 +104,22 @@ } } -inline void FastScanClosure::do_oop_nv(oop* p) { - FastScanClosure::do_oop(p); -} +inline void FastScanClosure::do_oop_nv(oop* p) { FastScanClosure::do_oop_work(p); } +inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); } // Note similarity to ScanClosure; the difference is that // the barrier set is taken care of outside this closure. 
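The rewritten scan closures in this file load a possibly-compressed oop, decode it to a full address, and encode the updated value back when storing. Compressed oops are typically a scaled offset from a heap base; a toy encode/decode pair follows, with an invented base and shift (HotSpot chooses these at VM startup):

    #include <cassert>
    #include <cstdint>

    static const uint64_t kHeapBase = 0x100000000ULL;
    static const unsigned kShift    = 3;   // 8-byte object alignment

    static uint32_t encode(uint64_t addr) {
      return (uint32_t)((addr - kHeapBase) >> kShift);
    }
    static uint64_t decode(uint32_t narrow) {
      return kHeapBase + ((uint64_t)narrow << kShift);
    }

    int main() {
      uint64_t obj = kHeapBase + 0x12345678ULL;  // 8-byte-aligned offset
      assert(decode(encode(obj)) == obj);
      return 0;
    }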
-inline void ScanWeakRefClosure::do_oop(oop* p) { - oop obj = *p; - assert (obj != NULL, "null weak reference?"); +template inline void ScanWeakRefClosure::do_oop_work(T* p) { + assert(!oopDesc::is_null(*p), "null weak reference?"); + oop obj = oopDesc::load_decode_heap_oop_not_null(p); // weak references are sometimes scanned twice; must check // that to-space doesn't already contain this object if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) { - if (obj->is_forwarded()) { - *p = obj->forwardee(); - } else { - *p = _g->copy_to_survivor_space(obj, p); - } + oop new_obj = obj->is_forwarded() ? obj->forwardee() + : _g->copy_to_survivor_space(obj); + oopDesc::encode_store_heap_oop_not_null(p, new_obj); } } -inline void ScanWeakRefClosure::do_oop_nv(oop* p) { - ScanWeakRefClosure::do_oop(p); -} +inline void ScanWeakRefClosure::do_oop_nv(oop* p) { ScanWeakRefClosure::do_oop_work(p); } +inline void ScanWeakRefClosure::do_oop_nv(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); } --- old/hotspot/src/share/vm/memory/genRemSet.hpp 2009-08-01 04:12:14.986694090 +0100 +++ new/hotspot/src/share/vm/memory/genRemSet.hpp 2009-08-01 04:12:14.908661561 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)genRemSet.hpp 1.23 07/05/05 17:05:50 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,7 @@ }; GenRemSet(BarrierSet * bs) : _bs(bs) {} + GenRemSet() : _bs(NULL) {} virtual Name rs_kind() = 0; @@ -56,6 +57,9 @@ // Return the barrier set associated with "this." BarrierSet* bs() { return _bs; } + // Set the barrier set. + void set_bs(BarrierSet* bs) { _bs = bs; } + // Do any (sequential) processing necessary to prepare for (possibly // "parallel", if that arg is true) calls to younger_refs_iterate. virtual void prepare_for_younger_refs_iterate(bool parallel) = 0; @@ -71,13 +75,13 @@ // This method is used to notify the remembered set that "new_val" has // been written into "field" by the garbage collector. - void write_ref_field_gc(oop* field, oop new_val); + void write_ref_field_gc(void* field, oop new_val); protected: - virtual void write_ref_field_gc_work(oop* field, oop new_val) = 0; + virtual void write_ref_field_gc_work(void* field, oop new_val) = 0; public: // A version of the above suitable for use by parallel collectors. - virtual void write_ref_field_gc_par(oop* field, oop new_val) = 0; + virtual void write_ref_field_gc_par(void* field, oop new_val) = 0; // Resize one of the regions covered by the remembered set. virtual void resize_covered_region(MemRegion new_region) = 0; @@ -94,9 +98,16 @@ virtual void verify() = 0; // Verify that the remembered set has no entries for - // the heap interval denoted by mr. - virtual void verify_empty(MemRegion mr) = 0; - + // the heap interval denoted by mr. If there are any + // alignment constraints on the remembered set, only the + // part of the region that is aligned is checked. + // + // alignment boundaries + // +--------+-------+--------+-------+ + // [ region mr ) + // [ part checked ) + virtual void verify_aligned_region_empty(MemRegion mr) = 0; + // If appropriate, print some information about the remset on "tty". virtual void print() {} @@ -112,8 +123,11 @@ // Informs the RS that refs in the given "mr" may have changed // arbitrarily, and therefore may contain old-to-young pointers. 
- virtual void invalidate(MemRegion mr) = 0; - + // If "whole heap" is true, then this invalidation is part of an + // invalidation of the whole heap, which an implementation might + // handle differently than that of a sub-part of the heap. + virtual void invalidate(MemRegion mr, bool whole_heap = false) = 0; + // Informs the RS that refs in this generation // may have changed arbitrarily, and therefore may contain // old-to-young pointers in arbitrary locations. The parameter --- old/hotspot/src/share/vm/memory/genRemSet.inline.hpp 2009-08-01 04:12:15.815274231 +0100 +++ new/hotspot/src/share/vm/memory/genRemSet.inline.hpp 2009-08-01 04:12:15.746644189 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)genRemSet.inline.hpp 1.10 07/05/05 17:05:50 JVM" #endif /* - * Copyright 2001 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ // performance-critical call when the rem set is the most common // card-table kind. -void GenRemSet::write_ref_field_gc(oop* field, oop new_val) { +void GenRemSet::write_ref_field_gc(void* field, oop new_val) { if (kind() == CardTableModRef) { ((CardTableRS*)this)->inline_write_ref_field_gc(field, new_val); } else { --- old/hotspot/src/share/vm/memory/generation.cpp 2009-08-01 04:12:16.658907705 +0100 +++ new/hotspot/src/share/vm/memory/generation.cpp 2009-08-01 04:12:16.574961014 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)generation.cpp 1.245 07/05/05 17:05:51 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,6 +35,12 @@ vm_exit_during_initialization("Could not reserve enough space for " "object heap"); } + // Mangle all of the initial generation. + if (ZapUnusedHeapArea) { + MemRegion mangle_region((HeapWord*)_virtual_space.low(), + (HeapWord*)_virtual_space.high()); + SpaceMangler::mangle_region(mangle_region); + } _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary()); } @@ -174,7 +180,7 @@ } // Ignores "ref" and calls allocate(). -oop Generation::promote(oop obj, size_t obj_size, oop* ref) { +oop Generation::promote(oop obj, size_t obj_size) { assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); #ifndef PRODUCT @@ -189,7 +195,7 @@ return oop(result); } else { GenCollectedHeap* gch = GenCollectedHeap::heap(); - return gch->handle_failed_promotion(this, obj, obj_size, ref); + return gch->handle_failed_promotion(this, obj, obj_size); } } @@ -376,6 +382,41 @@ } } +bool CardGeneration::expand(size_t bytes, size_t expand_bytes) { + assert_locked_or_safepoint(Heap_lock); + if (bytes == 0) { + return true; // That's what grow_by(0) would return + } + size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes); + if (aligned_bytes == 0){ + // The alignment caused the number of bytes to wrap. An expand_by(0) will + // return true with the implication that an expansion was done when it + // was not. A call to expand implies a best effort to expand by "bytes" + // but not a guarantee. Align down to give a best effort. This is likely + // the most that the generation can expand since it has some capacity to + // start with.
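CardGeneration::expand above tries the larger expand_bytes request first, then the exact request, and finally whatever is left of the reservation. The fallback order is easy to see in isolation; the sketch below stubs out grow_by/grow_to_reserved with a fixed reserve, so the numbers and helpers are invented for the example:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    static const size_t kReserved = 64 * 1024;
    static size_t committed = 16 * 1024;

    static size_t align_up(size_t n, size_t a) { return (n + a - 1) / a * a; }
    static bool grow_by(size_t bytes) {
      if (committed + bytes > kReserved) return false;
      committed += bytes;
      return true;
    }
    static bool grow_to_reserved() { committed = kReserved; return true; }

    // Tiered fallback: generous request, then exact request, then the rest.
    static bool expand(size_t bytes, size_t expand_bytes, size_t page) {
      size_t aligned_bytes        = align_up(bytes, page);
      size_t aligned_expand_bytes = align_up(expand_bytes, page);
      bool success = false;
      if (aligned_expand_bytes > aligned_bytes) success = grow_by(aligned_expand_bytes);
      if (!success)                             success = grow_by(aligned_bytes);
      if (!success)                             success = grow_to_reserved();
      return success;
    }

    int main() {
      bool ok = expand(3000, 60 * 1024, 4096);  // 60K doesn't fit; 4K (aligned) does
      assert(ok && committed == 20 * 1024);
      std::printf("committed=%zu\n", committed);
      return 0;
    }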
+ aligned_bytes = ReservedSpace::page_align_size_down(bytes); + } + size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); + bool success = false; + if (aligned_expand_bytes > aligned_bytes) { + success = grow_by(aligned_expand_bytes); + } + if (!success) { + success = grow_by(aligned_bytes); + } + if (!success) { + success = grow_to_reserved(); + } + if (PrintGC && Verbose) { + if (success && GC_locker::is_active()) { + gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead"); + } + } + + return success; +} + // No young generation references, clear this generation's cards. void CardGeneration::clear_remembered_set() { @@ -438,25 +479,9 @@ } } -void OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) { +bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) { GCMutexLocker x(ExpandHeap_lock); - size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes); - size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); - bool success = false; - if (aligned_expand_bytes > aligned_bytes) { - success = grow_by(aligned_expand_bytes); - } - if (!success) { - success = grow_by(aligned_bytes); - } - if (!success) { - grow_to_reserved(); - } - if (GC_locker::is_active()) { - if (PrintGC && Verbose) { - gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead"); - } - } + return CardGeneration::expand(bytes, expand_bytes); } @@ -508,8 +533,11 @@ _bts->resize(new_word_size); // Fix for bug #4668531 - MemRegion mangle_region(_the_space->end(), (HeapWord*)_virtual_space.high()); - _the_space->mangle_region(mangle_region); + if (ZapUnusedHeapArea) { + MemRegion mangle_region(_the_space->end(), + (HeapWord*)_virtual_space.high()); + SpaceMangler::mangle_region(mangle_region); + } // Expand space -- also expands space's BOT // (which uses (part of) shared array above) @@ -625,6 +653,14 @@ // update the generation and space performance counters update_counters(); + if (ZapUnusedHeapArea) { + the_space()->check_mangled_unused_area_complete(); + } +} + +void OneContigSpaceCardGeneration::record_spaces_top() { + assert(ZapUnusedHeapArea, "Not mangling unused space"); + the_space()->set_top_for_allocations(); } void OneContigSpaceCardGeneration::verify(bool allow_dirty) { --- old/hotspot/src/share/vm/memory/generation.hpp 2009-08-01 04:12:17.525858986 +0100 +++ new/hotspot/src/share/vm/memory/generation.hpp 2009-08-01 04:12:17.436262759 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)generation.hpp 1.195 07/05/17 15:55:02 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -298,13 +298,7 @@ // // The "obj_size" argument is just obj->size(), passed along so the caller can // avoid repeating the virtual call to retrieve it. - // - // The "ref" argument, if non-NULL, is the address of some reference to "obj" - // (that is "*ref == obj"); some generations may use this information to, for - // example, influence placement decisions. - // - // The default implementation ignores "ref" and calls allocate(). 
- virtual oop promote(oop obj, size_t obj_size, oop* ref); + virtual oop promote(oop obj, size_t obj_size); // Thread "thread_num" (0 <= i < ParalleGCThreads) wants to promote // object "obj", whose original mark word was "m", and whose size is @@ -385,6 +379,9 @@ // The default is to do nothing. virtual void gc_epilogue(bool full) {}; + // Save the high water marks for the used space in a generation. + virtual void record_spaces_top() {}; + // Some generations may need to be "fixed-up" after some allocation // activity to make them parsable again. The default is to do nothing. virtual void ensure_parsability() {}; @@ -485,6 +482,10 @@ virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor, size_t max_alloc_words) {} + // Give each generation an opportunity to do clean up for any + // contributed scratch. + virtual void reset_scratch() {}; + // When an older generation has been collected, and perhaps resized, // this method will be invoked on all younger generations (from older to // younger), allowing them to resize themselves as appropriate. @@ -608,11 +609,21 @@ public: + // Attempt to expand the generation by "bytes". Expand by at a + // minimum "expand_bytes". Return true if some amount (not + // necessarily the full "bytes") was done. + virtual bool expand(size_t bytes, size_t expand_bytes); + virtual void clear_remembered_set(); virtual void invalidate_remembered_set(); virtual void prepare_for_verify(); + + // Grow generation with specified size (returns false if unable to grow) + virtual bool grow_by(size_t bytes) = 0; + // Grow generation to reserved size. + virtual bool grow_to_reserved() = 0; }; // OneContigSpaceCardGeneration models a heap of old objects contained in a single @@ -633,14 +644,14 @@ // and after last GC. // Grow generation with specified size (returns false if unable to grow) - bool grow_by(size_t bytes); + virtual bool grow_by(size_t bytes); // Grow generation to reserved size. - bool grow_to_reserved(); + virtual bool grow_to_reserved(); // Shrink generation with specified size (returns false if unable to shrink) void shrink_by(size_t bytes); // Allocation failure - void expand(size_t bytes, size_t expand_bytes); + virtual bool expand(size_t bytes, size_t expand_bytes); void shrink(size_t bytes); // Accessing spaces @@ -708,6 +719,8 @@ virtual void gc_epilogue(bool full); + virtual void record_spaces_top(); + virtual void verify(bool allow_dirty); virtual void print_on(outputStream* st) const; }; --- old/hotspot/src/share/vm/memory/heap.cpp 2009-08-01 04:12:18.451811773 +0100 +++ new/hotspot/src/share/vm/memory/heap.cpp 2009-08-01 04:12:18.374765178 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)heap.cpp 1.55 07/10/04 10:49:31 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -105,8 +105,9 @@ _log2_segment_size = exact_log2(segment_size); // Reserve and initialize space for _memory. - const size_t page_size = os::page_size_for_region(committed_size, - reserved_size, 8); + const size_t page_size = os::can_execute_large_page_memory() ? 
+ os::page_size_for_region(committed_size, reserved_size, 8) : + os::vm_page_size(); const size_t granularity = os::vm_allocation_granularity(); const size_t r_align = MAX2(page_size, granularity); const size_t r_size = align_size_up(reserved_size, r_align); @@ -114,7 +115,7 @@ const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 : MAX2(page_size, granularity); - ReservedSpace rs(r_size, rs_align, false); + ReservedSpace rs(r_size, rs_align, rs_align > 0); os::trace_page_sizes("code heap", committed_size, reserved_size, page_size, rs.base(), rs.size()); if (!_memory.initialize(rs, c_size)) { --- old/hotspot/src/share/vm/memory/heapInspection.cpp 2009-08-01 04:12:19.271080761 +0100 +++ new/hotspot/src/share/vm/memory/heapInspection.cpp 2009-08-01 04:12:19.202235571 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)heapInspection.cpp 1.21 07/05/29 09:44:16 JVM" #endif /* - * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,7 +68,7 @@ name = ""; } // simplify the formatting (ILP32 vs LP64) - always cast the numbers to 64-bit - st->print_cr("%13" FORMAT64_MODIFIER "d %13" FORMAT64_MODIFIER "u %s", + st->print_cr(INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13) " %s", (jlong) _instance_count, (julong) _instance_words * HeapWordSize, name); @@ -83,7 +83,10 @@ elt = elt->next(); } elt = new KlassInfoEntry(k, list()); - set_list(elt); + // We may be out of space to allocate the new entry. + if (elt != NULL) { + set_list(elt); + } return elt; } @@ -106,21 +109,25 @@ } KlassInfoTable::KlassInfoTable(int size, HeapWord* ref) { - _size = size; + _size = 0; _ref = ref; - _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, _size); - - for (int index = 0; index < _size; index++) { - _buckets[index].initialize(); + _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size); + if (_buckets != NULL) { + _size = size; + for (int index = 0; index < _size; index++) { + _buckets[index].initialize(); + } } } KlassInfoTable::~KlassInfoTable() { - for (int index = 0; index < _size; index++) { - _buckets[index].empty(); + if (_buckets != NULL) { + for (int index = 0; index < _size; index++) { + _buckets[index].empty(); + } + FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets); + _size = 0; } - FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets); - _size = 0; } uint KlassInfoTable::hash(klassOop p) { @@ -130,19 +137,32 @@ KlassInfoEntry* KlassInfoTable::lookup(const klassOop k) { uint idx = hash(k) % _size; + assert(_buckets != NULL, "Allocation failure should have been caught"); KlassInfoEntry* e = _buckets[idx].lookup(k); - assert(k == e->klass(), "must be equal"); + // Lookup may fail if this is a new klass for which we + // could not allocate space for an new entry. + assert(e == NULL || k == e->klass(), "must be equal"); return e; } -void KlassInfoTable::record_instance(const oop obj) { +// Return false if the entry could not be recorded on account +// of running out of space required to create a new entry. +bool KlassInfoTable::record_instance(const oop obj) { klassOop k = obj->klass(); KlassInfoEntry* elt = lookup(k); - elt->set_count(elt->count() + 1); - elt->set_words(elt->words() + obj->size()); + // elt may be NULL if it's a new klass for which we + // could not allocate space for a new entry in the hashtable. 
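The KlassInfoTable changes above make the histogram tolerate C-heap exhaustion: construction may leave the table unusable, record_instance reports failure instead of crashing, and the caller counts what it had to skip. A sketch of that contract, with std::map and std::nothrow standing in for the C-heap hashtable used by the real code:

    #include <cstdio>
    #include <map>
    #include <new>
    #include <string>

    class ToyHistogram {
      std::map<std::string, long>* _counts;
     public:
      ToyHistogram() : _counts(new (std::nothrow) std::map<std::string, long>()) {}
      ~ToyHistogram() { delete _counts; }
      bool allocation_failed() const { return _counts == NULL; }
      bool record_instance(const std::string& klass) {
        if (_counts == NULL) return false;   // degrade gracefully
        ++(*_counts)[klass];
        return true;
      }
    };

    int main() {
      ToyHistogram h;
      long missed = 0;
      const char* objs[] = { "java/lang/String", "java/lang/Object", "java/lang/String" };
      for (int i = 0; i < 3; i++) {
        if (!h.record_instance(objs[i])) missed++;
      }
      if (missed != 0) {
        std::printf("WARNING: undercounted %ld instances\n", missed);
      }
      return 0;
    }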
+ if (elt != NULL) { + elt->set_count(elt->count() + 1); + elt->set_words(elt->words() + obj->size()); + return true; + } else { + return false; + } } void KlassInfoTable::iterate(KlassInfoClosure* cic) { + assert(_size == 0 || _buckets != NULL, "Allocation failure should have been caught"); for (int index = 0; index < _size; index++) { _buckets[index].iterate(cic); } @@ -179,7 +199,7 @@ total += elements()->at(i)->count(); totalw += elements()->at(i)->words(); } - st->print_cr("Total %13" FORMAT64_MODIFIER "d %13" FORMAT64_MODIFIER "u", + st->print_cr("Total " INT64_FORMAT_W(13) " " UINT64_FORMAT_W(13), total, totalw * HeapWordSize); } @@ -202,24 +222,33 @@ class RecordInstanceClosure : public ObjectClosure { private: KlassInfoTable* _cit; + size_t _missed_count; public: - RecordInstanceClosure(KlassInfoTable* cit) : _cit(cit) {} + RecordInstanceClosure(KlassInfoTable* cit) : + _cit(cit), _missed_count(0) {} void do_object(oop obj) { - _cit->record_instance(obj); + if (!_cit->record_instance(obj)) { + _missed_count++; + } } -}; + + size_t missed_count() { return _missed_count; } +}; void HeapInspection::heap_inspection(outputStream* st) { ResourceMark rm; HeapWord* ref; CollectedHeap* heap = Universe::heap(); + bool is_shared_heap = false; switch (heap->kind()) { + case CollectedHeap::G1CollectedHeap: case CollectedHeap::GenCollectedHeap: { - GenCollectedHeap* gch = (GenCollectedHeap*)heap; - gch->gc_prologue(false /* !full */); // get any necessary locks - ref = gch->perm_gen()->used_region().start(); + is_shared_heap = true; + SharedHeap* sh = (SharedHeap*)heap; + sh->gc_prologue(false /* !full */); // get any necessary locks, etc. + ref = sh->perm_gen()->used_region().start(); break; } #ifndef SERIALGC @@ -233,26 +262,37 @@ ShouldNotReachHere(); // Unexpected heap kind for this op } // Collect klass instance info - - // Iterate over objects in the heap KlassInfoTable cit(KlassInfoTable::cit_size, ref); - RecordInstanceClosure ric(&cit); - Universe::heap()->object_iterate(&ric); - - // Sort and print klass instance info - KlassInfoHisto histo("\n" - " num #instances #bytes class name\n" - "----------------------------------------------", - KlassInfoHisto::histo_initial_size); - HistoClosure hc(&histo); - cit.iterate(&hc); - histo.sort(); - histo.print_on(st); + if (!cit.allocation_failed()) { + // Iterate over objects in the heap + RecordInstanceClosure ric(&cit); + Universe::heap()->object_iterate(&ric); + + // Report if certain classes are not counted because of + // running out of C-heap for the histogram. + size_t missed_count = ric.missed_count(); + if (missed_count != 0) { + st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT + " total instances in data below", + missed_count); + } + // Sort and print klass instance info + KlassInfoHisto histo("\n" + " num #instances #bytes class name\n" + "----------------------------------------------", + KlassInfoHisto::histo_initial_size); + HistoClosure hc(&histo); + cit.iterate(&hc); + histo.sort(); + histo.print_on(st); + } else { + st->print_cr("WARNING: Ran out of C-heap; histogram not generated"); + } st->flush(); - if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap) { - GenCollectedHeap* gch = GenCollectedHeap::heap(); - gch->gc_epilogue(false /* !full */); // release all acquired locks + if (is_shared_heap) { + SharedHeap* sh = (SharedHeap*)heap; + sh->gc_epilogue(false /* !full */); // release all acquired locks, etc. 
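heap_inspection above brackets the object walk between gc_prologue() and gc_epilogue() calls on the shared heap; the patch pairs them manually. One common C++ way to express the same acquire/release pairing is a small RAII guard, sketched here with a stand-in heap type rather than HotSpot's classes:

    #include <iostream>

    struct ToyHeap {
      void gc_prologue()    { std::cout << "acquire heap locks\n"; }
      void gc_epilogue()    { std::cout << "release heap locks\n"; }
      void object_iterate() { std::cout << "walk all objects\n"; }
    };

    // The epilogue runs even if the walk returns early.
    class HeapIterationScope {
      ToyHeap& _heap;
     public:
      explicit HeapIterationScope(ToyHeap& h) : _heap(h) { _heap.gc_prologue(); }
      ~HeapIterationScope() { _heap.gc_epilogue(); }
    };

    int main() {
      ToyHeap heap;
      {
        HeapIterationScope scope(heap);
        heap.object_iterate();
      }
      return 0;
    }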
} } --- old/hotspot/src/share/vm/memory/heapInspection.hpp 2009-08-01 04:12:20.153884571 +0100 +++ new/hotspot/src/share/vm/memory/heapInspection.hpp 2009-08-01 04:12:20.072939086 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)heapInspection.hpp 1.17 07/07/02 11:47:11 JVM" #endif /* - * Copyright 2002-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -101,8 +101,9 @@ }; KlassInfoTable(int size, HeapWord* ref); ~KlassInfoTable(); - void record_instance(const oop obj); + bool record_instance(const oop obj); void iterate(KlassInfoClosure* cic); + bool allocation_failed() { return _buckets == NULL; } }; class KlassInfoHisto : public StackObj { --- old/hotspot/src/share/vm/memory/iterator.hpp 2009-08-01 04:12:20.989685751 +0100 +++ new/hotspot/src/share/vm/memory/iterator.hpp 2009-08-01 04:12:20.908304667 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)iterator.hpp 1.38 07/05/05 17:05:52 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,15 +29,31 @@ class ReferenceProcessor; +// Closure provides abortability. + +class Closure : public StackObj { + protected: + bool _abort; + void set_abort() { _abort = true; } + public: + Closure() : _abort(false) {} + // A subtype can use this mechanism to indicate to some iterator mapping + // functions that the iteration should cease. + bool abort() { return _abort; } + void clear_abort() { _abort = false; } +}; + // OopClosure is used for iterating through roots (oop*) -class OopClosure : public StackObj { +class OopClosure : public Closure { public: ReferenceProcessor* _ref_processor; OopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { } OopClosure() : _ref_processor(NULL) { } virtual void do_oop(oop* o) = 0; virtual void do_oop_v(oop* o) { do_oop(o); } + virtual void do_oop(narrowOop* o) = 0; + virtual void do_oop_v(narrowOop* o) { do_oop(o); } // In support of post-processing of weak links of KlassKlass objects; // see KlassKlass::oop_oop_iterate(). @@ -55,12 +71,17 @@ // Controls how prefetching is done for invocations of this closure. Prefetch::style prefetch_style() { // Note that this is non-virtual. return Prefetch::do_none; - } + } + + // True iff this closure may be safely applied more than once to an oop + // location without an intervening "major reset" (like the end of a GC). + virtual bool idempotent() { return false; } + virtual bool apply_to_weak_ref_discovered_field() { return false; } }; // ObjectClosure is used for iterating through an object space -class ObjectClosure : public StackObj { +class ObjectClosure : public Closure { public: // Called for each object. virtual void do_object(oop obj) = 0; --- old/hotspot/src/share/vm/memory/modRefBarrierSet.hpp 2009-08-01 04:12:21.817332860 +0100 +++ new/hotspot/src/share/vm/memory/modRefBarrierSet.hpp 2009-08-01 04:12:21.735880469 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)modRefBarrierSet.hpp 1.16 07/05/05 17:05:53 JVM" #endif /* - * Copyright 2000-2002 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,25 +34,32 @@ class ModRefBarrierSet: public BarrierSet { public: + + ModRefBarrierSet() { _kind = BarrierSet::ModRef; } + + bool is_a(BarrierSet::Name bsn) { + return bsn == BarrierSet::ModRef; + } + // Barriers only on ref writes. bool has_read_ref_barrier() { return false; } bool has_read_prim_barrier() { return false; } bool has_write_ref_barrier() { return true; } bool has_write_prim_barrier() { return false; } - - bool read_ref_needs_barrier(oop* field) { return false; } + + bool read_ref_needs_barrier(void* field) { return false; } bool read_prim_needs_barrier(HeapWord* field, size_t bytes) { return false; } - virtual bool write_ref_needs_barrier(oop* field, oop new_val) = 0; + virtual bool write_ref_needs_barrier(void* field, oop new_val) = 0; bool write_prim_needs_barrier(HeapWord* field, size_t bytes, juint val1, juint val2) { return false; } void write_prim_field(oop obj, size_t offset, size_t bytes, juint val1, juint val2) {} - void read_ref_field(oop* field) {} + void read_ref_field(void* field) {} void read_prim_field(HeapWord* field, size_t bytes) {} protected: - virtual void write_ref_field_work(oop* field, oop new_val) = 0; + virtual void write_ref_field_work(void* field, oop new_val) = 0; public: void write_prim_field(HeapWord* field, size_t bytes, juint val1, juint val2) {} @@ -88,8 +95,10 @@ bool clear = false, bool before_save_marks = false) = 0; - // Causes all refs in "mr" to be assumed to be modified. - virtual void invalidate(MemRegion mr) = 0; + // Causes all refs in "mr" to be assumed to be modified. If "whole_heap" + // is true, the caller asserts that the entire heap is being invalidated, + // which may admit an optimized implementation for some barriers. + virtual void invalidate(MemRegion mr, bool whole_heap = false) = 0; // The caller guarantees that "mr" contains no references. (Perhaps it's // objects have been moved elsewhere.) --- old/hotspot/src/share/vm/memory/permGen.cpp 2009-08-01 04:12:22.749480306 +0100 +++ new/hotspot/src/share/vm/memory/permGen.cpp 2009-08-01 04:12:22.664420460 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)permGen.cpp 1.54 07/05/29 09:44:16 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,70 @@ #include "incls/_precompiled.incl" #include "incls/_permGen.cpp.incl" +HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) { + GCCause::Cause next_cause = GCCause::_permanent_generation_full; + GCCause::Cause prev_cause = GCCause::_no_gc; + unsigned int gc_count_before, full_gc_count_before; + HeapWord* obj; + + for (;;) { + { + MutexLocker ml(Heap_lock); + if ((obj = gen->allocate(size, false)) != NULL) { + return obj; + } + if (gen->capacity() < _capacity_expansion_limit || + prev_cause != GCCause::_no_gc) { + obj = gen->expand_and_allocate(size, false); + } + if (obj != NULL || prev_cause == GCCause::_last_ditch_collection) { + return obj; + } + if (GC_locker::is_active_and_needs_gc()) { + // If this thread is not in a jni critical section, we stall + // the requestor until the critical section has cleared and + // GC allowed. 
When the critical section clears, a GC is + // initiated by the last thread exiting the critical section; so + // we retry the allocation sequence from the beginning of the loop, + // rather than causing more, now probably unnecessary, GC attempts. + JavaThread* jthr = JavaThread::current(); + if (!jthr->in_critical()) { + MutexUnlocker mul(Heap_lock); + // Wait for JNI critical section to be exited + GC_locker::stall_until_clear(); + continue; + } else { + if (CheckJNICalls) { + fatal("Possible deadlock due to allocating while" + " in jni critical section"); + } + return NULL; + } + } + // Read the GC count while holding the Heap_lock + gc_count_before = SharedHeap::heap()->total_collections(); + full_gc_count_before = SharedHeap::heap()->total_full_collections(); + } + + // Give up heap lock above, VMThread::execute below gets it back + VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before, + next_cause); + VMThread::execute(&op); + if (!op.prologue_succeeded() || op.gc_locked()) { + assert(op.result() == NULL, "must be NULL if gc_locked() is true"); + continue; // retry and/or stall as necessary + } + obj = op.result(); + assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj), + "result not in heap"); + if (obj != NULL) { + return obj; + } + prev_cause = next_cause; + next_cause = GCCause::_last_ditch_collection; + } +} + CompactingPermGen::CompactingPermGen(ReservedSpace rs, ReservedSpace shared_rs, size_t initial_byte_size, @@ -47,40 +111,7 @@ } HeapWord* CompactingPermGen::mem_allocate(size_t size) { - MutexLocker ml(Heap_lock); - HeapWord* obj = _gen->allocate(size, false); - bool tried_collection = false; - bool tried_expansion = false; - while (obj == NULL) { - if (_gen->capacity() >= _capacity_expansion_limit || tried_expansion) { - // Expansion limit reached, try collection before expanding further - // For now we force a full collection, this could be changed - SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full); - obj = _gen->allocate(size, false); - tried_collection = true; - tried_expansion = false; // ... following the collection: - // the collection may have shrunk the space. - } - if (obj == NULL && !tried_expansion) { - obj = _gen->expand_and_allocate(size, false); - tried_expansion = true; - } - if (obj == NULL && tried_collection && tried_expansion) { - // We have not been able to allocate despite a collection and - // an attempted space expansion. We now make a last-ditch collection - // attempt that will try to reclaim as much space as possible (for - // example by aggressively clearing all soft refs). - SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection); - obj = _gen->allocate(size, false); - if (obj == NULL) { - // An expansion attempt is necessary since the previous - // collection may have shrunk the space. - obj = _gen->expand_and_allocate(size, false); - } - break; - } - } - return obj; + return mem_allocate_in_gen(size, _gen); } void CompactingPermGen::compute_new_size() { --- old/hotspot/src/share/vm/memory/permGen.hpp 2009-08-01 04:12:23.643591235 +0100 +++ new/hotspot/src/share/vm/memory/permGen.hpp 2009-08-01 04:12:23.571997727 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)permGen.hpp 1.38 07/05/29 09:44:16 JVM" #endif /* - * Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -41,6 +41,8 @@ size_t _capacity_expansion_limit; // maximum expansion allowed without a // full gc occuring + HeapWord* mem_allocate_in_gen(size_t size, Generation* gen); + public: enum Name { MarkSweepCompact, MarkSweep, ConcurrentMarkSweep --- old/hotspot/src/share/vm/memory/referencePolicy.cpp 2009-08-01 04:12:24.462043901 +0100 +++ new/hotspot/src/share/vm/memory/referencePolicy.cpp 2009-08-01 04:12:24.393645395 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)referencePolicy.cpp 1.12 07/05/05 17:05:54 JVM" #endif /* - * Copyright 2000-2003 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,11 @@ # include "incls/_referencePolicy.cpp.incl" LRUCurrentHeapPolicy::LRUCurrentHeapPolicy() { + setup(); +} + +// Capture state (of-the-VM) information needed to evaluate the policy +void LRUCurrentHeapPolicy::setup() { _max_interval = (Universe::get_heap_free_at_last_gc() / M) * SoftRefLRUPolicyMSPerMB; assert(_max_interval >= 0,"Sanity check"); } @@ -50,6 +55,11 @@ /////////////////////// MaxHeap ////////////////////// LRUMaxHeapPolicy::LRUMaxHeapPolicy() { + setup(); +} + +// Capture state (of-the-VM) information needed to evaluate the policy +void LRUMaxHeapPolicy::setup() { size_t max_heap = MaxHeapSize; max_heap -= Universe::get_heap_used_at_last_gc(); max_heap /= M; --- old/hotspot/src/share/vm/memory/referencePolicy.hpp 2009-08-01 04:12:25.246797309 +0100 +++ new/hotspot/src/share/vm/memory/referencePolicy.hpp 2009-08-01 04:12:25.178436856 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)referencePolicy.hpp 1.11 07/05/05 17:05:54 JVM" #endif /* - * Copyright 2000 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,9 +29,11 @@ // should be cleared. -class ReferencePolicy : public ResourceObj { +class ReferencePolicy : public CHeapObj { public: virtual bool should_clear_reference(oop p) { ShouldNotReachHere(); return true; } + // Capture state (of-the-VM) information needed to evaluate the policy + virtual void setup() { /* do nothing */ } }; class NeverClearPolicy : public ReferencePolicy { @@ -51,6 +53,8 @@ public: LRUCurrentHeapPolicy(); + // Capture state (of-the-VM) information needed to evaluate the policy + void setup(); bool should_clear_reference(oop p); }; @@ -61,5 +65,7 @@ public: LRUMaxHeapPolicy(); + // Capture state (of-the-VM) information needed to evaluate the policy + void setup(); bool should_clear_reference(oop p); }; --- old/hotspot/src/share/vm/memory/referenceProcessor.cpp 2009-08-01 04:12:26.059868028 +0100 +++ new/hotspot/src/share/vm/memory/referenceProcessor.cpp 2009-08-01 04:12:25.974951709 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)referenceProcessor.cpp 1.57 07/08/17 12:30:18 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,35 +28,54 @@ # include "incls/_precompiled.incl" # include "incls/_referenceProcessor.cpp.incl" +ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; +ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; +oop ReferenceProcessor::_sentinelRef = NULL; +const int subclasses_of_ref = REF_PHANTOM - REF_OTHER; + // List of discovered references. class DiscoveredList { public: - DiscoveredList() : _head(NULL), _len(0) { } - oop head() const { return _head; } - oop* head_ptr() { return &_head; } - void set_head(oop o) { _head = o; } - bool empty() const { return _head == ReferenceProcessor::_sentinelRef; } + DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { } + oop head() const { + return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) : + _oop_head; + } + HeapWord* adr_head() { + return UseCompressedOops ? (HeapWord*)&_compressed_head : + (HeapWord*)&_oop_head; + } + void set_head(oop o) { + if (UseCompressedOops) { + // Must compress the head ptr. + _compressed_head = oopDesc::encode_heap_oop_not_null(o); + } else { + _oop_head = o; + } + } + bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); } size_t length() { return _len; } - void set_length(size_t len) { _len = len; } + void set_length(size_t len) { _len = len; } + void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); } + void dec_length(size_t dec) { _len -= dec; } private: + // Set value depending on UseCompressedOops. This could be a template class + // but then we have to fix all the instantiations and declarations that use this class. + oop _oop_head; + narrowOop _compressed_head; size_t _len; - oop _head; }; - -oop ReferenceProcessor::_sentinelRef = NULL; - -const int subclasses_of_ref = REF_PHANTOM - REF_OTHER; void referenceProcessor_init() { ReferenceProcessor::init_statics(); } void ReferenceProcessor::init_statics() { - assert(_sentinelRef == NULL, "should be initialized precsiely once"); + assert(_sentinelRef == NULL, "should be initialized precisely once"); EXCEPTION_MARK; _sentinelRef = instanceKlass::cast( - SystemDictionary::object_klass())-> - allocate_permanent_instance(THREAD); + SystemDictionary::reference_klass())-> + allocate_permanent_instance(THREAD); // Initialize the master soft ref clock. 
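The reworked DiscoveredList above keeps both a narrowOop head and an oop head and consults UseCompressedOops on every access. A self-contained sketch of that dual-representation pattern follows; DualHead, encode, decode and the offset-from-a-fake-heap encoding are invented for illustration and only approximate HotSpot's compressed-oop helpers.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <initializer_list>

// Pretend heap: "compressed" references are 32-bit offsets from a base address.
static char g_heap[1 << 20];

static uint32_t encode(void* p) {
  return static_cast<uint32_t>(static_cast<char*>(p) - g_heap);
}
static void* decode(uint32_t c) { return g_heap + c; }

// Holds the list head either as a raw pointer or as a 32-bit compressed
// offset, selected by a runtime flag, which is the shape DiscoveredList takes.
class DualHead {
 public:
  explicit DualHead(bool use_compressed)
      : _use_compressed(use_compressed), _compressed_head(0), _raw_head(nullptr) {}

  void* head() const {
    return _use_compressed ? decode(_compressed_head) : _raw_head;
  }
  void set_head(void* p) {
    if (_use_compressed) {
      _compressed_head = encode(p);   // must compress before storing
    } else {
      _raw_head = p;
    }
  }
  // Address of whichever field is in use, for visitors that need a slot.
  void* adr_head() {
    return _use_compressed ? static_cast<void*>(&_compressed_head)
                           : static_cast<void*>(&_raw_head);
  }
 private:
  bool _use_compressed;
  uint32_t _compressed_head;
  void* _raw_head;
};

int main() {
  void* obj = g_heap + 128;
  for (bool compressed : { false, true }) {
    DualHead list(compressed);
    list.set_head(obj);
    assert(list.head() == obj);
    std::printf("compressed=%d head=%p slot=%p\n", compressed, list.head(), list.adr_head());
  }
  return 0;
}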
java_lang_ref_SoftReference::set_clock(os::javaTimeMillis()); @@ -67,20 +86,25 @@ } assert(_sentinelRef != NULL && _sentinelRef->is_oop(), "Just constructed it!"); + _always_clear_soft_ref_policy = new AlwaysClearPolicy(); + _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy()) + NOT_COMPILER2(LRUCurrentHeapPolicy()); + if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { + vm_exit_during_initialization("Could not allocate reference policy object"); + } guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || RefDiscoveryPolicy == ReferentBasedDiscovery, "Unrecongnized RefDiscoveryPolicy"); } - -ReferenceProcessor* ReferenceProcessor::create_ref_processor( - MemRegion span, - bool atomic_discovery, - bool mt_discovery, - BoolObjectClosure* is_alive_non_header, - int parallel_gc_threads, - bool mt_processing) -{ +ReferenceProcessor* +ReferenceProcessor::create_ref_processor(MemRegion span, + bool atomic_discovery, + bool mt_discovery, + BoolObjectClosure* is_alive_non_header, + int parallel_gc_threads, + bool mt_processing, + bool dl_needs_barrier) { int mt_degree = 1; if (parallel_gc_threads > 1) { mt_degree = parallel_gc_threads; @@ -88,21 +112,27 @@ ReferenceProcessor* rp = new ReferenceProcessor(span, atomic_discovery, mt_discovery, mt_degree, - mt_processing); + mt_processing && (parallel_gc_threads > 0), + dl_needs_barrier); if (rp == NULL) { vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); } rp->set_is_alive_non_header(is_alive_non_header); + rp->setup_policy(false /* default soft ref policy */); return rp; } - ReferenceProcessor::ReferenceProcessor(MemRegion span, - bool atomic_discovery, bool mt_discovery, int mt_degree, - bool mt_processing) : + bool atomic_discovery, + bool mt_discovery, + int mt_degree, + bool mt_processing, + bool discovered_list_needs_barrier) : _discovering_refs(false), _enqueuing_is_done(false), _is_alive_non_header(NULL), + _discovered_list_needs_barrier(discovered_list_needs_barrier), + _bs(NULL), _processing_is_mt(mt_processing), _next_id(0) { @@ -117,12 +147,16 @@ _discoveredWeakRefs = &_discoveredSoftRefs[_num_q]; _discoveredFinalRefs = &_discoveredWeakRefs[_num_q]; _discoveredPhantomRefs = &_discoveredFinalRefs[_num_q]; - assert(_sentinelRef != NULL, "_sentinelRef is NULL"); + assert(sentinel_ref() != NULL, "_sentinelRef is NULL"); // Initialized all entries to _sentinelRef for (int i = 0; i < _num_q * subclasses_of_ref; i++) { - _discoveredSoftRefs[i].set_head(_sentinelRef); + _discoveredSoftRefs[i].set_head(sentinel_ref()); _discoveredSoftRefs[i].set_length(0); } + // If we do barreirs, cache a copy of the barrier set. + if (discovered_list_needs_barrier) { + _bs = Universe::heap()->barrier_set(); + } } #ifndef PRODUCT @@ -137,16 +171,19 @@ void ReferenceProcessor::weak_oops_do(OopClosure* f) { for (int i = 0; i < _num_q * subclasses_of_ref; i++) { - f->do_oop(_discoveredSoftRefs[i].head_ptr()); + if (UseCompressedOops) { + f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head()); + } else { + f->do_oop((oop*)_discoveredSoftRefs[i].adr_head()); + } } } void ReferenceProcessor::oops_do(OopClosure* f) { - f->do_oop(&_sentinelRef); + f->do_oop(adr_sentinel_ref()); } -void ReferenceProcessor::update_soft_ref_master_clock() -{ +void ReferenceProcessor::update_soft_ref_master_clock() { // Update (advance) the soft ref master clock field. This must be done // after processing the soft ref list. jlong now = os::javaTimeMillis(); @@ -167,10 +204,7 @@ // past clock value. 
} - -void -ReferenceProcessor::process_discovered_references( - ReferencePolicy* policy, +void ReferenceProcessor::process_discovered_references( BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, @@ -185,7 +219,7 @@ // Soft references { TraceTime tt("SoftReference", trace_time, false, gclog_or_tty); - process_discovered_reflist(_discoveredSoftRefs, policy, true, + process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, is_alive, keep_alive, complete_gc, task_executor); } @@ -226,15 +260,13 @@ } } - #ifndef PRODUCT // Calculate the number of jni handles. -unsigned int ReferenceProcessor::count_jni_refs() -{ +uint ReferenceProcessor::count_jni_refs() { class AlwaysAliveClosure: public BoolObjectClosure { public: - bool do_object_b(oop obj) { return true; } - void do_object(oop obj) { assert(false, "Don't call"); } + virtual bool do_object_b(oop obj) { return true; } + virtual void do_object(oop obj) { assert(false, "Don't call"); } }; class CountHandleClosure: public OopClosure { @@ -242,9 +274,8 @@ int _count; public: CountHandleClosure(): _count(0) {} - void do_oop(oop* unused) { - _count++; - } + void do_oop(oop* unused) { _count++; } + void do_oop(narrowOop* unused) { ShouldNotReachHere(); } int count() { return _count; } }; CountHandleClosure global_handle_count; @@ -265,36 +296,48 @@ #endif JNIHandles::weak_oops_do(is_alive, keep_alive); // Finally remember to keep sentinel around - keep_alive->do_oop(&_sentinelRef); + keep_alive->do_oop(adr_sentinel_ref()); complete_gc->do_void(); } -bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) { - NOT_PRODUCT(verify_ok_to_handle_reflists()); + +template +static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref, + AbstractRefProcTaskExecutor* task_executor) { + // Remember old value of pending references list - oop* pending_list_addr = java_lang_ref_Reference::pending_list_addr(); - oop old_pending_list_value = *pending_list_addr; + T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr(); + T old_pending_list_value = *pending_list_addr; // Enqueue references that are not made active again, and // clear the decks for the next collection (cycle). - enqueue_discovered_reflists(pending_list_addr, task_executor); + ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); // Do the oop-check on pending_list_addr missed in // enqueue_discovered_reflist. We should probably // do a raw oop_check so that future such idempotent // oop_stores relying on the oop-check side-effect // may be elided automatically and safely without // affecting correctness. - oop_store(pending_list_addr, *(pending_list_addr)); + oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); // Stop treating discovered references specially. 
- disable_discovery(); + ref->disable_discovery(); // Return true if new pending references were added return old_pending_list_value != *pending_list_addr; } +bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) { + NOT_PRODUCT(verify_ok_to_handle_reflists()); + if (UseCompressedOops) { + return enqueue_discovered_ref_helper(this, task_executor); + } else { + return enqueue_discovered_ref_helper(this, task_executor); + } +} + void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list, - oop* pending_list_addr) { + HeapWord* pending_list_addr) { // Given a list of refs linked through the "discovered" field // (java.lang.ref.Reference.discovered) chain them through the // "next" field (java.lang.ref.Reference.next) and prepend @@ -308,19 +351,19 @@ // the next field and clearing it (except for the last // non-sentinel object which is treated specially to avoid // confusion with an active reference). - while (obj != _sentinelRef) { + while (obj != sentinel_ref()) { assert(obj->is_instanceRef(), "should be reference object"); oop next = java_lang_ref_Reference::discovered(obj); if (TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT, - (oopDesc*) obj, (oopDesc*) next); + gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT, + obj, next); } - assert(*java_lang_ref_Reference::next_addr(obj) == NULL, - "The reference should not be enqueued"); - if (next == _sentinelRef) { // obj is last + assert(java_lang_ref_Reference::next(obj) == NULL, + "The reference should not be enqueued"); + if (next == sentinel_ref()) { // obj is last // Swap refs_list into pendling_list_addr and // set obj's next to what we read from pending_list_addr. - oop old = (oop)Atomic::xchg_ptr(refs_list.head(), pending_list_addr); + oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); // Need oop_check on pending_list_addr above; // see special oop-check code at the end of // enqueue_discovered_reflists() further below. @@ -344,15 +387,14 @@ public: RefProcEnqueueTask(ReferenceProcessor& ref_processor, DiscoveredList discovered_refs[], - oop* pending_list_addr, - oop sentinel_ref, + HeapWord* pending_list_addr, + oop sentinel_ref, int n_queues) : EnqueueTask(ref_processor, discovered_refs, pending_list_addr, sentinel_ref, n_queues) { } - - virtual void work(unsigned int work_id) - { + + virtual void work(unsigned int work_id) { assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds"); // Simplest first cut: static partitioning. 
int index = work_id; @@ -366,18 +408,18 @@ }; // Enqueue references that are not made active again -void ReferenceProcessor::enqueue_discovered_reflists(oop* pending_list_addr, +void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor) { if (_processing_is_mt && task_executor != NULL) { // Parallel code RefProcEnqueueTask tsk(*this, _discoveredSoftRefs, - pending_list_addr, _sentinelRef, _num_q); + pending_list_addr, sentinel_ref(), _num_q); task_executor->execute(tsk); } else { // Serial code: call the parent class's implementation for (int i = 0; i < _num_q * subclasses_of_ref; i++) { enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr); - _discoveredSoftRefs[i].set_head(_sentinelRef); + _discoveredSoftRefs[i].set_head(sentinel_ref()); _discoveredSoftRefs[i].set_length(0); } } @@ -391,15 +433,14 @@ BoolObjectClosure* is_alive); // End Of List. - inline bool has_next() const - { return _next != ReferenceProcessor::_sentinelRef; } - + inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); } + // Get oop to the Reference object. - inline oop obj() const { return _ref; } + inline oop obj() const { return _ref; } // Get oop to the referent object. - inline oop referent() const { return _referent; } - + inline oop referent() const { return _referent; } + // Returns true if referent is alive. inline bool is_referent_alive() const; @@ -407,43 +448,55 @@ // The "allow_null_referent" argument tells us to allow for the possibility // of a NULL referent in the discovered Reference object. This typically // happens in the case of concurrent collectors that may have done the - // discovery concurrently or interleaved with mutator execution. + // discovery concurrently, or interleaved, with mutator execution. inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent)); // Move to the next discovered reference. inline void next(); - - // Remove the current reference from the list and move to the next. + + // Remove the current reference from the list inline void remove(); // Make the Reference object active again. inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); } // Make the referent alive. - inline void make_referent_alive() { _keep_alive->do_oop(_referent_addr); } - + inline void make_referent_alive() { + if (UseCompressedOops) { + _keep_alive->do_oop((narrowOop*)_referent_addr); + } else { + _keep_alive->do_oop((oop*)_referent_addr); + } + } + // Update the discovered field. - inline void update_discovered() { _keep_alive->do_oop(_prev_next); } - + inline void update_discovered() { + // First _prev_next ref actually points into DiscoveredList (gross). + if (UseCompressedOops) { + _keep_alive->do_oop((narrowOop*)_prev_next); + } else { + _keep_alive->do_oop((oop*)_prev_next); + } + } + // NULL out referent pointer. 
- inline void clear_referent() { *_referent_addr = NULL; } + inline void clear_referent() { oop_store_raw(_referent_addr, NULL); } // Statistics NOT_PRODUCT( inline size_t processed() const { return _processed; } inline size_t removed() const { return _removed; } ) - -private: + inline void move_to_next(); private: DiscoveredList& _refs_list; - oop* _prev_next; + HeapWord* _prev_next; oop _ref; - oop* _discovered_addr; + HeapWord* _discovered_addr; oop _next; - oop* _referent_addr; + HeapWord* _referent_addr; oop _referent; OopClosure* _keep_alive; BoolObjectClosure* _is_alive; @@ -460,7 +513,7 @@ OopClosure* keep_alive, BoolObjectClosure* is_alive) : _refs_list(refs_list), - _prev_next(refs_list.head_ptr()), + _prev_next(refs_list.adr_head()), _ref(refs_list.head()), #ifdef ASSERT _first_seen(refs_list.head()), @@ -474,19 +527,18 @@ _is_alive(is_alive) { } -inline bool DiscoveredListIterator::is_referent_alive() const -{ +inline bool DiscoveredListIterator::is_referent_alive() const { return _is_alive->do_object_b(_referent); } -inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) -{ +inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) { _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref); - assert(_discovered_addr && (*_discovered_addr)->is_oop_or_null(), + oop discovered = java_lang_ref_Reference::discovered(_ref); + assert(_discovered_addr && discovered->is_oop_or_null(), "discovered field is bad"); - _next = *_discovered_addr; + _next = discovered; _referent_addr = java_lang_ref_Reference::referent_addr(_ref); - _referent = *_referent_addr; + _referent = java_lang_ref_Reference::referent(_ref); assert(Universe::heap()->is_in_reserved_or_null(_referent), "Wrong oop found in java.lang.Reference object"); assert(allow_null_referent ? @@ -495,32 +547,32 @@ "bad referent"); } -inline void DiscoveredListIterator::next() -{ +inline void DiscoveredListIterator::next() { _prev_next = _discovered_addr; move_to_next(); } -inline void DiscoveredListIterator::remove() -{ +inline void DiscoveredListIterator::remove() { assert(_ref->is_oop(), "Dropping a bad reference"); - // Clear the discovered_addr field so that the object does - // not look like it has been discovered. - *_discovered_addr = NULL; - // Remove Reference object from list. - *_prev_next = _next; + oop_store_raw(_discovered_addr, NULL); + // First _prev_next ref actually points into DiscoveredList (gross). + if (UseCompressedOops) { + // Remove Reference object from list. + oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next); + } else { + // Remove Reference object from list. + oopDesc::store_heap_oop((oop*)_prev_next, _next); + } NOT_PRODUCT(_removed++); - move_to_next(); + _refs_list.dec_length(1); } -inline void DiscoveredListIterator::move_to_next() -{ +inline void DiscoveredListIterator::move_to_next() { _ref = _next; assert(_ref != _first_seen, "cyclic ref_list found"); NOT_PRODUCT(_processed++); } - // NOTE: process_phase*() are largely similar, and at a high level // merely iterate over the extant list applying a predicate to // each of its elements and possibly removing that element from the @@ -534,28 +586,29 @@ // referents are not alive, but that should be kept alive for policy reasons. // Keep alive the transitive closure of all such referents. 
void -ReferenceProcessor::process_phase1(DiscoveredList& refs_list_addr, +ReferenceProcessor::process_phase1(DiscoveredList& refs_list, ReferencePolicy* policy, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc) { assert(policy != NULL, "Must have a non-NULL policy"); - DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive); + DiscoveredListIterator iter(refs_list, keep_alive, is_alive); // Decide which softly reachable refs should be kept alive. while (iter.has_next()) { iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); if (referent_is_dead && !policy->should_clear_reference(iter.obj())) { if (TraceReferenceGC) { - gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", - (address)iter.obj(), iter.obj()->blueprint()->internal_name()); + gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", + iter.obj(), iter.obj()->blueprint()->internal_name()); } + // Remove Reference object from list + iter.remove(); // Make the Reference object active again iter.make_active(); // keep the referent around iter.make_referent_alive(); - // Remove Reference object from list - iter.remove(); + iter.move_to_next(); } else { iter.next(); } @@ -573,28 +626,28 @@ // Traverse the list and remove any Refs that are not active, or // whose referents are either alive or NULL. void -ReferenceProcessor::pp2_work(DiscoveredList& refs_list_addr, +ReferenceProcessor::pp2_work(DiscoveredList& refs_list, BoolObjectClosure* is_alive, - OopClosure* keep_alive) -{ + OopClosure* keep_alive) { assert(discovery_is_atomic(), "Error"); - DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive); + DiscoveredListIterator iter(refs_list, keep_alive, is_alive); while (iter.has_next()) { iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); - DEBUG_ONLY(oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());) - assert(*next_addr == NULL, "Should not discover inactive Reference"); + DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());) + assert(next == NULL, "Should not discover inactive Reference"); if (iter.is_referent_alive()) { if (TraceReferenceGC) { - gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)", - (address)iter.obj(), iter.obj()->blueprint()->internal_name()); + gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)", + iter.obj(), iter.obj()->blueprint()->internal_name()); } // The referent is reachable after all. + // Remove Reference object from list. + iter.remove(); // Update the referent pointer as necessary: Note that this // should not entail any recursive marking because the // referent must already have been traversed. 
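The phase1/phase2 loops above now call iter.remove() followed by an explicit iter.move_to_next() when dropping an element, instead of having remove() advance implicitly. A minimal linked-list version of that iterator convention, with invented names (ListIter, Node), is sketched below; it is not the DiscoveredListIterator itself.

#include <cstdio>

struct Node { int value; Node* next; };

// Iterator over a singly linked list that mirrors the convention in the patch:
// remove() unlinks the current node but does not advance; the caller follows
// it with move_to_next(), while next() advances past a node that is kept.
class ListIter {
 public:
  explicit ListIter(Node** head) : _prev_next(head), _cur(*head) {}
  bool has_next() const { return _cur != nullptr; }
  Node* obj() const { return _cur; }
  void next() {                    // keep the current node and advance
    _prev_next = &_cur->next;
    _cur = _cur->next;
  }
  void remove() {                  // unlink the current node; caller advances
    *_prev_next = _cur->next;
  }
  void move_to_next() { _cur = *_prev_next; }
 private:
  Node** _prev_next;               // slot that points at the current node
  Node*  _cur;
};

int main() {
  Node c{3, nullptr}, b{2, &c}, a{1, &b};
  Node* head = &a;
  for (ListIter it(&head); it.has_next(); ) {
    if (it.obj()->value == 2) {    // drop this element, as phase1/phase2 drop refs
      it.remove();
      it.move_to_next();
    } else {
      it.next();
    }
  }
  for (Node* n = head; n != nullptr; n = n->next) std::printf("%d ", n->value);
  std::printf("\n");               // prints "1 3"
  return 0;
}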
iter.make_referent_alive(); - // Remove Reference object from list - iter.remove(); + iter.move_to_next(); } else { iter.next(); } @@ -608,25 +661,29 @@ } void -ReferenceProcessor::pp2_work_concurrent_discovery( - DiscoveredList& refs_list_addr, - BoolObjectClosure* is_alive, - OopClosure* keep_alive, - VoidClosure* complete_gc) -{ +ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list, + BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc) { assert(!discovery_is_atomic(), "Error"); - DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive); + DiscoveredListIterator iter(refs_list, keep_alive, is_alive); while (iter.has_next()) { iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); - oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); + HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); + oop next = java_lang_ref_Reference::next(iter.obj()); if ((iter.referent() == NULL || iter.is_referent_alive() || - *next_addr != NULL)) { - assert((*next_addr)->is_oop_or_null(), "bad next field"); + next != NULL)) { + assert(next->is_oop_or_null(), "bad next field"); // Remove Reference object from list iter.remove(); // Trace the cohorts iter.make_referent_alive(); - keep_alive->do_oop(next_addr); + if (UseCompressedOops) { + keep_alive->do_oop((narrowOop*)next_addr); + } else { + keep_alive->do_oop((oop*)next_addr); + } + iter.move_to_next(); } else { iter.next(); } @@ -642,15 +699,15 @@ } // Traverse the list and process the referents, by either -// either clearing them or keeping them (and their reachable +// clearing them or keeping them (and their reachable // closure) alive. void -ReferenceProcessor::process_phase3(DiscoveredList& refs_list_addr, +ReferenceProcessor::process_phase3(DiscoveredList& refs_list, bool clear_referent, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc) { - DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive); + DiscoveredListIterator iter(refs_list, keep_alive, is_alive); while (iter.has_next()) { iter.update_discovered(); iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); @@ -662,9 +719,9 @@ iter.make_referent_alive(); } if (TraceReferenceGC) { - gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending", - clear_referent ? "cleared " : "", - (address)iter.obj(), iter.obj()->blueprint()->internal_name()); + gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending", + clear_referent ? 
"cleared " : "", + iter.obj(), iter.obj()->blueprint()->internal_name()); } assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference"); // If discovery is concurrent, we may have objects with null referents, @@ -682,21 +739,26 @@ } void -ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& ref_list) { - oop obj = ref_list.head(); - while (obj != _sentinelRef) { - oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj); - obj = *discovered_addr; - *discovered_addr = NULL; +ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) { + oop obj = refs_list.head(); + while (obj != sentinel_ref()) { + oop discovered = java_lang_ref_Reference::discovered(obj); + java_lang_ref_Reference::set_discovered_raw(obj, NULL); + obj = discovered; } - ref_list.set_head(_sentinelRef); - ref_list.set_length(0); + refs_list.set_head(sentinel_ref()); + refs_list.set_length(0); } -void -ReferenceProcessor::abandon_partial_discovered_list_arr(DiscoveredList refs_lists[]) { - for (int i = 0; i < _num_q; i++) { - abandon_partial_discovered_list(refs_lists[i]); +void ReferenceProcessor::abandon_partial_discovery() { + // loop over the lists + for (int i = 0; i < _num_q * subclasses_of_ref; i++) { + if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) { + gclog_or_tty->print_cr( + "\nAbandoning %s discovered list", + list_name(i)); + } + abandon_partial_discovered_list(_discoveredSoftRefs[i]); } } @@ -780,13 +842,13 @@ // find an element to split the list on for (size_t j = 0; j < refs_to_move; ++j) { move_tail = new_head; - new_head = *java_lang_ref_Reference::discovered_addr(new_head); + new_head = java_lang_ref_Reference::discovered(new_head); } java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head()); ref_lists[to_idx].set_head(move_head); - ref_lists[to_idx].set_length(ref_lists[to_idx].length() + refs_to_move); + ref_lists[to_idx].inc_length(refs_to_move); ref_lists[from_idx].set_head(new_head); - ref_lists[from_idx].set_length(ref_lists[from_idx].length() - refs_to_move); + ref_lists[from_idx].dec_length(refs_to_move); } else { ++to_idx; } @@ -875,30 +937,28 @@ void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) { assert(!discovery_is_atomic(), "Else why call this method?"); DiscoveredListIterator iter(refs_list, NULL, NULL); - size_t length = refs_list.length(); while (iter.has_next()) { iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); - oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); - assert((*next_addr)->is_oop_or_null(), "bad next field"); + oop next = java_lang_ref_Reference::next(iter.obj()); + assert(next->is_oop_or_null(), "bad next field"); // If referent has been cleared or Reference is not active, // drop it. 
- if (iter.referent() == NULL || *next_addr != NULL) { + if (iter.referent() == NULL || next != NULL) { debug_only( - if (PrintGCDetails && TraceReferenceGC) { - gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: " - INTPTR_FORMAT " with next field: " INTPTR_FORMAT - " and referent: " INTPTR_FORMAT, - (address)iter.obj(), (address)*next_addr, (address)iter.referent()); - } + if (PrintGCDetails && TraceReferenceGC) { + gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: " + INTPTR_FORMAT " with next field: " INTPTR_FORMAT + " and referent: " INTPTR_FORMAT, + iter.obj(), next, iter.referent()); + } ) // Remove Reference object from list iter.remove(); - --length; + iter.move_to_next(); } else { iter.next(); } } - refs_list.set_length(length); NOT_PRODUCT( if (PrintGCDetails && TraceReferenceGC) { gclog_or_tty->print( @@ -953,18 +1013,34 @@ return list; } -inline void ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& list, - oop obj, oop* discovered_addr) { +inline void +ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list, + oop obj, + HeapWord* discovered_addr) { assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller"); // First we must make sure this object is only enqueued once. CAS in a non null // discovered_addr. - oop retest = (oop)Atomic::cmpxchg_ptr(list.head(), discovered_addr, NULL); + oop current_head = refs_list.head(); + + // Note: In the case of G1, this pre-barrier is strictly + // not necessary because the only case we are interested in + // here is when *discovered_addr is NULL, so this will expand to + // nothing. As a result, I am just manually eliding this out for G1. + if (_discovered_list_needs_barrier && !UseG1GC) { + _bs->write_ref_field_pre((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR"); + } + oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr, + NULL); if (retest == NULL) { // This thread just won the right to enqueue the object. // We have separate lists for enqueueing so no synchronization // is necessary. - list.set_head(obj); - list.set_length(list.length() + 1); + refs_list.set_head(obj); + refs_list.inc_length(1); + if (_discovered_list_needs_barrier) { + _bs->write_ref_field((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR"); + } + } else { // If retest was non NULL, another thread beat us to it: // The reference has already been discovered... @@ -975,7 +1051,6 @@ } } - // We mention two of several possible choices here: // #0: if the reference object is not in the "originating generation" // (or part of the heap being collected, indicated by our "span" @@ -1009,8 +1084,8 @@ return false; } // We only enqueue active references. - oop* next_addr = java_lang_ref_Reference::next_addr(obj); - if (*next_addr != NULL) { + oop next = java_lang_ref_Reference::next(obj); + if (next != NULL) { return false; } @@ -1027,24 +1102,37 @@ // reachable. if (is_alive_non_header() != NULL) { oop referent = java_lang_ref_Reference::referent(obj); - // We'd like to assert the following: - // assert(referent != NULL, "Refs with null referents already filtered"); - // However, since this code may be executed concurrently with - // mutators, which can clear() the referent, it is not - // guaranteed that the referent is non-NULL. + // In the case of non-concurrent discovery, the last + // disjunct below should hold. 
It may not hold in the + // case of concurrent discovery because mutators may + // concurrently clear() a Reference. + assert(UseConcMarkSweepGC || UseG1GC || referent != NULL, + "Refs with null referents already filtered"); if (is_alive_non_header()->do_object_b(referent)) { return false; // referent is reachable } } + if (rt == REF_SOFT) { + // For soft refs we can decide now if these are not + // current candidates for clearing, in which case we + // can mark through them now, rather than delaying that + // to the reference-processing phase. Since all current + // time-stamp policies advance the soft-ref clock only + // at a major collection cycle, this is always currently + // accurate. + if (!_current_soft_ref_policy->should_clear_reference(obj)) { + return false; + } + } - oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj); - assert(discovered_addr != NULL && (*discovered_addr)->is_oop_or_null(), - "bad discovered field"); - if (*discovered_addr != NULL) { + HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj); + const oop discovered = java_lang_ref_Reference::discovered(obj); + assert(discovered->is_oop_or_null(), "bad discovered field"); + if (discovered != NULL) { // The reference has already been discovered... if (TraceReferenceGC) { - gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)", - (oopDesc*)obj, obj->blueprint()->internal_name()); + gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)", + obj, obj->blueprint()->internal_name()); } if (RefDiscoveryPolicy == ReferentBasedDiscovery) { // assumes that an object is not processed twice; @@ -1058,7 +1146,7 @@ // discovered twice except by concurrent collectors that potentially // trace the same Reference object twice. assert(UseConcMarkSweepGC, - "Only possible with a concurrent collector"); + "Only possible with an incremental-update concurrent collector"); return true; } } @@ -1086,14 +1174,26 @@ return false; // nothing special needs to be done } - // We do a raw store here, the field will be visited later when - // processing the discovered references. if (_discovery_is_mt) { add_to_discovered_list_mt(*list, obj, discovered_addr); } else { - *discovered_addr = list->head(); + // If "_discovered_list_needs_barrier", we do write barriers when + // updating the discovered reference list. Otherwise, we do a raw store + // here: the field will be visited later when processing the discovered + // references. + oop current_head = list->head(); + // As in the case further above, since we are over-writing a NULL + // pre-value, we can safely elide the pre-barrier here for the case of G1. 
+ assert(discovered == NULL, "control point invariant"); + if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1 + _bs->write_ref_field_pre((oop*)discovered_addr, current_head); + } + oop_store_raw(discovered_addr, current_head); + if (_discovered_list_needs_barrier) { + _bs->write_ref_field((oop*)discovered_addr, current_head); + } list->set_head(obj); - list->set_length(list->length() + 1); + list->inc_length(1); } // In the MT discovery case, it is currently possible to see @@ -1108,8 +1208,8 @@ if (TraceReferenceGC) { oop referent = java_lang_ref_Reference::referent(obj); if (PrintGCDetails) { - gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)", - (oopDesc*) obj, obj->blueprint()->internal_name()); + gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)", + obj, obj->blueprint()->internal_name()); } assert(referent->is_oop(), "Enqueued a bad referent"); } @@ -1134,45 +1234,48 @@ TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, false, gclog_or_tty); for (int i = 0; i < _num_q; i++) { + if (yield->should_return()) { + return; + } preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, keep_alive, complete_gc, yield); } } - if (yield->should_return()) { - return; - } // Weak references { TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, false, gclog_or_tty); for (int i = 0; i < _num_q; i++) { + if (yield->should_return()) { + return; + } preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, keep_alive, complete_gc, yield); } } - if (yield->should_return()) { - return; - } // Final references { TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, false, gclog_or_tty); for (int i = 0; i < _num_q; i++) { + if (yield->should_return()) { + return; + } preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, keep_alive, complete_gc, yield); } } - if (yield->should_return()) { - return; - } // Phantom references { TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, false, gclog_or_tty); for (int i = 0; i < _num_q; i++) { + if (yield->should_return()) { + return; + } preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, keep_alive, complete_gc, yield); } @@ -1181,20 +1284,25 @@ // Walk the given discovered ref list, and remove all reference objects // whose referents are still alive, whose referents are NULL or which -// are not active (have a non-NULL next field). NOTE: For this to work -// correctly, refs discovery can not be happening concurrently with this -// step. -void ReferenceProcessor::preclean_discovered_reflist( - DiscoveredList& refs_list, BoolObjectClosure* is_alive, - OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield) { - +// are not active (have a non-NULL next field). NOTE: When we are +// thus precleaning the ref lists (which happens single-threaded today), +// we do not disable refs discovery to honour the correct semantics of +// java.lang.Reference. As a result, we need to be careful below +// that ref removal steps interleave safely with ref discovery steps +// (in this thread). 
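The non-MT discovery path above wraps the raw store to the discovered field with an optional pre-barrier, elidable when the old value is known to be NULL, and a post-barrier on the updated slot. A toy version of that store-with-barriers shape follows; pre_barrier, post_barrier and store_with_barrier are illustrative placeholders, not the BarrierSet interface.

#include <cstdio>

struct Object { int id; };

// Simplified reference store used when linking an object onto a list: an
// optional SATB-style pre-barrier on the value being overwritten and a
// card-marking-style post-barrier on the slot. When the slot is known to
// hold NULL, the pre-barrier can be skipped, which is the elision the
// comments above describe for G1.
static void pre_barrier(Object* old_val)  { std::printf("pre-barrier on %p\n", (void*)old_val); }
static void post_barrier(Object** slot)   { std::printf("post-barrier on slot %p\n", (void*)slot); }

static void store_with_barrier(Object** slot, Object* new_val,
                               bool needs_barrier, bool known_null) {
  if (needs_barrier && !known_null) {
    pre_barrier(*slot);              // the old value may still need to be marked
  }
  *slot = new_val;                   // the raw store itself
  if (needs_barrier) {
    post_barrier(slot);              // remember the updated slot
  }
}

int main() {
  Object head{1};
  Object* discovered_field = nullptr;
  // Overwriting a NULL field: pre-barrier elided, post-barrier still applied.
  store_with_barrier(&discovered_field, &head, /*needs_barrier=*/true, /*known_null=*/true);
  return 0;
}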
+void +ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, + BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc, + YieldClosure* yield) { DiscoveredListIterator iter(refs_list, keep_alive, is_alive); - size_t length = refs_list.length(); while (iter.has_next()) { iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); - oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); - if (iter.referent() == NULL || iter.is_referent_alive() || - *next_addr != NULL) { + oop obj = iter.obj(); + oop next = java_lang_ref_Reference::next(obj); + if (iter.referent() == NULL || iter.is_referent_alive() || + next != NULL) { // The referent has been cleared, or is alive, or the Reference is not // active; we need to trace and mark its cohort. if (TraceReferenceGC) { @@ -1203,16 +1311,20 @@ } // Remove Reference object from list iter.remove(); - --length; // Keep alive its cohort. iter.make_referent_alive(); - keep_alive->do_oop(next_addr); + if (UseCompressedOops) { + narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj); + keep_alive->do_oop(next_addr); + } else { + oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); + keep_alive->do_oop(next_addr); + } + iter.move_to_next(); } else { iter.next(); } } - refs_list.set_length(length); - // Close the reachable set complete_gc->do_void(); @@ -1244,7 +1356,7 @@ #endif void ReferenceProcessor::verify() { - guarantee(_sentinelRef != NULL && _sentinelRef->is_oop(), "Lost _sentinelRef"); + guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef"); } #ifndef PRODUCT @@ -1252,12 +1364,12 @@ guarantee(!_discovering_refs, "Discovering refs?"); for (int i = 0; i < _num_q * subclasses_of_ref; i++) { oop obj = _discoveredSoftRefs[i].head(); - while (obj != _sentinelRef) { + while (obj != sentinel_ref()) { oop next = java_lang_ref_Reference::discovered(obj); java_lang_ref_Reference::set_discovered(obj, (oop) NULL); obj = next; } - _discoveredSoftRefs[i].set_head(_sentinelRef); + _discoveredSoftRefs[i].set_head(sentinel_ref()); _discoveredSoftRefs[i].set_length(0); } } --- old/hotspot/src/share/vm/memory/referenceProcessor.hpp 2009-08-01 04:12:27.091141983 +0100 +++ new/hotspot/src/share/vm/memory/referenceProcessor.hpp 2009-08-01 04:12:27.009586266 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)referenceProcessor.hpp 1.43 07/05/05 17:05:54 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ */ // ReferenceProcessor class encapsulates the per-"collector" processing -// of "weak" references for GC. The interface is useful for supporting +// of java.lang.Reference objects for GC. The interface is useful for supporting // a generational abstraction, in particular when there are multiple // generations that are being independently collected -- possibly // concurrently and/or incrementally. Note, however, that the @@ -48,8 +48,6 @@ class DiscoveredList; class ReferenceProcessor : public CHeapObj { - friend class DiscoveredList; - friend class DiscoveredListIterator; protected: // End of list marker static oop _sentinelRef; @@ -59,7 +57,15 @@ bool _discovery_is_atomic; // if discovery is atomic wrt // other collectors in configuration bool _discovery_is_mt; // true if reference discovery is MT. 
- bool _enqueuing_is_done; // true if all weak references enqueued + // If true, setting "next" field of a discovered refs list requires + // write barrier(s). (Must be true if used in a collector in which + // elements of a discovered list may be moved during discovery: for + // example, a collector like Garbage-First that moves objects during a + // long-term concurrent marking phase that does weak reference + // discovery.) + bool _discovered_list_needs_barrier; + BarrierSet* _bs; // Cached copy of BarrierSet. + bool _enqueuing_is_done; // true if all weak references enqueued bool _processing_is_mt; // true during phases when // reference processing is MT. int _next_id; // round-robin counter in @@ -72,17 +78,35 @@ // all collectors but the CMS collector). BoolObjectClosure* _is_alive_non_header; + // Soft ref clearing policies + // . the default policy + static ReferencePolicy* _default_soft_ref_policy; + // . the "clear all" policy + static ReferencePolicy* _always_clear_soft_ref_policy; + // . the current policy below is either one of the above + ReferencePolicy* _current_soft_ref_policy; + // The discovered ref lists themselves - int _num_q; // the MT'ness degree of the queues below - DiscoveredList* _discoveredSoftRefs; // pointer to array of oops + + // The MT'ness degree of the queues below + int _num_q; + // Arrays of lists of oops, one per thread + DiscoveredList* _discoveredSoftRefs; DiscoveredList* _discoveredWeakRefs; DiscoveredList* _discoveredFinalRefs; DiscoveredList* _discoveredPhantomRefs; public: - int num_q() { return _num_q; } + int num_q() { return _num_q; } DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; } - static oop* sentinel_ref() { return &_sentinelRef; } + static oop sentinel_ref() { return _sentinelRef; } + static oop* adr_sentinel_ref() { return &_sentinelRef; } + ReferencePolicy* setup_policy(bool always_clear) { + _current_soft_ref_policy = always_clear ? + _always_clear_soft_ref_policy : _default_soft_ref_policy; + _current_soft_ref_policy->setup(); // snapshot the policy threshold + return _current_soft_ref_policy; + } public: // Process references with a certain reachability level. @@ -101,46 +125,46 @@ // Work methods used by the method process_discovered_reflist // Phase1: keep alive all those referents that are otherwise // dead but which must be kept alive by policy (and their closure). - void process_phase1(DiscoveredList& refs_list_addr, + void process_phase1(DiscoveredList& refs_list, ReferencePolicy* policy, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc); // Phase2: remove all those references whose referents are // reachable. 
- inline void process_phase2(DiscoveredList& refs_list_addr, + inline void process_phase2(DiscoveredList& refs_list, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc) { if (discovery_is_atomic()) { // complete_gc is ignored in this case for this phase - pp2_work(refs_list_addr, is_alive, keep_alive); + pp2_work(refs_list, is_alive, keep_alive); } else { assert(complete_gc != NULL, "Error"); - pp2_work_concurrent_discovery(refs_list_addr, is_alive, + pp2_work_concurrent_discovery(refs_list, is_alive, keep_alive, complete_gc); } } // Work methods in support of process_phase2 - void pp2_work(DiscoveredList& refs_list_addr, + void pp2_work(DiscoveredList& refs_list, BoolObjectClosure* is_alive, OopClosure* keep_alive); void pp2_work_concurrent_discovery( - DiscoveredList& refs_list_addr, + DiscoveredList& refs_list, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc); // Phase3: process the referents by either clearing them // or keeping them alive (and their closure) - void process_phase3(DiscoveredList& refs_list_addr, + void process_phase3(DiscoveredList& refs_list, bool clear_referent, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc); // Enqueue references with a certain reachability level - void enqueue_discovered_reflist(DiscoveredList& refs_list, oop* pending_list_addr); - + void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr); + // "Preclean" all the discovered reference lists // by removing references with strongly reachable referents. // The first argument is a predicate on an oop that indicates @@ -172,6 +196,8 @@ // occupying the i / _num_q slot. const char* list_name(int i); + void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor); + protected: // "Preclean" the given discovered reference list // by removing references with strongly reachable referents. @@ -181,8 +207,7 @@ OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield); - - void enqueue_discovered_reflists(oop* pending_list_addr, AbstractRefProcTaskExecutor* task_executor); + int next_id() { int id = _next_id; if (++_next_id == _num_q) { @@ -192,11 +217,10 @@ } DiscoveredList* get_discovered_list(ReferenceType rt); inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj, - oop* discovered_addr); + HeapWord* discovered_addr); void verify_ok_to_handle_reflists() PRODUCT_RETURN; void abandon_partial_discovered_list(DiscoveredList& refs_list); - void abandon_partial_discovered_list_arr(DiscoveredList refs_lists[]); // Calculate the number of jni handles. unsigned int count_jni_refs(); @@ -217,6 +241,8 @@ _discovery_is_atomic(true), _enqueuing_is_done(false), _discovery_is_mt(false), + _discovered_list_needs_barrier(false), + _bs(NULL), _is_alive_non_header(NULL), _num_q(0), _processing_is_mt(false), @@ -224,18 +250,20 @@ {} ReferenceProcessor(MemRegion span, bool atomic_discovery, - bool mt_discovery, int mt_degree = 1, - bool mt_processing = false); - + bool mt_discovery, + int mt_degree = 1, + bool mt_processing = false, + bool discovered_list_needs_barrier = false); + // Allocates and initializes a reference processor. 
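The setup_policy() accessor above pairs policy selection (always-clear versus the default soft-ref policy) with a setup() call that re-snapshots VM state before each use, matching the referencePolicy changes earlier in this patch. The following sketch shows that select-then-snapshot arrangement with invented names and numbers (SoftRefPolicy, LruPolicy, a pretend 256 MB free-heap figure); it is not the VM's policy code.

#include <cstdio>

// Policy objects are long-lived; setup() refreshes per-cycle state such as
// the free-heap snapshot, and selection happens once per collection cycle.
class SoftRefPolicy {
 public:
  virtual ~SoftRefPolicy() = default;
  virtual void setup() {}                          // snapshot per-cycle state
  virtual bool should_clear(long idle_ms) const = 0;
};

class AlwaysClear : public SoftRefPolicy {
 public:
  bool should_clear(long) const override { return true; }
};

class LruPolicy : public SoftRefPolicy {
 public:
  void setup() override {
    // Pretend "free heap at last GC" is 256 MB and the knob is 1000 ms per MB.
    _max_interval_ms = 256L * 1000L;
  }
  bool should_clear(long idle_ms) const override { return idle_ms > _max_interval_ms; }
 private:
  long _max_interval_ms = 0;
};

static SoftRefPolicy* select_policy(bool always_clear, SoftRefPolicy* always, SoftRefPolicy* lru) {
  SoftRefPolicy* p = always_clear ? always : lru;
  p->setup();                                      // refresh the snapshot before use
  return p;
}

int main() {
  AlwaysClear always; LruPolicy lru;
  SoftRefPolicy* p = select_policy(false, &always, &lru);
  std::printf("clear ref idle 5 min: %s\n", p->should_clear(5 * 60 * 1000L) ? "yes" : "no");
  return 0;
}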
static ReferenceProcessor* create_ref_processor( MemRegion span, bool atomic_discovery, bool mt_discovery, BoolObjectClosure* is_alive_non_header = NULL, - int parallel_gc_threads = 1, - bool mt_processing = false); - + int parallel_gc_threads = 1, + bool mt_processing = false, + bool discovered_list_needs_barrier = false); // RefDiscoveryPolicy values enum { ReferenceBasedDiscovery = 0, @@ -286,8 +314,7 @@ bool discover_reference(oop obj, ReferenceType rt); // Process references found during GC (called by the garbage collector) - void process_discovered_references(ReferencePolicy* policy, - BoolObjectClosure* is_alive, + void process_discovered_references(BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor); @@ -296,6 +323,11 @@ // Enqueue references at end of GC (called by the garbage collector) bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL); + // If a discovery is in process that is being superceded, abandon it: all + // the discovered lists will be empty, and all the objects on them will + // have NULL discovered fields. Must be called only at a safepoint. + void abandon_partial_discovery(); + // debugging void verify_no_references_recorded() PRODUCT_RETURN; static void verify(); @@ -480,8 +512,8 @@ protected: EnqueueTask(ReferenceProcessor& ref_processor, DiscoveredList refs_lists[], - oop* pending_list_addr, - oop sentinel_ref, + HeapWord* pending_list_addr, + oop sentinel_ref, int n_queues) : _ref_processor(ref_processor), _refs_lists(refs_lists), @@ -496,8 +528,8 @@ protected: ReferenceProcessor& _ref_processor; DiscoveredList* _refs_lists; - oop* _pending_list_addr; - oop _sentinel_ref; + HeapWord* _pending_list_addr; + oop _sentinel_ref; int _n_queues; }; --- old/hotspot/src/share/vm/memory/restore.cpp 2009-08-01 04:12:27.958643590 +0100 +++ new/hotspot/src/share/vm/memory/restore.cpp 2009-08-01 04:12:27.883749728 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)restore.cpp 1.14 07/05/05 17:05:44 JVM" #endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,6 +53,8 @@ *p = obj; } + void do_oop(narrowOop* p) { ShouldNotReachHere(); } + void do_ptr(void** p) { assert(*p == NULL, "initializing previous initialized pointer."); void* obj = nextOop(); --- old/hotspot/src/share/vm/memory/serialize.cpp 2009-08-01 04:12:28.762240933 +0100 +++ new/hotspot/src/share/vm/memory/serialize.cpp 2009-08-01 04:12:28.688508736 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)serialize.cpp 1.9 07/05/05 17:05:55 JVM" #endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,17 +44,18 @@ int tag = 0; soc->do_tag(--tag); + assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive"); // Verify the sizes of various oops in the system. 
soc->do_tag(sizeof(oopDesc)); soc->do_tag(sizeof(instanceOopDesc)); soc->do_tag(sizeof(methodOopDesc)); soc->do_tag(sizeof(constMethodOopDesc)); soc->do_tag(sizeof(methodDataOopDesc)); - soc->do_tag(sizeof(arrayOopDesc)); + soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE)); soc->do_tag(sizeof(constantPoolOopDesc)); soc->do_tag(sizeof(constantPoolCacheOopDesc)); - soc->do_tag(sizeof(objArrayOopDesc)); - soc->do_tag(sizeof(typeArrayOopDesc)); + soc->do_tag(objArrayOopDesc::base_offset_in_bytes(T_BYTE)); + soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE)); soc->do_tag(sizeof(symbolOopDesc)); soc->do_tag(sizeof(klassOopDesc)); soc->do_tag(sizeof(markOopDesc)); --- old/hotspot/src/share/vm/memory/sharedHeap.cpp 2009-08-01 04:12:30.003783948 +0100 +++ new/hotspot/src/share/vm/memory/sharedHeap.cpp 2009-08-01 04:12:29.509867394 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)sharedHeap.cpp 1.59 07/05/17 15:55:10 JVM" #endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,15 +60,24 @@ } _sh = this; // ch is static, should be set only once. if ((UseParNewGC || - (UseConcMarkSweepGC && CMSParallelRemarkEnabled)) && + (UseConcMarkSweepGC && CMSParallelRemarkEnabled) || + UseG1GC) && ParallelGCThreads > 0) { - _workers = new WorkGang("Parallel GC Threads", ParallelGCThreads, true); + _workers = new WorkGang("Parallel GC Threads", ParallelGCThreads, + /* are_GC_task_threads */true, + /* are_ConcurrentGC_threads */false); if (_workers == NULL) { vm_exit_during_initialization("Failed necessary allocation."); } } } +bool SharedHeap::heap_lock_held_for_gc() { + Thread* t = Thread::current(); + return Heap_lock->owned_by_self() + || ( (t->is_GC_task_thread() || t->is_VM_thread()) + && _thread_holds_heap_lock_for_gc); +} void SharedHeap::set_par_threads(int t) { _n_par_threads = t; @@ -77,9 +86,10 @@ class AssertIsPermClosure: public OopClosure { public: - void do_oop(oop* p) { + virtual void do_oop(oop* p) { assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm."); } + virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } }; static AssertIsPermClosure assert_is_perm_closure; @@ -190,12 +200,13 @@ public: SkipAdjustingSharedStrings(OopClosure* clo) : _clo(clo) {} - void do_oop(oop* p) { + virtual void do_oop(oop* p) { oop o = (*p); if (!o->is_shared_readwrite()) { _clo->do_oop(p); } } + virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } }; // Unmarked shared Strings in the StringTable (which got there due to @@ -240,55 +251,16 @@ perm_gen()->ref_processor_init(); } -void SharedHeap::fill_region_with_object(MemRegion mr) { - // Disable the posting of JVMTI VMObjectAlloc events as we - // don't want the filling of tlabs with filler arrays to be - // reported to the profiler. - NoJvmtiVMObjectAllocMark njm; - - // Disable low memory detector because there is no real allocation. - LowMemoryDetectorDisabler lmd_dis; - - // It turns out that post_allocation_setup_array takes a handle, so the - // call below contains an implicit conversion. Best to free that handle - // as soon as possible. 
- HandleMark hm; - - size_t word_size = mr.word_size(); - size_t aligned_array_header_size = - align_object_size(typeArrayOopDesc::header_size(T_INT)); - - if (word_size >= aligned_array_header_size) { - const size_t array_length = - pointer_delta(mr.end(), mr.start()) - - typeArrayOopDesc::header_size(T_INT); - const size_t array_length_words = - array_length * (HeapWordSize/sizeof(jint)); - post_allocation_setup_array(Universe::intArrayKlassObj(), - mr.start(), - mr.word_size(), - (int)array_length_words); -#ifdef ASSERT - HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT)); - Copy::fill_to_words(elt_words, array_length, 0xDEAFBABE); -#endif - } else { - assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?"); - post_allocation_setup_obj(SystemDictionary::object_klass(), - mr.start(), - mr.word_size()); - } -} - // Some utilities. -void SharedHeap::print_size_transition(size_t bytes_before, - size_t bytes_after, - size_t capacity) { - tty->print(" %d%s->%d%s(%d%s)", - byte_size_in_proper_unit(bytes_before), - proper_unit_for_byte_size(bytes_before), - byte_size_in_proper_unit(bytes_after), - proper_unit_for_byte_size(bytes_after), - byte_size_in_proper_unit(capacity), - proper_unit_for_byte_size(capacity)); +void SharedHeap::print_size_transition(outputStream* out, + size_t bytes_before, + size_t bytes_after, + size_t capacity) { + out->print(" %d%s->%d%s(%d%s)", + byte_size_in_proper_unit(bytes_before), + proper_unit_for_byte_size(bytes_before), + byte_size_in_proper_unit(bytes_after), + proper_unit_for_byte_size(bytes_after), + byte_size_in_proper_unit(capacity), + proper_unit_for_byte_size(capacity)); } --- old/hotspot/src/share/vm/memory/sharedHeap.hpp 2009-08-01 04:12:31.273480247 +0100 +++ new/hotspot/src/share/vm/memory/sharedHeap.hpp 2009-08-01 04:12:31.204568944 +0100 @@ -47,6 +47,9 @@ class SharedHeap : public CollectedHeap { friend class VMStructs; + friend class VM_GC_Operation; + friend class VM_CGC_Operation; + private: // For claiming strong_roots tasks. SubTasksDone* _process_strong_tasks; @@ -85,6 +88,14 @@ // function. SharedHeap(CollectorPolicy* policy_); + // Returns true if the calling thread holds the heap lock, + // or the calling thread is a par gc thread and the heap_lock is held + // by the vm thread doing a gc operation. + bool heap_lock_held_for_gc(); + // True if the heap_lock is held by the a non-gc thread invoking a gc + // operation. + bool _thread_holds_heap_lock_for_gc; + public: static SharedHeap* heap() { return _sh; } @@ -100,14 +111,6 @@ void set_perm(PermGen* perm_gen) { _perm_gen = perm_gen; } - // A helper function that fills an allocated-but-not-yet-initialized - // region with a garbage object. - static void fill_region_with_object(MemRegion mr); - - // Minimum garbage fill object size - static size_t min_fill_size() { return (size_t)align_object_size(oopDesc::header_size()); } - static size_t min_fill_size_in_bytes() { return min_fill_size() * HeapWordSize; } - // This function returns the "GenRemSet" object that allows us to scan // generations; at least the perm gen, possibly more in a fully // generational heap. @@ -217,13 +220,12 @@ // "SharedHeap" can use in the implementation of its virtual // functions. -protected: +public: // Do anything common to GC's. virtual void gc_prologue(bool full) = 0; virtual void gc_epilogue(bool full) = 0; -public: // // New methods from CollectedHeap // @@ -269,9 +271,10 @@ } // Some utilities. 
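Illustrative sketch (the sh, bytes_before, bytes_after, and capacity variables are hypothetical): with the sharedHeap.cpp hunk above, print_size_transition now writes to a caller-supplied outputStream instead of unconditionally printing to tty, e.g.:

// Sketch only: report a collection's size transition on an explicit stream.
sh->print_size_transition(tty, bytes_before, bytes_after, capacity);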
- void print_size_transition(size_t bytes_before, - size_t bytes_after, - size_t capacity); + void print_size_transition(outputStream* out, + size_t bytes_before, + size_t bytes_after, + size_t capacity); }; --- old/hotspot/src/share/vm/memory/space.cpp 2009-08-01 04:12:32.103889149 +0100 +++ new/hotspot/src/share/vm/memory/space.cpp 2009-08-01 04:12:32.018765152 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)space.cpp 1.217 07/05/29 09:44:13 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,9 @@ # include "incls/_precompiled.incl" # include "incls/_space.cpp.incl" +void SpaceMemRegionOopsIterClosure::do_oop(oop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); } +void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); } + HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top, HeapWord* top_obj) { if (top_obj != NULL) { @@ -105,9 +108,9 @@ "Only ones we deal with for now."); assert(_precision != CardTableModRefBS::ObjHeadPreciseArray || - _last_bottom == NULL || - top <= _last_bottom, - "Not decreasing"); + _cl->idempotent() || _last_bottom == NULL || + top <= _last_bottom, + "Not decreasing"); NOT_PRODUCT(_last_bottom = mr.start()); bottom_obj = _sp->block_start(bottom); @@ -144,7 +147,14 @@ walk_mem_region(mr, bottom_obj, top); } - _min_done = bottom; + // An idempotent closure might be applied in any order, so we don't + // record a _min_done for it. + if (!_cl->idempotent()) { + _min_done = bottom; + } else { + assert(_min_done == _last_explicit_min_done, + "Don't update _min_done for idempotent cl"); + } } DirtyCardToOopClosure* Space::new_dcto_cl(OopClosure* cl, @@ -153,10 +163,6 @@ return new DirtyCardToOopClosure(this, cl, precision, boundary); } -void FilteringClosure::do_oop(oop* p) { - do_oop_nv(p); -} - HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top, HeapWord* top_obj) { if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) { @@ -236,34 +242,49 @@ return new ContiguousSpaceDCTOC(this, cl, precision, boundary); } -void Space::initialize(MemRegion mr, bool clear_space) { +void Space::initialize(MemRegion mr, + bool clear_space, + bool mangle_space) { HeapWord* bottom = mr.start(); HeapWord* end = mr.end(); assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end), "invalid space boundaries"); set_bottom(bottom); set_end(end); - if (clear_space) clear(); + if (clear_space) clear(mangle_space); +} + +void Space::clear(bool mangle_space) { + if (ZapUnusedHeapArea && mangle_space) { + mangle_unused_area(); + } +} + +ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL), + _concurrent_iteration_safe_limit(NULL) { + _mangler = new GenSpaceMangler(this); } -void Space::clear() { - if (ZapUnusedHeapArea) mangle_unused_area(); +ContiguousSpace::~ContiguousSpace() { + delete _mangler; } -void ContiguousSpace::initialize(MemRegion mr, bool clear_space) +void ContiguousSpace::initialize(MemRegion mr, + bool clear_space, + bool mangle_space) { - CompactibleSpace::initialize(mr, clear_space); - _concurrent_iteration_safe_limit = top(); + CompactibleSpace::initialize(mr, clear_space, mangle_space); + set_concurrent_iteration_safe_limit(top()); } -void ContiguousSpace::clear() { +void ContiguousSpace::clear(bool mangle_space) { 
set_top(bottom()); set_saved_mark(); - Space::clear(); + CompactibleSpace::clear(mangle_space); } bool Space::is_in(const void* p) const { - HeapWord* b = block_start(p); + HeapWord* b = block_start_const(p); return b != NULL && block_is_obj(b); } @@ -275,8 +296,8 @@ return p >= _top; } -void OffsetTableContigSpace::clear() { - ContiguousSpace::clear(); +void OffsetTableContigSpace::clear(bool mangle_space) { + ContiguousSpace::clear(mangle_space); _offsets.initialize_threshold(); } @@ -292,19 +313,53 @@ Space::set_end(new_end); } -void ContiguousSpace::mangle_unused_area() { - // to-space is used for storing marks during mark-sweep - mangle_region(MemRegion(top(), end())); +#ifndef PRODUCT + +void ContiguousSpace::set_top_for_allocations(HeapWord* v) { + mangler()->set_top_for_allocations(v); +} +void ContiguousSpace::set_top_for_allocations() { + mangler()->set_top_for_allocations(top()); +} +void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) { + mangler()->check_mangled_unused_area(limit); } +void ContiguousSpace::check_mangled_unused_area_complete() { + mangler()->check_mangled_unused_area_complete(); +} + +// Mangle only the unused space that has not previously +// been mangled and that has not been allocated since being +// mangled. +void ContiguousSpace::mangle_unused_area() { + mangler()->mangle_unused_area(); +} +void ContiguousSpace::mangle_unused_area_complete() { + mangler()->mangle_unused_area_complete(); +} void ContiguousSpace::mangle_region(MemRegion mr) { - debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord)); + // Although this method uses SpaceMangler::mangle_region() which + // is not specific to a space, when the ContiguousSpace version + // is called, it is always with regard to a space and this + // bounds checking is appropriate.
+ MemRegion space_mr(bottom(), end()); + assert(space_mr.contains(mr), "Mangling outside space"); + SpaceMangler::mangle_region(mr); +} +#endif // NOT_PRODUCT + +void CompactibleSpace::initialize(MemRegion mr, + bool clear_space, + bool mangle_space) { + Space::initialize(mr, clear_space, mangle_space); + set_compaction_top(bottom()); + _next_compaction_space = NULL; } -void CompactibleSpace::initialize(MemRegion mr, bool clear_space) { - Space::initialize(mr, clear_space); +void CompactibleSpace::clear(bool mangle_space) { + Space::clear(mangle_space); _compaction_top = bottom(); - _next_compaction_space = NULL; } HeapWord* CompactibleSpace::forward(oop q, size_t size, @@ -340,7 +395,7 @@ assert(q->forwardee() == NULL, "should be forwarded to NULL"); } - debug_only(MarkSweep::register_live_oop(q, size)); + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size)); compact_top += size; // we need to update the offset table so that the beginnings of objects can be @@ -357,19 +412,9 @@ HeapWord* q, size_t deadlength) { if (allowed_deadspace_words >= deadlength) { allowed_deadspace_words -= deadlength; - oop(q)->set_mark(markOopDesc::prototype()->set_marked()); - const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT); - if (deadlength >= min_int_array_size) { - oop(q)->set_klass(Universe::intArrayKlassObj()); - typeArrayOop(q)->set_length((int)((deadlength - min_int_array_size) - * (HeapWordSize/sizeof(jint)))); - } else { - assert((int) deadlength == instanceOopDesc::header_size(), - "size for smallest fake dead object doesn't match"); - oop(q)->set_klass(SystemDictionary::object_klass()); - } - assert((int) deadlength == oop(q)->size(), - "make sure size for fake dead object match"); + CollectedHeap::fill_with_object(q, deadlength); + oop(q)->set_mark(oop(q)->mark()->set_marked()); + assert((int) deadlength == oop(q)->size(), "bad filler object size"); // Recall that we required "q == compaction_top". return true; } else { @@ -408,15 +453,15 @@ while (q < t) { if (oop(q)->is_gc_marked()) { // q is alive - - debug_only(MarkSweep::track_interior_pointers(oop(q))); + + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); // point all the oops to the new location size_t size = oop(q)->adjust_pointers(); - debug_only(MarkSweep::check_interior_pointers()); - + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); + debug_only(prev_q = q); - debug_only(MarkSweep::validate_live_oop(oop(q), size)); - + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); + q += size; } else { // q is not a live object. But we're not in a compactible space, @@ -481,9 +526,9 @@ } guarantee(p == top(), "end of last object must match end of space"); if (top() != end()) { - guarantee(top() == block_start(end()-1) && - top() == block_start(top()), - "top should be start of unallocated block, if it exists"); + guarantee(top() == block_start_const(end()-1) && + top() == block_start_const(top()), + "top should be start of unallocated block, if it exists"); } } @@ -714,7 +759,7 @@ #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN // Very general, slow implementation. 
-HeapWord* ContiguousSpace::block_start(const void* p) const { +HeapWord* ContiguousSpace::block_start_const(const void* p) const { assert(MemRegion(bottom(), end()).contains(p), "p not in space"); if (p >= top()) { return top(); @@ -819,12 +864,13 @@ "size for smallest fake object doesn't match"); instanceOop obj = (instanceOop) allocate(size); obj->set_mark(markOopDesc::prototype()); + obj->set_klass_gap(0); obj->set_klass(SystemDictionary::object_klass()); } } -void EdenSpace::clear() { - ContiguousSpace::clear(); +void EdenSpace::clear(bool mangle_space) { + ContiguousSpace::clear(mangle_space); set_soft_end(end()); } @@ -881,16 +927,19 @@ _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true) { _offsets.set_contig_space(this); - initialize(mr, true); + initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle); } class VerifyOldOopClosure : public OopClosure { public: - oop the_obj; - bool allow_dirty; + oop _the_obj; + bool _allow_dirty; void do_oop(oop* p) { - the_obj->verify_old_oop(p, allow_dirty); + _the_obj->verify_old_oop(p, _allow_dirty); + } + void do_oop(narrowOop* p) { + _the_obj->verify_old_oop(p, _allow_dirty); } }; @@ -900,8 +949,8 @@ void OffsetTableContigSpace::verify(bool allow_dirty) const { HeapWord* p = bottom(); HeapWord* prev_p = NULL; - VerifyOldOopClosure blk; // Does this do anything? - blk.allow_dirty = allow_dirty; + VerifyOldOopClosure blk; // Does this do anything? + blk._allow_dirty = allow_dirty; int objs = 0; int blocks = 0; @@ -914,7 +963,8 @@ // For a sampling of objects in the space, find it using the // block offset table. if (blocks == BLOCK_SAMPLE_INTERVAL) { - guarantee(p == block_start(p + (size/2)), "check offset computation"); + guarantee(p == block_start_const(p + (size/2)), + "check offset computation"); blocks = 0; } else { blocks++; @@ -922,7 +972,7 @@ if (objs == OBJ_SAMPLE_INTERVAL) { oop(p)->verify(); - blk.the_obj = oop(p); + blk._the_obj = oop(p); oop(p)->oop_iterate(&blk); objs = 0; } else { @@ -940,11 +990,11 @@ } -int TenuredSpace::allowed_dead_ratio() const { +size_t TenuredSpace::allowed_dead_ratio() const { return MarkSweepDeadRatio; } -int ContigPermSpace::allowed_dead_ratio() const { +size_t ContigPermSpace::allowed_dead_ratio() const { return PermMarkSweepDeadRatio; } --- old/hotspot/src/share/vm/memory/space.hpp 2009-08-01 04:12:33.099598091 +0100 +++ new/hotspot/src/share/vm/memory/space.hpp 2009-08-01 04:12:33.008934973 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)space.hpp 1.149 07/05/29 09:44:14 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,21 +55,24 @@ class CardTableRS; class DirtyCardToOopClosure; - // An oop closure that is circumscribed by a filtering memory region. 
-class SpaceMemRegionOopsIterClosure: public virtual OopClosure { - OopClosure* cl; - MemRegion mr; -public: - void do_oop(oop* p) { - if (mr.contains(p)) { - cl->do_oop(p); +class SpaceMemRegionOopsIterClosure: public OopClosure { + private: + OopClosure* _cl; + MemRegion _mr; + protected: + template void do_oop_work(T* p) { + if (_mr.contains(p)) { + _cl->do_oop(p); } } - SpaceMemRegionOopsIterClosure(OopClosure* _cl, MemRegion _mr): cl(_cl), mr(_mr) {} + public: + SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr): + _cl(cl), _mr(mr) {} + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); }; - // A Space describes a heap area. Class Space is an abstract // base class. // @@ -105,7 +108,7 @@ virtual void set_bottom(HeapWord* value) { _bottom = value; } virtual void set_end(HeapWord* value) { _end = value; } - HeapWord* saved_mark_word() const { return _saved_mark_word; } + virtual HeapWord* saved_mark_word() const { return _saved_mark_word; } void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; } MemRegionClosure* preconsumptionDirtyCardClosure() const { @@ -131,15 +134,23 @@ return MemRegion(bottom(), saved_mark_word()); } - // Initialization - virtual void initialize(MemRegion mr, bool clear_space); - virtual void clear(); + // Initialization. + // "initialize" should be called once on a space, before it is used for + // any purpose. The "mr" arguments gives the bounds of the space, and + // the "clear_space" argument should be true unless the memory in "mr" is + // known to be zeroed. + virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); + + // The "clear" method must be called on a region that may have + // had allocation performed in it, but is now to be considered empty. + virtual void clear(bool mangle_space); // For detecting GC bugs. Should only be called at GC boundaries, since // some unused space may be used as scratch space during GC's. // Default implementation does nothing. We also call this when expanding // a space to satisfy an allocation request. See bug #4668531 virtual void mangle_unused_area() {} + virtual void mangle_unused_area_complete() {} virtual void mangle_region(MemRegion mr) {} // Testers @@ -216,7 +227,13 @@ // "block" that contains "p". We say "block" instead of "object" since // some heaps may not pack objects densely; a chunk may either be an // object or a non-object. If "p" is not in the space, return NULL. - virtual HeapWord* block_start(const void* p) const = 0; + virtual HeapWord* block_start_const(const void* p) const = 0; + + // The non-const version may have benevolent side effects on the data + // structure supporting these calls, possibly speeding up future calls. + // The default implementation, however, is simply to call the const + // version. + inline virtual HeapWord* block_start(const void* p); // Requires "addr" to be the start of a chunk, and returns its size. // "addr + size" is required to be the start of a new chunk, or the end @@ -288,6 +305,7 @@ // alternatively, the lowest address that // shouldn't be done again. NULL means infinity.) 
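Illustrative sketch (the space pointer and region bounds are hypothetical; SpaceDecorator::Clear and SpaceDecorator::Mangle are the constants this patch uses in the OffsetTableContigSpace constructor): the three-argument initialize documented above separates clearing from debug mangling, and clear() now takes the mangling decision explicitly:

// Sketch only: set up a space over [bottom_addr, end_addr), then later reuse it as empty.
MemRegion mr(bottom_addr, end_addr);
space->initialize(mr,
                  SpaceDecorator::Clear,    // memory in mr is not known to be zeroed
                  SpaceDecorator::Mangle);  // debug builds mangle the unused area
// ...
space->clear(SpaceDecorator::Mangle);       // empty the space again, re-mangling in debug builds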
NOT_PRODUCT(HeapWord* _last_bottom;) + NOT_PRODUCT(HeapWord* _last_explicit_min_done;) // Get the actual top of the area on which the closure will // operate, given where the top is assumed to be (the end of the @@ -311,13 +329,15 @@ HeapWord* boundary) : _sp(sp), _cl(cl), _precision(precision), _boundary(boundary), _min_done(NULL) { - NOT_PRODUCT(_last_bottom = NULL;) + NOT_PRODUCT(_last_bottom = NULL); + NOT_PRODUCT(_last_explicit_min_done = NULL); } void do_MemRegion(MemRegion mr); void set_min_done(HeapWord* min_done) { _min_done = min_done; + NOT_PRODUCT(_last_explicit_min_done = _min_done); } #ifndef PRODUCT void set_last_bottom(HeapWord* last_bottom) { @@ -354,7 +374,11 @@ CompactibleSpace* _next_compaction_space; public: - virtual void initialize(MemRegion mr, bool clear_space); + CompactibleSpace() : + _compaction_top(NULL), _next_compaction_space(NULL) {} + + virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); + virtual void clear(bool mangle_space); // Used temporarily during a compaction phase to hold the value // top should have when compaction is complete. @@ -400,8 +424,8 @@ // The maximum percentage of objects that can be dead in the compacted // live part of a compacted space ("deadwood" support.) - virtual int allowed_dead_ratio() const { return 0; }; - + virtual size_t allowed_dead_ratio() const { return 0; }; + // Some contiguous spaces may maintain some data structures that should // be updated whenever an allocation crosses a boundary. This function // returns the first such boundary. @@ -486,7 +510,7 @@ \ size_t allowed_deadspace = 0; \ if (skip_dead) { \ - int ratio = allowed_dead_ratio(); \ + const size_t ratio = allowed_dead_ratio(); \ allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \ } \ \ @@ -575,124 +599,125 @@ cp->space->set_compaction_top(compact_top); \ } -#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \ - /* adjust all the interior pointers to point at the new locations of objects \ - * Used by MarkSweep::mark_sweep_phase3() */ \ - \ - HeapWord* q = bottom(); \ - HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \ - \ - assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \ - \ - if (q < t && _first_dead > q && \ - !oop(q)->is_gc_marked()) { \ - /* we have a chunk of the space which hasn't moved and we've \ - * reinitialized the mark word during the previous pass, so we can't \ - * use is_gc_marked for the traversal. */ \ - HeapWord* end = _first_dead; \ - \ - while (q < end) { \ - /* I originally tried to conjoin "block_start(q) == q" to the \ - * assertion below, but that doesn't work, because you can't \ - * accurately traverse previous objects to get to the current one \ - * after their pointers (including pointers into permGen) have been \ - * updated, until the actual compaction is done. dld, 4/00 */ \ - assert(block_is_obj(q), \ - "should be at block boundaries, and should be looking at objs"); \ - \ - debug_only(MarkSweep::track_interior_pointers(oop(q))); \ - \ - /* point all the oops to the new location */ \ - size_t size = oop(q)->adjust_pointers(); \ - size = adjust_obj_size(size); \ - \ - debug_only(MarkSweep::check_interior_pointers()); \ - \ - debug_only(MarkSweep::validate_live_oop(oop(q), size)); \ - \ - q += size; \ - } \ - \ - if (_first_dead == t) { \ - q = t; \ - } else { \ - /* $$$ This is funky. Using this to read the previously written \ - * LiveRange. See also use below. 
*/ \ - q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ - } \ - } \ - \ +#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \ + /* adjust all the interior pointers to point at the new locations of objects \ + * Used by MarkSweep::mark_sweep_phase3() */ \ + \ + HeapWord* q = bottom(); \ + HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \ + \ + assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \ + \ + if (q < t && _first_dead > q && \ + !oop(q)->is_gc_marked()) { \ + /* we have a chunk of the space which hasn't moved and we've \ + * reinitialized the mark word during the previous pass, so we can't \ + * use is_gc_marked for the traversal. */ \ + HeapWord* end = _first_dead; \ + \ + while (q < end) { \ + /* I originally tried to conjoin "block_start(q) == q" to the \ + * assertion below, but that doesn't work, because you can't \ + * accurately traverse previous objects to get to the current one \ + * after their pointers (including pointers into permGen) have been \ + * updated, until the actual compaction is done. dld, 4/00 */ \ + assert(block_is_obj(q), \ + "should be at block boundaries, and should be looking at objs"); \ + \ + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \ + \ + /* point all the oops to the new location */ \ + size_t size = oop(q)->adjust_pointers(); \ + size = adjust_obj_size(size); \ + \ + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \ + \ + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \ + \ + q += size; \ + } \ + \ + if (_first_dead == t) { \ + q = t; \ + } else { \ + /* $$$ This is funky. Using this to read the previously written \ + * LiveRange. See also use below. */ \ + q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ + } \ + } \ + \ const intx interval = PrefetchScanIntervalInBytes; \ \ debug_only(HeapWord* prev_q = NULL); \ while (q < t) { \ /* prefetch beyond q */ \ Prefetch::write(q, interval); \ - if (oop(q)->is_gc_marked()) { \ - /* q is alive */ \ - debug_only(MarkSweep::track_interior_pointers(oop(q))); \ - /* point all the oops to the new location */ \ - size_t size = oop(q)->adjust_pointers(); \ - size = adjust_obj_size(size); \ - debug_only(MarkSweep::check_interior_pointers()); \ - debug_only(MarkSweep::validate_live_oop(oop(q), size)); \ - debug_only(prev_q = q); \ - q += size; \ - } else { \ - /* q is not a live object, so its mark should point at the next \ - * live object */ \ - debug_only(prev_q = q); \ - q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ - assert(q > prev_q, "we should be moving forward through memory"); \ - } \ - } \ - \ - assert(q == t, "just checking"); \ + if (oop(q)->is_gc_marked()) { \ + /* q is alive */ \ + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \ + /* point all the oops to the new location */ \ + size_t size = oop(q)->adjust_pointers(); \ + size = adjust_obj_size(size); \ + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \ + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \ + debug_only(prev_q = q); \ + q += size; \ + } else { \ + /* q is not a live object, so its mark should point at the next \ + * live object */ \ + debug_only(prev_q = q); \ + q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ + assert(q > prev_q, "we should be moving forward through memory"); \ + } \ + } \ + \ + assert(q == t, "just checking"); \ } -#define SCAN_AND_COMPACT(obj_size) { \ - /* Copy all live objects to their new location \ - * Used by 
MarkSweep::mark_sweep_phase4() */ \ - \ - HeapWord* q = bottom(); \ - HeapWord* const t = _end_of_live; \ - debug_only(HeapWord* prev_q = NULL); \ - \ - if (q < t && _first_dead > q && \ - !oop(q)->is_gc_marked()) { \ - debug_only( \ - /* we have a chunk of the space which hasn't moved and we've reinitialized the \ - * mark word during the previous pass, so we can't use is_gc_marked for the \ - * traversal. */ \ - HeapWord* const end = _first_dead; \ - \ - while (q < end) { \ - size_t size = obj_size(q); \ - assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)"); \ - debug_only(MarkSweep::live_oop_moved_to(q, size, q)); \ - debug_only(prev_q = q); \ - q += size; \ - } \ - ) /* debug_only */ \ - \ - if (_first_dead == t) { \ - q = t; \ - } else { \ - /* $$$ Funky */ \ - q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ - } \ - } \ - \ - const intx scan_interval = PrefetchScanIntervalInBytes; \ - const intx copy_interval = PrefetchCopyIntervalInBytes; \ - while (q < t) { \ - if (!oop(q)->is_gc_marked()) { \ - /* mark is pointer to next marked oop */ \ - debug_only(prev_q = q); \ - q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ - assert(q > prev_q, "we should be moving forward through memory"); \ - } else { \ - /* prefetch beyond q */ \ +#define SCAN_AND_COMPACT(obj_size) { \ + /* Copy all live objects to their new location \ + * Used by MarkSweep::mark_sweep_phase4() */ \ + \ + HeapWord* q = bottom(); \ + HeapWord* const t = _end_of_live; \ + debug_only(HeapWord* prev_q = NULL); \ + \ + if (q < t && _first_dead > q && \ + !oop(q)->is_gc_marked()) { \ + debug_only( \ + /* we have a chunk of the space which hasn't moved and we've reinitialized \ + * the mark word during the previous pass, so we can't use is_gc_marked for \ + * the traversal. 
*/ \ + HeapWord* const end = _first_dead; \ + \ + while (q < end) { \ + size_t size = obj_size(q); \ + assert(!oop(q)->is_gc_marked(), \ + "should be unmarked (special dense prefix handling)"); \ + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \ + debug_only(prev_q = q); \ + q += size; \ + } \ + ) /* debug_only */ \ + \ + if (_first_dead == t) { \ + q = t; \ + } else { \ + /* $$$ Funky */ \ + q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ + } \ + } \ + \ + const intx scan_interval = PrefetchScanIntervalInBytes; \ + const intx copy_interval = PrefetchCopyIntervalInBytes; \ + while (q < t) { \ + if (!oop(q)->is_gc_marked()) { \ + /* mark is pointer to next marked oop */ \ + debug_only(prev_q = q); \ + q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ + assert(q > prev_q, "we should be moving forward through memory"); \ + } else { \ + /* prefetch beyond q */ \ Prefetch::read(q, scan_interval); \ \ /* size and destination */ \ @@ -701,33 +726,38 @@ \ /* prefetch beyond compaction_top */ \ Prefetch::write(compaction_top, copy_interval); \ - \ - /* copy object and reinit its mark */ \ - debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top)); \ - assert(q != compaction_top, "everything in this pass should be moving"); \ - Copy::aligned_conjoint_words(q, compaction_top, size); \ - oop(compaction_top)->init_mark(); \ - assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ - \ - debug_only(prev_q = q); \ - q += size; \ - } \ - } \ - \ - /* Reset space after compaction is complete */ \ - reset_after_compaction(); \ + \ + /* copy object and reinit its mark */ \ + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \ + compaction_top)); \ + assert(q != compaction_top, "everything in this pass should be moving"); \ + Copy::aligned_conjoint_words(q, compaction_top, size); \ + oop(compaction_top)->init_mark(); \ + assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ + \ + debug_only(prev_q = q); \ + q += size; \ + } \ + } \ + \ + /* Let's remember if we were empty before we did the compaction. */ \ + bool was_empty = used_region().is_empty(); \ + /* Reset space after compaction is complete */ \ + reset_after_compaction(); \ /* We do this clear, below, since it has overloaded meanings for some */ \ /* space subtypes. For example, OffsetTableContigSpace's that were */ \ /* compacted into will have had their offset table thresholds updated */ \ /* continuously, but those that weren't need to have their thresholds */ \ /* re-initialized. Also mangles unused area for debugging. */ \ - if (is_empty()) { \ - clear(); \ + if (used_region().is_empty()) { \ + if (!was_empty) clear(SpaceDecorator::Mangle); \ } else { \ if (ZapUnusedHeapArea) mangle_unused_area(); \ } \ } +class GenSpaceMangler; + // A space in which the free area is contiguous. It therefore supports // faster allocation, and compaction. class ContiguousSpace: public CompactibleSpace { @@ -736,30 +766,55 @@ protected: HeapWord* _top; HeapWord* _concurrent_iteration_safe_limit; + // A helper for mangling the unused area of the space in debug builds. + GenSpaceMangler* _mangler; + + GenSpaceMangler* mangler() { return _mangler; } // Allocation helpers (return NULL if full). 
inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value); inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value); public: - virtual void initialize(MemRegion mr, bool clear_space); - + ContiguousSpace(); + ~ContiguousSpace(); + + virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); + virtual void clear(bool mangle_space); + // Accessors HeapWord* top() const { return _top; } void set_top(HeapWord* value) { _top = value; } - void set_saved_mark() { _saved_mark_word = top(); } - void reset_saved_mark() { _saved_mark_word = bottom(); } - - virtual void clear(); + virtual void set_saved_mark() { _saved_mark_word = top(); } + void reset_saved_mark() { _saved_mark_word = bottom(); } WaterMark bottom_mark() { return WaterMark(this, bottom()); } WaterMark top_mark() { return WaterMark(this, top()); } WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); } bool saved_mark_at_top() const { return saved_mark_word() == top(); } - void mangle_unused_area(); - void mangle_region(MemRegion mr); + // In debug mode mangle (write it with a particular bit + // pattern) the unused part of a space. + + // Used to save the an address in a space for later use during mangling. + void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN; + // Used to save the space's current top for later use during mangling. + void set_top_for_allocations() PRODUCT_RETURN; + + // Mangle regions in the space from the current top up to the + // previously mangled part of the space. + void mangle_unused_area() PRODUCT_RETURN; + // Mangle [top, end) + void mangle_unused_area_complete() PRODUCT_RETURN; + // Mangle the given MemRegion. + void mangle_region(MemRegion mr) PRODUCT_RETURN; + + // Do some sparse checking on the area that should have been mangled. + void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN; + // Check the complete area that should have been mangled. + // This code may be NULL depending on the macro DEBUG_MANGLING. + void check_mangled_unused_area_complete() PRODUCT_RETURN; // Size computations: sizes in bytes. size_t capacity() const { return byte_size(bottom(), end()); } @@ -841,7 +896,7 @@ virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk); // Very inefficient implementation. - virtual HeapWord* block_start(const void* p) const; + virtual HeapWord* block_start_const(const void* p) const; size_t block_size(const HeapWord* p) const; // If a block is in the allocated area, it is an object. bool block_is_obj(const HeapWord* p) const { return p < top(); } @@ -946,7 +1001,8 @@ HeapWord* _soft_end; public: - EdenSpace(DefNewGeneration* gen) : _gen(gen) { _soft_end = NULL; } + EdenSpace(DefNewGeneration* gen) : + _gen(gen), _soft_end(NULL) {} // Get/set just the 'soft' limit. HeapWord* soft_end() { return _soft_end; } @@ -954,7 +1010,7 @@ void set_soft_end(HeapWord* value) { _soft_end = value; } // Override. - void clear(); + void clear(bool mangle_space); // Set both the 'hard' and 'soft' limits (_end and _soft_end). void set_end(HeapWord* value) { @@ -998,9 +1054,9 @@ void set_bottom(HeapWord* value); void set_end(HeapWord* value); - void clear(); + void clear(bool mangle_space); - inline HeapWord* block_start(const void* p) const; + inline HeapWord* block_start_const(const void* p) const; // Add offset table update. 
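Illustrative sketch (the helper and its argument are hypothetical, and exact call sites vary by collector): one plausible debug-build ordering of the mangling hooks declared above, all of which are no-ops in product builds (PRODUCT_RETURN):

// Sketch only: ZapUnusedHeapArea-style bookkeeping around a collection.
static void example_mangling_checks(ContiguousSpace* space) {
  space->mangle_unused_area_complete();        // mangle [top, end) once the space is emptied
  space->set_top_for_allocations();            // remember where allocation will resume
  // ... mutators allocate, moving top() forward ...
  space->check_mangled_unused_area_complete(); // verify the still-unused area was not written to
}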
virtual inline HeapWord* allocate(size_t word_size); @@ -1026,7 +1082,7 @@ friend class VMStructs; protected: // Mark sweep support - int allowed_dead_ratio() const; + size_t allowed_dead_ratio() const; public: // Constructor TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray, @@ -1041,7 +1097,7 @@ friend class VMStructs; protected: // Mark sweep support - int allowed_dead_ratio() const; + size_t allowed_dead_ratio() const; public: // Constructor ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) : --- old/hotspot/src/share/vm/memory/space.inline.hpp 2009-08-01 04:12:34.075836295 +0100 +++ new/hotspot/src/share/vm/memory/space.inline.hpp 2009-08-01 04:12:34.004240999 +0100 @@ -25,6 +25,10 @@ * */ +inline HeapWord* Space::block_start(const void* p) { + return block_start_const(p); +} + inline HeapWord* OffsetTableContigSpace::allocate(size_t size) { HeapWord* res = ContiguousSpace::allocate(size); if (res != NULL) { @@ -53,7 +57,8 @@ return res; } -inline HeapWord* OffsetTableContigSpace::block_start(const void* p) const { +inline HeapWord* +OffsetTableContigSpace::block_start_const(const void* p) const { return _offsets.block_start(p); } --- old/hotspot/src/share/vm/memory/specialized_oop_closures.hpp 2009-08-01 04:12:34.884764043 +0100 +++ new/hotspot/src/share/vm/memory/specialized_oop_closures.hpp 2009-08-01 04:12:34.810906487 +0100 @@ -62,6 +62,12 @@ // This is split into several because of a Visual C++ 6.0 compiler bug // where very long macros cause the compiler to crash +// Some other heap might define further specialized closures. +#ifndef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES +#define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \ + /* None */ +#endif + #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f) \ f(ScanClosure,_nv) \ f(FastScanClosure,_nv) \ @@ -80,19 +86,21 @@ SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f) #ifndef SERIALGC -#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_3(f) \ - f(MarkRefsIntoAndScanClosure,_nv) \ - f(Par_MarkRefsIntoAndScanClosure,_nv) \ - f(PushAndMarkClosure,_nv) \ +#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f) \ + f(MarkRefsIntoAndScanClosure,_nv) \ + f(Par_MarkRefsIntoAndScanClosure,_nv) \ + f(PushAndMarkClosure,_nv) \ f(Par_PushAndMarkClosure,_nv) \ f(PushOrMarkClosure,_nv) \ f(Par_PushOrMarkClosure,_nv) \ f(CMSKeepAliveClosure,_nv) \ - f(CMSInnerParMarkAndPushClosure,_nv) + f(CMSInnerParMarkAndPushClosure,_nv) \ + FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) #else // SERIALGC -#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_3(f) +#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f) #endif // SERIALGC + // We separate these out, because sometime the general one has // a different definition from the specialized ones, and sometimes it // doesn't. @@ -101,8 +109,8 @@ f(OopClosure,_v) \ SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(f) -#define ALL_OOP_OOP_ITERATE_CLOSURES_3(f) \ - SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_3(f) +#define ALL_OOP_OOP_ITERATE_CLOSURES_2(f) \ + SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f) #ifndef SERIALGC // This macro applies an argument macro to all OopClosures for which we @@ -128,14 +136,22 @@ // The "root_class" is the most general class to define; this may be // "OopClosure" in some applications and "OopsInGenClosure" in others. + +// Some other heap might define further specialized closures. 
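Illustrative sketch (ExampleScanClosure is a made-up name): a collector-specific build can feed extra closures into the specialization lists by defining the FURTHER_SPECIALIZED_* hooks introduced above before this header supplies the empty defaults:

// Sketch only: contributed from a collector-specific header.
#define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \
  f(ExampleScanClosure,_nv)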
+#ifndef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES +#define FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f) \ + /* None */ +#endif + #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_S(f) \ f(ScanClosure,_nv) \ f(FastScanClosure,_nv) #ifndef SERIALGC #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f) \ - f(ParScanWithBarrierClosure,_nv) \ - f(ParScanWithoutBarrierClosure,_nv) + f(ParScanWithBarrierClosure,_nv) \ + f(ParScanWithoutBarrierClosure,_nv) \ + FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f) #else // SERIALGC #define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f) #endif // SERIALGC @@ -182,13 +198,15 @@ #if ENABLE_SPECIALIZATION_STATS private: - static int _numCallsAll; + static bool _init; + static bool _wrapped; + static jint _numCallsAll; - static int _numCallsTotal[NUM_Kinds]; - static int _numCalls_nv[NUM_Kinds]; + static jint _numCallsTotal[NUM_Kinds]; + static jint _numCalls_nv[NUM_Kinds]; - static int _numDoOopCallsTotal[NUM_Kinds]; - static int _numDoOopCalls_nv[NUM_Kinds]; + static jint _numDoOopCallsTotal[NUM_Kinds]; + static jint _numDoOopCalls_nv[NUM_Kinds]; public: #endif static void clear() PRODUCT_RETURN; @@ -206,22 +224,22 @@ #if ENABLE_SPECIALIZATION_STATS inline void SpecializationStats::record_call() { - _numCallsAll++;; + Atomic::inc(&_numCallsAll); } inline void SpecializationStats::record_iterate_call_v(Kind k) { - _numCallsTotal[k]++; + Atomic::inc(&_numCallsTotal[k]); } inline void SpecializationStats::record_iterate_call_nv(Kind k) { - _numCallsTotal[k]++; - _numCalls_nv[k]++; + Atomic::inc(&_numCallsTotal[k]); + Atomic::inc(&_numCalls_nv[k]); } inline void SpecializationStats::record_do_oop_call_v(Kind k) { - _numDoOopCallsTotal[k]++; + Atomic::inc(&_numDoOopCallsTotal[k]); } inline void SpecializationStats::record_do_oop_call_nv(Kind k) { - _numDoOopCallsTotal[k]++; - _numDoOopCalls_nv[k]++; + Atomic::inc(&_numDoOopCallsTotal[k]); + Atomic::inc(&_numDoOopCalls_nv[k]); } #else // !ENABLE_SPECIALIZATION_STATS --- old/hotspot/src/share/vm/memory/tenuredGeneration.cpp 2009-08-01 04:12:35.811176111 +0100 +++ new/hotspot/src/share/vm/memory/tenuredGeneration.cpp 2009-08-01 04:12:35.736285199 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)tenuredGeneration.cpp 1.47 07/05/29 09:44:17 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -390,7 +390,7 @@ "should contain whole object"); buf->undo_allocation(obj, word_sz); } else { - SharedHeap::fill_region_with_object(MemRegion(obj, word_sz)); + CollectedHeap::fill_with_object(obj, word_sz); } } @@ -412,10 +412,11 @@ void TenuredGeneration::verify_alloc_buffers_clean() { if (UseParNewGC) { for (uint i = 0; i < ParallelGCThreads; i++) { - _rs->verify_empty(_alloc_buffers[i]->range()); + _rs->verify_aligned_region_empty(_alloc_buffers[i]->range()); } } } + #else // SERIALGC void TenuredGeneration::retire_alloc_buffers_before_full_gc() {} void TenuredGeneration::verify_alloc_buffers_clean() {} --- old/hotspot/src/share/vm/memory/tenuredGeneration.hpp 2009-08-01 04:12:36.652884964 +0100 +++ new/hotspot/src/share/vm/memory/tenuredGeneration.hpp 2009-08-01 04:12:36.563270917 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)tenuredGeneration.hpp 1.27 07/05/29 09:44:17 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. 
+ * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -76,7 +76,6 @@ // Mark sweep support void compute_new_size(); - int allowed_dead_ratio() const; virtual void gc_prologue(bool full); virtual void gc_epilogue(bool full); --- old/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp 2009-08-01 04:12:37.463669599 +0100 +++ new/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp 2009-08-01 04:12:37.394685737 +0100 @@ -103,8 +103,7 @@ void ThreadLocalAllocBuffer::make_parsable(bool retire) { if (end() != NULL) { invariants(); - MemRegion mr(top(), hard_end()); - SharedHeap::fill_region_with_object(mr); + CollectedHeap::fill_with_object(top(), hard_end()); if (retire || ZeroTLAB) { // "Reset" the TLAB set_start(NULL); --- old/hotspot/src/share/vm/memory/universe.cpp 2009-08-01 04:12:38.279411976 +0100 +++ new/hotspot/src/share/vm/memory/universe.cpp 2009-08-01 04:12:38.184483797 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)universe.cpp 1.361 07/09/01 18:01:02 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,16 +52,16 @@ klassOop Universe::_constantPoolCacheKlassObj = NULL; klassOop Universe::_compiledICHolderKlassObj = NULL; klassOop Universe::_systemObjArrayKlassObj = NULL; -oop Universe::_int_mirror = NULL; -oop Universe::_float_mirror = NULL; -oop Universe::_double_mirror = NULL; -oop Universe::_byte_mirror = NULL; -oop Universe::_bool_mirror = NULL; -oop Universe::_char_mirror = NULL; -oop Universe::_long_mirror = NULL; -oop Universe::_short_mirror = NULL; -oop Universe::_void_mirror = NULL; -oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ }; +oop Universe::_int_mirror = NULL; +oop Universe::_float_mirror = NULL; +oop Universe::_double_mirror = NULL; +oop Universe::_byte_mirror = NULL; +oop Universe::_bool_mirror = NULL; +oop Universe::_char_mirror = NULL; +oop Universe::_long_mirror = NULL; +oop Universe::_short_mirror = NULL; +oop Universe::_void_mirror = NULL; +oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ }; oop Universe::_main_thread_group = NULL; oop Universe::_system_thread_group = NULL; typeArrayOop Universe::_the_empty_byte_array = NULL; @@ -99,9 +99,10 @@ bool Universe::_fully_initialized = false; size_t Universe::_heap_capacity_at_last_gc; -size_t Universe::_heap_used_at_last_gc; +size_t Universe::_heap_used_at_last_gc = 0; CollectedHeap* Universe::_collectedHeap = NULL; +address Universe::_heap_base = NULL; void Universe::basic_type_classes_do(void f(klassOop)) { @@ -259,16 +260,16 @@ _typeArrayKlassObjs[T_INT] = _intArrayKlassObj; _typeArrayKlassObjs[T_LONG] = _longArrayKlassObj; - _methodKlassObj = methodKlass::create_klass(CHECK); - _constMethodKlassObj = constMethodKlass::create_klass(CHECK); - _methodDataKlassObj = methodDataKlass::create_klass(CHECK); + _methodKlassObj = methodKlass::create_klass(CHECK); + _constMethodKlassObj = constMethodKlass::create_klass(CHECK); + _methodDataKlassObj = methodDataKlass::create_klass(CHECK); _constantPoolKlassObj = constantPoolKlass::create_klass(CHECK); _constantPoolCacheKlassObj = constantPoolCacheKlass::create_klass(CHECK); _compiledICHolderKlassObj = compiledICHolderKlass::create_klass(CHECK); _systemObjArrayKlassObj = 
objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK); - _the_empty_byte_array = oopFactory::new_permanent_byteArray(0, CHECK); + _the_empty_byte_array = oopFactory::new_permanent_byteArray(0, CHECK); _the_empty_short_array = oopFactory::new_permanent_shortArray(0, CHECK); _the_empty_int_array = oopFactory::new_permanent_intArray(0, CHECK); _the_empty_system_obj_array = oopFactory::new_system_objArray(0, CHECK); @@ -276,7 +277,6 @@ _the_array_interfaces_array = oopFactory::new_system_objArray(2, CHECK); _vm_exception = oopFactory::new_symbol("vm exception holder", CHECK); } else { - FileMapInfo *mapinfo = FileMapInfo::current_info(); char* buffer = mapinfo->region_base(CompactingPermGenGen::md); void** vtbl_list = (void**)buffer; @@ -369,26 +369,31 @@ // Only 1.3 or later has the java.lang.Shutdown class. // Only 1.4 or later has the java.lang.CharSequence interface. // Only 1.5 or later has the java.lang.management.MemoryUsage class. - if (JDK_Version::is_pre_jdk16_version()) { - klassOop k = SystemDictionary::resolve_or_null(vmSymbolHandles::java_lang_management_MemoryUsage(), THREAD); + if (JDK_Version::is_partially_initialized()) { + uint8_t jdk_version; + klassOop k = SystemDictionary::resolve_or_null( + vmSymbolHandles::java_lang_management_MemoryUsage(), THREAD); CLEAR_PENDING_EXCEPTION; // ignore exceptions if (k == NULL) { - k = SystemDictionary::resolve_or_null(vmSymbolHandles::java_lang_CharSequence(), THREAD); + k = SystemDictionary::resolve_or_null( + vmSymbolHandles::java_lang_CharSequence(), THREAD); CLEAR_PENDING_EXCEPTION; // ignore exceptions if (k == NULL) { - k = SystemDictionary::resolve_or_null(vmSymbolHandles::java_lang_Shutdown(), THREAD); + k = SystemDictionary::resolve_or_null( + vmSymbolHandles::java_lang_Shutdown(), THREAD); CLEAR_PENDING_EXCEPTION; // ignore exceptions if (k == NULL) { - JDK_Version::set_jdk12x_version(); + jdk_version = 2; } else { - JDK_Version::set_jdk13x_version(); + jdk_version = 3; } } else { - JDK_Version::set_jdk14x_version(); + jdk_version = 4; } } else { - JDK_Version::set_jdk15x_version(); + jdk_version = 5; } + JDK_Version::fully_initialize(jdk_version); } #ifdef ASSERT @@ -467,7 +472,7 @@ class FixupMirrorClosure: public ObjectClosure { public: - void do_object(oop obj) { + virtual void do_object(oop obj) { if (obj->is_klass()) { EXCEPTION_MARK; KlassHandle k(THREAD, klassOop(obj)); @@ -670,7 +675,7 @@ "LogHeapWordSize is incorrect."); guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?"); guarantee(sizeof(oop) % sizeof(HeapWord) == 0, - "oop size is not not a multiple of HeapWord size"); + "oop size is not not a multiple of HeapWord size"); TraceTime timer("Genesis", TraceStartupTime); GC_locker::lock(); // do not allow gc during bootstrapping JavaClasses::compute_hard_coded_offsets(); @@ -736,6 +741,15 @@ fatal("UseParallelGC not supported in java kernel vm."); #endif // SERIALGC + } else if (UseG1GC) { +#ifndef SERIALGC + G1CollectorPolicy* g1p = new G1CollectorPolicy_BestRegionsFirst(); + G1CollectedHeap* g1h = new G1CollectedHeap(g1p); + Universe::_collectedHeap = g1h; +#else // SERIALGC + fatal("UseG1GC not supported in java kernel vm."); +#endif // SERIALGC + } else { GenCollectorPolicy *gc_policy; @@ -762,6 +776,15 @@ if (status != JNI_OK) { return status; } + if (UseCompressedOops) { + // Subtract a page because something can get allocated at heap base. 
+ // This also makes implicit null checking work, because the + // memory+1 page below heap_base needs to cause a signal. + // See needs_explicit_null_check. + // Only set the heap base for compressed oops because it indicates + // compressed oops for pstack code. + Universe::_heap_base = Universe::heap()->base() - os::vm_page_size(); + } // We will never reach the CATCH below since Exceptions::_throw will cause // the VM to exit if an exception is thrown during initialization @@ -926,7 +949,10 @@ // This needs to be done before the first scavenge/gc, since // it's an input to soft ref clearing policy. - Universe::update_heap_info_at_gc(); + { + MutexLocker x(Heap_lock); + Universe::update_heap_info_at_gc(); + } // ("weak") refs processing infrastructure initialization Universe::heap()->post_initialize(); @@ -1182,10 +1208,11 @@ // ???: What if a CollectedHeap doesn't have a permanent generation? ShouldNotReachHere(); break; - case CollectedHeap::GenCollectedHeap: { - GenCollectedHeap* gch = (GenCollectedHeap*) Universe::heap(); - permanent_reserved = gch->perm_gen()->reserved(); - break; + case CollectedHeap::GenCollectedHeap: + case CollectedHeap::G1CollectedHeap: { + SharedHeap* sh = (SharedHeap*) Universe::heap(); + permanent_reserved = sh->perm_gen()->reserved(); + break; } #ifndef SERIALGC case CollectedHeap::ParallelScavengeHeap: { --- old/hotspot/src/share/vm/memory/universe.hpp 2009-08-01 04:12:39.179324296 +0100 +++ new/hotspot/src/share/vm/memory/universe.hpp 2009-08-01 04:12:39.100845040 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)universe.hpp 1.183 07/08/09 09:12:00 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -95,6 +95,7 @@ class Universe: AllStatic { + // Ugh. Universe is much too friendly. friend class MarkSweep; friend class oopDesc; friend class ClassLoader; @@ -183,11 +184,14 @@ // The particular choice of collected heap. static CollectedHeap* _collectedHeap; + // Base address for oop-within-java-object materialization. + // NULL if using wide oops. Doubles as heap oop null value. + static address _heap_base; // array of dummy objects used with +FullGCAlot debug_only(static objArrayOop _fullgc_alot_dummy_array;) - // index of next entry to clear - debug_only(static int _fullgc_alot_dummy_next;) + // index of next entry to clear + debug_only(static int _fullgc_alot_dummy_next;) // Compiler/dispatch support static int _base_vtable_size; // Java vtbl size of klass Object (in words) @@ -326,6 +330,10 @@ // The particular choice of collected heap. static CollectedHeap* heap() { return _collectedHeap; } + // For UseCompressedOops + static address heap_base() { return _heap_base; } + static address* heap_base_addr() { return &_heap_base; } + // Historic gc information static size_t get_heap_capacity_at_last_gc() { return _heap_capacity_at_last_gc; } static size_t get_heap_free_at_last_gc() { return _heap_capacity_at_last_gc - _heap_used_at_last_gc; } --- old/hotspot/src/share/vm/oops/arrayOop.hpp 2009-08-01 04:12:40.094832899 +0100 +++ new/hotspot/src/share/vm/oops/arrayOop.hpp 2009-08-01 04:12:40.012110729 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)arrayOop.hpp 1.35 07/05/05 17:06:00 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,49 +25,94 @@ * */ -// arrayOopDesc is the abstract baseclass for all arrays. +// arrayOopDesc is the abstract baseclass for all arrays. It doesn't +// declare pure virtual to enforce this because that would allocate a vtbl +// in each instance, which we don't want. + +// The layout of array Oops is: +// +// markOop +// klassOop // 32 bits if compressed but declared 64 in LP64. +// length // shares klass memory or allocated after declared fields. + class arrayOopDesc : public oopDesc { friend class VMStructs; - private: - int _length; // number of elements in the array - public: // Interpreter/Compiler offsets - static int length_offset_in_bytes() { return offset_of(arrayOopDesc, _length); } - static int base_offset_in_bytes(BasicType type) { return header_size(type) * HeapWordSize; } + + // Header size computation. + // The header is considered the oop part of this type plus the length. + // Returns the aligned header_size_in_bytes. This is not equivalent to + // sizeof(arrayOopDesc) which should not appear in the code. + static int header_size_in_bytes() { + size_t hs = align_size_up(length_offset_in_bytes() + sizeof(int), + HeapWordSize); +#ifdef ASSERT + // make sure it isn't called before UseCompressedOops is initialized. + static size_t arrayoopdesc_hs = 0; + if (arrayoopdesc_hs == 0) arrayoopdesc_hs = hs; + assert(arrayoopdesc_hs == hs, "header size can't change"); +#endif // ASSERT + return (int)hs; + } + + public: + // The _length field is not declared in C++. It is allocated after the + // declared nonstatic fields in arrayOopDesc if not compressed, otherwise + // it occupies the second half of the _klass field in oopDesc. + static int length_offset_in_bytes() { + return UseCompressedOops ? klass_gap_offset_in_bytes() : + sizeof(arrayOopDesc); + } + + // Returns the offset of the first element. + static int base_offset_in_bytes(BasicType type) { + return header_size(type) * HeapWordSize; + } // Returns the address of the first element. - void* base(BasicType type) const { return (void*) (((intptr_t) this) + base_offset_in_bytes(type)); } + void* base(BasicType type) const { + return (void*) (((intptr_t) this) + base_offset_in_bytes(type)); + } // Tells whether index is within bounds. bool is_within_bounds(int index) const { return 0 <= index && index < length(); } - // Accessores for instance variable - int length() const { return _length; } - void set_length(int length) { _length = length; } + // Accessors for instance variable which is not a C++ declared nonstatic + // field. + int length() const { + return *(int*)(((intptr_t)this) + length_offset_in_bytes()); + } + void set_length(int length) { + *(int*)(((intptr_t)this) + length_offset_in_bytes()) = length; + } - // Header size computation. - // Should only be called with constants as argument (will not constant fold otherwise) + // Should only be called with constants as argument + // (will not constant fold otherwise) + // Returns the header size in words aligned to the requirements of the + // array object type. static int header_size(BasicType type) { - return Universe::element_type_should_be_aligned(type) - ? align_object_size(sizeof(arrayOopDesc)/HeapWordSize) - : sizeof(arrayOopDesc)/HeapWordSize; + size_t typesize_in_bytes = header_size_in_bytes(); + return (int)(Universe::element_type_should_be_aligned(type) + ? 
align_object_size(typesize_in_bytes/HeapWordSize) + : typesize_in_bytes/HeapWordSize); } - // This method returns the maximum length that can passed into - // typeArrayOop::object_size(scale, length, header_size) without causing an - // overflow. We substract an extra 2*wordSize to guard against double word - // alignments. It gets the scale from the type2aelembytes array. - static int32_t max_array_length(BasicType type) { + // Return the maximum length of an array of BasicType. The length can passed + // to typeArrayOop::object_size(scale, length, header_size) without causing an + // overflow. + static int32_t max_array_length(BasicType type) { assert(type >= 0 && type < T_CONFLICT, "wrong type"); - assert(type2aelembytes[type] != 0, "wrong type"); - // We use max_jint, since object_size is internally represented by an 'int' - // This gives us an upper bound of max_jint words for the size of the oop. - int32_t max_words = (max_jint - header_size(type) - 2); - int elembytes = (type == T_OBJECT) ? T_OBJECT_aelem_bytes : type2aelembytes[type]; - jlong len = ((jlong)max_words * HeapWordSize) / elembytes; - return (len > max_jint) ? max_jint : (int32_t)len; + assert(type2aelembytes(type) != 0, "wrong type"); + const int bytes_per_element = type2aelembytes(type); + if (bytes_per_element < HeapWordSize) { + return max_jint; + } + + const int32_t max_words = align_size_down(max_jint, MinObjAlignment); + const int32_t max_element_words = max_words - header_size(type); + const int32_t words_per_element = bytes_per_element >> LogHeapWordSize; + return max_element_words / words_per_element; } - }; --- old/hotspot/src/share/vm/oops/constantPoolKlass.cpp 2009-08-01 04:12:41.005396306 +0100 +++ new/hotspot/src/share/vm/oops/constantPoolKlass.cpp 2009-08-01 04:12:40.932200766 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)constantPoolKlass.cpp 1.105 07/05/29 09:44:18 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,12 +31,14 @@ constantPoolOop constantPoolKlass::allocate(int length, TRAPS) { int size = constantPoolOopDesc::object_size(length); KlassHandle klass (THREAD, as_klassOop()); - constantPoolOop c = - (constantPoolOop)CollectedHeap::permanent_array_allocate(klass, size, length, CHECK_NULL); + constantPoolOop c = + (constantPoolOop)CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL); + c->set_length(length); c->set_tags(NULL); c->set_cache(NULL); c->set_pool_holder(NULL); + c->set_flags(0); // only set to non-zero if constant pool is merged by RedefineClasses c->set_orig_length(0); // all fields are initialized; needed for GC @@ -57,15 +59,15 @@ klassOop constantPoolKlass::create_klass(TRAPS) { constantPoolKlass o; - KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj()); - arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL); - arrayKlassHandle super (THREAD, k->super()); - complete_create_array_klass(k, super, CHECK_NULL); + KlassHandle h_this_klass(THREAD, Universe::klassKlassObj()); + KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL); + // Make sure size calculation is right + assert(k()->size() == align_object_size(header_size()), "wrong size for object"); + java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror return k(); } - -int constantPoolKlass::oop_size(oop obj) const { +int constantPoolKlass::oop_size(oop obj) const { assert(obj->is_constantPool(), "must be constantPool"); return constantPoolOop(obj)->object_size(); } @@ -263,10 +265,32 @@ void constantPoolKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) { assert(obj->is_constantPool(), "should be constant pool"); + constantPoolOop cp = (constantPoolOop) obj; + if (AnonymousClasses && cp->has_pseudo_string() && cp->tags() != NULL) { + oop* base = (oop*)cp->base(); + for (int i = 0; i < cp->length(); ++i, ++base) { + if (cp->tag_at(i).is_string()) { + if (PSScavenge::should_scavenge(base)) { + pm->claim_or_forward_breadth(base); + } + } + } + } } void constantPoolKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { assert(obj->is_constantPool(), "should be constant pool"); + constantPoolOop cp = (constantPoolOop) obj; + if (AnonymousClasses && cp->has_pseudo_string() && cp->tags() != NULL) { + oop* base = (oop*)cp->base(); + for (int i = 0; i < cp->length(); ++i, ++base) { + if (cp->tag_at(i).is_string()) { + if (PSScavenge::should_scavenge(base)) { + pm->claim_or_forward_depth(base); + } + } + } + } } #endif // SERIALGC @@ -278,8 +302,13 @@ EXCEPTION_MARK; oop anObj; assert(obj->is_constantPool(), "must be constantPool"); - arrayKlass::oop_print_on(obj, st); - constantPoolOop cp = constantPoolOop(obj); + Klass::oop_print_on(obj, st); + constantPoolOop cp = constantPoolOop(obj); + if (cp->flags() != 0) { + st->print(" - flags : 0x%x", cp->flags()); + if (cp->has_pseudo_string()) st->print(" has_pseudo_string"); + st->cr(); + } // Temp. remove cache so we can do lookups with original indicies. 
constantPoolCacheHandle cache (THREAD, cp->cache()); @@ -304,7 +333,11 @@ break; case JVM_CONSTANT_UnresolvedString : case JVM_CONSTANT_String : - anObj = cp->string_at(index, CATCH); + if (cp->is_pseudo_string_at(index)) { + anObj = cp->pseudo_string_at(index); + } else { + anObj = cp->string_at(index, CATCH); + } anObj->print_value_on(st); st->print(" {0x%lx}", (address)anObj); break; @@ -384,8 +417,12 @@ "should be symbol or instance"); } if (cp->tag_at(i).is_string()) { - guarantee((*base)->is_perm(), "should be in permspace"); - guarantee((*base)->is_instance(), "should be instance"); + if (!cp->has_pseudo_string()) { + guarantee((*base)->is_perm(), "should be in permspace"); + guarantee((*base)->is_instance(), "should be instance"); + } else { + // can be non-perm, can be non-instance (array) + } } base++; } --- old/hotspot/src/share/vm/oops/constantPoolKlass.hpp 2009-08-01 04:12:42.203172418 +0100 +++ new/hotspot/src/share/vm/oops/constantPoolKlass.hpp 2009-08-01 04:12:42.130612901 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)constantPoolKlass.hpp 1.51 07/05/29 09:44:18 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,8 @@ // A constantPoolKlass is the klass of a constantPoolOop -class constantPoolKlass : public arrayKlass { +class constantPoolKlass : public Klass { + juint _alloc_size; // allocation profiling support public: // Dispatched klass operations bool oop_is_constantPool() const { return true; } @@ -47,7 +48,7 @@ // Sizing static int header_size() { return oopDesc::header_size() + sizeof(constantPoolKlass)/HeapWordSize; } - int object_size() const { return arrayKlass::object_size(header_size()); } + int object_size() const { return align_object_size(header_size()); } // Garbage collection void oop_follow_contents(oop obj); @@ -60,6 +61,11 @@ int oop_oop_iterate(oop obj, OopClosure* blk); int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr); + // Allocation profiling support + // no idea why this is pure virtual and not in Klass ??? + juint alloc_size() const { return _alloc_size; } + void set_alloc_size(juint n) { _alloc_size = n; } + #ifndef PRODUCT public: // Printing --- old/hotspot/src/share/vm/oops/constantPoolOop.cpp 2009-08-01 04:12:43.071616845 +0100 +++ new/hotspot/src/share/vm/oops/constantPoolOop.cpp 2009-08-01 04:12:42.997999397 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)constantPoolOop.cpp 1.104 07/05/05 17:06:01 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,8 +28,20 @@ # include "incls/_precompiled.incl" # include "incls/_constantPoolOop.cpp.incl" -klassOop constantPoolOopDesc::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) { - // A resolved constantPool entry will contain a klassOop, otherwise a symbolOop. 
+void constantPoolOopDesc::set_flag_at(FlagBit fb) { + const int MAX_STATE_CHANGES = 2; + for (int i = MAX_STATE_CHANGES + 10; i > 0; i--) { + int oflags = _flags; + int nflags = oflags | (1 << (int)fb); + if (Atomic::cmpxchg(nflags, &_flags, oflags) == oflags) + return; + } + assert(false, "failed to cmpxchg flags"); + _flags |= (1 << (int)fb); // better than nothing +} + +klassOop constantPoolOopDesc::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) { + // A resolved constantPool entry will contain a klassOop, otherwise a symbolOop. // It is not safe to rely on the tag bit's here, since we don't have a lock, and the entry and // tag is not updated atomicly. oop entry = *(this_oop->obj_at_addr(which)); @@ -336,8 +348,10 @@ oop entry = *(obj_at_addr(which)); if (entry->is_symbol()) { return ((symbolOop)entry)->as_C_string(); - } else { + } else if (java_lang_String::is_instance(entry)) { return java_lang_String::as_utf8_string(entry); + } else { + return (char*)""; } } @@ -388,7 +402,20 @@ } -bool constantPoolOopDesc::klass_name_at_matches(instanceKlassHandle k, +bool constantPoolOopDesc::is_pseudo_string_at(int which) { + oop entry = *(obj_at_addr(which)); + if (entry->is_symbol()) + // Not yet resolved, but it will resolve to a string. + return false; + else if (java_lang_String::is_instance(entry)) + return false; // actually, it might be a non-interned or non-perm string + else + // truly pseudo + return true; +} + + +bool constantPoolOopDesc::klass_name_at_matches(instanceKlassHandle k, int which) { // Names are interned, so we can compare symbolOops directly symbolOop cp_name = klass_name_at(which); --- old/hotspot/src/share/vm/oops/constantPoolOop.hpp 2009-08-01 04:12:43.954249375 +0100 +++ new/hotspot/src/share/vm/oops/constantPoolOop.hpp 2009-08-01 04:12:43.874836308 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)constantPoolOop.hpp 1.105 07/08/29 13:42:26 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,13 +37,15 @@ class SymbolHashMap; -class constantPoolOopDesc : public arrayOopDesc { +class constantPoolOopDesc : public oopDesc { friend class VMStructs; friend class BytecodeInterpreter; // Directly extracts an oop in the pool for fast instanceof/checkcast private: typeArrayOop _tags; // the tag array describing the constant pool's contents constantPoolCacheOop _cache; // the cache holding interpreter runtime information klassOop _pool_holder; // the corresponding class + int _flags; // a few header bits to describe contents for GC + int _length; // number of elements in the array // only set to non-zero if constant pool is merged by RedefineClasses int _orig_length; @@ -51,6 +53,16 @@ void tag_at_put(int which, jbyte t) { tags()->byte_at_put(which, t); } void release_tag_at_put(int which, jbyte t) { tags()->release_byte_at_put(which, t); } + enum FlagBit { + FB_has_pseudo_string = 2 + }; + + int flags() const { return _flags; } + void set_flags(int f) { _flags = f; } + bool flag_at(FlagBit fb) const { return (_flags & (1 << (int)fb)) != 0; } + void set_flag_at(FlagBit fb); + // no clear_flag_at function; they only increase + private: intptr_t* base() const { return (intptr_t*) (((char*) this) + sizeof(constantPoolOopDesc)); } oop* tags_addr() { return (oop*)&_tags; } @@ -84,6 +96,9 @@ public: typeArrayOop tags() const { return _tags; } + bool has_pseudo_string() const { return flag_at(FB_has_pseudo_string); } + void set_pseudo_string() { set_flag_at(FB_has_pseudo_string); } + // Klass holding pool klassOop pool_holder() const { return _pool_holder; } void set_pool_holder(klassOop k) { oop_store_without_check((oop*)&_pool_holder, (oop) k); } @@ -274,6 +289,27 @@ return string_at_impl(h_this, which, CHECK_NULL); } + // A "pseudo-string" is an non-string oop that has found is way into + // a String entry. + // Under AnonymousClasses this can happen if the user patches a live + // object into a CONSTANT_String entry of an anonymous class. + // Method oops internally created for method handles may also + // use pseudo-strings to link themselves to related metaobjects. + + bool is_pseudo_string_at(int which); + + oop pseudo_string_at(int which) { + assert(tag_at(which).is_string(), "Corrupted constant pool"); + return *obj_at_addr(which); + } + + void pseudo_string_at_put(int which, oop x) { + assert(AnonymousClasses, ""); + set_pseudo_string(); // mark header + assert(tag_at(which).is_string() || tag_at(which).is_unresolved_string(), "Corrupted constant pool"); + string_at_put(which, x); // this works just fine + } + // only called when we are sure a string entry is already resolved (via an // earlier string_at call. oop resolved_string_at(int which) { @@ -295,6 +331,7 @@ // UTF8 char* representation was chosen to avoid conversion of // java_lang_Strings at resolved entries into symbolOops // or vice versa. + // Caller is responsible for checking for pseudo-strings. char* string_at_noresolve(int which); jint name_and_type_at(int which) { @@ -333,6 +370,14 @@ bool klass_name_at_matches(instanceKlassHandle k, int which); // Sizing + int length() const { return _length; } + void set_length(int length) { _length = length; } + + // Tells whether index is within bounds. 
+ bool is_within_bounds(int index) const { + return 0 <= index && index < length(); + } + static int header_size() { return sizeof(constantPoolOopDesc)/HeapWordSize; } static int object_size(int length) { return align_object_size(header_size() + length); } int object_size() { return object_size(length()); } --- old/hotspot/src/share/vm/oops/cpCacheKlass.cpp 2009-08-01 04:12:44.849794670 +0100 +++ new/hotspot/src/share/vm/oops/cpCacheKlass.cpp 2009-08-01 04:12:44.772699581 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cpCacheKlass.cpp 1.46 07/05/29 09:44:18 JVM" #endif /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,18 +40,19 @@ int size = constantPoolCacheOopDesc::object_size(length); KlassHandle klass (THREAD, as_klassOop()); constantPoolCacheOop cache = (constantPoolCacheOop) - CollectedHeap::permanent_array_allocate(klass, size, length, CHECK_NULL); + CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL); + cache->set_length(length); cache->set_constant_pool(NULL); return cache; } - klassOop constantPoolCacheKlass::create_klass(TRAPS) { constantPoolCacheKlass o; - KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj()); - arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL); - KlassHandle super (THREAD, k->super()); - complete_create_array_klass(k, super, CHECK_NULL); + KlassHandle h_this_klass(THREAD, Universe::klassKlassObj()); + KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL); + // Make sure size calculation is right + assert(k()->size() == align_object_size(header_size()), "wrong size for object"); + java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror return k(); } @@ -186,7 +187,7 @@ assert(obj->is_constantPoolCache(), "obj must be constant pool cache"); constantPoolCacheOop cache = (constantPoolCacheOop)obj; // super print - arrayKlass::oop_print_on(obj, st); + Klass::oop_print_on(obj, st); // print constant pool cache entries for (int i = 0; i < cache->length(); i++) cache->entry_at(i)->print(st, i); } @@ -197,7 +198,7 @@ guarantee(obj->is_constantPoolCache(), "obj must be constant pool cache"); constantPoolCacheOop cache = (constantPoolCacheOop)obj; // super verify - arrayKlass::oop_verify_on(obj, st); + Klass::oop_verify_on(obj, st); // print constant pool cache entries for (int i = 0; i < cache->length(); i++) cache->entry_at(i)->verify(st); } --- old/hotspot/src/share/vm/oops/cpCacheKlass.hpp 2009-08-01 04:12:45.675045216 +0100 +++ new/hotspot/src/share/vm/oops/cpCacheKlass.hpp 2009-08-01 04:12:45.601019536 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cpCacheKlass.hpp 1.33 07/05/29 09:44:19 JVM" #endif /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,8 @@ * */ -class constantPoolCacheKlass: public arrayKlass { +class constantPoolCacheKlass: public Klass { + juint _alloc_size; // allocation profiling support public: // Dispatched klass operations bool oop_is_constantPoolCache() const { return true; } @@ -44,8 +45,8 @@ } // Sizing - static int header_size() { return oopDesc::header_size() + sizeof(constantPoolCacheKlass)/HeapWordSize; } - int object_size() const { return arrayKlass::object_size(header_size()); } + static int header_size() { return oopDesc::header_size() + sizeof(constantPoolCacheKlass)/HeapWordSize; } + int object_size() const { return align_object_size(header_size()); } // Garbage collection void oop_follow_contents(oop obj); @@ -58,6 +59,10 @@ int oop_oop_iterate(oop obj, OopClosure* blk); int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr); + // Allocation profiling support + juint alloc_size() const { return _alloc_size; } + void set_alloc_size(juint n) { _alloc_size = n; } + #ifndef PRODUCT public: // Printing --- old/hotspot/src/share/vm/oops/cpCacheOop.cpp 2009-08-01 04:12:46.534176007 +0100 +++ new/hotspot/src/share/vm/oops/cpCacheOop.cpp 2009-08-01 04:12:46.445997715 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cpCacheOop.cpp 1.79 07/05/29 09:44:19 JVM" #endif /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -221,6 +221,7 @@ public: LocalOopClosure(void f(oop*)) { _f = f; } virtual void do_oop(oop* o) { _f(o); } + virtual void do_oop(narrowOop *o) { ShouldNotReachHere(); } }; --- old/hotspot/src/share/vm/oops/cpCacheOop.hpp 2009-08-01 04:12:47.401994503 +0100 +++ new/hotspot/src/share/vm/oops/cpCacheOop.hpp 2009-08-01 04:12:47.320050726 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cpCacheOop.hpp 1.74 07/05/29 09:44:19 JVM" #endif /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -289,12 +289,17 @@ // is created and initialized before a class is actively used (i.e., initialized), the indivi- // dual cache entries are filled at resolution (i.e., "link") time (see also: rewriter.*). 
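For reference before the class change below: in this patch both constantPoolOopDesc and constantPoolCacheOopDesc stop inheriting from arrayOopDesc and instead declare their own _length field, with allocation switching from permanent_array_allocate to permanent_obj_allocate plus set_length, presumably because the arrayOop length slot no longer sits at a fixed declared offset once UseCompressedOops can place it in the klass gap. The following standalone sketch only illustrates the shared sizing pattern (explicit length, object size computed from it); every name and constant in it is made up for the sketch and none of it is HotSpot or patch code.

#include <cstdio>

// Stand-in constants; the real values come from the VM's globals.
static const int HeapWordSize    = (int)sizeof(void*); // one heap word
static const int MinObjAlignment = 2;                  // in heap words; 2 is chosen only so the rounding is visible

static int align_object_size(int words) {
  // Round up to a multiple of the object alignment, like align_object_size in the VM.
  return (words + MinObjAlignment - 1) & ~(MinObjAlignment - 1);
}

// A metadata object that used to be an arrayOop: it now declares its own
// element count instead of relying on the arrayOop length slot.
struct FakePoolDesc {
  void* mark;     // stands in for the mark word
  void* klass;    // stands in for the klass reference
  int   _length;  // explicit element count
};

static int header_size() {
  // Header size in heap words, rounded up.
  return (int)((sizeof(FakePoolDesc) + HeapWordSize - 1) / HeapWordSize);
}

// Mirrors the object_size(length) idiom in the patch: header plus one
// fixed-size slot per entry, rounded up to the object alignment.
static int object_size(int length, int entry_words) {
  return align_object_size(header_size() + length * entry_words);
}

int main() {
  printf("pool with 10 one-word entries: %d heap words\n", object_size(10, 1));
  return 0;
}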
-class constantPoolCacheOopDesc: public arrayOopDesc { +class constantPoolCacheOopDesc: public oopDesc { friend class VMStructs; private: + int _length; constantPoolOop _constant_pool; // the corresponding constant pool // Sizing + debug_only(friend class ClassVerifier;) + int length() const { return _length; } + void set_length(int length) { _length = length; } + static int header_size() { return sizeof(constantPoolCacheOopDesc) / HeapWordSize; } static int object_size(int length) { return align_object_size(header_size() + length * in_words(ConstantPoolCacheEntry::size())); } int object_size() { return object_size(length()); } --- old/hotspot/src/share/vm/oops/generateOopMap.cpp 2009-08-01 04:12:48.256888127 +0100 +++ new/hotspot/src/share/vm/oops/generateOopMap.cpp 2009-08-01 04:12:48.154559913 +0100 @@ -372,22 +372,9 @@ void GenerateOopMap ::initialize_bb() { _gc_points = 0; - _bb_count = 0; - int size = binsToHold(method()->code_size()); - _bb_hdr_bits = NEW_RESOURCE_ARRAY(uintptr_t,size); - memset(_bb_hdr_bits, 0, size*sizeof(uintptr_t)); -} - -void GenerateOopMap ::set_bbmark_bit(int bci) { - int idx = bci >> LogBitsPerWord; - uintptr_t bit = (uintptr_t)1 << (bci & (BitsPerWord-1)); - _bb_hdr_bits[idx] |= bit; -} - -void GenerateOopMap ::clear_bbmark_bit(int bci) { - int idx = bci >> LogBitsPerWord; - uintptr_t bit = (uintptr_t)1 << (bci & (BitsPerWord-1)); - _bb_hdr_bits[idx] &= (~bit); + _bb_count = 0; + _bb_hdr_bits.clear(); + _bb_hdr_bits.resize(method()->code_size()); } void GenerateOopMap::bb_mark_fct(GenerateOopMap *c, int bci, int *data) { @@ -955,6 +942,17 @@ _basic_blocks[bbNo-1]._end_bci = prev_bci; + // Check that the correct number of basicblocks was found + if (bbNo !=_bb_count) { + if (bbNo < _bb_count) { + verify_error("jump into the middle of instruction?"); + return; + } else { + verify_error("extra basic blocks - should not happen?"); + return; + } + } + _max_monitors = monitor_count; // Now that we have a bound on the depth of the monitor stack, we can @@ -988,17 +986,6 @@ } #endif - // Check that the correct number of basicblocks was found - if (bbNo !=_bb_count) { - if (bbNo < _bb_count) { - verify_error("jump into the middle of instruction?"); - return; - } else { - verify_error("extra basic blocks - should not happen?"); - return; - } - } - // Mark all alive blocks mark_reachable_code(); } @@ -1025,21 +1012,22 @@ int new_method_size) { assert(new_method_size >= method()->code_size() + delta, "new method size is too small"); - int newWords = binsToHold(new_method_size); - uintptr_t * new_bb_hdr_bits = NEW_RESOURCE_ARRAY(uintptr_t, newWords); + BitMap::bm_word_t* new_bb_hdr_bits = + NEW_RESOURCE_ARRAY(BitMap::bm_word_t, + BitMap::word_align_up(new_method_size)); + _bb_hdr_bits.set_map(new_bb_hdr_bits); + _bb_hdr_bits.set_size(new_method_size); + _bb_hdr_bits.clear(); - BitMap bb_bits(new_bb_hdr_bits, new_method_size); - bb_bits.clear(); - - for(int k = 0; k < _bb_count; k++) { - if (_basic_blocks[k]._bci > bci) { - _basic_blocks[k]._bci += delta; - _basic_blocks[k]._end_bci += delta; - } - bb_bits.at_put(_basic_blocks[k]._bci, true); - } - _bb_hdr_bits = new_bb_hdr_bits ; + + for(int k = 0; k < _bb_count; k++) { + if (_basic_blocks[k]._bci > bci) { + _basic_blocks[k]._bci += delta; + _basic_blocks[k]._end_bci += delta; + } + _bb_hdr_bits.at_put(_basic_blocks[k]._bci, true); + } } // --- old/hotspot/src/share/vm/oops/generateOopMap.hpp 2009-08-01 04:12:49.197554479 +0100 +++ new/hotspot/src/share/vm/oops/generateOopMap.hpp 2009-08-01 04:12:49.128006525 +0100 @@ 
-344,16 +344,22 @@ BasicBlock * _basic_blocks; // Array of basicblock info int _gc_points; int _bb_count; - uintptr_t * _bb_hdr_bits; + BitMap _bb_hdr_bits; // Basicblocks methods void initialize_bb (); void mark_bbheaders_and_count_gc_points(); - bool is_bb_header (int bci) const { return (_bb_hdr_bits[bci >> LogBitsPerWord] & ((uintptr_t)1 << (bci & (BitsPerWord-1)))) != 0; } + bool is_bb_header (int bci) const { + return _bb_hdr_bits.at(bci); + } int gc_points () const { return _gc_points; } int bb_count () const { return _bb_count; } - void set_bbmark_bit (int bci); - void clear_bbmark_bit (int bci); + void set_bbmark_bit (int bci) { + _bb_hdr_bits.at_put(bci, true); + } + void clear_bbmark_bit (int bci) { + _bb_hdr_bits.at_put(bci, false); + } BasicBlock * get_basic_block_at (int bci) const; BasicBlock * get_basic_block_containing (int bci) const; void interp_bb (BasicBlock *bb); --- old/hotspot/src/share/vm/oops/instanceKlass.cpp 2009-08-01 04:12:50.033733228 +0100 +++ new/hotspot/src/share/vm/oops/instanceKlass.cpp 2009-08-01 04:12:49.948043810 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)instanceKlass.cpp 1.324 08/11/24 12:22:48 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -794,17 +794,39 @@ } +static int compare_fields_by_offset(int* a, int* b) { + return a[0] - b[0]; +} + void instanceKlass::do_nonstatic_fields(FieldClosure* cl) { - fieldDescriptor fd; instanceKlass* super = superklass(); if (super != NULL) { super->do_nonstatic_fields(cl); } + fieldDescriptor fd; int length = fields()->length(); + // In DebugInfo nonstatic fields are sorted by offset. + int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1)); + int j = 0; for (int i = 0; i < length; i += next_offset) { fd.initialize(as_klassOop(), i); - if (!(fd.is_static())) cl->do_field(&fd); - } + if (!fd.is_static()) { + fields_sorted[j + 0] = fd.offset(); + fields_sorted[j + 1] = i; + j += 2; + } + } + if (j > 0) { + length = j; + // _sort_Fn is defined in growableArray.hpp. + qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset); + for (int i = 0; i < length; i += 2) { + fd.initialize(as_klassOop(), fields_sorted[i + 1]); + assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields"); + cl->do_field(&fd); + } + } + FREE_C_HEAP_ARRAY(int, fields_sorted); } @@ -953,7 +975,6 @@ // These allocations will have to be freed if they are unused. // Allocate a new array of methods. - jmethodID* to_dealloc_jmeths = NULL; jmethodID* new_jmeths = NULL; if (length <= idnum) { // A new array will be needed (unless some other thread beats us to it) @@ -964,7 +985,6 @@ } // Allocate a new method ID. - jmethodID to_dealloc_id = NULL; jmethodID new_id = NULL; if (method_h->is_old() && !method_h->is_obsolete()) { // The method passed in is old (but not obsolete), we need to use the current version @@ -978,40 +998,51 @@ new_id = JNIHandles::make_jmethod_id(method_h); } - { + if (Threads::number_of_threads() == 0 || SafepointSynchronize::is_at_safepoint()) { + // No need and unsafe to lock the JmethodIdCreation_lock at safepoint. 
+ id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths); + } else { MutexLocker ml(JmethodIdCreation_lock); + id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths); + } + } + return id; +} - // We must not go to a safepoint while holding this lock. - debug_only(No_Safepoint_Verifier nosafepoints;) - // Retry lookup after we got the lock - jmeths = ik_h->methods_jmethod_ids_acquire(); - if (jmeths == NULL || (length = (size_t)jmeths[0]) <= idnum) { - if (jmeths != NULL) { - // We have grown the array: copy the existing entries, and delete the old array - for (size_t index = 0; index < length; index++) { - new_jmeths[index+1] = jmeths[index+1]; - } - to_dealloc_jmeths = jmeths; // using the new jmeths, deallocate the old one - } - ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths); - } else { - id = jmeths[idnum+1]; - to_dealloc_jmeths = new_jmeths; // using the old jmeths, deallocate the new one - } - if (id == NULL) { - id = new_id; - jmeths[idnum+1] = id; // install the new method ID - } else { - to_dealloc_id = new_id; // the new id wasn't used, mark it for deallocation +jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, size_t idnum, + jmethodID new_id, jmethodID* new_jmeths) { + // Retry lookup after we got the lock or ensured we are at safepoint + jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire(); + jmethodID id = NULL; + jmethodID to_dealloc_id = NULL; + jmethodID* to_dealloc_jmeths = NULL; + size_t length; + + if (jmeths == NULL || (length = (size_t)jmeths[0]) <= idnum) { + if (jmeths != NULL) { + // We have grown the array: copy the existing entries, and delete the old array + for (size_t index = 0; index < length; index++) { + new_jmeths[index+1] = jmeths[index+1]; } + to_dealloc_jmeths = jmeths; // using the new jmeths, deallocate the old one } + ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths); + } else { + id = jmeths[idnum+1]; + to_dealloc_jmeths = new_jmeths; // using the old jmeths, deallocate the new one + } + if (id == NULL) { + id = new_id; + jmeths[idnum+1] = id; // install the new method ID + } else { + to_dealloc_id = new_id; // the new id wasn't used, mark it for deallocation + } - // Free up unneeded or no longer needed resources - FreeHeap(to_dealloc_jmeths); - if (to_dealloc_id != NULL) { - JNIHandles::destroy_jmethod_id(to_dealloc_id); - } + // Free up unneeded or no longer needed resources + FreeHeap(to_dealloc_jmeths); + if (to_dealloc_id != NULL) { + JNIHandles::destroy_jmethod_id(to_dealloc_id); } return id; } @@ -1227,275 +1258,350 @@ #endif //PRODUCT +#ifdef ASSERT +template void assert_is_in(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in(o), "should be in heap"); + } +} +template void assert_is_in_closed_subset(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in_closed_subset(o), "should be in closed"); + } +} +template void assert_is_in_reserved(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in_reserved(o), "should be in reserved"); + } +} +template void assert_nothing(T *p) {} + +#else +template void assert_is_in(T *p) {} +template void assert_is_in_closed_subset(T *p) {} +template void assert_is_in_reserved(T *p) {} +template void assert_nothing(T 
*p) {} +#endif // ASSERT + +// +// Macros that iterate over areas of oops which are specialized on type of +// oop pointer either narrow or wide, depending on UseCompressedOops +// +// Parameters are: +// T - type of oop to point to (either oop or narrowOop) +// start_p - starting pointer for region to iterate over +// count - number of oops or narrowOops to iterate over +// do_oop - action to perform on each oop (it's arbitrary C code which +// makes it more efficient to put in a macro rather than making +// it a template function) +// assert_fn - assert function which is template function because performance +// doesn't matter when enabled. +#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \ + T, start_p, count, do_oop, \ + assert_fn) \ +{ \ + T* p = (T*)(start_p); \ + T* const end = p + (count); \ + while (p < end) { \ + (assert_fn)(p); \ + do_oop; \ + ++p; \ + } \ +} + +#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \ + T, start_p, count, do_oop, \ + assert_fn) \ +{ \ + T* const start = (T*)(start_p); \ + T* p = start + (count); \ + while (start < p) { \ + --p; \ + (assert_fn)(p); \ + do_oop; \ + } \ +} + +#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \ + T, start_p, count, low, high, \ + do_oop, assert_fn) \ +{ \ + T* const l = (T*)(low); \ + T* const h = (T*)(high); \ + assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \ + mask_bits((intptr_t)h, sizeof(T)-1) == 0, \ + "bounded region must be properly aligned"); \ + T* p = (T*)(start_p); \ + T* end = p + (count); \ + if (p < l) p = l; \ + if (end > h) end = h; \ + while (p < end) { \ + (assert_fn)(p); \ + do_oop; \ + ++p; \ + } \ +} + + +// The following macros call specialized macros, passing either oop or +// narrowOop as the specialization type. These test the UseCompressedOops +// flag. +#define InstanceKlass_OOP_ITERATE(start_p, count, \ + do_oop, assert_fn) \ +{ \ + if (UseCompressedOops) { \ + InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ + start_p, count, \ + do_oop, assert_fn) \ + } else { \ + InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \ + start_p, count, \ + do_oop, assert_fn) \ + } \ +} + +#define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \ + do_oop, assert_fn) \ +{ \ + if (UseCompressedOops) { \ + InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ + start_p, count, \ + low, high, \ + do_oop, assert_fn) \ + } else { \ + InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ + start_p, count, \ + low, high, \ + do_oop, assert_fn) \ + } \ +} + +#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \ +{ \ + /* Compute oopmap block range. The common case \ + is nonstatic_oop_map_size == 1. 
*/ \ + OopMapBlock* map = start_of_nonstatic_oop_maps(); \ + OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \ + if (UseCompressedOops) { \ + while (map < end_map) { \ + InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ + obj->obj_field_addr(map->offset()), map->length(), \ + do_oop, assert_fn) \ + ++map; \ + } \ + } else { \ + while (map < end_map) { \ + InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \ + obj->obj_field_addr(map->offset()), map->length(), \ + do_oop, assert_fn) \ + ++map; \ + } \ + } \ +} + +#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \ +{ \ + OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \ + OopMapBlock* map = start_map + nonstatic_oop_map_size(); \ + if (UseCompressedOops) { \ + while (start_map < map) { \ + --map; \ + InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \ + obj->obj_field_addr(map->offset()), map->length(), \ + do_oop, assert_fn) \ + } \ + } else { \ + while (start_map < map) { \ + --map; \ + InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \ + obj->obj_field_addr(map->offset()), map->length(), \ + do_oop, assert_fn) \ + } \ + } \ +} + +#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \ + assert_fn) \ +{ \ + /* Compute oopmap block range. The common case is \ + nonstatic_oop_map_size == 1, so we accept the \ + usually non-existent extra overhead of examining \ + all the maps. */ \ + OopMapBlock* map = start_of_nonstatic_oop_maps(); \ + OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \ + if (UseCompressedOops) { \ + while (map < end_map) { \ + InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ + obj->obj_field_addr(map->offset()), map->length(), \ + low, high, \ + do_oop, assert_fn) \ + ++map; \ + } \ + } else { \ + while (map < end_map) { \ + InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ + obj->obj_field_addr(map->offset()), map->length(), \ + low, high, \ + do_oop, assert_fn) \ + ++map; \ + } \ + } \ +} + void instanceKlass::follow_static_fields() { - oop* start = start_of_static_fields(); - oop* end = start + static_oop_field_size(); - while (start < end) { - if (*start != NULL) { - assert(Universe::heap()->is_in_closed_subset(*start), - "should be in heap"); - MarkSweep::mark_and_push(start); - } - start++; - } + InstanceKlass_OOP_ITERATE( \ + start_of_static_fields(), static_oop_field_size(), \ + MarkSweep::mark_and_push(p), \ + assert_is_in_closed_subset) } #ifndef SERIALGC void instanceKlass::follow_static_fields(ParCompactionManager* cm) { - oop* start = start_of_static_fields(); - oop* end = start + static_oop_field_size(); - while (start < end) { - if (*start != NULL) { - assert(Universe::heap()->is_in(*start), "should be in heap"); - PSParallelCompact::mark_and_push(cm, start); - } - start++; - } + InstanceKlass_OOP_ITERATE( \ + start_of_static_fields(), static_oop_field_size(), \ + PSParallelCompact::mark_and_push(cm, p), \ + assert_is_in) } #endif // SERIALGC - void instanceKlass::adjust_static_fields() { - oop* start = start_of_static_fields(); - oop* end = start + static_oop_field_size(); - while (start < end) { - MarkSweep::adjust_pointer(start); - start++; - } + InstanceKlass_OOP_ITERATE( \ + start_of_static_fields(), static_oop_field_size(), \ + MarkSweep::adjust_pointer(p), \ + assert_nothing) } #ifndef SERIALGC void instanceKlass::update_static_fields() { - oop* const start = start_of_static_fields(); - oop* const beg_oop = start; - oop* const end_oop = start + static_oop_field_size(); - for (oop* cur_oop = beg_oop; cur_oop < 
end_oop; ++cur_oop) { - PSParallelCompact::adjust_pointer(cur_oop); - } + InstanceKlass_OOP_ITERATE( \ + start_of_static_fields(), static_oop_field_size(), \ + PSParallelCompact::adjust_pointer(p), \ + assert_nothing) } -void -instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) { - oop* const start = start_of_static_fields(); - oop* const beg_oop = MAX2((oop*)beg_addr, start); - oop* const end_oop = MIN2((oop*)end_addr, start + static_oop_field_size()); - for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) { - PSParallelCompact::adjust_pointer(cur_oop); - } +void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) { + InstanceKlass_BOUNDED_OOP_ITERATE( \ + start_of_static_fields(), static_oop_field_size(), \ + beg_addr, end_addr, \ + PSParallelCompact::adjust_pointer(p), \ + assert_nothing ) } #endif // SERIALGC void instanceKlass::oop_follow_contents(oop obj) { - assert (obj!=NULL, "can't follow the content of NULL object"); + assert(obj != NULL, "can't follow the content of NULL object"); obj->follow_header(); - OopMapBlock* map = start_of_nonstatic_oop_maps(); - OopMapBlock* end_map = map + nonstatic_oop_map_size(); - while (map < end_map) { - oop* start = obj->obj_field_addr(map->offset()); - oop* end = start + map->length(); - while (start < end) { - if (*start != NULL) { - assert(Universe::heap()->is_in_closed_subset(*start), - "should be in heap"); - MarkSweep::mark_and_push(start); - } - start++; - } - map++; - } + InstanceKlass_OOP_MAP_ITERATE( \ + obj, \ + MarkSweep::mark_and_push(p), \ + assert_is_in_closed_subset) } #ifndef SERIALGC void instanceKlass::oop_follow_contents(ParCompactionManager* cm, - oop obj) { - assert (obj!=NULL, "can't follow the content of NULL object"); + oop obj) { + assert(obj != NULL, "can't follow the content of NULL object"); obj->follow_header(cm); - OopMapBlock* map = start_of_nonstatic_oop_maps(); - OopMapBlock* end_map = map + nonstatic_oop_map_size(); - while (map < end_map) { - oop* start = obj->obj_field_addr(map->offset()); - oop* end = start + map->length(); - while (start < end) { - if (*start != NULL) { - assert(Universe::heap()->is_in(*start), "should be in heap"); - PSParallelCompact::mark_and_push(cm, start); - } - start++; - } - map++; - } + InstanceKlass_OOP_MAP_ITERATE( \ + obj, \ + PSParallelCompact::mark_and_push(cm, p), \ + assert_is_in) } #endif // SERIALGC -#define invoke_closure_on(start, closure, nv_suffix) { \ - oop obj = *(start); \ - if (obj != NULL) { \ - assert(Universe::heap()->is_in_closed_subset(obj), "should be in heap"); \ - (closure)->do_oop##nv_suffix(start); \ - } \ -} - // closure's do_header() method dicates whether the given closure should be // applied to the klass ptr in the object header. 
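The per-slot loop that the InstanceKlass_*_ITERATE macros above expand to can also be pictured as a small template plus a one-time UseCompressedOops dispatch; the patch keeps it as macros so that an arbitrary statement can be substituted for the per-slot action, as the comment above the macros notes. The standalone sketch below only shows that shape; every type and function name in it is invented for the sketch and is not HotSpot or patch code.

#include <cstdio>

typedef unsigned int narrowSlot;  // stands in for a 32-bit compressed reference slot
typedef void*        wideSlot;    // stands in for a full-width reference slot

static bool UseCompressedOops = true;  // a VM flag in HotSpot; a plain bool here

struct SlotVisitor {
  virtual ~SlotVisitor() {}
  virtual void do_slot(wideSlot* p)   = 0;
  virtual void do_slot(narrowSlot* p) = 0;
};

// Shape of one InstanceKlass_SPECIALIZED_OOP_ITERATE expansion: walk 'count'
// slots of a single width and hand each slot's address to the visitor.
template <typename T>
static void iterate_slots(T* start, int count, SlotVisitor* v) {
  T* p = start;
  T* const end = start + count;
  while (p < end) {
    v->do_slot(p);
    ++p;
  }
}

// Shape of InstanceKlass_OOP_ITERATE: choose the slot width once per block,
// based on UseCompressedOops, then run the specialized loop.
static void iterate_block(void* base, int count, SlotVisitor* v) {
  if (UseCompressedOops) {
    iterate_slots(static_cast<narrowSlot*>(base), count, v);
  } else {
    iterate_slots(static_cast<wideSlot*>(base), count, v);
  }
}

struct CountingVisitor : SlotVisitor {
  int seen;
  CountingVisitor() : seen(0) {}
  virtual void do_slot(wideSlot*)   { ++seen; }
  virtual void do_slot(narrowSlot*) { ++seen; }
};

int main() {
  narrowSlot fields[4] = { 0, 0, 0, 0 };
  CountingVisitor cv;
  iterate_block(fields, 4, &cv);
  printf("visited %d slots\n", cv.seen);
  return 0;
}

The real macros additionally thread through the assert function and provide reverse and bounded variants; the part the sketch preserves is the decide-the-slot-width-once-per-block structure rather than testing the flag on every slot.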
-#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, \ - OopClosureType* closure) { \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \ - /* header */ \ - if (closure->do_header()) { \ - obj->oop_iterate_header(closure); \ - } \ - /* instance variables */ \ - OopMapBlock* map = start_of_nonstatic_oop_maps(); \ - OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \ - const intx field_offset = PrefetchFieldsAhead; \ - if (field_offset > 0) { \ - while (map < end_map) { \ - oop* start = obj->obj_field_addr(map->offset()); \ - oop* const end = start + map->length(); \ - while (start < end) { \ - prefetch_beyond(start, (oop*)end, field_offset, \ - closure->prefetch_style()); \ - SpecializationStats:: \ - record_do_oop_call##nv_suffix(SpecializationStats::ik); \ - invoke_closure_on(start, closure, nv_suffix); \ - start++; \ - } \ - map++; \ - } \ - } else { \ - while (map < end_map) { \ - oop* start = obj->obj_field_addr(map->offset()); \ - oop* const end = start + map->length(); \ - while (start < end) { \ - SpecializationStats:: \ - record_do_oop_call##nv_suffix(SpecializationStats::ik); \ - invoke_closure_on(start, closure, nv_suffix); \ - start++; \ - } \ - map++; \ - } \ - } \ - return size_helper(); \ +#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ + \ +int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ + SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\ + /* header */ \ + if (closure->do_header()) { \ + obj->oop_iterate_header(closure); \ + } \ + InstanceKlass_OOP_MAP_ITERATE( \ + obj, \ + SpecializationStats:: \ + record_do_oop_call##nv_suffix(SpecializationStats::ik); \ + (closure)->do_oop##nv_suffix(p), \ + assert_is_in_closed_subset) \ + return size_helper(); \ } -#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ +#ifndef SERIALGC +#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ \ -int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \ - OopClosureType* closure, \ - MemRegion mr) { \ +int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \ + OopClosureType* closure) { \ SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \ /* header */ \ if (closure->do_header()) { \ - obj->oop_iterate_header(closure, mr); \ + obj->oop_iterate_header(closure); \ } \ /* instance variables */ \ - OopMapBlock* map = start_of_nonstatic_oop_maps(); \ - OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \ - HeapWord* bot = mr.start(); \ - HeapWord* top = mr.end(); \ - oop* start = obj->obj_field_addr(map->offset()); \ - HeapWord* end = MIN2((HeapWord*)(start + map->length()), top); \ - /* Find the first map entry that extends onto mr. */ \ - while (map < end_map && end <= bot) { \ - map++; \ - start = obj->obj_field_addr(map->offset()); \ - end = MIN2((HeapWord*)(start + map->length()), top); \ - } \ - if (map != end_map) { \ - /* The current map's end is past the start of "mr". Skip up to the first \ - entry on "mr". 
*/ \ - while ((HeapWord*)start < bot) { \ - start++; \ - } \ - const intx field_offset = PrefetchFieldsAhead; \ - for (;;) { \ - if (field_offset > 0) { \ - while ((HeapWord*)start < end) { \ - prefetch_beyond(start, (oop*)end, field_offset, \ - closure->prefetch_style()); \ - invoke_closure_on(start, closure, nv_suffix); \ - start++; \ - } \ - } else { \ - while ((HeapWord*)start < end) { \ - invoke_closure_on(start, closure, nv_suffix); \ - start++; \ - } \ - } \ - /* Go to the next map. */ \ - map++; \ - if (map == end_map) { \ - break; \ - } \ - /* Otherwise, */ \ - start = obj->obj_field_addr(map->offset()); \ - if ((HeapWord*)start >= top) { \ - break; \ - } \ - end = MIN2((HeapWord*)(start + map->length()), top); \ - } \ - } \ - return size_helper(); \ + InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ + obj, \ + SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\ + (closure)->do_oop##nv_suffix(p), \ + assert_is_in_closed_subset) \ + return size_helper(); \ +} +#endif // !SERIALGC + +#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ + \ +int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \ + OopClosureType* closure, \ + MemRegion mr) { \ + SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\ + if (closure->do_header()) { \ + obj->oop_iterate_header(closure, mr); \ + } \ + InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \ + obj, mr.start(), mr.end(), \ + (closure)->do_oop##nv_suffix(p), \ + assert_is_in_closed_subset) \ + return size_helper(); \ } ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN) +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN) ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) - +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) +#ifndef SERIALGC +ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) +#endif // !SERIALGC void instanceKlass::iterate_static_fields(OopClosure* closure) { - oop* start = start_of_static_fields(); - oop* end = start + static_oop_field_size(); - while (start < end) { - assert(Universe::heap()->is_in_reserved_or_null(*start), "should be in heap"); - closure->do_oop(start); - start++; - } + InstanceKlass_OOP_ITERATE( \ + start_of_static_fields(), static_oop_field_size(), \ + closure->do_oop(p), \ + assert_is_in_reserved) } void instanceKlass::iterate_static_fields(OopClosure* closure, MemRegion mr) { - oop* start = start_of_static_fields(); - oop* end = start + static_oop_field_size(); - // I gather that the the static fields of reference types come first, - // hence the name of "oop_field_size", and that is what makes this safe. 
- assert((intptr_t)mr.start() == - align_size_up((intptr_t)mr.start(), sizeof(oop)) && - (intptr_t)mr.end() == align_size_up((intptr_t)mr.end(), sizeof(oop)), - "Memregion must be oop-aligned."); - if ((HeapWord*)start < mr.start()) start = (oop*)mr.start(); - if ((HeapWord*)end > mr.end()) end = (oop*)mr.end(); - while (start < end) { - invoke_closure_on(start, closure,_v); - start++; - } + InstanceKlass_BOUNDED_OOP_ITERATE( \ + start_of_static_fields(), static_oop_field_size(), \ + mr.start(), mr.end(), \ + (closure)->do_oop_v(p), \ + assert_is_in_closed_subset) } - int instanceKlass::oop_adjust_pointers(oop obj) { int size = size_helper(); - - // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1. - OopMapBlock* map = start_of_nonstatic_oop_maps(); - OopMapBlock* const end_map = map + nonstatic_oop_map_size(); - // Iterate over oopmap blocks - while (map < end_map) { - // Compute oop range for this block - oop* start = obj->obj_field_addr(map->offset()); - oop* end = start + map->length(); - // Iterate over oops - while (start < end) { - assert(Universe::heap()->is_in_or_null(*start), "should be in heap"); - MarkSweep::adjust_pointer(start); - start++; - } - map++; - } - + InstanceKlass_OOP_MAP_ITERATE( \ + obj, \ + MarkSweep::adjust_pointer(p), \ + assert_is_in) obj->adjust_header(); return size; } @@ -1503,132 +1609,66 @@ #ifndef SERIALGC void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) { assert(!pm->depth_first(), "invariant"); - // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1. - OopMapBlock* start_map = start_of_nonstatic_oop_maps(); - OopMapBlock* map = start_map + nonstatic_oop_map_size(); - - // Iterate over oopmap blocks - while (start_map < map) { - --map; - // Compute oop range for this block - oop* start = obj->obj_field_addr(map->offset()); - oop* curr = start + map->length(); - // Iterate over oops - while (start < curr) { - --curr; - if (PSScavenge::should_scavenge(*curr)) { - assert(Universe::heap()->is_in(*curr), "should be in heap"); - pm->claim_or_forward_breadth(curr); - } - } - } + InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ + obj, \ + if (PSScavenge::should_scavenge(p)) { \ + pm->claim_or_forward_breadth(p); \ + }, \ + assert_nothing ) } void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { assert(pm->depth_first(), "invariant"); - // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1. - OopMapBlock* start_map = start_of_nonstatic_oop_maps(); - OopMapBlock* map = start_map + nonstatic_oop_map_size(); - - // Iterate over oopmap blocks - while (start_map < map) { - --map; - // Compute oop range for this block - oop* start = obj->obj_field_addr(map->offset()); - oop* curr = start + map->length(); - // Iterate over oops - while (start < curr) { - --curr; - if (PSScavenge::should_scavenge(*curr)) { - assert(Universe::heap()->is_in(*curr), "should be in heap"); - pm->claim_or_forward_depth(curr); - } - } - } + InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ + obj, \ + if (PSScavenge::should_scavenge(p)) { \ + pm->claim_or_forward_depth(p); \ + }, \ + assert_nothing ) } int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { - // Compute oopmap block range. The common case is nonstatic_oop_map_size==1. - OopMapBlock* map = start_of_nonstatic_oop_maps(); - OopMapBlock* const end_map = map + nonstatic_oop_map_size(); - // Iterate over oopmap blocks - while (map < end_map) { - // Compute oop range for this oopmap block. 
- oop* const map_start = obj->obj_field_addr(map->offset()); - oop* const beg_oop = map_start; - oop* const end_oop = map_start + map->length(); - for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) { - PSParallelCompact::adjust_pointer(cur_oop); - } - ++map; - } - + InstanceKlass_OOP_MAP_ITERATE( \ + obj, \ + PSParallelCompact::adjust_pointer(p), \ + assert_nothing) return size_helper(); } int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj, - HeapWord* beg_addr, HeapWord* end_addr) { - // Compute oopmap block range. The common case is nonstatic_oop_map_size==1. - OopMapBlock* map = start_of_nonstatic_oop_maps(); - OopMapBlock* const end_map = map + nonstatic_oop_map_size(); - // Iterate over oopmap blocks - while (map < end_map) { - // Compute oop range for this oopmap block. - oop* const map_start = obj->obj_field_addr(map->offset()); - oop* const beg_oop = MAX2((oop*)beg_addr, map_start); - oop* const end_oop = MIN2((oop*)end_addr, map_start + map->length()); - for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) { - PSParallelCompact::adjust_pointer(cur_oop); - } - ++map; - } - + HeapWord* beg_addr, HeapWord* end_addr) { + InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \ + obj, beg_addr, end_addr, \ + PSParallelCompact::adjust_pointer(p), \ + assert_nothing) return size_helper(); } void instanceKlass::copy_static_fields(PSPromotionManager* pm) { assert(!pm->depth_first(), "invariant"); - // Compute oop range - oop* start = start_of_static_fields(); - oop* end = start + static_oop_field_size(); - // Iterate over oops - while (start < end) { - if (PSScavenge::should_scavenge(*start)) { - assert(Universe::heap()->is_in(*start), "should be in heap"); - pm->claim_or_forward_breadth(start); - } - start++; - } + InstanceKlass_OOP_ITERATE( \ + start_of_static_fields(), static_oop_field_size(), \ + if (PSScavenge::should_scavenge(p)) { \ + pm->claim_or_forward_breadth(p); \ + }, \ + assert_nothing ) } void instanceKlass::push_static_fields(PSPromotionManager* pm) { assert(pm->depth_first(), "invariant"); - // Compute oop range - oop* start = start_of_static_fields(); - oop* end = start + static_oop_field_size(); - // Iterate over oops - while (start < end) { - if (PSScavenge::should_scavenge(*start)) { - assert(Universe::heap()->is_in(*start), "should be in heap"); - pm->claim_or_forward_depth(start); - } - start++; - } + InstanceKlass_OOP_ITERATE( \ + start_of_static_fields(), static_oop_field_size(), \ + if (PSScavenge::should_scavenge(p)) { \ + pm->claim_or_forward_depth(p); \ + }, \ + assert_nothing ) } void instanceKlass::copy_static_fields(ParCompactionManager* cm) { - // Compute oop range - oop* start = start_of_static_fields(); - oop* end = start + static_oop_field_size(); - // Iterate over oops - while (start < end) { - if (*start != NULL) { - assert(Universe::heap()->is_in(*start), "should be in heap"); - // *start = (oop) cm->summary_data()->calc_new_pointer(*start); - PSParallelCompact::adjust_pointer(start); - } - start++; - } + InstanceKlass_OOP_ITERATE( \ + start_of_static_fields(), static_oop_field_size(), \ + PSParallelCompact::adjust_pointer(p), \ + assert_is_in) } #endif // SERIALGC @@ -1659,18 +1699,15 @@ Klass::follow_weak_klass_links(is_alive, keep_alive); } - void instanceKlass::remove_unshareable_info() { Klass::remove_unshareable_info(); init_implementor(); } - static void clear_all_breakpoints(methodOop m) { m->clear_all_breakpoints(); } - void instanceKlass::release_C_heap_structures() { // Deallocate oop map cache if (_oop_map_cache != 
NULL) { @@ -2019,29 +2056,30 @@ obj->print_address_on(st); } -#endif +#endif // ndef PRODUCT const char* instanceKlass::internal_name() const { return external_name(); } - - // Verification class VerifyFieldClosure: public OopClosure { - public: - void do_oop(oop* p) { + protected: + template void do_oop_work(T* p) { guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap"); - if (!(*p)->is_oop_or_null()) { - tty->print_cr("Failed: %p -> %p",p,(address)*p); + oop obj = oopDesc::load_decode_heap_oop(p); + if (!obj->is_oop_or_null()) { + tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj); Universe::print(); guarantee(false, "boom"); } } + public: + virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); } + virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); } }; - void instanceKlass::oop_verify_on(oop obj, outputStream* st) { Klass::oop_verify_on(obj, st); VerifyFieldClosure blk; @@ -2082,27 +2120,29 @@ } } -#endif +#endif // ndef PRODUCT + +// JNIid class for jfieldIDs only +// Note to reviewers: +// These JNI functions are just moved over to column 1 and not changed +// in the compressed oops workspace. +JNIid::JNIid(klassOop holder, int offset, JNIid* next) { + _holder = holder; + _offset = offset; + _next = next; + debug_only(_is_static_field_id = false;) +} + + +JNIid* JNIid::find(int offset) { + JNIid* current = this; + while (current != NULL) { + if (current->offset() == offset) return current; + current = current->next(); + } + return NULL; +} - -/* JNIid class for jfieldIDs only */ - JNIid::JNIid(klassOop holder, int offset, JNIid* next) { - _holder = holder; - _offset = offset; - _next = next; - debug_only(_is_static_field_id = false;) - } - - - JNIid* JNIid::find(int offset) { - JNIid* current = this; - while (current != NULL) { - if (current->offset() == offset) return current; - current = current->next(); - } - return NULL; - } - void JNIid::oops_do(OopClosure* f) { for (JNIid* cur = this; cur != NULL; cur = cur->next()) { f->do_oop(cur->holder_addr()); @@ -2110,40 +2150,40 @@ } void JNIid::deallocate(JNIid* current) { - while (current != NULL) { - JNIid* next = current->next(); - delete current; - current = next; - } - } - - - void JNIid::verify(klassOop holder) { - int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields(); - int end_field_offset; - end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize); - - JNIid* current = this; - while (current != NULL) { - guarantee(current->holder() == holder, "Invalid klass in JNIid"); - #ifdef ASSERT - int o = current->offset(); - if (current->is_static_field_id()) { - guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid"); - } - #endif - current = current->next(); - } - } + while (current != NULL) { + JNIid* next = current->next(); + delete current; + current = next; + } +} + +void JNIid::verify(klassOop holder) { + int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields(); + int end_field_offset; + end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize); + JNIid* current = this; + while (current != NULL) { + guarantee(current->holder() == holder, "Invalid klass in JNIid"); #ifdef ASSERT - void instanceKlass::set_init_state(ClassState state) { - bool good_state = as_klassOop()->is_shared() ? 
(_init_state <= state) - : (_init_state < state); - assert(good_state || state == allocated, "illegal state transition"); - _init_state = state; + int o = current->offset(); + if (current->is_static_field_id()) { + guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid"); + } +#endif + current = current->next(); } +} + + +#ifdef ASSERT +void instanceKlass::set_init_state(ClassState state) { + bool good_state = as_klassOop()->is_shared() ? (_init_state <= state) + : (_init_state < state); + assert(good_state || state == allocated, "illegal state transition"); + _init_state = state; +} #endif @@ -2152,9 +2192,9 @@ // Add an information node that contains weak references to the // interesting parts of the previous version of the_class. void instanceKlass::add_previous_version(instanceKlassHandle ikh, - BitMap * emcp_methods, int emcp_method_count) { + BitMap* emcp_methods, int emcp_method_count) { assert(Thread::current()->is_VM_thread(), - "only VMThread can add previous versions"); + "only VMThread can add previous versions"); if (_previous_versions == NULL) { // This is the first previous version so make some space. --- old/hotspot/src/share/vm/oops/instanceKlass.hpp 2009-08-01 04:12:51.133787761 +0100 +++ new/hotspot/src/share/vm/oops/instanceKlass.hpp 2009-08-01 04:12:51.048387402 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)instanceKlass.hpp 1.201 08/11/24 12:22:50 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -150,6 +150,10 @@ oop _class_loader; // Protection domain. oop _protection_domain; + // Host class, which grants its access privileges to this class also. + // This is only non-null for an anonymous class (AnonymousClasses enabled). + // The host class is either named, or a previously loaded anonymous class. + klassOop _host_klass; // Class signers. objArrayOop _signers; // Name of source file containing this klass, NULL if not specified. @@ -183,12 +187,15 @@ // End of the oop block. // - int _nonstatic_field_size; // number of non-static fields in this klass (including inherited fields) - int _static_field_size; // number of static fields (oop and non-oop) in this klass + // Number of heapOopSize words used by non-static fields in this klass + // (including inherited fields but after header_size()). + int _nonstatic_field_size; + int _static_field_size; // number words used by static fields (oop and non-oop) in this klass int _static_oop_field_size;// number of static oop fields in this klass int _nonstatic_oop_map_size;// number of nonstatic oop-map blocks allocated at end of this klass bool _is_marked_dependent; // used for marking during flushing and deoptimization bool _rewritten; // methods rewritten. 
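For reference, a minimal standalone sketch of the pattern behind the InstanceKlass_OOP_MAP_ITERATE / InstanceKlass_OOP_ITERATE macros used in the instanceKlass.cpp hunks above: the per-slot action is passed as a macro argument, so one body can be expanded over both full-width and compressed slots. The slot types, the flag and the macro name below are simplified stand-ins, not the HotSpot definitions.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Simplified stand-ins: a "slot" is either a full pointer or a 32-bit
    // compressed value, selected at run time the way UseCompressedOops does.
    typedef uintptr_t wide_slot;
    typedef uint32_t  narrow_slot;
    static bool use_compressed = true;

    // The action is written once as a macro argument and expanded into both
    // the wide and the narrow loop bodies (it can refer to the loop variable p).
    #define SKETCH_OOP_MAP_ITERATE(base, count, action)   \
      if (use_compressed) {                               \
        narrow_slot* p = (narrow_slot*)(base);            \
        narrow_slot* const end = p + (count);             \
        for (; p < end; ++p) { action; }                  \
      } else {                                            \
        wide_slot* p = (wide_slot*)(base);                \
        wide_slot* const end = p + (count);               \
        for (; p < end; ++p) { action; }                  \
      }

    int main() {
      std::vector<narrow_slot> fake_fields = {1, 2, 3, 4};
      SKETCH_OOP_MAP_ITERATE(fake_fields.data(), fake_fields.size(),
                             std::printf("visit slot at %p\n", (void*)p));
      return 0;
    }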
+ bool _has_nonstatic_fields; // for sizing with UseCompressedOops u2 _minor_version; // minor version number of class file u2 _major_version; // major version number of class file ClassState _init_state; // state of class @@ -224,6 +231,9 @@ friend class SystemDictionary; public: + bool has_nonstatic_fields() const { return _has_nonstatic_fields; } + void set_has_nonstatic_fields(bool b) { _has_nonstatic_fields = b; } + // field sizes int nonstatic_field_size() const { return _nonstatic_field_size; } void set_nonstatic_field_size(int size) { _nonstatic_field_size = size; } @@ -342,9 +352,8 @@ klassOop find_field(symbolOop name, symbolOop sig, bool is_static, fieldDescriptor* fd) const; // find a non-static or static field given its offset within the class. - bool contains_field_offset(int offset) { - return ((offset/wordSize) >= instanceOopDesc::header_size() && - (offset/wordSize)-instanceOopDesc::header_size() < nonstatic_field_size()); + bool contains_field_offset(int offset) { + return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size()); } bool find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const; @@ -373,6 +382,11 @@ oop protection_domain() { return _protection_domain; } void set_protection_domain(oop pd) { oop_store((oop*) &_protection_domain, pd); } + // host class + oop host_klass() const { return _host_klass; } + void set_host_klass(oop host) { oop_store((oop*) &_host_klass, host); } + bool is_anonymous() const { return _host_klass != NULL; } + // signers objArrayOop signers() const { return _signers; } void set_signers(objArrayOop s) { oop_store((oop*) &_signers, oop(s)); } @@ -435,8 +449,10 @@ _enclosing_method_method_index = method_index; } // jmethodID support - static jmethodID jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h); - jmethodID jmethod_id_or_null(methodOop method); + static jmethodID get_jmethod_id(instanceKlassHandle ik_h, size_t idnum, + jmethodID new_id, jmethodID* new_jmeths); + static jmethodID jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h); + jmethodID jmethod_id_or_null(methodOop method); // cached itable index support void set_cached_itable_index(size_t idnum, int index); @@ -571,12 +587,21 @@ intptr_t* start_of_itable() const { return start_of_vtable() + align_object_offset(vtable_length()); } int itable_offset_in_words() const { return start_of_itable() - (intptr_t*)as_klassOop(); } - oop* start_of_static_fields() const { return (oop*)(start_of_itable() + align_object_offset(itable_length())); } + // Static field offset is an offset into the Heap, should be converted by + // based on UseCompressedOop for traversal + HeapWord* start_of_static_fields() const { + return (HeapWord*)(start_of_itable() + align_object_offset(itable_length())); + } + intptr_t* end_of_itable() const { return start_of_itable() + itable_length(); } - oop* end_of_static_fields() const { return start_of_static_fields() + static_field_size(); } - int offset_of_static_fields() const { return (intptr_t)start_of_static_fields() - (intptr_t)as_klassOop(); } - OopMapBlock* start_of_nonstatic_oop_maps() const { return (OopMapBlock*) (start_of_static_fields() + static_field_size()); } + int offset_of_static_fields() const { + return (intptr_t)start_of_static_fields() - (intptr_t)as_klassOop(); + } + + OopMapBlock* start_of_nonstatic_oop_maps() const { + return (OopMapBlock*) (start_of_static_fields() + static_field_size()); + } // Allocation profiling support juint alloc_size() const { return 
_alloc_count * size_helper(); } @@ -642,13 +667,21 @@ return oop_oop_iterate_v_m(obj, blk, mr); } -#define InstanceKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \ +#define InstanceKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ + int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ + int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \ MemRegion mr); ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DECL) + +#ifndef SERIALGC +#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ + int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk); + + ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) +#endif // !SERIALGC void iterate_static_fields(OopClosure* closure); void iterate_static_fields(OopClosure* closure, MemRegion mr); @@ -688,6 +721,7 @@ oop* adr_constants() const { return (oop*)&this->_constants;} oop* adr_class_loader() const { return (oop*)&this->_class_loader;} oop* adr_protection_domain() const { return (oop*)&this->_protection_domain;} + oop* adr_host_klass() const { return (oop*)&this->_host_klass;} oop* adr_signers() const { return (oop*)&this->_signers;} oop* adr_source_file_name() const { return (oop*)&this->_source_file_name;} oop* adr_source_debug_extension() const { return (oop*)&this->_source_debug_extension;} --- old/hotspot/src/share/vm/oops/instanceKlassKlass.cpp 2009-08-01 04:12:52.042920714 +0100 +++ new/hotspot/src/share/vm/oops/instanceKlassKlass.cpp 2009-08-01 04:12:51.957963437 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)instanceKlassKlass.cpp 1.157 07/07/27 16:19:58 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -84,6 +84,7 @@ MarkSweep::mark_and_push(ik->adr_source_debug_extension()); MarkSweep::mark_and_push(ik->adr_inner_classes()); MarkSweep::mark_and_push(ik->adr_protection_domain()); + MarkSweep::mark_and_push(ik->adr_host_klass()); MarkSweep::mark_and_push(ik->adr_signers()); MarkSweep::mark_and_push(ik->adr_generic_signature()); MarkSweep::mark_and_push(ik->adr_class_annotations()); @@ -123,6 +124,7 @@ PSParallelCompact::mark_and_push(cm, ik->adr_source_debug_extension()); PSParallelCompact::mark_and_push(cm, ik->adr_inner_classes()); PSParallelCompact::mark_and_push(cm, ik->adr_protection_domain()); + PSParallelCompact::mark_and_push(cm, ik->adr_host_klass()); PSParallelCompact::mark_and_push(cm, ik->adr_signers()); PSParallelCompact::mark_and_push(cm, ik->adr_generic_signature()); PSParallelCompact::mark_and_push(cm, ik->adr_class_annotations()); @@ -162,6 +164,7 @@ blk->do_oop(ik->adr_constants()); blk->do_oop(ik->adr_class_loader()); blk->do_oop(ik->adr_protection_domain()); + blk->do_oop(ik->adr_host_klass()); blk->do_oop(ik->adr_signers()); blk->do_oop(ik->adr_source_file_name()); blk->do_oop(ik->adr_source_debug_extension()); @@ -214,6 +217,8 @@ if (mr.contains(adr)) blk->do_oop(adr); adr = ik->adr_protection_domain(); if (mr.contains(adr)) blk->do_oop(adr); + adr = ik->adr_host_klass(); + if (mr.contains(adr)) blk->do_oop(adr); adr = ik->adr_signers(); if (mr.contains(adr)) blk->do_oop(adr); adr = ik->adr_source_file_name(); @@ -263,6 +268,7 @@ MarkSweep::adjust_pointer(ik->adr_constants()); MarkSweep::adjust_pointer(ik->adr_class_loader()); MarkSweep::adjust_pointer(ik->adr_protection_domain()); + MarkSweep::adjust_pointer(ik->adr_host_klass()); MarkSweep::adjust_pointer(ik->adr_signers()); MarkSweep::adjust_pointer(ik->adr_source_file_name()); MarkSweep::adjust_pointer(ik->adr_source_debug_extension()); @@ -289,17 +295,22 @@ ik->copy_static_fields(pm); oop* loader_addr = ik->adr_class_loader(); - if (PSScavenge::should_scavenge(*loader_addr)) { + if (PSScavenge::should_scavenge(loader_addr)) { pm->claim_or_forward_breadth(loader_addr); } oop* pd_addr = ik->adr_protection_domain(); - if (PSScavenge::should_scavenge(*pd_addr)) { + if (PSScavenge::should_scavenge(pd_addr)) { pm->claim_or_forward_breadth(pd_addr); } + oop* hk_addr = ik->adr_host_klass(); + if (PSScavenge::should_scavenge(hk_addr)) { + pm->claim_or_forward_breadth(hk_addr); + } + oop* sg_addr = ik->adr_signers(); - if (PSScavenge::should_scavenge(*sg_addr)) { + if (PSScavenge::should_scavenge(sg_addr)) { pm->claim_or_forward_breadth(sg_addr); } @@ -312,17 +323,22 @@ ik->push_static_fields(pm); oop* loader_addr = ik->adr_class_loader(); - if (PSScavenge::should_scavenge(*loader_addr)) { + if (PSScavenge::should_scavenge(loader_addr)) { pm->claim_or_forward_depth(loader_addr); } oop* pd_addr = ik->adr_protection_domain(); - if (PSScavenge::should_scavenge(*pd_addr)) { + if (PSScavenge::should_scavenge(pd_addr)) { pm->claim_or_forward_depth(pd_addr); } + oop* hk_addr = ik->adr_host_klass(); + if (PSScavenge::should_scavenge(hk_addr)) { + pm->claim_or_forward_depth(hk_addr); + } + oop* sg_addr = ik->adr_signers(); - if (PSScavenge::should_scavenge(*sg_addr)) { + if (PSScavenge::should_scavenge(sg_addr)) { pm->claim_or_forward_depth(sg_addr); } @@ -424,6 +440,7 @@ ik->set_constants(NULL); ik->set_class_loader(NULL); ik->set_protection_domain(NULL); + ik->set_host_klass(NULL); ik->set_signers(NULL); ik->set_source_file_name(NULL); 
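The oop_copy_contents/oop_push_contents hunks above change PSScavenge::should_scavenge to take the field address instead of a loaded oop, so one templated check can decode either slot width before testing the target. A standalone sketch of that shape, with a made-up decode step and young-generation bounds (none of these names are the real PSScavenge interface, and a 64-bit address space is assumed):

    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t oop_t;        // stand-in for oop (a raw pointer value)
    typedef uint32_t  narrow_t;     // stand-in for narrowOop

    static uintptr_t heap_base = 0x100000000ULL;  // assumed compressed-oop base, shift 3
    static uintptr_t young_lo  = 0x100000000ULL;  // assumed young generation range
    static uintptr_t young_hi  = 0x110000000ULL;

    // decode() is overloaded on the slot type, mirroring load_decode_heap_oop.
    static oop_t decode(oop_t* p)    { return *p; }
    static oop_t decode(narrow_t* p) { return *p == 0 ? 0 : heap_base + ((uintptr_t)*p << 3); }

    // should_scavenge takes the slot address; the slot width is inferred from T.
    template <class T>
    static bool should_scavenge(T* p) {
      oop_t obj = decode(p);
      return obj != 0 && obj >= young_lo && obj < young_hi;
    }

    int main() {
      narrow_t compressed_slot = (narrow_t)((0x100000040ULL - heap_base) >> 3);
      oop_t    wide_slot       = 0x120000040ULL;   // outside the young range
      std::printf("narrow slot scavenged: %d\n", (int)should_scavenge(&compressed_slot));
      std::printf("wide slot scavenged:   %d\n", (int)should_scavenge(&wide_slot));
      return 0;
    }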
ik->set_source_debug_extension(NULL); @@ -529,6 +546,7 @@ st->print(" - constants: "); ik->constants()->print_value_on(st); st->cr(); st->print(" - class loader: "); ik->class_loader()->print_value_on(st); st->cr(); st->print(" - protection domain: "); ik->protection_domain()->print_value_on(st); st->cr(); + st->print(" - host class: "); ik->host_klass()->print_value_on(st); st->cr(); st->print(" - signers: "); ik->signers()->print_value_on(st); st->cr(); if (ik->source_file_name() != NULL) { st->print(" - source file: "); @@ -584,7 +602,7 @@ OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); OopMapBlock* end_map = map + ik->nonstatic_oop_map_size(); while (map < end_map) { - st->print("%d-%d ", map->offset(), map->offset() + oopSize*(map->length() - 1)); + st->print("%d-%d ", map->offset(), map->offset() + heapOopSize*(map->length() - 1)); map++; } st->cr(); @@ -605,16 +623,18 @@ // Verification - class VerifyFieldClosure: public OopClosure { - public: - void do_oop(oop* p) { + protected: + template void do_oop_work(T* p) { guarantee(Universe::heap()->is_in(p), "should be in heap"); - guarantee((*p)->is_oop_or_null(), "should be in heap"); + oop obj = oopDesc::load_decode_heap_oop(p); + guarantee(obj->is_oop_or_null(), "should be in heap"); } + public: + virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); } + virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); } }; - void instanceKlassKlass::oop_verify_on(oop obj, outputStream* st) { klassKlass::oop_verify_on(obj, st); if (!obj->partially_loaded()) { @@ -627,7 +647,7 @@ ik->_verify_count = Universe::verify_count(); #endif // Verify that klass is present in SystemDictionary - if (ik->is_loaded()) { + if (ik->is_loaded() && !ik->is_anonymous()) { symbolHandle h_name (thread, ik->name()); Handle h_loader (thread, ik->class_loader()); Handle h_obj(thread, obj); @@ -765,6 +785,9 @@ if (ik->protection_domain() != NULL) { guarantee(ik->protection_domain()->is_oop(), "should be oop"); } + if (ik->host_klass() != NULL) { + guarantee(ik->host_klass()->is_oop(), "should be oop"); + } if (ik->signers() != NULL) { guarantee(ik->signers()->is_objArray(), "should be obj array"); } --- old/hotspot/src/share/vm/oops/instanceOop.hpp 2009-08-01 04:12:52.929860099 +0100 +++ new/hotspot/src/share/vm/oops/instanceOop.hpp 2009-08-01 04:12:52.861710297 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)instanceOop.hpp 1.15 07/05/05 17:06:04 JVM" #endif /* - * Copyright 1997-2000 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,21 @@ class instanceOopDesc : public oopDesc { public: + // aligned header size. static int header_size() { return sizeof(instanceOopDesc)/HeapWordSize; } + + // If compressed, the offset of the fields of the instance may not be aligned. + static int base_offset_in_bytes() { + return UseCompressedOops ? 
+ klass_gap_offset_in_bytes() : + sizeof(instanceOopDesc); + } + + static bool contains_field_offset(int offset, int nonstatic_field_size) { + int base_in_bytes = base_offset_in_bytes(); + return (offset >= base_in_bytes && + (offset-base_in_bytes) < nonstatic_field_size * heapOopSize); + } }; --- old/hotspot/src/share/vm/oops/instanceRefKlass.cpp 2009-08-01 04:12:53.732822161 +0100 +++ new/hotspot/src/share/vm/oops/instanceRefKlass.cpp 2009-08-01 04:12:53.657107564 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)instanceRefKlass.cpp 1.90 07/05/29 09:44:20 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,23 +28,24 @@ # include "incls/_precompiled.incl" # include "incls/_instanceRefKlass.cpp.incl" -void instanceRefKlass::oop_follow_contents(oop obj) { - oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); - oop referent = *referent_addr; +template +static void specialized_oop_follow_contents(instanceRefKlass* ref, oop obj) { + T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); + oop referent = oopDesc::load_decode_heap_oop(referent_addr); debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (address)obj); + gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj); } ) if (referent != NULL) { if (!referent->is_gc_marked() && MarkSweep::ref_processor()-> - discover_reference(obj, reference_type())) { + discover_reference(obj, ref->reference_type())) { // reference already enqueued, referent will be traversed later - instanceKlass::oop_follow_contents(obj); - debug_only( + ref->instanceKlass::oop_follow_contents(obj); + debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (address)obj); + gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, obj); } ) return; @@ -52,42 +53,52 @@ // treat referent as normal oop debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (address)obj); + gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, obj); } ) MarkSweep::mark_and_push(referent_addr); } } // treat next as normal oop. next is a link in the pending list. 
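The instanceOopDesc hunk above makes contains_field_offset() measure against base_offset_in_bytes(), which drops when compressed oops let instance fields start in the klass gap. A small standalone arithmetic sketch with assumed header sizes (8-byte mark word plus an 8-byte or 4-byte klass field; the exact numbers are illustrative):

    #include <cstdio>

    static bool use_compressed_oops = true;

    // Assumed layout: 8-byte mark word, then the klass field. With compressed
    // oops the 4-byte gap after the klass field can hold instance data, so
    // fields may start at offset 12 instead of 16.
    static int base_offset_in_bytes() { return use_compressed_oops ? 12 : 16; }
    static int heap_oop_size()        { return use_compressed_oops ? 4 : 8; }

    static bool contains_field_offset(int offset, int nonstatic_field_size_in_heap_oops) {
      int base = base_offset_in_bytes();
      return offset >= base &&
             (offset - base) < nonstatic_field_size_in_heap_oops * heap_oop_size();
    }

    int main() {
      // A class with 3 heap-oop-sized words of instance data.
      std::printf("offset 12 ok: %d\n", contains_field_offset(12, 3)); // 1 when compressed
      std::printf("offset 24 ok: %d\n", contains_field_offset(24, 3)); // 0: past the fields
      return 0;
    }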
- oop* next_addr = java_lang_ref_Reference::next_addr(obj); + T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); debug_only( if(TraceReferenceGC && PrintGCDetails) { gclog_or_tty->print_cr(" Process next as normal " INTPTR_FORMAT, next_addr); } ) MarkSweep::mark_and_push(next_addr); - instanceKlass::oop_follow_contents(obj); + ref->instanceKlass::oop_follow_contents(obj); +} + +void instanceRefKlass::oop_follow_contents(oop obj) { + if (UseCompressedOops) { + specialized_oop_follow_contents(this, obj); + } else { + specialized_oop_follow_contents(this, obj); + } } #ifndef SERIALGC -void instanceRefKlass::oop_follow_contents(ParCompactionManager* cm, - oop obj) { - oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); - oop referent = *referent_addr; +template +static void specialized_oop_follow_contents(instanceRefKlass* ref, + ParCompactionManager* cm, + oop obj) { + T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); + oop referent = oopDesc::load_decode_heap_oop(referent_addr); debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (address)obj); + gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj); } ) if (referent != NULL) { if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) && PSParallelCompact::ref_processor()-> - discover_reference(obj, reference_type())) { + discover_reference(obj, ref->reference_type())) { // reference already enqueued, referent will be traversed later - instanceKlass::oop_follow_contents(cm, obj); - debug_only( + ref->instanceKlass::oop_follow_contents(cm, obj); + debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (address)obj); + gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, obj); } ) return; @@ -95,70 +106,90 @@ // treat referent as normal oop debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (address)obj); + gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, obj); } ) PSParallelCompact::mark_and_push(cm, referent_addr); } } // treat next as normal oop. next is a link in the pending list. 
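oop_follow_contents above becomes a thin wrapper that tests UseCompressedOops once and forwards to a file-static template helper instantiated for narrowOop or oop; the klass is passed explicitly because the static helper cannot be a virtual member. A minimal standalone sketch of that dispatch shape (all types and the helper body are placeholders):

    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t oop_t;
    typedef uint32_t  narrow_t;
    static bool UseCompressedOops_flag = true;   // stand-in for the VM flag

    struct RefKlassSketch;   // forward declaration for the helper signature

    // File-static template helper: one body, two instantiations.
    template <class T>
    static void follow_contents_impl(RefKlassSketch* k, void* obj) {
      std::printf("klass %p, obj %p, %u-byte slots\n",
                  (void*)k, obj, (unsigned)sizeof(T));
      // the real helper loads referent/next/discovered through T* here
    }

    struct RefKlassSketch {
      // The virtual entry point stays non-template and dispatches once.
      virtual void oop_follow_contents(void* obj) {
        if (UseCompressedOops_flag) {
          follow_contents_impl<narrow_t>(this, obj);
        } else {
          follow_contents_impl<oop_t>(this, obj);
        }
      }
      virtual ~RefKlassSketch() {}
    };

    int main() {
      RefKlassSketch k;
      int dummy = 0;
      k.oop_follow_contents(&dummy);
      return 0;
    }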
- oop* next_addr = java_lang_ref_Reference::next_addr(obj); + T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); debug_only( if(TraceReferenceGC && PrintGCDetails) { gclog_or_tty->print_cr(" Process next as normal " INTPTR_FORMAT, next_addr); } ) PSParallelCompact::mark_and_push(cm, next_addr); - instanceKlass::oop_follow_contents(cm, obj); + ref->instanceKlass::oop_follow_contents(cm, obj); } -#endif // SERIALGC - -int instanceRefKlass::oop_adjust_pointers(oop obj) { - int size = size_helper(); - instanceKlass::oop_adjust_pointers(obj); - - oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); - MarkSweep::adjust_pointer(referent_addr); - oop* next_addr = java_lang_ref_Reference::next_addr(obj); - MarkSweep::adjust_pointer(next_addr); - oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj); - MarkSweep::adjust_pointer(discovered_addr); +void instanceRefKlass::oop_follow_contents(ParCompactionManager* cm, + oop obj) { + if (UseCompressedOops) { + specialized_oop_follow_contents(this, cm, obj); + } else { + specialized_oop_follow_contents(this, cm, obj); + } +} +#endif // SERIALGC #ifdef ASSERT +template void trace_reference_gc(const char *s, oop obj, + T* referent_addr, + T* next_addr, + T* discovered_addr) { if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr("instanceRefKlass::oop_adjust_pointers obj " - INTPTR_FORMAT, (address)obj); + gclog_or_tty->print_cr("%s obj " INTPTR_FORMAT, s, (address)obj); gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / " - INTPTR_FORMAT, referent_addr, - referent_addr ? (address)*referent_addr : NULL); + INTPTR_FORMAT, referent_addr, + referent_addr ? + (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL); gclog_or_tty->print_cr(" next_addr/* " INTPTR_FORMAT " / " - INTPTR_FORMAT, next_addr, - next_addr ? (address)*next_addr : NULL); + INTPTR_FORMAT, next_addr, + next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL); gclog_or_tty->print_cr(" discovered_addr/* " INTPTR_FORMAT " / " - INTPTR_FORMAT, discovered_addr, - discovered_addr ? (address)*discovered_addr : NULL); + INTPTR_FORMAT, discovered_addr, + discovered_addr ? 
+ (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL); } +} #endif +template void specialized_oop_adjust_pointers(instanceRefKlass *ref, oop obj) { + T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); + MarkSweep::adjust_pointer(referent_addr); + T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); + MarkSweep::adjust_pointer(next_addr); + T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); + MarkSweep::adjust_pointer(discovered_addr); + debug_only(trace_reference_gc("instanceRefKlass::oop_adjust_pointers", obj, + referent_addr, next_addr, discovered_addr);) +} + +int instanceRefKlass::oop_adjust_pointers(oop obj) { + int size = size_helper(); + instanceKlass::oop_adjust_pointers(obj); + + if (UseCompressedOops) { + specialized_oop_adjust_pointers(this, obj); + } else { + specialized_oop_adjust_pointers(this, obj); + } return size; } -#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int instanceRefKlass:: \ -oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - /* Get size before changing pointers */ \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\ - \ - int size = instanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \ +#define InstanceRefKlass_SPECIALIZED_OOP_ITERATE(T, nv_suffix, contains) \ + if (closure->apply_to_weak_ref_discovered_field()) { \ + T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); \ + closure->do_oop##nv_suffix(disc_addr); \ + } \ \ - oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); \ - oop referent = *referent_addr; \ - if (referent != NULL) { \ + T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); \ + oop referent = oopDesc::load_decode_heap_oop(referent_addr); \ + if (referent != NULL && contains(referent_addr)) { \ ReferenceProcessor* rp = closure->_ref_processor; \ if (!referent->is_gc_marked() && (rp != NULL) && \ - rp->discover_reference(obj, reference_type())) { \ + rp->discover_reference(obj, reference_type())) { \ return size; \ } else { \ /* treat referent as normal oop */ \ @@ -166,13 +197,54 @@ closure->do_oop##nv_suffix(referent_addr); \ } \ } \ - \ /* treat next as normal oop */ \ - oop* next_addr = java_lang_ref_Reference::next_addr(obj); \ - SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \ - closure->do_oop##nv_suffix(next_addr); \ + T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); \ + if (contains(next_addr)) { \ + SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \ + closure->do_oop##nv_suffix(next_addr); \ + } \ return size; \ + + +template bool contains(T *t) { return true; } + +// Macro to define instanceRefKlass::oop_oop_iterate for virtual/nonvirtual for +// all closures. Macros calling macros above for each oop size. 
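InstanceRefKlass_SPECIALIZED_OOP_ITERATE above is shared between the bounded (_m) and unbounded iterators by passing a contains predicate: the unbounded caller supplies the always-true template function defined just above, the bounded caller supplies mr.contains. A standalone sketch of that trick with simplified types:

    #include <cstdint>
    #include <cstdio>

    // Unbounded iteration uses an always-true predicate with the same call
    // shape as MemRegion::contains, so a shared body can say "pred(addr)".
    template <class T> static bool contains(T* /*addr*/) { return true; }

    struct MemRegionSketch {
      const void* lo;
      const void* hi;
      template <class T>
      bool contains(T* addr) const {
        return (uintptr_t)addr >= (uintptr_t)lo && (uintptr_t)addr < (uintptr_t)hi;
      }
    };

    // Shared body written once as a macro so "pred" may be either form.
    #define VISIT_IF_CONTAINED(p, pred)                                   \
      do { if (pred(p)) std::printf("visit %p\n", (void*)(p)); } while (0)

    int main() {
      uint32_t slots[4] = {0, 1, 2, 3};
      MemRegionSketch mr = { &slots[1], &slots[3] };

      VISIT_IF_CONTAINED(&slots[0], contains);      // unbounded: always visited
      VISIT_IF_CONTAINED(&slots[0], mr.contains);   // bounded: skipped
      VISIT_IF_CONTAINED(&slots[2], mr.contains);   // bounded: visited
      return 0;
    }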
+ +#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ + \ +int instanceRefKlass:: \ +oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ + /* Get size before changing pointers */ \ + SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\ + \ + int size = instanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \ + \ + if (UseCompressedOops) { \ + InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, contains); \ + } else { \ + InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains); \ + } \ +} + +#ifndef SERIALGC +#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ + \ +int instanceRefKlass:: \ +oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ + /* Get size before changing pointers */ \ + SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\ + \ + int size = instanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \ + \ + if (UseCompressedOops) { \ + InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, contains); \ + } else { \ + InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains); \ + } \ } +#endif // !SERIALGC + #define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ \ @@ -183,45 +255,33 @@ SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\ \ int size = instanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \ - \ - oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); \ - oop referent = *referent_addr; \ - if (referent != NULL && mr.contains(referent_addr)) { \ - ReferenceProcessor* rp = closure->_ref_processor; \ - if (!referent->is_gc_marked() && (rp != NULL) && \ - rp->discover_reference(obj, reference_type())) { \ - return size; \ - } else { \ - /* treat referent as normal oop */ \ - SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\ - closure->do_oop##nv_suffix(referent_addr); \ - } \ - } \ - \ - /* treat next as normal oop */ \ - oop* next_addr = java_lang_ref_Reference::next_addr(obj); \ - if (mr.contains(next_addr)) { \ - SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\ - closure->do_oop##nv_suffix(next_addr); \ + if (UseCompressedOops) { \ + InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr.contains); \ + } else { \ + InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr.contains); \ } \ - return size; \ } ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceRefKlass_OOP_OOP_ITERATE_DEFN) +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DEFN) +#ifndef SERIALGC +ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) +#endif // SERIALGC ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m) - +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m) #ifndef SERIALGC -void instanceRefKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) { +template +void specialized_oop_copy_contents(instanceRefKlass *ref, + PSPromotionManager* pm, oop obj) { assert(!pm->depth_first(), "invariant"); - oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); - if (PSScavenge::should_scavenge(*referent_addr)) { + T* referent_addr = 
(T*)java_lang_ref_Reference::referent_addr(obj); + if (PSScavenge::should_scavenge(referent_addr)) { ReferenceProcessor* rp = PSScavenge::reference_processor(); - if (rp->discover_reference(obj, reference_type())) { + if (rp->discover_reference(obj, ref->reference_type())) { // reference already enqueued, referent and next will be traversed later - instanceKlass::oop_copy_contents(pm, obj); + ref->instanceKlass::oop_copy_contents(pm, obj); return; } else { // treat referent as normal oop @@ -229,21 +289,31 @@ } } // treat next as normal oop - oop* next_addr = java_lang_ref_Reference::next_addr(obj); - if (PSScavenge::should_scavenge(*next_addr)) { + T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); + if (PSScavenge::should_scavenge(next_addr)) { pm->claim_or_forward_breadth(next_addr); } - instanceKlass::oop_copy_contents(pm, obj); + ref->instanceKlass::oop_copy_contents(pm, obj); } -void instanceRefKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { +void instanceRefKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) { + if (UseCompressedOops) { + specialized_oop_copy_contents(this, pm, obj); + } else { + specialized_oop_copy_contents(this, pm, obj); + } +} + +template +void specialized_oop_push_contents(instanceRefKlass *ref, + PSPromotionManager* pm, oop obj) { assert(pm->depth_first(), "invariant"); - oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); - if (PSScavenge::should_scavenge(*referent_addr)) { + T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); + if (PSScavenge::should_scavenge(referent_addr)) { ReferenceProcessor* rp = PSScavenge::reference_processor(); - if (rp->discover_reference(obj, reference_type())) { + if (rp->discover_reference(obj, ref->reference_type())) { // reference already enqueued, referent and next will be traversed later - instanceKlass::oop_push_contents(pm, obj); + ref->instanceKlass::oop_push_contents(pm, obj); return; } else { // treat referent as normal oop @@ -251,71 +321,68 @@ } } // treat next as normal oop - oop* next_addr = java_lang_ref_Reference::next_addr(obj); - if (PSScavenge::should_scavenge(*next_addr)) { + T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); + if (PSScavenge::should_scavenge(next_addr)) { pm->claim_or_forward_depth(next_addr); } - instanceKlass::oop_push_contents(pm, obj); + ref->instanceKlass::oop_push_contents(pm, obj); } -int instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { - instanceKlass::oop_update_pointers(cm, obj); - - oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); +void instanceRefKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { + if (UseCompressedOops) { + specialized_oop_push_contents(this, pm, obj); + } else { + specialized_oop_push_contents(this, pm, obj); + } +} + +template +void specialized_oop_update_pointers(instanceRefKlass *ref, + ParCompactionManager* cm, oop obj) { + T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); PSParallelCompact::adjust_pointer(referent_addr); - oop* next_addr = java_lang_ref_Reference::next_addr(obj); + T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); PSParallelCompact::adjust_pointer(next_addr); - oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj); + T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); PSParallelCompact::adjust_pointer(discovered_addr); + debug_only(trace_reference_gc("instanceRefKlass::oop_update_ptrs", obj, + referent_addr, next_addr, discovered_addr);) +} -#ifdef ASSERT - 
if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr("instanceRefKlass::oop_update_pointers obj " - INTPTR_FORMAT, (oopDesc*) obj); - gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / " - INTPTR_FORMAT, referent_addr, - referent_addr ? (oopDesc*) *referent_addr : NULL); - gclog_or_tty->print_cr(" next_addr/* " INTPTR_FORMAT " / " - INTPTR_FORMAT, next_addr, - next_addr ? (oopDesc*) *next_addr : NULL); - gclog_or_tty->print_cr(" discovered_addr/* " INTPTR_FORMAT " / " - INTPTR_FORMAT, discovered_addr, - discovered_addr ? (oopDesc*) *discovered_addr : NULL); +int instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { + instanceKlass::oop_update_pointers(cm, obj); + if (UseCompressedOops) { + specialized_oop_update_pointers(this, cm, obj); + } else { + specialized_oop_update_pointers(this, cm, obj); } -#endif - return size_helper(); } -int -instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj, - HeapWord* beg_addr, HeapWord* end_addr) { - instanceKlass::oop_update_pointers(cm, obj, beg_addr, end_addr); - - oop* p; - oop* referent_addr = p = java_lang_ref_Reference::referent_addr(obj); + +template void +specialized_oop_update_pointers(ParCompactionManager* cm, oop obj, + HeapWord* beg_addr, HeapWord* end_addr) { + T* p; + T* referent_addr = p = (T*)java_lang_ref_Reference::referent_addr(obj); PSParallelCompact::adjust_pointer(p, beg_addr, end_addr); - oop* next_addr = p = java_lang_ref_Reference::next_addr(obj); + T* next_addr = p = (T*)java_lang_ref_Reference::next_addr(obj); PSParallelCompact::adjust_pointer(p, beg_addr, end_addr); - oop* discovered_addr = p = java_lang_ref_Reference::discovered_addr(obj); + T* discovered_addr = p = (T*)java_lang_ref_Reference::discovered_addr(obj); PSParallelCompact::adjust_pointer(p, beg_addr, end_addr); + debug_only(trace_reference_gc("instanceRefKlass::oop_update_ptrs", obj, + referent_addr, next_addr, discovered_addr);) +} -#ifdef ASSERT - if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr("instanceRefKlass::oop_update_pointers obj " - INTPTR_FORMAT, (oopDesc*) obj); - gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / " - INTPTR_FORMAT, referent_addr, - referent_addr ? (oopDesc*) *referent_addr : NULL); - gclog_or_tty->print_cr(" next_addr/* " INTPTR_FORMAT " / " - INTPTR_FORMAT, next_addr, - next_addr ? (oopDesc*) *next_addr : NULL); - gclog_or_tty->print_cr(" discovered_addr/* " INTPTR_FORMAT " / " - INTPTR_FORMAT, discovered_addr, - discovered_addr ? (oopDesc*) *discovered_addr : NULL); +int +instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj, + HeapWord* beg_addr, HeapWord* end_addr) { + instanceKlass::oop_update_pointers(cm, obj, beg_addr, end_addr); + if (UseCompressedOops) { + specialized_oop_update_pointers(cm, obj, beg_addr, end_addr); + } else { + specialized_oop_update_pointers(cm, obj, beg_addr, end_addr); } -#endif - return size_helper(); } #endif // SERIALGC @@ -340,8 +407,8 @@ // Check that the current map is (2,4) - currently points at field with // offset 2 (words) and has 4 map entries. 
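Several hunks above (VerifyFieldClosure, Klass::oop_verify_old_oop, the reference verify code here) follow the same conversion: anything that only took oop* now also needs a narrowOop* entry point, typically a protected template do_oop_work plus two thin virtual overrides. A standalone sketch of that pattern, with a simplified decode step and made-up types:

    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t oop_t;
    typedef uint32_t  narrow_t;
    static uintptr_t heap_base = 0x100000000ULL;   // assumed narrow-oop base, shift 3

    static oop_t decode(oop_t v)    { return v; }
    static oop_t decode(narrow_t v) { return v == 0 ? 0 : heap_base + ((uintptr_t)v << 3); }

    struct OopClosureSketch {
      virtual void do_oop(oop_t* p) = 0;
      virtual void do_oop(narrow_t* p) = 0;
      virtual ~OopClosureSketch() {}
    };

    class VerifySketch : public OopClosureSketch {
     protected:
      // One body, instantiated for both slot widths.
      template <class T> void do_oop_work(T* p) {
        oop_t obj = decode(*p);
        std::printf("slot %p holds object 0x%lx\n", (void*)p, (unsigned long)obj);
      }
     public:
      virtual void do_oop(oop_t* p)    { do_oop_work(p); }
      virtual void do_oop(narrow_t* p) { do_oop_work(p); }
    };

    int main() {
      VerifySketch v;
      oop_t    wide   = 0x100000040ULL;
      narrow_t narrow = 8;            // decodes to heap_base + 64
      v.do_oop(&wide);
      v.do_oop(&narrow);
      return 0;
    }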
debug_only(int offset = java_lang_ref_Reference::referent_offset); - debug_only(int length = ((java_lang_ref_Reference::discovered_offset - - java_lang_ref_Reference::referent_offset)/wordSize) + 1); + debug_only(int length = ((java_lang_ref_Reference::discovered_offset - + java_lang_ref_Reference::referent_offset)/heapOopSize) + 1); if (UseSharedSpaces) { assert(map->offset() == java_lang_ref_Reference::queue_offset && @@ -371,11 +438,18 @@ if (referent != NULL) { guarantee(referent->is_oop(), "referent field heap failed"); - if (gch != NULL && !gch->is_in_youngest(obj)) + if (gch != NULL && !gch->is_in_youngest(obj)) { // We do a specific remembered set check here since the referent // field is not part of the oop mask and therefore skipped by the // regular verify code. - obj->verify_old_oop(java_lang_ref_Reference::referent_addr(obj), true); + if (UseCompressedOops) { + narrowOop* referent_addr = (narrowOop*)java_lang_ref_Reference::referent_addr(obj); + obj->verify_old_oop(referent_addr, true); + } else { + oop* referent_addr = (oop*)java_lang_ref_Reference::referent_addr(obj); + obj->verify_old_oop(referent_addr, true); + } + } } // Verify next field oop next = java_lang_ref_Reference::next(obj); @@ -386,7 +460,13 @@ // We do a specific remembered set check here since the next field is // not part of the oop mask and therefore skipped by the regular // verify code. - obj->verify_old_oop(java_lang_ref_Reference::next_addr(obj), true); + if (UseCompressedOops) { + narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj); + obj->verify_old_oop(next_addr, true); + } else { + oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); + obj->verify_old_oop(next_addr, true); + } } } } --- old/hotspot/src/share/vm/oops/instanceRefKlass.hpp 2009-08-01 04:12:54.673761361 +0100 +++ new/hotspot/src/share/vm/oops/instanceRefKlass.hpp 2009-08-01 04:12:54.594510222 +0100 @@ -75,7 +75,15 @@ int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr); ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceRefKlass_OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DECL) + +#ifndef SERIALGC +#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ + int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk); + + ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) +#endif // !SERIALGC static void release_and_notify_pending_list_lock(BasicLock *pending_list_basic_lock); static void acquire_pending_list_lock(BasicLock *pending_list_basic_lock); --- old/hotspot/src/share/vm/oops/klass.cpp 2009-08-01 04:12:55.576849693 +0100 +++ new/hotspot/src/share/vm/oops/klass.cpp 2009-08-01 04:12:55.496923748 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)klass.cpp 1.119 07/05/05 17:06:00 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -185,7 +185,7 @@ assert(etype >= T_BOOLEAN && etype <= T_OBJECT, "valid etype"); // Note that T_ARRAY is not allowed here. 
int hsize = arrayOopDesc::base_offset_in_bytes(etype); - int esize = type2aelembytes[etype]; + int esize = type2aelembytes(etype); bool isobj = (etype == T_OBJECT); int tag = isobj ? _lh_array_tag_obj_value : _lh_array_tag_type_value; int lh = array_layout_helper(tag, hsize, etype, exact_log2(esize)); @@ -481,6 +481,24 @@ const char* Klass::external_name() const { + if (oop_is_instance()) { + instanceKlass* ik = (instanceKlass*) this; + if (ik->is_anonymous()) { + assert(AnonymousClasses, ""); + intptr_t hash = ik->java_mirror()->identity_hash(); + char hash_buf[40]; + sprintf(hash_buf, "/" UINTX_FORMAT, (uintx)hash); + size_t hash_len = strlen(hash_buf); + + size_t result_len = name()->utf8_length(); + char* result = NEW_RESOURCE_ARRAY(char, result_len + hash_len + 1); + name()->as_klass_external_name(result, (int) result_len + 1); + assert(strlen(result) == result_len, ""); + strcpy(result + result_len, hash_buf); + assert(strlen(result) == result_len + hash_len, ""); + return result; + } + } return name()->as_klass_external_name(); } @@ -545,11 +563,10 @@ void Klass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) { /* $$$ I think this functionality should be handled by verification of - RememberedSet::verify_old_oop(obj, p, allow_dirty, false); - the card table. */ } +void Klass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) { } #ifndef PRODUCT --- old/hotspot/src/share/vm/oops/klass.hpp 2009-08-01 04:12:56.424721278 +0100 +++ new/hotspot/src/share/vm/oops/klass.hpp 2009-08-01 04:12:56.341440804 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)klass.hpp 1.142 07/05/29 09:44:17 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -137,14 +137,14 @@ // Every subclass on which vtbl_value is called must include this macro. // Delay the installation of the klassKlass pointer until after the // the vtable for a new klass has been installed (after the call to new()). -#define DEFINE_ALLOCATE_PERMANENT(thisKlass) \ +#define DEFINE_ALLOCATE_PERMANENT(thisKlass) \ void* allocate_permanent(KlassHandle& klass_klass, int size, TRAPS) const { \ - void* result = new(klass_klass, size, THREAD) thisKlass(); \ - if (HAS_PENDING_EXCEPTION) return NULL; \ - klassOop new_klass = ((Klass*) result)->as_klassOop(); \ - OrderAccess::storestore(); \ - post_new_init_klass(klass_klass, new_klass, size); \ - return result; \ + void* result = new(klass_klass, size, THREAD) thisKlass(); \ + if (HAS_PENDING_EXCEPTION) return NULL; \ + klassOop new_klass = ((Klass*) result)->as_klassOop(); \ + OrderAccess::storestore(); \ + post_new_init_klass(klass_klass, new_klass, size); \ + return result; \ } bool null_vtbl() { return *(intptr_t*)this == 0; } @@ -697,6 +697,14 @@ return oop_oop_iterate(obj, blk); } +#ifndef SERIALGC + // In case we don't have a specialized backward scanner use forward + // iteration. + virtual int oop_oop_iterate_backwards_v(oop obj, OopClosure* blk) { + return oop_oop_iterate_v(obj, blk); + } +#endif // !SERIALGC + // Iterates "blk" over all the oops in "obj" (of type "this") within "mr". 
// (I don't see why the _m should be required, but without it the Solaris // C++ gives warning messages about overridings of the "oop_oop_iterate" @@ -725,7 +733,19 @@ } SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL) - SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_3(Klass_OOP_OOP_ITERATE_DECL) + SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL) + +#ifndef SERIALGC +#define Klass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ + virtual int oop_oop_iterate_backwards##nv_suffix(oop obj, \ + OopClosureType* blk) { \ + /* Default implementation reverts to general version. */ \ + return oop_oop_iterate_backwards_v(obj, blk); \ + } + + SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL) + SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL) +#endif // !SERIALGC virtual void array_klasses_do(void f(klassOop k)) {} virtual void with_array_klasses_do(void f(klassOop k)); @@ -760,6 +780,7 @@ virtual const char* internal_name() const = 0; virtual void oop_verify_on(oop obj, outputStream* st); virtual void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty); + virtual void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty); // tells whether obj is partially constructed (gc during class loading) virtual bool oop_partially_loaded(oop obj) const { return false; } virtual void oop_set_partially_loaded(oop obj) {}; --- old/hotspot/src/share/vm/oops/klassVtable.cpp 2009-08-01 04:12:57.305631463 +0100 +++ new/hotspot/src/share/vm/oops/klassVtable.cpp 2009-08-01 04:12:57.227908266 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)klassVtable.cpp 1.146 07/07/19 12:19:09 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -875,7 +875,7 @@ void klassItable::initialize_itable(bool checkconstraints, TRAPS) { // Cannot be setup doing bootstrapping, interfaces don't have // itables, and klass with only ones entry have empty itables - if (Universe::is_bootstrapping() || + if (Universe::is_bootstrapping() || _klass->is_interface() || _klass->itable_length() == itableOffsetEntry::size()) return; @@ -885,7 +885,7 @@ if (num_interfaces > 0) { if (TraceItables) tty->print_cr("%3d: Initializing itables for %s", ++initialize_count, _klass->name()->as_C_string()); - + // Interate through all interfaces int i; @@ -896,7 +896,7 @@ initialize_itable_for_interface(ioe->offset(), interf_h, checkconstraints, CHECK); } - } + } // Check that the last entry is empty itableOffsetEntry* ioe = offset_entry(size_offset_table() - 1); guarantee(ioe->interface_klass() == NULL && ioe->offset() == 0, "terminator entry missing"); @@ -995,6 +995,10 @@ methodOop new_method = new_methods[j]; itableMethodEntry* ime = method_entry(0); + // The itable can describe more than one interface and the same + // method signature can be specified by more than one interface. + // This means we have to do an exhaustive search to find all the + // old_method references. 
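The klassItable hunk above drops the early break when swapping old_method for new_method, because the same signature can be reachable from more than one interface entry and every matching slot has to be rewritten. A small standalone sketch of why the exhaustive scan matters:

    #include <cstdio>

    struct MethodEntrySketch { const void* method; };

    // Replace every entry that still points at old_m; an early break after the
    // first hit would leave later interfaces' entries on the old method.
    static int update_all_entries(MethodEntrySketch* table, int n,
                                  const void* old_m, const void* new_m) {
      int updated = 0;
      for (int i = 0; i < n; i++) {
        if (table[i].method == old_m) {
          table[i].method = new_m;
          updated++;           // keep scanning: no break
        }
      }
      return updated;
    }

    int main() {
      int old_m = 0, new_m = 0;
      // Two interfaces declare the same signature, so the method appears twice.
      MethodEntrySketch itable[3] = { { &old_m }, { nullptr }, { &old_m } };
      std::printf("entries rewritten: %d\n",
                  update_all_entries(itable, 3, &old_m, &new_m));
      return 0;
    }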
for (int i = 0; i < _size_method_table; i++) { if (ime->method() == old_method) { ime->initialize(new_method); @@ -1011,7 +1015,6 @@ new_method->name()->as_C_string(), new_method->signature()->as_C_string())); } - break; } ime++; } @@ -1088,9 +1091,9 @@ // Count no of interfaces and total number of interface methods CountInterfacesClosure cic; visit_all_interfaces(transitive_interfaces(), &cic); - + // There's alway an extra itable entry so we can null-terminate it. - int itable_size = calc_itable_size(cic.nof_interfaces() + 1, cic.nof_methods()); + int itable_size = calc_itable_size(cic.nof_interfaces() + 1, cic.nof_methods()); // Statistics update_stats(itable_size * HeapWordSize); @@ -1109,10 +1112,10 @@ visit_all_interfaces(klass->transitive_interfaces(), &cic); int nof_methods = cic.nof_methods(); int nof_interfaces = cic.nof_interfaces(); - + // Add one extra entry so we can null-terminate the table nof_interfaces++; - + assert(compute_itable_size(objArrayHandle(klass->transitive_interfaces())) == calc_itable_size(nof_interfaces, nof_methods), "mismatch calculation of itable size"); @@ -1121,9 +1124,9 @@ itableOffsetEntry* ioe = (itableOffsetEntry*)klass->start_of_itable(); itableMethodEntry* ime = (itableMethodEntry*)(ioe + nof_interfaces); intptr_t* end = klass->end_of_itable(); - assert((oop*)(ime + nof_methods) <= klass->start_of_static_fields(), "wrong offset calculation (1)"); - assert((oop*)(end) == (oop*)(ime + nof_methods), "wrong offset calculation (2)"); - + assert((oop*)(ime + nof_methods) <= (oop*)klass->start_of_static_fields(), "wrong offset calculation (1)"); + assert((oop*)(end) == (oop*)(ime + nof_methods), "wrong offset calculation (2)"); + // Visit all interfaces and initialize itable offset table SetupItableClosure sic((address)klass->as_klassOop(), ioe, ime); visit_all_interfaces(klass->transitive_interfaces(), &sic); --- old/hotspot/src/share/vm/oops/klassVtable.hpp 2009-08-01 04:12:58.229143674 +0100 +++ new/hotspot/src/share/vm/oops/klassVtable.hpp 2009-08-01 04:12:58.146947860 +0100 @@ -261,7 +261,7 @@ itableMethodEntry* method_entry(int i) { assert(0 <= i && i <= _size_method_table, "index out of bounds"); return &((itableMethodEntry*)method_start())[i]; } - + int size_offset_table() { return _size_offset_table; } // Initialization --- old/hotspot/src/share/vm/oops/markOop.cpp 2009-08-01 04:12:59.001101502 +0100 +++ new/hotspot/src/share/vm/oops/markOop.cpp 2009-08-01 04:12:58.927674275 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)markOop.cpp 1.30 08/11/24 12:22:53 JVM" #endif /* - * Copyright 1997-1999 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- old/hotspot/src/share/vm/oops/markOop.hpp 2009-08-01 04:13:00.749890437 +0100 +++ new/hotspot/src/share/vm/oops/markOop.hpp 2009-08-01 04:13:00.672803377 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)markOop.hpp 1.66 08/11/24 12:22:54 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,8 +32,10 @@ // // Bit-format of an object header (most significant first): // -// -// unused:0/25 hash:25/31 age:4 biased_lock:1 lock:2 = 32/64 bits +// 32 bits: unused:0 hash:25 age:4 biased_lock:1 lock:2 +// 64 bits: unused:24 hash:31 cms:2 age:4 biased_lock:1 lock:2 +// unused:20 size:35 cms:2 age:4 biased_lock:1 lock:2 (if cms +// free chunk) // // - hash contains the identity hash value: largest value is // 31 bits, see os::random(). Also, 64-bit vm's require @@ -92,8 +94,9 @@ enum { age_bits = 4, lock_bits = 2, biased_lock_bits = 1, - max_hash_bits = BitsPerOop - age_bits - lock_bits - biased_lock_bits, + max_hash_bits = BitsPerWord - age_bits - lock_bits - biased_lock_bits, hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits, + cms_bits = LP64_ONLY(1) NOT_LP64(0), epoch_bits = 2 }; @@ -109,7 +112,8 @@ enum { lock_shift = 0, biased_lock_shift = lock_bits, age_shift = lock_bits + biased_lock_bits, - hash_shift = lock_bits + biased_lock_bits + age_bits, + cms_shift = age_shift + age_bits, + hash_shift = cms_shift + cms_bits, epoch_shift = hash_shift }; @@ -121,7 +125,9 @@ age_mask = right_n_bits(age_bits), age_mask_in_place = age_mask << age_shift, epoch_mask = right_n_bits(epoch_bits), - epoch_mask_in_place = epoch_mask << epoch_shift + epoch_mask_in_place = epoch_mask << epoch_shift, + cms_mask = right_n_bits(cms_bits), + cms_mask_in_place = cms_mask << cms_shift #ifndef _WIN64 ,hash_mask = right_n_bits(hash_bits), hash_mask_in_place = (address_word)hash_mask << hash_shift @@ -219,11 +225,7 @@ static markOop INFLATING() { return (markOop) 0; } // inflate-in-progress // Should this header be preserved during GC? - bool must_be_preserved(oop obj_containing_mark) const { - if (!UseBiasedLocking) - return (!is_unlocked() || !has_no_hash()); - return must_be_preserved_with_bias(obj_containing_mark); - } + inline bool must_be_preserved(oop obj_containing_mark) const; inline bool must_be_preserved_with_bias(oop obj_containing_mark) const; // Should this header (including its age bits) be preserved in the @@ -243,22 +245,14 @@ // observation is that promotion failures are quite rare and // reducing the number of mark words preserved during them isn't a // high priority. - bool must_be_preserved_for_promotion_failure(oop obj_containing_mark) const { - if (!UseBiasedLocking) - return (this != prototype()); - return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark); - } + inline bool must_be_preserved_for_promotion_failure(oop obj_containing_mark) const; inline bool must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const; // Should this header be preserved during a scavenge where CMS is // the old generation? 
// (This is basically the same body as must_be_preserved_for_promotion_failure(), // but takes the klassOop as argument instead) - bool must_be_preserved_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const { - if (!UseBiasedLocking) - return (this != prototype()); - return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark); - } + inline bool must_be_preserved_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const; inline bool must_be_preserved_with_bias_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const; // WARNING: The following routines are used EXCLUSIVELY by @@ -363,4 +357,40 @@ // see the definition in markOop.cpp for the gory details bool should_not_be_cached() const; + + // These markOops indicate cms free chunk blocks and not objects. + // In 64 bit, the markOop is set to distinguish them from oops. + // These are defined in 32 bit mode for vmStructs. + const static uintptr_t cms_free_chunk_pattern = 0x1; + + // Constants for the size field. + enum { size_shift = cms_shift + cms_bits, + size_bits = 35 // need for compressed oops 32G + }; + // These values are too big for Win64 + const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits)) + NOT_LP64(0); + const static uintptr_t size_mask_in_place = + (address_word)size_mask << size_shift; + +#ifdef _LP64 + static markOop cms_free_prototype() { + return markOop(((intptr_t)prototype() & ~cms_mask_in_place) | + ((cms_free_chunk_pattern & cms_mask) << cms_shift)); + } + uintptr_t cms_encoding() const { + return mask_bits(value() >> cms_shift, cms_mask); + } + bool is_cms_free_chunk() const { + return is_neutral() && + (cms_encoding() & cms_free_chunk_pattern) == cms_free_chunk_pattern; + } + + size_t get_size() const { return (size_t)(value() >> size_shift); } + static markOop set_size_and_free(size_t size) { + assert((size & ~size_mask) == 0, "shouldn't overflow size field"); + return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) | + (((intptr_t)size & size_mask) << size_shift)); + } +#endif // _LP64 }; --- old/hotspot/src/share/vm/oops/markOop.inline.hpp 2009-08-01 04:13:01.676584218 +0100 +++ new/hotspot/src/share/vm/oops/markOop.inline.hpp 2009-08-01 04:13:01.584935599 +0100 @@ -42,6 +42,12 @@ return (!is_unlocked() || !has_no_hash()); } +inline bool markOopDesc::must_be_preserved(oop obj_containing_mark) const { + if (!UseBiasedLocking) + return (!is_unlocked() || !has_no_hash()); + return must_be_preserved_with_bias(obj_containing_mark); +} + // Should this header (including its age bits) be preserved in the // case of a promotion failure during scavenge? inline bool markOopDesc::must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const { @@ -62,6 +68,13 @@ return (this != prototype()); } +inline bool markOopDesc::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const { + if (!UseBiasedLocking) + return (this != prototype()); + return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark); +} + + // Should this header (including its age bits) be preserved in the // case of a scavenge in which CMS is the old generation? 
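The markOop.hpp changes above insert cms bits after the age bits and, on 64-bit, a 35-bit size field so a mark word can describe a CMS free chunk rather than an object. A standalone sketch of the same shift/mask arithmetic using the widths from the new header comment (lock:2, biased_lock:1, age:4, cms:2, size:35); the encoding below is illustrative, not the exact prototype value:

    #include <cstdint>
    #include <cstdio>

    // Field widths from the 64-bit header comment:
    //   unused:20 size:35 cms:2 age:4 biased_lock:1 lock:2
    static const int lock_bits        = 2;
    static const int biased_lock_bits = 1;
    static const int age_bits         = 4;
    static const int cms_bits         = 2;
    static const int size_bits        = 35;

    static const int cms_shift  = lock_bits + biased_lock_bits + age_bits;  // 7
    static const int size_shift = cms_shift + cms_bits;                     // 9

    static const uint64_t cms_free_chunk_pattern = 0x1;
    static const uint64_t size_mask = ((uint64_t)1 << size_bits) - 1;

    static uint64_t encode_free_chunk(uint64_t size_in_words) {
      uint64_t mark = 0x1;                              // neutral (unlocked) lock bits
      mark |= (cms_free_chunk_pattern << cms_shift);    // "this is a free chunk"
      mark |= ((size_in_words & size_mask) << size_shift);
      return mark;
    }

    static uint64_t decode_size(uint64_t mark) { return mark >> size_shift; }

    int main() {
      uint64_t m = encode_free_chunk(1024);
      std::printf("mark word 0x%llx encodes chunk of %llu words\n",
                  (unsigned long long)m, (unsigned long long)decode_size(m));
      return 0;
    }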
inline bool markOopDesc::must_be_preserved_with_bias_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const { @@ -73,6 +86,11 @@ } return (this != prototype()); } +inline bool markOopDesc::must_be_preserved_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const { + if (!UseBiasedLocking) + return (this != prototype()); + return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark); +} inline markOop markOopDesc::prototype_for_object(oop obj) { #ifdef ASSERT --- old/hotspot/src/share/vm/oops/methodDataKlass.cpp 2009-08-01 04:13:02.546259911 +0100 +++ new/hotspot/src/share/vm/oops/methodDataKlass.cpp 2009-08-01 04:13:02.461998839 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)methodDataKlass.cpp 1.36 07/05/29 09:44:22 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,6 +98,7 @@ } #endif // SERIALGC + int methodDataKlass::oop_oop_iterate(oop obj, OopClosure* blk) { assert (obj->is_methodData(), "object must be method data"); methodDataOop m = methodDataOop(obj); @@ -116,7 +117,6 @@ return size; } - int methodDataKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) { assert (obj->is_methodData(), "object must be method data"); methodDataOop m = methodDataOop(obj); @@ -161,14 +161,14 @@ assert (obj->is_methodData(), "object must be method data"); methodDataOop m = methodDataOop(obj); // This should never point into the young gen. - assert(!PSScavenge::should_scavenge(oop(*m->adr_method())), "Sanity"); + assert(!PSScavenge::should_scavenge(m->adr_method()), "Sanity"); } void methodDataKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { assert (obj->is_methodData(), "object must be method data"); methodDataOop m = methodDataOop(obj); // This should never point into the young gen. - assert(!PSScavenge::should_scavenge(oop(*m->adr_method())), "Sanity"); + assert(!PSScavenge::should_scavenge(m->adr_method()), "Sanity"); } int methodDataKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { --- old/hotspot/src/share/vm/oops/methodDataOop.cpp 2009-08-01 04:13:03.385430457 +0100 +++ new/hotspot/src/share/vm/oops/methodDataOop.cpp 2009-08-01 04:13:03.299665587 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)methodDataOop.cpp 1.51 07/05/29 09:44:22 JVM" #endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ // Some types of data layouts need a length field. bool DataLayout::needs_array_len(u1 tag) { - return (tag == multi_branch_data_tag); + return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag); } // Perform generic initialization of the data. 
More specific @@ -407,6 +407,17 @@ } #endif +#ifndef PRODUCT +void ArgInfoData::print_data_on(outputStream* st) { + print_shared(st, "ArgInfoData"); + int nargs = number_of_args(); + for (int i = 0; i < nargs; i++) { + st->print(" 0x%x", arg_modified(i)); + } + st->cr(); +} + +#endif // ================================================================== // methodDataOop // @@ -511,6 +522,9 @@ int extra_data_count = compute_extra_data_count(data_size, empty_bc_count); object_size += extra_data_count * DataLayout::compute_size_in_bytes(0); + // Add a cell to record information about modified arguments. + int arg_size = method->size_of_parameters(); + object_size += DataLayout::compute_size_in_bytes(arg_size+1); return object_size; } @@ -629,6 +643,8 @@ return new BranchData(data_layout); case DataLayout::multi_branch_data_tag: return new MultiBranchData(data_layout); + case DataLayout::arg_info_data_tag: + return new ArgInfoData(data_layout); }; } @@ -684,7 +700,17 @@ // Add some extra DataLayout cells (at least one) to track stray traps. int extra_data_count = compute_extra_data_count(data_size, empty_bc_count); - object_size += extra_data_count * DataLayout::compute_size_in_bytes(0); + int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0); + + // Add a cell to record information about modified arguments. + // Set up _args_modified array after traps cells so that + // the code for traps cells works. + DataLayout *dp = data_layout_at(data_size + extra_size); + + int arg_size = method->size_of_parameters(); + dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1); + + object_size += extra_size + DataLayout::compute_size_in_bytes(arg_size+1); // Set an initial hint. Don't use set_hint_di() because // first_di() may be out of bounds if data_size is 0. @@ -767,6 +793,10 @@ // No need for "OrderAccess::load_acquire" ops, // since the data structure is monotonic. if (dp->tag() == DataLayout::no_tag) break; + if (dp->tag() == DataLayout::arg_info_data_tag) { + dp = end; // ArgInfoData is at the end of extra data section. + break; + } if (dp->bci() == bci) { assert(dp->tag() == DataLayout::bit_data_tag, "sane"); return new BitData(dp); @@ -788,6 +818,16 @@ return NULL; } +ArgInfoData *methodDataOopDesc::arg_info() { + DataLayout* dp = extra_data_base(); + DataLayout* end = extra_data_limit(); + for (; dp < end; dp = next_extra(dp)) { + if (dp->tag() == DataLayout::arg_info_data_tag) + return new ArgInfoData(dp); + } + return NULL; +} + #ifndef PRODUCT void methodDataOopDesc::print_data_on(outputStream* st) { ResourceMark rm; @@ -797,15 +837,20 @@ st->fill_to(6); data->print_data_on(st); } + st->print_cr("--- Extra data:"); DataLayout* dp = extra_data_base(); DataLayout* end = extra_data_limit(); for (; dp < end; dp = next_extra(dp)) { // No need for "OrderAccess::load_acquire" ops, // since the data structure is monotonic. - if (dp->tag() == DataLayout::no_tag) break; - if (dp == extra_data_base()) - st->print_cr("--- Extra data:"); - data = new BitData(dp); + if (dp->tag() == DataLayout::no_tag) continue; + if (dp->tag() == DataLayout::bit_data_tag) { + data = new BitData(dp); + } else { + assert(dp->tag() == DataLayout::arg_info_data_tag, "must be BitData or ArgInfo"); + data = new ArgInfoData(dp); + dp = end; // ArgInfoData is at the end of extra data section. 
+ } st->print("%d", dp_to_di(data->dp())); st->fill_to(6); data->print_data_on(st); --- old/hotspot/src/share/vm/oops/methodDataOop.hpp 2009-08-01 04:13:04.308273416 +0100 +++ new/hotspot/src/share/vm/oops/methodDataOop.hpp 2009-08-01 04:13:04.223114401 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)methodDataOop.hpp 1.54 07/08/29 13:42:27 JVM" #endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -104,7 +104,8 @@ virtual_call_data_tag, ret_data_tag, branch_data_tag, - multi_branch_data_tag + multi_branch_data_tag, + arg_info_data_tag }; enum { @@ -160,7 +161,6 @@ assert(ProfileTraps, "used only under +ProfileTraps"); uint old_flags = (_header._struct._flags & flag_mask); _header._struct._flags = (new_state << trap_shift) | old_flags; - assert(trap_state() == new_state, "sanity"); } u1 flags() { @@ -248,6 +248,7 @@ class BranchData; class ArrayData; class MultiBranchData; +class ArgInfoData; // ProfileData @@ -379,6 +380,8 @@ virtual bool is_BranchData() { return false; } virtual bool is_ArrayData() { return false; } virtual bool is_MultiBranchData() { return false; } + virtual bool is_ArgInfoData() { return false; } + BitData* as_BitData() { assert(is_BitData(), "wrong type"); @@ -416,6 +419,10 @@ assert(is_MultiBranchData(), "wrong type"); return is_MultiBranchData() ? (MultiBranchData*)this : NULL; } + ArgInfoData* as_ArgInfoData() { + assert(is_ArgInfoData(), "wrong type"); + return is_ArgInfoData() ? (ArgInfoData*)this : NULL; + } // Subclass specific initialization @@ -1050,6 +1057,33 @@ #endif }; +class ArgInfoData : public ArrayData { + +public: + ArgInfoData(DataLayout* layout) : ArrayData(layout) { + assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type"); + } + + virtual bool is_ArgInfoData() { return true; } + + + int number_of_args() { + return array_len(); + } + + uint arg_modified(int arg) { + return array_uint_at(arg); + } + + void set_arg_modified(int arg, uint val) { + array_set_int_at(arg, val); + } + +#ifndef PRODUCT + void print_data_on(outputStream* st); +#endif +}; + // methodDataOop // // A methodDataOop holds information which has been collected about @@ -1186,6 +1220,9 @@ // Find or create an extra ProfileData: ProfileData* bci_to_extra_data(int bci, bool create_if_missing); + // return the argument info cell + ArgInfoData *arg_info(); + public: static int header_size() { return sizeof(methodDataOopDesc)/wordSize; @@ -1218,18 +1255,28 @@ // Support for interprocedural escape analysis, from Thomas Kotzmann. 
enum EscapeFlag { estimated = 1 << 0, - return_local = 1 << 1 + return_local = 1 << 1, + return_allocated = 1 << 2, + allocated_escapes = 1 << 3, + unknown_modified = 1 << 4 }; intx eflags() { return _eflags; } intx arg_local() { return _arg_local; } intx arg_stack() { return _arg_stack; } intx arg_returned() { return _arg_returned; } + uint arg_modified(int a) { ArgInfoData *aid = arg_info(); + assert(a >= 0 && a < aid->number_of_args(), "valid argument number"); + return aid->arg_modified(a); } void set_eflags(intx v) { _eflags = v; } void set_arg_local(intx v) { _arg_local = v; } void set_arg_stack(intx v) { _arg_stack = v; } void set_arg_returned(intx v) { _arg_returned = v; } + void set_arg_modified(int a, uint v) { ArgInfoData *aid = arg_info(); + assert(a >= 0 && a < aid->number_of_args(), "valid argument number"); + + aid->set_arg_modified(a, v); } void clear_escape_info() { _eflags = _arg_local = _arg_stack = _arg_returned = 0; } --- old/hotspot/src/share/vm/oops/methodOop.cpp 2009-08-01 04:13:05.244166191 +0100 +++ new/hotspot/src/share/vm/oops/methodOop.cpp 2009-08-01 04:13:05.158726060 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)methodOop.cpp 1.314 08/11/24 12:22:56 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -433,11 +433,11 @@ bool methodOopDesc::is_accessor() const { if (code_size() != 5) return false; if (size_of_parameters() != 1) return false; - if (Bytecodes::java_code_at(code_base()+0) != Bytecodes::_aload_0 ) return false; - if (Bytecodes::java_code_at(code_base()+1) != Bytecodes::_getfield) return false; - Bytecodes::Code ret_bc = Bytecodes::java_code_at(code_base()+4); - if (Bytecodes::java_code_at(code_base()+4) != Bytecodes::_areturn && - Bytecodes::java_code_at(code_base()+4) != Bytecodes::_ireturn ) return false; + methodOop m = (methodOop)this; // pass to code_at() to avoid method_from_bcp + if (Bytecodes::java_code_at(code_base()+0, m) != Bytecodes::_aload_0 ) return false; + if (Bytecodes::java_code_at(code_base()+1, m) != Bytecodes::_getfield) return false; + if (Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_areturn && + Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_ireturn ) return false; return true; } @@ -674,10 +674,7 @@ } -address methodOopDesc::make_adapters(methodHandle mh, TRAPS) { - // If running -Xint we need no adapters. - if (Arguments::mode() == Arguments::_int) return NULL; - +address methodOopDesc::make_adapters(methodHandle mh, TRAPS) { // Adapters for compiled code are made eagerly here. They are fairly // small (generally < 100 bytes) and quick to make (and cached and shared) // so making them eagerly shouldn't be too expensive. @@ -790,7 +787,7 @@ } -methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length, +methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length, u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS) { // Code below does not work for native methods - they should never get rewritten anyway assert(!m->is_native(), "cannot rewrite native methods"); @@ -891,10 +888,11 @@ symbolHandle name (THREAD, sym); klassOop klass = SystemDictionary::resolve_or_null(name, class_loader, protection_domain, THREAD); - // We are loading classes eagerly. 
If a ClassNotFoundException was generated, - // be sure to ignore it. + // We are loading classes eagerly. If a ClassNotFoundException or + // a LinkageError was generated, be sure to ignore it. if (HAS_PENDING_EXCEPTION) { - if (PENDING_EXCEPTION->is_a(SystemDictionary::classNotFoundException_klass())) { + if (PENDING_EXCEPTION->is_a(SystemDictionary::classNotFoundException_klass()) || + PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) { CLEAR_PENDING_EXCEPTION; } else { return false; @@ -957,7 +955,7 @@ // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array static void reorder_based_on_method_index(objArrayOop methods, objArrayOop annotations, - oop* temp_array) { + GrowableArray* temp_array) { if (annotations == NULL) { return; } @@ -965,12 +963,15 @@ int length = methods->length(); int i; // Copy to temp array - memcpy(temp_array, annotations->obj_at_addr(0), length * sizeof(oop)); + temp_array->clear(); + for (i = 0; i < length; i++) { + temp_array->append(annotations->obj_at(i)); + } // Copy back using old method indices for (i = 0; i < length; i++) { methodOop m = (methodOop) methods->obj_at(i); - annotations->obj_at_put(i, temp_array[m->method_idnum()]); + annotations->obj_at_put(i, temp_array->at(m->method_idnum())); } } @@ -999,7 +1000,7 @@ // Use a simple bubble sort for small number of methods since // qsort requires a functional pointer call for each comparison. - if (length < 8) { + if (UseCompressedOops || length < 8) { bool sorted = true; for (int i=length-1; i>0; i--) { for (int j=0; jobj_at_addr(0), length, oopSize, compare); + qsort(methods->base(), length, heapOopSize, compare); } // Sort annotations if necessary @@ -1024,8 +1028,9 @@ assert(methods_parameter_annotations == NULL || methods_parameter_annotations->length() == methods->length(), ""); assert(methods_default_annotations == NULL || methods_default_annotations->length() == methods->length(), ""); if (do_annotations) { + ResourceMark rm; // Allocate temporary storage - oop* temp_array = NEW_RESOURCE_ARRAY(oop, length); + GrowableArray* temp_array = new GrowableArray(length); reorder_based_on_method_index(methods, methods_annotations, temp_array); reorder_based_on_method_index(methods, methods_parameter_annotations, temp_array); reorder_based_on_method_index(methods, methods_default_annotations, temp_array); --- old/hotspot/src/share/vm/oops/methodOop.hpp 2009-08-01 04:13:06.200719131 +0100 +++ new/hotspot/src/share/vm/oops/methodOop.hpp 2009-08-01 04:13:06.111990843 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)methodOop.hpp 1.221 08/11/24 12:22:56 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- old/hotspot/src/share/vm/oops/objArrayKlass.cpp 2009-08-01 04:13:07.102075951 +0100 +++ new/hotspot/src/share/vm/oops/objArrayKlass.cpp 2009-08-01 04:13:07.022006073 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)objArrayKlass.cpp 1.147 07/05/29 09:44:23 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -83,44 +83,24 @@ return h_array(); } -void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d, - int dst_pos, int length, TRAPS) { - assert(s->is_objArray(), "must be obj array"); +// Either oop or narrowOop depending on UseCompressedOops. +template <class T> void objArrayKlass::do_copy(arrayOop s, T* src, + arrayOop d, T* dst, int length, TRAPS) { - if (!d->is_objArray()) { - THROW(vmSymbols::java_lang_ArrayStoreException()); - } - - // Check is all offsets and lengths are non negative - if (src_pos < 0 || dst_pos < 0 || length < 0) { - THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); - } - // Check if the ranges are valid - if ( (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) - || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) { - THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); - } + const size_t word_len = objArrayOopDesc::array_size(length); - // Special case. Boundary cases must be checked first - // This allows the following call: copy_array(s, s.length(), d.length(), 0). - // This is correct, since the position is supposed to be an 'in between point', i.e., s.length(), - // points to the right of the last element. - if (length==0) { - return; - } - - oop* const src = objArrayOop(s)->obj_at_addr(src_pos); - oop* const dst = objArrayOop(d)->obj_at_addr(dst_pos); - const size_t word_len = length * HeapWordsPerOop; - - // For performance reasons, we assume we are using a card marking write - // barrier. The assert will fail if this is not the case. BarrierSet* bs = Universe::heap()->barrier_set(); + // For performance reasons, we assume that the write barrier we + // are using has optimized modes for arrays of references. At least one + // of the asserts below will fail if this is not the case. assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); + assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well."); + MemRegion dst_mr = MemRegion((HeapWord*)dst, word_len); if (s == d) { - // since source and destination are equal we do not need conversion checks. + // since source and destination are equal we do not need conversion checks. assert(length > 0, "sanity check"); + bs->write_ref_array_pre(dst_mr); Copy::conjoint_oops_atomic(src, dst, length); } else { // We have to make sure all elements conform to the destination array @@ -128,20 +108,31 @@ klassOop stype = objArrayKlass::cast(s->klass())->element_klass(); if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) { // elements are guaranteed to be subtypes, so no check necessary + bs->write_ref_array_pre(dst_mr); Copy::conjoint_oops_atomic(src, dst, length); } else { // slow case: need individual subtype checks // note: don't use obj_at_put below because it includes a redundant store check - oop* from = src; - oop* end = from + length; - for (oop* p = dst; from < end; from++, p++) { - oop element = *from; - if (element == NULL || Klass::cast(element->klass())->is_subtype_of(bound)) { - *p = element; + T* from = src; + T* end = from + length; + for (T* p = dst; from < end; from++, p++) { + // XXX this is going to be slow. + T element = *from; + // even slower now + bool element_is_null = oopDesc::is_null(element); + oop new_val = element_is_null ?
oop(NULL) + : oopDesc::decode_heap_oop_not_null(element); + if (element_is_null || + Klass::cast((new_val->klass()))->is_subtype_of(bound)) { + bs->write_ref_field_pre(p, new_val); + *p = *from; } else { - // We must do a barrier to cover the partial copy. - const size_t done_word_len = pointer_delta(p, dst, oopSize) * - HeapWordsPerOop; + // We must do a barrier to cover the partial copy. + const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize); + // pointer delta is scaled to number of elements (length field in + // objArrayOop) which we assume is 32 bit. + assert(pd == (size_t)(int)pd, "length field overflow"); + const size_t done_word_len = objArrayOopDesc::array_size((int)pd); bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len)); THROW(vmSymbols::java_lang_ArrayStoreException()); return; @@ -152,6 +143,42 @@ bs->write_ref_array(MemRegion((HeapWord*)dst, word_len)); } +void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d, + int dst_pos, int length, TRAPS) { + assert(s->is_objArray(), "must be obj array"); + + if (!d->is_objArray()) { + THROW(vmSymbols::java_lang_ArrayStoreException()); + } + + // Check is all offsets and lengths are non negative + if (src_pos < 0 || dst_pos < 0 || length < 0) { + THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); + } + // Check if the ranges are valid + if ( (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) + || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) { + THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); + } + + // Special case. Boundary cases must be checked first + // This allows the following call: copy_array(s, s.length(), d.length(), 0). + // This is correct, since the position is supposed to be an 'in between point', i.e., s.length(), + // points to the right of the last element. 
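(Editorial note: the short worked example below, with hypothetical lengths, illustrates the boundary rule described in the comment above; it is not part of the original change.)

  // Example: s->length() == 10, src_pos == 10, length == 0.
  // The range check performed earlier,
  //   (unsigned)length + (unsigned)src_pos > (unsigned)s->length()   =>   0 + 10 > 10,
  // is false, so the call is accepted; the length == 0 test below then
  // returns without copying anything and without raising an exception.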
+ if (length==0) { + return; + } + if (UseCompressedOops) { + narrowOop* const src = objArrayOop(s)->obj_at_addr(src_pos); + narrowOop* const dst = objArrayOop(d)->obj_at_addr(dst_pos); + do_copy(s, src, d, dst, length, CHECK); + } else { + oop* const src = objArrayOop(s)->obj_at_addr(src_pos); + oop* const dst = objArrayOop(d)->obj_at_addr(dst_pos); + do_copy (s, src, d, dst, length, CHECK); + } +} + klassOop objArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) { objArrayKlassHandle h_this(THREAD, as_klassOop()); @@ -245,49 +272,75 @@ return element_klass()->klass_part()->is_subtype_of(oak->element_klass()); } - void objArrayKlass::initialize(TRAPS) { Klass::cast(bottom_klass())->initialize(THREAD); // dispatches to either instanceKlass or typeArrayKlass } +#define ObjArrayKlass_SPECIALIZED_OOP_ITERATE(T, a, p, do_oop) \ +{ \ + T* p = (T*)(a)->base(); \ + T* const end = p + (a)->length(); \ + while (p < end) { \ + do_oop; \ + p++; \ + } \ +} + +#define ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(T, a, p, low, high, do_oop) \ +{ \ + T* const l = (T*)(low); \ + T* const h = (T*)(high); \ + T* p = (T*)(a)->base(); \ + T* end = p + (a)->length(); \ + if (p < l) p = l; \ + if (end > h) end = h; \ + while (p < end) { \ + do_oop; \ + ++p; \ + } \ +} + +#define ObjArrayKlass_OOP_ITERATE(a, p, do_oop) \ + if (UseCompressedOops) { \ + ObjArrayKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ + a, p, do_oop) \ + } else { \ + ObjArrayKlass_SPECIALIZED_OOP_ITERATE(oop, \ + a, p, do_oop) \ + } + +#define ObjArrayKlass_BOUNDED_OOP_ITERATE(a, p, low, high, do_oop) \ + if (UseCompressedOops) { \ + ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ + a, p, low, high, do_oop) \ + } else { \ + ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ + a, p, low, high, do_oop) \ + } void objArrayKlass::oop_follow_contents(oop obj) { assert (obj->is_array(), "obj must be array"); - arrayOop a = arrayOop(obj); + objArrayOop a = objArrayOop(obj); a->follow_header(); - oop* base = (oop*)a->base(T_OBJECT); - oop* const end = base + a->length(); - while (base < end) { - if (*base != NULL) - // we call mark_and_follow here to avoid excessive marking stack usage - MarkSweep::mark_and_follow(base); - base++; - } + ObjArrayKlass_OOP_ITERATE( \ + a, p, \ + /* we call mark_and_follow here to avoid excessive marking stack usage */ \ + MarkSweep::mark_and_follow(p)) } #ifndef SERIALGC void objArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj) { assert (obj->is_array(), "obj must be array"); - arrayOop a = arrayOop(obj); + objArrayOop a = objArrayOop(obj); a->follow_header(cm); - oop* base = (oop*)a->base(T_OBJECT); - oop* const end = base + a->length(); - while (base < end) { - if (*base != NULL) - // we call mark_and_follow here to avoid excessive marking stack usage - PSParallelCompact::mark_and_follow(cm, base); - base++; - } + ObjArrayKlass_OOP_ITERATE( \ + a, p, \ + /* we call mark_and_follow here to avoid excessive marking stack usage */ \ + PSParallelCompact::mark_and_follow(cm, p)) } #endif // SERIALGC -#define invoke_closure_on(base, closure, nv_suffix) { \ - if (*(base) != NULL) { \ - (closure)->do_oop##nv_suffix(base); \ - } \ -} - #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ \ int objArrayKlass::oop_oop_iterate##nv_suffix(oop obj, \ @@ -301,21 +354,7 @@ if (closure->do_header()) { \ a->oop_iterate_header(closure); \ } \ - oop* base = a->base(); \ - oop* const end = base + a->length(); \ - const intx field_offset = PrefetchFieldsAhead; \ - if (field_offset > 0) { 
\ - while (base < end) { \ - prefetch_beyond(base, end, field_offset, closure->prefetch_style()); \ - invoke_closure_on(base, closure, nv_suffix); \ - base++; \ - } \ - } else { \ - while (base < end) { \ - invoke_closure_on(base, closure, nv_suffix); \ - base++; \ - } \ - } \ + ObjArrayKlass_OOP_ITERATE(a, p, (closure)->do_oop##nv_suffix(p)) \ return size; \ } @@ -333,36 +372,53 @@ if (closure->do_header()) { \ a->oop_iterate_header(closure, mr); \ } \ - oop* bottom = (oop*)mr.start(); \ - oop* top = (oop*)mr.end(); \ - oop* base = a->base(); \ - oop* end = base + a->length(); \ - if (base < bottom) { \ - base = bottom; \ - } \ - if (end > top) { \ - end = top; \ - } \ - const intx field_offset = PrefetchFieldsAhead; \ - if (field_offset > 0) { \ - while (base < end) { \ - prefetch_beyond(base, end, field_offset, closure->prefetch_style()); \ - invoke_closure_on(base, closure, nv_suffix); \ - base++; \ + ObjArrayKlass_BOUNDED_OOP_ITERATE( \ + a, p, mr.start(), mr.end(), (closure)->do_oop##nv_suffix(p)) \ + return size; \ +} + +// Like oop_oop_iterate but only iterates over a specified range and only used +// for objArrayOops. +#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix) \ + \ +int objArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, \ + OopClosureType* closure, \ + int start, int end) { \ + SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::oa); \ + assert(obj->is_array(), "obj must be array"); \ + objArrayOop a = objArrayOop(obj); \ + /* Get size before changing pointers. */ \ + /* Don't call size() or oop_size() since that is a virtual call */ \ + int size = a->object_size(); \ + if (UseCompressedOops) { \ + HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr(start);\ + /* this might be wierd if end needs to be aligned on HeapWord boundary */ \ + HeapWord* high = (HeapWord*)((narrowOop*)a->base() + end); \ + MemRegion mr(low, high); \ + if (closure->do_header()) { \ + a->oop_iterate_header(closure, mr); \ } \ + ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ + a, p, low, high, (closure)->do_oop##nv_suffix(p)) \ } else { \ - while (base < end) { \ - invoke_closure_on(base, closure, nv_suffix); \ - base++; \ + HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr(start); \ + HeapWord* high = (HeapWord*)((oop*)a->base() + end); \ + MemRegion mr(low, high); \ + if (closure->do_header()) { \ + a->oop_iterate_header(closure, mr); \ } \ + ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ + a, p, low, high, (closure)->do_oop##nv_suffix(p)) \ } \ return size; \ } ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) +ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) +ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) +ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r) +ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r) int objArrayKlass::oop_adjust_pointers(oop obj) { assert(obj->is_objArray(), "obj must be obj array"); @@ -371,12 +427,7 @@ // Don't call size() or oop_size() since that is a virtual call. 
int size = a->object_size(); a->adjust_header(); - oop* base = a->base(); - oop* const end = base + a->length(); - while (base < end) { - MarkSweep::adjust_pointer(base); - base++; - } + ObjArrayKlass_OOP_ITERATE(a, p, MarkSweep::adjust_pointer(p)) return size; } @@ -384,51 +435,27 @@ void objArrayKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) { assert(!pm->depth_first(), "invariant"); assert(obj->is_objArray(), "obj must be obj array"); - // Compute oop range - oop* curr = objArrayOop(obj)->base(); - oop* end = curr + objArrayOop(obj)->length(); - // assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size"); - assert(align_object_size(pointer_delta(end, obj, sizeof(oop*))) - == oop_size(obj), "checking size"); - - // Iterate over oops - while (curr < end) { - if (PSScavenge::should_scavenge(*curr)) { - pm->claim_or_forward_breadth(curr); - } - ++curr; - } + ObjArrayKlass_OOP_ITERATE( \ + objArrayOop(obj), p, \ + if (PSScavenge::should_scavenge(p)) { \ + pm->claim_or_forward_breadth(p); \ + }) } void objArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { assert(pm->depth_first(), "invariant"); assert(obj->is_objArray(), "obj must be obj array"); - // Compute oop range - oop* curr = objArrayOop(obj)->base(); - oop* end = curr + objArrayOop(obj)->length(); - // assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size"); - assert(align_object_size(pointer_delta(end, obj, sizeof(oop*))) - == oop_size(obj), "checking size"); - - // Iterate over oops - while (curr < end) { - if (PSScavenge::should_scavenge(*curr)) { - pm->claim_or_forward_depth(curr); - } - ++curr; - } + ObjArrayKlass_OOP_ITERATE( \ + objArrayOop(obj), p, \ + if (PSScavenge::should_scavenge(p)) { \ + pm->claim_or_forward_depth(p); \ + }) } int objArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { assert (obj->is_objArray(), "obj must be obj array"); objArrayOop a = objArrayOop(obj); - - oop* const base = a->base(); - oop* const beg_oop = base; - oop* const end_oop = base + a->length(); - for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) { - PSParallelCompact::adjust_pointer(cur_oop); - } + ObjArrayKlass_OOP_ITERATE(a, p, PSParallelCompact::adjust_pointer(p)) return a->object_size(); } @@ -436,13 +463,9 @@ HeapWord* beg_addr, HeapWord* end_addr) { assert (obj->is_objArray(), "obj must be obj array"); objArrayOop a = objArrayOop(obj); - - oop* const base = a->base(); - oop* const beg_oop = MAX2((oop*)beg_addr, base); - oop* const end_oop = MIN2((oop*)end_addr, base + a->length()); - for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) { - PSParallelCompact::adjust_pointer(cur_oop); - } + ObjArrayKlass_BOUNDED_OOP_ITERATE( \ + a, p, beg_addr, end_addr, \ + PSParallelCompact::adjust_pointer(p)) return a->object_size(); } #endif // SERIALGC @@ -455,8 +478,8 @@ assert(Universe::is_bootstrapping(), "partial objArray only at startup"); return JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC; } - // Recurse down the element list - jint element_flags = Klass::cast(element_klass())->compute_modifier_flags(CHECK_0); + // Return the flags of the bottom element type. 
+ jint element_flags = Klass::cast(bottom_klass())->compute_modifier_flags(CHECK_0); return (element_flags & (JVM_ACC_PUBLIC | JVM_ACC_PRIVATE | JVM_ACC_PROTECTED)) | (JVM_ACC_ABSTRACT | JVM_ACC_FINAL); @@ -512,3 +535,4 @@ RememberedSet::verify_old_oop(obj, p, allow_dirty, true); */ } +void objArrayKlass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) {} --- old/hotspot/src/share/vm/oops/objArrayKlass.hpp 2009-08-01 04:13:08.020520769 +0100 +++ new/hotspot/src/share/vm/oops/objArrayKlass.hpp 2009-08-01 04:13:07.940216976 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)objArrayKlass.hpp 1.87 07/05/29 09:44:23 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,6 +66,11 @@ // Compute class loader oop class_loader() const { return Klass::cast(bottom_klass())->class_loader(); } + private: + // Either oop or narrowOop depending on UseCompressedOops. + // must be called from within objArrayKlass.cpp + template void do_copy(arrayOop s, T* src, arrayOop d, + T* dst, int length, TRAPS); protected: // Returns the objArrayKlass for n'th dimension. virtual klassOop array_klass_impl(bool or_null, int n, TRAPS); @@ -104,10 +109,12 @@ #define ObjArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \ - MemRegion mr); + MemRegion mr); \ + int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* blk, \ + int start, int end); ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DECL) // JVM support jint compute_modifier_flags(TRAPS) const; @@ -127,6 +134,7 @@ const char* internal_name() const; void oop_verify_on(oop obj, outputStream* st); void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty); + void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty); }; --- old/hotspot/src/share/vm/oops/objArrayOop.cpp 2009-08-01 04:13:08.779211830 +0100 +++ new/hotspot/src/share/vm/oops/objArrayOop.cpp 2009-08-01 04:13:08.710997417 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)objArrayOop.cpp 1.14 07/05/05 17:06:07 JVM" #endif /* - * Copyright 1997 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,5 +28,12 @@ # include "incls/_precompiled.incl" # include "incls/_objArrayOop.cpp.incl" -// <> +#define ObjArrayOop_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ + \ +int objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) { \ + SpecializationStats::record_call(); \ + return ((objArrayKlass*)blueprint())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \ +} +ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DEFN) +ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayOop_OOP_ITERATE_DEFN) --- old/hotspot/src/share/vm/oops/objArrayOop.hpp 2009-08-01 04:13:09.597424353 +0100 +++ new/hotspot/src/share/vm/oops/objArrayOop.hpp 2009-08-01 04:13:09.522731857 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)objArrayOop.hpp 1.29 07/05/05 17:06:07 JVM" #endif /* - * Copyright 1997-2003 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,20 +29,69 @@ // Evaluating "String arg[10]" will create an objArrayOop. class objArrayOopDesc : public arrayOopDesc { + friend class objArrayKlass; + friend class Runtime1; + friend class psPromotionManager; + friend class CSMarkOopClosure; + friend class G1ParScanPartialArrayClosure; + + template T* obj_at_addr(int index) const { + assert(is_within_bounds(index), "index out of bounds"); + return &((T*)base())[index]; + } + public: + // base is the address following the header. + HeapWord* base() const { return (HeapWord*) arrayOopDesc::base(T_OBJECT); } + // Accessing - oop obj_at(int index) const { return *obj_at_addr(index); } - void obj_at_put(int index, oop value) { oop_store(obj_at_addr(index), value); } - oop* base() const { return (oop*) arrayOopDesc::base(T_OBJECT); } + oop obj_at(int index) const { + // With UseCompressedOops decode the narrow oop in the objArray to an + // uncompressed oop. Otherwise this is simply a "*" operator. + if (UseCompressedOops) { + return load_decode_heap_oop(obj_at_addr(index)); + } else { + return load_decode_heap_oop(obj_at_addr(index)); + } + } + void obj_at_put(int index, oop value) { + if (UseCompressedOops) { + oop_store(obj_at_addr(index), value); + } else { + oop_store(obj_at_addr(index), value); + } + } // Sizing - static int header_size() { return arrayOopDesc::header_size(T_OBJECT); } - static int object_size(int length) { return align_object_size(header_size() + length); } - int object_size() { return object_size(length()); } + static int header_size() { return arrayOopDesc::header_size(T_OBJECT); } + int object_size() { return object_size(length()); } + int array_size() { return array_size(length()); } - // Returns the address of the index'th element - oop* obj_at_addr(int index) const { - assert(is_within_bounds(index), "index out of bounds"); - return &base()[index]; + static int object_size(int length) { + // This returns the object size in HeapWords. + return align_object_size(header_size() + array_size(length)); + } + + // Give size of objArrayOop in HeapWords minus the header + static int array_size(int length) { + // Without UseCompressedOops, this is simply: + // oop->length() * HeapWordsPerOop; + // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer. 
+ // The oop elements are aligned up to wordSize + const int HeapWordsPerOop = heapOopSize/HeapWordSize; + if (HeapWordsPerOop > 0) { + return length * HeapWordsPerOop; + } else { + const int OopsPerHeapWord = HeapWordSize/heapOopSize; + int word_len = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord; + return word_len; + } } + + // special iterators for index ranges, returns size of object +#define ObjArrayOop_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ + int oop_iterate_range(OopClosureType* blk, int start, int end); + + ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayOop_OOP_ITERATE_DECL) }; --- old/hotspot/src/share/vm/oops/oop.cpp 2009-08-01 04:13:10.499110359 +0100 +++ new/hotspot/src/share/vm/oops/oop.cpp 2009-08-01 04:13:10.410096321 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)oop.cpp 1.99 07/05/29 09:44:21 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -108,10 +108,14 @@ } +// XXX verify_old_oop doesn't do anything (should we remove?) void oopDesc::verify_old_oop(oop* p, bool allow_dirty) { blueprint()->oop_verify_old_oop(this, p, allow_dirty); } +void oopDesc::verify_old_oop(narrowOop* p, bool allow_dirty) { + blueprint()->oop_verify_old_oop(this, p, allow_dirty); +} bool oopDesc::partially_loaded() { return blueprint()->oop_partially_loaded(this); @@ -133,3 +137,6 @@ } VerifyOopClosure VerifyOopClosure::verify_oop; + +void VerifyOopClosure::do_oop(oop* p) { VerifyOopClosure::do_oop_work(p); } +void VerifyOopClosure::do_oop(narrowOop* p) { VerifyOopClosure::do_oop_work(p); } --- old/hotspot/src/share/vm/oops/oop.hpp 2009-08-01 04:13:11.378612920 +0100 +++ new/hotspot/src/share/vm/oops/oop.hpp 2009-08-01 04:13:11.294005309 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)oop.hpp 1.118 07/08/31 18:42:30 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,12 +33,12 @@ // no virtual functions allowed // store into oop with store check -void oop_store(oop* p, oop v); -void oop_store(volatile oop* p, oop v); +template void oop_store(T* p, oop v); +template void oop_store(volatile T* p, oop v); // store into oop without store check -void oop_store_without_check(oop* p, oop v); -void oop_store_without_check(volatile oop* p, oop v); +template void oop_store_without_check(T* p, oop v); +template void oop_store_without_check(volatile T* p, oop v); extern bool always_do_update_barrier; @@ -58,7 +58,10 @@ friend class VMStructs; private: volatile markOop _mark; - klassOop _klass; + union _metadata { + wideKlassOop _klass; + narrowOop _compressed_klass; + } _metadata; // Fast access to barrier set. Must be initialized. 
static BarrierSet* _bs; @@ -76,16 +79,21 @@ // objects during a GC) -- requires a valid klass pointer void init_mark(); - klassOop klass() const { return _klass; } - oop* klass_addr() const { return (oop*) &_klass; } + klassOop klass() const; + klassOop klass_or_null() const volatile; + oop* klass_addr(); + narrowOop* compressed_klass_addr(); void set_klass(klassOop k); + + // For klass field compression + int klass_gap() const; + void set_klass_gap(int z); // For when the klass pointer is being used as a linked list "next" field. void set_klass_to_list_ptr(oop k); - // size of object header - static int header_size() { return sizeof(oopDesc)/HeapWordSize; } - static int header_size_in_bytes() { return sizeof(oopDesc); } + // size of object header, aligned to platform wordSize + static int header_size() { return sizeof(oopDesc)/HeapWordSize; } Klass* blueprint() const; @@ -122,7 +130,6 @@ private: // field addresses in oop - // byte/char/bool/short fields are always stored as full words void* field_base(int offset) const; jbyte* byte_field_addr(int offset) const; @@ -133,13 +140,70 @@ jlong* long_field_addr(int offset) const; jfloat* float_field_addr(int offset) const; jdouble* double_field_addr(int offset) const; + address* address_field_addr(int offset) const; public: - // need this as public for garbage collection - oop* obj_field_addr(int offset) const; + // Need this as public for garbage collection. + template T* obj_field_addr(int offset) const; + + // Oop encoding heap max + static const uint64_t OopEncodingHeapMax = + (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes; + + static bool is_null(oop obj); + static bool is_null(narrowOop obj); + + // Decode an oop pointer from a narrowOop if compressed. + // These are overloaded for oop and narrowOop as are the other functions + // below so that they can be called in template functions. + static oop decode_heap_oop_not_null(oop v); + static oop decode_heap_oop_not_null(narrowOop v); + static oop decode_heap_oop(oop v); + static oop decode_heap_oop(narrowOop v); + + // Encode an oop pointer to a narrow oop. The or_null versions accept + // null oop pointer, others do not in order to eliminate the + // null checking branches. + static narrowOop encode_heap_oop_not_null(oop v); + static narrowOop encode_heap_oop(oop v); + + // Load an oop out of the Java heap + static narrowOop load_heap_oop(narrowOop* p); + static oop load_heap_oop(oop* p); + + // Load an oop out of Java heap and decode it to an uncompressed oop. + static oop load_decode_heap_oop_not_null(narrowOop* p); + static oop load_decode_heap_oop_not_null(oop* p); + static oop load_decode_heap_oop(narrowOop* p); + static oop load_decode_heap_oop(oop* p); + + // Store an oop into the heap. + static void store_heap_oop(narrowOop* p, narrowOop v); + static void store_heap_oop(oop* p, oop v); + + // Encode oop if UseCompressedOops and store into the heap. 
+ static void encode_store_heap_oop_not_null(narrowOop* p, oop v); + static void encode_store_heap_oop_not_null(oop* p, oop v); + static void encode_store_heap_oop(narrowOop* p, oop v); + static void encode_store_heap_oop(oop* p, oop v); + + static void release_store_heap_oop(volatile narrowOop* p, narrowOop v); + static void release_store_heap_oop(volatile oop* p, oop v); + + static void release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v); + static void release_encode_store_heap_oop_not_null(volatile oop* p, oop v); + static void release_encode_store_heap_oop(volatile narrowOop* p, oop v); + static void release_encode_store_heap_oop(volatile oop* p, oop v); + + static oop atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest); + static oop atomic_compare_exchange_oop(oop exchange_value, + volatile HeapWord *dest, + oop compare_value); + // Access to fields in a instanceOop through these methods. oop obj_field(int offset) const; void obj_field_put(int offset, oop value); + void obj_field_raw_put(int offset, oop value); jbyte byte_field(int offset) const; void byte_field_put(int offset, jbyte contents); @@ -165,6 +229,9 @@ jdouble double_field(int offset) const; void double_field_put(int offset, jdouble contents); + address address_field(int offset) const; + void address_field_put(int offset, address contents); + oop obj_field_acquire(int offset) const; void release_obj_field_put(int offset, oop value); @@ -210,6 +277,7 @@ void verify_on(outputStream* st); void verify(); void verify_old_oop(oop* p, bool allow_dirty); + void verify_old_oop(narrowOop* p, bool allow_dirty); // tells whether this oop is partially constructed (gc during class loading) bool partially_loaded(); @@ -231,8 +299,8 @@ bool is_gc_marked() const; // Apply "MarkSweep::mark_and_push" to (the address of) every non-NULL // reference field in "this". - void follow_contents(); - void follow_header(); + void follow_contents(void); + void follow_header(void); #ifndef SERIALGC // Parallel Scavenge @@ -298,12 +366,21 @@ static void set_bs(BarrierSet* bs) { _bs = bs; } // iterators, returns size of object -#define OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ +#define OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ int oop_iterate(OopClosureType* blk); \ int oop_iterate(OopClosureType* blk, MemRegion mr); // Only in mr. 
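(Editorial note: the oop/narrowOop overload pairs declared above are what allow GC code to be written once as a template over the element type. A minimal sketch of that pattern follows; count_live is a hypothetical helper for illustration, not a HotSpot function.)

    // Sketch only: one template body serves both heap layouts because
    // oopDesc::is_null() and oopDesc::load_decode_heap_oop_not_null() are
    // overloaded for both oop* and narrowOop* elements.
    template <class T>
    int count_live(T* base, int length) {
      int live = 0;
      for (T* p = base; p < base + length; p++) {
        if (!oopDesc::is_null(*p)) {
          oop o = oopDesc::load_decode_heap_oop_not_null(p);  // no UseCompressedOops branch here
          assert(o != NULL, "just checked for null");
          live++;
        }
      }
      return live;
    }
    // A caller still selects the element type exactly once:
    //   UseCompressedOops ? count_live((narrowOop*)base, len)
    //                     : count_live((oop*)base, len);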
- ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_3(OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DECL) + +#ifndef SERIALGC + +#define OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ + int oop_iterate_backwards(OopClosureType* blk); + + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DECL) +#endif void oop_iterate_header(OopClosure* blk); void oop_iterate_header(OopClosure* blk, MemRegion mr); @@ -320,6 +397,7 @@ void set_displaced_mark(markOop m); // for code generation - static int klass_offset_in_bytes() { return offset_of(oopDesc, _klass); } static int mark_offset_in_bytes() { return offset_of(oopDesc, _mark); } + static int klass_offset_in_bytes() { return offset_of(oopDesc, _metadata._klass); } + static int klass_gap_offset_in_bytes(); }; --- old/hotspot/src/share/vm/oops/oop.inline.hpp 2009-08-01 04:13:12.328388543 +0100 +++ new/hotspot/src/share/vm/oops/oop.inline.hpp 2009-08-01 04:13:12.240738981 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)oop.inline.hpp 1.142 07/09/25 16:47:44 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,6 @@ // Implementation of all inlined member functions defined in oop.hpp // We need a separate file to avoid circular references - inline void oopDesc::release_set_mark(markOop m) { OrderAccess::release_store_ptr(&_mark, m); } @@ -37,17 +36,69 @@ return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark); } +inline klassOop oopDesc::klass() const { + if (UseCompressedOops) { + return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass); + } else { + return _metadata._klass; + } +} + +inline klassOop oopDesc::klass_or_null() const volatile { + // can be NULL in CMS + if (UseCompressedOops) { + return (klassOop)decode_heap_oop(_metadata._compressed_klass); + } else { + return _metadata._klass; + } +} + +inline int oopDesc::klass_gap_offset_in_bytes() { + assert(UseCompressedOops, "only applicable to compressed headers"); + return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop); +} + +inline oop* oopDesc::klass_addr() { + // Only used internally and with CMS and will not work with + // UseCompressedOops + assert(!UseCompressedOops, "only supported with uncompressed oops"); + return (oop*) &_metadata._klass; +} + +inline narrowOop* oopDesc::compressed_klass_addr() { + assert(UseCompressedOops, "only called by compressed oops"); + return (narrowOop*) &_metadata._compressed_klass; +} + inline void oopDesc::set_klass(klassOop k) { // since klasses are promoted no store check is needed assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop"); assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop"); - oop_store_without_check((oop*) &_klass, (oop) k); + if (UseCompressedOops) { + oop_store_without_check(compressed_klass_addr(), (oop)k); + } else { + oop_store_without_check(klass_addr(), (oop) k); + } +} + +inline int oopDesc::klass_gap() const { + return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()); +} + +inline void oopDesc::set_klass_gap(int v) { + if (UseCompressedOops) { + *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v; + } } inline void 
oopDesc::set_klass_to_list_ptr(oop k) { // This is only to be used during GC, for from-space objects, so no // barrier is needed. - _klass = (klassOop)k; + if (UseCompressedOops) { + _metadata._compressed_klass = encode_heap_oop(k); // may be null (parnew overflow handling) + } else { + _metadata._klass = (klassOop)k; + } } inline void oopDesc::init_mark() { set_mark(markOopDesc::prototype_for_object(this)); } @@ -73,7 +124,7 @@ inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; } -inline oop* oopDesc::obj_field_addr(int offset) const { return (oop*) field_base(offset); } +template inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); } inline jbyte* oopDesc::byte_field_addr(int offset) const { return (jbyte*) field_base(offset); } inline jchar* oopDesc::char_field_addr(int offset) const { return (jchar*) field_base(offset); } inline jboolean* oopDesc::bool_field_addr(int offset) const { return (jboolean*)field_base(offset); } @@ -82,9 +133,158 @@ inline jlong* oopDesc::long_field_addr(int offset) const { return (jlong*) field_base(offset); } inline jfloat* oopDesc::float_field_addr(int offset) const { return (jfloat*) field_base(offset); } inline jdouble* oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); } +inline address* oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); } + + +// Functions for getting and setting oops within instance objects. +// If the oops are compressed, the type passed to these overloaded functions +// is narrowOop. All functions are overloaded so they can be called by +// template functions without conditionals (the compiler instantiates via +// the right type and inlines the appopriate code). + +inline bool oopDesc::is_null(oop obj) { return obj == NULL; } +inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; } + +// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit +// offset from the heap base. Saving the check for null can save instructions +// in inner GC loops so these are separated. + +inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) { + assert(!is_null(v), "oop value can never be zero"); + address heap_base = Universe::heap_base(); + uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)heap_base, 1)); + assert(OopEncodingHeapMax > pd, "change encoding max if new encoding"); + uint64_t result = pd >> LogMinObjAlignmentInBytes; + assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow"); + return (narrowOop)result; +} + +inline narrowOop oopDesc::encode_heap_oop(oop v) { + return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v); +} + +inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) { + assert(!is_null(v), "narrow oop value can never be zero"); + address heap_base = Universe::heap_base(); + return (oop)(void*)((uintptr_t)heap_base + ((uintptr_t)v << LogMinObjAlignmentInBytes)); +} + +inline oop oopDesc::decode_heap_oop(narrowOop v) { + return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v); +} + +inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; } +inline oop oopDesc::decode_heap_oop(oop v) { return v; } + +// Load an oop out of the Java heap as is without decoding. +// Called by GC to check for null before decoding. +inline oop oopDesc::load_heap_oop(oop* p) { return *p; } +inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; } + +// Load and decode an oop out of the Java heap into a wide oop. 
+inline oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; } +inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) { + return decode_heap_oop_not_null(*p); +} + +// Load and decode an oop out of the heap accepting null +inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; } +inline oop oopDesc::load_decode_heap_oop(narrowOop* p) { + return decode_heap_oop(*p); +} + +// Store already encoded heap oop into the heap. +inline void oopDesc::store_heap_oop(oop* p, oop v) { *p = v; } +inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; } + +// Encode and store a heap oop. +inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) { + *p = encode_heap_oop_not_null(v); +} +inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; } + +// Encode and store a heap oop allowing for null. +inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) { + *p = encode_heap_oop(v); +} +inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; } + +// Store heap oop as is for volatile fields. +inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) { + OrderAccess::release_store_ptr(p, v); +} +inline void oopDesc::release_store_heap_oop(volatile narrowOop* p, + narrowOop v) { + OrderAccess::release_store(p, v); +} + +inline void oopDesc::release_encode_store_heap_oop_not_null( + volatile narrowOop* p, oop v) { + // heap oop is not pointer sized. + OrderAccess::release_store(p, encode_heap_oop_not_null(v)); +} + +inline void oopDesc::release_encode_store_heap_oop_not_null( + volatile oop* p, oop v) { + OrderAccess::release_store_ptr(p, v); +} + +inline void oopDesc::release_encode_store_heap_oop(volatile oop* p, + oop v) { + OrderAccess::release_store_ptr(p, v); +} +inline void oopDesc::release_encode_store_heap_oop( + volatile narrowOop* p, oop v) { + OrderAccess::release_store(p, encode_heap_oop(v)); +} + + +// These functions are only used to exchange oop fields in instances, +// not headers. +inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) { + if (UseCompressedOops) { + // encode exchange value from oop to T + narrowOop val = encode_heap_oop(exchange_value); + narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest); + // decode old from T to oop + return decode_heap_oop(old); + } else { + return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest); + } +} -inline oop oopDesc::obj_field(int offset) const { return *obj_field_addr(offset); } -inline void oopDesc::obj_field_put(int offset, oop value) { oop_store(obj_field_addr(offset), value); } +inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value, + volatile HeapWord *dest, + oop compare_value) { + if (UseCompressedOops) { + // encode exchange and compare value from oop to T + narrowOop val = encode_heap_oop(exchange_value); + narrowOop cmp = encode_heap_oop(compare_value); + + narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp); + // decode old from T to oop + return decode_heap_oop(old); + } else { + return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value); + } +} + +// In order to put or get a field out of an instance, must first check +// if the field has been compressed and uncompress it. +inline oop oopDesc::obj_field(int offset) const { + return UseCompressedOops ? + load_decode_heap_oop(obj_field_addr(offset)) : + load_decode_heap_oop(obj_field_addr(offset)); +} +inline void oopDesc::obj_field_put(int offset, oop value) { + UseCompressedOops ? 
oop_store(obj_field_addr(offset), value) : + oop_store(obj_field_addr(offset), value); +} +inline void oopDesc::obj_field_raw_put(int offset, oop value) { + UseCompressedOops ? + encode_store_heap_oop(obj_field_addr(offset), value) : + encode_store_heap_oop(obj_field_addr(offset), value); +} inline jbyte oopDesc::byte_field(int offset) const { return (jbyte) *byte_field_addr(offset); } inline void oopDesc::byte_field_put(int offset, jbyte contents) { *byte_field_addr(offset) = (jint) contents; } @@ -110,8 +310,21 @@ inline jdouble oopDesc::double_field(int offset) const { return *double_field_addr(offset); } inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; } -inline oop oopDesc::obj_field_acquire(int offset) const { return (oop)OrderAccess::load_ptr_acquire(obj_field_addr(offset)); } -inline void oopDesc::release_obj_field_put(int offset, oop value) { oop_store((volatile oop*)obj_field_addr(offset), value); } +inline address oopDesc::address_field(int offset) const { return *address_field_addr(offset); } +inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; } + +inline oop oopDesc::obj_field_acquire(int offset) const { + return UseCompressedOops ? + decode_heap_oop((narrowOop) + OrderAccess::load_acquire(obj_field_addr(offset))) + : decode_heap_oop((oop) + OrderAccess::load_ptr_acquire(obj_field_addr(offset))); +} +inline void oopDesc::release_obj_field_put(int offset, oop value) { + UseCompressedOops ? + oop_store((volatile narrowOop*)obj_field_addr(offset), value) : + oop_store((volatile oop*) obj_field_addr(offset), value); +} inline jbyte oopDesc::byte_field_acquire(int offset) const { return OrderAccess::load_acquire(byte_field_addr(offset)); } inline void oopDesc::release_byte_field_put(int offset, jbyte contents) { OrderAccess::release_store(byte_field_addr(offset), contents); } @@ -137,7 +350,6 @@ inline jdouble oopDesc::double_field_acquire(int offset) const { return OrderAccess::load_acquire(double_field_addr(offset)); } inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); } - inline int oopDesc::size_given_klass(Klass* klass) { int lh = klass->layout_helper(); int s = lh >> LogHeapWordSize; // deliver size scaled by wordSize @@ -186,10 +398,11 @@ s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize); - // UseParNewGC can change the length field of an "old copy" of an object - // array in the young gen so it indicates the stealable portion of - // an already copied array. This will cause the first disjunct below - // to fail if the sizes are computed across such a concurrent change. + // UseParNewGC, UseParallelGC and UseG1GC can change the length field + // of an "old copy" of an object array in the young gen so it indicates + // the grey portion of an already copied array. This will cause the first + // disjunct below to fail if the two comparands are computed across such + // a concurrent change. 
// UseParNewGC also runs with promotion labs (which look like int // filler arrays) which are subject to changing their declared size // when finally retiring a PLAB; this also can cause the first disjunct @@ -199,13 +412,11 @@ // is_objArray() && is_forwarded() // covers first scenario above // || is_typeArray() // covers second scenario above // If and when UseParallelGC uses the same obj array oop stealing/chunking - // technique, or when G1 is integrated (and currently uses this array chunking - // technique) we will need to suitably modify the assertion. + // technique, we will need to suitably modify the assertion. assert((s == klass->oop_size(this)) || - (((UseParNewGC || UseParallelGC) && - Universe::heap()->is_gc_active()) && - (is_typeArray() || - (is_objArray() && is_forwarded()))), + (Universe::heap()->is_gc_active() && + ((is_typeArray() && UseParNewGC) || + (is_objArray() && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))), "wrong array object size"); } else { // Must be zero, so bite the bullet and take the virtual call. @@ -227,52 +438,64 @@ return blueprint()->oop_is_parsable(this); } - -inline void update_barrier_set(oop *p, oop v) { +inline void update_barrier_set(void* p, oop v) { assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!"); oopDesc::bs()->write_ref_field(p, v); } +inline void update_barrier_set_pre(void* p, oop v) { + oopDesc::bs()->write_ref_field_pre(p, v); +} -inline void oop_store(oop* p, oop v) { +template inline void oop_store(T* p, oop v) { if (always_do_update_barrier) { - oop_store((volatile oop*)p, v); + oop_store((volatile T*)p, v); } else { - *p = v; + update_barrier_set_pre(p, v); + oopDesc::encode_store_heap_oop(p, v); update_barrier_set(p, v); } } -inline void oop_store(volatile oop* p, oop v) { +template inline void oop_store(volatile T* p, oop v) { + update_barrier_set_pre((void*)p, v); // Used by release_obj_field_put, so use release_store_ptr. - OrderAccess::release_store_ptr(p, v); - update_barrier_set((oop *)p, v); + oopDesc::release_encode_store_heap_oop(p, v); + update_barrier_set((void*)p, v); } -inline void oop_store_without_check(oop* p, oop v) { +template inline void oop_store_without_check(T* p, oop v) { // XXX YSR FIX ME!!! if (always_do_update_barrier) { - oop_store(p, v); + oop_store(p, v); } else { assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v), "oop store without store check failed"); - *p = v; + oopDesc::encode_store_heap_oop(p, v); } } // When it absolutely has to get there. -inline void oop_store_without_check(volatile oop* p, oop v) { +template inline void oop_store_without_check(volatile T* p, oop v) { // XXX YSR FIX ME!!! if (always_do_update_barrier) { oop_store(p, v); } else { - assert(!Universe::heap()->barrier_set()-> - write_ref_needs_barrier((oop *)p, v), + assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v), "oop store without store check failed"); - OrderAccess::release_store_ptr(p, v); + oopDesc::release_encode_store_heap_oop(p, v); } } +// Should replace *addr = oop assignments where addr type depends on UseCompressedOops +// (without having to remember the function name this calls). 
+inline void oop_store_raw(HeapWord* addr, oop value) { + if (UseCompressedOops) { + oopDesc::encode_store_heap_oop((narrowOop*)addr, value); + } else { + oopDesc::encode_store_heap_oop((oop*)addr, value); + } +} // Used only for markSweep, scavenging inline bool oopDesc::is_gc_marked() const { @@ -305,7 +528,7 @@ // try to find metaclass cycle safely without seg faulting on bad input // we should reach klassKlassObj by following klass link at most 3 times for (int i = 0; i < 3; i++) { - obj = obj->klass(); + obj = obj->klass_or_null(); // klass should be aligned and in permspace if (!check_obj_alignment(obj)) return false; if (!Universe::heap()->is_in_permanent(obj)) return false; @@ -343,15 +566,17 @@ if (!Universe::heap()->is_in_reserved(this)) return false; return mark()->is_unlocked(); } - - #endif // PRODUCT -inline void oopDesc::follow_header() { - MarkSweep::mark_and_push((oop*)&_klass); +inline void oopDesc::follow_header() { + if (UseCompressedOops) { + MarkSweep::mark_and_push(compressed_klass_addr()); + } else { + MarkSweep::mark_and_push(klass_addr()); + } } -inline void oopDesc::follow_contents() { +inline void oopDesc::follow_contents(void) { assert (is_gc_marked(), "should be marked"); blueprint()->oop_follow_contents(this); } @@ -365,7 +590,6 @@ return mark()->is_marked(); } - // Used by scavengers inline void oopDesc::forward_to(oop p) { assert(Universe::heap()->is_in_reserved(p), @@ -387,8 +611,9 @@ // Note that the forwardee is not the same thing as the displaced_mark. // The forwardee is used when copying during scavenge and mark-sweep. // It does need to clear the low two locking- and GC-related bits. -inline oop oopDesc::forwardee() const { return (oop) mark()->decode_pointer(); } - +inline oop oopDesc::forwardee() const { + return (oop) mark()->decode_pointer(); +} inline bool oopDesc::has_displaced_mark() const { return mark()->has_displaced_mark_helper(); @@ -435,17 +660,24 @@ } } - inline void oopDesc::oop_iterate_header(OopClosure* blk) { - blk->do_oop((oop*)&_klass); + if (UseCompressedOops) { + blk->do_oop(compressed_klass_addr()); + } else { + blk->do_oop(klass_addr()); + } } - inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) { - if (mr.contains(&_klass)) blk->do_oop((oop*)&_klass); + if (UseCompressedOops) { + if (mr.contains(compressed_klass_addr())) { + blk->do_oop(compressed_klass_addr()); + } + } else { + if (mr.contains(klass_addr())) blk->do_oop(klass_addr()); + } } - inline int oopDesc::adjust_pointers() { debug_only(int check_size = size()); int s = blueprint()->oop_adjust_pointers(this); @@ -454,7 +686,11 @@ } inline void oopDesc::adjust_header() { - MarkSweep::adjust_pointer((oop*)&_klass); + if (UseCompressedOops) { + MarkSweep::adjust_pointer(compressed_klass_addr()); + } else { + MarkSweep::adjust_pointer(klass_addr()); + } } #define OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ @@ -469,9 +705,20 @@ return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \ } -ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_3(OOP_ITERATE_DEFN) +ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN) +ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN) + +#ifndef SERIALGC +#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ + \ +inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \ + SpecializationStats::record_call(); \ + return blueprint()->oop_oop_iterate_backwards##nv_suffix(this, blk); \ +} +ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN) 
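Because follow_header and oop_iterate_header can now hand a closure either a full-width slot or a compressed one, GC closures need overloads for both pointer types. A sketch of that shape with a hypothetical visitor class (the real hierarchy is HotSpot's OopClosure; only the two-overload pattern is the point here):

  class oopDesc;
  typedef oopDesc*     oop;        // full-width reference
  typedef unsigned int narrowOop;  // compressed reference

  class RefVisitor {
   public:
    virtual void do_oop(oop* p)       = 0;   // visit a wide reference slot
    virtual void do_oop(narrowOop* p) = 0;   // visit a compressed reference slot
    virtual ~RefVisitor() {}
  };

  // The header walkers above then dispatch on the runtime flag:
  //   if (UseCompressedOops) blk->do_oop(compressed_klass_addr());
  //   else                   blk->do_oop(klass_addr());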
+ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN) +#endif // !SERIALGC inline bool oopDesc::is_shared() const { return CompactingPermGenGen::is_shared(this); --- old/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp 2009-08-01 04:13:13.305752596 +0100 +++ new/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp 2009-08-01 04:13:13.216174847 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)oop.pcgc.inline.hpp 1.16 07/05/29 09:44:24 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,8 +70,8 @@ // update_header(); // The klass has moved. Is the location of the klass // within the limits? - if ((((HeapWord*)&_klass) >= begin_limit) && - (((HeapWord*)&_klass) < end_limit)) { + if ((((HeapWord*)&_metadata._klass) >= begin_limit) && + (((HeapWord*)&_metadata._klass) < end_limit)) { set_klass(updated_klass); } @@ -92,7 +92,11 @@ // Used by parallel old GC. inline void oopDesc::follow_header(ParCompactionManager* cm) { - PSParallelCompact::mark_and_push(cm, (oop*)&_klass); + if (UseCompressedOops) { + PSParallelCompact::mark_and_push(cm, compressed_klass_addr()); + } else { + PSParallelCompact::mark_and_push(cm, klass_addr()); + } } inline oop oopDesc::forward_to_atomic(oop p) { @@ -117,9 +121,18 @@ } inline void oopDesc::update_header() { - PSParallelCompact::adjust_pointer((oop*)&_klass); + if (UseCompressedOops) { + PSParallelCompact::adjust_pointer(compressed_klass_addr()); + } else { + PSParallelCompact::adjust_pointer(klass_addr()); + } } inline void oopDesc::update_header(HeapWord* beg_addr, HeapWord* end_addr) { - PSParallelCompact::adjust_pointer((oop*)&_klass, beg_addr, end_addr); + if (UseCompressedOops) { + PSParallelCompact::adjust_pointer(compressed_klass_addr(), + beg_addr, end_addr); + } else { + PSParallelCompact::adjust_pointer(klass_addr(), beg_addr, end_addr); + } } --- old/hotspot/src/share/vm/oops/oopsHierarchy.hpp 2009-08-01 04:13:14.149057979 +0100 +++ new/hotspot/src/share/vm/oops/oopsHierarchy.hpp 2009-08-01 04:13:14.064965634 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)oopsHierarchy.hpp 1.31 07/05/17 15:57:10 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,21 +29,25 @@ // This hierarchy is a representation hierarchy, i.e. if A is a superclass // of B, A's representation is a prefix of B's representation. +typedef juint narrowOop; // Offset instead of address for an oop within a java object +typedef class klassOopDesc* wideKlassOop; // to keep SA happy and unhandled oop + // detector happy. 
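The oop.pcgc.inline.hpp hunk replaces &_klass with &_metadata._klass, which implies the object header now keeps its klass reference in a union so the same word can hold either a wide pointer or a compressed one. A hedged sketch of that layout; the member names are inferred from this diff (klass_addr / compressed_klass_addr), not copied from oop.hpp:

  class markOopDesc;   typedef markOopDesc*  markOop;
  class klassOopDesc;  typedef klassOopDesc* klassOop;
  typedef unsigned int narrowOop;

  class oopDesc_model {
    markOop _mark;                    // mark word: locking, GC age, identity hash
    union metadata {
      klassOop  _klass;               // uncompressed klass pointer
      narrowOop _compressed_klass;    // 32-bit compressed klass reference
    } _metadata;
  };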
+ #ifndef CHECK_UNHANDLED_OOPS -typedef class oopDesc* oop; -typedef class instanceOopDesc* instanceOop; -typedef class methodOopDesc* methodOop; -typedef class constMethodOopDesc* constMethodOop; -typedef class methodDataOopDesc* methodDataOop; -typedef class arrayOopDesc* arrayOop; -typedef class constantPoolOopDesc* constantPoolOop; -typedef class constantPoolCacheOopDesc* constantPoolCacheOop; -typedef class objArrayOopDesc* objArrayOop; -typedef class typeArrayOopDesc* typeArrayOop; -typedef class symbolOopDesc* symbolOop; -typedef class klassOopDesc* klassOop; -typedef class markOopDesc* markOop; +typedef class oopDesc* oop; +typedef class instanceOopDesc* instanceOop; +typedef class methodOopDesc* methodOop; +typedef class constMethodOopDesc* constMethodOop; +typedef class methodDataOopDesc* methodDataOop; +typedef class arrayOopDesc* arrayOop; +typedef class objArrayOopDesc* objArrayOop; +typedef class typeArrayOopDesc* typeArrayOop; +typedef class constantPoolOopDesc* constantPoolOop; +typedef class constantPoolCacheOopDesc* constantPoolCacheOop; +typedef class symbolOopDesc* symbolOop; +typedef class klassOopDesc* klassOop; +typedef class markOopDesc* markOop; typedef class compiledICHolderOopDesc* compiledICHolderOop; #else @@ -175,9 +179,9 @@ class objArrayKlassKlass; class typeArrayKlassKlass; class arrayKlass; -class constantPoolKlass; -class constantPoolCacheKlass; class objArrayKlass; class typeArrayKlass; -class symbolKlass; +class constantPoolKlass; +class constantPoolCacheKlass; +class symbolKlass; class compiledICHolderKlass; --- old/hotspot/src/share/vm/oops/symbolKlass.cpp 2009-08-01 04:13:14.956717072 +0100 +++ new/hotspot/src/share/vm/oops/symbolKlass.cpp 2009-08-01 04:13:14.887839404 +0100 @@ -212,10 +212,7 @@ void symbolKlass::oop_print_on(oop obj, outputStream* st) { st->print("Symbol: '"); - symbolOop sym = symbolOop(obj); - for (int i = 0; i < sym->utf8_length(); i++) { - st->print("%c", sym->byte_at(i)); - } + symbolOop(obj)->print_symbol_on(st); st->print("'"); } --- old/hotspot/src/share/vm/oops/symbolOop.cpp 2009-08-01 04:13:15.766566109 +0100 +++ new/hotspot/src/share/vm/oops/symbolOop.cpp 2009-08-01 04:13:15.693017371 +0100 @@ -71,8 +71,17 @@ void symbolOopDesc::print_symbol_on(outputStream* st) { st = st ? 
st : tty; - for (int index = 0; index < utf8_length(); index++) - st->put((char)byte_at(index)); + int length = UTF8::unicode_length((const char*)bytes(), utf8_length()); + const char *ptr = (const char *)bytes(); + jchar value; + for (int index = 0; index < length; index++) { + ptr = UTF8::next(ptr, &value); + if (value >= 32 && value < 127 || value == '\'' || value == '\\') { + st->put(value); + } else { + st->print("\\u%04x", value); + } + } } jchar* symbolOopDesc::as_unicode(int& length) const { --- old/hotspot/src/share/vm/oops/typeArrayKlass.cpp 2009-08-01 04:13:16.630274521 +0100 +++ new/hotspot/src/share/vm/oops/typeArrayKlass.cpp 2009-08-01 04:13:16.555151252 +0100 @@ -39,14 +39,15 @@ return element_type() == tak->element_type(); } -klassOop typeArrayKlass::create_klass(BasicType type, int scale, TRAPS) { +klassOop typeArrayKlass::create_klass(BasicType type, int scale, + const char* name_str, TRAPS) { typeArrayKlass o; symbolHandle sym(symbolOop(NULL)); // bootstrapping: don't create sym if symbolKlass not created yet - if (Universe::symbolKlassObj() != NULL) { - sym = oopFactory::new_symbol_handle(external_name(type), CHECK_NULL); - } + if (Universe::symbolKlassObj() != NULL && name_str != NULL) { + sym = oopFactory::new_symbol_handle(name_str, CHECK_NULL); + } KlassHandle klassklass (THREAD, Universe::typeArrayKlassKlassObj()); arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL); --- old/hotspot/src/share/vm/oops/typeArrayKlass.hpp 2009-08-01 04:13:17.462738911 +0100 +++ new/hotspot/src/share/vm/oops/typeArrayKlass.hpp 2009-08-01 04:13:17.382662172 +0100 @@ -42,7 +42,11 @@ // klass allocation DEFINE_ALLOCATE_PERMANENT(typeArrayKlass); - static klassOop create_klass(BasicType type, int scale, TRAPS); + static klassOop create_klass(BasicType type, int scale, const char* name_str, + TRAPS); + static inline klassOop create_klass(BasicType type, int scale, TRAPS) { + return create_klass(type, scale, external_name(type), CHECK_NULL); + } int oop_size(oop obj) const; int klass_oop_size() const { return object_size(); } --- old/hotspot/src/share/vm/opto/addnode.cpp 2009-08-01 04:13:18.365334064 +0100 +++ new/hotspot/src/share/vm/opto/addnode.cpp 2009-08-01 04:13:18.280179646 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)addnode.cpp 1.142 07/10/23 13:12:52 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,9 +73,14 @@ // Convert "Load+x" into "x+Load". // Now check for loads - if( in2->is_Load() ) return false; - // Left is a Load and Right is not; move it right. - if( in1->is_Load() ) { + if (in2->is_Load()) { + if (!in1->is_Load()) { + // already x+Load to return + return false; + } + // both are loads, so fall through to sort inputs by idx + } else if( in1->is_Load() ) { + // Left is a Load and Right is not; move it right. 
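Earlier in this hunk, the new print_symbol_on decodes the symbol's UTF-8 bytes into Unicode code points and escapes anything that is not printable ASCII (quote and backslash are passed through verbatim) as \uXXXX. Since UTF8::next and outputStream are HotSpot-internal, the sketch below applies the same escaping rule to a single already-decoded jchar using stdio:

  #include <cstdio>
  typedef unsigned short jchar;

  static void put_symbol_char(jchar value) {
    if ((value >= 32 && value < 127) || value == '\'' || value == '\\') {
      putchar((char)value);          // printable ASCII, quote, backslash: verbatim
    } else {
      printf("\\u%04x", value);      // everything else as a Java-style escape
    }
  }

So a symbol containing the UTF-8 bytes for "π()" would print as \u03c0().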
add->swap_edges(1, 2); return true; } @@ -154,7 +159,9 @@ if( add1_op == this_op && !con_right ) { Node *a12 = add1->in(2); const Type *t12 = phase->type( a12 ); - if( t12->singleton() && t12 != Type::TOP && (add1 != add1->in(1)) ) { + if( t12->singleton() && t12 != Type::TOP && (add1 != add1->in(1)) && + !(add1->in(1)->is_Phi() && add1->in(1)->as_Phi()->is_tripcount()) ) { + assert(add1->in(1) != this, "dead loop in AddNode::Ideal"); add2 = add1->clone(); add2->set_req(2, in(2)); add2 = phase->transform(add2); @@ -170,7 +177,9 @@ if( add2_op == this_op && !con_left ) { Node *a22 = add2->in(2); const Type *t22 = phase->type( a22 ); - if( t22->singleton() && t22 != Type::TOP && (add2 != add2->in(1)) ) { + if( t22->singleton() && t22 != Type::TOP && (add2 != add2->in(1)) && + !(add2->in(1)->is_Phi() && add2->in(1)->as_Phi()->is_tripcount()) ) { + assert(add2->in(1) != this, "dead loop in AddNode::Ideal"); Node *addx = add2->clone(); addx->set_req(1, in(1)); addx->set_req(2, add2->in(1)); @@ -221,34 +230,63 @@ //============================================================================= //------------------------------Idealize--------------------------------------- Node *AddINode::Ideal(PhaseGVN *phase, bool can_reshape) { - int op1 = in(1)->Opcode(); - int op2 = in(2)->Opcode(); + Node* in1 = in(1); + Node* in2 = in(2); + int op1 = in1->Opcode(); + int op2 = in2->Opcode(); // Fold (con1-x)+con2 into (con1+con2)-x + if ( op1 == Op_AddI && op2 == Op_SubI ) { + // Swap edges to try optimizations below + in1 = in2; + in2 = in(1); + op1 = op2; + op2 = in2->Opcode(); + } if( op1 == Op_SubI ) { - const Type *t_sub1 = phase->type( in(1)->in(1) ); - const Type *t_2 = phase->type( in(2) ); + const Type *t_sub1 = phase->type( in1->in(1) ); + const Type *t_2 = phase->type( in2 ); if( t_sub1->singleton() && t_2->singleton() && t_sub1 != Type::TOP && t_2 != Type::TOP ) return new (phase->C, 3) SubINode(phase->makecon( add_ring( t_sub1, t_2 ) ), - in(1)->in(2) ); + in1->in(2) ); // Convert "(a-b)+(c-d)" into "(a+c)-(b+d)" if( op2 == Op_SubI ) { // Check for dead cycle: d = (a-b)+(c-d) - assert( in(1)->in(2) != this && in(2)->in(2) != this, + assert( in1->in(2) != this && in2->in(2) != this, "dead loop in AddINode::Ideal" ); Node *sub = new (phase->C, 3) SubINode(NULL, NULL); - sub->init_req(1, phase->transform(new (phase->C, 3) AddINode(in(1)->in(1), in(2)->in(1) ) )); - sub->init_req(2, phase->transform(new (phase->C, 3) AddINode(in(1)->in(2), in(2)->in(2) ) )); + sub->init_req(1, phase->transform(new (phase->C, 3) AddINode(in1->in(1), in2->in(1) ) )); + sub->init_req(2, phase->transform(new (phase->C, 3) AddINode(in1->in(2), in2->in(2) ) )); return sub; } + // Convert "(a-b)+(b+c)" into "(a+c)" + if( op2 == Op_AddI && in1->in(2) == in2->in(1) ) { + assert(in1->in(1) != this && in2->in(2) != this,"dead loop in AddINode::Ideal"); + return new (phase->C, 3) AddINode(in1->in(1), in2->in(2)); + } + // Convert "(a-b)+(c+b)" into "(a+c)" + if( op2 == Op_AddI && in1->in(2) == in2->in(2) ) { + assert(in1->in(1) != this && in2->in(1) != this,"dead loop in AddINode::Ideal"); + return new (phase->C, 3) AddINode(in1->in(1), in2->in(1)); + } + // Convert "(a-b)+(b-c)" into "(a-c)" + if( op2 == Op_SubI && in1->in(2) == in2->in(1) ) { + assert(in1->in(1) != this && in2->in(2) != this,"dead loop in AddINode::Ideal"); + return new (phase->C, 3) SubINode(in1->in(1), in2->in(2)); + } + // Convert "(a-b)+(c-a)" into "(c-b)" + if( op2 == Op_SubI && in1->in(1) == in2->in(2) ) { + assert(in1->in(2) != this && in2->in(1) != 
this,"dead loop in AddINode::Ideal"); + return new (phase->C, 3) SubINode(in2->in(1), in1->in(2)); + } } // Convert "x+(0-y)" into "(x-y)" - if( op2 == Op_SubI && phase->type(in(2)->in(1)) == TypeInt::ZERO ) - return new (phase->C, 3) SubINode(in(1), in(2)->in(2) ); + if( op2 == Op_SubI && phase->type(in2->in(1)) == TypeInt::ZERO ) + return new (phase->C, 3) SubINode(in1, in2->in(2) ); // Convert "(0-y)+x" into "(x-y)" - if( op1 == Op_SubI && phase->type(in(1)->in(1)) == TypeInt::ZERO ) - return new (phase->C, 3) SubINode( in(2), in(1)->in(2) ); + if( op1 == Op_SubI && phase->type(in1->in(1)) == TypeInt::ZERO ) + return new (phase->C, 3) SubINode( in2, in1->in(2) ); // Convert (x>>>z)+y into (x+(y<>>z for small constant z and y. // Helps with array allocation math constant folding @@ -262,15 +300,15 @@ // Have not observed cases where type information exists to support // positive y and (x <= -(y << z)) if( op1 == Op_URShiftI && op2 == Op_ConI && - in(1)->in(2)->Opcode() == Op_ConI ) { - jint z = phase->type( in(1)->in(2) )->is_int()->get_con() & 0x1f; // only least significant 5 bits matter - jint y = phase->type( in(2) )->is_int()->get_con(); + in1->in(2)->Opcode() == Op_ConI ) { + jint z = phase->type( in1->in(2) )->is_int()->get_con() & 0x1f; // only least significant 5 bits matter + jint y = phase->type( in2 )->is_int()->get_con(); if( z < 5 && -5 < y && y < 0 ) { - const Type *t_in11 = phase->type(in(1)->in(1)); + const Type *t_in11 = phase->type(in1->in(1)); if( t_in11 != Type::TOP && (t_in11->is_int()->_lo >= -(y << z)) ) { - Node *a = phase->transform( new (phase->C, 3) AddINode( in(1)->in(1), phase->intcon(y<C, 3) URShiftINode( a, in(1)->in(2) ); + Node *a = phase->transform( new (phase->C, 3) AddINode( in1->in(1), phase->intcon(y<C, 3) URShiftINode( a, in1->in(2) ); } } } @@ -324,39 +362,73 @@ //============================================================================= //------------------------------Idealize--------------------------------------- Node *AddLNode::Ideal(PhaseGVN *phase, bool can_reshape) { - int op1 = in(1)->Opcode(); - int op2 = in(2)->Opcode(); + Node* in1 = in(1); + Node* in2 = in(2); + int op1 = in1->Opcode(); + int op2 = in2->Opcode(); + // Fold (con1-x)+con2 into (con1+con2)-x + if ( op1 == Op_AddL && op2 == Op_SubL ) { + // Swap edges to try optimizations below + in1 = in2; + in2 = in(1); + op1 = op2; + op2 = in2->Opcode(); + } // Fold (con1-x)+con2 into (con1+con2)-x if( op1 == Op_SubL ) { - const Type *t_sub1 = phase->type( in(1)->in(1) ); - const Type *t_2 = phase->type( in(2) ); + const Type *t_sub1 = phase->type( in1->in(1) ); + const Type *t_2 = phase->type( in2 ); if( t_sub1->singleton() && t_2->singleton() && t_sub1 != Type::TOP && t_2 != Type::TOP ) return new (phase->C, 3) SubLNode(phase->makecon( add_ring( t_sub1, t_2 ) ), - in(1)->in(2) ); + in1->in(2) ); // Convert "(a-b)+(c-d)" into "(a+c)-(b+d)" if( op2 == Op_SubL ) { // Check for dead cycle: d = (a-b)+(c-d) - assert( in(1)->in(2) != this && in(2)->in(2) != this, + assert( in1->in(2) != this && in2->in(2) != this, "dead loop in AddLNode::Ideal" ); Node *sub = new (phase->C, 3) SubLNode(NULL, NULL); - sub->init_req(1, phase->transform(new (phase->C, 3) AddLNode(in(1)->in(1), in(2)->in(1) ) )); - sub->init_req(2, phase->transform(new (phase->C, 3) AddLNode(in(1)->in(2), in(2)->in(2) ) )); + sub->init_req(1, phase->transform(new (phase->C, 3) AddLNode(in1->in(1), in2->in(1) ) )); + sub->init_req(2, phase->transform(new (phase->C, 3) AddLNode(in1->in(2), in2->in(2) ) )); return sub; } + // 
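The new AddINode::Ideal cases cancel a shared operand between a difference and a neighboring sum or difference. A quick sanity check of the four identities in plain C++ (no overflow occurs with these values; the same identities hold for Java's wrapping 32-bit arithmetic that the compiler models):

  #include <cassert>
  int main() {
    int a = 10, b = 3, c = 5;
    assert((a - b) + (b + c) == a + c);   // "(a-b)+(b+c)" ==> "(a+c)"
    assert((a - b) + (c + b) == a + c);   // "(a-b)+(c+b)" ==> "(a+c)"
    assert((a - b) + (b - c) == a - c);   // "(a-b)+(b-c)" ==> "(a-c)"
    assert((a - b) + (c - a) == c - b);   // "(a-b)+(c-a)" ==> "(c-b)"
    return 0;
  }

The asserts in the hunk itself guard the other hazard: if one of the matched inputs were the node being idealized, rewriting would create a self-referential (dead-loop) graph.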
Convert "(a-b)+(b+c)" into "(a+c)" + if( op2 == Op_AddL && in1->in(2) == in2->in(1) ) { + assert(in1->in(1) != this && in2->in(2) != this,"dead loop in AddLNode::Ideal"); + return new (phase->C, 3) AddLNode(in1->in(1), in2->in(2)); + } + // Convert "(a-b)+(c+b)" into "(a+c)" + if( op2 == Op_AddL && in1->in(2) == in2->in(2) ) { + assert(in1->in(1) != this && in2->in(1) != this,"dead loop in AddLNode::Ideal"); + return new (phase->C, 3) AddLNode(in1->in(1), in2->in(1)); + } + // Convert "(a-b)+(b-c)" into "(a-c)" + if( op2 == Op_SubL && in1->in(2) == in2->in(1) ) { + assert(in1->in(1) != this && in2->in(2) != this,"dead loop in AddLNode::Ideal"); + return new (phase->C, 3) SubLNode(in1->in(1), in2->in(2)); + } + // Convert "(a-b)+(c-a)" into "(c-b)" + if( op2 == Op_SubL && in1->in(1) == in1->in(2) ) { + assert(in1->in(2) != this && in2->in(1) != this,"dead loop in AddLNode::Ideal"); + return new (phase->C, 3) SubLNode(in2->in(1), in1->in(2)); + } } // Convert "x+(0-y)" into "(x-y)" - if( op2 == Op_SubL && phase->type(in(2)->in(1)) == TypeLong::ZERO ) - return new (phase->C, 3) SubLNode(in(1), in(2)->in(2) ); + if( op2 == Op_SubL && phase->type(in2->in(1)) == TypeLong::ZERO ) + return new (phase->C, 3) SubLNode( in1, in2->in(2) ); + + // Convert "(0-y)+x" into "(x-y)" + if( op1 == Op_SubL && phase->type(in1->in(1)) == TypeInt::ZERO ) + return new (phase->C, 3) SubLNode( in2, in1->in(2) ); // Convert "X+X+X+X+X...+X+Y" into "k*X+Y" or really convert "X+(X+Y)" // into "(X<<1)+Y" and let shift-folding happen. if( op2 == Op_AddL && - in(2)->in(1) == in(1) && - op1 != Op_ConL && + in2->in(1) == in1 && + op1 != Op_ConL && 0 ) { - Node *shift = phase->transform(new (phase->C, 3) LShiftLNode(in(1),phase->intcon(1))); - return new (phase->C, 3) AddLNode(shift,in(2)->in(2)); + Node *shift = phase->transform(new (phase->C, 3) LShiftLNode(in1,phase->intcon(1))); + return new (phase->C, 3) AddLNode(shift,in2->in(2)); } return AddNode::Ideal(phase, can_reshape); @@ -508,15 +580,25 @@ const Type *temp_t2 = phase->type( in(Offset) ); if( temp_t2 == Type::TOP ) return NULL; const TypeX *t2 = temp_t2->is_intptr_t(); + Node* address; + Node* offset; if( t2->is_con() ) { // The Add of the flattened expression - set_req(Address, addp->in(Address)); - set_req(Offset , phase->MakeConX(t2->get_con() + t12->get_con())); - return this; // Made progress + address = addp->in(Address); + offset = phase->MakeConX(t2->get_con() + t12->get_con()); + } else { + // Else move the constant to the right. ((A+con)+B) into ((A+B)+con) + address = phase->transform(new (phase->C, 4) AddPNode(in(Base),addp->in(Address),in(Offset))); + offset = addp->in(Offset); + } + PhaseIterGVN *igvn = phase->is_IterGVN(); + if( igvn ) { + set_req_X(Address,address,igvn); + set_req_X(Offset,offset,igvn); + } else { + set_req(Address,address); + set_req(Offset,offset); } - // Else move the constant to the right. ((A+con)+B) into ((A+B)+con) - set_req(Address, phase->transform(new (phase->C, 4) AddPNode(in(Base),addp->in(Address),in(Offset)))); - set_req(Offset , addp->in(Offset)); return this; } } @@ -561,8 +643,6 @@ intptr_t txoffset = Type::OffsetBot; if (tx->is_con()) { // Left input is an add of a constant? txoffset = tx->get_con(); - if (txoffset != (int)txoffset) - txoffset = Type::OffsetBot; // oops: add_offset will choke on it } return tp->add_offset(txoffset); } @@ -583,8 +663,6 @@ intptr_t p2offset = Type::OffsetBot; if (p2->is_con()) { // Left input is an add of a constant? 
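The AddPNode::Ideal rewrite above reassociates chained address adds so a constant carried by the inner AddP migrates into the outer offset: ((A + con) + B) becomes ((A + B) + con), and two constant offsets are simply summed. The transformation is plain associativity and commutativity of the byte offsets, as this small check illustrates:

  #include <cassert>
  #include <stddef.h>
  int main() {
    char buffer[64];
    char*     A   = buffer;   // base address input of the inner AddP
    ptrdiff_t con = 8;        // constant offset on the inner AddP
    ptrdiff_t B   = 16;       // non-constant offset, e.g. a scaled array index
    assert((A + con) + B == (A + B) + con);   // both name the same slot
    return 0;
  }

Keeping the constant outermost makes it easy for later phases to treat it as a simple displacement, and switching to set_req_X is presumably what lets iterative GVN revisit the affected uses after the edges change.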
p2offset = p2->get_con(); - if (p2offset != (int)p2offset) - p2offset = Type::OffsetBot; // oops: add_offset will choke on it } return p1->add_offset(p2offset); } @@ -663,7 +741,7 @@ // Check for any interesting operand info. // In particular, check for both memory and non-memory operands. // %%%%% Clean this up: use xadd_offset - int con = opnd->constant(); + intptr_t con = opnd->constant(); if ( con == TypePtr::OffsetBot ) goto bottom_out; offset += con; con = opnd->constant_disp(); @@ -683,6 +761,8 @@ guarantee(tptr == NULL, "must be only one pointer operand"); tptr = et->isa_oopptr(); guarantee(tptr != NULL, "non-int operand must be pointer"); + if (tptr->higher_equal(tp->add_offset(tptr->offset()))) + tp = tptr; // Set more precise type for bailout continue; } if ( eti->_hi != eti->_lo ) goto bottom_out; --- old/hotspot/src/share/vm/opto/block.cpp 2009-08-01 04:13:20.177422835 +0100 +++ new/hotspot/src/share/vm/opto/block.cpp 2009-08-01 04:13:20.080196036 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)block.cpp 1.172 07/09/28 10:23:15 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,6 +60,14 @@ _blocks[i] = b; } +#ifndef PRODUCT +void Block_List::print() { + for (uint i=0; i < size(); i++) { + tty->print("B%d ", _blocks[i]->_pre_order); + } + tty->print("size = %d\n", size()); +} +#endif //============================================================================= @@ -69,6 +77,12 @@ // Check for Start block if( _pre_order == 1 ) return InteriorEntryAlignment; // Check for loop alignment + if (has_loop_alignment()) return loop_alignment(); + + return 1; // no particular alignment +} + +uint Block::compute_loop_alignment() { Node *h = head(); if( h->is_Loop() && h->as_Loop()->is_inner_loop() ) { // Pre- and post-loops have low trip count so do not bother with @@ -86,14 +100,16 @@ } return OptoLoopAlignment; // Otherwise align loop head } + return 1; // no particular alignment } //----------------------------------------------------------------------------- // Compute the size of first 'inst_cnt' instructions in this block. -// Return the number of instructions left to compute if the block has -// less then 'inst_cnt' instructions. -uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt, +// Return the number of instructions left to compute if the block has +// less then 'inst_cnt' instructions. Stop, and return 0 if sum_size +// exceeds OptoLoopAlignment. +uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra) { uint last_inst = _nodes.size(); for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) { @@ -310,6 +326,8 @@ tty->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order); // Dump any loop-specific bits, especially for CountedLoops. loop->dump_spec(tty); + } else if (has_loop_alignment()) { + tty->print(" top-of-loop"); } tty->print(" Freq: %g",_freq); if( Verbose || WizardMode ) { @@ -470,6 +488,10 @@ // get successor block succ_no assert(succ_no < in->_num_succs, "illegal successor number"); Block* out = in->_succs[succ_no]; + // Compute frequency of the new block. Do this before inserting + // new block in case succ_prob() needs to infer the probability from + // surrounding blocks. 
+ float freq = in->_freq * in->succ_prob(succ_no); // get ProjNode corresponding to the succ_no'th successor of the in block ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj(); // create region for basic block @@ -494,6 +516,8 @@ } // remap predecessor's successor to new block in->_succs.map(succ_no, block); + // Set the frequency of the new block + block->_freq = freq; // add new basic block to basic block list _blocks.insert(block_no + 1, block); _num_blocks++; @@ -506,9 +530,11 @@ int branch_idx = b->_nodes.size() - b->_num_succs-1; if( branch_idx < 1 ) return false; Node *bra = b->_nodes[branch_idx]; - if( bra->is_Catch() ) return true; + if( bra->is_Catch() ) + return true; if( bra->is_Mach() ) { - if( bra->is_MachNullCheck() ) return true; + if( bra->is_MachNullCheck() ) + return true; int iop = bra->as_Mach()->ideal_Opcode(); if( iop == Op_FastLock || iop == Op_FastUnlock ) return true; @@ -554,10 +580,10 @@ dead->_nodes[k]->del_req(j); } -//------------------------------MoveToNext------------------------------------- +//------------------------------move_to_next----------------------------------- // Helper function to move block bx to the slot following b_index. Return // true if the move is successful, otherwise false -bool PhaseCFG::MoveToNext(Block* bx, uint b_index) { +bool PhaseCFG::move_to_next(Block* bx, uint b_index) { if (bx == NULL) return false; // Return false if bx is already scheduled. @@ -588,9 +614,9 @@ return true; } -//------------------------------MoveToEnd-------------------------------------- +//------------------------------move_to_end------------------------------------ // Move empty and uncommon blocks to the end. -void PhaseCFG::MoveToEnd(Block *b, uint i) { +void PhaseCFG::move_to_end(Block *b, uint i) { int e = b->is_Empty(); if (e != Block::not_empty) { if (e == Block::empty_with_goto) { @@ -606,15 +632,31 @@ _blocks.push(b); } -//------------------------------RemoveEmpty------------------------------------ -// Remove empty basic blocks and useless branches. -void PhaseCFG::RemoveEmpty() { +//---------------------------set_loop_alignment-------------------------------- +// Set loop alignment for every block +void PhaseCFG::set_loop_alignment() { + uint last = _num_blocks; + assert( _blocks[0] == _broot, "" ); + + for (uint i = 1; i < last; i++ ) { + Block *b = _blocks[i]; + if (b->head()->is_Loop()) { + b->set_loop_alignment(b); + } + } +} + +//-----------------------------remove_empty------------------------------------ +// Make empty basic blocks to be "connector" blocks, Move uncommon blocks +// to the end. +void PhaseCFG::remove_empty() { // Move uncommon blocks to the end uint last = _num_blocks; - uint i; assert( _blocks[0] == _broot, "" ); - for( i = 1; i < last; i++ ) { + + for (uint i = 1; i < last; i++) { Block *b = _blocks[i]; + if (b->is_connector()) break; // Check for NeverBranch at block end. This needs to become a GOTO to the // true target. NeverBranch are treated as a conditional branch that @@ -626,37 +668,40 @@ convert_NeverBranch_to_Goto(b); // Look for uncommon blocks and move to end. - if( b->is_uncommon(_bbs) ) { - MoveToEnd(b, i); - last--; // No longer check for being uncommon! - if( no_flip_branch(b) ) { // Fall-thru case must follow? - b = _blocks[i]; // Find the fall-thru block - MoveToEnd(b, i); - last--; + if (!C->do_freq_based_layout()) { + if( b->is_uncommon(_bbs) ) { + move_to_end(b, i); + last--; // No longer check for being uncommon! 
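insert_goto_at now fixes the frequency of the freshly inserted block up front: it inherits the edge's frequency, freq = in->_freq * in->succ_prob(succ_no). For example, splitting a successor edge taken with probability 0.3 out of a block executed 1000 times gives the new connector block a frequency of 300. Computing it before the block is spliced in matters because, as the comment notes, succ_prob() may have to infer the probability from the surrounding blocks, and the frequency-based layout code added below orders blocks and traces by exactly these _freq values.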
+ if( no_flip_branch(b) ) { // Fall-thru case must follow? + b = _blocks[i]; // Find the fall-thru block + move_to_end(b, i); + last--; + } + i--; // backup block counter post-increment } - i--; // backup block counter post-increment } } - // Remove empty blocks - uint j1; + // Move empty blocks to the end last = _num_blocks; - for( i=0; i < last; i++ ) { + for (uint i = 1; i < last; i++) { Block *b = _blocks[i]; - if (i > 0) { - if (b->is_Empty() != Block::not_empty) { - MoveToEnd(b, i); - last--; - i--; - } + if (b->is_Empty() != Block::not_empty) { + move_to_end(b, i); + last--; + i--; } } // End of for all blocks +} +//-----------------------------fixup_flow-------------------------------------- +// Fix up the final control flow for basic blocks. +void PhaseCFG::fixup_flow() { // Fixup final control flow for the blocks. Remove jump-to-next // block. If neither arm of a IF follows the conditional branch, we // have to add a second jump after the conditional. We place the // TRUE branch target in succs[0] for both GOTOs and IFs. - for( i=0; i < _num_blocks; i++ ) { + for (uint i=0; i < _num_blocks; i++) { Block *b = _blocks[i]; b->_pre_order = i; // turn pre-order into block-index @@ -697,7 +742,7 @@ } } // Remove all CatchProjs - for (j1 = 0; j1 < b->_num_succs; j1++) b->_nodes.pop(); + for (uint j1 = 0; j1 < b->_num_succs; j1++) b->_nodes.pop(); } else if (b->_num_succs == 1) { // Block ends in a Goto? @@ -727,8 +772,7 @@ // successors after the current one, provided that the // successor was previously unscheduled, but moveable // (i.e., all paths to it involve a branch). - if( bnext != bs0 && bnext != bs1 ) { - + if( !C->do_freq_based_layout() && bnext != bs0 && bnext != bs1 ) { // Choose the more common successor based on the probability // of the conditional branch. Block *bx = bs0; @@ -748,9 +792,9 @@ } // Attempt the more common successor first - if (MoveToNext(bx, i)) { + if (move_to_next(bx, i)) { bnext = bx; - } else if (MoveToNext(by, i)) { + } else if (move_to_next(by, i)) { bnext = by; } } @@ -771,10 +815,8 @@ // Flip projection for each target { ProjNode *tmp = proj0; proj0 = proj1; proj1 = tmp; } - } else if( bnext == bs1 ) { // Fall-thru is already in succs[1] - - } else { // Else need a double-branch - + } else if( bnext != bs1 ) { + // Need a double-branch // The existing conditional branch need not change. // Add a unconditional branch to the false target. // Alas, it must appear in its own block and adding a @@ -783,9 +825,10 @@ } // Make sure we TRUE branch to the target - if( proj0->Opcode() == Op_IfFalse ) + if( proj0->Opcode() == Op_IfFalse ) { iff->negate(); - + } + b->_nodes.pop(); // Remove IfFalse & IfTrue projections b->_nodes.pop(); @@ -793,9 +836,7 @@ // Multi-exit block, e.g. a switch statement // But we don't need to do anything here } - } // End of for all blocks - } @@ -901,8 +942,8 @@ assert( max <= max_uint, "Must fit within uint" ); // Force the Union-Find mapping to be at least this large extend(max,0); - // Initialize to be the ID mapping. - for( uint i=0; i<_max; i++ ) map(i,i); + // Initialize to be the ID mapping. 
+ for( uint i=0; i= _max ) return idx; uint next = lookup(idx); while( next != idx ) { // Scan chain of equivalences - assert( next < idx, "always union smaller" ); idx = next; // until find a fixed-point next = lookup(idx); } @@ -953,3 +993,491 @@ assert( src < dst, "always union smaller" ); map(dst,src); } + +#ifndef PRODUCT +static void edge_dump(GrowableArray *edges) { + tty->print_cr("---- Edges ----"); + for (int i = 0; i < edges->length(); i++) { + CFGEdge *e = edges->at(i); + if (e != NULL) { + edges->at(i)->dump(); + } + } +} + +static void trace_dump(Trace *traces[], int count) { + tty->print_cr("---- Traces ----"); + for (int i = 0; i < count; i++) { + Trace *tr = traces[i]; + if (tr != NULL) { + tr->dump(); + } + } +} + +void Trace::dump( ) const { + tty->print_cr("Trace (freq %f)", first_block()->_freq); + for (Block *b = first_block(); b != NULL; b = next(b)) { + tty->print(" B%d", b->_pre_order); + if (b->head()->is_Loop()) { + tty->print(" (L%d)", b->compute_loop_alignment()); + } + if (b->has_loop_alignment()) { + tty->print(" (T%d)", b->code_alignment()); + } + } + tty->cr(); +} + +void CFGEdge::dump( ) const { + tty->print(" B%d --> B%d Freq: %f out:%3d%% in:%3d%% State: ", + from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct); + switch(state()) { + case connected: + tty->print("connected"); + break; + case open: + tty->print("open"); + break; + case interior: + tty->print("interior"); + break; + } + if (infrequent()) { + tty->print(" infrequent"); + } + tty->cr(); +} +#endif + +//============================================================================= + +//------------------------------edge_order------------------------------------- +// Comparison function for edges +static int edge_order(CFGEdge **e0, CFGEdge **e1) { + float freq0 = (*e0)->freq(); + float freq1 = (*e1)->freq(); + if (freq0 != freq1) { + return freq0 > freq1 ? -1 : 1; + } + + int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo; + int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo; + + return dist1 - dist0; +} + +//------------------------------trace_frequency_order-------------------------- +// Comparison function for edges +static int trace_frequency_order(const void *p0, const void *p1) { + Trace *tr0 = *(Trace **) p0; + Trace *tr1 = *(Trace **) p1; + Block *b0 = tr0->first_block(); + Block *b1 = tr1->first_block(); + + // The trace of connector blocks goes at the end; + // we only expect one such trace + if (b0->is_connector() != b1->is_connector()) { + return b1->is_connector() ? -1 : 1; + } + + // Pull more frequently executed blocks to the beginning + float freq0 = b0->_freq; + float freq1 = b1->_freq; + if (freq0 != freq1) { + return freq0 > freq1 ? -1 : 1; + } + + int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo; + + return diff; +} + +//------------------------------find_edges------------------------------------- +// Find edges of interest, i.e, those which can fall through. Presumes that +// edges which don't fall through are of low frequency and can be generally +// ignored. Initialize the list of traces. 
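Block-to-trace membership is tracked with the UnionFind above: every block starts as its own trace, union_traces() later folds the higher trace id into the lower one, and Find_compress shortens the lookup chains. A minimal stand-alone model of that mapping (illustrative only, not the HotSpot class):

  #include <vector>

  struct TinyUnionFind {
    std::vector<unsigned> parent;
    explicit TinyUnionFind(unsigned n) : parent(n) {
      for (unsigned i = 0; i < n; i++) parent[i] = i;    // identity mapping, as in reset()
    }
    unsigned find(unsigned i) {                          // Find_compress analogue
      while (parent[i] != i) {
        parent[i] = parent[parent[i]];                   // shorten the chain as we walk
        i = parent[i];
      }
      return i;
    }
    void unite(unsigned lo, unsigned hi) {               // always fold the higher id
      parent[hi] = lo;                                   // into the lower, as Union() asserts
    }
  };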
+void PhaseBlockLayout::find_edges() +{ + // Walk the blocks, creating edges and Traces + uint i; + Trace *tr = NULL; + for (i = 0; i < _cfg._num_blocks; i++) { + Block *b = _cfg._blocks[i]; + tr = new Trace(b, next, prev); + traces[tr->id()] = tr; + + // All connector blocks should be at the end of the list + if (b->is_connector()) break; + + // If this block and the next one have a one-to-one successor + // predecessor relationship, simply append the next block + int nfallthru = b->num_fall_throughs(); + while (nfallthru == 1 && + b->succ_fall_through(0)) { + Block *n = b->_succs[0]; + + // Skip over single-entry connector blocks, we don't want to + // add them to the trace. + while (n->is_connector() && n->num_preds() == 1) { + n = n->_succs[0]; + } + + // We see a merge point, so stop search for the next block + if (n->num_preds() != 1) break; + + i++; + assert(n = _cfg._blocks[i], "expecting next block"); + tr->append(n); + uf->map(n->_pre_order, tr->id()); + traces[n->_pre_order] = NULL; + nfallthru = b->num_fall_throughs(); + b = n; + } + + if (nfallthru > 0) { + // Create a CFGEdge for each outgoing + // edge that could be a fall-through. + for (uint j = 0; j < b->_num_succs; j++ ) { + if (b->succ_fall_through(j)) { + Block *target = b->non_connector_successor(j); + float freq = b->_freq * b->succ_prob(j); + int from_pct = (int) ((100 * freq) / b->_freq); + int to_pct = (int) ((100 * freq) / target->_freq); + edges->append(new CFGEdge(b, target, freq, from_pct, to_pct)); + } + } + } + } + + // Group connector blocks into one trace + for (i++; i < _cfg._num_blocks; i++) { + Block *b = _cfg._blocks[i]; + assert(b->is_connector(), "connector blocks at the end"); + tr->append(b); + uf->map(b->_pre_order, tr->id()); + traces[b->_pre_order] = NULL; + } +} + +//------------------------------union_traces---------------------------------- +// Union two traces together in uf, and null out the trace in the list +void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) +{ + uint old_id = old_trace->id(); + uint updated_id = updated_trace->id(); + + uint lo_id = updated_id; + uint hi_id = old_id; + + // If from is greater than to, swap values to meet + // UnionFind guarantee. + if (updated_id > old_id) { + lo_id = old_id; + hi_id = updated_id; + + // Fix up the trace ids + traces[lo_id] = traces[updated_id]; + updated_trace->set_id(lo_id); + } + + // Union the lower with the higher and remove the pointer + // to the higher. + uf->Union(lo_id, hi_id); + traces[hi_id] = NULL; +} + +//------------------------------grow_traces------------------------------------- +// Append traces together via the most frequently executed edges +void PhaseBlockLayout::grow_traces() +{ + // Order the edges, and drive the growth of Traces via the most + // frequently executed edges. + edges->sort(edge_order); + for (int i = 0; i < edges->length(); i++) { + CFGEdge *e = edges->at(i); + + if (e->state() != CFGEdge::open) continue; + + Block *src_block = e->from(); + Block *targ_block = e->to(); + + // Don't grow traces along backedges? + if (!BlockLayoutRotateLoops) { + if (targ_block->_rpo <= src_block->_rpo) { + targ_block->set_loop_alignment(targ_block); + continue; + } + } + + Trace *src_trace = trace(src_block); + Trace *targ_trace = trace(targ_block); + + // If the edge in question can join two traces at their ends, + // append one trace to the other. 
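grow_traces() is a greedy pass: sort the candidate fall-through edges hottest-first, and whenever an edge runs from the tail of one trace to the head of another, append the second trace to the first so that edge becomes a fall-through. A self-contained toy of that policy (blocks are plain ints and a trace is a vector; none of this is HotSpot API):

  #include <algorithm>
  #include <cstdio>
  #include <vector>
  using std::vector;

  struct EdgeModel { int src, dst; float freq; };
  static bool hotter(const EdgeModel& a, const EdgeModel& b) { return a.freq > b.freq; }

  int main() {
    vector<vector<int> > traces;             // each block starts as its own trace
    vector<int> trace_of;                    // block id -> containing trace
    for (int b = 0; b < 4; b++) { traces.push_back(vector<int>(1, b)); trace_of.push_back(b); }

    EdgeModel e[] = { {0, 1, 900.0f}, {1, 2, 850.0f}, {0, 3, 100.0f} };
    vector<EdgeModel> edges(e, e + 3);
    std::sort(edges.begin(), edges.end(), hotter);        // hottest edges win fall-throughs

    for (size_t i = 0; i < edges.size(); i++) {
      int s = trace_of[edges[i].src], d = trace_of[edges[i].dst];
      if (s == d) continue;                               // edge is interior to one trace
      if (traces[s].back()  != edges[i].src) continue;    // source must end its trace
      if (traces[d].front() != edges[i].dst) continue;    // target must start its trace
      traces[s].insert(traces[s].end(), traces[d].begin(), traces[d].end());
      for (size_t j = 0; j < traces[d].size(); j++) trace_of[traces[d][j]] = s;
      traces[d].clear();
    }
    // Prints "B0 B1 B2": the cold edge to B3 loses, so B3 stays in its own trace.
    for (size_t j = 0; j < traces[trace_of[0]].size(); j++) printf("B%d ", traces[trace_of[0]][j]);
    printf("\n");
    return 0;
  }

The real pass additionally recognizes backedges (handing them to Trace::backedge for loop rotation) and leaves merges that would not produce a fall-through to the later merge_traces passes.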
+ if (src_trace->last_block() == src_block) { + if (src_trace == targ_trace) { + e->set_state(CFGEdge::interior); + if (targ_trace->backedge(e)) { + // Reset i to catch any newly eligible edge + // (Or we could remember the first "open" edge, and reset there) + i = 0; + } + } else if (targ_trace->first_block() == targ_block) { + e->set_state(CFGEdge::connected); + src_trace->append(targ_trace); + union_traces(src_trace, targ_trace); + } + } + } +} + +//------------------------------merge_traces----------------------------------- +// Embed one trace into another, if the fork or join points are sufficiently +// balanced. +void PhaseBlockLayout::merge_traces(bool fall_thru_only) +{ + // Walk the edge list a another time, looking at unprocessed edges. + // Fold in diamonds + for (int i = 0; i < edges->length(); i++) { + CFGEdge *e = edges->at(i); + + if (e->state() != CFGEdge::open) continue; + if (fall_thru_only) { + if (e->infrequent()) continue; + } + + Block *src_block = e->from(); + Trace *src_trace = trace(src_block); + bool src_at_tail = src_trace->last_block() == src_block; + + Block *targ_block = e->to(); + Trace *targ_trace = trace(targ_block); + bool targ_at_start = targ_trace->first_block() == targ_block; + + if (src_trace == targ_trace) { + // This may be a loop, but we can't do much about it. + e->set_state(CFGEdge::interior); + continue; + } + + if (fall_thru_only) { + // If the edge links the middle of two traces, we can't do anything. + // Mark the edge and continue. + if (!src_at_tail & !targ_at_start) { + continue; + } + + // Don't grow traces along backedges? + if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) { + continue; + } + + // If both ends of the edge are available, why didn't we handle it earlier? + assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier."); + + if (targ_at_start) { + // Insert the "targ" trace in the "src" trace if the insertion point + // is a two way branch. + // Better profitability check possible, but may not be worth it. + // Someday, see if the this "fork" has an associated "join"; + // then make a policy on merging this trace at the fork or join. + // For example, other things being equal, it may be better to place this + // trace at the join point if the "src" trace ends in a two-way, but + // the insertion point is one-way. + assert(src_block->num_fall_throughs() == 2, "unexpected diamond"); + e->set_state(CFGEdge::connected); + src_trace->insert_after(src_block, targ_trace); + union_traces(src_trace, targ_trace); + } else if (src_at_tail) { + if (src_trace != trace(_cfg._broot)) { + e->set_state(CFGEdge::connected); + targ_trace->insert_before(targ_block, src_trace); + union_traces(targ_trace, src_trace); + } + } + } else if (e->state() == CFGEdge::open) { + // Append traces, even without a fall-thru connection. + // But leave root entry at the begining of the block list. + if (targ_trace != trace(_cfg._broot)) { + e->set_state(CFGEdge::connected); + src_trace->append(targ_trace); + union_traces(src_trace, targ_trace); + } + } + } +} + +//----------------------------reorder_traces----------------------------------- +// Order the sequence of the traces in some desirable way, and fixup the +// jumps at the end of each block. +void PhaseBlockLayout::reorder_traces(int count) +{ + ResourceArea *area = Thread::current()->resource_area(); + Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count); + Block_List worklist; + int new_count = 0; + + // Compact the traces. 
+ for (int i = 0; i < count; i++) { + Trace *tr = traces[i]; + if (tr != NULL) { + new_traces[new_count++] = tr; + } + } + + // The entry block should be first on the new trace list. + Trace *tr = trace(_cfg._broot); + assert(tr == new_traces[0], "entry trace misplaced"); + + // Sort the new trace list by frequency + qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order); + + // Patch up the successor blocks + _cfg._blocks.reset(); + _cfg._num_blocks = 0; + for (int i = 0; i < new_count; i++) { + Trace *tr = new_traces[i]; + if (tr != NULL) { + tr->fixup_blocks(_cfg); + } + } +} + +//------------------------------PhaseBlockLayout------------------------------- +// Order basic blocks based on frequency +PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg) : + Phase(BlockLayout), + _cfg(cfg) +{ + ResourceMark rm; + ResourceArea *area = Thread::current()->resource_area(); + + // List of traces + int size = _cfg._num_blocks + 1; + traces = NEW_ARENA_ARRAY(area, Trace *, size); + memset(traces, 0, size*sizeof(Trace*)); + next = NEW_ARENA_ARRAY(area, Block *, size); + memset(next, 0, size*sizeof(Block *)); + prev = NEW_ARENA_ARRAY(area, Block *, size); + memset(prev , 0, size*sizeof(Block *)); + + // List of edges + edges = new GrowableArray; + + // Mapping block index --> block_trace + uf = new UnionFind(size); + uf->reset(size); + + // Find edges and create traces. + find_edges(); + + // Grow traces at their ends via most frequent edges. + grow_traces(); + + // Merge one trace into another, but only at fall-through points. + // This may make diamonds and other related shapes in a trace. + merge_traces(true); + + // Run merge again, allowing two traces to be catenated, even if + // one does not fall through into the other. This appends loosely + // related traces to be near each other. + merge_traces(false); + + // Re-order all the remaining traces by frequency + reorder_traces(size); + + assert(_cfg._num_blocks >= (uint) (size - 1), "number of blocks can not shrink"); +} + + +//------------------------------backedge--------------------------------------- +// Edge e completes a loop in a trace. If the target block is head of the +// loop, rotate the loop block so that the loop ends in a conditional branch. +bool Trace::backedge(CFGEdge *e) { + bool loop_rotated = false; + Block *src_block = e->from(); + Block *targ_block = e->to(); + + assert(last_block() == src_block, "loop discovery at back branch"); + if (first_block() == targ_block) { + if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) { + // Find the last block in the trace that has a conditional + // branch. + Block *b; + for (b = last_block(); b != NULL; b = prev(b)) { + if (b->num_fall_throughs() == 2) { + break; + } + } + + if (b != last_block() && b != NULL) { + loop_rotated = true; + + // Rotate the loop by doing two-part linked-list surgery. + append(first_block()); + break_loop_after(b); + } + } + + // Backbranch to the top of a trace + // Scroll foward through the trace from the targ_block. If we find + // a loop head before another loop top, use the the loop head alignment. 
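Trace::backedge() rotates a loop whose trace ends in an unconditional jump back to the head, using the two-step list surgery append(first_block()) followed by break_loop_after(b). Roughly, given a trace

    B1(loop head)   B2(exit test, two-way)   B3   B4(goto B1)

append(B1) links the head behind the old tail and break_loop_after(B2) makes B2 the new tail, so the trace becomes

    B3   B4   B1(loop head)   B2(exit test)

Now the old goto from B4 to B1 is a fall-through and each iteration ends at B2's conditional branch, i.e. the loop is bottom-tested in the emitted layout, which is what the BlockLayoutRotateLoops flag enables.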
+ for (Block *b = targ_block; b != NULL; b = next(b)) { + if (b->has_loop_alignment()) { + break; + } + if (b->head()->is_Loop()) { + targ_block = b; + break; + } + } + + first_block()->set_loop_alignment(targ_block); + + } else { + // Backbranch into the middle of a trace + targ_block->set_loop_alignment(targ_block); + } + + return loop_rotated; +} + +//------------------------------fixup_blocks----------------------------------- +// push blocks onto the CFG list +// ensure that blocks have the correct two-way branch sense +void Trace::fixup_blocks(PhaseCFG &cfg) { + Block *last = last_block(); + for (Block *b = first_block(); b != NULL; b = next(b)) { + cfg._blocks.push(b); + cfg._num_blocks++; + if (!b->is_connector()) { + int nfallthru = b->num_fall_throughs(); + if (b != last) { + if (nfallthru == 2) { + // Ensure that the sense of the branch is correct + Block *bnext = next(b); + Block *bs0 = b->non_connector_successor(0); + + MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach(); + ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj(); + ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj(); + + if (bnext == bs0) { + // Fall-thru case in succs[0], should be in succs[1] + + // Flip targets in _succs map + Block *tbs0 = b->_succs[0]; + Block *tbs1 = b->_succs[1]; + b->_succs.map( 0, tbs1 ); + b->_succs.map( 1, tbs0 ); + + // Flip projections to match targets + b->_nodes.map(b->_nodes.size()-2, proj1); + b->_nodes.map(b->_nodes.size()-1, proj0); + } + } + } + } + } +} --- old/hotspot/src/share/vm/opto/block.hpp 2009-08-01 04:13:21.145319160 +0100 +++ new/hotspot/src/share/vm/opto/block.hpp 2009-08-01 04:13:21.076861126 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)block.hpp 1.102 07/09/25 09:22:14 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -78,6 +78,7 @@ void insert( uint i, Block *n ); uint size() const { return _cnt; } void reset() { _cnt = 0; } + void print(); }; @@ -132,7 +133,11 @@ uint _rpo; // Number in reverse post order walk virtual bool is_block() { return true; } - float succ_prob(uint i); // return probability of i'th successor + float succ_prob(uint i); // return probability of i'th successor + int num_fall_throughs(); // How many fall-through candidate this block has + void update_uncommon_branch(Block* un); // Lower branch prob to uncommon code + bool succ_fall_through(uint i); // Is successor "i" is a fall-through candidate + Block* lone_fall_through(); // Return lone fall-through Block or null Block* dom_lca(Block* that); // Compute LCA in dominator tree. #ifdef ASSERT @@ -147,6 +152,7 @@ // Report the alignment required by this block. Must be a power of 2. // The previous block will insert nops to get this alignment. uint code_alignment(); + uint compute_loop_alignment(); // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies. 
// It is currently also used to scale such frequencies relative to @@ -187,11 +193,12 @@ int current_alignment = current_offset & max_pad; if( current_alignment != 0 ) { uint padding = (block_alignment-current_alignment) & max_pad; - if( !head()->is_Loop() || - padding <= (uint)MaxLoopPad || - first_inst_size() > padding ) { - return padding; + if( has_loop_alignment() && + padding > (uint)MaxLoopPad && + first_inst_size() <= padding ) { + return 0; } + return padding; } } return 0; @@ -205,6 +212,21 @@ void set_connector() { _connector = true; } bool is_connector() const { return _connector; }; + // Loop_alignment will be set for blocks which are at the top of loops. + // The block layout pass may rotate loops such that the loop head may not + // be the sequentially first block of the loop encountered in the linear + // list of blocks. If the layout pass is not run, loop alignment is set + // for each block which is the head of a loop. + uint _loop_alignment; + void set_loop_alignment(Block *loop_top) { + uint new_alignment = loop_top->compute_loop_alignment(); + if (new_alignment > _loop_alignment) { + _loop_alignment = new_alignment; + } + } + uint loop_alignment() const { return _loop_alignment; } + bool has_loop_alignment() const { return loop_alignment() > 0; } + // Create a new Block with given head Node. // Creates the (empty) predecessor arrays. Block( Arena *a, Node *headnode ) @@ -221,9 +243,10 @@ _fhrp_index(1), _raise_LCA_mark(0), _raise_LCA_visited(0), - _first_inst_size(999999), - _connector(false) { - _nodes.push(headnode); + _first_inst_size(999999), + _connector(false), + _loop_alignment(0) { + _nodes.push(headnode); } // Index of 'end' Node @@ -278,7 +301,17 @@ return s; } - // Successor block, after forwarding through connectors + // Return true if b is a successor of this block + bool has_successor(Block* b) const { + for (uint i = 0; i < _num_succs; i++ ) { + if (non_connector_successor(i) == b) { + return true; + } + } + return false; + } + + // Successor block, after forwarding through connectors Block* non_connector_successor(int i) const { return _succs[i]->non_connector(); } @@ -322,7 +355,6 @@ // I'll need a few machine-specific GotoNodes. Clone from this one. MachNode *_goto; - void insert_goto_at(uint block_no, uint succ_no); Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false); void verify_anti_dependences(Block* LCA, Node* load) { @@ -382,10 +414,15 @@ // Compute the instruction global latency with a backwards walk void ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack); + // Set loop alignment + void set_loop_alignment(); + // Remove empty basic blocks - void RemoveEmpty(); - bool MoveToNext(Block* bx, uint b_index); - void MoveToEnd(Block* bx, uint b_index); + void remove_empty(); + void fixup_flow(); + bool move_to_next(Block* bx, uint b_index); + void move_to_end(Block* bx, uint b_index); + void insert_goto_at(uint block_no, uint succ_no); // Check for NeverBranch at block end. This needs to become a GOTO to the // true target. NeverBranch are treated as a conditional branch that always @@ -416,7 +453,7 @@ }; -//------------------------------UnionFindInfo---------------------------------- +//------------------------------UnionFind-------------------------------------- // Map Block indices to a block-index for a cfg-cover. // Array lookup in the optimized case. 
class UnionFind : public ResourceObj { @@ -511,3 +548,166 @@ void dump_tree() const; #endif }; + + +//----------------------------------CFGEdge------------------------------------ +// A edge between two basic blocks that will be embodied by a branch or a +// fall-through. +class CFGEdge : public ResourceObj { + private: + Block * _from; // Source basic block + Block * _to; // Destination basic block + float _freq; // Execution frequency (estimate) + int _state; + bool _infrequent; + int _from_pct; + int _to_pct; + + // Private accessors + int from_pct() const { return _from_pct; } + int to_pct() const { return _to_pct; } + int from_infrequent() const { return from_pct() < BlockLayoutMinDiamondPercentage; } + int to_infrequent() const { return to_pct() < BlockLayoutMinDiamondPercentage; } + + public: + enum { + open, // initial edge state; unprocessed + connected, // edge used to connect two traces together + interior // edge is interior to trace (could be backedge) + }; + + CFGEdge(Block *from, Block *to, float freq, int from_pct, int to_pct) : + _from(from), _to(to), _freq(freq), + _from_pct(from_pct), _to_pct(to_pct), _state(open) { + _infrequent = from_infrequent() || to_infrequent(); + } + + float freq() const { return _freq; } + Block* from() const { return _from; } + Block* to () const { return _to; } + int infrequent() const { return _infrequent; } + int state() const { return _state; } + + void set_state(int state) { _state = state; } + +#ifndef PRODUCT + void dump( ) const; +#endif +}; + + +//-----------------------------------Trace------------------------------------- +// An ordered list of basic blocks. +class Trace : public ResourceObj { + private: + uint _id; // Unique Trace id (derived from initial block) + Block ** _next_list; // Array mapping index to next block + Block ** _prev_list; // Array mapping index to previous block + Block * _first; // First block in the trace + Block * _last; // Last block in the trace + + // Return the block that follows "b" in the trace. + Block * next(Block *b) const { return _next_list[b->_pre_order]; } + void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; } + + // Return the block that preceeds "b" in the trace. + Block * prev(Block *b) const { return _prev_list[b->_pre_order]; } + void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; } + + // We've discovered a loop in this trace. 
Reset last to be "b", and first as + // the block following "b + void break_loop_after(Block *b) { + _last = b; + _first = next(b); + set_prev(_first, NULL); + set_next(_last, NULL); + } + + public: + + Trace(Block *b, Block **next_list, Block **prev_list) : + _first(b), + _last(b), + _next_list(next_list), + _prev_list(prev_list), + _id(b->_pre_order) { + set_next(b, NULL); + set_prev(b, NULL); + }; + + // Return the id number + uint id() const { return _id; } + void set_id(uint id) { _id = id; } + + // Return the first block in the trace + Block * first_block() const { return _first; } + + // Return the last block in the trace + Block * last_block() const { return _last; } + + // Insert a trace in the middle of this one after b + void insert_after(Block *b, Trace *tr) { + set_next(tr->last_block(), next(b)); + if (next(b) != NULL) { + set_prev(next(b), tr->last_block()); + } + + set_next(b, tr->first_block()); + set_prev(tr->first_block(), b); + + if (b == _last) { + _last = tr->last_block(); + } + } + + void insert_before(Block *b, Trace *tr) { + Block *p = prev(b); + assert(p != NULL, "use append instead"); + insert_after(p, tr); + } + + // Append another trace to this one. + void append(Trace *tr) { + insert_after(_last, tr); + } + + // Append a block at the end of this trace + void append(Block *b) { + set_next(_last, b); + set_prev(b, _last); + _last = b; + } + + // Adjust the the blocks in this trace + void fixup_blocks(PhaseCFG &cfg); + bool backedge(CFGEdge *e); + +#ifndef PRODUCT + void dump( ) const; +#endif +}; + +//------------------------------PhaseBlockLayout------------------------------- +// Rearrange blocks into some canonical order, based on edges and their frequencies +class PhaseBlockLayout : public Phase { + PhaseCFG &_cfg; // Control flow graph + + GrowableArray *edges; + Trace **traces; + Block **next; + Block **prev; + UnionFind *uf; + + // Given a block, find its encompassing Trace + Trace * trace(Block *b) { + return traces[uf->Find_compress(b->_pre_order)]; + } + public: + PhaseBlockLayout(PhaseCFG &cfg); + + void find_edges(); + void grow_traces(); + void merge_traces(bool loose_connections); + void reorder_traces(int count); + void union_traces(Trace* from, Trace* to); +}; --- old/hotspot/src/share/vm/opto/buildOopMap.cpp 2009-08-01 04:13:22.044542048 +0100 +++ new/hotspot/src/share/vm/opto/buildOopMap.cpp 2009-08-01 04:13:21.975330956 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)buildOopMap.cpp 1.37 07/05/05 17:06:11 JVM" #endif /* - * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -318,6 +318,26 @@ } } + } else if( t->isa_narrowoop() ) { + assert( !OptoReg::is_valid(_callees[reg]), "oop can't be callee save" ); + // Check for a legal reg name in the oopMap and bailout if it is not. + if (!omap->legal_vm_reg_name(r)) { + regalloc->C->record_method_not_compilable("illegal oopMap register name"); + continue; + } + if( mcall ) { + // Outgoing argument GC mask responsibility belongs to the callee, + // not the caller. Inspect the inputs to the call, to see if + // this live-range is one of them. 
+ uint cnt = mcall->tf()->domain()->cnt(); + uint j; + for( j = TypeFunc::Parms; j < cnt; j++) + if( mcall->in(j) == def ) + break; // reaching def is an argument oop + if( j < cnt ) // arg oops dont go in GC map + continue; // Continue on to the next register + } + omap->set_narrowoop(r); } else if( OptoReg::is_valid(_callees[reg])) { // callee-save? // It's a callee-save value assert( dup_check[_callees[reg]]==0, "trying to callee save same reg twice" ); --- old/hotspot/src/share/vm/opto/bytecodeInfo.cpp 2009-08-01 04:13:22.881097328 +0100 +++ new/hotspot/src/share/vm/opto/bytecodeInfo.cpp 2009-08-01 04:13:22.804715909 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)bytecodeInfo.cpp 1.122 07/05/05 17:06:12 JVM" #endif /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,19 +28,6 @@ #include "incls/_precompiled.incl" #include "incls/_bytecodeInfo.cpp.incl" -// These variables are declared in parse1.cpp -extern int explicit_null_checks_inserted; -extern int explicit_null_checks_elided; -extern int explicit_null_checks_inserted_old; -extern int explicit_null_checks_elided_old; -extern int nodes_created_old; -extern int nodes_created; -extern int methods_parsed_old; -extern int methods_parsed; -extern int methods_seen; -extern int methods_seen_old; - - //============================================================================= //------------------------------InlineTree------------------------------------- InlineTree::InlineTree( Compile* c, const InlineTree *caller_tree, ciMethod* callee, JVMState* caller_jvms, int caller_bci, float site_invoke_ratio ) @@ -82,8 +69,20 @@ for (int i = depth; i != 0; --i) tty->print(" "); } -// positive filter: should send be inlined? returns NULL, if yes, or rejection msg -const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const { +static bool is_init_with_ea(ciMethod* callee_method, + ciMethod* caller_method, Compile* C) { + // True when EA is ON and a java constructor is called or + // a super constructor is called from an inlined java constructor. + return C->do_escape_analysis() && EliminateAllocations && + ( callee_method->is_initializer() || + (caller_method->is_initializer() && + caller_method != C->method() && + caller_method->holder()->is_subclass_of(callee_method->holder())) + ); +} + +// positive filter: should send be inlined? 
returns NULL, if yes, or rejection msg +const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const { // Allows targeted inlining if(callee_method->should_inline()) { *wci_result = *(WarmCallInfo::always_hot()); @@ -100,7 +99,8 @@ int size = callee_method->code_size(); // Check for too many throws (and not too huge) - if(callee_method->interpreter_throwout_count() > InlineThrowCount && size < InlineThrowMaxSize ) { + if(callee_method->interpreter_throwout_count() > InlineThrowCount && + size < InlineThrowMaxSize ) { wci_result->set_profit(wci_result->profit() * 100); if (PrintInlining && Verbose) { print_indent(inline_depth()); @@ -117,8 +117,12 @@ int invoke_count = method()->interpreter_invocation_count(); assert( invoke_count != 0, "Require invokation count greater than zero"); int freq = call_site_count/invoke_count; + // bump the max size if the call is frequent - if ((freq >= InlineFrequencyRatio) || (call_site_count >= InlineFrequencyCount)) { + if ((freq >= InlineFrequencyRatio) || + (call_site_count >= InlineFrequencyCount) || + is_init_with_ea(callee_method, caller_method, C)) { + max_size = C->freq_inline_size(); if (size <= max_size && TraceFrequencyInlining) { print_indent(inline_depth()); @@ -129,7 +133,8 @@ } } else { // Not hot. Check for medium-sized pre-existing nmethod at cold sites. - if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode/4) + if (callee_method->has_compiled_code() && + callee_method->instructions_size() > InlineSmallCode/4) return "already compiled into a medium method"; } if (size > max_size) { @@ -141,9 +146,9 @@ } -// negative filter: should send NOT be inlined? returns NULL, ok to inline, or rejection msg -const char* InlineTree::shouldNotInline(ciMethod *callee_method, WarmCallInfo* wci_result) const { - // negative filter: should send NOT be inlined? returns NULL (--> inline) or rejection msg +// negative filter: should send NOT be inlined? returns NULL, ok to inline, or rejection msg +const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const { + // negative filter: should send NOT be inlined? 
returns NULL (--> inline) or rejection msg if (!UseOldInlining) { const char* fail = NULL; if (callee_method->is_abstract()) fail = "abstract method"; @@ -206,10 +211,24 @@ } // use frequency-based objections only for non-trivial methods - if (callee_method->code_size() <= MaxTrivialSize) return NULL; - if (UseInterpreter && !CompileTheWorld) { // don't use counts with -Xcomp or CTW - if (!callee_method->has_compiled_code() && !callee_method->was_executed_more_than(0)) return "never executed"; - if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return "executed < MinInliningThreshold times"; + if (callee_method->code_size() <= MaxTrivialSize) return NULL; + + // don't use counts with -Xcomp or CTW + if (UseInterpreter && !CompileTheWorld) { + + if (!callee_method->has_compiled_code() && + !callee_method->was_executed_more_than(0)) { + return "never executed"; + } + + if (is_init_with_ea(callee_method, caller_method, C)) { + + // Escape Analysis: inline all executed constructors + + } else if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold, + CompileThreshold >> 1))) { + return "executed < MinInliningThreshold times"; + } } if (callee_method->should_not_inline()) { @@ -222,8 +241,7 @@ //-----------------------------try_to_inline----------------------------------- // return NULL if ok, reason for not inlining otherwise // Relocated from "InliningClosure::try_to_inline" -const char* InlineTree::try_to_inline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) { - ciMethod* caller_method = method(); +const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) { // Old algorithm had funny accumulating BC-size counters if (UseOldInlining && ClipInlining @@ -232,25 +250,47 @@ } const char *msg = NULL; - if ((msg = shouldInline(callee_method, caller_bci, profile, wci_result)) != NULL) return msg; - if ((msg = shouldNotInline(callee_method, wci_result)) != NULL) return msg; + if ((msg = shouldInline(callee_method, caller_method, caller_bci, + profile, wci_result)) != NULL) { + return msg; + } + if ((msg = shouldNotInline(callee_method, caller_method, + wci_result)) != NULL) { + return msg; + } bool is_accessor = InlineAccessors && callee_method->is_accessor(); // suppress a few checks for accessors and trivial methods if (!is_accessor && callee_method->code_size() > MaxTrivialSize) { + // don't inline into giant methods - if (C->unique() > (uint)NodeCountInliningCutoff) return "NodeCountInliningCutoff"; + if (C->unique() > (uint)NodeCountInliningCutoff) { + return "NodeCountInliningCutoff"; + } - // don't inline unreached call sites - if (profile.count() == 0) return "call site not reached"; - } + if ((!UseInterpreter || CompileTheWorld) && + is_init_with_ea(callee_method, caller_method, C)) { - if (!C->do_inlining() && InlineAccessors && !is_accessor) return "not an accessor"; + // Escape Analysis stress testing when running Xcomp or CTW: + // inline constructors even if they are not reached. 
- if( inline_depth() > MaxInlineLevel ) return "inlining too deep"; + } else if (profile.count() == 0) { + // don't inline unreached call sites + return "call site not reached"; + } + } + + if (!C->do_inlining() && InlineAccessors && !is_accessor) { + return "not an accessor"; + } + if( inline_depth() > MaxInlineLevel ) { + return "inlining too deep"; + } if( method() == callee_method && - inline_depth() > MaxRecursiveInlineLevel ) return "recursively inlining too deep"; + inline_depth() > MaxRecursiveInlineLevel ) { + return "recursively inlining too deep"; + } int size = callee_method->code_size(); @@ -339,7 +379,7 @@ // Check if inlining policy says no. WarmCallInfo wci = *(initial_wci); - failure_msg = try_to_inline(callee_method, caller_bci, profile, &wci); + failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci); if (failure_msg != NULL && C->log() != NULL) { C->log()->begin_elem("inline_fail reason='"); C->log()->text("%s", failure_msg); @@ -467,27 +507,3 @@ } return iltp; } - -// ---------------------------------------------------------------------------- -#ifndef PRODUCT - -static void per_method_stats() { - // Compute difference between this method's cumulative totals and old totals - int explicit_null_checks_cur = explicit_null_checks_inserted - explicit_null_checks_inserted_old; - int elided_null_checks_cur = explicit_null_checks_elided - explicit_null_checks_elided_old; - - // Print differences - if( explicit_null_checks_cur ) - tty->print_cr("XXX Explicit NULL checks inserted: %d", explicit_null_checks_cur); - if( elided_null_checks_cur ) - tty->print_cr("XXX Explicit NULL checks removed at parse time: %d", elided_null_checks_cur); - - // Store the current cumulative totals - nodes_created_old = nodes_created; - methods_parsed_old = methods_parsed; - methods_seen_old = methods_seen; - explicit_null_checks_inserted_old = explicit_null_checks_inserted; - explicit_null_checks_elided_old = explicit_null_checks_elided; -} - -#endif --- old/hotspot/src/share/vm/opto/c2_globals.hpp 2009-08-01 04:13:23.797875170 +0100 +++ new/hotspot/src/share/vm/opto/c2_globals.hpp 2009-08-01 04:13:23.708764820 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c2_globals.hpp 1.96 07/10/23 13:12:52 JVM" #endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -259,10 +259,10 @@ develop(intx, PrintIdealGraphPort, 4444, \ "Ideal graph printer to network port") \ \ - develop(ccstr, PrintIdealGraphAddress, "127.0.0.1", \ + notproduct(ccstr, PrintIdealGraphAddress, "127.0.0.1", \ "IP address to connect to visualizer") \ \ - develop(ccstr, PrintIdealGraphFile, NULL, \ + notproduct(ccstr, PrintIdealGraphFile, NULL, \ "File to dump ideal graph to. 
If set overrides the " \ "use of the network") \ \ @@ -385,8 +385,33 @@ product(bool, EliminateAllocations, true, \ "Use escape analysis to eliminate allocations") \ \ - product(intx, MaxLabelRootDepth, 1100, \ + notproduct(bool, PrintEliminateAllocations, false, \ + "Print out when allocations are eliminated") \ + \ + product(intx, EliminateAllocationArraySizeLimit, 64, \ + "Array size (number of elements) limit for scalar replacement") \ + \ + product(bool, UseOptoBiasInlining, true, \ + "Generate biased locking code in C2 ideal graph") \ + \ + product(intx, ValueSearchLimit, 1000, \ + "Recursion limit in PhaseMacroExpand::value_from_mem_phi") \ + \ + product(intx, MaxLabelRootDepth, 1100, \ "Maximum times call Label_Root to prevent stack overflow") \ + \ + diagnostic(intx, DominatorSearchLimit, 1000, \ + "Iterations limit in Node::dominates") \ + \ + product(bool, BlockLayoutByFrequency, true, \ + "Use edge frequencies to drive block ordering") \ + \ + product(intx, BlockLayoutMinDiamondPercentage, 20, \ + "Miniumum %% of a successor (predecessor) for which block layout "\ + "a will allow a fork (join) in a single chain") \ + \ + product(bool, BlockLayoutRotateLoops, false, \ + "Allow back branches to be fall throughs in the block layour") \ C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG) --- old/hotspot/src/share/vm/opto/c2compiler.cpp 2009-08-01 04:13:24.737998684 +0100 +++ new/hotspot/src/share/vm/opto/c2compiler.cpp 2009-08-01 04:13:24.655192750 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c2compiler.cpp 1.29 07/05/05 17:06:11 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,6 +38,9 @@ const char* C2Compiler::retry_no_subsuming_loads() { return "retry without subsuming loads"; } +const char* C2Compiler::retry_no_escape_analysis() { + return "retry without escape analysis"; +} void C2Compiler::initialize_runtime() { // Check assumptions used while running ADLC @@ -104,17 +107,23 @@ initialize(); } bool subsume_loads = true; + bool do_escape_analysis = DoEscapeAnalysis; while (!env->failing()) { // Attempt to compile while subsuming loads into machine instructions. - Compile C(env, this, target, entry_bci, subsume_loads); + Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis); // Check result and retry if appropriate. if (C.failure_reason() != NULL) { - if (C.failure_reason_is(retry_no_subsuming_loads())) { + if (C.failure_reason_is(retry_no_subsuming_loads())) { assert(subsume_loads, "must make progress"); subsume_loads = false; continue; // retry } + if (C.failure_reason_is(retry_no_escape_analysis())) { + assert(do_escape_analysis, "must make progress"); + do_escape_analysis = false; + continue; // retry + } // Pass any other failure reason up to the ciEnv. // Note that serious, irreversible failures are already logged // on the ciEnv via env->record_method_not_compilable(). --- old/hotspot/src/share/vm/opto/c2compiler.hpp 2009-08-01 04:13:25.608540750 +0100 +++ new/hotspot/src/share/vm/opto/c2compiler.hpp 2009-08-01 04:13:25.540249281 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)c2compiler.hpp 1.28 07/05/05 17:06:11 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. 
+ * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,6 +53,7 @@ // sentinel value used to trigger backtracking in compile_method(). static const char* retry_no_subsuming_loads(); + static const char* retry_no_escape_analysis(); // Print compilation timers and statistics void print_timers(); --- old/hotspot/src/share/vm/opto/callGenerator.cpp 2009-08-01 04:13:26.446734353 +0100 +++ new/hotspot/src/share/vm/opto/callGenerator.cpp 2009-08-01 04:13:26.377513097 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)callGenerator.cpp 1.49 07/08/07 15:24:21 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -467,6 +467,12 @@ } } + if (kit.stopped()) { + // Instance exactly does not matches the desired type. + kit.set_jvms(slow_jvms); + return kit.transfer_exceptions_into_jvms(); + } + // fall through if the instance exactly matches the desired type kit.replace_in_map(receiver, exact_receiver); --- old/hotspot/src/share/vm/opto/callnode.cpp 2009-08-01 04:13:27.335943441 +0100 +++ new/hotspot/src/share/vm/opto/callnode.cpp 2009-08-01 04:13:27.249307460 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)callnode.cpp 1.238 07/10/04 14:36:00 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -233,6 +233,7 @@ _locoff = TypeFunc::Parms; _stkoff = _locoff + _method->max_locals(); _monoff = _stkoff + _method->max_stack(); + _scloff = _monoff; _endoff = _monoff; _sp = 0; } @@ -245,6 +246,7 @@ _locoff = TypeFunc::Parms; _stkoff = _locoff; _monoff = _stkoff + stack_size; + _scloff = _monoff; _endoff = _monoff; _sp = 0; } @@ -300,12 +302,22 @@ return total; } +#ifndef PRODUCT + //------------------------------format_helper---------------------------------- // Given an allocation (a Chaitin object) and a Node decide if the Node carries // any defined value or not. If it does, print out the register or constant. -#ifndef PRODUCT -static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i ) { +static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray *scobjs ) { if (n == NULL) { st->print(" NULL"); return; } + if (n->is_SafePointScalarObject()) { + // Scalar replacement. 
+ SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject(); + scobjs->append_if_missing(spobj); + int sco_n = scobjs->find(spobj); + assert(sco_n >= 0, ""); + st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n); + return; + } if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined char buf[50]; regalloc->dump_register(n,buf); @@ -325,7 +337,10 @@ case Type::InstPtr: st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop()); break; - case Type::RawPtr: + case Type::NarrowOop: + st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_oopptr()->const_oop()); + break; + case Type::RawPtr: st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr()); break; case Type::DoubleCon: @@ -345,10 +360,8 @@ } } } -#endif //------------------------------format----------------------------------------- -#ifndef PRODUCT void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const { st->print(" #"); if( _method ) { @@ -359,24 +372,25 @@ return; } if (n->is_MachSafePoint()) { + GrowableArray scobjs; MachSafePointNode *mcall = n->as_MachSafePoint(); uint i; // Print locals - for( i = 0; i < (uint)loc_size(); i++ ) - format_helper( regalloc, st, mcall->local(this, i), "L[", i ); + for( i = 0; i < (uint)loc_size(); i++ ) + format_helper( regalloc, st, mcall->local(this, i), "L[", i, &scobjs ); // Print stack for (i = 0; i < (uint)stk_size(); i++) { if ((uint)(_stkoff + i) >= mcall->len()) st->print(" oob "); else - format_helper( regalloc, st, mcall->stack(this, i), "STK[", i ); + format_helper( regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs ); } for (i = 0; (int)i < nof_monitors(); i++) { Node *box = mcall->monitor_box(this, i); Node *obj = mcall->monitor_obj(this, i); if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) { while( !box->is_BoxLock() ) box = box->in(1); - format_helper( regalloc, st, box, "MON-BOX[", i ); + format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs ); } else { OptoReg::Name box_reg = BoxLockNode::stack_slot(box); st->print(" MON-BOX%d=%s+%d", @@ -384,16 +398,78 @@ OptoReg::regname(OptoReg::c_frame_pointer), regalloc->reg2offset(box_reg)); } - format_helper( regalloc, st, obj, "MON-OBJ[", i ); + const char* obj_msg = "MON-OBJ["; + if (EliminateLocks) { + while( !box->is_BoxLock() ) box = box->in(1); + if (box->as_BoxLock()->is_eliminated()) + obj_msg = "MON-OBJ(LOCK ELIMINATED)["; + } + format_helper( regalloc, st, obj, obj_msg, i, &scobjs ); + } + + for (i = 0; i < (uint)scobjs.length(); i++) { + // Scalar replaced objects. 
+ st->print_cr(""); + st->print(" # ScObj" INT32_FORMAT " ", i); + SafePointScalarObjectNode* spobj = scobjs.at(i); + ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass(); + assert(cik->is_instance_klass() || + cik->is_array_klass(), "Not supported allocation."); + ciInstanceKlass *iklass = NULL; + if (cik->is_instance_klass()) { + cik->print_name_on(st); + iklass = cik->as_instance_klass(); + } else if (cik->is_type_array_klass()) { + cik->as_array_klass()->base_element_type()->print_name_on(st); + st->print("[%d]=", spobj->n_fields()); + } else if (cik->is_obj_array_klass()) { + ciType* cie = cik->as_array_klass()->base_element_type(); + int ndim = 1; + while (cie->is_obj_array_klass()) { + ndim += 1; + cie = cie->as_array_klass()->base_element_type(); + } + cie->print_name_on(st); + while (ndim-- > 0) { + st->print("[]"); + } + st->print("[%d]=", spobj->n_fields()); + } + st->print("{"); + uint nf = spobj->n_fields(); + if (nf > 0) { + uint first_ind = spobj->first_index(); + Node* fld_node = mcall->in(first_ind); + ciField* cifield; + if (iklass != NULL) { + st->print(" ["); + cifield = iklass->nonstatic_field_at(0); + cifield->print_name_on(st); + format_helper( regalloc, st, fld_node, ":", 0, &scobjs ); + } else { + format_helper( regalloc, st, fld_node, "[", 0, &scobjs ); + } + for (uint j = 1; j < nf; j++) { + fld_node = mcall->in(first_ind+j); + if (iklass != NULL) { + st->print(", ["); + cifield = iklass->nonstatic_field_at(j); + cifield->print_name_on(st); + format_helper( regalloc, st, fld_node, ":", j, &scobjs ); + } else { + format_helper( regalloc, st, fld_node, ", [", j, &scobjs ); + } + } + } + st->print(" }"); } } st->print_cr(""); if (caller() != NULL) caller()->format(regalloc, n, st); } -#endif -#ifndef PRODUCT -void JVMState::dump_spec(outputStream *st) const { + +void JVMState::dump_spec(outputStream *st) const { if (_method != NULL) { bool printed = false; if (!Verbose) { @@ -422,9 +498,8 @@ } if (caller() != NULL) caller()->dump_spec(st); } -#endif -#ifndef PRODUCT + void JVMState::dump_on(outputStream* st) const { if (_map && !((uintptr_t)_map & 1)) { if (_map->len() > _map->req()) { // _map->has_exceptions() @@ -437,8 +512,8 @@ } _map->dump(2); } - st->print("JVMS depth=%d loc=%d stk=%d mon=%d end=%d mondepth=%d sp=%d bci=%d method=", - depth(), locoff(), stkoff(), monoff(), endoff(), monitor_depth(), sp(), bci()); + st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d method=", + depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci()); if (_method == NULL) { st->print_cr("(none)"); } else { @@ -468,6 +543,7 @@ n->set_locoff(_locoff); n->set_stkoff(_stkoff); n->set_monoff(_monoff); + n->set_scloff(_scloff); n->set_endoff(_endoff); n->set_sp(_sp); n->set_map(_map); @@ -560,6 +636,58 @@ return 0; } +// +// Determine whether the call could modify the field of the specified +// instance at the specified offset. +// +bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) { + const TypeOopPtr *adrInst_t = addr_t->isa_oopptr(); + + // If not an OopPtr or not an instance type, assume the worst. + // Note: currently this method is called only for instance types. + if (adrInst_t == NULL || !adrInst_t->is_known_instance()) { + return true; + } + // The instance_id is set only for scalar-replaceable allocations which + // are not passed as arguments according to Escape Analysis. + return false; +} + +// Does this call have a direct reference to n other than debug information? 
+bool CallNode::has_non_debug_use(Node *n) { + const TypeTuple * d = tf()->domain(); + for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { + Node *arg = in(i); + if (arg == n) { + return true; + } + } + return false; +} + +// Returns the unique CheckCastPP of a call +// or 'this' if there are several CheckCastPP +// or returns NULL if there is no one. +Node *CallNode::result_cast() { + Node *cast = NULL; + + Node *p = proj_out(TypeFunc::Parms); + if (p == NULL) + return NULL; + + for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) { + Node *use = p->fast_out(i); + if (use->is_CheckCastPP()) { + if (cast != NULL) { + return this; // more than 1 CheckCastPP + } + cast = use; + } + } + return cast; +} + + //============================================================================= uint CallJavaNode::size_of() const { return sizeof(*this); } uint CallJavaNode::cmp( const Node &n ) const { @@ -713,9 +841,7 @@ //------------------------------Ideal------------------------------------------ // Skip over any collapsed Regions Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) { - if (remove_dead_region(phase, can_reshape)) return this; - - return NULL; + return remove_dead_region(phase, can_reshape) ? this : NULL; } //------------------------------Identity--------------------------------------- @@ -768,6 +894,7 @@ void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) { assert((int)grow_by > 0, "sanity"); int monoff = jvms->monoff(); + int scloff = jvms->scloff(); int endoff = jvms->endoff(); assert(endoff == (int)req(), "no other states or debug info after me"); Node* top = Compile::current()->top(); @@ -775,6 +902,7 @@ ins_req(monoff, top); } jvms->set_monoff(monoff + grow_by); + jvms->set_scloff(scloff + grow_by); jvms->set_endoff(endoff + grow_by); } @@ -784,13 +912,16 @@ const int MonitorEdges = 2; assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges"); assert(req() == jvms()->endoff(), "correct sizing"); + int nextmon = jvms()->scloff(); if (GenerateSynchronizationCode) { add_req(lock->box_node()); add_req(lock->obj_node()); } else { - add_req(NULL); - add_req(NULL); + Node* top = Compile::current()->top(); + add_req(top); + add_req(top); } + jvms()->set_scloff(nextmon+MonitorEdges); jvms()->set_endoff(req()); } @@ -798,10 +929,13 @@ // Delete last monitor from debug info debug_only(int num_before_pop = jvms()->nof_monitors()); const int MonitorEdges = (1<scloff(); int endoff = jvms()->endoff(); + int new_scloff = scloff - MonitorEdges; int new_endoff = endoff - MonitorEdges; + jvms()->set_scloff(new_scloff); jvms()->set_endoff(new_endoff); - while (endoff > new_endoff) del_req(--endoff); + while (scloff > new_scloff) del_req(--scloff); assert(jvms()->nof_monitors() == num_before_pop-1, ""); } @@ -825,6 +959,64 @@ return (TypeFunc::Parms == idx); } +//============== SafePointScalarObjectNode ============== + +SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, +#ifdef ASSERT + AllocateNode* alloc, +#endif + uint first_index, + uint n_fields) : + TypeNode(tp, 1), // 1 control input -- seems required. Get from root. 
+#ifdef ASSERT + _alloc(alloc), +#endif + _first_index(first_index), + _n_fields(n_fields) +{ + init_class_id(Class_SafePointScalarObject); +} + +bool SafePointScalarObjectNode::pinned() const { return true; } + +uint SafePointScalarObjectNode::ideal_reg() const { + return 0; // No matching to machine instruction +} + +const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const { + return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); +} + +const RegMask &SafePointScalarObjectNode::out_RegMask() const { + return RegMask::Empty; +} + +uint SafePointScalarObjectNode::match_edge(uint idx) const { + return 0; +} + +SafePointScalarObjectNode* +SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const { + void* cached = (*sosn_map)[(void*)this]; + if (cached != NULL) { + return (SafePointScalarObjectNode*)cached; + } + Compile* C = Compile::current(); + SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone(); + res->_first_index += jvms_adj; + sosn_map->Insert((void*)this, (void*)res); + return res; +} + + +#ifndef PRODUCT +void SafePointScalarObjectNode::dump_spec(outputStream *st) const { + st->print(" # fields@[%d..%d]", first_index(), + first_index() + n_fields() - 1); +} + +#endif + //============================================================================= uint AllocateNode::size_of() const { return sizeof(*this); } @@ -835,6 +1027,7 @@ { init_class_id(Class_Allocate); init_flags(Flag_is_macro); + _is_scalar_replaceable = false; Node *topnode = C->top(); init_req( TypeFunc::Control , ctrl ); @@ -852,6 +1045,39 @@ //============================================================================= uint AllocateArrayNode::size_of() const { return sizeof(*this); } +// Retrieve the length from the AllocateArrayNode. Narrow the type with a +// CastII, if appropriate. If we are not allowed to create new nodes, and +// a CastII is appropriate, return NULL. +Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) { + Node *length = in(AllocateNode::ALength); + assert(length != NULL, "length is not null"); + + const TypeInt* length_type = phase->find_int_type(length); + const TypeAryPtr* ary_type = oop_type->isa_aryptr(); + + if (ary_type != NULL && length_type != NULL) { + const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type); + if (narrow_length_type != length_type) { + // Assert one of: + // - the narrow_length is 0 + // - the narrow_length is not wider than length + assert(narrow_length_type == TypeInt::ZERO || + (narrow_length_type->_hi <= length_type->_hi && + narrow_length_type->_lo >= length_type->_lo), + "narrow type must be narrower than length type"); + + // Return NULL if new nodes are not allowed + if (!allow_new_nodes) return NULL; + // Create a cast which is control dependent on the initialization to + // propagate the fact that the array length must be positive. 
+ length = new (phase->C, 2) CastIINode(length, narrow_length_type); + length->set_req(0, initialization()->proj_out(0)); + } + } + + return length; +} + //============================================================================= uint LockNode::size_of() const { return sizeof(*this); } @@ -1154,7 +1380,7 @@ //============================================================================= Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) { - // perform any generic optimizations first + // perform any generic optimizations first (returns 'this' or NULL) Node *result = SafePointNode::Ideal(phase, can_reshape); // Now see if we can optimize away this lock. We don't actually @@ -1162,7 +1388,20 @@ // prevents macro expansion from expanding the lock. Since we don't // modify the graph, the value returned from this function is the // one computed above. - if (EliminateLocks && !is_eliminated()) { + if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) { + // + // If we are locking an unescaped object, the lock/unlock is unnecessary + // + ConnectionGraph *cgr = phase->C->congraph(); + PointsToNode::EscapeState es = PointsToNode::GlobalEscape; + if (cgr != NULL) + es = cgr->escape_state(obj_node(), phase); + if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) { + // Mark it eliminated to update any counters + this->set_eliminated(); + return result; + } + // // Try lock coarsening // @@ -1202,8 +1441,10 @@ int unlocks = 0; for (int i = 0; i < lock_ops.length(); i++) { AbstractLockNode* lock = lock_ops.at(i); - if (lock->Opcode() == Op_Lock) locks++; - else unlocks++; + if (lock->Opcode() == Op_Lock) + locks++; + else + unlocks++; if (Verbose) { lock->dump(1); } @@ -1219,6 +1460,7 @@ // Mark it eliminated to update any counters lock->set_eliminated(); + lock->set_coarsened(); } } else if (result != NULL && ctrl->is_Region() && iter->_worklist.member(ctrl)) { @@ -1240,7 +1482,7 @@ //============================================================================= Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) { - // perform any generic optimizations first + // perform any generic optimizations first (returns 'this' or NULL) Node * result = SafePointNode::Ideal(phase, can_reshape); // Now see if we can optimize away this unlock. We don't actually @@ -1248,66 +1490,18 @@ // prevents macro expansion from expanding the unlock. Since we don't // modify the graph, the value returned from this function is the // one computed above. - if (EliminateLocks && !is_eliminated()) { + // Escape state is defined after Parse phase. + if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) { // - // If we are unlocking an unescaped object, the lock/unlock is unnecessary - // We can eliminate them if there are no safepoints in the locked region. + // If we are unlocking an unescaped object, the lock/unlock is unnecessary. 
// - ConnectionGraph *cgr = Compile::current()->congraph(); - if (cgr != NULL && cgr->escape_state(obj_node(), phase) == PointsToNode::NoEscape) { - GrowableArray lock_ops; - LockNode *lock = find_matching_lock(this); - if (lock != NULL) { - lock_ops.append(this); - lock_ops.append(lock); - // find other unlocks which pair with the lock we found and add them - // to the list - Node * box = box_node(); - - for (DUIterator_Fast imax, i = box->fast_outs(imax); i < imax; i++) { - Node *use = box->fast_out(i); - if (use->is_Unlock() && use != this) { - UnlockNode *unlock1 = use->as_Unlock(); - if (!unlock1->is_eliminated()) { - LockNode *lock1 = find_matching_lock(unlock1); - if (lock == lock1) - lock_ops.append(unlock1); - else if (lock1 == NULL) { - // we can't find a matching lock, we must assume the worst - lock_ops.trunc_to(0); - break; - } - } - } - } - if (lock_ops.length() > 0) { - - #ifndef PRODUCT - if (PrintEliminateLocks) { - int locks = 0; - int unlocks = 0; - for (int i = 0; i < lock_ops.length(); i++) { - AbstractLockNode* lock = lock_ops.at(i); - if (lock->Opcode() == Op_Lock) locks++; - else unlocks++; - if (Verbose) { - lock->dump(1); - } - } - tty->print_cr("***Eliminated %d unescaped unlocks and %d unescaped locks", unlocks, locks); - } - #endif - - // for each of the identified locks, mark them - // as eliminatable - for (int i = 0; i < lock_ops.length(); i++) { - AbstractLockNode* lock = lock_ops.at(i); - - // Mark it eliminated to update any counters - lock->set_eliminated(); - } - } - } + ConnectionGraph *cgr = phase->C->congraph(); + PointsToNode::EscapeState es = PointsToNode::GlobalEscape; + if (cgr != NULL) + es = cgr->escape_state(obj_node(), phase); + if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) { + // Mark it eliminated to update any counters + this->set_eliminated(); } } return result; --- old/hotspot/src/share/vm/opto/callnode.hpp 2009-08-01 04:13:28.313071563 +0100 +++ new/hotspot/src/share/vm/opto/callnode.hpp 2009-08-01 04:13:28.228352310 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)callnode.hpp 1.195 07/10/04 14:36:00 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,7 @@ class CallLeafNode; class CallLeafNoFPNode; class AllocateNode; -class AllocateArrayNode; +class AllocateArrayNode; class LockNode; class UnlockNode; class JVMState; @@ -94,7 +94,9 @@ class ParmNode : public ProjNode { static const char * const names[TypeFunc::Parms+1]; public: - ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {} + ParmNode( StartNode *src, uint con ) : ProjNode(src,con) { + init_class_id(Class_Parm); + } virtual int Opcode() const; virtual bool is_CFG() const { return (_con == TypeFunc::Control); } virtual uint ideal_reg() const; @@ -185,6 +187,7 @@ uint _locoff; // Offset to locals in input edge mapping uint _stkoff; // Offset to stack in input edge mapping uint _monoff; // Offset to monitors in input edge mapping + uint _scloff; // Offset to fields of scalar objs in input edge mapping uint _endoff; // Offset to end of input edge mapping uint _sp; // Jave Expression Stack Pointer for this state int _bci; // Byte Code Index of this JVM point @@ -208,16 +211,19 @@ uint stkoff() const { return _stkoff; } uint argoff() const { return _stkoff + _sp; } uint monoff() const { return _monoff; } + uint scloff() const { return _scloff; } uint endoff() const { return _endoff; } uint oopoff() const { return debug_end(); } int loc_size() const { return _stkoff - _locoff; } int stk_size() const { return _monoff - _stkoff; } - int mon_size() const { return _endoff - _monoff; } + int mon_size() const { return _scloff - _monoff; } + int scl_size() const { return _endoff - _scloff; } bool is_loc(uint i) const { return i >= _locoff && i < _stkoff; } bool is_stk(uint i) const { return i >= _stkoff && i < _monoff; } - bool is_mon(uint i) const { return i >= _monoff && i < _endoff; } + bool is_mon(uint i) const { return i >= _monoff && i < _scloff; } + bool is_scl(uint i) const { return i >= _scloff && i < _endoff; } uint sp() const { return _sp; } int bci() const { return _bci; } @@ -228,7 +234,9 @@ uint depth() const { return _depth; } uint debug_start() const; // returns locoff of root caller uint debug_end() const; // returns endoff of self - uint debug_size() const { return loc_size() + sp() + mon_size(); } + uint debug_size() const { + return loc_size() + sp() + mon_size() + scl_size(); + } uint debug_depth() const; // returns sum of debug_size values at all depths // Returns the JVM state at the desired depth (1 == root). @@ -255,8 +263,11 @@ void set_locoff(uint off) { _locoff = off; } void set_stkoff(uint off) { _stkoff = off; } void set_monoff(uint off) { _monoff = off; } + void set_scloff(uint off) { _scloff = off; } void set_endoff(uint off) { _endoff = off; } - void set_offsets(uint off) { _locoff = _stkoff = _monoff = _endoff = off; } + void set_offsets(uint off) { + _locoff = _stkoff = _monoff = _scloff = _endoff = off; + } void set_map(SafePointNode *map) { _map = map; } void set_sp(uint sp) { _sp = sp; } void set_bci(int bci) { _bci = bci; } @@ -400,6 +411,51 @@ #endif }; +//------------------------------SafePointScalarObjectNode---------------------- +// A SafePointScalarObjectNode represents the state of a scalarized object +// at a safepoint. + +class SafePointScalarObjectNode: public TypeNode { + uint _first_index; // First input edge index of a SafePoint node where + // states of the scalarized object fields are collected. + uint _n_fields; // Number of non-static fields of the scalarized object. 
+ DEBUG_ONLY(AllocateNode* _alloc;) +public: + SafePointScalarObjectNode(const TypeOopPtr* tp, +#ifdef ASSERT + AllocateNode* alloc, +#endif + uint first_index, uint n_fields); + virtual int Opcode() const; + virtual uint ideal_reg() const; + virtual const RegMask &in_RegMask(uint) const; + virtual const RegMask &out_RegMask() const; + virtual uint match_edge(uint idx) const; + + uint first_index() const { return _first_index; } + uint n_fields() const { return _n_fields; } + DEBUG_ONLY(AllocateNode* alloc() const { return _alloc; }) + + // SafePointScalarObject should be always pinned to the control edge + // of the SafePoint node for which it was generated. + virtual bool pinned() const; // { return true; } + + virtual uint size_of() const { return sizeof(*this); } + + // Assumes that "this" is an argument to a safepoint node "s", and that + // "new_call" is being created to correspond to "s". But the difference + // between the start index of the jvmstates of "new_call" and "s" is + // "jvms_adj". Produce and return a SafePointScalarObjectNode that + // corresponds appropriately to "this" in "new_call". Assumes that + // "sosn_map" is a map, specific to the translation of "s" to "new_call", + // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies. + SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const; + +#ifndef PRODUCT + virtual void dump_spec(outputStream *st) const; +#endif +}; + //------------------------------CallNode--------------------------------------- // Call nodes now subsume the function of debug nodes at callsites, so they // contain the functionality of a full scope chain of debug nodes. @@ -408,7 +464,6 @@ const TypeFunc *_tf; // Function type address _entry_point; // Address of method being called float _cnt; // Estimate of number of times called - PointsToNode::EscapeState _escape_state; CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type) : SafePointNode(tf->domain()->cnt(), NULL, adr_type), @@ -418,7 +473,6 @@ { init_class_id(Class_Call); init_flags(Flag_is_Call); - _escape_state = PointsToNode::UnknownEscape; } const TypeFunc* tf() const { return _tf; } @@ -444,6 +498,15 @@ // the node the JVMState must be cloned. virtual void clone_jvms() { } // default is not to clone + // Returns true if the call may modify n + virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase); + // Does this node have a use of n other than in debug information? + bool has_non_debug_use(Node *n); + // Returns the unique CheckCastPP of a call + // or result projection is there are several CheckCastPP + // or returns NULL if there is no one. + Node *result_cast(); + virtual uint match_edge(uint idx) const; #ifndef PRODUCT @@ -627,6 +690,8 @@ return TypeFunc::make(domain, range); } + bool _is_scalar_replaceable; // Result of Escape Analysis + virtual uint size_of() const; // Size is bigger AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, Node *size, Node *klass_node, Node *initial_test); @@ -638,6 +703,9 @@ virtual uint ideal_reg() const { return Op_RegP; } virtual bool guaranteed_safepoint() { return false; } + // allocations do not modify their arguments + virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false;} + // Pattern-match a possible usage of AllocateNode. // Return null if no allocation is recognized. // The operand is the pointer produced by the (possible) allocation. @@ -661,7 +729,8 @@ // Conservatively small estimate of offset of first non-header byte. 
int minimum_header_size() { - return is_AllocateArray() ? sizeof(arrayOopDesc) : sizeof(oopDesc); + return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : + instanceOopDesc::base_offset_in_bytes(); } // Return the corresponding initialization barrier (or null if none). @@ -693,6 +762,15 @@ virtual int Opcode() const; virtual uint size_of() const; // Size is bigger + // Dig the length operand out of a array allocation site. + Node* Ideal_length() { + return in(AllocateNode::ALength); + } + + // Dig the length operand out of a array allocation site and narrow the + // type with a CastII, if necesssary + Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true); + // Pattern-match a possible usage of AllocateArrayNode. // Return null if no allocation is recognized. static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) { @@ -700,18 +778,13 @@ return (allo == NULL || !allo->is_AllocateArray()) ? NULL : allo->as_AllocateArray(); } - - // Dig the length operand out of a (possible) array allocation site. - static Node* Ideal_length(Node* ptr, PhaseTransform* phase) { - AllocateArrayNode* allo = Ideal_array_allocation(ptr, phase); - return (allo == NULL) ? NULL : allo->in(AllocateNode::ALength); - } }; //------------------------------AbstractLockNode----------------------------------- class AbstractLockNode: public CallNode { private: - bool _eliminate; // indicates this lock can be safely eliminated + bool _eliminate; // indicates this lock can be safely eliminated + bool _coarsened; // indicates this lock was coarsened #ifndef PRODUCT NamedCounter* _counter; #endif @@ -732,6 +805,7 @@ public: AbstractLockNode(const TypeFunc *tf) : CallNode(tf, NULL, TypeRawPtr::BOTTOM), + _coarsened(false), _eliminate(false) { #ifndef PRODUCT @@ -750,6 +824,12 @@ // mark node as eliminated and update the counter if there is one void set_eliminated(); + bool is_coarsened() { return _coarsened; } + void set_coarsened() { _coarsened = true; } + + // locking does not modify its arguments + virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase){ return false;} + #ifndef PRODUCT void create_lock_counter(JVMState* s); NamedCounter* counter() const { return _counter; } --- old/hotspot/src/share/vm/opto/cfgnode.cpp 2009-08-01 04:13:29.284805757 +0100 +++ new/hotspot/src/share/vm/opto/cfgnode.cpp 2009-08-01 04:13:29.190576700 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cfgnode.cpp 1.262 08/11/24 12:22:57 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -707,6 +707,69 @@ return mem; } +//------------------------split_out_instance----------------------------------- +// Split out an instance type from a bottom phi. 
+PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const { + const TypeOopPtr *t_oop = at->isa_oopptr(); + assert(t_oop != NULL && t_oop->is_known_instance(), "expecting instance oopptr"); + const TypePtr *t = adr_type(); + assert(type() == Type::MEMORY && + (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM || + t->isa_oopptr() && !t->is_oopptr()->is_known_instance() && + t->is_oopptr()->cast_to_exactness(true) + ->is_oopptr()->cast_to_ptr_type(t_oop->ptr()) + ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop), + "bottom or raw memory required"); + + // Check if an appropriate node already exists. + Node *region = in(0); + for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) { + Node* use = region->fast_out(k); + if( use->is_Phi()) { + PhiNode *phi2 = use->as_Phi(); + if (phi2->type() == Type::MEMORY && phi2->adr_type() == at) { + return phi2; + } + } + } + Compile *C = igvn->C; + Arena *a = Thread::current()->resource_area(); + Node_Array node_map = new Node_Array(a); + Node_Stack stack(a, C->unique() >> 4); + PhiNode *nphi = slice_memory(at); + igvn->register_new_node_with_optimizer( nphi ); + node_map.map(_idx, nphi); + stack.push((Node *)this, 1); + while(!stack.is_empty()) { + PhiNode *ophi = stack.node()->as_Phi(); + uint i = stack.index(); + assert(i >= 1, "not control edge"); + stack.pop(); + nphi = node_map[ophi->_idx]->as_Phi(); + for (; i < ophi->req(); i++) { + Node *in = ophi->in(i); + if (in == NULL || igvn->type(in) == Type::TOP) + continue; + Node *opt = MemNode::optimize_simple_memory_chain(in, at, igvn); + PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : NULL; + if (optphi != NULL && optphi->adr_type() == TypePtr::BOTTOM) { + opt = node_map[optphi->_idx]; + if (opt == NULL) { + stack.push(ophi, i); + nphi = optphi->slice_memory(at); + igvn->register_new_node_with_optimizer( nphi ); + node_map.map(optphi->_idx, nphi); + ophi = optphi; + i = 0; // will get incremented at top of loop + continue; + } + } + nphi->set_req(i, opt); + } + } + return nphi; +} + //------------------------verify_adr_type-------------------------------------- #ifdef ASSERT void PhiNode::verify_adr_type(VectorSet& visited, const TypePtr* at) const { @@ -796,13 +859,20 @@ // Until we have harmony between classes and interfaces in the type // lattice, we must tread carefully around phis which implicitly // convert the one to the other. - const TypeInstPtr* ttip = _type->isa_instptr(); + const TypePtr* ttp = _type->make_ptr(); + const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL; + const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_klassptr() : NULL; bool is_intf = false; if (ttip != NULL) { ciKlass* k = ttip->klass(); if (k->is_loaded() && k->is_interface()) is_intf = true; } + if (ttkp != NULL) { + ciKlass* k = ttkp->klass(); + if (k->is_loaded() && k->is_interface()) + is_intf = true; + } // Default case: merge all inputs const Type *t = Type::TOP; // Merged type starting value @@ -815,7 +885,8 @@ // of all the input types. The lattice is not distributive in // such cases. Ward off asserts in type.cpp by refusing to do // meets between interfaces and proper classes. - const TypeInstPtr* tiip = ti->isa_instptr(); + const TypePtr* tip = ti->make_ptr(); + const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL; if (tiip) { bool ti_is_intf = false; ciKlass* k = tiip->klass(); @@ -859,6 +930,8 @@ // uplift the type. 
if( !t->empty() && ttip && ttip->is_loaded() && ttip->klass()->is_interface() ) { assert(ft == _type, ""); } // Uplift to interface + else if( !t->empty() && ttkp && ttkp->is_loaded() && ttkp->klass()->is_interface() ) + { assert(ft == _type, ""); } // Uplift to interface // Otherwise it's something stupid like non-overlapping int ranges // found on dying counted loops. else @@ -872,12 +945,25 @@ // class-typed Phi and an interface flows in, it's possible that the meet & // join report an interface back out. This isn't possible but happens // because the type system doesn't interact well with interfaces. - const TypeInstPtr *jtip = jt->isa_instptr(); + const TypePtr *jtp = jt->make_ptr(); + const TypeInstPtr *jtip = (jtp != NULL) ? jtp->isa_instptr() : NULL; + const TypeKlassPtr *jtkp = (jtp != NULL) ? jtp->isa_klassptr() : NULL; if( jtip && ttip ) { - if( jtip->is_loaded() && jtip->klass()->is_interface() && - ttip->is_loaded() && !ttip->klass()->is_interface() ) + if( jtip->is_loaded() && jtip->klass()->is_interface() && + ttip->is_loaded() && !ttip->klass()->is_interface() ) { // Happens in a CTW of rt.jar, 320-341, no extra flags - { assert(ft == ttip->cast_to_ptr_type(jtip->ptr()), ""); jt = ft; } + assert(ft == ttip->cast_to_ptr_type(jtip->ptr()) || + ft->isa_narrowoop() && ft->make_ptr() == ttip->cast_to_ptr_type(jtip->ptr()), ""); + jt = ft; + } + } + if( jtkp && ttkp ) { + if( jtkp->is_loaded() && jtkp->klass()->is_interface() && + ttkp->is_loaded() && !ttkp->klass()->is_interface() ) { + assert(ft == ttkp->cast_to_ptr_type(jtkp->ptr()) || + ft->isa_narrowoop() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), ""); + jt = ft; + } } if (jt != ft && jt->base() == ft->base()) { if (jt->isa_int() && @@ -1025,6 +1111,8 @@ if (rc == NULL || phase->type(rc) == Type::TOP) continue; // ignore unreachable control path Node* n = in(i); + if (n == NULL) + continue; Node* un = n->uncast(); if (un == NULL || un == this || phase->type(un) == Type::TOP) { continue; // ignore if top, or in(i) and "this" are in a data cycle @@ -1287,7 +1375,7 @@ Node *n = phi->in(i); if( !n ) return NULL; if( phase->type(n) == Type::TOP ) return NULL; - if( n->Opcode() == Op_ConP ) + if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN ) break; } if( i >= phi->req() ) // Only split for constants @@ -1364,7 +1452,8 @@ // Check inputs of phi's inputs also. // It is much less expensive then full graph walk. uint cnt = in->req(); - for (uint i = 1; i < cnt; ++i) { + uint i = (in->is_Proj() && !in->is_CFG()) ? 0 : 1; + for (; i < cnt; ++i) { Node* m = in->in(i); if (m == (Node*)this) return UnsafeLoop; // Unsafe loop @@ -1412,7 +1501,8 @@ while (nstack.size() != 0) { Node* n = nstack.pop(); uint cnt = n->req(); - for (uint i = 1; i < cnt; i++) { // Only data paths + uint i = (n->is_Proj() && !n->is_CFG()) ? 0 : 1; + for (; i < cnt; i++) { Node* m = n->in(i); if (m == (Node*)this) { return true; // Data loop @@ -1595,7 +1685,11 @@ // compress paths and change unreachable cycles to TOP // If not, we can update the input infinitely along a MergeMem cycle // Equivalent code is in MemNode::Ideal_common - Node *m = phase->transform(n); + Node *m = phase->transform(n); + if (outcnt() == 0) { // Above transform() may kill us! + progress = phase->C->top(); + break; + } // If tranformed to a MergeMem, get the desired slice // Otherwise the returned node represents memory for every slice Node *new_mem = (m->is_MergeMem()) ? 
@@ -1681,11 +1775,74 @@ return result; } } + // + // Other optimizations on the memory chain + // + const TypePtr* at = adr_type(); + for( uint i=1; iis_DecodeN() && ii->bottom_type() == bottom_type()) { + has_decodeN = true; + in_decodeN = ii->in(1); + } else if (!ii->is_Phi()) { + may_push = false; + } + } + + if (has_decodeN && may_push) { + PhaseIterGVN *igvn = phase->is_IterGVN(); + // Note: in_decodeN is used only to define the type of new phi here. + PhiNode *new_phi = PhiNode::make_blank(in(0), in_decodeN); + uint orig_cnt = req(); + for (uint i=1; iis_DecodeN()) { + assert(ii->bottom_type() == bottom_type(), "sanity"); + new_ii = ii->in(1); + } else { + assert(ii->is_Phi(), "sanity"); + if (ii->as_Phi() == this) { + new_ii = new_phi; + } else { + new_ii = new (phase->C, 2) EncodePNode(ii, in_decodeN->bottom_type()); + igvn->register_new_node_with_optimizer(new_ii); + } + } + new_phi->set_req(i, new_ii); + } + igvn->register_new_node_with_optimizer(new_phi, this); + progress = new (phase->C, 2) DecodeNNode(new_phi, bottom_type()); + } } +#endif return progress; // Return any progress } +//------------------------------is_tripcount----------------------------------- +bool PhiNode::is_tripcount() const { + return (in(0) != NULL && in(0)->is_CountedLoop() && + in(0)->as_CountedLoop()->phi() == this); +} + //------------------------------out_RegMask------------------------------------ const RegMask &PhiNode::in_RegMask(uint i) const { return i ? out_RegMask() : RegMask::Empty; @@ -1701,9 +1858,7 @@ #ifndef PRODUCT void PhiNode::dump_spec(outputStream *st) const { TypeNode::dump_spec(st); - if (in(0) != NULL && - in(0)->is_CountedLoop() && - in(0)->as_CountedLoop()->phi() == this) { + if (is_tripcount()) { st->print(" #tripcount"); } } @@ -1892,6 +2047,28 @@ } //============================================================================= +//------------------------------Value------------------------------------------ +// Check for being unreachable. +const Type *NeverBranchNode::Value( PhaseTransform *phase ) const { + if (!in(0) || in(0)->is_top()) return Type::TOP; + return bottom_type(); +} + +//------------------------------Ideal------------------------------------------ +// Check for no longer being part of a loop +Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) { + if (can_reshape && !in(0)->is_Loop()) { + // Dead code elimination can sometimes delete this projection so + // if it's not there, there's nothing to do. + Node* fallthru = proj_out(0); + if (fallthru != NULL) { + phase->is_IterGVN()->subsume_node(fallthru, in(0)); + } + return phase->C->top(); + } + return NULL; +} + #ifndef PRODUCT void NeverBranchNode::format( PhaseRegAlloc *ra_, outputStream *st) const { st->print("%s", Name()); --- old/hotspot/src/share/vm/opto/cfgnode.hpp 2009-08-01 04:13:30.752540171 +0100 +++ new/hotspot/src/share/vm/opto/cfgnode.hpp 2009-08-01 04:13:30.670490622 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)cfgnode.hpp 1.117 07/10/23 13:12:52 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -113,14 +113,15 @@ // input in slot 0. class PhiNode : public TypeNode { const TypePtr* const _adr_type; // non-null only for Type::MEMORY nodes. + const int _inst_id; // Instance id of the memory slice. 
+ const int _inst_index; // Alias index of the instance memory slice. + // Array elements references have the same alias_idx but different offset. + const int _inst_offset; // Offset of the instance memory slice. // Size is bigger to hold the _adr_type field. virtual uint hash() const; // Check the type virtual uint cmp( const Node &n ) const; virtual uint size_of() const { return sizeof(*this); } - // Determine a unique non-trivial input, if any. - // Ignore casts if it helps. Return NULL on failure. - Node* unique_input(PhaseTransform *phase); // Determine if CMoveNode::is_cmove_id can be used at this join point. Node* is_cmove_id(PhaseTransform* phase, int true_path); @@ -130,8 +131,16 @@ Input // Input values are [1..len) }; - PhiNode( Node *r, const Type *t, const TypePtr* at = NULL ) - : TypeNode(t,r->req()), _adr_type(at) { + PhiNode( Node *r, const Type *t, const TypePtr* at = NULL, + const int iid = TypeOopPtr::InstanceTop, + const int iidx = Compile::AliasIdxTop, + const int ioffs = Type::OffsetTop ) + : TypeNode(t,r->req()), + _adr_type(at), + _inst_id(iid), + _inst_index(iidx), + _inst_offset(ioffs) + { init_class_id(Class_Phi); init_req(0, r); verify_adr_type(); @@ -142,6 +151,7 @@ static PhiNode* make( Node* r, Node* x, const Type *t, const TypePtr* at = NULL ); // create a new phi with narrowed memory type PhiNode* slice_memory(const TypePtr* adr_type) const; + PhiNode* split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const; // like make(r, x), but does not initialize the in edges to x static PhiNode* make_blank( Node* r, Node* x ); @@ -155,6 +165,12 @@ return NULL; // not a copy! } + bool is_tripcount() const; + + // Determine a unique non-trivial input, if any. + // Ignore casts if it helps. Return NULL on failure. + Node* unique_input(PhaseTransform *phase); + // Check for a simple dead loop. enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop }; LoopSafety simple_data_loop_check(Node *in) const; @@ -164,6 +180,18 @@ virtual int Opcode() const; virtual bool pinned() const { return in(0) != 0; } virtual const TypePtr *adr_type() const { verify_adr_type(true); return _adr_type; } + + const int inst_id() const { return _inst_id; } + const int inst_index() const { return _inst_index; } + const int inst_offset() const { return _inst_offset; } + bool is_same_inst_field(const Type* tp, int id, int index, int offset) { + return type()->basic_type() == tp->basic_type() && + inst_id() == id && + inst_index() == index && + inst_offset() == offset && + type()->higher_equal(tp); + } + virtual const Type *Value( PhaseTransform *phase ) const; virtual Node *Identity( PhaseTransform *phase ); virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); @@ -221,6 +249,8 @@ MultiBranchNode( uint required ) : MultiNode(required) { init_class_id(Class_MultiBranch); } + // returns required number of users to be well formed. 
+ virtual int required_outcnt() const = 0; }; //------------------------------IfNode----------------------------------------- @@ -310,6 +340,7 @@ virtual const Type *bottom_type() const { return TypeTuple::IFBOTH; } virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); virtual const Type *Value( PhaseTransform *phase ) const; + virtual int required_outcnt() const { return 2; } virtual const RegMask &out_RegMask() const; void dominated_by(Node* prev_dom, PhaseIterGVN* igvn); int is_range_check(Node* &range, Node* &index, jint &offset); @@ -368,6 +399,7 @@ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); virtual const Type *bottom_type() const; virtual bool pinned() const { return true; } + virtual int required_outcnt() const { return _size; } }; //------------------------------JumpNode--------------------------------------- @@ -481,7 +513,9 @@ virtual int Opcode() const; virtual bool pinned() const { return true; }; virtual const Type *bottom_type() const { return TypeTuple::IFBOTH; } - + virtual const Type *Value( PhaseTransform *phase ) const; + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); + virtual int required_outcnt() const { return 2; } virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { } virtual uint size(PhaseRegAlloc *ra_) const { return 0; } #ifndef PRODUCT --- old/hotspot/src/share/vm/opto/chaitin.cpp 2009-08-01 04:13:31.618279127 +0100 +++ new/hotspot/src/share/vm/opto/chaitin.cpp 2009-08-01 04:13:31.524257522 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)chaitin.cpp 1.116 07/09/28 10:23:12 JVM" #endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -443,9 +443,7 @@ assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough"); // This frame must preserve the required fp alignment - const int stack_alignment_in_words = Matcher::stack_alignment_in_slots(); - if (stack_alignment_in_words > 0) - _framesize = round_to(_framesize, Matcher::stack_alignment_in_bytes()); + _framesize = round_to(_framesize, Matcher::stack_alignment_in_slots()); assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" ); #ifndef PRODUCT _total_framesize += _framesize; @@ -685,6 +683,7 @@ break; case Op_RegF: case Op_RegI: + case Op_RegN: case Op_RegFlags: case 0: // not an ideal register lrg.set_num_regs(1); @@ -766,7 +765,7 @@ } // if the LRG is an unaligned pair, we will have to spill // so clear the LRG's register mask if it is not already spilled - if ( !n->is_SpillCopy() && + if ( !n->is_SpillCopy() && (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) && lrgmask.is_misaligned_Pair()) { lrg.Clear(); @@ -1387,8 +1386,8 @@ cisc->ins_req(1,src); // Requires a memory edge } b->_nodes.map(j,cisc); // Insert into basic block - n->replace_by(cisc); // Correct graph - // + n->subsume_by(cisc); // Correct graph + // ++_used_cisc_instructions; #ifndef PRODUCT if( TraceCISCSpill ) { --- old/hotspot/src/share/vm/opto/chaitin.hpp 2009-08-01 04:13:32.540164996 +0100 +++ new/hotspot/src/share/vm/opto/chaitin.hpp 2009-08-01 04:13:32.461240845 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)chaitin.hpp 1.161 08/03/26 10:13:00 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- old/hotspot/src/share/vm/opto/classes.hpp 2009-08-01 04:13:33.383585064 +0100 +++ new/hotspot/src/share/vm/opto/classes.hpp 2009-08-01 04:13:33.298779448 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)classes.hpp 1.178 07/07/19 19:08:27 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,6 +40,7 @@ macro(AllocateArray) macro(AndI) macro(AndL) +macro(AryEq) macro(AtanD) macro(Binary) macro(Bool) @@ -67,6 +68,8 @@ macro(CMoveI) macro(CMoveL) macro(CMoveP) +macro(CMoveN) +macro(CmpN) macro(CmpD) macro(CmpD3) macro(CmpF) @@ -80,7 +83,9 @@ macro(CompareAndSwapI) macro(CompareAndSwapL) macro(CompareAndSwapP) +macro(CompareAndSwapN) macro(Con) +macro(ConN) macro(ConD) macro(ConF) macro(ConI) @@ -103,6 +108,7 @@ macro(CountedLoop) macro(CountedLoopEnd) macro(CreateEx) +macro(DecodeN) macro(DivD) macro(DivF) macro(DivI) @@ -110,6 +116,7 @@ macro(DivMod) macro(DivModI) macro(DivModL) +macro(EncodeP) macro(ExpD) macro(FastLock) macro(FastUnlock) @@ -131,11 +138,13 @@ macro(LoadF) macro(LoadI) macro(LoadKlass) +macro(LoadNKlass) macro(LoadL) macro(LoadL_unaligned) macro(LoadPLocked) macro(LoadLLocked) macro(LoadP) +macro(LoadN) macro(LoadRange) macro(LoadS) macro(Lock) @@ -161,6 +170,7 @@ macro(MoveD2L) macro(MulD) macro(MulF) +macro(MulHiL) macro(MulI) macro(MulL) macro(Multi) @@ -188,6 +198,7 @@ macro(RoundDouble) macro(RoundFloat) macro(SafePoint) +macro(SafePointScalarObject) macro(SCMemProj) macro(SinD) macro(SqrtD) @@ -197,12 +208,14 @@ macro(StoreC) macro(StoreCM) macro(StorePConditional) +macro(StoreIConditional) macro(StoreLConditional) macro(StoreD) macro(StoreF) macro(StoreI) macro(StoreL) macro(StoreP) +macro(StoreN) macro(StrComp) macro(SubD) macro(SubF) --- old/hotspot/src/share/vm/opto/coalesce.cpp 2009-08-01 04:13:34.211488932 +0100 +++ new/hotspot/src/share/vm/opto/coalesce.cpp 2009-08-01 04:13:34.126120120 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)coalesce.cpp 1.196 07/09/28 10:23:11 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -608,7 +608,7 @@ // and def_copy powers the other. After merging, src_def powers // the combined live range. lrgs(lr1)._def = (lrgs(lr1).is_multidef() || - lrgs(lr2).is_multidef() ) + lrgs(lr2).is_multidef() ) ? NodeSentinel : src_def; lrgs(lr2)._def = NULL; // No def for lrg 2 lrgs(lr2).Clear(); // Force empty mask for LRG 2 --- old/hotspot/src/share/vm/opto/compile.cpp 2009-08-01 04:13:35.154614695 +0100 +++ new/hotspot/src/share/vm/opto/compile.cpp 2009-08-01 04:13:35.061461836 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)compile.cpp 1.633 07/09/28 10:23:11 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -316,9 +316,6 @@ _compile->begin_method(); } CompileWrapper::~CompileWrapper() { - if (_compile->failing()) { - _compile->print_method("Failed"); - } _compile->end_method(); if (_compile->scratch_buffer_blob() != NULL) BufferBlob::free(_compile->scratch_buffer_blob()); @@ -336,6 +333,12 @@ tty->print_cr("** Bailout: Recompile without subsuming loads **"); tty->print_cr("*********************************************************"); } + if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) { + // Recompiling without escape analysis + tty->print_cr("*********************************************************"); + tty->print_cr("** Bailout: Recompile without escape analysis **"); + tty->print_cr("*********************************************************"); + } if (env()->break_at_compile()) { // Open the debugger when compiing this method. tty->print("### Breaking when compiling: "); @@ -365,7 +368,12 @@ BufferBlob* blob = BufferBlob::create("Compile::scratch_buffer", size); // Record the buffer blob for next time. set_scratch_buffer_blob(blob); - guarantee(scratch_buffer_blob() != NULL, "Need BufferBlob for code generation"); + // Have we run out of code space? + if (scratch_buffer_blob() == NULL) { + // Let CompilerBroker disable further compilations. + record_failure("Not enough space for scratch buffer in CodeCache"); + return; + } // Initialize the relocation buffers relocInfo* locs_buf = (relocInfo*) blob->instructions_end() - MAX_locs_size; @@ -404,11 +412,6 @@ return buf.code_size(); } -void Compile::record_for_escape_analysis(Node* n) { - if (_congraph != NULL) - _congraph->record_for_escape_analysis(n); -} - // ============================================================================ //------------------------------Compile standard------------------------------- @@ -418,7 +421,7 @@ // the continuation bci for on stack replacement. 
-Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads ) +Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads, bool do_escape_analysis ) : Phase(Compiler), _env(ci_env), _log(ci_env->log()), @@ -433,6 +436,7 @@ _for_igvn(NULL), _warm_calls(NULL), _subsume_loads(subsume_loads), + _do_escape_analysis(do_escape_analysis), _failure_reason(NULL), _code_buffer("Compile::Fill_buffer"), _orig_pc_slot(0), @@ -457,7 +461,16 @@ } TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2); TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false); - set_print_assembly(PrintOptoAssembly || _method->should_print_assembly()); + bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly"); + if (!print_opto_assembly) { + bool print_assembly = (PrintAssembly || _method->should_print_assembly()); + if (print_assembly && !Disassembler::can_decode()) { + tty->print_cr("PrintAssembly request changed to PrintOptoAssembly"); + print_opto_assembly = true; + } + } + set_print_assembly(print_opto_assembly); + set_parsed_irreducible_loop(false); #endif if (ProfileTraps) { @@ -490,9 +503,6 @@ PhaseGVN gvn(node_arena(), estimated_size); set_initial_gvn(&gvn); - if (DoEscapeAnalysis) - _congraph = new ConnectionGraph(this); - { // Scope for timing the parser TracePhase t3("parse", &_t_parser, true); @@ -544,6 +554,8 @@ rethrow_exceptions(kit.transfer_exceptions_into_jvms()); } + print_method("Before RemoveUseless", 3); + // Remove clutter produced by parsing. if (!failing()) { ResourceMark rm; @@ -577,14 +589,32 @@ NOT_PRODUCT( verify_graph_edges(); ) // Perform escape analysis - if (_congraph != NULL) { - NOT_PRODUCT( TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, TimeCompiler); ) - _congraph->compute_escape(); + if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) { + TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true); + // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction. + PhaseGVN* igvn = initial_gvn(); + Node* oop_null = igvn->zerocon(T_OBJECT); + Node* noop_null = igvn->zerocon(T_NARROWOOP); + + _congraph = new(comp_arena()) ConnectionGraph(this); + bool has_non_escaping_obj = _congraph->compute_escape(); + #ifndef PRODUCT if (PrintEscapeAnalysis) { _congraph->dump(); } #endif + // Cleanup. 
+ if (oop_null->outcnt() == 0) + igvn->hash_delete(oop_null); + if (noop_null->outcnt() == 0) + igvn->hash_delete(noop_null); + + if (!has_non_escaping_obj) { + _congraph = NULL; + } + + if (failing()) return; } // Now optimize Optimize(); @@ -678,6 +708,7 @@ _orig_pc_slot(0), _orig_pc_slot_offset_in_bytes(0), _subsume_loads(true), + _do_escape_analysis(false), _failure_reason(NULL), _code_buffer("Compile::Fill_buffer"), _node_bundling_limit(0), @@ -693,6 +724,7 @@ TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false); TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false); set_print_assembly(PrintFrameConverterAssembly); + set_parsed_irreducible_loop(false); #endif CompileWrapper cw(this); Init(/*AliasLevel=*/ 0); @@ -793,6 +825,7 @@ Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist)); set_decompile_count(0); + set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency")); // Compilation level related initialization if (env()->comp_level() == CompLevel_fast_compile) { set_num_loop_opts(Tier1LoopOptsCount); @@ -825,7 +858,7 @@ // Type::update_loaded_types(_method, _method->constants()); // Init alias_type map. - if (!DoEscapeAnalysis && aliaslevel == 3) + if (!_do_escape_analysis && aliaslevel == 3) aliaslevel = 2; // No unique types without escape analysis _AliasLevel = aliaslevel; const int grow_ats = 16; @@ -982,9 +1015,14 @@ int offset = tj->offset(); TypePtr::PTR ptr = tj->ptr(); + // Known instance (scalarizable allocation) alias only with itself. + bool is_known_inst = tj->isa_oopptr() != NULL && + tj->is_oopptr()->is_known_instance(); + // Process weird unsafe references. if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) { assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops"); + assert(!is_known_inst, "scalarizable allocation should not have unsafe references"); tj = TypeOopPtr::BOTTOM; ptr = tj->ptr(); offset = tj->offset(); @@ -992,14 +1030,20 @@ // Array pointers need some flattening const TypeAryPtr *ta = tj->isa_aryptr(); - if( ta && _AliasLevel >= 2 ) { + if( ta && is_known_inst ) { + if ( offset != Type::OffsetBot && + offset > arrayOopDesc::length_offset_in_bytes() ) { + offset = Type::OffsetBot; // Flatten constant access into array body only + tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id()); + } + } else if( ta && _AliasLevel >= 2 ) { // For arrays indexed by constant indices, we flatten the alias // space to include all of the array body. Only the header, klass // and array length can be accessed un-aliased. if( offset != Type::OffsetBot ) { if( ta->const_oop() ) { // methodDataOop or methodOop offset = Type::OffsetBot; // Flatten constant access into array body - tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,Type::OffsetBot, ta->instance_id()); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset); } else if( offset == arrayOopDesc::length_offset_in_bytes() ) { // range is OK as-is. tj = ta = TypeAryPtr::RANGE; @@ -1013,25 +1057,29 @@ ptr = TypePtr::BotPTR; } else { // Random constant offset into array body offset = Type::OffsetBot; // Flatten constant access into array body - tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::OffsetBot, ta->instance_id()); + tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset); } } // Arrays of fixed size alias with arrays of unknown size. 
if (ta->size() != TypeInt::POS) { const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS); - tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset, ta->instance_id()); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset); } // Arrays of known objects become arrays of unknown objects. + if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) { + const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size()); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); + } if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) { const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size()); - tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset, ta->instance_id()); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); } // Arrays of bytes and of booleans both use 'bastore' and 'baload' so // cannot be distinguished by bytecode alone. if (ta->elem() == TypeInt::BOOL) { const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size()); ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE); - tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset, ta->instance_id()); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset); } // During the 2nd round of IterGVN, NotNull castings are removed. // Make sure the Bottom and NotNull variants alias the same. @@ -1051,19 +1099,24 @@ if( ptr == TypePtr::Constant ) { // No constant oop pointers (such as Strings); they alias with // unknown strings. + assert(!is_known_inst, "not scalarizable allocation"); tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); + } else if( is_known_inst ) { + tj = to; // Keep NotNull and klass_is_exact for instance type } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) { // During the 2nd round of IterGVN, NotNull castings are removed. // Make sure the Bottom and NotNull variants alias the same. // Also, make sure exact and non-exact variants alias the same. - tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset, to->instance_id()); + tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); } // Canonicalize the holder of this field ciInstanceKlass *k = to->klass()->as_instance_klass(); - if (offset >= 0 && offset < oopDesc::header_size() * wordSize) { + if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) { // First handle header references such as a LoadKlassNode, even if the // object's klass is unloaded at compile time (4965979). 
- tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset, to->instance_id()); + if (!is_known_inst) { // Do it only for non-instance types + tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset); + } } else if (offset < 0 || offset >= k->size_helper() * wordSize) { to = NULL; tj = TypeOopPtr::BOTTOM; @@ -1071,7 +1124,11 @@ } else { ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset); if (!k->equals(canonical_holder) || tj->offset() != offset) { - tj = to = TypeInstPtr::make(TypePtr::BotPTR, canonical_holder, false, NULL, offset, to->instance_id()); + if( is_known_inst ) { + tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id()); + } else { + tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset); + } } } } @@ -1171,8 +1228,8 @@ _field = NULL; _is_rewritable = true; // default const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL; - if (atoop != NULL && atoop->is_instance()) { - const TypeOopPtr *gt = atoop->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE); + if (atoop != NULL && atoop->is_known_instance()) { + const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot); _general_index = Compile::current()->get_alias_index(gt); } else { _general_index = 0; @@ -1257,7 +1314,9 @@ assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr"); if (flat->isa_oopptr() && !flat->isa_klassptr()) { const TypeOopPtr* foop = flat->is_oopptr(); - const TypePtr* xoop = foop->cast_to_exactness(!foop->klass_is_exact())->is_ptr(); + // Scalarizable allocations have exact klass always. + bool exact = !foop->klass_is_exact() || foop->is_known_instance(); + const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr(); assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type"); } assert(flat == flatten_alias_type(flat), "exact bit doesn't matter"); @@ -1301,7 +1360,7 @@ // Check for final instance fields. const TypeInstPtr* tinst = flat->isa_instptr(); - if (tinst && tinst->offset() >= oopDesc::header_size() * wordSize) { + if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) { ciInstanceKlass *k = tinst->klass()->as_instance_klass(); ciField* field = k->get_field_by_offset(tinst->offset(), false); // Set field() and is_rewritable() attributes. @@ -1461,7 +1520,7 @@ NOT_PRODUCT( verify_graph_edges(); ) - print_method("Start"); + print_method("After Parsing"); { // Iterative Global Value Numbering, including ideal transforms @@ -1476,11 +1535,6 @@ if (failing()) return; - // get rid of the connection graph since it's information is not - // updated by optimizations - _congraph = NULL; - - // Loop transforms on the ideal graph. Range Check Elimination, // peeling, unrolling, etc. @@ -1646,8 +1700,14 @@ // are not adding any new instructions. If any basic block is empty, we // can now safely remove it. { - NOT_PRODUCT( TracePhase t2("removeEmpty", &_t_removeEmptyBlocks, TimeCompiler); ) - cfg.RemoveEmpty(); + NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); ) + cfg.remove_empty(); + if (do_freq_based_layout()) { + PhaseBlockLayout layout(cfg); + } else { + cfg.set_loop_alignment(); + } + cfg.fixup_flow(); } // Perform any platform dependent postallocation verifications. @@ -1668,7 +1728,7 @@ Output(); } - print_method("End"); + print_method("Final Code"); // He's dead, Jim. 
_cfg = (PhaseCFG*)0xdeadbeef; @@ -1722,6 +1782,8 @@ starts_bundle = '+'; } + if (WizardMode) n->dump(); + if( !n->is_Region() && // Dont print in the Assembly !n->is_Phi() && // a few noisely useless nodes !n->is_Proj() && @@ -1746,6 +1808,8 @@ // then back up and print it if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) { assert(delay != NULL, "no unconditional delay instruction"); + if (WizardMode) delay->dump(); + if (node_bundling(delay)->starts_bundle()) starts_bundle = '+'; if (pcs && n->_idx < pc_limit) @@ -1810,7 +1874,7 @@ static bool oop_offset_is_sane(const TypeInstPtr* tp) { ciInstanceKlass *k = tp->klass()->as_instance_klass(); // Make sure the offset goes inside the instance layout. - return (uint)tp->offset() < (uint)(oopDesc::header_size() + k->nonstatic_field_size())*wordSize; + return k->contains_field_offset(tp->offset()); // Note that OffsetBot and OffsetTop are very negative. } @@ -1818,6 +1882,7 @@ // Implement items 1-5 from final_graph_reshaping below. static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) { + if ( n->outcnt() == 0 ) return; // dead node uint nop = n->Opcode(); // Check for 2-input instruction with "last use" on right input. @@ -1884,7 +1949,7 @@ break; case Op_Opaque1: // Remove Opaque Nodes before matching case Op_Opaque2: // Remove Opaque Nodes before matching - n->replace_by(n->in(1)); + n->subsume_by(n->in(1)); break; case Op_CallStaticJava: case Op_CallJava: @@ -1905,10 +1970,11 @@ Node *n = call->in(TypeFunc::Parms); int nop = n->Opcode(); // Clone shared simple arguments to uncommon calls, item (1). - if( n->outcnt() > 1 && - !n->is_Proj() && - nop != Op_CreateEx && - nop != Op_CheckCastPP && + if( n->outcnt() > 1 && + !n->is_Proj() && + nop != Op_CreateEx && + nop != Op_CheckCastPP && + nop != Op_DecodeN && !n->is_Mem() ) { Node *x = n->clone(); call->set_req( TypeFunc::Parms, x ); @@ -1933,20 +1999,25 @@ case Op_StorePConditional: case Op_StoreI: case Op_StoreL: + case Op_StoreIConditional: case Op_StoreLConditional: case Op_CompareAndSwapI: case Op_CompareAndSwapL: case Op_CompareAndSwapP: + case Op_CompareAndSwapN: case Op_StoreP: + case Op_StoreN: case Op_LoadB: case Op_LoadC: case Op_LoadI: case Op_LoadKlass: + case Op_LoadNKlass: case Op_LoadL: case Op_LoadL_unaligned: case Op_LoadPLocked: case Op_LoadLLocked: case Op_LoadP: + case Op_LoadN: case Op_LoadRange: case Op_LoadS: { handle_mem: @@ -1961,20 +2032,214 @@ #endif break; } - case Op_If: - case Op_CountedLoopEnd: - fpu._tests.push(n); // Collect CFG split points - break; case Op_AddP: { // Assert sane base pointers - const Node *addp = n->in(AddPNode::Address); - assert( !addp->is_AddP() || + Node *addp = n->in(AddPNode::Address); + assert( !addp->is_AddP() || addp->in(AddPNode::Base)->is_top() || // Top OK for allocation addp->in(AddPNode::Base) == n->in(AddPNode::Base), "Base pointers must match" ); +#ifdef _LP64 + if (UseCompressedOops && + addp->Opcode() == Op_ConP && + addp == n->in(AddPNode::Base) && + n->in(AddPNode::Offset)->is_Con()) { + // Use addressing with narrow klass to load with offset on x86. + // On sparc loading 32-bits constant and decoding it have less + // instructions (4) then load 64-bits constant (7). + // Do this transformation here since IGVN will convert ConN back to ConP. + const Type* t = addp->bottom_type(); + if (t->isa_oopptr()) { + Node* nn = NULL; + + // Look for existing ConN node of the same exact type. 
+ Compile* C = Compile::current(); + Node* r = C->root(); + uint cnt = r->outcnt(); + for (uint i = 0; i < cnt; i++) { + Node* m = r->raw_out(i); + if (m!= NULL && m->Opcode() == Op_ConN && + m->bottom_type()->make_ptr() == t) { + nn = m; + break; + } + } + if (nn != NULL) { + // Decode a narrow oop to match address + // [R12 + narrow_oop_reg<<3 + offset] + nn = new (C, 2) DecodeNNode(nn, t); + n->set_req(AddPNode::Base, nn); + n->set_req(AddPNode::Address, nn); + if (addp->outcnt() == 0) { + addp->disconnect_inputs(NULL); + } + } + } + } +#endif + break; + } + +#ifdef _LP64 + case Op_CastPP: + if (n->in(1)->is_DecodeN() && UseImplicitNullCheckForNarrowOop) { + Compile* C = Compile::current(); + Node* in1 = n->in(1); + const Type* t = n->bottom_type(); + Node* new_in1 = in1->clone(); + new_in1->as_DecodeN()->set_type(t); + + if (!Matcher::clone_shift_expressions) { + // + // x86, ARM and friends can handle 2 adds in addressing mode + // and Matcher can fold a DecodeN node into address by using + // a narrow oop directly and do implicit NULL check in address: + // + // [R12 + narrow_oop_reg<<3 + offset] + // NullCheck narrow_oop_reg + // + // On other platforms (Sparc) we have to keep new DecodeN node and + // use it to do implicit NULL check in address: + // + // decode_not_null narrow_oop_reg, base_reg + // [base_reg + offset] + // NullCheck base_reg + // + // Pin the new DecodeN node to non-null path on these patforms (Sparc) + // to keep the information to which NULL check the new DecodeN node + // corresponds to use it as value in implicit_null_check(). + // + new_in1->set_req(0, n->in(0)); + } + + n->subsume_by(new_in1); + if (in1->outcnt() == 0) { + in1->disconnect_inputs(NULL); + } + } + break; + + case Op_CmpP: + // Do this transformation here to preserve CmpPNode::sub() and + // other TypePtr related Ideal optimizations (for example, ptr nullness). + if (n->in(1)->is_DecodeN() || n->in(2)->is_DecodeN()) { + Node* in1 = n->in(1); + Node* in2 = n->in(2); + if (!in1->is_DecodeN()) { + in2 = in1; + in1 = n->in(2); + } + assert(in1->is_DecodeN(), "sanity"); + + Compile* C = Compile::current(); + Node* new_in2 = NULL; + if (in2->is_DecodeN()) { + new_in2 = in2->in(1); + } else if (in2->Opcode() == Op_ConP) { + const Type* t = in2->bottom_type(); + if (t == TypePtr::NULL_PTR && UseImplicitNullCheckForNarrowOop) { + new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR); + // + // This transformation together with CastPP transformation above + // will generated code for implicit NULL checks for compressed oops. + // + // The original code after Optimize() + // + // LoadN memory, narrow_oop_reg + // decode narrow_oop_reg, base_reg + // CmpP base_reg, NULL + // CastPP base_reg // NotNull + // Load [base_reg + offset], val_reg + // + // after these transformations will be + // + // LoadN memory, narrow_oop_reg + // CmpN narrow_oop_reg, NULL + // decode_not_null narrow_oop_reg, base_reg + // Load [base_reg + offset], val_reg + // + // and the uncommon path (== NULL) will use narrow_oop_reg directly + // since narrow oops can be used in debug info now (see the code in + // final_graph_reshaping_walk()). 
+ // + // At the end the code will be matched to + // on x86: + // + // Load_narrow_oop memory, narrow_oop_reg + // Load [R12 + narrow_oop_reg<<3 + offset], val_reg + // NullCheck narrow_oop_reg + // + // and on sparc: + // + // Load_narrow_oop memory, narrow_oop_reg + // decode_not_null narrow_oop_reg, base_reg + // Load [base_reg + offset], val_reg + // NullCheck base_reg + // + } else if (t->isa_oopptr()) { + new_in2 = ConNode::make(C, t->make_narrowoop()); + } + } + if (new_in2 != NULL) { + Node* cmpN = new (C, 3) CmpNNode(in1->in(1), new_in2); + n->subsume_by( cmpN ); + if (in1->outcnt() == 0) { + in1->disconnect_inputs(NULL); + } + if (in2->outcnt() == 0) { + in2->disconnect_inputs(NULL); + } + } + } + break; + + case Op_DecodeN: + assert(!n->in(1)->is_EncodeP(), "should be optimized out"); + // DecodeN could be pinned on Sparc where it can't be fold into + // an address expression, see the code for Op_CastPP above. + assert(n->in(0) == NULL || !Matcher::clone_shift_expressions, "no control except on sparc"); + break; + + case Op_EncodeP: { + Node* in1 = n->in(1); + if (in1->is_DecodeN()) { + n->subsume_by(in1->in(1)); + } else if (in1->Opcode() == Op_ConP) { + Compile* C = Compile::current(); + const Type* t = in1->bottom_type(); + if (t == TypePtr::NULL_PTR) { + n->subsume_by(ConNode::make(C, TypeNarrowOop::NULL_PTR)); + } else if (t->isa_oopptr()) { + n->subsume_by(ConNode::make(C, t->make_narrowoop())); + } + } + if (in1->outcnt() == 0) { + in1->disconnect_inputs(NULL); + } break; } + case Op_Phi: + if (n->as_Phi()->bottom_type()->isa_narrowoop()) { + // The EncodeP optimization may create Phi with the same edges + // for all paths. It is not handled well by Register Allocator. + Node* unique_in = n->in(1); + assert(unique_in != NULL, ""); + uint cnt = n->req(); + for (uint i = 2; i < cnt; i++) { + Node* m = n->in(i); + assert(m != NULL, ""); + if (unique_in != m) + unique_in = NULL; + } + if (unique_in != NULL) { + n->subsume_by(unique_in); + } + } + break; + +#endif + case Op_ModI: if (UseDivMod) { // Check if a%b and a/b both exist @@ -1984,13 +2249,13 @@ Compile* C = Compile::current(); if (Matcher::has_match_rule(Op_DivModI)) { DivModINode* divmod = DivModINode::make(C, n); - d->replace_by(divmod->div_proj()); - n->replace_by(divmod->mod_proj()); + d->subsume_by(divmod->div_proj()); + n->subsume_by(divmod->mod_proj()); } else { // replace a%b with a-((a/b)*b) Node* mult = new (C, 3) MulINode(d, d->in(2)); Node* sub = new (C, 3) SubINode(d->in(1), mult); - n->replace_by( sub ); + n->subsume_by( sub ); } } } @@ -2005,13 +2270,13 @@ Compile* C = Compile::current(); if (Matcher::has_match_rule(Op_DivModL)) { DivModLNode* divmod = DivModLNode::make(C, n); - d->replace_by(divmod->div_proj()); - n->replace_by(divmod->mod_proj()); + d->subsume_by(divmod->div_proj()); + n->subsume_by(divmod->mod_proj()); } else { // replace a%b with a-((a/b)*b) Node* mult = new (C, 3) MulLNode(d, d->in(2)); Node* sub = new (C, 3) SubLNode(d->in(1), mult); - n->replace_by( sub ); + n->subsume_by( sub ); } } } @@ -2057,22 +2322,27 @@ // Replace many operand PackNodes with a binary tree for matching PackNode* p = (PackNode*) n; Node* btp = p->binaryTreePack(Compile::current(), 1, n->req()); - n->replace_by(btp); + n->subsume_by(btp); } break; default: assert( !n->is_Call(), "" ); assert( !n->is_Mem(), "" ); - if( n->is_If() || n->is_PCTable() ) - fpu._tests.push(n); // Collect CFG split points break; } + + // Collect CFG split points + if (n->is_MultiBranch()) + fpu._tests.push(n); } 
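When the platform has no DivMod match rule, the Op_ModI/Op_ModL cases just above rewrite a % b as a - ((a / b) * b) using a MulINode and a SubINode. A small self-contained check of that identity for Java-style truncated division (plain C++, purely illustrative):

#include <cassert>

int main() {
  int samples[]  = { 17, -17, 0, 5, -5, 2147483647, -2147483647 };
  int divisors[] = { 3, -3, 7, -7, 16 };
  for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
    for (unsigned j = 0; j < sizeof(divisors) / sizeof(divisors[0]); j++) {
      int a = samples[i], b = divisors[j];
      int mult = (a / b) * b;          // MulINode(d, d->in(2))
      int sub  = a - mult;             // SubINode(d->in(1), mult)
      assert(sub == a % b);            // matches the ModI/ModL replacement
    }
  }
  return 0;
}

Because |(a / b) * b| never exceeds |a|, the rewritten form cannot overflow where the original a % b did not.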
//------------------------------final_graph_reshaping_walk--------------------- // Replacing Opaque nodes with their input in final_graph_reshaping_impl(), // requires that the walk visits a node's inputs before visiting the node. static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &fpu ) { + ResourceArea *area = Thread::current()->resource_area(); + Unique_Node_List sfpt(area); + fpu._visited.set(root->_idx); // first, mark node as visited uint cnt = root->req(); Node *n = root; @@ -2083,6 +2353,8 @@ Node* m = n->in(i); ++i; if (m != NULL && !fpu._visited.test_set(m->_idx)) { + if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL) + sfpt.push(m); cnt = m->req(); nstack.push(n, i); // put on stack parent and next input's index n = m; @@ -2099,6 +2371,41 @@ nstack.pop(); // Shift to the next node on stack } } + + // Go over safepoints nodes to skip DecodeN nodes for debug edges. + // It could be done for an uncommon traps or any safepoints/calls + // if the DecodeN node is referenced only in a debug info. + while (sfpt.size() > 0) { + n = sfpt.pop(); + JVMState *jvms = n->as_SafePoint()->jvms(); + assert(jvms != NULL, "sanity"); + int start = jvms->debug_start(); + int end = n->req(); + bool is_uncommon = (n->is_CallStaticJava() && + n->as_CallStaticJava()->uncommon_trap_request() != 0); + for (int j = start; j < end; j++) { + Node* in = n->in(j); + if (in->is_DecodeN()) { + bool safe_to_skip = true; + if (!is_uncommon ) { + // Is it safe to skip? + for (uint i = 0; i < in->outcnt(); i++) { + Node* u = in->raw_out(i); + if (!u->is_SafePoint() || + u->is_Call() && u->as_Call()->has_non_debug_use(n)) { + safe_to_skip = false; + } + } + } + if (safe_to_skip) { + n->set_req(j, in->in(1)); + } + if (in->outcnt() == 0) { + in->disconnect_inputs(NULL); + } + } + } + } } //------------------------------final_graph_reshaping-------------------------- @@ -2145,19 +2452,18 @@ // Check for unreachable (from below) code (i.e., infinite loops). for( uint i = 0; i < fpu._tests.size(); i++ ) { - Node *n = fpu._tests[i]; - assert( n->is_PCTable() || n->is_If(), "either PCTables or IfNodes" ); - // Get number of CFG targets; 2 for IfNodes or _size for PCTables. + MultiBranchNode *n = fpu._tests[i]->as_MultiBranch(); + // Get number of CFG targets. // Note that PCTables include exception targets after calls. - uint expected_kids = n->is_PCTable() ? n->as_PCTable()->_size : 2; - if (n->outcnt() != expected_kids) { + uint required_outcnt = n->required_outcnt(); + if (n->outcnt() != required_outcnt) { // Check for a few special cases. Rethrow Nodes never take the // 'fall-thru' path, so expected kids is 1 less. if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) { if (n->in(0)->in(0)->is_Call()) { CallNode *call = n->in(0)->in(0)->as_Call(); if (call->entry_point() == OptoRuntime::rethrow_stub()) { - expected_kids--; // Rethrow always has 1 less kid + required_outcnt--; // Rethrow always has 1 less kid } else if (call->req() > TypeFunc::Parms && call->is_CallDynamicJava()) { // Check for null receiver. In such case, the optimizer has @@ -2166,8 +2472,8 @@ // will not be populated. 
Node *arg0 = call->in(TypeFunc::Parms); if (arg0->is_Type() && - arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) { - expected_kids--; + arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) { + required_outcnt--; } } else if (call->entry_point() == OptoRuntime::new_array_Java() && call->req() > TypeFunc::Parms+1 && @@ -2178,13 +2484,13 @@ Node *arg1 = call->in(TypeFunc::Parms+1); if (arg1->is_Type() && arg1->as_Type()->type()->join(TypeInt::POS)->empty()) { - expected_kids--; + required_outcnt--; } } } } - // Recheck with a better notion of 'expected_kids' - if (n->outcnt() != expected_kids) { + // Recheck with a better notion of 'required_outcnt' + if (n->outcnt() != required_outcnt) { record_method_not_compilable("malformed control flow"); return true; // Not all targets reachable! } @@ -2360,6 +2666,9 @@ // Record the first failure reason. _failure_reason = reason; } + if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { + C->print_method(_failure_reason); + } _root = NULL; // flush the graph, too } --- old/hotspot/src/share/vm/opto/compile.hpp 2009-08-01 04:13:36.247277627 +0100 +++ new/hotspot/src/share/vm/opto/compile.hpp 2009-08-01 04:13:36.162004156 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)compile.hpp 1.232 07/09/28 10:23:10 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,7 @@ class Int_Array; class Matcher; class MachNode; +class MachSafePointNode; class Node; class Node_Array; class Node_Notes; @@ -55,9 +56,6 @@ class Unique_Node_List; class nmethod; class WarmCallInfo; -#ifdef ENABLE_ZAP_DEAD_LOCALS -class MachSafePointNode; -#endif //------------------------------Compile---------------------------------------- // This class defines a top-level Compiler invocation. @@ -130,6 +128,7 @@ const int _compile_id; const bool _save_argument_registers; // save/restore arg regs for trampolines const bool _subsume_loads; // Load can be matched as part of a larger op. + const bool _do_escape_analysis; // Do escape analysis. ciMethod* _method; // The method being compiled. int _entry_bci; // entry bci for osr methods. const TypeFunc* _tf; // My kind of signature @@ -158,12 +157,14 @@ uint _decompile_count; // Cumulative decompilation counts. bool _do_inlining; // True if we intend to do inlining bool _do_scheduling; // True if we intend to do scheduling + bool _do_freq_based_layout; // True if we intend to do frequency based block layout bool _do_count_invocations; // True if we generate code to count invocations bool _do_method_data_update; // True if we generate code to update methodDataOops int _AliasLevel; // Locally-adjusted version of AliasLevel flag. bool _print_assembly; // True if we should dump assembly code for this compilation #ifndef PRODUCT bool _trace_opto_output; + bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing #endif // Compilation environment. @@ -263,6 +264,8 @@ // instructions that subsume a load may result in an unschedulable // instruction sequence. bool subsume_loads() const { return _subsume_loads; } + // Do escape analysis. 
+ bool do_escape_analysis() const { return _do_escape_analysis; } bool save_argument_registers() const { return _save_argument_registers; } @@ -308,6 +311,8 @@ void set_do_inlining(bool z) { _do_inlining = z; } bool do_scheduling() const { return _do_scheduling; } void set_do_scheduling(bool z) { _do_scheduling = z; } + bool do_freq_based_layout() const{ return _do_freq_based_layout; } + void set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; } bool do_count_invocations() const{ return _do_count_invocations; } void set_do_count_invocations(bool z){ _do_count_invocations = z; } bool do_method_data_update() const { return _do_method_data_update; } @@ -321,6 +326,8 @@ } #ifndef PRODUCT bool trace_opto_output() const { return _trace_opto_output; } + bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; } + void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; } #endif void begin_method() { @@ -487,7 +494,6 @@ PhaseGVN* initial_gvn() { return _initial_gvn; } Unique_Node_List* for_igvn() { return _for_igvn; } inline void record_for_igvn(Node* n); // Body is after class Unique_Node_List. - void record_for_escape_analysis(Node* n); void set_initial_gvn(PhaseGVN *gvn) { _initial_gvn = gvn; } void set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; } @@ -563,7 +569,7 @@ // replacement, entry_bci indicates the bytecode for which to compile a // continuation. Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, - int entry_bci, bool subsume_loads); + int entry_bci, bool subsume_loads, bool do_escape_analysis); // Second major entry point. From the TypeFunc signature, generate code // to pass arguments from the Java calling convention to the C calling @@ -608,8 +614,20 @@ // Build OopMaps for each GC point void BuildOopMaps(); - // Append debug info for the node to the array - void FillLocArray( int idx, Node *local, GrowableArray<ScopeValue*> *array ); + + // Append debug info for the node "local" at safepoint node "sfpt" to the + // "array", May also consult and add to "objs", which describes the + // scalar-replaced objects. + void FillLocArray( int idx, MachSafePointNode* sfpt, + Node *local, GrowableArray<ScopeValue*> *array, + GrowableArray<ScopeValue*> *objs ); + + // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL. + static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id); + // Requres that "objs" does not contains an ObjectValue whose id matches + // that of "sv. Appends "sv". + static void set_sv_for_object_node(GrowableArray<ScopeValue*> *objs, + ObjectValue* sv ); // Process an OopMap Element while emitting nodes void Process_OopMap_Node(MachNode *mach, int code_offset); --- old/hotspot/src/share/vm/opto/connode.cpp 2009-08-01 04:13:37.178657022 +0100 +++ new/hotspot/src/share/vm/opto/connode.cpp 2009-08-01 04:13:37.086993275 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)connode.cpp 1.222 07/10/16 13:32:21 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -40,13 +40,14 @@ ConNode *ConNode::make( Compile* C, const Type *t ) { switch( t->basic_type() ) { case T_INT: return new (C, 1) ConINode( t->is_int() ); - case T_ARRAY: return new (C, 1) ConPNode( t->is_aryptr() ); case T_LONG: return new (C, 1) ConLNode( t->is_long() ); case T_FLOAT: return new (C, 1) ConFNode( t->is_float_constant() ); case T_DOUBLE: return new (C, 1) ConDNode( t->is_double_constant() ); case T_VOID: return new (C, 1) ConNode ( Type::TOP ); case T_OBJECT: return new (C, 1) ConPNode( t->is_oopptr() ); + case T_ARRAY: return new (C, 1) ConPNode( t->is_aryptr() ); case T_ADDRESS: return new (C, 1) ConPNode( t->is_ptr() ); + case T_NARROWOOP: return new (C, 1) ConNNode( t->is_narrowoop() ); // Expected cases: TypePtr::NULL_PTR, any is_rawptr() // Also seen: AnyPtr(TopPTR *+top); from command line: // r -XX:+PrintOpto -XX:CIStart=285 -XX:+CompileTheWorld -XX:CompileTheWorldStartAt=660 @@ -103,8 +104,10 @@ // Move constants to the right. Node *CMoveNode::Ideal(PhaseGVN *phase, bool can_reshape) { if( in(0) && remove_dead_region(phase, can_reshape) ) return this; - assert( !phase->eqv(in(Condition), this) && - !phase->eqv(in(IfFalse), this) && + // Don't bother trying to transform a dead node + if( in(0) && in(0)->is_top() ) return NULL; + assert( !phase->eqv(in(Condition), this) && + !phase->eqv(in(IfFalse), this) && !phase->eqv(in(IfTrue), this), "dead loop in CMoveNode::Ideal" ); if( phase->type(in(Condition)) == Type::TOP ) return NULL; // return NULL when Condition is dead @@ -187,6 +190,7 @@ case T_LONG: return new (C, 4) CMoveLNode( bol, left, right, t->is_long() ); case T_OBJECT: return new (C, 4) CMovePNode( c, bol, left, right, t->is_oopptr() ); case T_ADDRESS: return new (C, 4) CMovePNode( c, bol, left, right, t->is_ptr() ); + case T_NARROWOOP: return new (C, 4) CMoveNNode( c, bol, left, right, t ); default: ShouldNotReachHere(); return NULL; @@ -432,8 +436,8 @@ // If not converting int->oop, throw away cast after constant propagation Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { const Type *t = ccp->type(in(1)); - if (!t->isa_oop_ptr()) { - return NULL; // do not transform raw pointers + if (!t->isa_oop_ptr() || in(1)->is_DecodeN()) { + return NULL; // do not transform raw pointers or narrow oops } return ConstraintCastNode::Ideal_DU_postCCP(ccp); } @@ -464,7 +468,8 @@ possible_alias = n->is_Phi() || opc == Op_CheckCastPP || opc == Op_StorePConditional || - opc == Op_CompareAndSwapP; + opc == Op_CompareAndSwapP || + opc == Op_CompareAndSwapN; } return possible_alias; } @@ -552,6 +557,52 @@ return (in(0) && remove_dead_region(phase, can_reshape)) ? 
this : NULL; } + +Node* DecodeNNode::Identity(PhaseTransform* phase) { + const Type *t = phase->type( in(1) ); + if( t == Type::TOP ) return in(1); + + if (in(1)->is_EncodeP()) { + // (DecodeN (EncodeP p)) -> p + return in(1)->in(1); + } + return this; +} + +const Type *DecodeNNode::Value( PhaseTransform *phase ) const { + const Type *t = phase->type( in(1) ); + if (t == Type::TOP) return Type::TOP; + if (t == TypeNarrowOop::NULL_PTR) return TypePtr::NULL_PTR; + + assert(t->isa_narrowoop(), "only narrowoop here"); + return t->make_ptr(); +} + +Node* EncodePNode::Identity(PhaseTransform* phase) { + const Type *t = phase->type( in(1) ); + if( t == Type::TOP ) return in(1); + + if (in(1)->is_DecodeN()) { + // (EncodeP (DecodeN p)) -> p + return in(1)->in(1); + } + return this; +} + +const Type *EncodePNode::Value( PhaseTransform *phase ) const { + const Type *t = phase->type( in(1) ); + if (t == Type::TOP) return Type::TOP; + if (t == TypePtr::NULL_PTR) return TypeNarrowOop::NULL_PTR; + + assert(t->isa_oopptr(), "only oopptr here"); + return t->make_narrowoop(); +} + + +Node *EncodePNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { + return MemNode::Ideal_common_DU_postCCP(ccp, this, in(1)); +} + //============================================================================= //------------------------------Identity--------------------------------------- Node *Conv2BNode::Identity( PhaseTransform *phase ) { @@ -985,34 +1036,9 @@ return new (phase->C, 3) AddINode(add1,add2); } - // Fold up with a prior LoadL: LoadL->ConvL2I ==> LoadI - // Requires we understand the 'endianess' of Longs. - if( andl_op == Op_LoadL ) { - Node *adr = andl->in(MemNode::Address); - // VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles -#ifndef VM_LITTLE_ENDIAN - // The transformation can cause problems on BIG_ENDIAN architectures - // where the jint is not the same address as the jlong. Specifically, we - // will fail to insert an anti-dependence in GCM between the LoadI and a - // subsequent StoreL because different memory offsets provoke - // flatten_alias_type() into indicating two different types. See bug - // 4755222. - - // Node *base = adr->is_AddP() ? adr->in(AddPNode::Base) : adr; - // adr = phase->transform( new (phase->C, 4) AddPNode(base,adr,phase->MakeConX(sizeof(jint)))); - return NULL; -#else - if (phase->C->alias_type(andl->adr_type())->is_volatile()) { - // Picking up the low half by itself bypasses the atomic load and we could - // end up with more than one non-atomic load. See bugs 4432655 and 4526490. - // We could go to the trouble of iterating over andl's output edges and - // punting only if there's more than one real use, but we don't bother. - return NULL; - } - return new (phase->C, 3) LoadINode(andl->in(MemNode::Control),andl->in(MemNode::Memory),adr,((LoadLNode*)andl)->raw_adr_type()); -#endif - } - + // Disable optimization: LoadL->ConvL2I ==> LoadI. + // It causes problems (sizes of Load and Store nodes do not match) + // in objects initialization code and Escape Analysis. return NULL; } --- old/hotspot/src/share/vm/opto/connode.hpp 2009-08-01 04:13:38.126950509 +0100 +++ new/hotspot/src/share/vm/opto/connode.hpp 2009-08-01 04:13:38.048384179 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)connode.hpp 1.160 07/05/05 17:06:13 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -73,11 +73,15 @@ else return new (C, 1) ConPNode( TypeRawPtr::make(con) ); } +}; - static ConPNode* make( Compile *C, ciObject* con ) { - return new (C, 1) ConPNode( TypeOopPtr::make_from_constant(con) ); - } +//------------------------------ConNNode-------------------------------------- +// Simple narrow oop constants +class ConNNode : public ConNode { +public: + ConNNode( const TypeNarrowOop *t ) : ConNode(t) {} + virtual int Opcode() const; }; @@ -199,7 +203,14 @@ virtual int Opcode() const; }; -//------------------------------ConstraintCastNode------------------------------------- +//------------------------------CMoveNNode------------------------------------- +class CMoveNNode : public CMoveNode { +public: + CMoveNNode( Node *c, Node *bol, Node *left, Node *right, const Type* t ) : CMoveNode(bol,left,right,t) { init_req(Control,c); } + virtual int Opcode() const; +}; + +//------------------------------ConstraintCastNode----------------------------- // cast to a different range class ConstraintCastNode: public TypeNode { public: @@ -228,10 +239,7 @@ // cast pointer to pointer (different type) class CastPPNode: public ConstraintCastNode { public: - CastPPNode (Node *n, const Type *t ): ConstraintCastNode(n, t) { - // Only CastPP is safe. CastII can cause optimizer loops. - init_flags(Flag_is_dead_loop_safe); - } + CastPPNode (Node *n, const Type *t ): ConstraintCastNode(n, t) {} virtual int Opcode() const; virtual uint ideal_reg() const { return Op_RegP; } virtual Node *Ideal_DU_postCCP( PhaseCCP * ); @@ -243,10 +251,10 @@ public: CheckCastPPNode( Node *c, Node *n, const Type *t ) : TypeNode(t,2) { init_class_id(Class_CheckCastPP); - init_flags(Flag_is_dead_loop_safe); init_req(0, c); init_req(1, n); } + virtual Node *Identity( PhaseTransform *phase ); virtual const Type *Value( PhaseTransform *phase ) const; virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); @@ -257,6 +265,45 @@ //virtual Node *Ideal_DU_postCCP( PhaseCCP * ); }; + +//------------------------------EncodeP-------------------------------- +// Encodes an oop pointers into its compressed form +// Takes an extra argument which is the real heap base as a long which +// may be useful for code generation in the backend. +class EncodePNode : public TypeNode { + public: + EncodePNode(Node* value, const Type* type): + TypeNode(type, 2) { + init_class_id(Class_EncodeP); + init_req(0, NULL); + init_req(1, value); + } + virtual int Opcode() const; + virtual Node *Identity( PhaseTransform *phase ); + virtual const Type *Value( PhaseTransform *phase ) const; + virtual uint ideal_reg() const { return Op_RegN; } + + virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); +}; + +//------------------------------DecodeN-------------------------------- +// Converts a narrow oop into a real oop ptr. +// Takes an extra argument which is the real heap base as a long which +// may be useful for code generation in the backend. +class DecodeNNode : public TypeNode { + public: + DecodeNNode(Node* value, const Type* type): + TypeNode(type, 2) { + init_class_id(Class_DecodeN); + init_req(0, NULL); + init_req(1, value); + } + virtual int Opcode() const; + virtual Node *Identity( PhaseTransform *phase ); + virtual const Type *Value( PhaseTransform *phase ) const; + virtual uint ideal_reg() const { return Op_RegP; } +}; + //------------------------------Conv2BNode------------------------------------- // Convert int/pointer to a Boolean. Map zero to zero, all else to 1. 
class Conv2BNode : public Node { @@ -502,10 +549,18 @@ virtual uint hash() const ; // { return NO_HASH; } virtual uint cmp( const Node &n ) const; public: - Opaque1Node( Node *n ) : Node(0,n) {} + Opaque1Node( Compile* C, Node *n ) : Node(0,n) { + // Put it on the Macro nodes list to removed during macro nodes expansion. + init_flags(Flag_is_macro); + C->add_macro_node(this); + } // Special version for the pre-loop to hold the original loop limit // which is consumed by range check elimination. - Opaque1Node( Node *n, Node* orig_limit ) : Node(0,n,orig_limit) {} + Opaque1Node( Compile* C, Node *n, Node* orig_limit ) : Node(0,n,orig_limit) { + // Put it on the Macro nodes list to removed during macro nodes expansion. + init_flags(Flag_is_macro); + C->add_macro_node(this); + } Node* original_loop_limit() { return req()==3 ? in(2) : NULL; } virtual int Opcode() const; virtual const Type *bottom_type() const { return TypeInt::INT; } @@ -525,7 +580,11 @@ virtual uint hash() const ; // { return NO_HASH; } virtual uint cmp( const Node &n ) const; public: - Opaque2Node( Node *n ) : Node(0,n) {} + Opaque2Node( Compile* C, Node *n ) : Node(0,n) { + // Put it on the Macro nodes list to removed during macro nodes expansion. + init_flags(Flag_is_macro); + C->add_macro_node(this); + } virtual int Opcode() const; virtual const Type *bottom_type() const { return TypeInt::INT; } }; --- old/hotspot/src/share/vm/opto/divnode.cpp 2009-08-01 04:13:39.063476547 +0100 +++ new/hotspot/src/share/vm/opto/divnode.cpp 2009-08-01 04:13:38.972538572 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)divnode.cpp 1.88 07/05/05 17:06:13 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,119 +33,371 @@ #include "incls/_divnode.cpp.incl" #include -// Implement the integer constant divide -> long multiply transform found in -// "Division by Invariant Integers using Multiplication" -// by Granlund and Montgomery -static Node *transform_int_divide_to_long_multiply( PhaseGVN *phase, Node *dividend, int divisor ) { +//----------------------magic_int_divide_constants----------------------------- +// Compute magic multiplier and shift constant for converting a 32 bit divide +// by constant into a multiply/shift/add series. Return false if calculations +// fail. +// +// Borrowed almost verbatum from Hacker's Delight by Henry S. Warren, Jr. with +// minor type name and parameter changes. +static bool magic_int_divide_constants(jint d, jint &M, jint &s) { + int32_t p; + uint32_t ad, anc, delta, q1, r1, q2, r2, t; + const uint32_t two31 = 0x80000000L; // 2**31. + + ad = ABS(d); + if (d == 0 || d == 1) return false; + t = two31 + ((uint32_t)d >> 31); + anc = t - 1 - t%ad; // Absolute value of nc. + p = 31; // Init. p. + q1 = two31/anc; // Init. q1 = 2**p/|nc|. + r1 = two31 - q1*anc; // Init. r1 = rem(2**p, |nc|). + q2 = two31/ad; // Init. q2 = 2**p/|d|. + r2 = two31 - q2*ad; // Init. r2 = rem(2**p, |d|). + do { + p = p + 1; + q1 = 2*q1; // Update q1 = 2**p/|nc|. + r1 = 2*r1; // Update r1 = rem(2**p, |nc|). + if (r1 >= anc) { // (Must be an unsigned + q1 = q1 + 1; // comparison here). + r1 = r1 - anc; + } + q2 = 2*q2; // Update q2 = 2**p/|d|. + r2 = 2*r2; // Update r2 = rem(2**p, |d|). + if (r2 >= ad) { // (Must be an unsigned + q2 = q2 + 1; // comparison here). 
+ r2 = r2 - ad; + } + delta = ad - r2; + } while (q1 < delta || (q1 == delta && r1 == 0)); + + M = q2 + 1; + if (d < 0) M = -M; // Magic number and + s = p - 32; // shift amount to return. + + return true; +} + +//--------------------------transform_int_divide------------------------------- +// Convert a division by constant divisor into an alternate Ideal graph. +// Return NULL if no transformation occurs. +static Node *transform_int_divide( PhaseGVN *phase, Node *dividend, jint divisor ) { // Check for invalid divisors - assert( divisor != 0 && divisor != min_jint && divisor != 1, - "bad divisor for transforming to long multiply" ); + assert( divisor != 0 && divisor != min_jint, + "bad divisor for transforming to long multiply" ); - // Compute l = ceiling(log2(d)) - // presumes d is more likely small bool d_pos = divisor >= 0; - int d = d_pos ? divisor : -divisor; - unsigned ud = (unsigned)d; + jint d = d_pos ? divisor : -divisor; const int N = 32; - int l = log2_intptr(d-1)+1; - int sh_post = l; - - const uint64_t U1 = (uint64_t)1; - - // Cliff pointed out how to prevent overflow (from the paper) - uint64_t m_low = (((U1 << l) - ud) << N) / ud + (U1 << N); - uint64_t m_high = ((((U1 << l) - ud) << N) + (U1 << (l+1))) / ud + (U1 << N); - - // Reduce to lowest terms - for ( ; sh_post > 0; sh_post-- ) { - uint64_t m_low_1 = m_low >> 1; - uint64_t m_high_1 = m_high >> 1; - if ( m_low_1 >= m_high_1 ) - break; - m_low = m_low_1; - m_high = m_high_1; - } // Result - Node *q; + Node *q = NULL; - // division by +/- 1 if (d == 1) { - // Filtered out as identity above - if (d_pos) - return NULL; - - // Just negate the value - else { + // division by +/- 1 + if (!d_pos) { + // Just negate the value q = new (phase->C, 3) SubINode(phase->intcon(0), dividend); } - } - - // division by +/- a power of 2 - else if ( is_power_of_2(d) ) { + } else if ( is_power_of_2(d) ) { + // division by +/- a power of 2 // See if we can simply do a shift without rounding bool needs_rounding = true; const Type *dt = phase->type(dividend); const TypeInt *dti = dt->isa_int(); - - // we don't need to round a positive dividend - if (dti && dti->_lo >= 0) + if (dti && dti->_lo >= 0) { + // we don't need to round a positive dividend needs_rounding = false; - - // An AND mask of sufficient size clears the low bits and - // I can avoid rounding. - else if( dividend->Opcode() == Op_AndI ) { - const TypeInt *andconi = phase->type( dividend->in(2) )->isa_int(); - if( andconi && andconi->is_con(-d) ) { - dividend = dividend->in(1); - needs_rounding = false; + } else if( dividend->Opcode() == Op_AndI ) { + // An AND mask of sufficient size clears the low bits and + // I can avoid rounding. + const TypeInt *andconi_t = phase->type( dividend->in(2) )->isa_int(); + if( andconi_t && andconi_t->is_con() ) { + jint andconi = andconi_t->get_con(); + if( andconi < 0 && is_power_of_2(-andconi) && (-andconi) >= d ) { + dividend = dividend->in(1); + needs_rounding = false; + } } } // Add rounding to the shift to handle the sign bit - if( needs_rounding ) { - Node *t1 = phase->transform(new (phase->C, 3) RShiftINode(dividend, phase->intcon(l - 1))); - Node *t2 = phase->transform(new (phase->C, 3) URShiftINode(t1, phase->intcon(N - l))); - dividend = phase->transform(new (phase->C, 3) AddINode(dividend, t2)); + int l = log2_intptr(d-1)+1; + if (needs_rounding) { + // Divide-by-power-of-2 can be made into a shift, but you have to do + // more math for the rounding. You need to add 0 for positive + // numbers, and "i-1" for negative numbers. 
Example: i=4, so the + // shift is by 2. You need to add 3 to negative dividends and 0 to + // positive ones. So (-7+3)>>2 becomes -1, (-4+3)>>2 becomes -1, + // (-2+3)>>2 becomes 0, etc. + + // Compute 0 or -1, based on sign bit + Node *sign = phase->transform(new (phase->C, 3) RShiftINode(dividend, phase->intcon(N - 1))); + // Mask sign bit to the low sign bits + Node *round = phase->transform(new (phase->C, 3) URShiftINode(sign, phase->intcon(N - l))); + // Round up before shifting + dividend = phase->transform(new (phase->C, 3) AddINode(dividend, round)); } + // Shift for division q = new (phase->C, 3) RShiftINode(dividend, phase->intcon(l)); - if (!d_pos) + if (!d_pos) { q = new (phase->C, 3) SubINode(phase->intcon(0), phase->transform(q)); + } + } else { + // Attempt the jint constant divide -> multiply transform found in + // "Division by Invariant Integers using Multiplication" + // by Granlund and Montgomery + // See also "Hacker's Delight", chapter 10 by Warren. + + jint magic_const; + jint shift_const; + if (magic_int_divide_constants(d, magic_const, shift_const)) { + Node *magic = phase->longcon(magic_const); + Node *dividend_long = phase->transform(new (phase->C, 2) ConvI2LNode(dividend)); + + // Compute the high half of the dividend x magic multiplication + Node *mul_hi = phase->transform(new (phase->C, 3) MulLNode(dividend_long, magic)); + + if (magic_const < 0) { + mul_hi = phase->transform(new (phase->C, 3) RShiftLNode(mul_hi, phase->intcon(N))); + mul_hi = phase->transform(new (phase->C, 2) ConvL2INode(mul_hi)); + + // The magic multiplier is too large for a 32 bit constant. We've adjusted + // it down by 2^32, but have to add 1 dividend back in after the multiplication. + // This handles the "overflow" case described by Granlund and Montgomery. + mul_hi = phase->transform(new (phase->C, 3) AddINode(dividend, mul_hi)); + + // Shift over the (adjusted) mulhi + if (shift_const != 0) { + mul_hi = phase->transform(new (phase->C, 3) RShiftINode(mul_hi, phase->intcon(shift_const))); + } + } else { + // No add is required, we can merge the shifts together. + mul_hi = phase->transform(new (phase->C, 3) RShiftLNode(mul_hi, phase->intcon(N + shift_const))); + mul_hi = phase->transform(new (phase->C, 2) ConvL2INode(mul_hi)); + } + + // Get a 0 or -1 from the sign of the dividend. + Node *addend0 = mul_hi; + Node *addend1 = phase->transform(new (phase->C, 3) RShiftINode(dividend, phase->intcon(N-1))); + + // If the divisor is negative, swap the order of the input addends; + // this has the effect of negating the quotient. + if (!d_pos) { + Node *temp = addend0; addend0 = addend1; addend1 = temp; + } + + // Adjust the final quotient by subtracting -1 (adding 1) + // from the mul_hi. + q = new (phase->C, 3) SubINode(addend0, addend1); + } } - // division by something else - else if (m_high < (U1 << (N-1))) { - Node *t1 = phase->transform(new (phase->C, 2) ConvI2LNode(dividend)); - Node *t2 = phase->transform(new (phase->C, 3) MulLNode(t1, phase->longcon(m_high))); - Node *t3 = phase->transform(new (phase->C, 3) RShiftLNode(t2, phase->intcon(sh_post+N))); - Node *t4 = phase->transform(new (phase->C, 2) ConvL2INode(t3)); - Node *t5 = phase->transform(new (phase->C, 3) RShiftINode(dividend, phase->intcon(N-1))); - - q = new (phase->C, 3) SubINode(d_pos ? t4 : t5, d_pos ? t5 : t4); - } - - // This handles that case where m_high is >= 2**(N-1). In that case, - // we subtract out 2**N from the multiply and add it in later as - // "dividend" in the equation (t5). 
This case computes the same result - // as the immediately preceeding case, save that rounding and overflow - // are accounted for. - else { - Node *t1 = phase->transform(new (phase->C, 2) ConvI2LNode(dividend)); - Node *t2 = phase->transform(new (phase->C, 3) MulLNode(t1, phase->longcon(m_high - (U1 << N)))); - Node *t3 = phase->transform(new (phase->C, 3) RShiftLNode(t2, phase->intcon(N))); - Node *t4 = phase->transform(new (phase->C, 2) ConvL2INode(t3)); - Node *t5 = phase->transform(new (phase->C, 3) AddINode(dividend, t4)); - Node *t6 = phase->transform(new (phase->C, 3) RShiftINode(t5, phase->intcon(sh_post))); - Node *t7 = phase->transform(new (phase->C, 3) RShiftINode(dividend, phase->intcon(N-1))); + return q; +} + +//---------------------magic_long_divide_constants----------------------------- +// Compute magic multiplier and shift constant for converting a 64 bit divide +// by constant into a multiply/shift/add series. Return false if calculations +// fail. +// +// Borrowed almost verbatum from Hacker's Delight by Henry S. Warren, Jr. with +// minor type name and parameter changes. Adjusted to 64 bit word width. +static bool magic_long_divide_constants(jlong d, jlong &M, jint &s) { + int64_t p; + uint64_t ad, anc, delta, q1, r1, q2, r2, t; + const uint64_t two63 = 0x8000000000000000LL; // 2**63. + + ad = ABS(d); + if (d == 0 || d == 1) return false; + t = two63 + ((uint64_t)d >> 63); + anc = t - 1 - t%ad; // Absolute value of nc. + p = 63; // Init. p. + q1 = two63/anc; // Init. q1 = 2**p/|nc|. + r1 = two63 - q1*anc; // Init. r1 = rem(2**p, |nc|). + q2 = two63/ad; // Init. q2 = 2**p/|d|. + r2 = two63 - q2*ad; // Init. r2 = rem(2**p, |d|). + do { + p = p + 1; + q1 = 2*q1; // Update q1 = 2**p/|nc|. + r1 = 2*r1; // Update r1 = rem(2**p, |nc|). + if (r1 >= anc) { // (Must be an unsigned + q1 = q1 + 1; // comparison here). + r1 = r1 - anc; + } + q2 = 2*q2; // Update q2 = 2**p/|d|. + r2 = 2*r2; // Update r2 = rem(2**p, |d|). + if (r2 >= ad) { // (Must be an unsigned + q2 = q2 + 1; // comparison here). + r2 = r2 - ad; + } + delta = ad - r2; + } while (q1 < delta || (q1 == delta && r1 == 0)); + + M = q2 + 1; + if (d < 0) M = -M; // Magic number and + s = p - 64; // shift amount to return. + + return true; +} + +//---------------------long_by_long_mulhi-------------------------------------- +// Generate ideal node graph for upper half of a 64 bit x 64 bit multiplication +static Node *long_by_long_mulhi( PhaseGVN *phase, Node *dividend, jlong magic_const) { + // If the architecture supports a 64x64 mulhi, there is + // no need to synthesize it in ideal nodes. 
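When the platform has no MulHiL match rule, long_by_long_mulhi below rebuilds the high 64 bits of the product from 32-bit halves. That recombination can be sanity-checked in isolation; this sketch follows the same partial-product arithmetic, and the __int128 type is a GCC/Clang extension used here only to compute the reference value:

#include <cstdint>
#include <cstdio>

// High 64 bits of a signed 64x64 multiply via four 32x32 partial products,
// following the node sequence long_by_long_mulhi constructs.
static int64_t mulhi64(int64_t u, int64_t v) {
  int64_t  u_hi = u >> 32;                      // signed high halves (RShiftL)
  int64_t  v_hi = v >> 32;
  uint64_t u_lo = (uint64_t)u & 0xFFFFFFFFu;    // unsigned low halves (AndL)
  uint64_t v_lo = (uint64_t)v & 0xFFFFFFFFu;

  uint64_t lolo = u_lo * v_lo;                  // may wrap; only the bits matter
  int64_t  hilo = u_hi * (int64_t)v_lo;
  int64_t  lohi = (int64_t)u_lo * v_hi;
  int64_t  hihi = u_hi * v_hi;

  int64_t t2 = hilo + (int64_t)(lolo >> 32);    // carry out of the low product
  int64_t t5 = (int64_t)((uint64_t)t2 & 0xFFFFFFFFu) + lohi;
  return (t2 >> 32) + hihi + (t5 >> 32);        // assemble the high word
}

int main() {
  const int64_t samples[] = { 0, 1, -1, 123456789, -987654321012345LL,
                              INT64_MAX, INT64_MIN, 0x4924924924924925LL };
  for (int64_t u : samples)
    for (int64_t v : samples) {
      int64_t want = (int64_t)(((__int128)u * v) >> 64);   // reference only
      if (mulhi64(u, v) != want) { printf("mismatch\n"); return 1; }
    }
  printf("synthesized mulhi matches the 128-bit reference\n");
  return 0;
}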
+ if (Matcher::has_match_rule(Op_MulHiL)) { + Node *v = phase->longcon(magic_const); + return new (phase->C, 3) MulHiLNode(dividend, v); + } + + const int N = 64; + + Node *u_hi = phase->transform(new (phase->C, 3) RShiftLNode(dividend, phase->intcon(N / 2))); + Node *u_lo = phase->transform(new (phase->C, 3) AndLNode(dividend, phase->longcon(0xFFFFFFFF))); + + Node *v_hi = phase->longcon(magic_const >> N/2); + Node *v_lo = phase->longcon(magic_const & 0XFFFFFFFF); + + Node *hihi_product = phase->transform(new (phase->C, 3) MulLNode(u_hi, v_hi)); + Node *hilo_product = phase->transform(new (phase->C, 3) MulLNode(u_hi, v_lo)); + Node *lohi_product = phase->transform(new (phase->C, 3) MulLNode(u_lo, v_hi)); + Node *lolo_product = phase->transform(new (phase->C, 3) MulLNode(u_lo, v_lo)); + + Node *t1 = phase->transform(new (phase->C, 3) URShiftLNode(lolo_product, phase->intcon(N / 2))); + Node *t2 = phase->transform(new (phase->C, 3) AddLNode(hilo_product, t1)); + + // Construct both t3 and t4 before transforming so t2 doesn't go dead + // prematurely. + Node *t3 = new (phase->C, 3) RShiftLNode(t2, phase->intcon(N / 2)); + Node *t4 = new (phase->C, 3) AndLNode(t2, phase->longcon(0xFFFFFFFF)); + t3 = phase->transform(t3); + t4 = phase->transform(t4); + + Node *t5 = phase->transform(new (phase->C, 3) AddLNode(t4, lohi_product)); + Node *t6 = phase->transform(new (phase->C, 3) RShiftLNode(t5, phase->intcon(N / 2))); + Node *t7 = phase->transform(new (phase->C, 3) AddLNode(t3, hihi_product)); + + return new (phase->C, 3) AddLNode(t7, t6); +} + + +//--------------------------transform_long_divide------------------------------ +// Convert a division by constant divisor into an alternate Ideal graph. +// Return NULL if no transformation occurs. +static Node *transform_long_divide( PhaseGVN *phase, Node *dividend, jlong divisor ) { + // Check for invalid divisors + assert( divisor != 0L && divisor != min_jlong, + "bad divisor for transforming to long multiply" ); + + bool d_pos = divisor >= 0; + jlong d = d_pos ? divisor : -divisor; + const int N = 64; + + // Result + Node *q = NULL; + + if (d == 1) { + // division by +/- 1 + if (!d_pos) { + // Just negate the value + q = new (phase->C, 3) SubLNode(phase->longcon(0), dividend); + } + } else if ( is_power_of_2_long(d) ) { + + // division by +/- a power of 2 - q = new (phase->C, 3) SubINode(d_pos ? t6 : t7, d_pos ? t7 : t6); + // See if we can simply do a shift without rounding + bool needs_rounding = true; + const Type *dt = phase->type(dividend); + const TypeLong *dtl = dt->isa_long(); + + if (dtl && dtl->_lo > 0) { + // we don't need to round a positive dividend + needs_rounding = false; + } else if( dividend->Opcode() == Op_AndL ) { + // An AND mask of sufficient size clears the low bits and + // I can avoid rounding. + const TypeLong *andconl_t = phase->type( dividend->in(2) )->isa_long(); + if( andconl_t && andconl_t->is_con() ) { + jlong andconl = andconl_t->get_con(); + if( andconl < 0 && is_power_of_2_long(-andconl) && (-andconl) >= d ) { + dividend = dividend->in(1); + needs_rounding = false; + } + } + } + + // Add rounding to the shift to handle the sign bit + int l = log2_long(d-1)+1; + if (needs_rounding) { + // Divide-by-power-of-2 can be made into a shift, but you have to do + // more math for the rounding. You need to add 0 for positive + // numbers, and "i-1" for negative numbers. Example: i=4, so the + // shift is by 2. You need to add 3 to negative dividends and 0 to + // positive ones. 
So (-7+3)>>2 becomes -1, (-4+3)>>2 becomes -1, + // (-2+3)>>2 becomes 0, etc. + + // Compute 0 or -1, based on sign bit + Node *sign = phase->transform(new (phase->C, 3) RShiftLNode(dividend, phase->intcon(N - 1))); + // Mask sign bit to the low sign bits + Node *round = phase->transform(new (phase->C, 3) URShiftLNode(sign, phase->intcon(N - l))); + // Round up before shifting + dividend = phase->transform(new (phase->C, 3) AddLNode(dividend, round)); + } + + // Shift for division + q = new (phase->C, 3) RShiftLNode(dividend, phase->intcon(l)); + + if (!d_pos) { + q = new (phase->C, 3) SubLNode(phase->longcon(0), phase->transform(q)); + } + } else { + // Attempt the jlong constant divide -> multiply transform found in + // "Division by Invariant Integers using Multiplication" + // by Granlund and Montgomery + // See also "Hacker's Delight", chapter 10 by Warren. + + jlong magic_const; + jint shift_const; + if (magic_long_divide_constants(d, magic_const, shift_const)) { + // Compute the high half of the dividend x magic multiplication + Node *mul_hi = phase->transform(long_by_long_mulhi(phase, dividend, magic_const)); + + // The high half of the 128-bit multiply is computed. + if (magic_const < 0) { + // The magic multiplier is too large for a 64 bit constant. We've adjusted + // it down by 2^64, but have to add 1 dividend back in after the multiplication. + // This handles the "overflow" case described by Granlund and Montgomery. + mul_hi = phase->transform(new (phase->C, 3) AddLNode(dividend, mul_hi)); + } + + // Shift over the (adjusted) mulhi + if (shift_const != 0) { + mul_hi = phase->transform(new (phase->C, 3) RShiftLNode(mul_hi, phase->intcon(shift_const))); + } + + // Get a 0 or -1 from the sign of the dividend. + Node *addend0 = mul_hi; + Node *addend1 = phase->transform(new (phase->C, 3) RShiftLNode(dividend, phase->intcon(N-1))); + + // If the divisor is negative, swap the order of the input addends; + // this has the effect of negating the quotient. + if (!d_pos) { + Node *temp = addend0; addend0 = addend1; addend1 = temp; + } + + // Adjust the final quotient by subtracting -1 (adding 1) + // from the mul_hi. + q = new (phase->C, 3) SubLNode(addend0, addend1); + } } - return (q); + return q; } //============================================================================= @@ -159,6 +411,8 @@ // Divides can be changed to multiplies and/or shifts Node *DivINode::Ideal(PhaseGVN *phase, bool can_reshape) { if (in(0) && remove_dead_region(phase, can_reshape)) return this; + // Don't bother trying to transform a dead node + if( in(0) && in(0)->is_top() ) return NULL; const Type *t = phase->type( in(2) ); if( t == TypeInt::ONE ) // Identity? @@ -167,7 +421,7 @@ const TypeInt *ti = t->isa_int(); if( !ti ) return NULL; if( !ti->is_con() ) return NULL; - int i = ti->get_con(); // Get divisor + jint i = ti->get_con(); // Get divisor if (i == 0) return NULL; // Dividing by zero constant does not idealize @@ -176,7 +430,7 @@ // Dividing by MININT does not optimize as a power-of-2 shift. if( i == min_jint ) return NULL; - return transform_int_divide_to_long_multiply( phase, in(1), i ); + return transform_int_divide( phase, in(1), i ); } //------------------------------Value------------------------------------------ @@ -256,87 +510,26 @@ // Dividing by a power of 2 is a shift. 
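The rounding step described in the comments, add d-1 to a negative dividend and nothing to a positive one before shifting, is easy to verify directly for a small power-of-two divisor. A throwaway check using the same URShift-of-the-sign-word trick:

#include <cstdio>

// Signed divide by 2^k (here k = 2, d = 4) as a shift with rounding.
int main() {
  const int k = 2, d = 1 << k;
  for (int n = -9; n <= 9; n++) {
    int sign  = n >> 31;                      // 0 or -1 (RShiftI by N-1)
    int round = (unsigned)sign >> (32 - k);   // low k bits of the sign word: 0 or d-1
    int q     = (n + round) >> k;             // AddI then RShiftI
    if (q != n / d) { printf("mismatch at %d\n", n); return 1; }
  }
  printf("shift-with-rounding matches n / %d\n", d);
  return 0;
}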
Node *DivLNode::Ideal( PhaseGVN *phase, bool can_reshape) { if (in(0) && remove_dead_region(phase, can_reshape)) return this; + // Don't bother trying to transform a dead node + if( in(0) && in(0)->is_top() ) return NULL; const Type *t = phase->type( in(2) ); - if( t == TypeLong::ONE ) // Identity? + if( t == TypeLong::ONE ) // Identity? return NULL; // Skip it - const TypeLong *ti = t->isa_long(); - if( !ti ) return NULL; - if( !ti->is_con() ) return NULL; - jlong i = ti->get_con(); // Get divisor - if( i ) set_req(0, NULL); // Dividing by a not-zero constant; no faulting + const TypeLong *tl = t->isa_long(); + if( !tl ) return NULL; + if( !tl->is_con() ) return NULL; + jlong l = tl->get_con(); // Get divisor - // Dividing by MININT does not optimize as a power-of-2 shift. - if( i == min_jlong ) return NULL; + if (l == 0) return NULL; // Dividing by zero constant does not idealize - // Check for negative power of 2 divisor, if so, negate it and set a flag - // to indicate result needs to be negated. Note that negating the dividend - // here does not work when it has the value MININT - Node *dividend = in(1); - bool negate_res = false; - if (is_power_of_2_long(-i)) { - i = -i; // Flip divisor - negate_res = true; - } - - // Check for power of 2 - if (!is_power_of_2_long(i)) // Is divisor a power of 2? - return NULL; // Not a power of 2 - - // Compute number of bits to shift - int log_i = log2_long(i); - - // See if we can simply do a shift without rounding - bool needs_rounding = true; - const Type *dt = phase->type(dividend); - const TypeLong *dtl = dt->isa_long(); - - if (dtl && dtl->_lo > 0) { - // we don't need to round a positive dividend - needs_rounding = false; - } else if( dividend->Opcode() == Op_AndL ) { - // An AND mask of sufficient size clears the low bits and - // I can avoid rounding. - const TypeLong *andconi = phase->type( dividend->in(2) )->isa_long(); - if( andconi && - andconi->is_con() && - andconi->get_con() == -i ) { - dividend = dividend->in(1); - needs_rounding = false; - } - } + set_req(0,NULL); // Dividing by a not-zero constant; no faulting - if (!needs_rounding) { - Node *result = new (phase->C, 3) RShiftLNode(dividend, phase->intcon(log_i)); - if (negate_res) { - result = phase->transform(result); - result = new (phase->C, 3) SubLNode(phase->longcon(0), result); - } - return result; - } - - // Divide-by-power-of-2 can be made into a shift, but you have to do - // more math for the rounding. You need to add 0 for positive - // numbers, and "i-1" for negative numbers. Example: i=4, so the - // shift is by 2. You need to add 3 to negative dividends and 0 to - // positive ones. So (-7+3)>>2 becomes -1, (-4+3)>>2 becomes -1, - // (-2+3)>>2 becomes 0, etc. - - // Compute 0 or -1, based on sign bit - Node *sign = phase->transform(new (phase->C, 3) RShiftLNode(dividend,phase->intcon(63))); - // Mask sign bit to the low sign bits - Node *round = phase->transform(new (phase->C, 3) AndLNode(sign,phase->longcon(i-1))); - // Round up before shifting - Node *sum = phase->transform(new (phase->C, 3) AddLNode(dividend,round)); - // Shift for division - Node *result = new (phase->C, 3) RShiftLNode(sum, phase->intcon(log_i)); - if (negate_res) { - result = phase->transform(result); - result = new (phase->C, 3) SubLNode(phase->longcon(0), result); - } + // Dividing by MININT does not optimize as a power-of-2 shift. 
+ if( l == min_jlong ) return NULL; - return result; + return transform_long_divide( phase, in(1), l ); } //------------------------------Value------------------------------------------ @@ -424,7 +617,7 @@ // x/x == 1, we ignore 0/0. // Note: if t1 and t2 are zero then result is NaN (JVMS page 213) - // does not work for variables because of NaN's + // Does not work for variables because of NaN's if( phase->eqv( in(1), in(2) ) && t1->base() == Type::FloatCon) if (!g_isnan(t1->getf()) && g_isfinite(t1->getf()) && t1->getf() != 0.0) // could be negative ZERO or NaN return TypeF::ONE; @@ -460,6 +653,8 @@ //------------------------------Idealize--------------------------------------- Node *DivFNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (in(0) && remove_dead_region(phase, can_reshape)) return this; + // Don't bother trying to transform a dead node + if( in(0) && in(0)->is_top() ) return NULL; const Type *t2 = phase->type( in(2) ); if( t2 == TypeF::ONE ) // Identity? @@ -494,7 +689,7 @@ //============================================================================= //------------------------------Value------------------------------------------ // An DivDNode divides its inputs. The third input is a Control input, used to -// prvent hoisting the divide above an unsafe test. +// prevent hoisting the divide above an unsafe test. const Type *DivDNode::Value( PhaseTransform *phase ) const { // Either input is TOP ==> the result is TOP const Type *t1 = phase->type( in(1) ); @@ -518,11 +713,18 @@ if( t2 == TypeD::ONE ) return t1; - // If divisor is a constant and not zero, divide them numbers - if( t1->base() == Type::DoubleCon && - t2->base() == Type::DoubleCon && - t2->getd() != 0.0 ) // could be negative zero - return TypeD::make( t1->getd()/t2->getd() ); +#if defined(IA32) + if (!phase->C->method()->is_strict()) + // Can't trust native compilers to properly fold strict double + // division with round-to-zero on this platform. +#endif + { + // If divisor is a constant and not zero, divide them numbers + if( t1->base() == Type::DoubleCon && + t2->base() == Type::DoubleCon && + t2->getd() != 0.0 ) // could be negative zero + return TypeD::make( t1->getd()/t2->getd() ); + } // If the dividend is a constant zero // Note: if t1 and t2 are zero then result is NaN (JVMS page 213) @@ -545,6 +747,8 @@ //------------------------------Idealize--------------------------------------- Node *DivDNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (in(0) && remove_dead_region(phase, can_reshape)) return this; + // Don't bother trying to transform a dead node + if( in(0) && in(0)->is_top() ) return NULL; const Type *t2 = phase->type( in(2) ); if( t2 == TypeD::ONE ) // Identity? @@ -580,7 +784,9 @@ //------------------------------Idealize--------------------------------------- Node *ModINode::Ideal(PhaseGVN *phase, bool can_reshape) { // Check for dead control input - if( remove_dead_region(phase, can_reshape) ) return this; + if( in(0) && remove_dead_region(phase, can_reshape) ) return this; + // Don't bother trying to transform a dead node + if( in(0) && in(0)->is_top() ) return NULL; // Get the modulus const Type *t = phase->type( in(2) ); @@ -618,10 +824,10 @@ hook->init_req(0, x); // Add a use to x to prevent him from dying // Generate code to reduce X rapidly to nearly 2^k-1. 
for( int i = 0; i < trip_count; i++ ) { - Node *xl = phase->transform( new (phase->C, 3) AndINode(x,divisor) ); - Node *xh = phase->transform( new (phase->C, 3) RShiftINode(x,phase->intcon(k)) ); // Must be signed - x = phase->transform( new (phase->C, 3) AddINode(xh,xl) ); - hook->set_req(0, x); + Node *xl = phase->transform( new (phase->C, 3) AndINode(x,divisor) ); + Node *xh = phase->transform( new (phase->C, 3) RShiftINode(x,phase->intcon(k)) ); // Must be signed + x = phase->transform( new (phase->C, 3) AddINode(xh,xl) ); + hook->set_req(0, x); } // Generate sign-fixup code. Was original value positive? @@ -678,18 +884,21 @@ hook->init_req(0, in(1)); // Divide using the transform from DivI to MulL - Node *divide = phase->transform( transform_int_divide_to_long_multiply( phase, in(1), pos_con ) ); - - // Re-multiply, using a shift if this is a power of two - Node *mult = NULL; + Node *result = transform_int_divide( phase, in(1), pos_con ); + if (result != NULL) { + Node *divide = phase->transform(result); + + // Re-multiply, using a shift if this is a power of two + Node *mult = NULL; + + if( log2_con >= 0 ) + mult = phase->transform( new (phase->C, 3) LShiftINode( divide, phase->intcon( log2_con ) ) ); + else + mult = phase->transform( new (phase->C, 3) MulINode( divide, phase->intcon( pos_con ) ) ); - if( log2_con >= 0 ) - mult = phase->transform( new (phase->C, 3) LShiftINode( divide, phase->intcon( log2_con ) ) ); - else - mult = phase->transform( new (phase->C, 3) MulINode( divide, phase->intcon( pos_con ) ) ); - - // Finally, subtract the multiplied divided value from the original - Node *result = new (phase->C, 3) SubINode( in(1), mult ); + // Finally, subtract the multiplied divided value from the original + result = new (phase->C, 3) SubINode( in(1), mult ); + } // Now remove the bogus extra edges used to keep things alive if (can_reshape) { @@ -746,78 +955,133 @@ //------------------------------Idealize--------------------------------------- Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Check for dead control input - if( remove_dead_region(phase, can_reshape) ) return this; + if( in(0) && remove_dead_region(phase, can_reshape) ) return this; + // Don't bother trying to transform a dead node + if( in(0) && in(0)->is_top() ) return NULL; // Get the modulus const Type *t = phase->type( in(2) ); if( t == Type::TOP ) return NULL; - const TypeLong *ti = t->is_long(); + const TypeLong *tl = t->is_long(); // Check for useless control input // Check for excluding mod-zero case - if( in(0) && (ti->_hi < 0 || ti->_lo > 0) ) { + if( in(0) && (tl->_hi < 0 || tl->_lo > 0) ) { set_req(0, NULL); // Yank control input return this; } // See if we are MOD'ing by 2^k or 2^k-1. - if( !ti->is_con() ) return NULL; - jlong con = ti->get_con(); - bool m1 = false; - if( !is_power_of_2_long(con) ) { // Not 2^k - if( !is_power_of_2_long(con+1) ) // Not 2^k-1? - return NULL; // No interesting mod hacks - m1 = true; // Found 2^k-1 - con++; // Convert to 2^k form - } - uint k = log2_long(con); // Extract k + if( !tl->is_con() ) return NULL; + jlong con = tl->get_con(); + + Node *hook = new (phase->C, 1) Node(1); // Expand mod - if( !m1 ) { // Case 2^k - } else { // Case 2^k-1 + if( con >= 0 && con < max_jlong && is_power_of_2_long(con+1) ) { + uint k = exact_log2_long(con+1); // Extract k + // Basic algorithm by David Detlefs. See fastmod_long.java for gory details. // Used to help a popular random number generator which does a long-mod // of 2^31-1 and shows up in SpecJBB and SciMark. 
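The 2^k-1 reduction works because 2^k is congruent to 1 modulo the divisor, so the value can be folded with AndI/RShiftI/AddI until it drops below 2^k. A simplified sketch for a non-negative dividend; the ideal-graph version uses a fixed unroll count from the table above and CMove-based sign fix-ups instead of a loop and a final compare:

#include <cstdint>
#include <cstdio>

// Remainder modulo m = 2^k - 1 by repeated folding, non-negative x only.
static uint32_t mod_mersenne(uint32_t x, int k) {
  uint32_t m = (1u << k) - 1;          // the divisor, 2^k - 1
  while (x >> k)                       // fold: 2^k == 1 (mod m)
    x = (x & m) + (x >> k);            //   AndI + RShiftI + AddI in the graph
  return x == m ? 0 : x;               // final conditional fix-up
}

int main() {
  const int ks[] = { 5, 13, 31 };
  const uint32_t xs[] = { 0u, 1u, 12345u, 0x7FFFFFFFu, 0xFFFFFFFFu };
  for (int k : ks) {
    uint32_t m = (1u << k) - 1;
    for (uint32_t x : xs)
      if (mod_mersenne(x, k) != x % m) { printf("mismatch\n"); return 1; }
  }
  printf("folded reduction matches x %% (2^k - 1)\n");
  return 0;
}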
static int unroll_factor[] = { 999, 999, 61, 30, 20, 15, 12, 10, 8, 7, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1 /*past here we assume 1 forever*/}; int trip_count = 1; if( k < ARRAY_SIZE(unroll_factor)) trip_count = unroll_factor[k]; - if( trip_count > 4 ) return NULL; // Too much unrolling - if (ConditionalMoveLimit == 0) return NULL; // cmov is required - - Node *x = in(1); // Value being mod'd - Node *divisor = in(2); // Also is mask - - Node *hook = new (phase->C, 1) Node(x); - // Generate code to reduce X rapidly to nearly 2^k-1. - for( int i = 0; i < trip_count; i++ ) { + + // If the unroll factor is not too large, and if conditional moves are + // ok, then use this case + if( trip_count <= 5 && ConditionalMoveLimit != 0 ) { + Node *x = in(1); // Value being mod'd + Node *divisor = in(2); // Also is mask + + hook->init_req(0, x); // Add a use to x to prevent him from dying + // Generate code to reduce X rapidly to nearly 2^k-1. + for( int i = 0; i < trip_count; i++ ) { Node *xl = phase->transform( new (phase->C, 3) AndLNode(x,divisor) ); Node *xh = phase->transform( new (phase->C, 3) RShiftLNode(x,phase->intcon(k)) ); // Must be signed x = phase->transform( new (phase->C, 3) AddLNode(xh,xl) ); hook->set_req(0, x); // Add a use to x to prevent him from dying + } + + // Generate sign-fixup code. Was original value positive? + // long hack_res = (i >= 0) ? divisor : CONST64(1); + Node *cmp1 = phase->transform( new (phase->C, 3) CmpLNode( in(1), phase->longcon(0) ) ); + Node *bol1 = phase->transform( new (phase->C, 2) BoolNode( cmp1, BoolTest::ge ) ); + Node *cmov1= phase->transform( new (phase->C, 4) CMoveLNode(bol1, phase->longcon(1), divisor, TypeLong::LONG) ); + // if( x >= hack_res ) x -= divisor; + Node *sub = phase->transform( new (phase->C, 3) SubLNode( x, divisor ) ); + Node *cmp2 = phase->transform( new (phase->C, 3) CmpLNode( x, cmov1 ) ); + Node *bol2 = phase->transform( new (phase->C, 2) BoolNode( cmp2, BoolTest::ge ) ); + // Convention is to not transform the return value of an Ideal + // since Ideal is expected to return a modified 'this' or a new node. + Node *cmov2= new (phase->C, 4) CMoveLNode(bol2, x, sub, TypeLong::LONG); + // cmov2 is now the mod + + // Now remove the bogus extra edges used to keep things alive + if (can_reshape) { + phase->is_IterGVN()->remove_dead_node(hook); + } else { + hook->set_req(0, NULL); // Just yank bogus edge during Parse phase + } + return cmov2; } - // Generate sign-fixup code. Was original value positive? - // long hack_res = (i >= 0) ? divisor : CONST64(1); - Node *cmp1 = phase->transform( new (phase->C, 3) CmpLNode( in(1), phase->longcon(0) ) ); - Node *bol1 = phase->transform( new (phase->C, 2) BoolNode( cmp1, BoolTest::ge ) ); - Node *cmov1= phase->transform( new (phase->C, 4) CMoveLNode(bol1, phase->longcon(1), divisor, TypeLong::LONG) ); - // if( x >= hack_res ) x -= divisor; - Node *sub = phase->transform( new (phase->C, 3) SubLNode( x, divisor ) ); - Node *cmp2 = phase->transform( new (phase->C, 3) CmpLNode( x, cmov1 ) ); - Node *bol2 = phase->transform( new (phase->C, 2) BoolNode( cmp2, BoolTest::ge ) ); - // Convention is to not transform the return value of an Ideal - // since Ideal is expected to return a modified 'this' or a new node. 
- Node *cmov2= new (phase->C, 4) CMoveLNode(bol2, x, sub, TypeLong::LONG); - // cmov2 is now the mod - - // Now remove the bogus extra edges used to keep things alive - if (can_reshape) { - phase->is_IterGVN()->remove_dead_node(hook); - } else { - hook->set_req(0, NULL); // Just yank bogus edge during Parse phase - } - return cmov2; } - return NULL; + + // Fell thru, the unroll case is not appropriate. Transform the modulo + // into a long multiply/int multiply/subtract case + + // Cannot handle mod 0, and min_jint isn't handled by the transform + if( con == 0 || con == min_jlong ) return NULL; + + // Get the absolute value of the constant; at this point, we can use this + jlong pos_con = (con >= 0) ? con : -con; + + // integer Mod 1 is always 0 + if( pos_con == 1 ) return new (phase->C, 1) ConLNode(TypeLong::ZERO); + + int log2_con = -1; + + // If this is a power of two, they maybe we can mask it + if( is_power_of_2_long(pos_con) ) { + log2_con = log2_long(pos_con); + + const Type *dt = phase->type(in(1)); + const TypeLong *dtl = dt->isa_long(); + + // See if this can be masked, if the dividend is non-negative + if( dtl && dtl->_lo >= 0 ) + return ( new (phase->C, 3) AndLNode( in(1), phase->longcon( pos_con-1 ) ) ); + } + + // Save in(1) so that it cannot be changed or deleted + hook->init_req(0, in(1)); + + // Divide using the transform from DivI to MulL + Node *result = transform_long_divide( phase, in(1), pos_con ); + if (result != NULL) { + Node *divide = phase->transform(result); + + // Re-multiply, using a shift if this is a power of two + Node *mult = NULL; + + if( log2_con >= 0 ) + mult = phase->transform( new (phase->C, 3) LShiftLNode( divide, phase->intcon( log2_con ) ) ); + else + mult = phase->transform( new (phase->C, 3) MulLNode( divide, phase->longcon( pos_con ) ) ); + + // Finally, subtract the multiplied divided value from the original + result = new (phase->C, 3) SubLNode( in(1), mult ); + } + + // Now remove the bogus extra edges used to keep things alive + if (can_reshape) { + phase->is_IterGVN()->remove_dead_node(hook); + } else { + hook->set_req(0, NULL); // Just yank bogus edge during Parse phase + } + + // return the value + return result; } //------------------------------Value------------------------------------------ @@ -875,56 +1139,32 @@ (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) return bot; - // If either is a NaN, return an input NaN - if( g_isnan(t1->getf()) ) return t1; - if( g_isnan(t2->getf()) ) return t2; + // If either number is not a constant, we know nothing. + if ((t1->base() != Type::FloatCon) || (t2->base() != Type::FloatCon)) { + return Type::FLOAT; // note: x%x can be either NaN or 0 + } - // It is not worth trying to constant fold this stuff! - return Type::FLOAT; + float f1 = t1->getf(); + float f2 = t2->getf(); + jint x1 = jint_cast(f1); // note: *(int*)&f1, not just (int)f1 + jint x2 = jint_cast(f2); - /* - // If dividend is infinity or divisor is zero, or both, the result is NaN - if( !g_isfinite(t1->getf()) || ((t2->getf() == 0.0) || (jint_cast(t2->getf()) == 0x80000000)) ) - - // X MOD infinity = X - if( !g_isfinite(t2->getf()) && !g_isnan(t2->getf()) ) return t1; - // 0 MOD finite = dividend (positive or negative zero) - // Not valid for: NaN MOD any; any MOD nan; 0 MOD 0; or for 0 MOD NaN - // NaNs are handled previously. 
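Both ModI and ModL fall back to reconstructing the remainder from the strength-reduced quotient, n - (n / d) * d, where the re-multiply collapses to a shift for a power-of-two divisor. The identity itself is easy to spot-check; plain '/' stands in here for the transformed divide graph:

#include <climits>
#include <cstdio>

int main() {
  const int samples[]  = { 0, 7, -7, 100, -100, INT_MAX, INT_MIN };
  const int divisors[] = { 7, -7, 10, 16, 2048 };
  for (int n : samples)
    for (int d : divisors) {
      int q = n / d;                   // transform_int_divide / transform_long_divide
      int r = n - q * d;               // SubI( in(1), MulI or LShiftI of q )
      if (r != n % d) { printf("mismatch\n"); return 1; }
    }
  printf("n %% d == n - (n / d) * d for all samples\n");
  return 0;
}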
- if( !(t2->getf() == 0.0) && !((int)t2->getf() == 0x80000000)) { - if (((t1->getf() == 0.0) || ((int)t1->getf() == 0x80000000)) && g_isfinite(t2->getf()) ) { - return t1; - } - } - // X MOD X is 0 - // Does not work for variables because of NaN's - if( phase->eqv( in(1), in(2) ) && t1->base() == Type::FloatCon) - if (!g_isnan(t1->getf()) && (t1->getf() != 0.0) && ((int)t1->getf() != 0x80000000)) { - if(t1->getf() < 0.0) { - float result = jfloat_cast(0x80000000); - return TypeF::make( result ); - } - else - return TypeF::ZERO; - } + // If either is a NaN, return an input NaN + if (g_isnan(f1)) return t1; + if (g_isnan(f2)) return t2; - // If both numbers are not constants, we know nothing. - if( (t1->base() != Type::FloatCon) || (t2->base() != Type::FloatCon) ) + // If an operand is infinity or the divisor is +/- zero, punt. + if (!g_isfinite(f1) || !g_isfinite(f2) || x2 == 0 || x2 == min_jint) return Type::FLOAT; // We must be modulo'ing 2 float constants. // Make sure that the sign of the fmod is equal to the sign of the dividend - float result = (float)fmod( t1->getf(), t2->getf() ); - float dividend = t1->getf(); - if( (dividend < 0.0) || ((int)dividend == 0x80000000) ) { - if( result > 0.0 ) - result = 0.0 - result; - else if( result == 0.0 ) { - result = jfloat_cast(0x80000000); - } + jint xr = jint_cast(fmod(f1, f2)); + if ((x1 ^ xr) < 0) { + xr ^= min_jint; } - return TypeF::make( result ); - */ + + return TypeF::make(jfloat_cast(xr)); } @@ -943,33 +1183,32 @@ (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) return bot; - // If either is a NaN, return an input NaN - if( g_isnan(t1->getd()) ) return t1; - if( g_isnan(t2->getd()) ) return t2; - // X MOD infinity = X - if( !g_isfinite(t2->getd())) return t1; - // 0 MOD finite = dividend (positive or negative zero) - // Not valid for: NaN MOD any; any MOD nan; 0 MOD 0; or for 0 MOD NaN - // NaNs are handled previously. - if( !(t2->getd() == 0.0) ) { - if( t1->getd() == 0.0 && g_isfinite(t2->getd()) ) { - return t1; - } + // If either number is not a constant, we know nothing. + if ((t1->base() != Type::DoubleCon) || (t2->base() != Type::DoubleCon)) { + return Type::DOUBLE; // note: x%x can be either NaN or 0 } - // X MOD X is 0 - // does not work for variables because of NaN's - if( phase->eqv( in(1), in(2) ) && t1->base() == Type::DoubleCon ) - if (!g_isnan(t1->getd()) && t1->getd() != 0.0) - return TypeD::ZERO; - + double f1 = t1->getd(); + double f2 = t2->getd(); + jlong x1 = jlong_cast(f1); // note: *(long*)&f1, not just (long)f1 + jlong x2 = jlong_cast(f2); - // If both numbers are not constants, we know nothing. - if( (t1->base() != Type::DoubleCon) || (t2->base() != Type::DoubleCon) ) + // If either is a NaN, return an input NaN + if (g_isnan(f1)) return t1; + if (g_isnan(f2)) return t2; + + // If an operand is infinity or the divisor is +/- zero, punt. + if (!g_isfinite(f1) || !g_isfinite(f2) || x2 == 0 || x2 == min_jlong) return Type::DOUBLE; // We must be modulo'ing 2 double constants. 
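The ModF/ModD folding above works on the raw bit patterns (jint_cast / jlong_cast) so that the folded constant always carries the dividend's sign bit, even when the remainder is zero, matching the sign rule the frem/drem bytecodes specify. A small illustration of that fix-up with hypothetical helper names; on a libm whose fmod already follows the IEEE 754 sign convention the xor is simply a no-op:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Force the remainder's sign bit to agree with the dividend's.
static float force_dividend_sign(float dividend, float rem) {
  int32_t x1, xr;
  std::memcpy(&x1, &dividend, sizeof x1);   // raw bits, not (int)dividend
  std::memcpy(&xr, &rem,      sizeof xr);
  if ((x1 ^ xr) < 0)                        // sign bits differ
    xr ^= INT32_MIN;                        // flip the remainder's sign bit
  std::memcpy(&rem, &xr, sizeof rem);
  return rem;
}

int main() {
  // A zero remainder paired with a negative dividend folds to -0.0f.
  float folded = force_dividend_sign(-8.0f, 0.0f);
  printf("%g (signbit=%d)\n", folded, (int)std::signbit(folded));
  return 0;
}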
- return TypeD::make( fmod( t1->getd(), t2->getd() ) ); + // Make sure that the sign of the fmod is equal to the sign of the dividend + jlong xr = jlong_cast(fmod(f1, f2)); + if ((x1 ^ xr) < 0) { + xr ^= min_jlong; + } + + return TypeD::make(jdouble_cast(xr)); } //============================================================================= --- old/hotspot/src/share/vm/opto/doCall.cpp 2009-08-01 04:13:40.059958247 +0100 +++ new/hotspot/src/share/vm/opto/doCall.cpp 2009-08-01 04:13:39.974591921 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)doCall.cpp 1.207 07/07/19 19:08:29 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -393,6 +393,8 @@ } if (cg->is_inline()) { + // Accumulate has_loops estimate + C->set_has_loops(C->has_loops() || call_method->has_loops()); C->env()->notice_inlined_method(call_method); } @@ -581,7 +583,7 @@ Node* ex_klass_node = NULL; if (has_ex_handler() && !ex_type->klass_is_exact()) { Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes()); - ex_klass_node = _gvn.transform(new (C, 3) LoadKlassNode(NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT)); + ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) ); // Compute the exception klass a little more cleverly. // Obvious solution is to simple do a LoadKlass from the 'ex_node'. @@ -593,7 +595,7 @@ ex_klass_node = new (C, ex_node->req()) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT ); for( uint i = 1; i < ex_node->req(); i++ ) { Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() ); - Node* k = _gvn.transform(new (C, 3) LoadKlassNode(0, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT)); + Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) ); ex_klass_node->init_req( i, k ); } _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT); @@ -796,9 +798,9 @@ ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass(); if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() && - (ikl == actual_receiver || ikl->is_subclass_of(actual_receiver))) { - // ikl is a same or better type than the original actual_receiver, - // e.g. static receiver from bytecodes. + (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) { + // ikl is a same or better type than the original actual_receiver, + // e.g. static receiver from bytecodes. actual_receiver = ikl; // Is the actual_receiver exact? actual_receiver_is_exact = receiver_type->klass_is_exact(); --- old/hotspot/src/share/vm/opto/escape.cpp 2009-08-01 04:13:41.013957701 +0100 +++ new/hotspot/src/share/vm/opto/escape.cpp 2009-08-01 04:13:40.923561368 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)escape.cpp 1.10 07/05/17 15:58:23 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,16 +28,6 @@ #include "incls/_precompiled.incl" #include "incls/_escape.cpp.incl" -uint PointsToNode::edge_target(uint e) const { - assert(_edges != NULL && e < (uint)_edges->length(), "valid edge index"); - return (_edges->at(e) >> EdgeShift); -} - -PointsToNode::EdgeType PointsToNode::edge_type(uint e) const { - assert(_edges != NULL && e < (uint)_edges->length(), "valid edge index"); - return (EdgeType) (_edges->at(e) & EdgeMask); -} - void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) { uint v = (targIdx << EdgeShift) + ((uint) et); if (_edges == NULL) { @@ -63,9 +53,9 @@ static const char *esc_names[] = { "UnknownEscape", - "NoEscape ", - "ArgEscape ", - "GlobalEscape " + "NoEscape", + "ArgEscape", + "GlobalEscape" }; static const char *edge_type_suffix[] = { @@ -75,10 +65,14 @@ "F" // FieldEdge }; -void PointsToNode::dump() const { +void PointsToNode::dump(bool print_state) const { NodeType nt = node_type(); - EscapeState es = escape_state(); - tty->print("%s %s [[", node_type_names[(int) nt], esc_names[(int) es]); + tty->print("%s ", node_type_names[(int) nt]); + if (print_state) { + EscapeState es = escape_state(); + tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR"); + } + tty->print("[["); for (uint i = 0; i < edge_count(); i++) { tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]); } @@ -90,15 +84,29 @@ } #endif -ConnectionGraph::ConnectionGraph(Compile * C) : _processed(C->comp_arena()), _node_map(C->comp_arena()) { - _collecting = true; - this->_compile = C; - const PointsToNode &dummy = PointsToNode(); - _nodes = new(C->comp_arena()) GrowableArray(C->comp_arena(), (int) INITIAL_NODE_COUNT, 0, dummy); - _phantom_object = C->top()->_idx; - PointsToNode *phn = ptnode_adr(_phantom_object); - phn->set_node_type(PointsToNode::JavaObject); - phn->set_escape_state(PointsToNode::GlobalEscape); +ConnectionGraph::ConnectionGraph(Compile * C) : + _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()), + _processed(C->comp_arena()), + _collecting(true), + _compile(C), + _node_map(C->comp_arena()) { + + _phantom_object = C->top()->_idx, + add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape,true); + + // Add ConP(#NULL) and ConN(#NULL) nodes. + PhaseGVN* igvn = C->initial_gvn(); + Node* oop_null = igvn->zerocon(T_OBJECT); + _oop_null = oop_null->_idx; + assert(_oop_null < C->unique(), "should be created already"); + add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true); + + if (UseCompressedOops) { + Node* noop_null = igvn->zerocon(T_NARROWOOP); + _noop_null = noop_null->_idx; + assert(_noop_null < C->unique(), "should be created already"); + add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true); + } } void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) { @@ -124,8 +132,20 @@ f->add_edge(to_i, PointsToNode::DeferredEdge); } -int ConnectionGraph::type_to_offset(const Type *t) { - const TypePtr *t_ptr = t->isa_ptr(); +int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { + const Type *adr_type = phase->type(adr); + if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && + adr->in(AddPNode::Address)->is_Proj() && + adr->in(AddPNode::Address)->in(0)->is_Allocate()) { + // We are computing a raw address for a store captured by an Initialize + // compute an appropriate address type. AddP cases #3 and #5 (see below). 
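The removed out-of-line accessors show how connection-graph edges are packed into a single word: the target node index in the upper bits and a small edge-type tag in the low bits. A tiny sketch of the encode/decode pair; the EdgeShift/EdgeMask values and enum numbering here are placeholders, the real definitions live in escape.hpp:

#include <cassert>
#include <cstdint>
#include <cstdio>

enum EdgeType { PointsToEdge = 0, DeferredEdge = 1, FieldEdge = 2 };
static const uint32_t EdgeShift = 2;   // assumed width of the type tag
static const uint32_t EdgeMask  = 3;

static uint32_t pack_edge(uint32_t target_idx, EdgeType et) {
  return (target_idx << EdgeShift) + (uint32_t)et;   // as in PointsToNode::add_edge
}
static uint32_t edge_target(uint32_t e) { return e >> EdgeShift; }
static EdgeType edge_type(uint32_t e)   { return (EdgeType)(e & EdgeMask); }

int main() {
  uint32_t e = pack_edge(1234, DeferredEdge);
  assert(edge_target(e) == 1234 && edge_type(e) == DeferredEdge);
  printf("edge word 0x%x -> target %u, type %d\n", e, edge_target(e), (int)edge_type(e));
  return 0;
}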
+ int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); + assert(offs != Type::OffsetBot || + adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), + "offset must be a constant or it is initialization of array"); + return offs; + } + const TypePtr *t_ptr = adr_type->isa_ptr(); assert(t_ptr != NULL, "must be a pointer type"); return t_ptr->offset(); } @@ -150,37 +170,58 @@ npt->set_escape_state(es); } +void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt, + PointsToNode::EscapeState es, bool done) { + PointsToNode* ptadr = ptnode_adr(n->_idx); + ptadr->_node = n; + ptadr->set_node_type(nt); + + // inline set_escape_state(idx, es); + PointsToNode::EscapeState old_es = ptadr->escape_state(); + if (es > old_es) + ptadr->set_escape_state(es); + + if (done) + _processed.set(n->_idx); +} + PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n, PhaseTransform *phase) { uint idx = n->_idx; PointsToNode::EscapeState es; - // If we are still collecting we don't know the answer yet + // If we are still collecting or there were no non-escaping allocations + // we don't know the answer yet if (_collecting) return PointsToNode::UnknownEscape; // if the node was created after the escape computation, return // UnknownEscape - if (idx >= (uint)_nodes->length()) + if (idx >= nodes_size()) return PointsToNode::UnknownEscape; - es = _nodes->at_grow(idx).escape_state(); + es = ptnode_adr(idx)->escape_state(); // if we have already computed a value, return it - if (es != PointsToNode::UnknownEscape) + if (es != PointsToNode::UnknownEscape && + ptnode_adr(idx)->node_type() == PointsToNode::JavaObject) return es; + // PointsTo() calls n->uncast() which can return a new ideal node. + if (n->uncast()->_idx >= nodes_size()) + return PointsToNode::UnknownEscape; + // compute max escape state of anything this node could point to VectorSet ptset(Thread::current()->resource_area()); PointsTo(ptset, n, phase); - for( VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i ) { + for(VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i) { uint pt = i.elem; - PointsToNode::EscapeState pes = _nodes->at(pt).escape_state(); + PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state(); if (pes > es) es = pes; } // cache the computed escape state assert(es != PointsToNode::UnknownEscape, "should have computed an escape state"); - _nodes->adr_at(idx)->set_escape_state(es); + ptnode_adr(idx)->set_escape_state(es); return es; } @@ -188,83 +229,103 @@ VectorSet visited(Thread::current()->resource_area()); GrowableArray worklist; - n = skip_casts(n); - PointsToNode npt = _nodes->at_grow(n->_idx); +#ifdef ASSERT + Node *orig_n = n; +#endif + + n = n->uncast(); + PointsToNode* npt = ptnode_adr(n->_idx); // If we have a JavaObject, return just that object - if (npt.node_type() == PointsToNode::JavaObject) { + if (npt->node_type() == PointsToNode::JavaObject) { ptset.set(n->_idx); return; } - // we may have a Phi which has not been processed - if (npt._node == NULL) { - assert(n->is_Phi(), "unprocessed node must be a Phi"); - record_for_escape_analysis(n); - npt = _nodes->at(n->_idx); +#ifdef ASSERT + if (npt->_node == NULL) { + if (orig_n != n) + orig_n->dump(); + n->dump(); + assert(npt->_node != NULL, "unregistered node"); } +#endif worklist.push(n->_idx); while(worklist.length() > 0) { int ni = worklist.pop(); - PointsToNode pn = _nodes->at_grow(ni); - if (!visited.test(ni)) { - visited.set(ni); - - // ensure that all inputs of a Phi have 
been processed - if (_collecting && pn._node->is_Phi()) { - PhiNode *phi = pn._node->as_Phi(); - process_phi_escape(phi, phase); - } + if (visited.test_set(ni)) + continue; - int edges_processed = 0; - for (uint e = 0; e < pn.edge_count(); e++) { - PointsToNode::EdgeType et = pn.edge_type(e); - if (et == PointsToNode::PointsToEdge) { - ptset.set(pn.edge_target(e)); - edges_processed++; - } else if (et == PointsToNode::DeferredEdge) { - worklist.push(pn.edge_target(e)); - edges_processed++; - } - } - if (edges_processed == 0) { - // no deferred or pointsto edges found. Assume the value was set outside - // this method. Add the phantom object to the pointsto set. - ptset.set(_phantom_object); + PointsToNode* pn = ptnode_adr(ni); + // ensure that all inputs of a Phi have been processed + assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni),""); + + int edges_processed = 0; + uint e_cnt = pn->edge_count(); + for (uint e = 0; e < e_cnt; e++) { + uint etgt = pn->edge_target(e); + PointsToNode::EdgeType et = pn->edge_type(e); + if (et == PointsToNode::PointsToEdge) { + ptset.set(etgt); + edges_processed++; + } else if (et == PointsToNode::DeferredEdge) { + worklist.push(etgt); + edges_processed++; + } else { + assert(false,"neither PointsToEdge or DeferredEdge"); } } + if (edges_processed == 0) { + // no deferred or pointsto edges found. Assume the value was set + // outside this method. Add the phantom object to the pointsto set. + ptset.set(_phantom_object); + } } } -void ConnectionGraph::remove_deferred(uint ni) { - VectorSet visited(Thread::current()->resource_area()); +void ConnectionGraph::remove_deferred(uint ni, GrowableArray* deferred_edges, VectorSet* visited) { + // This method is most expensive during ConnectionGraph construction. + // Reuse vectorSet and an additional growable array for deferred edges. + deferred_edges->clear(); + visited->Clear(); - uint i = 0; + visited->set(ni); PointsToNode *ptn = ptnode_adr(ni); - while(i < ptn->edge_count()) { - if (ptn->edge_type(i) != PointsToNode::DeferredEdge) { - i++; - } else { - uint t = ptn->edge_target(i); - PointsToNode *ptt = ptnode_adr(t); + // Mark current edges as visited and move deferred edges to separate array. + for (uint i = 0; i < ptn->edge_count(); ) { + uint t = ptn->edge_target(i); +#ifdef ASSERT + assert(!visited->test_set(t), "expecting no duplications"); +#else + visited->set(t); +#endif + if (ptn->edge_type(i) == PointsToNode::DeferredEdge) { ptn->remove_edge(t, PointsToNode::DeferredEdge); - if(!visited.test(t)) { - visited.set(t); - for (uint j = 0; j < ptt->edge_count(); j++) { - uint n1 = ptt->edge_target(j); - PointsToNode *pt1 = ptnode_adr(n1); - switch(ptt->edge_type(j)) { - case PointsToNode::PointsToEdge: - add_pointsto_edge(ni, n1); - break; - case PointsToNode::DeferredEdge: - add_deferred_edge(ni, n1); - break; - case PointsToNode::FieldEdge: - assert(false, "invalid connection graph"); - break; - } + deferred_edges->append(t); + } else { + i++; + } + } + for (int next = 0; next < deferred_edges->length(); ++next) { + uint t = deferred_edges->at(next); + PointsToNode *ptt = ptnode_adr(t); + uint e_cnt = ptt->edge_count(); + for (uint e = 0; e < e_cnt; e++) { + uint etgt = ptt->edge_target(e); + if (visited->test_set(etgt)) + continue; + + PointsToNode::EdgeType et = ptt->edge_type(e); + if (et == PointsToNode::PointsToEdge) { + add_pointsto_edge(ni, etgt); + if(etgt == _phantom_object) { + // Special case - field set outside (globally escaping). 
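The reworked remove_deferred walks deferred edges iteratively with a growable worklist and a visited set instead of recursing. A generic sketch of that collapse on a toy graph, using hypothetical types and omitting the phantom-object / GlobalEscape special case:

#include <cstdio>
#include <vector>

enum EdgeType { PointsToEdge, DeferredEdge };
struct Edge { int target; EdgeType type; };

// Replace node ni's deferred edges by direct PointsTo edges to everything
// reachable through chains of deferred edges, visiting each node once.
static void remove_deferred(std::vector<std::vector<Edge> > &g, int ni) {
  std::vector<bool> visited(g.size(), false);
  std::vector<int>  worklist;
  std::vector<Edge> kept;
  visited[ni] = true;
  for (const Edge &e : g[ni]) {          // split direct edges from deferred ones
    visited[e.target] = true;
    if (e.type == DeferredEdge) worklist.push_back(e.target);
    else                        kept.push_back(e);
  }
  for (size_t next = 0; next < worklist.size(); ++next) {
    for (const Edge &e : g[worklist[next]]) {
      if (visited[e.target]) continue;
      visited[e.target] = true;
      if (e.type == PointsToEdge) kept.push_back(e);
      else                        worklist.push_back(e.target);
    }
  }
  g[ni] = kept;
}

int main() {
  // 0 -D-> 1 -D-> 2 -P-> 3, plus 0 -P-> 4: node 0 should point to 4 and 3.
  std::vector<std::vector<Edge> > g(5);
  g[0].push_back(Edge{1, DeferredEdge});
  g[0].push_back(Edge{4, PointsToEdge});
  g[1].push_back(Edge{2, DeferredEdge});
  g[2].push_back(Edge{3, PointsToEdge});
  remove_deferred(g, 0);
  for (const Edge &e : g[0]) printf("0 -> %d\n", e.target);   // prints 4 then 3
  return 0;
}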
+ ptn->set_escape_state(PointsToNode::GlobalEscape); } + } else if (et == PointsToNode::DeferredEdge) { + deferred_edges->append(etgt); + } else { + assert(false,"invalid connection graph"); } } } @@ -276,15 +337,15 @@ // a pointsto edge is added if it is a JavaObject void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) { - PointsToNode an = _nodes->at_grow(adr_i); - PointsToNode to = _nodes->at_grow(to_i); - bool deferred = (to.node_type() == PointsToNode::LocalVar); - - for (uint fe = 0; fe < an.edge_count(); fe++) { - assert(an.edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge"); - int fi = an.edge_target(fe); - PointsToNode pf = _nodes->at_grow(fi); - int po = pf.offset(); + PointsToNode* an = ptnode_adr(adr_i); + PointsToNode* to = ptnode_adr(to_i); + bool deferred = (to->node_type() == PointsToNode::LocalVar); + + for (uint fe = 0; fe < an->edge_count(); fe++) { + assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge"); + int fi = an->edge_target(fe); + PointsToNode* pf = ptnode_adr(fi); + int po = pf->offset(); if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) { if (deferred) add_deferred_edge(fi, to_i); @@ -294,16 +355,16 @@ } } -// Add a deferred edge from node given by "from_i" to any field of adr_i whose offset -// matches "offset" +// Add a deferred edge from node given by "from_i" to any field of adr_i +// whose offset matches "offset". void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) { - PointsToNode an = _nodes->at_grow(adr_i); - for (uint fe = 0; fe < an.edge_count(); fe++) { - assert(an.edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge"); - int fi = an.edge_target(fe); - PointsToNode pf = _nodes->at_grow(fi); - int po = pf.offset(); - if (pf.edge_count() == 0) { + PointsToNode* an = ptnode_adr(adr_i); + for (uint fe = 0; fe < an->edge_count(); fe++) { + assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge"); + int fi = an->edge_target(fe); + PointsToNode* pf = ptnode_adr(fi); + int po = pf->offset(); + if (pf->edge_count() == 0) { // we have not seen any stores to this field, assume it was set outside this method add_pointsto_edge(fi, _phantom_object); } @@ -313,70 +374,198 @@ } } -// -// Search memory chain of "mem" to find a MemNode whose address -// is the specified alias index. Returns the MemNode found or the -// first non-MemNode encountered. -// -Node *ConnectionGraph::find_mem(Node *mem, int alias_idx, PhaseGVN *igvn) { - if (mem == NULL) - return mem; - while (mem->is_Mem()) { - const Type *at = igvn->type(mem->in(MemNode::Address)); - if (at != Type::TOP) { - assert (at->isa_ptr() != NULL, "pointer type required."); - int idx = _compile->get_alias_index(at->is_ptr()); - if (idx == alias_idx) - break; - } - mem = mem->in(MemNode::Memory); +// Helper functions + +static Node* get_addp_base(Node *addp) { + assert(addp->is_AddP(), "must be AddP"); + // + // AddP cases for Base and Address inputs: + // case #1. Direct object's field reference: + // Allocate + // | + // Proj #5 ( oop result ) + // | + // CheckCastPP (cast to instance type) + // | | + // AddP ( base == address ) + // + // case #2. Indirect object's field reference: + // Phi + // | + // CastPP (cast to instance type) + // | | + // AddP ( base == address ) + // + // case #3. Raw object's field reference for Initialize node: + // Allocate + // | + // Proj #5 ( oop result ) + // top | + // \ | + // AddP ( base == top ) + // + // case #4. 
Array's element reference: + // {CheckCastPP | CastPP} + // | | | + // | AddP ( array's element offset ) + // | | + // AddP ( array's offset ) + // + // case #5. Raw object's field reference for arraycopy stub call: + // The inline_native_clone() case when the arraycopy stub is called + // after the allocation before Initialize and CheckCastPP nodes. + // Allocate + // | + // Proj #5 ( oop result ) + // | | + // AddP ( base == address ) + // + // case #6. Constant Pool, ThreadLocal, CastX2P or + // Raw object's field reference: + // {ConP, ThreadLocal, CastX2P, raw Load} + // top | + // \ | + // AddP ( base == top ) + // + // case #7. Klass's field reference. + // LoadKlass + // | | + // AddP ( base == address ) + // + // case #8. narrow Klass's field reference. + // LoadNKlass + // | + // DecodeN + // | | + // AddP ( base == address ) + // + Node *base = addp->in(AddPNode::Base)->uncast(); + if (base->is_top()) { // The AddP case #3 and #6. + base = addp->in(AddPNode::Address)->uncast(); + assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal || + base->Opcode() == Op_CastX2P || base->is_DecodeN() || + (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) || + (base->is_Proj() && base->in(0)->is_Allocate()), "sanity"); + } + return base; +} + +static Node* find_second_addp(Node* addp, Node* n) { + assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes"); + + Node* addp2 = addp->raw_out(0); + if (addp->outcnt() == 1 && addp2->is_AddP() && + addp2->in(AddPNode::Base) == n && + addp2->in(AddPNode::Address) == addp) { + + assert(addp->in(AddPNode::Base) == n, "expecting the same base"); + // + // Find array's offset to push it on worklist first and + // as result process an array's element offset first (pushed second) + // to avoid CastPP for the array's offset. + // Otherwise the inserted CastPP (LocalVar) will point to what + // the AddP (Field) points to. Which would be wrong since + // the algorithm expects the CastPP has the same point as + // as AddP's base CheckCastPP (LocalVar). + // + // ArrayAllocation + // | + // CheckCastPP + // | + // memProj (from ArrayAllocation CheckCastPP) + // | || + // | || Int (element index) + // | || | ConI (log(element size)) + // | || | / + // | || LShift + // | || / + // | AddP (array's element offset) + // | | + // | | ConI (array's offset: #12(32-bits) or #24(64-bits)) + // | / / + // AddP (array's offset) + // | + // Load/Store (memory operation on array's element) + // + return addp2; } - return mem; + return NULL; } // // Adjust the type and inputs of an AddP which computes the // address of a field of an instance // -void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) { - const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); +bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) { const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr(); - assert(t != NULL, "expecting oopptr"); - assert(base_t != NULL && base_t->is_instance(), "expecting instance oopptr"); - uint inst_id = base_t->instance_id(); - assert(!t->is_instance() || t->instance_id() == inst_id, + assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr"); + const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); + if (t == NULL) { + // We are computing a raw address for a store captured by an Initialize + // compute an appropriate address type (cases #3 and #5). 
+ assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer"); + assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation"); + intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot); + assert(offs != Type::OffsetBot, "offset must be a constant"); + t = base_t->add_offset(offs)->is_oopptr(); + } + int inst_id = base_t->instance_id(); + assert(!t->is_known_instance() || t->instance_id() == inst_id, "old type must be non-instance or match new type"); + + // The type 't' could be subclass of 'base_t'. + // As result t->offset() could be large then base_t's size and it will + // cause the failure in add_offset() with narrow oops since TypeOopPtr() + // constructor verifies correctness of the offset. + // + // It could happend on subclass's branch (from the type profiling + // inlining) which was not eliminated during parsing since the exactness + // of the allocation type was not propagated to the subclass type check. + // + // Do nothing for such AddP node and don't process its users since + // this code branch will go away. + // + if (!t->is_known_instance() && + !t->klass()->equals(base_t->klass()) && + t->klass()->is_subtype_of(base_t->klass())) { + return false; // bail out + } + const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr(); - // ensure an alias index is allocated for the instance type + // Do NOT remove the next call: ensure an new alias index is allocated + // for the instance type int alias_idx = _compile->get_alias_index(tinst); igvn->set_type(addp, tinst); // record the allocation in the node map set_map(addp->_idx, get_map(base->_idx)); - // if the Address input is not the appropriate instance type (due to intervening - // casts,) insert a cast - Node *adr = addp->in(AddPNode::Address); - const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); - if (atype->instance_id() != inst_id) { - assert(!atype->is_instance(), "no conflicting instances"); - const TypeOopPtr *new_atype = base_t->add_offset(atype->offset())->isa_oopptr(); - Node *acast = new (_compile, 2) CastPPNode(adr, new_atype); - acast->set_req(0, adr->in(0)); - igvn->set_type(acast, new_atype); - record_for_optimizer(acast); - Node *bcast = acast; - Node *abase = addp->in(AddPNode::Base); - if (abase != adr) { - bcast = new (_compile, 2) CastPPNode(abase, base_t); - bcast->set_req(0, abase->in(0)); - igvn->set_type(bcast, base_t); - record_for_optimizer(bcast); - } - igvn->hash_delete(addp); - addp->set_req(AddPNode::Base, bcast); - addp->set_req(AddPNode::Address, acast); - igvn->hash_insert(addp); - record_for_optimizer(addp); + + // Set addp's Base and Address to 'base'. + Node *abase = addp->in(AddPNode::Base); + Node *adr = addp->in(AddPNode::Address); + if (adr->is_Proj() && adr->in(0)->is_Allocate() && + adr->in(0)->_idx == (uint)inst_id) { + // Skip AddP cases #3 and #5. + } else { + assert(!abase->is_top(), "sanity"); // AddP case #3 + if (abase != base) { + igvn->hash_delete(addp); + addp->set_req(AddPNode::Base, base); + if (abase == adr) { + addp->set_req(AddPNode::Address, base); + } else { + // AddP case #4 (adr is array's element offset AddP node) +#ifdef ASSERT + const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); + assert(adr->is_AddP() && atype != NULL && + atype->instance_id() == inst_id, "array's element offset should be processed first"); +#endif + } + igvn->hash_insert(addp); + } } + // Put on IGVN worklist since at least addp's type was changed above. 
+ record_for_optimizer(addp); + return true; } // @@ -389,17 +578,25 @@ new_created = false; int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); // nothing to do if orig_phi is bottom memory or matches alias_idx - if (phi_alias_idx == Compile::AliasIdxBot || phi_alias_idx == alias_idx) { + if (phi_alias_idx == alias_idx) { return orig_phi; } // have we already created a Phi for this alias index? PhiNode *result = get_map_phi(orig_phi->_idx); - const TypePtr *atype = C->get_adr_type(alias_idx); if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) { return result; } - + if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) { + if (C->do_escape_analysis() == true && !C->failing()) { + // Retry compilation without escape analysis. + // If this is the first failure, the sentinel string will "stick" + // to the Compile object, and the C2Compiler will see it and retry. + C->record_failure(C2Compiler::retry_no_escape_analysis()); + } + return NULL; + } orig_phi_worklist.append_if_missing(orig_phi); + const TypePtr *atype = C->get_adr_type(alias_idx); result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); set_map_phi(orig_phi->_idx, result); igvn->set_type(result, result->bottom_type()); @@ -417,7 +614,7 @@ assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory"); Compile *C = _compile; bool new_phi_created; - PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created); + PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created); if (!new_phi_created) { return result; } @@ -430,45 +627,158 @@ bool finished = false; while(!finished) { while (idx < phi->req()) { - Node *mem = find_mem(phi->in(idx), alias_idx, igvn); + Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn); if (mem != NULL && mem->is_Phi()) { - PhiNode *nphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created); + PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created); if (new_phi_created) { // found an phi for which we created a new split, push current one on worklist and begin // processing new one phi_list.push(phi); cur_input.push(idx); phi = mem->as_Phi(); - result = nphi; + result = newphi; idx = 1; continue; } else { - mem = nphi; + mem = newphi; } } + if (C->failing()) { + return NULL; + } result->set_req(idx++, mem); } #ifdef ASSERT // verify that the new Phi has an input for each input of the original assert( phi->req() == result->req(), "must have same number of inputs."); assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match"); +#endif + // Check if all new phi's inputs have specified alias index. + // Otherwise use old phi. for (uint i = 1; i < phi->req(); i++) { - assert((phi->in(i) == NULL) == (result->in(i) == NULL), "inputs must correspond."); + Node* in = result->in(i); + assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); } -#endif // we have finished processing a Phi, see if there are any more to do finished = (phi_list.length() == 0 ); if (!finished) { phi = phi_list.pop(); idx = cur_input.pop(); - PhiNode *prev_phi = get_map_phi(phi->_idx); - prev_phi->set_req(idx++, result); - result = prev_phi; + PhiNode *prev_result = get_map_phi(phi->_idx); + prev_result->set_req(idx++, result); + result = prev_result; + } + } + return result; +} + + +// +// The next methods are derived from methods in MemNode. 
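The subclass bail-out in split_AddP() above (returning false when the AddP's type is a proper subclass of the allocation's type) is easiest to see from the Java side. A hypothetical sketch, assuming profile-driven inlining keeps the subclass branch alive even though the receiver's allocation type is exact:

    public class SubclassBranch {
        static class Base { int f; }
        static class Sub extends Base { int g; }

        static int read(Base b) {
            // After inlining, this branch may survive parsing; the AddP for
            // ((Sub) b).g is then typed as Sub with an offset past Base's size,
            // which is the dead branch split_AddP() refuses to retype.
            if (b instanceof Sub) {
                return ((Sub) b).g;
            }
            return b.f;
        }

        public static void main(String[] args) {
            System.out.println(read(new Base()));
        }
    }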
+// +static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *tinst) { + Node *mem = mmem; + // TypeInstPtr::NOTNULL+any is an OOP with unknown offset - generally + // means an array I have not precisely typed yet. Do not do any + // alias stuff with it any time soon. + if( tinst->base() != Type::AnyPtr && + !(tinst->klass()->is_java_lang_Object() && + tinst->offset() == Type::OffsetBot) ) { + mem = mmem->memory_at(alias_idx); + // Update input if it is progress over what we have now + } + return mem; +} + +// +// Search memory chain of "mem" to find a MemNode whose address +// is the specified alias index. +// +Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray &orig_phis, PhaseGVN *phase) { + if (orig_mem == NULL) + return orig_mem; + Compile* C = phase->C; + const TypeOopPtr *tinst = C->get_adr_type(alias_idx)->isa_oopptr(); + bool is_instance = (tinst != NULL) && tinst->is_known_instance(); + Node *start_mem = C->start()->proj_out(TypeFunc::Memory); + Node *prev = NULL; + Node *result = orig_mem; + while (prev != result) { + prev = result; + if (result == start_mem) + break; // hit one of our sentinals + if (result->is_Mem()) { + const Type *at = phase->type(result->in(MemNode::Address)); + if (at != Type::TOP) { + assert (at->isa_ptr() != NULL, "pointer type required."); + int idx = C->get_alias_index(at->is_ptr()); + if (idx == alias_idx) + break; + } + result = result->in(MemNode::Memory); + } + if (!is_instance) + continue; // don't search further for non-instance types + // skip over a call which does not affect this memory slice + if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { + Node *proj_in = result->in(0); + if (proj_in->is_Allocate() && proj_in->_idx == (uint)tinst->instance_id()) { + break; // hit one of our sentinals + } else if (proj_in->is_Call()) { + CallNode *call = proj_in->as_Call(); + if (!call->may_modify(tinst, phase)) { + result = call->in(TypeFunc::Memory); + } + } else if (proj_in->is_Initialize()) { + AllocateNode* alloc = proj_in->as_Initialize()->allocation(); + // Stop if this is the initialization for the object instance which + // which contains this memory slice, otherwise skip over it. + if (alloc == NULL || alloc->_idx != (uint)tinst->instance_id()) { + result = proj_in->in(TypeFunc::Memory); + } + } else if (proj_in->is_MemBar()) { + result = proj_in->in(TypeFunc::Memory); + } + } else if (result->is_MergeMem()) { + MergeMemNode *mmem = result->as_MergeMem(); + result = step_through_mergemem(mmem, alias_idx, tinst); + if (result == mmem->base_memory()) { + // Didn't find instance memory, search through general slice recursively. + result = mmem->memory_at(C->get_general_index(alias_idx)); + result = find_inst_mem(result, alias_idx, orig_phis, phase); + if (C->failing()) { + return NULL; + } + mmem->set_memory_at(alias_idx, result); + } + } else if (result->is_Phi() && + C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { + Node *un = result->as_Phi()->unique_input(phase); + if (un != NULL) { + result = un; + } else { + break; + } } } + if (result->is_Phi()) { + PhiNode *mphi = result->as_Phi(); + assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); + const TypePtr *t = mphi->adr_type(); + if (C->get_alias_index(t) != alias_idx) { + // Create a new Phi with the specified alias index type. 
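The split_memory_phi() call that follows creates a per-instance memory Phi for the new alias index. A rough Java-level illustration of the shape that needs it (hypothetical names; whether the object is actually scalar replaced depends on the rest of the analysis):

    public class PhiSplit {
        static class Point { int x; }

        static int test(boolean flag) {
            Point p = new Point();   // may get its own instance alias index
            if (flag) {
                p.x = 1;
            } else {
                p.x = 2;
            }
            // The load below sees a memory Phi merging both stores; a split
            // Phi on p's instance slice is created for it here.
            return p.x;
        }

        public static void main(String[] args) {
            System.out.println(test(args.length > 0));
        }
    }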
+ result = split_memory_phi(mphi, alias_idx, orig_phis, phase); + } else if (!is_instance) { + // Push all non-instance Phis on the orig_phis worklist to update inputs + // during Phase 4 if needed. + orig_phis.append_if_missing(mphi); + } + } + // the result is either MemNode, PhiNode, InitializeNode. return result; } + // // Convert the types of unescaped object to instance types where possible, // propagate the new type information through the graph, and update memory @@ -567,51 +877,129 @@ VectorSet visited(Thread::current()->resource_area()); VectorSet ptset(Thread::current()->resource_area()); - // Phase 1: Process possible allocations from alloc_worklist. Create instance - // types for the CheckCastPP for allocations where possible. + + // Phase 1: Process possible allocations from alloc_worklist. + // Create instance types for the CheckCastPP for allocations where possible. + // + // (Note: don't forget to change the order of the second AddP node on + // the alloc_worklist if the order of the worklist processing is changed, + // see the comment in find_second_addp().) + // while (alloc_worklist.length() != 0) { Node *n = alloc_worklist.pop(); uint ni = n->_idx; + const TypeOopPtr* tinst = NULL; if (n->is_Call()) { CallNode *alloc = n->as_Call(); // copy escape information to call node - PointsToNode ptn = _nodes->at(alloc->_idx); + PointsToNode* ptn = ptnode_adr(alloc->_idx); PointsToNode::EscapeState es = escape_state(alloc, igvn); - alloc->_escape_state = es; + // We have an allocation or call which returns a Java object, + // see if it is unescaped. + if (es != PointsToNode::NoEscape || !ptn->_scalar_replaceable) + continue; + if (alloc->is_Allocate()) { + // Set the scalar_replaceable flag before the next check. + alloc->as_Allocate()->_is_scalar_replaceable = true; + } // find CheckCastPP of call return value - n = alloc->proj_out(TypeFunc::Parms); - if (n != NULL && n->outcnt() == 1) { - n = n->unique_out(); - if (n->Opcode() != Op_CheckCastPP) { + n = alloc->result_cast(); + if (n == NULL || // No uses accept Initialize or + !n->is_CheckCastPP()) // not unique CheckCastPP. + continue; + // The inline code for Object.clone() casts the allocation result to + // java.lang.Object and then to the actual type of the allocated + // object. Detect this case and use the second cast. + // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when + // the allocation result is cast to java.lang.Object and then + // to the actual Array type. 
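A runnable example of the Object.clone() pattern described in the comment above (the j.l.reflect.Array.newInstance case is analogous): the intrinsic casts the fresh allocation to java.lang.Object first, and the second CheckCastPP, which the detection code that follows selects, carries the real type.

    public class CloneCast {
        static int[] copy(int[] src) {
            // Array clone() is intrinsified: the new allocation is cast to
            // java.lang.Object and then, by a second CheckCastPP, to int[].
            return src.clone();
        }
        public static void main(String[] args) {
            int[] a = {1, 2, 3};
            System.out.println(copy(a)[1]);
        }
    }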
+ if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL + && (alloc->is_AllocateArray() || + igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) { + Node *cast2 = NULL; + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + Node *use = n->fast_out(i); + if (use->is_CheckCastPP()) { + cast2 = use; + break; + } + } + if (cast2 != NULL) { + n = cast2; + } else { continue; } - } else { - continue; - } - // we have an allocation or call which returns a Java object, see if it is unescaped - if (es != PointsToNode::NoEscape || !ptn._unique_type) { - continue; // can't make a unique type } + set_escape_state(n->_idx, es); + // in order for an object to be scalar-replaceable, it must be: + // - a direct allocation (not a call returning an object) + // - non-escaping + // - eligible to be a unique type + // - not determined to be ineligible by escape analysis set_map(alloc->_idx, n); set_map(n->_idx, alloc); - const TypeInstPtr *t = igvn->type(n)->isa_instptr(); - // Unique types which are arrays are not currently supported. - // The check for AllocateArray is needed in case an array - // allocation is immediately cast to Object - if (t == NULL || alloc->is_AllocateArray()) + const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); + if (t == NULL) continue; // not a TypeInstPtr - const TypeOopPtr *tinst = t->cast_to_instance(ni); + tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni); igvn->hash_delete(n); igvn->set_type(n, tinst); n->raise_bottom_type(tinst); igvn->hash_insert(n); + record_for_optimizer(n); + if (alloc->is_Allocate() && ptn->_scalar_replaceable && + (t->isa_instptr() || t->isa_aryptr())) { + + // First, put on the worklist all Field edges from Connection Graph + // which is more accurate then putting immediate users from Ideal Graph. + for (uint e = 0; e < ptn->edge_count(); e++) { + Node *use = ptnode_adr(ptn->edge_target(e))->_node; + assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(), + "only AddP nodes are Field edges in CG"); + if (use->outcnt() > 0) { // Don't process dead nodes + Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); + if (addp2 != NULL) { + assert(alloc->is_AllocateArray(),"array allocation was expected"); + alloc_worklist.append_if_missing(addp2); + } + alloc_worklist.append_if_missing(use); + } + } + + // An allocation may have an Initialize which has raw stores. Scan + // the users of the raw allocation result and push AddP users + // on alloc_worklist. 
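The "raw stores" mentioned above are typically constructor writes captured by the allocation's Initialize node, i.e. stores issued through the raw allocation result before the oop is cast; the scan that follows pushes the corresponding AddP users. A small, hypothetical Java shape:

    public class CapturedInit {
        static class Point {
            final int x;
            final int y;
            Point(int x, int y) {   // these field writes are typically captured
                this.x = x;         // by the Initialize node as raw stores
                this.y = y;
            }
        }
        static int test() {
            Point p = new Point(3, 4);   // AddPs hang off the raw allocation result
            return p.x + p.y;
        }
        public static void main(String[] args) {
            System.out.println(test());
        }
    }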
+ Node *raw_result = alloc->proj_out(TypeFunc::Parms); + assert (raw_result != NULL, "must have an allocation result"); + for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { + Node *use = raw_result->fast_out(i); + if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes + Node* addp2 = find_second_addp(use, raw_result); + if (addp2 != NULL) { + assert(alloc->is_AllocateArray(),"array allocation was expected"); + alloc_worklist.append_if_missing(addp2); + } + alloc_worklist.append_if_missing(use); + } else if (use->is_Initialize()) { + memnode_worklist.append_if_missing(use); + } + } + } } else if (n->is_AddP()) { ptset.Clear(); - PointsTo(ptset, n->in(AddPNode::Address), igvn); + PointsTo(ptset, get_addp_base(n), igvn); assert(ptset.Size() == 1, "AddP address is unique"); - Node *base = get_map(ptset.getelem()); - split_AddP(n, base, igvn); - } else if (n->is_Phi() || n->Opcode() == Op_CastPP || n->Opcode() == Op_CheckCastPP) { + uint elem = ptset.getelem(); // Allocation node's index + if (elem == _phantom_object) + continue; // Assume the value was set outside this method. + Node *base = get_map(elem); // CheckCastPP node + if (!split_AddP(n, base, igvn)) continue; // wrong type + tinst = igvn->type(base)->isa_oopptr(); + } else if (n->is_Phi() || + n->is_CheckCastPP() || + n->is_EncodeP() || + n->is_DecodeN() || + (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { if (visited.test_set(n->_idx)) { assert(n->is_Phi(), "loops only through Phi's"); continue; // already processed @@ -619,17 +1007,37 @@ ptset.Clear(); PointsTo(ptset, n, igvn); if (ptset.Size() == 1) { + uint elem = ptset.getelem(); // Allocation node's index + if (elem == _phantom_object) + continue; // Assume the value was set outside this method. 
+ Node *val = get_map(elem); // CheckCastPP node TypeNode *tn = n->as_Type(); - Node *val = get_map(ptset.getelem()); - const TypeInstPtr *val_t = igvn->type(val)->isa_instptr();; - assert(val_t != NULL && val_t->is_instance(), "instance type expected."); - const TypeInstPtr *tn_t = igvn->type(tn)->isa_instptr();; + tinst = igvn->type(val)->isa_oopptr(); + assert(tinst != NULL && tinst->is_known_instance() && + (uint)tinst->instance_id() == elem , "instance type expected."); + + const Type *tn_type = igvn->type(tn); + const TypeOopPtr *tn_t; + if (tn_type->isa_narrowoop()) { + tn_t = tn_type->make_ptr()->isa_oopptr(); + } else { + tn_t = tn_type->isa_oopptr(); + } - if (tn_t != NULL && val_t->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE)->higher_equal(tn_t)) { + if (tn_t != NULL && + tinst->cast_to_instance_id(TypeOopPtr::InstanceBot)->higher_equal(tn_t)) { + if (tn_type->isa_narrowoop()) { + tn_type = tinst->make_narrowoop(); + } else { + tn_type = tinst; + } igvn->hash_delete(tn); - igvn->set_type(tn, val_t); - tn->set_type(val_t); + igvn->set_type(tn, tn_type); + tn->set_type(tn_type); igvn->hash_insert(tn); + record_for_optimizer(n); + } else { + continue; // wrong type } } } else { @@ -639,13 +1047,40 @@ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node *use = n->fast_out(i); if(use->is_Mem() && use->in(MemNode::Address) == n) { - memnode_worklist.push(use); - } else if (use->is_AddP() || use->is_Phi() || use->Opcode() == Op_CastPP || use->Opcode() == Op_CheckCastPP) { - alloc_worklist.push(use); + memnode_worklist.append_if_missing(use); + } else if (use->is_Initialize()) { + memnode_worklist.append_if_missing(use); + } else if (use->is_MergeMem()) { + mergemem_worklist.append_if_missing(use); + } else if (use->is_SafePoint() && tinst != NULL) { + // Look for MergeMem nodes for calls which reference unique allocation + // (through CheckCastPP nodes) even for debug info. + Node* m = use->in(TypeFunc::Memory); + uint iid = tinst->instance_id(); + while (m->is_Proj() && m->in(0)->is_SafePoint() && + m->in(0) != use && !m->in(0)->_idx != iid) { + m = m->in(0)->in(TypeFunc::Memory); + } + if (m->is_MergeMem()) { + mergemem_worklist.append_if_missing(m); + } + } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes + Node* addp2 = find_second_addp(use, n); + if (addp2 != NULL) { + alloc_worklist.append_if_missing(addp2); + } + alloc_worklist.append_if_missing(use); + } else if (use->is_Phi() || + use->is_CheckCastPP() || + use->is_EncodeP() || + use->is_DecodeN() || + (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { + alloc_worklist.append_if_missing(use); } } } + // New alias types were created in split_AddP(). uint new_index_end = (uint) _compile->num_alias_types(); // Phase 2: Process MemNode's from memnode_worklist. 
compute new address type and @@ -654,29 +1089,37 @@ if (memnode_worklist.length() == 0) return; // nothing to do - while (memnode_worklist.length() != 0) { Node *n = memnode_worklist.pop(); + if (visited.test_set(n->_idx)) + continue; if (n->is_Phi()) { assert(n->as_Phi()->adr_type() != TypePtr::BOTTOM, "narrow memory slice required"); // we don't need to do anything, but the users must be pushed if we haven't processed // this Phi before - if (visited.test_set(n->_idx)) + } else if (n->is_Initialize()) { + // we don't need to do anything, but the users of the memory projection must be pushed + n = n->as_Initialize()->proj_out(TypeFunc::Memory); + if (n == NULL) continue; } else { assert(n->is_Mem(), "memory node required."); Node *addr = n->in(MemNode::Address); + assert(addr->is_AddP(), "AddP required"); const Type *addr_t = igvn->type(addr); if (addr_t == Type::TOP) continue; assert (addr_t->isa_ptr() != NULL, "pointer type required."); int alias_idx = _compile->get_alias_index(addr_t->is_ptr()); - Node *mem = find_mem(n->in(MemNode::Memory), alias_idx, igvn); - if (mem->is_Phi()) { - mem = split_memory_phi(mem->as_Phi(), alias_idx, orig_phis, igvn); + assert ((uint)alias_idx < new_index_end, "wrong alias index"); + Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn); + if (_compile->failing()) { + return; } - if (mem != n->in(MemNode::Memory)) + if (mem != n->in(MemNode::Memory)) { set_map(n->_idx, mem); + ptnode_adr(n->_idx)->_node = n; + } if (n->is_Load()) { continue; // don't push users } else if (n->is_LoadStore()) { @@ -695,29 +1138,33 @@ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node *use = n->fast_out(i); if (use->is_Phi()) { - memnode_worklist.push(use); + memnode_worklist.append_if_missing(use); } else if(use->is_Mem() && use->in(MemNode::Memory) == n) { - memnode_worklist.push(use); + memnode_worklist.append_if_missing(use); + } else if (use->is_Initialize()) { + memnode_worklist.append_if_missing(use); } else if (use->is_MergeMem()) { - mergemem_worklist.push(use); + mergemem_worklist.append_if_missing(use); } } } - // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice - // moving the first node encountered of each instance type to the - // the input corresponding to its alias index. + // Phase 3: Process MergeMem nodes from mergemem_worklist. + // Walk each memory moving the first node encountered of each + // instance type to the the input corresponding to its alias index. while (mergemem_worklist.length() != 0) { Node *n = mergemem_worklist.pop(); assert(n->is_MergeMem(), "MergeMem node required."); + if (visited.test_set(n->_idx)) + continue; MergeMemNode *nmm = n->as_MergeMem(); // Note: we don't want to use MergeMemStream here because we only want to - // scan inputs which exist at the start, not ones we add during processing + // scan inputs which exist at the start, not ones we add during processing. uint nslices = nmm->req(); igvn->hash_delete(nmm); for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { - Node * mem = nmm->in(i); - Node * cur = NULL; + Node* mem = nmm->in(i); + Node* cur = NULL; if (mem == NULL || mem->is_top()) continue; while (mem->is_Mem()) { @@ -737,38 +1184,91 @@ mem = mem->in(MemNode::Memory); } nmm->set_memory_at(i, (cur != NULL) ? cur : mem); - if (mem->is_Phi()) { - // We have encountered a Phi, we need to split the Phi for - // any instance of the current type if we haven't encountered - // a value of the instance along the chain. 
- for (uint ni = new_index_start; ni < new_index_end; ni++) { - if((uint)_compile->get_general_index(ni) == i) { - Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); - if (nmm->is_empty_memory(m)) { - nmm->set_memory_at(ni, split_memory_phi(mem->as_Phi(), ni, orig_phis, igvn)); + // Find any instance of the current type if we haven't encountered + // a value of the instance along the chain. + for (uint ni = new_index_start; ni < new_index_end; ni++) { + if((uint)_compile->get_general_index(ni) == i) { + Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); + if (nmm->is_empty_memory(m)) { + Node* result = find_inst_mem(mem, ni, orig_phis, igvn); + if (_compile->failing()) { + return; } + nmm->set_memory_at(ni, result); } } } } + // Find the rest of instances values + for (uint ni = new_index_start; ni < new_index_end; ni++) { + const TypeOopPtr *tinst = igvn->C->get_adr_type(ni)->isa_oopptr(); + Node* result = step_through_mergemem(nmm, ni, tinst); + if (result == nmm->base_memory()) { + // Didn't find instance memory, search through general slice recursively. + result = nmm->memory_at(igvn->C->get_general_index(ni)); + result = find_inst_mem(result, ni, orig_phis, igvn); + if (_compile->failing()) { + return; + } + nmm->set_memory_at(ni, result); + } + } igvn->hash_insert(nmm); record_for_optimizer(nmm); + + // Propagate new memory slices to following MergeMem nodes. + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + Node *use = n->fast_out(i); + if (use->is_Call()) { + CallNode* in = use->as_Call(); + if (in->proj_out(TypeFunc::Memory) != NULL) { + Node* m = in->proj_out(TypeFunc::Memory); + for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { + Node* mm = m->fast_out(j); + if (mm->is_MergeMem()) { + mergemem_worklist.append_if_missing(mm); + } + } + } + if (use->is_Allocate()) { + use = use->as_Allocate()->initialization(); + if (use == NULL) { + continue; + } + } + } + if (use->is_Initialize()) { + InitializeNode* in = use->as_Initialize(); + if (in->proj_out(TypeFunc::Memory) != NULL) { + Node* m = in->proj_out(TypeFunc::Memory); + for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { + Node* mm = m->fast_out(j); + if (mm->is_MergeMem()) { + mergemem_worklist.append_if_missing(mm); + } + } + } + } + } } - // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes - // + // Phase 4: Update the inputs of non-instance memory Phis and + // the Memory input of memnodes // First update the inputs of any non-instance Phi's from // which we split out an instance Phi. Note we don't have // to recursively process Phi's encounted on the input memory // chains as is done in split_memory_phi() since they will // also be processed here. - while (orig_phis.length() != 0) { - PhiNode *phi = orig_phis.pop(); + for (int j = 0; j < orig_phis.length(); j++) { + PhiNode *phi = orig_phis.at(j); int alias_idx = _compile->get_alias_index(phi->adr_type()); igvn->hash_delete(phi); for (uint i = 1; i < phi->req(); i++) { Node *mem = phi->in(i); - Node *new_mem = find_mem(mem, alias_idx, igvn); + Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn); + if (_compile->failing()) { + return; + } if (mem != new_mem) { phi->set_req(i, new_mem); } @@ -779,10 +1279,10 @@ // Update the memory inputs of MemNodes with the value we computed // in Phase 2. 
- for (int i = 0; i < _nodes->length(); i++) { + for (uint i = 0; i < nodes_size(); i++) { Node *nmem = get_map(i); if (nmem != NULL) { - Node *n = _nodes->at(i)._node; + Node *n = ptnode_adr(i)->_node; if (n != NULL && n->is_Mem()) { igvn->hash_delete(n); n->set_req(MemNode::Memory, nmem); @@ -793,197 +1293,420 @@ } } -void ConnectionGraph::compute_escape() { - GrowableArray worklist; - GrowableArray alloc_worklist; - VectorSet visited(Thread::current()->resource_area()); - PhaseGVN *igvn = _compile->initial_gvn(); +bool ConnectionGraph::has_candidates(Compile *C) { + // EA brings benefits only when the code has allocations and/or locks which + // are represented by ideal Macro nodes. + int cnt = C->macro_count(); + for( int i=0; i < cnt; i++ ) { + Node *n = C->macro_node(i); + if ( n->is_Allocate() ) + return true; + if( n->is_Lock() ) { + Node* obj = n->as_Lock()->obj_node()->uncast(); + if( !(obj->is_Parm() || obj->is_Con()) ) + return true; + } + } + return false; +} + +bool ConnectionGraph::compute_escape() { + Compile* C = _compile; + + // 1. Populate Connection Graph (CG) with Ideal nodes. + + Unique_Node_List worklist_init; + worklist_init.map(C->unique(), NULL); // preallocate space + + // Initialize worklist + if (C->root() != NULL) { + worklist_init.push(C->root()); + } + + GrowableArray cg_worklist; + PhaseGVN* igvn = C->initial_gvn(); + bool has_allocations = false; + + // Push all useful nodes onto CG list and set their type. + for( uint next = 0; next < worklist_init.size(); ++next ) { + Node* n = worklist_init.at(next); + record_for_escape_analysis(n, igvn); + // Only allocations and java static calls results are checked + // for an escape status. See process_call_result() below. + if (n->is_Allocate() || n->is_CallStaticJava() && + ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) { + has_allocations = true; + } + if(n->is_AddP()) + cg_worklist.append(n->_idx); + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + Node* m = n->fast_out(i); // Get user + worklist_init.push(m); + } + } - // process Phi nodes from the deferred list, they may not have - while(_deferred.size() > 0) { - Node * n = _deferred.pop(); - PhiNode * phi = n->as_Phi(); + if (!has_allocations) { + _collecting = false; + return false; // Nothing to do. + } - process_phi_escape(phi, igvn); + // 2. First pass to create simple CG edges (doesn't require to walk CG). + uint delayed_size = _delayed_worklist.size(); + for( uint next = 0; next < delayed_size; ++next ) { + Node* n = _delayed_worklist.at(next); + build_connection_graph(n, igvn); } - VectorSet ptset(Thread::current()->resource_area()); + // 3. Pass to create fields edges (Allocate -F-> AddP). + uint cg_length = cg_worklist.length(); + for( uint next = 0; next < cg_length; ++next ) { + int ni = cg_worklist.at(next); + build_connection_graph(ptnode_adr(ni)->_node, igvn); + } - // remove deferred edges from the graph and collect - // information we will need for type splitting - for (uint ni = 0; ni < (uint)_nodes->length(); ni++) { - PointsToNode * ptn = _nodes->adr_at(ni); - PointsToNode::NodeType nt = ptn->node_type(); + cg_worklist.clear(); + cg_worklist.append(_phantom_object); - if (nt == PointsToNode::UnknownType) { - continue; // not a node we are interested in - } + // 4. Build Connection Graph which need + // to walk the connection graph. 
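As a concrete reading of has_candidates() above: escape analysis is only attempted when the method contains Allocate macro nodes and/or Lock nodes whose locked object is neither a parameter nor a constant. An illustrative (invented) example that qualifies on both counts:

    public class EaCandidates {
        static int sum() {
            int[] buf = new int[2];          // Allocate macro node
            synchronized (new Object()) {    // Lock on a non-Parm, non-Con object
                buf[0] = 1;
                buf[1] = 2;
            }
            return buf[0] + buf[1];
        }
        public static void main(String[] args) {
            System.out.println(sum());
        }
    }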
+ for (uint ni = 0; ni < nodes_size(); ni++) { + PointsToNode* ptn = ptnode_adr(ni); Node *n = ptn->_node; + if (n != NULL) { // Call, AddP, LoadP, StoreP + build_connection_graph(n, igvn); + if (ptn->node_type() != PointsToNode::UnknownType) + cg_worklist.append(n->_idx); // Collect CG nodes + } + } + + VectorSet ptset(Thread::current()->resource_area()); + GrowableArray deferred_edges; + VectorSet visited(Thread::current()->resource_area()); + + // 5. Remove deferred edges from the graph and collect + // information needed for type splitting. + cg_length = cg_worklist.length(); + for( uint next = 0; next < cg_length; ++next ) { + int ni = cg_worklist.at(next); + PointsToNode* ptn = ptnode_adr(ni); + PointsToNode::NodeType nt = ptn->node_type(); if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) { - remove_deferred(ni); + remove_deferred(ni, &deferred_edges, &visited); + Node *n = ptn->_node; if (n->is_AddP()) { - // if this AddP computes an address which may point to more that one - // object, nothing the address points to can be a unique type. - Node *base = n->in(AddPNode::Base); + // Search for objects which are not scalar replaceable. + // Mark their escape state as ArgEscape to propagate the state + // to referenced objects. + // Note: currently there are no difference in compiler optimizations + // for ArgEscape objects and NoEscape objects which are not + // scalar replaceable. + + int offset = ptn->offset(); + Node *base = get_addp_base(n); ptset.Clear(); PointsTo(ptset, base, igvn); - if (ptset.Size() > 1) { + int ptset_size = ptset.Size(); + + // Check if a field's initializing value is recorded and add + // a corresponding NULL field's value if it is not recorded. + // Connection Graph does not record a default initialization by NULL + // captured by Initialize node. + // + // Note: it will disable scalar replacement in some cases: + // + // Point p[] = new Point[1]; + // p[0] = new Point(); // Will be not scalar replaced + // + // but it will save us from incorrect optimizations in next cases: + // + // Point p[] = new Point[1]; + // if ( x ) p[0] = new Point(); // Will be not scalar replaced + // + // Without a control flow analysis we can't distinguish above cases. + // + if (offset != Type::OffsetBot && ptset_size == 1) { + uint elem = ptset.getelem(); // Allocation node's index + // It does not matter if it is not Allocation node since + // only non-escaping allocations are scalar replaced. + if (ptnode_adr(elem)->_node->is_Allocate() && + ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) { + AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate(); + InitializeNode* ini = alloc->initialization(); + Node* value = NULL; + if (ini != NULL) { + BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT; + Node* store = ini->find_captured_store(offset, type2aelembytes(ft), igvn); + if (store != NULL && store->is_Store()) + value = store->in(MemNode::ValueIn); + } + if (value == NULL || value != ptnode_adr(value->_idx)->_node) { + // A field's initializing value was not recorded. Add NULL. + uint null_idx = UseCompressedOops ? _noop_null : _oop_null; + add_pointsto_edge(ni, null_idx); + } + } + } + + // An object is not scalar replaceable if the field which may point + // to it has unknown offset (unknown element of an array of objects). 
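Expanded into runnable form, the Point[] trade-off documented above looks roughly like this; the conservative NULL points-to edge keeps the conditional variant correct at the price of scalar replacement in the unconditional one:

    public class DefaultNullField {
        static class Point { int x; }

        static int always() {
            Point[] p = new Point[1];
            p[0] = new Point();          // unconditional store, but the default null
            return p[0].x;               // of p[0] is still recorded as a possible value
        }

        static int maybe(boolean cond) {
            Point[] p = new Point[1];
            if (cond) {
                p[0] = new Point();      // without flow sensitivity p[0] may be null here,
            }                            // which the added null edge models
            return p[0] != null ? p[0].x : -1;
        }

        public static void main(String[] args) {
            System.out.println(always() + maybe(args.length > 0));
        }
    }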
+ // + if (offset == Type::OffsetBot) { + uint e_cnt = ptn->edge_count(); + for (uint ei = 0; ei < e_cnt; ei++) { + uint npi = ptn->edge_target(ei); + set_escape_state(npi, PointsToNode::ArgEscape); + ptnode_adr(npi)->_scalar_replaceable = false; + } + } + + // Currently an object is not scalar replaceable if a LoadStore node + // access its field since the field value is unknown after it. + // + bool has_LoadStore = false; + for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { + Node *use = n->fast_out(i); + if (use->is_LoadStore()) { + has_LoadStore = true; + break; + } + } + // An object is not scalar replaceable if the address points + // to unknown field (unknown element for arrays, offset is OffsetBot). + // + // Or the address may point to more then one object. This may produce + // the false positive result (set scalar_replaceable to false) + // since the flow-insensitive escape analysis can't separate + // the case when stores overwrite the field's value from the case + // when stores happened on different control branches. + // + if (ptset_size > 1 || ptset_size != 0 && + (has_LoadStore || offset == Type::OffsetBot)) { for( VectorSetI j(&ptset); j.test(); ++j ) { - PointsToNode *ptaddr = _nodes->adr_at(j.elem); - ptaddr->_unique_type = false; + set_escape_state(j.elem, PointsToNode::ArgEscape); + ptnode_adr(j.elem)->_scalar_replaceable = false; } } } - } else if (n->is_Call()) { - // initialize _escape_state of calls to GlobalEscape - n->as_Call()->_escape_state = PointsToNode::GlobalEscape; - // push call on alloc_worlist (alocations are calls) - // for processing by split_unique_types() - alloc_worklist.push(n); } } + + // 6. Propagate escape states. + GrowableArray worklist; + bool has_non_escaping_obj = false; + // push all GlobalEscape nodes on the worklist - for (uint nj = 0; nj < (uint)_nodes->length(); nj++) { - if (_nodes->at(nj).escape_state() == PointsToNode::GlobalEscape) { - worklist.append(nj); - } + for( uint next = 0; next < cg_length; ++next ) { + int nk = cg_worklist.at(next); + if (ptnode_adr(nk)->escape_state() == PointsToNode::GlobalEscape) + worklist.push(nk); } - // mark all node reachable from GlobalEscape nodes + // mark all nodes reachable from GlobalEscape nodes while(worklist.length() > 0) { - PointsToNode n = _nodes->at(worklist.pop()); - for (uint ei = 0; ei < n.edge_count(); ei++) { - uint npi = n.edge_target(ei); + PointsToNode* ptn = ptnode_adr(worklist.pop()); + uint e_cnt = ptn->edge_count(); + for (uint ei = 0; ei < e_cnt; ei++) { + uint npi = ptn->edge_target(ei); PointsToNode *np = ptnode_adr(npi); - if (np->escape_state() != PointsToNode::GlobalEscape) { + if (np->escape_state() < PointsToNode::GlobalEscape) { np->set_escape_state(PointsToNode::GlobalEscape); - worklist.append_if_missing(npi); + worklist.push(npi); } } } // push all ArgEscape nodes on the worklist - for (uint nk = 0; nk < (uint)_nodes->length(); nk++) { - if (_nodes->at(nk).escape_state() == PointsToNode::ArgEscape) + for( uint next = 0; next < cg_length; ++next ) { + int nk = cg_worklist.at(next); + if (ptnode_adr(nk)->escape_state() == PointsToNode::ArgEscape) worklist.push(nk); } - // mark all node reachable from ArgEscape nodes + // mark all nodes reachable from ArgEscape nodes while(worklist.length() > 0) { - PointsToNode n = _nodes->at(worklist.pop()); - - for (uint ei = 0; ei < n.edge_count(); ei++) { - uint npi = n.edge_target(ei); + PointsToNode* ptn = ptnode_adr(worklist.pop()); + if (ptn->node_type() == PointsToNode::JavaObject) + 
has_non_escaping_obj = true; // Non GlobalEscape + uint e_cnt = ptn->edge_count(); + for (uint ei = 0; ei < e_cnt; ei++) { + uint npi = ptn->edge_target(ei); PointsToNode *np = ptnode_adr(npi); - if (np->escape_state() != PointsToNode::ArgEscape) { + if (np->escape_state() < PointsToNode::ArgEscape) { np->set_escape_state(PointsToNode::ArgEscape); - worklist.append_if_missing(npi); + worklist.push(npi); + } + } + } + + GrowableArray alloc_worklist; + + // push all NoEscape nodes on the worklist + for( uint next = 0; next < cg_length; ++next ) { + int nk = cg_worklist.at(next); + if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape) + worklist.push(nk); + } + // mark all nodes reachable from NoEscape nodes + while(worklist.length() > 0) { + PointsToNode* ptn = ptnode_adr(worklist.pop()); + if (ptn->node_type() == PointsToNode::JavaObject) + has_non_escaping_obj = true; // Non GlobalEscape + Node* n = ptn->_node; + if (n->is_Allocate() && ptn->_scalar_replaceable ) { + // Push scalar replaceable alocations on alloc_worklist + // for processing in split_unique_types(). + alloc_worklist.append(n); + } + uint e_cnt = ptn->edge_count(); + for (uint ei = 0; ei < e_cnt; ei++) { + uint npi = ptn->edge_target(ei); + PointsToNode *np = ptnode_adr(npi); + if (np->escape_state() < PointsToNode::NoEscape) { + np->set_escape_state(PointsToNode::NoEscape); + worklist.push(npi); } } } + _collecting = false; + assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build"); - // Now use the escape information to create unique types for - // unescaped objects - split_unique_types(alloc_worklist); -} - -Node * ConnectionGraph::skip_casts(Node *n) { - while(n->Opcode() == Op_CastPP || n->Opcode() == Op_CheckCastPP) { - n = n->in(1); - } - return n; -} - -void ConnectionGraph::process_phi_escape(PhiNode *phi, PhaseTransform *phase) { - - if (phi->type()->isa_oopptr() == NULL) - return; // nothing to do if not an oop - - PointsToNode *ptadr = ptnode_adr(phi->_idx); - int incount = phi->req(); - int non_null_inputs = 0; - - for (int i = 1; i < incount ; i++) { - if (phi->in(i) != NULL) - non_null_inputs++; - } - if (non_null_inputs == ptadr->_inputs_processed) - return; // no new inputs since the last time this node was processed, - // the current information is valid - - ptadr->_inputs_processed = non_null_inputs; // prevent recursive processing of this node - for (int j = 1; j < incount ; j++) { - Node * n = phi->in(j); - if (n == NULL) - continue; // ignore NULL - n = skip_casts(n); - if (n->is_top() || n == phi) - continue; // ignore top or inputs which go back this node - int nopc = n->Opcode(); - PointsToNode npt = _nodes->at(n->_idx); - if (_nodes->at(n->_idx).node_type() == PointsToNode::JavaObject) { - add_pointsto_edge(phi->_idx, n->_idx); - } else { - add_deferred_edge(phi->_idx, n->_idx); + bool has_scalar_replaceable_candidates = alloc_worklist.length() > 0; + if ( has_scalar_replaceable_candidates && + C->AliasLevel() >= 3 && EliminateAllocations ) { + + // Now use the escape information to create unique types for + // scalar replaceable objects. + split_unique_types(alloc_worklist); + + if (C->failing()) return false; + + // Clean up after split unique types. 
+ ResourceMark rm; + PhaseRemoveUseless pru(C->initial_gvn(), C->for_igvn()); + + C->print_method("After Escape Analysis", 2); + +#ifdef ASSERT + } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) { + tty->print("=== No allocations eliminated for "); + C->method()->print_short_name(); + if(!EliminateAllocations) { + tty->print(" since EliminateAllocations is off ==="); + } else if(!has_scalar_replaceable_candidates) { + tty->print(" since there are no scalar replaceable candidates ==="); + } else if(C->AliasLevel() < 3) { + tty->print(" since AliasLevel < 3 ==="); } + tty->cr(); +#endif } + return has_non_escaping_obj; } void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) { - _processed.set(call->_idx); switch (call->Opcode()) { - - // arguments to allocation and locking don't escape +#ifdef ASSERT case Op_Allocate: case Op_AllocateArray: case Op_Lock: case Op_Unlock: + assert(false, "should be done already"); + break; +#endif + case Op_CallLeafNoFP: + { + // Stub calls, objects do not escape but they are not scale replaceable. + // Adjust escape state for outgoing arguments. + const TypeTuple * d = call->tf()->domain(); + VectorSet ptset(Thread::current()->resource_area()); + for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { + const Type* at = d->field_at(i); + Node *arg = call->in(i)->uncast(); + const Type *aat = phase->type(arg); + if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr()) { + assert(aat == Type::TOP || aat == TypePtr::NULL_PTR || + aat->isa_ptr() != NULL, "expecting an Ptr"); + set_escape_state(arg->_idx, PointsToNode::ArgEscape); + if (arg->is_AddP()) { + // + // The inline_native_clone() case when the arraycopy stub is called + // after the allocation before Initialize and CheckCastPP nodes. + // + // Set AddP's base (Allocate) as not scalar replaceable since + // pointer to the base (with offset) is passed as argument. + // + arg = get_addp_base(arg); + } + ptset.Clear(); + PointsTo(ptset, arg, phase); + for( VectorSetI j(&ptset); j.test(); ++j ) { + uint pt = j.elem; + set_escape_state(pt, PointsToNode::ArgEscape); + } + } + } break; + } case Op_CallStaticJava: // For a static call, we know exactly what method is being called. // Use bytecode estimator to record the call's escape affects { ciMethod *meth = call->as_CallJava()->method(); - if (meth != NULL) { + BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? 
meth->get_bcea() : NULL; + // fall-through if not a Java method or no analyzer information + if (call_analyzer != NULL) { const TypeTuple * d = call->tf()->domain(); - BCEscapeAnalyzer call_analyzer(meth); VectorSet ptset(Thread::current()->resource_area()); + bool copy_dependencies = false; for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); int k = i - TypeFunc::Parms; if (at->isa_oopptr() != NULL) { - Node *arg = skip_casts(call->in(i)); - - if (!call_analyzer.is_arg_stack(k)) { + Node *arg = call->in(i)->uncast(); + + bool global_escapes = false; + bool fields_escapes = false; + if (!call_analyzer->is_arg_stack(k)) { // The argument global escapes, mark everything it could point to - ptset.Clear(); - PointsTo(ptset, arg, phase); - for( VectorSetI j(&ptset); j.test(); ++j ) { - uint pt = j.elem; - - set_escape_state(pt, PointsToNode::GlobalEscape); + set_escape_state(arg->_idx, PointsToNode::GlobalEscape); + global_escapes = true; + } else { + if (!call_analyzer->is_arg_local(k)) { + // The argument itself doesn't escape, but any fields might + fields_escapes = true; } - } else if (!call_analyzer.is_arg_local(k)) { - // The argument itself doesn't escape, but any fields might - ptset.Clear(); - PointsTo(ptset, arg, phase); - for( VectorSetI j(&ptset); j.test(); ++j ) { - uint pt = j.elem; - add_edge_from_fields(pt, _phantom_object, Type::OffsetBot); + set_escape_state(arg->_idx, PointsToNode::ArgEscape); + copy_dependencies = true; + } + + ptset.Clear(); + PointsTo(ptset, arg, phase); + for( VectorSetI j(&ptset); j.test(); ++j ) { + uint pt = j.elem; + if (global_escapes) { + //The argument global escapes, mark everything it could point to + set_escape_state(pt, PointsToNode::GlobalEscape); + } else { + if (fields_escapes) { + // The argument itself doesn't escape, but any fields might + add_edge_from_fields(pt, _phantom_object, Type::OffsetBot); + } + set_escape_state(pt, PointsToNode::ArgEscape); } } } } - call_analyzer.copy_dependencies(C()->dependencies()); + if (copy_dependencies) + call_analyzer->copy_dependencies(_compile->dependencies()); break; } - // fall-through if not a Java method } default: - // Some other type of call, assume the worst case: all arguments + // Fall-through here if not a Java method or no analyzer information + // or some other type of call, assume the worst case: all arguments // globally escape. 
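To illustrate the argument classification above (the exact outcome is decided by BCEscapeAnalyzer, so this is only the intuition): an argument the callee publishes is marked GlobalEscape, while one the callee merely reads can stay at ArgEscape. Hypothetical sketch:

    public class CallArgs {
        static Object sink;

        static void publish(Object o) { sink = o; }       // callee lets the argument escape
        static int length(int[] a)    { return a.length; } // callee only reads the argument

        static int test() {
            Object o = new Object();
            publish(o);            // 'o' and everything it points to: GlobalEscape
            int[] a = new int[3];
            return length(a);      // 'a' can remain ArgEscape per is_arg_stack/is_arg_local
        }

        public static void main(String[] args) {
            System.out.println(test());
        }
    }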
{ // adjust escape state for outgoing arguments @@ -991,14 +1714,13 @@ VectorSet ptset(Thread::current()->resource_area()); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); - if (at->isa_oopptr() != NULL) { - Node *arg = skip_casts(call->in(i)); + Node *arg = call->in(i)->uncast(); + set_escape_state(arg->_idx, PointsToNode::GlobalEscape); ptset.Clear(); PointsTo(ptset, arg, phase); for( VectorSetI j(&ptset); j.test(); ++j ) { uint pt = j.elem; - set_escape_state(pt, PointsToNode::GlobalEscape); } } @@ -1007,14 +1729,9 @@ } } void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) { - CallNode *call = resproj->in(0)->as_Call(); - - PointsToNode *ptadr = ptnode_adr(resproj->_idx); - - ptadr->_node = resproj; - ptadr->set_node_type(PointsToNode::LocalVar); - set_escape_state(resproj->_idx, PointsToNode::UnknownEscape); - _processed.set(resproj->_idx); + CallNode *call = resproj->in(0)->as_Call(); + uint call_idx = call->_idx; + uint resproj_idx = resproj->_idx; switch (call->Opcode()) { case Op_Allocate: @@ -1024,43 +1741,46 @@ if (k->Opcode() == Op_LoadKlass) { kt = k->as_Load()->type()->isa_klassptr(); } else { + // Also works for DecodeN(LoadNKlass). kt = k->as_Type()->type()->isa_klassptr(); } assert(kt != NULL, "TypeKlassPtr required."); ciKlass* cik = kt->klass(); ciInstanceKlass* ciik = cik->as_instance_klass(); - PointsToNode *ptadr = ptnode_adr(call->_idx); - ptadr->set_node_type(PointsToNode::JavaObject); + PointsToNode::EscapeState es; + uint edge_to; if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) { - set_escape_state(call->_idx, PointsToNode::GlobalEscape); - add_pointsto_edge(resproj->_idx, _phantom_object); + es = PointsToNode::GlobalEscape; + edge_to = _phantom_object; // Could not be worse } else { - set_escape_state(call->_idx, PointsToNode::NoEscape); - add_pointsto_edge(resproj->_idx, call->_idx); + es = PointsToNode::NoEscape; + edge_to = call_idx; } - _processed.set(call->_idx); + set_escape_state(call_idx, es); + add_pointsto_edge(resproj_idx, edge_to); + _processed.set(resproj_idx); break; } case Op_AllocateArray: { - PointsToNode *ptadr = ptnode_adr(call->_idx); - ptadr->set_node_type(PointsToNode::JavaObject); - set_escape_state(call->_idx, PointsToNode::NoEscape); - _processed.set(call->_idx); - add_pointsto_edge(resproj->_idx, call->_idx); + int length = call->in(AllocateNode::ALength)->find_int_con(-1); + if (length < 0 || length > EliminateAllocationArraySizeLimit) { + // Not scalar replaceable if the length is not constant or too big. + ptnode_adr(call_idx)->_scalar_replaceable = false; + } + set_escape_state(call_idx, PointsToNode::NoEscape); + add_pointsto_edge(resproj_idx, call_idx); + _processed.set(resproj_idx); break; } - case Op_Lock: - case Op_Unlock: - break; - case Op_CallStaticJava: // For a static call, we know exactly what method is being called. // Use bytecode estimator to record whether the call's return value escapes { + bool done = true; const TypeTuple *r = call->tf()->range(); const Type* ret_type = NULL; @@ -1069,46 +1789,74 @@ // Note: we use isa_ptr() instead of isa_oopptr() here because the // _multianewarray functions return a TypeRawPtr. 
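The Op_Allocate case above immediately pessimizes allocations of finalizable classes and java.lang.Thread subclasses to GlobalEscape. A minimal illustration with invented classes:

    public class FinalizerEscape {
        static class Plain     { int x; }
        static class Finalized { int x; protected void finalize() { } }

        static int test() {
            Plain p = new Plain();          // stays a NoEscape candidate at the allocation
            Finalized f = new Finalized();  // has_finalizer(): GlobalEscape up front,
            return p.x + f.x;               // as for any subclass of java.lang.Thread
        }

        public static void main(String[] args) {
            System.out.println(test());
        }
    }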
- if (ret_type == NULL || ret_type->isa_ptr() == NULL) + if (ret_type == NULL || ret_type->isa_ptr() == NULL) { + _processed.set(resproj_idx); break; // doesn't return a pointer type - + } ciMethod *meth = call->as_CallJava()->method(); + const TypeTuple * d = call->tf()->domain(); if (meth == NULL) { // not a Java method, assume global escape - set_escape_state(call->_idx, PointsToNode::GlobalEscape); - if (resproj != NULL) - add_pointsto_edge(resproj->_idx, _phantom_object); + set_escape_state(call_idx, PointsToNode::GlobalEscape); + add_pointsto_edge(resproj_idx, _phantom_object); } else { - BCEscapeAnalyzer call_analyzer(meth); - VectorSet ptset(Thread::current()->resource_area()); + BCEscapeAnalyzer *call_analyzer = meth->get_bcea(); + bool copy_dependencies = false; - if (call_analyzer.is_return_local() && resproj != NULL) { + if (call_analyzer->is_return_allocated()) { + // Returns a newly allocated unescaped object, simply + // update dependency information. + // Mark it as NoEscape so that objects referenced by + // it's fields will be marked as NoEscape at least. + set_escape_state(call_idx, PointsToNode::NoEscape); + add_pointsto_edge(resproj_idx, call_idx); + copy_dependencies = true; + } else if (call_analyzer->is_return_local()) { // determine whether any arguments are returned - const TypeTuple * d = call->tf()->domain(); - set_escape_state(call->_idx, PointsToNode::NoEscape); + set_escape_state(call_idx, PointsToNode::NoEscape); + bool ret_arg = false; for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); if (at->isa_oopptr() != NULL) { - Node *arg = skip_casts(call->in(i)); - - if (call_analyzer.is_arg_returned(i - TypeFunc::Parms)) { - PointsToNode *arg_esp = _nodes->adr_at(arg->_idx); - if (arg_esp->node_type() == PointsToNode::JavaObject) - add_pointsto_edge(resproj->_idx, arg->_idx); + Node *arg = call->in(i)->uncast(); + + if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) { + ret_arg = true; + PointsToNode *arg_esp = ptnode_adr(arg->_idx); + if (arg_esp->node_type() == PointsToNode::UnknownType) + done = false; + else if (arg_esp->node_type() == PointsToNode::JavaObject) + add_pointsto_edge(resproj_idx, arg->_idx); else - add_deferred_edge(resproj->_idx, arg->_idx); + add_deferred_edge(resproj_idx, arg->_idx); arg_esp->_hidden_alias = true; } } } + if (done && !ret_arg) { + // Returns unknown object. + set_escape_state(call_idx, PointsToNode::GlobalEscape); + add_pointsto_edge(resproj_idx, _phantom_object); + } + copy_dependencies = true; } else { - set_escape_state(call->_idx, PointsToNode::GlobalEscape); - if (resproj != NULL) - add_pointsto_edge(resproj->_idx, _phantom_object); + set_escape_state(call_idx, PointsToNode::GlobalEscape); + add_pointsto_edge(resproj_idx, _phantom_object); + for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { + const Type* at = d->field_at(i); + if (at->isa_oopptr() != NULL) { + Node *arg = call->in(i)->uncast(); + PointsToNode *arg_esp = ptnode_adr(arg->_idx); + arg_esp->_hidden_alias = true; + } + } } - call_analyzer.copy_dependencies(C()->dependencies()); + if (copy_dependencies) + call_analyzer->copy_dependencies(_compile->dependencies()); } + if (done) + _processed.set(resproj_idx); break; } @@ -1117,196 +1865,454 @@ // returned value, if any, globally escapes. 
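The Op_AllocateArray case above clears _scalar_replaceable when the array length is not a small compile-time constant (bounded by EliminateAllocationArraySizeLimit). In Java terms, roughly:

    public class ArrayLength {
        static int constantLength() {
            int[] a = new int[2];    // small constant length: may be scalar replaced
            a[0] = 1; a[1] = 2;
            return a[0] + a[1];
        }
        static int variableLength(int n) {
            int[] a = new int[n];    // non-constant length: _scalar_replaceable cleared
            return a.length;
        }
        public static void main(String[] args) {
            System.out.println(constantLength() + variableLength(3));
        }
    }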
{ const TypeTuple *r = call->tf()->range(); - if (r->cnt() > TypeFunc::Parms) { const Type* ret_type = r->field_at(TypeFunc::Parms); // Note: we use isa_ptr() instead of isa_oopptr() here because the // _multianewarray functions return a TypeRawPtr. if (ret_type->isa_ptr() != NULL) { - PointsToNode *ptadr = ptnode_adr(call->_idx); - ptadr->set_node_type(PointsToNode::JavaObject); - set_escape_state(call->_idx, PointsToNode::GlobalEscape); - if (resproj != NULL) - add_pointsto_edge(resproj->_idx, _phantom_object); + set_escape_state(call_idx, PointsToNode::GlobalEscape); + add_pointsto_edge(resproj_idx, _phantom_object); } } + _processed.set(resproj_idx); } } } -void ConnectionGraph::record_for_escape_analysis(Node *n) { - if (_collecting) { - if (n->is_Phi()) { - PhiNode *phi = n->as_Phi(); - const Type *pt = phi->type(); - if ((pt->isa_oopptr() != NULL) || pt == TypePtr::NULL_PTR) { - PointsToNode *ptn = ptnode_adr(phi->_idx); - ptn->set_node_type(PointsToNode::LocalVar); - ptn->_node = n; - _deferred.push(n); +// Populate Connection Graph with Ideal nodes and create simple +// connection graph edges (do not need to check the node_type of inputs +// or to call PointsTo() to walk the connection graph). +void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) { + if (_processed.test(n->_idx)) + return; // No need to redefine node's state. + + if (n->is_Call()) { + // Arguments to allocation and locking don't escape. + if (n->is_Allocate()) { + add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true); + record_for_optimizer(n); + } else if (n->is_Lock() || n->is_Unlock()) { + // Put Lock and Unlock nodes on IGVN worklist to process them during + // the first IGVN optimization when escape information is still available. + record_for_optimizer(n); + _processed.set(n->_idx); + } else { + // Have to process call's arguments first. + PointsToNode::NodeType nt = PointsToNode::UnknownType; + + // Check if a call returns an object. + const TypeTuple *r = n->as_Call()->tf()->range(); + if (n->is_CallStaticJava() && r->cnt() > TypeFunc::Parms && + n->as_Call()->proj_out(TypeFunc::Parms) != NULL) { + // Note: use isa_ptr() instead of isa_oopptr() here because + // the _multianewarray functions return a TypeRawPtr. + if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) { + nt = PointsToNode::JavaObject; + } } + add_node(n, nt, PointsToNode::UnknownEscape, false); } + return; } -} -void ConnectionGraph::record_escape_work(Node *n, PhaseTransform *phase) { + // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because + // ThreadLocal has RawPrt type. + switch (n->Opcode()) { + case Op_AddP: + { + add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false); + break; + } + case Op_CastX2P: + { // "Unsafe" memory access. + add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true); + break; + } + case Op_CastPP: + case Op_CheckCastPP: + case Op_EncodeP: + case Op_DecodeN: + { + add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); + int ti = n->in(1)->_idx; + PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); + if (nt == PointsToNode::UnknownType) { + _delayed_worklist.push(n); // Process it later. 
+ break; + } else if (nt == PointsToNode::JavaObject) { + add_pointsto_edge(n->_idx, ti); + } else { + add_deferred_edge(n->_idx, ti); + } + _processed.set(n->_idx); + break; + } + case Op_ConP: + { + // assume all pointer constants globally escape except for null + PointsToNode::EscapeState es; + if (phase->type(n) == TypePtr::NULL_PTR) + es = PointsToNode::NoEscape; + else + es = PointsToNode::GlobalEscape; - int opc = n->Opcode(); - PointsToNode *ptadr = ptnode_adr(n->_idx); + add_node(n, PointsToNode::JavaObject, es, true); + break; + } + case Op_ConN: + { + // assume all narrow oop constants globally escape except for null + PointsToNode::EscapeState es; + if (phase->type(n) == TypeNarrowOop::NULL_PTR) + es = PointsToNode::NoEscape; + else + es = PointsToNode::GlobalEscape; - if (_processed.test(n->_idx)) - return; + add_node(n, PointsToNode::JavaObject, es, true); + break; + } + case Op_CreateEx: + { + // assume that all exception objects globally escape + add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true); + break; + } + case Op_LoadKlass: + case Op_LoadNKlass: + { + add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true); + break; + } + case Op_LoadP: + case Op_LoadN: + { + const Type *t = phase->type(n); + if (t->make_ptr() == NULL) { + _processed.set(n->_idx); + return; + } + add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); + break; + } + case Op_Parm: + { + _processed.set(n->_idx); // No need to redefine it state. + uint con = n->as_Proj()->_con; + if (con < TypeFunc::Parms) + return; + const Type *t = n->in(0)->as_Start()->_domain->field_at(con); + if (t->isa_ptr() == NULL) + return; + // We have to assume all input parameters globally escape + // (Note: passing 'false' since _processed is already set). + add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false); + break; + } + case Op_Phi: + { + const Type *t = n->as_Phi()->type(); + if (t->make_ptr() == NULL) { + // nothing to do if not an oop or narrow oop + _processed.set(n->_idx); + return; + } + add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); + uint i; + for (i = 1; i < n->req() ; i++) { + Node* in = n->in(i); + if (in == NULL) + continue; // ignore NULL + in = in->uncast(); + if (in->is_top() || in == n) + continue; // ignore top or inputs which go back this node + int ti = in->_idx; + PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); + if (nt == PointsToNode::UnknownType) { + break; + } else if (nt == PointsToNode::JavaObject) { + add_pointsto_edge(n->_idx, ti); + } else { + add_deferred_edge(n->_idx, ti); + } + } + if (i >= n->req()) + _processed.set(n->_idx); + else + _delayed_worklist.push(n); + break; + } + case Op_Proj: + { + // we are only interested in the result projection from a call + if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) { + add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); + process_call_result(n->as_Proj(), phase); + if (!_processed.test(n->_idx)) { + // The call's result may need to be processed later if the call + // returns it's argument and the argument is not processed yet. + _delayed_worklist.push(n); + } + } else { + _processed.set(n->_idx); + } + break; + } + case Op_Return: + { + if( n->req() > TypeFunc::Parms && + phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) { + // Treat Return value as LocalVar with GlobalEscape escape state. 
+ add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false); + int ti = n->in(TypeFunc::Parms)->_idx; + PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); + if (nt == PointsToNode::UnknownType) { + _delayed_worklist.push(n); // Process it later. + break; + } else if (nt == PointsToNode::JavaObject) { + add_pointsto_edge(n->_idx, ti); + } else { + add_deferred_edge(n->_idx, ti); + } + } + _processed.set(n->_idx); + break; + } + case Op_StoreP: + case Op_StoreN: + { + const Type *adr_type = phase->type(n->in(MemNode::Address)); + adr_type = adr_type->make_ptr(); + if (adr_type->isa_oopptr()) { + add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false); + } else { + Node* adr = n->in(MemNode::Address); + if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL && + adr->in(AddPNode::Address)->is_Proj() && + adr->in(AddPNode::Address)->in(0)->is_Allocate()) { + add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false); + // We are computing a raw address for a store captured + // by an Initialize compute an appropriate address type. + int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); + assert(offs != Type::OffsetBot, "offset must be a constant"); + } else { + _processed.set(n->_idx); + return; + } + } + break; + } + case Op_StorePConditional: + case Op_CompareAndSwapP: + case Op_CompareAndSwapN: + { + const Type *adr_type = phase->type(n->in(MemNode::Address)); + adr_type = adr_type->make_ptr(); + if (adr_type->isa_oopptr()) { + add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false); + } else { + _processed.set(n->_idx); + return; + } + break; + } + case Op_ThreadLocal: + { + add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true); + break; + } + default: + ; + // nothing to do + } + return; +} + +void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) { + uint n_idx = n->_idx; + + // Don't set processed bit for AddP, LoadP, StoreP since + // they may need more then one pass to process. + if (_processed.test(n_idx)) + return; // No need to redefine node's state. - ptadr->_node = n; if (n->is_Call()) { CallNode *call = n->as_Call(); process_call_arguments(call, phase); + _processed.set(n_idx); return; } - switch (opc) { + switch (n->Opcode()) { case Op_AddP: { - Node *base = skip_casts(n->in(AddPNode::Base)); - ptadr->set_node_type(PointsToNode::Field); - - // create a field edge to this node from everything adr could point to + Node *base = get_addp_base(n); + // Create a field edge to this node from everything base could point to. 
VectorSet ptset(Thread::current()->resource_area()); PointsTo(ptset, base, phase); for( VectorSetI i(&ptset); i.test(); ++i ) { uint pt = i.elem; - add_field_edge(pt, n->_idx, type_to_offset(phase->type(n))); + add_field_edge(pt, n_idx, address_offset(n, phase)); } break; } - case Op_Parm: + case Op_CastX2P: { - ProjNode *nproj = n->as_Proj(); - uint con = nproj->_con; - if (con < TypeFunc::Parms) - return; - const Type *t = nproj->in(0)->as_Start()->_domain->field_at(con); - if (t->isa_ptr() == NULL) - return; - ptadr->set_node_type(PointsToNode::JavaObject); - if (t->isa_oopptr() != NULL) { - set_escape_state(n->_idx, PointsToNode::ArgEscape); + assert(false, "Op_CastX2P"); + break; + } + case Op_CastPP: + case Op_CheckCastPP: + case Op_EncodeP: + case Op_DecodeN: + { + int ti = n->in(1)->_idx; + if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) { + add_pointsto_edge(n_idx, ti); } else { - // this must be the incoming state of an OSR compile, we have to assume anything - // passed in globally escapes - assert(_compile->is_osr_compilation(), "bad argument type for non-osr compilation"); - set_escape_state(n->_idx, PointsToNode::GlobalEscape); + add_deferred_edge(n_idx, ti); } - _processed.set(n->_idx); + _processed.set(n_idx); break; } - case Op_Phi: + case Op_ConP: { - PhiNode *phi = n->as_Phi(); - if (phi->type()->isa_oopptr() == NULL) - return; // nothing to do if not an oop - ptadr->set_node_type(PointsToNode::LocalVar); - process_phi_escape(phi, phase); + assert(false, "Op_ConP"); break; } - case Op_CreateEx: + case Op_ConN: { - // assume that all exception objects globally escape - ptadr->set_node_type(PointsToNode::JavaObject); - set_escape_state(n->_idx, PointsToNode::GlobalEscape); - _processed.set(n->_idx); + assert(false, "Op_ConN"); break; } - case Op_ConP: + case Op_CreateEx: { - const Type *t = phase->type(n); - ptadr->set_node_type(PointsToNode::JavaObject); - // assume all pointer constants globally escape except for null - if (t == TypePtr::NULL_PTR) - set_escape_state(n->_idx, PointsToNode::NoEscape); - else - set_escape_state(n->_idx, PointsToNode::GlobalEscape); - _processed.set(n->_idx); + assert(false, "Op_CreateEx"); break; } case Op_LoadKlass: + case Op_LoadNKlass: { - ptadr->set_node_type(PointsToNode::JavaObject); - set_escape_state(n->_idx, PointsToNode::GlobalEscape); - _processed.set(n->_idx); + assert(false, "Op_LoadKlass"); break; } case Op_LoadP: + case Op_LoadN: { const Type *t = phase->type(n); - if (!t->isa_oopptr()) - return; - ptadr->set_node_type(PointsToNode::LocalVar); - set_escape_state(n->_idx, PointsToNode::UnknownEscape); +#ifdef ASSERT + if (t->make_ptr() == NULL) + assert(false, "Op_LoadP"); +#endif - Node *adr = skip_casts(n->in(MemNode::Address)); + Node* adr = n->in(MemNode::Address)->uncast(); const Type *adr_type = phase->type(adr); - Node *adr_base = skip_casts((adr->Opcode() == Op_AddP) ? adr->in(AddPNode::Base) : adr); + Node* adr_base; + if (adr->is_AddP()) { + adr_base = get_addp_base(adr); + } else { + adr_base = adr; + } - // For everything "adr" could point to, create a deferred edge from - // this node to each field with the same offset as "adr_type" + // For everything "adr_base" could point to, create a deferred edge from + // this node to each field with the same offset. 
VectorSet ptset(Thread::current()->resource_area()); PointsTo(ptset, adr_base, phase); - // If ptset is empty, then this value must have been set outside - // this method, so we add the phantom node - if (ptset.Size() == 0) - ptset.set(_phantom_object); + int offset = address_offset(adr, phase); for( VectorSetI i(&ptset); i.test(); ++i ) { uint pt = i.elem; - add_deferred_edge_to_fields(n->_idx, pt, type_to_offset(adr_type)); + add_deferred_edge_to_fields(n_idx, pt, offset); + } + break; + } + case Op_Parm: + { + assert(false, "Op_Parm"); + break; + } + case Op_Phi: + { +#ifdef ASSERT + const Type *t = n->as_Phi()->type(); + if (t->make_ptr() == NULL) + assert(false, "Op_Phi"); +#endif + for (uint i = 1; i < n->req() ; i++) { + Node* in = n->in(i); + if (in == NULL) + continue; // ignore NULL + in = in->uncast(); + if (in->is_top() || in == n) + continue; // ignore top or inputs which go back this node + int ti = in->_idx; + PointsToNode::NodeType nt = ptnode_adr(ti)->node_type(); + assert(nt != PointsToNode::UnknownType, "all nodes should be known"); + if (nt == PointsToNode::JavaObject) { + add_pointsto_edge(n_idx, ti); + } else { + add_deferred_edge(n_idx, ti); + } + } + _processed.set(n_idx); + break; + } + case Op_Proj: + { + // we are only interested in the result projection from a call + if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) { + process_call_result(n->as_Proj(), phase); + assert(_processed.test(n_idx), "all call results should be processed"); + } else { + assert(false, "Op_Proj"); + } + break; + } + case Op_Return: + { +#ifdef ASSERT + if( n->req() <= TypeFunc::Parms || + !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) { + assert(false, "Op_Return"); + } +#endif + int ti = n->in(TypeFunc::Parms)->_idx; + if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) { + add_pointsto_edge(n_idx, ti); + } else { + add_deferred_edge(n_idx, ti); } + _processed.set(n_idx); break; } case Op_StoreP: + case Op_StoreN: case Op_StorePConditional: case Op_CompareAndSwapP: + case Op_CompareAndSwapN: { Node *adr = n->in(MemNode::Address); - Node *val = skip_casts(n->in(MemNode::ValueIn)); - const Type *adr_type = phase->type(adr); + const Type *adr_type = phase->type(adr)->make_ptr(); +#ifdef ASSERT if (!adr_type->isa_oopptr()) - return; - - assert(adr->Opcode() == Op_AddP, "expecting an AddP"); - Node *adr_base = adr->in(AddPNode::Base); + assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP"); +#endif - // For everything "adr_base" could point to, create a deferred edge to "val" from each field - // with the same offset as "adr_type" + assert(adr->is_AddP(), "expecting an AddP"); + Node *adr_base = get_addp_base(adr); + Node *val = n->in(MemNode::ValueIn)->uncast(); + // For everything "adr_base" could point to, create a deferred edge + // to "val" from each field with the same offset. 
VectorSet ptset(Thread::current()->resource_area()); PointsTo(ptset, adr_base, phase); for( VectorSetI i(&ptset); i.test(); ++i ) { uint pt = i.elem; - add_edge_from_fields(pt, val->_idx, type_to_offset(adr_type)); + add_edge_from_fields(pt, val->_idx, address_offset(adr, phase)); } break; } - case Op_Proj: + case Op_ThreadLocal: { - ProjNode *nproj = n->as_Proj(); - Node *n0 = nproj->in(0); - // we are only interested in the result projection from a call - if (nproj->_con == TypeFunc::Parms && n0->is_Call() ) { - process_call_result(nproj, phase); - } - - break; - } - case Op_CastPP: - case Op_CheckCastPP: - { - ptadr->set_node_type(PointsToNode::LocalVar); - int ti = n->in(1)->_idx; - if (_nodes->at(ti).node_type() == PointsToNode::JavaObject) { - add_pointsto_edge(n->_idx, ti); - } else { - add_deferred_edge(n->_idx, ti); - } + assert(false, "Op_ThreadLocal"); break; } default: @@ -1315,34 +2321,46 @@ } } -void ConnectionGraph::record_escape(Node *n, PhaseTransform *phase) { - if (_collecting) - record_escape_work(n, phase); -} - #ifndef PRODUCT void ConnectionGraph::dump() { PhaseGVN *igvn = _compile->initial_gvn(); bool first = true; - for (uint ni = 0; ni < (uint)_nodes->length(); ni++) { - PointsToNode *esp = _nodes->adr_at(ni); - if (esp->node_type() == PointsToNode::UnknownType || esp->_node == NULL) + uint size = nodes_size(); + for (uint ni = 0; ni < size; ni++) { + PointsToNode *ptn = ptnode_adr(ni); + PointsToNode::NodeType ptn_type = ptn->node_type(); + + if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL) continue; - PointsToNode::EscapeState es = escape_state(esp->_node, igvn); - if (es == PointsToNode::NoEscape || (Verbose && - (es != PointsToNode::UnknownEscape || esp->edge_count() != 0))) { - // don't print null pointer node which almost every method has - if (esp->_node->Opcode() != Op_ConP || igvn->type(esp->_node) != TypePtr::NULL_PTR) { - if (first) { - tty->print("======== Connection graph for "); - C()->method()->print_short_name(); - tty->cr(); - first = false; + PointsToNode::EscapeState es = escape_state(ptn->_node, igvn); + if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) { + if (first) { + tty->cr(); + tty->print("======== Connection graph for "); + _compile->method()->print_short_name(); + tty->cr(); + first = false; + } + tty->print("%6d ", ni); + ptn->dump(); + // Print all locals which reference this allocation + for (uint li = ni; li < size; li++) { + PointsToNode *ptn_loc = ptnode_adr(li); + PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type(); + if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL && + ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) { + ptnode_adr(li)->dump(false); + } + } + if (Verbose) { + // Print all fields which reference this allocation + for (uint i = 0; i < ptn->edge_count(); i++) { + uint ei = ptn->edge_target(i); + ptnode_adr(ei)->dump(false); } - tty->print("%4d ", ni); - esp->dump(); } + tty->cr(); } } } --- old/hotspot/src/share/vm/opto/escape.hpp 2009-08-01 04:13:42.221537900 +0100 +++ new/hotspot/src/share/vm/opto/escape.hpp 2009-08-01 04:13:42.133244616 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)escape.hpp 1.9 07/05/17 15:58:25 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,14 +28,15 @@ // // Adaptation for C2 of the escape analysis algorithm described in: // -// [Choi99] Jong-Deok Shoi, Manish Gupta, Mauricio Seffano, Vugranam C. Sreedhar, -// Sam Midkiff, "Escape Analysis for Java", Procedings of ACM SIGPLAN -// OOPSLA Conference, November 1, 1999 +// [Choi99] Jong-Deok Choi, Manish Gupta, Mauricio Serrano, +// Vugranam C. Sreedhar, Sam Midkiff, +// "Escape Analysis for Java", Proceedings of ACM SIGPLAN +// OOPSLA Conference, November 1, 1999 // // The flow-insensitive analysis described in the paper has been implemented. // -// The analysis requires construction of a "connection graph" (CG) for the method being -// analyzed. The nodes of the connection graph are: +// The analysis requires construction of a "connection graph" (CG) for +// the method being analyzed. The nodes of the connection graph are: // // - Java objects (JO) // - Local variables (LV) @@ -43,48 +44,52 @@ // // The CG contains 3 types of edges: // -// - PointsTo (-P>) {LV,OF} to JO -// - Deferred (-D>) from {LV, OF} to {LV, OF} +// - PointsTo (-P>) {LV, OF} to JO +// - Deferred (-D>) from {LV, OF} to {LV, OF} // - Field (-F>) from JO to OF // // The following utility function is used by the algorithm: // -// PointsTo(n) - n is any CG node, it returns the set of JO that n could -// point to. +// PointsTo(n) - n is any CG node, it returns the set of JO that n could +// point to. // -// The algorithm describes how to construct the connection graph in the following 4 cases: +// The algorithm describes how to construct the connection graph +// in the following 4 cases: // // Case Edges Created // -// (1) p = new T() LV -P> JO -// (2) p = q LV -D> LV -// (3) p.f = q JO -F> OF, OF -D> LV -// (4) p = q.f JO -F> OF, LV -D> OF -// -// In all these cases, p and q are local variables. For static field references, we can -// construct a local variable containing a reference to the static memory. +// (1) p = new T() LV -P> JO +// (2) p = q LV -D> LV +// (3) p.f = q JO -F> OF, OF -D> LV +// (4) p = q.f JO -F> OF, LV -D> OF +// +// In all these cases, p and q are local variables. For static field +// references, we can construct a local variable containing a reference +// to the static memory. // // C2 does not have local variables. However for the purposes of constructing // the connection graph, the following IR nodes are treated as local variables: // Phi (pointer values) // LoadP -// Proj (value returned from callnodes including allocations) -// CheckCastPP +// Proj#5 (value returned from callnodes including allocations) +// CheckCastPP, CastPP // -// The LoadP, Proj and CheckCastPP behave like variables assigned to only once. Only -// a Phi can have multiple assignments. Each input to a Phi is treated +// The LoadP, Proj and CheckCastPP behave like variables assigned to only once. +// Only a Phi can have multiple assignments. Each input to a Phi is treated // as an assignment to it. // -// The following note types are JavaObject: +// The following node types are JavaObject: // // top() // Allocate // AllocateArray // Parm (for incoming arguments) +// CastX2P ("unsafe" operations) // CreateEx // ConP // LoadKlass -// +// ThreadLocal +// // AddP nodes are fields. // // After building the graph, a pass is made over the nodes, deleting deferred @@ -92,7 +97,7 @@ // source.
This results in a graph with no deferred edges, only: // // LV -P> JO -// OF -P> JO +// OF -P> JO (the object whose oop is stored in the field) // JO -F> OF // // Then, for each node which is GlobalEscape, anything it could point to @@ -113,17 +118,18 @@ friend class ConnectionGraph; public: typedef enum { - UnknownType = 0, - JavaObject = 1, - LocalVar = 2, - Field = 3 + UnknownType = 0, + JavaObject = 1, + LocalVar = 2, + Field = 3 } NodeType; typedef enum { UnknownEscape = 0, - NoEscape = 1, - ArgEscape = 2, - GlobalEscape = 3 + NoEscape = 1, // A scalar replaceable object with unique type. + ArgEscape = 2, // An object passed as argument or referenced by + // argument (and not globally escape during call). + GlobalEscape = 3 // An object escapes the method and thread. } EscapeState; typedef enum { @@ -143,18 +149,24 @@ NodeType _type; EscapeState _escape; - GrowableArray* _edges; // outgoing edges - int _offset; // for fields + GrowableArray* _edges; // outgoing edges - bool _unique_type; // For allocated objects, this node may be a unique type public: - Node* _node; // Ideal node corresponding to this PointsTo node - int _inputs_processed; // the number of Phi inputs that have been processed so far - bool _hidden_alias; // this node is an argument to a function which may return it - // creating a hidden alias - + Node* _node; // Ideal node corresponding to this PointsTo node. + int _offset; // Object fields offsets. + bool _scalar_replaceable;// Not escaped object could be replaced with scalar + bool _hidden_alias; // This node is an argument to a function. + // which may return it creating a hidden alias. + + PointsToNode(): + _type(UnknownType), + _escape(UnknownEscape), + _edges(NULL), + _node(NULL), + _offset(-1), + _scalar_replaceable(true), + _hidden_alias(false) {} - PointsToNode(): _offset(-1), _type(UnknownType), _escape(UnknownEscape), _edges(NULL), _node(NULL), _inputs_processed(0), _hidden_alias(false), _unique_type(true) {} EscapeState escape_state() const { return _escape; } NodeType node_type() const { return _type;} @@ -169,50 +181,67 @@ // count of outgoing edges uint edge_count() const { return (_edges == NULL) ? 0 : _edges->length(); } + // node index of target of outgoing edge "e" - uint edge_target(uint e) const; + uint edge_target(uint e) const { + assert(_edges != NULL, "valid edge index"); + return (_edges->at(e) >> EdgeShift); + } // type of outgoing edge "e" - EdgeType edge_type(uint e) const; + EdgeType edge_type(uint e) const { + assert(_edges != NULL, "valid edge index"); + return (EdgeType) (_edges->at(e) & EdgeMask); + } + // add a edge of the specified type pointing to the specified target void add_edge(uint targIdx, EdgeType et); + // remove an edge of the specified type pointing to the specified target void remove_edge(uint targIdx, EdgeType et); + #ifndef PRODUCT - void dump() const; + void dump(bool print_state=true) const; #endif }; class ConnectionGraph: public ResourceObj { private: - enum { - INITIAL_NODE_COUNT = 100 // initial size of _nodes array - }; + GrowableArray _nodes; // Connection graph nodes indexed + // by ideal node index. + + Unique_Node_List _delayed_worklist; // Nodes to be processed before + // the call build_connection_graph(). + VectorSet _processed; // Records which nodes have been + // processed. 
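To illustrate the construction and the deferred-edge collapse described in the escape.hpp header comment above, here is a small standalone sketch. It is a toy model written only for this description, not HotSpot code; the names ToyNode and collapse_deferred are made up, cycles and field offsets are ignored, and the real pass also propagates escape states.

    #include <cstdio>
    #include <set>
    #include <vector>

    struct ToyNode {
      std::set<int> points_to;  // -P> edges: indices of JavaObject nodes
      std::set<int> deferred;   // -D> edges: indices of LocalVar/Field nodes
    };

    // Replace every deferred edge by the points-to set of its target, so that
    // only direct PointsTo edges remain (the invariant described above).
    static void collapse_deferred(std::vector<ToyNode>& g, int ni, std::set<int>& visited) {
      if (!visited.insert(ni).second) return;   // already expanded
      for (int ti : g[ni].deferred) {
        collapse_deferred(g, ti, visited);
        g[ni].points_to.insert(g[ti].points_to.begin(), g[ti].points_to.end());
      }
      g[ni].deferred.clear();
    }

    int main() {
      // Model of:  p = new T(); q = p;
      //   node 0 = JO1 (the allocation), node 1 = LV p, node 2 = LV q
      std::vector<ToyNode> g(3);
      g[1].points_to.insert(0);  // case (1): LV p -P> JO1
      g[2].deferred.insert(1);   // case (2): LV q -D> LV p

      std::set<int> visited;
      for (int i = 0; i < (int)g.size(); i++)
        collapse_deferred(g, i, visited);

      for (int jo : g[2].points_to)
        std::printf("q -P> JO%d\n", jo + 1);
      return 0;
    }

Running the sketch prints "q -P> JO1": after the collapse the local q points directly at the allocation, which is the deferred-edge-free form the subsequent escape-state propagation works on.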
- GrowableArray* _nodes; // connection graph nodes Indexed by ideal - // node index - Unique_Node_List _deferred; // Phi's to be processed after parsing - VectorSet _processed; // records which nodes have been processed - bool _collecting; // indicates whether escape information is - // still being collected. If false, no new - // nodes will be processed - uint _phantom_object; // index of globally escaping object that - // pointer values loaded from a field which - // has not been set are assumed to point to - Compile * _compile; // Compile object for current compilation - - // address of an element in _nodes. Used when the element is to be modified - PointsToNode *ptnode_adr(uint idx) { - if ((uint)_nodes->length() <= idx) { - // expand _nodes array - PointsToNode dummy = _nodes->at_grow(idx); - } - return _nodes->adr_at(idx); + bool _collecting; // Indicates whether escape information + // is still being collected. If false, + // no new nodes will be processed. + + uint _phantom_object; // Index of globally escaping object + // that pointer values loaded from + // a field which has not been set + // are assumed to point to. + uint _oop_null; // ConP(#NULL) + uint _noop_null; // ConN(#NULL) + + Compile * _compile; // Compile object for current compilation + + // Address of an element in _nodes. Used when the element is to be modified + PointsToNode *ptnode_adr(uint idx) const { + // There should be no new ideal nodes during ConnectionGraph build, + // growableArray::adr_at() will throw assert otherwise. + return _nodes.adr_at(idx); } + uint nodes_size() const { return _nodes.length(); } + + // Add node to ConnectionGraph. + void add_node(Node *n, PointsToNode::NodeType nt, PointsToNode::EscapeState es, bool done); // offset of a field reference - int type_to_offset(const Type *t); + int address_offset(Node* adr, PhaseTransform *phase); // compute the escape state for arguments to a call void process_call_arguments(CallNode *call, PhaseTransform *phase); @@ -220,12 +249,11 @@ // compute the escape state for the return value of a call void process_call_result(ProjNode *resproj, PhaseTransform *phase); - // compute the escape state of a Phi. This may be called multiple - // times as new inputs are added to the Phi. - void process_phi_escape(PhiNode *phi, PhaseTransform *phase); + // Populate Connection Graph with Ideal nodes. + void record_for_escape_analysis(Node *n, PhaseTransform *phase); - // compute the escape state of an ideal node. - void record_escape_work(Node *n, PhaseTransform *phase); + // Build Connection Graph and set nodes escape state. + void build_connection_graph(Node *n, PhaseTransform *phase); // walk the connection graph starting at the node corresponding to "n" and // add the index of everything it could point to, to "ptset". This may cause @@ -244,15 +272,15 @@ // a pointsto edge is added if it is a JavaObject void add_edge_from_fields(uint adr, uint to_i, int offs); - // Add a deferred edge from node given by "from_i" to any field of adr_i whose offset - // matches "offset" + // Add a deferred edge from node given by "from_i" to any field + // of adr_i whose offset matches "offset" void add_deferred_edge_to_fields(uint from_i, uint adr, int offs); // Remove outgoing deferred edges from the node referenced by "ni". // Any outgoing edges from the target of the deferred edge are copied // to "ni". 
- void remove_deferred(uint ni); + void remove_deferred(uint ni, GrowableArray* deferred_edges, VectorSet* visited); Node_Array _node_map; // used for bookeeping during type splitting // Used for the following purposes: @@ -261,10 +289,12 @@ // MemNode - new memory input for this node // ChecCastPP - allocation that this is a cast of // allocation - CheckCastPP of the allocation - void split_AddP(Node *addp, Node *base, PhaseGVN *igvn); + bool split_AddP(Node *addp, Node *base, PhaseGVN *igvn); PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray &orig_phi_worklist, PhaseGVN *igvn, bool &new_created); PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray &orig_phi_worklist, PhaseGVN *igvn); Node *find_mem(Node *mem, int alias_idx, PhaseGVN *igvn); + Node *find_inst_mem(Node *mem, int alias_idx,GrowableArray &orig_phi_worklist, PhaseGVN *igvn); + // Propagate unique types created for unescaped allocated objects // through the graph void split_unique_types(GrowableArray &alloc_worklist); @@ -288,32 +318,30 @@ // Set the escape state of a node void set_escape_state(uint ni, PointsToNode::EscapeState es); - // bypass any casts and return the node they refer to - Node * skip_casts(Node *n); - - // Get Compile object for current compilation. - Compile *C() const { return _compile; } - public: ConnectionGraph(Compile *C); - // record a Phi for later processing. - void record_for_escape_analysis(Node *n); + // Check for non-escaping candidates + static bool has_candidates(Compile *C); - // process a node and fill in its connection graph node - void record_escape(Node *n, PhaseTransform *phase); - - // All nodes have been recorded, compute the escape information - void compute_escape(); + // Compute the escape information + bool compute_escape(); // escape state of a node PointsToNode::EscapeState escape_state(Node *n, PhaseTransform *phase); + // other information we have collected + bool is_scalar_replaceable(Node *n) { + if (_collecting || (n->_idx >= nodes_size())) + return false; + PointsToNode* ptn = ptnode_adr(n->_idx); + return ptn->escape_state() == PointsToNode::NoEscape && ptn->_scalar_replaceable; + } bool hidden_alias(Node *n) { - if (_collecting) + if (_collecting || (n->_idx >= nodes_size())) return true; - PointsToNode ptn = _nodes->at_grow(n->_idx); - return (ptn.escape_state() != PointsToNode::NoEscape) || ptn._hidden_alias; + PointsToNode* ptn = ptnode_adr(n->_idx); + return (ptn->escape_state() != PointsToNode::NoEscape) || ptn->_hidden_alias; } #ifndef PRODUCT --- old/hotspot/src/share/vm/opto/gcm.cpp 2009-08-01 04:13:43.182204708 +0100 +++ new/hotspot/src/share/vm/opto/gcm.cpp 2009-08-01 04:13:43.097088220 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)gcm.cpp 1.259 08/07/10 14:40:09 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,9 @@ #include "incls/_precompiled.incl" #include "incls/_gcm.cpp.incl" +// To avoid float value underflow +#define MIN_BLOCK_FREQUENCY 1.e-35f + //----------------------------schedule_node_into_block------------------------- // Insert node n into block b. Look for projections of n and make sure they // are in b also. 
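The MIN_BLOCK_FREQUENCY constant introduced above guards the per-loop frequency scaling later in this patch: block frequencies are multiplied by the enclosing loop's frequency at each nesting level, so very cold, deeply nested blocks can underflow a float toward 0 (or become NaN), and the patch adds an assert that the register allocator only ever sees meaningful frequencies. A minimal standalone sketch of the clamp, reusing the constant's value from this change (the helper name scaled_freq is hypothetical):

    #include <cmath>
    #include <cstdio>

    static const float kMinBlockFrequency = 1.e-35f;  // value added by this patch

    // Mirror of the clamp applied when a member block's frequency is scaled
    // by its loop's frequency in the scale_freq hunk of this change.
    static float scaled_freq(float block_freq, float loop_freq) {
      float f = block_freq * loop_freq;
      if (std::isnan(f) || f < kMinBlockFrequency)
        f = kMinBlockFrequency;   // guard against underflow and NaN
      return f;
    }

    int main() {
      float freq = 1.0f;
      // A very cold, deeply nested loop nest: without the clamp the product
      // would decay through denormals and eventually reach 0.0f.
      for (int depth = 0; depth < 8; depth++)
        freq = scaled_freq(freq, 1.e-6f);
      std::printf("clamped block frequency = %g\n", freq);  // stays at 1e-35
      return 0;
    }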
@@ -453,9 +456,9 @@ ResourceArea *area = Thread::current()->resource_area(); Node_List worklist_mem(area); // prior memory state to store Node_List worklist_store(area); // possible-def to explore + Node_List worklist_visited(area); // visited mergemem nodes Node_List non_early_stores(area); // all relevant stores outside of early bool must_raise_LCA = false; - DEBUG_ONLY(VectorSet should_not_repeat(area)); #ifdef TRACK_PHI_INPUTS // %%% This extra checking fails because MergeMem nodes are not GVNed. @@ -484,8 +487,8 @@ Node* initial_mem = load->in(MemNode::Memory); worklist_store.push(initial_mem); + worklist_visited.push(initial_mem); worklist_mem.push(NULL); - DEBUG_ONLY(should_not_repeat.test_set(initial_mem->_idx)); while (worklist_store.size() > 0) { // Examine a nearby store to see if it might interfere with our load. Node* mem = worklist_mem.pop(); @@ -499,18 +502,20 @@ || op == Op_MergeMem // internal node of tree we are searching ) { mem = store; // It's not a possibly interfering store. + if (store == initial_mem) + initial_mem = NULL; // only process initial memory once + for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { store = mem->fast_out(i); if (store->is_MergeMem()) { // Be sure we don't get into combinatorial problems. // (Allow phis to be repeated; they can merge two relevant states.) - uint i = worklist_store.size(); - for (; i > 0; i--) { - if (worklist_store.at(i-1) == store) break; + uint j = worklist_visited.size(); + for (; j > 0; j--) { + if (worklist_visited.at(j-1) == store) break; } - if (i > 0) continue; // already on work list; do not repeat - DEBUG_ONLY(int repeated = should_not_repeat.test_set(store->_idx)); - assert(!repeated, "do not walk merges twice"); + if (j > 0) continue; // already on work list; do not repeat + worklist_visited.push(store); } worklist_mem.push(mem); worklist_store.push(store); @@ -1320,11 +1325,33 @@ //------------------------------Estimate_Block_Frequency----------------------- // Estimate block frequencies based on IfNode probabilities. void PhaseCFG::Estimate_Block_Frequency() { - int cnts = C->method() ? C->method()->interpreter_invocation_count() : 1; - // Most of our algorithms will die horribly if frequency can become - // negative so make sure cnts is a sane value. - if( cnts <= 0 ) cnts = 1; - float f = (float)cnts/(float)FreqCountInvocations; + + // Force conditional branches leading to uncommon traps to be unlikely, + // not because we get to the uncommon_trap with less relative frequency, + // but because an uncommon_trap typically causes a deopt, so we only get + // there once. + if (C->do_freq_based_layout()) { + Block_List worklist; + Block* root_blk = _blocks[0]; + for (uint i = 1; i < root_blk->num_preds(); i++) { + Block *pb = _bbs[root_blk->pred(i)->_idx]; + if (pb->has_uncommon_code()) { + worklist.push(pb); + } + } + while (worklist.size() > 0) { + Block* uct = worklist.pop(); + if (uct == _broot) continue; + for (uint i = 1; i < uct->num_preds(); i++) { + Block *pb = _bbs[uct->pred(i)->_idx]; + if (pb->_num_succs == 1) { + worklist.push(pb); + } else if (pb->num_fall_throughs() == 2) { + pb->update_uncommon_branch(uct); + } + } + } + } // Create the loop tree and calculate loop depth. _root_loop = create_loop_tree(); @@ -1332,30 +1359,39 @@ // Compute block frequency of each block, relative to a single loop entry. 
_root_loop->compute_freq(); - - // Adjust all frequencies to be relative to a single method entry - _root_loop->_freq = f * 1.0; + + // Adjust all frequencies to be relative to a single method entry + _root_loop->_freq = 1.0; _root_loop->scale_freq(); // force paths ending at uncommon traps to be infrequent - Block_List worklist; - Block* root_blk = _blocks[0]; - for (uint i = 0; i < root_blk->num_preds(); i++) { - Block *pb = _bbs[root_blk->pred(i)->_idx]; - if (pb->has_uncommon_code()) { - worklist.push(pb); - } - } - while (worklist.size() > 0) { - Block* uct = worklist.pop(); - uct->_freq = PROB_MIN; - for (uint i = 0; i < uct->num_preds(); i++) { - Block *pb = _bbs[uct->pred(i)->_idx]; - if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) { + if (!C->do_freq_based_layout()) { + Block_List worklist; + Block* root_blk = _blocks[0]; + for (uint i = 1; i < root_blk->num_preds(); i++) { + Block *pb = _bbs[root_blk->pred(i)->_idx]; + if (pb->has_uncommon_code()) { worklist.push(pb); } } + while (worklist.size() > 0) { + Block* uct = worklist.pop(); + uct->_freq = PROB_MIN; + for (uint i = 1; i < uct->num_preds(); i++) { + Block *pb = _bbs[uct->pred(i)->_idx]; + if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) { + worklist.push(pb); + } + } + } + } + +#ifdef ASSERT + for (uint i = 0; i < _num_blocks; i++ ) { + Block *b = _blocks[i]; + assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency"); } +#endif #ifndef PRODUCT if (PrintCFGBlockFreq) { @@ -1557,23 +1593,7 @@ } } -#if 0 - // Raise frequency of the loop backedge block, in an effort - // to keep it empty. Skip the method level "loop". - if (_parent != NULL) { - CFGElement* s = _members.at(_members.length() - 1); - if (s->is_block()) { - Block* bk = s->as_Block(); - if (bk->_num_succs == 1 && bk->_succs[0] == hd) { - // almost any value >= 1.0f works - // FIXME: raw constant - bk->_freq = 1.05f; - } - } - } -#endif - - // For all loops other than the outer, "method" loop, + // For all loops other than the outer, "method" loop, // sum and normalize the exit probability. The "method" loop // should keep the initial exit probability of 1, so that // inner blocks do not get erroneously scaled. @@ -1590,12 +1610,15 @@ // the probability of exit per loop entry. for (int i = 0; i < _exits.length(); i++) { Block* et = _exits.at(i).get_target(); - float new_prob = _exits.at(i).get_prob() / exits_sum; + float new_prob = 0.0f; + if (_exits.at(i).get_prob() > 0.0f) { + new_prob = _exits.at(i).get_prob() / exits_sum; + } BlockProbPair bpp(et, new_prob); _exits.at_put(i, bpp); } - - // Save the total, but guard against unreasoable probability, + + // Save the total, but guard against unreasonable probability, // as the value is used to estimate the loop trip count. // An infinite trip count would blur relative block // frequencies. @@ -1610,7 +1633,30 @@ float Block::succ_prob(uint i) { int eidx = end_idx(); Node *n = _nodes[eidx]; // Get ending Node - int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode(); + + int op = n->Opcode(); + if (n->is_Mach()) { + if (n->is_MachNullCheck()) { + // Can only reach here if called after lcm. The original Op_If is gone, + // so we attempt to infer the probability from one or both of the + // successor blocks. + assert(_num_succs == 2, "expecting 2 successors of a null check"); + // If either successor has only one predecessor, then the + // probabiltity estimate can be derived using the + // relative frequency of the successor and this block. 
+ if (_succs[i]->num_preds() == 2) { + return _succs[i]->_freq / _freq; + } else if (_succs[1-i]->num_preds() == 2) { + return 1 - (_succs[1-i]->_freq / _freq); + } else { + // Estimate using both successor frequencies + float freq = _succs[i]->_freq; + return freq / (freq + _succs[1-i]->_freq); + } + } + op = n->as_Mach()->ideal_Opcode(); + } + // Switch on branch type switch( op ) { @@ -1666,6 +1712,137 @@ return 0.0f; } +//------------------------------num_fall_throughs----------------------------- +// Return the number of fall-through candidates for a block +int Block::num_fall_throughs() { + int eidx = end_idx(); + Node *n = _nodes[eidx]; // Get ending Node + + int op = n->Opcode(); + if (n->is_Mach()) { + if (n->is_MachNullCheck()) { + // In theory, either side can fall-thru, for simplicity sake, + // let's say only the false branch can now. + return 1; + } + op = n->as_Mach()->ideal_Opcode(); + } + + // Switch on branch type + switch( op ) { + case Op_CountedLoopEnd: + case Op_If: + return 2; + + case Op_Root: + case Op_Goto: + return 1; + + case Op_Catch: { + for (uint i = 0; i < _num_succs; i++) { + const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj(); + if (ci->_con == CatchProjNode::fall_through_index) { + return 1; + } + } + return 0; + } + + case Op_Jump: + case Op_NeverBranch: + case Op_TailCall: + case Op_TailJump: + case Op_Return: + case Op_Halt: + case Op_Rethrow: + return 0; + + default: + ShouldNotReachHere(); + } + + return 0; +} + +//------------------------------succ_fall_through----------------------------- +// Return true if a specific successor could be fall-through target. +bool Block::succ_fall_through(uint i) { + int eidx = end_idx(); + Node *n = _nodes[eidx]; // Get ending Node + + int op = n->Opcode(); + if (n->is_Mach()) { + if (n->is_MachNullCheck()) { + // In theory, either side can fall-thru, for simplicity sake, + // let's say only the false branch can now. + return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse; + } + op = n->as_Mach()->ideal_Opcode(); + } + + // Switch on branch type + switch( op ) { + case Op_CountedLoopEnd: + case Op_If: + case Op_Root: + case Op_Goto: + return true; + + case Op_Catch: { + const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj(); + return ci->_con == CatchProjNode::fall_through_index; + } + + case Op_Jump: + case Op_NeverBranch: + case Op_TailCall: + case Op_TailJump: + case Op_Return: + case Op_Halt: + case Op_Rethrow: + return false; + + default: + ShouldNotReachHere(); + } + + return false; +} + +//------------------------------update_uncommon_branch------------------------ +// Update the probability of a two-branch to be uncommon +void Block::update_uncommon_branch(Block* ub) { + int eidx = end_idx(); + Node *n = _nodes[eidx]; // Get ending Node + + int op = n->as_Mach()->ideal_Opcode(); + + assert(op == Op_CountedLoopEnd || op == Op_If, "must be a If"); + assert(num_fall_throughs() == 2, "must be a two way branch block"); + + // Which successor is ub? 
+ uint s; + for (s = 0; s <_num_succs; s++) { + if (_succs[s] == ub) break; + } + assert(s < 2, "uncommon successor must be found"); + + // If ub is the true path, make the probability small, else + // ub is the false path, and make the probability large + bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse); + + // Get existing probability + float p = n->as_MachIf()->_prob; + + if (invert) p = 1.0 - p; + if (p > PROB_MIN) { + p = PROB_MIN; + } + if (invert) p = 1.0 - p; + + n->as_MachIf()->_prob = p; +} + //------------------------------update_succ_freq------------------------------- // Update the appropriate frequency associated with block 'b', a successor of // a block in this loop. @@ -1713,7 +1890,10 @@ float loop_freq = _freq * trip_count(); for (int i = 0; i < _members.length(); i++) { CFGElement* s = _members.at(i); - s->_freq *= loop_freq; + float block_freq = s->_freq * loop_freq; + if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY) + block_freq = MIN_BLOCK_FREQUENCY; + s->_freq = block_freq; } CFGLoop* ch = _child; while (ch != NULL) { --- old/hotspot/src/share/vm/opto/graphKit.cpp 2009-08-01 04:13:44.175290072 +0100 +++ new/hotspot/src/share/vm/opto/graphKit.cpp 2009-08-01 04:13:44.081525063 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)graphKit.cpp 1.132 07/10/04 14:36:00 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -535,7 +535,7 @@ C->log()->elem("hot_throw preallocated='1' reason='%s'", Deoptimization::trap_reason_name(reason)); const TypeInstPtr* ex_con = TypeInstPtr::make(ex_obj); - Node* ex_node = _gvn.transform(new (C, 1) ConPNode(ex_con)); + Node* ex_node = _gvn.transform( ConNode::make(C, ex_con) ); // Clear the detail message of the preallocated exception object. // Weblogic sometimes mutates the detail message of exceptions @@ -590,7 +590,7 @@ #ifdef ASSERT _bci = kit->bci(); Parse* parser = kit->is_Parse(); - int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->pre_order(); + int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo(); _block = block; #endif } @@ -599,7 +599,7 @@ #ifdef ASSERT assert(kit->bci() == _bci, "bci must not shift"); Parse* parser = kit->is_Parse(); - int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->pre_order(); + int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo(); assert(block == _block, "block must not shift"); #endif kit->set_map(_map); @@ -860,6 +860,13 @@ for (j = 0; j < l; j++) call->set_req(p++, in_map->in(k+j)); + // Copy any scalar object fields. + k = in_jvms->scloff(); + l = in_jvms->scl_size(); + out_jvms->set_scloff(p); + for (j = 0; j < l; j++) + call->set_req(p++, in_map->in(k+j)); + // Finish the new jvms. out_jvms->set_endoff(p); @@ -867,6 +874,7 @@ assert(out_jvms->depth() == in_jvms->depth(), "depth must match"); assert(out_jvms->loc_size() == in_jvms->loc_size(), "size must match"); assert(out_jvms->mon_size() == in_jvms->mon_size(), "size must match"); + assert(out_jvms->scl_size() == in_jvms->scl_size(), "size must match"); assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match"); // Update the two tail pointers in parallel.
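The probability adjustment in update_uncommon_branch() above relies on a small inversion trick: _prob on the MachIf node is the probability of the true projection, so when the uncommon block hangs off the false edge the value is inverted, clamped, and inverted back. A standalone sketch of just that arithmetic (the kProbMin value below is an assumed stand-in for PROB_MIN, not the VM's constant):

    #include <cstdio>

    static const float kProbMin = 1.0e-6f;  // assumed stand-in for PROB_MIN

    // The stored value is the probability of the *true* edge; if the uncommon
    // successor is on the false edge, view it from that edge first.
    static float make_branch_uncommon(float true_prob, bool uncommon_on_false_edge) {
      float p = true_prob;
      if (uncommon_on_false_edge) p = 1.0f - p;
      if (p > kProbMin) p = kProbMin;   // the uncommon edge becomes very unlikely
      if (uncommon_on_false_edge) p = 1.0f - p;
      return p;
    }

    int main() {
      // Uncommon block on the true edge: the true-edge probability drops.
      std::printf("uncommon on true edge:  %g\n", make_branch_uncommon(0.5f, false));
      // Uncommon block on the false edge: the true-edge probability rises instead.
      std::printf("uncommon on false edge: %g\n", make_branch_uncommon(0.5f, true));
      return 0;
    }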
@@ -1038,16 +1046,25 @@ Node* akls = AllocateNode::Ideal_klass(obj, &_gvn); if (akls != NULL) return akls; Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes()); - return _gvn.transform( new (C, 3) LoadKlassNode(0, immutable_memory(), k_adr, TypeInstPtr::KLASS) ); + return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) ); } //-------------------------load_array_length----------------------------------- Node* GraphKit::load_array_length(Node* array) { // Special-case a fresh allocation to avoid building nodes: - Node* alen = AllocateArrayNode::Ideal_length(array, &_gvn); - if (alen != NULL) return alen; - Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes()); - return _gvn.transform( new (C, 3) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS)); + AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn); + Node *alen; + if (alloc == NULL) { + Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes()); + alen = _gvn.transform( new (C, 3) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS)); + } else { + alen = alloc->Ideal_length(); + Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_aryptr(), &_gvn); + if (ccast != alen) { + alen = _gvn.transform(ccast); + } + } + return alen; } //------------------------------do_null_check---------------------------------- @@ -1175,6 +1192,12 @@ else reason = Deoptimization::Reason_div0_check; + // %%% Since Reason_unhandled is not recorded on a per-bytecode basis, + // ciMethodData::has_trap_at will return a conservative -1 if any + // must-be-null assertion has failed. This could cause performance + // problems for a method after its first do_null_assert failure. + // Consider using 'Reason_class_check' instead? + // To cause an implicit null check, we set the not-null probability // to the maximum (PROB_MAX). For an explicit check the probablity // is set to a smaller value. 
@@ -1207,6 +1230,7 @@ Deoptimization::Action_make_not_entrant, NULL, "assert_null"); } else { + replace_in_map(value, zerocon(type)); builtin_throw(reason); } } @@ -1323,7 +1347,7 @@ if (require_atomic_access && bt == T_LONG) { ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t); } else { - ld = LoadNode::make(C, ctl, mem, adr, adr_type, t, bt); + ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt); } return _gvn.transform(ld); } @@ -1339,7 +1363,7 @@ if (require_atomic_access && bt == T_LONG) { st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val); } else { - st = StoreNode::make(C, ctl, mem, adr, adr_type, val, bt); + st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt); } st = _gvn.transform(st); set_memory(st, adr_idx); @@ -1361,6 +1385,10 @@ BarrierSet* bs = Universe::heap()->barrier_set(); set_control(ctl); switch (bs->kind()) { + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: + g1_write_barrier_pre(obj, adr, adr_idx, val, val_type, bt); + break; case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: @@ -1385,6 +1413,10 @@ BarrierSet* bs = Universe::heap()->barrier_set(); set_control(ctl); switch (bs->kind()) { + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: + g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise); + break; case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: @@ -1450,7 +1482,7 @@ //-------------------------array_element_address------------------------- Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt, const TypeInt* sizetype) { - uint shift = exact_log2(type2aelembytes[elembt]); + uint shift = exact_log2(type2aelembytes(elembt)); uint header = arrayOopDesc::base_offset_in_bytes(elembt); // short-circuit a common case (saves lots of confusing waste motion) @@ -1955,6 +1987,7 @@ // method will be compiled to handle NULLs. PreserveJVMState pjvms(this); set_control(*null_control); + replace_in_map(value, null()); uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_make_not_entrant); (*null_control) = top(); // NULL path is dead @@ -2205,7 +2238,7 @@ // cache which is mutable so can't use immutable memory. Other // types load from the super-class display table which is immutable. Node *kmem = might_be_cache ? memory(p2) : immutable_memory(); - Node *nkls = _gvn.transform( new (C, 3) LoadKlassNode( NULL, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) ); + Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) ); // Compile speed common case: ARE a subtype and we canNOT fail if( superklass == nkls ) @@ -2796,7 +2829,6 @@ // initialization, and source them from the new InitializeNode. // This will allow us to observe initializations when they occur, // and link them properly (as a group) to the InitializeNode. 
- Node* klass_node = alloc->in(AllocateNode::KlassNode); assert(init->in(InitializeNode::Memory) == malloc, ""); MergeMemNode* minit_in = MergeMemNode::make(C, malloc); init->set_req(InitializeNode::Memory, minit_in); @@ -2811,7 +2843,7 @@ ciInstanceKlass* ik = oop_type->klass()->as_instance_klass(); for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) { ciField* field = ik->nonstatic_field_at(i); - if (field->offset() >= TrackedInitializationLimit) + if (field->offset() >= TrackedInitializationLimit * HeapWordSize) continue; // do not bother to track really large numbers of fields // Find (or create) the alias category for this field: int fieldidx = C->alias_type(field)->index(); @@ -2827,20 +2859,18 @@ assert(just_allocated_object(control()) == javaoop, "just allocated"); #ifdef ASSERT - { // Verify that the AllocateNode::Ideal_foo recognizers work: - Node* kn = alloc->in(AllocateNode::KlassNode); - Node* ln = alloc->in(AllocateNode::ALength); - assert(AllocateNode::Ideal_klass(rawoop, &_gvn) == kn, - "Ideal_klass works"); - assert(AllocateNode::Ideal_klass(javaoop, &_gvn) == kn, - "Ideal_klass works"); + { // Verify that the AllocateNode::Ideal_allocation recognizers work: + assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc, + "Ideal_allocation works"); + assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc, + "Ideal_allocation works"); if (alloc->is_AllocateArray()) { - assert(AllocateArrayNode::Ideal_length(rawoop, &_gvn) == ln, - "Ideal_length works"); - assert(AllocateArrayNode::Ideal_length(javaoop, &_gvn) == ln, - "Ideal_length works"); + assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(), + "Ideal_allocation works"); + assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(), + "Ideal_allocation works"); } else { - assert(ln->is_top(), "no length, please"); + assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please"); } } #endif //ASSERT @@ -2917,10 +2947,22 @@ const TypeOopPtr* oop_type = tklass->as_instance_type(); // Now generate allocation code + + // With escape analysis, the entire memory state is needed to be able to + // eliminate the allocation. If the allocations cannot be eliminated, this + // will be optimized to the raw slice when the allocation is expanded. + Node *mem; + if (C->do_escape_analysis()) { + mem = reset_memory(); + set_all_memory(mem); + } else { + mem = memory(Compile::AliasIdxRaw); + } + AllocateNode* alloc = new (C, AllocateNode::ParmLimit) AllocateNode(C, AllocateNode::alloc_type(), - control(), memory(Compile::AliasIdxRaw), i_o(), + control(), mem, i_o(), size, klass_node, initial_slow_test); @@ -3051,11 +3093,23 @@ } // Now generate allocation code + + // With escape analysis, the entire memory state is needed to be able to + // eliminate the allocation. If the allocations cannot be eliminated, this + // will be optimized to the raw slice when the allocation is expanded. + Node *mem; + if (C->do_escape_analysis()) { + mem = reset_memory(); + set_all_memory(mem); + } else { + mem = memory(Compile::AliasIdxRaw); + } + // Create the AllocateArrayNode and its result projections AllocateArrayNode* alloc = new (C, AllocateArrayNode::ParmLimit) AllocateArrayNode(C, AllocateArrayNode::alloc_type(), - control(), memory(Compile::AliasIdxRaw), i_o(), + control(), mem, i_o(), size, klass_node, initial_slow_test, length); @@ -3065,25 +3119,20 @@ // (This happens via a non-constant argument to inline_native_newArray.) 
// In any case, the value of klass_node provides the desired array type. const TypeInt* length_type = _gvn.find_int_type(length); - const TypeInt* narrow_length_type = NULL; const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type(); if (ary_type->isa_aryptr() && length_type != NULL) { // Try to get a better type than POS for the size ary_type = ary_type->is_aryptr()->cast_to_size(length_type); - narrow_length_type = ary_type->is_aryptr()->size(); - if (narrow_length_type == length_type) - narrow_length_type = NULL; } Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only); - // Cast length on remaining path to be positive: - if (narrow_length_type != NULL) { - Node* ccast = new (C, 2) CastIINode(length, narrow_length_type); - ccast->set_req(0, control()); - _gvn.set_type_bottom(ccast); - record_for_igvn(ccast); - if (map()->find_edge(length) >= 0) { + // Cast length on remaining path to be as narrow as possible + if (map()->find_edge(length) >= 0) { + Node* ccast = alloc->make_ideal_length(ary_type, &_gvn); + if (ccast != length) { + _gvn.set_type_bottom(ccast); + record_for_igvn(ccast); replace_in_map(length, ccast); } } @@ -3147,3 +3196,251 @@ } return NULL; } + +void GraphKit::g1_write_barrier_pre(Node* obj, + Node* adr, + uint alias_idx, + Node* val, + const Type* val_type, + BasicType bt) { + IdealKit ideal(gvn(), control(), merged_memory(), true); +#define __ ideal. + __ declares_done(); + + Node* thread = __ thread(); + + Node* no_ctrl = NULL; + Node* no_base = __ top(); + Node* zero = __ ConI(0); + + float likely = PROB_LIKELY(0.999); + float unlikely = PROB_UNLIKELY(0.999); + + BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE; + assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width"); + + // Offsets into the thread + const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 648 + PtrQueue::byte_offset_of_active()); + const int index_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 656 + PtrQueue::byte_offset_of_index()); + const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 652 + PtrQueue::byte_offset_of_buf()); + // Now the actual pointers into the thread + + // set_control( ctl); + + Node* marking_adr = __ AddP(no_base, thread, __ ConX(marking_offset)); + Node* buffer_adr = __ AddP(no_base, thread, __ ConX(buffer_offset)); + Node* index_adr = __ AddP(no_base, thread, __ ConX(index_offset)); + + // Now some of the values + + Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw); + + // if (!marking) + __ if_then(marking, BoolTest::ne, zero); { + Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw); + + const Type* t1 = adr->bottom_type(); + const Type* t2 = val->bottom_type(); + + Node* orig = __ load(no_ctrl, adr, val_type, bt, alias_idx); + // if (orig != NULL) + __ if_then(orig, BoolTest::ne, null()); { + Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw); + + // load original value + // alias_idx correct?? + + // is the queue for this thread full? 
+ __ if_then(index, BoolTest::ne, zero, likely); { + + // decrement the index + Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t))); + Node* next_indexX = next_index; +#ifdef _LP64 + // We could refine the type for what it's worth + // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue); + next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) ); +#endif // _LP64 + + // Now get the buffer location we will log the original value into and store it + + Node *log_addr = __ AddP(no_base, buffer, next_indexX); + // __ store(__ ctrl(), log_addr, orig, T_OBJECT, C->get_alias_index(TypeOopPtr::BOTTOM)); + __ store(__ ctrl(), log_addr, orig, T_OBJECT, Compile::AliasIdxRaw); + + + // update the index + // __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw); + // This is a hack to force this store to occur before the oop store that is coming up + __ store(__ ctrl(), index_adr, next_index, T_INT, C->get_alias_index(TypeOopPtr::BOTTOM)); + + } __ else_(); { + + // logging buffer is full, call the runtime + const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type(); + // __ make_leaf_call(tf, OptoRuntime::g1_wb_pre_Java(), "g1_wb_pre", orig, thread); + __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, thread); + } __ end_if(); + } __ end_if(); + } __ end_if(); + + __ drain_delay_transform(); + set_control( __ ctrl()); + set_all_memory( __ merged_memory()); + +#undef __ +} + +// +// Update the card table and add card address to the queue +// +void GraphKit::g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store, Node* index, Node* index_adr, Node* buffer, const TypeFunc* tf) { +#define __ ideal-> + Node* zero = __ ConI(0); + Node* no_base = __ top(); + BasicType card_bt = T_BYTE; + // Smash zero into card. MUST BE ORDERED WRT TO STORE + __ storeCM(__ ctrl(), card_adr, zero, store, card_bt, Compile::AliasIdxRaw); + + // Now do the queue work + __ if_then(index, BoolTest::ne, zero); { + + Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t))); + Node* next_indexX = next_index; +#ifdef _LP64 + // We could refine the type for what it's worth + // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue); + next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) ); +#endif // _LP64 + Node* log_addr = __ AddP(no_base, buffer, next_indexX); + + __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw); + __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw); + + } __ else_(); { + __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread()); + } __ end_if(); +#undef __ +} + +void GraphKit::g1_write_barrier_post(Node* store, + Node* obj, + Node* adr, + uint alias_idx, + Node* val, + BasicType bt, + bool use_precise) { + // If we are writing a NULL then we need no post barrier + + if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) { + // Must be NULL + const Type* t = val->bottom_type(); + assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL"); + // No post barrier if writing NULLx + return; + } + + if (!use_precise) { + // All card marks for a (non-array) instance are in one place: + adr = obj; + } + // (Else it's an array (or unknown), and we want more precise card marks.) 
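Further down in g1_write_barrier_post the generated IR skips the card mark when the stored value stays in the same G1 region, by XOR-ing the two addresses and shifting by the region-size log (the xor_res test), and it derives the card address by shifting the object address by the card shift and indexing the table at byte_map_base. A standalone sketch of that address arithmetic (the region and card sizes below are illustrative assumptions, not values taken from the VM):

    #include <cstdint>
    #include <cstdio>

    static const int kLogRegionSize = 20;  // assume 1 MB regions (illustrative)
    static const int kCardShift     = 9;   // assume 512-byte cards (illustrative)

    // A store needs cross-region treatment only if object and value live in
    // different regions: XOR the addresses and shift away the in-region bits.
    static bool crosses_regions(uintptr_t obj_addr, uintptr_t val_addr) {
      return ((obj_addr ^ val_addr) >> kLogRegionSize) != 0;
    }

    // The card for an address is the address shifted by the card shift,
    // used as an index into the card table.
    static size_t card_index(uintptr_t addr) {
      return static_cast<size_t>(addr >> kCardShift);
    }

    int main() {
      uintptr_t obj              = 0x10000000u;  // pretend heap addresses
      uintptr_t same_region_val  = 0x10040000u;
      uintptr_t other_region_val = 0x20000000u;

      std::printf("same region crosses?  %d\n", crosses_regions(obj, same_region_val));   // 0
      std::printf("other region crosses? %d\n", crosses_regions(obj, other_region_val));  // 1
      std::printf("card index of obj:    %zu\n", card_index(obj));
      return 0;
    }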
+ assert(adr != NULL, ""); + + IdealKit ideal(gvn(), control(), merged_memory(), true); +#define __ ideal. + __ declares_done(); + + Node* thread = __ thread(); + + Node* no_ctrl = NULL; + Node* no_base = __ top(); + float likely = PROB_LIKELY(0.999); + float unlikely = PROB_UNLIKELY(0.999); + Node* zero = __ ConI(0); + Node* zeroX = __ ConX(0); + + // Get the alias_index for raw card-mark memory + const TypePtr* card_type = TypeRawPtr::BOTTOM; + + const TypeFunc *tf = OptoRuntime::g1_wb_post_Type(); + + // Get the address of the card table + CardTableModRefBS* ct = + (CardTableModRefBS*)(Universe::heap()->barrier_set()); + Node *card_table = __ makecon(TypeRawPtr::make((address)ct->byte_map_base)); + // Get base of card map + assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + + + // Offsets into the thread + const int index_offset = in_bytes(JavaThread::dirty_card_queue_offset() + + PtrQueue::byte_offset_of_index()); + const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() + + PtrQueue::byte_offset_of_buf()); + + // Pointers into the thread + + Node* buffer_adr = __ AddP(no_base, thread, __ ConX(buffer_offset)); + Node* index_adr = __ AddP(no_base, thread, __ ConX(index_offset)); + + // Now some values + + Node* index = __ load(no_ctrl, index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw); + Node* buffer = __ load(no_ctrl, buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw); + + + // Convert the store obj pointer to an int prior to doing math on it + // Use addr not obj gets accurate card marks + + // Node* cast = __ CastPX(no_ctrl, adr /* obj */); + + // Must use ctrl to prevent "integerized oop" existing across safepoint + Node* cast = __ CastPX(__ ctrl(), ( use_precise ? adr : obj )); + + // Divide pointer by card size + Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) ); + + // Combine card table base and card offset + Node *card_adr = __ AddP(no_base, card_table, card_offset ); + + // If we know the value being stored does it cross regions? + + if (val != NULL) { + // Does the store cause us to cross regions? + + // Should be able to do an unsigned compare of region_size instead of + // and extra shift. Do we have an unsigned compare?? 
+ // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes); + Node* xor_res = __ URShiftX ( __ XorX( cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes)); + + // if (xor_res == 0) same region so skip + __ if_then(xor_res, BoolTest::ne, zeroX); { + + // No barrier if we are storing a NULL + __ if_then(val, BoolTest::ne, null(), unlikely); { + + // Ok must mark the card if not already dirty + + // load the original value of the card + Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw); + + __ if_then(card_val, BoolTest::ne, zero); { + g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf); + } __ end_if(); + } __ end_if(); + } __ end_if(); + } else { + g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf); + } + + + __ drain_delay_transform(); + set_control( __ ctrl()); + set_all_memory( __ merged_memory()); +#undef __ + +} --- old/hotspot/src/share/vm/opto/graphKit.hpp 2009-08-01 04:13:45.246352421 +0100 +++ new/hotspot/src/share/vm/opto/graphKit.hpp 2009-08-01 04:13:45.169381413 +0100 @@ -27,6 +27,7 @@ class FastLockNode; class FastUnlockNode; +class IdealKit; class Parse; class RootNode; @@ -584,6 +585,27 @@ && Universe::heap()->can_elide_tlab_store_barriers()); } + // G1 pre/post barriers + void g1_write_barrier_pre(Node* obj, + Node* adr, + uint alias_idx, + Node* val, + const Type* val_type, + BasicType bt); + + void g1_write_barrier_post(Node* store, + Node* obj, + Node* adr, + uint alias_idx, + Node* val, + BasicType bt, + bool use_precise); + // Helper function for g1 + private: + void g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store, Node* index, Node* index_adr, + Node* buffer, const TypeFunc* tf); + + public: // Helper function to round double arguments before a call void round_double_arguments(ciMethod* dest_method); void round_double_result(ciMethod* dest_method); --- old/hotspot/src/share/vm/opto/idealGraphPrinter.cpp 2009-08-01 04:13:46.137165295 +0100 +++ new/hotspot/src/share/vm/opto/idealGraphPrinter.cpp 2009-08-01 04:13:46.055375477 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)idealGraphPrinter.cpp 1.2 07/10/02 11:32:04 JVM" -#endif /* - * Copyright 2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2007-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ #include "incls/_precompiled.incl" @@ -103,16 +100,18 @@ // Constructor, either file or network output IdealGraphPrinter::IdealGraphPrinter() { - _traverse_outs = false; + // By default dump both ins and outs since dead or unreachable code + // needs to appear in the graph. There are also some special cases + // in the mach where kill projections have no users but should + // appear in the dump. 
+ _traverse_outs = true; _should_send_method = true; _output = NULL; buffer[0] = 0; _depth = 0; _current_method = NULL; assert(!_current_method, "current method must be initialized to NULL"); - _arena = new Arena(); - - _stream = new (ResourceObj::C_HEAP) networkStream(); + _stream = NULL; if (PrintIdealGraphFile != NULL) { ThreadCritical tc; @@ -127,12 +126,16 @@ } else { st.print("%s%d", PrintIdealGraphFile, _file_count); } - _output = new (ResourceObj::C_HEAP) fileStream(st.as_string()); + fileStream *stream = new (ResourceObj::C_HEAP) fileStream(st.as_string()); + _output = stream; } else { - _output = new (ResourceObj::C_HEAP) fileStream(PrintIdealGraphFile); + fileStream *stream = new (ResourceObj::C_HEAP) fileStream(PrintIdealGraphFile); + _output = stream; } _file_count++; } else { + _stream = new (ResourceObj::C_HEAP) networkStream(); + // Try to connect to visualizer if (_stream->connect(PrintIdealGraphAddress, PrintIdealGraphPort)) { char c = 0; @@ -152,14 +155,25 @@ } } - start_element(TOP_ELEMENT); + _xml = new (ResourceObj::C_HEAP) xmlStream(_output); + + head(TOP_ELEMENT); } // Destructor, close file or network stream -IdealGraphPrinter::~IdealGraphPrinter() { - - end_element(TOP_ELEMENT); - +IdealGraphPrinter::~IdealGraphPrinter() { + + tail(TOP_ELEMENT); + + // tty->print_cr("Walk time: %d", (int)_walk_time.milliseconds()); + // tty->print_cr("Output time: %d", (int)_output_time.milliseconds()); + // tty->print_cr("Build blocks time: %d", (int)_build_blocks_time.milliseconds()); + + if(_xml) { + delete _xml; + _xml = NULL; + } + if (_stream) { delete _stream; if (_stream == _output) { @@ -174,94 +188,93 @@ } } -void IdealGraphPrinter::print_ifg(PhaseIFG* ifg) { - // Code to print an interference graph to tty, currently not used +void IdealGraphPrinter::begin_elem(const char *s) { + _xml->begin_elem(s); +} - /* - if (!_current_method) return; - // Remove neighbor colors +void IdealGraphPrinter::end_elem() { + _xml->end_elem(); +} - for (uint i = 0; i < ifg._maxlrg; i++) { +void IdealGraphPrinter::begin_head(const char *s) { + _xml->begin_head(s); +} - IndexSet *s = ifg.neighbors(i); - IndexSetIterator elements(s); - uint neighbor; - while ((neighbor = elements.next()) != 0) { - tty->print_cr("Edge between %d and %d\n", i, neighbor); - } - } +void IdealGraphPrinter::end_head() { + _xml->end_head(); +} - - for (uint i = 0; i < ifg._maxlrg; i++) { - LRG &l = ifg.lrgs(i); - if (l._def) { - OptoReg::Name name = l.reg(); - tty->print("OptoReg::dump: "); - OptoReg::dump(name); - tty->print_cr(""); - tty->print_cr("name=%d\n", name); - if (name) { - if (OptoReg::is_stack(name)) { - tty->print_cr("Stack number %d\n", OptoReg::reg2stack(name)); - - } else if (!OptoReg::is_valid(name)) { - tty->print_cr("BAD!!!"); - } else { +void IdealGraphPrinter::print_attr(const char *name, intptr_t val) { + stringStream stream; + stream.print(INTX_FORMAT, val); + print_attr(name, stream.as_string()); +} - if (OptoReg::is_reg(name)) { - tty->print_cr(OptoReg::regname(name)); - } else { - int x = 0; - } - } - int x = 0; - } +void IdealGraphPrinter::print_attr(const char *name, const char *val) { + _xml->print(" %s='", name); + text(val); + _xml->print("'"); +} - if (l._def == NodeSentinel) { - tty->print("multiple mapping from %d: ", i); - for (int j=0; jlength(); j++) { - tty->print("%d ", l._defs->at(j)->_idx); - } - tty->print_cr(""); - } else { - tty->print_cr("mapping between %d and %d\n", i, l._def->_idx); - } - } - }*/ +void IdealGraphPrinter::head(const char *name) { + _xml->head(name); +} 
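For orientation, a tiny self-contained mock (not HotSpot's xmlStream) of the begin_head/end_head/text/tail pairing these wrappers delegate to; the element and attribute names below are placeholders for the PROPERTY_ELEMENT and PROPERTY_NAME_PROPERTY constants used by print_prop() further down.

#include <iostream>
#include <string>

// Toy stand-in for the xmlStream calls the wrappers above delegate to.
struct MockXmlStream {
  void begin_head(const std::string& s)                 { std::cout << '<' << s; }
  void attr(const std::string& n, const std::string& v) { std::cout << ' ' << n << "='" << v << "'"; }
  void end_head()                                       { std::cout << '>'; }
  void text(const std::string& s)                       { std::cout << s; }
  void tail(const std::string& s)                       { std::cout << "</" << s << ">\n"; }
};

int main() {
  MockXmlStream xml;
  // Roughly mirrors IdealGraphPrinter::print_prop("idx", "42"):
  xml.begin_head("p");      // element name assumed
  xml.attr("name", "idx");  // attribute name assumed
  xml.end_head();
  xml.text("42");
  xml.tail("p");            // output: <p name='idx'>42</p>
  return 0;
}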
+ +void IdealGraphPrinter::tail(const char *name) { + _xml->tail(name); +} + +void IdealGraphPrinter::text(const char *s) { + _xml->text(s); +} + +void IdealGraphPrinter::print_prop(const char *name, int val) { + + stringStream stream; + stream.print("%d", val); + print_prop(name, stream.as_string()); +} + +void IdealGraphPrinter::print_prop(const char *name, const char *val) { + begin_head(PROPERTY_ELEMENT); + print_attr(PROPERTY_NAME_PROPERTY, name); + end_head(); + text(val); + tail(PROPERTY_ELEMENT); } void IdealGraphPrinter::print_method(ciMethod *method, int bci, InlineTree *tree) { + begin_head(METHOD_ELEMENT); - Properties properties; stringStream str; method->print_name(&str); stringStream shortStr; method->print_short_name(&shortStr); + print_attr(METHOD_NAME_PROPERTY, str.as_string()); + print_attr(METHOD_SHORT_NAME_PROPERTY, shortStr.as_string()); + print_attr(METHOD_BCI_PROPERTY, bci); - properties.add(new Property(METHOD_NAME_PROPERTY, str.as_string())); - properties.add(new Property(METHOD_SHORT_NAME_PROPERTY, shortStr.as_string())); - properties.add(new Property(METHOD_BCI_PROPERTY, bci)); - start_element(METHOD_ELEMENT, &properties); + end_head(); - start_element(BYTECODES_ELEMENT); + head(BYTECODES_ELEMENT); output()->print_cr("print_codes_on(output()); output()->print_cr("]]>"); - end_element(BYTECODES_ELEMENT); + tail(BYTECODES_ELEMENT); - start_element(INLINE_ELEMENT); + head(INLINE_ELEMENT); if (tree != NULL) { GrowableArray subtrees = tree->subtrees(); for (int i = 0; i < subtrees.length(); i++) { print_inline_tree(subtrees.at(i)); } } - end_element(INLINE_ELEMENT); + tail(INLINE_ELEMENT); - end_element(METHOD_ELEMENT); + tail(METHOD_ELEMENT); output()->flush(); } @@ -274,12 +287,6 @@ } -void IdealGraphPrinter::clear_nodes() { - // for (int i = 0; i < _nodes.length(); i++) { - // _nodes.at(i)->clear_node(); - // } -} - void IdealGraphPrinter::print_inlining(Compile* compile) { // Print inline tree @@ -301,143 +308,56 @@ assert(method, "null methods are not allowed!"); assert(!_current_method, "current method must be null!"); - _arena->destruct_contents(); + head(GROUP_ELEMENT); - start_element(GROUP_ELEMENT); - - // Print properties - Properties properties; - + head(PROPERTIES_ELEMENT); + + // Print properties // Add method name stringStream strStream; method->print_name(&strStream); - properties.add(new Property(METHOD_NAME_PROPERTY, strStream.as_string())); + print_prop(METHOD_NAME_PROPERTY, strStream.as_string()); if (method->flags().is_public()) { - properties.add(new Property(METHOD_IS_PUBLIC_PROPERTY, TRUE_VALUE)); + print_prop(METHOD_IS_PUBLIC_PROPERTY, TRUE_VALUE); } if (method->flags().is_static()) { - properties.add(new Property(METHOD_IS_STATIC_PROPERTY, TRUE_VALUE)); + print_prop(METHOD_IS_STATIC_PROPERTY, TRUE_VALUE); } - properties.print(this); + tail(PROPERTIES_ELEMENT); if (_stream) { char answer = 0; - _stream->flush(); + _xml->flush(); int result = _stream->read(&answer, 1); _should_send_method = (answer == 'y'); } - this->_nodes = GrowableArray(_arena, 2, 0, NULL); - this->_edges = GrowableArray< EdgeDescription * >(_arena, 2, 0, NULL); - - this->_current_method = method; - - - _output->flush(); + _xml->flush(); } // Has to be called whenever a method has finished compilation void IdealGraphPrinter::end_method() { -// if (finish && !in_method) return; - nmethod* method = (nmethod*)this->_current_method->code(); - - start_element(ASSEMBLY_ELEMENT); - // Disassembler::decode(method, _output); - end_element(ASSEMBLY_ELEMENT); - - 
end_element(GROUP_ELEMENT); + tail(GROUP_ELEMENT); _current_method = NULL; - _output->flush(); - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *desc = _nodes.at(i); - if (desc) { - delete desc; - _nodes.at_put(i, NULL); - } - } - this->_nodes.clear(); - - - for (int i = 0; i < _edges.length(); i++) { - // for (int j=0; j<_edges.at(i)->length(); j++) { - EdgeDescription *conn = _edges.at(i); - conn->print(this); - if (conn) { - delete conn; - _edges.at_put(i, NULL); - } - //} - //_edges.at(i)->clear(); - //delete _edges.at(i); - //_edges.at_put(i, NULL); - } - this->_edges.clear(); - -// in_method = false; -} - -// Outputs an XML start element -void IdealGraphPrinter::start_element(const char *s, Properties *properties /* = NULL */, bool print_indent /* = false */, bool print_return /* = true */) { - - start_element_helper(s, properties, false, print_indent, print_return); - _depth++; - -} - -// Outputs an XML start element without body -void IdealGraphPrinter::simple_element(const char *s, Properties *properties /* = NULL */, bool print_indent /* = false */) { - start_element_helper(s, properties, true, print_indent, true); -} - -// Outputs an XML start element. If outputEnd is true, the element has no body. -void IdealGraphPrinter::start_element_helper(const char *s, Properties *properties, bool outputEnd, bool print_indent /* = false */, bool print_return /* = true */) { - - assert(_output, "output stream must exist!"); - - if (print_indent) this->print_indent(); - _output->print("<"); - _output->print(s); - if (properties) properties->print_as_attributes(this); - - if (outputEnd) { - _output->print("/"); - } - - _output->print(">"); - if (print_return) _output->print_cr(""); - + _xml->flush(); } // Print indent void IdealGraphPrinter::print_indent() { + tty->print_cr("printing ident %d", _depth); for (int i = 0; i < _depth; i++) { - _output->print(INDENT); + _xml->print(INDENT); } } -// Outputs an XML end element -void IdealGraphPrinter::end_element(const char *s, bool print_indent /* = true */, bool print_return /* = true */) { - - assert(_output, "output stream must exist!"); - - _depth--; - - if (print_indent) this->print_indent(); - _output->print("print(s); - _output->print(">"); - if (print_return) _output->print_cr(""); - -} - bool IdealGraphPrinter::traverse_outs() { return _traverse_outs; } @@ -446,1477 +366,341 @@ _traverse_outs = b; } -void IdealGraphPrinter::walk(Node *start) { +intptr_t IdealGraphPrinter::get_node_id(Node *n) { + return (intptr_t)(n); +} - - VectorSet visited(Thread::current()->resource_area()); - GrowableArray nodeStack(Thread::current()->resource_area(), 0, 0, NULL); - nodeStack.push(start); - visited.test_set(start->_idx); - while(nodeStack.length() > 0) { - - Node *n = nodeStack.pop(); - IdealGraphPrinter::pre_node(n, this); +void IdealGraphPrinter::visit_node(Node *n, void *param) { - if (_traverse_outs) { - for (DUIterator i = n->outs(); n->has_out(i); i++) { - Node* p = n->out(i); - if (!visited.test_set(p->_idx)) { - nodeStack.push(p); - } - } - } + if(param) { + // Output edge + intptr_t dest_id = get_node_id(n); for ( uint i = 0; i < n->len(); i++ ) { if ( n->in(i) ) { - if (!visited.test_set(n->in(i)->_idx)) { - nodeStack.push(n->in(i)); - } + Node *source = n->in(i); + begin_elem(EDGE_ELEMENT); + intptr_t source_id = get_node_id(source); + print_attr(FROM_PROPERTY, source_id); + print_attr(TO_PROPERTY, dest_id); + print_attr(INDEX_PROPERTY, i); + end_elem(); } } - } -} - -void IdealGraphPrinter::compress(int index, GrowableArray* 
blocks) { - Block *block = blocks->adr_at(index); - int ancestor = block->ancestor(); - assert(ancestor != -1, ""); - - Block *ancestor_block = blocks->adr_at(ancestor); - if (ancestor_block->ancestor() != -1) { - compress(ancestor, blocks); - - int label = block->label(); - Block *label_block = blocks->adr_at(label); + } else { - int ancestor_label = ancestor_block->label(); - Block *ancestor_label_block = blocks->adr_at(label); - if (ancestor_label_block->semi() < label_block->semi()) { - block->set_label(ancestor_label); - } + // Output node + begin_head(NODE_ELEMENT); + print_attr(NODE_ID_PROPERTY, get_node_id(n)); + end_head(); - block->set_ancestor(ancestor_block->ancestor()); - } -} + head(PROPERTIES_ELEMENT); -int IdealGraphPrinter::eval(int index, GrowableArray* blocks) { - Block *block = blocks->adr_at(index); - if (block->ancestor() == -1) { - return index; - } else { - compress(index, blocks); - return block->label(); - } -} + Node *node = n; +#ifndef PRODUCT + node->_in_dump_cnt++; + print_prop(NODE_NAME_PROPERTY, (const char *)node->Name()); + const Type *t = node->bottom_type(); + print_prop("type", (const char *)Type::msg[t->base()]); + print_prop("idx", node->_idx); +#ifdef ASSERT + print_prop("debug_idx", node->_debug_idx); +#endif -void IdealGraphPrinter::link(int index1, int index2, GrowableArray* blocks) { - Block *block2 = blocks->adr_at(index2); - block2->set_ancestor(index1); -} - -void IdealGraphPrinter::build_dominators(GrowableArray* blocks) { - - if (blocks->length() == 0) return; - - GrowableArray stack; - stack.append(0); - - GrowableArray array; - - assert(blocks->length() > 0, ""); - blocks->adr_at(0)->set_dominator(0); - - int n = 0; - while(!stack.is_empty()) { - int index = stack.pop(); - Block *block = blocks->adr_at(index); - block->set_semi(n); - array.append(block); - n = n + 1; - for (int i = 0; i < block->succs()->length(); i++) { - int succ_index = block->succs()->at(i); - Block *succ = blocks->adr_at(succ_index); - if (succ->semi() == -1) { - succ->set_parent(index); - stack.push(succ_index); + if(C->cfg() != NULL) { + Block *block = C->cfg()->_bbs[node->_idx]; + if(block == NULL) { + print_prop("block", C->cfg()->_blocks[0]->_pre_order); + } else { + print_prop("block", block->_pre_order); } - succ->add_pred(index); } - } - for (int i=n-1; i>0; i--) { - Block *block = array.at(i); - int block_index = block->index(); - for (int j=0; jpred()->length(); j++) { - int pred_index = block->pred()->at(j); - int cur_index = eval(pred_index, blocks); - - Block *cur_block = blocks->adr_at(cur_index); - if (cur_block->semi() < block->semi()) { - block->set_semi(cur_block->semi()); - } + const jushort flags = node->flags(); + if (flags & Node::Flag_is_Copy) { + print_prop("is_copy", "true"); } - - int semi_index = block->semi(); - Block *semi_block = array.at(semi_index); - semi_block->add_to_bucket(block_index); - - link(block->parent(), block_index, blocks); - Block *parent_block = blocks->adr_at(block->parent()); - - for (int j=0; jbucket()->length(); j++) { - int cur_index = parent_block->bucket()->at(j); - int new_index = eval(cur_index, blocks); - Block *cur_block = blocks->adr_at(cur_index); - Block *new_block = blocks->adr_at(new_index); - int dom = block->parent(); - - if (new_block->semi() < cur_block->semi()) { - dom = new_index; - } - - cur_block->set_dominator(dom); + if (flags & Node::Flag_is_Call) { + print_prop("is_call", "true"); } - - parent_block->clear_bucket(); - } - - for (int i=1; i < n; i++) { - - Block *block = array.at(i); - int 
block_index = block->index(); - - int semi_index = block->semi(); - Block *semi_block = array.at(semi_index); - - if (block->dominator() != semi_block->index()) { - int new_dom = blocks->adr_at(block->dominator())->dominator(); - block->set_dominator(new_dom); + if (flags & Node::Flag_rematerialize) { + print_prop("rematerialize", "true"); } - } - - for (int i = 0; i < blocks->length(); i++) { - if (blocks->adr_at(i)->dominator() == -1) { - blocks->adr_at(i)->set_dominator(0); + if (flags & Node::Flag_needs_anti_dependence_check) { + print_prop("needs_anti_dependence_check", "true"); } - } - - // Build dominates array - for (int i=1; i < blocks->length(); i++) { - Block *block = blocks->adr_at(i); - int dominator = block->dominator(); - Block *dom_block = blocks->adr_at(dominator); - dom_block->add_dominates(i); - dom_block->add_child(i); - - while(dominator != 0) { - dominator = dom_block->dominator(); - dom_block = blocks->adr_at(dominator); - dom_block->add_child(i); + if (flags & Node::Flag_is_macro) { + print_prop("is_macro", "true"); } - } -} - -void IdealGraphPrinter::build_common_dominator(int **common_dominator, int index, GrowableArray* blocks) { - - common_dominator[index][index] = index; - Block *block = blocks->adr_at(index); - for (int i = 0; i < block->dominates()->length(); i++) { - Block *dominated = blocks->adr_at(block->dominates()->at(i)); - - for (int j=0; jchildren()->length(); j++) { - Block *child = blocks->adr_at(dominated->children()->at(j)); - common_dominator[index][child->index()] = common_dominator[child->index()][index] = index; - - for (int k=0; kadr_at(block->dominates()->at(k)); - common_dominator[child->index()][other_dominated->index()] = common_dominator[other_dominated->index()][child->index()] = index; - - for (int l=0 ; lchildren()->length(); l++) { - Block *other_child = blocks->adr_at(other_dominated->children()->at(l)); - common_dominator[child->index()][other_child->index()] = common_dominator[other_child->index()][child->index()] = index; - } - } + if (flags & Node::Flag_is_Con) { + print_prop("is_con", "true"); } - - build_common_dominator(common_dominator, dominated->index(), blocks); - } -} - -void IdealGraphPrinter::schedule_latest(int **common_dominator, GrowableArray* blocks) { - - int queue_size = _nodes.length() + 1; - NodeDescription **queue = NEW_RESOURCE_ARRAY(NodeDescription *, queue_size); - int queue_start = 0; - int queue_end = 0; - Arena *a = new Arena(); - VectorSet on_queue(a); - - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *desc = _nodes.at(i); - if (desc) { - desc->init_succs(); + if (flags & Node::Flag_is_cisc_alternate) { + print_prop("is_cisc_alternate", "true"); } - } - - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *desc = _nodes.at(i); - if (desc) { - for (uint j=0; jnode()->len(); j++) { - Node *n = desc->node()->in(j); - if (n) { - NodeDescription *other_desc = _nodes.at(n->_idx); - other_desc->add_succ(desc); - } - } + if (flags & Node::Flag_is_Branch) { + print_prop("is_branch", "true"); + } + if (flags & Node::Flag_is_block_start) { + print_prop("is_block_start", "true"); + } + if (flags & Node::Flag_is_Goto) { + print_prop("is_goto", "true"); + } + if (flags & Node::Flag_is_dead_loop_safe) { + print_prop("is_dead_loop_safe", "true"); + } + if (flags & Node::Flag_may_be_short_branch) { + print_prop("may_be_short_branch", "true"); + } + if (flags & Node::Flag_is_safepoint_node) { + print_prop("is_safepoint_node", "true"); + } + if (flags & Node::Flag_is_pc_relative) { + 
print_prop("is_pc_relative", "true"); } - } - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *desc = _nodes.at(i); - if (desc && desc->block_index() == -1) { - - // Put Phi into same block as region - if (desc->node()->is_Phi() && desc->node()->in(0) && _nodes.at(desc->node()->in(0)->_idx)->block_index() != -1) { - int index = _nodes.at(desc->node()->in(0)->_idx)->block_index(); - desc->set_block_index(index); - blocks->adr_at(index)->add_node(desc); - - // Put Projections to same block as parent - } else if (desc->node()->is_block_proj() && _nodes.at(desc->node()->is_block_proj()->_idx)->block_index() != -1) { - int index = _nodes.at(desc->node()->is_block_proj()->_idx)->block_index(); - desc->set_block_index(index); - blocks->adr_at(index)->add_node(desc); + if (C->matcher() != NULL) { + if (C->matcher()->is_shared(node)) { + print_prop("is_shared", "true"); } else { - queue[queue_end] = desc; - queue_end++; - on_queue.set(desc->node()->_idx); + print_prop("is_shared", "false"); } - } - } - - - int z = 0; - while(queue_start != queue_end && z < 10000) { - - NodeDescription *desc = queue[queue_start]; - queue_start = (queue_start + 1) % queue_size; - on_queue >>= desc->node()->_idx; - - Node* node = desc->node(); - - if (desc->succs()->length() == 0) { - int x = 0; - } - - int block_index = -1; - if (desc->succs()->length() != 0) { - for (int i = 0; i < desc->succs()->length(); i++) { - NodeDescription *cur_desc = desc->succs()->at(i); - if (cur_desc != desc) { - if (cur_desc->succs()->length() == 0) { - - // Ignore nodes with 0 successors - - } else if (cur_desc->block_index() == -1) { - - // Let this node schedule first - block_index = -1; - break; - - } else if (cur_desc->node()->is_Phi()){ - - // Special treatment for Phi functions - PhiNode *phi = cur_desc->node()->as_Phi(); - assert(phi->in(0) && phi->in(0)->is_Region(), "Must have region node in first input"); - RegionNode *region = phi->in(0)->as_Region(); - - for (uint j=1; jlen(); j++) { - Node *cur_phi_input = phi->in(j); - if (cur_phi_input == desc->node() && region->in(j)) { - NodeDescription *cur_region_input = _nodes.at(region->in(j)->_idx); - if (cur_region_input->block_index() == -1) { - - // Let this node schedule first - block_index = -1; - break; - } else { - if (block_index == -1) { - block_index = cur_region_input->block_index(); - } else { - block_index = common_dominator[block_index][cur_region_input->block_index()]; - } - } - } - } - - } else { - if (block_index == -1) { - block_index = cur_desc->block_index(); - } else { - block_index = common_dominator[block_index][cur_desc->block_index()]; - } - } - } + if (C->matcher()->is_dontcare(node)) { + print_prop("is_dontcare", "true"); + } else { + print_prop("is_dontcare", "false"); } - } - if (block_index == -1) { - queue[queue_end] = desc; - queue_end = (queue_end + 1) % queue_size; - on_queue.set(desc->node()->_idx); - z++; - } else { - assert(desc->block_index() == -1, ""); - desc->set_block_index(block_index); - blocks->adr_at(block_index)->add_node(desc); - z = 0; +#ifdef ASSERT + Node* old = C->matcher()->find_old_node(node); + if (old != NULL) { + print_prop("old_node_idx", old->_idx); + } +#endif } - } - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *desc = _nodes.at(i); - if (desc && desc->block_index() == -1) { - - //if (desc->node()->is_Proj() || desc->node()->is_Con()) { - Node *parent = desc->node()->in(0); - uint cur = 1; - while(!parent && cur < desc->node()->len()) { - parent = desc->node()->in(cur); - cur++; - } - - if 
(parent && _nodes.at(parent->_idx)->block_index() != -1) { - int index = _nodes.at(parent->_idx)->block_index(); - desc->set_block_index(index); - blocks->adr_at(index)->add_node(desc); - } else { - desc->set_block_index(0); - blocks->adr_at(0)->add_node(desc); - //ShouldNotReachHere(); - } - //} - /* - if (desc->node()->is_block_proj() && _nodes.at(desc->node()->is_block_proj()->_idx)->block_index() != -1) { - int index = _nodes.at(desc->node()->is_block_proj()->_idx)->block_index(); - desc->set_block_index(index); - blocks->adr_at(index)->add_node(desc); - } */ + if (node->is_Proj()) { + print_prop("con", (int)node->as_Proj()->_con); } - } - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *desc = _nodes.at(i); - if (desc) { - desc->clear_succs(); + if (node->is_Mach()) { + print_prop("idealOpcode", (const char *)NodeClassNames[node->as_Mach()->ideal_Opcode()]); } - } - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *desc = _nodes.at(i); - if (desc) { - int block_index = desc->block_index(); + buffer[0] = 0; + stringStream s2(buffer, sizeof(buffer) - 1); - assert(block_index >= 0 && block_index < blocks->length(), "Block index must be in range"); - assert(blocks->adr_at(block_index)->nodes()->contains(desc), "Node must be child of block"); + node->dump_spec(&s2); + if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) { + const TypeInstPtr *toop = t->isa_instptr(); + const TypeKlassPtr *tkls = t->isa_klassptr(); + ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL ); + if( klass && klass->is_loaded() && klass->is_interface() ) { + s2.print(" Interface:"); + } else if( toop ) { + s2.print(" Oop:"); + } else if( tkls ) { + s2.print(" Klass:"); + } + t->dump_on(&s2); + } else if( t == Type::MEMORY ) { + s2.print(" Memory:"); + MemNode::dump_adr_type(node, node->adr_type(), &s2); } - } - a->destruct_contents(); -} - -void IdealGraphPrinter::build_blocks(Node *root) { - - Arena *a = new Arena(); - Node_Stack stack(a, 100); - - VectorSet visited(a); - stack.push(root, 0); - GrowableArray blocks(a, 2, 0, Block(0)); - - for (int i = 0; i < _nodes.length(); i++) { - if (_nodes.at(i)) _nodes.at(i)->set_block_index(-1); - } + assert(s2.size() < sizeof(buffer), "size in range"); + print_prop("dump_spec", buffer); - // Order nodes such that node index is equal to idx - for (int i = 0; i < _nodes.length(); i++) { - - if (_nodes.at(i)) { - NodeDescription *node = _nodes.at(i); - int index = node->node()->_idx; - if (index != i) { - _nodes.at_grow(index); - NodeDescription *tmp = _nodes.at(index); - *(_nodes.adr_at(index)) = node; - *(_nodes.adr_at(i)) = tmp; - i--; - } + if (node->is_block_proj()) { + print_prop("is_block_proj", "true"); } - } - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *node = _nodes.at(i); - if (node) { - assert(node->node()->_idx == (uint)i, ""); + if (node->is_block_start()) { + print_prop("is_block_start", "true"); } - } - - while(stack.is_nonempty()) { - //Node *n = stack.node(); - //int index = stack.index(); - Node *proj = stack.node();//n->in(index); - const Node *parent = proj->is_block_proj(); - if (parent == NULL) { - parent = proj; - } - - if (!visited.test_set(parent->_idx)) { - - NodeDescription *end_desc = _nodes.at(parent->_idx); - int block_index = blocks.length(); - Block block(block_index); - blocks.append(block); - Block *b = blocks.adr_at(block_index); - b->set_start(end_desc); - // assert(end_desc->block_index() == -1, ""); - end_desc->set_block_index(block_index); - b->add_node(end_desc); - - 
// Skip any control-pinned middle'in stuff - Node *p = proj; - NodeDescription *start_desc = NULL; - do { - proj = p; // Update pointer to last Control - if (p->in(0) == NULL) { - start_desc = end_desc; - break; - } - p = p->in(0); // Move control forward - start_desc = _nodes.at(p->_idx); - assert(start_desc, ""); - - if (start_desc != end_desc && start_desc->block_index() == -1) { - assert(start_desc->block_index() == -1, ""); - assert(block_index < blocks.length(), ""); - start_desc->set_block_index(block_index); - b->add_node(start_desc); - } - } while( !p->is_block_proj() && - !p->is_block_start() ); - - for (uint i = 0; i < start_desc->node()->len(); i++) { - - Node *pred_node = start_desc->node()->in(i); - - - if (pred_node && pred_node != start_desc->node()) { - const Node *cur_parent = pred_node->is_block_proj(); - if (cur_parent != NULL) { - pred_node = (Node *)cur_parent; - } - - NodeDescription *pred_node_desc = _nodes.at(pred_node->_idx); - if (pred_node_desc->block_index() != -1) { - blocks.adr_at(pred_node_desc->block_index())->add_succ(block_index); - } - } + const char *short_name = "short_name"; + if (strcmp(node->Name(), "Parm") == 0 && node->as_Proj()->_con >= TypeFunc::Parms) { + int index = node->as_Proj()->_con - TypeFunc::Parms; + if (index >= 10) { + print_prop(short_name, "PA"); + } else { + sprintf(buffer, "P%d", index); + print_prop(short_name, buffer); } - - for (DUIterator_Fast dmax, i = end_desc->node()->fast_outs(dmax); i < dmax; i++) { - Node* cur_succ = end_desc->node()->fast_out(i); - NodeDescription *cur_succ_desc = _nodes.at(cur_succ->_idx); - - DUIterator_Fast dmax2, i2 = cur_succ->fast_outs(dmax2); - if (cur_succ->is_block_proj() && i2 < dmax2 && !cur_succ->is_Root()) { - - for (; i2fast_out(i2); - if (cur_succ2) { - cur_succ_desc = _nodes.at(cur_succ2->_idx); - if (cur_succ_desc == NULL) { - // dead node so skip it - continue; - } - if (cur_succ2 != end_desc->node() && cur_succ_desc->block_index() != -1) { - b->add_succ(cur_succ_desc->block_index()); - } - } - } - + } else if (strcmp(node->Name(), "IfTrue") == 0) { + print_prop(short_name, "T"); + } else if (strcmp(node->Name(), "IfFalse") == 0) { + print_prop(short_name, "F"); + } else if ((node->is_Con() && node->is_Type()) || node->is_Proj()) { + + if (t->base() == Type::Int && t->is_int()->is_con()) { + const TypeInt *typeInt = t->is_int(); + assert(typeInt->is_con(), "must be constant"); + jint value = typeInt->get_con(); + + // max. 2 chars allowed + if (value >= -9 && value <= 99) { + sprintf(buffer, "%d", value); + print_prop(short_name, buffer); } else { - - if (cur_succ != end_desc->node() && cur_succ_desc && cur_succ_desc->block_index() != -1) { - b->add_succ(cur_succ_desc->block_index()); - } + print_prop(short_name, "I"); } - } - - - int num_preds = p->len(); - int bottom = -1; - if (p->is_Region() || p->is_Phi()) { - bottom = 0; - } - - int pushed = 0; - for (int i=num_preds - 1; i > bottom; i--) { - if (p->in(i) != NULL && p->in(i) != p) { - stack.push(p->in(i), 0); - pushed++; + } else if (t == Type::TOP) { + print_prop(short_name, "^"); + } else if (t->base() == Type::Long && t->is_long()->is_con()) { + const TypeLong *typeLong = t->is_long(); + assert(typeLong->is_con(), "must be constant"); + jlong value = typeLong->get_con(); + + // max. 
2 chars allowed + if (value >= -9 && value <= 99) { + sprintf(buffer, "%d", value); + print_prop(short_name, buffer); + } else { + print_prop(short_name, "L"); } + } else if (t->base() == Type::KlassPtr) { + const TypeKlassPtr *typeKlass = t->is_klassptr(); + print_prop(short_name, "CP"); + } else if (t->base() == Type::Control) { + print_prop(short_name, "C"); + } else if (t->base() == Type::Memory) { + print_prop(short_name, "M"); + } else if (t->base() == Type::Abio) { + print_prop(short_name, "IO"); + } else if (t->base() == Type::Return_Address) { + print_prop(short_name, "RA"); + } else if (t->base() == Type::AnyPtr) { + print_prop(short_name, "P"); + } else if (t->base() == Type::RawPtr) { + print_prop(short_name, "RP"); + } else if (t->base() == Type::AryPtr) { + print_prop(short_name, "AP"); } + } - if (pushed == 0 && p->is_Root() && !_matcher) { - // Special case when backedges to root are not yet built - for (int i = 0; i < _nodes.length(); i++) { - if (_nodes.at(i) && _nodes.at(i)->node()->is_SafePoint() && _nodes.at(i)->node()->outcnt() == 0) { - stack.push(_nodes.at(i)->node(), 0); - } - } + JVMState* caller = NULL; + if (node->is_SafePoint()) { + caller = node->as_SafePoint()->jvms(); + } else { + Node_Notes* notes = C->node_notes_at(node->_idx); + if (notes != NULL) { + caller = notes->jvms(); } + } - } else { - stack.pop(); + if (caller != NULL) { + stringStream bciStream; + while(caller) { + bciStream.print("%d ", caller->bci()); + caller = caller->caller(); + } + print_prop("bci", bciStream.as_string()); } - } - build_dominators(&blocks); + if (_chaitin && _chaitin != (PhaseChaitin *)0xdeadbeef) { + buffer[0] = 0; + _chaitin->dump_register(node, buffer); + print_prop("reg", buffer); + print_prop("lrg", _chaitin->n2lidx(node)); + } - int **common_dominator = NEW_RESOURCE_ARRAY(int *, blocks.length()); - for (int i = 0; i < blocks.length(); i++) { - int *cur = NEW_RESOURCE_ARRAY(int, blocks.length()); - common_dominator[i] = cur; + node->_in_dump_cnt--; +#endif - for (int j=0; jadd_child(blocks.adr_at(i)->index()); - } - build_common_dominator(common_dominator, 0, &blocks); +void IdealGraphPrinter::walk_nodes(Node *start, void *param) { - schedule_latest(common_dominator, &blocks); - start_element(CONTROL_FLOW_ELEMENT); + VectorSet visited(Thread::current()->resource_area()); + GrowableArray nodeStack(Thread::current()->resource_area(), 0, 0, NULL); + nodeStack.push(start); + visited.test_set(start->_idx); + while(nodeStack.length() > 0) { - for (int i = 0; i < blocks.length(); i++) { - Block *block = blocks.adr_at(i); + Node *n = nodeStack.pop(); + visit_node(n, param); - Properties props; - props.add(new Property(BLOCK_NAME_PROPERTY, i)); - props.add(new Property(BLOCK_DOMINATOR_PROPERTY, block->dominator())); - start_element(BLOCK_ELEMENT, &props); - - if (block->succs()->length() > 0) { - start_element(SUCCESSORS_ELEMENT); - for (int j=0; jsuccs()->length(); j++) { - int cur_index = block->succs()->at(j); - if (cur_index != 0 /* start_block has must not have inputs */) { - Properties properties; - properties.add(new Property(BLOCK_NAME_PROPERTY, cur_index)); - simple_element(SUCCESSOR_ELEMENT, &properties); + if (_traverse_outs) { + for (DUIterator i = n->outs(); n->has_out(i); i++) { + Node* p = n->out(i); + if (!visited.test_set(p->_idx)) { + nodeStack.push(p); } } - end_element(SUCCESSORS_ELEMENT); } - start_element(NODES_ELEMENT); - - for (int j=0; jnodes()->length(); j++) { - NodeDescription *n = block->nodes()->at(j); - Properties properties; - properties.add(new 
Property(NODE_ID_PROPERTY, n->id())); - simple_element(NODE_ELEMENT, &properties); + for ( uint i = 0; i < n->len(); i++ ) { + if ( n->in(i) ) { + if (!visited.test_set(n->in(i)->_idx)) { + nodeStack.push(n->in(i)); + } + } } - - end_element(NODES_ELEMENT); - - end_element(BLOCK_ELEMENT); } - - - end_element(CONTROL_FLOW_ELEMENT); - - a->destruct_contents(); } void IdealGraphPrinter::print_method(Compile* compile, const char *name, int level, bool clear_nodes) { - print(compile, name, (Node *)compile->root(), level, clear_nodes); + print(compile, name, (Node *)compile->root(), level, clear_nodes); } // Print current ideal graph void IdealGraphPrinter::print(Compile* compile, const char *name, Node *node, int level, bool clear_nodes) { -// if (finish && !in_method) return; if (!_current_method || !_should_send_method || level > PrintIdealGraphLevel) return; - assert(_current_method, "newMethod has to be called first!"); - - if (clear_nodes) { - int x = 0; - } - - _clear_nodes = clear_nodes; + this->C = compile; // Warning, unsafe cast? - _chaitin = (PhaseChaitin *)compile->regalloc(); - _matcher = compile->matcher(); + _chaitin = (PhaseChaitin *)C->regalloc(); + begin_head(GRAPH_ELEMENT); + print_attr(GRAPH_NAME_PROPERTY, (const char *)name); + end_head(); - // Update nodes - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *desc = _nodes.at(i); - if (desc) { - desc->set_state(Invalid); - } - } - Node *n = node; - walk(n); - - // Update edges - for (int i = 0; i < _edges.length(); i++) { - _edges.at(i)->set_state(Invalid); - } - - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *desc = _nodes.at(i); - if (desc && desc->state() != Invalid) { - - int to = desc->id(); - uint len = desc->node()->len(); - for (uint j=0; jnode()->in(j); - - if (n) { - - - intptr_t from = (intptr_t)n; - - // Assert from node is valid - /* - bool ok = false; - for (int k=0; k<_nodes.length(); k++) { - NodeDescription *desc = _nodes.at(k); - if (desc && desc->id() == from) { - assert(desc->state() != Invalid, ""); - ok = true; - } - } - assert(ok, "");*/ - - uint index = j; - if (index >= desc->node()->req()) { - index = desc->node()->req(); - } - - print_edge(from, to, index); - } - } - } - } + head(NODES_ELEMENT); + walk_nodes(node, NULL); + tail(NODES_ELEMENT); - bool is_different = false; - - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *desc = _nodes.at(i); - if (desc && desc->state() != Valid) { - is_different = true; - break; - } - } + head(EDGES_ELEMENT); + walk_nodes(node, (void *)1); + tail(EDGES_ELEMENT); + if (C->cfg() != NULL) { + head(CONTROL_FLOW_ELEMENT); + for (uint i = 0; i < C->cfg()->_blocks.size(); i++) { + Block *b = C->cfg()->_blocks[i]; + begin_head(BLOCK_ELEMENT); + print_attr(BLOCK_NAME_PROPERTY, b->_pre_order); + end_head(); - if (!is_different) { - for (int i = 0; i < _edges.length(); i++) { - EdgeDescription *conn = _edges.at(i); - if (conn && conn->state() != Valid) { - is_different = true; - break; + head(SUCCESSORS_ELEMENT); + for (uint s = 0; s < C->cfg()->_blocks[i]->_num_succs; s++) { + begin_elem(SUCCESSOR_ELEMENT); + print_attr(BLOCK_NAME_PROPERTY, b->_succs[s]->_pre_order); + end_elem(); } - } - } - - // No changes -> do not print graph - if (!is_different) return; + tail(SUCCESSORS_ELEMENT); - Properties properties; - properties.add(new Property(GRAPH_NAME_PROPERTY, (const char *)name)); - start_element(GRAPH_ELEMENT, &properties); - - start_element(NODES_ELEMENT); - for (int i = 0; i < _nodes.length(); i++) { - NodeDescription *desc 
= _nodes.at(i); - if (desc) { - desc->print(this); - if (desc->state() == Invalid) { - delete desc; - _nodes.at_put(i, NULL); - } else { - desc->set_state(Valid); - } + tail(BLOCK_ELEMENT); } - } - end_element(NODES_ELEMENT); - - build_blocks(node); - - start_element(EDGES_ELEMENT); - for (int i = 0; i < _edges.length(); i++) { - EdgeDescription *conn = _edges.at(i); - - // Assert from and to nodes are valid - /* - if (!conn->state() == Invalid) { - bool ok1 = false; - bool ok2 = false; - for (int j=0; j<_nodes.length(); j++) { - NodeDescription *desc = _nodes.at(j); - if (desc && desc->id() == conn->from()) { - ok1 = true; - } - - if (desc && desc->id() == conn->to()) { - ok2 = true; - } - } - assert(ok1, "from node not found!"); - assert(ok2, "to node not found!"); - }*/ - - conn->print(this); - if (conn->state() == Invalid) { - _edges.remove_at(i); - delete conn; - i--; - } + tail(CONTROL_FLOW_ELEMENT); } - - end_element(EDGES_ELEMENT); - - end_element(GRAPH_ELEMENT); - - _output->flush(); + tail(GRAPH_ELEMENT); + output()->flush(); } -// Print edge -void IdealGraphPrinter::print_edge(int from, int to, int index) { - - EdgeDescription *conn = new EdgeDescription(from, to, index); - for (int i = 0; i < _edges.length(); i++) { - if (_edges.at(i)->equals(conn)) { - conn->set_state(Valid); - delete _edges.at(i); - _edges.at_put(i, conn); - return; - } - } - - _edges.append(conn); -} - -extern const char *NodeClassNames[]; - -// Create node description -IdealGraphPrinter::NodeDescription *IdealGraphPrinter::create_node_description(Node* node) { - -#ifndef PRODUCT - node->_in_dump_cnt++; - NodeDescription *desc = new NodeDescription(node); - desc->properties()->add(new Property(NODE_NAME_PROPERTY, (const char *)node->Name())); - - const Type *t = node->bottom_type(); - desc->properties()->add(new Property("type", (const char *)Type::msg[t->base()])); - - desc->properties()->add(new Property("idx", node->_idx)); -#ifdef ASSERT - desc->properties()->add(new Property("debug_idx", node->_debug_idx)); -#endif - - - const jushort flags = node->flags(); - if (flags & Node::Flag_is_Copy) { - desc->properties()->add(new Property("is_copy", "true")); - } - if (flags & Node::Flag_is_Call) { - desc->properties()->add(new Property("is_call", "true")); - } - if (flags & Node::Flag_rematerialize) { - desc->properties()->add(new Property("rematerialize", "true")); - } - if (flags & Node::Flag_needs_anti_dependence_check) { - desc->properties()->add(new Property("needs_anti_dependence_check", "true")); - } - if (flags & Node::Flag_is_macro) { - desc->properties()->add(new Property("is_macro", "true")); - } - if (flags & Node::Flag_is_Con) { - desc->properties()->add(new Property("is_con", "true")); - } - if (flags & Node::Flag_is_cisc_alternate) { - desc->properties()->add(new Property("is_cisc_alternate", "true")); - } - if (flags & Node::Flag_is_Branch) { - desc->properties()->add(new Property("is_branch", "true")); - } - if (flags & Node::Flag_is_block_start) { - desc->properties()->add(new Property("is_block_start", "true")); - } - if (flags & Node::Flag_is_Goto) { - desc->properties()->add(new Property("is_goto", "true")); - } - if (flags & Node::Flag_is_dead_loop_safe) { - desc->properties()->add(new Property("is_dead_loop_safe", "true")); - } - if (flags & Node::Flag_may_be_short_branch) { - desc->properties()->add(new Property("may_be_short_branch", "true")); - } - if (flags & Node::Flag_is_safepoint_node) { - desc->properties()->add(new Property("is_safepoint_node", "true")); - } - if (flags & 
Node::Flag_is_pc_relative) { - desc->properties()->add(new Property("is_pc_relative", "true")); - } - - if (_matcher) { - if (_matcher->is_shared(desc->node())) { - desc->properties()->add(new Property("is_shared", "true")); - } else { - desc->properties()->add(new Property("is_shared", "false")); - } - - if (_matcher->is_dontcare(desc->node())) { - desc->properties()->add(new Property("is_dontcare", "true")); - } else { - desc->properties()->add(new Property("is_dontcare", "false")); - } - } - - if (node->is_Proj()) { - desc->properties()->add(new Property("con", (int)node->as_Proj()->_con)); - } - - if (node->is_Mach()) { - desc->properties()->add(new Property("idealOpcode", (const char *)NodeClassNames[node->as_Mach()->ideal_Opcode()])); - } - - - - - - outputStream *oldTty = tty; - buffer[0] = 0; - stringStream s2(buffer, sizeof(buffer) - 1); - - node->dump_spec(&s2); - assert(s2.size() < sizeof(buffer), "size in range"); - desc->properties()->add(new Property("dump_spec", buffer)); - - if (node->is_block_proj()) { - desc->properties()->add(new Property("is_block_proj", "true")); - } - - if (node->is_block_start()) { - desc->properties()->add(new Property("is_block_start", "true")); - } - - const char *short_name = "short_name"; - if (strcmp(node->Name(), "Parm") == 0 && node->as_Proj()->_con >= TypeFunc::Parms) { - int index = node->as_Proj()->_con - TypeFunc::Parms; - if (index >= 10) { - desc->properties()->add(new Property(short_name, "PA")); - } else { - sprintf(buffer, "P%d", index); - desc->properties()->add(new Property(short_name, buffer)); - } - } else if (strcmp(node->Name(), "IfTrue") == 0) { - desc->properties()->add(new Property(short_name, "T")); - } else if (strcmp(node->Name(), "IfFalse") == 0) { - desc->properties()->add(new Property(short_name, "F")); - } else if ((node->is_Con() && node->is_Type()) || node->is_Proj()) { - - if (t->base() == Type::Int && t->is_int()->is_con()) { - const TypeInt *typeInt = t->is_int(); - assert(typeInt->is_con(), "must be constant"); - jint value = typeInt->get_con(); - - // max. 2 chars allowed - if (value >= -9 && value <= 99) { - sprintf(buffer, "%d", value); - desc->properties()->add(new Property(short_name, buffer)); - } - else - { - desc->properties()->add(new Property(short_name, "I")); - } - } else if (t == Type::TOP) { - desc->properties()->add(new Property(short_name, "^")); - } else if (t->base() == Type::Long && t->is_long()->is_con()) { - const TypeLong *typeLong = t->is_long(); - assert(typeLong->is_con(), "must be constant"); - jlong value = typeLong->get_con(); - - // max. 
2 chars allowed - if (value >= -9 && value <= 99) { - sprintf(buffer, "%d", value); - desc->properties()->add(new Property(short_name, buffer)); - } - else - { - desc->properties()->add(new Property(short_name, "L")); - } - } else if (t->base() == Type::KlassPtr) { - const TypeKlassPtr *typeKlass = t->is_klassptr(); - desc->properties()->add(new Property(short_name, "CP")); - } else if (t->base() == Type::Control) { - desc->properties()->add(new Property(short_name, "C")); - } else if (t->base() == Type::Memory) { - desc->properties()->add(new Property(short_name, "M")); - } else if (t->base() == Type::Abio) { - desc->properties()->add(new Property(short_name, "IO")); - } else if (t->base() == Type::Return_Address) { - desc->properties()->add(new Property(short_name, "RA")); - } else if (t->base() == Type::AnyPtr) { - desc->properties()->add(new Property(short_name, "P")); - } else if (t->base() == Type::RawPtr) { - desc->properties()->add(new Property(short_name, "RP")); - } else if (t->base() == Type::AryPtr) { - desc->properties()->add(new Property(short_name, "AP")); - } - } - - if (node->is_SafePoint()) { - SafePointNode *safePointNode = node->as_SafePoint(); - if (safePointNode->jvms()) { - stringStream bciStream; - bciStream.print("%d ", safePointNode->jvms()->bci()); - JVMState *caller = safePointNode->jvms()->caller(); - while(caller) { - bciStream.print("%d ", caller->bci()); - - caller = caller->caller(); - } - desc->properties()->add(new Property("bci", bciStream.as_string())); - } - } - - if (_chaitin && _chaitin != (PhaseChaitin *)0xdeadbeef) { - buffer[0] = 0; - _chaitin->dump_register(node, buffer); - desc->properties()->add(new Property("reg", buffer)); - desc->properties()->add(new Property("lrg", _chaitin->n2lidx(node))); - } - - - node->_in_dump_cnt--; - return desc; -#else - return NULL; -#endif -} - -void IdealGraphPrinter::pre_node(Node* node, void *env) { - - IdealGraphPrinter *printer = (IdealGraphPrinter *)env; - - NodeDescription *newDesc = printer->create_node_description(node); - - if (printer->_clear_nodes) { - - printer->_nodes.append(newDesc); - } else { - - NodeDescription *desc = printer->_nodes.at_grow(node->_idx, NULL); - - if (desc && desc->equals(newDesc)) { - //desc->set_state(Valid); - //desc->set_node(node); - delete desc; - printer->_nodes.at_put(node->_idx, NULL); - newDesc->set_state(Valid); - //printer->_nodes.at_put(node->_idx, newDesc); - } else { - - if (desc && desc->id() == newDesc->id()) { - delete desc; - printer->_nodes.at_put(node->_idx, NULL); - newDesc->set_state(New); - - } - - //if (desc) { - // delete desc; - //} - - //printer->_nodes.at_put(node->_idx, newDesc); - } - - printer->_nodes.append(newDesc); - } -} - -void IdealGraphPrinter::post_node(Node* node, void *env) { -} +extern const char *NodeClassNames[]; outputStream *IdealGraphPrinter::output() { - return _output; -} - -IdealGraphPrinter::Description::Description() { - _state = New; -} - -void IdealGraphPrinter::Description::print(IdealGraphPrinter *printer) { - if (_state == Invalid) { - print_removed(printer); - } else if (_state == New) { - print_changed(printer); - } -} - -void IdealGraphPrinter::Description::set_state(State s) { - _state = s; -} - -IdealGraphPrinter::State IdealGraphPrinter::Description::state() { - return _state; -} - -void IdealGraphPrinter::Block::set_proj(NodeDescription *n) { - _proj = n; -} - -void IdealGraphPrinter::Block::set_start(NodeDescription *n) { - _start = n; -} - -int IdealGraphPrinter::Block::semi() { - return _semi; -} - -int 
IdealGraphPrinter::Block::parent() { - return _parent; -} - -GrowableArray* IdealGraphPrinter::Block::bucket() { - return &_bucket; -} - -GrowableArray* IdealGraphPrinter::Block::children() { - return &_children; -} - -void IdealGraphPrinter::Block::add_child(int i) { - _children.append(i); -} - -GrowableArray* IdealGraphPrinter::Block::dominates() { - return &_dominates; -} - -void IdealGraphPrinter::Block::add_dominates(int i) { - _dominates.append(i); -} - -void IdealGraphPrinter::Block::add_to_bucket(int i) { - _bucket.append(i); -} - -void IdealGraphPrinter::Block::clear_bucket() { - _bucket.clear(); -} - -void IdealGraphPrinter::Block::set_dominator(int i) { - _dominator = i; -} - -void IdealGraphPrinter::Block::set_label(int i) { - _label = i; -} - -int IdealGraphPrinter::Block::label() { - return _label; -} - -int IdealGraphPrinter::Block::ancestor() { - return _ancestor; -} - -void IdealGraphPrinter::Block::set_ancestor(int i) { - _ancestor = i; -} - -int IdealGraphPrinter::Block::dominator() { - return _dominator; -} - -int IdealGraphPrinter::Block::index() { - return _index; -} - -void IdealGraphPrinter::Block::set_parent(int i) { - _parent = i; -} - -GrowableArray* IdealGraphPrinter::Block::pred() { - return &_pred; -} - -void IdealGraphPrinter::Block::set_semi(int i) { - _semi = i; -} - -IdealGraphPrinter::Block::Block() { -} - -IdealGraphPrinter::Block::Block(int index) { - _index = index; - _label = index; - _semi = -1; - _ancestor = -1; - _dominator = -1; -} - -void IdealGraphPrinter::Block::add_pred(int i) { - _pred.append(i); -} - -IdealGraphPrinter::NodeDescription *IdealGraphPrinter::Block::proj() { - return _proj; -} - -IdealGraphPrinter::NodeDescription *IdealGraphPrinter::Block::start() { - return _start; -} - -GrowableArray* IdealGraphPrinter::Block::succs() { - return &_succs; -} - -void IdealGraphPrinter::Block::add_succ(int index) { - - if (this->_index == 16 && index == 15) { - int x = 0; - } - - if (!_succs.contains(index)) { - _succs.append(index); - } -} - - -void IdealGraphPrinter::Block::add_node(NodeDescription *n) { - if (!_nodes.contains(n)) { - _nodes.append(n); - } -} - -GrowableArray* IdealGraphPrinter::Block::nodes() { - return &_nodes; -} - -int IdealGraphPrinter::NodeDescription::count = 0; - -IdealGraphPrinter::NodeDescription::NodeDescription(Node* node) : _node(node) { - _id = (intptr_t)(node); - _block_index = -1; -} - -IdealGraphPrinter::NodeDescription::~NodeDescription() { - _properties.clean(); -} - -// void IdealGraphPrinter::NodeDescription::set_node(Node* node) { -// //this->_node = node; -// } - -int IdealGraphPrinter::NodeDescription::block_index() { - return _block_index; -} - - -GrowableArray* IdealGraphPrinter::NodeDescription::succs() { - return &_succs; -} - -void IdealGraphPrinter::NodeDescription::clear_succs() { - _succs.clear(); -} - -void IdealGraphPrinter::NodeDescription::init_succs() { - _succs = GrowableArray(); -} - -void IdealGraphPrinter::NodeDescription::add_succ(NodeDescription *desc) { - _succs.append(desc); -} - -void IdealGraphPrinter::NodeDescription::set_block_index(int i) { - _block_index = i; -} - -bool IdealGraphPrinter::NodeDescription::equals(NodeDescription *desc) { - if (desc == NULL) return false; - if (desc->id() != id()) return false; - return properties()->equals(desc->properties()); -} - -Node* IdealGraphPrinter::NodeDescription::node() { - return _node; -} - -IdealGraphPrinter::Properties* IdealGraphPrinter::NodeDescription::properties() { - return &_properties; -} - -uint 
IdealGraphPrinter::NodeDescription::id() { - return _id; -} - -void IdealGraphPrinter::NodeDescription::print_changed(IdealGraphPrinter *printer) { - - - Properties properties; - properties.add(new Property(NODE_ID_PROPERTY, id())); - printer->start_element(NODE_ELEMENT, &properties); - - this->properties()->print(printer); - - - printer->end_element(NODE_ELEMENT); -} - -void IdealGraphPrinter::NodeDescription::print_removed(IdealGraphPrinter *printer) { - - Properties properties; - properties.add(new Property(NODE_ID_PROPERTY, id())); - printer->simple_element(REMOVE_NODE_ELEMENT, &properties); -} - -IdealGraphPrinter::EdgeDescription::EdgeDescription(int from, int to, int index) { - this->_from = from; - this->_to = to; - this->_index = index; -} - -IdealGraphPrinter::EdgeDescription::~EdgeDescription() { -} - -int IdealGraphPrinter::EdgeDescription::from() { - return _from; -} - -int IdealGraphPrinter::EdgeDescription::to() { - return _to; -} - -void IdealGraphPrinter::EdgeDescription::print_changed(IdealGraphPrinter *printer) { - - Properties properties; - properties.add(new Property(INDEX_PROPERTY, _index)); - properties.add(new Property(FROM_PROPERTY, _from)); - properties.add(new Property(TO_PROPERTY, _to)); - printer->simple_element(EDGE_ELEMENT, &properties); -} - -void IdealGraphPrinter::EdgeDescription::print_removed(IdealGraphPrinter *printer) { - - Properties properties; - properties.add(new Property(INDEX_PROPERTY, _index)); - properties.add(new Property(FROM_PROPERTY, _from)); - properties.add(new Property(TO_PROPERTY, _to)); - printer->simple_element(REMOVE_EDGE_ELEMENT, &properties); -} - -bool IdealGraphPrinter::EdgeDescription::equals(IdealGraphPrinter::EdgeDescription *desc) { - if (desc == NULL) return false; - return (_from == desc->_from && _to == desc->_to && _index == desc->_index); -} - -IdealGraphPrinter::Properties::Properties() : list(new (ResourceObj::C_HEAP) GrowableArray(2, 0, NULL, true)) { -} - -IdealGraphPrinter::Properties::~Properties() { - clean(); - delete list; -} - -void IdealGraphPrinter::Properties::add(Property *p) { - assert(p != NULL, "Property not NULL"); - list->append(p); -} - -void IdealGraphPrinter::Properties::print(IdealGraphPrinter *printer) { - printer->start_element(PROPERTIES_ELEMENT); - - for (int i = 0; i < list->length(); i++) { - list->at(i)->print(printer); - } - - printer->end_element(PROPERTIES_ELEMENT); -} - -void IdealGraphPrinter::Properties::clean() { - for (int i = 0; i < list->length(); i++) { - delete list->at(i); - list->at_put(i, NULL); - } - list->clear(); - assert(list->length() == 0, "List cleared"); -} - -void IdealGraphPrinter::Properties::remove(const char *name) { - for (int i = 0; i < list->length(); i++) { - if (strcmp(list->at(i)->name(), name) == 0) { - delete list->at(i); - list->remove_at(i); - i--; - } - } -} - -void IdealGraphPrinter::Properties::print_as_attributes(IdealGraphPrinter *printer) { - - for (int i = 0; i < list->length(); i++) { - assert(list->at(i) != NULL, "Property not null!"); - printer->output()->print(" "); - list->at(i)->print_as_attribute(printer); - } -} - -bool IdealGraphPrinter::Properties::equals(Properties* p) { - if (p->list->length() != this->list->length()) return false; - - for (int i = 0; i < list->length(); i++) { - assert(list->at(i) != NULL, "Property not null!"); - if (!list->at(i)->equals(p->list->at(i))) return false; - } - - return true; -} - -IdealGraphPrinter::Property::Property() { - _name = NULL; - _value = NULL; -} - -const char 
*IdealGraphPrinter::Property::name() { - return _name; -} - -IdealGraphPrinter::Property::Property(const Property* p) { - - this->_name = NULL; - this->_value = NULL; - - if (p->_name != NULL) { - _name = dup(p->_name); - } - - if (p->_value) { - _value = dup(p->_value); - } -} - -IdealGraphPrinter::Property::~Property() { - - clean(); -} - -IdealGraphPrinter::Property::Property(const char *name, const char *value) { - - assert(name, "Name must not be null!"); - assert(value, "Value must not be null!"); - - _name = dup(name); - _value = dup(value); -} - -IdealGraphPrinter::Property::Property(const char *name, int intValue) { - _name = dup(name); - - stringStream stream; - stream.print("%d", intValue); - _value = dup(stream.as_string()); -} - -void IdealGraphPrinter::Property::clean() { - if (_name) { - delete _name; - _name = NULL; - } - - if (_value) { - delete _value; - _value = NULL; - } -} - - -bool IdealGraphPrinter::Property::is_null() { - return _name == NULL; -} - -void IdealGraphPrinter::Property::print(IdealGraphPrinter *printer) { - - assert(!is_null(), "null properties cannot be printed!"); - Properties properties; - properties.add(new Property(PROPERTY_NAME_PROPERTY, _name)); - printer->start_element(PROPERTY_ELEMENT, &properties, false, false); - printer->print_xml(_value); - printer->end_element(PROPERTY_ELEMENT, false, true); -} - -void IdealGraphPrinter::Property::print_as_attribute(IdealGraphPrinter *printer) { - - printer->output()->print(_name); - printer->output()->print("=\""); - printer->print_xml(_value); - printer->output()->print("\""); -} - - -bool IdealGraphPrinter::Property::equals(Property* p) { - - if (is_null() && p->is_null()) return true; - if (is_null()) return false; - if (p->is_null()) return false; - - int cmp1 = strcmp(p->_name, _name); - if (cmp1 != 0) return false; - - int cmp2 = strcmp(p->_value, _value); - if (cmp2 != 0) return false; - - return true; -} - -void IdealGraphPrinter::print_xml(const char *value) { - size_t len = strlen(value); - - char buf[2]; - buf[1] = 0; - for (size_t i = 0; i < len; i++) { - char c = value[i]; - - switch(c) { - case '<': - output()->print("<"); - break; - - case '>': - output()->print(">"); - break; - - default: - buf[0] = c; - output()->print(buf); - break; - } - } + return _xml; } #endif --- old/hotspot/src/share/vm/opto/idealGraphPrinter.hpp 2009-08-01 04:13:47.243984072 +0100 +++ new/hotspot/src/share/vm/opto/idealGraphPrinter.hpp 2009-08-01 04:13:47.159365637 +0100 @@ -1,6 +1,3 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)idealGraphPrinter.hpp 1.1 07/09/28 11:48:37 JVM" -#endif /* * Copyright 2007 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
- * + * */ #ifndef PRODUCT @@ -35,7 +32,7 @@ class InlineTree; class ciMethod; -class IdealGraphPrinter +class IdealGraphPrinter { private: @@ -71,7 +68,7 @@ static const char *METHOD_IS_STATIC_PROPERTY; static const char *TRUE_VALUE; static const char *NODE_NAME_PROPERTY; - static const char *EDGE_NAME_PROPERTY; + static const char *EDGE_NAME_PROPERTY; static const char *NODE_ID_PROPERTY; static const char *FROM_PROPERTY; static const char *TO_PROPERTY; @@ -85,233 +82,52 @@ static const char *METHOD_SHORT_NAME_PROPERTY; static const char *ASSEMBLY_ELEMENT; - class Property { - - private: - - const char *_name; - const char *_value; - - public: - - Property(); - Property(const Property* p); - ~Property(); - Property(const char *name, const char *value); - Property(const char *name, int value); - bool equals(Property* p); - void print(IdealGraphPrinter *printer); - void print_as_attribute(IdealGraphPrinter *printer); - bool is_null(); - void clean(); - const char *name(); - - static const char* dup(const char *str) { - char * copy = new char[strlen(str)+1]; - strcpy(copy, str); - return copy; - } - - }; - - class Properties { - - private: - - GrowableArray *list; - - public: - - Properties(); - ~Properties(); - void add(Property *p); - void remove(const char *name); - bool equals(Properties* p); - void print(IdealGraphPrinter *printer); - void print_as_attributes(IdealGraphPrinter *printer); - void clean(); - - }; - - - class Description { - - private: - - State _state; - - public: - - Description(); - - State state(); - void set_state(State s); - void print(IdealGraphPrinter *printer); - virtual void print_changed(IdealGraphPrinter *printer) = 0; - virtual void print_removed(IdealGraphPrinter *printer) = 0; - - }; - - class NodeDescription : public Description{ - - public: - - static int count; - - private: - - GrowableArray _succs; - int _block_index; - uintptr_t _id; - Properties _properties; - Node* _node; - - public: - - NodeDescription(Node* node); - ~NodeDescription(); - Node* node(); - - // void set_node(Node* node); - GrowableArray* succs(); - void init_succs(); - void clear_succs(); - void add_succ(NodeDescription *desc); - int block_index(); - void set_block_index(int i); - Properties* properties(); - virtual void print_changed(IdealGraphPrinter *printer); - virtual void print_removed(IdealGraphPrinter *printer); - bool equals(NodeDescription *desc); - uint id(); - - }; - - class Block { - - private: - - NodeDescription *_start; - NodeDescription *_proj; - GrowableArray _succs; - GrowableArray _nodes; - GrowableArray _dominates; - GrowableArray _children; - int _semi; - int _parent; - GrowableArray _pred; - GrowableArray _bucket; - int _index; - int _dominator; - int _ancestor; - int _label; - - public: - - Block(); - Block(int index); - - void add_node(NodeDescription *n); - GrowableArray* nodes(); - GrowableArray* children(); - void add_child(int i); - void add_succ(int index); - GrowableArray* succs(); - GrowableArray* dominates(); - void add_dominates(int i); - NodeDescription *start(); - NodeDescription *proj(); - void set_start(NodeDescription *n); - void set_proj(NodeDescription *n); - - int label(); - void set_label(int i); - int ancestor(); - void set_ancestor(int i); - int index(); - int dominator(); - void set_dominator(int i); - int parent(); - void set_parent(int i); - int semi(); - GrowableArray* bucket(); - void add_to_bucket(int i); - void clear_bucket(); - GrowableArray* pred(); - void set_semi(int i); - void add_pred(int i); - - }; - - class EdgeDescription : 
public Description { - - private: - - int _from; - int _to; - int _index; - public: - - EdgeDescription(int from, int to, int index); - ~EdgeDescription(); - - virtual void print_changed(IdealGraphPrinter *printer); - virtual void print_removed(IdealGraphPrinter *printer); - bool equals(EdgeDescription *desc); - int from(); - int to(); - }; - + elapsedTimer _walk_time; + elapsedTimer _output_time; + elapsedTimer _build_blocks_time; static int _file_count; networkStream *_stream; + xmlStream *_xml; outputStream *_output; ciMethod *_current_method; - GrowableArray _nodes; - GrowableArray _edges; int _depth; - Arena *_arena; char buffer[128]; bool _should_send_method; PhaseChaitin* _chaitin; - bool _clear_nodes; - Matcher* _matcher; bool _traverse_outs; - - void start_element_helper(const char *name, Properties *properties, bool endElement, bool print_indent = false, bool print_return = true); - NodeDescription *create_node_description(Node* node); + Compile *C; static void pre_node(Node* node, void *env); static void post_node(Node* node, void *env); - void schedule_latest(int **common_dominator, GrowableArray* blocks); - void build_common_dominator(int **common_dominator, int index, GrowableArray* blocks); - void compress(int index, GrowableArray* blocks); - int eval(int index, GrowableArray* blocks); - void link(int index1, int index2, GrowableArray* blocks); - void build_dominators(GrowableArray* blocks); - void build_blocks(Node *node); - void walk(Node *n); - void start_element(const char *name, Properties *properties = NULL, bool print_indent = false, bool print_return = true); - void simple_element(const char *name, Properties *properties = NULL, bool print_indent = false); - void end_element(const char *name, bool print_indent = false, bool print_return = true); - void print_edge(int from, int to, int index); void print_indent(); void print_method(ciMethod *method, int bci, InlineTree *tree); void print_inline_tree(InlineTree *tree); - void clear_nodes(); - + void visit_node(Node *n, void *param); + void walk_nodes(Node *start, void *param); + void begin_elem(const char *s); + void end_elem(); + void begin_head(const char *s); + void end_head(); + void print_attr(const char *name, const char *val); + void print_attr(const char *name, intptr_t val); + void print_prop(const char *name, const char *val); + void print_prop(const char *name, int val); + void tail(const char *name); + void head(const char *name); + void text(const char *s); + intptr_t get_node_id(Node *n); IdealGraphPrinter(); ~IdealGraphPrinter(); - + public: - + static void clean_up(); static IdealGraphPrinter *printer(); bool traverse_outs(); void set_traverse_outs(bool b); - void print_ifg(PhaseIFG* ifg); outputStream *output(); void print_inlining(Compile* compile); void begin_method(Compile* compile); --- old/hotspot/src/share/vm/opto/idealKit.cpp 2009-08-01 04:13:48.147583824 +0100 +++ new/hotspot/src/share/vm/opto/idealKit.cpp 2009-08-01 04:13:48.067225517 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)idealKit.cpp 1.8 07/06/18 14:25:26 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -348,7 +348,7 @@ if (require_atomic_access && bt == T_LONG) { ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t); } else { - ld = LoadNode::make(C, ctl, mem, adr, adr_type, t, bt); + ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt); } return transform(ld); } @@ -364,7 +364,7 @@ if (require_atomic_access && bt == T_LONG) { st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val); } else { - st = StoreNode::make(C, ctl, mem, adr, adr_type, val, bt); + st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt); } st = transform(st); set_memory(st, adr_idx); --- old/hotspot/src/share/vm/opto/ifg.cpp 2009-08-01 04:13:48.951620495 +0100 +++ new/hotspot/src/share/vm/opto/ifg.cpp 2009-08-01 04:13:48.867873578 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ifg.cpp 1.62 07/05/05 17:06:13 JVM" #endif /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -488,8 +488,9 @@ // Liveout things are presumed live for the whole block. We accumulate // 'area' accordingly. If they get killed in the block, we'll subtract // the unused part of the block from the area. - double cost = b->_freq * double(last_inst-last_phi); - assert( cost >= 0, "negative spill cost" ); + int inst_count = last_inst - last_phi; + double cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count); + assert(!(cost < 0.0), "negative spill cost" ); IndexSetIterator elements(&liveout); uint lidx; while ((lidx = elements.next()) != 0) { @@ -593,10 +594,10 @@ } else { // Else it is live // A DEF also ends 'area' partway through the block. lrgs(r)._area -= cost; - assert( lrgs(r)._area >= 0, "negative spill area" ); + assert(!(lrgs(r)._area < 0.0), "negative spill area" ); // Insure high score for immediate-use spill copies so they get a color - if( n->is_SpillCopy() + if( n->is_SpillCopy() && lrgs(r).is_singledef() // MultiDef live range can still split && n->outcnt() == 1 // and use must be in this block && _cfg._bbs[n->unique_out()->_idx] == b ) { @@ -706,8 +707,9 @@ } // End of if normal register-allocated value - cost -= b->_freq; // Area remaining in the block - if( cost < 0.0 ) cost = 0.0; // Cost goes negative in the Phi area + // Area remaining in the block + inst_count--; + cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count); // Make all inputs live if( !n->is_Phi() ) { // Phi function uses come from prior block @@ -754,7 +756,7 @@ assert( pressure[0] == count_int_pressure (&liveout), "" ); assert( pressure[1] == count_float_pressure(&liveout), "" ); } - assert( lrg._area >= 0, "negative spill area" ); + assert(!(lrg._area < 0.0), "negative spill area" ); } } } // End of reverse pass over all instructions in block --- old/hotspot/src/share/vm/opto/ifnode.cpp 2009-08-01 04:13:49.883853309 +0100 +++ new/hotspot/src/share/vm/opto/ifnode.cpp 2009-08-01 04:13:49.798468637 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ifnode.cpp 1.63 07/10/23 13:12:51 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
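The ifg.cpp hunks above rewrite assertions of the form assert(cost >= 0) as assert(!(cost < 0.0)). The two forms differ for floating-point values: every ordered comparison involving NaN is false, so the first form fires on a NaN cost while the second lets NaN through and only rejects genuinely negative values, which is presumably the point of the change. A small self-contained check of that semantics (illustrative, not part of the patch):

#include <cassert>
#include <limits>

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  assert(!(nan >= 0.0));                    // ordered comparison with NaN is false...
  assert(!(nan < 0.0));                     // ...in either direction, so !(x < 0.0) passes for NaN
  assert(!(-1.0 >= 0.0) && (-1.0 < 0.0));   // a truly negative cost still trips !(x < 0.0)
  return 0;
}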
* * This code is free software; you can redistribute it and/or modify it @@ -608,7 +608,7 @@ // and has one projection leading to this if and the other projection // leading to a region that merges one of this ifs control // projections. -// +// // If // / | // / | @@ -618,7 +618,7 @@ // / \ | // / \ | // / Region -// +// Node* IfNode::fold_compares(PhaseGVN* phase) { if (!EliminateAutoBox || Opcode() != Op_If) return NULL; @@ -728,6 +728,11 @@ int true_path = phi->is_diamond_phi(); if( true_path == 0 ) return NULL; + // Make sure that iff and the control of the phi are different. This + // should really only happen for dead control flow since it requires + // an illegal cycle. + if (phi->in(0)->in(1)->in(0) == iff) return NULL; + // phi->region->if_proj->ifnode->bool->cmp BoolNode *bol2 = phi->in(0)->in(1)->in(0)->in(1)->as_Bool(); @@ -754,6 +759,7 @@ } Node* new_bol = (flip ? phase->transform( bol2->negate(phase) ) : bol2); + assert(new_bol != iff->in(1), "must make progress"); iff->set_req(1, new_bol); // Intervening diamond probably goes dead phase->C->set_major_progress(); --- old/hotspot/src/share/vm/opto/lcm.cpp 2009-08-01 04:13:50.771108138 +0100 +++ new/hotspot/src/share/vm/opto/lcm.cpp 2009-08-01 04:13:50.686027180 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)lcm.cpp 1.102 07/05/17 15:58:55 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,9 @@ not_null_block = _succs[0]; null_block = _succs[1]; } + while (null_block->is_Empty() == Block::empty_with_goto) { + null_block = null_block->_succs[0]; + } // Search the exception block for an uncommon trap. // (See Parse::do_if and Parse::do_ifnull for the reason @@ -113,8 +116,10 @@ case Op_LoadI: case Op_LoadL: case Op_LoadP: + case Op_LoadN: case Op_LoadS: case Op_LoadKlass: + case Op_LoadNKlass: case Op_LoadRange: case Op_LoadD_unaligned: case Op_LoadL_unaligned: @@ -127,14 +132,16 @@ case Op_StoreI: case Op_StoreL: case Op_StoreP: + case Op_StoreN: was_store = true; // Memory op is a store op // Stores will have their address in slot 2 (memory in slot 1). // If the value being nul-checked is in another slot, it means we // are storing the checked value, which does NOT check the value! if( mach->in(2) != val ) continue; break; // Found a memory op? 
- case Op_StrComp: - // Not a legit memory op for implicit null check regardless of + case Op_StrComp: + case Op_AryEq: + // Not a legit memory op for implicit null check regardless of // embedded loads continue; default: // Also check for embedded loads @@ -148,6 +155,10 @@ const TypePtr *adr_type = NULL; // Do not need this return value here const Node* base = mach->get_base_and_disp(offset, adr_type); if (base == NULL || base == NodeSentinel) { + // Narrow oop address doesn't have base, only index + if( val->bottom_type()->isa_narrowoop() && + MacroAssembler::needs_explicit_null_check(offset) ) + continue; // Give up if offset is beyond page size // cannot reason about it; is probably not implicit null exception } else { const TypePtr* tptr = base->bottom_type()->is_ptr(); @@ -321,7 +332,7 @@ uint choice = 0; // Bigger is most important uint latency = 0; // Bigger is scheduled first uint score = 0; // Bigger is better - uint idx; // Index in worklist + int idx = -1; // Index in worklist for( uint i=0; i= 0, "index should be set"); + Node *n = worklist[(uint)idx]; // Get the winner - worklist.map(idx,worklist.pop()); // Compress worklist + worklist.map((uint)idx, worklist.pop()); // Compress worklist return n; } @@ -586,7 +598,7 @@ // A few node types require changing a required edge to a precedence edge // before allocation. - if( UseConcMarkSweepGC ) { + if( UseConcMarkSweepGC || UseG1GC ) { if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) { // Note: Required edges with an index greater than oper_input_base // are not supported by the allocator. @@ -598,7 +610,14 @@ assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark"); } } - if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ) { + if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire && + n->req() > TypeFunc::Parms ) { + // MemBarAcquire could be created without Precedent edge. + // del_req() replaces the specified edge with the last input edge + // and then removes the last edge. If the specified edge > number of + // edges the last edge will be moved outside of the input edges array + // and the edge will be lost. This is why this code should be + // executed only when Precedent (== TypeFunc::Parms) edge is present. Node *x = n->in(TypeFunc::Parms); n->del_req(TypeFunc::Parms); n->add_prec(x); @@ -630,6 +649,10 @@ // of the phi to be scheduled first. The select() method breaks // ties in scheduling by worklist order. delay.push(m); + } else if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CreateEx) { + // Force the CreateEx to the top of the list so it's processed + // first and ends up at the start of the block. + worklist.insert(0, m); } else { worklist.push(m); // Then on to worklist! } --- old/hotspot/src/share/vm/opto/library_call.cpp 2009-08-01 04:13:51.756096361 +0100 +++ new/hotspot/src/share/vm/opto/library_call.cpp 2009-08-01 04:13:51.649219828 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)library_call.cpp 1.167 07/07/25 17:43:16 JVM" #endif /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
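The new comment above the MemBarAcquire handling in lcm.cpp explains the hazard in del_req(): the edge being removed is overwritten with the last input edge and the array shrinks, so calling it with a slot that is not actually populated silently drops the last edge instead. A generic sketch of that swap-with-last removal pattern and its precondition; the container and function names here are illustrative, not the Node API.

#include <cassert>
#include <cstddef>
#include <vector>

// Swap-with-last removal: overwrite slot 'idx' with the final element and
// shrink by one. Cheap but order-changing, and only valid when 'idx' is an
// existing slot; otherwise the final element is lost, which is the situation
// the patch guards against with the req() > TypeFunc::Parms check.
template <typename T>
void remove_swap_last(std::vector<T>& v, std::size_t idx) {
  assert(idx < v.size() && "slot must exist");
  v[idx] = v.back();
  v.pop_back();
}

int main() {
  std::vector<int> inputs = {10, 20, 30, 40};
  remove_swap_last(inputs, 1);   // inputs is now {10, 40, 30}
  assert(inputs.size() == 3 && inputs[1] == 40);
  return 0;
}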
* * This code is free software; you can redistribute it and/or modify it @@ -166,6 +166,7 @@ bool inline_native_newArray(); bool inline_native_getLength(); bool inline_array_copyOf(bool is_copyOfRange); + bool inline_array_equals(); bool inline_native_clone(bool is_virtual); bool inline_native_Reflection_getCallerClass(); bool inline_native_AtomicLong_get(); @@ -262,6 +263,7 @@ switch (id) { case vmIntrinsics::_indexOf: case vmIntrinsics::_compareTo: + case vmIntrinsics::_equalsC: break; // InlineNatives does not control String.compareTo default: return NULL; @@ -275,6 +277,9 @@ case vmIntrinsics::_indexOf: if (!SpecialStringIndexOf) return NULL; break; + case vmIntrinsics::_equalsC: + if (!SpecialArraysEquals) return NULL; + break; case vmIntrinsics::_arraycopy: if (!InlineArrayCopy) return NULL; break; @@ -589,6 +594,8 @@ return inline_array_copyOf(false); case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true); + case vmIntrinsics::_equalsC: + return inline_array_equals(); case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual()); @@ -816,6 +823,24 @@ return true; } +//------------------------------inline_array_equals---------------------------- +bool LibraryCallKit::inline_array_equals() { + + if (!Matcher::has_match_rule(Op_AryEq)) return false; + + _sp += 2; + Node *argument2 = pop(); + Node *argument1 = pop(); + + Node* equals = + _gvn.transform(new (C, 3) AryEqNode(control(), + argument1, + argument2) + ); + push(equals); + return true; +} + // Java version of String.indexOf(constant string) // class StringDecl { // StringDecl(char[] ca) { @@ -899,7 +924,7 @@ Node* sourcea = basic_plus_adr(string_object, string_object, value_offset); Node* source = make_load(no_ctrl, sourcea, source_type, T_OBJECT, string_type->add_offset(value_offset)); - Node* target = _gvn.transform(ConPNode::make(C, target_array)); + Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array)) ); jint target_length = target_array->length(); const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin)); const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot); @@ -1850,7 +1875,7 @@ // See if it is a narrow oop array. if (adr_type->isa_aryptr()) { - if (adr_type->offset() >= objArrayOopDesc::header_size() * wordSize) { + if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes(type)) { const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr(); if (elem_type != NULL) { sharpened_klass = elem_type->klass(); @@ -2100,7 +2125,7 @@ int type_words = type2size[type]; // Cannot inline wide CAS on machines that don't support it natively - if (type2aelembytes[type] > BytesPerInt && !VM_Version::supports_cx8()) + if (type2aelembytes(type) > BytesPerInt && !VM_Version::supports_cx8()) return false; C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". @@ -2167,10 +2192,20 @@ cas = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval)); break; case T_OBJECT: - // reference stores need a store barrier. + // reference stores need a store barrier. // (They don't if CAS fails, but it isn't worth checking.) 
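Several hunks in library_call.cpp, here and further down, replace the raw table lookup type2aelembytes[t] with a call type2aelembytes(t). Routing the lookup through an accessor gives one place to validate the element type before indexing. A hedged sketch of that table-behind-a-function pattern with made-up names, not the HotSpot definitions:

#include <cassert>
#include <cstddef>

// Illustrative only: element-size table wrapped in an accessor so callers
// cannot index it with an unexpected type tag.
enum ElemType { EB_BOOLEAN, EB_BYTE, EB_CHAR, EB_INT, EB_LONG, EB_COUNT };

static const std::size_t kElemBytes[EB_COUNT] = {1, 1, 2, 4, 8};

static std::size_t elem_bytes(ElemType t) {
  assert(static_cast<int>(t) >= 0 && t < EB_COUNT && "unexpected element type");
  return kElemBytes[t];
}

int main() {
  return elem_bytes(EB_CHAR) == 2 ? 0 : 1;   // a char element occupies two bytes
}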
pre_barrier(control(), base, adr, alias_idx, newval, value_type, T_OBJECT); - cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval)); +#ifdef _LP64 + if (adr->bottom_type()->is_ptr_to_narrowoop()) { + Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop())); + Node *oldval_enc = _gvn.transform(new (C, 2) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop())); + cas = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr, + newval_enc, oldval_enc)); + } else +#endif + { + cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval)); + } post_barrier(control(), cas, base, adr, alias_idx, newval, T_OBJECT, true); break; default: @@ -2448,7 +2483,7 @@ if (region == NULL) never_see_null = true; Node* p = basic_plus_adr(mirror, offset); const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL; - Node* kls = _gvn.transform(new (C, 3) LoadKlassNode(0, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type)); + Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type) ); _sp += nargs; // any deopt will start just before call to enclosing method Node* null_ctl = top(); kls = null_check_oop(kls, &null_ctl, never_see_null); @@ -2628,7 +2663,7 @@ phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror()))); // If we fall through, it's a plain class. Get its _super. p = basic_plus_adr(kls, Klass::super_offset_in_bytes() + sizeof(oopDesc)); - kls = _gvn.transform(new (C, 3) LoadKlassNode(0, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL)); + kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL) ); null_ctl = top(); kls = null_check_oop(kls, &null_ctl); if (null_ctl != top()) { @@ -2714,7 +2749,7 @@ args[which_arg] = _gvn.transform(arg); Node* p = basic_plus_adr(arg, class_klass_offset); - Node* kls = new (C, 3) LoadKlassNode(0, immutable_memory(), p, adr_type, kls_type); + Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type); klasses[which_arg] = _gvn.transform(kls); } @@ -2832,6 +2867,8 @@ _sp += nargs; // set original stack for use by uncommon_trap mirror = do_null_check(mirror, T_OBJECT); _sp -= nargs; + // If mirror or obj is dead, only null-path is taken. + if (stopped()) return true; enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT }; RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); @@ -3451,11 +3488,32 @@ const TypePtr *adr_type = _gvn.type(adr)->is_ptr(); int alias_idx = C->get_alias_index(adr_type); - Node *result = _gvn.transform(new (C, 5) StoreLConditionalNode(control(), memory(alias_idx), adr, newVal, oldVal)); - Node *store_proj = _gvn.transform( new (C, 1) SCMemProjNode(result)); + Node *cas = _gvn.transform(new (C, 5) StoreLConditionalNode(control(), memory(alias_idx), adr, newVal, oldVal)); + Node *store_proj = _gvn.transform( new (C, 1) SCMemProjNode(cas)); set_memory(store_proj, alias_idx); + Node *bol = _gvn.transform( new (C, 2) BoolNode( cas, BoolTest::eq ) ); - push(result); + Node *result; + // CMove node is not used to be able fold a possible check code + // after attemptUpdate() call. This code could be transformed + // into CMove node by loop optimizations. 
+ { + RegionNode *r = new (C, 3) RegionNode(3); + result = new (C, 3) PhiNode(r, TypeInt::BOOL); + + Node *iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN); + Node *iftrue = opt_iff(r, iff); + r->init_req(1, iftrue); + result->init_req(1, intcon(1)); + result->init_req(2, intcon(0)); + + set_control(_gvn.transform(r)); + record_for_igvn(r); + + C->set_has_split_ifs(true); // Has chance for split-if optimization + } + + push(_gvn.transform(result)); return true; } @@ -3821,16 +3879,22 @@ if (!stopped()) { // Copy the fastest available way. // (No need for PreserveJVMState, since we're using it all up now.) + // TODO: generate fields/elements copies for small objects instead. Node* src = obj; Node* dest = raw_obj; - Node* end = dest; Node* size = _gvn.transform(alloc_siz); // Exclude the header. - int base_off = sizeof(oopDesc); + int base_off = instanceOopDesc::base_offset_in_bytes(); + if (UseCompressedOops) { + assert(base_off % BytesPerLong != 0, "base with compressed oops"); + // With compressed oops base_offset_in_bytes is 12 which creates + // the gap since countx is rounded by 8 bytes below. + // Copy klass and the gap. + base_off = instanceOopDesc::klass_offset_in_bytes(); + } src = basic_plus_adr(src, base_off); dest = basic_plus_adr(dest, base_off); - end = basic_plus_adr(end, size); // Compute the length also, if needed: Node* countx = size; @@ -3978,7 +4042,7 @@ // both indices are constants int s_offs = src_offset_inttype->get_con(); int d_offs = dest_offset_inttype->get_con(); - int element_size = type2aelembytes[t]; + int element_size = type2aelembytes(t); aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) && ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0); if (s_offs >= d_offs) disjoint = true; @@ -4173,6 +4237,7 @@ && !_gvn.eqv_uncast(src, dest) && ((alloc = tightly_coupled_allocation(dest, slow_region)) != NULL) + && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0 && alloc->maybe_set_complete(&_gvn)) { // "You break it, you buy it." InitializeNode* init = alloc->initialization(); @@ -4373,7 +4438,7 @@ // (At this point we can assume disjoint_bases, since types differ.) int ek_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc); Node* p1 = basic_plus_adr(dest_klass, ek_offset); - Node* n1 = new (C, 3) LoadKlassNode(0, immutable_memory(), p1, TypeRawPtr::BOTTOM); + Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM); Node* dest_elem_klass = _gvn.transform(n1); Node* cv = generate_checkcast_arraycopy(adr_type, dest_elem_klass, @@ -4391,8 +4456,8 @@ // Let's see if we need card marks: if (alloc != NULL && use_ReduceInitialCardMarks()) { // If we do not need card marks, copy using the jint or jlong stub. - copy_type = LP64_ONLY(T_LONG) NOT_LP64(T_INT); - assert(type2aelembytes[basic_elem_type] == type2aelembytes[copy_type], + copy_type = LP64_ONLY(UseCompressedOops ? 
T_INT : T_LONG) NOT_LP64(T_INT); + assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type), "sizes agree"); } } @@ -4662,7 +4727,7 @@ Node* mem = memory(adr_type); // memory slice to operate on // scaling and rounding of indexes: - int scale = exact_log2(type2aelembytes[basic_elem_type]); + int scale = exact_log2(type2aelembytes(basic_elem_type)); int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type); int clear_low = (-1 << scale) & (BytesPerInt - 1); int bump_bit = (-1 << scale) & BytesPerInt; @@ -4717,23 +4782,25 @@ int to_clear = (bump_bit | clear_low); // Align up mod 8, then store a jint zero unconditionally // just before the mod-8 boundary. - // This would only fail if the first array element were immediately - // after the length field, and were also at an even offset mod 8. - assert(((abase + bump_bit) & ~to_clear) - BytesPerInt - >= arrayOopDesc::length_offset_in_bytes() + BytesPerInt, - "store must not trash length field"); - - // Bump 'start' up to (or past) the next jint boundary: - start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(bump_bit)) ); + if (((abase + bump_bit) & ~to_clear) - bump_bit + < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) { + bump_bit = 0; + assert((abase & to_clear) == 0, "array base must be long-aligned"); + } else { + // Bump 'start' up to (or past) the next jint boundary: + start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(bump_bit)) ); + assert((abase & clear_low) == 0, "array base must be int-aligned"); + } // Round bumped 'start' down to jlong boundary in body of array. start = _gvn.transform( new(C,3) AndXNode(start, MakeConX(~to_clear)) ); - // Store a zero to the immediately preceding jint: - Node* x1 = _gvn.transform( new(C,3) AddXNode(start, MakeConX(-BytesPerInt)) ); - Node* p1 = basic_plus_adr(dest, x1); - mem = StoreNode::make(C, control(), mem, p1, adr_type, intcon(0), T_INT); - mem = _gvn.transform(mem); + if (bump_bit != 0) { + // Store a zero to the immediately preceding jint: + Node* x1 = _gvn.transform( new(C,3) AddXNode(start, MakeConX(-bump_bit)) ); + Node* p1 = basic_plus_adr(dest, x1); + mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT); + mem = _gvn.transform(mem); + } } - Node* end = dest_size; // pre-rounded mem = ClearArrayNode::clear_memory(control(), mem, dest, start, end, &_gvn); @@ -4756,7 +4823,7 @@ Node* dest, Node* dest_offset, Node* dest_size) { // See if there is an advantage from block transfer. - int scale = exact_log2(type2aelembytes[basic_elem_type]); + int scale = exact_log2(type2aelembytes(basic_elem_type)); if (scale >= LogBytesPerLong) return false; // it is already a block transfer --- old/hotspot/src/share/vm/opto/locknode.cpp 2009-08-01 04:13:52.882211862 +0100 +++ new/hotspot/src/share/vm/opto/locknode.cpp 2009-08-01 04:13:52.797944610 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)locknode.cpp 1.49 07/05/17 15:59:05 JVM" #endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
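The generate_clear_array changes above adjust how the start of the zeroed region is aligned: the start offset is rounded up to the next 8-byte boundary, and the 4-byte slot left just below that boundary is cleared with a separate jint store only when doing so cannot touch the array length field. The arithmetic below is a simplified illustration of the rounding step only, with illustrative constants; it is not the patch's code.

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t BytesPerInt  = 4;
  const int64_t BytesPerLong = 8;

  int64_t start   = 12;  // e.g. offset of the first element to clear
  int64_t rounded = (start + BytesPerLong - 1) & ~(BytesPerLong - 1);
  bool    clear_preceding_int = (rounded - start) >= BytesPerInt;

  // For start == 12: rounded == 16, and the 4 bytes at offset 12 still need
  // an explicit jint store before the 8-byte-wide clearing loop takes over.
  std::printf("start=%lld rounded=%lld extra jint store=%d\n",
              (long long)start, (long long)rounded, (int)clear_preceding_int);
  return 0;
}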
* * This code is free software; you can redistribute it and/or modify it @@ -39,17 +39,23 @@ uint BoxLockNode::size_of() const { return sizeof(*this); } -BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ), _slot(slot) { +BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ), + _slot(slot), _is_eliminated(false) { init_class_id(Class_BoxLock); init_flags(Flag_rematerialize); OptoReg::Name reg = OptoReg::stack2reg(_slot); _inmask.Insert(reg); } +//-----------------------------hash-------------------------------------------- +uint BoxLockNode::hash() const { + return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0); +} + //------------------------------cmp-------------------------------------------- uint BoxLockNode::cmp( const Node &n ) const { const BoxLockNode &bn = (const BoxLockNode &)n; - return bn._slot == _slot; + return bn._slot == _slot && bn._is_eliminated == _is_eliminated; } OptoReg::Name BoxLockNode::stack_slot(Node* box_node) { --- old/hotspot/src/share/vm/opto/locknode.hpp 2009-08-01 04:13:53.674922035 +0100 +++ new/hotspot/src/share/vm/opto/locknode.hpp 2009-08-01 04:13:53.601426087 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)locknode.hpp 1.40 07/09/28 10:23:10 JVM" #endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ public: const int _slot; RegMask _inmask; + bool _is_eliminated; // indicates this lock was safely eliminated BoxLockNode( int lock ); virtual int Opcode() const; @@ -38,13 +39,17 @@ virtual const RegMask &in_RegMask(uint) const; virtual const RegMask &out_RegMask() const; virtual uint size_of() const; - virtual uint hash() const { return Node::hash() + _slot; } + virtual uint hash() const; virtual uint cmp( const Node &n ) const; virtual const class Type *bottom_type() const { return TypeRawPtr::BOTTOM; } virtual uint ideal_reg() const { return Op_RegP; } static OptoReg::Name stack_slot(Node* box_node); + bool is_eliminated() { return _is_eliminated; } + // mark lock as eliminated. + void set_eliminated() { _is_eliminated = true; } + #ifndef PRODUCT virtual void format( PhaseRegAlloc *, outputStream *st ) const; virtual void dump_spec(outputStream *st) const { st->print(" Lock %d",_slot); } --- old/hotspot/src/share/vm/opto/loopTransform.cpp 2009-08-01 04:13:54.577498661 +0100 +++ new/hotspot/src/share/vm/opto/loopTransform.cpp 2009-08-01 04:13:54.483505629 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)loopTransform.cpp 1.116 07/06/01 11:35:03 JVM" #endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -682,6 +682,10 @@ CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop(); post_head->set_post_loop(main_head); + // Reduce the post-loop trip count. + CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd(); + post_end->_prob = PROB_FAIR; + // Build the main-loop normal exit. 
IfFalseNode *new_main_exit = new (C, 1) IfFalseNode(main_end); _igvn.register_new_node_with_optimizer( new_main_exit ); @@ -693,7 +697,7 @@ // (the main-loop trip-counter exit value) because we will be changing // the exit value (via unrolling) so we cannot constant-fold away the zero // trip guard until all unrolling is done. - Node *zer_opaq = new (C, 2) Opaque1Node(incr); + Node *zer_opaq = new (C, 2) Opaque1Node(C, incr); Node *zer_cmp = new (C, 3) CmpINode( zer_opaq, limit ); Node *zer_bol = new (C, 2) BoolNode( zer_cmp, b_test ); register_new_node( zer_opaq, new_main_exit ); @@ -751,6 +755,9 @@ pre_head->set_pre_loop(main_head); Node *pre_incr = old_new[incr->_idx]; + // Reduce the pre-loop trip count. + pre_end->_prob = PROB_FAIR; + // Find the pre-loop normal exit. Node* pre_exit = pre_end->proj_out(false); assert( pre_exit->Opcode() == Op_IfFalse, "" ); @@ -763,15 +770,15 @@ // pre-loop, the main-loop may not execute at all. Later in life this // zero-trip guard will become the minimum-trip guard when we unroll // the main-loop. - Node *min_opaq = new (C, 2) Opaque1Node(limit); + Node *min_opaq = new (C, 2) Opaque1Node(C, limit); Node *min_cmp = new (C, 3) CmpINode( pre_incr, min_opaq ); Node *min_bol = new (C, 2) BoolNode( min_cmp, b_test ); register_new_node( min_opaq, new_pre_exit ); register_new_node( min_cmp , new_pre_exit ); register_new_node( min_bol , new_pre_exit ); - // Build the IfNode - IfNode *min_iff = new (C, 2) IfNode( new_pre_exit, min_bol, PROB_FAIR, COUNT_UNKNOWN ); + // Build the IfNode (assume the main-loop is executed always). + IfNode *min_iff = new (C, 2) IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN ); _igvn.register_new_node_with_optimizer( min_iff ); set_idom(min_iff, new_pre_exit, dd_main_head); set_loop(min_iff, loop->_parent); @@ -813,7 +820,7 @@ // Save the original loop limit in this Opaque1 node for // use by range check elimination. 
- Node *pre_opaq = new (C, 3) Opaque1Node(pre_limit, limit); + Node *pre_opaq = new (C, 3) Opaque1Node(C, pre_limit, limit); register_new_node( pre_limit, pre_head->in(0) ); register_new_node( pre_opaq , pre_head->in(0) ); @@ -1015,6 +1022,8 @@ if (!has_ctrl(old)) set_loop(nnn, loop); } + + loop->record_for_igvn(); } //------------------------------do_maximally_unroll---------------------------- @@ -1513,10 +1522,12 @@ Node *bol = iff->in(1); if( bol && bol->req() > 1 && bol->in(1) && ((bol->in(1)->Opcode() == Op_StorePConditional ) || + (bol->in(1)->Opcode() == Op_StoreIConditional ) || (bol->in(1)->Opcode() == Op_StoreLConditional ) || (bol->in(1)->Opcode() == Op_CompareAndSwapI ) || (bol->in(1)->Opcode() == Op_CompareAndSwapL ) || - (bol->in(1)->Opcode() == Op_CompareAndSwapP ))) + (bol->in(1)->Opcode() == Op_CompareAndSwapP ) || + (bol->in(1)->Opcode() == Op_CompareAndSwapN ))) return; // Allocation loops RARELY take backedge // Find the OTHER exit path from the IF Node* ex = iff->proj_out(1-test_con); @@ -1583,10 +1594,10 @@ //============================================================================= //------------------------------iteration_split_impl--------------------------- -void IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) { +bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) { // Check and remove empty loops (spam micro-benchmarks) - if( policy_do_remove_empty_loop(phase) ) - return; // Here we removed an empty loop + if( policy_do_remove_empty_loop(phase) ) + return true; // Here we removed an empty loop bool should_peel = policy_peeling(phase); // Should we peel? @@ -1596,7 +1607,8 @@ // This removes loop-invariant tests (usually null checks). if( !_head->is_CountedLoop() ) { // Non-counted loop if (PartialPeelLoop && phase->partial_peel(this, old_new)) { - return; + // Partial peel succeeded so terminate this round of loop opts + return false; } if( should_peel ) { // Should we peel? #ifndef PRODUCT @@ -1606,14 +1618,14 @@ } else if( should_unswitch ) { phase->do_unswitching(this, old_new); } - return; + return true; } CountedLoopNode *cl = _head->as_CountedLoop(); - if( !cl->loopexit() ) return; // Ignore various kinds of broken loops + if( !cl->loopexit() ) return true; // Ignore various kinds of broken loops // Do nothing special to pre- and post- loops - if( cl->is_pre_loop() || cl->is_post_loop() ) return; + if( cl->is_pre_loop() || cl->is_post_loop() ) return true; // Compute loop trip count from profile data compute_profile_trip_cnt(phase); @@ -1626,11 +1638,11 @@ // Here we did some unrolling and peeling. Eventually we will // completely unroll this loop and it will no longer be a loop. 
phase->do_maximally_unroll(this,old_new); - return; + return true; } if (should_unswitch) { phase->do_unswitching(this, old_new); - return; + return true; } } @@ -1691,14 +1703,16 @@ if( should_peel ) // Might want to peel but do nothing else phase->do_peeling(this,old_new); } + return true; } //============================================================================= //------------------------------iteration_split-------------------------------- -void IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) { +bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) { // Recursively iteration split nested loops - if( _child ) _child->iteration_split( phase, old_new ); + if( _child && !_child->iteration_split( phase, old_new )) + return false; // Clean out prior deadwood DCE_loop_body(); @@ -1717,9 +1731,12 @@ // Gate unrolling, RCE and peeling efforts. if( !_child && // If not an inner loop, do not split !_irreducible && + _allow_optimizations && !tail()->is_top() ) { // Also ignore the occasional dead backedge if (!_has_call) { - iteration_split_impl( phase, old_new ); + if (!iteration_split_impl( phase, old_new )) { + return false; + } } else if (policy_unswitching(phase)) { phase->do_unswitching(this, old_new); } @@ -1728,5 +1745,7 @@ // Minor offset re-organization to remove loop-fallout uses of // trip counter. if( _head->is_CountedLoop() ) phase->reorg_offsets( this ); - if( _next ) _next->iteration_split( phase, old_new ); + if( _next && !_next->iteration_split( phase, old_new )) + return false; + return true; } --- old/hotspot/src/share/vm/opto/loopUnswitch.cpp 2009-08-01 04:13:55.527175759 +0100 +++ new/hotspot/src/share/vm/opto/loopUnswitch.cpp 2009-08-01 04:13:55.450144102 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)loopUnswitch.cpp 1.6 07/06/29 14:41:32 JVM" #endif /* - * Copyright 2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,6 +54,9 @@ if( !LoopUnswitching ) { return false; } + if (!_head->is_Loop()) { + return false; + } uint nodes_left = MaxNodeLimit - phase->C->unique(); if (2 * _body.size() > nodes_left) { return false; // Too speculative if running low on nodes. @@ -205,7 +208,7 @@ Node *cont = _igvn.intcon(1); set_ctrl(cont, C->root()); - Node* opq = new (C, 2) Opaque1Node(cont); + Node* opq = new (C, 2) Opaque1Node(C, cont); register_node(opq, outer_loop, entry, dom_depth(entry)); Node *bol = new (C, 2) Conv2BNode(opq); register_node(bol, outer_loop, entry, dom_depth(entry)); --- old/hotspot/src/share/vm/opto/loopnode.cpp 2009-08-01 04:13:56.439701405 +0100 +++ new/hotspot/src/share/vm/opto/loopnode.cpp 2009-08-01 04:13:56.341640216 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)loopnode.cpp 1.262 07/10/23 13:12:50 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1075,8 +1075,6 @@ phase->_igvn.add_users_to_worklist(l->fast_out(i)); } - phase->C->print_method("After beautify loops", 3); - // Now recursively beautify nested loops if( _child ) result |= _child->beautify_loops( phase ); if( _next ) result |= _next ->beautify_loops( phase ); @@ -1284,7 +1282,7 @@ // Visit all children, looking for Phis for (DUIterator i = cl->outs(); cl->has_out(i); i++) { Node *out = cl->out(i); - if (!out->is_Phi()) continue; // Looking for phis + if (!out->is_Phi() || out == phi) continue; // Looking for other phis PhiNode* phi2 = out->as_Phi(); Node *incr2 = phi2->in( LoopNode::LoopBackControl ); // Look for induction variables of the form: X += constant @@ -1393,6 +1391,37 @@ #endif +static void log_loop_tree(IdealLoopTree* root, IdealLoopTree* loop, CompileLog* log) { + if (loop == root) { + if (loop->_child != NULL) { + log->begin_head("loop_tree"); + log->end_head(); + if( loop->_child ) log_loop_tree(root, loop->_child, log); + log->tail("loop_tree"); + assert(loop->_next == NULL, "what?"); + } + } else { + Node* head = loop->_head; + log->begin_head("loop"); + log->print(" idx='%d' ", head->_idx); + if (loop->_irreducible) log->print("irreducible='1' "); + if (head->is_Loop()) { + if (head->as_Loop()->is_inner_loop()) log->print("inner_loop='1' "); + if (head->as_Loop()->is_partial_peel_loop()) log->print("partial_peel_loop='1' "); + } + if (head->is_CountedLoop()) { + CountedLoopNode* cl = head->as_CountedLoop(); + if (cl->is_pre_loop()) log->print("pre_loop='%d' ", cl->main_idx()); + if (cl->is_main_loop()) log->print("main_loop='%d' ", cl->_idx); + if (cl->is_post_loop()) log->print("post_loop='%d' ", cl->main_idx()); + } + log->end_head(); + if( loop->_child ) log_loop_tree(root, loop->_child, log); + log->tail("loop"); + if( loop->_next ) log_loop_tree(root, loop->_next, log); + } +} + //============================================================================= //------------------------------PhaseIdealLoop--------------------------------- // Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to @@ -1473,6 +1502,8 @@ } // Reset loop nesting depth _ltree_root->set_nest( 0 ); + + C->print_method("After beautify loops", 3); } } @@ -1564,7 +1595,7 @@ // on just their loop-phi's for this pass of loop opts if( SplitIfBlocks && do_split_ifs ) { if (lpt->policy_range_check(this)) { - lpt->_rce_candidate = true; + lpt->_rce_candidate = 1; // = true } } } @@ -1627,10 +1658,13 @@ // Cleanup any modified bits _igvn.optimize(); - // Do not repeat loop optimizations if irreducible loops are present - // by claiming no-progress. - if( _has_irreducible_loops ) - C->clear_major_progress(); + // disable assert until issue with split_flow_path is resolved (6742111) + // assert(!_has_irreducible_loops || C->parsed_irreducible_loop() || C->is_osr_compilation(), + // "shouldn't introduce irreducible loops"); + + if (C->log() != NULL) { + log_loop_tree(_ltree_root, _ltree_root, C->log()); + } } #ifndef PRODUCT @@ -2148,7 +2182,7 @@ // as well? If so, then I found another entry into the loop. 
while( is_postvisited(l->_head) ) { // found irreducible - l->_irreducible = true; + l->_irreducible = 1; // = true l = l->_parent; _has_irreducible_loops = true; // Check for bad CFG here to prevent crash, and bailout of compile @@ -2202,6 +2236,12 @@ (iff->as_If()->_prob >= 0.01) ) innermost->_has_call = 1; } + } else if( n->is_Allocate() && n->as_Allocate()->_is_scalar_replaceable ) { + // Disable loop optimizations if the loop has a scalar replaceable + // allocation. This disabling may cause a potential performance lost + // if the allocation is not eliminated for some reason. + innermost->_allow_optimizations = false; + innermost->_has_call = 1; // = true } } } @@ -2622,13 +2662,16 @@ case Op_LoadF: case Op_LoadI: case Op_LoadKlass: + case Op_LoadNKlass: case Op_LoadL: case Op_LoadS: case Op_LoadP: + case Op_LoadN: case Op_LoadRange: case Op_LoadD_unaligned: case Op_LoadL_unaligned: case Op_StrComp: // Does a bunch of load-like effects + case Op_AryEq: pinned = false; } if( pinned ) { @@ -2726,11 +2769,7 @@ } void PhaseIdealLoop::dump( IdealLoopTree *loop, uint idx, Node_List &rpo_list ) const { - - // Indent by loop nesting depth - for( uint x = 0; x < loop->_nest; x++ ) - tty->print(" "); - tty->print_cr("---- Loop N%d-N%d ----", loop->_head->_idx,loop->_tail->_idx); + loop->dump_head(); // Now scan for CFG nodes in the same loop for( uint j=idx; j > 0; j-- ) { --- old/hotspot/src/share/vm/opto/loopnode.hpp 2009-08-01 04:13:57.439554794 +0100 +++ new/hotspot/src/share/vm/opto/loopnode.hpp 2009-08-01 04:13:57.357173858 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)loopnode.hpp 1.146 07/10/23 13:12:55 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -195,6 +195,8 @@ int is_main_no_pre_loop() const { return _loop_flags & Main_Has_No_Pre_Loop; } void set_main_no_pre_loop() { _loop_flags |= Main_Has_No_Pre_Loop; } + int main_idx() const { return _main_idx; } + void set_pre_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; } void set_main_loop ( ) { assert(is_normal_loop(),""); _loop_flags |= Main; } @@ -292,13 +294,15 @@ _has_sfpt:1, // True if has non-call safepoint _rce_candidate:1; // True if candidate for range check elimination - Node_List* _required_safept; // A inner loop cannot delete these safepts; + Node_List* _required_safept; // A inner loop cannot delete these safepts; + bool _allow_optimizations; // Allow loop optimizations IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail ) : _parent(0), _next(0), _child(0), _head(head), _tail(tail), _phase(phase), _required_safept(NULL), + _allow_optimizations(true), _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0) { } @@ -324,12 +328,14 @@ // Returns TRUE if loop tree is structurally changed. bool beautify_loops( PhaseIdealLoop *phase ); - // Perform iteration-splitting on inner loops. Split iterations to avoid - // range checks or one-shot null checks. - void iteration_split( PhaseIdealLoop *phase, Node_List &old_new ); - - // Driver for various flavors of iteration splitting - void iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ); + // Perform iteration-splitting on inner loops. Split iterations to + // avoid range checks or one-shot null checks. 
Returns false if the + // current round of loop opts should stop. + bool iteration_split( PhaseIdealLoop *phase, Node_List &old_new ); + + // Driver for various flavors of iteration splitting. Returns false + // if the current round of loop opts should stop. + bool iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ); // Given dominators, try to find loops with calls that must always be // executed (call dominates loop tail). These loops do not need non-call --- old/hotspot/src/share/vm/opto/loopopts.cpp 2009-08-01 04:13:58.382816945 +0100 +++ new/hotspot/src/share/vm/opto/loopopts.cpp 2009-08-01 04:13:58.288820242 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)loopopts.cpp 1.222 08/11/24 12:23:09 JVM" #endif /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ //------------------------------split_thru_phi--------------------------------- // Split Node 'n' through merge point if there is enough win. Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) { - if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::BOTTOM) { + if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) { // ConvI2L may have type information on it which is unsafe to push up // so disable this for now return NULL; @@ -40,7 +40,18 @@ int wins = 0; assert( !n->is_CFG(), "" ); assert( region->is_Region(), "" ); - Node *phi = new (C, region->req()) PhiNode( region, n->bottom_type() ); + + const Type* type = n->bottom_type(); + const TypeOopPtr *t_oop = _igvn.type(n)->isa_oopptr(); + Node *phi; + if( t_oop != NULL && t_oop->is_known_instance_field() ) { + int iid = t_oop->instance_id(); + int index = C->get_alias_index(t_oop); + int offset = t_oop->offset(); + phi = new (C,region->req()) PhiNode(region, type, NULL, iid, index, offset); + } else { + phi = new (C,region->req()) PhiNode(region, type); + } uint old_unique = C->unique(); for( uint i = 1; i < region->req(); i++ ) { Node *x; @@ -88,6 +99,10 @@ // our new node, even though we may throw the node away. // (Note: This tweaking with igvn only works because x is a new node.) _igvn.set_type(x, t); + // If x is a TypeNode, capture any more-precise type permanently into Node + // othewise it will be not updated during igvn->transform since + // igvn->type(x) is set to x->Value() already. + x->raise_bottom_type(t); Node *y = x->Identity(&_igvn); if( y != x ) { wins++; @@ -443,9 +458,11 @@ // Check profitability int cost = 0; + int phis = 0; for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { Node *out = region->fast_out(i); if( !out->is_Phi() ) continue; // Ignore other control edges, etc + phis++; PhiNode* phi = out->as_Phi(); switch (phi->type()->basic_type()) { case T_LONG: @@ -456,8 +473,9 @@ case T_ADDRESS: // (RawPtr) cost++; break; + case T_NARROWOOP: // Fall through case T_OBJECT: { // Base oops are OK, but not derived oops - const TypeOopPtr *tp = phi->type()->isa_oopptr(); + const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr(); // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a // CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus // CMOVE'ing a derived pointer requires we also CMOVE the base. If we @@ -488,15 +506,21 @@ return NULL; // Too much speculative goo } } - // See if the Phi is used by a Cmp. 
This will likely Split-If, a - // higher-payoff operation. + // See if the Phi is used by a Cmp or Narrow oop Decode/Encode. + // This will likely Split-If, a higher-payoff operation. for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) { Node* use = phi->fast_out(k); - if( use->is_Cmp() ) + if( use->is_Cmp() || use->is_DecodeN() || use->is_EncodeP() ) return NULL; } } if( cost >= ConditionalMoveLimit ) return NULL; // Too much goo + Node* bol = iff->in(1); + assert( bol->Opcode() == Op_Bool, "" ); + int cmp_op = bol->in(1)->Opcode(); + // It is expensive to generate flags from a float compare. + // Avoid duplicated float compare. + if( phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return NULL; // -------------- // Now replace all Phis with CMOV's @@ -561,7 +585,8 @@ Node *cmov = conditional_move( n ); if( cmov ) return cmov; } - if( n->is_CFG() || n_op == Op_StorePConditional || n_op == Op_StoreLConditional || n_op == Op_CompareAndSwapI || n_op == Op_CompareAndSwapL ||n_op == Op_CompareAndSwapP) return n; + if( n->is_CFG() || n->is_LoadStore() ) + return n; if( n_op == Op_Opaque1 || // Opaque nodes cannot be mod'd n_op == Op_Opaque2 ) { if( !C->major_progress() ) // If chance of no more loop opts... @@ -910,7 +935,7 @@ // to fold a StoreP and an AddP together (as part of an // address expression) and the AddP and StoreP have // different controls. - if( !x->is_Load() ) _igvn._worklist.yank(x); + if( !x->is_Load() && !x->is_DecodeN() ) _igvn._worklist.yank(x); } _igvn.remove_dead_node(n); } @@ -1874,18 +1899,16 @@ _igvn.hash_delete(use); use->set_req(j, n_clone); _igvn._worklist.push(use); + Node* use_c; if (!use->is_Phi()) { - Node* use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0); - set_ctrl(n_clone, use_c); - assert(!loop->is_member(get_loop(use_c)), "should be outside loop"); - get_loop(use_c)->_body.push(n_clone); + use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0); } else { // Use in a phi is considered a use in the associated predecessor block - Node *prevbb = use->in(0)->in(j); - set_ctrl(n_clone, prevbb); - assert(!loop->is_member(get_loop(prevbb)), "should be outside loop"); - get_loop(prevbb)->_body.push(n_clone); + use_c = use->in(0)->in(j); } + set_ctrl(n_clone, use_c); + assert(!loop->is_member(get_loop(use_c)), "should be outside loop"); + get_loop(use_c)->_body.push(n_clone); _igvn.register_new_node_with_optimizer(n_clone); #if !defined(PRODUCT) if (TracePartialPeeling) { @@ -2241,6 +2264,9 @@ // bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { + if (!loop->_head->is_Loop()) { + return false; } + LoopNode *head = loop->_head->as_Loop(); if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) { @@ -2641,6 +2667,10 @@ // Fix this by adjusting to use the post-increment trip counter. Node *phi = cl->phi(); if( !phi ) return; // Dead infinite loop + + // Shape messed up, probably by iteration_split_impl + if (phi->in(LoopNode::LoopBackControl) != cl->incr()) return; + bool progress = true; while (progress) { progress = false; @@ -2665,7 +2695,7 @@ if( !cle->stride_is_con() ) continue; // Hit! Refactor use to use the post-incremented tripcounter. // Compute a post-increment tripcounter. 
- Node *opaq = new (C, 2) Opaque2Node( cle->incr() ); + Node *opaq = new (C, 2) Opaque2Node( C, cle->incr() ); register_new_node( opaq, u_ctrl ); Node *neg_stride = _igvn.intcon(-cle->stride_con()); set_ctrl(neg_stride, C->root()); --- old/hotspot/src/share/vm/opto/machnode.cpp 2009-08-01 04:13:59.397683706 +0100 +++ new/hotspot/src/share/vm/opto/machnode.cpp 2009-08-01 04:13:59.320255141 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)machnode.cpp 1.200 07/09/28 10:23:08 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -265,10 +265,19 @@ // Now we have collected every part of the ADLC MEMORY_INTER. // See if it adds up to a base + offset. if (index != NULL) { - if (!index->is_Con()) { + const Type* t_index = index->bottom_type(); + if (t_index->isa_narrowoop()) { // EncodeN, LoadN, LoadConN, LoadNKlass. + // Memory references through narrow oops have a + // funny base so grab the type from the index: + // [R12 + narrow_oop_reg<<3 + offset] + assert(base == NULL, "Memory references through narrow oops have no base"); + offset = disp; + adr_type = t_index->make_ptr()->add_offset(offset); + return NULL; + } else if (!index->is_Con()) { disp = Type::OffsetBot; } else if (disp != Type::OffsetBot) { - const TypeX* ti = index->bottom_type()->isa_intptr_t(); + const TypeX* ti = t_index->isa_intptr_t(); if (ti == NULL) { disp = Type::OffsetBot; // a random constant?? } else { --- old/hotspot/src/share/vm/opto/macro.cpp 2009-08-01 04:14:00.959004193 +0100 +++ new/hotspot/src/share/vm/opto/macro.cpp 2009-08-01 04:14:00.877390692 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)macro.cpp 1.33 07/10/04 14:36:00 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,29 +57,59 @@ uint new_dbg_start = newcall->tf()->domain()->cnt(); int jvms_adj = new_dbg_start - old_dbg_start; assert (new_dbg_start == newcall->req(), "argument count mismatch"); + + Dict* sosn_map = new Dict(cmpkey,hashkey); for (uint i = old_dbg_start; i < oldcall->req(); i++) { - newcall->add_req(oldcall->in(i)); + Node* old_in = oldcall->in(i); + // Clone old SafePointScalarObjectNodes, adjusting their field contents. + if (old_in != NULL && old_in->is_SafePointScalarObject()) { + SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject(); + uint old_unique = C->unique(); + Node* new_in = old_sosn->clone(jvms_adj, sosn_map); + if (old_unique != C->unique()) { + new_in = transform_later(new_in); // Register new node. 
+ } + old_in = new_in; + } + newcall->add_req(old_in); } + newcall->set_jvms(oldcall->jvms()); for (JVMState *jvms = newcall->jvms(); jvms != NULL; jvms = jvms->caller()) { jvms->set_map(newcall); jvms->set_locoff(jvms->locoff()+jvms_adj); jvms->set_stkoff(jvms->stkoff()+jvms_adj); jvms->set_monoff(jvms->monoff()+jvms_adj); + jvms->set_scloff(jvms->scloff()+jvms_adj); jvms->set_endoff(jvms->endoff()+jvms_adj); } } -Node* PhaseMacroExpand::opt_iff(Node* region, Node* iff) { - IfNode *opt_iff = transform_later(iff)->as_If(); +Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) { + Node* cmp; + if (mask != 0) { + Node* and_node = transform_later(new (C, 3) AndXNode(word, MakeConX(mask))); + cmp = transform_later(new (C, 3) CmpXNode(and_node, MakeConX(bits))); + } else { + cmp = word; + } + Node* bol = transform_later(new (C, 2) BoolNode(cmp, BoolTest::ne)); + IfNode* iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN ); + transform_later(iff); - // Fast path taken; set region slot 2 - Node *fast_taken = transform_later( new (C, 1) IfFalseNode(opt_iff) ); - region->init_req(2,fast_taken); // Capture fast-control + // Fast path taken. + Node *fast_taken = transform_later( new (C, 1) IfFalseNode(iff) ); // Fast path not-taken, i.e. slow path - Node *slow_taken = transform_later( new (C, 1) IfTrueNode(opt_iff) ); - return slow_taken; + Node *slow_taken = transform_later( new (C, 1) IfTrueNode(iff) ); + + if (return_fast_path) { + region->init_req(edge, slow_taken); // Capture slow-control + return fast_taken; + } else { + region->init_req(edge, fast_taken); // Capture fast-control + return slow_taken; + } } //--------------------copy_predefined_input_for_runtime_call-------------------- @@ -169,6 +199,658 @@ } +// Eliminate a card mark sequence. p2x is a ConvP2XNode +void PhaseMacroExpand::eliminate_card_mark(Node *p2x) { + assert(p2x->Opcode() == Op_CastP2X, "ConvP2XNode required"); + Node *shift = p2x->unique_out(); + Node *addp = shift->unique_out(); + for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) { + Node *st = addp->last_out(j); + assert(st->is_Store(), "store required"); + _igvn.replace_node(st, st->in(MemNode::Memory)); + } +} + +// Search for a memory operation for the specified memory slice. +static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) { + Node *orig_mem = mem; + Node *alloc_mem = alloc->in(TypeFunc::Memory); + const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr(); + while (true) { + if (mem == alloc_mem || mem == start_mem ) { + return mem; // hit one of our sentinals + } else if (mem->is_MergeMem()) { + mem = mem->as_MergeMem()->memory_at(alias_idx); + } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) { + Node *in = mem->in(0); + // we can safely skip over safepoints, calls, locks and membars because we + // already know that the object is safe to eliminate. 
+ if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) { + return in; + } else if (in->is_Call()) { + CallNode *call = in->as_Call(); + if (!call->may_modify(tinst, phase)) { + mem = call->in(TypeFunc::Memory); + } + mem = in->in(TypeFunc::Memory); + } else if (in->is_MemBar()) { + mem = in->in(TypeFunc::Memory); + } else { + assert(false, "unexpected projection"); + } + } else if (mem->is_Store()) { + const TypePtr* atype = mem->as_Store()->adr_type(); + int adr_idx = Compile::current()->get_alias_index(atype); + if (adr_idx == alias_idx) { + assert(atype->isa_oopptr(), "address type must be oopptr"); + int adr_offset = atype->offset(); + uint adr_iid = atype->is_oopptr()->instance_id(); + // Array elements references have the same alias_idx + // but different offset and different instance_id. + if (adr_offset == offset && adr_iid == alloc->_idx) + return mem; + } else { + assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw"); + } + mem = mem->in(MemNode::Memory); + } else { + return mem; + } + assert(mem != orig_mem, "dead memory loop"); + } +} + +// +// Given a Memory Phi, compute a value Phi containing the values from stores +// on the input paths. +// Note: this function is recursive, its depth is limied by the "level" argument +// Returns the computed Phi, or NULL if it cannot compute it. +Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level) { + assert(mem->is_Phi(), "sanity"); + int alias_idx = C->get_alias_index(adr_t); + int offset = adr_t->offset(); + int instance_id = adr_t->instance_id(); + + // Check if an appropriate value phi already exists. + Node* region = mem->in(0); + for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) { + Node* phi = region->fast_out(k); + if (phi->is_Phi() && phi != mem && + phi->as_Phi()->is_same_inst_field(phi_type, instance_id, alias_idx, offset)) { + return phi; + } + } + // Check if an appropriate new value phi already exists. 
+ Node* new_phi = NULL; + uint size = value_phis->size(); + for (uint i=0; i < size; i++) { + if ( mem->_idx == value_phis->index_at(i) ) { + return value_phis->node_at(i); + } + } + + if (level <= 0) { + return NULL; // Give up: phi tree too deep + } + Node *start_mem = C->start()->proj_out(TypeFunc::Memory); + Node *alloc_mem = alloc->in(TypeFunc::Memory); + + uint length = mem->req(); + GrowableArray values(length, length, NULL); + + // create a new Phi for the value + PhiNode *phi = new (C, length) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset); + transform_later(phi); + value_phis->push(phi, mem->_idx); + + for (uint j = 1; j < length; j++) { + Node *in = mem->in(j); + if (in == NULL || in->is_top()) { + values.at_put(j, in); + } else { + Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn); + if (val == start_mem || val == alloc_mem) { + // hit a sentinel, return appropriate 0 value + values.at_put(j, _igvn.zerocon(ft)); + continue; + } + if (val->is_Initialize()) { + val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn); + } + if (val == NULL) { + return NULL; // can't find a value on this path + } + if (val == mem) { + values.at_put(j, mem); + } else if (val->is_Store()) { + values.at_put(j, val->in(MemNode::ValueIn)); + } else if(val->is_Proj() && val->in(0) == alloc) { + values.at_put(j, _igvn.zerocon(ft)); + } else if (val->is_Phi()) { + val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1); + if (val == NULL) { + return NULL; + } + values.at_put(j, val); + } else { + assert(false, "unknown node on this path"); + return NULL; // unknown node on this path + } + } + } + // Set Phi's inputs + for (uint j = 1; j < length; j++) { + if (values.at(j) == mem) { + phi->init_req(j, phi); + } else { + phi->init_req(j, values.at(j)); + } + } + return phi; +} + +// Search the last value stored into the object's field. +Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc) { + assert(adr_t->is_known_instance_field(), "instance required"); + int instance_id = adr_t->instance_id(); + assert((uint)instance_id == alloc->_idx, "wrong allocation"); + + int alias_idx = C->get_alias_index(adr_t); + int offset = adr_t->offset(); + Node *start_mem = C->start()->proj_out(TypeFunc::Memory); + Node *alloc_ctrl = alloc->in(TypeFunc::Control); + Node *alloc_mem = alloc->in(TypeFunc::Memory); + Arena *a = Thread::current()->resource_area(); + VectorSet visited(a); + + + bool done = sfpt_mem == alloc_mem; + Node *mem = sfpt_mem; + while (!done) { + if (visited.test_set(mem->_idx)) { + return NULL; // found a loop, give up + } + mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn); + if (mem == start_mem || mem == alloc_mem) { + done = true; // hit a sentinel, return appropriate 0 value + } else if (mem->is_Initialize()) { + mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn); + if (mem == NULL) { + done = true; // Something go wrong. 
+ } else if (mem->is_Store()) { + const TypePtr* atype = mem->as_Store()->adr_type(); + assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice"); + done = true; + } + } else if (mem->is_Store()) { + const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr(); + assert(atype != NULL, "address type must be oopptr"); + assert(C->get_alias_index(atype) == alias_idx && + atype->is_known_instance_field() && atype->offset() == offset && + atype->instance_id() == instance_id, "store is correct memory slice"); + done = true; + } else if (mem->is_Phi()) { + // try to find a phi's unique input + Node *unique_input = NULL; + Node *top = C->top(); + for (uint i = 1; i < mem->req(); i++) { + Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn); + if (n == NULL || n == top || n == mem) { + continue; + } else if (unique_input == NULL) { + unique_input = n; + } else if (unique_input != n) { + unique_input = top; + break; + } + } + if (unique_input != NULL && unique_input != top) { + mem = unique_input; + } else { + done = true; + } + } else { + assert(false, "unexpected node"); + } + } + if (mem != NULL) { + if (mem == start_mem || mem == alloc_mem) { + // hit a sentinel, return appropriate 0 value + return _igvn.zerocon(ft); + } else if (mem->is_Store()) { + return mem->in(MemNode::ValueIn); + } else if (mem->is_Phi()) { + // attempt to produce a Phi reflecting the values on the input paths of the Phi + Node_Stack value_phis(a, 8); + Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit); + if (phi != NULL) { + return phi; + } else { + // Kill all new Phis + while(value_phis.is_nonempty()) { + Node* n = value_phis.node(); + _igvn.hash_delete(n); + _igvn.subsume_node(n, C->top()); + value_phis.pop(); + } + } + } + } + // Something go wrong. + return NULL; +} + +// Check the possibility of scalar replacement. +bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray & safepoints) { + // Scan the uses of the allocation to check for anything that would + // prevent us from eliminating it. + NOT_PRODUCT( const char* fail_eliminate = NULL; ) + DEBUG_ONLY( Node* disq_node = NULL; ) + bool can_eliminate = true; + + Node* res = alloc->result_cast(); + const TypeOopPtr* res_type = NULL; + if (res == NULL) { + // All users were eliminated. 
+ } else if (!res->is_CheckCastPP()) { + alloc->_is_scalar_replaceable = false; // don't try again + NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";) + can_eliminate = false; + } else { + res_type = _igvn.type(res)->isa_oopptr(); + if (res_type == NULL) { + NOT_PRODUCT(fail_eliminate = "Neither instance or array allocation";) + can_eliminate = false; + } else if (res_type->isa_aryptr()) { + int length = alloc->in(AllocateNode::ALength)->find_int_con(-1); + if (length < 0) { + NOT_PRODUCT(fail_eliminate = "Array's size is not constant";) + can_eliminate = false; + } + } + } + + if (can_eliminate && res != NULL) { + for (DUIterator_Fast jmax, j = res->fast_outs(jmax); + j < jmax && can_eliminate; j++) { + Node* use = res->fast_out(j); + + if (use->is_AddP()) { + const TypePtr* addp_type = _igvn.type(use)->is_ptr(); + int offset = addp_type->offset(); + + if (offset == Type::OffsetTop || offset == Type::OffsetBot) { + NOT_PRODUCT(fail_eliminate = "Undefined field referrence";) + can_eliminate = false; + break; + } + for (DUIterator_Fast kmax, k = use->fast_outs(kmax); + k < kmax && can_eliminate; k++) { + Node* n = use->fast_out(k); + if (!n->is_Store() && n->Opcode() != Op_CastP2X) { + DEBUG_ONLY(disq_node = n;) + if (n->is_Load() || n->is_LoadStore()) { + NOT_PRODUCT(fail_eliminate = "Field load";) + } else { + NOT_PRODUCT(fail_eliminate = "Not store field referrence";) + } + can_eliminate = false; + } + } + } else if (use->is_SafePoint()) { + SafePointNode* sfpt = use->as_SafePoint(); + if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) { + // Object is passed as argument. + DEBUG_ONLY(disq_node = use;) + NOT_PRODUCT(fail_eliminate = "Object is passed as argument";) + can_eliminate = false; + } + Node* sfptMem = sfpt->memory(); + if (sfptMem == NULL || sfptMem->is_top()) { + DEBUG_ONLY(disq_node = use;) + NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";) + can_eliminate = false; + } else { + safepoints.append_if_missing(sfpt); + } + } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark + if (use->is_Phi()) { + if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) { + NOT_PRODUCT(fail_eliminate = "Object is return value";) + } else { + NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";) + } + DEBUG_ONLY(disq_node = use;) + } else { + if (use->Opcode() == Op_Return) { + NOT_PRODUCT(fail_eliminate = "Object is return value";) + }else { + NOT_PRODUCT(fail_eliminate = "Object is referenced by node";) + } + DEBUG_ONLY(disq_node = use;) + } + can_eliminate = false; + } + } + } + +#ifndef PRODUCT + if (PrintEliminateAllocations) { + if (can_eliminate) { + tty->print("Scalar "); + if (res == NULL) + alloc->dump(); + else + res->dump(); + } else { + tty->print("NotScalar (%s)", fail_eliminate); + if (res == NULL) + alloc->dump(); + else + res->dump(); +#ifdef ASSERT + if (disq_node != NULL) { + tty->print(" >>>> "); + disq_node->dump(); + } +#endif /*ASSERT*/ + } + } +#endif + return can_eliminate; +} + +// Do scalar replacement. 
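For every safepoint that still references the allocation, scalar_replacement() appends one debug input per field (reconstructed with value_from_mem) and describes them with a SafePointScalarObjectNode recording where they start and how many there are, so deoptimization can rematerialize the object. A minimal standalone sketch of that bookkeeping, with invented container types rather than the real node classes:

    #include <cstdio>
    #include <vector>

    struct Safepoint    { std::vector<int> debug; };          // flattened debug info
    struct ScalarObject { int first_index; int n_fields; };   // SafePointScalarObjectNode analogue

    // Append the reconstructed field values to the safepoint's debug info and
    // remember (first_index, n_fields) so the object can be rebuilt on deopt.
    ScalarObject scalarize(Safepoint& sfpt, const std::vector<int>& field_values) {
      ScalarObject so = { (int)sfpt.debug.size(), (int)field_values.size() };
      for (int v : field_values) sfpt.debug.push_back(v);
      return so;
    }

    int main() {
      Safepoint sfpt = { {1, 2, 3} };                 // pre-existing debug inputs
      ScalarObject so = scalarize(sfpt, {42, 7});     // two captured field values
      std::printf("fields start at %d, count %d\n", so.first_index, so.n_fields);
      return 0;
    }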
+bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray & safepoints) { + GrowableArray safepoints_done; + + ciKlass* klass = NULL; + ciInstanceKlass* iklass = NULL; + int nfields = 0; + int array_base; + int element_size; + BasicType basic_elem_type; + ciType* elem_type; + + Node* res = alloc->result_cast(); + const TypeOopPtr* res_type = NULL; + if (res != NULL) { // Could be NULL when there are no users + res_type = _igvn.type(res)->isa_oopptr(); + } + + if (res != NULL) { + klass = res_type->klass(); + if (res_type->isa_instptr()) { + // find the fields of the class which will be needed for safepoint debug information + assert(klass->is_instance_klass(), "must be an instance klass."); + iklass = klass->as_instance_klass(); + nfields = iklass->nof_nonstatic_fields(); + } else { + // find the array's elements which will be needed for safepoint debug information + nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1); + assert(klass->is_array_klass() && nfields >= 0, "must be an array klass."); + elem_type = klass->as_array_klass()->element_type(); + basic_elem_type = elem_type->basic_type(); + array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type); + element_size = type2aelembytes(basic_elem_type); + } + } + // + // Process the safepoint uses + // + while (safepoints.length() > 0) { + SafePointNode* sfpt = safepoints.pop(); + Node* mem = sfpt->memory(); + uint first_ind = sfpt->req(); + SafePointScalarObjectNode* sobj = new (C, 1) SafePointScalarObjectNode(res_type, +#ifdef ASSERT + alloc, +#endif + first_ind, nfields); + sobj->init_req(0, sfpt->in(TypeFunc::Control)); + transform_later(sobj); + + // Scan object's fields adding an input to the safepoint for each field. + for (int j = 0; j < nfields; j++) { + intptr_t offset; + ciField* field = NULL; + if (iklass != NULL) { + field = iklass->nonstatic_field_at(j); + offset = field->offset(); + elem_type = field->type(); + basic_elem_type = field->layout_type(); + } else { + offset = array_base + j * (intptr_t)element_size; + } + + const Type *field_type; + // The next code is taken from Parse::do_get_xxx(). + if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) { + if (!elem_type->is_loaded()) { + field_type = TypeInstPtr::BOTTOM; + } else if (field != NULL && field->is_constant()) { + // This can happen if the constant oop is non-perm. + ciObject* con = field->constant_value().as_object(); + // Do not "join" in the previous type; it doesn't add value, + // and may yield a vacuous result if the field is of interface type. 
+ field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr(); + assert(field_type != NULL, "field singleton type must be consistent"); + } else { + field_type = TypeOopPtr::make_from_klass(elem_type->as_klass()); + } + if (UseCompressedOops) { + field_type = field_type->make_narrowoop(); + basic_elem_type = T_NARROWOOP; + } + } else { + field_type = Type::get_const_basic_type(basic_elem_type); + } + + const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr(); + + Node *field_val = value_from_mem(mem, basic_elem_type, field_type, field_addr_type, alloc); + if (field_val == NULL) { + // we weren't able to find a value for this field, + // give up on eliminating this allocation + alloc->_is_scalar_replaceable = false; // don't try again + // remove any extra entries we added to the safepoint + uint last = sfpt->req() - 1; + for (int k = 0; k < j; k++) { + sfpt->del_req(last--); + } + // rollback processed safepoints + while (safepoints_done.length() > 0) { + SafePointNode* sfpt_done = safepoints_done.pop(); + // remove any extra entries we added to the safepoint + last = sfpt_done->req() - 1; + for (int k = 0; k < nfields; k++) { + sfpt_done->del_req(last--); + } + JVMState *jvms = sfpt_done->jvms(); + jvms->set_endoff(sfpt_done->req()); + // Now make a pass over the debug information replacing any references + // to SafePointScalarObjectNode with the allocated object. + int start = jvms->debug_start(); + int end = jvms->debug_end(); + for (int i = start; i < end; i++) { + if (sfpt_done->in(i)->is_SafePointScalarObject()) { + SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject(); + if (scobj->first_index() == sfpt_done->req() && + scobj->n_fields() == (uint)nfields) { + assert(scobj->alloc() == alloc, "sanity"); + sfpt_done->set_req(i, res); + } + } + } + } +#ifndef PRODUCT + if (PrintEliminateAllocations) { + if (field != NULL) { + tty->print("=== At SafePoint node %d can't find value of Field: ", + sfpt->_idx); + field->print(); + int field_idx = C->get_alias_index(field_addr_type); + tty->print(" (alias_idx=%d)", field_idx); + } else { // Array's element + tty->print("=== At SafePoint node %d can't find value of array element [%d]", + sfpt->_idx, j); + } + tty->print(", which prevents elimination of: "); + if (res == NULL) + alloc->dump(); + else + res->dump(); + } +#endif + return false; + } + if (UseCompressedOops && field_type->isa_narrowoop()) { + // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation + // to be able scalar replace the allocation. + if (field_val->is_EncodeP()) { + field_val = field_val->in(1); + } else { + field_val = transform_later(new (C, 2) DecodeNNode(field_val, field_val->bottom_type()->make_ptr())); + } + } + sfpt->add_req(field_val); + } + JVMState *jvms = sfpt->jvms(); + jvms->set_endoff(sfpt->req()); + // Now make a pass over the debug information replacing any references + // to the allocated object with "sobj" + int start = jvms->debug_start(); + int end = jvms->debug_end(); + for (int i = start; i < end; i++) { + if (sfpt->in(i) == res) { + sfpt->set_req(i, sobj); + } + } + safepoints_done.append_if_missing(sfpt); // keep it for rollback + } + return true; +} + +// Process users of eliminated allocation. 
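Once the field values are captured, every remaining use of the allocation is dead: field stores are bypassed by wiring their users to the store's own memory input, the CastP2X card-mark chains are removed, and the allocation's control/memory/I_O projections are routed straight to the allocation's inputs. The store bypass is just a splice of the memory chain; a tiny standalone sketch with an invented node type:

    // Bypassing a store: users of the store's memory state are redirected to
    // the memory state the store itself consumed (what _igvn.replace_node does
    // below for each store reaching the dead object).
    struct MemOp { MemOp* prev; };

    MemOp* bypass_store(MemOp* store) {
      return store->prev;
    }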
+void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) { + Node* res = alloc->result_cast(); + if (res != NULL) { + for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) { + Node *use = res->last_out(j); + uint oc1 = res->outcnt(); + + if (use->is_AddP()) { + for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) { + Node *n = use->last_out(k); + uint oc2 = use->outcnt(); + if (n->is_Store()) { + _igvn.replace_node(n, n->in(MemNode::Memory)); + } else { + assert( n->Opcode() == Op_CastP2X, "CastP2X required"); + eliminate_card_mark(n); + } + k -= (oc2 - use->outcnt()); + } + } else { + assert( !use->is_SafePoint(), "safepoint uses must have been already elimiated"); + assert( use->Opcode() == Op_CastP2X, "CastP2X required"); + eliminate_card_mark(use); + } + j -= (oc1 - res->outcnt()); + } + assert(res->outcnt() == 0, "all uses of allocated objects must be deleted"); + _igvn.remove_dead_node(res); + } + + // + // Process other users of allocation's projections + // + if (_resproj != NULL && _resproj->outcnt() != 0) { + for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) { + Node *use = _resproj->last_out(j); + uint oc1 = _resproj->outcnt(); + if (use->is_Initialize()) { + // Eliminate Initialize node. + InitializeNode *init = use->as_Initialize(); + assert(init->outcnt() <= 2, "only a control and memory projection expected"); + Node *ctrl_proj = init->proj_out(TypeFunc::Control); + if (ctrl_proj != NULL) { + assert(init->in(TypeFunc::Control) == _fallthroughcatchproj, "allocation control projection"); + _igvn.replace_node(ctrl_proj, _fallthroughcatchproj); + } + Node *mem_proj = init->proj_out(TypeFunc::Memory); + if (mem_proj != NULL) { + Node *mem = init->in(TypeFunc::Memory); +#ifdef ASSERT + if (mem->is_MergeMem()) { + assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection"); + } else { + assert(mem == _memproj_fallthrough, "allocation memory projection"); + } +#endif + _igvn.replace_node(mem_proj, mem); + } + } else if (use->is_AddP()) { + // raw memory addresses used only by the initialization + _igvn.hash_delete(use); + _igvn.subsume_node(use, C->top()); + } else { + assert(false, "only Initialize or AddP expected"); + } + j -= (oc1 - _resproj->outcnt()); + } + } + if (_fallthroughcatchproj != NULL) { + _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control)); + } + if (_memproj_fallthrough != NULL) { + _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory)); + } + if (_memproj_catchall != NULL) { + _igvn.replace_node(_memproj_catchall, C->top()); + } + if (_ioproj_fallthrough != NULL) { + _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O)); + } + if (_ioproj_catchall != NULL) { + _igvn.replace_node(_ioproj_catchall, C->top()); + } + if (_catchallcatchproj != NULL) { + _igvn.replace_node(_catchallcatchproj, C->top()); + } +} + +bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { + + if (!EliminateAllocations || !alloc->_is_scalar_replaceable) { + return false; + } + + extract_call_projections(alloc); + + GrowableArray safepoints; + if (!can_eliminate_allocation(alloc, safepoints)) { + return false; + } + + if (!scalar_replacement(alloc, safepoints)) { + return false; + } + + process_users_of_allocation(alloc); + +#ifndef PRODUCT +if (PrintEliminateAllocations) { + if (alloc->is_AllocateArray()) + tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx); + else + tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx); +} 
+#endif + + return true; +} + //---------------------------set_eden_pointers------------------------- void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) { @@ -190,8 +872,8 @@ Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) { Node* adr = basic_plus_adr(base, offset); - const TypePtr* adr_type = TypeRawPtr::BOTTOM; - Node* value = LoadNode::make(C, ctl, mem, adr, adr_type, value_type, bt); + const TypePtr* adr_type = adr->bottom_type()->is_ptr(); + Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt); transform_later(value); return value; } @@ -199,7 +881,7 @@ Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) { Node* adr = basic_plus_adr(base, offset); - mem = StoreNode::make(C, ctl, mem, adr, NULL, value, bt); + mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt); transform_later(mem); return mem; } @@ -273,25 +955,14 @@ Node* klass_node = alloc->in(AllocateNode::KlassNode); Node* initial_slow_test = alloc->in(AllocateNode::InitialTest); - Node* eden_top_adr; - Node* eden_end_adr; - set_eden_pointers(eden_top_adr, eden_end_adr); + // With escape analysis, the entire memory state was needed to be able to + // eliminate the allocation. Since the allocations cannot be eliminated, + // optimize it to the raw slice. + if (mem->is_MergeMem()) { + mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw); + } - uint raw_idx = C->get_alias_index(TypeRawPtr::BOTTOM); assert(ctrl != NULL, "must have control"); - - // Load Eden::end. Loop invariant and hoisted. - // - // Note: We set the control input on "eden_end" and "old_eden_top" when using - // a TLAB to work around a bug where these values were being moved across - // a safepoint. These are not oops, so they cannot be include in the oop - // map, but the can be changed by a GC. The proper way to fix this would - // be to set the raw memory state when generating a SafepointNode. However - // this will require extensive changes to the loop optimization in order to - // prevent a degradation of the optimization. - // See comment in memnode.hpp, around line 227 in class LoadPNode. - Node* eden_end = make_load(ctrl, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS); - // We need a Region and corresponding Phi's to merge the slow-path and fast-path results. // they will not be used if "always_slow" is set enum { slow_result_path = 1, fast_result_path = 2 }; @@ -311,12 +982,15 @@ initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn); } - if (DTraceAllocProbes) { + if (DTraceAllocProbes || + !UseTLAB && (!Universe::heap()->supports_inline_contig_alloc() || + (UseConcMarkSweepGC && CMSIncrementalMode))) { // Force slow-path allocation always_slow = true; initial_slow_test = NULL; } + enum { too_big_or_final_path = 1, need_gc_path = 2 }; Node *slow_region = NULL; Node *toobig_false = ctrl; @@ -345,6 +1019,23 @@ Node *slow_mem = mem; // save the current memory state for slow path // generate the fast allocation code unless we know that the initial test will always go slow if (!always_slow) { + Node* eden_top_adr; + Node* eden_end_adr; + + set_eden_pointers(eden_top_adr, eden_end_adr); + + // Load Eden::end. Loop invariant and hoisted. + // + // Note: We set the control input on "eden_end" and "old_eden_top" when using + // a TLAB to work around a bug where these values were being moved across + // a safepoint. 
These are not oops, so they cannot be include in the oop + // map, but the can be changed by a GC. The proper way to fix this would + // be to set the raw memory state when generating a SafepointNode. However + // this will require extensive changes to the loop optimization in order to + // prevent a degradation of the optimization. + // See comment in memnode.hpp, around line 227 in class LoadPNode. + Node *eden_end = make_load(ctrl, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS); + // allocate the Region and Phi nodes for the result result_region = new (C, 3) RegionNode(3); result_phi_rawmem = new (C, 3) PhiNode( result_region, Type::MEMORY, TypeRawPtr::BOTTOM ); @@ -635,6 +1326,7 @@ mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype())); } rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS); + rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_OBJECT); int header_size = alloc->minimum_header_size(); // conservatively small @@ -642,7 +1334,7 @@ if (length != NULL) { // Arrays need length field rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT); // conservatively small header size: - header_size = sizeof(arrayOopDesc); + header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE); ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass(); if (k->is_array_klass()) // we know the exact header size in most cases: header_size = Klass::layout_helper_header_size(k->layout_helper()); @@ -671,7 +1363,6 @@ rawmem = init->complete_stores(control, rawmem, object, header_size, size_in_bytes, &_igvn); } - // We have no more use for this link, since the AllocateNode goes away: init->set_req(InitializeNode::RawAddress, top()); // (If we keep the link, it just confuses the register allocator, @@ -816,27 +1507,138 @@ // Note: The membar's associated with the lock/unlock are currently not // eliminated. This should be investigated as a future enhancement. // -void PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) { - Node* mem = alock->in(TypeFunc::Memory); +bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) { + + if (!alock->is_eliminated()) { + return false; + } + if (alock->is_Lock() && !alock->is_coarsened()) { + // Create new "eliminated" BoxLock node and use it + // in monitor debug info for the same object. + BoxLockNode* oldbox = alock->box_node()->as_BoxLock(); + Node* obj = alock->obj_node(); + if (!oldbox->is_eliminated()) { + BoxLockNode* newbox = oldbox->clone()->as_BoxLock(); + newbox->set_eliminated(); + transform_later(newbox); + // Replace old box node with new box for all users + // of the same object. + for (uint i = 0; i < oldbox->outcnt();) { + + bool next_edge = true; + Node* u = oldbox->raw_out(i); + if (u == alock) { + i++; + continue; // It will be removed below + } + if (u->is_Lock() && + u->as_Lock()->obj_node() == obj && + // oldbox could be referenced in debug info also + u->as_Lock()->box_node() == oldbox) { + assert(u->as_Lock()->is_eliminated(), "sanity"); + _igvn.hash_delete(u); + u->set_req(TypeFunc::Parms + 1, newbox); + next_edge = false; +#ifdef ASSERT + } else if (u->is_Unlock() && u->as_Unlock()->obj_node() == obj) { + assert(u->as_Unlock()->is_eliminated(), "sanity"); +#endif + } + // Replace old box in monitor debug info. 
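The scan below visits every safepoint that still refers to the old BoxLock, walks its nested JVM states, and swaps in the new box flagged as eliminated wherever the same object is locked, so the deoptimizer knows this monitor was elided. A simplified standalone analogue, with ints standing in for the object and box nodes:

    #include <vector>

    struct Monitor  { int obj; int box; };
    struct JvmState { std::vector<Monitor> monitors; };   // one per inlining depth

    void replace_box(std::vector<JvmState>& frames, int obj, int oldbox, int newbox) {
      for (JvmState& js : frames)
        for (Monitor& m : js.monitors)
          if (m.obj == obj && m.box == oldbox)
            m.box = newbox;     // monitor now described by the eliminated box
    }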
+ if (u->is_SafePoint() && u->as_SafePoint()->jvms()) { + SafePointNode* sfn = u->as_SafePoint(); + JVMState* youngest_jvms = sfn->jvms(); + int max_depth = youngest_jvms->depth(); + for (int depth = 1; depth <= max_depth; depth++) { + JVMState* jvms = youngest_jvms->of_depth(depth); + int num_mon = jvms->nof_monitors(); + // Loop over monitors + for (int idx = 0; idx < num_mon; idx++) { + Node* obj_node = sfn->monitor_obj(jvms, idx); + Node* box_node = sfn->monitor_box(jvms, idx); + if (box_node == oldbox && obj_node == obj) { + int j = jvms->monitor_box_offset(idx); + _igvn.hash_delete(u); + u->set_req(j, newbox); + next_edge = false; + } + } // for (int idx = 0; + } // for (int depth = 1; + } // if (u->is_SafePoint() + if (next_edge) i++; + } // for (uint i = 0; i < oldbox->outcnt();) + } // if (!oldbox->is_eliminated()) + } // if (alock->is_Lock() && !lock->is_coarsened()) + + #ifndef PRODUCT + if (PrintEliminateLocks) { + if (alock->is_Lock()) { + tty->print_cr("++++ Eliminating: %d Lock", alock->_idx); + } else { + tty->print_cr("++++ Eliminating: %d Unlock", alock->_idx); + } + } + #endif + + Node* mem = alock->in(TypeFunc::Memory); + Node* ctrl = alock->in(TypeFunc::Control); + + extract_call_projections(alock); + // There are 2 projections from the lock. The lock node will + // be deleted when its last use is subsumed below. + assert(alock->outcnt() == 2 && + _fallthroughproj != NULL && + _memproj_fallthrough != NULL, + "Unexpected projections from Lock/Unlock"); + + Node* fallthroughproj = _fallthroughproj; + Node* memproj_fallthrough = _memproj_fallthrough; // The memory projection from a lock/unlock is RawMem // The input to a Lock is merged memory, so extract its RawMem input // (unless the MergeMem has been optimized away.) if (alock->is_Lock()) { - if (mem->is_MergeMem()) - mem = mem->as_MergeMem()->in(Compile::AliasIdxRaw); + // Seach for MemBarAcquire node and delete it also. + MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar(); + assert(membar != NULL && membar->Opcode() == Op_MemBarAcquire, ""); + Node* ctrlproj = membar->proj_out(TypeFunc::Control); + Node* memproj = membar->proj_out(TypeFunc::Memory); + _igvn.hash_delete(ctrlproj); + _igvn.subsume_node(ctrlproj, fallthroughproj); + _igvn.hash_delete(memproj); + _igvn.subsume_node(memproj, memproj_fallthrough); + + // Delete FastLock node also if this Lock node is unique user + // (a loop peeling may clone a Lock node). + Node* flock = alock->as_Lock()->fastlock_node(); + if (flock->outcnt() == 1) { + assert(flock->unique_out() == alock, "sanity"); + _igvn.hash_delete(flock); + _igvn.subsume_node(flock, top()); + } } - extract_call_projections(alock); - // There are 2 projections from the lock. The lock node will - // be deleted when its last use is subsumed below. - assert(alock->outcnt() == 2 && _fallthroughproj != NULL && - _memproj_fallthrough != NULL, "Unexpected projections from Lock/Unlock"); - _igvn.hash_delete(_fallthroughproj); - _igvn.subsume_node(_fallthroughproj, alock->in(TypeFunc::Control)); - _igvn.hash_delete(_memproj_fallthrough); - _igvn.subsume_node(_memproj_fallthrough, mem); - return; + // Seach for MemBarRelease node and delete it also. 
+ if (alock->is_Unlock() && ctrl != NULL && ctrl->is_Proj() && + ctrl->in(0)->is_MemBar()) { + MemBarNode* membar = ctrl->in(0)->as_MemBar(); + assert(membar->Opcode() == Op_MemBarRelease && + mem->is_Proj() && membar == mem->in(0), ""); + _igvn.hash_delete(fallthroughproj); + _igvn.subsume_node(fallthroughproj, ctrl); + _igvn.hash_delete(memproj_fallthrough); + _igvn.subsume_node(memproj_fallthrough, mem); + fallthroughproj = ctrl; + memproj_fallthrough = mem; + ctrl = membar->in(TypeFunc::Control); + mem = membar->in(TypeFunc::Memory); + } + + _igvn.hash_delete(fallthroughproj); + _igvn.subsume_node(fallthroughproj, ctrl); + _igvn.hash_delete(memproj_fallthrough); + _igvn.subsume_node(memproj_fallthrough, mem); + return true; } @@ -847,21 +1649,204 @@ Node* mem = lock->in(TypeFunc::Memory); Node* obj = lock->obj_node(); Node* box = lock->box_node(); - Node *flock = lock->fastlock_node(); + Node* flock = lock->fastlock_node(); - if (lock->is_eliminated()) { - eliminate_locking_node(lock); - return; + // Make the merge point + Node *region; + Node *mem_phi; + Node *slow_path; + + if (UseOptoBiasInlining) { + /* + * See the full descrition in MacroAssembler::biased_locking_enter(). + * + * if( (mark_word & biased_lock_mask) == biased_lock_pattern ) { + * // The object is biased. + * proto_node = klass->prototype_header; + * o_node = thread | proto_node; + * x_node = o_node ^ mark_word; + * if( (x_node & ~age_mask) == 0 ) { // Biased to the current thread ? + * // Done. + * } else { + * if( (x_node & biased_lock_mask) != 0 ) { + * // The klass's prototype header is no longer biased. + * cas(&mark_word, mark_word, proto_node) + * goto cas_lock; + * } else { + * // The klass's prototype header is still biased. + * if( (x_node & epoch_mask) != 0 ) { // Expired epoch? + * old = mark_word; + * new = o_node; + * } else { + * // Different thread or anonymous biased. + * old = mark_word & (epoch_mask | age_mask | biased_lock_mask); + * new = thread | old; + * } + * // Try to rebias. + * if( cas(&mark_word, old, new) == 0 ) { + * // Done. + * } else { + * goto slow_path; // Failed. + * } + * } + * } + * } else { + * // The object is not biased. + * cas_lock: + * if( FastLock(obj) == 0 ) { + * // Done. + * } else { + * slow_path: + * OptoRuntime::complete_monitor_locking_Java(obj); + * } + * } + */ + + region = new (C, 5) RegionNode(5); + // create a Phi for the memory state + mem_phi = new (C, 5) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); + + Node* fast_lock_region = new (C, 3) RegionNode(3); + Node* fast_lock_mem_phi = new (C, 3) PhiNode( fast_lock_region, Type::MEMORY, TypeRawPtr::BOTTOM); + + // First, check mark word for the biased lock pattern. + Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type()); + + // Get fast path - mark word has the biased lock pattern. + ctrl = opt_bits_test(ctrl, fast_lock_region, 1, mark_node, + markOopDesc::biased_lock_mask_in_place, + markOopDesc::biased_lock_pattern, true); + // fast_lock_region->in(1) is set to slow path. + fast_lock_mem_phi->init_req(1, mem); + + // Now check that the lock is biased to the current thread and has + // the same epoch and bias as Klass::_prototype_header. 
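The ownership test built next is the XOR trick from the pseudo-code above: or the current thread into the prototype header, xor that with the mark word, and the object is already biased to this thread exactly when everything outside the age bits cancels out. A standalone sketch of the arithmetic; the mask and pattern constants are illustrative placeholders, not HotSpot's real markOop layout:

    #include <cstdint>

    const std::uint64_t kBiasedLockMask = 0x7;    // stands in for biased_lock_mask_in_place
    const std::uint64_t kAgeMask        = 0x78;   // stands in for age_mask_in_place

    // Cheap pattern check done first (opt_bits_test above).
    bool has_bias_pattern(std::uint64_t mark) {
      return (mark & kBiasedLockMask) == 0x5;     // illustrative biased_lock_pattern
    }

    // x = (thread | proto_header) ^ mark_word; biased to us iff only age differs.
    bool biased_to_current_thread(std::uint64_t mark, std::uint64_t thread,
                                  std::uint64_t proto) {
      std::uint64_t x = (thread | proto) ^ mark;
      return (x & ~kAgeMask) == 0;
    }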
+ + // Special-case a fresh allocation to avoid building nodes: + Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn); + if (klass_node == NULL) { + Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes()); + klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) ); +#ifdef _LP64 + if (UseCompressedOops && klass_node->is_DecodeN()) { + assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity"); + klass_node->in(1)->init_req(0, ctrl); + } else +#endif + klass_node->init_req(0, ctrl); + } + Node *proto_node = make_load(ctrl, mem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeX_X, TypeX_X->basic_type()); + + Node* thread = transform_later(new (C, 1) ThreadLocalNode()); + Node* cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread)); + Node* o_node = transform_later(new (C, 3) OrXNode(cast_thread, proto_node)); + Node* x_node = transform_later(new (C, 3) XorXNode(o_node, mark_node)); + + // Get slow path - mark word does NOT match the value. + Node* not_biased_ctrl = opt_bits_test(ctrl, region, 3, x_node, + (~markOopDesc::age_mask_in_place), 0); + // region->in(3) is set to fast path - the object is biased to the current thread. + mem_phi->init_req(3, mem); + + + // Mark word does NOT match the value (thread | Klass::_prototype_header). + + + // First, check biased pattern. + // Get fast path - _prototype_header has the same biased lock pattern. + ctrl = opt_bits_test(not_biased_ctrl, fast_lock_region, 2, x_node, + markOopDesc::biased_lock_mask_in_place, 0, true); + + not_biased_ctrl = fast_lock_region->in(2); // Slow path + // fast_lock_region->in(2) - the prototype header is no longer biased + // and we have to revoke the bias on this object. + // We are going to try to reset the mark of this object to the prototype + // value and fall through to the CAS-based locking scheme. + Node* adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); + Node* cas = new (C, 5) StoreXConditionalNode(not_biased_ctrl, mem, adr, + proto_node, mark_node); + transform_later(cas); + Node* proj = transform_later( new (C, 1) SCMemProjNode(cas)); + fast_lock_mem_phi->init_req(2, proj); + + + // Second, check epoch bits. + Node* rebiased_region = new (C, 3) RegionNode(3); + Node* old_phi = new (C, 3) PhiNode( rebiased_region, TypeX_X); + Node* new_phi = new (C, 3) PhiNode( rebiased_region, TypeX_X); + + // Get slow path - mark word does NOT match epoch bits. + Node* epoch_ctrl = opt_bits_test(ctrl, rebiased_region, 1, x_node, + markOopDesc::epoch_mask_in_place, 0); + // The epoch of the current bias is not valid, attempt to rebias the object + // toward the current thread. + rebiased_region->init_req(2, epoch_ctrl); + old_phi->init_req(2, mark_node); + new_phi->init_req(2, o_node); + + // rebiased_region->in(1) is set to fast path. + // The epoch of the current bias is still valid but we know + // nothing about the owner; it might be set or it might be clear. 
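When only the bias owner is unknown (the epoch is still valid), the code below masks the owner bits out of the mark word and tries to CAS in the current thread; the CAS can only succeed if the object was anonymously biased, otherwise the lock drops into the slow path. A standalone sketch with std::atomic standing in for the StoreXConditional node and illustrative mask values:

    #include <atomic>
    #include <cstdint>

    bool try_rebias(std::atomic<std::uint64_t>& mark_word, std::uint64_t thread,
                    std::uint64_t lock_mask, std::uint64_t age_mask,
                    std::uint64_t epoch_mask) {
      std::uint64_t mark = mark_word.load();
      std::uint64_t oldv = mark & (lock_mask | age_mask | epoch_mask);  // drop owner bits
      std::uint64_t newv = thread | oldv;                               // bias to this thread
      // Succeeds only if the mark really equals 'oldv', i.e. nobody owns the bias.
      return mark_word.compare_exchange_strong(oldv, newv);
    }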
+ Node* cmask = MakeConX(markOopDesc::biased_lock_mask_in_place | + markOopDesc::age_mask_in_place | + markOopDesc::epoch_mask_in_place); + Node* old = transform_later(new (C, 3) AndXNode(mark_node, cmask)); + cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread)); + Node* new_mark = transform_later(new (C, 3) OrXNode(cast_thread, old)); + old_phi->init_req(1, old); + new_phi->init_req(1, new_mark); + + transform_later(rebiased_region); + transform_later(old_phi); + transform_later(new_phi); + + // Try to acquire the bias of the object using an atomic operation. + // If this fails we will go in to the runtime to revoke the object's bias. + cas = new (C, 5) StoreXConditionalNode(rebiased_region, mem, adr, + new_phi, old_phi); + transform_later(cas); + proj = transform_later( new (C, 1) SCMemProjNode(cas)); + + // Get slow path - Failed to CAS. + not_biased_ctrl = opt_bits_test(rebiased_region, region, 4, cas, 0, 0); + mem_phi->init_req(4, proj); + // region->in(4) is set to fast path - the object is rebiased to the current thread. + + // Failed to CAS. + slow_path = new (C, 3) RegionNode(3); + Node *slow_mem = new (C, 3) PhiNode( slow_path, Type::MEMORY, TypeRawPtr::BOTTOM); + + slow_path->init_req(1, not_biased_ctrl); // Capture slow-control + slow_mem->init_req(1, proj); + + // Call CAS-based locking scheme (FastLock node). + + transform_later(fast_lock_region); + transform_later(fast_lock_mem_phi); + + // Get slow path - FastLock failed to lock the object. + ctrl = opt_bits_test(fast_lock_region, region, 2, flock, 0, 0); + mem_phi->init_req(2, fast_lock_mem_phi); + // region->in(2) is set to fast path - the object is locked to the current thread. + + slow_path->init_req(2, ctrl); // Capture slow-control + slow_mem->init_req(2, fast_lock_mem_phi); + + transform_later(slow_path); + transform_later(slow_mem); + // Reset lock's memory edge. 
+ lock->set_req(TypeFunc::Memory, slow_mem); + + } else { + region = new (C, 3) RegionNode(3); + // create a Phi for the memory state + mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); + + // Optimize test; set region slot 2 + slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0); + mem_phi->init_req(2, mem); } - // Make the merge point - Node *region = new (C, 3) RegionNode(3); - - Node *bol = transform_later(new (C, 2) BoolNode(flock,BoolTest::ne)); - Node *iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN ); - // Optimize test; set region slot 2 - Node *slow_path = opt_iff(region,iff); - // Make slow path call CallNode *call = make_slow_call( (CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(), OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path, obj, box ); @@ -886,43 +1871,49 @@ transform_later(region); _igvn.subsume_node(_fallthroughproj, region); - // create a Phi for the memory state - Node *mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); - Node *memproj = transform_later( new (C, 1) ProjNode(call, TypeFunc::Memory) ); + Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) ); mem_phi->init_req(1, memproj ); - mem_phi->init_req(2, mem); transform_later(mem_phi); - _igvn.hash_delete(_memproj_fallthrough); + _igvn.hash_delete(_memproj_fallthrough); _igvn.subsume_node(_memproj_fallthrough, mem_phi); - - } //------------------------------expand_unlock_node---------------------- void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) { - Node *ctrl = unlock->in(TypeFunc::Control); + Node* ctrl = unlock->in(TypeFunc::Control); Node* mem = unlock->in(TypeFunc::Memory); Node* obj = unlock->obj_node(); Node* box = unlock->box_node(); - - if (unlock->is_eliminated()) { - eliminate_locking_node(unlock); - return; - } - // No need for a null check on unlock // Make the merge point - RegionNode *region = new (C, 3) RegionNode(3); + Node *region; + Node *mem_phi; + + if (UseOptoBiasInlining) { + // Check for biased locking unlock case, which is a no-op. + // See the full descrition in MacroAssembler::biased_locking_exit(). 
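For the unlock side, biased locking leaves nothing to undo: if the mark word still carries the biased pattern, the monitor exit is a no-op, and only otherwise does the expansion fall through to FastUnlock and, on failure, the runtime call. The test has the same (word & mask) == pattern shape that opt_bits_test() generates; a one-function standalone sketch with illustrative constants:

    #include <cstdint>

    // True -> take the no-op path (region slot 3 below); false -> FastUnlock.
    bool biased_unlock_is_noop(std::uint64_t mark,
                               std::uint64_t biased_lock_mask,
                               std::uint64_t biased_lock_pattern) {
      return (mark & biased_lock_mask) == biased_lock_pattern;
    }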
+ region = new (C, 4) RegionNode(4); + // create a Phi for the memory state + mem_phi = new (C, 4) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); + mem_phi->init_req(3, mem); + + Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type()); + ctrl = opt_bits_test(ctrl, region, 3, mark_node, + markOopDesc::biased_lock_mask_in_place, + markOopDesc::biased_lock_pattern); + } else { + region = new (C, 3) RegionNode(3); + // create a Phi for the memory state + mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); + } FastUnlockNode *funlock = new (C, 3) FastUnlockNode( ctrl, obj, box ); funlock = transform_later( funlock )->as_FastUnlock(); - Node *bol = transform_later(new (C, 2) BoolNode(funlock,BoolTest::ne)); - Node *iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN ); // Optimize test; set region slot 2 - Node *slow_path = opt_iff(region,iff); + Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0); CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box ); @@ -944,16 +1935,12 @@ transform_later(region); _igvn.subsume_node(_fallthroughproj, region); - // create a Phi for the memory state - Node *mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) ); mem_phi->init_req(1, memproj ); mem_phi->init_req(2, mem); transform_later(mem_phi); - _igvn.hash_delete(_memproj_fallthrough); + _igvn.hash_delete(_memproj_fallthrough); _igvn.subsume_node(_memproj_fallthrough, mem_phi); - - } //------------------------------expand_macro_nodes---------------------- @@ -961,14 +1948,61 @@ bool PhaseMacroExpand::expand_macro_nodes() { if (C->macro_count() == 0) return false; - // Make sure expansion will not cause node limit to be exceeded. Worst case is a - // macro node gets expanded into about 50 nodes. 
Allow 50% more for optimization + // First, attempt to eliminate locks + bool progress = true; + while (progress) { + progress = false; + for (int i = C->macro_count(); i > 0; i--) { + Node * n = C->macro_node(i-1); + bool success = false; + debug_only(int old_macro_count = C->macro_count();); + if (n->is_AbstractLock()) { + success = eliminate_locking_node(n->as_AbstractLock()); + } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) { + _igvn.add_users_to_worklist(n); + _igvn.hash_delete(n); + _igvn.subsume_node(n, n->in(1)); + success = true; + } + assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count"); + progress = progress || success; + } + } + // Next, attempt to eliminate allocations + progress = true; + while (progress) { + progress = false; + for (int i = C->macro_count(); i > 0; i--) { + Node * n = C->macro_node(i-1); + bool success = false; + debug_only(int old_macro_count = C->macro_count();); + switch (n->class_id()) { + case Node::Class_Allocate: + case Node::Class_AllocateArray: + success = eliminate_allocate_node(n->as_Allocate()); + break; + case Node::Class_Lock: + case Node::Class_Unlock: + assert(!n->as_AbstractLock()->is_eliminated(), "sanity"); + break; + default: + assert(false, "unknown node type in macro list"); + } + assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count"); + progress = progress || success; + } + } + // Make sure expansion will not cause node limit to be exceeded. + // Worst case is a macro node gets expanded into about 50 nodes. + // Allow 50% more for optimization. if (C->check_node_count(C->macro_count() * 75, "out of nodes before macro expansion" ) ) return true; + // expand "macro" nodes // nodes are removed from the macro list as they are processed while (C->macro_count() > 0) { - Node * n = C->macro_node(0); + int macro_count = C->macro_count(); + Node * n = C->macro_node(macro_count-1); assert(n->is_macro(), "only macro nodes expected here"); if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) { // node is unreachable, so don't try to expand it @@ -991,8 +2025,11 @@ default: assert(false, "unknown node type in macro list"); } + assert(C->macro_count() < macro_count, "must have deleted a node from macro list"); if (C->failing()) return true; } + + _igvn.set_delay_transform(false); _igvn.optimize(); return false; } --- old/hotspot/src/share/vm/opto/macro.hpp 2009-08-01 04:14:02.015354315 +0100 +++ new/hotspot/src/share/vm/opto/macro.hpp 2009-08-01 04:14:01.934105114 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)macro.hpp 1.11 07/07/19 19:08:26 JVM" #endif /* - * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -81,14 +81,23 @@ Node* length, const TypeFunc* slow_call_type, address slow_call_address); - void eliminate_locking_node(AbstractLockNode *alock); + Node *value_from_mem(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc); + Node *value_from_mem_phi(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level); + + bool eliminate_allocate_node(AllocateNode *alloc); + bool can_eliminate_allocation(AllocateNode *alloc, GrowableArray & safepoints); + bool scalar_replacement(AllocateNode *alloc, GrowableArray & safepoints_done); + void process_users_of_allocation(AllocateNode *alloc); + + void eliminate_card_mark(Node *cm); + bool eliminate_locking_node(AbstractLockNode *alock); void expand_lock_node(LockNode *lock); void expand_unlock_node(UnlockNode *unlock); int replace_input(Node *use, Node *oldref, Node *newref); void copy_call_debug_info(CallNode *oldcall, CallNode * newcall); - Node* opt_iff(Node* region, Node* iff); - void copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call); + Node* opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path = false); + void copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call); CallNode* make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call, const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1); void extract_call_projections(CallNode *call); @@ -104,7 +113,9 @@ Node* length); public: - PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) {} + PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) { + _igvn.set_delay_transform(true); + } bool expand_macro_nodes(); }; --- old/hotspot/src/share/vm/opto/matcher.cpp 2009-08-01 04:14:02.881594429 +0100 +++ new/hotspot/src/share/vm/opto/matcher.cpp 2009-08-01 04:14:02.786183420 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)matcher.cpp 1.388 07/09/28 10:33:13 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ const int Matcher::base2reg[Type::lastype] = { - Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0, + Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0, Op_RegN, Node::NotAMachineReg, Node::NotAMachineReg, /* tuple, array */ Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, /* the pointers */ 0, 0/*abio*/, @@ -54,12 +54,13 @@ PhaseTransform( Phase::Ins_Select ), #ifdef ASSERT _old2new_map(C->comp_arena()), + _new2old_map(C->comp_arena()), #endif - _shared_constants(C->comp_arena()), - _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp), - _swallowed(swallowed), - _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE), - _end_inst_chain_rule(_END_INST_CHAIN_RULE), + _shared_nodes(C->comp_arena()), + _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp), + _swallowed(swallowed), + _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE), + _end_inst_chain_rule(_END_INST_CHAIN_RULE), _must_clone(must_clone), _proj_list(proj_list), _register_save_policy(register_save_policy), _c_reg_save_policy(c_reg_save_policy), @@ -73,16 +74,19 @@ C->set_matcher(this); idealreg2spillmask[Op_RegI] = NULL; + idealreg2spillmask[Op_RegN] = NULL; idealreg2spillmask[Op_RegL] = NULL; idealreg2spillmask[Op_RegF] = NULL; idealreg2spillmask[Op_RegD] = NULL; idealreg2spillmask[Op_RegP] = NULL; idealreg2debugmask[Op_RegI] = NULL; + idealreg2debugmask[Op_RegN] = NULL; idealreg2debugmask[Op_RegL] = NULL; idealreg2debugmask[Op_RegF] = NULL; idealreg2debugmask[Op_RegD] = NULL; idealreg2debugmask[Op_RegP] = NULL; + debug_only(_mem_node = NULL;) // Ideal memory node consumed by mach node } //------------------------------warp_incoming_stk_arg------------------------ @@ -272,7 +276,7 @@ find_shared( C->root() ); find_shared( C->top() ); - C->print_method("Before Matching", 2); + C->print_method("Before Matching"); // Swap out to old-space; emptying new-space Arena *old = C->node_arena()->move_contents(C->old_arena()); @@ -369,17 +373,19 @@ void Matcher::init_first_stack_mask() { // Allocate storage for spill masks as masks for the appropriate load type. - RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask)*10); - idealreg2spillmask[Op_RegI] = &rms[0]; - idealreg2spillmask[Op_RegL] = &rms[1]; - idealreg2spillmask[Op_RegF] = &rms[2]; - idealreg2spillmask[Op_RegD] = &rms[3]; - idealreg2spillmask[Op_RegP] = &rms[4]; - idealreg2debugmask[Op_RegI] = &rms[5]; - idealreg2debugmask[Op_RegL] = &rms[6]; - idealreg2debugmask[Op_RegF] = &rms[7]; - idealreg2debugmask[Op_RegD] = &rms[8]; - idealreg2debugmask[Op_RegP] = &rms[9]; + RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask)*12); + idealreg2spillmask[Op_RegN] = &rms[0]; + idealreg2spillmask[Op_RegI] = &rms[1]; + idealreg2spillmask[Op_RegL] = &rms[2]; + idealreg2spillmask[Op_RegF] = &rms[3]; + idealreg2spillmask[Op_RegD] = &rms[4]; + idealreg2spillmask[Op_RegP] = &rms[5]; + idealreg2debugmask[Op_RegN] = &rms[6]; + idealreg2debugmask[Op_RegI] = &rms[7]; + idealreg2debugmask[Op_RegL] = &rms[8]; + idealreg2debugmask[Op_RegF] = &rms[9]; + idealreg2debugmask[Op_RegD] = &rms[10]; + idealreg2debugmask[Op_RegP] = &rms[11]; OptoReg::Name i; @@ -402,7 +408,11 @@ C->FIRST_STACK_mask().set_AllStack(); // Make spill masks. Registers for their class, plus FIRST_STACK_mask. 
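With compressed oops the matcher gains a new ideal register class, Op_RegN, so the spill/debug mask storage grows from 10 to 12 RegMask slots and RegN is treated exactly like the existing classes: its spill mask is the class's register mask plus every stack slot, and its debug mask additionally drops the caller-save registers. A minimal standalone sketch of that construction, using an invented one-word RegMask:

    #include <cstdint>

    struct RegMask {
      std::uint64_t bits = 0;
      void OR(const RegMask& o)    { bits |= o.bits; }
      void Remove(std::uint64_t r) { bits &= ~(std::uint64_t(1) << r); }
    };

    RegMask make_spill_mask(const RegMask& class_regs, const RegMask& stack_slots) {
      RegMask m = class_regs;   // registers legal for the class (e.g. RegN)
      m.OR(stack_slots);        // ...plus FIRST_STACK_mask, so values may spill
      return m;
    }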
- *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI]; +#ifdef _LP64 + *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN]; + idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask()); +#endif + *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI]; idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask()); *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL]; idealreg2spillmask[Op_RegL]->OR(C->FIRST_STACK_mask()); @@ -416,6 +426,7 @@ // Make up debug masks. Any spill slot plus callee-save registers. // Caller-save registers are assumed to be trashable by the various // inline-cache fixup routines. + *idealreg2debugmask[Op_RegN]= *idealreg2spillmask[Op_RegN]; *idealreg2debugmask[Op_RegI]= *idealreg2spillmask[Op_RegI]; *idealreg2debugmask[Op_RegL]= *idealreg2spillmask[Op_RegL]; *idealreg2debugmask[Op_RegF]= *idealreg2spillmask[Op_RegF]; @@ -431,7 +442,8 @@ if( _register_save_policy[i] == 'C' || _register_save_policy[i] == 'A' || (_register_save_policy[i] == 'E' && exclude_soe) ) { - idealreg2debugmask[Op_RegI]->Remove(i); // Exclude save-on-call + idealreg2debugmask[Op_RegN]->Remove(i); + idealreg2debugmask[Op_RegI]->Remove(i); // Exclude save-on-call idealreg2debugmask[Op_RegL]->Remove(i); // registers from debug idealreg2debugmask[Op_RegF]->Remove(i); // masks idealreg2debugmask[Op_RegD]->Remove(i); @@ -664,6 +676,9 @@ set_shared(fp); // Compute generic short-offset Loads +#ifdef _LP64 + MachNode *spillCP = match_tree(new (C, 3) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM)); +#endif MachNode *spillI = match_tree(new (C, 3) LoadINode(NULL,mem,fp,atp)); MachNode *spillL = match_tree(new (C, 3) LoadLNode(NULL,mem,fp,atp)); MachNode *spillF = match_tree(new (C, 3) LoadFNode(NULL,mem,fp,atp)); @@ -673,6 +688,9 @@ spillD != NULL && spillP != NULL, ""); // Get the ADLC notion of the right regmask, for each basic type. +#ifdef _LP64 + idealreg2regmask[Op_RegN] = &spillCP->out_RegMask(); +#endif idealreg2regmask[Op_RegI] = &spillI->out_RegMask(); idealreg2regmask[Op_RegL] = &spillL->out_RegMask(); idealreg2regmask[Op_RegF] = &spillF->out_RegMask(); @@ -731,6 +749,7 @@ if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) { switch (n->Opcode()) { case Op_StrComp: + case Op_AryEq: case Op_MemBarVolatile: case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type? nidx = Compile::AliasIdxTop; @@ -820,10 +839,16 @@ if( n->is_Proj() && n->in(0)->is_Multi()) { // Projections? // Convert to machine-dependent projection m = n->in(0)->as_Multi()->match( n->as_Proj(), this ); +#ifdef ASSERT + _new2old_map.map(m->_idx, n); +#endif if (m->in(0) != NULL) // m might be top - collect_null_checks(m); + collect_null_checks(m, n); } else { // Else just a regular 'ol guy m = n->clone(); // So just clone into new-space +#ifdef ASSERT + _new2old_map.map(m->_idx, n); +#endif // Def-Use edges will be added incrementally as Uses // of this node are matched. 
assert(m->outcnt() == 0, "no Uses of this clone yet"); @@ -867,11 +892,14 @@ Node *m = n->in(i); // Get input int op = m->Opcode(); assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites"); - if( op == Op_ConI || op == Op_ConP || + if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConF || op == Op_ConD || op == Op_ConL // || op == Op_BoxLock // %%%% enable this and remove (+++) in chaitin.cpp ) { m = m->clone(); +#ifdef ASSERT + _new2old_map.map(m->_idx, n); +#endif mstack.push(m, Post_Visit, n, i); // Don't neet to visit mstack.push(m->in(0), Visit, m, 0); } else { @@ -1139,7 +1167,10 @@ // StoreNodes require their Memory input to match any LoadNodes Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ; - +#ifdef ASSERT + Node* save_mem_node = _mem_node; + _mem_node = n->is_Store() ? (Node*)n : NULL; +#endif // State object for root node of match tree // Allocate it on _states_arena - stack allocation can cause stack overflow. State *s = new (&_states_arena) State; @@ -1172,13 +1203,14 @@ MachNode *m = ReduceInst( s, s->_rule[mincost], mem ); #ifdef ASSERT _old2new_map.map(n->_idx, m); + _new2old_map.map(m->_idx, (Node*)n); #endif // Add any Matcher-ignored edges uint cnt = n->req(); uint start = 1; if( mem != (Node*)1 ) start = MemNode::Memory+1; - if( n->Opcode() == Op_AddP ) { + if( n->is_AddP() ) { assert( mem == (Node*)1, "" ); start = AddPNode::Base+1; } @@ -1191,6 +1223,7 @@ } } + debug_only( _mem_node = save_mem_node; ) return m; } @@ -1206,7 +1239,7 @@ if( t->singleton() ) { // Never force constants into registers. Allow them to match as // constants or registers. Copies of the same value will share - // the same register. See find_shared_constant. + // the same register. See find_shared_node. return false; } else { // Not a constant // Stop recursion if they have different Controls. @@ -1230,6 +1263,11 @@ if( j == max_scan ) // No post-domination before scan end? return true; // Then break the match tree up } + if (m->is_DecodeN() && Matcher::clone_shift_expressions) { + // These are commonly used in address expressions and can + // efficiently fold into them on X64 in some cases. + return false; + } } // Not forceably cloning. If shared, put it into a register. @@ -1348,13 +1386,16 @@ // which reduces the number of copies of a constant in the final // program. The register allocator is free to split uses later to // split live ranges. -MachNode* Matcher::find_shared_constant(Node* leaf, uint rule) { - if (!leaf->is_Con()) return NULL; +MachNode* Matcher::find_shared_node(Node* leaf, uint rule) { + if (!leaf->is_Con() && !leaf->is_DecodeN()) return NULL; // See if this Con has already been reduced using this rule. - if (_shared_constants.Size() <= leaf->_idx) return NULL; - MachNode* last = (MachNode*)_shared_constants.at(leaf->_idx); + if (_shared_nodes.Size() <= leaf->_idx) return NULL; + MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx); if (last != NULL && rule == last->rule()) { + // Don't expect control change for DecodeN + if (leaf->is_DecodeN()) + return last; // Get the new space root. 
Node* xroot = new_node(C->root()); if (xroot == NULL) { @@ -1400,9 +1441,9 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) { assert( rule >= NUM_OPERANDS, "called with operand rule" ); - MachNode* shared_con = find_shared_constant(s->_leaf, rule); - if (shared_con != NULL) { - return shared_con; + MachNode* shared_node = find_shared_node(s->_leaf, rule); + if (shared_node != NULL) { + return shared_node; } // Build the object to represent this state & prepare for recursive calls @@ -1412,6 +1453,8 @@ Node *leaf = s->_leaf; // Check for instruction or instruction chain rule if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) { + assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf), + "duplicating node that's already been matched"); // Instruction mach->add_req( leaf->in(0) ); // Set initial control // Reduce interior of complex instruction @@ -1423,11 +1466,40 @@ } // If a Memory was used, insert a Memory edge - if( mem != (Node*)1 ) + if( mem != (Node*)1 ) { mach->ins_req(MemNode::Memory,mem); +#ifdef ASSERT + // Verify adr type after matching memory operation + const MachOper* oper = mach->memory_operand(); + if (oper != NULL && oper != (MachOper*)-1 && + mach->adr_type() != TypeRawPtr::BOTTOM) { // non-direct addressing mode + // It has a unique memory operand. Find corresponding ideal mem node. + Node* m = NULL; + if (leaf->is_Mem()) { + m = leaf; + } else { + m = _mem_node; + assert(m != NULL && m->is_Mem(), "expecting memory node"); + } + const Type* mach_at = mach->adr_type(); + // DecodeN node consumed by an address may have different type + // then its input. Don't compare types for such case. + if (m->adr_type() != mach_at && m->in(MemNode::Address)->is_AddP() && + m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeN()) { + mach_at = m->adr_type(); + } + if (m->adr_type() != mach_at) { + m->dump(); + tty->print_cr("mach:"); + mach->dump(1); + } + assert(m->adr_type() == mach_at, "matcher should not change adr type"); + } +#endif + } // If the _leaf is an AddP, insert the base edge - if( leaf->Opcode() == Op_AddP ) + if( leaf->is_AddP() ) mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base)); uint num_proj = _proj_list.size(); @@ -1442,6 +1514,9 @@ for( uint i=0; ireq(); i++ ) { mach->set_req(i,NULL); } +#ifdef ASSERT + _new2old_map.map(ex->_idx, s->_leaf); +#endif } // PhaseChaitin::fixup_spills will sometimes generate spill code @@ -1455,9 +1530,9 @@ guarantee(_proj_list.size() == num_proj, "no allocation during spill generation"); } - if (leaf->is_Con()) { + if (leaf->is_Con() || leaf->is_DecodeN()) { // Record the con for sharing - _shared_constants.map(leaf->_idx, ex); + _shared_nodes.map(leaf->_idx, ex); } return ex; @@ -1488,7 +1563,9 @@ assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand"); mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C ); Node *mem1 = (Node*)1; + debug_only(Node *save_mem_node = _mem_node;) mach->add_req( ReduceInst(s, newrule, mem1) ); + debug_only(_mem_node = save_mem_node;) } return; } @@ -1498,6 +1575,7 @@ if( s->_leaf->is_Load() ) { Node *mem2 = s->_leaf->in(MemNode::Memory); assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" 
); + debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;) mem = mem2; } if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) { @@ -1541,7 +1619,9 @@ // --> ReduceInst( newrule ) mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C ); Node *mem1 = (Node*)1; + debug_only(Node *save_mem_node = _mem_node;) mach->add_req( ReduceInst( newstate, newrule, mem1 ) ); + debug_only(_mem_node = save_mem_node;) } } assert( mach->_opnds[num_opnds-1], "" ); @@ -1572,6 +1652,7 @@ if( s->_leaf->is_Load() ) { assert( mem == (Node*)1, "multiple Memories being matched at once?" ); mem = s->_leaf->in(MemNode::Memory); + debug_only(_mem_node = s->_leaf;) } if( s->_leaf->in(0) && s->_leaf->req() > 1) { if( !mach->in(0) ) @@ -1596,7 +1677,9 @@ // Reduce the instruction, and add a direct pointer from this // machine instruction to the newly reduced one. Node *mem1 = (Node*)1; + debug_only(Node *save_mem_node = _mem_node;) mach->add_req( ReduceInst( kid, newrule, mem1 ) ); + debug_only(_mem_node = save_mem_node;) } } } @@ -1650,6 +1733,7 @@ case Op_Phi: // Treat Phis as shared roots case Op_Parm: case Op_Proj: // All handled specially during matching + case Op_SafePointScalarObject: set_shared(n); set_dontcare(n); break; @@ -1695,6 +1779,7 @@ mstack.push(n->in(0), Pre_Visit); // Visit Control input continue; // while (mstack.is_nonempty()) case Op_StrComp: + case Op_AryEq: set_shared(n); // Force result into register (it will be anyways) break; case Op_ConP: { // Convert pointers above the centerline to NUL @@ -1705,6 +1790,14 @@ } break; } + case Op_ConN: { // Convert narrow pointers above the centerline to NUL + TypeNode *tn = n->as_Type(); // Constants derive from type nodes + const TypePtr* tp = tn->type()->make_ptr(); + if (tp && tp->_ptr == TypePtr::AnyNull) { + tn->set_type(TypeNarrowOop::NULL_PTR); + } + break; + } case Op_Binary: // These are introduced in the Post_Visit state. ShouldNotReachHere(); break; @@ -1716,6 +1809,7 @@ case Op_StoreI: case Op_StoreL: case Op_StoreP: + case Op_StoreN: case Op_Store16B: case Op_Store8B: case Op_Store4B: @@ -1738,9 +1832,11 @@ case Op_LoadF: case Op_LoadI: case Op_LoadKlass: + case Op_LoadNKlass: case Op_LoadL: case Op_LoadS: case Op_LoadP: + case Op_LoadN: case Op_LoadRange: case Op_LoadD_unaligned: case Op_LoadL_unaligned: @@ -1788,13 +1884,19 @@ // Clone addressing expressions as they are "free" in most instructions if( mem_op && i == MemNode::Address && mop == Op_AddP ) { + if (m->in(AddPNode::Base)->Opcode() == Op_DecodeN) { + // Bases used in addresses must be shared but since + // they are shared through a DecodeN they may appear + // to have a single use so force sharing here. + set_shared(m->in(AddPNode::Base)->in(1)); + } Node *off = m->in(AddPNode::Offset); if( off->is_Con() ) { set_visited(m); // Flag as visited now Node *adr = m->in(AddPNode::Address); // Intel, ARM and friends can handle 2 adds in addressing mode - if( clone_shift_expressions && adr->Opcode() == Op_AddP && + if( clone_shift_expressions && adr->is_AddP() && // AtomicAdd is not an addressing expression. // Cheap to find it by looking for screwy base. 
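The cloning logic here treats address expressions as free to duplicate because a base + scaled-index + displacement computation normally folds into a single memory operand on the target. A hedged illustration in plain C++ (the function and operand names are made up, and the instruction shown in the comment is typical x86-64 output, not guaranteed):

    #include <cstdint>

    // base[idx + 2] usually compiles to one load whose address is computed
    // inside the instruction itself, e.g. mov rax, [rdi + rsi*8 + 16],
    // which is why the matcher prefers to keep the address math next to
    // each memory use instead of sharing it through a register.
    int64_t load_scaled(const int64_t* base, int64_t idx) {
        return base[idx + 2];
    }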
!adr->in(AddPNode::Base)->is_top() ) { @@ -1852,10 +1954,12 @@ // Now hack a few special opcodes switch( n->Opcode() ) { // Handle some opcodes special case Op_StorePConditional: + case Op_StoreIConditional: case Op_StoreLConditional: case Op_CompareAndSwapI: case Op_CompareAndSwapL: - case Op_CompareAndSwapP: { // Convert trinary to binary-tree + case Op_CompareAndSwapP: + case Op_CompareAndSwapN: { // Convert trinary to binary-tree Node *newval = n->in(MemNode::ValueIn ); Node *oldval = n->in(LoadStoreNode::ExpectedIn); Node *pair = new (C, 3) BinaryNode( oldval, newval ); @@ -1867,6 +1971,7 @@ case Op_CMoveF: case Op_CMoveI: case Op_CMoveL: + case Op_CMoveN: case Op_CMoveP: { // Restructure into a binary tree for Matching. It's possible that // we could move this code up next to the graph reshaping for IfNodes @@ -1901,29 +2006,59 @@ // it. Used by later implicit-null-check handling. Actually collects // either an IfTrue or IfFalse for the common NOT-null path, AND the ideal // value being tested. -void Matcher::collect_null_checks( Node *proj ) { +void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) { Node *iff = proj->in(0); if( iff->Opcode() == Op_If ) { // During matching If's have Bool & Cmp side-by-side BoolNode *b = iff->in(1)->as_Bool(); Node *cmp = iff->in(2); - if( cmp->Opcode() == Op_CmpP ) { - if( cmp->in(2)->bottom_type() == TypePtr::NULL_PTR ) { + int opc = cmp->Opcode(); + if (opc != Op_CmpP && opc != Op_CmpN) return; - if( proj->Opcode() == Op_IfTrue ) { - extern int all_null_checks_found; - all_null_checks_found++; - if( b->_test._test == BoolTest::ne ) { - _null_check_tests.push(proj); - _null_check_tests.push(cmp->in(1)); - } - } else { - assert( proj->Opcode() == Op_IfFalse, "" ); - if( b->_test._test == BoolTest::eq ) { - _null_check_tests.push(proj); - _null_check_tests.push(cmp->in(1)); + const Type* ct = cmp->in(2)->bottom_type(); + if (ct == TypePtr::NULL_PTR || + (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) { + + bool push_it = false; + if( proj->Opcode() == Op_IfTrue ) { + extern int all_null_checks_found; + all_null_checks_found++; + if( b->_test._test == BoolTest::ne ) { + push_it = true; + } + } else { + assert( proj->Opcode() == Op_IfFalse, "" ); + if( b->_test._test == BoolTest::eq ) { + push_it = true; + } + } + if( push_it ) { + _null_check_tests.push(proj); + Node* val = cmp->in(1); +#ifdef _LP64 + if (UseCompressedOops && !Matcher::clone_shift_expressions && + val->bottom_type()->isa_narrowoop()) { + // + // Look for DecodeN node which should be pinned to orig_proj. + // On platforms (Sparc) which can not handle 2 adds + // in addressing mode we have to keep a DecodeN node and + // use it to do implicit NULL check in address. + // + // DecodeN node was pinned to non-null path (orig_proj) during + // CastPP transformation in final_graph_reshaping_impl(). + // + uint cnt = orig_proj->outcnt(); + for (uint i = 0; i < orig_proj->outcnt(); i++) { + Node* d = orig_proj->raw_out(i); + if (d->is_DecodeN() && d->in(1) == val) { + val = d; + val->set_req(0, NULL); // Unpin now. 
+ break; + } } } +#endif + _null_check_tests.push(val); } } } @@ -2040,6 +2175,7 @@ xop == Op_FastLock || xop == Op_CompareAndSwapL || xop == Op_CompareAndSwapP || + xop == Op_CompareAndSwapN || xop == Op_CompareAndSwapI) return true; --- old/hotspot/src/share/vm/opto/matcher.hpp 2009-08-01 04:14:04.004773282 +0100 +++ new/hotspot/src/share/vm/opto/matcher.hpp 2009-08-01 04:14:03.932027065 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)matcher.hpp 1.188 07/07/19 19:08:27 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,7 @@ void ReduceOper( State *s, int newrule, Node *&mem, MachNode *mach ); // If this node already matched using "rule", return the MachNode for it. - MachNode* find_shared_constant(Node* con, uint rule); + MachNode* find_shared_node(Node* n, uint rule); // Convert a dense opcode number to an expanded rule number const int *_reduceOp; @@ -84,9 +84,10 @@ Node_List &_proj_list; // For Machine nodes killing many values - Node_Array _shared_constants; + Node_Array _shared_nodes; debug_only(Node_Array _old2new_map;) // Map roots of ideal-trees to machine-roots + debug_only(Node_Array _new2old_map;) // Maps machine nodes back to ideal // Accessors for the inherited field PhaseTransform::_nodes: void grow_new_node_array(uint idx_limit) { @@ -107,6 +108,8 @@ #ifdef ASSERT // Make sure only new nodes are reachable from this node void verify_new_nodes_only(Node* root); + + Node* _mem_node; // Ideal memory node consumed by mach node #endif public: @@ -166,7 +169,7 @@ // List of IfFalse or IfTrue Nodes that indicate a taken null test. // List is valid in the post-matching space. Node_List _null_check_tests; - void collect_null_checks( Node *proj ); + void collect_null_checks( Node *proj, Node *orig_proj ); void validate_null_checks( ); Matcher( Node_List &proj_list ); @@ -324,7 +327,7 @@ virtual int regnum_to_fpu_offset(int regnum); // Is this branch offset small enough to be addressed by a short branch? - bool is_short_branch_offset(int offset); + bool is_short_branch_offset(int rule, int offset); // Optional scaling for the parameter to the ClearArray/CopyArray node. 
static const bool init_array_count_is_in_bytes; @@ -391,5 +394,9 @@ #ifdef ASSERT void dump_old2new_map(); // machine-independent to machine-dependent + + Node* find_old_node(Node* new_node) { + return _new2old_map[new_node->_idx]; + } #endif }; --- old/hotspot/src/share/vm/opto/memnode.cpp 2009-08-01 04:14:04.895929052 +0100 +++ new/hotspot/src/share/vm/opto/memnode.cpp 2009-08-01 04:14:04.802798621 +0100 @@ -32,6 +32,8 @@ #include "incls/_precompiled.incl" #include "incls/_memnode.cpp.incl" +static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st); + //============================================================================= uint MemNode::size_of() const { return sizeof(*this); } @@ -90,6 +92,123 @@ #endif +Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) { + const TypeOopPtr *tinst = t_adr->isa_oopptr(); + if (tinst == NULL || !tinst->is_known_instance_field()) + return mchain; // don't try to optimize non-instance types + uint instance_id = tinst->instance_id(); + Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory); + Node *prev = NULL; + Node *result = mchain; + while (prev != result) { + prev = result; + if (result == start_mem) + break; // hit one of our sentinels + // skip over a call which does not affect this memory slice + if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { + Node *proj_in = result->in(0); + if (proj_in->is_Allocate() && proj_in->_idx == instance_id) { + break; // hit one of our sentinels + } else if (proj_in->is_Call()) { + CallNode *call = proj_in->as_Call(); + if (!call->may_modify(t_adr, phase)) { + result = call->in(TypeFunc::Memory); + } + } else if (proj_in->is_Initialize()) { + AllocateNode* alloc = proj_in->as_Initialize()->allocation(); + // Stop if this is the initialization for the object instance which + // contains this memory slice, otherwise skip over it.
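optimize_simple_memory_chain, added in this hunk, walks the memory input chain upward and hops over calls, MemBars and other instances' initializations that provably cannot touch the slice being loaded. The loop shape, reduced to a standalone sketch with an invented MemState type (none of this is HotSpot code):

    #include <cstddef>

    struct MemState {
        int       alias_id;   // which memory slice this producer writes
        MemState* prev;       // earlier memory state in the chain
    };

    // Walk toward the start of the chain, stopping at the first producer
    // that could affect 'wanted' or at the chain head. Mirrors only the
    // loop shape of optimize_simple_memory_chain, not its actual tests.
    MemState* skip_unrelated(MemState* m, int wanted) {
        while (m != nullptr && m->prev != nullptr && m->alias_id != wanted) {
            m = m->prev;   // this producer cannot alias the slice we care about
        }
        return m;
    }

The real loop additionally terminates at the start memory and at the owning allocation, the sentinels noted above.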
+ if (alloc != NULL && alloc->_idx != instance_id) { + result = proj_in->in(TypeFunc::Memory); + } + } else if (proj_in->is_MemBar()) { + result = proj_in->in(TypeFunc::Memory); + } else { + assert(false, "unexpected projection"); + } + } else if (result->is_MergeMem()) { + result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty); + } + } + return result; +} + +Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) { + const TypeOopPtr *t_oop = t_adr->isa_oopptr(); + bool is_instance = (t_oop != NULL) && t_oop->is_known_instance_field(); + PhaseIterGVN *igvn = phase->is_IterGVN(); + Node *result = mchain; + result = optimize_simple_memory_chain(result, t_adr, phase); + if (is_instance && igvn != NULL && result->is_Phi()) { + PhiNode *mphi = result->as_Phi(); + assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); + const TypePtr *t = mphi->adr_type(); + if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM || + t->isa_oopptr() && !t->is_oopptr()->is_known_instance() && + t->is_oopptr()->cast_to_exactness(true) + ->is_oopptr()->cast_to_ptr_type(t_oop->ptr()) + ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop) { + // clone the Phi with our address type + result = mphi->split_out_instance(t_adr, igvn); + } else { + assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain"); + } + } + return result; +} + +static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st) { + uint alias_idx = phase->C->get_alias_index(tp); + Node *mem = mmem; +#ifdef ASSERT + { + // Check that current type is consistent with the alias index used during graph construction + assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx"); + bool consistent = adr_check == NULL || adr_check->empty() || + phase->C->must_alias(adr_check, alias_idx ); + // Sometimes dead array references collapse to a[-1], a[-2], or a[-3] + if( !consistent && adr_check != NULL && !adr_check->empty() && + tp->isa_aryptr() && tp->offset() == Type::OffsetBot && + adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot && + ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() || + adr_check->offset() == oopDesc::klass_offset_in_bytes() || + adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) { + // don't assert if it is dead code. + consistent = true; + } + if( !consistent ) { + st->print("alias_idx==%d, adr_check==", alias_idx); + if( adr_check == NULL ) { + st->print("NULL"); + } else { + adr_check->dump(); + } + st->cr(); + print_alias_types(); + assert(consistent, "adr_check must match alias idx"); + } + } +#endif + // TypeInstPtr::NOTNULL+any is an OOP with unknown offset - generally + // means an array I have not precisely typed yet. Do not do any + // alias stuff with it any time soon. + const TypeOopPtr *tinst = tp->isa_oopptr(); + if( tp->base() != Type::AnyPtr && + !(tinst && + tinst->klass()->is_java_lang_Object() && + tinst->offset() == Type::OffsetBot) ) { + // compress paths and change unreachable cycles to TOP + // If not, we can update the input infinitely along a MergeMem cycle + // Equivalent code in PhiNode::Ideal + Node* m = phase->transform(mmem); + // If tranformed to a MergeMem, get the desired slice + // Otherwise the returned node represents memory for every slice + mem = (m->is_MergeMem())? 
m->as_MergeMem()->memory_at(alias_idx) : m; + // Update input if it is progress over what we have now + } + return mem; +} + //--------------------------Ideal_common--------------------------------------- // Look for degenerate control and memory inputs. Bypass MergeMem inputs. // Unhook non-raw memories from complete (macro-expanded) initializations. @@ -98,6 +217,9 @@ Node *ctl = in(MemNode::Control); if (ctl && remove_dead_region(phase, can_reshape)) return this; + ctl = in(MemNode::Control); + // Don't bother trying to transform a dead node + if( ctl && ctl->is_top() ) return NodeSentinel; // Ignore if memory is dead, or self-loop Node *mem = in(MemNode::Memory); @@ -108,72 +230,35 @@ const Type *t_adr = phase->type( address ); if( t_adr == Type::TOP ) return NodeSentinel; // caller will return NULL + PhaseIterGVN *igvn = phase->is_IterGVN(); + if( can_reshape && igvn != NULL && igvn->_worklist.member(address) ) { + // The address's base and type may change when the address is processed. + // Delay this mem node transformation until the address is processed. + phase->is_IterGVN()->_worklist.push(this); + return NodeSentinel; // caller will return NULL + } + // Avoid independent memory operations Node* old_mem = mem; - if (mem->is_Proj() && mem->in(0)->is_Initialize()) { - InitializeNode* init = mem->in(0)->as_Initialize(); - if (init->is_complete()) { // i.e., after macro expansion - const TypePtr* tp = t_adr->is_ptr(); - uint alias_idx = phase->C->get_alias_index(tp); - // Free this slice from the init. It was hooked, temporarily, - // by GraphKit::set_output_for_allocation. - if (alias_idx > Compile::AliasIdxRaw) { - mem = init->memory(alias_idx); - // ...but not with the raw-pointer slice. - } - } - } + // The code which unhooks non-raw memories from complete (macro-expanded) + // initializations was removed. After macro-expansion all stores catched + // by Initialize node became raw stores and there is no information + // which memory slices they modify. So it is unsafe to move any memory + // operation above these stores. Also in most cases hooked non-raw memories + // were already unhooked by using information from detect_ptr_independence() + // and find_previous_store(). if (mem->is_MergeMem()) { MergeMemNode* mmem = mem->as_MergeMem(); - const TypePtr *tp = t_adr->is_ptr(); - uint alias_idx = phase->C->get_alias_index(tp); -#ifdef ASSERT - { - // Check that current type is consistent with the alias index used during graph construction - assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx"); - const TypePtr *adr_t = adr_type(); - bool consistent = adr_t == NULL || adr_t->empty() || phase->C->must_alias(adr_t, alias_idx ); - // Sometimes dead array references collapse to a[-1], a[-2], or a[-3] - if( !consistent && adr_t != NULL && !adr_t->empty() && - tp->isa_aryptr() && tp->offset() == Type::OffsetBot && - adr_t->isa_aryptr() && adr_t->offset() != Type::OffsetBot && - ( adr_t->offset() == arrayOopDesc::length_offset_in_bytes() || - adr_t->offset() == oopDesc::klass_offset_in_bytes() || - adr_t->offset() == oopDesc::mark_offset_in_bytes() ) ) { - // don't assert if it is dead code. 
- consistent = true; - } - if( !consistent ) { - tty->print("alias_idx==%d, adr_type()==", alias_idx); if( adr_t == NULL ) { tty->print("NULL"); } else { adr_t->dump(); } - tty->cr(); - print_alias_types(); - assert(consistent, "adr_type must match alias idx"); - } - } -#endif - // TypeInstPtr::NOTNULL+any is an OOP with unknown offset - generally - // means an array I have not precisely typed yet. Do not do any - // alias stuff with it any time soon. - const TypeInstPtr *tinst = tp->isa_instptr(); - if( tp->base() != Type::AnyPtr && - !(tinst && - tinst->klass()->is_java_lang_Object() && - tinst->offset() == Type::OffsetBot) ) { - // compress paths and change unreachable cycles to TOP - // If not, we can update the input infinitely along a MergeMem cycle - // Equivalent code in PhiNode::Ideal - Node* m = phase->transform(mmem); - // If tranformed to a MergeMem, get the desired slice - // Otherwise the returned node represents memory for every slice - mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m; - // Update input if it is progress over what we have now - } + const TypePtr *tp = t_adr->is_ptr(); + + mem = step_through_mergemem(phase, mmem, tp, adr_type(), tty); } if (mem != old_mem) { set_req(MemNode::Memory, mem); + if (phase->type( mem ) == Type::TOP) return NodeSentinel; return this; } @@ -182,36 +267,102 @@ } // Helper function for proving some simple control dominations. -// Attempt to prove that control input 'dom' dominates (or equals) 'sub'. +// Attempt to prove that all control inputs of 'dom' dominate 'sub'. // Already assumes that 'dom' is available at 'sub', and that 'sub' // is not a constant (dominated by the method's StartNode). // Used by MemNode::find_previous_store to prove that the // control input of a memory operation predates (dominates) // an allocation it wants to look past. -bool MemNode::detect_dominating_control(Node* dom, Node* sub) { - if (dom == NULL) return false; - if (dom->is_Proj()) dom = dom->in(0); - if (dom->is_Start()) return true; // anything inside the method - if (dom->is_Root()) return true; // dom 'controls' a constant - int cnt = 20; // detect cycle or too much effort - while (sub != NULL) { // walk 'sub' up the chain to 'dom' - if (--cnt < 0) return false; // in a cycle or too complex - if (sub == dom) return true; - if (sub->is_Start()) return false; - if (sub->is_Root()) return false; - Node* up = sub->in(0); - if (sub == up && sub->is_Region()) { - for (uint i = 1; i < sub->req(); i++) { - Node* in = sub->in(i); - if (in != NULL && !in->is_top() && in != sub) { - up = in; break; // take any path on the way up to 'dom' +bool MemNode::all_controls_dominate(Node* dom, Node* sub) { + if (dom == NULL || dom->is_top() || sub == NULL || sub->is_top()) + return false; // Conservative answer for dead code + + // Check 'dom'. Skip Proj and CatchProj nodes. + dom = dom->find_exact_control(dom); + if (dom == NULL || dom->is_top()) + return false; // Conservative answer for dead code + + if (dom == sub) { + // For the case when, for example, 'sub' is Initialize and the original + // 'dom' is Proj node of the 'sub'. + return false; + } + + if (dom->is_Con() || dom->is_Start() || dom->is_Root() || dom == sub) + return true; + + // 'dom' dominates 'sub' if its control edge and control edges + // of all its inputs dominate or equal to sub's control edge. + + // Currently 'sub' is either Allocate, Initialize or Start nodes. + // Or Region for the check in LoadNode::Ideal(); + // 'sub' should have sub->in(0) != NULL. 
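all_controls_dominate, introduced here, replaces the old single-path walk: it gathers the transitive control inputs of 'dom' on a worklist and only answers true if every one of them dominates 'sub'. A standalone sketch of that shape, with invented node types and the dominance test passed in as a callback (the real code further distinguishes CFG, pinned and floating nodes, which this omits):

    #include <functional>
    #include <unordered_set>
    #include <vector>

    struct CNode {
        std::vector<CNode*> inputs;   // control/data inputs feeding this node
    };

    // True only if 'dominates(x, sub)' holds for every node reachable through
    // the inputs of 'dom' (including 'dom' itself). Bails out conservatively
    // as soon as 'sub' shows up among those inputs.
    bool all_inputs_dominate(CNode* dom, CNode* sub,
                             const std::function<bool(CNode*, CNode*)>& dominates) {
        std::vector<CNode*> worklist{dom};
        std::unordered_set<CNode*> seen{dom};
        while (!worklist.empty()) {
            CNode* n = worklist.back();
            worklist.pop_back();
            if (n == sub) return false;       // an input is dominated by sub
            if (!dominates(n, sub)) return false;
            for (CNode* in : n->inputs) {
                if (in != nullptr && seen.insert(in).second) worklist.push_back(in);
            }
        }
        return true;
    }

Answering false for anything that cannot be proven matches the "conservative answer for dead code" returns above.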
+ assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() || + sub->is_Region(), "expecting only these nodes"); + + // Get control edge of 'sub'. + Node* orig_sub = sub; + sub = sub->find_exact_control(sub->in(0)); + if (sub == NULL || sub->is_top()) + return false; // Conservative answer for dead code + + assert(sub->is_CFG(), "expecting control"); + + if (sub == dom) + return true; + + if (sub->is_Start() || sub->is_Root()) + return false; + + { + // Check all control edges of 'dom'. + + ResourceMark rm; + Arena* arena = Thread::current()->resource_area(); + Node_List nlist(arena); + Unique_Node_List dom_list(arena); + + dom_list.push(dom); + bool only_dominating_controls = false; + + for (uint next = 0; next < dom_list.size(); next++) { + Node* n = dom_list.at(next); + if (n == orig_sub) + return false; // One of dom's inputs dominated by sub. + if (!n->is_CFG() && n->pinned()) { + // Check only own control edge for pinned non-control nodes. + n = n->find_exact_control(n->in(0)); + if (n == NULL || n->is_top()) + return false; // Conservative answer for dead code + assert(n->is_CFG(), "expecting control"); + dom_list.push(n); + } else if (n->is_Con() || n->is_Start() || n->is_Root()) { + only_dominating_controls = true; + } else if (n->is_CFG()) { + if (n->dominates(sub, nlist)) + only_dominating_controls = true; + else + return false; + } else { + // First, own control edge. + Node* m = n->find_exact_control(n->in(0)); + if (m != NULL) { + if (m->is_top()) + return false; // Conservative answer for dead code + dom_list.push(m); + } + // Now, the rest of edges. + uint cnt = n->req(); + for (uint i = 1; i < cnt; i++) { + m = n->find_exact_control(n->in(i)); + if (m == NULL || m->is_top()) + continue; + dom_list.push(m); } } } - if (sub == up) return false; // some kind of tight cycle - sub = up; + return only_dominating_controls; } - return false; } //---------------------detect_ptr_independence--------------------------------- @@ -232,9 +383,9 @@ return (a1 != a2); } else if (a1 != NULL) { // one allocation a1 // (Note: p2->is_Con implies p2->in(0)->is_Root, which dominates.) - return detect_dominating_control(p2->in(0), a1->in(0)); + return all_controls_dominate(p2, a1); } else { //(a2 != NULL) // one allocation a2 - return detect_dominating_control(p1->in(0), a2->in(0)); + return all_controls_dominate(p1, a2); } return false; } @@ -263,6 +414,8 @@ if (offset == Type::OffsetBot) return NULL; // cannot unalias unless there are precise offsets + const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr(); + intptr_t size_in_bytes = memory_size(); Node* mem = in(MemNode::Memory); // start searching here... @@ -318,8 +471,7 @@ known_identical = true; else if (alloc != NULL) known_independent = true; - else if (ctrl != NULL && - detect_dominating_control(ctrl, st_alloc->in(0))) + else if (all_controls_dominate(this, st_alloc)) known_independent = true; if (known_independent) { @@ -342,6 +494,22 @@ return mem; // let caller handle steps (c), (d) } + } else if (addr_t != NULL && addr_t->is_known_instance_field()) { + // Can't use optimize_simple_memory_chain() since it needs PhaseGVN. 
+ if (mem->is_Proj() && mem->in(0)->is_Call()) { + CallNode *call = mem->in(0)->as_Call(); + if (!call->may_modify(addr_t, phase)) { + mem = call->in(TypeFunc::Memory); + continue; // (a) advance through independent call memory + } + } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) { + mem = mem->in(0)->in(TypeFunc::Memory); + continue; // (a) advance through independent MemBar memory + } else if (mem->is_MergeMem()) { + int alias_idx = phase->C->get_alias_index(adr_type()); + mem = mem->as_MergeMem()->memory_at(alias_idx); + continue; // (a) advance through independent MergeMem memory + } } // Unless there is an explicit 'continue', we must bail out here, @@ -446,6 +614,9 @@ // Find any cast-away of null-ness and keep its control. Null cast-aways are // going away in this pass and we need to make this memory op depend on the // gating null check. +Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { + return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address)); +} // I tried to leave the CastPP's in. This makes the graph more accurate in // some sense; we get to keep around the knowledge that an oop is not-null @@ -455,15 +626,14 @@ // some of the more trivial cases in the optimizer. Removing more useless // Phi's started allowing Loads to illegally float above null checks. I gave // up on this approach. CNC 10/20/2000 -Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { - Node *ctr = in(MemNode::Control); - Node *mem = in(MemNode::Memory); - Node *adr = in(MemNode::Address); +// This static method may be called not from MemNode (EncodePNode calls it). +// Only the control edge of the node 'n' might be updated. +Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) { Node *skipped_cast = NULL; // Need a null check? Regular static accesses do not because they are // from constant addresses. Array ops are gated by the range check (which // always includes a NULL check). Just check field ops. - if( !ctr ) { + if( n->in(MemNode::Control) == NULL ) { // Scan upwards for the highest location we can place this memory op. while( true ) { switch( adr->Opcode() ) { @@ -472,6 +642,10 @@ adr = adr->in(AddPNode::Base); continue; + case Op_DecodeN: // No change to NULL-ness, so peek thru + adr = adr->in(1); + continue; + case Op_CastPP: // If the CastPP is useless, just peek on through it. if( ccp->type(adr) == ccp->type(adr->in(1)) ) { @@ -484,11 +658,11 @@ } // CastPP is going away in this pass! We need this memory op to be // control-dependent on the test that is guarding the CastPP. - ccp->hash_delete(this); - set_req(MemNode::Control, adr->in(0)); - ccp->hash_insert(this); - return this; - + ccp->hash_delete(n); + n->set_req(MemNode::Control, adr->in(0)); + ccp->hash_insert(n); + return n; + case Op_Phi: // Attempt to float above a Phi to some dominating point. if (adr->in(0) != NULL && adr->in(0)->is_CountedLoop()) { @@ -518,21 +692,25 @@ adr = adr->in(1); continue; } - ccp->hash_delete(this); - set_req(MemNode::Control, adr->in(0)); - ccp->hash_insert(this); - return this; - + ccp->hash_delete(n); + n->set_req(MemNode::Control, adr->in(0)); + ccp->hash_insert(n); + return n; + // List of "safe" opcodes; those that implicitly block the memory // op below any null check. 
case Op_CastX2P: // no null checks on native pointers case Op_Parm: // 'this' pointer is not null case Op_LoadP: // Loading from within a klass + case Op_LoadN: // Loading from within a klass case Op_LoadKlass: // Loading from within a klass + case Op_LoadNKlass: // Loading from within a klass case Op_ConP: // Loading from a klass + case Op_ConN: // Loading from a klass case Op_CreateEx: // Sucking up the guts of an exception oop case Op_Con: // Reading from TLS case Op_CMoveP: // CMoveP is pinned + case Op_CMoveN: // CMoveN is pinned break; // No progress case Op_Proj: // Direct call to an allocation routine @@ -541,9 +719,12 @@ { assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value"); const Node* call = adr->in(0); - if (call->is_CallStaticJava()) { - const CallStaticJavaNode* call_java = call->as_CallStaticJava(); - assert(call_java && call_java->method() == NULL, "must be runtime call"); + if (call->is_CallJava()) { + const CallJavaNode* call_java = call->as_CallJava(); + const TypeTuple *r = call_java->tf()->range(); + assert(r->cnt() > TypeFunc::Parms, "must return value"); + const Type* ret_type = r->field_at(TypeFunc::Parms); + assert(ret_type && ret_type->isa_ptr(), "must return pointer"); // We further presume that this is one of // new_instance_Java, new_array_Java, or // the like, but do not assert for this. @@ -589,7 +770,9 @@ //----------------------------LoadNode::make----------------------------------- // Polymorphic factory method: -LoadNode *LoadNode::make( Compile *C, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) { +Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) { + Compile* C = gvn.C; + // sanity check the alias category against the created node type assert(!(adr_type->isa_oopptr() && adr_type->offset() == oopDesc::klass_offset_in_bytes()), @@ -607,7 +790,17 @@ case T_FLOAT: return new (C, 3) LoadFNode(ctl, mem, adr, adr_type, rt ); case T_DOUBLE: return new (C, 3) LoadDNode(ctl, mem, adr, adr_type, rt ); case T_ADDRESS: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr() ); - case T_OBJECT: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr()); + case T_OBJECT: +#ifdef _LP64 + if (adr->bottom_type()->is_ptr_to_narrowoop()) { + Node* load = gvn.transform(new (C, 3) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop())); + return new (C, 2) DecodeNNode(load, load->bottom_type()->make_ptr()); + } else +#endif + { + assert(!adr->bottom_type()->is_ptr_to_narrowoop(), "should have got back a narrow oop"); + return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr()); + } } ShouldNotReachHere(); return (LoadNode*)NULL; @@ -741,6 +934,21 @@ return NULL; } +//----------------------is_instance_field_load_with_local_phi------------------ +bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) { + if( in(MemNode::Memory)->is_Phi() && in(MemNode::Memory)->in(0) == ctrl && + in(MemNode::Address)->is_AddP() ) { + const TypeOopPtr* t_oop = in(MemNode::Address)->bottom_type()->isa_oopptr(); + // Only instances. 
+ if( t_oop != NULL && t_oop->is_known_instance_field() && + t_oop->offset() != Type::OffsetBot && + t_oop->offset() != Type::OffsetTop) { + return true; + } + } + return false; +} + //------------------------------Identity--------------------------------------- // Loads are identity if previous store is to same address Node *LoadNode::Identity( PhaseTransform *phase ) { @@ -763,6 +971,25 @@ // usually runs first, producing the singleton type of the Con.) return value; } + + // Search for an existing data phi which was generated before for the same + // instance's field to avoid infinite genertion of phis in a loop. + Node *region = mem->in(0); + if (is_instance_field_load_with_local_phi(region)) { + const TypePtr *addr_t = in(MemNode::Address)->bottom_type()->isa_ptr(); + int this_index = phase->C->get_alias_index(addr_t); + int this_offset = addr_t->offset(); + int this_id = addr_t->is_oopptr()->instance_id(); + const Type* this_type = bottom_type(); + for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { + Node* phi = region->fast_out(i); + if (phi->is_Phi() && phi != mem && + phi->as_Phi()->is_same_inst_field(this_type, this_id, this_index, this_offset)) { + return phi; + } + } + } + return this; } @@ -842,23 +1069,24 @@ break; } } - LoadNode* load = NULL; - if (allocation != NULL && base->in(load_index)->is_Load()) { - load = base->in(load_index)->as_Load(); - } - if (load != NULL && in(Memory)->is_Phi() && in(Memory)->in(0) == base->in(0)) { + bool has_load = ( allocation != NULL && + (base->in(load_index)->is_Load() || + base->in(load_index)->is_DecodeN() && + base->in(load_index)->in(1)->is_Load()) ); + if (has_load && in(Memory)->is_Phi() && in(Memory)->in(0) == base->in(0)) { // Push the loads from the phi that comes from valueOf up // through it to allow elimination of the loads and the recovery // of the original value. Node* mem_phi = in(Memory); Node* offset = in(Address)->in(AddPNode::Offset); + Node* region = base->in(0); Node* in1 = clone(); Node* in1_addr = in1->in(Address)->clone(); in1_addr->set_req(AddPNode::Base, base->in(allocation_index)); in1_addr->set_req(AddPNode::Address, base->in(allocation_index)); in1_addr->set_req(AddPNode::Offset, offset); - in1->set_req(0, base->in(allocation_index)); + in1->set_req(0, region->in(allocation_index)); in1->set_req(Address, in1_addr); in1->set_req(Memory, mem_phi->in(allocation_index)); @@ -867,7 +1095,7 @@ in2_addr->set_req(AddPNode::Base, base->in(load_index)); in2_addr->set_req(AddPNode::Address, base->in(load_index)); in2_addr->set_req(AddPNode::Offset, offset); - in2->set_req(0, base->in(load_index)); + in2->set_req(0, region->in(load_index)); in2->set_req(Address, in2_addr); in2->set_req(Memory, mem_phi->in(load_index)); @@ -876,23 +1104,32 @@ in2_addr = phase->transform(in2_addr); in2 = phase->transform(in2); - PhiNode* result = PhiNode::make_blank(base->in(0), this); + PhiNode* result = PhiNode::make_blank(region, this); result->set_req(allocation_index, in1); result->set_req(load_index, in2); return result; } - } else if (base->is_Load()) { + } else if (base->is_Load() || + base->is_DecodeN() && base->in(1)->is_Load()) { + if (base->is_DecodeN()) { + // Get LoadN node which loads cached Integer object + base = base->in(1); + } // Eliminate the load of Integer.value for integers from the cache // array by deriving the value from the index into the array. // Capture the offset of the load and then reverse the computation. 
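The "reverse the computation" step referenced just above recovers the boxed value from the offset of the cache element that was loaded, instead of loading Integer.value. The underlying pointer arithmetic, sketched on a plain array (the cache layout and the 'low' bound are illustrative, not taken from this patch):

    #include <cstddef>

    // If cache[i] is known to box the value (low + i), the value can be
    // derived from the element's address alone:
    //   ((addr - base) / element_size) + low
    int value_from_cache_slot(const int* cache_base, const int* slot, int low) {
        std::ptrdiff_t index = slot - cache_base;   // (addr - base) / sizeof(int)
        return static_cast<int>(index) + low;
    }

    // usage: int cache[256]; value_from_cache_slot(cache, &cache[130], -128) == 2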
Node* load_base = base->in(Address)->in(AddPNode::Base); + if (load_base->is_DecodeN()) { + // Get LoadN node which loads IntegerCache.cache field + load_base = load_base->in(1); + } if (load_base != NULL) { Compile::AliasType* atp = phase->C->alias_type(load_base->adr_type()); intptr_t cache_offset; int shift = -1; Node* cache = NULL; if (is_autobox_cache(atp)) { - shift = exact_log2(type2aelembytes[T_OBJECT]); + shift = exact_log2(type2aelembytes(T_OBJECT)); cache = AddPNode::Ideal_base_and_offset(load_base->in(Address), phase, cache_offset); } if (cache != NULL && base->in(Address)->is_AddP()) { @@ -918,7 +1155,7 @@ } #ifdef _LP64 result = new (phase->C, 2) ConvL2INode(phase->transform(result)); -#endif +#endif return result; } } @@ -927,6 +1164,131 @@ return NULL; } +//------------------------------split_through_phi------------------------------ +// Split instance field load through Phi. +Node *LoadNode::split_through_phi(PhaseGVN *phase) { + Node* mem = in(MemNode::Memory); + Node* address = in(MemNode::Address); + const TypePtr *addr_t = phase->type(address)->isa_ptr(); + const TypeOopPtr *t_oop = addr_t->isa_oopptr(); + + assert(mem->is_Phi() && (t_oop != NULL) && + t_oop->is_known_instance_field(), "invalide conditions"); + + Node *region = mem->in(0); + if (region == NULL) { + return NULL; // Wait stable graph + } + uint cnt = mem->req(); + for( uint i = 1; i < cnt; i++ ) { + Node *in = mem->in(i); + if( in == NULL ) { + return NULL; // Wait stable graph + } + } + // Check for loop invariant. + if (cnt == 3) { + for( uint i = 1; i < cnt; i++ ) { + Node *in = mem->in(i); + Node* m = MemNode::optimize_memory_chain(in, addr_t, phase); + if (m == mem) { + set_req(MemNode::Memory, mem->in(cnt - i)); // Skip this phi. + return this; + } + } + } + // Split through Phi (see original code in loopopts.cpp). + assert(phase->C->have_alias_type(addr_t), "instance should have alias type"); + + // Do nothing here if Identity will find a value + // (to avoid infinite chain of value phis generation). + if ( !phase->eqv(this, this->Identity(phase)) ) + return NULL; + + // Skip the split if the region dominates some control edge of the address. + if (cnt == 3 && !MemNode::all_controls_dominate(address, region)) + return NULL; + + const Type* this_type = this->bottom_type(); + int this_index = phase->C->get_alias_index(addr_t); + int this_offset = addr_t->offset(); + int this_iid = addr_t->is_oopptr()->instance_id(); + int wins = 0; + PhaseIterGVN *igvn = phase->is_IterGVN(); + Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset); + for( uint i = 1; i < region->req(); i++ ) { + Node *x; + Node* the_clone = NULL; + if( region->in(i) == phase->C->top() ) { + x = phase->C->top(); // Dead path? Use a dead data op + } else { + x = this->clone(); // Else clone up the data op + the_clone = x; // Remember for possible deletion. + // Alter data node to use pre-phi inputs + if( this->in(0) == region ) { + x->set_req( 0, region->in(i) ); + } else { + x->set_req( 0, NULL ); + } + for( uint j = 1; j < this->req(); j++ ) { + Node *in = this->in(j); + if( in->is_Phi() && in->in(0) == region ) + x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone + } + } + // Check for a 'win' on some paths + const Type *t = x->Value(igvn); + + bool singleton = t->singleton(); + + // See comments in PhaseIdealLoop::split_thru_phi(). 
+ if( singleton && t == Type::TOP ) { + singleton &= region->is_Loop() && (i != LoopNode::EntryControl); + } + + if( singleton ) { + wins++; + x = igvn->makecon(t); + } else { + // We now call Identity to try to simplify the cloned node. + // Note that some Identity methods call phase->type(this). + // Make sure that the type array is big enough for + // our new node, even though we may throw the node away. + // (This tweaking with igvn only works because x is a new node.) + igvn->set_type(x, t); + // If x is a TypeNode, capture any more-precise type permanently into Node + // othewise it will be not updated during igvn->transform since + // igvn->type(x) is set to x->Value() already. + x->raise_bottom_type(t); + Node *y = x->Identity(igvn); + if( y != x ) { + wins++; + x = y; + } else { + y = igvn->hash_find(x); + if( y ) { + wins++; + x = y; + } else { + // Else x is a new node we are keeping + // We do not need register_new_node_with_optimizer + // because set_type has already been called. + igvn->_worklist.push(x); + } + } + } + if (x != the_clone && the_clone != NULL) + igvn->remove_dead_node(the_clone); + phi->set_req(i, x); + } + if( wins > 0 ) { + // Record Phi + igvn->register_new_node_with_optimizer(phi); + return phi; + } + igvn->remove_dead_node(phi); + return NULL; +} //------------------------------Ideal------------------------------------------ // If the load is from Field memory and the pointer is non-null, we can @@ -954,7 +1316,7 @@ Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore); if (base != NULL && phase->type(base)->higher_equal(TypePtr::NOTNULL) - && detect_dominating_control(base->in(0), phase->C->start())) { + && all_controls_dominate(base, phase->C->start())) { // A method-invariant, non-null address (constant or 'this' argument). set_req(MemNode::Control, NULL); } @@ -971,6 +1333,26 @@ } } + Node* mem = in(MemNode::Memory); + const TypePtr *addr_t = phase->type(address)->isa_ptr(); + + if (addr_t != NULL) { + // try to optimize our memory input + Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, phase); + if (opt_mem != mem) { + set_req(MemNode::Memory, opt_mem); + if (phase->type( opt_mem ) == Type::TOP) return NULL; + return this; + } + const TypeOopPtr *t_oop = addr_t->isa_oopptr(); + if (can_reshape && opt_mem->is_Phi() && + (t_oop != NULL) && t_oop->is_known_instance_field()) { + // Split instance field load through Phi. + Node* result = split_through_phi(phase); + if (result != NULL) return result; + } + } + // Check for prior store with a different base or offset; make Load // independent. Skip through any number of them. Bail out if the stores // are in an endless dead cycle and report no progress. This is a key @@ -1057,7 +1439,7 @@ // had an original form like p1:(AddP x x (LShiftL quux 3)), where the // expression (LShiftL quux 3) independently optimized to the constant 8. if ((t->isa_int() == NULL) && (t->isa_long() == NULL) - && Opcode() != Op_LoadKlass) { + && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) { // t might actually be lower than _type, if _type is a unique // concrete subclass of abstract class t. // Make sure the reference is not into the header, by comparing @@ -1074,7 +1456,7 @@ // This can happen if a interface-typed array narrows to a class type. 
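split_through_phi, whose body ends above, clones the load so that each predecessor of the region gets its own copy and then merges the clones with a new Phi; clones that fold to constants or simplify via Identity count as wins, and the split is kept only if there is at least one. A loosely analogous transformation on ordinary C++ values, purely illustrative and not the ideal-graph code:

    // Before: one operation applied to a merged value.
    int before(bool c, int a, int b) {
        int merged = c ? a : b;    // plays the role of the memory/value Phi
        return merged + 1;         // the operation to be split
    }

    // After: the operation is cloned per predecessor and the results merged.
    // Profitable when one of the clones simplifies, e.g. when a is a constant.
    int after(bool c, int a, int b) {
        int on_a = a + 1;
        int on_b = b + 1;
        return c ? on_a : on_b;
    }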
jt = _type; } - + if (EliminateAutoBox) { // The pointers in the autobox arrays are always non-null Node* base = in(Address)->in(AddPNode::Base); @@ -1198,6 +1580,17 @@ return value->bottom_type(); } + const TypeOopPtr *tinst = tp->isa_oopptr(); + if (tinst != NULL && tinst->is_known_instance_field()) { + // If we have an instance type and our memory input is the + // programs's initial memory state, there is no matching store, + // so just return a zero of the appropriate type + Node *mem = in(MemNode::Memory); + if (mem->is_Parm() && mem->in(0)->is_Start()) { + assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm"); + return Type::get_zero_type(_type->basic_type()); + } + } return _type; } @@ -1260,8 +1653,30 @@ } //============================================================================= +//----------------------------LoadKlassNode::make------------------------------ +// Polymorphic factory method: +Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) { + Compile* C = gvn.C; + Node *ctl = NULL; + // sanity check the alias category against the created node type + const TypeOopPtr *adr_type = adr->bottom_type()->isa_oopptr(); + assert(adr_type != NULL, "expecting TypeOopPtr"); +#ifdef _LP64 + if (adr_type->is_ptr_to_narrowoop()) { + Node* load_klass = gvn.transform(new (C, 3) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowoop())); + return new (C, 2) DecodeNNode(load_klass, load_klass->bottom_type()->make_ptr()); + } +#endif + assert(!adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop"); + return new (C, 3) LoadKlassNode(ctl, mem, adr, at, tk); +} + //------------------------------Value------------------------------------------ const Type *LoadKlassNode::Value( PhaseTransform *phase ) const { + return klass_value_common(phase); +} + +const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const { // Either input is TOP ==> the result is TOP const Type *t1 = phase->type( in(MemNode::Memory) ); if (t1 == Type::TOP) return Type::TOP; @@ -1393,6 +1808,10 @@ // To clean up reflective code, simplify k.java_mirror.as_klass to plain k. // Also feed through the klass in Allocate(...klass...)._klass. Node* LoadKlassNode::Identity( PhaseTransform *phase ) { + return klass_identity_common(phase); +} + +Node* LoadNode::klass_identity_common(PhaseTransform *phase ) { Node* x = LoadNode::Identity(phase); if (x != this) return x; @@ -1451,6 +1870,29 @@ return this; } + +//------------------------------Value------------------------------------------ +const Type *LoadNKlassNode::Value( PhaseTransform *phase ) const { + const Type *t = klass_value_common(phase); + if (t == Type::TOP) + return t; + + return t->make_narrowoop(); +} + +//------------------------------Identity--------------------------------------- +// To clean up reflective code, simplify k.java_mirror.as_klass to narrow k. +// Also feed through the klass in Allocate(...klass...)._klass. 
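The LoadNode::make, LoadKlassNode::make and LoadNKlassNode changes in these hunks split an oop load into a 32-bit LoadN/LoadNKlass followed by DecodeN, or wrap a narrow result with EncodeP. The arithmetic those nodes stand for, sketched under common compressed-oops assumptions (a non-zero heap base plus a 3-bit shift for 8-byte object alignment; the base constant is an example, not taken from this patch):

    #include <cstdint>

    static const uint64_t kHeapBase = 0x0000000700000000ULL; // example base
    static const unsigned kShift    = 3;                     // 8-byte alignment

    // EncodeP: full 64-bit oop -> 32-bit narrow oop (NULL stays NULL).
    uint32_t encode_oop(uint64_t oop) {
        return oop == 0 ? 0u : (uint32_t)((oop - kHeapBase) >> kShift);
    }

    // DecodeN: 32-bit narrow oop -> full 64-bit oop.
    uint64_t decode_narrow(uint32_t narrow) {
        return narrow == 0 ? 0 : kHeapBase + ((uint64_t)narrow << kShift);
    }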
+Node* LoadNKlassNode::Identity( PhaseTransform *phase ) { + Node *x = klass_identity_common(phase); + + const Type *t = phase->type( x ); + if( t == Type::TOP ) return x; + if( t->isa_narrowoop()) return x; + + return phase->transform(new (phase->C, 2) EncodePNode(x, t->make_narrowoop())); +} + //------------------------------Value----------------------------------------- const Type *LoadRangeNode::Value( PhaseTransform *phase ) const { // Either input is TOP ==> the result is TOP @@ -1466,6 +1908,38 @@ return tap->size(); } +//-------------------------------Ideal--------------------------------------- +// Feed through the length in AllocateArray(...length...)._length. +Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) { + Node* p = MemNode::Ideal_common(phase, can_reshape); + if (p) return (p == NodeSentinel) ? NULL : p; + + // Take apart the address into an oop and and offset. + // Return 'this' if we cannot. + Node* adr = in(MemNode::Address); + intptr_t offset = 0; + Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset); + if (base == NULL) return NULL; + const TypeAryPtr* tary = phase->type(adr)->isa_aryptr(); + if (tary == NULL) return NULL; + + // We can fetch the length directly through an AllocateArrayNode. + // This works even if the length is not constant (clone or newArray). + if (offset == arrayOopDesc::length_offset_in_bytes()) { + AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase); + if (alloc != NULL) { + Node* allocated_length = alloc->Ideal_length(); + Node* len = alloc->make_ideal_length(tary, phase); + if (allocated_length != len) { + // New CastII improves on this. + return len; + } + } + } + + return NULL; +} + //------------------------------Identity--------------------------------------- // Feed through the length in AllocateArray(...length...)._length. Node* LoadRangeNode::Identity( PhaseTransform *phase ) { @@ -1484,19 +1958,28 @@ // We can fetch the length directly through an AllocateArrayNode. // This works even if the length is not constant (clone or newArray). if (offset == arrayOopDesc::length_offset_in_bytes()) { - Node* allocated_length = AllocateArrayNode::Ideal_length(base, phase); - if (allocated_length != NULL) { - return allocated_length; + AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase); + if (alloc != NULL) { + Node* allocated_length = alloc->Ideal_length(); + // Do not allow make_ideal_length to allocate a CastII node. + Node* len = alloc->make_ideal_length(tary, phase, false); + if (allocated_length == len) { + // Return allocated_length only if it would not be improved by a CastII. 
+ return allocated_length; + } } } return this; } + //============================================================================= //---------------------------StoreNode::make----------------------------------- // Polymorphic factory method: -StoreNode* StoreNode::make( Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) { +StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) { + Compile* C = gvn.C; + switch (bt) { case T_BOOLEAN: case T_BYTE: return new (C, 4) StoreBNode(ctl, mem, adr, adr_type, val); @@ -1507,7 +1990,18 @@ case T_FLOAT: return new (C, 4) StoreFNode(ctl, mem, adr, adr_type, val); case T_DOUBLE: return new (C, 4) StoreDNode(ctl, mem, adr, adr_type, val); case T_ADDRESS: - case T_OBJECT: return new (C, 4) StorePNode(ctl, mem, adr, adr_type, val); + case T_OBJECT: +#ifdef _LP64 + if (adr->bottom_type()->is_ptr_to_narrowoop() || + (UseCompressedOops && val->bottom_type()->isa_klassptr() && + adr->bottom_type()->isa_rawptr())) { + val = gvn.transform(new (C, 2) EncodePNode(val, val->bottom_type()->make_narrowoop())); + return new (C, 4) StoreNNode(ctl, mem, adr, adr_type, val); + } else +#endif + { + return new (C, 4) StorePNode(ctl, mem, adr, adr_type, val); + } } ShouldNotReachHere(); return (StoreNode*)NULL; @@ -1721,7 +2215,7 @@ const TypeOopPtr *adr_oop = phase->type(adr)->isa_oopptr(); if (adr_oop == NULL) return false; - if (!adr_oop->is_instance()) + if (!adr_oop->is_known_instance_field()) return false; // if not a distinct instance, there may be aliases of the address for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) { Node *use = adr->fast_out(i); @@ -1830,7 +2324,7 @@ //------------------------------Identity--------------------------------------- // Clearing a zero length array does nothing Node *ClearArrayNode::Identity( PhaseTransform *phase ) { - return phase->type(in(2))->higher_equal(TypeInt::ZERO) ? in(1) : this; + return phase->type(in(2))->higher_equal(TypeX::ZERO) ? 
in(1) : this; } //------------------------------Idealize--------------------------------------- @@ -1889,7 +2383,7 @@ Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(offset)); adr = phase->transform(adr); const TypePtr* atp = TypeRawPtr::BOTTOM; - mem = StoreNode::make(C, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT); + mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT); mem = phase->transform(mem); offset += BytesPerInt; } @@ -1903,8 +2397,13 @@ Node* start_offset, Node* end_offset, PhaseGVN* phase) { - Compile* C = phase->C; - int unit = BytesPerLong; + if (start_offset == end_offset) { + // nothing to do + return mem; + } + + Compile* C = phase->C; + int unit = BytesPerLong; Node* zbase = start_offset; Node* zend = end_offset; @@ -1928,6 +2427,11 @@ intptr_t start_offset, intptr_t end_offset, PhaseGVN* phase) { + if (start_offset == end_offset) { + // nothing to do + return mem; + } + Compile* C = phase->C; assert((end_offset % BytesPerInt) == 0, "odd end offset"); intptr_t done_offset = end_offset; @@ -1942,7 +2446,7 @@ Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(done_offset)); adr = phase->transform(adr); const TypePtr* atp = TypeRawPtr::BOTTOM; - mem = StoreNode::make(C, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT); + mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT); mem = phase->transform(mem); done_offset += BytesPerInt; } @@ -1963,6 +2467,13 @@ return remove_dead_region(phase, can_reshape) ? this : NULL; } +//------------------------------Ideal------------------------------------------ +// Return a node which is more "ideal" than the current node. Strip out +// control copies +Node *AryEqNode::Ideal(PhaseGVN *phase, bool can_reshape){ + return remove_dead_region(phase, can_reshape) ? this : NULL; +} + //============================================================================= MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent) @@ -2001,8 +2512,7 @@ // Return a node which is more "ideal" than the current node. Strip out // control copies Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) { - if (remove_dead_region(phase, can_reshape)) return this; - return NULL; + return remove_dead_region(phase, can_reshape) ? this : NULL; } //------------------------------Value------------------------------------------ @@ -2232,7 +2742,7 @@ // must have preceded the init, or else be equal to the init. // Even after loop optimizations (which might change control edges) // a store is never pinned *before* the availability of its inputs. - if (!MemNode::detect_dominating_control(ctl, this->in(0))) + if (!MemNode::all_controls_dominate(n, this)) return false; // failed to prove a good control } @@ -2299,9 +2809,7 @@ assert(allocation() != NULL, "must be present"); // no negatives, no header fields: - if (start < (intptr_t) sizeof(oopDesc)) return FAIL; - if (start < (intptr_t) sizeof(arrayOopDesc) && - start < (intptr_t) allocation()->minimum_header_size()) return FAIL; + if (start < (intptr_t) allocation()->minimum_header_size()) return FAIL; // after a certain size, we bail out on tracking all the stores: intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize); @@ -2638,14 +3146,14 @@ if (!split) { ++new_long; off[nst] = offset; - st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp, + st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, phase->longcon(con), T_LONG); } else { // Omit either if it is a zero. 
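When the store-tiling code above has to emit a 64-bit constant as two 32-bit stores, it drops whichever half is zero, since InitializeNode captures stores made over freshly zeroed memory (zmem). A sketch of that decision with an invented emit_store_int callback (the half ordering assumes little-endian layout, purely for illustration):

    #include <cstdint>
    #include <functional>

    // Emit at most two 32-bit stores for a 64-bit constant at 'offset',
    // skipping halves that are zero: the underlying memory was just
    // allocated and is known to be zero already.
    void emit_long_as_ints(uint64_t con, int64_t offset,
                           const std::function<void(int64_t, uint32_t)>& emit_store_int) {
        uint32_t lo = (uint32_t)con;           // low  half
        uint32_t hi = (uint32_t)(con >> 32);   // high half
        if (lo != 0) emit_store_int(offset,     lo);
        if (hi != 0) emit_store_int(offset + 4, hi);
    }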
if (con0 != 0) { ++new_int; off[nst] = offset; - st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp, + st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, phase->intcon(con0), T_INT); } if (con1 != 0) { @@ -2653,7 +3161,7 @@ offset += BytesPerInt; adr = make_raw_address(offset, phase); off[nst] = offset; - st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp, + st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, phase->intcon(con1), T_INT); } } @@ -2761,9 +3269,10 @@ Node* zmem = zero_memory(); // initially zero memory state Node* inits = zmem; // accumulating a linearized chain of inits #ifdef ASSERT - intptr_t last_init_off = sizeof(oopDesc); // previous init offset - intptr_t last_init_end = sizeof(oopDesc); // previous init offset+size - intptr_t last_tile_end = sizeof(oopDesc); // previous tile offset+size + intptr_t first_offset = allocation()->minimum_header_size(); + intptr_t last_init_off = first_offset; // previous init offset + intptr_t last_init_end = first_offset; // previous init offset+size + intptr_t last_tile_end = first_offset; // previous tile offset+size #endif intptr_t zeroes_done = header_size; @@ -2898,7 +3407,8 @@ bool InitializeNode::stores_are_sane(PhaseTransform* phase) { if (is_complete()) return true; // stores could be anything at this point - intptr_t last_off = sizeof(oopDesc); + assert(allocation() != NULL, "must be present"); + intptr_t last_off = allocation()->minimum_header_size(); for (uint i = InitializeNode::RawStores; i < req(); i++) { Node* st = in(i); intptr_t st_off = get_store_offset(st, phase); @@ -3253,7 +3763,7 @@ } } - assert(verify_sparse(), "please, no dups of base"); + assert(progress || verify_sparse(), "please, no dups of base"); return progress; } --- old/hotspot/src/share/vm/opto/memnode.hpp 2009-08-01 04:14:06.104133769 +0100 +++ new/hotspot/src/share/vm/opto/memnode.hpp 2009-08-01 04:14:06.019512969 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)memnode.hpp 1.121 07/10/23 13:12:55 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,17 +63,20 @@ debug_only(_adr_type=at; adr_type();) } +public: // Helpers for the optimizer. Documented in memnode.cpp. static bool detect_ptr_independence(Node* p1, AllocateNode* a1, Node* p2, AllocateNode* a2, PhaseTransform* phase); static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast); -public: + static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase); + static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase); // This one should probably be a phase-specific function: - static bool detect_dominating_control(Node* dom, Node* sub); + static bool all_controls_dominate(Node* dom, Node* sub); - // Is this Node a MemNode or some descendent? Default is YES. + // Find any cast-away of null-ness and keep its control. + static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ); virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); virtual const class TypePtr *adr_type() const; // returns bottom_type of address @@ -100,7 +103,13 @@ // What is the type of the value in memory? (T_VOID mean "unspecified".) 
virtual BasicType memory_type() const = 0; - virtual int memory_size() const { return type2aelembytes[memory_type()]; } + virtual int memory_size() const { +#ifdef ASSERT + return type2aelembytes(memory_type(), true); +#else + return type2aelembytes(memory_type()); +#endif + } // Search through memory states which precede this node (load or store). // Look for an exact match for the address, with no intervening @@ -132,7 +141,8 @@ } // Polymorphic factory method: - static LoadNode* make( Compile *C, Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, BasicType bt ); + static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr, + const TypePtr* at, const Type *rt, BasicType bt ); virtual uint hash() const; // Check the type @@ -144,6 +154,9 @@ // zero out the control input. virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); + // Split instance field load through Phi. + Node* split_through_phi(PhaseGVN *phase); + // Recover original value from boxed values Node *eliminate_autobox(PhaseGVN *phase); @@ -151,6 +164,10 @@ // then call the virtual add() to set the type. virtual const Type *Value( PhaseTransform *phase ) const; + // Common methods for LoadKlass and LoadNKlass nodes. + const Type *klass_value_common( PhaseTransform *phase ) const; + Node *klass_identity_common( PhaseTransform *phase ); + virtual uint ideal_reg() const; virtual const Type *bottom_type() const; // Following method is copied from TypeNode: @@ -169,6 +186,9 @@ // Map a load opcode to its corresponding store opcode. virtual int store_Opcode() const = 0; + // Check if the load's memory input is a Phi node with the same control. + bool is_instance_field_load_with_local_phi(Node* ctrl); + #ifndef PRODUCT virtual void dump_spec(outputStream *st) const; #endif @@ -224,6 +244,7 @@ virtual int Opcode() const; virtual const Type *Value( PhaseTransform *phase ) const; virtual Node *Identity( PhaseTransform *phase ); + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); }; //------------------------------LoadLNode-------------------------------------- @@ -322,18 +343,62 @@ virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; } }; + +//------------------------------LoadNNode-------------------------------------- +// Load a narrow oop from memory (either object or array) +class LoadNNode : public LoadNode { +public: + LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t ) + : LoadNode(c,mem,adr,at,t) {} + virtual int Opcode() const; + virtual uint ideal_reg() const { return Op_RegN; } + virtual int store_Opcode() const { return Op_StoreN; } + virtual BasicType memory_type() const { return T_NARROWOOP; } + // depends_only_on_test is almost always true, and needs to be almost always + // true to enable key hoisting & commoning optimizations. However, for the + // special case of RawPtr loads from TLS top & end, the control edge carries + // the dependence preventing hoisting past a Safepoint instead of the memory + // edge. (An unfortunate consequence of having Safepoints not set Raw + // Memory; itself an unfortunate consequence of having Nodes which produce + // results (new raw memory state) inside of loops preventing all manner of + // other optimizations). Basically, it's ugly but so is the alternative. + // See comment in macro.cpp, around line 125 expand_allocate_common(). 
+ virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; } +}; + //------------------------------LoadKlassNode---------------------------------- // Load a Klass from an object class LoadKlassNode : public LoadPNode { public: - LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk = TypeKlassPtr::OBJECT ) + LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk ) : LoadPNode(c,mem,adr,at,tk) {} virtual int Opcode() const; virtual const Type *Value( PhaseTransform *phase ) const; virtual Node *Identity( PhaseTransform *phase ); - virtual bool depends_only_on_test() const { return true; } + virtual bool depends_only_on_test() const { return true; } + + // Polymorphic factory method: + static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, + const TypeKlassPtr *tk = TypeKlassPtr::OBJECT ); +}; + +//------------------------------LoadNKlassNode--------------------------------- +// Load a narrow Klass from an object. +class LoadNKlassNode : public LoadNNode { +public: + LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk ) + : LoadNNode(c,mem,adr,at,tk) {} + virtual int Opcode() const; + virtual uint ideal_reg() const { return Op_RegN; } + virtual int store_Opcode() const { return Op_StoreN; } + virtual BasicType memory_type() const { return T_NARROWOOP; } + + virtual const Type *Value( PhaseTransform *phase ) const; + virtual Node *Identity( PhaseTransform *phase ); + virtual bool depends_only_on_test() const { return true; } }; + //------------------------------LoadSNode-------------------------------------- // Load a short (16bits signed) from memory class LoadSNode : public LoadNode { @@ -368,7 +433,8 @@ } // Polymorphic factory method: - static StoreNode* make( Compile *C, Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, BasicType bt ); + static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr, + const TypePtr* at, Node *val, BasicType bt ); virtual uint hash() const; // Check the type @@ -480,6 +546,15 @@ virtual BasicType memory_type() const { return T_ADDRESS; } }; +//------------------------------StoreNNode------------------------------------- +// Store narrow oop to memory +class StoreNNode : public StoreNode { +public: + StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} + virtual int Opcode() const; + virtual BasicType memory_type() const { return T_NARROWOOP; } +}; + //------------------------------StoreCMNode----------------------------------- // Store card-mark byte to memory for CM // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store @@ -536,6 +611,7 @@ }; //------------------------------LoadStoreNode--------------------------- +// Note: is_Mem() method returns 'true' for this class. class LoadStoreNode : public Node { public: enum { @@ -559,6 +635,17 @@ virtual uint ideal_reg() const { return Op_RegFlags; } }; +//------------------------------StoreIConditionalNode--------------------------- +// Conditionally store int to memory, if no change since prior +// load-locked. Sets flags for success or failure of the store. 
+class StoreIConditionalNode : public LoadStoreNode { +public: + StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreNode(c, mem, adr, val, ii) { } + virtual int Opcode() const; + // Produces flags + virtual uint ideal_reg() const { return Op_RegFlags; } +}; + //------------------------------StoreLConditionalNode--------------------------- // Conditionally store long to memory, if no change since prior // load-locked. Sets flags for success or failure of the store. @@ -566,6 +653,8 @@ public: StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { } virtual int Opcode() const; + // Produces flags + virtual uint ideal_reg() const { return Op_RegFlags; } }; @@ -592,6 +681,13 @@ virtual int Opcode() const; }; +//------------------------------CompareAndSwapNNode--------------------------- +class CompareAndSwapNNode : public LoadStoreNode { +public: + CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { } + virtual int Opcode() const; +}; + //------------------------------ClearArray------------------------------------- class ClearArrayNode: public Node { public: @@ -647,6 +743,18 @@ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); }; +//------------------------------AryEq--------------------------------------- +class AryEqNode: public Node { +public: + AryEqNode(Node *control, Node* s1, Node* s2): Node(control, s1, s2) {}; + virtual int Opcode() const; + virtual bool depends_only_on_test() const { return false; } + virtual const Type* bottom_type() const { return TypeInt::BOOL; } + virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; } + virtual uint ideal_reg() const { return Op_RegI; } + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); +}; + //------------------------------MemBar----------------------------------------- // There are different flavors of Memory Barriers to match the Java Memory // Model. Monitor-enter and volatile-load act as Aquires: no following ref --- old/hotspot/src/share/vm/opto/mulnode.cpp 2009-08-01 04:14:07.044650682 +0100 +++ new/hotspot/src/share/vm/opto/mulnode.cpp 2009-08-01 04:14:06.942811705 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)mulnode.cpp 1.134 07/07/19 19:08:26 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -155,6 +155,14 @@ if( t1 == Type::BOTTOM || t2 == Type::BOTTOM ) return bottom_type(); +#if defined(IA32) + // Can't trust native compilers to properly fold strict double + // multiplication with round-to-zero on this platform. + if (op == Op_MulD && phase->C->method()->is_strict()) { + return TypeD::DOUBLE; + } +#endif + return mul_ring(t1,t2); // Local flavor of type multiplication } @@ -363,11 +371,30 @@ // Compute the product type of two double ranges into this node. const Type *MulDNode::mul_ring(const Type *t0, const Type *t1) const { if( t0 == Type::DOUBLE || t1 == Type::DOUBLE ) return Type::DOUBLE; - // We must be adding 2 double constants. + // We must be multiplying 2 double constants. 
return TypeD::make( t0->getd() * t1->getd() ); } //============================================================================= +//------------------------------Value------------------------------------------ +const Type *MulHiLNode::Value( PhaseTransform *phase ) const { + // Either input is TOP ==> the result is TOP + const Type *t1 = phase->type( in(1) ); + const Type *t2 = phase->type( in(2) ); + if( t1 == Type::TOP ) return Type::TOP; + if( t2 == Type::TOP ) return Type::TOP; + + // Either input is BOTTOM ==> the result is the local BOTTOM + const Type *bot = bottom_type(); + if( (t1 == bot) || (t2 == bot) || + (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) + return bot; + + // It is not worth trying to constant fold this stuff! + return TypeLong::LONG; +} + +//============================================================================= //------------------------------mul_ring--------------------------------------- // Supplied function returns the product of the inputs IN THE CURRENT RING. // For the logical operations the ring's MUL is really a logical AND function. @@ -425,9 +452,10 @@ // needed either. if( lop == Op_URShiftI ) { const TypeInt *t12 = phase->type( load->in(2) )->isa_int(); - if( t12 && t12->is_con() ) { - int shift_con = t12->get_con(); - int mask = max_juint >> shift_con; + if( t12 && t12->is_con() ) { // Shift is by a constant + int shift = t12->get_con(); + shift &= BitsPerJavaInteger - 1; // semantics of Java shifts + int mask = max_juint >> shift; if( (mask&con) == mask ) // If AND is useless, skip it return load; } @@ -555,9 +583,10 @@ // needed either. if( lop == Op_URShiftL ) { const TypeInt *t12 = phase->type( usr->in(2) )->isa_int(); - if( t12 && t12->is_con() ) { - int shift_con = t12->get_con(); - jlong mask = max_julong >> shift_con; + if( t12 && t12->is_con() ) { // Shift is by a constant + int shift = t12->get_con(); + shift &= BitsPerJavaLong - 1; // semantics of Java shifts + jlong mask = max_julong >> shift; if( (mask&con) == mask ) // If AND is useless, skip it return usr; } @@ -581,9 +610,9 @@ const TypeInt *t12 = phase->type(rsh->in(2))->isa_int(); if( t12 && t12->is_con() ) { // Shift is by a constant int shift = t12->get_con(); - shift &= (BitsPerJavaInteger*2)-1; // semantics of Java shifts - const jlong sign_bits_mask = ~(((jlong)CONST64(1) << (jlong)(BitsPerJavaInteger*2 - shift)) -1); - // If the AND'ing of the 2 masks has no bits, then only original shifted + shift &= BitsPerJavaLong - 1; // semantics of Java shifts + const jlong sign_bits_mask = ~(((jlong)CONST64(1) << (jlong)(BitsPerJavaLong - shift)) -1); + // If the AND'ing of the 2 masks has no bits, then only original shifted // bits survive. NO sign-extension bits survive the maskings. if( (sign_bits_mask & mask) == 0 ) { // Use zero-fill shift instead @@ -762,7 +791,7 @@ // Check for ((x & ((CONST64(1)<<(64-c0))-1)) << c0) which ANDs off high bits // before shifting them away. 
- const jlong bits_mask = ((jlong)CONST64(1) << (jlong)(BitsPerJavaInteger*2 - con)) - CONST64(1); + const jlong bits_mask = ((jlong)CONST64(1) << (jlong)(BitsPerJavaLong - con)) - CONST64(1); if( add1_op == Op_AndL && phase->type(add1->in(2)) == TypeLong::make( bits_mask ) ) return new (phase->C, 3) LShiftLNode( add1->in(1), in(2) ); @@ -796,7 +825,7 @@ return TypeLong::LONG; uint shift = r2->get_con(); - shift &= (BitsPerJavaInteger*2)-1; // semantics of Java shifts + shift &= BitsPerJavaLong - 1; // semantics of Java shifts // Shift by a multiple of 64 does nothing: if (shift == 0) return t1; @@ -1211,7 +1240,7 @@ if ( con == 0 ) return NULL; // let Identity() handle a 0 shift count // note: mask computation below does not work for 0 shift count // We'll be wanting the right-shift amount as a mask of that many bits - const jlong mask = (((jlong)CONST64(1) << (jlong)(BitsPerJavaInteger*2 - con)) -1); + const jlong mask = (((jlong)CONST64(1) << (jlong)(BitsPerJavaLong - con)) -1); // Check for ((x << z) + Y) >>> z. Replace with x + con>>>z // The idiom for rounding to a power of 2 is "(Q+(2^z-1)) >>> z". @@ -1278,7 +1307,7 @@ if (r2->is_con()) { uint shift = r2->get_con(); - shift &= (2*BitsPerJavaInteger)-1; // semantics of Java shifts + shift &= BitsPerJavaLong - 1; // semantics of Java shifts // Shift by a multiple of 64 does nothing: if (shift == 0) return t1; // Calculate reasonably aggressive bounds for the result. @@ -1301,7 +1330,7 @@ const TypeLong* tl = TypeLong::make(lo, hi, MAX2(r1->_widen,r2->_widen)); #ifdef ASSERT // Make sure we get the sign-capture idiom correct. - if (shift == (2*BitsPerJavaInteger)-1) { + if (shift == BitsPerJavaLong - 1) { if (r1->_lo >= 0) assert(tl == TypeLong::ZERO, ">>>63 of + is 0"); if (r1->_hi < 0) assert(tl == TypeLong::ONE, ">>>63 of - is +1"); } --- old/hotspot/src/share/vm/opto/mulnode.hpp 2009-08-01 04:14:08.002485133 +0100 +++ new/hotspot/src/share/vm/opto/mulnode.hpp 2009-08-01 04:14:07.925676942 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)mulnode.hpp 1.53 07/05/05 17:06:18 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -136,6 +136,16 @@ virtual uint ideal_reg() const { return Op_RegD; } }; +//-------------------------------MulHiLNode------------------------------------ +// Upper 64 bits of a 64 bit by 64 bit multiply +class MulHiLNode : public Node { +public: + MulHiLNode( Node *in1, Node *in2 ) : Node(0,in1,in2) {} + virtual int Opcode() const; + virtual const Type *Value( PhaseTransform *phase ) const; + const Type *bottom_type() const { return TypeLong::LONG; } + virtual uint ideal_reg() const { return Op_RegL; } +}; //------------------------------AndINode--------------------------------------- // Logically AND 2 integers. Included with the MUL nodes because it inherits --- old/hotspot/src/share/vm/opto/multnode.hpp 2009-08-01 04:14:08.844328799 +0100 +++ new/hotspot/src/share/vm/opto/multnode.hpp 2009-08-01 04:14:08.771588629 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)multnode.hpp 1.47 07/09/28 10:23:05 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -64,6 +64,9 @@ : Node( src ), _con(con), _is_io_use(io_use) { init_class_id(Class_Proj); + // Optimistic setting. Need additional checks in Node::is_dead_loop_safe(). + if (con != TypeFunc::Memory || src->is_Start()) + init_flags(Flag_is_dead_loop_safe); debug_only(check_con()); } const uint _con; // The field in the tuple we are projecting --- old/hotspot/src/share/vm/opto/node.cpp 2009-08-01 04:14:09.719018613 +0100 +++ new/hotspot/src/share/vm/opto/node.cpp 2009-08-01 04:14:09.625192241 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)node.cpp 1.228 07/09/28 10:23:04 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -815,8 +815,7 @@ Node* Node::uncast() const { // Should be inline: //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this; - if (is_ConstraintCast() || - (is_Type() && req() == 2 && Opcode() == Op_CheckCastPP)) + if (is_ConstraintCast() || is_CheckCastPP()) return uncast_helper(this); else return (Node*) this; @@ -830,7 +829,7 @@ break; } else if (p->is_ConstraintCast()) { p = p->in(1); - } else if (p->is_Type() && p->Opcode() == Op_CheckCastPP) { + } else if (p->is_CheckCastPP()) { p = p->in(1); } else { break; @@ -1021,21 +1020,164 @@ return false; }; +//--------------------------find_exact_control--------------------------------- +// Skip Proj and CatchProj nodes chains. Check for Null and Top. +Node* Node::find_exact_control(Node* ctrl) { + if (ctrl == NULL && this->is_Region()) + ctrl = this->as_Region()->is_copy(); + + if (ctrl != NULL && ctrl->is_CatchProj()) { + if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index) + ctrl = ctrl->in(0); + if (ctrl != NULL && !ctrl->is_top()) + ctrl = ctrl->in(0); + } + + if (ctrl != NULL && ctrl->is_Proj()) + ctrl = ctrl->in(0); + + return ctrl; } + +//--------------------------dominates------------------------------------------ +// Helper function for MemNode::all_controls_dominate(). +// Check if 'this' control node dominates or equal to 'sub' control node. +// We already know that if any path back to Root or Start reaches 'this', +// then all paths do, so this is a simple search for one example, +// not an exhaustive search for a counterexample. +bool Node::dominates(Node* sub, Node_List &nlist) { + assert(this->is_CFG(), "expecting control"); + assert(sub != NULL && sub->is_CFG(), "expecting control"); + + // detect dead cycle without regions + int iterations_without_region_limit = DominatorSearchLimit; + + Node* orig_sub = sub; + Node* dom = this; + bool met_dom = false; + nlist.clear(); + + // Walk 'sub' backward up the chain to 'dom', watching for regions. + // After seeing 'dom', continue up to Root or Start. + // If we hit a region (backward split point), it may be a loop head. + // Keep going through one of the region's inputs. If we reach the + // same region again, go through a different input. Eventually we + // will either exit through the loop head, or give up. + // (If we get confused, break out and return a conservative 'false'.) + while (sub != NULL) { + if (sub->is_top()) break; // Conservative answer for dead code.
+ if (sub == dom) { + if (nlist.size() == 0) { + // No Region nodes except loops were visited before and the EntryControl + // path was taken for loops: it did not walk in a cycle. + return true; + } else if (met_dom) { + break; // already met before: walk in a cycle + } else { + // Region nodes were visited. Continue walk up to Start or Root + // to make sure that it did not walk in a cycle. + met_dom = true; // first time meet + iterations_without_region_limit = DominatorSearchLimit; // Reset + } + } + if (sub->is_Start() || sub->is_Root()) { + // Success if we met 'dom' along a path to Start or Root. + // We assume there are no alternative paths that avoid 'dom'. + // (This assumption is up to the caller to ensure!) + return met_dom; + } + Node* up = sub->in(0); + // Normalize simple pass-through regions and projections: + up = sub->find_exact_control(up); + // If sub == up, we found a self-loop. Try to push past it. + if (sub == up && sub->is_Loop()) { + // Take loop entry path on the way up to 'dom'. + up = sub->in(1); // in(LoopNode::EntryControl); + } else if (sub == up && sub->is_Region() && sub->req() != 3) { + // Always take in(1) path on the way up to 'dom' for clone regions + // (with only one input) or regions which merge > 2 paths + // (usually used to merge fast/slow paths). + up = sub->in(1); + } else if (sub == up && sub->is_Region()) { + // Try both paths for Regions with 2 input paths (it may be a loop head). + // It could give conservative 'false' answer without information + // which region's input is the entry path. + iterations_without_region_limit = DominatorSearchLimit; // Reset + + bool region_was_visited_before = false; + // Was this Region node visited before? + // If so, we have reached it because we accidentally took a + // loop-back edge from 'sub' back into the body of the loop, + // and worked our way up again to the loop header 'sub'. + // So, take the first unexplored path on the way up to 'dom'. + for (int j = nlist.size() - 1; j >= 0; j--) { + intptr_t ni = (intptr_t)nlist.at(j); + Node* visited = (Node*)(ni & ~1); + bool visited_twice_already = ((ni & 1) != 0); + if (visited == sub) { + if (visited_twice_already) { + // Visited 2 paths, but still stuck in loop body. Give up. + return false; + } + // The Region node was visited before only once. + // (We will repush with the low bit set, below.) + nlist.remove(j); + // We will find a new edge and re-insert. + region_was_visited_before = true; + break; + } + } + + // Find an incoming edge which has not been seen yet; walk through it. + assert(up == sub, ""); + uint skip = region_was_visited_before ? 1 : 0; + for (uint i = 1; i < sub->req(); i++) { + Node* in = sub->in(i); + if (in != NULL && !in->is_top() && in != sub) { + if (skip == 0) { + up = in; + break; + } + --skip; // skip this nontrivial input + } + } + + // Set 0 bit to indicate that both paths were taken. + nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0))); + } + + if (up == sub) { + break; // some kind of tight cycle + } + if (up == orig_sub && met_dom) { + // returned back after visiting 'dom' + break; // some kind of cycle + } + if (--iterations_without_region_limit < 0) { + break; // dead cycle + } + sub = up; + } + + // Did not meet Root or Start node in pred. chain. + // Conservative answer for dead code. + return false; +} + //------------------------------remove_dead_region----------------------------- // This control node is dead. Follow the subgraph below it making everything // using it dead as well. 
This will happen normally via the usual IterGVN // worklist but this call is more efficient. Do not update use-def info // inside the dead region, just at the borders. -static bool kill_dead_code( Node *dead, PhaseIterGVN *igvn ) { +static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) { // Con's are a popular node to re-hit in the hash table again. - if( dead->is_Con() ) return false; + if( dead->is_Con() ) return; // Can't put ResourceMark here since igvn->_worklist uses the same arena // for verify pass with +VerifyOpto and we add/remove elements in it here. Node_List nstack(Thread::current()->resource_area()); Node *top = igvn->C->top(); - bool progress = false; nstack.push(dead); while (nstack.size() > 0) { @@ -1074,7 +1216,6 @@ for (uint i=0; i < dead->req(); i++) { Node *n = dead->in(i); // Get input to dead guy if (n != NULL && !n->is_top()) { // Input is valid? - progress = true; dead->set_req(i, top); // Smash input away if (n->outcnt() == 0) { // Input also goes dead? if (!n->is_Con()) @@ -1093,7 +1234,7 @@ } } // (dead->outcnt() == 0) } // while (nstack.size() > 0) for outputs - return progress; + return; } //------------------------------remove_dead_region----------------------------- @@ -1103,7 +1244,8 @@ // Lost control into this guy? I.e., it became unreachable? // Aggressively kill all unreachable code. if (can_reshape && n->is_top()) { - return kill_dead_code(this, phase->is_IterGVN()); + kill_dead_code(this, phase->is_IterGVN()); + return false; // Node is dead. } if( n->is_Region() && n->as_Region()->is_copy() ) { @@ -1173,6 +1315,12 @@ return ((ConPNode*)this)->type()->is_ptr()->get_con(); } +// Get a narrow oop constant from a ConNNode. +intptr_t Node::get_narrowcon() const { + assert( Opcode() == Op_ConN, "" ); + return ((ConNNode*)this)->type()->is_narrowoop()->get_con(); +} + // Get a long constant from a ConNode. // Return a default value if there is no apparent constant here. const TypeLong* Node::find_long_type() const { @@ -1241,7 +1389,7 @@ } #ifdef ASSERT // Search along debug_orig edges last: - for (Node* orig = n->debug_orig(); orig != NULL; orig = orig->debug_orig()) { + for (Node* orig = n->debug_orig(); orig != NULL && n != orig; orig = orig->debug_orig()) { if (NotANode(orig)) break; find_recur( result, orig, idx, only_ctrl, old_space, new_space ); } @@ -1465,97 +1613,48 @@ } //------------------------------dump_nodes------------------------------------- - -// Helper class for dump_nodes. Wraps an old and new VectorSet. -class OldNewVectorSet : public StackObj { - Arena* _node_arena; - VectorSet _old_vset, _new_vset; - VectorSet* select(Node* n) { - return _node_arena->contains(n) ? 
&_new_vset : &_old_vset; - } - public: - OldNewVectorSet(Arena* node_arena, ResourceArea* area) : - _node_arena(node_arena), - _old_vset(area), _new_vset(area) {} - - void set(Node* n) { select(n)->set(n->_idx); } - bool test_set(Node* n) { return select(n)->test_set(n->_idx) != 0; } - bool test(Node* n) { return select(n)->test(n->_idx) != 0; } - void del(Node* n) { (*select(n)) >>= n->_idx; } -}; - - static void dump_nodes(const Node* start, int d, bool only_ctrl) { Node* s = (Node*)start; // remove const if (NotANode(s)) return; + uint depth = (uint)ABS(d); + int direction = d; Compile* C = Compile::current(); - ResourceArea *area = Thread::current()->resource_area(); - Node_Stack stack(area, MIN2((uint)ABS(d), C->unique() >> 1)); - OldNewVectorSet visited(C->node_arena(), area); - OldNewVectorSet on_stack(C->node_arena(), area); - - visited.set(s); - on_stack.set(s); - stack.push(s, 0); - if (d < 0) s->dump(); - - // Do a depth first walk over edges - while (stack.is_nonempty()) { - Node* tp = stack.node(); - uint idx = stack.index(); - uint limit = d > 0 ? tp->len() : tp->outcnt(); - if (idx >= limit) { - // no more arcs to visit - if (d > 0) tp->dump(); - on_stack.del(tp); - stack.pop(); - } else { - // process the "idx"th arc - stack.set_index(idx + 1); - Node* n = d > 0 ? tp->in(idx) : tp->raw_out(idx); - - if (NotANode(n)) continue; - // do not recurse through top or the root (would reach unrelated stuff) - if (n->is_Root() || n->is_top()) continue; - if (only_ctrl && !n->is_CFG()) continue; - - if (!visited.test_set(n)) { // forward arc - // Limit depth - if (stack.size() < (uint)ABS(d)) { - if (d < 0) n->dump(); - stack.push(n, 0); - on_stack.set(n); - } - } else { // back or cross arc - if (on_stack.test(n)) { // back arc - // print loop if there are no phis or regions in the mix - bool found_loop_breaker = false; - int k; - for (k = stack.size() - 1; k >= 0; k--) { - Node* m = stack.node_at(k); - if (m->is_Phi() || m->is_Region() || m->is_Root() || m->is_Start()) { - found_loop_breaker = true; - break; - } - if (m == n) // Found loop head - break; - } - assert(k >= 0, "n must be on stack"); + GrowableArray <Node *> nstack(C->unique()); - if (!found_loop_breaker) { - tty->print("# %s LOOP FOUND:", only_ctrl ? "CONTROL" : "DATA"); - for (int i = stack.size() - 1; i >= k; i--) { - Node* m = stack.node_at(i); - bool mnew = C->node_arena()->contains(m); - tty->print(" %s%d:%s", (mnew? "": "o"), m->_idx, m->Name()); - if (i != 0) tty->print(d > 0? " <-": " ->"); - } - tty->cr(); - } + nstack.append(s); + int begin = 0; + int end = 0; + for(uint i = 0; i < depth; i++) { + end = nstack.length(); + for(int j = begin; j < end; j++) { + Node* tp = nstack.at(j); + uint limit = direction > 0 ? tp->len() : tp->outcnt(); + for(uint k = 0; k < limit; k++) { + Node* n = direction > 0 ?
tp->in(k) : tp->raw_out(k); + + if (NotANode(n)) continue; + // do not recurse through top or the root (would reach unrelated stuff) + if (n->is_Root() || n->is_top()) continue; + if (only_ctrl && !n->is_CFG()) continue; + + bool on_stack = nstack.contains(n); + if (!on_stack) { + nstack.append(n); } } } + begin = end; + } + end = nstack.length(); + if (direction > 0) { + for(int j = end-1; j >= 0; j--) { + nstack.at(j)->dump(); + } + } else { + for(int j = 0; j < end; j++) { + nstack.at(j)->dump(); + } } } --- old/hotspot/src/share/vm/opto/node.hpp 2009-08-01 04:14:10.712825889 +0100 +++ new/hotspot/src/share/vm/opto/node.hpp 2009-08-01 04:14:10.620746616 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)node.hpp 1.224 07/09/28 10:33:17 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,6 +56,8 @@ class ConNode; class CountedLoopNode; class CountedLoopEndNode; +class DecodeNNode; +class EncodePNode; class FastLockNode; class FastUnlockNode; class IfNode; @@ -94,6 +96,7 @@ class Node_Stack; class NullCheckNode; class OopMap; +class ParmNode; class PCTableNode; class PhaseCCP; class PhaseGVN; @@ -108,6 +111,7 @@ class RegionNode; class RootNode; class SafePointNode; +class SafePointScalarObjectNode; class StartNode; class State; class StoreNode; @@ -439,6 +443,12 @@ public: // Globally replace this node by a given new node, updating all uses. void replace_by(Node* new_node); + // Globally replace this node by a given new node, updating all uses + // and cutting input edges of old node. + void subsume_by(Node* new_node) { + replace_by(new_node); + disconnect_inputs(NULL); + } void set_req_X( uint i, Node *n, PhaseIterGVN *igvn ); // Find the one non-null required input. RegionNode only Node *nonnull_req() const; @@ -560,6 +570,7 @@ DEFINE_CLASS_ID(JumpProj, Proj, 1) DEFINE_CLASS_ID(IfTrue, Proj, 2) DEFINE_CLASS_ID(IfFalse, Proj, 3) + DEFINE_CLASS_ID(Parm, Proj, 4) DEFINE_CLASS_ID(Region, Node, 3) DEFINE_CLASS_ID(Loop, Region, 0) @@ -576,6 +587,9 @@ DEFINE_CLASS_ID(ConstraintCast, Type, 1) DEFINE_CLASS_ID(CheckCastPP, Type, 2) DEFINE_CLASS_ID(CMove, Type, 3) + DEFINE_CLASS_ID(SafePointScalarObject, Type, 4) + DEFINE_CLASS_ID(DecodeN, Type, 5) + DEFINE_CLASS_ID(EncodeP, Type, 6) DEFINE_CLASS_ID(Mem, Node, 6) DEFINE_CLASS_ID(Load, Mem, 0) @@ -684,6 +698,8 @@ DEFINE_CLASS_QUERY(Cmp) DEFINE_CLASS_QUERY(CountedLoop) DEFINE_CLASS_QUERY(CountedLoopEnd) + DEFINE_CLASS_QUERY(DecodeN) + DEFINE_CLASS_QUERY(EncodeP) DEFINE_CLASS_QUERY(FastLock) DEFINE_CLASS_QUERY(FastUnlock) DEFINE_CLASS_QUERY(If) @@ -715,12 +731,14 @@ DEFINE_CLASS_QUERY(Mul) DEFINE_CLASS_QUERY(Multi) DEFINE_CLASS_QUERY(MultiBranch) + DEFINE_CLASS_QUERY(Parm) DEFINE_CLASS_QUERY(PCTable) DEFINE_CLASS_QUERY(Phi) DEFINE_CLASS_QUERY(Proj) DEFINE_CLASS_QUERY(Region) DEFINE_CLASS_QUERY(Root) DEFINE_CLASS_QUERY(SafePoint) + DEFINE_CLASS_QUERY(SafePointScalarObject) DEFINE_CLASS_QUERY(Start) DEFINE_CLASS_QUERY(Store) DEFINE_CLASS_QUERY(Sub) @@ -737,9 +755,10 @@ bool is_Con () const { return (_flags & Flag_is_Con) != 0; } bool is_Goto() const { return (_flags & Flag_is_Goto) != 0; } // The data node which is safe to leave in dead loop during IGVN optimization. 
- bool is_dead_loop_safe() const { - return is_Phi() || is_Proj() || - (_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0; + bool is_dead_loop_safe() const { + return is_Phi() || (is_Proj() && in(0) == NULL) || + ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 && + (!is_Proj() || !in(0)->is_Allocate())); } // is_Copy() returns copied edge index (0 or 1) @@ -814,6 +833,12 @@ // for the transformations to happen. bool has_special_unique_user() const; + // Skip Proj and CatchProj nodes chains. Check for Null and Top. + Node* find_exact_control(Node* ctrl); + + // Check if 'this' node dominates or equal to 'sub'. + bool dominates(Node* sub, Node_List &nlist); + protected: bool remove_dead_region(PhaseGVN *phase, bool can_reshape); public: @@ -914,6 +939,7 @@ // These guys are called by code generated by ADLC: intptr_t get_ptr() const; + intptr_t get_narrowcon() const; jdouble getd() const; jfloat getf() const; @@ -1297,7 +1323,8 @@ Node *pop() { if( _clock_index >= size() ) _clock_index = 0; Node *b = at(_clock_index); - map( _clock_index++, Node_List::pop()); + map( _clock_index, Node_List::pop()); + if (size() != 0) _clock_index++; // Always start from 0 _in_worklist >>= b->_idx; return b; } @@ -1325,7 +1352,6 @@ // Inline definition of Compile::record_for_igvn must be deferred to this point. inline void Compile::record_for_igvn(Node* n) { _for_igvn->push(n); - record_for_escape_analysis(n); } //------------------------------Node_Stack------------------------------------- @@ -1377,6 +1403,10 @@ uint index() const { return _inode_top->indx; } + uint index_at(uint i) const { + assert(_inodes + i <= _inode_top, "in range"); + return _inodes[i].indx; + } void set_node(Node *n) { _inode_top->node = n; } @@ -1384,7 +1414,7 @@ _inode_top->indx = i; } uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size - uint size() const { return (uint)pointer_delta(_inode_top, _inodes, sizeof(INode)) + 1; } // Current size + uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size bool is_nonempty() const { return (_inode_top >= _inodes); } bool is_empty() const { return (_inode_top < _inodes); } void clear() { _inode_top = _inodes - 1; } // retain storage --- old/hotspot/src/share/vm/opto/opcodes.cpp 2009-08-01 04:14:11.612242105 +0100 +++ new/hotspot/src/share/vm/opto/opcodes.cpp 2009-08-01 04:14:11.543977898 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)opcodes.cpp 1.15 07/05/05 17:06:24 JVM" #endif /* - * Copyright 1998-2003 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ const char *NodeClassNames[] = { "Node", "Set", + "RegN", "RegI", "RegP", "RegF", --- old/hotspot/src/share/vm/opto/opcodes.hpp 2009-08-01 04:14:12.507719460 +0100 +++ new/hotspot/src/share/vm/opto/opcodes.hpp 2009-08-01 04:14:12.431247557 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)opcodes.hpp 1.31 07/05/05 17:06:24 JVM" #endif /* - * Copyright 1997-2003 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ enum Opcodes { Op_Node = 0, macro(Set) // Instruction selection match rule + macro(RegN) // Machine narrow oop register macro(RegI) // Machine integer register macro(RegP) // Machine pointer register macro(RegF) // Machine float register --- old/hotspot/src/share/vm/opto/output.cpp 2009-08-01 04:14:13.421099673 +0100 +++ new/hotspot/src/share/vm/opto/output.cpp 2009-08-01 04:14:13.324864136 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)output.cpp 1.290 07/09/20 11:01:49 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,6 +51,7 @@ // Initialize the space for the BufferBlob used to find and verify // instruction size in MachNode::emit_size() init_scratch_buffer_blob(); + if (failing()) return; // Out of memory // Make sure I can find the Start Node Block_Array& bbs = _cfg->_bbs; @@ -265,7 +266,7 @@ # endif // ENABLE_ZAP_DEAD_LOCALS //------------------------------compute_loop_first_inst_sizes------------------ -// Compute the size of first NumberOfLoopInstrToAlign instructions at head +// Compute the size of first NumberOfLoopInstrToAlign instructions at the top // of a loop. When aligning a loop we need to provide enough instructions // in cpu's fetch buffer to feed decoders. The loop alignment could be // avoided if we have enough instructions in fetch buffer at the head of a loop. @@ -286,34 +287,23 @@ for( uint i=1; i <= last_block; i++ ) { Block *b = _cfg->_blocks[i]; // Check the first loop's block which requires an alignment. - if( b->head()->is_Loop() && - b->code_alignment() > (uint)relocInfo::addr_unit() ) { + if( b->loop_alignment() > (uint)relocInfo::addr_unit() ) { uint sum_size = 0; uint inst_cnt = NumberOfLoopInstrToAlign; - inst_cnt = b->compute_first_inst_size(sum_size, inst_cnt, - _regalloc); - // Check the next fallthrough block if first loop's block does not have - // enough instructions. - if( inst_cnt > 0 && i < last_block ) { - // First, check if the first loop's block contains whole loop. - // LoopNode::LoopBackControl == 2. - Block *bx = _cfg->_bbs[b->pred(2)->_idx]; - // Skip connector blocks (with limit in case of irreducible loops). - int search_limit = 16; - while( bx->is_connector() && search_limit-- > 0) { - bx = _cfg->_bbs[bx->pred(1)->_idx]; - } - if( bx != b ) { // loop body is in several blocks. - Block *nb = NULL; - while( inst_cnt > 0 && i < last_block && nb != bx && - !_cfg->_blocks[i+1]->head()->is_Loop() ) { - i++; - nb = _cfg->_blocks[i]; - inst_cnt = nb->compute_first_inst_size(sum_size, inst_cnt, - _regalloc); - } // while( inst_cnt > 0 && i < last_block ) - } // if( bx != b ) - } // if( inst_cnt > 0 && i < last_block ) + inst_cnt = b->compute_first_inst_size(sum_size, inst_cnt, _regalloc); + + // Check subsequent fallthrough blocks if the loop's first + // block(s) does not have enough instructions. 
+ Block *nb = b; + while( inst_cnt > 0 && + i < last_block && + !_cfg->_blocks[i+1]->has_loop_alignment() && + !nb->has_successor(b) ) { + i++; + nb = _cfg->_blocks[i]; + inst_cnt = nb->compute_first_inst_size(sum_size, inst_cnt, _regalloc); + } // while( inst_cnt > 0 && i < last_block ) + b->set_first_inst_size(sum_size); } // f( b->head()->is_Loop() ) } // for( i <= last_block ) @@ -334,6 +324,7 @@ uint *jmp_end = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks); uint *blk_starts = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks+1); DEBUG_ONLY( uint *jmp_target = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks); ) + DEBUG_ONLY( uint *jmp_rule = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks); ) blk_starts[0] = 0; // Initialize the sizes to 0 @@ -445,15 +436,17 @@ uintptr_t target = blk_starts[bnum]; if( mach->is_pc_relative() ) { int offset = target-(blk_starts[i] + jmp_end[i]); - if (_matcher->is_short_branch_offset(offset)) { + if (_matcher->is_short_branch_offset(mach->rule(), offset)) { // We've got a winner. Replace this branch. - MachNode *replacement = mach->short_branch_version(this); + MachNode* replacement = mach->short_branch_version(this); b->_nodes.map(j, replacement); - + mach->subsume_by(replacement); + // Update the jmp_end size to save time in our // next pass. jmp_end[i] -= (mach->size(_regalloc) - replacement->size(_regalloc)); DEBUG_ONLY( jmp_target[i] = bnum; ); + DEBUG_ONLY( jmp_rule[i] = mach->rule(); ); } } else { #ifndef PRODUCT @@ -511,7 +504,7 @@ // Get the size of the block uint blk_size = adr - blk_starts[i]; - // When the next block starts a loop, we may insert pad NOP + // When the next block is the top of a loop, we may insert pad NOP // instructions. Block *nb = _cfg->_blocks[i+1]; int current_offset = blk_starts[i] + blk_size; @@ -525,10 +518,10 @@ for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks if( jmp_target[i] != 0 ) { int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_end[i]); - if (!_matcher->is_short_branch_offset(offset)) { + if (!_matcher->is_short_branch_offset(jmp_rule[i], offset)) { tty->print_cr("target (%d) - jmp_end(%d) = offset (%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_end[i], offset, i, jmp_target[i]); } - assert(_matcher->is_short_branch_offset(offset), "Displacement too large for short jmp"); + assert(_matcher->is_short_branch_offset(jmp_rule[i], offset), "Displacement too large for short jmp"); } } #endif @@ -564,7 +557,30 @@ : new LocationValue(Location::new_stk_loc(l_type, ra->reg2offset(regnum))); } -void Compile::FillLocArray( int idx, Node *local, GrowableArray<ScopeValue*> *array ) { + +ObjectValue* +Compile::sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id) { + for (int i = 0; i < objs->length(); i++) { + assert(objs->at(i)->is_object(), "corrupt object cache"); + ObjectValue* sv = (ObjectValue*) objs->at(i); + if (sv->id() == id) { + return sv; + } + } + // Otherwise.. + return NULL; +} + +void Compile::set_sv_for_object_node(GrowableArray<ScopeValue*> *objs, + ObjectValue* sv ) { + assert(sv_for_node_id(objs, sv->id()) == NULL, "Precondition"); + objs->append(sv); +} + + +void Compile::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local, + GrowableArray<ScopeValue*> *array, + GrowableArray<ScopeValue*> *objs ) { assert( local, "use _top instead of null" ); if (array->length() != idx) { assert(array->length() == idx + 1, "Unexpected array count"); @@ -581,6 +597,29 @@ } const Type *t = local->bottom_type(); + // Is it a safepoint scalar object node?
+ if (local->is_SafePointScalarObject()) { + SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject(); + + ObjectValue* sv = Compile::sv_for_node_id(objs, spobj->_idx); + if (sv == NULL) { + ciKlass* cik = t->is_oopptr()->klass(); + assert(cik->is_instance_klass() || + cik->is_array_klass(), "Not supported allocation."); + sv = new ObjectValue(spobj->_idx, + new ConstantOopWriteValue(cik->encoding())); + Compile::set_sv_for_object_node(objs, sv); + + uint first_ind = spobj->first_index(); + for (uint i = 0; i < spobj->n_fields(); i++) { + Node* fld_node = sfpt->in(first_ind+i); + (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs); + } + } + array->append(sv); + return; + } + // Grab the register number for the local OptoReg::Name regnum = _regalloc->get_reg_first(local); if( OptoReg::is_valid(regnum) ) {// Got a register/stack? @@ -640,6 +679,8 @@ } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) { array->append(new_loc_value( _regalloc, regnum, Matcher::int_in_long ? Location::int_in_long : Location::normal )); + } else if( t->base() == Type::NarrowOop ) { + array->append(new_loc_value( _regalloc, regnum, Location::narrowoop )); } else { array->append(new_loc_value( _regalloc, regnum, _regalloc->is_oop(local) ? Location::oop : Location::normal )); } @@ -659,8 +700,15 @@ case Type::KlassPtr: // fall through array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->encoding())); break; - case Type::Int: - array->append(new ConstantIntValue(t->is_int()->get_con())); + case Type::NarrowOop: + if (t == TypeNarrowOop::NULL_PTR) { + array->append(new ConstantOopWriteValue(NULL)); + } else { + array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->encoding())); + } + break; + case Type::Int: + array->append(new ConstantIntValue(t->is_int()->get_con())); break; case Type::RawPtr: // A return address (T_ADDRESS). @@ -758,6 +806,11 @@ JVMState* youngest_jvms = sfn->jvms(); int max_depth = youngest_jvms->depth(); + // Allocate the object pool for scalar-replaced objects -- the map from + // small-integer keys (which can be recorded in the local and ostack + // arrays) to descriptions of the object state. + GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>(); + // Visit scopes from oldest to youngest.
for (int depth = 1; depth <= max_depth; depth++) { JVMState* jvms = youngest_jvms->of_depth(depth); @@ -776,13 +829,13 @@ // Insert locals into the locarray GrowableArray<ScopeValue*> *locarray = new GrowableArray<ScopeValue*>(num_locs); for( idx = 0; idx < num_locs; idx++ ) { - FillLocArray( idx, sfn->local(jvms, idx), locarray ); + FillLocArray( idx, sfn, sfn->local(jvms, idx), locarray, objs ); } // Insert expression stack entries into the exparray GrowableArray<ScopeValue*> *exparray = new GrowableArray<ScopeValue*>(num_exps); for( idx = 0; idx < num_exps; idx++ ) { - FillLocArray( idx, sfn->stack(jvms, idx), exparray ); + FillLocArray( idx, sfn, sfn->stack(jvms, idx), exparray, objs ); } // Add in mappings of the monitors @@ -799,23 +852,51 @@ // Loop over monitors and insert into array for(idx = 0; idx < num_mon; idx++) { // Grab the node that defines this monitor - Node* box_node; - Node* obj_node; - box_node = sfn->monitor_box(jvms, idx); - obj_node = sfn->monitor_obj(jvms, idx); + Node* box_node = sfn->monitor_box(jvms, idx); + Node* obj_node = sfn->monitor_obj(jvms, idx); // Create ScopeValue for object ScopeValue *scval = NULL; - if( !obj_node->is_Con() ) { + + if( obj_node->is_SafePointScalarObject() ) { + SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject(); + scval = Compile::sv_for_node_id(objs, spobj->_idx); + if (scval == NULL) { + const Type *t = obj_node->bottom_type(); + ciKlass* cik = t->is_oopptr()->klass(); + assert(cik->is_instance_klass() || + cik->is_array_klass(), "Not supported allocation."); + ObjectValue* sv = new ObjectValue(spobj->_idx, + new ConstantOopWriteValue(cik->encoding())); + Compile::set_sv_for_object_node(objs, sv); + + uint first_ind = spobj->first_index(); + for (uint i = 0; i < spobj->n_fields(); i++) { + Node* fld_node = sfn->in(first_ind+i); + (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs); + } + scval = sv; + } + } else if( !obj_node->is_Con() ) { OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node); - scval = new_loc_value( _regalloc, obj_reg, Location::oop ); + if( obj_node->bottom_type()->base() == Type::NarrowOop ) { + scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop ); + } else { + scval = new_loc_value( _regalloc, obj_reg, Location::oop ); + } } else { - scval = new ConstantOopWriteValue(obj_node->bottom_type()->is_instptr()->const_oop()->encoding()); + const TypePtr *tp = obj_node->bottom_type()->make_ptr(); + scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->encoding()); } OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node); - monarray->append(new MonitorValue(scval, Location::new_stk_loc(Location::normal,_regalloc->reg2offset(box_reg)))); - } + Location basic_lock = Location::new_stk_loc(Location::normal,_regalloc->reg2offset(box_reg)); + while( !box_node->is_BoxLock() ) box_node = box_node->in(1); + monarray->append(new MonitorValue(scval, basic_lock, box_node->as_BoxLock()->is_eliminated())); + } + + // We dump the object pool first, since deoptimization reads it in first. + debug_info()->dump_object_pool(objs); // Build first class objects to pass to scope DebugToken *locvals = debug_info()->create_scope_values(locarray); @@ -826,6 +907,7 @@ ciMethod* scope_method = method ? method : _method; // Describe the scope here assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI"); + // Now we can describe the scope.
debug_info()->describe_scope(safepoint_pc_offset,scope_method,jvms->bci(),locvals,expvals,monvals); } // End jvms loop @@ -924,11 +1006,8 @@ // blown the code cache size. C->record_failure("excessive request to CodeCache"); } else { - UseInterpreter = true; - UseCompiler = false; - AlwaysCompileLoopMethods = false; + // Let CompileBroker disable further compilations. C->record_failure("CodeCache is full"); - warning("CodeCache is full. Compiling has been disabled"); } } @@ -983,7 +1062,7 @@ // If this machine supports different size branch offsets, then pre-compute // the length of the blocks - if( _matcher->is_short_branch_offset(0) ) { + if( _matcher->is_short_branch_offset(-1, 0) ) { Shorten_branches(blk_labels, code_req, locs_req, stub_req, const_req); labels_not_set = false; } @@ -1294,8 +1373,8 @@ } // End for all instructions in block - // If the next block _starts_ a loop, pad this block out to align - // the loop start a little. Helps prevent pipe stalls at loop starts + // If the next block is the top of a loop, pad this block out to align + // the loop top a little. Helps prevent pipe stalls at loop back branches. int nop_size = (new (this) MachNopNode())->size(_regalloc); if( i<_cfg->_num_blocks-1 ) { Block *nb = _cfg->_blocks[i+1]; --- old/hotspot/src/share/vm/opto/parse.hpp 2009-08-01 04:14:14.470218093 +0100 +++ new/hotspot/src/share/vm/opto/parse.hpp 2009-08-01 04:14:14.388853361 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parse.hpp 1.274 07/09/28 10:23:03 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,9 +57,9 @@ InlineTree *build_inline_tree_for_callee(ciMethod* callee_method, JVMState* caller_jvms, int caller_bci); - const char* try_to_inline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result); - const char* shouldInline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const; - const char* shouldNotInline(ciMethod* callee_method, WarmCallInfo* wci_result) const; + const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result); + const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const; + const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const; void print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const PRODUCT_RETURN; InlineTree *caller_tree() const { return _caller_tree; } @@ -170,9 +170,19 @@ int start() const { return flow()->start(); } int limit() const { return flow()->limit(); } - int pre_order() const { return flow()->pre_order(); } + int rpo() const { return flow()->rpo(); } int start_sp() const { return flow()->stack_size(); } + bool is_loop_head() const { return flow()->is_loop_head(); } + bool is_SEL_head() const { return flow()->is_single_entry_loop_head(); } + bool is_SEL_backedge(Block* pred) const{ return is_SEL_head() && pred->rpo() >= rpo(); } + bool is_invariant_local(uint i) const { + const JVMState* jvms = start_map()->jvms(); + if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false; + return flow()->is_invariant_local(i - jvms->locoff()); + } + bool
can_elide_SEL_phi(uint i) const { assert(is_SEL_head(),""); return is_invariant_local(i); } + const Type* peek(int off=0) const { return stack_type_at(start_sp() - (off+1)); } const Type* stack_type_at(int i) const; @@ -308,7 +318,7 @@ // entry_bci() -- see osr_bci, etc. ciTypeFlow* flow() const { return _flow; } - // blocks() -- see pre_order_at, start_block, etc. + // blocks() -- see rpo_at, start_block, etc. int block_count() const { return _block_count; } GraphKit& exits() { return _exits; } @@ -333,12 +343,12 @@ // Must this parse be aborted? bool failing() { return C->failing(); } - Block* pre_order_at(int po) { - assert(0 <= po && po < _block_count, "oob"); - return &_blocks[po]; + Block* rpo_at(int rpo) { + assert(0 <= rpo && rpo < _block_count, "oob"); + return &_blocks[rpo]; } Block* start_block() { - return pre_order_at(flow()->start_block()->pre_order()); + return rpo_at(flow()->start_block()->rpo()); } // Can return NULL if the flow pass did not complete a block. Block* successor_for_bci(int bci) { @@ -362,9 +372,6 @@ // Parse all the basic blocks. void do_all_blocks(); - // Helper for do_all_blocks; makes one pass in pre-order. - void visit_blocks(); - // Parse the current basic block void do_one_block(); @@ -482,7 +489,7 @@ float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci); bool seems_never_taken(float prob); - void do_ifnull(BoolTest::mask btest); + void do_ifnull(BoolTest::mask btest, Node* c); void do_if(BoolTest::mask btest, Node* c); void repush_if_args(); void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, --- old/hotspot/src/share/vm/opto/parse1.cpp 2009-08-01 04:14:15.483841924 +0100 +++ new/hotspot/src/share/vm/opto/parse1.cpp 2009-08-01 04:14:15.389924947 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parse1.cpp 1.493 07/05/17 15:59:31 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,17 +32,17 @@ // the most. Some of the non-static variables are needed in bytecodeInfo.cpp // and eventually should be encapsulated in a proper class (gri 8/18/98). 
-int nodes_created = 0; int nodes_created_old = 0; -int methods_parsed = 0; int methods_parsed_old = 0; -int methods_seen = 0; int methods_seen_old = 0; +int nodes_created = 0; +int methods_parsed = 0; +int methods_seen = 0; +int blocks_parsed = 0; +int blocks_seen = 0; -int explicit_null_checks_inserted = 0, explicit_null_checks_inserted_old = 0; -int explicit_null_checks_elided = 0, explicit_null_checks_elided_old = 0; +int explicit_null_checks_inserted = 0; +int explicit_null_checks_elided = 0; int all_null_checks_found = 0, implicit_null_checks = 0; int implicit_null_throws = 0; -int parse_idx = 0; -size_t parse_arena = 0; int reclaim_idx = 0; int reclaim_in = 0; int reclaim_node = 0; @@ -64,6 +64,7 @@ tty->cr(); if (methods_seen != methods_parsed) tty->print_cr("Reasons for parse failures (NOT cumulative):"); + tty->print_cr("Blocks parsed: %d Blocks seen: %d", blocks_parsed, blocks_seen); if( explicit_null_checks_inserted ) tty->print_cr("%d original NULL checks - %d elided (%2d%%); optimizer leaves %d,", explicit_null_checks_inserted, explicit_null_checks_elided, (100*explicit_null_checks_elided)/explicit_null_checks_inserted, all_null_checks_found); @@ -376,6 +377,12 @@ C->record_method_not_compilable_all_tiers(_flow->failure_reason()); } +#ifndef PRODUCT + if (_flow->has_irreducible_entry()) { + C->set_parsed_irreducible_loop(true); + } +#endif + if (_expected_uses <= 0) { _prof_factor = 1; } else { @@ -559,116 +566,91 @@ set_map(entry_map); do_exits(); - // Collect a few more statistics. - parse_idx += C->unique(); - parse_arena += C->node_arena()->used(); - if (log) log->done("parse nodes='%d' memory='%d'", C->unique(), C->node_arena()->used()); } //---------------------------do_all_blocks------------------------------------- void Parse::do_all_blocks() { - _blocks_merged = 0; - _blocks_parsed = 0; + bool has_irreducible = flow()->has_irreducible_entry(); - int old_blocks_merged = -1; - int old_blocks_parsed = -1; + // Walk over all blocks in Reverse Post-Order. + while (true) { + bool progress = false; + for (int rpo = 0; rpo < block_count(); rpo++) { + Block* block = rpo_at(rpo); - for (int tries = 0; ; tries++) { - visit_blocks(); - if (failing()) return; // Check for bailout - - // No need for a work list. The outer loop is hardly ever repeated. - // The following loop traverses the blocks in a reasonable pre-order, - // as produced by the ciTypeFlow pass. - - // This loop can be taken more than once if there are two entries to - // a loop (irreduceable CFG), and the edge which ciTypeFlow chose - // as the first predecessor to the loop goes dead in the parser, - // due to parse-time optimization. (Could happen with obfuscated code.) - - // Look for progress, or the lack of it: - if (_blocks_parsed == block_count()) { - // That's all, folks. - if (TraceOptoParse) { - tty->print_cr("All blocks parsed."); - } - break; - } + if (block->is_parsed()) continue; - // How much work was done this time around? - int new_blocks_merged = _blocks_merged - old_blocks_merged; - int new_blocks_parsed = _blocks_parsed - old_blocks_parsed; - if (new_blocks_merged == 0) { - if (TraceOptoParse) { - tty->print_cr("All live blocks parsed; %d dead blocks.", block_count() - _blocks_parsed); + if (!block->is_merged()) { + // Dead block, no state reaches this block + continue; } - // No new blocks have become parseable. Some blocks are just dead. 
- break; - } - assert(new_blocks_parsed > 0, "must make progress"); - assert(tries < block_count(), "the pre-order cannot be this bad!"); - old_blocks_merged = _blocks_merged; - old_blocks_parsed = _blocks_parsed; - } + // Prepare to parse this block. + load_state_from(block); -#ifndef PRODUCT - // Make sure there are no half-processed blocks remaining. - // Every remaining unprocessed block is dead and may be ignored now. - for (int po = 0; po < block_count(); po++) { - Block* block = pre_order_at(po); - if (!block->is_parsed()) { - if (TraceOptoParse) { - tty->print("Skipped dead block %d at bci:%d", po, block->start()); - assert(!block->is_merged(), "no half-processed blocks"); + if (stopped()) { + // Block is dead. + continue; } - } - } -#endif -} -//---------------------------visit_blocks-------------------------------------- -void Parse::visit_blocks() { - // Walk over all blocks, parsing every one that has been reached (merged). - for (int po = 0; po < block_count(); po++) { - Block* block = pre_order_at(po); + blocks_parsed++; - if (block->is_parsed()) { - // Do not parse twice. - continue; - } + progress = true; + if (block->is_loop_head() || block->is_handler() || has_irreducible && !block->is_ready()) { + // Not all preds have been parsed. We must build phis everywhere. + // (Note that dead locals do not get phis built, ever.) + ensure_phis_everywhere(); - if (!block->is_merged()) { - // No state on this block. It had not yet been reached. - // Delay reaching it until later. - continue; - } + // Leave behind an undisturbed copy of the map, for future merges. + set_map(clone_map()); + } - // Prepare to parse this block. - load_state_from(block); + if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) { + // In the absence of irreducible loops, the Region and Phis + // associated with a merge that doesn't involve a backedge can + // be simplified now since the RPO parsing order guarantees + // that any path which was supposed to reach here has already + // been parsed or must be dead. + Node* c = control(); + Node* result = _gvn.transform_no_reclaim(control()); + if (c != result && TraceOptoParse) { + tty->print_cr("Block #%d replace %d with %d", block->rpo(), c->_idx, result->_idx); + } + if (result != top()) { + record_for_igvn(result); + } + } - if (stopped()) { - // Block is dead. - continue; - } + // Parse the block. + do_one_block(); - if (!block->is_ready() || block->is_handler()) { - // Not all preds have been parsed. We must build phis everywhere. - // (Note that dead locals do not get phis built, ever.) - ensure_phis_everywhere(); + // Check for bailouts. + if (failing()) return; + } - // Leave behind an undisturbed copy of the map, for future merges. - set_map(clone_map()); + // with irreducible loops multiple passes might be necessary to parse everything + if (!has_irreducible || !progress) { + break; + } + } - // Ready or not, parse the block. - do_one_block(); + blocks_seen += block_count(); - // Check for bailouts. - if (failing()) return; +#ifndef PRODUCT + // Make sure there are no half-processed blocks remaining. + // Every remaining unprocessed block is dead and may be ignored now.
+ for (int rpo = 0; rpo < block_count(); rpo++) { + Block* block = rpo_at(rpo); + if (!block->is_parsed()) { + if (TraceOptoParse) { + tty->print_cr("Skipped dead block %d at bci:%d", rpo, block->start()); + } + assert(!block->is_merged(), "no half-processed blocks"); + } } +#endif } //-------------------------------build_exits---------------------------------- @@ -1137,24 +1119,24 @@ _blocks = NEW_RESOURCE_ARRAY(Block, _block_count); Copy::zero_to_bytes(_blocks, sizeof(Block)*_block_count); - int po; + int rpo; // Initialize the structs. - for (po = 0; po < block_count(); po++) { - Block* block = pre_order_at(po); - block->init_node(this, po); + for (rpo = 0; rpo < block_count(); rpo++) { + Block* block = rpo_at(rpo); + block->init_node(this, rpo); } // Collect predecessor and successor information. - for (po = 0; po < block_count(); po++) { - Block* block = pre_order_at(po); + for (rpo = 0; rpo < block_count(); rpo++) { + Block* block = rpo_at(rpo); block->init_graph(this); } } //-------------------------------init_node------------------------------------- -void Parse::Block::init_node(Parse* outer, int po) { - _flow = outer->flow()->pre_order_at(po); +void Parse::Block::init_node(Parse* outer, int rpo) { + _flow = outer->flow()->rpo_at(rpo); _pred_count = 0; _preds_parsed = 0; _count = 0; @@ -1180,7 +1162,7 @@ int p = 0; for (int i = 0; i < ns+ne; i++) { ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns); - Block* block2 = outer->pre_order_at(tf2->pre_order()); + Block* block2 = outer->rpo_at(tf2->rpo()); _successors[i] = block2; // Accumulate pred info for the other block, too. @@ -1371,10 +1353,11 @@ int nt = b->all_successors(); tty->print("Parsing block #%d at bci [%d,%d), successors: ", - block()->pre_order(), block()->start(), block()->limit()); + block()->rpo(), block()->start(), block()->limit()); for (int i = 0; i < nt; i++) { - tty->print((( i < ns) ? " %d" : " %d(e)"), b->successor_at(i)->pre_order()); + tty->print((( i < ns) ? " %d" : " %d(e)"), b->successor_at(i)->rpo()); } + if (b->is_loop_head()) tty->print(" lphd"); tty->print_cr(""); } @@ -1503,8 +1486,8 @@ void Parse::handle_missing_successor(int target_bci) { #ifndef PRODUCT Block* b = block(); - int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1; - tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->pre_order(), trap_bci); + int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1; + tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci); #endif ShouldNotReachHere(); } @@ -1512,7 +1495,7 @@ //--------------------------merge_common--------------------------------------- void Parse::merge_common(Parse::Block* target, int pnum) { if (TraceOptoParse) { - tty->print("Merging state at block #%d bci:%d", target->pre_order(), target->start()); + tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start()); } // Zap extra stack slots to top @@ -1537,6 +1520,7 @@ // which must not be allowed into this block's map.) if (pnum > PhiNode::Input // Known multiple inputs. || target->is_handler() // These have unpredictable inputs. + || target->is_loop_head() // Known multiple inputs || control()->is_Region()) { // We must hide this guy. // Add a Region to start the new basic block. Phis will be added // later lazily. 
@@ -1578,15 +1562,21 @@ // Compute where to merge into // Merge incoming control path - r->set_req(pnum, newin->control()); + r->init_req(pnum, newin->control()); if (pnum == 1) { // Last merge for this Region? - _gvn.transform_no_reclaim(r); + if (!block()->flow()->is_irreducible_entry()) { + Node* result = _gvn.transform_no_reclaim(r); + if (r != result && TraceOptoParse) { + tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx); + } + } record_for_igvn(r); } // Update all the non-control inputs to map: assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms"); + bool check_elide_phi = target->is_SEL_backedge(save_block); for (uint j = 1; j < newin->req(); j++) { Node* m = map()->in(j); // Current state of target. Node* n = newin->in(j); // Incoming change to target state. @@ -1606,7 +1596,11 @@ merge_memory_edges(n->as_MergeMem(), pnum, nophi); continue; default: // All normal stuff - if (phi == NULL) phi = ensure_phi(j, nophi); + if (phi == NULL) { + if (!check_elide_phi || !target->can_elide_SEL_phi(j)) { + phi = ensure_phi(j, nophi); + } + } break; } } @@ -1739,9 +1733,13 @@ uint nof_monitors = map()->jvms()->nof_monitors(); assert(TypeFunc::Parms == map()->jvms()->locoff(), "parser map should contain only youngest jvms"); + bool check_elide_phi = block()->is_SEL_head(); for (uint i = TypeFunc::Parms; i < monoff; i++) { - ensure_phi(i); + if (!check_elide_phi || !block()->can_elide_SEL_phi(i)) { + ensure_phi(i); + } } + // Even monitors need Phis, though they are well-structured. // This is true for OSR methods, and also for the rare cases where // a monitor object is the subject of a replace_in_map operation. @@ -1839,7 +1837,7 @@ PhiNode* phi = PhiNode::make(region, o, t); gvn().set_type(phi, t); - if (DoEscapeAnalysis) record_for_igvn(phi); + if (C->do_escape_analysis()) record_for_igvn(phi); map->set_req(idx, phi); return phi; } @@ -1904,7 +1902,7 @@ // finalization. In general this will fold up since the concrete // class is often visible so the access flags are constant. Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() ); - Node* klass = _gvn.transform(new (C, 3) LoadKlassNode(NULL, immutable_memory(), klass_addr, TypeInstPtr::KLASS)); + Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) ); Node* access_flags_addr = basic_plus_adr(klass, klass, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)); Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT); --- old/hotspot/src/share/vm/opto/parse2.cpp 2009-08-01 04:14:16.503034382 +0100 +++ new/hotspot/src/share/vm/opto/parse2.cpp 2009-08-01 04:14:16.409209627 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parse2.cpp 1.364 08/07/16 09:47:25 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,12 +70,13 @@ const Type* elemtype = arytype->elem(); if (UseUniqueSubclasses && result2 != NULL) { - const TypeInstPtr* toop = elemtype->isa_instptr(); - if (toop) { + const Type* el = elemtype->make_ptr(); + if (el && el->isa_instptr()) { + const TypeInstPtr* toop = el->is_instptr(); if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) { // If we load from "AbstractClass[]" we must see "ConcreteSubClass". 
const Type* subklass = Type::get_const_type(toop->klass()); - elemtype = subklass->join(elemtype); + elemtype = subklass->join(el); } } } @@ -102,12 +103,22 @@ // Do the range check if (GenerateRangeChecks && need_range_check) { - // Range is constant in array-oop, so we can use the original state of mem - Node* len = load_array_length(ary); - // Test length vs index (standard trick using unsigned compare) - Node* chk = _gvn.transform( new (C, 3) CmpUNode(idx, len) ); - BoolTest::mask btest = BoolTest::lt; - Node* tst = _gvn.transform( new (C, 2) BoolNode(chk, btest) ); + Node* tst; + if (sizetype->_hi <= 0) { + // The greatest array bound is negative, so we can conclude that we're + // compiling unreachable code, but the unsigned compare trick used below + // only works with non-negative lengths. Instead, hack "tst" to be zero so + // the uncommon_trap path will always be taken. + tst = _gvn.intcon(0); + } else { + // Range is constant in array-oop, so we can use the original state of mem + Node* len = load_array_length(ary); + + // Test length vs index (standard trick using unsigned compare) + Node* chk = _gvn.transform( new (C, 3) CmpUNode(idx, len) ); + BoolTest::mask btest = BoolTest::lt; + tst = _gvn.transform( new (C, 2) BoolNode(chk, btest) ); + } // Branch to failure if out of bounds { BuildCutout unless(this, tst, PROB_MAX); if (C->allow_range_check_smearing()) { @@ -130,9 +141,12 @@ // Check for always knowing you are throwing a range-check exception if (stopped()) return top(); - Node* ptr = array_element_address( ary, idx, type, sizetype); + Node* ptr = array_element_address(ary, idx, type, sizetype); if (result2 != NULL) *result2 = elemtype; + + assert(ptr != top(), "top should go hand-in-hand with stopped"); + return ptr; } @@ -887,7 +901,7 @@ } //----------------------------------do_ifnull---------------------------------- -void Parse::do_ifnull(BoolTest::mask btest) { +void Parse::do_ifnull(BoolTest::mask btest, Node *c) { int target_bci = iter().get_dest(); Block* branch_block = successor_for_bci(target_bci); @@ -899,7 +913,7 @@ // (An earlier version of do_ifnull omitted this trap for OSR methods.) #ifndef PRODUCT if (PrintOpto && Verbose) - tty->print_cr("Never-taken backedge stops compilation at bci %d",bci()); + tty->print_cr("Never-taken edge stops compilation at bci %d",bci()); #endif // We need to mark this branch as taken so that if we recompile we will // see that it is possible. 
In the tiered system the interpreter doesn't @@ -917,18 +931,7 @@ return; } - // If this is a backwards branch in the bytecodes, add Safepoint - maybe_add_safepoint(target_bci); - explicit_null_checks_inserted++; - Node* a = null(); - Node* b = pop(); - Node* c = _gvn.transform( new (C, 3) CmpPNode(b, a) ); - - // Make a cast-away-nullness that is control dependent on the test - const Type *t = _gvn.type(b); - const Type *t_not_null = t->join(TypePtr::NOTNULL); - Node *cast = new (C, 2) CastPPNode(b,t_not_null); // Generate real control flow Node *tst = _gvn.transform( new (C, 2) BoolNode( c, btest ) ); @@ -990,7 +993,7 @@ if (prob == PROB_UNKNOWN) { #ifndef PRODUCT if (PrintOpto && Verbose) - tty->print_cr("Never-taken backedge stops compilation at bci %d",bci()); + tty->print_cr("Never-taken edge stops compilation at bci %d",bci()); #endif repush_if_args(); // to gather stats on loop // We need to mark this branch as taken so that if we recompile we will @@ -1026,10 +1029,27 @@ Node* tst = _gvn.transform(tst0); BoolTest::mask taken_btest = BoolTest::illegal; BoolTest::mask untaken_btest = BoolTest::illegal; - if (btest == BoolTest::ne) { - // For now, these are the only cases of btest that matter. (More later.) - taken_btest = taken_if_true ? btest : BoolTest::eq; - untaken_btest = taken_if_true ? BoolTest::eq : btest; + + if (tst->is_Bool()) { + // Refresh c from the transformed bool node, since it may be + // simpler than the original c. Also re-canonicalize btest. + // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)). + // That can arise from statements like: if (x instanceof C) ... + if (tst != tst0) { + // Canonicalize one more time since transform can change it. + btest = tst->as_Bool()->_test._test; + if (!BoolTest(btest).is_canonical()) { + // Reverse edges one more time... + tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) ); + btest = tst->as_Bool()->_test._test; + assert(BoolTest(btest).is_canonical(), "sanity"); + taken_if_true = !taken_if_true; + } + c = tst->in(1); + } + BoolTest::mask neg_btest = BoolTest(btest).negate(); + taken_btest = taken_if_true ? btest : neg_btest; + untaken_btest = taken_if_true ? neg_btest : btest; } // Generate real control flow @@ -2077,11 +2097,15 @@ break; } - case Bytecodes::_ifnull: - do_ifnull(BoolTest::eq); - break; - case Bytecodes::_ifnonnull: - do_ifnull(BoolTest::ne); + case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null; + case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null; + handle_if_null: + // If this is a backwards branch in the bytecodes, add Safepoint + maybe_add_safepoint(iter().get_dest()); + a = null(); + b = pop(); + c = _gvn.transform( new (C, 3) CmpPNode(b, a) ); + do_ifnull(btest, c); break; case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp; @@ -2197,7 +2221,7 @@ sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc())); bool old = printer->traverse_outs(); printer->set_traverse_outs(true); - printer->print_method(C, buffer, 3); + printer->print_method(C, buffer, 4); printer->set_traverse_outs(old); } #endif --- old/hotspot/src/share/vm/opto/parse3.cpp 2009-08-01 04:14:17.480978266 +0100 +++ new/hotspot/src/share/vm/opto/parse3.cpp 2009-08-01 04:14:17.398043442 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parse3.cpp 1.269 08/11/24 12:24:07 JVM" #endif /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -368,7 +368,7 @@ const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT); for (jint i = 0; i < length_con; i++) { Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1); - intptr_t offset = header + ((intptr_t)i << LogBytesPerWord); + intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop); Node* eaddr = basic_plus_adr(array, offset); store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT); } @@ -411,7 +411,7 @@ jint dim_con = find_int_con(length[j], -1); expand_fanout *= dim_con; expand_count += expand_fanout; // count the level-J sub-arrays - if (dim_con < 0 + if (dim_con <= 0 || dim_con > expand_limit || expand_count > expand_limit) { expand_count = 0; --- old/hotspot/src/share/vm/opto/parseHelper.cpp 2009-08-01 04:14:18.433827448 +0100 +++ new/hotspot/src/share/vm/opto/parseHelper.cpp 2009-08-01 04:14:18.343759140 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)parseHelper.cpp 1.197 07/08/14 16:13:24 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,7 @@ // Get method const TypeInstPtr* method_type = TypeInstPtr::make(TypePtr::Constant, method->klass(), true, method, 0); - Node *method_node = _gvn.transform( new (C, 1) ConPNode(method_type) ); + Node *method_node = _gvn.transform( ConNode::make(C, method_type) ); kill_dead_locals(); @@ -146,7 +146,7 @@ int klass_offset = oopDesc::klass_offset_in_bytes(); Node* p = basic_plus_adr( ary, ary, klass_offset ); // p's type is array-of-OOPS plus klass_offset - Node* array_klass = _gvn.transform(new (C, 3) LoadKlassNode(0, immutable_memory(), p, TypeInstPtr::KLASS)); + Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) ); // Get the array klass const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr(); @@ -192,7 +192,7 @@ // Extract the array element class int element_klass_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc); Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset); - Node *a_e_klass = _gvn.transform(new (C, 3) LoadKlassNode(0, immutable_memory(), p2, tak)); + Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) ); // Check (the hard way) and throw if not a subklass. // Result is ignored, we just need the CFG effects. --- old/hotspot/src/share/vm/opto/phase.cpp 2009-08-01 04:14:19.264164272 +0100 +++ new/hotspot/src/share/vm/opto/phase.cpp 2009-08-01 04:14:19.187232568 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)phase.cpp 1.59 07/05/17 16:00:26 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -49,7 +49,7 @@ #ifndef PRODUCT elapsedTimer Phase::_t_graphReshaping; elapsedTimer Phase::_t_scheduler; -elapsedTimer Phase::_t_removeEmptyBlocks; +elapsedTimer Phase::_t_blockOrdering; elapsedTimer Phase::_t_macroExpand; elapsedTimer Phase::_t_peephole; elapsedTimer Phase::_t_codeGeneration; @@ -131,17 +131,17 @@ tty->print_cr (" subtotal : %3.3f sec, %3.2f %%", regalloc_subtotal, percent_of_regalloc); } tty->print_cr (" macroExpand : %3.3f sec", Phase::_t_macroExpand.seconds()); - tty->print_cr (" removeEmpty : %3.3f sec", Phase::_t_removeEmptyBlocks.seconds()); + tty->print_cr (" blockOrdering: %3.3f sec", Phase::_t_blockOrdering.seconds()); tty->print_cr (" peephole : %3.3f sec", Phase::_t_peephole.seconds()); tty->print_cr (" codeGen : %3.3f sec", Phase::_t_codeGeneration.seconds()); tty->print_cr (" install_code : %3.3f sec", Phase::_t_registerMethod.seconds()); tty->print_cr (" ------------ : ----------"); double phase_subtotal = Phase::_t_parser.seconds() + (DoEscapeAnalysis ? Phase::_t_escapeAnalysis.seconds() : 0.0) + - Phase::_t_optimizer.seconds() + Phase::_t_graphReshaping.seconds() + - Phase::_t_matcher.seconds() + Phase::_t_scheduler.seconds() + - Phase::_t_registerAllocation.seconds() + Phase::_t_removeEmptyBlocks.seconds() + - Phase::_t_macroExpand.seconds() + Phase::_t_peephole.seconds() + + Phase::_t_optimizer.seconds() + Phase::_t_graphReshaping.seconds() + + Phase::_t_matcher.seconds() + Phase::_t_scheduler.seconds() + + Phase::_t_registerAllocation.seconds() + Phase::_t_blockOrdering.seconds() + + Phase::_t_macroExpand.seconds() + Phase::_t_peephole.seconds() + Phase::_t_codeGeneration.seconds() + Phase::_t_registerMethod.seconds(); double percent_of_method_compile = ((phase_subtotal == 0.0) ? 0.0 : phase_subtotal / Phase::_t_methodCompilation.seconds()) * 100.0; // counters inside Compile::CodeGen include time for adapters and stubs --- old/hotspot/src/share/vm/opto/phase.hpp 2009-08-01 04:14:20.135666918 +0100 +++ new/hotspot/src/share/vm/opto/phase.hpp 2009-08-01 04:14:20.049159907 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)phase.hpp 1.53 07/05/17 16:00:29 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -43,16 +43,12 @@ Optimistic, // Optimistic analysis phase GVN, // Pessimistic global value numbering phase Ins_Select, // Instruction selection phase - Copy_Elimination, // Copy Elimination - Dead_Code_Elimination, // DCE and compress Nodes - Conditional_Constant, // Conditional Constant Propagation CFG, // Build a CFG - DefUse, // Build Def->Use chains + BlockLayout, // Linear ordering of blocks Register_Allocation, // Register allocation, duh LIVE, // Dragon-book LIVE range problem Interference_Graph, // Building the IFG Coalesce, // Coalescing copies - Conditional_CProp, // Conditional Constant Propagation Ideal_Loop, // Find idealized trip-counted loops Macro_Expand, // Expand macro nodes Peephole, // Apply peephole optimizations @@ -83,7 +79,7 @@ #ifndef PRODUCT static elapsedTimer _t_graphReshaping; static elapsedTimer _t_scheduler; - static elapsedTimer _t_removeEmptyBlocks; + static elapsedTimer _t_blockOrdering; static elapsedTimer _t_macroExpand; static elapsedTimer _t_peephole; static elapsedTimer _t_codeGeneration; --- old/hotspot/src/share/vm/opto/phaseX.cpp 2009-08-01 04:14:21.008294593 +0100 +++ new/hotspot/src/share/vm/opto/phaseX.cpp 2009-08-01 04:14:20.922683565 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)phaseX.cpp 1.262 07/07/19 19:08:26 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -590,11 +590,6 @@ Node_Notes* loc = C->locate_node_notes(nna, x->_idx, true); loc->clear(); // do not put debug info on constants } - // Collect points-to information for escape analysys - ConnectionGraph *cgr = C->congraph(); - if (cgr != NULL) { - cgr->record_escape(x, this); - } } else { x->destruct(); // Hit, destroy duplicate constant x = k; // use existing constant @@ -651,79 +646,9 @@ //============================================================================= //------------------------------transform-------------------------------------- // Return a node which computes the same function as this node, but in a -// faster or cheaper fashion. The Node passed in here must have no other -// pointers to it, as its storage will be reclaimed if the Node can be -// optimized away. +// faster or cheaper fashion. Node *PhaseGVN::transform( Node *n ) { - NOT_PRODUCT( set_transforms(); ) - - // Apply the Ideal call in a loop until it no longer applies - Node *k = n; - NOT_PRODUCT( uint loop_count = 0; ) - while( 1 ) { - Node *i = k->Ideal(this, /*can_reshape=*/false); - if( !i ) break; - assert( i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes" ); - // Can never reclaim storage for Ideal calls, because the Ideal call - // returns a new Node, bumping the High Water Mark and our old Node - // is caught behind the new one. - //if( k != i ) { - //k->destruct(); // Reclaim storage for recent node - k = i; - //} - assert(loop_count++ < K, "infinite loop in PhaseGVN::transform"); - } - NOT_PRODUCT( if( loop_count != 0 ) { set_progress(); } ) - - // If brand new node, make space in type array. 
- ensure_type_or_null(k); - - // Cache result of Value call since it can be expensive - // (abstract interpretation of node 'k' using phase->_types[ inputs ]) - const Type *t = k->Value(this); // Get runtime Value set - assert(t != NULL, "value sanity"); - if (type_or_null(k) != t) { -#ifndef PRODUCT - // Do not record transformation or value construction on first visit - if (type_or_null(k) == NULL) { - inc_new_values(); - set_progress(); - } -#endif - set_type(k, t); - // If k is a TypeNode, capture any more-precise type permanently into Node - k->raise_bottom_type(t); - } - - if( t->singleton() && !k->is_Con() ) { - //k->destruct(); // Reclaim storage for recent node - NOT_PRODUCT( set_progress(); ) - return makecon(t); // Turn into a constant - } - - // Now check for Identities - Node *i = k->Identity(this); // Look for a nearby replacement - if( i != k ) { // Found? Return replacement! - //k->destruct(); // Reclaim storage for recent node - NOT_PRODUCT( set_progress(); ) - return i; - } - - // Try Global Value Numbering - i = hash_find_insert(k); // Found older value when i != NULL - if( i && i != k ) { // Hit? Return the old guy - NOT_PRODUCT( set_progress(); ) - return i; - } - - // Collect points-to information for escape analysys - ConnectionGraph *cgr = C->congraph(); - if (cgr != NULL) { - cgr->record_escape(k, this); - } - - // Return Idealized original - return k; + return transform_no_reclaim(n); } //------------------------------transform-------------------------------------- @@ -822,20 +747,23 @@ //============================================================================= //------------------------------PhaseIterGVN----------------------------------- // Initialize hash table to fresh and clean for +VerifyOpto -PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ) { +PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ), + _delay_transform(false) { } //------------------------------PhaseIterGVN----------------------------------- // Initialize with previous PhaseIterGVN info; used by PhaseCCP -PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn ) : PhaseGVN(igvn), - _worklist( igvn->_worklist ) +PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn ) : PhaseGVN(igvn), + _worklist( igvn->_worklist ), + _delay_transform(igvn->_delay_transform) { } //------------------------------PhaseIterGVN----------------------------------- // Initialize with previous PhaseGVN info from Parser -PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn), - _worklist(*C->for_igvn()) +PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn), + _worklist(*C->for_igvn()), + _delay_transform(false) { uint max; @@ -1031,6 +959,12 @@ //------------------------------transform-------------------------------------- // Non-recursive: idealize Node 'n' with respect to its inputs and its value Node *PhaseIterGVN::transform( Node *n ) { + if (_delay_transform) { + // Register the node but don't optimize for now + register_new_node_with_optimizer(n); + return n; + } + // If brand new node, make space in type array, and give it a type. 
ensure_type_or_null(n); if (type_or_null(n) == NULL) { @@ -1055,7 +989,9 @@ // Apply the Ideal call in a loop until it no longer applies Node *k = n; DEBUG_ONLY(dead_loop_check(k);) + DEBUG_ONLY(bool is_new = (k->outcnt() == 0);) Node *i = k->Ideal(this, /*can_reshape=*/true); + assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes"); #ifndef PRODUCT if( VerifyIterativeGVN ) verify_step(k); @@ -1093,7 +1029,9 @@ } DEBUG_ONLY(dead_loop_check(k);) // Try idealizing again + DEBUG_ONLY(is_new = (k->outcnt() == 0);) i = k->Ideal(this, /*can_reshape=*/true); + assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes"); #ifndef PRODUCT if( VerifyIterativeGVN ) verify_step(k); @@ -1312,7 +1250,7 @@ uint use_op = use->Opcode(); // If changed Cast input, check Phi users for simple cycles - if( use->is_ConstraintCast() || use->Opcode() == Op_CheckCastPP ) { + if( use->is_ConstraintCast() || use->is_CheckCastPP() ) { for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { Node* u = use->fast_out(i2); if (u->is_Phi()) --- old/hotspot/src/share/vm/opto/phaseX.hpp 2009-08-01 04:14:21.977213107 +0100 +++ new/hotspot/src/share/vm/opto/phaseX.hpp 2009-08-01 04:14:21.898044364 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)phaseX.hpp 1.119 07/05/05 17:06:26 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -386,6 +386,10 @@ // Phase for iteratively performing local, pessimistic GVN-style optimizations. // and ideal transformations on the graph. class PhaseIterGVN : public PhaseGVN { + private: + bool _delay_transform; // When true simply register the node when calling transform + // instead of actually optimizing it + // Idealize old Node 'n' with respect to its inputs and its value virtual Node *transform_old( Node *a_node ); protected: @@ -442,6 +446,17 @@ void add_users_to_worklist0( Node *n ); void add_users_to_worklist ( Node *n ); + // Replace old node with new one. + void replace_node( Node *old, Node *nn ) { + add_users_to_worklist(old); + hash_delete(old); + subsume_node(old, nn); + } + + void set_delay_transform(bool delay) { + _delay_transform = delay; + } + #ifndef PRODUCT protected: // Sub-quadratic implementation of VerifyIterativeGVN. --- old/hotspot/src/share/vm/opto/postaloc.cpp 2009-08-01 04:14:22.851493144 +0100 +++ new/hotspot/src/share/vm/opto/postaloc.cpp 2009-08-01 04:14:22.772596890 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)postaloc.cpp 1.84 08/03/26 10:13:00 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ #endif } -//------------------------------may_be_copy_of_callee----------------------------- +//---------------------------may_be_copy_of_callee----------------------------- // Check to see if we can possibly be a copy of a callee-save value. 
bool PhaseChaitin::may_be_copy_of_callee( Node *def ) const { // Short circuit if there are no callee save registers @@ -228,6 +228,20 @@ // Scan all registers to see if this value is around already for( uint reg = 0; reg < (uint)_max_reg; reg++ ) { + if (reg == (uint)nk_reg) { + // Found ourselves so check if there is only one user of this + // copy and keep on searching for a better copy if so. + bool ignore_self = true; + x = n->in(k); + DUIterator_Fast imax, i = x->fast_outs(imax); + Node* first = x->fast_out(i); i++; + while (i < imax && ignore_self) { + Node* use = x->fast_out(i); i++; + if (use != first) ignore_self = false; + } + if (ignore_self) continue; + } + Node *vv = value[reg]; if( !single ) { // Doubles check for aligned-adjacent pair if( (reg&1)==0 ) continue; // Wrong half of a pair --- old/hotspot/src/share/vm/opto/reg_split.cpp 2009-08-01 04:14:23.752443786 +0100 +++ new/hotspot/src/share/vm/opto/reg_split.cpp 2009-08-01 04:14:23.659775273 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)reg_split.cpp 1.81 07/05/05 17:06:27 JVM" #endif /* - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,6 +56,7 @@ // Bail rather than abort int ireg = def->ideal_reg(); if( ireg == 0 || ireg == Op_RegFlags ) { + assert(false, "attempted to spill a non-spillable item"); C->record_method_not_compilable("attempted to spill a non-spillable item"); return NULL; } @@ -530,6 +531,7 @@ // Initialize needs_phi and needs_split bool needs_phi = false; bool needs_split = false; + bool has_phi = false; // Walk the predecessor blocks to check inputs for that live range // Grab predecessor block header n1 = b->pred(1); @@ -572,29 +574,31 @@ u3 = u2; } } // End for all potential Phi inputs - - // If a phi is needed, check for it - if( needs_phi ) { - // check block for appropriate phinode & update edges - for( insidx = 1; insidx <= b->end_idx(); insidx++ ) { - n1 = b->_nodes[insidx]; - // bail if this is not a phi - phi = n1->is_Phi() ? n1->as_Phi() : NULL; - if( phi == NULL ) { - // Keep track of index of first non-PhiNode instruction in block - non_phi = insidx; - // break out of the for loop as we have handled all phi nodes - break; - } - // must be looking at a phi - if( Find_id(n1) == lidxs.at(slidx) ) { - // found the necessary phi - needs_phi = false; - // initialize the Reaches entry for this LRG - Reachblock[slidx] = phi; - break; - } // end if found correct phi - } // end for all phi's + + // check block for appropriate phinode & update edges + for( insidx = 1; insidx <= b->end_idx(); insidx++ ) { + n1 = b->_nodes[insidx]; + // bail if this is not a phi + phi = n1->is_Phi() ? 
n1->as_Phi() : NULL; + if( phi == NULL ) { + // Keep track of index of first non-PhiNode instruction in block + non_phi = insidx; + // break out of the for loop as we have handled all phi nodes + break; + } + // must be looking at a phi + if( Find_id(n1) == lidxs.at(slidx) ) { + // found the necessary phi + needs_phi = false; + has_phi = true; + // initialize the Reaches entry for this LRG + Reachblock[slidx] = phi; + break; + } // end if found correct phi + } // end for all phi's + + // If a phi is needed or exists, check for it + if( needs_phi || has_phi ) { // add new phinode if one not already found if( needs_phi ) { // create a new phi node and insert it into the block @@ -698,7 +702,8 @@ } } assert( u, "at least 1 valid input expected" ); - if( i >= cnt ) { // Didn't find 2+ unique inputs? + if( i >= cnt ) { // Found one unique input + assert(Find_id(n) == Find_id(u), "should be the same lrg"); n->replace_by(u); // Then replace with unique input n->disconnect_inputs(NULL); b->_nodes.remove(insidx); --- old/hotspot/src/share/vm/opto/runtime.cpp 2009-08-01 04:14:24.655652393 +0100 +++ new/hotspot/src/share/vm/opto/runtime.cpp 2009-08-01 04:14:24.568952200 +0100 @@ -47,6 +47,8 @@ address OptoRuntime::_multianewarray3_Java = NULL; address OptoRuntime::_multianewarray4_Java = NULL; address OptoRuntime::_multianewarray5_Java = NULL; +address OptoRuntime::_g1_wb_pre_Java = NULL; +address OptoRuntime::_g1_wb_post_Java = NULL; address OptoRuntime::_vtable_must_compile_Java = NULL; address OptoRuntime::_complete_monitor_locking_Java = NULL; address OptoRuntime::_rethrow_Java = NULL; @@ -92,6 +94,8 @@ gen(env, _multianewarray3_Java , multianewarray3_Type , multianewarray3_C , 0 , true , false, false); gen(env, _multianewarray4_Java , multianewarray4_Type , multianewarray4_C , 0 , true , false, false); gen(env, _multianewarray5_Java , multianewarray5_Type , multianewarray5_C , 0 , true , false, false); + gen(env, _g1_wb_pre_Java , g1_wb_pre_Type , SharedRuntime::g1_wb_pre , 0 , false, false, false); + gen(env, _g1_wb_post_Java , g1_wb_post_Type , SharedRuntime::g1_wb_post , 0 , false, false, false); gen(env, _complete_monitor_locking_Java , complete_monitor_enter_Type , SharedRuntime::complete_monitor_locking_C , 0 , false, false, false); gen(env, _rethrow_Java , rethrow_Type , rethrow_C , 2 , true , false, true ); @@ -388,6 +392,33 @@ return multianewarray_Type(5); } +const TypeFunc *OptoRuntime::g1_wb_pre_Type() { + const Type **fields = TypeTuple::fields(2); + fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value + fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread + const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); + + // create result type (range) + fields = TypeTuple::fields(0); + const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); + + return TypeFunc::make(domain, range); +} + +const TypeFunc *OptoRuntime::g1_wb_post_Type() { + + const Type **fields = TypeTuple::fields(2); + fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Card addr + fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread + const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); + + // create result type (range) + fields = TypeTuple::fields(0); + const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); + + return TypeFunc::make(domain, range); +} + const TypeFunc *OptoRuntime::uncommon_trap_Type() { // create input type (domain) const Type **fields = TypeTuple::fields(1); --- old/hotspot/src/share/vm/opto/runtime.hpp 2009-08-01
04:14:25.630610006 +0100 +++ new/hotspot/src/share/vm/opto/runtime.hpp 2009-08-01 04:14:25.553907427 +0100 @@ -111,7 +111,9 @@ static address _multianewarray3_Java; static address _multianewarray4_Java; static address _multianewarray5_Java; - static address _vtable_must_compile_Java; + static address _g1_wb_pre_Java; + static address _g1_wb_post_Java; + static address _vtable_must_compile_Java; static address _complete_monitor_locking_Java; static address _rethrow_Java; @@ -143,7 +145,9 @@ static void multianewarray3_C(klassOopDesc* klass, int len1, int len2, int len3, JavaThread *thread); static void multianewarray4_C(klassOopDesc* klass, int len1, int len2, int len3, int len4, JavaThread *thread); static void multianewarray5_C(klassOopDesc* klass, int len1, int len2, int len3, int len4, int len5, JavaThread *thread); - + static void g1_wb_pre_C(oopDesc* orig, JavaThread* thread); + static void g1_wb_post_C(void* card_addr, JavaThread* thread); + public: // Slow-path Locking and Unlocking static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* thread); @@ -198,8 +202,10 @@ static address multianewarray3_Java() { return _multianewarray3_Java; } static address multianewarray4_Java() { return _multianewarray4_Java; } static address multianewarray5_Java() { return _multianewarray5_Java; } - static address vtable_must_compile_stub() { return _vtable_must_compile_Java; } - static address complete_monitor_locking_Java() { return _complete_monitor_locking_Java; } + static address g1_wb_pre_Java() { return _g1_wb_pre_Java; } + static address g1_wb_post_Java() { return _g1_wb_post_Java; } + static address vtable_must_compile_stub() { return _vtable_must_compile_Java; } + static address complete_monitor_locking_Java() { return _complete_monitor_locking_Java; } static address slow_arraycopy_Java() { return _slow_arraycopy_Java; } static address register_finalizer_Java() { return _register_finalizer_Java; } @@ -235,6 +241,8 @@ static const TypeFunc* multianewarray3_Type(); // multianewarray static const TypeFunc* multianewarray4_Type(); // multianewarray static const TypeFunc* multianewarray5_Type(); // multianewarray + static const TypeFunc* g1_wb_pre_Type(); + static const TypeFunc* g1_wb_post_Type(); static const TypeFunc* complete_monitor_enter_Type(); static const TypeFunc* complete_monitor_exit_Type(); static const TypeFunc* uncommon_trap_Type(); --- old/hotspot/src/share/vm/opto/subnode.cpp 2009-08-01 04:14:26.492890844 +0100 +++ new/hotspot/src/share/vm/opto/subnode.cpp 2009-08-01 04:14:26.403227964 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)subnode.cpp 1.162 07/09/28 10:33:21 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,10 +48,13 @@ return in(2)->in(2); } - // Convert "(X+Y) - Y" into X + // Convert "(X+Y) - Y" into X and "(X+Y) - X" into Y if( in(1)->Opcode() == Op_AddI ) { if( phase->eqv(in(1)->in(2),in(2)) ) return in(1)->in(1); + if (phase->eqv(in(1)->in(1),in(2))) + return in(1)->in(2); + // Also catch: "(X + Opaque2(Y)) - Y". In this case, 'Y' is a loop-varying // trip counter and X is likely to be loop-invariant (that's how O2 Nodes // are originally used, although the optimizer sometimes jiggers things). 
@@ -206,6 +209,14 @@ if( op1 == Op_AddI && op2 == Op_AddI && in1->in(2) == in2->in(2) ) return new (phase->C, 3) SubINode( in1->in(1), in2->in(1) ); + // Convert "(A+X) - (X+B)" into "A - B" + if( op1 == Op_AddI && op2 == Op_AddI && in1->in(2) == in2->in(1) ) + return new (phase->C, 3) SubINode( in1->in(1), in2->in(2) ); + + // Convert "(X+A) - (B+X)" into "A - B" + if( op1 == Op_AddI && op2 == Op_AddI && in1->in(1) == in2->in(2) ) + return new (phase->C, 3) SubINode( in1->in(2), in2->in(1) ); + // Convert "A-(B-C)" into (A+C)-B", since add is commutative and generally // nicer to optimize than subtract. if( op2 == Op_SubI && in2->outcnt() == 1) { @@ -617,6 +628,13 @@ const TypeOopPtr* p0 = r0->isa_oopptr(); const TypeOopPtr* p1 = r1->isa_oopptr(); if (p0 && p1) { + Node* in1 = in(1)->uncast(); + Node* in2 = in(2)->uncast(); + AllocateNode* alloc1 = AllocateNode::Ideal_allocation(in1, NULL); + AllocateNode* alloc2 = AllocateNode::Ideal_allocation(in2, NULL); + if (MemNode::detect_ptr_independence(in1, alloc1, in2, alloc2, NULL)) { + return TypeInt::CC_GT; // different pointers + } ciKlass* klass0 = p0->klass(); bool xklass0 = p0->klass_is_exact(); ciKlass* klass1 = p1->klass(); @@ -626,20 +644,31 @@ kps != 1 && // both or neither are klass pointers !klass0->is_interface() && // do not trust interfaces !klass1->is_interface()) { + bool unrelated_classes = false; // See if neither subclasses the other, or if the class on top - // is precise. In either of these cases, the compare must fail. + // is precise. In either of these cases, the compare is known + // to fail if at least one of the pointers is provably not null. if (klass0->equals(klass1) || // if types are unequal but klasses are !klass0->is_java_klass() || // types not part of Java language? !klass1->is_java_klass()) { // types not part of Java language? // Do nothing; we know nothing for imprecise types } else if (klass0->is_subtype_of(klass1)) { - // If klass1's type is PRECISE, then we can fail. - if (xklass1) return TypeInt::CC_GT; + // If klass1's type is PRECISE, then classes are unrelated. + unrelated_classes = xklass1; } else if (klass1->is_subtype_of(klass0)) { - // If klass0's type is PRECISE, then we can fail. - if (xklass0) return TypeInt::CC_GT; + // If klass0's type is PRECISE, then classes are unrelated. + unrelated_classes = xklass0; } else { // Neither subtypes the other - return TypeInt::CC_GT; // ...so always fail + unrelated_classes = true; + } + if (unrelated_classes) { + // The oops classes are known to be unrelated. If the joined PTRs of + // two oops is not Null and not Bottom, then we are sure that one + // of the two oops is non-null, and the comparison will always fail. + TypePtr::PTR jp = r0->join_ptr(r1->_ptr); + if (jp != TypePtr::Null && jp != TypePtr::BotPTR) { + return TypeInt::CC_GT; + } } } } @@ -674,7 +703,11 @@ // Now check for LoadKlass on left. Node* ldk1 = in(1); - if (ldk1->Opcode() != Op_LoadKlass) + if (ldk1->is_DecodeN()) { + ldk1 = ldk1->in(1); + if (ldk1->Opcode() != Op_LoadNKlass ) + return NULL; + } else if (ldk1->Opcode() != Op_LoadKlass ) return NULL; // Take apart the address of the LoadKlass: Node* adr1 = ldk1->in(MemNode::Address); @@ -695,7 +728,11 @@ // Check for a LoadKlass from primary supertype array. // Any nested loadklass from loadklass+con must be from the p.s. array. - if (ldk2->Opcode() != Op_LoadKlass) + if (ldk2->is_DecodeN()) { + // Keep ldk2 as DecodeN since it could be used in CmpP below. 
+ if (ldk2->in(1)->Opcode() != Op_LoadNKlass ) + return NULL; + } else if (ldk2->Opcode() != Op_LoadKlass) return NULL; // Verify that we understand the situation @@ -732,6 +769,86 @@ } //============================================================================= +//------------------------------sub-------------------------------------------- +// Simplify an CmpN (compare 2 pointers) node, based on local information. +// If both inputs are constants, compare them. +const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const { + const TypePtr *r0 = t1->make_ptr(); // Handy access + const TypePtr *r1 = t2->make_ptr(); + + // Undefined inputs makes for an undefined result + if( TypePtr::above_centerline(r0->_ptr) || + TypePtr::above_centerline(r1->_ptr) ) + return Type::TOP; + + if (r0 == r1 && r0->singleton()) { + // Equal pointer constants (klasses, nulls, etc.) + return TypeInt::CC_EQ; + } + + // See if it is 2 unrelated classes. + const TypeOopPtr* p0 = r0->isa_oopptr(); + const TypeOopPtr* p1 = r1->isa_oopptr(); + if (p0 && p1) { + ciKlass* klass0 = p0->klass(); + bool xklass0 = p0->klass_is_exact(); + ciKlass* klass1 = p1->klass(); + bool xklass1 = p1->klass_is_exact(); + int kps = (p0->isa_klassptr()?1:0) + (p1->isa_klassptr()?1:0); + if (klass0 && klass1 && + kps != 1 && // both or neither are klass pointers + !klass0->is_interface() && // do not trust interfaces + !klass1->is_interface()) { + bool unrelated_classes = false; + // See if neither subclasses the other, or if the class on top + // is precise. In either of these cases, the compare is known + // to fail if at least one of the pointers is provably not null. + if (klass0->equals(klass1) || // if types are unequal but klasses are + !klass0->is_java_klass() || // types not part of Java language? + !klass1->is_java_klass()) { // types not part of Java language? + // Do nothing; we know nothing for imprecise types + } else if (klass0->is_subtype_of(klass1)) { + // If klass1's type is PRECISE, then classes are unrelated. + unrelated_classes = xklass1; + } else if (klass1->is_subtype_of(klass0)) { + // If klass0's type is PRECISE, then classes are unrelated. + unrelated_classes = xklass0; + } else { // Neither subtypes the other + unrelated_classes = true; + } + if (unrelated_classes) { + // The oops classes are known to be unrelated. If the joined PTRs of + // two oops is not Null and not Bottom, then we are sure that one + // of the two oops is non-null, and the comparison will always fail. + TypePtr::PTR jp = r0->join_ptr(r1->_ptr); + if (jp != TypePtr::Null && jp != TypePtr::BotPTR) { + return TypeInt::CC_GT; + } + } + } + } + + // Known constants can be compared exactly + // Null can be distinguished from any NotNull pointers + // Unknown inputs makes an unknown result + if( r0->singleton() ) { + intptr_t bits0 = r0->get_con(); + if( r1->singleton() ) + return bits0 == r1->get_con() ? TypeInt::CC_EQ : TypeInt::CC_GT; + return ( r1->_ptr == TypePtr::NotNull && bits0==0 ) ? TypeInt::CC_GT : TypeInt::CC; + } else if( r1->singleton() ) { + intptr_t bits1 = r1->get_con(); + return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? 
TypeInt::CC_GT : TypeInt::CC; + } else + return TypeInt::CC; +} + +//------------------------------Ideal------------------------------------------ +Node *CmpNNode::Ideal( PhaseGVN *phase, bool can_reshape ) { + return NULL; +} + +//============================================================================= //------------------------------Value------------------------------------------ // Simplify an CmpF (compare 2 floats ) node, based on local information. // If both inputs are constants, compare them. --- old/hotspot/src/share/vm/opto/subnode.hpp 2009-08-01 04:14:27.477049587 +0100 +++ new/hotspot/src/share/vm/opto/subnode.hpp 2009-08-01 04:14:27.392217112 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)subnode.hpp 1.86 07/09/28 10:23:02 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -166,6 +166,16 @@ virtual const Type *sub( const Type *, const Type * ) const; }; +//------------------------------CmpNNode-------------------------------------- +// Compare 2 narrow oop values, returning condition codes (-1, 0 or 1). +class CmpNNode : public CmpNode { +public: + CmpNNode( Node *in1, Node *in2 ) : CmpNode(in1,in2) {} + virtual int Opcode() const; + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); + virtual const Type *sub( const Type *, const Type * ) const; +}; + //------------------------------CmpLNode--------------------------------------- // Compare 2 long values, returning condition codes (-1, 0 or 1). class CmpLNode : public CmpNode { --- old/hotspot/src/share/vm/opto/superword.cpp 2009-08-01 04:14:28.339392381 +0100 +++ new/hotspot/src/share/vm/opto/superword.cpp 2009-08-01 04:14:28.245496442 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)superword.cpp 1.8 08/03/26 10:13:00 JVM" #endif /* - * Copyright 2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2007-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -68,6 +68,11 @@ Node *cl_exit = cl->loopexit(); if (cl_exit->in(0) != lpt->_head) return; + // Make sure there are no extra control users of the loop backedge + if (cl->back_control()->outcnt() != 1) { + return; + } + // Check for pre-loop ending with CountedLoopEnd(Bool(Cmp(x,Opaque1(limit)))) CountedLoopEndNode* pre_end = get_pre_loop_end(cl); if (pre_end == NULL) return; @@ -162,7 +167,8 @@ Node_List memops; for (int i = 0; i < _block.length(); i++) { Node* n = _block.at(i); - if (n->is_Mem() && in_bb(n)) { + if (n->is_Mem() && in_bb(n) && + is_java_primitive(n->as_Mem()->memory_type())) { int align = memory_alignment(n->as_Mem(), 0); if (align != bottom_align) { memops.push(n); @@ -573,7 +579,7 @@ int SuperWord::data_size(Node* s) { const Type* t = velt_type(s); BasicType bt = t->array_element_basic_type(); - int bsize = type2aelembytes[bt]; + int bsize = type2aelembytes(bt); assert(bsize != 0, "valid size"); return bsize; } @@ -1193,8 +1199,10 @@ Node *n = lp()->fast_out(i); if (in_bb(n) && (n->is_Phi() && n->bottom_type() == Type::MEMORY)) { Node* n_tail = n->in(LoopNode::LoopBackControl); - _mem_slice_head.push(n); - _mem_slice_tail.push(n_tail); + if (n_tail != n->in(LoopNode::EntryControl)) { + _mem_slice_head.push(n); + _mem_slice_tail.push(n_tail); + } } } @@ -1421,8 +1429,9 @@ //---------------------------container_type--------------------------- // Smallest type containing range of values const Type* SuperWord::container_type(const Type* t) { - if (t->isa_aryptr()) { - t = t->is_aryptr()->elem(); + const Type* tp = t->make_ptr(); + if (tp && tp->isa_aryptr()) { + t = tp->is_aryptr()->elem(); } if (t->basic_type() == T_INT) { if (t->higher_equal(TypeInt::BOOL)) return TypeInt::BOOL; @@ -1605,7 +1614,7 @@ // (e - lim) % V == 0 // Solving for lim: // (e - lim0 + N) % V == 0 - // N = [V - (e - lim0) % V] % V + // N = (V - (e - lim0)) % V // lim = lim0 - (V - (e - lim0)) % V int stride = iv_stride(); --- old/hotspot/src/share/vm/opto/type.cpp 2009-08-01 04:14:29.332519499 +0100 +++ new/hotspot/src/share/vm/opto/type.cpp 2009-08-01 04:14:29.230747629 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)type.cpp 1.257 07/10/04 14:36:00 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -43,6 +43,7 @@ T_INT, // Int T_LONG, // Long T_VOID, // Half + T_NARROWOOP, // NarrowOop T_ILLEGAL, // Tuple T_ARRAY, // Array @@ -282,15 +283,6 @@ TypeRawPtr::BOTTOM = TypeRawPtr::make( TypePtr::BotPTR ); TypeRawPtr::NOTNULL= TypeRawPtr::make( TypePtr::NotNull ); - mreg2type[Op_Node] = Type::BOTTOM; - mreg2type[Op_Set ] = 0; - mreg2type[Op_RegI] = TypeInt::INT; - mreg2type[Op_RegP] = TypePtr::BOTTOM; - mreg2type[Op_RegF] = Type::FLOAT; - mreg2type[Op_RegD] = Type::DOUBLE; - mreg2type[Op_RegL] = TypeLong::LONG; - mreg2type[Op_RegFlags] = TypeInt::CC; - const Type **fmembar = TypeTuple::fields(0); TypeTuple::MEMBAR = TypeTuple::make(TypeFunc::Parms+0, fmembar); @@ -308,9 +300,32 @@ false, 0, oopDesc::klass_offset_in_bytes()); TypeOopPtr::BOTTOM = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot); + TypeNarrowOop::NULL_PTR = TypeNarrowOop::make( TypePtr::NULL_PTR ); + TypeNarrowOop::BOTTOM = TypeNarrowOop::make( TypeInstPtr::BOTTOM ); + + mreg2type[Op_Node] = Type::BOTTOM; + mreg2type[Op_Set ] = 0; + mreg2type[Op_RegN] = TypeNarrowOop::BOTTOM; + mreg2type[Op_RegI] = TypeInt::INT; + mreg2type[Op_RegP] = TypePtr::BOTTOM; + mreg2type[Op_RegF] = Type::FLOAT; + mreg2type[Op_RegD] = Type::DOUBLE; + mreg2type[Op_RegL] = TypeLong::LONG; + mreg2type[Op_RegFlags] = TypeInt::CC; + TypeAryPtr::RANGE = TypeAryPtr::make( TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), current->env()->Object_klass(), false, arrayOopDesc::length_offset_in_bytes()); - // There is no shared klass for Object[]. See note in TypeAryPtr::klass(). - TypeAryPtr::OOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); + + TypeAryPtr::NARROWOOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeNarrowOop::BOTTOM, TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); + +#ifdef _LP64 + if (UseCompressedOops) { + TypeAryPtr::OOPS = TypeAryPtr::NARROWOOPS; + } else +#endif + { + // There is no shared klass for Object[]. See note in TypeAryPtr::klass(). + TypeAryPtr::OOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); + } TypeAryPtr::BYTES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::BYTE ,TypeInt::POS), ciTypeArrayKlass::make(T_BYTE), true, Type::OffsetBot); TypeAryPtr::SHORTS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::SHORT ,TypeInt::POS), ciTypeArrayKlass::make(T_SHORT), true, Type::OffsetBot); TypeAryPtr::CHARS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::CHAR ,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR), true, Type::OffsetBot); @@ -319,8 +334,10 @@ TypeAryPtr::FLOATS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::FLOAT ,TypeInt::POS), ciTypeArrayKlass::make(T_FLOAT), true, Type::OffsetBot); TypeAryPtr::DOUBLES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::DOUBLE ,TypeInt::POS), ciTypeArrayKlass::make(T_DOUBLE), true, Type::OffsetBot); + // Nobody should ask _array_body_type[T_NARROWOOP]. Use NULL as assert. 
+ TypeAryPtr::_array_body_type[T_NARROWOOP] = NULL; TypeAryPtr::_array_body_type[T_OBJECT] = TypeAryPtr::OOPS; - TypeAryPtr::_array_body_type[T_ARRAY] = TypeAryPtr::OOPS; // arrays are stored in oop arrays + TypeAryPtr::_array_body_type[T_ARRAY] = TypeAryPtr::OOPS; // arrays are stored in oop arrays TypeAryPtr::_array_body_type[T_BYTE] = TypeAryPtr::BYTES; TypeAryPtr::_array_body_type[T_BOOLEAN] = TypeAryPtr::BYTES; // boolean[] is a byte array TypeAryPtr::_array_body_type[T_SHORT] = TypeAryPtr::SHORTS; @@ -348,6 +365,7 @@ longpair[1] = TypeLong::LONG; TypeTuple::LONG_PAIR = TypeTuple::make(2, longpair); + _const_basic_type[T_NARROWOOP] = TypeNarrowOop::BOTTOM; _const_basic_type[T_BOOLEAN] = TypeInt::BOOL; _const_basic_type[T_CHAR] = TypeInt::CHAR; _const_basic_type[T_BYTE] = TypeInt::BYTE; @@ -362,6 +380,7 @@ _const_basic_type[T_ADDRESS] = TypeRawPtr::BOTTOM; // both interpreter return addresses & random raw ptrs _const_basic_type[T_CONFLICT]= Type::BOTTOM; // why not? + _zero_type[T_NARROWOOP] = TypeNarrowOop::NULL_PTR; _zero_type[T_BOOLEAN] = TypeInt::ZERO; // false == 0 _zero_type[T_CHAR] = TypeInt::ZERO; // '\0' == 0 _zero_type[T_BYTE] = TypeInt::ZERO; // 0x00 == 0 @@ -403,6 +422,10 @@ Type* t = (Type*)i._value; tdic->Insert(t,t); // New Type, insert into Type table } + +#ifdef ASSERT + verify_lastype(); +#endif } //------------------------------hashcons--------------------------------------- @@ -470,7 +493,13 @@ // Compute the MEET of two types. NOT virtual. It enforces that meet is // commutative and the lattice is symmetric. const Type *Type::meet( const Type *t ) const { + if (isa_narrowoop() && t->isa_narrowoop()) { + const Type* result = make_ptr()->meet(t->make_ptr()); + return result->make_narrowoop(); + } + const Type *mt = xmeet(t); + if (isa_narrowoop() || t->isa_narrowoop()) return mt; #ifdef ASSERT assert( mt == t->xmeet(this), "meet not commutative" ); const Type* dual_join = mt->_dual; @@ -488,23 +517,8 @@ bool t_interface = t_inst->klass()->is_interface(); interface_vs_oop = this_interface ^ t_interface; } - const Type *tdual = t->_dual; - const Type *thisdual = _dual; - // strip out instances - if (t2t->isa_oopptr() != NULL) { - t2t = t2t->isa_oopptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE); - } - if (t2this->isa_oopptr() != NULL) { - t2this = t2this->isa_oopptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE); - } - if (tdual->isa_oopptr() != NULL) { - tdual = tdual->isa_oopptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE); - } - if (thisdual->isa_oopptr() != NULL) { - thisdual = thisdual->isa_oopptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE); - } - if( !interface_vs_oop && (t2t != tdual || t2this != thisdual) ) { + if( !interface_vs_oop && (t2t != t->_dual || t2this != _dual) ) { tty->print_cr("=== Meet Not Symmetric ==="); tty->print("t = "); t->dump(); tty->cr(); tty->print("this= "); dump(); tty->cr(); @@ -559,6 +573,9 @@ case AryPtr: return t->xmeet(this); + case NarrowOop: + return t->xmeet(this); + case Bad: // Type check default: // Bogus type not in lattice typerr(t); @@ -616,7 +633,8 @@ Bad, // Int - handled in v-call Bad, // Long - handled in v-call Half, // Half - + Bad, // NarrowOop - handled in v-call + Bad, // Tuple - handled in v-call Bad, // Array - handled in v-call @@ -671,14 +689,17 @@ ResourceMark rm; Dict d(cmpkey,hashkey); // Stop recursive type dumping dump2(d,1, st); + if (is_ptr_to_narrowoop()) { + st->print(" [narrow]"); + } } //------------------------------data------------------------------------------- const char * 
const Type::msg[Type::lastype] = { - "bad","control","top","int:","long:","half", - "tuple:", "aryptr", - "anyptr:", "rawptr:", "java:", "inst:", "ary:", "klass:", - "func", "abIO", "return_address", "memory", + "bad","control","top","int:","long:","half", "narrowoop:", + "tuple:", "aryptr", + "anyptr:", "rawptr:", "java:", "inst:", "ary:", "klass:", + "func", "abIO", "return_address", "memory", "float_top", "ftcon:", "float", "double_top", "dblcon:", "double", "bottom" @@ -738,7 +759,7 @@ //------------------------------isa_oop_ptr------------------------------------ // Return true if type is an oop pointer type. False for raw pointers. static char isa_oop_ptr_tbl[Type::lastype] = { - 0,0,0,0,0,0,0/*tuple*/, 0/*ary*/, + 0,0,0,0,0,0,0/*narrowoop*/,0/*tuple*/, 0/*ary*/, 0/*anyptr*/,0/*rawptr*/,1/*OopPtr*/,1/*InstPtr*/,1/*AryPtr*/,1/*KlassPtr*/, 0/*func*/,0,0/*return_address*/,0, /*floats*/0,0,0, /*doubles*/0,0,0, @@ -786,6 +807,7 @@ case InstPtr: case KlassPtr: case AryPtr: + case NarrowOop: case Int: case Long: case DoubleTop: @@ -901,6 +923,7 @@ case InstPtr: case KlassPtr: case AryPtr: + case NarrowOop: case Int: case Long: case FloatTop: @@ -1047,6 +1070,7 @@ case InstPtr: case KlassPtr: case AryPtr: + case NarrowOop: case Long: case FloatTop: case FloatCon: @@ -1288,6 +1312,7 @@ case InstPtr: case KlassPtr: case AryPtr: + case NarrowOop: case Int: case FloatTop: case FloatCon: @@ -1721,6 +1746,9 @@ //------------------------------make------------------------------------------- const TypeAry *TypeAry::make( const Type *elem, const TypeInt *size) { + if (UseCompressedOops && elem->isa_oopptr()) { + elem = elem->make_narrowoop(); + } size = normalize_array_size(size); return (TypeAry*)(new TypeAry(elem,size))->hashcons(); } @@ -1803,15 +1831,30 @@ // In such cases, an array built on this ary must have no subclasses. if (_elem == BOTTOM) return false; // general array not exact if (_elem == TOP ) return false; // inverted general array not exact - const TypeOopPtr* toop = _elem->isa_oopptr(); + const TypeOopPtr* toop = NULL; + if (UseCompressedOops && _elem->isa_narrowoop()) { + toop = _elem->make_ptr()->isa_oopptr(); + } else { + toop = _elem->isa_oopptr(); + } if (!toop) return true; // a primitive type, like int ciKlass* tklass = toop->klass(); if (tklass == NULL) return false; // unloaded class if (!tklass->is_loaded()) return false; // unloaded class - const TypeInstPtr* tinst = _elem->isa_instptr(); - if (tinst) return tklass->as_instance_klass()->is_final(); - const TypeAryPtr* tap = _elem->isa_aryptr(); - if (tap) return tap->ary()->ary_must_be_exact(); + const TypeInstPtr* tinst; + if (_elem->isa_narrowoop()) + tinst = _elem->make_ptr()->isa_instptr(); + else + tinst = _elem->isa_instptr(); + if (tinst) + return tklass->as_instance_klass()->is_final(); + const TypeAryPtr* tap; + if (_elem->isa_narrowoop()) + tap = _elem->make_ptr()->isa_aryptr(); + else + tap = _elem->isa_aryptr(); + if (tap) + return tap->ary()->ary_must_be_exact(); return false; } @@ -1867,6 +1910,7 @@ case DoubleTop: case DoubleCon: case DoubleBot: + case NarrowOop: case Bottom: // Ye Olde Default return Type::BOTTOM; case Top: @@ -1915,14 +1959,25 @@ return new TypePtr( AnyPtr, dual_ptr(), dual_offset() ); } +//------------------------------xadd_offset------------------------------------ +int TypePtr::xadd_offset( intptr_t offset ) const { + // Adding to 'TOP' offset? Return 'TOP'! + if( _offset == OffsetTop || offset == OffsetTop ) return OffsetTop; + // Adding to 'BOTTOM' offset? Return 'BOTTOM'! 
+ if( _offset == OffsetBot || offset == OffsetBot ) return OffsetBot; + // Addition overflows or "accidentally" equals to OffsetTop? Return 'BOTTOM'! + offset += (intptr_t)_offset; + if (offset != (int)offset || offset == OffsetTop) return OffsetBot; + + // assert( _offset >= 0 && _offset+offset >= 0, "" ); + // It is possible to construct a negative offset during PhaseCCP + + return (int)offset; // Sum valid offsets +} + //------------------------------add_offset------------------------------------- -const TypePtr *TypePtr::add_offset( int offset ) const { - if( offset == 0 ) return this; // No change - if( _offset == OffsetBot ) return this; - if( offset == OffsetBot ) offset = OffsetBot; - else if( _offset == OffsetTop || offset == OffsetTop ) offset = OffsetTop; - else offset += _offset; - return make( AnyPtr, _ptr, offset ); +const TypePtr *TypePtr::add_offset( intptr_t offset ) const { + return make( AnyPtr, _ptr, xadd_offset(offset) ); } //------------------------------eq--------------------------------------------- @@ -2055,7 +2110,7 @@ } //------------------------------add_offset------------------------------------- -const TypePtr *TypeRawPtr::add_offset( int offset ) const { +const TypePtr *TypeRawPtr::add_offset( intptr_t offset ) const { if( offset == OffsetTop ) return BOTTOM; // Undefined offset-> undefined pointer if( offset == OffsetBot ) return BOTTOM; // Unknown offset-> unknown pointer if( offset == 0 ) return this; // No change @@ -2099,6 +2154,67 @@ // Convenience common pre-built type. const TypeOopPtr *TypeOopPtr::BOTTOM; +//------------------------------TypeOopPtr------------------------------------- +TypeOopPtr::TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id ) + : TypePtr(t, ptr, offset), + _const_oop(o), _klass(k), + _klass_is_exact(xk), + _is_ptr_to_narrowoop(false), + _instance_id(instance_id) { +#ifdef _LP64 + if (UseCompressedOops && _offset != 0) { + if (klass() == NULL) { + assert(this->isa_aryptr(), "only arrays without klass"); + _is_ptr_to_narrowoop = true; + } else if (_offset == oopDesc::klass_offset_in_bytes()) { + _is_ptr_to_narrowoop = true; + } else if (this->isa_aryptr()) { + _is_ptr_to_narrowoop = (klass()->is_obj_array_klass() && + _offset != arrayOopDesc::length_offset_in_bytes()); + } else if (klass() == ciEnv::current()->Class_klass() && + (_offset == java_lang_Class::klass_offset_in_bytes() || + _offset == java_lang_Class::array_klass_offset_in_bytes())) { + // Special hidden fields from the Class. + assert(this->isa_instptr(), "must be an instance ptr."); + _is_ptr_to_narrowoop = true; + } else if (klass()->is_instance_klass()) { + ciInstanceKlass* ik = klass()->as_instance_klass(); + ciField* field = NULL; + if (this->isa_klassptr()) { + // Perm objects don't use compressed references, except for + // static fields which are currently compressed. + field = ik->get_field_by_offset(_offset, true); + if (field != NULL) { + BasicType basic_elem_type = field->layout_type(); + _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT || + basic_elem_type == T_ARRAY); + } + } else if (_offset == OffsetBot || _offset == OffsetTop) { + // unsafe access + _is_ptr_to_narrowoop = true; + } else { // exclude unsafe ops + assert(this->isa_instptr(), "must be an instance ptr."); + // Field which contains a compressed oop references. 
+ field = ik->get_field_by_offset(_offset, false); + if (field != NULL) { + BasicType basic_elem_type = field->layout_type(); + _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT || + basic_elem_type == T_ARRAY); + } else if (klass()->equals(ciEnv::current()->Object_klass())) { + // Compile::find_alias_type() cast exactness on all types to verify + // that it does not affect alias type. + _is_ptr_to_narrowoop = true; + } else { + // Type for the copy start in LibraryCallKit::inline_native_clone(). + assert(!klass_is_exact(), "only non-exact klass"); + _is_ptr_to_narrowoop = true; + } + } + } + } +#endif +} + //------------------------------make------------------------------------------- const TypeOopPtr *TypeOopPtr::make(PTR ptr, int offset) { @@ -2106,7 +2222,7 @@ ciKlass* k = ciKlassKlass::make(); bool xk = false; ciObject* o = NULL; - return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, UNKNOWN_INSTANCE))->hashcons(); + return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, InstanceBot))->hashcons(); } @@ -2117,9 +2233,9 @@ return make(ptr, _offset); } -//-----------------------------cast_to_instance------------------------------- -const TypeOopPtr *TypeOopPtr::cast_to_instance(int instance_id) const { - // There are no instances of a general oop. +//-----------------------------cast_to_instance_id---------------------------- +const TypeOopPtr *TypeOopPtr::cast_to_instance_id(int instance_id) const { + // There are no instances of a general oop. // Return self unchanged. return this; } @@ -2162,6 +2278,7 @@ case DoubleTop: case DoubleCon: case DoubleBot: + case NarrowOop: case Bottom: // Ye Olde Default return Type::BOTTOM; case Top: @@ -2212,7 +2329,7 @@ const Type *TypeOopPtr::xdual() const { assert(klass() == ciKlassKlass::make(), "no klasses here"); assert(const_oop() == NULL, "no constants here"); - return new TypeOopPtr(_base, dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance() ); + return new TypeOopPtr(_base, dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id() ); } //--------------------------make_from_klass_common----------------------------- @@ -2357,6 +2474,8 @@ const Type* ft = join(kills); const TypeInstPtr* ftip = ft->isa_instptr(); const TypeInstPtr* ktip = kills->isa_instptr(); + const TypeKlassPtr* ftkp = ft->isa_klassptr(); + const TypeKlassPtr* ktkp = kills->isa_klassptr(); if (ft->empty()) { // Check for evil case of 'this' being a class and 'kills' expecting an @@ -2370,6 +2489,8 @@ // uplift the type. 
if (!empty() && ktip != NULL && ktip->is_loaded() && ktip->klass()->is_interface()) return kills; // Uplift to interface + if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface()) + return kills; // Uplift to interface return Type::TOP; // Canonical empty value } @@ -2385,6 +2506,12 @@ // Happens in a CTW of rt.jar, 320-341, no extra flags return ktip->cast_to_ptr_type(ftip->ptr()); } + if (ftkp != NULL && ktkp != NULL && + ftkp->is_loaded() && ftkp->klass()->is_interface() && + ktkp->is_loaded() && !ktkp->klass()->is_interface()) { + // Happens in a CTW of rt.jar, 320-341, no extra flags + return ktkp->cast_to_ptr_type(ftkp->ptr()); + } return ft; } @@ -2426,7 +2553,9 @@ case 0: break; default: st->print("+%d",_offset); break; } - if (_instance_id != UNKNOWN_INSTANCE) + if (_instance_id == InstanceTop) + st->print(",iid=top"); + else if (_instance_id != InstanceBot) st->print(",iid=%d",_instance_id); } #endif @@ -2440,34 +2569,29 @@ return (_offset == 0) && !below_centerline(_ptr); } -//------------------------------xadd_offset------------------------------------ -int TypeOopPtr::xadd_offset( int offset ) const { - // Adding to 'TOP' offset? Return 'TOP'! - if( _offset == OffsetTop || offset == OffsetTop ) return OffsetTop; - // Adding to 'BOTTOM' offset? Return 'BOTTOM'! - if( _offset == OffsetBot || offset == OffsetBot ) return OffsetBot; - - // assert( _offset >= 0 && _offset+offset >= 0, "" ); - // It is possible to construct a negative offset during PhaseCCP - - return _offset+offset; // Sum valid offsets -} - //------------------------------add_offset------------------------------------- -const TypePtr *TypeOopPtr::add_offset( int offset ) const { +const TypePtr *TypeOopPtr::add_offset( intptr_t offset ) const { return make( _ptr, xadd_offset(offset) ); } -int TypeOopPtr::meet_instance(int iid) const { - if (iid == 0) { - return (_instance_id < 0) ? _instance_id : UNKNOWN_INSTANCE; - } else if (_instance_id == UNKNOWN_INSTANCE) { - return (iid < 0) ? iid : UNKNOWN_INSTANCE; - } else { - return (_instance_id == iid) ? iid : UNKNOWN_INSTANCE; - } +//------------------------------meet_instance_id-------------------------------- +int TypeOopPtr::meet_instance_id( int instance_id ) const { + // Either is 'TOP' instance? Return the other instance! + if( _instance_id == InstanceTop ) return instance_id; + if( instance_id == InstanceTop ) return _instance_id; + // If either is different, return 'BOTTOM' instance + if( _instance_id != instance_id ) return InstanceBot; + return _instance_id; +} + +//------------------------------dual_instance_id-------------------------------- +int TypeOopPtr::dual_instance_id( ) const { + if( _instance_id == InstanceTop ) return InstanceBot; // Map TOP into BOTTOM + if( _instance_id == InstanceBot ) return InstanceTop; // Map BOTTOM into TOP + return _instance_id; // Map everything else into self } + //============================================================================= // Convenience common pre-built types. const TypeInstPtr *TypeInstPtr::NOTNULL; @@ -2499,8 +2623,7 @@ // Ptr is never Null assert( ptr != Null, "NULL pointers are not typed" ); - if (instance_id != UNKNOWN_INSTANCE) - xk = true; // instances are always exactly typed + assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed"); if (!UseExactTypes) xk = false; if (ptr == Constant) { // Note: This case includes meta-object constants, such as methods. 
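// [Editorial sketch -- not part of the webrev.]  The hunks above replace the old
// UNKNOWN_INSTANCE marker with a three-level instance-id lattice: InstanceTop
// (undefined), a concrete allocation-node index (> 0), and InstanceBot (any
// possible instance).  A minimal, self-contained C++ sketch of the meet/dual
// rules introduced by meet_instance_id() and dual_instance_id(); the names and
// the tiny test harness below are illustrative only.
#include <cassert>
enum { SketchInstanceTop = -1, SketchInstanceBot = 0 };
// Meet: TOP is the identity; two different concrete ids widen to BOTTOM.
static int sketch_meet_instance_id(int a, int b) {
  if (a == SketchInstanceTop) return b;
  if (b == SketchInstanceTop) return a;
  return (a == b) ? a : SketchInstanceBot;
}
// Dual: swap TOP and BOTTOM; concrete ids are self-dual.
static int sketch_dual_instance_id(int a) {
  if (a == SketchInstanceTop) return SketchInstanceBot;
  if (a == SketchInstanceBot) return SketchInstanceTop;
  return a;
}
int main() {
  assert(sketch_meet_instance_id(SketchInstanceTop, 42) == 42);        // TOP is identity
  assert(sketch_meet_instance_id(17, 42) == SketchInstanceBot);        // distinct ids widen
  assert(sketch_dual_instance_id(sketch_dual_instance_id(42)) == 42);  // dual is an involution
  return 0;
}
// Keeping the meet commutative and idempotent in this way is what lets the
// symmetric-lattice assertions in Type::meet() continue to hold.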
@@ -2524,7 +2647,7 @@ if( ptr == _ptr ) return this; // Reconstruct _sig info here since not a problem with later lazy // construction, _sig will show up on demand. - return make(ptr, klass(), klass_is_exact(), const_oop(), _offset); + return make(ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id); } @@ -2539,12 +2662,10 @@ return make(ptr(), klass(), klass_is_exact, const_oop(), _offset, _instance_id); } -//-----------------------------cast_to_instance------------------------------- -const TypeOopPtr *TypeInstPtr::cast_to_instance(int instance_id) const { - if( instance_id == _instance_id) return this; - bool exact = (instance_id == UNKNOWN_INSTANCE) ? _klass_is_exact : true; - - return make(ptr(), klass(), exact, const_oop(), _offset, instance_id); +//-----------------------------cast_to_instance_id---------------------------- +const TypeOopPtr *TypeInstPtr::cast_to_instance_id(int instance_id) const { + if( instance_id == _instance_id ) return this; + return make(_ptr, klass(), _klass_is_exact, const_oop(), _offset, instance_id); } //------------------------------xmeet_unloaded--------------------------------- @@ -2610,6 +2731,7 @@ case DoubleTop: case DoubleCon: case DoubleBot: + case NarrowOop: case Bottom: // Ye Olde Default return Type::BOTTOM; case Top: @@ -2624,16 +2746,17 @@ const TypeAryPtr *tp = t->is_aryptr(); int offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); - int iid = meet_instance(tp->instance_id()); + int instance_id = meet_instance_id(tp->instance_id()); switch (ptr) { case TopPTR: case AnyNull: // Fall 'down' to dual of object klass if (klass()->equals(ciEnv::current()->Object_klass())) { - return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, iid); + return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id); } else { // cannot subclass, so the meet has to fall badly below the centerline ptr = NotNull; - return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, iid); + instance_id = InstanceBot; + return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id); } case Constant: case NotNull: @@ -2644,14 +2767,15 @@ // then we can subclass in the Java class heirarchy. if (klass()->equals(ciEnv::current()->Object_klass())) { // that is, tp's array type is a subtype of my klass - return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, iid); + return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id); } } // The other case cannot happen, since I cannot be a subtype of an array. // The meet falls down to Object class below centerline. if( ptr == Constant ) ptr = NotNull; - return make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, iid ); + instance_id = InstanceBot; + return make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id ); default: typerr(t); } } @@ -2662,10 +2786,12 @@ int offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); switch (tp->ptr()) { - case TopPTR: - case AnyNull: + case TopPTR: + case AnyNull: { + int instance_id = meet_instance_id(InstanceTop); return make(ptr, klass(), klass_is_exact(), - (ptr == Constant ? const_oop() : NULL), offset); + (ptr == Constant ? 
const_oop() : NULL), offset, instance_id); + } case NotNull: case BotPTR: return TypeOopPtr::make(ptr, offset); @@ -2681,10 +2807,13 @@ switch (tp->ptr()) { case Null: if( ptr == Null ) return TypePtr::make( AnyPtr, ptr, offset ); - case TopPTR: - case AnyNull: + // else fall through to AnyNull + case TopPTR: + case AnyNull: { + int instance_id = meet_instance_id(InstanceTop); return make( ptr, klass(), klass_is_exact(), - (ptr == Constant ? const_oop() : NULL), offset ); + (ptr == Constant ? const_oop() : NULL), offset, instance_id); + } case NotNull: case BotPTR: return TypePtr::make( AnyPtr, ptr, offset ); @@ -2713,7 +2842,7 @@ const TypeInstPtr *tinst = t->is_instptr(); int off = meet_offset( tinst->offset() ); PTR ptr = meet_ptr( tinst->ptr() ); - int instance_id = meet_instance(tinst->instance_id()); + int instance_id = meet_instance_id(tinst->instance_id()); // Check for easy case; klasses are equal (and perhaps not loaded!) // If we have constants, then we created oops so classes are loaded @@ -2776,13 +2905,14 @@ xk = above_centerline(ptr) ? tinst_xk : false; // Watch out for Constant vs. AnyNull interface. if (ptr == Constant) ptr = NotNull; // forget it was a constant + instance_id = InstanceBot; } ciObject* o = NULL; // the Constant value, if any if (ptr == Constant) { // Find out which constant. o = (this_klass == klass()) ? const_oop() : tinst->const_oop(); } - return make( ptr, k, xk, o, off ); + return make( ptr, k, xk, o, off, instance_id ); } // Either oop vs oop or interface vs interface or interface vs Object @@ -2866,10 +2996,11 @@ // class hierarchy - which means we have to fall to at least NotNull. if( ptr == TopPTR || ptr == AnyNull || ptr == Constant ) ptr = NotNull; + instance_id = InstanceBot; // Now we find the LCA of Java classes ciKlass* k = this_klass->least_common_ancestor(tinst_klass); - return make( ptr, k, false, NULL, off ); + return make( ptr, k, false, NULL, off, instance_id ); } // End of case InstPtr case KlassPtr: @@ -2896,7 +3027,7 @@ // Dual: do NOT dual on klasses. This means I do NOT understand the Java // inheritence mechanism. const Type *TypeInstPtr::xdual() const { - return new TypeInstPtr( dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance() ); + return new TypeInstPtr( dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id() ); } //------------------------------eq--------------------------------------------- @@ -2948,13 +3079,15 @@ } st->print(" *"); - if (_instance_id != UNKNOWN_INSTANCE) + if (_instance_id == InstanceTop) + st->print(",iid=top"); + else if (_instance_id != InstanceBot) st->print(",iid=%d",_instance_id); } #endif //------------------------------add_offset------------------------------------- -const TypePtr *TypeInstPtr::add_offset( int offset ) const { +const TypePtr *TypeInstPtr::add_offset( intptr_t offset ) const { return make( _ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset), _instance_id ); } @@ -2962,6 +3095,7 @@ // Convenience common pre-built types. 
const TypeAryPtr *TypeAryPtr::RANGE; const TypeAryPtr *TypeAryPtr::OOPS; +const TypeAryPtr *TypeAryPtr::NARROWOOPS; const TypeAryPtr *TypeAryPtr::BYTES; const TypeAryPtr *TypeAryPtr::SHORTS; const TypeAryPtr *TypeAryPtr::CHARS; @@ -2975,8 +3109,7 @@ assert(!(k == NULL && ary->_elem->isa_int()), "integral arrays must be pre-equipped with a class"); if (!xk) xk = ary->ary_must_be_exact(); - if (instance_id != UNKNOWN_INSTANCE) - xk = true; // instances are always exactly typed + assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed"); if (!UseExactTypes) xk = (ptr == Constant); return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id))->hashcons(); } @@ -2987,8 +3120,7 @@ "integral arrays must be pre-equipped with a class"); assert( (ptr==Constant && o) || (ptr!=Constant && !o), "" ); if (!xk) xk = (o != NULL) || ary->ary_must_be_exact(); - if (instance_id != UNKNOWN_INSTANCE) - xk = true; // instances are always exactly typed + assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed"); if (!UseExactTypes) xk = (ptr == Constant); return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id))->hashcons(); } @@ -2996,7 +3128,7 @@ //------------------------------cast_to_ptr_type------------------------------- const Type *TypeAryPtr::cast_to_ptr_type(PTR ptr) const { if( ptr == _ptr ) return this; - return make(ptr, const_oop(), _ary, klass(), klass_is_exact(), _offset); + return make(ptr, const_oop(), _ary, klass(), klass_is_exact(), _offset, _instance_id); } @@ -3008,11 +3140,10 @@ return make(ptr(), const_oop(), _ary, klass(), klass_is_exact, _offset, _instance_id); } -//-----------------------------cast_to_instance------------------------------- -const TypeOopPtr *TypeAryPtr::cast_to_instance(int instance_id) const { - if( instance_id == _instance_id) return this; - bool exact = (instance_id == UNKNOWN_INSTANCE) ? _klass_is_exact : true; - return make(ptr(), const_oop(), _ary, klass(), exact, _offset, instance_id); +//-----------------------------cast_to_instance_id---------------------------- +const TypeOopPtr *TypeAryPtr::cast_to_instance_id(int instance_id) const { + if( instance_id == _instance_id ) return this; + return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, instance_id); } //-----------------------------narrow_size_type------------------------------- @@ -3024,6 +3155,9 @@ jint res = cache; if (res == 0) { switch (etype) { + case T_NARROWOOP: + etype = T_OBJECT; + break; case T_CONFLICT: case T_ILLEGAL: case T_VOID: @@ -3036,17 +3170,18 @@ // Narrow the given size type to the index range for the given array base type. // Return NULL if the resulting int type becomes empty. 
-const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size, BasicType elem) { +const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size) const { jint hi = size->_hi; jint lo = size->_lo; jint min_lo = 0; - jint max_hi = max_array_length(elem); + jint max_hi = max_array_length(elem()->basic_type()); //if (index_not_size) --max_hi; // type of a valid array index, FTR bool chg = false; if (lo < min_lo) { lo = min_lo; chg = true; } if (hi > max_hi) { hi = max_hi; chg = true; } + // Negative length arrays will produce weird intermediate dead fast-path code if (lo > hi) - return NULL; + return TypeInt::ZERO; if (!chg) return size; return TypeInt::make(lo, hi, Type::WidenMin); @@ -3055,12 +3190,10 @@ //-------------------------------cast_to_size---------------------------------- const TypeAryPtr* TypeAryPtr::cast_to_size(const TypeInt* new_size) const { assert(new_size != NULL, ""); - new_size = narrow_size_type(new_size, elem()->basic_type()); - if (new_size == NULL) // Negative length arrays will produce weird - new_size = TypeInt::ZERO; // intermediate dead fast-path goo + new_size = narrow_size_type(new_size); if (new_size == size()) return this; const TypeAry* new_ary = TypeAry::make(elem(), new_size); - return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset); + return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id); } @@ -3096,6 +3229,7 @@ case DoubleTop: case DoubleCon: case DoubleBot: + case NarrowOop: case Bottom: // Ye Olde Default return Type::BOTTOM; case Top: @@ -3110,9 +3244,12 @@ int offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); switch (tp->ptr()) { - case TopPTR: - case AnyNull: - return make(ptr, (ptr == Constant ? const_oop() : NULL), _ary, _klass, _klass_is_exact, offset); + case TopPTR: + case AnyNull: { + int instance_id = meet_instance_id(InstanceTop); + return make(ptr, (ptr == Constant ? const_oop() : NULL), + _ary, _klass, _klass_is_exact, offset, instance_id); + } case BotPTR: case NotNull: return TypeOopPtr::make(ptr, offset); @@ -3133,8 +3270,12 @@ return TypePtr::make(AnyPtr, ptr, offset); case Null: if( ptr == Null ) return TypePtr::make(AnyPtr, ptr, offset); - case AnyNull: - return make( ptr, (ptr == Constant ? const_oop() : NULL), _ary, _klass, _klass_is_exact, offset ); + // else fall through to AnyNull + case AnyNull: { + int instance_id = meet_instance_id(InstanceTop); + return make( ptr, (ptr == Constant ? const_oop() : NULL), + _ary, _klass, _klass_is_exact, offset, instance_id); + } default: ShouldNotReachHere(); } } @@ -3146,7 +3287,7 @@ int off = meet_offset(tap->offset()); const TypeAry *tary = _ary->meet(tap->_ary)->is_ary(); PTR ptr = meet_ptr(tap->ptr()); - int iid = meet_instance(tap->instance_id()); + int instance_id = meet_instance_id(tap->instance_id()); ciKlass* lazy_klass = NULL; if (tary->_elem->isa_int()) { // Integral array element types have irrelevant lattice relations. @@ -3158,6 +3299,7 @@ } else { // Something like byte[int+] meets char[int+]. // This must fall to bottom, not (int[-128..65535])[int+].
+ instance_id = InstanceBot; tary = TypeAry::make(Type::BOTTOM, tary->_size); } } @@ -3167,19 +3309,20 @@ case TopPTR: // Compute new klass on demand, do not use tap->_klass xk = (tap->_klass_is_exact | this->_klass_is_exact); - return make( ptr, const_oop(), tary, lazy_klass, xk, off ); + return make( ptr, const_oop(), tary, lazy_klass, xk, off, instance_id ); case Constant: { ciObject* o = const_oop(); if( _ptr == Constant ) { if( tap->const_oop() != NULL && !o->equals(tap->const_oop()) ) { ptr = NotNull; o = NULL; + instance_id = InstanceBot; } } else if( above_centerline(_ptr) ) { o = tap->const_oop(); } xk = true; - return TypeAryPtr::make( ptr, o, tary, tap->_klass, xk, off ); + return TypeAryPtr::make( ptr, o, tary, tap->_klass, xk, off, instance_id ); } case NotNull: case BotPTR: @@ -3190,7 +3333,7 @@ xk = this->_klass_is_exact; else xk = (tap->_klass_is_exact & this->_klass_is_exact) && (klass() == tap->klass()); // Only precise for identical arrays - return TypeAryPtr::make( ptr, NULL, tary, lazy_klass, xk, off, iid ); + return TypeAryPtr::make( ptr, NULL, tary, lazy_klass, xk, off, instance_id ); default: ShouldNotReachHere(); } } @@ -3200,16 +3343,17 @@ const TypeInstPtr *tp = t->is_instptr(); int offset = meet_offset(tp->offset()); PTR ptr = meet_ptr(tp->ptr()); - int iid = meet_instance(tp->instance_id()); + int instance_id = meet_instance_id(tp->instance_id()); switch (ptr) { case TopPTR: case AnyNull: // Fall 'down' to dual of object klass if( tp->klass()->equals(ciEnv::current()->Object_klass()) ) { - return TypeAryPtr::make( ptr, _ary, _klass, _klass_is_exact, offset, iid ); + return TypeAryPtr::make( ptr, _ary, _klass, _klass_is_exact, offset, instance_id ); } else { // cannot subclass, so the meet has to fall badly below the centerline ptr = NotNull; - return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL,offset, iid); + instance_id = InstanceBot; + return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id); } case Constant: case NotNull: @@ -3220,14 +3364,15 @@ // then we can subclass in the Java class heirarchy. if( tp->klass()->equals(ciEnv::current()->Object_klass()) ) { // that is, my array type is a subtype of 'tp' klass - return make( ptr, _ary, _klass, _klass_is_exact, offset, iid ); + return make( ptr, _ary, _klass, _klass_is_exact, offset, instance_id ); } } // The other case cannot happen, since t cannot be a subtype of an array. // The meet falls down to Object class below centerline. 
if( ptr == Constant ) ptr = NotNull; - return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL,offset, iid); + instance_id = InstanceBot; + return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id); default: typerr(t); } } @@ -3242,7 +3387,7 @@ //------------------------------xdual------------------------------------------ // Dual: compute field-by-field dual const Type *TypeAryPtr::xdual() const { - return new TypeAryPtr( dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance() ); + return new TypeAryPtr( dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id() ); } //------------------------------dump2------------------------------------------ @@ -3266,14 +3411,23 @@ break; } - st->print("*"); - if (_instance_id != UNKNOWN_INSTANCE) + if( _offset != 0 ) { + int header_size = objArrayOopDesc::header_size() * wordSize; + if( _offset == OffsetTop ) st->print("+undefined"); + else if( _offset == OffsetBot ) st->print("+any"); + else if( _offset < header_size ) st->print("+%d", _offset); + else { + BasicType basic_elem_type = elem()->basic_type(); + int array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type); + int elem_size = type2aelembytes(basic_elem_type); + st->print("[%d]", (_offset - array_base)/elem_size); + } + } + st->print(" *"); + if (_instance_id == InstanceTop) + st->print(",iid=top"); + else if (_instance_id != InstanceBot) st->print(",iid=%d",_instance_id); - if( !_offset ) return; - if( _offset == OffsetTop ) st->print("+undefined"); - else if( _offset == OffsetBot ) st->print("+any"); - else if( _offset < 12 ) st->print("+%d",_offset); - else st->print("[%d]", (_offset-12)/4 ); } #endif @@ -3283,12 +3437,130 @@ } //------------------------------add_offset------------------------------------- -const TypePtr *TypeAryPtr::add_offset( int offset ) const { +const TypePtr *TypeAryPtr::add_offset( intptr_t offset ) const { return make( _ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id ); } //============================================================================= +const TypeNarrowOop *TypeNarrowOop::BOTTOM; +const TypeNarrowOop *TypeNarrowOop::NULL_PTR; + + +const TypeNarrowOop* TypeNarrowOop::make(const TypePtr* type) { + return (const TypeNarrowOop*)(new TypeNarrowOop(type))->hashcons(); +} + +//------------------------------hash------------------------------------------- +// Type-specific hashing function. +int TypeNarrowOop::hash(void) const { + return _ooptype->hash() + 7; +} + + +bool TypeNarrowOop::eq( const Type *t ) const { + const TypeNarrowOop* tc = t->isa_narrowoop(); + if (tc != NULL) { + if (_ooptype->base() != tc->_ooptype->base()) { + return false; + } + return tc->_ooptype->eq(_ooptype); + } + return false; +} + +bool TypeNarrowOop::singleton(void) const { // TRUE if type is a singleton + return _ooptype->singleton(); +} + +bool TypeNarrowOop::empty(void) const { + return _ooptype->empty(); +} + +//------------------------------xmeet------------------------------------------ +// Compute the MEET of two types. It returns a new Type object. +const Type *TypeNarrowOop::xmeet( const Type *t ) const { + // Perform a fast test for common case; meeting the same types together. + if( this == t ) return this; // Meeting same type-rep? 
+ + + // Current "this->_base" is OopPtr + switch (t->base()) { // switch on original type + + case Int: // Mixing ints & oops happens when javac + case Long: // reuses local variables + case FloatTop: + case FloatCon: + case FloatBot: + case DoubleTop: + case DoubleCon: + case DoubleBot: + case AnyPtr: + case RawPtr: + case OopPtr: + case InstPtr: + case KlassPtr: + case AryPtr: + + case Bottom: // Ye Olde Default + return Type::BOTTOM; + case Top: + return this; + + case NarrowOop: { + const Type* result = _ooptype->xmeet(t->make_ptr()); + if (result->isa_ptr()) { + return TypeNarrowOop::make(result->is_ptr()); + } + return result; + } + + default: // All else is a mistake + typerr(t); + + } // End of switch + + return this; +} + +const Type *TypeNarrowOop::xdual() const { // Compute dual right now. + const TypePtr* odual = _ooptype->dual()->is_ptr(); + return new TypeNarrowOop(odual); +} + +const Type *TypeNarrowOop::filter( const Type *kills ) const { + if (kills->isa_narrowoop()) { + const Type* ft =_ooptype->filter(kills->is_narrowoop()->_ooptype); + if (ft->empty()) + return Type::TOP; // Canonical empty value + if (ft->isa_ptr()) { + return make(ft->isa_ptr()); + } + return ft; + } else if (kills->isa_ptr()) { + const Type* ft = _ooptype->join(kills); + if (ft->empty()) + return Type::TOP; // Canonical empty value + return ft; + } else { + return Type::TOP; + } +} + + +intptr_t TypeNarrowOop::get_con() const { + return _ooptype->get_con(); +} + +#ifndef PRODUCT +void TypeNarrowOop::dump2( Dict & d, uint depth, outputStream *st ) const { + st->print("narrowoop: "); + _ooptype->dump2(d, depth, st); +} +#endif + + +//============================================================================= // Convenience common pre-built types. // Not-null object klass or below @@ -3337,31 +3609,36 @@ ciKlass* k_ary = NULL; const TypeInstPtr *tinst; const TypeAryPtr *tary; + const Type* el = elem(); + if (el->isa_narrowoop()) { + el = el->make_ptr(); + } + // Get element klass - if ((tinst = elem()->isa_instptr()) != NULL) { + if ((tinst = el->isa_instptr()) != NULL) { // Compute array klass from element klass k_ary = ciObjArrayKlass::make(tinst->klass()); - } else if ((tary = elem()->isa_aryptr()) != NULL) { + } else if ((tary = el->isa_aryptr()) != NULL) { // Compute array klass from element klass ciKlass* k_elem = tary->klass(); // If element type is something like bottom[], k_elem will be null. if (k_elem != NULL) k_ary = ciObjArrayKlass::make(k_elem); - } else if ((elem()->base() == Type::Top) || - (elem()->base() == Type::Bottom)) { + } else if ((el->base() == Type::Top) || + (el->base() == Type::Bottom)) { // element type of Bottom occurs from meet of basic type // and object; Top occurs when doing join on Bottom. // Leave k_ary at NULL. } else { // Cannot compute array klass directly from basic type, // since subtypes of TypeInt all have basic type T_INT. - assert(!elem()->isa_int(), + assert(!el->isa_int(), "integral arrays must be pre-equipped with a class"); // Compute array klass directly from basic type - k_ary = ciTypeArrayKlass::make(elem()->basic_type()); + k_ary = ciTypeArrayKlass::make(el->basic_type()); } - - if( this != TypeAryPtr::OOPS ) + + if( this != TypeAryPtr::OOPS ) { // The _klass field acts as a cache of the underlying // ciKlass for this array type. In order to set the field, // we need to cast away const-ness. @@ -3376,13 +3653,18 @@ // a bit less efficient than caching, but calls to // TypeAryPtr::OOPS->klass() are not common enough to matter. 
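// [Editorial sketch -- not part of the webrev.]  TypeNarrowOop, added above, is
// deliberately a thin wrapper: eq/hash/meet/dual/filter all unwrap _ooptype,
// operate on the full-width pointer lattice, and re-wrap the result, so the
// compressed form never needs a lattice of its own.  A hedged, stand-alone
// illustration of that wrap/operate/unwrap pattern; "Ptr" and "NarrowRef" are
// toy stand-ins, not the real HotSpot Type hierarchy.
#include <iostream>
#include <string>
struct Ptr {                                  // stand-in for TypePtr
  std::string name;
  Ptr meet(const Ptr& other) const {          // pretend pointer-lattice meet
    return Ptr{ name == other.name ? name : std::string("bottom") };
  }
};
struct NarrowRef {                            // stand-in for TypeNarrowOop
  Ptr wrapped;                                // corresponds to _ooptype
  NarrowRef meet(const NarrowRef& other) const {
    // unwrap, meet as ordinary pointers, wrap the result again
    return NarrowRef{ wrapped.meet(other.wrapped) };
  }
};
int main() {
  NarrowRef a{ Ptr{ "java/lang/String:NotNull" } };
  NarrowRef b{ Ptr{ "java/lang/Object:NotNull" } };
  std::cout << a.meet(b).wrapped.name << "\n";   // prints "bottom"
  return 0;
}
// The same round trip appears earlier in the patch in Type::meet(), which
// meets two narrow oops via make_ptr() and re-narrows with make_narrowoop().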
((TypeAryPtr*)this)->_klass = k_ary; + if (UseCompressedOops && k_ary != NULL && k_ary->is_obj_array_klass() && + _offset != 0 && _offset != arrayOopDesc::length_offset_in_bytes()) { + ((TypeAryPtr*)this)->_is_ptr_to_narrowoop = true; + } + } return k_ary; } //------------------------------add_offset------------------------------------- // Access internals of klass object -const TypePtr *TypeKlassPtr::add_offset( int offset ) const { +const TypePtr *TypeKlassPtr::add_offset( intptr_t offset ) const { return make( _ptr, klass(), xadd_offset(offset) ); } @@ -3432,6 +3714,7 @@ case DoubleTop: case DoubleCon: case DoubleBot: + case NarrowOop: case Bottom: // Ye Olde Default return Type::BOTTOM; case Top: @@ -3706,10 +3989,10 @@ //------------------------------print_flattened-------------------------------- // Print a 'flattened' signature static const char * const flat_type_msg[Type::lastype] = { - "bad","control","top","int","long","_", - "tuple:", "array:", - "ptr", "rawptr", "ptr", "ptr", "ptr", "ptr", - "func", "abIO", "return_address", "mem", + "bad","control","top","int","long","_", "narrowoop", + "tuple:", "array:", + "ptr", "rawptr", "ptr", "ptr", "ptr", "ptr", + "func", "abIO", "return_address", "mem", "float_top", "ftcon:", "flt", "double_top", "dblcon:", "dbl", "bottom" --- old/hotspot/src/share/vm/opto/type.hpp 2009-08-01 04:14:31.097073438 +0100 +++ new/hotspot/src/share/vm/opto/type.hpp 2009-08-01 04:14:31.012292587 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)type.hpp 1.159 07/10/23 13:12:48 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,7 @@ class TypeF; class TypeInt; class TypeLong; +class TypeNarrowOop; class TypeAry; class TypeTuple; class TypePtr; @@ -67,6 +68,7 @@ Int, // Integer range (lo-hi) Long, // Long integer range (lo-hi) Half, // Placeholder half of doubleword + NarrowOop, // Compressed oop pointer Tuple, // Method signature or object layout Array, // Array types @@ -191,6 +193,10 @@ // Currently, it also works around limitations involving interface types. virtual const Type *filter( const Type *kills ) const; + // Returns true if this pointer points at memory which contains a + // compressed oop references. 
+ bool is_ptr_to_narrowoop() const; + // Convenience access float getf() const; double getd() const; @@ -207,18 +213,27 @@ const TypeAry *is_ary() const; // Array, NOT array pointer const TypePtr *is_ptr() const; // Asserts it is a ptr type const TypePtr *isa_ptr() const; // Returns NULL if not ptr type - const TypeRawPtr *is_rawptr() const; // NOT Java oop - const TypeOopPtr *isa_oopptr() const; // Returns NULL if not ptr type - const TypeKlassPtr *isa_klassptr() const; // Returns NULL if not KlassPtr - const TypeKlassPtr *is_klassptr() const; // assert if not KlassPtr - const TypeOopPtr *is_oopptr() const; // Java-style GC'd pointer - const TypeInstPtr *isa_instptr() const; // Returns NULL if not InstPtr - const TypeInstPtr *is_instptr() const; // Instance - const TypeAryPtr *isa_aryptr() const; // Returns NULL if not AryPtr - const TypeAryPtr *is_aryptr() const; // Array oop + const TypeRawPtr *isa_rawptr() const; // NOT Java oop + const TypeRawPtr *is_rawptr() const; // Asserts is rawptr + const TypeNarrowOop *is_narrowoop() const; // Java-style GC'd pointer + const TypeNarrowOop *isa_narrowoop() const; // Returns NULL if not oop ptr type + const TypeOopPtr *isa_oopptr() const; // Returns NULL if not oop ptr type + const TypeOopPtr *is_oopptr() const; // Java-style GC'd pointer + const TypeKlassPtr *isa_klassptr() const; // Returns NULL if not KlassPtr + const TypeKlassPtr *is_klassptr() const; // assert if not KlassPtr + const TypeInstPtr *isa_instptr() const; // Returns NULL if not InstPtr + const TypeInstPtr *is_instptr() const; // Instance + const TypeAryPtr *isa_aryptr() const; // Returns NULL if not AryPtr + const TypeAryPtr *is_aryptr() const; // Array oop virtual bool is_finite() const; // Has a finite value virtual bool is_nan() const; // Is not a number (NaN) + // Returns this ptr type or the equivalent ptr type for this compressed pointer. + const TypePtr* make_ptr() const; + // Returns this compressed pointer or the equivalent compressed version + // of this pointer type. + const TypeNarrowOop* make_narrowoop() const; + // Special test for register pressure heuristic bool is_floatingpoint() const; // True if Float or Double base type @@ -435,6 +450,7 @@ // Check for single integer int is_con() const { return _lo==_hi; } + bool is_con(int i) const { return is_con() && _lo == i; } jlong get_con() const { assert( is_con(), "" ); return _lo; } virtual bool is_finite() const; // Has a finite value @@ -543,6 +559,7 @@ // Otherwise the _base will indicate which subset of pointers is affected, // and the class will be inherited from. class TypePtr : public Type { + friend class TypeNarrowOop; public: enum PTR { TopPTR, AnyNull, Constant, Null, NotNull, BotPTR, lastPTR }; protected: @@ -567,7 +584,8 @@ virtual intptr_t get_con() const; - virtual const TypePtr *add_offset( int offset ) const; + int xadd_offset( intptr_t offset ) const; + virtual const TypePtr *add_offset( intptr_t offset ) const; virtual bool singleton(void) const; // TRUE if type is a singleton virtual bool empty(void) const; // TRUE if type is vacuous @@ -618,7 +636,7 @@ virtual intptr_t get_con() const; - virtual const TypePtr *add_offset( int offset ) const; + virtual const TypePtr *add_offset( intptr_t offset ) const; virtual const Type *xmeet( const Type *t ) const; virtual const Type *xdual() const; // Compute dual right now. @@ -634,32 +652,34 @@ // Some kind of oop (Java pointer), either klass or instance or array. 
class TypeOopPtr : public TypePtr { protected: - TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id ) : TypePtr(t, ptr, offset), _const_oop(o), _klass(k), _klass_is_exact(xk), _instance_id(instance_id) { } + TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id ); public: virtual bool eq( const Type *t ) const; virtual int hash() const; // Type specific hashing virtual bool singleton(void) const; // TRUE if type is a singleton enum { - UNKNOWN_INSTANCE = 0 + InstanceTop = -1, // undefined instance + InstanceBot = 0 // any possible instance }; protected: - int xadd_offset( int offset ) const; // Oop is NULL, unless this is a constant oop. ciObject* _const_oop; // Constant oop // If _klass is NULL, then so is _sig. This is an unloaded klass. ciKlass* _klass; // Klass object // Does the type exclude subclasses of the klass? (Inexact == polymorphic.) bool _klass_is_exact; + bool _is_ptr_to_narrowoop; - int _instance_id; // if not UNKNOWN_INSTANCE, indicates that this is a particular instance - // of this type which is distinct. This is the the node index of the - // node creating this instance + // If not InstanceTop or InstanceBot, indicates that this is + // a particular instance of this type which is distinct. + // This is the the node index of the allocation node creating this instance. + int _instance_id; static const TypeOopPtr* make_from_klass_common(ciKlass* klass, bool klass_change, bool try_for_exact); - int dual_instance() const { return -_instance_id; } - int meet_instance(int uid) const; + int dual_instance_id() const; + int meet_instance_id(int uid) const; public: // Creates a type given a klass. Correctly handles multi-dimensional arrays @@ -687,8 +707,14 @@ ciObject* const_oop() const { return _const_oop; } virtual ciKlass* klass() const { return _klass; } bool klass_is_exact() const { return _klass_is_exact; } - bool is_instance() const { return _instance_id != UNKNOWN_INSTANCE; } - uint instance_id() const { return _instance_id; } + + // Returns true if this pointer points at memory which contains a + // compressed oop references. + bool is_ptr_to_narrowoop_nv() const { return _is_ptr_to_narrowoop; } + + bool is_known_instance() const { return _instance_id > 0; } + int instance_id() const { return _instance_id; } + bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; } virtual intptr_t get_con() const; @@ -696,12 +722,12 @@ virtual const Type *cast_to_exactness(bool klass_is_exact) const; - virtual const TypeOopPtr *cast_to_instance(int instance_id) const; + virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const; // corresponding pointer to klass, for a given instance const TypeKlassPtr* as_klass_type() const; - virtual const TypePtr *add_offset( int offset ) const; + virtual const TypePtr *add_offset( intptr_t offset ) const; virtual const Type *xmeet( const Type *t ) const; virtual const Type *xdual() const; // Compute dual right now. @@ -757,7 +783,7 @@ } // Make a pointer to an oop. - static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = 0 ); + static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot ); // If this is a java.lang.Class constant, return the type for it or NULL. 
// Pass to Type::get_const_type to turn it to a type, which will usually @@ -768,9 +794,9 @@ virtual const Type *cast_to_exactness(bool klass_is_exact) const; - virtual const TypeOopPtr *cast_to_instance(int instance_id) const; + virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const; - virtual const TypePtr *add_offset( int offset ) const; + virtual const TypePtr *add_offset( intptr_t offset ) const; virtual const Type *xmeet( const Type *t ) const; virtual const TypeInstPtr *xmeet_unloaded( const TypeInstPtr *t ) const; @@ -802,9 +828,9 @@ const Type* elem() const { return _ary->_elem; } const TypeInt* size() const { return _ary->_size; } - static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = 0); + static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot); // Constant pointer to array - static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = 0); + static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot); // Convenience static const TypeAryPtr *make(ciObject* o); @@ -814,12 +840,13 @@ virtual const Type *cast_to_exactness(bool klass_is_exact) const; - virtual const TypeOopPtr *cast_to_instance(int instance_id) const; + virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const; virtual const TypeAryPtr* cast_to_size(const TypeInt* size) const; + virtual const TypeInt* narrow_size_type(const TypeInt* size) const; virtual bool empty(void) const; // TRUE if type is vacuous - virtual const TypePtr *add_offset( int offset ) const; + virtual const TypePtr *add_offset( intptr_t offset ) const; virtual const Type *xmeet( const Type *t ) const; virtual const Type *xdual() const; // Compute dual right now. @@ -827,6 +854,7 @@ // Convenience common pre-built types. static const TypeAryPtr *RANGE; static const TypeAryPtr *OOPS; + static const TypeAryPtr *NARROWOOPS; static const TypeAryPtr *BYTES; static const TypeAryPtr *SHORTS; static const TypeAryPtr *CHARS; @@ -841,7 +869,6 @@ } static const TypeAryPtr *_array_body_type[T_CONFLICT+1]; // sharpen the type of an int which is used as an array size - static const TypeInt* narrow_size_type(const TypeInt* size, BasicType elem); #ifndef PRODUCT virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping #endif @@ -858,6 +885,8 @@ public: ciSymbol* name() const { return _klass->name(); } + bool is_loaded() const { return _klass->is_loaded(); } + // ptr to klass 'k' static const TypeKlassPtr *make( ciKlass* k ) { return make( TypePtr::Constant, k, 0); } // ptr to klass 'k' with offset @@ -872,7 +901,7 @@ // corresponding pointer to instance, for a given class const TypeOopPtr* as_instance_type() const; - virtual const TypePtr *add_offset( int offset ) const; + virtual const TypePtr *add_offset( intptr_t offset ) const; virtual const Type *xmeet( const Type *t ) const; virtual const Type *xdual() const; // Compute dual right now. @@ -884,6 +913,56 @@ #endif }; +//------------------------------TypeNarrowOop---------------------------------- +// A compressed reference to some kind of Oop. This type wraps around +// a preexisting TypeOopPtr and forwards most of it's operations to +// the underlying type. 
It's only real purpose is to track the +// oopness of the compressed oop value when we expose the conversion +// between the normal and the compressed form. +class TypeNarrowOop : public Type { +protected: + const TypePtr* _ooptype; // Could be TypePtr::NULL_PTR + + TypeNarrowOop( const TypePtr* ooptype): Type(NarrowOop), + _ooptype(ooptype) { + assert(ooptype->offset() == 0 || + ooptype->offset() == OffsetBot || + ooptype->offset() == OffsetTop, "no real offsets"); + } +public: + virtual bool eq( const Type *t ) const; + virtual int hash() const; // Type specific hashing + virtual bool singleton(void) const; // TRUE if type is a singleton + + virtual const Type *xmeet( const Type *t ) const; + virtual const Type *xdual() const; // Compute dual right now. + + virtual intptr_t get_con() const; + + // Do not allow interface-vs.-noninterface joins to collapse to top. + virtual const Type *filter( const Type *kills ) const; + + virtual bool empty(void) const; // TRUE if type is vacuous + + static const TypeNarrowOop *make( const TypePtr* type); + + static const TypeNarrowOop* make_from_constant(ciObject* con) { + return make(TypeOopPtr::make_from_constant(con)); + } + + // returns the equivalent ptr type for this compressed pointer + const TypePtr *make_oopptr() const { + return _ooptype; + } + + static const TypeNarrowOop *BOTTOM; + static const TypeNarrowOop *NULL_PTR; + +#ifndef PRODUCT + virtual void dump2( Dict &d, uint depth, outputStream *st ) const; +#endif +}; + //------------------------------TypeFunc--------------------------------------- // Class of Array Types class TypeFunc : public Type { @@ -926,8 +1005,16 @@ }; //------------------------------accessors-------------------------------------- -inline float Type::getf() const { - assert( _base == FloatCon, "Not a FloatCon" ); +inline bool Type::is_ptr_to_narrowoop() const { +#ifdef _LP64 + return (isa_oopptr() != NULL && is_oopptr()->is_ptr_to_narrowoop_nv()); +#else + return false; +#endif +} + +inline float Type::getf() const { + assert( _base == FloatCon, "Not a FloatCon" ); return ((TypeF*)this)->_f; } @@ -1004,9 +1091,13 @@ return (_base >= OopPtr && _base <= KlassPtr) ? (TypeOopPtr*)this : NULL; } -inline const TypeRawPtr *Type::is_rawptr() const { - assert( _base == RawPtr, "Not a raw pointer" ); - return (TypeRawPtr*)this; +inline const TypeRawPtr *Type::isa_rawptr() const { + return (_base == RawPtr) ? (TypeRawPtr*)this : NULL; +} + +inline const TypeRawPtr *Type::is_rawptr() const { + assert( _base == RawPtr, "Not a raw pointer" ); + return (TypeRawPtr*)this; } inline const TypeInstPtr *Type::isa_instptr() const { @@ -1027,6 +1118,17 @@ return (TypeAryPtr*)this; } +inline const TypeNarrowOop *Type::is_narrowoop() const { + // OopPtr is the first and KlassPtr the last, with no non-oops between. + assert(_base == NarrowOop, "Not a narrow oop" ) ; + return (TypeNarrowOop*)this; +} + +inline const TypeNarrowOop *Type::isa_narrowoop() const { + // OopPtr is the first and KlassPtr the last, with no non-oops between. + return (_base == NarrowOop) ? (TypeNarrowOop*)this : NULL; +} + inline const TypeKlassPtr *Type::isa_klassptr() const { return (_base == KlassPtr) ? (TypeKlassPtr*)this : NULL; } @@ -1036,6 +1138,16 @@ return (TypeKlassPtr*)this; } +inline const TypePtr* Type::make_ptr() const { + return (_base == NarrowOop) ? is_narrowoop()->make_oopptr() : + (isa_ptr() ? is_ptr() : NULL); +} + +inline const TypeNarrowOop* Type::make_narrowoop() const { + return (_base == NarrowOop) ? is_narrowoop() : + (isa_ptr() ? 
TypeNarrowOop::make(is_ptr()) : NULL); +} + inline bool Type::is_floatingpoint() const { if( (_base == FloatCon) || (_base == FloatBot) || (_base == DoubleCon) || (_base == DoubleBot) ) @@ -1076,6 +1188,9 @@ #define RShiftXNode RShiftLNode // For card marks and hashcodes #define URShiftXNode URShiftLNode +// UseOptoBiasInlining +#define XorXNode XorLNode +#define StoreXConditionalNode StoreLConditionalNode // Opcodes #define Op_LShiftX Op_LShiftL #define Op_AndX Op_AndL @@ -1115,6 +1230,9 @@ #define RShiftXNode RShiftINode // For card marks and hashcodes #define URShiftXNode URShiftINode +// UseOptoBiasInlining +#define XorXNode XorINode +#define StoreXConditionalNode StoreIConditionalNode // Opcodes #define Op_LShiftX Op_LShiftI #define Op_AndX Op_AndI --- old/hotspot/src/share/vm/opto/vectornode.cpp 2009-08-01 04:14:32.043588770 +0100 +++ new/hotspot/src/share/vm/opto/vectornode.cpp 2009-08-01 04:14:31.962996769 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vectornode.cpp 1.5 07/05/17 16:02:33 JVM" #endif /* - * Copyright 2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2007-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -138,7 +138,7 @@ int mid = lo + ct/2; Node* n1 = ct == 2 ? in(lo) : binaryTreePack(C, lo, mid); Node* n2 = ct == 2 ? in(lo+1) : binaryTreePack(C, mid, hi ); - int rslt_bsize = ct * type2aelembytes[elt_basic_type()]; + int rslt_bsize = ct * type2aelembytes(elt_basic_type()); if (bottom_type()->is_floatingpoint()) { switch (rslt_bsize) { case 8: return new (C, 3) PackFNode(n1, n2); --- old/hotspot/src/share/vm/opto/vectornode.hpp 2009-08-01 04:14:32.929101817 +0100 +++ new/hotspot/src/share/vm/opto/vectornode.hpp 2009-08-01 04:14:32.852185403 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vectornode.hpp 1.6 07/05/17 16:02:36 JVM" #endif /* - * Copyright 2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2007-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,7 @@ uint length() const { return _length; } // Vector length static uint max_vlen(BasicType bt) { // max vector length - return (uint)(Matcher::vector_width_in_bytes() / type2aelembytes[bt]); + return (uint)(Matcher::vector_width_in_bytes() / type2aelembytes(bt)); } // Element and vector type @@ -395,7 +395,7 @@ virtual uint ideal_reg() const { return Matcher::vector_ideal_reg(); } virtual BasicType memory_type() const { return T_VOID; } - virtual int memory_size() const { return length()*type2aelembytes[elt_basic_type()]; } + virtual int memory_size() const { return length()*type2aelembytes(elt_basic_type()); } // Vector opcode from scalar opcode static int opcode(int sopc, uint vlen); @@ -623,7 +623,7 @@ virtual uint ideal_reg() const { return Matcher::vector_ideal_reg(); } virtual BasicType memory_type() const { return T_VOID; } - virtual int memory_size() const { return length()*type2aelembytes[elt_basic_type()]; } + virtual int memory_size() const { return length()*type2aelembytes(elt_basic_type()); } // Vector opcode from scalar opcode static int opcode(int sopc, uint vlen); --- old/hotspot/src/share/vm/prims/forte.cpp 2009-08-01 04:14:33.884125692 +0100 +++ new/hotspot/src/share/vm/prims/forte.cpp 2009-08-01 04:14:33.780425986 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)forte.cpp 1.70 07/08/31 18:43:32 JVM" #endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,20 @@ # include "incls/_precompiled.incl" # include "incls/_forte.cpp.incl" +// These name match the names reported by the forte quality kit +enum { + ticks_no_Java_frame = 0, + ticks_no_class_load = -1, + ticks_GC_active = -2, + ticks_unknown_not_Java = -3, + ticks_not_walkable_not_Java = -4, + ticks_unknown_Java = -5, + ticks_not_walkable_Java = -6, + ticks_unknown_state = -7, + ticks_thread_exit = -8, + ticks_deopt = -9, + ticks_safepoint = -10 +}; //------------------------------------------------------- @@ -44,297 +58,29 @@ }; -static void forte_is_walkable_compiled_frame(frame* fr, RegisterMap* map, +static void is_decipherable_compiled_frame(frame* fr, RegisterMap* map, bool* is_compiled_p, bool* is_walkable_p); -static bool forte_is_walkable_interpreted_frame(frame* fr, - methodOop* method_p, int* bci_p); - +static bool is_decipherable_interpreted_frame(JavaThread* thread, + frame* fr, + methodOop* method_p, + int* bci_p); -// A Forte specific version of frame:safe_for_sender(). -static bool forte_safe_for_sender(frame* fr, JavaThread *thread) { - bool ret_value = false; // be pessimistic - -#ifdef COMPILER2 -#if defined(IA32) || defined(AMD64) - { - // This check is the same as the standard safe_for_sender() - // on IA32 or AMD64 except that NULL FP values are tolerated - // for C2. - address sp = (address)fr->sp(); - address fp = (address)fr->fp(); - ret_value = sp != NULL && sp <= thread->stack_base() && - sp >= thread->stack_base() - thread->stack_size() && - (fp == NULL || (fp <= thread->stack_base() && - fp >= thread->stack_base() - thread->stack_size())); - - // We used to use standard safe_for_sender() when we are supposed - // to be executing Java code. However, that prevents us from - // walking some intrinsic stacks so now we have to be more refined. 
- // If we passed the above check and we have a NULL frame pointer - // and we are supposed to be executing Java code, then we have a - // couple of more checks to make. - if (ret_value && fp == NULL && (thread->thread_state() == _thread_in_Java - || thread->thread_state() == _thread_in_Java_trans)) { - if (fr->is_interpreted_frame()) { - // interpreted frames don't really have a NULL frame pointer - return false; - } else if (CodeCache::find_blob(fr->pc()) == NULL) { - // the NULL frame pointer should be associated with generated code - return false; - } - } - } -#else // !(IA32 || AMD64) - ret_value = fr->safe_for_sender(thread); -#endif // IA32 || AMD64 - -#else // !COMPILER2 - ret_value = fr->safe_for_sender(thread); -#endif // COMPILER2 - - if (!ret_value) { - return ret_value; // not safe, nothing more to do - } - - address sp1; - -#ifdef SPARC - // On Solaris SPARC, when a compiler frame has an interpreted callee - // the _interpreter_sp_adjustment field contains the adjustment to - // this frame's SP made by that interpreted callee. - // For AsyncGetCallTrace(), we need to verify that the resulting SP - // is valid for the specified thread's stack. - sp1 = (address)fr->sp(); - address sp2 = (address)fr->unextended_sp(); - - // If the second SP is NULL, then the _interpreter_sp_adjustment - // field simply adjusts this frame's SP to NULL and the frame is - // not safe. This strange value can be set in the frame constructor - // when our peek into the interpreted callee's adjusted value for - // this frame's SP finds a NULL. This can happen when SIGPROF - // catches us while we are creating the interpreter frame. - // - if (sp2 == NULL || - - // If the two SPs are different, then _interpreter_sp_adjustment - // is non-zero and we need to validate the second SP. We invert - // the range check from frame::safe_for_sender() and bail out - // if the second SP is not safe. - (sp1 != sp2 && !(sp2 <= thread->stack_base() - && sp2 >= (thread->stack_base() - thread->stack_size())))) { - return false; - } -#endif // SPARC - - if (fr->is_entry_frame()) { - // This frame thinks it is an entry frame; we need to validate - // the JavaCallWrapper pointer. - // Note: frame::entry_frame_is_first() assumes that the - // JavaCallWrapper has a non-NULL _anchor field. We don't - // check that here (yet) since we've never seen a failure - // due to a NULL _anchor field. - // Update: Originally this check was done only for SPARC. However, - // this failure has now been seen on C2 C86. I have no reason to - // believe that this is not a general issue so I'm enabling the - // check for all compilers on all supported platforms. -#ifdef COMPILER2 -#if defined(IA32) || defined(AMD64) - if (fr->fp() == NULL) { - // C2 X86 allows NULL frame pointers, but if we have one then - // we cannot call entry_frame_call_wrapper(). - return false; - } -#endif // IA32 || AMD64 -#endif // COMPILER2 - sp1 = (address)fr->entry_frame_call_wrapper(); - // We invert the range check from frame::safe_for_sender() and - // bail out if the JavaCallWrapper * is not safe. - if (!(sp1 <= thread->stack_base() - && sp1 >= (thread->stack_base() - thread->stack_size()))) { - return false; - } - } - - return ret_value; -} - - -// Unknown compiled frames have caused assertion failures on Solaris -// X86. This code also detects unknown compiled frames on Solaris -// SPARC, but no assertion failures have been observed. However, I'm -// paranoid so I'm enabling this code whenever we have a compiler. 
-// -// Returns true if the specified frame is an unknown compiled frame -// and false otherwise. -static bool is_unknown_compiled_frame(frame* fr, JavaThread *thread) { - bool ret_value = false; // be optimistic - - // This failure mode only occurs when the thread is in state - // _thread_in_Java so we are okay for this check for any other - // thread state. - // - // Note: _thread_in_Java does not always mean that the thread - // is executing Java code. AsyncGetCallTrace() has caught - // threads executing in JRT_LEAF() routines when the state - // will also be _thread_in_Java. - if (thread->thread_state() != _thread_in_Java) { - return ret_value; - } - - // This failure mode only occurs with compiled frames so we are - // okay for this check for both entry and interpreted frames. - if (fr->is_entry_frame() || fr->is_interpreted_frame()) { - return ret_value; - } - - // This failure mode only occurs when the compiled frame's PC - // is in the code cache so we are okay for this check if the - // PC is not in the code cache. - CodeBlob* cb = CodeCache::find_blob(fr->pc()); - if (cb == NULL) { - return ret_value; - } - - // We have compiled code in the code cache so it is time for - // the final check: let's see if any frame type is set - ret_value = !( - // is_entry_frame() is checked above - // testers that are a subset of is_entry_frame(): - // is_first_frame() - fr->is_java_frame() - // testers that are a subset of is_java_frame(): - // is_interpreted_frame() - // is_compiled_frame() - || fr->is_native_frame() - || fr->is_runtime_frame() - || fr->is_safepoint_blob_frame() - ); - - // If there is no frame type set, then we have an unknown compiled - // frame and sender() should not be called on it. - - return ret_value; -} +vframeStreamForte::vframeStreamForte(JavaThread *jt, + frame fr, + bool stop_at_java_call_stub) : vframeStreamCommon(jt) { -#define DebugNonSafepoints_IS_CLEARED \ - (!FLAG_IS_DEFAULT(DebugNonSafepoints) && !DebugNonSafepoints) - -// if -XX:-DebugNonSafepoints, then top-frame will be skipped -vframeStreamForte::vframeStreamForte(JavaThread *jt, frame fr, - bool stop_at_java_call_stub) : vframeStreamCommon(jt) { _stop_at_java_call_stub = stop_at_java_call_stub; + _frame = fr; - if (!DebugNonSafepoints_IS_CLEARED) { - // decode the top frame fully - // (usual case, if JVMTI is enabled) - _frame = fr; - } else { - // skip top frame, as it may not be at safepoint - // For AsyncGetCallTrace(), we extracted as much info from the top - // frame as we could in forte_is_walkable_frame(). We also verified - // forte_safe_for_sender() so this sender() call is safe. - _frame = fr.sender(&_reg_map); - } - - if (jt->thread_state() == _thread_in_Java && !fr.is_first_frame()) { - bool sender_check = false; // assume sender is not safe - - if (forte_safe_for_sender(&_frame, jt)) { - // If the initial sender frame is safe, then continue on with other - // checks. The unsafe sender frame has been seen on Solaris X86 - // with both Compiler1 and Compiler2. It has not been seen on - // Solaris SPARC, but seems like a good sanity check to have - // anyway. - - // SIGPROF caught us in Java code and the current frame is not the - // first frame so we should sanity check the sender frame. It is - // possible for SIGPROF to catch us in the middle of making a call. - // When that happens the current frame is actually a combination of - // the real sender and some of the new call's info. We can't find - // the real sender with such a current frame and things can get - // confused. 
- // - // This sanity check has caught problems with the sender frame on - // Solaris SPARC. So far Solaris X86 has not had a failure here. - sender_check = _frame.is_entry_frame() - // testers that are a subset of is_entry_frame(): - // is_first_frame() - || _frame.is_java_frame() - // testers that are a subset of is_java_frame(): - // is_interpreted_frame() - // is_compiled_frame() - || _frame.is_native_frame() - || _frame.is_runtime_frame() - || _frame.is_safepoint_blob_frame() - ; - - // We need an additional sanity check on an initial interpreted - // sender frame. This interpreted frame needs to be both walkable - // and have a valid BCI. This is yet another variant of SIGPROF - // catching us in the middle of making a call. - if (sender_check && _frame.is_interpreted_frame()) { - methodOop method = NULL; - int bci = -1; - - if (!forte_is_walkable_interpreted_frame(&_frame, &method, &bci) - || bci == -1) { - sender_check = false; - } - } - - // We need an additional sanity check on an initial compiled - // sender frame. This compiled frame also needs to be walkable. - // This is yet another variant of SIGPROF catching us in the - // middle of making a call. - if (sender_check && !_frame.is_interpreted_frame()) { - bool is_compiled, is_walkable; - - forte_is_walkable_compiled_frame(&_frame, &_reg_map, - &is_compiled, &is_walkable); - if (is_compiled && !is_walkable) { - sender_check = false; - } - } - } - - if (!sender_check) { - // nothing else to try if we can't recognize the sender - _mode = at_end_mode; - return; - } - } - - int loop_count = 0; - int loop_max = MaxJavaStackTraceDepth * 2; + // We must always have a valid frame to start filling - while (!fill_from_frame()) { - _frame = _frame.sender(&_reg_map); + bool filled_in = fill_from_frame(); -#ifdef COMPILER2 -#if defined(IA32) || defined(AMD64) - // Stress testing on C2 X86 has shown a periodic problem with - // the sender() call below. The initial _frame that we have on - // entry to the loop has already passed forte_safe_for_sender() - // so we only check frames after it. - if (!forte_safe_for_sender(&_frame, _thread)) { - _mode = at_end_mode; - return; - } -#endif // IA32 || AMD64 -#endif // COMPILER2 + assert(filled_in, "invariant"); - if (++loop_count >= loop_max) { - // We have looped more than twice the number of possible - // Java frames. This indicates that we are trying to walk - // a stack that is in the middle of being constructed and - // it is self referential. 
- _mode = at_end_mode; - return; - } - } } @@ -361,95 +107,57 @@ do { -#if defined(COMPILER1) && defined(SPARC) - bool prevIsInterpreted = _frame.is_interpreted_frame(); -#endif // COMPILER1 && SPARC + loop_count++; - _frame = _frame.sender(&_reg_map); + // By the time we get here we should never see unsafe but better + // safe then segv'd - if (!forte_safe_for_sender(&_frame, _thread)) { + if (loop_count > loop_max || !_frame.safe_for_sender(_thread)) { _mode = at_end_mode; return; } -#if defined(COMPILER1) && defined(SPARC) - if (prevIsInterpreted) { - // previous callee was interpreted and may require a special check - if (_frame.is_compiled_frame() && _frame.cb()->is_compiled_by_c1()) { - // compiled sender called interpreted callee so need one more check - bool is_compiled, is_walkable; - - // sanity check the compiled sender frame - forte_is_walkable_compiled_frame(&_frame, &_reg_map, - &is_compiled, &is_walkable); - assert(is_compiled, "sanity check"); - if (!is_walkable) { - // compiled sender frame is not walkable so bail out - _mode = at_end_mode; - return; - } - } - } -#endif // COMPILER1 && SPARC + _frame = _frame.sender(&_reg_map); - if (++loop_count >= loop_max) { - // We have looped more than twice the number of possible - // Java frames. This indicates that we are trying to walk - // a stack that is in the middle of being constructed and - // it is self referential. - _mode = at_end_mode; - return; - } } while (!fill_from_frame()); } -// Determine if 'fr' is a walkable, compiled frame. -// *is_compiled_p is set to true if the frame is compiled and if it -// is, then *is_walkable_p is set to true if it is also walkable. -static void forte_is_walkable_compiled_frame(frame* fr, RegisterMap* map, - bool* is_compiled_p, bool* is_walkable_p) { - - *is_compiled_p = false; - *is_walkable_p = false; - - CodeBlob* cb = CodeCache::find_blob(fr->pc()); - if (cb != NULL && - cb->is_nmethod() && - ((nmethod*)cb)->is_java_method()) { - // frame is compiled and executing a Java method - *is_compiled_p = true; - - // Increment PC because the PcDesc we want is associated with - // the *end* of the instruction, and pc_desc_near searches - // forward to the first matching PC after the probe PC. - PcDesc* pc_desc = NULL; - if (!DebugNonSafepoints_IS_CLEARED) { - // usual case: look for any safepoint near the sampled PC - address probe_pc = fr->pc() + 1; - pc_desc = ((nmethod*) cb)->pc_desc_near(probe_pc); - } else { - // reduced functionality: only recognize PCs immediately after calls - pc_desc = ((nmethod*) cb)->pc_desc_at(fr->pc()); - } - if (pc_desc != NULL && (pc_desc->scope_decode_offset() - == DebugInformationRecorder::serialized_null)) { - pc_desc = NULL; - } - if (pc_desc != NULL) { - // it has a PcDesc so the frame is also walkable - *is_walkable_p = true; - if (!DebugNonSafepoints_IS_CLEARED) { - // Normalize the PC to the one associated exactly with - // this PcDesc, so that subsequent stack-walking queries - // need not be approximate: - fr->set_pc(pc_desc->real_pc((nmethod*) cb)); - } +// Determine if 'fr' is a decipherable compiled frame. We are already +// assured that fr is for a java nmethod. + +static bool is_decipherable_compiled_frame(frame* fr) { + + assert(fr->cb() != NULL && fr->cb()->is_nmethod(), "invariant"); + nmethod* nm = (nmethod*) fr->cb(); + assert(nm->is_java_method(), "invariant"); + + // First try and find an exact PcDesc + + PcDesc* pc_desc = nm->pc_desc_at(fr->pc()); + + // Did we find a useful PcDesc? 
+ if (pc_desc != NULL && + pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) { + + address probe_pc = fr->pc() + 1; + pc_desc = nm->pc_desc_near(probe_pc); + + // Now do we have a useful PcDesc? + + if (pc_desc != NULL && + pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) { + // No debug information available for this pc + // vframeStream would explode if we try and walk the frames. + return false; } - // Implied else: this compiled frame has no PcDesc, i.e., contains - // a frameless stub such as C1 method exit, so it is not walkable. + + // This PcDesc is useful however we must adjust the frame's pc + // so that the vframeStream lookups will use this same pc + + fr->set_pc(pc_desc->real_pc(nm)); } - // Implied else: this isn't a compiled frame so it isn't a - // walkable, compiled frame. + + return true; } // Determine if 'fr' is a walkable interpreted frame. Returns false @@ -460,159 +168,189 @@ // Note: this method returns true when a valid Java method is found // even if a valid BCI cannot be found. -static bool forte_is_walkable_interpreted_frame(frame* fr, - methodOop* method_p, int* bci_p) { +static bool is_decipherable_interpreted_frame(JavaThread* thread, + frame* fr, + methodOop* method_p, + int* bci_p) { assert(fr->is_interpreted_frame(), "just checking"); // top frame is an interpreted frame // check if it is walkable (i.e. valid methodOop and valid bci) - if (fr->is_interpreted_frame_valid()) { - if (fr->fp() != NULL) { - // access address in order not to trigger asserts that - // are built in interpreter_frame_method function - methodOop method = *fr->interpreter_frame_method_addr(); - if (Universe::heap()->is_valid_method(method)) { - intptr_t bcx = fr->interpreter_frame_bcx(); - int bci = method->validate_bci_from_bcx(bcx); - // note: bci is set to -1 if not a valid bci - *method_p = method; - *bci_p = bci; - return true; - } - } + + // Because we may be racing a gc thread the method and/or bci + // of a valid interpreter frame may look bad causing us to + // fail the is_interpreted_frame_valid test. If the thread + // is in any of the following states we are assured that the + // frame is in fact valid and we must have hit the race. + + JavaThreadState state = thread->thread_state(); + bool known_valid = (state == _thread_in_native || + state == _thread_in_vm || + state == _thread_blocked ); + + if (known_valid || fr->is_interpreted_frame_valid(thread)) { + + // The frame code should completely validate the frame so that + // references to methodOop and bci are completely safe to access + // If they aren't the frame code should be fixed not this + // code. However since gc isn't locked out the values could be + // stale. This is a race we can never completely win since we can't + // lock out gc so do one last check after retrieving their values + // from the frame for additional safety + + methodOop method = fr->interpreter_frame_method(); + + // We've at least found a method. + // NOTE: there is something to be said for the approach that + // if we don't find a valid bci then the method is not likely + // a valid method. Then again we may have caught an interpreter + // frame in the middle of construction and the bci field is + // not yet valid. 
+ + *method_p = method; + + // See if gc may have invalidated method since we validated frame + + if (!Universe::heap()->is_valid_method(method)) return false; + + intptr_t bcx = fr->interpreter_frame_bcx(); + + int bci = method->validate_bci_from_bcx(bcx); + + // note: bci is set to -1 if not a valid bci + *bci_p = bci; + return true; } + return false; } -// Determine if 'fr' can be used to find a walkable frame. Returns -// false if a walkable frame cannot be found. *walkframe_p, *method_p, -// and *bci_p are not set when false is returned. Returns true if a -// walkable frame is returned via *walkframe_p. *method_p is non-NULL -// if the returned frame was executing a Java method. *bci_p is != -1 -// if a valid BCI in the Java method could be found. -// -// *walkframe_p will be used by vframeStreamForte as the initial -// frame for walking the stack. Currently the initial frame is -// skipped by vframeStreamForte because we inherited the logic from -// the vframeStream class. This needs to be revisited in the future. -static bool forte_is_walkable_frame(JavaThread* thread, frame* fr, - frame* walkframe_p, methodOop* method_p, int* bci_p) { - - if (!forte_safe_for_sender(fr, thread) - || is_unknown_compiled_frame(fr, thread) - ) { - // If the initial frame is not safe, then bail out. So far this - // has only been seen on Solaris X86 with Compiler2, but it seems - // like a great initial sanity check. - return false; - } - - if (fr->is_first_frame()) { - // If initial frame is frame from StubGenerator and there is no - // previous anchor, there are no java frames yet - return false; - } - - if (fr->is_interpreted_frame()) { - if (forte_is_walkable_interpreted_frame(fr, method_p, bci_p)) { - *walkframe_p = *fr; - return true; - } - return false; - } +// Determine if 'fr' can be used to find an initial Java frame. +// Return false if it can not find a fully decipherable Java frame +// (in other words a frame that isn't safe to use in a vframe stream). +// Obviously if it can't even find a Java frame false will also be returned. +// +// If we find a Java frame decipherable or not then by definition we have +// identified a method and that will be returned to the caller via method_p. +// If we can determine a bci that is returned also. (Hmm is it possible +// to return a method and bci and still return false? ) +// +// The initial Java frame we find (if any) is return via initial_frame_p. +// + +static bool find_initial_Java_frame(JavaThread* thread, + frame* fr, + frame* initial_frame_p, + methodOop* method_p, + int* bci_p) { + + // It is possible that for a frame containing an nmethod + // we can capture the method but no bci. If we get no + // bci the frame isn't walkable but the method is usable. + // Therefore we init the returned methodOop to NULL so the + // caller can make the distinction. + + *method_p = NULL; + + // On the initial call to this method the frame we get may not be + // recognizable to us. This should only happen if we are in a JRT_LEAF + // or something called by a JRT_LEAF method. + - // At this point we have something other than a first frame or an - // interpreted frame. - methodOop method = NULL; frame candidate = *fr; - // If we loop more than twice the number of possible Java - // frames, then this indicates that we are trying to walk - // a stack that is in the middle of being constructed and - // it is self referential. So far this problem has only - // been seen on Solaris X86 Compiler2, but it seems like - // a good robustness fix for all platforms. 
+ // If the starting frame we were given has no codeBlob associated with + // it see if we can find such a frame because only frames with codeBlobs + // are possible Java frames. + + if (fr->cb() == NULL) { + + // See if we can find a useful frame + int loop_count; + int loop_max = MaxJavaStackTraceDepth * 2; + RegisterMap map(thread, false); + for (loop_count = 0; loop_count < loop_max; loop_count++) { + if (!candidate.safe_for_sender(thread)) return false; + candidate = candidate.sender(&map); + if (candidate.cb() != NULL) break; + } + if (candidate.cb() == NULL) return false; + } + + // We have a frame known to be in the codeCache + // We will hopefully be able to figure out something to do with it. int loop_count; int loop_max = MaxJavaStackTraceDepth * 2; + RegisterMap map(thread, false); for (loop_count = 0; loop_count < loop_max; loop_count++) { - // determine if the candidate frame is executing a Java method - if (CodeCache::contains(candidate.pc())) { - // candidate is a compiled frame or stub routine - CodeBlob* cb = CodeCache::find_blob(candidate.pc()); - - if (cb->is_nmethod()) { - method = ((nmethod *)cb)->method(); - } - } // end if CodeCache has our PC - RegisterMap map(thread, false); + if (candidate.is_first_frame()) { + // If initial frame is frame from StubGenerator and there is no + // previous anchor, there are no java frames associated with a method + return false; + } - // we have a Java frame that seems reasonable - if (method != NULL && candidate.is_java_frame() - && candidate.sp() != NULL && candidate.pc() != NULL) { - // we need to sanity check the candidate further - bool is_compiled, is_walkable; - - forte_is_walkable_compiled_frame(&candidate, &map, &is_compiled, - &is_walkable); - if (is_compiled) { - // At this point, we know we have a compiled Java frame with - // method information that we want to return. We don't check - // the is_walkable flag here because that flag pertains to - // vframeStreamForte work that is done after we are done here. - break; + if (candidate.is_interpreted_frame()) { + if (is_decipherable_interpreted_frame(thread, &candidate, method_p, bci_p)) { + *initial_frame_p = candidate; + return true; } + + // Hopefully we got some data + return false; } - // At this point, the candidate doesn't work so try the sender. + if (candidate.cb()->is_nmethod()) { - // For AsyncGetCallTrace() we cannot assume there is a sender - // for the initial frame. The initial forte_safe_for_sender() call - // and check for is_first_frame() is done on entry to this method. - candidate = candidate.sender(&map); - if (!forte_safe_for_sender(&candidate, thread)) { + nmethod* nm = (nmethod*) candidate.cb(); + *method_p = nm->method(); -#ifdef COMPILER2 -#if defined(IA32) || defined(AMD64) - // C2 on X86 can use the ebp register as a general purpose register - // which can cause the candidate to fail theforte_safe_for_sender() - // above. We try one more time using a NULL frame pointer (fp). - - candidate = frame(candidate.sp(), NULL, candidate.pc()); - if (!forte_safe_for_sender(&candidate, thread)) { -#endif // IA32 || AMD64 -#endif // COMPILER2 + // If the frame isn't fully decipherable then the default + // value for the bci is a signal that we don't have a bci. + // If we have a decipherable frame this bci value will + // not be used. 
- return false; + *bci_p = -1; -#ifdef COMPILER2 -#if defined(IA32) || defined(AMD64) - } // end forte_safe_for_sender retry with NULL fp -#endif // IA32 || AMD64 -#endif // COMPILER2 + *initial_frame_p = candidate; - } // end first forte_safe_for_sender check + // Native wrapper code is trivial to decode by vframeStream - if (candidate.is_first_frame() - || is_unknown_compiled_frame(&candidate, thread)) { - return false; + if (nm->is_native_method()) return true; + + // If it isn't decipherable then we have found a pc that doesn't + // have a PCDesc that can get us a bci however we did find + // a method + + if (!is_decipherable_compiled_frame(&candidate)) { + return false; + } + + // is_decipherable_compiled_frame may modify candidate's pc + *initial_frame_p = candidate; + + return true; } - } // end for loop_count - if (method == NULL) { - // If we didn't get any method info from the candidate, then - // we have nothing to return so bail out. - return false; + // Must be some stub frame that we don't care about + + if (!candidate.safe_for_sender(thread)) return false; + candidate = candidate.sender(&map); + + // If it isn't in the code cache something is wrong + // since once we find a frame in the code cache they + // all should be there. + + if (candidate.cb() == NULL) return false; + } - *walkframe_p = candidate; - *method_p = method; - *bci_p = -1; - return true; + return false; + } @@ -630,10 +368,12 @@ } ASGCT_CallTrace; static void forte_fill_call_trace_given_top(JavaThread* thd, - ASGCT_CallTrace* trace, int depth, frame top_frame) { + ASGCT_CallTrace* trace, + int depth, + frame top_frame) { NoHandleMark nhm; - frame walkframe; + frame initial_Java_frame; methodOop method; int bci; int count; @@ -641,48 +381,51 @@ count = 0; assert(trace->frames != NULL, "trace->frames must be non-NULL"); - if (!forte_is_walkable_frame(thd, &top_frame, &walkframe, &method, &bci)) { - // return if no walkable frame is found - return; - } + bool fully_decipherable = find_initial_Java_frame(thd, &top_frame, &initial_Java_frame, &method, &bci); + + // The frame might not be walkable but still recovered a method + // (e.g. an nmethod with no scope info for the pc + + if (method == NULL) return; CollectedHeap* ch = Universe::heap(); - if (method != NULL) { - // The method is not stored GC safe so see if GC became active - // after we entered AsyncGetCallTrace() and before we try to - // use the methodOop. - // Yes, there is still a window after this check and before - // we use methodOop below, but we can't lock out GC so that - // has to be an acceptable risk. - if (!ch->is_valid_method(method)) { - trace->num_frames = -2; - return; - } + // The method is not stored GC safe so see if GC became active + // after we entered AsyncGetCallTrace() and before we try to + // use the methodOop. + // Yes, there is still a window after this check and before + // we use methodOop below, but we can't lock out GC so that + // has to be an acceptable risk. + if (!ch->is_valid_method(method)) { + trace->num_frames = ticks_GC_active; // -2 + return; + } - if (DebugNonSafepoints_IS_CLEARED) { - // Take whatever method the top-frame decoder managed to scrape up. - // We look further at the top frame only if non-safepoint - // debugging information is available. 
- count++; - trace->num_frames = count; - trace->frames[0].method_id = method->find_jmethod_id_or_null(); - if (!method->is_native()) { - trace->frames[0].lineno = bci; - } else { - trace->frames[0].lineno = -3; - } + // We got a Java frame however it isn't fully decipherable + // so it won't necessarily be safe to use it for the + // initial frame in the vframe stream. + + if (!fully_decipherable) { + // Take whatever method the top-frame decoder managed to scrape up. + // We look further at the top frame only if non-safepoint + // debugging information is available. + count++; + trace->num_frames = count; + trace->frames[0].method_id = method->find_jmethod_id_or_null(); + if (!method->is_native()) { + trace->frames[0].lineno = bci; + } else { + trace->frames[0].lineno = -3; } - } - // check has_last_Java_frame() after looking at the top frame - // which may be an interpreted Java frame. - if (!thd->has_last_Java_frame() && method == NULL) { - trace->num_frames = 0; - return; + if (!initial_Java_frame.safe_for_sender(thd)) return; + + RegisterMap map(thd, false); + initial_Java_frame = initial_Java_frame.sender(&map); } - vframeStreamForte st(thd, walkframe, false); + vframeStreamForte st(thd, initial_Java_frame, false); + for (; !st.at_end() && count < depth; st.forte_next(), count++) { bci = st.bci(); method = st.method(); @@ -696,7 +439,7 @@ if (!ch->is_valid_method(method)) { // we throw away everything we've gathered in this sample since // none of it is safe - trace->num_frames = -2; + trace->num_frames = ticks_GC_active; // -2 return; } @@ -768,6 +511,11 @@ extern "C" { void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext) { + +// This is if'd out because we no longer use thread suspension. +// However if someone wanted to backport this to a 5.0 jvm then this +// code would be important. +#if 0 if (SafepointSynchronize::is_synchronizing()) { // The safepoint mechanism is trying to synchronize all the threads. // Since this can involve thread suspension, it is not safe for us @@ -777,9 +525,10 @@ // are suspended while holding a resource and another thread blocks // on that resource in the SIGPROF handler, then we will have a // three-thread deadlock (VMThread, this thread, the other thread). 
- trace->num_frames = -10; + trace->num_frames = ticks_safepoint; // -10 return; } +#endif JavaThread* thread; @@ -788,13 +537,13 @@ thread->is_exiting()) { // bad env_id, thread has exited or thread is exiting - trace->num_frames = -8; + trace->num_frames = ticks_thread_exit; // -8 return; } if (thread->in_deopt_handler()) { // thread is in the deoptimization handler so return no frames - trace->num_frames = -9; + trace->num_frames = ticks_deopt; // -9 return; } @@ -802,12 +551,12 @@ "AsyncGetCallTrace must be called by the current interrupted thread"); if (!JvmtiExport::should_post_class_load()) { - trace->num_frames = -1; + trace->num_frames = ticks_no_class_load; // -1 return; } if (Universe::heap()->is_gc_active()) { - trace->num_frames = -2; + trace->num_frames = ticks_GC_active; // -2 return; } @@ -830,15 +579,23 @@ // param isInJava == false - indicate we aren't in Java code if (!thread->pd_get_top_frame_for_signal_handler(&fr, ucontext, false)) { + trace->num_frames = ticks_unknown_not_Java; // -3 unknown frame + } else { if (!thread->has_last_Java_frame()) { - trace->num_frames = 0; // no Java frames + trace->num_frames = 0; // No Java frames } else { - trace->num_frames = -3; // unknown frame + trace->num_frames = ticks_not_walkable_not_Java; // -4 non walkable frame by default + forte_fill_call_trace_given_top(thread, trace, depth, fr); + + // This assert would seem to be valid but it is not. + // It would be valid if we weren't possibly racing a gc + // thread. A gc thread can make a valid interpreted frame + // look invalid. It's a small window but it does happen. + // The assert is left here commented out as a reminder. + // assert(trace->num_frames != ticks_not_walkable_not_Java, "should always be walkable"); + } - } else { - trace->num_frames = -4; // non walkable frame by default - forte_fill_call_trace_given_top(thread, trace, depth, fr); - } + } } break; case _thread_in_Java: @@ -848,16 +605,16 @@ // param isInJava == true - indicate we are in Java code if (!thread->pd_get_top_frame_for_signal_handler(&fr, ucontext, true)) { - trace->num_frames = -5; // unknown frame + trace->num_frames = ticks_unknown_Java; // -5 unknown frame } else { - trace->num_frames = -6; // non walkable frame by default + trace->num_frames = ticks_not_walkable_Java; // -6, non walkable frame by default forte_fill_call_trace_given_top(thread, trace, depth, fr); } } break; default: // Unknown thread state - trace->num_frames = -7; + trace->num_frames = ticks_unknown_state; // -7 break; } } --- old/hotspot/src/share/vm/prims/jni.cpp 2009-08-01 04:14:34.899378053 +0100 +++ new/hotspot/src/share/vm/prims/jni.cpp 2009-08-01 04:14:34.807639819 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)jni.cpp 1.436 07/07/11 09:47:42 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -138,7 +138,10 @@ if (offset <= small_offset_mask) { klassOop field_klass = k; klassOop super_klass = Klass::cast(field_klass)->super(); - while (instanceKlass::cast(super_klass)->contains_field_offset(offset)) { + // With compressed oops the most super class with nonstatic fields would + // be the owner of fields embedded in the header. 
+ while (instanceKlass::cast(super_klass)->has_nonstatic_fields() && + instanceKlass::cast(super_klass)->contains_field_offset(offset)) { field_klass = super_klass; // super contains the field also super_klass = Klass::cast(field_klass)->super(); } @@ -631,7 +634,7 @@ DTRACE_PROBE2(hotspot_jni, FatalError__entry, env, msg); tty->print_cr("FATAL ERROR in native method: %s", msg); thread->print_stack(); - os::abort(false); // Prevent core dump, causes a jck failure. + os::abort(); // Dump core and abort JNI_END @@ -2173,8 +2176,8 @@ size_t size = os::vm_allocation_granularity(); bad_address = os::reserve_memory(size); if (bad_address != NULL) { - os::commit_memory(bad_address, size); - os::protect_memory(bad_address, size); + os::protect_memory(bad_address, size, os::MEM_PROT_READ, + /*is_committed*/false); } } return bad_address; --- old/hotspot/src/share/vm/prims/jniCheck.cpp 2009-08-01 04:14:35.936205599 +0100 +++ new/hotspot/src/share/vm/prims/jniCheck.cpp 2009-08-01 04:14:35.791436226 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)jniCheck.cpp 1.50 07/08/31 11:20:56 JVM" #endif /* - * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -115,18 +115,6 @@ static const char * fatal_non_string = "JNI string operation received a non-string"; - -// Report a JNI failure caught by -Xcheck:jni. Perform a core dump. -// Note: two variations -- one to be called when in VM state (e.g. when -// within IN_VM macro), one to be called when in NATIVE state. - -// When in VM state: -static void ReportJNIFatalError(JavaThread* thr, const char *msg) { - tty->print_cr("FATAL ERROR in native method: %s", msg); - thr->print_stack(); - os::abort(true); -} - // When in VM state: static void ReportJNIWarning(JavaThread* thr, const char *msg) { tty->print_cr("WARNING in native method: %s", msg); --- old/hotspot/src/share/vm/prims/jniCheck.hpp 2009-08-01 04:14:36.831180883 +0100 +++ new/hotspot/src/share/vm/prims/jniCheck.hpp 2009-08-01 04:14:36.756062032 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)jniCheck.hpp 1.11 07/05/05 17:06:32 JVM" #endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,19 @@ * */ +extern "C" { + // Report a JNI failure caught by -Xcheck:jni. Perform a core dump. + // Note: two variations -- one to be called when in VM state (e.g. when + // within IN_VM macro), one to be called when in NATIVE state. + + // When in VM state: + static void ReportJNIFatalError(JavaThread* thr, const char *msg) { + tty->print_cr("FATAL ERROR in native method: %s", msg); + thr->print_stack(); + os::abort(true); + } +} + // // Checked JNI routines that are useful for outside of checked JNI // --- old/hotspot/src/share/vm/prims/jvm.cpp 2009-08-01 04:14:37.713096618 +0100 +++ new/hotspot/src/share/vm/prims/jvm.cpp 2009-08-01 04:14:37.601452730 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)jvm.cpp 1.570 07/08/17 11:48:55 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -380,7 +380,11 @@ JVM_ENTRY_NO_ENV(jlong, JVM_FreeMemory(void)) JVMWrapper("JVM_FreeMemory"); CollectedHeap* ch = Universe::heap(); - size_t n = ch->capacity() - ch->used(); + size_t n; + { + MutexLocker x(Heap_lock); + n = ch->capacity() - ch->used(); + } return convert_size_t_to_jlong(n); JVM_END @@ -627,12 +631,11 @@ if (PrintJVMWarnings) warning("JVM_ResolveClass not implemented"); JVM_END - -JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name, - jboolean init, jobject loader, - jboolean throwError)) - JVMWrapper3("JVM_FindClassFromClassLoader %s throw %s", name, - throwError ? "error" : "exception"); +// Common implementation for JVM_FindClassFromBootLoader and +// JVM_FindClassFromLoader +static jclass jvm_find_class_from_class_loader(JNIEnv* env, const char* name, + jboolean init, jobject loader, + jboolean throwError, TRAPS) { // Java libraries should ensure that name is never null... if (name == NULL || (int)strlen(name) > symbolOopDesc::max_length()) { // It's impossible to create this class; the name cannot fit @@ -645,14 +648,47 @@ } symbolHandle h_name = oopFactory::new_symbol_handle(name, CHECK_NULL); Handle h_loader(THREAD, JNIHandles::resolve(loader)); - jclass result = find_class_from_class_loader(env, h_name, init, h_loader, - Handle(), throwError, thread); + jclass result = find_class_from_class_loader(env, h_name, init, h_loader, + Handle(), throwError, THREAD); if (TraceClassResolution && result != NULL) { trace_class_resolution(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(result))); } - return result; +} + +// Rationale behind JVM_FindClassFromBootLoader +// a> JVM_FindClassFromClassLoader was never exported in the export tables. +// b> because of (a) java.dll has a direct dependecy on the unexported +// private symbol "_JVM_FindClassFromClassLoader@20". +// c> the launcher cannot use the private symbol as it dynamically opens +// the entry point, so if something changes, the launcher will fail +// unexpectedly at runtime, it is safest for the launcher to dlopen a +// stable exported interface. +// d> re-exporting JVM_FindClassFromClassLoader as public, will cause its +// signature to change from _JVM_FindClassFromClassLoader@20 to +// JVM_FindClassFromClassLoader and will not be backward compatible +// with older JDKs. +// Thus a public/stable exported entry point is the right solution, +// public here means public in linker semantics, and is exported only +// to the JDK, and is not intended to be a public API. + +JVM_ENTRY(jclass, JVM_FindClassFromBootLoader(JNIEnv* env, + const char* name, + jboolean throwError)) + JVMWrapper3("JVM_FindClassFromBootLoader %s throw %s", name, + throwError ? "error" : "exception"); + return jvm_find_class_from_class_loader(env, name, JNI_FALSE, + (jobject)NULL, throwError, THREAD); +JVM_END + +JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name, + jboolean init, jobject loader, + jboolean throwError)) + JVMWrapper3("JVM_FindClassFromClassLoader %s throw %s", name, + throwError ? 
"error" : "exception"); + return jvm_find_class_from_class_loader(env, name, init, loader, + throwError, THREAD); JVM_END @@ -711,6 +747,7 @@ // common code for JVM_DefineClass() and JVM_DefineClassWithSource() static jclass jvm_define_class_common(JNIEnv *env, const char *name, jobject loader, const jbyte *buf, jsize len, jobject pd, const char *source, TRAPS) { + if (source == NULL) source = "__JVM_DefineClass__"; // Since exceptions can be thrown, class initialization can take place // if name is NULL no check for class name in .class stream has to be made. @@ -749,7 +786,7 @@ JVM_ENTRY(jclass, JVM_DefineClass(JNIEnv *env, const char *name, jobject loader, const jbyte *buf, jsize len, jobject pd)) JVMWrapper2("JVM_DefineClass %s", name); - return jvm_define_class_common(env, name, loader, buf, len, pd, "__JVM_DefineClass__", THREAD); + return jvm_define_class_common(env, name, loader, buf, len, pd, NULL, THREAD); JVM_END @@ -4171,6 +4208,36 @@ return res == oldVal; JVM_END +// DTrace /////////////////////////////////////////////////////////////////// + +JVM_ENTRY(jint, JVM_DTraceGetVersion(JNIEnv* env)) + JVMWrapper("JVM_DTraceGetVersion"); + return (jint)JVM_TRACING_DTRACE_VERSION; +JVM_END + +JVM_ENTRY(jlong,JVM_DTraceActivate( + JNIEnv* env, jint version, jstring module_name, jint providers_count, + JVM_DTraceProvider* providers)) + JVMWrapper("JVM_DTraceActivate"); + return DTraceJSDT::activate( + version, module_name, providers_count, providers, CHECK_0); +JVM_END + +JVM_ENTRY(jboolean,JVM_DTraceIsProbeEnabled(JNIEnv* env, jmethodID method)) + JVMWrapper("JVM_DTraceIsProbeEnabled"); + return DTraceJSDT::is_probe_enabled(method); +JVM_END + +JVM_ENTRY(void,JVM_DTraceDispose(JNIEnv* env, jlong handle)) + JVMWrapper("JVM_DTraceDispose"); + DTraceJSDT::dispose(handle); +JVM_END + +JVM_ENTRY(jboolean,JVM_DTraceIsSupported(JNIEnv* env)) + JVMWrapper("JVM_DTraceIsSupported"); + return DTraceJSDT::is_supported(); +JVM_END + // Returns an array of all live Thread objects (VM internal JavaThreads, // jvmti agent threads, and JNI attaching threads are skipped) // See CR 6404306 regarding JNI attaching threads --- old/hotspot/src/share/vm/prims/jvm.h 2009-08-01 04:14:38.846677667 +0100 +++ new/hotspot/src/share/vm/prims/jvm.h 2009-08-01 04:14:38.761424128 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)jvm.h 1.88 07/08/15 20:22:47 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -393,6 +393,17 @@ jobject loader, jboolean throwError); /* + * Find a class from a boot class loader. Throw ClassNotFoundException + * or NoClassDefFoundError depending on the value of the last + * argument. This is the same as FindClassFromClassLoader but provided + * as a convenience method exported correctly on all platforms for + * JSR 277 launcher class loading. + */ +JNIEXPORT jclass JNICALL +JVM_FindClassFromBootLoader(JNIEnv *env, const char *name, + jboolean throwError); + +/* * Find a class from a given class. 
*/ JNIEXPORT jclass JNICALL @@ -414,6 +425,14 @@ const jbyte *buf, jsize len, jobject pd, const char *source); +/* Define a class with a source (MLVM) */ +JNIEXPORT jclass JNICALL +JVM_DefineClassWithCP(JNIEnv *env, const char *name, jobject loader, + const jbyte *buf, jsize len, jobject pd, + const char *source, + // same args as JVM_DefineClassWithSource to this point + jobjectArray constants); + /* * Reflection support functions */ @@ -609,6 +628,83 @@ JNIEXPORT jboolean JNICALL JVM_CX8Field(JNIEnv *env, jobject obj, jfieldID fldID, jlong oldVal, jlong newVal); +/* + * com.sun.dtrace.jsdt support + */ + +#define JVM_TRACING_DTRACE_VERSION 1 + +/* + * Structure to pass one probe description to JVM. + * + * The VM will overwrite the definition of the referenced method with + * code that will fire the probe. + */ +typedef struct { + jmethodID method; + jstring function; + jstring name; + void* reserved[4]; // for future use +} JVM_DTraceProbe; + +/** + * Encapsulates the stability ratings for a DTrace provider field + */ +typedef struct { + jint nameStability; + jint dataStability; + jint dependencyClass; +} JVM_DTraceInterfaceAttributes; + +/* + * Structure to pass one provider description to JVM + */ +typedef struct { + jstring name; + JVM_DTraceProbe* probes; + jint probe_count; + JVM_DTraceInterfaceAttributes providerAttributes; + JVM_DTraceInterfaceAttributes moduleAttributes; + JVM_DTraceInterfaceAttributes functionAttributes; + JVM_DTraceInterfaceAttributes nameAttributes; + JVM_DTraceInterfaceAttributes argsAttributes; + void* reserved[4]; // for future use +} JVM_DTraceProvider; + +/* + * Get the version number the JVM was built with + */ +JNIEXPORT jint JNICALL +JVM_DTraceGetVersion(JNIEnv* env); + +/* + * Register new probe with given signature, return global handle + * + * The version passed in is the version that the library code was + * built with. + */ +JNIEXPORT jlong JNICALL +JVM_DTraceActivate(JNIEnv* env, jint version, jstring module_name, + jint providers_count, JVM_DTraceProvider* providers); + +/* + * Check JSDT probe + */ +JNIEXPORT jboolean JNICALL +JVM_DTraceIsProbeEnabled(JNIEnv* env, jmethodID method); + +/* + * Destroy custom DOF + */ +JNIEXPORT void JNICALL +JVM_DTraceDispose(JNIEnv* env, jlong handle); + +/* + * Check to see if DTrace is supported by OS + */ +JNIEXPORT jboolean JNICALL +JVM_DTraceIsSupported(JNIEnv* env); + /************************************************************************* PART 2: Support for the Verifier and Class File Format Checker ************************************************************************/ --- old/hotspot/src/share/vm/prims/jvmtiEnv.cpp 2009-08-01 04:14:39.797168177 +0100 +++ new/hotspot/src/share/vm/prims/jvmtiEnv.cpp 2009-08-01 04:14:39.701016145 +0100 @@ -102,6 +102,9 @@ } // otherwise, create the state state = JvmtiThreadState::state_for(java_thread); + if (state == NULL) { + return JVMTI_ERROR_THREAD_NOT_ALIVE; + } } state->env_thread_state(this)->set_agent_thread_local_storage_data((void*)data); return JVMTI_ERROR_NONE; @@ -1311,6 +1314,9 @@ // retrieve or create JvmtiThreadState. 
JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread); + if (state == NULL) { + return JVMTI_ERROR_THREAD_NOT_ALIVE; + } uint32_t debug_bits = 0; if (is_thread_fully_suspended(java_thread, true, &debug_bits)) { err = get_frame_count(state, count_ptr); @@ -1332,6 +1338,12 @@ HandleMark hm(current_thread); uint32_t debug_bits = 0; + // retrieve or create the state + JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread); + if (state == NULL) { + return JVMTI_ERROR_THREAD_NOT_ALIVE; + } + // Check if java_thread is fully suspended if (!is_thread_fully_suspended(java_thread, true /* wait for suspend completion */, &debug_bits)) { return JVMTI_ERROR_THREAD_NOT_SUSPENDED; @@ -1401,9 +1413,6 @@ // It's fine to update the thread state here because no JVMTI events // shall be posted for this PopFrame. - - // retreive or create the state - JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread); state->update_for_pop_top_frame(); java_thread->set_popframe_condition(JavaThread::popframe_pending_bit); @@ -1448,6 +1457,11 @@ ResourceMark rm; uint32_t debug_bits = 0; + JvmtiThreadState *state = JvmtiThreadState::state_for(java_thread); + if (state == NULL) { + return JVMTI_ERROR_THREAD_NOT_ALIVE; + } + if (!JvmtiEnv::is_thread_fully_suspended(java_thread, true, &debug_bits)) { return JVMTI_ERROR_THREAD_NOT_SUSPENDED; } @@ -1467,7 +1481,6 @@ assert(vf->frame_pointer() != NULL, "frame pointer mustn't be NULL"); - JvmtiThreadState *state = JvmtiThreadState::state_for(java_thread); int frame_number = state->count_frames() - depth; state->env_thread_state(this)->set_frame_pop(frame_number); --- old/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp 2009-08-01 04:14:40.797957362 +0100 +++ new/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp 2009-08-01 04:14:40.704345289 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)jvmtiEnvBase.cpp 1.90 07/07/16 14:37:39 JVM" #endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -124,7 +124,7 @@ JvmtiEventController::env_initialize((JvmtiEnv*)this); #ifdef JVMTI_TRACE - _jvmti_external.functions = strlen(TraceJVMTI)? &jvmtiTrace_Interface : &jvmti_Interface; + _jvmti_external.functions = TraceJVMTI != NULL ? &jvmtiTrace_Interface : &jvmti_Interface; #else _jvmti_external.functions = &jvmti_Interface; #endif @@ -1325,6 +1325,12 @@ HandleMark hm(current_thread); uint32_t debug_bits = 0; + // retrieve or create the state + JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread); + if (state == NULL) { + return JVMTI_ERROR_THREAD_NOT_ALIVE; + } + // Check if java_thread is fully suspended if (!is_thread_fully_suspended(java_thread, true /* wait for suspend completion */, @@ -1332,9 +1338,6 @@ return JVMTI_ERROR_THREAD_NOT_SUSPENDED; } - // retreive or create the state - JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread); - // Check to see if a ForceEarlyReturn was already in progress if (state->is_earlyret_pending()) { // Probably possible for JVMTI clients to trigger this, but the --- old/hotspot/src/share/vm/prims/jvmtiEventController.cpp 2009-08-01 04:14:41.683157491 +0100 +++ new/hotspot/src/share/vm/prims/jvmtiEventController.cpp 2009-08-01 04:14:41.597541123 +0100 @@ -481,6 +481,11 @@ // set external state accordingly. Only thread-filtered events are included. 
jlong JvmtiEventControllerPrivate::recompute_thread_enabled(JvmtiThreadState *state) { + if (state == NULL) { + // associated JavaThread is exiting + return (jlong)0; + } + jlong was_any_env_enabled = state->thread_event_enable()->_event_enabled.get_bits(); jlong any_env_enabled = 0; @@ -556,6 +561,7 @@ { MutexLocker mu(Threads_lock); //hold the Threads_lock for the iteration for (JavaThread *tp = Threads::first(); tp != NULL; tp = tp->next()) { + // state_for_while_locked() makes tp->is_exiting() check JvmtiThreadState::state_for_while_locked(tp); // create the thread state if missing } }// release Threads_lock --- old/hotspot/src/share/vm/prims/jvmtiExport.cpp 2009-08-01 04:14:42.590125223 +0100 +++ new/hotspot/src/share/vm/prims/jvmtiExport.cpp 2009-08-01 04:14:42.496141779 +0100 @@ -1875,6 +1875,9 @@ { // register the stub with the current dynamic code event collector JvmtiThreadState* state = JvmtiThreadState::state_for(JavaThread::current()); + // state can only be NULL if the current thread is exiting which + // should not happen since we're trying to post an event + guarantee(state != NULL, "attempt to register stub via an exiting thread"); JvmtiDynamicCodeEventCollector* collector = state->get_dynamic_code_event_collector(); guarantee(collector != NULL, "attempt to register stub without event collector"); collector->register_stub(name, code_begin, code_end); @@ -2256,8 +2259,11 @@ void JvmtiEventCollector::setup_jvmti_thread_state() { // set this event collector to be the current one. JvmtiThreadState* state = JvmtiThreadState::state_for(JavaThread::current()); - if (is_vm_object_alloc_event()) { - _prev = state->get_vm_object_alloc_event_collector(); + // state can only be NULL if the current thread is exiting which + // should not happen since we're trying to configure for event collection + guarantee(state != NULL, "exiting thread called setup_jvmti_thread_state"); + if (is_vm_object_alloc_event()) { + _prev = state->get_vm_object_alloc_event_collector(); state->set_vm_object_alloc_event_collector((JvmtiVMObjectAllocEventCollector *)this); } else if (is_dynamic_code_event()) { _prev = state->get_dynamic_code_event_collector(); @@ -2436,18 +2442,7 @@ // so we record the number of collections so that it can be checked in // the destructor. if (!_full) { - if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap) { - GenCollectedHeap* gch = GenCollectedHeap::heap(); - assert(gch->n_gens() == 2, "configuration not recognized"); - _invocation_count = (unsigned int)gch->get_gen(1)->stat_record()->invocations; - } else { -#ifndef SERIALGC - assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "checking"); - _invocation_count = PSMarkSweep::total_invocations(); -#else // SERIALGC - fatal("SerialGC only supported in this configuration."); -#endif // SERIALGC - } + _invocation_count = Universe::heap()->total_full_collections(); } // Do clean up tasks that need to be done at a safepoint @@ -2469,20 +2464,7 @@ // generation but could have ended up doing a "full" GC - check the // GC count to see. 
if (!_full) { - if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap) { - GenCollectedHeap* gch = GenCollectedHeap::heap(); - if (_invocation_count != (unsigned int)gch->get_gen(1)->stat_record()->invocations) { - _full = true; - } - } else { -#ifndef SERIALGC - if (_invocation_count != PSMarkSweep::total_invocations()) { - _full = true; - } -#else // SERIALGC - fatal("SerialGC only supported in this configuration."); -#endif // SERIALGC - } + _full = (_invocation_count != Universe::heap()->total_full_collections()); } // Full collection probably means the perm generation has been GC'ed --- old/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp 2009-08-01 04:14:43.633241737 +0100 +++ new/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp 2009-08-01 04:14:43.525834348 +0100 @@ -834,6 +834,9 @@ ResourceMark rm(THREAD); JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); + // state can only be NULL if the current thread is exiting which + // should not happen since we're trying to do a RedefineClasses + guarantee(state != NULL, "exiting thread calling load_new_class_versions"); for (int i = 0; i < _class_count; i++) { oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass); // classes for primitives cannot be redefined --- old/hotspot/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp 2009-08-01 04:14:44.607545376 +0100 +++ new/hotspot/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp 2009-08-01 04:14:44.531202808 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)jvmtiRedefineClassesTrace.hpp 1.2 07/12/20 10:58:59 JVM" -#endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ // RedefineClasses tracing support via the TraceRedefineClasses @@ -34,7 +31,7 @@ // the old cumulative behavior, pick the level after the one in // which you are interested and subtract one, e.g., 33554431 will // print every tracing message. 
-// +// // 0x00000000 | 0 - default; no tracing messages // 0x00000001 | 1 - name each target class before loading, after // loading and after redefinition is completed @@ -52,10 +49,10 @@ // 0x00000400 | 1024 - previous class weak reference mgmt during // add previous ops (GC) // 0x00000800 | 2048 - previous class breakpoint mgmt -// 0x00001000 | 4096 - unused -// 0x00002000 | 8192 - unused +// 0x00001000 | 4096 - detect calls to obsolete methods +// 0x00002000 | 8192 - fail a guarantee() in addition to detection // 0x00004000 | 16384 - unused -// 0x00008000 | 32768 - old/new method matching/add/delete +// 0x00008000 | 32768 - old/new method matching/add/delete // 0x00010000 | 65536 - impl details: CP size info // 0x00020000 | 131072 - impl details: CP merge pass info // 0x00040000 | 262144 - impl details: CP index maps @@ -124,5 +121,3 @@ if (RC_TRACE_ENABLED(0x00000004)) { \ t.stop(); \ } while (0) - - --- old/hotspot/src/share/vm/prims/jvmtiTagMap.cpp 2009-08-01 04:14:45.453801491 +0100 +++ new/hotspot/src/share/vm/prims/jvmtiTagMap.cpp 2009-08-01 04:14:45.359812496 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)jvmtiTagMap.cpp 1.85 07/06/27 00:30:05 JVM" #endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -403,16 +403,28 @@ // get the memory region used for the young generation void JvmtiTagMap::get_young_generation() { - if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap) { - GenCollectedHeap* gch = GenCollectedHeap::heap(); - _young_gen = gch->get_gen(0)->reserved(); - } else { + CollectedHeap* ch = Universe::heap(); + switch (ch->kind()) { + case (CollectedHeap::GenCollectedHeap): { + _young_gen = ((GenCollectedHeap*)ch)->get_gen(0)->reserved(); + break; + } #ifndef SERIALGC - ParallelScavengeHeap* psh = ParallelScavengeHeap::heap(); - _young_gen= psh->young_gen()->reserved(); -#else // SERIALGC - fatal("SerialGC only supported in this configuration."); -#endif // SERIALGC + case (CollectedHeap::ParallelScavengeHeap): { + _young_gen = ((ParallelScavengeHeap*)ch)->young_gen()->reserved(); + break; + } + case (CollectedHeap::G1CollectedHeap): { + // Until a more satisfactory solution is implemented, all + // oops in the tag map will require rehash at each gc. + // This is a correct, if extremely inefficient solution. + // See RFE 6621729 for related commentary. 
+ _young_gen = ch->reserved_region(); + break; + } +#endif // !SERIALGC + default: + ShouldNotReachHere(); } } @@ -2665,6 +2677,7 @@ _continue = CallbackInvoker::report_simple_root(kind, o); } + virtual void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); } }; // A supporting closure used to process JNI locals @@ -2707,6 +2720,7 @@ // invoke the callback _continue = CallbackInvoker::report_jni_local_root(_thread_tag, _tid, _depth, _method, o); } + virtual void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); } }; @@ -2881,9 +2895,11 @@ } // verify that a static oop field is in range -static inline bool verify_static_oop(instanceKlass* ik, oop* obj_p) { - oop* start = ik->start_of_static_fields(); - oop* end = start + ik->static_oop_field_size(); +static inline bool verify_static_oop(instanceKlass* ik, + klassOop k, int offset) { + address obj_p = (address)k + offset; + address start = (address)ik->start_of_static_fields(); + address end = start + (ik->static_oop_field_size() * heapOopSize); assert(end >= start, "sanity check"); if (obj_p >= start && obj_p < end) { @@ -2982,26 +2998,15 @@ ClassFieldMap* field_map = ClassFieldMap::create_map_of_static_fields(k); for (i=0; ifield_count(); i++) { ClassFieldDescriptor* field = field_map->field_at(i); - char type = field->field_type(); - if (!is_primitive_field_type(type)) { - address addr = (address)k + field->field_offset(); - oop* f = (oop*)addr; - assert(verify_static_oop(ik, f), "sanity check"); - oop fld_o = *f; - if (fld_o != NULL) { - int slot = field->field_index(); - if (!CallbackInvoker::report_static_field_reference(mirror, fld_o, slot)) { - delete field_map; - return false; - } - } - } else { - if (is_reporting_primitive_fields()) { - address addr = (address)k + field->field_offset(); - int slot = field->field_index(); - if (!CallbackInvoker::report_primitive_static_field(mirror, slot, addr, type)) { - delete field_map; - return false; + char type = field->field_type(); + if (!is_primitive_field_type(type)) { + oop fld_o = k->obj_field(field->field_offset()); + assert(verify_static_oop(ik, k, field->field_offset()), "sanity check"); + if (fld_o != NULL) { + int slot = field->field_index(); + if (!CallbackInvoker::report_static_field_reference(mirror, fld_o, slot)) { + delete field_map; + return false; } } } @@ -3029,9 +3034,7 @@ ClassFieldDescriptor* field = field_map->field_at(i); char type = field->field_type(); if (!is_primitive_field_type(type)) { - address addr = (address)o + field->field_offset(); - oop* f = (oop*)addr; - oop fld_o = *f; + oop fld_o = o->obj_field(field->field_offset()); if (fld_o != NULL) { // reflection code may have a reference to a klassOop. // - see sun.reflect.UnsafeStaticFieldAccessorImpl and sun.misc.Unsafe --- old/hotspot/src/share/vm/prims/jvmtiThreadState.hpp 2009-08-01 04:14:46.557476561 +0100 +++ new/hotspot/src/share/vm/prims/jvmtiThreadState.hpp 2009-08-01 04:14:46.474596617 +0100 @@ -317,17 +317,24 @@ void update_for_pop_top_frame(); // already holding JvmtiThreadState_lock - retrieve or create JvmtiThreadState + // Can return NULL if JavaThread is exiting. 
inline static JvmtiThreadState *state_for_while_locked(JavaThread *thread) { assert(JvmtiThreadState_lock->is_locked(), "sanity check"); JvmtiThreadState *state = thread->jvmti_thread_state(); if (state == NULL) { + if (thread->is_exiting()) { + // don't add a JvmtiThreadState to a thread that is exiting + return NULL; + } + state = new JvmtiThreadState(thread); } return state; } // retrieve or create JvmtiThreadState + // Can return NULL if JavaThread is exiting. inline static JvmtiThreadState *state_for(JavaThread *thread) { JvmtiThreadState *state = thread->jvmti_thread_state(); if (state == NULL) { --- old/hotspot/src/share/vm/prims/jvmtiTrace.cpp 2009-08-01 04:14:47.420635165 +0100 +++ new/hotspot/src/share/vm/prims/jvmtiTrace.cpp 2009-08-01 04:14:47.339943341 +0100 @@ -1,8 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)jvmtiTrace.cpp 1.1 07/07/16 15:03:49 JVM" -#endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +19,7 @@ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. - * + * */ # include "incls/_precompiled.incl" @@ -73,10 +70,10 @@ return; } SafeResourceMark rm; - + const char *very_end; const char *curr; - if (strlen(TraceJVMTI)) { + if (TraceJVMTI != NULL) { curr = TraceJVMTI; } else { curr = ""; // hack in fixed tracing here @@ -106,25 +103,25 @@ jbyte bits = 0; for (; flags < flags_end; ++flags) { switch (*flags) { - case 'i': + case 'i': bits |= SHOW_IN; break; - case 'I': + case 'I': bits |= SHOW_IN_DETAIL; break; - case 'e': + case 'e': bits |= SHOW_ERROR; break; - case 'o': + case 'o': bits |= SHOW_OUT; break; - case 'O': + case 'O': bits |= SHOW_OUT_DETAIL; break; - case 't': + case 't': bits |= SHOW_EVENT_TRIGGER; break; - case 's': + case 's': bits |= SHOW_EVENT_SENT; break; default: @@ -274,7 +271,7 @@ } return UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length()); } - + // return the name of the current thread const char *JvmtiTrace::safe_get_current_thread_name() { --- old/hotspot/src/share/vm/prims/unsafe.cpp 2009-08-01 04:14:48.272261906 +0100 +++ new/hotspot/src/share/vm/prims/unsafe.cpp 2009-08-01 04:14:48.181160462 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)unsafe.cpp 1.64 07/05/17 16:05:09 JVM" #endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -103,8 +103,8 @@ assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset"); if (byte_offset == (jint)byte_offset) { void* ptr_plus_disp = (address)p + byte_offset; - assert((void*)p->obj_field_addr((jint)byte_offset) == ptr_plus_disp, - "raw [ptr+disp] must be consistent with oop::field_base"); + assert((void*)p->obj_field_addr((jint)byte_offset) == ptr_plus_disp, + "raw [ptr+disp] must be consistent with oop::field_base"); } } #endif @@ -149,13 +149,36 @@ *(volatile type_name*)index_oop_from_field_offset_long(p, offset) = x; \ OrderAccess::fence(); +// Macros for oops that check UseCompressedOops + +#define GET_OOP_FIELD(obj, offset, v) \ + oop p = JNIHandles::resolve(obj); \ + oop v; \ + if (UseCompressedOops) { \ + narrowOop n = *(narrowOop*)index_oop_from_field_offset_long(p, offset); \ + v = oopDesc::decode_heap_oop(n); \ + } else { \ + v = *(oop*)index_oop_from_field_offset_long(p, offset); \ + } + +#define GET_OOP_FIELD_VOLATILE(obj, offset, v) \ + oop p = JNIHandles::resolve(obj); \ + volatile oop v; \ + if (UseCompressedOops) { \ + volatile narrowOop n = *(volatile narrowOop*)index_oop_from_field_offset_long(p, offset); \ + v = oopDesc::decode_heap_oop(n); \ + } else { \ + v = *(volatile oop*)index_oop_from_field_offset_long(p, offset); \ + } + + // Get/SetObject must be special-cased, since it works with handles. // The xxx140 variants for backward compatibility do not allow a full-width offset. UNSAFE_ENTRY(jobject, Unsafe_GetObject140(JNIEnv *env, jobject unsafe, jobject obj, jint offset)) UnsafeWrapper("Unsafe_GetObject"); if (obj == NULL) THROW_0(vmSymbols::java_lang_NullPointerException()); - GET_FIELD(obj, offset, oop, v); + GET_OOP_FIELD(obj, offset, v) return JNIHandles::make_local(env, v); UNSAFE_END @@ -165,11 +188,21 @@ oop x = JNIHandles::resolve(x_h); //SET_FIELD(obj, offset, oop, x); oop p = JNIHandles::resolve(obj); - if (x != NULL) { - // If there is a heap base pointer, we are obliged to emit a store barrier. - oop_store((oop*)index_oop_from_field_offset_long(p, offset), x); + if (UseCompressedOops) { + if (x != NULL) { + // If there is a heap base pointer, we are obliged to emit a store barrier. + oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x); + } else { + narrowOop n = oopDesc::encode_heap_oop_not_null(x); + *(narrowOop*)index_oop_from_field_offset_long(p, offset) = n; + } } else { - *(oop*)index_oop_from_field_offset_long(p, offset) = x; + if (x != NULL) { + // If there is a heap base pointer, we are obliged to emit a store barrier. + oop_store((oop*)index_oop_from_field_offset_long(p, offset), x); + } else { + *(oop*)index_oop_from_field_offset_long(p, offset) = x; + } } UNSAFE_END @@ -178,7 +211,7 @@ // That is, it should be in the range [0, MAX_OBJECT_SIZE]. 
UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) UnsafeWrapper("Unsafe_GetObject"); - GET_FIELD(obj, offset, oop, v); + GET_OOP_FIELD(obj, offset, v) return JNIHandles::make_local(env, v); UNSAFE_END @@ -186,12 +219,16 @@ UnsafeWrapper("Unsafe_SetObject"); oop x = JNIHandles::resolve(x_h); oop p = JNIHandles::resolve(obj); - oop_store((oop*)index_oop_from_field_offset_long(p, offset), x); + if (UseCompressedOops) { + oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x); + } else { + oop_store((oop*)index_oop_from_field_offset_long(p, offset), x); + } UNSAFE_END UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) UnsafeWrapper("Unsafe_GetObjectVolatile"); - GET_FIELD_VOLATILE(obj, offset, oop, v); + GET_OOP_FIELD_VOLATILE(obj, offset, v) return JNIHandles::make_local(env, v); UNSAFE_END @@ -199,7 +236,11 @@ UnsafeWrapper("Unsafe_SetObjectVolatile"); oop x = JNIHandles::resolve(x_h); oop p = JNIHandles::resolve(obj); - oop_store((oop*)index_oop_from_field_offset_long(p, offset), x); + if (UseCompressedOops) { + oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x); + } else { + oop_store((oop*)index_oop_from_field_offset_long(p, offset), x); + } OrderAccess::fence(); UNSAFE_END @@ -314,7 +355,11 @@ UnsafeWrapper("Unsafe_SetOrderedObject"); oop x = JNIHandles::resolve(x_h); oop p = JNIHandles::resolve(obj); - oop_store((oop*)index_oop_from_field_offset_long(p, offset), x); + if (UseCompressedOops) { + oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x); + } else { + oop_store((oop*)index_oop_from_field_offset_long(p, offset), x); + } OrderAccess::fence(); UNSAFE_END @@ -650,7 +695,7 @@ THROW(vmSymbols::java_lang_InvalidClassException()); } else if (k->klass_part()->oop_is_objArray()) { base = arrayOopDesc::base_offset_in_bytes(T_OBJECT); - scale = oopSize; + scale = heapOopSize; } else if (k->klass_part()->oop_is_typeArray()) { typeArrayKlass* tak = typeArrayKlass::cast(k); base = tak->array_header_in_bytes(); @@ -795,6 +840,163 @@ } UNSAFE_END +#define DAC_Args CLS"[B["OBJ +// define a class but do not make it known to the class loader or system dictionary +// - host_class: supplies context for linkage, access control, protection domain, and class loader +// - data: bytes of a class file, a raw memory address (length gives the number of bytes) +// - cp_patches: where non-null entries exist, they replace corresponding CP entries in data + +// When you load an anonymous class U, it works as if you changed its name just before loading, +// to a name that you will never use again. Since the name is lost, no other class can directly +// link to any member of U. Just after U is loaded, the only way to use it is reflectively, +// through java.lang.Class methods like Class.newInstance. + +// Access checks for linkage sites within U continue to follow the same rules as for named classes. +// The package of an anonymous class is given by the package qualifier on the name under which it was loaded. +// An anonymous class also has special privileges to access any member of its host class. +// This is the main reason why this loading operation is unsafe. The purpose of this is to +// allow language implementations to simulate "open classes"; a host class in effect gets +// new code when an anonymous class is loaded alongside it. 
A less convenient but more +// standard way to do this is with reflection, which can also be set to ignore access +// restrictions. + +// Access into an anonymous class is possible only through reflection. Therefore, there +// are no special access rules for calling into an anonymous class. The relaxed access +// rule for the host class is applied in the opposite direction: A host class reflectively +// access one of its anonymous classes. + +// If you load the same bytecodes twice, you get two different classes. You can reload +// the same bytecodes with or without varying CP patches. + +// By using the CP patching array, you can have a new anonymous class U2 refer to an older one U1. +// The bytecodes for U2 should refer to U1 by a symbolic name (doesn't matter what the name is). +// The CONSTANT_Class entry for that name can be patched to refer directly to U1. + +// This allows, for example, U2 to use U1 as a superclass or super-interface, or as +// an outer class (so that U2 is an anonymous inner class of anonymous U1). +// It is not possible for a named class, or an older anonymous class, to refer by +// name (via its CP) to a newer anonymous class. + +// CP patching may also be used to modify (i.e., hack) the names of methods, classes, +// or type descriptors used in the loaded anonymous class. + +// Finally, CP patching may be used to introduce "live" objects into the constant pool, +// instead of "dead" strings. A compiled statement like println((Object)"hello") can +// be changed to println(greeting), where greeting is an arbitrary object created before +// the anonymous class is loaded. This is useful in dynamic languages, in which +// various kinds of metaobjects must be introduced as constants into bytecode. +// Note the cast (Object), which tells the verifier to expect an arbitrary object, +// not just a literal string. For such ldc instructions, the verifier uses the +// type Object instead of String, if the loaded constant is not in fact a String. 
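The lazily sized patch table described above can be modelled with standard containers. The sketch below uses a plain pointer as a stand-in for a Handle and mirrors the back-to-front scan in the implementation that follows; only non-null entries are recorded:

    #include <cstddef>
    #include <vector>

    struct PatchEntry { const void* obj; };   // illustrative stand-in for a Handle

    // The table is allocated lazily and sized to the highest patched CP index + 1.
    std::vector<PatchEntry>* collect_cp_patches(const std::vector<const void*>& raw) {
      std::vector<PatchEntry>* patches = nullptr;
      for (std::size_t i = raw.size(); i-- > 0; ) {      // scan from the back
        if (raw[i] != nullptr) {
          if (patches == nullptr) {
            patches = new std::vector<PatchEntry>(i + 1, PatchEntry{nullptr});
          }
          (*patches)[i] = PatchEntry{raw[i]};
        }
      }
      return patches;  // nullptr means the class is loaded with no patches
    }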
+ +static oop +Unsafe_DefineAnonymousClass_impl(JNIEnv *env, + jclass host_class, jbyteArray data, jobjectArray cp_patches_jh, + HeapWord* *temp_alloc, + TRAPS) { + + if (UsePerfData) { + ClassLoader::unsafe_defineClassCallCounter()->inc(); + } + + if (data == NULL) { + THROW_0(vmSymbols::java_lang_NullPointerException()); + } + + jint length = typeArrayOop(JNIHandles::resolve_non_null(data))->length(); + jint word_length = (length + sizeof(HeapWord)-1) / sizeof(HeapWord); + HeapWord* body = NEW_C_HEAP_ARRAY(HeapWord, word_length); + if (body == NULL) { + THROW_0(vmSymbols::java_lang_OutOfMemoryError()); + } + + // caller responsible to free it: + (*temp_alloc) = body; + + { + jbyte* array_base = typeArrayOop(JNIHandles::resolve_non_null(data))->byte_at_addr(0); + Copy::conjoint_words((HeapWord*) array_base, body, word_length); + } + + u1* class_bytes = (u1*) body; + int class_bytes_length = (int) length; + if (class_bytes_length < 0) class_bytes_length = 0; + if (class_bytes == NULL + || host_class == NULL + || length != class_bytes_length) + THROW_0(vmSymbols::java_lang_IllegalArgumentException()); + + objArrayHandle cp_patches_h; + if (cp_patches_jh != NULL) { + oop p = JNIHandles::resolve_non_null(cp_patches_jh); + if (!p->is_objArray()) + THROW_0(vmSymbols::java_lang_IllegalArgumentException()); + cp_patches_h = objArrayHandle(THREAD, (objArrayOop)p); + } + + KlassHandle host_klass(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(host_class))); + const char* host_source = host_klass->external_name(); + Handle host_loader(THREAD, host_klass->class_loader()); + Handle host_domain(THREAD, host_klass->protection_domain()); + + GrowableArray* cp_patches = NULL; + if (cp_patches_h.not_null()) { + int alen = cp_patches_h->length(); + for (int i = alen-1; i >= 0; i--) { + oop p = cp_patches_h->obj_at(i); + if (p != NULL) { + Handle patch(THREAD, p); + if (cp_patches == NULL) + cp_patches = new GrowableArray(i+1, i+1, Handle()); + cp_patches->at_put(i, patch); + } + } + } + + ClassFileStream st(class_bytes, class_bytes_length, (char*) host_source); + + instanceKlassHandle anon_klass; + { + symbolHandle no_class_name; + klassOop anonk = SystemDictionary::parse_stream(no_class_name, + host_loader, host_domain, + &st, host_klass, cp_patches, + CHECK_NULL); + if (anonk == NULL) return NULL; + anon_klass = instanceKlassHandle(THREAD, anonk); + } + + // let caller initialize it as needed... 
+ + return anon_klass->java_mirror(); +} + +UNSAFE_ENTRY(jclass, Unsafe_DefineAnonymousClass(JNIEnv *env, jobject unsafe, jclass host_class, jbyteArray data, jobjectArray cp_patches_jh)) +{ + UnsafeWrapper("Unsafe_DefineAnonymousClass"); + ResourceMark rm(THREAD); + + HeapWord* temp_alloc = NULL; + + jobject res_jh = NULL; + + { oop res_oop = Unsafe_DefineAnonymousClass_impl(env, + host_class, data, cp_patches_jh, + &temp_alloc, THREAD); + if (res_oop != NULL) + res_jh = JNIHandles::make_local(env, res_oop); + } + + // try/finally clause: + if (temp_alloc != NULL) { + FREE_C_HEAP_ARRAY(HeapWord, temp_alloc); + } + + return (jclass) res_jh; +} +UNSAFE_END + + UNSAFE_ENTRY(void, Unsafe_MonitorEnter(JNIEnv *env, jobject unsafe, jobject jobj)) UnsafeWrapper("Unsafe_MonitorEnter"); @@ -848,11 +1050,12 @@ oop x = JNIHandles::resolve(x_h); oop e = JNIHandles::resolve(e_h); oop p = JNIHandles::resolve(obj); - intptr_t* addr = (intptr_t *)index_oop_from_field_offset_long(p, offset); - intptr_t res = Atomic::cmpxchg_ptr((intptr_t)x, addr, (intptr_t)e); - jboolean success = (res == (intptr_t)e); + HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset); + update_barrier_set_pre((void*)addr, e); + oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e); + jboolean success = (res == e); if (success) - update_barrier_set((oop*)addr, x); + update_barrier_set((void*)addr, x); return success; UNSAFE_END @@ -1249,6 +1452,9 @@ {CC"copyMemory", CC"("ADR ADR"J)V", FN_PTR(Unsafe_CopyMemory)} }; +JNINativeMethod anonk_methods[] = { + {CC"defineAnonymousClass", CC"("DAC_Args")"CLS, FN_PTR(Unsafe_DefineAnonymousClass)}, +}; #undef CC #undef FN_PTR @@ -1311,6 +1517,15 @@ } } } + if (AnonymousClasses) { + env->RegisterNatives(unsafecls, anonk_methods, sizeof(anonk_methods)/sizeof(JNINativeMethod)); + if (env->ExceptionOccurred()) { + if (PrintMiscellaneous && (Verbose || WizardMode)) { + tty->print_cr("Warning: SDK 1.7 Unsafe.defineClass (anonymous version) not found."); + } + env->ExceptionClear(); + } + } int status = env->RegisterNatives(unsafecls, methods, sizeof(methods)/sizeof(JNINativeMethod)); if (env->ExceptionOccurred()) { if (PrintMiscellaneous && (Verbose || WizardMode)) { --- old/hotspot/src/share/vm/runtime/aprofiler.hpp 2009-08-01 04:14:49.271262500 +0100 +++ new/hotspot/src/share/vm/runtime/aprofiler.hpp 2009-08-01 04:14:49.203427020 +0100 @@ -36,6 +36,7 @@ class AllocationProfiler: AllStatic { friend class GenCollectedHeap; + friend class G1CollectedHeap; friend class MarkSweep; private: static bool _active; // tells whether profiler is active --- old/hotspot/src/share/vm/runtime/arguments.cpp 2009-08-01 04:14:50.111776304 +0100 +++ new/hotspot/src/share/vm/runtime/arguments.cpp 2009-08-01 04:14:50.026735833 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)arguments.cpp 1.337 07/10/23 13:12:47 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -156,37 +156,56 @@ os::init_system_properties_values(); } -// String containing commands that will be ignored and cause a -// warning to be issued. These commands should be accepted -// for 1.6 but not 1.7. The string should be cleared at the -// beginning of 1.7. 
-static const char* obsolete_jvm_flags_1_5_0[] = { - "UseTrainGC", - "UseSpecialLargeObjectHandling", - "UseOversizedCarHandling", - "TraceCarAllocation", - "PrintTrainGCProcessingStats", - "LogOfCarSpaceSize", - "OversizedCarThreshold", - "MinTickInterval", - "DefaultTickInterval", - "MaxTickInterval", - "DelayTickAdjustment", - "ProcessingToTenuringRatio", - "MinTrainLength", - 0}; +/** + * Provide a slightly more user-friendly way of eliminating -XX flags. + * When a flag is eliminated, it can be added to this list in order to + * continue accepting this flag on the command-line, while issuing a warning + * and ignoring the value. Once the JDK version reaches the 'accept_until' + * limit, we flatly refuse to admit the existence of the flag. This allows + * a flag to die correctly over JDK releases using HSX. + */ +typedef struct { + const char* name; + JDK_Version obsoleted_in; // when the flag went away + JDK_Version accept_until; // which version to start denying the existence +} ObsoleteFlag; + +static ObsoleteFlag obsolete_jvm_flags[] = { + { "UseTrainGC", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "UseSpecialLargeObjectHandling", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "UseOversizedCarHandling", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "TraceCarAllocation", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "PrintTrainGCProcessingStats", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "LogOfCarSpaceSize", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "OversizedCarThreshold", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "MinTickInterval", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "DefaultTickInterval", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "MaxTickInterval", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "DelayTickAdjustment", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "ProcessingToTenuringRatio", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "MinTrainLength", JDK_Version::jdk(5), JDK_Version::jdk(7) }, + { "AppendRatio", JDK_Version::jdk_update(6,10), JDK_Version::jdk(7) }, + { NULL, JDK_Version(0), JDK_Version(0) } +}; -bool Arguments::made_obsolete_in_1_5_0(const char *s) { +// Returns true if the flag is obsolete and fits into the range specified +// for being ignored. In the case that the flag is ignored, the 'version' +// value is filled in with the version number when the flag became +// obsolete so that that value can be displayed to the user. 
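A hypothetical, stripped-down version of the lookup this comment describes (the real implementation follows); plain ints stand in for JDK_Version and only one table entry is shown:

    #include <cstddef>
    #include <cstring>

    struct ObsoleteFlagModel {
      const char* name;
      int obsoleted_in;   // release in which the flag stopped having any effect
      int accept_until;   // first release that refuses to recognize it at all
    };

    static const ObsoleteFlagModel kObsoleteFlags[] = {
      { "UseTrainGC", 5, 7 },
      { nullptr,      0, 0 }
    };

    // Returns true if 's' names a flag we should warn about and ignore; fills in
    // the release where the flag was obsoleted so the warning can mention it.
    bool is_newly_obsolete_model(const char* s, int current_release, int* obsoleted_in) {
      if (*s == '+' || *s == '-') s++;                   // accept [+|-]Flag spellings
      for (int i = 0; kObsoleteFlags[i].name != nullptr; i++) {
        std::size_t n = std::strlen(kObsoleteFlags[i].name);
        if (std::strncmp(kObsoleteFlags[i].name, s, n) == 0 &&
            current_release < kObsoleteFlags[i].accept_until) {
          *obsoleted_in = kObsoleteFlags[i].obsoleted_in;
          return true;
        }
      }
      return false;                                      // unknown, or past accept_until
    }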
+bool Arguments::is_newly_obsolete(const char *s, JDK_Version* version) { int i = 0; - while (obsolete_jvm_flags_1_5_0[i] != NULL) { + assert(version != NULL, "Must provide a version buffer"); + while (obsolete_jvm_flags[i].name != NULL) { + const ObsoleteFlag& flag_status = obsolete_jvm_flags[i]; // =xxx form // [-|+] form - if ((strncmp(obsolete_jvm_flags_1_5_0[i], s, - strlen(obsolete_jvm_flags_1_5_0[i])) == 0) || - ((s[0] == '+' || s[0] == '-') && - (strncmp(obsolete_jvm_flags_1_5_0[i], &s[1], - strlen(obsolete_jvm_flags_1_5_0[i])) == 0))) { - return true; + if ((strncmp(flag_status.name, s, strlen(flag_status.name)) == 0) || + ((s[0] == '+' || s[0] == '-') && + (strncmp(flag_status.name, &s[1], strlen(flag_status.name)) == 0))) { + if (JDK_Version::current().compare(flag_status.accept_until) == -1) { + *version = flag_status.obsoleted_in; + return true; + } } i++; } @@ -213,6 +232,7 @@ inline void set_base(const char* base); inline void add_prefix(const char* prefix); + inline void add_suffix_to_prefix(const char* suffix); inline void add_suffix(const char* suffix); inline void reset_path(const char* base); @@ -274,6 +294,10 @@ _items[_scp_prefix] = add_to_path(_items[_scp_prefix], prefix, true); } +inline void SysClassPath::add_suffix_to_prefix(const char* suffix) { + _items[_scp_prefix] = add_to_path(_items[_scp_prefix], suffix, false); +} + inline void SysClassPath::add_suffix(const char* suffix) { _items[_scp_suffix] = add_to_path(_items[_scp_suffix], suffix, false); } @@ -428,9 +452,9 @@ } // Parses a memory size specification string. -static bool atomll(const char *s, jlong* result) { - jlong n = 0; - int args_read = sscanf(s, os::jlong_format_specifier(), &n); +static bool atomull(const char *s, julong* result) { + julong n = 0; + int args_read = sscanf(s, os::julong_format_specifier(), &n); if (args_read != 1) { return false; } @@ -444,15 +468,20 @@ switch (*s) { case 'T': case 't': *result = n * G * K; + // Check for overflow. + if (*result/((julong)G * K) != n) return false; return true; case 'G': case 'g': *result = n * G; + if (*result/G != n) return false; return true; case 'M': case 'm': *result = n * M; + if (*result/M != n) return false; return true; case 'K': case 'k': *result = n * K; + if (*result/K != n) return false; return true; case '\0': *result = n; @@ -462,10 +491,10 @@ } } -Arguments::ArgsRange Arguments::check_memory_size(jlong size, jlong min_size) { +Arguments::ArgsRange Arguments::check_memory_size(julong size, julong min_size) { if (size < min_size) return arg_too_small; // Check that size will fit in a size_t (only relevant on 32-bit) - if ((julong) size > max_uintx) return arg_too_big; + if (size > max_uintx) return arg_too_big; return arg_in_range; } @@ -491,7 +520,6 @@ return CommandLineFlags::boolAtPut(name, &value, origin); } - static bool set_fp_numeric_flag(char* name, char* value, FlagValueOrigin origin) { double v; if (sscanf(value, "%lf", &v) != 1) { @@ -504,12 +532,11 @@ return false; } - static bool set_numeric_flag(char* name, char* value, FlagValueOrigin origin) { - jlong v; + julong v; intx intx_v; bool is_neg = false; - // Check the sign first since atomll() parses only unsigned values. + // Check the sign first since atomull() parses only unsigned values. 
if (*value == '-') { if (!CommandLineFlags::intxAt(name, &intx_v)) { return false; @@ -517,7 +544,7 @@ value++; is_neg = true; } - if (!atomll(value, &v)) { + if (!atomull(value, &v)) { return false; } intx_v = (intx) v; @@ -534,7 +561,6 @@ return false; } - static bool set_string_flag(char* name, const char* value, FlagValueOrigin origin) { if (!CommandLineFlags::ccstrAtPut(name, &value, origin)) return false; // Contract: CommandLineFlags always returns a pointer that needs freeing. @@ -570,7 +596,6 @@ return true; } - bool Arguments::parse_argument(const char* arg, FlagValueOrigin origin) { // range of acceptable characters spelled out for portability reasons @@ -631,7 +656,6 @@ return false; } - void Arguments::add_string(char*** bldarray, int* count, const char* arg) { assert(bldarray != NULL, "illegal argument"); @@ -708,14 +732,20 @@ } } -bool Arguments::process_argument(const char* arg, jboolean ignore_unrecognized, FlagValueOrigin origin) { +bool Arguments::process_argument(const char* arg, + jboolean ignore_unrecognized, FlagValueOrigin origin) { + + JDK_Version since = JDK_Version(); if (parse_argument(arg, origin)) { // do nothing - } else if (made_obsolete_in_1_5_0(arg)) { + } else if (is_newly_obsolete(arg, &since)) { + enum { bufsize = 256 }; + char buffer[bufsize]; + since.to_string(buffer, bufsize); jio_fprintf(defaultStream::error_stream(), - "Warning: The flag %s has been EOL'd as of 1.5.0 and will" - " be ignored\n", arg); + "Warning: The flag %s has been EOL'd as of %s and will" + " be ignored\n", arg, buffer); } else { if (!ignore_unrecognized) { jio_fprintf(defaultStream::error_stream(), @@ -729,7 +759,6 @@ return true; } - bool Arguments::process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized) { FILE* stream = fopen(file_name, "rb"); if (stream == NULL) { @@ -826,16 +855,13 @@ FreeHeap(value); } return true; - } - else if (strcmp(key, "sun.java.command") == 0) { - + } else if (strcmp(key, "sun.java.command") == 0) { _java_command = value; // don't add this property to the properties exposed to the java application FreeHeap(key); return true; - } - else if (strcmp(key, "sun.java.launcher.pid") == 0) { + } else if (strcmp(key, "sun.java.launcher.pid") == 0) { // launcher.pid property is private and is processed // in process_sun_java_launcher_properties(); // the sun.java.launcher property is passed on to the java application @@ -844,13 +870,14 @@ FreeHeap(value); } return true; - } - else if (strcmp(key, "java.vendor.url.bug") == 0) { + } else if (strcmp(key, "java.vendor.url.bug") == 0) { // save it in _java_vendor_url_bug, so JVM fatal error handler can access // its value without going through the property list or making a Java call. _java_vendor_url_bug = value; + } else if (strcmp(key, "sun.boot.library.path") == 0) { + PropertyList_unique_add(&_system_properties, key, value, true); + return true; } - // Create new property and add at the end of the list PropertyList_unique_add(&_system_properties, key, value); return true; @@ -869,8 +896,8 @@ // Ensure Agent_OnLoad has the correct initial values. // This may not be the final mode; mode may change later in onload phase. 
PropertyList_unique_add(&_system_properties, "java.vm.info", - (char*)Abstract_VM_Version::vm_info_string()); - + (char*)Abstract_VM_Version::vm_info_string(), false); + UseInterpreter = true; UseCompiler = true; UseLoopCounter = true; @@ -905,7 +932,6 @@ } } - // Conflict: required to use shared spaces (-Xshare:on), but // incompatible command line options were chosen. @@ -919,18 +945,24 @@ } } - // If the user has chosen ParallelGCThreads > 0, we set UseParNewGC // if it's not explictly set or unset. If the user has chosen // UseParNewGC and not explicitly set ParallelGCThreads we // set it, unless this is a single cpu machine. void Arguments::set_parnew_gc_flags() { - assert(!UseSerialGC && !UseParallelGC, "control point invariant"); + assert(!UseSerialGC && !UseParallelGC && !UseG1GC, + "control point invariant"); + assert(UseParNewGC, "Error"); + + // Turn off AdaptiveSizePolicy by default for parnew until it is + // complete. + if (FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) { + FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false); + } - if (FLAG_IS_DEFAULT(UseParNewGC) && ParallelGCThreads > 1) { - FLAG_SET_DEFAULT(UseParNewGC, true); - } else if (UseParNewGC && ParallelGCThreads == 0) { - FLAG_SET_DEFAULT(ParallelGCThreads, nof_parallel_gc_threads()); + if (ParallelGCThreads == 0) { + FLAG_SET_DEFAULT(ParallelGCThreads, + Abstract_VM_Version::parallel_worker_threads()); if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) { FLAG_SET_DEFAULT(UseParNewGC, false); } @@ -939,8 +971,8 @@ FLAG_SET_DEFAULT(ParallelGCThreads, 0); } else { no_shared_spaces(); - - // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 correspondinly, + + // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively, // these settings are default for Parallel Scavenger. For ParNew+Tenured configuration // we set them to 1024 and 1024. // See CR 6362902. @@ -956,25 +988,16 @@ if (AlwaysTenure) { FLAG_SET_CMDLINE(intx, MaxTenuringThreshold, 0); } - } -} - -// CAUTION: this code is currently shared by UseParallelGC, UseParNewGC and -// UseconcMarkSweepGC. Further tuning of individual collectors might -// dictate refinement on a per-collector basis. -int Arguments::nof_parallel_gc_threads() { - if (FLAG_IS_DEFAULT(ParallelGCThreads)) { - // For very large machines, there are diminishing returns - // for large numbers of worker threads. Instead of - // hogging the whole system, use 5/8ths of a worker for every - // processor after the first 8. For example, on a 72 cpu - // machine use 8 + (72 - 8) * (5/8) == 48 worker threads. - // This is just a start and needs further tuning and study in - // Tiger. - int ncpus = os::active_processor_count(); - return (ncpus <= 8) ? ncpus : 3 + ((ncpus * 5) / 8); - } else { - return ParallelGCThreads; + // When using compressed oops, we use local overflow stacks, + // rather than using a global overflow list chained through + // the klass word of the object's pre-image. + if (UseCompressedOops && !ParGCUseLocalOverflow) { + if (!FLAG_IS_DEFAULT(ParGCUseLocalOverflow)) { + warning("Forcing +ParGCUseLocalOverflow: needed if using compressed references"); + } + FLAG_SET_DEFAULT(ParGCUseLocalOverflow, true); + } + assert(ParGCUseLocalOverflow || !UseCompressedOops, "Error"); } } @@ -983,30 +1006,25 @@ // further optimization and tuning efforts, and would almost // certainly gain from analysis of platform and environment. 
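For reference, the worker-count heuristic that the removed nof_parallel_gc_threads() encoded (and that Abstract_VM_Version::parallel_worker_threads() presumably reproduces) can be written as a one-liner:

    // All CPUs up to 8, then roughly 5/8 of a worker per CPU beyond that;
    // e.g. 72 CPUs -> 3 + (72 * 5) / 8 = 48 workers.
    static int default_gc_workers(int ncpus) {
      return (ncpus <= 8) ? ncpus : 3 + ((ncpus * 5) / 8);
    }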
void Arguments::set_cms_and_parnew_gc_flags() { - if (UseSerialGC || UseParallelGC) { - return; - } + assert(!UseSerialGC && !UseParallelGC, "Error"); + assert(UseConcMarkSweepGC, "CMS is expected to be on here"); // If we are using CMS, we prefer to UseParNewGC, // unless explicitly forbidden. - if (UseConcMarkSweepGC && !UseParNewGC && FLAG_IS_DEFAULT(UseParNewGC)) { - FLAG_SET_DEFAULT(UseParNewGC, true); + if (FLAG_IS_DEFAULT(UseParNewGC)) { + FLAG_SET_ERGO(bool, UseParNewGC, true); } // Turn off AdaptiveSizePolicy by default for cms until it is - // complete. Also turn it off in general if the - // parnew collector has been selected. - if ((UseConcMarkSweepGC || UseParNewGC) && - FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) { + // complete. + if (FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) { FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false); } // In either case, adjust ParallelGCThreads and/or UseParNewGC // as needed. - set_parnew_gc_flags(); - - if (!UseConcMarkSweepGC) { - return; + if (UseParNewGC) { + set_parnew_gc_flags(); } // Now make adjustments for CMS @@ -1016,7 +1034,7 @@ intx tenuring_default; if (CMSUseOldDefaults) { // old defaults: "old" as of 6.0 if FLAG_IS_DEFAULT(CMSYoungGenPerWorker) { - FLAG_SET_DEFAULT(CMSYoungGenPerWorker, 4*M); + FLAG_SET_ERGO(intx, CMSYoungGenPerWorker, 4*M); } young_gen_per_worker = 4*M; new_ratio = (intx)15; @@ -1041,17 +1059,21 @@ // for "short" pauses ~ 4M*ParallelGCThreads if (FLAG_IS_DEFAULT(MaxNewSize)) { // MaxNewSize not set at command-line if (!FLAG_IS_DEFAULT(NewSize)) { // NewSize explicitly set at command-line - FLAG_SET_DEFAULT(MaxNewSize, MAX2(NewSize, preferred_max_new_size)); + FLAG_SET_ERGO(uintx, MaxNewSize, MAX2(NewSize, preferred_max_new_size)); } else { - FLAG_SET_DEFAULT(MaxNewSize, preferred_max_new_size); + FLAG_SET_ERGO(uintx, MaxNewSize, preferred_max_new_size); } + if(PrintGCDetails && Verbose) { + // Too early to use gclog_or_tty + tty->print_cr("Ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize); + } } // Unless explicitly requested otherwise, prefer a large // Old to Young gen size so as to shift the collection load // to the old generation concurrent collector if (FLAG_IS_DEFAULT(NewRatio)) { - FLAG_SET_DEFAULT(NewRatio, MAX2(NewRatio, new_ratio)); - + FLAG_SET_ERGO(intx, NewRatio, MAX2(NewRatio, new_ratio)); + size_t min_new = align_size_up(ScaleForWordSize(min_new_default), os::vm_page_size()); size_t prev_initial_size = initial_heap_size(); if (prev_initial_size != 0 && prev_initial_size < min_new+OldSize) { @@ -1068,19 +1090,34 @@ size_t max_heap = align_size_down(MaxHeapSize, CardTableRS::ct_max_alignment_constraint()); + if(PrintGCDetails && Verbose) { + // Too early to use gclog_or_tty + tty->print_cr("CMS set min_heap_size: " SIZE_FORMAT + " initial_heap_size: " SIZE_FORMAT + " max_heap: " SIZE_FORMAT, + min_heap_size(), initial_heap_size(), max_heap); + } if (max_heap > min_new) { // Unless explicitly requested otherwise, make young gen // at least min_new, and at most preferred_max_new_size. 
if (FLAG_IS_DEFAULT(NewSize)) { - FLAG_SET_DEFAULT(NewSize, MAX2(NewSize, min_new)); - FLAG_SET_DEFAULT(NewSize, MIN2(preferred_max_new_size, NewSize)); + FLAG_SET_ERGO(uintx, NewSize, MAX2(NewSize, min_new)); + FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize)); + if(PrintGCDetails && Verbose) { + // Too early to use gclog_or_tty + tty->print_cr("Ergo set NewSize: " SIZE_FORMAT, NewSize); + } } // Unless explicitly requested otherwise, size old gen // so that it's at least 3X of NewSize to begin with; // later NewRatio will decide how it grows; see above. if (FLAG_IS_DEFAULT(OldSize)) { if (max_heap > NewSize) { - FLAG_SET_DEFAULT(OldSize, MIN2(3*NewSize, max_heap - NewSize)); + FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize, max_heap - NewSize)); + if(PrintGCDetails && Verbose) { + // Too early to use gclog_or_tty + tty->print_cr("Ergo set OldSize: " SIZE_FORMAT, OldSize); + } } } } @@ -1089,14 +1126,14 @@ // promote all objects surviving "tenuring_default" scavenges. if (FLAG_IS_DEFAULT(MaxTenuringThreshold) && FLAG_IS_DEFAULT(SurvivorRatio)) { - FLAG_SET_DEFAULT(MaxTenuringThreshold, tenuring_default); - } + FLAG_SET_ERGO(intx, MaxTenuringThreshold, tenuring_default); + } // If we decided above (or user explicitly requested) // `promote all' (via MaxTenuringThreshold := 0), // prefer minuscule survivor spaces so as not to waste // space for (non-existent) survivors if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) { - FLAG_SET_DEFAULT(SurvivorRatio, MAX2((intx)1024, SurvivorRatio)); + FLAG_SET_ERGO(intx, SurvivorRatio, MAX2((intx)1024, SurvivorRatio)); } // If OldPLABSize is set and CMSParPromoteBlocksToClaim is not, // set CMSParPromoteBlocksToClaim equal to OldPLABSize. @@ -1105,7 +1142,11 @@ // See CR 6362902. if (!FLAG_IS_DEFAULT(OldPLABSize)) { if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) { - FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, OldPLABSize); + // OldPLABSize is not the default value but CMSParPromoteBlocksToClaim + // is. In this situtation let CMSParPromoteBlocksToClaim follow + // the value (either from the command line or ergonomics) of + // OldPLABSize. Following OldPLABSize is an ergonomics decision. + FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize); } else { // OldPLABSize and CMSParPromoteBlocksToClaim are both set. @@ -1118,6 +1159,11 @@ } } +inline uintx max_heap_for_compressed_oops() { + LP64_ONLY(return oopDesc::OopEncodingHeapMax - MaxPermSize - os::vm_page_size()); + NOT_LP64(return DefaultMaxRAM); +} + bool Arguments::should_auto_select_low_pause_collector() { if (UseAutoGCSelectPolicy && !FLAG_IS_DEFAULT(MaxGCPauseMillis) && @@ -1145,26 +1191,53 @@ // machine class and automatic selection policy. if (!UseSerialGC && !UseConcMarkSweepGC && + !UseG1GC && !UseParNewGC && !DumpSharedSpaces && FLAG_IS_DEFAULT(UseParallelGC)) { if (should_auto_select_low_pause_collector()) { FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true); - set_cms_and_parnew_gc_flags(); } else { FLAG_SET_ERGO(bool, UseParallelGC, true); } no_shared_spaces(); } + } - // This is here because the parallel collector could - // have been selected so this initialization should - // still be done. - set_parallel_gc_flags(); +#ifdef _LP64 + // Compressed Headers do not work with CMS, which uses a bit in the klass + // field offset to determine free list chunk markers. + // Check that UseCompressedOops can be set with the max heap size allocated + // by ergonomics. 
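A back-of-the-envelope check of the bound max_heap_for_compressed_oops() computes, with assumed values for object alignment, MaxPermSize and page size (illustrative only):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // 2^32 distinct narrow oops times an assumed 8-byte alignment gives the
      // 32 GB encoding ceiling; subtract an assumed 256 MB perm gen and a 4 KB page.
      const uint64_t encoding_max = (uint64_t(1) << 32) * 8;   // OopEncodingHeapMax analogue
      const uint64_t perm_size    = 256ULL << 20;
      const uint64_t page_size    = 4096;
      std::printf("usable compressed-oops heap ~ %llu MB\n",
                  static_cast<unsigned long long>((encoding_max - perm_size - page_size) >> 20));
      // Prints roughly 32511 MB, i.e. just under 32 GB minus the perm gen.
      return 0;
    }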
+ if (MaxHeapSize <= max_heap_for_compressed_oops()) { + if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) { + // Turn off until bug is fixed. + // the following line to return it to default status. + // FLAG_SET_ERGO(bool, UseCompressedOops, true); + } else if (UseCompressedOops && UseG1GC) { + warning(" UseCompressedOops does not currently work with UseG1GC; switching off UseCompressedOops. "); + FLAG_SET_DEFAULT(UseCompressedOops, false); + } +#ifdef _WIN64 + if (UseLargePages && UseCompressedOops) { + // Cannot allocate guard pages for implicit checks in indexed addressing + // mode, when large pages are specified on windows. + FLAG_SET_DEFAULT(UseImplicitNullCheckForNarrowOop, false); + } +#endif // _WIN64 + } else { + if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) { + warning("Max heap size too large for Compressed Oops"); + FLAG_SET_DEFAULT(UseCompressedOops, false); + } } + // Also checks that certain machines are slower with compressed oops + // in vm_version initialization code. +#endif // _LP64 } void Arguments::set_parallel_gc_flags() { + assert(UseParallelGC || UseParallelOldGC, "Error"); // If parallel old was requested, automatically enable parallel scavenge. if (UseParallelOldGC && !UseParallelGC && FLAG_IS_DEFAULT(UseParallelGC)) { FLAG_SET_DEFAULT(UseParallelGC, true); @@ -1173,48 +1246,11 @@ // If no heap maximum was requested explicitly, use some reasonable fraction // of the physical memory, up to a maximum of 1GB. if (UseParallelGC) { - if (FLAG_IS_DEFAULT(MaxHeapSize)) { - const uint64_t reasonable_fraction = - os::physical_memory() / DefaultMaxRAMFraction; - const uint64_t maximum_size = (uint64_t) DefaultMaxRAM; - size_t reasonable_max = - (size_t) os::allocatable_physical_memory(reasonable_fraction); - if (reasonable_max > maximum_size) { - reasonable_max = maximum_size; - } - if (PrintGCDetails && Verbose) { - // Cannot use gclog_or_tty yet. - tty->print_cr(" Max heap size for server class platform " - SIZE_FORMAT, reasonable_max); - } - // If the initial_heap_size has not been set with -Xms, - // then set it as fraction of size of physical memory - // respecting the maximum and minimum sizes of the heap. - if (initial_heap_size() == 0) { - const uint64_t reasonable_initial_fraction = - os::physical_memory() / DefaultInitialRAMFraction; - const size_t reasonable_initial = - (size_t) os::allocatable_physical_memory(reasonable_initial_fraction); - const size_t minimum_size = NewSize + OldSize; - set_initial_heap_size(MAX2(MIN2(reasonable_initial, reasonable_max), - minimum_size)); - // Currently the minimum size and the initial heap sizes are the same. - set_min_heap_size(initial_heap_size()); - if (PrintGCDetails && Verbose) { - // Cannot use gclog_or_tty yet. - tty->print_cr(" Initial heap size for server class platform " - SIZE_FORMAT, initial_heap_size()); - } - } else { - // An minimum size was specified on the command line. Be sure - // that the maximum size is consistent. - if (initial_heap_size() > reasonable_max) { - reasonable_max = initial_heap_size(); - } - } - FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx) reasonable_max); - } + FLAG_SET_ERGO(uintx, ParallelGCThreads, + Abstract_VM_Version::parallel_worker_threads()); + // PS is a server collector, setup the heap sizes accordingly. + set_server_heap_size(); // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the // SurvivorRatio has been set, reset their default values to SurvivorRatio + // 2. By doing this we make SurvivorRatio also work for Parallel Scavenger. 
@@ -1230,14 +1266,83 @@ if (UseParallelOldGC) { // Par compact uses lower default values since they are treated as - // minimums. + // minimums. These are different defaults because of the different + // interpretation and are not ergonomically set. if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) { - MarkSweepDeadRatio = 1; + FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1); } if (FLAG_IS_DEFAULT(PermMarkSweepDeadRatio)) { - PermMarkSweepDeadRatio = 5; + FLAG_SET_DEFAULT(PermMarkSweepDeadRatio, 5); + } + } + } +} + +void Arguments::set_g1_gc_flags() { + assert(UseG1GC, "Error"); + // G1 is a server collector, setup the heap sizes accordingly. + set_server_heap_size(); +#ifdef COMPILER1 + FastTLABRefill = false; +#endif + FLAG_SET_DEFAULT(ParallelGCThreads, + Abstract_VM_Version::parallel_worker_threads()); + if (ParallelGCThreads == 0) { + FLAG_SET_DEFAULT(ParallelGCThreads, + Abstract_VM_Version::parallel_worker_threads()); + } + no_shared_spaces(); + + // Set the maximum pause time goal to be a reasonable default. + if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) { + FLAG_SET_DEFAULT(MaxGCPauseMillis, 200); + } +} + +void Arguments::set_server_heap_size() { + if (FLAG_IS_DEFAULT(MaxHeapSize)) { + const uint64_t reasonable_fraction = + os::physical_memory() / DefaultMaxRAMFraction; + const uint64_t maximum_size = (uint64_t) + (FLAG_IS_DEFAULT(DefaultMaxRAM) && UseCompressedOops ? + MIN2(max_heap_for_compressed_oops(), DefaultMaxRAM) : + DefaultMaxRAM); + size_t reasonable_max = + (size_t) os::allocatable_physical_memory(reasonable_fraction); + if (reasonable_max > maximum_size) { + reasonable_max = maximum_size; + } + if (PrintGCDetails && Verbose) { + // Cannot use gclog_or_tty yet. + tty->print_cr(" Max heap size for server class platform " + SIZE_FORMAT, reasonable_max); + } + // If the initial_heap_size has not been set with -Xms, + // then set it as fraction of size of physical memory + // respecting the maximum and minimum sizes of the heap. + if (initial_heap_size() == 0) { + const uint64_t reasonable_initial_fraction = + os::physical_memory() / DefaultInitialRAMFraction; + const size_t reasonable_initial = + (size_t) os::allocatable_physical_memory(reasonable_initial_fraction); + const size_t minimum_size = NewSize + OldSize; + set_initial_heap_size(MAX2(MIN2(reasonable_initial, reasonable_max), + minimum_size)); + // Currently the minimum size and the initial heap sizes are the same. + set_min_heap_size(initial_heap_size()); + if (PrintGCDetails && Verbose) { + // Cannot use gclog_or_tty yet. + tty->print_cr(" Initial heap size for server class platform " + SIZE_FORMAT, initial_heap_size()); + } + } else { + // A minimum size was specified on the command line. Be sure + // that the maximum size is consistent. 
+ if (initial_heap_size() > reasonable_max) { + reasonable_max = initial_heap_size(); } } + FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx) reasonable_max); } } @@ -1264,22 +1369,29 @@ } if (FLAG_IS_DEFAULT(AutoBoxCacheMax)) { FLAG_SET_DEFAULT(AutoBoxCacheMax, 20000); - } + } // Feed the cache size setting into the JDK char buffer[1024]; sprintf(buffer, "java.lang.Integer.IntegerCache.high=%d", AutoBoxCacheMax); add_property(buffer); } + if (AggressiveOpts && FLAG_IS_DEFAULT(DoEscapeAnalysis)) { + FLAG_SET_DEFAULT(DoEscapeAnalysis, true); + } + if (AggressiveOpts && FLAG_IS_DEFAULT(SpecialArraysEquals)) { + FLAG_SET_DEFAULT(SpecialArraysEquals, true); + } + if (AggressiveOpts && FLAG_IS_DEFAULT(BiasedLockingStartupDelay)) { + FLAG_SET_DEFAULT(BiasedLockingStartupDelay, 500); + } #endif if (AggressiveOpts) { -NOT_WINDOWS( - // No measured benefit on Windows - if (FLAG_IS_DEFAULT(CacheTimeMillis)) { - FLAG_SET_DEFAULT(CacheTimeMillis, true); - } -) +// Sample flag setting code +// if (FLAG_IS_DEFAULT(EliminateZeroing)) { +// FLAG_SET_DEFAULT(EliminateZeroing, true); +// } } } @@ -1323,12 +1435,38 @@ FLAG_SET_DEFAULT(UseConcMarkSweepGC, false); FLAG_SET_DEFAULT(UseParallelGC, false); FLAG_SET_DEFAULT(UseParallelOldGC, false); + FLAG_SET_DEFAULT(UseG1GC, false); } static bool verify_serial_gc_flags() { - return (UseSerialGC && - !(UseParNewGC || UseConcMarkSweepGC || UseParallelGC || - UseParallelOldGC)); + return (UseSerialGC && + !(UseParNewGC || UseConcMarkSweepGC || UseG1GC || + UseParallelGC || UseParallelOldGC)); +} + +// Check consistency of GC selection +bool Arguments::check_gc_consistency() { + bool status = true; + // Ensure that the user has not selected conflicting sets + // of collectors. [Note: this check is merely a user convenience; + // collectors over-ride each other so that only a non-conflicting + // set is selected; however what the user gets is not what they + // may have expected from the combination they asked for. It's + // better to reduce user confusion by not allowing them to + // select conflicting combinations. 
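A compact model of the conflict check this comment describes; the real check_gc_consistency() body follows in the patch. Each collector "family" contributes at most one vote, and more than one vote is reported as a user error:

    struct GcSelectionModel {
      bool use_serial;
      bool use_conc_mark_sweep, use_par_new;
      bool use_parallel, use_parallel_old;
    };

    static bool selections_are_consistent(const GcSelectionModel& s) {
      int families = 0;
      if (s.use_serial)                            families++;
      if (s.use_conc_mark_sweep || s.use_par_new)  families++;
      if (s.use_parallel || s.use_parallel_old)    families++;
      return families <= 1;   // 0 lets ergonomics decide, 1 is an explicit pick
    }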
+ uint i = 0; + if (UseSerialGC) i++; + if (UseConcMarkSweepGC || UseParNewGC) i++; + if (UseParallelGC || UseParallelOldGC) i++; + if (i > 1) { + jio_fprintf(defaultStream::error_stream(), + "Conflicting collector combinations in option list; " + "please refer to the release notes for the combinations " + "allowed\n"); + status = false; + } + + return status; } // Check the consistency of vm_init_args @@ -1373,14 +1511,14 @@ status = false; } - status &= verify_percentage(MaxLiveObjectEvacuationRatio, - "MaxLiveObjectEvacuationRatio"); - status &= verify_percentage(AdaptiveSizePolicyWeight, - "AdaptiveSizePolicyWeight"); - status &= verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight"); - status &= verify_percentage(ThresholdTolerance, "ThresholdTolerance"); - status &= verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio"); - status &= verify_percentage(MaxHeapFreeRatio, "MaxHeapFreeRatio"); + status = status && verify_percentage(MaxLiveObjectEvacuationRatio, + "MaxLiveObjectEvacuationRatio"); + status = status && verify_percentage(AdaptiveSizePolicyWeight, + "AdaptiveSizePolicyWeight"); + status = status && verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight"); + status = status && verify_percentage(ThresholdTolerance, "ThresholdTolerance"); + status = status && verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio"); + status = status && verify_percentage(MaxHeapFreeRatio, "MaxHeapFreeRatio"); if (MinHeapFreeRatio > MaxHeapFreeRatio) { jio_fprintf(defaultStream::error_stream(), @@ -1396,18 +1534,28 @@ MarkSweepAlwaysCompactCount = 1; // Move objects every gc. } - status &= verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit"); - status &= verify_percentage(GCTimeLimit, "GCTimeLimit"); + if (UseParallelOldGC && ParallelOldGCSplitALot) { + // Settings to encourage splitting. + if (!FLAG_IS_CMDLINE(NewRatio)) { + FLAG_SET_CMDLINE(intx, NewRatio, 2); + } + if (!FLAG_IS_CMDLINE(ScavengeBeforeFullGC)) { + FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false); + } + } + + status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit"); + status = status && verify_percentage(GCTimeLimit, "GCTimeLimit"); if (GCTimeLimit == 100) { // Turn off gc-overhead-limit-exceeded checks FLAG_SET_DEFAULT(UseGCOverheadLimit, false); } - status &= verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit"); + status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit"); // Check user specified sharing option conflict with Parallel GC - bool cannot_share = (UseConcMarkSweepGC || UseParallelGC || - UseParallelOldGC || UseParNewGC || + bool cannot_share = (UseConcMarkSweepGC || UseG1GC || UseParNewGC || + UseParallelGC || UseParallelOldGC || SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages)); if (cannot_share) { @@ -1421,24 +1569,7 @@ } } - // Ensure that the user has not selected conflicting sets - // of collectors. [Note: this check is merely a user convenience; - // collectors over-ride each other so that only a non-conflicting - // set is selected; however what the user gets is not what they - // may have expected from the combination they asked for. It's - // better to reduce user confusion by not allowing them to - // select conflicting combinations. 
- uint i = 0; - if (UseSerialGC) i++; - if (UseConcMarkSweepGC || UseParNewGC) i++; - if (UseParallelGC || UseParallelOldGC) i++; - if (i > 1) { - jio_fprintf(defaultStream::error_stream(), - "Conflicting collector combinations in option list; " - "please refer to the release notes for the combinations " - "allowed\n"); - status = false; - } + status = status && check_gc_consistency(); if (_has_alloc_profile) { if (UseParallelGC || UseParallelOldGC) { @@ -1464,22 +1595,17 @@ "The CMS collector (-XX:+UseConcMarkSweepGC) must be " "selected in order\nto use CMSIncrementalMode.\n"); status = false; - } else if (!UseTLAB) { - jio_fprintf(defaultStream::error_stream(), - "error: CMSIncrementalMode requires thread-local " - "allocation buffers\n(-XX:+UseTLAB).\n"); - status = false; } else { - status &= verify_percentage(CMSIncrementalDutyCycle, - "CMSIncrementalDutyCycle"); - status &= verify_percentage(CMSIncrementalDutyCycleMin, - "CMSIncrementalDutyCycleMin"); - status &= verify_percentage(CMSIncrementalSafetyFactor, - "CMSIncrementalSafetyFactor"); - status &= verify_percentage(CMSIncrementalOffset, - "CMSIncrementalOffset"); - status &= verify_percentage(CMSExpAvgFactor, - "CMSExpAvgFactor"); + status = status && verify_percentage(CMSIncrementalDutyCycle, + "CMSIncrementalDutyCycle"); + status = status && verify_percentage(CMSIncrementalDutyCycleMin, + "CMSIncrementalDutyCycleMin"); + status = status && verify_percentage(CMSIncrementalSafetyFactor, + "CMSIncrementalSafetyFactor"); + status = status && verify_percentage(CMSIncrementalOffset, + "CMSIncrementalOffset"); + status = status && verify_percentage(CMSExpAvgFactor, + "CMSExpAvgFactor"); // If it was not set on the command line, set // CMSInitiatingOccupancyFraction to 1 so icms can initiate cycles early. if (CMSInitiatingOccupancyFraction < 0) { @@ -1488,13 +1614,6 @@ } } - if (UseNUMA && !UseTLAB) { - jio_fprintf(defaultStream::error_stream(), - "error: NUMA allocator (-XX:+UseNUMA) requires thread-local " - "allocation\nbuffers (-XX:+UseTLAB).\n"); - status = false; - } - // CMS space iteration, which FLSVerifyAllHeapreferences entails, // insists that we hold the requisite locks so that the iteration is // MT-safe. For the verification at start-up and shut-down, we don't @@ -1575,9 +1694,9 @@ } Arguments::ArgsRange Arguments::parse_memory_size(const char* s, - jlong* long_arg, - jlong min_size) { - if (!atomll(s, long_arg)) return arg_unreadable; + julong* long_arg, + julong min_size) { + if (!atomull(s, long_arg)) return arg_unreadable; return check_memory_size(*long_arg, min_size); } @@ -1607,6 +1726,21 @@ return result; } + if (AggressiveOpts) { + // Insert alt-rt.jar between user-specified bootclasspath + // prefix and the default bootclasspath. os::set_boot_path() + // uses meta_index_dir as the default bootclasspath directory. 
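A rough model of where alt-rt.jar ends up relative to user-supplied entries; the separator handling and helper name are assumptions, not the actual SysClassPath code:

    #include <string>

    // The jar lives in the same directory os::set_boot_path() uses for the default
    // boot class path, and it is appended to the *prefix* segment, so user
    // -Xbootclasspath/p: entries still come first.
    std::string prefix_with_alt_rt(const std::string& user_prefix,
                                   const std::string& meta_index_dir,
                                   char path_sep = ':') {
      std::string alt = meta_index_dir + "alt-rt.jar";   // dir assumed to end in a file separator
      return user_prefix.empty() ? alt : user_prefix + path_sep + alt;
    }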
+ const char* altclasses_jar = "alt-rt.jar"; + size_t altclasses_path_len = strlen(get_meta_index_dir()) + 1 + + strlen(altclasses_jar); + char* altclasses_path = NEW_C_HEAP_ARRAY(char, altclasses_path_len); + strcpy(altclasses_path, get_meta_index_dir()); + strcat(altclasses_path, altclasses_jar); + scp.add_suffix_to_prefix(altclasses_path); + scp_assembly_required = true; + FREE_C_HEAP_ARRAY(char, altclasses_path); + } + // Parse _JAVA_OPTIONS environment variable (if present) (mimics classic VM) result = parse_java_options_environment_variable(&scp, &scp_assembly_required); if (result != JNI_OK) { @@ -1622,8 +1756,7 @@ return JNI_OK; } - -jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, +jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, SysClassPath* scp_p, bool* scp_assembly_required_p, FlagValueOrigin origin) { @@ -1688,7 +1821,7 @@ *scp_assembly_required_p = true; // -Xrun } else if (match_option(option, "-Xrun", &tail)) { - if(tail != NULL) { + if (tail != NULL) { const char* pos = strchr(tail, ':'); size_t len = (pos == NULL) ? strlen(tail) : pos - tail; char* name = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len + 1), tail, len); @@ -1755,7 +1888,7 @@ FLAG_SET_CMDLINE(bool, BackgroundCompilation, false); // -Xmn for compatibility with other JVM vendors } else if (match_option(option, "-Xmn", &tail)) { - jlong long_initial_eden_size = 0; + julong long_initial_eden_size = 0; ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), @@ -1767,7 +1900,7 @@ FLAG_SET_CMDLINE(uintx, NewSize, (size_t) long_initial_eden_size); // -Xms } else if (match_option(option, "-Xms", &tail)) { - jlong long_initial_heap_size = 0; + julong long_initial_heap_size = 0; ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 1); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), @@ -1780,7 +1913,7 @@ set_min_heap_size(initial_heap_size()); // -Xmx } else if (match_option(option, "-Xmx", &tail)) { - jlong long_max_heap_size = 0; + julong long_max_heap_size = 0; ArgsRange errcode = parse_memory_size(tail, &long_max_heap_size, 1); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), @@ -1813,7 +1946,7 @@ } // -Xss } else if (match_option(option, "-Xss", &tail)) { - jlong long_ThreadStackSize = 0; + julong long_ThreadStackSize = 0; ArgsRange errcode = parse_memory_size(tail, &long_ThreadStackSize, 1000); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), @@ -1829,9 +1962,9 @@ // HotSpot does not have separate native and Java stacks, ignore silently for compatibility // -Xmaxjitcodesize } else if (match_option(option, "-Xmaxjitcodesize", &tail)) { - jlong long_ReservedCodeCacheSize = 0; + julong long_ReservedCodeCacheSize = 0; ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize, - InitialCodeCacheSize); + (size_t)InitialCodeCacheSize); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), "Invalid maximum code cache size: %s\n", @@ -2083,7 +2216,8 @@ // Enable parallel GC and adaptive generation sizing FLAG_SET_CMDLINE(bool, UseParallelGC, true); - FLAG_SET_DEFAULT(ParallelGCThreads, nof_parallel_gc_threads()); + FLAG_SET_DEFAULT(ParallelGCThreads, + Abstract_VM_Version::parallel_worker_threads()); // Encourage steady state memory management FLAG_SET_CMDLINE(uintx, ThresholdTolerance, 100); @@ -2135,7 +2269,7 @@ } else if (match_option(option, "-XX:TLEFragmentationRatio=", 
&tail)) { // No longer used. } else if (match_option(option, "-XX:TLESize=", &tail)) { - jlong long_tlab_size = 0; + julong long_tlab_size = 0; ArgsRange errcode = parse_memory_size(tail, &long_tlab_size, 1); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), @@ -2190,7 +2324,7 @@ "-XX:ParCMSPromoteBlocksToClaim in the future\n"); } else if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) { - jlong old_plab_size = 0; + julong old_plab_size = 0; ArgsRange errcode = parse_memory_size(tail, &old_plab_size, 1); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), @@ -2198,13 +2332,13 @@ describe_range_error(errcode); return JNI_EINVAL; } - FLAG_SET_CMDLINE(uintx, OldPLABSize, (julong)old_plab_size); + FLAG_SET_CMDLINE(uintx, OldPLABSize, old_plab_size); jio_fprintf(defaultStream::error_stream(), "Please use -XX:OldPLABSize in place of " "-XX:ParallelGCOldGenAllocBufferSize in the future\n"); } else if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) { - jlong young_plab_size = 0; + julong young_plab_size = 0; ArgsRange errcode = parse_memory_size(tail, &young_plab_size, 1); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), @@ -2212,7 +2346,7 @@ describe_range_error(errcode); return JNI_EINVAL; } - FLAG_SET_CMDLINE(uintx, YoungPLABSize, (julong)young_plab_size); + FLAG_SET_CMDLINE(uintx, YoungPLABSize, young_plab_size); jio_fprintf(defaultStream::error_stream(), "Please use -XX:YoungPLABSize in place of " "-XX:ParallelGCToSpaceAllocBufferSize in the future\n"); @@ -2229,7 +2363,12 @@ return JNI_ERR; } } - + // Change the default value for flags which have different default values + // when working with older JDKs. + if (JDK_Version::current().compare_major(6) <= 0 && + FLAG_IS_DEFAULT(UseVMInterruptibleIO)) { + FLAG_SET_DEFAULT(UseVMInterruptibleIO, true); + } return JNI_OK; } @@ -2282,10 +2421,15 @@ SOLARIS_ONLY(FLAG_SET_DEFAULT(UseMPSS, false)); SOLARIS_ONLY(FLAG_SET_DEFAULT(UseISM, false)); } + #else if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) { FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1); } + // Temporary disable bulk zeroing reduction with G1. See CR 6627983. + if (UseG1GC) { + FLAG_SET_DEFAULT(ReduceBulkZeroing, false); + } #endif if (!check_vm_args_consistency()) { @@ -2419,6 +2563,9 @@ if (match_option(option, "-XX:+PrintVMOptions", &tail)) { PrintVMOptions = true; } + if (match_option(option, "-XX:-PrintVMOptions", &tail)) { + PrintVMOptions = false; + } } // Parse default .hotspotrc settings file @@ -2443,6 +2590,22 @@ return result; } + // These are hacks until G1 is fully supported and tested + // but lets you force -XX:+UseG1GC in PRT and get it where it (mostly) works + if (UseG1GC) { + if (UseConcMarkSweepGC || UseParNewGC || UseParallelGC || UseParallelOldGC || UseSerialGC) { +#ifndef PRODUCT + tty->print_cr("-XX:+UseG1GC is incompatible with other collectors, using UseG1GC"); +#endif // PRODUCT + UseConcMarkSweepGC = false; + UseParNewGC = false; + UseParallelGC = false; + UseParallelOldGC = false; + UseSerialGC = false; + } + no_shared_spaces(); + } + #ifndef PRODUCT if (TraceBytecodesAt != 0) { TraceBytecodes = true; @@ -2469,16 +2632,32 @@ #ifdef KERNEL no_shared_spaces(); #endif // KERNEL - - // Set some flags for ParallelGC if needed. - set_parallel_gc_flags(); - - // Set some flags for CMS and/or ParNew collectors, as needed. - set_cms_and_parnew_gc_flags(); // Set flags based on ergonomics. 
set_ergonomics_flags(); + // Check the GC selections again. + if (!check_gc_consistency()) { + return JNI_EINVAL; + } + + if (UseParallelGC || UseParallelOldGC) { + // Set some flags for ParallelGC if needed. + set_parallel_gc_flags(); + } else if (UseConcMarkSweepGC) { + // Set some flags for CMS + set_cms_and_parnew_gc_flags(); + } else if (UseParNewGC) { + // Set some flags for ParNew + set_parnew_gc_flags(); + } + // Temporary; make the "if" an "else-if" before + // we integrate G1. XXX + if (UseG1GC) { + // Set some flags for garbage-first, if needed. + set_g1_gc_flags(); + } + #ifdef SERIALGC assert(verify_serial_gc_flags(), "SerialGC unset"); #endif // SERIALGC @@ -2494,10 +2673,22 @@ FLAG_SET_DEFAULT(UseBiasedLocking, false); #endif /* CC_INTERP */ +#ifdef COMPILER2 + if (!UseBiasedLocking || EmitSync != 0) { + UseOptoBiasInlining = false; + } +#endif + if (PrintCommandLineFlags) { CommandLineFlags::printSetFlags(); } +#ifdef ASSERT + if (PrintFlagsFinal) { + CommandLineFlags::printFlags(); + } +#endif + return JNI_OK; } @@ -2572,7 +2763,7 @@ } // This add maintains unique property key in the list. -void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, char* v) { +void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, char* v, jboolean append) { if (plist == NULL) return; @@ -2580,7 +2771,11 @@ SystemProperty* prop; for (prop = *plist; prop != NULL; prop = prop->next()) { if (strcmp(k, prop->key()) == 0) { - prop->set_value(v); + if (append) { + prop->append_value(v); + } else { + prop->set_value(v); + } return; } } --- old/hotspot/src/share/vm/runtime/arguments.hpp 2009-08-01 04:14:51.276356793 +0100 +++ new/hotspot/src/share/vm/runtime/arguments.hpp 2009-08-01 04:14:51.190300569 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)arguments.hpp 1.104 07/09/04 17:30:49 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -294,15 +294,17 @@ static bool _CIDynamicCompilePriority; static intx _Tier2CompileThreshold; - // GC processing - static int nof_parallel_gc_threads(); // CMS/ParNew garbage collectors static void set_parnew_gc_flags(); static void set_cms_and_parnew_gc_flags(); - // UseParallelGC + // UseParallel[Old]GC static void set_parallel_gc_flags(); + // Garbage-First (UseG1GC) + static void set_g1_gc_flags(); // GC ergonomics static void set_ergonomics_flags(); + // Setup heap size for a server platform + static void set_server_heap_size(); // Based on automatic selection criteria, should the // low pause collector be used. static bool should_auto_select_low_pause_collector(); @@ -340,9 +342,9 @@ } static bool verify_percentage(uintx value, const char* name); static void describe_range_error(ArgsRange errcode); - static ArgsRange check_memory_size(jlong size, jlong min_size); - static ArgsRange parse_memory_size(const char* s, jlong* long_arg, - jlong min_size); + static ArgsRange check_memory_size(julong size, julong min_size); + static ArgsRange parse_memory_size(const char* s, julong* long_arg, + julong min_size); // methods to build strings from individual args static void build_jvm_args(const char* arg); @@ -362,9 +364,11 @@ short* methodsNum, short* methodsMax, char*** methods, bool** allClasses ); - // Returns true if the string s is in the list of - // flags made obsolete in 1.5.0. 
- static bool made_obsolete_in_1_5_0(const char* s); + // Returns true if the string s is in the list of flags that have recently + // been made obsolete. If we detect one of these flags on the command + // line, instead of failing we print a warning message and ignore the + // flag. This gives the user a release or so to stop using the flag. + static bool is_newly_obsolete(const char* s, JDK_Version* buffer); static short CompileOnlyClassesNum; static short CompileOnlyClassesMax; @@ -388,6 +392,8 @@ public: // Parses the arguments static jint parse(const JavaVMInitArgs* args); + // Check for consistency in the selection of the garbage collector. + static bool check_gc_consistency(); // Check consistecy or otherwise of VM argument settings static bool check_vm_args_consistency(); // Used by os_solaris @@ -472,10 +478,13 @@ // System properties static void init_system_properties(); - // Proptery List manipulation + // Property List manipulation static void PropertyList_add(SystemProperty** plist, SystemProperty *element); static void PropertyList_add(SystemProperty** plist, const char* k, char* v); - static void PropertyList_unique_add(SystemProperty** plist, const char* k, char* v); + static void PropertyList_unique_add(SystemProperty** plist, const char* k, char* v) { + PropertyList_unique_add(plist, k, v, false); + } + static void PropertyList_unique_add(SystemProperty** plist, const char* k, char* v, jboolean append); static const char* PropertyList_get_value(SystemProperty* plist, const char* key); static int PropertyList_count(SystemProperty* pl); static const char* PropertyList_get_key_at(SystemProperty* pl,int index); --- old/hotspot/src/share/vm/runtime/atomic.cpp 2009-08-01 04:14:52.161132987 +0100 +++ new/hotspot/src/share/vm/runtime/atomic.cpp 2009-08-01 04:14:52.087346259 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)atomic.cpp 1.14 07/05/05 17:06:42 JVM" #endif /* - * Copyright 2001-2003 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,3 +47,15 @@ } return cur_as_bytes[offset]; } + +unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) { + assert(sizeof(unsigned int) == sizeof(jint), "more work to do"); + return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest); +} + +unsigned Atomic::cmpxchg(unsigned int exchange_value, + volatile unsigned int* dest, unsigned int compare_value) { + assert(sizeof(unsigned int) == sizeof(jint), "more work to do"); + return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest, + (jint)compare_value); +} --- old/hotspot/src/share/vm/runtime/atomic.hpp 2009-08-01 04:14:52.973638730 +0100 +++ new/hotspot/src/share/vm/runtime/atomic.hpp 2009-08-01 04:14:52.890809008 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)atomic.hpp 1.22 07/05/05 17:06:42 JVM" #endif /* - * Copyright 1999-2003 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,10 @@ static void dec_ptr(volatile void* dest); // Performs atomic exchange of *dest with exchange_value. Returns old prior value of *dest. 
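The new unsigned overloads in atomic.cpp just forward to the signed 32-bit primitives and cast, relying on the asserted size match between unsigned int and jint. The same contract, sketched with std::atomic for illustration:

    #include <atomic>

    inline unsigned cmpxchg_model(unsigned exchange_value,
                                  std::atomic<unsigned>* dest,
                                  unsigned compare_value) {
      unsigned observed = compare_value;
      // On failure compare_exchange_strong writes the current value into
      // 'observed'; on success it stays equal to compare_value. Either way the
      // return value is the prior contents of *dest, matching HotSpot's convention.
      dest->compare_exchange_strong(observed, exchange_value);
      return observed;
    }

    inline unsigned xchg_model(unsigned exchange_value, std::atomic<unsigned>* dest) {
      return dest->exchange(exchange_value);   // returns the old value
    }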
- static jint xchg (jint exchange_value, volatile jint* dest); + static jint xchg(jint exchange_value, volatile jint* dest); + static unsigned int xchg(unsigned int exchange_value, + volatile unsigned int* dest); + static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest); static void* xchg_ptr(void* exchange_value, volatile void* dest); @@ -68,6 +71,11 @@ static jbyte cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value); static jint cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value); static jlong cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value); + + static unsigned int cmpxchg(unsigned int exchange_value, + volatile unsigned int* dest, + unsigned int compare_value); + static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value); static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value); }; --- old/hotspot/src/share/vm/runtime/biasedLocking.cpp 2009-08-01 04:14:53.828709786 +0100 +++ new/hotspot/src/share/vm/runtime/biasedLocking.cpp 2009-08-01 04:14:53.741953647 +0100 @@ -1,9 +1,5 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)biasedLocking.cpp 1.15 07/05/23 10:53:58 JVM" -#endif - /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,9 +36,14 @@ } class VM_EnableBiasedLocking: public VM_Operation { + private: + bool _is_cheap_allocated; public: - VM_EnableBiasedLocking() {} - VMOp_Type type() const { return VMOp_EnableBiasedLocking; } + VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; } + VMOp_Type type() const { return VMOp_EnableBiasedLocking; } + Mode evaluation_mode() const { return _is_cheap_allocated ? _async_safepoint : _safepoint; } + bool is_cheap_allocated() const { return _is_cheap_allocated; } + void doit() { // Iterate the system dictionary enabling biased locking for all // currently loaded classes @@ -65,8 +66,10 @@ EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {} virtual void task() { - VM_EnableBiasedLocking op; - VMThread::execute(&op); + // Use async VM operation to avoid blocking the Watcher thread. + // VM Thread will free C heap storage. + VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true); + VMThread::execute(op); // Reclaim our storage and disenroll ourself delete this; @@ -87,7 +90,7 @@ EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay); task->enroll(); } else { - VM_EnableBiasedLocking op; + VM_EnableBiasedLocking op(false); VMThread::execute(&op); } } @@ -579,13 +582,19 @@ if (heuristics == HR_NOT_BIASED) { return NOT_BIASED; } else if (heuristics == HR_SINGLE_REVOKE) { - if (mark->biased_locker() == THREAD) { + Klass *k = Klass::cast(obj->klass()); + markOop prototype_header = k->prototype_header(); + if (mark->biased_locker() == THREAD && + prototype_header->bias_epoch() == mark->bias_epoch()) { // A thread is trying to revoke the bias of an object biased // toward it, again likely due to an identity hash code // computation. We can again avoid a safepoint in this case // since we are only going to walk our own stack. There are no // races with revocations occurring in other threads because we // reach no safepoints in the revocation path. 
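
Earlier in the biasedLocking.cpp hunk, the watcher-thread task now posts a heap-allocated VM_EnableBiasedLocking that is evaluated asynchronously instead of blocking on a stack-allocated one. A rough sketch of the two usage patterns, with the VM operation machinery reduced to the pieces the hunk relies on (VM_Example and execute are placeholders, not HotSpot APIs):

#include <cstdio>

// Reduced model of the VM_Operation protocol used in the hunk
// (the real classes live in vm_operations.hpp / vmThread.hpp).
class VM_Operation {
 public:
  enum Mode { _safepoint, _async_safepoint };
  virtual ~VM_Operation() {}
  virtual Mode evaluation_mode() const    { return _safepoint; }
  virtual bool is_cheap_allocated() const { return false; }
  virtual void doit() = 0;
};

class VM_Example : public VM_Operation {
  bool _is_cheap_allocated;
 public:
  explicit VM_Example(bool is_cheap_allocated)
      : _is_cheap_allocated(is_cheap_allocated) {}
  Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }
  void doit()                     { std::puts("operation body runs at a safepoint"); }
};

// Toy stand-in for VMThread::execute(): an async, cheap-allocated op is
// evaluated later and freed by the executing thread; a synchronous op
// blocks the requester and stays owned by it.
static void execute(VM_Operation* op) {
  op->doit();
  if (op->is_cheap_allocated()) delete op;
}

int main() {
  VM_Example sync_op(false);       // stack allocated, requester waits
  execute(&sync_op);
  execute(new VM_Example(true));   // heap allocated, never blocks the requester
  return 0;
}
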
+ // Also check the epoch because even if threads match, another thread + // can come in with a CAS to steal the bias of an object that has a + // stale epoch. ResourceMark rm; if (TraceBiasedLocking) { tty->print_cr("Revoking bias by walking my own stack:"); --- old/hotspot/src/share/vm/runtime/deoptimization.cpp 2009-08-01 04:14:54.765526749 +0100 +++ new/hotspot/src/share/vm/runtime/deoptimization.cpp 2009-08-01 04:14:54.672203150 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)deoptimization.cpp 1.284 07/08/29 13:42:28 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -144,41 +144,53 @@ #ifdef COMPILER2 // Reallocate the non-escaping objects and restore their fields. Then // relock objects if synchronization on them was eliminated. - if (DoEscapeAnalysis && EliminateAllocations) { - GrowableArray* objects = chunk->at(0)->scope()->objects(); - bool reallocated = false; - if (objects != NULL) { - JRT_BLOCK - reallocated = realloc_objects(thread, &deoptee, objects, THREAD); - JRT_END - } - if (reallocated) { - reassign_fields(&deoptee, &map, objects); + if (DoEscapeAnalysis) { + if (EliminateAllocations) { + assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames"); + GrowableArray* objects = chunk->at(0)->scope()->objects(); + bool reallocated = false; + if (objects != NULL) { + JRT_BLOCK + reallocated = realloc_objects(thread, &deoptee, objects, THREAD); + JRT_END + } + if (reallocated) { + reassign_fields(&deoptee, &map, objects); #ifndef PRODUCT - if (TraceDeoptimization) { - ttyLocker ttyl; - tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread); - print_objects(objects); + if (TraceDeoptimization) { + ttyLocker ttyl; + tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread); + print_objects(objects); } #endif + } } - for (int i = 0; i < chunk->length(); i++) { - GrowableArray* monitors = chunk->at(i)->scope()->monitors(); - if (monitors != NULL) { - relock_objects(&deoptee, &map, monitors); + if (EliminateLocks) { #ifndef PRODUCT - if (TraceDeoptimization) { - ttyLocker ttyl; - tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread); - for (int j = 0; i < monitors->length(); i++) { - MonitorValue* mv = monitors->at(i); - if (mv->eliminated()) { - StackValue* owner = StackValue::create_stack_value(&deoptee, &map, mv->owner()); - tty->print_cr(" object <" INTPTR_FORMAT "> locked", owner->get_obj()()); + bool first = true; +#endif + for (int i = 0; i < chunk->length(); i++) { + compiledVFrame* cvf = chunk->at(i); + assert (cvf->scope() != NULL,"expect only compiled java frames"); + GrowableArray* monitors = cvf->monitors(); + if (monitors->is_nonempty()) { + relock_objects(monitors, thread); +#ifndef PRODUCT + if (TraceDeoptimization) { + ttyLocker ttyl; + for (int j = 0; j < monitors->length(); j++) { + MonitorInfo* mi = monitors->at(j); + if (mi->eliminated()) { + if (first) { + first = false; + tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread); + } + tty->print_cr(" object <" INTPTR_FORMAT "> locked", mi->owner()); + } } } - } #endif + } } } } @@ -659,6 +671,7 @@ void do_field(fieldDescriptor* fd) { + intptr_t val; StackValue* value = StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(i())); int offset = fd->offset(); @@ -671,25 +684,37 @@ case T_LONG: case T_DOUBLE: { 
assert(value->type() == T_INT, "Agreement."); StackValue* low = - StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(++_i)); + StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(++_i)); +#ifdef _LP64 + jlong res = (jlong)low->get_int(); +#else +#ifdef SPARC + // For SPARC we have to swap high and low words. + jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int()); +#else jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); +#endif //SPARC +#endif _obj->long_field_put(offset, res); break; } - + // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem. case T_INT: case T_FLOAT: // 4 bytes. assert(value->type() == T_INT, "Agreement."); - _obj->int_field_put(offset, (jint)value->get_int()); + val = value->get_int(); + _obj->int_field_put(offset, (jint)*((jint*)&val)); break; case T_SHORT: case T_CHAR: // 2 bytes assert(value->type() == T_INT, "Agreement."); - _obj->short_field_put(offset, (jshort)value->get_int()); + val = value->get_int(); + _obj->short_field_put(offset, (jshort)*((jint*)&val)); break; - case T_BOOLEAN: // 1 byte + case T_BOOLEAN: case T_BYTE: // 1 byte assert(value->type() == T_INT, "Agreement."); - _obj->bool_field_put(offset, (jboolean)value->get_int()); + val = value->get_int(); + _obj->bool_field_put(offset, (jboolean)*((jint*)&val)); break; default: @@ -701,25 +726,49 @@ // restore elements of an eliminated type array void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) { - StackValue* low; - jlong lval; int index = 0; + intptr_t val; for (int i = 0; i < sv->field_size(); i++) { StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i)); switch(type) { - case T_BOOLEAN: obj->bool_at_put (index, (jboolean) value->get_int()); break; - case T_BYTE: obj->byte_at_put (index, (jbyte) value->get_int()); break; - case T_CHAR: obj->char_at_put (index, (jchar) value->get_int()); break; - case T_SHORT: obj->short_at_put(index, (jshort) value->get_int()); break; - case T_INT: obj->int_at_put (index, (jint) value->get_int()); break; - case T_FLOAT: obj->float_at_put(index, (jfloat) value->get_int()); break; - case T_LONG: - case T_DOUBLE: - low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i)); - lval = jlong_from((jint)value->get_int(), (jint)low->get_int()); - sv->value()->long_field_put(index, lval); - break; + case T_LONG: case T_DOUBLE: { + assert(value->type() == T_INT, "Agreement."); + StackValue* low = + StackValue::create_stack_value(fr, reg_map, sv->field_at(++i)); +#ifdef _LP64 + jlong res = (jlong)low->get_int(); +#else +#ifdef SPARC + // For SPARC we have to swap high and low words. + jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int()); +#else + jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); +#endif //SPARC +#endif + obj->long_at_put(index, res); + break; + } + + // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem. + case T_INT: case T_FLOAT: // 4 bytes. 
+ assert(value->type() == T_INT, "Agreement."); + val = value->get_int(); + obj->int_at_put(index, (jint)*((jint*)&val)); + break; + + case T_SHORT: case T_CHAR: // 2 bytes + assert(value->type() == T_INT, "Agreement."); + val = value->get_int(); + obj->short_at_put(index, (jshort)*((jint*)&val)); + break; + + case T_BOOLEAN: case T_BYTE: // 1 byte + assert(value->type() == T_INT, "Agreement."); + val = value->get_int(); + obj->bool_at_put(index, (jboolean)*((jint*)&val)); + break; + default: ShouldNotReachHere(); } @@ -761,18 +810,27 @@ // relock objects for which synchronization was eliminated -void Deoptimization::relock_objects(frame* fr, RegisterMap* reg_map, GrowableArray* monitors) { +void Deoptimization::relock_objects(GrowableArray* monitors, JavaThread* thread) { for (int i = 0; i < monitors->length(); i++) { - MonitorValue* mv = monitors->at(i); - StackValue* owner = StackValue::create_stack_value(fr, reg_map, mv->owner()); - if (mv->eliminated()) { - Handle obj = owner->get_obj(); - assert(obj.not_null(), "reallocation was missed"); - BasicLock* lock = StackValue::resolve_monitor_lock(fr, mv->basic_lock()); - lock->set_displaced_header(obj->mark()); - obj->set_mark((markOop) lock); + MonitorInfo* mon_info = monitors->at(i); + if (mon_info->eliminated()) { + assert(mon_info->owner() != NULL, "reallocation was missed"); + Handle obj = Handle(mon_info->owner()); + markOop mark = obj->mark(); + if (UseBiasedLocking && mark->has_bias_pattern()) { + // New allocated objects may have the mark set to anonymously biased. + // Also the deoptimized method may called methods with synchronization + // where the thread-local object is bias locked to the current thread. + assert(mark->is_biased_anonymously() || + mark->biased_locker() == thread, "should be locked to current thread"); + // Reset mark word to unbiased prototype. + markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); + obj->set_mark(unbiased_prototype); + } + BasicLock* lock = mon_info->lock(); + ObjectSynchronizer::slow_enter(obj, lock, thread); } - assert(owner->get_obj()->is_locked(), "object must be locked now"); + assert(mon_info->owner()->is_locked(), "object must be locked now"); } } @@ -878,7 +936,7 @@ GrowableArray* monitors = cvf->monitors(); for (int i = 0; i < monitors->length(); i++) { MonitorInfo* mon_info = monitors->at(i); - if (mon_info->owner() != NULL) { + if (mon_info->owner() != NULL && !mon_info->eliminated()) { objects_to_revoke->append(Handle(mon_info->owner())); } } --- old/hotspot/src/share/vm/runtime/deoptimization.hpp 2009-08-01 04:14:55.723795830 +0100 +++ new/hotspot/src/share/vm/runtime/deoptimization.hpp 2009-08-01 04:14:55.647481817 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)deoptimization.hpp 1.92 07/07/27 16:21:04 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
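
In the relock_objects rewrite above, an eliminated monitor whose object still carries a bias pattern first has its mark word reset to the unbiased prototype, keeping only the GC age bits, before slow_enter re-locks it. A toy sketch of just that mark-word step; the two-field Mark struct is illustrative and not the real markOop layout:

#include <cstdio>

// Toy mark word with only the fields the reset touches (see markOop.hpp
// for the real encoding).
struct Mark {
  bool     biased;   // has_bias_pattern()
  unsigned age;      // GC age bits
};

static Mark prototype()                 { return Mark{false, 0}; }
static Mark set_age(Mark m, unsigned a) { m.age = a; return m; }

// Mirrors: markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
static Mark unbias_preserving_age(Mark mark) {
  return set_age(prototype(), mark.age);
}

int main() {
  Mark biased{true, 3};
  Mark reset = unbias_preserving_age(biased);
  std::printf("biased=%d age=%u\n", (int)reset.biased, reset.age);   // biased=0 age=3
  return 0;
}
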
* * This code is free software; you can redistribute it and/or modify it @@ -108,7 +108,7 @@ static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type); static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj); static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray* objects); - static void relock_objects(frame* fr, RegisterMap* reg_map, GrowableArray* monitors); + static void relock_objects(GrowableArray* monitors, JavaThread* thread); NOT_PRODUCT(static void print_objects(GrowableArray* objects);) #endif // COMPILER2 --- old/hotspot/src/share/vm/runtime/fprofiler.cpp 2009-08-01 04:14:56.595978227 +0100 +++ new/hotspot/src/share/vm/runtime/fprofiler.cpp 2009-08-01 04:14:56.507064021 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)fprofiler.cpp 1.137 07/08/31 18:44:03 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -927,29 +927,23 @@ FlatProfiler::record_thread_ticks(); } -void ThreadProfiler::record_interpreted_tick(frame fr, TickPosition where, int* ticks) { +void ThreadProfiler::record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks) { FlatProfiler::all_int_ticks++; if (!FlatProfiler::full_profile()) { return; } - if (!fr.is_interpreted_frame_valid()) { + if (!fr.is_interpreted_frame_valid(thread)) { // tick came at a bad time interpreter_ticks += 1; FlatProfiler::interpreter_ticks += 1; return; } - methodOop method = NULL; - if (fr.fp() != NULL) { - method = *fr.interpreter_frame_method_addr(); - } - if (!Universe::heap()->is_valid_method(method)) { - // tick came at a bad time, stack frame not initialized correctly - interpreter_ticks += 1; - FlatProfiler::interpreter_ticks += 1; - return; - } + // The frame has been fully validated so we can trust the method and bci + + methodOop method = *fr.interpreter_frame_method_addr(); + interpreted_update(method, where); // update byte code table @@ -1000,7 +994,7 @@ // The tick happend in real code -> non VM code if (fr.is_interpreted_frame()) { interval_data_ref()->inc_interpreted(); - record_interpreted_tick(fr, tp_code, FlatProfiler::bytecode_ticks); + record_interpreted_tick(thread, fr, tp_code, FlatProfiler::bytecode_ticks); return; } @@ -1031,7 +1025,7 @@ // The tick happend in VM code interval_data_ref()->inc_native(); if (fr.is_interpreted_frame()) { - record_interpreted_tick(fr, tp_native, FlatProfiler::bytecode_ticks_stub); + record_interpreted_tick(thread, fr, tp_native, FlatProfiler::bytecode_ticks_stub); return; } if (CodeCache::contains(fr.pc())) { --- old/hotspot/src/share/vm/runtime/fprofiler.hpp 2009-08-01 04:14:57.617550362 +0100 +++ new/hotspot/src/share/vm/runtime/fprofiler.hpp 2009-08-01 04:14:57.534564907 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)fprofiler.hpp 1.55 07/06/17 14:09:46 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
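
The fprofiler.cpp change above replaces the ad-hoc fp and method spot checks with a single is_interpreted_frame_valid(thread) test, after which the method slot can be trusted. A compressed sketch of that guard-then-trust shape, with the profiler types reduced to placeholders (Frame, Thread and the counter here are not the VM classes):

#include <cstdio>

// Placeholder stand-ins for the pieces the hunk uses.
struct Thread { const char* name; };
struct Frame  { bool well_formed; const char* method; };

static int interpreter_ticks = 0;   // ticks discarded as unattributable

static bool is_interpreted_frame_valid(const Frame& fr, const Thread&) {
  return fr.well_formed;            // the real check validates fp/sp/bcp/method
}

// Mirrors record_interpreted_tick: bail out early if the sampled frame
// cannot be trusted; afterwards the method may be read directly.
static void record_interpreted_tick(const Thread& t, const Frame& fr) {
  if (!is_interpreted_frame_valid(fr, t)) {
    interpreter_ticks += 1;         // tick came at a bad time
    return;
  }
  std::printf("tick attributed to %s\n", fr.method);
}

int main() {
  Thread t = {"sampler"};
  Frame bad = {false, NULL};
  Frame good = {true, "Example.run()V"};
  record_interpreted_tick(t, bad);
  record_interpreted_tick(t, good);
  std::printf("discarded=%d\n", interpreter_ticks);
  return 0;
}
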
* * This code is free software; you can redistribute it and/or modify it @@ -138,7 +138,7 @@ ProfilerNode** table; private: - void record_interpreted_tick(frame fr, TickPosition where, int* ticks); + void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks); void record_compiled_tick (JavaThread* thread, frame fr, TickPosition where); void interpreted_update(methodOop method, TickPosition where); void compiled_update (methodOop method, TickPosition where); --- old/hotspot/src/share/vm/runtime/frame.cpp 2009-08-01 04:14:58.468729303 +0100 +++ new/hotspot/src/share/vm/runtime/frame.cpp 2009-08-01 04:14:58.383762061 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)frame.cpp 1.235 07/09/25 17:07:43 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,12 +86,12 @@ intptr_t* src = (intptr_t*) location(r); if (src != NULL) { - r->print(); - tty->print(" [" INTPTR_FORMAT "] = ", src); + r->print_on(st); + st->print(" [" INTPTR_FORMAT "] = ", src); if (((uintptr_t)src & (sizeof(*src)-1)) != 0) { - tty->print_cr(""); + st->print_cr(""); } else { - tty->print_cr(INTPTR_FORMAT, *src); + st->print_cr(INTPTR_FORMAT, *src); } } } @@ -1155,10 +1155,9 @@ if(reg->is_reg()) { // If it is passed in a register, it got spilled in the stub frame. return (oop *)reg_map->location(reg); - } else { - int sp_offset_in_stack_slots = reg->reg2stack(); - int sp_offset = sp_offset_in_stack_slots >> (LogBytesPerWord - LogBytesPerInt); - return (oop *)&unextended_sp()[sp_offset]; + } else { + int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size; + return (oop*)(((address)unextended_sp()) + sp_offset_in_bytes); } } @@ -1334,8 +1333,7 @@ ResourceMark rm(thread); assert(_cb != NULL, "sanity check"); if (_cb->oop_maps() != NULL) { - OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, - &_check_value, &_zap_dead); + OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value); } } --- old/hotspot/src/share/vm/runtime/frame.hpp 2009-08-01 04:14:59.381396810 +0100 +++ new/hotspot/src/share/vm/runtime/frame.hpp 2009-08-01 04:14:59.308127364 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)frame.hpp 1.164 07/08/29 13:42:28 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -111,7 +111,7 @@ bool is_first_frame() const; // oldest frame? (has no sender) bool is_first_java_frame() const; // same for Java frame - bool is_interpreted_frame_valid() const; // performs sanity checks on interpreted frames. + bool is_interpreted_frame_valid(JavaThread* thread) const; // performs sanity checks on interpreted frames. 
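
The frame.cpp hunk above converts an oop-map stack slot index straight into a byte offset via VMRegImpl::stack_slot_size instead of shifting it into word units first; on 64-bit platforms the shifted form rounded an odd slot index down to the previous word. A small sketch of the byte-based address computation, with the 4-byte slot size written out as a placeholder constant:

#include <cstdint>
#include <cstdio>

// Placeholder for VMRegImpl::stack_slot_size (stack slots are 32-bit).
static const int stack_slot_size = 4;

// New form from the hunk: scale the slot index into bytes and add it to
// the unextended stack pointer as a raw address.
static void* slot_address(intptr_t* unextended_sp, int slot_index) {
  int sp_offset_in_bytes = slot_index * stack_slot_size;
  return (void*)(((char*)unextended_sp) + sp_offset_in_bytes);
}

int main() {
  intptr_t fake_stack[8] = {0};
  // Slot 3 is 12 bytes above sp; the old word-scaled indexing (3 >> 1
  // words of 8 bytes on a 64-bit VM) would have produced 8.
  std::printf("offset = %d bytes\n",
              (int)((char*)slot_address(fake_stack, 3) - (char*)fake_stack));
  return 0;
}
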
// tells whether this frame is marked for deoptimization bool should_be_deoptimized() const; @@ -253,7 +253,7 @@ oop interpreter_callee_receiver(symbolHandle signature) { return *interpreter_callee_receiver_addr(signature); } - oop *interpreter_callee_receiver_addr(symbolHandle signature); + oop* interpreter_callee_receiver_addr(symbolHandle signature); // expression stack (may go up or down, direction == 1 or -1) @@ -405,19 +405,25 @@ # ifdef ENABLE_ZAP_DEAD_LOCALS private: class CheckValueClosure: public OopClosure { - public: void do_oop(oop* p); + public: + void do_oop(oop* p); + void do_oop(narrowOop* p) { ShouldNotReachHere(); } }; static CheckValueClosure _check_value; class CheckOopClosure: public OopClosure { - public: void do_oop(oop* p); + public: + void do_oop(oop* p); + void do_oop(narrowOop* p) { ShouldNotReachHere(); } }; static CheckOopClosure _check_oop; static void check_derived_oop(oop* base, oop* derived); class ZapDeadClosure: public OopClosure { - public: void do_oop(oop* p); + public: + void do_oop(oop* p); + void do_oop(narrowOop* p) { ShouldNotReachHere(); } }; static ZapDeadClosure _zap_dead; --- old/hotspot/src/share/vm/runtime/globals.cpp 2009-08-01 04:15:00.812886797 +0100 +++ new/hotspot/src/share/vm/runtime/globals.cpp 2009-08-01 04:15:00.732565618 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)globals.cpp 1.50 07/09/13 20:51:49 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,20 +31,26 @@ RUNTIME_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \ MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \ - MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG, \ - MATERIALIZE_MANAGEABLE_FLAG, MATERIALIZE_PRODUCT_RW_FLAG) + MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_EXPERIMENTAL_FLAG, \ + MATERIALIZE_NOTPRODUCT_FLAG, \ + MATERIALIZE_MANAGEABLE_FLAG, MATERIALIZE_PRODUCT_RW_FLAG, \ + MATERIALIZE_LP64_PRODUCT_FLAG) RUNTIME_OS_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \ MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \ MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG) bool Flag::is_unlocker() const { - return strcmp(name, "UnlockDiagnosticVMOptions") == 0; + return strcmp(name, "UnlockDiagnosticVMOptions") == 0 || + strcmp(name, "UnlockExperimentalVMOptions") == 0; + } bool Flag::is_unlocked() const { if (strcmp(kind, "{diagnostic}") == 0) { return UnlockDiagnosticVMOptions; + } else if (strcmp(kind, "{experimental}") == 0) { + return UnlockExperimentalVMOptions; } else { return true; } @@ -71,18 +77,20 @@ if (is_uintx()) st->print("%-16lu", get_uintx()); if (is_ccstr()) { const char* cp = get_ccstr(); - const char* eol; - while ((eol = strchr(cp, '\n')) != NULL) { - char format_buffer[FORMAT_BUFFER_LEN]; - size_t llen = pointer_delta(eol, cp, sizeof(char)); - jio_snprintf(format_buffer, FORMAT_BUFFER_LEN, - "%%." SIZE_FORMAT "s", llen); - st->print(format_buffer, cp); - st->cr(); - cp = eol+1; - st->print("%5s %-35s += ", "", name); + if (cp != NULL) { + const char* eol; + while ((eol = strchr(cp, '\n')) != NULL) { + char format_buffer[FORMAT_BUFFER_LEN]; + size_t llen = pointer_delta(eol, cp, sizeof(char)); + jio_snprintf(format_buffer, FORMAT_BUFFER_LEN, + "%%." 
SIZE_FORMAT "s", llen); + st->print(format_buffer, cp); + st->cr(); + cp = eol+1; + st->print("%5s %-35s += ", "", name); + } + st->print("%-16s", cp); } - st->print("%-16s", cp); } st->print(" %s", kind); st->cr(); @@ -97,18 +105,21 @@ st->print("-XX:%s=" UINTX_FORMAT, name, get_uintx()); } else if (is_ccstr()) { st->print("-XX:%s=", name); - // Need to turn embedded '\n's back into separate arguments - // Not so efficient to print one character at a time, - // but the choice is to do the transformation to a buffer - // and print that. And this need not be efficient. - for (const char* cp = get_ccstr(); *cp != '\0'; cp += 1) { - switch (*cp) { - default: - st->print("%c", *cp); - break; - case '\n': - st->print(" -XX:%s=", name); - break; + const char* cp = get_ccstr(); + if (cp != NULL) { + // Need to turn embedded '\n's back into separate arguments + // Not so efficient to print one character at a time, + // but the choice is to do the transformation to a buffer + // and print that. And this need not be efficient. + for (; *cp != '\0'; cp += 1) { + switch (*cp) { + default: + st->print("%c", *cp); + break; + case '\n': + st->print(" -XX:%s=", name); + break; + } } } } else { @@ -122,6 +133,7 @@ #define RUNTIME_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{product}", DEFAULT }, #define RUNTIME_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, "{pd product}", DEFAULT }, #define RUNTIME_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{diagnostic}", DEFAULT }, +#define RUNTIME_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{experimental}", DEFAULT }, #define RUNTIME_MANAGEABLE_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{manageable}", DEFAULT }, #define RUNTIME_PRODUCT_RW_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{product rw}", DEFAULT }, @@ -135,6 +147,12 @@ #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{notproduct}", DEFAULT }, #endif +#ifdef _LP64 + #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{lp64_product}", DEFAULT }, +#else + #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) /* flag is constant */ +#endif // _LP64 + #define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1 product}", DEFAULT }, #define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, "{C1 pd product}", DEFAULT }, #ifdef PRODUCT @@ -163,8 +181,11 @@ static Flag flagTable[] = { - RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT) + RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT, RUNTIME_LP64_PRODUCT_FLAG_STRUCT) RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT) +#ifndef SERIALGC + G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, 
RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT) +#endif // SERIALGC #ifdef COMPILER1 C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT) #endif @@ -187,8 +208,9 @@ for (Flag* current = &flagTable[0]; current->name; current++) { if (str_equal(current->name, name, length)) { if (!(current->is_unlocked() || current->is_unlocker())) { - // disable use of diagnostic flags until they are unlocked - return NULL; + // disable use of diagnostic or experimental flags until they + // are explicitly unlocked + return NULL; } return current; } @@ -208,6 +230,18 @@ return (f->origin == DEFAULT); } +bool CommandLineFlagsEx::is_ergo(CommandLineFlag flag) { + assert((size_t)flag < Flag::numFlags, "bad command line flag index"); + Flag* f = &Flag::flags[flag]; + return (f->origin == ERGONOMIC); +} + +bool CommandLineFlagsEx::is_cmdline(CommandLineFlag flag) { + assert((size_t)flag < Flag::numFlags, "bad command line flag index"); + Flag* f = &Flag::flags[flag]; + return (f->origin == COMMAND_LINE); +} + bool CommandLineFlags::wasSetOnCmdline(const char* name, bool* value) { Flag* result = Flag::find_flag((char*)name, strlen(name)); if (result == NULL) return false; @@ -334,8 +368,11 @@ if (result == NULL) return false; if (!result->is_ccstr()) return false; ccstr old_value = result->get_ccstr(); - char* new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1); - strcpy(new_value, *value); + char* new_value = NULL; + if (*value != NULL) { + new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1); + strcpy(new_value, *value); + } result->set_ccstr(new_value); if (result->origin == DEFAULT && old_value != NULL) { // Prior value is NOT heap allocated, but was a literal constant. --- old/hotspot/src/share/vm/runtime/globals.hpp 2009-08-01 04:15:01.785414033 +0100 +++ new/hotspot/src/share/vm/runtime/globals.hpp 2009-08-01 04:15:01.665903342 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)globals.hpp 1.975 08/08/28 22:07:15 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -240,7 +240,6 @@ #define falseInTiered true #endif - // develop flags are settable / visible only during development and are constant in the PRODUCT version // product flags are always settable / visible // notproduct flags are settable / visible only during development and are not declared in the PRODUCT version @@ -259,11 +258,23 @@ // diagnostic information about VM problems. To use a VM diagnostic // option, you must first specify +UnlockDiagnosticVMOptions. // (This master switch also affects the behavior of -Xprintflags.) - -// manageable flags are writeable external product flags. -// They are dynamically writeable through the JDK management interface -// (com.sun.management.HotSpotDiagnosticMXBean API) and also through JConsole. -// These flags are external exported interface (see CCC). The list of +// +// experimental flags are in support of features that are not +// part of the officially supported product, but are available +// for experimenting with. 
They could, for example, be performance +// features that may not have undergone full or rigorous QA, but which may +// help performance in some cases and released for experimentation +// by the community of users and developers. This flag also allows one to +// be able to build a fully supported product that nonetheless also +// ships with some unsupported, lightly tested, experimental features. +// Like the UnlockDiagnosticVMOptions flag above, there is a corresponding +// UnlockExperimentalVMOptions flag, which allows the control and +// modification of the experimental flags. +// +// manageable flags are writeable external product flags. +// They are dynamically writeable through the JDK management interface +// (com.sun.management.HotSpotDiagnosticMXBean API) and also through JConsole. +// These flags are external exported interface (see CCC). The list of // manageable flags can be queried programmatically through the management // interface. // @@ -289,7 +300,17 @@ // Note that when there is a need to support develop flags to be writeable, // it can be done in the same way as product_rw. -#define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, manageable, product_rw) \ +#define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product) \ + \ + lp64_product(bool, UseCompressedOops, false, \ + "Use 32-bit object references in 64-bit VM. " \ + "lp64_product means flag is always constant in 32 bit VM") \ + \ + lp64_product(bool, CheckCompressedOops, trueInDebug, \ + "generate checks in encoding/decoding code") \ + \ + product(bool, UseImplicitNullCheckForNarrowOop, true, \ + "generate implicit null check in indexed addressing mode.") \ \ /* UseMembar is theoretically a temp flag used for memory barrier \ * removal testing. It was supposed to be removed before FCS but has \ @@ -301,7 +322,10 @@ "Prints flags that appeared on the command line") \ \ diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug, \ - "Enable processing of flags relating to field diagnostics") \ + "Enable normal processing of flags relating to field diagnostics")\ + \ + experimental(bool, UnlockExperimentalVMOptions, false, \ + "Enable normal processing of flags relating to experimental features")\ \ product(bool, JavaMonitorsInStackTrace, true, \ "Print info. 
about Java monitor locks when the stacks are dumped")\ @@ -309,12 +333,21 @@ product_pd(bool, UseLargePages, \ "Use large page memory") \ \ + product_pd(bool, UseLargePagesIndividualAllocation, \ + "Allocate large pages individually for better affinity") \ + \ + develop(bool, LargePagesIndividualAllocationInjectError, false, \ + "Fail large pages individual allocation") \ + \ develop(bool, TracePageSizes, false, \ "Trace page size selection and usage.") \ \ product(bool, UseNUMA, false, \ "Use NUMA if available") \ \ + product(bool, ForceNUMA, false, \ + "Force NUMA optimizations on single-node/UMA systems") \ + \ product(intx, NUMAChunkResizeWeight, 20, \ "Percentage (0-100) used to weight the current sample when " \ "computing exponentially decaying average for " \ @@ -347,12 +380,6 @@ product(bool, ForceTimeHighResolution, false, \ "Using high time resolution(For Win32 only)") \ \ - product(bool, CacheTimeMillis, false, \ - "Cache os::javaTimeMillis with CacheTimeMillisGranularity") \ - \ - diagnostic(uintx, CacheTimeMillisGranularity, 50, \ - "Granularity for CacheTimeMillis") \ - \ develop(bool, TraceItables, false, \ "Trace initialization and use of itables") \ \ @@ -463,13 +490,16 @@ develop(bool, SpecialStringIndexOf, true, \ "special version of string indexOf") \ \ + product(bool, SpecialArraysEquals, false, \ + "special version of Arrays.equals(char[],char[])") \ + \ develop(bool, TraceCallFixup, false, \ "traces all call fixups") \ \ develop(bool, DeoptimizeALot, false, \ "deoptimize at every exit from the runtime system") \ \ - develop(ccstrlist, DeoptimizeOnlyAt, "", \ + notproduct(ccstrlist, DeoptimizeOnlyAt, "", \ "a comma separated list of bcis to deoptimize at") \ \ product(bool, DeoptimizeRandom, false, \ @@ -592,6 +622,15 @@ develop(bool, ZapUnusedHeapArea, trueInDebug, \ "Zap unused heap space with 0xBAADBABE") \ \ + develop(bool, TraceZapUnusedHeapArea, false, \ + "Trace zapping of unused heap space") \ + \ + develop(bool, CheckZapUnusedHeapArea, false, \ + "Check zapping of unused heap space") \ + \ + develop(bool, ZapFillerObjects, trueInDebug, \ + "Zap filler objects with 0xDEAFBABE") \ + \ develop(bool, PrintVMMessages, true, \ "Print vm messages on console") \ \ @@ -677,16 +716,19 @@ notproduct(bool, PrintCompilation2, false, \ "Print additional statistics per compilation") \ \ - notproduct(bool, PrintAdapterHandlers, false, \ + diagnostic(bool, PrintAdapterHandlers, false, \ "Print code generated for i2c/c2i adapters") \ \ - develop(bool, PrintAssembly, false, \ - "Print assembly code") \ + diagnostic(bool, PrintAssembly, false, \ + "Print assembly code (using external disassembler.so)") \ \ - develop(bool, PrintNMethods, false, \ + diagnostic(ccstr, PrintAssemblyOptions, NULL, \ + "Options string passed to disassembler.so") \ + \ + diagnostic(bool, PrintNMethods, false, \ "Print assembly code for nmethods when generated") \ \ - develop(bool, PrintNativeNMethods, false, \ + diagnostic(bool, PrintNativeNMethods, false, \ "Print assembly code for native nmethods when generated") \ \ develop(bool, PrintDebugInfo, false, \ @@ -711,7 +753,7 @@ develop(bool, PrintCodeCache2, false, \ "Print detailed info on the compiled_code cache when exiting") \ \ - develop(bool, PrintStubCode, false, \ + diagnostic(bool, PrintStubCode, false, \ "Print generated stub code") \ \ product(bool, StackTraceInThrowable, true, \ @@ -782,6 +824,9 @@ product(bool, ClassUnloading, true, \ "Do unloading of classes") \ \ + diagnostic(bool, LinkWellKnownClasses, false, \ + "Resolve a well known 
class as soon as its name is seen") \ + \ develop(bool, DisableStartThread, false, \ "Disable starting of additional Java threads " \ "(for debugging only)") \ @@ -818,7 +863,7 @@ "Use LWP-based instead of libthread-based synchronization " \ "(SPARC only)") \ \ - product(ccstr, SyncKnobs, "", \ + product(ccstr, SyncKnobs, NULL, \ "(Unstable) Various monitor synchronization tunables") \ \ product(intx, EmitSync, 0, \ @@ -940,6 +985,9 @@ diagnostic(bool, UseIncDec, true, \ "Use INC, DEC instructions on x86") \ \ + product(bool, UseNewLongLShift, false, \ + "Use optimized bitwise shift left") \ + \ product(bool, UseStoreImmI16, true, \ "Use store immediate 16-bits value instruction on x86") \ \ @@ -952,6 +1000,18 @@ product(bool, UseXmmRegToRegMoveAll, false, \ "Copy all XMM register bits when moving value between registers") \ \ + product(bool, UseXmmI2D, false, \ + "Use SSE2 CVTDQ2PD instruction to convert Integer to Double") \ + \ + product(bool, UseXmmI2F, false, \ + "Use SSE2 CVTDQ2PS instruction to convert Integer to Float") \ + \ + product(bool, UseXMMForArrayCopy, false, \ + "Use SSE2 MOVQ instruction for Arraycopy") \ + \ + product(bool, UseUnalignedLoadStores, false, \ + "Use SSE2 MOVDQU instruction for Arraycopy") \ + \ product(intx, FieldsAllocationStyle, 1, \ "0 - type based with oops first, 1 - with oops last") \ \ @@ -993,7 +1053,7 @@ notproduct(bool, TraceJVMCalls, false, \ "Trace JVM calls") \ \ - product(ccstr, TraceJVMTI, "", \ + product(ccstr, TraceJVMTI, NULL, \ "Trace flags for JVMTI functions and events") \ \ /* This option can change an EMCP method into an obsolete method. */ \ @@ -1100,7 +1160,10 @@ /* gc */ \ \ product(bool, UseSerialGC, false, \ - "Tells whether the VM should use serial garbage collector") \ + "Use the serial garbage collector") \ + \ + experimental(bool, UseG1GC, false, \ + "Use the Garbage-First garbage collector") \ \ product(bool, UseParallelGC, false, \ "Use the Parallel Scavenge garbage collector") \ @@ -1115,10 +1178,6 @@ "In the Parallel Old garbage collector use parallel dense" \ " prefix update") \ \ - develop(bool, UseParallelOldGCChunkPointerCalc, true, \ - "In the Parallel Old garbage collector use chucks to calculate" \ - " new object locations") \ - \ product(uintx, HeapMaximumCompactionInterval, 20, \ "How often should we maximally compact the heap (not allowing " \ "any dead space)") \ @@ -1147,21 +1206,18 @@ product(uintx, ParallelCMSThreads, 0, \ "Max number of threads CMS will use for concurrent work") \ \ - develop(bool, VerifyParallelOldWithMarkSweep, false, \ - "Use the MarkSweep code to verify phases of Parallel Old") \ + develop(bool, ParallelOldGCSplitALot, false, \ + "Provoke splitting (copying data from a young gen space to" \ + "multiple destination spaces)") \ \ - develop(uintx, VerifyParallelOldWithMarkSweepInterval, 1, \ - "Interval at which the MarkSweep code is used to verify " \ - "phases of Parallel Old") \ + develop(uintx, ParallelOldGCSplitInterval, 3, \ + "How often to provoke splitting a young gen space") \ \ - develop(bool, ParallelOldMTUnsafeMarkBitMap, false, \ - "Use the Parallel Old MT unsafe in marking the bitmap") \ + develop(bool, TraceRegionTasksQueuing, false, \ + "Trace the queuing of the region tasks") \ \ - develop(bool, ParallelOldMTUnsafeUpdateLiveData, false, \ - "Use the Parallel Old MT unsafe in update of live size") \ - \ - develop(bool, TraceChunkTasksQueuing, false, \ - "Trace the queuing of the chunk tasks") \ + product(uintx, ParallelMarkingThreads, 0, \ + "Number of marking threads 
concurrent gc will use") \ \ product(uintx, YoungPLABSize, 4096, \ "Size of young gen promotion labs (in HeapWords)") \ @@ -1241,6 +1297,19 @@ product(intx, ParGCArrayScanChunk, 50, \ "Scan a subset and push remainder, if array is bigger than this") \ \ + product(bool, ParGCUseLocalOverflow, false, \ + "Instead of a global overflow list, use local overflow stacks") \ + \ + product(bool, ParGCTrimOverflow, true, \ + "Eagerly trim the local overflow lists (when ParGCUseLocalOverflow") \ + \ + notproduct(bool, ParGCWorkQueueOverflowALot, false, \ + "Whether we should simulate work queue overflow in ParNew") \ + \ + notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \ + "An `interval' counter that determines how frequently" \ + " we simulate overflow; a smaller number increases frequency") \ + \ product(intx, ParGCDesiredObjsFromOverflowList, 20, \ "The desired number of objects to claim from the overflow list") \ \ @@ -1259,8 +1328,14 @@ "The amount of young gen chosen by default per GC worker " \ "thread available ") \ \ - product(bool, CMSIncrementalMode, false, \ - "Whether CMS GC should operate in \"incremental\" mode") \ + product(bool, GCOverheadReporting, false, \ + "Enables the GC overhead reporting facility") \ + \ + product(intx, GCOverheadReportingPeriodMS, 100, \ + "Reporting period for conc GC overhead reporting, in ms ") \ + \ + product(bool, CMSIncrementalMode, false, \ + "Whether CMS GC should operate in \"incremental\" mode") \ \ product(uintx, CMSIncrementalDutyCycle, 10, \ "CMS incremental mode duty cycle (a percentage, 0-100). If" \ @@ -1322,7 +1397,11 @@ product(bool, CMSClassUnloadingEnabled, false, \ "Whether class unloading enabled when using CMS GC") \ \ - product(bool, CMSCompactWhenClearAllSoftRefs, true, \ + product(uintx, CMSClassUnloadingMaxInterval, 0, \ + "When CMS class unloading is enabled, the maximum CMS cycle count"\ + " for which classes may not be unloaded") \ + \ + product(bool, CMSCompactWhenClearAllSoftRefs, true, \ "Compact when asked to collect CMS gen with clear_all_soft_refs") \ \ product(bool, UseCMSCompactAtFullCollection, true, \ @@ -1507,16 +1586,29 @@ "Percentage of MinHeapFreeRatio in CMS generation that is " \ " allocated before a CMS collection cycle commences") \ \ - product(intx, CMSBootstrapOccupancy, 50, \ + product(intx, CMSTriggerPermRatio, 80, \ + "Percentage of MinHeapFreeRatio in the CMS perm generation that" \ + " is allocated before a CMS collection cycle commences, that " \ + " also collects the perm generation") \ + \ + product(uintx, CMSBootstrapOccupancy, 50, \ "Percentage CMS generation occupancy at which to " \ " initiate CMS collection for bootstrapping collection stats") \ \ product(intx, CMSInitiatingOccupancyFraction, -1, \ "Percentage CMS generation occupancy to start a CMS collection " \ - " cycle (A negative value means that CMSTirggerRatio is used)") \ - \ - product(bool, UseCMSInitiatingOccupancyOnly, false, \ - "Only use occupancy as a crierion for starting a CMS collection") \ + " cycle (A negative value means that CMSTriggerRatio is used)") \ + \ + product(intx, CMSInitiatingPermOccupancyFraction, -1, \ + "Percentage CMS perm generation occupancy to start a CMScollection"\ + " cycle (A negative value means that CMSTriggerPermRatio is used)")\ + \ + product(bool, UseCMSInitiatingOccupancyOnly, false, \ + "Only use occupancy as a crierion for starting a CMS collection") \ + \ + product(intx, CMSIsTooFullPercentage, 98, \ + "An absolute ceiling above which CMS will always consider the" \ + " perm gen ripe for 
collection") \ \ develop(bool, CMSTestInFreeList, false, \ "Check if the coalesced range is already in the " \ @@ -1570,6 +1662,9 @@ product(bool, ZeroTLAB, false, \ "Zero out the newly created TLAB") \ \ + product(bool, FastTLABRefill, true, \ + "Use fast TLAB refill code") \ + \ product(bool, PrintTLAB, false, \ "Print various TLAB related information") \ \ @@ -1690,8 +1785,12 @@ product(uintx, TenuredGenerationSizeSupplementDecay, 2, \ "Decay factor to TenuredGenerationSizeIncrement") \ \ - product(uintx, MaxGCPauseMillis, max_uintx, \ - "Adaptive size policy maximum GC pause time goal in msec") \ + product(uintx, MaxGCPauseMillis, max_uintx, \ + "Adaptive size policy maximum GC pause time goal in msec, " \ + "or (G1 Only) the max. GC time per MMU time slice") \ + \ + product(intx, GCPauseIntervalMillis, 500, \ + "Time slice for MMU specification") \ \ product(uintx, MaxGCMinorPauseMillis, max_uintx, \ "Adaptive size policy maximum GC minor pause time goal in msec") \ @@ -1759,6 +1858,9 @@ diagnostic(bool, VerifyDuringGC, false, \ "Verify memory system during GC (between phases)") \ \ + diagnostic(bool, GCParallelVerificationEnabled, true, \ + "Enable parallel memory system verification") \ + \ diagnostic(bool, VerifyRememberedSets, false, \ "Verify GC remembered sets") \ \ @@ -1797,6 +1899,9 @@ "number of times a GC thread (minus the coordinator) " \ "will sleep while yielding before giving up and resuming GC") \ \ + notproduct(bool, PrintFlagsFinal, false, \ + "Print all command line flags after argument processing") \ + \ /* gc tracing */ \ manageable(bool, PrintGC, false, \ "Print message at garbage collect") \ @@ -1902,8 +2007,12 @@ develop(bool, IgnoreLibthreadGPFault, false, \ "Suppress workaround for libthread GP fault") \ \ - /* JVMTI heap profiling */ \ - \ + product(bool, PrintJNIGCStalls, false, \ + "Print diagnostic message when GC is stalled" \ + "by JNI critical section") \ + \ + /* JVMTI heap profiling */ \ + \ diagnostic(bool, TraceJVMTIObjectTagging, false, \ "Trace JVMTI object tagging calls") \ \ @@ -2210,6 +2319,9 @@ product(bool, AggressiveOpts, false, \ "Enable aggressive optimizations - see arguments.cpp") \ \ + product(bool, UseStringCache, false, \ + "Enable String cache capabilities on String.java") \ + \ /* statistics */ \ develop(bool, UseVTune, false, \ "enable support for Intel's VTune profiler") \ @@ -2250,7 +2362,7 @@ product_pd(bool, RewriteFrequentPairs, \ "Rewrite frequently used bytecode pairs into a single bytecode") \ \ - product(bool, PrintInterpreter, false, \ + diagnostic(bool, PrintInterpreter, false, \ "Prints the generated interpreter code") \ \ product(bool, UseInterpreter, true, \ @@ -2300,7 +2412,7 @@ develop(bool, PrintBytecodePairHistogram, false, \ "Print histogram of the executed bytecode pairs") \ \ - develop(bool, PrintSignatureHandlers, false, \ + diagnostic(bool, PrintSignatureHandlers, false, \ "Print code generated for native method signature handlers") \ \ develop(bool, VerifyOops, false, \ @@ -2476,7 +2588,7 @@ develop(intx, MaxRecursiveInlineLevel, 1, \ "maximum number of nested recursive calls that are inlined") \ \ - develop(intx, InlineSmallCode, 1000, \ + product(intx, InlineSmallCode, 1000, \ "Only inline already compiled methods if their code size is " \ "less than this") \ \ @@ -2707,8 +2819,8 @@ product(intx, TargetSurvivorRatio, 50, \ "Desired percentage of survivor space used after scavenge") \ \ - product(intx, MarkSweepDeadRatio, 5, \ - "Percentage (0-100) of the old gen allowed as dead wood." 
\ + product(uintx, MarkSweepDeadRatio, 5, \ + "Percentage (0-100) of the old gen allowed as dead wood." \ "Serial mark sweep treats this as both the min and max value." \ "CMS uses this value only if it falls back to mark sweep." \ "Par compact uses a variable scale based on the density of the" \ @@ -2716,9 +2828,9 @@ "either completely full or completely empty. Par compact also" \ "has a smaller default value; see arguments.cpp.") \ \ - product(intx, PermMarkSweepDeadRatio, 20, \ - "Percentage (0-100) of the perm gen allowed as dead wood." \ - "See MarkSweepDeadRatio for collector-specific comments.") \ + product(uintx, PermMarkSweepDeadRatio, 20, \ + "Percentage (0-100) of the perm gen allowed as dead wood." \ + "See MarkSweepDeadRatio for collector-specific comments.") \ \ product(intx, MarkSweepAlwaysCompactCount, 4, \ "How often should we fully compact the heap (ignoring the dead " \ @@ -2760,6 +2872,12 @@ "how many entries we'll try to leave on the stack during " \ "parallel GC") \ \ + product(intx, DCQBarrierQueueBufferSize, 256, \ + "Number of elements in a dirty card queue buffer") \ + \ + product(intx, DCQBarrierProcessCompletedThreshold, 5, \ + "Number of completed dirty card buffers to trigger processing.") \ + \ /* stack parameters */ \ product_pd(intx, StackYellowPages, \ "Number of yellow zone (recoverable overflows) pages") \ @@ -3139,6 +3257,9 @@ "Skip assert() and verify() which page-in unwanted shared " \ "objects. ") \ \ + product(bool, AnonymousClasses, false, \ + "support sun.misc.Unsafe.defineAnonymousClass") \ + \ product(bool, TaggedStackInterpreter, false, \ "Insert tags in interpreter execution stack for oopmap generaion")\ \ @@ -3165,9 +3286,13 @@ product(bool, RelaxAccessControlCheck, false, \ "Relax the access control checks in the verifier") \ \ - product(bool, UseVMInterruptibleIO, true, \ + diagnostic(bool, PrintDTraceDOF, false, \ + "Print the DTrace DOF passed to the system for JSDT probes") \ + \ + product(bool, UseVMInterruptibleIO, false, \ "(Unstable, Solaris-specific) Thread interrupt before or with " \ - "EINTR for I/O operations results in OS_INTRPT") + "EINTR for I/O operations results in OS_INTRPT. The default value"\ + " of this flag is true for JDK 6 and earliers") /* @@ -3178,6 +3303,7 @@ #define DECLARE_PRODUCT_FLAG(type, name, value, doc) extern "C" type name; #define DECLARE_PD_PRODUCT_FLAG(type, name, doc) extern "C" type name; #define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc) extern "C" type name; +#define DECLARE_EXPERIMENTAL_FLAG(type, name, value, doc) extern "C" type name; #define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name; #define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name; #ifdef PRODUCT @@ -3189,12 +3315,19 @@ #define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type name; #define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type name; #endif +// Special LP64 flags, product only needed for now. 
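
The lp64_product flag kind introduced in this file is backed by the macro pairs that follow: a 64-bit build declares the flag as a real global the option parser can set, while a 32-bit build sees only a compile-time constant. A reduced sketch of the pattern; the doc-string argument is dropped and UseExampleOops is a made-up flag standing in for UseCompressedOops:

#include <cstdio>

// Reduced versions of the lp64_product declare/materialize macros.
#ifdef _LP64
  #define DECLARE_LP64_PRODUCT_FLAG(type, name, value)     extern "C" type name;
  #define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value) type name = value;
#else
  #define DECLARE_LP64_PRODUCT_FLAG(type, name, value)     const type name = value;
  #define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value) /* flag is constant */
#endif

DECLARE_LP64_PRODUCT_FLAG(bool, UseExampleOops, false)
MATERIALIZE_LP64_PRODUCT_FLAG(bool, UseExampleOops, false)

int main() {
  // On 32-bit builds this is a constant the compiler can fold away; on
  // 64-bit builds it is an ordinary global that option parsing could update.
  std::printf("UseExampleOops=%d\n", (int)UseExampleOops);
  return 0;
}
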
+#ifdef _LP64 +#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) extern "C" type name; +#else +#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) const type name = value; +#endif // _LP64 // Implementation macros #define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value; #define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc) type name = pd_##name; #define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc) type name = value; -#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value; +#define MATERIALIZE_EXPERIMENTAL_FLAG(type, name, value, doc) type name = value; +#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value; #define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value; #ifdef PRODUCT #define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) /* flag name is constant */ @@ -3205,8 +3338,13 @@ #define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name; #define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value; #endif +#ifdef _LP64 +#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value; +#else +#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */ +#endif // _LP64 -RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG) +RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG, DECLARE_LP64_PRODUCT_FLAG) RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG) --- old/hotspot/src/share/vm/runtime/globals_extension.hpp 2009-08-01 04:15:02.983797009 +0100 +++ new/hotspot/src/share/vm/runtime/globals_extension.hpp 2009-08-01 04:15:02.901136444 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)globals_extension.hpp 1.17 07/05/17 16:05:46 JVM" #endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ #define RUNTIME_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), #define RUNTIME_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name), #define RUNTIME_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), +#define RUNTIME_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), #define RUNTIME_MANAGEABLE_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), #define RUNTIME_PRODUCT_RW_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), #ifdef PRODUCT @@ -44,6 +45,11 @@ #define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name), #define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), #endif +#ifdef _LP64 +#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), +#else +#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc) /* flag is constant */ +#endif // _LP64 #define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), #define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name), @@ -72,19 +78,16 @@ #endif typedef enum { - RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, - RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, - RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER) - RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, - RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, - RUNTIME_NOTPRODUCT_FLAG_MEMBER) + RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER, RUNTIME_LP64_PRODUCT_FLAG_MEMBER) + RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER) +#ifndef KERNEL + G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER) +#endif #ifdef COMPILER1 - C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, - C1_NOTPRODUCT_FLAG_MEMBER) + C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER) #endif #ifdef COMPILER2 - C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, - C2_DIAGNOSTIC_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER) + C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER) #endif NUM_CommandLineFlag } CommandLineFlag; @@ -96,6 +99,7 @@ #define RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), #define RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type), #define RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), +#define 
RUNTIME_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), #define RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), #define RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), #ifdef PRODUCT @@ -119,6 +123,11 @@ #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type), #define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), #endif +#ifdef _LP64 +#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), +#else +#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */ +#endif // _LP64 #define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), @@ -135,28 +144,54 @@ #endif typedef enum { - RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, - RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, + RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, - RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE, + RUNTIME_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE, + RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE, - RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE) -RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, - RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, - RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, - RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE) + RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE, + RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE) + RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, + RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, + RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE) +#ifndef KERNEL + G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, + RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, + RUNTIME_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE, + RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE, + RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE) +#endif // KERNEL #ifdef COMPILER1 - C1_FLAGS(C1_DEVELOP_FLAG_MEMBER_WITH_TYPE, C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, C1_PRODUCT_FLAG_MEMBER_WITH_TYPE, - C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE) + C1_FLAGS(C1_DEVELOP_FLAG_MEMBER_WITH_TYPE, + C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, + C1_PRODUCT_FLAG_MEMBER_WITH_TYPE, + C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, + C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE) #endif #ifdef COMPILER2 - C2_FLAGS(C2_DEVELOP_FLAG_MEMBER_WITH_TYPE, C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, C2_PRODUCT_FLAG_MEMBER_WITH_TYPE, - C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE) + C2_FLAGS(C2_DEVELOP_FLAG_MEMBER_WITH_TYPE, + C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, + C2_PRODUCT_FLAG_MEMBER_WITH_TYPE, + C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, + C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, + C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE) #endif 
NUM_CommandLineFlagWithType } CommandLineFlagWithType; #define FLAG_IS_DEFAULT(name) (CommandLineFlagsEx::is_default(FLAG_MEMBER(name))) +#define FLAG_IS_ERGO(name) (CommandLineFlagsEx::is_ergo(FLAG_MEMBER(name))) +#define FLAG_IS_CMDLINE(name) (CommandLineFlagsEx::is_cmdline(FLAG_MEMBER(name))) #define FLAG_SET_DEFAULT(name, value) ((name) = (value)) @@ -174,4 +209,6 @@ static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin); static bool is_default(CommandLineFlag flag); + static bool is_ergo(CommandLineFlag flag); + static bool is_cmdline(CommandLineFlag flag); }; --- old/hotspot/src/share/vm/runtime/hpi.cpp 2009-08-01 04:15:03.919900107 +0100 +++ new/hotspot/src/share/vm/runtime/hpi.cpp 2009-08-01 04:15:03.849101619 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)hpi.cpp 1.18 07/05/17 16:05:48 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,8 @@ extern "C" { static void unimplemented_panic(const char *fmt, ...) { - Unimplemented(); + // mitigate testing damage from bug 6626677 + warning("hpi::unimplemented_panic called"); } static void unimplemented_monitorRegister(sys_mon_t *mid, char *info_str) { --- old/hotspot/src/share/vm/runtime/hpi.hpp 2009-08-01 04:15:04.684338613 +0100 +++ new/hotspot/src/share/vm/runtime/hpi.hpp 2009-08-01 04:15:04.615984936 +0100 @@ -69,6 +69,8 @@ static inline int socket_shutdown(int fd, int howto); static inline int recv(int fd, char *buf, int nBytes, int flags); static inline int send(int fd, char *buf, int nBytes, int flags); + // Variant of send that doesn't support interruptible I/O + static inline int raw_send(int fd, char *buf, int nBytes, int flags); static inline int timeout(int fd, long timeout); static inline int listen(int fd, int count); static inline int connect(int fd, struct sockaddr *him, int len); @@ -91,7 +93,7 @@ static inline struct protoent* get_proto_by_name(char* name); // HPI_LibraryInterface - static inline void dll_build_name(char *buf, int buf_len, char* path, + static inline void dll_build_name(char *buf, int buf_len, const char* path, const char *name); static inline void* dll_load(const char *name, char *ebuf, int ebuflen); static inline void dll_unload(void *lib); @@ -138,7 +140,15 @@ return result; \ } - +#define VM_HPIDECL_VOID(name, names, func, arg_type, arg_print, arg) \ + inline void hpi::name arg_type { \ + if (TraceHPI) { \ + tty->print("hpi::" names "("); \ + tty->print arg_print; \ + tty->print(") = "); \ + } \ + func arg; \ + } #define HPIDECL_VOID(name, names, intf, func, arg_type, arg_print, arg) \ inline void hpi::name arg_type { \ @@ -198,11 +208,11 @@ (fd, size)); // HPI_LibraryInterface -HPIDECL_VOID(dll_build_name, "dll_build_name", _library, BuildLibName, - (char *buf, int buf_len, char *path, const char *name), - ("buf = %p, buflen = %d, path = %s, name = %s", - buf, buf_len, path, name), - (buf, buf_len, path, name)); +VM_HPIDECL_VOID(dll_build_name, "dll_build_name", os::dll_build_name, + (char *buf, int buf_len, const char *path, const char *name), + ("buf = %p, buflen = %d, path = %s, name = %s", + buf, buf_len, path, name), + (buf, buf_len, path, name)); VM_HPIDECL(dll_load, "dll_load", os::dll_load, void *, "(void *)%p", --- old/hotspot/src/share/vm/runtime/init.cpp 2009-08-01 04:15:05.517237605 +0100 +++ 
new/hotspot/src/share/vm/runtime/init.cpp 2009-08-01 04:15:05.439109808 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)init.cpp 1.124 07/08/31 14:03:12 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,6 @@ // Initialization done by VM thread in vm_init_globals() void check_ThreadShadow(); -void check_basic_types(); void eventlog_init(); void mutex_init(); void chunkpool_init(); @@ -43,7 +42,6 @@ void classLoader_init(); void codeCache_init(); void VM_Version_init(); -void JDK_Version_init(); void stubRoutines_init1(); jint universe_init(); // dependent on codeCache_init and stubRoutines_init void interpreter_init(); // before any methods loaded @@ -76,7 +74,7 @@ void vm_init_globals() { check_ThreadShadow(); - check_basic_types(); + basic_types_init(); eventlog_init(); mutex_init(); chunkpool_init(); @@ -92,7 +90,6 @@ classLoader_init(); codeCache_init(); VM_Version_init(); - JDK_Version_init(); stubRoutines_init1(); jint status = universe_init(); // dependent on codeCache_init and stubRoutines_init if (status != JNI_OK) --- old/hotspot/src/share/vm/runtime/java.cpp 2009-08-01 04:15:06.331585876 +0100 +++ new/hotspot/src/share/vm/runtime/java.cpp 2009-08-01 04:15:06.245284528 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)java.cpp 1.223 07/07/16 14:37:42 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -393,11 +393,6 @@ StatSampler::disengage(); StatSampler::destroy(); - // shut down the TimeMillisUpdateTask - if (CacheTimeMillis) { - TimeMillisUpdateTask::disengage(); - } - #ifndef SERIALGC // stop CMS threads if (UseConcMarkSweepGC) { @@ -510,9 +505,9 @@ os::shutdown(); } -void vm_abort() { +void vm_abort(bool dump_core) { vm_perform_shutdown_actions(); - os::abort(PRODUCT_ONLY(false)); + os::abort(dump_core); ShouldNotReachHere(); } @@ -546,18 +541,24 @@ java_lang_Throwable::print_stack_trace(exception(), tty); tty->cr(); vm_notify_during_shutdown(NULL, NULL); - vm_abort(); + + // Failure during initialization, we don't want to dump core + vm_abort(false); } void vm_exit_during_initialization(symbolHandle ex, const char* message) { ResourceMark rm; vm_notify_during_shutdown(ex->as_C_string(), message); - vm_abort(); + + // Failure during initialization, we don't want to dump core + vm_abort(false); } void vm_exit_during_initialization(const char* error, const char* message) { vm_notify_during_shutdown(error, message); - vm_abort(); + + // Failure during initialization, we don't want to dump core + vm_abort(false); } void vm_shutdown_during_initialization(const char* error, const char* message) { @@ -565,32 +566,104 @@ vm_shutdown(); } -jdk_version_info JDK_Version::_version_info = {0}; -bool JDK_Version::_pre_jdk16_version = false; -int JDK_Version::_jdk_version = 0; +JDK_Version JDK_Version::_current; void JDK_Version::initialize() { + jdk_version_info info; + assert(!_current.is_valid(), "Don't initialize twice"); + void *lib_handle = os::native_java_library(); - jdk_version_info_fn_t func = - CAST_TO_FN_PTR(jdk_version_info_fn_t, hpi::dll_lookup(lib_handle, "JDK_GetVersionInfo0")); + jdk_version_info_fn_t func 
= CAST_TO_FN_PTR(jdk_version_info_fn_t, + os::dll_lookup(lib_handle, "JDK_GetVersionInfo0")); if (func == NULL) { // JDK older than 1.6 - _pre_jdk16_version = true; - return; - } + _current._partially_initialized = true; + } else { + (*func)(&info, sizeof(info)); - if (func != NULL) { - (*func)(&_version_info, sizeof(_version_info)); + int major = JDK_VERSION_MAJOR(info.jdk_version); + int minor = JDK_VERSION_MINOR(info.jdk_version); + int micro = JDK_VERSION_MICRO(info.jdk_version); + int build = JDK_VERSION_BUILD(info.jdk_version); + if (major == 1 && minor > 4) { + // We represent "1.5.0" as "5.0", but 1.4.2 as itself. + major = minor; + minor = micro; + micro = 0; + } + _current = JDK_Version(major, minor, micro, info.update_version, + info.special_update_version, build, + info.thread_park_blocker == 1); } - if (jdk_major_version() == 1) { - _jdk_version = jdk_minor_version(); - } else { - // If the release version string is changed to n.x.x (e.g. 7.0.0) in a future release - _jdk_version = jdk_major_version(); +} + +void JDK_Version::fully_initialize( + uint8_t major, uint8_t minor, uint8_t micro, uint8_t update) { + // This is only called when current is less than 1.6 and we've gotten + // far enough in the initialization to determine the exact version. + assert(major < 6, "not needed for JDK version >= 6"); + assert(is_partially_initialized(), "must not initialize"); + if (major < 5) { + // JDK verison sequence: 1.2.x, 1.3.x, 1.4.x, 5.0.x, 6.0.x, etc. + micro = minor; + minor = major; + major = 1; } + _current = JDK_Version(major, minor, micro, update); } void JDK_Version_init() { JDK_Version::initialize(); } + +static int64_t encode_jdk_version(const JDK_Version& v) { + return + ((int64_t)v.major_version() << (BitsPerByte * 5)) | + ((int64_t)v.minor_version() << (BitsPerByte * 4)) | + ((int64_t)v.micro_version() << (BitsPerByte * 3)) | + ((int64_t)v.update_version() << (BitsPerByte * 2)) | + ((int64_t)v.special_update_version() << (BitsPerByte * 1)) | + ((int64_t)v.build_number() << (BitsPerByte * 0)); +} + +int JDK_Version::compare(const JDK_Version& other) const { + assert(is_valid() && other.is_valid(), "Invalid version (uninitialized?)"); + if (!is_partially_initialized() && other.is_partially_initialized()) { + return -(other.compare(*this)); // flip the comparators + } + assert(!other.is_partially_initialized(), "Not initialized yet"); + if (is_partially_initialized()) { + assert(other.major_version() >= 6, + "Invalid JDK version comparison during initialization"); + return -1; + } else { + uint64_t e = encode_jdk_version(*this); + uint64_t o = encode_jdk_version(other); + return (e > o) ? 1 : ((e == o) ? 
0 : -1); + } +} + +void JDK_Version::to_string(char* buffer, size_t buflen) const { + size_t index = 0; + if (!is_valid()) { + jio_snprintf(buffer, buflen, "%s", "(uninitialized)"); + } else if (is_partially_initialized()) { + jio_snprintf(buffer, buflen, "%s", "(uninitialized) pre-1.6.0"); + } else { + index += jio_snprintf( + &buffer[index], buflen - index, "%d.%d", _major, _minor); + if (_micro > 0) { + index += jio_snprintf(&buffer[index], buflen - index, ".%d", _micro); + } + if (_update > 0) { + index += jio_snprintf(&buffer[index], buflen - index, "_%02d", _update); + } + if (_special > 0) { + index += jio_snprintf(&buffer[index], buflen - index, "%c", _special); + } + if (_build > 0) { + index += jio_snprintf(&buffer[index], buflen - index, "-b%02d", _build); + } + } +} --- old/hotspot/src/share/vm/runtime/java.hpp 2009-08-01 04:15:07.182442672 +0100 +++ new/hotspot/src/share/vm/runtime/java.hpp 2009-08-01 04:15:07.107250201 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)java.hpp 1.38 07/08/21 18:54:50 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ // Shutdown the VM but do not exit the process extern void vm_shutdown(); // Shutdown the VM and abort the process -extern void vm_abort(); +extern void vm_abort(bool dump_core=true); // Trigger any necessary notification of the VM being shutdown extern void notify_vm_shutdown(); @@ -51,76 +51,163 @@ extern void vm_exit_during_initialization(const char* error, const char* message = NULL); extern void vm_shutdown_during_initialization(const char* error, const char* message = NULL); -class JDK_Version : AllStatic { +/** + * Discovering the JDK_Version during initialization is tricky when the + * running JDK is less than JDK6. For JDK6 and greater, a "GetVersion" + * function exists in libjava.so and we simply call it during the + * 'initialize()' call to find the version. For JDKs with version < 6, no + * such call exists and we have to probe the JDK in order to determine + * the exact version. This probing cannot happen during late in + * the VM initialization process so there's a period of time during + * initialization when we don't know anything about the JDK version other than + * that it less than version 6. This is the "partially initialized" time, + * when we can answer only certain version queries (such as, is the JDK + * version greater than 5? Answer: no). Once the JDK probing occurs, we + * know the version and are considered fully initialized. + */ +class JDK_Version VALUE_OBJ_CLASS_SPEC { friend class VMStructs; + friend class Universe; + friend void JDK_Version_init(); private: - static jdk_version_info _version_info; - static bool _pre_jdk16_version; - static int _jdk_version; // JDK version number representing the release - // i.e. n in 1.n.x (= jdk_minor_version()) - public: + static JDK_Version _current; + + // In this class, we promote the minor version of release to be the + // major version for releases >= 5 in anticipation of the JDK doing the + // same thing. For example, we represent "1.5.0" as major version 5 (we + // drop the leading 1 and use 5 as the 'major'). 
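
The comment above describes the version promotion this patch introduces. A compact standalone model of that rule, plus the byte-packed ordering used by compare() (the shift layout mirrors encode_jdk_version(); everything else here is illustrative, not the VM class):

#include <cstdint>
#include <cstdio>

// Standalone model: "1.5.0" is reported as 5.0, while 1.4.2 keeps its shape.
struct SimpleVersion {
  uint8_t major, minor, micro, update, special, build;
};

static SimpleVersion promote(uint8_t major, uint8_t minor, uint8_t micro) {
  if (major == 1 && minor > 4) {
    return SimpleVersion{minor, micro, 0, 0, 0, 0};   // drop the leading 1
  }
  return SimpleVersion{major, minor, micro, 0, 0, 0};
}

// Pack the most significant field first so a plain integer compare orders versions.
static uint64_t encode(const SimpleVersion& v) {
  return ((uint64_t)v.major   << 40) | ((uint64_t)v.minor  << 32) |
         ((uint64_t)v.micro   << 24) | ((uint64_t)v.update << 16) |
         ((uint64_t)v.special <<  8) |  (uint64_t)v.build;
}

int main() {
  SimpleVersion a = promote(1, 5, 0);   // treated as 5.0.0
  SimpleVersion b = promote(1, 4, 2);   // stays 1.4.2
  std::printf("a = %d.%d.%d, b = %d.%d.%d, a newer: %d\n",
              a.major, a.minor, a.micro, b.major, b.minor, b.micro,
              encode(a) > encode(b) ? 1 : 0);
  return 0;
}
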
+ + uint8_t _major; + uint8_t _minor; + uint8_t _micro; + uint8_t _update; + uint8_t _special; + uint8_t _build; + + // If partially initialized, the above fields are invalid and we know + // that we're less than major version 6. + bool _partially_initialized; + + bool _thread_park_blocker; + + bool is_valid() const { + return (_major != 0 || _partially_initialized); + } + + // initializes or partially initializes the _current static field static void initialize(); - static int jdk_major_version() { return JDK_VERSION_MAJOR(_version_info.jdk_version); } - static int jdk_minor_version() { return JDK_VERSION_MINOR(_version_info.jdk_version); } - static int jdk_micro_version() { return JDK_VERSION_MICRO(_version_info.jdk_version); } - static int jdk_build_number() { return JDK_VERSION_BUILD(_version_info.jdk_version); } - - static bool is_pre_jdk16_version() { return _pre_jdk16_version; } - static bool is_jdk12x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 2; } - static bool is_jdk13x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 3; } - static bool is_jdk14x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 4; } - static bool is_jdk15x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 5; } - static bool is_jdk16x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 6; } - static bool is_jdk17x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 7; } - static bool supports_thread_park_blocker() { return _version_info.thread_park_blocker; } + // Completes initialization for a pre-JDK6 version. + static void fully_initialize(uint8_t major, uint8_t minor = 0, + uint8_t micro = 0, uint8_t update = 0); + + public: + + // Returns true if the the current version has only been partially initialized + static bool is_partially_initialized() { + return _current._partially_initialized; + } + + JDK_Version() : _major(0), _minor(0), _micro(0), _update(0), + _special(0), _build(0), _partially_initialized(false), + _thread_park_blocker(false) {} + + JDK_Version(uint8_t major, uint8_t minor = 0, uint8_t micro = 0, + uint8_t update = 0, uint8_t special = 0, uint8_t build = 0, + bool thread_park_blocker = false) : + _major(major), _minor(minor), _micro(micro), _update(update), + _special(special), _build(build), _partially_initialized(false), + _thread_park_blocker(thread_park_blocker) {} + + // Returns the current running JDK version + static JDK_Version current() { return _current; } + + // Factory methods for convenience + static JDK_Version jdk(uint8_t m) { + return JDK_Version(m); + } + + static JDK_Version jdk_update(uint8_t major, uint8_t update_number) { + return JDK_Version(major, 0, 0, update_number); + } + + uint8_t major_version() const { return _major; } + uint8_t minor_version() const { return _minor; } + uint8_t micro_version() const { return _micro; } + uint8_t update_version() const { return _update; } + uint8_t special_update_version() const { return _special; } + uint8_t build_number() const { return _build; } + + bool supports_thread_park_blocker() const { + return _thread_park_blocker; + } + + // Performs a full ordering comparison using all fields (update, build, etc.) 
+ int compare(const JDK_Version& other) const; + + /** + * Performs comparison using only the major version, returning negative + * if the major version of 'this' is less than the parameter, 0 if it is + * equal, and a positive value if it is greater. + */ + int compare_major(int version) const { + if (_partially_initialized) { + if (version >= 6) { + return -1; + } else { + assert(false, "Can't make this comparison during init time"); + return -1; // conservative + } + } else { + return major_version() - version; + } + } + + void to_string(char* buffer, size_t buflen) const; + + // Convenience methods for queries on the current major/minor version + static bool is_jdk12x_version() { + return current().compare_major(2) == 0; + } + + static bool is_jdk13x_version() { + return current().compare_major(3) == 0; + } + + static bool is_jdk14x_version() { + return current().compare_major(4) == 0; + } + + static bool is_jdk15x_version() { + return current().compare_major(5) == 0; + } + + static bool is_jdk16x_version() { + return current().compare_major(6) == 0; + } + + static bool is_jdk17x_version() { + return current().compare_major(7) == 0; + } + + static bool is_gte_jdk13x_version() { + return current().compare_major(3) >= 0; + } static bool is_gte_jdk14x_version() { - // Keep the semantics of this that the version number is >= 1.4 - assert(is_jdk_version_initialized(), "Not initialized"); - return _jdk_version >= 4; + return current().compare_major(4) >= 0; } + static bool is_gte_jdk15x_version() { - // Keep the semantics of this that the version number is >= 1.5 - assert(is_jdk_version_initialized(), "Not initialized"); - return _jdk_version >= 5; + return current().compare_major(5) >= 0; } + static bool is_gte_jdk16x_version() { - // Keep the semantics of this that the version number is >= 1.6 - assert(is_jdk_version_initialized(), "Not initialized"); - return _jdk_version >= 6; + return current().compare_major(6) >= 0; } static bool is_gte_jdk17x_version() { - // Keep the semantics of this that the version number is >= 1.7 - assert(is_jdk_version_initialized(), "Not initialized"); - return _jdk_version >= 7; - } - - static bool is_jdk_version_initialized() { - return _jdk_version > 0; - } - - // These methods are defined to deal with pre JDK 1.6 versions - static void set_jdk12x_version() { - assert(_pre_jdk16_version && !is_jdk_version_initialized(), "must not initialize"); - _jdk_version = 2; - _version_info.jdk_version = (1 << 24) | (2 << 16); - } - static void set_jdk13x_version() { - assert(_pre_jdk16_version && !is_jdk_version_initialized(), "must not initialize"); - _jdk_version = 3; - _version_info.jdk_version = (1 << 24) | (3 << 16); - } - static void set_jdk14x_version() { - assert(_pre_jdk16_version && !is_jdk_version_initialized(), "must not initialize"); - _jdk_version = 4; - _version_info.jdk_version = (1 << 24) | (4 << 16); - } - static void set_jdk15x_version() { - assert(_pre_jdk16_version && !is_jdk_version_initialized(), "must not initialize"); - _jdk_version = 5; - _version_info.jdk_version = (1 << 24) | (5 << 16); + return current().compare_major(7) >= 0; } }; --- old/hotspot/src/share/vm/runtime/javaCalls.cpp 2009-08-01 04:15:08.142322765 +0100 +++ new/hotspot/src/share/vm/runtime/javaCalls.cpp 2009-08-01 04:15:08.060128725 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)javaCalls.cpp 1.220 07/05/05 17:06:51 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,11 +40,6 @@ guarantee(!thread->is_Compiler_thread(), "cannot make java calls from the compiler"); _result = result; - // Make sure that that the value of the higest_lock is at least the same as the current stackpointer, - // since, the Java code is highly likely to use locks. - // Use '(address)this' to guarantee that highest_lock address is conservative and inside our thread - thread->update_highest_lock((address)this); - // Allocate handle block for Java code. This must be done before we change thread_state to _thread_in_Java_or_stub, // since it can potentially block. JNIHandleBlock* new_handles = JNIHandleBlock::allocate_block(thread); @@ -312,8 +307,12 @@ CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();) - // Make sure that the arguments have the right type - debug_only(args->verify(method, result->get_type(), thread)); + // Verify the arguments + + if (CheckJNICalls) { + args->verify(method, result->get_type(), thread); + } + else debug_only(args->verify(method, result->get_type(), thread)); // Ignore call if method is empty if (method->is_empty_method()) { @@ -434,25 +433,27 @@ return TaggedStackInterpreter ? _parameters : _value; } -//-------------------------------------------------------------------------------------- -// Non-Product code -#ifndef PRODUCT class SignatureChekker : public SignatureIterator { private: bool *_is_oop; int _pos; BasicType _return_type; + intptr_t* _value; + Thread* _thread; public: bool _is_return; - SignatureChekker(symbolHandle signature, BasicType return_type, bool is_static, bool* is_oop) : SignatureIterator(signature) { + SignatureChekker(symbolHandle signature, BasicType return_type, bool is_static, bool* is_oop, intptr_t* value, Thread* thread) : SignatureIterator(signature) { _is_oop = is_oop; _is_return = false; _return_type = return_type; - _pos = 0; - if (!is_static) { + _pos = 0; + _value = value; + _thread = thread; + + if (!is_static) { check_value(true); // Receiver must be an oop } } @@ -491,7 +492,25 @@ if (_is_return) { check_return_type(t); return; - } + } + + // verify handle and the oop pointed to by handle + int p = _pos; + bool bad = false; + // If argument is oop + if (_is_oop[p]) { + intptr_t v = _value[p]; + if (v != 0 ) { + size_t t = (size_t)v; + bad = (t < (size_t)os::vm_page_size() ) || !Handle::raw_resolve((oop *)v)->is_oop_or_null(true); + if (CheckJNICalls && bad) { + ReportJNIFatalError((JavaThread*)_thread, "Bad JNI oop argument"); + } + } + // for the regular debug case. + assert(!bad, "Bad JNI oop argument"); + } + check_value(true); } @@ -508,6 +527,7 @@ void do_array(int begin, int end) { check_obj(T_OBJECT); } }; + void JavaCallArguments::verify(methodHandle method, BasicType return_type, Thread *thread) { guarantee(method->size_of_parameters() == size_of_parameters(), "wrong no. 
of arguments pushed"); @@ -518,10 +538,9 @@ // Check that oop information is correct symbolHandle signature (thread, method->signature()); - SignatureChekker sc(signature, return_type, method->is_static(),_is_oop); + SignatureChekker sc(signature, return_type, method->is_static(),_is_oop, _value, thread); sc.iterate_parameters(); sc.check_doing_return(true); sc.iterate_returntype(); } -#endif // PRODUCT --- old/hotspot/src/share/vm/runtime/javaCalls.hpp 2009-08-01 04:15:09.024578066 +0100 +++ new/hotspot/src/share/vm/runtime/javaCalls.hpp 2009-08-01 04:15:08.947761722 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)javaCalls.hpp 1.81 07/05/05 17:06:47 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -153,7 +153,7 @@ int size_of_parameters() const { return _size; } // Verify that pushed arguments fits a given method - void verify(methodHandle method, BasicType return_type, Thread *thread) PRODUCT_RETURN; + void verify(methodHandle method, BasicType return_type, Thread *thread); }; // All calls to Java have to go via JavaCalls. Sets up the stack frame --- old/hotspot/src/share/vm/runtime/jniHandles.cpp 2009-08-01 04:15:09.799593004 +0100 +++ new/hotspot/src/share/vm/runtime/jniHandles.cpp 2009-08-01 04:15:09.725547920 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)jniHandles.cpp 1.64 07/05/17 16:06:13 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -209,9 +209,10 @@ int _count; public: CountHandleClosure(): _count(0) {} - void do_oop(oop* unused) { + virtual void do_oop(oop* unused) { _count++; } + virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); } int count() { return _count; } }; @@ -233,9 +234,10 @@ class VerifyHandleClosure: public OopClosure { public: - void do_oop(oop* root) { + virtual void do_oop(oop* root) { (*root)->verify(); } + virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); } }; void JNIHandles::verify() { --- old/hotspot/src/share/vm/runtime/mutex.cpp 2009-08-01 04:15:10.642104722 +0100 +++ new/hotspot/src/share/vm/runtime/mutex.cpp 2009-08-01 04:15:10.557026372 +0100 @@ -3,7 +3,7 @@ #endif /* - * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1122,21 +1122,26 @@ assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ; } -void Monitor::ClearMonitor (Monitor * m) { - m->_owner = NULL ; - m->_snuck = false ; - m->_name = "UNKNOWN" ; - m->_LockWord.FullWord = 0 ; - m->_EntryList = NULL ; - m->_OnDeck = NULL ; - m->_WaitSet = NULL ; - m->_WaitLock[0] = 0 ; +void Monitor::ClearMonitor (Monitor * m, const char *name) { + m->_owner = NULL ; + m->_snuck = false ; + if (name == NULL) { + strcpy(m->_name, "UNKNOWN") ; + } else { + strncpy(m->_name, name, MONITOR_NAME_LEN - 1); + m->_name[MONITOR_NAME_LEN - 1] = '\0'; + } + m->_LockWord.FullWord = 0 ; + m->_EntryList = NULL ; + m->_OnDeck = NULL ; + m->_WaitSet = NULL ; + m->_WaitLock[0] = 0 ; } Monitor::Monitor() { ClearMonitor(this); } -Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) { - ClearMonitor (this) ; +Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) { + ClearMonitor (this, name) ; #ifdef ASSERT _allow_vm_block = allow_vm_block; _rank = Rank ; @@ -1147,8 +1152,8 @@ assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ; } -Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) { - ClearMonitor ((Monitor *) this) ; +Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) { + ClearMonitor ((Monitor *) this, name) ; #ifdef ASSERT _allow_vm_block = allow_vm_block; _rank = Rank ; --- old/hotspot/src/share/vm/runtime/mutex.hpp 2009-08-01 04:15:11.563394510 +0100 +++ new/hotspot/src/share/vm/runtime/mutex.hpp 2009-08-01 04:15:11.472586502 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)mutex.hpp 1.70 07/07/09 15:32:56 JVM" #endif /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -85,6 +85,9 @@ // *in that order*. If their implementations change such that these // assumptions are violated, a whole lot of code will break. +// The default length of monitor name is choosen to be 64 to avoid false sharing. +static const int MONITOR_NAME_LEN = 64; + class Monitor : public CHeapObj { public: @@ -127,11 +130,10 @@ ParkEvent * volatile _EntryList ; // List of threads waiting for entry ParkEvent * volatile _OnDeck ; // heir-presumptive volatile intptr_t _WaitLock [1] ; // Protects _WaitSet - ParkEvent * volatile _WaitSet ; // LL of ParkEvents - volatile bool _snuck; // Used for sneaky locking (evil). - const char * _name; // Name of mutex + ParkEvent * volatile _WaitSet ; // LL of ParkEvents + volatile bool _snuck; // Used for sneaky locking (evil). int NotifyCount ; // diagnostic assist - double pad [8] ; // avoid false sharing + char _name[MONITOR_NAME_LEN]; // Name of mutex // Debugging fields for naming, deadlock detection, etc. 
(some only used in debug mode) #ifndef PRODUCT @@ -173,8 +175,8 @@ int ILocked () ; protected: - static void ClearMonitor (Monitor * m) ; - Monitor() ; + static void ClearMonitor (Monitor * m, const char* name = NULL) ; + Monitor() ; public: Monitor(int rank, const char *name, bool allow_vm_block=false); --- old/hotspot/src/share/vm/runtime/mutexLocker.cpp 2009-08-01 04:15:12.420203259 +0100 +++ new/hotspot/src/share/vm/runtime/mutexLocker.cpp 2009-08-01 04:15:12.339450642 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)mutexLocker.cpp 1.181 07/07/11 13:22:55 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -49,8 +49,8 @@ Mutex* JfieldIdCreation_lock = NULL; Monitor* JNICritical_lock = NULL; Mutex* JvmtiThreadState_lock = NULL; -Monitor* JvmtiPendingEvent_lock = NULL; -Mutex* Heap_lock = NULL; +Monitor* JvmtiPendingEvent_lock = NULL; +Monitor* Heap_lock = NULL; Mutex* ExpandHeap_lock = NULL; Mutex* AdapterHandlerLibrary_lock = NULL; Mutex* SignatureHandlerLibrary_lock = NULL; @@ -70,8 +70,19 @@ Monitor* SLT_lock = NULL; Monitor* iCMS_lock = NULL; Monitor* FullGCCount_lock = NULL; +Monitor* CMark_lock = NULL; +Monitor* ZF_mon = NULL; +Monitor* Cleanup_mon = NULL; +Monitor* G1ConcRefine_mon = NULL; +Mutex* SATB_Q_FL_lock = NULL; +Monitor* SATB_Q_CBL_mon = NULL; +Mutex* Shared_SATB_Q_lock = NULL; +Mutex* DirtyCardQ_FL_lock = NULL; +Monitor* DirtyCardQ_CBL_mon = NULL; +Mutex* Shared_DirtyCardQ_lock = NULL; Mutex* ParGCRareEvent_lock = NULL; -Mutex* DerivedPointerTableGC_lock = NULL; +Mutex* EvacFailureStack_lock = NULL; +Mutex* DerivedPointerTableGC_lock = NULL; Mutex* Compile_lock = NULL; Monitor* MethodCompileQueue_lock = NULL; #ifdef TIERED @@ -105,6 +116,9 @@ Mutex* PerfDataManager_lock = NULL; Mutex* OopMapCacheAlloc_lock = NULL; +Mutex* MMUTracker_lock = NULL; +Mutex* HotCardCache_lock = NULL; + Monitor* GCTaskManager_lock = NULL; Mutex* Management_lock = NULL; @@ -153,6 +167,23 @@ def(iCMS_lock , Monitor, special, true ); // CMS incremental mode start/stop notification def(FullGCCount_lock , Monitor, leaf, true ); // in support of ExplicitGCInvokesConcurrent } + if (UseG1GC) { + def(CMark_lock , Monitor, nonleaf, true ); // coordinate concurrent mark thread + def(ZF_mon , Monitor, leaf, true ); + def(Cleanup_mon , Monitor, nonleaf, true ); + def(G1ConcRefine_mon , Monitor, nonleaf, true ); + def(SATB_Q_FL_lock , Mutex , special, true ); + def(SATB_Q_CBL_mon , Monitor, nonleaf, true ); + def(Shared_SATB_Q_lock , Mutex, nonleaf, true ); + + def(DirtyCardQ_FL_lock , Mutex , special, true ); + def(DirtyCardQ_CBL_mon , Monitor, nonleaf, true ); + def(Shared_DirtyCardQ_lock , Mutex, nonleaf, true ); + + def(MMUTracker_lock , Mutex , leaf , true ); + def(HotCardCache_lock , Mutex , special , true ); + def(EvacFailureStack_lock , Mutex , nonleaf , true ); + } def(ParGCRareEvent_lock , Mutex , leaf , true ); def(DerivedPointerTableGC_lock , Mutex, leaf, true ); def(CodeCache_lock , Mutex , special, true ); @@ -191,11 +222,7 @@ def(Safepoint_lock , Monitor, safepoint, true ); // locks SnippetCache_lock/Threads_lock - if (!UseMembar) { - def(SerializePage_lock , Monitor, leaf, true ); - } - - def(Threads_lock , Monitor, barrier, true ); + def(Threads_lock , Monitor, barrier, true ); def(VMOperationQueue_lock , Monitor, nonleaf, true ); // VM_thread allowed to block 
on these def(VMOperationRequest_lock , Monitor, nonleaf, true ); @@ -210,7 +237,7 @@ def(SLT_lock , Monitor, nonleaf, false ); // used in CMS GC for locking PLL lock } - def(Heap_lock , Mutex , nonleaf+1, false); + def(Heap_lock , Monitor, nonleaf+1, false); def(JfieldIdCreation_lock , Mutex , nonleaf+1, true ); // jfieldID, Used in VM_Operation def(JNICachedItableIndex_lock , Mutex , nonleaf+1, false); // Used to cache an itable index during JNI invoke --- old/hotspot/src/share/vm/runtime/mutexLocker.hpp 2009-08-01 04:15:13.339589510 +0100 +++ new/hotspot/src/share/vm/runtime/mutexLocker.hpp 2009-08-01 04:15:13.255323241 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)mutexLocker.hpp 1.152 07/07/09 15:31:19 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,8 +40,8 @@ extern Mutex* JfieldIdCreation_lock; // a lock on creating JNI static field identifiers extern Monitor* JNICritical_lock; // a lock used while entering and exiting JNI critical regions, allows GC to sometimes get in extern Mutex* JvmtiThreadState_lock; // a lock on modification of JVMTI thread data -extern Monitor* JvmtiPendingEvent_lock; // a lock on the JVMTI pending events list -extern Mutex* Heap_lock; // a lock on the heap +extern Monitor* JvmtiPendingEvent_lock; // a lock on the JVMTI pending events list +extern Monitor* Heap_lock; // a lock on the heap extern Mutex* ExpandHeap_lock; // a lock on expanding the heap extern Mutex* AdapterHandlerLibrary_lock; // a lock on the AdapterHandlerLibrary extern Mutex* SignatureHandlerLibrary_lock; // a lock on the SignatureHandlerLibrary @@ -55,8 +55,7 @@ extern Monitor* VMOperationQueue_lock; // a lock on queue of vm_operations waiting to execute extern Monitor* VMOperationRequest_lock; // a lock on Threads waiting for a vm_operation to terminate extern Monitor* Safepoint_lock; // a lock used by the safepoint abstraction -extern Monitor* SerializePage_lock; // a lock used when VMThread changing serialize memory page permission during safepoint -extern Monitor* Threads_lock; // a lock on the Threads table of active Java threads +extern Monitor* Threads_lock; // a lock on the Threads table of active Java threads // (also used by Safepoints too to block threads creation/destruction) extern Monitor* CGC_lock; // used for coordination between // fore- & background GC threads. @@ -64,8 +63,30 @@ extern Monitor* SLT_lock; // used in CMS GC for acquiring PLL extern Monitor* iCMS_lock; // CMS incremental mode start/stop notification extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc +extern Monitor* CMark_lock; // used for concurrent mark thread coordination +extern Monitor* ZF_mon; // used for G1 conc zero-fill. +extern Monitor* Cleanup_mon; // used for G1 conc cleanup. +extern Monitor* G1ConcRefine_mon; // used for G1 conc-refine + // coordination. + +extern Mutex* SATB_Q_FL_lock; // Protects SATB Q + // buffer free list. +extern Monitor* SATB_Q_CBL_mon; // Protects SATB Q + // completed buffer queue. +extern Mutex* Shared_SATB_Q_lock; // Lock protecting SATB + // queue shared by + // non-Java threads. + +extern Mutex* DirtyCardQ_FL_lock; // Protects dirty card Q + // buffer free list. +extern Monitor* DirtyCardQ_CBL_mon; // Protects dirty card Q + // completed buffer queue. 
+extern Mutex* Shared_DirtyCardQ_lock; // Lock protecting dirty card + // queue shared by + // non-Java threads. // (see option ExplicitGCInvokesConcurrent) extern Mutex* ParGCRareEvent_lock; // Synchronizes various (rare) parallel GC ops. +extern Mutex* EvacFailureStack_lock; // guards the evac failure scan stack extern Mutex* Compile_lock; // a lock held when Compilation is updating code (used to block CodeCache traversal, CHA updates, etc) extern Monitor* MethodCompileQueue_lock; // a lock held when method compilations are enqueued, dequeued #ifdef TIERED @@ -97,6 +118,10 @@ extern Mutex* ParkerFreeList_lock; extern Mutex* OopMapCacheAlloc_lock; // protects allocation of oop_map caches +extern Mutex* MMUTracker_lock; // protects the MMU + // tracker data structures +extern Mutex* HotCardCache_lock; // protects the hot card cache + extern Mutex* Management_lock; // a lock used to serialize JVM management extern Monitor* LowMemory_lock; // a lock used for low memory detection --- old/hotspot/src/share/vm/runtime/os.cpp 2009-08-01 04:15:14.235712634 +0100 +++ new/hotspot/src/share/vm/runtime/os.cpp 2009-08-01 04:15:14.150476872 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)os.cpp 1.185 07/10/04 10:49:22 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,10 +36,7 @@ uintptr_t os::_serialize_page_mask = 0; long os::_rand_seed = 1; int os::_processor_count = 0; -volatile jlong os::_global_time = 0; -volatile int os::_global_time_lock = 0; -bool os::_use_global_time = false; -size_t os::_page_sizes[os::page_sizes_max]; +size_t os::_page_sizes[os::page_sizes_max]; #ifndef PRODUCT int os::num_mallocs = 0; // # of calls to malloc/realloc @@ -47,74 +44,6 @@ int os::num_frees = 0; // # of calls to free #endif -// Atomic read of a jlong is assured by a seqlock; see update_global_time() -jlong os::read_global_time() { -#ifdef _LP64 - return _global_time; -#else - volatile int lock; - volatile jlong current_time; - int ctr = 0; - - for (;;) { - lock = _global_time_lock; - - // spin while locked - while ((lock & 0x1) != 0) { - ++ctr; - if ((ctr & 0xFFF) == 0) { - // Guarantee writer progress. Can't use yield; yield is advisory - // and has almost no effect on some platforms. Don't need a state - // transition - the park call will return promptly. - assert(Thread::current() != NULL, "TLS not initialized"); - assert(Thread::current()->_ParkEvent != NULL, "sync not initialized"); - Thread::current()->_ParkEvent->park(1); - } - lock = _global_time_lock; - } - - OrderAccess::loadload(); - current_time = _global_time; - OrderAccess::loadload(); - - // ratify seqlock value - if (lock == _global_time_lock) { - return current_time; - } - } -#endif -} - -// -// NOTE - Assumes only one writer thread! -// -// We use a seqlock to guarantee that jlong _global_time is updated -// atomically on 32-bit platforms. A locked value is indicated by -// the lock variable LSB == 1. Readers will initially read the lock -// value, spinning until the LSB == 0. They then speculatively read -// the global time value, then re-read the lock value to ensure that -// it hasn't changed. If the lock value has changed, the entire read -// sequence is retried. -// -// Writers simply set the LSB = 1 (i.e. 
increment the variable), -// update the global time, then release the lock and bump the version -// number (i.e. increment the variable again.) In this case we don't -// even need a CAS since we ensure there's only one writer. -// -void os::update_global_time() { -#ifdef _LP64 - _global_time = timeofday(); -#else - assert((_global_time_lock & 0x1) == 0, "multiple writers?"); - jlong current_time = timeofday(); - _global_time_lock++; // lock - OrderAccess::storestore(); - _global_time = current_time; - OrderAccess::storestore(); - _global_time_lock++; // unlock -#endif -} - // Fill in buffer with current local time as an ISO-8601 string. // E.g., yyyy-mm-ddThh:mm:ss-zzzz. // Returns buffer, or NULL if it failed. @@ -141,20 +70,19 @@ return NULL; } // Get the current time - jlong milliseconds_since_19700101 = timeofday(); + jlong milliseconds_since_19700101 = javaTimeMillis(); const int milliseconds_per_microsecond = 1000; const time_t seconds_since_19700101 = milliseconds_since_19700101 / milliseconds_per_microsecond; const int milliseconds_after_second = milliseconds_since_19700101 % milliseconds_per_microsecond; // Convert the time value to a tm and timezone variable - const struct tm *time_struct_temp = localtime(&seconds_since_19700101); - if (time_struct_temp == NULL) { - assert(false, "Failed localtime"); + struct tm time_struct; + if (localtime_pd(&seconds_since_19700101, &time_struct) == NULL) { + assert(false, "Failed localtime_pd"); return NULL; } - // Save the results of localtime - const struct tm time_struct = *time_struct_temp; + const time_t zone = timezone; // If daylight savings time is in effect, @@ -167,10 +95,10 @@ UTC_to_local = UTC_to_local - seconds_per_hour; } // Compute the time zone offset. - // localtime(3C) sets timezone to the difference (in seconds) + // localtime_pd sets timezone to the difference (in seconds) // between UTC and and local time. - // ISO 8601 says we need the difference between local time and UTC, - // we change the sign of the localtime(3C) result. + // ISO 8601 says we need the difference between local time and UTC, + // we change the sign of the localtime_pd result. const time_t local_to_UTC = -(UTC_to_local); // Then we have to figure out if if we are ahead (+) or behind (-) UTC. char sign_local_to_UTC = '+'; @@ -410,29 +338,38 @@ char buffer[JVM_MAXPATHLEN]; char ebuf[1024]; - // Try to load verify dll first. In 1.3 java dll depends on it and is not always - // able to find it when the loading executable is outside the JDK. + // Try to load verify dll first. In 1.3 java dll depends on it and is not + // always able to find it when the loading executable is outside the JDK. // In order to keep working with 1.2 we ignore any loading errors. 
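
For orientation, the dll_build_name/dll_load/dll_lookup calls that follow replace the old hpi:: indirections; on Linux or Solaris they amount to composing a lib<name>.so path and calling dlopen/dlsym. A rough POSIX-only sketch under those assumptions (the directory string is just an example, not the real launcher layout):

#include <cstddef>
#include <cstdio>
#include <dlfcn.h>

// Compose a platform library name, then load it and probe a symbol --
// roughly the shape of dll_build_name + dll_load + dll_lookup on POSIX.
static void build_lib_name(char* buf, size_t len,
                           const char* dir, const char* stem) {
  std::snprintf(buf, len, "%s/lib%s.so", dir, stem);   // "java" -> .../libjava.so
}

int main() {
  char buf[4096];
  build_lib_name(buf, sizeof(buf), "/opt/jdk/jre/lib", "java");  // path is only an example
  void* handle = dlopen(buf, RTLD_LAZY);
  if (handle == nullptr) {
    std::fprintf(stderr, "load failed: %s\n", dlerror());
    return 1;
  }
  // Same shape as the JNI_OnLoad / JDK_GetVersionInfo0 lookups in this patch.
  void* entry = dlsym(handle, "JNI_OnLoad");
  std::printf("JNI_OnLoad %s\n", entry != nullptr ? "found" : "not exported");
  dlclose(handle);
  return 0;
}
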
- hpi::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(), "verify"); - hpi::dll_load(buffer, ebuf, sizeof(ebuf)); + dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(), "verify"); + dll_load(buffer, ebuf, sizeof(ebuf)); // Load java dll - hpi::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(), "java"); - _native_java_library = hpi::dll_load(buffer, ebuf, sizeof(ebuf)); + dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(), "java"); + _native_java_library = dll_load(buffer, ebuf, sizeof(ebuf)); if (_native_java_library == NULL) { vm_exit_during_initialization("Unable to load native library", ebuf); } - // The JNI_OnLoad handling is normally done by method load in java.lang.ClassLoader$NativeLibrary, - // but the VM loads the base library explicitly so we have to check for JNI_OnLoad as well - const char *onLoadSymbols[] = JNI_ONLOAD_SYMBOLS; - JNI_OnLoad_t JNI_OnLoad = CAST_TO_FN_PTR(JNI_OnLoad_t, hpi::dll_lookup(_native_java_library, onLoadSymbols[0])); - if (JNI_OnLoad != NULL) { - JavaThread* thread = JavaThread::current(); - ThreadToNativeFromVM ttn(thread); - HandleMark hm(thread); - jint ver = (*JNI_OnLoad)(&main_vm, NULL); - if (!Threads::is_supported_jni_version_including_1_1(ver)) { - vm_exit_during_initialization("Unsupported JNI version"); + } + static jboolean onLoaded = JNI_FALSE; + if (onLoaded) { + // We may have to wait to fire OnLoad until TLS is initialized. + if (ThreadLocalStorage::is_initialized()) { + // The JNI_OnLoad handling is normally done by method load in + // java.lang.ClassLoader$NativeLibrary, but the VM loads the base library + // explicitly so we have to check for JNI_OnLoad as well + const char *onLoadSymbols[] = JNI_ONLOAD_SYMBOLS; + JNI_OnLoad_t JNI_OnLoad = CAST_TO_FN_PTR( + JNI_OnLoad_t, dll_lookup(_native_java_library, onLoadSymbols[0])); + if (JNI_OnLoad != NULL) { + JavaThread* thread = JavaThread::current(); + ThreadToNativeFromVM ttn(thread); + HandleMark hm(thread); + jint ver = (*JNI_OnLoad)(&main_vm, NULL); + onLoaded = JNI_TRUE; + if (!Threads::is_supported_jni_version_including_1_1(ver)) { + vm_exit_during_initialization("Unsupported JNI version"); + } } } } @@ -929,7 +866,6 @@ bool os::set_boot_path(char fileSep, char pathSep) { - const char* home = Arguments::get_java_home(); int home_len = (int)strlen(home); @@ -959,6 +895,59 @@ return true; } +/* + * Splits a path, based on its separator, the number of + * elements is returned back in n. + * It is the callers responsibility to: + * a> check the value of n, and n may be 0. + * b> ignore any empty path elements + * c> free up the data. 
+ */ +char** os::split_path(const char* path, int* n) { + *n = 0; + if (path == NULL || strlen(path) == 0) { + return NULL; + } + const char psepchar = *os::path_separator(); + char* inpath = (char*)NEW_C_HEAP_ARRAY(char, strlen(path) + 1); + if (inpath == NULL) { + return NULL; + } + strncpy(inpath, path, strlen(path)); + int count = 1; + char* p = strchr(inpath, psepchar); + // Get a count of elements to allocate memory + while (p != NULL) { + count++; + p++; + p = strchr(p, psepchar); + } + char** opath = (char**) NEW_C_HEAP_ARRAY(char*, count); + if (opath == NULL) { + return NULL; + } + + // do the actual splitting + p = inpath; + for (int i = 0 ; i < count ; i++) { + size_t len = strcspn(p, os::path_separator()); + if (len > JVM_MAXPATHLEN) { + return NULL; + } + // allocate the string and add terminator storage + char* s = (char*)NEW_C_HEAP_ARRAY(char, len + 1); + if (s == NULL) { + return NULL; + } + strncpy(s, p, len); + s[len] = '\0'; + opath[i] = s; + p += len + 1; + } + FREE_C_HEAP_ARRAY(char, inpath); + *n = count; + return opath; +} void os::set_memory_serialize_page(address page) { int count = log2_intptr(sizeof(class JavaThread)) - log2_intptr(64); @@ -970,6 +959,8 @@ set_serialize_page_mask((uintptr_t)(vm_page_size() - sizeof(int32_t))); } +static volatile intptr_t SerializePageLock = 0; + // This method is called from signal handler when SIGSEGV occurs while the current // thread tries to store to the "read-only" memory serialize page during state // transition. @@ -977,30 +968,29 @@ if (TraceSafepoint) { tty->print_cr("Block until the serialize page permission restored"); } - // When VMThread is holding the SerializePage_lock during modifying the + // When VMThread is holding the SerializePageLock during modifying the // access permission of the memory serialize page, the following call // will block until the permission of that page is restored to rw. // Generally, it is unsafe to manipulate locks in signal handlers, but in // this case, it's OK as the signal is synchronous and we know precisely when - // it can occur. SerializePage_lock is a transiently-held leaf lock, so - // lock_without_safepoint_check should be safe. - SerializePage_lock->lock_without_safepoint_check(); - SerializePage_lock->unlock(); + // it can occur. + Thread::muxAcquire(&SerializePageLock, "set_memory_serialize_page"); + Thread::muxRelease(&SerializePageLock); } // Serialize all thread state variables void os::serialize_thread_states() { - // On some platforms such as Solaris & Linux, the time duration of the page - // permission restoration is observed to be much longer than expected due to - // scheduler starvation problem etc. To avoid the long synchronization - // time and expensive page trap spinning, 'SerializePage_lock' is used to block - // the mutator thread if such case is encountered. Since this method is always - // called by VMThread during safepoint, lock_without_safepoint_check is used - // instead. See bug 6546278. - SerializePage_lock->lock_without_safepoint_check(); - os::protect_memory( (char *)os::get_memory_serialize_page(), os::vm_page_size() ); - os::unguard_memory( (char *)os::get_memory_serialize_page(), os::vm_page_size() ); - SerializePage_lock->unlock(); + // On some platforms such as Solaris & Linux, the time duration of the page + // permission restoration is observed to be much longer than expected due to + // scheduler starvation problem etc. 
To avoid the long synchronization + // time and expensive page trap spinning, 'SerializePageLock' is used to block + // the mutator thread if such case is encountered. See bug 6546278 for details. + Thread::muxAcquire(&SerializePageLock, "serialize_thread_states"); + os::protect_memory((char *)os::get_memory_serialize_page(), + os::vm_page_size(), MEM_PROT_READ); + os::protect_memory((char *)os::get_memory_serialize_page(), + os::vm_page_size(), MEM_PROT_RW); + Thread::muxRelease(&SerializePageLock); } // Returns true if the current stack pointer is above the stack shadow --- old/hotspot/src/share/vm/runtime/os.hpp 2009-08-01 04:15:15.156074823 +0100 +++ new/hotspot/src/share/vm/runtime/os.hpp 2009-08-01 04:15:15.070315665 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)os.hpp 1.223 07/10/04 10:49:22 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ class Event; class DLL; class FileHandle; +template class GrowableArray; // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose @@ -69,9 +70,6 @@ static address _polling_page; static volatile int32_t * _mem_serialize_page; static uintptr_t _serialize_page_mask; - static volatile jlong _global_time; - static volatile int _global_time_lock; - static bool _use_global_time; static size_t _page_sizes[page_sizes_max]; static void init_page_sizes(size_t default_page_size) { @@ -90,12 +88,7 @@ static bool getenv(const char* name, char* buffer, int len); static bool have_special_privileges(); - - static jlong timeofday(); - static void enable_global_time() { _use_global_time = true; } - static void disable_global_time() { _use_global_time = false; } - static jlong read_global_time(); - static void update_global_time(); + static jlong javaTimeMillis(); static jlong javaTimeNanos(); static void javaTimeNanos_info(jvmtiTimerInfo *info_ptr); @@ -115,9 +108,22 @@ static jlong elapsed_counter(); static jlong elapsed_frequency(); - // Return current local time in a string (YYYY-MM-DD HH:MM:SS). - // It is MT safe, but not async-safe, as reading time zone + // The "virtual time" of a thread is the amount of time a thread has + // actually run. The first function indicates whether the OS supports + // this functionality for the current thread, and if so: + // * the second enables vtime tracking (if that is required). + // * the third tells whether vtime is enabled. + // * the fourth returns the elapsed virtual time for the current + // thread. + static bool supports_vtime(); + static bool enable_vtime(); + static bool vtime_enabled(); + static double elapsedVTime(); + + // Return current local time in a string (YYYY-MM-DD HH:MM:SS). + // It is MT safe, but not async-safe, as reading time zone // information may require a lock on some platforms. + static struct tm* localtime_pd (const time_t* clock, struct tm* res); static char* local_time_string(char *buf, size_t buflen); // Fill in buffer with current local time as an ISO-8601 string. // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. 
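
The ISO-8601 comment above stays, but the implementation is now routed through localtime_pd so each platform can supply a reentrant conversion instead of the shared static buffer of localtime(3C). A small self-contained sketch of the same idea using POSIX localtime_r and strftime (the ".mmm" milliseconds and explicit sign handling done by the VM code are omitted here for brevity):

#include <stdio.h>
#include <time.h>

// Reentrant timestamp formatting: localtime_r fills a caller-supplied struct tm
// instead of the static buffer returned by localtime(3C), which is the point of
// the localtime_pd() indirection above.
static char* iso8601_now(char* buf, size_t buflen) {
  time_t seconds = time(NULL);
  struct tm local;
  if (localtime_r(&seconds, &local) == NULL) {
    return NULL;                          // conversion failed
  }
  if (strftime(buf, buflen, "%Y-%m-%dT%H:%M:%S%z", &local) == 0) {
    return NULL;                          // buffer too small
  }
  return buf;
}

int main() {
  char buf[64];
  if (iso8601_now(buf, sizeof(buf)) != NULL) {
    printf("%s\n", buf);                  // e.g. 2009-08-01T04:15:14+0100
  }
  return 0;
}
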
@@ -203,7 +209,11 @@ static bool commit_memory(char* addr, size_t size, size_t alignment_hint); static bool uncommit_memory(char* addr, size_t bytes); static bool release_memory(char* addr, size_t bytes); - static bool protect_memory(char* addr, size_t bytes); + + enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX }; + static bool protect_memory(char* addr, size_t bytes, ProtType prot, + bool is_committed = true); + static bool guard_memory(char* addr, size_t bytes); static bool unguard_memory(char* addr, size_t bytes); static char* map_memory(int fd, const char* file_name, size_t file_offset, @@ -217,7 +227,9 @@ static void realign_memory(char *addr, size_t bytes, size_t alignment_hint); // NUMA-specific interface - static void numa_make_local(char *addr, size_t bytes); + static bool numa_has_static_binding(); + static bool numa_has_group_homing(); + static void numa_make_local(char *addr, size_t bytes, int lgrp_hint); static void numa_make_global(char *addr, size_t bytes); static size_t numa_get_groups_num(); static size_t numa_get_leaf_groups(int *ids, size_t size); @@ -239,6 +251,7 @@ static bool large_page_init(); static size_t large_page_size(); static bool can_commit_large_page_memory(); + static bool can_execute_large_page_memory(); // OS interface to polling page static address get_polling_page() { return _polling_page; } @@ -393,6 +406,10 @@ static const char* get_temp_directory(); static const char* get_current_directory(char *buf, int buflen); + // Builds a platform-specific full library path given a ld path and lib name + static void dll_build_name(char* buffer, size_t size, + const char* pathname, const char* fname); + // Symbol lookup, find nearest function name; basically it implements // dladdr() for all platforms. Name of the nearest function is copied // to buf. Distance from its base address is returned as offset. @@ -416,6 +433,9 @@ // same architecture as Hotspot is running on static void* dll_load(const char *name, char *ebuf, int ebuflen); + // lookup symbol in a shared library + static void* dll_lookup(void* handle, const char* name); + // Print out system information; they are called by fatal error handler. // Output format may be different on different platforms. static void print_os_info(outputStream* st); @@ -587,6 +607,7 @@ char fileSep, char pathSep); static bool set_boot_path(char fileSep, char pathSep); + static char** split_path(const char* path, int* n); }; // Note that "PAUSE" is almost always used with synchronization --- old/hotspot/src/share/vm/runtime/perfMemory.cpp 2009-08-01 04:15:16.032989675 +0100 +++ new/hotspot/src/share/vm/runtime/perfMemory.cpp 2009-08-01 04:15:15.957308667 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)perfMemory.cpp 1.28 07/09/13 11:29:49 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,14 @@ # include "incls/_precompiled.incl" # include "incls/_perfMemory.cpp.incl" +// Prefix of performance data file. +const char PERFDATA_NAME[] = "hsperfdata"; + +// Add 1 for the '_' character between PERFDATA_NAME and pid. The '\0' terminating +// character will be included in the sizeof(PERFDATA_NAME) operation. 
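
The sizing comment above can be checked in isolation: sizeof(PERFDATA_NAME) already counts the trailing '\0', so adding UINT_CHARS digits plus one '_' covers the worst-case "hsperfdata_<pid>" name, which is exactly what the PERFDATA_FILENAME_LEN constant declared next encodes. A tiny standalone check (names reproduced only for the arithmetic):

#include <cstddef>
#include <cstdio>
#include <cstring>

int main() {
  const char PERFDATA_NAME[] = "hsperfdata";
  const size_t UINT_CHARS = 10;                 // digits in UINT_MAX (4294967295)
  // sizeof() already counts the '\0'; the extra 1 is for the '_' separator.
  const size_t filename_len = sizeof(PERFDATA_NAME) + UINT_CHARS + 1;

  char buf[32];
  std::snprintf(buf, sizeof(buf), "%s_%u", PERFDATA_NAME, 4294967295u);
  std::printf("reserved = %zu, worst case needs = %zu\n",
              filename_len, std::strlen(buf) + 1);   // both print 22
  return 0;
}
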
+static const size_t PERFDATA_FILENAME_LEN = sizeof(PERFDATA_NAME) + + UINT_CHARS + 1; + char* PerfMemory::_start = NULL; char* PerfMemory::_end = NULL; char* PerfMemory::_top = NULL; --- old/hotspot/src/share/vm/runtime/perfMemory.hpp 2009-08-01 04:15:16.852159001 +0100 +++ new/hotspot/src/share/vm/runtime/perfMemory.hpp 2009-08-01 04:15:16.770246140 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)perfMemory.hpp 1.23 07/05/05 17:06:54 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,7 +98,7 @@ } PerfDataEntry; // Prefix of performance data file. -static const char PERFDATA_NAME[] = "hsperfdata"; +extern const char PERFDATA_NAME[]; // UINT_CHARS contains the number of characters holding a process id // (i.e. pid). pid is defined as unsigned "int" so the maximum possible pid value @@ -106,11 +106,6 @@ // string. static const size_t UINT_CHARS = 10; -// Add 1 for the '_' character between PERFDATA_NAME and pid. The '\0' terminating -// character will be included in the sizeof(PERFDATA_NAME) operation. -static const size_t PERFDATA_FILENAME_LEN = sizeof(PERFDATA_NAME) + - UINT_CHARS + 1; - /* the PerfMemory class manages creation, destruction, * and allocation of the PerfData region. */ --- old/hotspot/src/share/vm/runtime/reflection.cpp 2009-08-01 04:15:17.663513254 +0100 +++ new/hotspot/src/share/vm/runtime/reflection.cpp 2009-08-01 04:15:17.570370157 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)reflection.cpp 1.179 07/08/09 09:12:05 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -459,12 +459,34 @@ return can_relax_access_check_for(current_class, new_class, classloader_only); } +static bool under_host_klass(instanceKlass* ik, klassOop host_klass) { + DEBUG_ONLY(int inf_loop_check = 1000 * 1000 * 1000); + for (;;) { + klassOop hc = (klassOop) ik->host_klass(); + if (hc == NULL) return false; + if (hc == host_klass) return true; + ik = instanceKlass::cast(hc); + + // There's no way to make a host class loop short of patching memory. + // Therefore there cannot be a loop here unles there's another bug. + // Still, let's check for it. + assert(--inf_loop_check > 0, "no host_klass loop"); + } +} + bool Reflection::can_relax_access_check_for( klassOop accessor, klassOop accessee, bool classloader_only) { instanceKlass* accessor_ik = instanceKlass::cast(accessor); instanceKlass* accessee_ik = instanceKlass::cast(accessee); - if (RelaxAccessControlCheck || - (accessor_ik->major_version() < JAVA_1_5_VERSION && + + // If either is on the other's host_klass chain, access is OK, + // because one is inside the other. 
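
The under_host_klass() helper added above walks the host-class chain defensively, with a debug-only iteration bound so a (theoretically impossible) cycle cannot hang the access check that follows. The same chain-walk-with-guard idiom in a generic, self-contained form (the Node type is a stand-in, not instanceKlass):

#include <cassert>
#include <cstdio>

// Stand-in for a klass with an optional host back-pointer (NULL if none).
struct Node {
  const Node* host;
};

// True if 'target' appears anywhere on n's host chain. The bounded counter
// mirrors the DEBUG_ONLY(inf_loop_check) guard above: a cycle would indicate
// another bug, but the walk still refuses to spin forever in debug builds.
static bool under_host(const Node* n, const Node* target) {
  int guard = 1000 * 1000 * 1000;
  for (;;) {
    const Node* h = n->host;
    if (h == nullptr) return false;
    if (h == target)  return true;
    n = h;
    assert(--guard > 0 && "no host chain loop expected");
  }
}

int main() {
  Node outer{nullptr}, inner{&outer}, innermost{&inner};
  std::printf("%d %d\n", under_host(&innermost, &outer) ? 1 : 0,
                         under_host(&outer, &inner) ? 1 : 0);
  return 0;
}
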
+ if (under_host_klass(accessor_ik, accessee) || + under_host_klass(accessee_ik, accessor)) + return true; + + if (RelaxAccessControlCheck || + (accessor_ik->major_version() < JAVA_1_5_VERSION && accessee_ik->major_version() < JAVA_1_5_VERSION)) { return classloader_only && Verifier::relax_verify_for(accessor_ik->class_loader()) && @@ -503,7 +525,8 @@ if (!protected_restriction) { // See if current_class is a subclass of field_class if (Klass::cast(current_class)->is_subclass_of(field_class)) { - if (current_class == resolved_class || + if (access.is_static() || // static fields are ok, see 6622385 + current_class == resolved_class || field_class == resolved_class || Klass::cast(current_class)->is_subclass_of(resolved_class) || Klass::cast(resolved_class)->is_subclass_of(current_class)) { @@ -1551,10 +1574,11 @@ } instanceKlassHandle klass(THREAD, java_lang_Class::as_klassOop(mirror)); - if (!klass->methods()->is_within_bounds(slot)) { + methodOop m = klass->method_with_idnum(slot); + if (m == NULL) { THROW_MSG_0(vmSymbols::java_lang_InternalError(), "invoke"); } - methodHandle method(THREAD, methodOop(klass->methods()->obj_at(slot))); + methodHandle method(THREAD, m); return invoke(klass, method, receiver, override, ptypes, rtype, args, true, THREAD); } @@ -1566,11 +1590,12 @@ bool override = java_lang_reflect_Constructor::override(constructor_mirror) != 0; objArrayHandle ptypes(THREAD, objArrayOop(java_lang_reflect_Constructor::parameter_types(constructor_mirror))); - instanceKlassHandle klass(THREAD, java_lang_Class::as_klassOop(mirror)); - if (!klass->methods()->is_within_bounds(slot)) { + instanceKlassHandle klass(THREAD, java_lang_Class::as_klassOop(mirror)); + methodOop m = klass->method_with_idnum(slot); + if (m == NULL) { THROW_MSG_0(vmSymbols::java_lang_InternalError(), "invoke"); } - methodHandle method(THREAD, methodOop(klass->methods()->obj_at(slot))); + methodHandle method(THREAD, m); assert(method->name() == vmSymbols::object_initializer_name(), "invalid constructor"); // Make sure klass gets initialize --- old/hotspot/src/share/vm/runtime/sharedRuntime.cpp 2009-08-01 04:15:18.740135077 +0100 +++ new/hotspot/src/share/vm/runtime/sharedRuntime.cpp 2009-08-01 04:15:18.640471067 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)sharedRuntime.cpp 1.383 08/05/13 16:13:34 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -114,6 +114,25 @@ } #endif // PRODUCT +#ifndef SERIALGC + +// G1 write-barrier pre: executed before a pointer store. +JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread)) + if (orig == NULL) { + assert(false, "should be optimized out"); + return; + } + // store the original value that was in the field reference + thread->satb_mark_queue().enqueue(orig); +JRT_END + +// G1 write-barrier post: executed after a pointer store. +JRT_LEAF(void, SharedRuntime::g1_wb_post(void* card_addr, JavaThread* thread)) + thread->dirty_card_queue().enqueue(card_addr); +JRT_END + +#endif // !SERIALGC + JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x)) return x * y; @@ -379,6 +398,32 @@ throw_and_post_jvmti_exception(thread, h_exception); } +// The interpreter code to call this tracing function is only +// called/generated when TraceRedefineClasses has the right bits +// set. 
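// (A usage sketch, assuming the usual bit-mask form of the flag: running with
//  -XX:TraceRedefineClasses=4096, i.e. bit 0x00001000, logs each entry to an
//  obsolete method, while adding 8192, bit 0x00002000, also turns such an entry
//  into a guarantee() failure for debugging; these are the two RC_TRACE checks
//  in the body below.)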
Since obsolete methods are never compiled, we don't have +// to modify the compilers to generate calls to this function. +// +JRT_LEAF(int, SharedRuntime::rc_trace_method_entry( + JavaThread* thread, methodOopDesc* method)) + assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call"); + + if (method->is_obsolete()) { + // We are calling an obsolete method, but this is not necessarily + // an error. Our method could have been redefined just after we + // fetched the methodOop from the constant pool. + + // RC_TRACE macro has an embedded ResourceMark + RC_TRACE_WITH_THREAD(0x00001000, thread, + ("calling obsolete method '%s'", + method->name_and_sig_as_C_string())); + if (RC_TRACE_ENABLED(0x00002000)) { + // this option is provided to debug calls to obsolete methods + guarantee(false, "faulting at call to an obsolete method."); + } + } + return 0; +JRT_END + // ret_pc points into caller; we are returning caller's exception handler // for given exception address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception, @@ -540,7 +585,10 @@ // the caller was at a call site, it's safe to destroy all // caller-saved registers, as these entry points do. VtableStub* vt_stub = VtableStubs::stub_containing(pc); - guarantee(vt_stub != NULL, "unable to find SEGVing vtable stub"); + + // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error. + if (vt_stub == NULL) return NULL; + if (vt_stub->is_abstract_method_error(pc)) { assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs"); return StubRoutines::throw_AbstractMethodError_entry(); @@ -549,7 +597,9 @@ } } else { CodeBlob* cb = CodeCache::find_blob(pc); - guarantee(cb != NULL, "exception happened outside interpreter, nmethods and vtable stubs (1)"); + + // If code blob is NULL, then return NULL to signal handler to report the SEGV error. + if (cb == NULL) return NULL; // Exception happened in CodeCache. Must be either: // 1. Inline-cache check in C2I handler blob, @@ -558,7 +608,7 @@ if (!cb->is_nmethod()) { guarantee(cb->is_adapter_blob(), - "exception happened outside interpreter, nmethods and vtable stubs (2)"); + "exception happened outside interpreter, nmethods and vtable stubs (1)"); // There is no handler here, so we will simply unwind. return StubRoutines::throw_NullPointerException_at_call_entry(); } @@ -1492,7 +1542,7 @@ char* message = NEW_RESOURCE_ARRAY(char, msglen); if (NULL == message) { // Shouldn't happen, but don't cause even more problems if it does - message = const_cast(objName); + message = const_cast(objName); } else { jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName); } @@ -1751,12 +1801,7 @@ // _fingerprints array (it is not safe for concurrent readers and a single // writer: this can be fixed if it becomes a problem). - // Shouldn't be here if running -Xint - if (Arguments::mode() == Arguments::_int) { - ShouldNotReachHere(); - } - - // Get the address of the ic_miss handlers before we grab the + // Get the address of the ic_miss handlers before we grab the // AdapterHandlerLibrary_lock. 
This fixes bug 6236259 which // was caused by the initialization of the stubs happening // while we held the lock and then notifying jvmti while @@ -1840,7 +1885,25 @@ regs); B = BufferBlob::create(AdapterHandlerEntry::name, &buffer); - if (B == NULL) return -2; // Out of CodeCache space + if (B == NULL) { + // CodeCache is full, disable compilation + // Ought to log this but compile log is only per compile thread + // and we're some non descript Java thread. + UseInterpreter = true; + if (UseCompiler || AlwaysCompileLoopMethods ) { +#ifndef PRODUCT + warning("CodeCache is full. Compiler has been disabled"); + if (CompileTheWorld || ExitOnFullCodeCache) { + before_exit(JavaThread::current()); + exit_globals(); // will delete tty + vm_direct_exit(CompileTheWorld ? 0 : 1); + } +#endif + UseCompiler = false; + AlwaysCompileLoopMethods = false; + } + return 0; // Out of CodeCache space (_handlers[0] == NULL) + } entry->relocate(B->instructions_begin()); #ifndef PRODUCT // debugging suppport @@ -1982,6 +2045,64 @@ return nm; } +#ifdef HAVE_DTRACE_H +// Create a dtrace nmethod for this method. The wrapper converts the +// java compiled calling convention to the native convention, makes a dummy call +// (actually nops for the size of the call instruction, which become a trap if +// probe is enabled). The returns to the caller. Since this all looks like a +// leaf no thread transition is needed. + +nmethod *AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) { + ResourceMark rm; + nmethod* nm = NULL; + + if (PrintCompilation) { + ttyLocker ttyl; + tty->print("--- n%s "); + method->print_short_name(tty); + if (method->is_static()) { + tty->print(" (static)"); + } + tty->cr(); + } + + { + // perform the work while holding the lock, but perform any printing + // outside the lock + MutexLocker mu(AdapterHandlerLibrary_lock); + // See if somebody beat us to it + nm = method->code(); + if (nm) { + return nm; + } + + // Improve alignment slightly + u_char* buf = (u_char*) + (((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1)); + CodeBuffer buffer(buf, AdapterHandlerLibrary_size); + // Need a few relocation entries + double locs_buf[20]; + buffer.insts()->initialize_shared_locs( + (relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo)); + MacroAssembler _masm(&buffer); + + // Generate the compiled-to-native wrapper code + nm = SharedRuntime::generate_dtrace_nmethod(&_masm, method); + } + return nm; +} + +// the dtrace method needs to convert java lang string to utf8 string. +void SharedRuntime::get_utf(oopDesc* src, address dst) { + typeArrayOop jlsValue = java_lang_String::value(src); + int jlsOffset = java_lang_String::offset(src); + int jlsLen = java_lang_String::length(src); + jchar* jlsPos = (jlsLen == 0) ? 
NULL : + jlsValue->char_at_addr(jlsOffset); + (void) UNICODE::as_utf8(jlsPos, jlsLen, (char *)dst, max_dtrace_string_size); +} +#endif // ndef HAVE_DTRACE_H + // ------------------------------------------------------------------------- // Java-Java calling convention // (what you use when Java calls Java) @@ -2161,6 +2282,8 @@ #ifndef PRODUCT bool AdapterHandlerLibrary::contains(CodeBlob* b) { + if (_handlers == NULL) return false; + for (int i = 0 ; i < _handlers->length() ; i++) { AdapterHandlerEntry* a = get_entry(i); if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) return true; --- old/hotspot/src/share/vm/runtime/sharedRuntime.hpp 2009-08-01 04:15:19.718635687 +0100 +++ new/hotspot/src/share/vm/runtime/sharedRuntime.hpp 2009-08-01 04:15:19.649646956 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)sharedRuntime.hpp 1.158 07/10/05 19:47:48 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -62,6 +62,10 @@ #endif // !PRODUCT public: + + // max bytes for each dtrace string parameter + enum { max_dtrace_string_size = 256 }; + // The following arithmetic routines are used on platforms that do // not have machine instructions to implement their functionality. // Do not remove these. @@ -98,6 +102,12 @@ static address raw_exception_handler_for_return_address(address return_address); static address exception_handler_for_return_address(address return_address); +#ifndef SERIALGC + // G1 write barriers + static void g1_wb_pre(oopDesc* orig, JavaThread *thread); + static void g1_wb_post(void* card_addr, JavaThread* thread); +#endif // !SERIALGC + // exception handling and implicit exceptions static address compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception, bool force_unwind, bool top_frame_only); @@ -159,6 +169,9 @@ static void throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception); static void throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message = NULL); + // RedefineClasses() tracing support for obsolete method entry + static int rc_trace_method_entry(JavaThread* thread, methodOopDesc* m); + // To be used as the entry point for unresolved native methods. static address native_method_throw_unsatisfied_link_error_entry(); @@ -261,9 +274,6 @@ public: - - static void create_native_wrapper (JavaThread* thread, methodOop method); - // Read the array of BasicTypes from a Java signature, and compute where // compiled Java code would like to put the results. Values in reg_lo and // reg_hi refer to 4-byte quantities. Values less than SharedInfo::stack0 are @@ -357,6 +367,19 @@ VMRegPair *regs, BasicType ret_type ); +#ifdef HAVE_DTRACE_H + // Generate a dtrace wrapper for a given method. The method takes arguments + // in the Java compiled code convention, marshals them to the native + // convention (handlizes oops, etc), transitions to native, makes the call, + // returns to java state (possibly blocking), unhandlizes any result and + // returns. + static nmethod *generate_dtrace_nmethod(MacroAssembler* masm, + methodHandle method); + + // dtrace support to convert a Java string to utf8 + static void get_utf(oopDesc* src, address dst); +#endif // def HAVE_DTRACE_H + // A compiled caller has just called the interpreter, but compiled code // exists. 
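A rough sketch of how the two G1 entry points declared above are meant to be used around a reference store; the surrounding names (field, new_val, card_for()) are illustrative only:

    oop pre_val = *field;                               // value about to be overwritten
    if (pre_val != NULL) {
      SharedRuntime::g1_wb_pre(pre_val, thread);        // SATB barrier: log the old value
    }
    *field = new_val;                                   // the store itself
    SharedRuntime::g1_wb_post(card_for(field), thread); // log the card covering the field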
Patch the caller so he no longer calls into the interpreter. static void fixup_callers_callsite(methodOopDesc* moop, address ret_pc); @@ -495,42 +518,55 @@ address _c2i_unverified_entry; public: + + // The name we give all buffer blobs + static const char* name; + AdapterHandlerEntry(address i2c_entry, address c2i_entry, address c2i_unverified_entry): _i2c_entry(i2c_entry), _c2i_entry(c2i_entry), _c2i_unverified_entry(c2i_unverified_entry) { } - // The name we give all buffer blobs - static const char* name; address get_i2c_entry() { return _i2c_entry; } address get_c2i_entry() { return _c2i_entry; } address get_c2i_unverified_entry() { return _c2i_unverified_entry; } + void relocate(address new_base); #ifndef PRODUCT void print(); #endif /* PRODUCT */ }; - class AdapterHandlerLibrary: public AllStatic { private: + static u_char _buffer[]; // the temporary code buffer + static GrowableArray* _fingerprints; // the fingerprint collection + static GrowableArray * _handlers; // the corresponding handlers enum { AbstractMethodHandler = 1 // special handler for abstract methods }; - static GrowableArray* _fingerprints; // the fingerprint collection - static GrowableArray * _handlers; // the corresponding handlers - static u_char _buffer[]; // the temporary code buffer static void initialize(); - static AdapterHandlerEntry* get_entry( int index ) { return _handlers->at(index); } static int get_create_adapter_index(methodHandle method); - static address get_i2c_entry( int index ) { return get_entry(index)->get_i2c_entry(); } - static address get_c2i_entry( int index ) { return get_entry(index)->get_c2i_entry(); } - static address get_c2i_unverified_entry( int index ) { return get_entry(index)->get_c2i_unverified_entry(); } + static address get_i2c_entry( int index ) { + return get_entry(index)->get_i2c_entry(); + } + static address get_c2i_entry( int index ) { + return get_entry(index)->get_c2i_entry(); + } + static address get_c2i_unverified_entry( int index ) { + return get_entry(index)->get_c2i_unverified_entry(); + } public: + static AdapterHandlerEntry* get_entry( int index ) { return _handlers->at(index); } static nmethod* create_native_wrapper(methodHandle method); - static AdapterHandlerEntry* get_adapter(methodHandle method) { return get_entry(get_create_adapter_index(method)); } + static AdapterHandlerEntry* get_adapter(methodHandle method) { + return get_entry(get_create_adapter_index(method)); + } +#ifdef HAVE_DTRACE_H + static nmethod* create_dtrace_nmethod (methodHandle method); +#endif // HAVE_DTRACE_H #ifndef PRODUCT static void print_handler(CodeBlob* b); --- old/hotspot/src/share/vm/runtime/stackValue.cpp 2009-08-01 04:15:20.618742437 +0100 +++ new/hotspot/src/share/vm/runtime/stackValue.cpp 2009-08-01 04:15:20.544825262 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)stackValue.cpp 1.28 07/05/24 14:38:39 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -89,6 +89,22 @@ case Location::lng: // Long value in an aligned adjacent pair return new StackValue(*(intptr_t*)value_addr); + case Location::narrowoop: { + union { intptr_t p; narrowOop noop;} value; + value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF); + if (loc.is_register()) { + // The callee has no clue whether the register holds an int, + // long or is unused. He always saves a long. 
Here we know + // a long was saved, but we only want an int back. Narrow the + // saved long to the int that the JVM wants. + value.noop = (narrowOop) *(julong*) value_addr; + } else { + value.noop = *(narrowOop*) value_addr; + } + // Decode narrowoop and wrap a handle around the oop + Handle h(oopDesc::decode_heap_oop(value.noop)); + return new StackValue(h); + } #endif case Location::oop: { Handle h(*(oop *)value_addr); // Wrap a handle around the oop --- old/hotspot/src/share/vm/runtime/statSampler.cpp 2009-08-01 04:15:21.465206514 +0100 +++ new/hotspot/src/share/vm/runtime/statSampler.cpp 2009-08-01 04:15:21.392717921 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)statSampler.cpp 1.24 07/05/05 17:06:58 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -220,6 +220,7 @@ "java.class.path", "java.endorsed.dirs", "java.ext.dirs", + "java.version", "java.home", NULL }; --- old/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp 2009-08-01 04:15:22.302690878 +0100 +++ new/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp 2009-08-01 04:15:22.223481982 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)stubCodeGenerator.cpp 1.30 07/05/17 16:06:31 JVM" #endif /* - * Copyright 1997-2004 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -72,7 +72,6 @@ _first_stub = _last_stub = NULL; } -#ifndef PRODUCT extern "C" { static int compare_cdesc(const void* void_a, const void* void_b) { int ai = (*((StubCodeDesc**) void_a))->index(); @@ -80,10 +79,8 @@ return ai - bi; } } -#endif StubCodeGenerator::~StubCodeGenerator() { -#ifndef PRODUCT if (PrintStubCode) { CodeBuffer* cbuf = _masm->code(); CodeBlob* blob = CodeCache::find_blob_unsafe(cbuf->insts()->start()); @@ -108,7 +105,6 @@ tty->cr(); } } -#endif //PRODUCT } --- old/hotspot/src/share/vm/runtime/synchronizer.cpp 2009-08-01 04:15:24.687549463 +0100 +++ new/hotspot/src/share/vm/runtime/synchronizer.cpp 2009-08-01 04:15:24.574799338 +0100 @@ -1123,7 +1123,7 @@ // m->OwnerIsThread = 1. Note that a thread can inflate an object // that it has stack-locked -- as might happen in wait() -- directly // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. - m->set_owner (mark->locker()); + m->set_owner(mark->locker()); m->set_object(object); // TODO-FIXME: assert BasicLock->dhw != 0. @@ -1219,8 +1219,7 @@ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); } - THREAD->update_highest_lock((address)lock); - slow_enter (obj, lock, THREAD) ; + slow_enter (obj, lock, THREAD) ; } void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) { @@ -3366,13 +3365,13 @@ // If the wakee is cold then transiently setting it's affinity // to the current CPU is a good idea. 
// See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt + DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self); Trigger->unpark() ; // Maintain stats and report events to JVMTI if (ObjectSynchronizer::_sync_Parks != NULL) { ObjectSynchronizer::_sync_Parks->inc() ; } - DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self); } --- old/hotspot/src/share/vm/runtime/task.cpp 2009-08-01 04:15:25.725724884 +0100 +++ new/hotspot/src/share/vm/runtime/task.cpp 2009-08-01 04:15:25.649072612 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)task.cpp 1.27 07/05/05 17:06:59 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,7 +70,6 @@ PeriodicTask::PeriodicTask(size_t interval_time) : _counter(0), _interval(interval_time) { - assert(is_init_completed(), "Periodic tasks should not start during VM initialization"); // Sanity check the interval time assert(_interval >= PeriodicTask::min_interval && _interval <= PeriodicTask::max_interval && @@ -110,25 +109,3 @@ _tasks[index] = _tasks[index+1]; } } - -TimeMillisUpdateTask* TimeMillisUpdateTask::_task = NULL; - -void TimeMillisUpdateTask::task() { - os::update_global_time(); -} - -void TimeMillisUpdateTask::engage() { - assert(_task == NULL, "init twice?"); - os::update_global_time(); // initial update - os::enable_global_time(); - _task = new TimeMillisUpdateTask(CacheTimeMillisGranularity); - _task->enroll(); -} - -void TimeMillisUpdateTask::disengage() { - assert(_task != NULL, "uninit twice?"); - os::disable_global_time(); - _task->disenroll(); - delete _task; - _task = NULL; -} --- old/hotspot/src/share/vm/runtime/task.hpp 2009-08-01 04:15:26.512747727 +0100 +++ new/hotspot/src/share/vm/runtime/task.hpp 2009-08-01 04:15:26.434959141 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)task.hpp 1.23 07/05/05 17:06:59 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -116,13 +116,3 @@ // The task to perform at each period virtual void task() = 0; }; - -class TimeMillisUpdateTask : public PeriodicTask { - private: - static TimeMillisUpdateTask* _task; - public: - TimeMillisUpdateTask(int interval) : PeriodicTask(interval) {} - void task(); - static void engage(); - static void disengage(); -}; --- old/hotspot/src/share/vm/runtime/thread.cpp 2009-08-01 04:15:27.385490704 +0100 +++ new/hotspot/src/share/vm/runtime/thread.cpp 2009-08-01 04:15:27.282470978 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)thread.cpp 1.819 07/11/02 18:02:02 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -131,7 +131,6 @@ debug_only(_allow_allocation_count = 0;) NOT_PRODUCT(_allow_safepoint_count = 0;) CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;) - _highest_lock = NULL; _jvmti_env_iteration_count = 0; _vm_operation_started_count = 0; _vm_operation_completed_count = 0; @@ -793,19 +792,6 @@ } #endif -bool Thread::lock_is_in_stack(address adr) const { - assert(Thread::current() == this, "lock_is_in_stack can only be called from current thread"); - // High limit: highest_lock is set during thread execution - // Low limit: address of the local variable dummy, rounded to 4K boundary. - // (The rounding helps finding threads in unsafe mode, even if the particular stack - // frame has been popped already. Correct as long as stacks are at least 4K long and aligned.) - address end = os::current_stack_pointer(); - if (_highest_lock >= adr && adr >= end) return true; - - return false; -} - - bool Thread::is_in_stack(address adr) const { assert(Thread::current() == this, "is_in_stack can only be called from current thread"); address end = os::current_stack_pointer(); @@ -821,8 +807,7 @@ // should be revisited, and they should be removed if possible. bool Thread::is_lock_owned(address adr) const { - if (lock_is_in_stack(adr) ) return true; - return false; + return (_stack_base >= adr && adr >= (_stack_base - _stack_size)); } bool Thread::set_as_starting_thread() { @@ -1141,6 +1126,10 @@ void JavaThread::initialize() { // Initialize fields + + // Set the claimed par_id to -1 (ie not claiming any par_ids) + set_claimed_par_id(-1); + set_saved_exception_pc(NULL); set_threadObj(NULL); _anchor.clear(); @@ -1212,7 +1201,18 @@ pd_initialize(); } -JavaThread::JavaThread(bool is_attaching) : Thread() { +#ifndef SERIALGC +SATBMarkQueueSet JavaThread::_satb_mark_queue_set; +DirtyCardQueueSet JavaThread::_dirty_card_queue_set; +#endif // !SERIALGC + +JavaThread::JavaThread(bool is_attaching) : + Thread() +#ifndef SERIALGC + , _satb_mark_queue(&_satb_mark_queue_set), + _dirty_card_queue(&_dirty_card_queue_set) +#endif // !SERIALGC +{ initialize(); _is_attaching = is_attaching; } @@ -1258,7 +1258,13 @@ // Remove this ifdef when C1 is ported to the compiler interface. static void compiler_thread_entry(JavaThread* thread, TRAPS); -JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) : Thread() { +JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) : + Thread() +#ifndef SERIALGC + , _satb_mark_queue(&_satb_mark_queue_set), + _dirty_card_queue(&_dirty_card_queue_set) +#endif // !SERIALGC +{ if (TraceThreadEvents) { tty->print_cr("creating thread %p", this); } @@ -1320,10 +1326,6 @@ ThreadSafepointState::destroy(this); if (_thread_profiler != NULL) delete _thread_profiler; if (_thread_stat != NULL) delete _thread_stat; - - if (jvmti_thread_state() != NULL) { - JvmtiExport::cleanup_thread(this); - } } @@ -1408,6 +1410,7 @@ thread->clear_pending_exception(); } + // For any new cleanup additions, please check to see if they need to be applied to // cleanup_failed_attach_current_thread as well. 
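A worked illustration of the simplified ownership test introduced above (the addresses are made up): with _stack_base == 0x00007f0000200000 and _stack_size == 0x100000,

    Thread::is_lock_owned(adr)
      == (0x00007f0000200000 >= adr && adr >= 0x00007f0000100000)

so a BasicLock is considered owned by the thread whose stack range contains its address, which is what the removed _highest_lock / lock_is_in_stack() bookkeeping used to approximate.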
void JavaThread::exit(bool destroy_vm, ExitType exit_type) { @@ -1574,39 +1577,66 @@ tlab().make_parsable(true); // retire TLAB } - // Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread - Threads::remove(this); + if (jvmti_thread_state() != NULL) { + JvmtiExport::cleanup_thread(this); + } + +#ifndef SERIALGC + // We must flush G1-related buffers before removing a thread from + // the list of active threads. + if (UseG1GC) { + flush_barrier_queues(); + } +#endif + + // Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread + Threads::remove(this); } +#ifndef SERIALGC +// Flush G1-related queues. +void JavaThread::flush_barrier_queues() { + satb_mark_queue().flush(); + dirty_card_queue().flush(); +} +#endif + void JavaThread::cleanup_failed_attach_current_thread() { + if (get_thread_profiler() != NULL) { + get_thread_profiler()->disengage(); + ResourceMark rm; + get_thread_profiler()->print(get_thread_name()); + } - if (get_thread_profiler() != NULL) { - get_thread_profiler()->disengage(); - ResourceMark rm; - get_thread_profiler()->print(get_thread_name()); - } - - if (active_handles() != NULL) { - JNIHandleBlock* block = active_handles(); - set_active_handles(NULL); - JNIHandleBlock::release_block(block); - } - - if (free_handle_block() != NULL) { - JNIHandleBlock* block = free_handle_block(); - set_free_handle_block(NULL); - JNIHandleBlock::release_block(block); - } - - if (UseTLAB) { - tlab().make_parsable(true); // retire TLAB, if any - } - - Threads::remove(this); - delete this; + if (active_handles() != NULL) { + JNIHandleBlock* block = active_handles(); + set_active_handles(NULL); + JNIHandleBlock::release_block(block); + } + + if (free_handle_block() != NULL) { + JNIHandleBlock* block = free_handle_block(); + set_free_handle_block(NULL); + JNIHandleBlock::release_block(block); + } + + if (UseTLAB) { + tlab().make_parsable(true); // retire TLAB, if any + } + +#ifndef SERIALGC + if (UseG1GC) { + flush_barrier_queues(); + } +#endif + + Threads::remove(this); + delete this; } + + JavaThread* JavaThread::active() { Thread* thread = ThreadLocalStorage::thread(); assert(thread != NULL, "just checking"); @@ -1621,9 +1651,9 @@ } } -bool JavaThread::is_lock_owned(address adr) const { - if (lock_is_in_stack(adr)) return true; - +bool JavaThread::is_lock_owned(address adr) const { + if (Thread::is_lock_owned(adr)) return true; + for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) { if (chunk->contains(adr)) return true; } @@ -2401,7 +2431,7 @@ if (thread_oop != NULL && java_lang_Thread::is_daemon(thread_oop)) st->print("daemon "); Thread::print_on(st); // print guess for valid stack memory region (assume 4K pages); helps lock debugging - st->print_cr("[" INTPTR_FORMAT ".." 
INTPTR_FORMAT "]", (intptr_t)last_Java_sp() & ~right_n_bits(12), highest_lock()); + st->print_cr("[" INTPTR_FORMAT "]", (intptr_t)last_Java_sp() & ~right_n_bits(12)); if (thread_oop != NULL && JDK_Version::is_gte_jdk15x_version()) { st->print_cr(" java.lang.Thread.State: %s", java_lang_Thread::thread_status_name(thread_oop)); } @@ -2581,7 +2611,8 @@ oop JavaThread::current_park_blocker() { // Support for JSR-166 locks oop thread_oop = threadObj(); - if (thread_oop != NULL && JDK_Version::supports_thread_park_blocker()) { + if (thread_oop != NULL && + JDK_Version::current().supports_thread_park_blocker()) { return java_lang_Thread::park_blocker(thread_oop); } return NULL; @@ -2758,12 +2789,24 @@ // For now, just manually iterate through them. tc->do_thread(VMThread::vm_thread()); Universe::heap()->gc_threads_do(tc); - tc->do_thread(WatcherThread::watcher_thread()); + WatcherThread *wt = WatcherThread::watcher_thread(); + // Strictly speaking, the following NULL check isn't sufficient to make sure + // the data for WatcherThread is still valid upon being examined. However, + // considering that WatchThread terminates when the VM is on the way to + // exit at safepoint, the chance of the above is extremely small. The right + // way to prevent termination of WatcherThread would be to acquire + // Terminator_lock, but we can't do that without violating the lock rank + // checking in some cases. + if (wt != NULL) + tc->do_thread(wt); + // If CompilerThreads ever become non-JavaThreads, add them here } jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { - + + extern void JDK_Version_init(); + // Check version if (!is_supported_jni_version(args->version)) return JNI_EVERSION; @@ -2779,6 +2822,9 @@ // Initialize system properties. Arguments::init_system_properties(); + // So that JDK version can be used as a discrimintor when parsing arguments + JDK_Version_init(); + // Parse arguments jint parse_result = Arguments::parse(args); if (parse_result != JNI_OK) return parse_result; @@ -2929,21 +2975,38 @@ } if (AggressiveOpts) { - // Forcibly initialize java/util/HashMap and mutate the private - // static final "frontCacheEnabled" field before we start creating instances -#ifdef ASSERT - klassOop tmp_k = SystemDictionary::find(vmSymbolHandles::java_util_HashMap(), Handle(), Handle(), CHECK_0); - assert(tmp_k == NULL, "java/util/HashMap should not be loaded yet"); + { + // Forcibly initialize java/util/HashMap and mutate the private + // static final "frontCacheEnabled" field before we start creating instances +#ifdef ASSERT + klassOop tmp_k = SystemDictionary::find(vmSymbolHandles::java_util_HashMap(), Handle(), Handle(), CHECK_0); + assert(tmp_k == NULL, "java/util/HashMap should not be loaded yet"); #endif - klassOop k_o = SystemDictionary::resolve_or_null(vmSymbolHandles::java_util_HashMap(), Handle(), Handle(), CHECK_0); - KlassHandle k = KlassHandle(THREAD, k_o); - guarantee(k.not_null(), "Must find java/util/HashMap"); - instanceKlassHandle ik = instanceKlassHandle(THREAD, k()); - ik->initialize(CHECK_0); - fieldDescriptor fd; - // Possible we might not find this field; if so, don't break - if (ik->find_local_field(vmSymbols::frontCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) { - k()->bool_field_put(fd.offset(), true); + klassOop k_o = SystemDictionary::resolve_or_null(vmSymbolHandles::java_util_HashMap(), Handle(), Handle(), CHECK_0); + KlassHandle k = KlassHandle(THREAD, k_o); + guarantee(k.not_null(), "Must find java/util/HashMap"); + instanceKlassHandle ik = 
instanceKlassHandle(THREAD, k()); + ik->initialize(CHECK_0); + fieldDescriptor fd; + // Possible we might not find this field; if so, don't break + if (ik->find_local_field(vmSymbols::frontCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) { + k()->bool_field_put(fd.offset(), true); + } + } + + if (UseStringCache) { + // Forcibly initialize java/lang/StringValue and mutate the private + // static final "stringCacheEnabled" field before we start creating instances + klassOop k_o = SystemDictionary::resolve_or_null(vmSymbolHandles::java_lang_StringValue(), Handle(), Handle(), CHECK_0); + KlassHandle k = KlassHandle(THREAD, k_o); + guarantee(k.not_null(), "Must find java/lang/StringValue"); + instanceKlassHandle ik = instanceKlassHandle(THREAD, k()); + ik->initialize(CHECK_0); + fieldDescriptor fd; + // Possible we might not find this field; if so, don't break + if (ik->find_local_field(vmSymbols::stringCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) { + k()->bool_field_put(fd.offset(), true); + } } } @@ -3037,9 +3100,14 @@ #ifndef SERIALGC // Support for ConcurrentMarkSweep. This should be cleaned up - // and better encapsulated. XXX YSR - if (UseConcMarkSweepGC) { - ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD); + // and better encapsulated. The ugly nested if test would go away + // once things are properly refactored. XXX YSR + if (UseConcMarkSweepGC || UseG1GC) { + if (UseConcMarkSweepGC) { + ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD); + } else { + ConcurrentMarkThread::makeSurrogateLockerThread(THREAD); + } if (HAS_PENDING_EXCEPTION) { vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION)); } @@ -3088,7 +3156,6 @@ if (MemProfiling) MemProfiler::engage(); StatSampler::engage(); if (CheckJNICalls) JniPeriodicChecker::engage(); - if (CacheTimeMillis) TimeMillisUpdateTask::engage(); BiasedLocking::init(); @@ -3652,25 +3719,13 @@ // heavyweight monitors, then the owner is the stack address of the // Lock Word in the owning Java thread's stack. // - // We can't use Thread::is_lock_owned() or Thread::lock_is_in_stack() because - // those routines rely on the "current" stack pointer. That would be our - // stack pointer which is not relevant to the question. Instead we use the - // highest lock ever entered by the thread and find the thread that is - // higher than and closest to our target stack address. - // - address least_diff = 0; - bool least_diff_initialized = false; JavaThread* the_owner = NULL; { MutexLockerEx ml(doLock ? Threads_lock : NULL); ALL_JAVA_THREADS(q) { - address addr = q->highest_lock(); - if (addr == NULL || addr < owner) continue; // thread has entered no monitors or is too low - address diff = (address)(addr - owner); - if (!least_diff_initialized || diff < least_diff) { - least_diff_initialized = true; - least_diff = diff; + if (q->is_lock_owned(owner)) { the_owner = q; + break; } } } --- old/hotspot/src/share/vm/runtime/thread.hpp 2009-08-01 04:15:28.578424401 +0100 +++ new/hotspot/src/share/vm/runtime/thread.hpp 2009-08-01 04:15:28.484776648 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)thread.hpp 1.456 07/09/28 10:22:58 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -203,14 +203,6 @@ friend class ThreadLocalStorage; friend class GC_locker; - // In order for all threads to be able to use fast locking, we need to know the highest stack - // address of where a lock is on the stack (stacks normally grow towards lower addresses). This - // variable is initially set to NULL, indicating no locks are used by the thread. During the thread's - // execution, it will be set whenever locking can happen, i.e., when we call out to Java code or use - // an ObjectLocker. The value is never decreased, hence, it will over the lifetime of a thread - // approximate the real stackbase. - address _highest_lock; // Highest stack address where a JavaLock exist - ThreadLocalAllocBuffer _tlab; // Thread-local eden int _vm_operation_started_count; // VM_Operation support @@ -401,20 +393,14 @@ } // Sweeper support - void nmethods_do(); - - // Fast-locking support - address highest_lock() const { return _highest_lock; } - void update_highest_lock(address base) { if (base > _highest_lock) _highest_lock = base; } + void nmethods_do(); - // Tells if adr belong to this thread. This is used - // for checking if a lock is owned by the running thread. - // Warning: the method can only be used on the running thread - // Fast lock support uses these methods - virtual bool lock_is_in_stack(address adr) const; - virtual bool is_lock_owned(address adr) const; + // Tells if adr belong to this thread. This is used for checking if a lock + // is owned by the running thread. It is used to support fast lock. + virtual bool is_lock_owned(address adr) const; // Check if address is in the stack of the thread (not just for locks). + // Warning: the method can only be used on the running thread bool is_in_stack(address adr) const; // Sets this thread as starting thread. Returns failure if thread @@ -786,6 +772,20 @@ } _jmp_ring[ jump_ring_buffer_size ]; #endif /* PRODUCT */ +#ifndef SERIALGC + // Support for G1 barriers + + ObjPtrQueue _satb_mark_queue; // Thread-local log for SATB barrier. + // Set of all such queues. + static SATBMarkQueueSet _satb_mark_queue_set; + + DirtyCardQueue _dirty_card_queue; // Thread-local log for dirty cards. + // Set of all such queues. + static DirtyCardQueueSet _dirty_card_queue_set; + + void flush_barrier_queues(); +#endif // !SERIALGC + friend class VMThread; friend class ThreadWaitTransition; friend class VM_Exit; @@ -1171,6 +1171,11 @@ static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); } +#ifndef SERIALGC + static ByteSize satb_mark_queue_offset() { return byte_offset_of(JavaThread, _satb_mark_queue); } + static ByteSize dirty_card_queue_offset() { return byte_offset_of(JavaThread, _dirty_card_queue); } +#endif // !SERIALGC + // Returns the jni environment for this thread JNIEnv* jni_environment() { return &_jni_environment; } @@ -1329,6 +1334,13 @@ public: // Thread local information maintained by JVMTI. void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; } + // A JvmtiThreadState is lazily allocated. This jvmti_thread_state() + // getter is used to get this JavaThread's JvmtiThreadState if it has + // one which means NULL can be returned. JvmtiThreadState::state_for() + // is used to get the specified JavaThread's JvmtiThreadState if it has + // one or it allocates a new JvmtiThreadState for the JavaThread and + // returns it. 
JvmtiThreadState::state_for() will return NULL only if + // the specified JavaThread is exiting. JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; } static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); } void set_jvmti_get_loaded_classes_closure(JvmtiGetLoadedClassesClosure* value) { _jvmti_get_loaded_classes_closure = value; } @@ -1416,7 +1428,21 @@ static inline void set_stack_size_at_create(size_t value) { _stack_size_at_create = value; } - + +#ifndef SERIALGC + // SATB marking queue support + ObjPtrQueue& satb_mark_queue() { return _satb_mark_queue; } + static SATBMarkQueueSet& satb_mark_queue_set() { + return _satb_mark_queue_set; + } + + // Dirty card queue support + DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; } + static DirtyCardQueueSet& dirty_card_queue_set() { + return _dirty_card_queue_set; + } +#endif // !SERIALGC + // Machine dependent stuff #include "incls/_thread_pd.hpp.incl" @@ -1448,6 +1474,14 @@ // clearing/querying jni attach status bool is_attaching() const { return _is_attaching; } void set_attached() { _is_attaching = false; OrderAccess::fence(); } +private: + // This field is used to determine if a thread has claimed + // a par_id: it is -1 if the thread has not claimed a par_id; + // otherwise its value is the par_id that has been claimed. + int _claimed_par_id; +public: + int get_claimed_par_id() { return _claimed_par_id; } + void set_claimed_par_id(int id) { _claimed_par_id = id;} }; // Inline implementation of JavaThread::current --- old/hotspot/src/share/vm/runtime/threadLocalStorage.cpp 2009-08-01 04:15:29.847279948 +0100 +++ new/hotspot/src/share/vm/runtime/threadLocalStorage.cpp 2009-08-01 04:15:29.474793930 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)threadLocalStorage.cpp 1.46 07/05/05 17:07:00 JVM" #endif /* - * Copyright 1997-2003 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,9 +45,13 @@ } void ThreadLocalStorage::init() { - assert(ThreadLocalStorage::thread_index() == -1, "More than one attempt to initialize threadLocalStorage"); + assert(!is_initialized(), + "More than one attempt to initialize threadLocalStorage"); pd_init(); set_thread_index(os::allocate_thread_local_storage()); generate_code_for_get_thread(); } +bool ThreadLocalStorage::is_initialized() { + return (thread_index() != -1); +} --- old/hotspot/src/share/vm/runtime/threadLocalStorage.hpp 2009-08-01 04:15:30.973416100 +0100 +++ new/hotspot/src/share/vm/runtime/threadLocalStorage.hpp 2009-08-01 04:15:30.888641513 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)threadLocalStorage.hpp 1.45 07/05/05 17:07:00 JVM" #endif /* - * Copyright 1997-2003 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,6 +50,7 @@ // Initialization // Called explicitly from VMThread::activate_system instead of init_globals. 
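A minimal caller-side sketch for the new is_initialized() query introduced above (the guard shown is hypothetical; init() itself asserts that it only runs once):

    if (!ThreadLocalStorage::is_initialized()) {
      ThreadLocalStorage::init();            // allocates the TLS index and the get_thread code
    }
    Thread* t = ThreadLocalStorage::thread();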
static void init(); + static bool is_initialized(); private: static int _thread_index; --- old/hotspot/src/share/vm/runtime/vframe.cpp 2009-08-01 04:15:31.808661680 +0100 +++ new/hotspot/src/share/vm/runtime/vframe.cpp 2009-08-01 04:15:31.725133958 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vframe.cpp 1.163 07/08/29 13:42:30 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -209,7 +209,7 @@ for (BasicObjectLock* current = (fr().previous_monitor_in_interpreter_frame(fr().interpreter_frame_monitor_begin())); current >= fr().interpreter_frame_monitor_end(); current = fr().previous_monitor_in_interpreter_frame(current)) { - result->push(new MonitorInfo(current->obj(), current->lock())); + result->push(new MonitorInfo(current->obj(), current->lock(), false)); } return result; } --- old/hotspot/src/share/vm/runtime/vframe.hpp 2009-08-01 04:15:32.655706291 +0100 +++ new/hotspot/src/share/vm/runtime/vframe.hpp 2009-08-01 04:15:32.571315362 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vframe.hpp 1.89 07/05/17 16:07:04 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -233,15 +233,18 @@ private: oop _owner; // the object owning the monitor BasicLock* _lock; + bool _eliminated; public: // Constructor - MonitorInfo(oop owner, BasicLock* lock) { + MonitorInfo(oop owner, BasicLock* lock, bool eliminated) { _owner = owner; _lock = lock; + _eliminated = eliminated; } // Accessors oop owner() const { return _owner; } BasicLock* lock() const { return _lock; } + bool eliminated() const { return _eliminated; } }; class vframeStreamCommon : StackObj { @@ -416,6 +419,48 @@ int decode_offset; if (pc_desc == NULL) { // Should not happen, but let fill_from_compiled_frame handle it. + + // If we are trying to walk the stack of a thread that is not + // at a safepoint (like AsyncGetCallTrace would do) then this is an + // acceptable result. [ This is assuming that safe_for_sender + // is so bullet proof that we can trust the frames it produced. ] + // + // So if we see that the thread is not safepoint safe + // then simply produce the method and a bci of zero + // and skip the possibility of decoding any inlining that + // may be present. That is far better than simply stopping (or + // asserting. If however the thread is safepoint safe this + // is the sign of a compiler bug and we'll let + // fill_from_compiled_frame handle it. + + + JavaThreadState state = _thread->thread_state(); + + // in_Java should be good enough to test safepoint safety + // if state were say in_Java_trans then we'd expect that + // the pc would have already been slightly adjusted to + // one that would produce a pcDesc since the trans state + // would be one that might in fact anticipate a safepoint + + if (state == _thread_in_Java ) { + // This will get a method a zero bci and no inlining. + // Might be nice to have a unique bci to signify this + // particular case but for now zero will do. + + fill_from_compiled_native_frame(); + + // There is something to be said for setting the mode to + // at_end_mode to prevent trying to walk further up the + // stack. 
There is evidence that if we walk any further + // that we could produce a bad stack chain. However until + // we see evidence that allowing this causes us to find + // frames bad enough to cause segv's or assertion failures + // we don't do it as while we may get a bad call chain the + // probability is much higher (several magnitudes) that we + // get good data. + + return true; + } decode_offset = DebugInformationRecorder::serialized_null; } else { decode_offset = pc_desc->scope_decode_offset(); --- old/hotspot/src/share/vm/runtime/vframe_hp.cpp 2009-08-01 04:15:33.559071192 +0100 +++ new/hotspot/src/share/vm/runtime/vframe_hp.cpp 2009-08-01 04:15:33.484179896 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vframe_hp.cpp 1.159 07/05/24 14:38:39 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -193,7 +193,7 @@ // Casting away const frame& fr = (frame&) _fr; MonitorInfo* info = new MonitorInfo(fr.compiled_synchronized_native_monitor_owner(nm), - fr.compiled_synchronized_native_monitor(nm)); + fr.compiled_synchronized_native_monitor(nm), false); monitors->push(info); return monitors; } @@ -205,7 +205,7 @@ for (int index = 0; index < monitors->length(); index++) { MonitorValue* mv = monitors->at(index); StackValue *owner_sv = create_stack_value(mv->owner()); // it is an oop - result->push(new MonitorInfo(owner_sv->get_obj()(), resolve_monitor_lock(mv->basic_lock()))); + result->push(new MonitorInfo(owner_sv->get_obj()(), resolve_monitor_lock(mv->basic_lock()), mv->eliminated())); } return result; } --- old/hotspot/src/share/vm/runtime/virtualspace.cpp 2009-08-01 04:15:34.357038082 +0100 +++ new/hotspot/src/share/vm/runtime/virtualspace.cpp 2009-08-01 04:15:34.281515057 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)virtualspace.cpp 1.63 07/10/04 10:49:29 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,12 +31,15 @@ // ReservedSpace ReservedSpace::ReservedSpace(size_t size) { - initialize(size, 0, false, NULL); + initialize(size, 0, false, NULL, 0); } ReservedSpace::ReservedSpace(size_t size, size_t alignment, - bool large, char* requested_address) { - initialize(size, alignment, large, requested_address); + bool large, + char* requested_address, + const size_t noaccess_prefix) { + initialize(size+noaccess_prefix, alignment, large, requested_address, + noaccess_prefix); } char * @@ -106,9 +109,10 @@ } ReservedSpace::ReservedSpace(const size_t prefix_size, - const size_t prefix_align, - const size_t suffix_size, - const size_t suffix_align) + const size_t prefix_align, + const size_t suffix_size, + const size_t suffix_align, + const size_t noaccess_prefix) { assert(prefix_size != 0, "sanity"); assert(prefix_align != 0, "sanity"); @@ -121,12 +125,16 @@ assert((suffix_align & prefix_align - 1) == 0, "suffix_align not divisible by prefix_align"); + // Add in noaccess_prefix to prefix_size; + const size_t adjusted_prefix_size = prefix_size + noaccess_prefix; + const size_t size = adjusted_prefix_size + suffix_size; + // On systems where the entire region has to be reserved and committed up // front, the compound alignment normally done by this method is unnecessary. const bool try_reserve_special = UseLargePages && prefix_align == os::large_page_size(); if (!os::can_commit_large_page_memory() && try_reserve_special) { - initialize(prefix_size + suffix_size, prefix_align, true); + initialize(size, prefix_align, true, NULL, noaccess_prefix); return; } @@ -134,15 +142,19 @@ _size = 0; _alignment = 0; _special = false; + _noaccess_prefix = 0; + + // Assert that if noaccess_prefix is used, it is the same as prefix_align. + assert(noaccess_prefix == 0 || + noaccess_prefix == prefix_align, "noaccess prefix wrong"); // Optimistically try to reserve the exact size needed. - const size_t size = prefix_size + suffix_size; char* addr = os::reserve_memory(size, NULL, prefix_align); if (addr == NULL) return; // Check whether the result has the needed alignment (unlikely unless // prefix_align == suffix_align). - const size_t ofs = size_t(addr) + prefix_size & suffix_align - 1; + const size_t ofs = size_t(addr) + adjusted_prefix_size & suffix_align - 1; if (ofs != 0) { // Wrong alignment. Release, allocate more space and do manual alignment. // @@ -156,22 +168,24 @@ } const size_t extra = MAX2(ofs, suffix_align - ofs); - addr = reserve_and_align(size + extra, prefix_size, prefix_align, - suffix_size, suffix_align); + addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align, + suffix_size, suffix_align); if (addr == NULL) { // Try an even larger region. If this fails, address space is exhausted. 
- addr = reserve_and_align(size + suffix_align, prefix_size, - prefix_align, suffix_size, suffix_align); + addr = reserve_and_align(size + suffix_align, adjusted_prefix_size, + prefix_align, suffix_size, suffix_align); } } _base = addr; _size = size; _alignment = prefix_align; + _noaccess_prefix = noaccess_prefix; } void ReservedSpace::initialize(size_t size, size_t alignment, bool large, - char* requested_address) { + char* requested_address, + const size_t noaccess_prefix) { const size_t granularity = os::vm_allocation_granularity(); assert((size & granularity - 1) == 0, "size not aligned to os::vm_allocation_granularity()"); @@ -184,6 +198,7 @@ _size = 0; _special = false; _alignment = 0; + _noaccess_prefix = 0; if (size == 0) { return; } @@ -223,7 +238,8 @@ // important. If available space is not detected, return NULL. if (requested_address != 0) { - base = os::attempt_reserve_memory_at(size, requested_address); + base = os::attempt_reserve_memory_at(size, + requested_address-noaccess_prefix); } else { base = os::reserve_memory(size, NULL, alignment); } @@ -238,30 +254,27 @@ // increase size to a multiple of the desired alignment size = align_size_up(size, alignment); size_t extra_size = size + alignment; - char* extra_base = os::reserve_memory(extra_size, NULL, alignment); - if (extra_base == NULL) return; - // Do manual alignement - base = (char*) align_size_up((uintptr_t) extra_base, alignment); - assert(base >= extra_base, "just checking"); - // Release unused areas - size_t unused_bottom_size = base - extra_base; - size_t unused_top_size = extra_size - size - unused_bottom_size; - assert(unused_bottom_size % os::vm_allocation_granularity() == 0, - "size not allocation aligned"); - assert(unused_top_size % os::vm_allocation_granularity() == 0, - "size not allocation aligned"); - if (unused_bottom_size > 0) { - os::release_memory(extra_base, unused_bottom_size); - } - if (unused_top_size > 0) { - os::release_memory(base + size, unused_top_size); - } + do { + char* extra_base = os::reserve_memory(extra_size, NULL, alignment); + if (extra_base == NULL) return; + // Do manual alignement + base = (char*) align_size_up((uintptr_t) extra_base, alignment); + assert(base >= extra_base, "just checking"); + // Re-reserve the region at the aligned base address. + os::release_memory(extra_base, extra_size); + base = os::reserve_memory(size, base); + } while (base == NULL); } } // Done _base = base; _size = size; _alignment = MAX2(alignment, (size_t) os::vm_page_size()); + _noaccess_prefix = noaccess_prefix; + + // Assert that if noaccess_prefix is used, it is the same as alignment. + assert(noaccess_prefix == 0 || + noaccess_prefix == _alignment, "noaccess prefix wrong"); assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base, "area must be distinguisable from marks for mark-sweep"); @@ -277,6 +290,7 @@ _base = base; _size = size; _alignment = alignment; + _noaccess_prefix = 0; _special = special; } @@ -323,17 +337,60 @@ void ReservedSpace::release() { if (is_reserved()) { + char *real_base = _base - _noaccess_prefix; + const size_t real_size = _size + _noaccess_prefix; if (special()) { - os::release_memory_special(_base, _size); + os::release_memory_special(real_base, real_size); } else{ - os::release_memory(_base, _size); + os::release_memory(real_base, real_size); } _base = NULL; _size = 0; + _noaccess_prefix = 0; _special = false; } } +void ReservedSpace::protect_noaccess_prefix(const size_t size) { + // If there is noaccess prefix, return. 
+ if (_noaccess_prefix == 0) return; + + assert(_noaccess_prefix >= (size_t)os::vm_page_size(), + "must be at least page size big"); + + // Protect memory at the base of the allocated region. + // If special, the page was committed (only matters on windows) + if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, + _special)) { + fatal("cannot protect protection page"); + } + + _base += _noaccess_prefix; + _size -= _noaccess_prefix; + assert((size == _size) && ((uintptr_t)_base % _alignment == 0), + "must be exactly of required size and alignment"); +} + +ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, + bool large, char* requested_address) : + ReservedSpace(size, alignment, large, + requested_address, + UseCompressedOops && UseImplicitNullCheckForNarrowOop ? + lcm(os::vm_page_size(), alignment) : 0) { + // Only reserved space for the java heap should have a noaccess_prefix + // if using compressed oops. + protect_noaccess_prefix(size); +} + +ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size, + const size_t prefix_align, + const size_t suffix_size, + const size_t suffix_align) : + ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align, + UseCompressedOops && UseImplicitNullCheckForNarrowOop ? + lcm(os::vm_page_size(), prefix_align) : 0) { + protect_noaccess_prefix(prefix_size+suffix_size); +} // VirtualSpace @@ -351,6 +408,7 @@ _lower_alignment = 0; _middle_alignment = 0; _upper_alignment = 0; + _special = false; } @@ -405,7 +463,8 @@ void VirtualSpace::release() { - (void)os::release_memory(low_boundary(), reserved_size()); + // This does not release memory it never reserved. + // Caller must release via rs.release(); _low_boundary = NULL; _high_boundary = NULL; _low = NULL; --- old/hotspot/src/share/vm/runtime/virtualspace.hpp 2009-08-01 04:15:35.256029612 +0100 +++ new/hotspot/src/share/vm/runtime/virtualspace.hpp 2009-08-01 04:15:35.183028608 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)virtualspace.hpp 1.42 07/10/04 10:49:29 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,13 +32,15 @@ private: char* _base; size_t _size; + size_t _noaccess_prefix; size_t _alignment; bool _special; // ReservedSpace ReservedSpace(char* base, size_t size, size_t alignment, bool special); void initialize(size_t size, size_t alignment, bool large, - char* requested_address = NULL); + char* requested_address, + const size_t noaccess_prefix); // Release parts of an already-reserved memory region [addr, addr + len) to // get a new region that has "compound alignment." Return the start of the @@ -62,13 +64,19 @@ const size_t suffix_size, const size_t suffix_align); + protected: + // Create protection page at the beginning of the space. 
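A summary of the mechanism the constructors above rely on (the null-check connection is an assumption based on the UseImplicitNullCheckForNarrowOop test, not stated in this hunk): with UseCompressedOops a null narrowOop decodes to the heap base, so keeping a no-access prefix of at least one page at the base lets a load through such a decoded null fault, and the VM can turn that fault into an implicit NullPointerException instead of emitting explicit null checks.

    // effect of protect_noaccess_prefix(), schematically:
    //   [ base .. base+prefix )  MEM_PROT_NONE   <- where a decoded null oop points
    //   [ base+prefix .. end  )  the usable Java heap (_base is advanced, _size reduced)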
+ void protect_noaccess_prefix(const size_t size); + public: // Constructor ReservedSpace(size_t size); ReservedSpace(size_t size, size_t alignment, bool large, - char* requested_address = NULL); + char* requested_address = NULL, + const size_t noaccess_prefix = 0); ReservedSpace(const size_t prefix_size, const size_t prefix_align, - const size_t suffix_size, const size_t suffix_align); + const size_t suffix_size, const size_t suffix_align, + const size_t noaccess_prefix); // Accessors char* base() const { return _base; } @@ -76,6 +84,8 @@ size_t alignment() const { return _alignment; } bool special() const { return _special; } + size_t noaccess_prefix() const { return _noaccess_prefix; } + bool is_reserved() const { return _base != NULL; } void release(); @@ -107,6 +117,16 @@ return last_part(partition_size, alignment()); } +// Class encapsulating behavior specific of memory space reserved for Java heap +class ReservedHeapSpace : public ReservedSpace { +public: + // Constructor + ReservedHeapSpace(size_t size, size_t forced_base_alignment, + bool large, char* requested_address); + ReservedHeapSpace(const size_t prefix_size, const size_t prefix_align, + const size_t suffix_size, const size_t suffix_align); +}; + // VirtualSpace is data structure for committing a previously reserved address range in smaller chunks. class VirtualSpace VALUE_OBJ_CLASS_SPEC { --- old/hotspot/src/share/vm/runtime/vmStructs.cpp 2009-08-01 04:15:36.172599914 +0100 +++ new/hotspot/src/share/vm/runtime/vmStructs.cpp 2009-08-01 04:15:36.069939705 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vmStructs.cpp 1.189 08/04/09 19:20:08 JVM" #endif /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -74,7 +74,8 @@ /******************************************************************/ \ \ volatile_nonstatic_field(oopDesc, _mark, markOop) \ - nonstatic_field(oopDesc, _klass, klassOop) \ + volatile_nonstatic_field(oopDesc, _metadata._klass, wideKlassOop) \ + volatile_nonstatic_field(oopDesc, _metadata._compressed_klass, narrowOop) \ static_field(oopDesc, _bs, BarrierSet*) \ nonstatic_field(arrayKlass, _dimension, int) \ nonstatic_field(arrayKlass, _higher_dimension, klassOop) \ @@ -82,13 +83,14 @@ nonstatic_field(arrayKlass, _vtable_len, int) \ nonstatic_field(arrayKlass, _alloc_size, juint) \ nonstatic_field(arrayKlass, _component_mirror, oop) \ - nonstatic_field(arrayOopDesc, _length, int) \ nonstatic_field(compiledICHolderKlass, _alloc_size, juint) \ nonstatic_field(compiledICHolderOopDesc, _holder_method, methodOop) \ nonstatic_field(compiledICHolderOopDesc, _holder_klass, klassOop) \ nonstatic_field(constantPoolOopDesc, _tags, typeArrayOop) \ nonstatic_field(constantPoolOopDesc, _cache, constantPoolCacheOop) \ nonstatic_field(constantPoolOopDesc, _pool_holder, klassOop) \ + nonstatic_field(constantPoolOopDesc, _length, int) \ + nonstatic_field(constantPoolCacheOopDesc, _length, int) \ nonstatic_field(constantPoolCacheOopDesc, _constant_pool, constantPoolOop) \ nonstatic_field(instanceKlass, _array_klasses, klassOop) \ nonstatic_field(instanceKlass, _methods, objArrayOop) \ @@ -264,6 +266,7 @@ static_field(Universe, _bootstrapping, bool) \ static_field(Universe, _fully_initialized, bool) \ static_field(Universe, _verify_count, int) \ + static_field(Universe, _heap_base, address) \ \ /**********************************************************************************/ \ /* Generation and Space hierarchies */ \ @@ -302,14 +305,12 @@ nonstatic_field(CardTableModRefBS, _guard_region, MemRegion) \ nonstatic_field(CardTableModRefBS, byte_map_base, jbyte*) \ \ - nonstatic_field(CardTableRS, _ct_bs, CardTableModRefBS) \ + nonstatic_field(CardTableRS, _ct_bs, CardTableModRefBSForCTRS*) \ \ nonstatic_field(CollectedHeap, _reserved, MemRegion) \ nonstatic_field(SharedHeap, _perm_gen, PermGen*) \ nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \ nonstatic_field(CollectedHeap, _is_gc_active, bool) \ - nonstatic_field(CollectedHeap, _max_heap_capacity, size_t) \ - \ nonstatic_field(CompactibleSpace, _compaction_top, HeapWord*) \ nonstatic_field(CompactibleSpace, _first_dead, HeapWord*) \ nonstatic_field(CompactibleSpace, _end_of_live, HeapWord*) \ @@ -455,40 +456,40 @@ static_field(SystemDictionary, _shared_dictionary, Dictionary*) \ static_field(SystemDictionary, _system_loader_lock_obj, oop) \ static_field(SystemDictionary, _loader_constraints, LoaderConstraintTable*) \ - static_field(SystemDictionary, _object_klass, klassOop) \ - static_field(SystemDictionary, _string_klass, klassOop) \ - static_field(SystemDictionary, _class_klass, klassOop) \ - static_field(SystemDictionary, _cloneable_klass, klassOop) \ - static_field(SystemDictionary, _classloader_klass, klassOop) \ - static_field(SystemDictionary, _serializable_klass, klassOop) \ - static_field(SystemDictionary, _system_klass, klassOop) \ - static_field(SystemDictionary, _throwable_klass, klassOop) \ - static_field(SystemDictionary, _threaddeath_klass, klassOop) \ - static_field(SystemDictionary, _error_klass, klassOop) \ - static_field(SystemDictionary, _exception_klass, klassOop) \ - static_field(SystemDictionary, _runtime_exception_klass, klassOop) \ - 
static_field(SystemDictionary, _classNotFoundException_klass, klassOop) \ - static_field(SystemDictionary, _noClassDefFoundError_klass, klassOop) \ - static_field(SystemDictionary, _linkageError_klass, klassOop) \ - static_field(SystemDictionary, _classCastException_klass, klassOop) \ - static_field(SystemDictionary, _arrayStoreException_klass, klassOop) \ - static_field(SystemDictionary, _virtualMachineError_klass, klassOop) \ - static_field(SystemDictionary, _outOfMemoryError_klass, klassOop) \ - static_field(SystemDictionary, _StackOverflowError_klass, klassOop) \ - static_field(SystemDictionary, _protectionDomain_klass, klassOop) \ - static_field(SystemDictionary, _AccessControlContext_klass, klassOop) \ - static_field(SystemDictionary, _reference_klass, klassOop) \ - static_field(SystemDictionary, _soft_reference_klass, klassOop) \ - static_field(SystemDictionary, _weak_reference_klass, klassOop) \ - static_field(SystemDictionary, _final_reference_klass, klassOop) \ - static_field(SystemDictionary, _phantom_reference_klass, klassOop) \ - static_field(SystemDictionary, _finalizer_klass, klassOop) \ - static_field(SystemDictionary, _thread_klass, klassOop) \ - static_field(SystemDictionary, _threadGroup_klass, klassOop) \ - static_field(SystemDictionary, _properties_klass, klassOop) \ - static_field(SystemDictionary, _stringBuffer_klass, klassOop) \ - static_field(SystemDictionary, _vector_klass, klassOop) \ - static_field(SystemDictionary, _hashtable_klass, klassOop) \ + static_field(SystemDictionary, WK_KLASS(object_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(string_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(class_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(cloneable_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(classloader_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(serializable_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(system_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(throwable_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(threaddeath_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(error_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(exception_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(runtime_exception_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(classNotFoundException_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(noClassDefFoundError_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(linkageError_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(ClassCastException_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(ArrayStoreException_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(virtualMachineError_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(OutOfMemoryError_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(StackOverflowError_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(protectionDomain_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(AccessControlContext_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(reference_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(soft_reference_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(weak_reference_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(final_reference_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(phantom_reference_klass), klassOop) \ 
+ static_field(SystemDictionary, WK_KLASS(finalizer_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(thread_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(threadGroup_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(properties_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(stringBuffer_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(vector_klass), klassOop) \ + static_field(SystemDictionary, WK_KLASS(hashtable_klass), klassOop) \ static_field(SystemDictionary, _box_klasses[0], klassOop) \ static_field(SystemDictionary, _java_system_loader, oop) \ \ @@ -585,7 +586,7 @@ /***********************************/ \ \ static_field(StubRoutines, _call_stub_return_address, address) \ - IA32_ONLY(static_field(StubRoutines::i486,_call_stub_compiled_return, address)) \ + IA32_ONLY(static_field(StubRoutines::x86,_call_stub_compiled_return, address)) \ \ /***************************************/ \ /* PcDesc and other compiled code info */ \ @@ -656,7 +657,6 @@ \ volatile_nonstatic_field(Thread, _suspend_flags, uint32_t) \ nonstatic_field(Thread, _active_handles, JNIHandleBlock*) \ - nonstatic_field(Thread, _highest_lock, address) \ nonstatic_field(Thread, _tlab, ThreadLocalAllocBuffer) \ nonstatic_field(Thread, _current_pending_monitor, ObjectMonitor*) \ nonstatic_field(Thread, _current_pending_monitor_is_from_java, bool) \ @@ -763,8 +763,9 @@ static_field(Abstract_VM_Version, _vm_minor_version, int) \ static_field(Abstract_VM_Version, _vm_build_number, int) \ \ - static_field(JDK_Version, _pre_jdk16_version, bool) \ - static_field(JDK_Version, _jdk_version, int) \ + static_field(JDK_Version, _current, JDK_Version) \ + nonstatic_field(JDK_Version, _partially_initialized, bool) \ + nonstatic_field(JDK_Version, _major, unsigned char) \ \ \ \ @@ -916,12 +917,12 @@ declare_type(arrayKlass, Klass) \ declare_type(arrayKlassKlass, klassKlass) \ declare_type(arrayOopDesc, oopDesc) \ - declare_type(compiledICHolderKlass, Klass) \ - declare_type(compiledICHolderOopDesc, oopDesc) \ - declare_type(constantPoolKlass, arrayKlass) \ - declare_type(constantPoolOopDesc, arrayOopDesc) \ - declare_type(constantPoolCacheKlass, arrayKlass) \ - declare_type(constantPoolCacheOopDesc, arrayOopDesc) \ + declare_type(compiledICHolderKlass, Klass) \ + declare_type(compiledICHolderOopDesc, oopDesc) \ + declare_type(constantPoolKlass, Klass) \ + declare_type(constantPoolOopDesc, oopDesc) \ + declare_type(constantPoolCacheKlass, Klass) \ + declare_type(constantPoolCacheOopDesc, oopDesc) \ declare_type(instanceKlass, Klass) \ declare_type(instanceKlassKlass, klassKlass) \ declare_type(instanceOopDesc, oopDesc) \ @@ -953,9 +954,11 @@ declare_oop_type(klassOop) \ declare_oop_type(markOop) \ declare_oop_type(methodOop) \ - declare_oop_type(methodDataOop) \ + declare_oop_type(methodDataOop) \ declare_oop_type(objArrayOop) \ declare_oop_type(oop) \ + declare_oop_type(narrowOop) \ + declare_oop_type(wideKlassOop) \ declare_oop_type(constMethodOop) \ declare_oop_type(symbolOop) \ declare_oop_type(typeArrayOop) \ @@ -994,6 +997,7 @@ declare_toplevel_type(BarrierSet) \ declare_type(ModRefBarrierSet, BarrierSet) \ declare_type(CardTableModRefBS, ModRefBarrierSet) \ + declare_type(CardTableModRefBSForCTRS, CardTableModRefBS) \ declare_toplevel_type(GenRemSet) \ declare_type(CardTableRS, GenRemSet) \ declare_toplevel_type(BlockOffsetSharedArray) \ @@ -1021,6 +1025,10 @@ declare_toplevel_type(BlockOffsetSharedArray*) \ declare_toplevel_type(GenRemSet*) \ 
declare_toplevel_type(CardTableRS*) \ + declare_toplevel_type(CardTableModRefBS*) \ + declare_toplevel_type(CardTableModRefBS**) \ + declare_toplevel_type(CardTableModRefBSForCTRS*) \ + declare_toplevel_type(CardTableModRefBSForCTRS**) \ declare_toplevel_type(CollectedHeap*) \ declare_toplevel_type(ContiguousSpace*) \ declare_toplevel_type(DefNewGeneration*) \ @@ -1106,7 +1114,7 @@ \ declare_toplevel_type(StubQueue) \ declare_toplevel_type(StubRoutines) \ - IA32_ONLY(declare_toplevel_type(StubRoutines::i486)) \ + IA32_ONLY(declare_toplevel_type(StubRoutines::x86)) \ declare_toplevel_type(Stub) \ declare_type(InterpreterCodelet, Stub) \ \ @@ -1311,6 +1319,7 @@ /* Object sizes */ \ /****************/ \ \ + declare_constant(oopSize) \ declare_constant(LogBytesPerWord) \ declare_constant(BytesPerLong) \ \ @@ -1318,7 +1327,9 @@ /* Object alignment */ \ /********************/ \ \ + declare_constant(MinObjAlignment) \ declare_constant(MinObjAlignmentInBytes) \ + declare_constant(LogMinObjAlignmentInBytes) \ \ /********************************************/ \ /* Generation and Space Hierarchy Constants */ \ @@ -1365,7 +1376,6 @@ \ declare_constant(HeapWordSize) \ declare_constant(LogHeapWordSize) \ - declare_constant(HeapWordsPerOop) \ \ /* constants from PermGen::Name enum */ \ \ @@ -1574,6 +1584,7 @@ \ declare_constant(Location::normal) \ declare_constant(Location::oop) \ + declare_constant(Location::narrowoop) \ declare_constant(Location::int_in_long) \ declare_constant(Location::lng) \ declare_constant(Location::float_in_dbl) \ @@ -1614,7 +1625,7 @@ declare_constant(OopMapValue::unused_value) \ declare_constant(OopMapValue::oop_value) \ declare_constant(OopMapValue::value_value) \ - declare_constant(OopMapValue::dead_value) \ + declare_constant(OopMapValue::narrowoop_value) \ declare_constant(OopMapValue::callee_saved_value) \ declare_constant(OopMapValue::derived_oop_value) \ \ @@ -1693,7 +1704,12 @@ declare_constant(markOopDesc::no_hash) \ declare_constant(markOopDesc::no_hash_in_place) \ declare_constant(markOopDesc::no_lock_in_place) \ - declare_constant(markOopDesc::max_age) + declare_constant(markOopDesc::max_age) \ + \ + /* Constants in markOop used by CMS. */ \ + declare_constant(markOopDesc::cms_shift) \ + declare_constant(markOopDesc::cms_mask) \ + declare_constant(markOopDesc::size_shift) \ /* NOTE that we do not use the last_entry() macro here; it is used */ /* in vmStructs__.hpp's VM_LONG_CONSTANTS_OS_CPU macro (and */ @@ -1957,6 +1973,7 @@ GENERATE_STATIC_VM_STRUCT_ENTRY) VM_STRUCTS_CMS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \ + GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \ GENERATE_STATIC_VM_STRUCT_ENTRY) #endif // SERIALGC @@ -2098,6 +2115,7 @@ CHECK_STATIC_VM_STRUCT_ENTRY); VM_STRUCTS_CMS(CHECK_NONSTATIC_VM_STRUCT_ENTRY, + CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY, CHECK_STATIC_VM_STRUCT_ENTRY); #endif // SERIALGC @@ -2202,6 +2220,7 @@ debug_only(VM_STRUCTS_PARALLELGC(ENSURE_FIELD_TYPE_PRESENT, \ ENSURE_FIELD_TYPE_PRESENT)); debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT, \ + ENSURE_FIELD_TYPE_PRESENT, \ ENSURE_FIELD_TYPE_PRESENT)); #endif // SERIALGC debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT, \ --- old/hotspot/src/share/vm/runtime/vm_operations.hpp 2009-08-01 04:15:37.325409125 +0100 +++ new/hotspot/src/share/vm/runtime/vm_operations.hpp 2009-08-01 04:15:37.240390126 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vm_operations.hpp 1.130 07/05/23 10:54:21 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. 
+ * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,11 +52,16 @@ template(GenCollectFull) \ template(GenCollectFullConcurrent) \ template(GenCollectForAllocation) \ + template(GenCollectForPermanentAllocation) \ template(ParallelGCFailedAllocation) \ template(ParallelGCFailedPermanentAllocation) \ template(ParallelGCSystemGC) \ + template(CGC_Operation) \ template(CMS_Initial_Mark) \ template(CMS_Final_Remark) \ + template(G1CollectFull) \ + template(G1CollectForAllocation) \ + template(G1IncCollectionPause) \ template(EnableBiasedLocking) \ template(RevokeBias) \ template(BulkRevokeBias) \ --- old/hotspot/src/share/vm/runtime/vm_version.cpp 2009-08-01 04:15:38.148065155 +0100 +++ new/hotspot/src/share/vm/runtime/vm_version.cpp 2009-08-01 04:15:38.063752659 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vm_version.cpp 1.59 07/08/17 11:47:16 JVM" #endif /* - * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,6 +55,8 @@ int Abstract_VM_Version::_vm_minor_version = 0; int Abstract_VM_Version::_vm_build_number = 0; bool Abstract_VM_Version::_initialized = false; +int Abstract_VM_Version::_parallel_worker_threads = 0; +bool Abstract_VM_Version::_parallel_worker_threads_initialized = false; void Abstract_VM_Version::initialize() { if (_initialized) { @@ -213,3 +215,43 @@ } #endif } + +unsigned int Abstract_VM_Version::nof_parallel_worker_threads( + unsigned int num, + unsigned int den, + unsigned int switch_pt) { + if (FLAG_IS_DEFAULT(ParallelGCThreads)) { + assert(ParallelGCThreads == 0, "Default ParallelGCThreads is not 0"); + // For very large machines, there are diminishing returns + // for large numbers of worker threads. Instead of + // hogging the whole system, use a fraction of the workers for every + // processor after the first 8. For example, on a 72 cpu machine + // and a chosen fraction of 5/8 + // use 8 + (72 - 8) * (5/8) == 48 worker threads. + unsigned int ncpus = (unsigned int) os::active_processor_count(); + return (ncpus <= switch_pt) ? + ncpus : + (switch_pt + ((ncpus - switch_pt) * num) / den); + } else { + return ParallelGCThreads; + } +} + +unsigned int Abstract_VM_Version::calc_parallel_worker_threads() { + return nof_parallel_worker_threads(5, 8, 8); +} + + +// Does not set the _initialized flag since it is +// a global flag. +unsigned int Abstract_VM_Version::parallel_worker_threads() { + if (!_parallel_worker_threads_initialized) { + if (FLAG_IS_DEFAULT(ParallelGCThreads)) { + _parallel_worker_threads = VM_Version::calc_parallel_worker_threads(); + } else { + _parallel_worker_threads = ParallelGCThreads; + } + _parallel_worker_threads_initialized = true; + } + return _parallel_worker_threads; +} --- old/hotspot/src/share/vm/runtime/vm_version.hpp 2009-08-01 04:15:39.052133392 +0100 +++ new/hotspot/src/share/vm/runtime/vm_version.hpp 2009-08-01 04:15:38.961660513 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vm_version.hpp 1.28 07/10/04 10:49:20 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
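A quick standalone check of the worker-thread scaling rule described in the nof_parallel_worker_threads() comment above; num/den/switch_pt of 5, 8 and 8 are the values passed by calc_parallel_worker_threads(), and the 72-cpu case reproduces the "48 worker threads" figure from the comment.

    #include <cstdio>

    static unsigned workers(unsigned ncpus, unsigned num = 5,
                            unsigned den = 8, unsigned switch_pt = 8) {
      // Use every cpu up to the switch point, then only a fraction of the rest.
      return ncpus <= switch_pt ? ncpus
                                : switch_pt + ((ncpus - switch_pt) * num) / den;
    }

    int main() {
      printf("%u\n", workers(4));   // 4  : below the switch point, use all cpus
      printf("%u\n", workers(72));  // 48 : 8 + (72 - 8) * 5 / 8
      return 0;
    }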
* * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,12 @@ static int _vm_minor_version; static int _vm_build_number; static bool _initialized; + static int _parallel_worker_threads; + static bool _parallel_worker_threads_initialized; + + static unsigned int nof_parallel_worker_threads(unsigned int num, + unsigned int dem, + unsigned int switch_pt); public: static void initialize(); @@ -72,5 +78,14 @@ // subclasses should define new versions to hide this one as needed. Note // that the O/S may support more sizes, but at most this many are used. static uint page_size_count() { return 2; } + + // Returns the number of parallel threads to be used for VM + // work. If that number has not been calculated, do so and + // save it. Returns ParallelGCThreads if it is set on the + // command line. + static unsigned int parallel_worker_threads(); + // Calculates and returns the number of parallel threads. May + // be VM version specific. + static unsigned int calc_parallel_worker_threads(); }; --- old/hotspot/src/share/vm/services/heapDumper.cpp 2009-08-01 04:15:39.979480023 +0100 +++ new/hotspot/src/share/vm/services/heapDumper.cpp 2009-08-01 04:15:39.890244723 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)heapDumper.cpp 1.24 07/07/22 22:35:59 JVM" #endif /* - * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -346,7 +346,8 @@ // Default stack trace ID (used for dummy HPROF_TRACE record) enum { - STACK_TRACE_ID = 1 + STACK_TRACE_ID = 1, + INITIAL_CLASS_COUNT = 200 }; @@ -411,6 +412,7 @@ void write_u8(u8 x); void write_objectID(oop o); void write_classID(Klass* k); + void write_id(u4 x); }; DumpWriter::DumpWriter(const char* path) { @@ -551,6 +553,14 @@ #endif } +void DumpWriter::write_id(u4 x) { +#ifdef _LP64 + write_u8((u8) x); +#else + write_u4(x); +#endif +} + // We use java mirror as the class ID void DumpWriter::write_classID(Klass* k) { write_objectID(k->java_mirror()); @@ -599,6 +609,8 @@ static void dump_object_array(DumpWriter* writer, objArrayOop array); // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array static void dump_prim_array(DumpWriter* writer, typeArrayOop array); + // create HPROF_FRAME record for the given method and bci + static void dump_stack_frame(DumpWriter* writer, int frame_serial_num, int class_serial_num, methodOop m, int bci); }; // write a header of the given type @@ -672,9 +684,13 @@ void DumperSupport::dump_field_value(DumpWriter* writer, char type, address addr) { switch (type) { case JVM_SIGNATURE_CLASS : - case JVM_SIGNATURE_ARRAY : { - oop* f = (oop*)addr; - oop o = *f; + case JVM_SIGNATURE_ARRAY : { + oop o; + if (UseCompressedOops) { + o = oopDesc::load_decode_heap_oop((narrowOop*)addr); + } else { + o = oopDesc::load_decode_heap_oop((oop*)addr); + } // reflection and sun.misc.Unsafe classes may have a reference to a // klassOop so filter it out. 
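The dump_field_value() change above is why the dumper has to distinguish narrowOop* from oop* fields: with compressed oops a field holds a 32-bit value that is decoded against a heap base, not a raw pointer. A simplified standalone model of that decode follows; the heap base and alignment shift here are assumptions for illustration, not the VM's actual values.

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t narrowOop;
    static char*    heap_base = nullptr;        // stands in for the VM's heap base
    static const int log_min_obj_alignment = 3; // assume 8-byte object alignment

    static void* decode(narrowOop v) {
      // 0 encodes null; anything else is a scaled offset from the heap base.
      return v == 0 ? nullptr
                    : heap_base + ((uintptr_t) v << log_min_obj_alignment);
    }

    int main() {
      static char fake_heap[1 << 16];
      heap_base = fake_heap;
      narrowOop field = 16;                     // compressed value stored in an object
      printf("decoded field -> %p (heap base %p)\n", decode(field), (void*) heap_base);
      return 0;
    }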
@@ -1000,7 +1016,7 @@ } // If the byte ordering is big endian then we can copy most types directly - int length_in_bytes = array->length() * type2aelembytes[type]; + int length_in_bytes = array->length() * type2aelembytes(type); assert(length_in_bytes > 0, "nothing to copy"); switch (type) { @@ -1069,6 +1085,29 @@ } } +// create a HPROF_FRAME record of the given methodOop and bci +void DumperSupport::dump_stack_frame(DumpWriter* writer, + int frame_serial_num, + int class_serial_num, + methodOop m, + int bci) { + int line_number; + if (m->is_native()) { + line_number = -3; // native frame + } else { + line_number = m->line_number_from_bci(bci); + } + + write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4)); + writer->write_id(frame_serial_num); // frame serial number + writer->write_objectID(m->name()); // method's name + writer->write_objectID(m->signature()); // method's signature + + assert(Klass::cast(m->method_holder())->oop_is_instance(), "not instanceKlass"); + writer->write_objectID(instanceKlass::cast(m->method_holder())->source_file_name()); // source file name + writer->write_u4(class_serial_num); // class serial number + writer->write_u4((u4) line_number); // line number +} // Support class used to generate HPROF_UTF8 records from the entries in the // SymbolTable. @@ -1080,6 +1119,7 @@ public: SymbolTableDumper(DumpWriter* writer) { _writer = writer; } void do_oop(oop* obj_p); + void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); } }; void SymbolTableDumper::do_oop(oop* obj_p) { @@ -1102,13 +1142,17 @@ private: DumpWriter* _writer; u4 _thread_serial_num; - DumpWriter* writer() const { return _writer; } + int _frame_num; + DumpWriter* writer() const { return _writer; } public: JNILocalsDumper(DumpWriter* writer, u4 thread_serial_num) { _writer = writer; _thread_serial_num = thread_serial_num; + _frame_num = -1; // default - empty stack } + void set_frame_number(int n) { _frame_num = n; } void do_oop(oop* obj_p); + void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); } }; @@ -1119,7 +1163,7 @@ writer()->write_u1(HPROF_GC_ROOT_JNI_LOCAL); writer()->write_objectID(o); writer()->write_u4(_thread_serial_num); - writer()->write_u4((u4)-1); // empty + writer()->write_u4((u4)_frame_num); } } @@ -1136,6 +1180,7 @@ _writer = writer; } void do_oop(oop* obj_p); + void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); } }; void JNIGlobalsDumper::do_oop(oop* obj_p) { @@ -1167,6 +1212,7 @@ writer()->write_u1(HPROF_GC_ROOT_MONITOR_USED); writer()->write_objectID(*obj_p); } + void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); } }; @@ -1181,6 +1227,7 @@ _writer = writer; } void do_oop(oop* obj_p); + void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); } }; void StickyClassDumper::do_oop(oop* obj_p) { @@ -1263,6 +1310,9 @@ bool _gc_before_heap_dump; bool _is_segmented_dump; jlong _dump_start; + GrowableArray* _klass_map; + ThreadStackTrace** _stack_traces; + int _num_threads; // accessors DumpWriter* writer() const { return _writer; } @@ -1285,9 +1335,16 @@ static void do_basic_type_array_class_dump(klassOop k); // HPROF_GC_ROOT_THREAD_OBJ records - void do_thread(JavaThread* thread, u4 thread_serial_num); + int do_thread(JavaThread* thread, u4 thread_serial_num); void do_threads(); + void add_class_serial_number(Klass* k, int serial_num) { + _klass_map->at_put_grow(serial_num, k); + } + + // HPROF_TRACE and HPROF_FRAME records + void dump_stack_traces(); + // writes a HPROF_HEAP_DUMP or HPROF_HEAP_DUMP_SEGMENT record void write_dump_header(); @@ -1307,6 +1364,18 @@ 
_gc_before_heap_dump = gc_before_heap_dump; _is_segmented_dump = false; _dump_start = (jlong)-1; + _klass_map = new (ResourceObj::C_HEAP) GrowableArray(INITIAL_CLASS_COUNT, true); + _stack_traces = NULL; + _num_threads = 0; + } + ~VM_HeapDumper() { + if (_stack_traces != NULL) { + for (int i=0; i < _num_threads; i++) { + delete _stack_traces[i]; + } + FREE_C_HEAP_ARRAY(ThreadStackTrace*, _stack_traces); + } + delete _klass_map; } VMOp_Type type() const { return VMOp_HeapDumper; } @@ -1430,6 +1499,9 @@ Klass* klass = Klass::cast(k); writer->write_classID(klass); + // add the klassOop and class serial number pair + dumper->add_class_serial_number(klass, class_serial_num); + writer->write_u4(STACK_TRACE_ID); // class name ID @@ -1459,15 +1531,15 @@ // Walk the stack of the given thread. // Dumps a HPROF_GC_ROOT_JAVA_FRAME record for each local // Dumps a HPROF_GC_ROOT_JNI_LOCAL record for each JNI local -void VM_HeapDumper::do_thread(JavaThread* java_thread, u4 thread_serial_num) { +// +// It returns the number of Java frames in this thread stack +int VM_HeapDumper::do_thread(JavaThread* java_thread, u4 thread_serial_num) { JNILocalsDumper blk(writer(), thread_serial_num); oop threadObj = java_thread->threadObj(); assert(threadObj != NULL, "sanity check"); - - // JNI locals for the top frame - java_thread->active_handles()->oops_do(&blk); + int stack_depth = 0; if (java_thread->has_last_Java_frame()) { // vframes are resource allocated @@ -1478,61 +1550,79 @@ RegisterMap reg_map(java_thread); frame f = java_thread->last_frame(); vframe* vf = vframe::new_vframe(&f, ®_map, java_thread); - + frame* last_entry_frame = NULL; + while (vf != NULL) { + blk.set_frame_number(stack_depth); if (vf->is_java_frame()) { - // java frame (interpreted, compiled, ...) - javaVFrame *jvf = javaVFrame::cast(vf); - - if (!(jvf->method()->is_native())) { - StackValueCollection* locals = jvf->locals(); - for (int slot=0; slotsize(); slot++) { - if (locals->at(slot)->type() == T_OBJECT) { - oop o = locals->obj_at(slot)(); + // java frame (interpreted, compiled, ...) + javaVFrame *jvf = javaVFrame::cast(vf); + if (!(jvf->method()->is_native())) { + StackValueCollection* locals = jvf->locals(); + for (int slot=0; slotsize(); slot++) { + if (locals->at(slot)->type() == T_OBJECT) { + oop o = locals->obj_at(slot)(); if (o != NULL) { writer()->write_u1(HPROF_GC_ROOT_JAVA_FRAME); writer()->write_objectID(o); writer()->write_u4(thread_serial_num); - writer()->write_u4((u4)-1); // empty - } - } - } - } - } else { + writer()->write_u4((u4) stack_depth); + } + } + } + } else { + // native frame + if (stack_depth == 0) { + // JNI locals for the top frame. 
+ java_thread->active_handles()->oops_do(&blk); + } else { + if (last_entry_frame != NULL) { + // JNI locals for the entry frame + assert(last_entry_frame->is_entry_frame(), "checking"); + last_entry_frame->entry_frame_call_wrapper()->handles()->oops_do(&blk); + } + } + } + // increment only for Java frames + stack_depth++; + last_entry_frame = NULL; - // externalVFrame - if it's an entry frame then report any JNI locals - // as roots - frame* fr = vf->frame_pointer(); + } else { + // externalVFrame - if it's an entry frame then report any JNI locals + // as roots when we find the corresponding native javaVFrame + frame* fr = vf->frame_pointer(); assert(fr != NULL, "sanity check"); if (fr->is_entry_frame()) { - fr->entry_frame_call_wrapper()->handles()->oops_do(&blk); - } + last_entry_frame = fr; + } } - vf = vf->sender(); - } + } + } else { + // no last java frame but there may be JNI locals + java_thread->active_handles()->oops_do(&blk); } + return stack_depth; } // write a HPROF_GC_ROOT_THREAD_OBJ record for each java thread. Then walk // the stack so that locals and JNI locals are dumped. void VM_HeapDumper::do_threads() { - u4 thread_serial_num = 0; - for (JavaThread* thread = Threads::first(); thread != NULL ; thread = thread->next()) { + for (int i=0; i < _num_threads; i++) { + JavaThread* thread = _stack_traces[i]->thread(); oop threadObj = thread->threadObj(); - if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) { - ++thread_serial_num; - - writer()->write_u1(HPROF_GC_ROOT_THREAD_OBJ); - writer()->write_objectID(threadObj); - writer()->write_u4(thread_serial_num); - writer()->write_u4(STACK_TRACE_ID); - - do_thread(thread, thread_serial_num); - } + u4 thread_serial_num = i+1; + u4 stack_serial_num = thread_serial_num + STACK_TRACE_ID; + writer()->write_u1(HPROF_GC_ROOT_THREAD_OBJ); + writer()->write_objectID(threadObj); + writer()->write_u4(thread_serial_num); // thread number + writer()->write_u4(stack_serial_num); // stack trace serial number + int num_frames = do_thread(thread, thread_serial_num); + assert(num_frames == _stack_traces[i]->get_stack_depth(), + "total number of Java frames not matched"); } } @@ -1541,16 +1631,16 @@ // records: // // HPROF_HEADER -// HPROF_TRACE // [HPROF_UTF8]* // [HPROF_LOAD_CLASS]* +// [[HPROF_FRAME]*|HPROF_TRACE]* // [HPROF_GC_CLASS_DUMP]* // HPROF_HEAP_DUMP // -// The HPROF_TRACE record after the header is "dummy trace" record which does -// not include any frames. Other records which require a stack trace ID will -// specify the trace ID of this record (1). It also means we can run HAT without -// needing the -stack false option. +// The HPROF_TRACE records represent the stack traces where the heap dump +// is generated and a "dummy trace" record which does not include +// any frames. The dummy trace record is used to be referenced as the +// unknown object alloc site. // // The HPROF_HEAP_DUMP record has a length following by sub-records. 
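A back-of-the-envelope check of the HPROF_FRAME body length used by dump_stack_frame() above: four identifiers (frame serial, method name, signature, source file) plus two u4 fields (class serial, line number), with the identifier width matching the pointer size advertised in the HPROF header. Standalone sketch, sizes only.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const size_t id_size = sizeof(void*);    // 8 on LP64, 4 on 32-bit builds
      const size_t u4_size = sizeof(uint32_t);
      // 4 identifiers + 2 u4 fields, matching 4*oopSize + 2*sizeof(u4) above.
      size_t frame_body = 4 * id_size + 2 * u4_size;
      printf("HPROF_FRAME body: %zu bytes\n", frame_body); // 40 on LP64, 24 on 32-bit
      return 0;
    }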
To allow // the heap dump be generated in a single pass we remember the position of @@ -1572,17 +1662,8 @@ } // Write the file header - use 1.0.2 for large heaps, otherwise 1.0.1 - size_t used; + size_t used = ch->used(); const char* header; -#ifndef SERIALGC - if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap) { - used = GenCollectedHeap::heap()->used(); - } else { - used = ParallelScavengeHeap::heap()->used(); - } -#else // SERIALGC - used = GenCollectedHeap::heap()->used(); -#endif // SERIALGC if (used > (size_t)SegmentedHeapDumpThreshold) { set_segmented_dump(); header = "JAVA PROFILE 1.0.2"; @@ -1595,12 +1676,6 @@ writer()->write_u4(oopSize); writer()->write_u8(os::javaTimeMillis()); - // HPROF_TRACE record without any frames - DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4)); - writer()->write_u4(STACK_TRACE_ID); - writer()->write_u4(0); // thread number - writer()->write_u4(0); // frame count - // HPROF_UTF8 records SymbolTableDumper sym_dumper(writer()); SymbolTable::oops_do(&sym_dumper); @@ -1609,6 +1684,10 @@ SystemDictionary::classes_do(&do_load_class); Universe::basic_type_classes_do(&do_load_class); + // write HPROF_FRAME and HPROF_TRACE records + // this must be called after _klass_map is built when iterating the classes above. + dump_stack_traces(); + // write HPROF_HEAP_DUMP or HPROF_HEAP_DUMP_SEGMENT write_dump_header(); @@ -1649,6 +1728,47 @@ end_of_dump(); } +void VM_HeapDumper::dump_stack_traces() { + // write a HPROF_TRACE record without any frames to be referenced as object alloc sites + DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4)); + writer()->write_u4((u4) STACK_TRACE_ID); + writer()->write_u4(0); // thread number + writer()->write_u4(0); // frame count + + _stack_traces = NEW_C_HEAP_ARRAY(ThreadStackTrace*, Threads::number_of_threads()); + int frame_serial_num = 0; + for (JavaThread* thread = Threads::first(); thread != NULL ; thread = thread->next()) { + oop threadObj = thread->threadObj(); + if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) { + // dump thread stack trace + ThreadStackTrace* stack_trace = new ThreadStackTrace(thread, false); + stack_trace->dump_stack_at_safepoint(-1); + _stack_traces[_num_threads++] = stack_trace; + + // write HPROF_FRAME records for this thread's stack trace + int depth = stack_trace->get_stack_depth(); + int thread_frame_start = frame_serial_num; + for (int j=0; j < depth; j++) { + StackFrameInfo* frame = stack_trace->stack_frame_at(j); + methodOop m = frame->method(); + int class_serial_num = _klass_map->find(Klass::cast(m->method_holder())); + // the class serial number starts from 1 + assert(class_serial_num > 0, "class not found"); + DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, class_serial_num, m, frame->bci()); + } + + // write HPROF_TRACE record for one thread + DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4) + depth*oopSize); + int stack_serial_num = _num_threads + STACK_TRACE_ID; + writer()->write_u4(stack_serial_num); // stack trace serial number + writer()->write_u4((u4) _num_threads); // thread serial number + writer()->write_u4(depth); // frame count + for (int j=1; j <= depth; j++) { + writer()->write_id(thread_frame_start + j); + } + } + } +} // dump the heap to given path. 
int HeapDumper::dump(const char* path) { --- old/hotspot/src/share/vm/services/management.cpp 2009-08-01 04:15:40.941064578 +0100 +++ new/hotspot/src/share/vm/services/management.cpp 2009-08-01 04:15:40.847814238 +0100 @@ -697,10 +697,10 @@ -1); } - if (threshold > max_intx) { - THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), - "Invalid threshold value > max value of size_t", - -1); + if ((size_t)threshold > max_uintx) { + stringStream st; + st.print("Invalid valid threshold value. Threshold value (" UINT64_FORMAT ") > max value of size_t (" SIZE_FORMAT ")", (size_t)threshold, max_uintx); + THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), st.as_string(), -1); } MemoryPool* pool = get_memory_pool_from_jobject(obj, CHECK_(0L)); @@ -889,7 +889,7 @@ int count = 0; for (int i = 0; i < nFlags; i++) { Flag* flag = &Flag::flags[i]; - // Exclude the diagnostic flags + // Exclude the locked (diagnostic, experimental) flags if (flag->is_unlocked() || flag->is_unlocker()) { count++; } @@ -1490,7 +1490,7 @@ int num_entries = 0; for (int i = 0; i < nFlags; i++) { Flag* flag = &Flag::flags[i]; - // Exclude the diagnostic flags + // Exclude the locked (experimental, diagnostic) flags if (flag->is_unlocked() || flag->is_unlocker()) { Handle s = java_lang_String::create_from_str(flag->name, CHECK_0); flags_ah->obj_at_put(num_entries, s()); @@ -1619,7 +1619,7 @@ int num_entries = 0; for (int i = 0; i < nFlags && num_entries < count; i++) { Flag* flag = &Flag::flags[i]; - // Exclude the diagnostic flags + // Exclude the locked (diagnostic, experimental) flags if (flag->is_unlocked() || flag->is_unlocker()) { add_global_entry(env, null_h, &globals[num_entries], flag, THREAD); num_entries++; --- old/hotspot/src/share/vm/services/memoryService.cpp 2009-08-01 04:15:41.845025139 +0100 +++ new/hotspot/src/share/vm/services/memoryService.cpp 2009-08-01 04:15:41.764557942 +0100 @@ -62,9 +62,13 @@ add_parallel_scavenge_heap_info(ParallelScavengeHeap::heap()); break; } + case CollectedHeap::G1CollectedHeap : { + G1CollectedHeap::g1_unimplemented(); + return; + } #endif // SERIALGC default: { - guarantee(false, "Not recognized kind of heap"); + guarantee(false, "Unrecognized kind of heap"); } } --- old/hotspot/src/share/vm/services/threadService.cpp 2009-08-01 04:15:42.678023083 +0100 +++ new/hotspot/src/share/vm/services/threadService.cpp 2009-08-01 04:15:42.587179104 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)threadService.cpp 1.54 07/05/17 16:07:12 JVM" #endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -544,6 +544,7 @@ Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) { klassOop k = SystemDictionary::stackTraceElement_klass(); + assert(k != NULL, "must be loaded in 1.4+"); instanceKlassHandle ik(THREAD, k); // Allocate an array of java/lang/StackTraceElement object @@ -746,7 +747,7 @@ } // Support for JSR-166 locks - if (JDK_Version::supports_thread_park_blocker() && + if (JDK_Version::current().supports_thread_park_blocker() && (_thread_status == java_lang_Thread::PARKED || _thread_status == java_lang_Thread::PARKED_TIMED)) { --- old/hotspot/src/share/vm/services/threadService.hpp 2009-08-01 04:15:43.636197829 +0100 +++ new/hotspot/src/share/vm/services/threadService.hpp 2009-08-01 04:15:43.555017053 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)threadService.hpp 1.43 07/10/03 11:09:53 JVM" #endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -245,6 +245,7 @@ ThreadStackTrace(JavaThread* thread, bool with_locked_monitors); ~ThreadStackTrace(); + JavaThread* thread() { return _thread; } StackFrameInfo* stack_frame_at(int i) { return _frames->at(i); } int get_stack_depth() { return _depth; } --- old/hotspot/src/share/vm/utilities/array.hpp 2009-08-01 04:15:44.520206709 +0100 +++ new/hotspot/src/share/vm/utilities/array.hpp 2009-08-01 04:15:44.447029144 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)array.hpp 1.15 07/05/05 17:07:07 JVM" #endif /* - * Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -43,11 +43,18 @@ _length = 0; _data = NULL; DEBUG_ONLY(init_nesting();) + // client may call initialize, at most once } ResourceArray(size_t esize, int length) { + DEBUG_ONLY(_data = NULL); + initialize(esize, length); + } + + void initialize(size_t esize, int length) { assert(length >= 0, "illegal length"); + assert(_data == NULL, "must be new object"); _length = length; _data = resource_allocate_bytes(esize * length); DEBUG_ONLY(init_nesting();) @@ -114,7 +121,10 @@ /* creation */ \ array_name() : base_class() {} \ array_name(const int length) : base_class(esize, length) {} \ - array_name(const int length, const etype fx) : base_class(esize, length) { \ + array_name(const int length, const etype fx) { initialize(length, fx); } \ + void initialize(const int length) { base_class::initialize(esize, length); } \ + void initialize(const int length, const etype fx) { \ + initialize(length); \ for (int i = 0; i < length; i++) ((etype*)_data)[i] = fx; \ } \ \ @@ -160,16 +170,29 @@ \ public: \ /* creation */ \ - stack_name() : array_name() { _size = 0; } \ - stack_name(const int size) : array_name(size){ _length = 0; _size = size; } \ - stack_name(const int size, const etype fx) : array_name(size, fx) { _size = size; } \ + stack_name() : array_name() { _size = 0; } \ + stack_name(const int size) { initialize(size); } \ + stack_name(const int size, const etype fx) { initialize(size, fx); } \ + void initialize(const int size, const etype fx) { \ + _size = size; \ + array_name::initialize(size, fx); \ + /* _length == size, allocation and size are the same */ \ + } \ + void initialize(const int size) { \ + _size = size; \ + array_name::initialize(size); \ + _length = 0; /* reset length to zero; _size records the allocation */ \ + } \ \ /* standard operations */ \ int size() const { return _size; } \ \ - void push(const etype x) { \ - if (length() >= size()) expand(esize, length(), _size); \ - ((etype*)_data)[_length++] = x; \ + int push(const etype x) { \ + int len = length(); \ + if (len >= size()) expand(esize, len, _size); \ + ((etype*)_data)[len] = x; \ + _length = len+1; \ + return len; \ } \ \ etype pop() { \ @@ -238,7 +261,7 @@ int capacity() const { return size(); } \ void clear() { truncate(0); } \ void trunc_to(const int length) { truncate(length); } \ - void append(const etype x) { push(x); } \ + int append(const etype x) { return push(x); } \ void appendAll(const stack_name* stack) { push_all(stack); } \ etype last() const { return top(); } \ }; \ --- old/hotspot/src/share/vm/utilities/bitMap.cpp 2009-08-01 04:15:45.383774424 +0100 +++ new/hotspot/src/share/vm/utilities/bitMap.cpp 2009-08-01 04:15:45.313687817 +0100 @@ -29,54 +29,59 @@ # include "incls/_bitMap.cpp.incl" -BitMap::BitMap(idx_t* map, idx_t size_in_bits) { +BitMap::BitMap(bm_word_t* map, idx_t size_in_bits) : + _map(map), _size(size_in_bits) +{ + assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption."); assert(size_in_bits >= 0, "just checking"); - _map = map; - _size = size_in_bits; } -BitMap::BitMap(idx_t size_in_bits) { - assert(size_in_bits >= 0, "just checking"); - _size = size_in_bits; - _map = NEW_RESOURCE_ARRAY(idx_t, size_in_words()); +BitMap::BitMap(idx_t size_in_bits, bool in_resource_area) : + _map(NULL), _size(0) +{ + assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption."); + resize(size_in_bits, in_resource_area); +} + + +void BitMap::verify_index(idx_t index) const { + assert(index < _size, "BitMap index 
out of bounds"); } +void BitMap::verify_range(idx_t beg_index, idx_t end_index) const { +#ifdef ASSERT + assert(beg_index <= end_index, "BitMap range error"); + // Note that [0,0) and [size,size) are both valid ranges. + if (end_index != _size) verify_index(end_index); +#endif +} -void BitMap::resize(idx_t size_in_bits) { +void BitMap::resize(idx_t size_in_bits, bool in_resource_area) { assert(size_in_bits >= 0, "just checking"); - size_t old_size_in_words = size_in_words(); - uintptr_t* old_map = map(); + idx_t old_size_in_words = size_in_words(); + bm_word_t* old_map = map(); + _size = size_in_bits; - size_t new_size_in_words = size_in_words(); - _map = NEW_RESOURCE_ARRAY(idx_t, new_size_in_words); - Copy::disjoint_words((HeapWord*) old_map, (HeapWord*) _map, MIN2(old_size_in_words, new_size_in_words)); + idx_t new_size_in_words = size_in_words(); + if (in_resource_area) { + _map = NEW_RESOURCE_ARRAY(bm_word_t, new_size_in_words); + } else { + if (old_map != NULL) FREE_C_HEAP_ARRAY(bm_word_t, _map); + _map = NEW_C_HEAP_ARRAY(bm_word_t, new_size_in_words); + } + Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map, + MIN2(old_size_in_words, new_size_in_words)); if (new_size_in_words > old_size_in_words) { clear_range_of_words(old_size_in_words, size_in_words()); } } -// Returns a bit mask for a range of bits [beg, end) within a single word. Each -// bit in the mask is 0 if the bit is in the range, 1 if not in the range. The -// returned mask can be used directly to clear the range, or inverted to set the -// range. Note: end must not be 0. -inline BitMap::idx_t -BitMap::inverted_bit_mask_for_range(idx_t beg, idx_t end) const { - assert(end != 0, "does not work when end == 0"); - assert(beg == end || word_index(beg) == word_index(end - 1), - "must be a single-word range"); - idx_t mask = bit_mask(beg) - 1; // low (right) bits - if (bit_in_word(end) != 0) { - mask |= ~(bit_mask(end) - 1); // high (left) bits - } - return mask; -} - void BitMap::set_range_within_word(idx_t beg, idx_t end) { // With a valid range (beg <= end), this test ensures that end != 0, as // required by inverted_bit_mask_for_range. Also avoids an unnecessary write. if (beg != end) { - idx_t mask = inverted_bit_mask_for_range(beg, end); + bm_word_t mask = inverted_bit_mask_for_range(beg, end); *word_addr(beg) |= ~mask; } } @@ -85,7 +90,7 @@ // With a valid range (beg <= end), this test ensures that end != 0, as // required by inverted_bit_mask_for_range. Also avoids an unnecessary write. if (beg != end) { - idx_t mask = inverted_bit_mask_for_range(beg, end); + bm_word_t mask = inverted_bit_mask_for_range(beg, end); *word_addr(beg) &= mask; } } @@ -108,20 +113,6 @@ } } -inline void BitMap::set_large_range_of_words(idx_t beg, idx_t end) { - memset(_map + beg, ~(unsigned char)0, (end - beg) * sizeof(uintptr_t)); -} - -inline void BitMap::clear_large_range_of_words(idx_t beg, idx_t end) { - memset(_map + beg, 0, (end - beg) * sizeof(uintptr_t)); -} - -inline BitMap::idx_t BitMap::word_index_round_up(idx_t bit) const { - idx_t bit_rounded_up = bit + (BitsPerWord - 1); - // Check for integer arithmetic overflow. - return bit_rounded_up > bit ? word_index(bit_rounded_up) : size_in_words(); -} - void BitMap::set_range(idx_t beg, idx_t end) { verify_range(beg, end); @@ -190,6 +181,64 @@ clear_range_within_word(bit_index(end_full_word), end); } +void BitMap::mostly_disjoint_range_union(BitMap* from_bitmap, + idx_t from_start_index, + idx_t to_start_index, + size_t word_num) { + // Ensure that the parameters are correct. 
+ // These shouldn't be that expensive to check, hence I left them as + // guarantees. + guarantee(from_bitmap->bit_in_word(from_start_index) == 0, + "it should be aligned on a word boundary"); + guarantee(bit_in_word(to_start_index) == 0, + "it should be aligned on a word boundary"); + guarantee(word_num >= 2, "word_num should be at least 2"); + + intptr_t* from = (intptr_t*) from_bitmap->word_addr(from_start_index); + intptr_t* to = (intptr_t*) word_addr(to_start_index); + + if (*from != 0) { + // if it's 0, then there's no point in doing the CAS + while (true) { + intptr_t old_value = *to; + intptr_t new_value = old_value | *from; + intptr_t res = Atomic::cmpxchg_ptr(new_value, to, old_value); + if (res == old_value) break; + } + } + ++from; + ++to; + + for (size_t i = 0; i < word_num - 2; ++i) { + if (*from != 0) { + // if it's 0, then there's no point in doing the CAS + assert(*to == 0, "nobody else should be writing here"); + intptr_t new_value = *from; + *to = new_value; + } + + ++from; + ++to; + } + + if (*from != 0) { + // if it's 0, then there's no point in doing the CAS + while (true) { + intptr_t old_value = *to; + intptr_t new_value = old_value | *from; + intptr_t res = Atomic::cmpxchg_ptr(new_value, to, old_value); + if (res == old_value) break; + } + } + + // the -1 is because we didn't advance them after the final CAS + assert(from == + (intptr_t*) from_bitmap->word_addr(from_start_index) + word_num - 1, + "invariant"); + assert(to == (intptr_t*) word_addr(to_start_index) + word_num - 1, + "invariant"); +} + void BitMap::at_put(idx_t offset, bool value) { if (value) { set_bit(offset); @@ -285,11 +334,11 @@ bool BitMap::contains(const BitMap other) const { assert(size() == other.size(), "must have same size"); - uintptr_t* dest_map = map(); - uintptr_t* other_map = other.map(); + bm_word_t* dest_map = map(); + bm_word_t* other_map = other.map(); idx_t size = size_in_words(); for (idx_t index = 0; index < size_in_words(); index++) { - uintptr_t word_union = dest_map[index] | other_map[index]; + bm_word_t word_union = dest_map[index] | other_map[index]; // If this has more bits set than dest_map[index], then other is not a // subset. 
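The first and last words in mostly_disjoint_range_union() above are merged with a compare-and-swap retry loop because other threads may be updating the same words. A modern-C++ analogue of that loop, using std::atomic in place of Atomic::cmpxchg_ptr, purely as an illustration of the technique:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    static void atomic_or_word(std::atomic<uintptr_t>* to, uintptr_t from) {
      if (from == 0) return;                      // nothing to merge, skip the CAS
      uintptr_t old_value = to->load();
      // On failure compare_exchange_weak refreshes old_value; retry with it.
      while (!to->compare_exchange_weak(old_value, old_value | from)) {
      }
    }

    int main() {
      std::atomic<uintptr_t> word{0x5};
      atomic_or_word(&word, 0x3);
      printf("%lx\n", (unsigned long) word.load()); // 7
      return 0;
    }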
if (word_union != dest_map[index]) return false; @@ -299,8 +348,8 @@ bool BitMap::intersects(const BitMap other) const { assert(size() == other.size(), "must have same size"); - uintptr_t* dest_map = map(); - uintptr_t* other_map = other.map(); + bm_word_t* dest_map = map(); + bm_word_t* other_map = other.map(); idx_t size = size_in_words(); for (idx_t index = 0; index < size_in_words(); index++) { if ((dest_map[index] & other_map[index]) != 0) return true; @@ -311,8 +360,8 @@ void BitMap::set_union(BitMap other) { assert(size() == other.size(), "must have same size"); - idx_t* dest_map = map(); - idx_t* other_map = other.map(); + bm_word_t* dest_map = map(); + bm_word_t* other_map = other.map(); idx_t size = size_in_words(); for (idx_t index = 0; index < size_in_words(); index++) { dest_map[index] = dest_map[index] | other_map[index]; @@ -322,8 +371,8 @@ void BitMap::set_difference(BitMap other) { assert(size() == other.size(), "must have same size"); - idx_t* dest_map = map(); - idx_t* other_map = other.map(); + bm_word_t* dest_map = map(); + bm_word_t* other_map = other.map(); idx_t size = size_in_words(); for (idx_t index = 0; index < size_in_words(); index++) { dest_map[index] = dest_map[index] & ~(other_map[index]); @@ -333,8 +382,8 @@ void BitMap::set_intersection(BitMap other) { assert(size() == other.size(), "must have same size"); - idx_t* dest_map = map(); - idx_t* other_map = other.map(); + bm_word_t* dest_map = map(); + bm_word_t* other_map = other.map(); idx_t size = size_in_words(); for (idx_t index = 0; index < size; index++) { dest_map[index] = dest_map[index] & other_map[index]; @@ -342,11 +391,26 @@ } +void BitMap::set_intersection_at_offset(BitMap other, idx_t offset) { + assert(other.size() >= offset, "offset not in range"); + assert(other.size() - offset >= size(), "other not large enough"); + // XXX Ideally, we would remove this restriction. 
+ guarantee((offset % (sizeof(bm_word_t) * BitsPerByte)) == 0, + "Only handle aligned cases so far."); + bm_word_t* dest_map = map(); + bm_word_t* other_map = other.map(); + idx_t offset_word_ind = word_index(offset); + idx_t size = size_in_words(); + for (idx_t index = 0; index < size; index++) { + dest_map[index] = dest_map[index] & other_map[offset_word_ind + index]; + } +} + bool BitMap::set_union_with_result(BitMap other) { assert(size() == other.size(), "must have same size"); bool changed = false; - idx_t* dest_map = map(); - idx_t* other_map = other.map(); + bm_word_t* dest_map = map(); + bm_word_t* other_map = other.map(); idx_t size = size_in_words(); for (idx_t index = 0; index < size; index++) { idx_t temp = map(index) | other_map[index]; @@ -360,11 +424,11 @@ bool BitMap::set_difference_with_result(BitMap other) { assert(size() == other.size(), "must have same size"); bool changed = false; - idx_t* dest_map = map(); - idx_t* other_map = other.map(); + bm_word_t* dest_map = map(); + bm_word_t* other_map = other.map(); idx_t size = size_in_words(); for (idx_t index = 0; index < size; index++) { - idx_t temp = dest_map[index] & ~(other_map[index]); + bm_word_t temp = dest_map[index] & ~(other_map[index]); changed = changed || (temp != dest_map[index]); dest_map[index] = temp; } @@ -375,12 +439,12 @@ bool BitMap::set_intersection_with_result(BitMap other) { assert(size() == other.size(), "must have same size"); bool changed = false; - idx_t* dest_map = map(); - idx_t* other_map = other.map(); + bm_word_t* dest_map = map(); + bm_word_t* other_map = other.map(); idx_t size = size_in_words(); for (idx_t index = 0; index < size; index++) { - idx_t orig = dest_map[index]; - idx_t temp = orig & other_map[index]; + bm_word_t orig = dest_map[index]; + bm_word_t temp = orig & other_map[index]; changed = changed || (temp != orig); dest_map[index] = temp; } @@ -390,8 +454,8 @@ void BitMap::set_from(BitMap other) { assert(size() == other.size(), "must have same size"); - idx_t* dest_map = map(); - idx_t* other_map = other.map(); + bm_word_t* dest_map = map(); + bm_word_t* other_map = other.map(); idx_t size = size_in_words(); for (idx_t index = 0; index < size; index++) { dest_map[index] = other_map[index]; @@ -401,8 +465,8 @@ bool BitMap::is_same(BitMap other) { assert(size() == other.size(), "must have same size"); - idx_t* dest_map = map(); - idx_t* other_map = other.map(); + bm_word_t* dest_map = map(); + bm_word_t* other_map = other.map(); idx_t size = size_in_words(); for (idx_t index = 0; index < size; index++) { if (dest_map[index] != other_map[index]) return false; @@ -411,24 +475,24 @@ } bool BitMap::is_full() const { - uintptr_t* word = map(); + bm_word_t* word = map(); idx_t rest = size(); for (; rest >= (idx_t) BitsPerWord; rest -= BitsPerWord) { - if (*word != (uintptr_t) AllBits) return false; + if (*word != (bm_word_t) AllBits) return false; word++; } - return rest == 0 || (*word | ~right_n_bits((int)rest)) == (uintptr_t) AllBits; + return rest == 0 || (*word | ~right_n_bits((int)rest)) == (bm_word_t) AllBits; } bool BitMap::is_empty() const { - uintptr_t* word = map(); + bm_word_t* word = map(); idx_t rest = size(); for (; rest >= (idx_t) BitsPerWord; rest -= BitsPerWord) { - if (*word != (uintptr_t) NoBits) return false; + if (*word != (bm_word_t) NoBits) return false; word++; } - return rest == 0 || (*word & right_n_bits((int)rest)) == (uintptr_t) NoBits; + return rest == 0 || (*word & right_n_bits((int)rest)) == (bm_word_t) NoBits; } void BitMap::clear_large() { @@ -439,7 
+503,7 @@ // then modifications in and to the left of the _bit_ being // currently sampled will not be seen. Note also that the // interval [leftOffset, rightOffset) is right open. -void BitMap::iterate(BitMapClosure* blk, idx_t leftOffset, idx_t rightOffset) { +bool BitMap::iterate(BitMapClosure* blk, idx_t leftOffset, idx_t rightOffset) { verify_range(leftOffset, rightOffset); idx_t startIndex = word_index(leftOffset); @@ -448,106 +512,71 @@ offset < rightOffset && index < endIndex; offset = (++index) << LogBitsPerWord) { idx_t rest = map(index) >> (offset & (BitsPerWord - 1)); - for (; offset < rightOffset && rest != (uintptr_t)NoBits; offset++) { + for (; offset < rightOffset && rest != (bm_word_t)NoBits; offset++) { if (rest & 1) { - blk->do_bit(offset); + if (!blk->do_bit(offset)) return false; // resample at each closure application // (see, for instance, CMS bug 4525989) rest = map(index) >> (offset & (BitsPerWord -1)); - // XXX debugging: remove - // The following assertion assumes that closure application - // doesn't clear bits (may not be true in general, e.g. G1). - assert(rest & 1, - "incorrect shift or closure application can clear bits?"); } rest = rest >> 1; } } + return true; } -BitMap::idx_t BitMap::get_next_one_offset(idx_t l_offset, - idx_t r_offset) const { - assert(l_offset <= size(), "BitMap index out of bounds"); - assert(r_offset <= size(), "BitMap index out of bounds"); - assert(l_offset <= r_offset, "l_offset > r_offset ?"); - - if (l_offset == r_offset) { - return l_offset; - } - idx_t index = word_index(l_offset); - idx_t r_index = word_index(r_offset-1) + 1; - idx_t res_offset = l_offset; - - // check bits including and to the _left_ of offset's position - idx_t pos = bit_in_word(res_offset); - idx_t res = map(index) >> pos; - if (res != (uintptr_t)NoBits) { - // find the position of the 1-bit - for (; !(res & 1); res_offset++) { - res = res >> 1; +BitMap::idx_t* BitMap::_pop_count_table = NULL; + +void BitMap::init_pop_count_table() { + if (_pop_count_table == NULL) { + BitMap::idx_t *table = NEW_C_HEAP_ARRAY(idx_t, 256); + for (uint i = 0; i < 256; i++) { + table[i] = num_set_bits(i); } - assert(res_offset >= l_offset, "just checking"); - return MIN2(res_offset, r_offset); - } - // skip over all word length 0-bit runs - for (index++; index < r_index; index++) { - res = map(index); - if (res != (uintptr_t)NoBits) { - // found a 1, return the offset - for (res_offset = index << LogBitsPerWord; !(res & 1); - res_offset++) { - res = res >> 1; - } - assert(res & 1, "tautology; see loop condition"); - assert(res_offset >= l_offset, "just checking"); - return MIN2(res_offset, r_offset); + + intptr_t res = Atomic::cmpxchg_ptr((intptr_t) table, + (intptr_t*) &_pop_count_table, + (intptr_t) NULL_WORD); + if (res != NULL_WORD) { + guarantee( _pop_count_table == (void*) res, "invariant" ); + FREE_C_HEAP_ARRAY(bm_word_t, table); } } - return r_offset; } -BitMap::idx_t BitMap::get_next_zero_offset(idx_t l_offset, - idx_t r_offset) const { - assert(l_offset <= size(), "BitMap index out of bounds"); - assert(r_offset <= size(), "BitMap index out of bounds"); - assert(l_offset <= r_offset, "l_offset > r_offset ?"); - - if (l_offset == r_offset) { - return l_offset; - } - idx_t index = word_index(l_offset); - idx_t r_index = word_index(r_offset-1) + 1; - idx_t res_offset = l_offset; - - // check bits including and to the _left_ of offset's position - idx_t pos = res_offset & (BitsPerWord - 1); - idx_t res = (map(index) >> pos) | left_n_bits((int)pos); - - if (res != 
(uintptr_t)AllBits) { - // find the position of the 0-bit - for (; res & 1; res_offset++) { - res = res >> 1; +BitMap::idx_t BitMap::num_set_bits(bm_word_t w) { + idx_t bits = 0; + + while (w != 0) { + while ((w & 1) == 0) { + w >>= 1; } - assert(res_offset >= l_offset, "just checking"); - return MIN2(res_offset, r_offset); + bits++; + w >>= 1; } - // skip over all word length 1-bit runs - for (index++; index < r_index; index++) { - res = map(index); - if (res != (uintptr_t)AllBits) { - // found a 0, return the offset - for (res_offset = index << LogBitsPerWord; res & 1; - res_offset++) { - res = res >> 1; - } - assert(!(res & 1), "tautology; see loop condition"); - assert(res_offset >= l_offset, "just checking"); - return MIN2(res_offset, r_offset); + return bits; +} + +BitMap::idx_t BitMap::num_set_bits_from_table(unsigned char c) { + assert(_pop_count_table != NULL, "precondition"); + return _pop_count_table[c]; +} + +BitMap::idx_t BitMap::count_one_bits() const { + init_pop_count_table(); // If necessary. + idx_t sum = 0; + typedef unsigned char uchar; + for (idx_t i = 0; i < size_in_words(); i++) { + bm_word_t w = map()[i]; + for (size_t j = 0; j < sizeof(bm_word_t); j++) { + sum += num_set_bits_from_table(uchar(w & 255)); + w >>= 8; } } - return r_offset; + return sum; } + #ifndef PRODUCT void BitMap::print_on(outputStream* st) const { @@ -561,7 +590,7 @@ #endif -BitMap2D::BitMap2D(uintptr_t* map, idx_t size_in_slots, idx_t bits_per_slot) +BitMap2D::BitMap2D(bm_word_t* map, idx_t size_in_slots, idx_t bits_per_slot) : _bits_per_slot(bits_per_slot) , _map(map, size_in_slots * bits_per_slot) { --- old/hotspot/src/share/vm/utilities/bitMap.hpp 2009-08-01 04:15:46.251031485 +0100 +++ new/hotspot/src/share/vm/utilities/bitMap.hpp 2009-08-01 04:15:46.172107057 +0100 @@ -25,25 +25,19 @@ * */ -// Closure for iterating over BitMaps - -class BitMapClosure VALUE_OBJ_CLASS_SPEC { - public: - // Callback when bit in map is set - virtual void do_bit(size_t offset) = 0; -}; - +// Forward decl; +class BitMapClosure; -// Operations for bitmaps represented as arrays of unsigned 32- or 64-bit -// integers (uintptr_t). -// -// Bit offsets are numbered from 0 to size-1 +// Operations for bitmaps represented as arrays of unsigned integers. +// Bit offsets are numbered from 0 to size-1. class BitMap VALUE_OBJ_CLASS_SPEC { friend class BitMap2D; public: - typedef size_t idx_t; // Type used for bit and word indices. + typedef size_t idx_t; // Type used for bit and word indices. + typedef uintptr_t bm_word_t; // Element type of array that represents + // the bitmap. // Hints for range sizes. typedef enum { @@ -51,8 +45,8 @@ } RangeSizeHint; private: - idx_t* _map; // First word in bitmap - idx_t _size; // Size of bitmap (in bits) + bm_word_t* _map; // First word in bitmap + idx_t _size; // Size of bitmap (in bits) // Puts the given value at the given offset, using resize() to size // the bitmap appropriately if needed using factor-of-two expansion. @@ -65,7 +59,7 @@ // Return a mask that will select the specified bit, when applied to the word // containing the bit. - static idx_t bit_mask(idx_t bit) { return (idx_t)1 << bit_in_word(bit); } + static bm_word_t bit_mask(idx_t bit) { return (bm_word_t)1 << bit_in_word(bit); } // Return the index of the word containing the specified bit. static idx_t word_index(idx_t bit) { return bit >> LogBitsPerWord; } @@ -74,66 +68,68 @@ static idx_t bit_index(idx_t word) { return word << LogBitsPerWord; } // Return the array of bitmap words, or a specific word from it. 
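The count_one_bits()/init_pop_count_table() code above counts set bits by looking up one byte at a time in a 256-entry table that is built lazily (and, in the patch, installed with a CAS so racing initializers agree on one table). For illustration, a minimal standalone sketch of the same byte-table idea, without the atomic installation and with illustrative names that are not part of the HotSpot sources:

    #include <cstdio>
    #include <cstdint>
    #include <cstddef>

    // Entry i holds the number of set bits in the byte value i; built once on demand.
    static unsigned char pop_count_table[256];
    static bool pop_count_table_ready = false;

    static void init_pop_count_table() {
      if (!pop_count_table_ready) {
        for (unsigned i = 0; i < 256; i++) {
          unsigned v = i, bits = 0;
          while (v != 0) { bits += (v & 1u); v >>= 1; }   // slow count, done once per byte value
          pop_count_table[i] = (unsigned char) bits;
        }
        pop_count_table_ready = true;
      }
    }

    // Count set bits over an array of words by table lookup, one byte at a time.
    static size_t count_one_bits(const uintptr_t* map, size_t size_in_words) {
      init_pop_count_table();
      size_t sum = 0;
      for (size_t i = 0; i < size_in_words; i++) {
        uintptr_t w = map[i];
        for (size_t j = 0; j < sizeof(uintptr_t); j++) {
          sum += pop_count_table[w & 0xff];
          w >>= 8;
        }
      }
      return sum;
    }

    int main() {
      uintptr_t words[2] = { 0xF0F0, 0x1 };
      std::printf("%zu\n", count_one_bits(words, 2));    // prints 9
    }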
- idx_t* map() const { return _map; } - idx_t map(idx_t word) const { return _map[word]; } + bm_word_t* map() const { return _map; } + bm_word_t map(idx_t word) const { return _map[word]; } // Return a pointer to the word containing the specified bit. - idx_t* word_addr(idx_t bit) const { return map() + word_index(bit); } + bm_word_t* word_addr(idx_t bit) const { return map() + word_index(bit); } // Set a word to a specified value or to all ones; clear a word. - void set_word (idx_t word, idx_t val) { _map[word] = val; } + void set_word (idx_t word, bm_word_t val) { _map[word] = val; } void set_word (idx_t word) { set_word(word, ~(uintptr_t)0); } void clear_word(idx_t word) { _map[word] = 0; } // Utilities for ranges of bits. Ranges are half-open [beg, end). // Ranges within a single word. - inline idx_t inverted_bit_mask_for_range(idx_t beg, idx_t end) const; - inline void set_range_within_word (idx_t beg, idx_t end); - inline void clear_range_within_word (idx_t beg, idx_t end); - inline void par_put_range_within_word (idx_t beg, idx_t end, bool value); + bm_word_t inverted_bit_mask_for_range(idx_t beg, idx_t end) const; + void set_range_within_word (idx_t beg, idx_t end); + void clear_range_within_word (idx_t beg, idx_t end); + void par_put_range_within_word (idx_t beg, idx_t end, bool value); // Ranges spanning entire words. - inline void set_range_of_words (idx_t beg, idx_t end); - inline void clear_range_of_words (idx_t beg, idx_t end); - inline void set_large_range_of_words (idx_t beg, idx_t end); - inline void clear_large_range_of_words (idx_t beg, idx_t end); + void set_range_of_words (idx_t beg, idx_t end); + void clear_range_of_words (idx_t beg, idx_t end); + void set_large_range_of_words (idx_t beg, idx_t end); + void clear_large_range_of_words (idx_t beg, idx_t end); // The index of the first full word in a range. - inline idx_t word_index_round_up(idx_t bit) const; + idx_t word_index_round_up(idx_t bit) const; // Verification, statistics. - void verify_index(idx_t index) const { - assert(index < _size, "BitMap index out of bounds"); - } + void verify_index(idx_t index) const; + void verify_range(idx_t beg_index, idx_t end_index) const; - void verify_range(idx_t beg_index, idx_t end_index) const { -#ifdef ASSERT - assert(beg_index <= end_index, "BitMap range error"); - // Note that [0,0) and [size,size) are both valid ranges. - if (end_index != _size) verify_index(end_index); -#endif - } + static idx_t* _pop_count_table; + static void init_pop_count_table(); + static idx_t num_set_bits(bm_word_t w); + static idx_t num_set_bits_from_table(unsigned char c); public: // Constructs a bitmap with no map, and size 0. BitMap() : _map(NULL), _size(0) {} - // Construction - BitMap(idx_t* map, idx_t size_in_bits); + // Constructs a bitmap with the given map and size. + BitMap(bm_word_t* map, idx_t size_in_bits); - // Allocates necessary data structure in resource area - BitMap(idx_t size_in_bits); + // Constructs an empty bitmap of the given size (that is, this clears the + // new bitmap). Allocates the map array in resource area if + // "in_resource_area" is true, else in the C heap. + BitMap(idx_t size_in_bits, bool in_resource_area = true); - void set_map(idx_t* map) { _map = map; } + // Set the map and size. + void set_map(bm_word_t* map) { _map = map; } void set_size(idx_t size_in_bits) { _size = size_in_bits; } - // Allocates necessary data structure in resource area. 
+ // Allocates necessary data structure, either in the resource area + // or in the C heap, as indicated by "in_resource_area." // Preserves state currently in bit map by copying data. // Zeros any newly-addressable bits. - // Does not perform any frees (i.e., of current _map). - void resize(idx_t size_in_bits); + // If "in_resource_area" is false, frees the current map. + // (Note that this assumes that all calls to "resize" on the same BitMap + // use the same value for "in_resource_area".) + void resize(idx_t size_in_bits, bool in_resource_area = true); // Accessing idx_t size() const { return _size; } @@ -160,11 +156,11 @@ // Set or clear the specified bit. inline void set_bit(idx_t bit); - inline void clear_bit(idx_t bit); + void clear_bit(idx_t bit); // Atomically set or clear the specified bit. - inline bool par_set_bit(idx_t bit); - inline bool par_clear_bit(idx_t bit); + bool par_set_bit(idx_t bit); + bool par_clear_bit(idx_t bit); // Put the given value at the given offset. The parallel version // will CAS the value into the bitmap and is quite a bit slower. @@ -186,23 +182,61 @@ // Update a range of bits, using a hint about the size. Currently only // inlines the predominant case of a 1-bit range. Works best when hint is a // compile-time constant. - inline void set_range(idx_t beg, idx_t end, RangeSizeHint hint); - inline void clear_range(idx_t beg, idx_t end, RangeSizeHint hint); - inline void par_set_range(idx_t beg, idx_t end, RangeSizeHint hint); - inline void par_clear_range (idx_t beg, idx_t end, RangeSizeHint hint); + void set_range(idx_t beg, idx_t end, RangeSizeHint hint); + void clear_range(idx_t beg, idx_t end, RangeSizeHint hint); + void par_set_range(idx_t beg, idx_t end, RangeSizeHint hint); + void par_clear_range (idx_t beg, idx_t end, RangeSizeHint hint); + + // It performs the union operation between subsets of equal length + // of two bitmaps (the target bitmap of the method and the + // from_bitmap) and stores the result to the target bitmap. The + // from_start_index represents the first bit index of the subrange + // of the from_bitmap. The to_start_index is the equivalent of the + // target bitmap. Both indexes should be word-aligned, i.e. they + // should correspond to the first bit on a bitmap word (it's up to + // the caller to ensure this; the method does check it). The length + // of the subset is specified with word_num and it is in number of + // bitmap words. The caller should ensure that this is at least 2 + // (smaller ranges are not support to save extra checks). Again, + // this is checked in the method. + // + // Atomicity concerns: it is assumed that any contention on the + // target bitmap with other threads will happen on the first and + // last words; the ones in between will be "owned" exclusively by + // the calling thread and, in fact, they will already be 0. So, the + // method performs a CAS on the first word, copies the next + // word_num-2 words, and finally performs a CAS on the last word. + void mostly_disjoint_range_union(BitMap* from_bitmap, + idx_t from_start_index, + idx_t to_start_index, + size_t word_num); + // Clearing - void clear(); void clear_large(); - - // Iteration support - void iterate(BitMapClosure* blk, idx_t leftIndex, idx_t rightIndex); - inline void iterate(BitMapClosure* blk) { + inline void clear(); + + // Iteration support. Returns "true" if the iteration completed, false + // if the iteration terminated early (because the closure "blk" returned + // false). 
+ bool iterate(BitMapClosure* blk, idx_t leftIndex, idx_t rightIndex); + bool iterate(BitMapClosure* blk) { // call the version that takes an interval - iterate(blk, 0, size()); + return iterate(blk, 0, size()); } - // Looking for 1's and 0's to the "right" + // Looking for 1's and 0's at indices equal to or greater than "l_index", + // stopping if none has been found before "r_index", and returning + // "r_index" (which must be at most "size") in that case. + idx_t get_next_one_offset_inline (idx_t l_index, idx_t r_index) const; + idx_t get_next_zero_offset_inline(idx_t l_index, idx_t r_index) const; + + // Like "get_next_one_offset_inline", except requires that "r_index" is + // aligned to bitsizeof(bm_word_t). + idx_t get_next_one_offset_inline_aligned_right(idx_t l_index, + idx_t r_index) const; + + // Non-inline versionsof the above. idx_t get_next_one_offset (idx_t l_index, idx_t r_index) const; idx_t get_next_zero_offset(idx_t l_index, idx_t r_index) const; @@ -213,12 +247,8 @@ return get_next_zero_offset(offset, size()); } - - - // Find the next one bit in the range [beg_bit, end_bit), or return end_bit if - // no one bit is found. Equivalent to get_next_one_offset(), but inline for - // use in performance-critical code. - inline idx_t find_next_one_bit(idx_t beg_bit, idx_t end_bit) const; + // Returns the number of bits set in the bitmap. + idx_t count_one_bits() const; // Set operations. void set_union(BitMap bits); @@ -235,6 +265,15 @@ bool set_difference_with_result(BitMap bits); bool set_intersection_with_result(BitMap bits); + // Requires the submap of "bits" starting at offset to be at least as + // large as "this". Modifies "this" to be the intersection of its + // current contents and the submap of "bits" starting at "offset" of the + // same length as "this." + // (For expedience, currently requires the offset to be aligned to the + // bitsize of a uintptr_t. This should go away in the future though it + // will probably remain a good case to optimize.) + void set_intersection_at_offset(BitMap bits, idx_t offset); + void set_from(BitMap bits); bool is_same(BitMap bits); @@ -251,58 +290,13 @@ #endif }; -inline void BitMap::set_bit(idx_t bit) { - verify_index(bit); - *word_addr(bit) |= bit_mask(bit); -} - -inline void BitMap::clear_bit(idx_t bit) { - verify_index(bit); - *word_addr(bit) &= ~bit_mask(bit); -} - -inline void BitMap::set_range(idx_t beg, idx_t end, RangeSizeHint hint) { - if (hint == small_range && end - beg == 1) { - set_bit(beg); - } else { - if (hint == large_range) { - set_large_range(beg, end); - } else { - set_range(beg, end); - } - } -} - -inline void BitMap::clear_range(idx_t beg, idx_t end, RangeSizeHint hint) { - if (hint == small_range && end - beg == 1) { - clear_bit(beg); - } else { - if (hint == large_range) { - clear_large_range(beg, end); - } else { - clear_range(beg, end); - } - } -} - -inline void BitMap::par_set_range(idx_t beg, idx_t end, RangeSizeHint hint) { - if (hint == small_range && end - beg == 1) { - par_at_put(beg, true); - } else { - if (hint == large_range) { - par_at_put_large_range(beg, end, true); - } else { - par_at_put_range(beg, end, true); - } - } -} - // Convenience class wrapping BitMap which provides multiple bits per slot. class BitMap2D VALUE_OBJ_CLASS_SPEC { public: - typedef size_t idx_t; // Type used for bit and word indices. - + typedef BitMap::idx_t idx_t; // Type used for bit and word indices. + typedef BitMap::bm_word_t bm_word_t; // Element type of array that + // represents the bitmap. 
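With this change BitMap::iterate() and BitMapClosure::do_bit() return bool, so a closure can cut the walk short (the old void protocol always visited every set bit). A small self-contained model of the protocol, using illustrative names rather than the HotSpot classes themselves:

    #include <cstdio>
    #include <cstdint>
    #include <cstddef>

    // Minimal model of the reworked iteration protocol: do_bit() returns a bool,
    // and returning false stops the walk.
    class BitMapClosureModel {
     public:
      virtual bool do_bit(size_t offset) = 0;
      virtual ~BitMapClosureModel() {}
    };

    static const size_t BITS_PER_WORD = 8 * sizeof(uintptr_t);

    // Returns true if the whole range was visited, false if the closure ended it early.
    static bool iterate(const uintptr_t* map, size_t size_in_bits, BitMapClosureModel* blk) {
      for (size_t bit = 0; bit < size_in_bits; bit++) {
        uintptr_t word = map[bit / BITS_PER_WORD];
        if ((word >> (bit % BITS_PER_WORD)) & 1) {
          if (!blk->do_bit(bit)) return false;           // early termination
        }
      }
      return true;
    }

    // Example closure: remember the first set bit, then ask the iteration to stop.
    class FindFirstSetBit : public BitMapClosureModel {
     public:
      size_t found;
      FindFirstSetBit() : found((size_t)-1) {}
      virtual bool do_bit(size_t offset) { found = offset; return false; }
    };

    int main() {
      uintptr_t word = 0x50;                             // bits 4 and 6 set
      FindFirstSetBit blk;
      bool completed = iterate(&word, BITS_PER_WORD, &blk);
      std::printf("completed=%d first=%zu\n", (int)completed, blk.found);  // completed=0 first=4
    }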
private: BitMap _map; idx_t _bits_per_slot; @@ -317,7 +311,7 @@ public: // Construction. bits_per_slot must be greater than 0. - BitMap2D(uintptr_t* map, idx_t size_in_slots, idx_t bits_per_slot); + BitMap2D(bm_word_t* map, idx_t size_in_slots, idx_t bits_per_slot); // Allocates necessary data structure in resource area. bits_per_slot must be greater than 0. BitMap2D(idx_t size_in_slots, idx_t bits_per_slot); @@ -361,40 +355,15 @@ verify_bit_within_slot_index(bit_within_slot_index); _map.at_put_grow(bit_index(slot_index, bit_within_slot_index), value); } - - void clear() { - _map.clear(); - } -}; - + void clear(); +}; -inline void BitMap::set_range_of_words(idx_t beg, idx_t end) { - uintptr_t* map = _map; - for (idx_t i = beg; i < end; ++i) map[i] = ~(uintptr_t)0; -} - - -inline void BitMap::clear_range_of_words(idx_t beg, idx_t end) { - uintptr_t* map = _map; - for (idx_t i = beg; i < end; ++i) map[i] = 0; -} - - -inline void BitMap::clear() { - clear_range_of_words(0, size_in_words()); -} - - -inline void BitMap::par_clear_range(idx_t beg, idx_t end, RangeSizeHint hint) { - if (hint == small_range && end - beg == 1) { - par_at_put(beg, false); - } else { - if (hint == large_range) { - par_at_put_large_range(beg, end, false); - } else { - par_at_put_range(beg, end, false); - } - } -} +// Closure for iterating over BitMaps +class BitMapClosure VALUE_OBJ_CLASS_SPEC { + public: + // Callback when bit in map is set. Should normally return "true"; + // return of false indicates that the bitmap iteration should terminate. + virtual bool do_bit(BitMap::idx_t offset) = 0; +}; --- old/hotspot/src/share/vm/utilities/bitMap.inline.hpp 2009-08-01 04:15:47.180666392 +0100 +++ new/hotspot/src/share/vm/utilities/bitMap.inline.hpp 2009-08-01 04:15:47.104722431 +0100 @@ -25,6 +25,17 @@ * */ + +inline void BitMap::set_bit(idx_t bit) { + verify_index(bit); + *word_addr(bit) |= bit_mask(bit); +} + +inline void BitMap::clear_bit(idx_t bit) { + verify_index(bit); + *word_addr(bit) &= ~bit_mask(bit); +} + inline bool BitMap::par_set_bit(idx_t bit) { verify_index(bit); volatile idx_t* const addr = word_addr(bit); @@ -67,42 +78,236 @@ } while (true); } +inline void BitMap::set_range(idx_t beg, idx_t end, RangeSizeHint hint) { + if (hint == small_range && end - beg == 1) { + set_bit(beg); + } else { + if (hint == large_range) { + set_large_range(beg, end); + } else { + set_range(beg, end); + } + } +} + +inline void BitMap::clear_range(idx_t beg, idx_t end, RangeSizeHint hint) { + if (hint == small_range && end - beg == 1) { + clear_bit(beg); + } else { + if (hint == large_range) { + clear_large_range(beg, end); + } else { + clear_range(beg, end); + } + } +} + +inline void BitMap::par_set_range(idx_t beg, idx_t end, RangeSizeHint hint) { + if (hint == small_range && end - beg == 1) { + par_at_put(beg, true); + } else { + if (hint == large_range) { + par_at_put_large_range(beg, end, true); + } else { + par_at_put_range(beg, end, true); + } + } +} + +inline void BitMap::set_range_of_words(idx_t beg, idx_t end) { + bm_word_t* map = _map; + for (idx_t i = beg; i < end; ++i) map[i] = ~(uintptr_t)0; +} + + +inline void BitMap::clear_range_of_words(idx_t beg, idx_t end) { + bm_word_t* map = _map; + for (idx_t i = beg; i < end; ++i) map[i] = 0; +} + + +inline void BitMap::clear() { + clear_range_of_words(0, size_in_words()); +} + + +inline void BitMap::par_clear_range(idx_t beg, idx_t end, RangeSizeHint hint) { + if (hint == small_range && end - beg == 1) { + par_at_put(beg, false); + } else { + if (hint == large_range) { 
+ par_at_put_large_range(beg, end, false); + } else { + par_at_put_range(beg, end, false); + } + } +} + inline BitMap::idx_t -BitMap::find_next_one_bit(idx_t beg_bit, idx_t end_bit) const -{ - verify_range(beg_bit, end_bit); - assert(bit_in_word(end_bit) == 0, "end_bit not word-aligned"); +BitMap::get_next_one_offset_inline(idx_t l_offset, idx_t r_offset) const { + assert(l_offset <= size(), "BitMap index out of bounds"); + assert(r_offset <= size(), "BitMap index out of bounds"); + assert(l_offset <= r_offset, "l_offset > r_offset ?"); + + if (l_offset == r_offset) { + return l_offset; + } + idx_t index = word_index(l_offset); + idx_t r_index = word_index(r_offset-1) + 1; + idx_t res_offset = l_offset; + + // check bits including and to the _left_ of offset's position + idx_t pos = bit_in_word(res_offset); + idx_t res = map(index) >> pos; + if (res != (uintptr_t)NoBits) { + // find the position of the 1-bit + for (; !(res & 1); res_offset++) { + res = res >> 1; + } + assert(res_offset >= l_offset && + res_offset < r_offset, "just checking"); + return MIN2(res_offset, r_offset); + } + // skip over all word length 0-bit runs + for (index++; index < r_index; index++) { + res = map(index); + if (res != (uintptr_t)NoBits) { + // found a 1, return the offset + for (res_offset = bit_index(index); !(res & 1); res_offset++) { + res = res >> 1; + } + assert(res & 1, "tautology; see loop condition"); + assert(res_offset >= l_offset, "just checking"); + return MIN2(res_offset, r_offset); + } + } + return r_offset; +} - if (beg_bit == end_bit) { - return beg_bit; +inline BitMap::idx_t +BitMap::get_next_zero_offset_inline(idx_t l_offset, idx_t r_offset) const { + assert(l_offset <= size(), "BitMap index out of bounds"); + assert(r_offset <= size(), "BitMap index out of bounds"); + assert(l_offset <= r_offset, "l_offset > r_offset ?"); + + if (l_offset == r_offset) { + return l_offset; } + idx_t index = word_index(l_offset); + idx_t r_index = word_index(r_offset-1) + 1; + idx_t res_offset = l_offset; + + // check bits including and to the _left_ of offset's position + idx_t pos = res_offset & (BitsPerWord - 1); + idx_t res = (map(index) >> pos) | left_n_bits((int)pos); - idx_t index = word_index(beg_bit); - idx_t r_index = word_index(end_bit); - idx_t res_bit = beg_bit; + if (res != (uintptr_t)AllBits) { + // find the position of the 0-bit + for (; res & 1; res_offset++) { + res = res >> 1; + } + assert(res_offset >= l_offset, "just checking"); + return MIN2(res_offset, r_offset); + } + // skip over all word length 1-bit runs + for (index++; index < r_index; index++) { + res = map(index); + if (res != (uintptr_t)AllBits) { + // found a 0, return the offset + for (res_offset = index << LogBitsPerWord; res & 1; + res_offset++) { + res = res >> 1; + } + assert(!(res & 1), "tautology; see loop condition"); + assert(res_offset >= l_offset, "just checking"); + return MIN2(res_offset, r_offset); + } + } + return r_offset; +} + +inline BitMap::idx_t +BitMap::get_next_one_offset_inline_aligned_right(idx_t l_offset, + idx_t r_offset) const +{ + verify_range(l_offset, r_offset); + assert(bit_in_word(r_offset) == 0, "r_offset not word-aligned"); + + if (l_offset == r_offset) { + return l_offset; + } + idx_t index = word_index(l_offset); + idx_t r_index = word_index(r_offset); + idx_t res_offset = l_offset; // check bits including and to the _left_ of offset's position - idx_t res = map(index) >> bit_in_word(res_bit); - if (res != (uintptr_t) NoBits) { + idx_t res = map(index) >> bit_in_word(res_offset); + if (res 
!= (uintptr_t)NoBits) { // find the position of the 1-bit - for (; !(res & 1); res_bit++) { + for (; !(res & 1); res_offset++) { res = res >> 1; } - assert(res_bit >= beg_bit && res_bit < end_bit, "just checking"); - return res_bit; + assert(res_offset >= l_offset && + res_offset < r_offset, "just checking"); + return res_offset; } // skip over all word length 0-bit runs for (index++; index < r_index; index++) { res = map(index); - if (res != (uintptr_t) NoBits) { + if (res != (uintptr_t)NoBits) { // found a 1, return the offset - for (res_bit = bit_index(index); !(res & 1); res_bit++) { + for (res_offset = bit_index(index); !(res & 1); res_offset++) { res = res >> 1; } assert(res & 1, "tautology; see loop condition"); - assert(res_bit >= beg_bit && res_bit < end_bit, "just checking"); - return res_bit; + assert(res_offset >= l_offset && res_offset < r_offset, "just checking"); + return res_offset; } } - return end_bit; + return r_offset; +} + + +// Returns a bit mask for a range of bits [beg, end) within a single word. Each +// bit in the mask is 0 if the bit is in the range, 1 if not in the range. The +// returned mask can be used directly to clear the range, or inverted to set the +// range. Note: end must not be 0. +inline BitMap::bm_word_t +BitMap::inverted_bit_mask_for_range(idx_t beg, idx_t end) const { + assert(end != 0, "does not work when end == 0"); + assert(beg == end || word_index(beg) == word_index(end - 1), + "must be a single-word range"); + bm_word_t mask = bit_mask(beg) - 1; // low (right) bits + if (bit_in_word(end) != 0) { + mask |= ~(bit_mask(end) - 1); // high (left) bits + } + return mask; +} + +inline void BitMap::set_large_range_of_words(idx_t beg, idx_t end) { + memset(_map + beg, ~(unsigned char)0, (end - beg) * sizeof(uintptr_t)); +} + +inline void BitMap::clear_large_range_of_words(idx_t beg, idx_t end) { + memset(_map + beg, 0, (end - beg) * sizeof(uintptr_t)); +} + +inline BitMap::idx_t BitMap::word_index_round_up(idx_t bit) const { + idx_t bit_rounded_up = bit + (BitsPerWord - 1); + // Check for integer arithmetic overflow. + return bit_rounded_up > bit ? word_index(bit_rounded_up) : size_in_words(); +} + +inline BitMap::idx_t BitMap::get_next_one_offset(idx_t l_offset, + idx_t r_offset) const { + return get_next_one_offset_inline(l_offset, r_offset); +} + +inline BitMap::idx_t BitMap::get_next_zero_offset(idx_t l_offset, + idx_t r_offset) const { + return get_next_zero_offset_inline(l_offset, r_offset); +} + +inline void BitMap2D::clear() { + _map.clear(); } --- old/hotspot/src/share/vm/utilities/constantTag.hpp 2009-08-01 04:15:48.086286001 +0100 +++ new/hotspot/src/share/vm/utilities/constantTag.hpp 2009-08-01 04:15:48.000817603 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)constantTag.hpp 1.28 07/05/05 17:07:08 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
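The comment on inverted_bit_mask_for_range() above is easier to see with concrete numbers: bits inside [beg, end) come out 0 and all other bits come out 1, so the mask clears the range directly and its complement sets it. A sketch of the same computation on an 8-bit word for readability (the real code works on uintptr_t-sized words and additionally asserts end != 0 and a single-word range):

    #include <cstdint>
    #include <cassert>

    // Bits inside [beg, end) are 0 in the returned mask, bits outside are 1.
    static uint8_t inverted_mask8(unsigned beg, unsigned end) {
      uint8_t mask = (uint8_t)((1u << beg) - 1);         // low (right) bits
      if ((end & 7) != 0) {
        mask |= (uint8_t)~((1u << end) - 1);             // high (left) bits
      }
      return mask;
    }

    int main() {
      uint8_t m = inverted_mask8(2, 5);                  // range covers bits 2, 3 and 4
      assert(m == 0xE3);                                 // binary 1110 0011
      uint8_t cleared = (uint8_t)(0xFF & m);             // clear the range: 1110 0011
      uint8_t set     = (uint8_t)(0x00 | (uint8_t)~m);   // invert the mask to set it: 0001 1100
      assert(cleared == 0xE3 && set == 0x1C);
      return 0;
    }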
* * This code is free software; you can redistribute it and/or modify it @@ -74,6 +74,7 @@ bool is_string_index() const { return _tag == JVM_CONSTANT_StringIndex; } bool is_klass_reference() const { return is_klass_index() || is_unresolved_klass(); } + bool is_klass_or_reference() const{ return is_klass() || is_klass_reference(); } bool is_field_or_method() const { return is_field() || is_method() || is_interface_method(); } bool is_symbol() const { return is_utf8(); } --- old/hotspot/src/share/vm/utilities/copy.hpp 2009-08-01 04:15:48.959659229 +0100 +++ new/hotspot/src/share/vm/utilities/copy.hpp 2009-08-01 04:15:48.870444866 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)copy.hpp 1.15 07/05/17 16:07:14 JVM" #endif /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -151,11 +151,19 @@ // oops, conjoint, atomic on each oop static void conjoint_oops_atomic(oop* from, oop* to, size_t count) { - assert_params_ok(from, to, LogBytesPerOop); + assert_params_ok(from, to, LogBytesPerHeapOop); assert_non_zero(count); pd_conjoint_oops_atomic(from, to, count); } + // overloaded for UseCompressedOops + static void conjoint_oops_atomic(narrowOop* from, narrowOop* to, size_t count) { + assert(sizeof(narrowOop) == sizeof(jint), "this cast is wrong"); + assert_params_ok(from, to, LogBytesPerInt); + assert_non_zero(count); + pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + } + // Copy a span of memory. If the span is an integral number of aligned // longs, words, or ints, copy those units atomically. // The largest atomic transfer unit is 8 bytes, or the largest power @@ -191,7 +199,7 @@ // oops, conjoint array, atomic on each oop static void arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - assert_params_ok(from, to, LogBytesPerOop); + assert_params_ok(from, to, LogBytesPerHeapOop); assert_non_zero(count); pd_arrayof_conjoint_oops(from, to, count); } --- old/hotspot/src/share/vm/utilities/debug.cpp 2009-08-01 04:15:49.772256095 +0100 +++ new/hotspot/src/share/vm/utilities/debug.cpp 2009-08-01 04:15:49.697724382 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)debug.cpp 1.183 07/07/02 11:45:25 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -211,7 +211,9 @@ Thread* thread = ThreadLocalStorage::get_thread_slow(); VMError(thread, size, message, file_name, line_no).report_and_die(); } - vm_abort(); + + // Dump core and abort + vm_abort(true); } void report_vm_out_of_memory_vararg(const char* file_name, int line_no, size_t size, const char* format, ...) 
{ @@ -568,7 +570,7 @@ } // the InlineCacheBuffer is using stubs generated into a buffer blob if (InlineCacheBuffer::contains(addr)) { - tty->print_cr(INTPTR_FORMAT "is pointing into InlineCacheBuffer", addr); + tty->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", addr); return; } VtableStub* v = VtableStubs::stub_containing(addr); @@ -596,7 +598,7 @@ return; } - if (Universe::heap()->is_in_reserved(addr)) { + if (Universe::heap()->is_in(addr)) { HeapWord* p = Universe::heap()->block_start(addr); bool print = false; // If we couldn't find it it just may mean that heap wasn't parseable @@ -622,24 +624,28 @@ } return; } + } else if (Universe::heap()->is_in_reserved(addr)) { + tty->print_cr(INTPTR_FORMAT " is an unallocated location in the heap", addr); + return; } + if (JNIHandles::is_global_handle((jobject) addr)) { - tty->print_cr(INTPTR_FORMAT "is a global jni handle", addr); + tty->print_cr(INTPTR_FORMAT " is a global jni handle", addr); return; } if (JNIHandles::is_weak_global_handle((jobject) addr)) { - tty->print_cr(INTPTR_FORMAT "is a weak global jni handle", addr); + tty->print_cr(INTPTR_FORMAT " is a weak global jni handle", addr); return; } if (JNIHandleBlock::any_contains((jobject) addr)) { - tty->print_cr(INTPTR_FORMAT "is a local jni handle", addr); + tty->print_cr(INTPTR_FORMAT " is a local jni handle", addr); return; } for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) { - // Check for priviledge stack + // Check for privilege stack if (thread->privileged_stack_top() != NULL && thread->privileged_stack_top()->contains(addr)) { - tty->print_cr(INTPTR_FORMAT "is pointing into the priviledge stack for thread: " INTPTR_FORMAT, addr, thread); + tty->print_cr(INTPTR_FORMAT " is pointing into the privilege stack for thread: " INTPTR_FORMAT, addr, thread); return; } // If the addr is a java thread print information about that. @@ -660,7 +666,7 @@ return; } - tty->print_cr(INTPTR_FORMAT "is pointing to unknown location", addr); + tty->print_cr(INTPTR_FORMAT " is pointing to unknown location", addr); } @@ -669,9 +675,10 @@ oop target; void do_oop(oop* o) { if (o != NULL && *o == target) { - tty->print_cr("0x%08x", o); + tty->print_cr(INTPTR_FORMAT, o); } } + void do_oop(narrowOop* o) { ShouldNotReachHere(); } }; @@ -687,13 +694,13 @@ static void findref(intptr_t x) { - GenCollectedHeap *gch = GenCollectedHeap::heap(); + CollectedHeap *ch = Universe::heap(); LookForRefInGenClosure lookFor; lookFor.target = (oop) x; LookForRefInObjectClosure look_in_object((oop) x); tty->print_cr("Searching heap:"); - gch->object_iterate(&look_in_object); + ch->object_iterate(&look_in_object); tty->print_cr("Searching strong roots:"); Universe::oops_do(&lookFor, false); --- old/hotspot/src/share/vm/utilities/globalDefinitions.cpp 2009-08-01 04:15:50.686767877 +0100 +++ new/hotspot/src/share/vm/utilities/globalDefinitions.cpp 2009-08-01 04:15:50.610615476 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)globalDefinitions.cpp 1.48 07/05/05 17:07:09 JVM" #endif /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,18 +27,23 @@ # include "incls/_precompiled.incl" # include "incls/_globalDefinitions.cpp.incl" - - // Basic error support +// Info for oops within a java object. Defaults are zero so +// things will break badly if incorrectly initialized. 
+int heapOopSize = 0; +int LogBytesPerHeapOop = 0; +int LogBitsPerHeapOop = 0; +int BytesPerHeapOop = 0; +int BitsPerHeapOop = 0; + void basic_fatal(const char* msg) { fatal(msg); } - // Something to help porters sleep at night -void check_basic_types() { +void basic_types_init() { #ifdef ASSERT #ifdef _LP64 assert(min_intx == (intx)CONST64(0x8000000000000000), "correct constant"); @@ -95,6 +100,7 @@ case T_LONG: case T_OBJECT: case T_ADDRESS: // random raw pointer + case T_NARROWOOP: // compressed pointer case T_CONFLICT: // might as well support a bottom type case T_VOID: // padding or other unaddressed word // layout type must map to itself @@ -137,11 +143,30 @@ os::java_to_os_priority[9] = JavaPriority9_To_OSPriority; if(JavaPriority10_To_OSPriority != -1 ) os::java_to_os_priority[10] = JavaPriority10_To_OSPriority; + + // Set the size of basic types here (after argument parsing but before + // stub generation). + if (UseCompressedOops) { + // Size info for oops within java objects is fixed + heapOopSize = jintSize; + LogBytesPerHeapOop = LogBytesPerInt; + LogBitsPerHeapOop = LogBitsPerInt; + BytesPerHeapOop = BytesPerInt; + BitsPerHeapOop = BitsPerInt; + } else { + heapOopSize = oopSize; + LogBytesPerHeapOop = LogBytesPerWord; + LogBitsPerHeapOop = LogBitsPerWord; + BytesPerHeapOop = BytesPerWord; + BitsPerHeapOop = BitsPerWord; + } + _type2aelembytes[T_OBJECT] = heapOopSize; + _type2aelembytes[T_ARRAY] = heapOopSize; } // Map BasicType to signature character -char type2char_tab[T_CONFLICT+1]={ 0, 0, 0, 0, 'Z', 'C', 'F', 'D', 'B', 'S', 'I', 'J', 'L', '[', 'V', 0, 0}; +char type2char_tab[T_CONFLICT+1]={ 0, 0, 0, 0, 'Z', 'C', 'F', 'D', 'B', 'S', 'I', 'J', 'L', '[', 'V', 0, 0, 0}; // Map BasicType to Java type name const char* type2name_tab[T_CONFLICT+1] = { @@ -158,6 +183,7 @@ "array", "void", "*address*", + "*narrowoop*", "*conflict*" }; @@ -173,7 +199,7 @@ // Map BasicType to size in words -int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1}; +int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, 1, -1}; BasicType type2field[T_CONFLICT+1] = { (BasicType)0, // 0, @@ -192,7 +218,8 @@ T_OBJECT, // T_ARRAY = 13, T_VOID, // T_VOID = 14, T_ADDRESS, // T_ADDRESS = 15, - T_CONFLICT // T_CONFLICT = 16, + T_NARROWOOP, // T_NARROWOOP= 16, + T_CONFLICT // T_CONFLICT = 17, }; @@ -213,11 +240,12 @@ T_OBJECT, // T_ARRAY = 13, T_VOID, // T_VOID = 14, T_ADDRESS, // T_ADDRESS = 15, - T_CONFLICT // T_CONFLICT = 16, + T_NARROWOOP, // T_NARROWOOP = 16, + T_CONFLICT // T_CONFLICT = 17, }; -int type2aelembytes[T_CONFLICT+1] = { +int _type2aelembytes[T_CONFLICT+1] = { 0, // 0 0, // 1 0, // 2 @@ -233,16 +261,23 @@ T_OBJECT_aelem_bytes, // T_OBJECT = 12, T_ARRAY_aelem_bytes, // T_ARRAY = 13, 0, // T_VOID = 14, - T_INT_aelem_bytes, // T_ADDRESS = 15, - 0 // T_CONFLICT = 16, + T_OBJECT_aelem_bytes, // T_ADDRESS = 15, + T_NARROWOOP_aelem_bytes,// T_NARROWOOP= 16, + 0 // T_CONFLICT = 17, }; +#ifdef ASSERT +int type2aelembytes(BasicType t, bool allow_address) { + assert(allow_address || t != T_ADDRESS, " "); + return _type2aelembytes[t]; +} +#endif // Support for 64-bit integer arithmetic // The following code is mostly taken from JVM typedefs_md.h and system_md.c -static const jlong high_bit = (jlong)1 << (jlong)63; +static const jlong high_bit = (jlong)1 << (jlong)63; static const jlong other_bits = ~high_bit; jlong float2long(jfloat f) { --- old/hotspot/src/share/vm/utilities/globalDefinitions.hpp 2009-08-01 04:15:51.627797091 +0100 +++ 
new/hotspot/src/share/vm/utilities/globalDefinitions.hpp 2009-08-01 04:15:51.545335369 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)globalDefinitions.hpp 1.217 07/05/23 10:54:27 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -62,24 +62,28 @@ const int WordsPerLong = 2; // Number of stack entries for longs -const int oopSize = sizeof(char*); -const int wordSize = sizeof(char*); +const int oopSize = sizeof(char*); // Full-width oop +extern int heapOopSize; // Oop within a java object +const int wordSize = sizeof(char*); const int longSize = sizeof(jlong); const int jintSize = sizeof(jint); const int size_tSize = sizeof(size_t); -// Size of a char[] needed to represent a jint as a string in decimal. -const int jintAsStringSize = 12; +const int BytesPerOop = BytesPerWord; // Full-width oop + +extern int LogBytesPerHeapOop; // Oop within a java object +extern int LogBitsPerHeapOop; +extern int BytesPerHeapOop; +extern int BitsPerHeapOop; -const int LogBytesPerOop = LogBytesPerWord; -const int LogBitsPerOop = LogBitsPerWord; -const int BytesPerOop = 1 << LogBytesPerOop; -const int BitsPerOop = 1 << LogBitsPerOop; - const int BitsPerJavaInteger = 32; +const int BitsPerJavaLong = 64; const int BitsPerSize_t = size_tSize * BitsPerByte; -// In fact this should be +// Size of a char[] needed to represent a jint as a string in decimal. +const int jintAsStringSize = 12; + +// In fact this should be // log2_intptr(sizeof(class JavaThread)) - log2_intptr(64); // see os::set_memory_serialize_page() #ifdef _LP64 @@ -97,19 +101,23 @@ // object size. class HeapWord { friend class VMStructs; -private: + private: char* i; +#ifndef PRODUCT + public: + char* value() { return i; } +#endif }; // HeapWordSize must be 2^LogHeapWordSize. -const int HeapWordSize = sizeof(HeapWord); +const int HeapWordSize = sizeof(HeapWord); #ifdef _LP64 -const int LogHeapWordSize = 3; +const int LogHeapWordSize = 3; #else -const int LogHeapWordSize = 2; +const int LogHeapWordSize = 2; #endif -const int HeapWordsPerOop = oopSize / HeapWordSize; -const int HeapWordsPerLong = BytesPerLong / HeapWordSize; +const int HeapWordsPerLong = BytesPerLong / HeapWordSize; +const int LogHeapWordsPerLong = LogBytesPerLong - LogHeapWordSize; // The larger HeapWordSize for 64bit requires larger heaps // for the same application running in 64bit. See bug 4967770. 
@@ -287,6 +295,9 @@ const int MinObjAlignmentInBytes = MinObjAlignment * HeapWordSize; const int MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1; +const int LogMinObjAlignment = LogHeapWordsPerLong; +const int LogMinObjAlignmentInBytes = LogMinObjAlignment + LogHeapWordSize; + // Machine dependent stuff #include "incls/_globalDefinitions_pd.hpp.incl" @@ -374,7 +385,7 @@ jlong long_value; }; -void check_basic_types(); // cannot define here; uses assert +void basic_types_init(); // cannot define here; uses assert // NOTE: replicated in SA in vm/agent/sun/jvm/hotspot/runtime/BasicType.java @@ -391,10 +402,15 @@ T_ARRAY = 13, T_VOID = 14, T_ADDRESS = 15, - T_CONFLICT = 16, // for stack value type with conflicting contents + T_NARROWOOP= 16, + T_CONFLICT = 17, // for stack value type with conflicting contents T_ILLEGAL = 99 }; +inline bool is_java_primitive(BasicType t) { + return T_BOOLEAN <= t && t <= T_LONG; +} + // Convert a char from a classfile signature to a BasicType inline BasicType char2type(char c) { switch( c ) { @@ -437,6 +453,7 @@ T_LONG_size = 2, T_OBJECT_size = 1, T_ARRAY_size = 1, + T_NARROWOOP_size = 1, T_VOID_size = 0 }; @@ -464,10 +481,16 @@ T_OBJECT_aelem_bytes = 4, T_ARRAY_aelem_bytes = 4, #endif + T_NARROWOOP_aelem_bytes = 4, T_VOID_aelem_bytes = 0 }; -extern int type2aelembytes[T_CONFLICT+1]; // maps a BasicType to nof bytes used by its array element +extern int _type2aelembytes[T_CONFLICT+1]; // maps a BasicType to nof bytes used by its array element +#ifdef ASSERT +extern int type2aelembytes(BasicType t, bool allow_address = false); // asserts +#else +inline int type2aelembytes(BasicType t) { return _type2aelembytes[t]; } +#endif // JavaValue serves as a container for arbitrary Java values. @@ -875,7 +898,7 @@ i++; p *= 2; } // p = 2^(i+1) && x < p (i.e., 2^i <= x < 2^(i+1)) - // (if p = 0 then overflow occured and i = 31) + // (if p = 0 then overflow occured and i = 63) return i; } @@ -887,6 +910,14 @@ return log2_intptr(x); } +//* the argument must be exactly a power of 2 +inline int exact_log2_long(jlong x) { + #ifdef ASSERT + if (!is_power_of_2_long(x)) basic_fatal("x must be a power of 2"); + #endif + return log2_long(x); +} + // returns integer round-up to the nearest multiple of s (s must be a power of two) inline intptr_t round_to(intptr_t x, uintx s) { --- old/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp 2009-08-01 04:15:52.597552495 +0100 +++ new/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp 2009-08-01 04:15:52.523759169 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)globalDefinitions_sparcWorks.hpp 1.80 07/05/05 17:07:10 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,23 +40,45 @@ # include # include # include +#ifdef SOLARIS # include +#endif # include +#ifdef LINUX +#ifndef FP_PZERO + // Linux doesn't have positive/negative zero + #define FP_PZERO FP_ZERO +#endif +#ifndef fpclass + #define fpclass fpclassify +#endif +#endif # include # include # include # include +#ifdef SOLARIS # include +#endif # include # include +#ifdef SOLARIS # include # include # include # include # include +#endif # ifdef SOLARIS_MUTATOR_LIBTHREAD # include # endif +#ifdef LINUX +# include +# include +# include +# include +#endif + // 4810578: varargs unsafe on 32-bit integer/64-bit pointer architectures // When __cplusplus is defined, NULL is defined as 0 (32-bit constant) in @@ -71,6 +93,11 @@ // pointer when it extracts the argument, then we have a problem. // // Solution: For 64-bit architectures, redefine NULL as 64-bit constant 0. +// +// Note: this fix doesn't work well on Linux because NULL will be overwritten +// whenever a system header file is included. Linux handles NULL correctly +// through a special type '__null'. +#ifdef SOLARIS #ifdef _LP64 #undef NULL #define NULL 0L @@ -79,13 +106,25 @@ #define NULL 0 #endif #endif +#endif // NULL vs NULL_WORD: // On Linux NULL is defined as a special type '__null'. Assigning __null to // integer variable will cause gcc warning. Use NULL_WORD in places where a -// pointer is stored as integer value. -#define NULL_WORD NULL +// pointer is stored as integer value. On some platforms, sizeof(intptr_t) > +// sizeof(void*), so here we want something which is integer type, but has the +// same size as a pointer. +#ifdef LINUX + #ifdef _LP64 + #define NULL_WORD 0L + #else + #define NULL_WORD 0 + #endif +#else + #define NULL_WORD NULL +#endif +#ifndef LINUX // Compiler-specific primitive types typedef unsigned short uint16_t; #ifndef _UINT32_T @@ -103,6 +142,7 @@ // If this gets an error, figure out a symbol XXX that implies the // prior definition of intptr_t, and add "&& !defined(XXX)" above. 
#endif +#endif // Additional Java basic types @@ -131,7 +171,7 @@ const jlong min_jlong = CONST64(0x8000000000000000); const jlong max_jlong = CONST64(0x7fffffffffffffff); - +#ifdef SOLARIS //---------------------------------------------------------------------------------------------------- // ANSI C++ fixes // NOTE:In the ANSI committee's continuing attempt to make each version @@ -165,7 +205,7 @@ typedef int (*int_fnP_cond_tP_i_vP)(cond_t *cv, int scope, void *arg); typedef int (*int_fnP_cond_tP)(cond_t *cv); }; - +#endif //---------------------------------------------------------------------------------------------------- // Debugging @@ -176,7 +216,7 @@ #define BREAKPOINT ::breakpoint() // checking for nanness - +#ifdef SOLARIS #ifdef SPARC inline int g_isnan(float f) { return isnanf(f); } #else @@ -185,6 +225,12 @@ #endif inline int g_isnan(double f) { return isnand(f); } +#elif LINUX +inline int g_isnan(float f) { return isnanf(f); } +inline int g_isnan(double f) { return isnan(f); } +#else +#error "missing platform-specific definition here" +#endif // Checking for finiteness @@ -198,9 +244,11 @@ // Misc +// NOTE: This one leads to an infinite recursion on Linux +#ifndef LINUX int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr); #define vsnprintf local_vsnprintf - +#endif // Portability macros #define PRAGMA_INTERFACE --- old/hotspot/src/share/vm/utilities/growableArray.cpp 2009-08-01 04:15:53.484802922 +0100 +++ new/hotspot/src/share/vm/utilities/growableArray.cpp 2009-08-01 04:15:53.411986219 +0100 @@ -46,12 +46,14 @@ #endif void* GenericGrowableArray::raw_allocate(int elementSize) { + assert(_max >= 0, "integer overflow"); + size_t byte_size = elementSize * (size_t) _max; if (on_stack()) { - return (void*)resource_allocate_bytes(elementSize * _max); + return (void*)resource_allocate_bytes(byte_size); } else if (on_C_heap()) { - return (void*)AllocateHeap(elementSize * _max, "GrET in " __FILE__); + return (void*)AllocateHeap(byte_size, "GrET in " __FILE__); } else { - return _arena->Amalloc(elementSize * _max); + return _arena->Amalloc(byte_size); } } --- old/hotspot/src/share/vm/utilities/growableArray.hpp 2009-08-01 04:15:54.331174530 +0100 +++ new/hotspot/src/share/vm/utilities/growableArray.hpp 2009-08-01 04:15:54.248263803 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)growableArray.hpp 1.55 07/05/05 17:07:09 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -114,6 +114,12 @@ } void* raw_allocate(int elementSize); + + // some uses pass the Thread explicitly for speed (4990299 tuning) + void* raw_allocate(Thread* thread, int elementSize) { + assert(on_stack(), "fast ResourceObj path only"); + return (void*)resource_allocate_bytes(thread, elementSize * _max); + } }; template class GrowableArray : public GenericGrowableArray { @@ -124,6 +130,11 @@ void raw_at_put_grow(int i, const E& p, const E& fill); void clear_and_deallocate(); public: + GrowableArray(Thread* thread, int initial_size) : GenericGrowableArray(initial_size, 0, false) { + _data = (E*)raw_allocate(thread, sizeof(E)); + for (int i = 0; i < _max; i++) ::new ((void*)&_data[i]) E(); + } + GrowableArray(int initial_size, bool C_heap = false) : GenericGrowableArray(initial_size, 0, C_heap) { _data = (E*)raw_allocate(sizeof(E)); for (int i = 0; i < _max; i++) ::new ((void*)&_data[i]) E(); @@ -162,10 +173,12 @@ void print(); - void append(const E& elem) { + int append(const E& elem) { check_nesting(); if (_len == _max) grow(_len); - _data[_len++] = elem; + int idx = _len++; + _data[idx] = elem; + return idx; } void append_if_missing(const E& elem) { --- old/hotspot/src/share/vm/utilities/hashtable.cpp 2009-08-01 04:15:55.145594026 +0100 +++ new/hotspot/src/share/vm/utilities/hashtable.cpp 2009-08-01 04:15:55.077164955 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)hashtable.cpp 1.13 07/05/05 17:07:10 JVM" #endif /* - * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,9 +46,11 @@ entry = _free_list; _free_list = _free_list->next(); } else { - const int block_size = 500; - if (_first_free_entry == _end_block) { + if (_first_free_entry + _entry_size >= _end_block) { + int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries)); int len = _entry_size * block_size; + len = 1 << log2_intptr(len); // round down to power of 2 + assert(len >= _entry_size, ""); _first_free_entry = NEW_C_HEAP_ARRAY(char, len); _end_block = _first_free_entry + len; } @@ -56,6 +58,7 @@ _first_free_entry += _entry_size; } + assert(_entry_size % HeapWordSize == 0, ""); entry->set_hash(hashValue); return entry; } --- old/hotspot/src/share/vm/utilities/macros.hpp 2009-08-01 04:15:55.986954972 +0100 +++ new/hotspot/src/share/vm/utilities/macros.hpp 2009-08-01 04:15:55.905494208 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)macros.hpp 1.44 07/08/29 13:42:30 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -68,8 +68,10 @@ // COMPILER2 variant #ifdef COMPILER2 #define COMPILER2_PRESENT(code) code +#define NOT_COMPILER2(code) #else // COMPILER2 #define COMPILER2_PRESENT(code) +#define NOT_COMPILER2(code) code #endif // COMPILER2 @@ -147,6 +149,14 @@ #define NOT_WINDOWS(code) code #endif +#if defined(IA32) || defined(AMD64) +#define X86 +#define X86_ONLY(code) code +#else +#undef X86 +#define X86_ONLY(code) +#endif + #ifdef IA32 #define IA32_ONLY(code) code #define NOT_IA32(code) --- old/hotspot/src/share/vm/utilities/ostream.cpp 2009-08-01 04:15:56.893968536 +0100 +++ new/hotspot/src/share/vm/utilities/ostream.cpp 2009-08-01 04:15:56.818686248 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ostream.cpp 1.80 07/09/28 10:22:57 JVM" #endif /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,8 +55,9 @@ _precount += _position + 1; _position = 0; } else if (ch == '\t') { - _position += 8; - _precount -= 7; // invariant: _precount + _position == total count + int tw = 8 - (_position & 7); + _position += tw; + _precount -= tw-1; // invariant: _precount + _position == total count } else { _position += 1; } @@ -136,7 +137,17 @@ } void outputStream::fill_to(int col) { - while (position() < col) sp(); + int need_fill = col - position(); + sp(need_fill); +} + +void outputStream::move_to(int col, int slop, int min_space) { + if (position() >= col + slop) + cr(); + int need_fill = col - position(); + if (need_fill < min_space) + need_fill = min_space; + sp(need_fill); } void outputStream::put(char ch) { @@ -145,8 +156,23 @@ write(buf, 1); } -void outputStream::sp() { - this->write(" ", 1); +#define SP_USE_TABS false + +void outputStream::sp(int count) { + if (count < 0) return; + if (SP_USE_TABS && count >= 8) { + int target = position() + count; + while (count >= 8) { + this->write("\t", 1); + count -= 8; + } + count = target - position(); + } + while (count > 0) { + int nw = (count > 8) ? 
8 : count; + this->write(" ", nw); + count -= nw; + } } void outputStream::cr() { @@ -165,6 +191,17 @@ print_raw(buf); } +void outputStream::stamp(bool guard, + const char* prefix, + const char* suffix) { + if (!guard) { + return; + } + print_raw(prefix); + stamp(); + print_raw(suffix); +} + void outputStream::date_stamp(bool guard, const char* prefix, const char* suffix) { @@ -730,21 +767,28 @@ write(str, len); } -bufferedStream::bufferedStream(size_t initial_size) : outputStream() { +bufferedStream::bufferedStream(size_t initial_size, size_t bufmax) : outputStream() { buffer_length = initial_size; buffer = NEW_C_HEAP_ARRAY(char, buffer_length); buffer_pos = 0; buffer_fixed = false; + buffer_max = bufmax; } - -bufferedStream::bufferedStream(char* fixed_buffer, size_t fixed_buffer_size) : outputStream() { + +bufferedStream::bufferedStream(char* fixed_buffer, size_t fixed_buffer_size, size_t bufmax) : outputStream() { buffer_length = fixed_buffer_size; buffer = fixed_buffer; buffer_pos = 0; buffer_fixed = true; + buffer_max = bufmax; } void bufferedStream::write(const char* s, size_t len) { + + if(buffer_pos + len > buffer_max) { + flush(); + } + size_t end = buffer_pos + len; if (end >= buffer_length) { if (buffer_fixed) { @@ -788,7 +832,7 @@ #endif // Network access -networkStream::networkStream() { +networkStream::networkStream() : bufferedStream(1024*10, 1024*10) { _socket = -1; @@ -808,7 +852,9 @@ void networkStream::flush() { if (size() != 0) { - hpi::send(_socket, (char *)base(), (int)size(), 0); + int result = hpi::raw_send(_socket, (char *)base(), (int)size(), 0); + assert(result != -1, "connection error"); + assert(result == (int)size(), "didn't send enough data"); } reset(); } @@ -832,7 +878,7 @@ server.sin_port = htons(port); server.sin_addr.s_addr = inet_addr(ip); - if (server.sin_addr.s_addr == (unsigned long)-1) { + if (server.sin_addr.s_addr == (uint32_t)-1) { #ifdef _WINDOWS struct hostent* host = hpi::get_host_by_name((char*)ip); #else --- old/hotspot/src/share/vm/utilities/ostream.hpp 2009-08-01 04:15:57.858154042 +0100 +++ new/hotspot/src/share/vm/utilities/ostream.hpp 2009-08-01 04:15:57.773407731 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)ostream.hpp 1.44 07/09/28 10:22:57 JVM" #endif /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -62,6 +62,7 @@ int indentation() const { return _indentation; } void set_indentation(int i) { _indentation = i; } void fill_to(int col); + void move_to(int col, int slop = 6, int min_space = 2); // sizing int width() const { return _width; } @@ -81,13 +82,17 @@ void print_raw_cr(const char* str) { write(str, strlen(str)); cr(); } void print_raw_cr(const char* str, int len){ write(str, len); cr(); } void put(char ch); - void sp(); + void sp(int count = 1); void cr(); void bol() { if (_position > 0) cr(); } // Time stamp TimeStamp& time_stamp() { return _stamp; } void stamp(); + void stamp(bool guard, const char* prefix, const char* suffix); + void stamp(bool guard) { + stamp(guard, "", ": "); + } // Date stamp void date_stamp(bool guard, const char* prefix, const char* suffix); // A simplified call that includes a suffix of ": " @@ -208,11 +213,12 @@ protected: char* buffer; size_t buffer_pos; + size_t buffer_max; size_t buffer_length; bool buffer_fixed; public: - bufferedStream(size_t initial_bufsize = 256); - bufferedStream(char* fixed_buffer, size_t fixed_buffer_size); + bufferedStream(size_t initial_bufsize = 256, size_t bufmax = 1024*1024*10); + bufferedStream(char* fixed_buffer, size_t fixed_buffer_size, size_t bufmax = 1024*1024*10); ~bufferedStream(); virtual void write(const char* c, size_t len); size_t size() { return buffer_pos; } --- old/hotspot/src/share/vm/utilities/taskqueue.cpp 2009-08-01 04:15:58.724746764 +0100 +++ new/hotspot/src/share/vm/utilities/taskqueue.cpp 2009-08-01 04:15:58.635283176 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)taskqueue.cpp 1.21 06/08/10 17:56:51 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -68,7 +68,8 @@ os::sleep(Thread::current(), millis, false); } -bool ParallelTaskTerminator::offer_termination() { +bool +ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) { Atomic::inc(&_offered_termination); juint yield_count = 0; @@ -94,7 +95,8 @@ sleep(WorkStealingSleepMillis); } - if (peek_in_queue_set()) { + if (peek_in_queue_set() || + (terminator != NULL && terminator->should_exit_termination())) { Atomic::dec(&_offered_termination); return false; } @@ -110,72 +112,72 @@ } } -bool ChunkTaskQueueWithOverflow::is_empty() { - return (_chunk_queue.size() == 0) && - (_overflow_stack->length() == 0); +bool RegionTaskQueueWithOverflow::is_empty() { + return (_region_queue.size() == 0) && + (_overflow_stack->length() == 0); } -bool ChunkTaskQueueWithOverflow::stealable_is_empty() { - return _chunk_queue.size() == 0; +bool RegionTaskQueueWithOverflow::stealable_is_empty() { + return _region_queue.size() == 0; } -bool ChunkTaskQueueWithOverflow::overflow_is_empty() { +bool RegionTaskQueueWithOverflow::overflow_is_empty() { return _overflow_stack->length() == 0; } -void ChunkTaskQueueWithOverflow::initialize() { - _chunk_queue.initialize(); +void RegionTaskQueueWithOverflow::initialize() { + _region_queue.initialize(); assert(_overflow_stack == 0, "Creating memory leak"); - _overflow_stack = - new (ResourceObj::C_HEAP) GrowableArray(10, true); + _overflow_stack = + new (ResourceObj::C_HEAP) GrowableArray(10, true); } -void ChunkTaskQueueWithOverflow::save(ChunkTask t) { - if (TraceChunkTasksQueuing && Verbose) { +void RegionTaskQueueWithOverflow::save(RegionTask t) { + if (TraceRegionTasksQueuing && Verbose) { gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t); } - if(!_chunk_queue.push(t)) { + if(!_region_queue.push(t)) { _overflow_stack->push(t); } } -// Note that using this method will retrieve all chunks +// Note that using this method will retrieve all regions // that have been saved but that it will always check // the overflow stack. It may be more efficient to // check the stealable queue and the overflow stack // separately. 
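offer_termination() now takes an optional TerminatorTerminator, so a thread spinning in the work-stealing termination protocol can be told to give up for a reason other than newly visible work. A hedged standalone sketch of how such a terminator could look; the classes below are illustrative stand-ins (std::atomic is used only for the example, HotSpot has its own primitives):

    #include <atomic>

    // A spinning worker polls should_exit_termination() and backs out (returning
    // false from offer_termination) when it fires.
    class TerminatorTerminatorModel {
     public:
      virtual bool should_exit_termination() = 0;
      virtual ~TerminatorTerminatorModel() {}
    };

    // Example: force workers out of the termination protocol once an external flag
    // is raised, e.g. because a concurrent phase wants to hand them new work.
    class FlagTerminator : public TerminatorTerminatorModel {
      std::atomic<bool>* _abort;
     public:
      explicit FlagTerminator(std::atomic<bool>* abort) : _abort(abort) {}
      virtual bool should_exit_termination() { return _abort->load(); }
    };

    int main() {
      std::atomic<bool> abort_flag(false);
      FlagTerminator t(&abort_flag);
      bool before = t.should_exit_termination();         // false until the flag is raised
      abort_flag.store(true);
      return (t.should_exit_termination() && !before) ? 0 : 1;
    }

The spin loop in the patch then checks, roughly, peek_in_queue_set() || (terminator != NULL && terminator->should_exit_termination()) before deciding to stay in or leave the protocol.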
-bool ChunkTaskQueueWithOverflow::retrieve(ChunkTask& chunk_task) { - bool result = retrieve_from_overflow(chunk_task); +bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) { + bool result = retrieve_from_overflow(region_task); if (!result) { - result = retrieve_from_stealable_queue(chunk_task); + result = retrieve_from_stealable_queue(region_task); } - if (TraceChunkTasksQueuing && Verbose && result) { + if (TraceRegionTasksQueuing && Verbose && result) { gclog_or_tty->print_cr(" CTQ: retrieve " PTR_FORMAT, result); } return result; } -bool ChunkTaskQueueWithOverflow::retrieve_from_stealable_queue( - ChunkTask& chunk_task) { - bool result = _chunk_queue.pop_local(chunk_task); - if (TraceChunkTasksQueuing && Verbose) { - gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, chunk_task); +bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue( + RegionTask& region_task) { + bool result = _region_queue.pop_local(region_task); + if (TraceRegionTasksQueuing && Verbose) { + gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task); } return result; } -bool ChunkTaskQueueWithOverflow::retrieve_from_overflow( - ChunkTask& chunk_task) { +bool +RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) { bool result; if (!_overflow_stack->is_empty()) { - chunk_task = _overflow_stack->pop(); + region_task = _overflow_stack->pop(); result = true; } else { - chunk_task = (ChunkTask) NULL; + region_task = (RegionTask) NULL; result = false; } - if (TraceChunkTasksQueuing && Verbose) { - gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, chunk_task); + if (TraceRegionTasksQueuing && Verbose) { + gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task); } return result; } --- old/hotspot/src/share/vm/utilities/taskqueue.hpp 2009-08-01 04:15:59.848458949 +0100 +++ new/hotspot/src/share/vm/utilities/taskqueue.hpp 2009-08-01 04:15:59.515737250 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)taskqueue.hpp 1.33 06/08/10 17:56:52 JVM" #endif /* - * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -123,6 +123,11 @@ return dirty_size(_bottom, get_top()); } + void set_empty() { + _bottom = 0; + _age = Age(); + } + // Maximum number of elements allowed in the queue. This is two less // than the actual queue size, for somewhat complicated reasons. juint max_elems() { return n() - 2; } @@ -158,6 +163,9 @@ // Delete any resource associated with the queue. ~GenericTaskQueue(); + // apply the closure to all elements in the task queue + void oops_do(OopClosure* f); + private: // Element array. volatile E* _elems; @@ -175,6 +183,24 @@ } template +void GenericTaskQueue::oops_do(OopClosure* f) { + // tty->print_cr("START OopTaskQueue::oops_do"); + int iters = size(); + juint index = _bottom; + for (int i = 0; i < iters; ++i) { + index = decrement_index(index); + // tty->print_cr(" doing entry %d," INTPTR_T " -> " INTPTR_T, + // index, &_elems[index], _elems[index]); + E* t = (E*)&_elems[index]; // cast away volatility + oop* p = (oop*)t; + assert((*t)->is_oop_or_null(), "Not an oop or null"); + f->do_oop(p); + } + // tty->print_cr("END OopTaskQueue::oops_do"); +} + + +template bool GenericTaskQueue::push_slow(E t, juint dirty_n_elems) { if (dirty_n_elems == n() - 1) { // Actually means 0, so do the push. 
@@ -386,6 +412,12 @@ return false; } +// When to terminate from the termination protocol. +class TerminatorTerminator: public CHeapObj { +public: + virtual bool should_exit_termination() = 0; +}; + // A class to aid in the termination of a set of parallel tasks using // TaskQueueSet's for work stealing. @@ -410,7 +442,14 @@ // else is. If returns "true", all threads are terminated. If returns // "false", available work has been observed in one of the task queues, // so the global task is not complete. - bool offer_termination(); + bool offer_termination() { + return offer_termination(NULL); + } + + // As above, but it also terminates of the should_exit_termination() + // method of the terminator parameter returns true. If terminator is + // NULL, then it is ignored. + bool offer_termination(TerminatorTerminator* terminator); // Reset the terminator, so that it may be reused again. // The caller is responsible for ensuring that this is done @@ -493,36 +532,60 @@ typedef GenericTaskQueue OopTaskQueue; typedef GenericTaskQueueSet OopTaskQueueSet; -typedef oop* StarTask; + +#define COMPRESSED_OOP_MASK 1 + +// This is a container class for either an oop* or a narrowOop*. +// Both are pushed onto a task queue and the consumer will test is_narrow() +// to determine which should be processed. +class StarTask { + void* _holder; // either union oop* or narrowOop* + public: + StarTask(narrowOop *p) { _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK); } + StarTask(oop *p) { _holder = (void*)p; } + StarTask() { _holder = NULL; } + operator oop*() { return (oop*)_holder; } + operator narrowOop*() { + return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK); + } + + // Operators to preserve const/volatile in assignments required by gcc + void operator=(const volatile StarTask& t) volatile { _holder = t._holder; } + + bool is_narrow() const { + return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0); + } +}; + typedef GenericTaskQueue OopStarTaskQueue; typedef GenericTaskQueueSet OopStarTaskQueueSet; -typedef size_t ChunkTask; // index for chunk -typedef GenericTaskQueue ChunkTaskQueue; -typedef GenericTaskQueueSet ChunkTaskQueueSet; +typedef size_t RegionTask; // index for region +typedef GenericTaskQueue RegionTaskQueue; +typedef GenericTaskQueueSet RegionTaskQueueSet; -class ChunkTaskQueueWithOverflow: public CHeapObj { +class RegionTaskQueueWithOverflow: public CHeapObj { protected: - ChunkTaskQueue _chunk_queue; - GrowableArray* _overflow_stack; + RegionTaskQueue _region_queue; + GrowableArray* _overflow_stack; public: - ChunkTaskQueueWithOverflow() : _overflow_stack(NULL) {} + RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {} // Initialize both stealable queue and overflow void initialize(); // Save first to stealable queue and then to overflow - void save(ChunkTask t); + void save(RegionTask t); // Retrieve first from overflow and then from stealable queue - bool retrieve(ChunkTask& chunk_index); + bool retrieve(RegionTask& region_index); // Retrieve from stealable queue - bool retrieve_from_stealable_queue(ChunkTask& chunk_index); + bool retrieve_from_stealable_queue(RegionTask& region_index); // Retrieve from overflow - bool retrieve_from_overflow(ChunkTask& chunk_index); + bool retrieve_from_overflow(RegionTask& region_index); bool is_empty(); bool stealable_is_empty(); bool overflow_is_empty(); - juint stealable_size() { return _chunk_queue.size(); } - ChunkTaskQueue* task_queue() { return &_chunk_queue; } + juint stealable_size() { return _region_queue.size(); } + 
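StarTask stores either an oop* or a narrowOop* in a single word by setting the low bit for the narrow case; that bit is free because both pointer types are at least 2-byte aligned, and the consumer calls is_narrow() before picking the matching conversion operator (the narrowOop* operator strips the bit, the oop* operator returns the word as is). The standalone sketch below reproduces the same tag-bit trick with plain integer pointers; the names are illustrative and nothing here is HotSpot code.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Standalone illustration of the StarTask idea: tag the low bit of an
    // aligned pointer to remember which of two pointer types is stored.
    const uintptr_t NARROW_MASK = 1;

    struct TaggedSlot {
      void* _holder;
      explicit TaggedSlot(uint32_t* narrow_p)        // stands in for narrowOop*
        : _holder((void*)((uintptr_t)narrow_p | NARROW_MASK)) { }
      explicit TaggedSlot(uint64_t* wide_p)          // stands in for oop*
        : _holder((void*)wide_p) { }

      bool is_narrow() const { return ((uintptr_t)_holder & NARROW_MASK) != 0; }
      uint32_t* as_narrow() const {
        return (uint32_t*)((uintptr_t)_holder & ~NARROW_MASK);
      }
      uint64_t* as_wide() const { assert(!is_narrow()); return (uint64_t*)_holder; }
    };

    int main() {
      uint32_t n = 7;
      uint64_t w = 9;
      TaggedSlot a(&n), b(&w);
      std::printf("narrow? %d %d\n", a.is_narrow(), b.is_narrow());   // 1 0
      std::printf("values: %u %llu\n", *a.as_narrow(),
                  (unsigned long long)*b.as_wide());                  // 7 9
      return 0;
    }

The payoff in the patch is that a single queue element type can carry both pointer flavors, so the same task queues work whether or not compressed oops are in use.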
RegionTaskQueue* task_queue() { return &_region_queue; } }; -#define USE_ChunkTaskQueueWithOverflow +#define USE_RegionTaskQueueWithOverflow --- old/hotspot/src/share/vm/utilities/vmError.cpp 2009-08-01 04:16:01.020820697 +0100 +++ new/hotspot/src/share/vm/utilities/vmError.cpp 2009-08-01 04:16:00.951919450 +0100 @@ -2,7 +2,7 @@ #pragma ident "@(#)vmError.cpp 1.34 07/09/13 20:51:49 JVM" #endif /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -173,7 +173,8 @@ out->print_raw_cr(Arguments::java_vendor_url_bug()); // If the crash is in native code, encourage user to submit a bug to the // provider of that code. - if (thread && thread->is_Java_thread()) { + if (thread && thread->is_Java_thread() && + !thread->is_hidden_from_external_view()) { JavaThread* jt = (JavaThread*)thread; if (jt->thread_state() == _thread_in_native) { out->print_cr("# The crash happened outside the Java Virtual Machine in native code.\n# See problematic frame for where to report the bug."); @@ -252,10 +253,10 @@ BEGIN - STEP(10, "(printing unexpected error message)") + STEP(10, "(printing fatal error message)") st->print_cr("#"); - st->print_cr("# An unexpected error has been detected by Java Runtime Environment:"); + st->print_cr("# A fatal error has been detected by the Java Runtime Environment:"); STEP(15, "(printing type of error)") @@ -265,7 +266,7 @@ st->print("# java.lang.OutOfMemoryError: "); if (_size) { st->print("requested "); - sprintf(buf,"%d",_size); + sprintf(buf,SIZE_FORMAT,_size); st->print(buf); st->print(" bytes"); if (_message != NULL) { @@ -334,11 +335,14 @@ // VM version st->print_cr("#"); - st->print_cr("# Java VM: %s (%s %s %s)", + JDK_Version::current().to_string(buf, sizeof(buf)); + st->print_cr("# JRE version: %s", buf); + st->print_cr("# Java VM: %s (%s %s %s %s)", Abstract_VM_Version::vm_name(), Abstract_VM_Version::vm_release(), Abstract_VM_Version::vm_info_string(), - Abstract_VM_Version::vm_platform_string() + Abstract_VM_Version::vm_platform_string(), + UseCompressedOops ? "compressed oops" : "" ); STEP(60, "(printing problematic frame)") --- old/hotspot/src/share/vm/utilities/workgroup.cpp 2009-08-01 04:16:01.906491793 +0100 +++ new/hotspot/src/share/vm/utilities/workgroup.cpp 2009-08-01 04:16:01.830448491 +0100 @@ -31,13 +31,19 @@ // Definitions of WorkGang methods. AbstractWorkGang::AbstractWorkGang(const char* name, - bool are_GC_threads) : + bool are_GC_task_threads, + bool are_ConcurrentGC_threads) : _name(name), - _are_GC_threads(are_GC_threads) { + _are_GC_task_threads(are_GC_task_threads), + _are_ConcurrentGC_threads(are_ConcurrentGC_threads) { + + assert(!(are_GC_task_threads && are_ConcurrentGC_threads), + "They cannot both be STW GC and Concurrent threads" ); + // Other initialization. _monitor = new Monitor(/* priority */ Mutex::leaf, /* name */ "WorkGroup monitor", - /* allow_vm_block */ are_GC_threads); + /* allow_vm_block */ are_GC_task_threads); assert(monitor() != NULL, "Failed to allocate monitor"); _terminate = false; _task = NULL; @@ -47,16 +53,21 @@ } WorkGang::WorkGang(const char* name, - int workers, - bool are_GC_threads) : - AbstractWorkGang(name, are_GC_threads) { + int workers, + bool are_GC_task_threads, + bool are_ConcurrentGC_threads) : + AbstractWorkGang(name, are_GC_task_threads, are_ConcurrentGC_threads) +{ // Save arguments. 
_total_workers = workers; + if (TraceWorkGang) { tty->print_cr("Constructing work gang %s with %d threads", name, workers); } _gang_workers = NEW_C_HEAP_ARRAY(GangWorker*, workers); - assert(gang_workers() != NULL, "Failed to allocate gang workers"); + if (gang_workers() == NULL) { + vm_exit_out_of_memory(0, "Cannot create GangWorker array."); + } for (int worker = 0; worker < total_workers(); worker += 1) { GangWorker* new_worker = new GangWorker(this, worker); assert(new_worker != NULL, "Failed to allocate GangWorker"); @@ -288,7 +299,11 @@ } bool GangWorker::is_GC_task_thread() const { - return gang()->are_GC_threads(); + return gang()->are_GC_task_threads(); +} + +bool GangWorker::is_ConcurrentGC_thread() const { + return gang()->are_ConcurrentGC_threads(); } void GangWorker::print_on(outputStream* st) const { @@ -315,26 +330,43 @@ WorkGangBarrierSync::WorkGangBarrierSync() : _monitor(Mutex::safepoint, "work gang barrier sync", true), - _n_workers(0), _n_completed(0) { + _n_workers(0), _n_completed(0), _should_reset(false) { } WorkGangBarrierSync::WorkGangBarrierSync(int n_workers, const char* name) : _monitor(Mutex::safepoint, name, true), - _n_workers(n_workers), _n_completed(0) { + _n_workers(n_workers), _n_completed(0), _should_reset(false) { } void WorkGangBarrierSync::set_n_workers(int n_workers) { _n_workers = n_workers; _n_completed = 0; + _should_reset = false; } void WorkGangBarrierSync::enter() { MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag); + if (should_reset()) { + // The should_reset() was set and we are the first worker to enter + // the sync barrier. We will zero the n_completed() count which + // effectively resets the barrier. + zero_completed(); + set_should_reset(false); + } inc_completed(); if (n_completed() == n_workers()) { + // At this point we would like to reset the barrier to be ready in + // case it is used again. However, we cannot set n_completed() to + // 0, even after the notify_all(), given that some other workers + // might still be waiting for n_completed() to become == + // n_workers(). So, if we set n_completed() to 0, those workers + // will get stuck (as they will wake up, see that n_completed() != + // n_workers() and go back to sleep). Instead, we raise the + // should_reset() flag and the barrier will be reset the first + // time a worker enters it again. + set_should_reset(true); monitor()->notify_all(); - } - else { + } else { while (n_completed() != n_workers()) { monitor()->wait(/* no_safepoint_check */ true); } @@ -445,3 +477,122 @@ } return false; } + +bool FreeIdSet::_stat_init = false; +FreeIdSet* FreeIdSet::_sets[NSets]; +bool FreeIdSet::_safepoint; + +FreeIdSet::FreeIdSet(int sz, Monitor* mon) : + _sz(sz), _mon(mon), _hd(0), _waiters(0), _index(-1), _claimed(0) +{ + _ids = new int[sz]; + for (int i = 0; i < sz; i++) _ids[i] = i+1; + _ids[sz-1] = end_of_list; // end of list. + if (_stat_init) { + for (int j = 0; j < NSets; j++) _sets[j] = NULL; + _stat_init = true; + } + // Add to sets. (This should happen while the system is still single-threaded.) 
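The long comment in WorkGangBarrierSync::enter() explains why the last arriving worker must not zero n_completed() directly: waiters that have not woken yet still need to observe n_completed() == n_workers(), so the reset is deferred to the first worker of the next cycle via the should_reset() flag. Below is a standalone sketch of that deferred-reset pattern using standard C++ threads, purely as an illustration (the real code uses HotSpot's Monitor); like the original, it assumes no thread re-enters the barrier until every waiter from the previous cycle has returned, which callers can guarantee for instance by alternating between two barrier objects.

    #include <condition_variable>
    #include <mutex>

    // Standalone illustration of the deferred-reset barrier described above.
    // The last arriver raises a flag instead of zeroing the counter; the first
    // arriver of the next cycle performs the actual reset.
    class BarrierSync {
      std::mutex              _mu;
      std::condition_variable _cv;
      int  _n_workers;
      int  _n_completed;
      bool _should_reset;
     public:
      explicit BarrierSync(int n_workers)
        : _n_workers(n_workers), _n_completed(0), _should_reset(false) { }

      void enter() {
        std::unique_lock<std::mutex> lk(_mu);
        if (_should_reset) {          // first worker of a new cycle: reset now
          _n_completed = 0;
          _should_reset = false;
        }
        _n_completed++;
        if (_n_completed == _n_workers) {
          // Do not zero _n_completed here; waiters that have not run yet must
          // still see the full count.  Defer the reset to the next cycle.
          _should_reset = true;
          _cv.notify_all();
        } else {
          _cv.wait(lk, [this] { return _n_completed == _n_workers; });
        }
      }
    };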
+ for (int j = 0; j < NSets; j++) { + if (_sets[j] == NULL) { + _sets[j] = this; + _index = j; + break; + } + } + guarantee(_index != -1, "Too many FreeIdSets in use!"); +} + +FreeIdSet::~FreeIdSet() { + _sets[_index] = NULL; +} + +void FreeIdSet::set_safepoint(bool b) { + _safepoint = b; + if (b) { + for (int j = 0; j < NSets; j++) { + if (_sets[j] != NULL && _sets[j]->_waiters > 0) { + Monitor* mon = _sets[j]->_mon; + mon->lock_without_safepoint_check(); + mon->notify_all(); + mon->unlock(); + } + } + } +} + +#define FID_STATS 0 + +int FreeIdSet::claim_par_id() { +#if FID_STATS + thread_t tslf = thr_self(); + tty->print("claim_par_id[%d]: sz = %d, claimed = %d\n", tslf, _sz, _claimed); +#endif + MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag); + while (!_safepoint && _hd == end_of_list) { + _waiters++; +#if FID_STATS + if (_waiters > 5) { + tty->print("claim_par_id waiting[%d]: %d waiters, %d claimed.\n", + tslf, _waiters, _claimed); + } +#endif + _mon->wait(Mutex::_no_safepoint_check_flag); + _waiters--; + } + if (_hd == end_of_list) { +#if FID_STATS + tty->print("claim_par_id[%d]: returning EOL.\n", tslf); +#endif + return -1; + } else { + int res = _hd; + _hd = _ids[res]; + _ids[res] = claimed; // For debugging. + _claimed++; +#if FID_STATS + tty->print("claim_par_id[%d]: returning %d, claimed = %d.\n", + tslf, res, _claimed); +#endif + return res; + } +} + +bool FreeIdSet::claim_perm_id(int i) { + assert(0 <= i && i < _sz, "Out of range."); + MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag); + int prev = end_of_list; + int cur = _hd; + while (cur != end_of_list) { + if (cur == i) { + if (prev == end_of_list) { + _hd = _ids[cur]; + } else { + _ids[prev] = _ids[cur]; + } + _ids[cur] = claimed; + _claimed++; + return true; + } else { + prev = cur; + cur = _ids[cur]; + } + } + return false; + +} + +void FreeIdSet::release_par_id(int id) { + MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag); + assert(_ids[id] == claimed, "Precondition."); + _ids[id] = _hd; + _hd = id; + _claimed--; +#if FID_STATS + tty->print("[%d] release_par_id(%d), waiters =%d, claimed = %d.\n", + thr_self(), id, _waiters, _claimed); +#endif + if (_waiters > 0) + // Notify all would be safer, but this is OK, right? + _mon->notify_all(); +} --- old/hotspot/src/share/vm/utilities/workgroup.hpp 2009-08-01 04:16:02.777881172 +0100 +++ new/hotspot/src/share/vm/utilities/workgroup.hpp 2009-08-01 04:16:02.688331797 +0100 @@ -35,7 +35,7 @@ // An abstract task to be worked on by a gang. // You subclass this to supply your own work() method -class AbstractGangTask: public CHeapObj { +class AbstractGangTask VALUE_OBJ_CLASS_SPEC { public: // The abstract work method. // The argument tells you which member of the gang you are. @@ -75,7 +75,8 @@ // Here's the public interface to this class. public: // Constructor and destructor. - AbstractWorkGang(const char* name, bool are_GC_threads); + AbstractWorkGang(const char* name, bool are_GC_task_threads, + bool are_ConcurrentGC_threads); ~AbstractWorkGang(); // Run a task, returns when the task is done (or terminated). virtual void run_task(AbstractGangTask* task) = 0; @@ -86,7 +87,8 @@ const char* name() const; protected: // Initialize only instance data. - const bool _are_GC_threads; + const bool _are_GC_task_threads; + const bool _are_ConcurrentGC_threads; // Printing support. 
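FreeIdSet keeps its pool of small integer ids as an intrusive free list threaded through the _ids array itself: _hd names the first free id, _ids[i] names the next free id after i, and a claimed slot is stamped with a sentinel so release_par_id() can sanity-check callers, while claim_perm_id() walks the list to unlink one specific id. The standalone sketch below shows only this array-as-free-list layout; it omits the Monitor-based blocking and the static set registry, and the names are illustrative rather than HotSpot's.

    #include <cassert>
    #include <vector>

    // Standalone illustration of the FreeIdSet layout: the "next free" links
    // live inside the id array itself.  No locking; the real class blocks on a
    // Monitor when nothing is free and wakes claimants on release.
    class FreeIds {
      static const int END     = -1;   // terminates the free list
      static const int CLAIMED = -2;   // sentinel stored in claimed slots
      std::vector<int> _ids;           // _ids[i] = next free id after i
      int _hd;                         // first free id, or END
     public:
      explicit FreeIds(int sz) : _ids(sz), _hd(0) {
        for (int i = 0; i < sz - 1; i++) _ids[i] = i + 1;
        _ids[sz - 1] = END;
      }
      int claim() {                    // returns -1 when nothing is free
        if (_hd == END) return -1;
        int id = _hd;
        _hd = _ids[id];
        _ids[id] = CLAIMED;
        return id;
      }
      void release(int id) {
        assert(_ids[id] == CLAIMED);
        _ids[id] = _hd;
        _hd = id;
      }
    };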
const char* _name; // The monitor which protects these data, @@ -133,8 +135,11 @@ int finished_workers() const { return _finished_workers; } - bool are_GC_threads() const { - return _are_GC_threads; + bool are_GC_task_threads() const { + return _are_GC_task_threads; + } + bool are_ConcurrentGC_threads() const { + return _are_ConcurrentGC_threads; } // Predicates. bool is_idle() const { @@ -193,7 +198,8 @@ class WorkGang: public AbstractWorkGang { public: // Constructor - WorkGang(const char* name, int workers, bool are_GC_threads); + WorkGang(const char* name, int workers, + bool are_GC_task_threads, bool are_ConcurrentGC_threads); // Run a task, returns when the task is done (or terminated). virtual void run_task(AbstractGangTask* task); }; @@ -209,6 +215,7 @@ virtual void run(); // Predicate for Thread virtual bool is_GC_task_thread() const; + virtual bool is_ConcurrentGC_thread() const; // Printing void print_on(outputStream* st) const; virtual void print() const { print_on(tty); } @@ -231,12 +238,17 @@ Monitor _monitor; int _n_workers; int _n_completed; + bool _should_reset; + + Monitor* monitor() { return &_monitor; } + int n_workers() { return _n_workers; } + int n_completed() { return _n_completed; } + bool should_reset() { return _should_reset; } - Monitor* monitor() { return &_monitor; } - int n_workers() { return _n_workers; } - int n_completed() { return _n_completed; } + void zero_completed() { _n_completed = 0; } + void inc_completed() { _n_completed++; } - void inc_completed() { _n_completed++; } + void set_should_reset(bool v) { _should_reset = v; } public: WorkGangBarrierSync(); @@ -346,3 +358,42 @@ // cleanup if necessary. bool all_tasks_completed(); }; + +// Represents a set of free small integer ids. +class FreeIdSet { + enum { + end_of_list = -1, + claimed = -2 + }; + + int _sz; + Monitor* _mon; + + int* _ids; + int _hd; + int _waiters; + int _claimed; + + static bool _safepoint; + typedef FreeIdSet* FreeIdSetPtr; + static const int NSets = 10; + static FreeIdSetPtr _sets[NSets]; + static bool _stat_init; + int _index; + +public: + FreeIdSet(int sz, Monitor* mon); + ~FreeIdSet(); + + static void set_safepoint(bool b); + + // Attempt to claim the given id permanently. Returns "true" iff + // successful. + bool claim_perm_id(int i); + + // Returns an unclaimed parallel id (waiting for one to be released if + // necessary). Returns "-1" if a GC wakes up a wait for an id. + int claim_par_id(); + + void release_par_id(int id); +}; --- old/hotspot/src/share/vm/utilities/xmlstream.cpp 2009-08-01 04:16:03.696134969 +0100 +++ new/hotspot/src/share/vm/utilities/xmlstream.cpp 2009-08-01 04:16:03.611624314 +0100 @@ -62,6 +62,7 @@ if (!is_open()) return; out()->write(s, len); + update_position(s, len); } --- old/hotspot/src/share/vm/utilities/yieldingWorkgroup.cpp 2009-08-01 04:16:04.529669083 +0100 +++ new/hotspot/src/share/vm/utilities/yieldingWorkgroup.cpp 2009-08-01 04:16:04.443945729 +0100 @@ -34,8 +34,8 @@ class WorkData; YieldingFlexibleWorkGang::YieldingFlexibleWorkGang( - const char* name, int workers, bool are_GC_threads) : - AbstractWorkGang(name, are_GC_threads) { + const char* name, int workers, bool are_GC_task_threads) : + AbstractWorkGang(name, are_GC_task_threads, false) { // Save arguments. 
_total_workers = workers; assert(_total_workers > 0, "Must have more than 1 worker"); --- old/hotspot/src/share/vm/utilities/yieldingWorkgroup.hpp 2009-08-01 04:16:05.352622626 +0100 +++ new/hotspot/src/share/vm/utilities/yieldingWorkgroup.hpp 2009-08-01 04:16:05.263859176 +0100 @@ -146,7 +146,8 @@ // Here's the public interface to this class. public: // Constructor and destructor. - YieldingFlexibleWorkGang(const char* name, int workers, bool are_GC_threads); + YieldingFlexibleWorkGang(const char* name, int workers, + bool are_GC_task_threads); YieldingFlexibleGangTask* yielding_task() const { assert(task() == NULL || task()->is_YieldingFlexibleGang_task(), --- old/hotspot/test/Makefile 2009-08-01 04:16:06.203089623 +0100 +++ new/hotspot/test/Makefile 2009-08-01 04:16:06.121571772 +0100 @@ -1,5 +1,5 @@ # -# Copyright 2006 Sun Microsystems, Inc. All Rights Reserved. +# Copyright 1995-2008 Sun Microsystems, Inc. All Rights Reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,17 +19,18 @@ # Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, # CA 95054 USA or visit www.sun.com if you need additional information or # have any questions. -# +# # # -# Makefile to run jtreg +# Makefile to run various jdk tests # +# Get OS/ARCH specifics OSNAME = $(shell uname -s) +SLASH_JAVA = /java ifeq ($(OSNAME), SunOS) PLATFORM = solaris - JCT_PLATFORM = solaris ARCH = $(shell uname -p) ifeq ($(ARCH), i386) ARCH=i586 @@ -37,203 +38,165 @@ endif ifeq ($(OSNAME), Linux) PLATFORM = linux - JCT_PLATFORM = linux ARCH = $(shell uname -m) ifeq ($(ARCH), i386) - ARCH=i586 + ARCH = i586 endif endif ifeq ($(OSNAME), Windows_NT) PLATFORM = windows - JCT_PLATFORM = win32 + SLASH_JAVA = J: ifeq ($(word 1, $(PROCESSOR_IDENTIFIER)),ia64) - ARCH=ia64 + ARCH = ia64 else ifeq ($(word 1, $(PROCESSOR_IDENTIFIER)),AMD64) - ARCH=x64 + ARCH = x64 else ifeq ($(word 1, $(PROCESSOR_IDENTIFIER)),EM64T) - ARCH=x64 + ARCH = x64 else - ARCH=i586 + ARCH = i586 endif endif endif + EXESUFFIX = .exe +endif + +# Utilities used +CD = cd +CP = cp +ECHO = echo +MKDIR = mkdir +ZIP = zip + +# Root of this test area (important to use full paths in some places) +TEST_ROOT := $(shell pwd) + +# Root of all test results +ABS_BUILD_ROOT = $(TEST_ROOT)/../build/$(PLATFORM)-$(ARCH) +ABS_TEST_OUTPUT_DIR = $(ABS_BUILD_ROOT)/testoutput + +# Expect JPRT to set PRODUCT_HOME (the product or jdk in this case to test) +ifndef PRODUCT_HOME + # Try to use j2sdk-image if it exists + ABS_JDK_IMAGE = $(ABS_BUILD_ROOT)/j2sdk-image + PRODUCT_HOME := \ + $(shell \ + if [ -d $(ABS_JDK_IMAGE) ] ; then \ + $(ECHO) "$(ABS_JDK_IMAGE)"; \ + else \ + $(ECHO) "$(ABS_BUILD_ROOT)" ; \ + fi) +endif + +# Expect JPRT to set JAVA_ARGS (e.g. -server etc.) +JAVA_OPTIONS = +ifdef JAVA_ARGS + JAVA_OPTIONS = $(JAVA_ARGS) +endif + +# Expect JPRT to set JPRT_ARCHIVE_BUNDLE (path to zip bundle for results) +ARCHIVE_BUNDLE = $(ABS_TEST_OUTPUT_DIR)/ARCHIVE_BUNDLE.zip +ifdef JPRT_ARCHIVE_BUNDLE + ARCHIVE_BUNDLE = $(JPRT_ARCHIVE_BUNDLE) +endif + +# How to create the test bundle (pass or fail, we want to create this) +BUNDLE_UP = ( $(MKDIR) -p `dirname $(ARCHIVE_BUNDLE)` \ + && $(CD) $(ABS_TEST_OUTPUT_DIR) \ + && $(ZIP) -q -r $(ARCHIVE_BUNDLE) . ) +BUNDLE_UP_FAILED = ( exitCode=$$? 
&& $(BUNDLE_UP) && exit $${exitCode} ) + +################################################################ + +# Default make rule (runs jtreg_tests) +all: jtreg_tests + @$(ECHO) "Testing completed successfully" + +# Prep for output +prep: clean + @$(MKDIR) -p $(ABS_TEST_OUTPUT_DIR) + @$(MKDIR) -p `dirname $(ARCHIVE_BUNDLE)` + +# Cleanup +clean: + $(RM) -r $(ABS_TEST_OUTPUT_DIR) + $(RM) $(ARCHIVE_BUNDLE) + +################################################################ + +# jtreg tests + +# Expect JT_HOME to be set for jtreg tests. (home for jtreg) +JT_HOME = $(SLASH_JAVA)/re/jtreg/4.0/promoted/latest/binaries/jtreg +ifdef JPRT_JTREG_HOME + JT_HOME = $(JPRT_JTREG_HOME) endif -# Default bundle of all test results (passed or not) -JPRT_ARCHIVE_BUNDLE=$(TEST_ROOT)/JPRT_ARCHIVE_BUNDLE.zip +# Expect JPRT to set TESTDIRS to the jtreg test dirs +JTREG_TESTDIRS = demo/jvmti/gctest demo/jvmti/hprof +ifdef TESTDIRS + JTREG_TESTDIRS = $(TESTDIRS) +endif + +# Default JTREG to run (win32 script works for everybody) +JTREG = $(JT_HOME)/win32/bin/jtreg -# Default home for JTREG +# Option to tell jtreg to not run tests marked with "ignore" ifeq ($(PLATFORM), windows) - JT_HOME = J:/svc/jct-tools3.2.2_01 + JTREG_KEY_OPTION = -k:!ignore else - JT_HOME = /java/svc/jct-tools3.2.2_01 + JTREG_KEY_OPTION = -k:\!ignore endif -# Default JTREG to run -JTREG = $(JT_HOME)/$(JCT_PLATFORM)/bin/jtreg +#EXTRA_JTREG_OPTIONS = -# Root of this test area -TEST_ROOT := $(shell pwd) +jtreg_tests: prep $(JT_HOME) $(PRODUCT_HOME) $(JTREG) + $(JTREG) -a -v:fail,error \ + $(JTREG_KEY_OPTION) \ + $(EXTRA_JTREG_OPTIONS) \ + -r:$(ABS_TEST_OUTPUT_DIR)/JTreport \ + -w:$(ABS_TEST_OUTPUT_DIR)/JTwork \ + -jdk:$(PRODUCT_HOME) \ + $(JAVA_OPTIONS:%=-vmoption:%) \ + $(JTREG_TESTDIRS) \ + || $(BUNDLE_UP_FAILED) + $(BUNDLE_UP) + +PHONY_LIST += jtreg_tests + +################################################################ + +# packtest + +# Expect JPRT to set JPRT_PACKTEST_HOME. 
+PACKTEST_HOME = /net/jprt-web.sfbay.sun.com/jprt/allproducts/packtest +ifdef JPRT_PACKTEST_HOME + PACKTEST_HOME = $(JPRT_PACKTEST_HOME) +endif -# Default JDK to test -JAVA_HOME = $(TEST_ROOT)/../build/$(PLATFORM)-$(ARCH) +#EXTRA_PACKTEST_OPTIONS = -# The test directories to run -DEFAULT_TESTDIRS = serviceability -TESTDIRS = $(DEFAULT_TESTDIRS) - -# Files that hold total passed and failed counts (passed==0 is bad) -JTREG_TOTALS_DIR = $(TEST_ROOT)/JTREG_TOTALS_$(PLATFORM)_$(ARCH) -JTREG_FAILED = $(JTREG_TOTALS_DIR)/failed_count -JTREG_PASSED = $(JTREG_TOTALS_DIR)/passed_count +packtest: prep $(PACKTEST_HOME)/ptest $(PRODUCT_HOME) + ( $(CD) $(PACKTEST_HOME) && \ + $(PACKTEST_HOME)/ptest \ + -t "$(PRODUCT_HOME)" \ + $(PACKTEST_STRESS_OPTION) \ + $(EXTRA_PACKTEST_OPTIONS) \ + -W $(ABS_TEST_OUTPUT_DIR) \ + $(JAVA_OPTIONS:%=-J %) \ + ) || $(BUNDLE_UP_FAILED) + $(BUNDLE_UP) + +packtest_stress: PACKTEST_STRESS_OPTION=-s +packtest_stress: packtest -# Root of all test results -JTREG_ALL_OUTPUT_DIRNAME = JTREG_OUTPUT_$(PLATFORM)_$(ARCH) -JTREG_ALL_OUTPUT_DIR = $(TEST_ROOT)/$(JTREG_ALL_OUTPUT_DIRNAME) +PHONY_LIST += packtest packtest_stress -# Test results for one test directory -JTREG_TEST_OUTPUT_DIR = $(JTREG_ALL_OUTPUT_DIR)/$@ -JTREG_TEST_REPORT_DIR = $(JTREG_TEST_OUTPUT_DIR)/JTreport -JTREG_TEST_WORK_DIR = $(JTREG_TEST_OUTPUT_DIR)/JTwork -JTREG_TEST_SUMMARY = $(JTREG_TEST_REPORT_DIR)/summary.txt - -# Temp files used by this Makefile -JTREG_TEST_TEMP_DIR = $(JTREG_ALL_OUTPUT_DIR)/$@/temp -JTREG_TEMP_PASSED = $(JTREG_TEST_TEMP_DIR)/passed -JTREG_TEMP_FAILED = $(JTREG_TEST_TEMP_DIR)/failed -JTREG_TEMP_OUTPUT = $(JTREG_TEST_TEMP_DIR)/output -JTREG_TEMP_RESULTS = $(JTREG_TEST_TEMP_DIR)/results - -# JTREG options (different for 2.1.6 and 3.2.2_01) -JTREG_COMMON_OPTIONS = -r:$(JTREG_TEST_REPORT_DIR) \ - -w:$(JTREG_TEST_WORK_DIR) \ - -testjdk:$(JAVA_HOME) \ - -automatic \ - -verbose:all -JTREG_216_OPTIONS = $(JTREG_COMMON_OPTIONS) $@ $(JAVA_ARGS) -JTREG_322_OPTIONS = $(JTREG_COMMON_OPTIONS) $(JAVA_ARGS:%=-vmoption:%) $@ - -# Default make rule -all: clean check tests - -# Chaeck to make sure these directories exist -check: $(JT_HOME) $(JAVA_HOME) $(JTREG) - -# Prime the test run -primecounts: FRC - @rm -f -r $(JTREG_TOTALS_DIR) - @mkdir -p $(JTREG_TOTALS_DIR) - @echo "0" > $(JTREG_FAILED) - @echo "0" > $(JTREG_PASSED) - -# Run the tests and determine the 'make' command exit status -# Ultimately we determine the make exit code based on the passed/failed count -tests: primecounts $(TESTDIRS) - @echo "JTREG TOTAL: passed=`cat $(JTREG_PASSED)` failed=`cat $(JTREG_FAILED)`" - zip -q -r $(JPRT_ARCHIVE_BUNDLE) $(JTREG_ALL_OUTPUT_DIRNAME) - @if [ `cat $(JTREG_FAILED)` -ne 0 -o \ - `cat $(JTREG_PASSED)` -le 0 ] ; then \ - echo "JTREG FAILED"; \ - exit 1; \ - else \ - echo "JTREG PASSED"; \ - exit 0; \ - fi - -# Just make sure these directires exist -$(JT_HOME) $(JAVA_HOME): FRC - @if [ ! -d $@ ] ; then \ - echo "ERROR: Directory $@ does not exist"; \ - exit 1; \ - fi - -# Make sure this file exists -$(JTREG): FRC - @if [ ! -f $@ ] ; then \ - echo "ERROR: File $@ does not exist"; \ - exit 1; \ - fi - -# Process each test directory one by one, this rule always completes. -# Note that the use of 'tee' tosses the jtreg process exit status, this -# is as expected because even if jtreg fails, we need to save the -# output. So we update the JTREG_PASSED and JTREG_FAILED count files. 
-# Note that missing the 'results:' line in the last few lines of output -# will indicate a failure (or a bump by one of the JTREG_FAILED file. -# Note that passed: 0 or no passed: indication means a failure. -# Note that any indication of the word 'failed' indicates failure. -# Ultimately if the contents of JTREG_FAILED is not 0, we have failed -# tests, and if the contents of JTREG_PASSED is 0, we consider that a -# failure. -$(TESTDIRS): FRC - @if [ ! -d $@ ] ; then \ - echo "ERROR: Directory $@ does not exist"; \ - exit 1; \ - fi - @echo "---------------------------------------------------" - @rm -f -r $(JTREG_TEST_OUTPUT_DIR) - @mkdir -p $(JTREG_TEST_OUTPUT_DIR) - @mkdir -p $(JTREG_TEST_WORK_DIR) - @mkdir -p $(JTREG_TEST_WORK_DIR)/scratch - @mkdir -p $(JTREG_TEST_REPORT_DIR) - @mkdir -p $(JTREG_TEST_TEMP_DIR) - @echo "Testing $@" - @echo "Using JAVA_HOME=$(JAVA_HOME)" - @echo "Using JAVA_ARGS=$(JAVA_ARGS)" - @if [ "`$(JTREG) -help 2>&1 | fgrep -- -vmoption`" != "" ] ; then \ - echo "Assume we are using jtreg 3.2.2_01 or newer"; \ - echo "$(JTREG) $(JTREG_322_OPTIONS)"; \ - $(JTREG) $(JTREG_322_OPTIONS) 2>&1 | tee $(JTREG_TEMP_OUTPUT) ; \ - else \ - echo "Assume we are using jtreg 2.1.6"; \ - echo "$(JTREG) $(JTREG_216_OPTIONS)"; \ - $(JTREG) $(JTREG_216_OPTIONS) 2>&1 | tee $(JTREG_TEMP_OUTPUT) ; \ - fi - @echo "---------------------------------------------------" - @echo "Extracting passed and failed counts from jtreg output" - @tail -10 $(JTREG_TEMP_OUTPUT) | fgrep -i 'results:' | \ - tail -1 | tee $(JTREG_TEMP_RESULTS) - @sed -e 's@.*\ passed:\ \([1-9][0-9]*\).*@\1@' $(JTREG_TEMP_RESULTS) \ - > $(JTREG_TEMP_PASSED) - @if [ "`cat $(JTREG_TEMP_PASSED)`" = "" ] ; then \ - echo "ERROR: No passed indication in results"; \ - expr `cat $(JTREG_FAILED)` '+' 1 > $(JTREG_FAILED); \ - elif [ `cat $(JTREG_TEMP_PASSED)` -le 0 ] ; then \ - echo "ERROR: Passed count appears to be 0"; \ - expr `cat $(JTREG_FAILED)` '+' 1 > $(JTREG_FAILED); \ - elif [ "`fgrep -i failed $(JTREG_TEMP_RESULTS)`" = "" ] ; then \ - echo "No indication anything failed"; \ - expr `cat $(JTREG_PASSED)` '+' `cat $(JTREG_TEMP_PASSED)` \ - > $(JTREG_PASSED); \ - else \ - sed -e 's@.*\ failed:\ \([1-9][0-9]*\).*@\1@' $(JTREG_TEMP_FAILED) \ - > $(JTREG_TEMP_FAILED); \ - if [ "`cat $(JTREG_TEMP_FAILED)`" = "" ] ; then \ - echo "ERROR: Failed pattern but no failed count in results"; \ - expr `cat $(JTREG_FAILED)` '+' 1 > $(JTREG_FAILED); \ - elif [ `cat $(JTREG_TEMP_FAILED)` -le 0 ] ; then \ - echo "ERROR: Failed count is 0, did something failed or not?"; \ - expr `cat $(JTREG_FAILED)` '+' 1 > $(JTREG_FAILED); \ - else \ - expr `cat $(JTREG_FAILED)` '+' `cat $(JTREG_TEMP_FAILED)` \ - > $(JTREG_FAILED); \ - fi; \ - fi - @echo "---------------------------------------------------" - @echo "Summary: " - @if [ -f $(JTREG_TEST_SUMMARY) ] ; then \ - cat $(JTREG_TEST_SUMMARY) ; \ - else \ - echo "ERROR: Missing $(JTREG_TEST_SUMMARY)"; \ - fi - @echo "---------------------------------------------------" +################################################################ -# Cleanup -clean: - rm -f -r $(JTREG_ALL_OUTPUT_DIR) - rm -f $(JPRT_ARCHIVE_BUNDLE) +# Phony targets (e.g. these are not filenames) +.PHONY: all clean prep $(PHONY_LIST) -FRC: +################################################################ --- old/hotspot/test/compiler/6659207/Test.java 2009-08-01 04:16:07.152374762 +0100 +++ new/hotspot/test/compiler/6659207/Test.java 2009-08-01 04:16:07.077746398 +0100 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2002 Sun Microsystems, Inc. 
All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- old/hotspot/test/compiler/6663621/IVTest.java 2009-08-01 04:16:08.041819133 +0100 +++ new/hotspot/test/compiler/6663621/IVTest.java 2009-08-01 04:16:07.970015904 +0100 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2002 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- old/hotspot/test/compiler/6724218/Test.java 2009-08-01 04:16:08.903209429 +0100 +++ new/hotspot/test/compiler/6724218/Test.java 2009-08-01 04:16:08.830496126 +0100 @@ -39,7 +39,7 @@ static Test cache = null; Object get(int i) { - Test t = list; + Test t = list; list = t.next; size -= 1; Object o = t.value; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/NarrowOopField.java 2009-08-01 04:16:09.606762444 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +package sun.jvm.hotspot.oops; + +import sun.jvm.hotspot.debugger.*; + +// The class for an oop field simply provides access to the value. +public class NarrowOopField extends OopField { + public NarrowOopField(FieldIdentifier id, long offset, boolean isVMField) { + super(id, offset, isVMField); + } + + public NarrowOopField(sun.jvm.hotspot.types.OopField vmField, long startOffset) { + super(new NamedFieldIdentifier(vmField.getName()), vmField.getOffset() + startOffset, true); + } + + public NarrowOopField(InstanceKlass holder, int fieldArrayIndex) { + super(holder, fieldArrayIndex); + } + + public Oop getValue(Oop obj) { + return obj.getHeap().newOop(getValueAsOopHandle(obj)); + } + + /** Debugging support */ + public OopHandle getValueAsOopHandle(Oop obj) { + return obj.getHandle().getCompOopHandleAt(getOffset()); + } + + public void setValue(Oop obj) throws MutationException { + // Fix this: setOopAt is missing in Address + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/NarrowOopField.java 2009-08-01 04:16:10.037790255 +0100 @@ -0,0 +1,41 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +package sun.jvm.hotspot.types; + +import sun.jvm.hotspot.debugger.*; + +/** A specialization of Field which represents a field containing an + narrow oop value and which adds typechecked getValue() routines returning + OopHandles. */ + +public interface NarrowOopField extends OopField { + /** The field must be nonstatic and the type of the field must be an + oop type, or a WrongTypeException will be thrown. */ + public OopHandle getValue(Address addr) throws UnmappedAddressException, UnalignedAddressException, WrongTypeException; + + /** The field must be static and the type of the field must be an + oop type, or a WrongTypeException will be thrown. */ + public OopHandle getValue() throws UnmappedAddressException, UnalignedAddressException, WrongTypeException; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicNarrowOopField.java 2009-08-01 04:16:10.442164126 +0100 @@ -0,0 +1,65 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +package sun.jvm.hotspot.types.basic; + +import sun.jvm.hotspot.debugger.*; +import sun.jvm.hotspot.types.*; + +/** A specialization of BasicField which represents a field containing + an oop value and which adds typechecked getValue() routines + returning OopHandles. 
*/ + +public class BasicNarrowOopField extends BasicOopField implements NarrowOopField { + + private static final boolean DEBUG = false; + + public BasicNarrowOopField (OopField oopf) { + super(oopf); + } + + public BasicNarrowOopField(BasicTypeDataBase db, Type containingType, String name, Type type, + boolean isStatic, long offset, Address staticFieldAddress) { + super(db, containingType, name, type, isStatic, offset, staticFieldAddress); + + if (DEBUG) { + System.out.println(" name " + name + " type " + type + " isStatic " + isStatic + " offset " + offset + " static addr " + staticFieldAddress); + } + if (!type.isOopType()) { + throw new WrongTypeException("Type of a BasicOopField must be an oop type"); + } + } + + /** The field must be nonstatic and the type of the field must be a + Java oop, or a WrongTypeException will be thrown. */ + public OopHandle getValue(Address addr) throws UnmappedAddressException, UnalignedAddressException, WrongTypeException { + return getNarrowOopHandle(addr); + } + + /** The field must be static and the type of the field must be a + Java oop, or a WrongTypeException will be thrown. */ + public OopHandle getValue() throws UnmappedAddressException, UnalignedAddressException, WrongTypeException { + return getNarrowOopHandle(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/hotspot_distro 2009-08-01 04:16:10.850608403 +0100 @@ -0,0 +1,32 @@ +# +# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# + +# +# This file format must remain compatible with both +# GNU Makefile and Microsoft nmake formats. +# + +# Don't put quotes (fail windows build). +HOTSPOT_VM_DISTRO=Java HotSpot(TM) +COMPANY_NAME=Sun Microsystems, Inc. +PRODUCT_NAME=Java(TM) Platform SE --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/Makefile 2009-08-01 04:16:11.285602356 +0100 @@ -0,0 +1,312 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). 
+# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This makefile creates a build tree and lights off a build. +# You can go back into the build tree and perform rebuilds or +# incremental builds as desired. Be sure to reestablish +# environment variable settings for LD_LIBRARY_PATH and JAVA_HOME. + +# The make process now relies on java and javac. These can be +# specified either implicitly on the PATH, by setting the +# (JDK-inherited) ALT_BOOTDIR environment variable to full path to a +# JDK in which bin/java and bin/javac are present and working (e.g., +# /usr/local/java/jdk1.3/solaris), or via the (JDK-inherited) +# default BOOTDIR path value. Note that one of ALT_BOOTDIR +# or BOOTDIR has to be set. We do *not* search javac, javah, rmic etc. +# from the PATH. +# +# One can set ALT_BOOTDIR or BOOTDIR to point to a jdk that runs on +# an architecture that differs from the target architecture, as long +# as the bootstrap jdk runs under the same flavor of OS as the target +# (i.e., if the target is linux, point to a jdk that runs on a linux +# box). In order to use such a bootstrap jdk, set the make variable +# REMOTE to the desired remote command mechanism, e.g., +# +# make REMOTE="rsh -l me myotherlinuxbox" + +# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding. +# JDI binding on SA produces two binaries: +# 1. sa-jdi.jar - This is build before building libjvm[_g].so +# Please refer to ./makefiles/sa.make +# 2. libsa[_g].so - Native library for SA - This is built after +# libjsig[_g].so (signal interposition library) +# Please refer to ./makefiles/vm.make +# If $(GAMMADIR)/agent dir is not present, SA components are not built. + +ifeq ($(GAMMADIR),) +include ../../make/defs.make +else +include $(GAMMADIR)/make/defs.make +endif +include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make + +ifndef LP64 +ifndef CC_INTERP +FORCE_TIERED=1 +endif +endif + +ifdef LP64 + ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","") + _JUNK_ := $(shell echo >&2 \ + $(OSNAME) $(ARCH) "*** ERROR: this platform does not support 64-bit compilers!") + @exit 1 + endif +endif + +# we need to set up LP64 correctly to satisfy sanity checks in adlc +ifneq ("$(filter $(LP64_ARCH),$(BUILDARCH))","") + MFLAGS += " LP64=1 " +endif + +# pass USE_SUNCC further, through MFLAGS +ifdef USE_SUNCC + MFLAGS += " USE_SUNCC=1 " +endif + +# The following renders pathnames in generated Makefiles valid on +# machines other than the machine containing the build tree. +# +# For example, let's say my build tree lives on /files12 on +# exact.east.sun.com. This logic will cause GAMMADIR to begin with +# /net/exact/files12/... +# +# We only do this on SunOS variants, for a couple of reasons: +# * It is extremely rare that source trees exist on other systems +# * It has been claimed that the Linux automounter is flakey, so +# changing GAMMADIR in a way that exercises the automounter could +# prove to be a source of unreliability in the build process. +# Obviously, this Makefile is only relevant on SunOS boxes to begin +# with, but the SunOS conditionalization will make it easier to +# combine Makefiles in the future (assuming we ever do that). 
+ +ifeq ($(OSNAME),solaris) + + # prepend current directory to relative pathnames. + NEW_GAMMADIR := \ + $(shell echo $(GAMMADIR) | \ + sed -e "s=^\([^/].*\)=$(shell pwd)/\1=" \ + ) + unexport NEW_GAMMADIR + + # If NEW_GAMMADIR doesn't already start with "/net/": + ifeq ($(strip $(filter /net/%,$(NEW_GAMMADIR))),) + # prepend /net/$(HOST) + # remove /net/$(HOST) if name already began with /home/ + # remove /net/$(HOST) if name already began with /java/ + # remove /net/$(HOST) if name already began with /lab/ + NEW_GAMMADIR := \ + $(shell echo $(NEW_GAMMADIR) | \ + sed -e "s=^\(.*\)=/net/$(HOST)\1=" \ + -e "s=^/net/$(HOST)/home/=/home/=" \ + -e "s=^/net/$(HOST)/java/=/java/=" \ + -e "s=^/net/$(HOST)/lab/=/lab/=" \ + ) + # Don't use the new value for GAMMADIR unless a file with the new + # name actually exists. + ifneq ($(wildcard $(NEW_GAMMADIR)),) + GAMMADIR := $(NEW_GAMMADIR) + endif + endif + +endif + + +# There is a (semi-) regular correspondence between make targets and actions: +# +# Target Tree Type Build Dir +# +# debug compiler2 __compiler2/debug +# fastdebug compiler2 __compiler2/fastdebug +# jvmg compiler2 __compiler2/jvmg +# optimized compiler2 __compiler2/optimized +# profiled compiler2 __compiler2/profiled +# product compiler2 __compiler2/product +# +# debug1 compiler1 __compiler1/debug +# fastdebug1 compiler1 __compiler1/fastdebug +# jvmg1 compiler1 __compiler1/jvmg +# optimized1 compiler1 __compiler1/optimized +# profiled1 compiler1 __compiler1/profiled +# product1 compiler1 __compiler1/product +# +# debugcore core __core/debug +# fastdebugcore core __core/fastdebug +# jvmgcore core __core/jvmg +# optimizedcore core __core/optimized +# profiledcore core __core/profiled +# productcore core __core/product +# +# What you get with each target: +# +# debug* - "thin" libjvm_g - debug info linked into the gamma_g launcher +# fastdebug* - optimized compile, but with asserts enabled +# jvmg* - "fat" libjvm_g - debug info linked into libjvm_g.so +# optimized* - optimized compile, no asserts +# profiled* - gprof +# product* - the shippable thing: optimized compile, no asserts, -DPRODUCT + +# This target list needs to be coordinated with the usage message +# in the build.sh script: +TARGETS = debug jvmg fastdebug optimized profiled product + +SUBDIR_DOCS = $(OSNAME)_$(BUILDARCH)_docs +SUBDIRS_C1 = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler1/,$(TARGETS)) +SUBDIRS_C2 = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler2/,$(TARGETS)) +SUBDIRS_TIERED = $(addprefix $(OSNAME)_$(BUILDARCH)_tiered/,$(TARGETS)) +SUBDIRS_CORE = $(addprefix $(OSNAME)_$(BUILDARCH)_core/,$(TARGETS)) + +TARGETS_C2 = $(TARGETS) +TARGETS_C1 = $(addsuffix 1,$(TARGETS)) +TARGETS_TIERED = $(addsuffix tiered,$(TARGETS)) +TARGETS_CORE = $(addsuffix core,$(TARGETS)) + +BUILDTREE_MAKE = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make +BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) ARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) +BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) + +BUILDTREE = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS) + +#------------------------------------------------------------------------------- + +# Could make everything by default, but that would take a while. +all: + @echo "Try '$(MAKE) ...' 
where is one or more of" + @echo " $(TARGETS_C2)" + @echo " $(TARGETS_C1)" + @echo " $(TARGETS_CORE)" + +checks: check_os_version check_j2se_version + +# We do not want people accidentally building on old systems (e.g. Linux 2.2.x, +# Solaris 2.5.1, 2.6). +# Disable this check by setting DISABLE_HOTSPOT_OS_VERSION_CHECK=ok. + +SUPPORTED_OS_VERSION = 2.4% 2.5% 2.6% 2.7% +OS_VERSION := $(shell uname -r) +EMPTY_IF_NOT_SUPPORTED = $(filter $(SUPPORTED_OS_VERSION),$(OS_VERSION)) + +check_os_version: +ifeq ($(DISABLE_HOTSPOT_OS_VERSION_CHECK)$(EMPTY_IF_NOT_SUPPORTED),) + $(QUIETLY) >&2 echo "*** This OS is not supported:" `uname -a`; exit 1; +endif + +# jvmti.make requires XSLT (J2SE 1.4.x or newer): +XSLT_CHECK = $(REMOTE) $(RUN.JAVAP) javax.xml.transform.TransformerFactory +# If not found then fail fast. +check_j2se_version: + $(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \ + if [ $$? -ne 0 ]; then \ + $(REMOTE) $(RUN.JAVA) -version; \ + echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \ + "to bootstrap this build" 1>&2; \ + exit 1; \ + fi + +$(SUBDIRS_TIERED): $(BUILDTREE_MAKE) + $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks + $(BUILDTREE) VARIANT=tiered + +$(SUBDIRS_C2): $(BUILDTREE_MAKE) +ifdef FORCE_TIERED + $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks + $(BUILDTREE) VARIANT=tiered FORCE_TIERED=1 +else + $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks + $(BUILDTREE) VARIANT=compiler2 +endif + +$(SUBDIRS_C1): $(BUILDTREE_MAKE) + $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks + $(BUILDTREE) VARIANT=compiler1 + +$(SUBDIRS_CORE): $(BUILDTREE_MAKE) + $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks + $(BUILDTREE) VARIANT=core + +# Define INSTALL=y at command line to automatically copy JVM into JAVA_HOME + +$(TARGETS_C2): $(SUBDIRS_C2) + cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) + cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && ./test_gamma +ifdef INSTALL + cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) install +endif + +$(TARGETS_TIERED): $(SUBDIRS_TIERED) + cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) + cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && ./test_gamma +ifdef INSTALL + cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) install +endif + +$(TARGETS_C1): $(SUBDIRS_C1) + cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) + cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && ./test_gamma +ifdef INSTALL + cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) install +endif + +$(TARGETS_CORE): $(SUBDIRS_CORE) + cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) + cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && ./test_gamma +ifdef INSTALL + cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) install +endif + +# Just build the tree, and nothing else: +tree: $(SUBDIRS_C2) +tree1: $(SUBDIRS_C1) +treecore: $(SUBDIRS_CORE) + +# Doc target. This is the same for all build options. +# Hence create a docs directory beside ...$(ARCH)_[...] +docs: checks + $(QUIETLY) mkdir -p $(SUBDIR_DOCS) + $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) jvmtidocs + +# Synonyms for win32-like targets. 
+compiler2: jvmg product + +compiler1: jvmg1 product1 + +core: jvmgcore productcore + +clean_docs: + rm -rf $(SUBDIR_DOCS) + +clean_compiler1 clean_compiler2 clean_core: + rm -rf $(OSNAME)_$(BUILDARCH)_$(subst clean_,,$@) + +clean: clean_compiler2 clean_compiler1 clean_core clean_docs + +include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make + +#------------------------------------------------------------------------------- + +.PHONY: $(TARGETS_C2) $(TARGETS_C1) $(TARGETS_CORE) +.PHONY: tree tree1 treecore +.PHONY: all compiler1 compiler2 core +.PHONY: clean clean_compiler1 clean_compiler2 clean_core docs clean_docs +.PHONY: checks check_os_version check_j2se_version --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/README 2009-08-01 04:16:11.710646991 +0100 @@ -0,0 +1,28 @@ +Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved. +DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + +This code is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License version 2 only, as +published by the Free Software Foundation. + +This code is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +version 2 for more details (a copy is included in the LICENSE file that +accompanied this code). + +You should have received a copy of the GNU General Public License version +2 along with this work; if not, write to the Free Software Foundation, +Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + +Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +CA 95054 USA or visit www.sun.com if you need additional information or +have any questions. + +________________________________________________________________________ + +@(#)README 1.5 07/03/14 16:32:04 + +Please refer to the comments in the Makefile in this directory +for instructions how to build the Solaris versions. + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/adlc_updater 2009-08-01 04:16:12.102592010 +0100 @@ -0,0 +1,19 @@ +#! /bin/sh +# +# This file is used by adlc.make to selectively update generated +# adlc files. Because source and target diretories are relative +# paths, this file is copied to the target build directory before +# use. +# +# adlc-updater +# +fix_lines() { + # repair bare #line directives in $1 to refer to $2 + awk < $1 > $1+ ' + /^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next} + {print} + ' F2=$2 + mv $1+ $1 +} +[ -f $3/$1 ] && (fix_lines $2/$1 $3/$1; cmp -s $2/$1 $3/$1) || \ +( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 ) --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/build.sh 2009-08-01 04:16:12.520185415 +0100 @@ -0,0 +1,95 @@ +#! /bin/sh +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Make sure the variable JAVA_HOME is set before running this script. + +set -u + + +if [ $# != 2 ]; then + echo "Usage : $0 Build_Options Location" + echo "Build Options : debug or optimized or basicdebug or basic or clean" + echo "Location : specify any workspace which has gamma sources" + exit 1 +fi + +# Just in case: +case ${JAVA_HOME} in +/*) true;; +?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;; +esac + +case `uname -m` in + i386|i486|i586|i686) + mach=i386 + ;; + *) + echo "Unsupported machine: " `uname -m` + exit 1 + ;; +esac + +if [ "${JAVA_HOME}" = "" -o ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/${mach} ]; then + echo "JAVA_HOME needs to be set to a valid JDK path" + echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/linux" + echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/linux" + exit 1 +fi + + +LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\ +${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.} + +# This is necessary as long as we are using the old launcher +# with the new distribution format: +CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.} + + +for gm in gmake gnumake +do + if [ "${GNUMAKE-}" != "" ]; then break; fi + ($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm +done +: ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'} + + +echo "### ENVIRONMENT SETTINGS:" +export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME" +export LD_LIBRARY_PATH ; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" +export CLASSPATH ; echo "CLASSPATH=$CLASSPATH" +export GNUMAKE ; echo "GNUMAKE=$GNUMAKE" +echo "###" + +Build_Options=$1 +Location=$2 + +case ${Location} in +/*) true;; +?*) Location=`(cd ${Location}; pwd)`;; +esac + +echo \ +${GNUMAKE} -f ${Location}/make/linux/Makefile $Build_Options GAMMADIR=${Location} +${GNUMAKE} -f ${Location}/make/linux/Makefile $Build_Options GAMMADIR=${Location} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/adjust-mflags.sh 2009-08-01 04:16:12.978960648 +0100 @@ -0,0 +1,87 @@ +#! /bin/sh +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This script is used only from top.make. +# The macro $(MFLAGS-adjusted) calls this script to +# adjust the "-j" arguments to take into account +# the HOTSPOT_BUILD_JOBS variable. The default +# handling of the "-j" argument by gnumake does +# not meet our needs, so we must adjust it ourselves. + +# This argument adjustment applies to two recursive +# calls to "$(MAKE) $(MFLAGS-adjusted)" in top.make. +# One invokes adlc.make, and the other invokes vm.make. +# The adjustment propagates the desired concurrency +# level down to the sub-make (of the adlc or vm). +# The default behavior of gnumake is to run all +# sub-makes without concurrency ("-j1"). + +# Also, we use a make variable rather than an explicit +# "-j" argument to control this setting, so that +# the concurrency setting (which must be tuned separately +# for each MP system) can be set via an environment variable. +# The recommended setting is 1.5x to 2x the number of available +# CPUs on the MP system, which is large enough to keep the CPUs +# busy (even though some jobs may be I/O bound) but not too large, +# we may presume, to overflow the system's swap space. + +set -eu + +default_build_jobs=4 + +case $# in +[12]) true;; +*) >&2 echo "Usage: $0 ${MFLAGS} ${HOTSPOT_BUILD_JOBS}"; exit 2;; +esac + +MFLAGS=$1 +HOTSPOT_BUILD_JOBS=${2-} + +# Normalize any -jN argument to the form " -j${HBJ}" +MFLAGS=` + echo "$MFLAGS" \ + | sed ' + s/^-/ -/ + s/ -\([^ ][^ ]*\)j/ -\1 -j/ + s/ -j[0-9][0-9]*/ -j/ + s/ -j\([^ ]\)/ -j -\1/ + s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/ + ' ` + +case ${HOTSPOT_BUILD_JOBS} in \ + +'') case ${MFLAGS} in + *\ -j*) + >&2 echo "# Note: -jN is ineffective for setting parallelism in this makefile." + >&2 echo "# please set HOTSPOT_BUILD_JOBS=${default_build_jobs} in the command line or environment." + esac;; + +?*) case ${MFLAGS} in + *\ -j*) true;; + *) MFLAGS="-j${HOTSPOT_BUILD_JOBS} ${MFLAGS}";; + esac;; +esac + +echo "${MFLAGS}" --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/adlc.make 2009-08-01 04:16:13.413164657 +0100 @@ -0,0 +1,220 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This makefile (adlc.make) is included from the adlc.make in the +# build directories. +# It knows how to compile, link, and run the adlc. 
+ +include $(GAMMADIR)/make/$(Platform_os_family)/makefiles/rules.make + +# ######################################################################### + +# OUTDIR must be the same as AD_Dir = $(GENERATED)/adfiles in top.make: +GENERATED = ../generated +OUTDIR = $(GENERATED)/adfiles + +ARCH = $(Platform_arch) +OS = $(Platform_os_family) + +SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad + +SOURCES.AD = $(GAMMADIR)/src/cpu/$(ARCH)/vm/$(Platform_arch_model).ad \ + $(GAMMADIR)/src/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad + +Src_Dirs += $(GAMMADIR)/src/share/vm/adlc + +EXEC = $(OUTDIR)/adlc + +# set VPATH so make knows where to look for source files +Src_Dirs_V = ${Src_Dirs} $(GENERATED)/incls +VPATH += $(Src_Dirs_V:%=%:) + +# set INCLUDES for C preprocessor +Src_Dirs_I = ${Src_Dirs} $(GENERATED) +INCLUDES += $(Src_Dirs_I:%=-I%) + +# set flags for adlc compilation +CPPFLAGS = $(SYSDEFS) $(INCLUDES) + +# Force assertions on. +CPPFLAGS += -DASSERT + +# CFLAGS_WARN holds compiler options to suppress/enable warnings. +# Suppress warnings (for now) +CFLAGS_WARN = -w +CFLAGS += $(CFLAGS_WARN) + +OBJECTNAMES = \ + adlparse.o \ + archDesc.o \ + arena.o \ + dfa.o \ + dict2.o \ + filebuff.o \ + forms.o \ + formsopt.o \ + formssel.o \ + main.o \ + adlc-opcodes.o \ + output_c.o \ + output_h.o \ + +OBJECTS = $(OBJECTNAMES:%=$(OUTDIR)/%) + +GENERATEDNAMES = \ + ad_$(Platform_arch_model).cpp \ + ad_$(Platform_arch_model).hpp \ + ad_$(Platform_arch_model)_clone.cpp \ + ad_$(Platform_arch_model)_expand.cpp \ + ad_$(Platform_arch_model)_format.cpp \ + ad_$(Platform_arch_model)_gen.cpp \ + ad_$(Platform_arch_model)_misc.cpp \ + ad_$(Platform_arch_model)_peephole.cpp \ + ad_$(Platform_arch_model)_pipeline.cpp \ + adGlobals_$(Platform_arch_model).hpp \ + dfa_$(Platform_arch_model).cpp \ + +GENERATEDFILES = $(GENERATEDNAMES:%=$(OUTDIR)/%) + +# ######################################################################### + +all: $(EXEC) + +$(EXEC) : $(OBJECTS) + @echo Making adlc + $(QUIETLY) $(LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS) + +# Random dependencies: +$(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp + +# The source files refer to ostream.h, which sparcworks calls iostream.h +$(OBJECTS): ostream.h + +ostream.h : + @echo >$@ '#include ' + +dump: + : OUTDIR=$(OUTDIR) + : OBJECTS=$(OBJECTS) + : products = $(GENERATEDFILES) + +all: $(GENERATEDFILES) + +$(GENERATEDFILES): refresh_adfiles + +# Get a unique temporary directory name, so multiple makes can run in parallel. +# Note that product files are updated via "mv", which is atomic. +TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$) + +# Pass -D flags into ADLC. +ADLCFLAGS += $(SYSDEFS) + +# Note "+="; it is a hook so flags.make can add more flags, like -g or -DFOO. +ADLCFLAGS += -q -T + +# Normally, debugging is done directly on the ad_*.cpp files. +# But -g will put #line directives in those files pointing back to .ad. +#ADLCFLAGS += -g + +ifdef LP64 +ADLCFLAGS += -D_LP64 +else +ADLCFLAGS += -U_LP64 +endif + +# +# adlc_updater is a simple sh script, under sccs control. It is +# used to selectively update generated adlc files. This should +# provide a nice compilation speed improvement. +# +ADLC_UPDATER_DIRECTORY = $(GAMMADIR)/make/$(OS) +ADLC_UPDATER = adlc_updater +$(ADLC_UPDATER): $(ADLC_UPDATER_DIRECTORY)/$(ADLC_UPDATER) + $(QUIETLY) cp $< $@; chmod +x $@ + +# This action refreshes all generated adlc files simultaneously. 
+# The way it works is this: +# 1) create a scratch directory to work in. +# 2) if the current working directory does not have $(ADLC_UPDATER), copy it. +# 3) run the compiled adlc executable. This will create new adlc files in the scratch directory. +# 4) call $(ADLC_UPDATER) on each generated adlc file. It will selectively update changed or missing files. +# 5) If we actually updated any files, echo a notice. +# +refresh_adfiles: $(EXEC) $(SOURCE.AD) $(ADLC_UPDATER) + @rm -rf $(TEMPDIR); mkdir $(TEMPDIR) + $(QUIETLY) $(EXEC) $(ADLCFLAGS) $(SOURCE.AD) \ + -c$(TEMPDIR)/ad_$(Platform_arch_model).cpp -h$(TEMPDIR)/ad_$(Platform_arch_model).hpp -a$(TEMPDIR)/dfa_$(Platform_arch_model).cpp -v$(TEMPDIR)/adGlobals_$(Platform_arch_model).hpp \ + || { rm -rf $(TEMPDIR); exit 1; } + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model).cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model).hpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_clone.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_expand.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_format.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_gen.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_misc.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_peephole.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_pipeline.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) adGlobals_$(Platform_arch_model).hpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) dfa_$(Platform_arch_model).cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) [ -f $(TEMPDIR)/made-change ] \ + || echo "Rescanned $(SOURCE.AD) but encountered no changes." + $(QUIETLY) rm -rf $(TEMPDIR) + + +# ######################################################################### + +$(SOURCE.AD): $(SOURCES.AD) + $(QUIETLY) $(PROCESS_AD_FILES) $(SOURCES.AD) > $(SOURCE.AD) + +#PROCESS_AD_FILES = cat +# Pass through #line directives, in case user enables -g option above: +PROCESS_AD_FILES = awk '{ \ + if (CUR_FN != FILENAME) { CUR_FN=FILENAME; NR_BASE=NR-1; need_lineno=1 } \ + if (need_lineno && $$0 !~ /\/\//) \ + { print "\n\n\#line " (NR-NR_BASE) " \"" FILENAME "\""; need_lineno=0 }; \ + print }' + +$(OUTDIR)/%.o: %.cpp + @echo Compiling $< + $(QUIETLY) $(REMOVE_TARGET) + $(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE) + +# Some object files are given a prefix, to disambiguate +# them from objects of the same name built for the VM. +$(OUTDIR)/adlc-%.o: %.cpp + @echo Compiling $< + $(QUIETLY) $(REMOVE_TARGET) + $(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE) + +# ######################################################################### + +clean : + rm $(OBJECTS) + +cleanall : + rm $(OBJECTS) $(EXEC) + +# ######################################################################### + +.PHONY: all dump refresh_adfiles clean cleanall --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/amd64.make 2009-08-01 04:16:13.822158016 +0100 @@ -0,0 +1,42 @@ +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Not included in includeDB because it has no dependencies +Obj_Files += linux_x86_64.o + +# The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized +OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT) +# The copied fdlibm routines in sharedRuntimeTrans.o must not be optimized +OPT_CFLAGS/sharedRuntimeTrans.o = $(OPT_CFLAGS/NOOPT) +# Must also specify if CPU is little endian +CFLAGS += -DVM_LITTLE_ENDIAN + +CFLAGS += -D_LP64=1 + +# The serviceability agent relies on frame pointer (%rbp) to walk thread stack +ifndef USE_SUNCC + CFLAGS += -fno-omit-frame-pointer +endif + +OPT_CFLAGS/compactingPermGenGen.o = -O1 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/buildtree.make 2009-08-01 04:16:14.231835932 +0100 @@ -0,0 +1,357 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Usage: +# +# $(MAKE) -f buildtree.make ARCH=arch BUILDARCH=buildarch LIBARCH=libarch +# GAMMADIR=dir OS_FAMILY=os VARIANT=variant +# +# The macros ARCH, GAMMADIR, OS_FAMILY and VARIANT must be defined in the +# environment or on the command-line: +# +# ARCH - sparc, i486, ... HotSpot cpu and os_cpu source directory +# BUILDARCH - build directory +# LIBARCH - the corresponding directory in JDK/JRE +# GAMMADIR - top of workspace +# OS_FAMILY - operating system +# VARIANT - core, compiler1, compiler2, or tiered +# HOTSPOT_RELEASE_VERSION - .-b (11.0-b07) +# HOTSPOT_BUILD_VERSION - internal, PRTjob ID, JPRTjob ID +# JRE_RELEASE_VERSION - .. 
(1.7.0) +# +# Builds the directory trees with makefiles plus some convenience files in +# each directory: +# +# Makefile - for "make foo" +# flags.make - with macro settings +# vm.make - to support making "$(MAKE) -v vm.make" in makefiles +# adlc.make - +# jvmti.make - generate JVMTI bindings from the spec (JSR-163) +# sa.make - generate SA jar file and natives +# env.[ck]sh - environment settings +# test_gamma - script to run the Queens program +# +# The makefiles are split this way so that "make foo" will run faster by not +# having to read the dependency files for the vm. + +include $(GAMMADIR)/make/scm.make + +# 'gmake MAKE_VERBOSE=y' or 'gmake QUIETLY=' gives all the gory details. +QUIETLY$(MAKE_VERBOSE) = @ + +# For now, until the compiler is less wobbly: +TESTFLAGS = -Xbatch -showversion + +ifdef USE_SUNCC +PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH).suncc +else +PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH) +endif + +ifdef FORCE_TIERED +ifeq ($(VARIANT),tiered) +PLATFORM_DIR = $(OS_FAMILY)_$(BUILDARCH)_compiler2 +else +PLATFORM_DIR = $(OS_FAMILY)_$(BUILDARCH)_$(VARIANT) +endif +else +PLATFORM_DIR = $(OS_FAMILY)_$(BUILDARCH)_$(VARIANT) +endif + +# +# We do two levels of exclusion in the shared directory. +# TOPLEVEL excludes are pruned, they are not recursively searched, +# but lower level directories can be named without fear of collision. +# ALWAYS excludes are excluded at any level in the directory tree. +# + +ALWAYS_EXCLUDE_DIRS = $(SCM_DIRS) + +ifeq ($(VARIANT),tiered) +TOPLEVEL_EXCLUDE_DIRS = $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name agent +else +ifeq ($(VARIANT),compiler2) +TOPLEVEL_EXCLUDE_DIRS = $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name c1 -o -name agent +else +# compiler1 and core use the same exclude list +TOPLEVEL_EXCLUDE_DIRS = $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name opto -o -name libadt -o -name agent +endif +endif + +# Get things from the platform file. +COMPILER = $(shell sed -n 's/^compiler[ ]*=[ ]*//p' $(PLATFORM_FILE)) + +SIMPLE_DIRS = \ + $(PLATFORM_DIR)/generated/incls \ + $(PLATFORM_DIR)/generated/adfiles \ + $(PLATFORM_DIR)/generated/jvmtifiles + +TARGETS = debug fastdebug jvmg optimized product profiled +SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) + +# For dependencies and recursive makes. +BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make + +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \ + env.sh env.csh .dbxrc test_gamma + +BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ + ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) + +# Define variables to be set in flags.make. +# Default values are set in make/defs.make. +ifeq ($(HOTSPOT_BUILD_VERSION),) + HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION) +else + HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION) +endif +# Set BUILD_USER from system-dependent hints: $LOGNAME, $(whoami) +ifndef HOTSPOT_BUILD_USER + HOTSPOT_BUILD_USER := $(shell echo $$LOGNAME) +endif +ifndef HOTSPOT_BUILD_USER + HOTSPOT_BUILD_USER := $(shell whoami) +endif +# Define HOTSPOT_VM_DISTRO based on settings in make/openjdk_distro +# or make/hotspot_distro. 
+ifndef HOTSPOT_VM_DISTRO + CLOSED_DIR_EXISTS := $(shell \ + if [ -d $(GAMMADIR)/src/closed ] ; then \ + echo true; \ + else \ + echo false; \ + fi) + ifeq ($(CLOSED_DIR_EXISTS), true) + include $(GAMMADIR)/make/hotspot_distro + else + include $(GAMMADIR)/make/openjdk_distro + endif +endif + +BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) + +BUILDTREE = \ + $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_TARGETS) $(BUILDTREE_VARS) + +BUILDTREE_COMMENT = echo "\# Generated by $(BUILDTREE_MAKE)" + +all: $(SUBMAKE_DIRS) + +# Run make in each subdirectory recursively. +$(SUBMAKE_DIRS): $(SIMPLE_DIRS) FORCE + $(QUIETLY) [ -d $@ ] || { mkdir -p $@; } + $(QUIETLY) cd $@ && $(BUILDTREE) TARGET=$(@F) + $(QUIETLY) touch $@ + +$(SIMPLE_DIRS): + $(QUIETLY) mkdir -p $@ + +flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo "Platform_file = $(PLATFORM_FILE)" | sed 's|$(GAMMADIR)|$$(GAMMADIR)|'; \ + sed -n '/=/s/^ */Platform_/p' < $(PLATFORM_FILE); \ + echo; \ + echo "GAMMADIR = $(GAMMADIR)"; \ + echo "SYSDEFS = \$$(Platform_sysdefs)"; \ + echo "SRCARCH = $(ARCH)"; \ + echo "BUILDARCH = $(BUILDARCH)"; \ + echo "LIBARCH = $(LIBARCH)"; \ + echo "TARGET = $(TARGET)"; \ + echo "HS_BUILD_VER = $(HS_BUILD_VER)"; \ + echo "JRE_RELEASE_VER = $(JRE_RELEASE_VERSION)"; \ + echo "SA_BUILD_VERSION = $(HS_BUILD_VER)"; \ + echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \ + echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \ + echo; \ + echo "Src_Dirs = \\"; \ + sed 's/$$/ \\/;s|$(GAMMADIR)|$$(GAMMADIR)|' ../shared_dirs.lst; \ + echo "\$$(GAMMADIR)/src/cpu/$(ARCH)/vm \\"; \ + echo "\$$(GAMMADIR)/src/os/$(OS_FAMILY)/vm \\"; \ + echo "\$$(GAMMADIR)/src/os_cpu/$(OS_FAMILY)_$(ARCH)/vm"; \ + [ -n "$(CFLAGS_BROWSE)" ] && \ + echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \ + [ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \ + echo && \ + echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \ + echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \ + ) > $@ + +flags_vm.make: $(BUILDTREE_MAKE) ../shared_dirs.lst + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + [ "$(TARGET)" = profiled ] && \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/optimized.make"; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \ + ) > $@ + +../shared_dirs.lst: $(BUILDTREE_MAKE) $(GAMMADIR)/src/share/vm + @echo Creating directory list $@ + $(QUIETLY) find $(GAMMADIR)/src/share/vm/* -prune \ + -type d \! \( $(TOPLEVEL_EXCLUDE_DIRS) \) -exec find {} \ + \( $(ALWAYS_EXCLUDE_DIRS) \) -prune -o -type d -print \; > $@ + +Makefile: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/top.make"; \ + ) > $@ + +vm.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo include flags_vm.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + +adlc.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... 
+ $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + +jvmti.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + +sa.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + +env.sh: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + [ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \ + { \ + echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \ + echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \ + } | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \ + echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \ + echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \ + ) > $@ + +env.csh: env.sh + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + [ -n "$$JAVA_HOME" ] && \ + { echo "if (! \$$?JAVA_HOME) setenv JAVA_HOME \"$$JAVA_HOME\""; }; \ + sed -n 's/^\([A-Za-z_][A-Za-z0-9_]*\)=/setenv \1 /p' $?; \ + ) > $@ + +.dbxrc: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + echo "echo '# Loading $(PLATFORM_DIR)/$(TARGET)/.dbxrc'"; \ + echo "if [ -f \"\$${HOTSPOT_DBXWARE}\" ]"; \ + echo "then"; \ + echo " source \"\$${HOTSPOT_DBXWARE}\""; \ + echo "elif [ -f \"\$$HOME/.dbxrc\" ]"; \ + echo "then"; \ + echo " source \"\$$HOME/.dbxrc\""; \ + echo "fi"; \ + ) > $@ + +# Skip the test for product builds (which only work when installed in a JDK), to +# avoid exiting with an error and causing make to halt. +NO_TEST_MSG = \ + echo "$@: skipping the test--this build must be tested in a JDK." + +NO_JAVA_HOME_MSG = \ + echo "JAVA_HOME must be set to run this test." + +DATA_MODE = $(DATA_MODE/$(BUILDARCH)) +JAVA_FLAG = $(JAVA_FLAG/$(DATA_MODE)) + +DATA_MODE/i486 = 32 +DATA_MODE/sparc = 32 +DATA_MODE/sparcv9 = 64 +DATA_MODE/amd64 = 64 +DATA_MODE/ia64 = 64 + +JAVA_FLAG/32 = -d32 +JAVA_FLAG/64 = -d64 + +WRONG_DATA_MODE_MSG = \ + echo "JAVA_HOME must point to $(DATA_MODE)bit JDK." + +test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java + @echo Creating $@ ... + $(QUIETLY) ( \ + echo '#!/bin/sh'; \ + $(BUILDTREE_COMMENT); \ + echo '. ./env.sh'; \ + echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \ + echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \ + echo "then"; \ + echo " $(WRONG_DATA_MODE_MSG); exit 0;"; \ + echo "fi"; \ + echo "rm -f Queens.class"; \ + echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \ + echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \ + echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \ + ) > $@ + $(QUIETLY) chmod +x $@ + +include $(GAMMADIR)/build/linux/makefiles/rules.make + +Queens.class: $(GAMMADIR)/build/test/Queens.java + $(RM) -f $@ + $(REMOTE) $(COMPILE.JAVAC) -d . 
$< + +FORCE: + +.PHONY: all FORCE --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/compiler1.make 2009-08-01 04:16:14.667179967 +0100 @@ -0,0 +1,31 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making client version of VM + +TYPE=COMPILER1 + +VM_SUBDIR = client + +CFLAGS += -DCOMPILER1 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/compiler2.make 2009-08-01 04:16:15.076227129 +0100 @@ -0,0 +1,31 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making server version of VM + +TYPE=COMPILER2 + +VM_SUBDIR = server + +CFLAGS += -DCOMPILER2 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/core.make 2009-08-01 04:16:15.485310812 +0100 @@ -0,0 +1,34 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). 
+# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making core version of VM + +# Note the effect on includeDB lists in top.make: +# includeDB_compiler* and ad_.*pp are excluded from the build, +TYPE=CORE + +# There is no "core" directory in JDK. Install core build in server directory. +VM_SUBDIR = server + +# Note: macros.hpp defines CORE --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/cscope.make 2009-08-01 04:16:15.893222786 +0100 @@ -0,0 +1,162 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# +# @(#)cscope.make 1.7 07/05/05 17:03:53 +# +# The cscope.out file is made in the current directory and spans the entire +# source tree. +# +# Things to note: +# 1. We use relative names for cscope. +# 2. We *don't* remove the old cscope.out file, because cscope is smart +# enough to only build what has changed. It can be confused, however, +# if files are renamed or removed, so it may be necessary to manually +# remove cscope.out if a lot of reorganization has occurred. +# + +include $(GAMMADIR)/make/scm.make + +NAWK = awk +RM = rm -f +HG = hg +CS_TOP = ../.. + +CSDIRS = $(CS_TOP)/src $(CS_TOP)/build +CSINCS = $(CSDIRS:%=-I%) + +CSCOPE = cscope +CSCOPE_FLAGS = -b + +# Allow .java files to be added from the environment (CSCLASSES=yes). +ifdef CSCLASSES +ADDCLASSES= -o -name '*.java' +endif + +# Adding CClassHeaders also pushes the file count of a full workspace up about +# 200 files (these files also don't exist in a new workspace, and thus will +# cause the recreation of the database as they get created, which might seem +# a little confusing). Thus allow these files to be added from the environment +# (CSHEADERS=yes). +ifndef CSHEADERS +RMCCHEADERS= -o -name CClassHeaders +endif + +# Use CS_GENERATED=x to include auto-generated files in the build directories. +ifdef CS_GENERATED +CS_ADD_GENERATED = -o -name '*.incl' +else +CS_PRUNE_GENERATED = -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?' +endif + +# OS-specific files for other systems are excluded by default. Use CS_OS=yes +# to include platform-specific files for other platforms. 
+ifndef CS_OS +CS_OS = linux macos solaris win32 +CS_PRUNE_OS = $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS})) +endif + +# Processor-specific files for other processors are excluded by default. Use +# CS_CPU=x to include platform-specific files for other platforms. +ifndef CS_CPU +CS_CPU = i486 sparc amd64 ia64 +CS_PRUNE_CPU = $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU})) +endif + +# What files should we include? A simple rule might be just those files under +# SCCS control, however this would miss files we create like the opcodes and +# CClassHeaders. The following attempts to find everything that is *useful*. +# (.del files are created by sccsrm, demo directories contain many .java files +# that probably aren't useful for development, and the pkgarchive may contain +# duplicates of files within the source hierarchy). + +# Directories to exclude. +CS_PRUNE_STD = $(SCM_DIRS) \ + -o -name '.del-*' \ + -o -name '*demo' \ + -o -name pkgarchive + +CS_PRUNE = $(CS_PRUNE_STD) \ + $(CS_PRUNE_OS) \ + $(CS_PRUNE_CPU) \ + $(CS_PRUNE_GENERATED) \ + $(RMCCHEADERS) + +# File names to include. +CSFILENAMES = -name '*.[ch]pp' \ + -o -name '*.[Ccshlxy]' \ + $(CS_ADD_GENERATED) \ + -o -name '*.il' \ + -o -name '*.cc' \ + -o -name '*[Mm]akefile*' \ + -o -name '*.gmk' \ + -o -name '*.make' \ + -o -name '*.ad' \ + $(ADDCLASSES) + +.PRECIOUS: cscope.out + +cscope cscope.out: cscope.files FORCE + $(CSCOPE) $(CSCOPE_FLAGS) + +# The .raw file is reordered here in an attempt to make cscope display the most +# relevant files first. +cscope.files: .cscope.files.raw + echo "$(CSINCS)" > $@ + -egrep -v "\.java|\/make\/" $< >> $@ + -fgrep ".java" $< >> $@ + -fgrep "/make/" $< >> $@ + +.cscope.files.raw: .nametable.files + -find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \ + -type f \( $(CSFILENAMES) \) -print > $@ + +cscope.clean: nametable.clean + -$(RM) cscope.out cscope.files .cscope.files.raw + +TAGS: cscope.files FORCE + egrep -v '^-|^$$' $< | etags --members - + +TAGS.clean: nametable.clean + -$(RM) TAGS + +# .nametable.files and .nametable.files.tmp are used to determine if any files +# were added to/deleted from/renamed in the workspace. If not, then there's +# normally no need to rebuild the cscope database. To force a rebuild of +# the cscope database: gmake nametable.clean. +.nametable.files: .nametable.files.tmp + ( cmp -s $@ $< ) || ( cp $< $@ ) + -$(RM) $< + +# `hg status' is slightly faster than `hg fstatus'. Both are +# quite a bit slower on an NFS mounted file system, so this is +# really geared towards repos on local file systems. +.nametable.files.tmp: + -$(HG) fstatus -acmn > $@ +nametable.clean: + -$(RM) .nametable.files .nametable.files.tmp + +FORCE: + +.PHONY: cscope cscope.clean TAGS.clean nametable.clean FORCE --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/debug.make 2009-08-01 04:16:16.329456971 +0100 @@ -0,0 +1,44 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making debug version of VM + +# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make +DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) +DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) +CFLAGS += $(DEBUG_CFLAGS/BYFILE) + +# Linker mapfile +MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug + +_JUNK_ := $(shell echo -e >&2 ""\ + "----------------------------------------------------------------------\n" \ + "WARNING: 'make debug' is deprecated. It will be removed in the future.\n" \ + "Please use 'make jvmg' to build debug JVM. \n" \ + "----------------------------------------------------------------------\n") + +G_SUFFIX = +VERSION = debug +SYSDEFS += -DASSERT -DDEBUG +PICFLAGS = DEFAULT --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/defs.make 2009-08-01 04:16:16.763342544 +0100 @@ -0,0 +1,113 @@ +# +# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# The common definitions for hotspot linux builds. +# Include the top level defs.make under make directory instead of this one. +# This file is included into make/defs.make. 
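The DEBUG_CFLAGS/BYFILE construct in debug.make above (fastdebug.make does the same with OPT_CFLAGS) is a per-object override with a fallback: when a FLAGS/<object>.o variable is defined it wins, otherwise FLAGS/DEFAULT applies. A throwaway sketch, assuming GNU make; the object names and flag values are invented for illustration:

  cat > /tmp/byfile-demo.mk <<'EOF'
  DEBUG_CFLAGS = -g
  DEBUG_CFLAGS/DEFAULT = $(DEBUG_CFLAGS)
  DEBUG_CFLAGS/foo.o   = -g0
  # When DEBUG_CFLAGS/$@ is non-empty, the second reference turns into
  # DEBUG_CFLAGS/DEFAULT-g0, an undefined (hence empty) variable.
  DEBUG_CFLAGS/BYFILE  = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@))
  foo.o bar.o: ; @echo $@ '->' '$(DEBUG_CFLAGS/BYFILE)'
  EOF
  make -f /tmp/byfile-demo.mk foo.o bar.o
  #   foo.o -> -g0   (per-file override)
  #   bar.o -> -g    (falls back to DEFAULT)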
+ +SLASH_JAVA ?= /java + +# Need PLATFORM (os-arch combo names) for jdk and hotspot, plus libarch name +ARCH:=$(shell uname -m) +PATH_SEP = : +ifeq ($(LP64), 1) + ARCH_DATA_MODEL ?= 64 +else + ARCH_DATA_MODEL ?= 32 +endif + +# ia64 +ifeq ($(ARCH), ia64) + ARCH_DATA_MODEL = 64 + MAKE_ARGS += LP64=1 + PLATFORM = linux-ia64 + VM_PLATFORM = linux_ia64 + HS_ARCH = ia64 +endif + +# sparc +ifeq ($(ARCH), sparc64) + ifeq ($(ARCH_DATA_MODEL), 64) + ARCH_DATA_MODEL = 64 + MAKE_ARGS += LP64=1 + PLATFORM = linux-sparcv9 + VM_PLATFORM = linux_sparcv9 + else + ARCH_DATA_MODEL = 32 + PLATFORM = linux-sparc + VM_PLATFORM = linux_sparc + endif + HS_ARCH = sparc +endif + +# x86_64 +ifeq ($(ARCH), x86_64) + ifeq ($(ARCH_DATA_MODEL), 64) + ARCH_DATA_MODEL = 64 + MAKE_ARGS += LP64=1 + PLATFORM = linux-amd64 + VM_PLATFORM = linux_amd64 + HS_ARCH = x86 + else + ARCH_DATA_MODEL = 32 + PLATFORM = linux-i586 + VM_PLATFORM = linux_i486 + HS_ARCH = x86 + # We have to reset ARCH to i686 since SRCARCH relies on it + ARCH = i686 + endif +endif + +# i686 +ifeq ($(ARCH), i686) + ARCH_DATA_MODEL = 32 + PLATFORM = linux-i586 + VM_PLATFORM = linux_i486 + HS_ARCH = x86 +endif + +JDK_INCLUDE_SUBDIR=linux + +# FIXUP: The subdirectory for a debug build is NOT the same on all platforms +VM_DEBUG=jvmg + +EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html +EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server +EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt +EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjsig.so +EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.so +ifeq ($(ARCH_DATA_MODEL), 32) + EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjsig.so + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so + EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so + EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar +else + ifeq ($(ARCH),ia64) + else + EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so + EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar + endif +endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/dtrace.make 2009-08-01 04:16:17.188669493 +0100 @@ -0,0 +1,27 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Linux does not build jvm_db +LIBJVM_DB = + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/fastdebug.make 2009-08-01 04:16:17.597837664 +0100 @@ -0,0 +1,64 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. 
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making debug version of VM + +# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make +OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS) +OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@)) + +# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files) + +ifeq ($(BUILDARCH), ia64) + # Bug in GCC, causes hang. -O1 will override the -O3 specified earlier + OPT_CFLAGS/callGenerator.o += -O1 + OPT_CFLAGS/ciTypeFlow.o += -O1 + OPT_CFLAGS/compile.o += -O1 + OPT_CFLAGS/concurrentMarkSweepGeneration.o += -O1 + OPT_CFLAGS/doCall.o += -O1 + OPT_CFLAGS/generateOopMap.o += -O1 + OPT_CFLAGS/generateOptoStub.o += -O1 + OPT_CFLAGS/graphKit.o += -O1 + OPT_CFLAGS/instanceKlass.o += -O1 + OPT_CFLAGS/interpreterRT_ia64.o += -O1 + OPT_CFLAGS/output.o += -O1 + OPT_CFLAGS/parse1.o += -O1 + OPT_CFLAGS/runtime.o += -O1 + OPT_CFLAGS/synchronizer.o += -O1 +endif + + +# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings +CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE) + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. + +# Linker mapfile +MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug + +G_SUFFIX = +VERSION = optimized +SYSDEFS += -DASSERT -DFASTDEBUG +PICFLAGS = DEFAULT --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/gcc.make 2009-08-01 04:16:18.023662467 +0100 @@ -0,0 +1,171 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. 
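The CFLAGS$(HOTSPARC_GENERIC) line in fastdebug.make above is a kill switch spelled as a variable name: with HOTSPARC_GENERIC unset the += lands on CFLAGS, but setting it (for example to yes) redirects the append onto an unused variable, so none of the per-file OPT_CFLAGS reach the compile. A small sketch, assuming GNU make; names and values are illustrative:

  cat > /tmp/generic-demo.mk <<'EOF'
  OPT_CFLAGS/BYFILE = -O3
  CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
  show: ; @echo CFLAGS='$(CFLAGS)'
  EOF
  make -f /tmp/generic-demo.mk show                       # CFLAGS=-O3
  make -f /tmp/generic-demo.mk show HOTSPARC_GENERIC=yes  # CFLAGS= (append went to CFLAGSyes)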
+# +# + +#------------------------------------------------------------------------ +# CC, CPP & AS + +CPP = g++ +CC = gcc +AS = $(CC) -c + +# -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only +# prints the numbers (e.g. "2.95", "3.2.1") +CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1) +CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2) + +# check for precompiled headers support +ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0" +USE_PRECOMPILED_HEADER=1 +PRECOMPILED_HEADER_DIR=. +PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/incls/_precompiled.incl.gch +endif + + +#------------------------------------------------------------------------ +# Compiler flags + +# position-independent code +PICFLAG = -fPIC + +VM_PICFLAG/LIBJVM = $(PICFLAG) +VM_PICFLAG/AOUT = +VM_PICFLAG = $(VM_PICFLAG/$(LINK_INTO)) + +CFLAGS += $(VM_PICFLAG) +CFLAGS += -fno-rtti +CFLAGS += -fno-exceptions +CFLAGS += -D_REENTRANT +CFLAGS += -fcheck-new + +ARCHFLAG = $(ARCHFLAG/$(BUILDARCH)) +ARCHFLAG/i486 = -m32 -march=i586 +ARCHFLAG/amd64 = -m64 +ARCHFLAG/ia64 = +ARCHFLAG/sparc = -m32 -mcpu=v9 +ARCHFLAG/sparcv9 = -m64 -mcpu=v9 + +CFLAGS += $(ARCHFLAG) +AOUT_FLAGS += $(ARCHFLAG) +LFLAGS += $(ARCHFLAG) +ASFLAGS += $(ARCHFLAG) + +# Use C++ Interpreter +ifdef CC_INTERP + CFLAGS += -DCC_INTERP +endif + +# Keep temporary files (.ii, .s) +ifdef NEED_ASM + CFLAGS += -save-temps +else + CFLAGS += -pipe +endif + +# Compiler warnings are treated as errors +WARNINGS_ARE_ERRORS = -Werror + +# Except for a few acceptable ones +# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit +# conversions which might affect the values. To avoid that, we need to turn +# it off explicitly. +ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0" +ACCEPTABLE_WARNINGS = -Wpointer-arith -Wsign-compare +else +ACCEPTABLE_WARNINGS = -Wpointer-arith -Wconversion -Wsign-compare +endif + +CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(ACCEPTABLE_WARNINGS) +# Special cases +CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@)) + +# The flags to use for an Optimized g++ build +OPT_CFLAGS += -O3 + +# Hotspot uses very unstrict aliasing turn this optimization off +OPT_CFLAGS += -fno-strict-aliasing + +# The gcc compiler segv's on ia64 when compiling bytecodeInterpreter.cpp +# if we use expensive-optimizations +ifeq ($(BUILDARCH), ia64) +OPT_CFLAGS += -fno-expensive-optimizations +endif + +OPT_CFLAGS/NOOPT=-O0 + +# 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation. +ifneq "$(shell expr \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) = 3 \) \))" "0" +OPT_CFLAGS/mulnode.o += -O0 +endif + +#------------------------------------------------------------------------ +# Linker flags + +# statically link libstdc++.so, work with gcc but ignored by g++ +STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic + +# statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x. +ifneq ("${CC_VER_MAJOR}", "2") +STATIC_LIBGCC += -static-libgcc +endif + +ifeq ($(BUILDARCH), ia64) +LFLAGS += -Wl,-relax +endif + +# Enable linker optimization +LFLAGS += -Xlinker -O1 + +# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file. 
+MAPFLAG = -Xlinker --version-script=FILENAME + +# Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj +SONAMEFLAG = -Xlinker -soname=SONAME + +# Build shared library +SHARED_FLAG = -shared + +# Keep symbols even they are not used +AOUT_FLAGS += -export-dynamic + +#------------------------------------------------------------------------ +# Debug flags + +# Use the stabs format for debugging information (this is the default +# on gcc-2.91). It's good enough, has all the information about line +# numbers and local variables, and libjvm_g.so is only about 16M. +# Change this back to "-g" if you want the most expressive format. +# (warning: that could easily inflate libjvm_g.so to 150M!) +# Note: The Itanium gcc compiler crashes when using -gstabs. +DEBUG_CFLAGS/ia64 = -g +DEBUG_CFLAGS/amd64 = -g +DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH)) +ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),) +DEBUG_CFLAGS += -gstabs +endif + +# DEBUG_BINARIES overrides everything, use full -g debug information +ifeq ($(DEBUG_BINARIES), true) + DEBUG_CFLAGS = -g + CFLAGS += $(DEBUG_CFLAGS) +endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/hp.make 2009-08-01 04:16:18.474283344 +0100 @@ -0,0 +1,29 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making premium version of VM + +TYPE=HP + +CFLAGS += -DCOMPILER2 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/hp1.make 2009-08-01 04:16:18.887081135 +0100 @@ -0,0 +1,29 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making premium version of VM + +TYPE=HP1 + +CFLAGS += -DCOMPILER1 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/i486.make 2009-08-01 04:16:19.304205890 +0100 @@ -0,0 +1,36 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# TLS helper, assembled from .s file +# Not included in includeDB because it has no dependencies +Obj_Files += linux_x86_32.o + +# The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized +OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT) +# The copied fdlibm routines in sharedRuntimeTrans.o must not be optimized +OPT_CFLAGS/sharedRuntimeTrans.o = $(OPT_CFLAGS/NOOPT) +# Must also specify if CPU is little endian +CFLAGS += -DVM_LITTLE_ENDIAN + +OPT_CFLAGS/compactingPermGenGen.o = -O1 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/ia64.make 2009-08-01 04:16:19.746505809 +0100 @@ -0,0 +1,43 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. 
+# +# + +# +# IA64 only uses c++ based interpreter +CFLAGS += -DCC_INTERP -D_LP64=1 -DVM_LITTLE_ENDIAN +# Hotspot uses very unstrict aliasing turn this optimization off +OPT_CFLAGS += -fno-strict-aliasing +ifeq ($(VERSION),debug) +ASM_FLAGS= -DDEBUG +else +ASM_FLAGS= +endif +# workaround gcc bug in compiling varargs +OPT_CFLAGS/jni.o = -O0 + +# gcc/ia64 has a bug that internal gcc functions linked with libjvm.so +# are made public. Hiding those symbols will cause undefined symbol error +# when VM is dropped into older JDK. We probably will need an IA64 +# mapfile to include those symbols as a workaround. Disable linker mapfile +# for now. +LDNOMAP=true --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/jsig.make 2009-08-01 04:16:20.163327890 +0100 @@ -0,0 +1,58 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Rules to build signal interposition library, used by vm.make + +# libjsig[_g].so: signal interposition library +JSIG = jsig$(G_SUFFIX) +LIBJSIG = lib$(JSIG).so + +JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm + +DEST_JSIG = $(JDK_LIBDIR)/$(LIBJSIG) + +LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig + +# On Linux we really dont want a mapfile, as this library is small +# and preloaded using LD_PRELOAD, making functions private will +# cause problems with interposing. See CR: 6466665 +# LFLAGS_JSIG += $(MAPFLAG:FILENAME=$(LIBJSIG_MAPFILE)) + +LFLAGS_JSIG += -D_GNU_SOURCE -D_REENTRANT + +# DEBUG_BINARIES overrides everything, use full -g debug information +ifeq ($(DEBUG_BINARIES), true) + JSIG_DEBUG_CFLAGS = -g +endif + +$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE) + @echo Making signal interposition lib... + $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \ + $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl + +install_jsig: $(LIBJSIG) + @echo "Copying $(LIBJSIG) to $(DEST_JSIG)" + $(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done" + +.PHONY: install_jsig --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/jvmg.make 2009-08-01 04:16:20.605615911 +0100 @@ -0,0 +1,41 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making debug version of VM + +# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make +DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) +DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) +CFLAGS += $(DEBUG_CFLAGS/BYFILE) + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. + +# Linker mapfile +MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug + +G_SUFFIX = +VERSION = debug +SYSDEFS += -DASSERT -DDEBUG +PICFLAGS = DEFAULT --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/jvmti.make 2009-08-01 04:16:21.031202067 +0100 @@ -0,0 +1,118 @@ +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This makefile (jvmti.make) is included from the jvmti.make in the +# build directories. +# +# It knows how to build and run the tools to generate jvmti. 
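The signal interposition library built by jsig.make earlier is meant to be LD_PRELOADed into the launching process rather than linked against, which is also why its mapfile is left disabled there. A usage sketch; the JAVA_HOME and LIBARCH values depend on the local build and are illustrative only:

  # Chain application signal handlers with the VM's by preloading libjsig.so:
  LD_PRELOAD=$JAVA_HOME/jre/lib/$LIBARCH/libjsig.so \
      $JAVA_HOME/bin/java -version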
+ +include $(GAMMADIR)/make/linux/makefiles/rules.make + +# ######################################################################### + +TOPDIR = $(shell echo `pwd`) +GENERATED = $(TOPDIR)/../generated +JvmtiOutDir = $(GENERATED)/jvmtifiles + +JvmtiSrcDir = $(GAMMADIR)/src/share/vm/prims +InterpreterSrcDir = $(GAMMADIR)/src/share/vm/interpreter +Src_Dirs += $(JvmtiSrcDir) + +# set VPATH so make knows where to look for source files +Src_Dirs_V = ${Src_Dirs} +VPATH += $(Src_Dirs_V:%=%:) + +JvmtiGeneratedNames = \ + jvmtiEnv.hpp \ + jvmtiEnter.cpp \ + jvmtiEnterTrace.cpp \ + jvmtiEnvRecommended.cpp \ + bytecodeInterpreterWithChecks.cpp \ + jvmti.h \ + +JvmtiEnvFillSource = $(JvmtiSrcDir)/jvmtiEnvFill.java +JvmtiEnvFillClass = $(JvmtiOutDir)/jvmtiEnvFill.class + +JvmtiGenSource = $(JvmtiSrcDir)/jvmtiGen.java +JvmtiGenClass = $(JvmtiOutDir)/jvmtiGen.class + +JvmtiGeneratedFiles = $(JvmtiGeneratedNames:%=$(JvmtiOutDir)/%) + +XSLT = $(QUIETLY) $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen + +.PHONY: all jvmtidocs clean cleanall + +# ######################################################################### + +all: $(JvmtiGeneratedFiles) + +both = $(JvmtiGenClass) $(JvmtiSrcDir)/jvmti.xml $(JvmtiSrcDir)/jvmtiLib.xsl + +$(JvmtiGenClass): $(JvmtiGenSource) + $(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -g -d $(JvmtiOutDir) $(JvmtiGenSource) + +$(JvmtiEnvFillClass): $(JvmtiEnvFillSource) + $(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -g -d $(JvmtiOutDir) $(JvmtiEnvFillSource) + +$(JvmtiOutDir)/jvmtiEnter.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnter.cpp -PARAM interface jvmti + +$(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp: $(JvmtiGenClass) $(InterpreterSrcDir)/bytecodeInterpreter.cpp $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl + @echo Generating $@ + $(XSLT) -IN $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml -XSL $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl -OUT $(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp + +$(JvmtiOutDir)/jvmtiEnterTrace.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnterTrace.cpp -PARAM interface jvmti -PARAM trace Trace + +$(JvmtiOutDir)/jvmtiEnvRecommended.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnv.xsl $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiEnvFillClass) + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnv.xsl -OUT $(JvmtiOutDir)/jvmtiEnvStub.cpp + $(QUIETLY) $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiEnvFill $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiOutDir)/jvmtiEnvStub.cpp $(JvmtiOutDir)/jvmtiEnvRecommended.cpp + +$(JvmtiOutDir)/jvmtiEnv.hpp: $(both) $(JvmtiSrcDir)/jvmtiHpp.xsl + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiHpp.xsl -OUT $(JvmtiOutDir)/jvmtiEnv.hpp + +$(JvmtiOutDir)/jvmti.h: $(both) $(JvmtiSrcDir)/jvmtiH.xsl + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiH.xsl -OUT $(JvmtiOutDir)/jvmti.h + +jvmtidocs: $(JvmtiOutDir)/jvmti.html + +$(JvmtiOutDir)/jvmti.html: $(both) $(JvmtiSrcDir)/jvmti.xsl + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmti.xsl -OUT $(JvmtiOutDir)/jvmti.html + +# ######################################################################### + +clean : + rm $(JvmtiGenClass) 
$(JvmtiEnvFillClass) $(JvmtiGeneratedFiles) + +cleanall : + rm $(JvmtiGenClass) $(JvmtiEnvFillClass) $(JvmtiGeneratedFiles) + +# ######################################################################### + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/launcher.make 2009-08-01 04:16:21.465772664 +0100 @@ -0,0 +1,73 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Rules to build gamma launcher, used by vm.make + +# gamma[_g]: launcher +LAUNCHER = gamma$(G_SUFFIX) + +LAUNCHERDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher +LAUNCHERFLAGS = $(ARCHFLAG) \ + -I$(LAUNCHERDIR) -I$(GAMMADIR)/src/share/vm/prims \ + -DFULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \ + -DARCH=\"$(LIBARCH)\" \ + -DGAMMA \ + -DLAUNCHER_TYPE=\"gamma\" \ + -DLINK_INTO_$(LINK_INTO) + +ifeq ($(LINK_INTO),AOUT) + LAUNCHER.o = launcher.o $(JVM_OBJ_FILES) + LAUNCHER_MAPFILE = mapfile_reorder + LFLAGS_LAUNCHER$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LAUNCHER_MAPFILE)) + LFLAGS_LAUNCHER += $(SONAMEFLAG:SONAME=$(LIBJVM)) $(STATIC_LIBGCC) + LIBS_LAUNCHER += $(STATIC_STDCXX) $(LIBS) +else + LAUNCHER.o = launcher.o + LFLAGS_LAUNCHER += -L `pwd` + LIBS_LAUNCHER += -l$(JVM) $(LIBS) +endif + +LINK_LAUNCHER = $(LINK.c) + +LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CC/PRE_HOOK) +LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CC/POST_HOOK) + +launcher.o: launcher.c $(LAUNCHERDIR)/java.c $(LAUNCHERDIR)/java_md.c + $(CC) -g -c -o $@ launcher.c $(LAUNCHERFLAGS) $(CPPFLAGS) + +launcher.c: + @echo Generating $@ + $(QUIETLY) { \ + echo '#define debug launcher_debug'; \ + echo '#include "java.c"'; \ + echo '#include "java_md.c"'; \ + } > $@ + +$(LAUNCHER): $(LAUNCHER.o) $(LIBJVM) $(LAUNCHER_MAPFILE) + $(QUIETLY) { \ + echo Linking launcher...; \ + $(LINK_LAUNCHER/PRE_HOOK) \ + $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \ + $(LINK_LAUNCHER/POST_HOOK) \ + } --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/makedeps.make 2009-08-01 04:16:21.907718315 +0100 @@ -0,0 +1,43 @@ +# +# Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +include $(GAMMADIR)/make/linux/makefiles/rules.make + +COMPILE.JAVAC.FLAGS += -d $(OUTDIR) + +MakeDepsSources=\ + $(GAMMADIR)/src/share/tools/MakeDeps/Database.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/DirectoryTree.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/DirectoryTreeNode.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/FileFormatException.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/FileList.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/FileName.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/Macro.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/MacroDefinitions.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/MakeDeps.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/MetroWerksMacPlatform.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/Platform.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/UnixPlatform.java + +MakeDepsOptions= --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/mapfile-vers-debug 2009-08-01 04:16:22.325582053 +0100 @@ -0,0 +1,295 @@ +# +# @(#)mapfile-vers-debug 1.18 07/10/25 16:47:35 +# + +# +# Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define public interface. 
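
The SUNWprivate_1.1 block that follows is a GNU linker version script: the symbols listed under global: are exported from libjvm.so, and the local: * catch-all keeps everything else hidden. The compiler makefiles later in this patch define MAPFLAG = -Wl,--version-script=FILENAME, and vm.make substitutes its processed copy of this file (mapfile_reorder) into that flag, so the effective link step and a quick check of the result look roughly like the sketch below (command lines are illustrative, with the object list abbreviated).

    # link libjvm.so with the version script applied
    gcc -shared -Wl,--version-script=mapfile_reorder -o libjvm.so *.o
    # list the JVM_ entry points that actually end up exported
    nm -D libjvm.so | grep ' T JVM_'
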
+ +SUNWprivate_1.1 { + global: + # JNI + JNI_CreateJavaVM; + JNI_GetCreatedJavaVMs; + JNI_GetDefaultJavaVMInitArgs; + + # JVM + JVM_Accept; + JVM_ActiveProcessorCount; + JVM_AllocateNewArray; + JVM_AllocateNewObject; + JVM_ArrayCopy; + JVM_AssertionStatusDirectives; + JVM_Available; + JVM_Bind; + JVM_ClassDepth; + JVM_ClassLoaderDepth; + JVM_Clone; + JVM_Close; + JVM_CX8Field; + JVM_CompileClass; + JVM_CompileClasses; + JVM_CompilerCommand; + JVM_Connect; + JVM_ConstantPoolGetClassAt; + JVM_ConstantPoolGetClassAtIfLoaded; + JVM_ConstantPoolGetDoubleAt; + JVM_ConstantPoolGetFieldAt; + JVM_ConstantPoolGetFieldAtIfLoaded; + JVM_ConstantPoolGetFloatAt; + JVM_ConstantPoolGetIntAt; + JVM_ConstantPoolGetLongAt; + JVM_ConstantPoolGetMethodAt; + JVM_ConstantPoolGetMethodAtIfLoaded; + JVM_ConstantPoolGetMemberRefInfoAt; + JVM_ConstantPoolGetSize; + JVM_ConstantPoolGetStringAt; + JVM_ConstantPoolGetUTF8At; + JVM_CountStackFrames; + JVM_CurrentClassLoader; + JVM_CurrentLoadedClass; + JVM_CurrentThread; + JVM_CurrentTimeMillis; + JVM_DefineClass; + JVM_DefineClassWithSource; + JVM_DesiredAssertionStatus; + JVM_DisableCompiler; + JVM_DoPrivileged; + JVM_DTraceGetVersion; + JVM_DTraceActivate; + JVM_DTraceIsProbeEnabled; + JVM_DTraceIsSupported; + JVM_DTraceDispose; + JVM_DumpAllStacks; + JVM_DumpThreads; + JVM_EnableCompiler; + JVM_Exit; + JVM_FillInStackTrace; + JVM_FindClassFromClass; + JVM_FindClassFromClassLoader; + JVM_FindClassFromBootLoader; + JVM_FindLibraryEntry; + JVM_FindLoadedClass; + JVM_FindPrimitiveClass; + JVM_FindSignal; + JVM_FreeMemory; + JVM_GC; + JVM_GetAllThreads; + JVM_GetArrayElement; + JVM_GetArrayLength; + JVM_GetCPClassNameUTF; + JVM_GetCPFieldClassNameUTF; + JVM_GetCPFieldModifiers; + JVM_GetCPFieldNameUTF; + JVM_GetCPFieldSignatureUTF; + JVM_GetCPMethodClassNameUTF; + JVM_GetCPMethodModifiers; + JVM_GetCPMethodNameUTF; + JVM_GetCPMethodSignatureUTF; + JVM_GetCallerClass; + JVM_GetClassAccessFlags; + JVM_GetClassAnnotations; + JVM_GetClassCPEntriesCount; + JVM_GetClassCPTypes; + JVM_GetClassConstantPool; + JVM_GetClassContext; + JVM_GetClassDeclaredConstructors; + JVM_GetClassDeclaredFields; + JVM_GetClassDeclaredMethods; + JVM_GetClassFieldsCount; + JVM_GetClassInterfaces; + JVM_GetClassLoader; + JVM_GetClassMethodsCount; + JVM_GetClassModifiers; + JVM_GetClassName; + JVM_GetClassNameUTF; + JVM_GetClassSignature; + JVM_GetClassSigners; + JVM_GetComponentType; + JVM_GetDeclaredClasses; + JVM_GetDeclaringClass; + JVM_GetEnclosingMethodInfo; + JVM_GetFieldAnnotations; + JVM_GetFieldIxModifiers; + JVM_GetHostName; + JVM_GetInheritedAccessControlContext; + JVM_GetInterfaceVersion; + JVM_GetLastErrorString; + JVM_GetManagement; + JVM_GetMethodAnnotations; + JVM_GetMethodDefaultAnnotationValue; + JVM_GetMethodIxArgsSize; + JVM_GetMethodIxByteCode; + JVM_GetMethodIxByteCodeLength; + JVM_GetMethodIxExceptionIndexes; + JVM_GetMethodIxExceptionTableEntry; + JVM_GetMethodIxExceptionTableLength; + JVM_GetMethodIxExceptionsCount; + JVM_GetMethodIxLocalsCount; + JVM_GetMethodIxMaxStack; + JVM_GetMethodIxModifiers; + JVM_GetMethodIxNameUTF; + JVM_GetMethodIxSignatureUTF; + JVM_GetMethodParameterAnnotations; + JVM_GetPrimitiveArrayElement; + JVM_GetProtectionDomain; + JVM_GetSockName; + JVM_GetSockOpt; + JVM_GetStackAccessControlContext; + JVM_GetStackTraceDepth; + JVM_GetStackTraceElement; + JVM_GetSystemPackage; + JVM_GetSystemPackages; + JVM_GetThreadStateNames; + JVM_GetThreadStateValues; + JVM_GetVersionInfo; + JVM_Halt; + JVM_HoldsLock; + JVM_IHashCode; + JVM_InitAgentProperties; + 
JVM_InitProperties; + JVM_InitializeCompiler; + JVM_InitializeSocketLibrary; + JVM_InternString; + JVM_Interrupt; + JVM_InvokeMethod; + JVM_IsArrayClass; + JVM_IsConstructorIx; + JVM_IsInterface; + JVM_IsInterrupted; + JVM_IsNaN; + JVM_IsPrimitiveClass; + JVM_IsSameClassPackage; + JVM_IsSilentCompiler; + JVM_IsSupportedJNIVersion; + JVM_IsThreadAlive; + JVM_LatestUserDefinedLoader; + JVM_Listen; + JVM_LoadClass0; + JVM_LoadLibrary; + JVM_Lseek; + JVM_MaxObjectInspectionAge; + JVM_MaxMemory; + JVM_MonitorNotify; + JVM_MonitorNotifyAll; + JVM_MonitorWait; + JVM_NanoTime; + JVM_NativePath; + JVM_NewArray; + JVM_NewInstanceFromConstructor; + JVM_NewMultiArray; + JVM_OnExit; + JVM_Open; + JVM_PrintStackTrace; + JVM_RaiseSignal; + JVM_RawMonitorCreate; + JVM_RawMonitorDestroy; + JVM_RawMonitorEnter; + JVM_RawMonitorExit; + JVM_Read; + JVM_Recv; + JVM_RecvFrom; + JVM_RegisterSignal; + JVM_ReleaseUTF; + JVM_ResolveClass; + JVM_ResumeThread; + JVM_Send; + JVM_SendTo; + JVM_SetArrayElement; + JVM_SetClassSigners; + JVM_SetLength; + JVM_SetPrimitiveArrayElement; + JVM_SetProtectionDomain; + JVM_SetSockOpt; + JVM_SetThreadPriority; + JVM_Sleep; + JVM_Socket; + JVM_SocketAvailable; + JVM_SocketClose; + JVM_SocketShutdown; + JVM_StartThread; + JVM_StopThread; + JVM_SuspendThread; + JVM_SupportsCX8; + JVM_Sync; + JVM_Timeout; + JVM_TotalMemory; + JVM_TraceInstructions; + JVM_TraceMethodCalls; + JVM_UnloadLibrary; + JVM_Write; + JVM_Yield; + JVM_handle_linux_signal; + + # Old reflection routines + # These do not need to be present in the product build in JDK 1.4 + # but their code has not been removed yet because there will not + # be a substantial code savings until JVM_InvokeMethod and + # JVM_NewInstanceFromConstructor can also be removed; see + # reflectionCompat.hpp. + JVM_GetClassConstructor; + JVM_GetClassConstructors; + JVM_GetClassField; + JVM_GetClassFields; + JVM_GetClassMethod; + JVM_GetClassMethods; + JVM_GetField; + JVM_GetPrimitiveField; + JVM_NewInstance; + JVM_SetField; + JVM_SetPrimitiveField; + + # Needed for dropping VM into JDK 1.3.x, 1.4 + _JVM_native_threads; + jdk_sem_init; + jdk_sem_post; + jdk_sem_wait; + jdk_pthread_sigmask; + jdk_waitpid; + + # debug JVM + JVM_AccessVMBooleanFlag; + JVM_AccessVMIntFlag; + JVM_VMBreakPoint; + + # miscellaneous functions + jio_fprintf; + jio_printf; + jio_snprintf; + jio_vfprintf; + jio_vsnprintf; + fork1; + numa_warn; + numa_error; + + # Needed because there is no JVM interface for this. + sysThreadAvailableStackWithSlack; + + # This is for Forte Analyzer profiling support. + AsyncGetCallTrace; + local: + *; +}; + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/mapfile-vers-jsig 2009-08-01 04:16:22.767291118 +0100 @@ -0,0 +1,42 @@ +# +# @(#)mapfile-vers-jsig 1.4 07/05/05 17:03:55 +# + +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). 
+# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define library interface. + +SUNWprivate_1.1 { + global: + JVM_begin_signal_setting; + JVM_end_signal_setting; + JVM_get_libjsig_version; + JVM_get_signal_action; + sigaction; + signal; + sigset; + local: + *; +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/mapfile-vers-product 2009-08-01 04:16:23.176650362 +0100 @@ -0,0 +1,290 @@ +# +# @(#)mapfile-vers-product 1.19 08/02/12 10:56:37 +# + +# +# Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define public interface. 
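
Relative to mapfile-vers-debug above, this product variant drops the "# debug JVM" exports (JVM_AccessVMBooleanFlag, JVM_AccessVMIntFlag and JVM_VMBreakPoint); the JNI and JVM_ entry points listed in the two files are otherwise the same in this patch. A quick way to confirm the delta between the two, if desired:

    diff make/linux/makefiles/mapfile-vers-debug make/linux/makefiles/mapfile-vers-product
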
+ +SUNWprivate_1.1 { + global: + # JNI + JNI_CreateJavaVM; + JNI_GetCreatedJavaVMs; + JNI_GetDefaultJavaVMInitArgs; + + # JVM + JVM_Accept; + JVM_ActiveProcessorCount; + JVM_AllocateNewArray; + JVM_AllocateNewObject; + JVM_ArrayCopy; + JVM_AssertionStatusDirectives; + JVM_Available; + JVM_Bind; + JVM_ClassDepth; + JVM_ClassLoaderDepth; + JVM_Clone; + JVM_Close; + JVM_CX8Field; + JVM_CompileClass; + JVM_CompileClasses; + JVM_CompilerCommand; + JVM_Connect; + JVM_ConstantPoolGetClassAt; + JVM_ConstantPoolGetClassAtIfLoaded; + JVM_ConstantPoolGetDoubleAt; + JVM_ConstantPoolGetFieldAt; + JVM_ConstantPoolGetFieldAtIfLoaded; + JVM_ConstantPoolGetFloatAt; + JVM_ConstantPoolGetIntAt; + JVM_ConstantPoolGetLongAt; + JVM_ConstantPoolGetMethodAt; + JVM_ConstantPoolGetMethodAtIfLoaded; + JVM_ConstantPoolGetMemberRefInfoAt; + JVM_ConstantPoolGetSize; + JVM_ConstantPoolGetStringAt; + JVM_ConstantPoolGetUTF8At; + JVM_CountStackFrames; + JVM_CurrentClassLoader; + JVM_CurrentLoadedClass; + JVM_CurrentThread; + JVM_CurrentTimeMillis; + JVM_DefineClass; + JVM_DefineClassWithSource; + JVM_DesiredAssertionStatus; + JVM_DisableCompiler; + JVM_DoPrivileged; + JVM_DTraceGetVersion; + JVM_DTraceActivate; + JVM_DTraceIsProbeEnabled; + JVM_DTraceIsSupported; + JVM_DTraceDispose; + JVM_DumpAllStacks; + JVM_DumpThreads; + JVM_EnableCompiler; + JVM_Exit; + JVM_FillInStackTrace; + JVM_FindClassFromClass; + JVM_FindClassFromClassLoader; + JVM_FindClassFromBootLoader; + JVM_FindLibraryEntry; + JVM_FindLoadedClass; + JVM_FindPrimitiveClass; + JVM_FindSignal; + JVM_FreeMemory; + JVM_GC; + JVM_GetAllThreads; + JVM_GetArrayElement; + JVM_GetArrayLength; + JVM_GetCPClassNameUTF; + JVM_GetCPFieldClassNameUTF; + JVM_GetCPFieldModifiers; + JVM_GetCPFieldNameUTF; + JVM_GetCPFieldSignatureUTF; + JVM_GetCPMethodClassNameUTF; + JVM_GetCPMethodModifiers; + JVM_GetCPMethodNameUTF; + JVM_GetCPMethodSignatureUTF; + JVM_GetCallerClass; + JVM_GetClassAccessFlags; + JVM_GetClassAnnotations; + JVM_GetClassCPEntriesCount; + JVM_GetClassCPTypes; + JVM_GetClassConstantPool; + JVM_GetClassContext; + JVM_GetClassDeclaredConstructors; + JVM_GetClassDeclaredFields; + JVM_GetClassDeclaredMethods; + JVM_GetClassFieldsCount; + JVM_GetClassInterfaces; + JVM_GetClassLoader; + JVM_GetClassMethodsCount; + JVM_GetClassModifiers; + JVM_GetClassName; + JVM_GetClassNameUTF; + JVM_GetClassSignature; + JVM_GetClassSigners; + JVM_GetComponentType; + JVM_GetDeclaredClasses; + JVM_GetDeclaringClass; + JVM_GetEnclosingMethodInfo; + JVM_GetFieldAnnotations; + JVM_GetFieldIxModifiers; + JVM_GetHostName; + JVM_GetInheritedAccessControlContext; + JVM_GetInterfaceVersion; + JVM_GetLastErrorString; + JVM_GetManagement; + JVM_GetMethodAnnotations; + JVM_GetMethodDefaultAnnotationValue; + JVM_GetMethodIxArgsSize; + JVM_GetMethodIxByteCode; + JVM_GetMethodIxByteCodeLength; + JVM_GetMethodIxExceptionIndexes; + JVM_GetMethodIxExceptionTableEntry; + JVM_GetMethodIxExceptionTableLength; + JVM_GetMethodIxExceptionsCount; + JVM_GetMethodIxLocalsCount; + JVM_GetMethodIxMaxStack; + JVM_GetMethodIxModifiers; + JVM_GetMethodIxNameUTF; + JVM_GetMethodIxSignatureUTF; + JVM_GetMethodParameterAnnotations; + JVM_GetPrimitiveArrayElement; + JVM_GetProtectionDomain; + JVM_GetSockName; + JVM_GetSockOpt; + JVM_GetStackAccessControlContext; + JVM_GetStackTraceDepth; + JVM_GetStackTraceElement; + JVM_GetSystemPackage; + JVM_GetSystemPackages; + JVM_GetThreadStateNames; + JVM_GetThreadStateValues; + JVM_GetVersionInfo; + JVM_Halt; + JVM_HoldsLock; + JVM_IHashCode; + JVM_InitAgentProperties; + 
JVM_InitProperties; + JVM_InitializeCompiler; + JVM_InitializeSocketLibrary; + JVM_InternString; + JVM_Interrupt; + JVM_InvokeMethod; + JVM_IsArrayClass; + JVM_IsConstructorIx; + JVM_IsInterface; + JVM_IsInterrupted; + JVM_IsNaN; + JVM_IsPrimitiveClass; + JVM_IsSameClassPackage; + JVM_IsSilentCompiler; + JVM_IsSupportedJNIVersion; + JVM_IsThreadAlive; + JVM_LatestUserDefinedLoader; + JVM_Listen; + JVM_LoadClass0; + JVM_LoadLibrary; + JVM_Lseek; + JVM_MaxObjectInspectionAge; + JVM_MaxMemory; + JVM_MonitorNotify; + JVM_MonitorNotifyAll; + JVM_MonitorWait; + JVM_NanoTime; + JVM_NativePath; + JVM_NewArray; + JVM_NewInstanceFromConstructor; + JVM_NewMultiArray; + JVM_OnExit; + JVM_Open; + JVM_PrintStackTrace; + JVM_RaiseSignal; + JVM_RawMonitorCreate; + JVM_RawMonitorDestroy; + JVM_RawMonitorEnter; + JVM_RawMonitorExit; + JVM_Read; + JVM_Recv; + JVM_RecvFrom; + JVM_RegisterSignal; + JVM_ReleaseUTF; + JVM_ResolveClass; + JVM_ResumeThread; + JVM_Send; + JVM_SendTo; + JVM_SetArrayElement; + JVM_SetClassSigners; + JVM_SetLength; + JVM_SetPrimitiveArrayElement; + JVM_SetProtectionDomain; + JVM_SetSockOpt; + JVM_SetThreadPriority; + JVM_Sleep; + JVM_Socket; + JVM_SocketAvailable; + JVM_SocketClose; + JVM_SocketShutdown; + JVM_StartThread; + JVM_StopThread; + JVM_SuspendThread; + JVM_SupportsCX8; + JVM_Sync; + JVM_Timeout; + JVM_TotalMemory; + JVM_TraceInstructions; + JVM_TraceMethodCalls; + JVM_UnloadLibrary; + JVM_Write; + JVM_Yield; + JVM_handle_linux_signal; + + # Old reflection routines + # These do not need to be present in the product build in JDK 1.4 + # but their code has not been removed yet because there will not + # be a substantial code savings until JVM_InvokeMethod and + # JVM_NewInstanceFromConstructor can also be removed; see + # reflectionCompat.hpp. + JVM_GetClassConstructor; + JVM_GetClassConstructors; + JVM_GetClassField; + JVM_GetClassFields; + JVM_GetClassMethod; + JVM_GetClassMethods; + JVM_GetField; + JVM_GetPrimitiveField; + JVM_NewInstance; + JVM_SetField; + JVM_SetPrimitiveField; + + # Needed for dropping VM into JDK 1.3.x, 1.4 + _JVM_native_threads; + jdk_sem_init; + jdk_sem_post; + jdk_sem_wait; + jdk_pthread_sigmask; + jdk_waitpid; + + # miscellaneous functions + jio_fprintf; + jio_printf; + jio_snprintf; + jio_vfprintf; + jio_vsnprintf; + fork1; + numa_warn; + numa_error; + + # Needed because there is no JVM interface for this. + sysThreadAvailableStackWithSlack; + + # This is for Forte Analyzer profiling support. + AsyncGetCallTrace; + local: + *; +}; + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/optimized.make 2009-08-01 04:16:23.626659239 +0100 @@ -0,0 +1,44 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making optimized version of Gamma VM +# (This is the "product", not the "release" version.) + +# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make +OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS) +OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@)) + +# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files) + +# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings +CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE) + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. + +# Linker mapfile +MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug + +G_SUFFIX = +VERSION = optimized --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/product.make 2009-08-01 04:16:24.068709685 +0100 @@ -0,0 +1,54 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making optimized version of Gamma VM +# (This is the "product", not the "release" version.) + +# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make +OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS) +OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@)) + +# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files) + +# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings +CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE) + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. + +# Linker mapfile +MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-product + +G_SUFFIX = +SYSDEFS += -DPRODUCT +VERSION = optimized + +# use -g to strip library as -x will discard its symbol table; -x is fine for +# executables. +STRIP = strip +STRIP_LIBJVM = $(STRIP) -g $@ || exit 1; +STRIP_AOUT = $(STRIP) -x $@ || exit 1; + +# Don't strip in VM build; JDK build will strip libraries later +# LINK_LIB.CC/POST_HOOK += $(STRIP_$(LINK_INTO)) --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/profiled.make 2009-08-01 04:16:24.510650176 +0100 @@ -0,0 +1,30 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making profiled version of Gamma VM +# (It is also optimized.) + +CFLAGS += -pg +AOUT_FLAGS += -pg +LDNOMAP = true --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/rules.make 2009-08-01 04:16:24.911435003 +0100 @@ -0,0 +1,198 @@ +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Common rules/macros for the vm, adlc. + +# Tell make that .cpp is important +.SUFFIXES: .cpp $(SUFFIXES) + +# For now. Other makefiles use CPP as the c++ compiler, but that should really +# name the preprocessor. +ifeq ($(CCC),) +CCC = $(CPP) +endif + +DEMANGLER = c++filt +DEMANGLE = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@ + +# $(CC) is the c compiler (cc/gcc), $(CCC) is the c++ compiler (CC/g++). +C_COMPILE = $(CC) $(CPPFLAGS) $(CFLAGS) +CC_COMPILE = $(CCC) $(CPPFLAGS) $(CFLAGS) + +AS.S = $(AS) $(ASFLAGS) + +COMPILE.c = $(C_COMPILE) -c +GENASM.c = $(C_COMPILE) -S +LINK.c = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS) +LINK_LIB.c = $(CC) $(LFLAGS) $(SHARED_FLAG) +PREPROCESS.c = $(C_COMPILE) -E + +COMPILE.CC = $(CC_COMPILE) -c +GENASM.CC = $(CC_COMPILE) -S +LINK.CC = $(CCC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS) +LINK_NOPROF.CC = $(CCC) $(LFLAGS) $(AOUT_FLAGS) +LINK_LIB.CC = $(CCC) $(LFLAGS) $(SHARED_FLAG) +PREPROCESS.CC = $(CC_COMPILE) -E + +# Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k". +REMOVE_TARGET = rm -f $@ + +# Synonyms. 
+COMPILE.cpp = $(COMPILE.CC) +GENASM.cpp = $(GENASM.CC) +LINK.cpp = $(LINK.CC) +LINK_LIB.cpp = $(LINK_LIB.CC) +PREPROCESS.cpp = $(PREPROCESS.CC) + +# Note use of ALT_BOOTDIR to explicitly specify location of java and +# javac; this is the same environment variable used in the J2SE build +# process for overriding the default spec, which is BOOTDIR. +# Note also that we fall back to using JAVA_HOME if neither of these is +# specified. + +ifdef ALT_BOOTDIR + +RUN.JAVA = $(ALT_BOOTDIR)/bin/java +RUN.JAVAP = $(ALT_BOOTDIR)/bin/javap +RUN.JAVAH = $(ALT_BOOTDIR)/bin/javah +RUN.JAR = $(ALT_BOOTDIR)/bin/jar +COMPILE.JAVAC = $(ALT_BOOTDIR)/bin/javac +COMPILE.RMIC = $(ALT_BOOTDIR)/bin/rmic +BOOT_JAVA_HOME = $(ALT_BOOTDIR) + +else + +ifdef BOOTDIR + +RUN.JAVA = $(BOOTDIR)/bin/java +RUN.JAVAP = $(BOOTDIR)/bin/javap +RUN.JAVAH = $(BOOTDIR)/bin/javah +RUN.JAR = $(BOOTDIR)/bin/jar +COMPILE.JAVAC = $(BOOTDIR)/bin/javac +COMPILE.RMIC = $(BOOTDIR)/bin/rmic +BOOT_JAVA_HOME = $(BOOTDIR) + +else + +ifdef JAVA_HOME + +RUN.JAVA = $(JAVA_HOME)/bin/java +RUN.JAVAP = $(JAVA_HOME)/bin/javap +RUN.JAVAH = $(JAVA_HOME)/bin/javah +RUN.JAR = $(JAVA_HOME)/bin/jar +COMPILE.JAVAC = $(JAVA_HOME)/bin/javac +COMPILE.RMIC = $(JAVA_HOME)/bin/rmic +BOOT_JAVA_HOME = $(JAVA_HOME) + +else + +# take from the PATH, if ALT_BOOTDIR, BOOTDIR and JAVA_HOME are not defined +# note that this is to support hotspot build without SA. To build +# SA along with hotspot, you need to define ALT_BOOTDIR, BOOTDIR or JAVA_HOME + +RUN.JAVA = java +RUN.JAVAP = javap +RUN.JAVAH = javah +RUN.JAR = jar +COMPILE.JAVAC = javac +COMPILE.RMIC = rmic + +endif +endif +endif + +SUM = /usr/bin/sum + +# 'gmake MAKE_VERBOSE=y' gives all the gory details. +QUIETLY$(MAKE_VERBOSE) = @ +RUN.JAR$(MAKE_VERBOSE) += >/dev/null + +# With parallel makes, print a message at the end of compilation. +ifeq ($(findstring j,$(MFLAGS)),j) +COMPILE_DONE = && { echo Done with $<; } +endif + +# Include $(NONPIC_OBJ_FILES) definition +ifndef LP64 +include $(GAMMADIR)/make/pic.make +endif + +# The non-PIC object files are only generated for 32 bit platforms. +ifdef LP64 +%.o: %.cpp + @echo Compiling $< + $(QUIETLY) $(REMOVE_TARGET) + $(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE) +else +%.o: %.cpp + @echo Compiling $< + $(QUIETLY) $(REMOVE_TARGET) + $(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \ + $(subst $(VM_PICFLAG), ,$(COMPILE.CC)) -o $@ $< $(COMPILE_DONE), \ + $(COMPILE.CC) -o $@ $< $(COMPILE_DONE)) +endif + +%.o: %.s + @echo Assembling $< + $(QUIETLY) $(REMOVE_TARGET) + $(QUIETLY) $(AS.S) -o $@ $< $(COMPILE_DONE) + +%.s: %.cpp + @echo Generating assembly for $< + $(QUIETLY) $(GENASM.CC) -o $@ $< + $(QUIETLY) $(DEMANGLE) $(COMPILE_DONE) + +# Intermediate files (for debugging macros) +%.i: %.cpp + @echo Preprocessing $< to $@ + $(QUIETLY) $(PREPROCESS.CC) $< > $@ $(COMPILE_DONE) + +# Override gnumake built-in rules which do sccs get operations badly. +# (They put the checked out code in the current directory, not in the +# directory of the original file.) Since this is a symptom of a teamware +# failure, and since not all problems can be detected by gnumake due +# to incomplete dependency checking... just complain and stop. +%:: s.% + @echo "=========================================================" + @echo File $@ + @echo is out of date with respect to its SCCS file. + @echo This file may be from an unresolved Teamware conflict. + @echo This is also a symptom of a Teamware bringover/putback failure + @echo in which SCCS files are updated but not checked out. 
+ @echo Check for other out of date files in your workspace. + @echo "=========================================================" + @exit 666 + +%:: SCCS/s.% + @echo "=========================================================" + @echo File $@ + @echo is out of date with respect to its SCCS file. + @echo This file may be from an unresolved Teamware conflict. + @echo This is also a symptom of a Teamware bringover/putback failure + @echo in which SCCS files are updated but not checked out. + @echo Check for other out of date files in your workspace. + @echo "=========================================================" + @exit 666 + +.PHONY: default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/sa.make 2009-08-01 04:16:25.356040102 +0100 @@ -0,0 +1,97 @@ +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This makefile (sa.make) is included from the sa.make in the +# build directories. + +# This makefile is used to build Serviceability Agent java code +# and generate JNI header file for native methods. + +include $(GAMMADIR)/make/linux/makefiles/rules.make + +AGENT_DIR = $(GAMMADIR)/agent + +include $(GAMMADIR)/make/sa.files + +TOPDIR = $(shell echo `pwd`) +GENERATED = $(TOPDIR)/../generated + +# tools.jar is needed by the JDI - SA binding +SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar + +# gnumake 3.78.1 does not accept the *s that +# are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them +AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1)) +AGENT_FILES2 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES2)) + +SA_CLASSDIR = $(GENERATED)/saclasses + +SA_BUILD_VERSION_PROP = "sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VERSION)" + +SA_PROPERTIES = $(SA_CLASSDIR)/sa.properties + +# if $(AGENT_DIR) does not exist, we don't build SA +# also, we don't build SA on Itanium. + +all: + if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" ] ; then \ + $(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \ + fi + +$(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2) + $(QUIETLY) echo "Making $@" + $(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \ + echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \ + exit 1; \ + fi + $(QUIETLY) if [ ! -f $(SA_CLASSPATH) ] ; then \ + echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\ + echo ""; \ + exit 1; \ + fi + $(QUIETLY) if [ ! 
-d $(SA_CLASSDIR) ] ; then \ + mkdir -p $(SA_CLASSDIR); \ + fi + + $(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1) + $(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2) + + $(QUIETLY) $(REMOTE) $(COMPILE.RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer + $(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES) + $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js + $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql + $(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources + $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/* + $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/ + $(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/ + $(QUIETLY) $(REMOTE) $(RUN.JAR) cf $@ -C $(SA_CLASSDIR)/ . + $(QUIETLY) $(REMOTE) $(RUN.JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector + $(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext + $(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext + $(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.amd64.AMD64ThreadContext + $(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.sparc.SPARCThreadContext + +clean: + rm -rf $(SA_CLASSDIR) + rm -rf $(GENERATED)/sa-jdi.jar --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/saproc.make 2009-08-01 04:16:25.797983396 +0100 @@ -0,0 +1,85 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. 
+# +# + +# Rules to build serviceability agent library, used by vm.make + +# libsaproc[_g].so: serviceability agent +SAPROC = saproc$(G_SUFFIX) +LIBSAPROC = lib$(SAPROC).so + +AGENT_DIR = $(GAMMADIR)/agent + +SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family) + +SASRCFILES = $(SASRCDIR)/salibelf.c \ + $(SASRCDIR)/symtab.c \ + $(SASRCDIR)/libproc_impl.c \ + $(SASRCDIR)/ps_proc.c \ + $(SASRCDIR)/ps_core.c \ + $(SASRCDIR)/LinuxDebuggerLocal.c + +SAMAPFILE = $(SASRCDIR)/mapfile + +DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC) + +# DEBUG_BINARIES overrides everything, use full -g debug information +ifeq ($(DEBUG_BINARIES), true) + SA_DEBUG_CFLAGS = -g +endif + +# if $(AGENT_DIR) does not exist, we don't build SA +# also, we don't build SA on Itanium. + +checkAndBuildSA: + $(QUIETLY) if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" ] ; then \ + $(MAKE) -f vm.make $(LIBSAPROC); \ + fi + +SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE)) + +$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE) + $(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \ + echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \ + exit 1; \ + fi + @echo Making SA debugger back-end... + $(QUIETLY) $(CC) -D$(BUILDARCH) -D_GNU_SOURCE \ + $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \ + -I$(SASRCDIR) \ + -I$(GENERATED) \ + -I$(BOOT_JAVA_HOME)/include \ + -I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \ + $(SASRCFILES) \ + $(SA_LFLAGS) \ + $(SA_DEBUG_CFLAGS) \ + -o $@ \ + -lthread_db + +install_saproc: checkAndBuildSA + $(QUIETLY) if [ -e $(LIBSAPROC) ] ; then \ + echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)"; \ + cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done"; \ + fi + +.PHONY: checkAndBuildSA install_saproc --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/sparc.make 2009-08-01 04:16:26.237623019 +0100 @@ -0,0 +1,27 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Not included in includeDB because it has no dependencies +Obj_Files += linux_sparc.o + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/sparcWorks.make 2009-08-01 04:16:26.646271298 +0100 @@ -0,0 +1,93 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +#------------------------------------------------------------------------ +# CC, CPP & AS + +CPP = CC +CC = cc +AS = $(CC) -c + +ARCHFLAG = $(ARCHFLAG/$(BUILDARCH)) +ARCHFLAG/i486 = -m32 +ARCHFLAG/amd64 = -m64 + +CFLAGS += $(ARCHFLAG) +AOUT_FLAGS += $(ARCHFLAG) +LFLAGS += $(ARCHFLAG) +ASFLAGS += $(ARCHFLAG) + +#------------------------------------------------------------------------ +# Compiler flags + +# position-independent code +PICFLAG = -KPIC + +CFLAGS += $(PICFLAG) +# no more exceptions +CFLAGS += -features=no%except +# Reduce code bloat by reverting back to 5.0 behavior for static initializers +CFLAGS += -features=no%split_init +# allow zero sized arrays +CFLAGS += -features=zla + +# Use C++ Interpreter +ifdef CC_INTERP + CFLAGS += -DCC_INTERP +endif + +# We don't need libCstd.so and librwtools7.so, only libCrun.so +CFLAGS += -library=Crun +LIBS += -lCrun + +CFLAGS += -mt +LFLAGS += -mt + +# Compiler warnings are treated as errors +#WARNINGS_ARE_ERRORS = -errwarn=%all +CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) +# Special cases +CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@)) + +# The flags to use for an Optimized build +OPT_CFLAGS+=-xO4 +OPT_CFLAGS/NOOPT=-xO0 + +#------------------------------------------------------------------------ +# Linker flags + +# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file. +MAPFLAG = -Wl,--version-script=FILENAME + +# Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj +SONAMEFLAG = -h SONAME + +# Build shared library +SHARED_FLAG = -G + +#------------------------------------------------------------------------ +# Debug flags +DEBUG_CFLAGS += -g +FASTDEBUG_CFLAGS = -g0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/sparcv9.make 2009-08-01 04:16:27.074684256 +0100 @@ -0,0 +1,31 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# + +# +# Not included in includeDB because it has no dependencies +Obj_Files += linux_sparc.o + +# gcc 4.0 miscompiles this code in -m64 +OPT_CFLAGS/macro.o = -O0 + +CFLAGS += -D_LP64=1 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/tiered.make 2009-08-01 04:16:27.475236420 +0100 @@ -0,0 +1,31 @@ +# +# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making tiered version of VM + +TYPE=TIERED + +VM_SUBDIR = server + +CFLAGS += -DCOMPILER2 -DCOMPILER1 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/top.make 2009-08-01 04:16:27.901585428 +0100 @@ -0,0 +1,190 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# top.make is included in the Makefile in the build directories. +# It DOES NOT include the vm dependency info in order to be faster. +# It's main job is to implement the incremental form of make lists. +# It also: +# -builds and runs adlc via adlc.make +# -generates JVMTI source and docs via jvmti.make (JSR-163) +# -generate sa-jdi.jar (JDI binding to core files) + +# It assumes the following flags are set: +# CFLAGS Platform_file, Src_Dirs, SYSDEFS, AOUT, Obj_Files + +# -- D. Ungar (5/97) from a file by Bill Bush + +# Don't override the built-in $(MAKE). +# Instead, use "gmake" (or "gnumake") from the command line. 
--Rose +#MAKE = gmake + +TOPDIR = $(shell echo `pwd`) +GENERATED = $(TOPDIR)/../generated +VM = $(GAMMADIR)/src/share/vm +Plat_File = $(Platform_file) +CDG = cd $(GENERATED); + +# Pick up MakeDeps' sources and definitions +include $(GAMMADIR)/make/$(Platform_os_family)/makefiles/makedeps.make +MakeDepsClass = MakeDeps.class + +ifdef USE_PRECOMPILED_HEADER +PrecompiledOption = -DUSE_PRECOMPILED_HEADER +UpdatePCH = $(MAKE) -f vm.make $(PRECOMPILED_HEADER) $(MFLAGS) +else +UpdatePCH = \# precompiled header is not used +PrecompiledOption = +endif + +MakeDeps = $(RUN.JAVA) $(PrecompiledOption) -classpath $(GENERATED) MakeDeps + +Include_DBs/GC = $(VM)/includeDB_gc \ + $(VM)/includeDB_gc_parallel \ + $(VM)/gc_implementation/includeDB_gc_parallelScavenge \ + $(VM)/gc_implementation/includeDB_gc_concurrentMarkSweep \ + $(VM)/gc_implementation/includeDB_gc_parNew \ + $(VM)/gc_implementation/includeDB_gc_g1 \ + $(VM)/gc_implementation/includeDB_gc_serial \ + $(VM)/gc_implementation/includeDB_gc_shared + +Include_DBs/CORE = $(VM)/includeDB_core $(Include_DBs/GC) \ + $(VM)/includeDB_jvmti \ + $(VM)/includeDB_features +Include_DBs/COMPILER1 = $(Include_DBs/CORE) $(VM)/includeDB_compiler1 +Include_DBs/COMPILER2 = $(Include_DBs/CORE) $(VM)/includeDB_compiler2 +Include_DBs/TIERED = $(Include_DBs/CORE) $(VM)/includeDB_compiler1 $(VM)/includeDB_compiler2 +Include_DBs = $(Include_DBs/$(TYPE)) + +Cached_plat = $(GENERATED)/platform.current +Cached_db = $(GENERATED)/includeDB.current + +Incremental_Lists = $(Cached_db) +# list generation also creates $(GENERATED)/$(Cached_plat) + + +AD_Dir = $(GENERATED)/adfiles +ADLC = $(AD_Dir)/adlc +AD_Spec = $(GAMMADIR)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad +AD_Src = $(GAMMADIR)/src/share/vm/adlc +AD_Names = ad_$(Platform_arch_model).hpp ad_$(Platform_arch_model).cpp +AD_Files = $(AD_Names:%=$(AD_Dir)/%) + +# AD_Files_If_Required/COMPILER1 = ad_stuff +AD_Files_If_Required/COMPILER2 = ad_stuff +AD_Files_If_Required/TIERED = ad_stuff +AD_Files_If_Required = $(AD_Files_If_Required/$(TYPE)) + +# Wierd argument adjustment for "gnumake -j..." +adjust-mflags = $(GENERATED)/adjust-mflags +MFLAGS-adjusted = -r `$(adjust-mflags) "$(MFLAGS)" "$(HOTSPOT_BUILD_JOBS)"` + + +# default target: make makeDeps, update lists, make vm +# done in stages to force sequential order with parallel make +# + +default: vm_build_preliminaries the_vm + @echo All done. + +# This is an explicit dependency for the sake of parallel makes. +vm_build_preliminaries: checks $(Incremental_Lists) $(AD_Files_If_Required) jvmti_stuff sa_stuff + @# We need a null action here, so implicit rules don't get consulted. + +# make makeDeps: (and zap the cached db files to force a nonincremental run) + +$(GENERATED)/$(MakeDepsClass): $(MakeDepsSources) + @$(REMOTE) $(COMPILE.JAVAC) -classpath $(GAMMADIR)/src/share/tools/MakeDeps -g -d $(GENERATED) $(MakeDepsSources) + @echo Removing $(Incremental_Lists) to force regeneration. + @rm -f $(Incremental_Lists) + @$(CDG) echo >$(Cached_plat) + +# make incremental_lists, if cached files out of date, run makeDeps + +$(Incremental_Lists): $(Include_DBs) $(Plat_File) $(GENERATED)/$(MakeDepsClass) + $(CDG) cat $(Include_DBs) > $(GENERATED)/includeDB + $(CDG) if [ ! 
-r incls ] ; then \ + mkdir incls ; \ + fi + $(CDG) (echo $(CDG) echo $(MakeDeps) diffs UnixPlatform $(Cached_plat) $(Cached_db) $(Plat_File) $(GENERATED)/includeDB $(MakeDepsOptions)) > makeDeps.sh + $(CDG) $(REMOTE) sh $(GENERATED)/makeDeps.sh + $(CDG) cp includeDB $(Cached_db) + $(CDG) cp $(Plat_File) $(Cached_plat) + +# symbolic target for command lines +lists: $(Incremental_Lists) + @: lists are now up to date + +# make AD files as necessary +ad_stuff: $(Incremental_Lists) $(adjust-mflags) + @$(MAKE) -f adlc.make $(MFLAGS-adjusted) + +# generate JVMTI files from the spec +jvmti_stuff: $(Incremental_Lists) $(adjust-mflags) + @$(MAKE) -f jvmti.make $(MFLAGS-adjusted) + +# generate SA jar files and native header +sa_stuff: + @$(MAKE) -f sa.make $(MFLAGS-adjusted) + +# and the VM: must use other makefile with dependencies included + +# We have to go to great lengths to get control over the -jN argument +# to the recursive invocation of vm.make. The problem is that gnumake +# resets -jN to -j1 for recursive runs. (How helpful.) +# Note that the user must specify the desired parallelism level via a +# command-line or environment variable name HOTSPOT_BUILD_JOBS. +$(adjust-mflags): $(GAMMADIR)/make/$(Platform_os_family)/makefiles/adjust-mflags.sh + @+rm -f $@ $@+ + @+cat $< > $@+ + @+chmod +x $@+ + @+mv $@+ $@ + +the_vm: vm_build_preliminaries $(adjust-mflags) + @$(UpdatePCH) + @$(MAKE) -f vm.make $(MFLAGS-adjusted) + +install: the_vm + @$(MAKE) -f vm.make install + +# next rules support "make foo.[oi]" + +%.o %.i %.s: + $(UpdatePCH) + $(MAKE) -f vm.make $(MFLAGS) $@ + #$(MAKE) -f vm.make $@ + +# this should force everything to be rebuilt +clean: + rm -f $(GENERATED)/*.class + $(MAKE) $(MFLAGS) $(GENERATED)/$(MakeDepsClass) + $(MAKE) -f vm.make $(MFLAGS) clean + +# just in case it doesn't, this should do it +realclean: + $(MAKE) -f vm.make $(MFLAGS) clean + rm -fr $(GENERATED) + +.PHONY: default vm_build_preliminaries +.PHONY: lists ad_stuff jvmti_stuff sa_stuff the_vm clean realclean +.PHONY: checks check_os_version install --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/makefiles/vm.make 2009-08-01 04:16:28.366948965 +0100 @@ -0,0 +1,228 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Rules to build JVM and related libraries, included from vm.make in the build +# directory. + +# Common build rules. 
+MAKEFILES_DIR=$(GAMMADIR)/make/$(Platform_os_family)/makefiles +include $(MAKEFILES_DIR)/rules.make + +default: build + +#---------------------------------------------------------------------- +# Defs + +GENERATED = ../generated + +# read a generated file defining the set of .o's and the .o .h dependencies +include $(GENERATED)/Dependencies + +# read machine-specific adjustments (%%% should do this via buildtree.make?) +include $(MAKEFILES_DIR)/$(BUILDARCH).make + +# set VPATH so make knows where to look for source files +# Src_Dirs is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm +# The incls directory contains generated header file lists for inclusion. +# The adfiles directory contains ad_.[ch]pp. +# The jvmtifiles directory contains jvmti*.[ch]pp +Src_Dirs_V = $(GENERATED)/adfiles $(GENERATED)/jvmtifiles ${Src_Dirs} $(GENERATED)/incls +VPATH += $(Src_Dirs_V:%=%:) + +# set INCLUDES for C preprocessor +Src_Dirs_I = $(PRECOMPILED_HEADER_DIR) $(GENERATED)/adfiles $(GENERATED)/jvmtifiles ${Src_Dirs} $(GENERATED) +INCLUDES += $(Src_Dirs_I:%=-I%) + +ifeq (${VERSION}, debug) + SYMFLAG = -g +else + SYMFLAG = +endif + +# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined +# in $(GAMMADIR)/make/defs.make +ifeq ($(HOTSPOT_BUILD_VERSION),) + BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\"" +else + BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)\"" +endif + +# The following variables are defined in the generated flags.make file. +BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HS_BUILD_VER)\"" +JRE_VERSION = -DJRE_RELEASE_VERSION="\"$(JRE_RELEASE_VER)\"" +HS_LIB_ARCH = -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\" +BUILD_TARGET = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\"" +BUILD_USER = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\"" +VM_DISTRO = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\"" + +CPPFLAGS = \ + ${SYSDEFS} \ + ${INCLUDES} \ + ${BUILD_VERSION} \ + ${BUILD_TARGET} \ + ${BUILD_USER} \ + ${HS_LIB_ARCH} \ + ${JRE_VERSION} \ + ${VM_DISTRO} + +# CFLAGS_WARN holds compiler options to suppress/enable warnings. +CFLAGS += $(CFLAGS_WARN/BYFILE) + +# Do not use C++ exception handling +CFLAGS += $(CFLAGS/NOEX) + +# Extra flags from gnumake's invocation or environment +CFLAGS += $(EXTRA_CFLAGS) + +LIBS += -lm -ldl -lpthread + +# By default, link the *.o into the library, not the executable. +LINK_INTO$(LINK_INTO) = LIBJVM + +JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH) + +#---------------------------------------------------------------------- +# jvm_db & dtrace +include $(MAKEFILES_DIR)/dtrace.make + +#---------------------------------------------------------------------- +# JVM + +JVM = jvm$(G_SUFFIX) +LIBJVM = lib$(JVM).so + +JVM_OBJ_FILES = $(Obj_Files) + +vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES)) + +mapfile : $(MAPFILE) + rm -f $@ + cat $^ > $@ + +mapfile_reorder : mapfile $(REORDERFILE) + rm -f $@ + cat $^ > $@ + +STATIC_CXX = true + +ifeq ($(LINK_INTO),AOUT) + LIBJVM.o = + LIBJVM_MAPFILE = + LIBS_VM = $(LIBS) +else + LIBJVM.o = $(JVM_OBJ_FILES) + LIBJVM_MAPFILE$(LDNOMAP) = mapfile_reorder + LFLAGS_VM$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE)) + LFLAGS_VM += $(SONAMEFLAG:SONAME=$(LIBJVM)) + + # JVM is statically linked with libgcc[_s] and libstdc++; this is needed to + # get around library dependency and compatibility issues. Must use gcc not + # g++ to link. 
+ ifeq ($(STATIC_CXX), true) + LFLAGS_VM += $(STATIC_LIBGCC) + LIBS_VM += $(STATIC_STDCXX) + else + LIBS_VM += -lstdc++ + endif + + LIBS_VM += $(LIBS) +endif + +LINK_VM = $(LINK_LIB.c) + +# rule for building precompiled header +$(PRECOMPILED_HEADER): $(Precompiled_Files) + $(QUIETLY) echo Generating precompiled header $@ + $(QUIETLY) mkdir -p $(PRECOMPILED_HEADER_DIR)/incls + $(QUIETLY) $(COMPILE.CC) -x c++-header -c $(GENERATED)/incls/_precompiled.incl -o $@ $(COMPILE_DONE) + +# making the library: + +ifneq ($(JVM_BASE_ADDR),) +# By default shared library is linked at base address == 0. Modify the +# linker script if JVM prefers a different base location. It can also be +# implemented with 'prelink -r'. But 'prelink' is not (yet) available on +# our build platform (AS-2.1). +LD_SCRIPT = libjvm.so.lds +$(LD_SCRIPT): $(LIBJVM_MAPFILE) + $(QUIETLY) { \ + rm -rf $@; \ + $(LINK_VM) -Wl,--verbose $(LFLAGS_VM) 2>&1 | \ + sed -e '/^======/,/^======/!d' \ + -e '/^======/d' \ + -e 's/0\( + SIZEOF_HEADERS\)/$(JVM_BASE_ADDR)\1/' \ + > $@; \ + } +LD_SCRIPT_FLAG = -Wl,-T,$(LD_SCRIPT) +endif + +# With more recent Redhat releases (or the cutting edge version Fedora), if +# SELinux is configured to be enabled, the runtime linker will fail to apply +# the text relocation to libjvm.so considering that it is built as a non-PIC +# DSO. To workaround that, we run chcon to libjvm.so after it is built. See +# details in bug 6538311. +$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT) + $(QUIETLY) { \ + echo Linking vm...; \ + $(LINK_LIB.CC/PRE_HOOK) \ + $(LINK_VM) $(LD_SCRIPT_FLAG) \ + $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \ + $(LINK_LIB.CC/POST_HOOK) \ + rm -f $@.1; ln -s $@ $@.1; \ + if [ -x /usr/sbin/selinuxenabled ] ; then \ + /usr/sbin/selinuxenabled; \ + if [ $$? = 0 ] ; then \ + /usr/bin/chcon -t textrel_shlib_t $@; \ + if [ $$? 
!= 0 ]; then \ + echo "ERROR: Cannot chcon $@"; \ + fi \ + fi \ + fi \ + } + +DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM) + +install_jvm: $(LIBJVM) + @echo "Copying $(LIBJVM) to $(DEST_JVM)" + $(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done" + +#---------------------------------------------------------------------- +# Other files + +# Gamma launcher +include $(MAKEFILES_DIR)/launcher.make + +# Signal interposition library +include $(MAKEFILES_DIR)/jsig.make + +# Serviceability agent +include $(MAKEFILES_DIR)/saproc.make + +#---------------------------------------------------------------------- + +build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) checkAndBuildSA + +install: install_jvm install_jsig install_saproc + +.PHONY: default build install install_jvm --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/platform_amd64 2009-08-01 04:16:28.792195866 +0100 @@ -0,0 +1,15 @@ +os_family = linux + +arch = x86 + +arch_model = x86_64 + +os_arch = linux_x86 + +os_arch_model = linux_x86_64 + +lib_arch = amd64 + +compiler = gcc + +sysdefs = -DLINUX -D_GNU_SOURCE -DAMD64 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/platform_amd64.suncc 2009-08-01 04:16:29.198025965 +0100 @@ -0,0 +1,17 @@ +os_family = linux + +arch = x86 + +arch_model = x86_64 + +os_arch = linux_x86 + +os_arch_model = linux_x86_64 + +lib_arch = amd64 + +compiler = sparcWorks + +gnu_dis_arch = amd64 + +sysdefs = -DLINUX -DSPARC_WORKS -D_GNU_SOURCE -DAMD64 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/platform_i486 2009-08-01 04:16:29.860934551 +0100 @@ -0,0 +1,15 @@ +os_family = linux + +arch = x86 + +arch_model = x86_32 + +os_arch = linux_x86 + +os_arch_model = linux_x86_32 + +lib_arch = i386 + +compiler = gcc + +sysdefs = -DLINUX -D_GNU_SOURCE -DIA32 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/platform_i486.suncc 2009-08-01 04:16:30.253007149 +0100 @@ -0,0 +1,17 @@ +os_family = linux + +arch = x86 + +arch_model = x86_32 + +os_arch = linux_x86 + +os_arch_model = linux_x86_32 + +lib_arch = i386 + +compiler = sparcWorks + +gnu_dis_arch = i386 + +sysdefs = -DLINUX -DSPARC_WORKS -D_GNU_SOURCE -DIA32 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/platform_ia64 2009-08-01 04:16:30.628555802 +0100 @@ -0,0 +1,15 @@ +os_family = linux + +arch = ia64 + +os_arch = linux_ia64 + +lib_arch = ia64 + +compiler = gcc + +gnu_dis_arch = ia64 + +sysdefs = -DLINUX -D_GNU_SOURCE -DIA64 -DCC_INTERP + +mark_style = alignment --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/platform_sparc 2009-08-01 04:16:31.012627857 +0100 @@ -0,0 +1,15 @@ +os_family = linux + +arch = sparc + +arch_model = sparc + +os_arch = linux_sparc + +os_arch_model = linux_sparc + +lib_arch = sparc + +compiler = gcc + +sysdefs = -DLINUX -D_GNU_SOURCE -DSPARC --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/linux/platform_sparcv9 2009-08-01 04:16:31.404950982 +0100 @@ -0,0 +1,15 @@ +os_family = linux + +arch = sparc + +arch_model = sparc + +os_arch = linux_sparc + +os_arch_model = linux_sparc + +lib_arch = sparcv9 + +compiler = gcc + +sysdefs = -DLINUX -D_GNU_SOURCE -DSPARC --- old/hotspot/build/hotspot_distro 2009-08-01 04:16:31.874054799 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,32 +0,0 @@ -# -# Copyright 2007 Sun Microsystems, Inc. All Rights Reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
-# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, -# CA 95054 USA or visit www.sun.com if you need additional information or -# have any questions. -# - -# -# This file format must remain compatible with both -# GNU Makefile and Microsoft nmake formats. -# - -# Don't put quotes (fail windows build). -HOTSPOT_VM_DISTRO=OpenJDK -COMPANY_NAME= -PRODUCT_NAME=OpenJDK --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/openjdk_distro 2009-08-01 04:16:31.790664938 +0100 @@ -0,0 +1,32 @@ +# +# Copyright 2007-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# + +# +# This file format must remain compatible with both +# GNU Makefile and Microsoft nmake formats. +# + +# Don't put quotes (fail windows build). +HOTSPOT_VM_DISTRO=OpenJDK +COMPANY_NAME= +PRODUCT_NAME=OpenJDK --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/pic.make 2009-08-01 04:16:32.626815932 +0100 @@ -0,0 +1,41 @@ +# +# Copyright 2006-2007 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# A list of object files built without the platform specific PIC flags, e.g. +# -fPIC on linux. Performance measurements show that by compiling GC related +# code, we could significantly reduce the GC pause time on 32 bit Linux/Unix +# platforms. See 6454213 for more details. +include $(GAMMADIR)/make/scm.make + +ifneq ($(OSNAME), windows) + ifndef LP64 + NONPIC_DIRS = memory oops gc_implementation gc_interface + NONPIC_DIRS := $(foreach dir,$(NONPIC_DIRS), $(GAMMADIR)/src/share/vm/$(dir)) + # Look for source files under NONPIC_DIRS + NONPIC_FILES := $(foreach dir,$(NONPIC_DIRS),\ + $(shell find $(dir) \( $(SCM_DIRS) \) -prune -o \ + -name '*.cpp' -print)) + NONPIC_OBJ_FILES := $(notdir $(subst .cpp,.o,$(NONPIC_FILES))) + endif +endif --- old/hotspot/build/sa.files 2009-08-01 04:16:33.123659902 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,133 +0,0 @@ -# -# Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, -# CA 95054 USA or visit www.sun.com if you need additional information or -# have any questions. -# -# - -# This filelist macro is included in platform specific sa.make -# included all packages/*.java. package list can be generated by -# $(GAMMADIR)/agent/make/build-pkglist. Then manually removed all -# classes in sun.jvm.hotspot.ui (and subpackages), all ui classes -# in sun.jvm.hotspot.bugspot/hotspot and SPARC and x86 disassembler -# classes and sun.jvm.hotspot.utilities.soql. 
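Referring back to the pic.make fragment above: the NONPIC_FILES shell call amounts roughly to the find invocation sketched below (the concrete SCM_DIRS expansion is an assumption here; it is only meant to prune source-control metadata directories):

    # Collect the GC-related .cpp files that are built without -fPIC on
    # 32-bit Linux/Unix builds, pruning source-control directories.
    for dir in memory oops gc_implementation gc_interface; do
      find "$GAMMADIR/src/share/vm/$dir" \( -name .hg -o -name CVS \) -prune \
           -o -name '*.cpp' -print
    done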
- -# define AGENT_DIR before including this file in sa.make - -AGENT_SRC_DIR = $(AGENT_DIR)/src/share/classes - -AGENT_ALLFILES = \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/DebugServer.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/HelloWorld.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/HotSpotAgent.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/HotSpotSolarisVtblAccess.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/HotSpotTypeDataBase.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/LinuxVtblAccess.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/ObjectHistogram.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/RMIHelper.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/StackTrace.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/TestDebugger.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/Win32VtblAccess.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/Immediate.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ImmediateOrRegister.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/Operand.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/Register.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/amd64/AMD64Register.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/amd64/AMD64Registers.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ia64/IA64Register.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ia64/IA64Registers.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCArgument.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCRegister.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCRegisterType.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCRegisters.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86Register.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86RegisterPart.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86Registers.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86SegmentRegister.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86SegmentRegisters.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/bugspot/BugSpotAgent.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/c1/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/code/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/compiler/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/amd64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/sparc/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dummy/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/amd64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/ia64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/amd64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/ia64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/sparc/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/posix/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/posix/elf/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/amd64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/sparc/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/amd64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/sparc/*.java \ 
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/sparc/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/win32/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/win32/coff/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/amd64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/ia64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/shared/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_interface/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/interpreter/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/jdi/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/livejvm/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/memory/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/oops/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/amd64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/ia64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_amd64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_ia64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_sparc/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/posix/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_amd64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_sparc/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/sparc/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_amd64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_ia64/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/x86/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/jcore/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/types/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/types/basic/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/*.java \ -$(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/memo/*.java \ --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/sa.files 2009-08-01 04:16:33.042313405 +0100 @@ -0,0 +1,128 @@ +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. 
+# +# + +# This filelist macro is included in platform specific sa.make +# included all packages/*.java. package list can be generated by +# $(GAMMADIR)/agent/make/build-pkglist. Then manually removed all +# classes in sun.jvm.hotspot.ui (and subpackages), all ui classes +# in sun.jvm.hotspot.bugspot/hotspot and SPARC and x86 disassembler +# classes and sun.jvm.hotspot.utilities.soql. + +# define AGENT_DIR before including this file in sa.make + +AGENT_SRC_DIR = $(AGENT_DIR)/src/share/classes + +# Splitted the set of files into two sets because on linux plaform +# listing or compiling all the files results in 'Argument list too long' error. + +AGENT_FILES1 = \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/amd64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ia64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/bugspot/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/bugspot/tree/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/c1/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/code/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/compiler/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/amd64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/amd64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/sparc/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dummy/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/ia64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/amd64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/ia64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/sparc/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/posix/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/posix/elf/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/amd64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/sparc/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/amd64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/sparc/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/sparc/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/win32/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/win32/coff/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/amd64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/ia64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/shared/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_interface/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/interpreter/*.java \ 
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/jdi/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/livejvm/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/memory/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/oops/*.java + + +AGENT_FILES2 = \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/amd64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/ia64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_amd64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_ia64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_sparc/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/posix/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_amd64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_sparc/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/sparc/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_amd64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_ia64/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/x86/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/jcore/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/soql/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/types/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/types/basic/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/memo/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/action/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/classbrowser/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/table/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/tree/*.java \ +$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/treetable/*.java \ +$(AGENT_SRC_DIR)/com/sun/java/swing/action/*.java \ +$(AGENT_SRC_DIR)/com/sun/java/swing/ui/*.java --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/Makefile 2009-08-01 04:16:33.971379342 +0100 @@ -0,0 +1,307 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This makefile creates a build tree and lights off a build. +# You can go back into the build tree and perform rebuilds or +# incremental builds as desired. Be sure to reestablish +# environment variable settings for LD_LIBRARY_PATH and JAVA_HOME. 
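One note on the AGENT_FILES1/AGENT_FILES2 split in sa.files above: the intent is that the platform-specific sa.make compiles each list in its own javac invocation, so neither command line exceeds the argument-length limit. A minimal sketch, assuming hypothetical SA_CLASSPATH and SA_CLASSDIR variables that are not defined in this patch:

    # Two separate compilations keep each javac argument list short enough.
    javac -g -classpath "$SA_CLASSPATH" -d "$SA_CLASSDIR" $AGENT_FILES1
    javac -g -classpath "$SA_CLASSPATH" -d "$SA_CLASSDIR" $AGENT_FILES2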
+ +# The make process now relies on java and javac. These can be +# specified either implicitly on the PATH, by setting the +# (JDK-inherited) ALT_BOOTDIR environment variable to full path to a +# JDK in which bin/java and bin/javac are present and working (e.g., +# /usr/local/java/jdk1.3/solaris), or via the (JDK-inherited) +# default BOOTDIR path value. Note that one of ALT_BOOTDIR +# or BOOTDIR has to be set. We do *not* search javac, javah, rmic etc. +# from the PATH. + +# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding. +# JDI binding on SA produces two binaries: +# 1. sa-jdi.jar - This is build before building libjvm[_g].so +# Please refer to ./makefiles/sa.make +# 2. libsaproc[_g].so - Native library for SA - This is built after +# libjsig[_g].so (signal interposition library) +# Please refer to ./makefiles/vm.make +# If $(GAMMADIR)/agent dir is not present, SA components are not built. + +ifeq ($(GAMMADIR),) +include ../../make/defs.make +else +include $(GAMMADIR)/make/defs.make +endif +include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make + +ifndef LP64 +ifndef CC_INTERP +FORCE_TIERED=1 +endif +endif + +ifdef LP64 + ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","") + _JUNK_ := $(shell echo >&2 \ + $(OSNAME) $(ARCH) "*** ERROR: this platform does not support 64-bit compilers!") + @exit 1 + endif +endif + +# The following renders pathnames in generated Makefiles valid on +# machines other than the machine containing the build tree. +# +# For example, let's say my build tree lives on /files12 on +# exact.east.sun.com. This logic will cause GAMMADIR to begin with +# /net/exact/files12/... +# +# We only do this on SunOS variants, for a couple of reasons: +# * It is extremely rare that source trees exist on other systems +# * It has been claimed that the Linux automounter is flakey, so +# changing GAMMADIR in a way that exercises the automounter could +# prove to be a source of unreliability in the build process. +# Obviously, this Makefile is only relevant on SunOS boxes to begin +# with, but the SunOS conditionalization will make it easier to +# combine Makefiles in the future (assuming we ever do that). + +ifeq ($(OSNAME),solaris) + + # prepend current directory to relative pathnames. + NEW_GAMMADIR := \ + $(shell echo $(GAMMADIR) | \ + sed -e "s=^\([^/].*\)=$(shell pwd)/\1=" \ + ) + unexport NEW_GAMMADIR + + # If NEW_GAMMADIR doesn't already start with "/net/": + ifeq ($(strip $(filter /net/%,$(NEW_GAMMADIR))),) + # prepend /net/$(HOST) + # remove /net/$(HOST) if name already began with /home/ + # remove /net/$(HOST) if name already began with /java/ + # remove /net/$(HOST) if name already began with /lab/ + NEW_GAMMADIR := \ + $(shell echo $(NEW_GAMMADIR) | \ + sed -e "s=^\(.*\)=/net/$(HOST)\1=" \ + -e "s=^/net/$(HOST)/home/=/home/=" \ + -e "s=^/net/$(HOST)/java/=/java/=" \ + -e "s=^/net/$(HOST)/lab/=/lab/=" \ + ) + # Don't use the new value for GAMMADIR unless a file with the new + # name actually exists. 
+ ifneq ($(wildcard $(NEW_GAMMADIR)),) + GAMMADIR := $(NEW_GAMMADIR) + endif + endif + +endif + + +# There is a (semi-) regular correspondence between make targets and actions: +# +# Target Tree Type Build Dir +# +# debug compiler2 __compiler2/debug +# fastdebug compiler2 __compiler2/fastdebug +# jvmg compiler2 __compiler2/jvmg +# optimized compiler2 __compiler2/optimized +# profiled compiler2 __compiler2/profiled +# product compiler2 __compiler2/product +# +# debug1 compiler1 __compiler1/debug +# fastdebug1 compiler1 __compiler1/fastdebug +# jvmg1 compiler1 __compiler1/jvmg +# optimized1 compiler1 __compiler1/optimized +# profiled1 compiler1 __compiler1/profiled +# product1 compiler1 __compiler1/product +# +# debugcore core __core/debug +# fastdebugcore core __core/fastdebug +# jvmgcore core __core/jvmg +# optimizedcore core __core/optimized +# profiledcore core __core/profiled +# productcore core __core/product +# +# What you get with each target: +# +# debug* - "thin" libjvm_g - debug info linked into the gamma_g launcher +# fastdebug* - optimized compile, but with asserts enabled +# jvmg* - "fat" libjvm_g - debug info linked into libjvm_g.so +# optimized* - optimized compile, no asserts +# profiled* - gprof +# product* - the shippable thing: optimized compile, no asserts, -DPRODUCT + +# This target list needs to be coordinated with the usage message +# in the build.sh script: +TARGETS = debug jvmg fastdebug optimized profiled product + +SUBDIR_DOCS = $(OSNAME)_$(BUILDARCH)_docs +SUBDIRS_C1 = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler1/,$(TARGETS)) +SUBDIRS_C2 = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler2/,$(TARGETS)) +SUBDIRS_TIERED = $(addprefix $(OSNAME)_$(BUILDARCH)_tiered/,$(TARGETS)) +SUBDIRS_CORE = $(addprefix $(OSNAME)_$(BUILDARCH)_core/,$(TARGETS)) +SUBDIRS_KERNEL = $(addprefix $(OSNAME)_$(BUILDARCH)_kernel/,$(TARGETS)) + +TARGETS_C2 = $(TARGETS) +TARGETS_C1 = $(addsuffix 1,$(TARGETS)) +TARGETS_TIERED = $(addsuffix tiered,$(TARGETS)) +TARGETS_CORE = $(addsuffix core,$(TARGETS)) +TARGETS_KERNEL = $(addsuffix kernel,$(TARGETS)) + +BUILDTREE_MAKE = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make +BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) ARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) +BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) + +BUILDTREE = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS) + +#------------------------------------------------------------------------------- + +# Could make everything by default, but that would take a while. +all: + @echo "Try '$(MAKE) ...' where is one or more of" + @echo " $(TARGETS_C2)" + @echo " $(TARGETS_C1)" + @echo " $(TARGETS_CORE)" + +checks: check_os_version check_j2se_version + +# We do not want people accidentally building on old systems (e.g. Linux 2.2.x, +# Solaris 2.5.1, 2.6). +# Disable this check by setting DISABLE_HOTSPOT_OS_VERSION_CHECK=ok. + +SUPPORTED_OS_VERSION = 5.7 5.8 5.9 5.10 5.11 +OS_VERSION := $(shell uname -r) +EMPTY_IF_NOT_SUPPORTED = $(filter $(SUPPORTED_OS_VERSION),$(OS_VERSION)) + +check_os_version: +ifeq ($(DISABLE_HOTSPOT_OS_VERSION_CHECK)$(EMPTY_IF_NOT_SUPPORTED),) + $(QUIETLY) >&2 echo "*** This OS is not supported:" `uname -a`; exit 1; +endif + +# jvmti.make requires XSLT (J2SE 1.4.x or newer): +XSLT_CHECK = $(RUN.JAVAP) javax.xml.transform.TransformerFactory +# If not found then fail fast. +check_j2se_version: + $(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \ + if [ $$? 
-ne 0 ]; then \ + $(RUN.JAVA) -version; \ + echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \ + "to bootstrap this build" 1>&2; \ + exit 1; \ + fi + +$(SUBDIRS_TIERED): $(BUILDTREE_MAKE) + $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks + $(BUILDTREE) VARIANT=tiered + +$(SUBDIRS_C2): $(BUILDTREE_MAKE) +ifdef FORCE_TIERED + $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks + $(BUILDTREE) VARIANT=tiered FORCE_TIERED=1 +else + $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks + $(BUILDTREE) VARIANT=compiler2 +endif + +$(SUBDIRS_C1): $(BUILDTREE_MAKE) + $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks + $(BUILDTREE) VARIANT=compiler1 + +$(SUBDIRS_CORE): $(BUILDTREE_MAKE) + $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks + $(BUILDTREE) VARIANT=core + +$(SUBDIRS_KERNEL): $(BUILDTREE_MAKE) + $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks + $(BUILDTREE) VARIANT=kernel + +# Define INSTALL=y at command line to automatically copy JVM into JAVA_HOME + +$(TARGETS_C2): $(SUBDIRS_C2) + cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) + cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && ./test_gamma +ifdef INSTALL + cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) install +endif + +$(TARGETS_TIERED): $(SUBDIRS_TIERED) + cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) + cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && ./test_gamma +ifdef INSTALL + cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) install +endif + +$(TARGETS_C1): $(SUBDIRS_C1) + cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) + cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && ./test_gamma +ifdef INSTALL + cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) install +endif + +$(TARGETS_CORE): $(SUBDIRS_CORE) + cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) + cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && ./test_gamma +ifdef INSTALL + cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) install +endif + +$(TARGETS_KERNEL): $(SUBDIRS_KERNEL) + cd $(OSNAME)_$(BUILDARCH)_kernel/$(patsubst %kernel,%,$@) && $(MAKE) $(MFLAGS) + cd $(OSNAME)_$(BUILDARCH)_kernel/$(patsubst %kernel,%,$@) && ./test_gamma +ifdef INSTALL + cd $(OSNAME)_$(BUILDARCH)_kernel/$(patsubst %kernel,%,$@) && $(MAKE) $(MFLAGS) install +endif + +# Just build the tree, and nothing else: +tree: $(SUBDIRS_C2) +tree1: $(SUBDIRS_C1) +treecore: $(SUBDIRS_CORE) +treekernel: $(SUBDIRS_KERNEL) + +# Doc target. This is the same for all build options. +# Hence create a docs directory beside ...$(ARCH)_[...] +docs: checks + $(QUIETLY) mkdir -p $(SUBDIR_DOCS) + $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) jvmtidocs + +# Synonyms for win32-like targets. 
+compiler2: jvmg product + +compiler1: jvmg1 product1 + +core: jvmgcore productcore + +clean_docs: + rm -rf $(SUBDIR_DOCS) + +clean_compiler1 clean_compiler2 clean_core clean_kernel: + rm -rf $(OSNAME)_$(BUILDARCH)_$(subst clean_,,$@) + +clean: clean_compiler2 clean_compiler1 clean_core clean_docs clean_kernel + +include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make + +#------------------------------------------------------------------------------- + +.PHONY: $(TARGETS_C2) $(TARGETS_C1) $(TARGETS_CORE) +.PHONY: tree tree1 treecore +.PHONY: all compiler1 compiler2 core +.PHONY: clean clean_compiler1 clean_compiler2 clean_core docs clean_docs +.PHONY: checks check_os_version check_j2se_version --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/adlc_updater 2009-08-01 04:16:34.404307628 +0100 @@ -0,0 +1,19 @@ +#! /bin/sh +# +# This file is used by adlc.make to selectively update generated +# adlc files. Because source and target diretories are relative +# paths, this file is copied to the target build directory before +# use. +# +# adlc-updater +# +fix_lines() { + # repair bare #line directives in $1 to refer to $2 + awk < $1 > $1+ ' + /^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next} + {print} + ' F2=$2 + mv $1+ $1 +} +[ -f $3/$1 ] && (fix_lines $2/$1 $3/$1; cmp -s $2/$1 $3/$1) || \ +( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 ) --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/build.sh 2009-08-01 04:16:34.819189940 +0100 @@ -0,0 +1,127 @@ +#! /bin/sh +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Make sure the variable JAVA_HOME is set before running this script. 
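A short note on the adlc_updater helper above: adlc.make invokes it once per generated file with three arguments, the file name, the scratch directory, and the real adfiles directory. A hedged usage sketch (the file name and paths are placeholders):

    # Repair the bare "#line 999999" markers in the scratch copy so they point
    # at the file's final path, then move it into adfiles only if it differs
    # from (or is missing as) the existing copy; unchanged files are left
    # untouched so their dependent objects are not rebuilt.
    sh adlc_updater ad_sparc.cpp ../generated/adfiles/mktmp12345 ../generated/adfiles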
+ +set -u + + +usage() { + ( + echo "Usage : $0 [-sb | -sbfast] config ws_path" + echo "" + echo "Where:" + echo " -sb ::= enable source browser info generation for" + echo " all configs during compilation" + echo "" + echo " -sbfast ::= enable source browser info generation for" + echo " all configs without compilation" + echo "" + echo " config ::= debug | debug1 | debugcore" + echo " fastdebug | fastdebug1 | fastdebugcore" + echo " jvmg | jvmg1 | jvmgcore" + echo " optimized | optimized1 | optimizedcore" + echo " profiled | profiled1 | profiledcore" + echo " product | product1 | productcore" + echo "" + echo " ws_path ::= path to HotSpot workspace" + ) >&2 + exit 1 +} + +# extract possible options +options="" +if [ $# -gt 2 ]; then + case "$1" in + -sb) + options="CFLAGS_BROWSE=-xsb" + shift + ;; + -sbfast) + options="CFLAGS_BROWSE=-xsbfast" + shift + ;; + *) + echo "Unknown option: '$1'" >&2 + usage + ;; + esac +fi + +# should be just two args left at this point +if [ $# != 2 ]; then + usage +fi + +# Just in case: +case ${JAVA_HOME} in +/*) true;; +?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;; +esac + +if [ "${JAVA_HOME}" = "" -o ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/`uname -p` ]; then + echo "JAVA_HOME needs to be set to a valid JDK path" + echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/solaris" + echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/solaris" + exit 1 +fi + + +LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\ +${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.} + +# This is necessary as long as we are using the old launcher +# with the new distribution format: +CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.} + + +for gm in gmake gnumake +do + if [ "${GNUMAKE-}" != "" ]; then break; fi + ($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm +done +: ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'} + + +echo "### ENVIRONMENT SETTINGS:" +export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME" +export LD_LIBRARY_PATH ; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" +export CLASSPATH ; echo "CLASSPATH=$CLASSPATH" +export GNUMAKE ; echo "GNUMAKE=$GNUMAKE" +echo "###" + +config=$1 +ws_path=$2 + +case ${ws_path} in +/*) true;; +?*) ws_path=`(cd ${ws_path}; pwd)`;; +esac + +echo \ +${GNUMAKE} -f ${ws_path}/make/solaris/Makefile \ + $config GAMMADIR=${ws_path} $options +${GNUMAKE} -f ${ws_path}/make/solaris/Makefile \ + $config GAMMADIR=${ws_path} $options --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/adjust-mflags.sh 2009-08-01 04:16:35.305397762 +0100 @@ -0,0 +1,87 @@ +#! /bin/sh +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This script is used only from top.make. +# The macro $(MFLAGS-adjusted) calls this script to +# adjust the "-j" arguments to take into account +# the HOTSPOT_BUILD_JOBS variable. The default +# handling of the "-j" argument by gnumake does +# not meet our needs, so we must adjust it ourselves. + +# This argument adjustment applies to two recursive +# calls to "$(MAKE) $(MFLAGS-adjusted)" in top.make. +# One invokes adlc.make, and the other invokes vm.make. +# The adjustment propagates the desired concurrency +# level down to the sub-make (of the adlc or vm). +# The default behavior of gnumake is to run all +# sub-makes without concurrency ("-j1"). + +# Also, we use a make variable rather than an explicit +# "-j" argument to control this setting, so that +# the concurrency setting (which must be tuned separately +# for each MP system) can be set via an environment variable. +# The recommended setting is 1.5x to 2x the number of available +# CPUs on the MP system, which is large enough to keep the CPUs +# busy (even though some jobs may be I/O bound) but not too large, +# we may presume, to overflow the system's swap space. + +set -eu + +default_build_jobs=4 + +case $# in +[12]) true;; +*) >&2 echo "Usage: $0 ${MFLAGS} ${HOTSPOT_BUILD_JOBS}"; exit 2;; +esac + +MFLAGS=$1 +HOTSPOT_BUILD_JOBS=${2-} + +# Normalize any -jN argument to the form " -j${HBJ}" +MFLAGS=` + echo "$MFLAGS" \ + | sed ' + s/^-/ -/ + s/ -\([^ ][^ ]*\)j/ -\1 -j/ + s/ -j[0-9][0-9]*/ -j/ + s/ -j\([^ ]\)/ -j -\1/ + s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/ + ' ` + +case ${HOTSPOT_BUILD_JOBS} in \ + +'') case ${MFLAGS} in + *\ -j*) + >&2 echo "# Note: -jN is ineffective for setting parallelism in this makefile." + >&2 echo "# please set HOTSPOT_BUILD_JOBS=${default_build_jobs} in the command line or environment." + esac;; + +?*) case ${MFLAGS} in + *\ -j*) true;; + *) MFLAGS="-j${HOTSPOT_BUILD_JOBS} ${MFLAGS}";; + esac;; +esac + +echo "${MFLAGS}" --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/adlc.make 2009-08-01 04:16:35.739404318 +0100 @@ -0,0 +1,236 @@ +# +# Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This makefile (adlc.make) is included from the adlc.make in the +# build directories. +# It knows how to compile, link, and run the adlc. 
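To make the -j rewriting in adjust-mflags.sh above concrete, one hypothetical invocation (the flag string and job count are illustrative only):

    # Any -jN already present in the make flags is normalized and replaced by
    # the HOTSPOT_BUILD_JOBS value passed as the second argument (here 4).
    sh adjust-mflags.sh "-e -j16" 4
    # expected output (roughly): " -e -j4"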
+ +include $(GAMMADIR)/make/$(Platform_os_family)/makefiles/rules.make + +# ######################################################################### + +# OUTDIR must be the same as AD_Dir = $(GENERATED)/adfiles in top.make: +GENERATED = ../generated +OUTDIR = $(GENERATED)/adfiles + +ARCH = $(Platform_arch) +OS = $(Platform_os_family) + +SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad + +SOURCES.AD = $(GAMMADIR)/src/cpu/$(ARCH)/vm/$(Platform_arch_model).ad \ + $(GAMMADIR)/src/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad + +Src_Dirs += $(GAMMADIR)/src/share/vm/adlc + +EXEC = $(OUTDIR)/adlc + +# set VPATH so make knows where to look for source files +Src_Dirs_V = ${Src_Dirs} $(GENERATED)/incls +VPATH += $(Src_Dirs_V:%=%:) + +# set INCLUDES for C preprocessor +Src_Dirs_I = ${Src_Dirs} $(GENERATED) +INCLUDES += $(Src_Dirs_I:%=-I%) + +# set flags for adlc compilation +CPPFLAGS = $(SYSDEFS) $(INCLUDES) + +# Force assertions on. +CPPFLAGS += -DASSERT + +ifndef USE_GCC + # We need libCstd.so for adlc + CFLAGS += -library=Cstd -g + LFLAGS += -library=Cstd -g +endif + +# CFLAGS_WARN holds compiler options to suppress/enable warnings. +CFLAGS += $(CFLAGS_WARN) + +ifeq ("${Platform_compiler}", "sparcWorks") +# Enable the following CFLAGS addition if you need to compare the +# built ELF objects. +# +# The -g option makes static data global and the "-Qoption ccfe +# -xglobalstatic" option tells the compiler to not globalize static +# data using a unique globalization prefix. Instead force the use +# of a static globalization prefix based on the source filepath so +# the objects from two identical compilations are the same. +#CFLAGS += -Qoption ccfe -xglobalstatic +endif # Platform_compiler == sparcWorks + +OBJECTNAMES = \ + adlparse.o \ + archDesc.o \ + arena.o \ + dfa.o \ + dict2.o \ + filebuff.o \ + forms.o \ + formsopt.o \ + formssel.o \ + main.o \ + adlc-opcodes.o \ + output_c.o \ + output_h.o \ + +OBJECTS = $(OBJECTNAMES:%=$(OUTDIR)/%) + +GENERATEDNAMES = \ + ad_$(Platform_arch_model).cpp \ + ad_$(Platform_arch_model).hpp \ + ad_$(Platform_arch_model)_clone.cpp \ + ad_$(Platform_arch_model)_expand.cpp \ + ad_$(Platform_arch_model)_format.cpp \ + ad_$(Platform_arch_model)_gen.cpp \ + ad_$(Platform_arch_model)_misc.cpp \ + ad_$(Platform_arch_model)_peephole.cpp \ + ad_$(Platform_arch_model)_pipeline.cpp \ + adGlobals_$(Platform_arch_model).hpp \ + dfa_$(Platform_arch_model).cpp \ + +GENERATEDFILES = $(GENERATEDNAMES:%=$(OUTDIR)/%) + +# ######################################################################### + +all: $(EXEC) + +$(EXEC) : $(OBJECTS) + @echo Making adlc + $(QUIETLY) $(LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS) + +# Random dependencies: +$(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp + +# The source files refer to ostream.h, which sparcworks calls iostream.h +$(OBJECTS): ostream.h + +ostream.h : + @echo >$@ '#include ' + +dump: + : OUTDIR=$(OUTDIR) + : OBJECTS=$(OBJECTS) + : products = $(GENERATEDFILES) + +all: $(GENERATEDFILES) + +$(GENERATEDFILES): refresh_adfiles + +# Get a unique temporary directory name, so multiple makes can run in parallel. +# Note that product files are updated via "mv", which is atomic. +TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$) + +# Pass -D flags into ADLC. +ADLCFLAGS += $(SYSDEFS) + +# Note "+="; it is a hook so flags.make can add more flags, like -g or -DFOO. 
+ADLCFLAGS += -q -T + +# Normally, debugging is done directly on the ad_*.cpp files. +# But -g will put #line directives in those files pointing back to .ad. +#ADLCFLAGS += -g + +ifdef LP64 +ADLCFLAGS += -D_LP64 +else +ADLCFLAGS += -U_LP64 +endif + +# +# adlc_updater is a simple sh script, under sccs control. It is +# used to selectively update generated adlc files. This should +# provide a nice compilation speed improvement. +# +ADLC_UPDATER_DIRECTORY = $(GAMMADIR)/make/$(OS) +ADLC_UPDATER = adlc_updater +$(ADLC_UPDATER): $(ADLC_UPDATER_DIRECTORY)/$(ADLC_UPDATER) + $(QUIETLY) cp $< $@; chmod +x $@ + +# This action refreshes all generated adlc files simultaneously. +# The way it works is this: +# 1) create a scratch directory to work in. +# 2) if the current working directory does not have $(ADLC_UPDATER), copy it. +# 3) run the compiled adlc executable. This will create new adlc files in the scratch directory. +# 4) call $(ADLC_UPDATER) on each generated adlc file. It will selectively update changed or missing files. +# 5) If we actually updated any files, echo a notice. +# +refresh_adfiles: $(EXEC) $(SOURCE.AD) $(ADLC_UPDATER) + @rm -rf $(TEMPDIR); mkdir $(TEMPDIR) + $(QUIETLY) $(EXEC) $(ADLCFLAGS) $(SOURCE.AD) \ + -c$(TEMPDIR)/ad_$(Platform_arch_model).cpp -h$(TEMPDIR)/ad_$(Platform_arch_model).hpp -a$(TEMPDIR)/dfa_$(Platform_arch_model).cpp -v$(TEMPDIR)/adGlobals_$(Platform_arch_model).hpp \ + || { rm -rf $(TEMPDIR); exit 1; } + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model).cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model).hpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_clone.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_expand.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_format.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_gen.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_misc.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_peephole.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_pipeline.cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) adGlobals_$(Platform_arch_model).hpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) ./$(ADLC_UPDATER) dfa_$(Platform_arch_model).cpp $(TEMPDIR) $(OUTDIR) + $(QUIETLY) [ -f $(TEMPDIR)/made-change ] \ + || echo "Rescanned $(SOURCE.AD) but encountered no changes." + $(QUIETLY) rm -rf $(TEMPDIR) + + +# ######################################################################### + +$(SOURCE.AD): $(SOURCES.AD) + $(QUIETLY) $(PROCESS_AD_FILES) $(SOURCES.AD) > $(SOURCE.AD) + +#PROCESS_AD_FILES = cat +# Pass through #line directives, in case user enables -g option above: +PROCESS_AD_FILES = awk '{ \ + if (CUR_FN != FILENAME) { CUR_FN=FILENAME; NR_BASE=NR-1; need_lineno=1 } \ + if (need_lineno && $$0 !~ /\/\//) \ + { print "\n\n\#line " (NR-NR_BASE) " \"" FILENAME "\""; need_lineno=0 }; \ + print }' + +$(OUTDIR)/%.o: %.cpp + @echo Compiling $< + $(QUIETLY) $(REMOVE_TARGET) + $(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE) + +# Some object files are given a prefix, to disambiguate +# them from objects of the same name built for the VM. 
+$(OUTDIR)/adlc-%.o: %.cpp + @echo Compiling $< + $(QUIETLY) $(REMOVE_TARGET) + $(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE) + +# ######################################################################### + +clean : + rm $(OBJECTS) + +cleanall : + rm $(OBJECTS) $(EXEC) + +# ######################################################################### + +.PHONY: all dump refresh_adfiles clean cleanall --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/amd64.make 2009-08-01 04:16:36.174611883 +0100 @@ -0,0 +1,58 @@ +# +# Copyright 2004-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Must also specify if CPU is little endian +CFLAGS += -DVM_LITTLE_ENDIAN + +# Not included in includeDB because it has no dependencies +Obj_Files += solaris_x86_64.o + +# +# Special case flags for compilers and compiler versions on amd64. +# +ifeq ("${Platform_compiler}", "sparcWorks") + +# Temporary until C++ compiler is fixed + +# _lwp_create_interpose must have a frame +OPT_CFLAGS/os_solaris_x86_64.o = -xO1 + +# Temporary until SS10 C++ compiler is fixed +OPT_CFLAGS/generateOptoStub.o = -xO2 +OPT_CFLAGS/thread.o = -xO2 + +else + +ifeq ("${Platform_compiler}", "gcc") +# gcc +# The serviceability agent relies on frame pointer (%rbp) to walk thread stack +CFLAGS += -fno-omit-frame-pointer + +else +# error +_JUNK2_ := $(shell echo >&2 \ + "*** ERROR: this compiler is not yet supported by this code base!") + @exit 1 +endif +endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/buildtree.make 2009-08-01 04:16:36.617559074 +0100 @@ -0,0 +1,369 @@ +# +# Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Usage: +# +# $(MAKE) -f buildtree.make ARCH=arch BUILDARCH=buildarch LIBARCH=libarch +# GAMMADIR=dir OS_FAMILY=os VARIANT=variant +# +# The macros ARCH, GAMMADIR, OS_FAMILY and VARIANT must be defined in the +# environment or on the command-line: +# +# ARCH - sparc, i486, ... HotSpot cpu and os_cpu source directory +# BUILDARCH - build directory +# LIBARCH - the corresponding directory in JDK/JRE +# GAMMADIR - top of workspace +# OS_FAMILY - operating system +# VARIANT - core, compiler1, compiler2, or tiered +# HOTSPOT_RELEASE_VERSION - .-b (11.0-b07) +# HOTSPOT_BUILD_VERSION - internal, PRTjob ID, JPRTjob ID +# JRE_RELEASE_VERSION - .. (1.7.0) +# +# Builds the directory trees with makefiles plus some convenience files in +# each directory: +# +# Makefile - for "make foo" +# flags.make - with macro settings +# vm.make - to support making "$(MAKE) -v vm.make" in makefiles +# adlc.make - +# jvmti.make - generate JVMTI bindings from the spec (JSR-163) +# sa.make - generate SA jar file and natives +# env.[ck]sh - environment settings +# test_gamma - script to run the Queens program +# +# The makefiles are split this way so that "make foo" will run faster by not +# having to read the dependency files for the vm. + +include $(GAMMADIR)/make/scm.make + +# 'gmake MAKE_VERBOSE=y' or 'gmake QUIETLY=' gives all the gory details. +QUIETLY$(MAKE_VERBOSE) = @ + +# For now, until the compiler is less wobbly: +TESTFLAGS = -Xbatch -showversion + +### maye ARCH_XXX instead? +ifdef USE_GCC +PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH).gcc +GCC_LIB = /usr/local/lib +else +PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH) +GCC_LIB = +endif + +ifdef FORCE_TIERED +ifeq ($(VARIANT),tiered) +PLATFORM_DIR = $(OS_FAMILY)_$(BUILDARCH)_compiler2 +else +PLATFORM_DIR = $(OS_FAMILY)_$(BUILDARCH)_$(VARIANT) +endif +else +PLATFORM_DIR = $(OS_FAMILY)_$(BUILDARCH)_$(VARIANT) +endif + +# +# We do two levels of exclusion in the shared directory. +# TOPLEVEL excludes are pruned, they are not recursively searched, +# but lower level directories can be named without fear of collision. +# ALWAYS excludes are excluded at any level in the directory tree. +# + +ALWAYS_EXCLUDE_DIRS = $(SCM_DIRS) + +ifeq ($(VARIANT),tiered) +TOPLEVEL_EXCLUDE_DIRS = $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name agent +else +ifeq ($(VARIANT),compiler2) +TOPLEVEL_EXCLUDE_DIRS = $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name c1 -o -name agent +else +# compiler1 and core use the same exclude list +TOPLEVEL_EXCLUDE_DIRS = $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name opto -o -name libadt -o -name agent +endif +endif + +# Get things from the platform file. +COMPILER = $(shell sed -n 's/^compiler[ ]*=[ ]*//p' $(PLATFORM_FILE)) + +SIMPLE_DIRS = \ + $(PLATFORM_DIR)/generated/incls \ + $(PLATFORM_DIR)/generated/adfiles \ + $(PLATFORM_DIR)/generated/jvmtifiles + +TARGETS = debug fastdebug jvmg optimized product profiled +SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) + +# For dependencies and recursive makes. 
+BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make + +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \ + env.ksh env.csh .dbxrc test_gamma + +BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ + ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) + +# Define variables to be set in flags.make. +# Default values are set in make/defs.make. +ifeq ($(HOTSPOT_BUILD_VERSION),) + HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION) +else + HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION) +endif +# Set BUILD_USER from system-dependent hints: $LOGNAME, $(whoami) +ifndef HOTSPOT_BUILD_USER + HOTSPOT_BUILD_USER := $(shell echo $$LOGNAME) +endif +ifndef HOTSPOT_BUILD_USER + HOTSPOT_BUILD_USER := $(shell whoami) +endif +# Define HOTSPOT_VM_DISTRO based on settings in make/openjdk_distro +# or make/hotspot_distro. +ifndef HOTSPOT_VM_DISTRO + CLOSED_DIR_EXISTS := $(shell \ + if [ -d $(GAMMADIR)/src/closed ] ; then \ + echo true; \ + else \ + echo false; \ + fi) + ifeq ($(CLOSED_DIR_EXISTS), true) + include $(GAMMADIR)/make/hotspot_distro + else + include $(GAMMADIR)/make/openjdk_distro + endif +endif + +BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) + +BUILDTREE = \ + $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_TARGETS) $(BUILDTREE_VARS) + +BUILDTREE_COMMENT = echo "\# Generated by $(BUILDTREE_MAKE)" + +all: $(SUBMAKE_DIRS) + +# Run make in each subdirectory recursively. +$(SUBMAKE_DIRS): $(SIMPLE_DIRS) FORCE + $(QUIETLY) [ -d $@ ] || { mkdir -p $@; } + $(QUIETLY) cd $@ && $(BUILDTREE) TARGET=$(@F) + $(QUIETLY) touch $@ + +$(SIMPLE_DIRS): + $(QUIETLY) mkdir -p $@ + +flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo "Platform_file = $(PLATFORM_FILE)" | sed 's|$(GAMMADIR)|$$(GAMMADIR)|'; \ + sed -n '/=/s/^ */Platform_/p' < $(PLATFORM_FILE); \ + echo; \ + echo "GAMMADIR = $(GAMMADIR)"; \ + echo "SYSDEFS = \$$(Platform_sysdefs)"; \ + echo "SRCARCH = $(ARCH)"; \ + echo "BUILDARCH = $(BUILDARCH)"; \ + echo "LIBARCH = $(LIBARCH)"; \ + echo "TARGET = $(TARGET)"; \ + echo "HS_BUILD_VER = $(HS_BUILD_VER)"; \ + echo "JRE_RELEASE_VER = $(JRE_RELEASE_VERSION)"; \ + echo "SA_BUILD_VERSION = $(HS_BUILD_VER)"; \ + echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \ + echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \ + echo "$(LP64_SETTING/$(DATA_MODE))"; \ + echo; \ + echo "Src_Dirs = \\"; \ + sed 's/$$/ \\/;s|$(GAMMADIR)|$$(GAMMADIR)|' ../shared_dirs.lst; \ + echo "\$$(GAMMADIR)/src/cpu/$(ARCH)/vm \\"; \ + echo "\$$(GAMMADIR)/src/os/$(OS_FAMILY)/vm \\"; \ + echo "\$$(GAMMADIR)/src/os_cpu/$(OS_FAMILY)_$(ARCH)/vm"; \ + [ -n "$(CFLAGS_BROWSE)" ] && \ + echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \ + [ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \ + echo && \ + echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \ + echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \ + ) > $@ + +flags_vm.make: $(BUILDTREE_MAKE) ../shared_dirs.lst + @echo Creating $@ ... 
+ $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + [ "$(TARGET)" = profiled ] && \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/optimized.make"; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \ + ) > $@ + +../shared_dirs.lst: $(BUILDTREE_MAKE) $(GAMMADIR)/src/share/vm + @echo Creating directory list $@ + $(QUIETLY) find $(GAMMADIR)/src/share/vm/* -prune \ + -type d \! \( $(TOPLEVEL_EXCLUDE_DIRS) \) -exec find {} \ + \( $(ALWAYS_EXCLUDE_DIRS) \) -prune -o -type d -print \; > $@ + +Makefile: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/top.make"; \ + ) > $@ + +vm.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo include flags_vm.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + +adlc.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + +jvmti.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + +sa.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + +env.ksh: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + [ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \ + { \ + echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \ + echo "unset LD_LIBRARY_PATH_32"; \ + echo "unset LD_LIBRARY_PATH_64"; \ + echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \ + } | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \ + echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \ + echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \ + ) > $@ + +env.csh: env.ksh + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + [ -n "$$JAVA_HOME" ] && \ + { echo "if (! \$$?JAVA_HOME) setenv JAVA_HOME \"$$JAVA_HOME\""; }; \ + sed -n 's/^\([A-Za-z_][A-Za-z0-9_]*\)=/setenv \1 /p' $?; \ + ) > $@ + +.dbxrc: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + echo "echo '# Loading $(PLATFORM_DIR)/$(TARGET)/.dbxrc'"; \ + echo "if [ -f \"\$${HOTSPOT_DBXWARE}\" ]"; \ + echo "then"; \ + echo " source \"\$${HOTSPOT_DBXWARE}\""; \ + echo "elif [ -f \"\$$HOME/.dbxrc\" ]"; \ + echo "then"; \ + echo " source \"\$$HOME/.dbxrc\""; \ + echo "fi"; \ + ) > $@ + +# Skip the test for product builds (which only work when installed in a JDK), to +# avoid exiting with an error and causing make to halt. +NO_TEST_MSG = \ + echo "$@: skipping the test--this build must be tested in a JDK." + +NO_JAVA_HOME_MSG = \ + echo "JAVA_HOME must be set to run this test." 
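The wrapper files emitted by the Makefile/vm.make/adlc.make/jvmti.make/sa.make rules above are tiny include stubs. As a sketch only (assuming OS_FAMILY=solaris; the first comment line comes from $(BUILDTREE_COMMENT) and is shown here with a placeholder workspace path), the generated Makefile in each build directory would contain approximately:

# Generated by /path/to/hotspot/make/solaris/makefiles/buildtree.make

include flags.make

include $(GAMMADIR)/make/solaris/makefiles/top.make

The generated vm.make stub is the same apart from also including flags_vm.make before pulling in the real $(GAMMADIR)/make/solaris/makefiles/vm.make.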
+ +DATA_MODE = $(DATA_MODE/$(BUILDARCH)) +JAVA_FLAG = $(JAVA_FLAG/$(DATA_MODE)) + +DATA_MODE/i486 = 32 +DATA_MODE/sparc = 32 +DATA_MODE/sparcv9 = 64 +DATA_MODE/amd64 = 64 +DATA_MODE/ia64 = 64 + +# This bit is needed to enable local rebuilds. +# Unless the makefile itself sets LP64, any environmental +# setting of LP64 will interfere with the build. +LP64_SETTING/32 = LP64 = \#empty +LP64_SETTING/64 = LP64 = 1 + +JAVA_FLAG/32 = -d32 +JAVA_FLAG/64 = -d64 + +WRONG_DATA_MODE_MSG = \ + echo "JAVA_HOME must point to $(DATA_MODE)bit JDK." + +test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java + @echo Creating $@ ... + $(QUIETLY) ( \ + echo '#!/bin/ksh'; \ + $(BUILDTREE_COMMENT); \ + echo '. ./env.ksh'; \ + echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \ + echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \ + echo "then"; \ + echo " $(WRONG_DATA_MODE_MSG); exit 0;"; \ + echo "fi"; \ + echo "rm -f Queens.class"; \ + echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \ + echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \ + echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \ + ) > $@ + $(QUIETLY) chmod +x $@ + +include $(GAMMADIR)/build/solaris/makefiles/rules.make + +Queens.class: $(GAMMADIR)/build/test/Queens.java + $(RM) -f $@ + $(REMOTE) $(COMPILE.JAVAC) -d . $< + +FORCE: + +.PHONY: all FORCE --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/compiler1.make 2009-08-01 04:16:37.059357260 +0100 @@ -0,0 +1,31 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making client version of VM + +TYPE=COMPILER1 + +VM_SUBDIR = client + +CFLAGS += -DCOMPILER1 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/compiler2.make 2009-08-01 04:16:37.466153795 +0100 @@ -0,0 +1,31 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). 
+# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making server version of VM + +TYPE=COMPILER2 + +VM_SUBDIR = server + +CFLAGS += -DCOMPILER2 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/core.make 2009-08-01 04:16:37.868490106 +0100 @@ -0,0 +1,34 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making core version of VM + +# Note the effect on includeDB lists in top.make: +# includeDB_compiler* and ad_.*pp are excluded from the build, +TYPE=CORE + +# There is no "core" directory in JDK. Install core build in server directory. +VM_SUBDIR = server + +# Note: macros.hpp defines CORE --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/cscope.make 2009-08-01 04:16:38.269403751 +0100 @@ -0,0 +1,164 @@ +# +# Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# +# @(#)cscope.make 1.16 07/05/05 17:03:56 +# +# The cscope.out file is made in the current directory and spans the entire +# source tree. +# +# Things to note: +# 1. We use relative names for cscope. +# 2. 
We *don't* remove the old cscope.out file, because cscope is smart +# enough to only build what has changed. It can be confused, however, +# if files are renamed or removed, so it may be necessary to manually +# remove cscope.out if a lot of reorganization has occurred. +# + +include $(GAMMADIR)/make/scm.make + +NAWK = /usr/xpg4/bin/awk +RM = rm -f +HG = hg +CS_TOP = ../.. + +CSDIRS = $(CS_TOP)/src $(CS_TOP)/make +CSINCS = $(CSDIRS:%=-I%) + +CSCOPE = cscope +CSCOPE_FLAGS = -b + +# Allow .java files to be added from the environment (CSCLASSES=yes). +ifdef CSCLASSES +ADDCLASSES= -o -name '*.java' +endif + +# Adding CClassHeaders also pushes the file count of a full workspace up about +# 200 files (these files also don't exist in a new workspace, and thus will +# cause the recreation of the database as they get created, which might seem +# a little confusing). Thus allow these files to be added from the environment +# (CSHEADERS=yes). +ifndef CSHEADERS +RMCCHEADERS= -o -name CClassHeaders +endif + +# Use CS_GENERATED=x to include auto-generated files in the make directories. +ifdef CS_GENERATED +CS_ADD_GENERATED = -o -name '*.incl' +else +CS_PRUNE_GENERATED = -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?' +endif + +# OS-specific files for other systems are excluded by default. Use CS_OS=yes +# to include platform-specific files for other platforms. +ifndef CS_OS +CS_OS = linux macos solaris win32 +CS_PRUNE_OS = $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS})) +endif + +# Processor-specific files for other processors are excluded by default. Use +# CS_CPU=x to include platform-specific files for other platforms. +ifndef CS_CPU +CS_CPU = i486 sparc amd64 ia64 +CS_PRUNE_CPU = $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU})) +endif + +# What files should we include? A simple rule might be just those files under +# SCCS control, however this would miss files we create like the opcodes and +# CClassHeaders. The following attempts to find everything that is *useful*. +# (.del files are created by sccsrm, demo directories contain many .java files +# that probably aren't useful for development, and the pkgarchive may contain +# duplicates of files within the source hierarchy). + +# Directories to exclude. +CS_PRUNE_STD = $(SCM_DIRS) \ + -o -name '.del-*' \ + -o -name '*demo' \ + -o -name pkgarchive + +CS_PRUNE = $(CS_PRUNE_STD) \ + $(CS_PRUNE_OS) \ + $(CS_PRUNE_CPU) \ + $(CS_PRUNE_GENERATED) \ + $(RMCCHEADERS) + +# File names to include. +CSFILENAMES = -name '*.[ch]pp' \ + -o -name '*.[Ccshlxy]' \ + $(CS_ADD_GENERATED) \ + -o -name '*.d' \ + -o -name '*.il' \ + -o -name '*.cc' \ + -o -name '*[Mm]akefile*' \ + -o -name '*.gmk' \ + -o -name '*.make' \ + -o -name '*.ad' \ + $(ADDCLASSES) + +.PRECIOUS: cscope.out + +cscope cscope.out: cscope.files FORCE + $(CSCOPE) $(CSCOPE_FLAGS) + +# The .raw file is reordered here in an attempt to make cscope display the most +# relevant files first. 
+cscope.files: .cscope.files.raw + echo "$(CSINCS)" > $@ + -egrep -v "\.java|\/make\/" $< >> $@ + -fgrep ".java" $< >> $@ + -fgrep "/make/" $< >> $@ + +.cscope.files.raw: .nametable.files + -find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \ + -type f \( $(CSFILENAMES) \) -print > $@ + +cscope.clean: nametable.clean + -$(RM) cscope.out cscope.files .cscope.files.raw + +TAGS: cscope.files FORCE + egrep -v '^-|^$$' $< | etags --members - + +TAGS.clean: nametable.clean + -$(RM) TAGS + +# .nametable.files and .nametable.files.tmp are used to determine if any files +# were added to/deleted from/renamed in the workspace. If not, then there's +# normally no need to rebuild the cscope database. To force a rebuild of +# the cscope database: gmake nametable.clean. +.nametable.files: .nametable.files.tmp + ( cmp -s $@ $< ) || ( cp $< $@ ) + -$(RM) $< + +# `hg status' is slightly faster than `hg fstatus'. Both are +# quite a bit slower on an NFS mounted file system, so this is +# really geared towards repos on local file systems. +.nametable.files.tmp: + -$(HG) fstatus -acmn > $@ + +nametable.clean: + -$(RM) .nametable.files .nametable.files.tmp + +FORCE: + +.PHONY: cscope cscope.clean TAGS.clean nametable.clean FORCE --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/debug.make 2009-08-01 04:16:38.711681897 +0100 @@ -0,0 +1,60 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making debug version of VM + +# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make +DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) +DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) + +ifeq ("${Platform_compiler}", "sparcWorks") + +ifeq ($(COMPILER_REV_NUMERIC),508) + # SS11 SEGV when compiling with -g and -xarch=v8, using different backend + DEBUG_CFLAGS/compileBroker.o = $(DEBUG_CFLAGS) -xO0 + DEBUG_CFLAGS/jvmtiTagMap.o = $(DEBUG_CFLAGS) -xO0 +endif +endif + +CFLAGS += $(DEBUG_CFLAGS/BYFILE) + +# Linker mapfiles +MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \ + $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \ + $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct + +# This mapfile is only needed when compiling with dtrace support, +# and mustn't be otherwise. +MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE) + +_JUNK_ := $(shell echo >&2 ""\ + "-------------------------------------------------------------------------\n" \ + "WARNING: 'gnumake debug' is deprecated. 
It will be removed in the future.\n" \ + "Please use 'gnumake jvmg' to build debug JVM. \n" \ + "-------------------------------------------------------------------------\n") + +G_SUFFIX = +VERSION = debug +SYSDEFS += -DASSERT -DDEBUG +PICFLAGS = DEFAULT --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/defs.make 2009-08-01 04:16:39.154048123 +0100 @@ -0,0 +1,90 @@ +# +# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# The common definitions for hotspot solaris builds. +# Include the top level defs.make under make directory instead of this one. +# This file is included into make/defs.make. + +# Need PLATFORM (os-arch combo names) for jdk and hotspot, plus libarch name +SLASH_JAVA ?= /java +ARCH:=$(shell uname -p) +PATH_SEP = : +ifeq ($(LP64), 1) + ARCH_DATA_MODEL=64 +else + ARCH_DATA_MODEL=32 +endif + +ifeq ($(ARCH),sparc) + ifeq ($(ARCH_DATA_MODEL), 64) + MAKE_ARGS += LP64=1 + PLATFORM=solaris-sparcv9 + VM_PLATFORM=solaris_sparcv9 + else + PLATFORM=solaris-sparc + VM_PLATFORM=solaris_sparc + endif + HS_ARCH=sparc +else + ifeq ($(ARCH_DATA_MODEL), 64) + MAKE_ARGS += LP64=1 + PLATFORM=solaris-amd64 + VM_PLATFORM=solaris_amd64 + HS_ARCH=x86 + else + PLATFORM=solaris-i586 + VM_PLATFORM=solaris_i486 + HS_ARCH=x86 + endif +endif + +JDK_INCLUDE_SUBDIR=solaris + +# FIXUP: The subdirectory for a debug build is NOT the same on all platforms +VM_DEBUG=jvmg + +EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html +EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server +EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt +EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjsig.so +EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.so +EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_db.so +EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_dtrace.so +ifeq ($(ARCH_DATA_MODEL), 32) + EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjsig.so + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.so + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.so + ifeq ($(ARCH),sparc) + EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.so + EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.so + endif +endif + +EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so +EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ 
new/hotspot/make/solaris/makefiles/dtrace.make 2009-08-01 04:16:39.571830451 +0100 @@ -0,0 +1,252 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Rules to build jvm_db/dtrace, used by vm.make + +# we build libjvm_dtrace/libjvm_db/dtrace for COMPILER1 and COMPILER2 +# but not for CORE configuration + +ifneq ("${TYPE}", "CORE") +ifneq ("${TYPE}", "KERNEL") + +ifdef USE_GCC + +dtraceCheck: + $(QUIETLY) echo "**NOTICE** Dtrace support disabled for gcc builds" + +else + + +JVM_DB = libjvm_db +LIBJVM_DB = libjvm$(G_SUFFIX)_db.so + +JVM_DTRACE = jvm_dtrace +LIBJVM_DTRACE = libjvm$(G_SUFFIX)_dtrace.so + +JVMOFFS = JvmOffsets +JVMOFFS.o = $(JVMOFFS).o +GENOFFS = generate$(JVMOFFS) + +DTRACE_SRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/dtrace +DTRACE = dtrace +DTRACE.o = $(DTRACE).o + +# to remove '-g' option which causes link problems +# also '-z nodefs' is used as workaround +GENOFFS_CFLAGS = $(shell echo $(CFLAGS) | sed -e 's/ -g / /g' -e 's/ -g0 / /g';) + +ifdef LP64 +DTRACE_OPTS = -64 -D_LP64 +endif + +# making libjvm_db + +INCLS = $(GENERATED)/incls + +# Use mapfile with libjvm_db.so +LIBJVM_DB_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jvm_db +LFLAGS_JVM_DB += $(MAPFLAG:FILENAME=$(LIBJVM_DB_MAPFILE)) + +# Use mapfile with libjvm_dtrace.so +LIBJVM_DTRACE_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jvm_dtrace +LFLAGS_JVM_DTRACE += $(MAPFLAG:FILENAME=$(LIBJVM_DTRACE_MAPFILE)) + +ifdef USE_GCC +LFLAGS_JVM_DB += -D_REENTRANT $(PICFLAG) +LFLAGS_JVM_DTRACE += -D_REENTRANT $(PICFLAG) +else +LFLAGS_JVM_DB += -mt $(PICFLAG) -xnolib +LFLAGS_JVM_DTRACE += -mt $(PICFLAG) -xnolib +endif + +ISA = $(subst i386,i486,$(shell isainfo -n)) + +# Making 64/libjvm_db.so: 64-bit version of libjvm_db.so which handles 32-bit libjvm.so +ifneq ("${ISA}","${BUILDARCH}") + +XLIBJVM_DB = 64/$(LIBJVM_DB) +XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE) + +$(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE) + @echo Making $@ + $(QUIETLY) mkdir -p 64/ ; \ + $(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \ + $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc +$(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE) + @echo Making $@ + $(QUIETLY) mkdir -p 64/ ; \ + $(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. 
\ + $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor +endif # ifneq ("${ISA}","${BUILDARCH}") + +ifdef USE_GCC +LFLAGS_GENOFFS += -D_REENTRANT +else +LFLAGS_GENOFFS += -mt -xnolib -norunpath +endif + +lib$(GENOFFS).so: $(DTRACE_SRCDIR)/$(GENOFFS).cpp $(DTRACE_SRCDIR)/$(GENOFFS).h \ + $(INCLS)/_vmStructs.cpp.incl $(LIBJVM.o) + $(QUIETLY) $(CCC) $(CPPFLAGS) $(GENOFFS_CFLAGS) $(SHARED_FLAG) $(PICFLAG) \ + $(LFLAGS_GENOFFS) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS).cpp -lc + +$(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).so + $(QUIETLY) $(LINK.CC) -z nodefs -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \ + ./lib$(GENOFFS).so + +CONDITIONALLY_UPDATE_JVMOFFS_TARGET = \ + cmp -s $@ $@.tmp; \ + case $$? in \ + 0) rm -f $@.tmp;; \ + *) rm -f $@ && mv $@.tmp $@ && echo Updated $@;; \ + esac + +# $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs. +$(JVMOFFS).h: $(GENOFFS) + $(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -header > $@.tmp + $(QUIETLY) $(CONDITIONALLY_UPDATE_JVMOFFS_TARGET) + +$(JVMOFFS)Index.h: $(GENOFFS) + $(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -index > $@.tmp + $(QUIETLY) $(CONDITIONALLY_UPDATE_JVMOFFS_TARGET) + +$(JVMOFFS).cpp: $(GENOFFS) $(JVMOFFS).h $(JVMOFFS)Index.h + $(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -table > $@.tmp + $(QUIETLY) $(CONDITIONALLY_UPDATE_JVMOFFS_TARGET) + +$(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp + $(QUIETLY) $(CCC) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp + +$(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE) + @echo Making $@ + $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \ + $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc + +$(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE) + @echo Making $@ + $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. \ + $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor + +$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \ + $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d + $(QUIETLY) cat $^ > $@ + +# Dtrace is available, so we build $(DTRACE.o) +$(DTRACE.o): $(DTRACE).d $(JVMOFFS).h $(JVMOFFS)Index.h $(DTraced_Files) + @echo Compiling $(DTRACE).d + + $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -o $@ -s $(DTRACE).d \ + $(DTraced_Files) ||\ + STATUS=$$?;\ + if [ x"$$STATUS" = x"1" -a \ + x`uname -r` = x"5.10" -a \ + x`uname -p` = x"sparc" ]; then\ + echo "*****************************************************************";\ + echo "* If you are building server compiler, and the error message is ";\ + echo "* \"incorrect ELF machine type...\", you have run into solaris bug ";\ + echo "* 6213962, \"dtrace -G doesn't work on sparcv8+ object files\".";\ + echo "* Either patch/upgrade your system (>= S10u1_15), or set the ";\ + echo "* environment variable HOTSPOT_DISABLE_DTRACE_PROBES to disable ";\ + echo "* dtrace probes for this build.";\ + echo "*****************************************************************";\ + fi;\ + exit $$STATUS + # Since some DTraced_Files are in LIBJVM.o and they are touched by this + # command, and libgenerateJvmOffsets.so depends on LIBJVM.o, 'make' will + # think it needs to rebuild libgenerateJvmOffsets.so and thus JvmOffsets* + # files, but it doesn't, so we touch the necessary files to prevent later + # recompilation. 
Note: we only touch the necessary files if they already + # exist in order to close a race where an empty file can be created + # before the real build rule is executed. + # But, we can't touch the *.h files: This rule depends + # on them, and that would cause an infinite cycle of rebuilding. + # Neither the *.h or *.ccp files need to be touched, since they have + # rules which do not update them when the generator file has not + # changed their contents. + $(QUIETLY) if [ -f lib$(GENOFFS).so ]; then touch lib$(GENOFFS).so; fi + $(QUIETLY) if [ -f $(GENOFFS) ]; then touch $(GENOFFS); fi + $(QUIETLY) if [ -f $(JVMOFFS.o) ]; then touch $(JVMOFFS.o); fi + +.PHONY: dtraceCheck + +SYSTEM_DTRACE_H = /usr/include/dtrace.h +SYSTEM_DTRACE_PROG = /usr/sbin/dtrace +PATCH_DTRACE_PROG = /opt/SUNWdtrd/sbin/dtrace +systemDtraceFound := $(wildcard ${SYSTEM_DTRACE_PROG}) +patchDtraceFound := $(wildcard ${PATCH_DTRACE_PROG}) +systemDtraceHdrFound := $(wildcard $(SYSTEM_DTRACE_H)) + +ifneq ("$(systemDtraceHdrFound)", "") +CFLAGS += -DHAVE_DTRACE_H +endif + +ifneq ("$(patchDtraceFound)", "") +DTRACE_PROG=$(PATCH_DTRACE_PROG) +DTRACE_INCL=-I/opt/SUNWdtrd/include +else +ifneq ("$(systemDtraceFound)", "") +DTRACE_PROG=$(SYSTEM_DTRACE_PROG) +else + +endif # ifneq ("$(systemDtraceFound)", "") +endif # ifneq ("$(patchDtraceFound)", "") + +ifneq ("${DTRACE_PROG}", "") +ifeq ("${HOTSPOT_DISABLE_DTRACE_PROBES}", "") + +DTRACE_OBJS = $(DTRACE.o) $(JVMOFFS.o) +CFLAGS += $(DTRACE_INCL) -DDTRACE_ENABLED +MAPFILE_DTRACE_OPT = $(MAPFILE_DTRACE) + +dtraceCheck: + +else # manually disabled + +dtraceCheck: + $(QUIETLY) echo "**NOTICE** Dtrace support disabled via environment variable" + +endif # ifeq ("${HOTSPOT_DISABLE_DTRACE_PROBES}", "") + +else # No dtrace program found + +dtraceCheck: + $(QUIETLY) echo "**NOTICE** Dtrace support disabled: not supported by system" + +endif # ifneq ("${dtraceFound}", "") + +endif # ifdef USE_GCC + +else # KERNEL build + +dtraceCheck: + $(QUIETLY) echo "**NOTICE** Dtrace support disabled for KERNEL builds" + +endif # ifneq ("${TYPE}", "KERNEL") + +else # CORE build + +dtraceCheck: + $(QUIETLY) echo "**NOTICE** Dtrace support disabled for CORE builds" + +endif # ifneq ("${TYPE}", "CORE") --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/fastdebug.make 2009-08-01 04:16:40.014311465 +0100 @@ -0,0 +1,122 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. 
+# +# + +# Sets make macros for making debug version of VM + +# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make +# They may also specify FASTDEBUG_CFLAGS, but it defaults to DEBUG_CFLAGS. + +FASTDEBUG_CFLAGS$(FASTDEBUG_CFLAGS) = $(DEBUG_CFLAGS) + +# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make +OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS) +OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@)) + +ifeq ("${Platform_compiler}", "sparcWorks") +OPT_CFLAGS/SLOWER = -xO2 + +# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876) +ifeq ($(COMPILER_REV_NUMERIC), 509) + # To avoid jvm98 crash + OPT_CFLAGS/instanceKlass.o = $(OPT_CFLAGS/SLOWER) + # Not clear this workaround could be skipped in some cases. + OPT_CFLAGS/vmGCOperations.o = $(OPT_CFLAGS/SLOWER) + OPT_CFLAGS/java.o = $(OPT_CFLAGS/SLOWER) + OPT_CFLAGS/jni.o = $(OPT_CFLAGS/SLOWER) +endif + +ifeq ($(COMPILER_REV_NUMERIC), 505) +# CC 5.5 has bug 4908364 with -xO4 (Fixed in 5.6) +OPT_CFLAGS/library_call.o = $(OPT_CFLAGS/SLOWER) +endif # COMPILER_REV_NUMERIC == 505 + +ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \<= 504), 1) +# Compilation of *_.cpp can take an hour or more at O3. Use O2 +# See comments at top of sparc.make. +OPT_CFLAGS/ad_$(Platform_arch_model).o = $(OPT_CFLAGS/SLOWER) +OPT_CFLAGS/dfa_$(Platform_arch_model).o = $(OPT_CFLAGS/SLOWER) +endif # COMPILER_REV_NUMERIC <= 504 + +ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 505), 1) +# Same problem with Solaris/x86 compiler (both 5.0 and 5.2) on ad_x86_{32,64}.cpp. +# CC build time is also too long for ad_$(Platform_arch_model)_{gen,misc}.o +OPT_CFLAGS/ad_$(Platform_arch_model).o = -c +OPT_CFLAGS/ad_$(Platform_arch_model)_gen.o = -c +OPT_CFLAGS/ad_$(Platform_arch_model)_misc.o = -c +ifeq ($(Platform_arch), x86) +# Same problem for the wrapper roosts: jni.o jvm.o +OPT_CFLAGS/jni.o = -c +OPT_CFLAGS/jvm.o = -c +# Same problem in parse2.o (probably the Big Switch over bytecodes) +OPT_CFLAGS/parse2.o = -c +endif # Platform_arch == x86 +endif + +# Frame size > 100k if we allow inlining via -g0! +DEBUG_CFLAGS/bytecodeInterpreter.o = -g +DEBUG_CFLAGS/bytecodeInterpreterWithChecks.o = -g +ifeq ($(Platform_arch), x86) +# ube explodes on x86 +OPT_CFLAGS/bytecodeInterpreter.o = -xO1 +OPT_CFLAGS/bytecodeInterpreterWithChecks.o = -xO1 +endif # Platform_arch == x86 + +endif # Platform_compiler == sparcWorks + +# Workaround for a bug in dtrace. If ciEnv::post_compiled_method_load_event() +# is inlined, the resulting dtrace object file needs a reference to this +# function, whose symbol name is too long for dtrace. So disable inlining +# for this method for now. (fix this when dtrace bug 6258412 is fixed) +OPT_CFLAGS/ciEnv.o = $(OPT_CFLAGS) -xinline=no%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_ + + +# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files) + +# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings +CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE) + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. + +# The following lines are copied from debug.make, except that we +# consult FASTDEBUG_CFLAGS instead of DEBUG_CFLAGS. 
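The OPT_CFLAGS/BYFILE macro above (and the DEBUG_CFLAGS/BYFILE macro copied from debug.make just below) relies on a GNU make name-concatenation idiom: a per-file setting such as OPT_CFLAGS/instanceKlass.o takes precedence, and only when no per-file value exists does the /DEFAULT value survive, because concatenating a non-empty per-file value onto "OPT_CFLAGS/DEFAULT" produces a variable name that is not defined and therefore expands to nothing. A worked sketch, using illustrative flag values (the real default comes from the compiler makefile):

# Suppose (illustrative values only):
#   OPT_CFLAGS/DEFAULT         = -xO4
#   OPT_CFLAGS/instanceKlass.o = -xO2
# While compiling instanceKlass.o ($@ = instanceKlass.o):
#   $(OPT_CFLAGS/$@)                      -> -xO2
#   $(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@)) -> $(OPT_CFLAGS/DEFAULT-xO2) -> undefined, expands to nothing
#   OPT_CFLAGS/BYFILE                     -> -xO2
# While compiling any file with no per-file override:
#   $(OPT_CFLAGS/$@)                      -> empty
#   $(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@)) -> $(OPT_CFLAGS/DEFAULT) -> -xO4
#   OPT_CFLAGS/BYFILE                     -> -xO4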
+# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make +DEBUG_CFLAGS/DEFAULT= $(FASTDEBUG_CFLAGS) +DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) +CFLAGS += $(DEBUG_CFLAGS/BYFILE) + +# Linker mapfiles +MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \ + $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \ + $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct + +# This mapfile is only needed when compiling with dtrace support, +# and mustn't be otherwise. +MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE) + + +G_SUFFIX = +VERSION = optimized +SYSDEFS += -DASSERT -DFASTDEBUG -DCHECK_UNHANDLED_OOPS +PICFLAGS = DEFAULT --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/gcc.make 2009-08-01 04:16:40.456394312 +0100 @@ -0,0 +1,183 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +#------------------------------------------------------------------------ +# CC, CPP & AS + +CPP = g++ +CC = gcc +AS = $(CC) -c + +Compiler = gcc + +# -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only +# prints the numbers (e.g. "2.95", "3.2.1") +CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1) +CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2) + +# Check for the versions of C++ and C compilers ($CPP and $CC) used. + +# Get the last thing on the line that looks like x.x+ (x is a digit). +COMPILER_REV := \ +$(shell $(CPP) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1) +C_COMPILER_REV := \ +$(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2) + + +# check for precompiled headers support +ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0" +USE_PRECOMPILED_HEADER=1 +PRECOMPILED_HEADER_DIR=. 
+PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/incls/_precompiled.incl.gch +endif + + +#------------------------------------------------------------------------ +# Compiler flags + +# position-independent code +PICFLAG = -fPIC + +VM_PICFLAG/LIBJVM = $(PICFLAG) +VM_PICFLAG/AOUT = +VM_PICFLAG = $(VM_PICFLAG/$(LINK_INTO)) + +CFLAGS += $(VM_PICFLAG) +CFLAGS += -fno-rtti +CFLAGS += -fno-exceptions +CFLAGS += -D_REENTRANT +CFLAGS += -fcheck-new + +ARCHFLAG = $(ARCHFLAG/$(BUILDARCH)) + +ARCHFLAG/sparc = -m32 -mcpu=v9 +ARCHFLAG/sparcv9 = -m64 -mcpu=v9 +ARCHFLAG/i486 = -m32 -march=i586 +ARCHFLAG/amd64 = -m64 -march=k8 + + +# Optional sub-directory in /usr/lib where BUILDARCH libraries are kept. +ISA_DIR=$(ISA_DIR/$(BUILDARCH)) +ISA_DIR/amd64=/amd64 +ISA_DIR/i486= +ISA_DIR/sparcv9=/64 + + +CFLAGS += $(ARCHFLAG) +AOUT_FLAGS += $(ARCHFLAG) +LFLAGS += $(ARCHFLAG) +ASFLAGS += $(ARCHFLAG) + +ifeq ($(BUILDARCH), amd64) +ASFLAGS += -march=k8 -march=amd64 +LFLAGS += -march=k8 +endif + + +# Use C++ Interpreter +ifdef CC_INTERP + CFLAGS += -DCC_INTERP +endif + +# Keep temporary files (.ii, .s) +ifdef NEED_ASM + CFLAGS += -save-temps +else + CFLAGS += -pipe +endif + + +# Compiler warnings are treated as errors +WARNINGS_ARE_ERRORS = -Werror +# Enable these warnings. See 'info gcc' about details on these options +ADDITIONAL_WARNINGS = -Wpointer-arith -Wconversion -Wsign-compare +CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(ADDITIONAL_WARNINGS) +# Special cases +CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@)) + +# The flags to use for an Optimized g++ build +OPT_CFLAGS += -O3 + +# Hotspot uses very unstrict aliasing turn this optimization off +OPT_CFLAGS += -fno-strict-aliasing + +# The gcc compiler segv's on ia64 when compiling bytecodeInterpreter.cpp +# if we use expensive-optimizations +# Note: all ia64 setting reflect the ones for linux +# No actial testing was performed: there is no Solaris on ia64 presently +ifeq ($(BUILDARCH), ia64) +OPT_CFLAGS/bytecodeInterpreter.o += -fno-expensive-optimizations +endif + +OPT_CFLAGS/NOOPT=-O0 +#------------------------------------------------------------------------ +# Linker flags + +# statically link libstdc++.so, work with gcc but ignored by g++ +STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic + +# statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x. +ifneq ("${CC_VER_MAJOR}", "2") +STATIC_LIBGCC += -static-libgcc +endif + +ifeq ($(BUILDARCH), ia64) +# Note: all ia64 setting reflect the ones for linux +# No actial testing was performed: there is no Solaris on ia64 presently +LFLAGS += -Wl,-relax +endif + +ifdef USE_GNULD +# Enable linker optimization +LFLAGS += -Xlinker -O1 + +# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file. +MAPFLAG = -Xlinker --version-script=FILENAME +else +MAPFLAG = -Xlinker -M -Xlinker FILENAME +endif + +# Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj +SONAMEFLAG = -Xlinker -soname=SONAME + +# Build shared library +SHARED_FLAG = -shared + +#------------------------------------------------------------------------ +# Debug flags + +# Use the stabs format for debugging information (this is the default +# on gcc-2.91). It's good enough, has all the information about line +# numbers and local variables, and libjvm_g.so is only about 16M. +# Change this back to "-g" if you want the most expressive format. +# (warning: that could easily inflate libjvm_g.so to 150M!) +# Note: The Itanium gcc compiler crashes when using -gstabs. 
+DEBUG_CFLAGS/ia64 = -g +DEBUG_CFLAGS/amd64 = -g +DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH)) +ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),) +DEBUG_CFLAGS += -gstabs +endif + +MCS = /usr/ccs/bin/mcs --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/hp.make 2009-08-01 04:16:40.905252826 +0100 @@ -0,0 +1,29 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making premium version of VM + +TYPE=HP + +CFLAGS += -DCOMPILER2 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/hp1.make 2009-08-01 04:16:41.316293548 +0100 @@ -0,0 +1,29 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making premium version of VM + +TYPE=HP1 + +CFLAGS += -DCOMPILER1 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/i486.make 2009-08-01 04:16:41.708690886 +0100 @@ -0,0 +1,63 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Must also specify if CPU is little endian +CFLAGS += -DVM_LITTLE_ENDIAN + +# TLS helper, assembled from .s file +# Not included in includeDB because it has no dependencies +Obj_Files += solaris_x86_32.o + +# +# Special case flags for compilers and compiler versions on i486. +# +ifeq ("${Platform_compiler}", "sparcWorks") + +# _lwp_create_interpose must have a frame +OPT_CFLAGS/os_solaris_x86.o = -xO1 +else + +ifeq ("${Platform_compiler}", "gcc") +# gcc +# _lwp_create_interpose must have a frame +OPT_CFLAGS/os_solaris_x86.o = -fno-omit-frame-pointer +# +else +# error +_JUNK2_ := $(shell echo >&2 \ + "*** ERROR: this compiler is not yet supported by this code base!") + @exit 1 +endif +endif + +ifeq ("${Platform_compiler}", "sparcWorks") +# ILD is gone as of SS11 (5.8), not supported in SS10 (5.7) +ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 507), 1) + # + # Bug in ild causes it to fail randomly. Until we get a fix we can't + # use ild. + # + ILDFLAG/debug = -xildoff +endif +endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/jsig.make 2009-08-01 04:16:42.132742971 +0100 @@ -0,0 +1,54 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Rules to build signal interposition library, used by vm.make + +# libjsig[_g].so: signal interposition library +JSIG = jsig$(G_SUFFIX) +LIBJSIG = lib$(JSIG).so + +JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm + +DEST_JSIG = $(JDK_LIBDIR)/$(LIBJSIG) + +LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig + +LFLAGS_JSIG += $(MAPFLAG:FILENAME=$(LIBJSIG_MAPFILE)) + +ifdef USE_GCC +LFLAGS_JSIG += -D_REENTRANT +else +LFLAGS_JSIG += -mt -xnolib +endif + +$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE) + @echo Making signal interposition lib... 
+ $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \ + $(LFLAGS_JSIG) -o $@ $< -ldl + +install_jsig: $(LIBJSIG) + @echo "Copying $(LIBJSIG) to $(DEST_JSIG)" + $(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done" + +.PHONY: install_jsig --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/jvmg.make 2009-08-01 04:16:42.559894035 +0100 @@ -0,0 +1,57 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making debug version of VM + +# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make +DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) +DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) + +ifeq ("${Platform_compiler}", "sparcWorks") + +ifeq ($(COMPILER_REV_NUMERIC),508) + # SS11 SEGV when compiling with -g and -xarch=v8, using different backend + DEBUG_CFLAGS/compileBroker.o = $(DEBUG_CFLAGS) -xO0 + DEBUG_CFLAGS/jvmtiTagMap.o = $(DEBUG_CFLAGS) -xO0 +endif +endif + +CFLAGS += $(DEBUG_CFLAGS/BYFILE) + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. + +# Linker mapfiles +MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \ + $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \ + $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct + +# This mapfile is only needed when compiling with dtrace support, +# and mustn't be otherwise. +MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE) + +G_SUFFIX = +VERSION = debug +SYSDEFS += -DASSERT -DDEBUG +PICFLAGS = DEFAULT --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/jvmti.make 2009-08-01 04:16:43.012353045 +0100 @@ -0,0 +1,117 @@ +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). 
+# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This makefile (jvmti.make) is included from the jvmti.make in the +# build directories. +# +# It knows how to build and run the tools to generate jvmti. + +include $(GAMMADIR)/make/solaris/makefiles/rules.make + +# ######################################################################### + +GENERATED = ../generated +JvmtiOutDir = $(GENERATED)/jvmtifiles + +JvmtiSrcDir = $(GAMMADIR)/src/share/vm/prims +InterpreterSrcDir = $(GAMMADIR)/src/share/vm/interpreter +Src_Dirs += $(JvmtiSrcDir) + +# set VPATH so make knows where to look for source files +Src_Dirs_V = ${Src_Dirs} +VPATH += $(Src_Dirs_V:%=%:) + +JvmtiGeneratedNames = \ + jvmtiEnv.hpp \ + jvmtiEnter.cpp \ + jvmtiEnterTrace.cpp \ + jvmtiEnvRecommended.cpp\ + bytecodeInterpreterWithChecks.cpp\ + jvmti.h \ + +JvmtiEnvFillSource = $(JvmtiSrcDir)/jvmtiEnvFill.java +JvmtiEnvFillClass = $(JvmtiOutDir)/jvmtiEnvFill.class + +JvmtiGenSource = $(JvmtiSrcDir)/jvmtiGen.java +JvmtiGenClass = $(JvmtiOutDir)/jvmtiGen.class + +JvmtiGeneratedFiles = $(JvmtiGeneratedNames:%=$(JvmtiOutDir)/%) + +XSLT = $(QUIETLY) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen + +.PHONY: all jvmtidocs clean cleanall + +# ######################################################################### + +all: $(JvmtiGeneratedFiles) + +both = $(JvmtiGenClass) $(JvmtiSrcDir)/jvmti.xml $(JvmtiSrcDir)/jvmtiLib.xsl + +$(JvmtiGenClass): $(JvmtiGenSource) + $(QUIETLY) $(COMPILE.JAVAC) -g -d $(JvmtiOutDir) $(JvmtiGenSource) + +$(JvmtiEnvFillClass): $(JvmtiEnvFillSource) + $(QUIETLY) $(COMPILE.JAVAC) -g -d $(JvmtiOutDir) $(JvmtiEnvFillSource) + +$(JvmtiOutDir)/jvmtiEnter.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnter.cpp -PARAM interface jvmti + +$(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp: $(JvmtiGenClass) $(InterpreterSrcDir)/bytecodeInterpreter.cpp $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl + @echo Generating $@ + $(XSLT) -IN $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml -XSL $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl -OUT $(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp + +$(JvmtiOutDir)/jvmtiEnterTrace.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnterTrace.cpp -PARAM interface jvmti -PARAM trace Trace + +$(JvmtiOutDir)/jvmtiEnvRecommended.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnv.xsl $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiEnvFillClass) + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnv.xsl -OUT $(JvmtiOutDir)/jvmtiEnvStub.cpp + $(QUIETLY) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiEnvFill $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiOutDir)/jvmtiEnvStub.cpp $(JvmtiOutDir)/jvmtiEnvRecommended.cpp + +$(JvmtiOutDir)/jvmtiEnv.hpp: $(both) $(JvmtiSrcDir)/jvmtiHpp.xsl + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiHpp.xsl -OUT $(JvmtiOutDir)/jvmtiEnv.hpp + +$(JvmtiOutDir)/jvmti.h: $(both) 
$(JvmtiSrcDir)/jvmtiH.xsl + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiH.xsl -OUT $(JvmtiOutDir)/jvmti.h + +jvmtidocs: $(JvmtiOutDir)/jvmti.html + +$(JvmtiOutDir)/jvmti.html: $(both) $(JvmtiSrcDir)/jvmti.xsl + @echo Generating $@ + $(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmti.xsl -OUT $(JvmtiOutDir)/jvmti.html + +# ######################################################################### + +clean : + rm $(JvmtiGenClass) $(JvmtiEnvFillClass) $(JvmtiGeneratedFiles) + +cleanall : + rm $(JvmtiGenClass) $(JvmtiEnvFillClass) $(JvmtiGeneratedFiles) + +# ######################################################################### + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/kernel.make 2009-08-01 04:16:43.466771009 +0100 @@ -0,0 +1,32 @@ +# +# Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# +# +# Sets make macros for making kernel version of VM. +# This target on solaris is just temporary, for debugging the kernel build. + +TYPE=KERNEL + +VM_SUBDIR = client + +CFLAGS += -DKERNEL --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/launcher.make 2009-08-01 04:16:43.875784377 +0100 @@ -0,0 +1,92 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions.
+# +# + +# Rules to build gamma launcher, used by vm.make + +# gamma[_g]: launcher +LAUNCHER = gamma$(G_SUFFIX) + +LAUNCHERDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher +LAUNCHERFLAGS = $(ARCHFLAG) \ + -I$(LAUNCHERDIR) -I$(GAMMADIR)/src/share/vm/prims \ + -DFULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \ + -DARCH=\"$(LIBARCH)\" \ + -DGAMMA \ + -DLAUNCHER_TYPE=\"gamma\" \ + -DLINK_INTO_$(LINK_INTO) + +ifeq ($(LINK_INTO),AOUT) + LAUNCHER.o = launcher.o $(JVM_OBJ_FILES) + LAUNCHER_MAPFILE = mapfile_reorder + LFLAGS_LAUNCHER$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LAUNCHER_MAPFILE)) + LIBS_LAUNCHER += $(LIBS) +else + LAUNCHER.o = launcher.o + LFLAGS_LAUNCHER += -L `pwd` + LIBS_LAUNCHER += -l$(JVM) $(LIBS) +endif + +LINK_LAUNCHER = $(LINK.CC) + +LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CC/PRE_HOOK) +LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CC/POST_HOOK) + +ifeq ("${Platform_compiler}", "sparcWorks") +# Enable the following LAUNCHERFLAGS addition if you need to compare the +# built ELF objects. +# +# The -g option makes static data global and the "-W0,-noglobal" +# option tells the compiler to not globalize static data using a unique +# globalization prefix. Instead force the use of a static globalization +# prefix based on the source filepath so the objects from two identical +# compilations are the same. +# +# Note: The blog says to use "-W0,-xglobalstatic", but that doesn't +# seem to work. I got "-W0,-noglobal" from Kelly and that works. +#LAUNCHERFLAGS += -W0,-noglobal +endif # Platform_compiler == sparcWorks + +launcher.o: launcher.c $(LAUNCHERDIR)/java.c $(LAUNCHERDIR)/java_md.c + $(CC) -g -c -o $@ launcher.c $(LAUNCHERFLAGS) $(CPPFLAGS) + +launcher.c: + @echo Generating $@ + $(QUIETLY) { \ + echo '#define debug launcher_debug'; \ + echo '#include "java.c"'; \ + echo '#include "java_md.c"'; \ + } > $@ + +$(LAUNCHER): $(LAUNCHER.o) $(LIBJVM) $(LAUNCHER_MAPFILE) + $(QUIETLY) \ + case "$(CFLAGS_BROWSE)" in \ + -sbfast|-xsbfast) \ + ;; \ + *) \ + echo Linking launcher...; \ + $(LINK_LAUNCHER/PRE_HOOK) \ + $(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \ + $(LINK_LAUNCHER/POST_HOOK) \ + ;; \ + esac --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/makedeps.make 2009-08-01 04:16:44.305687882 +0100 @@ -0,0 +1,43 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. 
+# +# + +include $(GAMMADIR)/make/solaris/makefiles/rules.make + +COMPILE.JAVAC.FLAGS += -d $(OUTDIR) + +MakeDepsSources=\ + $(GAMMADIR)/src/share/tools/MakeDeps/Database.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/DirectoryTree.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/DirectoryTreeNode.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/FileFormatException.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/FileList.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/FileName.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/Macro.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/MacroDefinitions.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/MakeDeps.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/MetroWerksMacPlatform.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/Platform.java \ + $(GAMMADIR)/src/share/tools/MakeDeps/UnixPlatform.java + +MakeDepsOptions= --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/mapfile-vers 2009-08-01 04:16:44.735404214 +0100 @@ -0,0 +1,260 @@ +# +# @(#)mapfile-vers 1.32 07/10/25 16:47:36 +# + +# +# Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define public interface. 
+ +SUNWprivate_1.1 { + global: + # JNI + JNI_CreateJavaVM; + JNI_GetCreatedJavaVMs; + JNI_GetDefaultJavaVMInitArgs; + + # JVM + JVM_Accept; + JVM_ActiveProcessorCount; + JVM_AllocateNewArray; + JVM_AllocateNewObject; + JVM_ArrayCopy; + JVM_AssertionStatusDirectives; + JVM_Available; + JVM_Bind; + JVM_ClassDepth; + JVM_ClassLoaderDepth; + JVM_Clone; + JVM_Close; + JVM_CX8Field; + JVM_CompileClass; + JVM_CompileClasses; + JVM_CompilerCommand; + JVM_Connect; + JVM_ConstantPoolGetClassAt; + JVM_ConstantPoolGetClassAtIfLoaded; + JVM_ConstantPoolGetDoubleAt; + JVM_ConstantPoolGetFieldAt; + JVM_ConstantPoolGetFieldAtIfLoaded; + JVM_ConstantPoolGetFloatAt; + JVM_ConstantPoolGetIntAt; + JVM_ConstantPoolGetLongAt; + JVM_ConstantPoolGetMethodAt; + JVM_ConstantPoolGetMethodAtIfLoaded; + JVM_ConstantPoolGetMemberRefInfoAt; + JVM_ConstantPoolGetSize; + JVM_ConstantPoolGetStringAt; + JVM_ConstantPoolGetUTF8At; + JVM_CountStackFrames; + JVM_CurrentClassLoader; + JVM_CurrentLoadedClass; + JVM_CurrentThread; + JVM_CurrentTimeMillis; + JVM_DefineClass; + JVM_DefineClassWithSource; + JVM_DesiredAssertionStatus; + JVM_DisableCompiler; + JVM_DoPrivileged; + JVM_DTraceGetVersion; + JVM_DTraceActivate; + JVM_DTraceIsProbeEnabled; + JVM_DTraceIsSupported; + JVM_DTraceDispose; + JVM_DumpAllStacks; + JVM_DumpThreads; + JVM_EnableCompiler; + JVM_Exit; + JVM_FillInStackTrace; + JVM_FindClassFromClass; + JVM_FindClassFromClassLoader; + JVM_FindClassFromBootLoader; + JVM_FindLibraryEntry; + JVM_FindLoadedClass; + JVM_FindPrimitiveClass; + JVM_FindSignal; + JVM_FreeMemory; + JVM_GC; + JVM_GetAllThreads; + JVM_GetArrayElement; + JVM_GetArrayLength; + JVM_GetCPClassNameUTF; + JVM_GetCPFieldClassNameUTF; + JVM_GetCPFieldModifiers; + JVM_GetCPFieldNameUTF; + JVM_GetCPFieldSignatureUTF; + JVM_GetCPMethodClassNameUTF; + JVM_GetCPMethodModifiers; + JVM_GetCPMethodNameUTF; + JVM_GetCPMethodSignatureUTF; + JVM_GetCallerClass; + JVM_GetClassAccessFlags; + JVM_GetClassAnnotations; + JVM_GetClassCPEntriesCount; + JVM_GetClassCPTypes; + JVM_GetClassConstantPool; + JVM_GetClassContext; + JVM_GetClassDeclaredConstructors; + JVM_GetClassDeclaredFields; + JVM_GetClassDeclaredMethods; + JVM_GetClassFieldsCount; + JVM_GetClassInterfaces; + JVM_GetClassLoader; + JVM_GetClassMethodsCount; + JVM_GetClassModifiers; + JVM_GetClassName; + JVM_GetClassNameUTF; + JVM_GetClassSignature; + JVM_GetClassSigners; + JVM_GetComponentType; + JVM_GetDeclaredClasses; + JVM_GetDeclaringClass; + JVM_GetEnclosingMethodInfo; + JVM_GetFieldAnnotations; + JVM_GetFieldIxModifiers; + JVM_GetHostName; + JVM_GetInheritedAccessControlContext; + JVM_GetInterfaceVersion; + JVM_GetLastErrorString; + JVM_GetManagement; + JVM_GetMethodAnnotations; + JVM_GetMethodDefaultAnnotationValue; + JVM_GetMethodIxArgsSize; + JVM_GetMethodIxByteCode; + JVM_GetMethodIxByteCodeLength; + JVM_GetMethodIxExceptionIndexes; + JVM_GetMethodIxExceptionTableEntry; + JVM_GetMethodIxExceptionTableLength; + JVM_GetMethodIxExceptionsCount; + JVM_GetMethodIxLocalsCount; + JVM_GetMethodIxMaxStack; + JVM_GetMethodIxModifiers; + JVM_GetMethodIxNameUTF; + JVM_GetMethodIxSignatureUTF; + JVM_GetMethodParameterAnnotations; + JVM_GetPrimitiveArrayElement; + JVM_GetProtectionDomain; + JVM_GetSockName; + JVM_GetSockOpt; + JVM_GetStackAccessControlContext; + JVM_GetStackTraceDepth; + JVM_GetStackTraceElement; + JVM_GetSystemPackage; + JVM_GetSystemPackages; + JVM_GetThreadStateNames; + JVM_GetThreadStateValues; + JVM_GetVersionInfo; + JVM_Halt; + JVM_HoldsLock; + JVM_IHashCode; + JVM_InitAgentProperties; + 
JVM_InitProperties; + JVM_InitializeCompiler; + JVM_InitializeSocketLibrary; + JVM_InternString; + JVM_Interrupt; + JVM_InvokeMethod; + JVM_IsArrayClass; + JVM_IsConstructorIx; + JVM_IsInterface; + JVM_IsInterrupted; + JVM_IsNaN; + JVM_IsPrimitiveClass; + JVM_IsSameClassPackage; + JVM_IsSilentCompiler; + JVM_IsSupportedJNIVersion; + JVM_IsThreadAlive; + JVM_LatestUserDefinedLoader; + JVM_Listen; + JVM_LoadClass0; + JVM_LoadLibrary; + JVM_Lseek; + JVM_MaxObjectInspectionAge; + JVM_MaxMemory; + JVM_MonitorNotify; + JVM_MonitorNotifyAll; + JVM_MonitorWait; + JVM_NativePath; + JVM_NanoTime; + JVM_NewArray; + JVM_NewInstanceFromConstructor; + JVM_NewMultiArray; + JVM_OnExit; + JVM_Open; + JVM_PrintStackTrace; + JVM_RaiseSignal; + JVM_RawMonitorCreate; + JVM_RawMonitorDestroy; + JVM_RawMonitorEnter; + JVM_RawMonitorExit; + JVM_Read; + JVM_Recv; + JVM_RecvFrom; + JVM_RegisterSignal; + JVM_ReleaseUTF; + JVM_ResolveClass; + JVM_ResumeThread; + JVM_Send; + JVM_SendTo; + JVM_SetArrayElement; + JVM_SetClassSigners; + JVM_SetLength; + JVM_SetPrimitiveArrayElement; + JVM_SetProtectionDomain; + JVM_SetSockOpt; + JVM_SetThreadPriority; + JVM_Sleep; + JVM_Socket; + JVM_SocketAvailable; + JVM_SocketClose; + JVM_SocketShutdown; + JVM_StartThread; + JVM_StopThread; + JVM_SuspendThread; + JVM_SupportsCX8; + JVM_Sync; + JVM_Timeout; + JVM_TotalMemory; + JVM_TraceInstructions; + JVM_TraceMethodCalls; + JVM_UnloadLibrary; + JVM_Write; + JVM_Yield; + JVM_handle_solaris_signal; + + # miscellaneous functions + jio_fprintf; + jio_printf; + jio_snprintf; + jio_vfprintf; + jio_vsnprintf; + + # Needed because there is no JVM interface for this. + sysThreadAvailableStackWithSlack; + + # This is for Forte Analyzer profiling support. + AsyncGetCallTrace; + local: + *; +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/mapfile-vers-COMPILER1 2009-08-01 04:16:45.193480502 +0100 @@ -0,0 +1,45 @@ +# +# @(#)mapfile-vers-COMPILER1 1.9 07/05/05 17:03:58 +# + +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define public interface. 
+ +SUNWprivate_1.1 { + global: + # Dtrace support + __1cIUniverseP_methodKlassObj_; + __1cJCodeCacheF_heap_; + __1cIUniverseO_collectedHeap_; + __1cHnmethodG__vtbl_; + __1cICodeBlobG__vtbl_; + __1cKBufferBlobG__vtbl_; + __1cLRuntimeStubG__vtbl_; + __1cNSafepointBlobG__vtbl_; + __1cSDeoptimizationBlobG__vtbl_; + + __JvmOffsets; +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/mapfile-vers-COMPILER2 2009-08-01 04:16:45.618772203 +0100 @@ -0,0 +1,48 @@ +# +# @(#)mapfile-vers-COMPILER2 1.8 07/05/05 17:03:58 +# + +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define public interface. + +SUNWprivate_1.1 { + global: + # Dtrace support + __1cIUniverseP_methodKlassObj_; + __1cJCodeCacheF_heap_; + __1cIUniverseO_collectedHeap_; + __1cHnmethodG__vtbl_; + __1cICodeBlobG__vtbl_; + __1cKBufferBlobG__vtbl_; + __1cLRuntimeStubG__vtbl_; + __1cNSafepointBlobG__vtbl_; + __1cSDeoptimizationBlobG__vtbl_; + + __1cNExceptionBlobG__vtbl_; + __1cQUncommonTrapBlobG__vtbl_; + + __JvmOffsets; +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/mapfile-vers-CORE 2009-08-01 04:16:46.028325004 +0100 @@ -0,0 +1,33 @@ +# +# @(#)mapfile-vers-CORE 1.5 07/05/05 17:03:59 +# + +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define public interface. 
+ +SUNWprivate_1.1 { + global: +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/mapfile-vers-TIERED 2009-08-01 04:16:46.444890238 +0100 @@ -0,0 +1,47 @@ +# +# @(#)mapfile-vers-TIERED 1.4 07/05/05 17:03:58 +# + +# +# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define public interface. + +SUNWprivate_1.1 { + global: + # Dtrace support + __1cIUniverseP_methodKlassObj_; + __1cJCodeCacheF_heap_; + __1cIUniverseO_collectedHeap_; + __1cHnmethodG__vtbl_; + __1cICodeBlobG__vtbl_; + __1cKBufferBlobG__vtbl_; + __1cLRuntimeStubG__vtbl_; + __1cNSafepointBlobG__vtbl_; + __1cSDeoptimizationBlobG__vtbl_; + __1cNExceptionBlobG__vtbl_; + __1cQUncommonTrapBlobG__vtbl_; + + __JvmOffsets; +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/mapfile-vers-debug 2009-08-01 04:16:46.853411755 +0100 @@ -0,0 +1,39 @@ +# +# @(#)mapfile-vers-debug 1.9 07/05/05 17:03:59 +# + +# +# Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define public interface. + +SUNWprivate_1.1 { + global: + # debug JVM + JVM_AccessVMBooleanFlag; + JVM_AccessVMIntFlag; + JVM_VMBreakPoint; + + # miscellaneous +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/mapfile-vers-jsig 2009-08-01 04:16:47.262194865 +0100 @@ -0,0 +1,42 @@ +# +# @(#)mapfile-vers-jsig 1.4 07/05/05 17:03:59 +# + +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define library interface. + +SUNWprivate_1.1 { + global: + JVM_begin_signal_setting; + JVM_end_signal_setting; + JVM_get_libjsig_version; + JVM_get_signal_action; + sigaction; + signal; + sigset; + local: + *; +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/mapfile-vers-jvm_db 2009-08-01 04:16:47.670738231 +0100 @@ -0,0 +1,40 @@ +# +# @(#)mapfile-vers-jvm_db 1.5 07/05/05 17:03:58 +# + +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define library interface. + +SUNWprivate_1.1 { + global: + Jagent_create; + Jagent_destroy; + Jframe_iter; + #Jget_vframe; + #Jlookup_by_regs; + local: + *; +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/mapfile-vers-jvm_dtrace 2009-08-01 04:16:48.064323628 +0100 @@ -0,0 +1,39 @@ +# +# @(#)mapfile-vers-jvm_dtrace 1.4 07/05/05 17:03:58 +# + +# +# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). 
+# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define library interface for JVM-DTrace interface + +SUNWprivate_1.1 { + global: + jvm_attach; + jvm_get_last_error; + jvm_enable_dtprobes; + jvm_detach; + local: + *; +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/mapfile-vers-nonproduct 2009-08-01 04:16:48.473155983 +0100 @@ -0,0 +1,50 @@ +# +# @(#)mapfile-vers-nonproduct 1.8 07/05/05 17:03:59 +# + +# +# Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Define public interface. + +SUNWprivate_1.1 { + global: + # Old reflection routines + # These do not need to be present in the product build in JDK 1.4 + # but their code has not been removed yet because there will not + # be a substantial code savings until JVM_InvokeMethod and + # JVM_NewInstanceFromConstructor can also be removed; see + # reflectionCompat.hpp. + JVM_GetClassConstructor; + JVM_GetClassConstructors; + JVM_GetClassField; + JVM_GetClassFields; + JVM_GetClassMethod; + JVM_GetClassMethods; + JVM_GetField; + JVM_GetPrimitiveField; + JVM_NewInstance; + JVM_SetField; + JVM_SetPrimitiveField; +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/optimized.make 2009-08-01 04:16:48.898499709 +0100 @@ -0,0 +1,66 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making optimized version of Gamma VM +# (This is the "product", not the "release" version.) + +# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make +OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS) +OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@)) + +# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files) +ifeq ("${Platform_compiler}", "sparcWorks") + +# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876) +ifeq ($(COMPILER_REV_NUMERIC),509) + # Not clear this workaround could be skipped in some cases. + OPT_CFLAGS/vmGCOperations.o = $(OPT_CFLAGS/SLOWER) -g + OPT_CFLAGS/java.o = $(OPT_CFLAGS/SLOWER) -g + OPT_CFLAGS/jni.o = $(OPT_CFLAGS/SLOWER) -g +endif + +# Workaround SS11 bug 6345274 (all platforms) (Fixed in SS11 patch and SS12) +ifeq ($(COMPILER_REV_NUMERIC),508) +OPT_CFLAGS/ciTypeFlow.o = $(OPT_CFLAGS/O2) +endif # COMPILER_REV_NUMERIC == 508 + +endif # Platform_compiler == sparcWorks + +# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings +CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE) + +# Linker mapfiles +# NOTE: inclusion of nonproduct mapfile not necessary; read it for details +MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \ + $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct + +# This mapfile is only needed when compiling with dtrace support, +# and mustn't be otherwise. +MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE) + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. + +G_SUFFIX = +VERSION = optimized --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/product.make 2009-08-01 04:16:49.324110111 +0100 @@ -0,0 +1,84 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making optimized version of Gamma VM +# (This is the "product", not the "release" version.) + +# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make +OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS) +OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@)) + +# Workaround for a bug in dtrace.
If ciEnv::post_compiled_method_load_event() +# is inlined, the resulting dtrace object file needs a reference to this +# function, whose symbol name is too long for dtrace. So disable inlining +# for this method for now. (fix this when dtrace bug 6258412 is fixed) +ifndef USE_GCC +OPT_CFLAGS/ciEnv.o = $(OPT_CFLAGS) -xinline=no%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_ +endif + +# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files) +ifeq ("${Platform_compiler}", "sparcWorks") + +# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876) +ifeq ($(COMPILER_REV_NUMERIC),509) + # Not clear this workaround could be skipped in some cases. + OPT_CFLAGS/vmGCOperations.o = $(OPT_CFLAGS/SLOWER) -g + OPT_CFLAGS/java.o = $(OPT_CFLAGS/SLOWER) -g + OPT_CFLAGS/jni.o = $(OPT_CFLAGS/SLOWER) -g +endif + +# Workaround SS11 bug 6345274 (all platforms) (Fixed in SS11 patch and SS12) +ifeq ($(COMPILER_REV_NUMERIC),508) +OPT_CFLAGS/ciTypeFlow.o = $(OPT_CFLAGS/O2) +endif # COMPILER_REV_NUMERIC == 508 + +endif # Platform_compiler == sparcWorks + +# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings +CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE) +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. + +# Linker mapfiles +# NOTE: inclusion of nonproduct mapfile not necessary; read it for details +ifdef USE_GCC +MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers +else +MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \ + $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct + +# This mapfile is only needed when compiling with dtrace support, +# and mustn't be otherwise. +MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE) + +REORDERFILE = $(GAMMADIR)/make/solaris/makefiles/reorder_$(TYPE)_$(BUILDARCH) +endif + +# Don't strip in VM build; JDK build will strip libraries later +# LINK_LIB.CC/POST_HOOK += $(STRIP_LIB.CC/POST_HOOK) + +G_SUFFIX = +SYSDEFS += -DPRODUCT +SYSDEFS += $(REORDER_FLAG) +VERSION = optimized --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/profiled.make 2009-08-01 04:16:49.749257947 +0100 @@ -0,0 +1,46 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making profiled version of Gamma VM +# (It is also optimized.) + +CFLAGS += -pg + +# On x86 Solaris 2.6, 7, and 8 if LD_LIBRARY_PATH has /usr/lib in it then +# adlc linked with -pg puts out empty header files. 
To avoid linking adlc +# with -pg the profile flag is split out separately and used in rules.make + +PROF_AOUT_FLAGS += -pg + +SYSDEFS += $(REORDER_FLAG) + +# To do a profiled build of the product, such as for generating the +# reordering file, set PROFILE_PRODUCT. Otherwise the reordering file will +# contain references to functions which are not defined in the PRODUCT build. + +ifdef PROFILE_PRODUCT + SYSDEFS += -DPRODUCT +endif + +LDNOMAP = true --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/reorder_COMPILER1_amd64 2009-08-01 04:16:50.223046685 +0100 @@ -0,0 +1,5450 @@ +data = R0x2000; +text = LOAD ?RXO; + + +# Test Null +text: .text%__cplus_fini_at_exit: CCrti.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o; +text: .text%__1cQAgentLibraryList2t6M_v_: arguments.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable.o; +text: .text%__1cFRInfo2t6M_v_: c1_AllocTable.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_AllocTable_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals.o; +text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Canonicalizer.o; +text: .text%__1cFRInfo2t6M_v_: c1_Canonicalizer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator.o; +text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeStubs_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_CodeStubs_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compilation.o; +text: .text%__1cFRInfo2t6M_v_: c1_Compilation.o; +text: .text%__1cMelapsedTimer2t6M_v_: c1_Compilation.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compiler.o; +text: .text%__1cFRInfo2t6M_v_: c1_Compiler.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap.o; +text: .text%__1cFRInfo2t6M_v_: c1_FrameMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_FrameMap_x86.o; +text: .text%__1cKc1_RegMask2t6M_v_: c1_FrameMap_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_GraphBuilder.o; +text: .text%__1cFRInfo2t6M_v_: c1_GraphBuilder.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_IR.o; +text: .text%__1cFRInfo2t6M_v_: c1_IR.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Instruction.o; +text: .text%__1cFRInfo2t6M_v_: c1_Instruction.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_InstructionPrinter.o; +text: .text%__1cFRInfo2t6M_v_: c1_InstructionPrinter.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items.o; +text: .text%__1cFRInfo2t6M_v_: c1_Items.o; +text: .text%__1cIHintItem2t6MpnJValueType_i_v_: c1_Items.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_Items_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIR.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIR.o; +text: .text%__1cLLIR_OprFactHillegal6F_pnLLIR_OprDesc__: c1_LIR.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter.o; +text: .text%__1cFRInfo2t6M_v_: 
c1_LIREmitter.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIREmitter_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Loops.o; +text: .text%__1cFRInfo2t6M_v_: c1_Loops.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_MacroAssembler_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_MacroAssembler_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Optimizer.o; +text: .text%__1cFRInfo2t6M_v_: c1_Optimizer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo.o; +text: .text%__1cFRInfo2t6M_v_: c1_RInfo.o; +text: .text%__1cKc1_RegMask2t6M_v_: c1_RInfo.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_RInfo_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc.o; +text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1.o; +text: .text%__1cFRInfo2t6M_v_: c1_Runtime1.o; +text: .text%__1cIiEntries2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_Runtime1_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ScanBlocks.o; +text: .text%__1cFRInfo2t6M_v_: c1_ScanBlocks.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueMap.o; +text: .text%__1cFRInfo2t6M_v_: c1_ValueMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueSet.o; +text: .text%__1cFRInfo2t6M_v_: c1_ValueSet.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueStack.o; +text: .text%__1cFRInfo2t6M_v_: c1_ValueStack.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeBlob.o; +text: .text%__1cFRInfo2t6M_v_: codeBlob.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o; +text: .text%__1cICHeapObj2n6FI_pv_; +text: .text%__1cCosGmalloc6FI_pv_; +text: .text%__1cICodeHeap2t6M_v_; +text: .text%__1cMVirtualSpace2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o; +text: .text%__1cMelapsedTimer2t6M_v_: compilationPolicy.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o; +text: .text%__1cMelapsedTimer2t6M_v_: compileBroker.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compiledIC.o; +text: .text%__1cFRInfo2t6M_v_: compiledIC.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: deoptimization.o; +text: .text%__1cFRInfo2t6M_v_: deoptimization.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o; +text: .text%__1cMelapsedTimer2t6M_v_: fprofiler.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame.o; +text: .text%__1cFRInfo2t6M_v_: frame.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame_x86.o; +text: .text%__1cFRInfo2t6M_v_: frame_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cRAlwaysTrueClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o; +text: .text%__1cNCellTypeStateLmake_bottom6F_0_: generateOopMap.o; +text: .text%__1cNCellTypeStateImake_any6Fi_0_: generateOopMap.o; +text: .text%__1cNCellTypeStateImake_top6F_0_: generateOopMap.o; +text: .text%__1cMelapsedTimer2t6M_v_: generateOopMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o; +text: .text%__1cKEntryPoint2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter_x86.o; +text: .text%__1cFRInfo2t6M_v_: 
interpreter_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: java.o; +text: .text%__1cFRInfo2t6M_v_: java.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o; +text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiEnvBase.o; +text: .text%__1cLResourceObj2n6FIn0APallocation_type__pv_; +text: .text%__1cNGrowableArray4CpnMJvmtiEnvBase__2t6Mii_v_: jvmtiEnvBase.o; +text: .text%__1cUGenericGrowableArray2t6Mii_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o; +text: .text%__1cRJvmtiEventEnabled2t6M_v_; +text: .text%__1cRJvmtiEventEnabledFclear6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o; +text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiImpl.o; +text: .text%__1cNGrowableArray4CpnPJvmtiRawMonitor__2t6Mii_v_: jvmtiImpl.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o; +text: .text%__1cJMemRegion2t6M_v_: jvmtiTagMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: klassVtable.o; +text: .text%__1cFRInfo2t6M_v_: klassVtable.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o; +text: .text%__1cJTimeStamp2t6M_v_: management.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o; +text: .text%__1cJMarkSweepSMarkAndPushClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepRFollowRootClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepSFollowStackClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepUAdjustPointerClosure2t6Mi_v_: markSweep.o; +text: .text%__1cJMarkSweepOIsAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepQKeepAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o; +text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: memoryService.o; +text: .text%__1cNGrowableArray4CpnKMemoryPool__2t6Mii_v_: memoryService.o; +text: .text%__1cNGrowableArray4CpnNMemoryManager__2t6Mii_v_: memoryService.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodOop.o; +text: .text%__1cFRInfo2t6M_v_: methodOop.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nativeInst_x86.o; +text: .text%__1cFRInfo2t6M_v_: nativeInst_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nmethod.o; +text: .text%__1cFRInfo2t6M_v_: nmethod.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o; +text: .text%__1cQDoNothingClosure2t6M_v_: oopMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris.o; +text: .text%__1cFRInfo2t6M_v_: os_solaris.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris_x86.o; +text: .text%__1cFRInfo2t6M_v_: os_solaris_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o; +text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: parGCAllocBuffer.o; +text: .text%__1cRalign_object_size6Fi_i_: parGCAllocBuffer.o; +text: .text%__1cHoopDescLheader_size6F_i_: parGCAllocBuffer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psAdaptiveSizePolicy.o; +text: .text%__1cMelapsedTimer2t6M_v_: psAdaptiveSizePolicy.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o; +text: .text%__1cMelapsedTimer2t6M_v_: psMarkSweep.o; +text: .text%__1cTPSAlwaysTrueClosure2t6M_v_: psMarkSweep.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psPromotionLAB.o; +text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: psPromotionLAB.o; +text: .text%__1cRalign_object_size6Fi_i_: psPromotionLAB.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o; +text: .text%__1cMelapsedTimer2t6M_v_: psScavenge.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o; +text: 
.text%__1cQRelocationHolder2t6M_v_: relocInfo.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o; +text: .text%__1cJTimeStamp2t6M_v_: runtimeService.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint.o; +text: .text%__1cFRInfo2t6M_v_: safepoint.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint_solaris_x86.o; +text: .text%__1cFRInfo2t6M_v_: safepoint_solaris_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: sharedHeap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedRuntime.o; +text: .text%__1cFRInfo2t6M_v_: sharedRuntime.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: tenuredGeneration.o; +text: .text%__1cRCardTableModRefBSbCpar_chunk_heapword_alignment6F_I_: tenuredGeneration.o; +text: .text%__1cEMIN24CI_6FTA0_0_: tenuredGeneration.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vframeArray.o; +text: .text%__1cFRInfo2t6M_v_: vframeArray.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o; +text: .text%__1cFRInfo2t6M_v_: vmStructs.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o; +text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_; +text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vtableStubs_x86.o; +text: .text%__1cFRInfo2t6M_v_: vtableStubs_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer_x86.o; +text: .text%JNI_CreateJavaVM; +text: .text%__1cCosVatomic_xchg_bootstrap6Fipoi_i_; +text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_; +text: .text%__1cHThreadsYis_supported_jni_version6Fi_C_; +text: .text%__1cMostream_init6F_v_; +text: .text%__1cMoutputStream2t6Mi_v_; +text: .text%__1cCosEinit6F_v_; +text: .text%__1cCosLinit_random6Fl_v_; +text: .text%__1cCosHSolarisWinitialize_system_info6F_v_; +text: .text%__1cOThreadCriticalKinitialize6F_v_; +text: .text%__1cCosMvm_page_size6F_i_; +text: .text%__1cJArgumentsWinit_system_properties6F_v_; +text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_; +text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_; +text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_; +text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_; +text: .text%__1cCosbDinit_system_properties_values6F_v_; +text: .text%__1cCosIjvm_path6Fpci_v_; +text: .text%__1cCosNset_boot_path6Fcc_i_; +text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_pkcp4_i_: arguments.o; +text: .text%__1cJArgumentsVprocess_settings_file6Fpkcii_i_; +text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_; +text: .text%__1cMSysClassPath2t6Mpkc_v_; +text: .text%__1cJArgumentsbSparse_java_tool_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cCosGgetenv6Fpkcpci_i_; +text: .text%__1cJArgumentsWparse_each_vm_init_arg6FpknOJavaVMInitArgs_pnMSysClassPath_pi_i_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_ppkc5i_i_: arguments.o; +text: .text%__1cJArgumentsMadd_property6Fpkc_i_; +text: .text%__1cJArgumentsXPropertyList_unique_add6FppnOSystemProperty_pkcpc_v_; +text: .text%__1cCosEfree6Fpv_v_; +text: .text%__1cJArgumentsbNparse_java_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsVfinalize_vm_init_args6FpnMSysClassPath_i_i_; +text: .text%__1cMSysClassPathPexpand_endorsed6M_v_; +text: .text%__1cJArgumentsMget_property6Fpkc_2_; +text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_; +text: 
.text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_; +text: .text%__1cJArgumentsZcheck_vm_args_consistency6F_i_; +text: .text%__1cJArgumentsRverify_percentage6FIpkc_i_; +text: .text%__1cMSysClassPath2T6M_v_; +text: .text%__1cMSysClassPathNreset_item_at6Mi_v_: arguments.o; +text: .text%__1cJArgumentsbOparse_java_compiler_environment_variable6F_v_; +text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_; +text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_; +text: .text%__1cSCommandLineFlagsExKis_default6FnPCommandLineFlag__i_; +text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_; +text: .text%__1cCosXis_server_class_machine6F_i_; +text: .text%__1cJTimeStampJupdate_to6Mx_v_; +text: .text%__1cCosOjavaTimeMillis6F_x_; +text: .text%__1cJTraceTime2t6MpkciipnMoutputStream__v_; +text: .text%__1cCosGinit_26F_i_; +text: .text%__1cCosHSolarisKmmap_chunk6FpcIii_2_; +text: .text%__1cCosXnon_memory_address_word6F_pc_; +text: .text%__1cCosHSolarisRmpss_sanity_check6F_v_; +text: .text%__1cCosHSolarisOset_mpss_range6FpcII_i_; +text: .text%__1cCosPuncommit_memory6FpcI_i_; +text: .text%__1cCosHSolarisOlibthread_init6F_v_; +text: .text%__1cOisT2_libthread6F_i_; +text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o; +text: .text%__1cNpriocntl_stub6FinGidtype_lipc_l_: os_solaris.o; +text: .text%__1cOresolve_symbol6Fpkc_pC_: os_solaris.o; +text: .text%__1cCosHSolarisQsignal_sets_init6F_v_; +text: .text%__1cCosHSolarisOis_sig_ignored6Fi_i_; +text: .text%__1cCosHSolarisPinit_signal_mem6F_v_; +text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_; +text: .text%__1cCosHSolarisSset_signal_handler6Fiii_v_; +text: .text%__1cCosHSolarisUsynchronization_init6F_v_; +text: .text%__1cDhpiKinitialize6F_i_; +text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_; +text: .text%__1cQostream_init_log6F_v_; +text: .text%__1cNdefaultStreamMhas_log_file6M_i_; +text: .text%__1cRis_error_reported6F_i_; +text: .text%__1cNdefaultStreamEinit6M_v_; +text: .text%__1cSThreadLocalStorageEinit6F_v_; +text: .text%__1cSThreadLocalStorageHpd_init6F_v_; +text: .text%__1cCosbDallocate_thread_local_storage6F_i_; +text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_; +text: .text%__1cRAllocateTLSOffset6F_v_: threadLS_solaris_x86.o; +text: .text%__1cPvm_init_globals6F_v_; +text: .text%__1cScheck_ThreadShadow6F_v_; +text: .text%__1cRcheck_basic_types6F_v_; +text: .text%__1cNeventlog_init6F_v_; +text: .text%__1cKmutex_init6F_v_; +text: .text%__1cFMutex2t6Mipkci_v_; +text: .text%lwp_cond_init: os_solaris.o; +text: .text%lwp_mutex_init: os_solaris.o; +text: .text%__1cHMonitor2t6Mipkci_v_; +text: .text%__1cOchunkpool_init6F_v_; +text: .text%__1cPperfMemory_init6F_v_; +text: .text%__1cKPerfMemoryKinitialize6F_v_; +text: .text%__1cCosZvm_allocation_granularity6F_i_; +text: .text%__1cKPerfMemoryUcreate_memory_region6FI_v_; +text: .text%__1cUcreate_shared_memory6FI_pc_: perfMemory_solaris.o; +text: .text%__1cSmmap_create_shared6FI_pc_: perfMemory_solaris.o; +text: .text%__1cCosScurrent_process_id6F_i_; +text: .text%__1cNget_user_name6Fl_pc_: perfMemory_solaris.o; +text: .text%__1cQget_user_tmp_dir6Fpkc_pc_: perfMemory_solaris.o; +text: .text%__1cCosSget_temp_directory6F_pkc_; +text: .text%__1cWget_sharedmem_filename6Fpkci_pc_: perfMemory_solaris.o; +text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cTis_directory_secure6Fpkc_i_: perfMemory_solaris.o; +text: .text%lstat: perfMemory_solaris.o; +text: 
.text%__1cPfilename_to_pid6Fpkc_l_: perfMemory_solaris.o; +text: .text%__1cbAcreate_sharedmem_resources6Fpkc1I_i_: perfMemory_solaris.o; +text: .text%__1cRmake_user_tmp_dir6Fpkc_i_: perfMemory_solaris.o; +text: .text%__1cKJavaThread2t6M_v_; +text: .text%__1cGThread2t6M_v_; +text: .text%__1cFArena2t6M_v_; +text: .text%__1cFChunk2n6FII_pv_; +text: .text%__1cOThreadCritical2t6M_v_; +text: .text%__1cOThreadCritical2T6M_v_; +text: .text%__1cFChunk2t6MI_v_; +text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_; +text: .text%__1cKJavaThreadKinitialize6M_v_; +text: .text%__1cNjni_functions6F_pknTJNINativeInterface___; +text: .text%__1cQThreadStatistics2t6M_v_; +text: .text%__1cGParker2t6M_v_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6MpnIHeapWord_22_v_; +text: .text%__1cWThreadLocalAllocBufferMinitial_size6F_I_; +text: .text%__1cWThreadLocalAllocBufferVinitialize_statistics6M_v_; +text: .text%__1cMFlatProfilerJis_active6F_i_; +text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_; +text: .text%__1cUThreadSafepointState2t6MpnKJavaThread__v_; +text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_; +text: .text%__1cCosScurrent_stack_base6F_pC_; +text: .text%__1cCosScurrent_stack_size6F_I_; +text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_; +text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_; +text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_; +text: .text%__1cCosbBthread_local_storage_at_put6Fipv_v_; +text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_; +text: .text%get_thread; +text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FIi_pnGThread__; +text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__; +text: .text%__1cCosXthread_local_storage_at6Fi_pv_; +text: .text%__1cCosVcurrent_stack_pointer6F_pC_; +text: .text%__1cCosRinitialize_thread6F_v_; +text: .text%__1cNReservedSpaceUpage_align_size_down6FI_I_; +text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_; +text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_; +text: .text%__1cFMutexbClock_without_safepoint_check6M_v_; +text: .text%__1cFMutexGunlock6M_v_; +text: .text%__1cGThreadWset_as_starting_thread6M_i_; +text: .text%__1cCosScreate_main_thread6FpnGThread__i_; +text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o; +text: .text%__1cIOSThread2t6MpFpv_i1_v_; +text: .text%__1cIOSThreadNpd_initialize6M_v_; +text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_; +text: .text%__1cCosHSolarisRunblocked_signals6F_pnIsigset_t__; +text: .text%__1cGThreadMis_VM_thread6kM_i_: thread.o; +text: .text%__1cCosHSolarisKvm_signals6F_pnIsigset_t__; +text: .text%__1cKJavaThreadYcreate_stack_guard_pages6M_v_; +text: .text%__1cCosNcommit_memory6FpcI_i_; +text: .text%__1cCosMguard_memory6FpcI_i_; +text: .text%__1cMinit_globals6F_i_; +text: .text%__1cPmanagement_init6F_v_; +text: .text%__1cKManagementEinit6F_v_; +text: .text%__1cNExceptionMark2t6MrpnGThread__v_; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__; +text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_; +text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_; +text: .text%__1cIPerfDataMcreate_entry6MnJBasicType_II_v_; +text: .text%__1cKPerfMemoryFalloc6FI_pc_; +text: .text%__1cFMutexElock6M_v_; +text: .text%__1cFMutexElock6MpnGThread__v_; +text: .text%__1cKPerfMemoryMmark_updated6F_v_; 
+text: .text%__1cCosLelapsedTime6F_d_; +text: .text%__1cMgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cPoldgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cPPerfDataManagerIadd_item6FpnIPerfData_i_v_; +text: .text%__1cMPerfDataList2t6Mi_v_; +text: .text%__1cCosbCis_thread_cpu_time_supported6F_i_; +text: .text%__1cNExceptionMark2T6M_v_; +text: .text%__1cNThreadServiceEinit6F_v_; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__; +text: .text%__1cORuntimeServiceEinit6F_v_; +text: .text%__1cTClassLoadingServiceEinit6F_v_; +text: .text%__1cKvtune_init6F_v_; +text: .text%__1cObytecodes_init6F_v_; +text: .text%__1cJBytecodesKinitialize6F_v_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_iii_v_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii1i_v_; +text: .text%__1cJBytecodesNpd_initialize6F_v_; +text: .text%__1cQclassLoader_init6F_v_; +text: .text%__1cLClassLoaderKinitialize6F_v_; +text: .text%__1cLClassLoaderQload_zip_library6F_v_; +text: .text%__1cCosTnative_java_library6F_pv_; +text: .text%JVM_GetInterfaceVersion; +text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_; +text: .text%__1cKHandleMark2T6M_v_; +text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_; +text: .text%__1cCosGstrdup6Fpkc_pc_; +text: .text%__1cLClassLoaderbCupdate_class_path_entry_list6Fpkc_v_; +text: .text%__1cCosEstat6FpkcpnEstat__i_; +text: .text%stat: os_solaris.o; +text: .text%__1cLClassLoaderXcreate_class_path_entry6FpcnEstat_ppnOClassPathEntry__v_; +text: .text%__1cLClassLoaderSget_canonical_path6Fpc1i_i_; +text: .text%JVM_RawMonitorCreate; +text: .text%JVM_NativePath; +text: .text%JVM_RawMonitorEnter; +text: .text%__1cFMutexMjvm_raw_lock6M_v_; +text: .text%JVM_RawMonitorExit; +text: .text%__1cFMutexOjvm_raw_unlock6M_v_; +text: .text%JVM_Open; +text: .text%JVM_Lseek; +text: .text%JVM_Close; +text: .text%__1cDhpiFclose6Fi_i_: jvm.o; +text: .text%__1cRClassPathZipEntry2t6Mppvpc_v_; +text: .text%__1cOClassPathEntry2t6M_v_; +text: .text%__1cLClassLoaderLadd_to_list6FpnOClassPathEntry__v_; +text: .text%__1cOcodeCache_init6F_v_; +text: .text%__1cJCodeCacheKinitialize6F_v_; +text: .text%__1cICodeHeapHreserve6MIII_i_; +text: .text%__1cLlog2_intptr6Fi_i_: heap.o; +text: .text%__1cYalign_to_allocation_size6FI_I_: heap.o; +text: .text%__1cNReservedSpace2t6MI_v_; +text: .text%__1cNReservedSpaceKinitialize6MIIipc_v_; +text: .text%__1cCosOreserve_memory6FIpc_1_; +text: .text%__1cMVirtualSpaceKinitialize6MnNReservedSpace_I_i_; +text: .text%__1cMVirtualSpaceJexpand_by6MI_i_; +text: .text%__1cMVirtualSpaceQuncommitted_size6kM_I_; +text: .text%__1cMVirtualSpaceNreserved_size6kM_I_; +text: .text%__1cMVirtualSpaceOcommitted_size6kM_I_; +text: .text%__1cCosNcommit_memory6FpcII_i_; +text: .text%__1cSalign_to_page_size6FI_I_: heap.o; +text: .text%__1cICodeHeapFclear6M_v_; +text: .text%__1cICodeHeapTmark_segmap_as_free6MII_v_; +text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_; +text: .text%__1cMCodeHeapPool2t6MpnICodeHeap_pkci_v_; +text: .text%__1cICodeHeapIcapacity6kM_I_; +text: .text%__1cICodeHeapMmax_capacity6kM_I_; +text: .text%__1cKMemoryPool2t6Mpkcn0AIPoolType_IIii_v_; +text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_; +text: .text%__1cNMemoryManager2t6M_v_; +text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_; +text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_; +text: .text%__1cLicache_init6F_v_; +text: .text%__1cPVM_Version_init6F_v_; +text: 
.text%__1cKVM_VersionKinitialize6F_v_; +text: .text%__1cKBufferBlobGcreate6Fpkci_p0_; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: thread.o; +text: .text%__1cRalign_code_offset6Fi_I_; +text: .text%__1cICodeHeapLheader_size6F_I_; +text: .text%__1cKBufferBlob2n6FII_pv_; +text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__; +text: .text%__1cICodeHeapIallocate6MI_pv_; +text: .text%__1cICodeHeapPsearch_freelist6MI_pnJFreeBlock__; +text: .text%__1cICodeHeapTmark_segmap_as_used6MII_v_; +text: .text%__1cKBufferBlob2t6Mpkci_v_; +text: .text%__1cICodeBlob2t6Mpkcii_v_; +text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_; +text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_; +text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_; +text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cMCodeHeapPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cICodeHeapSallocated_capacity6kM_I_; +text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cXresource_allocate_bytes6FI_pc_; +text: .text%__1cKCodeBuffer2t6MpCi_v_; +text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cYVM_Version_StubGeneratorTgenerate_getPsrInfo6M_pC_: vm_version_x86.o; +text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_; +text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_; +text: .text%__1cJAssemblerFpushl6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerGpushfd6M_v_; +text: .text%__1cJAssemblerEpopl6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerExorl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerExorl6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerFpopfd6M_v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_rnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cJAssemblerFcpuid6M_v_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerDjmp6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_; +text: .text%__1cRAbstractAssemblerHbind_to6MrnFLabel_i_v_; +text: .text%__1cMDisplacementEbind6MrnFLabel_ipnRAbstractAssembler__v_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_22nHAddressLScaleFactor_irknQRelocationHolder__v_; +text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cJAssemblerEmovl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerDret6Mi_v_; +text: .text%__1cMStubCodeMark2T6M_v_; +text: .text%__1cRAbstractAssemblerFflush6M_v_; +text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_; +text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_; +text: .text%__1cFForteNregister_stub6FpkcpC3_v_; +text: .text%__1cKVM_VersionWget_processor_features6F_v_; +text: .text%__1cCosMsupports_sse6F_i_; +text: .text%__1cVcheck_for_sse_support6F_v_: os_solaris_x86.o; +text: .text%jio_snprintf; +text: .text%jio_vsnprintf; +text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_; +text: .text%__1cSstubRoutines_init16F_v_; +text: .text%__1cMStubRoutinesLinitialize16F_v_; +text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_; +text: 
.text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_x86.o; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCi_v_; +text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cJAssemblerEcall6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cJAssemblerJemit_data6MinJrelocInfoJrelocType_i_v_; +text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: relocInfo.o; +text: .text%__1cKRelocationLunpack_data6M_v_: codeBlob.o; +text: .text%__1cJAssemblerJemit_data6MirknQRelocationHolder_i_v_; +text: .text%__1cKCodeBufferIrelocate6MpCrknQRelocationHolder_i_v_; +text: .text%__1cXruntime_call_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cOMacroAssemblerJincrement6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_i_v_; +text: .text%__1cOMacroAssemblerKget_thread6MpnMRegisterImpl__v_; +text: .text%__1cSThreadLocalStorageTpd_getTlsAccessMode6F_n0AQpd_tlsAccessMode__; +text: .text%__1cJAssemblerFpushl6Mi_v_; +text: .text%__1cJAssemblerEleal6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerEmovl6MnHAddress_i_v_; +text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_; +text: .text%__1cJAssemblerDjmp6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_x86.o; +text: .text%__1cOMacroAssemblerFenter6M_v_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEdecl6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEcall6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerGfstp_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfstp_d6MnHAddress__v_; +text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_x86.o; +text: .text%__1cJAssemblerDjmp6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_: stubGenerator_x86.o; +text: .text%__1cJAssemblerExchg6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerGpushad6M_v_; +text: .text%__1cJAssemblerFpopad6M_v_; +text: .text%__1cNStubGeneratorYgenerate_get_previous_fp6M_pC_: stubGenerator_x86.o; +text: .text%__1cNStubGeneratorUgenerate_d2i_wrapper6MpC_1_: stubGenerator_x86.o; +text: .text%__1cOMacroAssemblerOpush_FPU_state6M_v_; +text: .text%__1cJAssemblerGfnsave6MnHAddress__v_; +text: .text%__1cJAssemblerFfwait6M_v_; +text: .text%__1cJAssemblerFfld_d6MnHAddress__v_; +text: .text%__1cJAssemblerFfst_d6MnHAddress__v_; +text: .text%__1cOMacroAssemblerPempty_FPU_stack6M_v_; +text: .text%__1cJAssemblerFffree6Mi_v_; +text: .text%__1cJAssemblerLemit_farith6Miii_v_; +text: .text%__1cOMacroAssemblerNpop_FPU_state6M_v_; +text: .text%__1cJAssemblerGfrstor6MnHAddress__v_; +text: .text%__1cNStubGeneratorUcreate_control_words6M_v_: stubGenerator_x86.o; +text: .text%__1cJTraceTime2T6M_v_; +text: .text%__1cNcarSpace_init6F_v_; +text: .text%__1cICarSpaceEinit6F_v_; +text: .text%__1cNuniverse_init6F_i_; +text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_; +text: .text%__1cLFileMapInfoKinitialize6M_i_; +text: .text%__1cLFileMapInfoNfail_continue6MpkcE_v_; +text: .text%__1cLFileMapInfoFclose6M_v_; +text: .text%__1cIUniversePinitialize_heap6F_i_; +text: .text%__1cPMarkSweepPolicy2t6M_v_; +text: 
.text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyMrem_set_name6M_nJGenRemSetEName__: collectorPolicy.o; +text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__I_; +text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_I_; +text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_; +text: .text%__1cPMarkSweepPolicyWinitialize_generations6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyVnumber_of_generations6M_i_: collectorPolicy.o; +text: .text%__1cXPermanentGenerationSpec2t6MnHPermGenEName_IIIIII_v_; +text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_; +text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_; +text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__; +text: .text%__1cSPerfStringConstant2t6MnJCounterNS_pkc3_v_; +text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_; +text: .text%__1cKPerfStringKset_string6Mpkc_v_; +text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__; +text: .text%__1cQGenCollectedHeap2t6MpnPCollectorPolicy__v_; +text: .text%__1cKSharedHeap2t6MpnPCollectorPolicy__v_; +text: .text%__1cNCollectedHeap2t6M_v_; +text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_; +text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__; +text: .text%__1cMSubTasksDone2t6Mi_v_; +text: .text%__1cMSubTasksDoneFclear6M_v_; +text: .text%__1cMSubTasksDoneFvalid6M_i_; +text: .text%__1cQGenCollectedHeapKinitialize6M_i_; +text: .text%__1cPCollectorPolicyLgenerations6M_ppnOGenerationSpec__: collectorPolicy.o; +text: .text%__1cPCollectorPolicyUpermanent_generation6M_pnXPermanentGenerationSpec__: collectorPolicy.o; +text: .text%__1cXPermanentGenerationSpecFalign6MI_v_; +text: .text%__1cQGenCollectedHeapIallocate6MIpnXPermanentGenerationSpec_pIpipnNReservedSpace__pc_; +text: .text%__1cOGenerationSpecRn_covered_regions6kM_i_: collectorPolicy.o; +text: .text%__1cNReservedSpace2t6MIIipc_v_; +text: .text%__1cPCollectorPolicyOcreate_rem_set6MnJMemRegion_i_pnJGenRemSet__; +text: .text%__1cbCTwoGenerationCollectorPolicyQbarrier_set_name6M_nKBarrierSetEName__: collectorPolicy.o; +text: .text%__1cLCardTableRS2t6MnJMemRegion_i_v_; +text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_; +text: .text%__1cNReservedSpaceYallocation_align_size_up6FI_I_; +text: .text%__1cJMemRegion2t6M_v_: cardTableModRefBS.o; +text: .text%__1cKSharedHeapPset_barrier_set6MpnKBarrierSet__v_; +text: .text%__1cNReservedSpaceKfirst_part6MIii_0_; +text: .text%__1cNReservedSpace2t6MpcI_v_; +text: .text%__1cOGenerationSpecEinit6MnNReservedSpace_ipnJGenRemSet__pnKGeneration__; +text: .text%__1cQDefNewGeneration2t6MnNReservedSpace_Iipkc_v_; +text: .text%__1cKGeneration2t6MnNReservedSpace_Ii_v_; +text: .text%__1cIageTable2t6Mi_v_; +text: .text%__1cIageTableFclear6M_v_; +text: .text%__1cFArenaEgrow6MI_pv_; +text: .text%__1cFChunkJnext_chop6M_v_; +text: .text%__1cFChunkEchop6M_v_; +text: .text%__1cFChunk2k6Fpv_v_; +text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cRCardTableModRefBSbCfind_covering_region_by_base6MpnIHeapWord__i_; +text: .text%__1cRCardTableModRefBSbAlargest_prev_committed_end6kMi_pnIHeapWord__; +text: .text%__1cWSequentialSubTasksDoneFclear6M_v_; +text: .text%__1cSGenerationCounters2t6MpkciipnMVirtualSpace__v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_; +text: 
.text%__1cRCollectorCounters2t6Mpkci_v_; +text: .text%__1cOCSpaceCounters2t6MpkciIpnPContiguousSpace_pnSGenerationCounters__v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__; +text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_; +text: .text%__1cPPerfLongVariantGsample6M_v_; +text: .text%__1cZContiguousSpaceUsedHelperLtake_sample6M_x_: cSpaceCounters.o; +text: .text%__1cPContiguousSpaceEused6kM_I_: space.o; +text: .text%__1cQDefNewGenerationYcompute_space_boundaries6MI_v_; +text: .text%__1cQCompactibleSpaceKinitialize6MnJMemRegion_i_v_; +text: .text%__1cFSpaceKinitialize6MnJMemRegion_i_v_; +text: .text%__1cFSpaceKset_bottom6MpnIHeapWord__v_: space.o; +text: .text%__1cJEdenSpaceHset_end6MpnIHeapWord__v_: space.o; +text: .text%__1cJEdenSpaceFclear6M_v_; +text: .text%__1cPContiguousSpaceFclear6M_v_; +text: .text%__1cFSpaceFclear6M_v_; +text: .text%__1cFSpaceHset_end6MpnIHeapWord__v_: space.o; +text: .text%__1cQDefNewGenerationPupdate_counters6M_v_; +text: .text%__1cSGenerationCountersKupdate_all6M_v_: generationCounters.o; +text: .text%__1cNReservedSpaceJlast_part6MI_0_; +text: .text%__1cRTenuredGeneration2t6MnNReservedSpace_IipnJGenRemSet__v_; +text: .text%__1cOCardGeneration2t6MnNReservedSpace_IipnJGenRemSet__v_; +text: .text%__1cWBlockOffsetSharedArray2t6MnJMemRegion_I_v_; +text: .text%__1cWBlockOffsetSharedArrayGresize6MI_v_; +text: .text%__1cNReservedSpaceSpage_align_size_up6FI_I_; +text: .text%__1cLCardTableRSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cLCardTableRSKis_aligned6MpnIHeapWord__i_: cardTableRS.o; +text: .text%__1cHGCStats2t6M_v_; +text: .text%__1cWOffsetTableContigSpace2t6MpnWBlockOffsetSharedArray_nJMemRegion__v_; +text: .text%__1cQBlockOffsetArray2t6MpnWBlockOffsetSharedArray_nJMemRegion_i_v_; +text: .text%__1cWOffsetTableContigSpaceKset_bottom6MpnIHeapWord__v_; +text: .text%__1cQBlockOffsetArrayGresize6MI_v_: blockOffsetTable.o; +text: .text%__1cWOffsetTableContigSpaceHset_end6MpnIHeapWord__v_; +text: .text%__1cWOffsetTableContigSpaceFclear6M_v_; +text: .text%__1cbBBlockOffsetArrayContigSpaceUinitialize_threshold6M_pnIHeapWord__; +text: .text%__1cUGenericGrowableArrayEgrow6Mi_v_; +text: .text%__1cXPermanentGenerationSpecEinit6MnNReservedSpace_IpnJGenRemSet__pnHPermGen__; +text: .text%__1cRCompactingPermGen2t6MnNReservedSpace_1IpnJGenRemSet_pnXPermanentGenerationSpec__v_; +text: .text%__1cUCompactingPermGenGen2t6MnNReservedSpace_1IipnJGenRemSet_pnPContiguousSpace_pnXPermanentGenerationSpec__v_; +text: .text%__1cUCompactingPermGenGenbFinitialize_performance_counters6M_v_; +text: .text%__1cbCOneContigSpaceCardGenerationIcapacity6kM_I_; +text: .text%__1cPCollectorPolicybFis_concurrent_mark_sweep_policy6M_i_: collectorPolicy.o; +text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_; +text: .text%__1cPGlobalTLABStats2t6M_v_; +text: .text%__1cPGlobalTLABStatsKinitialize6M_v_; +text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_; +text: .text%__1cXAdaptiveWeightedAverageYcompute_adaptive_average6Mff_f_; +text: .text%__1cQGenCollectedHeapNtlab_capacity6kM_I_; +text: .text%__1cQDefNewGenerationYsupports_tlab_allocation6kM_i_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationNtlab_capacity6kM_I_: defNewGeneration.o; +text: .text%__1cKGenerationYsupports_tlab_allocation6kM_i_: tenuredGeneration.o; +text: 
.text%__1cWThreadLocalAllocBufferImax_size6F_I_; +text: .text%__1cOBasicHashtable2t6Mii_v_: universe.o; +text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: classLoader.o; +text: .text%__1cQinterpreter_init6F_v_; +text: .text%__1cTAbstractInterpreterKinitialize6F_v_; +text: .text%__1cNTemplateTableKinitialize6F_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF_vc_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_; +text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJOperation__v4_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJCondition__v4_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF3_v3_v_; +text: .text%__1cNTemplateTableNpd_initialize6F_v_; +text: .text%__1cRInvocationCounterMreinitialize6Fi_v_; +text: .text%__1cRInvocationCounterDdef6Fn0AFState_ipFnMmethodHandle_pnGThread__pC_v_; +text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_; +text: .text%__1cICodeHeapJexpand_by6MI_i_; +text: .text%__1cJStubQdDueueOregister_queue6Fp0_v_; +text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_; +text: .text%__1cLCodeletMark2t6MrpnZInterpreterMacroAssembler_pkcinJBytecodesECode__v_: interpreter.o; +text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__; +text: .text%__1cbBInterpreterCodeletInterfaceRcode_size_to_size6kMi_i_: interpreter.o; +text: .text%__1cbBInterpreterCodeletInterfaceKinitialize6MpnEStub_i_v_: interpreter.o; +text: .text%__1cSInterpreterCodeletKinitialize6MpkcinJBytecodesECode__v_; +text: .text%__1cbCAbstractInterpreterGeneratorTgenerate_error_exit6Mpkc_pC_; +text: .text%__1cOMacroAssemblerEstop6Mpkc_v_; +text: .text%__1cJAssemblerEcall6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cJAssemblerDhlt6M_v_; +text: .text%__1cOMacroAssemblerFalign6Mi_v_; +text: .text%__1cJAssemblerDnop6M_v_; +text: .text%__1cJStubQdDueueGcommit6Mi_v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_return_entry_for6MnITosState_i_pC_; +text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_; +text: .text%__1cOMacroAssemblerSload_unsigned_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cJAssemblerGmovzxw6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerEshll6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_; +text: .text%__1cOMacroAssemblerSload_unsigned_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cJAssemblerGmovzxb6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_base6MnITosState_ppCi_v_; +text: .text%__1cZInterpreterMacroAssemblerKverify_FPU6MinITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerKverify_oop6MpnMRegisterImpl_nITosState__v_; +text: .text%__1cJAssemblerDjmp6MnHAddress__v_; +text: .text%__1cOMacroAssemblerKverify_FPU6Mipkc_v_; +text: .text%__1cKEntryPoint2t6MpC11111111_v_; +text: .text%__1cJAssemblerEincl6MpnMRegisterImpl__v_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_deopt_entry_for6MnITosState_i_pC_; +text: .text%__1cJAssemblerEcmpl6MnHAddress_i_v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pCi_v_; +text: 
.text%__1cOMacroAssemblerOcall_VM_helper6MpnMRegisterImpl_pCii_v_; +text: .text%__1cZInterpreterMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cOMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_pCnJrelocInfoJrelocType__v_; +text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_; +text: .text%__1cbCAbstractInterpreterGeneratorbBgenerate_result_handler_for6MnJBasicType__pC_; +text: .text%__1cOMacroAssemblerGc2bool6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEsetb6Mn0AJCondition_pnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerQsign_extend_byte6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerRsign_extend_short6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerGmovsxw6MpnMRegisterImpl_2_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEtemp6F_pnMRegisterImpl__; +text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_; +text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC222i_v_; +text: .text%__1cTAbstractInterpreterRTosState_as_index6FnITosState__i_; +text: .text%__1cTAbstractInterpreterMreturn_entry6FnITosState_i_pC_; +text: .text%__1cKEntryPointFentry6kMnITosState__pC_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_continuation_for6MnITosState__pC_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_; +text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_throw_exception6M_v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl_33_v_; +text: .text%__1cZInterpreterMacroAssemblerRremove_activation6MnITosState_pnMRegisterImpl_iii_v_; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerElock6M_v_; +text: .text%__1cJAssemblerHcmpxchg6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cHAddress2t6MinJrelocInfoJrelocType__v_; +text: .text%__1cOMacroAssemblerFleave6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC22i_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbHgenerate_exception_handler_common6Mpkc2i_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_StackOverflowError_handler6M_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cUInterpreterGeneratorbEgenerate_asm_interpreter_entry6Mi_pC_; +text: .text%__1cUInterpreterGeneratorXcheck_for_compiled_code6MrnFLabel__v_; +text: .text%__1cUInterpreterGeneratorbDgenerate_stack_overflow_check6M_v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_nHAddress__v_; +text: 
.text%__1cUInterpreterGeneratorUgenerate_fixed_frame6Mi_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_x86.o; +text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_; +text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MpC_v_; +text: .text%__1cJAssemblerEnegl6MpnMRegisterImpl__v_; +text: .text%__1cUInterpreterGeneratorbAgenerate_run_compiled_code6M_v_; +text: .text%__1cZInterpreterMacroAssemblerNsuper_call_VM6MpnMRegisterImpl_2pC22_v_; +text: .text%__1cUInterpreterGeneratorLlock_method6M_v_; +text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_; +text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_; +text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl_i_v_; +text: .text%__1cLlog2_intptr6Fi_i_: interpreter_x86.o; +text: .text%__1cOMacroAssemblerQload_signed_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cOMacroAssemblerQload_signed_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cJAssemblerGmovsxw6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cUInterpreterGeneratorXgenerate_abstract_entry6M_pC_; +text: .text%__1cUInterpreterGeneratorTgenerate_math_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cOMacroAssemblerGsincos6Miii_v_; +text: .text%__1cJAssemblerFfld_s6Mi_v_; +text: .text%__1cJAssemblerEfabs6M_v_; +text: .text%__1cOMacroAssemblerEfcmp6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerHfucomip6Mi_v_; +text: .text%__1cOMacroAssemblerEfpop6M_v_; +text: .text%__1cJAssemblerHfincstp6M_v_; +text: .text%__1cJAssemblerEfsin6M_v_; +text: .text%__1cJAssemblerEfcos6M_v_; +text: .text%__1cJAssemblerFfsqrt6M_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_native_entry6Mi_pC_; +text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_; +text: .text%__1cJAssemblerGmembar6M_v_; +text: .text%__1cJAssemblerEaddl6MnHAddress_i_v_; +text: .text%__1cJAssemblerSemit_arith_operand6MipnMRegisterImpl_nHAddress_i_v_; +text: .text%__1cZInterpreterMacroAssemblerSnotify_method_exit6MnITosState__v_; +text: .text%__1cbCAbstractInterpreterGeneratorbEset_entry_points_for_all_bytes6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorQset_entry_points6MnJBytecodesECode__v_; +text: .text%__1cbCAbstractInterpreterGeneratorWset_short_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cbCAbstractInterpreterGeneratorVset_vtos_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_and_dispatch6MpnITemplate_nITosState__v_; +text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_; +text: .text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_; +text: .text%__1cNTemplateTableDnop6F_v_; +text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_; +text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_; +text: .text%__1cNTemplateTableLaconst_null6F_v_; +text: 
.text%__1cNTemplateTableGiconst6Fi_v_; +text: .text%__1cNTemplateTableGlconst6Fi_v_; +text: .text%__1cNTemplateTableGfconst6Fi_v_; +text: .text%__1cJAssemblerEfldz6M_v_; +text: .text%__1cJAssemblerEfld16M_v_; +text: .text%__1cJAssemblerFfaddp6Mi_v_; +text: .text%__1cNTemplateTableGdconst6Fi_v_; +text: .text%__1cNTemplateTableGbipush6F_v_; +text: .text%__1cNTemplateTableGsipush6F_v_; +text: .text%__1cJAssemblerFbswap6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl_i_v_; +text: .text%__1cNTemplateTableDldc6Fi_v_; +text: .text%__1cJAssemblerEmovb6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_; +text: .text%__1cJAssemblerFfld_s6MnHAddress__v_; +text: .text%__1cZInterpreterMacroAssemblerbGget_unsigned_2_byte_index_at_bcp6MpnMRegisterImpl_i_v_; +text: .text%__1cNTemplateTableGldc2_w6F_v_; +text: .text%__1cJAssemblerEcmpb6MnHAddress_i_v_; +text: .text%__1cNTemplateTableFiload6F_v_; +text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_; +text: .text%__1cbCAbstractInterpreterGeneratorUset_wide_entry_point6MpnITemplate_rpC_v_; +text: .text%__1cNTemplateTableKwide_iload6F_v_; +text: .text%__1cNTemplateTableRlocals_index_wide6FpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableFlload6F_v_; +text: .text%__1cNTemplateTableKwide_lload6F_v_; +text: .text%__1cNTemplateTableFfload6F_v_; +text: .text%__1cNTemplateTableKwide_fload6F_v_; +text: .text%__1cNTemplateTableFdload6F_v_; +text: .text%__1cNTemplateTableKwide_dload6F_v_; +text: .text%__1cNTemplateTableFaload6F_v_; +text: .text%__1cNTemplateTableKwide_aload6F_v_; +text: .text%__1cNTemplateTableFiload6Fi_v_; +text: .text%__1cNTemplateTableFlload6Fi_v_; +text: .text%__1cNTemplateTableFfload6Fi_v_; +text: .text%__1cNTemplateTableFdload6Fi_v_; +text: .text%__1cNTemplateTableHaload_06F_v_; +text: .text%__1cNTemplateTableFaload6Fi_v_; +text: .text%__1cNTemplateTableGiaload6F_v_; +text: .text%__1cNTemplateTableLindex_check6FpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_; +text: .text%__1cOMacroAssemblerZneeds_explicit_null_check6Fi_i_; +text: .text%__1cNTemplateTableGlaload6F_v_; +text: .text%__1cNTemplateTableGfaload6F_v_; +text: .text%__1cNTemplateTableGdaload6F_v_; +text: .text%__1cNTemplateTableGaaload6F_v_; +text: .text%__1cNTemplateTableGbaload6F_v_; +text: .text%__1cNTemplateTableGcaload6F_v_; +text: .text%__1cNTemplateTableGsaload6F_v_; +text: .text%__1cNTemplateTableGistore6F_v_; +text: .text%__1cNTemplateTableLwide_istore6F_v_; +text: .text%__1cNTemplateTableGlstore6F_v_; +text: .text%__1cNTemplateTableLwide_lstore6F_v_; +text: .text%__1cNTemplateTableGfstore6F_v_; +text: .text%__1cNTemplateTableLwide_fstore6F_v_; +text: .text%__1cNTemplateTableGdstore6F_v_; +text: .text%__1cNTemplateTableLwide_dstore6F_v_; +text: .text%__1cNTemplateTableGastore6F_v_; +text: .text%__1cNTemplateTableLwide_astore6F_v_; +text: .text%__1cNTemplateTableGistore6Fi_v_; +text: .text%__1cNTemplateTableGlstore6Fi_v_; +text: .text%__1cNTemplateTableGfstore6Fi_v_; +text: .text%__1cNTemplateTableGdstore6Fi_v_; +text: .text%__1cNTemplateTableGastore6Fi_v_; +text: .text%__1cNTemplateTableHiastore6F_v_; +text: .text%__1cNTemplateTableHlastore6F_v_; +text: .text%__1cNTemplateTableHfastore6F_v_; +text: .text%__1cNTemplateTableHdastore6F_v_; +text: .text%__1cNTemplateTableHaastore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerRprofile_checkcast6MipnMRegisterImpl__v_; +text: 
.text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cJAssemblerKrepne_scan6M_v_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerSstore_check_part_16MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerSstore_check_part_26MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEmovb6MnHAddress_i_v_; +text: .text%__1cNTemplateTableHbastore6F_v_; +text: .text%__1cJAssemblerEmovb6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cNTemplateTableHcastore6F_v_; +text: .text%__1cJAssemblerEmovw6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cNTemplateTableHsastore6F_v_; +text: .text%__1cNTemplateTableDpop6F_v_; +text: .text%__1cNTemplateTableEpop26F_v_; +text: .text%__1cNTemplateTableDdup6F_v_; +text: .text%__1cJAssemblerFpushl6MnHAddress__v_; +text: .text%__1cNTemplateTableGdup_x16F_v_; +text: .text%__1cNTemplateTableGdup_x26F_v_; +text: .text%__1cNTemplateTableEdup26F_v_; +text: .text%__1cNTemplateTableHdup2_x16F_v_; +text: .text%__1cNTemplateTableHdup2_x26F_v_; +text: .text%__1cNTemplateTableEswap6F_v_; +text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_; +text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_; +text: .text%__1cJAssemblerEadcl6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_; +text: .text%__1cJAssemblerGfadd_s6MnHAddress__v_; +text: .text%__1cZInterpreterMacroAssemblerGf2ieee6M_v_; +text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_; +text: .text%__1cJAssemblerGfadd_d6MnHAddress__v_; +text: .text%__1cZInterpreterMacroAssemblerGd2ieee6M_v_; +text: .text%__1cJAssemblerEsbbl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerHfsubr_s6MnHAddress__v_; +text: .text%__1cJAssemblerHfsubr_d6MnHAddress__v_; +text: .text%__1cJAssemblerFimull6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableElmul6F_v_; +text: .text%__1cOMacroAssemblerElmul6Mii_v_; +text: .text%__1cJAssemblerEmull6MnHAddress__v_; +text: .text%__1cJAssemblerEmull6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerGfmul_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfmul_d6MnHAddress__v_; +text: .text%__1cJAssemblerFfld_x6MnHAddress__v_; +text: .text%__1cJAssemblerFfmulp6Mi_v_; +text: .text%__1cNTemplateTableEidiv6F_v_; +text: .text%__1cOMacroAssemblerPcorrected_idivl6MpnMRegisterImpl__i_; +text: .text%__1cJAssemblerEcdql6M_v_; +text: .text%__1cJAssemblerFidivl6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableEldiv6F_v_; +text: .text%__1cZInterpreterMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cJAssemblerHfdivr_s6MnHAddress__v_; +text: .text%__1cJAssemblerHfdivr_d6MnHAddress__v_; +text: .text%__1cJAssemblerGfdivrp6Mi_v_; +text: .text%__1cNTemplateTableEirem6F_v_; +text: .text%__1cNTemplateTableElrem6F_v_; +text: .text%__1cOMacroAssemblerFfremr6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerIsave_eax6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerFfprem6M_v_; +text: .text%__1cJAssemblerJfnstsw_ax6M_v_; +text: .text%__1cJAssemblerEsahf6M_v_; +text: .text%__1cOMacroAssemblerLrestore_eax6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEfxch6Mi_v_; +text: .text%__1cNTemplateTableEineg6F_v_; +text: .text%__1cNTemplateTableElneg6F_v_; +text: .text%__1cOMacroAssemblerElneg6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEadcl6MpnMRegisterImpl_i_v_; +text: .text%__1cNTemplateTableEfneg6F_v_; +text: .text%__1cJAssemblerEfchs6M_v_; +text: .text%__1cNTemplateTableEdneg6F_v_; +text: .text%__1cJAssemblerEshll6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableElshl6F_v_; +text: 
.text%__1cOMacroAssemblerElshl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerFshldl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableElshr6F_v_; +text: .text%__1cOMacroAssemblerElshr6MpnMRegisterImpl_2i_v_; +text: .text%__1cJAssemblerFshrdl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableFlushr6F_v_; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableEiinc6F_v_; +text: .text%__1cJAssemblerEaddl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cNTemplateTableJwide_iinc6F_v_; +text: .text%__1cNTemplateTableHconvert6F_v_; +text: .text%__1cOMacroAssemblerLextend_sign6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerGfild_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfild_d6MnHAddress__v_; +text: .text%__1cNTemplateTableElcmp6F_v_; +text: .text%__1cOMacroAssemblerIlcmp2int6MpnMRegisterImpl_222_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_; +text: .text%__1cOMacroAssemblerIfcmp2int6MpnMRegisterImpl_i_v_; +text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_; +text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_; +text: .text%__1cFj_not6FnNTemplateTableJCondition__nJAssemblerJCondition__: templateTable_x86.o; +text: .text%__1cNTemplateTableGbranch6Fii_v_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_only6MnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerYprofile_not_taken_branch6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableF_goto6F_v_; +text: .text%__1cNTemplateTableDjsr6F_v_; +text: .text%__1cZInterpreterMacroAssemblerWdispatch_only_noverify6MnITosState__v_; +text: .text%__1cNTemplateTableDret6F_v_; +text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableIwide_ret6F_v_; +text: .text%__1cNTemplateTableLtableswitch6F_v_; +text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_22_v_; +text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableMlookupswitch6F_v_; +text: .text%__1cNTemplateTableH_return6FnITosState__v_; +text: .text%__1cNTemplateTableJgetstatic6Fi_v_; +text: .text%__1cNTemplateTableSgetfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableZload_field_cp_cache_entry6FipnMRegisterImpl_22i_v_; +text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerHfistp_d6MnHAddress__v_; +text: .text%__1cNTemplateTableJputstatic6Fi_v_; +text: .text%__1cNTemplateTableSputfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableUjvmti_post_field_mod6Fii_v_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cNTemplateTableQvolatile_barrier6F_v_; +text: .text%__1cNTemplateTableIgetfield6Fi_v_; +text: .text%__1cNTemplateTableOpatch_bytecode6FnJBytecodesECode_pnMRegisterImpl_4i_v_; +text: .text%__1cNTemplateTableIputfield6Fi_v_; +text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_; +text: .text%__1cNTemplateTableOprepare_invoke6FpnMRegisterImpl_2inJBytecodesECode__v_; +text: .text%__1cNTemplateTablebAload_invoke_cp_cache_entry6FipnMRegisterImpl_22ii_v_; +text: .text%__1cNTemplateTableUinvokevirtual_helper6FpnMRegisterImpl_22_v_; +text: 
.text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_22_v_; +text: .text%__1cNTemplateTableNinvokespecial6Fi_v_; +text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableMinvokestatic6Fi_v_; +text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_; +text: .text%__1cbCAbstractInterpreterGeneratorRset_unimplemented6Mi_v_; +text: .text%__1cNTemplateTableE_new6F_v_; +text: .text%__1cQGenCollectedHeapItop_addr6kM_ppnIHeapWord__; +text: .text%__1cQDefNewGenerationItop_addr6kM_ppnIHeapWord__; +text: .text%__1cQGenCollectedHeapIend_addr6kM_ppnIHeapWord__; +text: .text%__1cQDefNewGenerationIend_addr6kM_ppnIHeapWord__; +text: .text%__1cOMacroAssemblerJdecrement6MpnMRegisterImpl_i_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_; +text: .text%__1cNTemplateTableInewarray6F_v_; +text: .text%__1cNTemplateTableJanewarray6F_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_; +text: .text%__1cNTemplateTableLarraylength6F_v_; +text: .text%__1cNTemplateTableGathrow6F_v_; +text: .text%__1cNTemplateTableJcheckcast6F_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_; +text: .text%__1cNTemplateTableKinstanceof6F_v_; +text: .text%__1cNTemplateTableMmonitorenter6F_v_; +text: .text%__1cJAssemblerFcmovl6Mn0AJCondition_pnMRegisterImpl_3_v_; +text: .text%__1cNTemplateTableLmonitorexit6F_v_; +text: .text%__1cNTemplateTableEwide6F_v_; +text: .text%__1cNTemplateTableOmultianewarray6F_v_; +text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableGgoto_w6F_v_; +text: .text%__1cNTemplateTableFjsr_w6F_v_; +text: .text%__1cNTemplateTableL_breakpoint6F_v_; +text: .text%__1cZInterpreterMacroAssemblerUdispatch_only_normal6MnITosState__v_; +text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_; +text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_; +text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_; +text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_; +text: .text%__1cNTemplateTableKfast_iload6F_v_; +text: .text%__1cNTemplateTableLfast_iload26F_v_; +text: .text%__1cNTemplateTableMfast_icaload6F_v_; +text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_; +text: .text%__1cNTemplateTableRfast_linearswitch6F_v_; +text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_; +text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbCset_safepoints_for_all_bytes6M_v_; +text: .text%__1cWinvocationCounter_init6F_v_; +text: .text%__1cOmarksweep_init6F_v_; +text: .text%__1cQaccessFlags_init6F_v_; +text: .text%__1cStemplateTable_init6F_v_; +text: .text%__1cVInterfaceSupport_init6F_v_; +text: .text%__1cOuniverse2_init6F_v_; +text: .text%__1cIUniverseHgenesis6FpnGThread__v_; +text: .text%__1cIUniverseYcompute_base_vtable_size6F_v_; +text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_; +text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__; +text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: klassKlass.o; +text: .text%__1cKKlass_vtbl2n6FIrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: klass.o; +text: 
.text%__1cKSharedHeapWpermanent_mem_allocate6MI_pnIHeapWord__: genCollectedHeap.o; +text: .text%__1cRCompactingPermGenMmem_allocate6MI_pnIHeapWord__; +text: .text%__1cbCOneContigSpaceCardGenerationIallocate6MIii_pnIHeapWord__: compactingPermGenGen.o; +text: .text%__1cWOffsetTableContigSpaceIallocate6MI_pnIHeapWord__: space.o; +text: .text%__1cPContiguousSpaceIallocate6MI_pnIHeapWord__; +text: .text%__1cbBBlockOffsetArrayContigSpaceLalloc_block6MpnIHeapWord_2_v_: blockOffsetTable.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: klass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: klass.o; +text: .text%__1cRCardTableModRefBSEkind6M_nKBarrierSetEName__: cardTableModRefBS.o; +text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_; +text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: klassKlass.o; +text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_; +text: .text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: arrayKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: arrayKlassKlass.o; +text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__; +text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: objArrayKlassKlass.o; +text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: instanceKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlassKlass.o; +text: .text%__1cbBBlockOffsetArrayContigSpaceQalloc_block_work6MpnIHeapWord_2_v_; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: typeArrayKlassKlass.o; +text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: symbolKlass.o; +text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_: symbolKlass.o; +text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cJHashtableLhash_symbol6Fpkci_I_: symbolTable.o; +text: .text%__1cLSymbolTableGlookup6MipkciI_pnNsymbolOopDesc__; +text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__; +text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: symbolKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: symbolKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: symbolKlass.o; +text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__; +text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__; +text: .text%__1cOtypeArrayKlassMcreate_klass6FnJBasicType_ipnGThread__pnMklassOopDesc__; +text: 
.text%__1cOtypeArrayKlassNexternal_name6FnJBasicType__pkc_; +text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__; +text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: typeArrayKlass.o; +text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_; +text: .text%__1cLlog2_intptr6Fi_i_: typeArrayKlass.o; +text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_; +text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cLklassVtableRinitialize_vtable6MpnGThread__v_; +text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodKlass.o; +text: .text%__1cLmethodKlassOset_alloc_size6MI_v_: methodKlass.o; +text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_: constMethodKlass.o; +text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodDataKlass.o; +text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_: methodDataKlass.o; +text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constantPoolKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: constantPoolKlass.o; +text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: cpCacheKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: cpCacheKlass.o; +text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_: compiledICHolderKlass.o; +text: .text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlass.o; +text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_; +text: .text%__1cNsymbolOopDescLas_C_string6kMpci_1_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlass.o; +text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: objArrayKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlass.o; +text: .text%__1cLlog2_intptr6Fi_i_: objArrayKlassKlass.o; +text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: 
.text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: typeArrayKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: typeArrayKlass.o; +text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: oopFactory.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: oopFactory.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: oopFactory.o; +text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_; +text: .text%__1cNsymbolOopDescGequals6kMpkci_i_; +text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_; +text: .text%__1cKDictionary2t6Mi_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: dictionary.o; +text: .text%__1cQPlaceholderTable2t6Mi_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: placeholders.o; +text: .text%__1cVLoaderConstraintTable2t6Mi_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: loaderConstraints.o; +text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_nGHandle_2ipnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: systemDictionary.o; +text: .text%__1cHoopDescSslow_identity_hash6M_i_; +text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__i_; +text: .text%__1cNget_next_hash6F_i_: synchronizer.o; +text: .text%__1cCosGrandom6F_l_; +text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cKDictionaryJget_entry6MiInMsymbolHandle_nGHandle__pnPDictionaryEntry__; +text: .text%__1cQSystemDictionarybAcompute_loader_lock_object6FnGHandle_pnGThread__1_; +text: .text%__1cMObjectLocker2t6MnGHandle_pnGThread__v_; +text: .text%__1cSObjectSynchronizerKfast_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cQSystemDictionaryKfind_class6FiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cQPlaceholderTableJadd_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cQPlaceholderTableJnew_entry6MipnNsymbolOopDesc_pnHoopDesc__pnQPlaceholderEntry__; +text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: 
.text%__1cQSystemDictionaryRload_shared_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cQSystemDictionaryRfind_shared_class6FnMsymbolHandle__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_; +text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cFVTuneQstart_class_load6F_v_; +text: .text%__1cJEventMark2t6MpkcE_v_: classLoader.o; +text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_; +text: .text%__1cMstringStream2t6MI_v_; +text: .text%__1cMstringStreamFwrite6MpkcI_v_; +text: .text%__1cMoutputStreamPupdate_position6MpkcI_v_; +text: .text%__1cMstringStreamJas_string6M_pc_; +text: .text%__1cMelapsedTimerFstart6M_v_; +text: .text%__1cCosPelapsed_counter6F_x_; +text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__; +text: .text%__1cPClassFileStream2t6MpCipc_v_; +text: .text%__1cMelapsedTimerEstop6M_v_; +text: .text%__1cPClassFileParserOparseClassFile6MnMsymbolHandle_nGHandle_2r1pnGThread__nTinstanceKlassHandle__; +text: .text%__1cIVerifierRshould_verify_for6FpnHoopDesc__i_; +text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_; +text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_; +text: .text%__1cIVerifierQrelax_verify_for6FpnHoopDesc__i_; +text: .text%__1cVjava_lang_ClassLoaderRis_trusted_loader6FpnHoopDesc__i_; +text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__; +text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__; +text: .text%__1cPClassFileParserOcheck_property6MipkcipnGThread__v_; +text: .text%__1cKoopFactoryQnew_constantPool6FipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cRconstantPoolKlassIallocate6MipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: constantPoolKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: constantPoolKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: constantPoolKlass.o; +text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_; +text: .text%__1cPClassFileParserbFparse_constant_pool_class_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbJparse_constant_pool_methodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbGparse_constant_pool_string_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbHparse_constant_pool_integer_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbEparse_constant_pool_utf8_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_; +text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_; +text: .text%__1cPClassFileParserbLparse_constant_pool_nameandtype_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTconstantPoolOopDescSklass_ref_index_at6Mi_i_; +text: .text%__1cTconstantPoolOopDescbAname_and_type_ref_index_at6Mi_i_; +text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_; +text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_; +text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_; +text: .text%__1cPClassFileParserQparse_interfaces6MnSconstantPoolHandle_nGHandle_2pnGThread__nOobjArrayHandle__; +text: 
.text%__1cPClassFileParserMparse_fields6MnSconstantPoolHandle_ipnUFieldAllocationCount_pnOobjArrayHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cUinitialize_hashtable6FppnLNameSigHash__v_; +text: .text%__1cPclear_hashtable6FppnLNameSigHash__v_; +text: .text%__1cPClassFileParserNparse_methods6MnSconstantPoolHandle_ipnLAccessFlags_ppnPobjArrayOopDesc_66pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserMparse_method6MnSconstantPoolHandle_ipnLAccessFlags_pnPtypeArrayHandle_55pnGThread__nMmethodHandle__; +text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbDverify_legal_method_modifiers6MiinMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserWparse_linenumber_table6MIIpipnGThread__pC_; +text: .text%__1cbFCompressedLineNumberWriteStream2t6Mi_v_; +text: .text%__1cVCompressedWriteStream2t6Mi_v_; +text: .text%__1cQCompressedStream2t6MpCi_v_; +text: .text%__1cbFCompressedLineNumberWriteStreamKwrite_pair6Mii_v_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: methodOop.o; +text: .text%__1cKoopFactoryKnew_method6FinLAccessFlags_iiipnGThread__pnNmethodOopDesc__; +text: .text%__1cKoopFactoryPnew_constMethod6FiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cQconstMethodKlassIallocate6MiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: constMethodKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: constMethodKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: constMethodKlass.o; +text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_; +text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__; +text: .text%__1cNmethodOopDescLobject_size6Fi_i_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: methodKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: methodKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: methodKlass.o; +text: .text%__1cNmethodOopDescJinit_code6M_v_; +text: .text%__1cRInvocationCounterEinit6M_v_; +text: .text%__1cRInvocationCounterFreset6M_v_; +text: .text%__1cRInvocationCounterJset_state6Mn0AFState__v_; +text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_; +text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_; +text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_; +text: .text%__1cRSignatureIteratorGexpect6Mc_v_; +text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_; +text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__; +text: .text%__1cNmethodOopDescVclear_native_function6M_v_; +text: .text%__1cNmethodOopDescTset_native_function6MpC_v_; +text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_; +text: .text%__1cVCompressedWriteStreamEgrow6M_v_; +text: .text%__1cRSignatureIteratorKparse_type6M_i_; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: frame.o; +text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_: frame.o; +text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_; +text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_; +text: .text%__1cSconstMethodOopDescbEchecked_exceptions_length_addr6kM_pH_; +text: 
.text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__; +text: .text%__1cXcopy_u2_with_conversion6FpH0i_v_: classFileParser.o; +text: .text%__1cNSignatureInfoHdo_long6M_v_: frame.o; +text: .text%__1cNSignatureInfoGdo_int6M_v_: frame.o; +text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__; +text: .text%__1cNmethodOopDescMsort_methods6FpnPobjArrayOopDesc_222_v_; +text: .text%method_compare: methodOop.o; +text: .text%__1cLklassVtablebKcompute_vtable_size_and_num_mirandas6Fri1pnMklassOopDesc_pnPobjArrayOopDesc_nLAccessFlags_pnHoopDesc_pnNsymbolOopDesc_5_v_; +text: .text%__1cLklassVtableWneeds_new_vtable_entry6FpnNmethodOopDesc_pnMklassOopDesc_pnHoopDesc_pnNsymbolOopDesc_nLAccessFlags__i_; +text: .text%__1cLklassVtableQget_num_mirandas6FpnMklassOopDesc_pnPobjArrayOopDesc_4_i_; +text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_; +text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_; +text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_; +text: .text%__1cPClassFileParserUcompute_oop_map_size6MnTinstanceKlassHandle_ii_i_; +text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlass.o; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceKlass.o; +text: .text%__1cNinstanceKlassQinit_implementor6M_v_; +text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cFKlassMoop_is_klass6kM_i_: symbolKlass.o; +text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_; +text: .text%__1cNinstanceKlassbBdo_local_static_fields_impl6FnTinstanceKlassHandle_pFpnPfieldDescriptor_pnGThread__v5_v_; +text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserNfill_oop_maps6MnTinstanceKlassHandle_ii_v_; +text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__; +text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_; +text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__; +text: .text%__1cNmethodOopDescPis_empty_method6kM_i_; +text: .text%__1cPClassFileParserYcheck_super_class_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_; +text: 
.text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cTClassLoadingServiceTnotify_class_loaded6FpnNinstanceKlass_i_v_; +text: .text%__1cTClassLoadingServiceScompute_class_size6FpnNinstanceKlass__I_; +text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceKlass.o; +text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLClassLoaderLadd_package6Fpkci_i_; +text: .text%__1cLClassLoaderOlookup_package6Fpkc_pnLPackageInfo__; +text: .text%__1cQPackageHashtableMcompute_hash6Mpkci_I_: classLoader.o; +text: .text%__1cQPackageHashtableJget_entry6MiIpkcI_pnLPackageInfo__: classLoader.o; +text: .text%__1cMstringStream2T6M_v_; +text: .text%__1cSThreadProfilerMark2T6M_v_; +text: .text%__1cFVTuneOend_class_load6F_v_; +text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cQSystemDictionaryRcheck_constraints6FiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cVLoaderConstraintTablePcheck_or_update6MnTinstanceKlassHandle_nGHandle_nMsymbolHandle__pkc_; +text: .text%__1cVLoaderConstraintTableWfind_loader_constraint6MnMsymbolHandle_nGHandle__ppnVLoaderConstraintEntry__; +text: .text%__1cQSystemDictionaryQadd_to_hierarchy6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cFKlassWappend_to_sibling_list6M_v_; +text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_; +text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_; +text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_; +text: .text%__1cQSystemDictionaryRupdate_dictionary6FiIiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cQSystemDictionaryQfind_placeholder6FiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: dictionary.o; +text: .text%__1cKDictionaryJnew_entry6MIpnMklassOopDesc_pnHoopDesc__pnPDictionaryEntry__; +text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_; +text: .text%__1cMObjectLocker2T6M_v_; +text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cPClassFileParserbIparse_constant_pool_fieldref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbSparse_constant_pool_interfacemethodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbEparse_constant_pool_long_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_; +text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cKJavaThreadNis_lock_owned6kMpC_i_; +text: .text%__1cGThreadLis_in_stack6kMpC_i_; +text: .text%__1cQSystemDictionaryVresolve_super_or_fail6FnMsymbolHandle_1nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cKReflectionTverify_class_access6FpnMklassOopDesc_2i_i_; +text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_modifiers6MiipnGThread__v_; +text: 
.text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_; +text: .text%__1cPClassFileParserWparse_field_attributes6MnSconstantPoolHandle_iHpHpi2pnPtypeArrayHandle_pnGThread__v_; +text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__; +text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cJchar2type6Fc_nJBasicType__: fieldType.o; +text: .text%__1cRSignatureIteratorSskip_optional_size6M_v_; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: frame.o; +text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: frame.o; +text: .text%__1cNSignatureInfoHdo_char6M_v_: frame.o; +text: .text%__1cNSignatureInfoIdo_float6M_v_: frame.o; +text: .text%__1cNSignatureInfoJdo_double6M_v_: frame.o; +text: .text%__1cbDreorder_based_on_method_index6FpnPobjArrayOopDesc_1ppnHoopDesc__v_: methodOop.o; +text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_; +text: .text%__1cLklassVtableKis_miranda6FpnNmethodOopDesc_pnPobjArrayOopDesc_pnMklassOopDesc__i_; +text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_; +text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_; +text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o; +text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_; +text: .text%__1cFKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cNmethodOopDescWis_vanilla_constructor6kM_i_; +text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_; +text: .text%__1cPClassFileParserXjava_lang_Class_fix_pre6MpnOobjArrayHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cPClassFileParserYjava_lang_Class_fix_post6Mpi_v_; +text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_; +text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_; +text: .text%__1cKSharedHeapYpermanent_object_iterate6MpnNObjectClosure__v_: genCollectedHeap.o; +text: .text%__1cHPermGenOobject_iterate6MpnNObjectClosure__v_: permGen.o; +text: .text%__1cRCompactingPermGenGas_gen6kM_pnKGeneration__: permGen.o; +text: .text%__1cbCOneContigSpaceCardGenerationOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cPContiguousSpaceOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cPContiguousSpaceTobject_iterate_from6MnJWaterMark_pnNObjectClosure__v_; +text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_: universe.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: klassKlass.o; +text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: instanceKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: instanceKlass.o; +text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: klassKlass.o; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: arrayKlassKlass.o; +text: 
.text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_: objArrayKlassKlass.o; +text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_: instanceKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_: symbolKlass.o; +text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_: typeArrayKlassKlass.o; +text: .text%__1cKarrayKlassLobject_size6kMi_i_; +text: .text%__1cLmethodKlassOklass_oop_size6kM_i_: methodKlass.o; +text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_: constMethodKlass.o; +text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_: methodDataKlass.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: arrayKlassKlass.o; +text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_: constantPoolKlass.o; +text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_: cpCacheKlass.o; +text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_: compiledICHolderKlass.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_: objArrayKlassKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: objArrayKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodKlass.o; +text: .text%__1cSinstanceKlassKlassMoop_is_klass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: instanceKlass.o; +text: .text%__1cFKlassXsearch_secondary_supers6kMpnMklassOopDesc__i_; +text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_; +text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceRefKlass.o; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceRefKlass.o; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceRefKlass.o; +text: .text%__1cKReflectionVis_same_class_package6FpnMklassOopDesc_2_i_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnMklassOopDesc__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceRefKlass.o; +text: .text%__1cNinstanceKlassVis_same_class_package6FpnHoopDesc_pnNsymbolOopDesc_24_i_; +text: .text%__1cEUTF8Hstrrchr6FpWiW_1_; +text: .text%__1cEUTF8Fequal6FpWi1i_i_; +text: .text%__1cPClassFileParserbFparse_constant_pool_float_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: frame.o; +text: .text%__1cNSignatureInfoIdo_short6M_v_: frame.o; +text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlass.o; +text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_; +text: .text%__1cPClassFileParserbGparse_constant_pool_double_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_; +text: .text%__1cQSystemDictionarybDinitialize_basic_type_mirrors6FpnGThread__v_; +text: 
.text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cNobjArrayKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cXreferenceProcessor_init6F_v_; +text: .text%__1cSReferenceProcessorMinit_statics6F_v_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_; +text: .text%__1cQjni_handles_init6F_v_; +text: .text%__1cKJNIHandlesKinitialize6F_v_; +text: .text%__1cOvmStructs_init6F_v_; +text: .text%__1cVverificationType_init6F_v_; +text: .text%__1cQVerificationTypeKinitialize6F_v_; +text: .text%__1cOcompiler1_init6F_v_; +text: .text%__1cKSharedInfoKset_stack06Fi_v_; +text: .text%__1cKSharedInfoLset_regName6F_v_; +text: .text%__1cIRegAllocYinit_register_allocation6F_v_; +text: .text%__1cIFrameMapEinit6F_v_; +text: .text%__1cKc1_RegMaskKinit_masks6Fi_v_; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_FrameMap_x86.o; +text: .text%__1cNc1_AllocTableLinit_tables6F_v_; +text: .text%__1cIFrameMapOfirst_register6F_pnMRegisterImpl__; +text: .text%__1cIFrameMapLcpu_reg2rnr6FpnMRegisterImpl__i_; +text: .text%__1cIFrameMapLcpu_rnr2reg6Fi_pnMRegisterImpl__; +text: .text%__1cIRuntime1Kinitialize6F_v_; +text: .text%__1cKCodeBufferRinsts_memory_size6Fi_i_; +text: .text%__1cKCodeBufferQlocs_memory_size6Fi_i_; +text: .text%__1cIRuntime1Ninitialize_pd6F_v_; +text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_; +text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffer_ipnLOopRecorder_pkcii_v_; +text: .text%__1cKCodeBufferQalloc_relocation6MI_v_; +text: .text%__1cJOopMapSet2t6M_v_; +text: .text%__1cJAssemblerEsubl6MnHAddress_i_v_; +text: .text%__1cTsave_live_registers6FpnOMacroAssembler_i_pnGOopMap__: c1_Runtime1_x86.o; +text: .text%__1cJAssemblerGfldenv6MnHAddress__v_; +text: .text%__1cGOopMap2t6Mii_v_; +text: .text%__1cGOopMapQset_callee_saved6MnHOptoRegEName_ii2_v_; +text: .text%__1cGOopMapHset_xxx6MnHOptoRegEName_nLOopMapValueJoop_types_ii2_v_; +text: .text%__1cGOopMapbEmap_compiler_reg_to_oopmap_reg6MnHOptoRegEName_ii_nFVMRegEName__; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: oopMap.o; +text: .text%__1cIFrameMapRfpu_stack_regname6Fi_nHOptoRegEName__; +text: .text%__1cKRelocationJpack_data6M_i_: codeBlob.o; +text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_; +text: .text%__1cJrelocInfo2t6Mn0AJrelocType_ii_v_; +text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_22pC_v_; +text: .text%__1cJOopMapSetKadd_gc_map6MiipnGOopMap__v_; +text: .text%__1cOMacroAssemblerVreset_last_Java_frame6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerEdecl6MnHAddress__v_; +text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_; +text: .text%__1cICodeBlobPallocation_size6FpnKCodeBuffer_ii_I_; +text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_; +text: .text%__1cSDeoptimizationBlob2n6FII_pv_; +text: .text%__1cSDeoptimizationBlob2t6MpnKCodeBuffer_ipnJOopMapSet_iiii_v_; +text: .text%__1cICodeBlob2t6MpkcpnKCodeBuffer_iiipnJOopMapSet_i_v_; +text: .text%__1cKCodeBufferPcopy_relocation6MpnICodeBlob__v_; +text: .text%__1cNRelocIteratorMcreate_index6FpnKCodeBuffer_pnJrelocInfo_4_4_; +text: .text%__1cKCodeBufferJcopy_code6MpnICodeBlob__v_; +text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_; +text: .text%__1cICodeBlobWfix_relocation_at_move6Mi_v_; +text: .text%__1cNRelocIteratorKinitialize6MipnICodeBlob_pC3_v_; +text: .text%__1cNRelocIteratorKset_limits6MpC1_v_; +text: .text%__1cVPatchingRelocIteratorHprepass6M_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: relocInfo.o; 
+text: .text%__1cNRelocIteratorEnext6M_i_: codeBlob.o; +text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__; +text: .text%__1cOCallRelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cOCallRelocationFvalue6M_pC_: codeBlob.o; +text: .text%__1cKRelocationTpd_call_destination6M_pC_; +text: .text%__1cKNativeCallLdestination6kM_pC_; +text: .text%__1cOCallRelocationPset_destination6MpCi_v_; +text: .text%__1cKRelocationXpd_set_call_destination6MpCi_v_; +text: .text%__1cRNativeInstructionFwrote6Mi_v_; +text: .text%__1cKRelocationLunpack_data6M_v_: relocInfo.o; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: relocInfo.o; +text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_; +text: .text%__1cJOopMapSetJheap_size6kM_i_; +text: .text%__1cGOopMapJheap_size6kM_i_; +text: .text%__1cJOopMapSetHcopy_to6MpC_v_; +text: .text%__1cGOopMapHcopy_to6MpC_v_; +text: .text%__1cIRuntime1Rgenerate_blob_for6Fn0AGStubID__v_; +text: .text%__1cIRuntime1Pnew_code_buffer6F_pnKCodeBuffer__; +text: .text%__1cLOopRecorder2t6MpnFArena__v_; +text: .text%__1cNStubAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cIRuntime1Rgenerate_code_for6Fn0AGStubID_pnNStubAssembler_pi_pnJOopMapSet__; +text: .text%__1cIRuntime1Iname_for6Fn0AGStubID__pkc_; +text: .text%__1cLRuntimeStubQnew_runtime_stub6FpkcpnKCodeBuffer_ipnJOopMapSet_i_p0_; +text: .text%__1cLRuntimeStub2n6FII_pv_; +text: .text%__1cLRuntimeStub2t6MpkcpnKCodeBuffer_iipnJOopMapSet_i_v_; +text: .text%__1cJStubFrame2t6MpnNStubAssembler_pkci_v_; +text: .text%__1cNStubAssemblerIset_info6Mpkci_v_; +text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pC2_i_; +text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pCi_i_; +text: .text%__1cJStubFrame2T6M_v_; +text: .text%__1cIRuntime1Ygenerate_exception_throw6FpnNStubAssembler_pCpnMRegisterImpl__pnJOopMapSet__; +text: .text%__1cOMacroAssemblerLtlab_refill6MrnFLabel_22_v_; +text: .text%__1cLlog2_intptr6Fi_i_: assembler_x86.o; +text: .text%__1cOMacroAssemblerNeden_allocate6MpnMRegisterImpl_2i2rnFLabel__v_; +text: .text%__1cOMacroAssemblerLverify_tlab6M_v_; +text: .text%__1cLlog2_intptr6Fi_i_: c1_Runtime1_x86.o; +text: .text%__1cOMacroAssemblerNtlab_allocate6MpnMRegisterImpl_2i22rnFLabel__v_; +text: .text%__1cRC1_MacroAssemblerRinitialize_object6MpnMRegisterImpl_22i22_v_; +text: .text%__1cRC1_MacroAssemblerRinitialize_header6MpnMRegisterImpl_22_v_; +text: .text%__1cRC1_MacroAssemblerPinitialize_body6MpnMRegisterImpl_2i2_v_; +text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pC22_i_; +text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pC222_i_; +text: .text%__1cIRuntime1Iblob_for6Fn0AGStubID__pnICodeBlob__; +text: .text%__1cJStubFrame2t6MpnNStubAssembler_pkcipnMRegisterImpl_6_v_; +text: .text%__1cJStubFrame2t6MpnNStubAssembler_pkcipnMRegisterImpl__v_; +text: .text%__1cIiEntries2t6Miiii_v_; +text: .text%__1cRNativeGeneralJumpQjump_destination6kM_pC_; +text: .text%__1cJAssemblerOlocate_operand6FpCn0AMWhichOperand__1_; +text: .text%__1cIRuntime1Rgenerate_patching6FpnNStubAssembler_pC_pnJOopMapSet__; +text: .text%__1cWrestore_live_registers6FpnOMacroAssembler__v_: c1_Runtime1_x86.o; +text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNSafepointBlob2n6FII_pv_; +text: .text%__1cNSafepointBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cJAssemblerFfldcw6MnHAddress__v_; +text: .text%__1cJAssemblerGfnstcw6MnHAddress__v_; +text: .text%__1cJAssemblerHfcomp_d6MnHAddress__v_; +text: .text%__1cIiEntriesIset_base6MpC_v_; +text: .text%__1cQvtableStubs_init6F_v_; +text: 
.text%__1cLVtableStubsKinitialize6F_v_; +text: .text%__1cWInlineCacheBuffer_init6F_v_; +text: .text%__1cRInlineCacheBufferKinitialize6F_v_; +text: .text%__1cRInlineCacheBufferOinit_next_stub6F_v_; +text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_; +text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__; +text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_: icBuffer.o; +text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_: icBuffer.o; +text: .text%__1cTcompilerOracle_init6F_v_; +text: .text%__1cOCompilerOracleRparse_from_string6Fpkc_v_; +text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_; +text: .text%__1cOCompilerOraclePparse_from_file6F_v_; +text: .text%__1cHcc_file6F_pkc_: compilerOracle.o; +text: .text%__1cXonStackReplacement_init6F_v_; +text: .text%__1cSOnStackReplacementKinitialize6F_v_; +text: .text%__1cUGenericGrowableArrayPraw_at_put_grow6MipknEGrET_3_v_; +text: .text%__1cWcompilationPolicy_init6F_v_; +text: .text%__1cSuniverse_post_init6F_v_; +text: .text%__1cIUniverseWreinitialize_vtable_of6FpnFKlass_pnGThread__v_; +text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceKlass.o; +text: .text%__1cLklassVtableVinitialize_from_super6MnLKlassHandle__i_; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlass.o; +text: .text%__1cLklassVtableTupdate_super_vtable6MpnNinstanceKlass_pnNmethodOopDesc_i_i_; +text: .text%__1cLklassVtableNput_method_at6MpnNmethodOopDesc_i_v_; +text: .text%__1cLklassVtableQfill_in_mirandas6Mri_v_; +text: .text%__1cFKlassIsubklass6kM_p0_; +text: .text%__1cLklassVtableOcopy_vtable_to6MpnLvtableEntry__v_; +text: .text%__1cLklassVtableXvtable_accessibility_at6Mi_n0AKAccessType__; +text: .text%__1cFKlassMnext_sibling6kM_p0_; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: objArrayKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlass.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: constantPoolKlass.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: typeArrayKlass.o; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceRefKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceRefKlass.o; +text: .text%__1cIUniverseUreinitialize_itables6F_v_; +text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_; +text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_; +text: .text%__1cbBinitialize_itable_for_klass6FpnMklassOopDesc__v_; +text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__; +text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_; +text: .text%__1cLklassItableRinitialize_itable6M_v_; +text: .text%__1cLklassItablebFinitialize_itable_for_interface6MpnMklassOopDesc_pnRitableMethodEntry__v_; +text: .text%__1cRitableMethodEntryKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6FnTinstanceKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlass.o; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: 
.text%__1cNobjArrayKlassQarray_klass_impl6FnTobjArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: instanceKlass.o; +text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_I_pnIHeapWord__; +text: .text%__1cQGenCollectedHeapVunsafe_max_tlab_alloc6kM_I_; +text: .text%__1cQDefNewGenerationVunsafe_max_tlab_alloc6kM_I_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationVunsafe_max_alloc_nogc6kM_I_; +text: .text%__1cPContiguousSpaceEfree6kM_I_: space.o; +text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_; +text: .text%__1cWThreadLocalAllocBufferFreset6M_v_; +text: .text%__1cQGenCollectedHeapRallocate_new_tlab6MI_pnIHeapWord__; +text: .text%__1cQGenCollectedHeapMmem_allocate6MIii_pnIHeapWord__; +text: .text%__1cbCTwoGenerationCollectorPolicyRmem_allocate_work6MIii_pnIHeapWord__; +text: .text%__1cQGenCollectedHeapEheap6F_p0_; +text: .text%__1cQDefNewGenerationPshould_allocate6MIii_i_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationMpar_allocate6MIii_pnIHeapWord__: defNewGeneration.o; +text: .text%__1cJEdenSpaceMpar_allocate6MI_pnIHeapWord__; +text: .text%__1cPContiguousSpaceRpar_allocate_impl6MIkpnIHeapWord__2_: space.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: collectedHeap.o; +text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2I_v_; +text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__; +text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cEUTF8Ounicode_length6Fpkc_i_; +text: .text%__1cEUTF8Enext6FpkcpH_pc_; +text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cQGenCollectedHeapVlarge_typearray_limit6M_I_; +text: .text%__1cbCTwoGenerationCollectorPolicyYis_two_generation_policy6M_i_: collectorPolicy.o; +text: .text%__1cbCTwoGenerationCollectorPolicyVlarge_typearray_limit6M_I_; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: typeArrayKlass.o; +text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_; +text: .text%__1cQjava_lang_StringQbasic_create_oop6FpnQtypeArrayOopDesc_ipnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_; +text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_; +text: .text%__1cNinstanceKlassPlink_class_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassLverify_code6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIVerifierRverify_byte_codes6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassWadd_loader_constraints6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_; +text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIRewriterScompute_index_maps6FnSconstantPoolHandle_rpnIintArray_rpnIintStack__v_; +text: .text%__1cIintArray2t6Mki1_v_: rewriter.o; +text: 
.text%__1cIRewriterXnew_constant_pool_cache6FrnIintArray_pnGThread__nXconstantPoolCacheHandle__; +text: .text%__1cKoopFactoryVnew_constantPoolCache6FipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cWconstantPoolCacheKlassIallocate6MipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: cpCacheKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: cpCacheKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: cpCacheKlass.o; +text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_; +text: .text%__1cWConstantPoolCacheEntryRset_initial_state6Mi_v_; +text: .text%__1cIRewriterOrewrite_method6FnMmethodHandle_rnIintArray_pnGThread__1_; +text: .text%__1cNmethodOopDescLlink_method6FnMmethodHandle__v_; +text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__; +text: .text%__1cNmethodOopDescbGupdate_compiled_code_entry_point6Mi_v_; +text: .text%__1cIRuntime1Mientries_for6FnMmethodHandle__pnIiEntries__; +text: .text%__1cNmethodOopDescLis_accessor6kM_i_; +text: .text%__1cNmethodOopDescMintrinsic_id6kM_n0ALIntrinsicId__; +text: .text%__1cQSystemDictionaryXcheck_signature_loaders6FnMsymbolHandle_nGHandle_2ipnGThread__v_; +text: .text%__1cNSharedRuntimebIinitialize_StrictMath_entry_points6F_v_; +text: .text%__1cNSharedRuntimeUlookup_function_DD_D6FrpFpnHJNIEnv__pnH_jclass_dd_dpkc_v_; +text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_; +text: .text%__1cMNativeLookupGlookup6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cNmethodOopDescThas_native_function6kM_i_; +text: .text%__1cMNativeLookupLlookup_base6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cMNativeLookupNpure_jni_name6FnMmethodHandle__pc_; +text: .text%__1cMoutputStreamFprint6MpkcE_v_; +text: .text%__1cMoutputStreamMdo_vsnprintf6FpcIpkcpvirI_3_; +text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__; +text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc__v_: nativeLookup.o; +text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc_ii_v_: nativeLookup.o; +text: .text%__1cMoutputStreamDput6Mc_v_; +text: .text%__1cMNativeLookupMlookup_style6FnMmethodHandle_pcpkciiripnGThread__pC_; +text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_; +text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_; +text: .text%__1cVlookup_special_native6Fpc_pC_: nativeLookup.o; +text: .text%__1cbEinitialize_converter_functions6F_v_; +text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_; +text: .text%__1cQGenCollectedHeapIcapacity6kM_I_; +text: .text%__1cQDefNewGenerationIcapacity6kM_I_; +text: .text%__1cQGenCollectedHeapEused6kM_I_; +text: .text%__1cQDefNewGenerationEused6kM_I_; +text: .text%__1cbCOneContigSpaceCardGenerationEused6kM_I_; +text: .text%__1cQGenCollectedHeapPpost_initialize6M_v_; +text: .text%__1cQGenCollectedHeapTref_processing_init6M_v_; +text: .text%__1cKSharedHeapTref_processing_init6M_v_; +text: .text%__1cKGenerationSref_processor_init6M_v_; +text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: compactingPermGenGen.o; +text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: compactingPermGenGen.o; +text: .text%__1cSReferenceProcessor2t6MnJMemRegion_iii_v_; +text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: defNewGeneration.o; +text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: defNewGeneration.o; +text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: 
tenuredGeneration.o; +text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: tenuredGeneration.o; +text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_; +text: .text%__1cQGenCollectedHeapEkind6M_nNCollectedHeapEName__: genCollectedHeap.o; +text: .text%__1cNMemoryServicebBadd_gen_collected_heap_info6FpnQGenCollectedHeap__v_; +text: .text%__1cPMarkSweepPolicyUis_mark_sweep_policy6M_i_: collectorPolicy.o; +text: .text%__1cNMemoryManagerXget_copy_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cPGCMemoryManager2t6M_v_; +text: .text%__1cNMemoryManagerWget_msc_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cNMemoryServicebAadd_generation_memory_pool6FpnKGeneration_pnNMemoryManager_4_v_; +text: .text%__1cQDefNewGenerationEkind6M_nKGenerationEName__: defNewGeneration.o; +text: .text%__1cNMemoryServiceJadd_space6FpnPContiguousSpace_pkciIi_pnKMemoryPool__; +text: .text%__1cTContiguousSpacePool2t6MpnPContiguousSpace_pkcnKMemoryPoolIPoolType_Ii_v_; +text: .text%__1cNMemoryServiceTadd_survivor_spaces6FpnQDefNewGeneration_pkciIi_pnKMemoryPool__; +text: .text%__1cbBSurvivorContiguousSpacePool2t6MpnQDefNewGeneration_pkcnKMemoryPoolIPoolType_Ii_v_; +text: .text%__1cRTenuredGenerationEkind6M_nKGenerationEName__: tenuredGeneration.o; +text: .text%__1cNMemoryServiceHadd_gen6FpnKGeneration_pkcii_pnKMemoryPool__; +text: .text%__1cOGenerationPool2t6MpnKGeneration_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cKGenerationMmax_capacity6kM_I_; +text: .text%__1cNMemoryServicebGadd_compact_perm_gen_memory_pool6FpnUCompactingPermGenGen_pnNMemoryManager__v_; +text: .text%__1cQGenCollectedHeapNgc_threads_do6kMpnNThreadClosure__v_; +text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_; +text: .text%__1cKGCStatInfo2t6Mi_v_; +text: .text%__1cQjavaClasses_init6F_v_; +text: .text%__1cLJavaClassesPcompute_offsets6F_v_; +text: .text%__1cQjava_lang_SystemPcompute_offsets6F_v_; +text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_; +text: .text%__1cNinstanceKlassQfind_local_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__i_; +text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_; +text: .text%__1cbIjava_security_AccessControlContextPcompute_offsets6F_v_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectPcompute_offsets6F_v_; +text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_; +text: .text%__1cPjava_nio_BufferPcompute_offsets6F_v_; +text: .text%__1cYsun_reflect_ConstantPoolPcompute_offsets6F_v_; +text: .text%__1cZsun_misc_AtomicLongCSImplPcompute_offsets6F_v_; +text: .text%__1cSstubRoutines_init26F_v_; +text: .text%__1cMStubRoutinesLinitialize26F_v_; +text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_x86.o; +text: .text%__1cNStubGeneratorTgenerate_verify_oop6M_pC_: stubGenerator_x86.o; +text: .text%__1cJAssemblerEincl6MnHAddress__v_; +text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_; +text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_; +text: .text%__1cGThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cGThreadVis_jvmti_agent_thread6kM_i_: thread.o; +text: .text%__1cGEventsDlog6FpkcE_v_: thread.o; +text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_; +text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_; +text: .text%__1cUGenericGrowableArrayUclear_and_deallocate6M_v_; +text: .text%__1cIVMThreadGcreate6F_v_; +text: 
.text%__1cIVMThread2t6M_v_; +text: .text%__1cQVMOperationQdDueue2t6M_v_; +text: .text%__1cCosNcreate_thread6FpnGThread_n0AKThreadType_I_i_; +text: .text%__1cCosMstart_thread6FpnGThread__v_; +text: .text%__1cCosPpd_start_thread6FpnGThread__v_; +text: .text%__1cHMonitorEwait6Mil_i_; +text: .text%_start: os_solaris.o; +text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__; +text: .text%__1cQset_lwp_priority6Fiii_i_; +text: .text%__1cVscale_to_lwp_priority6Fiii_i_: os_solaris.o; +text: .text%__1cIVMThreadMis_VM_thread6kM_i_: vmThread.o; +text: .text%__1cIVMThreadDrun6M_v_; +text: .text%__1cHMonitorGnotify6M_i_; +text: .text%__1cIVMThreadEloop6M_v_; +text: .text%__1cQVMOperationQdDueueLremove_next6M_pnMVM_Operation__; +text: .text%__1cQVMOperationQdDueueLqueue_empty6Mi_i_; +text: .text%__1cQVMOperationQdDueueSqueue_remove_front6Mi_pnMVM_Operation__; +text: .text%__1cLJvmtiExportRenter_start_phase6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_start6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_start6F_v_; +text: .text%__1cQinitialize_class6FnMsymbolHandle_pnGThread__v_: thread.o; +text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_; +text: .text%__1cNinstanceKlassVshould_be_initialized6kM_i_; +text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassWcall_class_initializer6MpnGThread__v_; +text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassRclass_initializer6M_pnNmethodOopDesc__; +text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_; +text: .text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cRCompilationPolicyOmustBeCompiled6FnMmethodHandle__i_; +text: .text%__1cRCompilationPolicyNcanBeCompiled6FnMmethodHandle__i_; +text: .text%__1cNmethodOopDescRis_not_compilable6kMi_i_; +text: .text%__1cRruntime_type_from6FpnJJavaValue__nJBasicType__: javaCalls.o; +text: .text%__1cCosbCstack_shadow_pages_available6FpnGThread_nMmethodHandle__i_; +text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_; +text: .text%__1cPJavaCallWrapper2t6MnMmethodHandle_nGHandle_pnJJavaValue_pnGThread__v_; +text: .text%__1cGThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%__1cGThreadXclear_pending_exception6M_v_; +text: .text%__1cRJavaCallArgumentsKparameters6M_pi_; +text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cKJavaThreadPcook_last_frame6MnFframe__1_; +text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__; +text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_; +text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_; +text: .text%__1cMLinkResolverUresolve_invokestatic6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverNresolve_klass6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__; +text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: 
instanceKlass.o; +text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_; +text: .text%__1cTconstantPoolOopDescMklass_at_put6MipnMklassOopDesc__v_: constantPoolOop.o; +text: .text%__1cTconstantPoolOopDescLname_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cTconstantPoolOopDescQsignature_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cMLinkResolverTresolve_static_call6FrnICallInfo_rnLKlassHandle_nMsymbolHandle_53iipnGThread__v_; +text: .text%__1cMLinkResolverbElinktime_resolve_static_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverYlookup_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_; +text: .text%__1cKReflectionTverify_field_access6FpnMklassOopDesc_22nLAccessFlags_ii_i_; +text: .text%__1cICallInfoDset6MnLKlassHandle_nMmethodHandle_pnGThread__v_; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2ipnGThread__v_; +text: .text%__1cSInterpreterRuntimeLcache_entry6FpnKJavaThread__pnWConstantPoolCacheEntry__: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_; +text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__; +text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_; +text: .text%__1cNSignatureInfoHdo_void6M_v_: bytecode.o; +text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_: bytecode.o; +text: .text%__1cRSignatureIteratorTcheck_signature_end6M_v_; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: cpCacheOop.o; +text: .text%__1cNmethodOopDescPis_final_method6kM_i_; +text: .text%__1cWConstantPoolCacheEntryIas_flags6MnITosState_iiiii_i_; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_16MnJBytecodesECode__v_; +text: .text%__1cWConstantPoolCacheEntryGverify6kMpnMoutputStream__v_; +text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_; +text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_; +text: .text%__1cXSignatureHandlerLibraryKinitialize6F_v_; +text: .text%__1cXSignatureHandlerLibraryQset_handler_blob6F_pC_; +text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_; +text: .text%__1cNFingerprinterLfingerprint6M_X_: interpreterRuntime.o; +text: .text%__1cNGrowableArray4CX_Efind6kMkX_i_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6MX_v_; +text: .text%__1cRSignatureIteratorSiterate_parameters6MX_v_; +text: .text%__1cXSignatureHandlerLibraryLset_handler6FpnKCodeBuffer__pC_; +text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_; +text: .text%jni_RegisterNatives: jni.o; +text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__; +text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__; +text: .text%__1cPregister_native6FnLKlassHandle_nMsymbolHandle_1pCpnGThread__i_: jni.o; +text: .text%__1cPJavaCallWrapper2T6M_v_; +text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_; +text: 
.text%__1cNinstanceKlassbJset_initialization_state_and_notify6Mn0AKClassState_pnGThread__v_; +text: .text%__1cNinstanceKlassbOset_initialization_state_and_notify_impl6FnTinstanceKlassHandle_n0AKClassState_pnGThread__v_; +text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_; +text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_; +text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_ipnGThread__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_iipnGThread__v_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_; +text: .text%__1cPFieldAccessInfoDset6MnLKlassHandle_nMsymbolHandle_iinJBasicType_nLAccessFlags__v_; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryJset_field6MnJBytecodesECode_2nLKlassHandle_iinITosState_ii_v_; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_26MnJBytecodesECode__v_; +text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_; +text: .text%__1cNinstanceKlassbDcheck_valid_for_instantiation6MipnGThread__v_; +text: .text%__1cMLinkResolverVresolve_invokespecial6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverUresolve_special_call6FrnICallInfo_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbFlinktime_resolve_special_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_special_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_4ipnGThread__v_; +text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_: cpCacheOop.o; +text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_: cpCacheOop.o; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: bytecode.o; +text: .text%__1cNSignatureInfoHdo_long6M_v_: bytecode.o; +text: .text%JVM_CurrentTimeMillis; +text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cSInterpreterRuntimeDldc6FpnKJavaThread_i_v_; +text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cLsymbolKlassNoop_is_symbol6kM_i_: symbolKlass.o; +text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_; +text: .text%__1cEUTF8Ounicode_length6Fpkci_i_; +text: .text%__1cLStringTableGintern6FnGHandle_pHipnGThread__pnHoopDesc__; +text: .text%__1cLStringTableLhash_string6FpHi_i_; +text: .text%__1cLStringTableGlookup6MipHiI_pnHoopDesc__; +text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__; +text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__; +text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cQjava_lang_StringMbasic_create6FpnQtypeArrayOopDesc_ipnGThread__nGHandle__; +text: 
.text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cNmethodOopDescIbci_from6kMpC_i_; +text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__; +text: .text%__1cPBytecode_invokeFindex6kM_i_; +text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_; +text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__; +text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverUresolve_virtual_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cMLinkResolverbFlinktime_resolve_virtual_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_virtual_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cFKlassXcan_be_statically_bound6FpnNmethodOopDesc__i_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: bytecode.o; +text: .text%__1cNSignatureInfoHdo_char6M_v_: bytecode.o; +text: .text%__1cFKlassOis_subclass_of6kMpnMklassOopDesc__i_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnHoopDesc_pnNsymbolOopDesc__i_; +text: .text%__1cJBytecodesRspecial_length_at6FpC_i_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_; +text: .text%__1cMNativeLookupNlong_jni_name6FnMmethodHandle__pc_; +text: .text%__1cNFingerprinterJdo_object6Mii_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorDbox6Mii_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEfrom6F_pnMRegisterImpl__; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorCto6F_pnMRegisterImpl__; +text: .text%JVM_DoPrivileged; +text: .text%__1cLmethodKlassNoop_is_method6kM_i_: methodKlass.o; +text: .text%__1cMvframeStream2t6MpnKJavaThread_i_v_; +text: .text%__1cLRegisterMap2t6MpnKJavaThread_i_v_; +text: .text%__1cLRegisterMapFclear6Mpi_v_; +text: .text%__1cSvframeStreamCommonPfill_from_frame6M_i_; +text: .text%__1cFframeUis_interpreted_frame6kM_i_; +text: .text%__1cSvframeStreamCommonbBfill_from_interpreter_frame6M_v_; +text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_; +text: .text%__1cSvframeStreamCommonbHskip_method_invoke_and_aux_frames6M_v_; +text: .text%__1cSvframeStreamCommonEnext6M_v_; +text: .text%__1cFframeGsender6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cFframeOis_entry_frame6kM_i_; +text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_; +text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__; +text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__; +text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: bytecode.o; +text: .text%__1cNSharedRuntimeDf2i6Ff_i_; +text: .text%jni_FindClass: jni.o; +text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__; +text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__; +text: 
.text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_; +text: .text%jni_NewGlobalRef: jni.o; +text: .text%__1cKJNIHandlesLmake_global6FnGHandle_i_pnI_jobject__; +text: .text%jni_GetStringUTFChars: jni.o; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_; +text: .text%__1cHUNICODEHas_utf86FpHi_pc_; +text: .text%__1cHUNICODELutf8_length6FpHi_i_; +text: .text%__1cKutf8_write6FpCH_0_: utf8.o; +text: .text%JVM_FindPrimitiveClass; +text: .text%__1cJname2type6Fpkc_nJBasicType__; +text: .text%jni_ReleaseStringUTFChars; +text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o; +text: .text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_; +text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_; +text: .text%JVM_CurrentThread; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: bytecode.o; +text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_; +text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNFingerprinterGdo_int6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEmove6Mii_v_; +text: .text%JVM_ArrayCopy; +text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cOtypeArrayKlassQoop_is_typeArray6kM_i_: typeArrayKlass.o; +text: .text%JVM_GetStackAccessControlContext; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_; +text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__; +text: .text%__1cJCodeCacheQfind_blob_unsafe6Fpv_pnICodeBlob__; +text: .text%__1cICodeHeapKfind_start6kMpv_1_; +text: .text%__1cICodeBlobJis_zombie6kM_i_: codeBlob.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: codeBlob.o; +text: .text%__1cFframeOis_first_frame6kM_i_; +text: .text%__1cFframeUentry_frame_is_first6kM_i_; +text: .text%__1cbIjava_security_AccessControlContextGcreate6FnOobjArrayHandle_inGHandle_pnGThread__pnHoopDesc__; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_; +text: .text%__1cTJvmtiEventCollectorYunset_jvmti_thread_state6M_v_; +text: .text%JVM_GetInheritedAccessControlContext; +text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_; +text: .text%JVM_SetThreadPriority; +text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__; +text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_; +text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__; +text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_; +text: .text%JVM_IsThreadAlive; +text: .text%__1cQjava_lang_ThreadIis_alive6FpnHoopDesc__i_; +text: .text%JVM_StartThread; +text: .text%__1cQjava_lang_ThreadMis_stillborn6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_; +text: .text%__1cKJavaThread2t6MpFp0pnGThread__vI_v_; +text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_; +text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__; +text: .text%__1cQjava_lang_ThreadJis_daemon6FpnHoopDesc__i_; +text: .text%__1cGThreadFstart6Fp0_v_; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceRefKlass.o; +text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o; +text: 
.text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cKJavaThreadDrun6M_v_; +text: .text%__1cKJavaThreadRthread_main_inner6M_v_; +text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_; +text: .text%__1cNFingerprinterHdo_long6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_: interpreterRuntime.o; +text: .text%JVM_MonitorWait; +text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__; +text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_; +text: .text%__1cSObjectSynchronizerHinflate6FpnHoopDesc__pnNObjectMonitor__; +text: .text%__1cNObjectMonitor2t6M_v_; +text: .text%__1cNObjectMonitorHRecycle6M_v_; +text: .text%__1cNObjectMonitorEwait6MxipnGThread__v_; +text: .text%__1cGThreadOis_interrupted6Fp0i_i_; +text: .text%__1cCosOis_interrupted6FpnGThread_i_i_; +text: .text%__1cNObjectMonitorEexit6MpnGThread__v_; +text: .text%__1cCosHSolarisFEventEpark6M_v_: objectMonitor_solaris.o; +text: .text%jni_GetObjectClass: jni.o; +text: .text%jni_GetMethodID: jni.o; +text: .text%__1cNget_method_id6FpnHJNIEnv__pnH_jclass_pkc5ipnGThread__pnK_jmethodID__: jni.o; +text: .text%__1cPjava_lang_ClassMis_primitive6FpnHoopDesc__i_; +text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__; +text: .text%__1cMjniIdSupportNto_jmethod_id6FpnNmethodOopDesc__pnK_jmethodID__; +text: .text%__1cMjniIdPrivateGid_for6FnTinstanceKlassHandle_i_i_: jniId.o; +text: .text%__1cIjniIdMapGcreate6FnTinstanceKlassHandle__p0_; +text: .text%__1cIjniIdMapRcompute_index_cnt6FnTinstanceKlassHandle__i_; +text: .text%__1cIjniIdMap2t6MpnMklassOopDesc_i_v_; +text: .text%__1cLjniIdBucket2t6MpnIjniIdMap_p0_v_; +text: .text%jni_NewStringUTF: jni.o; +text: .text%jni_CallObjectMethod: jni.o; +text: .text%__1cMjniIdSupportNto_method_oop6FpnK_jmethodID__pnNmethodOopDesc__; +text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_; +text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cNFingerprinterLfingerprint6M_X_: jni.o; +text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6MX_v_: jni.o; +text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_: jni.o; +text: .text%jni_ExceptionOccurred: jni.o; +text: .text%__1cbAjni_check_async_exceptions6FpnKJavaThread__v_: jni.o; +text: .text%__1cKJavaThreadbHcheck_and_handle_async_exceptions6Mi_v_; +text: .text%jni_DeleteLocalRef: jni.o; +text: .text%__1cOJNIHandleBlockRrebuild_free_list6M_v_; +text: .text%jni_EnsureLocalCapacity; +text: .text%jni_GetStaticMethodID: jni.o; +text: .text%jni_CallStaticObjectMethodV: jni.o; +text: .text%__1cRjni_invoke_static6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%jni_ExceptionCheck: jni.o; +text: .text%jni_NewString: jni.o; +text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__; +text: .text%JVM_InitProperties; +text: 
.text%__1cMset_property6FnGHandle_pkc2pnGThread__v_: jvm.o; +text: .text%__1cQjava_lang_StringbHcreate_from_platform_depended_str6FpkcpnGThread__nGHandle__; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cKSharedHeapXfill_region_with_object6FnJMemRegion__v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: sharedHeap.o; +text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_; +text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__; +text: .text%jni_GetFieldID: jni.o; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2ipnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassSregister_finalizer6FpnPinstanceOopDesc_pnGThread__2_; +text: .text%__1cJFieldTypeYis_valid_array_signature6FpnNsymbolOopDesc__i_; +text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__; +text: .text%__1cJFieldTypeSskip_optional_size6FpnNsymbolOopDesc_pi_v_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6FnUtypeArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%JVM_RegisterUnsafeMethods; +text: .text%JVM_IsArrayClass; +text: .text%JVM_GetComponentType; +text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_; +text: .text%__1cKReflectionbFbasic_type_arrayklass_to_mirror6FpnMklassOopDesc_pnGThread__pnHoopDesc__; +text: .text%JVM_IsPrimitiveClass; +text: .text%JVM_GetClassLoader; +text: .text%JVM_DesiredAssertionStatus; +text: .text%__1cOJavaAssertionsHenabled6Fpkci_i_; +text: .text%__1cOJavaAssertionsLmatch_class6Fpkc_pn0AKOptionList__: javaAssertions.o; +text: .text%__1cOJavaAssertionsNmatch_package6Fpkc_pn0AKOptionList__; +text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%JVM_InternString; +text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_; +text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_; +text: .text%__1cKSharedHeapPis_in_permanent6kMpkv_i_: genCollectedHeap.o; +text: .text%__1cQjava_lang_StringGequals6FpnHoopDesc_pHi_i_; +text: .text%JVM_NanoTime; +text: .text%__1cCosNjavaTimeNanos6F_x_; +text: .text%JVM_GetCallerClass; +text: .text%JVM_SupportsCX8; +text: .text%__1cNFingerprinterHdo_bool6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRuntime.o; +text: .text%JVM_GetClassDeclaredFields; +text: .text%__1cKReflectionJnew_field6FpnPfieldDescriptor_ipnGThread__pnHoopDesc__; +text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__; +text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_; +text: .text%__1cXjava_lang_reflect_FieldThas_signature_field6F_i_; +text: 
.text%__1cXjava_lang_reflect_FieldVhas_annotations_field6F_i_; +text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverWresolve_interface_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cMLinkResolverbHlinktime_resolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbGruntime_resolve_interface_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2pnGThread__v_; +text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_; +text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_; +text: .text%Unsafe_ObjectFieldOffset; +text: .text%__1cRfind_field_offset6FpnI_jobject_ipnGThread__i_; +text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_; +text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_; +text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_; +text: .text%JVM_IHashCode; +text: .text%jni_GetStaticFieldID: jni.o; +text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__; +text: .text%__1cNinstanceKlassPjni_id_for_impl6FnTinstanceKlassHandle_i_pnFJNIid__; +text: .text%__1cFJNIid2t6MpnMklassOopDesc_ip0_v_; +text: .text%jni_SetStaticObjectField: jni.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceRefKlass.o; +text: .text%__1cNobjArrayKlassPoop_is_objArray6kM_i_: objArrayKlass.o; +text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlass.o; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: arrayKlass.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: arrayKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: arrayKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: arrayKlass.o; +text: .text%jni_GetStringUTFLength: jni.o; +text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_; +text: .text%jni_GetStringLength: jni.o; +text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_; +text: .text%jni_GetStringUTFRegion: jni.o; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_; +text: .text%JVM_FindClassFromClassLoader; +text: .text%JVM_IsInterface; +text: .text%JVM_GetClassDeclaredConstructors; +text: .text%__1cNmethodOopDescOis_initializer6kM_i_; +text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassQmethod_index_for6kMpnNmethodOopDesc_pnGThread__i_; +text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__; +text: .text%__1cPSignatureStream2t6MnMsymbolHandle_i_v_; +text: .text%__1cPSignatureStreamEnext6M_v_; +text: 
.text%__1cKReflectionTget_exception_types6FnMmethodHandle_pnGThread__nOobjArrayHandle__; +text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__; +text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__; +text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorThas_signature_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorVhas_annotations_field6F_i_; +text: .text%__1cNmethodOopDescLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorbFhas_parameter_annotations_field6F_i_; +text: .text%__1cNmethodOopDescVparameter_annotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%JVM_Clone; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: jvm.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: jvm.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: jvm.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: jvm.o; +text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_; +text: .text%JVM_GetClassAccessFlags; +text: .text%JVM_GetClassName; +text: .text%__1cFKlassNexternal_name6kM_pkc_; +text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_; +text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__; +text: .text%JVM_GetClassModifiers; +text: .text%jni_GetSuperclass: jni.o; +text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__; +text: .text%__1cNFingerprinterIdo_array6Mii_v_: dump.o; +text: .text%JVM_NewInstanceFromConstructor; +text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_; +text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_; +text: .text%__1cKReflectionGinvoke6FnTinstanceKlassHandle_nMmethodHandle_nGHandle_inOobjArrayHandle_nJBasicType_4ipnGThread__pnHoopDesc__; +text: .text%__1cKReflectionDbox6FpnGjvalue_nJBasicType_pnGThread__pnHoopDesc__; +text: .text%JVM_MaxMemory; +text: .text%__1cQGenCollectedHeapMmax_capacity6kM_I_; +text: .text%__1cQDefNewGenerationMmax_capacity6kM_I_; +text: .text%Unsafe_AllocateMemory; +text: .text%Unsafe_SetNativeLong; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: bytecode.o; +text: .text%Unsafe_GetNativeByte; +text: .text%Unsafe_FreeMemory; +text: .text%__1cNSignatureInfoIdo_float6M_v_: bytecode.o; +text: .text%__1cFJNIidEfind6Mi_p0_; +text: .text%jni_NewObjectV: jni.o; +text: .text%__1cMalloc_object6FpnH_jclass_pnGThread__pnPinstanceOopDesc__: jni.o; +text: .text%jni_GetStringRegion: jni.o; +text: 
.text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__; +text: .text%jni_GetObjectField: jni.o; +text: .text%jni_GetStringCritical: jni.o; +text: .text%__1cJGC_lockerNlock_critical6FpnKJavaThread__v_: jni.o; +text: .text%jni_ReleaseStringCritical: jni.o; +text: .text%JVM_LoadLibrary; +text: .text%JVM_FindLibraryEntry; +text: .text%jni_GetJavaVM; +text: .text%JVM_IsSupportedJNIVersion; +text: .text%jni_SetIntField: jni.o; +text: .text%jni_SetLongField: jni.o; +text: .text%JVM_FindSignal; +text: .text%JVM_RegisterSignal; +text: .text%__1cCosMuser_handler6F_pv_; +text: .text%__1cCosGsignal6Fipv_1_; +text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o; +text: .text%__1cVquicken_jni_functions6F_v_; +text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_; +text: .text%__1cJAssemblerFtestb6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerMemit_arith_b6MiipnMRegisterImpl_i_v_; +text: .text%__1cYjni_GetBooleanField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_; +text: .text%__1cVjni_GetByteField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_; +text: .text%__1cVjni_GetCharField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_; +text: .text%__1cWjni_GetShortField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_; +text: .text%__1cUjni_GetIntField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_; +text: .text%__1cVjni_GetLongField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_; +text: .text%__1cJAssemblerFfst_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfstp_d6Mi_v_; +text: .text%__1cWjni_GetFloatField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_; +text: .text%__1cXjni_GetDoubleField_addr6F_pC_; +text: .text%__1cSset_init_completed6F_v_; +text: .text%__1cJTimeStampGupdate6M_v_; +text: .text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_; +text: .text%jni_NewObjectArray: jni.o; +text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: objArrayKlass.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: objArrayKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: objArrayKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: objArrayKlass.o; +text: .text%jni_SetObjectArrayElement: jni.o; +text: .text%jni_GetObjectArrayElement: jni.o; +text: .text%__1cSInterpreterRuntimebAfrequency_counter_overflow6FpnKJavaThread_pC_x_; +text: .text%__1cQSimpleCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_; +text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_; +text: .text%__1cRInvocationCounterJset_carry6M_v_; +text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__; +text: .text%__1cQSimpleCompPolicyRcompilation_level6MnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerTcompile_method_base6FnMmethodHandle_ii1ipkcpnGThread__pnHnmethod__; +text: 
.text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cLJvmtiExportQenter_live_phase6F_v_; +text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_; +text: .text%__1cUJvmtiEventControllerHvm_init6F_v_; +text: .text%__1cHMonitorKnotify_all6M_i_; +text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_; +text: .text%__1cMPeriodicTask2t6MI_v_; +text: .text%__1cMPeriodicTaskGenroll6M_v_; +text: .text%__1cURecompilationMonitorbGstart_recompilation_monitor_task6F_v_; +text: .text%__1cCosLsignal_init6F_v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_; +text: .text%__1cCosOsignal_init_pd6F_v_; +text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_; +text: .text%__1cICompiler2t6M_v_; +text: .text%__1cNCompileBrokerQcompilation_init6FpnQAbstractCompiler__v_; +text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_; +text: .text%__1cICompilerOneeds_adapters6M_i_: c1_Compiler.o; +text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_; +text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__; +text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_; +text: .text%__1cOCompilerThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cCosFyield6F_v_; +text: .text%__1cCosFsleep6FpnGThread_xi_i_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o; +text: .text%__1cCosLsignal_wait6F_i_; +text: .text%__1cVcheck_pending_signals6Fi_i_: os_solaris.o; +text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o; +text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_; +text: .text%__1cICompilerKinitialize6M_v_; +text: .text%__1cMCompileQdDueueDget6M_pnLCompileTask__; +text: .text%__1cKManagementKinitialize6FpnGThread__v_; +text: .text%__1cRLowMemoryDetectorKinitialize6F_v_; +text: .text%__1cXLowMemoryDetectorThreadbCis_hidden_from_external_view6kM_i_: lowMemoryDetector.o; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cLStatSamplerGengage6F_v_; +text: .text%__1cLStatSamplerKinitialize6F_v_; +text: .text%__1cLStatSamplerUcreate_misc_perfdata6F_v_; +text: .text%__1cCosRelapsed_frequency6F_x_; +text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_; +text: .text%__1cLStatSamplerTget_system_property6FpkcpnGThread__2_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_; +text: .text%__1cLStatSamplerXcreate_sampled_perfdata6F_v_; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__; +text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_: statSampler.o; +text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__; +text: .text%__1cMPerfDataListFclone6M_p0_; +text: .text%__1cMPerfDataList2t6Mp0_v_; +text: .text%__1cUGenericGrowableArrayNraw_appendAll6Mpk0_v_; +text: .text%__1cNWatcherThreadFstart6F_v_; +text: .text%__1cNWatcherThread2t6M_v_; +text: .text%__1cJTimeStampMmilliseconds6kM_x_; +text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_; +text: .text%__1cORuntimeServiceYrecord_application_start6F_v_; +text: .text%__1cNWatcherThreadDrun6M_v_; +text: .text%__1cMPeriodicTaskMtime_to_wait6F_I_: thread.o; +text: .text%__1cNWatcherThreadRis_Watcher_thread6kM_i_: thread.o; +text: 
.text%__1cIos_sleep6Fxi_i_: os_solaris.o; +text: .text%__1cNgetTimeMillis6F_x_; +text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_; +text: .text%JVM_FindLoadedClass; +text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%jni_CallStaticObjectMethod: jni.o; +text: .text%jni_NewByteArray: jni.o; +text: .text%jni_SetByteArrayRegion: jni.o; +text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cNObjectMonitorFenter6MpnGThread__v_; +text: .text%jni_NewObject: jni.o; +text: .text%__1cGThreadMis_VM_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_; +text: .text%__1cRLowMemoryDetectorUhas_pending_requests6F_i_; +text: .text%__1cVjava_lang_ClassLoaderGparent6FpnHoopDesc__2_; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcinMsymbolHandle_4nGHandle_6_v_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinMsymbolHandle_4_i_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_pkcnGHandle_6_6_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_3pnRJavaCallArguments_nGHandle_6_6_; +text: .text%JVM_FillInStackTrace; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_; +text: .text%__1cIUniverseWis_out_of_memory_error6FnGHandle__i_; +text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_; +text: .text%__1cKJavaThreadGactive6F_p0_; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_; +text: .text%__1cTjava_lang_ThrowableNset_backtrace6FpnHoopDesc_2_v_; +text: .text%__1cTjava_lang_ThrowableQclear_stacktrace6FpnHoopDesc__v_; +text: .text%__1cVPreserveExceptionMark2T6M_v_; +text: .text%__1cKExceptionsG_throw6FpnGThread_pkcinGHandle__v_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinGHandle__i_; +text: .text%__1cGThreadVset_pending_exception6MpnHoopDesc_pkci_v_; +text: .text%__1cGEventsDlog6FpkcE_v_: exceptions.o; +text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_; +text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_; +text: .text%__1cNSharedRuntimebOraw_exception_handler_for_return_address6FpC_1_; +text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_; +text: .text%__1cKJavaThreadNreguard_stack6MpC_i_; +text: .text%__1cSInterpreterRuntimePset_bcp_and_mdp6FpCpnKJavaThread__v_; +text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_; +text: .text%__1cFframeZinterpreter_frame_set_bcx6Mi_v_; +text: .text%__1cNCompileBrokerYcheck_compilation_result6FnMmethodHandle_iippnHnmethod__i_; +text: .text%__1cNCompileBrokerXcompilation_is_in_queue6FnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerZcompilation_is_prohibited6FnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerTis_not_compile_only6FnMmethodHandle__i_; +text: .text%__1cOCompilerOracleOshould_exclude6FnMmethodHandle__i_; +text: 
.text%__1cNCompileBrokerRassign_compile_id6FnMmethodHandle_i_I_;
+text: .text%__1cNCompileBrokerTis_compile_blocking6FnMmethodHandle_i_i_;
+text: .text%__1cNCompileBrokerTcreate_compile_task6FpnMCompileQdDueue_inMmethodHandle_i3ipkcii_pnLCompileTask__;
+text: .text%__1cNCompileBrokerNallocate_task6F_pnLCompileTask__;
+text: .text%__1cLCompileTaskKinitialize6MinMmethodHandle_i1ipkcii_v_;
+text: .text%__1cMCompileQdDueueDadd6MpnLCompileTask__v_;
+text: .text%__1cNCompileBrokerTwait_for_completion6FpnLCompileTask__pnHnmethod__;
+text: .text%__1cCosPhint_no_preempt6F_v_;
+text: .text%__1cSCompileTaskWrapper2t6MpnLCompileTask__v_;
+text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_;
+text: .text%__1cNCompileBrokerQset_last_compile6FpnOCompilerThread_nMmethodHandle_ii_v_;
+text: .text%__1cNCompileBrokerVpush_jni_handle_block6F_v_;
+text: .text%__1cNCompileBrokerOcheck_break_at6FnMmethodHandle_iii_i_;
+text: .text%__1cOCompilerOraclePshould_break_at6FnMmethodHandle__i_;
+text: .text%__1cFciEnv2t6MpnHJNIEnv__iii_v_;
+text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_;
+text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_;
+text: .text%__1cUGenericGrowableArray2t6MpnFArena_iipnEGrET__v_;
+text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_;
+text: .text%__1cIciObject2t6MnGHandle__v_;
+text: .text%__1cPciObjectFactoryEfind6MpnHoopDesc_pnNGrowableArray4CpnIciObject____i_;
+text: .text%__1cPciObjectFactoryLis_found_at6MipnHoopDesc_pnNGrowableArray4CpnIciObject____i_;
+text: .text%__1cPciObjectFactoryNinit_ident_of6MpnIciObject__v_;
+text: .text%__1cIciObjectJset_ident6MI_v_;
+text: .text%__1cPciObjectFactoryGinsert6MipnIciObject_pnNGrowableArray4C2___v_;
+text: .text%__1cGciType2t6MnJBasicType__v_;
+text: .text%__1cIciObject2t6M_v_;
+text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__;
+text: .text%__1cPciObjectFactoryNfind_non_perm6MpnHoopDesc__rpn0ANNonPermObject__;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: klassKlass.o;
+text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: methodKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodKlass.o;
+text: .text%__1cIciSymbolEmake6Fpkc_p0_;
+text: .text%__1cFciEnvIis_in_vm6F_i_;
+text: .text%__1cIciSymbolJmake_impl6Fpkc_p0_;
+text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_;
+text: .text%__1cGciType2t6MnLKlassHandle__v_;
+text: .text%__1cFKlassMoop_is_array6kM_i_: methodKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: symbolKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: symbolKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: symbolKlass.o;
+text: .text%__1cFKlassNoop_is_method6kM_i_: symbolKlass.o;
+text: .text%__1cFKlassMoop_is_array6kM_i_: symbolKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: klassKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: klassKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: klassKlass.o;
+text: .text%__1cFKlassNoop_is_method6kM_i_: klassKlass.o;
+text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: klassKlass.o;
+text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: klassKlass.o;
+text: .text%__1cFKlassUoop_is_instanceKlass6kM_i_: klassKlass.o;
+text: .text%__1cFKlassMoop_is_array6kM_i_: klassKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: instanceKlassKlass.o;
+text: .text%__1cSinstanceKlassKlassUoop_is_instanceKlass6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cTtypeArrayKlassKlassVoop_is_typeArrayKlass6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassMoop_is_array6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cSobjArrayKlassKlassUoop_is_objArrayKlass6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cFKlassMoop_is_array6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_;
+text: .text%__1cHciKlass2t6MnLKlassHandle__v_;
+text: .text%__1cPciInstanceKlassFsuper6M_p0_;
+text: .text%__1cPciInstanceKlassTis_java_lang_Object6M_i_;
+text: .text%__1cIciObjectGequals6Mp0_i_;
+text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__;
+text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__;
+text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlass.o;
+text: .text%__1cFKlassRoop_is_methodData6kM_i_: instanceKlass.o;
+text: .text%__1cUciInstanceKlassKlassEmake6F_p0_;
+text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_;
+text: .text%__1cGciType2t6MpnHciKlass__v_;
+text: .text%__1cIciObject2t6MpnHciKlass__v_;
+text: .text%__1cIciObjectUis_array_klass_klass6M_i_: ciObjectFactory.o;
+text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_;
+text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_;
+text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_;
+text: .text%__1cRciArrayKlassKlassUis_array_klass_klass6M_i_: ciObjectFactory.o;
+text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_;
+text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_;
+text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodKlass.o;
+text: .text%__1cIciMethod2t6MnMmethodHandle__v_;
+text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_;
+text: .text%__1cPSignatureStreamJis_object6kM_i_;
+text: .text%__1cGciTypeEmake6FnJBasicType__p0_;
+text: .text%__1cJTraceTime2t6MpkcpnMelapsedTimer_iipnMoutputStream__v_;
+text: .text%__1cICompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_;
+text: .text%__1cLCompilation2t6MpnQAbstractCompiler_pnFciEnv_pnIciMethod_ipnRC1_MacroAssembler__v_;
+text: .text%__1cTExceptionRangeTable2t6Mi_v_;
+text: .text%__1cWImplicitExceptionTableIset_size6MI_v_;
+text: .text%__1cLCompilationOcompile_method6M_v_;
+text: .text%__1cLCompilationKinitialize6M_v_;
+text: .text%__1cLCompilationEcode6kM_pnKCodeBuffer__;
+text:
.text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_; +text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_; +text: .text%__1cLCompilationTdebug_info_recorder6kM_pnYDebugInformationRecorder__; +text: .text%__1cLCompilationbBis_optimized_library_method6kM_i_; +text: .text%__1cLCompilationTcompile_java_method6MpnLCodeOffsets__i_; +text: .text%__1cLCompilationTinitialize_oop_maps6M_v_; +text: .text%__1cIciMethodMall_oop_maps6M_pnKciLocalMap__; +text: .text%__1cSciGenerateLocalMap2t6MpnFArena_nMmethodHandle__v_; +text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_; +text: .text%__1cSciGenerateLocalMapWfind_jsr_return_points6MnMmethodHandle__v_; +text: .text%__1cRRawBytecodeStream2t6MnMmethodHandle__v_; +text: .text%__1cRRawBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: ciOopMap.o; +text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_; +text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: generateOopMap.o; +text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_; +text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_; +text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_; +text: .text%__1cOGenerateOopMapOset_bbmark_bit6Mi_v_; +text: .text%__1cOGenerateOopMapPjump_targets_do6MpnOBytecodeStream_pFp0ipi_v4_i_; +text: .text%__1cSciGenerateLocalMapRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cSciGenerateLocalMapUbytecode_is_gc_point6FnJBytecodesECode_ii_i_; +text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_; +text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_; +text: .text%__1cOGenerateOopMapKinit_state6M_v_; +text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_; +text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_; +text: .text%__1cOGenerateOopMapSget_basic_block_at6kMi_pnKBasicBlock__; +text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__; +text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_; +text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_; +text: .text%__1cOGenerateOopMapTmethodsig_to_effect6MpnNsymbolOopDesc_ipnNCellTypeState__i_; +text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_; +text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_; +text: .text%__1cOGenerateOopMapKinterp_all6M_v_; +text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapNrestore_state6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_; +text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_; +text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_; +text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__; +text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_; +text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_; +text: .text%__1cOGenerateOopMapCpp6MpnNCellTypeState_2_v_; +text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapFppop16MnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__; +text: .text%__1cOGenerateOopMapKcheck_type6MnNCellTypeState_1_v_; +text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapGppush16MnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_; +text: 
.text%__1cOGenerateOopMapHset_var6MinNCellTypeState__v_; +text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_; +text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_; +text: .text%__1cOGenerateOopMapTmerge_state_vectors6MpnNCellTypeState_2_i_; +text: .text%__1cNCellTypeStateFmerge6kM0i_0_; +text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_; +text: .text%__1cOGenerateOopMapXdo_return_monitor_check6M_v_; +text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_; +text: .text%__1cSciGenerateLocalMapOreport_results6kM_i_: ciOopMap.o; +text: .text%__1cOGenerateOopMapNreport_result6M_v_; +text: .text%__1cSciGenerateLocalMapUfill_stackmap_prolog6Mi_v_; +text: .text%__1cSciGenerateLocalMapZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cKciLocalMap2t6MpnFArena_iii_v_; +text: .text%__1cKciLocalMapRset_bci_for_index6Mii_v_; +text: .text%__1cSciGenerateLocalMapUfill_stackmap_epilog6M_v_: ciOopMap.o; +text: .text%__1cSciGenerateLocalMapOfill_init_vars6MpnNGrowableArray4Ci___v_; +text: .text%__1cKciLocalMapSset_nof_initialize6Mi_v_; +text: .text%__1cLCompilationJbuild_hir6M_v_; +text: .text%__1cCIR2t6MpnLCompilation_pnIciMethod_i_v_; +text: .text%__1cJValueTypeKinitialize6F_v_; +text: .text%__1cMciNullObjectEmake6F_p0_; +text: .text%__1cMGraphBuilderKinitialize6F_v_; +text: .text%__1cHIRScope2t6MpnLCompilation_p0ipnIciMethod_ii_v_; +text: .text%__1cOLocalSlotArray2t6MkikpnJLocalSlot__v_: c1_IR.o; +text: .text%__1cGBitMap2t6MI_v_; +text: .text%__1cGBitMapGresize6MI_v_; +text: .text%__1cNWordSizeArray2t6Mki1_v_: c1_IR.o; +text: .text%__1cJXHandlers2t6MpnIciMethod__v_; +text: .text%__1cIciMethodJload_code6M_v_; +text: .text%__1cIciMethodVhas_balanced_monitors6M_i_; +text: .text%__1cHIRScopeLbuild_graph6MpnLCompilation_i_pnKBlockBegin__; +text: .text%__1cQBlockListBuilder2t6MpnHIRScope_ii_v_; +text: .text%__1cPBlockBeginArray2t6MkikpnKBlockBegin__v_: c1_GraphBuilder.o; +text: .text%__1cQBlockListBuilderLset_leaders6M_v_; +text: .text%__1cQciBytecodeStream2t6MpnIciMethod__v_; +text: .text%__1cQciBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cQBlockListBuilderMnew_block_at6MinKBlockBeginEFlag__p1_; +text: .text%__1cQBlockListBuilderUset_xhandler_entries6M_v_; +text: .text%__1cKValueStack2t6MpnHIRScope_ii_v_; +text: .text%__1cKValueArray2t6MkikpnLInstruction__v_: c1_ValueStack.o; +text: .text%__1cJLocalSlot2t6M_v_; +text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_IR.o; +text: .text%__1cKObjectTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cMGraphBuilder2t6MpnLCompilation_pnHIRScope_pnJBlockList_pnKBlockBegin__v_; +text: .text%__1cMGraphBuilderPpush_root_scope6MpnHIRScope_pnJBlockList_pnKBlockBegin__v_; +text: .text%__1cMGraphBuilderJScopeData2t6Mp1i_v_; +text: .text%__1cMGraphBuilderJScopeDataJset_scope6MpnHIRScope__v_; +text: .text%__1cMGraphBuilderUpush_exception_scope6M_v_; +text: .text%__1cOExceptionScope2t6M_v_; +text: .text%__1cOExceptionScopeEinit6M_v_; +text: .text%__1cIValueMap2t6M_v_; +text: .text%__1cMGraphBuilderJScopeDataQadd_to_work_list6MpnKBlockBegin__v_; +text: .text%__1cNResourceArrayGexpand6MIiri_v_; +text: .text%__1cMGraphBuilderSiterate_all_blocks6Mi_v_; +text: .text%__1cMGraphBuilderJScopeDataVremove_from_work_list6M_pnKBlockBegin__; +text: .text%__1cMGraphBuilderJScopeDataSis_work_list_empty6kM_i_; +text: .text%__1cMGraphBuilderOconnect_to_end6MpnKBlockBegin__pnIBlockEnd__; +text: .text%__1cIValueMapIkill_all6M_v_; +text: .text%__1cIValueMapRnumber_of_buckets6kM_i_; +text: 
.text%__1cIValueMapJbucket_at6Mi_pnGBucket__; +text: .text%__1cGBucketIkill_all6M_v_; +text: .text%__1cKValueStackEcopy6M_p0_; +text: .text%__1cGValuesIpush_all6Mpk0_v_: c1_ValueStack.o; +text: .text%__1cMGraphBuilderbBiterate_bytecodes_for_block6Mi_pnIBlockEnd__; +text: .text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderJScopeDataIblock_at6Mi_pnKBlockBegin__; +text: .text%__1cMGraphBuilderKload_local6MpnJValueType_i_v_; +text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderGappend6MpnLInstruction__2_; +text: .text%__1cMGraphBuilderLappend_base6MpnLInstruction__2_; +text: .text%__1cJLoadLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_LoadLocal6MpnJLoadLocal__v_; +text: .text%__1cIValueMapEfind6MpnLInstruction__2_; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_GraphBuilder.o; +text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_GraphBuilder.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_GraphBuilder.o; +text: .text%__1cLInstructionEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_GraphBuilder.o; +text: .text%__1cLInstructionIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cKValueStackLclear_store6Mi_v_; +text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderMaccess_field6MnJBytecodesECode__v_; +text: .text%__1cQciBytecodeStreamJget_field6kM_pnHciField__; +text: .text%__1cQciBytecodeStreamPget_field_index6kM_i_; +text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cFciEnvXget_field_by_index_impl6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__; +text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_; +text: .text%__1cFciEnvSget_klass_by_index6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cFciEnvXget_klass_by_index_impl6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__; +text: .text%__1cPciObjectFactorySget_unloaded_klass6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__; +text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_; +text: .text%__1cMas_ValueType6FnJBasicType__pnJValueType__; +text: .text%__1cHciFieldJwill_link6MpnPciInstanceKlass_nJBytecodesECode__i_; +text: .text%__1cMLinkResolverXresolve_klass_no_update6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cMGraphBuilderKlock_stack6M_pnKValueStack__; +text: .text%__1cKValueStackKcopy_locks6M_p0_; +text: .text%__1cJLoadFieldFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerMdo_LoadField6MpnJLoadField__v_; +text: .text%__1cLAccessFieldOas_AccessField6M_p0_: c1_Instruction.o; +text: .text%__1cJLoadFieldEhash6kM_i_: c1_Instruction.o; +text: .text%__1cJLoadFieldEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cIValueMapNlookup_bucket6Mi_pnGBucket__; +text: .text%__1cGBucketEfind6MpnLInstruction_i_2_; +text: .text%__1cGBucketGappend6MpnLInstruction_i_v_; +text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_Instruction.o; +text: 
.text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_Instruction.o; +text: .text%__1cLAccessFieldIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cOExceptionScopeEcopy6M_p0_; +text: .text%__1cOExceptionScopeGlength6kM_i_; +text: .text%__1cHIntTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cMGraphBuilderLstore_local6MpnJValueType_i_v_; +text: .text%__1cKValueStackDpop6MpnJValueType__pnLInstruction__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderLstore_local6MpnKValueStack_pnLInstruction_pnJValueType_ii_v_; +text: .text%__1cJValueTypeNas_ObjectType6M_pnKObjectType__: c1_ValueType.o; +text: .text%__1cKStoreLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerNdo_StoreLocal6MpnKStoreLocal__v_; +text: .text%__1cKValueStackLstore_local6MpnKStoreLocal_i_v_; +text: .text%__1cKValueStackQpin_stack_locals6Mi_v_; +text: .text%__1cKValueStackNpin_stack_all6MnLInstructionJPinReason__v_; +text: .text%__1cHIntTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cMGraphBuilderHif_zero6MpnJValueType_nLInstructionJCondition__v_; +text: .text%__1cIConstantFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerLdo_Constant6MpnIConstant__v_; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Instruction.o; +text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_Instruction.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_Instruction.o; +text: .text%__1cIConstantEhash6kM_i_; +text: .text%__1cHIntTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cLIntConstantOas_IntConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cIConstantEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cIConstantIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cMGraphBuilderHif_node6MpnLInstruction_n0BJCondition_2pnKValueStack__v_; +text: .text%__1cCIf2t6MpnLInstruction_n0BJCondition_i2pnKBlockBegin_5pnKValueStack_i_v_: c1_GraphBuilder.o; +text: .text%__1cCIfFvisit6MpnSInstructionVisitor__v_: c1_Canonicalizer.o; +text: .text%__1cNCanonicalizerFdo_If6MpnCIf__v_; +text: .text%__1cJValueTypeLis_constant6kM_i_: c1_ValueType.o; +text: .text%__1cLInstructionMas_CompareOp6M_pnJCompareOp__: c1_Instruction.o; +text: .text%__1cLInstructionNas_InstanceOf6M_pnKInstanceOf__: c1_Instruction.o; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Canonicalizer.o; +text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_Canonicalizer.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_Canonicalizer.o; +text: .text%__1cLInstructionEhash6kM_i_: c1_Canonicalizer.o; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_Canonicalizer.o; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_Canonicalizer.o; +text: .text%__1cLInstructionIcan_trap6kM_i_: c1_Canonicalizer.o; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_Canonicalizer.o; +text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_Canonicalizer.o; +text: .text%__1cKBlockBeginItry_join6MpnKValueStack__i_; +text: .text%__1cKValueStack2t6Mp0_v_; +text: .text%__1cKValueStackEinit6Mp0_v_; +text: .text%__1cMGraphBuilderNmethod_return6MpnLInstruction__v_; +text: .text%__1cGReturnFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerJdo_Return6MpnGReturn__v_; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_GraphBuilder.o; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_GraphBuilder.o; +text: .text%__1cGReturnJas_Return6M_p0_: c1_GraphBuilder.o; +text: 
.text%__1cKValueStackbAeliminate_all_scope_stores6Mi_v_; +text: .text%__1cKValueStackQeliminate_stores6Mi_v_; +text: .text%__1cKValueStackMcaller_state6kM_p0_; +text: .text%__1cFciEnvWget_klass_by_name_impl6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cHciKlassGloader6M_pnHoopDesc__: ciTypeArrayKlass.o; +text: .text%__1cFciEnvZcheck_klass_accessibility6MpnHciKlass_pnMklassOopDesc__i_; +text: .text%__1cIciObjectMis_obj_array6M_i_: ciInstanceKlass.o; +text: .text%__1cPciInstanceKlassRis_instance_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cKObjectTypeNas_ObjectType6M_p0_: c1_ValueType.o; +text: .text%__1cJValueTypeOas_AddressType6M_pnLAddressType__: c1_ValueType.o; +text: .text%__1cKObjectTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cMGraphBuilderHif_same6MpnJValueType_nLInstructionJCondition__v_; +text: .text%__1cJValueTypeOas_IntConstant6M_pnLIntConstant__: c1_ValueType.o; +text: .text%__1cKStoreFieldFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerNdo_StoreField6MpnKStoreField__v_; +text: .text%__1cLAccessFieldOas_AccessField6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLAccessFieldIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cIValueMapKkill_field6MpnHciField__v_; +text: .text%__1cGBucketKkill_field6MpnHciField__v_; +text: .text%__1cKValueStackQpin_stack_fields6MpnHciField__v_; +text: .text%__1cKValueStackVis_same_across_scopes6Mp0_i_; +text: .text%__1cMGraphBuilderNarithmetic_op6MpnJValueType_nJBytecodesECode_pnKValueStack__v_; +text: .text%__1cJValueTypeEmeet6kMp0_1_; +text: .text%__1cHIntTypeEbase6kM_pnJValueType__: c1_Canonicalizer.o; +text: .text%__1cMArithmeticOpIcan_trap6kM_i_; +text: .text%__1cMArithmeticOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerPdo_ArithmeticOp6MpnMArithmeticOp__v_; +text: .text%__1cNCanonicalizerGdo_Op26MpnDOp2__v_; +text: .text%__1cLIntConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cNCanonicalizerTmove_const_to_right6MpnDOp2__v_; +text: .text%__1cMArithmeticOpOis_commutative6kM_i_; +text: .text%__1cMArithmeticOpEhash6kM_i_: c1_Instruction.o; +text: .text%__1cMArithmeticOpEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cMGraphBuilderJincrement6M_v_; +text: .text%__1cHIntTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cMGraphBuilderMload_indexed6MnJBasicType__v_; +text: .text%__1cLLoadIndexedFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerOdo_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cLLoadIndexedEhash6kM_i_: c1_Instruction.o; +text: .text%__1cLLoadIndexedEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cLAccessArrayIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cIConstantIis_equal6kMpnLInstruction__i_; +text: .text%__1cIConstantLas_Constant6M_p0_: c1_Instruction.o; +text: .text%__1cEGotoFvisit6MpnSInstructionVisitor__v_: c1_Canonicalizer.o; +text: .text%__1cNCanonicalizerHdo_Goto6MpnEGoto__v_; +text: .text%__1cHIRScopeMheader_block6MpnKBlockBegin_n0BEFlag__2_; +text: .text%__1cCIRIoptimize6M_v_; +text: .text%__1cJOptimizer2t6MpnCIR__v_; +text: .text%__1cJOptimizerbHeliminate_conditional_expressions6M_v_; +text: .text%__1cCIRQiterate_preorder6MpnMBlockClosure__v_; +text: .text%__1cKBlockBeginQiterate_preorder6MpnMBlockClosure__v_; +text: .text%__1cJboolArray2t6Mki1_v_: c1_Instruction.o; +text: .text%__1cKBlockBeginQiterate_preorder6MrnJboolArray_pnMBlockClosure__v_; 
+text: .text%__1cNCE_EliminatorIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_IR.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_Canonicalizer.o; +text: .text%__1cCIfFas_If6M_p0_: c1_Canonicalizer.o; +text: .text%__1cHIntTypeKas_IntType6M_p0_: c1_ValueType.o; +text: .text%__1cNCE_EliminatorRsimple_value_copy6MpnLInstruction__2_: c1_Optimizer.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_GraphBuilder.o; +text: .text%__1cJLoadLocalMas_LoadLocal6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_GraphBuilder.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_GraphBuilder.o; +text: .text%__1cJOptimizerQeliminate_blocks6M_v_; +text: .text%__1cUGenericGrowableArray2t6MiipnEGrET_i_v_; +text: .text%__1cSPredecessorCounterIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o; +text: .text%__1cLBlockMergerIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o; +text: .text%__1cLBlockMergerJtry_merge6MpnKBlockBegin__i_: c1_Optimizer.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_IR.o; +text: .text%__1cEGotoHas_Goto6M_p0_: c1_Canonicalizer.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_Canonicalizer.o; +text: .text%__1cJOptimizerVeliminate_null_checks6M_v_; +text: .text%__1cGBitMapFclear6M_v_; +text: .text%__1cGBitMapUclear_range_of_words6MII_v_: bitMap.o; +text: .text%__1cNValueSetArray2t6MkikpnIValueSet__v_: c1_Optimizer.o; +text: .text%__1cTNullCheckEliminatorHiterate6MpnKBlockBegin__v_; +text: .text%__1cTNullCheckEliminatorLiterate_all6M_v_; +text: .text%__1cTNullCheckEliminatorLiterate_one6MpnKBlockBegin__v_; +text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_Optimizer.o; +text: .text%__1cGBitMapIset_from6M0_v_; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cKBlockBeginFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorNdo_BlockBegin6MpnKBlockBegin__v_; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_IR.o; +text: .text%__1cEBaseFvisit6MpnSInstructionVisitor__v_: c1_IR.o; +text: .text%__1cQNullCheckVisitorHdo_Base6MpnEBase__v_; +text: .text%__1cTNullCheckEliminatorPmerge_state_for6MpnKBlockBegin_pnKValueStack_pnIValueSet__i_; +text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_Optimizer.o; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_Canonicalizer.o; +text: .text%__1cQNullCheckVisitorHdo_Goto6MpnEGoto__v_; +text: .text%__1cLInstructionMas_NullCheck6M_pnJNullCheck__: c1_GraphBuilder.o; +text: .text%__1cLInstructionMas_NullCheck6M_pnJNullCheck__: c1_Instruction.o; +text: .text%__1cKStoreLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cTNullCheckEliminatorIdo_value6FppnLInstruction__v_; +text: .text%__1cFLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cFLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorIdo_Local6MpnFLocal__v_; +text: .text%__1cLAccessFieldPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cLAccessLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorMdo_LoadLocal6MpnJLoadLocal__v_; +text: .text%__1cTNullCheckEliminatorQhandle_LoadLocal6MpnJLoadLocal__v_; +text: .text%__1cQNullCheckVisitorMdo_LoadField6MpnJLoadField__v_; +text: .text%__1cTNullCheckEliminatorShandle_AccessField6MpnLAccessField__v_; +text: 
.text%__1cQNullCheckVisitorNdo_StoreLocal6MpnKStoreLocal__v_; +text: .text%__1cTNullCheckEliminatorRhandle_StoreLocal6MpnKStoreLocal__v_; +text: .text%__1cCIfPinput_values_do6MpFppnLInstruction__v_v_: c1_Canonicalizer.o; +text: .text%__1cIConstantPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorLdo_Constant6MpnIConstant__v_; +text: .text%__1cQNullCheckVisitorFdo_If6MpnCIf__v_; +text: .text%__1cDOp2Pinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorPdo_ArithmeticOp6MpnMArithmeticOp__v_; +text: .text%__1cNAccessIndexedPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorOdo_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cTNullCheckEliminatorShandle_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cGBitMapbCset_intersection_with_result6M0_i_; +text: .text%__1cKStoreFieldPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorNdo_StoreField6MpnKStoreField__v_; +text: .text%__1cGReturnPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorJdo_Return6MpnGReturn__v_; +text: .text%__1cJboolArray2t6Mki1_v_: c1_Optimizer.o; +text: .text%__1cCIRTcompute_locals_size6M_v_; +text: .text%__1cHIRScopePallocate_locals6MipnMWordSizeList__i_; +text: .text%__1cHIRScopeGlocals6M_pnJLocalList__; +text: .text%__1cJLocalSlotOcollect_locals6MpnJLocalList__v_; +text: .text%__1cHIRScopePargument_locals6M_pnJLocalList__; +text: .text%__1cJLocalSlotXcollect_argument_locals6MpnJLocalList__v_; +text: .text%__1cCIRTallocate_local_name6M_i_; +text: .text%__1cMWordSizeListEgrow6Mki1_v_: c1_IR.o; +text: .text%__1cCIRSnotice_used_offset6Mi_v_; +text: .text%__1cCIRNcompute_loops6M_v_; +text: .text%__1cIciMethodJhas_loops6kM_i_; +text: .text%__1cNmethodOopDescWcompute_has_loops_flag6M_i_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: methodOop.o; +text: .text%__1cKLoopFinder2t6MpnCIR_i_v_; +text: .text%__1cSBlockLoopInfoArray2t6MkikpnNBlockLoopInfo__v_: c1_Loops.o; +text: .text%__1cKLoopFinderNcompute_loops6Mi_pnILoopList__; +text: .text%__1cJboolArray2t6Mki1_v_: c1_Loops.o; +text: .text%__1cKLoopFinderScompute_dominators6MpnJboolArray__v_; +text: .text%__1cGBitMapGat_put6MIi_v_; +text: .text%__1cRCreateInfoClosureIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cNBlockLoopInfo2t6MpnKBlockBegin_i_v_; +text: .text%__1cPSetPredsClosureIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cKLoopFinderSdominator_walk_sux6MpnKBlockBegin_pnJboolArray__v_; +text: .text%__1cGBitMapQset_intersection6M0_v_; +text: .text%__1cGBitMapHis_same6M0_i_; +text: .text%__1cKLoopFinderOfind_backedges6MpnJboolArray__pnILoopList__; +text: .text%__1cELoop2t6MpnKBlockBegin_2_v_: c1_Loops.o; +text: .text%__1cKLoopFinderSgather_loop_blocks6MpnILoopList__v_; +text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_Loops.o; +text: .text%__1cKLoopFinderKfind_loops6MpnILoopList_i_2_; +text: .text%__1cKScanBlocks2t6MpnJBlockList__v_; +text: .text%__1cIintStack2t6M_v_: c1_ScanBlocks.o; +text: .text%__1cKScanBlocksEscan6MpnKScanResult_i_v_; +text: .text%__1cKScanBlocksKscan_block6MpnKBlockBegin_pnKScanResult_i_v_; +text: .text%__1cLIllegalTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_GraphBuilder.o; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_GraphBuilder.o; +text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_GraphBuilder.o; 
+text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_GraphBuilder.o; +text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_Instruction.o; +text: .text%__1cLAccessLocalOas_AccessLocal6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLInstructionNas_StoreLocal6M_pnKStoreLocal__: c1_GraphBuilder.o; +text: .text%__1cKStoreLocalNas_StoreLocal6M_p0_: c1_GraphBuilder.o; +text: .text%__1cKScanBlocksRaccumulate_access6MinIValueTag_i_v_; +text: .text%__1cKScanBlocksPincrement_count6MnIValueTag_ii_v_; +text: .text%__1cKScanBlocksJget_array6MnIValueTag__pnIintStack__; +text: .text%__1cIintStackEgrow6Mki1_v_: c1_ScanBlocks.o; +text: .text%__1cKScanBlocksLupdate_type6MinIValueTag__v_; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_Canonicalizer.o; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_Canonicalizer.o; +text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_Canonicalizer.o; +text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_Canonicalizer.o; +text: .text%__1cJ_LoopListIpush_all6Mpk0_v_: c1_Loops.o; +text: .text%__1cKLoopFinderbEcompute_loop_exits_and_entries6MpnILoopList__v_; +text: .text%__1cKLoopFinderRfind_loop_entries6MpnKBlockBegin_pnELoop__v_; +text: .text%__1cKLoopFinderPfind_loop_exits6MpnKBlockBegin_pnELoop__v_; +text: .text%__1cKLoopFinderbDcompute_single_precision_flag6MpnILoopList__v_; +text: .text%__1cKLoopFinderNinsert_blocks6MpnILoopList__v_; +text: .text%__1cIintArray2t6Mki1_v_: c1_Loops.o; +text: .text%__1cJBlockListPiterate_forward6MpnMBlockClosure__v_; +text: .text%__1cGTaggerIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cNPairCollectorIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cNResourceArrayEsort6MIpGpkv2_i_v_; +text: .text%__1cRsort_by_block_ids6FppnJBlockPair_2_i_: c1_Loops.o; +text: .text%__1cKLoopFinderUinsert_caching_block6MpnILoopList_pnKBlockBegin_4_4_; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_GraphBuilder.o; +text: .text%__1cKStateSplitFscope6kM_pnHIRScope__; +text: .text%__1cKLoopFinderJnew_block6MpnHIRScope_i_pnKBlockBegin__; +text: .text%__1cIBlockEndOsubstitute_sux6MpnKBlockBegin_2_v_; +text: .text%__1cILoopListMupdate_loops6MpnKBlockBegin_22_v_; +text: .text%__1cELoopSupdate_loop_blocks6MpnKBlockBegin_22_v_; +text: .text%__1cCIRMcompute_code6M_v_; +text: .text%__1cJboolArray2t6Mki1_v_: c1_IR.o; +text: .text%__1cCIRWiterate_and_set_weight6kMrnJboolArray_pnKBlockBegin_pnJBlockList_i_v_; +text: .text%__1cKBlockBeginKset_weight6Mi_v_; +text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_IR.o; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_IR.o; +text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_GraphBuilder.o; +text: .text%__1cDcmp6FppnKBlockBegin_2_i_: c1_IR.o; +text: .text%__1cUSuxAndWeightAdjusterIblock_do6MpnKBlockBegin__v_: c1_IR.o; +text: .text%__1cJBlockListJblocks_do6MpFpnKBlockBegin__v_v_; +text: .text%__1cQUseCountComputerRcompute_use_count6FpnKBlockBegin__v_: c1_IR.o; +text: .text%__1cQUseCountComputerXbasic_compute_use_count6FpnKBlockBegin__v_: c1_IR.o; +text: .text%__1cQUseCountComputerQupdate_use_count6FppnLInstruction__v_: c1_IR.o; +text: .text%__1cFLocalIas_Local6M_p0_: c1_GraphBuilder.o; +text: .text%__1cKStateSplitPstate_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cKValueStackJvalues_do6MpFppnLInstruction__v_v_; +text: .text%__1cQUseCountComputerPupdated_pinning6FpnKBlockBegin__i_: c1_IR.o; +text: .text%__1cNCachingChangePinput_values_do6MpFppnLInstruction__v_v_: c1_Loops.o; +text: 
.text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_Loops.o; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_IR.o; +text: .text%__1cLCompilationIemit_lir6M_v_; +text: .text%__1cIFrameMap2t6Mi_v_; +text: .text%__1cIFrameMapLFpuStackSim2t6M_v_; +text: .text%__1cLCompilationNinit_framemap6MpnIFrameMap__v_; +text: .text%__1cIFrameMapbCset_local_name_to_offset_map6MpnMWordSizeList__v_; +text: .text%__1cLLIR_Emitter2t6MpnLCompilation__v_; +text: .text%__1cIValueGenOinit_value_gen6F_v_; +text: .text%__1cIRegAlloc2t6M_v_; +text: .text%__1cNc1_AllocTable2t6Mi_v_; +text: .text%__1cIRegAllocFclear6M_v_; +text: .text%__1cNCodeGenerator2t6MpnIValueGen_pnRValueGenInvariant__v_; +text: .text%__1cNCodeGeneratorIblock_do6MpnKBlockBegin__v_; +text: .text%__1cLLIR_EmitterMmust_bailout6kM_i_; +text: .text%__1cNCodeGeneratorPblock_do_prolog6MpnKBlockBegin__v_; +text: .text%__1cIValueGenLstart_block6MpnKBlockBegin__v_; +text: .text%__1cLLIR_EmitterLstart_block6MpnKBlockBegin__v_; +text: .text%__1cILIR_List2t6MpnLCompilation__v_; +text: .text%__1cIValueGenQbind_block_entry6MpnKBlockBegin__v_; +text: .text%__1cLLIR_EmitterQbind_block_entry6MpnKBlockBegin__v_; +text: .text%__1cIValueGenMblock_prolog6MpnKBlockBegin__v_; +text: .text%__1cIValueGenHdo_root6MpnLInstruction__v_; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_GraphBuilder.o; +text: .text%__1cIValueGenNdo_BlockBegin6MpnKBlockBegin__v_; +text: .text%__1cQDelayedSpillMark2T6M_v_: c1_CodeGenerator.o; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_IR.o; +text: .text%__1cIValueGenHdo_Base6MpnEBase__v_; +text: .text%__1cIValueGenNreceiverRInfo6F_nFRInfo__; +text: .text%__1cIValueGenMicKlassRInfo6F_nFRInfo__; +text: .text%__1cLCompilationNget_init_vars6M_pnIintStack__; +text: .text%__1cLLIR_EmitterJstd_entry6MpnHIRScope_pnIintStack_nFRInfo_5_v_; +text: .text%__1cILIR_ListWunverified_entry_point6MnFRInfo_1_v_: c1_LIREmitter.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIREmitter.o; +text: .text%__1cLLIR_EmitterGmethod6kM_pnIciMethod__; +text: .text%__1cMCodeEmitInfo2t6MpnLLIR_Emitter_ipnIintStack_pnKValueStack_pnOExceptionScope_pnPRInfoCollection__v_; +text: .text%__1cLCompilationVvalue_stack2lir_stack6MpnKValueStack__pnNGrowableArray4CpnLLIR_OprDesc____; +text: .text%__1cIValueGenMblock_epilog6MpnKBlockBegin__v_; +text: .text%__1cNCodeGeneratorPblock_do_epilog6MpnKBlockBegin__v_; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Canonicalizer.o; +text: .text%__1cIValueGenHdo_Goto6MpnEGoto__v_; +text: .text%__1cIValueGenNset_no_result6MpnLInstruction__v_; +text: .text%__1cIValueGenLmove_to_phi6MpnKValueStack_i_i_; +text: .text%__1cIValueGenWgoto_default_successor6MpnIBlockEnd_pnMCodeEmitInfo__v_; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Instruction.o; +text: .text%__1cIValueGenMdo_LoadField6MpnJLoadField__v_; +text: .text%__1cLAccessFieldKlock_stack6kM_pnKValueStack__: c1_Instruction.o; +text: .text%__1cIValueGenEwalk6MpnLInstruction__v_; +text: .text%__1cIValueGenMdo_LoadLocal6MpnJLoadLocal__v_; +text: .text%__1cIValueGenJload_item6MpnEItem__v_; +text: .text%__1cEItemGupdate6M_v_; +text: .text%__1cIValueGenQset_maynot_spill6MpnEItem__v_; +text: .text%__1cIValueGenSfpu_fanout_handled6MpnEItem__i_; +text: .text%__1cEItemEtype6kM_pnJValueType__: c1_Items.o; +text: .text%__1cIValueGenPlock_free_rinfo6MpnLInstruction_pnJValueType__nFRInfo__; +text: .text%__1cIRegAllocMhas_free_reg6kMpnJValueType__i_; +text: .text%__1cIRegAllocMhas_free_reg6kMnIValueTag__i_; +text: .text%__1cNc1_AllocTableMhas_one_free6kM_i_; 
+text: .text%__1cIRegAllocNget_lock_temp6MpnLInstruction_pnJValueType__nFRInfo__; +text: .text%__1cIRegAllocMget_free_reg6MpnJValueType__nFRInfo__; +text: .text%__1cIRegAllocMget_free_reg6MnIValueTag__nFRInfo__; +text: .text%__1cNc1_AllocTableIget_free6M_i_; +text: .text%__1cNc1_AllocTablePget_free_helper6Mi_i_; +text: .text%__1cIRegAllocIlock_reg6MpnLInstruction_nFRInfo_i_v_; +text: .text%__1cJRInfo2RegFdo_it6M_v_: c1_RegAlloc.o; +text: .text%__1cHLockRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocOset_locked_cpu6MipnLInstruction_i_v_; +text: .text%__1cNc1_AllocTableKset_locked6Mi_v_; +text: .text%__1cLCompilationIitem2lir6MpknEItem__pnLLIR_OprDesc__; +text: .text%__1cLCompilationKitem2stack6MpknEItem__i_; +text: .text%__1cJValueTypeNas_DoubleType6M_pnKDoubleType__: c1_ValueType.o; +text: .text%__1cMas_BasicType6FpnJValueType__nJBasicType__; +text: .text%__1cJValueTypeMas_ArrayType6M_pnJArrayType__: c1_ValueType.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_Compilation.o; +text: .text%__1cLLIR_EmitterEmove6MpnLLIR_OprDesc_nFRInfo__v_; +text: .text%__1cILIR_ListEmove6MpnLLIR_OprDesc_2pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenJitem_free6MpnEItem__v_; +text: .text%__1cIRegAllocPincr_spill_lock6MnFRInfo__v_; +text: .text%__1cQChangeSpillCountGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIValueGenFrfree6MpnEItem__v_; +text: .text%__1cIRegAllocPdecr_spill_lock6MnFRInfo__v_; +text: .text%__1cIRegAllocIfree_reg6MnFRInfo__v_; +text: .text%__1cHFreeRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocMset_free_cpu6Mi_v_; +text: .text%__1cNc1_AllocTableIset_free6Mi_v_; +text: .text%__1cIValueGenWrlock_result_with_hint6MpnLInstruction_pknEItem__nFRInfo__; +text: .text%__1cIValueGenFrlock6MpnLInstruction_pknEItem__nFRInfo__; +text: .text%__1cIRegAllocMget_lock_reg6MpnLInstruction_pnJValueType__nFRInfo__; +text: .text%__1cLLIR_EmitterKfield_load6MnFRInfo_pnHciField_pnLLIR_OprDesc_iiipnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListMload_mem_reg6MnFRInfo_i1nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIR.o; +text: .text%__1cIRegAllocHset_reg6MnFRInfo_ipnLInstruction__v_; +text: .text%__1cGSetRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocLset_cpu_reg6MiipnLInstruction__v_; +text: .text%__1cIValueGenNdo_StoreLocal6MpnKStoreLocal__v_; +text: .text%__1cEItemRhandle_float_kind6M_v_; +text: .text%__1cEItemNset_from_item6Mpk0_v_: c1_Items.o; +text: .text%__1cIValueGenXcan_inline_any_constant6kM_i_; +text: .text%__1cIValueGenSmust_copy_register6MpnEItem__i_; +text: .text%__1cIValueGenUcheck_float_register6MpnEItem__v_; +text: .text%__1cIRegAllocLis_free_reg6kMnFRInfo__i_; +text: .text%__1cJIsFreeRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cNc1_AllocTableHis_free6kMi_i_; +text: .text%__1cLLIR_EmitterJopr2local6MipnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListQreg2single_stack6MnFRInfo_inJBasicType__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenFdo_If6MpnCIf__v_; +text: .text%__1cIHintItemNset_from_item6MpknEItem__v_; +text: .text%__1cIHintItemEtype6kM_pnJValueType__: c1_Items.o; +text: .text%__1cJValueTypeMas_FloatType6M_pnJFloatType__: c1_ValueType.o; +text: .text%__1cIValueGenLdo_Constant6MpnIConstant__v_; +text: .text%__1cJValueTypeRas_ObjectConstant6M_pnOObjectConstant__: c1_Canonicalizer.o; +text: .text%__1cIValueGenOdont_load_item6MpnEItem__v_; +text: .text%__1cIValueGenWdont_load_item_nocheck6MpnEItem__v_; +text: 
.text%__1cLLIR_OprFactKvalue_type6FpnJValueType__pnLLIR_OprDesc__; +text: .text%__1cLLIR_EmitterFif_op6MinLInstructionJCondition_pnLLIR_OprDesc_4pnKBlockBegin_66pnMCodeEmitInfo__v_; +text: .text%__1cJLIR_ConstEtype6kM_nJBasicType__: c1_CacheLocals.o; +text: .text%__1cJLIR_ConstLas_constant6M_p0_: c1_CacheLocals.o; +text: .text%__1cLLIR_EmitterIlir_cond6MnLInstructionJCondition__nMLIR_OpBranchNLIR_Condition__; +text: .text%__1cILIR_ListDcmp6MnMLIR_OpBranchNLIR_Condition_pnLLIR_OprDesc_4pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnKBlockBegin__v_; +text: .text%__1cMLIR_OpBranch2t6Mn0ANLIR_Condition_pnKBlockBegin_pnMCodeEmitInfo__v_; +text: .text%__1cEItemEtype6kM_pnJValueType__: c1_CodeGenerator.o; +text: .text%__1cJArrayTypeMas_ArrayType6M_p0_: c1_ValueType.o; +text: .text%__1cLLIR_EmitterHopr2int6MpnLLIR_OprDesc__i_; +text: .text%__1cILIR_ListJint2stack6Mii_v_: c1_LIREmitter.o; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Loops.o; +text: .text%__1cNCachingChangeFvisit6MpnSInstructionVisitor__v_: c1_Loops.o; +text: .text%__1cIValueGenQdo_CachingChange6MpnNCachingChange__v_; +text: .text%__1cIValueGenPdo_ArithmeticOp6MpnMArithmeticOp__v_; +text: .text%__1cIValueGenTdo_ArithmeticOp_Int6MpnMArithmeticOp__v_; +text: .text%__1cIValueGenOload_item_hint6MpnEItem_pk1_v_; +text: .text%__1cEItemRget_jint_constant6kM_i_; +text: .text%__1cLLIR_EmitterRarithmetic_op_int6MnJBytecodesECode_pnLLIR_OprDesc_44nFRInfo__v_; +text: .text%__1cLLIR_EmitterNarithmetic_op6MnJBytecodesECode_pnLLIR_OprDesc_44inFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterYstrength_reduce_multiply6MpnLLIR_OprDesc_i22_i_; +text: .text%__1cILIR_ListHreg2reg6MnFRInfo_1nJBasicType__v_: c1_LIREmitter_x86.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIREmitter_x86.o; +text: .text%__1cLlog2_intptr6Fi_i_: c1_LIREmitter_x86.o; +text: .text%__1cILIR_ListKshift_left6MpnLLIR_OprDesc_222_v_; +text: .text%__1cILIR_ListDsub6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter_x86.o; +text: .text%__1cIValueGenWcan_inline_as_constant6MpnEItem__i_; +text: .text%__1cIRegAllocPget_register_rc6kMnFRInfo__i_; +text: .text%__1cLGetRefCountGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cILIR_ListHreg2reg6MnFRInfo_1nJBasicType__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListDadd6MpnLLIR_OprDesc_22_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenOdo_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cJValueTypeLas_LongType6M_pnILongType__: c1_ValueType.o; +text: .text%__1cLAccessArrayKlock_stack6kM_pnKValueStack__: c1_Instruction.o; +text: .text%__1cMCodeEmitInfoVfill_expression_stack6M_v_; +text: .text%__1cLLIR_EmitterRarray_range_check6MpnLLIR_OprDesc_2pnMCodeEmitInfo_4_v_; +text: .text%__1cORangeCheckStub2t6MpnMCodeEmitInfo_nFRInfo_ii_v_; +text: .text%__1cMCodeEmitInfo2t6Mp0i_v_; +text: .text%__1cLLIR_EmitterLcmp_reg_mem6MnMLIR_OpBranchNLIR_Condition_nFRInfo_3inJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListLcmp_reg_mem6MnMLIR_OpBranchNLIR_Condition_nFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnICodeStub__v_; +text: .text%__1cMLIR_OpBranch2t6Mn0ANLIR_Condition_pnICodeStub_pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterMindexed_load6MnFRInfo_nJBasicType_pnLLIR_OprDesc_4pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterNarray_address6MpnLLIR_OprDesc_2inJBasicType__pnLLIR_Address__; +text: .text%__1cLLIR_AddressFscale6FnJBasicType__n0AFScale__; +text: 
.text%__1cILIR_ListEmove6MpnLLIR_Address_pnLLIR_OprDesc_pnMCodeEmitInfo__v_: c1_LIREmitter_x86.o; +text: .text%__1cIRegAllocNoops_in_spill6kM_pnIintStack__; +text: .text%__1cIRegAllocRoops_in_registers6kM_pnPRInfoCollection__; +text: .text%__1cIValueGenbDsafepoint_poll_needs_register6F_i_; +text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cLLIR_EmitterHgoto_op6MpnKBlockBegin_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListEjump6MpnKBlockBegin_pnMCodeEmitInfo__v_; +text: .text%__1cIValueGenNdo_StoreField6MpnKStoreField__v_; +text: .text%__1cIValueGenOscratch1_RInfo6kM_nFRInfo__; +text: .text%__1cIValueGenUprefer_alu_registers6kM_i_; +text: .text%__1cLLIR_EmitterLfield_store6MpnHciField_pnLLIR_OprDesc_i4iipnMCodeEmitInfo_nFRInfo__v_; +text: .text%__1cILIR_ListNstore_mem_reg6MnFRInfo_1inJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cIValueGenJdo_Return6MpnGReturn__v_; +text: .text%__1cJValueTypeLas_VoidType6M_pnIVoidType__: c1_ValueType.o; +text: .text%__1cIValueGenTresult_register_for6FpnJValueType_i_nFRInfo__; +text: .text%__1cIValueGenMreturn1RInfo6F_nFRInfo__; +text: .text%__1cIValueGenPload_item_force6MpnEItem_nFRInfo__v_; +text: .text%__1cIValueGenPlock_spill_temp6MpnLInstruction_nFRInfo__v_; +text: .text%__1cIRegAllocJlock_temp6MpnLInstruction_nFRInfo__v_; +text: .text%__1cLLIR_EmitterJreturn_op6MpnLLIR_OprDesc__v_; +text: .text%__1cNCodeGeneratorXclear_instruction_items6FpnKBlockBegin__v_; +text: .text%__1cQLIR_LocalCaching2t6MpnCIR__v_; +text: .text%__1cQLIR_LocalCachingQpreferred_locals6MpknIciMethod__pnMLocalMapping__; +text: .text%__1cMLocalMappingQinit_cached_regs6M_v_; +text: .text%__1cPRegisterManager2t6M_v_; +text: .text%__1cMLocalMappingNget_cache_reg6kMi_nFRInfo__; +text: .text%__1cQLIR_LocalCachingVcompute_cached_locals6M_v_; +text: .text%__1cQLIR_LocalCachingMcache_locals6M_v_; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_IR.o; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_Canonicalizer.o; +text: .text%__1cNCachingChangeQas_CachingChange6M_p0_: c1_Loops.o; +text: .text%__1cRBlockListScanInfo2t6MpnJBlockList__v_: c1_CacheLocals.o; +text: .text%__1cOLIR_OprRefList2t6M_v_: c1_CacheLocals.o; +text: .text%__1cRBlockListScanInfoItraverse6MpnKBlockBegin_pnKLIR_OpList__v_: c1_CacheLocals.o; +text: .text%__1cLLIR_OpLabelFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cHLIR_Op1Fvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cPRegisterManagerElock6MnFRInfo__v_; +text: .text%__1cHLIR_Op2Fvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cMLIR_OpBranchFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cORangeCheckStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_CodeStubs_x86.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeStubs_x86.o; +text: .text%__1cNc1_AllocTableFmerge6Mp0_v_; +text: .text%__1cGLIR_OpFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQLIR_LocalCachingXcache_locals_for_blocks6MpnJBlockList_pnPRegisterManager_i_pnMLocalMapping__; +text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_Loops.o; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Loops.o; +text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_Loops.o; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_IR.o; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_IR.o; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_IR.o; 
+text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_IR.o; +text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_IR.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_IR.o; +text: .text%__1cKScanBlocksQmost_used_locals6M_pnKALocalList__; +text: .text%__1cKScanBlocksMint_count_at6kMi_i_; +text: .text%__1cKScanBlocksIcount_at6kMnIValueTag_i_i_; +text: .text%__1cKScanBlocksJget_array6kMnIValueTag__pknIintStack__; +text: .text%__1cKScanBlocksNlong_count_at6kMi_i_; +text: .text%__1cKScanBlocksMobj_count_at6kMi_i_; +text: .text%__1cKScanBlocksLis_obj_only6kMi_i_; +text: .text%__1cKScanBlocksLis_int_only6kMi_i_; +text: .text%__1cGALocalUsort_by_access_count6Fpp02_i_: c1_ScanBlocks.o; +text: .text%__1cQLIR_LocalCachingPcompute_caching6MpnKALocalList_pnPRegisterManager__pnMLocalMapping__; +text: .text%__1cPRegisterManagerMnum_free_cpu6M_i_; +text: .text%__1cMLocalMappingNget_cache_reg6kMinIValueTag__nFRInfo__; +text: .text%__1cPRegisterManagerMhas_free_reg6MnIValueTag__i_; +text: .text%__1cPRegisterManagerNlock_free_reg6MnIValueTag__nFRInfo__; +text: .text%__1cQLIR_LocalCachingQadd_at_all_names6FpnPRInfoCollection_inFRInfo_pnMWordSizeList__v_; +text: .text%__1cIintStackEgrow6Mki1_v_: c1_CacheLocals.o; +text: .text%__1cMLocalMappingFmerge6Mp0_v_; +text: .text%__1cGALocalNsort_by_index6Fpp02_i_: c1_CacheLocals.o; +text: .text%__1cSLocalMappingSetterIblock_do6MpnKBlockBegin__v_; +text: .text%__1cMLocalMappingEjoin6Mp0_v_; +text: .text%__1cPRegisterManagerLis_free_reg6MnFRInfo__i_; +text: .text%__1cQLIR_LocalCachingYinsert_transition_blocks6M_v_; +text: .text%__1cPBlockTransitionIblock_do6MpnKBlockBegin__v_: c1_CacheLocals.o; +text: .text%__1cGLIR_OpLas_OpBranch6M_pnMLIR_OpBranch__: c1_LIR.o; +text: .text%__1cMLocalMappingPemit_transition6FpnILIR_List_p03pnCIR__v_; +text: .text%__1cCIRThighest_used_offset6kM_i_; +text: .text%__1cILIR_ListQreg2single_stack6MnFRInfo_inJBasicType__v_: c1_CacheLocals.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CacheLocals.o; +text: .text%__1cILIR_ListQsingle_stack2reg6MinFRInfo_nJBasicType__v_; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_Instruction.o; +text: .text%__1cLCompilationbBemit_code_prolog_non_native6MpnIFrameMap__v_; +text: .text%__1cHIRScopeJmax_stack6kM_i_; +text: .text%__1cNLIR_Optimizer2t6MpnCIR__v_; +text: .text%__1cRLIR_PeepholeState2t6M_v_; +text: .text%__1cRLIR_PeepholeStateKinitialize6MpnMLocalMapping__v_; +text: .text%__1cRLIR_PeepholeStateMclear_values6M_v_; +text: .text%__1cOLIR_OprRefList2t6M_v_: c1_LIROptimizer.o; +text: .text%__1cNLIR_OptimizerIoptimize6M_v_; +text: .text%__1cNLIR_OptimizerIoptimize6MpnJBlockList__v_; +text: .text%__1cNLIR_OptimizerIoptimize6MpnKBlockBegin__v_; +text: .text%__1cNLIR_OptimizerMblock_prolog6M_v_; +text: .text%__1cNLIR_OptimizerKprocess_op6M_v_; +text: .text%__1cGLIR_OpGas_Op16M_pnHLIR_Op1__: c1_LIR.o; +text: .text%__1cLLIR_OpLabelKas_OpLabel6M_p0_: c1_LIR.o; +text: .text%__1cRLIR_PeepholeStateVfinish_forward_branch6MpnFLabel__v_; +text: .text%__1cJLabelListIindex_of6kMkpnFLabel__i_: c1_LIROptimizer.o; +text: .text%__1cRLIR_PeepholeStateYset_disable_optimization6Mi_v_; +text: .text%__1cLLIR_OpLabelJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerMemit_opLabel6MpnLLIR_OpLabel__v_; +text: .text%__1cNLIR_OptimizerFvisit6M_v_: c1_LIROptimizer_x86.o; +text: .text%__1cHLIR_Op0Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op06MpnHLIR_Op0__v_; +text: 
.text%__1cHLIR_Op2Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op26MpnHLIR_Op2__v_; +text: .text%__1cNLIR_OptimizerKhandle_opr6MpnLLIR_OprDesc_nQLIR_OpVisitStateHOprMode__2_; +text: .text%__1cNLIR_OptimizerJis_cached6MpnLLIR_OprDesc__i_; +text: .text%__1cNLIR_OptimizerUrecord_opr_reference6MpnLLIR_OprDesc__v_; +text: .text%__1cRLIR_PeepholeStateUrecord_opr_reference6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateLdefining_op6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateJreg2index6MpnLLIR_OprDesc__i_; +text: .text%__1cIintStackEgrow6Mki1_v_: c1_LIROptimizer.o; +text: .text%__1cNLIR_OptimizerMblock_epilog6M_v_; +text: .text%__1cRLIR_PeepholeStateRis_safe_to_delete6kMi_i_; +text: .text%__1cHLIR_Op1Gas_Op16M_p0_: c1_LIR.o; +text: .text%__1cHLIR_Op1Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op16MpnHLIR_Op1__v_; +text: .text%__1cNLIR_OptimizerMprocess_move6MpnHLIR_Op1__v_; +text: .text%__1cMLocalMappingNget_cache_reg6kMpnLLIR_OprDesc__2_; +text: .text%__1cRLIR_PeepholeStateTmark_safe_to_delete6Mi_v_; +text: .text%__1cNLIR_OptimizerRreplace_stack_opr6MpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerNoptimize_move6MpnHLIR_Op1_rpnLLIR_OprDesc_5_i_; +text: .text%__1cRLIR_PeepholeStatebFequivalent_register_or_constant6MpnLLIR_OprDesc__2_; +text: .text%__1cRLIR_PeepholeStateOequivalent_opr6MpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerKmaybe_opto6MpnLLIR_OprDesc_2_2_: c1_LIROptimizer_x86.o; +text: .text%__1cNLIR_OptimizerMis_cache_reg6MpnLLIR_OprDesc__i_; +text: .text%__1cMLocalMappingMis_cache_reg6kMpnLLIR_OprDesc__i_; +text: .text%__1cMLocalMappingMis_cache_reg6kMnFRInfo__i_; +text: .text%__1cRLIR_PeepholeStateSequivalent_address6MpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerRresult_substitute6M_v_; +text: .text%__1cNLIR_OptimizerRnext_op_with_code6MnILIR_Code__pnGLIR_Op__; +text: .text%__1cNLIR_OptimizerFop_at6Mi_pnGLIR_Op__; +text: .text%__1cRLIR_PeepholeStateMkill_operand6MpnLLIR_OprDesc_i_v_; +text: .text%__1cRLIR_PeepholeStateQkill_equivalents6MpnLLIR_OprDesc__v_; +text: .text%__1cRLIR_PeepholeStateNkill_register6Mi_v_; +text: .text%__1cRLIR_PeepholeStateSrecord_defining_op6MpnLLIR_OprDesc_i_v_; +text: .text%__1cRLIR_PeepholeStatePset_defining_op6Mii_v_; +text: .text%__1cRLIR_PeepholeStateHdo_move6MpnLLIR_OprDesc_2_v_; +text: .text%__1cLLIR_OprListEgrow6MkikpnLLIR_OprDesc__v_: c1_LIROptimizer.o; +text: .text%__1cLLIR_AddressKas_address6M_p0_: c1_LIR.o; +text: .text%__1cRLIR_PeepholeStateTequivalent_register6MpnLLIR_OprDesc__2_; +text: .text%__1cKLIR_OprPtrLas_constant6M_pnJLIR_Const__: c1_LIR.o; +text: .text%__1cNLIR_OptimizerKallow_opto6M_i_; +text: .text%__1cNLIR_OptimizerLrecord_opto6MpnLLIR_OprDesc_2_2_; +text: .text%__1cLLIR_AddressEtype6kM_nJBasicType__: c1_LIR.o; +text: .text%__1cRLIR_PeepholeStateNincrement_ref6Mi_v_; +text: .text%__1cKLIR_OprPtrKas_address6M_pnLLIR_Address__: c1_CacheLocals.o; +text: .text%__1cMLIR_OpBranchLas_OpBranch6M_p0_: c1_LIR.o; +text: .text%__1cMLIR_OpBranchJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerNemit_opBranch6MpnMLIR_OpBranch__v_; +text: .text%__1cNLIR_OptimizerQopr_live_on_exit6MpnLLIR_OprDesc__i_; +text: .text%__1cNResourceArrayJremove_at6MIi_v_; +text: .text%__1cRLIR_PeepholeStateLstack2index6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStatePkill_stack_slot6Mi_v_; +text: .text%__1cRLIR_PeepholeStatebCequivalent_register_or_stack6MpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerKmaybe_opto6MpnLLIR_OprDesc_2_2_: 
c1_LIROptimizer.o; +text: .text%__1cNLIR_OptimizerLhandle_info6MpnMCodeEmitInfo__v_; +text: .text%__1cMCodeEmitInfoRset_local_mapping6MpnMLocalMapping__v_; +text: .text%__1cNLIR_OptimizerUrecord_register_oops6MpnMCodeEmitInfo__v_; +text: .text%__1cNLIR_OptimizerOemit_code_stub6MpnICodeStub__v_; +text: .text%__1cLCompilationOemit_code_body6MpnLCodeOffsets__i_; +text: .text%__1cNLIR_Assembler2t6MpnLCompilation_pnLCodeOffsets__v_; +text: .text%__1cNConstantTable2t6M_v_; +text: .text%__1cNLIR_AssemblerJemit_code6MpnJBlockList__v_; +text: .text%__1cQCollectConstantsIblock_do6MpnKBlockBegin__v_: c1_LIRAssembler.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_IR.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Canonicalizer.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Instruction.o; +text: .text%__1cJValueTypeNas_DoubleType6M_pnKDoubleType__: c1_Canonicalizer.o; +text: .text%__1cJValueTypeMas_FloatType6M_pnJFloatType__: c1_Canonicalizer.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Loops.o; +text: .text%__1cNLIR_AssemblerOemit_constants6M_v_; +text: .text%__1cNConstantTableMemit_entries6MpnOMacroAssembler_i_v_; +text: .text%__1cLLIR_CodeGenIblock_do6MpnKBlockBegin__v_; +text: .text%__1cNLIR_AssemblerPcheck_codespace6M_v_; +text: .text%__1cNLIR_AssemblerMemit_opLabel6MpnLLIR_OpLabel__v_; +text: .text%__1cNLIR_AssemblerIemit_op06MpnHLIR_Op0__v_; +text: .text%__1cNLIR_AssemblerIemit_op26MpnHLIR_Op2__v_; +text: .text%__1cNLIR_AssemblerMneeds_icache6kMpnIciMethod__i_; +text: .text%__1cFRInfoLas_register6kM_pnMRegisterImpl__; +text: .text%__1cNLIR_AssemblerMcheck_icache6MpnMRegisterImpl_2_i_; +text: .text%__1cRC1_MacroAssemblerSinline_cache_check6MpnMRegisterImpl_2_v_; +text: .text%__1cRC1_MacroAssemblerOverified_entry6M_v_; +text: .text%__1cNLIR_AssemblerLbuild_frame6M_v_; +text: .text%__1cNLIR_AssemblerbBinitial_frame_size_in_bytes6M_i_; +text: .text%__1cIFrameMapJframesize6kM_i_; +text: .text%__1cRC1_MacroAssemblerLbuild_frame6Mi_v_; +text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: c1_Compiler.o; +text: .text%__1cNLIR_AssemblerVsetup_locals_at_entry6M_v_; +text: .text%__1cIFrameMapYsignature_type_array_for6FpknIciMethod__pnNBasicTypeList__; +text: .text%__1cIFrameMapScalling_convention6FpknIciMethod_pnIintArray__pnRCallingConvention__; +text: .text%__1cIFrameMapScalling_convention6FirknOBasicTypeArray_pnIintArray__pnRCallingConvention__; +text: .text%__1cIintArray2t6Mki1_v_: c1_FrameMap_x86.o; +text: .text%__1cIFrameMapRname_for_argument6Fi_i_; +text: .text%__1cIFrameMapSfp_offset_for_name6kMiii_i_; +text: .text%__1cIFrameMapPnum_local_names6kM_i_; +text: .text%__1cIFrameMapNlocal_to_slot6kMii_i_; +text: .text%__1cIFrameMapSfp_offset_for_slot6kMi_i_; +text: .text%__1cQArgumentLocation2t6Mci_v_: c1_FrameMap_x86.o; +text: .text%__1cQArgumentLocationSset_stack_location6Mi_v_; +text: .text%__1cIFrameMapQaddress_for_name6kMiii_nHAddress__; +text: .text%__1cIFrameMapQmake_new_address6kMi_nHAddress__; +text: .text%__1cNLIR_AssemblerIemit_op16MpnHLIR_Op1__v_; +text: .text%__1cNLIR_AssemblerHmove_op6MpnLLIR_OprDesc_2nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerHmem2reg6MpnLLIR_Address_nFRInfo_nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerKas_Address6MpnLLIR_Address__nHAddress__; +text: 
.text%__1cNLIR_AssemblerHcomp_op6MnMLIR_OpBranchNLIR_Condition_pnLLIR_OprDesc_4nJBasicType__v_; +text: .text%__1cNLIR_AssemblerNemit_opBranch6MpnMLIR_OpBranch__v_; +text: .text%__1cNLIR_AssemblerJreg2stack6MnFRInfo_inJBasicType__v_; +text: .text%__1cNLIR_AssemblerLconst2stack6MpnJLIR_Const_i_v_; +text: .text%__1cNLIR_AssemblerJstack2reg6MpnLLIR_OprDesc_2nJBasicType__v_; +text: .text%__1cNLIR_AssemblerHreg2reg6MnFRInfo_1_v_; +text: .text%__1cNLIR_AssemblerJmove_regs6MpnMRegisterImpl_2_v_; +text: .text%__1cNLIR_AssemblerIshift_op6MnILIR_Code_nFRInfo_i2_v_; +text: .text%__1cNLIR_AssemblerIarith_op6MnILIR_Code_pnLLIR_OprDesc_33pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerbIadd_debug_info_for_null_check_here6MpnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerLcode_offset6kM_i_; +text: .text%__1cNLIR_AssemblerbDadd_debug_info_for_null_check6MipnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerOemit_code_stub6MpnICodeStub__v_; +text: .text%__1cVImplicitNullCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerCpc6kM_pC_; +text: .text%__1cICodeStubLset_code_pc6MpC_v_: c1_CodeStubs_x86.o; +text: .text%__1cICodeStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o; +text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler.o; +text: .text%__1cORangeCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerOsafepoint_poll6MnFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerZadd_debug_info_for_branch6MpnMCodeEmitInfo__v_; +text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cMCodeEmitInfoRrecord_debug_info6MpnYDebugInformationRecorder_ii_v_; +text: .text%__1cMCodeEmitInfoHoop_map6M_pnGOopMap__; +text: .text%__1cMCodeEmitInfoScompute_debug_info6M_v_; +text: .text%__1cMCodeEmitInfoOcreate_oop_map6M_pnGOopMap__; +text: .text%__1cIFrameMapRoop_map_arg_count6M_i_; +text: .text%__1cMCodeEmitInfoTrecord_spilled_oops6kMpnIFrameMap_pnGOopMap__v_; +text: .text%__1cKciLocalMapNindex_for_bci6kMi_i_; +text: .text%__1cSciLocalMapIteratorJfind_next6M_v_: c1_LIREmitter.o; +text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_LIREmitter.o; +text: .text%__1cMCodeEmitInfoNget_cache_reg6kMinIValueTag__nFRInfo__; +text: .text%__1cIFrameMapTsingle_word_regname6kMi_nHOptoRegEName__; +text: .text%__1cIFrameMapMfp2sp_offset6kMi_i_; +text: .text%__1cGOopMapHset_oop6MnHOptoRegEName_ii_v_; +text: .text%__1cMCodeEmitInfoVlir_stack2value_stack6MpnNGrowableArray4CpnLLIR_OprDesc____pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cMCodeEmitInfobCcompute_debug_info_for_scope6MpnHIRScope_ipnNGrowableArray4CpnKScopeValue___inGValues_i_pnQIRScopeDebugInfo__; +text: .text%__1cMCodeEmitInfobCscope_value_for_local_offset6MinILocationEType_ppnKScopeValue__4_; +text: .text%__1cMCodeEmitInfobEget_cache_reg_for_local_offset6kMi_nFRInfo__; +text: .text%__1cMLocalMappingbEget_cache_reg_for_local_offset6kMi_nFRInfo__; +text: .text%__1cMCodeEmitInfoZlocation_for_local_offset6MinILocationEType__1_; +text: .text%__1cIFrameMapZlocation_for_local_offset6kMinILocationEType_p1_i_; +text: .text%__1cIFrameMapWlocation_for_fp_offset6kMinILocationEType_p1_i_; +text: .text%__1cILocationVlegal_offset_in_bytes6Fi_i_; +text: .text%__1cMCodeEmitInfoYscope_value_for_register6MnFRInfo_ppnKScopeValue_4nILocationEType__v_; +text: .text%__1cGOopMapJdeep_copy6M_p0_; +text: .text%__1cGOopMap2t6Mn0ANDeepCopyToken_p0_v_; +text: .text%__1cMOopMapStream2t6MpnGOopMap__v_; +text: .text%__1cMOopMapStreamJfind_next6M_v_; +text: 
.text%__1cUCompressedReadStreamIread_int6M_i_: oopMap.o; +text: .text%__1cYDebugInformationRecorderNadd_safepoint6MiipnGOopMap__v_; +text: .text%__1cYDebugInformationRecorderLcheck_phase6Mn0AFPhase__v_; +text: .text%__1cYDebugInformationRecorderKadd_oopmap6MiipnGOopMap__v_; +text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__; +text: .text%__1cYDebugInformationRecorderWserialize_scope_values6MpnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfoRec.o; +text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfo.o; +text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: location.o; +text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__; +text: .text%__1cYDebugInformationRecorderYserialize_monitor_values6MpnNGrowableArray4CpnMMonitorValue____i_; +text: .text%__1cYDebugInformationRecorderOdescribe_scope6MpnIciMethod_ipnKDebugToken_44_v_; +text: .text%__1cYDebugInformationRecorderNappend_handle6MpnI_jobject__i_; +text: .text%__1cLOopRecorderOallocate_index6MpnI_jobject__i_; +text: .text%__1cLCompilationbEadd_exception_handlers_for_pco6MiipnOExceptionScope__v_; +text: .text%__1cNExceptionInfo2t6MiipnOExceptionScope__v_; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cNLIR_AssemblerHreg2mem6MnFRInfo_pnLLIR_Address_nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_OprDescGis_oop6kM_i_; +text: .text%__1cNLIR_AssemblerJreturn_op6MnFRInfo_i_v_; +text: .text%__1cWpoll_return_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cRC1_MacroAssemblerLmethod_exit6Mi_v_; +text: .text%__1cLCompilationQemit_code_epilog6MpnNLIR_Assembler__v_; +text: .text%__1cNLIR_AssemblerUemit_slow_case_stubs6M_v_; +text: .text%__1cNLIR_AssemblerKemit_stubs6MpnMCodeStubList__v_; +text: .text%__1cVImplicitNullCheckStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cWImplicitExceptionTableGappend6MII_v_; +text: .text%__1cZresource_reallocate_bytes6FpcII_0_; +text: .text%__1cFArenaIArealloc6MpvII_1_; +text: .text%__1cNLIR_AssemblerNadd_call_info6MipnMCodeEmitInfo__v_; +text: .text%__1cOdummy_location6FnIValueTag__pnKScopeValue__: c1_LIREmitter.o; +text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cORangeCheckStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNLIR_AssemblerWemit_exception_handler6M_i_; +text: .text%__1cRC1_MacroAssemblerRexception_handler6Mii_v_; +text: .text%__1cNLIR_AssemblerPemit_call_stubs6M_v_; +text: .text%__1cNLIR_AssemblerbCmaybe_adjust_stack_alignment6MpnIciMethod__v_; +text: .text%__1cKreal_index6FpnIFrameMap_i_i_: c1_LIRAssembler_x86.o; +text: .text%__1cLCompilationbEgenerate_exception_range_table6M_v_; +text: .text%__1cOExceptionScopeGequals6kMp0_i_; +text: .text%__1cLCompilationbBadd_exception_range_entries6MiipnOExceptionScope_ip2pi_v_; +text: .text%__1cTExceptionRangeTablebCcompute_modified_at_call_pco6Fii_i_; +text: .text%__1cOExceptionScopeMcaller_scope6kM_p0_; +text: .text%__1cLLIR_EmitterKframe_size6M_i_; +text: .text%__1cNLIR_Assembler2T6M_v_; +text: .text%__1cLCompilationMinstall_code6MpnLCodeOffsets_i_v_; +text: 
.text%__1cFciEnvPregister_method6MpnIciMethod_iiiiiipnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler_ii_v_; +text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_; +text: .text%__1cFciEnvbUsystem_dictionary_modification_counter_changed6M_i_; +text: .text%__1cHnmethodLnew_nmethod6FnMmethodHandle_iiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__p0_; +text: .text%__1cLOopRecorderIoop_size6M_i_; +text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_; +text: .text%__1cYDebugInformationRecorderJdata_size6M_i_; +text: .text%__1cHnmethod2n6FIi_pv_; +text: .text%__1cHnmethod2t6MpnNmethodOopDesc_iiiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__v_; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: codeBlob.o; +text: .text%__1cLPcDescCache2t6M_v_; +text: .text%__1cHnmFlagsFclear6M_v_; +text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_; +text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_; +text: .text%__1cICodeBlobJcopy_oops6MppnI_jobject_i_v_; +text: .text%__1cIUniverseMnon_oop_word6F_pv_; +text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_; +text: .text%__1cGPcDesc2t6Miii_v_; +text: .text%__1cHnmethodKcopy_pc_at6MipnGPcDesc__v_; +text: .text%__1cHnmethodSresolve_JNIHandles6M_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: nmethod.o; +text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_; +text: .text%__1cHnmethodKis_nmethod6kM_i_: nmethod.o; +text: .text%__1cHnmethodUnumber_of_dependents6kM_i_: nmethod.o; +text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_; +text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cTExceptionRangeTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cKNativeJumpbEcheck_verified_entry_alignment6FpC1_v_; +text: .text%__1cGEventsDlog6FpkcE_v_: nmethod.o; +text: .text%__1cFciEnvKcompile_id6M_I_; +text: .text%__1cNmethodOopDescIset_code6MpnHnmethod__v_; +text: .text%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_; +text: .text%__1cLCompilation2T6M_v_; +text: .text%__1cFArena2T6M_v_; +text: .text%__1cFArenaRdestruct_contents6M_v_; +text: .text%__1cICHeapObj2k6Fpv_v_; +text: .text%__1cTExceptionRangeTable2T6M_v_; +text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_; +text: .text%__1cMelapsedTimerDadd6M0_v_; +text: .text%__1cFciEnv2T6M_v_; +text: .text%__1cNCompileBrokerUpop_jni_handle_block6F_v_; +text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_; +text: .text%__1cHnmethodKtotal_size6kM_i_; +text: .text%__1cHnmethodJcode_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodOexception_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodJstub_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodQscopes_data_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodPscopes_pcs_size6kM_i_: nmethod.o; +text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_; +text: .text%__1cSCompileTaskWrapper2T6M_v_; +text: .text%__1cNCompileBrokerJfree_task6FpnLCompileTask__v_; +text: .text%__1cLCompileTaskEfree6M_v_; +text: .text%__1cKJNIHandlesOdestroy_global6FpnI_jobject_i_v_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: reflection.o; +text: .text%__1cNArgumentCountDset6MinJBasicType__v_: reflection.o; +text: 
.text%__1cZget_mirror_from_signature6FnMmethodHandle_pnPSignatureStream_pnGThread__pnHoopDesc__; +text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__; +text: .text%__1cNSignatureInfoHdo_long6M_v_: reflection.o; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: reflection.o; +text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__; +text: .text%__1cKReflectionbFbasic_type_mirror_to_basic_type6FpnHoopDesc_pnGThread__nJBasicType__; +text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cQSystemDictionaryQjava_mirror_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cKReflectionTunbox_for_primitive6FpnHoopDesc_pnGjvalue_pnGThread__nJBasicType__; +text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__; +text: .text%__1cUGenericGrowableArrayGgrow646Mi_v_; +text: .text%__1cRComputeEntryStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_; +text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_; +text: .text%__1cQComputeCallStackHdo_void6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__; +text: .text%__1cLInstructionMas_CompareOp6M_pnJCompareOp__: c1_GraphBuilder.o; +text: .text%__1cLInstructionNas_InstanceOf6M_pnKInstanceOf__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderMnew_instance6Mi_v_; +text: .text%__1cQciBytecodeStreamJget_klass6kM_pnHciKlass__; +text: .text%__1cQciBytecodeStreamPget_klass_index6kM_i_; +text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cIciSymbolHbyte_at6Mi_i_; +text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__; +text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__; +text: .text%__1cGciTypeMis_classless6kM_i_: ciInstanceKlass.o; +text: .text%__1cMGraphBuilderMappend_split6MpnKStateSplit__pnLInstruction__; +text: .text%__1cLNewInstanceFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerOdo_NewInstance6MpnLNewInstance__v_; +text: .text%__1cLInstructionEhash6kM_i_: c1_Instruction.o; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_Instruction.o; +text: .text%__1cKValueStackMclear_locals6M_v_; +text: .text%__1cKValueStackMclear_stores6M_v_; +text: .text%__1cKValueStackZpin_stack_for_state_split6M_v_; +text: .text%__1cLNewInstanceIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cMGraphBuilderIstack_op6MnJBytecodesECode__v_; +text: .text%__1cMGraphBuilderGinvoke6MnJBytecodesECode__v_; +text: .text%__1cQciBytecodeStreamKget_method6kM_pnIciMethod__; +text: .text%__1cQciBytecodeStreamQget_method_index6kM_i_; +text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__; +text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__; +text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_; +text: .text%__1cNciMethodKlassEmake6F_p0_; +text: .text%__1cIciObjectMis_classless6kM_i_: ciMethod.o; +text: .text%__1cQciBytecodeStreambAget_declared_method_holder6M_pnHciKlass__; +text: .text%__1cQciBytecodeStreamXget_method_holder_index6M_i_; +text: 
.text%__1cLciSignatureLreturn_type6kM_pnGciType__; +text: .text%__1cKValueStackNpop_arguments6Mi_pnGValues__; +text: .text%__1cGInvoke2t6MnJBytecodesECode_pnJValueType_pnLInstruction_pnGValues_iiii_v_; +text: .text%__1cGInvokeFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerJdo_Invoke6MpnGInvoke__v_; +text: .text%__1cGInvokeJas_Invoke6M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_Instruction.o; +text: .text%__1cGInvokeIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cMGraphBuilderIthrow_op6M_v_; +text: .text%__1cFThrowFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerIdo_Throw6MpnFThrow__v_; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_Instruction.o; +text: .text%__1cFThrowIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_Instruction.o; +text: .text%__1cFThrowIas_Throw6M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_Instruction.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_Instruction.o; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorOdo_NewInstance6MpnLNewInstance__v_; +text: .text%__1cTNullCheckEliminatorShandle_NewInstance6MpnLNewInstance__v_; +text: .text%__1cGInvokePinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorJdo_Invoke6MpnGInvoke__v_; +text: .text%__1cTNullCheckEliminatorNhandle_Invoke6MpnGInvoke__v_; +text: .text%__1cFThrowPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorIdo_Throw6MpnFThrow__v_; +text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_IR.o; +text: .text%__1cLInstructionGnegate6Fn0AJCondition__1_; +text: .text%__1cFThrowPstate_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cFRInfoIoverlaps6kMk0_i_; +text: .text%__1cIValueGenOdo_NewInstance6MpnLNewInstance__v_; +text: .text%__1cIValueGenVspill_values_on_stack6MpnKValueStack_nFRInfo_i_v_; +text: .text%__1cIRegAllocNlock_register6MpnLInstruction_nFRInfo__v_; +text: .text%__1cHHideReg2t6MpnIValueGen_pnJValueType__v_; +text: .text%__1cHHideReg2T6M_v_; +text: .text%__1cLLIR_EmitterMnew_instance6MnFRInfo_pnPciInstanceKlass_1111pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterZjobject2reg_with_patching6MnFRInfo_pnIciObject_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListNoop2reg_patch6MpnI_jobject_nFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cPNewInstanceStub2t6MnFRInfo_pnLLIR_OprDesc_pnPciInstanceKlass_pnMCodeEmitInfo_nIRuntime1GStubID__v_; +text: .text%__1cIValueGenJdo_Invoke6MpnGInvoke__v_; +text: .text%__1cIValueGenWinvoke_visit_arguments6MpnGInvoke_pnRCallingConvention__pnJItemArray__; +text: .text%__1cIValueGenNis_free_rinfo6MnFRInfo__i_; +text: .text%__1cGInvokeRsize_of_arguments6kM_i_; +text: .text%__1cLLIR_EmitterVstore_stack_parameter6MpnLLIR_OprDesc_i_v_; +text: .text%__1cILIR_ListFstore6MpnLLIR_OprDesc_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cHHideReg2t6MpnIValueGen_nFRInfo_i_v_; +text: .text%__1cIValueGenVinvoke_load_arguments6MpnGInvoke_pnJItemArray_pnRCallingConvention__v_; +text: .text%__1cIValueGenPinvoke_do_spill6MpnGInvoke_nFRInfo__v_; +text: .text%__1cIValueGenXis_caller_save_register6FnFRInfo__i_; +text: .text%__1cIValueGenLspill_value6MpnLInstruction__v_; +text: .text%__1cIValueGenKspill_item6MpnEItem__v_; +text: .text%__1cIValueGenQround_spill_item6MpnEItem_i_v_; +text: 
.text%__1cIRegAllocOget_lock_spill6MpnLInstruction_i_i_; +text: .text%__1cIValueGenJraw_rfree6MpnEItem__v_; +text: .text%__1cLLIR_EmitterFspill6MipnLLIR_OprDesc__v_; +text: .text%__1cIFrameMapKspill_name6kMi_i_; +text: .text%__1cIValueGenQinvoke_do_result6MpnGInvoke_ipnEItem__v_; +text: .text%__1cIVoidTypeLas_VoidType6M_p0_: c1_ValueType.o; +text: .text%__1cLCompilationXlir_opr_for_instruction6MpnLInstruction__pnLLIR_OprDesc__; +text: .text%__1cLLIR_EmitterHcall_op6MnJBytecodesECode_pknOBasicTypeArray_pnMCodeEmitInfo_iiinFRInfo_pnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListKnull_check6MnFRInfo_pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListQcall_opt_virtual6MnFRInfo_pnLLIR_OprDesc_pCpnMCodeEmitInfo_pnOStaticCallStub__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenIdo_Throw6MpnFThrow__v_; +text: .text%__1cLNewInstanceKexact_type6kM_pnGciType__; +text: .text%__1cOExceptionScopeLcould_catch6kMpnPciInstanceKlass_i_i_; +text: .text%__1cIValueGenRexceptionOopRInfo6F_nFRInfo__; +text: .text%__1cIValueGenFsfree6MpnEItem__v_; +text: .text%__1cIRegAllocKfree_spill6MipnJValueType__v_; +text: .text%__1cIRegAllocNis_free_spill6kMipnJValueType__i_; +text: .text%__1cLNewInstanceOas_NewInstance6M_p0_: c1_Instruction.o; +text: .text%__1cIValueGenQexceptionPcRInfo6F_nFRInfo__; +text: .text%__1cILIR_ListPthrow_exception6MnFRInfo_1pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator.o; +text: .text%__1cPNewInstanceStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cOLIR_OpJavaCallFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_LIR.o; +text: .text%__1cOStaticCallStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cIFrameMapWcaller_save_cpu_reg_at6Fi_pnLLIR_OprDesc__; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_Instruction.o; +text: .text%__1cIVoidTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_Instruction.o; +text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_Instruction.o; +text: .text%__1cRLIR_PeepholeStateHdo_call6M_v_; +text: .text%__1cOLIR_OpJavaCallJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerJemit_call6MpnOLIR_OpJavaCall__v_; +text: .text%__1cNLIR_AssemblerJconst2reg6MpnJLIR_Const_nFRInfo_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cMPatchingStubQalign_patch_site6MpnOMacroAssembler__v_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_pnI_jobject__v_; +text: .text%__1cOoop_RelocationLunpack_data6M_v_; +text: .text%__1cKRelocationNunpack_2_ints6Mri1_v_: relocInfo.o; +text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cOoop_RelocationJpack_data6M_i_; +text: .text%__1cNLIR_AssemblerPpatching_epilog6MpnMPatchingStub_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_; +text: .text%__1cMPatchingStubHinstall6MpnOMacroAssembler_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_: c1_LIRAssembler.o; +text: .text%__1cNLIR_AssemblerUappend_patching_stub6MpnMPatchingStub__v_; +text: .text%__1cPNewInstanceStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerJemit_call6MpnOLIR_OpJavaCall__v_; +text: .text%__1cNLIR_AssemblerKalign_call6MnILIR_Code__v_; +text: .text%__1cICodeStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cOStaticCallStubLset_code_pc6MpC_v_: c1_CodeStubs_x86.o; +text: 
.text%__1cOStaticCallStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerEcall6MpCnJrelocInfoJrelocType_pnMCodeEmitInfo__v_; +text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cKRelocationJpack_data6M_i_: relocInfo.o; +text: .text%__1cMCodeEmitInfoSappend_scope_value6MpnLLIR_OprDesc_pnNGrowableArray4CpnKScopeValue____v_; +text: .text%__1cMCodeEmitInfoRopr2location_type6MpnLLIR_OprDesc__nILocationEType__; +text: .text%__1cMCodeEmitInfoRlocation_for_name6MinILocationEType_ii_1_; +text: .text%__1cIFrameMapRlocation_for_name6kMinILocationEType_p1ii_i_; +text: .text%__1cNLIR_AssemblerIthrow_op6MnFRInfo_1pnMCodeEmitInfo_i_v_; +text: .text%__1cMCodeEmitInfoQadd_register_oop6MnFRInfo__v_; +text: .text%__1cIintArrayIindex_of6kMki_i_: c1_LIREmitter.o; +text: .text%__1cMCodeEmitInfoYadd_registers_to_oop_map6MpnGOopMap__v_; +text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cYinternal_word_RelocationJpack_data6M_i_; +text: .text%__1cJrelocInfoKset_format6Mi_v_; +text: .text%__1cMPatchingStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cRAbstractAssemblerGa_byte6Mi_v_; +text: .text%__1cRNativeGeneralJumpUinsert_unconditional6FpC1_v_; +text: .text%__1cNRelocIterator2t6MpnKCodeBuffer_pC3_v_; +text: .text%__1cJrelocInfobDchange_reloc_info_for_address6FpnNRelocIterator_pCn0AJrelocType_4_v_; +text: .text%__1cJrelocInfoIset_type6Mn0AJrelocType__v_; +text: .text%__1cPNewInstanceStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cJOopMapSetMgrow_om_data6M_v_; +text: .text%__1cOStaticCallStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cWstatic_stub_RelocationJpack_data6M_i_; +text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_; +text: .text%__1cOCallRelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_; +text: .text%__1cYinternal_word_RelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cYinternal_word_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cYinternal_word_RelocationGtarget6M_pC_; +text: .text%__1cODataRelocationJset_value6MpC_v_: relocInfo.o; +text: .text%__1cODataRelocationGoffset6M_i_: relocInfo.o; +text: .text%__1cKRelocationRpd_set_data_value6MpCi_v_; +text: .text%__1cKRelocationSpd_address_in_code6M_ppC_; +text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nmethod.o; +text: .text%__1cOoop_RelocationHoops_do6MpFppnHoopDesc__v_v_; +text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__; +text: .text%__1cRresolve_and_patch6FppnHoopDesc__v_; +text: .text%__1cMPeriodicTaskOreal_time_tick6FI_v_; +text: .text%__1cPStatSamplerTaskEtask6M_v_: statSampler.o; +text: .text%__1cLStatSamplerOcollect_sample6F_v_; +text: .text%__1cLStatSamplerLsample_data6FpnMPerfDataList__v_; +text: .text%jni_GetPrimitiveArrayCritical: jni.o; +text: .text%jni_ReleasePrimitiveArrayCritical: jni.o; +text: .text%jni_SetBooleanField: jni.o; +text: .text%__1cNFingerprinterIdo_float6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_: interpreterRuntime.o; +text: .text%JVM_IsNaN; +text: .text%__1cNFingerprinterJdo_double6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_: interpreterRuntime.o; +text: .text%__1cXNativeSignatureIteratorLpass_double6M_v_: interpreterRuntime.o; +text: 
.text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_; +text: .text%jni_GetArrayLength: jni.o; +text: .text%JVM_Read; +text: .text%__1cDhpiEread6FipvI_I_: jvm.o; +text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__; +text: .text%__1cRComputeEntryStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackHdo_char6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__; +text: .text%__1cMLinkResolverbNlinktime_resolve_virtual_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cIciMethodXfind_monomorphic_target6MpnHciKlass_22_p0_; +text: .text%__1cDCHAManalyze_call6FnLKlassHandle_11nMsymbolHandle_2_pnJCHAResult__; +text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cJCHAResult2t6MnLKlassHandle_nMsymbolHandle_2pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___n0E_i_v_; +text: .text%__1cJCHAResultOis_monomorphic6kM_i_; +text: .text%__1cJCHAResultSmonomorphic_target6kM_nMmethodHandle__; +text: .text%__1cIciMethodJwill_link6MpnHciKlass_2nJBytecodesECode__i_; +text: .text%__1cMGraphBuilderKtry_inline6MpnIciMethod_i_i_; +text: .text%__1cMGraphBuilderUclear_inline_bailout6M_v_; +text: .text%__1cIciMethodOshould_exclude6M_i_; +text: .text%__1cIciMethodPcan_be_compiled6M_i_; +text: .text%__1cMGraphBuilderVtry_inline_intrinsics6MpnIciMethod__i_; +text: .text%__1cMGraphBuilderPtry_inline_full6MpnIciMethod_i_i_; +text: .text%__1cIciMethodIhas_jsrs6kM_i_; +text: .text%__1cMGraphBuilderWrecursive_inline_level6kMpnIciMethod__i_; +text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__; +text: .text%__1cJNullCheckFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_NullCheck6MpnJNullCheck__v_; +text: .text%__1cJNullCheckIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cKObjectTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_ValueStack.o; +text: .text%__1cMGraphBuilderKpush_scope6MpnIciMethod_pnKBlockBegin_i_v_; +text: .text%__1cKValueStackKpush_scope6MpnHIRScope__p0_; +text: .text%__1cOExceptionScopeKpush_scope6M_p0_; +text: .text%__1cOExceptionScope2t6Mp0_v_; +text: .text%__1cHIRScopeXcompute_lock_stack_size6M_v_; +text: .text%__1cMGraphBuilderJScopeDataRcaller_stack_size6kM_i_; +text: .text%__1cMGraphBuilderJScopeDataLnum_returns6M_i_; +text: .text%__1cMGraphBuilderJScopeDataXset_inline_cleanup_info6MpnKBlockBegin_pnLInstruction_pnKValueStack__v_; +text: .text%__1cMGraphBuilderJScopeDataQincr_num_returns6M_v_; +text: .text%__1cKValueStackJpop_scope6Mii_p0_; +text: .text%__1cMGraphBuilderJpop_scope6M_v_; +text: .text%__1cMGraphBuilderTpop_exception_scope6M_v_; +text: .text%__1cOExceptionScopeJpop_scope6M_p0_; +text: .text%__1cLCompilationVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cMGraphBuilderOinline_bailout6Mpkc_v_; +text: .text%__1cLInstructionEprev6MpnKBlockBegin__p0_; +text: .text%__1cKBlockBeginUresolve_substitution6M_v_; +text: .text%__1cKBlockBeginPblock_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cZresolve_substituted_value6FppnLInstruction__v_: 
c1_Instruction.o; +text: .text%__1cLInstructionFsubst6M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionPother_values_do6MpFpp0_v_v_: c1_GraphBuilder.o; +text: .text%__1cLInstructionPstate_values_do6MpFpp0_v_v_: c1_GraphBuilder.o; +text: .text%__1cLInstructionPstate_values_do6MpFpp0_v_v_: c1_Instruction.o; +text: .text%__1cIConstantPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cIBlockEndPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cHIntTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cJNullCheckPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorMdo_NullCheck6MpnJNullCheck__v_; +text: .text%__1cTNullCheckEliminatorQhandle_NullCheck6MpnJNullCheck__v_; +text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_GraphBuilder.o; +text: .text%__1cHIRScopeNtop_scope_bci6kM_i_; +text: .text%__1cQUseCountComputerPclear_use_count6FpnKBlockBegin__v_: c1_IR.o; +text: .text%__1cIValueGenMdo_NullCheck6MpnJNullCheck__v_; +text: .text%__1cJNullCheckKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o; +text: .text%__1cLLIR_EmitterKnull_check6MpnLLIR_OprDesc_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListDsub6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenQlock_spill_rinfo6MpnLInstruction_nFRInfo__v_; +text: .text%__1cQIRScopeDebugInfoRrecord_debug_info6MpnYDebugInformationRecorder__v_: c1_LIREmitter.o; +text: .text%__1cIRuntime1Yresolve_opt_virtual_call6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNSharedRuntimeOresolve_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cNSharedRuntimeSresolve_sub_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cFframeZsender_for_compiled_frame6kMpnLRegisterMap_pnICodeBlob_i_0_; +text: .text%__1cLRuntimeStubYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cHnmethodJis_zombie6kM_i_: nmethod.o; +text: .text%__1cNnmethodLocker2t6MpnHnmethod__v_; +text: .text%__1cNSharedRuntimeQfind_callee_info6FpnKJavaThread_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cHnmethodQis_native_method6kM_i_: nmethod.o; +text: .text%__1cHnmethodKpc_desc_at6MpCi_pnGPcDesc__; +text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_; +text: .text%__1cLPcDescCacheKpc_desc_at6kMpnHnmethod_pCi_pnGPcDesc__; +text: .text%__1cLPcDescCacheLadd_pc_desc6MpnGPcDesc__v_; +text: .text%__1cSvframeStreamCommonYfill_from_compiled_frame6MpnHnmethod_i_v_; +text: .text%__1cUCompressedReadStreamMraw_read_int6FrpC_i_: vframe.o; +text: .text%__1cICodeBlobLoop_addr_at6kMi_ppnHoopDesc__; +text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKCompiledICZcompute_monomorphic_entry6FnMmethodHandle_nLKlassHandle_iirnOCompiledICInfo_pnGThread__v_; +text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_; +text: .text%__1cXvirtual_call_RelocationIparse_ic6FrpnICodeBlob_rpC5rppnHoopDesc_pi_nNRelocIterator__; +text: .text%__1cKCompiledICIis_clean6kM_i_; +text: .text%__1cKCompiledICOic_destination6kM_pC_; +text: .text%__1cKCompiledICWis_in_transition_state6kM_i_; +text: .text%__1cRInlineCacheBufferIcontains6FpC_i_; +text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_; +text: 
.text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_; +text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_; +text: .text%__1cNRelocIteratorEnext6M_i_: compiledIC.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: compiledIC.o; +text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_; +text: .text%__1cNnmethodLocker2T6M_v_; +text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_; +text: .text%jni_GetByteArrayRegion: jni.o; +text: .text%JVM_DefineClassWithSource; +text: .text%__1cXjvm_define_class_common6FpnHJNIEnv__pkcpnI_jobject_pkWi53pnGThread__pnH_jclass__: jvm.o; +text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__; +text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_; +text: .text%__1cPClassFileParserZskip_over_field_signature6MpciIpnGThread__1_; +text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cQput_after_lookup6FnMsymbolHandle_0ppnLNameSigHash__i_; +text: .text%__1cEhash6Fpkc1_I_; +text: .text%__1cKDictionarybAis_valid_protection_domain6MiInMsymbolHandle_nGHandle_2_i_; +text: .text%__1cPDictionaryEntrybAcontains_protection_domain6kMpnHoopDesc__i_; +text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cPDictionaryEntryVadd_protection_domain6MpnHoopDesc__v_; +text: .text%__1cUverify_byte_codes_fn6F_pv_: verifier.o; +text: .text%JVM_GetClassCPEntriesCount; +text: .text%JVM_GetClassCPTypes; +text: .text%JVM_GetClassNameUTF; +text: .text%JVM_ReleaseUTF; +text: .text%JVM_FindClassFromClass; +text: .text%jni_IsSameObject: jni.o; +text: .text%JVM_GetClassFieldsCount; +text: .text%JVM_GetClassMethodsCount; +text: .text%JVM_GetMethodIxModifiers; +text: .text%JVM_GetMethodIxByteCodeLength; +text: .text%JVM_GetMethodIxByteCode; +text: .text%JVM_GetMethodIxExceptionTableLength; +text: .text%JVM_GetMethodIxLocalsCount; +text: .text%JVM_GetMethodIxArgsSize; +text: .text%JVM_GetMethodIxSignatureUTF; +text: .text%JVM_GetMethodIxMaxStack; +text: .text%JVM_GetMethodIxExceptionsCount; +text: .text%JVM_GetMethodIxExceptionIndexes; +text: .text%JVM_GetCPMethodNameUTF; +text: .text%JVM_GetCPMethodClassNameUTF; +text: .text%jni_NewLocalRef: jni.o; +text: .text%JVM_GetCPMethodModifiers; +text: .text%JVM_IsConstructorIx; +text: .text%JVM_GetCPMethodSignatureUTF; +text: .text%jni_DeleteGlobalRef: jni.o; +text: .text%__1cQSystemDictionaryVadd_loader_constraint6FnMsymbolHandle_nGHandle_2pnGThread__v_; +text: .text%__1cVLoaderConstraintTableJadd_entry6MnMsymbolHandle_pnMklassOopDesc_nGHandle_34pnGThread__i_; +text: .text%__1cVLoaderConstraintTableJnew_entry6MIpnNsymbolOopDesc_pnMklassOopDesc_ii_pnVLoaderConstraintEntry__; +text: .text%jni_ToReflectedMethod: jni.o; +text: .text%__1cKReflectionKnew_method6FnMmethodHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: reflection.o; +text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__; +text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_; +text: 
.text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodThas_signature_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodVhas_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbFhas_parameter_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbChas_annotation_default_field6F_i_; +text: .text%__1cNmethodOopDescSannotation_default6kM_pnQtypeArrayOopDesc__; +text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_; +text: .text%jni_CallIntMethod: jni.o; +text: .text%jni_CallStaticVoidMethod: jni.o; +text: .text%jni_DetachCurrentThread; +text: .text%__1cKJavaThreadEexit6Mi_v_; +text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_; +text: .text%JVM_MonitorNotifyAll; +text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_; +text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o; +text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_; +text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_; +text: .text%__1cWThreadLocalAllocBufferFclear6M_v_; +text: .text%__1cHThreadsGremove6FpnKJavaThread__v_; +text: .text%__1cNThreadServiceNremove_thread6FpnKJavaThread_i_v_; +text: .text%__SLIP.DELETER__A: thread.o; +text: .text%__1cKJavaThread2T6M_v_; +text: .text%__1cGParker2T6M_v_; +text: .text%__1cHMonitor2T6M_v_; +text: .text%__1cFMutex2T6M_v_; +text: .text%lwp_cond_destroy: os_solaris.o; +text: .text%lwp_mutex_destroy: os_solaris.o; +text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_; +text: .text%__1cUThreadSafepointState2T6M_v_; +text: .text%__1cGThread2T5B6M_v_; +text: .text%__1cCosLfree_thread6FpnIOSThread__v_; +text: .text%__1cIOSThread2T6M_v_; +text: .text%__1cIOSThreadKpd_destroy6M_v_; +text: .text%jni_DestroyJavaVM; +text: .text%jni_AttachCurrentThread; +text: .text%attach_current_thread: jni.o; +text: .text%__1cCosWcreate_attached_thread6FpnGThread__i_; +text: .text%__1cKJavaThreadSallocate_threadObj6MnGHandle_pcipnGThread__v_; +text: .text%__1cHThreadsKdestroy_vm6F_i_; +text: .text%__1cKJavaThreadVinvoke_shutdown_hooks6M_v_; +text: .text%__1cLbefore_exit6FpnKJavaThread__v_; +text: .text%__1cNWatcherThreadEstop6F_v_; +text: .text%__1cLStatSamplerJdisengage6F_v_; +text: .text%__1cMPeriodicTaskJdisenroll6M_v_; +text: .text%__1cMPeriodicTask2T5B6M_v_; +text: .text%__1cMPeriodicTaskLis_enrolled6kM_i_; +text: .text%__1cLStatSamplerHdestroy6F_v_; +text: .text%__1cMPerfDataList2T6M_v_; +text: .text%__1cLJvmtiExportNpost_vm_death6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_death6F_v_; +text: .text%__1cCosXterminate_signal_thread6F_v_; +text: .text%__1cCosNsigexitnum_pd6F_i_; +text: .text%__1cCosNsignal_notify6Fi_v_; +text: .text%__1cQprint_statistics6F_v_; +text: .text%__1cFVTuneEexit6F_v_; +text: .text%__1cIVMThreadXwait_for_vm_thread_exit6F_v_; +text: .text%__1cUSafepointSynchronizeFbegin6F_v_; +text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_; +text: .text%__1cJTimeStampSticks_since_update6kM_x_; +text: .text%__1cTAbstractInterpreterRnotice_safepoints6F_v_; +text: .text%__1cKcopy_table6FppC1i_v_: interpreter.o; +text: 
.text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_; +text: .text%__1cCosRcurrent_thread_id6F_i_; +text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fi_v_; +text: .text%__1cCosbCmake_polling_page_unreadable6F_v_; +text: .text%__1cUThreadSafepointStateXexamine_state_of_thread6Mi_v_; +text: .text%__1cUSafepointSynchronizeOsafepoint_safe6FpnKJavaThread_nPJavaThreadState__i_; +text: .text%__1cUThreadSafepointStateMroll_forward6Mn0AMsuspend_type_pnHnmethod_i_v_; +text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_; +text: .text%__1cUSafepointSynchronizeQdo_cleanup_tasks6F_v_; +text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_; +text: .text%__1cNObjectMonitorHis_busy6kM_i_; +text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_; +text: .text%__1cMCounterDecayFdecay6F_v_; +text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_; +text: .text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__; +text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__; +text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: .text%__1cJdo_method6FpnNmethodOopDesc__v_: recompilationMonitor.o; +text: .text%__1cONMethodSweeperFsweep6F_v_; +text: .text%__1cNCompileBrokerQset_should_block6F_v_; +text: .text%__1cHVM_ExitbJwait_for_threads_in_native_to_block6F_i_; +text: .text%__1cURecompilationMonitorbFstop_recompilation_monitor_task6F_v_; +text: .text%__1cIVMThreadHdestroy6F_v_; +text: .text%__SLIP.DELETER__A: vmThread.o; +text: .text%__1cSThreadLocalStorageRpd_invalidate_all6F_v_; +text: .text%__1cHVM_ExitNset_vm_exited6F_i_; +text: .text%__1cMexit_globals6F_v_; +text: .text%__1cVverificationType_exit6F_v_; +text: .text%__1cQVerificationTypeIfinalize6F_v_; +text: .text%__1cPperfMemory_exit6F_v_; +text: .text%__1cPPerfDataManagerHdestroy6F_v_; +text: .text%__1cIPerfData2T6M_v_; +text: .text%__1cKPerfMemoryHdestroy6F_v_; +text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_; +text: .text%__1cUdelete_shared_memory6FpcI_v_: perfMemory_solaris.o; +text: .text%__1cLremove_file6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cMostream_exit6F_v_; +text: .text%__SLIP.DELETER__C: ostream.o; +text: .text%__SLIP.FINAL__A: c1_Items.o; +# Test Exit +text: .text%__1cPSignatureStreamHis_done6kM_i_; +text: .text%JVM_Halt; +text: .text%__1cHvm_exit6Fi_v_; +text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_; +text: .text%__1cMVM_OperationNdoit_prologue6M_i_: vm_operations.o; +text: .text%__1cGThreadMget_priority6Fkpk0_nOThreadPriority__; +text: .text%__1cCosMget_priority6FkpknGThread_rnOThreadPriority__nIOSReturn__; +text: .text%__1cCosTget_native_priority6FkpknGThread_pi_nIOSReturn__; +text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_; +text: .text%__1cMVM_OperationPevaluation_mode6kM_n0AEMode__: vm_operations.o; +text: .text%__1cMVM_OperationSis_cheap_allocated6kM_i_: vm_operations.o; +text: .text%__1cQVMOperationQdDueueDadd6MpnMVM_Operation__i_; +text: .text%__1cQVMOperationQdDueueOqueue_add_back6MipnMVM_Operation__v_; +text: .text%__1cQVMOperationQdDueueGinsert6MpnMVM_Operation_2_v_; +text: .text%__1cQVMOperationQdDueueGunlink6MpnMVM_Operation__v_; +text: .text%__1cHVM_ExitEname6kM_pkc_: vm_operations.o; +text: .text%__1cJEventMark2t6MpkcE_v_: vmThread.o; +text: .text%__1cCosJyield_all6Fi_v_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: vmThread.o; +text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_; +text: .text%__1cIVMThreadSevaluate_operation6MpnMVM_Operation__v_; +text: 
.text%__1cMVM_OperationIevaluate6M_v_; +text: .text%__1cHVM_ExitEdoit6M_v_; +# Test Hello +text: .text%JVM_GetCPFieldSignatureUTF; +text: .text%JVM_Write; +text: .text%__1cDhpiFwrite6FipkvI_I_: jvm.o; +# Test Sleep +text: .text%JVM_GetMethodIxExceptionTableEntry; +text: .text%JVM_GetCPClassNameUTF; +text: .text%JVM_Sleep; +text: .text%__1cCosHSolarisTsetup_interruptible6F_pnKJavaThread__; +text: .text%__1cCosHSolarisTsetup_interruptible6FpnKJavaThread__v_; +text: .text%__1cUSafepointSynchronizeRis_cleanup_needed6F_i_; +text: .text%__1cRInlineCacheBufferIis_empty6F_i_; +text: .text%__1cCosHSolarisVcleanup_interruptible6FpnKJavaThread__v_; +text: .text%__1cCosOunguard_memory6FpcI_i_; +# Test IntToString +text: .text%__1cQChunkPoolCleanerEtask6M_v_: allocation.o; +text: .text%__1cJChunkPoolMfree_all_but6MI_v_: allocation.o; +# Test LoadToolkit +text: .text%JVM_GetClassContext; +text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: jvm.o; +text: .text%jni_IsAssignableFrom: jni.o; +text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_; +text: .text%__1cQComputeCallStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cMGraphBuilderNload_constant6M_v_; +text: .text%__1cQciBytecodeStreamMget_constant6kM_nKciConstant__; +text: .text%__1cQciBytecodeStreamSget_constant_index6kM_i_; +text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_Canonicalizer.o; +text: .text%__1cTsort_by_start_block6FppnELoop_2_i_: c1_Loops.o; +text: .text%__1cILIR_ListLcall_static6MpnLLIR_OprDesc_pCpnMCodeEmitInfo_pnOStaticCallStub__v_: c1_LIREmitter.o; +text: .text%__1cLLIR_EmitterLcmp_mem_int6MnMLIR_OpBranchNLIR_Condition_nFRInfo_iipnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListLcmp_mem_int6MnMLIR_OpBranchNLIR_Condition_nFRInfo_iipnMCodeEmitInfo__v_; +text: .text%__1cJValueTypeLas_VoidType6M_pnIVoidType__: c1_Canonicalizer.o; +text: .text%__1cILIR_ListHint2reg6MinFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cRComputeEntryStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_Optimizer.o; +text: .text%__1cEIfOpPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cEIfOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorHdo_IfOp6MpnEIfOp__v_; +text: .text%__1cIValueGenHdo_IfOp6MpnEIfOp__v_; +text: .text%__1cLLIR_EmitterLifop_phase16MnLInstructionJCondition_pnLLIR_OprDesc_4_v_; +text: .text%__1cLLIR_EmitterLifop_phase26MnFRInfo_pnLLIR_OprDesc_3nLInstructionJCondition__v_; +text: .text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnFLabel__v_; +text: .text%__1cRLIR_PeepholeStateUstart_forward_branch6MpnFLabel__v_; +text: .text%__1cOGenerateOopMapMdo_checkcast6M_v_; +text: .text%__1cMGraphBuilderLinstance_of6Mi_v_; +text: .text%__1cKInstanceOfFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerNdo_InstanceOf6MpnKInstanceOf__v_; +text: .text%__1cJTypeCheckIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderOdirect_compare6MpnHciKlass__i_; +text: .text%__1cKInstanceOfNas_InstanceOf6M_p0_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderKcheck_cast6Mi_v_; +text: 
.text%__1cJCheckCastFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_CheckCast6MpnJCheckCast__v_; +text: .text%__1cJValueTypeKas_IntType6M_pnHIntType__: c1_ValueType.o; +text: .text%__1cJTypeCheckPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorNdo_InstanceOf6MpnKInstanceOf__v_; +text: .text%__1cQNullCheckVisitorMdo_CheckCast6MpnJCheckCast__v_; +text: .text%__1cIValueGenNdo_InstanceOf6MpnKInstanceOf__v_; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator_x86.o; +text: .text%__1cLLIR_EmitterNinstanceof_op6MpnLLIR_OprDesc_2pnHciKlass_nFRInfo_5ipnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListKinstanceof6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo__v_; +text: .text%__1cPLIR_OpTypeCheck2t6MnILIR_Code_pnLLIR_OprDesc_3pnHciKlass_33ipnMCodeEmitInfo_7pnICodeStub__v_; +text: .text%__1cIValueGenMdo_CheckCast6MpnJCheckCast__v_; +text: .text%__1cILIR_ListJcheckcast6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo_6pnICodeStub__v_; +text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator_x86.o; +text: .text%__1cPLIR_OpTypeCheckFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cTSimpleExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cPLIR_OpTypeCheckJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIROptimizer.o; +text: .text%__1cIintArrayIindex_of6kMki_i_: c1_LIROptimizer.o; +text: .text%__1cNLIR_AssemblerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_; +text: .text%__1cIciObjectIencoding6M_pnI_jobject__; +text: .text%__1cJAssemblerEcmpl6MnHAddress_pnI_jobject__v_; +text: .text%__1cTSimpleExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cTSimpleExceptionStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cJLoadFieldIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%__1cJLoadFieldMas_LoadField6M_p0_: c1_Instruction.o; +text: .text%__1cDPhiPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cDPhiFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorGdo_Phi6MpnDPhi__v_; +text: .text%__1cLInstructionIas_Local6M_pnFLocal__: c1_GraphBuilder.o; +text: .text%__1cDPhiGas_Phi6M_p0_: c1_GraphBuilder.o; +text: .text%__1cIValueGenScompute_phi_arrays6MpnKValueStack_pnGValues_pnIintStack_i_pnLInstruction__; +text: .text%__1cLLIR_EmitterTset_fpu_stack_empty6M_v_; +text: .text%__1cIRegAllocKlock_spill6MpnLInstruction_ii_v_; +text: .text%__1cIRegAllocRextend_spill_area6Mi_v_; +text: .text%__1cRclear_state_items6FppnLInstruction__v_: c1_CodeGenerator.o; +text: .text%__1cNLIR_AssemblerTset_fpu_stack_empty6M_v_; +text: .text%__1cIFrameMapLFpuStackSimFclear6M_v_; +text: .text%jni_GetEnv; +text: .text%jni_CallStaticBooleanMethod: jni.o; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cVcreate_gc_point_array6FpnFArena_i_pnNGrowableArray4Ci___; +text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_; +text: .text%__1cOGenerateOopMapIppop_any6Mi_v_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: c1_IR.o; +text: .text%__1cMGraphBuilderQhandle_exception6Mi_v_; +text: .text%__1cOExceptionScopeFclear6M_v_; +text: .text%__1cMGraphBuilderJScopeDataJxhandlers6kM_pnJXHandlers__; +text: 
.text%__1cTciConstantPoolCache2t6MpnFArena_i_v_; +text: .text%__1cTciConstantPoolCacheDget6Mi_pv_; +text: .text%__1cTciConstantPoolCacheEfind6Mi_i_; +text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_; +text: .text%__1cMGraphBuilderHif_null6MpnJValueType_nLInstructionJCondition__v_; +text: .text%__1cOObjectConstantRas_ObjectConstant6M_p0_: c1_ValueType.o; +text: .text%__1cMas_ValueType6FnKciConstant__pnJValueType__; +text: .text%__1cLInstructionGmirror6Fn0AJCondition__1_; +text: .text%__1cHis_true6FxnLInstructionJCondition_x_i_: c1_Canonicalizer.o; +text: .text%__1cNCanonicalizerNset_canonical6MpnLInstruction__v_; +text: .text%__1cKBlockBeginVadd_exception_handler6Mp0_v_; +text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_Instruction.o; +text: .text%__1cOExceptionScopeLadd_handler6MpnIXHandler__v_; +text: .text%__1cIciObjectEhash6M_i_; +text: .text%__1cPciObjectFactoryPinsert_non_perm6Mrpn0ANNonPermObject_pnHoopDesc_pnIciObject__v_; +text: .text%__1cIciObjectMhas_encoding6M_i_; +text: .text%__1cJValueTypeRas_ObjectConstant6M_pnOObjectConstant__: c1_ValueType.o; +text: .text%__1cNClassConstantQas_ClassConstant6M_p0_: c1_ValueType.o; +text: .text%__1cOExceptionScopeKhandler_at6kMi_pnIXHandler__; +text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderIlogic_op6MpnJValueType_nJBytecodesECode__v_; +text: .text%__1cHLogicOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerKdo_LogicOp6MpnHLogicOp__v_; +text: .text%__1cHLogicOpEhash6kM_i_: c1_Instruction.o; +text: .text%__1cHLogicOpEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cLInstructionIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cMGraphBuilderHconvert6MnJBytecodesECode_nJBasicType_3_v_; +text: .text%__1cHConvertFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerKdo_Convert6MpnHConvert__v_; +text: .text%__1cHConvertEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cHConvertEname6kM_pkc_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderNstore_indexed6MnJBasicType__v_; +text: .text%__1cIValueMapKkill_array6MpnJValueType__v_; +text: .text%__1cGBucketKkill_array6MpnJValueType__v_; +text: .text%__1cLInstructionOas_LoadIndexed6M_pnLLoadIndexed__: c1_GraphBuilder.o; +text: .text%__1cLInstructionOas_LoadIndexed6M_pnLLoadIndexed__: c1_Instruction.o; +text: .text%__1cKValueStackRpin_stack_indexed6MpnJValueType__v_; +text: .text%__1cMStoreIndexedFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_StoreIndexed6MpnMStoreIndexed__v_; +text: .text%__1cLAccessArrayIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cLAccessFieldPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cLInstructionPother_values_do6MpFpp0_v_v_: c1_Instruction.o; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstance.o; +text: .text%__1cMStoreIndexedPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cHConvertPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorKdo_LogicOp6MpnHLogicOp__v_; +text: .text%__1cQNullCheckVisitorKdo_Convert6MpnHConvert__v_; +text: .text%__1cQNullCheckVisitorPdo_StoreIndexed6MpnMStoreIndexed__v_; +text: .text%__1cTNullCheckEliminatorThandle_StoreIndexed6MpnMStoreIndexed__v_; +text: .text%__1cMciNullObjectMis_classless6kM_i_: ciNullObject.o; +text: .text%__1cJValueTypeQas_ClassConstant6M_pnNClassConstant__: c1_ValueType.o; +text: .text%__1cOObjectConstantIencoding6kM_pnI_jobject__; +text: 
.text%__1cIValueGenbBrlock_byte_result_with_hint6MpnLInstruction_pknEItem__nFRInfo__; +text: .text%__1cNc1_AllocTableThas_one_free_masked6kMnKc1_RegMask__i_; +text: .text%__1cIRegAllocMget_lock_reg6MpnLInstruction_nKc1_RegMask__nFRInfo__; +text: .text%__1cIRegAllocMget_free_reg6MnKc1_RegMask__nFRInfo__; +text: .text%__1cNc1_AllocTablePget_free_masked6MnKc1_RegMask__i_; +text: .text%__1cNClassConstantIencoding6kM_pnI_jobject__; +text: .text%__1cLLIR_EmitterLopr2jobject6MpnLLIR_OprDesc__pnI_jobject__; +text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenMrelease_item6MpnEItem__v_; +text: .text%__1cIValueGenPdo_StoreIndexed6MpnMStoreIndexed__v_; +text: .text%__1cIValueGenKdo_Convert6MpnHConvert__v_; +text: .text%__1cIValueGenKdo_LogicOp6MpnHLogicOp__v_; +text: .text%__1cLLIR_EmitterIlogic_op6MnJBytecodesECode_nFRInfo_pnLLIR_OprDesc_5_v_; +text: .text%__1cILIR_ListLlogical_and6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o; +text: .text%__1cLLIR_EmitterKconvert_op6MnJBytecodesECode_pnLLIR_OprDesc_nFRInfo_i_v_; +text: .text%__1cILIR_ListHconvert6MnJBytecodesECode_pnLLIR_OprDesc_4i_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenKmust_round6MpnLInstruction_pknEItem__i_; +text: .text%__1cLAccessArrayKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o; +text: .text%__1cLLIR_EmitterNindexed_store6MnJBasicType_pnLLIR_OprDesc_33nFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterXlo_word_offset_in_bytes6kM_i_; +text: .text%__1cLLIR_EmitterXhi_word_offset_in_bytes6kM_i_; +text: .text%__1cILIR_ListLstore_array6MnFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cIValueGenXexception_handler_start6MpnHIRScope_ipnKValueStack__v_; +text: .text%__1cLLIR_EmitterNhandler_entry6M_v_; +text: .text%__1cLLIR_OprFactQdummy_value_type6FpnJValueType__pnLLIR_OprDesc__; +text: .text%__1cLInstructionKexact_type6kM_pnGciType__: c1_GraphBuilder.o; +text: .text%__1cLInstructionNdeclared_type6kM_pnGciType__: c1_GraphBuilder.o; +text: .text%__1cILIR_ListKnull_check6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cNLIR_OpConvertJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerOemit_opConvert6MpnNLIR_OpConvert__v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_pnI_jobject__v_; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_pnI_jobject__v_; +text: .text%__1cNLIR_AssemblerIlogic_op6MnILIR_Code_pnLLIR_OprDesc_33_v_; +text: .text%__1cNLIR_AssemblerOemit_opConvert6MpnNLIR_OpConvert__v_; +text: .text%__1cNLIR_AssemblerNarray_move_op6MpnLLIR_OprDesc_2nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerJreg2array6MnFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerPas_ArrayAddress6MpnLLIR_Address_nJBasicType__nHAddress__; +text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cUDebugInfoWriteStreamMwrite_handle6MpnI_jobject__v_; +text: .text%__1cTExceptionRangeTableJadd_entry6Miiiiii_v_; +text: .text%__1cTExceptionRangeEntry2t6Miiiiii_v_; +text: .text%__1cTExceptionRangeTableJadd_entry6MnTExceptionRangeEntry__v_; +text: .text%__1cOExceptionScopeCid6kM_i_; +text: .text%__1cTExceptionRangeTableTentry_index_for_pco6kMi_i_; +text: .text%__1cTExceptionRangeTableIentry_at6kMi_pnTExceptionRangeEntry__; +text: .text%jni_CallStaticVoidMethodV: jni.o; +text: .text%JVM_GetLastErrorString; +text: .text%jni_Throw: jni.o; +text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_; +text: .text%JVM_DisableCompiler; +text: 
.text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%JVM_Available; +text: .text%__1cOGenerateOopMapKpp_new_ref6MpnNCellTypeState_i_v_; +text: .text%__1cLInstructionMas_LoadField6M_pnJLoadField__: c1_Instruction.o; +text: .text%__1cHLogicOpOis_commutative6kM_i_; +text: .text%__1cDCHANprocess_class6FnLKlassHandle_pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: .text%__1cUGenericGrowableArrayMraw_contains6kMpknEGrET__i_; +text: .text%__1cLArrayLengthFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerOdo_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cLArrayLengthEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cLArrayLengthEname6kM_pkc_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderOnew_type_array6M_v_; +text: .text%__1cMNewTypeArrayFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerPdo_NewTypeArray6MpnMNewTypeArray__v_; +text: .text%__1cINewArrayIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cJIntrinsicFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_Intrinsic6MpnJIntrinsic__v_; +text: .text%__1cJIntrinsicMas_Intrinsic6M_p0_: c1_GraphBuilder.o; +text: .text%__1cJIntrinsicIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cLAccessArrayPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorOdo_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cTNullCheckEliminatorShandle_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cINewArrayPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorPdo_NewTypeArray6MpnMNewTypeArray__v_; +text: .text%__1cTNullCheckEliminatorPhandle_NewArray6MpnINewArray__v_; +text: .text%__1cJIntrinsicPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorMdo_Intrinsic6MpnJIntrinsic__v_; +text: .text%__1cJLoopArrayIindex_of6kMkpnELoop__i_: c1_Loops.o; +text: .text%__1cINewArrayLas_NewArray6M_p0_: c1_Instruction.o; +text: .text%__1cILIR_ListOcall_icvirtual6MnFRInfo_pnLLIR_OprDesc_pCpnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListNstore_mem_int6MinFRInfo_inJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cIValueGenOdo_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cLLIR_EmitterMarray_length6MnFRInfo_pnLLIR_OprDesc_pnMCodeEmitInfo__v_; +text: .text%__1cLlog2_intptr6Fi_i_: c1_LIREmitter.o; +text: .text%__1cIValueGenPdo_NewTypeArray6MpnMNewTypeArray__v_; +text: .text%__1cLLIR_EmitterOnew_type_array6MnFRInfo_nJBasicType_pnLLIR_OprDesc_11111pnMCodeEmitInfo__v_; +text: .text%__1cQNewTypeArrayStub2t6MnFRInfo_11pnMCodeEmitInfo__v_; +text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_; +text: .text%__1cQciTypeArrayKlassJmake_impl6FnJBasicType__p0_; +text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter_x86.o; +text: .text%__1cILIR_ListOallocate_array6MnFRInfo_11111nJBasicType_1pnICodeStub__v_; +text: .text%__1cIValueGenMdo_Intrinsic6MpnJIntrinsic__v_; +text: .text%__1cIValueGenMdo_ArrayCopy6MpnJIntrinsic__v_; +text: .text%__1cIValueGenQarraycopy_helper6MpnJIntrinsic_pippnMciArrayKlass__v_; +text: .text%__1cJLoadFieldKexact_type6kM_pnGciType__; +text: .text%__1cJLoadFieldNdeclared_type6kM_pnGciType__; +text: .text%__1cQciTypeArrayKlassTis_type_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cOas_array_klass6FpnGciType__pnMciArrayKlass__: c1_CodeGenerator.o; +text: 
.text%__1cMciArrayKlassOis_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cMNewTypeArrayKexact_type6kM_pnGciType__; +text: .text%__1cLInstructionNdeclared_type6kM_pnGciType__: c1_Instruction.o; +text: .text%__1cRpositive_constant6FpnLInstruction__i_: c1_CodeGenerator.o; +text: .text%__1cLArrayLengthOas_ArrayLength6M_p0_: c1_GraphBuilder.o; +text: .text%__1cQis_constant_zero6FpnLInstruction__i_: c1_CodeGenerator.o; +text: .text%__1cILIR_ListJarraycopy6MpnLLIR_OprDesc_22222pnMciArrayKlass_ipnMCodeEmitInfo__v_: c1_CodeGenerator_x86.o; +text: .text%__1cLLIR_EmitterNwrite_barrier6MpnLLIR_OprDesc_2_v_; +text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter_x86.o; +text: .text%__1cILIR_ListUunsigned_shift_right6MpnLLIR_OprDesc_222_v_; +text: .text%__1cQLIR_OpAllocArrayFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQNewTypeArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cPLIR_OpArrayCopyFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQLIR_OpAllocArrayJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerQemit_alloc_array6MpnQLIR_OpAllocArray__v_; +text: .text%__1cPLIR_OpArrayCopyJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerOemit_arraycopy6MpnPLIR_OpArrayCopy__v_; +text: .text%__1cNLIR_AssemblerHic_call6MpCpnMCodeEmitInfo__v_; +text: .text%__1cJAssemblerEcall6MpCrknQRelocationHolder__v_; +text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cXvirtual_call_RelocationJpack_data6M_i_; +text: .text%__1cNLIR_AssemblerJconst2mem6MpnJLIR_Const_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerQemit_alloc_array6MpnQLIR_OpAllocArray__v_; +text: .text%__1cNLIR_AssemblerSarray_element_size6kMnJBasicType__nHAddressLScaleFactor__; +text: .text%__1cRC1_MacroAssemblerOallocate_array6MpnMRegisterImpl_222inHAddressLScaleFactor_2rnFLabel__v_; +text: .text%__1cRC1_MacroAssemblerMtry_allocate6MpnMRegisterImpl_2i22rnFLabel__v_; +text: .text%__1cQNewTypeArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerOemit_arraycopy6MpnPLIR_OpArrayCopy__v_; +text: .text%__1cMciArrayKlassMelement_type6M_pnGciType__; +text: .text%__1cNArrayCopyStub2t6MpnMCodeEmitInfo_pnOStaticCallStub__v_; +text: .text%__1cFRInfoMset_word_reg6MkpnMRegisterImpl__v_; +text: .text%__1cNArrayCopyStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerOpush_parameter6MpnMRegisterImpl_i_v_; +text: .text%__1cQNewTypeArrayStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNArrayCopyStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_; +text: .text%__1cIRuntime1Uresolve_virtual_call6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: compiledICHolderKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: compiledICHolderKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: compiledICHolderKlass.o; +text: .text%__1cXvirtual_call_RelocationJfirst_oop6M_pC_; +text: .text%__1cXvirtual_call_RelocationJoop_limit6M_pC_; +text: .text%__1cNRelocIteratorJset_limit6MpC_v_; +text: 
.text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cGICStubIset_stub6MpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_; +text: .text%__1cKCompiledICSset_ic_destination6MpC_v_; +text: .text%__1cRInlineCacheBufferLnew_ic_stub6F_pnGICStub__; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlass.o; +text: .text%JVM_NewArray; +text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__; +text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_; +text: .text%__1cNobjArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMkpnNmethodOopDesc_i_pnHnmethod__; +text: .text%__1cQSimpleCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_; +text: .text%__1cICompilerMsupports_osr6M_i_: c1_Compiler.o; +text: .text%__1cHciKlassOis_subclass_of6Mp0_i_; +text: .text%__1cMGraphBuilderQnew_object_array6M_v_; +text: .text%__1cONewObjectArrayFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerRdo_NewObjectArray6MpnONewObjectArray__v_; +text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__: ciObjArrayKlass.o; +text: .text%__1cMGraphBuilderIshift_op6MpnJValueType_nJBytecodesECode__v_; +text: .text%__1cHShiftOpFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerKdo_ShiftOp6MpnHShiftOp__v_; +text: .text%__1cHShiftOpEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cHShiftOpEname6kM_pkc_: c1_GraphBuilder.o; +text: .text%__1cLLoadIndexedOas_LoadIndexed6M_p0_: c1_Instruction.o; +text: .text%__1cMArithmeticOpIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%__1cDOp2Gas_Op26M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionMas_LoadField6M_pnJLoadField__: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorRdo_NewObjectArray6MpnONewObjectArray__v_; +text: .text%__1cDOp2Pinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorKdo_ShiftOp6MpnHShiftOp__v_; +text: .text%__1cHciKlassMaccess_flags6M_i_; +text: .text%__1cILIR_ListPallocate_object6MnFRInfo_111ii1pnICodeStub__v_; +text: .text%__1cLLIR_EmitterOmembar_release6M_v_; +text: .text%__1cLLIR_EmitterGmembar6M_v_; +text: .text%__1cIValueGenRdo_NewObjectArray6MpnONewObjectArray__v_; +text: .text%__1cLLIR_EmitterQnew_object_array6MnFRInfo_pnHciKlass_pnLLIR_OprDesc_11111pnMCodeEmitInfo_7_v_; +text: .text%__1cSNewObjectArrayStub2t6MnFRInfo_11pnMCodeEmitInfo__v_; +text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_; +text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_; +text: .text%__1cLLIR_EmitterOmembar_acquire6M_v_; +text: .text%__1cIValueGenKdo_ShiftOp6MpnHShiftOp__v_; +text: .text%__1cIValueGenPshiftCountRInfo6F_nFRInfo__; +text: .text%__1cLLIR_EmitterIshift_op6MnJBytecodesECode_nFRInfo_pnLLIR_OprDesc_53_v_; +text: .text%__1cILIR_ListKshift_left6MnFRInfo_i1_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListKlogical_or6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o; +text: .text%__1cOLIR_OpAllocObjFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cSNewObjectArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cOLIR_OpAllocObjJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_; +text: .text%__1cNLIR_AssemblerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_; +text: 
.text%__1cRC1_MacroAssemblerPallocate_object6MpnMRegisterImpl_22ii2rnFLabel__v_; +text: .text%__1cNLIR_AssemblerOmembar_release6M_v_; +text: .text%__1cNLIR_AssemblerGmembar6M_v_; +text: .text%__1cSNewObjectArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerOmembar_acquire6M_v_; +text: .text%__1cEBaseHas_Base6M_p0_: c1_IR.o; +text: .text%__1cNLIR_AssemblerOemit_osr_entry6MpnHIRScope_ipnFLabel_i_v_; +text: .text%__1cSNewObjectArrayStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cUGenericGrowableArrayLraw_at_grow6MipknEGrET__pv_; +text: .text%__1cNSignatureInfoIdo_short6M_v_: bytecode.o; +text: .text%jni_MonitorEnter: jni.o; +text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_; +text: .text%jni_MonitorExit: jni.o; +text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_; +text: .text%jni_CallVoidMethod: jni.o; +text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_: jni.o; +text: .text%jni_CallStaticBooleanMethodV: jni.o; +text: .text%JVM_GetStackTraceDepth; +text: .text%__1cTjava_lang_ThrowableVget_stack_trace_depth6FpnHoopDesc_pnGThread__i_; +text: .text%__1cTjava_lang_ThrowableJbacktrace6FpnHoopDesc__2_; +text: .text%JVM_GetStackTraceElement; +text: .text%__1cTjava_lang_ThrowableXget_stack_trace_element6FpnHoopDesc_ipnGThread__2_; +text: .text%__1cbBjava_lang_StackTraceElementGcreate6FnMmethodHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cbBjava_lang_StackTraceElementNset_className6FpnHoopDesc_2_v_; +text: .text%__1cbBjava_lang_StackTraceElementOset_methodName6FpnHoopDesc_2_v_; +text: .text%__1cbBjava_lang_StackTraceElementMset_fileName6FpnHoopDesc_2_v_; +text: .text%__1cNmethodOopDescUline_number_from_bci6kMi_i_; +text: .text%__1cbECompressedLineNumberReadStream2t6MpC_v_; +text: .text%__1cbECompressedLineNumberReadStreamJread_pair6M_i_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: methodOop.o; +text: .text%__1cbBjava_lang_StackTraceElementOset_lineNumber6FpnHoopDesc_i_v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: typeArrayKlass.o; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciObjectFactory.o; +text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_; +text: .text%__1cNObjectMonitorREntryQdDueue_insert6MpnMObjectWaiter_i_v_; +text: .text%__1cNObjectMonitorbAEntryQdDueue_SelectSuccessor6M_pnMObjectWaiter__; +text: .text%__1cLServiceUtilLvisible_oop6FpnHoopDesc__i_: objectMonitor_solaris.o; +text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_; +text: .text%JVM_EnableCompiler; +text: .text%__1cCosHSolarisFEventEpark6Mx_i_: objectMonitor_solaris.o; +text: .text%__1cJStubQdDueueKremove_all6M_v_; +text: .text%__1cJStubQdDueueMremove_first6Mi_v_; +text: .text%__1cJStubQdDueueMremove_first6M_v_; +text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_: icBuffer.o; +text: .text%__1cGICStubIfinalize6M_v_; +text: .text%__1cGICStubKcached_oop6kM_pnHoopDesc__; +text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__; +text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_; +text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_; +text: .text%__1cGICStubLdestination6kM_pC_; +text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_; +text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_: icBuffer.o; +text: .text%__1cUSafepointSynchronizeDend6F_v_; +text: .text%__1cCosbAmake_polling_page_readable6F_v_; +text: 
.text%__1cTAbstractInterpreterRignore_safepoints6F_v_; +text: .text%__1cGThreadQunboost_priority6Fp0_v_; +text: .text%__1cUThreadSafepointStateHrestart6M_v_; +text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_; +text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_; +# Test LoadFrame +text: .text%__1cNObjectMonitorGenter26MpnGThread__v_; +text: .text%__1cICompilerPsupports_native6M_i_: c1_Compiler.o; +text: .text%__1cLCompilationVcompile_native_method6MpnLCodeOffsets__i_; +text: .text%__1cIciMethodMnative_entry6M_pC_; +text: .text%__1cLCompilationUemit_code_for_native6MpCpnLCodeOffsets__v_; +text: .text%__1cLCompilationXemit_code_prolog_native6MpnIFrameMap__v_; +text: .text%__1cNLIR_AssemblerRemit_method_entry6MpnLLIR_Emitter_pnHIRScope__v_; +text: .text%__1cOMacroAssemblerHfat_nop6M_v_; +text: .text%__1cNLIR_AssemblerQemit_native_call6MpCpnMCodeEmitInfo__v_; +text: .text%__1cMCodeEmitInfobGcreate_oop_map_for_own_signature6M_pnGOopMap__; +text: .text%__1cNLIR_AssemblerXemit_native_method_exit6MpnMCodeEmitInfo__v_; +text: .text%__1cNSignatureInfoHdo_char6M_v_: reflection.o; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: reflection.o; +text: .text%jni_CallObjectMethodV: jni.o; +text: .text%jni_SetObjectField: jni.o; +text: .text%jni_IsInstanceOf: jni.o; +text: .text%jni_GetStaticObjectField: jni.o; +text: .text%__1cbCTwoGenerationCollectorPolicybMshould_try_older_generation_allocation6kMI_i_; +text: .text%__1cQGenCollectedHeapSattempt_allocation6MIiii_pnIHeapWord__; +text: .text%__1cQDefNewGenerationIallocate6MIii_pnIHeapWord__: defNewGeneration.o; +text: .text%__1cKGenerationInext_gen6kM_p0_; +text: .text%__1cKGenerationYallocation_limit_reached6MpnFSpace_pnIHeapWord_I_4_: tenuredGeneration.o; +text: .text%__1cQDefNewGenerationTallocate_from_space6MI_pnIHeapWord__; +text: .text%__1cPVM_GC_OperationNdoit_prologue6M_i_; +text: .text%__1cPVM_GC_OperationZacquire_pending_list_lock6M_v_; +text: .text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_; +text: .text%__1cXjava_lang_ref_ReferenceWpending_list_lock_addr6F_ppnHoopDesc__; +text: .text%__1cPVM_GC_OperationQgc_count_changed6kM_i_; +text: .text%__1cbAVM_GenCollectForAllocationEname6kM_pkc_: vm_operations.o; +text: .text%__1cbAVM_GenCollectForAllocationEdoit6M_v_; +text: .text%__1cNJvmtiGCMarker2t6Mi_v_; +text: .text%__1cQGenCollectedHeapZsatisfy_failed_allocation6MIiipi_pnIHeapWord__; +text: .text%__1cbCTwoGenerationCollectorPolicyZsatisfy_failed_allocation6MIiipi_pnIHeapWord__; +text: .text%__1cQGenCollectedHeapNdo_collection6MiiIiiipi_v_; +text: .text%__1cXTraceMemoryManagerStats2t6Mi_v_; +text: .text%__1cNMemoryServiceIgc_begin6Fi_v_; +text: .text%__1cPGCMemoryManagerIgc_begin6M_v_; +text: .text%__1cKManagementJtimestamp6F_x_; +text: .text%__1cKGCStatInfoMset_gc_usage6MinLMemoryUsage_i_v_; +text: .text%__1cTContiguousSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cTContiguousSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cbBSurvivorContiguousSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cbBSurvivorContiguousSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cOGenerationPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cOGenerationPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cQGenCollectedHeapLgc_prologue6Mi_v_; +text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_; +text: 
.text%__1cWThreadLocalAllocBufferVaccumulate_statistics6MIi_v_; +text: .text%__1cPGlobalTLABStatsHpublish6M_v_; +text: .text%__1cQGenCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapOfill_all_tlabs6M_v_; +text: .text%__1cQGenCollectedHeapSgeneration_iterate6Mpn0AKGenClosure_i_v_; +text: .text%__1cbCGenEnsureParseabilityClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o; +text: .text%__1cKGenerationTensure_parseability6M_v_: defNewGeneration.o; +text: .text%__1cKGenerationTensure_parseability6M_v_: tenuredGeneration.o; +text: .text%__1cKGenerationTensure_parseability6M_v_: compactingPermGenGen.o; +text: .text%__1cSAllocationProfilerViterate_since_last_gc6F_v_; +text: .text%__1cUGenGCPrologueClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o; +text: .text%__1cQDefNewGenerationLgc_prologue6Mi_v_: defNewGeneration.o; +text: .text%__1cRTenuredGenerationLgc_prologue6Mi_v_; +text: .text%__1cKGenerationLgc_prologue6Mi_v_: compactingPermGenGen.o; +text: .text%__1cKGenerationOshould_collect6MiIii_i_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationKshort_name6kM_pkc_: defNewGeneration.o; +text: .text%__1cKGenerationIcounters6M_pnRCollectorCounters__: defNewGeneration.o; +text: .text%__1cQGenCollectedHeapKsave_marks6M_v_; +text: .text%__1cQDefNewGenerationKsave_marks6M_v_; +text: .text%__1cbCOneContigSpaceCardGenerationKsave_marks6M_v_; +text: .text%__1cQDefNewGenerationHcollect6MiiIii_v_; +text: .text%__1cQDefNewGenerationbAcollection_attempt_is_safe6M_i_; +text: .text%__1cRTenuredGenerationZpromotion_attempt_is_safe6kMIi_i_; +text: .text%__1cKGenerationYmax_contiguous_available6kM_I_; +text: .text%__1cbCOneContigSpaceCardGenerationUcontiguous_available6kM_I_; +text: .text%__1cQDefNewGenerationbIinit_assuming_no_promotion_failure6M_v_; +text: .text%__1cQDefNewGenerationOIsAliveClosure2t6MpnKGeneration__v_; +text: .text%__1cSScanWeakRefClosure2t6MpnQDefNewGeneration__v_; +text: .text%__1cLCardTableRSbGprepare_for_younger_refs_iterate6Mi_v_; +text: .text%__1cULRUCurrentHeapPolicy2t6M_v_; +text: .text%__1cPCollectorPolicyPis_train_policy6M_i_: collectorPolicy.o; +text: .text%__1cPFastScanClosure2t6MpnQDefNewGeneration_i_v_; +text: .text%__1cQDefNewGenerationbCFastEvacuateFollowersClosure2t6MpnQGenCollectedHeap_ip0pnPFastScanClosure_6_v_; +text: .text%__1cQGenCollectedHeapUprocess_strong_roots6Miiin0ATClassScanningOption_pnQOopsInGenClosure_3_v_; +text: .text%__1cKSharedHeapbAchange_strong_roots_parity6M_v_; +text: .text%__1cMSubTasksDonePis_task_claimed6Mi_i_; +text: .text%__1cIUniverseHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cPFastScanClosureGdo_oop6MppnHoopDesc__v_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationWcopy_to_survivor_space6MpnHoopDesc_p2_2_; +text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_; +text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_; +text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_; +text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_; +text: .text%__1cNchunk_oops_do6FpnKOopClosure_pnFChunk_pc_I_: handles.o; +text: .text%__1cQStackFrameStream2t6MpnKJavaThread_i_v_; +text: .text%__1cFframeQoops_do_internal6MpnKOopClosure_pnLRegisterMap_i_v_; +text: .text%__1cFframeToops_interpreted_do6MpnKOopClosure_pknLRegisterMap_i_v_; +text: .text%__1cFframeVinterpreter_frame_bci6kM_i_; +text: 
.text%__1cFframebDinterpreter_frame_monitor_end6kM_pnPBasicObjectLock__; +text: .text%__1cFframebFinterpreter_frame_monitor_begin6kM_pnPBasicObjectLock__; +text: .text%__1cRInterpreterOopMap2t6M_v_; +text: .text%__1cRInterpreterOopMapKinitialize6M_v_; +text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_; +text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cLOopMapCache2t6M_v_; +text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cLOopMapCacheIentry_at6kMi_pnQOopMapCacheEntry__; +text: .text%__1cRInterpreterOopMapIis_empty6M_i_; +text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_; +text: .text%__1cQOopMapCacheEntryFflush6M_v_; +text: .text%__1cQOopMapCacheEntryTdeallocate_bit_mask6M_v_; +text: .text%__1cQOopMapCacheEntryPfill_for_native6M_v_; +text: .text%__1cQOopMapCacheEntryRallocate_bit_mask6M_v_; +text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pIi_v_: oopMapCache.o; +text: .text%__1cNFingerprinterLfingerprint6M_X_: oopMapCache.o; +text: .text%__1cTMaskFillerForNativeLpass_object6M_v_: oopMapCache.o; +text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_; +text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_; +text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_: frame.o; +text: .text%__1cRInterpreterOopMap2T6M_v_; +text: .text%__1cTOopMapForCacheEntry2t6MnMmethodHandle_ipnQOopMapCacheEntry__v_; +text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_; +text: .text%__1cTOopMapForCacheEntryRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cTOopMapForCacheEntryOreport_results6kM_i_: oopMapCache.o; +text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_; +text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_; +text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: oopMapCache.o; +text: .text%__1cTMaskFillerForNativeJpass_long6M_v_: oopMapCache.o; +text: .text%__1cFframebHnext_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_; +text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_; +text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_; +text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_; +text: .text%__1cQComputeCallStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_; +text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__; +text: .text%__1cRComputeEntryStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: frame.o; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: oopMapCache.o; +text: .text%__1cFframebDoops_interpreted_arguments_do6MnMsymbolHandle_ipnKOopClosure__v_; +text: .text%__1cRArgumentOopFinderDset6MinJBasicType__v_: frame.o; +text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cQVMOperationQdDueueHoops_do6MpnKOopClosure__v_; +text: .text%__1cQVMOperationQdDueueNqueue_oops_do6MipnKOopClosure__v_; +text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_; +text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_; +text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_; +text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_; 
+text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_; +text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_; +text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_; +text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollectorXoops_do_for_all_threads6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_; +text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_; +text: .text%__1cPDictionaryEntrybDprotection_domain_set_oops_do6MpnKOopClosure__v_: dictionary.o; +text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cUCompactingPermGenGenUyounger_refs_iterate6MpnQOopsInGenClosure__v_; +text: .text%__1cbCOneContigSpaceCardGenerationUyounger_refs_iterate6MpnQOopsInGenClosure__v_; +text: .text%__1cKGenerationbDyounger_refs_in_space_iterate6MpnFSpace_pnQOopsInGenClosure__v_; +text: .text%__1cLCardTableRSbDyounger_refs_in_space_iterate6MpnFSpace_pnQOopsInGenClosure__v_; +text: .text%__1cPContiguousSpaceLnew_dcto_cl6MpnKOopClosure_nRCardTableModRefBSOPrecisionStyle_pnIHeapWord__pnVDirtyCardToOopClosure__; +text: .text%__1cPContiguousSpaceZused_region_at_save_marks6kM_nJMemRegion__: space.o; +text: .text%__1cRCardTableModRefBSWnon_clean_card_iterate6MpnFSpace_nJMemRegion_pnVDirtyCardToOopClosure_pnQMemRegionClosure_i_v_; +text: .text%__1cRCardTableModRefBSbBnon_clean_card_iterate_work6MnJMemRegion_pnQMemRegionClosure_i_v_; +text: .text%__1cJMemRegionMintersection6kMk0_0_; +text: .text%__1cYClearNoncleanCardWrapperMdo_MemRegion6MnJMemRegion__v_: cardTableRS.o; +text: .text%__1cYClearNoncleanCardWrapperKclear_card6MpW_i_: cardTableRS.o; +text: .text%__1cVDirtyCardToOopClosureMdo_MemRegion6MnJMemRegion__v_; +text: .text%__1cWOffsetTableContigSpaceLblock_start6kMpkv_pnIHeapWord__: space.o; +text: .text%__1cbBBlockOffsetArrayContigSpaceSblock_start_unsafe6kMpkv_pnIHeapWord__; +text: .text%__1cPContiguousSpaceKblock_size6kMpknIHeapWord__I_; +text: .text%__1cUContiguousSpaceDCTOCOget_actual_top6MpnIHeapWord_2_2_; +text: .text%__1cPContiguousSpaceRtoContiguousSpace6M_p0_: space.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlass.o; +text: .text%__1cPFiltering_DCTOCPwalk_mem_region6MnJMemRegion_pnIHeapWord_3_v_; +text: .text%__1cUContiguousSpaceDCTOCXwalk_mem_region_with_cl6MnJMemRegion_pnIHeapWord_3pnQFilteringClosure__v_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: methodKlass.o; +text: .text%__1cLmethodKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cNinstanceKlassViterate_static_fields6MpnKOopClosure__v_; +text: .text%__1cLklassVtablePoop_oop_iterate6MpnKOopClosure__v_; +text: .text%__1cQFilteringClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cLklassItablePoop_oop_iterate6MpnKOopClosure__v_; +text: .text%__1cKklassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cKOopClosureXshould_remember_klasses6kM_ki_: space.o; +text: .text%__1cNinstanceKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_; +text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_; +text: 
.text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: cpCacheKlass.o; +text: .text%__1cWconstantPoolCacheKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cWConstantPoolCacheEntryLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cParrayKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cNobjArrayKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_; +text: .text%__1cNinstanceKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_; +text: .text%__1cNobjArrayKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: methodKlass.o; +text: .text%__1cLmethodKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: constantPoolKlass.o; +text: .text%__1cRconstantPoolKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constMethodKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constMethodKlass.o; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: symbolKlass.o; +text: .text%__1cLsymbolKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: symbolKlass.o; +text: .text%__1cLsymbolKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: constantPoolKlass.o; +text: .text%__1cRconstantPoolKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: cpCacheKlass.o; +text: .text%__1cWconstantPoolCacheKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cWConstantPoolCacheEntryNoop_iterate_m6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: cpCacheKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: cpCacheKlass.o; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cNinstanceKlassViterate_static_fields6MpnKOopClosure_nJMemRegion__v_; +text: 
.text%__1cLklassVtableRoop_oop_iterate_m6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cLklassItableRoop_oop_iterate_m6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cKklassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cRInterpreterOopMapLoop_iterate6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cKOopClosureIdo_oop_v6MppnHoopDesc__v_: space.o; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cRInterpreterOopMapLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: klassKlass.o; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: klassKlass.o; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: arrayKlassKlass.o; +text: .text%__1cLCardTableRSUyounger_refs_iterate6MpnKGeneration_pnQOopsInGenClosure__v_; +text: .text%__1cMSubTasksDoneTall_tasks_completed6M_v_; +text: .text%__1cQDefNewGenerationbCFastEvacuateFollowersClosureHdo_void6M_v_; +text: .text%__1cQGenCollectedHeapbCoop_since_save_marks_iterate6MipnPFastScanClosure_2_v_; +text: .text%__1cQDefNewGenerationbFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_; +text: .text%__1cPContiguousSpacebFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_; +text: .text%__1cNobjArrayKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_; +text: .text%__1cNinstanceKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_: typeArrayKlass.o; +text: .text%__1cQinstanceRefKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_; +text: .text%__1cXjava_lang_ref_ReferenceNreferent_addr6FpnHoopDesc__p2_; +text: .text%__1cSReferenceProcessorSdiscover_reference6MpnHoopDesc_nNReferenceType__i_; +text: .text%__1cXjava_lang_ref_ReferenceJnext_addr6FpnHoopDesc__p2_; +text: .text%__1cXjava_lang_ref_ReferencePdiscovered_addr6FpnHoopDesc__p2_; +text: .text%__1cSReferenceProcessorTget_discovered_list6MnNReferenceType__ppnHoopDesc__; +text: .text%__1cKGenerationHpromote6MpnHoopDesc_Ip2_2_; +text: .text%__1cbCOneContigSpaceCardGenerationIallocate6MIii_pnIHeapWord__: tenuredGeneration.o; +text: .text%__1cbCOneContigSpaceCardGenerationbFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_; +text: .text%__1cQGenCollectedHeapbAno_allocs_since_save_marks6Mi_i_; +text: .text%__1cQDefNewGenerationbAno_allocs_since_save_marks6M_i_; +text: .text%__1cbCOneContigSpaceCardGenerationbAno_allocs_since_save_marks6M_i_; +text: .text%__1cQDefNewGenerationUFastKeepAliveClosure2t6Mp0pnSScanWeakRefClosure__v_; +text: .text%__1cQDefNewGenerationQKeepAliveClosure2t6MpnSScanWeakRefClosure__v_; +text: .text%__1cbDReferenceProcessorInitializerIis_clean6kM_v_: concurrentMarkSweepGeneration.o; +text: .text%__1cSReferenceProcessorbDprocess_discovered_references6M_v_; +text: .text%__1cSReferenceProcessorbAprocess_discovered_reflist6MppnHoopDesc_pnPReferencePolicy_i_v_; +text: 
.text%__1cSReferenceProcessorOprocess_phase16MppnHoopDesc_pnPReferencePolicy_pnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: .text%__1cQDefNewGenerationOIsAliveClosureLdo_object_b6MpnHoopDesc__i_; +text: .text%__1cULRUCurrentHeapPolicyWshould_clear_reference6MpnHoopDesc__i_; +text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_; +text: .text%__1cXjava_lang_ref_ReferenceIset_next6FpnHoopDesc_2_v_; +text: .text%__1cQDefNewGenerationUFastKeepAliveClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cSReferenceProcessorOprocess_phase26MppnHoopDesc_pnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSReferenceProcessorOprocess_phase36MppnHoopDesc_ipnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: .text%__1cSReferenceProcessorQprocess_phaseJNI6M_v_; +text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cOJNIHandleBlockMweak_oops_do6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cQDefNewGenerationLswap_spaces6M_v_; +text: .text%__1cIageTablebAcompute_tenuring_threshold6MI_i_; +text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: defNewGeneration.o; +text: .text%__1cSReferenceProcessorbDenqueue_discovered_references6M_i_; +text: .text%__1cXjava_lang_ref_ReferenceRpending_list_addr6F_ppnHoopDesc__; +text: .text%__1cSReferenceProcessorbBenqueue_discovered_reflists6MppnHoopDesc__v_; +text: .text%__1cSReferenceProcessorbAenqueue_discovered_reflist6MpnHoopDesc_p2_v_; +text: .text%__1cQGenCollectedHeapPupdate_gc_stats6Mii_v_: genCollectedHeap.o; +text: .text%__1cKGenerationPupdate_gc_stats6Mii_v_: defNewGeneration.o; +text: .text%__1cRTenuredGenerationPupdate_gc_stats6Mii_v_; +text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_; +text: .text%__1cKGenerationPupdate_gc_stats6Mii_v_: compactingPermGenGen.o; +text: .text%__1cRTenuredGenerationOshould_collect6MiIii_i_; +text: .text%__1cKGenerationPshould_allocate6MIii_i_: tenuredGeneration.o; +text: .text%__1cbCOneContigSpaceCardGenerationEfree6kM_I_; +text: .text%__1cQDefNewGenerationQcompute_new_size6M_v_; +text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_; +text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_; +text: .text%__1cQGenCollectedHeapLgc_epilogue6Mi_v_; +text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_; +text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_; +text: .text%__1cWThreadLocalAllocBufferGresize6M_v_; +text: .text%__1cUGenGCEpilogueClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o; +text: .text%__1cQDefNewGenerationLgc_epilogue6Mi_v_; +text: .text%__1cRTenuredGenerationLgc_epilogue6Mi_v_; +text: .text%__1cbCOneContigSpaceCardGenerationLgc_epilogue6Mi_v_; +text: .text%__1cRTenuredGenerationPupdate_counters6M_v_; +text: .text%__1cUCompactingPermGenGenPupdate_counters6M_v_; +text: .text%__1cXTraceMemoryManagerStats2T6M_v_; +text: .text%__1cNMemoryServiceGgc_end6Fi_v_; +text: .text%__1cPGCMemoryManagerGgc_end6M_v_; +text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_; +text: .text%__1cNJvmtiGCMarker2T6M_v_; +text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_; +text: .text%__1cPVM_GC_OperationbKrelease_and_notify_pending_list_lock6M_v_; +text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_; +text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_: jni.o; +text: .text%jni_GetIntArrayRegion: jni.o; +text: .text%jni_SetIntArrayRegion: jni.o; +text: .text%jni_PushLocalFrame: jni.o; +text: 
.text%jni_PopLocalFrame: jni.o; +text: .text%__1cMGraphBuilderJnegate_op6MpnJValueType__v_; +text: .text%__1cINegateOpFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerLdo_NegateOp6MpnINegateOp__v_; +text: .text%__1cMLinkResolverbPlinktime_resolve_interface_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cPciInstanceKlassLimplementor6M_p0_; +text: .text%__1cINegateOpPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorLdo_NegateOp6MpnINegateOp__v_; +text: .text%__1cIValueGenJspill_one6MpnJValueType__v_; +text: .text%__1cIRegAllocbBget_smallest_value_to_spill6kMpnJValueType__pnLInstruction__; +text: .text%__1cLLIR_EmitterRarray_store_check6MpnLLIR_OprDesc_2nFRInfo_33pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListLstore_check6MpnLLIR_OprDesc_2222pnMCodeEmitInfo__v_; +text: .text%__1cPLIR_OpTypeCheck2t6MnILIR_Code_pnLLIR_OprDesc_3333pnMCodeEmitInfo__v_; +text: .text%__1cXArrayStoreExceptionStub2t6MpnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListLshift_right6MnFRInfo_i1_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListLshift_right6MpnLLIR_OprDesc_222_v_; +text: .text%__1cIValueGenLdo_NegateOp6MpnINegateOp__v_; +text: .text%__1cLLIR_EmitterGnegate6MnFRInfo_pnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListGnegate6MnFRInfo_1_v_: c1_LIREmitter.o; +text: .text%__1cXArrayStoreExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cXArrayStoreExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerEleal6MpnLLIR_OprDesc_2_v_; +text: .text%__1cNLIR_AssemblerGnegate6MpnLLIR_OprDesc_2_v_; +text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler_x86.o; +text: .text%__1cXArrayStoreExceptionStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cIRuntime1Tresolve_static_call6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_; +text: .text%__1cSCompiledStaticCallIis_clean6kM_i_; +text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cHciKlassSsuper_check_offset6M_I_; +text: .text%__1cIRuntime1Thandle_wrong_method6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cFframeRis_compiled_frame6kMpi_i_; +text: .text%__1cHnmethodOis_java_method6kM_i_: nmethod.o; +text: .text%__1cGEventsDlog6FpkcE_v_: sharedRuntime.o; +text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__; +text: .text%__1cNRelocIteratorEnext6M_i_: sharedRuntime.o; +text: .text%__1cKCompiledICMset_to_clean6M_v_; +text: .text%__1cKCompiledICMstub_address6kM_pC_; +text: .text%__1cGICStubFclear6M_v_; +text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_; +text: .text%__1cIRuntime1Jarraycopy6FpnHoopDesc_i2ii_i_; +text: .text%__1cMGraphBuilderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: .text%__1cYDebugInformationRecorderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cJCodeCacheXmark_for_deoptimization6FpnMklassOopDesc__i_; +text: .text%__1cNinstanceKlassXmark_dependent_nmethods6MpnMklassOopDesc__i_; +text: .text%jni_NewWeakGlobalRef: 
jni.o; +text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__; +text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%jni_CallIntMethodV: jni.o; +text: .text%Unsafe_GetObject; +text: .text%jni_CallBooleanMethod: jni.o; +text: .text%jni_CallVoidMethodV: jni.o; +text: .text%JVM_GetClassDeclaredMethods; +text: .text%JVM_InvokeMethod; +text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_; +text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_; +text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_; +text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_; +text: .text%JVM_IsInterrupted; +# Test LoadJFrame +text: .text%__1cTresource_free_bytes6FpcI_v_; +text: .text%__1cRComputeEntryStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cJFloatTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cJFloatTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cJFloatTypeMas_FloatType6M_p0_: c1_ValueType.o; +text: .text%__1cIValueGenTdo_ArithmeticOp_FPU6MpnMArithmeticOp__v_; +text: .text%__1cHLockRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocOset_locked_fpu6MipnLInstruction_i_v_; +text: .text%__1cIValueGenNis_32bit_mode6M_i_; +text: .text%__1cLGetRefCountIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cJFloatTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cHFreeRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocMset_free_fpu6Mi_v_; +text: .text%__1cQChangeSpillCountIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cLLIR_EmitterRarithmetic_op_fpu6MnJBytecodesECode_pnLLIR_OprDesc_44i_v_; +text: .text%__1cILIR_ListDmul6MpnLLIR_OprDesc_22_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenKround_item6MpnEItem__v_; +text: .text%__1cLLIR_EmitterFround6MipnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListKround32bit6MnFRInfo_i_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenOspill_register6MnFRInfo__v_; +text: .text%__1cIRegAllocTget_value_for_rinfo6kMnFRInfo__pnLInstruction__; +text: .text%__1cLGetValueForGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIValueGenKdivInRInfo6F_nFRInfo__; +text: .text%__1cIValueGenLremOutRInfo6F_nFRInfo__; +text: .text%__1cMArithmeticOpKlock_stack6kM_pnKValueStack__: c1_Instruction.o; +text: .text%__1cLLIR_EmitterParithmetic_idiv6MnJBytecodesECode_pnLLIR_OprDesc_44nFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListEirem6MnFRInfo_111pnMCodeEmitInfo__v_; +text: .text%__1cHLIR_Op3Fvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cHLIR_Op3Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op36MpnHLIR_Op3__v_; +text: .text%__1cNLIR_AssemblerIfpu_push6MnFRInfo__v_; +text: .text%__1cIFrameMapLFpuStackSimEpush6Mi_v_; +text: .text%__1cNLIR_AssemblerKfpu_on_tos6MnFRInfo__v_; +text: .text%__1cIFrameMapLFpuStackSimPoffset_from_tos6kMi_i_; +text: .text%__1cIintArrayIindex_of6kMki_i_: c1_FrameMap_x86.o; +text: .text%__1cNLIR_AssemblerHfpu_pop6MnFRInfo__v_; +text: .text%__1cIFrameMapLFpuStackSimDpop6Mi_i_; +text: .text%__1cNLIR_AssemblerKround32_op6MpnLLIR_OprDesc_2_v_; +text: .text%__1cJAssemblerGfist_s6MnHAddress__v_; +text: .text%__1cNLIR_AssemblerJreset_FPU6M_v_; +text: .text%__1cNLIR_AssemblerIemit_op36MpnHLIR_Op3__v_; +text: .text%__1cNLIR_AssemblerParithmetic_idiv6MnILIR_Code_pnLLIR_OprDesc_333pnMCodeEmitInfo__v_; +text: 
.text%__1cNLIR_AssemblerXadd_debug_info_for_div06MipnMCodeEmitInfo__v_; +text: .text%__1cNDivByZeroStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNDivByZeroStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cLInstructionOas_ArrayLength6M_pnLArrayLength__: c1_GraphBuilder.o; +text: .text%__1cLInstructionKas_ShiftOp6M_pnHShiftOp__: c1_Instruction.o; +text: .text%__1cILIR_ListLlogical_xor6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter.o; +text: .text%__1cIRuntime1Ohandle_ic_miss6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_; +text: .text%__1cKCompiledICOis_megamorphic6kM_i_; +text: .text%__1cLVtableStubsOis_entry_point6FpC_i_; +text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_; +text: .text%__1cLVtableStubsLcreate_stub6FiipnNmethodOopDesc__pC_; +text: .text%__1cLVtableStubsGlookup6Fiii_pnKVtableStub__; +text: .text%__1cLVtableStubsScreate_vtable_stub6Fii_pnKVtableStub__; +text: .text%__1cKVtableStubSpd_code_size_limit6Fi_i_; +text: .text%__1cKVtableStub2n6FIi_pv_; +text: .text%__1cKVtableStubRpd_code_alignment6F_i_; +text: .text%__1cLVtableStubsFenter6FiiipnKVtableStub__v_; +text: .text%__1cGEventsDlog6FpkcE_v_: compiledIC.o; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_; +text: .text%Unsafe_EnsureClassInitialized; +text: .text%Unsafe_StaticFieldOffset; +text: .text%Unsafe_StaticFieldBaseFromField; +text: .text%Unsafe_GetIntVolatile; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: generateOopMap.o; +text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: c1_GraphBuilder.o; +text: .text%__1cLInstructionKas_ShiftOp6M_pnHShiftOp__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderMtable_switch6M_v_; +text: .text%__1cLTableSwitchFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerOdo_TableSwitch6MpnLTableSwitch__v_; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_GraphBuilder.o; +text: .text%__1cGSwitchPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorOdo_TableSwitch6MpnLTableSwitch__v_; +text: .text%__1cIValueGenOdo_TableSwitch6MpnLTableSwitch__v_; +text: .text%__1cIValueGenVsetup_phis_for_switch6MpnEItem_pnKValueStack__v_; +text: .text%__1cLLIR_EmitterOtableswitch_op6MpnLLIR_OprDesc_ipnKBlockBegin__v_; +text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_; +# Test JHello +text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_; +text: .text%JVM_InitializeSocketLibrary; +text: .text%__1cDhpiZinitialize_socket_library6F_i_; +text: .text%JVM_Socket; +text: .text%Unsafe_PageSize; +text: .text%__1cNFingerprinterHdo_byte6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_: interpreterRuntime.o; +text: .text%Unsafe_SetMemory; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: unsafe.o; +text: .text%__1cNSharedRuntimeElrem6Fxx_x_; +text: .text%Unsafe_DefineClass1; +text: .text%__1cSUnsafe_DefineClass6FpnHJNIEnv__pnI_jstring_pnL_jbyteArray_iipnI_jobject_7_pnH_jclass__: unsafe.o; +text: .text%JVM_DefineClass; +text: .text%__1cPClassFileParserXverify_unqualified_name6MpcIi_i_; +text: 
.text%__1cVLoaderConstraintTableYextend_loader_constraint6MpnVLoaderConstraintEntry_nGHandle_pnMklassOopDesc__v_; +text: .text%__1cVLoaderConstraintTablebHensure_loader_constraint_capacity6MpnVLoaderConstraintEntry_i_v_; +text: .text%__1cIciObjectIis_klass6M_i_: ciInstance.o; +text: .text%__1cQInstanceConstantIencoding6kM_pnI_jobject__; +text: .text%__1cLInstructionOas_ArrayLength6M_pnLArrayLength__: c1_Instruction.o; +text: .text%__1cILIR_ListQunwind_exception6MnFRInfo_1pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cIRuntime1Tprimitive_arraycopy6FpnIHeapWord_2i_v_; +text: .text%__1cRComputeEntryStackHdo_char6M_v_: generateOopMap.o; +text: .text%jni_NewDirectByteBuffer; +text: .text%__1cbDinitializeDirectBufferSupport6FpnHJNIEnv___i_: jni.o; +text: .text%lookupDirectBufferClasses: jni.o; +text: .text%__1cJlookupOne6FpnHJNIEnv__pkcpnGThread__pnH_jclass__: jni.o; +text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__: jni.o; +text: .text%jni_GetDoubleArrayRegion: jni.o; +text: .text%__1cNSignatureInfoJdo_double6M_v_: bytecode.o; +text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_: jni.o; +text: .text%__1cQComputeCallStackHdo_byte6M_v_: generateOopMap.o; +text: .text%__1cFKlassQup_cast_abstract6M_p0_; +text: .text%__1cRComputeEntryStackHdo_byte6M_v_: generateOopMap.o; +text: .text%__1cNSharedRuntimeDd2i6Fd_i_; +text: .text%__1cSInterpreterRuntimeWslow_signature_handler6FpnKJavaThread_pnNmethodOopDesc_pi5_pC_; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_x86.o; +text: .text%__1cUSlowSignatureHandlerLpass_object6M_v_: interpreterRT_x86.o; +text: .text%__1cXNativeSignatureIteratorIdo_array6Mii_v_: interpreterRT_x86.o; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_x86.o; +text: .text%__1cUSlowSignatureHandlerIpass_int6M_v_: interpreterRT_x86.o; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_x86.o; +text: .text%jni_GetFloatArrayRegion: jni.o; +text: .text%jni_GetCharArrayRegion: jni.o; +text: .text%jni_SetFloatField: jni.o; +text: .text%jni_NewFloatArray: jni.o; +text: .text%jni_SetFloatArrayRegion: jni.o; +# SwingSet +text: .text%JVM_GetFieldIxModifiers; +text: .text%JVM_GetCPFieldClassNameUTF; +text: .text%JVM_GetCPFieldModifiers; +text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_; +text: .text%__1cXjava_lang_ref_ReferenceOset_discovered6FpnHoopDesc_2_v_; +text: .text%__1cMStoreIndexedPother_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%JVM_MonitorNotify; +text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_; +text: .text%__1cKValueStackElock6MpnHIRScope_pnLInstruction__i_; +text: .text%__1cKValueStackGunlock6M_i_; +text: .text%__1cLLIR_EmitterVmonitorenter_at_entry6MnFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterNmonitor_enter6MnFRInfo_111ipnMCodeEmitInfo_3_v_; +text: .text%__1cQMonitorEnterStub2t6MnFRInfo_1pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListbAload_stack_address_monitor6MinFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListLlock_object6MnFRInfo_111pnICodeStub_pnMCodeEmitInfo__v_; +text: .text%__1cIValueGenNsyncTempRInfo6F_nFRInfo__; +text: .text%__1cLLIR_EmitterQreturn_op_prolog6Mi_v_; +text: .text%__1cLLIR_EmitterMmonitor_exit6MnFRInfo_11i_v_; +text: .text%__1cILIR_ListNunlock_object6MnFRInfo_11pnICodeStub__v_; +text: .text%__1cKLIR_OpLockFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQMonitorEnterStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: 
.text%__1cRMonitorAccessStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cKLIR_OpLockJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerJemit_lock6MpnKLIR_OpLock__v_; +text: .text%__1cNLIR_AssemblerPmonitor_address6MinFRInfo__v_; +text: .text%__1cIFrameMapbEaddress_for_monitor_lock_index6kMi_nHAddress__; +text: .text%__1cIFrameMapbAfp_offset_for_monitor_lock6kMi_i_; +text: .text%__1cNLIR_AssemblerJemit_lock6MpnKLIR_OpLock__v_; +text: .text%__1cRC1_MacroAssemblerLlock_object6MpnMRegisterImpl_22rnFLabel__v_; +text: .text%__1cQMonitorEnterStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cIFrameMapWmonitor_object_regname6kMi_nHOptoRegEName__; +text: .text%__1cIFrameMapbCfp_offset_for_monitor_object6kMi_i_; +text: .text%__1cMCodeEmitInfobHlocation_for_monitor_object_index6Mi_nILocation__; +text: .text%__1cIFrameMapbHlocation_for_monitor_object_index6kMipnILocation__i_; +text: .text%__1cMCodeEmitInfobFlocation_for_monitor_lock_index6Mi_nILocation__; +text: .text%__1cIFrameMapbFlocation_for_monitor_lock_index6kMipnILocation__i_; +text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_; +text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cRC1_MacroAssemblerNunlock_object6MpnMRegisterImpl_22rnFLabel__v_; +text: .text%__1cPMonitorExitStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o; +text: .text%__1cQMonitorEnterStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNLIR_AssemblerRload_receiver_reg6MpnMRegisterImpl__v_; +text: .text%__1cNLIR_AssemblerLmonitorexit6MnFRInfo_1pnMRegisterImpl_i3_v_; +text: .text%__1cPMonitorExitStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%jni_NewIntArray: jni.o; +text: .text%__1cNCollectedHeapYlarge_typearray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cQinstanceRefKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_; +text: .text%__1cRTenuredGenerationKshort_name6kM_pkc_: tenuredGeneration.o; +text: .text%__1cKGenerationIcounters6M_pnRCollectorCounters__: tenuredGeneration.o; +text: .text%__1cRTenuredGenerationHcollect6MiiIii_v_; +text: .text%__1cRTenuredGenerationbJretire_alloc_buffers_before_full_gc6M_v_; +text: .text%__1cbCOneContigSpaceCardGenerationHcollect6MiiIii_v_; +text: .text%__1cMGenMarkSweepTinvoke_at_safepoint6FipnSReferenceProcessor_i_v_; +text: .text%__1cJCodeCacheLgc_prologue6F_v_; +text: .text%__1cHThreadsLgc_prologue6F_v_; +text: .text%__1cKJavaThreadLgc_prologue6M_v_; +text: .text%__1cKJavaThreadJframes_do6MpFpnFframe_pknLRegisterMap__v_v_; +text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_prologue6M_v_; +text: .text%__1cQGenCollectedHeapRsave_used_regions6Mii_v_; +text: .text%__1cKGenerationQsave_used_region6M_v_: tenuredGeneration.o; +text: .text%__1cbCOneContigSpaceCardGenerationLused_region6kM_nJMemRegion__; +text: .text%__1cPContiguousSpaceLused_region6kM_nJMemRegion__: space.o; +text: .text%__1cKGenerationQsave_used_region6M_v_: defNewGeneration.o; +text: .text%__1cKGenerationLused_region6kM_nJMemRegion__: defNewGeneration.o; +text: .text%__1cKGenerationQsave_used_region6M_v_: compactingPermGenGen.o; +text: .text%__1cMGenMarkSweepPallocate_stacks6F_v_; +text: 
.text%__1cQGenCollectedHeapOgather_scratch6MpnKGeneration_I_pnMScratchBlock__; +text: .text%__1cQDefNewGenerationScontribute_scratch6MrpnMScratchBlock_pnKGeneration_I_v_; +text: .text%__1cKGenerationScontribute_scratch6MrpnMScratchBlock_p0I_v_: tenuredGeneration.o; +text: .text%__1cRsort_scratch_list6FrpnMScratchBlock__v_: genCollectedHeap.o; +text: .text%__1cVremoveSmallestScratch6FppnMScratchBlock__1_: genCollectedHeap.o; +text: .text%__1cMGenMarkSweepRmark_sweep_phase16Firii_v_; +text: .text%__1cJEventMark2t6MpkcE_v_: genMarkSweep.o; +text: .text%__1cJMarkSweepRFollowRootClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cJMarkSweepLfollow_root6FppnHoopDesc__v_; +text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLklassVtableToop_follow_contents6M_v_; +text: .text%__1cJMarkSweepO_mark_and_push6FppnHoopDesc__v_; +text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_; +text: .text%__1cJMarkSweepMfollow_stack6F_v_; +text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_; +text: .text%__1cLklassItableToop_follow_contents6M_v_; +text: .text%__1cJMarkSweepNpreserve_mark6FpnHoopDesc_pnLmarkOopDesc__v_; +text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMjniIdMapBaseHoops_do6MpnKOopClosure__v_; +text: .text%__1cIjniIdMapHoops_do6MpnKOopClosure__v_; +text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJMarkSweepPmark_and_follow6FppnHoopDesc__v_; +text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_; +text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryYalways_strong_classes_do6FpnKOopClosure__v_; +text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryPplaceholders_do6FpnKOopClosure__v_; +text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cJvmSymbolsHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cJMarkSweepOIsAliveClosureLdo_object_b6MpnHoopDesc__i_: markSweep.o; +text: .text%__1cJMarkSweepQKeepAliveClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_: markSweep.o; +text: .text%__1cQSystemDictionaryMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cKDictionaryMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_; +text: .text%__1cJCodeCacheMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__; +text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__; +text: 
.text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_; +text: .text%__1cJCodeCacheFalive6FpnICodeBlob__2_; +text: .text%__1cKBufferBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cKBufferBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_; +text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__; +text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_; +text: .text%__1cNSingletonBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cNSingletonBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cLRuntimeStubIis_alive6kM_i_: codeBlob.o; +text: .text%__1cLRuntimeStubbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cHnmethodIis_alive6kM_i_: nmethod.o; +text: .text%__1cHnmethodbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cHnmethodOis_not_entrant6kM_i_: nmethod.o; +text: .text%__1cHnmethodbHfollow_root_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_ppnHoopDesc_iri_v_; +text: .text%__1cOoop_RelocationJoop_value6M_pnHoopDesc__; +text: .text%__1cVcompiledICHolderKlassSoop_being_unloaded6MpnRBoolObjectClosure_pnHoopDesc__i_; +text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_; +text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_; +text: .text%__1cMGenMarkSweepRmark_sweep_phase26F_v_; +text: .text%__1cQGenCollectedHeapWprepare_for_compaction6M_v_; +text: .text%__1cKGenerationWprepare_for_compaction6MpnMCompactPoint__v_; +text: .text%__1cbCOneContigSpaceCardGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: tenuredGeneration.o; +text: .text%__1cPContiguousSpaceWprepare_for_compaction6MpnMCompactPoint__v_; +text: .text%__1cWOffsetTableContigSpaceUinitialize_threshold6M_pnIHeapWord__; +text: .text%__1cMTenuredSpaceSallowed_dead_ratio6kM_i_; +text: .text%__1cQCompactibleSpaceHforward6MpnHoopDesc_IpnMCompactPoint_pnIHeapWord__6_; +text: .text%__1cWOffsetTableContigSpacePcross_threshold6MpnIHeapWord_2_2_; +text: .text%__1cQCompactibleSpaceQinsert_deadspace6MrIpnIHeapWord_I_i_; +text: .text%__1cQCompactibleSpaceVnext_compaction_space6kM_p0_: space.o; +text: .text%__1cQDefNewGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: defNewGeneration.o; +text: .text%__1cQCompactibleSpaceSallowed_dead_ratio6kM_i_: space.o; +text: .text%__1cbCOneContigSpaceCardGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: compactingPermGenGen.o; +text: .text%__1cPContigPermSpaceSallowed_dead_ratio6kM_i_; +text: .text%__1cMGenMarkSweepRmark_sweep_phase36Fi_v_; +text: .text%__1cUCompactingPermGenGenTpre_adjust_pointers6M_v_; +text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cQGenCollectedHeapSprocess_weak_roots6MpnKOopClosure_2_v_; +text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_; +text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: 
.text%__1cHnmethodHoops_do6MpnKOopClosure__v_; +text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_; +text: .text%__1cSReferenceProcessorPoops_do_statics6FpnKOopClosure__v_; +text: .text%__1cSReferenceProcessorHoops_do6MpnKOopClosure__v_; +text: .text%__1cJMarkSweepMadjust_marks6F_v_; +text: .text%__1cYGenAdjustPointersClosureNdo_generation6MpnKGeneration__v_: genMarkSweep.o; +text: .text%__1cKGenerationPadjust_pointers6M_v_; +text: .text%__1cbCOneContigSpaceCardGenerationNspace_iterate6MpnMSpaceClosure_i_v_; +text: .text%__1cVAdjustPointersClosureIdo_space6MpnFSpace__v_: generation.o; +text: .text%__1cQCompactibleSpacePadjust_pointers6M_v_; +text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQDefNewGenerationNspace_iterate6MpnMSpaceClosure_i_v_; +text: .text%__1cUCompactingPermGenGenPadjust_pointers6M_v_; +text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLklassVtableToop_adjust_pointers6M_v_; +text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_; +text: .text%__1cLklassItableToop_adjust_pointers6M_v_; +text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_; +text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cMGenMarkSweepRmark_sweep_phase46F_v_; +text: .text%__1cUCompactingPermGenGenHcompact6M_v_; +text: .text%__1cQCompactibleSpaceHcompact6M_v_; +text: .text%__1cPContiguousSpaceWreset_after_compaction6M_v_: space.o; +text: .text%__1cRGenCompactClosureNdo_generation6MpnKGeneration__v_: genMarkSweep.o; +text: .text%__1cKGenerationHcompact6M_v_; +text: .text%__1cUCompactingPermGenGenMpost_compact6M_v_; +text: .text%__1cJMarkSweepNrestore_marks6F_v_; +text: .text%__1cMGenMarkSweepRdeallocate_stacks6F_v_; +text: .text%__1cLCardTableRSSclear_into_younger6MpnKGeneration_i_v_; +text: .text%__1cLCardTableRSFclear6MnJMemRegion__v_: cardTableRS.o; +text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_; +text: .text%__1cRCardTableModRefBSPclear_MemRegion6MnJMemRegion__v_; +text: .text%__1cHThreadsLgc_epilogue6F_v_; +text: .text%__1cKJavaThreadLgc_epilogue6M_v_; +text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_epilogue6M_v_; +text: .text%__1cFframeMpd_gc_epilog6M_v_; +text: .text%__1cJCodeCacheLgc_epilogue6F_v_; +text: .text%__1cICodeBlobTfix_oop_relocations6M_v_; +text: .text%__1cICodeBlobTfix_oop_relocations6MpC1_v_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: codeBlob.o; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: relocInfo.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: onStackReplacement.o; +text: .text%__1cQGenCollectedHeapWupdate_time_of_last_gc6Mx_v_: genMarkSweep.o; +text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: 
tenuredGeneration.o; +text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: compactingPermGenGen.o; +text: .text%__1cbCOneContigSpaceCardGenerationVunsafe_max_alloc_nogc6kM_I_; +text: .text%__1cRTenuredGenerationQcompute_new_size6M_v_; +text: .text%__1cKGenerationEspec6M_pnOGenerationSpec__; +text: .text%Unsafe_CompareAndSwapObject; +text: .text%__1cLVtableStubsScreate_itable_stub6Fii_pnKVtableStub__; +text: .text%__1cLLIR_EmitterDnop6M_v_; +text: .text%__1cJAssemblerEmovl6MnHAddress_pnI_jobject__v_; +text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_; +text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_; +text: .text%__1cLklassVtableTis_miranda_entry_at6Mi_i_; +text: .text%__1cRPrivilegedElementHoops_do6MpnKOopClosure__v_; +text: .text%__1cJCodeCacheIcontains6Fpv_i_; +text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cJOopMapSetHoops_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cJOopMapSetGall_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure_pFppnHoopDesc_9E_v9B9B_v_; +text: .text%__1cICodeBlobbAoop_map_for_return_address6MpCi_pnGOopMap__; +text: .text%__1cJOopMapSetSfind_map_at_offset6kMii_pnGOopMap__; +text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_; +text: .text%__1cFframeVoopmapreg_to_location6kMnFVMRegEName_pknLRegisterMap__ppnHoopDesc__; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: defNewGeneration.o; +text: .text%__1cJOopMapSetTupdate_register_map6FpknFframe_pnICodeBlob_pnLRegisterMap__v_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: nmethod.o; +text: .text%__1cQComputeCallStackIdo_float6M_v_: generateOopMap.o; +text: .text%jni_DeleteWeakGlobalRef: jni.o; +text: .text%__1cKJNIHandlesTdestroy_weak_global6FpnI_jobject__v_; +text: .text%__1cILIR_ListJoop2stack6MpnI_jobject_i_v_: c1_LIREmitter.o; +text: .text%__1cNObjectMonitorREntryQdDueue_unlink6MpnMObjectWaiter__v_; +text: .text%JVM_IsSameClassPackage; +text: .text%__1cTGeneratePairingInfoRpossible_gc_point6MpnOBytecodeStream__i_: ciMethod.o; +text: .text%__1cTGeneratePairingInfoOreport_results6kM_i_: ciMethod.o; +text: .text%__1cMGraphBuilderMmonitorenter6MpnLInstruction__v_; +text: .text%__1cMMonitorEnterFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_MonitorEnter6MpnMMonitorEnter__v_; +text: .text%__1cNAccessMonitorIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderLmonitorexit6MpnLInstruction__v_; +text: .text%__1cLMonitorExitFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerOdo_MonitorExit6MpnLMonitorExit__v_; +text: .text%__1cILongTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cILongTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cNAccessMonitorPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorPdo_MonitorEnter6MpnMMonitorEnter__v_; +text: .text%__1cTNullCheckEliminatorUhandle_AccessMonitor6MpnNAccessMonitor__v_; +text: .text%__1cQNullCheckVisitorOdo_MonitorExit6MpnLMonitorExit__v_; +text: .text%__1cIValueGenPdo_MonitorEnter6MpnMMonitorEnter__v_; +text: .text%__1cNc1_AllocTableMhas_two_free6kM_i_; +text: .text%__1cMLongConstantPas_LongConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cFRInfoLas_rinfo_lo6kM_0_; +text: .text%__1cLLIR_EmitterJopr2intLo6MpnLLIR_OprDesc__i_; +text: .text%__1cFRInfoLas_rinfo_hi6kM_0_; +text: .text%__1cLLIR_EmitterJopr2intHi6MpnLLIR_OprDesc__i_; +text: 
.text%__1cIValueGenOdo_MonitorExit6MpnLMonitorExit__v_; +text: .text%__1cNAccessMonitorQas_AccessMonitor6M_p0_: c1_GraphBuilder.o; +text: .text%__1cJAssemblerFpushl6MpnI_jobject__v_; +text: .text%__1cNLIR_AssemblerNas_Address_hi6MpnLLIR_Address__nHAddress__; +text: .text%__1cFRInfoOas_register_hi6kM_pnMRegisterImpl__; +text: .text%__1cNLIR_AssemblerNas_Address_lo6MpnLLIR_Address__nHAddress__; +text: .text%__1cFRInfoOas_register_lo6kM_pnMRegisterImpl__; +text: .text%__1cCosHrealloc6FpvI_1_; +text: .text%Unsafe_GetNativeFloat; +text: .text%__1cIValueGenQdo_currentThread6MpnJIntrinsic__v_; +text: .text%__1cILIR_ListKget_thread6MnFRInfo__v_: c1_CodeGenerator_x86.o; +text: .text%__1cNLIR_AssemblerKget_thread6MpnLLIR_OprDesc__v_; +text: .text%__1cIValueGenSload_item_patching6MpnHIRScope_ipnEItem_pnKValueStack_pnOExceptionScope__v_; +text: .text%__1cEItemUget_jobject_constant6kM_pnIciObject__; +text: .text%__1cJValueTypeTas_InstanceConstant6M_pnQInstanceConstant__: c1_ValueType.o; +text: .text%__1cIintArrayIindex_of6kMki_i_: c1_CodeGenerator.o; +text: .text%__1cMLinkResolverbEresolve_interface_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cGciTypeNis_subtype_of6Mp0_i_; +text: .text%__1cIValueGenOload_byte_item6MpnEItem__v_; +text: .text%__1cIValueGenPlock_free_rinfo6MpnLInstruction_nKc1_RegMask__nFRInfo__; +text: .text%__1cIRegAllocNget_lock_temp6MpnLInstruction_nKc1_RegMask__nFRInfo__; +text: .text%__1cQComputeCallStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cRComputeEntryStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cIFrameMapNis_byte_rinfo6FnFRInfo__i_; +text: .text%Unsafe_AllocateInstance; +text: .text%jni_AllocObject: jni.o; +text: .text%__1cQinstanceRefKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_; +text: .text%__1cNCanonicalizerMset_constant6Mi_v_: c1_Canonicalizer.o; +text: .text%__1cJTypeCheckPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cNLIR_AssemblerMcheck_icache6M_i_; +text: .text%__1cRC1_MacroAssemblerTfast_ObjectHashCode6MpnMRegisterImpl_2_v_; +text: .text%__1cNLIR_AssemblerZjobject2reg_with_patching6MpnMRegisterImpl_pnMCodeEmitInfo__v_; +text: .text%__1cHLogicOpIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%__1cLAccessFieldKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o; +text: .text%__1cIRuntime1Mnew_instance6FpnKJavaThread_pnMklassOopDesc__v_; +text: .text%__1cQGenCollectedHeapXhandle_failed_promotion6MpnKGeneration_pnHoopDesc_Ip4_4_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceRefKlass.o; +text: .text%__1cbCOneContigSpaceCardGenerationTexpand_and_allocate6MIiii_pnIHeapWord__; +text: .text%__1cbCOneContigSpaceCardGenerationGexpand6MII_v_; +text: .text%__1cNGCMutexLocker2t6MpnFMutex__v_; +text: .text%__1cbCOneContigSpaceCardGenerationHgrow_by6MI_i_; +text: .text%__1cPContiguousSpaceNmangle_region6MnJMemRegion__v_; +text: .text%__1cJMarkSweepRFollowRootClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cQCompactibleSpaceUinitialize_threshold6M_pnIHeapWord__: space.o; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cRAlwaysTrueClosureLdo_object_b6MpnHoopDesc__i_: genCollectedHeap.o; +text: .text%__1cLCardTableRSTinvalidate_or_clear6MpnKGeneration_ii_v_; +text: .text%__1cJMemRegionFminus6kMk0_0_; +text: .text%__1cLCardTableRSKinvalidate6MnJMemRegion__v_: cardTableRS.o; +text: .text%__1cRCardTableModRefBSKinvalidate6MnJMemRegion__v_; +text: 
.text%__1cIRuntime1Onew_type_array6FpnKJavaThread_pnMklassOopDesc_i_v_; +text: .text%__1cJFloatTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cNFloatConstantQas_FloatConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cILIR_ListNstore_mem_oop6MpnI_jobject_nFRInfo_inJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cJFloatTypeMas_FloatType6M_p0_: c1_Canonicalizer.o; +text: .text%__1cNConstantTableMappend_float6Mf_v_; +text: .text%__1cRAbstractAssemblerGa_long6Mi_v_; +text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_; +text: .text%__1cDCHARprocess_interface6FnTinstanceKlassHandle_pnNGrowableArray4nLKlassHandle___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: .text%__1cINewArrayPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cLLIR_EmitterQfield_store_byte6MpnLLIR_OprDesc_i2nFRInfo_ipnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerIshift_op6MnILIR_Code_nFRInfo_222_v_; +text: .text%__1cIRuntime1Mmonitorenter6FpnKJavaThread_pnHoopDesc_pnPBasicObjectLock__v_; +text: .text%__1cIRuntime1Lmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cHnmethodPis_dependent_on6MpnMklassOopDesc__i_; +text: .text%__1cHnmethodVis_dependent_on_entry6MpnMklassOopDesc_2pnNmethodOopDesc__i_; +text: .text%__1cNVM_DeoptimizeEname6kM_pkc_: vm_operations.o; +text: .text%__1cNVM_DeoptimizeEdoit6M_v_; +text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_; +text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_; +text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_; +text: .text%__1cFframeVshould_be_deoptimized6kM_i_; +text: .text%__1cICodeBlobOis_java_method6kM_i_: codeBlob.o; +text: .text%__1cJCodeCachebGmake_marked_nmethods_not_entrant6F_v_; +text: .text%__1cJCodeCacheNalive_nmethod6FpnICodeBlob__pnHnmethod__; +text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_; +text: .text%__1cHnmethodNis_osr_method6kM_i_: nmethod.o; +text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_; +text: .text%__1cHnmethodVmark_as_seen_on_stack6M_v_; +text: .text%__1cTinc_decompile_count6FpnHnmethod__v_: nmethod.o; +text: .text%__1cMVM_OperationNdoit_epilogue6M_v_: vm_operations.o; +text: .text%__1cHThreadsLnmethods_do6F_v_; +text: .text%__1cKJavaThreadLnmethods_do6M_v_; +text: .text%__1cGThreadLnmethods_do6M_v_; +text: .text%__1cFframeLnmethods_do6M_v_; +text: .text%__1cFframeVnmethods_code_blob_do6M_v_; +text: .text%__1cILIR_ListEidiv6MnFRInfo_i11pnMCodeEmitInfo__v_; +text: .text%__1cLlog2_intptr6Fi_i_: c1_LIRAssembler_x86.o; +text: .text%__1cONMethodSweeperPprocess_nmethod6FpnHnmethod__v_; +text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: nmethod.o; +text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o; +text: .text%__1cHnmethodVcleanup_inline_caches6M_v_; +text: .text%__1cKCompiledIC2t6MpnKRelocation__v_; +text: .text%__1cILongTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cILongTypeEsize6kM_i_: c1_ValueType.o; +text: .text%JVM_HoldsLock; +text: .text%__1cSObjectSynchronizerZcurrent_thread_holds_lock6FpnKJavaThread_nGHandle__i_; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cLLoadIndexedIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%__1cFciEnvWis_dependence_violated6FpnMklassOopDesc_pnNmethodOopDesc__i_; +text: .text%__1cFciEnvZcall_has_multiple_targets6FpnNinstanceKlass_nMsymbolHandle_3ri_i_; +text: .text%__1cFMutexbLwait_for_lock_blocking_implementation6MpnKJavaThread__v_; +text: .text%__1cHnmethodbCcan_not_entrant_be_converted6M_i_; +text: 
.text%__1cXNativeSignatureIteratorHdo_bool6M_v_: oopMapCache.o; +text: .text%__1cTMaskFillerForNativeIpass_int6M_v_: oopMapCache.o; +text: .text%__1cGThreadOis_Java_thread6kM_i_: vmThread.o; +text: .text%__1cMLocalMappingDadd6MinFRInfo__v_; +text: .text%__1cILongTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cLLIR_EmitterQfield_store_long6MpnLLIR_OprDesc_i2ipnMCodeEmitInfo__v_; +text: .text%__1cKScanBlocksMis_long_only6kMi_i_; +text: .text%__1cRLIR_PeepholeStateLreg2indexLo6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateLreg2indexHi6MpnLLIR_OprDesc__i_; +text: .text%__1cNSharedRuntimeDf2l6Ff_x_; +text: .text%__1cIValueGenLdo_getClass6MpnJIntrinsic__v_; +text: .text%__1cLLIR_EmitterIgetClass6MnFRInfo_1pnMCodeEmitInfo__v_; +text: .text%__1cMGraphBuilderKcompare_op6MpnJValueType_nJBytecodesECode__v_; +text: .text%__1cJCompareOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerMdo_CompareOp6MpnJCompareOp__v_; +text: .text%__1cJCompareOpEhash6kM_i_: c1_Instruction.o; +text: .text%__1cJCompareOpEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cJCompareOpMas_CompareOp6M_p0_: c1_Instruction.o; +text: .text%__1cCIf2t6MpnLInstruction_n0BJCondition_i2pnKBlockBegin_5pnKValueStack_i_v_: c1_Canonicalizer.o; +text: .text%__1cGSetRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocLset_fpu_reg6MiipnLInstruction__v_; +text: .text%__1cJIsFreeRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cILIR_ListJfloat2reg6MfnFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListMbranch_float6MnMLIR_OpBranchNLIR_Condition_pnFLabel_4_v_; +text: .text%__1cIValueGenNreturnF0RInfo6F_nFRInfo__; +text: .text%__1cLLIR_EmitterOset_fpu_result6MnFRInfo__v_; +text: .text%__1cILIR_ListIpush_fpu6MnFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cNConstantTableZaddress_of_float_constant6Mf_pC_; +text: .text%__1cNLIR_AssemblerOfpu_two_on_tos6MnFRInfo_1i_v_; +text: .text%__1cIFrameMapLFpuStackSimEswap6M_v_; +text: .text%__1cIFrameMapLFpuStackSimRexchange_with_tos6Mi_v_; +text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_; +text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_; +text: .text%__1cQPlaceholderEntryHoops_do6MpnKOopClosure__v_; +text: .text%__1cHnmethodFflush6M_v_; +text: .text%__1cJEventMark2t6MpkcE_v_: nmethod.o; +text: .text%__1cICodeBlobFflush6M_v_; +text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_; +text: .text%__1cICodeHeapKdeallocate6Mpv_v_; +text: .text%__1cICodeHeapPadd_to_freelist6MpnJHeapBlock__v_; +text: .text%__1cICodeHeapPfollowing_block6MpnJFreeBlock__2_; +text: .text%__1cRComputeEntryStackIdo_float6M_v_: generateOopMap.o; +text: .text%__1cICodeHeapMinsert_after6MpnJFreeBlock_2_v_; +text: .text%__1cICodeHeapLmerge_right6MpnJFreeBlock__v_; +text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cUCompressedReadStreamMraw_read_int6FrpC_i_: nmethod.o; +text: .text%__1cFframebAoops_compiled_arguments_do6MnMsymbolHandle_ipknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cQComputeCallStackJdo_double6M_v_: generateOopMap.o; +text: .text%__1cbCOneContigSpaceCardGenerationGshrink6MI_v_; +text: .text%__1cbCOneContigSpaceCardGenerationJshrink_by6MI_v_; +text: .text%__1cMVirtualSpaceJshrink_by6MI_v_; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: oopMapCache.o; +text: .text%__1cRComputeEntryStackJdo_double6M_v_: generateOopMap.o; +text: .text%__1cKDoubleTypeDtag6kM_nIValueTag__: c1_ValueType.o; 
+text: .text%__1cKDoubleTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cODoubleConstantRas_DoubleConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cKDoubleTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cODoubleConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cKDoubleTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cHLockRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocRset_locked_double6MipnLInstruction_i_v_; +text: .text%__1cKDoubleTypeNas_DoubleType6M_p0_: c1_ValueType.o; +text: .text%__1cIFrameMapUare_adjacent_indeces6kMii_i_; +text: .text%__1cQChangeSpillCountJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocZchange_double_spill_count6Mii_v_; +text: .text%__1cILIR_ListKdouble2reg6MdnFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cHFreeRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocPset_free_double6Mi_v_; +text: .text%__1cILIR_ListDrem6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cLGetRefCountJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocNget_double_rc6kMi_i_; +text: .text%__1cLLIR_EmitterUcheck_double_address6Mi_v_; +text: .text%__1cILIR_ListQreg2double_stack6MnFRInfo_inJBasicType__v_: c1_LIREmitter.o; +text: .text%__1cRLIR_PeepholeStateNstack2indexHi6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateNstack2indexLo6MpnLLIR_OprDesc__i_; +text: .text%__1cKDoubleTypeNas_DoubleType6M_p0_: c1_Canonicalizer.o; +text: .text%__1cNConstantTableNappend_double6Md_v_; +text: .text%__1cNConstantTablebAaddress_of_double_constant6Md_pC_; +text: .text%__1cQGenCollectedHeapHcollect6MnHGCCauseFCause_i_v_; +text: .text%__1cQGenCollectedHeapOcollect_locked6MnHGCCauseFCause_i_v_; +text: .text%__1cRVM_GenCollectFullEname6kM_pkc_: vm_operations.o; +text: .text%__1cRVM_GenCollectFullEdoit6M_v_; +text: .text%__1cQGenCollectedHeapYmust_clear_all_soft_refs6M_i_; +text: .text%__1cQGenCollectedHeapSdo_full_collection6Miipi_v_; +text: .text%__1cKGenerationbHfull_collects_younger_generations6kM_i_: defNewGeneration.o; +text: .text%__1cKDoubleTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cKDoubleTypeEbase6kM_pnJValueType__: c1_Canonicalizer.o; +text: .text%__1cIValueMapNresize_bucket6MpnGBucket__v_; +text: .text%__1cNFloatConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cJNullCheckMas_NullCheck6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLLIR_EmitterIopr2long6MpnLLIR_OprDesc__x_; +text: .text%__1cILIR_ListKlong2stack6Mxi_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenNreturnD0RInfo6F_nFRInfo__; +text: .text%__1cJIsFreeRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocOis_free_double6kMi_i_; +text: .text%__1cGSetRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocOset_double_reg6MiipnLInstruction__v_; +text: .text%__1cLLIR_EmitterNcopy_fpu_item6MnFRInfo_pnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListHdup_fpu6MnFRInfo_1_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListDdiv6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cJAssemblerFfsubp6Mi_v_; +text: .text%__1cNLIR_AssemblerHdup_fpu6MnFRInfo_1_v_; +text: .text%__1cIFrameMapLFpuStackSimLmove_on_tos6Mi_i_; +text: .text%__1cJAssemblerGfdiv_d6MnHAddress__v_; +text: .text%__1cJAssemblerFfdivp6Mi_v_; +text: .text%__1cIValueGenMreturn2RInfo6F_nFRInfo__; +text: .text%__1cJValueTypeQas_FloatConstant6M_pnNFloatConstant__: c1_Canonicalizer.o; +text: .text%__1cIRuntime1Qnew_object_array6FpnKJavaThread_pnMklassOopDesc_i_v_; +text: .text%__1cIValueGenLdivOutRInfo6F_nFRInfo__; +text: 
.text%__1cILIR_ListEidiv6MnFRInfo_111pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListVvolatile_load_mem_reg6MnFRInfo_i1nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cEItemSget_jlong_constant6kM_x_; +text: .text%__1cNLIR_AssemblerQvolatile_move_op6MpnLLIR_OprDesc_2nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlass.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: objArrayKlass.o; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cONewObjectArrayKexact_type6kM_pnGciType__; +text: .text%__1cPciObjArrayKlassSis_obj_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__; +text: .text%__1cIRuntime1Noop_arraycopy6FpnIHeapWord_2i_v_; +text: .text%__1cILongTypeEbase6kM_pnJValueType__: c1_Canonicalizer.o; +text: .text%__1cMLongConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cIValueGenUdo_ArithmeticOp_Long6MpnMArithmeticOp__v_; +text: .text%__1cLLIR_EmitterSarithmetic_op_long6MnJBytecodesECode_pnLLIR_OprDesc_44pnMCodeEmitInfo__v_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cLArrayLengthIis_equal6kMpnLInstruction__i_: c1_GraphBuilder.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: compiledICHolderKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: compiledICHolderKlass.o; +text: .text%__1cTunsafe_intrinsic_id6FpnNsymbolOopDesc_1_nNmethodOopDescLIntrinsicId__; +text: .text%__1cMGraphBuilderVappend_unsafe_put_raw6MpnIciMethod_nJBasicType__i_; +text: .text%__1cMUnsafePutRawFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_; +text: .text%__1cNCanonicalizerOdo_UnsafeRawOp6MpnLUnsafeRawOp__v_; +text: .text%__1cFmatch6FpnLUnsafeRawOp_ppnLInstruction_4pi_i_: c1_Canonicalizer.o; +text: .text%__1cLInstructionPas_ArithmeticOp6M_pnMArithmeticOp__: c1_Instruction.o; +text: .text%__1cIUnsafeOpLas_UnsafeOp6M_p0_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderVappend_unsafe_get_raw6MpnIciMethod_nJBasicType__i_; +text: .text%__1cMUnsafeGetRawFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_; +text: .text%__1cMGraphBuilderNlookup_switch6M_v_; +text: .text%__1cIintArray2t6Mki1_v_: c1_GraphBuilder.o; +text: .text%__1cMLookupSwitchFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_LookupSwitch6MpnMLookupSwitch__v_; +text: .text%__1cMUnsafePutRawPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_; +text: .text%__1cTNullCheckEliminatorPhandle_UnsafeOp6MpnIUnsafeOp__v_; +text: .text%__1cLUnsafeRawOpPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_; +text: .text%__1cQNullCheckVisitorPdo_LookupSwitch6MpnMLookupSwitch__v_; +text: .text%__1cIValueGenPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_; +text: .text%__1cLLIR_EmitterOput_raw_unsafe6MpnLLIR_OprDesc_2i2nJBasicType__v_; +text: .text%__1cLLIR_EmitterMlong2address6MpnLLIR_OprDesc__nFRInfo__; +text: 
.text%__1cILIR_ListNstore_mem_reg6MnFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cIValueGenPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_; +text: .text%__1cLLIR_EmitterOget_raw_unsafe6MnFRInfo_pnLLIR_OprDesc_3inJBasicType__v_; +text: .text%__1cILIR_ListMload_mem_reg6MpnLLIR_Address_nFRInfo_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cIValueGenPdo_LookupSwitch6MpnMLookupSwitch__v_; +text: .text%__1cUcreate_lookup_ranges6FpnMLookupSwitch__pnQLookupRangeArray__: c1_CodeGenerator_x86.o; +text: .text%__1cLLIR_EmitterVlookupswitch_range_op6MpnLLIR_OprDesc_iipnKBlockBegin__v_; +text: .text%__1cNSharedRuntimeEldiv6Fxx_x_; +text: .text%Unsafe_GetObjectVolatile; +text: .text%signalHandler; +text: .text%JVM_handle_solaris_signal; +text: .text%__1cKJavaThreadUin_stack_yellow_zone6MpC_i_: os_solaris_x86.o; +text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_; +text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__pC_; +text: .text%__1cbCCompiledCodeSafepointHandlerbDhandle_polling_page_exception6M_pC_; +text: .text%__1cFframebDsender_for_raw_compiled_frame6kMpnLRegisterMap__0_; +text: .text%__1cNSafepointBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_; +text: .text%__1cUThreadSafepointStateYcaller_must_gc_arguments6kM_i_; +text: .text%__1cbCCompiledCodeSafepointHandlerYcaller_must_gc_arguments6kM_i_: safepoint.o; +text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_; +text: .text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cSvframeStreamCommonbFfill_in_compiled_inlined_sender6M_i_; +text: .text%__1cJFloatTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cIValueGenNrelease_roots6MpnKValueStack__v_; +text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__; +text: .text%__1cHciKlassNis_subtype_of6Mp0_i_; +text: .text%__1cNSharedRuntimeDd2l6Fd_x_; +text: .text%__1cOObjectConstantLis_constant6kM_i_: c1_ValueType.o; +text: .text%__1cILIR_ListLstore_array6MipnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerLconst2array6MpnJLIR_Const_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cQInstanceConstantLis_constant6kM_i_: c1_ValueType.o; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/reorder_COMPILER1_i486 2009-08-01 04:16:50.867611779 +0100 @@ -0,0 +1,5451 @@ +data = R0x2000; +text = LOAD ?RXO; + + +# Test Null +text: .text%__cplus_fini_at_exit: CCrti.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o; +text: .text%__1cQAgentLibraryList2t6M_v_: arguments.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable.o; +text: .text%__1cFRInfo2t6M_v_: c1_AllocTable.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_AllocTable_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals.o; +text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Canonicalizer.o; +text: .text%__1cFRInfo2t6M_v_: c1_Canonicalizer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator.o; +text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeStubs_x86.o; 
+text: .text%__1cFRInfo2t6M_v_: c1_CodeStubs_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compilation.o; +text: .text%__1cFRInfo2t6M_v_: c1_Compilation.o; +text: .text%__1cMelapsedTimer2t6M_v_: c1_Compilation.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compiler.o; +text: .text%__1cFRInfo2t6M_v_: c1_Compiler.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap.o; +text: .text%__1cFRInfo2t6M_v_: c1_FrameMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_FrameMap_x86.o; +text: .text%__1cKc1_RegMask2t6M_v_: c1_FrameMap_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_GraphBuilder.o; +text: .text%__1cFRInfo2t6M_v_: c1_GraphBuilder.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_IR.o; +text: .text%__1cFRInfo2t6M_v_: c1_IR.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Instruction.o; +text: .text%__1cFRInfo2t6M_v_: c1_Instruction.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_InstructionPrinter.o; +text: .text%__1cFRInfo2t6M_v_: c1_InstructionPrinter.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items.o; +text: .text%__1cFRInfo2t6M_v_: c1_Items.o; +text: .text%__1cIHintItem2t6MpnJValueType_i_v_: c1_Items.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_Items_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIR.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIR.o; +text: .text%__1cLLIR_OprFactHillegal6F_pnLLIR_OprDesc__: c1_LIR.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIREmitter.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIREmitter_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Loops.o; +text: .text%__1cFRInfo2t6M_v_: c1_Loops.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_MacroAssembler_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_MacroAssembler_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Optimizer.o; +text: .text%__1cFRInfo2t6M_v_: c1_Optimizer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo.o; +text: .text%__1cFRInfo2t6M_v_: c1_RInfo.o; +text: .text%__1cKc1_RegMask2t6M_v_: c1_RInfo.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_RInfo_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc.o; +text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1.o; +text: .text%__1cFRInfo2t6M_v_: c1_Runtime1.o; +text: .text%__1cIiEntries2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_Runtime1_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ScanBlocks.o; +text: .text%__1cFRInfo2t6M_v_: c1_ScanBlocks.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueMap.o; +text: .text%__1cFRInfo2t6M_v_: c1_ValueMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueSet.o; +text: .text%__1cFRInfo2t6M_v_: c1_ValueSet.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueStack.o; +text: 
.text%__1cFRInfo2t6M_v_: c1_ValueStack.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeBlob.o; +text: .text%__1cFRInfo2t6M_v_: codeBlob.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o; +text: .text%__1cICHeapObj2n6FI_pv_; +text: .text%__1cCosGmalloc6FI_pv_; +text: .text%__1cICodeHeap2t6M_v_; +text: .text%__1cMVirtualSpace2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o; +text: .text%__1cMelapsedTimer2t6M_v_: compilationPolicy.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o; +text: .text%__1cMelapsedTimer2t6M_v_: compileBroker.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compiledIC.o; +text: .text%__1cFRInfo2t6M_v_: compiledIC.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: deoptimization.o; +text: .text%__1cFRInfo2t6M_v_: deoptimization.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o; +text: .text%__1cMelapsedTimer2t6M_v_: fprofiler.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame.o; +text: .text%__1cFRInfo2t6M_v_: frame.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame_x86.o; +text: .text%__1cFRInfo2t6M_v_: frame_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cRAlwaysTrueClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o; +text: .text%__1cNCellTypeStateLmake_bottom6F_0_: generateOopMap.o; +text: .text%__1cNCellTypeStateImake_any6Fi_0_: generateOopMap.o; +text: .text%__1cNCellTypeStateImake_top6F_0_: generateOopMap.o; +text: .text%__1cMelapsedTimer2t6M_v_: generateOopMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o; +text: .text%__1cKEntryPoint2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter_x86.o; +text: .text%__1cFRInfo2t6M_v_: interpreter_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: java.o; +text: .text%__1cFRInfo2t6M_v_: java.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o; +text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiEnvBase.o; +text: .text%__1cLResourceObj2n6FIn0APallocation_type__pv_; +text: .text%__1cNGrowableArray4CpnMJvmtiEnvBase__2t6Mii_v_: jvmtiEnvBase.o; +text: .text%__1cUGenericGrowableArray2t6Mii_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o; +text: .text%__1cRJvmtiEventEnabled2t6M_v_; +text: .text%__1cRJvmtiEventEnabledFclear6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o; +text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiImpl.o; +text: .text%__1cNGrowableArray4CpnPJvmtiRawMonitor__2t6Mii_v_: jvmtiImpl.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o; +text: .text%__1cJMemRegion2t6M_v_: jvmtiTagMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: klassVtable.o; +text: .text%__1cFRInfo2t6M_v_: klassVtable.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o; +text: .text%__1cJTimeStamp2t6M_v_: management.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o; +text: .text%__1cJMarkSweepSMarkAndPushClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepRFollowRootClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepSFollowStackClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepUAdjustPointerClosure2t6Mi_v_: markSweep.o; +text: .text%__1cJMarkSweepOIsAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepQKeepAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o; +text: 
.text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: memoryService.o; +text: .text%__1cNGrowableArray4CpnKMemoryPool__2t6Mii_v_: memoryService.o; +text: .text%__1cNGrowableArray4CpnNMemoryManager__2t6Mii_v_: memoryService.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodOop.o; +text: .text%__1cFRInfo2t6M_v_: methodOop.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nativeInst_x86.o; +text: .text%__1cFRInfo2t6M_v_: nativeInst_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nmethod.o; +text: .text%__1cFRInfo2t6M_v_: nmethod.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o; +text: .text%__1cQDoNothingClosure2t6M_v_: oopMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris.o; +text: .text%__1cFRInfo2t6M_v_: os_solaris.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris_x86.o; +text: .text%__1cFRInfo2t6M_v_: os_solaris_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o; +text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: parGCAllocBuffer.o; +text: .text%__1cRalign_object_size6Fi_i_: parGCAllocBuffer.o; +text: .text%__1cHoopDescLheader_size6F_i_: parGCAllocBuffer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psAdaptiveSizePolicy.o; +text: .text%__1cMelapsedTimer2t6M_v_: psAdaptiveSizePolicy.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o; +text: .text%__1cMelapsedTimer2t6M_v_: psMarkSweep.o; +text: .text%__1cTPSAlwaysTrueClosure2t6M_v_: psMarkSweep.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psPromotionLAB.o; +text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: psPromotionLAB.o; +text: .text%__1cRalign_object_size6Fi_i_: psPromotionLAB.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o; +text: .text%__1cMelapsedTimer2t6M_v_: psScavenge.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o; +text: .text%__1cQRelocationHolder2t6M_v_: relocInfo.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o; +text: .text%__1cJTimeStamp2t6M_v_: runtimeService.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint.o; +text: .text%__1cFRInfo2t6M_v_: safepoint.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint_solaris_x86.o; +text: .text%__1cFRInfo2t6M_v_: safepoint_solaris_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: sharedHeap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedRuntime.o; +text: .text%__1cFRInfo2t6M_v_: sharedRuntime.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: tenuredGeneration.o; +text: .text%__1cRCardTableModRefBSbCpar_chunk_heapword_alignment6F_I_: tenuredGeneration.o; +text: .text%__1cEMIN24CI_6FTA0_0_: tenuredGeneration.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vframeArray.o; +text: .text%__1cFRInfo2t6M_v_: vframeArray.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o; +text: .text%__1cFRInfo2t6M_v_: vmStructs.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o; +text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_; +text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vtableStubs_x86.o; +text: .text%__1cFRInfo2t6M_v_: vtableStubs_x86.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer_x86.o; +text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer_x86.o; +text: .text%JNI_CreateJavaVM; +text: .text%__1cCosVatomic_xchg_bootstrap6Fipoi_i_; +text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_; +text: .text%__1cHThreadsYis_supported_jni_version6Fi_C_; +text: 
.text%__1cMostream_init6F_v_; +text: .text%__1cMoutputStream2t6Mi_v_; +text: .text%__1cCosEinit6F_v_; +text: .text%__1cCosLinit_random6Fl_v_; +text: .text%__1cCosHSolarisWinitialize_system_info6F_v_; +text: .text%__1cOThreadCriticalKinitialize6F_v_; +text: .text%__1cCosMvm_page_size6F_i_; +text: .text%__1cJArgumentsWinit_system_properties6F_v_; +text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_; +text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_; +text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_; +text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_; +text: .text%__1cCosbDinit_system_properties_values6F_v_; +text: .text%__1cCosIjvm_path6Fpci_v_; +text: .text%__1cCosNset_boot_path6Fcc_i_; +text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_pkcp4_i_: arguments.o; +text: .text%__1cJArgumentsVprocess_settings_file6Fpkcii_i_; +text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_; +text: .text%__1cMSysClassPath2t6Mpkc_v_; +text: .text%__1cJArgumentsbSparse_java_tool_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cCosGgetenv6Fpkcpci_i_; +text: .text%__1cJArgumentsWparse_each_vm_init_arg6FpknOJavaVMInitArgs_pnMSysClassPath_pi_i_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_ppkc5i_i_: arguments.o; +text: .text%__1cJArgumentsMadd_property6Fpkc_i_; +text: .text%__1cJArgumentsXPropertyList_unique_add6FppnOSystemProperty_pkcpc_v_; +text: .text%__1cCosEfree6Fpv_v_; +text: .text%__1cJArgumentsbNparse_java_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsVfinalize_vm_init_args6FpnMSysClassPath_i_i_; +text: .text%__1cMSysClassPathPexpand_endorsed6M_v_; +text: .text%__1cJArgumentsMget_property6Fpkc_2_; +text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_; +text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_; +text: .text%__1cJArgumentsZcheck_vm_args_consistency6F_i_; +text: .text%__1cJArgumentsRverify_percentage6FIpkc_i_; +text: .text%__1cMSysClassPath2T6M_v_; +text: .text%__1cMSysClassPathNreset_item_at6Mi_v_: arguments.o; +text: .text%__1cJArgumentsbOparse_java_compiler_environment_variable6F_v_; +text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_; +text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_; +text: .text%__1cSCommandLineFlagsExKis_default6FnPCommandLineFlag__i_; +text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_; +text: .text%__1cCosXis_server_class_machine6F_i_; +text: .text%__1cJTimeStampJupdate_to6Mx_v_; +text: .text%__1cCosOjavaTimeMillis6F_x_; +text: .text%__1cJTraceTime2t6MpkciipnMoutputStream__v_; +text: .text%__1cCosGinit_26F_i_; +text: .text%__1cCosHSolarisKmmap_chunk6FpcIii_2_; +text: .text%__1cCosXnon_memory_address_word6F_pc_; +text: .text%__1cCosHSolarisRmpss_sanity_check6F_v_; +text: .text%__1cCosHSolarisOset_mpss_range6FpcII_i_; +text: .text%__1cCosPuncommit_memory6FpcI_i_; +text: .text%__1cCosHSolarisOlibthread_init6F_v_; +text: .text%__1cOisT2_libthread6F_i_; +text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o; +text: .text%__1cNpriocntl_stub6FinGidtype_lipc_l_: os_solaris.o; +text: .text%__1cOresolve_symbol6Fpkc_pC_: os_solaris.o; +text: .text%__1cCosHSolarisQsignal_sets_init6F_v_; +text: .text%__1cCosHSolarisOis_sig_ignored6Fi_i_; +text: .text%__1cCosHSolarisPinit_signal_mem6F_v_; +text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_; +text: .text%__1cCosHSolarisSset_signal_handler6Fiii_v_; +text: 
.text%__1cCosHSolarisUsynchronization_init6F_v_; +text: .text%__1cDhpiKinitialize6F_i_; +text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_; +text: .text%__1cQostream_init_log6F_v_; +text: .text%__1cNdefaultStreamMhas_log_file6M_i_; +text: .text%__1cRis_error_reported6F_i_; +text: .text%__1cNdefaultStreamEinit6M_v_; +text: .text%__1cSThreadLocalStorageEinit6F_v_; +text: .text%__1cSThreadLocalStorageHpd_init6F_v_; +text: .text%__1cCosbDallocate_thread_local_storage6F_i_; +text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_; +text: .text%__1cRAllocateTLSOffset6F_v_: threadLS_solaris_x86.o; +text: .text%__1cPvm_init_globals6F_v_; +text: .text%__1cScheck_ThreadShadow6F_v_; +text: .text%__1cRcheck_basic_types6F_v_; +text: .text%__1cNeventlog_init6F_v_; +text: .text%__1cKmutex_init6F_v_; +text: .text%__1cFMutex2t6Mipkci_v_; +text: .text%lwp_cond_init: os_solaris.o; +text: .text%lwp_mutex_init: os_solaris.o; +text: .text%__1cHMonitor2t6Mipkci_v_; +text: .text%__1cOchunkpool_init6F_v_; +text: .text%__1cPperfMemory_init6F_v_; +text: .text%__1cKPerfMemoryKinitialize6F_v_; +text: .text%__1cCosZvm_allocation_granularity6F_i_; +text: .text%__1cKPerfMemoryUcreate_memory_region6FI_v_; +text: .text%__1cUcreate_shared_memory6FI_pc_: perfMemory_solaris.o; +text: .text%__1cSmmap_create_shared6FI_pc_: perfMemory_solaris.o; +text: .text%__1cCosScurrent_process_id6F_i_; +text: .text%__1cNget_user_name6Fl_pc_: perfMemory_solaris.o; +text: .text%__1cQget_user_tmp_dir6Fpkc_pc_: perfMemory_solaris.o; +text: .text%__1cCosSget_temp_directory6F_pkc_; +text: .text%__1cWget_sharedmem_filename6Fpkci_pc_: perfMemory_solaris.o; +text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cTis_directory_secure6Fpkc_i_: perfMemory_solaris.o; +text: .text%lstat: perfMemory_solaris.o; +text: .text%__1cPfilename_to_pid6Fpkc_l_: perfMemory_solaris.o; +text: .text%__1cbAcreate_sharedmem_resources6Fpkc1I_i_: perfMemory_solaris.o; +text: .text%__1cRmake_user_tmp_dir6Fpkc_i_: perfMemory_solaris.o; +text: .text%__1cKJavaThread2t6M_v_; +text: .text%__1cGThread2t6M_v_; +text: .text%__1cFArena2t6M_v_; +text: .text%__1cFChunk2n6FII_pv_; +text: .text%__1cOThreadCritical2t6M_v_; +text: .text%__1cOThreadCritical2T6M_v_; +text: .text%__1cFChunk2t6MI_v_; +text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_; +text: .text%__1cKJavaThreadKinitialize6M_v_; +text: .text%__1cNjni_functions6F_pknTJNINativeInterface___; +text: .text%__1cQThreadStatistics2t6M_v_; +text: .text%__1cGParker2t6M_v_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6MpnIHeapWord_22_v_; +text: .text%__1cWThreadLocalAllocBufferMinitial_size6F_I_; +text: .text%__1cWThreadLocalAllocBufferVinitialize_statistics6M_v_; +text: .text%__1cMFlatProfilerJis_active6F_i_; +text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_; +text: .text%__1cUThreadSafepointState2t6MpnKJavaThread__v_; +text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_; +text: .text%__1cCosScurrent_stack_base6F_pC_; +text: .text%__1cCosScurrent_stack_size6F_I_; +text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_; +text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_; +text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_; +text: .text%__1cCosbBthread_local_storage_at_put6Fipv_v_; +text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_; +text: .text%get_thread; +text: 
.text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FIi_pnGThread__; +text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__; +text: .text%__1cCosXthread_local_storage_at6Fi_pv_; +text: .text%__1cCosVcurrent_stack_pointer6F_pC_; +text: .text%__1cCosRinitialize_thread6F_v_; +text: .text%__1cNReservedSpaceUpage_align_size_down6FI_I_; +text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_; +text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_; +text: .text%__1cFMutexbClock_without_safepoint_check6M_v_; +text: .text%__1cFMutexGunlock6M_v_; +text: .text%__1cGThreadWset_as_starting_thread6M_i_; +text: .text%__1cCosScreate_main_thread6FpnGThread__i_; +text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o; +text: .text%__1cIOSThread2t6MpFpv_i1_v_; +text: .text%__1cIOSThreadNpd_initialize6M_v_; +text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_; +text: .text%__1cCosHSolarisRunblocked_signals6F_pnIsigset_t__; +text: .text%__1cGThreadMis_VM_thread6kM_i_: thread.o; +text: .text%__1cCosHSolarisKvm_signals6F_pnIsigset_t__; +text: .text%__1cKJavaThreadYcreate_stack_guard_pages6M_v_; +text: .text%__1cCosNcommit_memory6FpcI_i_; +text: .text%__1cCosMguard_memory6FpcI_i_; +text: .text%__1cMinit_globals6F_i_; +text: .text%__1cPmanagement_init6F_v_; +text: .text%__1cKManagementEinit6F_v_; +text: .text%__1cNExceptionMark2t6MrpnGThread__v_; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__; +text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_; +text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_; +text: .text%__1cIPerfDataMcreate_entry6MnJBasicType_II_v_; +text: .text%__1cKPerfMemoryFalloc6FI_pc_; +text: .text%__1cFMutexElock6M_v_; +text: .text%__1cFMutexElock6MpnGThread__v_; +text: .text%__1cKPerfMemoryMmark_updated6F_v_; +text: .text%__1cCosLelapsedTime6F_d_; +text: .text%__1cMgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cPoldgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cPPerfDataManagerIadd_item6FpnIPerfData_i_v_; +text: .text%__1cMPerfDataList2t6Mi_v_; +text: .text%__1cCosbCis_thread_cpu_time_supported6F_i_; +text: .text%__1cNExceptionMark2T6M_v_; +text: .text%__1cNThreadServiceEinit6F_v_; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__; +text: .text%__1cORuntimeServiceEinit6F_v_; +text: .text%__1cTClassLoadingServiceEinit6F_v_; +text: .text%__1cKvtune_init6F_v_; +text: .text%__1cObytecodes_init6F_v_; +text: .text%__1cJBytecodesKinitialize6F_v_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_iii_v_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii1i_v_; +text: .text%__1cJBytecodesNpd_initialize6F_v_; +text: .text%__1cQclassLoader_init6F_v_; +text: .text%__1cLClassLoaderKinitialize6F_v_; +text: .text%__1cLClassLoaderQload_zip_library6F_v_; +text: .text%__1cCosTnative_java_library6F_pv_; +text: .text%JVM_GetInterfaceVersion; +text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_; +text: .text%__1cKHandleMark2T6M_v_; +text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_; +text: .text%__1cCosGstrdup6Fpkc_pc_; +text: .text%__1cLClassLoaderbCupdate_class_path_entry_list6Fpkc_v_; +text: .text%__1cCosEstat6FpkcpnEstat__i_; +text: .text%stat: os_solaris.o; +text: .text%__1cLClassLoaderXcreate_class_path_entry6FpcnEstat_ppnOClassPathEntry__v_; +text: .text%__1cLClassLoaderSget_canonical_path6Fpc1i_i_; +text: 
.text%JVM_RawMonitorCreate; +text: .text%JVM_NativePath; +text: .text%JVM_RawMonitorEnter; +text: .text%__1cFMutexMjvm_raw_lock6M_v_; +text: .text%JVM_RawMonitorExit; +text: .text%__1cFMutexOjvm_raw_unlock6M_v_; +text: .text%JVM_Open; +text: .text%JVM_Lseek; +text: .text%JVM_Close; +text: .text%__1cDhpiFclose6Fi_i_: jvm.o; +text: .text%__1cRClassPathZipEntry2t6Mppvpc_v_; +text: .text%__1cOClassPathEntry2t6M_v_; +text: .text%__1cLClassLoaderLadd_to_list6FpnOClassPathEntry__v_; +text: .text%__1cOcodeCache_init6F_v_; +text: .text%__1cJCodeCacheKinitialize6F_v_; +text: .text%__1cICodeHeapHreserve6MIII_i_; +text: .text%__1cLlog2_intptr6Fi_i_: heap.o; +text: .text%__1cYalign_to_allocation_size6FI_I_: heap.o; +text: .text%__1cNReservedSpace2t6MI_v_; +text: .text%__1cNReservedSpaceKinitialize6MIIipc_v_; +text: .text%__1cCosOreserve_memory6FIpc_1_; +text: .text%__1cMVirtualSpaceKinitialize6MnNReservedSpace_I_i_; +text: .text%__1cMVirtualSpaceJexpand_by6MI_i_; +text: .text%__1cMVirtualSpaceQuncommitted_size6kM_I_; +text: .text%__1cMVirtualSpaceNreserved_size6kM_I_; +text: .text%__1cMVirtualSpaceOcommitted_size6kM_I_; +text: .text%__1cCosNcommit_memory6FpcII_i_; +text: .text%__1cSalign_to_page_size6FI_I_: heap.o; +text: .text%__1cICodeHeapFclear6M_v_; +text: .text%__1cICodeHeapTmark_segmap_as_free6MII_v_; +text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_; +text: .text%__1cMCodeHeapPool2t6MpnICodeHeap_pkci_v_; +text: .text%__1cICodeHeapIcapacity6kM_I_; +text: .text%__1cICodeHeapMmax_capacity6kM_I_; +text: .text%__1cKMemoryPool2t6Mpkcn0AIPoolType_IIii_v_; +text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_; +text: .text%__1cNMemoryManager2t6M_v_; +text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_; +text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_; +text: .text%__1cLicache_init6F_v_; +text: .text%__1cPVM_Version_init6F_v_; +text: .text%__1cKVM_VersionKinitialize6F_v_; +text: .text%__1cKBufferBlobGcreate6Fpkci_p0_; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: thread.o; +text: .text%__1cRalign_code_offset6Fi_I_; +text: .text%__1cICodeHeapLheader_size6F_I_; +text: .text%__1cKBufferBlob2n6FII_pv_; +text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__; +text: .text%__1cICodeHeapIallocate6MI_pv_; +text: .text%__1cICodeHeapPsearch_freelist6MI_pnJFreeBlock__; +text: .text%__1cICodeHeapTmark_segmap_as_used6MII_v_; +text: .text%__1cKBufferBlob2t6Mpkci_v_; +text: .text%__1cICodeBlob2t6Mpkcii_v_; +text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_; +text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_; +text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_; +text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cMCodeHeapPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cICodeHeapSallocated_capacity6kM_I_; +text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cXresource_allocate_bytes6FI_pc_; +text: .text%__1cKCodeBuffer2t6MpCi_v_; +text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cYVM_Version_StubGeneratorTgenerate_getPsrInfo6M_pC_: vm_version_x86.o; +text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_; +text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_; +text: .text%__1cJAssemblerFpushl6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerGpushfd6M_v_; +text: .text%__1cJAssemblerEpopl6MpnMRegisterImpl__v_; +text: 
.text%__1cJAssemblerExorl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerExorl6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerFpopfd6M_v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_rnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cJAssemblerFcpuid6M_v_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerDjmp6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_; +text: .text%__1cRAbstractAssemblerHbind_to6MrnFLabel_i_v_; +text: .text%__1cMDisplacementEbind6MrnFLabel_ipnRAbstractAssembler__v_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_22nHAddressLScaleFactor_irknQRelocationHolder__v_; +text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cJAssemblerEmovl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerDret6Mi_v_; +text: .text%__1cMStubCodeMark2T6M_v_; +text: .text%__1cRAbstractAssemblerFflush6M_v_; +text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_; +text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_; +text: .text%__1cFForteNregister_stub6FpkcpC3_v_; +text: .text%__1cKVM_VersionWget_processor_features6F_v_; +text: .text%__1cCosMsupports_sse6F_i_; +text: .text%__1cVcheck_for_sse_support6F_v_: os_solaris_x86.o; +text: .text%jio_snprintf; +text: .text%jio_vsnprintf; +text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_; +text: .text%__1cSstubRoutines_init16F_v_; +text: .text%__1cMStubRoutinesLinitialize16F_v_; +text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_; +text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_x86.o; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCi_v_; +text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cJAssemblerEcall6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cJAssemblerJemit_data6MinJrelocInfoJrelocType_i_v_; +text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: relocInfo.o; +text: .text%__1cKRelocationLunpack_data6M_v_: codeBlob.o; +text: .text%__1cJAssemblerJemit_data6MirknQRelocationHolder_i_v_; +text: .text%__1cKCodeBufferIrelocate6MpCrknQRelocationHolder_i_v_; +text: .text%__1cXruntime_call_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cOMacroAssemblerJincrement6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_i_v_; +text: .text%__1cOMacroAssemblerKget_thread6MpnMRegisterImpl__v_; +text: .text%__1cSThreadLocalStorageTpd_getTlsAccessMode6F_n0AQpd_tlsAccessMode__; +text: .text%__1cJAssemblerFpushl6Mi_v_; +text: .text%__1cJAssemblerEleal6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerEmovl6MnHAddress_i_v_; +text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_; +text: .text%__1cJAssemblerDjmp6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_x86.o; +text: .text%__1cOMacroAssemblerFenter6M_v_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_i_v_; +text: 
.text%__1cJAssemblerFtestl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEdecl6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEcall6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerGfstp_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfstp_d6MnHAddress__v_; +text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_x86.o; +text: .text%__1cJAssemblerDjmp6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_: stubGenerator_x86.o; +text: .text%__1cJAssemblerExchg6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerGpushad6M_v_; +text: .text%__1cJAssemblerFpopad6M_v_; +text: .text%__1cNStubGeneratorYgenerate_get_previous_fp6M_pC_: stubGenerator_x86.o; +text: .text%__1cNStubGeneratorUgenerate_d2i_wrapper6MpC_1_: stubGenerator_x86.o; +text: .text%__1cOMacroAssemblerOpush_FPU_state6M_v_; +text: .text%__1cJAssemblerGfnsave6MnHAddress__v_; +text: .text%__1cJAssemblerFfwait6M_v_; +text: .text%__1cJAssemblerFfld_d6MnHAddress__v_; +text: .text%__1cJAssemblerFfst_d6MnHAddress__v_; +text: .text%__1cOMacroAssemblerPempty_FPU_stack6M_v_; +text: .text%__1cJAssemblerFffree6Mi_v_; +text: .text%__1cJAssemblerLemit_farith6Miii_v_; +text: .text%__1cOMacroAssemblerNpop_FPU_state6M_v_; +text: .text%__1cJAssemblerGfrstor6MnHAddress__v_; +text: .text%__1cNStubGeneratorUcreate_control_words6M_v_: stubGenerator_x86.o; +text: .text%__1cJTraceTime2T6M_v_; +text: .text%__1cNcarSpace_init6F_v_; +text: .text%__1cICarSpaceEinit6F_v_; +text: .text%__1cNuniverse_init6F_i_; +text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_; +text: .text%__1cLFileMapInfoKinitialize6M_i_; +text: .text%__1cLFileMapInfoNfail_continue6MpkcE_v_; +text: .text%__1cLFileMapInfoFclose6M_v_; +text: .text%__1cIUniversePinitialize_heap6F_i_; +text: .text%__1cPMarkSweepPolicy2t6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyMrem_set_name6M_nJGenRemSetEName__: collectorPolicy.o; +text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__I_; +text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_I_; +text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_; +text: .text%__1cPMarkSweepPolicyWinitialize_generations6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyVnumber_of_generations6M_i_: collectorPolicy.o; +text: .text%__1cXPermanentGenerationSpec2t6MnHPermGenEName_IIIIII_v_; +text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_; +text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_; +text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__; +text: .text%__1cSPerfStringConstant2t6MnJCounterNS_pkc3_v_; +text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_; +text: .text%__1cKPerfStringKset_string6Mpkc_v_; +text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__; +text: .text%__1cQGenCollectedHeap2t6MpnPCollectorPolicy__v_; +text: .text%__1cKSharedHeap2t6MpnPCollectorPolicy__v_; +text: .text%__1cNCollectedHeap2t6M_v_; +text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_; +text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__; +text: .text%__1cMSubTasksDone2t6Mi_v_; +text: .text%__1cMSubTasksDoneFclear6M_v_; +text: .text%__1cMSubTasksDoneFvalid6M_i_; +text: 
.text%__1cQGenCollectedHeapKinitialize6M_i_; +text: .text%__1cPCollectorPolicyLgenerations6M_ppnOGenerationSpec__: collectorPolicy.o; +text: .text%__1cPCollectorPolicyUpermanent_generation6M_pnXPermanentGenerationSpec__: collectorPolicy.o; +text: .text%__1cXPermanentGenerationSpecFalign6MI_v_; +text: .text%__1cQGenCollectedHeapIallocate6MIpnXPermanentGenerationSpec_pIpipnNReservedSpace__pc_; +text: .text%__1cOGenerationSpecRn_covered_regions6kM_i_: collectorPolicy.o; +text: .text%__1cNReservedSpace2t6MIIipc_v_; +text: .text%__1cPCollectorPolicyOcreate_rem_set6MnJMemRegion_i_pnJGenRemSet__; +text: .text%__1cbCTwoGenerationCollectorPolicyQbarrier_set_name6M_nKBarrierSetEName__: collectorPolicy.o; +text: .text%__1cLCardTableRS2t6MnJMemRegion_i_v_; +text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_; +text: .text%__1cNReservedSpaceYallocation_align_size_up6FI_I_; +text: .text%__1cJMemRegion2t6M_v_: cardTableModRefBS.o; +text: .text%__1cKSharedHeapPset_barrier_set6MpnKBarrierSet__v_; +text: .text%__1cNReservedSpaceKfirst_part6MIii_0_; +text: .text%__1cNReservedSpace2t6MpcI_v_; +text: .text%__1cOGenerationSpecEinit6MnNReservedSpace_ipnJGenRemSet__pnKGeneration__; +text: .text%__1cQDefNewGeneration2t6MnNReservedSpace_Iipkc_v_; +text: .text%__1cKGeneration2t6MnNReservedSpace_Ii_v_; +text: .text%__1cIageTable2t6Mi_v_; +text: .text%__1cIageTableFclear6M_v_; +text: .text%__1cFArenaEgrow6MI_pv_; +text: .text%__1cFChunkJnext_chop6M_v_; +text: .text%__1cFChunkEchop6M_v_; +text: .text%__1cFChunk2k6Fpv_v_; +text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cRCardTableModRefBSbCfind_covering_region_by_base6MpnIHeapWord__i_; +text: .text%__1cRCardTableModRefBSbAlargest_prev_committed_end6kMi_pnIHeapWord__; +text: .text%__1cWSequentialSubTasksDoneFclear6M_v_; +text: .text%__1cSGenerationCounters2t6MpkciipnMVirtualSpace__v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_; +text: .text%__1cRCollectorCounters2t6Mpkci_v_; +text: .text%__1cOCSpaceCounters2t6MpkciIpnPContiguousSpace_pnSGenerationCounters__v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__; +text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_; +text: .text%__1cPPerfLongVariantGsample6M_v_; +text: .text%__1cZContiguousSpaceUsedHelperLtake_sample6M_x_: cSpaceCounters.o; +text: .text%__1cPContiguousSpaceEused6kM_I_: space.o; +text: .text%__1cQDefNewGenerationYcompute_space_boundaries6MI_v_; +text: .text%__1cQCompactibleSpaceKinitialize6MnJMemRegion_i_v_; +text: .text%__1cFSpaceKinitialize6MnJMemRegion_i_v_; +text: .text%__1cFSpaceKset_bottom6MpnIHeapWord__v_: space.o; +text: .text%__1cJEdenSpaceHset_end6MpnIHeapWord__v_: space.o; +text: .text%__1cJEdenSpaceFclear6M_v_; +text: .text%__1cPContiguousSpaceFclear6M_v_; +text: .text%__1cFSpaceFclear6M_v_; +text: .text%__1cFSpaceHset_end6MpnIHeapWord__v_: space.o; +text: .text%__1cQDefNewGenerationPupdate_counters6M_v_; +text: .text%__1cSGenerationCountersKupdate_all6M_v_: generationCounters.o; +text: .text%__1cNReservedSpaceJlast_part6MI_0_; +text: .text%__1cRTenuredGeneration2t6MnNReservedSpace_IipnJGenRemSet__v_; +text: .text%__1cOCardGeneration2t6MnNReservedSpace_IipnJGenRemSet__v_; +text: .text%__1cWBlockOffsetSharedArray2t6MnJMemRegion_I_v_; +text: .text%__1cWBlockOffsetSharedArrayGresize6MI_v_; +text: 
.text%__1cNReservedSpaceSpage_align_size_up6FI_I_; +text: .text%__1cLCardTableRSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cLCardTableRSKis_aligned6MpnIHeapWord__i_: cardTableRS.o; +text: .text%__1cHGCStats2t6M_v_; +text: .text%__1cWOffsetTableContigSpace2t6MpnWBlockOffsetSharedArray_nJMemRegion__v_; +text: .text%__1cQBlockOffsetArray2t6MpnWBlockOffsetSharedArray_nJMemRegion_i_v_; +text: .text%__1cWOffsetTableContigSpaceKset_bottom6MpnIHeapWord__v_; +text: .text%__1cQBlockOffsetArrayGresize6MI_v_: blockOffsetTable.o; +text: .text%__1cWOffsetTableContigSpaceHset_end6MpnIHeapWord__v_; +text: .text%__1cWOffsetTableContigSpaceFclear6M_v_; +text: .text%__1cbBBlockOffsetArrayContigSpaceUinitialize_threshold6M_pnIHeapWord__; +text: .text%__1cUGenericGrowableArrayEgrow6Mi_v_; +text: .text%__1cXPermanentGenerationSpecEinit6MnNReservedSpace_IpnJGenRemSet__pnHPermGen__; +text: .text%__1cRCompactingPermGen2t6MnNReservedSpace_1IpnJGenRemSet_pnXPermanentGenerationSpec__v_; +text: .text%__1cUCompactingPermGenGen2t6MnNReservedSpace_1IipnJGenRemSet_pnPContiguousSpace_pnXPermanentGenerationSpec__v_; +text: .text%__1cUCompactingPermGenGenbFinitialize_performance_counters6M_v_; +text: .text%__1cbCOneContigSpaceCardGenerationIcapacity6kM_I_; +text: .text%__1cPCollectorPolicybFis_concurrent_mark_sweep_policy6M_i_: collectorPolicy.o; +text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_; +text: .text%__1cPGlobalTLABStats2t6M_v_; +text: .text%__1cPGlobalTLABStatsKinitialize6M_v_; +text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_; +text: .text%__1cXAdaptiveWeightedAverageYcompute_adaptive_average6Mff_f_; +text: .text%__1cQGenCollectedHeapNtlab_capacity6kM_I_; +text: .text%__1cQDefNewGenerationYsupports_tlab_allocation6kM_i_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationNtlab_capacity6kM_I_: defNewGeneration.o; +text: .text%__1cKGenerationYsupports_tlab_allocation6kM_i_: tenuredGeneration.o; +text: .text%__1cWThreadLocalAllocBufferImax_size6F_I_; +text: .text%__1cOBasicHashtable2t6Mii_v_: universe.o; +text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: classLoader.o; +text: .text%__1cQinterpreter_init6F_v_; +text: .text%__1cTAbstractInterpreterKinitialize6F_v_; +text: .text%__1cNTemplateTableKinitialize6F_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF_vc_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_; +text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJOperation__v4_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJCondition__v4_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF3_v3_v_; +text: .text%__1cNTemplateTableNpd_initialize6F_v_; +text: .text%__1cRInvocationCounterMreinitialize6Fi_v_; +text: .text%__1cRInvocationCounterDdef6Fn0AFState_ipFnMmethodHandle_pnGThread__pC_v_; +text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_; +text: .text%__1cICodeHeapJexpand_by6MI_i_; +text: .text%__1cJStubQdDueueOregister_queue6Fp0_v_; +text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_; +text: .text%__1cLCodeletMark2t6MrpnZInterpreterMacroAssembler_pkcinJBytecodesECode__v_: interpreter.o; +text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__; +text: 
.text%__1cbBInterpreterCodeletInterfaceRcode_size_to_size6kMi_i_: interpreter.o; +text: .text%__1cbBInterpreterCodeletInterfaceKinitialize6MpnEStub_i_v_: interpreter.o; +text: .text%__1cSInterpreterCodeletKinitialize6MpkcinJBytecodesECode__v_; +text: .text%__1cbCAbstractInterpreterGeneratorTgenerate_error_exit6Mpkc_pC_; +text: .text%__1cOMacroAssemblerEstop6Mpkc_v_; +text: .text%__1cJAssemblerEcall6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cJAssemblerDhlt6M_v_; +text: .text%__1cOMacroAssemblerFalign6Mi_v_; +text: .text%__1cJAssemblerDnop6M_v_; +text: .text%__1cJStubQdDueueGcommit6Mi_v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_return_entry_for6MnITosState_i_pC_; +text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_; +text: .text%__1cOMacroAssemblerSload_unsigned_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cJAssemblerGmovzxw6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerEshll6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_; +text: .text%__1cOMacroAssemblerSload_unsigned_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cJAssemblerGmovzxb6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_base6MnITosState_ppCi_v_; +text: .text%__1cZInterpreterMacroAssemblerKverify_FPU6MinITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerKverify_oop6MpnMRegisterImpl_nITosState__v_; +text: .text%__1cJAssemblerDjmp6MnHAddress__v_; +text: .text%__1cOMacroAssemblerKverify_FPU6Mipkc_v_; +text: .text%__1cKEntryPoint2t6MpC11111111_v_; +text: .text%__1cJAssemblerEincl6MpnMRegisterImpl__v_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_deopt_entry_for6MnITosState_i_pC_; +text: .text%__1cJAssemblerEcmpl6MnHAddress_i_v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pCi_v_; +text: .text%__1cOMacroAssemblerOcall_VM_helper6MpnMRegisterImpl_pCii_v_; +text: .text%__1cZInterpreterMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cOMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_pCnJrelocInfoJrelocType__v_; +text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_; +text: .text%__1cbCAbstractInterpreterGeneratorbBgenerate_result_handler_for6MnJBasicType__pC_; +text: .text%__1cOMacroAssemblerGc2bool6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEsetb6Mn0AJCondition_pnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerQsign_extend_byte6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerRsign_extend_short6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerGmovsxw6MpnMRegisterImpl_2_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEtemp6F_pnMRegisterImpl__; +text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_; +text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC222i_v_; +text: .text%__1cTAbstractInterpreterRTosState_as_index6FnITosState__i_; +text: .text%__1cTAbstractInterpreterMreturn_entry6FnITosState_i_pC_; +text: .text%__1cKEntryPointFentry6kMnITosState__pC_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_continuation_for6MnITosState__pC_; +text: 
.text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_; +text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_throw_exception6M_v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl_33_v_; +text: .text%__1cZInterpreterMacroAssemblerRremove_activation6MnITosState_pnMRegisterImpl_iii_v_; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerElock6M_v_; +text: .text%__1cJAssemblerHcmpxchg6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cHAddress2t6MinJrelocInfoJrelocType__v_; +text: .text%__1cOMacroAssemblerFleave6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC22i_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbHgenerate_exception_handler_common6Mpkc2i_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_StackOverflowError_handler6M_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cUInterpreterGeneratorbEgenerate_asm_interpreter_entry6Mi_pC_; +text: .text%__1cUInterpreterGeneratorXcheck_for_compiled_code6MrnFLabel__v_; +text: .text%__1cUInterpreterGeneratorbDgenerate_stack_overflow_check6M_v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cUInterpreterGeneratorUgenerate_fixed_frame6Mi_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_x86.o; +text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_; +text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MpC_v_; +text: .text%__1cJAssemblerEnegl6MpnMRegisterImpl__v_; +text: .text%__1cUInterpreterGeneratorbAgenerate_run_compiled_code6M_v_; +text: .text%__1cZInterpreterMacroAssemblerNsuper_call_VM6MpnMRegisterImpl_2pC22_v_; +text: .text%__1cUInterpreterGeneratorLlock_method6M_v_; +text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_; +text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_; +text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl_i_v_; +text: .text%__1cLlog2_intptr6Fi_i_: interpreter_x86.o; +text: .text%__1cOMacroAssemblerQload_signed_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cOMacroAssemblerQload_signed_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cJAssemblerGmovsxw6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cUInterpreterGeneratorXgenerate_abstract_entry6M_pC_; +text: 
.text%__1cUInterpreterGeneratorTgenerate_math_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cOMacroAssemblerGsincos6Miii_v_; +text: .text%__1cJAssemblerFfld_s6Mi_v_; +text: .text%__1cJAssemblerEfabs6M_v_; +text: .text%__1cOMacroAssemblerEfcmp6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerHfucomip6Mi_v_; +text: .text%__1cOMacroAssemblerEfpop6M_v_; +text: .text%__1cJAssemblerHfincstp6M_v_; +text: .text%__1cJAssemblerEfsin6M_v_; +text: .text%__1cJAssemblerEfcos6M_v_; +text: .text%__1cJAssemblerFfsqrt6M_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_native_entry6Mi_pC_; +text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_; +text: .text%__1cJAssemblerGmembar6M_v_; +text: .text%__1cJAssemblerEaddl6MnHAddress_i_v_; +text: .text%__1cJAssemblerSemit_arith_operand6MipnMRegisterImpl_nHAddress_i_v_; +text: .text%__1cZInterpreterMacroAssemblerSnotify_method_exit6MnITosState__v_; +text: .text%__1cbCAbstractInterpreterGeneratorbEset_entry_points_for_all_bytes6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorQset_entry_points6MnJBytecodesECode__v_; +text: .text%__1cbCAbstractInterpreterGeneratorWset_short_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cbCAbstractInterpreterGeneratorVset_vtos_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_and_dispatch6MpnITemplate_nITosState__v_; +text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_; +text: .text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_; +text: .text%__1cNTemplateTableDnop6F_v_; +text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_; +text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_; +text: .text%__1cNTemplateTableLaconst_null6F_v_; +text: .text%__1cNTemplateTableGiconst6Fi_v_; +text: .text%__1cNTemplateTableGlconst6Fi_v_; +text: .text%__1cNTemplateTableGfconst6Fi_v_; +text: .text%__1cJAssemblerEfldz6M_v_; +text: .text%__1cJAssemblerEfld16M_v_; +text: .text%__1cJAssemblerFfaddp6Mi_v_; +text: .text%__1cNTemplateTableGdconst6Fi_v_; +text: .text%__1cNTemplateTableGbipush6F_v_; +text: .text%__1cNTemplateTableGsipush6F_v_; +text: .text%__1cJAssemblerFbswap6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl_i_v_; +text: .text%__1cNTemplateTableDldc6Fi_v_; +text: .text%__1cJAssemblerEmovb6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_; +text: .text%__1cJAssemblerFfld_s6MnHAddress__v_; +text: .text%__1cZInterpreterMacroAssemblerbGget_unsigned_2_byte_index_at_bcp6MpnMRegisterImpl_i_v_; +text: .text%__1cNTemplateTableGldc2_w6F_v_; +text: .text%__1cJAssemblerEcmpb6MnHAddress_i_v_; +text: .text%__1cNTemplateTableFiload6F_v_; +text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_; +text: .text%__1cbCAbstractInterpreterGeneratorUset_wide_entry_point6MpnITemplate_rpC_v_; +text: .text%__1cNTemplateTableKwide_iload6F_v_; +text: .text%__1cNTemplateTableRlocals_index_wide6FpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableFlload6F_v_; +text: .text%__1cNTemplateTableKwide_lload6F_v_; +text: .text%__1cNTemplateTableFfload6F_v_; +text: .text%__1cNTemplateTableKwide_fload6F_v_; +text: .text%__1cNTemplateTableFdload6F_v_; +text: .text%__1cNTemplateTableKwide_dload6F_v_; +text: .text%__1cNTemplateTableFaload6F_v_; +text: .text%__1cNTemplateTableKwide_aload6F_v_; +text: 
.text%__1cNTemplateTableFiload6Fi_v_; +text: .text%__1cNTemplateTableFlload6Fi_v_; +text: .text%__1cNTemplateTableFfload6Fi_v_; +text: .text%__1cNTemplateTableFdload6Fi_v_; +text: .text%__1cNTemplateTableHaload_06F_v_; +text: .text%__1cNTemplateTableFaload6Fi_v_; +text: .text%__1cNTemplateTableGiaload6F_v_; +text: .text%__1cNTemplateTableLindex_check6FpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_; +text: .text%__1cOMacroAssemblerZneeds_explicit_null_check6Fi_i_; +text: .text%__1cNTemplateTableGlaload6F_v_; +text: .text%__1cNTemplateTableGfaload6F_v_; +text: .text%__1cNTemplateTableGdaload6F_v_; +text: .text%__1cNTemplateTableGaaload6F_v_; +text: .text%__1cNTemplateTableGbaload6F_v_; +text: .text%__1cNTemplateTableGcaload6F_v_; +text: .text%__1cNTemplateTableGsaload6F_v_; +text: .text%__1cNTemplateTableGistore6F_v_; +text: .text%__1cNTemplateTableLwide_istore6F_v_; +text: .text%__1cNTemplateTableGlstore6F_v_; +text: .text%__1cNTemplateTableLwide_lstore6F_v_; +text: .text%__1cNTemplateTableGfstore6F_v_; +text: .text%__1cNTemplateTableLwide_fstore6F_v_; +text: .text%__1cNTemplateTableGdstore6F_v_; +text: .text%__1cNTemplateTableLwide_dstore6F_v_; +text: .text%__1cNTemplateTableGastore6F_v_; +text: .text%__1cNTemplateTableLwide_astore6F_v_; +text: .text%__1cNTemplateTableGistore6Fi_v_; +text: .text%__1cNTemplateTableGlstore6Fi_v_; +text: .text%__1cNTemplateTableGfstore6Fi_v_; +text: .text%__1cNTemplateTableGdstore6Fi_v_; +text: .text%__1cNTemplateTableGastore6Fi_v_; +text: .text%__1cNTemplateTableHiastore6F_v_; +text: .text%__1cNTemplateTableHlastore6F_v_; +text: .text%__1cNTemplateTableHfastore6F_v_; +text: .text%__1cNTemplateTableHdastore6F_v_; +text: .text%__1cNTemplateTableHaastore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerRprofile_checkcast6MipnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cJAssemblerKrepne_scan6M_v_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerSstore_check_part_16MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerSstore_check_part_26MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEmovb6MnHAddress_i_v_; +text: .text%__1cNTemplateTableHbastore6F_v_; +text: .text%__1cJAssemblerEmovb6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cNTemplateTableHcastore6F_v_; +text: .text%__1cJAssemblerEmovw6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cNTemplateTableHsastore6F_v_; +text: .text%__1cNTemplateTableDpop6F_v_; +text: .text%__1cNTemplateTableEpop26F_v_; +text: .text%__1cNTemplateTableDdup6F_v_; +text: .text%__1cJAssemblerFpushl6MnHAddress__v_; +text: .text%__1cNTemplateTableGdup_x16F_v_; +text: .text%__1cNTemplateTableGdup_x26F_v_; +text: .text%__1cNTemplateTableEdup26F_v_; +text: .text%__1cNTemplateTableHdup2_x16F_v_; +text: .text%__1cNTemplateTableHdup2_x26F_v_; +text: .text%__1cNTemplateTableEswap6F_v_; +text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_; +text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_; +text: .text%__1cJAssemblerEadcl6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_; +text: .text%__1cJAssemblerGfadd_s6MnHAddress__v_; +text: .text%__1cZInterpreterMacroAssemblerGf2ieee6M_v_; +text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_; +text: .text%__1cJAssemblerGfadd_d6MnHAddress__v_; +text: .text%__1cZInterpreterMacroAssemblerGd2ieee6M_v_; +text: .text%__1cJAssemblerEsbbl6MpnMRegisterImpl_2_v_; +text: 
.text%__1cJAssemblerHfsubr_s6MnHAddress__v_; +text: .text%__1cJAssemblerHfsubr_d6MnHAddress__v_; +text: .text%__1cJAssemblerFimull6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableElmul6F_v_; +text: .text%__1cOMacroAssemblerElmul6Mii_v_; +text: .text%__1cJAssemblerEmull6MnHAddress__v_; +text: .text%__1cJAssemblerEmull6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerGfmul_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfmul_d6MnHAddress__v_; +text: .text%__1cJAssemblerFfld_x6MnHAddress__v_; +text: .text%__1cJAssemblerFfmulp6Mi_v_; +text: .text%__1cNTemplateTableEidiv6F_v_; +text: .text%__1cOMacroAssemblerPcorrected_idivl6MpnMRegisterImpl__i_; +text: .text%__1cJAssemblerEcdql6M_v_; +text: .text%__1cJAssemblerFidivl6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableEldiv6F_v_; +text: .text%__1cZInterpreterMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cJAssemblerHfdivr_s6MnHAddress__v_; +text: .text%__1cJAssemblerHfdivr_d6MnHAddress__v_; +text: .text%__1cJAssemblerGfdivrp6Mi_v_; +text: .text%__1cNTemplateTableEirem6F_v_; +text: .text%__1cNTemplateTableElrem6F_v_; +text: .text%__1cOMacroAssemblerFfremr6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerIsave_eax6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerFfprem6M_v_; +text: .text%__1cJAssemblerJfnstsw_ax6M_v_; +text: .text%__1cJAssemblerEsahf6M_v_; +text: .text%__1cOMacroAssemblerLrestore_eax6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEfxch6Mi_v_; +text: .text%__1cNTemplateTableEineg6F_v_; +text: .text%__1cNTemplateTableElneg6F_v_; +text: .text%__1cOMacroAssemblerElneg6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEadcl6MpnMRegisterImpl_i_v_; +text: .text%__1cNTemplateTableEfneg6F_v_; +text: .text%__1cJAssemblerEfchs6M_v_; +text: .text%__1cNTemplateTableEdneg6F_v_; +text: .text%__1cJAssemblerEshll6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableElshl6F_v_; +text: .text%__1cOMacroAssemblerElshl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerFshldl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableElshr6F_v_; +text: .text%__1cOMacroAssemblerElshr6MpnMRegisterImpl_2i_v_; +text: .text%__1cJAssemblerFshrdl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableFlushr6F_v_; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableEiinc6F_v_; +text: .text%__1cJAssemblerEaddl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cNTemplateTableJwide_iinc6F_v_; +text: .text%__1cNTemplateTableHconvert6F_v_; +text: .text%__1cOMacroAssemblerLextend_sign6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerGfild_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfild_d6MnHAddress__v_; +text: .text%__1cNTemplateTableElcmp6F_v_; +text: .text%__1cOMacroAssemblerIlcmp2int6MpnMRegisterImpl_222_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_; +text: .text%__1cOMacroAssemblerIfcmp2int6MpnMRegisterImpl_i_v_; +text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_; +text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_; +text: .text%__1cFj_not6FnNTemplateTableJCondition__nJAssemblerJCondition__: templateTable_x86.o; +text: .text%__1cNTemplateTableGbranch6Fii_v_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_only6MnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerYprofile_not_taken_branch6MpnMRegisterImpl__v_; +text: 
.text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableF_goto6F_v_; +text: .text%__1cNTemplateTableDjsr6F_v_; +text: .text%__1cZInterpreterMacroAssemblerWdispatch_only_noverify6MnITosState__v_; +text: .text%__1cNTemplateTableDret6F_v_; +text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableIwide_ret6F_v_; +text: .text%__1cNTemplateTableLtableswitch6F_v_; +text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_22_v_; +text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableMlookupswitch6F_v_; +text: .text%__1cNTemplateTableH_return6FnITosState__v_; +text: .text%__1cNTemplateTableJgetstatic6Fi_v_; +text: .text%__1cNTemplateTableSgetfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableZload_field_cp_cache_entry6FipnMRegisterImpl_22i_v_; +text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerHfistp_d6MnHAddress__v_; +text: .text%__1cNTemplateTableJputstatic6Fi_v_; +text: .text%__1cNTemplateTableSputfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableUjvmti_post_field_mod6Fii_v_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cNTemplateTableQvolatile_barrier6F_v_; +text: .text%__1cNTemplateTableIgetfield6Fi_v_; +text: .text%__1cNTemplateTableOpatch_bytecode6FnJBytecodesECode_pnMRegisterImpl_4i_v_; +text: .text%__1cNTemplateTableIputfield6Fi_v_; +text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_; +text: .text%__1cNTemplateTableOprepare_invoke6FpnMRegisterImpl_2inJBytecodesECode__v_; +text: .text%__1cNTemplateTablebAload_invoke_cp_cache_entry6FipnMRegisterImpl_22ii_v_; +text: .text%__1cNTemplateTableUinvokevirtual_helper6FpnMRegisterImpl_22_v_; +text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_22_v_; +text: .text%__1cNTemplateTableNinvokespecial6Fi_v_; +text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableMinvokestatic6Fi_v_; +text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_; +text: .text%__1cbCAbstractInterpreterGeneratorRset_unimplemented6Mi_v_; +text: .text%__1cNTemplateTableE_new6F_v_; +text: .text%__1cQGenCollectedHeapItop_addr6kM_ppnIHeapWord__; +text: .text%__1cQDefNewGenerationItop_addr6kM_ppnIHeapWord__; +text: .text%__1cQGenCollectedHeapIend_addr6kM_ppnIHeapWord__; +text: .text%__1cQDefNewGenerationIend_addr6kM_ppnIHeapWord__; +text: .text%__1cOMacroAssemblerJdecrement6MpnMRegisterImpl_i_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_; +text: .text%__1cNTemplateTableInewarray6F_v_; +text: .text%__1cNTemplateTableJanewarray6F_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_; +text: .text%__1cNTemplateTableLarraylength6F_v_; +text: .text%__1cNTemplateTableGathrow6F_v_; +text: .text%__1cNTemplateTableJcheckcast6F_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_; +text: .text%__1cNTemplateTableKinstanceof6F_v_; +text: .text%__1cNTemplateTableMmonitorenter6F_v_; +text: .text%__1cJAssemblerFcmovl6Mn0AJCondition_pnMRegisterImpl_3_v_; +text: .text%__1cNTemplateTableLmonitorexit6F_v_; +text: .text%__1cNTemplateTableEwide6F_v_; +text: 
.text%__1cNTemplateTableOmultianewarray6F_v_; +text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableGgoto_w6F_v_; +text: .text%__1cNTemplateTableFjsr_w6F_v_; +text: .text%__1cNTemplateTableL_breakpoint6F_v_; +text: .text%__1cZInterpreterMacroAssemblerUdispatch_only_normal6MnITosState__v_; +text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_; +text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_; +text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_; +text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_; +text: .text%__1cNTemplateTableKfast_iload6F_v_; +text: .text%__1cNTemplateTableLfast_iload26F_v_; +text: .text%__1cNTemplateTableMfast_icaload6F_v_; +text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_; +text: .text%__1cNTemplateTableRfast_linearswitch6F_v_; +text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_; +text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbCset_safepoints_for_all_bytes6M_v_; +text: .text%__1cWinvocationCounter_init6F_v_; +text: .text%__1cOmarksweep_init6F_v_; +text: .text%__1cQaccessFlags_init6F_v_; +text: .text%__1cStemplateTable_init6F_v_; +text: .text%__1cVInterfaceSupport_init6F_v_; +text: .text%__1cOuniverse2_init6F_v_; +text: .text%__1cIUniverseHgenesis6FpnGThread__v_; +text: .text%__1cIUniverseYcompute_base_vtable_size6F_v_; +text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_; +text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__; +text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: klassKlass.o; +text: .text%__1cKKlass_vtbl2n6FIrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: klass.o; +text: .text%__1cKSharedHeapWpermanent_mem_allocate6MI_pnIHeapWord__: genCollectedHeap.o; +text: .text%__1cRCompactingPermGenMmem_allocate6MI_pnIHeapWord__; +text: .text%__1cbCOneContigSpaceCardGenerationIallocate6MIii_pnIHeapWord__: compactingPermGenGen.o; +text: .text%__1cWOffsetTableContigSpaceIallocate6MI_pnIHeapWord__: space.o; +text: .text%__1cPContiguousSpaceIallocate6MI_pnIHeapWord__; +text: .text%__1cbBBlockOffsetArrayContigSpaceLalloc_block6MpnIHeapWord_2_v_: blockOffsetTable.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: klass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: klass.o; +text: .text%__1cRCardTableModRefBSEkind6M_nKBarrierSetEName__: cardTableModRefBS.o; +text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_; +text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: klassKlass.o; +text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_; +text: .text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: arrayKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: arrayKlassKlass.o; +text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__; +text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlassKlass.o; +text: 
.text%__1cKklassKlassOset_alloc_size6MI_v_: objArrayKlassKlass.o; +text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: instanceKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlassKlass.o; +text: .text%__1cbBBlockOffsetArrayContigSpaceQalloc_block_work6MpnIHeapWord_2_v_; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: typeArrayKlassKlass.o; +text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: symbolKlass.o; +text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_: symbolKlass.o; +text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cJHashtableLhash_symbol6Fpkci_I_: symbolTable.o; +text: .text%__1cLSymbolTableGlookup6MipkciI_pnNsymbolOopDesc__; +text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__; +text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: symbolKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: symbolKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: symbolKlass.o; +text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__; +text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__; +text: .text%__1cOtypeArrayKlassMcreate_klass6FnJBasicType_ipnGThread__pnMklassOopDesc__; +text: .text%__1cOtypeArrayKlassNexternal_name6FnJBasicType__pkc_; +text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__; +text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: typeArrayKlass.o; +text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_; +text: .text%__1cLlog2_intptr6Fi_i_: typeArrayKlass.o; +text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_; +text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cLklassVtableRinitialize_vtable6MpnGThread__v_; +text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodKlass.o; +text: .text%__1cLmethodKlassOset_alloc_size6MI_v_: methodKlass.o; +text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_: constMethodKlass.o; +text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodDataKlass.o; 
+text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_: methodDataKlass.o; +text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constantPoolKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: constantPoolKlass.o; +text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: cpCacheKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: cpCacheKlass.o; +text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_: compiledICHolderKlass.o; +text: .text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlass.o; +text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_; +text: .text%__1cNsymbolOopDescLas_C_string6kMpci_1_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlass.o; +text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: objArrayKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlass.o; +text: .text%__1cLlog2_intptr6Fi_i_: objArrayKlassKlass.o; +text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: typeArrayKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: typeArrayKlass.o; +text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: oopFactory.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: oopFactory.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: oopFactory.o; +text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_; +text: .text%__1cNsymbolOopDescGequals6kMpkci_i_; +text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_; +text: .text%__1cKDictionary2t6Mi_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: dictionary.o; +text: .text%__1cQPlaceholderTable2t6Mi_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: placeholders.o; +text: .text%__1cVLoaderConstraintTable2t6Mi_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: loaderConstraints.o; +text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_; +text: 
.text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_nGHandle_2ipnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: systemDictionary.o; +text: .text%__1cHoopDescSslow_identity_hash6M_i_; +text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__i_; +text: .text%__1cNget_next_hash6F_i_: synchronizer.o; +text: .text%__1cCosGrandom6F_l_; +text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cKDictionaryJget_entry6MiInMsymbolHandle_nGHandle__pnPDictionaryEntry__; +text: .text%__1cQSystemDictionarybAcompute_loader_lock_object6FnGHandle_pnGThread__1_; +text: .text%__1cMObjectLocker2t6MnGHandle_pnGThread__v_; +text: .text%__1cSObjectSynchronizerKfast_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cQSystemDictionaryKfind_class6FiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cQPlaceholderTableJadd_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cQPlaceholderTableJnew_entry6MipnNsymbolOopDesc_pnHoopDesc__pnQPlaceholderEntry__; +text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cQSystemDictionaryRload_shared_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cQSystemDictionaryRfind_shared_class6FnMsymbolHandle__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_; +text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cFVTuneQstart_class_load6F_v_; +text: .text%__1cJEventMark2t6MpkcE_v_: classLoader.o; +text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_; +text: .text%__1cMstringStream2t6MI_v_; +text: .text%__1cMstringStreamFwrite6MpkcI_v_; +text: .text%__1cMoutputStreamPupdate_position6MpkcI_v_; +text: .text%__1cMstringStreamJas_string6M_pc_; +text: .text%__1cMelapsedTimerFstart6M_v_; +text: .text%__1cCosPelapsed_counter6F_x_; +text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__; +text: .text%__1cPClassFileStream2t6MpCipc_v_; +text: .text%__1cMelapsedTimerEstop6M_v_; +text: .text%__1cPClassFileParserOparseClassFile6MnMsymbolHandle_nGHandle_2r1pnGThread__nTinstanceKlassHandle__; +text: .text%__1cIVerifierRshould_verify_for6FpnHoopDesc__i_; +text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_; +text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_; +text: .text%__1cIVerifierQrelax_verify_for6FpnHoopDesc__i_; +text: .text%__1cVjava_lang_ClassLoaderRis_trusted_loader6FpnHoopDesc__i_; +text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__; +text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__; +text: .text%__1cPClassFileParserOcheck_property6MipkcipnGThread__v_; +text: .text%__1cKoopFactoryQnew_constantPool6FipnGThread__pnTconstantPoolOopDesc__; +text: 
.text%__1cRconstantPoolKlassIallocate6MipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: constantPoolKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: constantPoolKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: constantPoolKlass.o; +text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_; +text: .text%__1cPClassFileParserbFparse_constant_pool_class_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbJparse_constant_pool_methodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbGparse_constant_pool_string_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbHparse_constant_pool_integer_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbEparse_constant_pool_utf8_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_; +text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_; +text: .text%__1cPClassFileParserbLparse_constant_pool_nameandtype_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTconstantPoolOopDescSklass_ref_index_at6Mi_i_; +text: .text%__1cTconstantPoolOopDescbAname_and_type_ref_index_at6Mi_i_; +text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_; +text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_; +text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_; +text: .text%__1cPClassFileParserQparse_interfaces6MnSconstantPoolHandle_nGHandle_2pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserMparse_fields6MnSconstantPoolHandle_ipnUFieldAllocationCount_pnOobjArrayHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cUinitialize_hashtable6FppnLNameSigHash__v_; +text: .text%__1cPclear_hashtable6FppnLNameSigHash__v_; +text: .text%__1cPClassFileParserNparse_methods6MnSconstantPoolHandle_ipnLAccessFlags_ppnPobjArrayOopDesc_66pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserMparse_method6MnSconstantPoolHandle_ipnLAccessFlags_pnPtypeArrayHandle_55pnGThread__nMmethodHandle__; +text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbDverify_legal_method_modifiers6MiinMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserWparse_linenumber_table6MIIpipnGThread__pC_; +text: .text%__1cbFCompressedLineNumberWriteStream2t6Mi_v_; +text: .text%__1cVCompressedWriteStream2t6Mi_v_; +text: .text%__1cQCompressedStream2t6MpCi_v_; +text: .text%__1cbFCompressedLineNumberWriteStreamKwrite_pair6Mii_v_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: methodOop.o; +text: .text%__1cKoopFactoryKnew_method6FinLAccessFlags_iiipnGThread__pnNmethodOopDesc__; +text: .text%__1cKoopFactoryPnew_constMethod6FiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cQconstMethodKlassIallocate6MiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: constMethodKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: constMethodKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: constMethodKlass.o; +text: 
.text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_; +text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__; +text: .text%__1cNmethodOopDescLobject_size6Fi_i_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: methodKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: methodKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: methodKlass.o; +text: .text%__1cNmethodOopDescJinit_code6M_v_; +text: .text%__1cRInvocationCounterEinit6M_v_; +text: .text%__1cRInvocationCounterFreset6M_v_; +text: .text%__1cRInvocationCounterJset_state6Mn0AFState__v_; +text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_; +text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_; +text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_; +text: .text%__1cRSignatureIteratorGexpect6Mc_v_; +text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_; +text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__; +text: .text%__1cNmethodOopDescVclear_native_function6M_v_; +text: .text%__1cNmethodOopDescTset_native_function6MpC_v_; +text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_; +text: .text%__1cVCompressedWriteStreamEgrow6M_v_; +text: .text%__1cRSignatureIteratorKparse_type6M_i_; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: frame.o; +text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_: frame.o; +text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_; +text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_; +text: .text%__1cSconstMethodOopDescbEchecked_exceptions_length_addr6kM_pH_; +text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__; +text: .text%__1cXcopy_u2_with_conversion6FpH0i_v_: classFileParser.o; +text: .text%__1cNSignatureInfoHdo_long6M_v_: frame.o; +text: .text%__1cNSignatureInfoGdo_int6M_v_: frame.o; +text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__; +text: .text%__1cNmethodOopDescMsort_methods6FpnPobjArrayOopDesc_222_v_; +text: .text%method_compare: methodOop.o; +text: .text%__1cLklassVtablebKcompute_vtable_size_and_num_mirandas6Fri1pnMklassOopDesc_pnPobjArrayOopDesc_nLAccessFlags_pnHoopDesc_pnNsymbolOopDesc_5_v_; +text: .text%__1cLklassVtableWneeds_new_vtable_entry6FpnNmethodOopDesc_pnMklassOopDesc_pnHoopDesc_pnNsymbolOopDesc_nLAccessFlags__i_; +text: .text%__1cLklassVtableQget_num_mirandas6FpnMklassOopDesc_pnPobjArrayOopDesc_4_i_; +text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_; +text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_; +text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_; +text: .text%__1cPClassFileParserUcompute_oop_map_size6MnTinstanceKlassHandle_ii_i_; +text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlass.o; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: 
instanceKlass.o; +text: .text%__1cNinstanceKlassQinit_implementor6M_v_; +text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cFKlassMoop_is_klass6kM_i_: symbolKlass.o; +text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_; +text: .text%__1cNinstanceKlassbBdo_local_static_fields_impl6FnTinstanceKlassHandle_pFpnPfieldDescriptor_pnGThread__v5_v_; +text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserNfill_oop_maps6MnTinstanceKlassHandle_ii_v_; +text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__; +text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_; +text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__; +text: .text%__1cNmethodOopDescPis_empty_method6kM_i_; +text: .text%__1cPClassFileParserYcheck_super_class_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cTClassLoadingServiceTnotify_class_loaded6FpnNinstanceKlass_i_v_; +text: .text%__1cTClassLoadingServiceScompute_class_size6FpnNinstanceKlass__I_; +text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceKlass.o; +text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLClassLoaderLadd_package6Fpkci_i_; +text: .text%__1cLClassLoaderOlookup_package6Fpkc_pnLPackageInfo__; +text: .text%__1cQPackageHashtableMcompute_hash6Mpkci_I_: classLoader.o; +text: .text%__1cQPackageHashtableJget_entry6MiIpkcI_pnLPackageInfo__: classLoader.o; +text: .text%__1cMstringStream2T6M_v_; +text: .text%__1cSThreadProfilerMark2T6M_v_; +text: .text%__1cFVTuneOend_class_load6F_v_; +text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cQSystemDictionaryRcheck_constraints6FiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cVLoaderConstraintTablePcheck_or_update6MnTinstanceKlassHandle_nGHandle_nMsymbolHandle__pkc_; +text: .text%__1cVLoaderConstraintTableWfind_loader_constraint6MnMsymbolHandle_nGHandle__ppnVLoaderConstraintEntry__; +text: .text%__1cQSystemDictionaryQadd_to_hierarchy6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cFKlassWappend_to_sibling_list6M_v_; +text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_; +text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_; +text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_; +text: .text%__1cQSystemDictionaryRupdate_dictionary6FiIiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: 
.text%__1cQSystemDictionaryQfind_placeholder6FiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: dictionary.o; +text: .text%__1cKDictionaryJnew_entry6MIpnMklassOopDesc_pnHoopDesc__pnPDictionaryEntry__; +text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_; +text: .text%__1cMObjectLocker2T6M_v_; +text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cPClassFileParserbIparse_constant_pool_fieldref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbSparse_constant_pool_interfacemethodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbEparse_constant_pool_long_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_; +text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cKJavaThreadNis_lock_owned6kMpC_i_; +text: .text%__1cGThreadLis_in_stack6kMpC_i_; +text: .text%__1cQSystemDictionaryVresolve_super_or_fail6FnMsymbolHandle_1nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cKReflectionTverify_class_access6FpnMklassOopDesc_2i_i_; +text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_modifiers6MiipnGThread__v_; +text: .text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_; +text: .text%__1cPClassFileParserWparse_field_attributes6MnSconstantPoolHandle_iHpHpi2pnPtypeArrayHandle_pnGThread__v_; +text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__; +text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cJchar2type6Fc_nJBasicType__: fieldType.o; +text: .text%__1cRSignatureIteratorSskip_optional_size6M_v_; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: frame.o; +text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: frame.o; +text: .text%__1cNSignatureInfoHdo_char6M_v_: frame.o; +text: .text%__1cNSignatureInfoIdo_float6M_v_: frame.o; +text: .text%__1cNSignatureInfoJdo_double6M_v_: frame.o; +text: .text%__1cbDreorder_based_on_method_index6FpnPobjArrayOopDesc_1ppnHoopDesc__v_: methodOop.o; +text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_; +text: .text%__1cLklassVtableKis_miranda6FpnNmethodOopDesc_pnPobjArrayOopDesc_pnMklassOopDesc__i_; +text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_; +text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_; +text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o; +text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_; +text: 
.text%__1cFKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cNmethodOopDescWis_vanilla_constructor6kM_i_; +text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_; +text: .text%__1cPClassFileParserXjava_lang_Class_fix_pre6MpnOobjArrayHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cPClassFileParserYjava_lang_Class_fix_post6Mpi_v_; +text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_; +text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_; +text: .text%__1cKSharedHeapYpermanent_object_iterate6MpnNObjectClosure__v_: genCollectedHeap.o; +text: .text%__1cHPermGenOobject_iterate6MpnNObjectClosure__v_: permGen.o; +text: .text%__1cRCompactingPermGenGas_gen6kM_pnKGeneration__: permGen.o; +text: .text%__1cbCOneContigSpaceCardGenerationOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cPContiguousSpaceOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cPContiguousSpaceTobject_iterate_from6MnJWaterMark_pnNObjectClosure__v_; +text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_: universe.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: klassKlass.o; +text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: instanceKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: instanceKlass.o; +text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: klassKlass.o; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: arrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_: objArrayKlassKlass.o; +text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_: instanceKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_: symbolKlass.o; +text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_: typeArrayKlassKlass.o; +text: .text%__1cKarrayKlassLobject_size6kMi_i_; +text: .text%__1cLmethodKlassOklass_oop_size6kM_i_: methodKlass.o; +text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_: constMethodKlass.o; +text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_: methodDataKlass.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: arrayKlassKlass.o; +text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_: constantPoolKlass.o; +text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_: cpCacheKlass.o; +text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_: compiledICHolderKlass.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_: objArrayKlassKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: objArrayKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodKlass.o; +text: .text%__1cSinstanceKlassKlassMoop_is_klass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: instanceKlass.o; +text: 
.text%__1cFKlassXsearch_secondary_supers6kMpnMklassOopDesc__i_; +text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_; +text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceRefKlass.o; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceRefKlass.o; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceRefKlass.o; +text: .text%__1cKReflectionVis_same_class_package6FpnMklassOopDesc_2_i_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnMklassOopDesc__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceRefKlass.o; +text: .text%__1cNinstanceKlassVis_same_class_package6FpnHoopDesc_pnNsymbolOopDesc_24_i_; +text: .text%__1cEUTF8Hstrrchr6FpWiW_1_; +text: .text%__1cEUTF8Fequal6FpWi1i_i_; +text: .text%__1cPClassFileParserbFparse_constant_pool_float_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: frame.o; +text: .text%__1cNSignatureInfoIdo_short6M_v_: frame.o; +text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlass.o; +text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_; +text: .text%__1cPClassFileParserbGparse_constant_pool_double_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_; +text: .text%__1cQSystemDictionarybDinitialize_basic_type_mirrors6FpnGThread__v_; +text: .text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cNobjArrayKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cXreferenceProcessor_init6F_v_; +text: .text%__1cSReferenceProcessorMinit_statics6F_v_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_; +text: .text%__1cQjni_handles_init6F_v_; +text: .text%__1cKJNIHandlesKinitialize6F_v_; +text: .text%__1cOvmStructs_init6F_v_; +text: .text%__1cVverificationType_init6F_v_; +text: .text%__1cQVerificationTypeKinitialize6F_v_; +text: .text%__1cOcompiler1_init6F_v_; +text: .text%__1cKSharedInfoKset_stack06Fi_v_; +text: .text%__1cKSharedInfoLset_regName6F_v_; +text: .text%__1cIRegAllocYinit_register_allocation6F_v_; +text: .text%__1cIFrameMapEinit6F_v_; +text: .text%__1cKc1_RegMaskKinit_masks6Fi_v_; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_FrameMap_x86.o; +text: .text%__1cNc1_AllocTableLinit_tables6F_v_; +text: .text%__1cIFrameMapOfirst_register6F_pnMRegisterImpl__; +text: .text%__1cIFrameMapLcpu_reg2rnr6FpnMRegisterImpl__i_; +text: .text%__1cIFrameMapLcpu_rnr2reg6Fi_pnMRegisterImpl__; +text: .text%__1cIRuntime1Kinitialize6F_v_; +text: .text%__1cKCodeBufferRinsts_memory_size6Fi_i_; +text: .text%__1cKCodeBufferQlocs_memory_size6Fi_i_; +text: .text%__1cIRuntime1Ninitialize_pd6F_v_; +text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_; +text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffer_ipnLOopRecorder_pkcii_v_; +text: .text%__1cKCodeBufferQalloc_relocation6MI_v_; +text: .text%__1cJOopMapSet2t6M_v_; +text: .text%__1cJAssemblerEsubl6MnHAddress_i_v_; +text: .text%__1cTsave_live_registers6FpnOMacroAssembler_i_pnGOopMap__: c1_Runtime1_x86.o; +text: .text%__1cJAssemblerGfldenv6MnHAddress__v_; +text: .text%__1cGOopMap2t6Mii_v_; +text: 
.text%__1cGOopMapQset_callee_saved6MnHOptoRegEName_ii2_v_; +text: .text%__1cGOopMapHset_xxx6MnHOptoRegEName_nLOopMapValueJoop_types_ii2_v_; +text: .text%__1cGOopMapbEmap_compiler_reg_to_oopmap_reg6MnHOptoRegEName_ii_nFVMRegEName__; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: oopMap.o; +text: .text%__1cIFrameMapRfpu_stack_regname6Fi_nHOptoRegEName__; +text: .text%__1cKRelocationJpack_data6M_i_: codeBlob.o; +text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_; +text: .text%__1cJrelocInfo2t6Mn0AJrelocType_ii_v_; +text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_22pC_v_; +text: .text%__1cJOopMapSetKadd_gc_map6MiipnGOopMap__v_; +text: .text%__1cOMacroAssemblerVreset_last_Java_frame6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerEdecl6MnHAddress__v_; +text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_; +text: .text%__1cICodeBlobPallocation_size6FpnKCodeBuffer_ii_I_; +text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_; +text: .text%__1cSDeoptimizationBlob2n6FII_pv_; +text: .text%__1cSDeoptimizationBlob2t6MpnKCodeBuffer_ipnJOopMapSet_iiii_v_; +text: .text%__1cICodeBlob2t6MpkcpnKCodeBuffer_iiipnJOopMapSet_i_v_; +text: .text%__1cKCodeBufferPcopy_relocation6MpnICodeBlob__v_; +text: .text%__1cNRelocIteratorMcreate_index6FpnKCodeBuffer_pnJrelocInfo_4_4_; +text: .text%__1cKCodeBufferJcopy_code6MpnICodeBlob__v_; +text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_; +text: .text%__1cICodeBlobWfix_relocation_at_move6Mi_v_; +text: .text%__1cNRelocIteratorKinitialize6MipnICodeBlob_pC3_v_; +text: .text%__1cNRelocIteratorKset_limits6MpC1_v_; +text: .text%__1cVPatchingRelocIteratorHprepass6M_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: relocInfo.o; +text: .text%__1cNRelocIteratorEnext6M_i_: codeBlob.o; +text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__; +text: .text%__1cOCallRelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cOCallRelocationFvalue6M_pC_: codeBlob.o; +text: .text%__1cKRelocationTpd_call_destination6M_pC_; +text: .text%__1cKNativeCallLdestination6kM_pC_; +text: .text%__1cOCallRelocationPset_destination6MpCi_v_; +text: .text%__1cKRelocationXpd_set_call_destination6MpCi_v_; +text: .text%__1cRNativeInstructionFwrote6Mi_v_; +text: .text%__1cKRelocationLunpack_data6M_v_: relocInfo.o; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: relocInfo.o; +text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_; +text: .text%__1cJOopMapSetJheap_size6kM_i_; +text: .text%__1cGOopMapJheap_size6kM_i_; +text: .text%__1cJOopMapSetHcopy_to6MpC_v_; +text: .text%__1cGOopMapHcopy_to6MpC_v_; +text: .text%__1cIRuntime1Rgenerate_blob_for6Fn0AGStubID__v_; +text: .text%__1cIRuntime1Pnew_code_buffer6F_pnKCodeBuffer__; +text: .text%__1cLOopRecorder2t6MpnFArena__v_; +text: .text%__1cNStubAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cIRuntime1Rgenerate_code_for6Fn0AGStubID_pnNStubAssembler_pi_pnJOopMapSet__; +text: .text%__1cIRuntime1Iname_for6Fn0AGStubID__pkc_; +text: .text%__1cLRuntimeStubQnew_runtime_stub6FpkcpnKCodeBuffer_ipnJOopMapSet_i_p0_; +text: .text%__1cLRuntimeStub2n6FII_pv_; +text: .text%__1cLRuntimeStub2t6MpkcpnKCodeBuffer_iipnJOopMapSet_i_v_; +text: .text%__1cJStubFrame2t6MpnNStubAssembler_pkci_v_; +text: .text%__1cNStubAssemblerIset_info6Mpkci_v_; +text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pC2_i_; +text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pCi_i_; +text: .text%__1cJStubFrame2T6M_v_; +text: .text%__1cIRuntime1Ygenerate_exception_throw6FpnNStubAssembler_pCpnMRegisterImpl__pnJOopMapSet__; +text: 
.text%__1cOMacroAssemblerLtlab_refill6MrnFLabel_22_v_; +text: .text%__1cLlog2_intptr6Fi_i_: assembler_x86.o; +text: .text%__1cOMacroAssemblerNeden_allocate6MpnMRegisterImpl_2i2rnFLabel__v_; +text: .text%__1cOMacroAssemblerLverify_tlab6M_v_; +text: .text%__1cLlog2_intptr6Fi_i_: c1_Runtime1_x86.o; +text: .text%__1cOMacroAssemblerNtlab_allocate6MpnMRegisterImpl_2i22rnFLabel__v_; +text: .text%__1cRC1_MacroAssemblerRinitialize_object6MpnMRegisterImpl_22i22_v_; +text: .text%__1cRC1_MacroAssemblerRinitialize_header6MpnMRegisterImpl_22_v_; +text: .text%__1cRC1_MacroAssemblerPinitialize_body6MpnMRegisterImpl_2i2_v_; +text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pC22_i_; +text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pC222_i_; +text: .text%__1cIRuntime1Iblob_for6Fn0AGStubID__pnICodeBlob__; +text: .text%__1cJStubFrame2t6MpnNStubAssembler_pkcipnMRegisterImpl_6_v_; +text: .text%__1cJStubFrame2t6MpnNStubAssembler_pkcipnMRegisterImpl__v_; +text: .text%__1cIiEntries2t6Miiii_v_; +text: .text%__1cRNativeGeneralJumpQjump_destination6kM_pC_; +text: .text%__1cJAssemblerOlocate_operand6FpCn0AMWhichOperand__1_; +text: .text%__1cIRuntime1Rgenerate_patching6FpnNStubAssembler_pC_pnJOopMapSet__; +text: .text%__1cWrestore_live_registers6FpnOMacroAssembler__v_: c1_Runtime1_x86.o; +text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNSafepointBlob2n6FII_pv_; +text: .text%__1cNSafepointBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cJAssemblerFfldcw6MnHAddress__v_; +text: .text%__1cJAssemblerGfnstcw6MnHAddress__v_; +text: .text%__1cJAssemblerHfcomp_d6MnHAddress__v_; +text: .text%__1cIiEntriesIset_base6MpC_v_; +text: .text%__1cQvtableStubs_init6F_v_; +text: .text%__1cLVtableStubsKinitialize6F_v_; +text: .text%__1cWInlineCacheBuffer_init6F_v_; +text: .text%__1cRInlineCacheBufferKinitialize6F_v_; +text: .text%__1cRInlineCacheBufferOinit_next_stub6F_v_; +text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_; +text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__; +text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_: icBuffer.o; +text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_: icBuffer.o; +text: .text%__1cTcompilerOracle_init6F_v_; +text: .text%__1cOCompilerOracleRparse_from_string6Fpkc_v_; +text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_; +text: .text%__1cOCompilerOraclePparse_from_file6F_v_; +text: .text%__1cHcc_file6F_pkc_: compilerOracle.o; +text: .text%__1cXonStackReplacement_init6F_v_; +text: .text%__1cSOnStackReplacementKinitialize6F_v_; +text: .text%__1cUGenericGrowableArrayPraw_at_put_grow6MipknEGrET_3_v_; +text: .text%__1cWcompilationPolicy_init6F_v_; +text: .text%__1cSuniverse_post_init6F_v_; +text: .text%__1cIUniverseWreinitialize_vtable_of6FpnFKlass_pnGThread__v_; +text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceKlass.o; +text: .text%__1cLklassVtableVinitialize_from_super6MnLKlassHandle__i_; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlass.o; +text: .text%__1cLklassVtableTupdate_super_vtable6MpnNinstanceKlass_pnNmethodOopDesc_i_i_; +text: .text%__1cLklassVtableNput_method_at6MpnNmethodOopDesc_i_v_; +text: .text%__1cLklassVtableQfill_in_mirandas6Mri_v_; +text: .text%__1cFKlassIsubklass6kM_p0_; +text: .text%__1cLklassVtableOcopy_vtable_to6MpnLvtableEntry__v_; +text: .text%__1cLklassVtableXvtable_accessibility_at6Mi_n0AKAccessType__; +text: .text%__1cFKlassMnext_sibling6kM_p0_; +text: 
.text%__1cKarrayKlassMoop_is_array6kM_i_: objArrayKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlass.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: constantPoolKlass.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: typeArrayKlass.o; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceRefKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceRefKlass.o; +text: .text%__1cIUniverseUreinitialize_itables6F_v_; +text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_; +text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_; +text: .text%__1cbBinitialize_itable_for_klass6FpnMklassOopDesc__v_; +text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__; +text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_; +text: .text%__1cLklassItableRinitialize_itable6M_v_; +text: .text%__1cLklassItablebFinitialize_itable_for_interface6MpnMklassOopDesc_pnRitableMethodEntry__v_; +text: .text%__1cRitableMethodEntryKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6FnTinstanceKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlass.o; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6FnTobjArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: instanceKlass.o; +text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_I_pnIHeapWord__; +text: .text%__1cQGenCollectedHeapVunsafe_max_tlab_alloc6kM_I_; +text: .text%__1cQDefNewGenerationVunsafe_max_tlab_alloc6kM_I_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationVunsafe_max_alloc_nogc6kM_I_; +text: .text%__1cPContiguousSpaceEfree6kM_I_: space.o; +text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_; +text: .text%__1cWThreadLocalAllocBufferFreset6M_v_; +text: .text%__1cQGenCollectedHeapRallocate_new_tlab6MI_pnIHeapWord__; +text: .text%__1cQGenCollectedHeapMmem_allocate6MIii_pnIHeapWord__; +text: .text%__1cbCTwoGenerationCollectorPolicyRmem_allocate_work6MIii_pnIHeapWord__; +text: .text%__1cQGenCollectedHeapEheap6F_p0_; +text: .text%__1cQDefNewGenerationPshould_allocate6MIii_i_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationMpar_allocate6MIii_pnIHeapWord__: defNewGeneration.o; +text: .text%__1cJEdenSpaceMpar_allocate6MI_pnIHeapWord__; +text: .text%__1cPContiguousSpaceRpar_allocate_impl6MIkpnIHeapWord__2_: space.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: collectedHeap.o; +text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2I_v_; +text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__; +text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__; +text: 
.text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cEUTF8Ounicode_length6Fpkc_i_; +text: .text%__1cEUTF8Enext6FpkcpH_pc_; +text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cQGenCollectedHeapVlarge_typearray_limit6M_I_; +text: .text%__1cbCTwoGenerationCollectorPolicyYis_two_generation_policy6M_i_: collectorPolicy.o; +text: .text%__1cbCTwoGenerationCollectorPolicyVlarge_typearray_limit6M_I_; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: typeArrayKlass.o; +text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_; +text: .text%__1cQjava_lang_StringQbasic_create_oop6FpnQtypeArrayOopDesc_ipnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_; +text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_; +text: .text%__1cNinstanceKlassPlink_class_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassLverify_code6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIVerifierRverify_byte_codes6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassWadd_loader_constraints6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_; +text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIRewriterScompute_index_maps6FnSconstantPoolHandle_rpnIintArray_rpnIintStack__v_; +text: .text%__1cIintArray2t6Mki1_v_: rewriter.o; +text: .text%__1cIRewriterXnew_constant_pool_cache6FrnIintArray_pnGThread__nXconstantPoolCacheHandle__; +text: .text%__1cKoopFactoryVnew_constantPoolCache6FipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cWconstantPoolCacheKlassIallocate6MipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: cpCacheKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: cpCacheKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: cpCacheKlass.o; +text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_; +text: .text%__1cWConstantPoolCacheEntryRset_initial_state6Mi_v_; +text: .text%__1cIRewriterOrewrite_method6FnMmethodHandle_rnIintArray_pnGThread__1_; +text: .text%__1cNmethodOopDescLlink_method6FnMmethodHandle__v_; +text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__; +text: .text%__1cNmethodOopDescbGupdate_compiled_code_entry_point6Mi_v_; +text: .text%__1cIRuntime1Mientries_for6FnMmethodHandle__pnIiEntries__; +text: .text%__1cNmethodOopDescLis_accessor6kM_i_; +text: .text%__1cNmethodOopDescMintrinsic_id6kM_n0ALIntrinsicId__; +text: .text%__1cQSystemDictionaryXcheck_signature_loaders6FnMsymbolHandle_nGHandle_2ipnGThread__v_; +text: .text%__1cNSharedRuntimebIinitialize_StrictMath_entry_points6F_v_; +text: .text%__1cNSharedRuntimeUlookup_function_DD_D6FrpFpnHJNIEnv__pnH_jclass_dd_dpkc_v_; +text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_; +text: .text%__1cMNativeLookupGlookup6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cNmethodOopDescThas_native_function6kM_i_; +text: .text%__1cMNativeLookupLlookup_base6FnMmethodHandle_ripnGThread__pC_; +text: 
.text%__1cMNativeLookupNpure_jni_name6FnMmethodHandle__pc_; +text: .text%__1cMoutputStreamFprint6MpkcE_v_; +text: .text%__1cMoutputStreamMdo_vsnprintf6FpcIpkcpvirI_3_; +text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__; +text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc__v_: nativeLookup.o; +text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc_ii_v_: nativeLookup.o; +text: .text%__1cMoutputStreamDput6Mc_v_; +text: .text%__1cMNativeLookupMlookup_style6FnMmethodHandle_pcpkciiripnGThread__pC_; +text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_; +text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_; +text: .text%__1cVlookup_special_native6Fpc_pC_: nativeLookup.o; +text: .text%__1cbEinitialize_converter_functions6F_v_; +text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_; +text: .text%__1cQGenCollectedHeapIcapacity6kM_I_; +text: .text%__1cQDefNewGenerationIcapacity6kM_I_; +text: .text%__1cQGenCollectedHeapEused6kM_I_; +text: .text%__1cQDefNewGenerationEused6kM_I_; +text: .text%__1cbCOneContigSpaceCardGenerationEused6kM_I_; +text: .text%__1cQGenCollectedHeapPpost_initialize6M_v_; +text: .text%__1cQGenCollectedHeapTref_processing_init6M_v_; +text: .text%__1cKSharedHeapTref_processing_init6M_v_; +text: .text%__1cKGenerationSref_processor_init6M_v_; +text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: compactingPermGenGen.o; +text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: compactingPermGenGen.o; +text: .text%__1cSReferenceProcessor2t6MnJMemRegion_iii_v_; +text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: defNewGeneration.o; +text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: defNewGeneration.o; +text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: tenuredGeneration.o; +text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: tenuredGeneration.o; +text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_; +text: .text%__1cQGenCollectedHeapEkind6M_nNCollectedHeapEName__: genCollectedHeap.o; +text: .text%__1cNMemoryServicebBadd_gen_collected_heap_info6FpnQGenCollectedHeap__v_; +text: .text%__1cPMarkSweepPolicyUis_mark_sweep_policy6M_i_: collectorPolicy.o; +text: .text%__1cNMemoryManagerXget_copy_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cPGCMemoryManager2t6M_v_; +text: .text%__1cNMemoryManagerWget_msc_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cNMemoryServicebAadd_generation_memory_pool6FpnKGeneration_pnNMemoryManager_4_v_; +text: .text%__1cQDefNewGenerationEkind6M_nKGenerationEName__: defNewGeneration.o; +text: .text%__1cNMemoryServiceJadd_space6FpnPContiguousSpace_pkciIi_pnKMemoryPool__; +text: .text%__1cTContiguousSpacePool2t6MpnPContiguousSpace_pkcnKMemoryPoolIPoolType_Ii_v_; +text: .text%__1cNMemoryServiceTadd_survivor_spaces6FpnQDefNewGeneration_pkciIi_pnKMemoryPool__; +text: .text%__1cbBSurvivorContiguousSpacePool2t6MpnQDefNewGeneration_pkcnKMemoryPoolIPoolType_Ii_v_; +text: .text%__1cRTenuredGenerationEkind6M_nKGenerationEName__: tenuredGeneration.o; +text: .text%__1cNMemoryServiceHadd_gen6FpnKGeneration_pkcii_pnKMemoryPool__; +text: .text%__1cOGenerationPool2t6MpnKGeneration_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cKGenerationMmax_capacity6kM_I_; +text: .text%__1cNMemoryServicebGadd_compact_perm_gen_memory_pool6FpnUCompactingPermGenGen_pnNMemoryManager__v_; +text: .text%__1cQGenCollectedHeapNgc_threads_do6kMpnNThreadClosure__v_; +text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_; +text: .text%__1cKGCStatInfo2t6Mi_v_; +text: 
.text%__1cQjavaClasses_init6F_v_; +text: .text%__1cLJavaClassesPcompute_offsets6F_v_; +text: .text%__1cQjava_lang_SystemPcompute_offsets6F_v_; +text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_; +text: .text%__1cNinstanceKlassQfind_local_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__i_; +text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_; +text: .text%__1cbIjava_security_AccessControlContextPcompute_offsets6F_v_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectPcompute_offsets6F_v_; +text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_; +text: .text%__1cPjava_nio_BufferPcompute_offsets6F_v_; +text: .text%__1cYsun_reflect_ConstantPoolPcompute_offsets6F_v_; +text: .text%__1cZsun_misc_AtomicLongCSImplPcompute_offsets6F_v_; +text: .text%__1cSstubRoutines_init26F_v_; +text: .text%__1cMStubRoutinesLinitialize26F_v_; +text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_x86.o; +text: .text%__1cNStubGeneratorTgenerate_verify_oop6M_pC_: stubGenerator_x86.o; +text: .text%__1cJAssemblerEincl6MnHAddress__v_; +text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_; +text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_; +text: .text%__1cGThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cGThreadVis_jvmti_agent_thread6kM_i_: thread.o; +text: .text%__1cGEventsDlog6FpkcE_v_: thread.o; +text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_; +text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_; +text: .text%__1cUGenericGrowableArrayUclear_and_deallocate6M_v_; +text: .text%__1cIVMThreadGcreate6F_v_; +text: .text%__1cIVMThread2t6M_v_; +text: .text%__1cQVMOperationQdDueue2t6M_v_; +text: .text%__1cCosNcreate_thread6FpnGThread_n0AKThreadType_I_i_; +text: .text%__1cCosMstart_thread6FpnGThread__v_; +text: .text%__1cCosPpd_start_thread6FpnGThread__v_; +text: .text%__1cHMonitorEwait6Mil_i_; +text: .text%_start: os_solaris.o; +text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__; +text: .text%__1cQset_lwp_priority6Fiii_i_; +text: .text%__1cVscale_to_lwp_priority6Fiii_i_: os_solaris.o; +text: .text%__1cIVMThreadMis_VM_thread6kM_i_: vmThread.o; +text: .text%__1cIVMThreadDrun6M_v_; +text: .text%__1cHMonitorGnotify6M_i_; +text: .text%__1cIVMThreadEloop6M_v_; +text: .text%__1cQVMOperationQdDueueLremove_next6M_pnMVM_Operation__; +text: .text%__1cQVMOperationQdDueueLqueue_empty6Mi_i_; +text: .text%__1cQVMOperationQdDueueSqueue_remove_front6Mi_pnMVM_Operation__; +text: .text%__1cLJvmtiExportRenter_start_phase6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_start6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_start6F_v_; +text: .text%__1cQinitialize_class6FnMsymbolHandle_pnGThread__v_: thread.o; +text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_; +text: .text%__1cNinstanceKlassVshould_be_initialized6kM_i_; +text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassWcall_class_initializer6MpnGThread__v_; +text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassRclass_initializer6M_pnNmethodOopDesc__; +text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_; +text: 
.text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cRCompilationPolicyOmustBeCompiled6FnMmethodHandle__i_; +text: .text%__1cRCompilationPolicyNcanBeCompiled6FnMmethodHandle__i_; +text: .text%__1cNmethodOopDescRis_not_compilable6kMi_i_; +text: .text%__1cRruntime_type_from6FpnJJavaValue__nJBasicType__: javaCalls.o; +text: .text%__1cCosbCstack_shadow_pages_available6FpnGThread_nMmethodHandle__i_; +text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_; +text: .text%__1cPJavaCallWrapper2t6MnMmethodHandle_nGHandle_pnJJavaValue_pnGThread__v_; +text: .text%__1cGThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%__1cGThreadXclear_pending_exception6M_v_; +text: .text%__1cRJavaCallArgumentsKparameters6M_pi_; +text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cKJavaThreadPcook_last_frame6MnFframe__1_; +text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__; +text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_; +text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_; +text: .text%__1cMLinkResolverUresolve_invokestatic6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverNresolve_klass6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__; +text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceKlass.o; +text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_; +text: .text%__1cTconstantPoolOopDescMklass_at_put6MipnMklassOopDesc__v_: constantPoolOop.o; +text: .text%__1cTconstantPoolOopDescLname_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cTconstantPoolOopDescQsignature_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cMLinkResolverTresolve_static_call6FrnICallInfo_rnLKlassHandle_nMsymbolHandle_53iipnGThread__v_; +text: .text%__1cMLinkResolverbElinktime_resolve_static_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverYlookup_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_; +text: .text%__1cKReflectionTverify_field_access6FpnMklassOopDesc_22nLAccessFlags_ii_i_; +text: .text%__1cICallInfoDset6MnLKlassHandle_nMmethodHandle_pnGThread__v_; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2ipnGThread__v_; +text: .text%__1cSInterpreterRuntimeLcache_entry6FpnKJavaThread__pnWConstantPoolCacheEntry__: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_; +text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__; +text: 
.text%__1cRSignatureIteratorSiterate_returntype6M_v_; +text: .text%__1cNSignatureInfoHdo_void6M_v_: bytecode.o; +text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_: bytecode.o; +text: .text%__1cRSignatureIteratorTcheck_signature_end6M_v_; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: cpCacheOop.o; +text: .text%__1cNmethodOopDescPis_final_method6kM_i_; +text: .text%__1cWConstantPoolCacheEntryIas_flags6MnITosState_iiiii_i_; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_16MnJBytecodesECode__v_; +text: .text%__1cWConstantPoolCacheEntryGverify6kMpnMoutputStream__v_; +text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_; +text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_; +text: .text%__1cXSignatureHandlerLibraryKinitialize6F_v_; +text: .text%__1cXSignatureHandlerLibraryQset_handler_blob6F_pC_; +text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_; +text: .text%__1cNFingerprinterLfingerprint6M_X_: interpreterRuntime.o; +text: .text%__1cNGrowableArray4CX_Efind6kMkX_i_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6MX_v_; +text: .text%__1cRSignatureIteratorSiterate_parameters6MX_v_; +text: .text%__1cXSignatureHandlerLibraryLset_handler6FpnKCodeBuffer__pC_; +text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_; +text: .text%jni_RegisterNatives: jni.o; +text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__; +text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__; +text: .text%__1cPregister_native6FnLKlassHandle_nMsymbolHandle_1pCpnGThread__i_: jni.o; +text: .text%__1cPJavaCallWrapper2T6M_v_; +text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_; +text: .text%__1cNinstanceKlassbJset_initialization_state_and_notify6Mn0AKClassState_pnGThread__v_; +text: .text%__1cNinstanceKlassbOset_initialization_state_and_notify_impl6FnTinstanceKlassHandle_n0AKClassState_pnGThread__v_; +text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_; +text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_; +text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_ipnGThread__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_iipnGThread__v_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_; +text: .text%__1cPFieldAccessInfoDset6MnLKlassHandle_nMsymbolHandle_iinJBasicType_nLAccessFlags__v_; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryJset_field6MnJBytecodesECode_2nLKlassHandle_iinITosState_ii_v_; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_26MnJBytecodesECode__v_; +text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_; +text: .text%__1cNinstanceKlassbDcheck_valid_for_instantiation6MipnGThread__v_; +text: .text%__1cMLinkResolverVresolve_invokespecial6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverUresolve_special_call6FrnICallInfo_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbFlinktime_resolve_special_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: 
.text%__1cMLinkResolverbEruntime_resolve_special_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_4ipnGThread__v_; +text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_: cpCacheOop.o; +text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_: cpCacheOop.o; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: bytecode.o; +text: .text%__1cNSignatureInfoHdo_long6M_v_: bytecode.o; +text: .text%JVM_CurrentTimeMillis; +text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cSInterpreterRuntimeDldc6FpnKJavaThread_i_v_; +text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cLsymbolKlassNoop_is_symbol6kM_i_: symbolKlass.o; +text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_; +text: .text%__1cEUTF8Ounicode_length6Fpkci_i_; +text: .text%__1cLStringTableGintern6FnGHandle_pHipnGThread__pnHoopDesc__; +text: .text%__1cLStringTableLhash_string6FpHi_i_; +text: .text%__1cLStringTableGlookup6MipHiI_pnHoopDesc__; +text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__; +text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__; +text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cQjava_lang_StringMbasic_create6FpnQtypeArrayOopDesc_ipnGThread__nGHandle__; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cNmethodOopDescIbci_from6kMpC_i_; +text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__; +text: .text%__1cPBytecode_invokeFindex6kM_i_; +text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_; +text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__; +text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverUresolve_virtual_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cMLinkResolverbFlinktime_resolve_virtual_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_virtual_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cFKlassXcan_be_statically_bound6FpnNmethodOopDesc__i_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: bytecode.o; +text: .text%__1cNSignatureInfoHdo_char6M_v_: bytecode.o; +text: .text%__1cFKlassOis_subclass_of6kMpnMklassOopDesc__i_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnHoopDesc_pnNsymbolOopDesc__i_; +text: .text%__1cJBytecodesRspecial_length_at6FpC_i_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_; +text: .text%__1cMNativeLookupNlong_jni_name6FnMmethodHandle__pc_; +text: .text%__1cNFingerprinterJdo_object6Mii_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRuntime.o; +text: 
.text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorDbox6Mii_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEfrom6F_pnMRegisterImpl__; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorCto6F_pnMRegisterImpl__; +text: .text%JVM_DoPrivileged; +text: .text%__1cLmethodKlassNoop_is_method6kM_i_: methodKlass.o; +text: .text%__1cMvframeStream2t6MpnKJavaThread_i_v_; +text: .text%__1cLRegisterMap2t6MpnKJavaThread_i_v_; +text: .text%__1cLRegisterMapFclear6Mpi_v_; +text: .text%__1cSvframeStreamCommonPfill_from_frame6M_i_; +text: .text%__1cFframeUis_interpreted_frame6kM_i_; +text: .text%__1cSvframeStreamCommonbBfill_from_interpreter_frame6M_v_; +text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_; +text: .text%__1cSvframeStreamCommonbHskip_method_invoke_and_aux_frames6M_v_; +text: .text%__1cSvframeStreamCommonEnext6M_v_; +text: .text%__1cFframeGsender6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cFframeOis_entry_frame6kM_i_; +text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_; +text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__; +text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__; +text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: bytecode.o; +text: .text%__1cNSharedRuntimeDf2i6Ff_i_; +text: .text%jni_FindClass: jni.o; +text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__; +text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__; +text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_; +text: .text%jni_NewGlobalRef: jni.o; +text: .text%__1cKJNIHandlesLmake_global6FnGHandle_i_pnI_jobject__; +text: .text%jni_GetStringUTFChars: jni.o; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_; +text: .text%__1cHUNICODEHas_utf86FpHi_pc_; +text: .text%__1cHUNICODELutf8_length6FpHi_i_; +text: .text%__1cKutf8_write6FpCH_0_: utf8.o; +text: .text%JVM_FindPrimitiveClass; +text: .text%__1cJname2type6Fpkc_nJBasicType__; +text: .text%jni_ReleaseStringUTFChars; +text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o; +text: .text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_; +text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_; +text: .text%JVM_CurrentThread; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: bytecode.o; +text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_; +text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNFingerprinterGdo_int6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEmove6Mii_v_; +text: .text%JVM_ArrayCopy; +text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cOtypeArrayKlassQoop_is_typeArray6kM_i_: typeArrayKlass.o; +text: .text%JVM_GetStackAccessControlContext; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_; +text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__; +text: .text%__1cJCodeCacheQfind_blob_unsafe6Fpv_pnICodeBlob__; +text: 
.text%__1cICodeHeapKfind_start6kMpv_1_; +text: .text%__1cICodeBlobJis_zombie6kM_i_: codeBlob.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: codeBlob.o; +text: .text%__1cFframeOis_first_frame6kM_i_; +text: .text%__1cFframeUentry_frame_is_first6kM_i_; +text: .text%__1cbIjava_security_AccessControlContextGcreate6FnOobjArrayHandle_inGHandle_pnGThread__pnHoopDesc__; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_; +text: .text%__1cTJvmtiEventCollectorYunset_jvmti_thread_state6M_v_; +text: .text%JVM_GetInheritedAccessControlContext; +text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_; +text: .text%JVM_SetThreadPriority; +text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__; +text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_; +text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__; +text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_; +text: .text%JVM_IsThreadAlive; +text: .text%__1cQjava_lang_ThreadIis_alive6FpnHoopDesc__i_; +text: .text%JVM_StartThread; +text: .text%__1cQjava_lang_ThreadMis_stillborn6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_; +text: .text%__1cKJavaThread2t6MpFp0pnGThread__vI_v_; +text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_; +text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__; +text: .text%__1cQjava_lang_ThreadJis_daemon6FpnHoopDesc__i_; +text: .text%__1cGThreadFstart6Fp0_v_; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceRefKlass.o; +text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cKJavaThreadDrun6M_v_; +text: .text%__1cKJavaThreadRthread_main_inner6M_v_; +text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_; +text: .text%__1cNFingerprinterHdo_long6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_: interpreterRuntime.o; +text: .text%JVM_MonitorWait; +text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__; +text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_; +text: .text%__1cSObjectSynchronizerHinflate6FpnHoopDesc__pnNObjectMonitor__; +text: .text%__1cNObjectMonitor2t6M_v_; +text: .text%__1cNObjectMonitorHRecycle6M_v_; +text: .text%__1cNObjectMonitorEwait6MxipnGThread__v_; +text: .text%__1cGThreadOis_interrupted6Fp0i_i_; +text: .text%__1cCosOis_interrupted6FpnGThread_i_i_; +text: .text%__1cNObjectMonitorEexit6MpnGThread__v_; +text: .text%__1cCosHSolarisFEventEpark6M_v_: objectMonitor_solaris.o; +text: .text%jni_GetObjectClass: jni.o; +text: .text%jni_GetMethodID: jni.o; +text: .text%__1cNget_method_id6FpnHJNIEnv__pnH_jclass_pkc5ipnGThread__pnK_jmethodID__: jni.o; +text: .text%__1cPjava_lang_ClassMis_primitive6FpnHoopDesc__i_; +text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__; +text: .text%__1cMjniIdSupportNto_jmethod_id6FpnNmethodOopDesc__pnK_jmethodID__; +text: .text%__1cMjniIdPrivateGid_for6FnTinstanceKlassHandle_i_i_: jniId.o; +text: 
.text%__1cIjniIdMapGcreate6FnTinstanceKlassHandle__p0_; +text: .text%__1cIjniIdMapRcompute_index_cnt6FnTinstanceKlassHandle__i_; +text: .text%__1cIjniIdMap2t6MpnMklassOopDesc_i_v_; +text: .text%__1cLjniIdBucket2t6MpnIjniIdMap_p0_v_; +text: .text%jni_NewStringUTF: jni.o; +text: .text%jni_CallObjectMethod: jni.o; +text: .text%__1cMjniIdSupportNto_method_oop6FpnK_jmethodID__pnNmethodOopDesc__; +text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_; +text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cNFingerprinterLfingerprint6M_X_: jni.o; +text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6MX_v_: jni.o; +text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_: jni.o; +text: .text%jni_ExceptionOccurred: jni.o; +text: .text%__1cbAjni_check_async_exceptions6FpnKJavaThread__v_: jni.o; +text: .text%__1cKJavaThreadbHcheck_and_handle_async_exceptions6Mi_v_; +text: .text%jni_DeleteLocalRef: jni.o; +text: .text%__1cOJNIHandleBlockRrebuild_free_list6M_v_; +text: .text%jni_EnsureLocalCapacity; +text: .text%jni_GetStaticMethodID: jni.o; +text: .text%jni_CallStaticObjectMethodV: jni.o; +text: .text%__1cRjni_invoke_static6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%jni_ExceptionCheck: jni.o; +text: .text%jni_NewString: jni.o; +text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__; +text: .text%JVM_InitProperties; +text: .text%__1cMset_property6FnGHandle_pkc2pnGThread__v_: jvm.o; +text: .text%__1cQjava_lang_StringbHcreate_from_platform_depended_str6FpkcpnGThread__nGHandle__; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cKSharedHeapXfill_region_with_object6FnJMemRegion__v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: sharedHeap.o; +text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_; +text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__; +text: .text%jni_GetFieldID: jni.o; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2ipnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassSregister_finalizer6FpnPinstanceOopDesc_pnGThread__2_; +text: .text%__1cJFieldTypeYis_valid_array_signature6FpnNsymbolOopDesc__i_; +text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__; +text: .text%__1cJFieldTypeSskip_optional_size6FpnNsymbolOopDesc_pi_v_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6FnUtypeArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%JVM_RegisterUnsafeMethods; +text: .text%JVM_IsArrayClass; +text: .text%JVM_GetComponentType; +text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_; +text: .text%__1cKReflectionbFbasic_type_arrayklass_to_mirror6FpnMklassOopDesc_pnGThread__pnHoopDesc__; +text: .text%JVM_IsPrimitiveClass; +text: .text%JVM_GetClassLoader; +text: .text%JVM_DesiredAssertionStatus; +text: .text%__1cOJavaAssertionsHenabled6Fpkci_i_; +text: 
.text%__1cOJavaAssertionsLmatch_class6Fpkc_pn0AKOptionList__: javaAssertions.o; +text: .text%__1cOJavaAssertionsNmatch_package6Fpkc_pn0AKOptionList__; +text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%JVM_InternString; +text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_; +text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_; +text: .text%__1cKSharedHeapPis_in_permanent6kMpkv_i_: genCollectedHeap.o; +text: .text%__1cQjava_lang_StringGequals6FpnHoopDesc_pHi_i_; +text: .text%JVM_NanoTime; +text: .text%__1cCosNjavaTimeNanos6F_x_; +text: .text%JVM_GetCallerClass; +text: .text%JVM_SupportsCX8; +text: .text%__1cNFingerprinterHdo_bool6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRuntime.o; +text: .text%JVM_GetClassDeclaredFields; +text: .text%__1cKReflectionJnew_field6FpnPfieldDescriptor_ipnGThread__pnHoopDesc__; +text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__; +text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_; +text: .text%__1cXjava_lang_reflect_FieldThas_signature_field6F_i_; +text: .text%__1cXjava_lang_reflect_FieldVhas_annotations_field6F_i_; +text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverWresolve_interface_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cMLinkResolverbHlinktime_resolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbGruntime_resolve_interface_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2pnGThread__v_; +text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_; +text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_; +text: .text%Unsafe_ObjectFieldOffset; +text: .text%__1cRfind_field_offset6FpnI_jobject_ipnGThread__i_; +text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_; +text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_; +text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_; +text: .text%JVM_IHashCode; +text: .text%jni_GetStaticFieldID: jni.o; +text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__; +text: .text%__1cNinstanceKlassPjni_id_for_impl6FnTinstanceKlassHandle_i_pnFJNIid__; +text: .text%__1cFJNIid2t6MpnMklassOopDesc_ip0_v_; +text: .text%jni_SetStaticObjectField: jni.o; 
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceRefKlass.o; +text: .text%__1cNobjArrayKlassPoop_is_objArray6kM_i_: objArrayKlass.o; +text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlass.o; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: arrayKlass.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: arrayKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: arrayKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: arrayKlass.o; +text: .text%jni_GetStringUTFLength: jni.o; +text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_; +text: .text%jni_GetStringLength: jni.o; +text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_; +text: .text%jni_GetStringUTFRegion: jni.o; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_; +text: .text%JVM_FindClassFromClassLoader; +text: .text%JVM_FindClassFromBootLoader; +text: .text%JVM_IsInterface; +text: .text%JVM_GetClassDeclaredConstructors; +text: .text%__1cNmethodOopDescOis_initializer6kM_i_; +text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassQmethod_index_for6kMpnNmethodOopDesc_pnGThread__i_; +text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__; +text: .text%__1cPSignatureStream2t6MnMsymbolHandle_i_v_; +text: .text%__1cPSignatureStreamEnext6M_v_; +text: .text%__1cKReflectionTget_exception_types6FnMmethodHandle_pnGThread__nOobjArrayHandle__; +text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__; +text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__; +text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorThas_signature_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorVhas_annotations_field6F_i_; +text: .text%__1cNmethodOopDescLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorbFhas_parameter_annotations_field6F_i_; +text: .text%__1cNmethodOopDescVparameter_annotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%JVM_Clone; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: jvm.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: jvm.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: jvm.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: jvm.o; +text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_; +text: .text%JVM_GetClassAccessFlags; +text: 
.text%JVM_GetClassName; +text: .text%__1cFKlassNexternal_name6kM_pkc_; +text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_; +text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__; +text: .text%JVM_GetClassModifiers; +text: .text%jni_GetSuperclass: jni.o; +text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__; +text: .text%__1cNFingerprinterIdo_array6Mii_v_: dump.o; +text: .text%JVM_NewInstanceFromConstructor; +text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_; +text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_; +text: .text%__1cKReflectionGinvoke6FnTinstanceKlassHandle_nMmethodHandle_nGHandle_inOobjArrayHandle_nJBasicType_4ipnGThread__pnHoopDesc__; +text: .text%__1cKReflectionDbox6FpnGjvalue_nJBasicType_pnGThread__pnHoopDesc__; +text: .text%JVM_MaxMemory; +text: .text%__1cQGenCollectedHeapMmax_capacity6kM_I_; +text: .text%__1cQDefNewGenerationMmax_capacity6kM_I_; +text: .text%Unsafe_AllocateMemory; +text: .text%Unsafe_SetNativeLong; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: bytecode.o; +text: .text%Unsafe_GetNativeByte; +text: .text%Unsafe_FreeMemory; +text: .text%__1cNSignatureInfoIdo_float6M_v_: bytecode.o; +text: .text%__1cFJNIidEfind6Mi_p0_; +text: .text%jni_NewObjectV: jni.o; +text: .text%__1cMalloc_object6FpnH_jclass_pnGThread__pnPinstanceOopDesc__: jni.o; +text: .text%jni_GetStringRegion: jni.o; +text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__; +text: .text%jni_GetObjectField: jni.o; +text: .text%jni_GetStringCritical: jni.o; +text: .text%__1cJGC_lockerNlock_critical6FpnKJavaThread__v_: jni.o; +text: .text%jni_ReleaseStringCritical: jni.o; +text: .text%JVM_LoadLibrary; +text: .text%JVM_FindLibraryEntry; +text: .text%jni_GetJavaVM; +text: .text%JVM_IsSupportedJNIVersion; +text: .text%jni_SetIntField: jni.o; +text: .text%jni_SetLongField: jni.o; +text: .text%JVM_FindSignal; +text: .text%JVM_RegisterSignal; +text: .text%__1cCosMuser_handler6F_pv_; +text: .text%__1cCosGsignal6Fipv_1_; +text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o; +text: .text%__1cVquicken_jni_functions6F_v_; +text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_; +text: .text%__1cJAssemblerFtestb6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerMemit_arith_b6MiipnMRegisterImpl_i_v_; +text: .text%__1cYjni_GetBooleanField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_; +text: .text%__1cVjni_GetByteField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_; +text: .text%__1cVjni_GetCharField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_; +text: .text%__1cWjni_GetShortField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_; +text: .text%__1cUjni_GetIntField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_; +text: .text%__1cVjni_GetLongField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_; +text: 
.text%__1cJAssemblerFfst_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfstp_d6Mi_v_; +text: .text%__1cWjni_GetFloatField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_; +text: .text%__1cXjni_GetDoubleField_addr6F_pC_; +text: .text%__1cSset_init_completed6F_v_; +text: .text%__1cJTimeStampGupdate6M_v_; +text: .text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_; +text: .text%jni_NewObjectArray: jni.o; +text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: objArrayKlass.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: objArrayKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: objArrayKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: objArrayKlass.o; +text: .text%jni_SetObjectArrayElement: jni.o; +text: .text%jni_GetObjectArrayElement: jni.o; +text: .text%__1cSInterpreterRuntimebAfrequency_counter_overflow6FpnKJavaThread_pC_x_; +text: .text%__1cQSimpleCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_; +text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_; +text: .text%__1cRInvocationCounterJset_carry6M_v_; +text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__; +text: .text%__1cQSimpleCompPolicyRcompilation_level6MnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerTcompile_method_base6FnMmethodHandle_ii1ipkcpnGThread__pnHnmethod__; +text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cLJvmtiExportQenter_live_phase6F_v_; +text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_; +text: .text%__1cUJvmtiEventControllerHvm_init6F_v_; +text: .text%__1cHMonitorKnotify_all6M_i_; +text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_; +text: .text%__1cMPeriodicTask2t6MI_v_; +text: .text%__1cMPeriodicTaskGenroll6M_v_; +text: .text%__1cURecompilationMonitorbGstart_recompilation_monitor_task6F_v_; +text: .text%__1cCosLsignal_init6F_v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_; +text: .text%__1cCosOsignal_init_pd6F_v_; +text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_; +text: .text%__1cICompiler2t6M_v_; +text: .text%__1cNCompileBrokerQcompilation_init6FpnQAbstractCompiler__v_; +text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_; +text: .text%__1cICompilerOneeds_adapters6M_i_: c1_Compiler.o; +text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_; +text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__; +text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_; +text: .text%__1cOCompilerThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cCosFyield6F_v_; +text: .text%__1cCosFsleep6FpnGThread_xi_i_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o; +text: .text%__1cCosLsignal_wait6F_i_; +text: .text%__1cVcheck_pending_signals6Fi_i_: os_solaris.o; +text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o; +text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_; +text: .text%__1cICompilerKinitialize6M_v_; +text: .text%__1cMCompileQdDueueDget6M_pnLCompileTask__; 
+text: .text%__1cKManagementKinitialize6FpnGThread__v_; +text: .text%__1cRLowMemoryDetectorKinitialize6F_v_; +text: .text%__1cXLowMemoryDetectorThreadbCis_hidden_from_external_view6kM_i_: lowMemoryDetector.o; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cLStatSamplerGengage6F_v_; +text: .text%__1cLStatSamplerKinitialize6F_v_; +text: .text%__1cLStatSamplerUcreate_misc_perfdata6F_v_; +text: .text%__1cCosRelapsed_frequency6F_x_; +text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_; +text: .text%__1cLStatSamplerTget_system_property6FpkcpnGThread__2_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_; +text: .text%__1cLStatSamplerXcreate_sampled_perfdata6F_v_; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__; +text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_: statSampler.o; +text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__; +text: .text%__1cMPerfDataListFclone6M_p0_; +text: .text%__1cMPerfDataList2t6Mp0_v_; +text: .text%__1cUGenericGrowableArrayNraw_appendAll6Mpk0_v_; +text: .text%__1cNWatcherThreadFstart6F_v_; +text: .text%__1cNWatcherThread2t6M_v_; +text: .text%__1cJTimeStampMmilliseconds6kM_x_; +text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_; +text: .text%__1cORuntimeServiceYrecord_application_start6F_v_; +text: .text%__1cNWatcherThreadDrun6M_v_; +text: .text%__1cMPeriodicTaskMtime_to_wait6F_I_: thread.o; +text: .text%__1cNWatcherThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cIos_sleep6Fxi_i_: os_solaris.o; +text: .text%__1cNgetTimeMillis6F_x_; +text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_; +text: .text%JVM_FindLoadedClass; +text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%jni_CallStaticObjectMethod: jni.o; +text: .text%jni_NewByteArray: jni.o; +text: .text%jni_SetByteArrayRegion: jni.o; +text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cNObjectMonitorFenter6MpnGThread__v_; +text: .text%jni_NewObject: jni.o; +text: .text%__1cGThreadMis_VM_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_; +text: .text%__1cRLowMemoryDetectorUhas_pending_requests6F_i_; +text: .text%__1cVjava_lang_ClassLoaderGparent6FpnHoopDesc__2_; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcinMsymbolHandle_4nGHandle_6_v_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinMsymbolHandle_4_i_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_pkcnGHandle_6_6_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_3pnRJavaCallArguments_nGHandle_6_6_; +text: .text%JVM_FillInStackTrace; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_; +text: .text%__1cIUniverseWis_out_of_memory_error6FnGHandle__i_; +text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_; +text: .text%__1cKJavaThreadGactive6F_p0_; 
+text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_; +text: .text%__1cTjava_lang_ThrowableNset_backtrace6FpnHoopDesc_2_v_; +text: .text%__1cTjava_lang_ThrowableQclear_stacktrace6FpnHoopDesc__v_; +text: .text%__1cVPreserveExceptionMark2T6M_v_; +text: .text%__1cKExceptionsG_throw6FpnGThread_pkcinGHandle__v_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinGHandle__i_; +text: .text%__1cGThreadVset_pending_exception6MpnHoopDesc_pkci_v_; +text: .text%__1cGEventsDlog6FpkcE_v_: exceptions.o; +text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_; +text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_; +text: .text%__1cNSharedRuntimebOraw_exception_handler_for_return_address6FpC_1_; +text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_; +text: .text%__1cKJavaThreadNreguard_stack6MpC_i_; +text: .text%__1cSInterpreterRuntimePset_bcp_and_mdp6FpCpnKJavaThread__v_; +text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_; +text: .text%__1cFframeZinterpreter_frame_set_bcx6Mi_v_; +text: .text%__1cNCompileBrokerYcheck_compilation_result6FnMmethodHandle_iippnHnmethod__i_; +text: .text%__1cNCompileBrokerXcompilation_is_in_queue6FnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerZcompilation_is_prohibited6FnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerTis_not_compile_only6FnMmethodHandle__i_; +text: .text%__1cOCompilerOracleOshould_exclude6FnMmethodHandle__i_; +text: .text%__1cNCompileBrokerRassign_compile_id6FnMmethodHandle_i_I_; +text: .text%__1cNCompileBrokerTis_compile_blocking6FnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerTcreate_compile_task6FpnMCompileQdDueue_inMmethodHandle_i3ipkcii_pnLCompileTask__; +text: .text%__1cNCompileBrokerNallocate_task6F_pnLCompileTask__; +text: .text%__1cLCompileTaskKinitialize6MinMmethodHandle_i1ipkcii_v_; +text: .text%__1cMCompileQdDueueDadd6MpnLCompileTask__v_; +text: .text%__1cNCompileBrokerTwait_for_completion6FpnLCompileTask__pnHnmethod__; +text: .text%__1cCosPhint_no_preempt6F_v_; +text: .text%__1cSCompileTaskWrapper2t6MpnLCompileTask__v_; +text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_; +text: .text%__1cNCompileBrokerQset_last_compile6FpnOCompilerThread_nMmethodHandle_ii_v_; +text: .text%__1cNCompileBrokerVpush_jni_handle_block6F_v_; +text: .text%__1cNCompileBrokerOcheck_break_at6FnMmethodHandle_iii_i_; +text: .text%__1cOCompilerOraclePshould_break_at6FnMmethodHandle__i_; +text: .text%__1cFciEnv2t6MpnHJNIEnv__iii_v_; +text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_; +text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_; +text: .text%__1cUGenericGrowableArray2t6MpnFArena_iipnEGrET__v_; +text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_; +text: .text%__1cIciObject2t6MnGHandle__v_; +text: .text%__1cPciObjectFactoryEfind6MpnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cPciObjectFactoryLis_found_at6MipnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cPciObjectFactoryNinit_ident_of6MpnIciObject__v_; +text: .text%__1cIciObjectJset_ident6MI_v_; +text: .text%__1cPciObjectFactoryGinsert6MipnIciObject_pnNGrowableArray4C2___v_; +text: .text%__1cGciType2t6MnJBasicType__v_; +text: .text%__1cIciObject2t6M_v_; +text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__; +text: .text%__1cPciObjectFactoryNfind_non_perm6MpnHoopDesc__rpn0ANNonPermObject__; +text: 
.text%__1cFKlassNoop_is_symbol6kM_i_: klassKlass.o; +text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodKlass.o; +text: .text%__1cIciSymbolEmake6Fpkc_p0_; +text: .text%__1cFciEnvIis_in_vm6F_i_; +text: .text%__1cIciSymbolJmake_impl6Fpkc_p0_; +text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_; +text: .text%__1cGciType2t6MnLKlassHandle__v_; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: symbolKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: symbolKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: symbolKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: symbolKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: symbolKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: klassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: klassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassUoop_is_instanceKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: klassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassUoop_is_instanceKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassVoop_is_typeArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassUoop_is_objArrayKlass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: objArrayKlassKlass.o; +text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_; +text: .text%__1cHciKlass2t6MnLKlassHandle__v_; +text: .text%__1cPciInstanceKlassFsuper6M_p0_; +text: .text%__1cPciInstanceKlassTis_java_lang_Object6M_i_; +text: .text%__1cIciObjectGequals6Mp0_i_; +text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__; +text: 
.text%__1cFKlassNoop_is_method6kM_i_: instanceKlass.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: instanceKlass.o; +text: .text%__1cUciInstanceKlassKlassEmake6F_p0_; +text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_; +text: .text%__1cGciType2t6MpnHciKlass__v_; +text: .text%__1cIciObject2t6MpnHciKlass__v_; +text: .text%__1cIciObjectUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_; +text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_; +text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_; +text: .text%__1cRciArrayKlassKlassUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodKlass.o; +text: .text%__1cIciMethod2t6MnMmethodHandle__v_; +text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_; +text: .text%__1cPSignatureStreamJis_object6kM_i_; +text: .text%__1cGciTypeEmake6FnJBasicType__p0_; +text: .text%__1cJTraceTime2t6MpkcpnMelapsedTimer_iipnMoutputStream__v_; +text: .text%__1cICompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cLCompilation2t6MpnQAbstractCompiler_pnFciEnv_pnIciMethod_ipnRC1_MacroAssembler__v_; +text: .text%__1cTExceptionRangeTable2t6Mi_v_; +text: .text%__1cWImplicitExceptionTableIset_size6MI_v_; +text: .text%__1cLCompilationOcompile_method6M_v_; +text: .text%__1cLCompilationKinitialize6M_v_; +text: .text%__1cLCompilationEcode6kM_pnKCodeBuffer__; +text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_; +text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_; +text: .text%__1cLCompilationTdebug_info_recorder6kM_pnYDebugInformationRecorder__; +text: .text%__1cLCompilationbBis_optimized_library_method6kM_i_; +text: .text%__1cLCompilationTcompile_java_method6MpnLCodeOffsets__i_; +text: .text%__1cLCompilationTinitialize_oop_maps6M_v_; +text: .text%__1cIciMethodMall_oop_maps6M_pnKciLocalMap__; +text: .text%__1cSciGenerateLocalMap2t6MpnFArena_nMmethodHandle__v_; +text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_; +text: .text%__1cSciGenerateLocalMapWfind_jsr_return_points6MnMmethodHandle__v_; +text: .text%__1cRRawBytecodeStream2t6MnMmethodHandle__v_; +text: .text%__1cRRawBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: ciOopMap.o; +text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_; +text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: generateOopMap.o; +text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_; +text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_; +text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_; +text: .text%__1cOGenerateOopMapOset_bbmark_bit6Mi_v_; +text: .text%__1cOGenerateOopMapPjump_targets_do6MpnOBytecodeStream_pFp0ipi_v4_i_; +text: .text%__1cSciGenerateLocalMapRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cSciGenerateLocalMapUbytecode_is_gc_point6FnJBytecodesECode_ii_i_; +text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_; +text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_; +text: .text%__1cOGenerateOopMapKinit_state6M_v_; +text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_; +text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_; +text: .text%__1cOGenerateOopMapSget_basic_block_at6kMi_pnKBasicBlock__; +text: 
.text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__; +text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_; +text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_; +text: .text%__1cOGenerateOopMapTmethodsig_to_effect6MpnNsymbolOopDesc_ipnNCellTypeState__i_; +text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_; +text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_; +text: .text%__1cOGenerateOopMapKinterp_all6M_v_; +text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapNrestore_state6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_; +text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_; +text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_; +text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__; +text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_; +text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_; +text: .text%__1cOGenerateOopMapCpp6MpnNCellTypeState_2_v_; +text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapFppop16MnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__; +text: .text%__1cOGenerateOopMapKcheck_type6MnNCellTypeState_1_v_; +text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapGppush16MnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_; +text: .text%__1cOGenerateOopMapHset_var6MinNCellTypeState__v_; +text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_; +text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_; +text: .text%__1cOGenerateOopMapTmerge_state_vectors6MpnNCellTypeState_2_i_; +text: .text%__1cNCellTypeStateFmerge6kM0i_0_; +text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_; +text: .text%__1cOGenerateOopMapXdo_return_monitor_check6M_v_; +text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_; +text: .text%__1cSciGenerateLocalMapOreport_results6kM_i_: ciOopMap.o; +text: .text%__1cOGenerateOopMapNreport_result6M_v_; +text: .text%__1cSciGenerateLocalMapUfill_stackmap_prolog6Mi_v_; +text: .text%__1cSciGenerateLocalMapZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cKciLocalMap2t6MpnFArena_iii_v_; +text: .text%__1cKciLocalMapRset_bci_for_index6Mii_v_; +text: .text%__1cSciGenerateLocalMapUfill_stackmap_epilog6M_v_: ciOopMap.o; +text: .text%__1cSciGenerateLocalMapOfill_init_vars6MpnNGrowableArray4Ci___v_; +text: .text%__1cKciLocalMapSset_nof_initialize6Mi_v_; +text: .text%__1cLCompilationJbuild_hir6M_v_; +text: .text%__1cCIR2t6MpnLCompilation_pnIciMethod_i_v_; +text: .text%__1cJValueTypeKinitialize6F_v_; +text: .text%__1cMciNullObjectEmake6F_p0_; +text: .text%__1cMGraphBuilderKinitialize6F_v_; +text: .text%__1cHIRScope2t6MpnLCompilation_p0ipnIciMethod_ii_v_; +text: .text%__1cOLocalSlotArray2t6MkikpnJLocalSlot__v_: c1_IR.o; +text: .text%__1cGBitMap2t6MI_v_; +text: .text%__1cGBitMapGresize6MI_v_; +text: .text%__1cNWordSizeArray2t6Mki1_v_: c1_IR.o; +text: .text%__1cJXHandlers2t6MpnIciMethod__v_; +text: .text%__1cIciMethodJload_code6M_v_; +text: .text%__1cIciMethodVhas_balanced_monitors6M_i_; +text: .text%__1cHIRScopeLbuild_graph6MpnLCompilation_i_pnKBlockBegin__; +text: .text%__1cQBlockListBuilder2t6MpnHIRScope_ii_v_; +text: .text%__1cPBlockBeginArray2t6MkikpnKBlockBegin__v_: 
c1_GraphBuilder.o; +text: .text%__1cQBlockListBuilderLset_leaders6M_v_; +text: .text%__1cQciBytecodeStream2t6MpnIciMethod__v_; +text: .text%__1cQciBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cQBlockListBuilderMnew_block_at6MinKBlockBeginEFlag__p1_; +text: .text%__1cQBlockListBuilderUset_xhandler_entries6M_v_; +text: .text%__1cKValueStack2t6MpnHIRScope_ii_v_; +text: .text%__1cKValueArray2t6MkikpnLInstruction__v_: c1_ValueStack.o; +text: .text%__1cJLocalSlot2t6M_v_; +text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_IR.o; +text: .text%__1cKObjectTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cMGraphBuilder2t6MpnLCompilation_pnHIRScope_pnJBlockList_pnKBlockBegin__v_; +text: .text%__1cMGraphBuilderPpush_root_scope6MpnHIRScope_pnJBlockList_pnKBlockBegin__v_; +text: .text%__1cMGraphBuilderJScopeData2t6Mp1i_v_; +text: .text%__1cMGraphBuilderJScopeDataJset_scope6MpnHIRScope__v_; +text: .text%__1cMGraphBuilderUpush_exception_scope6M_v_; +text: .text%__1cOExceptionScope2t6M_v_; +text: .text%__1cOExceptionScopeEinit6M_v_; +text: .text%__1cIValueMap2t6M_v_; +text: .text%__1cMGraphBuilderJScopeDataQadd_to_work_list6MpnKBlockBegin__v_; +text: .text%__1cNResourceArrayGexpand6MIiri_v_; +text: .text%__1cMGraphBuilderSiterate_all_blocks6Mi_v_; +text: .text%__1cMGraphBuilderJScopeDataVremove_from_work_list6M_pnKBlockBegin__; +text: .text%__1cMGraphBuilderJScopeDataSis_work_list_empty6kM_i_; +text: .text%__1cMGraphBuilderOconnect_to_end6MpnKBlockBegin__pnIBlockEnd__; +text: .text%__1cIValueMapIkill_all6M_v_; +text: .text%__1cIValueMapRnumber_of_buckets6kM_i_; +text: .text%__1cIValueMapJbucket_at6Mi_pnGBucket__; +text: .text%__1cGBucketIkill_all6M_v_; +text: .text%__1cKValueStackEcopy6M_p0_; +text: .text%__1cGValuesIpush_all6Mpk0_v_: c1_ValueStack.o; +text: .text%__1cMGraphBuilderbBiterate_bytecodes_for_block6Mi_pnIBlockEnd__; +text: .text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderJScopeDataIblock_at6Mi_pnKBlockBegin__; +text: .text%__1cMGraphBuilderKload_local6MpnJValueType_i_v_; +text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderGappend6MpnLInstruction__2_; +text: .text%__1cMGraphBuilderLappend_base6MpnLInstruction__2_; +text: .text%__1cJLoadLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_LoadLocal6MpnJLoadLocal__v_; +text: .text%__1cIValueMapEfind6MpnLInstruction__2_; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_GraphBuilder.o; +text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_GraphBuilder.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_GraphBuilder.o; +text: .text%__1cLInstructionEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_GraphBuilder.o; +text: .text%__1cLInstructionIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cKValueStackLclear_store6Mi_v_; +text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderMaccess_field6MnJBytecodesECode__v_; +text: .text%__1cQciBytecodeStreamJget_field6kM_pnHciField__; +text: .text%__1cQciBytecodeStreamPget_field_index6kM_i_; +text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cFciEnvXget_field_by_index_impl6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__; +text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_; 
+text: .text%__1cFciEnvSget_klass_by_index6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cFciEnvXget_klass_by_index_impl6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__; +text: .text%__1cPciObjectFactorySget_unloaded_klass6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__; +text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_; +text: .text%__1cMas_ValueType6FnJBasicType__pnJValueType__; +text: .text%__1cHciFieldJwill_link6MpnPciInstanceKlass_nJBytecodesECode__i_; +text: .text%__1cMLinkResolverXresolve_klass_no_update6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cMGraphBuilderKlock_stack6M_pnKValueStack__; +text: .text%__1cKValueStackKcopy_locks6M_p0_; +text: .text%__1cJLoadFieldFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerMdo_LoadField6MpnJLoadField__v_; +text: .text%__1cLAccessFieldOas_AccessField6M_p0_: c1_Instruction.o; +text: .text%__1cJLoadFieldEhash6kM_i_: c1_Instruction.o; +text: .text%__1cJLoadFieldEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cIValueMapNlookup_bucket6Mi_pnGBucket__; +text: .text%__1cGBucketEfind6MpnLInstruction_i_2_; +text: .text%__1cGBucketGappend6MpnLInstruction_i_v_; +text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_Instruction.o; +text: .text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_Instruction.o; +text: .text%__1cLAccessFieldIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cOExceptionScopeEcopy6M_p0_; +text: .text%__1cOExceptionScopeGlength6kM_i_; +text: .text%__1cHIntTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cMGraphBuilderLstore_local6MpnJValueType_i_v_; +text: .text%__1cKValueStackDpop6MpnJValueType__pnLInstruction__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderLstore_local6MpnKValueStack_pnLInstruction_pnJValueType_ii_v_; +text: .text%__1cJValueTypeNas_ObjectType6M_pnKObjectType__: c1_ValueType.o; +text: .text%__1cKStoreLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerNdo_StoreLocal6MpnKStoreLocal__v_; +text: .text%__1cKValueStackLstore_local6MpnKStoreLocal_i_v_; +text: .text%__1cKValueStackQpin_stack_locals6Mi_v_; +text: .text%__1cKValueStackNpin_stack_all6MnLInstructionJPinReason__v_; +text: .text%__1cHIntTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cMGraphBuilderHif_zero6MpnJValueType_nLInstructionJCondition__v_; +text: .text%__1cIConstantFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerLdo_Constant6MpnIConstant__v_; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Instruction.o; +text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_Instruction.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_Instruction.o; +text: .text%__1cIConstantEhash6kM_i_; +text: .text%__1cHIntTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cLIntConstantOas_IntConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cIConstantEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cIConstantIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cMGraphBuilderHif_node6MpnLInstruction_n0BJCondition_2pnKValueStack__v_; +text: .text%__1cCIf2t6MpnLInstruction_n0BJCondition_i2pnKBlockBegin_5pnKValueStack_i_v_: 
c1_GraphBuilder.o; +text: .text%__1cCIfFvisit6MpnSInstructionVisitor__v_: c1_Canonicalizer.o; +text: .text%__1cNCanonicalizerFdo_If6MpnCIf__v_; +text: .text%__1cJValueTypeLis_constant6kM_i_: c1_ValueType.o; +text: .text%__1cLInstructionMas_CompareOp6M_pnJCompareOp__: c1_Instruction.o; +text: .text%__1cLInstructionNas_InstanceOf6M_pnKInstanceOf__: c1_Instruction.o; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Canonicalizer.o; +text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_Canonicalizer.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_Canonicalizer.o; +text: .text%__1cLInstructionEhash6kM_i_: c1_Canonicalizer.o; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_Canonicalizer.o; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_Canonicalizer.o; +text: .text%__1cLInstructionIcan_trap6kM_i_: c1_Canonicalizer.o; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_Canonicalizer.o; +text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_Canonicalizer.o; +text: .text%__1cKBlockBeginItry_join6MpnKValueStack__i_; +text: .text%__1cKValueStack2t6Mp0_v_; +text: .text%__1cKValueStackEinit6Mp0_v_; +text: .text%__1cMGraphBuilderNmethod_return6MpnLInstruction__v_; +text: .text%__1cGReturnFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerJdo_Return6MpnGReturn__v_; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_GraphBuilder.o; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_GraphBuilder.o; +text: .text%__1cGReturnJas_Return6M_p0_: c1_GraphBuilder.o; +text: .text%__1cKValueStackbAeliminate_all_scope_stores6Mi_v_; +text: .text%__1cKValueStackQeliminate_stores6Mi_v_; +text: .text%__1cKValueStackMcaller_state6kM_p0_; +text: .text%__1cFciEnvWget_klass_by_name_impl6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cHciKlassGloader6M_pnHoopDesc__: ciTypeArrayKlass.o; +text: .text%__1cFciEnvZcheck_klass_accessibility6MpnHciKlass_pnMklassOopDesc__i_; +text: .text%__1cIciObjectMis_obj_array6M_i_: ciInstanceKlass.o; +text: .text%__1cPciInstanceKlassRis_instance_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cKObjectTypeNas_ObjectType6M_p0_: c1_ValueType.o; +text: .text%__1cJValueTypeOas_AddressType6M_pnLAddressType__: c1_ValueType.o; +text: .text%__1cKObjectTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cMGraphBuilderHif_same6MpnJValueType_nLInstructionJCondition__v_; +text: .text%__1cJValueTypeOas_IntConstant6M_pnLIntConstant__: c1_ValueType.o; +text: .text%__1cKStoreFieldFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerNdo_StoreField6MpnKStoreField__v_; +text: .text%__1cLAccessFieldOas_AccessField6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLAccessFieldIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cIValueMapKkill_field6MpnHciField__v_; +text: .text%__1cGBucketKkill_field6MpnHciField__v_; +text: .text%__1cKValueStackQpin_stack_fields6MpnHciField__v_; +text: .text%__1cKValueStackVis_same_across_scopes6Mp0_i_; +text: .text%__1cMGraphBuilderNarithmetic_op6MpnJValueType_nJBytecodesECode_pnKValueStack__v_; +text: .text%__1cJValueTypeEmeet6kMp0_1_; +text: .text%__1cHIntTypeEbase6kM_pnJValueType__: c1_Canonicalizer.o; +text: .text%__1cMArithmeticOpIcan_trap6kM_i_; +text: .text%__1cMArithmeticOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerPdo_ArithmeticOp6MpnMArithmeticOp__v_; +text: 
.text%__1cNCanonicalizerGdo_Op26MpnDOp2__v_; +text: .text%__1cLIntConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cNCanonicalizerTmove_const_to_right6MpnDOp2__v_; +text: .text%__1cMArithmeticOpOis_commutative6kM_i_; +text: .text%__1cMArithmeticOpEhash6kM_i_: c1_Instruction.o; +text: .text%__1cMArithmeticOpEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cMGraphBuilderJincrement6M_v_; +text: .text%__1cHIntTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cMGraphBuilderMload_indexed6MnJBasicType__v_; +text: .text%__1cLLoadIndexedFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerOdo_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cLLoadIndexedEhash6kM_i_: c1_Instruction.o; +text: .text%__1cLLoadIndexedEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cLAccessArrayIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cIConstantIis_equal6kMpnLInstruction__i_; +text: .text%__1cIConstantLas_Constant6M_p0_: c1_Instruction.o; +text: .text%__1cEGotoFvisit6MpnSInstructionVisitor__v_: c1_Canonicalizer.o; +text: .text%__1cNCanonicalizerHdo_Goto6MpnEGoto__v_; +text: .text%__1cHIRScopeMheader_block6MpnKBlockBegin_n0BEFlag__2_; +text: .text%__1cCIRIoptimize6M_v_; +text: .text%__1cJOptimizer2t6MpnCIR__v_; +text: .text%__1cJOptimizerbHeliminate_conditional_expressions6M_v_; +text: .text%__1cCIRQiterate_preorder6MpnMBlockClosure__v_; +text: .text%__1cKBlockBeginQiterate_preorder6MpnMBlockClosure__v_; +text: .text%__1cJboolArray2t6Mki1_v_: c1_Instruction.o; +text: .text%__1cKBlockBeginQiterate_preorder6MrnJboolArray_pnMBlockClosure__v_; +text: .text%__1cNCE_EliminatorIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_IR.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_Canonicalizer.o; +text: .text%__1cCIfFas_If6M_p0_: c1_Canonicalizer.o; +text: .text%__1cHIntTypeKas_IntType6M_p0_: c1_ValueType.o; +text: .text%__1cNCE_EliminatorRsimple_value_copy6MpnLInstruction__2_: c1_Optimizer.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_GraphBuilder.o; +text: .text%__1cJLoadLocalMas_LoadLocal6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_GraphBuilder.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_GraphBuilder.o; +text: .text%__1cJOptimizerQeliminate_blocks6M_v_; +text: .text%__1cUGenericGrowableArray2t6MiipnEGrET_i_v_; +text: .text%__1cSPredecessorCounterIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o; +text: .text%__1cLBlockMergerIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o; +text: .text%__1cLBlockMergerJtry_merge6MpnKBlockBegin__i_: c1_Optimizer.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_IR.o; +text: .text%__1cEGotoHas_Goto6M_p0_: c1_Canonicalizer.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_Canonicalizer.o; +text: .text%__1cJOptimizerVeliminate_null_checks6M_v_; +text: .text%__1cGBitMapFclear6M_v_; +text: .text%__1cGBitMapUclear_range_of_words6MII_v_: bitMap.o; +text: .text%__1cNValueSetArray2t6MkikpnIValueSet__v_: c1_Optimizer.o; +text: .text%__1cTNullCheckEliminatorHiterate6MpnKBlockBegin__v_; +text: .text%__1cTNullCheckEliminatorLiterate_all6M_v_; +text: .text%__1cTNullCheckEliminatorLiterate_one6MpnKBlockBegin__v_; +text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_Optimizer.o; +text: .text%__1cGBitMapIset_from6M0_v_; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cKBlockBeginFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: 
.text%__1cQNullCheckVisitorNdo_BlockBegin6MpnKBlockBegin__v_; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_IR.o; +text: .text%__1cEBaseFvisit6MpnSInstructionVisitor__v_: c1_IR.o; +text: .text%__1cQNullCheckVisitorHdo_Base6MpnEBase__v_; +text: .text%__1cTNullCheckEliminatorPmerge_state_for6MpnKBlockBegin_pnKValueStack_pnIValueSet__i_; +text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_Optimizer.o; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_Canonicalizer.o; +text: .text%__1cQNullCheckVisitorHdo_Goto6MpnEGoto__v_; +text: .text%__1cLInstructionMas_NullCheck6M_pnJNullCheck__: c1_GraphBuilder.o; +text: .text%__1cLInstructionMas_NullCheck6M_pnJNullCheck__: c1_Instruction.o; +text: .text%__1cKStoreLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cTNullCheckEliminatorIdo_value6FppnLInstruction__v_; +text: .text%__1cFLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cFLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorIdo_Local6MpnFLocal__v_; +text: .text%__1cLAccessFieldPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cLAccessLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorMdo_LoadLocal6MpnJLoadLocal__v_; +text: .text%__1cTNullCheckEliminatorQhandle_LoadLocal6MpnJLoadLocal__v_; +text: .text%__1cQNullCheckVisitorMdo_LoadField6MpnJLoadField__v_; +text: .text%__1cTNullCheckEliminatorShandle_AccessField6MpnLAccessField__v_; +text: .text%__1cQNullCheckVisitorNdo_StoreLocal6MpnKStoreLocal__v_; +text: .text%__1cTNullCheckEliminatorRhandle_StoreLocal6MpnKStoreLocal__v_; +text: .text%__1cCIfPinput_values_do6MpFppnLInstruction__v_v_: c1_Canonicalizer.o; +text: .text%__1cIConstantPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorLdo_Constant6MpnIConstant__v_; +text: .text%__1cQNullCheckVisitorFdo_If6MpnCIf__v_; +text: .text%__1cDOp2Pinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorPdo_ArithmeticOp6MpnMArithmeticOp__v_; +text: .text%__1cNAccessIndexedPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorOdo_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cTNullCheckEliminatorShandle_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cGBitMapbCset_intersection_with_result6M0_i_; +text: .text%__1cKStoreFieldPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorNdo_StoreField6MpnKStoreField__v_; +text: .text%__1cGReturnPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorJdo_Return6MpnGReturn__v_; +text: .text%__1cJboolArray2t6Mki1_v_: c1_Optimizer.o; +text: .text%__1cCIRTcompute_locals_size6M_v_; +text: .text%__1cHIRScopePallocate_locals6MipnMWordSizeList__i_; +text: .text%__1cHIRScopeGlocals6M_pnJLocalList__; +text: .text%__1cJLocalSlotOcollect_locals6MpnJLocalList__v_; +text: .text%__1cHIRScopePargument_locals6M_pnJLocalList__; +text: .text%__1cJLocalSlotXcollect_argument_locals6MpnJLocalList__v_; +text: .text%__1cCIRTallocate_local_name6M_i_; +text: .text%__1cMWordSizeListEgrow6Mki1_v_: c1_IR.o; +text: .text%__1cCIRSnotice_used_offset6Mi_v_; +text: .text%__1cCIRNcompute_loops6M_v_; +text: .text%__1cIciMethodJhas_loops6kM_i_; +text: .text%__1cNmethodOopDescWcompute_has_loops_flag6M_i_; +text: 
.text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: methodOop.o; +text: .text%__1cKLoopFinder2t6MpnCIR_i_v_; +text: .text%__1cSBlockLoopInfoArray2t6MkikpnNBlockLoopInfo__v_: c1_Loops.o; +text: .text%__1cKLoopFinderNcompute_loops6Mi_pnILoopList__; +text: .text%__1cJboolArray2t6Mki1_v_: c1_Loops.o; +text: .text%__1cKLoopFinderScompute_dominators6MpnJboolArray__v_; +text: .text%__1cGBitMapGat_put6MIi_v_; +text: .text%__1cRCreateInfoClosureIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cNBlockLoopInfo2t6MpnKBlockBegin_i_v_; +text: .text%__1cPSetPredsClosureIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cKLoopFinderSdominator_walk_sux6MpnKBlockBegin_pnJboolArray__v_; +text: .text%__1cGBitMapQset_intersection6M0_v_; +text: .text%__1cGBitMapHis_same6M0_i_; +text: .text%__1cKLoopFinderOfind_backedges6MpnJboolArray__pnILoopList__; +text: .text%__1cELoop2t6MpnKBlockBegin_2_v_: c1_Loops.o; +text: .text%__1cKLoopFinderSgather_loop_blocks6MpnILoopList__v_; +text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_Loops.o; +text: .text%__1cKLoopFinderKfind_loops6MpnILoopList_i_2_; +text: .text%__1cKScanBlocks2t6MpnJBlockList__v_; +text: .text%__1cIintStack2t6M_v_: c1_ScanBlocks.o; +text: .text%__1cKScanBlocksEscan6MpnKScanResult_i_v_; +text: .text%__1cKScanBlocksKscan_block6MpnKBlockBegin_pnKScanResult_i_v_; +text: .text%__1cLIllegalTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_GraphBuilder.o; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_GraphBuilder.o; +text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_GraphBuilder.o; +text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_GraphBuilder.o; +text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_Instruction.o; +text: .text%__1cLAccessLocalOas_AccessLocal6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLInstructionNas_StoreLocal6M_pnKStoreLocal__: c1_GraphBuilder.o; +text: .text%__1cKStoreLocalNas_StoreLocal6M_p0_: c1_GraphBuilder.o; +text: .text%__1cKScanBlocksRaccumulate_access6MinIValueTag_i_v_; +text: .text%__1cKScanBlocksPincrement_count6MnIValueTag_ii_v_; +text: .text%__1cKScanBlocksJget_array6MnIValueTag__pnIintStack__; +text: .text%__1cIintStackEgrow6Mki1_v_: c1_ScanBlocks.o; +text: .text%__1cKScanBlocksLupdate_type6MinIValueTag__v_; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_Canonicalizer.o; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_Canonicalizer.o; +text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_Canonicalizer.o; +text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_Canonicalizer.o; +text: .text%__1cJ_LoopListIpush_all6Mpk0_v_: c1_Loops.o; +text: .text%__1cKLoopFinderbEcompute_loop_exits_and_entries6MpnILoopList__v_; +text: .text%__1cKLoopFinderRfind_loop_entries6MpnKBlockBegin_pnELoop__v_; +text: .text%__1cKLoopFinderPfind_loop_exits6MpnKBlockBegin_pnELoop__v_; +text: .text%__1cKLoopFinderbDcompute_single_precision_flag6MpnILoopList__v_; +text: .text%__1cKLoopFinderNinsert_blocks6MpnILoopList__v_; +text: .text%__1cIintArray2t6Mki1_v_: c1_Loops.o; +text: .text%__1cJBlockListPiterate_forward6MpnMBlockClosure__v_; +text: .text%__1cGTaggerIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cNPairCollectorIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cNResourceArrayEsort6MIpGpkv2_i_v_; +text: .text%__1cRsort_by_block_ids6FppnJBlockPair_2_i_: c1_Loops.o; +text: 
.text%__1cKLoopFinderUinsert_caching_block6MpnILoopList_pnKBlockBegin_4_4_; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_GraphBuilder.o; +text: .text%__1cKStateSplitFscope6kM_pnHIRScope__; +text: .text%__1cKLoopFinderJnew_block6MpnHIRScope_i_pnKBlockBegin__; +text: .text%__1cIBlockEndOsubstitute_sux6MpnKBlockBegin_2_v_; +text: .text%__1cILoopListMupdate_loops6MpnKBlockBegin_22_v_; +text: .text%__1cELoopSupdate_loop_blocks6MpnKBlockBegin_22_v_; +text: .text%__1cCIRMcompute_code6M_v_; +text: .text%__1cJboolArray2t6Mki1_v_: c1_IR.o; +text: .text%__1cCIRWiterate_and_set_weight6kMrnJboolArray_pnKBlockBegin_pnJBlockList_i_v_; +text: .text%__1cKBlockBeginKset_weight6Mi_v_; +text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_IR.o; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_IR.o; +text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_GraphBuilder.o; +text: .text%__1cDcmp6FppnKBlockBegin_2_i_: c1_IR.o; +text: .text%__1cUSuxAndWeightAdjusterIblock_do6MpnKBlockBegin__v_: c1_IR.o; +text: .text%__1cJBlockListJblocks_do6MpFpnKBlockBegin__v_v_; +text: .text%__1cQUseCountComputerRcompute_use_count6FpnKBlockBegin__v_: c1_IR.o; +text: .text%__1cQUseCountComputerXbasic_compute_use_count6FpnKBlockBegin__v_: c1_IR.o; +text: .text%__1cQUseCountComputerQupdate_use_count6FppnLInstruction__v_: c1_IR.o; +text: .text%__1cFLocalIas_Local6M_p0_: c1_GraphBuilder.o; +text: .text%__1cKStateSplitPstate_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cKValueStackJvalues_do6MpFppnLInstruction__v_v_; +text: .text%__1cQUseCountComputerPupdated_pinning6FpnKBlockBegin__i_: c1_IR.o; +text: .text%__1cNCachingChangePinput_values_do6MpFppnLInstruction__v_v_: c1_Loops.o; +text: .text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_Loops.o; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_IR.o; +text: .text%__1cLCompilationIemit_lir6M_v_; +text: .text%__1cIFrameMap2t6Mi_v_; +text: .text%__1cIFrameMapLFpuStackSim2t6M_v_; +text: .text%__1cLCompilationNinit_framemap6MpnIFrameMap__v_; +text: .text%__1cIFrameMapbCset_local_name_to_offset_map6MpnMWordSizeList__v_; +text: .text%__1cLLIR_Emitter2t6MpnLCompilation__v_; +text: .text%__1cIValueGenOinit_value_gen6F_v_; +text: .text%__1cIRegAlloc2t6M_v_; +text: .text%__1cNc1_AllocTable2t6Mi_v_; +text: .text%__1cIRegAllocFclear6M_v_; +text: .text%__1cNCodeGenerator2t6MpnIValueGen_pnRValueGenInvariant__v_; +text: .text%__1cNCodeGeneratorIblock_do6MpnKBlockBegin__v_; +text: .text%__1cLLIR_EmitterMmust_bailout6kM_i_; +text: .text%__1cNCodeGeneratorPblock_do_prolog6MpnKBlockBegin__v_; +text: .text%__1cIValueGenLstart_block6MpnKBlockBegin__v_; +text: .text%__1cLLIR_EmitterLstart_block6MpnKBlockBegin__v_; +text: .text%__1cILIR_List2t6MpnLCompilation__v_; +text: .text%__1cIValueGenQbind_block_entry6MpnKBlockBegin__v_; +text: .text%__1cLLIR_EmitterQbind_block_entry6MpnKBlockBegin__v_; +text: .text%__1cIValueGenMblock_prolog6MpnKBlockBegin__v_; +text: .text%__1cIValueGenHdo_root6MpnLInstruction__v_; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_GraphBuilder.o; +text: .text%__1cIValueGenNdo_BlockBegin6MpnKBlockBegin__v_; +text: .text%__1cQDelayedSpillMark2T6M_v_: c1_CodeGenerator.o; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_IR.o; +text: .text%__1cIValueGenHdo_Base6MpnEBase__v_; +text: .text%__1cIValueGenNreceiverRInfo6F_nFRInfo__; +text: .text%__1cIValueGenMicKlassRInfo6F_nFRInfo__; +text: .text%__1cLCompilationNget_init_vars6M_pnIintStack__; +text: .text%__1cLLIR_EmitterJstd_entry6MpnHIRScope_pnIintStack_nFRInfo_5_v_; +text: 
.text%__1cILIR_ListWunverified_entry_point6MnFRInfo_1_v_: c1_LIREmitter.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIREmitter.o; +text: .text%__1cLLIR_EmitterGmethod6kM_pnIciMethod__; +text: .text%__1cMCodeEmitInfo2t6MpnLLIR_Emitter_ipnIintStack_pnKValueStack_pnOExceptionScope_pnPRInfoCollection__v_; +text: .text%__1cLCompilationVvalue_stack2lir_stack6MpnKValueStack__pnNGrowableArray4CpnLLIR_OprDesc____; +text: .text%__1cIValueGenMblock_epilog6MpnKBlockBegin__v_; +text: .text%__1cNCodeGeneratorPblock_do_epilog6MpnKBlockBegin__v_; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Canonicalizer.o; +text: .text%__1cIValueGenHdo_Goto6MpnEGoto__v_; +text: .text%__1cIValueGenNset_no_result6MpnLInstruction__v_; +text: .text%__1cIValueGenLmove_to_phi6MpnKValueStack_i_i_; +text: .text%__1cIValueGenWgoto_default_successor6MpnIBlockEnd_pnMCodeEmitInfo__v_; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Instruction.o; +text: .text%__1cIValueGenMdo_LoadField6MpnJLoadField__v_; +text: .text%__1cLAccessFieldKlock_stack6kM_pnKValueStack__: c1_Instruction.o; +text: .text%__1cIValueGenEwalk6MpnLInstruction__v_; +text: .text%__1cIValueGenMdo_LoadLocal6MpnJLoadLocal__v_; +text: .text%__1cIValueGenJload_item6MpnEItem__v_; +text: .text%__1cEItemGupdate6M_v_; +text: .text%__1cIValueGenQset_maynot_spill6MpnEItem__v_; +text: .text%__1cIValueGenSfpu_fanout_handled6MpnEItem__i_; +text: .text%__1cEItemEtype6kM_pnJValueType__: c1_Items.o; +text: .text%__1cIValueGenPlock_free_rinfo6MpnLInstruction_pnJValueType__nFRInfo__; +text: .text%__1cIRegAllocMhas_free_reg6kMpnJValueType__i_; +text: .text%__1cIRegAllocMhas_free_reg6kMnIValueTag__i_; +text: .text%__1cNc1_AllocTableMhas_one_free6kM_i_; +text: .text%__1cIRegAllocNget_lock_temp6MpnLInstruction_pnJValueType__nFRInfo__; +text: .text%__1cIRegAllocMget_free_reg6MpnJValueType__nFRInfo__; +text: .text%__1cIRegAllocMget_free_reg6MnIValueTag__nFRInfo__; +text: .text%__1cNc1_AllocTableIget_free6M_i_; +text: .text%__1cNc1_AllocTablePget_free_helper6Mi_i_; +text: .text%__1cIRegAllocIlock_reg6MpnLInstruction_nFRInfo_i_v_; +text: .text%__1cJRInfo2RegFdo_it6M_v_: c1_RegAlloc.o; +text: .text%__1cHLockRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocOset_locked_cpu6MipnLInstruction_i_v_; +text: .text%__1cNc1_AllocTableKset_locked6Mi_v_; +text: .text%__1cLCompilationIitem2lir6MpknEItem__pnLLIR_OprDesc__; +text: .text%__1cLCompilationKitem2stack6MpknEItem__i_; +text: .text%__1cJValueTypeNas_DoubleType6M_pnKDoubleType__: c1_ValueType.o; +text: .text%__1cMas_BasicType6FpnJValueType__nJBasicType__; +text: .text%__1cJValueTypeMas_ArrayType6M_pnJArrayType__: c1_ValueType.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_Compilation.o; +text: .text%__1cLLIR_EmitterEmove6MpnLLIR_OprDesc_nFRInfo__v_; +text: .text%__1cILIR_ListEmove6MpnLLIR_OprDesc_2pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenJitem_free6MpnEItem__v_; +text: .text%__1cIRegAllocPincr_spill_lock6MnFRInfo__v_; +text: .text%__1cQChangeSpillCountGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIValueGenFrfree6MpnEItem__v_; +text: .text%__1cIRegAllocPdecr_spill_lock6MnFRInfo__v_; +text: .text%__1cIRegAllocIfree_reg6MnFRInfo__v_; +text: .text%__1cHFreeRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocMset_free_cpu6Mi_v_; +text: .text%__1cNc1_AllocTableIset_free6Mi_v_; +text: .text%__1cIValueGenWrlock_result_with_hint6MpnLInstruction_pknEItem__nFRInfo__; +text: .text%__1cIValueGenFrlock6MpnLInstruction_pknEItem__nFRInfo__; +text: 
.text%__1cIRegAllocMget_lock_reg6MpnLInstruction_pnJValueType__nFRInfo__; +text: .text%__1cLLIR_EmitterKfield_load6MnFRInfo_pnHciField_pnLLIR_OprDesc_iiipnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListMload_mem_reg6MnFRInfo_i1nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIR.o; +text: .text%__1cIRegAllocHset_reg6MnFRInfo_ipnLInstruction__v_; +text: .text%__1cGSetRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocLset_cpu_reg6MiipnLInstruction__v_; +text: .text%__1cIValueGenNdo_StoreLocal6MpnKStoreLocal__v_; +text: .text%__1cEItemRhandle_float_kind6M_v_; +text: .text%__1cEItemNset_from_item6Mpk0_v_: c1_Items.o; +text: .text%__1cIValueGenXcan_inline_any_constant6kM_i_; +text: .text%__1cIValueGenSmust_copy_register6MpnEItem__i_; +text: .text%__1cIValueGenUcheck_float_register6MpnEItem__v_; +text: .text%__1cIRegAllocLis_free_reg6kMnFRInfo__i_; +text: .text%__1cJIsFreeRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cNc1_AllocTableHis_free6kMi_i_; +text: .text%__1cLLIR_EmitterJopr2local6MipnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListQreg2single_stack6MnFRInfo_inJBasicType__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenFdo_If6MpnCIf__v_; +text: .text%__1cIHintItemNset_from_item6MpknEItem__v_; +text: .text%__1cIHintItemEtype6kM_pnJValueType__: c1_Items.o; +text: .text%__1cJValueTypeMas_FloatType6M_pnJFloatType__: c1_ValueType.o; +text: .text%__1cIValueGenLdo_Constant6MpnIConstant__v_; +text: .text%__1cJValueTypeRas_ObjectConstant6M_pnOObjectConstant__: c1_Canonicalizer.o; +text: .text%__1cIValueGenOdont_load_item6MpnEItem__v_; +text: .text%__1cIValueGenWdont_load_item_nocheck6MpnEItem__v_; +text: .text%__1cLLIR_OprFactKvalue_type6FpnJValueType__pnLLIR_OprDesc__; +text: .text%__1cLLIR_EmitterFif_op6MinLInstructionJCondition_pnLLIR_OprDesc_4pnKBlockBegin_66pnMCodeEmitInfo__v_; +text: .text%__1cJLIR_ConstEtype6kM_nJBasicType__: c1_CacheLocals.o; +text: .text%__1cJLIR_ConstLas_constant6M_p0_: c1_CacheLocals.o; +text: .text%__1cLLIR_EmitterIlir_cond6MnLInstructionJCondition__nMLIR_OpBranchNLIR_Condition__; +text: .text%__1cILIR_ListDcmp6MnMLIR_OpBranchNLIR_Condition_pnLLIR_OprDesc_4pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnKBlockBegin__v_; +text: .text%__1cMLIR_OpBranch2t6Mn0ANLIR_Condition_pnKBlockBegin_pnMCodeEmitInfo__v_; +text: .text%__1cEItemEtype6kM_pnJValueType__: c1_CodeGenerator.o; +text: .text%__1cJArrayTypeMas_ArrayType6M_p0_: c1_ValueType.o; +text: .text%__1cLLIR_EmitterHopr2int6MpnLLIR_OprDesc__i_; +text: .text%__1cILIR_ListJint2stack6Mii_v_: c1_LIREmitter.o; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Loops.o; +text: .text%__1cNCachingChangeFvisit6MpnSInstructionVisitor__v_: c1_Loops.o; +text: .text%__1cIValueGenQdo_CachingChange6MpnNCachingChange__v_; +text: .text%__1cIValueGenPdo_ArithmeticOp6MpnMArithmeticOp__v_; +text: .text%__1cIValueGenTdo_ArithmeticOp_Int6MpnMArithmeticOp__v_; +text: .text%__1cIValueGenOload_item_hint6MpnEItem_pk1_v_; +text: .text%__1cEItemRget_jint_constant6kM_i_; +text: .text%__1cLLIR_EmitterRarithmetic_op_int6MnJBytecodesECode_pnLLIR_OprDesc_44nFRInfo__v_; +text: .text%__1cLLIR_EmitterNarithmetic_op6MnJBytecodesECode_pnLLIR_OprDesc_44inFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterYstrength_reduce_multiply6MpnLLIR_OprDesc_i22_i_; +text: .text%__1cILIR_ListHreg2reg6MnFRInfo_1nJBasicType__v_: c1_LIREmitter_x86.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: 
c1_LIREmitter_x86.o; +text: .text%__1cLlog2_intptr6Fi_i_: c1_LIREmitter_x86.o; +text: .text%__1cILIR_ListKshift_left6MpnLLIR_OprDesc_222_v_; +text: .text%__1cILIR_ListDsub6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter_x86.o; +text: .text%__1cIValueGenWcan_inline_as_constant6MpnEItem__i_; +text: .text%__1cIRegAllocPget_register_rc6kMnFRInfo__i_; +text: .text%__1cLGetRefCountGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cILIR_ListHreg2reg6MnFRInfo_1nJBasicType__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListDadd6MpnLLIR_OprDesc_22_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenOdo_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cJValueTypeLas_LongType6M_pnILongType__: c1_ValueType.o; +text: .text%__1cLAccessArrayKlock_stack6kM_pnKValueStack__: c1_Instruction.o; +text: .text%__1cMCodeEmitInfoVfill_expression_stack6M_v_; +text: .text%__1cLLIR_EmitterRarray_range_check6MpnLLIR_OprDesc_2pnMCodeEmitInfo_4_v_; +text: .text%__1cORangeCheckStub2t6MpnMCodeEmitInfo_nFRInfo_ii_v_; +text: .text%__1cMCodeEmitInfo2t6Mp0i_v_; +text: .text%__1cLLIR_EmitterLcmp_reg_mem6MnMLIR_OpBranchNLIR_Condition_nFRInfo_3inJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListLcmp_reg_mem6MnMLIR_OpBranchNLIR_Condition_nFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnICodeStub__v_; +text: .text%__1cMLIR_OpBranch2t6Mn0ANLIR_Condition_pnICodeStub_pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterMindexed_load6MnFRInfo_nJBasicType_pnLLIR_OprDesc_4pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterNarray_address6MpnLLIR_OprDesc_2inJBasicType__pnLLIR_Address__; +text: .text%__1cLLIR_AddressFscale6FnJBasicType__n0AFScale__; +text: .text%__1cILIR_ListEmove6MpnLLIR_Address_pnLLIR_OprDesc_pnMCodeEmitInfo__v_: c1_LIREmitter_x86.o; +text: .text%__1cIRegAllocNoops_in_spill6kM_pnIintStack__; +text: .text%__1cIRegAllocRoops_in_registers6kM_pnPRInfoCollection__; +text: .text%__1cIValueGenbDsafepoint_poll_needs_register6F_i_; +text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cLLIR_EmitterHgoto_op6MpnKBlockBegin_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListEjump6MpnKBlockBegin_pnMCodeEmitInfo__v_; +text: .text%__1cIValueGenNdo_StoreField6MpnKStoreField__v_; +text: .text%__1cIValueGenOscratch1_RInfo6kM_nFRInfo__; +text: .text%__1cIValueGenUprefer_alu_registers6kM_i_; +text: .text%__1cLLIR_EmitterLfield_store6MpnHciField_pnLLIR_OprDesc_i4iipnMCodeEmitInfo_nFRInfo__v_; +text: .text%__1cILIR_ListNstore_mem_reg6MnFRInfo_1inJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cIValueGenJdo_Return6MpnGReturn__v_; +text: .text%__1cJValueTypeLas_VoidType6M_pnIVoidType__: c1_ValueType.o; +text: .text%__1cIValueGenTresult_register_for6FpnJValueType_i_nFRInfo__; +text: .text%__1cIValueGenMreturn1RInfo6F_nFRInfo__; +text: .text%__1cIValueGenPload_item_force6MpnEItem_nFRInfo__v_; +text: .text%__1cIValueGenPlock_spill_temp6MpnLInstruction_nFRInfo__v_; +text: .text%__1cIRegAllocJlock_temp6MpnLInstruction_nFRInfo__v_; +text: .text%__1cLLIR_EmitterJreturn_op6MpnLLIR_OprDesc__v_; +text: .text%__1cNCodeGeneratorXclear_instruction_items6FpnKBlockBegin__v_; +text: .text%__1cQLIR_LocalCaching2t6MpnCIR__v_; +text: .text%__1cQLIR_LocalCachingQpreferred_locals6MpknIciMethod__pnMLocalMapping__; +text: .text%__1cMLocalMappingQinit_cached_regs6M_v_; +text: .text%__1cPRegisterManager2t6M_v_; +text: .text%__1cMLocalMappingNget_cache_reg6kMi_nFRInfo__; +text: .text%__1cQLIR_LocalCachingVcompute_cached_locals6M_v_; 
+text: .text%__1cQLIR_LocalCachingMcache_locals6M_v_; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_IR.o; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_Canonicalizer.o; +text: .text%__1cNCachingChangeQas_CachingChange6M_p0_: c1_Loops.o; +text: .text%__1cRBlockListScanInfo2t6MpnJBlockList__v_: c1_CacheLocals.o; +text: .text%__1cOLIR_OprRefList2t6M_v_: c1_CacheLocals.o; +text: .text%__1cRBlockListScanInfoItraverse6MpnKBlockBegin_pnKLIR_OpList__v_: c1_CacheLocals.o; +text: .text%__1cLLIR_OpLabelFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cHLIR_Op1Fvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cPRegisterManagerElock6MnFRInfo__v_; +text: .text%__1cHLIR_Op2Fvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cMLIR_OpBranchFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cORangeCheckStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_CodeStubs_x86.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeStubs_x86.o; +text: .text%__1cNc1_AllocTableFmerge6Mp0_v_; +text: .text%__1cGLIR_OpFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQLIR_LocalCachingXcache_locals_for_blocks6MpnJBlockList_pnPRegisterManager_i_pnMLocalMapping__; +text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_Loops.o; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Loops.o; +text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_Loops.o; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_IR.o; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_IR.o; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_IR.o; +text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_IR.o; +text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_IR.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_IR.o; +text: .text%__1cKScanBlocksQmost_used_locals6M_pnKALocalList__; +text: .text%__1cKScanBlocksMint_count_at6kMi_i_; +text: .text%__1cKScanBlocksIcount_at6kMnIValueTag_i_i_; +text: .text%__1cKScanBlocksJget_array6kMnIValueTag__pknIintStack__; +text: .text%__1cKScanBlocksNlong_count_at6kMi_i_; +text: .text%__1cKScanBlocksMobj_count_at6kMi_i_; +text: .text%__1cKScanBlocksLis_obj_only6kMi_i_; +text: .text%__1cKScanBlocksLis_int_only6kMi_i_; +text: .text%__1cGALocalUsort_by_access_count6Fpp02_i_: c1_ScanBlocks.o; +text: .text%__1cQLIR_LocalCachingPcompute_caching6MpnKALocalList_pnPRegisterManager__pnMLocalMapping__; +text: .text%__1cPRegisterManagerMnum_free_cpu6M_i_; +text: .text%__1cMLocalMappingNget_cache_reg6kMinIValueTag__nFRInfo__; +text: .text%__1cPRegisterManagerMhas_free_reg6MnIValueTag__i_; +text: .text%__1cPRegisterManagerNlock_free_reg6MnIValueTag__nFRInfo__; +text: .text%__1cQLIR_LocalCachingQadd_at_all_names6FpnPRInfoCollection_inFRInfo_pnMWordSizeList__v_; +text: .text%__1cIintStackEgrow6Mki1_v_: c1_CacheLocals.o; +text: .text%__1cMLocalMappingFmerge6Mp0_v_; +text: .text%__1cGALocalNsort_by_index6Fpp02_i_: c1_CacheLocals.o; +text: .text%__1cSLocalMappingSetterIblock_do6MpnKBlockBegin__v_; +text: .text%__1cMLocalMappingEjoin6Mp0_v_; +text: .text%__1cPRegisterManagerLis_free_reg6MnFRInfo__i_; +text: .text%__1cQLIR_LocalCachingYinsert_transition_blocks6M_v_; +text: .text%__1cPBlockTransitionIblock_do6MpnKBlockBegin__v_: c1_CacheLocals.o; +text: .text%__1cGLIR_OpLas_OpBranch6M_pnMLIR_OpBranch__: c1_LIR.o; +text: .text%__1cMLocalMappingPemit_transition6FpnILIR_List_p03pnCIR__v_; +text: 
.text%__1cCIRThighest_used_offset6kM_i_; +text: .text%__1cILIR_ListQreg2single_stack6MnFRInfo_inJBasicType__v_: c1_CacheLocals.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CacheLocals.o; +text: .text%__1cILIR_ListQsingle_stack2reg6MinFRInfo_nJBasicType__v_; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_Instruction.o; +text: .text%__1cLCompilationbBemit_code_prolog_non_native6MpnIFrameMap__v_; +text: .text%__1cHIRScopeJmax_stack6kM_i_; +text: .text%__1cNLIR_Optimizer2t6MpnCIR__v_; +text: .text%__1cRLIR_PeepholeState2t6M_v_; +text: .text%__1cRLIR_PeepholeStateKinitialize6MpnMLocalMapping__v_; +text: .text%__1cRLIR_PeepholeStateMclear_values6M_v_; +text: .text%__1cOLIR_OprRefList2t6M_v_: c1_LIROptimizer.o; +text: .text%__1cNLIR_OptimizerIoptimize6M_v_; +text: .text%__1cNLIR_OptimizerIoptimize6MpnJBlockList__v_; +text: .text%__1cNLIR_OptimizerIoptimize6MpnKBlockBegin__v_; +text: .text%__1cNLIR_OptimizerMblock_prolog6M_v_; +text: .text%__1cNLIR_OptimizerKprocess_op6M_v_; +text: .text%__1cGLIR_OpGas_Op16M_pnHLIR_Op1__: c1_LIR.o; +text: .text%__1cLLIR_OpLabelKas_OpLabel6M_p0_: c1_LIR.o; +text: .text%__1cRLIR_PeepholeStateVfinish_forward_branch6MpnFLabel__v_; +text: .text%__1cJLabelListIindex_of6kMkpnFLabel__i_: c1_LIROptimizer.o; +text: .text%__1cRLIR_PeepholeStateYset_disable_optimization6Mi_v_; +text: .text%__1cLLIR_OpLabelJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerMemit_opLabel6MpnLLIR_OpLabel__v_; +text: .text%__1cNLIR_OptimizerFvisit6M_v_: c1_LIROptimizer_x86.o; +text: .text%__1cHLIR_Op0Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op06MpnHLIR_Op0__v_; +text: .text%__1cHLIR_Op2Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op26MpnHLIR_Op2__v_; +text: .text%__1cNLIR_OptimizerKhandle_opr6MpnLLIR_OprDesc_nQLIR_OpVisitStateHOprMode__2_; +text: .text%__1cNLIR_OptimizerJis_cached6MpnLLIR_OprDesc__i_; +text: .text%__1cNLIR_OptimizerUrecord_opr_reference6MpnLLIR_OprDesc__v_; +text: .text%__1cRLIR_PeepholeStateUrecord_opr_reference6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateLdefining_op6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateJreg2index6MpnLLIR_OprDesc__i_; +text: .text%__1cIintStackEgrow6Mki1_v_: c1_LIROptimizer.o; +text: .text%__1cNLIR_OptimizerMblock_epilog6M_v_; +text: .text%__1cRLIR_PeepholeStateRis_safe_to_delete6kMi_i_; +text: .text%__1cHLIR_Op1Gas_Op16M_p0_: c1_LIR.o; +text: .text%__1cHLIR_Op1Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op16MpnHLIR_Op1__v_; +text: .text%__1cNLIR_OptimizerMprocess_move6MpnHLIR_Op1__v_; +text: .text%__1cMLocalMappingNget_cache_reg6kMpnLLIR_OprDesc__2_; +text: .text%__1cRLIR_PeepholeStateTmark_safe_to_delete6Mi_v_; +text: .text%__1cNLIR_OptimizerRreplace_stack_opr6MpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerNoptimize_move6MpnHLIR_Op1_rpnLLIR_OprDesc_5_i_; +text: .text%__1cRLIR_PeepholeStatebFequivalent_register_or_constant6MpnLLIR_OprDesc__2_; +text: .text%__1cRLIR_PeepholeStateOequivalent_opr6MpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerKmaybe_opto6MpnLLIR_OprDesc_2_2_: c1_LIROptimizer_x86.o; +text: .text%__1cNLIR_OptimizerMis_cache_reg6MpnLLIR_OprDesc__i_; +text: .text%__1cMLocalMappingMis_cache_reg6kMpnLLIR_OprDesc__i_; +text: .text%__1cMLocalMappingMis_cache_reg6kMnFRInfo__i_; +text: .text%__1cRLIR_PeepholeStateSequivalent_address6MpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerRresult_substitute6M_v_; +text: 
.text%__1cNLIR_OptimizerRnext_op_with_code6MnILIR_Code__pnGLIR_Op__; +text: .text%__1cNLIR_OptimizerFop_at6Mi_pnGLIR_Op__; +text: .text%__1cRLIR_PeepholeStateMkill_operand6MpnLLIR_OprDesc_i_v_; +text: .text%__1cRLIR_PeepholeStateQkill_equivalents6MpnLLIR_OprDesc__v_; +text: .text%__1cRLIR_PeepholeStateNkill_register6Mi_v_; +text: .text%__1cRLIR_PeepholeStateSrecord_defining_op6MpnLLIR_OprDesc_i_v_; +text: .text%__1cRLIR_PeepholeStatePset_defining_op6Mii_v_; +text: .text%__1cRLIR_PeepholeStateHdo_move6MpnLLIR_OprDesc_2_v_; +text: .text%__1cLLIR_OprListEgrow6MkikpnLLIR_OprDesc__v_: c1_LIROptimizer.o; +text: .text%__1cLLIR_AddressKas_address6M_p0_: c1_LIR.o; +text: .text%__1cRLIR_PeepholeStateTequivalent_register6MpnLLIR_OprDesc__2_; +text: .text%__1cKLIR_OprPtrLas_constant6M_pnJLIR_Const__: c1_LIR.o; +text: .text%__1cNLIR_OptimizerKallow_opto6M_i_; +text: .text%__1cNLIR_OptimizerLrecord_opto6MpnLLIR_OprDesc_2_2_; +text: .text%__1cLLIR_AddressEtype6kM_nJBasicType__: c1_LIR.o; +text: .text%__1cRLIR_PeepholeStateNincrement_ref6Mi_v_; +text: .text%__1cKLIR_OprPtrKas_address6M_pnLLIR_Address__: c1_CacheLocals.o; +text: .text%__1cMLIR_OpBranchLas_OpBranch6M_p0_: c1_LIR.o; +text: .text%__1cMLIR_OpBranchJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerNemit_opBranch6MpnMLIR_OpBranch__v_; +text: .text%__1cNLIR_OptimizerQopr_live_on_exit6MpnLLIR_OprDesc__i_; +text: .text%__1cNResourceArrayJremove_at6MIi_v_; +text: .text%__1cRLIR_PeepholeStateLstack2index6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStatePkill_stack_slot6Mi_v_; +text: .text%__1cRLIR_PeepholeStatebCequivalent_register_or_stack6MpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerKmaybe_opto6MpnLLIR_OprDesc_2_2_: c1_LIROptimizer.o; +text: .text%__1cNLIR_OptimizerLhandle_info6MpnMCodeEmitInfo__v_; +text: .text%__1cMCodeEmitInfoRset_local_mapping6MpnMLocalMapping__v_; +text: .text%__1cNLIR_OptimizerUrecord_register_oops6MpnMCodeEmitInfo__v_; +text: .text%__1cNLIR_OptimizerOemit_code_stub6MpnICodeStub__v_; +text: .text%__1cLCompilationOemit_code_body6MpnLCodeOffsets__i_; +text: .text%__1cNLIR_Assembler2t6MpnLCompilation_pnLCodeOffsets__v_; +text: .text%__1cNConstantTable2t6M_v_; +text: .text%__1cNLIR_AssemblerJemit_code6MpnJBlockList__v_; +text: .text%__1cQCollectConstantsIblock_do6MpnKBlockBegin__v_: c1_LIRAssembler.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_IR.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Canonicalizer.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Instruction.o; +text: .text%__1cJValueTypeNas_DoubleType6M_pnKDoubleType__: c1_Canonicalizer.o; +text: .text%__1cJValueTypeMas_FloatType6M_pnJFloatType__: c1_Canonicalizer.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Loops.o; +text: .text%__1cNLIR_AssemblerOemit_constants6M_v_; +text: .text%__1cNConstantTableMemit_entries6MpnOMacroAssembler_i_v_; +text: .text%__1cLLIR_CodeGenIblock_do6MpnKBlockBegin__v_; +text: .text%__1cNLIR_AssemblerPcheck_codespace6M_v_; +text: .text%__1cNLIR_AssemblerMemit_opLabel6MpnLLIR_OpLabel__v_; +text: .text%__1cNLIR_AssemblerIemit_op06MpnHLIR_Op0__v_; +text: .text%__1cNLIR_AssemblerIemit_op26MpnHLIR_Op2__v_; +text: .text%__1cNLIR_AssemblerMneeds_icache6kMpnIciMethod__i_; +text: .text%__1cFRInfoLas_register6kM_pnMRegisterImpl__; +text: .text%__1cNLIR_AssemblerMcheck_icache6MpnMRegisterImpl_2_i_; +text: .text%__1cRC1_MacroAssemblerSinline_cache_check6MpnMRegisterImpl_2_v_; +text: .text%__1cRC1_MacroAssemblerOverified_entry6M_v_; +text: 
.text%__1cNLIR_AssemblerLbuild_frame6M_v_; +text: .text%__1cNLIR_AssemblerbBinitial_frame_size_in_bytes6M_i_; +text: .text%__1cIFrameMapJframesize6kM_i_; +text: .text%__1cRC1_MacroAssemblerLbuild_frame6Mi_v_; +text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: c1_Compiler.o; +text: .text%__1cNLIR_AssemblerVsetup_locals_at_entry6M_v_; +text: .text%__1cIFrameMapYsignature_type_array_for6FpknIciMethod__pnNBasicTypeList__; +text: .text%__1cIFrameMapScalling_convention6FpknIciMethod_pnIintArray__pnRCallingConvention__; +text: .text%__1cIFrameMapScalling_convention6FirknOBasicTypeArray_pnIintArray__pnRCallingConvention__; +text: .text%__1cIintArray2t6Mki1_v_: c1_FrameMap_x86.o; +text: .text%__1cIFrameMapRname_for_argument6Fi_i_; +text: .text%__1cIFrameMapSfp_offset_for_name6kMiii_i_; +text: .text%__1cIFrameMapPnum_local_names6kM_i_; +text: .text%__1cIFrameMapNlocal_to_slot6kMii_i_; +text: .text%__1cIFrameMapSfp_offset_for_slot6kMi_i_; +text: .text%__1cQArgumentLocation2t6Mci_v_: c1_FrameMap_x86.o; +text: .text%__1cQArgumentLocationSset_stack_location6Mi_v_; +text: .text%__1cIFrameMapQaddress_for_name6kMiii_nHAddress__; +text: .text%__1cIFrameMapQmake_new_address6kMi_nHAddress__; +text: .text%__1cNLIR_AssemblerIemit_op16MpnHLIR_Op1__v_; +text: .text%__1cNLIR_AssemblerHmove_op6MpnLLIR_OprDesc_2nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerHmem2reg6MpnLLIR_Address_nFRInfo_nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerKas_Address6MpnLLIR_Address__nHAddress__; +text: .text%__1cNLIR_AssemblerHcomp_op6MnMLIR_OpBranchNLIR_Condition_pnLLIR_OprDesc_4nJBasicType__v_; +text: .text%__1cNLIR_AssemblerNemit_opBranch6MpnMLIR_OpBranch__v_; +text: .text%__1cNLIR_AssemblerJreg2stack6MnFRInfo_inJBasicType__v_; +text: .text%__1cNLIR_AssemblerLconst2stack6MpnJLIR_Const_i_v_; +text: .text%__1cNLIR_AssemblerJstack2reg6MpnLLIR_OprDesc_2nJBasicType__v_; +text: .text%__1cNLIR_AssemblerHreg2reg6MnFRInfo_1_v_; +text: .text%__1cNLIR_AssemblerJmove_regs6MpnMRegisterImpl_2_v_; +text: .text%__1cNLIR_AssemblerIshift_op6MnILIR_Code_nFRInfo_i2_v_; +text: .text%__1cNLIR_AssemblerIarith_op6MnILIR_Code_pnLLIR_OprDesc_33pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerbIadd_debug_info_for_null_check_here6MpnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerLcode_offset6kM_i_; +text: .text%__1cNLIR_AssemblerbDadd_debug_info_for_null_check6MipnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerOemit_code_stub6MpnICodeStub__v_; +text: .text%__1cVImplicitNullCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerCpc6kM_pC_; +text: .text%__1cICodeStubLset_code_pc6MpC_v_: c1_CodeStubs_x86.o; +text: .text%__1cICodeStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o; +text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler.o; +text: .text%__1cORangeCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerOsafepoint_poll6MnFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerZadd_debug_info_for_branch6MpnMCodeEmitInfo__v_; +text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cMCodeEmitInfoRrecord_debug_info6MpnYDebugInformationRecorder_ii_v_; +text: .text%__1cMCodeEmitInfoHoop_map6M_pnGOopMap__; +text: .text%__1cMCodeEmitInfoScompute_debug_info6M_v_; +text: .text%__1cMCodeEmitInfoOcreate_oop_map6M_pnGOopMap__; +text: 
.text%__1cIFrameMapRoop_map_arg_count6M_i_; +text: .text%__1cMCodeEmitInfoTrecord_spilled_oops6kMpnIFrameMap_pnGOopMap__v_; +text: .text%__1cKciLocalMapNindex_for_bci6kMi_i_; +text: .text%__1cSciLocalMapIteratorJfind_next6M_v_: c1_LIREmitter.o; +text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_LIREmitter.o; +text: .text%__1cMCodeEmitInfoNget_cache_reg6kMinIValueTag__nFRInfo__; +text: .text%__1cIFrameMapTsingle_word_regname6kMi_nHOptoRegEName__; +text: .text%__1cIFrameMapMfp2sp_offset6kMi_i_; +text: .text%__1cGOopMapHset_oop6MnHOptoRegEName_ii_v_; +text: .text%__1cMCodeEmitInfoVlir_stack2value_stack6MpnNGrowableArray4CpnLLIR_OprDesc____pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cMCodeEmitInfobCcompute_debug_info_for_scope6MpnHIRScope_ipnNGrowableArray4CpnKScopeValue___inGValues_i_pnQIRScopeDebugInfo__; +text: .text%__1cMCodeEmitInfobCscope_value_for_local_offset6MinILocationEType_ppnKScopeValue__4_; +text: .text%__1cMCodeEmitInfobEget_cache_reg_for_local_offset6kMi_nFRInfo__; +text: .text%__1cMLocalMappingbEget_cache_reg_for_local_offset6kMi_nFRInfo__; +text: .text%__1cMCodeEmitInfoZlocation_for_local_offset6MinILocationEType__1_; +text: .text%__1cIFrameMapZlocation_for_local_offset6kMinILocationEType_p1_i_; +text: .text%__1cIFrameMapWlocation_for_fp_offset6kMinILocationEType_p1_i_; +text: .text%__1cILocationVlegal_offset_in_bytes6Fi_i_; +text: .text%__1cMCodeEmitInfoYscope_value_for_register6MnFRInfo_ppnKScopeValue_4nILocationEType__v_; +text: .text%__1cGOopMapJdeep_copy6M_p0_; +text: .text%__1cGOopMap2t6Mn0ANDeepCopyToken_p0_v_; +text: .text%__1cMOopMapStream2t6MpnGOopMap__v_; +text: .text%__1cMOopMapStreamJfind_next6M_v_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: oopMap.o; +text: .text%__1cYDebugInformationRecorderNadd_safepoint6MiipnGOopMap__v_; +text: .text%__1cYDebugInformationRecorderLcheck_phase6Mn0AFPhase__v_; +text: .text%__1cYDebugInformationRecorderKadd_oopmap6MiipnGOopMap__v_; +text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__; +text: .text%__1cYDebugInformationRecorderWserialize_scope_values6MpnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfoRec.o; +text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfo.o; +text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: location.o; +text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__; +text: .text%__1cYDebugInformationRecorderYserialize_monitor_values6MpnNGrowableArray4CpnMMonitorValue____i_; +text: .text%__1cYDebugInformationRecorderOdescribe_scope6MpnIciMethod_ipnKDebugToken_44_v_; +text: .text%__1cYDebugInformationRecorderNappend_handle6MpnI_jobject__i_; +text: .text%__1cLOopRecorderOallocate_index6MpnI_jobject__i_; +text: .text%__1cLCompilationbEadd_exception_handlers_for_pco6MiipnOExceptionScope__v_; +text: .text%__1cNExceptionInfo2t6MiipnOExceptionScope__v_; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cNLIR_AssemblerHreg2mem6MnFRInfo_pnLLIR_Address_nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_OprDescGis_oop6kM_i_; +text: .text%__1cNLIR_AssemblerJreturn_op6MnFRInfo_i_v_; +text: .text%__1cWpoll_return_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: 
.text%__1cRC1_MacroAssemblerLmethod_exit6Mi_v_; +text: .text%__1cLCompilationQemit_code_epilog6MpnNLIR_Assembler__v_; +text: .text%__1cNLIR_AssemblerUemit_slow_case_stubs6M_v_; +text: .text%__1cNLIR_AssemblerKemit_stubs6MpnMCodeStubList__v_; +text: .text%__1cVImplicitNullCheckStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cWImplicitExceptionTableGappend6MII_v_; +text: .text%__1cZresource_reallocate_bytes6FpcII_0_; +text: .text%__1cFArenaIArealloc6MpvII_1_; +text: .text%__1cNLIR_AssemblerNadd_call_info6MipnMCodeEmitInfo__v_; +text: .text%__1cOdummy_location6FnIValueTag__pnKScopeValue__: c1_LIREmitter.o; +text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cORangeCheckStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNLIR_AssemblerWemit_exception_handler6M_i_; +text: .text%__1cRC1_MacroAssemblerRexception_handler6Mii_v_; +text: .text%__1cNLIR_AssemblerPemit_call_stubs6M_v_; +text: .text%__1cNLIR_AssemblerbCmaybe_adjust_stack_alignment6MpnIciMethod__v_; +text: .text%__1cKreal_index6FpnIFrameMap_i_i_: c1_LIRAssembler_x86.o; +text: .text%__1cLCompilationbEgenerate_exception_range_table6M_v_; +text: .text%__1cOExceptionScopeGequals6kMp0_i_; +text: .text%__1cLCompilationbBadd_exception_range_entries6MiipnOExceptionScope_ip2pi_v_; +text: .text%__1cTExceptionRangeTablebCcompute_modified_at_call_pco6Fii_i_; +text: .text%__1cOExceptionScopeMcaller_scope6kM_p0_; +text: .text%__1cLLIR_EmitterKframe_size6M_i_; +text: .text%__1cNLIR_Assembler2T6M_v_; +text: .text%__1cLCompilationMinstall_code6MpnLCodeOffsets_i_v_; +text: .text%__1cFciEnvPregister_method6MpnIciMethod_iiiiiipnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler_ii_v_; +text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_; +text: .text%__1cFciEnvbUsystem_dictionary_modification_counter_changed6M_i_; +text: .text%__1cHnmethodLnew_nmethod6FnMmethodHandle_iiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__p0_; +text: .text%__1cLOopRecorderIoop_size6M_i_; +text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_; +text: .text%__1cYDebugInformationRecorderJdata_size6M_i_; +text: .text%__1cHnmethod2n6FIi_pv_; +text: .text%__1cHnmethod2t6MpnNmethodOopDesc_iiiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__v_; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: codeBlob.o; +text: .text%__1cLPcDescCache2t6M_v_; +text: .text%__1cHnmFlagsFclear6M_v_; +text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_; +text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_; +text: .text%__1cICodeBlobJcopy_oops6MppnI_jobject_i_v_; +text: .text%__1cIUniverseMnon_oop_word6F_pv_; +text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_; +text: .text%__1cGPcDesc2t6Miii_v_; +text: .text%__1cHnmethodKcopy_pc_at6MipnGPcDesc__v_; +text: .text%__1cHnmethodSresolve_JNIHandles6M_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: nmethod.o; +text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_; +text: .text%__1cHnmethodKis_nmethod6kM_i_: nmethod.o; +text: .text%__1cHnmethodUnumber_of_dependents6kM_i_: nmethod.o; +text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_; +text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cTExceptionRangeTableHcopy_to6MpnHnmethod__v_; +text: 
.text%__1cKNativeJumpbEcheck_verified_entry_alignment6FpC1_v_; +text: .text%__1cGEventsDlog6FpkcE_v_: nmethod.o; +text: .text%__1cFciEnvKcompile_id6M_I_; +text: .text%__1cNmethodOopDescIset_code6MpnHnmethod__v_; +text: .text%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_; +text: .text%__1cLCompilation2T6M_v_; +text: .text%__1cFArena2T6M_v_; +text: .text%__1cFArenaRdestruct_contents6M_v_; +text: .text%__1cICHeapObj2k6Fpv_v_; +text: .text%__1cTExceptionRangeTable2T6M_v_; +text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_; +text: .text%__1cMelapsedTimerDadd6M0_v_; +text: .text%__1cFciEnv2T6M_v_; +text: .text%__1cNCompileBrokerUpop_jni_handle_block6F_v_; +text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_; +text: .text%__1cHnmethodKtotal_size6kM_i_; +text: .text%__1cHnmethodJcode_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodOexception_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodJstub_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodQscopes_data_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodPscopes_pcs_size6kM_i_: nmethod.o; +text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_; +text: .text%__1cSCompileTaskWrapper2T6M_v_; +text: .text%__1cNCompileBrokerJfree_task6FpnLCompileTask__v_; +text: .text%__1cLCompileTaskEfree6M_v_; +text: .text%__1cKJNIHandlesOdestroy_global6FpnI_jobject_i_v_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: reflection.o; +text: .text%__1cNArgumentCountDset6MinJBasicType__v_: reflection.o; +text: .text%__1cZget_mirror_from_signature6FnMmethodHandle_pnPSignatureStream_pnGThread__pnHoopDesc__; +text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__; +text: .text%__1cNSignatureInfoHdo_long6M_v_: reflection.o; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: reflection.o; +text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__; +text: .text%__1cKReflectionbFbasic_type_mirror_to_basic_type6FpnHoopDesc_pnGThread__nJBasicType__; +text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cQSystemDictionaryQjava_mirror_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cKReflectionTunbox_for_primitive6FpnHoopDesc_pnGjvalue_pnGThread__nJBasicType__; +text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__; +text: .text%__1cUGenericGrowableArrayGgrow646Mi_v_; +text: .text%__1cRComputeEntryStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_; +text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_; +text: .text%__1cQComputeCallStackHdo_void6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__; +text: .text%__1cLInstructionMas_CompareOp6M_pnJCompareOp__: c1_GraphBuilder.o; +text: .text%__1cLInstructionNas_InstanceOf6M_pnKInstanceOf__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderMnew_instance6Mi_v_; +text: .text%__1cQciBytecodeStreamJget_klass6kM_pnHciKlass__; +text: .text%__1cQciBytecodeStreamPget_klass_index6kM_i_; +text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cIciSymbolHbyte_at6Mi_i_; +text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__; +text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__; +text: .text%__1cGciTypeMis_classless6kM_i_: ciInstanceKlass.o; +text: .text%__1cMGraphBuilderMappend_split6MpnKStateSplit__pnLInstruction__; +text: 
.text%__1cLNewInstanceFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerOdo_NewInstance6MpnLNewInstance__v_; +text: .text%__1cLInstructionEhash6kM_i_: c1_Instruction.o; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_Instruction.o; +text: .text%__1cKValueStackMclear_locals6M_v_; +text: .text%__1cKValueStackMclear_stores6M_v_; +text: .text%__1cKValueStackZpin_stack_for_state_split6M_v_; +text: .text%__1cLNewInstanceIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cMGraphBuilderIstack_op6MnJBytecodesECode__v_; +text: .text%__1cMGraphBuilderGinvoke6MnJBytecodesECode__v_; +text: .text%__1cQciBytecodeStreamKget_method6kM_pnIciMethod__; +text: .text%__1cQciBytecodeStreamQget_method_index6kM_i_; +text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__; +text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__; +text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_; +text: .text%__1cNciMethodKlassEmake6F_p0_; +text: .text%__1cIciObjectMis_classless6kM_i_: ciMethod.o; +text: .text%__1cQciBytecodeStreambAget_declared_method_holder6M_pnHciKlass__; +text: .text%__1cQciBytecodeStreamXget_method_holder_index6M_i_; +text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__; +text: .text%__1cKValueStackNpop_arguments6Mi_pnGValues__; +text: .text%__1cGInvoke2t6MnJBytecodesECode_pnJValueType_pnLInstruction_pnGValues_iiii_v_; +text: .text%__1cGInvokeFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerJdo_Invoke6MpnGInvoke__v_; +text: .text%__1cGInvokeJas_Invoke6M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_Instruction.o; +text: .text%__1cGInvokeIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cMGraphBuilderIthrow_op6M_v_; +text: .text%__1cFThrowFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerIdo_Throw6MpnFThrow__v_; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_Instruction.o; +text: .text%__1cFThrowIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_Instruction.o; +text: .text%__1cFThrowIas_Throw6M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_Instruction.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_Instruction.o; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorOdo_NewInstance6MpnLNewInstance__v_; +text: .text%__1cTNullCheckEliminatorShandle_NewInstance6MpnLNewInstance__v_; +text: .text%__1cGInvokePinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorJdo_Invoke6MpnGInvoke__v_; +text: .text%__1cTNullCheckEliminatorNhandle_Invoke6MpnGInvoke__v_; +text: .text%__1cFThrowPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorIdo_Throw6MpnFThrow__v_; +text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_IR.o; +text: .text%__1cLInstructionGnegate6Fn0AJCondition__1_; +text: .text%__1cFThrowPstate_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cFRInfoIoverlaps6kMk0_i_; +text: .text%__1cIValueGenOdo_NewInstance6MpnLNewInstance__v_; +text: 
.text%__1cIValueGenVspill_values_on_stack6MpnKValueStack_nFRInfo_i_v_; +text: .text%__1cIRegAllocNlock_register6MpnLInstruction_nFRInfo__v_; +text: .text%__1cHHideReg2t6MpnIValueGen_pnJValueType__v_; +text: .text%__1cHHideReg2T6M_v_; +text: .text%__1cLLIR_EmitterMnew_instance6MnFRInfo_pnPciInstanceKlass_1111pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterZjobject2reg_with_patching6MnFRInfo_pnIciObject_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListNoop2reg_patch6MpnI_jobject_nFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cPNewInstanceStub2t6MnFRInfo_pnLLIR_OprDesc_pnPciInstanceKlass_pnMCodeEmitInfo_nIRuntime1GStubID__v_; +text: .text%__1cIValueGenJdo_Invoke6MpnGInvoke__v_; +text: .text%__1cIValueGenWinvoke_visit_arguments6MpnGInvoke_pnRCallingConvention__pnJItemArray__; +text: .text%__1cIValueGenNis_free_rinfo6MnFRInfo__i_; +text: .text%__1cGInvokeRsize_of_arguments6kM_i_; +text: .text%__1cLLIR_EmitterVstore_stack_parameter6MpnLLIR_OprDesc_i_v_; +text: .text%__1cILIR_ListFstore6MpnLLIR_OprDesc_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cHHideReg2t6MpnIValueGen_nFRInfo_i_v_; +text: .text%__1cIValueGenVinvoke_load_arguments6MpnGInvoke_pnJItemArray_pnRCallingConvention__v_; +text: .text%__1cIValueGenPinvoke_do_spill6MpnGInvoke_nFRInfo__v_; +text: .text%__1cIValueGenXis_caller_save_register6FnFRInfo__i_; +text: .text%__1cIValueGenLspill_value6MpnLInstruction__v_; +text: .text%__1cIValueGenKspill_item6MpnEItem__v_; +text: .text%__1cIValueGenQround_spill_item6MpnEItem_i_v_; +text: .text%__1cIRegAllocOget_lock_spill6MpnLInstruction_i_i_; +text: .text%__1cIValueGenJraw_rfree6MpnEItem__v_; +text: .text%__1cLLIR_EmitterFspill6MipnLLIR_OprDesc__v_; +text: .text%__1cIFrameMapKspill_name6kMi_i_; +text: .text%__1cIValueGenQinvoke_do_result6MpnGInvoke_ipnEItem__v_; +text: .text%__1cIVoidTypeLas_VoidType6M_p0_: c1_ValueType.o; +text: .text%__1cLCompilationXlir_opr_for_instruction6MpnLInstruction__pnLLIR_OprDesc__; +text: .text%__1cLLIR_EmitterHcall_op6MnJBytecodesECode_pknOBasicTypeArray_pnMCodeEmitInfo_iiinFRInfo_pnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListKnull_check6MnFRInfo_pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListQcall_opt_virtual6MnFRInfo_pnLLIR_OprDesc_pCpnMCodeEmitInfo_pnOStaticCallStub__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenIdo_Throw6MpnFThrow__v_; +text: .text%__1cLNewInstanceKexact_type6kM_pnGciType__; +text: .text%__1cOExceptionScopeLcould_catch6kMpnPciInstanceKlass_i_i_; +text: .text%__1cIValueGenRexceptionOopRInfo6F_nFRInfo__; +text: .text%__1cIValueGenFsfree6MpnEItem__v_; +text: .text%__1cIRegAllocKfree_spill6MipnJValueType__v_; +text: .text%__1cIRegAllocNis_free_spill6kMipnJValueType__i_; +text: .text%__1cLNewInstanceOas_NewInstance6M_p0_: c1_Instruction.o; +text: .text%__1cIValueGenQexceptionPcRInfo6F_nFRInfo__; +text: .text%__1cILIR_ListPthrow_exception6MnFRInfo_1pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator.o; +text: .text%__1cPNewInstanceStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cOLIR_OpJavaCallFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_LIR.o; +text: .text%__1cOStaticCallStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cIFrameMapWcaller_save_cpu_reg_at6Fi_pnLLIR_OprDesc__; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_Instruction.o; +text: .text%__1cIVoidTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: 
.text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_Instruction.o; +text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_Instruction.o; +text: .text%__1cRLIR_PeepholeStateHdo_call6M_v_; +text: .text%__1cOLIR_OpJavaCallJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerJemit_call6MpnOLIR_OpJavaCall__v_; +text: .text%__1cNLIR_AssemblerJconst2reg6MpnJLIR_Const_nFRInfo_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cMPatchingStubQalign_patch_site6MpnOMacroAssembler__v_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_pnI_jobject__v_; +text: .text%__1cOoop_RelocationLunpack_data6M_v_; +text: .text%__1cKRelocationNunpack_2_ints6Mri1_v_: relocInfo.o; +text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cOoop_RelocationJpack_data6M_i_; +text: .text%__1cNLIR_AssemblerPpatching_epilog6MpnMPatchingStub_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_; +text: .text%__1cMPatchingStubHinstall6MpnOMacroAssembler_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_: c1_LIRAssembler.o; +text: .text%__1cNLIR_AssemblerUappend_patching_stub6MpnMPatchingStub__v_; +text: .text%__1cPNewInstanceStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerJemit_call6MpnOLIR_OpJavaCall__v_; +text: .text%__1cNLIR_AssemblerKalign_call6MnILIR_Code__v_; +text: .text%__1cICodeStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cOStaticCallStubLset_code_pc6MpC_v_: c1_CodeStubs_x86.o; +text: .text%__1cOStaticCallStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerEcall6MpCnJrelocInfoJrelocType_pnMCodeEmitInfo__v_; +text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cKRelocationJpack_data6M_i_: relocInfo.o; +text: .text%__1cMCodeEmitInfoSappend_scope_value6MpnLLIR_OprDesc_pnNGrowableArray4CpnKScopeValue____v_; +text: .text%__1cMCodeEmitInfoRopr2location_type6MpnLLIR_OprDesc__nILocationEType__; +text: .text%__1cMCodeEmitInfoRlocation_for_name6MinILocationEType_ii_1_; +text: .text%__1cIFrameMapRlocation_for_name6kMinILocationEType_p1ii_i_; +text: .text%__1cNLIR_AssemblerIthrow_op6MnFRInfo_1pnMCodeEmitInfo_i_v_; +text: .text%__1cMCodeEmitInfoQadd_register_oop6MnFRInfo__v_; +text: .text%__1cIintArrayIindex_of6kMki_i_: c1_LIREmitter.o; +text: .text%__1cMCodeEmitInfoYadd_registers_to_oop_map6MpnGOopMap__v_; +text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cYinternal_word_RelocationJpack_data6M_i_; +text: .text%__1cJrelocInfoKset_format6Mi_v_; +text: .text%__1cMPatchingStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cRAbstractAssemblerGa_byte6Mi_v_; +text: .text%__1cRNativeGeneralJumpUinsert_unconditional6FpC1_v_; +text: .text%__1cNRelocIterator2t6MpnKCodeBuffer_pC3_v_; +text: .text%__1cJrelocInfobDchange_reloc_info_for_address6FpnNRelocIterator_pCn0AJrelocType_4_v_; +text: .text%__1cJrelocInfoIset_type6Mn0AJrelocType__v_; +text: .text%__1cPNewInstanceStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cJOopMapSetMgrow_om_data6M_v_; +text: .text%__1cOStaticCallStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cWstatic_stub_RelocationJpack_data6M_i_; +text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_; +text: .text%__1cOCallRelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_; +text: 
.text%__1cYinternal_word_RelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cYinternal_word_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cYinternal_word_RelocationGtarget6M_pC_; +text: .text%__1cODataRelocationJset_value6MpC_v_: relocInfo.o; +text: .text%__1cODataRelocationGoffset6M_i_: relocInfo.o; +text: .text%__1cKRelocationRpd_set_data_value6MpCi_v_; +text: .text%__1cKRelocationSpd_address_in_code6M_ppC_; +text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nmethod.o; +text: .text%__1cOoop_RelocationHoops_do6MpFppnHoopDesc__v_v_; +text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__; +text: .text%__1cRresolve_and_patch6FppnHoopDesc__v_; +text: .text%__1cMPeriodicTaskOreal_time_tick6FI_v_; +text: .text%__1cPStatSamplerTaskEtask6M_v_: statSampler.o; +text: .text%__1cLStatSamplerOcollect_sample6F_v_; +text: .text%__1cLStatSamplerLsample_data6FpnMPerfDataList__v_; +text: .text%jni_GetPrimitiveArrayCritical: jni.o; +text: .text%jni_ReleasePrimitiveArrayCritical: jni.o; +text: .text%jni_SetBooleanField: jni.o; +text: .text%__1cNFingerprinterIdo_float6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_: interpreterRuntime.o; +text: .text%JVM_IsNaN; +text: .text%__1cNFingerprinterJdo_double6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_: interpreterRuntime.o; +text: .text%__1cXNativeSignatureIteratorLpass_double6M_v_: interpreterRuntime.o; +text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_; +text: .text%jni_GetArrayLength: jni.o; +text: .text%JVM_Read; +text: .text%__1cDhpiEread6FipvI_I_: jvm.o; +text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__; +text: .text%__1cRComputeEntryStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackHdo_char6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__; +text: .text%__1cMLinkResolverbNlinktime_resolve_virtual_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cIciMethodXfind_monomorphic_target6MpnHciKlass_22_p0_; +text: .text%__1cDCHAManalyze_call6FnLKlassHandle_11nMsymbolHandle_2_pnJCHAResult__; +text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cJCHAResult2t6MnLKlassHandle_nMsymbolHandle_2pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___n0E_i_v_; +text: .text%__1cJCHAResultOis_monomorphic6kM_i_; +text: .text%__1cJCHAResultSmonomorphic_target6kM_nMmethodHandle__; +text: .text%__1cIciMethodJwill_link6MpnHciKlass_2nJBytecodesECode__i_; +text: .text%__1cMGraphBuilderKtry_inline6MpnIciMethod_i_i_; +text: .text%__1cMGraphBuilderUclear_inline_bailout6M_v_; +text: .text%__1cIciMethodOshould_exclude6M_i_; +text: .text%__1cIciMethodPcan_be_compiled6M_i_; +text: .text%__1cMGraphBuilderVtry_inline_intrinsics6MpnIciMethod__i_; +text: .text%__1cMGraphBuilderPtry_inline_full6MpnIciMethod_i_i_; +text: .text%__1cIciMethodIhas_jsrs6kM_i_; +text: .text%__1cMGraphBuilderWrecursive_inline_level6kMpnIciMethod__i_; +text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__; +text: .text%__1cJNullCheckFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_NullCheck6MpnJNullCheck__v_; +text: .text%__1cJNullCheckIcan_trap6kM_i_: 
c1_GraphBuilder.o; +text: .text%__1cKObjectTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_ValueStack.o; +text: .text%__1cMGraphBuilderKpush_scope6MpnIciMethod_pnKBlockBegin_i_v_; +text: .text%__1cKValueStackKpush_scope6MpnHIRScope__p0_; +text: .text%__1cOExceptionScopeKpush_scope6M_p0_; +text: .text%__1cOExceptionScope2t6Mp0_v_; +text: .text%__1cHIRScopeXcompute_lock_stack_size6M_v_; +text: .text%__1cMGraphBuilderJScopeDataRcaller_stack_size6kM_i_; +text: .text%__1cMGraphBuilderJScopeDataLnum_returns6M_i_; +text: .text%__1cMGraphBuilderJScopeDataXset_inline_cleanup_info6MpnKBlockBegin_pnLInstruction_pnKValueStack__v_; +text: .text%__1cMGraphBuilderJScopeDataQincr_num_returns6M_v_; +text: .text%__1cKValueStackJpop_scope6Mii_p0_; +text: .text%__1cMGraphBuilderJpop_scope6M_v_; +text: .text%__1cMGraphBuilderTpop_exception_scope6M_v_; +text: .text%__1cOExceptionScopeJpop_scope6M_p0_; +text: .text%__1cLCompilationVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cMGraphBuilderOinline_bailout6Mpkc_v_; +text: .text%__1cLInstructionEprev6MpnKBlockBegin__p0_; +text: .text%__1cKBlockBeginUresolve_substitution6M_v_; +text: .text%__1cKBlockBeginPblock_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cZresolve_substituted_value6FppnLInstruction__v_: c1_Instruction.o; +text: .text%__1cLInstructionFsubst6M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionPother_values_do6MpFpp0_v_v_: c1_GraphBuilder.o; +text: .text%__1cLInstructionPstate_values_do6MpFpp0_v_v_: c1_GraphBuilder.o; +text: .text%__1cLInstructionPstate_values_do6MpFpp0_v_v_: c1_Instruction.o; +text: .text%__1cIConstantPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cIBlockEndPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cHIntTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cJNullCheckPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorMdo_NullCheck6MpnJNullCheck__v_; +text: .text%__1cTNullCheckEliminatorQhandle_NullCheck6MpnJNullCheck__v_; +text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_GraphBuilder.o; +text: .text%__1cHIRScopeNtop_scope_bci6kM_i_; +text: .text%__1cQUseCountComputerPclear_use_count6FpnKBlockBegin__v_: c1_IR.o; +text: .text%__1cIValueGenMdo_NullCheck6MpnJNullCheck__v_; +text: .text%__1cJNullCheckKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o; +text: .text%__1cLLIR_EmitterKnull_check6MpnLLIR_OprDesc_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListDsub6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenQlock_spill_rinfo6MpnLInstruction_nFRInfo__v_; +text: .text%__1cQIRScopeDebugInfoRrecord_debug_info6MpnYDebugInformationRecorder__v_: c1_LIREmitter.o; +text: .text%__1cIRuntime1Yresolve_opt_virtual_call6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNSharedRuntimeOresolve_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cNSharedRuntimeSresolve_sub_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cFframeZsender_for_compiled_frame6kMpnLRegisterMap_pnICodeBlob_i_0_; +text: .text%__1cLRuntimeStubYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cHnmethodJis_zombie6kM_i_: nmethod.o; +text: .text%__1cNnmethodLocker2t6MpnHnmethod__v_; +text: 
.text%__1cNSharedRuntimeQfind_callee_info6FpnKJavaThread_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cHnmethodQis_native_method6kM_i_: nmethod.o; +text: .text%__1cHnmethodKpc_desc_at6MpCi_pnGPcDesc__; +text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_; +text: .text%__1cLPcDescCacheKpc_desc_at6kMpnHnmethod_pCi_pnGPcDesc__; +text: .text%__1cLPcDescCacheLadd_pc_desc6MpnGPcDesc__v_; +text: .text%__1cSvframeStreamCommonYfill_from_compiled_frame6MpnHnmethod_i_v_; +text: .text%__1cUCompressedReadStreamMraw_read_int6FrpC_i_: vframe.o; +text: .text%__1cICodeBlobLoop_addr_at6kMi_ppnHoopDesc__; +text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKCompiledICZcompute_monomorphic_entry6FnMmethodHandle_nLKlassHandle_iirnOCompiledICInfo_pnGThread__v_; +text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_; +text: .text%__1cXvirtual_call_RelocationIparse_ic6FrpnICodeBlob_rpC5rppnHoopDesc_pi_nNRelocIterator__; +text: .text%__1cKCompiledICIis_clean6kM_i_; +text: .text%__1cKCompiledICOic_destination6kM_pC_; +text: .text%__1cKCompiledICWis_in_transition_state6kM_i_; +text: .text%__1cRInlineCacheBufferIcontains6FpC_i_; +text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_; +text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_; +text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_; +text: .text%__1cNRelocIteratorEnext6M_i_: compiledIC.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: compiledIC.o; +text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_; +text: .text%__1cNnmethodLocker2T6M_v_; +text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_; +text: .text%jni_GetByteArrayRegion: jni.o; +text: .text%JVM_DefineClassWithSource; +text: .text%__1cXjvm_define_class_common6FpnHJNIEnv__pkcpnI_jobject_pkWi53pnGThread__pnH_jclass__: jvm.o; +text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__; +text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_; +text: .text%__1cPClassFileParserZskip_over_field_signature6MpciIpnGThread__1_; +text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cQput_after_lookup6FnMsymbolHandle_0ppnLNameSigHash__i_; +text: .text%__1cEhash6Fpkc1_I_; +text: .text%__1cKDictionarybAis_valid_protection_domain6MiInMsymbolHandle_nGHandle_2_i_; +text: .text%__1cPDictionaryEntrybAcontains_protection_domain6kMpnHoopDesc__i_; +text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cPDictionaryEntryVadd_protection_domain6MpnHoopDesc__v_; +text: .text%__1cUverify_byte_codes_fn6F_pv_: verifier.o; +text: .text%JVM_GetClassCPEntriesCount; +text: .text%JVM_GetClassCPTypes; +text: .text%JVM_GetClassNameUTF; +text: .text%JVM_ReleaseUTF; +text: .text%JVM_FindClassFromClass; +text: .text%jni_IsSameObject: jni.o; +text: .text%JVM_GetClassFieldsCount; +text: .text%JVM_GetClassMethodsCount; +text: .text%JVM_GetMethodIxModifiers; +text: 
.text%JVM_GetMethodIxByteCodeLength; +text: .text%JVM_GetMethodIxByteCode; +text: .text%JVM_GetMethodIxExceptionTableLength; +text: .text%JVM_GetMethodIxLocalsCount; +text: .text%JVM_GetMethodIxArgsSize; +text: .text%JVM_GetMethodIxSignatureUTF; +text: .text%JVM_GetMethodIxMaxStack; +text: .text%JVM_GetMethodIxExceptionsCount; +text: .text%JVM_GetMethodIxExceptionIndexes; +text: .text%JVM_GetCPMethodNameUTF; +text: .text%JVM_GetCPMethodClassNameUTF; +text: .text%jni_NewLocalRef: jni.o; +text: .text%JVM_GetCPMethodModifiers; +text: .text%JVM_IsConstructorIx; +text: .text%JVM_GetCPMethodSignatureUTF; +text: .text%jni_DeleteGlobalRef: jni.o; +text: .text%__1cQSystemDictionaryVadd_loader_constraint6FnMsymbolHandle_nGHandle_2pnGThread__v_; +text: .text%__1cVLoaderConstraintTableJadd_entry6MnMsymbolHandle_pnMklassOopDesc_nGHandle_34pnGThread__i_; +text: .text%__1cVLoaderConstraintTableJnew_entry6MIpnNsymbolOopDesc_pnMklassOopDesc_ii_pnVLoaderConstraintEntry__; +text: .text%jni_ToReflectedMethod: jni.o; +text: .text%__1cKReflectionKnew_method6FnMmethodHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: reflection.o; +text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__; +text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodThas_signature_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodVhas_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbFhas_parameter_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbChas_annotation_default_field6F_i_; +text: .text%__1cNmethodOopDescSannotation_default6kM_pnQtypeArrayOopDesc__; +text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_; +text: .text%jni_CallIntMethod: jni.o; +text: .text%jni_CallStaticVoidMethod: jni.o; +text: .text%jni_DetachCurrentThread; +text: .text%__1cKJavaThreadEexit6Mi_v_; +text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_; +text: .text%JVM_MonitorNotifyAll; +text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_; +text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o; +text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_; +text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_; +text: .text%__1cWThreadLocalAllocBufferFclear6M_v_; +text: .text%__1cHThreadsGremove6FpnKJavaThread__v_; +text: .text%__1cNThreadServiceNremove_thread6FpnKJavaThread_i_v_; +text: .text%__SLIP.DELETER__A: thread.o; +text: .text%__1cKJavaThread2T6M_v_; +text: .text%__1cGParker2T6M_v_; +text: .text%__1cHMonitor2T6M_v_; +text: .text%__1cFMutex2T6M_v_; +text: .text%lwp_cond_destroy: os_solaris.o; +text: .text%lwp_mutex_destroy: os_solaris.o; +text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_; +text: .text%__1cUThreadSafepointState2T6M_v_; +text: .text%__1cGThread2T5B6M_v_; +text: .text%__1cCosLfree_thread6FpnIOSThread__v_; 
+text: .text%__1cIOSThread2T6M_v_; +text: .text%__1cIOSThreadKpd_destroy6M_v_; +text: .text%jni_DestroyJavaVM; +text: .text%jni_AttachCurrentThread; +text: .text%attach_current_thread: jni.o; +text: .text%__1cCosWcreate_attached_thread6FpnGThread__i_; +text: .text%__1cKJavaThreadSallocate_threadObj6MnGHandle_pcipnGThread__v_; +text: .text%__1cHThreadsKdestroy_vm6F_i_; +text: .text%__1cKJavaThreadVinvoke_shutdown_hooks6M_v_; +text: .text%__1cLbefore_exit6FpnKJavaThread__v_; +text: .text%__1cNWatcherThreadEstop6F_v_; +text: .text%__1cLStatSamplerJdisengage6F_v_; +text: .text%__1cMPeriodicTaskJdisenroll6M_v_; +text: .text%__1cMPeriodicTask2T5B6M_v_; +text: .text%__1cMPeriodicTaskLis_enrolled6kM_i_; +text: .text%__1cLStatSamplerHdestroy6F_v_; +text: .text%__1cMPerfDataList2T6M_v_; +text: .text%__1cLJvmtiExportNpost_vm_death6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_death6F_v_; +text: .text%__1cCosXterminate_signal_thread6F_v_; +text: .text%__1cCosNsigexitnum_pd6F_i_; +text: .text%__1cCosNsignal_notify6Fi_v_; +text: .text%__1cQprint_statistics6F_v_; +text: .text%__1cFVTuneEexit6F_v_; +text: .text%__1cIVMThreadXwait_for_vm_thread_exit6F_v_; +text: .text%__1cUSafepointSynchronizeFbegin6F_v_; +text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_; +text: .text%__1cJTimeStampSticks_since_update6kM_x_; +text: .text%__1cTAbstractInterpreterRnotice_safepoints6F_v_; +text: .text%__1cKcopy_table6FppC1i_v_: interpreter.o; +text: .text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_; +text: .text%__1cCosRcurrent_thread_id6F_i_; +text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fi_v_; +text: .text%__1cCosbCmake_polling_page_unreadable6F_v_; +text: .text%__1cUThreadSafepointStateXexamine_state_of_thread6Mi_v_; +text: .text%__1cUSafepointSynchronizeOsafepoint_safe6FpnKJavaThread_nPJavaThreadState__i_; +text: .text%__1cUThreadSafepointStateMroll_forward6Mn0AMsuspend_type_pnHnmethod_i_v_; +text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_; +text: .text%__1cUSafepointSynchronizeQdo_cleanup_tasks6F_v_; +text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_; +text: .text%__1cNObjectMonitorHis_busy6kM_i_; +text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_; +text: .text%__1cMCounterDecayFdecay6F_v_; +text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_; +text: .text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__; +text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__; +text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: .text%__1cJdo_method6FpnNmethodOopDesc__v_: recompilationMonitor.o; +text: .text%__1cONMethodSweeperFsweep6F_v_; +text: .text%__1cNCompileBrokerQset_should_block6F_v_; +text: .text%__1cHVM_ExitbJwait_for_threads_in_native_to_block6F_i_; +text: .text%__1cURecompilationMonitorbFstop_recompilation_monitor_task6F_v_; +text: .text%__1cIVMThreadHdestroy6F_v_; +text: .text%__SLIP.DELETER__A: vmThread.o; +text: .text%__1cSThreadLocalStorageRpd_invalidate_all6F_v_; +text: .text%__1cHVM_ExitNset_vm_exited6F_i_; +text: .text%__1cMexit_globals6F_v_; +text: .text%__1cVverificationType_exit6F_v_; +text: .text%__1cQVerificationTypeIfinalize6F_v_; +text: .text%__1cPperfMemory_exit6F_v_; +text: .text%__1cPPerfDataManagerHdestroy6F_v_; +text: .text%__1cIPerfData2T6M_v_; +text: .text%__1cKPerfMemoryHdestroy6F_v_; +text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_; +text: .text%__1cUdelete_shared_memory6FpcI_v_: perfMemory_solaris.o; +text: .text%__1cLremove_file6Fpkc_v_: perfMemory_solaris.o; +text: 
.text%__1cMostream_exit6F_v_; +text: .text%__SLIP.DELETER__C: ostream.o; +text: .text%__SLIP.FINAL__A: c1_Items.o; +# Test Exit +text: .text%__1cPSignatureStreamHis_done6kM_i_; +text: .text%JVM_Halt; +text: .text%__1cHvm_exit6Fi_v_; +text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_; +text: .text%__1cMVM_OperationNdoit_prologue6M_i_: vm_operations.o; +text: .text%__1cGThreadMget_priority6Fkpk0_nOThreadPriority__; +text: .text%__1cCosMget_priority6FkpknGThread_rnOThreadPriority__nIOSReturn__; +text: .text%__1cCosTget_native_priority6FkpknGThread_pi_nIOSReturn__; +text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_; +text: .text%__1cMVM_OperationPevaluation_mode6kM_n0AEMode__: vm_operations.o; +text: .text%__1cMVM_OperationSis_cheap_allocated6kM_i_: vm_operations.o; +text: .text%__1cQVMOperationQdDueueDadd6MpnMVM_Operation__i_; +text: .text%__1cQVMOperationQdDueueOqueue_add_back6MipnMVM_Operation__v_; +text: .text%__1cQVMOperationQdDueueGinsert6MpnMVM_Operation_2_v_; +text: .text%__1cQVMOperationQdDueueGunlink6MpnMVM_Operation__v_; +text: .text%__1cHVM_ExitEname6kM_pkc_: vm_operations.o; +text: .text%__1cJEventMark2t6MpkcE_v_: vmThread.o; +text: .text%__1cCosJyield_all6Fi_v_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: vmThread.o; +text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_; +text: .text%__1cIVMThreadSevaluate_operation6MpnMVM_Operation__v_; +text: .text%__1cMVM_OperationIevaluate6M_v_; +text: .text%__1cHVM_ExitEdoit6M_v_; +# Test Hello +text: .text%JVM_GetCPFieldSignatureUTF; +text: .text%JVM_Write; +text: .text%__1cDhpiFwrite6FipkvI_I_: jvm.o; +# Test Sleep +text: .text%JVM_GetMethodIxExceptionTableEntry; +text: .text%JVM_GetCPClassNameUTF; +text: .text%JVM_Sleep; +text: .text%__1cCosHSolarisTsetup_interruptible6F_pnKJavaThread__; +text: .text%__1cCosHSolarisTsetup_interruptible6FpnKJavaThread__v_; +text: .text%__1cUSafepointSynchronizeRis_cleanup_needed6F_i_; +text: .text%__1cRInlineCacheBufferIis_empty6F_i_; +text: .text%__1cCosHSolarisVcleanup_interruptible6FpnKJavaThread__v_; +text: .text%__1cCosOunguard_memory6FpcI_i_; +# Test IntToString +text: .text%__1cQChunkPoolCleanerEtask6M_v_: allocation.o; +text: .text%__1cJChunkPoolMfree_all_but6MI_v_: allocation.o; +# Test LoadToolkit +text: .text%JVM_GetClassContext; +text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: jvm.o; +text: .text%jni_IsAssignableFrom: jni.o; +text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_; +text: .text%__1cQComputeCallStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cMGraphBuilderNload_constant6M_v_; +text: .text%__1cQciBytecodeStreamMget_constant6kM_nKciConstant__; +text: .text%__1cQciBytecodeStreamSget_constant_index6kM_i_; +text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_Canonicalizer.o; +text: .text%__1cTsort_by_start_block6FppnELoop_2_i_: c1_Loops.o; +text: .text%__1cILIR_ListLcall_static6MpnLLIR_OprDesc_pCpnMCodeEmitInfo_pnOStaticCallStub__v_: c1_LIREmitter.o; +text: .text%__1cLLIR_EmitterLcmp_mem_int6MnMLIR_OpBranchNLIR_Condition_nFRInfo_iipnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListLcmp_mem_int6MnMLIR_OpBranchNLIR_Condition_nFRInfo_iipnMCodeEmitInfo__v_; +text: 
.text%__1cJValueTypeLas_VoidType6M_pnIVoidType__: c1_Canonicalizer.o; +text: .text%__1cILIR_ListHint2reg6MinFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cRComputeEntryStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_Optimizer.o; +text: .text%__1cEIfOpPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cEIfOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorHdo_IfOp6MpnEIfOp__v_; +text: .text%__1cIValueGenHdo_IfOp6MpnEIfOp__v_; +text: .text%__1cLLIR_EmitterLifop_phase16MnLInstructionJCondition_pnLLIR_OprDesc_4_v_; +text: .text%__1cLLIR_EmitterLifop_phase26MnFRInfo_pnLLIR_OprDesc_3nLInstructionJCondition__v_; +text: .text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnFLabel__v_; +text: .text%__1cRLIR_PeepholeStateUstart_forward_branch6MpnFLabel__v_; +text: .text%__1cOGenerateOopMapMdo_checkcast6M_v_; +text: .text%__1cMGraphBuilderLinstance_of6Mi_v_; +text: .text%__1cKInstanceOfFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerNdo_InstanceOf6MpnKInstanceOf__v_; +text: .text%__1cJTypeCheckIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderOdirect_compare6MpnHciKlass__i_; +text: .text%__1cKInstanceOfNas_InstanceOf6M_p0_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderKcheck_cast6Mi_v_; +text: .text%__1cJCheckCastFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_CheckCast6MpnJCheckCast__v_; +text: .text%__1cJValueTypeKas_IntType6M_pnHIntType__: c1_ValueType.o; +text: .text%__1cJTypeCheckPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorNdo_InstanceOf6MpnKInstanceOf__v_; +text: .text%__1cQNullCheckVisitorMdo_CheckCast6MpnJCheckCast__v_; +text: .text%__1cIValueGenNdo_InstanceOf6MpnKInstanceOf__v_; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator_x86.o; +text: .text%__1cLLIR_EmitterNinstanceof_op6MpnLLIR_OprDesc_2pnHciKlass_nFRInfo_5ipnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListKinstanceof6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo__v_; +text: .text%__1cPLIR_OpTypeCheck2t6MnILIR_Code_pnLLIR_OprDesc_3pnHciKlass_33ipnMCodeEmitInfo_7pnICodeStub__v_; +text: .text%__1cIValueGenMdo_CheckCast6MpnJCheckCast__v_; +text: .text%__1cILIR_ListJcheckcast6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo_6pnICodeStub__v_; +text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator_x86.o; +text: .text%__1cPLIR_OpTypeCheckFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cTSimpleExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cPLIR_OpTypeCheckJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIROptimizer.o; +text: .text%__1cIintArrayIindex_of6kMki_i_: c1_LIROptimizer.o; +text: .text%__1cNLIR_AssemblerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_; +text: .text%__1cIciObjectIencoding6M_pnI_jobject__; +text: .text%__1cJAssemblerEcmpl6MnHAddress_pnI_jobject__v_; +text: .text%__1cTSimpleExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cTSimpleExceptionStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cJLoadFieldIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%__1cJLoadFieldMas_LoadField6M_p0_: 
c1_Instruction.o; +text: .text%__1cDPhiPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cDPhiFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorGdo_Phi6MpnDPhi__v_; +text: .text%__1cLInstructionIas_Local6M_pnFLocal__: c1_GraphBuilder.o; +text: .text%__1cDPhiGas_Phi6M_p0_: c1_GraphBuilder.o; +text: .text%__1cIValueGenScompute_phi_arrays6MpnKValueStack_pnGValues_pnIintStack_i_pnLInstruction__; +text: .text%__1cLLIR_EmitterTset_fpu_stack_empty6M_v_; +text: .text%__1cIRegAllocKlock_spill6MpnLInstruction_ii_v_; +text: .text%__1cIRegAllocRextend_spill_area6Mi_v_; +text: .text%__1cRclear_state_items6FppnLInstruction__v_: c1_CodeGenerator.o; +text: .text%__1cNLIR_AssemblerTset_fpu_stack_empty6M_v_; +text: .text%__1cIFrameMapLFpuStackSimFclear6M_v_; +text: .text%jni_GetEnv; +text: .text%jni_CallStaticBooleanMethod: jni.o; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cVcreate_gc_point_array6FpnFArena_i_pnNGrowableArray4Ci___; +text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_; +text: .text%__1cOGenerateOopMapIppop_any6Mi_v_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: c1_IR.o; +text: .text%__1cMGraphBuilderQhandle_exception6Mi_v_; +text: .text%__1cOExceptionScopeFclear6M_v_; +text: .text%__1cMGraphBuilderJScopeDataJxhandlers6kM_pnJXHandlers__; +text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_; +text: .text%__1cTciConstantPoolCacheDget6Mi_pv_; +text: .text%__1cTciConstantPoolCacheEfind6Mi_i_; +text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_; +text: .text%__1cMGraphBuilderHif_null6MpnJValueType_nLInstructionJCondition__v_; +text: .text%__1cOObjectConstantRas_ObjectConstant6M_p0_: c1_ValueType.o; +text: .text%__1cMas_ValueType6FnKciConstant__pnJValueType__; +text: .text%__1cLInstructionGmirror6Fn0AJCondition__1_; +text: .text%__1cHis_true6FxnLInstructionJCondition_x_i_: c1_Canonicalizer.o; +text: .text%__1cNCanonicalizerNset_canonical6MpnLInstruction__v_; +text: .text%__1cKBlockBeginVadd_exception_handler6Mp0_v_; +text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_Instruction.o; +text: .text%__1cOExceptionScopeLadd_handler6MpnIXHandler__v_; +text: .text%__1cIciObjectEhash6M_i_; +text: .text%__1cPciObjectFactoryPinsert_non_perm6Mrpn0ANNonPermObject_pnHoopDesc_pnIciObject__v_; +text: .text%__1cIciObjectMhas_encoding6M_i_; +text: .text%__1cJValueTypeRas_ObjectConstant6M_pnOObjectConstant__: c1_ValueType.o; +text: .text%__1cNClassConstantQas_ClassConstant6M_p0_: c1_ValueType.o; +text: .text%__1cOExceptionScopeKhandler_at6kMi_pnIXHandler__; +text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderIlogic_op6MpnJValueType_nJBytecodesECode__v_; +text: .text%__1cHLogicOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerKdo_LogicOp6MpnHLogicOp__v_; +text: .text%__1cHLogicOpEhash6kM_i_: c1_Instruction.o; +text: .text%__1cHLogicOpEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cLInstructionIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cMGraphBuilderHconvert6MnJBytecodesECode_nJBasicType_3_v_; +text: .text%__1cHConvertFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerKdo_Convert6MpnHConvert__v_; +text: .text%__1cHConvertEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cHConvertEname6kM_pkc_: c1_GraphBuilder.o; +text: 
.text%__1cMGraphBuilderNstore_indexed6MnJBasicType__v_; +text: .text%__1cIValueMapKkill_array6MpnJValueType__v_; +text: .text%__1cGBucketKkill_array6MpnJValueType__v_; +text: .text%__1cLInstructionOas_LoadIndexed6M_pnLLoadIndexed__: c1_GraphBuilder.o; +text: .text%__1cLInstructionOas_LoadIndexed6M_pnLLoadIndexed__: c1_Instruction.o; +text: .text%__1cKValueStackRpin_stack_indexed6MpnJValueType__v_; +text: .text%__1cMStoreIndexedFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_StoreIndexed6MpnMStoreIndexed__v_; +text: .text%__1cLAccessArrayIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cLAccessFieldPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cLInstructionPother_values_do6MpFpp0_v_v_: c1_Instruction.o; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstance.o; +text: .text%__1cMStoreIndexedPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cHConvertPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorKdo_LogicOp6MpnHLogicOp__v_; +text: .text%__1cQNullCheckVisitorKdo_Convert6MpnHConvert__v_; +text: .text%__1cQNullCheckVisitorPdo_StoreIndexed6MpnMStoreIndexed__v_; +text: .text%__1cTNullCheckEliminatorThandle_StoreIndexed6MpnMStoreIndexed__v_; +text: .text%__1cMciNullObjectMis_classless6kM_i_: ciNullObject.o; +text: .text%__1cJValueTypeQas_ClassConstant6M_pnNClassConstant__: c1_ValueType.o; +text: .text%__1cOObjectConstantIencoding6kM_pnI_jobject__; +text: .text%__1cIValueGenbBrlock_byte_result_with_hint6MpnLInstruction_pknEItem__nFRInfo__; +text: .text%__1cNc1_AllocTableThas_one_free_masked6kMnKc1_RegMask__i_; +text: .text%__1cIRegAllocMget_lock_reg6MpnLInstruction_nKc1_RegMask__nFRInfo__; +text: .text%__1cIRegAllocMget_free_reg6MnKc1_RegMask__nFRInfo__; +text: .text%__1cNc1_AllocTablePget_free_masked6MnKc1_RegMask__i_; +text: .text%__1cNClassConstantIencoding6kM_pnI_jobject__; +text: .text%__1cLLIR_EmitterLopr2jobject6MpnLLIR_OprDesc__pnI_jobject__; +text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenMrelease_item6MpnEItem__v_; +text: .text%__1cIValueGenPdo_StoreIndexed6MpnMStoreIndexed__v_; +text: .text%__1cIValueGenKdo_Convert6MpnHConvert__v_; +text: .text%__1cIValueGenKdo_LogicOp6MpnHLogicOp__v_; +text: .text%__1cLLIR_EmitterIlogic_op6MnJBytecodesECode_nFRInfo_pnLLIR_OprDesc_5_v_; +text: .text%__1cILIR_ListLlogical_and6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o; +text: .text%__1cLLIR_EmitterKconvert_op6MnJBytecodesECode_pnLLIR_OprDesc_nFRInfo_i_v_; +text: .text%__1cILIR_ListHconvert6MnJBytecodesECode_pnLLIR_OprDesc_4i_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenKmust_round6MpnLInstruction_pknEItem__i_; +text: .text%__1cLAccessArrayKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o; +text: .text%__1cLLIR_EmitterNindexed_store6MnJBasicType_pnLLIR_OprDesc_33nFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterXlo_word_offset_in_bytes6kM_i_; +text: .text%__1cLLIR_EmitterXhi_word_offset_in_bytes6kM_i_; +text: .text%__1cILIR_ListLstore_array6MnFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cIValueGenXexception_handler_start6MpnHIRScope_ipnKValueStack__v_; +text: .text%__1cLLIR_EmitterNhandler_entry6M_v_; +text: .text%__1cLLIR_OprFactQdummy_value_type6FpnJValueType__pnLLIR_OprDesc__; +text: .text%__1cLInstructionKexact_type6kM_pnGciType__: c1_GraphBuilder.o; +text: .text%__1cLInstructionNdeclared_type6kM_pnGciType__: c1_GraphBuilder.o; +text: 
.text%__1cILIR_ListKnull_check6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cNLIR_OpConvertJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerOemit_opConvert6MpnNLIR_OpConvert__v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_pnI_jobject__v_; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_pnI_jobject__v_; +text: .text%__1cNLIR_AssemblerIlogic_op6MnILIR_Code_pnLLIR_OprDesc_33_v_; +text: .text%__1cNLIR_AssemblerOemit_opConvert6MpnNLIR_OpConvert__v_; +text: .text%__1cNLIR_AssemblerNarray_move_op6MpnLLIR_OprDesc_2nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerJreg2array6MnFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerPas_ArrayAddress6MpnLLIR_Address_nJBasicType__nHAddress__; +text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cUDebugInfoWriteStreamMwrite_handle6MpnI_jobject__v_; +text: .text%__1cTExceptionRangeTableJadd_entry6Miiiiii_v_; +text: .text%__1cTExceptionRangeEntry2t6Miiiiii_v_; +text: .text%__1cTExceptionRangeTableJadd_entry6MnTExceptionRangeEntry__v_; +text: .text%__1cOExceptionScopeCid6kM_i_; +text: .text%__1cTExceptionRangeTableTentry_index_for_pco6kMi_i_; +text: .text%__1cTExceptionRangeTableIentry_at6kMi_pnTExceptionRangeEntry__; +text: .text%jni_CallStaticVoidMethodV: jni.o; +text: .text%JVM_GetLastErrorString; +text: .text%jni_Throw: jni.o; +text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_; +text: .text%JVM_DisableCompiler; +text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%JVM_Available; +text: .text%__1cOGenerateOopMapKpp_new_ref6MpnNCellTypeState_i_v_; +text: .text%__1cLInstructionMas_LoadField6M_pnJLoadField__: c1_Instruction.o; +text: .text%__1cHLogicOpOis_commutative6kM_i_; +text: .text%__1cDCHANprocess_class6FnLKlassHandle_pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: .text%__1cUGenericGrowableArrayMraw_contains6kMpknEGrET__i_; +text: .text%__1cLArrayLengthFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerOdo_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cLArrayLengthEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cLArrayLengthEname6kM_pkc_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderOnew_type_array6M_v_; +text: .text%__1cMNewTypeArrayFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerPdo_NewTypeArray6MpnMNewTypeArray__v_; +text: .text%__1cINewArrayIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cJIntrinsicFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_Intrinsic6MpnJIntrinsic__v_; +text: .text%__1cJIntrinsicMas_Intrinsic6M_p0_: c1_GraphBuilder.o; +text: .text%__1cJIntrinsicIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cLAccessArrayPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorOdo_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cTNullCheckEliminatorShandle_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cINewArrayPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorPdo_NewTypeArray6MpnMNewTypeArray__v_; +text: .text%__1cTNullCheckEliminatorPhandle_NewArray6MpnINewArray__v_; +text: .text%__1cJIntrinsicPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorMdo_Intrinsic6MpnJIntrinsic__v_; +text: 
.text%__1cJLoopArrayIindex_of6kMkpnELoop__i_: c1_Loops.o; +text: .text%__1cINewArrayLas_NewArray6M_p0_: c1_Instruction.o; +text: .text%__1cILIR_ListOcall_icvirtual6MnFRInfo_pnLLIR_OprDesc_pCpnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListNstore_mem_int6MinFRInfo_inJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cIValueGenOdo_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cLLIR_EmitterMarray_length6MnFRInfo_pnLLIR_OprDesc_pnMCodeEmitInfo__v_; +text: .text%__1cLlog2_intptr6Fi_i_: c1_LIREmitter.o; +text: .text%__1cIValueGenPdo_NewTypeArray6MpnMNewTypeArray__v_; +text: .text%__1cLLIR_EmitterOnew_type_array6MnFRInfo_nJBasicType_pnLLIR_OprDesc_11111pnMCodeEmitInfo__v_; +text: .text%__1cQNewTypeArrayStub2t6MnFRInfo_11pnMCodeEmitInfo__v_; +text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_; +text: .text%__1cQciTypeArrayKlassJmake_impl6FnJBasicType__p0_; +text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter_x86.o; +text: .text%__1cILIR_ListOallocate_array6MnFRInfo_11111nJBasicType_1pnICodeStub__v_; +text: .text%__1cIValueGenMdo_Intrinsic6MpnJIntrinsic__v_; +text: .text%__1cIValueGenMdo_ArrayCopy6MpnJIntrinsic__v_; +text: .text%__1cIValueGenQarraycopy_helper6MpnJIntrinsic_pippnMciArrayKlass__v_; +text: .text%__1cJLoadFieldKexact_type6kM_pnGciType__; +text: .text%__1cJLoadFieldNdeclared_type6kM_pnGciType__; +text: .text%__1cQciTypeArrayKlassTis_type_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cOas_array_klass6FpnGciType__pnMciArrayKlass__: c1_CodeGenerator.o; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cMNewTypeArrayKexact_type6kM_pnGciType__; +text: .text%__1cLInstructionNdeclared_type6kM_pnGciType__: c1_Instruction.o; +text: .text%__1cRpositive_constant6FpnLInstruction__i_: c1_CodeGenerator.o; +text: .text%__1cLArrayLengthOas_ArrayLength6M_p0_: c1_GraphBuilder.o; +text: .text%__1cQis_constant_zero6FpnLInstruction__i_: c1_CodeGenerator.o; +text: .text%__1cILIR_ListJarraycopy6MpnLLIR_OprDesc_22222pnMciArrayKlass_ipnMCodeEmitInfo__v_: c1_CodeGenerator_x86.o; +text: .text%__1cLLIR_EmitterNwrite_barrier6MpnLLIR_OprDesc_2_v_; +text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter_x86.o; +text: .text%__1cILIR_ListUunsigned_shift_right6MpnLLIR_OprDesc_222_v_; +text: .text%__1cQLIR_OpAllocArrayFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQNewTypeArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cPLIR_OpArrayCopyFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQLIR_OpAllocArrayJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerQemit_alloc_array6MpnQLIR_OpAllocArray__v_; +text: .text%__1cPLIR_OpArrayCopyJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerOemit_arraycopy6MpnPLIR_OpArrayCopy__v_; +text: .text%__1cNLIR_AssemblerHic_call6MpCpnMCodeEmitInfo__v_; +text: .text%__1cJAssemblerEcall6MpCrknQRelocationHolder__v_; +text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cXvirtual_call_RelocationJpack_data6M_i_; +text: .text%__1cNLIR_AssemblerJconst2mem6MpnJLIR_Const_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerQemit_alloc_array6MpnQLIR_OpAllocArray__v_; +text: .text%__1cNLIR_AssemblerSarray_element_size6kMnJBasicType__nHAddressLScaleFactor__; +text: .text%__1cRC1_MacroAssemblerOallocate_array6MpnMRegisterImpl_222inHAddressLScaleFactor_2rnFLabel__v_; +text: 
.text%__1cRC1_MacroAssemblerMtry_allocate6MpnMRegisterImpl_2i22rnFLabel__v_; +text: .text%__1cQNewTypeArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerOemit_arraycopy6MpnPLIR_OpArrayCopy__v_; +text: .text%__1cMciArrayKlassMelement_type6M_pnGciType__; +text: .text%__1cNArrayCopyStub2t6MpnMCodeEmitInfo_pnOStaticCallStub__v_; +text: .text%__1cFRInfoMset_word_reg6MkpnMRegisterImpl__v_; +text: .text%__1cNArrayCopyStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerOpush_parameter6MpnMRegisterImpl_i_v_; +text: .text%__1cQNewTypeArrayStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNArrayCopyStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_; +text: .text%__1cIRuntime1Uresolve_virtual_call6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: compiledICHolderKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: compiledICHolderKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: compiledICHolderKlass.o; +text: .text%__1cXvirtual_call_RelocationJfirst_oop6M_pC_; +text: .text%__1cXvirtual_call_RelocationJoop_limit6M_pC_; +text: .text%__1cNRelocIteratorJset_limit6MpC_v_; +text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cGICStubIset_stub6MpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_; +text: .text%__1cKCompiledICSset_ic_destination6MpC_v_; +text: .text%__1cRInlineCacheBufferLnew_ic_stub6F_pnGICStub__; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlass.o; +text: .text%JVM_NewArray; +text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__; +text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_; +text: .text%__1cNobjArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMkpnNmethodOopDesc_i_pnHnmethod__; +text: .text%__1cQSimpleCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_; +text: .text%__1cICompilerMsupports_osr6M_i_: c1_Compiler.o; +text: .text%__1cHciKlassOis_subclass_of6Mp0_i_; +text: .text%__1cMGraphBuilderQnew_object_array6M_v_; +text: .text%__1cONewObjectArrayFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerRdo_NewObjectArray6MpnONewObjectArray__v_; +text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__: ciObjArrayKlass.o; +text: .text%__1cMGraphBuilderIshift_op6MpnJValueType_nJBytecodesECode__v_; +text: .text%__1cHShiftOpFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerKdo_ShiftOp6MpnHShiftOp__v_; +text: .text%__1cHShiftOpEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cHShiftOpEname6kM_pkc_: c1_GraphBuilder.o; +text: .text%__1cLLoadIndexedOas_LoadIndexed6M_p0_: c1_Instruction.o; +text: .text%__1cMArithmeticOpIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%__1cDOp2Gas_Op26M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionMas_LoadField6M_pnJLoadField__: c1_GraphBuilder.o; +text: 
.text%__1cQNullCheckVisitorRdo_NewObjectArray6MpnONewObjectArray__v_; +text: .text%__1cDOp2Pinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorKdo_ShiftOp6MpnHShiftOp__v_; +text: .text%__1cHciKlassMaccess_flags6M_i_; +text: .text%__1cILIR_ListPallocate_object6MnFRInfo_111ii1pnICodeStub__v_; +text: .text%__1cLLIR_EmitterOmembar_release6M_v_; +text: .text%__1cLLIR_EmitterGmembar6M_v_; +text: .text%__1cIValueGenRdo_NewObjectArray6MpnONewObjectArray__v_; +text: .text%__1cLLIR_EmitterQnew_object_array6MnFRInfo_pnHciKlass_pnLLIR_OprDesc_11111pnMCodeEmitInfo_7_v_; +text: .text%__1cSNewObjectArrayStub2t6MnFRInfo_11pnMCodeEmitInfo__v_; +text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_; +text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_; +text: .text%__1cLLIR_EmitterOmembar_acquire6M_v_; +text: .text%__1cIValueGenKdo_ShiftOp6MpnHShiftOp__v_; +text: .text%__1cIValueGenPshiftCountRInfo6F_nFRInfo__; +text: .text%__1cLLIR_EmitterIshift_op6MnJBytecodesECode_nFRInfo_pnLLIR_OprDesc_53_v_; +text: .text%__1cILIR_ListKshift_left6MnFRInfo_i1_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListKlogical_or6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o; +text: .text%__1cOLIR_OpAllocObjFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cSNewObjectArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cOLIR_OpAllocObjJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_; +text: .text%__1cNLIR_AssemblerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_; +text: .text%__1cRC1_MacroAssemblerPallocate_object6MpnMRegisterImpl_22ii2rnFLabel__v_; +text: .text%__1cNLIR_AssemblerOmembar_release6M_v_; +text: .text%__1cNLIR_AssemblerGmembar6M_v_; +text: .text%__1cSNewObjectArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerOmembar_acquire6M_v_; +text: .text%__1cEBaseHas_Base6M_p0_: c1_IR.o; +text: .text%__1cNLIR_AssemblerOemit_osr_entry6MpnHIRScope_ipnFLabel_i_v_; +text: .text%__1cSNewObjectArrayStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cUGenericGrowableArrayLraw_at_grow6MipknEGrET__pv_; +text: .text%__1cNSignatureInfoIdo_short6M_v_: bytecode.o; +text: .text%jni_MonitorEnter: jni.o; +text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_; +text: .text%jni_MonitorExit: jni.o; +text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_; +text: .text%jni_CallVoidMethod: jni.o; +text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_: jni.o; +text: .text%jni_CallStaticBooleanMethodV: jni.o; +text: .text%JVM_GetStackTraceDepth; +text: .text%__1cTjava_lang_ThrowableVget_stack_trace_depth6FpnHoopDesc_pnGThread__i_; +text: .text%__1cTjava_lang_ThrowableJbacktrace6FpnHoopDesc__2_; +text: .text%JVM_GetStackTraceElement; +text: .text%__1cTjava_lang_ThrowableXget_stack_trace_element6FpnHoopDesc_ipnGThread__2_; +text: .text%__1cbBjava_lang_StackTraceElementGcreate6FnMmethodHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cbBjava_lang_StackTraceElementNset_className6FpnHoopDesc_2_v_; +text: .text%__1cbBjava_lang_StackTraceElementOset_methodName6FpnHoopDesc_2_v_; +text: .text%__1cbBjava_lang_StackTraceElementMset_fileName6FpnHoopDesc_2_v_; +text: .text%__1cNmethodOopDescUline_number_from_bci6kMi_i_; +text: .text%__1cbECompressedLineNumberReadStream2t6MpC_v_; +text: .text%__1cbECompressedLineNumberReadStreamJread_pair6M_i_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: 
methodOop.o; +text: .text%__1cbBjava_lang_StackTraceElementOset_lineNumber6FpnHoopDesc_i_v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: typeArrayKlass.o; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciObjectFactory.o; +text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_; +text: .text%__1cNObjectMonitorREntryQdDueue_insert6MpnMObjectWaiter_i_v_; +text: .text%__1cNObjectMonitorbAEntryQdDueue_SelectSuccessor6M_pnMObjectWaiter__; +text: .text%__1cLServiceUtilLvisible_oop6FpnHoopDesc__i_: objectMonitor_solaris.o; +text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_; +text: .text%JVM_EnableCompiler; +text: .text%__1cCosHSolarisFEventEpark6Mx_i_: objectMonitor_solaris.o; +text: .text%__1cJStubQdDueueKremove_all6M_v_; +text: .text%__1cJStubQdDueueMremove_first6Mi_v_; +text: .text%__1cJStubQdDueueMremove_first6M_v_; +text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_: icBuffer.o; +text: .text%__1cGICStubIfinalize6M_v_; +text: .text%__1cGICStubKcached_oop6kM_pnHoopDesc__; +text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__; +text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_; +text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_; +text: .text%__1cGICStubLdestination6kM_pC_; +text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_; +text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_: icBuffer.o; +text: .text%__1cUSafepointSynchronizeDend6F_v_; +text: .text%__1cCosbAmake_polling_page_readable6F_v_; +text: .text%__1cTAbstractInterpreterRignore_safepoints6F_v_; +text: .text%__1cGThreadQunboost_priority6Fp0_v_; +text: .text%__1cUThreadSafepointStateHrestart6M_v_; +text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_; +text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_; +# Test LoadFrame +text: .text%__1cNObjectMonitorGenter26MpnGThread__v_; +text: .text%__1cICompilerPsupports_native6M_i_: c1_Compiler.o; +text: .text%__1cLCompilationVcompile_native_method6MpnLCodeOffsets__i_; +text: .text%__1cIciMethodMnative_entry6M_pC_; +text: .text%__1cLCompilationUemit_code_for_native6MpCpnLCodeOffsets__v_; +text: .text%__1cLCompilationXemit_code_prolog_native6MpnIFrameMap__v_; +text: .text%__1cNLIR_AssemblerRemit_method_entry6MpnLLIR_Emitter_pnHIRScope__v_; +text: .text%__1cOMacroAssemblerHfat_nop6M_v_; +text: .text%__1cNLIR_AssemblerQemit_native_call6MpCpnMCodeEmitInfo__v_; +text: .text%__1cMCodeEmitInfobGcreate_oop_map_for_own_signature6M_pnGOopMap__; +text: .text%__1cNLIR_AssemblerXemit_native_method_exit6MpnMCodeEmitInfo__v_; +text: .text%__1cNSignatureInfoHdo_char6M_v_: reflection.o; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: reflection.o; +text: .text%jni_CallObjectMethodV: jni.o; +text: .text%jni_SetObjectField: jni.o; +text: .text%jni_IsInstanceOf: jni.o; +text: .text%jni_GetStaticObjectField: jni.o; +text: .text%__1cbCTwoGenerationCollectorPolicybMshould_try_older_generation_allocation6kMI_i_; +text: .text%__1cQGenCollectedHeapSattempt_allocation6MIiii_pnIHeapWord__; +text: .text%__1cQDefNewGenerationIallocate6MIii_pnIHeapWord__: defNewGeneration.o; +text: .text%__1cKGenerationInext_gen6kM_p0_; +text: .text%__1cKGenerationYallocation_limit_reached6MpnFSpace_pnIHeapWord_I_4_: tenuredGeneration.o; +text: .text%__1cQDefNewGenerationTallocate_from_space6MI_pnIHeapWord__; +text: .text%__1cPVM_GC_OperationNdoit_prologue6M_i_; +text: .text%__1cPVM_GC_OperationZacquire_pending_list_lock6M_v_; +text: 
.text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_; +text: .text%__1cXjava_lang_ref_ReferenceWpending_list_lock_addr6F_ppnHoopDesc__; +text: .text%__1cPVM_GC_OperationQgc_count_changed6kM_i_; +text: .text%__1cbAVM_GenCollectForAllocationEname6kM_pkc_: vm_operations.o; +text: .text%__1cbAVM_GenCollectForAllocationEdoit6M_v_; +text: .text%__1cNJvmtiGCMarker2t6Mi_v_; +text: .text%__1cQGenCollectedHeapZsatisfy_failed_allocation6MIiipi_pnIHeapWord__; +text: .text%__1cbCTwoGenerationCollectorPolicyZsatisfy_failed_allocation6MIiipi_pnIHeapWord__; +text: .text%__1cQGenCollectedHeapNdo_collection6MiiIiiipi_v_; +text: .text%__1cXTraceMemoryManagerStats2t6Mi_v_; +text: .text%__1cNMemoryServiceIgc_begin6Fi_v_; +text: .text%__1cPGCMemoryManagerIgc_begin6M_v_; +text: .text%__1cKManagementJtimestamp6F_x_; +text: .text%__1cKGCStatInfoMset_gc_usage6MinLMemoryUsage_i_v_; +text: .text%__1cTContiguousSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cTContiguousSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cbBSurvivorContiguousSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cbBSurvivorContiguousSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cOGenerationPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cOGenerationPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cQGenCollectedHeapLgc_prologue6Mi_v_; +text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_; +text: .text%__1cWThreadLocalAllocBufferVaccumulate_statistics6MIi_v_; +text: .text%__1cPGlobalTLABStatsHpublish6M_v_; +text: .text%__1cQGenCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapOfill_all_tlabs6M_v_; +text: .text%__1cQGenCollectedHeapSgeneration_iterate6Mpn0AKGenClosure_i_v_; +text: .text%__1cbCGenEnsureParseabilityClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o; +text: .text%__1cKGenerationTensure_parseability6M_v_: defNewGeneration.o; +text: .text%__1cKGenerationTensure_parseability6M_v_: tenuredGeneration.o; +text: .text%__1cKGenerationTensure_parseability6M_v_: compactingPermGenGen.o; +text: .text%__1cSAllocationProfilerViterate_since_last_gc6F_v_; +text: .text%__1cUGenGCPrologueClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o; +text: .text%__1cQDefNewGenerationLgc_prologue6Mi_v_: defNewGeneration.o; +text: .text%__1cRTenuredGenerationLgc_prologue6Mi_v_; +text: .text%__1cKGenerationLgc_prologue6Mi_v_: compactingPermGenGen.o; +text: .text%__1cKGenerationOshould_collect6MiIii_i_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationKshort_name6kM_pkc_: defNewGeneration.o; +text: .text%__1cKGenerationIcounters6M_pnRCollectorCounters__: defNewGeneration.o; +text: .text%__1cQGenCollectedHeapKsave_marks6M_v_; +text: .text%__1cQDefNewGenerationKsave_marks6M_v_; +text: .text%__1cbCOneContigSpaceCardGenerationKsave_marks6M_v_; +text: .text%__1cQDefNewGenerationHcollect6MiiIii_v_; +text: .text%__1cQDefNewGenerationbAcollection_attempt_is_safe6M_i_; +text: .text%__1cRTenuredGenerationZpromotion_attempt_is_safe6kMIi_i_; +text: .text%__1cKGenerationYmax_contiguous_available6kM_I_; +text: .text%__1cbCOneContigSpaceCardGenerationUcontiguous_available6kM_I_; +text: .text%__1cQDefNewGenerationbIinit_assuming_no_promotion_failure6M_v_; +text: .text%__1cQDefNewGenerationOIsAliveClosure2t6MpnKGeneration__v_; +text: .text%__1cSScanWeakRefClosure2t6MpnQDefNewGeneration__v_; +text: 
.text%__1cLCardTableRSbGprepare_for_younger_refs_iterate6Mi_v_; +text: .text%__1cULRUCurrentHeapPolicy2t6M_v_; +text: .text%__1cPCollectorPolicyPis_train_policy6M_i_: collectorPolicy.o; +text: .text%__1cPFastScanClosure2t6MpnQDefNewGeneration_i_v_; +text: .text%__1cQDefNewGenerationbCFastEvacuateFollowersClosure2t6MpnQGenCollectedHeap_ip0pnPFastScanClosure_6_v_; +text: .text%__1cQGenCollectedHeapUprocess_strong_roots6Miiin0ATClassScanningOption_pnQOopsInGenClosure_3_v_; +text: .text%__1cKSharedHeapbAchange_strong_roots_parity6M_v_; +text: .text%__1cMSubTasksDonePis_task_claimed6Mi_i_; +text: .text%__1cIUniverseHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cPFastScanClosureGdo_oop6MppnHoopDesc__v_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationWcopy_to_survivor_space6MpnHoopDesc_p2_2_; +text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_; +text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_; +text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_; +text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_; +text: .text%__1cNchunk_oops_do6FpnKOopClosure_pnFChunk_pc_I_: handles.o; +text: .text%__1cQStackFrameStream2t6MpnKJavaThread_i_v_; +text: .text%__1cFframeQoops_do_internal6MpnKOopClosure_pnLRegisterMap_i_v_; +text: .text%__1cFframeToops_interpreted_do6MpnKOopClosure_pknLRegisterMap_i_v_; +text: .text%__1cFframeVinterpreter_frame_bci6kM_i_; +text: .text%__1cFframebDinterpreter_frame_monitor_end6kM_pnPBasicObjectLock__; +text: .text%__1cFframebFinterpreter_frame_monitor_begin6kM_pnPBasicObjectLock__; +text: .text%__1cRInterpreterOopMap2t6M_v_; +text: .text%__1cRInterpreterOopMapKinitialize6M_v_; +text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_; +text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cLOopMapCache2t6M_v_; +text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cLOopMapCacheIentry_at6kMi_pnQOopMapCacheEntry__; +text: .text%__1cRInterpreterOopMapIis_empty6M_i_; +text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_; +text: .text%__1cQOopMapCacheEntryFflush6M_v_; +text: .text%__1cQOopMapCacheEntryTdeallocate_bit_mask6M_v_; +text: .text%__1cQOopMapCacheEntryPfill_for_native6M_v_; +text: .text%__1cQOopMapCacheEntryRallocate_bit_mask6M_v_; +text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pIi_v_: oopMapCache.o; +text: .text%__1cNFingerprinterLfingerprint6M_X_: oopMapCache.o; +text: .text%__1cTMaskFillerForNativeLpass_object6M_v_: oopMapCache.o; +text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_; +text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_; +text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_: frame.o; +text: .text%__1cRInterpreterOopMap2T6M_v_; +text: .text%__1cTOopMapForCacheEntry2t6MnMmethodHandle_ipnQOopMapCacheEntry__v_; +text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_; +text: .text%__1cTOopMapForCacheEntryRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cTOopMapForCacheEntryOreport_results6kM_i_: oopMapCache.o; +text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_; +text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_; +text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_; +text: 
.text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: oopMapCache.o; +text: .text%__1cTMaskFillerForNativeJpass_long6M_v_: oopMapCache.o; +text: .text%__1cFframebHnext_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_; +text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_; +text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_; +text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_; +text: .text%__1cQComputeCallStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_; +text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__; +text: .text%__1cRComputeEntryStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: frame.o; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: oopMapCache.o; +text: .text%__1cFframebDoops_interpreted_arguments_do6MnMsymbolHandle_ipnKOopClosure__v_; +text: .text%__1cRArgumentOopFinderDset6MinJBasicType__v_: frame.o; +text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cQVMOperationQdDueueHoops_do6MpnKOopClosure__v_; +text: .text%__1cQVMOperationQdDueueNqueue_oops_do6MipnKOopClosure__v_; +text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_; +text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_; +text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_; +text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_; +text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_; +text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_; +text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollectorXoops_do_for_all_threads6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_; +text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_; +text: .text%__1cPDictionaryEntrybDprotection_domain_set_oops_do6MpnKOopClosure__v_: dictionary.o; +text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cUCompactingPermGenGenUyounger_refs_iterate6MpnQOopsInGenClosure__v_; +text: .text%__1cbCOneContigSpaceCardGenerationUyounger_refs_iterate6MpnQOopsInGenClosure__v_; +text: .text%__1cKGenerationbDyounger_refs_in_space_iterate6MpnFSpace_pnQOopsInGenClosure__v_; +text: .text%__1cLCardTableRSbDyounger_refs_in_space_iterate6MpnFSpace_pnQOopsInGenClosure__v_; +text: .text%__1cPContiguousSpaceLnew_dcto_cl6MpnKOopClosure_nRCardTableModRefBSOPrecisionStyle_pnIHeapWord__pnVDirtyCardToOopClosure__; +text: .text%__1cPContiguousSpaceZused_region_at_save_marks6kM_nJMemRegion__: space.o; +text: .text%__1cRCardTableModRefBSWnon_clean_card_iterate6MpnFSpace_nJMemRegion_pnVDirtyCardToOopClosure_pnQMemRegionClosure_i_v_; +text: .text%__1cRCardTableModRefBSbBnon_clean_card_iterate_work6MnJMemRegion_pnQMemRegionClosure_i_v_; +text: .text%__1cJMemRegionMintersection6kMk0_0_; +text: .text%__1cYClearNoncleanCardWrapperMdo_MemRegion6MnJMemRegion__v_: cardTableRS.o; +text: .text%__1cYClearNoncleanCardWrapperKclear_card6MpW_i_: cardTableRS.o; +text: .text%__1cVDirtyCardToOopClosureMdo_MemRegion6MnJMemRegion__v_; +text: .text%__1cWOffsetTableContigSpaceLblock_start6kMpkv_pnIHeapWord__: 
space.o; +text: .text%__1cbBBlockOffsetArrayContigSpaceSblock_start_unsafe6kMpkv_pnIHeapWord__; +text: .text%__1cPContiguousSpaceKblock_size6kMpknIHeapWord__I_; +text: .text%__1cUContiguousSpaceDCTOCOget_actual_top6MpnIHeapWord_2_2_; +text: .text%__1cPContiguousSpaceRtoContiguousSpace6M_p0_: space.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlass.o; +text: .text%__1cPFiltering_DCTOCPwalk_mem_region6MnJMemRegion_pnIHeapWord_3_v_; +text: .text%__1cUContiguousSpaceDCTOCXwalk_mem_region_with_cl6MnJMemRegion_pnIHeapWord_3pnQFilteringClosure__v_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: methodKlass.o; +text: .text%__1cLmethodKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cNinstanceKlassViterate_static_fields6MpnKOopClosure__v_; +text: .text%__1cLklassVtablePoop_oop_iterate6MpnKOopClosure__v_; +text: .text%__1cQFilteringClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cLklassItablePoop_oop_iterate6MpnKOopClosure__v_; +text: .text%__1cKklassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cKOopClosureXshould_remember_klasses6kM_ki_: space.o; +text: .text%__1cNinstanceKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_; +text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: cpCacheKlass.o; +text: .text%__1cWconstantPoolCacheKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cWConstantPoolCacheEntryLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cParrayKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cNobjArrayKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_; +text: .text%__1cNinstanceKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_; +text: .text%__1cNobjArrayKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: methodKlass.o; +text: .text%__1cLmethodKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: constantPoolKlass.o; +text: .text%__1cRconstantPoolKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constMethodKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: 
constMethodKlass.o; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: symbolKlass.o; +text: .text%__1cLsymbolKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: symbolKlass.o; +text: .text%__1cLsymbolKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: constantPoolKlass.o; +text: .text%__1cRconstantPoolKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: cpCacheKlass.o; +text: .text%__1cWconstantPoolCacheKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cWConstantPoolCacheEntryNoop_iterate_m6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: cpCacheKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: cpCacheKlass.o; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cNinstanceKlassViterate_static_fields6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cLklassVtableRoop_oop_iterate_m6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cLklassItableRoop_oop_iterate_m6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cKklassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cRInterpreterOopMapLoop_iterate6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cKOopClosureIdo_oop_v6MppnHoopDesc__v_: space.o; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cRInterpreterOopMapLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: klassKlass.o; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: klassKlass.o; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: arrayKlassKlass.o; +text: .text%__1cLCardTableRSUyounger_refs_iterate6MpnKGeneration_pnQOopsInGenClosure__v_; +text: .text%__1cMSubTasksDoneTall_tasks_completed6M_v_; +text: .text%__1cQDefNewGenerationbCFastEvacuateFollowersClosureHdo_void6M_v_; +text: .text%__1cQGenCollectedHeapbCoop_since_save_marks_iterate6MipnPFastScanClosure_2_v_; +text: .text%__1cQDefNewGenerationbFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_; +text: .text%__1cPContiguousSpacebFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_; +text: .text%__1cNobjArrayKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_; +text: .text%__1cNinstanceKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_: typeArrayKlass.o; +text: 
.text%__1cQinstanceRefKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_; +text: .text%__1cXjava_lang_ref_ReferenceNreferent_addr6FpnHoopDesc__p2_; +text: .text%__1cSReferenceProcessorSdiscover_reference6MpnHoopDesc_nNReferenceType__i_; +text: .text%__1cXjava_lang_ref_ReferenceJnext_addr6FpnHoopDesc__p2_; +text: .text%__1cXjava_lang_ref_ReferencePdiscovered_addr6FpnHoopDesc__p2_; +text: .text%__1cSReferenceProcessorTget_discovered_list6MnNReferenceType__ppnHoopDesc__; +text: .text%__1cKGenerationHpromote6MpnHoopDesc_Ip2_2_; +text: .text%__1cbCOneContigSpaceCardGenerationIallocate6MIii_pnIHeapWord__: tenuredGeneration.o; +text: .text%__1cbCOneContigSpaceCardGenerationbFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_; +text: .text%__1cQGenCollectedHeapbAno_allocs_since_save_marks6Mi_i_; +text: .text%__1cQDefNewGenerationbAno_allocs_since_save_marks6M_i_; +text: .text%__1cbCOneContigSpaceCardGenerationbAno_allocs_since_save_marks6M_i_; +text: .text%__1cQDefNewGenerationUFastKeepAliveClosure2t6Mp0pnSScanWeakRefClosure__v_; +text: .text%__1cQDefNewGenerationQKeepAliveClosure2t6MpnSScanWeakRefClosure__v_; +text: .text%__1cbDReferenceProcessorInitializerIis_clean6kM_v_: concurrentMarkSweepGeneration.o; +text: .text%__1cSReferenceProcessorbDprocess_discovered_references6M_v_; +text: .text%__1cSReferenceProcessorbAprocess_discovered_reflist6MppnHoopDesc_pnPReferencePolicy_i_v_; +text: .text%__1cSReferenceProcessorOprocess_phase16MppnHoopDesc_pnPReferencePolicy_pnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: .text%__1cQDefNewGenerationOIsAliveClosureLdo_object_b6MpnHoopDesc__i_; +text: .text%__1cULRUCurrentHeapPolicyWshould_clear_reference6MpnHoopDesc__i_; +text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_; +text: .text%__1cXjava_lang_ref_ReferenceIset_next6FpnHoopDesc_2_v_; +text: .text%__1cQDefNewGenerationUFastKeepAliveClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cSReferenceProcessorOprocess_phase26MppnHoopDesc_pnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSReferenceProcessorOprocess_phase36MppnHoopDesc_ipnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: .text%__1cSReferenceProcessorQprocess_phaseJNI6M_v_; +text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cOJNIHandleBlockMweak_oops_do6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cQDefNewGenerationLswap_spaces6M_v_; +text: .text%__1cIageTablebAcompute_tenuring_threshold6MI_i_; +text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: defNewGeneration.o; +text: .text%__1cSReferenceProcessorbDenqueue_discovered_references6M_i_; +text: .text%__1cXjava_lang_ref_ReferenceRpending_list_addr6F_ppnHoopDesc__; +text: .text%__1cSReferenceProcessorbBenqueue_discovered_reflists6MppnHoopDesc__v_; +text: .text%__1cSReferenceProcessorbAenqueue_discovered_reflist6MpnHoopDesc_p2_v_; +text: .text%__1cQGenCollectedHeapPupdate_gc_stats6Mii_v_: genCollectedHeap.o; +text: .text%__1cKGenerationPupdate_gc_stats6Mii_v_: defNewGeneration.o; +text: .text%__1cRTenuredGenerationPupdate_gc_stats6Mii_v_; +text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_; +text: .text%__1cKGenerationPupdate_gc_stats6Mii_v_: compactingPermGenGen.o; +text: .text%__1cRTenuredGenerationOshould_collect6MiIii_i_; +text: .text%__1cKGenerationPshould_allocate6MIii_i_: tenuredGeneration.o; +text: .text%__1cbCOneContigSpaceCardGenerationEfree6kM_I_; +text: .text%__1cQDefNewGenerationQcompute_new_size6M_v_; 
+text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_; +text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_; +text: .text%__1cQGenCollectedHeapLgc_epilogue6Mi_v_; +text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_; +text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_; +text: .text%__1cWThreadLocalAllocBufferGresize6M_v_; +text: .text%__1cUGenGCEpilogueClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o; +text: .text%__1cQDefNewGenerationLgc_epilogue6Mi_v_; +text: .text%__1cRTenuredGenerationLgc_epilogue6Mi_v_; +text: .text%__1cbCOneContigSpaceCardGenerationLgc_epilogue6Mi_v_; +text: .text%__1cRTenuredGenerationPupdate_counters6M_v_; +text: .text%__1cUCompactingPermGenGenPupdate_counters6M_v_; +text: .text%__1cXTraceMemoryManagerStats2T6M_v_; +text: .text%__1cNMemoryServiceGgc_end6Fi_v_; +text: .text%__1cPGCMemoryManagerGgc_end6M_v_; +text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_; +text: .text%__1cNJvmtiGCMarker2T6M_v_; +text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_; +text: .text%__1cPVM_GC_OperationbKrelease_and_notify_pending_list_lock6M_v_; +text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_; +text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_: jni.o; +text: .text%jni_GetIntArrayRegion: jni.o; +text: .text%jni_SetIntArrayRegion: jni.o; +text: .text%jni_PushLocalFrame: jni.o; +text: .text%jni_PopLocalFrame: jni.o; +text: .text%__1cMGraphBuilderJnegate_op6MpnJValueType__v_; +text: .text%__1cINegateOpFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerLdo_NegateOp6MpnINegateOp__v_; +text: .text%__1cMLinkResolverbPlinktime_resolve_interface_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cPciInstanceKlassLimplementor6M_p0_; +text: .text%__1cINegateOpPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorLdo_NegateOp6MpnINegateOp__v_; +text: .text%__1cIValueGenJspill_one6MpnJValueType__v_; +text: .text%__1cIRegAllocbBget_smallest_value_to_spill6kMpnJValueType__pnLInstruction__; +text: .text%__1cLLIR_EmitterRarray_store_check6MpnLLIR_OprDesc_2nFRInfo_33pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListLstore_check6MpnLLIR_OprDesc_2222pnMCodeEmitInfo__v_; +text: .text%__1cPLIR_OpTypeCheck2t6MnILIR_Code_pnLLIR_OprDesc_3333pnMCodeEmitInfo__v_; +text: .text%__1cXArrayStoreExceptionStub2t6MpnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListLshift_right6MnFRInfo_i1_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListLshift_right6MpnLLIR_OprDesc_222_v_; +text: .text%__1cIValueGenLdo_NegateOp6MpnINegateOp__v_; +text: .text%__1cLLIR_EmitterGnegate6MnFRInfo_pnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListGnegate6MnFRInfo_1_v_: c1_LIREmitter.o; +text: .text%__1cXArrayStoreExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cXArrayStoreExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNLIR_AssemblerEleal6MpnLLIR_OprDesc_2_v_; +text: .text%__1cNLIR_AssemblerGnegate6MpnLLIR_OprDesc_2_v_; +text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler_x86.o; +text: .text%__1cXArrayStoreExceptionStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cIRuntime1Tresolve_static_call6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_; +text: .text%__1cSCompiledStaticCallIis_clean6kM_i_; +text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_; +text: 
.text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cHciKlassSsuper_check_offset6M_I_; +text: .text%__1cIRuntime1Thandle_wrong_method6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cFframeRis_compiled_frame6kMpi_i_; +text: .text%__1cHnmethodOis_java_method6kM_i_: nmethod.o; +text: .text%__1cGEventsDlog6FpkcE_v_: sharedRuntime.o; +text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__; +text: .text%__1cNRelocIteratorEnext6M_i_: sharedRuntime.o; +text: .text%__1cKCompiledICMset_to_clean6M_v_; +text: .text%__1cKCompiledICMstub_address6kM_pC_; +text: .text%__1cGICStubFclear6M_v_; +text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_; +text: .text%__1cIRuntime1Jarraycopy6FpnHoopDesc_i2ii_i_; +text: .text%__1cMGraphBuilderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: .text%__1cYDebugInformationRecorderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cJCodeCacheXmark_for_deoptimization6FpnMklassOopDesc__i_; +text: .text%__1cNinstanceKlassXmark_dependent_nmethods6MpnMklassOopDesc__i_; +text: .text%jni_NewWeakGlobalRef: jni.o; +text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__; +text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%jni_CallIntMethodV: jni.o; +text: .text%Unsafe_GetObject; +text: .text%jni_CallBooleanMethod: jni.o; +text: .text%jni_CallVoidMethodV: jni.o; +text: .text%JVM_GetClassDeclaredMethods; +text: .text%JVM_InvokeMethod; +text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_; +text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_; +text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_; +text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_; +text: .text%JVM_IsInterrupted; +# Test LoadJFrame +text: .text%__1cTresource_free_bytes6FpcI_v_; +text: .text%__1cRComputeEntryStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cJFloatTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cJFloatTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cJFloatTypeMas_FloatType6M_p0_: c1_ValueType.o; +text: .text%__1cIValueGenTdo_ArithmeticOp_FPU6MpnMArithmeticOp__v_; +text: .text%__1cHLockRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocOset_locked_fpu6MipnLInstruction_i_v_; +text: .text%__1cIValueGenNis_32bit_mode6M_i_; +text: .text%__1cLGetRefCountIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cJFloatTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cHFreeRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocMset_free_fpu6Mi_v_; +text: .text%__1cQChangeSpillCountIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cLLIR_EmitterRarithmetic_op_fpu6MnJBytecodesECode_pnLLIR_OprDesc_44i_v_; +text: .text%__1cILIR_ListDmul6MpnLLIR_OprDesc_22_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenKround_item6MpnEItem__v_; +text: .text%__1cLLIR_EmitterFround6MipnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListKround32bit6MnFRInfo_i_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenOspill_register6MnFRInfo__v_; +text: 
.text%__1cIRegAllocTget_value_for_rinfo6kMnFRInfo__pnLInstruction__; +text: .text%__1cLGetValueForGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIValueGenKdivInRInfo6F_nFRInfo__; +text: .text%__1cIValueGenLremOutRInfo6F_nFRInfo__; +text: .text%__1cMArithmeticOpKlock_stack6kM_pnKValueStack__: c1_Instruction.o; +text: .text%__1cLLIR_EmitterParithmetic_idiv6MnJBytecodesECode_pnLLIR_OprDesc_44nFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListEirem6MnFRInfo_111pnMCodeEmitInfo__v_; +text: .text%__1cHLIR_Op3Fvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cHLIR_Op3Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op36MpnHLIR_Op3__v_; +text: .text%__1cNLIR_AssemblerIfpu_push6MnFRInfo__v_; +text: .text%__1cIFrameMapLFpuStackSimEpush6Mi_v_; +text: .text%__1cNLIR_AssemblerKfpu_on_tos6MnFRInfo__v_; +text: .text%__1cIFrameMapLFpuStackSimPoffset_from_tos6kMi_i_; +text: .text%__1cIintArrayIindex_of6kMki_i_: c1_FrameMap_x86.o; +text: .text%__1cNLIR_AssemblerHfpu_pop6MnFRInfo__v_; +text: .text%__1cIFrameMapLFpuStackSimDpop6Mi_i_; +text: .text%__1cNLIR_AssemblerKround32_op6MpnLLIR_OprDesc_2_v_; +text: .text%__1cJAssemblerGfist_s6MnHAddress__v_; +text: .text%__1cNLIR_AssemblerJreset_FPU6M_v_; +text: .text%__1cNLIR_AssemblerIemit_op36MpnHLIR_Op3__v_; +text: .text%__1cNLIR_AssemblerParithmetic_idiv6MnILIR_Code_pnLLIR_OprDesc_333pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerXadd_debug_info_for_div06MipnMCodeEmitInfo__v_; +text: .text%__1cNDivByZeroStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cNDivByZeroStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cLInstructionOas_ArrayLength6M_pnLArrayLength__: c1_GraphBuilder.o; +text: .text%__1cLInstructionKas_ShiftOp6M_pnHShiftOp__: c1_Instruction.o; +text: .text%__1cILIR_ListLlogical_xor6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter.o; +text: .text%__1cIRuntime1Ohandle_ic_miss6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_; +text: .text%__1cKCompiledICOis_megamorphic6kM_i_; +text: .text%__1cLVtableStubsOis_entry_point6FpC_i_; +text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_; +text: .text%__1cLVtableStubsLcreate_stub6FiipnNmethodOopDesc__pC_; +text: .text%__1cLVtableStubsGlookup6Fiii_pnKVtableStub__; +text: .text%__1cLVtableStubsScreate_vtable_stub6Fii_pnKVtableStub__; +text: .text%__1cKVtableStubSpd_code_size_limit6Fi_i_; +text: .text%__1cKVtableStub2n6FIi_pv_; +text: .text%__1cKVtableStubRpd_code_alignment6F_i_; +text: .text%__1cLVtableStubsFenter6FiiipnKVtableStub__v_; +text: .text%__1cGEventsDlog6FpkcE_v_: compiledIC.o; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_; +text: .text%Unsafe_EnsureClassInitialized; +text: .text%Unsafe_StaticFieldOffset; +text: .text%Unsafe_StaticFieldBaseFromField; +text: .text%Unsafe_GetIntVolatile; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: generateOopMap.o; +text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: c1_GraphBuilder.o; +text: .text%__1cLInstructionKas_ShiftOp6M_pnHShiftOp__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderMtable_switch6M_v_; +text: .text%__1cLTableSwitchFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: 
.text%__1cNCanonicalizerOdo_TableSwitch6MpnLTableSwitch__v_; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_GraphBuilder.o; +text: .text%__1cGSwitchPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorOdo_TableSwitch6MpnLTableSwitch__v_; +text: .text%__1cIValueGenOdo_TableSwitch6MpnLTableSwitch__v_; +text: .text%__1cIValueGenVsetup_phis_for_switch6MpnEItem_pnKValueStack__v_; +text: .text%__1cLLIR_EmitterOtableswitch_op6MpnLLIR_OprDesc_ipnKBlockBegin__v_; +text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_; +# Test JHello +text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_; +text: .text%JVM_InitializeSocketLibrary; +text: .text%__1cDhpiZinitialize_socket_library6F_i_; +text: .text%JVM_Socket; +text: .text%Unsafe_PageSize; +text: .text%__1cNFingerprinterHdo_byte6M_v_: dump.o; +text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_: interpreterRuntime.o; +text: .text%Unsafe_SetMemory; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: unsafe.o; +text: .text%__1cNSharedRuntimeElrem6Fxx_x_; +text: .text%Unsafe_DefineClass1; +text: .text%__1cSUnsafe_DefineClass6FpnHJNIEnv__pnI_jstring_pnL_jbyteArray_iipnI_jobject_7_pnH_jclass__: unsafe.o; +text: .text%JVM_DefineClass; +text: .text%__1cPClassFileParserXverify_unqualified_name6MpcIi_i_; +text: .text%__1cVLoaderConstraintTableYextend_loader_constraint6MpnVLoaderConstraintEntry_nGHandle_pnMklassOopDesc__v_; +text: .text%__1cVLoaderConstraintTablebHensure_loader_constraint_capacity6MpnVLoaderConstraintEntry_i_v_; +text: .text%__1cIciObjectIis_klass6M_i_: ciInstance.o; +text: .text%__1cQInstanceConstantIencoding6kM_pnI_jobject__; +text: .text%__1cLInstructionOas_ArrayLength6M_pnLArrayLength__: c1_Instruction.o; +text: .text%__1cILIR_ListQunwind_exception6MnFRInfo_1pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cIRuntime1Tprimitive_arraycopy6FpnIHeapWord_2i_v_; +text: .text%__1cRComputeEntryStackHdo_char6M_v_: generateOopMap.o; +text: .text%jni_NewDirectByteBuffer; +text: .text%__1cbDinitializeDirectBufferSupport6FpnHJNIEnv___i_: jni.o; +text: .text%lookupDirectBufferClasses: jni.o; +text: .text%__1cJlookupOne6FpnHJNIEnv__pkcpnGThread__pnH_jclass__: jni.o; +text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__: jni.o; +text: .text%jni_GetDoubleArrayRegion: jni.o; +text: .text%__1cNSignatureInfoJdo_double6M_v_: bytecode.o; +text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_: jni.o; +text: .text%__1cQComputeCallStackHdo_byte6M_v_: generateOopMap.o; +text: .text%__1cFKlassQup_cast_abstract6M_p0_; +text: .text%__1cRComputeEntryStackHdo_byte6M_v_: generateOopMap.o; +text: .text%__1cNSharedRuntimeDd2i6Fd_i_; +text: .text%__1cSInterpreterRuntimeWslow_signature_handler6FpnKJavaThread_pnNmethodOopDesc_pi5_pC_; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_x86.o; +text: .text%__1cUSlowSignatureHandlerLpass_object6M_v_: interpreterRT_x86.o; +text: .text%__1cXNativeSignatureIteratorIdo_array6Mii_v_: interpreterRT_x86.o; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_x86.o; +text: .text%__1cUSlowSignatureHandlerIpass_int6M_v_: interpreterRT_x86.o; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_x86.o; +text: .text%jni_GetFloatArrayRegion: jni.o; +text: .text%jni_GetCharArrayRegion: jni.o; +text: .text%jni_SetFloatField: jni.o; +text: .text%jni_NewFloatArray: jni.o; +text: .text%jni_SetFloatArrayRegion: jni.o; +# 
SwingSet +text: .text%JVM_GetFieldIxModifiers; +text: .text%JVM_GetCPFieldClassNameUTF; +text: .text%JVM_GetCPFieldModifiers; +text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_; +text: .text%__1cXjava_lang_ref_ReferenceOset_discovered6FpnHoopDesc_2_v_; +text: .text%__1cMStoreIndexedPother_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%JVM_MonitorNotify; +text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_; +text: .text%__1cKValueStackElock6MpnHIRScope_pnLInstruction__i_; +text: .text%__1cKValueStackGunlock6M_i_; +text: .text%__1cLLIR_EmitterVmonitorenter_at_entry6MnFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterNmonitor_enter6MnFRInfo_111ipnMCodeEmitInfo_3_v_; +text: .text%__1cQMonitorEnterStub2t6MnFRInfo_1pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListbAload_stack_address_monitor6MinFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListLlock_object6MnFRInfo_111pnICodeStub_pnMCodeEmitInfo__v_; +text: .text%__1cIValueGenNsyncTempRInfo6F_nFRInfo__; +text: .text%__1cLLIR_EmitterQreturn_op_prolog6Mi_v_; +text: .text%__1cLLIR_EmitterMmonitor_exit6MnFRInfo_11i_v_; +text: .text%__1cILIR_ListNunlock_object6MnFRInfo_11pnICodeStub__v_; +text: .text%__1cKLIR_OpLockFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQMonitorEnterStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cRMonitorAccessStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o; +text: .text%__1cKLIR_OpLockJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerJemit_lock6MpnKLIR_OpLock__v_; +text: .text%__1cNLIR_AssemblerPmonitor_address6MinFRInfo__v_; +text: .text%__1cIFrameMapbEaddress_for_monitor_lock_index6kMi_nHAddress__; +text: .text%__1cIFrameMapbAfp_offset_for_monitor_lock6kMi_i_; +text: .text%__1cNLIR_AssemblerJemit_lock6MpnKLIR_OpLock__v_; +text: .text%__1cRC1_MacroAssemblerLlock_object6MpnMRegisterImpl_22rnFLabel__v_; +text: .text%__1cQMonitorEnterStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o; +text: .text%__1cIFrameMapWmonitor_object_regname6kMi_nHOptoRegEName__; +text: .text%__1cIFrameMapbCfp_offset_for_monitor_object6kMi_i_; +text: .text%__1cMCodeEmitInfobHlocation_for_monitor_object_index6Mi_nILocation__; +text: .text%__1cIFrameMapbHlocation_for_monitor_object_index6kMipnILocation__i_; +text: .text%__1cMCodeEmitInfobFlocation_for_monitor_lock_index6Mi_nILocation__; +text: .text%__1cIFrameMapbFlocation_for_monitor_lock_index6kMipnILocation__i_; +text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_; +text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cRC1_MacroAssemblerNunlock_object6MpnMRegisterImpl_22rnFLabel__v_; +text: .text%__1cPMonitorExitStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o; +text: .text%__1cQMonitorEnterStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNLIR_AssemblerRload_receiver_reg6MpnMRegisterImpl__v_; +text: .text%__1cNLIR_AssemblerLmonitorexit6MnFRInfo_1pnMRegisterImpl_i3_v_; +text: .text%__1cPMonitorExitStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%jni_NewIntArray: jni.o; +text: .text%__1cNCollectedHeapYlarge_typearray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cQinstanceRefKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_; +text: 
.text%__1cRTenuredGenerationKshort_name6kM_pkc_: tenuredGeneration.o; +text: .text%__1cKGenerationIcounters6M_pnRCollectorCounters__: tenuredGeneration.o; +text: .text%__1cRTenuredGenerationHcollect6MiiIii_v_; +text: .text%__1cRTenuredGenerationbJretire_alloc_buffers_before_full_gc6M_v_; +text: .text%__1cbCOneContigSpaceCardGenerationHcollect6MiiIii_v_; +text: .text%__1cMGenMarkSweepTinvoke_at_safepoint6FipnSReferenceProcessor_i_v_; +text: .text%__1cJCodeCacheLgc_prologue6F_v_; +text: .text%__1cHThreadsLgc_prologue6F_v_; +text: .text%__1cKJavaThreadLgc_prologue6M_v_; +text: .text%__1cKJavaThreadJframes_do6MpFpnFframe_pknLRegisterMap__v_v_; +text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_prologue6M_v_; +text: .text%__1cQGenCollectedHeapRsave_used_regions6Mii_v_; +text: .text%__1cKGenerationQsave_used_region6M_v_: tenuredGeneration.o; +text: .text%__1cbCOneContigSpaceCardGenerationLused_region6kM_nJMemRegion__; +text: .text%__1cPContiguousSpaceLused_region6kM_nJMemRegion__: space.o; +text: .text%__1cKGenerationQsave_used_region6M_v_: defNewGeneration.o; +text: .text%__1cKGenerationLused_region6kM_nJMemRegion__: defNewGeneration.o; +text: .text%__1cKGenerationQsave_used_region6M_v_: compactingPermGenGen.o; +text: .text%__1cMGenMarkSweepPallocate_stacks6F_v_; +text: .text%__1cQGenCollectedHeapOgather_scratch6MpnKGeneration_I_pnMScratchBlock__; +text: .text%__1cQDefNewGenerationScontribute_scratch6MrpnMScratchBlock_pnKGeneration_I_v_; +text: .text%__1cKGenerationScontribute_scratch6MrpnMScratchBlock_p0I_v_: tenuredGeneration.o; +text: .text%__1cRsort_scratch_list6FrpnMScratchBlock__v_: genCollectedHeap.o; +text: .text%__1cVremoveSmallestScratch6FppnMScratchBlock__1_: genCollectedHeap.o; +text: .text%__1cMGenMarkSweepRmark_sweep_phase16Firii_v_; +text: .text%__1cJEventMark2t6MpkcE_v_: genMarkSweep.o; +text: .text%__1cJMarkSweepRFollowRootClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cJMarkSweepLfollow_root6FppnHoopDesc__v_; +text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLklassVtableToop_follow_contents6M_v_; +text: .text%__1cJMarkSweepO_mark_and_push6FppnHoopDesc__v_; +text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_; +text: .text%__1cJMarkSweepMfollow_stack6F_v_; +text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_; +text: .text%__1cLklassItableToop_follow_contents6M_v_; +text: .text%__1cJMarkSweepNpreserve_mark6FpnHoopDesc_pnLmarkOopDesc__v_; +text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMjniIdMapBaseHoops_do6MpnKOopClosure__v_; +text: .text%__1cIjniIdMapHoops_do6MpnKOopClosure__v_; +text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJMarkSweepPmark_and_follow6FppnHoopDesc__v_; +text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_; +text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: 
.text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryYalways_strong_classes_do6FpnKOopClosure__v_; +text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryPplaceholders_do6FpnKOopClosure__v_; +text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cJvmSymbolsHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cJMarkSweepOIsAliveClosureLdo_object_b6MpnHoopDesc__i_: markSweep.o; +text: .text%__1cJMarkSweepQKeepAliveClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_: markSweep.o; +text: .text%__1cQSystemDictionaryMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cKDictionaryMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_; +text: .text%__1cJCodeCacheMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__; +text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__; +text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_; +text: .text%__1cJCodeCacheFalive6FpnICodeBlob__2_; +text: .text%__1cKBufferBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cKBufferBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_; +text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__; +text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_; +text: .text%__1cNSingletonBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cNSingletonBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cLRuntimeStubIis_alive6kM_i_: codeBlob.o; +text: .text%__1cLRuntimeStubbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cHnmethodIis_alive6kM_i_: nmethod.o; +text: .text%__1cHnmethodbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cHnmethodOis_not_entrant6kM_i_: nmethod.o; +text: .text%__1cHnmethodbHfollow_root_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_ppnHoopDesc_iri_v_; +text: .text%__1cOoop_RelocationJoop_value6M_pnHoopDesc__; +text: .text%__1cVcompiledICHolderKlassSoop_being_unloaded6MpnRBoolObjectClosure_pnHoopDesc__i_; +text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_; +text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_; +text: .text%__1cMGenMarkSweepRmark_sweep_phase26F_v_; +text: .text%__1cQGenCollectedHeapWprepare_for_compaction6M_v_; +text: .text%__1cKGenerationWprepare_for_compaction6MpnMCompactPoint__v_; +text: .text%__1cbCOneContigSpaceCardGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: tenuredGeneration.o; +text: .text%__1cPContiguousSpaceWprepare_for_compaction6MpnMCompactPoint__v_; +text: .text%__1cWOffsetTableContigSpaceUinitialize_threshold6M_pnIHeapWord__; +text: .text%__1cMTenuredSpaceSallowed_dead_ratio6kM_i_; +text: 
.text%__1cQCompactibleSpaceHforward6MpnHoopDesc_IpnMCompactPoint_pnIHeapWord__6_; +text: .text%__1cWOffsetTableContigSpacePcross_threshold6MpnIHeapWord_2_2_; +text: .text%__1cQCompactibleSpaceQinsert_deadspace6MrIpnIHeapWord_I_i_; +text: .text%__1cQCompactibleSpaceVnext_compaction_space6kM_p0_: space.o; +text: .text%__1cQDefNewGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: defNewGeneration.o; +text: .text%__1cQCompactibleSpaceSallowed_dead_ratio6kM_i_: space.o; +text: .text%__1cbCOneContigSpaceCardGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: compactingPermGenGen.o; +text: .text%__1cPContigPermSpaceSallowed_dead_ratio6kM_i_; +text: .text%__1cMGenMarkSweepRmark_sweep_phase36Fi_v_; +text: .text%__1cUCompactingPermGenGenTpre_adjust_pointers6M_v_; +text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cQGenCollectedHeapSprocess_weak_roots6MpnKOopClosure_2_v_; +text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_; +text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_; +text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_; +text: .text%__1cSReferenceProcessorPoops_do_statics6FpnKOopClosure__v_; +text: .text%__1cSReferenceProcessorHoops_do6MpnKOopClosure__v_; +text: .text%__1cJMarkSweepMadjust_marks6F_v_; +text: .text%__1cYGenAdjustPointersClosureNdo_generation6MpnKGeneration__v_: genMarkSweep.o; +text: .text%__1cKGenerationPadjust_pointers6M_v_; +text: .text%__1cbCOneContigSpaceCardGenerationNspace_iterate6MpnMSpaceClosure_i_v_; +text: .text%__1cVAdjustPointersClosureIdo_space6MpnFSpace__v_: generation.o; +text: .text%__1cQCompactibleSpacePadjust_pointers6M_v_; +text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQDefNewGenerationNspace_iterate6MpnMSpaceClosure_i_v_; +text: .text%__1cUCompactingPermGenGenPadjust_pointers6M_v_; +text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLklassVtableToop_adjust_pointers6M_v_; +text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_; +text: .text%__1cLklassItableToop_adjust_pointers6M_v_; +text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_; +text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cMGenMarkSweepRmark_sweep_phase46F_v_; +text: .text%__1cUCompactingPermGenGenHcompact6M_v_; +text: .text%__1cQCompactibleSpaceHcompact6M_v_; +text: .text%__1cPContiguousSpaceWreset_after_compaction6M_v_: space.o; +text: 
.text%__1cRGenCompactClosureNdo_generation6MpnKGeneration__v_: genMarkSweep.o; +text: .text%__1cKGenerationHcompact6M_v_; +text: .text%__1cUCompactingPermGenGenMpost_compact6M_v_; +text: .text%__1cJMarkSweepNrestore_marks6F_v_; +text: .text%__1cMGenMarkSweepRdeallocate_stacks6F_v_; +text: .text%__1cLCardTableRSSclear_into_younger6MpnKGeneration_i_v_; +text: .text%__1cLCardTableRSFclear6MnJMemRegion__v_: cardTableRS.o; +text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_; +text: .text%__1cRCardTableModRefBSPclear_MemRegion6MnJMemRegion__v_; +text: .text%__1cHThreadsLgc_epilogue6F_v_; +text: .text%__1cKJavaThreadLgc_epilogue6M_v_; +text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_epilogue6M_v_; +text: .text%__1cFframeMpd_gc_epilog6M_v_; +text: .text%__1cJCodeCacheLgc_epilogue6F_v_; +text: .text%__1cICodeBlobTfix_oop_relocations6M_v_; +text: .text%__1cICodeBlobTfix_oop_relocations6MpC1_v_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: codeBlob.o; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: relocInfo.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: onStackReplacement.o; +text: .text%__1cQGenCollectedHeapWupdate_time_of_last_gc6Mx_v_: genMarkSweep.o; +text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: tenuredGeneration.o; +text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: compactingPermGenGen.o; +text: .text%__1cbCOneContigSpaceCardGenerationVunsafe_max_alloc_nogc6kM_I_; +text: .text%__1cRTenuredGenerationQcompute_new_size6M_v_; +text: .text%__1cKGenerationEspec6M_pnOGenerationSpec__; +text: .text%Unsafe_CompareAndSwapObject; +text: .text%__1cLVtableStubsScreate_itable_stub6Fii_pnKVtableStub__; +text: .text%__1cLLIR_EmitterDnop6M_v_; +text: .text%__1cJAssemblerEmovl6MnHAddress_pnI_jobject__v_; +text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_; +text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_; +text: .text%__1cLklassVtableTis_miranda_entry_at6Mi_i_; +text: .text%__1cRPrivilegedElementHoops_do6MpnKOopClosure__v_; +text: .text%__1cJCodeCacheIcontains6Fpv_i_; +text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cJOopMapSetHoops_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cJOopMapSetGall_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure_pFppnHoopDesc_9E_v9B9B_v_; +text: .text%__1cICodeBlobbAoop_map_for_return_address6MpCi_pnGOopMap__; +text: .text%__1cJOopMapSetSfind_map_at_offset6kMii_pnGOopMap__; +text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_; +text: .text%__1cFframeVoopmapreg_to_location6kMnFVMRegEName_pknLRegisterMap__ppnHoopDesc__; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: defNewGeneration.o; +text: .text%__1cJOopMapSetTupdate_register_map6FpknFframe_pnICodeBlob_pnLRegisterMap__v_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: nmethod.o; +text: .text%__1cQComputeCallStackIdo_float6M_v_: generateOopMap.o; +text: .text%jni_DeleteWeakGlobalRef: jni.o; +text: .text%__1cKJNIHandlesTdestroy_weak_global6FpnI_jobject__v_; +text: .text%__1cILIR_ListJoop2stack6MpnI_jobject_i_v_: c1_LIREmitter.o; +text: .text%__1cNObjectMonitorREntryQdDueue_unlink6MpnMObjectWaiter__v_; +text: .text%JVM_IsSameClassPackage; +text: .text%__1cTGeneratePairingInfoRpossible_gc_point6MpnOBytecodeStream__i_: ciMethod.o; +text: .text%__1cTGeneratePairingInfoOreport_results6kM_i_: ciMethod.o; +text: 
.text%__1cMGraphBuilderMmonitorenter6MpnLInstruction__v_; +text: .text%__1cMMonitorEnterFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_MonitorEnter6MpnMMonitorEnter__v_; +text: .text%__1cNAccessMonitorIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderLmonitorexit6MpnLInstruction__v_; +text: .text%__1cLMonitorExitFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerOdo_MonitorExit6MpnLMonitorExit__v_; +text: .text%__1cILongTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cILongTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cNAccessMonitorPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorPdo_MonitorEnter6MpnMMonitorEnter__v_; +text: .text%__1cTNullCheckEliminatorUhandle_AccessMonitor6MpnNAccessMonitor__v_; +text: .text%__1cQNullCheckVisitorOdo_MonitorExit6MpnLMonitorExit__v_; +text: .text%__1cIValueGenPdo_MonitorEnter6MpnMMonitorEnter__v_; +text: .text%__1cNc1_AllocTableMhas_two_free6kM_i_; +text: .text%__1cMLongConstantPas_LongConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cFRInfoLas_rinfo_lo6kM_0_; +text: .text%__1cLLIR_EmitterJopr2intLo6MpnLLIR_OprDesc__i_; +text: .text%__1cFRInfoLas_rinfo_hi6kM_0_; +text: .text%__1cLLIR_EmitterJopr2intHi6MpnLLIR_OprDesc__i_; +text: .text%__1cIValueGenOdo_MonitorExit6MpnLMonitorExit__v_; +text: .text%__1cNAccessMonitorQas_AccessMonitor6M_p0_: c1_GraphBuilder.o; +text: .text%__1cJAssemblerFpushl6MpnI_jobject__v_; +text: .text%__1cNLIR_AssemblerNas_Address_hi6MpnLLIR_Address__nHAddress__; +text: .text%__1cFRInfoOas_register_hi6kM_pnMRegisterImpl__; +text: .text%__1cNLIR_AssemblerNas_Address_lo6MpnLLIR_Address__nHAddress__; +text: .text%__1cFRInfoOas_register_lo6kM_pnMRegisterImpl__; +text: .text%__1cCosHrealloc6FpvI_1_; +text: .text%Unsafe_GetNativeFloat; +text: .text%__1cIValueGenQdo_currentThread6MpnJIntrinsic__v_; +text: .text%__1cILIR_ListKget_thread6MnFRInfo__v_: c1_CodeGenerator_x86.o; +text: .text%__1cNLIR_AssemblerKget_thread6MpnLLIR_OprDesc__v_; +text: .text%__1cIValueGenSload_item_patching6MpnHIRScope_ipnEItem_pnKValueStack_pnOExceptionScope__v_; +text: .text%__1cEItemUget_jobject_constant6kM_pnIciObject__; +text: .text%__1cJValueTypeTas_InstanceConstant6M_pnQInstanceConstant__: c1_ValueType.o; +text: .text%__1cIintArrayIindex_of6kMki_i_: c1_CodeGenerator.o; +text: .text%__1cMLinkResolverbEresolve_interface_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cGciTypeNis_subtype_of6Mp0_i_; +text: .text%__1cIValueGenOload_byte_item6MpnEItem__v_; +text: .text%__1cIValueGenPlock_free_rinfo6MpnLInstruction_nKc1_RegMask__nFRInfo__; +text: .text%__1cIRegAllocNget_lock_temp6MpnLInstruction_nKc1_RegMask__nFRInfo__; +text: .text%__1cQComputeCallStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cRComputeEntryStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cIFrameMapNis_byte_rinfo6FnFRInfo__i_; +text: .text%Unsafe_AllocateInstance; +text: .text%jni_AllocObject: jni.o; +text: .text%__1cQinstanceRefKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_; +text: .text%__1cNCanonicalizerMset_constant6Mi_v_: c1_Canonicalizer.o; +text: .text%__1cJTypeCheckPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cNLIR_AssemblerMcheck_icache6M_i_; +text: .text%__1cRC1_MacroAssemblerTfast_ObjectHashCode6MpnMRegisterImpl_2_v_; +text: 
.text%__1cNLIR_AssemblerZjobject2reg_with_patching6MpnMRegisterImpl_pnMCodeEmitInfo__v_; +text: .text%__1cHLogicOpIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%__1cLAccessFieldKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o; +text: .text%__1cIRuntime1Mnew_instance6FpnKJavaThread_pnMklassOopDesc__v_; +text: .text%__1cQGenCollectedHeapXhandle_failed_promotion6MpnKGeneration_pnHoopDesc_Ip4_4_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceRefKlass.o; +text: .text%__1cbCOneContigSpaceCardGenerationTexpand_and_allocate6MIiii_pnIHeapWord__; +text: .text%__1cbCOneContigSpaceCardGenerationGexpand6MII_v_; +text: .text%__1cNGCMutexLocker2t6MpnFMutex__v_; +text: .text%__1cbCOneContigSpaceCardGenerationHgrow_by6MI_i_; +text: .text%__1cPContiguousSpaceNmangle_region6MnJMemRegion__v_; +text: .text%__1cJMarkSweepRFollowRootClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cQCompactibleSpaceUinitialize_threshold6M_pnIHeapWord__: space.o; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cRAlwaysTrueClosureLdo_object_b6MpnHoopDesc__i_: genCollectedHeap.o; +text: .text%__1cLCardTableRSTinvalidate_or_clear6MpnKGeneration_ii_v_; +text: .text%__1cJMemRegionFminus6kMk0_0_; +text: .text%__1cLCardTableRSKinvalidate6MnJMemRegion__v_: cardTableRS.o; +text: .text%__1cRCardTableModRefBSKinvalidate6MnJMemRegion__v_; +text: .text%__1cIRuntime1Onew_type_array6FpnKJavaThread_pnMklassOopDesc_i_v_; +text: .text%__1cJFloatTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cNFloatConstantQas_FloatConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cILIR_ListNstore_mem_oop6MpnI_jobject_nFRInfo_inJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cJFloatTypeMas_FloatType6M_p0_: c1_Canonicalizer.o; +text: .text%__1cNConstantTableMappend_float6Mf_v_; +text: .text%__1cRAbstractAssemblerGa_long6Mi_v_; +text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_; +text: .text%__1cDCHARprocess_interface6FnTinstanceKlassHandle_pnNGrowableArray4nLKlassHandle___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: .text%__1cINewArrayPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cLLIR_EmitterQfield_store_byte6MpnLLIR_OprDesc_i2nFRInfo_ipnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerIshift_op6MnILIR_Code_nFRInfo_222_v_; +text: .text%__1cIRuntime1Mmonitorenter6FpnKJavaThread_pnHoopDesc_pnPBasicObjectLock__v_; +text: .text%__1cIRuntime1Lmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cHnmethodPis_dependent_on6MpnMklassOopDesc__i_; +text: .text%__1cHnmethodVis_dependent_on_entry6MpnMklassOopDesc_2pnNmethodOopDesc__i_; +text: .text%__1cNVM_DeoptimizeEname6kM_pkc_: vm_operations.o; +text: .text%__1cNVM_DeoptimizeEdoit6M_v_; +text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_; +text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_; +text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_; +text: .text%__1cFframeVshould_be_deoptimized6kM_i_; +text: .text%__1cICodeBlobOis_java_method6kM_i_: codeBlob.o; +text: .text%__1cJCodeCachebGmake_marked_nmethods_not_entrant6F_v_; +text: .text%__1cJCodeCacheNalive_nmethod6FpnICodeBlob__pnHnmethod__; +text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_; +text: .text%__1cHnmethodNis_osr_method6kM_i_: nmethod.o; +text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_; +text: .text%__1cHnmethodVmark_as_seen_on_stack6M_v_; +text: .text%__1cTinc_decompile_count6FpnHnmethod__v_: nmethod.o; +text: .text%__1cMVM_OperationNdoit_epilogue6M_v_: 
vm_operations.o; +text: .text%__1cHThreadsLnmethods_do6F_v_; +text: .text%__1cKJavaThreadLnmethods_do6M_v_; +text: .text%__1cGThreadLnmethods_do6M_v_; +text: .text%__1cFframeLnmethods_do6M_v_; +text: .text%__1cFframeVnmethods_code_blob_do6M_v_; +text: .text%__1cILIR_ListEidiv6MnFRInfo_i11pnMCodeEmitInfo__v_; +text: .text%__1cLlog2_intptr6Fi_i_: c1_LIRAssembler_x86.o; +text: .text%__1cONMethodSweeperPprocess_nmethod6FpnHnmethod__v_; +text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: nmethod.o; +text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o; +text: .text%__1cHnmethodVcleanup_inline_caches6M_v_; +text: .text%__1cKCompiledIC2t6MpnKRelocation__v_; +text: .text%__1cILongTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cILongTypeEsize6kM_i_: c1_ValueType.o; +text: .text%JVM_HoldsLock; +text: .text%__1cSObjectSynchronizerZcurrent_thread_holds_lock6FpnKJavaThread_nGHandle__i_; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cLLoadIndexedIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%__1cFciEnvWis_dependence_violated6FpnMklassOopDesc_pnNmethodOopDesc__i_; +text: .text%__1cFciEnvZcall_has_multiple_targets6FpnNinstanceKlass_nMsymbolHandle_3ri_i_; +text: .text%__1cFMutexbLwait_for_lock_blocking_implementation6MpnKJavaThread__v_; +text: .text%__1cHnmethodbCcan_not_entrant_be_converted6M_i_; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: oopMapCache.o; +text: .text%__1cTMaskFillerForNativeIpass_int6M_v_: oopMapCache.o; +text: .text%__1cGThreadOis_Java_thread6kM_i_: vmThread.o; +text: .text%__1cMLocalMappingDadd6MinFRInfo__v_; +text: .text%__1cILongTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cLLIR_EmitterQfield_store_long6MpnLLIR_OprDesc_i2ipnMCodeEmitInfo__v_; +text: .text%__1cKScanBlocksMis_long_only6kMi_i_; +text: .text%__1cRLIR_PeepholeStateLreg2indexLo6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateLreg2indexHi6MpnLLIR_OprDesc__i_; +text: .text%__1cNSharedRuntimeDf2l6Ff_x_; +text: .text%__1cIValueGenLdo_getClass6MpnJIntrinsic__v_; +text: .text%__1cLLIR_EmitterIgetClass6MnFRInfo_1pnMCodeEmitInfo__v_; +text: .text%__1cMGraphBuilderKcompare_op6MpnJValueType_nJBytecodesECode__v_; +text: .text%__1cJCompareOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerMdo_CompareOp6MpnJCompareOp__v_; +text: .text%__1cJCompareOpEhash6kM_i_: c1_Instruction.o; +text: .text%__1cJCompareOpEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cJCompareOpMas_CompareOp6M_p0_: c1_Instruction.o; +text: .text%__1cCIf2t6MpnLInstruction_n0BJCondition_i2pnKBlockBegin_5pnKValueStack_i_v_: c1_Canonicalizer.o; +text: .text%__1cGSetRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocLset_fpu_reg6MiipnLInstruction__v_; +text: .text%__1cJIsFreeRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cILIR_ListJfloat2reg6MfnFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListMbranch_float6MnMLIR_OpBranchNLIR_Condition_pnFLabel_4_v_; +text: .text%__1cIValueGenNreturnF0RInfo6F_nFRInfo__; +text: .text%__1cLLIR_EmitterOset_fpu_result6MnFRInfo__v_; +text: .text%__1cILIR_ListIpush_fpu6MnFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cNConstantTableZaddress_of_float_constant6Mf_pC_; +text: .text%__1cNLIR_AssemblerOfpu_two_on_tos6MnFRInfo_1i_v_; +text: .text%__1cIFrameMapLFpuStackSimEswap6M_v_; +text: .text%__1cIFrameMapLFpuStackSimRexchange_with_tos6Mi_v_; +text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_; +text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_; +text: 
.text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_; +text: .text%__1cQPlaceholderEntryHoops_do6MpnKOopClosure__v_; +text: .text%__1cHnmethodFflush6M_v_; +text: .text%__1cJEventMark2t6MpkcE_v_: nmethod.o; +text: .text%__1cICodeBlobFflush6M_v_; +text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_; +text: .text%__1cICodeHeapKdeallocate6Mpv_v_; +text: .text%__1cICodeHeapPadd_to_freelist6MpnJHeapBlock__v_; +text: .text%__1cICodeHeapPfollowing_block6MpnJFreeBlock__2_; +text: .text%__1cRComputeEntryStackIdo_float6M_v_: generateOopMap.o; +text: .text%__1cICodeHeapMinsert_after6MpnJFreeBlock_2_v_; +text: .text%__1cICodeHeapLmerge_right6MpnJFreeBlock__v_; +text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cUCompressedReadStreamMraw_read_int6FrpC_i_: nmethod.o; +text: .text%__1cFframebAoops_compiled_arguments_do6MnMsymbolHandle_ipknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cQComputeCallStackJdo_double6M_v_: generateOopMap.o; +text: .text%__1cbCOneContigSpaceCardGenerationGshrink6MI_v_; +text: .text%__1cbCOneContigSpaceCardGenerationJshrink_by6MI_v_; +text: .text%__1cMVirtualSpaceJshrink_by6MI_v_; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: oopMapCache.o; +text: .text%__1cRComputeEntryStackJdo_double6M_v_: generateOopMap.o; +text: .text%__1cKDoubleTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cKDoubleTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cODoubleConstantRas_DoubleConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cKDoubleTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cODoubleConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cKDoubleTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cHLockRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocRset_locked_double6MipnLInstruction_i_v_; +text: .text%__1cKDoubleTypeNas_DoubleType6M_p0_: c1_ValueType.o; +text: .text%__1cIFrameMapUare_adjacent_indeces6kMii_i_; +text: .text%__1cQChangeSpillCountJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocZchange_double_spill_count6Mii_v_; +text: .text%__1cILIR_ListKdouble2reg6MdnFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cHFreeRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocPset_free_double6Mi_v_; +text: .text%__1cILIR_ListDrem6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cLGetRefCountJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocNget_double_rc6kMi_i_; +text: .text%__1cLLIR_EmitterUcheck_double_address6Mi_v_; +text: .text%__1cILIR_ListQreg2double_stack6MnFRInfo_inJBasicType__v_: c1_LIREmitter.o; +text: .text%__1cRLIR_PeepholeStateNstack2indexHi6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateNstack2indexLo6MpnLLIR_OprDesc__i_; +text: .text%__1cKDoubleTypeNas_DoubleType6M_p0_: c1_Canonicalizer.o; +text: .text%__1cNConstantTableNappend_double6Md_v_; +text: .text%__1cNConstantTablebAaddress_of_double_constant6Md_pC_; +text: .text%__1cQGenCollectedHeapHcollect6MnHGCCauseFCause_i_v_; +text: .text%__1cQGenCollectedHeapOcollect_locked6MnHGCCauseFCause_i_v_; +text: .text%__1cRVM_GenCollectFullEname6kM_pkc_: vm_operations.o; +text: .text%__1cRVM_GenCollectFullEdoit6M_v_; +text: .text%__1cQGenCollectedHeapYmust_clear_all_soft_refs6M_i_; +text: .text%__1cQGenCollectedHeapSdo_full_collection6Miipi_v_; +text: .text%__1cKGenerationbHfull_collects_younger_generations6kM_i_: defNewGeneration.o; +text: .text%__1cKDoubleTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cKDoubleTypeEbase6kM_pnJValueType__: 
c1_Canonicalizer.o; +text: .text%__1cIValueMapNresize_bucket6MpnGBucket__v_; +text: .text%__1cNFloatConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cJNullCheckMas_NullCheck6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLLIR_EmitterIopr2long6MpnLLIR_OprDesc__x_; +text: .text%__1cILIR_ListKlong2stack6Mxi_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenNreturnD0RInfo6F_nFRInfo__; +text: .text%__1cJIsFreeRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocOis_free_double6kMi_i_; +text: .text%__1cGSetRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocOset_double_reg6MiipnLInstruction__v_; +text: .text%__1cLLIR_EmitterNcopy_fpu_item6MnFRInfo_pnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListHdup_fpu6MnFRInfo_1_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListDdiv6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cJAssemblerFfsubp6Mi_v_; +text: .text%__1cNLIR_AssemblerHdup_fpu6MnFRInfo_1_v_; +text: .text%__1cIFrameMapLFpuStackSimLmove_on_tos6Mi_i_; +text: .text%__1cJAssemblerGfdiv_d6MnHAddress__v_; +text: .text%__1cJAssemblerFfdivp6Mi_v_; +text: .text%__1cIValueGenMreturn2RInfo6F_nFRInfo__; +text: .text%__1cJValueTypeQas_FloatConstant6M_pnNFloatConstant__: c1_Canonicalizer.o; +text: .text%__1cIRuntime1Qnew_object_array6FpnKJavaThread_pnMklassOopDesc_i_v_; +text: .text%__1cIValueGenLdivOutRInfo6F_nFRInfo__; +text: .text%__1cILIR_ListEidiv6MnFRInfo_111pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListVvolatile_load_mem_reg6MnFRInfo_i1nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cEItemSget_jlong_constant6kM_x_; +text: .text%__1cNLIR_AssemblerQvolatile_move_op6MpnLLIR_OprDesc_2nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlass.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: objArrayKlass.o; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cONewObjectArrayKexact_type6kM_pnGciType__; +text: .text%__1cPciObjArrayKlassSis_obj_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__; +text: .text%__1cIRuntime1Noop_arraycopy6FpnIHeapWord_2i_v_; +text: .text%__1cILongTypeEbase6kM_pnJValueType__: c1_Canonicalizer.o; +text: .text%__1cMLongConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cIValueGenUdo_ArithmeticOp_Long6MpnMArithmeticOp__v_; +text: .text%__1cLLIR_EmitterSarithmetic_op_long6MnJBytecodesECode_pnLLIR_OprDesc_44pnMCodeEmitInfo__v_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cLArrayLengthIis_equal6kMpnLInstruction__i_: c1_GraphBuilder.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: compiledICHolderKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: compiledICHolderKlass.o; +text: .text%__1cTunsafe_intrinsic_id6FpnNsymbolOopDesc_1_nNmethodOopDescLIntrinsicId__; +text: .text%__1cMGraphBuilderVappend_unsafe_put_raw6MpnIciMethod_nJBasicType__i_; +text: .text%__1cMUnsafePutRawFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_; +text: .text%__1cNCanonicalizerOdo_UnsafeRawOp6MpnLUnsafeRawOp__v_; +text: .text%__1cFmatch6FpnLUnsafeRawOp_ppnLInstruction_4pi_i_: 
c1_Canonicalizer.o; +text: .text%__1cLInstructionPas_ArithmeticOp6M_pnMArithmeticOp__: c1_Instruction.o; +text: .text%__1cIUnsafeOpLas_UnsafeOp6M_p0_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderVappend_unsafe_get_raw6MpnIciMethod_nJBasicType__i_; +text: .text%__1cMUnsafeGetRawFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_; +text: .text%__1cMGraphBuilderNlookup_switch6M_v_; +text: .text%__1cIintArray2t6Mki1_v_: c1_GraphBuilder.o; +text: .text%__1cMLookupSwitchFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_LookupSwitch6MpnMLookupSwitch__v_; +text: .text%__1cMUnsafePutRawPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_; +text: .text%__1cTNullCheckEliminatorPhandle_UnsafeOp6MpnIUnsafeOp__v_; +text: .text%__1cLUnsafeRawOpPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_; +text: .text%__1cQNullCheckVisitorPdo_LookupSwitch6MpnMLookupSwitch__v_; +text: .text%__1cIValueGenPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_; +text: .text%__1cLLIR_EmitterOput_raw_unsafe6MpnLLIR_OprDesc_2i2nJBasicType__v_; +text: .text%__1cLLIR_EmitterMlong2address6MpnLLIR_OprDesc__nFRInfo__; +text: .text%__1cILIR_ListNstore_mem_reg6MnFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cIValueGenPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_; +text: .text%__1cLLIR_EmitterOget_raw_unsafe6MnFRInfo_pnLLIR_OprDesc_3inJBasicType__v_; +text: .text%__1cILIR_ListMload_mem_reg6MpnLLIR_Address_nFRInfo_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cIValueGenPdo_LookupSwitch6MpnMLookupSwitch__v_; +text: .text%__1cUcreate_lookup_ranges6FpnMLookupSwitch__pnQLookupRangeArray__: c1_CodeGenerator_x86.o; +text: .text%__1cLLIR_EmitterVlookupswitch_range_op6MpnLLIR_OprDesc_iipnKBlockBegin__v_; +text: .text%__1cNSharedRuntimeEldiv6Fxx_x_; +text: .text%Unsafe_GetObjectVolatile; +text: .text%signalHandler; +text: .text%JVM_handle_solaris_signal; +text: .text%__1cKJavaThreadUin_stack_yellow_zone6MpC_i_: os_solaris_x86.o; +text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_; +text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__pC_; +text: .text%__1cbCCompiledCodeSafepointHandlerbDhandle_polling_page_exception6M_pC_; +text: .text%__1cFframebDsender_for_raw_compiled_frame6kMpnLRegisterMap__0_; +text: .text%__1cNSafepointBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_; +text: .text%__1cUThreadSafepointStateYcaller_must_gc_arguments6kM_i_; +text: .text%__1cbCCompiledCodeSafepointHandlerYcaller_must_gc_arguments6kM_i_: safepoint.o; +text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_; +text: .text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cSvframeStreamCommonbFfill_in_compiled_inlined_sender6M_i_; +text: .text%__1cJFloatTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cIValueGenNrelease_roots6MpnKValueStack__v_; +text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__; +text: .text%__1cHciKlassNis_subtype_of6Mp0_i_; +text: .text%__1cNSharedRuntimeDd2l6Fd_x_; +text: .text%__1cOObjectConstantLis_constant6kM_i_: c1_ValueType.o; +text: .text%__1cILIR_ListLstore_array6MipnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: 
.text%__1cNLIR_AssemblerLconst2array6MpnJLIR_Const_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cQInstanceConstantLis_constant6kM_i_: c1_ValueType.o; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/reorder_COMPILER1_sparc 2009-08-01 04:16:51.534435373 +0100 @@ -0,0 +1,4512 @@ +data = R0x2000; +text = LOAD ?RXO; + + +# Test Null +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Canonicalizer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeStubs_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compilation.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compiler.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_GraphBuilder.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_IR.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Instruction.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_InstructionPrinter.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIR.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Loops.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_MacroAssembler_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Optimizer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1.o; +text: .text%__1cIiEntries2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ScanBlocks.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueSet.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueStack.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeBlob.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o; +text: .text%__1cICHeapObj2n6FI_pv_; +text: .text%__1cCosGmalloc6FI_pv_; +text: .text%__1cICodeHeap2t6M_v_; +text: .text%__1cMVirtualSpace2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compiledIC.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: deoptimization.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: 
genCollectedHeap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interp_masm_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o; +text: .text%__1cKEntryPoint2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: java.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o; +text: .text%__1cLResourceObj2n6FIn0APallocation_type__pv_; +text: .text%__1cUGenericGrowableArray2t6Mii_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: klassVtable.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodOop.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nativeInst_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nmethod.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psAdaptiveSizePolicy.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psPromotionLAB.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint_solaris_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedRuntime.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: tenuredGeneration.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vframeArray.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vtableStubs_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer_sparc.o; +text: .text%JNI_CreateJavaVM; +text: .text%__1cCosUatomic_add_bootstrap6Fipoi_i_; +text: .text%__1cCosVatomic_xchg_bootstrap6Fipoi_i_; +text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_; +text: .text%__1cMostream_init6F_v_; +text: .text%__1cMoutputStream2t6Mi_v_; +text: .text%__1cCosEinit6F_v_; +text: .text%__1cCosLinit_random6Fl_v_; +text: .text%__1cOThreadCriticalKinitialize6F_v_; +text: .text%__1cJArgumentsWinit_system_properties6F_v_; +text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_; +text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_; +text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_; +text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_; +text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_; +text: .text%__1cCosbDinit_system_properties_values6F_v_; +text: .text%__1cCosNset_boot_path6Fcc_i_; +text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_; +text: .text%__1cCosIjvm_path6Fpci_v_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_pkcp4_i_: arguments.o; +text: .text%__1cJArgumentsVprocess_settings_file6Fpkcii_i_; +text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_; +text: 
.text%__1cJArgumentsbSparse_java_tool_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cCosGgetenv6Fpkcpci_i_; +text: .text%__1cJArgumentsWparse_each_vm_init_arg6FpknOJavaVMInitArgs_pnMSysClassPath_pi_i_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_ppkc5i_i_: arguments.o; +text: .text%__1cJArgumentsMadd_property6Fpkc_i_; +text: .text%__1cJArgumentsXPropertyList_unique_add6FppnOSystemProperty_pkcpc_v_; +text: .text%__1cCosEfree6Fpv_v_; +text: .text%__1cJArgumentsMbuild_string6Fppcpkc_v_; +text: .text%__1cSCommandLineFlagsExJboolAtPut6FnXCommandLineFlagWithType_i_v_; +text: .text%__1cJArgumentsbNparse_java_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsVfinalize_vm_init_args6FpnMSysClassPath_i_i_; +text: .text%__1cMSysClassPathPexpand_endorsed6M_v_; +text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_; +text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_; +text: .text%__1cJArgumentsZcheck_vm_args_consistency6F_i_; +text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_; +text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_; +text: .text%__1cSCommandLineFlagsExKis_default6FnPCommandLineFlag__i_; +text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_; +text: .text%__1cCosXis_server_class_machine6F_i_; +text: .text%__1cJTimeStampJupdate_to6Mx_v_; +text: .text%__1cCosOjavaTimeMillis6F_x_; +text: .text%__1cJTraceTime2t6MpkciipnMoutputStream__v_; +text: .text%__1cCosGinit_26F_i_; +text: .text%__1cCosHSolarisKmmap_chunk6FpcIii_2_; +text: .text%__1cCosHSolarisRmpss_sanity_check6F_v_; +text: .text%__1cCosHSolarisOlibthread_init6F_v_; +text: .text%__1cOisT2_libthread6F_i_; +text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o; +text: .text%__1cNpriocntl_stub6FinGidtype_lipc_l_: os_solaris.o; +text: .text%__1cCosHSolarisQsignal_sets_init6F_v_; +text: .text%__1cCosHSolarisPinit_signal_mem6F_v_; +text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_; +text: .text%__1cCosHSolarisSset_signal_handler6Fiii_v_; +text: .text%__1cCosHSolarisUsynchronization_init6F_v_; +text: .text%__1cDhpiKinitialize6F_i_; +text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_; +text: .text%__1cQostream_init_log6F_v_; +text: .text%__1cRis_error_reported6F_i_; +text: .text%__1cSThreadLocalStorageEinit6F_v_; +text: .text%__1cSThreadLocalStorageHpd_init6F_v_; +text: .text%__1cCosbDallocate_thread_local_storage6F_i_; +text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_; +text: .text%__1cPvm_init_globals6F_v_; +text: .text%__1cScheck_ThreadShadow6F_v_; +text: .text%__1cRcheck_basic_types6F_v_; +text: .text%__1cNeventlog_init6F_v_; +text: .text%__1cKmutex_init6F_v_; +text: .text%__1cFMutex2t6Mipkci_v_; +text: .text%lwp_cond_init: os_solaris.o; +text: .text%lwp_mutex_init: os_solaris.o; +text: .text%__1cHMonitor2t6Mipkci_v_; +text: .text%__1cOchunkpool_init6F_v_; +text: .text%__1cPperfMemory_init6F_v_; +text: .text%__1cCosZvm_allocation_granularity6F_i_; +text: .text%__1cKPerfMemoryUcreate_memory_region6FI_v_; +text: .text%__1cCosScurrent_process_id6F_i_; +text: .text%__1cCosSget_temp_directory6F_pkc_; +text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cGThread2t6M_v_; +text: .text%__1cFArena2t6M_v_; +text: .text%__1cOThreadCritical2t6M_v_; +text: .text%__1cOThreadCritical2T6M_v_; +text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_; +text: .text%__1cKJavaThreadKinitialize6M_v_; +text: 
.text%__1cNjni_functions6F_pknTJNINativeInterface___; +text: .text%__1cQThreadStatistics2t6M_v_; +text: .text%__1cGParker2t6M_v_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_; +text: .text%__1cMFlatProfilerJis_active6F_i_; +text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_; +text: .text%__1cCosScurrent_stack_base6F_pC_; +text: .text%__1cCosScurrent_stack_size6F_I_; +text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_; +text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_; +text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_; +text: .text%get_thread; +text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FIi_pnGThread__; +text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__; +text: .text%__1cCosXthread_local_storage_at6Fi_pv_; +text: .text%__1cCosRinitialize_thread6F_v_; +text: .text%__1cNReservedSpaceUpage_align_size_down6FI_I_; +text: .text%__1cCosMvm_page_size6F_i_; +text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_; +text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_; +text: .text%__1cFMutexbClock_without_safepoint_check6M_v_; +text: .text%__1cFMutexGunlock6M_v_; +text: .text%__1cCosScreate_main_thread6FpnGThread__i_; +text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o; +text: .text%__1cIOSThread2t6MpFpv_i1_v_; +text: .text%__1cIOSThreadNpd_initialize6M_v_; +text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_; +text: .text%__1cGThreadMis_VM_thread6kM_i_: thread.o; +text: .text%__1cCosNcommit_memory6FpcI_i_; +text: .text%__1cCosMguard_memory6FpcI_i_; +text: .text%__1cMinit_globals6F_i_; +text: .text%__1cPmanagement_init6F_v_; +text: .text%__1cNExceptionMark2t6MrpnGThread__v_; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__; +text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_; +text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_; +text: .text%__1cKPerfMemoryFalloc6FI_pc_; +text: .text%__1cFMutexElock6M_v_; +text: .text%__1cKPerfMemoryMmark_updated6F_v_; +text: .text%__1cCosLelapsedTime6F_d_; +text: .text%__1cMgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cPoldgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cCosYatomic_cmpxchg_bootstrap6Fipoii_i_; +text: .text%__1cCosbCis_thread_cpu_time_supported6F_i_; +text: .text%__1cNExceptionMark2T6M_v_; +text: .text%__1cNThreadServiceEinit6F_v_; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__; +text: .text%__1cPPerfDataManagerIadd_item6FpnIPerfData_i_v_; +text: .text%__1cORuntimeServiceEinit6F_v_; +text: .text%__1cTClassLoadingServiceEinit6F_v_; +text: .text%__1cKvtune_init6F_v_; +text: .text%__1cObytecodes_init6F_v_; +text: .text%__1cJBytecodesKinitialize6F_v_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii1i_v_; +text: .text%__1cJBytecodesNpd_initialize6F_v_; +text: .text%__1cQclassLoader_init6F_v_; +text: .text%__1cLClassLoaderKinitialize6F_v_; +text: .text%__1cLClassLoaderQload_zip_library6F_v_; +text: .text%__1cCosTnative_java_library6F_pv_; +text: .text%__1cPJavaFrameAnchorNmake_walkable6MpnKJavaThread__v_; +text: .text%bootstrap_flush_windows; +text: .text%__1cCosPfence_bootstrap6F_v_; +text: .text%JVM_GetInterfaceVersion; +text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_; +text: .text%__1cKHandleMark2T6M_v_; +text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_; 
+text: .text%__1cCosGstrdup6Fpkc_pc_; +text: .text%__1cCosEstat6FpkcpnEstat__i_; +text: .text%__1cLClassLoaderXcreate_class_path_entry6FpcnEstat_ppnOClassPathEntry__v_; +text: .text%JVM_RawMonitorCreate; +text: .text%JVM_NativePath; +text: .text%JVM_RawMonitorEnter; +text: .text%__1cFMutexMjvm_raw_lock6M_v_; +text: .text%JVM_RawMonitorExit; +text: .text%__1cFMutexOjvm_raw_unlock6M_v_; +text: .text%JVM_Open; +text: .text%JVM_Lseek; +text: .text%JVM_Close; +text: .text%__1cOClassPathEntry2t6M_v_; +text: .text%__1cOcodeCache_init6F_v_; +text: .text%__1cICodeHeapHreserve6MIII_i_; +text: .text%__1cNReservedSpace2t6MI_v_; +text: .text%__1cCosOreserve_memory6FIpc_1_; +text: .text%__1cMVirtualSpaceKinitialize6MnNReservedSpace_I_i_; +text: .text%__1cCosNcommit_memory6FpcII_i_; +text: .text%__1cMVirtualSpaceOcommitted_size6kM_I_; +text: .text%__1cMVirtualSpaceNreserved_size6kM_I_; +text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_; +text: .text%__1cMCodeHeapPool2t6MpnICodeHeap_pkci_v_; +text: .text%__1cICodeHeapIcapacity6kM_I_; +text: .text%__1cICodeHeapMmax_capacity6kM_I_; +text: .text%__1cKMemoryPool2t6Mpkcn0AIPoolType_IIii_v_; +text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_; +text: .text%__1cNMemoryManager2t6M_v_; +text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_; +text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_; +text: .text%__1cLicache_init6F_v_; +text: .text%__1cKBufferBlobGcreate6Fpkci_p0_; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: thread.o; +text: .text%__1cICodeHeapLheader_size6F_I_; +text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__; +text: .text%__1cICodeHeapIallocate6MI_pv_; +text: .text%__1cICodeHeapPsearch_freelist6MI_pnJFreeBlock__; +text: .text%__1cICodeBlob2t6Mpkcii_v_; +text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_; +text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_; +text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cMCodeHeapPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cICodeHeapSallocated_capacity6kM_I_; +text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cXresource_allocate_bytes6FI_pc_; +text: .text%__1cKCodeBuffer2t6MpCi_v_; +text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cTICacheStubGeneratorVgenerate_icache_flush6MppFpCii_i_v_; +text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_; +text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_; +text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_; +text: .text%__1cKCodeBufferIrelocate6MpCrknQRelocationHolder_i_v_; +text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cMStubCodeMark2T6M_v_; +text: .text%__1cRAbstractAssemblerFflush6M_v_; +text: .text%__1cOAbstractICacheQinvalidate_range6FpCi_v_; +text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_; +text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_; +text: .text%__1cFForteNregister_stub6FpkcpC3_v_; +text: .text%__1cPVM_Version_init6F_v_; +text: .text%jio_snprintf; +text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_; +text: .text%__1cSstubRoutines_init16F_v_; +text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_; +text: .text%__1cNStubGenerator2t6MpnKCodeBuffer_i_v_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorLstub_prolog6MpnMStubCodeDesc__v_: stubGenerator_sparc.o; +text: 
.text%__1cNStubGeneratorFalign6Mi_v_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerKget_thread6M_v_; +text: .text%__1cOMacroAssemblerKsave_frame6Mi_v_; +text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: relocInfo.o; +text: .text%__1cKRelocationLunpack_data6M_v_: c1_Runtime1.o; +text: .text%__1cXruntime_call_RelocationEtype6M_nJrelocInfoJrelocType__: c1_Runtime1.o; +text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC2_v_; +text: .text%__1cOMacroAssemblerLsave_thread6MkpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerNverify_thread6M_v_; +text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_sparc.o; +text: .text%__1cHAddress2t6Mn0AJaddr_type_i_v_; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: stubGenerator_sparc.o; +text: .text%__1cJAssemblerSbranch_destination6Fii_i_; +text: .text%__1cJAssemblerOpatched_branch6Fiii_i_; +text: .text%__1cOMacroAssemblerDret6Mi_v_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: stubGenerator_sparc.o; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerIjumpl_to6MrnHAddress_pnMRegisterImpl_i_v_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorSgenerate_test_stop6M_pC_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerEstop6Mpkc_v_; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: assembler_sparc.o; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: assembler_sparc.o; +text: .text%__1cOMacroAssemblerRload_ptr_contents6MrnHAddress_pnMRegisterImpl_i_v_: assembler_sparc.o; +text: .text%__1cOMacroAssemblerPstop_subroutine6M_v_; +text: .text%__1cVRegistersForDebuggingOsave_registers6FpnOMacroAssembler__v_: assembler_sparc.o; +text: .text%__1cVRegistersForDebuggingRrestore_registers6FpnOMacroAssembler_pnMRegisterImpl__v_: assembler_sparc.o; +text: .text%__1cNStubGeneratorbNgenerate_flush_callers_register_windows6M_pC_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerNflush_windows6M_v_; +text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorXgenerate_atomic_cmpxchg6M_pC_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerOcas_under_lock6MpnMRegisterImpl_22pCi_v_; +text: .text%__1cNStubGeneratorTgenerate_atomic_add6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbCgenerate_atomic_cmpxchg_long6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorOgenerate_fence6M_pC_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerGmembar6MnJAssemblerQMembar_mask_bits__v_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbIgenerate_copy_words_aligned8_lower6M_pC_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerFalign6Mi_v_; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbJgenerate_copy_words_aligned8_higher6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbBgenerate_set_words_aligned86M_pC_: 
stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbCgenerate_zero_words_aligned86M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbEgenerate_partial_subtype_check6M_pC_: stubGenerator_sparc.o; +text: .text%__1cJTraceTime2T6M_v_; +text: .text%__1cNcarSpace_init6F_v_; +text: .text%__1cNuniverse_init6F_i_; +text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_; +text: .text%__1cLFileMapInfoKinitialize6M_i_; +text: .text%__1cLFileMapInfoIvalidate6M_i_; +text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_; +text: .text%__1cRClassPathZipEntryLis_jar_file6M_i_: classLoader.o; +text: .text%__1cRClassPathZipEntryEname6M_pkc_: classLoader.o; +text: .text%__1cPMarkSweepPolicy2t6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyMrem_set_name6M_nJGenRemSetEName__: collectorPolicy.o; +text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__I_; +text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_I_; +text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_; +text: .text%__1cPMarkSweepPolicyWinitialize_generations6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyVnumber_of_generations6M_i_: collectorPolicy.o; +text: .text%__1cXPermanentGenerationSpec2t6MnHPermGenEName_IIIIII_v_; +text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_; +text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_; +text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__; +text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_; +text: .text%__1cCosbDatomic_cmpxchg_long_bootstrap6Fxpoxx_x_; +text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__; +text: .text%__1cQGenCollectedHeap2t6MpnPCollectorPolicy__v_; +text: .text%__1cKSharedHeap2t6MpnPCollectorPolicy__v_; +text: .text%__1cNCollectedHeap2t6M_v_; +text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_; +text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__; +text: .text%__1cMSubTasksDone2t6Mi_v_; +text: .text%__1cMSubTasksDoneFvalid6M_i_; +text: .text%__1cQGenCollectedHeapKinitialize6M_i_; +text: .text%__1cPCollectorPolicyLgenerations6M_ppnOGenerationSpec__: collectorPolicy.o; +text: .text%__1cPCollectorPolicyUpermanent_generation6M_pnXPermanentGenerationSpec__: collectorPolicy.o; +text: .text%__1cXPermanentGenerationSpecFalign6MI_v_; +text: .text%__1cOGenerationSpecRn_covered_regions6kM_i_: collectorPolicy.o; +text: .text%__1cNReservedSpace2t6MIIipc_v_; +text: .text%__1cCosZattempt_reserve_memory_at6FIpc_1_; +text: .text%__1cPCollectorPolicyOcreate_rem_set6MnJMemRegion_i_pnJGenRemSet__; +text: .text%__1cbCTwoGenerationCollectorPolicyQbarrier_set_name6M_nKBarrierSetEName__: collectorPolicy.o; +text: .text%__1cLCardTableRS2t6MnJMemRegion_i_v_; +text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_; +text: .text%__1cNReservedSpaceYallocation_align_size_up6FI_I_; +text: .text%__1cJMemRegion2t6M_v_: cardTableModRefBS.o; +text: .text%__1cKSharedHeapPset_barrier_set6MpnKBarrierSet__v_; +text: .text%__1cNReservedSpaceKfirst_part6MIii_0_; +text: .text%__1cOGenerationSpecEinit6MnNReservedSpace_ipnJGenRemSet__pnKGeneration__; +text: .text%__1cQDefNewGeneration2t6MnNReservedSpace_Iipkc_v_; +text: .text%__1cKGeneration2t6MnNReservedSpace_Ii_v_; +text: .text%__1cIageTable2t6Mi_v_; +text: .text%__1cFArenaEgrow6MI_pv_; +text: 
.text%__1cFChunkJnext_chop6M_v_; +text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cWSequentialSubTasksDoneFclear6M_v_; +text: .text%__1cSGenerationCounters2t6MpkciipnMVirtualSpace__v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_; +text: .text%__1cRCollectorCounters2t6Mpkci_v_; +text: .text%__1cOCSpaceCounters2t6MpkciIpnPContiguousSpace_pnSGenerationCounters__v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__; +text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_; +text: .text%__1cPPerfLongVariantGsample6M_v_; +text: .text%__1cZContiguousSpaceUsedHelperLtake_sample6M_x_: cSpaceCounters.o; +text: .text%__1cPContiguousSpaceEused6kM_I_: space.o; +text: .text%__1cQDefNewGenerationYcompute_space_boundaries6MI_v_; +text: .text%__1cQCompactibleSpaceKinitialize6MnJMemRegion_i_v_; +text: .text%__1cFSpaceKset_bottom6MpnIHeapWord__v_: space.o; +text: .text%__1cJEdenSpaceHset_end6MpnIHeapWord__v_: space.o; +text: .text%__1cJEdenSpaceFclear6M_v_; +text: .text%__1cFSpaceHset_end6MpnIHeapWord__v_: space.o; +text: .text%__1cPContiguousSpaceFclear6M_v_; +text: .text%__1cQDefNewGenerationPupdate_counters6M_v_; +text: .text%__1cSGenerationCountersKupdate_all6M_v_: generationCounters.o; +text: .text%__1cNReservedSpaceJlast_part6MI_0_; +text: .text%__1cRTenuredGeneration2t6MnNReservedSpace_IipnJGenRemSet__v_; +text: .text%__1cOCardGeneration2t6MnNReservedSpace_IipnJGenRemSet__v_; +text: .text%__1cWBlockOffsetSharedArray2t6MnJMemRegion_I_v_; +text: .text%__1cNReservedSpaceSpage_align_size_up6FI_I_; +text: .text%__1cMVirtualSpaceJexpand_by6MI_i_; +text: .text%__1cLCardTableRSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cLCardTableRSKis_aligned6MpnIHeapWord__i_: cardTableRS.o; +text: .text%__1cHGCStats2t6M_v_; +text: .text%__1cWOffsetTableContigSpace2t6MpnWBlockOffsetSharedArray_nJMemRegion__v_; +text: .text%__1cQBlockOffsetArray2t6MpnWBlockOffsetSharedArray_nJMemRegion_i_v_; +text: .text%__1cWOffsetTableContigSpaceKset_bottom6MpnIHeapWord__v_; +text: .text%__1cQBlockOffsetArrayGresize6MI_v_: blockOffsetTable.o; +text: .text%__1cWOffsetTableContigSpaceHset_end6MpnIHeapWord__v_; +text: .text%__1cWOffsetTableContigSpaceFclear6M_v_; +text: .text%__1cbBBlockOffsetArrayContigSpaceUinitialize_threshold6M_pnIHeapWord__; +text: .text%__1cUGenericGrowableArrayEgrow6Mi_v_; +text: .text%__1cXPermanentGenerationSpecEinit6MnNReservedSpace_IpnJGenRemSet__pnHPermGen__; +text: .text%__1cRCompactingPermGen2t6MnNReservedSpace_1IpnJGenRemSet_pnXPermanentGenerationSpec__v_; +text: .text%__1cUCompactingPermGenGen2t6MnNReservedSpace_1IipnJGenRemSet_pnPContiguousSpace_pnXPermanentGenerationSpec__v_; +text: .text%__1cLFileMapInfoJmap_space6MinNReservedSpace_pnPContiguousSpace__i_; +text: .text%__1cNReservedSpaceHrelease6M_v_; +text: .text%__1cCosOrelease_memory6FpcI_i_; +text: .text%__1cCosKmap_memory6FipkcIpcIii_3_; +text: .text%__1cUCompactingPermGenGenbFinitialize_performance_counters6M_v_; +text: .text%__1cbCOneContigSpaceCardGenerationIcapacity6kM_I_; +text: .text%__1cPCollectorPolicybFis_concurrent_mark_sweep_policy6M_i_: collectorPolicy.o; +text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_; +text: .text%__1cPGlobalTLABStats2t6M_v_; +text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_; +text: 
.text%__1cQGenCollectedHeapNtlab_capacity6kM_I_; +text: .text%__1cQDefNewGenerationYsupports_tlab_allocation6kM_i_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationNtlab_capacity6kM_I_: defNewGeneration.o; +text: .text%__1cKGenerationYsupports_tlab_allocation6kM_i_: tenuredGeneration.o; +text: .text%__1cUCompactingPermGenGenPinitialize_oops6F_v_; +text: .text%__1cQSystemDictionaryVset_shared_dictionary6FpnPHashtableBucket_ii_v_; +text: .text%__1cKDictionary2t6MipnPHashtableBucket_i_v_; +text: .text%__1cLClassLoaderZcreate_package_info_table6FpnPHashtableBucket_ii_v_; +text: .text%__1cUCompactingPermGenGenOserialize_oops6FpnTSerializeOopClosure__v_; +text: .text%__1cLReadClosureGdo_tag6Mi_v_: restore.o; +text: .text%__1cLReadClosureGdo_int6Mpi_v_: restore.o; +text: .text%__1cLFileMapInfoLassert_mark6Fi_v_; +text: .text%__1cQGenCollectedHeapEheap6F_p0_; +text: .text%__1cRCompactingPermGenGas_gen6kM_pnKGeneration__: permGen.o; +text: .text%__1cWBlockOffsetSharedArrayJserialize6MpnTSerializeOopClosure_pnIHeapWord_4_v_; +text: .text%__1cLReadClosureJdo_region6MpCI_v_: restore.o; +text: .text%__1cWOffsetTableContigSpacebKserialize_block_offset_array_offsets6MpnTSerializeOopClosure__v_; +text: .text%__1cbBBlockOffsetArrayContigSpaceJserialize6MpnTSerializeOopClosure__v_; +text: .text%__1cLReadClosureHreading6kM_i_: restore.o; +text: .text%__1cLReadClosureGdo_ptr6MppnIHeapWord__v_: restore.o; +text: .text%__1cLReadClosureGdo_ptr6Mppv_v_: restore.o; +text: .text%__1cLReadClosureJdo_size_t6MpI_v_: restore.o; +text: .text%__1cIUniverseHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cLReadClosureGdo_oop6MppnHoopDesc__v_: restore.o; +text: .text%__1cJvmSymbolsHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_; +text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__; +text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_; +text: .text%__1cKBufferBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__; +text: .text%__1cICodeHeapKfind_start6kMpv_1_; +text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_; +text: .text%__1cLFileMapInfoFclose6M_v_; +text: .text%__1cQinterpreter_init6F_v_; +text: .text%__1cTAbstractInterpreterKinitialize6F_v_; +text: .text%__1cNTemplateTableKinitialize6F_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_; +text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_; +text: .text%__1cNTemplateTableNpd_initialize6F_v_; +text: .text%__1cRInvocationCounterMreinitialize6Fi_v_; +text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_; +text: .text%__1cICodeHeapJexpand_by6MI_i_; +text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_; +text: .text%__1cLCodeletMark2t6MrpnZInterpreterMacroAssembler_pkcinJBytecodesECode__v_: interpreter.o; +text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__; +text: .text%__1cbBInterpreterCodeletInterfaceRcode_size_to_size6kMi_i_: interpreter.o; +text: .text%__1cbBInterpreterCodeletInterfaceKinitialize6MpnEStub_i_v_: interpreter.o; +text: .text%__1cJStubQdDueueGcommit6Mi_v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_return_entry_for6MnITosState_i_pC_; +text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_; +text: 
.text%__1cZInterpreterMacroAssemblerZget_2_byte_integer_at_bcp6MipnMRegisterImpl_2n0ALsignedOrNot_n0AKsetCCOrNot__v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_; +text: .text%__1cZInterpreterMacroAssemblerTdispatch_Lbyte_code6MnITosState_ppCii_v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interp_masm_sparc.o; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: interp_masm_sparc.o; +text: .text%__1cOMacroAssemblerKverify_FPU6Mipkc_v_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_deopt_entry_for6MnITosState_i_pC_; +text: .text%__1cZInterpreterMacroAssemblerXget_constant_pool_cache6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: interpreter_sparc.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pCii_v_; +text: .text%__1cZInterpreterMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cOMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerVreset_last_Java_frame6M_v_; +text: .text%__1cOMacroAssemblerbBcheck_and_forward_exception6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: assembler_sparc.o; +text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_; +text: .text%__1cbCAbstractInterpreterGeneratorbBgenerate_result_handler_for6MnJBasicType__pC_; +text: .text%__1cZInterpreterMacroAssemblerKverify_oop6MpnMRegisterImpl_nITosState__v_; +text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_continuation_for6MnITosState__pC_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_; +text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_i6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerIpush_ptr6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerUstore_unaligned_long6MpnMRegisterImpl_2i_v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cOMacroAssemblerWstore_unaligned_double6MpnRFloatRegisterImpl_pnMRegisterImpl_i_v_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_throw_exception6M_v_; +text: .text%__1cZInterpreterMacroAssemblerWempty_expression_stack6M_v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: interp_masm_sparc.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cOMacroAssemblerNget_vm_result6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerDjmp6MpnMRegisterImpl_ipkci_v_; +text: .text%__1cZInterpreterMacroAssemblerbDunlock_if_synchronized_method6MnITosState_ii_v_; +text: .text%__1cOMacroAssemblerKbr_notnull6MpnMRegisterImpl_inJAssemblerHPredict_rnFLabel__v_; +text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerHbr_null6MpnMRegisterImpl_inJAssemblerHPredict_rnFLabel__v_; +text: .text%__1cOMacroAssemblerPcasx_under_lock6MpnMRegisterImpl_22pCi_v_; +text: 
.text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: interp_masm_sparc.o; +text: .text%__1cQRelocationHolderEplus6kMi_0_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC222_v_; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: interpreter_sparc.o; +text: .text%__1cZInterpreterMacroAssemblerHpop_ptr6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerNset_vm_result6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerNload_contents6MrnHAddress_pnMRegisterImpl_i_v_: interp_masm_sparc.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: interp_masm_sparc.o; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpnMRegisterImpl_pC2_v_; +text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpnMRegisterImpl_pCi_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC22i_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbHgenerate_exception_handler_common6Mpkc2i_pC_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interpreter_sparc.o; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_StackOverflowError_handler6M_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cUInterpreterGeneratorbEgenerate_asm_interpreter_entry6Mi_pC_; +text: .text%__1cUInterpreterGeneratorbCgenerate_check_compiled_code6MrnFLabel__v_; +text: .text%__1cUInterpreterGeneratorUgenerate_fixed_frame6Mi_v_; +text: .text%__1cUInterpreterGeneratorbDgenerate_stack_overflow_check6MpnMRegisterImpl_22_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_; +text: .text%__1cZInterpreterMacroAssemblerbCincrement_invocation_counter6MpnMRegisterImpl_2_v_; +text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_sparc.o; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: interp_masm_sparc.o; +text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_; +text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MirnFLabel__v_; +text: .text%__1cZInterpreterMacroAssemblerbIcompute_extra_locals_size_in_bytes6MpnMRegisterImpl_22_v_; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: interp_masm_sparc.o; +text: .text%__1cUInterpreterGeneratorLlock_method6M_v_; +text: .text%__1cZInterpreterMacroAssemblerUadd_monitor_to_stack6MipnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl_2_v_; +text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_; +text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_; +text: .text%__1cUInterpreterGeneratorVgenerate_native_entry6Mi_pC_; +text: .text%__1cOMacroAssemblerUcalc_mem_param_words6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerOrestore_thread6MkpnMRegisterImpl__v_; +text: .text%__1cUInterpreterGeneratorVrestore_native_result6M_v_; +text: .text%__1cZInterpreterMacroAssemblerSnotify_method_exit6MinITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerQtop_most_monitor6M_nHAddress__; +text: .text%__1cbCAbstractInterpreterGeneratorbEset_entry_points_for_all_bytes6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorQset_entry_points6MnJBytecodesECode__v_; +text: 
.text%__1cbCAbstractInterpreterGeneratorWset_short_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cbCAbstractInterpreterGeneratorVset_vtos_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_l6MpnMRegisterImpl__v_; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_and_dispatch6MpnITemplate_nITosState__v_; +text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_; +text: .text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_; +text: .text%__1cNTemplateTableDnop6F_v_; +text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_; +text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_; +text: .text%__1cNTemplateTableLaconst_null6F_v_; +text: .text%__1cNTemplateTableGiconst6Fi_v_; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: templateTable_sparc.o; +text: .text%__1cNTemplateTableGlconst6Fi_v_; +text: .text%__1cNTemplateTableGfconst6Fi_v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: templateTable_sparc.o; +text: .text%__1cNTemplateTableGdconst6Fi_v_; +text: .text%__1cNTemplateTableGbipush6F_v_; +text: .text%__1cNTemplateTableGsipush6F_v_; +text: .text%__1cNTemplateTableDldc6Fi_v_; +text: .text%__1cZInterpreterMacroAssemblerSget_cpool_and_tags6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: templateTable_sparc.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: templateTable_sparc.o; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_; +text: .text%__1cNTemplateTableGldc2_w6F_v_; +text: .text%__1cOMacroAssemblerVload_unaligned_double6MpnMRegisterImpl_ipnRFloatRegisterImpl__v_; +text: .text%__1cOMacroAssemblerTload_unaligned_long6MpnMRegisterImpl_i2_v_; +text: .text%__1cNTemplateTableFiload6F_v_; +text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerQaccess_local_int6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableKwide_iload6F_v_; +text: .text%__1cNTemplateTableFlload6F_v_; +text: .text%__1cZInterpreterMacroAssemblerRaccess_local_long6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableKwide_lload6F_v_; +text: .text%__1cNTemplateTableFfload6F_v_; +text: .text%__1cZInterpreterMacroAssemblerSaccess_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cNTemplateTableKwide_fload6F_v_; +text: .text%__1cNTemplateTableFdload6F_v_; +text: .text%__1cZInterpreterMacroAssemblerTaccess_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cNTemplateTableKwide_dload6F_v_; +text: .text%__1cNTemplateTableFaload6F_v_; +text: .text%__1cZInterpreterMacroAssemblerQaccess_local_ptr6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableKwide_aload6F_v_; +text: .text%__1cNTemplateTableFiload6Fi_v_; +text: .text%__1cNTemplateTableFlload6Fi_v_; +text: .text%__1cNTemplateTableFfload6Fi_v_; +text: .text%__1cNTemplateTableFdload6Fi_v_; +text: .text%__1cNTemplateTableHaload_06F_v_; +text: .text%__1cNTemplateTableFaload6Fi_v_; +text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerFpop_i6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableGiaload6F_v_; +text: 
.text%__1cZInterpreterMacroAssemblerLindex_check6MpnMRegisterImpl_2i22_v_; +text: .text%__1cZInterpreterMacroAssemblerXindex_check_without_pop6MpnMRegisterImpl_2i22_v_; +text: .text%__1cOMacroAssemblerIjumpl_to6MrnHAddress_pnMRegisterImpl_i_v_: interp_masm_sparc.o; +text: .text%__1cNTemplateTableGlaload6F_v_; +text: .text%__1cNTemplateTableGfaload6F_v_; +text: .text%__1cNTemplateTableGdaload6F_v_; +text: .text%__1cNTemplateTableGaaload6F_v_; +text: .text%__1cNTemplateTableGbaload6F_v_; +text: .text%__1cNTemplateTableGcaload6F_v_; +text: .text%__1cNTemplateTableGsaload6F_v_; +text: .text%__1cNTemplateTableGistore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerPstore_local_int6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableLwide_istore6F_v_; +text: .text%__1cNTemplateTableGlstore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerQstore_local_long6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableLwide_lstore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerFpop_l6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerFpop_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cNTemplateTableGfstore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerRstore_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cNTemplateTableLwide_fstore6F_v_; +text: .text%__1cNTemplateTableGdstore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerSstore_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cNTemplateTableLwide_dstore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerFpop_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cNTemplateTableGastore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerbCverify_oop_or_return_address6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerPstore_local_ptr6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableLwide_astore6F_v_; +text: .text%__1cNTemplateTableGistore6Fi_v_; +text: .text%__1cNTemplateTableGlstore6Fi_v_; +text: .text%__1cNTemplateTableGfstore6Fi_v_; +text: .text%__1cNTemplateTableGdstore6Fi_v_; +text: .text%__1cNTemplateTableGastore6Fi_v_; +text: .text%__1cNTemplateTableHiastore6F_v_; +text: .text%__1cNTemplateTableHlastore6F_v_; +text: .text%__1cNTemplateTableHfastore6F_v_; +text: .text%__1cNTemplateTableHdastore6F_v_; +text: .text%__1cNTemplateTableHaastore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerRprofile_checkcast6MipnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_2222rnFLabel__v_; +text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_x6MnJAssemblerJCondition_pCpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: assembler_sparc.o; +text: .text%__1cNTemplateTableHbastore6F_v_; +text: .text%__1cNTemplateTableHcastore6F_v_; +text: .text%__1cNTemplateTableHsastore6F_v_; +text: .text%__1cNTemplateTableDpop6F_v_; +text: .text%__1cNTemplateTableEpop26F_v_; +text: .text%__1cNTemplateTableDdup6F_v_; +text: .text%__1cNTemplateTableGdup_x16F_v_; +text: .text%__1cNTemplateTableGdup_x26F_v_; +text: .text%__1cNTemplateTableEdup26F_v_; +text: .text%__1cNTemplateTableHdup2_x16F_v_; +text: .text%__1cNTemplateTableHdup2_x26F_v_; +text: .text%__1cNTemplateTableEswap6F_v_; +text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_; +text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_; +text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_; +text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_; +text: .text%__1cNTemplateTableElmul6F_v_; +text: 
.text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pCi_v_; +text: .text%__1cNTemplateTableEidiv6F_v_; +text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_icc6MnJAssemblerJCondition_pCpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableEldiv6F_v_; +text: .text%__1cNTemplateTableEirem6F_v_; +text: .text%__1cNTemplateTableElrem6F_v_; +text: .text%__1cNTemplateTableEineg6F_v_; +text: .text%__1cNTemplateTableElneg6F_v_; +text: .text%__1cOMacroAssemblerElneg6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableEfneg6F_v_; +text: .text%__1cNTemplateTableEdneg6F_v_; +text: .text%__1cNTemplateTableElshl6F_v_; +text: .text%__1cOMacroAssemblerElshl6MpnMRegisterImpl_22222_v_; +text: .text%__1cNTemplateTableElshr6F_v_; +text: .text%__1cOMacroAssemblerElshr6MpnMRegisterImpl_22222_v_; +text: .text%__1cNTemplateTableFlushr6F_v_; +text: .text%__1cOMacroAssemblerFlushr6MpnMRegisterImpl_22222_v_; +text: .text%__1cNTemplateTableEiinc6F_v_; +text: .text%__1cNTemplateTableJwide_iinc6F_v_; +text: .text%__1cNTemplateTableHconvert6F_v_; +text: .text%__1cOMacroAssemblerCfb6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: templateTable_sparc.o; +text: .text%__1cNTemplateTableElcmp6F_v_; +text: .text%__1cOMacroAssemblerElcmp6MpnMRegisterImpl_2222_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_; +text: .text%__1cOMacroAssemblerJfloat_cmp6MiipnRFloatRegisterImpl_2pnMRegisterImpl__v_; +text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_; +text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_; +text: .text%__1cZInterpreterMacroAssemblerGif_cmp6MnJAssemblerJCondition_i_v_; +text: .text%__1cNTemplateTableGbranch6Fii_v_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerbAincrement_backedge_counter6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerbBtest_backedge_count_for_osr6MpnMRegisterImpl_22_v_; +text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableF_goto6F_v_; +text: .text%__1cNTemplateTableDjsr6F_v_; +text: .text%__1cZInterpreterMacroAssemblerbAdispatch_next_noverify_oop6MnITosState_i_v_; +text: .text%__1cNTemplateTableDret6F_v_; +text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MnITosState_pnMRegisterImpl_3_v_; +text: .text%__1cNTemplateTableIwide_ret6F_v_; +text: .text%__1cNTemplateTableLtableswitch6F_v_; +text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_222_v_; +text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableMlookupswitch6F_v_; +text: .text%__1cNTemplateTableH_return6FnITosState__v_; +text: .text%__1cOMacroAssemblerDret6Mi_v_: templateTable_sparc.o; +text: .text%__1cNTemplateTableJgetstatic6Fi_v_; +text: .text%__1cNTemplateTableSgetfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableXjvmti_post_field_access6Fiii_v_; +text: .text%__1cNTemplateTableZload_field_cp_cache_entry6FipnMRegisterImpl_22i_v_; +text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableJputstatic6Fi_v_; +text: .text%__1cNTemplateTableSputfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableUjvmti_post_field_mod6Fii_v_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_22_v_; +text: .text%__1cOMacroAssemblerGmembar6MnJAssemblerQMembar_mask_bits__v_: templateTable_sparc.o; +text: 
.text%__1cNTemplateTableIgetfield6Fi_v_; +text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_; +text: .text%__1cNTemplateTableOpatch_bytecode6FnJBytecodesECode_pnMRegisterImpl_4i_v_; +text: .text%__1cNTemplateTableIputfield6Fi_v_; +text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_; +text: .text%__1cNTemplateTablebAload_invoke_cp_cache_entry6FipnMRegisterImpl_22ii_v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: templateTable_sparc.o; +text: .text%__1cNTemplateTableTinvokevfinal_helper6FpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: templateTable_sparc.o; +text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableUgenerate_vtable_call6FpnMRegisterImpl_22_v_; +text: .text%__1cNTemplateTableNinvokespecial6Fi_v_; +text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableMinvokestatic6Fi_v_; +text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_; +text: .text%__1cNTemplateTablebDinvokeinterface_object_method6FpnMRegisterImpl_222_v_; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: templateTable_sparc.o; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_; +text: .text%__1cNTemplateTableE_new6F_v_; +text: .text%__1cQGenCollectedHeapItop_addr6kM_ppnIHeapWord__; +text: .text%__1cQDefNewGenerationItop_addr6kM_ppnIHeapWord__; +text: .text%__1cQGenCollectedHeapIend_addr6kM_ppnIHeapWord__; +text: .text%__1cQDefNewGenerationIend_addr6kM_ppnIHeapWord__; +text: .text%__1cZInterpreterMacroAssemblerRget_constant_pool6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_; +text: .text%__1cNTemplateTableInewarray6F_v_; +text: .text%__1cNTemplateTableJanewarray6F_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC222i_v_; +text: .text%__1cNTemplateTableLarraylength6F_v_; +text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_1_x6MnJAssemblerJCondition_rnFLabel__v_; +text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_26MpCpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cNTemplateTableGathrow6F_v_; +text: .text%__1cNTemplateTableJcheckcast6F_v_; +text: .text%__1cNTemplateTableKinstanceof6F_v_; +text: .text%__1cNTemplateTableMmonitorenter6F_v_; +text: .text%__1cNTemplateTableLmonitorexit6F_v_; +text: .text%__1cNTemplateTableEwide6F_v_; +text: .text%__1cNTemplateTableOmultianewarray6F_v_; +text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableGgoto_w6F_v_; +text: .text%__1cZInterpreterMacroAssemblerZget_4_byte_integer_at_bcp6MipnMRegisterImpl_2n0AKsetCCOrNot__v_; +text: .text%__1cNTemplateTableFjsr_w6F_v_; +text: .text%__1cNTemplateTableL_breakpoint6F_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_normal6MnITosState__v_; +text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_; +text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_; +text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_; +text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_; +text: .text%__1cNTemplateTableKfast_iload6F_v_; +text: .text%__1cNTemplateTableLfast_iload26F_v_; +text: .text%__1cNTemplateTableMfast_icaload6F_v_; +text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_; +text: .text%__1cNTemplateTableRfast_linearswitch6F_v_; +text: 
.text%__1cNTemplateTableRfast_binaryswitch6F_v_; +text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_; +text: .text%__1cWinvocationCounter_init6F_v_; +text: .text%__1cOmarksweep_init6F_v_; +text: .text%__1cQaccessFlags_init6F_v_; +text: .text%__1cStemplateTable_init6F_v_; +text: .text%__1cVInterfaceSupport_init6F_v_; +text: .text%__1cOuniverse2_init6F_v_; +text: .text%__1cIUniverseHgenesis6FpnGThread__v_; +text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_; +text: .text%__1cIUniversebCinit_self_patching_vtbl_list6Fppvi_v_; +text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_; +text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_; +text: .text%__1cKDictionary2t6Mi_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: dictionary.o; +text: .text%__1cQPlaceholderTable2t6Mi_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: placeholders.o; +text: .text%__1cVLoaderConstraintTable2t6Mi_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: loaderConstraints.o; +text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cKSharedHeapWpermanent_mem_allocate6MI_pnIHeapWord__: genCollectedHeap.o; +text: .text%__1cRCompactingPermGenMmem_allocate6MI_pnIHeapWord__; +text: .text%__1cbCOneContigSpaceCardGenerationIallocate6MIii_pnIHeapWord__: compactingPermGenGen.o; +text: .text%__1cWOffsetTableContigSpaceIallocate6MI_pnIHeapWord__: space.o; +text: .text%__1cbBBlockOffsetArrayContigSpaceLalloc_block6MpnIHeapWord_2_v_: blockOffsetTable.o; +text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_nGHandle_2ipnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_; +text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cMObjectLocker2t6MnGHandle_pnGThread__v_; +text: .text%__1cFMutexElock6MpnGThread__v_; +text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cQPlaceholderTableJadd_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__; +text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: systemDictionary.o; +text: .text%__1cKDictionaryRfind_shared_class6MiInMsymbolHandle__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_; +text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_; +text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_; +text: .text%__1cNmethodOopDescVclear_native_function6M_v_; +text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_; +text: .text%__1cTClassLoadingServiceTnotify_class_loaded6FpnNinstanceKlass_i_v_; +text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceKlass.o; +text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_; 
+text: .text%__1cQSystemDictionaryRcheck_constraints6FiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cVLoaderConstraintTablePcheck_or_update6MnTinstanceKlassHandle_nGHandle_nMsymbolHandle__pkc_; +text: .text%__1cFKlassWappend_to_sibling_list6M_v_; +text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_; +text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_; +text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_; +text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_; +text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_; +text: .text%__1cMObjectLocker2T6M_v_; +text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cKJavaThreadNis_lock_owned6kMpC_i_; +text: .text%__1cGThreadLis_in_stack6kMpC_i_; +text: .text%__1cCosVcurrent_stack_pointer6F_pC_; +text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_; +text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_; +text: .text%__1cKSharedHeapYpermanent_object_iterate6MpnNObjectClosure__v_: genCollectedHeap.o; +text: .text%__1cHPermGenOobject_iterate6MpnNObjectClosure__v_: permGen.o; +text: .text%__1cbCOneContigSpaceCardGenerationOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cPContiguousSpaceOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cPContiguousSpaceTobject_iterate_from6MnJWaterMark_pnNObjectClosure__v_; +text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_: universe.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: objArrayKlass.o; +text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceRefKlass.o; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__; +text: .text%__1cQSystemDictionarybDinitialize_basic_type_mirrors6FpnGThread__v_; +text: .text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cXreferenceProcessor_init6F_v_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_; +text: .text%__1cQjni_handles_init6F_v_; +text: .text%__1cOvmStructs_init6F_v_; +text: .text%__1cVverificationType_init6F_v_; +text: .text%__1cOcompiler1_init6F_v_; +text: .text%__1cKSharedInfoKset_stack06Fi_v_; +text: .text%__1cKSharedInfoLset_regName6F_v_; +text: .text%__1cMRegisterImplEname6kM_pkc_; +text: .text%__1cRFloatRegisterImplEname6kM_pkc_; +text: .text%__1cIRegAllocYinit_register_allocation6F_v_; +text: .text%__1cIFrameMapEinit6F_v_; +text: .text%__1cKc1_RegMaskKinit_masks6Fi_v_; +text: .text%__1cFRInfoMset_long_reg6MkpnMRegisterImpl_k2_v_; +text: .text%__1cIFrameMapLcpu_reg2rnr6FpnMRegisterImpl__i_; +text: .text%__1cFRInfoMset_word_reg6MkpnMRegisterImpl__v_; +text: .text%__1cFRInfoNset_float_reg6MrkpnRFloatRegisterImpl__v_; +text: .text%__1cFRInfoOset_double_reg6MrkpnRFloatRegisterImpl__v_; +text: 
.text%__1cIFrameMapLcpu_rnr2reg6Fi_pnMRegisterImpl__; +text: .text%__1cIFrameMapXis_caller_save_register6FpnMRegisterImpl__i_; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_FrameMap_sparc.o; +text: .text%__1cNc1_AllocTableLinit_tables6F_v_; +text: .text%__1cIRuntime1Kinitialize6F_v_; +text: .text%__1cKCodeBufferRinsts_memory_size6Fi_i_; +text: .text%__1cKCodeBufferQlocs_memory_size6Fi_i_; +text: .text%__1cIRuntime1Ninitialize_pd6F_v_; +text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_; +text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffer_ipnLOopRecorder_pkcii_v_; +text: .text%__1cOMacroAssemblerZtotal_frame_size_in_bytes6Mi_i_; +text: .text%__1cJOopMapSet2t6M_v_; +text: .text%__1cOMacroAssemblerNsave_frame_c16Mi_v_; +text: .text%__1cTsave_live_registers6FpnOMacroAssembler_i_pnGOopMap__: c1_Runtime1_sparc.o; +text: .text%__1cGOopMap2t6Mii_v_; +text: .text%__1cVCompressedWriteStream2t6Mi_v_; +text: .text%__1cQCompressedStream2t6MpCi_v_; +text: .text%__1cGOopMapQset_callee_saved6MnHOptoRegEName_ii2_v_; +text: .text%__1cGOopMapHset_xxx6MnHOptoRegEName_nLOopMapValueJoop_types_ii2_v_; +text: .text%__1cIFrameMapLfpu_regname6Fi_nHOptoRegEName__; +text: .text%__1cKRelocationJpack_data6M_i_: c1_Runtime1.o; +text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_; +text: .text%__1cJrelocInfo2t6Mn0AJrelocType_ii_v_; +text: .text%__1cJOopMapSetKadd_gc_map6MiipnGOopMap__v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC22_v_; +text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_; +text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_; +text: .text%__1cICodeBlob2t6MpkcpnKCodeBuffer_iiipnJOopMapSet_i_v_; +text: .text%__1cKCodeBufferPcopy_relocation6MpnICodeBlob__v_; +text: .text%__1cNRelocIteratorMcreate_index6FpnKCodeBuffer_pnJrelocInfo_4_4_; +text: .text%__1cKCodeBufferJcopy_code6MpnICodeBlob__v_; +text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_; +text: .text%__1cICodeBlobWfix_relocation_at_move6Mi_v_; +text: .text%__1cNRelocIteratorKinitialize6MipnICodeBlob_pC3_v_; +text: .text%__1cNRelocIteratorKset_limits6MpC1_v_; +text: .text%__1cVPatchingRelocIteratorHprepass6M_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: relocInfo.o; +text: .text%__1cNRelocIteratorEnext6M_i_: codeBlob.o; +text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__; +text: .text%__1cOCallRelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cOCallRelocationFvalue6M_pC_: c1_Runtime1.o; +text: .text%__1cKRelocationTpd_call_destination6M_pC_; +text: .text%__1cKRelocationXpd_set_call_destination6MpCi_v_; +text: .text%__1cRNativeInstructionLset_long_at6Mii_v_; +text: .text%__1cOAbstractICachePinvalidate_word6FpC_v_; +text: .text%__1cKRelocationLunpack_data6M_v_: relocInfo.o; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: relocInfo.o; +text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_; +text: .text%__1cJOopMapSetJheap_size6kM_i_; +text: .text%__1cJOopMapSetHcopy_to6MpC_v_; +text: .text%__1cLOopRecorder2t6MpnFArena__v_; +text: .text%__1cIRuntime1Rgenerate_code_for6Fn0AGStubID_pnNStubAssembler_pi_pnJOopMapSet__; +text: .text%__1cNStubAssemblerIset_info6Mpkci_v_; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: c1_Runtime1_sparc.o; +text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pC2_i_; +text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pCi_i_; +text: .text%__1cLRuntimeStubQnew_runtime_stub6FpkcpnKCodeBuffer_ipnJOopMapSet_i_p0_; +text: 
.text%__1cOMacroAssemblerPget_vm_result_26MpnMRegisterImpl__v_; +text: .text%__1cIRuntime1Ygenerate_exception_throw6FpnNStubAssembler_pCpnMRegisterImpl__pnJOopMapSet__; +text: .text%__1cOMacroAssemblerDret6Mi_v_: c1_Runtime1_sparc.o; +text: .text%__1cOMacroAssemblerLtlab_refill6MrnFLabel_22_v_; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: assembler_sparc.o; +text: .text%__1cOMacroAssemblerNeden_allocate6MpnMRegisterImpl_2i22rnFLabel__v_; +text: .text%__1cOMacroAssemblerNtlab_allocate6MpnMRegisterImpl_2i2rnFLabel__v_; +text: .text%__1cRC1_MacroAssemblerRinitialize_object6MpnMRegisterImpl_22i22_v_; +text: .text%__1cRC1_MacroAssemblerRinitialize_header6MpnMRegisterImpl_222_v_; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: c1_MacroAssembler_sparc.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: c1_MacroAssembler_sparc.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: c1_Runtime1_sparc.o; +text: .text%__1cRC1_MacroAssemblerPinitialize_body6MpnMRegisterImpl_2_v_; +text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pC22_i_; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: c1_Runtime1_sparc.o; +text: .text%__1cIRuntime1Iblob_for6Fn0AGStubID__pnICodeBlob__; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: c1_Runtime1_sparc.o; +text: .text%__1cOMacroAssemblerIjumpl_to6MrnHAddress_pnMRegisterImpl_i_v_: c1_Runtime1_sparc.o; +text: .text%__1cIiEntries2t6Miiii_v_; +text: .text%__1cIRuntime1Rgenerate_patching6FpnNStubAssembler_pC_pnJOopMapSet__; +text: .text%__1cIRuntime1Qgenerate_handler6FpnNStubAssembler_pCi_pnJOopMapSet__; +text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cQvtableStubs_init6F_v_; +text: .text%__1cWInlineCacheBuffer_init6F_v_; +text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_; +text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__; +text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_: icBuffer.o; +text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_: icBuffer.o; +text: .text%__1cTcompilerOracle_init6F_v_; +text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_; +text: .text%__1cXonStackReplacement_init6F_v_; +text: .text%__1cICodeBlobPallocation_size6FpnKCodeBuffer_ii_I_; +text: .text%__1cUGenericGrowableArrayPraw_at_put_grow6MipknEGrET_3_v_; +text: .text%__1cWcompilationPolicy_init6F_v_; +text: .text%__1cSuniverse_post_init6F_v_; +text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__; +text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cEUTF8Ounicode_length6Fpkc_i_; +text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cQGenCollectedHeapVlarge_typearray_limit6M_I_; +text: .text%__1cbCTwoGenerationCollectorPolicyYis_two_generation_policy6M_i_: collectorPolicy.o; +text: .text%__1cbCTwoGenerationCollectorPolicyVlarge_typearray_limit6M_I_; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: typeArrayKlass.o; +text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_I_pnIHeapWord__; +text: .text%__1cQGenCollectedHeapVunsafe_max_tlab_alloc6kM_I_; +text: 
.text%__1cQDefNewGenerationVunsafe_max_tlab_alloc6kM_I_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationVunsafe_max_alloc_nogc6kM_I_; +text: .text%__1cPContiguousSpaceEfree6kM_I_: space.o; +text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_; +text: .text%__1cQGenCollectedHeapRallocate_new_tlab6MI_pnIHeapWord__; +text: .text%__1cQGenCollectedHeapMmem_allocate6MIii_pnIHeapWord__; +text: .text%__1cbCTwoGenerationCollectorPolicyRmem_allocate_work6MIii_pnIHeapWord__; +text: .text%__1cQDefNewGenerationPshould_allocate6MIii_i_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationMpar_allocate6MIii_pnIHeapWord__: defNewGeneration.o; +text: .text%__1cJEdenSpaceMpar_allocate6MI_pnIHeapWord__; +text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2I_v_; +text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_; +text: .text%__1cQjava_lang_StringQbasic_create_oop6FpnQtypeArrayOopDesc_ipnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cRCardTableModRefBSEkind6M_nKBarrierSetEName__: cardTableModRefBS.o; +text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_; +text: .text%__1cNSharedRuntimebIinitialize_StrictMath_entry_points6F_v_; +text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_; +text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cNsymbolOopDescGequals6kMpkci_i_; +text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cNmethodOopDescThas_native_function6kM_i_; +text: .text%__1cMNativeLookupLlookup_base6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cMstringStream2t6MI_v_; +text: .text%__1cMoutputStreamFprint6MpkcE_v_; +text: .text%__1cMstringStreamFwrite6MpkcI_v_; +text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__; +text: .text%__1cEUTF8Enext6FpkcpH_pc_; +text: .text%__1cMoutputStreamDput6Mc_v_; +text: .text%__1cMstringStreamJas_string6M_pc_; +text: .text%__1cMstringStream2T6M_v_; +text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_; +text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_; +text: .text%__1cNmethodOopDescTset_native_function6MpC_v_; +text: .text%__1cbEinitialize_converter_functions6F_v_; +text: .text%__1cQGenCollectedHeapIcapacity6kM_I_; +text: .text%__1cQDefNewGenerationIcapacity6kM_I_; +text: .text%__1cQGenCollectedHeapEused6kM_I_; +text: .text%__1cQDefNewGenerationEused6kM_I_; +text: .text%__1cbCOneContigSpaceCardGenerationEused6kM_I_; +text: .text%__1cQGenCollectedHeapPpost_initialize6M_v_; +text: .text%__1cQGenCollectedHeapTref_processing_init6M_v_; +text: .text%__1cKSharedHeapTref_processing_init6M_v_; +text: .text%__1cKGenerationSref_processor_init6M_v_; +text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: compactingPermGenGen.o; +text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: compactingPermGenGen.o; +text: .text%__1cSReferenceProcessor2t6MnJMemRegion_iii_v_; +text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: defNewGeneration.o; +text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: defNewGeneration.o; +text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: tenuredGeneration.o; +text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: tenuredGeneration.o; +text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_; +text: .text%__1cQGenCollectedHeapEkind6M_nNCollectedHeapEName__: genCollectedHeap.o; 
+text: .text%__1cNMemoryServicebBadd_gen_collected_heap_info6FpnQGenCollectedHeap__v_; +text: .text%__1cPMarkSweepPolicyUis_mark_sweep_policy6M_i_: collectorPolicy.o; +text: .text%__1cNMemoryManagerXget_copy_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cPGCMemoryManager2t6M_v_; +text: .text%__1cNMemoryManagerWget_msc_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cQDefNewGenerationEkind6M_nKGenerationEName__: defNewGeneration.o; +text: .text%__1cTContiguousSpacePool2t6MpnPContiguousSpace_pkcnKMemoryPoolIPoolType_Ii_v_; +text: .text%__1cbBSurvivorContiguousSpacePool2t6MpnQDefNewGeneration_pkcnKMemoryPoolIPoolType_Ii_v_; +text: .text%__1cRTenuredGenerationEkind6M_nKGenerationEName__: tenuredGeneration.o; +text: .text%__1cOGenerationPool2t6MpnKGeneration_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cKGenerationMmax_capacity6kM_I_; +text: .text%__1cQGenCollectedHeapNgc_threads_do6kMpnNThreadClosure__v_; +text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_; +text: .text%__1cQjavaClasses_init6F_v_; +text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_; +text: .text%__1cNinstanceKlassQfind_local_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__i_; +text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_; +text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_; +text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_; +text: .text%__1cSstubRoutines_init26F_v_; +text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbIgenerate_handler_for_unsafe_access6M_pC_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerVverify_oop_subroutine6M_v_; +text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_; +text: .text%__1cGThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cGThreadVis_jvmti_agent_thread6kM_i_: thread.o; +text: .text%__1cGEventsDlog6FpkcE_v_: thread.o; +text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_; +text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_; +text: .text%__1cUGenericGrowableArrayUclear_and_deallocate6M_v_; +text: .text%__1cIVMThreadGcreate6F_v_; +text: .text%__1cCosNcreate_thread6FpnGThread_n0AKThreadType_I_i_; +text: .text%__1cCosMstart_thread6FpnGThread__v_; +text: .text%__1cCosPpd_start_thread6FpnGThread__v_; +text: .text%__1cHMonitorEwait6Mil_i_; +text: .text%_start: os_solaris.o; +text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__; +text: .text%__1cQset_lwp_priority6Fiii_i_; +text: .text%__1cIVMThreadMis_VM_thread6kM_i_: vmThread.o; +text: .text%__1cIVMThreadDrun6M_v_; +text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_; +text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_; +text: .text%__1cHMonitorGnotify6M_i_; +text: .text%__1cLJvmtiExportRenter_start_phase6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_start6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_start6F_v_; +text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_; +text: .text%__1cNinstanceKlassVshould_be_initialized6kM_i_; +text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassPlink_class_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cMelapsedTimerFstart6M_v_; +text: .text%__1cCosPelapsed_counter6F_x_; +text: .text%__1cIVerifierRverify_byte_codes6FnTinstanceKlassHandle_pnGThread__v_; +text: 
.text%__1cMelapsedTimerEstop6M_v_; +text: .text%__1cNinstanceKlassWadd_loader_constraints6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_; +text: .text%__1cRInvocationCounterFreset6M_v_; +text: .text%__1cNmethodOopDescbGupdate_compiled_code_entry_point6Mi_v_; +text: .text%__1cIRuntime1Mientries_for6FnMmethodHandle__pnIiEntries__; +text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__; +text: .text%__1cNmethodOopDescPis_empty_method6kM_i_; +text: .text%__1cNmethodOopDescLis_accessor6kM_i_; +text: .text%__1cNmethodOopDescMintrinsic_id6kM_n0ALIntrinsicId__; +text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cLklassVtableRinitialize_vtable6MpnGThread__v_; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlass.o; +text: .text%__1cLklassVtableTupdate_super_vtable6MpnNinstanceKlass_pnNmethodOopDesc_i_i_; +text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_; +text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_; +text: .text%__1cLklassItableRinitialize_itable6M_v_; +text: .text%__1cQSystemDictionaryXcheck_signature_loaders6FnMsymbolHandle_nGHandle_2ipnGThread__v_; +text: .text%__1cLklassVtableXvtable_accessibility_at6Mi_n0AKAccessType__; +text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_; +text: .text%__1cLklassItablebFinitialize_itable_for_interface6MpnMklassOopDesc_pnRitableMethodEntry__v_; +text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_; +text: .text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cRCompilationPolicyOmustBeCompiled6FnMmethodHandle__i_; +text: .text%__1cNmethodOopDescRis_not_compilable6kMi_i_; +text: .text%__1cCosbCstack_shadow_pages_available6FpnGThread_nMmethodHandle__i_; +text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_; +text: .text%__1cGThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%__1cGThreadXclear_pending_exception6M_v_; +text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cKJavaThreadPcook_last_frame6MnFframe__1_; +text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__; +text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_; +text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_; +text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__; +text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassMoop_is_klass6kM_i_: symbolKlass.o; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceKlass.o; +text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_; +text: .text%__1cKReflectionTverify_class_access6FpnMklassOopDesc_2i_i_; +text: 
.text%__1cTconstantPoolOopDescLname_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cTconstantPoolOopDescQsignature_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cMLinkResolverTresolve_static_call6FrnICallInfo_rnLKlassHandle_nMsymbolHandle_53iipnGThread__v_; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_; +text: .text%__1cKReflectionTverify_field_access6FpnMklassOopDesc_22nLAccessFlags_ii_i_; +text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_; +text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__; +text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_; +text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_; +text: .text%__1cNSignatureInfoHdo_void6M_v_: bytecode.o; +text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_: bytecode.o; +text: .text%__1cNmethodOopDescPis_final_method6kM_i_; +text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_; +text: .text%__1cMNativeLookupGlookup6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_; +text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6MX_v_; +text: .text%__1cRSignatureIteratorSiterate_parameters6MX_v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interpreterRT_sparc.o; +text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_; +text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_; +text: .text%jni_RegisterNatives: jni.o; +text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__; +text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__; +text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_; +text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_; +text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_; +text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6FnTinstanceKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6FnTobjArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlass.o; +text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_; +text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__; +text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__; +text: 
.text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__; +text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_; +text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__; +text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlass.o; +text: .text%__1cKKlass_vtbl2n6FIrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: klass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: klass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: objArrayKlass.o; +text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlass.o; +text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_; +text: .text%__1cNobjArrayKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: objArrayKlass.o; +text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__; +text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cFKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_ipnGThread__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_iipnGThread__v_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_; +text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cWConstantPoolCacheEntryJset_field6MnJBytecodesECode_2nLKlassHandle_iinITosState_ii_v_; +text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnMklassOopDesc__i_; +text: .text%__1cNinstanceKlassVis_same_class_package6FpnHoopDesc_pnNsymbolOopDesc_24_i_; +text: .text%__1cEUTF8Hstrrchr6FpWiW_1_; +text: .text%__1cEUTF8Fequal6FpWi1i_i_; +text: .text%__1cNinstanceKlassbDcheck_valid_for_instantiation6MipnGThread__v_; +text: .text%__1cSinstanceKlassKlassMoop_is_klass6kM_i_: instanceKlassKlass.o; +text: .text%__1cMLinkResolverbFlinktime_resolve_special_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_special_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_4ipnGThread__v_; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: bytecode.o; +text: .text%__1cNSignatureInfoHdo_long6M_v_: bytecode.o; +text: .text%JVM_CurrentTimeMillis; +text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: 
.text%__1cMLinkResolverUresolve_special_call6FrnICallInfo_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cNmethodOopDescIbci_from6kMpC_i_; +text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__; +text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_; +text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__; +text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_; +text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_virtual_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cFKlassXcan_be_statically_bound6FpnNmethodOopDesc__i_; +text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cNSignatureInfoGdo_int6M_v_: bytecode.o; +text: .text%__1cNSignatureInfoGdo_int6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_: c1_Runtime1_sparc.o; +text: .text%__1cNSignatureInfoHdo_char6M_v_: bytecode.o; +text: .text%__1cFKlassOis_subclass_of6kMpnMklassOopDesc__i_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnHoopDesc_pnNsymbolOopDesc__i_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_; +text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_sparc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: interpreterRT_sparc.o; +text: .text%JVM_DoPrivileged; +text: .text%__1cLmethodKlassNoop_is_method6kM_i_: methodKlass.o; +text: .text%__1cMvframeStream2t6MpnKJavaThread_i_v_; +text: .text%__1cLRegisterMap2t6MpnKJavaThread_i_v_; +text: .text%__1cSvframeStreamCommonPfill_from_frame6M_i_; +text: .text%__1cFframeUis_interpreted_frame6kM_i_; +text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_; +text: .text%__1cSvframeStreamCommonbHskip_method_invoke_and_aux_frames6M_v_; +text: .text%__1cSvframeStreamCommonEnext6M_v_; +text: .text%__1cFframeGsender6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cFframeZsender_with_pc_adjustment6kMpnLRegisterMap_pnICodeBlob_i_0_; +text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_; +text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__; +text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__; +text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: bytecode.o; +text: .text%jni_FindClass: jni.o; +text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__; +text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__; +text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_; +text: .text%jni_NewGlobalRef: jni.o; +text: .text%__1cKJNIHandlesLmake_global6FnGHandle_i_pnI_jobject__; +text: .text%jni_GetStringUTFChars: jni.o; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_; +text: .text%__1cHUNICODEHas_utf86FpHi_pc_; +text: .text%JVM_FindPrimitiveClass; +text: .text%__1cJname2type6Fpkc_nJBasicType__; +text: .text%jni_ReleaseStringUTFChars; +text: 
.text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o; +text: .text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_; +text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_; +text: .text%JVM_CurrentThread; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: bytecode.o; +text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_; +text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: c1_Runtime1_sparc.o; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_sparc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_: interpreterRT_sparc.o; +text: .text%JVM_ArrayCopy; +text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cOtypeArrayKlassQoop_is_typeArray6kM_i_: typeArrayKlass.o; +text: .text%JVM_GetStackAccessControlContext; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_; +text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__; +text: .text%__1cICodeBlobJis_zombie6kM_i_: codeBlob.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: codeBlob.o; +text: .text%__1cFframeOis_first_frame6kM_i_; +text: .text%__1cFframeOis_entry_frame6kM_i_; +text: .text%__1cFframeUentry_frame_is_first6kM_i_; +text: .text%__1cbIjava_security_AccessControlContextGcreate6FnOobjArrayHandle_inGHandle_pnGThread__pnHoopDesc__; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_; +text: .text%JVM_GetInheritedAccessControlContext; +text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_; +text: .text%JVM_SetThreadPriority; +text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__; +text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_; +text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__; +text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceRefKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceRefKlass.o; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: c1_Runtime1_sparc.o; +text: .text%JVM_IsThreadAlive; +text: .text%__1cQjava_lang_ThreadIis_alive6FpnHoopDesc__i_; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: c1_Runtime1_sparc.o; +text: .text%JVM_StartThread; +text: .text%__1cQjava_lang_ThreadMis_stillborn6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_; +text: .text%__1cKJavaThread2t6MpFp0pnGThread__vI_v_; +text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_; +text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__; +text: .text%__1cQjava_lang_ThreadJis_daemon6FpnHoopDesc__i_; +text: .text%__1cGThreadFstart6Fp0_v_; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceRefKlass.o; +text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cKJavaThreadDrun6M_v_; +text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cMLinkResolverUresolve_virtual_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceRefKlass.o; +text: 
.text%__1cNSignatureInfoHdo_long6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: interpreterRT_sparc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_; +text: .text%__1cOMacroAssemblerOstore_argument6MpnMRegisterImpl_rnIArgument__v_: interpreterRT_sparc.o; +text: .text%JVM_MonitorWait; +text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__; +text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_; +text: .text%__1cNObjectMonitor2t6M_v_; +text: .text%__1cNObjectMonitorHRecycle6M_v_; +text: .text%__1cNObjectMonitorEwait6MxipnGThread__v_; +text: .text%__1cGThreadOis_interrupted6Fp0i_i_; +text: .text%__1cCosOis_interrupted6FpnGThread_i_i_; +text: .text%__1cNObjectMonitorEexit6MpnGThread__v_; +text: .text%jni_GetObjectClass: jni.o; +text: .text%jni_GetMethodID: jni.o; +text: .text%__1cNget_method_id6FpnHJNIEnv__pnH_jclass_pkc5ipnGThread__pnK_jmethodID__: jni.o; +text: .text%__1cPjava_lang_ClassMis_primitive6FpnHoopDesc__i_; +text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__; +text: .text%__1cMjniIdSupportNto_jmethod_id6FpnNmethodOopDesc__pnK_jmethodID__; +text: .text%jni_NewStringUTF: jni.o; +text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__; +text: .text%jni_CallObjectMethod: jni.o; +text: .text%__1cMjniIdSupportNto_method_oop6FpnK_jmethodID__pnNmethodOopDesc__; +text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_; +text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6MX_v_: jni.o; +text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_: jni.o; +text: .text%jni_ExceptionOccurred: jni.o; +text: .text%__1cKJavaThreadbHcheck_and_handle_async_exceptions6Mi_v_; +text: .text%jni_DeleteLocalRef: jni.o; +text: .text%jio_vsnprintf; +text: .text%jni_EnsureLocalCapacity; +text: .text%jni_GetStaticMethodID: jni.o; +text: .text%jni_CallStaticObjectMethodV: jni.o; +text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%jni_ExceptionCheck: jni.o; +text: .text%jni_NewString: jni.o; +text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__; +text: .text%JVM_InitProperties; +text: .text%__1cQjava_lang_StringbHcreate_from_platform_depended_str6FpkcpnGThread__nGHandle__; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%jni_GetFieldID: jni.o; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2ipnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cSInterpreterRuntimeDldc6FpnKJavaThread_i_v_; +text: .text%__1cJFieldTypeYis_valid_array_signature6FpnNsymbolOopDesc__i_; +text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6FnUtypeArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlass.o; +text: .text%JVM_RegisterUnsafeMethods; +text: .text%JVM_IsArrayClass; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: 
typeArrayKlass.o; +text: .text%__1cNSignatureInfoHdo_char6M_v_: c1_Runtime1_sparc.o; +text: .text%JVM_GetComponentType; +text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_; +text: .text%JVM_IsPrimitiveClass; +text: .text%JVM_GetClassLoader; +text: .text%JVM_DesiredAssertionStatus; +text: .text%__1cOJavaAssertionsHenabled6Fpkci_i_; +text: .text%JVM_InternString; +text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_; +text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_; +text: .text%__1cQjava_lang_StringGequals6FpnHoopDesc_pHi_i_; +text: .text%JVM_NanoTime; +text: .text%__1cCosNjavaTimeNanos6F_x_; +text: .text%JVM_GetCallerClass; +text: .text%JVM_SupportsCX8; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_sparc.o; +text: .text%JVM_GetClassDeclaredFields; +text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_; +text: .text%__1cKReflectionJnew_field6FpnPfieldDescriptor_ipnGThread__pnHoopDesc__; +text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_; +text: .text%__1cEUTF8Ounicode_length6Fpkci_i_; +text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__; +text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_; +text: .text%__1cXjava_lang_reflect_FieldThas_signature_field6F_i_; +text: .text%__1cXjava_lang_reflect_FieldVhas_annotations_field6F_i_; +text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__; +text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbGruntime_resolve_interface_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cFKlassXsearch_secondary_supers6kMpnMklassOopDesc__i_; +text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_; +text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_; +text: .text%Unsafe_ObjectFieldOffset; +text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_; +text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_; +text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_; +text: .text%JVM_IHashCode; +text: 
.text%__1cHoopDescSslow_identity_hash6M_i_; +text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__i_; +text: .text%__1cCosGrandom6F_l_; +text: .text%jni_GetStaticFieldID: jni.o; +text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__; +text: .text%jni_SetStaticObjectField: jni.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceRefKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlass.o; +text: .text%__1cNobjArrayKlassPoop_is_objArray6kM_i_: objArrayKlass.o; +text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlass.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: arrayKlass.o; +text: .text%jni_GetStringUTFLength: jni.o; +text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_; +text: .text%__1cHUNICODELutf8_length6FpHi_i_; +text: .text%jni_GetStringLength: jni.o; +text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_; +text: .text%jni_GetStringUTFRegion: jni.o; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_; +text: .text%JVM_FindClassFromClassLoader; +text: .text%JVM_FindClassFromBootLoader; +text: .text%JVM_IsInterface; +text: .text%JVM_GetClassDeclaredConstructors; +text: .text%__1cNmethodOopDescOis_initializer6kM_i_; +text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassQmethod_index_for6kMpnNmethodOopDesc_pnGThread__i_; +text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__; +text: .text%__1cPSignatureStream2t6MnMsymbolHandle_i_v_; +text: .text%__1cPSignatureStreamEnext6M_v_; +text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__; +text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__; +text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorThas_signature_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorVhas_annotations_field6F_i_; +text: .text%__1cNmethodOopDescLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorbFhas_parameter_annotations_field6F_i_; +text: .text%__1cNmethodOopDescVparameter_annotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%JVM_Clone; +text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_; +text: .text%JVM_GetClassAccessFlags; +text: .text%JVM_GetClassName; +text: .text%__1cFKlassNexternal_name6kM_pkc_; +text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_; +text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__; +text: 
.text%JVM_GetClassModifiers; +text: .text%jni_GetSuperclass: jni.o; +text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__; +text: .text%JVM_NewInstanceFromConstructor; +text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_; +text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_; +text: .text%__1cKReflectionGinvoke6FnTinstanceKlassHandle_nMmethodHandle_nGHandle_inOobjArrayHandle_nJBasicType_4ipnGThread__pnHoopDesc__; +text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cFVTuneQstart_class_load6F_v_; +text: .text%__1cJEventMark2t6MpkcE_v_: classLoader.o; +text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_; +text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__; +text: .text%__1cPClassFileStream2t6MpCipc_v_; +text: .text%__1cPClassFileParserOparseClassFile6MnMsymbolHandle_nGHandle_2r1pnGThread__nTinstanceKlassHandle__; +text: .text%__1cIVerifierRshould_verify_for6FpnHoopDesc__i_; +text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_; +text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_; +text: .text%__1cIVerifierQrelax_verify_for6FpnHoopDesc__i_; +text: .text%__1cVjava_lang_ClassLoaderRis_trusted_loader6FpnHoopDesc__i_; +text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__; +text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__; +text: .text%__1cKoopFactoryQnew_constantPool6FipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cRconstantPoolKlassIallocate6MipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_; +text: .text%__1cPClassFileParserbEparse_constant_pool_utf8_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_; +text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_; +text: .text%__1cTconstantPoolOopDescSklass_ref_index_at6Mi_i_; +text: .text%__1cTconstantPoolOopDescbAname_and_type_ref_index_at6Mi_i_; +text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_; +text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_; +text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_; +text: .text%__1cPClassFileParserQparse_interfaces6MnSconstantPoolHandle_nGHandle_2pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserMparse_fields6MnSconstantPoolHandle_ipnUFieldAllocationCount_pnOobjArrayHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cPClassFileParserbCverify_legal_field_modifiers6MiipnGThread__v_; +text: .text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_; +text: .text%__1cPClassFileParserWparse_field_attributes6MnSconstantPoolHandle_iHpHpi2pnPtypeArrayHandle_pnGThread__v_; +text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__; +text: 
.text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__; +text: .text%__1cPClassFileParserNparse_methods6MnSconstantPoolHandle_ipnLAccessFlags_ppnPobjArrayOopDesc_66pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserMparse_method6MnSconstantPoolHandle_ipnLAccessFlags_pnPtypeArrayHandle_55pnGThread__nMmethodHandle__; +text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbDverify_legal_method_modifiers6MiinMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserWparse_linenumber_table6MIIpipnGThread__pC_; +text: .text%__1cbFCompressedLineNumberWriteStream2t6Mi_v_; +text: .text%__1cbFCompressedLineNumberWriteStreamKwrite_pair6Mii_v_; +text: .text%__1cKoopFactoryKnew_method6FinLAccessFlags_iiipnGThread__pnNmethodOopDesc__; +text: .text%__1cQconstMethodKlassIallocate6MiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_; +text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_; +text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__; +text: .text%__1cNmethodOopDescLobject_size6Fi_i_; +text: .text%__1cNmethodOopDescJinit_code6M_v_; +text: .text%__1cRInvocationCounterEinit6M_v_; +text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_; +text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_; +text: .text%__1cVCompressedWriteStreamEgrow6M_v_; +text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cQSystemDictionaryVresolve_super_or_fail6FnMsymbolHandle_1nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__; +text: .text%__1cNmethodOopDescMsort_methods6FpnPobjArrayOopDesc_222_v_; +text: .text%method_compare: methodOop.o; +text: .text%__1cLklassVtablebKcompute_vtable_size_and_num_mirandas6Fri1pnMklassOopDesc_pnPobjArrayOopDesc_nLAccessFlags_pnHoopDesc_pnNsymbolOopDesc_5_v_; +text: .text%__1cLklassVtableWneeds_new_vtable_entry6FpnNmethodOopDesc_pnMklassOopDesc_pnHoopDesc_pnNsymbolOopDesc_nLAccessFlags__i_; +text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_; +text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_; +text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlass.o; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceKlass.o; +text: .text%__1cNinstanceKlassQinit_implementor6M_v_; +text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_; +text: 
.text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_; +text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o; +text: .text%__1cNinstanceKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserNfill_oop_maps6MnTinstanceKlassHandle_ii_v_; +text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__; +text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cLClassLoaderLadd_package6Fpkci_i_; +text: .text%__1cSThreadProfilerMark2T6M_v_; +text: .text%__1cFVTuneOend_class_load6F_v_; +text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cKoopFactoryVnew_constantPoolCache6FipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cWconstantPoolCacheKlassIallocate6MipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_; +text: .text%__1cIRewriterOrewrite_method6FnMmethodHandle_rnIintArray_pnGThread__1_; +text: .text%__1cNmethodOopDescLlink_method6FnMmethodHandle__v_; +text: .text%JVM_MaxMemory; +text: .text%__1cQGenCollectedHeapMmax_capacity6kM_I_; +text: .text%__1cQDefNewGenerationMmax_capacity6kM_I_; +text: .text%Unsafe_AllocateMemory; +text: .text%Unsafe_SetNativeLong; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: bytecode.o; +text: .text%Unsafe_GetNativeByte; +text: .text%Unsafe_FreeMemory; +text: .text%__1cNSignatureInfoIdo_float6M_v_: bytecode.o; +text: .text%jni_NewObjectV: jni.o; +text: .text%jni_GetStringRegion: jni.o; +text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__; +text: .text%jni_GetObjectField: jni.o; +text: .text%jni_GetStringCritical: jni.o; +text: .text%jni_ReleaseStringCritical: jni.o; +text: .text%__1cSInterpreterRuntimebAfrequency_counter_overflow6FpnKJavaThread_pC_x_; +text: .text%__1cQSimpleCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_; +text: .text%__1cRInvocationCounterJset_carry6M_v_; +text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__; +text: .text%__1cQSimpleCompPolicyRcompilation_level6MnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerTcompile_method_base6FnMmethodHandle_ii1ipkcpnGThread__pnHnmethod__; +text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%JVM_LoadLibrary; +text: .text%JVM_FindLibraryEntry; +text: .text%jni_GetJavaVM; +text: .text%JVM_IsSupportedJNIVersion; +text: .text%jni_SetIntField: jni.o; +text: .text%jni_SetLongField: jni.o; +text: .text%JVM_FindSignal; +text: .text%JVM_RegisterSignal; +text: .text%__1cCosMuser_handler6F_pv_; +text: .text%__1cCosHSolarisOis_sig_ignored6Fi_i_; +text: .text%__1cCosGsignal6Fipv_1_; +text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o; +text: .text%__1cVquicken_jni_functions6F_v_; +text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_; +text: 
.text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: jniFastGetField_sparc.o; +text: .text%__1cYjni_GetBooleanField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_; +text: .text%__1cVjni_GetByteField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_; +text: .text%__1cVjni_GetCharField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_; +text: .text%__1cWjni_GetShortField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_; +text: .text%__1cUjni_GetIntField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_; +text: .text%__1cVjni_GetLongField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: jniFastGetField_sparc.o; +text: .text%__1cWjni_GetFloatField_addr6F_pC_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_; +text: .text%__1cXjni_GetDoubleField_addr6F_pC_; +text: .text%__1cSset_init_completed6F_v_; +text: .text%__1cJTimeStampGupdate6M_v_; +text: .text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_; +text: .text%jni_NewObjectArray: jni.o; +text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__; +text: .text%jni_SetObjectArrayElement: jni.o; +text: .text%jni_GetObjectArrayElement: jni.o; +text: .text%__1cKSharedHeapXfill_region_with_object6FnJMemRegion__v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_; +text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cLRegisterMapFclear6Mpi_v_; +text: .text%__1cLJvmtiExportQenter_live_phase6F_v_; +text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_; +text: .text%__1cUJvmtiEventControllerHvm_init6F_v_; +text: .text%__1cHMonitorKnotify_all6M_i_; +text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_; +text: .text%__1cMPeriodicTask2t6MI_v_; +text: .text%__1cMPeriodicTaskGenroll6M_v_; +text: .text%__1cURecompilationMonitorbGstart_recompilation_monitor_task6F_v_; +text: .text%__1cCosLsignal_init6F_v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_; +text: .text%__1cCosOsignal_init_pd6F_v_; +text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_; +text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_; +text: .text%__1cICompiler2t6M_v_; +text: .text%__1cNCompileBrokerQcompilation_init6FpnQAbstractCompiler__v_; +text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_; +text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_; +text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__; +text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o; +text: .text%__1cCosLsignal_wait6F_i_; +text: .text%__1cVcheck_pending_signals6Fi_i_: os_solaris.o; +text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_; +text: .text%__1cOCompilerThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cCosFyield6F_v_; +text: .text%__1cCosFsleep6FpnGThread_xi_i_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: thread.o; +text: 
.text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o; +text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_; +text: .text%__1cICompilerKinitialize6M_v_; +text: .text%__1cKManagementKinitialize6FpnGThread__v_; +text: .text%__1cRLowMemoryDetectorKinitialize6F_v_; +text: .text%__1cXLowMemoryDetectorThreadbCis_hidden_from_external_view6kM_i_: lowMemoryDetector.o; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cLStatSamplerGengage6F_v_; +text: .text%__1cCosRelapsed_frequency6F_x_; +text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__; +text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_: statSampler.o; +text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__; +text: .text%__1cUGenericGrowableArrayNraw_appendAll6Mpk0_v_; +text: .text%__1cJTimeStampMmilliseconds6kM_x_; +text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_; +text: .text%__1cORuntimeServiceYrecord_application_start6F_v_; +text: .text%__1cNWatcherThreadDrun6M_v_; +text: .text%__1cNWatcherThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cIos_sleep6Fxi_i_: os_solaris.o; +text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_; +text: .text%JVM_FindLoadedClass; +text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%jni_CallStaticObjectMethod: jni.o; +text: .text%__1cNCompileBrokerZcompilation_is_prohibited6FnMmethodHandle_i_i_; +text: .text%__1cOCompilerOracleOshould_exclude6FnMmethodHandle__i_; +text: .text%__1cNCompileBrokerTwait_for_completion6FpnLCompileTask__pnHnmethod__; +text: .text%__1cCosPhint_no_preempt6F_v_; +text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_; +text: .text%__1cNCompileBrokerQset_last_compile6FpnOCompilerThread_nMmethodHandle_ii_v_; +text: .text%__1cGThreadMis_VM_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_; +text: .text%__1cKPerfStringKset_string6Mpkc_v_; +text: .text%__1cOCompilerOraclePshould_break_at6FnMmethodHandle__i_; +text: .text%__1cFciEnv2t6MpnHJNIEnv__iii_v_; +text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_; +text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_; +text: .text%__1cUGenericGrowableArray2t6MpnFArena_iipnEGrET__v_; +text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_; +text: .text%__1cIciObject2t6MnGHandle__v_; +text: .text%__1cIciObjectJset_ident6MI_v_; +text: .text%__1cGciType2t6MnJBasicType__v_; +text: .text%__1cIciObject2t6M_v_; +text: .text%__1cPciObjectFactoryEfind6MpnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cPciObjectFactoryLis_found_at6MipnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cPciObjectFactoryNfind_non_perm6MpnHoopDesc__rpn0ANNonPermObject__; +text: .text%__1cKSharedHeapPis_in_permanent6kMpkv_i_: genCollectedHeap.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: klassKlass.o; +text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__; +text: 
.text%__1cKklassKlassMoop_is_klass6kM_i_: klassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodKlass.o; +text: .text%__1cIciSymbolEmake6Fpkc_p0_; +text: .text%__1cFciEnvIis_in_vm6F_i_; +text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__; +text: .text%__1cLsymbolKlassNoop_is_symbol6kM_i_: symbolKlass.o; +text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_; +text: .text%__1cGciType2t6MnLKlassHandle__v_; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodKlass.o; +text: .text%__1cPciObjectFactoryGinsert6MipnIciObject_pnNGrowableArray4C2___v_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: symbolKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: symbolKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: symbolKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: symbolKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: symbolKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: klassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: klassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassUoop_is_instanceKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: klassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassUoop_is_instanceKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassVoop_is_typeArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlassKlass.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassUoop_is_objArrayKlass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: objArrayKlassKlass.o; +text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_; +text: .text%__1cHciKlass2t6MnLKlassHandle__v_; +text: .text%__1cFKlassIsubklass6kM_p0_; +text: 
.text%__1cPciInstanceKlassTis_java_lang_Object6M_i_; +text: .text%__1cIciObjectGequals6Mp0_i_; +text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cFKlassMoop_is_klass6kM_i_: instanceKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlass.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: instanceKlass.o; +text: .text%__1cUciInstanceKlassKlassEmake6F_p0_; +text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_; +text: .text%__1cGciType2t6MpnHciKlass__v_; +text: .text%__1cIciObject2t6MpnHciKlass__v_; +text: .text%__1cIciObjectUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_; +text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_; +text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_; +text: .text%__1cRciArrayKlassKlassUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodKlass.o; +text: .text%__1cIciMethod2t6MnMmethodHandle__v_; +text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_; +text: .text%__1cPSignatureStreamJis_object6kM_i_; +text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__; +text: .text%__1cFciEnvWget_klass_by_name_impl6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cPciObjectFactorySget_unloaded_klass6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__; +text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cGciTypeEmake6FnJBasicType__p0_; +text: .text%__1cJTraceTime2t6MpkcpnMelapsedTimer_iipnMoutputStream__v_; +text: .text%__1cICompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cIRuntime1Pnew_code_buffer6F_pnKCodeBuffer__; +text: .text%__1cLCompilation2t6MpnQAbstractCompiler_pnFciEnv_pnIciMethod_ipnRC1_MacroAssembler__v_; +text: .text%__1cTExceptionRangeTable2t6Mi_v_; +text: .text%__1cWImplicitExceptionTableIset_size6MI_v_; +text: .text%__1cLCompilationOcompile_method6M_v_; +text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_; +text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_; +text: .text%__1cLCompilationbBis_optimized_library_method6kM_i_; +text: .text%__1cLCompilationTcompile_java_method6MpnLCodeOffsets__i_; +text: .text%__1cIciMethodMall_oop_maps6M_pnKciLocalMap__; +text: .text%__1cSciGenerateLocalMap2t6MpnFArena_nMmethodHandle__v_; +text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_; +text: .text%__1cRRawBytecodeStream2t6MnMmethodHandle__v_; +text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_; +text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: generateOopMap.o; +text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_; +text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_; +text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_; +text: .text%__1cOGenerateOopMapPjump_targets_do6MpnOBytecodeStream_pFp0ipi_v4_i_; +text: .text%__1cSciGenerateLocalMapRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_; +text: 
.text%__1cOGenerateOopMapRinit_basic_blocks6M_v_; +text: .text%__1cOGenerateOopMapKinit_state6M_v_; +text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_; +text: .text%__1cRRawBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_; +text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__; +text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_; +text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_; +text: .text%__1cOGenerateOopMapTmethodsig_to_effect6MpnNsymbolOopDesc_ipnNCellTypeState__i_; +text: .text%__1cRComputeEntryStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_; +text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_; +text: .text%__1cOGenerateOopMapKinterp_all6M_v_; +text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_; +text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_; +text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_; +text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__; +text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__; +text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_; +text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_; +text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_; +text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_; +text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_; +text: .text%__1cOGenerateOopMapTmerge_state_vectors6MpnNCellTypeState_2_i_; +text: .text%__1cNCellTypeStateFmerge6kM0i_0_; +text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_; +text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_; +text: .text%__1cSciGenerateLocalMapOreport_results6kM_i_: ciOopMap.o; +text: .text%__1cOGenerateOopMapNreport_result6M_v_; +text: .text%__1cSciGenerateLocalMapUfill_stackmap_prolog6Mi_v_; +text: .text%__1cSciGenerateLocalMapZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cSciGenerateLocalMapUfill_stackmap_epilog6M_v_: ciOopMap.o; +text: .text%__1cSciGenerateLocalMapOfill_init_vars6MpnNGrowableArray4Ci___v_; +text: .text%__1cLCompilationJbuild_hir6M_v_; +text: .text%__1cCIR2t6MpnLCompilation_pnIciMethod_i_v_; +text: .text%__1cJValueTypeKinitialize6F_v_; +text: .text%__1cMciNullObjectEmake6F_p0_; +text: .text%__1cMGraphBuilderKinitialize6F_v_; +text: .text%__1cHIRScope2t6MpnLCompilation_p0ipnIciMethod_ii_v_; +text: .text%__1cOLocalSlotArray2t6MkikpnJLocalSlot__v_: c1_IR.o; +text: .text%__1cGBitMap2t6MI_v_; +text: .text%__1cNWordSizeArray2t6Mki1_v_: c1_IR.o; +text: .text%__1cJXHandlers2t6MpnIciMethod__v_; +text: .text%__1cIciMethodJload_code6M_v_; +text: .text%__1cIciMethodVhas_balanced_monitors6M_i_; +text: .text%__1cLCompilationTdebug_info_recorder6kM_pnYDebugInformationRecorder__; +text: .text%__1cHIRScopeLbuild_graph6MpnLCompilation_i_pnKBlockBegin__; +text: .text%__1cQBlockListBuilder2t6MpnHIRScope_ii_v_; +text: .text%__1cPBlockBeginArray2t6MkikpnKBlockBegin__v_: c1_GraphBuilder.o; +text: .text%__1cQBlockListBuilderLset_leaders6M_v_; +text: .text%__1cQciBytecodeStream2t6MpnIciMethod__v_; +text: .text%__1cQBlockListBuilderMnew_block_at6MinKBlockBeginEFlag__p1_; +text: .text%__1cKValueStack2t6MpnHIRScope_ii_v_; +text: 
.text%__1cKValueArray2t6MkikpnLInstruction__v_: c1_ValueStack.o; +text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_IR.o; +text: .text%__1cKObjectTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__; +text: .text%__1cMas_ValueType6FnJBasicType__pnJValueType__; +text: .text%__1cMGraphBuilder2t6MpnLCompilation_pnHIRScope_pnJBlockList_pnKBlockBegin__v_; +text: .text%__1cOExceptionScope2t6M_v_; +text: .text%__1cIValueMap2t6M_v_; +text: .text%__1cNResourceArrayGexpand6MIiri_v_; +text: .text%__1cIValueMapIkill_all6M_v_; +text: .text%__1cKValueStackEcopy6M_p0_; +text: .text%__1cMGraphBuilderbBiterate_bytecodes_for_block6Mi_pnIBlockEnd__; +text: .text%__1cQciBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderJScopeDataIblock_at6Mi_pnKBlockBegin__; +text: .text%__1cMGraphBuilderKload_local6MpnJValueType_i_v_; +text: .text%__1cMGraphBuilderLappend_base6MpnLInstruction__2_; +text: .text%__1cJLoadLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_LoadLocal6MpnJLoadLocal__v_; +text: .text%__1cIValueMapEfind6MpnLInstruction__2_; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_GraphBuilder.o; +text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_GraphBuilder.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_GraphBuilder.o; +text: .text%__1cLInstructionEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_GraphBuilder.o; +text: .text%__1cLInstructionIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cKValueStackLclear_store6Mi_v_; +text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_GraphBuilder.o; +text: .text%__1cKValueStackDpop6MpnJValueType__pnLInstruction__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderHif_node6MpnLInstruction_n0BJCondition_2pnKValueStack__v_; +text: .text%__1cCIf2t6MpnLInstruction_n0BJCondition_i2pnKBlockBegin_5pnKValueStack_i_v_: c1_GraphBuilder.o; +text: .text%__1cCIfFvisit6MpnSInstructionVisitor__v_: c1_Canonicalizer.o; +text: .text%__1cNCanonicalizerFdo_If6MpnCIf__v_; +text: .text%__1cJValueTypeLis_constant6kM_i_: c1_ValueType.o; +text: .text%__1cJValueTypeOas_IntConstant6M_pnLIntConstant__: c1_ValueType.o; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Canonicalizer.o; +text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_Canonicalizer.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_Canonicalizer.o; +text: .text%__1cLInstructionEhash6kM_i_: c1_Canonicalizer.o; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_Canonicalizer.o; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_Canonicalizer.o; +text: .text%__1cLInstructionIcan_trap6kM_i_: c1_Canonicalizer.o; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_Canonicalizer.o; +text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_Canonicalizer.o; +text: .text%__1cKValueStackNpin_stack_all6MnLInstructionJPinReason__v_; +text: .text%__1cKBlockBeginItry_join6MpnKValueStack__i_; +text: .text%__1cKValueStack2t6Mp0_v_; +text: .text%__1cKValueStackEinit6Mp0_v_; +text: .text%__1cMGraphBuilderJScopeDataQadd_to_work_list6MpnKBlockBegin__v_; +text: .text%__1cMGraphBuilderLinstance_of6Mi_v_; +text: .text%__1cQciBytecodeStreamJget_klass6kM_pnHciKlass__; +text: .text%__1cFciEnvSget_klass_by_index6MpnPciInstanceKlass_iri_pnHciKlass__; +text: 
.text%__1cFciEnvXget_klass_by_index_impl6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__; +text: .text%__1cKInstanceOfFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerNdo_InstanceOf6MpnKInstanceOf__v_; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_GraphBuilder.o; +text: .text%__1cKValueStackMclear_locals6M_v_; +text: .text%__1cKValueStackZpin_stack_for_state_split6M_v_; +text: .text%__1cJTypeCheckIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cOExceptionScopeEcopy6M_p0_; +text: .text%__1cMGraphBuilderOdirect_compare6MpnHciKlass__i_; +text: .text%__1cPciInstanceKlassRis_instance_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cIConstantFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerLdo_Constant6MpnIConstant__v_; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Instruction.o; +text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_Instruction.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_Instruction.o; +text: .text%__1cIConstantEhash6kM_i_; +text: .text%__1cHIntTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cLIntConstantOas_IntConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cIConstantEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_Instruction.o; +text: .text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_Instruction.o; +text: .text%__1cIConstantIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cLInstructionMas_CompareOp6M_pnJCompareOp__: c1_GraphBuilder.o; +text: .text%__1cKInstanceOfNas_InstanceOf6M_p0_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderNmethod_return6MpnLInstruction__v_; +text: .text%__1cGReturnFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerJdo_Return6MpnGReturn__v_; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_GraphBuilder.o; +text: .text%__1cGReturnJas_Return6M_p0_: c1_GraphBuilder.o; +text: .text%__1cKValueStackbAeliminate_all_scope_stores6Mi_v_; +text: .text%__1cKValueStackQeliminate_stores6Mi_v_; +text: .text%__1cMGraphBuilderKcheck_cast6Mi_v_; +text: .text%__1cJCheckCastFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_CheckCast6MpnJCheckCast__v_; +text: .text%__1cMGraphBuilderLstore_local6MpnKValueStack_pnLInstruction_pnJValueType_ii_v_; +text: .text%__1cKObjectTypeNas_ObjectType6M_p0_: c1_ValueType.o; +text: .text%__1cJValueTypeOas_AddressType6M_pnLAddressType__: c1_ValueType.o; +text: .text%__1cJLocalSlot2t6M_v_; +text: .text%__1cKStoreLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerNdo_StoreLocal6MpnKStoreLocal__v_; +text: .text%__1cKValueStackLstore_local6MpnKStoreLocal_i_v_; +text: .text%__1cKValueStackQpin_stack_locals6Mi_v_; +text: .text%__1cKObjectTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cMGraphBuilderMaccess_field6MnJBytecodesECode__v_; +text: .text%__1cQciBytecodeStreamJget_field6kM_pnHciField__; +text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__; +text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_; +text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_; +text: .text%__1cHciFieldJwill_link6MpnPciInstanceKlass_nJBytecodesECode__i_; +text: 
.text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cKValueStackKcopy_locks6M_p0_; +text: .text%__1cJLoadFieldFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerMdo_LoadField6MpnJLoadField__v_; +text: .text%__1cLAccessFieldOas_AccessField6M_p0_: c1_Instruction.o; +text: .text%__1cJLoadFieldEhash6kM_i_: c1_Instruction.o; +text: .text%__1cJLoadFieldEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cLAccessFieldIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cHIntTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cJValueTypeNas_ObjectType6M_pnKObjectType__: c1_ValueType.o; +text: .text%__1cHIntTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cKValueStackVis_same_across_scopes6Mp0_i_; +text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cHciKlassGloader6M_pnHoopDesc__: ciTypeArrayKlass.o; +text: .text%__1cIciObjectMis_obj_array6M_i_: ciInstanceKlass.o; +text: .text%__1cMGraphBuilderIstack_op6MnJBytecodesECode__v_; +text: .text%__1cMGraphBuilderNarithmetic_op6MpnJValueType_nJBytecodesECode_pnKValueStack__v_; +text: .text%__1cJValueTypeEmeet6kMp0_1_; +text: .text%__1cHIntTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cMArithmeticOpIcan_trap6kM_i_; +text: .text%__1cMArithmeticOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerPdo_ArithmeticOp6MpnMArithmeticOp__v_; +text: .text%__1cNCanonicalizerGdo_Op26MpnDOp2__v_; +text: .text%__1cLIntConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cMArithmeticOpEhash6kM_i_: c1_Instruction.o; +text: .text%__1cMArithmeticOpEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cLInstructionNas_InstanceOf6M_pnKInstanceOf__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderJincrement6M_v_; +text: .text%__1cMGraphBuilderMload_indexed6MnJBasicType__v_; +text: .text%__1cLArrayLengthFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerOdo_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_GraphBuilder.o; +text: .text%__1cLArrayLengthEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cLArrayLengthEname6kM_pkc_: c1_GraphBuilder.o; +text: .text%__1cLAccessArrayIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cLLoadIndexedFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerOdo_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cLLoadIndexedEhash6kM_i_: c1_Instruction.o; +text: .text%__1cLLoadIndexedEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cLAccessArrayIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cIConstantIis_equal6kMpnLInstruction__i_; +text: .text%__1cIConstantLas_Constant6M_p0_: c1_Instruction.o; +text: .text%__1cHIRScopeMheader_block6MpnKBlockBegin_n0BEFlag__2_; +text: .text%__1cCIRIoptimize6M_v_; +text: .text%__1cJOptimizer2t6MpnCIR__v_; +text: .text%__1cJOptimizerbHeliminate_conditional_expressions6M_v_; +text: .text%__1cCIRQiterate_preorder6MpnMBlockClosure__v_; +text: .text%__1cKBlockBeginQiterate_preorder6MpnMBlockClosure__v_; +text: .text%__1cNCE_EliminatorIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_IR.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_Canonicalizer.o; +text: .text%__1cKBlockBeginQiterate_preorder6MrnJboolArray_pnMBlockClosure__v_; +text: .text%__1cCIfFas_If6M_p0_: c1_Canonicalizer.o; +text: .text%__1cJValueTypeKas_IntType6M_pnHIntType__: 
c1_ValueType.o; +text: .text%__1cNCE_EliminatorRsimple_value_copy6MpnLInstruction__2_: c1_Optimizer.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_GraphBuilder.o; +text: .text%__1cJLoadLocalMas_LoadLocal6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_GraphBuilder.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_GraphBuilder.o; +text: .text%__1cHIntTypeKas_IntType6M_p0_: c1_ValueType.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_Instruction.o; +text: .text%__1cJOptimizerQeliminate_blocks6M_v_; +text: .text%__1cUGenericGrowableArray2t6MiipnEGrET_i_v_; +text: .text%__1cSPredecessorCounterIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o; +text: .text%__1cLBlockMergerIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o; +text: .text%__1cLBlockMergerJtry_merge6MpnKBlockBegin__i_: c1_Optimizer.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_IR.o; +text: .text%__1cEGotoHas_Goto6M_p0_: c1_Canonicalizer.o; +text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_Canonicalizer.o; +text: .text%__1cJOptimizerVeliminate_null_checks6M_v_; +text: .text%__1cGBitMapFclear6M_v_; +text: .text%__1cNValueSetArray2t6MkikpnIValueSet__v_: c1_Optimizer.o; +text: .text%__1cTNullCheckEliminatorLiterate_one6MpnKBlockBegin__v_; +text: .text%__1cGBitMapIset_from6M0_v_; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cKBlockBeginFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorNdo_BlockBegin6MpnKBlockBegin__v_; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_IR.o; +text: .text%__1cEBaseFvisit6MpnSInstructionVisitor__v_: c1_IR.o; +text: .text%__1cQNullCheckVisitorHdo_Base6MpnEBase__v_; +text: .text%__1cTNullCheckEliminatorPmerge_state_for6MpnKBlockBegin_pnKValueStack_pnIValueSet__i_; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_Canonicalizer.o; +text: .text%__1cEGotoFvisit6MpnSInstructionVisitor__v_: c1_Canonicalizer.o; +text: .text%__1cQNullCheckVisitorHdo_Goto6MpnEGoto__v_; +text: .text%__1cLInstructionMas_NullCheck6M_pnJNullCheck__: c1_GraphBuilder.o; +text: .text%__1cCIfPinput_values_do6MpFppnLInstruction__v_v_: c1_Canonicalizer.o; +text: .text%__1cTNullCheckEliminatorIdo_value6FppnLInstruction__v_; +text: .text%__1cLAccessLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cFLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cFLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorIdo_Local6MpnFLocal__v_; +text: .text%__1cQNullCheckVisitorMdo_LoadLocal6MpnJLoadLocal__v_; +text: .text%__1cQNullCheckVisitorFdo_If6MpnCIf__v_; +text: .text%__1cLInstructionMas_NullCheck6M_pnJNullCheck__: c1_Instruction.o; +text: .text%__1cGReturnPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cIConstantPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorLdo_Constant6MpnIConstant__v_; +text: .text%__1cQNullCheckVisitorJdo_Return6MpnGReturn__v_; +text: .text%__1cJTypeCheckPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorNdo_InstanceOf6MpnKInstanceOf__v_; +text: .text%__1cQNullCheckVisitorMdo_CheckCast6MpnJCheckCast__v_; +text: .text%__1cKStoreLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorNdo_StoreLocal6MpnKStoreLocal__v_; +text: 
.text%__1cLAccessFieldPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorMdo_LoadField6MpnJLoadField__v_; +text: .text%__1cTNullCheckEliminatorShandle_AccessField6MpnLAccessField__v_; +text: .text%__1cGBitMapbCset_intersection_with_result6M0_i_; +text: .text%__1cDOp2Pinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorPdo_ArithmeticOp6MpnMArithmeticOp__v_; +text: .text%__1cLAccessArrayPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorOdo_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cTNullCheckEliminatorShandle_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cNAccessIndexedPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorOdo_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cTNullCheckEliminatorShandle_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cCIRTcompute_locals_size6M_v_; +text: .text%__1cHIRScopePallocate_locals6MipnMWordSizeList__i_; +text: .text%__1cHIRScopePargument_locals6M_pnJLocalList__; +text: .text%__1cCIRNcompute_loops6M_v_; +text: .text%__1cIciMethodJhas_loops6kM_i_; +text: .text%__1cNmethodOopDescWcompute_has_loops_flag6M_i_; +text: .text%__1cKLoopFinder2t6MpnCIR_i_v_; +text: .text%__1cSBlockLoopInfoArray2t6MkikpnNBlockLoopInfo__v_: c1_Loops.o; +text: .text%__1cKLoopFinderNcompute_loops6Mi_pnILoopList__; +text: .text%__1cKLoopFinderScompute_dominators6MpnJboolArray__v_; +text: .text%__1cGBitMapGat_put6MIi_v_; +text: .text%__1cRCreateInfoClosureIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cPSetPredsClosureIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cKLoopFinderSdominator_walk_sux6MpnKBlockBegin_pnJboolArray__v_; +text: .text%__1cGBitMapQset_intersection6M0_v_; +text: .text%__1cGBitMapHis_same6M0_i_; +text: .text%__1cKLoopFinderOfind_backedges6MpnJboolArray__pnILoopList__; +text: .text%__1cKLoopFinderSgather_loop_blocks6MpnILoopList__v_; +text: .text%__1cKLoopFinderKfind_loops6MpnILoopList_i_2_; +text: .text%__1cKScanBlocks2t6MpnJBlockList__v_; +text: .text%__1cIintStack2t6M_v_: c1_ScanBlocks.o; +text: .text%__1cKScanBlocksEscan6MpnKScanResult_i_v_; +text: .text%__1cKScanBlocksKscan_block6MpnKBlockBegin_pnKScanResult_i_v_; +text: .text%__1cLIllegalTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_GraphBuilder.o; +text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_GraphBuilder.o; +text: .text%__1cLAccessLocalOas_AccessLocal6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLInstructionNas_StoreLocal6M_pnKStoreLocal__: c1_GraphBuilder.o; +text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_Instruction.o; +text: .text%__1cLInstructionKas_Convert6M_pnHConvert__: c1_Instruction.o; +text: .text%__1cLInstructionPas_ArithmeticOp6M_pnMArithmeticOp__: c1_Instruction.o; +text: .text%__1cMArithmeticOpPas_ArithmeticOp6M_p0_: c1_Instruction.o; +text: .text%__1cKStoreLocalNas_StoreLocal6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_GraphBuilder.o; +text: .text%__1cLInstructionKas_Convert6M_pnHConvert__: c1_GraphBuilder.o; +text: .text%__1cLInstructionPas_ArithmeticOp6M_pnMArithmeticOp__: c1_GraphBuilder.o; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_Canonicalizer.o; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_Canonicalizer.o; +text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_Canonicalizer.o; +text: 
.text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_Canonicalizer.o; +text: .text%__1cKLoopFinderPfind_loop_exits6MpnKBlockBegin_pnELoop__v_; +text: .text%__1cKLoopFinderNinsert_blocks6MpnILoopList__v_; +text: .text%__1cIintArray2t6Mki1_v_: c1_Loops.o; +text: .text%__1cJBlockListPiterate_forward6MpnMBlockClosure__v_; +text: .text%__1cGTaggerIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cNPairCollectorIblock_do6MpnKBlockBegin__v_: c1_Loops.o; +text: .text%__1cNResourceArrayEsort6MIpGpkv2_i_v_; +text: .text%__1cRsort_by_block_ids6FppnJBlockPair_2_i_: c1_Loops.o; +text: .text%__1cKLoopFinderUinsert_caching_block6MpnILoopList_pnKBlockBegin_4_4_; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_GraphBuilder.o; +text: .text%__1cKStateSplitFscope6kM_pnHIRScope__; +text: .text%__1cKLoopFinderJnew_block6MpnHIRScope_i_pnKBlockBegin__; +text: .text%__1cIBlockEndOsubstitute_sux6MpnKBlockBegin_2_v_; +text: .text%__1cCIRMcompute_code6M_v_; +text: .text%__1cCIRWiterate_and_set_weight6kMrnJboolArray_pnKBlockBegin_pnJBlockList_i_v_; +text: .text%__1cKBlockBeginKset_weight6Mi_v_; +text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_IR.o; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_IR.o; +text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_GraphBuilder.o; +text: .text%__1cDcmp6FppnKBlockBegin_2_i_: c1_IR.o; +text: .text%__1cUSuxAndWeightAdjusterIblock_do6MpnKBlockBegin__v_: c1_IR.o; +text: .text%__1cLInstructionGnegate6Fn0AJCondition__1_; +text: .text%__1cJBlockListJblocks_do6MpFpnKBlockBegin__v_v_; +text: .text%__1cQUseCountComputerRcompute_use_count6FpnKBlockBegin__v_: c1_IR.o; +text: .text%__1cQUseCountComputerQupdate_use_count6FppnLInstruction__v_: c1_IR.o; +text: .text%__1cKStateSplitPstate_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cKValueStackJvalues_do6MpFppnLInstruction__v_v_; +text: .text%__1cNCachingChangePinput_values_do6MpFppnLInstruction__v_v_: c1_Loops.o; +text: .text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_Loops.o; +text: .text%__1cFLocalIas_Local6M_p0_: c1_GraphBuilder.o; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_IR.o; +text: .text%__1cLCompilationIemit_lir6M_v_; +text: .text%__1cIFrameMap2t6Mi_v_; +text: .text%__1cLCompilationNinit_framemap6MpnIFrameMap__v_; +text: .text%__1cIFrameMapbCset_local_name_to_offset_map6MpnMWordSizeList__v_; +text: .text%__1cLLIR_Emitter2t6MpnLCompilation__v_; +text: .text%__1cIValueGenOinit_value_gen6F_v_; +text: .text%__1cIRegAlloc2t6M_v_; +text: .text%__1cNc1_AllocTable2t6Mi_v_; +text: .text%__1cNCodeGenerator2t6MpnIValueGen_pnRValueGenInvariant__v_; +text: .text%__1cNCodeGeneratorIblock_do6MpnKBlockBegin__v_; +text: .text%__1cLLIR_EmitterMmust_bailout6kM_i_; +text: .text%__1cLLIR_EmitterLstart_block6MpnKBlockBegin__v_; +text: .text%__1cILIR_List2t6MpnLCompilation__v_; +text: .text%__1cLLIR_EmitterQbind_block_entry6MpnKBlockBegin__v_; +text: .text%__1cILIR_ListSbranch_destination6MpnFLabel__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenMblock_prolog6MpnKBlockBegin__v_; +text: .text%__1cIValueGenHdo_root6MpnLInstruction__v_; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_GraphBuilder.o; +text: .text%__1cIValueGenNdo_BlockBegin6MpnKBlockBegin__v_; +text: .text%__1cQDelayedSpillMark2T6M_v_: c1_CodeGenerator.o; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_IR.o; +text: .text%__1cIValueGenHdo_Base6MpnEBase__v_; +text: .text%__1cLCompilationNget_init_vars6M_pnIintStack__; +text: .text%__1cLLIR_EmitterJstd_entry6MpnHIRScope_pnIintStack_nFRInfo_5_v_; +text: 
.text%__1cILIR_ListLalign_entry6M_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListWunverified_entry_point6MnFRInfo_1_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListUverified_entry_point6M_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListLbuild_frame6M_v_: c1_LIREmitter.o; +text: .text%__1cLCompilationVvalue_stack2lir_stack6MpnKValueStack__pnNGrowableArray4CpnLLIR_OprDesc____; +text: .text%__1cNCodeGeneratorPblock_do_epilog6MpnKBlockBegin__v_; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Canonicalizer.o; +text: .text%__1cIValueGenHdo_Goto6MpnEGoto__v_; +text: .text%__1cIValueGenLmove_to_phi6MpnKValueStack_i_i_; +text: .text%__1cIValueGenWgoto_default_successor6MpnIBlockEnd_pnMCodeEmitInfo__v_; +text: .text%__1cIValueGenFdo_If6MpnCIf__v_; +text: .text%__1cIValueGenEwalk6MpnLInstruction__v_; +text: .text%__1cIValueGenMdo_LoadLocal6MpnJLoadLocal__v_; +text: .text%__1cIValueGenJload_item6MpnEItem__v_; +text: .text%__1cEItemGupdate6M_v_; +text: .text%__1cIValueGenSfpu_fanout_handled6MpnEItem__i_; +text: .text%__1cEItemEtype6kM_pnJValueType__: c1_Items.o; +text: .text%__1cIRegAllocMhas_free_reg6kMpnJValueType__i_; +text: .text%__1cNc1_AllocTableMhas_one_free6kM_i_; +text: .text%__1cIRegAllocNget_lock_temp6MpnLInstruction_pnJValueType__nFRInfo__; +text: .text%__1cIRegAllocMget_free_reg6MpnJValueType__nFRInfo__; +text: .text%__1cNc1_AllocTableIget_free6M_i_; +text: .text%__1cJRInfo2RegFdo_it6M_v_: c1_RegAlloc.o; +text: .text%__1cHLockRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cNc1_AllocTableKset_locked6Mi_v_; +text: .text%__1cLCompilationIitem2lir6MpknEItem__pnLLIR_OprDesc__; +text: .text%__1cLCompilationKitem2stack6MpknEItem__i_; +text: .text%__1cJValueTypeNas_DoubleType6M_pnKDoubleType__: c1_ValueType.o; +text: .text%__1cMas_BasicType6FpnJValueType__nJBasicType__; +text: .text%__1cJValueTypeMas_ArrayType6M_pnJArrayType__: c1_ValueType.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_Compilation.o; +text: .text%__1cLLIR_EmitterEmove6MpnLLIR_OprDesc_nFRInfo__v_; +text: .text%__1cILIR_ListEmove6MpnLLIR_OprDesc_2pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenJitem_free6MpnEItem__v_; +text: .text%__1cIRegAllocPincr_spill_lock6MnFRInfo__v_; +text: .text%__1cQChangeSpillCountGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIValueGenFrfree6MpnEItem__v_; +text: .text%__1cIRegAllocPdecr_spill_lock6MnFRInfo__v_; +text: .text%__1cIRegAllocIfree_reg6MnFRInfo__v_; +text: .text%__1cHFreeRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cNc1_AllocTableIset_free6Mi_v_; +text: .text%__1cIValueGenNset_no_result6MpnLInstruction__v_; +text: .text%__1cLLIR_EmitterFif_op6MinLInstructionJCondition_pnLLIR_OprDesc_4pnKBlockBegin_66pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListDcmp6MnMLIR_OpBranchNLIR_Condition_pnLLIR_OprDesc_4pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnKBlockBegin__v_; +text: .text%__1cIValueGenNdo_InstanceOf6MpnKInstanceOf__v_; +text: .text%__1cIValueGenVspill_values_on_stack6MpnKValueStack_nFRInfo_i_v_; +text: .text%__1cIValueGenWrlock_result_with_hint6MpnLInstruction_pknEItem__nFRInfo__; +text: .text%__1cIValueGenFrlock6MpnLInstruction_pknEItem__nFRInfo__; +text: .text%__1cIRegAllocMget_lock_reg6MpnLInstruction_pnJValueType__nFRInfo__; +text: .text%__1cLLIR_EmitterNinstanceof_op6MpnLLIR_OprDesc_2pnHciKlass_nFRInfo_5ipnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListKinstanceof6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo__v_; +text: .text%__1cIRegAllocHset_reg6MnFRInfo_ipnLInstruction__v_; 
+text: .text%__1cGSetRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cEItemNset_from_item6Mpk0_v_: c1_Items.o; +text: .text%__1cIValueGenLdo_Constant6MpnIConstant__v_; +text: .text%__1cJValueTypeRas_ObjectConstant6M_pnOObjectConstant__: c1_Canonicalizer.o; +text: .text%__1cIValueGenUcheck_float_register6MpnEItem__v_; +text: .text%__1cEItemRget_jint_constant6kM_i_; +text: .text%__1cIValueGenOdont_load_item6MpnEItem__v_; +text: .text%__1cIRegAllocLis_free_reg6kMnFRInfo__i_; +text: .text%__1cJIsFreeRegGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cNc1_AllocTableHis_free6kMi_i_; +text: .text%__1cLLIR_OprFactKvalue_type6FpnJValueType__pnLLIR_OprDesc__; +text: .text%__1cJLIR_ConstEtype6kM_nJBasicType__: c1_CacheLocals.o; +text: .text%__1cJLIR_ConstLas_constant6M_p0_: c1_CacheLocals.o; +text: .text%__1cIValueGenMdo_CheckCast6MpnJCheckCast__v_; +text: .text%__1cMCodeEmitInfo2t6MpnLLIR_Emitter_ipnIintStack_pnKValueStack_pnOExceptionScope_pnPRInfoCollection__v_; +text: .text%__1cKValueStackMcaller_state6kM_p0_; +text: .text%__1cILIR_ListJcheckcast6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo_6pnICodeStub__v_; +text: .text%__1cIValueGenNdo_StoreLocal6MpnKStoreLocal__v_; +text: .text%__1cEItemRhandle_float_kind6M_v_; +text: .text%__1cIValueGenXcan_inline_any_constant6kM_i_; +text: .text%__1cLLIR_EmitterJopr2local6MipnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListQreg2single_stack6MnFRInfo_inJBasicType__v_: c1_LIREmitter.o; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Instruction.o; +text: .text%__1cIValueGenMdo_LoadField6MpnJLoadField__v_; +text: .text%__1cLAccessFieldKlock_stack6kM_pnKValueStack__: c1_Instruction.o; +text: .text%__1cLLIR_EmitterKfield_load6MnFRInfo_pnHciField_pnLLIR_OprDesc_iiipnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListMload_mem_reg6MnFRInfo_i1nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cIRegAllocNoops_in_spill6kM_pnIintStack__; +text: .text%__1cEItemEtype6kM_pnJValueType__: c1_CodeGenerator.o; +text: .text%__1cJArrayTypeMas_ArrayType6M_p0_: c1_ValueType.o; +text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Loops.o; +text: .text%__1cNCachingChangeFvisit6MpnSInstructionVisitor__v_: c1_Loops.o; +text: .text%__1cIValueGenQdo_CachingChange6MpnNCachingChange__v_; +text: .text%__1cIValueGenPdo_ArithmeticOp6MpnMArithmeticOp__v_; +text: .text%__1cIValueGenTdo_ArithmeticOp_Int6MpnMArithmeticOp__v_; +text: .text%__1cIRegAllocRoops_in_registers6kM_pnPRInfoCollection__; +text: .text%__1cLLIR_EmitterRarithmetic_op_int6MnJBytecodesECode_pnLLIR_OprDesc_44nFRInfo__v_; +text: .text%__1cLLIR_EmitterNarithmetic_op6MnJBytecodesECode_pnLLIR_OprDesc_44inFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListDsub6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenWcan_inline_as_constant6MpnEItem__i_; +text: .text%__1cILIR_ListDadd6MpnLLIR_OprDesc_22_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenOdo_ArrayLength6MpnLArrayLength__v_; +text: .text%__1cLAccessArrayKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o; +text: .text%__1cLLIR_EmitterMarray_length6MnFRInfo_pnLLIR_OprDesc_pnMCodeEmitInfo__v_; +text: .text%__1cIValueGenOdo_LoadIndexed6MpnLLoadIndexed__v_; +text: .text%__1cJValueTypeLas_LongType6M_pnILongType__: c1_ValueType.o; +text: .text%__1cLAccessArrayKlock_stack6kM_pnKValueStack__: c1_Instruction.o; +text: .text%__1cLLIR_EmitterSlength_range_check6MpnLLIR_OprDesc_2pnMCodeEmitInfo__v_; +text: .text%__1cORangeCheckStub2t6MpnMCodeEmitInfo_nFRInfo_ii_v_; +text: .text%__1cMCodeEmitInfo2t6Mp0i_v_; +text: 
.text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnICodeStub__v_; +text: .text%__1cLLIR_EmitterMindexed_load6MnFRInfo_nJBasicType_pnLLIR_OprDesc_4pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListKshift_left6MnFRInfo_i1_v_: c1_LIREmitter_sparc.o; +text: .text%__1cILIR_ListKshift_left6MpnLLIR_OprDesc_222_v_; +text: .text%__1cLCompilationXlir_opr_for_instruction6MpnLInstruction__pnLLIR_OprDesc__; +text: .text%__1cIValueGenPlock_free_rinfo6MpnLInstruction_pnJValueType__nFRInfo__; +text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator_sparc.o; +text: .text%__1cLLIR_EmitterHgoto_op6MpnKBlockBegin_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListEjump6MpnKBlockBegin_pnMCodeEmitInfo__v_; +text: .text%__1cIValueGenJdo_Return6MpnGReturn__v_; +text: .text%__1cJValueTypeLas_VoidType6M_pnIVoidType__: c1_Canonicalizer.o; +text: .text%__1cIValueGenTresult_register_for6FpnJValueType_i_nFRInfo__; +text: .text%__1cIValueGenTcallee_return1RInfo6F_nFRInfo__; +text: .text%__1cIValueGenPload_item_force6MpnEItem_nFRInfo__v_; +text: .text%__1cIRegAllocJlock_temp6MpnLInstruction_nFRInfo__v_; +text: .text%__1cILIR_ListHint2reg6MinFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cLLIR_EmitterJreturn_op6MpnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListJreturn_op6MpnLLIR_OprDesc__v_: c1_LIREmitter.o; +text: .text%__1cNCodeGeneratorXclear_instruction_items6FpnKBlockBegin__v_; +text: .text%__1cQLIR_LocalCaching2t6MpnCIR__v_; +text: .text%__1cQLIR_LocalCachingQpreferred_locals6MpknIciMethod__pnMLocalMapping__; +text: .text%__1cIFrameMapScalling_convention6FpknIciMethod_pnIintArray__pnRCallingConvention__; +text: .text%__1cIFrameMapScalling_convention6FirknOBasicTypeArray_pnIintArray__pnRCallingConvention__; +text: .text%__1cIintArray2t6Mki1_v_: c1_FrameMap_sparc.o; +text: .text%__1cQArgumentLocation2t6Mci_v_: c1_FrameMap_sparc.o; +text: .text%__1cQArgumentLocationVincoming_reg_location6kM_nFRInfo__; +text: .text%__1cQArgumentLocationPis_register_arg6kM_i_; +text: .text%__1cMLocalMappingQinit_cached_regs6M_v_; +text: .text%__1cPRegisterManager2t6M_v_; +text: .text%__1cPRegisterManagerElock6MnFRInfo__v_; +text: .text%__1cQLIR_LocalCachingVcompute_cached_locals6M_v_; +text: .text%__1cQLIR_LocalCachingMcache_locals6M_v_; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_IR.o; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_Canonicalizer.o; +text: .text%__1cNCachingChangeQas_CachingChange6M_p0_: c1_Loops.o; +text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_Instruction.o; +text: .text%__1cRBlockListScanInfo2t6MpnJBlockList__v_: c1_CacheLocals.o; +text: .text%__1cOLIR_OprRefList2t6M_v_: c1_CacheLocals.o; +text: .text%__1cRBlockListScanInfoItraverse6MpnKBlockBegin_pnKLIR_OpList__v_: c1_CacheLocals.o; +text: .text%__1cLLIR_OpLabelFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cHLIR_Op1Fvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cHLIR_Op2Fvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cMLIR_OpBranchFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cORangeCheckStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_sparc.o; +text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_CodeStubs_sparc.o; +text: .text%__1cNc1_AllocTableFmerge6Mp0_v_; +text: .text%__1cGLIR_OpFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cPLIR_OpTypeCheckFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cTSimpleExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_sparc.o; +text: 
.text%__1cQLIR_LocalCachingXcache_locals_for_blocks6MpnJBlockList_pnPRegisterManager_i_pnMLocalMapping__; +text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_Loops.o; +text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Loops.o; +text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_Loops.o; +text: .text%__1cLInstructionKas_Convert6M_pnHConvert__: c1_Loops.o; +text: .text%__1cLInstructionPas_ArithmeticOp6M_pnMArithmeticOp__: c1_Loops.o; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_IR.o; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_IR.o; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_IR.o; +text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_IR.o; +text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_IR.o; +text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_IR.o; +text: .text%__1cKScanBlocksQmost_used_locals6M_pnKALocalList__; +text: .text%__1cGALocalUsort_by_access_count6Fpp02_i_: c1_ScanBlocks.o; +text: .text%__1cKScanBlocksWmost_used_float_locals6M_pnKALocalList__; +text: .text%__1cGALocalUsort_by_access_count6Fpp02_i_: c1_CacheLocals.o; +text: .text%__1cQLIR_LocalCachingPcompute_caching6MpnKALocalList_pnPRegisterManager__pnMLocalMapping__; +text: .text%__1cPRegisterManagerMnum_free_cpu6M_i_; +text: .text%__1cGALocalNsort_by_index6Fpp02_i_: c1_CacheLocals.o; +text: .text%__1cMLocalMappingNget_cache_reg6kMinIValueTag__nFRInfo__; +text: .text%__1cPRegisterManagerLis_free_reg6MnFRInfo__i_; +text: .text%__1cQLIR_LocalCachingQadd_at_all_names6FpnPRInfoCollection_inFRInfo_pnMWordSizeList__v_; +text: .text%__1cPRegisterManagerMhas_free_reg6MnIValueTag__i_; +text: .text%__1cPRegisterManagerNlock_free_reg6MnIValueTag__nFRInfo__; +text: .text%__1cMLocalMappingFmerge6Mp0_v_; +text: .text%__1cSLocalMappingSetterIblock_do6MpnKBlockBegin__v_; +text: .text%__1cMLocalMappingNget_cache_reg6kMi_nFRInfo__; +text: .text%__1cMLocalMappingEjoin6Mp0_v_; +text: .text%__1cQLIR_LocalCachingYinsert_transition_blocks6M_v_; +text: .text%__1cPBlockTransitionIblock_do6MpnKBlockBegin__v_: c1_CacheLocals.o; +text: .text%__1cGLIR_OpLas_OpBranch6M_pnMLIR_OpBranch__: c1_LIR.o; +text: .text%__1cMLocalMappingPemit_transition6FpnILIR_List_p03pnCIR__v_; +text: .text%__1cCIRThighest_used_offset6kM_i_; +text: .text%__1cMLIR_OpBranchLas_OpBranch6M_p0_: c1_LIR.o; +text: .text%__1cNResourceArrayJremove_at6MIi_v_; +text: .text%__1cHIRScopeJmax_stack6kM_i_; +text: .text%__1cNLIR_Optimizer2t6MpnCIR__v_; +text: .text%__1cRLIR_PeepholeState2t6M_v_; +text: .text%__1cOLIR_OprRefList2t6M_v_: c1_LIROptimizer.o; +text: .text%__1cNLIR_OptimizerIoptimize6M_v_; +text: .text%__1cNLIR_OptimizerIoptimize6MpnJBlockList__v_; +text: .text%__1cNLIR_OptimizerIoptimize6MpnKBlockBegin__v_; +text: .text%__1cNLIR_OptimizerKprocess_op6M_v_; +text: .text%__1cGLIR_OpGas_Op16M_pnHLIR_Op1__: c1_LIR.o; +text: .text%__1cLLIR_OpLabelKas_OpLabel6M_p0_: c1_LIR.o; +text: .text%__1cRLIR_PeepholeStateYset_disable_optimization6Mi_v_; +text: .text%__1cLLIR_OpLabelJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerMemit_opLabel6MpnLLIR_OpLabel__v_; +text: .text%__1cHLIR_Op0Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op06MpnHLIR_Op0__v_; +text: .text%__1cHLIR_Op2Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op26MpnHLIR_Op2__v_; +text: .text%__1cNLIR_OptimizerKhandle_opr6MpnLLIR_OprDesc_nQLIR_OpVisitStateHOprMode__2_; +text: 
.text%__1cNLIR_OptimizerJis_cached6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateUrecord_opr_reference6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateLdefining_op6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateJreg2index6MpnLLIR_OprDesc__i_; +text: .text%__1cNLIR_OptimizerMblock_epilog6M_v_; +text: .text%__1cHLIR_Op1Gas_Op16M_p0_: c1_LIR.o; +text: .text%__1cHLIR_Op1Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op16MpnHLIR_Op1__v_; +text: .text%__1cNLIR_OptimizerMprocess_move6MpnHLIR_Op1__v_; +text: .text%__1cMLocalMappingNget_cache_reg6kMpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerRreplace_stack_opr6MpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerNoptimize_move6MpnHLIR_Op1_rpnLLIR_OprDesc_5_i_; +text: .text%__1cRLIR_PeepholeStatebFequivalent_register_or_constant6MpnLLIR_OprDesc__2_; +text: .text%__1cRLIR_PeepholeStateOequivalent_opr6MpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerRresult_substitute6M_v_; +text: .text%__1cRLIR_PeepholeStateMkill_operand6MpnLLIR_OprDesc_i_v_; +text: .text%__1cRLIR_PeepholeStateQkill_equivalents6MpnLLIR_OprDesc__v_; +text: .text%__1cRLIR_PeepholeStateNkill_register6Mi_v_; +text: .text%__1cRLIR_PeepholeStateSrecord_defining_op6MpnLLIR_OprDesc_i_v_; +text: .text%__1cRLIR_PeepholeStatePset_defining_op6Mii_v_; +text: .text%__1cRLIR_PeepholeStateHdo_move6MpnLLIR_OprDesc_2_v_; +text: .text%__1cRLIR_PeepholeStateTequivalent_register6MpnLLIR_OprDesc__2_; +text: .text%__1cNLIR_OptimizerKmaybe_opto6MpnLLIR_OprDesc_2_2_: c1_LIROptimizer_sparc.o; +text: .text%__1cNLIR_OptimizerMis_cache_reg6MpnLLIR_OprDesc__i_; +text: .text%__1cMLocalMappingMis_cache_reg6kMpnLLIR_OprDesc__i_; +text: .text%__1cMLocalMappingMis_cache_reg6kMnFRInfo__i_; +text: .text%__1cFRInfoLas_register6kM_pnMRegisterImpl__; +text: .text%__1cNLIR_OptimizerKallow_opto6M_i_; +text: .text%__1cNLIR_OptimizerLrecord_opto6MpnLLIR_OprDesc_2_2_; +text: .text%__1cMLIR_OpBranchJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerNemit_opBranch6MpnMLIR_OpBranch__v_; +text: .text%__1cNLIR_OptimizerMselect_delay6MpnMCodeEmitInfo__pnGLIR_Op__; +text: .text%__1cNLIR_OptimizerFop_at6Mi_pnGLIR_Op__; +text: .text%__1cNLIR_AssemblerVis_single_instruction6FpnGLIR_Op_pnIFrameMap__i_; +text: .text%__1cNLIR_OptimizerMdelayed_emit6MpnGLIR_Op__v_; +text: .text%__1cLLIR_OpDelayJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerKemit_delay6MpnLLIR_OpDelay__v_; +text: .text%__1cNLIR_OptimizerQopr_live_on_exit6MpnLLIR_OprDesc__i_; +text: .text%__1cPLIR_OpTypeCheckJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_; +text: .text%__1cNLIR_OptimizerFvisit6M_v_: c1_LIROptimizer_sparc.o; +text: .text%__1cKLIR_OprPtrKas_address6M_pnLLIR_Address__: c1_CacheLocals.o; +text: .text%__1cNLIR_AssemblerRis_small_constant6FpnLLIR_OprDesc__i_; +text: .text%__1cNLIR_OptimizerOemit_code_stub6MpnICodeStub__v_; +text: .text%__1cNLIR_OptimizerLhandle_info6MpnMCodeEmitInfo__v_; +text: .text%__1cRLIR_PeepholeStateNincrement_ref6Mi_v_; +text: .text%__1cMCodeEmitInfoRset_local_mapping6MpnMLocalMapping__v_; +text: .text%__1cNLIR_OptimizerUrecord_register_oops6MpnMCodeEmitInfo__v_; +text: .text%__1cLLIR_AddressKas_address6M_p0_: c1_LIR.o; +text: .text%__1cRLIR_PeepholeStateSequivalent_address6MpnLLIR_OprDesc__2_; +text: .text%__1cLLIR_AddressEtype6kM_nJBasicType__: c1_LIR.o; +text: .text%__1cKLIR_OprPtrLas_constant6M_pnJLIR_Const__: c1_LIR.o; +text: 
.text%__1cORangeCheckStubXis_exception_throw_stub6kM_i_: c1_CodeStubs_sparc.o; +text: .text%__1cLLIR_OpDelayKas_OpDelay6M_p0_: c1_LIR.o; +text: .text%__1cNLIR_OptimizerKreplace_op6MipnGLIR_Op__v_; +text: .text%__1cNLIR_Assembler2t6MpnLCompilation_pnLCodeOffsets__v_; +text: .text%__1cNLIR_AssemblerJemit_code6MpnJBlockList__v_; +text: .text%__1cQCollectConstantsIblock_do6MpnKBlockBegin__v_: c1_LIRAssembler.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_IR.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Canonicalizer.o; +text: .text%__1cJValueTypeNas_DoubleType6M_pnKDoubleType__: c1_Canonicalizer.o; +text: .text%__1cJValueTypeMas_FloatType6M_pnJFloatType__: c1_Canonicalizer.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Instruction.o; +text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Loops.o; +text: .text%__1cNLIR_AssemblerOemit_constants6M_v_; +text: .text%__1cNConstantTableMemit_entries6MpnOMacroAssembler_i_v_; +text: .text%__1cLLIR_CodeGenIblock_do6MpnKBlockBegin__v_; +text: .text%__1cNLIR_AssemblerMemit_opLabel6MpnLLIR_OpLabel__v_; +text: .text%__1cNLIR_AssemblerIemit_op06MpnHLIR_Op0__v_; +text: .text%__1cNLIR_AssemblerIemit_op26MpnHLIR_Op2__v_; +text: .text%__1cNLIR_AssemblerMcheck_icache6MpnMRegisterImpl_2_i_; +text: .text%__1cRC1_MacroAssemblerSinline_cache_check6MpnMRegisterImpl_2_v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: c1_MacroAssembler_sparc.o; +text: .text%__1cOMacroAssemblerIjumpl_to6MrnHAddress_pnMRegisterImpl_i_v_: c1_MacroAssembler_sparc.o; +text: .text%__1cRC1_MacroAssemblerOverified_entry6M_v_; +text: .text%__1cNLIR_AssemblerbBinitial_frame_size_in_bytes6M_i_; +text: .text%__1cIFrameMapJframesize6kM_i_; +text: .text%__1cRC1_MacroAssemblerLbuild_frame6Mi_v_; +text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: c1_Compiler.o; +text: .text%__1cNLIR_AssemblerVsetup_locals_at_entry6M_v_; +text: .text%__1cIFrameMapYsignature_type_array_for6FpknIciMethod__pnNBasicTypeList__; +text: .text%__1cNLIR_AssemblerMsetup_locals6MpnRCallingConvention_pnOBasicTypeArray__v_; +text: .text%__1cRDependenceBreakerPget_store_order6MnFRInfo__pnSDependenceEdgeList__: c1_LIRAssembler_sparc.o; +text: .text%__1cNLIR_AssemblerHreg2reg6MnFRInfo_1_v_; +text: .text%__1cQArgumentLocationMis_stack_arg6kM_i_; +text: .text%__1cNLIR_AssemblerHcomp_op6MnMLIR_OpBranchNLIR_Condition_pnLLIR_OprDesc_4nJBasicType__v_; +text: .text%__1cNLIR_AssemblerNemit_opBranch6MpnMLIR_OpBranch__v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: c1_LIRAssembler_sparc.o; +text: .text%__1cNLIR_AssemblerKemit_delay6MpnLLIR_OpDelay__v_; +text: .text%__1cNLIR_AssemblerLcode_offset6kM_i_; +text: .text%__1cNLIR_AssemblerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_; +text: .text%__1cIciObjectIencoding6M_pnI_jobject__; +text: .text%__1cLOopRecorderKfind_index6MpnI_jobject__i_; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: c1_LIRAssembler_sparc.o; +text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cOoop_RelocationJpack_data6M_i_; +text: .text%__1cNLIR_AssemblerEload6MpnMRegisterImpl_i2nJBasicType_pnMCodeEmitInfo__i_; +text: .text%__1cNLIR_AssemblerIemit_op16MpnHLIR_Op1__v_; +text: .text%__1cNLIR_AssemblerHmove_op6MpnLLIR_OprDesc_2nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: 
.text%__1cNLIR_AssemblerOemit_code_stub6MpnICodeStub__v_; +text: .text%__1cTSimpleExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_sparc.o; +text: .text%__1cICodeStubLset_code_pc6MpC_v_: c1_CodeStubs_sparc.o; +text: .text%__1cICodeStubMis_call_stub6kM_i_: c1_CodeStubs_sparc.o; +text: .text%__1cNLIR_AssemblerHmem2reg6MpnLLIR_Address_nFRInfo_nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerbDadd_debug_info_for_null_check6MipnMCodeEmitInfo__v_; +text: .text%__1cVImplicitNullCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_sparc.o; +text: .text%__1cNLIR_AssemblerIarith_op6MnILIR_Code_pnLLIR_OprDesc_33pnMCodeEmitInfo__v_; +text: .text%__1cORangeCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_sparc.o; +text: .text%__1cNLIR_AssemblerIshift_op6MnILIR_Code_nFRInfo_i2_v_; +text: .text%__1cNLIR_AssemblerOsafepoint_poll6MnFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerZadd_debug_info_for_branch6MpnMCodeEmitInfo__v_; +text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__: c1_Runtime1.o; +text: .text%__1cMCodeEmitInfoRrecord_debug_info6MpnYDebugInformationRecorder_ii_v_; +text: .text%__1cMCodeEmitInfoScompute_debug_info6M_v_; +text: .text%__1cMCodeEmitInfoOcreate_oop_map6M_pnGOopMap__; +text: .text%__1cIFrameMapRoop_map_arg_count6M_i_; +text: .text%__1cKciLocalMapNindex_for_bci6kMi_i_; +text: .text%__1cGOopMapHset_oop6MnHOptoRegEName_ii_v_; +text: .text%__1cMCodeEmitInfoSappend_scope_value6MpnLLIR_OprDesc_pnNGrowableArray4CpnKScopeValue____v_; +text: .text%__1cLLIR_OprDescGis_oop6kM_i_; +text: .text%__1cMCodeEmitInfoYscope_value_for_register6MnFRInfo_ppnKScopeValue_4nILocationEType__v_; +text: .text%__1cMCodeEmitInfobCcompute_debug_info_for_scope6MpnHIRScope_ipnNGrowableArray4CpnKScopeValue___inGValues_i_pnQIRScopeDebugInfo__; +text: .text%__1cSciGenerateLocalMapUbytecode_is_gc_point6FnJBytecodesECode_ii_i_; +text: .text%__1cMLocalMappingbEget_cache_reg_for_local_offset6kMi_nFRInfo__; +text: .text%__1cGOopMapJdeep_copy6M_p0_; +text: .text%__1cGOopMap2t6Mn0ANDeepCopyToken_p0_v_; +text: .text%__1cYDebugInformationRecorderNadd_safepoint6MiipnGOopMap__v_; +text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__; +text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__; +text: .text%__1cYDebugInformationRecorderOdescribe_scope6MpnIciMethod_ipnKDebugToken_44_v_; +text: .text%__1cLOopRecorderOallocate_index6MpnI_jobject__i_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfoRec.o; +text: .text%__1cLCompilationbEadd_exception_handlers_for_pco6MiipnOExceptionScope__v_; +text: .text%__1cNLIR_AssemblerJconst2reg6MpnJLIR_Const_nFRInfo_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerJreturn_op6MnFRInfo_i_v_; +text: .text%__1cWpoll_return_RelocationEtype6M_nJrelocInfoJrelocType__: c1_Runtime1.o; +text: .text%__1cRC1_MacroAssemblerLmethod_exit6Mi_v_; +text: .text%__1cRreturn_RelocationEtype6M_nJrelocInfoJrelocType__: c1_Runtime1.o; +text: .text%__1cLCompilationQemit_code_epilog6MpnNLIR_Assembler__v_; +text: .text%__1cNLIR_AssemblerUemit_slow_case_stubs6M_v_; +text: .text%__1cTSimpleExceptionStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNLIR_AssemblerNadd_call_info6MipnMCodeEmitInfo__v_; +text: 
.text%__1cVImplicitNullCheckStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cWImplicitExceptionTableGappend6MII_v_; +text: .text%__1cZresource_reallocate_bytes6FpcII_0_; +text: .text%__1cFArenaIArealloc6MpvII_1_; +text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cORangeCheckStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNLIR_AssemblerWemit_exception_handler6M_i_; +text: .text%__1cRC1_MacroAssemblerRexception_handler6Mii_v_; +text: .text%__1cHnmethodZsize_of_exception_handler6F_i_; +text: .text%__1cNLIR_AssemblerPemit_call_stubs6M_v_; +text: .text%__1cNLIR_AssemblerbQinterpreter_to_compiler_calling_convention6MpnIciMethod__v_; +text: .text%__1cQArgumentLocationVoutgoing_reg_location6kM_nFRInfo__; +text: .text%__1cNLIR_AssemblerKemit_stubs6MpnMCodeStubList__v_; +text: .text%__1cLCompilationbEgenerate_exception_range_table6M_v_; +text: .text%__1cOExceptionScopeGequals6kMp0_i_; +text: .text%__1cLCompilationbBadd_exception_range_entries6MiipnOExceptionScope_ip2pi_v_; +text: .text%__1cTExceptionRangeTablebCcompute_modified_at_call_pco6Fii_i_; +text: .text%__1cOExceptionScopeGlength6kM_i_; +text: .text%__1cOExceptionScopeMcaller_scope6kM_p0_; +text: .text%__1cLLIR_EmitterKframe_size6M_i_; +text: .text%__1cNLIR_Assembler2T6M_v_; +text: .text%__1cFciEnvPregister_method6MpnIciMethod_iiiiiipnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler_ii_v_; +text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_; +text: .text%__1cHnmethodLnew_nmethod6FnMmethodHandle_iiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__p0_; +text: .text%__1cLOopRecorderIoop_size6M_i_; +text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_; +text: .text%__1cYDebugInformationRecorderJdata_size6M_i_; +text: .text%__1cHnmethod2t6MpnNmethodOopDesc_iiiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__v_; +text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_; +text: .text%__1cOoop_RelocationLunpack_data6M_v_; +text: .text%__1cKRelocationNunpack_2_ints6Mri1_v_: relocInfo.o; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: c1_Runtime1.o; +text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_; +text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_; +text: .text%__1cICodeBlobJcopy_oops6MppnI_jobject_i_v_; +text: .text%__1cIUniverseMnon_oop_word6F_pv_; +text: .text%__1cCosXnon_memory_address_word6F_pc_; +text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_; +text: .text%__1cGPcDesc2t6Miii_v_; +text: .text%__1cHnmethodKcopy_pc_at6MipnGPcDesc__v_; +text: .text%__1cHnmethodSresolve_JNIHandles6M_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: nmethod.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nmethod.o; +text: .text%__1cOoop_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cICodeBlobLoop_addr_at6kMi_ppnHoopDesc__; +text: .text%__1cODataRelocationJset_value6MpC_v_: relocInfo.o; +text: .text%__1cOoop_RelocationGoffset6M_i_: relocInfo.o; +text: .text%__1cKRelocationRpd_set_data_value6MpCi_v_; +text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_; +text: .text%__1cHnmethodKis_nmethod6kM_i_: nmethod.o; +text: .text%__1cHnmethodUnumber_of_dependents6kM_i_: nmethod.o; +text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_; +text: 
.text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cTExceptionRangeTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cGEventsDlog6FpkcE_v_: nmethod.o; +text: .text%__1cNmethodOopDescIset_code6MpnHnmethod__v_; +text: .text%__1cLCompilation2T6M_v_; +text: .text%__1cFArena2T6M_v_; +text: .text%__1cICHeapObj2k6Fpv_v_; +text: .text%__1cTExceptionRangeTable2T6M_v_; +text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_; +text: .text%__1cFciEnv2T6M_v_; +text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_; +text: .text%__1cHnmethodKtotal_size6kM_i_; +text: .text%__1cHnmethodJcode_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodOexception_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodJstub_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodQscopes_data_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodPscopes_pcs_size6kM_i_: nmethod.o; +text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_; +text: .text%__1cKJNIHandlesOdestroy_global6FpnI_jobject_i_v_; +text: .text%jni_NewByteArray: jni.o; +text: .text%jni_SetByteArrayRegion: jni.o; +text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cNObjectMonitorFenter6MpnGThread__v_; +text: .text%jni_NewObject: jni.o; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_3pnRJavaCallArguments_nGHandle_6_6_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%JVM_FillInStackTrace; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_; +text: .text%__1cIUniverseWis_out_of_memory_error6FnGHandle__i_; +text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_; +text: .text%__1cKJavaThreadGactive6F_p0_; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_; +text: .text%__1cVPreserveExceptionMark2T6M_v_; +text: .text%__1cGThreadVset_pending_exception6MpnHoopDesc_pkci_v_; +text: .text%__1cGEventsDlog6FpkcE_v_: exceptions.o; +text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_; +text: .text%__1cSThreadLocalStorageGthread6F_pnGThread__: assembler_sparc.o; +text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_; +text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_; +text: .text%__1cKJavaThreadNreguard_stack6MpC_i_; +text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_; +text: .text%__1cFframeZinterpreter_frame_set_bcx6Mi_v_; +text: .text%__1cRComputeEntryStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_; +text: .text%__1cQComputeCallStackHdo_void6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cMGraphBuilderHif_zero6MpnJValueType_nLInstructionJCondition__v_; +text: .text%__1cMGraphBuilderMnew_instance6Mi_v_; +text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cIciSymbolHbyte_at6Mi_i_; +text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__; +text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__; +text: .text%__1cGciTypeMis_classless6kM_i_: ciInstanceKlass.o; +text: .text%__1cLNewInstanceFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: 
.text%__1cNCanonicalizerOdo_NewInstance6MpnLNewInstance__v_; +text: .text%__1cLInstructionEhash6kM_i_: c1_Instruction.o; +text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_Instruction.o; +text: .text%__1cLNewInstanceIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cMGraphBuilderGinvoke6MnJBytecodesECode__v_; +text: .text%__1cQciBytecodeStreamKget_method6kM_pnIciMethod__; +text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__; +text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_; +text: .text%__1cNciMethodKlassEmake6F_p0_; +text: .text%__1cIciObjectMis_classless6kM_i_: ciMethod.o; +text: .text%__1cQciBytecodeStreambAget_declared_method_holder6M_pnHciKlass__; +text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__; +text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__; +text: .text%__1cKValueStackNpop_arguments6Mi_pnGValues__; +text: .text%__1cGInvoke2t6MnJBytecodesECode_pnJValueType_pnLInstruction_pnGValues_iiii_v_; +text: .text%__1cGInvokeFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerJdo_Invoke6MpnGInvoke__v_; +text: .text%__1cGInvokeJas_Invoke6M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_Instruction.o; +text: .text%__1cGInvokeIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cMGraphBuilderIthrow_op6M_v_; +text: .text%__1cFThrowFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerIdo_Throw6MpnFThrow__v_; +text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_Instruction.o; +text: .text%__1cFThrowIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_Instruction.o; +text: .text%__1cFThrowIas_Throw6M_p0_: c1_Instruction.o; +text: .text%__1cKValueStackMclear_stores6M_v_; +text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_Instruction.o; +text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_Instruction.o; +text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorOdo_NewInstance6MpnLNewInstance__v_; +text: .text%__1cGInvokePinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorJdo_Invoke6MpnGInvoke__v_; +text: .text%__1cTNullCheckEliminatorNhandle_Invoke6MpnGInvoke__v_; +text: .text%__1cFThrowPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorIdo_Throw6MpnFThrow__v_; +text: .text%__1cFThrowPstate_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cJValueTypeLas_VoidType6M_pnIVoidType__: c1_ValueType.o; +text: .text%__1cFRInfoIoverlaps6kMk0_i_; +text: .text%__1cILIR_ListHreg2reg6MnFRInfo_1nJBasicType__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenOdo_NewInstance6MpnLNewInstance__v_; +text: .text%__1cIValueGenYset_with_result_register6MpnLInstruction__nFRInfo__; +text: .text%__1cIValueGenMreturn1RInfo6F_nFRInfo__; +text: .text%__1cIRegAllocNlock_register6MpnLInstruction_nFRInfo__v_; +text: .text%__1cLLIR_EmitterMnew_instance6MnFRInfo_pnPciInstanceKlass_1111pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListNoop2reg_patch6MpnI_jobject_nFRInfo_pnMCodeEmitInfo__v_; +text: 
.text%__1cPNewInstanceStub2t6MnFRInfo_pnLLIR_OprDesc_pnPciInstanceKlass_pnMCodeEmitInfo_nIRuntime1GStubID__v_; +text: .text%__1cIValueGenJdo_Invoke6MpnGInvoke__v_; +text: .text%__1cIValueGenWinvoke_visit_arguments6MpnGInvoke_pnRCallingConvention__pnJItemArray__; +text: .text%__1cIValueGenXis_caller_save_register6FnFRInfo__i_; +text: .text%__1cIFrameMapXis_caller_save_register6FnFRInfo__i_; +text: .text%__1cIValueGenUtry_caller_to_callee6MpnLInstruction_nFRInfo__i_; +text: .text%__1cIRootItemHas_root6M_p0_: c1_CodeGenerator.o; +text: .text%__1cNc1_AllocTableThas_one_free_masked6kMnKc1_RegMask__i_; +text: .text%__1cIRegAllocKreallocate6MnFRInfo_nKc1_RegMask__1_; +text: .text%__1cLGetValueForGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cLGetRefCountGdo_cpu6Mi_v_: c1_RegAlloc.o; +text: .text%__1cNc1_AllocTablePget_free_masked6MnKc1_RegMask__i_; +text: .text%__1cIVoidTypeLas_VoidType6M_p0_: c1_ValueType.o; +text: .text%__1cLLIR_EmitterHcall_op6MnJBytecodesECode_pknOBasicTypeArray_pnMCodeEmitInfo_iiinFRInfo_pnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListKnull_check6MnFRInfo_pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListQcall_opt_virtual6MnFRInfo_pnLLIR_OprDesc_pCpnMCodeEmitInfo_pnOStaticCallStub__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenIdo_Throw6MpnFThrow__v_; +text: .text%__1cLNewInstanceKexact_type6kM_pnGciType__; +text: .text%__1cOExceptionScopeLcould_catch6kMpnPciInstanceKlass_i_i_; +text: .text%__1cIValueGenRexceptionOopRInfo6F_nFRInfo__; +text: .text%__1cLNewInstanceOas_NewInstance6M_p0_: c1_Instruction.o; +text: .text%__1cIValueGenQexceptionPcRInfo6F_nFRInfo__; +text: .text%__1cILIR_ListPthrow_exception6MnFRInfo_1pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cPNewInstanceStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_sparc.o; +text: .text%__1cOLIR_OpJavaCallFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_LIR.o; +text: .text%__1cOStaticCallStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_sparc.o; +text: .text%__1cIFrameMapWcaller_save_cpu_reg_at6Fi_pnLLIR_OprDesc__; +text: .text%__1cIVoidTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_Instruction.o; +text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_Instruction.o; +text: .text%__1cPRegisterManagerMlock_all_fpu6M_v_; +text: .text%__1cICodeStubXis_exception_throw_stub6kM_i_: c1_CodeStubs_sparc.o; +text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIROptimizer.o; +text: .text%__1cOLIR_OpJavaCallJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerJemit_call6MpnOLIR_OpJavaCall__v_; +text: .text%__1cNLIR_AssemblerZjobject2reg_with_patching6MpnMRegisterImpl_pnMCodeEmitInfo__v_; +text: .text%__1cMPatchingStubQalign_patch_site6MpnOMacroAssembler__v_; +text: .text%__1cNLIR_AssemblerPpatching_epilog6MpnMPatchingStub_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_; +text: .text%__1cMPatchingStubHinstall6MpnOMacroAssembler_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_: c1_LIRAssembler.o; +text: .text%__1cPNewInstanceStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_sparc.o; +text: .text%__1cNLIR_AssemblerJemit_call6MpnOLIR_OpJavaCall__v_; +text: .text%__1cNLIR_AssemblerKalign_call6MnILIR_Code__v_; +text: .text%__1cICodeStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_sparc.o; +text: .text%__1cOStaticCallStubLset_code_pc6MpC_v_: c1_CodeStubs_sparc.o; +text: .text%__1cOStaticCallStubMis_call_stub6kM_i_: c1_CodeStubs_sparc.o; +text: 
.text%__1cNLIR_AssemblerEcall6MpCnJrelocInfoJrelocType_pnMCodeEmitInfo__v_; +text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cKRelocationJpack_data6M_i_: relocInfo.o; +text: .text%__1cNLIR_AssemblerIthrow_op6MnFRInfo_1pnMCodeEmitInfo_i_v_; +text: .text%__1cMCodeEmitInfoQadd_register_oop6MnFRInfo__v_; +text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cYinternal_word_RelocationJpack_data6M_i_; +text: .text%__1cMPatchingStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cRAbstractAssemblerGa_byte6Mi_v_; +text: .text%__1cRNativeGeneralJumpUinsert_unconditional6FpC1_v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: c1_CodeStubs_sparc.o; +text: .text%__1cNRelocIterator2t6MpnKCodeBuffer_pC3_v_; +text: .text%__1cJrelocInfobDchange_reloc_info_for_address6FpnNRelocIterator_pCn0AJrelocType_4_v_; +text: .text%__1cPNewInstanceStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cOStaticCallStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cWstatic_stub_RelocationJpack_data6M_i_; +text: .text%__1cOMacroAssemblerUallocate_oop_address6MpnI_jobject_pnMRegisterImpl__nHAddress__; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: c1_CodeStubs_sparc.o; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: c1_CodeStubs_sparc.o; +text: .text%__1cOCallRelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_; +text: .text%__1cYinternal_word_RelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cYinternal_word_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cODataRelocationGoffset6M_i_: relocInfo.o; +text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: reflection.o; +text: .text%__1cNArgumentCountDset6MinJBasicType__v_: reflection.o; +text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__; +text: .text%__1cNSignatureInfoHdo_long6M_v_: reflection.o; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: reflection.o; +text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cQSystemDictionaryQjava_mirror_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__; +text: .text%__1cUGenericGrowableArrayGgrow646Mi_v_; +text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__; +text: .text%__1cQComputeCallStackHdo_char6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__; +text: .text%__1cMLinkResolverbNlinktime_resolve_virtual_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cIciMethodXfind_monomorphic_target6MpnHciKlass_22_p0_; +text: .text%__1cDCHAManalyze_call6FnLKlassHandle_11nMsymbolHandle_2_pnJCHAResult__; +text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cJCHAResultOis_monomorphic6kM_i_; +text: .text%__1cJCHAResultSmonomorphic_target6kM_nMmethodHandle__; +text: .text%__1cIciMethodJwill_link6MpnHciKlass_2nJBytecodesECode__i_; +text: .text%__1cIciMethodOshould_exclude6M_i_; +text: 
.text%__1cIciMethodPcan_be_compiled6M_i_; +text: .text%__1cMGraphBuilderVtry_inline_intrinsics6MpnIciMethod__i_; +text: .text%__1cMGraphBuilderPtry_inline_full6MpnIciMethod_i_i_; +text: .text%__1cIciMethodIhas_jsrs6kM_i_; +text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__; +text: .text%__1cJNullCheckFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_NullCheck6MpnJNullCheck__v_; +text: .text%__1cJNullCheckIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cKObjectTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_ValueStack.o; +text: .text%__1cMGraphBuilderKpush_scope6MpnIciMethod_pnKBlockBegin_i_v_; +text: .text%__1cKValueStackKpush_scope6MpnHIRScope__p0_; +text: .text%__1cOExceptionScopeKpush_scope6M_p0_; +text: .text%__1cHIRScopeXcompute_lock_stack_size6M_v_; +text: .text%__1cOExceptionScopeEinit6M_v_; +text: .text%__1cMGraphBuilderJScopeDataLnum_returns6M_i_; +text: .text%__1cNCanonicalizerHdo_Goto6MpnEGoto__v_; +text: .text%__1cMGraphBuilderJScopeDataQincr_num_returns6M_v_; +text: .text%__1cKValueStackJpop_scope6Mii_p0_; +text: .text%__1cGValuesIpush_all6Mpk0_v_: c1_ValueStack.o; +text: .text%__1cOExceptionScopeJpop_scope6M_p0_; +text: .text%__1cLCompilationVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cLInstructionEprev6MpnKBlockBegin__p0_; +text: .text%__1cKBlockBeginUresolve_substitution6M_v_; +text: .text%__1cZresolve_substituted_value6FppnLInstruction__v_: c1_Instruction.o; +text: .text%__1cLInstructionFsubst6M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionPother_values_do6MpFpp0_v_v_: c1_GraphBuilder.o; +text: .text%__1cLInstructionPstate_values_do6MpFpp0_v_v_: c1_GraphBuilder.o; +text: .text%__1cLInstructionPstate_values_do6MpFpp0_v_v_: c1_Instruction.o; +text: .text%__1cIConstantPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cIBlockEndPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cHIntTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cJNullCheckPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorMdo_NullCheck6MpnJNullCheck__v_; +text: .text%__1cHIRScopeNtop_scope_bci6kM_i_; +text: .text%__1cIValueGenMdo_NullCheck6MpnJNullCheck__v_; +text: .text%__1cJNullCheckKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o; +text: .text%__1cLLIR_EmitterKnull_check6MpnLLIR_OprDesc_pnMCodeEmitInfo__v_; +text: .text%__1cIValueGenbDsafepoint_poll_needs_register6F_i_; +text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cIValueGenQlock_spill_rinfo6MpnLInstruction_nFRInfo__v_; +text: .text%__1cILIR_ListQreg2single_stack6MnFRInfo_inJBasicType__v_: c1_CacheLocals.o; +text: .text%__1cILIR_ListQsingle_stack2reg6MinFRInfo_nJBasicType__v_; +text: .text%__1cRLIR_PeepholeStatePkill_stack_slot6Mi_v_; +text: .text%__1cIFrameMapQaddress_for_name6kMiii_nHAddress__; +text: .text%__1cIFrameMapSfp_offset_for_name6kMiii_i_; +text: .text%__1cIFrameMapPnum_local_names6kM_i_; +text: .text%__1cIFrameMapNlocal_to_slot6kMii_i_; +text: .text%__1cIFrameMapQmake_new_address6kMi_nHAddress__; +text: .text%__1cRLIR_PeepholeStateTmark_safe_to_delete6Mi_v_; +text: .text%__1cNLIR_AssemblerJreg2stack6MnFRInfo_inJBasicType__v_; +text: 
.text%__1cNLIR_AssemblerFstore6MpnMRegisterImpl_2inJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerJstack2reg6MpnLLIR_OprDesc_2nJBasicType__v_; +text: .text%__1cIFrameMapTsingle_word_regname6kMi_nHOptoRegEName__; +text: .text%__1cIFrameMapZlocation_for_local_offset6kMinILocationEType_p1_i_; +text: .text%__1cIFrameMapWlocation_for_fp_offset6kMinILocationEType_p1_i_; +text: .text%__1cILocationVlegal_offset_in_bytes6Fi_i_; +text: .text%jni_GetPrimitiveArrayCritical: jni.o; +text: .text%jni_ReleasePrimitiveArrayCritical: jni.o; +text: .text%jni_SetBooleanField: jni.o; +text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_: interpreterRT_sparc.o; +text: .text%JVM_IsNaN; +text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_: interpreterRT_sparc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_double6M_v_; +text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_Optimizer.o; +text: .text%__1cEIfOpPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cEIfOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorHdo_IfOp6MpnEIfOp__v_; +text: .text%__1cIValueGenHdo_IfOp6MpnEIfOp__v_; +text: .text%__1cLLIR_EmitterLifop_phase16MnLInstructionJCondition_pnLLIR_OprDesc_4_v_; +text: .text%__1cIHintItemNset_from_item6MpknEItem__v_; +text: .text%__1cIHintItemEtype6kM_pnJValueType__: c1_Items.o; +text: .text%__1cLLIR_EmitterLifop_phase26MnFRInfo_pnLLIR_OprDesc_3nLInstructionJCondition__v_; +text: .text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnFLabel__v_; +text: .text%__1cRLIR_PeepholeStateUstart_forward_branch6MpnFLabel__v_; +text: .text%__1cIintStackEgrow6Mki1_v_: c1_LIROptimizer.o; +text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_; +text: .text%__1cQComputeCallStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_; +text: .text%__1cQComputeCallStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_; +text: .text%__1cTciConstantPoolCacheDget6Mi_pv_; +text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_; +text: .text%__1cLInstructionMas_CompareOp6M_pnJCompareOp__: c1_Instruction.o; +text: .text%__1cLInstructionNas_InstanceOf6M_pnKInstanceOf__: c1_Instruction.o; +text: .text%__1cKStoreFieldFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerNdo_StoreField6MpnKStoreField__v_; +text: .text%__1cLAccessFieldOas_AccessField6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLAccessFieldIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cIValueMapKkill_field6MpnHciField__v_; +text: .text%__1cLInstructionMas_LoadField6M_pnJLoadField__: c1_Instruction.o; +text: .text%__1cKValueStackQpin_stack_fields6MpnHciField__v_; +text: .text%__1cMGraphBuilderHif_null6MpnJValueType_nLInstructionJCondition__v_; +text: .text%__1cOObjectConstantRas_ObjectConstant6M_p0_: c1_ValueType.o; +text: .text%__1cDCHANprocess_class6FnLKlassHandle_pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: .text%__1cFKlassMnext_sibling6kM_p0_; +text: .text%__1cYDebugInformationRecorderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: .text%__1cILongTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cILongTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cILongTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cHConvertFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: 
.text%__1cNCanonicalizerKdo_Convert6MpnHConvert__v_; +text: .text%__1cHConvertEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cHConvertEname6kM_pkc_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderOnew_type_array6M_v_; +text: .text%__1cMNewTypeArrayFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerPdo_NewTypeArray6MpnMNewTypeArray__v_; +text: .text%__1cINewArrayIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cHciKlassOis_subclass_of6Mp0_i_; +text: .text%__1cUGenericGrowableArrayMraw_contains6kMpknEGrET__i_; +text: .text%__1cJValueTypeRas_ObjectConstant6M_pnOObjectConstant__: c1_ValueType.o; +text: .text%__1cNClassConstantQas_ClassConstant6M_p0_: c1_ValueType.o; +text: .text%__1cHIntTypeEbase6kM_pnJValueType__: c1_Canonicalizer.o; +text: .text%__1cMArithmeticOpOis_commutative6kM_i_; +text: .text%__1cMGraphBuilderIlogic_op6MpnJValueType_nJBytecodesECode__v_; +text: .text%__1cHLogicOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerKdo_LogicOp6MpnHLogicOp__v_; +text: .text%__1cHLogicOpEhash6kM_i_: c1_Instruction.o; +text: .text%__1cHLogicOpEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cLInstructionIcan_trap6kM_i_: c1_Instruction.o; +text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cLInstructionPother_values_do6MpFpp0_v_v_: c1_Instruction.o; +text: .text%__1cKStoreFieldPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorNdo_StoreField6MpnKStoreField__v_; +text: .text%__1cINewArrayPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o; +text: .text%__1cHConvertPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorKdo_Convert6MpnHConvert__v_; +text: .text%__1cQNullCheckVisitorPdo_NewTypeArray6MpnMNewTypeArray__v_; +text: .text%__1cJLoadFieldMas_LoadField6M_p0_: c1_Instruction.o; +text: .text%__1cQNullCheckVisitorKdo_LogicOp6MpnHLogicOp__v_; +text: .text%__1cDPhiPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cDPhiFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorGdo_Phi6MpnDPhi__v_; +text: .text%__1cTsort_by_start_block6FppnELoop_2_i_: c1_Loops.o; +text: .text%__1cHConvertKas_Convert6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLInstructionIas_Local6M_pnFLocal__: c1_GraphBuilder.o; +text: .text%__1cDPhiGas_Phi6M_p0_: c1_GraphBuilder.o; +text: .text%__1cIValueGenbBrlock_byte_result_with_hint6MpnLInstruction_pknEItem__nFRInfo__; +text: .text%__1cIValueGenNdo_StoreField6MpnKStoreField__v_; +text: .text%__1cIValueGenOscratch1_RInfo6kM_nFRInfo__; +text: .text%__1cIValueGenUprefer_alu_registers6kM_i_; +text: .text%__1cIValueGenOload_byte_item6MpnEItem__v_; +text: .text%__1cLLIR_EmitterLfield_store6MpnHciField_pnLLIR_OprDesc_i4iipnMCodeEmitInfo_nFRInfo__v_; +text: .text%__1cLLIR_EmitterQfield_store_byte6MpnLLIR_OprDesc_i2nFRInfo_ipnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListNstore_mem_reg6MnFRInfo_1inJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cMciNullObjectMis_classless6kM_i_: ciNullObject.o; +text: .text%__1cEItemUget_jobject_constant6kM_pnIciObject__; +text: .text%__1cMciNullObjectOis_null_object6kM_i_: ciNullObject.o; +text: .text%__1cJValueTypeQas_ClassConstant6M_pnNClassConstant__: c1_ValueType.o; +text: .text%__1cOObjectConstantIencoding6kM_pnI_jobject__; +text: .text%__1cNc1_AllocTableMhas_two_free6kM_i_; +text: 
.text%__1cIRegAllocOset_locked_cpu6MipnLInstruction_i_v_; +text: .text%__1cIRegAllocMset_free_cpu6Mi_v_; +text: .text%__1cFRInfoLas_rinfo_lo6kM_0_; +text: .text%__1cFRInfoLas_rinfo_hi6kM_0_; +text: .text%__1cLLIR_EmitterXlo_word_offset_in_bytes6kM_i_; +text: .text%__1cLLIR_EmitterXhi_word_offset_in_bytes6kM_i_; +text: .text%__1cIValueGenPdo_NewTypeArray6MpnMNewTypeArray__v_; +text: .text%__1cIValueGenKdo_Convert6MpnHConvert__v_; +text: .text%__1cLLIR_EmitterKconvert_op6MnJBytecodesECode_pnLLIR_OprDesc_nFRInfo_i_v_; +text: .text%__1cILIR_ListHconvert6MnJBytecodesECode_pnLLIR_OprDesc_4i_v_: c1_LIREmitter.o; +text: .text%__1cLLIR_EmitterOnew_type_array6MnFRInfo_nJBasicType_pnLLIR_OprDesc_11111pnMCodeEmitInfo__v_; +text: .text%__1cQNewTypeArrayStub2t6MnFRInfo_11pnMCodeEmitInfo__v_; +text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_; +text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter_sparc.o; +text: .text%__1cILIR_ListOallocate_array6MnFRInfo_11111nJBasicType_1pnICodeStub__v_; +text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cHciKlassMaccess_flags6M_i_; +text: .text%__1cILIR_ListPallocate_object6MnFRInfo_111ii1pnICodeStub__v_; +text: .text%__1cLLIR_OprFactQdummy_value_type6FpnJValueType__pnLLIR_OprDesc__; +text: .text%__1cIRegAllocTget_value_for_rinfo6kMnFRInfo__pnLInstruction__; +text: .text%__1cIValueGenQround_spill_item6MpnEItem_i_v_; +text: .text%__1cIRegAllocPget_register_rc6kMnFRInfo__i_; +text: .text%__1cIRegAllocOget_lock_spill6MpnLInstruction_i_i_; +text: .text%__1cLLIR_EmitterFspill6MipnLLIR_OprDesc__v_; +text: .text%__1cIFrameMapKspill_name6kMi_i_; +text: .text%__1cIRegAllocKfree_spill6MipnJValueType__v_; +text: .text%__1cIRegAllocNis_free_spill6kMipnJValueType__i_; +text: .text%__1cLLIR_EmitterOmembar_release6M_v_; +text: .text%__1cLLIR_EmitterNwrite_barrier6MpnLLIR_OprDesc_2_v_; +text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter_sparc.o; +text: .text%__1cILIR_ListUunsigned_shift_right6MpnLLIR_OprDesc_222_v_; +text: .text%__1cILIR_ListHint2reg6MinFRInfo__v_: c1_LIREmitter_sparc.o; +text: .text%__1cILIR_ListFstore6MpnLLIR_OprDesc_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cLLIR_EmitterGmembar6M_v_; +text: .text%__1cILIR_ListGmembar6M_v_: c1_LIREmitter.o; +text: .text%__1cJValueTypeNas_ObjectType6M_pnKObjectType__: c1_Canonicalizer.o; +text: .text%__1cLLIR_EmitterOmembar_acquire6M_v_; +text: .text%__1cILIR_ListOcall_icvirtual6MnFRInfo_pnLLIR_OprDesc_pCpnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cNClassConstantIencoding6kM_pnI_jobject__; +text: .text%__1cILIR_ListDadd6MpnLLIR_OprDesc_22_v_: c1_LIREmitter_sparc.o; +text: .text%__1cIValueGenKdo_LogicOp6MpnHLogicOp__v_; +text: .text%__1cLLIR_EmitterIlogic_op6MnJBytecodesECode_nFRInfo_pnLLIR_OprDesc_5_v_; +text: .text%__1cILIR_ListKlogical_or6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenScompute_phi_arrays6MpnKValueStack_pnGValues_pnIintStack_i_pnLInstruction__; +text: .text%__1cIValueGenOload_item_hint6MpnEItem_pk1_v_; +text: .text%__1cLLIR_EmitterTset_fpu_stack_empty6M_v_; +text: .text%__1cILIR_ListTset_fpu_stack_empty6M_v_: c1_LIREmitter.o; +text: .text%__1cJValueTypeMas_FloatType6M_pnJFloatType__: c1_ValueType.o; +text: .text%__1cILIR_ListLlogical_and6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o; +text: .text%__1cRclear_state_items6FppnLInstruction__v_: c1_CodeGenerator.o; +text: .text%__1cQLIR_OpAllocArrayFvisit6MpnQLIR_OpVisitState__v_; +text: 
.text%__1cQNewTypeArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_sparc.o; +text: .text%__1cOLIR_OpAllocObjFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cINewArrayLas_NewArray6M_p0_: c1_Instruction.o; +text: .text%__1cNLIR_OpConvertJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerOemit_opConvert6MpnNLIR_OpConvert__v_; +text: .text%__1cQLIR_OpAllocArrayJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerQemit_alloc_array6MpnQLIR_OpAllocArray__v_; +text: .text%__1cOLIR_OpAllocObjJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_; +text: .text%__1cNLIR_AssemblerHreg2mem6MnFRInfo_pnLLIR_Address_nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerFstore6MnFRInfo_pnMRegisterImpl_inJBasicType__i_; +text: .text%__1cNLIR_AssemblerOemit_opConvert6MpnNLIR_OpConvert__v_; +text: .text%__1cFRInfoOas_register_lo6kM_pnMRegisterImpl__; +text: .text%__1cFRInfoOas_register_hi6kM_pnMRegisterImpl__; +text: .text%__1cNLIR_AssemblerQemit_alloc_array6MpnQLIR_OpAllocArray__v_; +text: .text%__1cRC1_MacroAssemblerOallocate_array6MpnMRegisterImpl_2222ii2rnFLabel__v_; +text: .text%__1cQNewTypeArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_sparc.o; +text: .text%__1cNLIR_AssemblerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_; +text: .text%__1cRC1_MacroAssemblerPallocate_object6MpnMRegisterImpl_222ii2rnFLabel__v_; +text: .text%__1cIFrameMapRlocation_for_name6kMinILocationEType_p1ii_i_; +text: .text%__1cNLIR_AssemblerOmembar_release6M_v_; +text: .text%__1cNLIR_AssemblerFstore6MnFRInfo_pnMRegisterImpl_3nJBasicType__i_; +text: .text%__1cNLIR_AssemblerGmembar6M_v_; +text: .text%__1cNLIR_AssemblerOmembar_acquire6M_v_; +text: .text%__1cNLIR_AssemblerHic_call6MpCpnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerCpc6kM_pC_; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: c1_LIRAssembler_sparc.o; +text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cXvirtual_call_RelocationJpack_data6M_i_; +text: .text%__1cNLIR_AssemblerIlogic_op6MnILIR_Code_pnLLIR_OprDesc_33_v_; +text: .text%__1cNLIR_AssemblerTset_fpu_stack_empty6M_v_; +text: .text%__1cQNewTypeArrayStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cYDebugInformationRecorderNappend_handle6MpnI_jobject__i_; +text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_; +text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cIRuntime1Yresolve_opt_virtual_call6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNSharedRuntimeOresolve_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cNSharedRuntimeSresolve_sub_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cLRuntimeStubYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cHnmethodJis_zombie6kM_i_: nmethod.o; +text: .text%__1cNnmethodLocker2t6MpnHnmethod__v_; +text: .text%__1cHnmethodQis_native_method6kM_i_: nmethod.o; +text: .text%__1cHnmethodKpc_desc_at6MpCi_pnGPcDesc__; +text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_; +text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cPBytecode_invokeFindex6kM_i_; +text: .text%__1cLRegisterMapIpd_clear6M_v_; +text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__; +text: 
.text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cLRegisterMapLpd_location6kMnFVMRegEName__pC_; +text: .text%__1cKCompiledICZcompute_monomorphic_entry6FnMmethodHandle_nLKlassHandle_iirnOCompiledICInfo_pnGThread__v_; +text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_; +text: .text%__1cXvirtual_call_RelocationIparse_ic6FrpnICodeBlob_rpC5rppnHoopDesc_pi_nNRelocIterator__; +text: .text%__1cKCompiledICIis_clean6kM_i_; +text: .text%__1cRInlineCacheBufferIcontains6FpC_i_; +text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_; +text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_; +text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_; +text: .text%__1cNRelocIteratorEnext6M_i_: compiledIC.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: compiledIC.o; +text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cRNativeMovConstRegIset_data6Mi_v_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nativeInst_sparc.o; +text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__; +text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_; +text: .text%__1cNnmethodLocker2T6M_v_; +text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_; +text: .text%__1cIRuntime1Yprepare_interpreter_call6FpnKJavaThread_pnNmethodOopDesc__v_; +text: .text%__1cLLIR_EmitterYstrength_reduce_multiply6MpnLLIR_OprDesc_i22_i_; +text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_; +text: .text%__1cJLoadFieldIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%__1cMGraphBuilderNload_constant6M_v_; +text: .text%__1cQciBytecodeStreamMget_constant6kM_nKciConstant__; +text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_Canonicalizer.o; +text: .text%__1cIRegAllocKlock_spill6MpnLInstruction_ii_v_; +text: .text%__1cIValueGenVinvoke_load_arguments6MpnGInvoke_pnJItemArray_pnRCallingConvention__v_; +text: .text%__1cILIR_ListLcall_static6MpnLLIR_OprDesc_pCpnMCodeEmitInfo_pnOStaticCallStub__v_: c1_LIREmitter.o; +text: .text%__1cIintStackEgrow6Mki1_v_: c1_CacheLocals.o; +text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: c1_CodeStubs_sparc.o; +text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_; +text: .text%__1cJCodeCacheXmark_for_deoptimization6FpnMklassOopDesc__i_; +text: .text%__1cNinstanceKlassXmark_dependent_nmethods6MpnMklassOopDesc__i_; +text: .text%__1cPParameterMapperJdo_object6Mii_v_: c1_Runtime1_sparc.o; +text: .text%__1cPParameterMapperGdo_int6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cMPeriodicTaskOreal_time_tick6FI_v_; +text: .text%__1cPStatSamplerTaskEtask6M_v_: statSampler.o; +text: .text%jni_GetArrayLength: jni.o; +text: .text%JVM_Read; +text: .text%jni_GetByteArrayRegion: jni.o; +text: .text%JVM_DefineClassWithSource; +text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__; +text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_; +text: .text%__1cPClassFileParserZskip_over_field_signature6MpciIpnGThread__1_; +text: 
.text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cQput_after_lookup6FnMsymbolHandle_0ppnLNameSigHash__i_; +text: .text%__1cKDictionarybAis_valid_protection_domain6MiInMsymbolHandle_nGHandle_2_i_; +text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cNmethodOopDescWis_vanilla_constructor6kM_i_; +text: .text%JVM_GetClassCPEntriesCount; +text: .text%JVM_GetClassCPTypes; +text: .text%JVM_GetClassNameUTF; +text: .text%JVM_ReleaseUTF; +text: .text%JVM_FindClassFromClass; +text: .text%jni_IsSameObject: jni.o; +text: .text%JVM_GetClassFieldsCount; +text: .text%JVM_GetClassMethodsCount; +text: .text%JVM_GetMethodIxModifiers; +text: .text%JVM_GetMethodIxByteCodeLength; +text: .text%JVM_GetMethodIxByteCode; +text: .text%JVM_GetMethodIxExceptionTableLength; +text: .text%JVM_GetMethodIxLocalsCount; +text: .text%JVM_GetMethodIxArgsSize; +text: .text%JVM_GetMethodIxSignatureUTF; +text: .text%JVM_GetMethodIxMaxStack; +text: .text%JVM_GetMethodIxExceptionsCount; +text: .text%JVM_GetMethodIxExceptionIndexes; +text: .text%JVM_GetCPMethodNameUTF; +text: .text%JVM_GetCPMethodClassNameUTF; +text: .text%jni_NewLocalRef: jni.o; +text: .text%JVM_GetCPMethodModifiers; +text: .text%JVM_IsConstructorIx; +text: .text%JVM_GetCPMethodSignatureUTF; +text: .text%jni_DeleteGlobalRef: jni.o; +text: .text%__1cQSystemDictionaryVadd_loader_constraint6FnMsymbolHandle_nGHandle_2pnGThread__v_; +text: .text%__1cVLoaderConstraintTableJadd_entry6MnMsymbolHandle_pnMklassOopDesc_nGHandle_34pnGThread__i_; +text: .text%jni_ToReflectedMethod: jni.o; +text: .text%__1cKReflectionKnew_method6FnMmethodHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: reflection.o; +text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__; +text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodThas_signature_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodVhas_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbFhas_parameter_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbChas_annotation_default_field6F_i_; +text: .text%__1cNmethodOopDescSannotation_default6kM_pnQtypeArrayOopDesc__; +text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_; +text: .text%jni_CallIntMethod: jni.o; +text: .text%jni_CallStaticVoidMethod: jni.o; +text: .text%__1cNFingerprinterIdo_array6Mii_v_: c1_Runtime1_sparc.o; +text: .text%jni_DetachCurrentThread; +text: .text%__1cKJavaThreadEexit6Mi_v_; +text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_; +text: .text%JVM_MonitorNotifyAll; +text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_; +text: 
.text%__1cLensure_join6FpnKJavaThread__v_: thread.o; +text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_; +text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_; +text: .text%__1cCosPuncommit_memory6FpcI_i_; +text: .text%__1cWThreadLocalAllocBufferFclear6M_v_; +text: .text%__1cHThreadsGremove6FpnKJavaThread__v_; +text: .text%__1cNThreadServiceNremove_thread6FpnKJavaThread_i_v_; +text: .text%__SLIP.DELETER__A: thread.o; +text: .text%__1cKJavaThread2T6M_v_; +text: .text%__1cGParker2T6M_v_; +text: .text%__1cHMonitor2T6M_v_; +text: .text%__1cFMutex2T6M_v_; +text: .text%lwp_cond_destroy: os_solaris.o; +text: .text%lwp_mutex_destroy: os_solaris.o; +text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_; +text: .text%__1cGThread2T5B6M_v_; +text: .text%__1cCosLfree_thread6FpnIOSThread__v_; +text: .text%__1cIOSThread2T6M_v_; +text: .text%__1cIOSThreadKpd_destroy6M_v_; +text: .text%jni_DestroyJavaVM; +text: .text%jni_AttachCurrentThread; +text: .text%__1cKJavaThread2t6M_v_; +text: .text%__1cCosWcreate_attached_thread6FpnGThread__i_; +text: .text%__1cHThreadsYis_supported_jni_version6Fi_C_; +text: .text%__1cKJavaThreadSallocate_threadObj6MnGHandle_pcipnGThread__v_; +text: .text%__1cKJavaThreadYcreate_stack_guard_pages6M_v_; +text: .text%__1cHThreadsKdestroy_vm6F_i_; +text: .text%__1cKJavaThreadVinvoke_shutdown_hooks6M_v_; +text: .text%__1cLbefore_exit6FpnKJavaThread__v_; +text: .text%__1cNWatcherThreadEstop6F_v_; +text: .text%__1cLStatSamplerJdisengage6F_v_; +text: .text%__1cMPeriodicTaskJdisenroll6M_v_; +text: .text%__1cMPeriodicTask2T5B6M_v_; +text: .text%__1cLStatSamplerHdestroy6F_v_; +text: .text%__1cMPerfDataList2T6M_v_; +text: .text%__1cLJvmtiExportNpost_vm_death6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_death6F_v_; +text: .text%__1cCosXterminate_signal_thread6F_v_; +text: .text%__1cCosNsigexitnum_pd6F_i_; +text: .text%__1cCosNsignal_notify6Fi_v_; +text: .text%__1cFVTuneEexit6F_v_; +text: .text%__1cIVMThreadXwait_for_vm_thread_exit6F_v_; +text: .text%__1cUSafepointSynchronizeFbegin6F_v_; +text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_; +text: .text%__1cJTimeStampSticks_since_update6kM_x_; +text: .text%__1cTAbstractInterpreterRnotice_safepoints6F_v_; +text: .text%__1cCosbCmake_polling_page_unreadable6F_v_; +text: .text%__1cUThreadSafepointStateXexamine_state_of_thread6Mi_v_; +text: .text%__1cUSafepointSynchronizeOsafepoint_safe6FpnKJavaThread_nPJavaThreadState__i_; +text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_; +text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_; +text: .text%__1cNObjectMonitorHis_busy6kM_i_; +text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_; +text: .text%__1cMCounterDecayFdecay6F_v_; +text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_; +text: .text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__; +text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__; +text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: .text%__1cJdo_method6FpnNmethodOopDesc__v_: recompilationMonitor.o; +text: .text%__1cONMethodSweeperFsweep6F_v_; +text: .text%__1cNCompileBrokerQset_should_block6F_v_; +text: .text%__1cHVM_ExitbJwait_for_threads_in_native_to_block6F_i_; +text: .text%__1cURecompilationMonitorbFstop_recompilation_monitor_task6F_v_; +text: .text%__1cIVMThreadHdestroy6F_v_; +text: .text%__SLIP.DELETER__A: vmThread.o; +text: .text%__1cSThreadLocalStorageRpd_invalidate_all6F_v_; +text: .text%__1cHVM_ExitNset_vm_exited6F_i_; +text: 
.text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_; +text: .text%__1cCosRcurrent_thread_id6F_i_; +text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fi_v_; +text: .text%__1cMexit_globals6F_v_; +text: .text%__1cVverificationType_exit6F_v_; +text: .text%__1cPperfMemory_exit6F_v_; +text: .text%__1cPPerfDataManagerHdestroy6F_v_; +text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_; +text: .text%__1cMostream_exit6F_v_; +text: .text%__SLIP.DELETER__C: ostream.o; +text: .text%__SLIP.FINAL__A: c1_Items.o; +# Test Exit +text: .text%__1cPSignatureStreamHis_done6kM_i_; +text: .text%JVM_Halt; +text: .text%__1cHvm_exit6Fi_v_; +text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_; +text: .text%__1cMVM_OperationNdoit_prologue6M_i_: vm_operations.o; +text: .text%__1cGThreadMget_priority6Fkpk0_nOThreadPriority__; +text: .text%__1cCosMget_priority6FkpknGThread_rnOThreadPriority__nIOSReturn__; +text: .text%__1cCosTget_native_priority6FkpknGThread_pi_nIOSReturn__; +text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_; +text: .text%__1cMVM_OperationPevaluation_mode6kM_n0AEMode__: vm_operations.o; +text: .text%__1cMVM_OperationSis_cheap_allocated6kM_i_: vm_operations.o; +text: .text%__1cHVM_ExitEname6kM_pkc_: vm_operations.o; +text: .text%__1cJEventMark2t6MpkcE_v_: vmThread.o; +text: .text%__1cCosJyield_all6Fi_v_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: vmThread.o; +text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_; +text: .text%__1cMVM_OperationIevaluate6M_v_; +text: .text%__1cHVM_ExitEdoit6M_v_; +# Test Hello +text: .text%JVM_GetCPFieldSignatureUTF; +text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__; +text: .text%JVM_Write; +# Test Sleep +text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_; +text: .text%JVM_GetMethodIxExceptionTableEntry; +text: .text%JVM_GetCPClassNameUTF; +text: .text%JVM_Sleep; +text: .text%__1cCosHSolarisTsetup_interruptible6FpnKJavaThread__v_; +text: .text%__1cUSafepointSynchronizeRis_cleanup_needed6F_i_; +text: .text%__1cRInlineCacheBufferIis_empty6F_i_; +# Test IntToString +text: .text%__1cQChunkPoolCleanerEtask6M_v_: allocation.o; +# Test LoadToolkit +text: .text%JVM_GetClassContext; +text: .text%jni_IsAssignableFrom: jni.o; +text: .text%__1cRComputeEntryStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cIFrameMapRname_for_argument6Fi_i_; +text: .text%__1cNLIR_AssemblerEload6MpnMRegisterImpl_inFRInfo_nJBasicType__i_; +text: .text%__1cRComputeEntryStackHdo_char6M_v_: generateOopMap.o; +text: .text%__1cMGraphBuilderNstore_indexed6MnJBasicType__v_; +text: .text%__1cIValueMapKkill_array6MpnJValueType__v_; +text: .text%__1cLInstructionOas_LoadIndexed6M_pnLLoadIndexed__: c1_GraphBuilder.o; +text: .text%__1cLInstructionOas_LoadIndexed6M_pnLLoadIndexed__: c1_Instruction.o; +text: .text%__1cKValueStackRpin_stack_indexed6MpnJValueType__v_; +text: .text%__1cMStoreIndexedFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_StoreIndexed6MpnMStoreIndexed__v_; +text: .text%__1cMStoreIndexedPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorPdo_StoreIndexed6MpnMStoreIndexed__v_; +text: .text%__1cIValueGenPdo_StoreIndexed6MpnMStoreIndexed__v_; +text: .text%__1cLLIR_EmitterNindexed_store6MnJBasicType_pnLLIR_OprDesc_33nFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cKValueStackElock6MpnHIRScope_pnLInstruction__i_; +text: .text%__1cKValueStackGunlock6M_i_; +text: 
.text%__1cLLIR_EmitterVmonitorenter_at_entry6MnFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cLLIR_EmitterGmethod6kM_pnIciMethod__; +text: .text%__1cLLIR_EmitterNmonitor_enter6MnFRInfo_111ipnMCodeEmitInfo_3_v_; +text: .text%__1cQMonitorEnterStub2t6MnFRInfo_1pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListbAload_stack_address_monitor6MinFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListLlock_object6MnFRInfo_111pnICodeStub_pnMCodeEmitInfo__v_; +text: .text%__1cIValueGenMrelease_item6MpnEItem__v_; +text: .text%__1cLLIR_EmitterQreturn_op_prolog6Mi_v_; +text: .text%__1cLLIR_EmitterMmonitor_exit6MnFRInfo_11i_v_; +text: .text%__1cILIR_ListNunlock_object6MnFRInfo_11pnICodeStub__v_; +text: .text%__1cKLIR_OpLockFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cQMonitorEnterStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_sparc.o; +text: .text%__1cRMonitorAccessStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_sparc.o; +text: .text%__1cKLIR_OpLockJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerJemit_lock6MpnKLIR_OpLock__v_; +text: .text%__1cNLIR_AssemblerPmonitor_address6MinFRInfo__v_; +text: .text%__1cIFrameMapbEaddress_for_monitor_lock_index6kMi_nHAddress__; +text: .text%__1cNLIR_AssemblerJemit_lock6MpnKLIR_OpLock__v_; +text: .text%__1cRC1_MacroAssemblerLlock_object6MpnMRegisterImpl_222rnFLabel__v_; +text: .text%__1cQMonitorEnterStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_sparc.o; +text: .text%__1cIFrameMapWmonitor_object_regname6kMi_nHOptoRegEName__; +text: .text%__1cIFrameMapbHlocation_for_monitor_object_index6kMipnILocation__i_; +text: .text%__1cIFrameMapbFlocation_for_monitor_lock_index6kMipnILocation__i_; +text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_; +text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cRC1_MacroAssemblerNunlock_object6MpnMRegisterImpl_22rnFLabel__v_; +text: .text%__1cPMonitorExitStubMis_call_stub6kM_i_: c1_CodeStubs_sparc.o; +text: .text%__1cQMonitorEnterStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNLIR_AssemblerLmonitorexit6MpnMRegisterImpl_22i_v_; +text: .text%__1cPMonitorExitStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cRComputeEntryStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cMGraphBuilderIshift_op6MpnJValueType_nJBytecodesECode__v_; +text: .text%__1cHShiftOpFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerKdo_ShiftOp6MpnHShiftOp__v_; +text: .text%__1cHShiftOpEhash6kM_i_: c1_GraphBuilder.o; +text: .text%__1cHShiftOpEname6kM_pkc_: c1_GraphBuilder.o; +text: .text%__1cHLogicOpOis_commutative6kM_i_; +text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cJIntrinsicFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerMdo_Intrinsic6MpnJIntrinsic__v_; +text: .text%__1cJIntrinsicMas_Intrinsic6M_p0_: c1_GraphBuilder.o; +text: .text%__1cJIntrinsicIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cMas_ValueType6FnKciConstant__pnJValueType__; +text: .text%__1cJIntrinsicPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorMdo_Intrinsic6MpnJIntrinsic__v_; +text: .text%__1cDOp2Pinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorKdo_ShiftOp6MpnHShiftOp__v_; +text: .text%__1cIValueGenKdo_ShiftOp6MpnHShiftOp__v_; +text: .text%__1cLLIR_EmitterIshift_op6MnJBytecodesECode_nFRInfo_pnLLIR_OprDesc_53_v_; +text: 
.text%__1cILIR_ListLshift_right6MnFRInfo_i1_v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListLshift_right6MpnLLIR_OprDesc_222_v_; +text: .text%__1cIValueGenMdo_Intrinsic6MpnJIntrinsic__v_; +text: .text%__1cIValueGenRspill_caller_save6M_v_; +text: .text%__1cIFrameMapVcaller_save_registers6F_pnPRInfoCollection__; +text: .text%__1cJIsFreeRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIValueGenXload_item_with_reg_mask6MpnEItem_nKc1_RegMask__v_; +text: .text%__1cIRegAllocNget_lock_temp6MpnLInstruction_nKc1_RegMask__nFRInfo__; +text: .text%__1cIValueGenQarraycopy_helper6MpnJIntrinsic_pippnMciArrayKlass__v_; +text: .text%__1cLInstructionKexact_type6kM_pnGciType__: c1_GraphBuilder.o; +text: .text%__1cOas_array_klass6FpnGciType__pnMciArrayKlass__: c1_CodeGenerator.o; +text: .text%__1cLInstructionNdeclared_type6kM_pnGciType__: c1_GraphBuilder.o; +text: .text%__1cMNewTypeArrayKexact_type6kM_pnGciType__; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cLInstructionNdeclared_type6kM_pnGciType__: c1_Instruction.o; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cLInstructionOas_ArrayLength6M_pnLArrayLength__: c1_GraphBuilder.o; +text: .text%__1cHHideReg2t6MpnIValueGen_nKc1_RegMask__v_; +text: .text%__1cHHideReg2T6M_v_; +text: .text%__1cPLIR_OpArrayCopyFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cPLIR_OpArrayCopyJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerOemit_arraycopy6MpnPLIR_OpArrayCopy__v_; +text: .text%__1cNLIR_AssemblerOemit_arraycopy6MpnPLIR_OpArrayCopy__v_; +text: .text%__1cMciArrayKlassMelement_type6M_pnGciType__; +text: .text%__1cQciTypeArrayKlassTis_type_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cNArrayCopyStub2t6MpnMCodeEmitInfo_pnOStaticCallStub__v_; +text: .text%__1cNArrayCopyStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_sparc.o; +text: .text%__1cNArrayCopyStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%jni_GetEnv; +text: .text%jni_CallStaticBooleanMethod: jni.o; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlass.o; +text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: c1_IR.o; +text: .text%__1cMGraphBuilderQhandle_exception6Mi_v_; +text: .text%__1cOExceptionScopeFclear6M_v_; +text: .text%__1cLInstructionGmirror6Fn0AJCondition__1_; +text: .text%__1cKBlockBeginVadd_exception_handler6Mp0_v_; +text: .text%__1cOExceptionScopeLadd_handler6MpnIXHandler__v_; +text: .text%__1cIciObjectEhash6M_i_; +text: .text%__1cIciObjectMhas_encoding6M_i_; +text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_GraphBuilder.o; +text: .text%__1cLAccessFieldPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstance.o; +text: .text%__1cLLIR_EmitterNhandler_entry6M_v_; +text: .text%__1cILIR_ListDnop6MpnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cILIR_ListKnull_check6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cFChunk2n6FII_pv_; +text: .text%__1cOExceptionScopeKhandler_at6kMi_pnIXHandler__; +text: .text%__1cTExceptionRangeTableJadd_entry6Miiiiii_v_; +text: .text%__1cTExceptionRangeTableTentry_index_for_pco6kMi_i_; +text: .text%__1cTExceptionRangeTableIentry_at6kMi_pnTExceptionRangeEntry__; +text: 
.text%jni_CallStaticVoidMethodV: jni.o; +text: .text%JVM_GetLastErrorString; +text: .text%jni_Throw: jni.o; +text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_; +text: .text%JVM_DisableCompiler; +text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%JVM_Available; +text: .text%__1cLArrayLengthIis_equal6kMpnLInstruction__i_: c1_GraphBuilder.o; +text: .text%__1cLArrayLengthOas_ArrayLength6M_p0_: c1_GraphBuilder.o; +text: .text%__1cJLoadFieldKexact_type6kM_pnGciType__; +text: .text%__1cJLoadFieldNdeclared_type6kM_pnGciType__; +text: .text%__1cIRuntime1Uresolve_virtual_call6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cKCompiledICWis_in_transition_state6kM_i_; +text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_; +text: .text%__1cOMacroAssemblerEjump6MrnHAddress_ipkci_v_; +text: .text%__1cOMacroAssemblerFjumpl6MrnHAddress_pnMRegisterImpl_ipkci_v_; +text: .text%__1cKCompiledICSset_ic_destination6MpC_v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlass.o; +text: .text%JVM_NewArray; +text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__; +text: .text%__1cNSignatureInfoIdo_float6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cNSignatureInfoJdo_double6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_; +text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cJBytecodesRspecial_length_at6FpC_i_; +text: .text%__1cFBytesNget_native_u46FpC_I_: bytecodes.o; +text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_; +text: .text%__1cNobjArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMkpnNmethodOopDesc_i_pnHnmethod__; +text: .text%__1cQSimpleCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_; +text: .text%__1cICompilerMsupports_osr6M_i_: c1_Compiler.o; +text: .text%__1cMGraphBuilderQnew_object_array6M_v_; +text: .text%__1cONewObjectArrayFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerRdo_NewObjectArray6MpnONewObjectArray__v_; +text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__: ciObjArrayKlass.o; +text: .text%__1cLLoadIndexedOas_LoadIndexed6M_p0_: c1_Instruction.o; +text: .text%__1cMArithmeticOpIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%__1cDOp2Gas_Op26M_p0_: c1_Instruction.o; +text: .text%__1cLInstructionMas_LoadField6M_pnJLoadField__: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorRdo_NewObjectArray6MpnONewObjectArray__v_; +text: .text%__1cIValueGenRdo_NewObjectArray6MpnONewObjectArray__v_; +text: .text%__1cLLIR_EmitterQnew_object_array6MnFRInfo_pnHciKlass_pnLLIR_OprDesc_11111pnMCodeEmitInfo_7_v_; +text: .text%__1cSNewObjectArrayStub2t6MnFRInfo_11pnMCodeEmitInfo__v_; +text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_; +text: .text%__1cLLIR_EmitterZjobject2reg_with_patching6MnFRInfo_pnIciObject_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListKshift_left6MnFRInfo_i1_v_: c1_LIREmitter.o; 
+text: .text%__1cSNewObjectArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_sparc.o; +text: .text%__1cSNewObjectArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_sparc.o; +text: .text%__1cEBaseHas_Base6M_p0_: c1_IR.o; +text: .text%__1cNLIR_AssemblerOemit_osr_entry6MpnHIRScope_ipnFLabel_i_v_; +text: .text%__1cHIRScopeGlocals6M_pnJLocalList__; +text: .text%__1cSNewObjectArrayStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cUGenericGrowableArrayLraw_at_grow6MipknEGrET__pv_; +text: .text%__1cNSignatureInfoIdo_short6M_v_: bytecode.o; +text: .text%__1cICompilerPsupports_native6M_i_: c1_Compiler.o; +text: .text%__1cIciMethodMnative_entry6M_pC_; +text: .text%__1cLCompilationUemit_code_for_native6MpCpnLCodeOffsets__v_; +text: .text%__1cLCompilationXemit_code_prolog_native6MpnIFrameMap__v_; +text: .text%__1cNLIR_AssemblerRemit_method_entry6MpnLLIR_Emitter_pnHIRScope__v_; +text: .text%__1cNLIR_AssemblerMneeds_icache6kMpnIciMethod__i_; +text: .text%__1cNLIR_AssemblerQemit_native_call6MpCpnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerSpass_oop_to_native6MinHAddress_nIArgument__v_; +text: .text%__1cOMacroAssemblerOstore_argument6MpnMRegisterImpl_rnIArgument__v_: c1_LIRAssembler_sparc.o; +text: .text%__1cMCodeEmitInfobDcreate_oop_map_inside_natives6M_pnGOopMap__; +text: .text%__1cNLIR_AssemblerXemit_native_method_exit6MpnMCodeEmitInfo__v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: c1_LIRAssembler_sparc.o; +text: .text%__1cNLIR_AssemblerVsave_native_fp_result6MnJBasicType_nHAddress__v_; +text: .text%__1cNLIR_AssemblerYrestore_native_fp_result6MnJBasicType_nHAddress__v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: typeArrayKlass.o; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciObjectFactory.o; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: generateOopMap.o; +text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: c1_GraphBuilder.o; +text: .text%__1cLInstructionKas_ShiftOp6M_pnHShiftOp__: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderMtable_switch6M_v_; +text: .text%__1cLTableSwitchFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerOdo_TableSwitch6MpnLTableSwitch__v_; +text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_GraphBuilder.o; +text: .text%__1cGSwitchPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorOdo_TableSwitch6MpnLTableSwitch__v_; +text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter.o; +text: .text%__1cIValueGenOdo_TableSwitch6MpnLTableSwitch__v_; +text: .text%__1cLLIR_EmitterOtableswitch_op6MpnLLIR_OprDesc_ipnKBlockBegin__v_; +text: .text%__1cIRuntime1Tresolve_static_call6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_; +text: .text%__1cSCompiledStaticCallIis_clean6kM_i_; +text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_; +text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cJValueTypeQas_ClassConstant6M_pnNClassConstant__: c1_Canonicalizer.o; +text: .text%__1cIRuntime1Thandle_wrong_method6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: 
.text%__1cFframeRis_compiled_frame6kMpi_i_; +text: .text%__1cHnmethodOis_java_method6kM_i_: nmethod.o; +text: .text%__1cGEventsDlog6FpkcE_v_: sharedRuntime.o; +text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__; +text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_; +text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cIciObjectIis_klass6M_i_: ciInstance.o; +text: .text%__1cQInstanceConstantTas_InstanceConstant6M_p0_: c1_ValueType.o; +text: .text%__1cQInstanceConstantIencoding6kM_pnI_jobject__; +text: .text%__1cLInstructionOas_ArrayLength6M_pnLArrayLength__: c1_Instruction.o; +text: .text%__1cILIR_ListQunwind_exception6MnFRInfo_1pnMCodeEmitInfo__v_: c1_CodeGenerator.o; +text: .text%__1cIRuntime1Tprimitive_arraycopy6FpnIHeapWord_2i_v_; +text: .text%__1cKCompiledICMset_to_clean6M_v_; +text: .text%jni_MonitorEnter: jni.o; +text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_; +text: .text%jni_MonitorExit: jni.o; +text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_; +text: .text%jni_CallVoidMethod: jni.o; +text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_: jni.o; +text: .text%jni_CallStaticBooleanMethodV: jni.o; +text: .text%JVM_GetStackTraceDepth; +text: .text%__1cTjava_lang_ThrowableVget_stack_trace_depth6FpnHoopDesc_pnGThread__i_; +text: .text%JVM_GetStackTraceElement; +text: .text%__1cTjava_lang_ThrowableXget_stack_trace_element6FpnHoopDesc_ipnGThread__2_; +text: .text%__1cbBjava_lang_StackTraceElementGcreate6FnMmethodHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cNmethodOopDescUline_number_from_bci6kMi_i_; +text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_; +text: .text%__1cLServiceUtilLvisible_oop6FpnHoopDesc__i_: objectMonitor_solaris.o; +text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_; +text: .text%JVM_EnableCompiler; +text: .text%__1cNFingerprinterHdo_bool6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cCosHSolarisFEventEpark6Mx_i_: objectMonitor_solaris.o; +text: .text%__1cJStubQdDueueKremove_all6M_v_; +text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_: icBuffer.o; +text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__; +text: .text%__1cRNativeMovConstRegEdata6kM_i_; +text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_; +text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_; +text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_; +text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_: icBuffer.o; +text: .text%__1cUSafepointSynchronizeDend6F_v_; +text: .text%__1cCosbAmake_polling_page_readable6F_v_; +text: .text%__1cTAbstractInterpreterRignore_safepoints6F_v_; +text: .text%__1cGThreadQunboost_priority6Fp0_v_; +text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_; +text: .text%__1cCosOunguard_memory6FpcI_i_; +text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_; +# Test LoadFrame +text: .text%__1cNSignatureInfoHdo_char6M_v_: reflection.o; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: reflection.o; +text: .text%__1cLInstructionKas_ShiftOp6M_pnHShiftOp__: c1_Instruction.o; +text: .text%__1cILIR_ListLlogical_xor6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o; +text: .text%__1cKCompiledICOic_destination6kM_pC_; +text: .text%jni_CallObjectMethodV: jni.o; +text: .text%jni_SetObjectField: jni.o; +text: .text%__1cNObjectMonitorGenter26MpnGThread__v_; +text: .text%jni_IsInstanceOf: jni.o; +text: .text%jni_GetIntArrayRegion: jni.o; +text: .text%jni_SetIntArrayRegion: jni.o; +text: .text%jni_PushLocalFrame: 
jni.o; +text: .text%jni_PopLocalFrame: jni.o; +text: .text%jni_GetStaticObjectField: jni.o; +text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_: jni.o; +text: .text%__1cINegateOpFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerLdo_NegateOp6MpnINegateOp__v_; +text: .text%__1cMLinkResolverbPlinktime_resolve_interface_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cPciInstanceKlassLimplementor6M_p0_; +text: .text%__1cINegateOpPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorLdo_NegateOp6MpnINegateOp__v_; +text: .text%__1cLLIR_EmitterRarray_store_check6MpnLLIR_OprDesc_2nFRInfo_33pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListLstore_check6MpnLLIR_OprDesc_2222pnMCodeEmitInfo__v_; +text: .text%__1cXArrayStoreExceptionStub2t6MpnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListDadd6MnFRInfo_i1_v_: c1_LIREmitter_sparc.o; +text: .text%__1cIValueGenLdo_NegateOp6MpnINegateOp__v_; +text: .text%__1cLLIR_EmitterGnegate6MnFRInfo_pnLLIR_OprDesc__v_; +text: .text%__1cILIR_ListGnegate6MnFRInfo_1_v_: c1_LIREmitter.o; +text: .text%__1cXArrayStoreExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_sparc.o; +text: .text%__1cNLIR_AssemblerbIadd_debug_info_for_null_check_here6MpnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerEload6MpnMRegisterImpl_2nFRInfo_nJBasicType__i_; +text: .text%__1cXArrayStoreExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_sparc.o; +text: .text%__1cNLIR_AssemblerGnegate6MpnLLIR_OprDesc_2_v_; +text: .text%__1cOMacroAssemblerHbr_zero6MnJAssemblerJCondition_in0BHPredict_pnMRegisterImpl_rnFLabel__v_; +text: .text%__1cXArrayStoreExceptionStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cIRuntime1Jarraycopy6FpnHoopDesc_i2ii_i_; +text: .text%__1cHciKlassSsuper_check_offset6M_I_; +text: .text%__1cKCompiledICMstub_address6kM_pC_; +text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_; +text: .text%jni_NewWeakGlobalRef: jni.o; +text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__; +text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%jni_CallIntMethodV: jni.o; +text: .text%__1cIRuntime1Ohandle_ic_miss6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_; +text: .text%__1cKCompiledICOis_megamorphic6kM_i_; +text: .text%__1cLVtableStubsOis_entry_point6FpC_i_; +text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_; +text: .text%__1cLVtableStubsLcreate_stub6FiipnNmethodOopDesc__pC_; +text: .text%__1cLVtableStubsScreate_vtable_stub6Fii_pnKVtableStub__; +text: .text%__1cKVtableStub2n6FIi_pv_; +text: .text%__1cKVtableStubRpd_code_alignment6F_i_; +text: .text%__1cGEventsDlog6FpkcE_v_: compiledIC.o; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_; +text: .text%Unsafe_GetObject; +text: .text%jni_CallBooleanMethod: jni.o; +text: .text%jni_CallVoidMethodV: jni.o; +text: .text%JVM_GetClassDeclaredMethods; +text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__; +text: .text%JVM_InvokeMethod; +text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_; +text: 
.text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_; +text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_; +text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_; +text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_; +text: .text%JVM_IsInterrupted; +text: .text%__1cFMutexbLwait_for_lock_blocking_implementation6MpnKJavaThread__v_; +# Test LoadJFrame +text: .text%__1cMArithmeticOpKlock_stack6kM_pnKValueStack__: c1_Instruction.o; +text: .text%__1cLLIR_EmitterParithmetic_idiv6MnJBytecodesECode_pnLLIR_OprDesc_44nFRInfo_pnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListEirem6MnFRInfo_111pnMCodeEmitInfo__v_; +text: .text%__1cHLIR_Op3Fvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cHLIR_Op3Jemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerIemit_op36MpnHLIR_Op3__v_; +text: .text%__1cNLIR_AssemblerIemit_op36MpnHLIR_Op3__v_; +text: .text%__1cNLIR_AssemblerbCadd_debug_info_for_div0_here6MpnMCodeEmitInfo__v_; +text: .text%__1cNDivByZeroStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_sparc.o; +text: .text%__1cNDivByZeroStubJemit_code6MpnNLIR_Assembler__v_; +text: .text%__1cJFloatTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cJFloatTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cIValueGenTdo_ArithmeticOp_FPU6MpnMArithmeticOp__v_; +text: .text%__1cHLockRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cQChangeSpillCountIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cHFreeRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cLLIR_EmitterRarithmetic_op_fpu6MnJBytecodesECode_pnLLIR_OprDesc_44i_v_; +text: .text%__1cILIR_ListDmul6MpnLLIR_OprDesc_22_v_: c1_LIREmitter.o; +text: .text%__1cLGetRefCountIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIFrameMapPadd_spill_slots6Mi_v_; +text: .text%__1cFRInfoMas_float_reg6kM_pnRFloatRegisterImpl__; +text: .text%__1cIFrameMapLnr2floatreg6Fi_pnRFloatRegisterImpl__; +text: .text%__1cOMacroAssemblerCfb6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: c1_LIRAssembler_sparc.o; +text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_; +text: .text%__1cMStoreIndexedPother_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%Unsafe_EnsureClassInitialized; +text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_; +text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cNSignatureInfoIdo_short6M_v_: c1_Runtime1_sparc.o; +text: .text%Unsafe_StaticFieldOffset; +text: .text%Unsafe_StaticFieldBaseFromField; +text: .text%Unsafe_GetIntVolatile; +text: .text%JVM_Yield; +text: .text%__1cCosKdont_yield6F_i_; +# Test JHello +text: .text%__1cNSharedRuntimeElmul6Fxx_x_; +text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_; +text: .text%JVM_InitializeSocketLibrary; +text: .text%__1cDhpiZinitialize_socket_library6F_i_; +text: .text%JVM_Socket; +text: .text%Unsafe_PageSize; +text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_: interpreterRT_sparc.o; +text: .text%Unsafe_SetMemory; +text: .text%__1cNSharedRuntimeElrem6Fxx_x_; +text: .text%__1cRComputeEntryStackHdo_byte6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackHdo_byte6M_v_: generateOopMap.o; +text: .text%__1cMLinkResolverbEresolve_interface_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciInstanceKlass.o; +text: 
.text%__1cGciTypeNis_subtype_of6Mp0_i_; +text: .text%__1cPParameterMapperHdo_byte6M_v_: c1_Runtime1_sparc.o; +text: .text%Unsafe_DefineClass1; +text: .text%JVM_DefineClass; +text: .text%jni_NewDirectByteBuffer; +text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__: jni.o; +text: .text%jni_GetDoubleArrayRegion: jni.o; +text: .text%__1cNSignatureInfoJdo_double6M_v_: bytecode.o; +text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_: jni.o; +text: .text%__1cFKlassQup_cast_abstract6M_p0_; +text: .text%__1cNSharedRuntimeDd2i6Fd_i_; +text: .text%jni_GetLongField: jni.o; +text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_; +text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_; +text: .text%__1cSInterpreterRuntimeWslow_signature_handler6FpnKJavaThread_pnNmethodOopDesc_pi5_pC_; +text: .text%__1cUSlowSignatureHandlerLpass_object6M_v_: interpreterRT_sparc.o; +text: .text%__1cUSlowSignatureHandlerNadd_signature6Mi_v_: interpreterRT_sparc.o; +text: .text%__1cXNativeSignatureIteratorIdo_array6Mii_v_: interpreterRT_sparc.o; +text: .text%__1cUSlowSignatureHandlerIpass_int6M_v_: interpreterRT_sparc.o; +text: .text%jni_GetFloatArrayRegion: jni.o; +text: .text%jni_GetCharArrayRegion: jni.o; +text: .text%jni_SetFloatField: jni.o; +text: .text%jni_NewFloatArray: jni.o; +text: .text%jni_SetFloatArrayRegion: jni.o; +text: .text%__1cNFingerprinterGdo_int6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__; +# SwingSet +text: .text%JVM_GetFieldIxModifiers; +text: .text%JVM_GetCPFieldClassNameUTF; +text: .text%JVM_GetCPFieldModifiers; +text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_; +text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__; +text: .text%__1cCosHrealloc6FpvI_1_; +text: .text%__1cHnmethodIis_alive6kM_i_: nmethod.o; +text: .text%__1cHnmethodPis_dependent_on6MpnMklassOopDesc__i_; +text: .text%JVM_MonitorNotify; +text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_; +text: .text%__1cNFingerprinterJdo_object6Mii_v_: c1_Runtime1_sparc.o; +text: .text%jni_NewIntArray: jni.o; +text: .text%__1cbCTwoGenerationCollectorPolicybMshould_try_older_generation_allocation6kMI_i_; +text: .text%__1cQGenCollectedHeapSattempt_allocation6MIiii_pnIHeapWord__; +text: .text%__1cQDefNewGenerationIallocate6MIii_pnIHeapWord__: defNewGeneration.o; +text: .text%__1cKGenerationInext_gen6kM_p0_; +text: .text%__1cKGenerationYallocation_limit_reached6MpnFSpace_pnIHeapWord_I_4_: tenuredGeneration.o; +text: .text%__1cPVM_GC_OperationNdoit_prologue6M_i_; +text: .text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_; +text: .text%__1cXjava_lang_ref_ReferenceWpending_list_lock_addr6F_ppnHoopDesc__; +text: .text%__1cSObjectSynchronizerKfast_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cPVM_GC_OperationQgc_count_changed6kM_i_; +text: .text%__1cbAVM_GenCollectForAllocationEname6kM_pkc_: vm_operations.o; +text: .text%__1cbAVM_GenCollectForAllocationEdoit6M_v_; +text: .text%__1cNJvmtiGCMarker2t6Mi_v_; +text: .text%__1cQGenCollectedHeapZsatisfy_failed_allocation6MIiipi_pnIHeapWord__; +text: .text%__1cbCTwoGenerationCollectorPolicyZsatisfy_failed_allocation6MIiipi_pnIHeapWord__; +text: .text%__1cQGenCollectedHeapNdo_collection6MiiIiiipi_v_; +text: .text%__1cXTraceMemoryManagerStats2t6Mi_v_; +text: 
.text%__1cPGCMemoryManagerIgc_begin6M_v_; +text: .text%__1cKManagementJtimestamp6F_x_; +text: .text%__1cTContiguousSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cTContiguousSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cbBSurvivorContiguousSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cbBSurvivorContiguousSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cOGenerationPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cOGenerationPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cQGenCollectedHeapLgc_prologue6Mi_v_; +text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_; +text: .text%__1cQGenCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapOfill_all_tlabs6M_v_; +text: .text%__1cbCGenEnsureParseabilityClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o; +text: .text%__1cKGenerationTensure_parseability6M_v_: defNewGeneration.o; +text: .text%__1cKGenerationTensure_parseability6M_v_: tenuredGeneration.o; +text: .text%__1cKGenerationTensure_parseability6M_v_: compactingPermGenGen.o; +text: .text%__1cSAllocationProfilerViterate_since_last_gc6F_v_; +text: .text%__1cUGenGCPrologueClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o; +text: .text%__1cQDefNewGenerationLgc_prologue6Mi_v_: defNewGeneration.o; +text: .text%__1cRTenuredGenerationLgc_prologue6Mi_v_; +text: .text%__1cKGenerationLgc_prologue6Mi_v_: compactingPermGenGen.o; +text: .text%__1cKGenerationOshould_collect6MiIii_i_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationKshort_name6kM_pkc_: defNewGeneration.o; +text: .text%__1cKGenerationIcounters6M_pnRCollectorCounters__: defNewGeneration.o; +text: .text%__1cQDefNewGenerationKsave_marks6M_v_; +text: .text%__1cbCOneContigSpaceCardGenerationKsave_marks6M_v_; +text: .text%__1cQDefNewGenerationHcollect6MiiIii_v_; +text: .text%__1cRTenuredGenerationZpromotion_attempt_is_safe6kMIi_i_; +text: .text%__1cKGenerationYmax_contiguous_available6kM_I_; +text: .text%__1cbCOneContigSpaceCardGenerationUcontiguous_available6kM_I_; +text: .text%__1cMVirtualSpaceQuncommitted_size6kM_I_; +text: .text%__1cIageTableFclear6M_v_; +text: .text%__1cLCardTableRSbGprepare_for_younger_refs_iterate6Mi_v_; +text: .text%__1cULRUCurrentHeapPolicy2t6M_v_; +text: .text%__1cPCollectorPolicyPis_train_policy6M_i_: collectorPolicy.o; +text: .text%__1cQGenCollectedHeapUprocess_strong_roots6Miiin0ATClassScanningOption_pnQOopsInGenClosure_3_v_; +text: .text%__1cKSharedHeapbAchange_strong_roots_parity6M_v_; +text: .text%__1cMSubTasksDonePis_task_claimed6Mi_i_; +text: .text%__1cPFastScanClosureGdo_oop6MppnHoopDesc__v_: defNewGeneration.o; +text: .text%__1cQDefNewGenerationWcopy_to_survivor_space6MpnHoopDesc_p2_2_; +text: .text%__1cPContiguousSpaceIallocate6MI_pnIHeapWord__; +text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_; +text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_; +text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_; +text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_; +text: .text%__1cQStackFrameStream2t6MpnKJavaThread_i_v_; +text: .text%__1cFframeQoops_do_internal6MpnKOopClosure_pnLRegisterMap_i_v_; +text: .text%__1cFframeToops_interpreted_do6MpnKOopClosure_pknLRegisterMap_i_v_; +text: .text%__1cFframeVinterpreter_frame_bci6kM_i_; +text: .text%__1cRInterpreterOopMap2t6M_v_; +text: 
.text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_; +text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cLOopMapCache2t6M_v_; +text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_; +text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_; +text: .text%__1cTOopMapForCacheEntryRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_; +text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_; +text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_; +text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__; +text: .text%__1cTOopMapForCacheEntryOreport_results6kM_i_: oopMapCache.o; +text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_; +text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_; +text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_; +text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_; +text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_: frame.o; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: frame.o; +text: .text%__1cRInterpreterOopMap2T6M_v_; +text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_; +text: .text%__1cQOopMapCacheEntryPfill_for_native6M_v_; +text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pIi_v_: oopMapCache.o; +text: .text%__1cNFingerprinterLfingerprint6M_X_: oopMapCache.o; +text: .text%__1cTMaskFillerForNativeLpass_object6M_v_: oopMapCache.o; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: oopMapCache.o; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: oopMapCache.o; +text: .text%__1cTMaskFillerForNativeJpass_long6M_v_: oopMapCache.o; +text: .text%__1cRComputeEntryStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_; +text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_; +text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_; +text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_; +text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_; +text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_; +text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_; +text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_; +text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cUCompactingPermGenGenUyounger_refs_iterate6MpnQOopsInGenClosure__v_; +text: .text%__1cbCOneContigSpaceCardGenerationUyounger_refs_iterate6MpnQOopsInGenClosure__v_; +text: .text%__1cLCardTableRSbDyounger_refs_in_space_iterate6MpnFSpace_pnQOopsInGenClosure__v_; +text: .text%__1cPContiguousSpaceLnew_dcto_cl6MpnKOopClosure_nRCardTableModRefBSOPrecisionStyle_pnIHeapWord__pnVDirtyCardToOopClosure__; +text: 
.text%__1cPContiguousSpaceZused_region_at_save_marks6kM_nJMemRegion__: space.o; +text: .text%__1cRCardTableModRefBSWnon_clean_card_iterate6MpnFSpace_nJMemRegion_pnVDirtyCardToOopClosure_pnQMemRegionClosure_i_v_; +text: .text%__1cRCardTableModRefBSbBnon_clean_card_iterate_work6MnJMemRegion_pnQMemRegionClosure_i_v_; +text: .text%__1cJMemRegionMintersection6kMk0_0_; +text: .text%__1cYClearNoncleanCardWrapperMdo_MemRegion6MnJMemRegion__v_: cardTableRS.o; +text: .text%__1cVDirtyCardToOopClosureMdo_MemRegion6MnJMemRegion__v_; +text: .text%__1cWOffsetTableContigSpaceLblock_start6kMpkv_pnIHeapWord__: space.o; +text: .text%__1cbBBlockOffsetArrayContigSpaceSblock_start_unsafe6kMpkv_pnIHeapWord__; +text: .text%__1cPContiguousSpaceKblock_size6kMpknIHeapWord__I_; +text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cUContiguousSpaceDCTOCOget_actual_top6MpnIHeapWord_2_2_; +text: .text%__1cPContiguousSpaceRtoContiguousSpace6M_p0_: space.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: cpCacheKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: cpCacheKlass.o; +text: .text%__1cPFiltering_DCTOCPwalk_mem_region6MnJMemRegion_pnIHeapWord_3_v_; +text: .text%__1cUContiguousSpaceDCTOCXwalk_mem_region_with_cl6MnJMemRegion_pnIHeapWord_3pnQFilteringClosure__v_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: methodKlass.o; +text: .text%__1cLmethodKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cQFilteringClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cNinstanceKlassViterate_static_fields6MpnKOopClosure__v_; +text: .text%__1cLklassVtablePoop_oop_iterate6MpnKOopClosure__v_; +text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__; +text: .text%__1cLklassItablePoop_oop_iterate6MpnKOopClosure__v_; +text: .text%__1cKklassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cKOopClosureXshould_remember_klasses6kM_ki_: space.o; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cNinstanceKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: cpCacheKlass.o; +text: .text%__1cWconstantPoolCacheKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cWConstantPoolCacheEntryNoop_iterate_m6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: symbolKlass.o; +text: .text%__1cLsymbolKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cNobjArrayKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: 
.text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: methodKlass.o; +text: .text%__1cLmethodKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: constantPoolKlass.o; +text: .text%__1cRconstantPoolKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: symbolKlass.o; +text: .text%__1cLsymbolKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: cpCacheKlass.o; +text: .text%__1cWconstantPoolCacheKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cWConstantPoolCacheEntryLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: constantPoolKlass.o; +text: .text%__1cRconstantPoolKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_: objArrayKlassKlass.o; +text: .text%__1cKarrayKlassLobject_size6kMi_i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cParrayKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constMethodKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constMethodKlass.o; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cNinstanceKlassViterate_static_fields6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cLklassVtableRoop_oop_iterate_m6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cLklassItableRoop_oop_iterate_m6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cKklassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cKOopClosureIdo_oop_v6MppnHoopDesc__v_: space.o; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure_nJMemRegion__v_; +text: .text%__1cNinstanceKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_; +text: .text%__1cNobjArrayKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlass.o; +text: .text%__1cLCardTableRSUyounger_refs_iterate6MpnKGeneration_pnQOopsInGenClosure__v_; +text: .text%__1cMSubTasksDoneTall_tasks_completed6M_v_; +text: 
.text%__1cQGenCollectedHeapbCoop_since_save_marks_iterate6MipnPFastScanClosure_2_v_; +text: .text%__1cQDefNewGenerationbFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_; +text: .text%__1cPContiguousSpacebFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_; +text: .text%__1cNinstanceKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_; +text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_: typeArrayKlass.o; +text: .text%__1cKGenerationHpromote6MpnHoopDesc_Ip2_2_; +text: .text%__1cbCOneContigSpaceCardGenerationIallocate6MIii_pnIHeapWord__: tenuredGeneration.o; +text: .text%__1cNobjArrayKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_; +text: .text%__1cQinstanceRefKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_; +text: .text%__1cXjava_lang_ref_ReferenceNreferent_addr6FpnHoopDesc__p2_; +text: .text%__1cSReferenceProcessorSdiscover_reference6MpnHoopDesc_nNReferenceType__i_; +text: .text%__1cXjava_lang_ref_ReferenceJnext_addr6FpnHoopDesc__p2_; +text: .text%__1cXjava_lang_ref_ReferencePdiscovered_addr6FpnHoopDesc__p2_; +text: .text%__1cSReferenceProcessorTget_discovered_list6MnNReferenceType__ppnHoopDesc__; +text: .text%__1cbCOneContigSpaceCardGenerationbFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_; +text: .text%__1cQGenCollectedHeapbAno_allocs_since_save_marks6Mi_i_; +text: .text%__1cQDefNewGenerationbAno_allocs_since_save_marks6M_i_; +text: .text%__1cbCOneContigSpaceCardGenerationbAno_allocs_since_save_marks6M_i_; +text: .text%__1cQDefNewGenerationQKeepAliveClosure2t6MpnSScanWeakRefClosure__v_; +text: .text%__1cbDReferenceProcessorInitializerIis_clean6kM_v_: concurrentMarkSweepGeneration.o; +text: .text%__1cSReferenceProcessorbDprocess_discovered_references6M_v_; +text: .text%__1cSReferenceProcessorbAprocess_discovered_reflist6MppnHoopDesc_pnPReferencePolicy_i_v_; +text: .text%__1cQDefNewGenerationOIsAliveClosureLdo_object_b6MpnHoopDesc__i_; +text: .text%__1cULRUCurrentHeapPolicyWshould_clear_reference6MpnHoopDesc__i_; +text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_; +text: .text%__1cXjava_lang_ref_ReferenceIset_next6FpnHoopDesc_2_v_; +text: .text%__1cQDefNewGenerationUFastKeepAliveClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cQDefNewGenerationbCFastEvacuateFollowersClosureHdo_void6M_v_; +text: .text%__1cSReferenceProcessorQprocess_phaseJNI6M_v_; +text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cIageTablebAcompute_tenuring_threshold6MI_i_; +text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: defNewGeneration.o; +text: .text%__1cSReferenceProcessorbDenqueue_discovered_references6M_i_; +text: .text%__1cXjava_lang_ref_ReferenceRpending_list_addr6F_ppnHoopDesc__; +text: .text%__1cSReferenceProcessorbBenqueue_discovered_reflists6MppnHoopDesc__v_; +text: .text%__1cSReferenceProcessorbAenqueue_discovered_reflist6MpnHoopDesc_p2_v_; +text: .text%__1cKGenerationPupdate_gc_stats6Mii_v_: defNewGeneration.o; +text: .text%__1cRTenuredGenerationPupdate_gc_stats6Mii_v_; +text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_; +text: .text%__1cKGenerationPupdate_gc_stats6Mii_v_: compactingPermGenGen.o; +text: .text%__1cRTenuredGenerationOshould_collect6MiIii_i_; +text: .text%__1cKGenerationPshould_allocate6MIii_i_: tenuredGeneration.o; +text: .text%__1cbCOneContigSpaceCardGenerationEfree6kM_I_; +text: .text%__1cQDefNewGenerationQcompute_new_size6M_v_; +text: 
.text%__1cNMemoryServiceStrack_memory_usage6F_v_; +text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_; +text: .text%__1cQGenCollectedHeapLgc_epilogue6Mi_v_; +text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_; +text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_; +text: .text%__1cUGenGCEpilogueClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o; +text: .text%__1cQDefNewGenerationLgc_epilogue6Mi_v_; +text: .text%__1cRTenuredGenerationLgc_epilogue6Mi_v_; +text: .text%__1cbCOneContigSpaceCardGenerationLgc_epilogue6Mi_v_; +text: .text%__1cRTenuredGenerationPupdate_counters6M_v_; +text: .text%__1cUCompactingPermGenGenPupdate_counters6M_v_; +text: .text%__1cXTraceMemoryManagerStats2T6M_v_; +text: .text%__1cPGCMemoryManagerGgc_end6M_v_; +text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_; +text: .text%__1cNJvmtiGCMarker2T6M_v_; +text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_; +text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_; +text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%Unsafe_CompareAndSwapObject; +text: .text%__1cLVtableStubsScreate_itable_stub6Fii_pnKVtableStub__; +text: .text%__1cLLIR_EmitterDnop6M_v_; +text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_; +text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_; +text: .text%__1cIValueGenSload_item_patching6MpnHIRScope_ipnEItem_pnKValueStack_pnOExceptionScope__v_; +text: .text%__1cJValueTypeTas_InstanceConstant6M_pnQInstanceConstant__: c1_ValueType.o; +text: .text%__1cPParameterMapperHdo_bool6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cTGeneratePairingInfoRpossible_gc_point6MpnOBytecodeStream__i_: ciMethod.o; +text: .text%__1cTGeneratePairingInfoOreport_results6kM_i_: ciMethod.o; +text: .text%__1cMGraphBuilderMmonitorenter6MpnLInstruction__v_; +text: .text%__1cMMonitorEnterFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_MonitorEnter6MpnMMonitorEnter__v_; +text: .text%__1cNAccessMonitorIcan_trap6kM_i_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderLmonitorexit6MpnLInstruction__v_; +text: .text%__1cLMonitorExitFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerOdo_MonitorExit6MpnLMonitorExit__v_; +text: .text%__1cILongTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cILongTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cNAccessMonitorPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorPdo_MonitorEnter6MpnMMonitorEnter__v_; +text: .text%__1cQNullCheckVisitorOdo_MonitorExit6MpnLMonitorExit__v_; +text: .text%__1cIValueGenPdo_MonitorEnter6MpnMMonitorEnter__v_; +text: .text%__1cMLongConstantPas_LongConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cLLIR_EmitterJopr2intLo6MpnLLIR_OprDesc__i_; +text: .text%__1cLLIR_EmitterJopr2intHi6MpnLLIR_OprDesc__i_; +text: .text%__1cQArgumentLocationVstack_offset_in_words6kM_i_; +text: .text%__1cLLIR_EmitterVstore_stack_parameter6MpnLLIR_OprDesc_i_v_; +text: .text%__1cIValueGenOdo_MonitorExit6MpnLMonitorExit__v_; +text: .text%__1cNAccessMonitorQas_AccessMonitor6M_p0_: c1_GraphBuilder.o; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: c1_MacroAssembler_sparc.o; +text: .text%JVM_IsSameClassPackage; +text: .text%__1cKReflectionVis_same_class_package6FpnMklassOopDesc_2_i_; +text: 
.text%__1cIRuntime1Mmonitorenter6FpnKJavaThread_pnHoopDesc_pnPBasicObjectLock__v_; +text: .text%__1cIRuntime1Lmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%Unsafe_GetNativeFloat; +text: .text%__1cQComputeCallStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cRComputeEntryStackIdo_short6M_v_: generateOopMap.o; +text: .text%Unsafe_AllocateInstance; +text: .text%jni_AllocObject: jni.o; +text: .text%__1cNCanonicalizerMset_constant6Mi_v_: c1_Canonicalizer.o; +text: .text%__1cJTypeCheckPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_; +text: .text%__1cbDreorder_based_on_method_index6FpnPobjArrayOopDesc_1ppnHoopDesc__v_: methodOop.o; +text: .text%__1cRC1_MacroAssemblerTfast_ObjectHashCode6MpnMRegisterImpl_2_v_; +text: .text%__1cNFingerprinterHdo_char6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cPParameterMapperHdo_char6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cQComputeCallStackIdo_float6M_v_: generateOopMap.o; +text: .text%__1cJCodeCacheIcontains6Fpv_i_; +text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cJOopMapSetHoops_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cJOopMapSetGall_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure_pFppnHoopDesc_9E_v9B9B_v_; +text: .text%__1cICodeBlobbAoop_map_for_return_address6MpCi_pnGOopMap__; +text: .text%__1cJOopMapSetSfind_map_at_offset6kMii_pnGOopMap__; +text: .text%__1cFframeVoopmapreg_to_location6kMnFVMRegEName_pknLRegisterMap__ppnHoopDesc__; +text: .text%__1cMOopMapStreamJfind_next6M_v_; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: defNewGeneration.o; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: nmethod.o; +text: .text%__1cJOopMapSetTupdate_register_map6FpknFframe_pnICodeBlob_pnLRegisterMap__v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceRefKlass.o; +text: .text%__1cQinstanceRefKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_; +text: .text%__1cQinstanceRefKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_; +text: .text%__1cQGenCollectedHeapXhandle_failed_promotion6MpnKGeneration_pnHoopDesc_Ip4_4_; +text: .text%__1cbCOneContigSpaceCardGenerationTexpand_and_allocate6MIiii_pnIHeapWord__; +text: .text%__1cNGCMutexLocker2t6MpnFMutex__v_; +text: .text%__1cbCOneContigSpaceCardGenerationHgrow_by6MI_i_; +text: .text%__1cWBlockOffsetSharedArrayGresize6MI_v_; +text: .text%__1cPContiguousSpaceNmangle_region6MnJMemRegion__v_; +text: .text%__1cXjava_lang_ref_ReferenceOset_discovered6FpnHoopDesc_2_v_; +text: .text%__1cRTenuredGenerationKshort_name6kM_pkc_: tenuredGeneration.o; +text: .text%__1cKGenerationIcounters6M_pnRCollectorCounters__: tenuredGeneration.o; +text: .text%__1cRTenuredGenerationHcollect6MiiIii_v_; +text: .text%__1cbCOneContigSpaceCardGenerationHcollect6MiiIii_v_; +text: .text%__1cMGenMarkSweepTinvoke_at_safepoint6FipnSReferenceProcessor_i_v_; +text: .text%__1cJCodeCacheLgc_prologue6F_v_; +text: .text%__1cHThreadsLgc_prologue6F_v_; +text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_prologue6M_v_; +text: .text%__1cQGenCollectedHeapRsave_used_regions6Mii_v_; +text: .text%__1cKGenerationQsave_used_region6M_v_: tenuredGeneration.o; +text: .text%__1cbCOneContigSpaceCardGenerationLused_region6kM_nJMemRegion__; +text: .text%__1cPContiguousSpaceLused_region6kM_nJMemRegion__: space.o; +text: .text%__1cKGenerationQsave_used_region6M_v_: defNewGeneration.o; +text: 
.text%__1cKGenerationLused_region6kM_nJMemRegion__: defNewGeneration.o; +text: .text%__1cKGenerationQsave_used_region6M_v_: compactingPermGenGen.o; +text: .text%__1cQGenCollectedHeapOgather_scratch6MpnKGeneration_I_pnMScratchBlock__; +text: .text%__1cQDefNewGenerationScontribute_scratch6MrpnMScratchBlock_pnKGeneration_I_v_; +text: .text%__1cKGenerationScontribute_scratch6MrpnMScratchBlock_p0I_v_: tenuredGeneration.o; +text: .text%__1cJEventMark2t6MpkcE_v_: genMarkSweep.o; +text: .text%__1cJMarkSweepRFollowRootClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLklassVtableToop_follow_contents6M_v_; +text: .text%__1cJMarkSweepO_mark_and_push6FppnHoopDesc__v_; +text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_; +text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_; +text: .text%__1cLklassItableToop_follow_contents6M_v_; +text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMjniIdMapBaseHoops_do6MpnKOopClosure__v_; +text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_; +text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJMarkSweepPmark_and_follow6FppnHoopDesc__v_; +text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_; +text: .text%__1cJMarkSweepRFollowRootClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_; +text: .text%__1cHnmethodOis_not_entrant6kM_i_: nmethod.o; +text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_; +text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cJMarkSweepOIsAliveClosureLdo_object_b6MpnHoopDesc__i_: markSweep.o; +text: .text%__1cJMarkSweepQKeepAliveClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_: markSweep.o; +text: .text%__1cQSystemDictionaryMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cKDictionaryMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_; +text: .text%__1cJMarkSweepMfollow_stack6F_v_; +text: .text%__1cJCodeCacheMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cKBufferBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cNSingletonBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cNSingletonBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cLRuntimeStubIis_alive6kM_i_: codeBlob.o; +text: 
.text%__1cLRuntimeStubbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cHnmethodbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cVcompiledICHolderKlassSoop_being_unloaded6MpnRBoolObjectClosure_pnHoopDesc__i_; +text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_; +text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_; +text: .text%__1cQGenCollectedHeapWprepare_for_compaction6M_v_; +text: .text%__1cKGenerationWprepare_for_compaction6MpnMCompactPoint__v_; +text: .text%__1cbCOneContigSpaceCardGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: tenuredGeneration.o; +text: .text%__1cPContiguousSpaceWprepare_for_compaction6MpnMCompactPoint__v_; +text: .text%__1cWOffsetTableContigSpaceUinitialize_threshold6M_pnIHeapWord__; +text: .text%__1cMTenuredSpaceSallowed_dead_ratio6kM_i_; +text: .text%__1cQCompactibleSpaceHforward6MpnHoopDesc_IpnMCompactPoint_pnIHeapWord__6_; +text: .text%__1cWOffsetTableContigSpacePcross_threshold6MpnIHeapWord_2_2_; +text: .text%__1cbBBlockOffsetArrayContigSpaceQalloc_block_work6MpnIHeapWord_2_v_; +text: .text%__1cQCompactibleSpaceVnext_compaction_space6kM_p0_: space.o; +text: .text%__1cQDefNewGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: defNewGeneration.o; +text: .text%__1cQCompactibleSpaceSallowed_dead_ratio6kM_i_: space.o; +text: .text%__1cQCompactibleSpaceUinitialize_threshold6M_pnIHeapWord__: space.o; +text: .text%__1cbCOneContigSpaceCardGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: compactingPermGenGen.o; +text: .text%__1cPContigPermSpaceSallowed_dead_ratio6kM_i_; +text: .text%__1cUCompactingPermGenGenTpre_adjust_pointers6M_v_; +text: .text%__1cbIRecursiveAdjustSharedObjectClosureGdo_oop6MppnHoopDesc__v_: compactingPermGenGen.o; +text: .text%__1cFKlassRoop_oop_iterate_v6MpnHoopDesc_pnKOopClosure__i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassRoop_oop_iterate_v6MpnHoopDesc_pnKOopClosure__i_: methodKlass.o; +text: .text%__1cFKlassRoop_oop_iterate_v6MpnHoopDesc_pnKOopClosure__i_: constantPoolKlass.o; +text: .text%__1cNinstanceKlassRoop_oop_iterate_v6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cFKlassRoop_oop_iterate_v6MpnHoopDesc_pnKOopClosure__i_: instanceKlassKlass.o; +text: .text%__1cKOopClosureIdo_oop_v6MppnHoopDesc__v_: compactingPermGenGen.o; +text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cFKlassRoop_oop_iterate_v6MpnHoopDesc_pnKOopClosure__i_: objArrayKlassKlass.o; +text: .text%__1cNobjArrayKlassRoop_oop_iterate_v6MpnHoopDesc_pnKOopClosure__i_; +text: .text%__1cKOopClosureXshould_remember_klasses6kM_ki_: compactingPermGenGen.o; +text: .text%__1cFKlassRoop_oop_iterate_v6MpnHoopDesc_pnKOopClosure__i_: klassKlass.o; +text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_: objArrayKlassKlass.o; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: klassKlass.o; +text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: 
.text%__1cLklassVtableToop_adjust_pointers6M_v_; +text: .text%__1cFKlassRoop_oop_iterate_v6MpnHoopDesc_pnKOopClosure__i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cFKlassRoop_oop_iterate_v6MpnHoopDesc_pnKOopClosure__i_: cpCacheKlass.o; +text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_; +text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_; +text: .text%__1cLklassItableToop_adjust_pointers6M_v_; +text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_: typeArrayKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_: symbolKlass.o; +text: .text%__1cLmethodKlassOklass_oop_size6kM_i_: methodKlass.o; +text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_: constMethodKlass.o; +text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_: methodDataKlass.o; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: arrayKlassKlass.o; +text: .text%__1cFKlassRoop_oop_iterate_v6MpnHoopDesc_pnKOopClosure__i_: arrayKlassKlass.o; +text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_: constantPoolKlass.o; +text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_: cpCacheKlass.o; +text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_: compiledICHolderKlass.o; +text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryYalways_strong_classes_do6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryPplaceholders_do6FpnKOopClosure__v_; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cQGenCollectedHeapSprocess_weak_roots6MpnKOopClosure_2_v_; +text: .text%__1cRAlwaysTrueClosureLdo_object_b6MpnHoopDesc__i_: genCollectedHeap.o; +text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cbASkipAdjustingSharedStringsGdo_oop6MppnHoopDesc__v_: genCollectedHeap.o; +text: .text%__1cSReferenceProcessorPoops_do_statics6FpnKOopClosure__v_; +text: .text%__1cSReferenceProcessorHoops_do6MpnKOopClosure__v_; +text: .text%__1cJMarkSweepMadjust_marks6F_v_; +text: .text%__1cQGenCollectedHeapSgeneration_iterate6Mpn0AKGenClosure_i_v_; +text: .text%__1cYGenAdjustPointersClosureNdo_generation6MpnKGeneration__v_: genMarkSweep.o; +text: .text%__1cKGenerationPadjust_pointers6M_v_; +text: .text%__1cbCOneContigSpaceCardGenerationNspace_iterate6MpnMSpaceClosure_i_v_; +text: .text%__1cVAdjustPointersClosureIdo_space6MpnFSpace__v_: generation.o; +text: .text%__1cQCompactibleSpacePadjust_pointers6M_v_; +text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQDefNewGenerationNspace_iterate6MpnMSpaceClosure_i_v_; +text: .text%__1cUCompactingPermGenGenPadjust_pointers6M_v_; +text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: 
.text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cUCompactingPermGenGenHcompact6M_v_; +text: .text%__1cQCompactibleSpaceHcompact6M_v_; +text: .text%__1cPContiguousSpaceWreset_after_compaction6M_v_: space.o; +text: .text%__1cRGenCompactClosureNdo_generation6MpnKGeneration__v_: genMarkSweep.o; +text: .text%__1cKGenerationHcompact6M_v_; +text: .text%__1cUCompactingPermGenGenMpost_compact6M_v_; +text: .text%__1cJMarkSweepNrestore_marks6F_v_; +text: .text%__1cQGenCollectedHeapKsave_marks6M_v_; +text: .text%__1cLCardTableRSTinvalidate_or_clear6MpnKGeneration_ii_v_; +text: .text%__1cJMemRegionFminus6kMk0_0_; +text: .text%__1cLCardTableRSKinvalidate6MnJMemRegion__v_: cardTableRS.o; +text: .text%__1cRCardTableModRefBSKinvalidate6MnJMemRegion__v_; +text: .text%__1cHThreadsLgc_epilogue6F_v_; +text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_epilogue6M_v_; +text: .text%__1cFframeMpd_gc_epilog6M_v_; +text: .text%__1cJCodeCacheLgc_epilogue6F_v_; +text: .text%__1cICodeBlobTfix_oop_relocations6M_v_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: c1_Runtime1.o; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: relocInfo.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: onStackReplacement.o; +text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_; +text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: tenuredGeneration.o; +text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: compactingPermGenGen.o; +text: .text%__1cRTenuredGenerationQcompute_new_size6M_v_; +text: .text%__1cKGenerationEspec6M_pnOGenerationSpec__; +text: .text%__1cbCOneContigSpaceCardGenerationGexpand6MII_v_; +text: .text%jni_DeleteWeakGlobalRef: jni.o; +text: .text%__1cKJNIHandlesTdestroy_weak_global6FpnI_jobject__v_; +text: .text%__1cHLogicOpIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%__1cLAccessFieldKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o; +text: .text%__1cLLIR_EmitterLcmp_mem_int6MnMLIR_OpBranchNLIR_Condition_nFRInfo_iipnMCodeEmitInfo__v_; +text: .text%__1cILIR_ListDcmp6MnMLIR_OpBranchNLIR_Condition_pnLLIR_OprDesc_4pnMCodeEmitInfo__v_: c1_LIREmitter_sparc.o; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cJFloatTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cNFloatConstantQas_FloatConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cILIR_ListJfloat2reg6MfnFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cJFloatTypeMas_FloatType6M_p0_: c1_Canonicalizer.o; +text: .text%__1cRAbstractAssemblerGa_long6Mi_v_; +text: .text%__1cNConstantTableZaddress_of_float_constant6Mf_pC_; +text: .text%__1cIRuntime1Onew_type_array6FpnKJavaThread_pnMklassOopDesc_i_v_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: oopMap.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: compiledICHolderKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: compiledICHolderKlass.o; +text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_; +text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_; +text: .text%__1cOMacroAssemblerEmult6MpnMRegisterImpl_22_v_; +text: .text%__1cINewArrayPother_values_do6MpFppnLInstruction__v_v_; +text: .text%__1cJValueTypeLas_LongType6M_pnILongType__: c1_Canonicalizer.o; +text: .text%__1cNLIR_AssemblerIshift_op6MnILIR_Code_nFRInfo_222_v_; +text: .text%__1cNVM_DeoptimizeEname6kM_pkc_: vm_operations.o; 
+text: .text%__1cNVM_DeoptimizeEdoit6M_v_; +text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_; +text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_; +text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_; +text: .text%__1cFframeVshould_be_deoptimized6kM_i_; +text: .text%__1cICodeBlobOis_java_method6kM_i_: codeBlob.o; +text: .text%__1cJCodeCachebGmake_marked_nmethods_not_entrant6F_v_; +text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_; +text: .text%__1cHnmethodNis_osr_method6kM_i_: nmethod.o; +text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_; +text: .text%__1cMVM_OperationNdoit_epilogue6M_v_: vm_operations.o; +text: .text%signalHandler; +text: .text%JVM_handle_solaris_signal; +text: .text%__1cJCodeCacheQfind_blob_unsafe6Fpv_pnICodeBlob__; +text: .text%__1cILIR_ListEidiv6MnFRInfo_i11pnMCodeEmitInfo__v_; +text: .text%__1cPParameterMapperHdo_long6M_v_: c1_Runtime1_sparc.o; +text: .text%__1cLLoadIndexedIis_equal6kMpnLInstruction__i_: c1_Instruction.o; +text: .text%JVM_HoldsLock; +text: .text%__1cSObjectSynchronizerZcurrent_thread_holds_lock6FpnKJavaThread_nGHandle__i_; +text: .text%__1cIValueGenLdo_getClass6MpnJIntrinsic__v_; +text: .text%__1cLLIR_EmitterIgetClass6MnFRInfo_1pnMCodeEmitInfo__v_; +text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceRefKlass.o; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceRefKlass.o; +text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__; +text: .text%__1cHThreadsLnmethods_do6F_v_; +text: .text%__1cFframeLnmethods_do6M_v_; +text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_; +text: .text%__1cONewObjectArrayKexact_type6kM_pnGciType__; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cPciObjArrayKlassSis_obj_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__; +text: .text%__1cIRuntime1Noop_arraycopy6FpnIHeapWord_2i_v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlass.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: objArrayKlass.o; +text: .text%__1cMGraphBuilderKcompare_op6MpnJValueType_nJBytecodesECode__v_; +text: .text%__1cJCompareOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o; +text: .text%__1cNCanonicalizerMdo_CompareOp6MpnJCompareOp__v_; +text: .text%__1cJCompareOpEhash6kM_i_: c1_Instruction.o; +text: .text%__1cJCompareOpEname6kM_pkc_: c1_Instruction.o; +text: .text%__1cJCompareOpMas_CompareOp6M_p0_: c1_Instruction.o; +text: .text%__1cCIf2t6MpnLInstruction_n0BJCondition_i2pnKBlockBegin_5pnKValueStack_i_v_: c1_Canonicalizer.o; +text: .text%__1cILIR_ListVvolatile_load_mem_reg6MnFRInfo_i1nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cJValueTypeOas_IntConstant6M_pnLIntConstant__: c1_Canonicalizer.o; +text: .text%__1cEItemSget_jlong_constant6kM_x_; +text: .text%__1cRLIR_PeepholeStateLreg2indexLo6MpnLLIR_OprDesc__i_; +text: .text%__1cRLIR_PeepholeStateLreg2indexHi6MpnLLIR_OprDesc__i_; +text: .text%__1cNLIR_AssemblerQvolatile_move_op6MpnLLIR_OprDesc_2nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_; +text: .text%__1cHIntTypeKas_IntType6M_p0_: c1_Canonicalizer.o; +text: .text%__1cLLIR_EmitterQfield_store_long6MpnLLIR_OprDesc_i2ipnMCodeEmitInfo__v_; +text: .text%__1cNSharedRuntimeDf2l6Ff_x_; +text: .text%__1cJFloatTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cJValueTypeQas_FloatConstant6M_pnNFloatConstant__: c1_Canonicalizer.o; +text: 
.text%__1cKDoubleTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o; +text: .text%__1cODoubleConstantRas_DoubleConstant6M_p0_: c1_Canonicalizer.o; +text: .text%__1cKDoubleTypeDtag6kM_nIValueTag__: c1_ValueType.o; +text: .text%__1cKDoubleTypeEbase6kM_pnJValueType__: c1_Canonicalizer.o; +text: .text%__1cODoubleConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cGSetRegIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIValueGenNreturnF0RInfo6F_nFRInfo__; +text: .text%__1cILIR_ListMbranch_float6MnMLIR_OpBranchNLIR_Condition_pnFLabel_4_v_; +text: .text%__1cJFloatTypeMas_FloatType6M_p0_: c1_ValueType.o; +text: .text%__1cLLIR_EmitterOset_fpu_result6MnFRInfo__v_; +text: .text%__1cILIR_ListIpush_fpu6MnFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cNc1_AllocTableNhas_pair_free6kM_i_; +text: .text%__1cNc1_AllocTableNget_pair_free6M_i_; +text: .text%__1cHLockRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocRset_locked_double6MipnLInstruction_i_v_; +text: .text%__1cNc1_AllocTablePset_pair_locked6Mi_v_; +text: .text%__1cQChangeSpillCountJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocZchange_double_spill_count6Mii_v_; +text: .text%__1cILIR_ListKdouble2reg6MdnFRInfo__v_: c1_LIREmitter.o; +text: .text%__1cHFreeRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocPset_free_double6Mi_v_; +text: .text%__1cNc1_AllocTableNset_pair_free6Mi_v_; +text: .text%__1cIValueGenPlock_spill_temp6MpnLInstruction_nFRInfo__v_; +text: .text%__1cLLIR_EmitterPcall_convert_op6MnJBytecodesECode_nFRInfo__v_; +text: .text%__1cILIR_ListRcall_runtime_leaf6MpCnFRInfo_ii_v_: c1_LIREmitter_sparc.o; +text: .text%__1cMLIR_OpRTCallFvisit6MpnQLIR_OpVisitState__v_; +text: .text%__1cMLIR_OpRTCallJemit_code6MpnVLIR_AbstractAssembler__v_; +text: .text%__1cNLIR_OptimizerLemit_rtcall6MpnMLIR_OpRTCall__v_; +text: .text%__1cKDoubleTypeNas_DoubleType6M_p0_: c1_Canonicalizer.o; +text: .text%__1cNLIR_AssemblerFstore6MpnRFloatRegisterImpl_pnMRegisterImpl_inJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerEload6MpnMRegisterImpl_ipnRFloatRegisterImpl_nJBasicType_pnMCodeEmitInfo__v_; +text: .text%__1cNLIR_AssemblerIfpu_push6MnFRInfo__v_; +text: .text%__1cFRInfoNas_double_reg6kM_pnRFloatRegisterImpl__; +text: .text%__1cNConstantTablebAaddress_of_double_constant6Md_pC_; +text: .text%__1cNLIR_AssemblerLemit_rtcall6MpnMLIR_OpRTCall__v_; +text: .text%__1cNLIR_AssemblerHrt_call6MpCnFRInfo_ii_v_; +text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_; +text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: nmethod.o; +text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o; +text: .text%__1cHnmethodVcleanup_inline_caches6M_v_; +text: .text%__1cKCompiledIC2t6MpnKRelocation__v_; +text: .text%__1cRComputeEntryStackIdo_float6M_v_: generateOopMap.o; +text: .text%__1cIValueGenMreturn2RInfo6F_nFRInfo__; +text: .text%__1cILIR_ListRcall_runtime_leaf6MpCnFRInfo_ii_v_: c1_CodeGenerator_sparc.o; +text: .text%__1cIFrameMapUare_adjacent_indeces6kMii_i_; +text: .text%__1cILIR_ListQreg2double_stack6MnFRInfo_inJBasicType__v_: c1_LIREmitter.o; +text: .text%__1cIValueGenUdo_ArithmeticOp_Long6MpnMArithmeticOp__v_; +text: .text%__1cLLIR_EmitterSarithmetic_op_long6MnJBytecodesECode_pnLLIR_OprDesc_44pnMCodeEmitInfo__v_; +text: .text%__1cRLIR_PeepholeStateNstack2indexHi6MpnLLIR_OprDesc__i_; +text: .text%__1cHnmethodbCcan_not_entrant_be_converted6M_i_; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: oopMapCache.o; +text: .text%__1cTMaskFillerForNativeIpass_int6M_v_: oopMapCache.o; +text: 
.text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_; +text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_; +text: .text%__1cFframebDoops_interpreted_arguments_do6MnMsymbolHandle_ipnKOopClosure__v_; +text: .text%__1cLCardTableRSSclear_into_younger6MpnKGeneration_i_v_; +text: .text%__1cLCardTableRSFclear6MnJMemRegion__v_: cardTableRS.o; +text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_; +text: .text%__1cHnmethodFflush6M_v_; +text: .text%__1cJEventMark2t6MpkcE_v_: nmethod.o; +text: .text%__1cICodeBlobFflush6M_v_; +text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_; +text: .text%__1cICodeHeapKdeallocate6Mpv_v_; +text: .text%__1cQComputeCallStackJdo_double6M_v_: generateOopMap.o; +text: .text%__1cNSharedRuntimeEdrem6Fdd_d_; +text: .text%__1cRComputeEntryStackJdo_double6M_v_: generateOopMap.o; +text: .text%__1cKDoubleTypeEbase6kM_pnJValueType__: c1_ValueType.o; +text: .text%__1cKDoubleTypeEsize6kM_i_: c1_ValueType.o; +text: .text%__1cKDoubleTypeNas_DoubleType6M_p0_: c1_ValueType.o; +text: .text%__1cLLIR_EmitterSarithmetic_call_op6MnJBytecodesECode_nFRInfo__v_; +text: .text%__1cILIR_ListRcall_runtime_leaf6MpCnFRInfo_ii_v_: c1_LIREmitter.o; +text: .text%__1cGSetRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocOset_double_reg6MiipnLInstruction__v_; +text: .text%__1cJIsFreeRegJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocOis_free_double6kMi_i_; +text: .text%__1cNc1_AllocTableMis_pair_free6kMi_i_; +text: .text%__1cNc1_AllocTableIare_free6kMii_i_; +text: .text%__1cLGetValueForIdo_float6Mi_v_: c1_RegAlloc.o; +text: .text%__1cLGetRefCountJdo_double6Mi_v_: c1_RegAlloc.o; +text: .text%__1cIRegAllocNget_double_rc6kMi_i_; +text: .text%__1cJValueTypeMas_ArrayType6M_pnJArrayType__: c1_Canonicalizer.o; +text: .text%__1cKDoubleTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cOMacroAssemblerEfneg6MnRFloatRegisterImplFWidth_p13_v_; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: frame.o; +text: .text%__1cRArgumentOopFinderDset6MinJBasicType__v_: frame.o; +text: .text%__1cQArgumentLocationXincoming_stack_location6kM_nHAddress__; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: oopMapCache.o; +text: .text%__1cNFloatConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cJNullCheckMas_NullCheck6M_p0_: c1_GraphBuilder.o; +text: .text%__1cLLIR_EmitterIpop_item6MpnLLIR_OprDesc__v_; +text: .text%__1cIValueGenNreturnD0RInfo6F_nFRInfo__; +text: .text%__1cILIR_ListDdiv6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter.o; +text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cFframebAoops_compiled_arguments_do6MnMsymbolHandle_ipknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cKGC_SupportbDpreserve_callee_argument_oops6FknFframe_nMsymbolHandle_ipnKOopClosure__v_; +text: .text%__1cSFindSignatureTypesDset6MinJBasicType__v_: c1_Runtime1_sparc.o; +text: .text%__1cUC1_ArgumentOopFinderDset6MinJBasicType__v_: c1_Runtime1_sparc.o; +text: .text%__1cILongTypeEbase6kM_pnJValueType__: c1_Canonicalizer.o; +text: .text%__1cMLongConstantLis_constant6kM_i_: c1_Canonicalizer.o; +text: .text%__1cRPrivilegedElementHoops_do6MpnKOopClosure__v_; +text: .text%__1cIValueGenLspill_value6MpnLInstruction__v_; +text: .text%__1cTunsafe_intrinsic_id6FpnNsymbolOopDesc_1_nNmethodOopDescLIntrinsicId__; +text: .text%__1cMGraphBuilderVappend_unsafe_put_raw6MpnIciMethod_nJBasicType__i_; +text: .text%__1cMUnsafePutRawFvisit6MpnSInstructionVisitor__v_: 
c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_; +text: .text%__1cFmatch6FpnLUnsafeRawOp_ppnLInstruction_4pi_i_: c1_Canonicalizer.o; +text: .text%__1cIUnsafeOpLas_UnsafeOp6M_p0_: c1_GraphBuilder.o; +text: .text%__1cMGraphBuilderVappend_unsafe_get_raw6MpnIciMethod_nJBasicType__i_; +text: .text%__1cMUnsafeGetRawFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_; +text: .text%__1cMGraphBuilderNlookup_switch6M_v_; +text: .text%__1cMLookupSwitchFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o; +text: .text%__1cNCanonicalizerPdo_LookupSwitch6MpnMLookupSwitch__v_; +text: .text%__1cMUnsafePutRawPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_; +text: .text%__1cLUnsafeRawOpPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o; +text: .text%__1cQNullCheckVisitorPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_; +text: .text%__1cQNullCheckVisitorPdo_LookupSwitch6MpnMLookupSwitch__v_; +text: .text%__1cIValueGenPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_; +text: .text%__1cLLIR_EmitterOput_raw_unsafe6MpnLLIR_OprDesc_2i2nJBasicType__v_; +text: .text%__1cLLIR_EmitterMlong2address6MpnLLIR_OprDesc__nFRInfo__; +text: .text%__1cILIR_ListNstore_mem_reg6MnFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cIValueGenPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_; +text: .text%__1cLLIR_EmitterOget_raw_unsafe6MnFRInfo_pnLLIR_OprDesc_3inJBasicType__v_; +text: .text%__1cILIR_ListMload_mem_reg6MpnLLIR_Address_nFRInfo_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; +text: .text%__1cIValueGenPdo_LookupSwitch6MpnMLookupSwitch__v_; +text: .text%__1cLLIR_EmitterPlookupswitch_op6MpnLLIR_OprDesc_ipnKBlockBegin__v_; +text: .text%__1cQInstanceConstantLis_constant6kM_i_: c1_ValueType.o; +text: .text%__1cOObjectConstantLis_constant6kM_i_: c1_ValueType.o; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cIRuntime1Mnew_instance6FpnKJavaThread_pnMklassOopDesc__v_; +text: .text%__1cNSharedRuntimeEldiv6Fxx_x_; +text: .text%Unsafe_GetObjectVolatile; +text: .text%__1cGThreadOis_Java_thread6kM_i_: vmThread.o; +text: .text%__1cIValueGenTcallee_return2RInfo6F_nFRInfo__; +text: .text%__1cILongTypeLas_LongType6M_p0_: c1_Canonicalizer.o; +text: .text%__1cJFloatTypeEsize6kM_i_: c1_Canonicalizer.o; +text: .text%__1cIValueGenNrelease_roots6MpnKValueStack__v_; +text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__; +text: .text%__1cHciKlassNis_subtype_of6Mp0_i_; +text: .text%__1cFciEnvWis_dependence_violated6FpnMklassOopDesc_pnNmethodOopDesc__i_; +text: .text%__1cFciEnvZcall_has_multiple_targets6FpnNinstanceKlass_nMsymbolHandle_3ri_i_; +text: .text%__1cbCOneContigSpaceCardGenerationVunsafe_max_alloc_nogc6kM_I_; +text: .text%__1cILongTypeLas_LongType6M_p0_: c1_ValueType.o; +text: .text%__1cHciKlassIis_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_; +text: .text%__1cNSharedRuntimeDd2l6Fd_x_; +text: .text%__1cIRuntime1Qnew_object_array6FpnKJavaThread_pnMklassOopDesc_i_v_; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/reorder_COMPILER1_sparcv9 2009-08-01 04:16:52.140593160 +0100 @@ -0,0 +1 @@ +# Place holder for LP64 data. 
--- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/reorder_COMPILER2_amd64 2009-08-01 04:16:52.608585371 +0100 @@ -0,0 +1,8115 @@ +data = R0x2000; +text = LOAD ?RXO; + + +text: .text%__1cECopyRpd_disjoint_words6FpnIHeapWord_2L_v_; +text: .text%__1cSPSPromotionManagerWcopy_to_survivor_space6MpnHoopDesc__2_; +text: .text%__1cNinstanceKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cJMarkSweepO_mark_and_push6FppnHoopDesc__v_; +text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQIndexSetIteratorEnext6M_I_; +text: .text%__1cJMarkSweepPmark_and_follow6FppnHoopDesc__v_; +text: .text%__1cCosOjavaTimeMillis6F_x_; +text: .text%__1cNRelocIteratorEnext6M_i_; +text: .text%__1cQIndexSetIteratorQadvance_and_next6M_I_; +text: .text%__1cIUniverseMnon_oop_word6F_pv_; +text: .text%__1cNobjArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQObjectStartArrayMobject_start6kMpnIHeapWord__2_; +text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cJMarkSweepOIsAliveClosureLdo_object_b6MpnHoopDesc__i_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__; +text: .text%__1cIPhaseIFGIadd_edge6MII_i_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_; +text: .text%__1cIMachNodeHis_Mach6M_p0_; +text: .text%__1cENodeHis_Copy6kM_I_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_; +text: .text%__1cQIndexSetIterator2t6MpnIIndexSet__v_; +text: .text%__1cOPhaseIdealLoopOidom_no_update6kMpnENode__2_; +text: .text%__1cHnmethodKcan_unload6MpnRBoolObjectClosure_pnKOopClosure_ppnHoopDesc_i_i_; +text: .text%__1cIMachNodeNrematerialize6kM_i_; +text: .text%__1cHRegMaskFis_UP6kM_i_; +text: .text%__1cXresource_allocate_bytes6FL_pc_; +text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__; +text: .text%__1cHRegMaskESize6kM_I_; +text: .text%__1cIIndexSetLalloc_block6M_pn0AIBitBlock__; +text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_; +text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_; +text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cDLRGOcompute_degree6kMr0_i_; +text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cWPSScavengeRootsClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cYPSPromotionFailedClosureJdo_object6MpnHoopDesc__v_; +text: .text%__1cENodeEjvms6kM_pnIJVMState__; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_; +text: .text%__1cIIndexSetWalloc_block_containing6MI_pn0AIBitBlock__; +text: .text%__1cENodeHdel_out6Mp0_v_; +text: .text%__1cKRelocationLunpack_data6M_v_; +text: .text%__1cIMachNodeJideal_reg6kM_I_; +text: .text%__1cJAssemblerOlocate_operand6FpCn0AMWhichOperand__1_; +text: .text%__1cKRelocationSpd_address_in_code6M_ppC_; +text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__; +text: .text%__1cRMachSpillCopyNodeMis_SpillCopy6M_p0_; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_; +text: 
.text%__1cJCProjNodeNis_block_proj6kM_pknENode__; +text: .text%__1cENodeGis_CFG6kM_i_; +text: .text%__1cKRelocationNunpack_2_ints6Mri1_v_; +text: .text%__1cETypeDcmp6Fpk02_i_; +text: .text%__1cQObjectStartArrayWobject_starts_in_range6kMpnIHeapWord_2_i_; +text: .text%__1cOoop_RelocationLunpack_data6M_v_; +text: .text%__1cHRegMaskJis_bound16kM_i_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__; +text: .text%__1cOtypeArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cHRegMaskJis_bound26kM_i_; +text: .text%__1cRmethodDataOopDescHdata_at6Mi_pnLProfileData__; +text: .text%__1cNGrowableArray4CI_Hat_grow6MirkI_I_; +text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cRmethodDataOopDescJnext_data6MpnLProfileData__2_; +text: .text%__1cMOopTaskQdDueueOpop_local_slow6MInOTaskQdDueueSuperDAge__i_; +text: .text%__1cRMachSpillCopyNodeHis_Copy6kM_I_; +text: .text%__1cENodeGpinned6kM_i_; +text: .text%__1cOoop_RelocationJoop_value6M_pnHoopDesc__; +text: .text%__1cJrRegPOperEtype6kM_pknEType__; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_LI_v_; +text: .text%__1cMOopTaskQdDueueKpop_global6MrpnHoopDesc__i_; +text: .text%__1cPOopTaskQdDueueSetPsteal_best_of_26MipirpnHoopDesc__i_; +text: .text%__1cJVectorSet2R6MI_rnDSet__; +text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_; +text: .text%__1cSPSPromotionManagerUflush_prefetch_queue6M_v_; +text: .text%__1cIProjNodeHis_Proj6M_p0_; +text: .text%__1cPDictionaryEntrybDprotection_domain_set_oops_do6MpnKOopClosure__v_; +text: .text%__1cMOopMapStreamJfind_next6M_v_; +text: .text%__1cMloadConPNodePoper_input_base6kM_I_; +text: .text%__1cMloadConPNodeHtwo_adr6kM_I_; +text: .text%__1cMloadConPNodeErule6kM_I_; +text: .text%__1cHPhiNodeGis_Phi6M_p0_; +text: .text%__1cITypeNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__; +text: .text%__1cDff16FI_i_; +text: .text%__1cENodeNrematerialize6kM_i_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_; +text: .text%__1cFframeVoopmapreg_to_location6kMipknLRegisterMap__ppnHoopDesc__; +text: .text%__1cIIndexSetKinitialize6MI_v_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__; +text: .text%__1cMMutableSpaceMcas_allocate6ML_pnIHeapWord__; +text: .text%__1cIMachNodeGOpcode6kM_i_; +text: .text%__1cMget_live_bit6Fpii_i_: buildOopMap.o; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__; +text: .text%__1cENodeHadd_req6Mp0_v_; +text: .text%__1cENodeIout_grow6MI_v_; +text: .text%__1cMset_live_bit6Fpii_v_: buildOopMap.o; +text: .text%__1cKTypeOopPtrFklass6kM_pnHciKlass__; +text: .text%__1cIIndexSetKfree_block6MI_v_; +text: .text%__1cJCProjNodeGis_CFG6kM_i_; +text: .text%__1cETypeFuhash6Fpk0_i_; +text: .text%__1cJrRegIOperEtype6kM_pknEType__; +text: .text%__1cMPhaseChaitinLskip_copies6MpnENode__2_; +text: .text%__1cIMachNodeMcisc_operand6kM_i_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_; +text: .text%__1cICallNodeKmatch_edge6kMI_I_; +text: .text%__1cENodeHis_Call6M_pnICallNode__; +text: 
.text%__1cNCollectedHeapbDcheck_for_bad_heap_word_value6MpnIHeapWord_L_v_; +text: .text%__1cEDictGInsert6Mpv1i_1_; +text: .text%JVM_CurrentTimeMillis; +text: .text%__1cINodeHashLhash_delete6MpknENode__i_; +text: .text%__1cETypeJtype_dict6F_pnEDict__; +text: .text%__1cOPSPromotionLABKinitialize6MnJMemRegion__v_; +text: .text%__1cOPSPromotionLABFflush6M_v_; +text: .text%__1cIrc_class6Fi_nCRC__: ad_amd64.o; +text: .text%__1cMPhaseChaitinTinterfere_with_live6MIpnIIndexSet__v_; +text: .text%__1cINodeHashQhash_find_insert6MpnENode__2_; +text: .text%__1cNCollectedHeapbAcommon_mem_allocate_noinit6FLipnGThread__pnIHeapWord__; +text: .text%__1cENodeHis_Load6M_pnILoadNode__; +text: .text%__1cFArenaIcontains6kMpkv_i_; +text: .text%__1cJMultiNodeGis_CFG6kM_i_; +text: .text%__1cHPhiNodeGOpcode6kM_i_; +text: .text%__1cMPhaseChaitinKelide_copy6MpnENode_ipnFBlock_rnJNode_List_6i_i_; +text: .text%__1cKjmpDirNodeNis_block_proj6kM_pknENode__; +text: .text%__1cIProjNodeGis_CFG6kM_i_; +text: .text%__1cRMachSpillCopyNodeJideal_reg6kM_I_; +text: .text%__1cETypeIhashcons6M_pk0_; +text: .text%__1cENodeEhash6kM_I_; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_; +text: .text%__1cOlower_pressure6FpnDLRG_IpnFBlock_pI4_v_: ifg.o; +text: .text%__1cPOopTaskQdDueueSetFsteal6MipirpnHoopDesc__i_; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cKRegionNodeGis_CFG6kM_i_; +text: .text%__1cHCompileRvalid_bundle_info6MpknENode__i_; +text: .text%__1cIProjNodeGOpcode6kM_i_; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__; +text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_; +text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__; +text: .text%__1cPVirtualCallDataKcell_count6M_i_; +text: .text%__1cIProjNodeGpinned6kM_i_; +text: .text%__1cENodeMcisc_operand6kM_i_; +text: .text%__1cRInterpreterOopMapLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cMMachCallNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cMloadConINodePoper_input_base6kM_I_; +text: .text%__1cMloadConINodeHtwo_adr6kM_I_; +text: .text%__1cHNTarjanEEVAL6M_p0_; +text: .text%__1cNMachIdealNodeErule6kM_I_; +text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_; +text: .text%__1cKHandleMark2T6M_v_; +text: .text%__1cETypeLisa_oop_ptr6kM_i_; +text: .text%__1cOPhaseIdealLoopUbuild_loop_late_post6MpnENode_pk0_v_; +text: .text%__1cENode2t6MI_v_; +text: .text%__1cJloadPNodeErule6kM_I_; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__; +text: .text%__1cMloadConINodeErule6kM_I_; +text: .text%__1cICodeHeapKfind_start6kMpv_1_; +text: .text%__1cWShouldNotReachHereNodeNis_block_proj6kM_pknENode__; +text: .text%__1cHTypeIntCeq6kMpknEType__i_; +text: .text%__1cLProfileDataPfollow_contents6M_v_; +text: .text%__1cLProfileDataPadjust_pointers6M_v_; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__; +text: .text%__1cHRegMaskMClearToPairs6M_v_; +text: .text%__1cJPhaseLiveLadd_liveout6MpnFBlock_IrnJVectorSet__v_; +text: .text%__1cLemit_opcode6FrnKCodeBuffer_i_v_; +text: .text%__1cIPhaseIFGQeffective_degree6kMI_i_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__; +text: .text%__1cFArenaIArealloc6MpvLL_1_; +text: .text%__1cGIfNodeGOpcode6kM_i_; +text: .text%__1cHTypePtrEhash6kM_i_; +text: .text%__1cIPhaseIFGLremove_node6MI_pnIIndexSet__; +text: .text%__1cIPhaseIFGJre_insert6MI_v_; +text: .text%__1cRMachSpillCopyNodeLbottom_type6kM_pknEType__; +text: .text%__1cMclr_live_bit6Fpii_v_: buildOopMap.o; +text: .text%__1cETypeEmeet6kMpk0_2_; +text: 
.text%__1cMPhaseChaitinQis_high_pressure6MpnFBlock_pnDLRG_I_i_; +text: .text%__1cKBranchDataKcell_count6M_i_; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cOPhaseIdealLoopYsplit_if_with_blocks_pre6MpnENode__2_; +text: .text%__1cNIdealLoopTreeJis_member6kMpk0_i_; +text: .text%__1cOPhaseIdealLoopZsplit_if_with_blocks_post6MpnENode__v_; +text: .text%__1cDfh16FI_i_; +text: .text%__1cJraw_score6Fdd_d_: chaitin.o; +text: .text%__1cDLRGFscore6kM_d_; +text: .text%__1cKTypeOopPtrEhash6kM_i_; +text: .text%__1cIAddPNodeGOpcode6kM_i_; +text: .text%__1cKIfTrueNodeGOpcode6kM_i_; +text: .text%__1cMPhaseChaitinMchoose_color6MrnDLRG_i_i_; +text: .text%__1cMPhaseIterGVNNtransform_old6MpnENode__2_; +text: .text%__1cGcmpkey6Fpkv1_i_; +text: .text%__1cETypeJsingleton6kM_i_; +text: .text%__1cIMachNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cSPSPromotionManagerMdrain_stacks6M_v_; +text: .text%__1cHConNodeGOpcode6kM_i_; +text: .text%__1cITypeLongCeq6kMpknEType__i_; +text: .text%__1cUParallelScavengeHeapVlarge_typearray_limit6M_L_; +text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cMPhaseChaitinKbias_color6MrnDLRG_i_i_; +text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_; +text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_; +text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__; +text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_; +text: .text%__1cRMachSpillCopyNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cMMachProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cLOptoRuntimeXdeoptimize_caller_frame6FpnKJavaThread_i_v_; +text: .text%__1cSCallStaticJavaNodeGOpcode6kM_i_; +text: .text%__1cYCallStaticJavaDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cJCodeCacheFalive6FpnICodeBlob__2_; +text: .text%__1cHRegMaskQis_aligned_Pairs6kM_i_; +text: .text%__1cSis_single_register6FI_i_: postaloc.o; +text: .text%__1cRMachSpillCopyNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMPhaseIterGVNWadd_users_to_worklist06MpnENode__v_; +text: .text%__1cECopyYconjoint_words_to_higher6FpnIHeapWord_2L_v_; +text: .text%__1cILRG_ListGextend6MII_v_; +text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_; +text: .text%__1cIciObjectGequals6Mp0_i_; +text: .text%__1cJPhaseLiveGgetset6MpnFBlock__pnIIndexSet__; +text: .text%__1cIConINodeGOpcode6kM_i_; +text: .text%__1cENodeRis_cisc_alternate6kM_i_; +text: .text%__1cLIfFalseNodeGOpcode6kM_i_; +text: .text%__1cKNode_ArrayGinsert6MIpnENode__v_; +text: .text%__1cLCounterDataKcell_count6M_i_; +text: .text%__1cWThreadLocalAllocBufferFreset6M_v_; +text: .text%__1cMMachProjNodeGOpcode6kM_i_; +text: .text%__1cENodeEgrow6MI_v_; +text: .text%__1cIMachNodePcompute_padding6kMi_i_; +text: .text%__1cKup_one_dom6FpnENode__1_: ifnode.o; +text: .text%__1cIMachNodeSalignment_required6kM_i_; +text: .text%__1cOPhaseIdealLoopEsort6MpnNIdealLoopTree_2_2_; +text: .text%__1cFframeOis_entry_frame6kM_i_; +text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_L_pnIHeapWord__; +text: .text%__1cUParallelScavengeHeapVunsafe_max_tlab_alloc6kM_L_; +text: .text%__1cHNTarjanICOMPRESS6M_v_; +text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_; +text: .text%__1cFMutexGunlock6M_v_; +text: .text%__1cJMultiNodeIis_Multi6M_p0_; +text: .text%__1cIBoolNodeGOpcode6kM_i_; +text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_; +text: 
.text%__1cUParallelScavengeHeapRallocate_new_tlab6ML_pnIHeapWord__; +text: .text%__1cKSharedHeapXfill_region_with_object6FnJMemRegion__v_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6MpnIHeapWord_22_v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_; +text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2L_v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_; +text: .text%__1cKTypeOopPtrCeq6kMpknEType__i_; +text: .text%__1cHTypePtrCeq6kMpknEType__i_; +text: .text%__1cRMachSpillCopyNodePoper_input_base6kM_I_; +text: .text%__1cIIndexSetFclear6M_v_; +text: .text%__1cHTypeIntJsingleton6kM_i_; +text: .text%__1cJPhaseLiveLadd_liveout6MpnFBlock_pnIIndexSet_rnJVectorSet__v_; +text: .text%__1cECopyXconjoint_words_to_lower6FpnIHeapWord_2L_v_; +text: .text%__1cNRelocIteratorKset_limits6MpC1_v_; +text: .text%__1cNRelocIteratorKinitialize6MlpnICodeBlob_pC3_v_; +text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cITypeNodeJideal_reg6kM_I_; +text: .text%__1cFframeUis_interpreted_frame6kM_i_; +text: .text%__1cHTypeIntEhash6kM_i_; +text: .text%__1cIPhaseGVNJtransform6MpnENode__2_; +text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__; +text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_; +text: .text%__1cIMachNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cIciObjectEhash6M_i_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__; +text: .text%__1cFState2T6M_v_; +text: .text%__1cPVirtualCallDataPfollow_contents6M_v_; +text: .text%__1cPVirtualCallDataPadjust_pointers6M_v_; +text: .text%__1cENodeGis_Con6kM_I_; +text: .text%__1cJrRegIOperJnum_edges6kM_I_; +text: .text%__1cENodeIget_long6kM_x_; +text: .text%__1cNCellTypeStateFmerge6kM0i_0_; +text: .text%__1cOPhaseIdealLoopUbuild_loop_tree_impl6MpnENode_i_i_; +text: .text%__1cWNode_Backward_IteratorEnext6M_pnENode__; +text: .text%__1cHemit_rm6FrnKCodeBuffer_iii_v_; +text: .text%__1cHPhiNodeGpinned6kM_i_; +text: .text%__1cITypeNodeEhash6kM_I_; +text: .text%__1cIIndexSetKinitialize6MIpnFArena__v_; +text: .text%__1cJPhaseLiveKgetfreeset6M_pnIIndexSet__; +text: .text%__1cMMachProjNodeJideal_reg6kM_I_; +text: .text%__1cHPhiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIParmNodeGis_CFG6kM_i_; +text: .text%__1cENodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cKRegionNodeGOpcode6kM_i_; +text: .text%__1cMMachProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOptoRuntimeFnew_C6FpnMklassOopDesc_pnKJavaThread__v_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__; +text: .text%__1cIProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_; +text: .text%__1cLTypeInstPtrEhash6kM_i_; +text: .text%__1cLuse_dom_lca6FpnFBlock_pnENode_3rnLBlock_Array__1_: gcm.o; +text: .text%__1cITypeLongEhash6kM_i_; +text: .text%__1cIBoolNodeHis_Bool6M_p0_; +text: .text%__1cOPhaseIdealLoopOget_early_ctrl6MpnENode__2_; +text: .text%__1cOPhaseIdealLoopOset_early_ctrl6MpnENode__v_; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__; +text: .text%__1cIJumpDataKcell_count6M_i_; +text: .text%__1cHNTarjanELINK6Mp01_v_; +text: .text%__1cENodeIIdentity6MpnOPhaseTransform__p0_; +text: .text%__1cIMachNode2t6M_v_; +text: .text%__1cFStateRMachOperGenerator6MipnIMachNode_pnHCompile__pnIMachOper__; +text: .text%__1cRPSOldPromotionLABFflush6M_v_; +text: 
.text%__1cICallNodeHis_Call6M_p0_; +text: .text%__1cENodeFIdeal6MpnIPhaseGVN_i_p0_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__; +text: .text%__1cENodeNis_block_proj6kM_pk0_; +text: .text%__1cUParallelScavengeHeapPis_in_permanent6kMpkv_i_; +text: .text%__1cHdom_lca6FpnFBlock_1_1_: gcm.o; +text: .text%__1cOPhaseIdealLoopThas_local_phi_input6MpnENode__2_; +text: .text%__1cJVectorSet2F6kMI_i_; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cICallNodeLbottom_type6kM_pknEType__; +text: .text%JVM_ArrayCopy; +text: .text%__1cOtypeArrayKlassQoop_is_typeArray6kM_i_; +text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cIMachNodeLbottom_type6kM_pknEType__; +text: .text%__1cPClassFileParserOcheck_property6MipkcipnGThread__v_; +text: .text%__1cFState2t6M_v_; +text: .text%__1cFStateDDFA6MipknENode__i_; +text: .text%__1cJPhaseLiveHfreeset6MpknFBlock__v_; +text: .text%__1cOPhaseIdealLoopbIdom_lca_for_get_late_ctrl_internal6MpnENode_22_2_; +text: .text%__1cKRegionNodeGpinned6kM_i_; +text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__; +text: .text%__1cRSignatureIteratorGexpect6Mc_v_; +text: .text%__1cIProjNodeEhash6kM_I_; +text: .text%__1cENodeFclone6kM_p0_; +text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cIPhaseCCPOtransform_once6MpnENode__2_; +text: .text%__1cHCompileMFillLocArray6MpnENode_pnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cHRegMaskPfind_first_pair6kM_i_; +text: .text%__1cENodeKmatch_edge6kMI_I_; +text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__; +text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cNPhaseRegAllocKreg2offset6kMi_i_; +text: .text%__1cNPhaseRegAllocUreg2offset_unchecked6kMi_i_; +text: .text%__1cbAfinal_graph_reshaping_impl6FpnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cETypeFxmeet6kMpk0_2_; +text: .text%__1cENodeFis_If6M_pnGIfNode__; +text: .text%__1cJStartNodeLbottom_type6kM_pknEType__; +text: .text%__1cICmpPNodeGOpcode6kM_i_; +text: .text%__1cOMethodLivenessKBasicBlockXcompute_gen_kill_single6MpnQciBytecodeStream__v_; +text: .text%__1cKjmpDirNodeMideal_Opcode6kM_i_; +text: .text%__1cOPhaseIdealLoopZremix_address_expressions6MpnENode__2_; +text: .text%__1cFMutexElock6MpnGThread__v_; +text: .text%__1cILoadNodeHis_Load6M_p0_; +text: .text%__1cGciTypeEmake6FnJBasicType__p0_; +text: .text%__1cQUnique_Node_ListGremove6MpnENode__v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_; +text: .text%__1cRMachSafePointNodeEjvms6kM_pnIJVMState__; +text: .text%__1cNinstanceKlassMclass_loader6kM_pnHoopDesc__; +text: .text%__1cOPhaseIdealLoopNget_late_ctrl6MpnENode_2_2_; +text: .text%__1cRMachSpillCopyNodeOimplementation6kMpnKCodeBuffer_pnNPhaseRegAlloc_i_I_; +text: .text%__1cKTypeAryPtrEhash6kM_i_; +text: .text%__1cIIndexSet2t6Mp0_v_; +text: .text%__1cNrFlagsRegOperEtype6kM_pknEType__; +text: .text%__1cHTypeInt2t6Miii_v_; +text: .text%__1cTconstantPoolOopDescbAname_and_type_ref_index_at6Mi_i_; +text: .text%__1cMPhaseChaitinSuse_prior_register6MpnENode_I2pnFBlock_rnJNode_List_6_i_; +text: .text%__1cIGraphKitHstopped6M_i_; +text: .text%__1cKTypeOopPtrJsingleton6kM_i_; +text: .text%__1cENodeQIdeal_DU_postCCP6MpnIPhaseCCP__p0_; +text: .text%__1cNSafePointNodeGpinned6kM_i_; +text: .text%__1cQCompressedStream2t6MpCi_v_; +text: .text%__1cIConLNodeGOpcode6kM_i_; +text: 
.text%__1cHPhiNodeEhash6kM_I_; +text: .text%__1cGIfNodeGpinned6kM_i_; +text: .text%__1cOis_diamond_phi6FpnENode__i_: cfgnode.o; +text: .text%__1cLTypeInstPtrCeq6kMpknEType__i_; +text: .text%__1cENodeHsize_of6kM_I_; +text: .text%__1cENodeSremove_dead_region6MpnIPhaseGVN_i_i_; +text: .text%__1cHRegMaskMSmearToPairs6M_v_; +text: .text%__1cSCallStaticJavaNodeEhash6kM_I_; +text: .text%jni_GetObjectField: jni.o; +text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_; +text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cKNode_ArrayEgrow6MI_v_; +text: .text%__1cLklassVtableToop_adjust_pointers6M_v_; +text: .text%__1cLklassVtableToop_follow_contents6M_v_; +text: .text%__1cCosOis_interrupted6FpnGThread_i_i_; +text: .text%__1cICmpINodeGOpcode6kM_i_; +text: .text%__1cHMatcherKLabel_Root6MpknENode_pnFState_p16_6_; +text: .text%__1cMPhaseChaitinHnew_lrg6MpknENode_I_v_; +text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__; +text: .text%__1cUGenericGrowableArrayMraw_allocate6Mi_pv_; +text: .text%__1cJMarkSweepNpreserve_mark6FpnHoopDesc_pnLmarkOopDesc__v_; +text: .text%__1cIMachNodeKconst_size6kM_i_; +text: .text%__1cGIfNodeFis_If6M_p0_; +text: .text%__1cINodeHashLhash_insert6MpnENode__v_; +text: .text%__1cOPhaseIdealLoopMis_dominator6MpnENode_2_i_; +text: .text%__1cKTypeOopPtrLxadd_offset6kMi_i_; +text: .text%JVM_Read; +text: .text%__1cDhpiEread6FipvI_L_; +text: .text%__1cIMachNodeIpeephole6MpnFBlock_ipnNPhaseRegAlloc_ri_p0_; +text: .text%__1cMPhaseIterGVNVadd_users_to_worklist6MpnENode__v_; +text: .text%__1cIHaltNodeGis_CFG6kM_i_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__; +text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_; +text: .text%__1cMMergeMemNodeGOpcode6kM_i_; +text: .text%__1cPciObjectFactoryEfind6MpnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cJCodeCacheQfind_blob_unsafe6Fpv_pnICodeBlob__; +text: .text%__1cHemit_d86FrnKCodeBuffer_i_v_; +text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffer_ipnLOopRecorder_pkcii_v_; +text: .text%__1cLOopRecorder2t6MpnFArena__v_; +text: .text%__1cKCodeBuffer2T6M_v_; +text: .text%__1cKCodeBufferQalloc_relocation6MI_v_; +text: .text%__1cIMachNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIMachNodeJemit_size6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLTypeInstPtr2t6MnHTypePtrDPTR_pnHciKlass_ipnIciObject_i_v_; +text: .text%__1cLOptoRuntimeKjbyte_copy6FpW1L_v_; +text: .text%__1cJloadINodeErule6kM_I_; +text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_; +text: .text%__1cPciObjectFactoryLis_found_at6MipnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode__i_; +text: .text%__1cIAddINodeGOpcode6kM_i_; +text: .text%__1cWShouldNotReachHereNodeMideal_Opcode6kM_i_; +text: .text%__1cOmatch_into_reg6FpnENode_iii1_i_: matcher.o; +text: .text%__1cENodeJis_Branch6kM_I_; +text: .text%__1cITypeLong2t6Mxxi_v_; +text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cIJVMStateIof_depth6kMi_p0_; +text: .text%__1cFMutexElock6M_v_; +text: .text%__1cJrRegLOperEtype6kM_pknEType__; +text: .text%__1cENode2t6Mp0_v_; +text: .text%__1cLTypeInstPtrEmake6FnHTypePtrDPTR_pnHciKlass_ipnIciObject_i_pk0_; +text: .text%__1cJStartNodeGpinned6kM_i_; +text: .text%__1cHhashptr6Fpkv_i_; +text: .text%__1cLklassItableToop_adjust_pointers6M_v_; +text: 
.text%__1cLklassItableToop_follow_contents6M_v_; +text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_; +text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_; +text: .text%__1cFBlockLis_uncommon6kMrnLBlock_Array__i_; +text: .text%__1cEDict2F6kMpkv_pv_; +text: .text%__1cIProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cNCatchProjNodeGOpcode6kM_i_; +text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__; +text: .text%__1cICallNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJdo_method6FpnNmethodOopDesc__v_: recompilationMonitor.o; +text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cKjmpConNodeMideal_Opcode6kM_i_; +text: .text%__1cJLoadPNodeGOpcode6kM_i_; +text: .text%__1cLSymbolTableGlookup6MipkciI_pnNsymbolOopDesc__; +text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cOindOffset8OperJnum_edges6kM_I_; +text: .text%__1cKBufferBlobIis_alive6kM_i_; +text: .text%__1cFframeGsender6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cJTypeTupleJsingleton6kM_i_; +text: .text%__1cKTypeAryPtrCeq6kMpknEType__i_; +text: .text%__1cHPhiNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cETypeKhas_memory6kM_i_; +text: .text%__1cHMatcherKReduceOper6MpnFState_ipnIMachNode_rpnENode__v_; +text: .text%__1cGIfNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMachNodeFreloc6kM_i_; +text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__; +text: .text%__1cOThreadCritical2T6M_v_; +text: .text%__1cOThreadCritical2t6M_v_; +text: .text%__1cFframeOis_first_frame6kM_i_; +text: .text%__1cICodeBlobOis_osr_adapter6kM_i_; +text: .text%__1cHConNodeGis_Con6kM_I_; +text: .text%__1cIAddPNodeKmatch_edge6kMI_I_; +text: .text%__1cILoadNodeEhash6kM_I_; +text: .text%__1cRMachSpillCopyNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cICodeBlobTfix_oop_relocations6MpC1_v_; +text: .text%__1cICodeBlobTfix_oop_relocations6M_v_; +text: .text%__1cIimmIOperIconstant6kM_l_; +text: .text%__1cHCmpNodeGis_Cmp6kM_pk0_; +text: .text%__1cFMutexbClock_without_safepoint_check6M_v_; +text: .text%__1cENodeHdel_req6MI_v_; +text: .text%__1cJCProjNodeEhash6kM_I_; +text: .text%__1cHCompileJcan_alias6MpknHTypePtr_i_i_; +text: .text%__1cJMultiNodeEhash6kM_I_; +text: .text%__1cRSignatureIteratorKparse_type6M_i_; +text: .text%__1cHnmethodIis_alive6kM_i_; +text: .text%__1cIHaltNodeGOpcode6kM_i_; +text: .text%__1cMMergeMemNodeLis_MergeMem6M_p0_; +text: .text%__1cUPSMarkSweepDecoratorQinsert_deadspace6MrlpnIHeapWord_L_i_; +text: .text%__1cHPhiNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMPhaseIterGVNZremove_globally_dead_node6MpnENode__v_; +text: .text%__1cETypeEhash6kM_i_; +text: .text%__1cJHashtableLhash_symbol6Fpkci_I_; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pnIciObject_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cYDebugInformationRecorderLcheck_phase6Mn0AFPhase__v_; +text: .text%__1cOMachReturnNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_; +text: 
.text%__1cICodeBlobKis_nmethod6kM_i_; +text: .text%__1cICmpUNodeGOpcode6kM_i_; +text: .text%__1cJOopMapSetSfind_map_at_offset6kMi_pnGOopMap__; +text: .text%__1cICodeBlobbAoop_map_for_return_address6MpC_pnGOopMap__; +text: .text%__1cIConPNodeGOpcode6kM_i_; +text: .text%__1cIMachNodeHtwo_adr6kM_I_; +text: .text%__1cIParmNodeGOpcode6kM_i_; +text: .text%__1cTconvI2L_reg_regNodeErule6kM_I_; +text: .text%__1cHTypeIntFxmeet6kMpknEType__3_; +text: .text%__1cLAdapterInfoFequal6kMp0_i_; +text: .text%__1cGOopMapbEmap_compiler_reg_to_oopmap_reg6Miii_i_; +text: .text%__1cTconstantPoolOopDescSklass_ref_index_at6Mi_i_; +text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cMloadConINodeMideal_Opcode6kM_i_; +text: .text%__1cGTarjanEEVAL6M_p0_; +text: .text%__1cKRelocationYindex_to_runtime_address6Fl_pC_; +text: .text%__1cYexternal_word_RelocationLunpack_data6M_v_; +text: .text%__1cJCatchNodeGOpcode6kM_i_; +text: .text%__1cZPhaseConservativeCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cITypeLongJsingleton6kM_i_; +text: .text%__1cNencode_RegMem6FrnKCodeBuffer_iiiiii_v_; +text: .text%__1cFBlockGselect6MrnJNode_List_rnLBlock_Array_pirnJVectorSet_IrnNGrowableArray4CI___pnENode__; +text: .text%__1cGOopMapHset_xxx6MinLOopMapValueJoop_types_iii_v_; +text: .text%__1cFMutexNowned_by_self6kM_i_; +text: .text%__1cTCreateExceptionNodeErule6kM_I_; +text: .text%__1cIJVMStateLdebug_start6kM_I_; +text: .text%__1cRCardTableModRefBSEkind6M_nKBarrierSetEName__; +text: .text%__1cMloadConPNodeMideal_Opcode6kM_i_; +text: .text%__1cIMachNodeNoperand_index6kMI_i_; +text: .text%__1cKMachIfNodeJis_MachIf6kM_pk0_; +text: .text%__1cFBlockIis_Empty6kM_i_; +text: .text%__1cMMachTypeNodeLbottom_type6kM_pknEType__; +text: .text%__1cIAddPNodeLbottom_type6kM_pknEType__; +text: .text%__1cFBlockOcode_alignment6M_I_; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_; +text: .text%__1cGBitMapUclear_range_of_words6MLL_v_; +text: .text%__1cGBitMapFclear6M_v_; +text: .text%__1cSinstanceKlassKlassMoop_is_klass6kM_i_; +text: .text%__1cJrRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKCastPPNodeGOpcode6kM_i_; +text: .text%__1cIPhaseIFGMtest_edge_sq6kMII_i_; +text: .text%__1cFframeQoops_do_internal6MpnKOopClosure_pnLRegisterMap_i_v_; +text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__; +text: .text%__1cFStateRMachNodeGenerator6MipnHCompile__pnIMachNode__; +text: .text%__1cHMatcherKReduceInst6MpnFState_irpnENode__pnIMachNode__; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__; +text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_; +text: .text%__1cMMachCallNodeGpinned6kM_i_; +text: .text%__1cHnmethodJis_zombie6kM_i_; +text: .text%__1cMMergeMemNodeLbottom_type6kM_pknEType__; +text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cKTypeAryPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cKCodeBufferIrelocate6MpCrknQRelocationHolder_i_v_; +text: .text%__1cILoadNodeLbottom_type6kM_pknEType__; +text: .text%__1cLConvI2LNodeGOpcode6kM_i_; +text: .text%__1cHTypeIntEmake6Fi_pk0_; +text: .text%__1cbFCompressedLineNumberWriteStreamKwrite_pair6Mii_v_; +text: .text%__1cJMultiNodeIproj_out6kMI_pnIProjNode__; +text: .text%__1cFKlassMoop_is_array6kM_i_; +text: .text%__1cIBoolNodeEhash6kM_I_; +text: .text%__1cIimmPOperEtype6kM_pknEType__; +text: .text%__1cMloadConPNodeLbottom_type6kM_pknEType__; +text: 
.text%__1cJrRegPOperJnum_edges6kM_I_; +text: .text%__1cOrFlagsRegUOperEtype6kM_pknEType__; +text: .text%__1cGIfNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_; +text: .text%__1cNsymbolOopDescLas_C_string6kMpci_1_; +text: .text%__1cGOopMapJset_value6Miii_v_; +text: .text%__1cITypeLongFxmeet6kMpknEType__3_; +text: .text%__1cNCollectedHeapbHcheck_for_non_bad_heap_word_value6MpnIHeapWord_L_v_; +text: .text%__1cHMatcherTReduceInst_Interior6MpnFState_ipnIMachNode_IrpnENode__I_; +text: .text%__1cNsymbolOopDescGequals6kMpkci_i_; +text: .text%__1cMPhaseChaitinLinsert_proj6MpnFBlock_IpnENode_I_v_; +text: .text%__1cKjmpConNodeGpinned6kM_i_; +text: .text%__1cNSafePointNodebBneeds_polling_address_input6F_i_; +text: .text%__1cHCompileRprobe_alias_cache6MpknHTypePtr__pn0APAliasCacheEntry__; +text: .text%__1cHnmethodOis_not_entrant6kM_i_; +text: .text%__1cIAddPNodeHis_AddP6M_p0_; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6FnTobjArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cIMachNodeGExpand6MpnFState_rnJNode_List__p0_; +text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_; +text: .text%__1cNinstanceKlassQarray_klass_impl6FnTinstanceKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cENodeIdestruct6M_v_; +text: .text%__1cIAddPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPClassFileParserbEparse_constant_pool_utf8_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_; +text: .text%__1cMMachHaltNodeEjvms6kM_pnIJVMState__; +text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cGBitMapJset_union6M0_v_; +text: .text%__1cPciInstanceKlassMis_interface6M_i_; +text: .text%__1cHTypeIntEmake6Fiii_pk0_; +text: .text%__1cJTypeTupleEhash6kM_i_; +text: .text%__1cFParsePdo_one_bytecode6M_v_; +text: .text%__1cFParseNdo_exceptions6M_v_; +text: .text%__1cKRegionNodeEhash6kM_I_; +text: .text%__1cMMutableSpaceIallocate6ML_pnIHeapWord__; +text: .text%__1cUParallelScavengeHeapWpermanent_mem_allocate6ML_pnIHeapWord__; +text: .text%__1cJPSPermGenSallocate_permanent6ML_pnIHeapWord__; +text: .text%__1cPmethodDataKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cIHaltNodeKmatch_edge6kMI_I_; +text: .text%__1cENodeOis_block_start6kM_i_; +text: .text%__1cHMatcherQis_save_on_entry6Mi_i_; +text: .text%__1cLsymbolKlassNoop_is_symbol6kM_i_; +text: .text%__1cITypeLongEmake6Fxxi_pk0_; +text: .text%__1cPmethodDataKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cPmethodDataKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMPhaseIterGVNKis_IterGVN6M_p0_; +text: .text%__1cGBitMap2t6MpLL_v_; +text: .text%__1cLOptoRuntimePnew_typeArray_C6FnJBasicType_ipnKJavaThread__v_; +text: .text%__1cHCompilePfind_alias_type6MpknHTypePtr_i_pn0AJAliasType__; +text: .text%__1cNnew_loc_value6FpnNPhaseRegAlloc_inILocationEType__pnNLocationValue__: output.o; +text: .text%__1cKjmpDirNodePoper_input_base6kM_I_; +text: .text%__1cMMachCallNodeLis_MachCall6M_p0_; +text: .text%__1cHPhiNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cXjava_lang_ref_ReferenceNreferent_addr6FpnHoopDesc__p2_; +text: .text%__1cOindOffset8OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: 
.text%__1cOindOffset8OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cZPhaseConservativeCoalesceJcopy_copy6MpnENode_2pnFBlock_I_i_; +text: .text%__1cKutf8_write6FpCH_0_: utf8.o; +text: .text%__1cKNode_ArrayGremove6MI_v_; +text: .text%__1cTconstantPoolOopDescQsignature_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cHPhiNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_; +text: .text%__1cJloadPNodeMideal_Opcode6kM_i_; +text: .text%__1cPSignatureStreamEnext6M_v_; +text: .text%__1cOindOffset8OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLjmpConUNodeMideal_Opcode6kM_i_; +text: .text%__1cFBlockJfind_node6kMpknENode__I_; +text: .text%__1cOis_range_check6FpnENode_r12ri_i_: ifnode.o; +text: .text%__1cENodeIis_Start6M_pnJStartNode__; +text: .text%__1cHhashkey6Fpkv_i_; +text: .text%__1cNLoadRangeNodeGOpcode6kM_i_; +text: .text%__1cJLoadINodeGOpcode6kM_i_; +text: .text%__1cKis_x2logic6FpnIPhaseGVN_pnENode__3_: cfgnode.o; +text: .text%__1cHAbsNodeLis_absolute6FpnIPhaseGVN_pnENode__4_; +text: .text%__1cGTarjanICOMPRESS6M_v_; +text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cENodeHis_Goto6kM_I_; +text: .text%__1cLPCTableNodeGpinned6kM_i_; +text: .text%JVM_ReleaseUTF; +text: .text%__1cHSubNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFChunk2t6ML_v_; +text: .text%__1cFChunk2n6FLL_pv_; +text: .text%__1cOindOffset8OperFscale6kM_i_; +text: .text%__1cMCreateExNodeGOpcode6kM_i_; +text: .text%__1cFframebCsender_for_interpreter_frame6kMpnLRegisterMap__0_; +text: .text%__1cFChunk2k6Fpv_v_; +text: .text%__1cOMachReturnNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cJrelocInfo2t6Mn0AJrelocType_ii_v_; +text: .text%__1cHMatcherKmatch_tree6MpknENode__pnIMachNode__; +text: .text%__1cLTypeInstPtrFxmeet6kMpknEType__3_; +text: .text%__1cKjmpConNodePoper_input_base6kM_I_; +text: .text%__1cIJVMState2t6MpnIciMethod_p0_v_; +text: .text%__1cMciMethodDataHdata_at6Mi_pnLProfileData__; +text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_; +text: .text%__1cIProjNodeHsize_of6kM_I_; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__; +text: .text%__1cLTypeInstPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cHBitDataKcell_count6M_i_; +text: .text%__1cFframeZsender_for_compiled_frame6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cFArenaEgrow6ML_pv_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cMPhaseIterGVNMsubsume_node6MpnENode_2_v_; +text: .text%__1cLBoxLockNodeNrematerialize6kM_i_; +text: .text%__1cRMachSafePointNodeQis_MachSafePoint6M_p0_; +text: .text%__1cJloadBNodeErule6kM_I_; +text: .text%__1cITypeNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cHConNodeEhash6kM_I_; +text: .text%__1cICodeBlobLlink_offset6M_i_; +text: .text%__1cENodeRdisconnect_inputs6Mp0_i_; +text: .text%__1cIHaltNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cICodeBlobOis_i2c_adapter6kM_i_; +text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_; +text: .text%__1cIJVMStateNclone_shallow6kM_p0_; +text: .text%__1cRMachSpillCopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGOopMapQset_callee_saved6Miiii_v_; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__; +text: .text%__1cJOopMapSetTupdate_register_map6FpknFframe_pnICodeBlob_pnLRegisterMap__v_; +text: .text%__1cIJVMStateJdebug_end6kM_I_; +text: .text%__1cXjava_lang_ref_ReferenceJnext_addr6FpnHoopDesc__p2_; +text: 
.text%__1cENode2t6Mp011_v_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__; +text: .text%__1cHMatcherTcollect_null_checks6MpnENode__v_; +text: .text%__1cNSafePointNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHMemNodeMIdeal_common6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cGBitMapGat_put6MLi_v_; +text: .text%__1cFciEnvIis_in_vm6F_i_; +text: .text%__1cNSafePointNodeHsize_of6kM_I_; +text: .text%__1cMgetTimeNanos6F_x_; +text: .text%__1cKciTypeFlowLStateVectorSapply_one_bytecode6MpnQciBytecodeStream__i_; +text: .text%__1cKPSScavengeUoop_promotion_failed6FpnHoopDesc_pnLmarkOopDesc__v_; +text: .text%__1cHTypePtrLmeet_offset6kMi_i_; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_; +text: .text%__1cNPhaseRegAllocGis_oop6kMpknENode__i_; +text: .text%__1cSPSPromotionManagerUoop_promotion_failed6MpnHoopDesc_pnLmarkOopDesc__2_; +text: .text%__1cIMachOperLdisp_is_oop6kM_i_; +text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_; +text: .text%__1cNmethodOopDescIbci_from6kMpC_i_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_; +text: .text%__1cLLShiftLNodeGOpcode6kM_i_; +text: .text%__1cOMethodLivenessKBasicBlockIload_one6Mi_v_; +text: .text%__1cJTypeTupleCeq6kMpknEType__i_; +text: .text%__1cHCmpNodeLbottom_type6kM_pknEType__; +text: .text%__1cFDictI2i6M_v_; +text: .text%__1cPjava_lang_ClassMis_primitive6FpnHoopDesc__i_; +text: .text%JVM_GetMethodIxExceptionTableLength; +text: .text%__1cKC2IAdapterIis_alive6kM_i_; +text: .text%__1cENodeHget_int6kM_i_; +text: .text%__1cMMachCallNodeLbottom_type6kM_pknEType__; +text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_; +text: .text%__1cIAddPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMloadConINodeLbottom_type6kM_pknEType__; +text: .text%__1cPCheckCastPPNodeGOpcode6kM_i_; +text: .text%__1cMloadConINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeXis_iteratively_computed6M_i_; +text: .text%__1cNloadConI0NodePoper_input_base6kM_I_; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_; +text: .text%__1cENodeKreplace_by6Mp0_v_; +text: .text%__1cHPhiNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cIBoolTestKcc2logical6kMpknEType__3_; +text: .text%__1cIBoolNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJrelocInfoKset_format6Mi_v_; +text: .text%__1cXruntime_call_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cKRelocationJpack_data6M_i_; +text: .text%__1cXjava_lang_ref_ReferencePdiscovered_addr6FpnHoopDesc__p2_; +text: .text%__1cNPhaseCoalesceRcombine_these_two6MpnENode_2_v_; +text: .text%__1cNtestP_regNodeMideal_Opcode6kM_i_; +text: .text%__1cETypeFempty6kM_i_; +text: .text%__1cHMemNodeGis_Mem6M_p0_; +text: .text%__1cZload_can_see_stored_value6FpnILoadNode_pnENode_pnOPhaseTransform__3_: memnode.o; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cICodeBlobJis_zombie6kM_i_; +text: .text%__1cJTypeTupleGfields6FI_ppknEType__; +text: .text%__1cOGenerateOopMapPjump_targets_do6MpnOBytecodeStream_pFp0ipi_v4_i_; +text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__; +text: .text%method_compare: methodOop.o; +text: .text%__1cMMergeMemNodeEhash6kM_I_; +text: .text%__1cJloadPNodePoper_input_base6kM_I_; +text: .text%__1cILoadNodeKmatch_edge6kMI_I_; +text: .text%__1cFBlockUneeded_for_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_; +text: .text%__1cIIndexSetSpopulate_free_list6F_v_; +text: .text%__1cGIfNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cCosPelapsed_counter6F_x_; +text: 
.text%__1cIAddPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIemit_d326FrnKCodeBuffer_i_v_; +text: .text%__1cNloadConI0NodeHtwo_adr6kM_I_; +text: .text%__1cJStoreNodeKmatch_edge6kMI_I_; +text: .text%__1cITypeNodeDcmp6kMrknENode__I_; +text: .text%__1cMPhaseChaitinFUnion6MpknENode_3_v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_; +text: .text%__1cRSignatureIteratorTcheck_signature_end6M_v_; +text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_; +text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cHnmethodKis_nmethod6kM_i_; +text: .text%__1cILoadNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNSafePointNodeKmatch_edge6kMI_I_; +text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_; +text: .text%__1cSCallStaticJavaNodeRis_CallStaticJava6kM_pk0_; +text: .text%__1cKarrayKlassLobject_size6kMi_i_; +text: .text%__1cPciInstanceKlassRis_instance_klass6M_i_; +text: .text%__1cOno_flip_branch6FpnFBlock__i_: block.o; +text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cYCallStaticJavaDirectNodePoper_input_base6kM_I_; +text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_; +text: .text%__1cKjmpDirNodeGpinned6kM_i_; +text: .text%__1cTconstantPoolOopDescLname_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cKjmpDirNodeHtwo_adr6kM_I_; +text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__; +text: .text%__1cJrRegLOperJnum_edges6kM_I_; +text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__; +text: .text%__1cNGrowableArray4CpnKScopeValue__2t6Mii_v_; +text: .text%__1cYDebugInformationRecorderWserialize_scope_values6MpnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__; +text: .text%__1cITypeNodeHsize_of6kM_I_; +text: .text%__1cILoadNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPPerfLongVariantGsample6M_v_; +text: .text%__1cNloadConI0NodeErule6kM_I_; +text: .text%__1cJCodeCacheIcontains6Fpv_i_; +text: .text%__1cQciBytecodeStreamMreset_to_bci6Mi_v_; +text: .text%__1cJOopMapSetGall_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure_pFppnHoopDesc_9E_v9B9B_v_; +text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cJOopMapSetHoops_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cHPhiNodeHsize_of6kM_I_; +text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_; +text: .text%__1cMPhaseChaitinNFind_compress6MI_I_; +text: .text%__1cIMachNodeRget_base_and_disp6kMrlrpknHTypePtr__pknENode__; +text: .text%__1cLOopMapCacheIentry_at6kMi_pnQOopMapCacheEntry__; +text: .text%__1cOGenerateOopMapKcheck_type6MnNCellTypeState_1_v_; +text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cILoadNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMPhaseIterGVNbGregister_new_node_with_optimizer6MpnENode__2_; +text: .text%__1cKBufferBlobMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_; +text: .text%__1cLRegisterMapFclear6M_v_; +text: .text%__1cIBoolNodeLbottom_type6kM_pknEType__; +text: .text%__1cITypeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKRegionNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLBlock_Array2t6MpnFArena__v_; +text: .text%__1cPSignatureStreamHis_done6kM_i_; +text: 
.text%__1cNrFlagsRegOperJnum_edges6kM_I_; +text: .text%__1cLPhaseValuesGintcon6Mi_pnIConINode__; +text: .text%__1cKStoreINodeGOpcode6kM_i_; +text: .text%__1cLRegisterMap2t6MpnKJavaThread_i_v_; +text: .text%__1cNmethodOopDescPis_empty_method6kM_i_; +text: .text%__1cJcmpOpOperJnum_edges6kM_I_; +text: .text%__1cNExceptionMark2t6MrpnGThread__v_; +text: .text%__1cNExceptionMark2T6M_v_; +text: .text%__1cCosMvm_page_size6F_i_; +text: .text%__1cJloadPNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cOcompU_rRegNodePoper_input_base6kM_I_; +text: .text%__1cNmethodOopDescLis_accessor6kM_i_; +text: .text%__1cPloadConUL32NodePoper_input_base6kM_I_; +text: .text%__1cMPhaseChaitinSget_spillcopy_wide6MpnENode_2I_2_; +text: .text%__1cKI2CAdapterIis_alive6kM_i_; +text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_; +text: .text%__1cIGraphKitJsync_jvms6kM_pnIJVMState__; +text: .text%__1cFframebFinterpreter_frame_monitor_begin6kM_pnPBasicObjectLock__; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__; +text: .text%__1cQSystemDictionaryXcheck_signature_loaders6FnMsymbolHandle_nGHandle_2ipnGThread__v_; +text: .text%__1cIGraphKitEstop6M_v_; +text: .text%__1cNidealize_test6FpnIPhaseGVN_pnGIfNode__3_: ifnode.o; +text: .text%__1cPloadConUL32NodeHtwo_adr6kM_I_; +text: .text%__1cNSafePointNodeLbottom_type6kM_pknEType__; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_; +text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_; +text: .text%__1cKStorePNodeGOpcode6kM_i_; +text: .text%__1cHMonitorKnotify_all6M_i_; +text: .text%__1cFframeVinterpreter_frame_bci6kM_i_; +text: .text%__1cHRetNodeNis_block_proj6kM_pknENode__; +text: .text%__1cMPhaseChaitinMyank_if_dead6MpnENode_pnFBlock_pnJNode_List_6_i_; +text: .text%__1cENodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFBlockLfind_remove6MpknENode__v_; +text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_; +text: .text%__1cGOopMapHset_oop6Miii_v_; +text: .text%__1cNSafePointNodeSset_next_exception6Mp0_v_; +text: .text%__1cPloadConUL32NodeErule6kM_I_; +text: .text%__1cFChunkEchop6M_v_; +text: .text%__1cMciMethodDataJnext_data6MpnLProfileData__2_; +text: .text%__1cJStoreNodeLbottom_type6kM_pknEType__; +text: .text%__1cPciObjectFactorySget_unloaded_klass6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cLBlock_StackXmost_frequent_successor6MpnFBlock__I_; +text: .text%__1cFBlockScall_catch_cleanup6MrnLBlock_Array__v_; +text: .text%__1cFBlockOschedule_local6MrnHMatcher_rnLBlock_Array_pirnJVectorSet_rnNGrowableArray4CI___i_; +text: .text%__1cXPhaseAggressiveCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cOGenerateOopMapFppop16MnNCellTypeState__v_; +text: .text%__1cKstorePNodePoper_input_base6kM_I_; +text: .text%__1cMPhaseIterGVNFwiden6kMpknEType_3_3_; +text: .text%__1cKRegionNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNmethodOopDescRis_not_compilable6kMi_i_; +text: .text%__1cLis_cond_add6FpnIPhaseGVN_pnHPhiNode__pnENode__; +text: .text%__1cPsplit_flow_path6FpnIPhaseGVN_pnHPhiNode__pnENode__: cfgnode.o; +text: .text%__1cKRegionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGTarjanELINK6Mp01_v_; +text: .text%__1cKTypeRawPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cWMutableSpaceUsedHelperLtake_sample6M_x_; +text: .text%__1cIBoolNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIsplit_if6FpnGIfNode_pnMPhaseIterGVN__pnENode__: ifnode.o; +text: .text%__1cTremove_useless_bool6FpnGIfNode_pnIPhaseGVN__pnENode__: ifnode.o; +text: 
.text%__1cMPhaseChaitinNFind_compress6MpknENode__I_; +text: .text%__1cRInterpreterOopMapKinitialize6M_v_; +text: .text%__1cJTypeTupleEmake6FIppknEType__pk0_; +text: .text%__1cYCallStaticJavaDirectNodeHtwo_adr6kM_I_; +text: .text%__1cKDictionaryJget_entry6MiInMsymbolHandle_nGHandle__pnPDictionaryEntry__; +text: .text%__1cLLShiftINodeGOpcode6kM_i_; +text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_; +text: .text%__1cHnmethodMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__; +text: .text%__1cIMachOperOindex_position6kM_i_; +text: .text%__1cKReflectionTverify_field_access6FpnMklassOopDesc_22nLAccessFlags_ii_i_; +text: .text%__1cOemit_d32_reloc6FrnKCodeBuffer_irknQRelocationHolder_i_v_; +text: .text%__1cLjmpConUNodePoper_input_base6kM_I_; +text: .text%__1cHAddNodeEhash6kM_I_; +text: .text%__1cNtestP_regNodePoper_input_base6kM_I_; +text: .text%__1cHSubNodeGis_Sub6M_p0_; +text: .text%__1cPcheckCastPPNodePoper_input_base6kM_I_; +text: .text%__1cIRootNodeGOpcode6kM_i_; +text: .text%__1cFframebDinterpreter_frame_monitor_end6kM_pnPBasicObjectLock__; +text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_; +text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_; +text: .text%__1cRInterpreterOopMap2T6M_v_; +text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_; +text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cRInterpreterOopMap2t6M_v_; +text: .text%__1cFframeToops_interpreted_do6MpnKOopClosure_pknLRegisterMap_i_v_; +text: .text%__1cSInterpreterRuntimeLcache_entry6FpnKJavaThread__pnWConstantPoolCacheEntry__; +text: .text%__1cOindOffset8OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_; +text: .text%__1cKstorePNodeMideal_Opcode6kM_i_; +text: .text%__1cFChunkJnext_chop6M_v_; +text: .text%__1cENode2t6Mp0111_v_; +text: .text%__1cITypeLongEmake6Fx_pk0_; +text: .text%__1cLjmpConUNodeGpinned6kM_i_; +text: .text%__1cPciObjectFactoryNfind_non_perm6MpnHoopDesc__rpn0ANNonPermObject__; +text: .text%__1cHCmpNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKTypeRawPtrJsingleton6kM_i_; +text: .text%__1cNGCTaskManagerNresource_flag6MI_i_; +text: .text%__1cNGCTaskManagerYshould_release_resources6MI_i_; +text: .text%__1cIAddINodeLbottom_type6kM_pknEType__; +text: .text%__1cNloadRangeNodeErule6kM_I_; +text: .text%__1cNrFlagsRegOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIIndexSetJlrg_union6MIIIpknIPhaseIFG_rknHRegMask__I_; +text: .text%__1cISubINodeGOpcode6kM_i_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_; +text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__; +text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__; +text: .text%__1cSCountedLoopEndNodeGOpcode6kM_i_; +text: .text%__1cTOopMapForCacheEntryRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cKstoreINodePoper_input_base6kM_I_; +text: .text%__1cNtestP_regNodeHtwo_adr6kM_I_; +text: .text%__1cCosVcurrent_stack_pointer6F_pC_; +text: .text%__1cMMergeMemNodePiteration_setup6Mpk0_v_; +text: .text%__1cRMachSafePointNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cJStoreNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMMergeMemNodeJmemory_at6kMI_pnENode__; +text: .text%__1cJloadSNodeErule6kM_I_; +text: .text%__1cLLShiftLNodeLbottom_type6kM_pknEType__; +text: 
.text%__1cMBasicAdapterHoops_do6MpnKOopClosure__v_; +text: .text%__1cKjmpConNodeJnum_opnds6kM_I_; +text: .text%__1cLOptoRuntimeOnew_objArray_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__; +text: .text%__1cFciEnvXget_klass_by_index_impl6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cKjmpDirNodeHsize_of6kM_I_; +text: .text%__1cJloadPNodeJnum_opnds6kM_I_; +text: .text%__1cJCProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJMultiNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cHOopFlowNcompute_reach6MpnNPhaseRegAlloc_ipnEDict__v_; +text: .text%__1cJCatchNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cGGCTaskKinitialize6M_v_; +text: .text%__1cNGCTaskManagerIget_task6MI_pnGGCTask__; +text: .text%__1cNGCTaskManagerWdecrement_busy_workers6M_I_; +text: .text%__1cLGCTaskQdDueueGremove6M_pnGGCTask__; +text: .text%__1cLGCTaskQdDueueHenqueue6MpnGGCTask__v_; +text: .text%__1cNGCTaskManagerWincrement_busy_workers6M_I_; +text: .text%__1cLGCTaskQdDueueHdequeue6M_pnGGCTask__; +text: .text%__1cNGCTaskManagerPnote_completion6MI_v_; +text: .text%__1cHMatcherXadjust_outgoing_stk_arg6Miiri_i_; +text: .text%__1cTCreateExceptionNodeMideal_Opcode6kM_i_; +text: .text%__1cKIfTrueNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_; +text: .text%__1cMBasicAdapterMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cXindIndexScaleOffsetOperJnum_edges6kM_I_; +text: .text%__1cLRShiftINodeGOpcode6kM_i_; +text: .text%__1cJStoreNodeIis_Store6kM_pk0_; +text: .text%Unsafe_CompareAndSwapLong; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cNtestI_regNodeMideal_Opcode6kM_i_; +text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__; +text: .text%__1cJStoreNodeEhash6kM_I_; +text: .text%__1cOcompI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cKmethodOperGmethod6kM_l_; +text: .text%__1cHTypeAryRary_must_be_exact6kM_i_; +text: .text%__1cKstoreINodeMideal_Opcode6kM_i_; +text: .text%__1cGGCTask2t6M_v_; +text: .text%__1cOcompU_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cNSafePointNodeOnext_exception6kM_p0_; +text: .text%__1cOindOffset8OperNbase_position6kM_i_; +text: .text%__1cOindOffset8OperNconstant_disp6kM_i_; +text: .text%__1cKjmpDirNodeHis_Goto6kM_I_; +text: .text%__1cENodeHset_req6MIp0_v_; +text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cSObjectSynchronizerKfast_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cWShouldNotReachHereNodePoper_input_base6kM_I_; +text: .text%__1cMMergeMemNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cGThreadLis_in_stack6kMpC_i_; +text: .text%__1cLIfFalseNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKJavaThreadNis_lock_owned6kMpC_i_; +text: .text%__1cXAdaptiveWeightedAverageYcompute_adaptive_average6Mff_f_; +text: .text%__1cOcompU_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cJloadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvI2L_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cSPSPromotionManagerbBgc_thread_promotion_manager6Fi_p0_; +text: .text%__1cNmethodOopDescMintrinsic_id6kM_nMvmIntrinsicsCID__; +text: .text%__1cKstorePNodeJnum_opnds6kM_I_; +text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: 
.text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMMergeMemNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__; +text: .text%__1cMMergeMemNodeQclone_all_memory6FpnENode__p0_; +text: .text%__1cIGraphKitJclone_map6M_pnNSafePointNode__; +text: .text%__1cLMachNopNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJchar2type6Fc_nJBasicType__; +text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__; +text: .text%__1cICallNodeLis_CallLeaf6kM_pknMCallLeafNode__; +text: .text%__1cLrecord_bias6FpknIPhaseIFG_ii_v_: coalesce.o; +text: .text%__1cJrRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMObjectLocker2t6MnGHandle_pnGThread__v_; +text: .text%__1cMObjectLocker2T6M_v_; +text: .text%__1cNloadRangeNodeMideal_Opcode6kM_i_; +text: .text%__1cRMachSafePointNodeRis_safepoint_node6kM_i_; +text: .text%__1cHTypeIntFempty6kM_i_; +text: .text%__1cRInvocationCounterJset_state6Mn0AFState__v_; +text: .text%__1cRInvocationCounterFreset6M_v_; +text: .text%__1cKTypeOopPtrWmake_from_klass_common6FpnHciKlass_ii_pk0_; +text: .text%__1cRInvocationCounterEinit6M_v_; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_; +text: .text%__1cLPhaseValuesFwiden6kMpknEType_3_3_; +text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_; +text: .text%__1cIHaltNodeLbottom_type6kM_pknEType__; +text: .text%__1cRRawBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cNtestI_regNodeHtwo_adr6kM_I_; +text: .text%__1cSsafePoint_pollNodeMideal_Opcode6kM_i_; +text: .text%__1cLOopRecorderOallocate_index6MpnI_jobject__i_; +text: .text%__1cYDebugInformationRecorderNappend_handle6MpnI_jobject__i_; +text: .text%__1cYCallStaticJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYCallStaticJavaDirectNodeSalignment_required6kM_i_; +text: .text%__1cMloadConPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_; +text: .text%__1cKRegionNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOPhaseIdealLoopIsplit_up6MpnENode_22_i_; +text: .text%__1cWConstantPoolCacheEntryRset_initial_state6Mi_v_; +text: .text%__1cKNativeCallGverify6M_v_; +text: .text%__1cNCompileBrokerLmaybe_block6F_v_; +text: .text%__1cFPhase2t6Mn0ALPhaseNumber__v_; +text: .text%__1cJloadINodeMideal_Opcode6kM_i_; +text: .text%__1cNLoadKlassNodeGOpcode6kM_i_; +text: .text%__1cIBoolNodeKmatch_edge6kMI_I_; +text: .text%__1cITypeLongEmake6Fxx_pk0_; +text: .text%__1cKRegionNodeOis_block_start6kM_i_; +text: .text%__1cCosGmalloc6FL_pv_; +text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_; +text: .text%__1cFStateM_sub_Op_RegP6MpknENode__v_; +text: .text%__1cPcheckCastPPNodeHtwo_adr6kM_I_; +text: .text%__1cRMachNullCheckNodeQis_MachNullCheck6M_p0_; +text: .text%__1cOGenerateOopMapGppush16MnNCellTypeState__v_; +text: .text%__1cGBitMapOset_difference6M0_v_; +text: .text%__1cMvalue_of_loc6FppnHoopDesc__l_; +text: .text%__1cRmethodDataOopDescTbytecode_cell_count6FnJBytecodesECode__i_; +text: .text%__1cRmethodDataOopDescRcompute_data_size6FpnOBytecodeStream__i_; +text: .text%__1cRmethodDataOopDescPinitialize_data6MpnOBytecodeStream_i_i_; +text: .text%__1cOcompI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__; +text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__; +text: 
.text%__1cNGrowableArray4CpnKciTypeFlowFBlock__2t6MpnFArena_iirk2_v_; +text: .text%__1cLPhaseValuesHlongcon6Mx_pnIConLNode__; +text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cOcompI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cKTypeAryPtrFxmeet6kMpknEType__3_; +text: .text%__1cMPhaseChaitinJsplit_USE6MpnENode_pnFBlock_2IIiinNGrowableArray4CI__i_I_; +text: .text%__1cNCatchProjNodeMis_CatchProj6kM_pk0_; +text: .text%__1cOcompU_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cNSafePointNodeGOpcode6kM_i_; +text: .text%__1cTconvI2L_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cMTypeKlassPtrEhash6kM_i_; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverYlookup_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cMciMethodDataLbci_to_data6Mi_pnLProfileData__; +text: .text%JVM_GetCPMethodSignatureUTF; +text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_; +text: .text%__1cIciMethodbCinterpreter_invocation_count6M_i_; +text: .text%__1cMMergeMemNodeNset_memory_at6MIpnENode__v_; +text: .text%JVM_CurrentThread; +text: .text%__1cPClassFileParserbLparse_constant_pool_nameandtype_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKstoreINodeJnum_opnds6kM_I_; +text: .text%__1cHTypeAryEhash6kM_i_; +text: .text%JVM_GetClassModifiers; +text: .text%JVM_GetClassAccessFlags; +text: .text%__1cRAbstractAssemblerGa_byte6Mi_v_; +text: .text%__1cJAssemblerGprefix6Mn0AGPrefix__v_; +text: .text%__1cKMemBarNodeKmatch_edge6kMI_I_; +text: .text%__1cMMergeMemNodePset_base_memory6MpnENode__v_; +text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cCosEfree6Fpv_v_; +text: .text%__1cENodeDcmp6kMrk0_I_; +text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_; +text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMrax_RegPOperEtype6kM_pknEType__; +text: .text%__1cKTypeRawPtrEhash6kM_i_; +text: .text%__1cLmethodKlassNoop_is_method6kM_i_; +text: .text%__1cXindIndexScaleOffsetOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cPSignatureStreamJis_object6kM_i_; +text: .text%__1cVCompressedWriteStream2t6Mi_v_; +text: .text%__1cENode2t6Mp01_v_; +text: .text%__1cHAddNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJStartNodeIis_Start6M_p0_; +text: .text%__1cMURShiftINodeGOpcode6kM_i_; +text: .text%__1cNGrowableArray4CpnMMonitorValue__2t6Mii_v_; +text: .text%__1cYDebugInformationRecorderOdescribe_scope6MpnIciMethod_ipnKDebugToken_44_v_; +text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__; +text: .text%__1cYDebugInformationRecorderYserialize_monitor_values6MpnNGrowableArray4CpnMMonitorValue____i_; +text: .text%__1cMloadConINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMachNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cKReflectionTverify_class_access6FpnMklassOopDesc_2i_i_; +text: .text%__1cOMethodLivenessKBasicBlockWcompute_gen_kill_range6MpnQciBytecodeStream__v_; +text: .text%__1cJAssemblerJemit_data6MirknQRelocationHolder_i_v_; +text: .text%__1cIMachOperMdisp_as_type6kM_pknHTypePtr__; +text: .text%__1cKCompiledICWis_in_transition_state6kM_i_; +text: .text%__1cRInlineCacheBufferIcontains6FpC_i_; +text: .text%__1cOcompU_rRegNodeErule6kM_I_; +text: 
.text%__1cRMemBarReleaseNodeGOpcode6kM_i_; +text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cETypeOget_const_type6FpnGciType__pk0_; +text: .text%__1cMPhaseChaitinPset_was_spilled6MpnENode__v_; +text: .text%__1cXvirtual_call_RelocationIparse_ic6FrpnICodeBlob_rpC5rppnHoopDesc_pi_nNRelocIterator__; +text: .text%__1cLCounterDataOis_CounterData6M_i_; +text: .text%__1cRCompilationPolicyNcanBeCompiled6FnMmethodHandle__i_; +text: .text%__1cOcompU_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cKcmpOpUOperJnum_edges6kM_I_; +text: .text%__1cOrFlagsRegUOperJnum_edges6kM_I_; +text: .text%__1cKCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIGraphKitMsaved_ex_oop6FpnNSafePointNode__pnENode__; +text: .text%__1cMtlsLoadPNodeErule6kM_I_; +text: .text%__1cIGraphKitGmemory6MI_pnENode__; +text: .text%__1cNRelocIteratorJset_limit6MpC_v_; +text: .text%__1cPcheckCastPPNodeErule6kM_I_; +text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_; +text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQciBytecodeStreamPget_field_index6M_i_; +text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_; +text: .text%__1cNchunk_oops_do6FpnKOopClosure_pnFChunk_pc_L_: handles.o; +text: .text%__1cKCompiledIC2t6MpnKRelocation__v_; +text: .text%__1cMUniverseOperFclone6kM_pnIMachOper__; +text: .text%__1cJlabelOperFclone6kM_pnIMachOper__; +text: .text%__1cRaddI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cLPhaseValuesHmakecon6MpknEType__pnHConNode__; +text: .text%__1cENodeHins_req6MIp0_v_; +text: .text%__1cIGraphKitLclean_stack6Mi_v_; +text: .text%__1cKRegionNodeHhas_phi6kM_pnHPhiNode__; +text: .text%__1cNloadRangeNodePoper_input_base6kM_I_; +text: .text%__1cOMachReturnNodeNis_MachReturn6M_p0_; +text: .text%__1cNaddI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cQPreserveJVMState2t6MpnIGraphKit_i_v_; +text: .text%__1cNtestP_regNodeMcisc_operand6kM_i_; +text: .text%__1cKjmpConNodeHtwo_adr6kM_I_; +text: .text%__1cPClassFileParserbJparse_constant_pool_methodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_; +text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_; +text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_; +text: .text%__1cMPhaseChaitinVmay_be_copy_of_callee6kMpnENode__i_; +text: .text%__1cNtestP_regNodeErule6kM_I_; +text: .text%__1cUThreadSafepointStateXexamine_state_of_thread6Mi_v_; +text: .text%__1cUSafepointSynchronizeOsafepoint_safe6FpnKJavaThread_nPJavaThreadState__i_; +text: .text%__1cFStateM_sub_Op_ConI6MpknENode__v_; +text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_; +text: .text%__1cOcompU_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cHPhiNodeEmake6FpnENode_2pknEType_pknHTypePtr__p0_; +text: .text%__1cKCodeBufferOadd_stub_reloc6MpCrknQRelocationHolder_i_v_; +text: .text%__1cKCodeBufferOalloc_relocate6M_pnORelocateBuffer__; +text: .text%__1cNtestI_regNodeErule6kM_I_; +text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__; +text: .text%__1cFParseKensure_phi6Mii_pnHPhiNode__; +text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_; +text: .text%__1cOGenerateOopMapSget_basic_block_at6kMi_pnKBasicBlock__; +text: .text%__1cHAddress2t6MpCnJrelocInfoJrelocType__v_; +text: 
.text%__1cFciEnvSget_klass_by_index6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cFStateM_sub_Op_AddP6MpknENode__v_; +text: .text%__1cICallNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cOcompI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cPciObjectFactoryNinit_ident_of6MpnIciObject__v_; +text: .text%__1cIciObjectJset_ident6MI_v_; +text: .text%__1cLMachNopNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLMachNopNodeMideal_Opcode6kM_i_; +text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__; +text: .text%__1cITypeFuncEhash6kM_i_; +text: .text%__1cOcompI_rRegNodeErule6kM_I_; +text: .text%__1cIciMethodPliveness_at_bci6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessPget_liveness_at6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessKBasicBlockPget_liveness_at6MpnIciMethod_i_nGBitMap__; +text: .text%__1cITypeLongFempty6kM_i_; +text: .text%__1cMTypeKlassPtrCeq6kMpknEType__i_; +text: .text%__1cJAssemblerJemit_data6MinJrelocInfoJrelocType_i_v_; +text: .text%__1cJLoadSNodeGOpcode6kM_i_; +text: .text%__1cRMemBarAcquireNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitObasic_plus_adr6MpnENode_2l_2_; +text: .text%__1cJVectorSet2L6MI_rnDSet__; +text: .text%__1cJVectorSetEgrow6MI_v_; +text: .text%__1cLCastP2LNodeGOpcode6kM_i_; +text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cJAssemblerEcall6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cPcheckCastPPNodeMideal_Opcode6kM_i_; +text: .text%__1cJLoadLNodeGOpcode6kM_i_; +text: .text%__1cMLinkResolverNresolve_klass6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__; +text: .text%__1cRCompilationPolicyOmustBeCompiled6FnMmethodHandle__i_; +text: .text%__1cIAndINodeGOpcode6kM_i_; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_; +text: .text%__1cUParallelScavengeHeapNtlab_capacity6kM_L_; +text: .text%__1cTCreateExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIPhaseCCPFwiden6kMpknEType_3_3_; +text: .text%__1cHTypePtrJsingleton6kM_i_; +text: .text%__1cNArgumentCountDset6MinJBasicType__v_; +text: .text%__1cWShouldNotReachHereNodeGpinned6kM_i_; +text: .text%__1cNsubI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cNtestI_regNodePoper_input_base6kM_I_; +text: .text%__1cLBoxLockNodeGOpcode6kM_i_; +text: .text%__1cWShouldNotReachHereNodeHtwo_adr6kM_I_; +text: .text%__1cQMachCallJavaNodePis_MachCallJava6M_p0_; +text: .text%__1cKStoreBNodeGOpcode6kM_i_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_22nHAddressLScaleFactor_ipCrknQRelocationHolder__v_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__; +text: .text%__1cITypeFuncCeq6kMpknEType__i_; +text: .text%__1cMloadConLNodePoper_input_base6kM_I_; +text: .text%__1cMPhaseIterGVNHmakecon6MpknEType__pnHConNode__; +text: .text%__1cWShouldNotReachHereNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHoopDescGverify6M_v_; +text: .text%__1cQconstMethodKlassSoop_is_constMethod6kM_i_; +text: .text%__1cRconstantPoolKlassToop_is_constantPool6kM_i_; +text: .text%__1cOcompP_rRegNodePoper_input_base6kM_I_; +text: .text%__1cHTypePtrHget_con6kM_l_; +text: .text%__1cHMatcherWis_short_branch_offset6Mi_i_; +text: .text%__1cMloadConLNodeHtwo_adr6kM_I_; +text: .text%__1cMTypeKlassPtr2t6MnHTypePtrDPTR_pnHciKlass_i_v_; +text: .text%__1cYCallStaticJavaDirectNodeKmethod_set6Ml_v_; +text: 
.text%__1cWMachCallStaticJavaNodePret_addr_offset6M_i_; +text: .text%__1cYCallStaticJavaDirectNodePcompute_padding6kMi_i_; +text: .text%__1cJVectorSet2t6MpnFArena__v_; +text: .text%__1cFStateM_sub_Op_RegI6MpknENode__v_; +text: .text%__1cICmpPNodeDsub6kMpknEType_3_3_; +text: .text%__1cNloadConP0NodePoper_input_base6kM_I_; +text: .text%__1cOAbstractICachePinvalidate_word6FpC_v_; +text: .text%__1cRNativeInstructionFwrote6Mi_v_; +text: .text%__1cMURShiftLNodeGOpcode6kM_i_; +text: .text%__1cOrFlagsRegUOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIciMethodLscale_count6Mi_i_; +text: .text%__1cLjmpConUNodeJnum_opnds6kM_I_; +text: .text%__1cQciBytecodeStreamQget_method_index6M_i_; +text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__; +text: .text%__1cPSignatureStream2t6MnMsymbolHandle_i_v_; +text: .text%JVM_GetMethodIxLocalsCount; +text: .text%__1cJlabelOperFlabel6kM_pnFLabel__; +text: .text%__1cGOopMapJheap_size6kM_i_; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cMloadConLNodeErule6kM_I_; +text: .text%__1cOMacroAssemblerZneeds_explicit_null_check6Fi_i_; +text: .text%__1cNSafePointNode2t6MIpnIJVMState__v_; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__; +text: .text%__1cNloadKlassNodeMideal_Opcode6kM_i_; +text: .text%__1cWConstantPoolCacheEntryIas_flags6MnITosState_iiiii_i_; +text: .text%__1cNloadConP0NodeHtwo_adr6kM_I_; +text: .text%__1cWThreadLocalAllocBufferVinitialize_statistics6M_v_; +text: .text%__1cWThreadLocalAllocBufferFclear6M_v_; +text: .text%__1cWThreadLocalAllocBufferVaccumulate_statistics6MLi_v_; +text: .text%__1cWThreadLocalAllocBufferImax_size6F_L_; +text: .text%__1cWThreadLocalAllocBufferGresize6M_v_; +text: .text%__1cJloadINodePoper_input_base6kM_I_; +text: .text%__1cIRootNodeLbottom_type6kM_pknEType__; +text: .text%__1cWConstantPoolCacheEntryGverify6kMpnMoutputStream__v_; +text: .text%__1cHCompileYout_preserve_stack_slots6F_I_; +text: .text%__1cFParsePload_state_from6Mpn0AFBlock__v_; +text: .text%__1cFParseMmerge_common6Mpn0AFBlock_i_v_; +text: .text%__1cNloadRangeNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cIGraphKitQkill_dead_locals6M_v_; +text: .text%__1cKMemBarNodeLbottom_type6kM_pknEType__; +text: .text%__1cKimmL32OperJconstantL6kM_x_; +text: .text%__1cIciObject2t6MnGHandle__v_; +text: .text%__1cITypeFuncEmake6FpknJTypeTuple_3_pk0_; +text: .text%__1cLConvI2LNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPciObjectFactoryGinsert6MipnIciObject_pnNGrowableArray4C2___v_; +text: .text%__1cRaddP_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cLBoxLockNodeJideal_reg6kM_I_; +text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cICHeapObj2n6FL_pv_; +text: .text%__1cFframeUentry_frame_is_first6kM_i_; +text: .text%__1cKInlineTreeJcallee_at6kMipnIciMethod__p0_; +text: .text%__1cOFastUnlockNodeGOpcode6kM_i_; +text: .text%__1cFStateM_sub_Op_ConL6MpknENode__v_; +text: .text%__1cIHaltNodeGpinned6kM_i_; +text: .text%__1cMTypeKlassPtrEmake6FnHTypePtrDPTR_pnHciKlass_i_pk0_; +text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cOCallRelocationFvalue6M_pC_; +text: .text%__1cKRelocationXpd_set_call_destination6MpCl_v_; +text: .text%__1cOCallRelocationWfix_relocation_at_move6Ml_v_; +text: .text%__1cKRelocationTpd_call_destination6M_pC_; +text: .text%__1cOCallRelocationPset_destination6MpCl_v_; +text: .text%__1cHcommute6FpnENode_ii_i_: addnode.o; +text: 
.text%__1cENodeHis_Root6M_pnIRootNode__; +text: .text%__1cENodeQlatency_from_use6kMrnLBlock_Array_rnNGrowableArray4CI__pk0p0_i_; +text: .text%__1cHAddNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNinstanceKlassQfind_local_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__i_; +text: .text%__1cMmerge_region6FpnKRegionNode_pnIPhaseGVN__pnENode__: cfgnode.o; +text: .text%__1cNloadConP0NodeErule6kM_I_; +text: .text%__1cIGraphKitMreset_memory6M_pnENode__; +text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__; +text: .text%__1cNloadRangeNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNloadConI0NodeMideal_Opcode6kM_i_; +text: .text%__1cJloadPNodeHtwo_adr6kM_I_; +text: .text%__1cNSignatureInfoGdo_int6M_v_; +text: .text%__1cKjmpDirNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPThreadRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cPThreadRootsTaskEname6M_pc_; +text: .text%__1cUThreadSafepointStateMroll_forward6Mn0AMsuspend_type__v_; +text: .text%__1cUThreadSafepointStateHrestart6M_v_; +text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__; +text: .text%__1cJrRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIAddLNodeGOpcode6kM_i_; +text: .text%__1cJLoadPNodeJideal_reg6kM_I_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cOkill_dead_code6FpnENode_pnMPhaseIterGVN__i_: node.o; +text: .text%__1cKjmpDirNodeFclone6kM_pnENode__; +text: .text%__1cOcompI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cFParseFBlockRsuccessor_for_bci6Mi_p1_; +text: .text%__1cNGrowableArray4CpnOMethodLivenessKBasicBlock__2t6MpnFArena_iirk2_v_; +text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNtestP_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNincI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2ipnGThread__v_; +text: .text%__1cHOrINodeGOpcode6kM_i_; +text: .text%__1cOcompI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMPhaseChaitinJsplit_DEF6MpnENode_pnFBlock_iIp25nNGrowableArray4CI__i_I_; +text: .text%__1cENodeHis_Type6M_pnITypeNode__; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_; +text: .text%__1cICHeapObj2k6Fpv_v_; +text: .text%__1cRaddP_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cICmpINodeDsub6kMpknEType_3_3_; +text: .text%__1cENode2n6FLi_pv_; +text: .text%__1cJTraceTime2t6MpkcpnMelapsedTimer_iipnMoutputStream__v_; +text: .text%__1cIAddINodeGadd_id6kM_pknEType__; +text: .text%__1cKStoreCNodeGOpcode6kM_i_; +text: .text%__1cTconvI2L_reg_regNodeMcisc_operand6kM_i_; +text: .text%__1cHMemNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cSvframeStreamCommonPfill_from_frame6M_i_; +text: .text%__1cILoadNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cJStoreNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNFingerprinterLfingerprint6M_L_; +text: .text%__1cLConvI2LNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_; +text: .text%__1cIGraphKitRnull_check_common6MpnENode_nJBasicType_i_2_; +text: .text%__1cKBlock_ListGremove6MI_v_; +text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_; +text: .text%__1cKciTypeFlowGJsrSetJcopy_into6Mp1_v_; +text: .text%__1cICmpLNodeGOpcode6kM_i_; +text: .text%__1cLklassVtableTupdate_super_vtable6MpnNinstanceKlass_pnNmethodOopDesc_i_i_; +text: .text%__1cLcastP2LNodePoper_input_base6kM_I_; +text: .text%__1cQStackFrameStream2t6MpnKJavaThread_i_v_; +text: 
.text%__1cTciConstantPoolCacheEfind6Mi_i_; +text: .text%__1cOPhaseIdealLoopGspinup6MpnENode_2222pnLsmall_cache__2_; +text: .text%__1cMVirtualSpaceOcommitted_size6kM_L_; +text: .text%__1cNGrowableArray4CpnIciObject__Praw_at_put_grow6Mirk14_v_; +text: .text%__1cIRootNodeNis_block_proj6kM_pknENode__; +text: .text%__1cHOopFlowEmake6FpnFArena_i_p0_; +text: .text%__1cJloadLNodeErule6kM_I_; +text: .text%__1cNloadConI0NodeLbottom_type6kM_pknEType__; +text: .text%__1cJimmI0OperIconstant6kM_l_; +text: .text%__1cScompI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cLBoxLockNodeLbottom_type6kM_pknEType__; +text: .text%__1cNaddI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cOPhaseIdealLoopKhandle_use6MpnENode_2pnLsmall_cache_22222_v_; +text: .text%__1cOPhaseIdealLoopOfind_use_block6MpnENode_22222_2_; +text: .text%__1cNmethodOopDescbGupdate_compiled_code_entry_point6Mi_v_; +text: .text%__1cJStoreNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNSignatureInfoHdo_void6M_v_; +text: .text%__1cLAdapterInfoKhash_value6kM_l_; +text: .text%JVM_GetCPMethodClassNameUTF; +text: .text%__1cHOopFlowFclone6Mp0i_v_; +text: .text%__1cRSignatureIteratorSiterate_parameters6ML_v_; +text: .text%__1cILoopNodeHis_Loop6M_p0_; +text: .text%__1cPindOffset32OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cPindOffset32OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMCallLeafNodeGOpcode6kM_i_; +text: .text%__1cNmethodOopDescPis_final_method6kM_i_; +text: .text%__1cSComputeAdapterInfoHcompute6Mplii_v_; +text: .text%__1cLAdapterInfoHcompute6MnMmethodHandle_i_v_; +text: .text%__1cLAdapterInfo2T6M_v_; +text: .text%__1cSComputeAdapterInfoLreturn_type6MnJBasicType__i_; +text: .text%__1cSComputeAdapterInfoMsize_in_bits6FnMmethodHandle__i_; +text: .text%__1cMAdapterCacheGlookup6MpnLAdapterInfo__pnMBasicAdapter__; +text: .text%__1cJloadINodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cPadd_derived_oop6FppnHoopDesc_2_v_: oopMap.o; +text: .text%__1cTDerivedPointerTableDadd6FppnHoopDesc_3_v_; +text: .text%__1cFParseFBlockJinit_node6Mp0i_v_; +text: .text%__1cFParseFBlockKinit_graph6Mp0_v_; +text: .text%__1cOcompP_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cKjmpDirNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMLinkResolverbFlinktime_resolve_virtual_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_; +text: .text%__1cKCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJTraceTime2T6M_v_; +text: .text%__1cKciTypeFlowNmake_range_at6Mi_pn0AFRange__; +text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_; +text: .text%__1cKoopFactoryKnew_method6FinLAccessFlags_iiipnGThread__pnNmethodOopDesc__; +text: .text%__1cNmethodOopDescLobject_size6Fi_i_; +text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_; +text: .text%__1cLklassVtableWneeds_new_vtable_entry6FpnNmethodOopDesc_pnMklassOopDesc_pnHoopDesc_pnNsymbolOopDesc_nLAccessFlags__i_; +text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_; +text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__; +text: .text%__1cQconstMethodKlassIallocate6MiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cNmethodOopDescJinit_code6M_v_; +text: .text%__1cKoopFactoryPnew_constMethod6FiiiipnGThread__pnSconstMethodOopDesc__; +text: 
.text%__1cPClassFileParserMparse_method6MnSconstantPoolHandle_ipnLAccessFlags_pnPtypeArrayHandle_55pnGThread__nMmethodHandle__; +text: .text%__1cFBlockUhoist_LCA_above_defs6Mp01IrnLBlock_Array__1_; +text: .text%__1cScompI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__; +text: .text%__1cHciFieldJwill_link6MpnPciInstanceKlass_nJBytecodesECode__i_; +text: .text%__1cFciEnvXget_field_by_index_impl6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cQciBytecodeStreamJget_field6Mri_pnHciField__; +text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cKTypeOopPtrFempty6kM_i_; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_16MnJBytecodesECode__v_; +text: .text%__1cKCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cMMergeMemNodeNgrow_to_match6Mpk0_v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_; +text: .text%__1cTCreateExceptionNodePoper_input_base6kM_I_; +text: .text%__1cPciInstanceKlassYunique_concrete_subklass6M_p0_; +text: .text%__1cLStringTableGlookup6MipHiI_pnHoopDesc__; +text: .text%__1cLBoxLockNodeHsize_of6kM_I_; +text: .text%__1cIciObjectIencoding6M_pnI_jobject__; +text: .text%__1cPClassFileParserbDverify_legal_method_modifiers6MiinMsymbolHandle_pnGThread__v_; +text: .text%__1cIPhaseGVNUtransform_no_reclaim6MpnENode__2_; +text: .text%__1cIRewriterOrewrite_method6FnMmethodHandle_rnIintArray_pnGThread__1_; +text: .text%__1cNmethodOopDescLlink_method6FnMmethodHandle__v_; +text: .text%__1cNloadKlassNodePoper_input_base6kM_I_; +text: .text%__1cNObjectMonitorEexit6MpnGThread__v_; +text: .text%__1cPClassFileParserZskip_over_field_signature6MpciIpnGThread__1_; +text: .text%__1cMMergeMemNode2t6MpnENode__v_; +text: .text%__1cMMergeMemNodeRmake_empty_memory6F_pnENode__; +text: .text%__1cNinstanceKlassVshould_be_initialized6kM_i_; +text: .text%__1cNtestP_regNodeJnum_opnds6kM_I_; +text: .text%__1cJStartNodeGis_CFG6kM_i_; +text: .text%__1cRaddI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_; +text: .text%__1cMindIndexOperJnum_edges6kM_I_; +text: .text%__1cRInterpretedRFrameKtop_method6kM_nMmethodHandle__; +text: .text%__1cKGCStatInfoMset_gc_usage6MinLMemoryUsage_i_v_; +text: .text%__1cXmembar_acquire_lockNodeLbottom_type6kM_pknEType__; +text: .text%__1cQPreserveJVMState2T6M_v_; +text: .text%__1cLRuntimeStubIis_alive6kM_i_; +text: .text%__1cMWarmCallInfoHis_cold6kM_i_; +text: .text%__1cNObjectMonitorFenter6MpnGThread__v_; +text: .text%__1cKjmpConNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIemit_d646FrnKCodeBuffer_l_v_; +text: .text%__1cFParseFmerge6Mi_v_; +text: .text%__1cOPhaseIdealLoopIset_idom6MpnENode_2I_v_; +text: .text%__1cIAddINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFParseMdo_one_block6M_v_; +text: .text%__1cFParseFBlockMrecord_state6Mp0_v_; +text: .text%__1cNtestP_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cJloadLNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cIPhaseIFGFUnion6MII_v_; +text: .text%__1cNloadRangeNodeJnum_opnds6kM_I_; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_26MnJBytecodesECode__v_; +text: .text%__1cOcompP_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cIBoolNodeJideal_reg6kM_I_; +text: .text%__1cHCmpNodeJideal_reg6kM_I_; +text: .text%__1cFStateM_sub_Op_Bool6MpknENode__v_; +text: .text%__1cJCatchNodeIis_Catch6kM_pk0_; +text: .text%__1cJLoadBNodeGOpcode6kM_i_; 
+text: .text%__1cENodeHlatency6MI_I_; +text: .text%__1cIGraphKit2t6MpnIJVMState__v_; +text: .text%__1cKTypeAryPtrFklass6kM_pnHciKlass__; +text: .text%__1cTciConstantPoolCacheDget6Mi_pv_; +text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_; +text: .text%__1cKTypeOopPtrHget_con6kM_l_; +text: .text%__1cMPhaseChaitinKprompt_use6MpnFBlock_I_i_; +text: .text%__1cIJVMStateLdebug_depth6kM_I_; +text: .text%__1cIGraphKitTadd_safepoint_edges6MpnNSafePointNode_i_v_; +text: .text%__1cENodeNadd_req_batch6Mp0I_v_; +text: .text%__1cIJVMStateKclone_deep6kM_p0_; +text: .text%__1cFStateK_sub_Op_If6MpknENode__v_; +text: .text%__1cXindIndexScaleOffsetOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cXindIndexScaleOffsetOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cGBitMapVset_union_with_result6M0_i_; +text: .text%__1cNSafePointNodeEhash6kM_I_; +text: .text%__1cMelapsedTimerFstart6M_v_; +text: .text%__1cJStartNodeGOpcode6kM_i_; +text: .text%__1cMelapsedTimerEstop6M_v_; +text: .text%__1cOPhaseIdealLoopQconditional_move6MpnENode__2_; +text: .text%__1cJloadLNodeMideal_Opcode6kM_i_; +text: .text%__1cOMethodLivenessKBasicBlockJstore_one6Mi_v_; +text: .text%__1cTC2IAdapterGeneratorXlazy_std_verified_entry6FnMmethodHandle__pC_; +text: .text%__1cPindOffset32OperJnum_edges6kM_I_; +text: .text%__1cPFieldAccessInfoDset6MnLKlassHandle_nMsymbolHandle_iinJBasicType_nLAccessFlags__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_iipnGThread__v_; +text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_; +text: .text%__1cNsubI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cTCreateExceptionNodeHtwo_adr6kM_I_; +text: .text%__1cPindOffset32OperFscale6kM_i_; +text: .text%__1cHAddNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cICmpPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cbFCompressedLineNumberWriteStream2t6Mi_v_; +text: .text%__1cPClassFileParserWparse_linenumber_table6MIIpipnGThread__pC_; +text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_; +text: .text%__1cHTypePtrLdual_offset6kM_i_; +text: .text%__1cNMachIdealNodePoper_input_base6kM_I_; +text: .text%__1cSObjectSynchronizerOinflate_helper6FpnHoopDesc__pnNObjectMonitor__; +text: .text%__1cKciTypeFlowIblock_at6Mipn0AGJsrSet_n0AMCreateOption__pn0AFBlock__; +text: .text%__1cKciTypeFlowFRangeNget_block_for6Mpn0AGJsrSet_n0AMCreateOption__pn0AFBlock__; +text: .text%__1cSvframeStreamCommonbBfill_from_interpreter_frame6M_v_; +text: .text%__1cHMonitorEwait6Mil_i_; +text: .text%__1cLcastP2LNodeMideal_Opcode6kM_i_; +text: .text%__1cNloadKlassNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_; +text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cXindIndexScaleOffsetOperFscale6kM_i_; +text: .text%__1cQciBytecodeStreamKget_method6Mri_pnIciMethod__; +text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__; +text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_; +text: .text%__1cNstoreImmBNodePoper_input_base6kM_I_; +text: .text%__1cNLoadRangeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: 
.text%__1cITypeFuncEmake6FpnIciMethod__pk0_; +text: .text%__1cMindirectOperJnum_edges6kM_I_; +text: .text%__1cKCompiledICOic_destination6kM_pC_; +text: .text%__1cTconvI2L_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cICmpUNodeDsub6kMpknEType_3_3_; +text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__; +text: .text%__1cRshrL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__; +text: .text%__1cPClassFileParserbFparse_constant_pool_class_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cOMethodLivenessKBasicBlockMmerge_normal6MnGBitMap__i_; +text: .text%__1cTleaPIdxScaleOffNodeHtwo_adr6kM_I_; +text: .text%__1cETypeFwiden6kMpk0_2_; +text: .text%__1cKciTypeFlowLStateVector2t6Mp0_v_; +text: .text%__1cNCatchProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cOcompU_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCatchProjNodeHsize_of6kM_I_; +text: .text%__1cNCatchProjNodeEhash6kM_I_; +text: .text%__1cSvframeStreamCommonEnext6M_v_; +text: .text%__1cFKlassIsubklass6kM_p0_; +text: .text%__1cKciTypeFlowFBlockPis_simpler_than6Mp1_i_; +text: .text%__1cJimmI8OperIconstant6kM_l_; +text: .text%__1cIAddPNodeQmach_bottom_type6FpknIMachNode__pknEType__; +text: .text%__1cILoadNodeHsize_of6kM_I_; +text: .text%__1cHMatcherVReduceInst_Chain_Rule6MpnFState_ipnIMachNode_rpnENode__v_; +text: .text%__1cURethrowExceptionNodeNis_block_proj6kM_pknENode__; +text: .text%__1cNincI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cLjmpConUNodeHtwo_adr6kM_I_; +text: .text%__1cHMatcherScalling_convention6FpnLOptoRegPair_Ii_v_; +text: .text%__1cKPerfStringKset_string6Mpkc_v_; +text: .text%__1cENodeLnonnull_req6kM_p0_; +text: .text%__1cICmpINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHTypeAryCeq6kMpknEType__i_; +text: .text%__1cQSystemDictionaryKfind_class6FiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cQUnique_Node_ListEpush6MpnENode__v_; +text: .text%__1cILoopNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitTadd_exception_state6MpnNSafePointNode__v_; +text: .text%__1cJloadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__; +text: .text%__1cOPhaseIdealLoopRregister_new_node6MpnENode_2_v_; +text: .text%__1cQPSGenerationPoolImax_size6kM_L_; +text: .text%__1cQPSGenerationPoolNused_in_bytes6M_L_; +text: .text%__1cQPSGenerationPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cOMethodLivenessNwork_list_get6M_pn0AKBasicBlock__; +text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: .text%__1cICallNodeOis_CallRuntime6kM_pknPCallRuntimeNode__; +text: .text%__1cHTypeAryFxmeet6kMpknEType__3_; +text: .text%__1cNstoreImmBNodeMideal_Opcode6kM_i_; +text: .text%__1cKciTypeFlowLStateVectorEmeet6Mpk1_i_; +text: .text%__1cIMachNodeTmay_be_short_branch6kM_i_; +text: .text%__1cJloadINodeJnum_opnds6kM_I_; +text: .text%__1cNaddI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cRMachSafePointNode2t6M_v_; +text: .text%__1cHMatcherKmatch_sfpt6MpnNSafePointNode__pnIMachNode__; +text: .text%__1cOcompP_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cMPhaseChaitinKFind_const6kMpknENode__I_; +text: .text%__1cMPhaseChaitinKFind_const6kMI_I_; +text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_; +text: .text%__1cRInterpretedRFrameEinit6M_v_; +text: .text%__1cHemit_cc6FrnKCodeBuffer_ii_v_; +text: 
.text%__1cNtestI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframeMpd_gc_epilog6M_v_; +text: .text%__1cFframeLgc_epilogue6M_v_; +text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_prologue6M_v_; +text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_; +text: .text%__1cXvirtual_call_RelocationJfirst_oop6M_pC_; +text: .text%__1cXvirtual_call_RelocationJoop_limit6M_pC_; +text: .text%__1cMciMethodDataLhas_trap_at6MpnLProfileData_i_i_; +text: .text%__1cKciTypeFlowLStateVectorOpush_translate6MpnGciType__v_; +text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_; +text: .text%__1cMloadConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIimmPOperIconstant6kM_l_; +text: .text%__1cIimmPOperPconstant_is_oop6kM_i_; +text: .text%__1cOleaPIdxOffNodeHtwo_adr6kM_I_; +text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_; +text: .text%__1cTleaPIdxScaleOffNodeErule6kM_I_; +text: .text%JVM_IsNaN; +text: .text%__1cXinsert_anti_dependences6FrpnFBlock_pnENode_rnLBlock_Array__i_: gcm.o; +text: .text%__1cLOptoRuntimebCcomplete_monitor_unlocking_C6FpnHoopDesc_pnJBasicLock__v_; +text: .text%__1cJloadINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOptoRuntimebAcomplete_monitor_locking_C6FpnHoopDesc_pnJBasicLock_pnKJavaThread__v_; +text: .text%__1cHCompileKTracePhase2t6MpkcpnMelapsedTimer_i_v_; +text: .text%__1cHCompileKTracePhase2T6M_v_; +text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_; +text: .text%__1cHMatcherPc_frame_pointer6kM_i_; +text: .text%__1cFBlockKsched_call6MrnHMatcher_rnLBlock_Array_IrnJNode_List_pipnMMachCallNode_rnJVectorSet__I_; +text: .text%__1cMMachCallNode2t6M_v_; +text: .text%__1cICallNodeJideal_reg6kM_I_; +text: .text%__1cOleaPIdxOffNodeErule6kM_I_; +text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_; +text: .text%__1cSCallLeafDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cOcompP_rRegNodeErule6kM_I_; +text: .text%__1cMany_RegPOperJnum_edges6kM_I_; +text: .text%__1cIGraphKitbLset_predefined_input_for_runtime_call6MpnNSafePointNode__v_; +text: .text%__1cMany_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cScompI_rReg_immNodeErule6kM_I_; +text: .text%__1cLBoxLockNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cIGraphKitOset_all_memory6MpnENode__v_; +text: .text%__1cLRegisterMap2t6Mpk0_v_; +text: .text%__1cGvframe2t6MpknFframe_pknLRegisterMap_pnKJavaThread__v_; +text: .text%__1cNmethodOopDescWwas_executed_more_than6kMi_i_; +text: .text%__1cKstoreCNodePoper_input_base6kM_I_; +text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_; +text: .text%__1cHi2sNodeErule6kM_I_; +text: .text%__1cIMulLNodeGOpcode6kM_i_; +text: .text%__1cOPhaseIdealLoopHdom_lca6kMpnENode_2_2_; +text: .text%__1cMPrefetchNodeGOpcode6kM_i_; +text: .text%__1cSReferenceProcessorSdiscover_reference6MpnHoopDesc_nNReferenceType__i_; +text: .text%__1cSReferenceProcessorTget_discovered_list6MnNReferenceType__ppnHoopDesc__; +text: .text%__1cXjava_lang_ref_ReferenceIset_next6FpnHoopDesc_2_v_; +text: .text%__1cKciTypeFlowGJsrSet2t6MpnFArena_i_v_; +text: .text%__1cNtestI_regNodeJnum_opnds6kM_I_; +text: .text%__1cIAddINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIMachOperNconstant_disp6kM_i_; +text: .text%__1cIMachOperFscale6kM_i_; +text: .text%__1cFframeNis_java_frame6kM_i_; +text: .text%__1cOMethodLivenessKBasicBlockJpropagate6Mp0_v_; +text: 
.text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__; +text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__; +text: .text%__1cNloadKlassNodeErule6kM_I_; +text: .text%__1cIciMethodRhas_compiled_code6M_i_; +text: .text%__1cOoop_RelocationJpack_data6M_i_; +text: .text%__1cKCompiledICKcached_oop6kM_pnHoopDesc__; +text: .text%__1cGOopMap2t6Mii_v_; +text: .text%__1cJOopMapSetKadd_gc_map6MipnGOopMap__v_; +text: .text%__1cNincI_rRegNodeErule6kM_I_; +text: .text%__1cRMachSafePointNodePis_MachCallLeaf6M_pnQMachCallLeafNode__; +text: .text%__1cRMachSafePointNodeLset_oop_map6MpnGOopMap__v_; +text: .text%__1cYDebugInformationRecorderNadd_safepoint6MipnGOopMap__v_; +text: .text%__1cHCompileTProcess_OopMap_Node6MpnIMachNode_i_v_; +text: .text%__1cYDebugInformationRecorderKadd_oopmap6MipnGOopMap__v_; +text: .text%__1cHOopFlowNbuild_oop_map6MpnENode_ipnNPhaseRegAlloc_pi_pnGOopMap__; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNsubI_rRegNodeMcisc_operand6kM_i_; +text: .text%JVM_GetCPFieldClassNameUTF; +text: .text%__1cRaddI_rReg_immNodeErule6kM_I_; +text: .text%__1cRMachNullCheckNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cHRetNodeMideal_Opcode6kM_i_; +text: .text%__1cGvframeKnew_vframe6FpknFframe_pknLRegisterMap_pnKJavaThread__p0_; +text: .text%__1cNsubI_rRegNodeErule6kM_I_; +text: .text%__1cRaddP_rReg_immNodeErule6kM_I_; +text: .text%__1cPClassFileParserbGparse_constant_pool_string_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cJloadLNodePoper_input_base6kM_I_; +text: .text%__1cRshrL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cITypeLongFxdual6kM_pknEType__; +text: .text%__1cRMachSafePointNodeSis_MachCallRuntime6M_pnTMachCallRuntimeNode__; +text: .text%__1cNaddI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cENodeJset_req_X6MIp0pnMPhaseIterGVN__v_; +text: .text%__1cOcompP_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_ipnGThread__v_; +text: .text%__1cWConstantPoolCacheEntryJset_field6MnJBytecodesECode_2nLKlassHandle_iinITosState_ii_v_; +text: .text%__1cIAndLNodeGOpcode6kM_i_; +text: .text%__1cMindIndexOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cOGenerateOopMapCpp6MpnNCellTypeState_2_v_; +text: .text%__1cMCallJavaNodeLis_CallJava6kM_pk0_; +text: .text%__1cICallNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cHCompileSflatten_alias_type6kMpknHTypePtr__3_; +text: .text%__1cRcmpFastUnlockNodePoper_input_base6kM_I_; +text: .text%__1cYCallStaticJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcompP_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cFStateW_sub_Op_CallStaticJava6MpknENode__v_; +text: .text%__1cWMachCallStaticJavaNodeVis_MachCallStaticJava6M_p0_; +text: .text%__1cRaddP_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cICallInfoDset6MnLKlassHandle_nMmethodHandle_pnGThread__v_; +text: .text%__1cSComputeAdapterInfoJdo_object6Mii_v_; +text: .text%__1cRMachSafePointNodeWis_MachCallInterpreter6M_pnXMachCallInterpreterNode__; +text: .text%__1cIGraphKitbDtransfer_exceptions_into_jvms6M_pnIJVMState__; +text: .text%__1cLConvL2INodeGOpcode6kM_i_; +text: .text%__1cOcompI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNaddI_rRegNodeErule6kM_I_; +text: .text%__1cHConNodeEmake6FpknEType__p0_; +text: .text%__1cScompI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cJLoadCNodeGOpcode6kM_i_; +text: .text%__1cYCallStaticJavaDirectNodeFreloc6kM_i_; +text: .text%__1cRcmpFastUnlockNodeMideal_Opcode6kM_i_; +text: .text%__1cJTraceTime2t6MpkciipnMoutputStream__v_; +text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cIGraphKitQset_saved_ex_oop6FpnNSafePointNode_pnENode__v_; +text: .text%__1cIGraphKitUmake_exception_state6MpnENode__pnNSafePointNode__; +text: .text%__1cJloadBNodeMideal_Opcode6kM_i_; +text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_; +text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cGOopMapHcopy_to6MpC_v_; +text: .text%__1cNstoreImmBNodeJnum_opnds6kM_I_; +text: .text%__1cVLoaderConstraintTableWfind_loader_constraint6MnMsymbolHandle_nGHandle__ppnVLoaderConstraintEntry__; +text: .text%__1cJTimeStampJupdate_to6Mx_v_; +text: .text%__1cJTimeStampGupdate6M_v_; +text: .text%__1cFframeZinterpreter_frame_set_bcx6Ml_v_; +text: .text%__1cCosFsleep6FpnGThread_xi_i_; +text: .text%__1cNgetTimeMillis6F_x_; +text: .text%__1cRaddP_rReg_immNodeLbottom_type6kM_pknEType__; +text: .text%__1cIos_sleep6Fxi_i_: os_solaris.o; +text: .text%__1cLPhaseValuesHzerocon6MnJBasicType__pnHConNode__; +text: .text%__1cMCreateExNodeKmatch_edge6kMI_I_; +text: .text%__1cTconvI2L_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cIGraphKitNuncommon_trap6MipnHciKlass_pkci_v_; +text: .text%__1cILoadNodeEmake6FpnENode_22pknHTypePtr_pknEType_nJBasicType__p0_; +text: .text%__1cIGraphKitJmake_load6MpnENode_2pknEType_nJBasicType_i_2_; +text: .text%__1cTconvI2L_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cTno_rax_rbx_RegPOperJnum_edges6kM_I_; +text: .text%__1cLPCTableNodeLbottom_type6kM_pknEType__; +text: .text%__1cLOptoRuntimeSuncommon_trap_Type6F_pknITypeFunc__; +text: .text%__1cIHaltNode2t6MpnENode_2_v_; +text: .text%__1cNSafePointNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNaddI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cGPcDesc2t6Mii_v_; +text: .text%__1cHnmethodKcopy_pc_at6MipnGPcDesc__v_; +text: .text%__1cKciTypeFlowLStateVectorJcopy_into6kMp1_v_; +text: .text%__1cXmembar_release_lockNodeMideal_Opcode6kM_i_; +text: .text%__1cOcompL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cMoutputStreamPupdate_position6MpkcL_v_; +text: .text%__1cMstringStreamFwrite6MpkcL_v_; +text: .text%__1cKciTypeFlowQadd_to_work_list6Mpn0AFBlock__v_; +text: .text%__1cKciTypeFlowKflow_block6Mpn0AFBlock_pn0ALStateVector_pn0AGJsrSet__v_; +text: .text%__1cKciTypeFlowFBlockKsuccessors6MpnQciBytecodeStream_pn0ALStateVector_pn0AGJsrSet__pnNGrowableArray4Cp1___; +text: .text%__1cKciTypeFlowOwork_list_next6M_pn0AFBlock__; +text: .text%__1cIPipelineXfunctional_unit_latency6kMIpk0_I_; +text: .text%__1cMPhaseIterGVNJtransform6MpnENode__2_; +text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cSCompareAndSwapNodeGis_CFG6kM_i_; +text: .text%__1cNGrowableArray4CpnOMethodLivenessKBasicBlock__Icontains6kMrk2_i_; +text: .text%__1cKciTypeFlowFBlock2t6Mp0pn0AFRange_pn0AGJsrSet__v_; +text: 
.text%__1cNGrowableArray4CpnKciTypeFlowJJsrRecord__2t6MpnFArena_iirk2_v_; +text: .text%__1cKstoreCNodeJnum_opnds6kM_I_; +text: .text%__1cNmodI_rRegNodeErule6kM_I_; +text: .text%__1cKInlineTreeWfind_subtree_from_root6Fp0pnIJVMState_pnIciMethod_i_1_; +text: .text%__1cNGrowableArray4CpnPciInstanceKlass__2t6MpnFArena_iirk1_v_; +text: .text%__1cKciTypeFlowFBlockScompute_exceptions6M_v_; +text: .text%__1cYciExceptionHandlerStreamFcount6M_i_; +text: .text%__1cINodeHashJhash_find6MpknENode__p1_; +text: .text%__1cFParsePdo_field_access6Mii_v_; +text: .text%__1cPThreadLocalNodeLbottom_type6kM_pknEType__; +text: .text%__1cOMethodLivenessNmake_block_at6Mipn0AKBasicBlock__2_; +text: .text%__1cKstorePNodeHtwo_adr6kM_I_; +text: .text%__1cKciTypeFlowPflow_successors6MpnNGrowableArray4Cpn0AFBlock___pn0ALStateVector__v_; +text: .text%__1cGciTypeMis_classless6kM_i_; +text: .text%__1cRsalI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cJloadFNodeErule6kM_I_; +text: .text%__1cKBranchDataNis_BranchData6M_i_; +text: .text%__1cIJumpDataLis_JumpData6M_i_; +text: .text%__1cSMemBarCPUOrderNodeGOpcode6kM_i_; +text: .text%__1cLklassVtableNput_method_at6MpnNmethodOopDesc_i_v_; +text: .text%__1cHi2sNodeMideal_Opcode6kM_i_; +text: .text%__1cKstoreCNodeMideal_Opcode6kM_i_; +text: .text%__1cRshrI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNloadConI0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNloadKlassNodeJnum_opnds6kM_I_; +text: .text%__1cHCompileKalias_type6MpnHciField__pn0AJAliasType__; +text: .text%__1cLStringTableGintern6FnGHandle_pHipnGThread__pnHoopDesc__; +text: .text%__1cLStringTableLhash_string6FpHi_i_; +text: .text%__1cMCreateExNodeGpinned6kM_i_; +text: .text%__1cFciEnvWget_klass_by_name_impl6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cNloadKlassNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRScavengeRootsTaskEname6M_pc_; +text: .text%__1cRScavengeRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cNtestP_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cKstoreINodeHtwo_adr6kM_I_; +text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cPCountedLoopNodeGOpcode6kM_i_; +text: .text%__1cHTypeIntEmake6Fii_pk0_; +text: .text%__1cRcmpFastUnlockNodeHtwo_adr6kM_I_; +text: .text%__1cJloadSNodeMideal_Opcode6kM_i_; +text: .text%__1cPDictionaryEntrybAcontains_protection_domain6kMpnHoopDesc__i_; +text: .text%__1cIregFOperEtype6kM_pknEType__; +text: .text%__1cLLShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%JVM_GetCPMethodNameUTF; +text: .text%__1cMLinkResolverbNlinktime_resolve_virtual_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cNsubI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cLTypeInstPtrFxdual6kM_pknEType__; +text: .text%__1cNsubI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cIGraphKitOreplace_in_map6MpnENode_2_v_; +text: .text%__1cMPhaseChaitinLclone_projs6MpnFBlock_IpnENode_4rI_i_; +text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cLcastP2LNodeJnum_opnds6kM_I_; +text: .text%__1cOMethodLivenessNwork_list_add6Mpn0AKBasicBlock__v_; +text: .text%__1cFParseFBlockNlocal_type_at6kMi_pknEType__; +text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_; +text: .text%__1cHTypeIntFxdual6kM_pknEType__; +text: .text%__1cEUTF8Hstrrchr6FpWiW_1_; +text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: 
.text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_nGHandle_2ipnGThread__pnMklassOopDesc__; +text: .text%__1cNLoadKlassNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHCompileZintrinsic_insertion_index6MpnIciMethod_i_i_; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_; +text: .text%__1cJVectorSetFClear6M_v_; +text: .text%__1cMMergeMemNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_; +text: .text%__1cICodeHeapSallocated_capacity6kM_L_; +text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_; +text: .text%__1cIMachOperEtype6kM_pknEType__; +text: .text%__1cLjmpConUNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cNCallGenerator2t6MpnIciMethod__v_; +text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_; +text: .text%__1cRis_error_reported6F_i_; +text: .text%__1cLStatSamplerLsample_data6FpnMPerfDataList__v_; +text: .text%__1cPStatSamplerTaskEtask6M_v_; +text: .text%__1cMPeriodicTaskMtime_to_wait6F_L_; +text: .text%__1cMPeriodicTaskOreal_time_tick6FL_v_; +text: .text%__1cNWatcherThreadRis_Watcher_thread6kM_i_; +text: .text%__1cLStatSamplerOcollect_sample6F_v_; +text: .text%__1cJloadBNodePoper_input_base6kM_I_; +text: .text%__1cMVM_OperationPevaluation_mode6kM_n0AEMode__; +text: .text%__1cIGraphKit2t6M_v_; +text: .text%__1cOemit_d64_reloc6FrnKCodeBuffer_lnJrelocInfoJrelocType_i_v_; +text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_; +text: .text%__1cIGraphKitNset_map_clone6MpnNSafePointNode__v_; +text: .text%__1cRRawBytecodeStream2t6MnMmethodHandle__v_; +text: .text%__1cENodeHget_ptr6kM_l_; +text: .text%__1cFStateM_sub_Op_ConP6MpknENode__v_; +text: .text%__1cJloadPNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKI2CAdapterOis_i2c_adapter6kM_i_; +text: .text%__1cOcompU_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cQciBytecodeStreamWget_field_holder_index6M_i_; +text: .text%__1cQciBytecodeStreamZget_declared_field_holder6M_pnPciInstanceKlass__; +text: .text%__1cRinterpretedVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cMorI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_; +text: .text%__1cFParseRensure_memory_phi6Mii_pnHPhiNode__; +text: .text%__1cNdecI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cJLoadINodeJideal_reg6kM_I_; +text: .text%__1cKRelocationWfix_relocation_at_move6Ml_v_; +text: .text%__1cPindOffset32OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cHAddNodePadd_of_identity6kMpknEType_3_3_; +text: .text%__1cMFastLockNodeGOpcode6kM_i_; +text: .text%__1cScompU_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cHCompilebAallow_range_check_smearing6kM_i_; +text: .text%__1cLBuildCutout2T6M_v_; +text: .text%__1cLBuildCutout2t6MpnIGraphKit_pnENode_ff_v_; +text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cQjava_lang_StringQbasic_create_oop6FpnQtypeArrayOopDesc_ipnGThread__pnHoopDesc__; +text: .text%__1cICodeHeapIcapacity6kM_L_; +text: .text%__1cKMemoryPoolImax_size6kM_L_; +text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cMCodeHeapPoolNused_in_bytes6M_L_; +text: .text%__1cMPhaseChaitinTsplit_Rematerialize6MpnENode_pnFBlock_IrInNGrowableArray4CI__ipIp2i_2_; +text: .text%__1cJcmpOpOperFccode6kM_i_; +text: .text%__1cKjmpDirNodeTmay_be_short_branch6kM_i_; +text: 
.text%__1cKjmpDirNodeOis_pc_relative6kM_i_; +text: .text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_; +text: .text%__1cEUTF8Enext6FpkcpH_pc_; +text: .text%__1cOcompL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cbAPSEvacuateFollowersClosureHdo_void6M_v_; +text: .text%__1cFParseKdo_get_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_; +text: .text%__1cHMulNodeEhash6kM_I_; +text: .text%__1cGRFrame2t6MnFframe_pnKJavaThread_p0_v_; +text: .text%__1cTconvI2L_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_; +text: .text%__1cScompU_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNFingerprinterJdo_object6Mii_v_; +text: .text%__1cMloadConFNodePoper_input_base6kM_I_; +text: .text%__1cOGenerateOopMapTmerge_state_vectors6MpnNCellTypeState_2_i_; +text: .text%__1cMloadConFNodeHtwo_adr6kM_I_; +text: .text%__1cICallNodeSis_CallDynamicJava6kM_pknTCallDynamicJavaNode__; +text: .text%__1cRcmpFastUnlockNodeJnum_opnds6kM_I_; +text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_; +text: .text%__1cUParallelScavengeHeapMmem_allocate6MLii_pnIHeapWord__; +text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cKjmpConNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMethodLivenessKBasicBlockQcompute_gen_kill6MpnIciMethod__v_; +text: .text%__1cOMethodLivenessKBasicBlock2t6Mp0ii_v_; +text: .text%__1cMloadConFNodeErule6kM_I_; +text: .text%__1cLcastP2LNodeHtwo_adr6kM_I_; +text: .text%__1cIMachOperIconstant6kM_l_; +text: .text%__1cJloadSNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cPcheckCastPPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMTypeKlassPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cUEdenMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cYSurvivorMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cYSurvivorMutableSpacePoolImax_size6kM_L_; +text: .text%__1cUEdenMutableSpacePoolNused_in_bytes6M_L_; +text: .text%__1cUEdenMutableSpacePoolImax_size6kM_L_; +text: .text%__1cYSurvivorMutableSpacePoolNused_in_bytes6M_L_; +text: .text%__1cKjmpConNodeTmay_be_short_branch6kM_i_; +text: .text%__1cKjmpConNodeOis_pc_relative6kM_i_; +text: .text%__1cHConNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKCodeBufferKend_a_stub6M_v_; +text: .text%__1cTemit_java_to_interp6FrnKCodeBuffer__v_; +text: .text%__1cKCodeBufferMstart_a_stub6M_v_; +text: .text%__1cFParseUprofile_taken_branch6Mi_v_; +text: .text%__1cKciTypeFlowGJsrSetNapply_control6Mp0pnQciBytecodeStream_pn0ALStateVector__v_; +text: .text%__1cKReturnNodeGis_CFG6kM_i_; +text: .text%__1cRSignatureIteratorSskip_optional_size6M_v_; +text: .text%__1cRaddI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHTypeIntFwiden6kMpknEType__3_; +text: .text%__1cTCompareAndSwapLNodeGOpcode6kM_i_; +text: .text%__1cNmethodOopDescbHhas_unloaded_classes_in_signature6FnMmethodHandle_pnGThread__i_; +text: .text%__1cIciObjectRis_instance_klass6M_i_; +text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_; +text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_; +text: .text%__1cPloadConUL32NodeMideal_Opcode6kM_i_; +text: .text%__1cEUTF8Ounicode_length6Fpkci_i_; +text: .text%__1cIciMethodRget_flow_analysis6M_pnKciTypeFlow__; +text: .text%__1cICodeBlobLoop_addr_at6kMi_ppnHoopDesc__; +text: .text%__1cNloadRangeNodeHtwo_adr6kM_I_; +text: 
.text%__1cJloadLNodeJnum_opnds6kM_I_; +text: .text%__1cSmembar_acquireNodeMideal_Opcode6kM_i_; +text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__l_; +text: .text%__1cHoopDescSslow_identity_hash6M_l_; +text: .text%__1cKMemBarNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_; +text: .text%__1cIMachNodeOpipeline_class6F_pknIPipeline__; +text: .text%__1cIMachNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadCNodeErule6kM_I_; +text: .text%__1cKOSRAdapterIis_alive6kM_i_; +text: .text%__1cQjava_lang_StringMbasic_create6FpnQtypeArrayOopDesc_ipnGThread__nGHandle__; +text: .text%__1cRMachNullCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGThreadXclear_pending_exception6M_v_; +text: .text%__1cXindIndexScaleOffsetOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cOcompL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__; +text: .text%__1cETypeRget_typeflow_type6FpnGciType__pk0_; +text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_; +text: .text%__1cKciTypeFlowLStateVectorJdo_invoke6MpnQciBytecodeStream_i_v_; +text: .text%__1cKstorePNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKReturnNodeKmatch_edge6kMI_I_; +text: .text%__1cKRegionNodeJideal_reg6kM_I_; +text: .text%__1cJloadINodeHtwo_adr6kM_I_; +text: .text%__1cQmark_inner_loops6FpnIPhaseCFG_pnFBlock__v_: block.o; +text: .text%__1cIHaltNodeJideal_reg6kM_I_; +text: .text%__1cFStateM_sub_Op_Halt6MpknENode__v_; +text: .text%__1cWShouldNotReachHereNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReturnNodeGOpcode6kM_i_; +text: .text%__1cJTypeTupleKmake_range6FpnLciSignature__pk0_; +text: .text%__1cKStoreLNodeGOpcode6kM_i_; +text: .text%__1cPCountedLoopNodeOis_CountedLoop6M_p0_; +text: .text%__1cJTypeTupleLmake_domain6FpnPciInstanceKlass_pnLciSignature__pk0_; +text: .text%__1cPClassFileParserWparse_field_attributes6MnSconstantPoolHandle_iHpHpi2pnPtypeArrayHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_modifiers6MiipnGThread__v_; +text: .text%__1cMindirectOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMindirectOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMLinkResolverUresolve_virtual_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_virtual_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cFKlassXcan_be_statically_bound6FpnNmethodOopDesc__i_; +text: .text%__1cLProfileDataOtranslate_from6Mp0_v_; +text: .text%__1cKstorePNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKstoreINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSPSKeepAliveClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cNloadConI0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvI2L_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMWarmCallInfoKalways_hot6F_p0_; +text: .text%__1cMWarmCallInfoGis_hot6kM_i_; +text: .text%__1cNprefetchwNodeMideal_Opcode6kM_i_; +text: .text%__1cIAddINodeJideal_reg6kM_I_; +text: .text%__1cNCatchProjNode2t6MpnENode_Ii_v_; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__; +text: .text%__1cLBoxLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmulL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cKciTypeFlowIcan_trap6MrnQciBytecodeStream__i_; +text: 
.text%__1cQVMOperationQdDueueLqueue_empty6Mi_i_; +text: .text%__1cIProjNodeDcmp6kMrknENode__I_; +text: .text%__1cSComputeAdapterInfoGdo_int6M_v_; +text: .text%__1cNCatchProjNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%JVM_GetCPFieldSignatureUTF; +text: .text%__1cIGraphKitTtoo_many_recompiles6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cHCompileFstart6kM_pnJStartNode__; +text: .text%__1cNmulL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cLPCTableNodeEhash6kM_I_; +text: .text%__1cIGraphKitZadd_exception_states_from6MpnIJVMState__v_; +text: .text%__1cJStartNodeOis_block_start6kM_i_; +text: .text%__1cQComputeCallStackHdo_void6M_v_; +text: .text%__1cNaddI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitOtoo_many_traps6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cNciCallProfileRapply_prof_factor6Mf_v_; +text: .text%__1cIciMethodTcall_profile_at_bci6Mi_nNciCallProfile__; +text: .text%__1cHCompileOcall_generator6MpnIciMethod_ipnIJVMState_if_pnNCallGenerator__; +text: .text%__1cHCompileOfind_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cMindIndexOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMindIndexOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%jni_GetPrimitiveArrayCritical: jni.o; +text: .text%jni_ReleasePrimitiveArrayCritical: jni.o; +text: .text%__1cSInterpreterRuntimeDldc6FpnKJavaThread_i_v_; +text: .text%__1cPClassFileParserbIparse_constant_pool_fieldref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNdecI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cFKlassOis_subclass_of6kMpnMklassOopDesc__i_; +text: .text%__1cNGrowableArray4Cl_2t6Mii_v_; +text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMpnNmethodOopDesc_i_pnHnmethod__; +text: .text%__1cIAddINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cLPCTableNodeKis_PCTable6kM_pk0_; +text: .text%__1cLPCTableNodeHsize_of6kM_I_; +text: .text%__1cNincI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__; +text: .text%__1cQciBytecodeStreamXget_method_holder_index6M_i_; +text: .text%__1cMorI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cLimmUL32OperJconstantL6kM_x_; +text: .text%__1cIGraphKitWround_double_arguments6MpnIciMethod__v_; +text: .text%__1cFParseHdo_call6M_v_; +text: .text%__1cIGraphKitTround_double_result6MpnIciMethod__v_; +text: .text%__1cFParseZcan_not_compile_call_site6MpnIciMethod_pnPciInstanceKlass__i_; +text: .text%__1cQciBytecodeStreambAget_declared_method_holder6M_pnHciKlass__; +text: .text%__1cFParseMprofile_call6MpnENode__v_; +text: .text%__1cScompP_mem_rRegNodePoper_input_base6kM_I_; +text: .text%__1cICodeHeapLheader_size6F_L_; +text: .text%__1cJloadBNodeJnum_opnds6kM_I_; +text: .text%__1cENodeLbottom_type6kM_pknEType__; +text: .text%__1cXindIndexScaleOffsetOperNconstant_disp6kM_i_; +text: .text%__1cSindIndexOffsetOperJnum_edges6kM_I_; +text: .text%__1cFKlassMnext_sibling6kM_p0_; +text: .text%__1cOGenerateOopMapNrestore_state6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_; +text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_; +text: .text%__1cKciTypeFlowLStateVectorStype_meet_internal6FpnGciType_3p0_3_; +text: .text%__1cRcmpFastUnlockNodeErule6kM_I_; +text: .text%__1cIBoolNodeHsize_of6kM_I_; +text: .text%__1cLLShiftINodeLbottom_type6kM_pknEType__; +text: .text%jni_DeleteLocalRef: jni.o; +text: .text%__1cJloadSNodePoper_input_base6kM_I_; +text: .text%__1cPno_rax_RegPOperJnum_edges6kM_I_; +text: 
.text%__1cOcompI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%jni_SetIntField: jni.o; +text: .text%__1cMURShiftLNodeLbottom_type6kM_pknEType__; +text: .text%__1cMMutableSpaceFclear6M_v_; +text: .text%__1cNtestI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__; +text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__; +text: .text%__1cNprefetchwNodePoper_input_base6kM_I_; +text: .text%__1cTCreateExceptionNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cHMulNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cVExceptionHandlerTableJadd_entry6MnRHandlerTableEntry__v_; +text: .text%__1cPsalI_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cRaddP_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMLinkResolverbFlinktime_resolve_special_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverUresolve_special_call6FrnICallInfo_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_special_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_4ipnGThread__v_; +text: .text%__1cQComputeCallStackJdo_object6Mii_v_; +text: .text%__1cIGraphKitNcast_not_null6MpnENode__2_; +text: .text%__1cTconvL2I_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cKPSYoungGenNused_in_bytes6kM_L_; +text: .text%__1cOCompilerOracleOshould_exclude6FnMmethodHandle__i_; +text: .text%__1cOGenerateOopMapHset_var6MinNCellTypeState__v_; +text: .text%__1cPcheckCastPPNodeJnum_opnds6kM_I_; +text: .text%__1cLLShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cGIfNodeHsize_of6kM_I_; +text: .text%__1cPciInstanceKlassFsuper6M_p0_; +text: .text%__1cOcompL_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cLLShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cScompI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMMergeMemNodeJideal_reg6kM_I_; +text: .text%__1cNandL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cIciMethodWwas_executed_more_than6Mi_i_; +text: .text%__1cSReferenceProcessorbAprocess_discovered_reflist6MppnHoopDesc_pnPReferencePolicy_i_v_; +text: .text%__1cSReferenceProcessorOprocess_phase36MppnHoopDesc_ipnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: .text%__1cSReferenceProcessorOprocess_phase26MppnHoopDesc_pnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSReferenceProcessorbAenqueue_discovered_reflist6MpnHoopDesc_p2_v_; +text: .text%__1cTleaPIdxScaleOffNodeMideal_Opcode6kM_i_; +text: .text%__1cTleaPIdxScaleOffNodePoper_input_base6kM_I_; +text: .text%__1cFLabelJadd_patch6Mi_v_; +text: .text%__1cKMemBarNodeEhash6kM_I_; +text: .text%__1cOcompP_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNloadConL0NodePoper_input_base6kM_I_; +text: .text%__1cNsubI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJMarkSweepSMarkAndPushClosureLdo_nmethods6kM_ki_; +text: .text%__1cIXorINodeGOpcode6kM_i_; +text: .text%__1cTStackWalkCompPolicyRcompilation_level6MnMmethodHandle_i_i_; +text: .text%__1cMoutputStreamDput6Mc_v_; +text: .text%__1cPindOffset32OperNbase_position6kM_i_; +text: .text%__1cPindOffset32OperNconstant_disp6kM_i_; +text: .text%__1cOcompU_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeHtwo_adr6kM_I_; +text: .text%__1cPBytecode_invokeFindex6kM_i_; +text: .text%__1cFframeNis_glue_frame6kM_i_; +text: 
.text%__1cLRShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cZPhaseConservativeCoalesceKupdate_ifg6MIIpnIIndexSet_2_v_; +text: .text%__1cZPhaseConservativeCoalesceMunion_helper6MpnENode_2II222pnFBlock_I_v_; +text: .text%__1cIIndexSetEswap6Mp0_v_; +text: .text%__1cOPhaseIdealLoopIsink_use6MpnENode_2_v_; +text: .text%__1cRshrL_rReg_immNodeErule6kM_I_; +text: .text%__1cKInlineTreeMok_to_inline6MpnIciMethod_pnIJVMState_rnNciCallProfile_pnMWarmCallInfo__8_; +text: .text%__1cTpass_initial_checks6FpnIciMethod_i1_i_; +text: .text%__1cKInlineTreeMshouldInline6kMpnIciMethod_irnNciCallProfile_pnMWarmCallInfo__pkc_; +text: .text%__1cOCompilerOracleNshould_inline6FnMmethodHandle__i_; +text: .text%__1cKInlineTreeNtry_to_inline6MpnIciMethod_irnNciCallProfile_pnMWarmCallInfo__pkc_; +text: .text%__1cIciMethodbAinterpreter_throwout_count6kM_i_; +text: .text%__1cIciMethodNshould_inline6M_i_; +text: .text%__1cIciMethodOshould_exclude6M_i_; +text: .text%__1cScompU_rReg_immNodeErule6kM_I_; +text: .text%__1cKjmpDirNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cKjmpDirNodeJis_Branch6kM_I_; +text: .text%__1cKjmpDirNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMindirectOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cILoadNodeDcmp6kMrknENode__I_; +text: .text%__1cLTypeInstPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cIGraphKitHjava_bc6kM_nJBytecodesECode__; +text: .text%__1cFLabelSpatch_instructions6MpnRAbstractAssembler__v_; +text: .text%__1cRAbstractAssemblerHbind_to6MrnFLabel_i_v_; +text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_; +text: .text%__1cNloadConL0NodeErule6kM_I_; +text: .text%__1cJStoreNodeSIdeal_masked_input6MpnIPhaseGVN_I_pnENode__; +text: .text%__1cIGraphKitNbuiltin_throw6MnODeoptimizationLDeoptReason_pnENode__v_; +text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cJrRegIOperFclone6kM_pnIMachOper__; +text: .text%__1cMindIndexOperFscale6kM_i_; +text: .text%__1cScompP_mem_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cFKlassPoop_is_instance6kM_i_; +text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_; +text: .text%__1cRandI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cMMachProjNodeHsize_of6kM_I_; +text: .text%__1cJStoreNodeZIdeal_sign_extended_input6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cScompP_mem_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cTconvF2D_reg_memNodeErule6kM_I_; +text: .text%__1cPindOffset32OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFStateM_sub_Op_CmpP6MpknENode__v_; +text: .text%__1cPciInstanceKlassUget_canonical_holder6Mi_p0_; +text: .text%__1cMloadConLNodeMideal_Opcode6kM_i_; +text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_; +text: .text%__1cIciSymbolEmake6Fpkc_p0_; +text: .text%__1cIciSymbolJmake_impl6Fpkc_p0_; +text: .text%__1cScompU_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cKimmL32OperIconstant6kM_l_; +text: .text%__1cHi2sNodePoper_input_base6kM_I_; +text: .text%__1cKimmL32OperJnum_edges6kM_I_; +text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_; +text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cLBoxLockNodeKstack_slot6FpnENode__i_; +text: .text%__1cLBoxLockNodeKis_BoxLock6kM_pk0_; +text: .text%__1cIBoolNodeDcmp6kMrknENode__I_; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_; +text: .text%__1cKDataLayoutPneeds_array_len6FC_i_; +text: .text%__1cKDataLayoutKinitialize6MCHi_v_; +text: .text%__1cJloadLNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cFframebHnext_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_; +text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_; +text: .text%__1cFParseRbranch_prediction6Mrf_f_; +text: .text%__1cPshrI_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cHciKlassNis_subtype_of6Mp0_i_; +text: .text%__1cOcompL_rRegNodeErule6kM_I_; +text: .text%__1cNGrowableArray4Cpv_Praw_at_put_grow6Mirk03_v_; +text: .text%__1cNGrowableArray4Cl_Praw_at_put_grow6Mirkl2_v_; +text: .text%__1cISubINodeLbottom_type6kM_pknEType__; +text: .text%__1cIGraphKitZset_all_rewritable_memory6MpnENode__v_; +text: .text%__1cIGraphKitTset_all_memory_call6MpnENode__v_; +text: .text%__1cMtlsLoadPNodeLbottom_type6kM_pknEType__; +text: .text%__1cJAssemblerEmovq6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cRsalI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cNloadRangeNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_; +text: .text%__1cGRFrameMset_distance6Mi_v_; +text: .text%__1cICodeBlobOis_c2i_adapter6kM_i_; +text: .text%__1cFframeTis_first_java_frame6kM_i_; +text: .text%__1cFframeLreal_sender6kMpnLRegisterMap__0_; +text: .text%__1cGRFrameGcaller6M_p0_; +text: .text%__1cTStackWalkCompPolicyIsenderOf6MpnGRFrame_pnNGrowableArray4C2___2_; +text: .text%__1cGRFrameKnew_RFrame6FnFframe_pnKJavaThread_p0_4_; +text: .text%__1cKstoreLNodePoper_input_base6kM_I_; +text: .text%__1cTconstantPoolOopDescMklass_at_put6MipnMklassOopDesc__v_; +text: .text%__1cNFingerprinterGdo_int6M_v_; +text: .text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cRaddI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cRshrL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHPhiNodeEmake6FpnENode_2_p0_; +text: .text%__1cScompI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__; +text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cLRethrowNodeGis_CFG6kM_i_; +text: .text%__1cIciObjectFklass6M_pnHciKlass__; +text: .text%__1cNloadConP0NodeMideal_Opcode6kM_i_; +text: .text%__1cOPhaseIdealLoopOsplit_thru_phi6MpnENode_2i_2_; +text: .text%__1cNGCTaskManagerRset_resource_flag6MIi_v_; +text: .text%__1cRshrI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cUmembar_cpu_orderNodeMideal_Opcode6kM_i_; +text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_; +text: .text%__1cKEntryPointFentry6kMnITosState__pC_; +text: .text%__1cJloadCNodeMideal_Opcode6kM_i_; +text: .text%__1cKJavaThreadJframes_do6MpFpnFframe_pknLRegisterMap__v_v_; +text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_; +text: .text%__1cRInvocationCounterJset_carry6M_v_; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cFStateM_sub_Op_RegL6MpknENode__v_; +text: .text%__1cNdecI_rRegNodeErule6kM_I_; +text: .text%__1cKjmpConNodeJis_Branch6kM_I_; +text: .text%__1cKjmpConNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmpFastLockNodeMideal_Opcode6kM_i_; +text: .text%__1cKjmpConNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fl_v_; +text: .text%__1cNCallGeneratorJis_inline6kM_i_; +text: .text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_; +text: .text%__1cCosRcurrent_thread_id6F_l_; +text: .text%__1cKciTypeFlowLStateVectorMdo_getstatic6MpnQciBytecodeStream__v_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_; +text: .text%__1cLLShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: 
.text%__1cKstoreLNodeMideal_Opcode6kM_i_; +text: .text%__1cNloadKlassNodeHtwo_adr6kM_I_; +text: .text%__1cFParseYprofile_not_taken_branch6M_v_; +text: .text%__1cHPhiNodeMslice_memory6kMpknHTypePtr__p0_; +text: .text%__1cOcompL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cLRuntimeStubMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__; +text: .text%__1cOcompL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cPcmpFastLockNodePoper_input_base6kM_I_; +text: .text%__1cNCallGeneratorKis_virtual6kM_i_; +text: .text%__1cKInlineTreePshouldNotInline6kMpnIciMethod_pnMWarmCallInfo__pkc_; +text: .text%__1cLcastP2LNodeErule6kM_I_; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cNPhaseRegAllocKoffset2reg6kMi_i_; +text: .text%__1cQjmpCon_shortNodeMideal_Opcode6kM_i_; +text: .text%__1cQjmpCon_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_pnGRFrame__v_; +text: .text%__1cTconvI2L_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cENodeRlatency_from_uses6kMrnLBlock_Array_rnNGrowableArray4CI___i_; +text: .text%__1cNGrowableArray4CI_Praw_at_put_grow6MirkI2_v_; +text: .text%__1cFParseFdo_if6MpnENode_2nIBoolTestEmask_2_v_; +text: .text%__1cXmembar_acquire_lockNodeMideal_Opcode6kM_i_; +text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cXindIndexScaleOffsetOperOindex_position6kM_i_; +text: .text%__1cXindIndexScaleOffsetOperNbase_position6kM_i_; +text: .text%__1cPsalI_rReg_1NodePoper_input_base6kM_I_; +text: .text%__1cIGraphKitJpush_node6MnJBasicType_pnENode__v_; +text: .text%__1cISubINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSconstMethodOopDescbEchecked_exceptions_length_addr6kM_pH_; +text: .text%__1cPThreadLocalNodeGOpcode6kM_i_; +text: .text%__1cRsubI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cJloadCNodePoper_input_base6kM_I_; +text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__; +text: .text%__1cCosXthread_local_storage_at6Fi_pv_; +text: .text%__1cLRuntimeStubYcaller_must_gc_arguments6kMpnKJavaThread__i_; +text: .text%__1cIAddPNodeJideal_reg6kM_I_; +text: .text%__1cTleaPIdxScaleOffNodeJnum_opnds6kM_I_; +text: .text%__1cRaddI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cFParseNthrow_to_exit6MpnNSafePointNode__v_; +text: .text%__1cQCallLeafNoFPNodeGOpcode6kM_i_; +text: .text%__1cKTypeRawPtrHget_con6kM_l_; +text: .text%__1cOClearArrayNodeGOpcode6kM_i_; +text: .text%__1cOoop_RelocationHoops_do6MpFppnHoopDesc__v_v_; +text: .text%__1cIciMethodbHhas_unloaded_classes_in_signature6M_i_; +text: .text%__1cScompP_mem_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cFStateM_sub_Op_CmpI6MpknENode__v_; +text: .text%__1cNincI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cJimmP0OperEtype6kM_pknEType__; +text: .text%__1cNloadConP0NodeLbottom_type6kM_pknEType__; +text: .text%__1cPloadConUL32NodeLbottom_type6kM_pknEType__; +text: .text%__1cNloadConI0NodeHsize_of6kM_I_; +text: .text%__1cRaddI_rReg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cPshrI_rReg_1NodePoper_input_base6kM_I_; +text: .text%JVM_handle_solaris_signal; +text: .text%signalHandler; +text: .text%__1cQJNI_FastGetFieldQfind_slowcase_pc6FpC_1_; +text: .text%__1cMLinkResolverbElinktime_resolve_static_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cRresolve_and_patch6FppnHoopDesc__v_; +text: 
.text%__1cFStateN_sub_Op_LoadP6MpknENode__v_; +text: .text%__1cISubINodeDsub6kMpknEType_3_3_; +text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_; +text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cRInterpretedRFrameOis_interpreted6kM_i_; +text: .text%__1cGRFrameLis_compiled6kM_i_; +text: .text%__1cUPSGenerationCountersKupdate_all6M_v_; +text: .text%__1cTStackWalkCompPolicyMshouldInline6FnMmethodHandle_fi_pkc_; +text: .text%__1cFArenaRdestruct_contents6M_v_; +text: .text%__1cIGraphKitPstore_to_memory6MpnENode_22nJBasicType_i_2_; +text: .text%__1cJStoreNodeEmake6FpnENode_22pknHTypePtr_2nJBasicType__p0_; +text: .text%__1cXPhaseAggressiveCoalesceYinsert_copy_with_overlap6MpnFBlock_pnENode_II_v_; +text: .text%__1cULinearLeastSquareFitGupdate6Mdd_v_; +text: .text%__1cKciTypeFlowGJsrSetSis_compatible_with6Mp1_i_; +text: .text%__1cENodeIadd_prec6Mp0_v_; +text: .text%__1cKOSRAdapterOis_osr_adapter6kM_i_; +text: .text%__1cIMulINodeGOpcode6kM_i_; +text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_; +text: .text%__1cNGrowableArray4CpnGciType__2t6MpnFArena_iirk1_v_; +text: .text%__1cKTypeAryPtrFempty6kM_i_; +text: .text%__1cHTypeAryFempty6kM_i_; +text: .text%__1cJloadCNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cIciMethod2t6MnMmethodHandle__v_; +text: .text%__1cRandI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_rnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cCosRelapsed_frequency6F_x_; +text: .text%__1cNinstanceKlassVis_same_class_package6FpnHoopDesc_pnNsymbolOopDesc_24_i_; +text: .text%__1cMelapsedTimerHseconds6kM_d_; +text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_; +text: .text%__1cJAssemblerDnop6M_v_; +text: .text%__1cDCHANprocess_class6FnLKlassHandle_pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: .text%__1cEUTF8Fequal6FpWi1i_i_; +text: .text%__1cKstoreLNodeJnum_opnds6kM_I_; +text: .text%__1cIjniIdMapHoops_do6MpnKOopClosure__v_; +text: .text%__1cMjniIdMapBaseHoops_do6MpnKOopClosure__v_; +text: .text%__1cJArrayDataKcell_count6M_i_; +text: .text%__1cGBitMapIset_from6M0_v_; +text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__; +text: .text%__1cKType_ArrayEgrow6MI_v_; +text: .text%JVM_Write; +text: .text%__1cDhpiFwrite6FipkvI_L_; +text: .text%__1cMStartC2INodeGOpcode6kM_i_; +text: .text%__1cSindIndexOffsetOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__; +text: .text%__1cIAddLNodeLbottom_type6kM_pknEType__; +text: .text%__1cKMemBarNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cIParmNodeJideal_reg6kM_I_; +text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_; +text: .text%__1cMtlsLoadPNodeMideal_Opcode6kM_i_; +text: .text%__1cRshrI_rReg_immNodeErule6kM_I_; +text: .text%__1cJcmpOpOperGnegate6M_v_; +text: .text%__1cHTypeAryEmake6FpknEType_pknHTypeInt__pk0_; +text: .text%__1cFParseRoptimize_inlining6MpnIciMethod_ipnPciInstanceKlass_24irnKInlineTreeLInlineStyle_r2_v_; +text: .text%__1cQimprove_receiver6FpnPciInstanceKlass_pknLTypeInstPtr_ri_1_; +text: .text%__1cPcmpFastLockNodeHtwo_adr6kM_I_; +text: .text%__1cMLinkResolverTresolve_static_call6FrnICallInfo_rnLKlassHandle_nMsymbolHandle_53iipnGThread__v_; +text: .text%__1cPCheckCastPPNodeJideal_reg6kM_I_; +text: .text%__1cKJavaThreadUin_stack_yellow_zone6MpC_i_; +text: 
.text%__1cFParseSmerge_memory_edges6MpnMMergeMemNode_ii_v_; +text: .text%__1cJAssemblerEmovq6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__v_; +text: .text%__1cUThreadSafepointStatebDhandle_polling_page_exception6M_v_; +text: .text%__1cLjmpConUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNandL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cKcmpOpUOperFccode6kM_i_; +text: .text%__1cLjmpConUNodeTmay_be_short_branch6kM_i_; +text: .text%__1cLjmpConUNodeOis_pc_relative6kM_i_; +text: .text%__1cIPipelinePoperand_latency6kMIpk0_I_; +text: .text%__1cWCallLeafNoFPDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cJTimeStampSticks_since_update6kM_x_; +text: .text%__1cURethrowExceptionNodeMideal_Opcode6kM_i_; +text: .text%__1cJloadPNodeFreloc6kM_i_; +text: .text%__1cTno_rax_rbx_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNprefetchwNodeJnum_opnds6kM_I_; +text: .text%__1cKjmpConNodeGnegate6M_v_; +text: .text%__1cMindirectOperFscale6kM_i_; +text: .text%__1cQSystemDictionaryVadd_loader_constraint6FnMsymbolHandle_nGHandle_2pnGThread__v_; +text: .text%__1cVLoaderConstraintTableJadd_entry6MnMsymbolHandle_pnMklassOopDesc_nGHandle_34pnGThread__i_; +text: .text%__1cRsubI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cQComputeCallStackGdo_int6M_v_; +text: .text%__1cNloadRangeNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNtestP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadRangeNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPCheckCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFParseMvisit_blocks6M_v_; +text: .text%__1cQjmpDir_shortNodeMideal_Opcode6kM_i_; +text: .text%__1cQjmpDir_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKciTypeFlowLStateVectorLdo_getfield6MpnQciBytecodeStream__v_; +text: .text%__1cNaddI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cScompP_mem_rRegNodeErule6kM_I_; +text: .text%__1cPSignatureStreamRas_symbol_or_null6M_pnNsymbolOopDesc__; +text: .text%__1cNSafePointNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadSNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKMemBarNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOjmpLoopEndNodeMideal_Opcode6kM_i_; +text: .text%__1cFBlockTimplicit_null_check6MrnLBlock_Array_rnNGrowableArray4CI__pnENode_6_v_; +text: .text%__1cKCastPPNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_; +text: .text%__1cGThreadSis_Compiler_thread6kM_i_; +text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_; +text: .text%__1cJloadBNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cQVMOperationQdDueueSqueue_remove_front6Mi_pnMVM_Operation__; +text: .text%__1cOcompI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVCompressedWriteStreamEgrow6M_v_; +text: .text%__1cNCompileBrokerYcheck_compilation_result6FnMmethodHandle_iippnHnmethod__i_; +text: .text%__1cKReflectionVis_same_class_package6FpnMklassOopDesc_2_i_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnMklassOopDesc__i_; +text: .text%JVM_RawMonitorEnter; +text: .text%__1cFMutexMjvm_raw_lock6M_v_; +text: .text%JVM_RawMonitorExit; +text: .text%__1cFMutexOjvm_raw_unlock6M_v_; +text: .text%__1cHUNICODELutf8_length6FpHi_i_; +text: 
.text%__1cRaddP_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cIciMethodLis_accessor6kM_i_; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__; +text: .text%__1cLBoxLockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSmembar_releaseNodeMideal_Opcode6kM_i_; +text: .text%__1cQciBytecodeStreamSget_constant_index6kM_i_; +text: .text%__1cOGenerateOopMapOset_bbmark_bit6Mi_v_; +text: .text%__1cFParseOreturn_current6MpnENode__v_; +text: .text%__1cLConvI2LNodeJideal_reg6kM_I_; +text: .text%__1cJStartNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cMorI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cMloadConPNodeFreloc6kM_i_; +text: .text%__1cGThreadMis_VM_thread6kM_i_; +text: .text%__1cSPSPromotionManagerFreset6M_v_; +text: .text%__1cNPrefetchQdDueueFclear6M_v_; +text: .text%__1cSPSPromotionManagerKflush_labs6M_v_; +text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_; +text: .text%__1cNincI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cTJvmtiEventCollectorYunset_jvmti_thread_state6M_v_; +text: .text%__1cLRShiftINodeLbottom_type6kM_pknEType__; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_; +text: .text%__1cJTypeTupleFxdual6kM_pknEType__; +text: .text%__1cOcompP_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cHi2sNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_2_v_; +text: .text%__1cLcastP2LNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRciVirtualCallDataOtranslate_from6MpnLProfileData__v_; +text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o; +text: .text%__1cKstoreCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNCompileBrokerXcompilation_is_in_queue6FnMmethodHandle_i_i_; +text: .text%__1cRsubI_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cETypeCeq6kMpk0_i_; +text: .text%__1cHMatcherPstack_alignment6F_I_; +text: .text%__1cNloadKlassNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadSNodeJnum_opnds6kM_I_; +text: .text%__1cJMultiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRshrL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cTconvI2L_reg_memNodeErule6kM_I_; +text: .text%__1cMURShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRcmpFastUnlockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSInterpreterRuntimebAfrequency_counter_overflow6FpnKJavaThread_pC_n0AJIcoResult__; +text: .text%__1cHSubNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_; +text: .text%__1cLRethrowNodeGOpcode6kM_i_; +text: .text%__1cPcmpFastLockNodeJnum_opnds6kM_I_; +text: .text%__1cIciMethodPcan_be_compiled6M_i_; +text: .text%__1cFParseQcreate_entry_map6M_pnNSafePointNode__; +text: .text%__1cFParseIdo_exits6M_v_; +text: .text%__1cFParseLbuild_exits6M_v_; +text: .text%__1cFParseLinit_blocks6M_v_; +text: .text%__1cFParse2t6MpnIJVMState_pnIciMethod_f_v_; +text: .text%__1cIciMethodVhas_balanced_monitors6M_i_; +text: .text%__1cFParseNdo_all_blocks6M_v_; +text: .text%__1cOParseGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cOParseGeneratorJcan_parse6FpnIciMethod_i_i_; +text: .text%__1cFArenaEused6kM_L_; +text: .text%__1cRandI_rReg_immNodeErule6kM_I_; +text: .text%jni_GetSuperclass: jni.o; +text: .text%__1cPno_rax_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFStateM_sub_Op_AddI6MpknENode__v_; +text: .text%__1cPClassFileParserUskip_over_field_name6MpciI_1_; +text: 
.text%__1cFParsePdo_method_entry6M_v_; +text: .text%__1cNCallGeneratorKfor_inline6FpnIciMethod_f_p0_; +text: .text%__1cKciTypeFlowFBlockPclone_loop_head6Mp0ip1pn0AGJsrSet__3_; +text: .text%__1cLOpaque1NodeGOpcode6kM_i_; +text: .text%__1cHciKlassOis_subclass_of6Mp0_i_; +text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_; +text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_; +text: .text%__1cRruntime_type_from6FpnJJavaValue__nJBasicType__: javaCalls.o; +text: .text%__1cCosbCstack_shadow_pages_available6FpnGThread_nMmethodHandle__i_; +text: .text%__1cPJavaCallWrapper2t6MnMmethodHandle_nGHandle_pnJJavaValue_pnGThread__v_; +text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_; +text: .text%__1cRJavaCallArgumentsKparameters6M_pl_; +text: .text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cLCastP2LNodeLbottom_type6kM_pknEType__; +text: .text%__1cPJavaCallWrapper2T6M_v_; +text: .text%__1cVPreserveExceptionMark2T6M_v_; +text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_; +text: .text%__1cMrax_RegPOperJnum_edges6kM_I_; +text: .text%__1cMrax_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cITypeFuncFxdual6kM_pknEType__; +text: .text%__1cIimmLOperJconstantL6kM_x_; +text: .text%__1cIMulLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLRShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNCompileBrokerTcompile_method_base6FnMmethodHandle_ii1ipkcpnGThread__pnHnmethod__; +text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__; +text: .text%__1cNmethodOopDescWload_signature_classes6FnMmethodHandle_pnGThread__i_; +text: .text%__1cTconstantPoolOopDescbDresolve_string_constants_impl6FnSconstantPoolHandle_pnGThread__v_; +text: .text%__1cYDebugInformationRecorderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_; +text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cMLinkResolverXresolve_klass_no_update6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNaddL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cISubINodeGadd_id6kM_pknEType__; +text: .text%__1cNsubI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cGciType2t6MnLKlassHandle__v_; +text: .text%__1cMMutableSpaceKinitialize6MnJMemRegion_i_v_; +text: .text%__1cHciKlass2t6MnLKlassHandle__v_; +text: .text%__1cKInlineTree2t6MpnHCompile_pk0pnIciMethod_pnIJVMState_if_v_; +text: .text%__1cJEventMark2t6MpkcE_v_; +text: .text%__1cJloadCNodeJnum_opnds6kM_I_; +text: .text%__1cNaddI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cQComputeCallStackHdo_long6M_v_; +text: .text%__1cMindirectOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cRaddI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_; +text: .text%__1cQVMOperationQdDueueNqueue_oops_do6MipnKOopClosure__v_; +text: .text%__1cMCreateExNodeJideal_reg6kM_I_; +text: .text%__1cMorI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMorI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cRmethodDataOopDescLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cNSCMemProjNodeGOpcode6kM_i_; 
+text: .text%__1cNSignatureInfoHdo_long6M_v_; +text: .text%__1cLPCTableNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMCreateExNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRinterpretedVFrameDbci6kM_i_; +text: .text%__1cKInlineTreeYcompute_callee_frequency6kMi_f_; +text: .text%__1cKInlineTreebCbuild_inline_tree_for_callee6MpnIciMethod_pnIJVMState_i_p0_; +text: .text%__1cRinterpretedVFrameDbcp6kM_pC_; +text: .text%__1cRInterpretedRFrameKtop_vframe6kM_pnKjavaVFrame__; +text: .text%__1cIciMethodbBinterpreter_call_site_count6Mi_i_; +text: .text%__1cLRShiftLNodeGOpcode6kM_i_; +text: .text%__1cPsarI_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cNsubI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cOMethodLivenessKBasicBlockIload_two6Mi_v_; +text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__; +text: .text%__1cNmulL_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cNobjArrayKlassPoop_is_objArray6kM_i_; +text: .text%__1cLklassVtableXvtable_accessibility_at6Mi_n0AKAccessType__; +text: .text%__1cNrFlagsRegOperFclone6kM_pnIMachOper__; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_; +text: .text%__1cNCatchProjNodeDcmp6kMrknENode__I_; +text: .text%__1cIGraphKitRmake_slow_call_ex6MpnENode_pnPciInstanceKlass__v_; +text: .text%__1cTcompareAndSwapLNodePoper_input_base6kM_I_; +text: .text%__1cMloadConINodeHsize_of6kM_I_; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_i_v_; +text: .text%__1cRMachSafePointNodeLis_MachCall6M_pnMMachCallNode__; +text: .text%__1cNstoreImmINodeMideal_Opcode6kM_i_; +text: .text%__1cJScopeDescGis_top6kM_i_; +text: .text%__1cHOrINodeLbottom_type6kM_pknEType__; +text: .text%__1cPstoreImmI16NodeMideal_Opcode6kM_i_; +text: .text%__1cMindIndexOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cFStateQ_sub_Op_CreateEx6MpknENode__v_; +text: .text%__1cRshrL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cLjmpConUNodeJis_Branch6kM_I_; +text: .text%__1cLjmpConUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLjmpConUNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cRaddI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_; +text: .text%__1cHCompileXin_preserve_stack_slots6M_I_; +text: .text%__1cMMachCallNodeHis_Call6M_pnICallNode__; +text: .text%__1cNdecI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKStoreCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLLShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLklassVtableKis_miranda6FpnNmethodOopDesc_pnPobjArrayOopDesc_pnMklassOopDesc__i_; +text: .text%__1cTconvL2I_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cRalign_code_offset6Fi_I_; +text: .text%__1cMURShiftINodeLbottom_type6kM_pknEType__; +text: .text%__1cMorI_rRegNodeErule6kM_I_; +text: .text%__1cMLinkResolverVresolve_invokespecial6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cHMulNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cTconvL2I_reg_regNodeErule6kM_I_; +text: .text%__1cRmethodDataOopDescJbci_to_dp6Mi_pC_; +text: .text%__1cIAddLNodeGadd_id6kM_pknEType__; +text: .text%__1cRaddL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cLRShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFTypeFEhash6kM_i_; +text: .text%__1cIGraphKitMarray_length6MpnENode__2_; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_; +text: .text%__1cPsalI_rReg_1NodeErule6kM_I_; +text: 
.text%__1cIJVMState2t6Mi_v_; +text: .text%__1cNstoreImmBNodeHtwo_adr6kM_I_; +text: .text%__1cLLShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cScompU_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cNGrowableArray4Cl_Icontains6kMrkl_i_; +text: .text%__1cScompU_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_StringGequals6FpnHoopDesc_pHi_i_; +text: .text%__1cOcompP_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTCreateExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMURShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKRegionNodeOhas_unique_phi6kM_pnHPhiNode__; +text: .text%__1cIMulINodeLbottom_type6kM_pknEType__; +text: .text%__1cKTypeAryPtrFxdual6kM_pknEType__; +text: .text%__1cVExceptionHandlerTableMadd_subtable6MipnNGrowableArray4Cl__22_v_; +text: .text%__1cNandL_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cNObjectMonitorHis_busy6kM_l_; +text: .text%__1cEDict2t6MpFpkv2_ipF2_i_v_; +text: .text%__1cJAssemblerElock6M_v_; +text: .text%__1cJAssemblerIcmpxchgq6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cSsafePoint_pollNodePoper_input_base6kM_I_; +text: .text%__1cLRuntimeStubbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cPshrI_rReg_1NodeErule6kM_I_; +text: .text%__1cRmethodDataOopDescKmileage_of6FpnNmethodOopDesc__i_; +text: .text%__1cKManagementJtimestamp6F_x_; +text: .text%__1cbDReferenceProcessorInitializerIis_clean6kM_v_; +text: .text%__1cIPSOldGenPupdate_counters6M_v_; +text: .text%__1cNSingletonBlobIis_alive6kM_i_; +text: .text%__1cKTypeRawPtrCeq6kMpknEType__i_; +text: .text%__1cIregDOperEtype6kM_pknEType__; +text: .text%__1cQleaPIdxScaleNodeHtwo_adr6kM_I_; +text: .text%__1cTStackWalkCompPolicyPshouldNotInline6FnMmethodHandle__pkc_; +text: .text%__1cMPrefetchNodeLbottom_type6kM_pknEType__; +text: .text%__1cPcmpFastLockNodeErule6kM_I_; +text: .text%__1cFArena2t6M_v_; +text: .text%__1cSCallLeafDirectNodePoper_input_base6kM_I_; +text: .text%__1cMCallLeafNodeLis_CallLeaf6kM_pk0_; +text: .text%__1cQleaPIdxScaleNodeMideal_Opcode6kM_i_; +text: .text%__1cJcmpOpOperFequal6kM_i_; +text: .text%__1cScompI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_IsSameObject: jni.o; +text: .text%__1cNmulL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNmulL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cIGraphKitYcombine_exception_states6MpnNSafePointNode_2_v_; +text: .text%__1cJloadBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMrcx_RegIOperJnum_edges6kM_I_; +text: .text%__1cFKlassNoop_is_method6kM_i_; +text: .text%__1cIMulLNodeLbottom_type6kM_pknEType__; +text: .text%__1cHnmethodPis_locked_by_vm6kM_i_; +text: .text%__1cONMethodSweeperPprocess_nmethod6FpnHnmethod__v_; +text: .text%__1cRaddP_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXjava_lang_ref_ReferenceWpending_list_lock_addr6F_ppnHoopDesc__; +text: .text%__1cJloadLNodeHtwo_adr6kM_I_; +text: .text%__1cHMulNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMrep_stosNodePoper_input_base6kM_I_; +text: .text%__1cRsalI_rReg_immNodeErule6kM_I_; +text: .text%__1cJFieldTypeSskip_optional_size6FpnNsymbolOopDesc_pi_v_; +text: .text%__1cMloadConPNodeHsize_of6kM_I_; +text: .text%__1cSCallLeafDirectNodeHtwo_adr6kM_I_; +text: .text%__1cHnmethodVcleanup_inline_caches6M_v_; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__; +text: .text%__1cQsolaris_mprotect6FpcLi_i_: os_solaris.o; +text: 
.text%__1cRaddI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cHnmethodLis_unloaded6kM_i_; +text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_; +text: .text%__1cIGraphKitXset_edges_for_java_call6MpnMCallJavaNode_i_v_; +text: .text%__1cTconvI2L_reg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cHi2sNodeJnum_opnds6kM_I_; +text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_; +text: .text%__1cHMatcherXadjust_incoming_stk_arg6Mi_i_; +text: .text%__1cNIdealLoopTreeIset_nest6MI_i_; +text: .text%__1cNIdealLoopTreeMcounted_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cRsubI_rReg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cIGraphKitZset_results_for_java_call6MpnMCallJavaNode__pnENode__; +text: .text%__1cTconvI2L_reg_memNodePoper_input_base6kM_I_; +text: .text%__1cFStateM_sub_Op_CmpU6MpknENode__v_; +text: .text%__1cLRethrowNodeKmatch_edge6kMI_I_; +text: .text%__1cKcopy_table6FppC1i_v_: interpreter.o; +text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_; +text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cKTypeRawPtrFxmeet6kMpknEType__3_; +text: .text%__1cMVM_OperationVevaluate_at_safepoint6kM_i_; +text: .text%__1cMVM_OperationVevaluate_concurrently6kM_i_; +text: .text%__1cMVM_OperationSis_cheap_allocated6kM_i_; +text: .text%__1cXmembar_release_lockNodePoper_input_base6kM_I_; +text: .text%__1cRaddL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cScompP_mem_rRegNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__; +text: .text%__1cJFieldTypeYis_valid_array_signature6FpnNsymbolOopDesc__i_; +text: .text%__1cNincI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciObject2t6M_v_; +text: .text%__1cPstoreImmI16NodePoper_input_base6kM_I_; +text: .text%__1cNinstanceKlassbDcheck_valid_for_instantiation6MipnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_; +text: .text%__1cQLibraryIntrinsicKis_virtual6kM_i_; +text: .text%__1cPciObjectFactoryUget_empty_methodData6M_pnMciMethodData__; +text: .text%__1cMciMethodData2t6M_v_; +text: .text%__1cPsarI_rReg_1NodePoper_input_base6kM_I_; +text: .text%__1cNstoreImmBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFTypeDEhash6kM_i_; +text: .text%__1cMPrefetchNodeKmatch_edge6kMI_I_; +text: .text%__1cHCompileQcan_generate_C2I6MpnIciMethod_i_i_; +text: .text%__1cPloadConUL32NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPciObjArrayKlassSis_obj_array_klass6M_i_; +text: .text%__1cLOpaque1NodeEhash6kM_I_; +text: .text%__1cXmembar_release_lockNodeHtwo_adr6kM_I_; +text: .text%JVM_GetMethodIxModifiers; +text: .text%__1cJloadBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNandL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cNandL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_; +text: .text%__1cTCallDynamicJavaNodeGOpcode6kM_i_; +text: .text%__1cJloadINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKklassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cUCompressedReadStreamMraw_read_int6FrpC_i_; +text: .text%__1cIHaltNodeEhash6kM_I_; +text: .text%__1cNstoreImmINodePoper_input_base6kM_I_; +text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_; +text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cNinstanceKlassScopy_static_fields6MpnSPSPromotionManager__v_; +text: 
.text%__1cSinstanceKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cOcompL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQVMOperationQdDueueLremove_next6M_pnMVM_Operation__; +text: .text%__1cQciTypeArrayKlassTis_type_array_klass6M_i_; +text: .text%__1cRsubI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFStateP_sub_Op_LShiftL6MpknENode__v_; +text: .text%__1cLjmpConUNodeGnegate6M_v_; +text: .text%__1cKcmpOpUOperGnegate6M_v_; +text: .text%__1cMrax_RegLOperJnum_edges6kM_I_; +text: .text%__1cLGCTaskQdDueueKinitialize6M_v_; +text: .text%__1cJStealTask2t6Mi_v_; +text: .text%__1cJStealTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cTOldToYoungRootsTaskEname6M_pc_; +text: .text%__1cTOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cNGCTaskManagerMnote_release6MI_v_; +text: .text%__1cJStealTaskEname6M_pc_; +text: .text%__1cSCardTableExtensionbAscavenge_contents_parallel6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager_I_v_; +text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cQciBytecodeStreamMget_constant6M_nKciConstant__; +text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cJcmpOpOperFclone6kM_pnIMachOper__; +text: .text%__1cMrep_stosNodeMideal_Opcode6kM_i_; +text: .text%__1cEhash6Fpkc1_I_; +text: .text%__1cQput_after_lookup6FnMsymbolHandle_0ppnLNameSigHash__i_; +text: .text%__1cKJavaThreadLgc_epilogue6M_v_; +text: .text%__1cKJavaThreadLgc_prologue6M_v_; +text: .text%__1cTsize_java_to_interp6F_I_; +text: .text%__1cUreloc_java_to_interp6F_I_; +text: .text%__1cQinit_input_masks6FIrnHRegMask_1_p0_: matcher.o; +text: .text%__1cKOSRAdapterHoops_do6MpnKOopClosure__v_; +text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_; +text: .text%__1cRitableMethodEntryKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cTcompareAndSwapLNodeMideal_Opcode6kM_i_; +text: .text%__1cNinstanceKlassPlink_class_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIGraphKitbBset_arguments_for_java_call6MpnMCallJavaNode__v_; +text: .text%__1cNCallGeneratorCtf6kM_pknITypeFunc__; +text: .text%__1cMloadConLNodeLbottom_type6kM_pknEType__; +text: .text%__1cKStoreBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNaddL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cTcompareAndSwapLNodeJnum_opnds6kM_I_; +text: .text%__1cFStateO_sub_Op_StoreI6MpknENode__v_; +text: .text%__1cQleaPIdxScaleNodePoper_input_base6kM_I_; +text: .text%__1cNGrowableArray4CpnNmethodOopDesc__2t6Mii_v_; +text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_; +text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_; +text: .text%__1cNloadConP0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__; +text: .text%__1cWstatic_stub_RelocationJpack_data6M_i_; +text: .text%__1cNsubL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cbAjni_check_async_exceptions6FpnKJavaThread__v_: jni.o; +text: .text%__1cKJavaThreadbHcheck_and_handle_async_exceptions6Mi_v_; +text: .text%__1cRsalI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMindIndexOperNbase_position6kM_i_; +text: .text%__1cMindIndexOperOindex_position6kM_i_; +text: .text%__1cMindIndexOperNconstant_disp6kM_i_; +text: .text%__1cJLoadSNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNGrowableArray4CpnOMethodLivenessKBasicBlock__2t6Mii_v_; +text: 
.text%__1cKstoreINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCallGeneratorPfor_direct_call6FpnIciMethod__p0_; +text: .text%__1cTDirectCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cMWarmCallInfoLalways_cold6F_p0_; +text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_; +text: .text%JVM_IsInterface; +text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_; +text: .text%__1cRshrL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQjmpCon_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQjmpCon_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cKjmpConNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cQjmpCon_shortNodeJis_Branch6kM_I_; +text: .text%__1cKJavaThreadNpd_last_frame6M_nFframe__; +text: .text%__1cTStackWalkCompPolicyVfindTopInlinableFrame6MpnNGrowableArray4CpnGRFrame____2_; +text: .text%__1cKJavaThreadQlast_java_vframe6MpnLRegisterMap__pnKjavaVFrame__; +text: .text%__1cTStackWalkCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_; +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_nMmethodHandle__v_; +text: .text%__1cNGrowableArray4CpnGRFrame__2t6Mii_v_; +text: .text%__1cKjavaVFrameNis_java_frame6kM_i_; +text: .text%__1cIVerifierRshould_verify_for6FpnHoopDesc__i_; +text: .text%__1cQciBytecodeStreamPget_klass_index6M_i_; +text: .text%__1cRMachNullCheckNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRMachNullCheckNode2t6MpnENode_2I_v_; +text: .text%__1cRsarI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNMachIdealNodeJnum_opnds6kM_I_; +text: .text%__1cRMachSafePointNodePis_MachCallJava6M_pnQMachCallJavaNode__; +text: .text%__1cKstorePNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cWImplicitExceptionTableGappend6MII_v_; +text: .text%__1cHUNICODEHas_utf86FpHi_pc_; +text: .text%__1cMStartI2CNodeGOpcode6kM_i_; +text: .text%__1cKOSRAdapterMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cSvframeStreamCommonbHskip_method_invoke_and_aux_frames6M_v_; +text: .text%__1cNdecI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cIMinINodeGOpcode6kM_i_; +text: .text%__1cNinstanceKlassbCfind_local_field_from_offset6kMiipnPfieldDescriptor__i_; +text: .text%__1cNinstanceKlassWfind_field_from_offset6kMiipnPfieldDescriptor__i_; +text: .text%__1cPciInstanceKlassTget_field_by_offset6Mii_pnHciField__; +text: .text%__1cFArena2T6M_v_; +text: .text%__1cKmethodOperJnum_edges6kM_I_; +text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__; +text: .text%__1cISubLNodeGOpcode6kM_i_; +text: .text%__1cFStateO_sub_Op_StoreP6MpknENode__v_; +text: .text%__1cRshrI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRsarL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNstoreImmBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstorePNodeFreloc6kM_i_; +text: .text%__1cYCallStaticJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cYCallStaticJavaDirectNodeJnum_opnds6kM_I_; +text: .text%__1cQleaPIdxScaleNodeErule6kM_I_; +text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_; +text: .text%__1cNinstanceKlassXmark_dependent_nmethods6MpnMklassOopDesc__i_; +text: .text%__1cMvframeStream2t6MpnKJavaThread_i_v_; +text: .text%__1cIGraphKitTuse_exception_state6MpnNSafePointNode__pnENode__; +text: .text%__1cIGraphKitSclear_saved_ex_oop6FpnNSafePointNode__pnENode__; +text: .text%__1cNloadConI0NodeFclone6kM_pnENode__; +text: .text%__1cJimmI0OperFclone6kM_pnIMachOper__; +text: 
.text%__1cLCastP2LNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cKcmpOpUOperNgreater_equal6kM_i_; +text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_; +text: .text%__1cXcopy_u2_with_conversion6FpH0i_v_: classFileParser.o; +text: .text%__1cENodeGis_Sub6M_pnHSubNode__; +text: .text%__1cJAssemblerFtestq6MpnMRegisterImpl_2_v_; +text: .text%__1cJCMoveNodeLis_cmove_id6FpnOPhaseTransform_pnENode_44pnIBoolNode__4_; +text: .text%__1cZresource_reallocate_bytes6FpcLL_0_; +text: .text%__1cKstoreINodeFreloc6kM_i_; +text: .text%__1cLsymbolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQciBytecodeStreamJget_klass6Mri_pnHciKlass__; +text: .text%__1cKMemBarNode2t6M_v_; +text: .text%__1cIDivINodeGOpcode6kM_i_; +text: .text%__1cFframeRis_compiled_frame6kMpi_i_; +text: .text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_; +text: .text%__1cPshrI_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMorI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFParseQarray_addressing6MnJBasicType_ippknEType__pnENode__; +text: .text%__1cPsalI_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHciField2t6MpnPfieldDescriptor__v_; +text: .text%__1cIAddLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIModINodeGOpcode6kM_i_; +text: .text%__1cNmulL_rRegNodeErule6kM_I_; +text: .text%__1cSsafePoint_pollNodeHtwo_adr6kM_I_; +text: .text%__1cDCHAManalyze_call6FnLKlassHandle_11nMsymbolHandle_2_pnJCHAResult__; +text: .text%__1cJCHAResult2t6MnLKlassHandle_nMsymbolHandle_2pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___n0E_i_v_; +text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cJCHAResultOis_monomorphic6kM_i_; +text: .text%__1cIciMethodXfind_monomorphic_target6MpnHciKlass_22_p0_; +text: .text%__1cQconstMethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cKMemBarNodeJis_MemBar6kM_pk0_; +text: .text%__1cIGraphKitOinsert_mem_bar6MpnKMemBarNode__v_; +text: .text%__1cHi2sNodeHtwo_adr6kM_I_; +text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__; +text: .text%__1cLConvL2INodeLbottom_type6kM_pknEType__; +text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cNIdealLoopTreeObeautify_loops6MpnOPhaseIdealLoop__i_; +text: .text%__1cScompP_mem_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreBNodePoper_input_base6kM_I_; +text: .text%__1cRandI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSCallLeafDirectNodeRis_safepoint_node6kM_i_; +text: .text%__1cJloadLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCallLeafDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKMemBarNodeJideal_reg6kM_I_; +text: .text%__1cJloadSNodeHtwo_adr6kM_I_; +text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cUDebugInfoWriteStreamMwrite_handle6MpnI_jobject__v_; +text: .text%__1cLmethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNaddI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJlog2_long6Fx_i_; +text: .text%__1cTconvL2I_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitPpush_pair_local6Mi_v_; +text: .text%__1cOjmpLoopEndNodePoper_input_base6kM_I_; +text: 
.text%__1cMURShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cHCompileSregister_intrinsic6MpnNCallGenerator__v_; +text: .text%__1cIAddLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNloadKlassNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_; +text: .text%__1cHCmpNodeGadd_id6kM_pknEType__; +text: .text%JVM_InternString; +text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_; +text: .text%__1cFKlassQup_cast_abstract6M_p0_; +text: .text%__1cNGrowableArray4CpnENode__2t6Mii_v_; +text: .text%__1cPCheckCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cSCountedLoopEndNodeKstride_con6kM_i_; +text: .text%__1cTconvI2L_reg_memNodeJnum_opnds6kM_I_; +text: .text%__1cPCheckCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_; +text: .text%__1cLOpaque1NodeLbottom_type6kM_pknEType__; +text: .text%__1cOPhaseIdealLoopRsplit_thru_region6MpnENode_2_2_; +text: .text%__1cFTypeFCeq6kMpknEType__i_; +text: .text%__1cNmethodOopDescOis_initializer6kM_i_; +text: .text%__1cFciEnvRfind_system_klass6MpnIciSymbol__pnHciKlass__; +text: .text%__1cNandL_rRegNodeErule6kM_I_; +text: .text%__1cQjmpDir_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cQjmpDir_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKjmpDirNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cQjmpDir_shortNodeJis_Branch6kM_I_; +text: .text%__1cLBlock_ArrayEgrow6MI_v_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cSCompareAndSwapNodeLbottom_type6kM_pknEType__; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6FnUtypeArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cMPhaseChaitinVfind_base_for_derived6MppnENode_2rI_2_; +text: .text%__1cSindIndexOffsetOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cSindIndexOffsetOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cNGrowableArray4CI_Egrow6Mi_v_; +text: .text%__1cHMatcherMreturn_value6Fii_nLOptoRegPair__; +text: .text%__1cFStateP_sub_Op_ConvI2L6MpknENode__v_; +text: .text%__1cOjmpLoopEndNodeGpinned6kM_i_; +text: .text%__1cNxorI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cJCHAResultSmonomorphic_target6kM_nMmethodHandle__; +text: .text%__1cNsubI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_; +text: .text%__1cRcmpFastUnlockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_; +text: .text%__1cQLRUMaxHeapPolicyWshould_clear_reference6MpnHoopDesc__i_; +text: .text%__1cLcastP2LNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcheckCastPPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPciInstanceKlassLfind_method6MpnIciSymbol_2_pnIciMethod__; +text: .text%__1cZCallInterpreterDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cILoopNodeHsize_of6kM_I_; +text: .text%__1cSindIndexOffsetOperFscale6kM_i_; +text: .text%__1cMjniIdSupportNto_method_oop6FpnK_jmethodID__pnNmethodOopDesc__; +text: .text%__1cLBoxLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_; +text: .text%__1cRaddI_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_; +text: .text%__1cGOopMapPset_derived_oop6Miiii_v_; +text: 
.text%__1cKstoreBNodeMideal_Opcode6kM_i_; +text: .text%__1cHi2bNodeErule6kM_I_; +text: .text%__1cFStateN_sub_Op_LoadI6MpknENode__v_; +text: .text%__1cMloadConDNodePoper_input_base6kM_I_; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__; +text: .text%__1cICmpLNodeDsub6kMpknEType_3_3_; +text: .text%__1cRjmpConU_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRjmpConU_shortNodeMideal_Opcode6kM_i_; +text: .text%__1cMloadConDNodeHtwo_adr6kM_I_; +text: .text%__1cHnmethodKpc_desc_at6MpC_pnGPcDesc__; +text: .text%__1cJrRegPOperFclone6kM_pnIMachOper__; +text: .text%__1cFParseNpush_constant6MnKciConstant__i_; +text: .text%__1cMrep_stosNodeJnum_opnds6kM_I_; +text: .text%__1cOClearArrayNodeKmatch_edge6kMI_I_; +text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_; +text: .text%__1cXmembar_release_lockNodeLbottom_type6kM_pknEType__; +text: .text%__1cPThreadLocalNodeJideal_reg6kM_I_; +text: .text%__1cPstoreImmI16NodeJnum_opnds6kM_I_; +text: .text%__1cTleaPIdxScaleOffNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitbMset_predefined_output_for_runtime_call6MpnENode_pnMMergeMemNode__v_; +text: .text%__1cFKlassXsearch_secondary_supers6kMpnMklassOopDesc__i_; +text: .text%__1cPsarI_rReg_1NodeErule6kM_I_; +text: .text%__1cOPhaseIdealLoopPis_counted_loop6MpnENode_pnNIdealLoopTree__2_; +text: .text%__1cIGraphKitOhas_ex_handler6M_i_; +text: .text%__1cMloadConDNodeErule6kM_I_; +text: .text%__1cHCompileQsync_stack_slots6kM_i_; +text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_; +text: .text%__1cMURShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNdecI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cPClassFileParserbHparse_constant_pool_integer_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTDebugInfoReadStream2t6MpknHnmethod_i_v_; +text: .text%__1cRsalI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cJScopeDescJstream_at6kMi_pnTDebugInfoReadStream__; +text: .text%__1cVjava_lang_ClassLoaderGparent6FpnHoopDesc__2_; +text: .text%__1cIPhaseIFGEinit6MI_v_; +text: .text%__1cMPhaseChaitinQgather_lrg_masks6Mi_v_; +text: .text%__1cJPhaseLiveHcompute6MI_v_; +text: .text%JVM_GetCPClassNameUTF; +text: .text%__1cMLinkResolverUresolve_invokestatic6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNstoreImmINodeJnum_opnds6kM_I_; +text: .text%__1cITypeNodeHis_Type6M_p0_; +text: .text%__1cHRetNodePoper_input_base6kM_I_; +text: .text%__1cLCastP2LNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%jni_GetStringLength: jni.o; +text: .text%__1cPloadConUL32NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOFastUnlockNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__; +text: .text%__1cNprefetchwNodeHtwo_adr6kM_I_; +text: .text%__1cNnmethodLocker2T6M_v_; +text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cKstoreCNodeHtwo_adr6kM_I_; +text: .text%__1cQleaPIdxScaleNodeJnum_opnds6kM_I_; +text: .text%__1cNaddL_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cOcompL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cTDebugInfoReadStreamLread_handle6M_nGHandle__; +text: .text%__1cJScopeDesc2t6MpknHnmethod_i_v_; +text: .text%__1cFStateR_sub_Op_LoadRange6MpknENode__v_; +text: .text%__1cOcompiledVFrame2t6MpknFframe_pknLRegisterMap_pnKJavaThread_pnJScopeDesc__v_; +text: .text%__1cOcompU_rRegNodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cPcmovI_reg_gNodePoper_input_base6kM_I_; +text: .text%__1cLProfileDataSis_VirtualCallData6M_i_; +text: .text%__1cSmembar_acquireNodePoper_input_base6kM_I_; +text: .text%__1cNsubL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cJMarkSweepMfollow_stack6F_v_; +text: .text%__1cNnmethodLocker2t6MpnHnmethod__v_; +text: .text%__1cNloadRangeNodeFreloc6kM_i_; +text: .text%__1cNGrowableArray4CpnKciTypeFlowJJsrRecord__2t6Miirk2i_v_; +text: .text%__1cTcompareAndSwapLNodeErule6kM_I_; +text: .text%__1cZCallDynamicJavaDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cMURShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOcompiledVFrameGis_top6kM_i_; +text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cNxorI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cRshrI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cKciTypeFlow2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cKciTypeFlowXmark_known_range_starts6M_v_; +text: .text%__1cKciTypeFlowLfind_ranges6M_v_; +text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_; +text: .text%__1cKciTypeFlowKmap_blocks6M_v_; +text: .text%__1cKciTypeFlowHdo_flow6M_v_; +text: .text%__1cKciTypeFlowPget_start_state6M_pkn0ALStateVector__; +text: .text%__1cKciTypeFlowKflow_types6M_v_; +text: .text%__1cIAndINodeGadd_id6kM_pknEType__; +text: .text%__1cMURShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadBNodeHtwo_adr6kM_I_; +text: .text%__1cKPSYoungGenRcapacity_in_bytes6kM_L_; +text: .text%__1cHMonitorGnotify6M_i_; +text: .text%__1cYciExceptionHandlerStreamPcount_remaining6M_i_; +text: .text%__1cFParseXcatch_inline_exceptions6MpnNSafePointNode__v_; +text: .text%__1cHMatcherNfind_receiver6Fi_i_; +text: .text%__1cMciMethodDataJload_data6M_v_; +text: .text%__1cIciMethodJload_code6M_v_; +text: .text%__1cJCmpL3NodeGOpcode6kM_i_; +text: .text%__1cIAndINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cUParallelScavengeHeapEused6kM_L_; +text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_; +text: .text%__1cMTypeKlassPtrFxdual6kM_pknEType__; +text: .text%__1cIMaxINodeGOpcode6kM_i_; +text: .text%__1cOPhaseTransform2t6MnFPhaseLPhaseNumber__v_; +text: .text%__1cPsalI_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cQSystemDictionarybAcompute_loader_lock_object6FnGHandle_pnGThread__1_; +text: .text%__1cHciKlassMis_interface6M_i_; +text: .text%__1cPmethodDataKlassRoop_is_methodData6kM_i_; +text: .text%__1cIMulLNodeGadd_id6kM_pknEType__; +text: .text%__1cJloadCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_; +text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_; +text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_; +text: .text%__1cQVMOperationQdDueueHoops_do6MpnKOopClosure__v_; +text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cOJNIHandleBlockMweak_oops_do6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_; +text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_; +text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_; +text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_; +text: .text%__1cIUniverseHoops_do6FpnKOopClosure_i_v_; 
+text: .text%__1cbGJvmtiVMObjectAllocEventCollectorXoops_do_for_all_threads6FpnKOopClosure__v_; +text: .text%__1cRindIndexScaleOperJnum_edges6kM_I_; +text: .text%__1cRindIndexScaleOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKstoreBNodeJnum_opnds6kM_I_; +text: .text%__1cNSignatureInfoJdo_double6M_v_; +text: .text%__1cJAssemblerEmovl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cRsalI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cMrdx_RegIOperEtype6kM_pknEType__; +text: .text%__1cMciMethodData2t6MnQmethodDataHandle__v_; +text: .text%__1cSmembar_acquireNodeHtwo_adr6kM_I_; +text: .text%__1cRshrI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cKJNIHandlesLmake_global6FnGHandle_i_pnI_jobject__; +text: .text%jni_ExceptionOccurred: jni.o; +text: .text%jni_SetObjectArrayElement: jni.o; +text: .text%__1cSCompareAndSwapNodeKmatch_edge6kMI_I_; +text: .text%__1cISubINodeJideal_reg6kM_I_; +text: .text%__1cRMachSafePointNodeGpinned6kM_i_; +text: .text%__1cIimmIOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConINodeFclone6kM_pnENode__; +text: .text%__1cICodeHeapIallocate6ML_pv_; +text: .text%__1cICodeHeapPsearch_freelist6ML_pnJFreeBlock__; +text: .text%__1cbACallCompiledJavaDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cPcmpFastLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLCastP2LNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNmulL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJLoadBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cVmerge_point_too_heavy6FpnHCompile_pnENode__i_: loopopts.o; +text: .text%jni_GetByteArrayRegion: jni.o; +text: .text%__1cFParseKdo_put_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cHnmethodOis_java_method6kM_i_; +text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_; +text: .text%jni_GetStringUTFRegion: jni.o; +text: .text%jni_GetStringUTFLength: jni.o; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_; +text: .text%__1cRsarL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cScompU_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFciEnvZcheck_klass_accessibility6MpnHciKlass_pnMklassOopDesc__i_; +text: .text%__1cIciObjectMis_obj_array6M_i_; +text: .text%__1cOLibraryCallKitOgenerate_guard6MpnENode_pnKRegionNode_f_v_; +text: .text%__1cMoutputStream2t6Mi_v_; +text: .text%__1cMstringStreamJas_string6M_pc_; +text: .text%__1cMstringStream2t6ML_v_; +text: .text%__1cJloadINodeFreloc6kM_i_; +text: .text%__1cMstringStream2T6M_v_; +text: .text%__1cOMethodLivenessKBasicBlockJstore_two6Mi_v_; +text: .text%__1cJloadINodeIpeephole6MpnFBlock_ipnNPhaseRegAlloc_ri_pnIMachNode__; +text: .text%__1cTconvL2I_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cRandI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cOAbstractICacheQinvalidate_range6FpCi_v_; +text: .text%__1cOAbstractICachePcall_flush_stub6FpCi_v_; +text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_; +text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__; +text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__; +text: .text%__1cIGraphKitOmake_slow_call6MpknITypeFunc_pCpkcpnENode_88_8_; +text: .text%__1cICodeHeapPfollowing_block6MpnJFreeBlock__2_; +text: .text%__1cOClearArrayNodeLbottom_type6kM_pknEType__; +text: .text%__1cPshrI_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cEDictIdoubhash6M_v_; +text: .text%__1cTleaPIdxScaleOffNodeLbottom_type6kM_pknEType__; 
+text: .text%__1cIProjNodeJideal_reg6kM_I_; +text: .text%__1cHi2sNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLimmI_16OperJnum_edges6kM_I_; +text: .text%__1cUmembar_cpu_orderNodePoper_input_base6kM_I_; +text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_; +text: .text%__1cTCallInterpreterNodeGOpcode6kM_i_; +text: .text%__1cMloadConLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRaddL_rReg_immNodeErule6kM_I_; +text: .text%__1cJLoadLNodeJideal_reg6kM_I_; +text: .text%__1cTleaPIdxScaleOffNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cHCompileTset_cached_top_node6MpnENode__v_; +text: .text%__1cENodeMsetup_is_top6M_v_; +text: .text%__1cIGotoNodeGOpcode6kM_i_; +text: .text%__1cOMachPrologNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHCompilePneed_stack_bang6kMi_i_; +text: .text%__1cKBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cNFingerprinterIdo_array6Mii_v_; +text: .text%jni_GetArrayLength: jni.o; +text: .text%__1cJloadSNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKReturnNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_; +text: .text%__1cMorI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cKTypeRawPtrFempty6kM_i_; +text: .text%__1cHRetNodeGpinned6kM_i_; +text: .text%__1cHRetNodeHtwo_adr6kM_I_; +text: .text%__1cPsalI_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cHRetNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPGlobalTLABStatsKinitialize6M_v_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_; +text: .text%__1cUParallelScavengeHeapTensure_parseability6M_v_; +text: .text%__1cTDerivedPointerTableFclear6F_v_; +text: .text%__1cNMemoryServiceGgc_end6Fi_v_; +text: .text%__1cSReferenceProcessorQprocess_phaseJNI6M_v_; +text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_; +text: .text%__1cSReferenceProcessorbDenqueue_discovered_references6M_i_; +text: .text%__1cSReferenceProcessorbDprocess_discovered_references6M_v_; +text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cTDerivedPointerTablePupdate_pointers6F_v_; +text: .text%__1cNCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapOfill_all_tlabs6M_v_; +text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_; +text: .text%__1cSReferenceProcessorbBenqueue_discovered_reflists6MppnHoopDesc__v_; +text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_; +text: .text%__1cSReferenceProcessorOprocess_phase16MppnHoopDesc_pnPReferencePolicy_pnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: .text%__1cQLRUMaxHeapPolicy2t6M_v_; +text: .text%__1cPGCMemoryManagerIgc_begin6M_v_; +text: .text%__1cPGCMemoryManagerGgc_end6M_v_; +text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cUParallelScavengeHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cKPSYoungGenPupdate_counters6M_v_; +text: .text%__1cXjava_lang_ref_ReferenceRpending_list_addr6F_ppnHoopDesc__; +text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_; +text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_; +text: .text%__1cNMemoryServiceIgc_begin6Fi_v_; +text: .text%__1cUParallelScavengeHeapOfill_all_tlabs6M_v_; +text: .text%__1cXTraceMemoryManagerStats2T6M_v_; +text: .text%__1cXTraceMemoryManagerStats2t6Mi_v_; +text: .text%__1cUParallelScavengeHeapPupdate_counters6M_v_; +text: 
.text%__1cQPlaceholderTableJnew_entry6MipnNsymbolOopDesc_pnHoopDesc__pnQPlaceholderEntry__; +text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cQPlaceholderTableJadd_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_; +text: .text%__1cUParallelScavengeHeapQresize_all_tlabs6M_v_; +text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersPupdate_counters6M_v_; +text: .text%__1cYGCAdaptivePolicyCountersbBupdate_counters_from_policy6M_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersbBupdate_counters_from_policy6M_v_; +text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_; +text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNaddL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cNaddL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cKstoreLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cUPSAdaptiveSizePolicyZdecay_supplemental_growth6Mi_v_; +text: .text%__1cUPSAdaptiveSizePolicybPeden_increment_with_supplement_aligned_up6ML_L_; +text: .text%__1cUPSAdaptiveSizePolicyQdecaying_gc_cost6kM_d_; +text: .text%__1cUPSAdaptiveSizePolicybDcompute_generation_free_space6MLLLLLLLi_v_; +text: .text%__1cIPSOldGenMmax_gen_size6M_L_; +text: .text%__1cUPSAdaptiveSizePolicybHclear_generation_free_space_flags6M_v_; +text: .text%__1cUPSAdaptiveSizePolicyOeden_increment6MLI_L_; +text: .text%__1cUPSAdaptiveSizePolicyVadjust_for_throughput6MipL1_v_; +text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cUmembar_cpu_orderNodeHtwo_adr6kM_I_; +text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__; +text: .text%__1cLklassVtableRinitialize_vtable6MpnGThread__v_; +text: .text%__1cJAssemblerDjmp6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cPshrI_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cRmulI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNandI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cOMachEpilogNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLklassVtableVinitialize_from_super6MnLKlassHandle__i_; +text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_; +text: .text%__1cLklassVtableOcopy_vtable_to6MpnLvtableEntry__v_; +text: .text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_; +text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_; +text: .text%__1cPVM_GC_OperationbKrelease_and_notify_pending_list_lock6M_v_; +text: .text%__1cPVM_GC_OperationOskip_operation6kM_i_; +text: .text%__1cPVM_GC_OperationNdoit_prologue6M_i_; +text: .text%__1cPVM_GC_OperationZacquire_pending_list_lock6M_v_; +text: .text%__1cMTypeKlassPtrFxmeet6kMpknEType__3_; +text: .text%__1cKReturnNodeEhash6kM_I_; +text: .text%__1cHnmethodVis_dependent_on_entry6MpnMklassOopDesc_2pnNmethodOopDesc__i_; +text: .text%__1cbDVM_ParallelGCFailedAllocation2t6MLiiI_v_; +text: .text%__1cLlog2_intptr6Fl_i_; +text: .text%__1cKKlass_vtbl2n6FLrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_; +text: .text%__1cMloadConPNodeFclone6kM_pnENode__; +text: .text%__1cIimmPOperFclone6kM_pnIMachOper__; +text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_; 
+text: .text%__1cSCallLeafDirectNodeKmethod_set6Ml_v_; +text: .text%__1cJcmpOpOperJnot_equal6kM_i_; +text: .text%__1cJloadLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_; +text: .text%__1cCosOunguard_memory6FpcL_i_; +text: .text%__1cNandL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLLShiftINodeJideal_reg6kM_I_; +text: .text%__1cRsarI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_; +text: .text%__1cJLoadSNodeJideal_reg6kM_I_; +text: .text%__1cTconvL2I_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cIPhaseIFGISquareUp6M_v_; +text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__; +text: .text%__1cCosJyield_all6Fi_v_; +text: .text%__1cKciTypeFlowLStateVectorOmeet_exception6MpnPciInstanceKlass_pk1_i_; +text: .text%__1cCosbCmake_polling_page_unreadable6F_v_; +text: .text%__1cONMethodSweeperFsweep6F_v_; +text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_; +text: .text%__1cMCounterDecayFdecay6F_v_; +text: .text%__1cCosOprotect_memory6FpcL_i_; +text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_; +text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_; +text: .text%__1cCosXserialize_thread_states6F_v_; +text: .text%__1cUSafepointSynchronizeFbegin6F_v_; +text: .text%__1cUSafepointSynchronizeRis_cleanup_needed6F_i_; +text: .text%__1cUSafepointSynchronizeQdo_cleanup_tasks6F_v_; +text: .text%__1cRInlineCacheBufferIis_empty6F_i_; +text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_; +text: .text%__1cTAbstractInterpreterRnotice_safepoints6F_v_; +text: .text%__1cNloadConP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosbAmake_polling_page_readable6F_v_; +text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_; +text: .text%__1cUSafepointSynchronizeDend6F_v_; +text: .text%__1cTAbstractInterpreterRignore_safepoints6F_v_; +text: .text%__1cQVMOperationQdDueueGinsert6MpnMVM_Operation_2_v_; +text: .text%__1cQVMOperationQdDueueGunlink6MpnMVM_Operation__v_; +text: .text%__1cMVM_OperationIevaluate6M_v_; +text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_; +text: .text%__1cCosMget_priority6FpknGThread_rnOThreadPriority__nIOSReturn__; +text: .text%__1cQVMOperationQdDueueOqueue_add_back6MipnMVM_Operation__v_; +text: .text%__1cGThreadMget_priority6Fpk0_nOThreadPriority__; +text: .text%__1cCosTget_native_priority6FpknGThread_pi_nIOSReturn__; +text: .text%__1cIVMThreadSevaluate_operation6MpnMVM_Operation__v_; +text: .text%__1cQVMOperationQdDueueDadd6MpnMVM_Operation__i_; +text: .text%__1cSmembar_releaseNodeLbottom_type6kM_pknEType__; +text: .text%__1cCosGrandom6F_l_; +text: .text%__1cNget_next_hash6F_l_: synchronizer.o; +text: .text%__1cNJvmtiGCMarker2t6Mi_v_; +text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_; +text: .text%__1cKPSScavengeXshould_attempt_scavenge6F_i_; +text: .text%__1cKPSScavengeQinvoke_no_policy6Fpi_i_; +text: .text%__1cPGlobalTLABStatsHpublish6M_v_; +text: .text%__1cUinitialize_hashtable6FppnLNameSigHash__v_; +text: .text%__1cPclear_hashtable6FppnLNameSigHash__v_; +text: .text%__1cQciBytecodeStreamUis_unresolved_string6kM_i_; +text: .text%__1cFciEnvUis_unresolved_string6kMpnPciInstanceKlass_i_i_; +text: .text%__1cFciEnvZis_unresolved_string_impl6kMpnNinstanceKlass_i_i_; +text: .text%__1cNtestP_regNodeFreloc6kM_i_; +text: .text%__1cNSCMemProjNodeGis_CFG6kM_i_; +text: .text%__1cKPSScavengeGinvoke6Fpi_v_; +text: .text%__1cUParallelScavengeHeapTfailed_mem_allocate6MpiLii_pnIHeapWord__; +text: 
.text%__1cbDVM_ParallelGCFailedAllocationEname6kM_pkc_; +text: .text%__1cbDVM_ParallelGCFailedAllocationEdoit6M_v_; +text: .text%__1cKDictionaryJnew_entry6MIpnMklassOopDesc_pnHoopDesc__pnPDictionaryEntry__; +text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_; +text: .text%__1cQSystemDictionaryRupdate_dictionary6FiIiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cQSystemDictionaryRcheck_constraints6FiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cQSystemDictionaryQfind_placeholder6FiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cVLoaderConstraintTablePcheck_or_update6MnTinstanceKlassHandle_nGHandle_nMsymbolHandle__pkc_; +text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNIdealLoopTreeTcheck_inner_safepts6MpnOPhaseIdealLoop__v_; +text: .text%__1cPsarI_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKciTypeFlowPflow_exceptions6MpnNGrowableArray4Cpn0AFBlock___pnNGrowableArray4CpnPciInstanceKlass___pn0ALStateVector__v_; +text: .text%__1cIAndINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciObjectOis_null_object6kM_i_; +text: .text%__1cNIdealLoopTreeNDCE_loop_body6M_v_; +text: .text%__1cNprefetchwNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXmembar_release_lockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFStateP_sub_Op_LShiftI6MpknENode__v_; +text: .text%__1cNIdealLoopTreeVadjust_loop_exit_prob6MpnOPhaseIdealLoop__v_; +text: .text%__1cKstoreCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMPhaseChaitinMreset_uf_map6MI_v_; +text: .text%__1cMPhaseChaitinSbuild_ifg_physical6MpnMResourceArea__I_; +text: .text%__1cNPhaseCoalescePcoalesce_driver6M_v_; +text: .text%__1cNdecI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSComputeAdapterInfoHdo_long6M_v_; +text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_; +text: .text%__1cSTailCalljmpIndNodeNis_block_proj6kM_pknENode__; +text: .text%__1cIciObjectMhas_encoding6M_i_; +text: .text%__1cMrcx_RegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIMulLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cHPhiNodeKmake_blank6FpnENode_2_p0_; +text: .text%JVM_GetMethodIxSignatureUTF; +text: .text%JVM_GetMethodIxMaxStack; +text: .text%JVM_GetMethodIxArgsSize; +text: .text%JVM_GetMethodIxByteCodeLength; +text: .text%JVM_GetMethodIxByteCode; +text: .text%JVM_GetMethodIxExceptionIndexes; +text: .text%JVM_GetMethodIxExceptionsCount; +text: .text%__1cENodeUdepends_only_on_test6kM_i_; +text: .text%__1cXmembar_acquire_lockNodePoper_input_base6kM_I_; +text: .text%__1cOPhaseIdealLoopMdominated_by6MpnENode_2_v_; +text: .text%__1cNGrowableArray4nLKlassHandle__Icontains6kMrkn0A__i_; +text: .text%__1cLGCTaskQdDueue2t6Mi_v_; +text: .text%__1cNaddL_rRegNodeErule6kM_I_; +text: .text%__1cGGCTask2t6Mn0AEKindEkind__v_; +text: .text%__1cZSerialOldToYoungRootsTaskEname6M_pc_; +text: .text%__1cZSerialOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cSPSPromotionManagerNpost_scavenge6F_v_; +text: .text%__1cSPSPromotionManagerMpre_scavenge6F_v_; +text: .text%__1cUWaitForBarrierGCTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cSPSPromotionManagerbBvm_thread_promotion_manager6F_p0_; +text: .text%__1cSAdaptiveSizePolicyWminor_collection_begin6M_v_; +text: .text%__1cSAdaptiveSizePolicyUminor_collection_end6MnHGCCauseFCause__v_; +text: .text%__1cUWaitForBarrierGCTaskEname6M_pc_; +text: .text%__1cNMonitorSupplyHrelease6FpnHMonitor__v_; +text: 
.text%__1cNMonitorSupplyHreserve6F_pnHMonitor__; +text: .text%__1cUWaitForBarrierGCTaskIwait_for6M_v_; +text: .text%__1cUWaitForBarrierGCTaskIdestruct6M_v_; +text: .text%__1cUWaitForBarrierGCTaskHdestroy6Fp0_v_; +text: .text%__1cUWaitForBarrierGCTask2t6Mi_v_; +text: .text%__1cUWaitForBarrierGCTaskGcreate6F_p0_; +text: .text%__1cNBarrierGCTaskIdestruct6M_v_; +text: .text%__1cNBarrierGCTaskOdo_it_internal6MpnNGCTaskManager_I_v_; +text: .text%__1cNGCTaskManagerVrelease_all_resources6M_v_; +text: .text%__1cNGCTaskManagerIadd_list6MpnLGCTaskQdDueue__v_; +text: .text%__1cLGCTaskQdDueueHenqueue6Mp0_v_; +text: .text%__1cLGCTaskQdDueueGcreate6F_p0_; +text: .text%__1cGGCTaskIdestruct6M_v_; +text: .text%__1cSCardTableExtensionRscavenge_contents6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager__v_; +text: .text%__1cHThreadsZcreate_thread_roots_tasks6FpnLGCTaskQdDueue__v_; +text: .text%__1cKPSYoungGenLswap_spaces6M_v_; +text: .text%__1cUPSAdaptiveSizePolicybPcompute_survivor_space_size_and_threshold6MiiL_i_; +text: .text%__1cUParallelScavengeHeapQresize_young_gen6MLL_v_; +text: .text%__1cUPSAdaptiveSizePolicyPupdate_averages6MiLL_v_; +text: .text%__1cKPSYoungGenRresize_generation6MLL_i_; +text: .text%__1cKPSYoungGenGresize6MLL_v_; +text: .text%__1cKPSYoungGenNresize_spaces6MLL_v_; +text: .text%__1cHMatcherKcan_be_arg6Fi_i_; +text: .text%__1cHMatcherQis_spillable_arg6Fi_i_; +text: .text%__1cUPSAdaptiveSizePolicyOshould_full_GC6ML_i_; +text: .text%__1cSAdaptiveSizePolicybIupdate_minor_pause_young_estimator6Md_v_; +text: .text%__1cUPSAdaptiveSizePolicybGupdate_minor_pause_old_estimator6Md_v_; +text: .text%__1cNsubL_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cMStartOSRNodeGOpcode6kM_i_; +text: .text%__1cRsubI_rReg_memNodeErule6kM_I_; +text: .text%__1cQinstanceRefKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cXmembar_acquire_lockNodeHtwo_adr6kM_I_; +text: .text%__1cNandI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cNcmovI_regNodePoper_input_base6kM_I_; +text: .text%__1cMURShiftINodeJideal_reg6kM_I_; +text: .text%__1cMorI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cLRShiftINodeJideal_reg6kM_I_; +text: .text%__1cLklassItableRinitialize_itable6M_v_; +text: .text%__1cLklassVtableQfill_in_mirandas6Mri_v_; +text: .text%__1cRandI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cSmembar_releaseNodePoper_input_base6kM_I_; +text: .text%__1cFKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cJrRegLOperFclone6kM_pnIMachOper__; +text: .text%__1cFStateR_sub_Op_LoadKlass6MpknENode__v_; +text: .text%__1cRmethodDataOopDescJis_mature6kM_i_; +text: .text%__1cJcmpOpOperEless6kM_i_; +text: .text%__1cFKlassWappend_to_sibling_list6M_v_; +text: .text%__1cOcompL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__; +text: .text%__1cNloadKlassNodeFreloc6kM_i_; +text: .text%__1cRshrI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAndINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cTjava_lang_ThrowableNset_backtrace6FpnHoopDesc_2_v_; +text: .text%__1cPcmovI_reg_gNodeMideal_Opcode6kM_i_; +text: .text%__1cIAndINodeGmul_id6kM_pknEType__; +text: .text%__1cTClassLoadingServiceScompute_class_size6FpnNinstanceKlass__L_; +text: .text%__1cLklassVtableQget_num_mirandas6FpnMklassOopDesc_pnPobjArrayOopDesc_4_i_; +text: .text%__1cIVerifierQrelax_verify_for6FpnHoopDesc__i_; +text: 
.text%__1cLklassVtablebKcompute_vtable_size_and_num_mirandas6Fri1pnMklassOopDesc_pnPobjArrayOopDesc_nLAccessFlags_pnHoopDesc_pnNsymbolOopDesc_5_v_; +text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cRaddI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cVjava_lang_ClassLoaderRis_trusted_loader6FpnHoopDesc__i_; +text: .text%__1cNmethodOopDescMsort_methods6FpnPobjArrayOopDesc_222_v_; +text: .text%__1cQSystemDictionaryQadd_to_hierarchy6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserUcompute_oop_map_size6MnTinstanceKlassHandle_ii_i_; +text: .text%__1cPClassFileParserOparseClassFile6MnMsymbolHandle_nGHandle_2r1pnGThread__nTinstanceKlassHandle__; +text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserNfill_oop_maps6MnTinstanceKlassHandle_ii_v_; +text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserYcheck_super_class_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cPClassFileParserQparse_interfaces6MnSconstantPoolHandle_nGHandle_2pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_; +text: .text%__1cTClassLoadingServiceTnotify_class_loaded6FpnNinstanceKlass_i_v_; +text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserMparse_fields6MnSconstantPoolHandle_ipnUFieldAllocationCount_pnOobjArrayHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cPClassFileParserNparse_methods6MnSconstantPoolHandle_ipnLAccessFlags_ppnPobjArrayOopDesc_66pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__; +text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_; +text: .text%__1cKoopFactoryQnew_constantPool6FipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cRconstantPoolKlassIallocate6MipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cPClassFileStream2t6MpCipc_v_; +text: .text%__1cNinstanceKlassbBdo_local_static_fields_impl6FnTinstanceKlassHandle_pFpnPfieldDescriptor_pnGThread__v5_v_; +text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_; +text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_; +text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_; +text: .text%__1cNinstanceKlassQinit_implementor6M_v_; +text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_; +text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryVresolve_super_or_fail6FnMsymbolHandle_1nGHandle_2pnGThread__pnMklassOopDesc__; +text: 
.text%__1cNinstanceKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cKTypeRawPtrEmake6FpC_pk0_; +text: .text%__1cScompI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cScompI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cOMethodLiveness2t6MpnFArena_pnIciMethod__v_; +text: .text%__1cOMethodLivenessSpropagate_liveness6M_v_; +text: .text%__1cOMethodLivenessRinit_basic_blocks6M_v_; +text: .text%__1cOMethodLivenessNinit_gen_kill6M_v_; +text: .text%__1cOMethodLivenessQcompute_liveness6M_v_; +text: .text%__1cFKlassRoop_is_methodData6kM_i_; +text: .text%__1cFVTuneQstart_class_load6F_v_; +text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_; +text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cFVTuneOend_class_load6F_v_; +text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_; +text: .text%__1cQSystemDictionaryRload_shared_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cQSystemDictionaryRfind_shared_class6FnMsymbolHandle__pnMklassOopDesc__; +text: .text%__1cSThreadProfilerMark2T6M_v_; +text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cSMemBarVolatileNodeGOpcode6kM_i_; +text: .text%__1cRaddL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cKciTypeFlowLStateVectorGdo_ldc6MpnQciBytecodeStream__v_; +text: .text%__1cMPhaseIterGVNIoptimize6M_v_; +text: .text%__1cOrFlagsRegUOperFclone6kM_pnIMachOper__; +text: .text%__1cNmulL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cMrdi_RegPOperJnum_edges6kM_I_; +text: .text%__1cRsalI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_; +text: .text%__1cQPackageHashtableMcompute_hash6Mpkci_I_; +text: .text%__1cWconstantPoolCacheKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cRsalL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cIConINodeHget_int6kMpi_i_; +text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_; +text: .text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__; +text: .text%__1cLOpaque2NodeGOpcode6kM_i_; +text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_; +text: .text%__1cILoopNode2t6MpnENode_2_v_; +text: .text%__1cJloadBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_; +text: .text%__1cKstoreINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTconvI2L_reg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cScompP_mem_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIVerifierRverify_byte_codes6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIRewriterScompute_index_maps6FnSconstantPoolHandle_rpnIintArray_rpnIintStack__v_; +text: .text%__1cIRewriterXnew_constant_pool_cache6FrnIintArray_pnGThread__nXconstantPoolCacheHandle__; +text: .text%__1cIintArray2t6Mii_v_; +text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_; +text: .text%__1cKoopFactoryVnew_constantPoolCache6FipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cNinstanceKlassWadd_loader_constraints6FnTinstanceKlassHandle_pnGThread__v_; +text: 
.text%__1cNinstanceKlassLverify_code6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cWconstantPoolCacheKlassIallocate6MipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_; +text: .text%__1cFframeWsender_for_entry_frame6kMpnLRegisterMap__0_; +text: .text%__1cHPhiNodeDcmp6kMrknENode__I_; +text: .text%__1cSmembar_releaseNodeHtwo_adr6kM_I_; +text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_; +text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cPClassFileParserbSparse_constant_pool_interfacemethodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_; +text: .text%__1cMtlsLoadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmodI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cNtestL_regNodeMideal_Opcode6kM_i_; +text: .text%__1cRaddI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIConFNodeGOpcode6kM_i_; +text: .text%__1cLOpaque1NodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cTconvI2L_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNSharedRuntimebOraw_exception_handler_for_return_address6FpC_1_; +text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_; +text: .text%__1cOMethodLivenessKBasicBlockPmerge_exception6MnGBitMap__i_; +text: .text%__1cTconvI2L_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAndINodeKmul_opcode6kM_i_; +text: .text%__1cIAndINodeKadd_opcode6kM_i_; +text: .text%__1cPcmovI_reg_gNodeJnum_opnds6kM_I_; +text: .text%__1cKCMoveINodeGOpcode6kM_i_; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_; +text: .text%__1cIRootNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPloadConUL32NodeHsize_of6kM_I_; +text: .text%__1cJAssemblerEandq6MpnMRegisterImpl_i_v_; +text: .text%__1cLClassLoaderOlookup_package6Fpkc_pnLPackageInfo__; +text: .text%__1cQPackageHashtableJget_entry6MiIpkcL_pnLPackageInfo__; +text: .text%__1cIGraphKitRmerge_fast_memory6MpnENode_2i_v_; +text: .text%JVM_Clone; +text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_; +text: .text%__1cUCallCompiledJavaNodeGOpcode6kM_i_; +text: .text%__1cPsalI_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKadd_n_reqs6FpnENode_1_v_: graphKit.o; +text: .text%__1cSTailCalljmpIndNodeMideal_Opcode6kM_i_; +text: .text%__1cQComputeCallStackJdo_double6M_v_; +text: .text%__1cKciTypeFlowLStateVectorMdo_putstatic6MpnQciBytecodeStream__v_; +text: .text%__1cLClassLoaderLadd_package6Fpkci_i_; +text: .text%__1cIGraphKitHopt_iff6MpnENode_2_2_; +text: .text%__1cIGraphKitOmake_merge_mem6MpnENode_22_v_; +text: .text%__1cGEventsDlog6FpkcE_v_; +text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cSsafePoint_pollNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_; +text: .text%__1cPshrI_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFParseWensure_phis_everywhere6M_v_; +text: .text%__1cNsubL_rRegNodeErule6kM_I_; +text: .text%__1cNIdealLoopTreeUiteration_split_impl6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cNIdealLoopTreebBpolicy_do_remove_empty_loop6MpnOPhaseIdealLoop__i_; +text: 
.text%__1cNIdealLoopTreeOpolicy_peeling6kMpnOPhaseIdealLoop__i_; +text: .text%__1cIBoolNodeZis_counted_loop_exit_test6M_i_; +text: .text%__1cJloadCNodeHtwo_adr6kM_I_; +text: .text%__1cUPSMarkSweepDecoratorVdestination_decorator6F_p0_; +text: .text%__1cTGeneratePairingInfoRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cENode2n6FL_pv_; +text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_; +text: .text%__1cKBufferBlob2n6FLI_pv_; +text: .text%__1cFParseKarray_load6MnJBasicType__v_; +text: .text%__1cICodeBlob2t6Mpkcii_v_; +text: .text%__1cKBufferBlob2t6Mpkci_v_; +text: .text%__1cKBufferBlobGcreate6Fpkci_p0_; +text: .text%__1cKciTypeFlowLStateVectorLdo_putfield6MpnQciBytecodeStream__v_; +text: .text%__1cHnmethodNscope_desc_at6MpC_pnJScopeDesc__; +text: .text%__1cHnmethodJcode_size6kM_i_; +text: .text%__1cRtestP_reg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cRtestP_reg_memNodePoper_input_base6kM_I_; +text: .text%__1cOPhaseIdealLoopQset_subtree_ctrl6MpnENode__v_; +text: .text%__1cOjmpLoopEndNodeJnum_opnds6kM_I_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_i_v_; +text: .text%__1cRconstantPoolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassRclass_initializer6M_pnNmethodOopDesc__; +text: .text%__1cNinstanceKlassWcall_class_initializer6MpnGThread__v_; +text: .text%__1cNinstanceKlassbOset_initialization_state_and_notify_impl6FnTinstanceKlassHandle_n0AKClassState_pnGThread__v_; +text: .text%__1cNinstanceKlassbJset_initialization_state_and_notify6Mn0AKClassState_pnGThread__v_; +text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_; +text: .text%__1cMrdi_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cTStackWalkCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_; +text: .text%__1cFTypeDCeq6kMpknEType__i_; +text: .text%__1cJLoadCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNtestL_regNodeHtwo_adr6kM_I_; +text: .text%__1cTconvL2I_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRCompilationPolicybJreset_counter_for_back_branch_event6MnMmethodHandle__v_; +text: .text%__1cMrax_RegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNmodI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cNSignatureInfoIdo_short6M_v_; +text: .text%JVM_GetFieldIxModifiers; +text: .text%__1cNsubL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNandL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cNsubL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cTMachCallRuntimeNodePret_addr_offset6M_i_; +text: .text%__1cOcompiledVFrameEcode6kM_pnHnmethod__; +text: .text%__1cICodeHeapTmark_segmap_as_used6MLL_v_; +text: .text%__1cMorI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAddLNodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cHOrINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%JVM_IsConstructorIx; +text: .text%__1cEDict2t6MpFpkv2_ipF2_ipnFArena_i_v_; +text: .text%__1cHMatcherLfind_shared6MpnENode__v_; +text: .text%__1cHMatcherFxform6MpnENode_i_2_; +text: .text%__1cJStartNodeHsize_of6kM_I_; +text: .text%__1cILRG_List2t6MI_v_; +text: .text%__1cHMatcherLreturn_addr6kM_i_; +text: .text%__1cSindIndexOffsetOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cGBundlePinitialize_nops6FppnIMachNode__v_; +text: .text%__1cOMachPrologNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cHMemNodeHsize_of6kM_I_; +text: .text%__1cNSignatureInfoIdo_float6M_v_; +text: 
.text%__1cRaddI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRmulI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cFParseNadd_safepoint6M_v_; +text: .text%__1cFStateT_sub_Op_CheckCastPP6MpknENode__v_; +text: .text%__1cRaddI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOCompiledRFrameEinit6M_v_; +text: .text%__1cGvframeDtop6kM_p0_; +text: .text%__1cPsarI_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cRmethodDataOopDescYcompute_extra_data_count6Fii_i_; +text: .text%__1cPcheckCastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciObjectIis_klass6M_i_; +text: .text%__1cFStateM_sub_Op_SubI6MpknENode__v_; +text: .text%__1cRxorI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNloadConP0NodeHsize_of6kM_I_; +text: .text%__1cJAssemblerEaddq6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerEsubq6MpnMRegisterImpl_2_v_; +text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6ML_v_; +text: .text%__1cTresource_free_bytes6FpcL_v_; +text: .text%__1cNSingletonBlobMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cUPSMarkSweepDecoratorPadjust_pointers6M_v_; +text: .text%__1cUPSMarkSweepDecoratorHcompact6Mi_v_; +text: .text%__1cUPSMarkSweepDecoratorKprecompact6M_v_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeMideal_Opcode6kM_i_; +text: .text%__1cRindIndexScaleOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cRindIndexScaleOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cPCountedLoopNodeHsize_of6kM_I_; +text: .text%__1cENodeHrm_prec6MI_v_; +text: .text%__1cHAddNodeGis_Add6kM_pk0_; +text: .text%__1cHCompilebAvarargs_C_out_slots_killed6kM_I_; +text: .text%__1cTMachCallRuntimeNodeSis_MachCallRuntime6M_p0_; +text: .text%__1cMrax_RegIOperJnum_edges6kM_I_; +text: .text%__1cICodeHeapLmerge_right6MpnJFreeBlock__v_; +text: .text%__1cNaddI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNmulL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cWandI_rReg_imm65535NodeMideal_Opcode6kM_i_; +text: .text%__1cKReturnNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRjmpConU_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLjmpConUNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cRjmpConU_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cRjmpConU_shortNodeJis_Branch6kM_I_; +text: .text%__1cKcmpOpUOperFclone6kM_pnIMachOper__; +text: .text%__1cRtestP_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFMutexbLwait_for_lock_blocking_implementation6MpnKJavaThread__v_; +text: .text%__1cIregDOperJnum_edges6kM_I_; +text: .text%__1cPciInstanceKlassTis_java_lang_Object6M_i_; +text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__; +text: .text%__1cSindIndexOffsetOperNconstant_disp6kM_i_; +text: .text%__1cIAndLNodeGadd_id6kM_pknEType__; +text: .text%__1cLConvL2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQleaPIdxScaleNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIAndLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cRaddP_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMloadConLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHMatcherQpost_fast_unlock6FpknENode__i_; +text: .text%__1cFStateV_sub_Op_MemBarRelease6MpknENode__v_; +text: .text%__1cOleaPIdxOffNodeMideal_Opcode6kM_i_; +text: .text%__1cILoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cScompI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cScompI_rReg_memNodeJnum_opnds6kM_I_; +text: 
.text%__1cJAssemblerDorq6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cScompI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKJNIHandlesOdestroy_global6FpnI_jobject_i_v_; +text: .text%__1cPcmpFastLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXmembar_release_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTcompareAndSwapLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmethodOopDescTset_native_function6MpC_v_; +text: .text%__1cKciTypeFlowLStateVectorJhalf_type6FpnGciType__3_; +text: .text%__1cQmerge_point_safe6FpnENode__i_: loopopts.o; +text: .text%__1cRaddL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cHMatcherUc_calling_convention6FpnLOptoRegPair_I_v_; +text: .text%__1cPCallRuntimeNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_; +text: .text%__1cNaddL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNxorI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cWCallLeafNoFPDirectNodePoper_input_base6kM_I_; +text: .text%__1cENodeHget_int6kMpi_i_; +text: .text%__1cPCountedLoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJLoadFNodeGOpcode6kM_i_; +text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNincI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPClassFileParserbEparse_constant_pool_long_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPcmovI_reg_lNodePoper_input_base6kM_I_; +text: .text%__1cJAssemblerEleaq6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cNinstanceKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cJloadINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHTypePtrFxmeet6kMpknEType__3_; +text: .text%__1cNprefetchwNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__; +text: .text%__1cIPhaseIFGYCompute_Effective_Degree6M_v_; +text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__; +text: .text%__1cMPhaseChaitinOcache_lrg_info6M_v_; +text: .text%__1cMPhaseChaitinISimplify6M_v_; +text: .text%__1cMPhaseChaitinGSelect6M_I_; +text: .text%__1cRsarL_rReg_immNodeErule6kM_I_; +text: .text%__1cKcmpOpUOperJnot_equal6kM_i_; +text: .text%__1cScompU_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cScompU_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cRNativeGeneralJumpQjump_destination6kM_pC_; +text: .text%__1cLRethrowNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cWCallLeafNoFPDirectNodeHtwo_adr6kM_I_; +text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_; +text: .text%__1cSvframeStreamCommonYfill_from_compiled_frame6MpnHnmethod_i_v_; +text: .text%__1cNandL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cHnmethodQis_native_method6kM_i_; +text: .text%__1cTleaPIdxScaleOffNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNmulL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cODataRelocationGoffset6M_i_; +text: .text%__1cODataRelocationJset_value6MpC_v_; +text: .text%__1cKRelocationRpd_set_data_value6MpCl_v_; +text: .text%__1cLOptoRuntimeJstub_name6FpC_pkc_; +text: .text%__1cIMulINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFStateO_sub_Op_StoreB6MpknENode__v_; +text: .text%__1cRaddL_rReg_immNodeHtwo_adr6kM_I_; +text: 
.text%__1cIregFOperJnum_edges6kM_I_; +text: .text%__1cRandI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIRootNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIRootNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOleaPIdxOffNodePoper_input_base6kM_I_; +text: .text%__1cJcmpOpOperKless_equal6kM_i_; +text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_; +text: .text%__1cIMulLNodeGmul_id6kM_pknEType__; +text: .text%__1cMrep_stosNodeHtwo_adr6kM_I_; +text: .text%__1cHMemNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cNsubI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cHMemNodeScalculate_adr_type6FpknEType_pknHTypePtr__6_; +text: .text%__1cRmulI_rReg_immNodeErule6kM_I_; +text: .text%__1cURethrowExceptionNodePoper_input_base6kM_I_; +text: .text%__1cNaddP_rRegNodePoper_input_base6kM_I_; +text: .text%__1cFStateP_sub_Op_RShiftI6MpknENode__v_; +text: .text%__1cKstoreLNodeHtwo_adr6kM_I_; +text: .text%__1cNnegI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodePoper_input_base6kM_I_; +text: .text%__1cbFloadConL_0x6666666666666667NodeErule6kM_I_; +text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cIGraphKitNstore_barrier6MpnENode_22_v_; +text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_; +text: .text%__1cNloadKlassNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSmembar_acquireNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMoutputStreamMdo_vsnprintf6FpcLpkcpnR__va_list_element_irL_3_; +text: .text%__1cMoutputStreamFprint6MpkcE_v_; +text: .text%__1cMURShiftLNodeJideal_reg6kM_I_; +text: .text%__1cZPhaseConservativeCoalesce2t6MrnMPhaseChaitin__v_; +text: .text%__1cMPhaseChaitinFSplit6MI_I_; +text: .text%__1cMPhaseChaitinHcompact6M_v_; +text: .text%__1cMPhaseChaitinZcompress_uf_map_for_nodes6M_v_; +text: .text%__1cZPhaseConservativeCoalesceGverify6M_v_; +text: .text%__1cQComputeCallStackIdo_short6M_v_; +text: .text%__1cNFingerprinterHdo_long6M_v_; +text: .text%__1cIciMethodRinstructions_size6M_i_; +text: .text%__1cSsafePoint_pollNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNloadConL0NodeLbottom_type6kM_pknEType__; +text: .text%__1cJimmL0OperJconstantL6kM_x_; +text: .text%__1cWandI_rReg_imm65535NodePoper_input_base6kM_I_; +text: .text%__1cIAndINodeJideal_reg6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerKverify_oop6MpnMRegisterImpl_nITosState__v_; +text: .text%__1cYexternal_word_RelocationJpack_data6M_i_; +text: .text%__1cJimmP0OperFclone6kM_pnIMachOper__; +text: .text%__1cKRelocationYruntime_address_to_index6FpC_l_; +text: .text%__1cOemit_d32_reloc6FrnKCodeBuffer_inJrelocInfoJrelocType_i_v_; +text: .text%__1cYexternal_word_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cRsalL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cLPhaseValues2T5B6M_v_; +text: .text%__1cNstoreImmBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateQ_sub_Op_URShiftL6MpknENode__v_; +text: .text%__1cJNode_ListEyank6MpnENode__v_; +text: .text%__1cNxorI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNxorI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cJAssemblerEmovq6MpnMRegisterImpl_l_v_; +text: .text%jni_ExceptionCheck: jni.o; +text: .text%__1cMFastLockNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cTCallDynamicJavaNodeEhash6kM_I_; +text: .text%__1cMalloc_object6FpnH_jclass_pnGThread__pnPinstanceOopDesc__: jni.o; +text: 
.text%__1cRshrL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIXorINodeLbottom_type6kM_pknEType__; +text: .text%__1cJAssemblerEsubq6MpnMRegisterImpl_i_v_; +text: .text%__1cNloadConL0NodeMideal_Opcode6kM_i_; +text: .text%__1cLPcDescCacheKpc_desc_at6kMpnHnmethod_pC_pnGPcDesc__; +text: .text%__1cKBlock_ListGinsert6MIpnFBlock__v_; +text: .text%__1cKtype2basic6FpknEType__nJBasicType__; +text: .text%__1cQleaPIdxScaleNodeLbottom_type6kM_pknEType__; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_; +text: .text%__1cIGraphKitOnull_check_oop6MpnKRegionNode_pnENode_i_4_; +text: .text%__1cJloadCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRxorI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cKTypeAryPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cKTypeRawPtrEmake6FnHTypePtrDPTR__pk0_; +text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_; +text: .text%__1cICodeHeapPadd_to_freelist6MpnJHeapBlock__v_; +text: .text%__1cICodeHeapKdeallocate6Mpv_v_; +text: .text%__1cFframeLnmethods_do6M_v_; +text: .text%__1cJVectorSetGslamin6Mrk0_v_; +text: .text%__1cFStateQ_sub_Op_URShiftI6MpknENode__v_; +text: .text%__1cScompI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRaddI_rReg_memNodeErule6kM_I_; +text: .text%__1cYexternal_word_RelocationWfix_relocation_at_move6Ml_v_; +text: .text%__1cKRelocationYpd_get_address_from_code6M_pC_; +text: .text%__1cRxorI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_; +text: .text%__1cNandL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOCompilerOracleMshould_print6FnMmethodHandle__i_; +text: .text%__1cNstoreImmBNodeFreloc6kM_i_; +text: .text%__1cJcmpOpOperNgreater_equal6kM_i_; +text: .text%__1cKBufferBlobEfree6Fp0_v_; +text: .text%__1cZInterpreterMacroAssemblerKverify_FPU6MinITosState__v_; +text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_; +text: .text%__1cOGenerateOopMapIppop_any6Mi_v_; +text: .text%__1cKNode_ArrayFclear6M_v_; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_; +text: .text%__1cJAssemblerFpushq6MpnMRegisterImpl__v_; +text: .text%__1cIRootNodeHis_Root6M_p0_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cIJumpDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cRsalL_rReg_immNodeErule6kM_I_; +text: .text%__1cPstoreImmI16NodeHtwo_adr6kM_I_; +text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_; +text: .text%jni_NewObject: jni.o; +text: .text%__1cNaddP_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cPsarI_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cFciEnvKcompile_id6M_I_; +text: .text%__1cOPhaseIdealLoopNreorg_offsets6MpnNIdealLoopTree__v_; +text: .text%__1cNtestL_regNodeErule6kM_I_; +text: .text%__1cLOptoRuntimebAcomplete_monitor_exit_Type6F_pknITypeFunc__; +text: .text%__1cNstoreImmINodeHtwo_adr6kM_I_; +text: .text%__1cIGraphKitNshared_unlock6MpnENode_2_v_; +text: .text%__1cNSafePointNodeLpop_monitor6M_v_; +text: .text%__1cRsarI_rReg_immNodeErule6kM_I_; +text: .text%__1cNtestL_regNodePoper_input_base6kM_I_; +text: .text%__1cRsarL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMindirectOperNbase_position6kM_i_; +text: .text%__1cMindirectOperNconstant_disp6kM_i_; +text: .text%__1cMTailCallNodeKmatch_edge6kMI_I_; +text: .text%__1cKciTypeFlowLStateVectorGdo_new6MpnQciBytecodeStream__v_; +text: 
.text%__1cHMatcherPprior_fast_lock6FpknENode__i_; +text: .text%__1cGIfNodeMdominated_by6MpnENode_pnMPhaseIterGVN__v_; +text: .text%__1cFStateV_sub_Op_MemBarAcquire6MpknENode__v_; +text: .text%__1cNSharedRuntimeQfind_callee_info6FpnKJavaThread_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cFKlassDLCA6Mp0_1_; +text: .text%__1cRtestP_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRtestP_reg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cRtestP_reg_memNodeJnum_opnds6kM_I_; +text: .text%__1cHciKlassVleast_common_ancestor6Mp0_1_; +text: .text%__1cUmembar_cpu_orderNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUmembar_cpu_orderNodeLbottom_type6kM_pknEType__; +text: .text%__1cTcompareAndSwapLNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cNSCMemProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cTcompareAndSwapLNodeHtwo_adr6kM_I_; +text: .text%__1cJScopeDescGsender6kM_p0_; +text: .text%__1cSindIndexOffsetOperOindex_position6kM_i_; +text: .text%__1cSindIndexOffsetOperNbase_position6kM_i_; +text: .text%__1cNSharedRuntimeOresolve_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cNSafePointNodeMpush_monitor6MpknMFastLockNode__v_; +text: .text%__1cNSharedRuntimeSresolve_sub_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cOcompiledVFrameGsender6kM_pnGvframe__; +text: .text%__1cNtestU_regNodeHtwo_adr6kM_I_; +text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_; +text: .text%__1cNGrowableArray4Cpv_2t6MpnFArena_iirk0_v_; +text: .text%__1cKstoreFNodePoper_input_base6kM_I_; +text: .text%__1cNGrowableArray4Cl_2t6MpnFArena_iirkl_v_; +text: .text%__1cNstoreImmINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOMachEpilogNodeFreloc6kM_i_; +text: .text%__1cOMachEpilogNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovI_regNodeMideal_Opcode6kM_i_; +text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_; +text: .text%__1cFStateN_sub_Op_LoadL6MpknENode__v_; +text: .text%__1cNmodI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cSCallLeafDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNSignatureInfoHdo_char6M_v_; +text: .text%__1cNtestU_regNodeMideal_Opcode6kM_i_; +text: .text%__1cFStateQ_sub_Op_CallLeaf6MpknENode__v_; +text: .text%__1cRAbstractAssemblerFflush6M_v_; +text: .text%__1cJloadLNodeIpeephole6MpnFBlock_ipnNPhaseRegAlloc_ri_pnIMachNode__; +text: .text%__1cJloadLNodeFreloc6kM_i_; +text: .text%__1cSCallLeafDirectNodeFreloc6kM_i_; +text: .text%__1cIGraphKitNgen_checkcast6MpnENode_2p2_2_; +text: .text%__1cIGraphKitRgen_subtype_check6MpnENode_2_2_; +text: .text%__1cNsubL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXmembar_release_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cJAssemblerEmovq6MpnMRegisterImpl_2_v_; +text: .text%__1cRmulL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cRsubI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cFframeRretrieve_receiver6MpnLRegisterMap__pnHoopDesc__; +text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__; +text: .text%jni_NewGlobalRef: jni.o; +text: .text%__1cKciTypeFlowFRangeSprivate_copy_count6kMpn0AGJsrSet__i_; +text: .text%__1cOleaPIdxOffNodeJnum_opnds6kM_I_; +text: .text%__1cOPhaseIdealLoopLdo_split_if6MpnENode__v_; +text: .text%__1cNandI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cHOrINodeGadd_id6kM_pknEType__; +text: .text%__1cIPhaseCFGOinsert_goto_at6MII_v_; +text: .text%__1cOPhaseIdealLoop2t6MrnMPhaseIterGVN_pk0i_v_; +text: 
.text%__1cOPhaseIdealLoopPbuild_loop_tree6M_v_; +text: .text%__1cRsubI_rReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMinINodeLbottom_type6kM_pknEType__; +text: .text%__1cOjmpLoopEndNodeHtwo_adr6kM_I_; +text: .text%__1cJLoadBNodeJideal_reg6kM_I_; +text: .text%__1cNnegI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cFStateS_sub_Op_FastUnlock6MpknENode__v_; +text: .text%__1cXmembar_release_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRcmpFastUnlockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMVirtualSpaceNreserved_size6kM_L_; +text: .text%__1cScompU_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsarI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKStoreFNodeGOpcode6kM_i_; +text: .text%__1cLCastP2LNodeJideal_reg6kM_I_; +text: .text%__1cPcmovI_reg_gNodeErule6kM_I_; +text: .text%__1cFStateP_sub_Op_CastP2L6MpknENode__v_; +text: .text%__1cScompU_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cScompU_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cScompU_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cWCallLeafNoFPDirectNodeRis_safepoint_node6kM_i_; +text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_; +text: .text%__1cWCallLeafNoFPDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_pCnJrelocInfoJrelocType__v_; +text: .text%__1cKstoreFNodeMideal_Opcode6kM_i_; +text: .text%__1cIimmFOperJconstantF6kM_f_; +text: .text%__1cNcmovI_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cKTypeOopPtrSmake_from_constant6FpnIciObject__pk0_; +text: .text%__1cNcmovI_regNodeJnum_opnds6kM_I_; +text: .text%__1cJAssemblerEmovq6MnHAddress_i_v_; +text: .text%__1cIciObjectJis_method6M_i_; +text: .text%__1cIciObjectOis_method_data6M_i_; +text: .text%__1cIDivINodeLbottom_type6kM_pknEType__; +text: .text%__1cHOrINodeJideal_reg6kM_I_; +text: .text%__1cNcmovI_regNodeMcisc_operand6kM_i_; +text: .text%__1cIAndLNodeGmul_id6kM_pknEType__; +text: .text%__1cIAndLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cJloadFNodeMideal_Opcode6kM_i_; +text: .text%__1cbFunnecessary_membar_volatileNodeMideal_Opcode6kM_i_; +text: .text%__1cSmembar_acquireNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cIAndLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIAndLNodeKadd_opcode6kM_i_; +text: .text%__1cFStateO_sub_Op_StoreC6MpknENode__v_; +text: .text%__1cIAndLNodeKmul_opcode6kM_i_; +text: .text%__1cRaddL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMrep_stosNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXmembar_acquire_lockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFParseJdo_ifnull6MnIBoolTestEmask__v_; +text: .text%__1cMtlsLoadPNodeHtwo_adr6kM_I_; +text: .text%__1cIGraphKitOset_pair_local6MipnENode__v_; +text: .text%__1cJLoadCNodeJideal_reg6kM_I_; +text: .text%__1cPcmovI_reg_lNodeMideal_Opcode6kM_i_; +text: .text%__1cJCodeCacheXmark_for_deoptimization6FpnMklassOopDesc__i_; +text: .text%__1cMrcx_RegIOperEtype6kM_pknEType__; +text: .text%__1cLConvL2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOPhaseIdealLoopKDominators6M_v_; +text: .text%__1cHNTarjanDDFS6Fp0rnJVectorSet_pnOPhaseIdealLoop_pI_i_; +text: 
.text%__1cHNTarjanIsetdepth6MIpI_v_; +text: .text%__1cIMulLNodeKmul_opcode6kM_i_; +text: .text%__1cIMulLNodeKadd_opcode6kM_i_; +text: .text%jni_SetLongField: jni.o; +text: .text%__1cOPhaseIdealLoopQbuild_loop_early6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cOPhaseIdealLoopPbuild_loop_late6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cOPhaseIdealLoopRinit_dom_lca_tags6M_v_; +text: .text%__1cKstoreLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLPcDescCacheLadd_pc_desc6MpnGPcDesc__v_; +text: .text%__1cScheck_phi_clipping6FpnHPhiNode_rpnHConNode_rI45rpnENode_5_i_: cfgnode.o; +text: .text%__1cJloadSNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTconvI2L_reg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cRsubI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMloadConFNodeMideal_Opcode6kM_i_; +text: .text%__1cTC2IAdapterGeneratorUgenerate_c2i_adapter6FnMmethodHandle__pnKC2IAdapter__; +text: .text%__1cKCompiledICIis_clean6kM_i_; +text: .text%__1cNaddP_rRegNodeErule6kM_I_; +text: .text%__1cRmulL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cNmethodOopDescVclear_native_function6M_v_; +text: .text%__1cICodeBlobOis_java_method6kM_i_; +text: .text%__1cKVtableStubSpd_code_size_limit6Fi_i_; +text: .text%__1cIUniverseWis_out_of_memory_error6FnGHandle__i_; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_; +text: .text%__1cTjava_lang_ThrowableQclear_stacktrace6FpnHoopDesc__v_; +text: .text%__1cKJavaThreadGactive6F_p0_; +text: .text%JVM_FillInStackTrace; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_; +text: .text%__1cSInterpreterRuntimePset_bcp_and_mdp6FpCpnKJavaThread__v_; +text: .text%__1cKJavaThreadNreguard_stack6MpC_i_; +text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_; +text: .text%jni_DeleteGlobalRef: jni.o; +text: .text%__1cKCompiledICZcompute_monomorphic_entry6FnMmethodHandle_nLKlassHandle_iirnOCompiledICInfo_pnGThread__v_; +text: .text%__1cNGrowableArray4nMmethodHandle__Icontains6kMrkn0A__i_; +text: .text%__1cLOpaque2NodeEhash6kM_I_; +text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__; +text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__; +text: .text%__1cJBytecodesRspecial_length_at6FpC_i_; +text: .text%__1cFParseGdo_new6M_v_; +text: .text%__1cFParseFBlockMadd_new_path6M_i_; +text: .text%__1cLklassItablebFinitialize_itable_for_interface6MpnMklassOopDesc_pnRitableMethodEntry__v_; +text: .text%__1cJimmI0OperJnum_edges6kM_I_; +text: .text%__1cRmulI_rReg_immNodeMcisc_operand6kM_i_; +text: .text%__1cICodeHeapMmax_capacity6kM_L_; +text: .text%__1cMciMethodDataStrap_recompiled_at6MpnLProfileData__i_; +text: .text%__1cRindIndexScaleOperFscale6kM_i_; +text: .text%__1cNxorI_rRegNodeErule6kM_I_; +text: .text%__1cFParseFBlockNstack_type_at6kMi_pknEType__; +text: .text%__1cLConvL2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cFTypeFEmake6Ff_pk0_; +text: .text%__1cIModINodeLbottom_type6kM_pknEType__; +text: .text%__1cJcmpOpOperHgreater6kM_i_; +text: .text%__1cQComputeCallStackHdo_bool6M_v_; +text: .text%__1cJMemRegionMintersection6kM0_0_; +text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cRmulI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHciKlassGloader6M_pnHoopDesc__; +text: .text%__1cIConDNodeGOpcode6kM_i_; +text: .text%__1cNandI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cLRethrowNodeEhash6kM_I_; +text: 
.text%__1cTC2IAdapterGeneratorSstd_verified_entry6FnMmethodHandle__pC_; +text: .text%__1cIDivLNodeGOpcode6kM_i_; +text: .text%__1cNandI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cGThreadOis_Java_thread6kM_i_; +text: .text%__1cSmembar_releaseNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cHMatcherQinline_cache_reg6F_i_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeJnum_opnds6kM_I_; +text: .text%__1cbBInterpreterCodeletInterfaceRcode_size_to_size6kMi_i_; +text: .text%__1cbBInterpreterCodeletInterfaceKinitialize6MpnEStub_i_v_; +text: .text%jni_NewLocalRef: jni.o; +text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_; +text: .text%__1cLOptoRuntimebAresolve_opt_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cPstoreImmI16NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLRShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIemit_d166FrnKCodeBuffer_i_v_; +text: .text%__1cKimmI16OperIconstant6kM_l_; +text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_; +text: .text%__1cMloadConFNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__; +text: .text%__1cJCodeCacheNalive_nmethod6FpnICodeBlob__pnHnmethod__; +text: .text%__1cJAssemblerGmovzbl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cIVMThreadMis_VM_thread6kM_i_; +text: .text%__1cPcmovI_reg_lNodeJnum_opnds6kM_I_; +text: .text%__1cMloadConLNodeHsize_of6kM_I_; +text: .text%__1cOMacroAssemblerSload_unsigned_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cTconvI2L_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNaddL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cKstoreFNodeJnum_opnds6kM_I_; +text: .text%__1cNaddL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cSComputeAdapterInfoJdo_double6M_v_; +text: .text%__1cLimmUL32OperFclone6kM_pnIMachOper__; +text: .text%__1cPloadConUL32NodeFclone6kM_pnENode__; +text: .text%__1cLLShiftLNodeJideal_reg6kM_I_; +text: .text%__1cMtlsLoadPNodePoper_input_base6kM_I_; +text: .text%__1cPlocal_vsnprintf6FpcLpkcpnR__va_list_element__i_; +text: .text%__1cSComputeAdapterInfoHdo_bool6M_v_; +text: .text%jio_vsnprintf; +text: .text%__1cURethrowExceptionNodeGpinned6kM_i_; +text: .text%__1cNstoreImmINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAndLNodeJideal_reg6kM_I_; +text: .text%__1cURethrowExceptionNodeHtwo_adr6kM_I_; +text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_; +text: .text%jio_snprintf; +text: .text%__1cNSafePointNodeKgrow_stack6MpnIJVMState_I_v_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeErule6kM_I_; +text: .text%__1cURethrowExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsarL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cTcompareAndSwapLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_GetCPMethodModifiers; +text: .text%__1cFStateR_sub_Op_SafePoint6MpknENode__v_; +text: .text%__1cSsafePoint_pollNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOCompilerOraclePshould_break_at6FnMmethodHandle__i_; +text: .text%__1cJloadCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQorI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cPsarI_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLRShiftLNodeLbottom_type6kM_pknEType__; +text: .text%__1cKReturnNode2t6MpnENode_2222_v_; +text: .text%__1cKReturnNodeJideal_reg6kM_I_; +text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_; +text: 
.text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_; +text: .text%JVM_DoPrivileged; +text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_; +text: .text%__1cNIdealLoopTreeMis_loop_exit6kMpnENode_pnOPhaseIdealLoop__2_; +text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cSsafePoint_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSsafePoint_pollNodeFreloc6kM_i_; +text: .text%__1cLStrCompNodeGOpcode6kM_i_; +text: .text%__1cJOopMapSet2t6M_v_; +text: .text%__1cKloadUBNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cNobjArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cKstoreCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeGOpcode6kM_i_; +text: .text%__1cOLibraryCallKitNtry_to_inline6M_i_; +text: .text%__1cNFingerprinterHdo_bool6M_v_; +text: .text%__1cOPhaseIdealLoopUsplit_if_with_blocks6MrnJVectorSet_rnKNode_Stack__v_; +text: .text%__1cNmethodOopDescbDbuild_interpreter_method_data6FnMmethodHandle_pnGThread__v_; +text: .text%__1cQLibraryIntrinsicIgenerate6MpnIJVMState__2_; +text: .text%__1cLOopRecorderIoop_size6M_i_; +text: .text%__1cHnmethodOexception_size6kM_i_; +text: .text%__1cHnmethodPscopes_pcs_size6kM_i_; +text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_; +text: .text%__1cYDebugInformationRecorderJdata_size6M_i_; +text: .text%__1cHnmethodQscopes_data_size6kM_i_; +text: .text%__1cHnmethodJstub_size6kM_i_; +text: .text%__1cHnmethodKtotal_size6kM_i_; +text: .text%__1cNtestU_regNodeErule6kM_I_; +text: .text%__1cJOopMapSetJheap_size6kM_i_; +text: .text%__1cICodeBlobWfix_relocation_at_move6Ml_v_; +text: .text%__1cKCodeBufferJcopy_code6MpnICodeBlob__v_; +text: .text%__1cNRelocIteratorMcreate_index6FpnKCodeBuffer_pnJrelocInfo_4_4_; +text: .text%__1cICodeBlobPallocation_size6FpnKCodeBuffer_ii_I_; +text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_; +text: .text%__1cICodeBlob2t6MpkcpnKCodeBuffer_iiipnJOopMapSet_i_v_; +text: .text%__1cMrdx_RegLOperEtype6kM_pknEType__; +text: .text%__1cKCodeBufferPcopy_relocation6MpnICodeBlob__v_; +text: .text%__1cVPatchingRelocIteratorHprepass6M_v_; +text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_; +text: .text%__1cJOopMapSetHcopy_to6MpC_v_; +text: .text%__1cLPhaseValues2t6Mp0_v_; +text: .text%__1cINodeHash2t6Mp0_v_; +text: .text%__1cOPhaseTransform2t6Mp0nFPhaseLPhaseNumber__v_; +text: .text%__1cJAssemblerDjmp6MnHAddress__v_; +text: .text%__1cOJNIHandleBlockRrebuild_free_list6M_v_; +text: .text%__1cSstring_compareNodeZcheck_for_anti_dependence6kM_i_; +text: .text%jni_GetObjectArrayElement: jni.o; +text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_; +text: .text%__1cIDivINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cVCallRuntimeDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cICmpDNodeGOpcode6kM_i_; +text: .text%__1cPcmovI_reg_gNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_AndI6MpknENode__v_; +text: .text%__1cHCompilebBregister_library_intrinsics6M_v_; +text: .text%__1cNGrowableArray4CpnNCallGenerator__2t6Mii_v_; +text: .text%__1cETypeKInitialize6FpnHCompile__v_; +text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_; +text: .text%__1cOCompileWrapper2t6MpnHCompile__v_; +text: .text%__1cHCompileEInit6Mi_v_; +text: .text%__1cVExceptionHandlerTable2t6Mi_v_; +text: .text%__1cFDictIFreset6MpknEDict__v_; +text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_; +text: .text%JVM_FindLoadedClass; +text: 
.text%__1cHMatcherZnumber_of_saved_registers6F_i_; +text: .text%__1cXPhaseAggressiveCoalesceNinsert_copies6MrnHMatcher__v_; +text: .text%__1cIPhaseCFGDDFS6MpnGTarjan__I_; +text: .text%__1cFArenaNmove_contents6Mp0_1_; +text: .text%__1cIPhaseIFG2t6MpnFArena__v_; +text: .text%__1cHMatcherUvalidate_null_checks6M_v_; +text: .text%__1cHCompileOcompute_old_SP6M_i_; +text: .text%__1cJPhaseLive2t6MrknIPhaseCFG_rnILRG_List_pnFArena__v_; +text: .text%__1cMPhaseChaitinRbuild_ifg_virtual6M_v_; +text: .text%__1cXPhaseAggressiveCoalesceGverify6M_v_; +text: .text%__1cJStartNodeJideal_reg6kM_I_; +text: .text%__1cHMatcher2t6MrnJNode_List__v_; +text: .text%__1cFArena2t6ML_v_; +text: .text%__1cIPhaseCFGOschedule_early6MrnJVectorSet_rnJNode_List_rnLBlock_Array__i_; +text: .text%__1cWNode_Backward_Iterator2t6MpnENode_rnJVectorSet_rnJNode_List_rnLBlock_Array__v_; +text: .text%__1cHMatcherFmatch6M_v_; +text: .text%__1cFStateM_sub_Op_Goto6MpknENode__v_; +text: .text%__1cIPhaseCFGNschedule_late6MrnJVectorSet_rnJNode_List_rnNGrowableArray4CI___v_; +text: .text%__1cIPhaseCFGQFind_Inner_Loops6M_v_; +text: .text%__1cIPhaseCFGQGlobalCodeMotion6MrnHMatcher_IrnJNode_List__v_; +text: .text%__1cIPhaseCFGYEstimate_Block_Frequency6M_v_; +text: .text%__1cIPhaseCFGJbuild_cfg6M_I_; +text: .text%__1cHCompileICode_Gen6M_v_; +text: .text%__1cMPhaseChaitin2t6MIrnIPhaseCFG_rnHMatcher__v_; +text: .text%__1cMPhaseChaitinRRegister_Allocate6M_v_; +text: .text%__1cMPhaseChaitinGde_ssa6M_v_; +text: .text%__1cMPhaseChaitinbGstretch_base_pointer_live_ranges6MpnMResourceArea__i_; +text: .text%__1cNPhaseRegAllocTpd_preallocate_hook6M_v_; +text: .text%__1cHMatcherPinit_spill_mask6MpnENode__v_; +text: .text%__1cHMatcherTFixup_Save_On_Entry6M_v_; +text: .text%__1cHMatcherVinit_first_stack_mask6M_v_; +text: .text%__1cIPhaseCFG2t6MpnFArena_pnIRootNode_rnHMatcher__v_; +text: .text%__1cGTarjanIsetdepth6MI_v_; +text: .text%__1cIPhaseCFGKDominators6M_v_; +text: .text%__1cNPhaseRegAlloc2t6MIrnIPhaseCFG_rnHMatcher_pF_v_v_; +text: .text%__1cIPhaseCFGVschedule_pinned_nodes6MrnJVectorSet__v_; +text: .text%__1cHCompileTframe_size_in_words6kM_i_; +text: .text%__1cOCompileWrapper2T6M_v_; +text: .text%__1cHCompileYinit_scratch_buffer_blob6M_v_; +text: .text%__1cHCompileYinit_scratch_locs_memory6M_v_; +text: .text%__1cNPhasePeephole2t6MpnNPhaseRegAlloc_rnIPhaseCFG__v_; +text: .text%__1cJPhaseLive2T6M_v_; +text: .text%__1cNPhasePeephole2T6M_v_; +text: .text%__1cHCompileGOutput6M_v_; +text: .text%__1cHCompileQShorten_branches6MpnFLabel_ri333_v_; +text: .text%__1cHCompileLFill_buffer6M_v_; +text: .text%__1cHCompileTFillExceptionTables6MIpI1pnFLabel__v_; +text: .text%__1cHCompileRScheduleAndBundle6M_v_; +text: .text%__1cOMachPrologNodeFreloc6kM_i_; +text: .text%__1cNtestU_regNodePoper_input_base6kM_I_; +text: .text%__1cWemit_exception_handler6FrnKCodeBuffer__v_; +text: .text%__1cWsize_exception_handler6F_I_; +text: .text%__1cWImplicitExceptionTableIset_size6MI_v_; +text: .text%__1cNPhasePeepholeMdo_transform6M_v_; +text: .text%__1cMPhaseChaitinMfixup_spills6M_v_; +text: .text%__1cMPhaseChaitin2T6M_v_; +text: .text%__1cNPhaseRegAllocPalloc_node_regs6Mi_v_; +text: .text%__1cKCodeBufferOrelocate_stubs6M_v_; +text: .text%__1cIPhaseCFGLRemoveEmpty6M_v_; +text: .text%__1cLdo_liveness6FpnNPhaseRegAlloc_pnIPhaseCFG_pnKBlock_List_ipnFArena_pnEDict__v_: buildOopMap.o; +text: .text%__1cHCompileMBuildOopMaps6M_v_; +text: .text%__1cMPhaseChaitinbApost_allocate_copy_removal6M_v_; +text: .text%__1cNGrowableArray4CpnJNode_List__2t6Mii_v_; +text: 
.text%__1cRsarL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_; +text: .text%__1cFStateM_sub_Op_CmpL6MpknENode__v_; +text: .text%__1cJloadSNodeFreloc6kM_i_; +text: .text%__1cFStateN_sub_Op_LoadS6MpknENode__v_; +text: .text%__1cSInterpreterRuntimeOprofile_method6FpnKJavaThread_pC_i_; +text: .text%__1cOCompiledRFrame2t6MnFframe_pnKJavaThread_pnGRFrame__v_; +text: .text%__1cKC2IAdapterOis_c2i_adapter6kM_i_; +text: .text%__1cOCompiledRFrameKtop_method6kM_nMmethodHandle__; +text: .text%__1cOCompiledRFrameLis_compiled6kM_i_; +text: .text%__1cRmethodDataOopDescKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cKoopFactoryOnew_methodData6FnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_bytes6FpnNmethodOopDesc__i_; +text: .text%__1cPmethodDataKlassIallocate6MnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cNxorI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_words6FpnNmethodOopDesc__i_; +text: .text%__1cRmethodDataOopDescPpost_initialize6MpnOBytecodeStream__v_; +text: .text%__1cHRetNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateO_sub_Op_Return6MpknENode__v_; +text: .text%__1cHRetNodeFreloc6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_base6MnITosState_ppCi_v_; +text: .text%__1cZCallInterpreterDirectNodeHtwo_adr6kM_I_; +text: .text%__1cNloadConP0NodeFclone6kM_pnENode__; +text: .text%__1cOClearArrayNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKScopeValueJread_from6FpnTDebugInfoReadStream__p0_; +text: .text%__1cOcompiledVFrameScreate_stack_value6kMpnKScopeValue__pnKStackValue__; +text: .text%__1cQleaPIdxScaleNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRindIndexScaleOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cIGraphKitNallocate_heap6MpnENode_222pknITypeFunc_pC22ipknKTypeOopPtr__2_; +text: .text%__1cPciInstanceKlassbBcompute_shared_has_subklass6M_i_; +text: .text%__1cNSignatureInfoHdo_byte6M_v_; +text: .text%__1cQorI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cKCompiledICSset_ic_destination6MpC_v_; +text: .text%__1cNIdealLoopTreePiteration_split6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cNandI_rRegNodeErule6kM_I_; +text: .text%__1cRsarI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cIMulINodeGadd_id6kM_pknEType__; +text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNmodI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cKloadUBNodeMideal_Opcode6kM_i_; +text: .text%__1cHBitDataKis_BitData6M_i_; +text: .text%__1cQsalI_rReg_CLNodeMideal_Opcode6kM_i_; +text: .text%__1cNaddP_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cJAssemblerEcmpq6MnHAddress_i_v_; +text: .text%__1cNloadConP0NodeFreloc6kM_i_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cSmembar_acquireNodeLbottom_type6kM_pknEType__; +text: .text%__1cOMacroAssemblerKincrementq6MpnMRegisterImpl_i_v_; +text: .text%__1cRsarI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_; +text: .text%__1cNGrowableArray4nMmethodHandle__2t6Mii_v_; +text: .text%__1cLConvL2INodeJideal_reg6kM_I_; +text: .text%__1cNGrowableArray4nLKlassHandle__2t6Mii_v_; +text: .text%__1cNmethodOopDescThas_native_function6kM_i_; +text: .text%JVM_GetClassNameUTF; +text: 
.text%__1cMPrefetchNodeJideal_reg6kM_I_; +text: .text%__1cKCodeBuffer2t6MpCi_v_; +text: .text%__1cNprefetchwNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateQ_sub_Op_Prefetch6MpknENode__v_; +text: .text%__1cOjmpLoopEndNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNprefetchwNodeFreloc6kM_i_; +text: .text%__1cIAddLNodeJideal_reg6kM_I_; +text: .text%__1cILocation2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cKstoreCNodeFreloc6kM_i_; +text: .text%__1cNdecI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNmethodOopDescWis_vanilla_constructor6kM_i_; +text: .text%__1cWCallLeafNoFPDirectNodeKmethod_set6Ml_v_; +text: .text%__1cOPhaseIdealLoopOplace_near_use6kMpnENode__2_; +text: .text%__1cHi2bNodeMideal_Opcode6kM_i_; +text: .text%__1cNLocationValueLis_location6kM_i_; +text: .text%__1cNLocationValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cIMulLNodeJideal_reg6kM_I_; +text: .text%__1cNsubL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cFStateN_sub_Op_LoadB6MpknENode__v_; +text: .text%__1cNnegI_rRegNodeErule6kM_I_; +text: .text%__1cNFingerprinterJdo_double6M_v_; +text: .text%JVM_FindClassFromClass; +text: .text%__1cKcmpOpUOperEless6kM_i_; +text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cKReflectionGinvoke6FnTinstanceKlassHandle_nMmethodHandle_nGHandle_inOobjArrayHandle_nJBasicType_4ipnGThread__pnHoopDesc__; +text: .text%__1cITypeLongFwiden6kMpknEType__3_; +text: .text%__1cQsalI_rReg_CLNodePoper_input_base6kM_I_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_; +text: .text%__1cKReflectionDbox6FpnGjvalue_nJBasicType_pnGThread__pnHoopDesc__; +text: .text%__1cMLinkResolverbHlinktime_resolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cLBoxLockNodeEhash6kM_I_; +text: .text%__1cJOopMapSetMgrow_om_data6M_v_; +text: .text%__1cRxorI_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cKciTypeFlowFBlockQset_private_copy6Mi_v_; +text: .text%__1cWandI_rReg_imm65535NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cWandI_rReg_imm65535NodeErule6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerGpush_i6MpnMRegisterImpl__v_; +text: .text%__1cNcmovI_regNodeErule6kM_I_; +text: .text%__1cRsalL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNGrowableArray4CpnKInlineTree__Egrow6Mi_v_; +text: .text%__1cSComputeAdapterInfoIdo_short6M_v_; +text: .text%__1cNtestL_regNodeJnum_opnds6kM_I_; +text: .text%__1cLConvF2DNodeGOpcode6kM_i_; +text: .text%__1cISubLNodeLbottom_type6kM_pknEType__; +text: .text%__1cSmembar_acquireNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmembar_acquireNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNaddP_rRegNodeLbottom_type6kM_pknEType__; +text: .text%__1cNmodL_rRegNodeErule6kM_I_; +text: .text%__1cRsalI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerDret6Mi_v_; +text: .text%__1cRshrI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_; +text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_; +text: .text%JVM_NewInstanceFromConstructor; +text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_; +text: .text%__1cFStateL_sub_Op_OrI6MpknENode__v_; +text: .text%JVM_IHashCode; +text: 
.text%__1cMtlsLoadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cEUTF8Ounicode_length6Fpkc_i_; +text: .text%__1cHTypePtrEmake6FnETypeFTYPES_n0ADPTR_i_pk0_; +text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__; +text: .text%__1cNxorI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cIregFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLMachUEPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLOpaque2NodeLbottom_type6kM_pknEType__; +text: .text%__1cNtestL_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadFNodePoper_input_base6kM_I_; +text: .text%__1cHRetDataKcell_count6M_i_; +text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_; +text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_; +text: .text%__1cPCallRuntimeNodeGOpcode6kM_i_; +text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_; +text: .text%__1cLmethodKlassOklass_oop_size6kM_i_; +text: .text%__1cMdecI_memNodePoper_input_base6kM_I_; +text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_; +text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_; +text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_; +text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_; +text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_; +text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_; +text: .text%__1cGThreadOis_interrupted6Fp0i_i_; +text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_; +text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_; +text: .text%__1cSsafePoint_pollNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIPSOldGenHcompact6M_v_; +text: .text%__1cSsafePoint_pollNodeJnum_opnds6kM_I_; +text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_; +text: .text%__1cJvmSymbolsHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_; +text: .text%__1cSReferenceProcessorHoops_do6MpnKOopClosure__v_; +text: .text%__1cQObjectStartArrayFreset6M_v_; +text: .text%__1cIPSOldGenPadjust_pointers6M_v_; +text: .text%__1cScompP_mem_rRegNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJloadBNodeFreloc6kM_i_; +text: .text%__1cUandI_rReg_imm255NodeMideal_Opcode6kM_i_; +text: .text%__1cNGrowableArray4CpnKciTypeFlowFBlock__Icontains6kMrk2_i_; +text: .text%__1cScompP_mem_rRegNodeFreloc6kM_i_; +text: .text%__1cNcmovP_regNodePoper_input_base6kM_I_; +text: .text%__1cTno_rax_rdx_RegIOperJnum_edges6kM_I_; +text: .text%__1cKciTypeFlowLStateVectorJdo_aaload6MpnQciBytecodeStream__v_; +text: .text%__1cJAssemblerMemit_operand6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerMemit_operand6MpnRFloatRegisterImpl_pnMRegisterImpl_4nHAddressLScaleFactor_ipCrknQRelocationHolder__v_; +text: .text%__1cNaddL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLRethrowNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRaddI_rReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRsubI_rReg_memNodeHtwo_adr6kM_I_; +text: .text%__1cIModLNodeGOpcode6kM_i_; +text: .text%__1cIMaxINodeLbottom_type6kM_pknEType__; +text: .text%__1cFParseMdo_checkcast6M_v_; +text: .text%__1cIMulINodeGmul_id6kM_pknEType__; +text: .text%__1cMloadConINodeGis_Con6kM_I_; +text: .text%__1cIregDOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIMulDNodeGOpcode6kM_i_; +text: .text%__1cRsarL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNsubL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cHnmethodUnumber_of_dependents6kM_i_; +text: .text%__1cTconvI2L_reg_memNodeFreloc6kM_i_; +text: .text%__1cSComputeAdapterInfoIdo_float6M_v_; +text: 
.text%__1cFParseLarray_store6MnJBasicType__v_; +text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc_ii_v_: nativeLookup.o; +text: .text%JVM_FindClassFromClassLoader; +text: .text%JVM_FindClassFromBootLoader; +text: .text%__1cZCallInterpreterDirectNodeSalignment_required6kM_i_; +text: .text%__1cZCallInterpreterDirectNodePoper_input_base6kM_I_; +text: .text%__1cZCallInterpreterDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRmulL_rReg_immNodeMcisc_operand6kM_i_; +text: .text%__1cNloadConI0NodeGis_Con6kM_I_; +text: .text%__1cKstoreBNodeHtwo_adr6kM_I_; +text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc__v_: nativeLookup.o; +text: .text%__1cKRegionNodeUdepends_only_on_test6kM_i_; +text: .text%__1cMMergeMemNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cFciEnvZcall_has_multiple_targets6FpnNinstanceKlass_nMsymbolHandle_3ri_i_; +text: .text%__1cMadjust_check6FpnENode_11iipnMPhaseIterGVN__v_: ifnode.o; +text: .text%__1cPsalI_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTconvI2L_reg_memNodeHtwo_adr6kM_I_; +text: .text%__1cLPhaseValuesKis_IterGVN6M_pnMPhaseIterGVN__; +text: .text%__1cQciTypeArrayKlassJmake_impl6FnJBasicType__p0_; +text: .text%__1cFStateM_sub_Op_AddL6MpknENode__v_; +text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_; +text: .text%__1cUmembar_cpu_orderNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUmembar_cpu_orderNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNSCMemProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSCompareAndSwapNodeJideal_reg6kM_I_; +text: .text%__1cFStateW_sub_Op_MemBarCPUOrder6MpknENode__v_; +text: .text%__1cKciTypeFlowLStateVectorMdo_checkcast6MpnQciBytecodeStream__v_; +text: .text%__1cMorI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMrax_RegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%lwp_mutex_init: os_solaris.o; +text: .text%__1cJStubQdDueueGcommit6Mi_v_; +text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__; +text: .text%__1cNmodL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__; +text: .text%__1cMincI_memNodePoper_input_base6kM_I_; +text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_; +text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__; +text: .text%__1cJAssemblerEmovl6MnHAddress_i_v_; +text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cPshrI_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cRmulI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cNandI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cbACallCompiledJavaDirectNodeHtwo_adr6kM_I_; +text: .text%__1cIModINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnHoopDesc_pnNsymbolOopDesc__i_; +text: .text%__1cbLtransform_int_divide_to_long_multiply6FpnIPhaseGVN_pnENode_i_3_: divnode.o; +text: .text%__1cTno_rax_rdx_RegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cJAssemblerGmovzwl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cRmulL_rReg_immNodeErule6kM_I_; +text: .text%__1cZCallDynamicJavaDirectNodePoper_input_base6kM_I_; +text: .text%__1cHTypePtrFempty6kM_i_; +text: .text%__1cOMacroAssemblerSload_unsigned_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cOGenerateOopMapXdo_return_monitor_check6M_v_; +text: 
.text%__1cNobjArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cFStateP_sub_Op_ConvL2I6MpknENode__v_; +text: .text%__1cLOptoRuntimebBcomplete_monitor_enter_Type6F_pknITypeFunc__; +text: .text%__1cIGraphKitMnext_monitor6M_i_; +text: .text%__1cLBoxLockNode2t6Mi_v_; +text: .text%__1cRmulI_rReg_immNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cJloadFNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cIplus_adr6FpnENode_l_1_: generateOptoStub.o; +text: .text%__1cIGraphKitLshared_lock6MpnENode__pnMFastLockNode__; +text: .text%__1cHConNode2t6MpknEType__v_; +text: .text%__1cMloadConDNodeMideal_Opcode6kM_i_; +text: .text%__1cNCompileBrokerTcreate_compile_task6FpnMCompileQdDueue_inMmethodHandle_i3ipkcii_pnLCompileTask__; +text: .text%__1cLCompileTaskKinitialize6MinMmethodHandle_i1ipkcii_v_; +text: .text%__1cNCompileBrokerNallocate_task6F_pnLCompileTask__; +text: .text%__1cMCompileQdDueueDadd6MpnLCompileTask__v_; +text: .text%__1cRxorI_rReg_memNodeErule6kM_I_; +text: .text%__1cMCompileQdDueueDget6M_pnLCompileTask__; +text: .text%__1cRsarI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQleaPIdxScaleNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cSCompileTaskWrapper2t6MpnLCompileTask__v_; +text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__; +text: .text%__1cXmembar_acquire_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_MulL6MpknENode__v_; +text: .text%__1cCosPhint_no_preempt6F_v_; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_i_v_; +text: .text%__1cObox_handleNodePoper_input_base6kM_I_; +text: .text%__1cNCompileBrokerJfree_task6FpnLCompileTask__v_; +text: .text%__1cSCompileTaskWrapper2T6M_v_; +text: .text%__1cLCompileTaskEfree6M_v_; +text: .text%__1cNnegI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKDictionarybAis_valid_protection_domain6MiInMsymbolHandle_nGHandle_2_i_; +text: .text%__1cMincI_memNodeMideal_Opcode6kM_i_; +text: .text%__1cRandL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cRaddI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__; +text: .text%jni_NewString: jni.o; +text: .text%__1cRxorI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cFStateM_sub_Op_AndL6MpknENode__v_; +text: .text%__1cKloadUBNodePoper_input_base6kM_I_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVcompiledICHolderKlassXoop_is_compiledICHolder6kM_i_; +text: .text%__1cJStoreNodeUdepends_only_on_test6kM_i_; +text: .text%__1cPcmovI_reg_lNodeErule6kM_I_; +text: .text%__1cOloadConL32NodePoper_input_base6kM_I_; +text: .text%__1cNSharedRuntimebJcontinuation_for_implicit_exception6FpnKJavaThread_pCn0AVImplicitExceptionKind__3_; +text: .text%__1cRtestI_reg_immNodeHtwo_adr6kM_I_; +text: .text%__1cIimmDOperJconstantD6kM_d_; +text: .text%__1cFParsePmerge_exception6Mi_v_; +text: .text%__1cXmembar_acquire_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cNGrowableArray4CpnIciObject__2t6MpnFArena_iirk1_v_; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2pnGThread__v_; +text: .text%__1cZCallDynamicJavaDirectNodeHtwo_adr6kM_I_; +text: .text%__1cMLinkResolverbGruntime_resolve_interface_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cMLinkResolverWresolve_interface_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cNGrowableArray4CpnIciObject__JappendAll6Mpk2_v_; +text: 
.text%__1cFciEnv2t6MpnHJNIEnv__iii_v_; +text: .text%__1cRtestP_reg_memNodeFreloc6kM_i_; +text: .text%__1cNtestP_regNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNGrowableArray4CpnIciMethod__2t6MpnFArena_iirk1_v_; +text: .text%__1cNGrowableArray4CpnHciKlass__2t6MpnFArena_iirk1_v_; +text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_; +text: .text%__1cNGrowableArray4CpnPciReturnAddress__2t6MpnFArena_iirk1_v_; +text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__; +text: .text%__1cNCompileBrokerVpush_jni_handle_block6F_v_; +text: .text%__1cIJVMStateNmonitor_depth6kM_i_; +text: .text%__1cHciKlassSsuper_check_offset6M_I_; +text: .text%__1cFciEnv2T6M_v_; +text: .text%__1cKstoreBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNCompileBrokerUpop_jni_handle_block6F_v_; +text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cPcmpFastLockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateQ_sub_Op_FastLock6MpknENode__v_; +text: .text%__1cXmembar_acquire_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvD2I_reg_regNodeErule6kM_I_; +text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__; +text: .text%__1cLOptoRuntimeRmultianewarray1_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cWImplicitExceptionTableCat6kMI_I_; +text: .text%__1cWImplicitExceptionTable2t6MpknHnmethod__v_; +text: .text%__1cLVtableStubsPstub_containing6FpC_pnKVtableStub__; +text: .text%__1cLVtableStubsIcontains6FpC_i_; +text: .text%__1cNFingerprinterIdo_float6M_v_; +text: .text%__1cHnmethodbJcontinuation_for_implicit_exception6MpC_1_; +text: .text%__1cLRShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cUjmpLoopEnd_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNmodI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cUjmpLoopEnd_shortNodeMideal_Opcode6kM_i_; +text: .text%__1cKEntryPoint2t6MpC11111111_v_; +text: .text%jni_GetObjectClass: jni.o; +text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_; +text: .text%__1cRandI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cISubLNodeDsub6kMpknEType_3_3_; +text: .text%__1cJloadSNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRtestI_reg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cOloadConL32NodeHtwo_adr6kM_I_; +text: .text%__1cQshrI_rReg_CLNodePoper_input_base6kM_I_; +text: .text%__1cSstring_compareNodePoper_input_base6kM_I_; +text: .text%__1cNcmovI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMdecI_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMrax_RegLOperEtype6kM_pknEType__; +text: .text%__1cRmulI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cIXorINodeGadd_id6kM_pknEType__; +text: .text%__1cNtestP_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPcmovI_reg_gNodeHtwo_adr6kM_I_; +text: .text%__1cOPhaseIdealLoopKclone_loop6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cHi2bNodePoper_input_base6kM_I_; +text: .text%__1cRsalL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cKBinaryNodeGOpcode6kM_i_; +text: .text%__1cNxorI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cFStateO_sub_Op_Binary6MpknENode__v_; +text: .text%JVM_GetClassLoader; +text: .text%__1cMstoreSSPNodeMideal_Opcode6kM_i_; +text: .text%__1cNmulL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRxorI_rReg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cINodeHashIround_up6FI_I_; +text: 
.text%__1cHCompileKinit_start6MpnJStartNode__v_; +text: .text%__1cOPhaseTransform2t6MpnFArena_nFPhaseLPhaseNumber__v_; +text: .text%__1cLPhaseValues2t6MpnFArena_I_v_; +text: .text%__1cRaddP_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cINodeHash2t6MpnFArena_I_v_; +text: .text%__1cRaddI_rReg_memNodeHtwo_adr6kM_I_; +text: .text%__1cIJVMState2n6FL_pv_; +text: .text%__1cOMacroAssemblerFalign6Mi_v_; +text: .text%__1cOleaPIdxOffNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_; +text: .text%__1cFForteNregister_stub6FpkcpC3_v_; +text: .text%__1cMdecI_memNodeJnum_opnds6kM_I_; +text: .text%__1cIModINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%lwp_cond_init: os_solaris.o; +text: .text%__1cSmembar_releaseNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmembar_releaseNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOPhaseIdealLoopVclone_up_backedge_goo6MpnENode_22_2_; +text: .text%__1cSInterpreterCodeletKinitialize6MpkcnJBytecodesECode__v_; +text: .text%__1cTconvI2L_reg_regNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNxorI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNaddP_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_; +text: .text%__1cOloadConL32NodeErule6kM_I_; +text: .text%__1cOMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cFframeVnmethods_code_blob_do6M_v_; +text: .text%__1cHi2bNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKcmpOpUOperKless_equal6kM_i_; +text: .text%__1cWandI_rReg_imm65535NodeJnum_opnds6kM_I_; +text: .text%__1cFParseTprofile_switch_case6Mi_v_; +text: .text%__1cKNativeJumpbEcheck_verified_entry_alignment6FpC1_v_; +text: .text%__1cFParseSjump_switch_ranges6MpnENode_pnLSwitchRange_4i_v_; +text: .text%__1cFParseOmerge_new_path6Mi_v_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_; +text: .text%__1cNandI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cNmodL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cMloadConLNodeFclone6kM_pnENode__; +text: .text%__1cNtestU_regNodeJnum_opnds6kM_I_; +text: .text%__1cIimmLOperFclone6kM_pnIMachOper__; +text: .text%__1cRandL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cOleaPIdxOffNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_pkcp4_i_: arguments.o; +text: .text%__1cNCompileBrokerTis_not_compile_only6FnMmethodHandle__i_; +text: .text%__1cNCompileBrokerRassign_compile_id6FnMmethodHandle_i_I_; +text: .text%__1cNCompileBrokerTis_compile_blocking6FnMmethodHandle_i_i_; +text: .text%__1cIMulFNodeGOpcode6kM_i_; +text: .text%__1cNIdealLoopTreeQpolicy_peel_only6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeSpolicy_range_check6kMpnOPhaseIdealLoop__i_; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cNIdealLoopTreeMpolicy_align6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeNpolicy_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNtestU_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCompileBrokerZcompilation_is_prohibited6FnMmethodHandle_i_i_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_and_dispatch6MpnITemplate_nITosState__v_; +text: 
.text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_; +text: .text%__1cKC2CompilerOneeds_adapters6M_i_; +text: .text%__1cLServiceUtilLvisible_oop6FpnHoopDesc__i_; +text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_; +text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_; +text: .text%__1cIciMethodJhas_loops6kM_i_; +text: .text%__1cIciMethodVshould_print_assembly6M_i_; +text: .text%__1cOMacroAssemblerOcall_VM_helper6MpnMRegisterImpl_pCii_v_; +text: .text%__1cNloadConL0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cMincI_memNodeJnum_opnds6kM_I_; +text: .text%__1cNCompileBrokerOcheck_break_at6FnMmethodHandle_iii_i_; +text: .text%__1cJAssemblerEcall6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cNCompileBrokerQset_last_compile6FpnOCompilerThread_nMmethodHandle_ii_v_; +text: .text%__1cNCompileBrokerbAeager_compile_c2i_adapters6FpnFciEnv_pnIciMethod__v_; +text: .text%__1cNCompileBrokerbAeager_compile_i2c_adapters6FpnFciEnv_pnIciMethod__v_; +text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_; +text: .text%__1cKC2CompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cMstoreSSPNodeHis_Copy6kM_I_; +text: .text%__1cQshrI_rReg_CLNodeMideal_Opcode6kM_i_; +text: .text%__1cFciEnvPregister_method6MpnIciMethod_iiiiiipnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnQAbstractCompiler_ii_v_; +text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_; +text: .text%__1cIciMethodQbreak_at_execute6M_i_; +text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_; +text: .text%__1cFciEnvbUsystem_dictionary_modification_counter_changed6M_i_; +text: .text%__1cMelapsedTimerDadd6M0_v_; +text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_; +text: .text%__1cJStartNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cICodeHeapMinsert_after6MpnJFreeBlock_2_v_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_3pnRJavaCallArguments_nGHandle_6_6_; +text: .text%__1cFMutex2t6Mipkci_v_; +text: .text%__1cKloadUBNodeErule6kM_I_; +text: .text%__1cQsalL_rReg_CLNodeMideal_Opcode6kM_i_; +text: .text%__1cbACallCompiledJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbACallCompiledJavaDirectNodePoper_input_base6kM_I_; +text: .text%__1cTbasictype2arraycopy6FnJBasicType_i_pC_; +text: .text%__1cOLibraryCallKitQinline_arraycopy6M_i_; +text: .text%__1cPstoreImmI16NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLOptoRuntimeOarraycopy_Type6F_pknITypeFunc__; +text: .text%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_; +text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_; +text: .text%__1cLPcDescCache2t6M_v_; +text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_; +text: .text%__1cHnmethodSresolve_JNIHandles6M_v_; +text: .text%__1cRmulL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVExceptionHandlerTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_; +text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_; +text: .text%__1cHnmethodLnew_nmethod6FnMmethodHandle_iiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnQAbstractCompiler__p0_; +text: .text%__1cPcmovI_reg_lNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHnmFlagsFclear6M_v_; 
+text: .text%__1cHnmethod2n6FLi_pv_; +text: .text%__1cHnmethod2t6MpnNmethodOopDesc_iiiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnQAbstractCompiler__v_; +text: .text%__1cNaddI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cKTypeRawPtrFxdual6kM_pknEType__; +text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_; +text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_; +text: .text%__1cICodeBlobJcopy_oops6MppnI_jobject_i_v_; +text: .text%__1cFStateN_sub_Op_LoadC6MpknENode__v_; +text: .text%__1cJloadCNodeFreloc6kM_i_; +text: .text%__1cFParseQjump_if_fork_int6MpnENode_2nIBoolTestEmask__pnGIfNode__; +text: .text%__1cWandI_rReg_imm65535NodeHtwo_adr6kM_I_; +text: .text%__1cNdivL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cNmethodOopDescIset_code6MpnHnmethod__v_; +text: .text%__1cINodeHashUremove_useless_nodes6MrnJVectorSet__v_; +text: .text%__1cQUnique_Node_ListUremove_useless_nodes6MrnJVectorSet__v_; +text: .text%__1cKInlineTreeWbuild_inline_tree_root6F_p0_; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod_ii_v_; +text: .text%__1cHCompileWprint_compile_messages6M_v_; +text: .text%__1cPClassFileParserbGparse_constant_pool_double_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cQsalI_rReg_CLNodeErule6kM_I_; +text: .text%__1cHCompileRbuild_start_state6MpnJStartNode_pknITypeFunc__pnIJVMState__; +text: .text%__1cHCompileVidentify_useful_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cIciMethodRbuild_method_data6M_v_; +text: .text%__1cHCompileIOptimize6M_v_; +text: .text%__1cHCompileUremove_useless_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cSPhaseRemoveUseless2t6MpnIPhaseGVN_pnQUnique_Node_List__v_; +text: .text%__1cHCompileLFinish_Warm6M_v_; +text: .text%__1cHCompileLInline_Warm6M_i_; +text: .text%__1cPno_rax_RegLOperJnum_edges6kM_I_; +text: .text%__1cMPhaseIterGVN2t6MpnIPhaseGVN__v_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii1_v_; +text: .text%__1cIciMethodRbuild_method_data6MnMmethodHandle__v_; +text: .text%__1cSstring_compareNodeErule6kM_I_; +text: .text%__1cbAfinal_graph_reshaping_walk6FrnKNode_Stack_pnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cHCompileVfinal_graph_reshaping6M_i_; +text: .text%__1cOcompI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cScompI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cJStartNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_; +text: .text%__1cIPhaseCCPJtransform6MpnENode__2_; +text: .text%__1cHCompileNreturn_values6MpnIJVMState__v_; +text: .text%__1cbCAbstractInterpreterGeneratorQset_entry_points6MnJBytecodesECode__v_; +text: .text%__1cbCAbstractInterpreterGeneratorWset_short_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cMPhaseIterGVN2t6Mp0_v_; +text: .text%__1cIPhaseCCP2t6MpnMPhaseIterGVN__v_; +text: .text%__1cIPhaseCCP2T6M_v_; +text: .text%__1cIPhaseCCPHanalyze6M_v_; +text: .text%__1cIPhaseCCPMdo_transform6M_v_; +text: .text%__1cOcompI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNsubL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConPcNodeMideal_Opcode6kM_i_; +text: .text%__1cKExceptionsG_throw6FpnGThread_pkcinGHandle__v_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinGHandle__i_; +text: .text%__1cGThreadVset_pending_exception6MpnHoopDesc_pkci_v_; +text: .text%__1cNandL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: 
.text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_; +text: .text%__1cWpoll_return_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_; +text: .text%__1cJloadFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_; +text: .text%__1cQsalI_rReg_CLNodeJnum_opnds6kM_I_; +text: .text%__1cIMachNodeJis_MachIf6kM_pknKMachIfNode__; +text: .text%__1cPsalL_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinMsymbolHandle_4_i_; +text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__; +text: .text%__1cOClearArrayNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cHMatcherbDinterpreter_frame_pointer_reg6F_i_; +text: .text%__1cQorI_rReg_immNodeErule6kM_I_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_; +text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_; +text: .text%__1cKcmpOpUOperHgreater6kM_i_; +text: .text%__1cNCompileBrokerUcheck_adapter_result6FnMmethodHandle_ippnMBasicAdapter__i_; +text: .text%__1cJloadFNodeJnum_opnds6kM_I_; +text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cSMachC2IEntriesNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cbFunnecessary_membar_volatileNodePoper_input_base6kM_I_; +text: .text%__1cRmulI_rReg_immNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cQPSIsAliveClosureLdo_object_b6MpnHoopDesc__i_; +text: .text%__1cTCallInterpreterNodeSis_CallInterpreter6kM_pk0_; +text: .text%__1cZCallInterpreterDirectNodePcompute_padding6kMi_i_; +text: .text%__1cSMachC2IcheckICNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_; +text: .text%__1cZCallInterpreterDirectNodeKmethod_set6Ml_v_; +text: .text%__1cXMachCallInterpreterNodePret_addr_offset6M_i_; +text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__; +text: .text%__1cLOptoRuntimeInew_Type6F_pknITypeFunc__; +text: .text%__1cLBoxLockNodeDcmp6kMrknENode__I_; +text: .text%__1cMTailCallNodeGOpcode6kM_i_; +text: .text%__1cJChunkPoolMfree_all_but6ML_v_; +text: .text%__1cIGraphKitMnew_instance6MpnPciInstanceKlass__pnENode__; +text: .text%__1cPcmpD_cc_regNodePoper_input_base6kM_I_; +text: .text%__1cRsalL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cLOptoRuntimeSnew_typeArray_Type6F_pknITypeFunc__; +text: .text%__1cObox_handleNodeMideal_Opcode6kM_i_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cIGraphKitJnew_array6MpnENode_nJBasicType_pknEType_pknMTypeKlassPtr__2_; +text: .text%__1cNdecL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__; +text: .text%__1cZCallDynamicJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOemit_d64_reloc6FrnKCodeBuffer_lrknQRelocationHolder_i_v_; +text: .text%__1cRtestI_reg_immNodeErule6kM_I_; +text: .text%__1cIAddFNodeGOpcode6kM_i_; +text: .text%__1cSstring_compareNodeMideal_Opcode6kM_i_; +text: .text%__1cNmethodOopDescWcompute_has_loops_flag6M_i_; +text: .text%__1cKloadUBNodeJnum_opnds6kM_I_; +text: .text%__1cNGrowableArray4CpnHoopDesc__2t6Mii_v_; +text: .text%__1cZCallDynamicJavaDirectNodeSalignment_required6kM_i_; +text: 
.text%__1cXvirtual_call_RelocationJpack_data6M_i_; +text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cQorI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUParallelScavengeHeapIcapacity6kM_L_; +text: .text%__1cJAssemblerEcmpq6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cNnegI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cYinternal_word_RelocationJpack_data6M_i_; +text: .text%__1cKsplit_once6FpnMPhaseIterGVN_pnENode_333_v_: cfgnode.o; +text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cPDictionaryEntryVadd_protection_domain6MpnHoopDesc__v_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_; +text: .text%__1cRCardTableModRefBSPclear_MemRegion6MnJMemRegion__v_; +text: .text%__1cOleaPIdxOffNodeLbottom_type6kM_pknEType__; +text: .text%__1cNdivL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cFParsebLincrement_and_test_invocation_counter6Mi_v_; +text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%JVM_IsInterrupted; +text: .text%__1cFParseRarray_store_check6M_v_; +text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cScompL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cTmembar_volatileNodeMideal_Opcode6kM_i_; +text: .text%__1cTCallDynamicJavaNodeSis_CallDynamicJava6kM_pk0_; +text: .text%__1cCosHSolarisFEventEpark6M_v_; +text: .text%__1cIMinINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cODeoptimizationYtrap_state_is_recompiled6Fi_i_; +text: .text%__1cXSignatureHandlerLibraryKinitialize6F_v_; +text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_; +text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_; +text: .text%__1cNGrowableArray4CL_Efind6kMrkL_i_; +text: .text%__1cUandI_rReg_imm255NodePoper_input_base6kM_I_; +text: .text%__1cSReferenceProcessorZadd_to_discovered_list_mt6MppnHoopDesc_23_v_; +text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii_v_; +text: .text%__1cHOrINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cNObjectMonitorbAEntryQdDueue_SelectSuccessor6M_pnMObjectWaiter__; +text: .text%__1cNObjectMonitorREntryQdDueue_insert6MpnMObjectWaiter_i_v_; +text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cJAssemblerEpopq6MpnMRegisterImpl__v_; +text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cISubLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMTypeKlassPtrRcast_to_exactness6kMi_pknEType__; +text: .text%__1cNloadConI0NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSComputeAdapterInfoHdo_char6M_v_; +text: .text%__1cKPerfMemoryFalloc6FL_pc_; +text: .text%__1cIPerfData2T6M_v_; +text: .text%__1cIPerfDataMcreate_entry6MnJBasicType_LL_v_; +text: .text%__1cKPerfMemoryMmark_updated6F_v_; +text: .text%__1cPcmovI_reg_gNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEfrom6F_pnMRegisterImpl__; +text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_; +text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_; +text: .text%__1cPPerfDataManagerIadd_item6FpnIPerfData_i_v_; +text: .text%__1cLStrCompNodeKmatch_edge6kMI_I_; +text: 
.text%__1cOjmpLoopEndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOjmpLoopEndNodeOis_pc_relative6kM_i_; +text: .text%__1cOjmpLoopEndNodeTmay_be_short_branch6kM_i_; +text: .text%jni_ReleaseStringUTFChars: jni.o; +text: .text%jni_GetStringUTFChars: jni.o; +text: .text%__1cFStateW_sub_Op_CountedLoopEnd6MpknENode__v_; +text: .text%__1cNFingerprinterIdo_short6M_v_; +text: .text%__1cOcompU_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cJAssemblerEincq6MpnMRegisterImpl__v_; +text: .text%__1cFTypeDEmake6Fd_pk0_; +text: .text%__1cScompU_rReg_memNodeFreloc6kM_i_; +text: .text%__1cNtestL_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFParseLdo_newarray6MnJBasicType__v_; +text: .text%__1cWCallLeafNoFPDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMNativeLookupGlookup6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cWCallLeafNoFPDirectNodeFreloc6kM_i_; +text: .text%__1cSstring_compareNodeJnum_opnds6kM_I_; +text: .text%__1cFStateU_sub_Op_CallLeafNoFP6MpknENode__v_; +text: .text%JVM_FindLibraryEntry; +text: .text%__1cSObjectSynchronizerHinflate6FpnHoopDesc__pnNObjectMonitor__; +text: .text%JVM_GetMethodIxExceptionTableEntry; +text: .text%__1cNObjectMonitorHRecycle6M_v_; +text: .text%__1cISubLNodeGadd_id6kM_pknEType__; +text: .text%__1cNmodI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHMatcherOc_return_value6Fii_nLOptoRegPair__; +text: .text%__1cRxorI_rReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMachNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQsarL_rReg_63NodeMideal_Opcode6kM_i_; +text: .text%__1cRmulI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMachOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cIMachOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_; +text: .text%__1cNandI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cIDivINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNnegI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cFStateS_sub_Op_ClearArray6MpknENode__v_; +text: .text%__1cRaddL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFciEnvWis_dependence_violated6FpnMklassOopDesc_pnNmethodOopDesc__i_; +text: .text%__1cHCompile2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cIXorINodeJideal_reg6kM_I_; +text: .text%__1cMrep_stosNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRtestI_reg_immNodePoper_input_base6kM_I_; +text: .text%__1cKC2CompilerPcompile_adapter6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cMrep_stosNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMAdapterCacheGinsert6MpnLAdapterInfo_pnMBasicAdapter__v_; +text: .text%__1cFStateO_sub_Op_StoreL6MpknENode__v_; +text: .text%__1cLAdapterInfoHcopy_to6Mp0_v_; +text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__; +text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__; +text: .text%__1cIMinINodeGadd_id6kM_pknEType__; +text: .text%__1cNdecL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_; +text: .text%__1cKstoreLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGThreadLnmethods_do6M_v_; +text: .text%__1cKmul_hiNodeMideal_Opcode6kM_i_; +text: .text%__1cKstoreLNodeFreloc6kM_i_; +text: .text%__1cMstoreSSPNodeLbottom_type6kM_pknEType__; +text: .text%__1cRsubI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cPsarL_rReg_2NodeMideal_Opcode6kM_i_; +text: .text%__1cTconvF2D_reg_memNodeMideal_Opcode6kM_i_; +text: 
.text%__1cRmulL_rReg_immNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNmodL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cRmulL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeHtwo_adr6kM_I_; +text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_; +text: .text%__1cScompU_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRInterpreterOopMapIis_empty6M_i_; +text: .text%__1cNFingerprinterHdo_char6M_v_; +text: .text%__1cOrepush_if_args6FpnFParse_pnENode_3_v_: parse2.o; +text: .text%__1cMloadConDNodeLbottom_type6kM_pknEType__; +text: .text%__1cNGrowableArray4CpnHoopDesc__Uclear_and_deallocate6M_v_; +text: .text%__1cMrdx_RegLOperJnum_edges6kM_I_; +text: .text%__1cLMachUEPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZget_mirror_from_signature6FnMmethodHandle_pnPSignatureStream_pnGThread__pnHoopDesc__; +text: .text%__1cNGrowableArray4CpnJNode_List__Egrow6Mi_v_; +text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cNObjectMonitorGenter26MpnGThread__v_; +text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_; +text: .text%__1cJCodeCacheLgc_prologue6F_v_; +text: .text%__1cJCodeCacheLgc_epilogue6F_v_; +text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_; +text: .text%__1cJMarkSweepMadjust_marks6F_v_; +text: .text%__1cJMarkSweepNrestore_marks6F_v_; +text: .text%__1cHThreadsLgc_epilogue6F_v_; +text: .text%__1cKDictionaryMdo_unloading6MpnRBoolObjectClosure__i_; +text: .text%__1cHThreadsLgc_prologue6F_v_; +text: .text%__1cNGrowableArray4CpnFKlass__2t6Mii_v_; +text: .text%__1cQSystemDictionaryMdo_unloading6FpnRBoolObjectClosure__i_; +text: .text%__1cQSystemDictionaryPplaceholders_do6FpnKOopClosure__v_; +text: .text%__1cSReferenceProcessorPoops_do_statics6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryYalways_strong_classes_do6FpnKOopClosure__v_; +text: .text%__1cUPSAdaptiveSizePolicyWmajor_collection_begin6M_v_; +text: .text%__1cUPSAdaptiveSizePolicyUmajor_collection_end6MLnHGCCauseFCause__v_; +text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_; +text: .text%__1cNGrowableArray4CpnFKlass__Uclear_and_deallocate6M_v_; +text: .text%__1cKPSYoungGenHcompact6M_v_; +text: .text%__1cKPSYoungGenPadjust_pointers6M_v_; +text: .text%__1cKPSYoungGenKprecompact6M_v_; +text: .text%__1cLPSMarkSweepQinvoke_no_policy6Fpii_v_; +text: .text%__1cLPSMarkSweepPallocate_stacks6F_v_; +text: .text%__1cLPSMarkSweepRdeallocate_stacks6F_v_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase16Fi_v_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase26F_v_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase36F_v_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase46F_v_; +text: .text%__1cLPSMarkSweepbAreset_millis_since_last_gc6F_v_; +text: .text%__1cUPSMarkSweepDecoratorbHset_destination_decorator_tenured6F_v_; +text: .text%__1cUPSMarkSweepDecoratorbIset_destination_decorator_perm_gen6F_v_; +text: .text%__1cNExceptionBlobHoops_do6MpnKOopClosure__v_; +text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cQUncommonTrapBlobHoops_do6MpnKOopClosure__v_; +text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_; +text: .text%__1cIPSOldGenKprecompact6M_v_; +text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_; +text: .text%__1cRCardTableModRefBSEis_a6MnKBarrierSetEName__i_; +text: 
.text%__1cJPSPermGenQcompute_new_size6ML_v_; +text: .text%__1cJPSPermGenKprecompact6M_v_; +text: .text%__1cJCodeCacheMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_; +text: .text%__1cPcmpD_cc_regNodeHtwo_adr6kM_I_; +text: .text%__1cbFunnecessary_membar_volatileNodeHtwo_adr6kM_I_; +text: .text%__1cIDivINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIciSymbolHas_utf86M_pkc_; +text: .text%__1cQorI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_; +text: .text%__1cKJavaThreadLnmethods_do6M_v_; +text: .text%__1cCosTnative_java_library6F_pv_; +text: .text%__1cSTailCalljmpIndNodePoper_input_base6kM_I_; +text: .text%__1cOstackSlotPOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cOstackSlotPOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cScompL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cNandI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2ipnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cPstoreImmI16NodeFreloc6kM_i_; +text: .text%__1cPstoreImmI16NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_; +text: .text%__1cVlookup_special_native6Fpc_pC_: nativeLookup.o; +text: .text%jni_ReleaseStringCritical: jni.o; +text: .text%__1cMNativeLookupMlookup_style6FnMmethodHandle_pcpkciiripnGThread__pC_; +text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_; +text: .text%jni_GetStringCritical: jni.o; +text: .text%__1cSInterpreterRuntimeTnmethod_entry_point6FpnKJavaThread_pnNmethodOopDesc_pnHnmethod__pC_; +text: .text%__1cIPSOldGenOgen_size_limit6M_L_; +text: .text%__1cTI2CAdapterGeneratorSstd_verified_entry6FnMmethodHandle__pC_; +text: .text%__1cTI2CAdapterGeneratorUgenerate_i2c_adapter6FnMmethodHandle__pnKI2CAdapter__; +text: .text%__1cUPSAdaptiveSizePolicybQpromo_increment_with_supplement_aligned_up6ML_L_; +text: .text%__1cHnmethodXinterpreter_entry_point6M_pC_; +text: .text%__1cUParallelScavengeHeapOresize_old_gen6ML_v_; +text: .text%__1cUPSAdaptiveSizePolicyPpromo_increment6MLI_L_; +text: .text%__1cWandI_rReg_imm65535NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIPSOldGenGresize6ML_v_; +text: .text%__1cKConv2BNodeGOpcode6kM_i_; +text: .text%__1cObox_handleNodeHtwo_adr6kM_I_; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_; +text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__; +text: .text%__1cNstoreImmINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cLConvI2FNodeGOpcode6kM_i_; +text: .text%__1cNmethodOopDescVparameter_annotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cNmethodOopDescLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cNcmovI_regNodeHtwo_adr6kM_I_; +text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__; +text: .text%__1cIGraphKitbKcombine_and_pop_all_exception_states6M_pnNSafePointNode__; +text: .text%__1cLRethrowNode2t6MpnENode_22222_v_; +text: .text%__1cHCompileSrethrow_exceptions6MpnIJVMState__v_; +text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__; +text: .text%__1cKReflectionTget_exception_types6FnMmethodHandle_pnGThread__nOobjArrayHandle__; +text: 
.text%__1cNinstanceKlassQmethod_index_for6kMpnNmethodOopDesc_pnGThread__i_; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_i_v_; +text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__; +text: .text%__1cFStateP_sub_Op_Rethrow6MpknENode__v_; +text: .text%__1cQComputeCallStackIdo_array6Mii_v_; +text: .text%__1cQsalL_rReg_CLNodePoper_input_base6kM_I_; +text: .text%__1cNdecL_rRegNodeErule6kM_I_; +text: .text%__1cLRethrowNodeJideal_reg6kM_I_; +text: .text%__1cMNativeLookupLlookup_base6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cNstoreImmINodeFreloc6kM_i_; +text: .text%__1cURethrowExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMNativeLookupNpure_jni_name6FnMmethodHandle__pc_; +text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_; +text: .text%__1cURethrowExceptionNodeFreloc6kM_i_; +text: .text%__1cTCompareAndSwapLNode2t6MpnENode_2222_v_; +text: .text%__1cQshrI_rReg_CLNodeJnum_opnds6kM_I_; +text: .text%__1cSCompareAndSwapNode2t6MpnENode_2222_v_; +text: .text%__1cTcompareAndSwapLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTcompareAndSwapLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOLibraryCallKitRinline_unsafe_CAS6MnJBasicType__i_; +text: .text%__1cIProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateX_sub_Op_CompareAndSwapL6MpknENode__v_; +text: .text%__1cNSCMemProjNodeJideal_reg6kM_I_; +text: .text%__1cTcompareAndSwapLNodeFreloc6kM_i_; +text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_; +text: .text%__1cOcompP_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPsarI_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_; +text: .text%__1cMmulD_immNodeMideal_Opcode6kM_i_; +text: .text%__1cPmethodDataKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cMaddF_regNodePoper_input_base6kM_I_; +text: .text%__1cPcmpD_cc_regNodeMideal_Opcode6kM_i_; +text: .text%__1cJLoadPNodeMstore_Opcode6kM_i_; +text: .text%__1cbACallCompiledJavaDirectNodeKmethod_set6Ml_v_; +text: .text%__1cYMachCallCompiledJavaNodePret_addr_offset6M_i_; +text: .text%__1cJCMoveNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKciTypeFlowLStateVectorEtrap6MpnQciBytecodeStream_pnHciKlass_i_v_; +text: .text%__1cIDivLNodeLbottom_type6kM_pknEType__; +text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cJloadFNodeHtwo_adr6kM_I_; +text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_; +text: .text%__1cNobjArrayKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cOGenerateOopMapKinterp_all6M_v_; +text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_; +text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_; +text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_; +text: .text%__1cFframeZinterpreter_frame_set_mdx6Ml_v_; +text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_; +text: .text%__1cQorI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cOGenerateOopMapTmethodsig_to_effect6MpnNsymbolOopDesc_ipnNCellTypeState__i_; +text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_; +text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cOGenerateOopMapKinit_state6M_v_; +text: .text%__1cPClassFileParserbFparse_constant_pool_float_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cRmulL_rReg_immNodeHtwo_adr6kM_I_; +text: 
.text%__1cOGenerateOopMapNinitialize_bb6M_v_; +text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_; +text: .text%__1cQciBytecodeStreamFtable6MnJBytecodesECode__2_; +text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_; +text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_; +text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_; +text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_; +text: .text%__1cRxorI_rReg_memNodeHtwo_adr6kM_I_; +text: .text%__1cNmulI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cFStateM_sub_Op_XorI6MpknENode__v_; +text: .text%__1cISubLNodeJideal_reg6kM_I_; +text: .text%__1cLStrCompNodeLbottom_type6kM_pknEType__; +text: .text%__1cQOopMapCacheEntryFflush6M_v_; +text: .text%__1cQOopMapCacheEntryRallocate_bit_mask6M_v_; +text: .text%__1cQOopMapCacheEntryTdeallocate_bit_mask6M_v_; +text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_; +text: .text%__1cRsalL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPsalL_rReg_1NodePoper_input_base6kM_I_; +text: .text%__1cOstackSlotPOperJnum_edges6kM_I_; +text: .text%__1cOPhaseIdealLoopOadd_constraint6MiipnENode_22p23_v_; +text: .text%jni_IsAssignableFrom: jni.o; +text: .text%__1cOstackSlotPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFParseWcheck_interpreter_type6MpnENode_pknEType_rpnNSafePointNode__2_; +text: .text%__1cFParseXfetch_interpreter_state6MipknEType_pnENode__5_; +text: .text%__1cObox_handleNodeJnum_opnds6kM_I_; +text: .text%__1cNaddP_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cSComputeAdapterInfoHdo_byte6M_v_; +text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_; +text: .text%__1cOGenerateOopMapKpp_new_ref6MpnNCellTypeState_i_v_; +text: .text%__1cNcmovI_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_; +text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_; +text: .text%__1cTOopMapForCacheEntry2t6MnMmethodHandle_ipnQOopMapCacheEntry__v_; +text: .text%__1cPcmpD_cc_immNodeMideal_Opcode6kM_i_; +text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_; +text: .text%__1cTOopMapForCacheEntryOreport_results6kM_i_; +text: .text%__1cTconvF2D_reg_memNodePoper_input_base6kM_I_; +text: .text%__1cNdivL_rRegNodeErule6kM_I_; +text: .text%__1cRmulL_rReg_immNodeQuse_cisc_RegMask6M_v_; +text: .text%JVM_GetCallerClass; +text: .text%__1cQsalL_rReg_CLNodeErule6kM_I_; +text: .text%__1cJloadLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNloadConP0NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXMachCallDynamicJavaNodePret_addr_offset6M_i_; +text: .text%__1cRxorI_rReg_immNodeErule6kM_I_; +text: .text%__1cZCallDynamicJavaDirectNodePcompute_padding6kMi_i_; +text: .text%__1cFParseScreate_jump_tables6MpnENode_pnLSwitchRange_4_i_; +text: .text%__1cZCallDynamicJavaDirectNodeKmethod_set6Ml_v_; +text: .text%__1cPcmovI_reg_lNodeHtwo_adr6kM_I_; +text: .text%__1cLConvD2INodeGOpcode6kM_i_; +text: .text%__1cNcmovP_regNodeMideal_Opcode6kM_i_; +text: .text%__1cTconvI2F_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cKstorePNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWPredictedCallGeneratorJis_inline6kM_i_; +text: .text%__1cUjmpLoopEnd_shortNodeJis_Branch6kM_I_; +text: .text%__1cQorI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_; +text: .text%__1cUjmpLoopEnd_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cFParseSjump_if_false_fork6MpnGIfNode_ii_v_; +text: .text%__1cbFloadConL_0x6666666666666667NodeMideal_Opcode6kM_i_; +text: 
.text%__1cJAssemblerEshll6MpnMRegisterImpl_i_v_; +text: .text%__1cOjmpLoopEndNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cUjmpLoopEnd_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPCountedLoopNodeGstride6kM_pnENode__; +text: .text%__1cHi2bNodeJnum_opnds6kM_I_; +text: .text%__1cHTypeAryFxdual6kM_pknEType__; +text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_; +text: .text%__1cKstoreFNodeHtwo_adr6kM_I_; +text: .text%__1cNnegI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLencode_copy6FrnKCodeBuffer_ii_v_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvI2F_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cUCallNativeDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cKmul_hiNodePoper_input_base6kM_I_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_pkcnGHandle_6_6_; +text: .text%__1cPcmpD_cc_regNodeMcisc_operand6kM_i_; +text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_; +text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_; +text: .text%__1cPcmpD_cc_regNodeErule6kM_I_; +text: .text%__1cNtestU_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPSPromotionLABRunallocate_object6MpnHoopDesc__i_; +text: .text%__1cPcmpD_cc_immNodeHtwo_adr6kM_I_; +text: .text%__1cOPhaseIdealLoopJdo_unroll6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cRandL_rReg_immNodeErule6kM_I_; +text: .text%__1cNloadConP0NodeGis_Con6kM_I_; +text: .text%__1cIMulINodeKmul_opcode6kM_i_; +text: .text%__1cNdivL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cIMulINodeKadd_opcode6kM_i_; +text: .text%__1cRxorI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPregister_native6FnLKlassHandle_nMsymbolHandle_1pCpnGThread__i_: jni.o; +text: .text%__1cTno_rax_rdx_RegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cOtypeArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cTno_rax_rdx_RegLOperJnum_edges6kM_I_; +text: .text%__1cOCallNativeNodeGOpcode6kM_i_; +text: .text%__1cQsalI_rReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSobjArrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cJAssemblerDjmp6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cLRShiftLNodeJideal_reg6kM_I_; +text: .text%jni_SetBooleanField: jni.o; +text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cIModLNodeLbottom_type6kM_pknEType__; +text: .text%__1cRxorI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHMonitor2t6Mipkci_v_; +text: .text%__1cGHandle2t6MpnGThread_pnHoopDesc__v_; +text: .text%__1cQorI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cODeoptimizationVtrap_state_has_reason6Fii_i_; +text: .text%__1cOloadConL32NodeMideal_Opcode6kM_i_; +text: .text%__1cNGrowableArray4Cpv_Egrow6Mi_v_; +text: .text%jni_GetFieldID: jni.o; +text: .text%__1cNGrowableArray4Cl_Egrow6Mi_v_; +text: .text%__1cXjava_lang_ref_ReferenceOset_discovered6FpnHoopDesc_2_v_; +text: .text%__1cLResourceObj2n6FLn0APallocation_type__pv_; +text: .text%__1cFframeZinterpreter_frame_set_mdp6MpC_v_; +text: .text%__1cJLoadPNodeUdepends_only_on_test6kM_i_; +text: .text%__1cFBlockNset_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_; +text: .text%__1cIciObject2t6MpnHciKlass__v_; +text: .text%__1cScompL_rReg_immNodeErule6kM_I_; +text: .text%__1cQshrI_rReg_CLNodeErule6kM_I_; +text: .text%__1cNaddL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; 
+text: .text%__1cFStateT_sub_Op_ThreadLocal6MpknENode__v_; +text: .text%__1cVCallRuntimeDirectNodeHtwo_adr6kM_I_; +text: .text%__1cKciTypeFlowOsplit_range_at6Mi_pn0AFRange__; +text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__; +text: .text%__1cXjvm_define_class_common6FpnHJNIEnv__pkcpnI_jobject_pkWi53pnGThread__pnH_jclass__: jvm.o; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcinMsymbolHandle_4nGHandle_6_v_; +text: .text%__1cMmulD_immNodePoper_input_base6kM_I_; +text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_; +text: .text%__1cMmulF_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNGrowableArray4CpnKStackValue__2t6Mii_v_; +text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_; +text: .text%__1cRandL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pCi_v_; +text: .text%__1cUandI_rReg_imm255NodeErule6kM_I_; +text: .text%__1cRmulL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPsarL_rReg_2NodePoper_input_base6kM_I_; +text: .text%__1cJAssemblerGpushaq6M_v_; +text: .text%__1cOMethodLivenessKBasicBlockFsplit6Mi_p1_; +text: .text%__1cFStateP_sub_Op_RShiftL6MpknENode__v_; +text: .text%__1cMrsi_RegPOperJnum_edges6kM_I_; +text: .text%__1cMstoreSSPNodePoper_input_base6kM_I_; +text: .text%__1cScompL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cKCodeBufferWinsert_double_constant6Md_pC_; +text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNaddP_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFParseHdo_irem6M_v_; +text: .text%__1cRsarL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_2_v_; +text: .text%__1cHi2bNodeHtwo_adr6kM_I_; +text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cJAssemblerFmovsd6MnHAddress_pnRFloatRegisterImpl__v_; +text: .text%__1cTCallInterpreterNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cXMachCallInterpreterNodeWis_MachCallInterpreter6M_p0_; +text: .text%__1cFStateX_sub_Op_CallInterpreter6MpknENode__v_; +text: .text%__1cZCallInterpreterDirectNodeFreloc6kM_i_; +text: .text%__1cMStartC2INodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cZInterpreterMacroAssemblerYtest_method_data_pointer6MpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cMStartC2INodeKc2i_domain6FpknJTypeTuple__3_; +text: .text%__1cHCompilebMGenerate_Compiled_To_Interpreter_Graph6MpknITypeFunc_pC_v_; +text: .text%__1cZCallInterpreterDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFciEnvUregister_c2i_adapter6MpnIciMethod_pnJOopMapSet_pnKCodeBuffer_ii_v_; +text: .text%__1cSMachC2IcheckICNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSMachC2IEntriesNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRtestI_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHMatcherbAinterpreter_method_oop_reg6F_i_; +text: .text%__1cHMatcherXcompiler_method_oop_reg6F_i_; +text: .text%__1cIciMethodRinterpreter_entry6M_pC_; 
+text: .text%__1cSTailCalljmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmpD_cc_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cSTailCalljmpIndNodeGpinned6kM_i_; +text: .text%__1cSTailCalljmpIndNodeHtwo_adr6kM_I_; +text: .text%jni_SetByteArrayRegion: jni.o; +text: .text%__1cHBoxNodeGOpcode6kM_i_; +text: .text%__1cPcmpD_cc_regNodeJnum_opnds6kM_I_; +text: .text%__1cKC2IAdapter2t6MpnKCodeBuffer_iIpnJOopMapSet_i_v_; +text: .text%__1cKC2IAdapterPnew_c2i_adapter6FpnKCodeBuffer_IpnJOopMapSet_i_p0_; +text: .text%__1cKC2IAdapter2n6FLI_pv_; +text: .text%__1cJAssemblerFmovss6MnHAddress_pnRFloatRegisterImpl__v_; +text: .text%__1cIMulINodeJideal_reg6kM_I_; +text: .text%__1cKCMovePNodeGOpcode6kM_i_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF_vc_v_; +text: .text%__1cGciType2t6MpnHciKlass__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_; +text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_; +text: .text%__1cFParseTjump_if_always_fork6Mii_v_; +text: .text%__1cKloadUBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMmulF_immNodePoper_input_base6kM_I_; +text: .text%__1cPcmpD_cc_immNodePoper_input_base6kM_I_; +text: .text%__1cUciInstanceKlassKlassEmake6F_p0_; +text: .text%__1cJAssemblerDhlt6M_v_; +text: .text%__1cOMacroAssemblerEstop6Mpkc_v_; +text: .text%__1cQComputeCallStackIdo_float6M_v_; +text: .text%__1cIciObjectUis_array_klass_klass6M_i_; +text: .text%__1cKciTypeFlowLStateVectorLdo_newarray6MpnQciBytecodeStream__v_; +text: .text%__1cWResolveOopMapConflictsRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cJloadFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmovI_reg_lNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciSymbolHbyte_at6Mi_i_; +text: .text%__1cNFingerprinterHdo_byte6M_v_; +text: .text%__1cENode2t6Mp0111111_v_; +text: .text%__1cIMaxINodeGadd_id6kM_pknEType__; +text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_; +text: .text%__1cMmulD_immNodeErule6kM_I_; +text: .text%__1cMnegD_regNodePoper_input_base6kM_I_; +text: .text%__1cFframeVshould_be_deoptimized6kM_i_; +text: .text%__1cMaddF_regNodeMideal_Opcode6kM_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorThas_signature_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorVhas_annotations_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorbFhas_parameter_annotations_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__; +text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_; +text: .text%JVM_DefineClassWithSource; +text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__; +text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__; +text: 
.text%__1cKmul_hiNodeJnum_opnds6kM_I_; +text: .text%__1cKstoreFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstoreBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRxorI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cNsubI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cJAssemblerExorl6MpnMRegisterImpl_2_v_; +text: .text%__1cHMatcherXinterpreter_arg_ptr_reg6F_i_; +text: .text%__1cINegDNodeGOpcode6kM_i_; +text: .text%__1cNdecL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__; +text: .text%__1cRsarI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cNcmovP_regNodeJnum_opnds6kM_I_; +text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_; +text: .text%__1cMloadConDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOptoRuntimeVresolve_static_call_C6FpnKJavaThread__pC_; +text: .text%__1cSCompiledStaticCallIis_clean6kM_i_; +text: .text%__1cMrsi_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cTAbstractInterpreterLdeopt_entry6FnITosState_i_pC_; +text: .text%__1cRindIndexScaleOperNconstant_disp6kM_i_; +text: .text%__1cQorI_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cRtestI_reg_immNodeJnum_opnds6kM_I_; +text: .text%jni_NewStringUTF: jni.o; +text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_; +text: .text%__1cNtestI_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerGpush_l6MpnMRegisterImpl__v_; +text: .text%__1cRxorI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerFpop_i6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cObox_handleNodeErule6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerIpush_ptr6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cOleaPIdxOffNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsarL_rReg_63NodePoper_input_base6kM_I_; +text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_i_v_; +text: .text%__1cHCompileRmake_vm_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cNmulI_rRegNodeErule6kM_I_; +text: .text%__1cNGrowableArray4Ci_2t6Mii_v_; +text: .text%__1cQsalL_rReg_CLNodeJnum_opnds6kM_I_; +text: .text%__1cNGrowableArray4Ci_Uclear_and_deallocate6M_v_; +text: .text%__1cPCountedLoopNode2t6MpnENode_2_v_; +text: .text%__1cSCountedLoopEndNode2t6MpnENode_2ff_v_; +text: .text%__1cJloadDNodeMideal_Opcode6kM_i_; +text: .text%__1cENodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIDivLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMmulD_regNodePoper_input_base6kM_I_; +text: .text%__1cTconvF2D_reg_memNodeJnum_opnds6kM_I_; +text: .text%__1cIModINodeJideal_reg6kM_I_; +text: .text%__1cYinternal_word_RelocationGtarget6M_pC_; +text: .text%__1cObox_handleNodeLbottom_type6kM_pknEType__; +text: .text%__1cbDreorder_based_on_method_index6FpnPobjArrayOopDesc_1ppnHoopDesc__v_: methodOop.o; +text: .text%__1cPshrL_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cUverify_byte_codes_fn6F_pv_: verifier.o; +text: .text%__1cMLinkResolverbPlinktime_resolve_interface_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%JVM_GetClassCPTypes; +text: .text%__1cQComputeCallStackHdo_byte6M_v_; +text: 
.text%JVM_GetClassCPEntriesCount; +text: .text%JVM_GetClassFieldsCount; +text: .text%JVM_GetClassMethodsCount; +text: .text%__1cINodeHashEgrow6M_v_; +text: .text%__1cJloadBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKmul_hiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerEnegq6MpnMRegisterImpl__v_; +text: .text%__1cNmodL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKloadUBNodeHtwo_adr6kM_I_; +text: .text%__1cRxorI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cVCallRuntimeDirectNodePoper_input_base6kM_I_; +text: .text%__1cSstring_compareNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__; +text: .text%__1cNsubL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOjmpLoopEndNodeGnegate6M_v_; +text: .text%__1cQorI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVCallRuntimeDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPsalL_rReg_1NodeErule6kM_I_; +text: .text%__1cPcmpD_cc_immNodeErule6kM_I_; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_2_v_; +text: .text%__1cbCAbstractInterpreterGeneratorVset_vtos_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cHCompileQgrow_alias_types6M_v_; +text: .text%__1cUandI_rReg_imm255NodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_CallIntMethod: jni.o; +text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cPno_rax_RegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMrdx_RegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNmulI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cNxorI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTconvF2D_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIUniverseWreinitialize_vtable_of6FpnFKlass_pnGThread__v_; +text: .text%__1cJLoadINodeMstore_Opcode6kM_i_; +text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cbFunnecessary_membar_volatileNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cENodeEgetd6kM_d_; +text: .text%__1cICmpFNodeGOpcode6kM_i_; +text: .text%__1cLOptoRuntimeThandle_wrong_method6FpnKJavaThread__pC_; +text: .text%__1cNGrowableArray4CpnOMethodLivenessKBasicBlock__Egrow6Mi_v_; +text: .text%__1cKstoreFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%JVM_SetClassSigners; +text: .text%__1cNdivL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompI_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZCallDynamicJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRandL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cOstackSlotPOperFscale6kM_i_; +text: .text%__1cHnmethodPis_dependent_on6MpnMklassOopDesc__i_; +text: .text%__1cMdecI_memNodeHtwo_adr6kM_I_; +text: .text%__1cSalign_to_page_size6FL_L_: heap.o; +text: .text%__1cNmulI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cOstackSlotPOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cPsarL_rReg_2NodeErule6kM_I_; +text: .text%__1cOPhaseIdealLoopUpeeled_dom_test_elim6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%jni_NewByteArray: jni.o; +text: .text%__1cYinternal_word_RelocationFvalue6M_pC_; +text: .text%__1cYinternal_word_RelocationWfix_relocation_at_move6Ml_v_; +text: .text%__1cFStateM_sub_Op_SubL6MpknENode__v_; +text: .text%__1cJAssemblerSemit_arith_operand6MipnMRegisterImpl_nHAddress_i_v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_nHAddress__v_; 
+text: .text%__1cMaddF_regNodeMcisc_operand6kM_i_; +text: .text%__1cRsubI_rReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNloadConPcNodeHtwo_adr6kM_I_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_deopt_entry_for6MnITosState_i_pC_; +text: .text%__1cKLoadPCNodeGOpcode6kM_i_; +text: .text%__1cTconvI2F_reg_regNodeMcisc_operand6kM_i_; +text: .text%__1cOstackSlotPOperEtype6kM_pknEType__; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_return_entry_for6MnITosState_i_pC_; +text: .text%__1cTconvD2I_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cMstoreSSPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNGrowableArray4Ci_Icontains6kMrki_i_; +text: .text%__1cKstoreBNodeFreloc6kM_i_; +text: .text%__1cObox_handleNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMjniIdPrivateGid_for6FnTinstanceKlassHandle_i_l_; +text: .text%__1cNget_method_id6FpnHJNIEnv__pnH_jclass_pkc5ipnGThread__pnK_jmethodID__: jni.o; +text: .text%__1cQshrI_rReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_SystemTout_offset_in_bytes6F_i_; +text: .text%__1cMjniIdSupportNto_jmethod_id6FpnNmethodOopDesc__pnK_jmethodID__; +text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__; +text: .text%__1cQjava_lang_SystemSin_offset_in_bytes6F_i_; +text: .text%__1cRandL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_2_v_; +text: .text%__1cbACallCompiledJavaDirectNodeFreloc6kM_i_; +text: .text%__1cIAddFNodeLbottom_type6kM_pknEType__; +text: .text%__1cUCallCompiledJavaNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cFStateY_sub_Op_CallCompiledJava6MpknENode__v_; +text: .text%__1cFciEnvUregister_i2c_adapter6MpnIciMethod_pnJOopMapSet_pnKCodeBuffer_i_v_; +text: .text%__1cbACallCompiledJavaDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMStartI2CNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cHCompilebMGenerate_Interpreter_To_Compiled_Graph6MpknITypeFunc__v_; +text: .text%__1cPciObjectFactoryPinsert_non_perm6Mrpn0ANNonPermObject_pnHoopDesc_pnIciObject__v_; +text: .text%__1cKI2CAdapter2n6FLI_pv_; +text: .text%__1cKCodeBufferVinsert_float_constant6Mf_pC_; +text: .text%__1cbACallCompiledJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKI2CAdapterPnew_i2c_adapter6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cKmul_hiNodeErule6kM_I_; +text: .text%__1cKI2CAdapter2t6MpnKCodeBuffer_pnJOopMapSet_ii_v_; +text: .text%__1cbBinitialize_itable_for_klass6FpnMklassOopDesc__v_; +text: .text%__1cFJNIidEfind6Mi_p0_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6ML_v_; +text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_; +text: .text%__1cILogDNodeGOpcode6kM_i_; +text: .text%__1cXSignatureHandlerLibraryLset_handler6FpnKCodeBuffer__pC_; +text: .text%JVM_IsPrimitiveClass; +text: .text%__1cIDivDNodeGOpcode6kM_i_; +text: .text%__1cMnegD_regNodeMideal_Opcode6kM_i_; +text: .text%__1cJCMoveNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_; +text: .text%__1cQjava_lang_ThreadJis_daemon6FpnHoopDesc__i_; +text: .text%__1cJAssemblerEcmpl6MnHAddress_i_v_; +text: .text%__1cHi2bNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLimmI_24OperJnum_edges6kM_I_; +text: .text%__1cRxorI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFTypeDJsingleton6kM_i_; +text: 
.text%__1cPsalI_rReg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIModLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cUPipeline_Use_Element2t6MIIIinXPipeline_Use_Cycle_Mask__v_; +text: .text%__1cTmembar_volatileNodePoper_input_base6kM_I_; +text: .text%__1cNloadConPcNodePoper_input_base6kM_I_; +text: .text%__1cXPipeline_Use_Cycle_Mask2t6MI_v_; +text: .text%__1cKCompiledICMset_to_clean6M_v_; +text: .text%__1cNdecL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cIciMethodOresolve_invoke6MpnHciKlass_2_p0_; +text: .text%__1cQChunkPoolCleanerEtask6M_v_; +text: .text%__1cICmpDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPsalL_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_MulI6MpknENode__v_; +text: .text%__1cZCallDynamicJavaDirectNodeFreloc6kM_i_; +text: .text%__1cQMachCallJavaNodeVis_MachCallStaticJava6M_pnWMachCallStaticJavaNode__; +text: .text%__1cUandI_rReg_imm255NodeJnum_opnds6kM_I_; +text: .text%__1cFStateX_sub_Op_CallDynamicJava6MpknENode__v_; +text: .text%__1cNObjectMonitorEwait6MxipnGThread__v_; +text: .text%jni_FindClass: jni.o; +text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cIMinINodeJideal_reg6kM_I_; +text: .text%__1cJCMoveNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_; +text: .text%__1cNCallGeneratorSfor_predicted_call6FpnHciKlass_p03_3_; +text: .text%__1cLTypeInstPtrRcast_to_exactness6kMi_pknEType__; +text: .text%__1cTconvI2F_reg_regNodeErule6kM_I_; +text: .text%__1cWPredictedCallGeneratorKis_virtual6kM_i_; +text: .text%__1cTconvF2D_reg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cNcmovP_regNodeErule6kM_I_; +text: .text%__1cMaddF_regNodeJnum_opnds6kM_I_; +text: .text%__1cWPredictedCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%JVM_MonitorWait; +text: .text%__1cPshrL_rReg_1NodePoper_input_base6kM_I_; +text: .text%__1cMaddF_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNCallGeneratorQfor_virtual_call6FpnIciMethod__p0_; +text: .text%__1cUVirtualCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cLPSMarkSweepbAabsorb_live_data_from_eden6FpnUPSAdaptiveSizePolicy_pnKPSYoungGen_pnIPSOldGen__i_; +text: .text%__1cUPSMarkSweepDecoratorbDadvance_destination_decorator6F_v_; +text: .text%__1cNmulI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNmulI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cSOnStackReplacementPget_osr_adapter6FnFframe_nMmethodHandle__pnKOSRAdapter__; +text: .text%__1cNGrowableArray4CpnKOSRAdapter__Hat_grow6Mirk1_1_; +text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_; +text: .text%JVM_GetClassDeclaredConstructors; +text: .text%__1cRCardTableModRefBSKinvalidate6MnJMemRegion__v_; +text: .text%__1cJLoadFNodeJideal_reg6kM_I_; +text: .text%__1cJAssemblerEaddq6MpnMRegisterImpl_2_v_; +text: .text%__1cFTypeFJsingleton6kM_i_; +text: .text%__1cTconvF2D_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cMstoreSSPNodeErule6kM_I_; +text: .text%__1cOloadConL32NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMstoreSSPNodeHtwo_adr6kM_I_; +text: .text%__1cMincI_memNodeHtwo_adr6kM_I_; +text: .text%__1cKcmpOpUOperFequal6kM_i_; +text: .text%__1cTconvF2D_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cHRegMask2t6M_v_; +text: .text%__1cIGraphKitPdstore_rounding6MpnENode__2_; +text: .text%__1cNGrowableArray4Ci_2t6MpnFArena_iirki_v_; +text: .text%__1cNloadConL0NodeHsize_of6kM_I_; +text: .text%__1cQset_lwp_priority6Fiii_i_; +text: 
.text%__1cJCmpD3NodeGOpcode6kM_i_; +text: .text%__1cKloadUBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmpD_cc_immNodeJnum_opnds6kM_I_; +text: .text%__1cLConvL2DNodeGOpcode6kM_i_; +text: .text%__1cRmulI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerFcmovq6Mn0AJCondition_pnMRegisterImpl_3_v_; +text: .text%__1cNminI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMmulD_regNodeMideal_Opcode6kM_i_; +text: .text%__1cNminI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cFStateM_sub_Op_MinI6MpknENode__v_; +text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__; +text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__; +text: .text%__1cScompL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGICStubIfinalize6M_v_; +text: .text%__1cNCallGeneratorRfor_uncommon_trap6FpnIciMethod_nODeoptimizationLDeoptReason_n0CLDeoptAction__p0_; +text: .text%__1cUandI_rReg_imm255NodeHtwo_adr6kM_I_; +text: .text%__1cJStubQdDueueMremove_first6M_v_; +text: .text%__1cOPhaseIdealLoopOdo_range_check6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cUVirtualCallGeneratorKis_virtual6kM_i_; +text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_; +text: .text%__1cZUncommonTrapCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_; +text: .text%__1cJAssemblerEcmpq6MpnMRegisterImpl_2_v_; +text: .text%__1cNGrowableArray4CpnIciObject__Egrow6Mi_v_; +text: .text%__1cFStateM_sub_Op_ModI6MpknENode__v_; +text: .text%__1cNObjectMonitor2t6M_v_; +text: .text%__1cNmodI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFParseNdo_instanceof6M_v_; +text: .text%__1cPcmpD_cc_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNIdealLoopTreeXpolicy_maximally_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cTmembar_volatileNodeHtwo_adr6kM_I_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cIGraphKitOgen_instanceof6MpnENode_2_2_; +text: .text%__1cHciKlassOsuper_of_depth6MI_p0_; +text: .text%__1cJLoadDNodeGOpcode6kM_i_; +text: .text%__1cNcmovL_regNodePoper_input_base6kM_I_; +text: .text%__1cMdecI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRaddI_rReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_; +text: .text%__1cQsalL_rReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNandI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbFunnecessary_membar_volatileNodeLbottom_type6kM_pknEType__; +text: .text%__1cHMatcherXpost_store_load_barrier6FpknENode__i_; +text: .text%__1cYjava_lang_reflect_MethodThas_signature_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodbFhas_parameter_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbChas_annotation_default_field6F_i_; +text: .text%__1cJloadDNodePoper_input_base6kM_I_; +text: .text%__1cENodeEgetf6kM_f_; +text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_; +text: 
.text%__1cKReflectionKnew_method6FnMmethodHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cYjava_lang_reflect_MethodVhas_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cNmethodOopDescSannotation_default6kM_pnQtypeArrayOopDesc__; +text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cVCallRuntimeDirectNodeKmethod_set6Ml_v_; +text: .text%__1cTconvD2I_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cSTailCalljmpIndNodeJnum_opnds6kM_I_; +text: .text%__1cTconvI2D_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cQorI_rReg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cKstoreFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRaddI_mem_rRegNodePoper_input_base6kM_I_; +text: .text%__1cMmulF_immNodeErule6kM_I_; +text: .text%__1cJAssemblerGmovlpd6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cPcmpF_cc_regNodePoper_input_base6kM_I_; +text: .text%__1cNCompileBrokerTcompile_adapter_for6FnMmethodHandle_ii_pnMBasicAdapter__; +text: .text%__1cCosbBthread_local_storage_at_put6Fipv_v_; +text: .text%__1cNCompileBrokerbBwait_for_adapter_completion6FpnLCompileTask__pnMBasicAdapter__; +text: .text%__1cOjmpLoopEndNodeJis_Branch6kM_I_; +text: .text%__1cOjmpLoopEndNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cNinstanceKlassSregister_finalizer6FpnPinstanceOopDesc_pnGThread__2_; +text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_; +text: .text%__1cKCMoveINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FLi_pnGThread__; +text: .text%__1cMrax_RegIOperEtype6kM_pknEType__; +text: .text%__1cOjmpLoopEndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateW_sub_Op_MemBarVolatile6MpknENode__v_; +text: .text%jni_GetMethodID: jni.o; +text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_; +text: .text%get_thread; +text: .text%__1cMincI_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIGraphKitXinsert_mem_bar_volatile6MpnKMemBarNode_i_v_; +text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_; +text: .text%__1cCosHSolarisKmmap_chunk6FpcLii_2_; +text: .text%__1cbFloadConL_0x6666666666666667NodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_MonitorNotifyAll; +text: .text%__1cOloadConL32NodeLbottom_type6kM_pknEType__; +text: .text%__1cRxorI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cMmulD_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNcmovP_regNodeLbottom_type6kM_pknEType__; +text: .text%__1cJScopeDescTdecode_scope_values6Mi_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cTAbstractInterpreterWlayout_activation_impl6FpnNmethodOopDesc_iiiipnFframe_4i_i_; +text: .text%__1cLconvI2BNodeMideal_Opcode6kM_i_; +text: .text%__1cIDivLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cQsalI_rReg_CLNodeHtwo_adr6kM_I_; +text: .text%__1cPsarL_rReg_2NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmodL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cScompL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cQshrL_rReg_CLNodePoper_input_base6kM_I_; +text: .text%__1cJCMoveNode2t6MpnENode_22pknEType__v_; +text: .text%__1cJCMoveNodeEmake6FpnENode_222pknEType__p0_; +text: 
.text%__1cVLoaderConstraintTableYextend_loader_constraint6MpnVLoaderConstraintEntry_nGHandle_pnMklassOopDesc__v_; +text: .text%__1cIimmIOperJnum_edges6kM_I_; +text: .text%__1cJAssemblerFmovss6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cRtestI_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRandL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNGrowableArray4CpnKciTypeFlowFBlock__Gremove6Mrk2_v_; +text: .text%__1cVLoaderConstraintTablebHensure_loader_constraint_capacity6MpnVLoaderConstraintEntry_i_v_; +text: .text%__1cPsalL_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cMsubD_regNodePoper_input_base6kM_I_; +text: .text%__1cMstoreSSPNodeJnum_opnds6kM_I_; +text: .text%__1cMnegD_regNodeHtwo_adr6kM_I_; +text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_; +text: .text%__1cPClassFileParserXverify_unqualified_name6MpcIi_i_; +text: .text%__1cMdivD_immNodeMideal_Opcode6kM_i_; +text: .text%__1cTconvI2F_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_ThreadMis_stillborn6FpnHoopDesc__i_; +text: .text%__1cQsarL_rReg_63NodeErule6kM_I_; +text: .text%__1cRsubL_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMVirtualSpaceQuncommitted_size6kM_L_; +text: .text%__1cRsubL_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cMVirtualSpaceJexpand_by6ML_i_; +text: .text%__1cNstoreImmPNodeMideal_Opcode6kM_i_; +text: .text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_; +text: .text%__1cLOopMapCache2t6M_v_; +text: .text%__1cJloadDNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cQComputeCallStackHdo_char6M_v_; +text: .text%__1cNdivI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cOcmovI_regUNodePoper_input_base6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_; +text: .text%__1cSvframeArrayElementDbci6kM_i_; +text: .text%__1cMaddF_regNodeErule6kM_I_; +text: .text%__1cTconvF2D_reg_memNodeHtwo_adr6kM_I_; +text: .text%__1cNdecL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cMdecI_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFStateN_sub_Op_LoadF6MpknENode__v_; +text: .text%__1cIMulDNodeLbottom_type6kM_pknEType__; +text: .text%__1cNaddI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJAssemblerEdecl6MpnMRegisterImpl__v_; +text: .text%__1cOPhaseIdealLoopVinsert_pre_post_loops6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cJAssemblerGbswapl6MpnMRegisterImpl__v_; +text: .text%__1cRInlineCacheBufferLnew_ic_stub6F_pnGICStub__; +text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cIAddDNodeGOpcode6kM_i_; +text: .text%__1cMincI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_; +text: .text%__1cNloadConPcNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGICStubIset_stub6MpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_; +text: .text%__1cTconvD2I_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerHpop_ptr6MpnMRegisterImpl__v_; +text: .text%__1cGThreadbCis_hidden_from_external_view6kM_i_; +text: .text%__1cMelapsedTimer2t6M_v_; +text: .text%__1cGThreadVis_jvmti_agent_thread6kM_i_; +text: .text%__1cMdivD_immNodeErule6kM_I_; +text: .text%__1cTconvI2D_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cFTypeFFxmeet6kMpknEType__3_; +text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl_i_v_; +text: .text%__1cGICStubLdestination6kM_pC_; +text: 
.text%__1cRsalL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_; +text: .text%__1cPcmpD_cc_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMaddD_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNdivI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_; +text: .text%jni_NewObjectArray: jni.o; +text: .text%__1cIMulFNodeLbottom_type6kM_pknEType__; +text: .text%__1cTmembar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovP_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUandI_rReg_imm255NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNdivL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cKcastPPNodePoper_input_base6kM_I_; +text: .text%__1cMaddD_immNodePoper_input_base6kM_I_; +text: .text%__1cFTypeDFxmeet6kMpknEType__3_; +text: .text%__1cKloadUBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIDivLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJloadDNodeErule6kM_I_; +text: .text%__1cRaddI_mem_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cPsarL_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cKCompiledICMstub_address6kM_pC_; +text: .text%__1cMmulD_regNodeMcisc_operand6kM_i_; +text: .text%__1cMmulF_memNodePoper_input_base6kM_I_; +text: .text%lwp_cond_destroy: os_solaris.o; +text: .text%__1cHnmethodNis_osr_method6kM_i_; +text: .text%lwp_mutex_destroy: os_solaris.o; +text: .text%__1cFParseScan_rerun_bytecode6M_i_; +text: .text%__1cISubFNodeGOpcode6kM_i_; +text: .text%__1cRjni_invoke_static6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cFTypeDGis_nan6kM_i_; +text: .text%__1cTconvI2F_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cOJavaAssertionsNmatch_package6Fpkc_pn0AKOptionList__; +text: .text%__1cOJavaAssertionsHenabled6Fpkci_i_; +text: .text%__1cOJavaAssertionsLmatch_class6Fpkc_pn0AKOptionList__; +text: .text%JVM_DesiredAssertionStatus; +text: .text%__1cHTypePtrFxdual6kM_pknEType__; +text: .text%__1cTconvI2F_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_; +text: .text%__1cLConvI2FNodeLbottom_type6kM_pknEType__; +text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cMaddF_immNodeMideal_Opcode6kM_i_; +text: .text%__1cQshrL_rReg_CLNodeMideal_Opcode6kM_i_; +text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_; +text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cMloadConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNnegI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPcmpF_cc_regNodeHtwo_adr6kM_I_; +text: .text%__1cKReflectionJnew_field6FpnPfieldDescriptor_ipnGThread__pnHoopDesc__; +text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_; +text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_; +text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldVhas_annotations_field6F_i_; +text: .text%__1cISubDNodeGOpcode6kM_i_; +text: 
.text%__1cJloadFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSstring_compareNodeHtwo_adr6kM_I_; +text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cRaddI_mem_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cKScopeValueLis_location6kM_i_; +text: .text%__1cXjava_lang_reflect_FieldThas_signature_field6F_i_; +text: .text%__1cMmulF_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_; +text: .text%JVM_MonitorNotify; +text: .text%__1cQsarL_rReg_63NodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_GetStaticFieldID: jni.o; +text: .text%__1cIModLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cIjniIdMapGcreate6FnTinstanceKlassHandle__p0_; +text: .text%__1cPsarL_rReg_2NodeJnum_opnds6kM_I_; +text: .text%__1cKReflectionbFbasic_type_mirror_to_basic_type6FpnHoopDesc_pnGThread__nJBasicType__; +text: .text%__1cPcmpF_cc_regNodeMideal_Opcode6kM_i_; +text: .text%__1cQSystemDictionaryQjava_mirror_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cIjniIdMap2t6MpnMklassOopDesc_i_v_; +text: .text%__1cIjniIdMapRcompute_index_cnt6FnTinstanceKlassHandle__i_; +text: .text%__1cLjniIdBucket2t6MpnIjniIdMap_p0_v_; +text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__; +text: .text%__1cJLoadSNodeMstore_Opcode6kM_i_; +text: .text%__1cLTypeInstPtrLmirror_type6kM_pnGciType__; +text: .text%__1cMsubF_regNodePoper_input_base6kM_I_; +text: .text%__1cPcmpD_cc_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMlogD_regNodeMideal_Opcode6kM_i_; +text: .text%__1cbFunnecessary_membar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvL2FNodeGOpcode6kM_i_; +text: .text%__1cFKlassNexternal_name6kM_pkc_; +text: .text%__1cMPipeline_Use2t6MIIIpnUPipeline_Use_Element__v_; +text: .text%__1cKstorePNodeErule6kM_I_; +text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_; +text: .text%__1cbFunnecessary_membar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConPcNodeErule6kM_I_; +text: .text%__1cIPipeline2t6MIIiIIiiiipnSmachPipelineStages_2pInMPipeline_Use__v_; +text: .text%__1cRComputeEntryStackGdo_int6M_v_; +text: .text%__1cMstoreSSPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSMachBreakpointNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRsubL_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNtestU_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPsalL_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cNmodL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvF2D_reg_regNodeErule6kM_I_; +text: .text%__1cJAssemblerDjmp6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cObox_handleNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsalI_rReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQorI_rReg_memNodeErule6kM_I_; +text: .text%__1cLloadSSDNodePoper_input_base6kM_I_; +text: .text%__1cNCompileBrokerbAinvoke_compiler_on_adapter6FpnLCompileTask__v_; +text: .text%__1cLConvF2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMaddF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRxorI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cMaddF_immNodePoper_input_base6kM_I_; +text: .text%__1cKCMoveLNodeGOpcode6kM_i_; +text: .text%__1cICodeHeapTmark_segmap_as_free6MLL_v_; +text: .text%__1cRaddL_rReg_memNodePoper_input_base6kM_I_; +text: 
.text%JVM_IsArrayClass; +text: .text%__1cJAssemblerEsbbq6MnHAddress_i_v_; +text: .text%__1cZInterpreterMacroAssemblerFpop_l6MpnMRegisterImpl__v_; +text: .text%__1cMmulD_regNodeJnum_opnds6kM_I_; +text: .text%__1cODeoptimizationYquery_update_method_data6FnQmethodDataHandle_in0ALDeoptReason_rIri4_pnLProfileData__; +text: .text%__1cICodeHeapJexpand_by6ML_i_; +text: .text%__1cMmulD_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cLConvF2DNodeLbottom_type6kM_pknEType__; +text: .text%__1cJAssemblerEaddq6MnHAddress_i_v_; +text: .text%JVM_GetClassName; +text: .text%__1cTconvF2D_reg_regNodeMcisc_operand6kM_i_; +text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cMmulD_immNodeJnum_opnds6kM_I_; +text: .text%__1cNmulI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQorI_rReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__; +text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_; +text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_; +text: .text%__1cRsubL_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRaddL_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cRsubL_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cRsubL_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cPshrL_rReg_1NodeErule6kM_I_; +text: .text%__1cQshrI_rReg_CLNodeHtwo_adr6kM_I_; +text: .text%__1cFStateO_sub_Op_CMoveI6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_RegD6MpknENode__v_; +text: .text%__1cQorI_rReg_memNodeHtwo_adr6kM_I_; +text: .text%__1cUCallNativeDirectNodeHtwo_adr6kM_I_; +text: .text%__1cTconvI2D_reg_regNodeMcisc_operand6kM_i_; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_; +text: .text%__1cIMaxINodeJideal_reg6kM_I_; +text: .text%__1cFJNIid2t6MpnMklassOopDesc_ip0_v_; +text: .text%__1cNinstanceKlassPjni_id_for_impl6FnTinstanceKlassHandle_i_pnFJNIid__; +text: .text%__1cJAssemblerEaddq6MpnMRegisterImpl_nHAddress__v_; +text: .text%JVM_Open; +text: .text%__1cHRegMask2t6Miiiiiii_v_; +text: .text%__1cbFloadConL_0x6666666666666667NodeHtwo_adr6kM_I_; +text: .text%__1cNsubI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMmulF_regNodePoper_input_base6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MpnMRegisterImpl_i_v_; +text: .text%__1cQConstantIntValuePis_constant_int6kM_i_; +text: .text%__1cRmulL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPsarL_rReg_2NodeHtwo_adr6kM_I_; +text: .text%__1cKmul_hiNodeHtwo_adr6kM_I_; +text: .text%__1cQConstantIntValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cRxorI_rReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateM_sub_Op_ConD6MpknENode__v_; +text: .text%__1cLConvI2DNodeGOpcode6kM_i_; +text: .text%__1cVLoaderConstraintTableJnew_entry6MIpnNsymbolOopDesc_pnMklassOopDesc_ii_pnVLoaderConstraintEntry__; +text: .text%__1cNaddP_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPcmpF_cc_regNodeMcisc_operand6kM_i_; +text: .text%__1cJAssemblerFcmovl6Mn0AJCondition_pnMRegisterImpl_3_v_; +text: .text%__1cVscale_to_lwp_priority6Fiii_i_: os_solaris.o; +text: .text%__1cLOptoRuntimeWresolve_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_; +text: .text%__1cLStrCompNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMmulF_memNodeMideal_Opcode6kM_i_; +text: 
.text%__1cKConv2BNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJloadDNodeJnum_opnds6kM_I_; +text: .text%__1cFStateM_sub_Op_RegF6MpknENode__v_; +text: .text%__1cMmulF_immNodeJnum_opnds6kM_I_; +text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_; +text: .text%__1cNcmovP_regNodeHtwo_adr6kM_I_; +text: .text%jni_GetStaticObjectField: jni.o; +text: .text%__1cIGraphKitSprecision_rounding6MpnENode__2_; +text: .text%__1cScompL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSTailCalljmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvD2F_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cLCastP2LNodeUdepends_only_on_test6kM_i_; +text: .text%__1cTconvF2D_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulD_immNodeHtwo_adr6kM_I_; +text: .text%__1cOMacroAssemblerFleave6M_v_; +text: .text%__1cMloadConDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMTailCallNode2t6MpnENode_222222_v_; +text: .text%__1cICmpDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cXPartialSubtypeCheckNodeGOpcode6kM_i_; +text: .text%__1cSTailCalljmpIndNodeFreloc6kM_i_; +text: .text%__1cObox_handleNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cOloadConL32NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMlogD_regNodePoper_input_base6kM_I_; +text: .text%__1cTconvI2F_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cMnegD_regNodeErule6kM_I_; +text: .text%__1cLvframeArrayRregister_location6kMi_pC_; +text: .text%__1cQorI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateQ_sub_Op_TailCall6MpknENode__v_; +text: .text%__1cIOSThreadNpd_initialize6M_v_; +text: .text%__1cCosScurrent_process_id6F_i_; +text: .text%__1cMaddD_immNodeErule6kM_I_; +text: .text%__1cNmaxI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cPshrL_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvI2F_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cNmaxI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIMaxINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cCosRinitialize_thread6F_v_; +text: .text%__1cCosHSolarisKvm_signals6F_pnIsigset_t__; +text: .text%__1cCosHSolarisRunblocked_signals6F_pnIsigset_t__; +text: .text%__1cMaddF_immNodeErule6kM_I_; +text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_; +text: .text%__1cFStateM_sub_Op_MaxI6MpknENode__v_; +text: .text%__1cGThread2t6M_v_; +text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_; +text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_; +text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_; +text: .text%__1cIOSThread2t6MpFpv_i1_v_; +text: .text%jni_CallStaticVoidMethod: jni.o; +text: .text%__1cCosScurrent_stack_size6F_L_; +text: .text%__1cNPhaseRegAllocHset_oop6MpknENode_i_v_; +text: .text%__1cCosScurrent_stack_base6F_pC_; +text: .text%__1cJloadFNodeFreloc6kM_i_; +text: .text%__1cCosMstart_thread6FpnGThread__v_; +text: .text%__1cKstoreFNodeFreloc6kM_i_; +text: .text%__1cKstoreLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_; +text: .text%__1cNcmovL_memNodeErule6kM_I_; +text: .text%__1cFStateO_sub_Op_StoreF6MpknENode__v_; +text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_; +text: .text%__1cCosPpd_start_thread6FpnGThread__v_; +text: .text%__1cNcmovL_regNodeMcisc_operand6kM_i_; +text: .text%__1cCosNcreate_thread6FpnGThread_n0AKThreadType_L_i_; +text: .text%__1cLconvI2BNodePoper_input_base6kM_I_; +text: .text%__1cOGenerateOopMapMdo_checkcast6M_v_; +text: 
.text%JVM_SetThreadPriority; +text: .text%__1cG_start6Fpv_0_: os_solaris.o; +text: .text%__1cLOptoRuntimeMrethrow_Type6F_pknITypeFunc__; +text: .text%JVM_GetStackAccessControlContext; +text: .text%JVM_IsThreadAlive; +text: .text%__1cTconvL2D_reg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cSstring_compareNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNdivI_rRegNodeErule6kM_I_; +text: .text%__1cNdecL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOLibraryCallKitYinline_native_time_funcs6Mi_i_; +text: .text%__1cNGrowableArray4CpknEType__2t6MpnFArena_iirk2_v_; +text: .text%__1cFParseVcatch_call_exceptions6MrnYciExceptionHandlerStream__v_; +text: .text%__1cTconvL2F_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cQjava_lang_ThreadIis_alive6FpnHoopDesc__i_; +text: .text%jni_CallObjectMethod: jni.o; +text: .text%__1cJAssemblerExorq6MpnMRegisterImpl_2_v_; +text: .text%__1cNcmovL_regNodeJnum_opnds6kM_I_; +text: .text%__1cLOptoRuntimeYcurrent_time_millis_Type6F_pknITypeFunc__; +text: .text%__1cOcmovI_regUNodeMideal_Opcode6kM_i_; +text: .text%__1cNcmovL_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_; +text: .text%__1cMsubD_regNodeMideal_Opcode6kM_i_; +text: .text%__1cNloadConL0NodeFclone6kM_pnENode__; +text: .text%__1cPcmpF_cc_regNodeErule6kM_I_; +text: .text%__1cJimmL0OperFclone6kM_pnIMachOper__; +text: .text%__1cNmodI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPcmpF_cc_regNodeJnum_opnds6kM_I_; +text: .text%__1cPcmpF_cc_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cbFloadConL_0x6666666666666667NodePoper_input_base6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerFpop_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cTconvL2D_reg_memNodePoper_input_base6kM_I_; +text: .text%__1cLConvD2FNodeGOpcode6kM_i_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_; +text: .text%__1cJloadDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEtemp6F_pnMRegisterImpl__; +text: .text%__1cMmulF_immNodeHtwo_adr6kM_I_; +text: .text%__1cQsarL_rReg_63NodeHtwo_adr6kM_I_; +text: .text%__1cQsarL_rReg_63NodeJnum_opnds6kM_I_; +text: .text%__1cQjava_lang_StringbHcreate_from_platform_depended_str6FpkcpnGThread__nGHandle__; +text: .text%__1cMsubF_regNodeMideal_Opcode6kM_i_; +text: .text%__1cTconvI2L_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWThreadLocalAllocBufferMinitial_size6F_L_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_; +text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_; +text: .text%__1cTconvF2I_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cRandI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cRandI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cCosNcommit_memory6FpcL_i_; +text: .text%__1cNdivI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cENodeJis_MemBar6kM_pknKMemBarNode__; +text: .text%__1cNjni_functions6F_pknTJNINativeInterface___; +text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_; +text: .text%JVM_NativePath; +text: .text%__1cKJavaThreadKinitialize6M_v_; +text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_; +text: .text%__1cGParker2t6M_v_; +text: .text%__1cOPhaseIdealLoopKdo_peeling6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_; +text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cKJavaThreadYcreate_stack_guard_pages6M_v_; +text: 
.text%__1cUThreadSafepointState2t6MpnKJavaThread__v_; +text: .text%__1cCosMguard_memory6FpcL_i_; +text: .text%__1cMnegD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUCallNativeDirectNodePoper_input_base6kM_I_; +text: .text%__1cHnmethodTinc_decompile_count6M_v_; +text: .text%__1cIMinINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cMResourceMarkNreset_to_mark6M_v_; +text: .text%__1cQThreadStatistics2t6M_v_; +text: .text%__1cMFlatProfilerJis_active6F_i_; +text: .text%__1cNloadConPcNodeLbottom_type6kM_pknEType__; +text: .text%__1cMmulD_regNodeErule6kM_I_; +text: .text%__1cMdivD_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKJavaThread2t6MpFp0pnGThread__vL_v_; +text: .text%__1cPcmpD_cc_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvI2D_reg_regNodeErule6kM_I_; +text: .text%__1cQshrL_rReg_CLNodeJnum_opnds6kM_I_; +text: .text%__1cNcmovL_memNodePoper_input_base6kM_I_; +text: .text%__1cNdivL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmpD_cc_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateT_sub_Op_CallRuntime6MpknENode__v_; +text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_; +text: .text%__1cKcastPPNodeHtwo_adr6kM_I_; +text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cMsubD_regNodeMcisc_operand6kM_i_; +text: .text%__1cGThreadFstart6Fp0_v_; +text: .text%__1cVCallRuntimeDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQsalL_rReg_CLNodeHtwo_adr6kM_I_; +text: .text%__1cVCallRuntimeDirectNodeFreloc6kM_i_; +text: .text%__1cIGraphKitIset_jvms6MpnIJVMState__v_; +text: .text%__1cKJavaThreadDrun6M_v_; +text: .text%__1cTconvD2I_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cOsalI_mem_1NodePoper_input_base6kM_I_; +text: .text%__1cSMachCallNativeNodePret_addr_offset6M_i_; +text: .text%__1cMLinkResolverbEresolve_interface_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cZInterpreterMacroAssemblerFpop_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cMrdi_RegIOperEtype6kM_pknEType__; +text: .text%__1cVThreadStateTransitionKtransition6FpnKJavaThread_nPJavaThreadState_3_v_; +text: .text%__1cUCallNativeDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKJavaThreadRthread_main_inner6M_v_; +text: .text%__1cQorI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIGraphKitbAgen_stub_or_native_wrapper6MpCpkcpnIciMethod_iiiii_v_; +text: .text%__1cPsalL_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMResourceMark2t6M_v_; +text: .text%__1cQshrI_rReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJScopeDescGlocals6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cJScopeDescVdecode_monitor_values6Mi_pnNGrowableArray4CpnMMonitorValue____; +text: .text%__1cSvframeArrayElementPunpack_on_stack6MiipnFframe_ii_v_; +text: .text%__1cTAbstractInterpreterRlayout_activation6FpnNmethodOopDesc_iiiipnFframe_4i_v_; +text: .text%__1cOcompiledVFrameLexpressions6kM_pnUStackValueCollection__; +text: .text%__1cOcompiledVFrameImonitors6kM_pnNGrowableArray4CpnLMonitorInfo____; +text: .text%__1cOcompiledVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cSvframeArrayElementHfill_in6MpnOcompiledVFrame__v_; +text: .text%__1cOcompiledVFrameHraw_bci6kM_i_; +text: .text%__1cNGrowableArray4CpnLMonitorInfo__2t6Mii_v_; +text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__; +text: 
.text%__1cOMacroAssemblerKincrementl6MpnMRegisterImpl_i_v_; +text: .text%__1cFframebCinterpreter_frame_set_locals6Mpl_v_; +text: .text%__1cFframebHinterpreter_frame_set_monitor_end6MpnPBasicObjectLock__v_; +text: .text%__1cTAbstractInterpreterPsize_activation6FpnNmethodOopDesc_iiiii_i_; +text: .text%__1cSPerfStringConstant2t6MnJCounterNS_pkc3_v_; +text: .text%__1cTAbstractInterpreterQcontinuation_for6FpnNmethodOopDesc_pCiiri_3_; +text: .text%__1cZInterpreterMacroAssemblerLcall_VM_Ico6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cFframebCinterpreter_frame_set_method6MpnNmethodOopDesc__v_; +text: .text%__1cMmulF_regNodeMideal_Opcode6kM_i_; +text: .text%__1cFframebBinterpreter_frame_sender_sp6kM_pl_; +text: .text%__1cMaddF_regNodeHtwo_adr6kM_I_; +text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_; +text: .text%__1cKstoreINodeErule6kM_I_; +text: .text%__1cJScopeDescImonitors6M_pnNGrowableArray4CpnMMonitorValue____; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_i_v_; +text: .text%__1cOcompiledVFrameGlocals6kM_pnUStackValueCollection__; +text: .text%__1cJScopeDescLexpressions6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cTconvF2D_reg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cSvframeArrayElementNon_stack_size6kMiiii_i_; +text: .text%__1cMaddD_regNodePoper_input_base6kM_I_; +text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__; +text: .text%__1cMorL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cOcmovD_regUNodePoper_input_base6kM_I_; +text: .text%__1cPcmovI_reg_gNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMdivD_immNodePoper_input_base6kM_I_; +text: .text%__1cJloadDNodeHtwo_adr6kM_I_; +text: .text%__1cKReflectionTunbox_for_primitive6FpnHoopDesc_pnGjvalue_pnGThread__nJBasicType__; +text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__; +text: .text%__1cMmulF_memNodeJnum_opnds6kM_I_; +text: .text%__1cIMulDNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cODeoptimizationVtrap_state_add_reason6Fii_i_; +text: .text%__1cDhpiFclose6Fi_i_; +text: .text%__1cJMemRegionFminus6kM0_0_; +text: .text%__1cMmulD_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_i_v_; +text: .text%__1cNcmovL_regNodeMideal_Opcode6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerWupdate_mdp_by_constant6MpnMRegisterImpl_i_v_; +text: .text%__1cOtailjmpIndNodeNis_block_proj6kM_pknENode__; +text: .text%__1cRaddL_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cTconvI2F_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvL2F_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cGICStubKcached_oop6kM_pnHoopDesc__; +text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__; +text: .text%__1cTconvD2F_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cJAssemblerFpushq6Mi_v_; +text: .text%JVM_Close; +text: .text%__1cMnegF_regNodePoper_input_base6kM_I_; +text: .text%__1cOcmovI_regUNodeJnum_opnds6kM_I_; +text: .text%__1cbCAbstractInterpreterGeneratorRset_unimplemented6Mi_v_; +text: .text%__1cRComputeEntryStackJdo_object6Mii_v_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cOcmovI_regUNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_; +text: .text%jni_NewObjectV: jni.o; +text: .text%__1cKConv2BNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cQshrL_rReg_CLNodeErule6kM_I_; +text: 
.text%__1cTconvF2D_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHnmethodbCcan_not_entrant_be_converted6M_i_; +text: .text%__1cNSafePointNodeQpeek_monitor_obj6kM_pnENode__; +text: .text%__1cOcmovI_regUNodeMcisc_operand6kM_i_; +text: .text%__1cNSafePointNodeQpeek_monitor_box6kM_pnENode__; +text: .text%__1cJLoadBNodeMstore_Opcode6kM_i_; +text: .text%__1cVCompressedWriteStreamKwrite_long6Mx_v_; +text: .text%__1cTconvF2I_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cLConvF2INodeGOpcode6kM_i_; +text: .text%__1cPMultiBranchDataScompute_cell_count6FpnOBytecodeStream__i_; +text: .text%__1cFParsePdo_monitor_exit6M_v_; +text: .text%__1cMStubCodeMark2T6M_v_; +text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_; +text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_; +text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_; +text: .text%__1cKcastPPNodeErule6kM_I_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF3_v3_v_; +text: .text%__1cOsalI_mem_1NodeJnum_opnds6kM_I_; +text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_; +text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_; +text: .text%__1cPshrL_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cRandI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cRandI_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cQorI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cJAssemblerGmovslq6MpnMRegisterImpl_2_v_; +text: .text%__1cRandI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_; +text: .text%__1cRConstantLongValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%JVM_StartThread; +text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o; +text: .text%__1cTconvF2D_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMsubD_regNodeErule6kM_I_; +text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__; +text: .text%__1cNmulI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cIMulFNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cLRuntimeStub2n6FLI_pv_; +text: .text%__1cLRuntimeStubQnew_runtime_stub6FpkcpnKCodeBuffer_ipnJOopMapSet_i_p0_; +text: .text%__1cLRuntimeStub2t6MpkcpnKCodeBuffer_iipnJOopMapSet_i_v_; +text: .text%__1cTconvF2D_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cRxorI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMmulF_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOLibraryCallKitbDis_method_invoke_or_aux_frame6MpnIJVMState__i_; +text: .text%__1cbFloadConL_0x6666666666666667NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAddFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOloadConL32NodeHsize_of6kM_I_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJOperation__v4_v_; +text: .text%__1cRaddL_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivLNodeJideal_reg6kM_I_; +text: .text%__1cGICStubFclear6M_v_; +text: .text%__1cTconvI2D_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMsubD_regNodeJnum_opnds6kM_I_; +text: .text%__1cMsubD_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cHCompileWget_MethodAccessorImpl6M_pnPciInstanceKlass__; +text: .text%__1cHCompileRget_Method_invoke6M_pnIciMethod__; +text: .text%__1cNdecI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFTypeFFxdual6kM_pknEType__; +text: .text%__1cTconvL2D_reg_memNodeJnum_opnds6kM_I_; +text: 
.text%__1cTconvI2D_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cTunsafe_intrinsic_id6FpnNsymbolOopDesc_1_nMvmIntrinsicsCID__; +text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__; +text: .text%__1cFStateM_sub_Op_ConF6MpknENode__v_; +text: .text%__1cMloadConFNodeHsize_of6kM_I_; +text: .text%__1cPsarL_rReg_2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsarL_rReg_63NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPoldgetTimeNanos6F_x_; +text: .text%__1cPno_rax_RegLOperFclone6kM_pnIMachOper__; +text: .text%__1cTAbstractInterpreterMreturn_entry6FnITosState_i_pC_; +text: .text%__1cPsarL_rReg_1NodePoper_input_base6kM_I_; +text: .text%__1cMnegD_regNodeJnum_opnds6kM_I_; +text: .text%__1cKmul_hiNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerEjccb6Mn0AJCondition_rnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cNcmovP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHMonitor2T6M_v_; +text: .text%__1cINegDNodeLbottom_type6kM_pknEType__; +text: .text%__1cLvframeArrayIallocate6FpnKJavaThread_ipnNGrowableArray4CpnOcompiledVFrame___pnLRegisterMap_nFframe_9A9A9A_p0_; +text: .text%__1cNThreadServiceNremove_thread6FpnKJavaThread_i_v_; +text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_; +text: .text%__1cODeoptimizationScreate_vframeArray6FpnKJavaThread_nFframe_pnLRegisterMap__pnLvframeArray__; +text: .text%__1cHThreadsGremove6FpnKJavaThread__v_; +text: .text%__1cODeoptimizationTuncommon_trap_inner6FpnKJavaThread_i_v_; +text: .text%__1cODeoptimizationPget_method_data6FpnKJavaThread_nMmethodHandle_i_pnRmethodDataOopDesc__; +text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o; +text: .text%__1cIOSThread2T6M_v_; +text: .text%__1cODeoptimizationNuncommon_trap6FpnKJavaThread_i_pn0ALUnrollBlock__; +text: .text%__1cTconvI2D_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationRgather_statistics6Fn0ALDeoptReason_n0ALDeoptAction_nJBytecodesECode__v_; +text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_; +text: .text%__1cIGraphKitTdprecision_rounding6MpnENode__2_; +text: .text%__1cNGrowableArray4CpnOcompiledVFrame__2t6Mii_v_; +text: .text%__1cOcmovI_regUNodeErule6kM_I_; +text: .text%__1cKJavaThreadEexit6Mi_v_; +text: .text%__1cGParker2T6M_v_; +text: .text%__1cCosLfree_thread6FpnIOSThread__v_; +text: .text%__1cXpartialSubtypeCheckNodeMideal_Opcode6kM_i_; +text: .text%JVM_GetInheritedAccessControlContext; +text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_; +text: .text%__1cLvframeArrayZdeallocate_monitor_chunks6M_v_; +text: .text%__1cGThread2T5B6M_v_; +text: .text%__1cIAddDNodeLbottom_type6kM_pknEType__; +text: .text%__1cKJavaThread2T6M_v_; +text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_; +text: .text%__1cMmulD_memNodePoper_input_base6kM_I_; +text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_; +text: .text%__1cLvframeArrayPunpack_to_stack6MrnFframe_i_v_; +text: .text%__1cOcompL_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cODeoptimizationLUnrollBlock2t6MiiiiiplppCnJBasicType__v_; +text: .text%__1cLvframeArrayHfill_in6MpnKJavaThread_ipnNGrowableArray4CpnOcompiledVFrame___pknLRegisterMap_i_v_; +text: .text%__SLIP.DELETER__A: thread.o; +text: .text%__1cbIjava_security_AccessControlContextGcreate6FnOobjArrayHandle_inGHandle_pnGThread__pnHoopDesc__; +text: .text%__1cIOSThreadKpd_destroy6M_v_; +text: 
.text%__1cMaddF_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationYfetch_unroll_info_helper6FpnKJavaThread__pn0ALUnrollBlock__; +text: .text%__1cODeoptimizationRlast_frame_adjust6Fii_i_; +text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_; +text: .text%__1cODeoptimizationNunpack_frames6FpnKJavaThread_i_nJBasicType__; +text: .text%__1cNnmethodLocker2t6MpC_v_; +text: .text%__1cTconvD2I_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cOtailjmpIndNodeMideal_Opcode6kM_i_; +text: .text%__1cLconvI2BNodeErule6kM_I_; +text: .text%__1cTconvF2I_reg_regNodeErule6kM_I_; +text: .text%__1cIciMethodVget_osr_flow_analysis6Mi_pnKciTypeFlow__; +text: .text%__1cSCallLeafDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQAbstractCompilerMsupports_osr6M_i_; +text: .text%__1cRaddL_mem_rRegNodePoper_input_base6kM_I_; +text: .text%__1cSCallLeafDirectNodeJnum_opnds6kM_I_; +text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_; +text: .text%__1cMmulL_memNodePoper_input_base6kM_I_; +text: .text%__1cODeoptimizationLUnrollBlock2T6M_v_; +text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cMaddF_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_; +text: .text%__1cMincL_memNodePoper_input_base6kM_I_; +text: .text%jni_NewDirectByteBuffer; +text: .text%__1cMmulL_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMaddD_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__; +text: .text%__1cJAssemblerEmovb6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cJSubFPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_only6MnITosState__v_; +text: .text%__1cRcmpFastUnlockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKloadUBNodeFreloc6kM_i_; +text: .text%__1cMStartOSRNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cMStartOSRNodeKosr_domain6F_pknJTypeTuple__; +text: .text%__1cMloadConPNodeGis_Con6kM_I_; +text: .text%__1cMmulD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_; +text: .text%__1cOPSVirtualSpaceJexpand_by6ML_i_; +text: .text%__1cNCallGeneratorHfor_osr6FpnIciMethod_i_p0_; +text: .text%__1cFParseWload_interpreter_state6MpnENode_2_v_; +text: .text%__1cKstoreINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOstackSlotDOperKin_RegMask6kMi_pknHRegMask__; +text: .text%jni_GetFloatArrayRegion: jni.o; +text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_; +text: .text%__1cKEntryPoint2t6M_v_; +text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl_i_v_; +text: .text%__1cSCardTableExtensionbEresize_covered_region_by_start6MnJMemRegion__v_; +text: .text%__1cQshrL_rReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRaddL_mem_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cIAddFNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMlogD_regNodeErule6kM_I_; +text: .text%__1cXpartialSubtypeCheckNodePoper_input_base6kM_I_; +text: .text%__1cNmulI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cMdecI_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQsalL_rReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKemit_break6FrnKCodeBuffer__v_; +text: .text%jni_GetStaticMethodID: jni.o; +text: .text%__1cOstackSlotDOperJnum_edges6kM_I_; +text: .text%__1cMsubF_regNodeMcisc_operand6kM_i_; +text: .text%__1cMdecI_memNodeFreloc6kM_i_; +text: 
.text%__1cMdecI_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRCardTableModRefBSbCfind_covering_region_by_base6MpnIHeapWord__i_; +text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cINegFNodeGOpcode6kM_i_; +text: .text%__1cRCardTableModRefBSbAlargest_prev_committed_end6kMi_pnIHeapWord__; +text: .text%__1cLloadSSDNodeJnum_opnds6kM_I_; +text: .text%__1cSMachBreakpointNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCardTableExtensionVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cLconvI2BNodeJnum_opnds6kM_I_; +text: .text%__1cNstoreImmPNodePoper_input_base6kM_I_; +text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_; +text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cHCompile2t6MpnFciEnv_pF_pknITypeFunc_pCpkciiii_v_; +text: .text%__1cTconvL2F_reg_regNodeMcisc_operand6kM_i_; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_; +text: .text%__1cNloadConPcNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cFStateM_sub_Op_CmpD6MpknENode__v_; +text: .text%__1cNloadConL0NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cUCallNativeDirectNodeKmethod_set6Ml_v_; +text: .text%__1cKcastPPNodeMideal_Opcode6kM_i_; +text: .text%__1cNcmovL_memNodeJnum_opnds6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_; +text: .text%__1cPshrL_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cIGraphKitIgen_stub6MpCpkciii_v_; +text: .text%__1cbDcatch_cleanup_find_cloned_def6FpnFBlock_pnENode_1rnLBlock_Array_i_3_: lcm.o; +text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cTC2IAdapterGeneratorUmkh_unverified_entry6FnMmethodHandle__pC_; +text: .text%__1cRaddL_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cLOptoRuntimeNgenerate_stub6FpnFciEnv_pF_pknITypeFunc_pCpkciiii_8_; +text: .text%__1cISubDNodeLbottom_type6kM_pknEType__; +text: .text%__1cbCcatch_cleanup_fix_all_inputs6FpnENode_11_v_: lcm.o; +text: .text%__1cISubFNodeLbottom_type6kM_pknEType__; +text: .text%__1cNdivI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableGbranch6Fii_v_; +text: .text%__1cNstoreImmPNodeHtwo_adr6kM_I_; +text: .text%__1cLOptoRuntimeRnew_objArray_Type6F_pknITypeFunc__; +text: .text%JVM_GetComponentType; +text: .text%__1cIMulDNodeJideal_reg6kM_I_; +text: .text%__1cTconvF2D_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cJAssemblerEsbbq6MpnMRegisterImpl_i_v_; +text: .text%__1cNcmovL_memNodeMideal_Opcode6kM_i_; +text: .text%jni_GetStringRegion: jni.o; +text: .text%jni_EnsureLocalCapacity: jni.o; +text: .text%__1cLloadSSDNodeHtwo_adr6kM_I_; +text: .text%__1cMaddF_memNodePoper_input_base6kM_I_; +text: .text%__1cFParseMdo_anewarray6M_v_; +text: .text%__1cLConvI2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLconvI2BNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC22i_v_; +text: .text%__1cHThreadsYis_supported_jni_version6Fi_C_; +text: .text%__1cMincL_memNodeJnum_opnds6kM_I_; +text: .text%__1cRandL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cRaddL_mem_rRegNodeJnum_opnds6kM_I_; +text: .text%JVM_NewArray; +text: .text%JVM_FreeMemory; +text: 
.text%JVM_TotalMemory; +text: .text%__1cMaddD_immNodeJnum_opnds6kM_I_; +text: .text%__1cMsubF_regNodeJnum_opnds6kM_I_; +text: .text%__1cLloadSSINodePoper_input_base6kM_I_; +text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cMincI_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__; +text: .text%__1cMsubF_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMmulF_memNodeErule6kM_I_; +text: .text%__1cODeoptimizationbJupdate_method_data_from_interpreter6FnQmethodDataHandle_ii_v_; +text: .text%__1cLClassLoaderSget_system_package6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cMTailJumpNodeKmatch_edge6kMI_I_; +text: .text%__1cFStateL_sub_Op_Box6MpknENode__v_; +text: .text%__1cRaddL_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTconvL2F_reg_regNodeErule6kM_I_; +text: .text%__1cKPSYoungGenLpost_resize6M_v_; +text: .text%__1cNcmovL_regNodeErule6kM_I_; +text: .text%__1cOcmovD_regUNodeJnum_opnds6kM_I_; +text: .text%__1cRandI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeHtwo_adr6kM_I_; +text: .text%__1cTAbstractInterpreterRTosState_as_index6FnITosState__i_; +text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_; +text: .text%__1cKstoreBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPBytecode_invokeLresult_type6kMpnGThread__nJBasicType__; +text: .text%__1cMincL_memNodeMideal_Opcode6kM_i_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJloadCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cCosPuncommit_memory6FpcL_i_; +text: .text%__1cSInterpreterRuntimeJnote_trap6FpnKJavaThread_ipnGThread__v_; +text: .text%__1cRSignatureIteratorHiterate6M_v_; +text: .text%__1cIModLNodeJideal_reg6kM_I_; +text: .text%__1cNTemplateTableOpatch_bytecode6FnJBytecodesECode_pnMRegisterImpl_4i_v_; +text: .text%__1cLConvD2INodeLbottom_type6kM_pknEType__; +text: .text%__1cMaddF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHBoxNodeLbottom_type6kM_pknEType__; +text: .text%__1cFStateM_sub_Op_DivL6MpknENode__v_; +text: .text%__1cTconvL2D_reg_memNodeErule6kM_I_; +text: .text%JVM_GetSystemPackage; +text: .text%__1cCosNcommit_memory6FpcLL_i_; +text: .text%__1cOMacroAssemblerFenter6M_v_; +text: .text%__1cLConvF2DNodeJideal_reg6kM_I_; +text: .text%__1cNTemplateTableLindex_check6FpnMRegisterImpl_2_v_; +text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_; +text: .text%__1cLMoveL2DNodeGOpcode6kM_i_; +text: .text%__1cMincI_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateP_sub_Op_ConvF2D6MpknENode__v_; +text: .text%__1cMmulL_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%Unsafe_DefineClass1; +text: .text%__1cSUnsafe_DefineClass6FpnHJNIEnv__pnI_jstring_pnL_jbyteArray_iipnI_jobject_7_pnH_jclass__: unsafe.o; +text: .text%__1cFTypeDFxdual6kM_pknEType__; +text: .text%__1cMincI_memNodeFreloc6kM_i_; +text: .text%__1cPcmpF_cc_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMsubF_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMsubF_memNodePoper_input_base6kM_I_; +text: .text%__1cTconvF2D_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%JVM_DefineClass; +text: .text%__1cMaddF_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMmulL_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cMmulL_memNodeJnum_opnds6kM_I_; +text: .text%__1cJAssemblerEshrq6MpnMRegisterImpl_i_v_; +text: .text%__1cTC2IAdapterGeneratorLadapter_for6FnMmethodHandle__pnKC2IAdapter__; +text: 
.text%__1cPcmpFastLockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerQtest_mdp_data_at6MpnMRegisterImpl_i2rnFLabel__v_; +text: .text%__1cZInterpreterMacroAssemblerYprofile_not_taken_branch6MpnMRegisterImpl__v_; +text: .text%__1cTleaPIdxScaleOffNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_; +text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_; +text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_; +text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_; +text: .text%__1cNloadConL0NodeGis_Con6kM_I_; +text: .text%__1cMset_property6FnGHandle_pkc2pnGThread__v_: jvm.o; +text: .text%JVM_GetCPFieldModifiers; +text: .text%JVM_InvokeMethod; +text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cZcatch_cleanup_inter_block6FpnENode_pnFBlock_13rnLBlock_Array_i_v_: lcm.o; +text: .text%__1cOsalI_mem_1NodeMideal_Opcode6kM_i_; +text: .text%__1cMaddF_immNodeJnum_opnds6kM_I_; +text: .text%__1cMsubD_immNodePoper_input_base6kM_I_; +text: .text%__1cMmulF_regNodeMcisc_operand6kM_i_; +text: .text%__1cMmulF_regNodeJnum_opnds6kM_I_; +text: .text%__1cMmulF_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cMmulD_regNodeHtwo_adr6kM_I_; +text: .text%__1cTconvD2F_reg_regNodeMcisc_operand6kM_i_; +text: .text%jni_AllocObject: jni.o; +text: .text%__1cCosHSolarisOset_mpss_range6FpcLL_i_; +text: .text%__1cTconvF2I_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFParseOdo_tableswitch6M_v_; +text: .text%__1cTmembar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cMrdx_RegLOperFclone6kM_pnIMachOper__; +text: .text%__1cICmpFNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJCondition__v4_v_; +text: .text%__1cFj_not6FnNTemplateTableJCondition__nJAssemblerJCondition__: templateTable_amd64.o; +text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_; +text: .text%__1cTconvF2D_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMmulF_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%Unsafe_AllocateInstance; +text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_; +text: .text%__1cOcmovD_regUNodeMideal_Opcode6kM_i_; +text: .text%__1cIciObjectMis_classless6kM_i_; +text: .text%__1cMsubD_immNodeMideal_Opcode6kM_i_; +text: .text%__1cRInlineCacheBufferOinit_next_stub6F_v_; +text: .text%__1cPshrL_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubD_immNodeErule6kM_I_; +text: .text%__1cHTypePtrKadd_offset6kMi_pk0_; +text: .text%__1cNTemplateTableHconvert6F_v_; +text: .text%__1cMnegD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQorI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLMoveF2INodeGOpcode6kM_i_; +text: .text%__1cNcmovL_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_2_v_; +text: .text%__1cOcmovD_regUNodeErule6kM_I_; +text: .text%__1cKConv2BNodeLbottom_type6kM_pknEType__; +text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_; +text: .text%__1cMorL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cTconvD2I_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cScompL_rReg_memNodeMideal_Opcode6kM_i_; +text: 
.text%__1cXpartialSubtypeCheckNodeErule6kM_I_; +text: .text%__1cOstackSlotDOperEtype6kM_pknEType__; +text: .text%__1cHThreadsLnmethods_do6F_v_; +text: .text%__1cLloadSSDNodeErule6kM_I_; +text: .text%__1cMsubD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRComputeEntryStackIdo_short6M_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorCto6F_pnMRegisterImpl__; +text: .text%__1cTconvF2D_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulL_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__; +text: .text%__1cLTypeInstPtrOxmeet_unloaded6kMpk0_2_; +text: .text%__1cMloadConFNodeKconst_size6kM_i_; +text: .text%__1cMorL_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cMmulD_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMaddD_regNodeMideal_Opcode6kM_i_; +text: .text%__1cTconvI2D_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMnegF_regNodeMideal_Opcode6kM_i_; +text: .text%__1cMloadConFNodeFreloc6kM_i_; +text: .text%__1cILogDNodeLbottom_type6kM_pknEType__; +text: .text%__1cLConvI2DNodeLbottom_type6kM_pknEType__; +text: .text%__1cNstoreImmPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLStrCompNodeJideal_reg6kM_I_; +text: .text%__1cMlogD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_; +text: .text%__1cMaddD_regNodeMcisc_operand6kM_i_; +text: .text%__1cMaddD_regNodeErule6kM_I_; +text: .text%__1cScompL_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cIAddFNodeJideal_reg6kM_I_; +text: .text%__1cJimmP0OperPconstant_is_oop6kM_i_; +text: .text%__1cJimmP0OperIconstant6kM_l_; +text: .text%__1cNciMethodKlassEmake6F_p0_; +text: .text%__1cETypeJis_finite6kM_i_; +text: .text%__1cHnmethodFflush6M_v_; +text: .text%JVM_GetClassContext; +text: .text%__1cIciObjectTis_type_array_klass6M_i_; +text: .text%__1cNsubL_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cIregFOperFclone6kM_pnIMachOper__; +text: .text%__1cRfind_field_offset6FpnI_jobject_ipnGThread__i_; +text: .text%__1cHBoxNodeJideal_reg6kM_I_; +text: .text%__1cXPartialSubtypeCheckNodeLbottom_type6kM_pknEType__; +text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_; +text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_; +text: .text%__1cLloadSSDNodeMideal_Opcode6kM_i_; +text: .text%__1cMsubF_regNodeErule6kM_I_; +text: .text%__1cRsubL_rReg_memNodeFreloc6kM_i_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_pnNsymbolOopDesc_pkc_nGHandle__; +text: .text%__1cTconvL2F_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMmulF_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJStubQdDueueKremove_all6M_v_; +text: .text%__1cIDivDNodeLbottom_type6kM_pknEType__; +text: .text%__1cLStatSamplerTget_system_property6FpkcpnGThread__2_; +text: .text%__1cRmethodDataOopDescRbci_to_extra_data6Mii_pnLProfileData__; +text: .text%__1cICodeBlobFflush6M_v_; +text: .text%__1cVMoveF2I_reg_stackNodeMideal_Opcode6kM_i_; +text: .text%__1cNmodL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_; +text: .text%__1cKstoreCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSmembar_releaseNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cQsalI_rReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSInterpreterRuntimeQcreate_exception6FpnKJavaThread_pc3_v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_2_v_; +text: 
.text%__1cJStubQdDueueMremove_first6Mi_v_; +text: .text%__1cQinitialize_class6FnMsymbolHandle_pnGThread__v_: thread.o; +text: .text%__1cJAssemblerFcmovq6Mn0AJCondition_pnMRegisterImpl_nHAddress__v_; +text: .text%__1cXpartialSubtypeCheckNodeJnum_opnds6kM_I_; +text: .text%__1cMmulD_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMaddF_immNodeHtwo_adr6kM_I_; +text: .text%__1cIMulDNodeGmul_id6kM_pknEType__; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cTconvL2D_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvD2F_reg_regNodeErule6kM_I_; +text: .text%__1cVMoveL2D_reg_stackNodeMideal_Opcode6kM_i_; +text: .text%__1cFStateM_sub_Op_MulD6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_ModL6MpknENode__v_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_; +text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_; +text: .text%__1cPloadConUL32NodeGis_Con6kM_I_; +text: .text%__1cQshrL_rReg_CLNodeHtwo_adr6kM_I_; +text: .text%__1cKJavaThreadbOcheck_special_condition_for_native_trans6Fp0_v_; +text: .text%__1cODeoptimizationYreset_invocation_counter6FpnJScopeDesc_i_v_; +text: .text%__1cZCallDynamicJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTconvF2I_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cMmulD_memNodeJnum_opnds6kM_I_; +text: .text%__1cHOrLNodeGOpcode6kM_i_; +text: .text%__1cIMulFNodeGmul_id6kM_pknEType__; +text: .text%__1cMnegF_regNodeErule6kM_I_; +text: .text%__1cMsubF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvD2F_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_RawMonitorCreate; +text: .text%__1cOresolve_symbol6Fpkc_pC_: os_solaris.o; +text: .text%__1cMMutableSpaceOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cKCMoveDNodeGOpcode6kM_i_; +text: .text%__1cFParseQdo_monitor_enter6M_v_; +text: .text%__1cPMultiBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cXpartialSubtypeCheckNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvD2INodeJideal_reg6kM_I_; +text: .text%__1cKcastPPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZCallDynamicJavaDirectNodeJnum_opnds6kM_I_; +text: .text%__1cMlogD_regNodeJnum_opnds6kM_I_; +text: .text%Unsafe_CompareAndSwapInt; +text: .text%__1cOstackSlotIOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cOstackSlotIOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_ppkc5i_i_: arguments.o; +text: .text%__1cMmulD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNmulI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKimmL32OperFclone6kM_pnIMachOper__; +text: .text%__1cIimmFOperFclone6kM_pnIMachOper__; +text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_22pC_v_; +text: .text%__1cOindOffset8OperFclone6kM_pnIMachOper__; +text: .text%__1cOMacroAssemblerVreset_last_Java_frame6MpnMRegisterImpl_i_v_; +text: .text%__1cOloadConL32NodeFclone6kM_pnENode__; +text: .text%__1cMloadConFNodeFclone6kM_pnENode__; +text: .text%__1cScompL_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cNTemplateTableRlocals_index_wide6FpnMRegisterImpl__v_; +text: .text%__1cVMoveL2D_reg_stackNodePoper_input_base6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerPset_mdp_data_at6MpnMRegisterImpl_i2_v_; +text: .text%__1cKOSRAdapter2n6FLI_pv_; 
+text: .text%__1cKOSRAdapterPnew_osr_adapter6FpnKCodeBuffer_pnJOopMapSet_ii_p0_; +text: .text%__1cJAssemblerEincl6MnHAddress__v_; +text: .text%__1cKOSRAdapter2t6MpnKCodeBuffer_pnJOopMapSet_iii_v_; +text: .text%__1cTconvI2D_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cNSharedRuntimeRgenerate_osr_blob6Fi_pnKOSRAdapter__; +text: .text%__1cMaddD_regNodeJnum_opnds6kM_I_; +text: .text%__1cbCAbstractInterpreterGeneratorUset_wide_entry_point6MpnITemplate_rpC_v_; +text: .text%__1cMmulF_regNodeErule6kM_I_; +text: .text%__1cIMulFNodeJideal_reg6kM_I_; +text: .text%__1cFStateM_sub_Op_MulF6MpknENode__v_; +text: .text%__1cJOopMapSetQsingular_oop_map6M_pnGOopMap__; +text: .text%__1cHnmethodVmark_as_seen_on_stack6M_v_; +text: .text%__1cMloadConDNodeHsize_of6kM_I_; +text: .text%__1cOcmovI_regUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLconvI2BNodeHtwo_adr6kM_I_; +text: .text%__1cMorL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cQorI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cMaddD_immNodeHtwo_adr6kM_I_; +text: .text%__1cMloadConDNodeKconst_size6kM_i_; +text: .text%__1cLConvL2FNodeLbottom_type6kM_pknEType__; +text: .text%__1cLConvL2DNodeLbottom_type6kM_pknEType__; +text: .text%__1cLloadSSINodeMideal_Opcode6kM_i_; +text: .text%__1cOstackSlotDOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cTconvF2D_reg_memNodeFreloc6kM_i_; +text: .text%__1cLConvD2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cTconvL2D_reg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cMloadConDNodeFreloc6kM_i_; +text: .text%JVM_Lseek; +text: .text%__1cPsarL_rReg_1NodeErule6kM_I_; +text: .text%__1cPsarL_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVcompiledICHolderKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cMaddD_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cOstackSlotDOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMorL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMmulF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMlogD_regNodeHtwo_adr6kM_I_; +text: .text%__1cRaddI_mem_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cFStateM_sub_Op_AddF6MpknENode__v_; +text: .text%__1cIXorINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cTconvL2F_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cSstring_compareNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNGrowableArray4CpnKOSRAdapter__Praw_at_put_grow6Mirk14_v_; +text: .text%__1cFStateP_sub_Op_StrComp6MpknENode__v_; +text: .text%__1cTconvL2F_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cUInterpreterGeneratorUgenerate_fixed_frame6Mi_v_; +text: .text%__1cGciType2t6MnJBasicType__v_; +text: .text%__1cMaddF_memNodeJnum_opnds6kM_I_; +text: .text%__1cUInterpreterGeneratorbAgenerate_run_compiled_code6M_v_; +text: .text%__1cScompL_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MpC_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_; +text: .text%__1cPcmpF_cc_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_; +text: .text%__1cQmulI_mem_immNodePoper_input_base6kM_I_; +text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_; +text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_; +text: .text%__1cNdecL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOLibraryCallKitXinline_string_compareTo6M_i_; +text: .text%__1cScompL_rReg_memNodeRis_cisc_alternate6kM_i_; 
+text: .text%__1cJAssemblerEsubq6MpnMRegisterImpl_nHAddress__v_; +text: .text%jni_GetEnv; +text: .text%JVM_NanoTime; +text: .text%__1cCosNjavaTimeNanos6F_x_; +text: .text%__1cZInterpreterMacroAssemblerNsuper_call_VM6MpnMRegisterImpl_2pC22_v_; +text: .text%__1cFTypeFJis_finite6kM_i_; +text: .text%__1cRmulI_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cScompL_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cHMulNodeGis_Mul6kM_pk0_; +text: .text%__1cETypeEmake6Fn0AFTYPES__pk0_; +text: .text%__1cQmulI_mem_immNodeMideal_Opcode6kM_i_; +text: .text%__1cParrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cJAssemblerLemit_data646MxnJrelocInfoJrelocType_i_v_; +text: .text%__1cJAssemblerFpushq6MnHAddress__v_; +text: .text%__1cIGraphKitSgen_native_wrapper6MpnIciMethod__v_; +text: .text%__1cRComputeEntryStackIdo_array6Mii_v_; +text: .text%__1cPcmpD_cc_immNodeKconst_size6kM_i_; +text: .text%__1cKLoadPCNodeJideal_reg6kM_I_; +text: .text%__1cMorL_rRegNodeErule6kM_I_; +text: .text%__1cUCallNativeDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompP_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cScompP_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cSvframeStreamCommonbFfill_in_compiled_inlined_sender6M_i_; +text: .text%__1cNdivI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cKcastPPNodeJnum_opnds6kM_I_; +text: .text%__1cTconvL2D_reg_memNodeHtwo_adr6kM_I_; +text: .text%__1cOLibraryCallKitbNinline_native_Reflection_getCallerClass6M_i_; +text: .text%__1cOLibraryCallKitZinline_native_Class_query6MnMvmIntrinsicsCID__i_; +text: .text%__1cMnegF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod__v_; +text: .text%__1cKciTypeFlowLStateVectorOdo_null_assert6MpnHciKlass__v_; +text: .text%__1cMsubD_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cJLoadLNodeMstore_Opcode6kM_i_; +text: .text%__1cNGrowableArray4CpnGciType__Egrow6Mi_v_; +text: .text%__1cMdivD_immNodeJnum_opnds6kM_I_; +text: .text%__1cNstoreImmPNodeJnum_opnds6kM_I_; +text: .text%__1cMdivD_immNodeHtwo_adr6kM_I_; +text: .text%__1cLloadSSINodeHtwo_adr6kM_I_; +text: .text%__1cLConvI2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIciMethodMnative_entry6M_pC_; +text: .text%__1cPcmpD_cc_immNodeFreloc6kM_i_; +text: .text%__1cUCallNativeDirectNodeFreloc6kM_i_; +text: .text%__1cNloadConPcNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulD_memNodeErule6kM_I_; +text: .text%jni_CallVoidMethod: jni.o; +text: .text%__1cFStateS_sub_Op_CallNative6MpknENode__v_; +text: .text%__1cFStateO_sub_Op_LoadPC6MpknENode__v_; +text: .text%__1cQAbstractCompilerPsupports_native6M_i_; +text: .text%__1cQorI_rReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbCAbstractInterpreterGeneratorbBgenerate_result_handler_for6MnJBasicType__pC_; +text: .text%__1cMmulF_regNodeHtwo_adr6kM_I_; +text: .text%__1cPsalL_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cQshrI_rReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableQvolatile_barrier6FnJAssemblerQMembar_mask_bits__v_; +text: .text%__1cNdivL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cVMoveL2D_reg_stackNodeErule6kM_I_; +text: .text%__1cRsalI_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNRelocIterator2t6MpnKCodeBuffer_pC3_v_; +text: .text%__1cJBasicLockHmove_to6MpnHoopDesc_p0_v_; +text: 
.text%__1cYinternal_word_RelocationMforce_target6MpC_v_; +text: .text%__1cOstackSlotIOperEtype6kM_pknEType__; +text: .text%__1cLloadSSINodeJnum_opnds6kM_I_; +text: .text%__1cKPSYoungGenRavailable_to_live6M_L_; +text: .text%__1cOstackSlotIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNcmovL_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cSstore_to_stackslot6FrnKCodeBuffer_iii_v_; +text: .text%__1cFTypeFGis_nan6kM_i_; +text: .text%__1cQshrL_rReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvD2F_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_; +text: .text%__1cUInterpreterGeneratorbDgenerate_stack_overflow_check6M_v_; +text: .text%__1cRComputeEntryStackHdo_bool6M_v_; +text: .text%__1cMmulD_immNodeFreloc6kM_i_; +text: .text%__1cQmulI_mem_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_continuation_for6MnITosState__pC_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%JVM_FindPrimitiveClass; +text: .text%__1cOstackSlotIOperJnum_edges6kM_I_; +text: .text%JVM_IsSameClassPackage; +text: .text%__1cUInterpreterGeneratorXcheck_for_compiled_code6MrnFLabel__v_; +text: .text%__1cRaddI_mem_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMaddF_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cLconvI2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMaddD_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMlogD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPSVirtualSpaceJshrink_by6ML_i_; +text: .text%__1cTconvD2F_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cRCardTableModRefBSYcommitted_unique_to_self6kMinJMemRegion__1_; +text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_; +text: .text%__1cFStateN_sub_Op_LoadD6MpknENode__v_; +text: .text%__1cTconvL2F_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cZInterpreterMacroAssemblerRremove_activation6MnITosState_pnMRegisterImpl_iii_v_; +text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_; +text: .text%__1cUInterpreterGeneratorbEgenerate_asm_interpreter_entry6Mi_pC_; +text: .text%__1cPcmpF_cc_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubF_memNodeJnum_opnds6kM_I_; +text: .text%__1cKPSYoungGenUavailable_to_min_gen6M_L_; +text: .text%__1cJAssemblerKrepne_scan6M_v_; +text: .text%__1cJname2type6Fpkc_nJBasicType__; +text: .text%__1cKPSYoungGenbCreset_survivors_after_shrink6M_v_; +text: .text%__1cKPSYoungGenQlimit_gen_shrink6ML_L_; +text: .text%__1cTconvI2D_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateP_sub_Op_ConvI2F6MpknENode__v_; +text: .text%__1cMmulD_immNodeKconst_size6kM_i_; +text: .text%__1cMmulD_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cMmulF_immNodeFreloc6kM_i_; +text: .text%__1cJloadBNodeHsize_of6kM_I_; +text: .text%__1cOcompI_rRegNodeHsize_of6kM_I_; +text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__; +text: .text%__1cJloadPNodeHsize_of6kM_I_; +text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cOtypeArrayKlassNexternal_name6FnJBasicType__pkc_; +text: .text%Unsafe_StaticFieldOffset; +text: .text%__1cFTypeFFempty6kM_i_; +text: 
.text%__1cNcmovL_regNodeHtwo_adr6kM_I_; +text: .text%__1cLloadSSDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateO_sub_Op_CMoveP6MpknENode__v_; +text: .text%__1cVVM_ParallelGCSystemGCEdoit6M_v_; +text: .text%__1cETypeFxdual6kM_pk0_; +text: .text%__1cVVM_ParallelGCSystemGC2t6MIInHGCCauseFCause__v_; +text: .text%__1cJCmpF3NodeGOpcode6kM_i_; +text: .text%Unsafe_GetObjectVolatile; +text: .text%__1cMsubD_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLdivL_10NodePoper_input_base6kM_I_; +text: .text%__1cVVM_ParallelGCSystemGCEname6kM_pkc_; +text: .text%__1cOtypeArrayKlassMcreate_klass6FnJBasicType_ipnGThread__pnMklassOopDesc__; +text: .text%Unsafe_EnsureClassInitialized; +text: .text%__1cJAssemblerEjmpb6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cOcmovI_regUNodeHtwo_adr6kM_I_; +text: .text%__1cMaddD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerEmovw6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerGmovsbl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cMrax_RegLOperFclone6kM_pnIMachOper__; +text: .text%__1cMorL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvD2F_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cUParallelScavengeHeapHcollect6MnHGCCauseFCause__v_; +text: .text%__1cJLoadDNodeJideal_reg6kM_I_; +text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cQmulI_mem_immNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCi_v_; +text: .text%__1cPcheckCastPPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJAssemblerFpopaq6M_v_; +text: .text%__1cSmembar_acquireNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKimmL10OperJnum_edges6kM_I_; +text: .text%Unsafe_StaticFieldBaseFromField; +text: .text%__1cLcastP2LNodeHsize_of6kM_I_; +text: .text%__1cQmulI_mem_immNodeRis_cisc_alternate6kM_i_; +text: .text%__1cMsubD_regNodeHtwo_adr6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerSnotify_method_exit6MnITosState__v_; +text: .text%__1cRsubI_rReg_memNodeHsize_of6kM_I_; +text: .text%__1cTconvL2F_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cNReservedSpace2t6MpcL_v_; +text: .text%__1cKmul_hiNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSmembar_acquireNodeJnum_opnds6kM_I_; +text: .text%__1cQsarL_rReg_63NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerMemit_arith_b6MiipnMRegisterImpl_i_v_; +text: .text%__1cPsarL_rReg_2NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLdivL_10NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_; +text: .text%JVM_GC; +text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_; +text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_; +text: .text%__1cScompP_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cIPSOldGenSexpand_to_reserved6M_i_; +text: .text%__1cQmulI_mem_immNodeJnum_opnds6kM_I_; +text: .text%__1cIPSOldGenJexpand_by6ML_i_; +text: .text%__1cIPSOldGenGexpand6ML_v_; +text: .text%__1cIPSOldGenXexpand_and_cas_allocate6ML_pnIHeapWord__; +text: .text%__1cPsarL_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cJAssemblerFtestb6MpnMRegisterImpl_i_v_; +text: .text%__1cXpartialSubtypeCheckNodeHtwo_adr6kM_I_; +text: .text%__1cMsubF_regNodeHtwo_adr6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerRget_constant_pool6MpnMRegisterImpl__v_; +text: 
.text%__1cRaddL_rReg_memNodeFreloc6kM_i_; +text: .text%__1cVMoveL2D_reg_stackNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompP_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPsarL_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cOGenerateOopMapGdo_jsr6Mi_v_; +text: .text%__1cMmulF_memNodeHtwo_adr6kM_I_; +text: .text%__1cScompP_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cLPSMarkSweepGinvoke6Fpii_v_; +text: .text%__1cOcmovD_regUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNcmovL_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvF2I_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cMmulF_immNodeKconst_size6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerbGget_unsigned_2_byte_index_at_bcp6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cNTemplateTableGiconst6Fi_v_; +text: .text%__1cMdecI_memNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cCosMuser_handler6F_pv_; +text: .text%__1cJloadDNodeFreloc6kM_i_; +text: .text%__1cMincL_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNaddL_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cCosHSolarisSset_signal_handler6Fiii_v_; +text: .text%__1cNinstanceKlassSremove_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cTconvD2F_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_; +text: .text%__1cMmulD_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLClassLoaderbCupdate_class_path_entry_list6Fpkc_v_; +text: .text%__1cMsubF_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcmovI_regUNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cTconvL2D_reg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cLOptoRuntimeRresolve_call_Type6F_pknITypeFunc__; +text: .text%__1cHciKlassIis_klass6M_i_; +text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_; +text: .text%__1cKScopeValuePis_constant_int6kM_i_; +text: .text%jni_RegisterNatives: jni.o; +text: .text%Unsafe_GetNativeFloat; +text: .text%__1cMsubF_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%JVM_GetClassDeclaredFields; +text: .text%__1cMsubF_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cJMemRegion2t6M_v_; +text: .text%jni_SetStaticObjectField: jni.o; +text: .text%__1cQsalL_rReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cCosEstat6FpkcpnEstat__i_; +text: .text%__1cJArgumentsRverify_percentage6FLpkc_i_; +text: .text%__1cLOptoRuntimeTmultianewarray_Type6Fi_pknITypeFunc__; +text: .text%__1cRComputeEntryStackHdo_long6M_v_; +text: .text%__1cHnmethodVinvalidate_osr_method6M_v_; +text: .text%__1cMaddF_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_; +text: .text%jni_SetObjectField: jni.o; +text: .text%__1cLConvD2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJAssemblerEcall6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cJloadDNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLConvD2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOBasicHashtable2t6Mii_v_; +text: .text%__1cCosHSolarisOis_sig_ignored6Fi_i_; +text: .text%__1cNandI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNcmovL_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cOPhaseIdealLoopJclone_iff6MpnHPhiNode_pnNIdealLoopTree__pnIBoolNode__; +text: .text%__1cMTailJumpNodeGOpcode6kM_i_; +text: .text%__1cCosHSolarisVcleanup_interruptible6FpnKJavaThread__v_; +text: .text%__1cCosHSolarisTsetup_interruptible6F_pnKJavaThread__; +text: 
.text%__1cCosHSolarisTsetup_interruptible6FpnKJavaThread__v_; +text: .text%__1cMdivD_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_IsSupportedJNIVersion; +text: .text%JVM_LoadLibrary; +text: .text%JVM_Sleep; +text: .text%__1cNReservedSpaceKinitialize6MLLipc_v_; +text: .text%__1cHOrLNodeLbottom_type6kM_pknEType__; +text: .text%__1cOstackSlotIOperFscale6kM_i_; +text: .text%__1cLConvD2FNodeLbottom_type6kM_pknEType__; +text: .text%__1cLConvF2INodeLbottom_type6kM_pknEType__; +text: .text%jint_cmp: parse2.o; +text: .text%__1cOstackSlotIOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cLloadSSINodeErule6kM_I_; +text: .text%__1cLConvI2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cVMoveF2I_reg_stackNodePoper_input_base6kM_I_; +text: .text%__1cLConvL2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIDivDNodeJideal_reg6kM_I_; +text: .text%__1cRandI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_; +text: .text%jni_GetJavaVM: jni.o; +text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_; +text: .text%jni_MonitorExit: jni.o; +text: .text%jni_MonitorEnter: jni.o; +text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_; +text: .text%__1cPciInstanceKlassbDcompute_shared_is_initialized6M_i_; +text: .text%__1cNGrowableArray4CpnIPerfData__Praw_at_put_grow6Mirk14_v_; +text: .text%__1cFciEnvOrecord_failure6Mpkc_v_; +text: .text%__1cMciArrayKlassRbase_element_type6M_pnGciType__; +text: .text%__1cLConvL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOstackSlotDOperFscale6kM_i_; +text: .text%__1cOstackSlotDOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cOcmovI_regUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKReturnNodeUdepends_only_on_test6kM_i_; +text: .text%__1cNcmovL_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvD2F_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cTconvF2I_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvL2F_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubD_immNodeJnum_opnds6kM_I_; +text: .text%__1cVMoveL2D_reg_stackNodeJnum_opnds6kM_I_; +text: .text%__1cRaddI_mem_rRegNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNTemplateTableH_return6FnITosState__v_; +text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_; +text: .text%__1cTconvL2D_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXpartialSubtypeCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNSharedRuntimeEdrem6Fdd_d_; +text: .text%__1cRaddI_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMloadConDNodeFclone6kM_pnENode__; +text: .text%__1cScompP_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKC2IAdapterXreturn_from_interpreter6M_pC_; +text: .text%__1cKC2IAdapterRsetup_stack_frame6MnFframe_pnLvframeArray__v_; +text: .text%__1cIregDOperFclone6kM_pnIMachOper__; +text: .text%__1cJAssemblerGmovswl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cMsubF_memNodeErule6kM_I_; +text: .text%__1cIimmDOperFclone6kM_pnIMachOper__; +text: .text%__1cOMacroAssemblerQload_signed_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cKC2IAdapterSunpack_c2i_adapter6MnFframe_1pnLvframeArray__v_; +text: 
.text%__1cNdivI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRMachSpillCopyNodeHsize_of6kM_I_; +text: .text%__1cFframebFset_interpreter_frame_sender_sp6Mpl_v_; +text: .text%__1cPsarL_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cULinearLeastSquareFit2t6MI_v_; +text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_; +text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_; +text: .text%__1cMaddF_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulF_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosZvm_allocation_granularity6F_i_; +text: .text%__1cJAssemblerFpopfq6M_v_; +text: .text%__1cCosOreserve_memory6FLpc_1_; +text: .text%Unsafe_ObjectFieldOffset; +text: .text%__1cUParallelScavengeHeapEkind6M_nNCollectedHeapEName__; +text: .text%__1cKMemoryPool2t6Mpkcn0AIPoolType_LLii_v_; +text: .text%__1cNSpaceCounters2t6MpkciLpnMMutableSpace_pnSGenerationCounters__v_; +text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_; +text: .text%__1cMaddF_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cOcompiledVFrameUresolve_monitor_lock6kMnILocation__pnJBasicLock__; +text: .text%__1cTconvD2I_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_; +text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_; +text: .text%__1cNReservedSpaceKfirst_part6MLii_0_; +text: .text%__1cNCellTypeStateImake_any6Fi_0_; +text: .text%__1cMorL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cNTemplateTableFaload6Fi_v_; +text: .text%__1cISubFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMnegF_regNodeJnum_opnds6kM_I_; +text: .text%__1cINegDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKCompiledICOis_megamorphic6kM_i_; +text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_; +text: .text%__1cISubDNodeGadd_id6kM_pknEType__; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_double6M_v_; +text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_; +text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_; +text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_; +text: .text%__1cMaddD_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMMonitorChunk2t6Mi_v_; +text: .text%__1cLMoveL2DNodeLbottom_type6kM_pknEType__; +text: .text%__1cZCompiledArgumentOopFinderDset6MinJBasicType__v_; +text: .text%__1cNstoreImmPNodeFreloc6kM_i_; +text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cLOptoRuntimebBhandle_wrong_method_ic_miss6FpnKJavaThread__pC_; +text: .text%__1cKJavaThreadUremove_monitor_chunk6MpnMMonitorChunk__v_; +text: .text%__1cKJavaThreadRadd_monitor_chunk6MpnMMonitorChunk__v_; +text: .text%__1cNReservedSpace2t6ML_v_; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__; +text: .text%__1cNmulL_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNmulI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTGeneratePairingInfoOreport_results6kM_i_; +text: .text%Unsafe_GetNativeByte; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_; +text: 
.text%__1cFframebLprevious_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_; +text: .text%__1cOMacroAssemblerQload_signed_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cQOopMapCacheEntryPfill_for_native6M_v_; +text: .text%__1cFStateP_sub_Op_ConvD2I6MpknENode__v_; +text: .text%__1cJAssemblerGpushfq6M_v_; +text: .text%__1cKVtableStubRpd_code_alignment6F_i_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_2_v_; +text: .text%__1cIDivFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cINegDNodeJideal_reg6kM_I_; +text: .text%__1cODeoptimizationZtrap_state_set_recompiled6Fii_i_; +text: .text%__1cPshrL_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTconvF2D_reg_memNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_; +text: .text%__1cNandI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pLi_v_; +text: .text%__1cMsubF_memNodeHtwo_adr6kM_I_; +text: .text%__1cINegFNodeLbottom_type6kM_pknEType__; +text: .text%__1cRaddL_rReg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cLVtableStubsLcreate_stub6FiipnNmethodOopDesc__pC_; +text: .text%__1cMmulL_memNodeFreloc6kM_i_; +text: .text%__1cLVtableStubsGlookup6Fiii_pnKVtableStub__; +text: .text%__1cMMonitorValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cFStateM_sub_Op_NegD6MpknENode__v_; +text: .text%__1cOtailjmpIndNodePoper_input_base6kM_I_; +text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_; +text: .text%__1cISubDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_; +text: .text%__1cLVtableStubsOis_entry_point6FpC_i_; +text: .text%__1cNstoreImmPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHRetNodeJnum_opnds6kM_I_; +text: .text%__1cIDivINodeJideal_reg6kM_I_; +text: .text%__1cRInvocationCounterDdef6Fn0AFState_ipFnMmethodHandle_pnGThread__pC_v_; +text: .text%__1cMNativeLookupNlong_jni_name6FnMmethodHandle__pc_; +text: .text%__1cMaddF_memNodeErule6kM_I_; +text: .text%__1cOcmovD_regUNodeHtwo_adr6kM_I_; +text: .text%__1cMaddF_memNodeHtwo_adr6kM_I_; +text: .text%__1cOGenerateOopMapTret_jump_targets_do6MpnOBytecodeStream_pFp0ipi_vi4_v_; +text: .text%__1cOClassPathEntry2t6M_v_; +text: .text%__1cMorL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cOMacroAssemblerNpop_CPU_state6M_v_; +text: .text%__1cMmulF_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cOMacroAssemblerOpush_CPU_state6M_v_; +text: .text%__1cOMacroAssemblerNpop_FPU_state6M_v_; +text: .text%__1cOMacroAssemblerOpush_FPU_state6M_v_; +text: .text%__1cOMacroAssemblerMpop_IU_state6M_v_; +text: .text%__1cOMacroAssemblerNpush_IU_state6M_v_; +text: .text%__1cOMacroAssemblerSstore_check_part_26MpnMRegisterImpl__v_; +text: .text%__1cTconvL2D_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOMacroAssemblerSstore_check_part_16MpnMRegisterImpl__v_; +text: .text%__1cRaddL_rReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMaddF_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRClassPathZipEntry2t6Mppvpc_v_; +text: .text%__1cNTemplateTableOprepare_invoke6FpnMRegisterImpl_2inJBytecodesECode__v_; +text: .text%__1cVMoveF2I_reg_stackNodeErule6kM_I_; +text: .text%__1cJAssemblerEandq6MpnMRegisterImpl_2_v_; +text: .text%__1cFParsePdo_lookupswitch6M_v_; +text: .text%__1cLConvF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFParseRjump_if_true_fork6MpnGIfNode_ii_v_; +text: .text%__1cIAddDNodeJideal_reg6kM_I_; +text: 
.text%__1cJloadFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRComputeEntryStackJdo_double6M_v_; +text: .text%__1cMaddD_regNodeHtwo_adr6kM_I_; +text: .text%__1cLConvD2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTablebAload_invoke_cp_cache_entry6FipnMRegisterImpl_22ii_v_; +text: .text%__1cNTemplateTableZload_field_cp_cache_entry6FipnMRegisterImpl_22i_v_; +text: .text%__1cJAssemblerEcmpb6MnHAddress_i_v_; +text: .text%__1cCosGsignal6Fipv_1_; +text: .text%__1cLClassLoaderSget_canonical_path6Fpc1i_i_; +text: .text%__1cLClassLoaderXcreate_class_path_entry6FpcnEstat_ppnOClassPathEntry__v_; +text: .text%__1cMsubD_immNodeHtwo_adr6kM_I_; +text: .text%__1cLklassVtableTis_miranda_entry_at6Mi_i_; +text: .text%__1cKPSScavengeZclean_up_failed_promotion6F_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_; +text: .text%JVM_Available; +text: .text%__1cJAssemblerHucomiss6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerRprofile_checkcast6MipnMRegisterImpl__v_; +text: .text%__1cIAddDNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJAssemblerFimulq6MpnMRegisterImpl_2_v_; +text: .text%__1cIRetTableUfind_jsrs_for_target6Mi_pnNRetTableEntry__; +text: .text%__1cNRegisterSaverWrestore_live_registers6FpnOMacroAssembler__v_; +text: .text%__1cLClassLoaderLadd_to_list6FpnOClassPathEntry__v_; +text: .text%__1cNTemplateTableGastore6Fi_v_; +text: .text%__1cNTemplateTableGdstore6Fi_v_; +text: .text%__1cNTemplateTableGfstore6Fi_v_; +text: .text%__1cNTemplateTableGlstore6Fi_v_; +text: .text%__1cNRegisterSaverTsave_live_registers6FpnOMacroAssembler_ipi_pnGOopMap__; +text: .text%__1cNTemplateTableGistore6Fi_v_; +text: .text%__1cIRetTableHadd_jsr6Mii_v_; +text: .text%__1cMincL_memNodeHtwo_adr6kM_I_; +text: .text%__1cKPSYoungGenOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cNGrowableArray4CpnLmarkOopDesc__2t6Mii_v_; +text: .text%__1cUCompressedReadStreamJread_long6M_x_; +text: .text%__1cISubDNodeJideal_reg6kM_I_; +text: .text%__1cWNonPrintingResourceObj2n6FLnLResourceObjPallocation_type__pv_; +text: .text%__1cNTemplateTableFdload6Fi_v_; +text: .text%__1cNTemplateTableFfload6Fi_v_; +text: .text%__1cPaddress_of_flag6FnXCommandLineFlagWithType__pnEFlag__: globals.o; +text: .text%__1cNTemplateTableFlload6Fi_v_; +text: .text%__1cNTemplateTableFiload6Fi_v_; +text: .text%__1cMmulI_memNodePoper_input_base6kM_I_; +text: .text%__1cOcompL_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNGrowableArray4CpnLmarkOopDesc__Uclear_and_deallocate6M_v_; +text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cOcompI_rRegNodeFclone6kM_pnENode__; +text: .text%__1cRsubI_rReg_memNodeFclone6kM_pnENode__; +text: .text%__1cLcastP2LNodeFclone6kM_pnENode__; +text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_; +text: .text%__1cRaddL_rReg_memNodeErule6kM_I_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl__v_; +text: .text%__1cOsalI_mem_1NodeHtwo_adr6kM_I_; +text: .text%__1cHRetNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVMoveL2D_reg_stackNodeHtwo_adr6kM_I_; +text: .text%__1cRaddL_mem_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cJloadPNodeFclone6kM_pnENode__; +text: .text%__1cJloadBNodeFclone6kM_pnENode__; +text: .text%__1cRaddL_rReg_memNodeHtwo_adr6kM_I_; +text: .text%__1cMmulF_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMaddF_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerEmovb6MnHAddress_i_v_; +text: 
.text%__1cIAddDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%Unsafe_AllocateMemory; +text: .text%__1cVMoveF2I_reg_stackNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerHfxrstor6MnHAddress__v_; +text: .text%__1cJAssemblerGfxsave6MnHAddress__v_; +text: .text%__1cHCompilePget_invoke_name6M_pnIciSymbol__; +text: .text%__1cJAssemblerEsetb6Mn0AJCondition_pnMRegisterImpl__v_; +text: .text%__1cNxorI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cMVM_OperationNdoit_prologue6M_i_; +text: .text%__1cNGCTaskManagerGthread6MI_pnMGCTaskThread__; +text: .text%__1cRConstantLongValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cRConstantLongValueQis_constant_long6kM_i_; +text: .text%__1cKScopeValuePis_constant_oop6kM_i_; +text: .text%__1cKScopeValueSis_constant_double6kM_i_; +text: .text%__1cMmulD_memNodeHtwo_adr6kM_I_; +text: .text%__1cVMoveF2I_reg_stackNodeHtwo_adr6kM_I_; +text: .text%jni_CallStaticObjectMethod: jni.o; +text: .text%__1cNcmovL_memNodeHtwo_adr6kM_I_; +text: .text%__1cFStateM_sub_Op_AddD6MpknENode__v_; +text: .text%__1cMmulI_memNodeMideal_Opcode6kM_i_; +text: .text%__1cScompL_rReg_memNodeFreloc6kM_i_; +text: .text%__1cLloadSSINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNGrowableArray4CpnIPerfData__2t6Mii_v_; +text: .text%__1cOCompilerThreadSis_Compiler_thread6kM_i_; +text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_; +text: .text%__1cMPerfDataList2T6M_v_; +text: .text%__1cLVtableStubsFenter6FiiipnKVtableStub__v_; +text: .text%__1cMmulI_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOcmovD_regUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__; +text: .text%__1cNnegI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cbCAbstractInterpreterGeneratorbHgenerate_exception_handler_common6Mpkc2i_pC_; +text: .text%__1cSCommandLineFlagsExKis_default6FnPCommandLineFlag__i_; +text: .text%__1cJAssemblerEnegl6MpnMRegisterImpl__v_; +text: .text%__1cUConstantOopReadValuePis_constant_oop6kM_i_; +text: .text%__1cHMatcherNlogDSupported6F_ki_; +text: .text%__1cOGenerateOopMapRdo_multianewarray6Mii_v_; +text: .text%__1cLconvI2BNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cUPSGenerationCounters2t6MpkciipnOPSVirtualSpace__v_; +text: .text%__1cMPerfDataList2t6Mi_v_; +text: .text%__1cFStateP_sub_Op_ConvI2D6MpknENode__v_; +text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_; +text: .text%__1cJCodeCachebCmake_marked_nmethods_zombies6F_v_; +text: .text%__1cNVM_DeoptimizeEname6kM_pkc_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_; +text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_; +text: .text%__1cSCommandLineFlagsExJboolAtPut6FnXCommandLineFlagWithType_i_v_; +text: .text%__1cMmulI_memNodeJnum_opnds6kM_I_; +text: .text%__1cFStateM_sub_Op_CmpF6MpknENode__v_; +text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_; +text: .text%__1cOtailjmpIndNodeGpinned6kM_i_; +text: .text%__1cQshrL_rReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerGmovzbl6MpnMRegisterImpl_2_v_; +text: .text%__1cILogDNodeJideal_reg6kM_I_; +text: .text%__1cMmulI_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cRaddL_mem_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMincL_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateO_sub_Op_Conv2B6MpknENode__v_; +text: .text%__1cNcmovL_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%jni_CallStaticObjectMethodV: jni.o; +text: .text%__1cMOopTaskQdDueueKinitialize6M_v_; +text: .text%__1cMmulD_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMOopTaskQdDueue2t6M_v_; +text: .text%__1cOLibraryCallKitbBinline_native_currentThread6M_i_; +text: .text%__1cNMemoryManager2t6M_v_; +text: .text%__1cMaddF_immNodeKconst_size6kM_i_; +text: .text%__1cVMoveL2D_reg_stackNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOLibraryCallKitSinline_math_native6MnMvmIntrinsicsCID__i_; +text: .text%__1cFciEnvbNArrayIndexOutOfBoundsException_instance6M_pnKciInstance__; +text: .text%__1cMsubD_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMaddF_immNodeFreloc6kM_i_; +text: .text%__1cMaddD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOCompilerThreadbCis_hidden_from_external_view6kM_i_; +text: .text%__1cNReservedSpaceJlast_part6ML_0_; +text: .text%__1cMnegF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMaddD_immNodeFreloc6kM_i_; +text: .text%jni_IsInstanceOf: jni.o; +text: .text%__1cMaddD_immNodeKconst_size6kM_i_; +text: .text%jni_Throw: jni.o; +text: .text%__1cRmulI_rReg_immNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cOsalI_mem_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSPSPromotionManager2t6M_v_; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cOLibraryCallKitXgenerate_current_thread6MrpnENode__2_; +text: .text%__1cMsubF_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerFmovss6MpnRFloatRegisterImpl_2_v_; +text: .text%JVM_GetLastErrorString; +text: .text%__1cJAssemblerFmovsd6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_22_v_; +text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_; +text: .text%__1cFStateM_sub_Op_SubF6MpknENode__v_; +text: .text%__1cLVtableStubsScreate_vtable_stub6Fii_pnKVtableStub__; +text: .text%JVM_GetInterfaceVersion; +text: .text%__1cKstoreBNodeErule6kM_I_; +text: .text%__1cKVtableStub2n6FLi_pv_; +text: .text%__1cJAssemblerEdecq6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_22_v_; +text: .text%__1cOtailjmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosFyield6F_v_; +text: .text%__1cRaddI_mem_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOPSVirtualSpace2t6MnNReservedSpace_L_v_; +text: .text%__1cOLibraryCallKitMinline_trans6MnMvmIntrinsicsCID__i_; +text: .text%Unsafe_SetMemory; +text: .text%__1cCosIjvm_path6Fpci_v_; +text: .text%__1cJTimeStamp2t6M_v_; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_2i_v_; +text: .text%__1cLconvI2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorKpass_float6M_v_; +text: .text%__1cISubFNodeJideal_reg6kM_I_; +text: .text%__1cNGrowableArray4CpnIPerfData__Egrow6Mi_v_; +text: .text%__1cMSysClassPathNreset_item_at6Mi_v_; +text: .text%__1cFStateM_sub_Op_LogD6MpknENode__v_; +text: .text%__1cFTypeDFempty6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MpnMRegisterImpl_2i_v_; +text: .text%__1cJlookupOne6FpnHJNIEnv__pkcpnGThread__pnH_jclass__: jni.o; +text: 
.text%__1cLloadSSINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosGgetenv6Fpkcpci_i_; +text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl__v_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cISubFNodeGadd_id6kM_pknEType__; +text: .text%__1cJArgumentsMbuild_string6Fppcpkc_v_; +text: .text%__1cFStateM_sub_Op_SubD6MpknENode__v_; +text: .text%JVM_RegisterSignal; +text: .text%JVM_FindSignal; +text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o; +text: .text%__1cMorL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_; +text: .text%__1cKConv2BNodeJideal_reg6kM_I_; +text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_; +text: .text%jni_GetDoubleArrayRegion: jni.o; +text: .text%__1cLloadSSDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveF2I_reg_stackNodeJnum_opnds6kM_I_; +text: .text%__1cJArgumentsObuild_jvm_args6Fpkc_v_; +text: .text%__1cOLibraryCallKitMpop_math_arg6M_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cFStateO_sub_Op_CMoveL6MpknENode__v_; +text: .text%__1cJArgumentsMadd_property6Fpkc_i_; +text: .text%__1cMVM_OperationNdoit_epilogue6M_v_; +text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_; +text: .text%__1cRaddI_mem_rRegNodeFreloc6kM_i_; +text: .text%__1cSInterpreterRuntimebKthrow_ArrayIndexOutOfBoundsException6FpnKJavaThread_pci_v_; +text: .text%__1cVMoveF2I_reg_stackNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableGfconst6Fi_v_; +text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_; +text: .text%__1cOtailjmpIndNodeHtwo_adr6kM_I_; +text: .text%__1cQmulI_mem_immNodeFreloc6kM_i_; +text: .text%__1cNincI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNVM_DeoptimizeEdoit6M_v_; +text: .text%__1cLMoveF2INodeLbottom_type6kM_pknEType__; +text: .text%__1cUConstantOopReadValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cRaddI_mem_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMdivD_immNodeKconst_size6kM_i_; +text: .text%__1cMmulD_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cQObjectStartArrayKinitialize6MnJMemRegion__v_; +text: .text%__1cQObjectStartArraySset_covered_region6MnJMemRegion__v_; +text: .text%__1cMsubF_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cUGcThreadCountClosureJdo_thread6MpnGThread__v_; +text: .text%__1cNGrowableArray4CpnTDerivedPointerEntry__Egrow6Mi_v_; +text: .text%__1cOtailjmpIndNodeJnum_opnds6kM_I_; +text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_; +text: .text%__1cPGCMemoryManager2t6M_v_; +text: .text%__1cKGCStatInfo2t6Mi_v_; +text: .text%__1cTMaskFillerForNativeLpass_object6M_v_; +text: .text%__1cTMaskFillerForNativeJpass_long6M_v_; +text: .text%__1cJMarkSweepUAdjustPointerClosure2t6Mi_v_; +text: .text%__1cCosHrealloc6FpvL_1_; +text: .text%__1cCosWactive_processor_count6F_i_; +text: .text%__1cSestimate_path_freq6FpnENode__f_: loopnode.o; +text: .text%__1cCosNsigexitnum_pd6F_i_; +text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_; +text: .text%__1cOLibraryCallKitVinline_fp_conversions6MnMvmIntrinsicsCID__i_; +text: .text%__1cZcatch_cleanup_intra_block6FpnENode_1pnFBlock_ii_v_: lcm.o; +text: .text%__1cCosbCis_thread_cpu_time_supported6F_i_; +text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_; +text: 
.text%__1cNdefaultStreamMhas_log_file6M_i_; +text: .text%__1cNcmovL_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cRalign_object_size6Fl_l_; +text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_; +text: .text%__1cNstoreImmBNodeErule6kM_I_; +text: .text%__1cNstoreImmINodeErule6kM_I_; +text: .text%__1cLloadSSDNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cFParseMjump_if_join6MpnENode_2_2_; +text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__; +text: .text%__1cLloadSSINodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_; +text: .text%__1cFParseRdo_multianewarray6M_v_; +text: .text%__1cMloadConDNodeGis_Con6kM_I_; +text: .text%jni_NewWeakGlobalRef: jni.o; +text: .text%__1cPfilename_to_pid6Fpkc_i_: perfMemory_solaris.o; +text: .text%__1cTis_directory_secure6Fpkc_i_: perfMemory_solaris.o; +text: .text%jni_CallStaticVoidMethodV: jni.o; +text: .text%jni_CallStaticBooleanMethod: jni.o; +text: .text%__1cNGrowableArray4CpnNmethodOopDesc__Egrow6Mi_v_; +text: .text%__1cRInvocationCounterMreinitialize6Fi_v_; +text: .text%__1cXpartialSubtypeCheckNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTconvF2I_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cUInterpreterGeneratorVgenerate_native_entry6Mi_pC_; +text: .text%__1cUInterpreterGeneratorLlock_method6M_v_; +text: .text%__1cNGrowableArray4CpC_Egrow6Mi_v_; +text: .text%__1cNGrowableArray4CL_Egrow6Mi_v_; +text: .text%__1cObox_handleNodeHsize_of6kM_I_; +text: .text%__1cPsarL_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIPSOldGenKinitialize6MnNReservedSpace_Lpkci_v_; +text: .text%__1cIPSOldGenYinitialize_virtual_space6MnNReservedSpace_L_v_; +text: .text%__1cIPSOldGenPinitialize_work6Mpkci_v_; +text: .text%__1cNdivI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbCAbstractInterpreterGeneratorTgenerate_error_exit6Mpkc_pC_; +text: .text%__1cTAbstractInterpreterKinitialize6F_v_; +text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_; +text: .text%__1cOPSVirtualSpace2t6M_v_; +text: .text%__1cOPSVirtualSpaceKinitialize6MnNReservedSpace_L_i_; +text: .text%__1cZInterpreterMacroAssemblerSupdate_mdp_for_ret6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerPset_mdp_flag_at6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerWdispatch_only_noverify6MnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cKReflectionbFbasic_type_arrayklass_to_mirror6FpnMklassOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cMAdapterCache2t6M_v_; +text: .text%__1cSComputeAdapterInfoIdo_array6Mii_v_; +text: .text%__1cGatomll6Fpkcpx_i_: arguments.o; +text: .text%__1cJArgumentsRcheck_memory_size6Fxx_n0AJArgsRange__; +text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_; +text: .text%__1cYalign_to_allocation_size6FL_L_: heap.o; +text: .text%__1cJArgumentsRparse_memory_size6Fpkcpxx_n0AJArgsRange__; +text: .text%__1cJArgumentsXPropertyList_unique_add6FppnOSystemProperty_pkcpc_v_; +text: .text%__1cQAgentLibraryList2t6M_v_; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_; +text: .text%__1cMmulF_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJAssemblerGmovsbl6MpnMRegisterImpl_2_v_; +text: 
.text%__1cJAssemblerGmovswl6MpnMRegisterImpl_2_v_; +text: .text%__1cLOptoRuntimebDlazy_c2i_adapter_generation_C6FpnKJavaThread__pC_; +text: .text%__1cLOptoRuntimeVgenerate_handler_blob6FpCi_pnNSafepointBlob__; +text: .text%__1cRaddL_mem_rRegNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJAssemblerGmovzwl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerFmovdq6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cRComputeEntryStackIdo_float6M_v_; +text: .text%__1cJAssemblerFcmovl6Mn0AJCondition_pnMRegisterImpl_nHAddress__v_; +text: .text%__1cSReferenceProcessor2t6MnJMemRegion_iii_v_; +text: .text%__1cJAssemblerEaddl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cOGenerateOopMapTadd_to_ref_init_set6Mi_v_; +text: .text%__1cJAssemblerEcmpq6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerHucomisd6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cJAssemblerFidivl6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerFidivq6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEcdql6M_v_; +text: .text%__1cJAssemblerEcdqq6M_v_; +text: .text%__1cJAssemblerEleal6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerDorq6MnHAddress_i_v_; +text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_; +text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_; +text: .text%__1cMGCTaskThreadDrun6M_v_; +text: .text%__1cMGCTaskThreadFstart6M_v_; +text: .text%__1cMGCTaskThread2t6MpnNGCTaskManager_II_v_; +text: .text%__1cJStubQdDueueOregister_queue6Fp0_v_; +text: .text%__1cISubFNodeDsub6kMpknEType_3_3_; +text: .text%__1cJAssemblerFxaddl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cNGCTaskManagerKset_thread6MIpnMGCTaskThread__v_; +text: .text%__1cJAssemblerHldmxcsr6MnHAddress__v_; +text: .text%__1cJAssemblerFxorps6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cKcastPPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMPeriodicTask2t6ML_v_; +text: .text%__1cMPeriodicTaskGenroll6M_v_; +text: .text%__1cPOopTaskQdDueueSetOregister_queue6MipnMOopTaskQdDueue__v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC222i_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_; +text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_; +text: .text%__1cNTemplateTableKinitialize6F_v_; +text: .text%__1cNTemplateTableGlconst6Fi_v_; +text: .text%__1cNTemplateTableGdconst6Fi_v_; +text: .text%__1cNTemplateTableDldc6Fi_v_; +text: .text%__1cNTemplateTableHcastore6F_v_; +text: .text%__1cPdouble_quadword6Fpxxx_0_: templateTable_amd64.o; +text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableSgetfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableUjvmti_post_field_mod6Fii_v_; +text: .text%__1cNTemplateTableSputfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableUinvokevirtual_helper6FpnMRegisterImpl_22_v_; +text: .text%__1cEMIN24CL_6FTA0_0_; +text: .text%__1cRCardTableModRefBSbCpar_chunk_heapword_alignment6F_L_; +text: .text%__1cOMacroAssemblerPcorrected_idivl6MpnMRegisterImpl__i_; +text: .text%__1cOMacroAssemblerPcorrected_idivq6MpnMRegisterImpl__i_; +text: .text%__1cLNamedThread2t6M_v_; +text: .text%__1cLNamedThreadIset_name6MpkcE_v_; +text: .text%__1cOMacroAssemblerQserialize_memory6MpnMRegisterImpl_22_v_; +text: .text%__1cIDivDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIDivDNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: 
.text%__1cFStatebB_sub_Op_PartialSubtypeCheck6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_DivI6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_DivD6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_ConvL2F6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_ConvL2D6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_ConvF2I6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_ConvD2F6MpknENode__v_; +text: .text%__1cRcheck_if_clipping6FpknKRegionNode_rpnGIfNode_5_i_: cfgnode.o; +text: .text%__1cWcheck_compare_clipping6FipnGIfNode_pnHConNode_rpnENode__i_: cfgnode.o; +text: .text%__1cIciObjectOis_array_klass6M_i_; +text: .text%__1cScompP_rReg_memNodeFreloc6kM_i_; +text: .text%__1cKCastPPNodeJideal_reg6kM_I_; +text: .text%__1cLMoveF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFTypeDJis_finite6kM_i_; +text: .text%__1cLMoveL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cTconvL2D_reg_memNodeFreloc6kM_i_; +text: .text%__1cMdivD_immNodeFreloc6kM_i_; +text: .text%__1cMmulF_memNodeFreloc6kM_i_; +text: .text%__1cMaddF_memNodeFreloc6kM_i_; +text: .text%__1cLConvF2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLConvF2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLConvD2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOcompP_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cKciTypeFlowLStateVectorRdo_multianewarray6MpnQciBytecodeStream__v_; +text: .text%__1cMorI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cRCollectorCounters2t6Mpkci_v_; +text: .text%Unsafe_CompareAndSwapObject; +text: .text%__1cNSafepointBlob2n6FLI_pv_; +text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNSafepointBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cINegFNodeJideal_reg6kM_I_; +text: .text%__1cMVirtualSpace2t6M_v_; +text: .text%__1cMVirtualSpaceKinitialize6MnNReservedSpace_L_i_; +text: .text%__1cHMatcherQconvL2FSupported6F_ki_; +text: .text%__1cLConvD2FNodeJideal_reg6kM_I_; +text: .text%__1cLConvF2INodeJideal_reg6kM_I_; +text: .text%__1cLConvL2DNodeJideal_reg6kM_I_; +text: .text%__1cLConvL2FNodeJideal_reg6kM_I_; +text: .text%__1cXPartialSubtypeCheckNodeJideal_reg6kM_I_; +text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_; +text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_; +text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_; +text: .text%__1cJAssemblerEshrq6MpnMRegisterImpl__v_; +text: .text%__1cMsubF_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerEsubq6MnHAddress_i_v_; +text: .text%__1cLOptoRuntimeIgenerate6FpnFciEnv__v_; +text: .text%__1cMmulD_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cYSurvivorMutableSpacePool2t6MpnKPSYoungGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassUoop_is_instanceKlass6kM_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o; +text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_; +text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_; +text: .text%__1cKNoopGCTaskQcreate_on_c_heap6F_p0_; +text: .text%__1cJArgumentsbSparse_java_tool_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cWResolveOopMapConflictsOreport_results6kM_i_; +text: .text%__1cMCodeHeapPool2t6MpnICodeHeap_pkci_v_; +text: 
.text%__1cJAssemblerFxchgl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJArgumentsbNparse_java_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJAssemblerFxchgq6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJArgumentsVfinalize_vm_init_args6FpnMSysClassPath_i_i_; +text: .text%__1cJArgumentsWparse_each_vm_init_arg6FpknOJavaVMInitArgs_pnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_; +text: .text%__1cJAssemblerIcmpxchgl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cINegFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJArgumentsZcheck_vm_args_consistency6F_i_; +text: .text%__1cICodeHeap2t6M_v_; +text: .text%__1cJArgumentsbOparse_java_compiler_environment_variable6F_v_; +text: .text%__1cHVM_ExitNset_vm_exited6F_i_; +text: .text%__1cICodeHeapHreserve6MLLL_i_; +text: .text%__1cQRelocationHolder2t6M_v_; +text: .text%__1cICodeHeapFclear6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o; +text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cJArgumentsSset_bytecode_flags6F_v_; +text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_; +text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_; +text: .text%__1cQno_shared_spaces6F_v_: arguments.o; +text: .text%__1cJArgumentsMget_property6Fpkc_2_; +text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_; +text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cJArgumentsVprocess_settings_file6Fpkcii_i_; +text: .text%__1cNGCTaskManagerKthreads_do6MpnNThreadClosure__v_; +text: .text%__1cNGCTaskManagerKinitialize6M_v_; +text: .text%__1cNGCTaskManager2t6MI_v_; +text: .text%__1cXSynchronizedGCTaskQdDueue2t6MpnLGCTaskQdDueue_pnFMutex__v_; +text: .text%__1cDhpiKinitialize6F_i_; +text: .text%__1cDhpiZinitialize_socket_library6F_i_; +text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_; +text: .text%__1cRInlineCacheBufferKinitialize6F_v_; +text: .text%__1cWInlineCacheBuffer_init6F_v_; +text: .text%__1cJAssemblerHclflush6MnHAddress__v_; +text: .text%__1cOAbstractICacheKinitialize6F_v_; +text: .text%__1cLGCTaskQdDueueQcreate_on_c_heap6F_p0_; +text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_; +text: .text%__1cHGCStats2t6M_v_; +text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_; +text: .text%__1cMSysClassPathPexpand_endorsed6M_v_; +text: .text%__1cMSysClassPath2T6M_v_; +text: .text%__1cLicache_init6F_v_; +text: .text%__1cYGCAdaptivePolicyCounters2t6MpkciipnSAdaptiveSizePolicy__v_; +text: .text%__1cHVM_ExitbJwait_for_threads_in_native_to_block6F_i_; +text: .text%__1cJAssemblerHstmxcsr6MnHAddress__v_; +text: .text%__1cJAssemblerFaddss6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerFsubss6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cTICacheStubGeneratorVgenerate_icache_flush6MppFpCii_i_v_; +text: .text%__1cMSysClassPath2t6Mpkc_v_; +text: .text%__1cJArgumentsWinit_system_properties6F_v_; +text: .text%__1cJAssemblerFmulss6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerFdivss6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cJAssemblerFaddsd6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_; +text: .text%__1cJAssemblerFsubsd6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cOchunkpool_init6F_v_; +text: .text%__1cJAssemblerFmulsd6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerFdivsd6MpnRFloatRegisterImpl_2_v_; +text: 
.text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_; +text: .text%__1cJAssemblerGsqrtsd6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cHVM_ExitEdoit6M_v_; +text: .text%__1cRArgumentOopFinderDset6MinJBasicType__v_; +text: .text%__1cWAdjoiningVirtualSpaces2t6MnNReservedSpace_LLL_v_; +text: .text%__1cUAdjoiningGenerations2t6MnNReservedSpace_LLLLLLL_v_; +text: .text%__1cHOrLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cZCompiledArgumentOopFinderRhandle_oop_offset6M_v_; +text: .text%__1cJAssemblerFxorps6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerFxorpd6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cJAssemblerFxorpd6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerJcvtsi2ssl6MpnRFloatRegisterImpl_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerJcvtsi2ssq6MpnRFloatRegisterImpl_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerJcvtsi2sdl6MpnRFloatRegisterImpl_pnMRegisterImpl__v_; +text: .text%__1cFframebAoops_compiled_arguments_do6MnMsymbolHandle_ipknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cJAssemblerJcvtsi2sdq6MpnRFloatRegisterImpl_pnMRegisterImpl__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: adaptiveSizePolicy.o; +text: .text%__1cSAdaptiveSizePolicy2t6ML_v_; +text: .text%__1cFframebDoops_interpreted_arguments_do6MnMsymbolHandle_ipnKOopClosure__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: regmask.o; +text: .text%__1cJAssemblerKcvttss2sil6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cJAssemblerKcvttss2siq6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: indexSet.o; +text: .text%__1cJAssemblerKcvttsd2sil6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cFframeVinterpreter_frame_mdp6kM_pC_; +text: .text%__1cPvm_init_globals6F_v_; +text: .text%__1cJAssemblerKcvttsd2siq6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_; +text: .text%__1cQSystemDictionaryKmethods_do6FpFpnNmethodOopDesc__v_v_; +text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_; +text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_; +text: .text%__1cQSystemDictionarybDinitialize_basic_type_mirrors6FpnGThread__v_; +text: .text%__1cJAssemblerIcvtss2sd6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cJAssemblerIcvtsd2ss6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cMinit_globals6F_i_; +text: .text%__1cMexit_globals6F_v_; +text: .text%__1cOMacroAssemblerKdecrementl6MpnMRegisterImpl_i_v_; +text: .text%__1cHVM_ExitEname6kM_pkc_; +text: .text%__1cKcastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o; +text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_; +text: .text%__1cMPeriodicTask2T5B6M_v_; +text: .text%__1cMPeriodicTaskLis_enrolled6kM_i_; +text: .text%__1cNMemoryServicebFadd_parallel_scavenge_heap_info6FpnUParallelScavengeHeap__v_; +text: .text%__1cMPeriodicTaskJdisenroll6M_v_; +text: .text%__1cSset_init_completed6F_v_; +text: .text%__1cMadapter_init6F_v_; +text: .text%__1cTI2CAdapterGeneratorKinitialize6F_v_; +text: .text%__1cNMemoryServiceXadd_psYoung_memory_pool6FpnKPSYoungGen_pnNMemoryManager_4_v_; +text: .text%__1cTC2IAdapterGeneratorKinitialize6F_v_; +text: .text%__1cOstackSlotPOperFclone6kM_pnIMachOper__; +text: .text%__1cObox_handleNodeFclone6kM_pnENode__; +text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_; +text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: 
ad_amd64_pipeline.o; +text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_; +text: .text%__1cKmutex_init6F_v_; +text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cNTemplateTableF_goto6F_v_; +text: .text%__1cNTemplateTableGgoto_w6F_v_; +text: .text%__1cNTemplateTableFjsr_w6F_v_; +text: .text%__1cNTemplateTableDjsr6F_v_; +text: .text%__1cNinstanceKlassZrelease_C_heap_structures6M_v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl_3_v_; +text: .text%__1cFJNIidKdeallocate6Fp0_v_; +text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o; +text: .text%__1cStemplateTable_init6F_v_; +text: .text%__1cNTemplateTableNpd_initialize6F_v_; +text: .text%__1cSinstanceKlassKlassUoop_is_instanceKlass6kM_i_; +text: .text%__1cNTemplateTableDnop6F_v_; +text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_; +text: .text%__1cNTemplateTableLaconst_null6F_v_; +text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cKVM_VersionKinitialize6F_v_; +text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_; +text: .text%__1cPVM_Version_init6F_v_; +text: .text%__1cNTemplateTableGbipush6F_v_; +text: .text%__1cNTemplateTableGsipush6F_v_; +text: .text%__1cYVM_Version_StubGeneratorTgenerate_getPsrInfo6M_pC_; +text: .text%__1cNTemplateTableGldc2_w6F_v_; +text: .text%__1cNTemplateTableFiload6F_v_; +text: .text%__1cNTemplateTableLfast_iload26F_v_; +text: .text%__1cNTemplateTableKfast_iload6F_v_; +text: .text%__1cNTemplateTableFlload6F_v_; +text: .text%__1cNTemplateTableFfload6F_v_; +text: .text%__1cNTemplateTableFdload6F_v_; +text: .text%__1cNTemplateTableFaload6F_v_; +text: .text%__1cVInterfaceSupport_init6F_v_; +text: .text%__1cNTemplateTableKwide_iload6F_v_; +text: .text%__1cNTemplateTableKwide_lload6F_v_; +text: .text%__1cNTemplateTableKwide_fload6F_v_; +text: .text%__1cNTemplateTableKwide_dload6F_v_; +text: .text%__1cNTemplateTableKwide_aload6F_v_; +text: .text%__1cScheck_ThreadShadow6F_v_; +text: .text%__1cNTemplateTableGiaload6F_v_; +text: .text%__1cNTemplateTableGlaload6F_v_; +text: .text%__1cNTemplateTableGfaload6F_v_; +text: .text%__1cNTemplateTableGdaload6F_v_; +text: .text%__1cNTemplateTableGaaload6F_v_; +text: .text%__1cNTemplateTableGbaload6F_v_; +text: .text%__1cNTemplateTableGcaload6F_v_; +text: .text%__1cNTemplateTableMfast_icaload6F_v_; +text: .text%__1cNTemplateTableGsaload6F_v_; +text: .text%__1cSReferenceProcessorMinit_statics6F_v_; +text: .text%__1cXreferenceProcessor_init6F_v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl_33_v_; +text: .text%__1cURecompilationMonitorbGstart_recompilation_monitor_task6F_v_; +text: .text%__1cZInterpreterMacroAssemblerUdispatch_only_normal6MnITosState__v_; +text: .text%__1cNTemplateTableHaload_06F_v_; +text: .text%__1cNTemplateTableGistore6F_v_; +text: .text%__1cNTemplateTableGlstore6F_v_; +text: .text%__1cNTemplateTableGfstore6F_v_; +text: .text%__1cNTemplateTableGdstore6F_v_; +text: .text%__1cNTemplateTableGastore6F_v_; +text: .text%__1cNTemplateTableLwide_istore6F_v_; +text: .text%__1cNTemplateTableLwide_lstore6F_v_; +text: .text%__1cNTemplateTableLwide_fstore6F_v_; +text: .text%__1cNTemplateTableLwide_dstore6F_v_; +text: 
.text%__1cNTemplateTableLwide_astore6F_v_; +text: .text%__1cNTemplateTableHiastore6F_v_; +text: .text%__1cNTemplateTableHlastore6F_v_; +text: .text%__1cNTemplateTableHfastore6F_v_; +text: .text%__1cNTemplateTableHdastore6F_v_; +text: .text%__1cNTemplateTableHaastore6F_v_; +text: .text%__1cNTemplateTableHbastore6F_v_; +text: .text%__1cNMemoryServiceVadd_psOld_memory_pool6FpnIPSOldGen_pnNMemoryManager__v_; +text: .text%__1cNTemplateTableHsastore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerbFset_method_data_pointer_for_bcp6M_v_; +text: .text%__1cKPSYoungGenUset_space_boundaries6MLL_v_; +text: .text%__1cKPSYoungGenbGcompute_initial_space_boundaries6M_v_; +text: .text%__1cKPSYoungGenPinitialize_work6M_v_; +text: .text%__1cKPSYoungGenKinitialize6MnNReservedSpace_L_v_; +text: .text%__1cNTemplateTableDpop6F_v_; +text: .text%__1cNTemplateTableEpop26F_v_; +text: .text%__1cNTemplateTableDdup6F_v_; +text: .text%__1cNTemplateTableGdup_x16F_v_; +text: .text%__1cNTemplateTableGdup_x26F_v_; +text: .text%__1cNTemplateTableEdup26F_v_; +text: .text%__1cNTemplateTableHdup2_x16F_v_; +text: .text%__1cNTemplateTableHdup2_x26F_v_; +text: .text%__1cNTemplateTableEswap6F_v_; +text: .text%__1cKPSYoungGenYinitialize_virtual_space6MnNReservedSpace_L_v_; +text: .text%__1cKPSYoungGen2t6MLLL_v_; +text: .text%__1cNTemplateTableEidiv6F_v_; +text: .text%__1cNTemplateTableEirem6F_v_; +text: .text%__1cNTemplateTableElmul6F_v_; +text: .text%__1cNTemplateTableEldiv6F_v_; +text: .text%__1cNTemplateTableElrem6F_v_; +text: .text%__1cNTemplateTableElshl6F_v_; +text: .text%__1cNTemplateTableElshr6F_v_; +text: .text%__1cNTemplateTableFlushr6F_v_; +text: .text%__1cRaddL_rReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRaddL_mem_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableEineg6F_v_; +text: .text%__1cNTemplateTableElneg6F_v_; +text: .text%__1cLVtableStubsKinitialize6F_v_; +text: .text%__1cNTemplateTableEfneg6F_v_; +text: .text%__1cNTemplateTableEdneg6F_v_; +text: .text%__1cNTemplateTableEiinc6F_v_; +text: .text%__1cNTemplateTableJwide_iinc6F_v_; +text: .text%__1cMincL_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableElcmp6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o; +text: .text%__1cKPSScavengeKinitialize6F_v_; +text: .text%__1cQinterpreter_init6F_v_; +text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cNMemoryServiceWadd_psPerm_memory_pool6FpnJPSPermGen_pnNMemoryManager__v_; +text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_; +text: .text%__1cNTemplateTableDret6F_v_; +text: .text%__1cNTemplateTableIwide_ret6F_v_; +text: .text%__1cNTemplateTableLtableswitch6F_v_; +text: .text%__1cNTemplateTableMlookupswitch6F_v_; +text: .text%__1cNTemplateTableRfast_linearswitch6F_v_; +text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_; +text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbEset_entry_points_for_all_bytes6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbCset_safepoints_for_all_bytes6M_v_; +text: .text%__1cOsalI_mem_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSPSPromotionManagerKinitialize6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o; +text: .text%__1cNTemplateTableIgetfield6Fi_v_; +text: .text%__1cNTemplateTableJgetstatic6Fi_v_; +text: .text%__1cNGrowableArray4CpnNMemoryManager__2t6Mii_v_; +text: 
.text%__1cNGrowableArray4CpnKMemoryPool__2t6Mii_v_; +text: .text%__1cNTemplateTableIputfield6Fi_v_; +text: .text%__1cNTemplateTableJputstatic6Fi_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psPromotionLAB.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o; +text: .text%__1cJPSPermGen2t6MnNReservedSpace_LLLLpkci_v_; +text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_; +text: .text%__1cIPSOldGen2t6MLLLpkci_v_; +text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_; +text: .text%__1cNTemplateTableNinvokespecial6Fi_v_; +text: .text%__1cNTemplateTableMinvokestatic6Fi_v_; +text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_; +text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_; +text: .text%__1cNTemplateTableE_new6F_v_; +text: .text%__1cNTemplateTableInewarray6F_v_; +text: .text%__1cNTemplateTableJanewarray6F_v_; +text: .text%__1cNTemplateTableLarraylength6F_v_; +text: .text%__1cNTemplateTableJcheckcast6F_v_; +text: .text%__1cNTemplateTableKinstanceof6F_v_; +text: .text%__1cNTemplateTableL_breakpoint6F_v_; +text: .text%__1cNTemplateTableGathrow6F_v_; +text: .text%__1cNTemplateTableMmonitorenter6F_v_; +text: .text%__1cNTemplateTableLmonitorexit6F_v_; +text: .text%__1cNTemplateTableEwide6F_v_; +text: .text%__1cNTemplateTableOmultianewarray6F_v_; +text: .text%__1cIPSOldGen2t6MnNReservedSpace_LLLLpkci_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: tenuredGeneration.o; +text: .text%__1cQvtableStubs_init6F_v_; +text: .text%__1cQaccessFlags_init6F_v_; +text: .text%__1cSInterpreterRuntimeYthrow_ClassCastException6FpnKJavaThread_pnHoopDesc__v_; +text: .text%__1cSInterpreterRuntimeSupdate_mdp_for_ret6FpnKJavaThread_i_v_; +text: .text%__1cNeventlog_init6F_v_; +text: .text%__1cOMacroAssemblerGc2bool6MpnMRegisterImpl__v_; +text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_; +text: .text%__1cFVTuneEexit6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o; +text: .text%__1cTPSAlwaysTrueClosure2t6M_v_: psMarkSweep.o; +text: .text%__1cXSignatureHandlerLibraryQset_handler_blob6F_pC_; +text: .text%__1cNGrowableArray4CpC_2t6Mii_v_; +text: .text%__1cNGrowableArray4CL_2t6Mii_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_StackOverflowError_handler6M_pC_; +text: .text%__1cOMacroAssemblerRsign_extend_short6MpnMRegisterImpl__v_; +text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_ClassCastException_handler6M_pC_; +text: .text%__1cGThreadWset_as_starting_thread6M_i_; +text: .text%__1cLPSMarkSweepKinitialize6F_v_; +text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o; +text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o; +text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o; +text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o; +text: .text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cHRetDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cNWatcherThread2t6M_v_; +text: .text%__1cNWatcherThreadDrun6M_v_; +text: .text%__1cNWatcherThreadFstart6F_v_; +text: .text%__1cNWatcherThreadEstop6F_v_; +text: .text%__1cOMacroAssemblerQsign_extend_byte6MpnMRegisterImpl__v_; +text: .text%__1cKJavaThread2t6M_v_; +text: .text%__1cHRetDataJfixup_ret6MinQmethodDataHandle__pC_; +text: .text%__1cKvtune_init6F_v_; +text: 
.text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cbAPSGCAdaptivePolicyCounters2t6MpkciipnUPSAdaptiveSizePolicy__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psAdaptiveSizePolicy.o; +text: .text%__1cKDictionaryKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_; +text: .text%__1cUInterpreterGeneratorTgenerate_math_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cUInterpreterGeneratorXgenerate_abstract_entry6M_pC_; +text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_; +text: .text%__1cKDictionaryKfree_entry6MpnPDictionaryEntry__v_; +text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_; +text: .text%__1cKDictionary2t6Mi_v_; +text: .text%__1cJBytecodesKinitialize6F_v_; +text: .text%__1cObytecodes_init6F_v_; +text: .text%__1cUPSAdaptiveSizePolicy2t6MLLLLLddI_v_; +text: .text%__1cJBytecodesNpd_initialize6F_v_; +text: .text%__1cHCompileRpd_compiler2_init6F_v_; +text: .text%__1cKC2CompilerKinitialize6M_v_; +text: .text%__1cFStateQ_sub_Op_TailJump6MpknENode__v_; +text: .text%__1cMorL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_throw_exception6M_v_; +text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cWinvocationCounter_init6F_v_; +text: .text%__1cQPlaceholderTable2t6Mi_v_; +text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_; +text: .text%__1cFStateL_sub_Op_OrL6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_NegF6MpknENode__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_amd64_expand.o; +text: .text%__1cQprint_statistics6F_v_; +text: .text%__1cLbefore_exit6FpnKJavaThread__v_; +text: .text%__1cFStateP_sub_Op_MoveL2D6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_MoveF2I6MpknENode__v_; +text: .text%__1cHvm_exit6Fi_v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_; +text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectPcompute_offsets6F_v_; +text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_; +text: .text%__1cOThreadCriticalKinitialize6F_v_; +text: .text%__1cRAllocateTLSOffset6F_v_: threadLS_solaris_amd64.o; +text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_; +text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cYsun_reflect_ConstantPoolPcompute_offsets6F_v_; +text: .text%__1cbIjava_security_AccessControlContextPcompute_offsets6F_v_; +text: .text%__1cQjava_lang_SystemPcompute_offsets6F_v_; +text: .text%__1cPjava_nio_BufferPcompute_offsets6F_v_; +text: .text%__1cFStateO_sub_Op_CMoveD6MpknENode__v_; +text: .text%__1cZsun_misc_AtomicLongCSImplPcompute_offsets6F_v_; +text: .text%__1cFStateO_sub_Op_CastPP6MpknENode__v_; +text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_; +text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_; +text: .text%__1cLJavaClassesPcompute_offsets6F_v_; +text: .text%__1cPGlobalTLABStats2t6M_v_; +text: .text%__1cQjavaClasses_init6F_v_; +text: .text%__1cMTailJumpNode2t6MpnENode_22222_v_; 
+text: .text%jni_ToReflectedMethod: jni.o; +text: .text%__1cSThreadLocalStorageEinit6F_v_; +text: .text%__1cNThreadServiceEinit6F_v_; +text: .text%__1cYjni_GetBooleanField_addr6F_pC_; +text: .text%__1cVjni_GetByteField_addr6F_pC_; +text: .text%__1cVjni_GetCharField_addr6F_pC_; +text: .text%__1cWjni_GetShortField_addr6F_pC_; +text: .text%__1cUjni_GetIntField_addr6F_pC_; +text: .text%__1cVjni_GetLongField_addr6F_pC_; +text: .text%__1cWjni_GetFloatField_addr6F_pC_; +text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_; +text: .text%__1cXjni_GetDoubleField_addr6F_pC_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: phase.o; +text: .text%__1cJTimeStampMmilliseconds6kM_x_; +text: .text%__1cTConstantDoubleValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_; +text: .text%__1cKPerfMemoryUcreate_memory_region6FL_v_; +text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_L_; +text: .text%__1cUdelete_shared_memory6FpcL_v_: perfMemory_solaris.o; +text: .text%__1cUcreate_shared_memory6FL_pc_: perfMemory_solaris.o; +text: .text%__1cOtailjmpIndNodeFreloc6kM_i_; +text: .text%__1cSmmap_create_shared6FL_pc_: perfMemory_solaris.o; +text: .text%__1cETypeRInitialize_shared6FpnHCompile__v_; +text: .text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cbAcreate_sharedmem_resources6Fpkc1L_i_: perfMemory_solaris.o; +text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cRmake_user_tmp_dir6Fpkc_i_: perfMemory_solaris.o; +text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_; +text: .text%__1cLremove_file6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cWget_sharedmem_filename6Fpkci_pc_: perfMemory_solaris.o; +text: .text%__1cNget_user_name6Fi_pc_: perfMemory_solaris.o; +text: .text%__1cQget_user_tmp_dir6Fpkc_pc_: perfMemory_solaris.o; +text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cFciEnvXget_or_create_exception6MrpnI_jobject_nMsymbolHandle__pnKciInstance__; +text: .text%__1cMloadConFNodeGis_Con6kM_I_; +text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cKPerfMemoryHdestroy6F_v_; +text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cKPerfMemoryKinitialize6F_v_; +text: .text%__1cPperfMemory_exit6F_v_; +text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_; +text: .text%__1cPperfMemory_init6F_v_; +text: .text%__1cNGrowableArray4CpnIPerfData__JappendAll6Mpk2_v_; +text: .text%__1cMPerfDataListFclone6M_p0_; +text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cMciKlassKlassEmake6F_p0_; +text: .text%__1cMPerfDataList2t6Mp0_v_; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__; +text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__; +text: .text%__1cMmulD_memNodeFreloc6kM_i_; +text: .text%__1cPPerfDataManagerHdestroy6F_v_; +text: .text%__1cMsubD_immNodeFreloc6kM_i_; +text: .text%__1cMsubF_memNodeFreloc6kM_i_; +text: .text%lookupDirectBufferClasses: jni.o; +text: .text%__1cbDinitializeDirectBufferSupport6FpnHJNIEnv___i_: jni.o; +text: .text%__1cVquicken_jni_functions6F_v_; +text: .text%JNI_CreateJavaVM; +text: .text%__1cFParseWprofile_null_checkcast6M_v_; +text: 
.text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_; +text: .text%__1cOsalI_mem_1NodeFreloc6kM_i_; +text: .text%__1cIciMethodMvtable_index6M_i_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_; +text: .text%__1cMmulI_memNodeFreloc6kM_i_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_; +text: .text%__1cMincL_memNodeFreloc6kM_i_; +text: .text%__1cRaddL_mem_rRegNodeFreloc6kM_i_; +text: .text%__1cRaddL_mem_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMincL_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jniFastGetField_amd64.o; +text: .text%__1cNcmovL_memNodeFreloc6kM_i_; +text: .text%__1cKJNIHandlesKinitialize6F_v_; +text: .text%__1cQjni_handles_init6F_v_; +text: .text%JVM_InitProperties; +text: .text%JVM_Halt; +text: .text%JVM_MaxMemory; +text: .text%JVM_GetClassDeclaredMethods; +text: .text%__1cKCMoveDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOsalI_mem_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHRetDataKis_RetData6M_i_; +text: .text%JVM_InitializeSocketLibrary; +text: .text%JVM_Socket; +text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_; +text: .text%__1cPOopTaskQdDueueSet2t6Mi_v_; +text: .text%__1cbEinitialize_converter_functions6F_v_; +text: .text%JVM_SupportsCX8; +text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_; +text: .text%__1cTcompilerOracle_init6F_v_; +text: .text%__1cOCompilerOracleRparse_from_string6Fpkc_v_; +text: .text%__1cOCompilerOraclePparse_from_file6F_v_; +text: .text%__1cHcc_file6F_pkc_: compilerOracle.o; +text: .text%__1cKTypeOopPtrEmake6FnHTypePtrDPTR_i_pk0_; +text: .text%__1cKTypeOopPtrFxdual6kM_pknEType__; +text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_; +text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_; +text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o; +text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cNGrowableArray4CpnMJvmtiEnvBase__2t6Mii_v_; +text: .text%__1cRJvmtiEventEnabled2t6M_v_; +text: .text%__1cRciArrayKlassKlassUis_array_klass_klass6M_i_; +text: .text%__1cRJvmtiEventEnabledFclear6M_v_; +text: .text%__1cNGrowableArray4CpnOCompilerThread__2t6Mii_v_; +text: .text%__1cFParseNfetch_monitor6MipnENode_2_2_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o; +text: .text%__1cNGrowableArray4CpnIciMethod__Egrow6Mi_v_; +text: .text%__1cNCompileBrokerQset_should_block6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_start6F_v_; +text: .text%__1cPGenerationSizerQinitialize_flags6M_v_; +text: .text%__1cUJvmtiEventControllerHvm_init6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_death6F_v_; +text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_; +text: .text%__1cUParallelScavengeHeapbCsupports_inline_contig_alloc6kM_i_; +text: .text%__1cUParallelScavengeHeapItop_addr6kM_ppnIHeapWord__; +text: .text%__1cNCompileBrokerQcompilation_init6FpnQAbstractCompiler__v_; +text: .text%__1cUParallelScavengeHeapIend_addr6kM_ppnIHeapWord__; +text: 
.text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o; +text: .text%__1cLJvmtiExportRenter_start_phase6F_v_; +text: .text%__1cUParallelScavengeHeapEheap6F_p0_; +text: .text%__1cUParallelScavengeHeapNgc_threads_do6kMpnNThreadClosure__v_; +text: .text%__1cUParallelScavengeHeapYpermanent_object_iterate6MpnNObjectClosure__v_; +text: .text%__1cLJvmtiExportQenter_live_phase6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_start6F_v_; +text: .text%__1cOcompiler2_init6F_v_; +text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_death6F_v_; +text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_; +text: .text%__1cUParallelScavengeHeapMmax_capacity6kM_L_; +text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_; +text: .text%__1cMaddF_regNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cUParallelScavengeHeapPpost_initialize6M_v_; +text: .text%__1cUParallelScavengeHeapKinitialize6M_i_; +text: .text%__1cHoopDescLheader_size6F_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o; +text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cPClassFileParserXjava_lang_Class_fix_pre6MpnOobjArrayHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cPClassFileParserYjava_lang_Class_fix_post6Mpi_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o; +text: .text%__1cNGrowableArray4CpnPJvmtiRawMonitor__2t6Mii_v_; +text: .text%__1cNGrowableArray4CpnPJvmtiRawMonitor__Uclear_and_deallocate6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o; +text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_; +text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_; +text: .text%__SLIP.DELETER__C: ostream.o; +text: .text%__1cWcompilationPolicy_init6F_v_; +text: .text%__1cMostream_exit6F_v_; +text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cTtypeArrayKlassKlassVoop_is_typeArrayKlass6kM_i_; +text: .text%__1cbCTwoGenerationCollectorPolicyMrem_set_name6M_nJGenRemSetEName__; +text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cQostream_init_log6F_v_; +text: .text%__1cIUniversePcheck_alignment6FLLpkc_v_; +text: .text%__1cIUniverseHgenesis6FpnGThread__v_; +text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_; +text: .text%__1cMostream_init6F_v_; +text: .text%__1cNdefaultStreamEinit6M_v_; +text: .text%__1cIUniverseUreinitialize_itables6F_v_; +text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cNuniverse_init6F_i_; +text: .text%__1cIUniversePinitialize_heap6F_i_; +text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_; +text: .text%__1cOuniverse2_init6F_v_; +text: .text%__1cSuniverse_post_init6F_v_; +text: .text%__1cIUniverseYcompute_base_vtable_size6F_v_; +text: .text%__1cCosXnon_memory_address_word6F_pc_; +text: .text%__1cCosGinit_26F_i_; +text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_; +text: .text%__1cCosEinit6F_v_; +text: .text%__1cCosHSolarisUsynchronization_init6F_v_; +text: .text%Unsafe_SetNativeLong; +text: .text%__1cCosHSolarisOlibthread_init6F_v_; +text: .text%__1cJLoadFNodeMstore_Opcode6kM_i_; +text: .text%__1cOisT2_libthread6F_i_; +text: .text%Unsafe_FreeMemory; +text: 
.text%__1cCosHSolarisXinstall_signal_handlers6F_v_; +text: .text%Unsafe_PageSize; +text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o; +text: .text%__1cNpriocntl_stub6FinGidtype_iipc_l_: os_solaris.o; +text: .text%__1cCosHSolarisRmpss_sanity_check6F_v_; +text: .text%__1cOLibraryCallKitWinline_native_hashcode6Mii_i_; +text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_; +text: .text%__1cCosOrelease_memory6FpcL_i_; +text: .text%__1cCosLsignal_wait6F_i_; +text: .text%JVM_RegisterUnsafeMethods; +text: .text%__1cVcheck_pending_signals6Fi_i_: os_solaris.o; +text: .text%__1cCosNsignal_notify6Fi_v_; +text: .text%__1cCosOsignal_init_pd6F_v_; +text: .text%__1cLClassLoaderQload_zip_library6F_v_; +text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_; +text: .text%__1cNmulI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cLClassLoaderKinitialize6F_v_; +text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_; +text: .text%__1cCosHSolarisPinit_signal_mem6F_v_; +text: .text%__1cNCollectedHeap2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o; +text: .text%__1cQclassLoader_init6F_v_; +text: .text%__1cCosSget_temp_directory6F_pkc_; +text: .text%__1cVLoaderConstraintTable2t6Mi_v_; +text: .text%__1cCosbDallocate_thread_local_storage6F_i_; +text: .text%__1cOcodeCache_init6F_v_; +text: .text%__1cVverificationType_init6F_v_; +text: .text%__1cVverificationType_exit6F_v_; +text: .text%__1cQVerificationTypeKinitialize6F_v_; +text: .text%__1cQVerificationTypeIfinalize6F_v_; +text: .text%__1cJCodeCacheKinitialize6F_v_; +text: .text%__1cNIdealLoopTreeQsplit_outer_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cKfix_parent6FpnNIdealLoopTree_1_v_: loopnode.o; +text: .text%__1cCosHSolarisQsignal_sets_init6F_v_; +text: .text%__1cTClassLoadingServiceEinit6F_v_; +text: .text%__1cTClassLoadingServiceVnotify_class_unloaded6FpnNinstanceKlass_i_v_; +text: .text%__1cCosScreate_main_thread6FpnGThread__i_; +text: .text%__1cNIdealLoopTreeUmerge_many_backedges6MpnOPhaseIdealLoop__v_; +text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o; +text: .text%__1cRLowMemoryDetectorKinitialize6F_v_; +text: .text%__1cLmethodKlassOset_alloc_size6MI_v_; +text: .text%__1cNExceptionBlob2n6FLI_pv_; +text: .text%__1cNExceptionBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNExceptionBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cQUncommonTrapBlob2n6FLI_pv_; +text: .text%__1cQUncommonTrapBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cQUncommonTrapBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cSDeoptimizationBlob2n6FLI_pv_; +text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_; +text: .text%__1cSDeoptimizationBlob2t6MpnKCodeBuffer_ipnJOopMapSet_iiii_v_; +text: .text%__1cRLowMemoryDetectorUhas_pending_requests6F_i_; +text: .text%__1cCosbDinit_system_properties_values6F_v_; +text: .text%__1cCosHSolarisWinitialize_system_info6F_v_; +text: .text%__1cCosPphysical_memory6F_X_; +text: .text%__1cMFastLockNodeLis_FastLock6kM_pk0_; +text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_; +text: .text%__1cXLowMemoryDetectorThreadbCis_hidden_from_external_view6kM_i_; +text: .text%__1cSThreadLocalStorageHpd_init6F_v_; +text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: machnode.o; +text: .text%__1cPmanagement_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: cmsAdaptiveSizePolicy.o; +text: 
.text%__1cKManagementEinit6F_v_; +text: .text%__1cKManagementKinitialize6FpnGThread__v_; +text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o; +text: .text%__1cOmarksweep_init6F_v_; +text: .text%__1cCosXis_server_class_machine6F_i_; +text: .text%__1cJMarkSweepQKeepAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cNReservedSpace2t6MLLipc_v_; +text: .text%__1cJMarkSweepOIsAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cCosZset_memory_serialize_page6FpC_v_; +text: .text%__1cCosNset_boot_path6Fcc_i_; +text: .text%__1cJMarkSweepSFollowStackClosure2t6M_v_: markSweep.o; +text: .text%__1cNReservedSpaceUpage_align_size_down6FL_L_; +text: .text%__1cNReservedSpaceYallocation_align_size_up6FL_L_; +text: .text%__1cNGrowableArray4CpnKOSRAdapter__2t6Mii_v_; +text: .text%__1cXonStackReplacement_init6F_v_; +text: .text%__1cSOnStackReplacementKinitialize6F_v_; +text: .text%__1cJMarkSweepRFollowRootClosure2t6M_v_: markSweep.o; +text: .text%__1cCosLinit_random6Fl_v_; +text: .text%__1cHOrLNodeJideal_reg6kM_I_; +text: .text%__1cOvmStructs_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o; +text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_; +text: .text%__1cJMarkSweepSMarkAndPushClosure2t6M_v_: markSweep.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o; +text: .text%__1cQVMOperationQdDueue2t6M_v_; +text: .text%__1cCosGstrdup6Fpkc_pc_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodLiveness.o; +text: .text%__1cCosXterminate_signal_thread6F_v_; +text: .text%__1cCosLsignal_init6F_v_; +text: .text%__1cMsubD_immNodeKconst_size6kM_i_; +text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o; +text: .text%__1cOtailjmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cHMatcherVfind_callee_arguments6FpnNsymbolOopDesc_ipi_pnLOptoRegPair__; +text: .text%__1cIVMThreadGcreate6F_v_; +text: .text%__1cIVMThread2t6M_v_; +text: .text%__1cNSharedRuntimeUlookup_function_DD_D6FrpFpnHJNIEnv__pnH_jclass_dd_dpkc_v_; +text: .text%__1cNSharedRuntimebIinitialize_StrictMath_entry_points6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: sharedHeap.o; +text: .text%__1cIVMThreadDrun6M_v_; +text: .text%__1cWResolveOopMapConflictsUdo_potential_rewrite6MpnGThread__nMmethodHandle__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o; +text: .text%__1cNCellTypeStateImake_top6F_0_; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_2_v_; +text: .text%__1cNCellTypeStateLmake_bottom6F_0_; +text: .text%__1cNcmovL_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRcheck_basic_types6F_v_; +text: .text%__1cLOptoRuntimeYgenerate_arraycopy_stubs6F_v_; +text: .text%__1cSobjArrayKlassKlassUoop_is_objArrayKlass6kM_i_; +text: .text%__1cNSharedRuntimebBgenerate_class_cast_message6FpnKJavaThread_pkc_pc_; +text: .text%__1cNSharedRuntimebBgenerate_class_cast_message6Fpkc2_pc_; +text: .text%__1cLOptoRuntimebPgenerate_polling_page_return_handler_blob6F_v_; +text: .text%__1cIVMThreadEloop6M_v_; +text: .text%__1cLOptoRuntimebSgenerate_polling_page_safepoint_handler_blob6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: matcher.o; +text: .text%__1cLOptoRuntimeUsetup_exception_blob6F_v_; +text: .text%__1cLOptoRuntimeWfill_in_exception_blob6F_v_; +text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_; +text: 
.text%__1cNMemoryManagerbDget_psScavenge_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cNMemoryManagerbEget_psMarkSweep_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cQPSGenerationPool2t6MpnIPSOldGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cJAssemblerFimull6MpnMRegisterImpl_2_v_; +text: .text%__1cLOptoRuntimebBgenerate_uncommon_trap_blob6F_v_; +text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_; +text: .text%__1cQPSGenerationPool2t6MpnJPSPermGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cNRegisterSaverYrestore_result_registers6FpnOMacroAssembler__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o; +text: .text%__1cORuntimeServiceYrecord_application_start6F_v_; +text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__L_; +text: .text%__1cICarSpaceEinit6F_v_; +text: .text%__1cNcarSpace_init6F_v_; +text: .text%__1cORuntimeServiceEinit6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cRAlwaysTrueClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeSfetch_monitor_Type6F_pknITypeFunc__; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_i_v_; +text: .text%__1cLStatSamplerKinitialize6F_v_; +text: .text%__1cLStatSamplerGengage6F_v_; +text: .text%__1cLStatSamplerJdisengage6F_v_; +text: .text%__1cLStatSamplerHdestroy6F_v_; +text: .text%__1cSCommandLineFlagsExKuintxAtPut6FnXCommandLineFlagWithType_L_v_; +text: .text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__; +text: .text%__1cLOptoRuntimeUmultianewarray5_Type6F_pknITypeFunc__; +text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_; +text: .text%__1cLStatSamplerUcreate_misc_perfdata6F_v_; +text: .text%__1cLStatSamplerXcreate_sampled_perfdata6F_v_; +text: .text%__1cJAssemblerDorq6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEsarq6MpnMRegisterImpl__v_; +text: .text%__1cLOptoRuntimeUmultianewarray4_Type6F_pknITypeFunc__; +text: .text%__1cJAssemblerEshll6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEshlq6MpnMRegisterImpl__v_; +text: .text%__1cUEdenMutableSpacePool2t6MpnKPSYoungGen_pnMMutableSpace_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cNStubGeneratorQgenerate_initial6M_v_; +text: .text%__1cNStubGeneratorXgenerate_atomic_add_ptr6M_pC_; +text: .text%__1cNStubGeneratorTgenerate_atomic_add6M_pC_; +text: .text%__1cNStubGeneratorbCgenerate_atomic_cmpxchg_long6M_pC_; +text: .text%__1cNStubGeneratorXgenerate_atomic_cmpxchg6M_pC_; +text: .text%__1cNStubGeneratorYgenerate_atomic_xchg_ptr6M_pC_; +text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_; +text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_; +text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_; +text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_; +text: .text%__1cNStubGeneratorMgenerate_all6M_v_; +text: .text%__1cNStubGeneratorSgenerate_d2l_fixup6M_pC_; +text: .text%__1cNStubGeneratorSgenerate_d2i_fixup6M_pC_; +text: .text%__1cNStubGeneratorSgenerate_f2l_fixup6M_pC_; +text: .text%__1cNStubGeneratorSgenerate_f2i_fixup6M_pC_; +text: .text%__1cLOptoRuntimeUmultianewarray3_Type6F_pknITypeFunc__; +text: .text%__1cNStubGeneratorTgenerate_verify_oop6M_pC_; +text: .text%__1cNStubGeneratorVgenerate_verify_mxcsr6M_pC_; +text: .text%__1cNStubGeneratorYgenerate_get_previous_fp6M_pC_; +text: 
.text%__1cNStubGeneratorbIgenerate_handler_for_unsafe_access6M_pC_; +text: .text%__1cMStubRoutinesLinitialize16F_v_; +text: .text%__1cMStubRoutinesLinitialize26F_v_; +text: .text%__1cSstubRoutines_init16F_v_; +text: .text%__1cSstubRoutines_init26F_v_; +text: .text%__1cLMoveL2DNodeJideal_reg6kM_I_; +text: .text%__1cNGrowableArray4CpnTDerivedPointerEntry__2t6Mii_v_; +text: .text%__1cLMoveF2INodeJideal_reg6kM_I_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o; +text: .text%__1cNGrowableArray4CpnHMonitor__2t6Mii_v_; +text: .text%__1cLOptoRuntimeUmultianewarray2_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray1_Type6F_pknITypeFunc__; +text: .text%__1cQDoNothingClosure2t6M_v_: oopMap.o; +text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl__v_;
--- /dev/null 2009-07-29 04:07:07.141863912 +0100
+++ new/hotspot/make/solaris/makefiles/reorder_COMPILER2_i486 2009-08-01 04:16:53.351322189 +0100
@@ -0,0 +1,8395 @@
+data = R0x2000; +text = LOAD ?RXO; + + +text: .text%__1cQIndexSetIteratorEnext6M_I_: ifg.o; +text: .text%__1cSPSPromotionManagerWcopy_to_survivor_space6MpnHoopDesc__2_; +text: .text%__1cNinstanceKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQIndexSetIteratorQadvance_and_next6M_I_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: indexSet.o; +text: .text%__1cNSharedRuntimeElrem6Fxx_x_; +text: .text%__1cCosOjavaTimeMillis6F_x_; +text: .text%__1cJMarkSweepO_mark_and_push6FppnHoopDesc__v_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: chaitin.o; +text: .text%__1cNSharedRuntimeEldiv6Fxx_x_; +text: .text%__1cIPhaseIFGIadd_edge6MII_i_; +text: .text%__1cQIndexSetIterator2t6MpnIIndexSet__v_; +text: .text%__1cIMachNodeNrematerialize6kM_i_; +text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMOopTaskQdDueueKpop_global6MrpnHoopDesc__i_; +text: .text%__1cPOopTaskQdDueueSetPsteal_best_of_26MipirpnHoopDesc__i_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: live.o; +text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cIIndexSetLalloc_block6M_pn0AIBitBlock__; +text: .text%__1cHRegMaskFis_UP6kM_i_; +text: .text%__1cIMachNodeHis_Mach6M_p0_: ad_i486_misc.o; +text: .text%__1cDLRGOcompute_degree6kMr0_i_; +text: .text%__1cIIndexSetWalloc_block_containing6MI_pn0AIBitBlock__; +text: .text%__1cRMachSpillCopyNodeMis_SpillCopy6M_p0_: ad_i486.o; +text: .text%__1cENodeEjvms6kM_pnIJVMState__; +text: .text%__1cIMachNodeJideal_reg6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: classes.o; +text: .text%__1cJMarkSweepPmark_and_follow6FppnHoopDesc__v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: ad_i486_misc.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: classes.o; +text: .text%__1cHRegMaskJis_bound16kM_i_; +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: classes.o; +text: .text%__1cENodeHis_Copy6kM_I_: classes.o; +text: .text%__1cQObjectStartArrayMobject_start6MpnIHeapWord__2_: cardTableExtension.o; +text: .text%__1cRMachSpillCopyNodeHis_Copy6kM_I_: ad_i486.o; +text: .text%__1cNobjArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cENodeHis_Copy6kM_I_: ad_i486_misc.o; +text: .text%__1cETypeDcmp6Fkpk03_i_; +text: .text%__1cOPhaseIdealLoopOidom_no_update6kMpnENode__2_: loopnode.o; +text: .text%__1cHRegMaskJis_bound26kM_i_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: ad_i486_misc.o; +text:
.text%__1cHRegMaskESize6kM_I_; +text: .text%__1cNRelocIteratorEnext6M_i_: relocInfo.o; +text: .text%__1cJeRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cUGenericGrowableArrayLraw_at_grow6MipknEGrET__pv_; +text: .text%__1cJVectorSet2R6MI_rnDSet__; +text: .text%__1cIMachNodeHis_Mach6M_p0_: ad_i486.o; +text: .text%__1cXresource_allocate_bytes6FI_pc_; +text: .text%__1cDff16FI_i_; +text: .text%__1cJeRegPOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cIProjNodeHis_Proj6M_p0_; +text: .text%__1cENodeGis_CFG6kM_i_: classes.o; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopopts.o; +text: .text%__1cIIndexSetKinitialize6MI_v_; +text: .text%__1cMloadConINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPOopTaskQdDueueSetFsteal6MipirpnHoopDesc__i_; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopnode.o; +text: .text%__1cMloadConINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cHPhiNodeGis_Phi6M_p0_: cfgnode.o; +text: .text%__1cENodeGpinned6kM_i_: classes.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: ad_i486.o; +text: .text%__1cIIndexSetKfree_block6MI_v_; +text: .text%__1cIMachNodeGOpcode6kM_i_; +text: .text%__1cWPSScavengeRootsClosureGdo_oop6MppnHoopDesc__v_: psTasks.o; +text: .text%__1cITypeNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: ad_i486_misc.o; +text: .text%__1cENodeIout_grow6MI_v_; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_: callnode.o; +text: .text%__1cENodeHadd_req6Mp0_v_; +text: .text%__1cKTypeOopPtrFklass6kM_pnHciKlass__: type.o; +text: .text%__1cMPhaseChaitinTinterfere_with_live6MIpnIIndexSet__v_; +text: .text%__1cNRelocIteratorEnext6M_i_: nmethod.o; +text: .text%__1cETypeFuhash6Fkpk0_i_; +text: .text%__1cJCProjNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeNrematerialize6kM_i_: classes.o; +text: .text%__1cJloadPNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIrc_class6FnHOptoRegEName__nCRC__: ad_i486.o; +text: .text%__1cNMachIdealNodeErule6kM_I_: ad_i486.o; +text: .text%__1cKjmpDirNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cRMachSpillCopyNodeJideal_reg6kM_I_: ad_i486.o; +text: .text%__1cOlower_pressure6FpnDLRG_IpnFBlock_pI4_v_: ifg.o; +text: .text%__1cMget_live_bit6Fpii_i_: buildOopMap.o; +text: .text%__1cMPhaseChaitinLskip_copies6MpnENode__2_; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__: ad_i486_misc.o; +text: .text%__1cINodeHashLhash_delete6MpknENode__i_; +text: .text%__1cEDictGInsert6Mpv1i_1_; +text: .text%__1cICallNodeKmatch_edge6kMI_I_; +text: .text%__1cJMultiNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cHTypeIntCeq6kMpknEType__i_; +text: .text%__1cENodeGis_CFG6kM_i_: cfgnode.o; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_: ad_i486.o; +text: .text%__1cETypeJtype_dict6F_pnEDict__; +text: .text%__1cHPhiNodeGOpcode6kM_i_; +text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cENodeHdel_out6Mp0_v_: matcher.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: classes.o; +text: .text%__1cFArenaIcontains6kMpkv_i_; +text: .text%__1cINodeHashQhash_find_insert6MpnENode__2_; +text: .text%__1cKRegionNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cIPhaseIFGQeffective_degree6kMI_i_; +text: .text%__1cIProjNodeGis_CFG6kM_i_; +text: .text%__1cIUniverseMnon_oop_word6F_pv_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: classes.o; +text: .text%__1cIPhaseIFGLremove_node6MI_pnIIndexSet__; +text: 
.text%__1cIPhaseIFGJre_insert6MI_v_; +text: .text%__1cJraw_score6Fdd_d_: chaitin.o; +text: .text%__1cDLRGFscore6kM_d_; +text: .text%__1cJMarkSweepOIsAliveClosureLdo_object_b6MpnHoopDesc__i_: markSweep.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: classes.o; +text: .text%__1cETypeIhashcons6M_pk0_; +text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_; +text: .text%__1cENodeEhash6kM_I_; +text: .text%__1cIProjNodeGpinned6kM_i_; +text: .text%__1cMloadConPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMPhaseChaitinKelide_copy6MpnENode_ipnFBlock_rnJNode_List_6i_i_; +text: .text%__1cHNTarjanEEVAL6M_p0_; +text: .text%__1cMloadConPNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: classes.o; +text: .text%__1cQIndexSetIteratorEnext6M_I_: coalesce.o; +text: .text%__1cWShouldNotReachHereNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cMMachCallNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cMset_live_bit6Fpii_v_: buildOopMap.o; +text: .text%__1cHCompileRvalid_bundle_info6MpknENode__i_; +text: .text%__1cOPhaseIdealLoopUbuild_loop_late_post6MpnENode_pk0_v_; +text: .text%__1cIProjNodeGOpcode6kM_i_; +text: .text%__1cENodeHdel_out6Mp0_v_: phaseX.o; +text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJPhaseLiveLadd_liveout6MpnFBlock_IrnJVectorSet__v_; +text: .text%__1cDfh16FI_i_; +text: .text%__1cRMachSpillCopyNodeLbottom_type6kM_pknEType__: ad_i486.o; +text: .text%__1cMPhaseChaitinMchoose_color6MrnDLRG_i_nHOptoRegEName__; +text: .text%__1cNIdealLoopTreeJis_member6kMpk0_i_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: oopMap.o; +text: .text%__1cENodeHis_Copy6kM_I_: cfgnode.o; +text: .text%__1cIMachNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cIConINodeGOpcode6kM_i_; +text: .text%__1cGIfNodeGOpcode6kM_i_; +text: .text%__1cOtypeArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cHTypePtrEhash6kM_i_; +text: .text%__1cMPhaseChaitinQis_high_pressure6MpnFBlock_pnDLRG_I_i_; +text: .text%__1cENode2t6MI_v_; +text: .text%__1cMPhaseChaitinKbias_color6MrnDLRG_i_nHOptoRegEName__; +text: .text%__1cENodeMcisc_operand6kM_i_: classes.o; +text: .text%__1cMOopTaskQdDueueOpop_local_slow6MInOTaskQdDueueSuperDAge__i_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: typeArrayKlass.o; +text: .text%JVM_ArrayCopy; +text: .text%__1cOtypeArrayKlassQoop_is_typeArray6kM_i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cOPhaseIdealLoopYsplit_if_with_blocks_pre6MpnENode__2_; +text: .text%__1cOPhaseIdealLoopZsplit_if_with_blocks_post6MpnENode__v_; +text: .text%__1cETypeEmeet6kMpk0_2_; +text: .text%__1cETypeLisa_oop_ptr6kM_i_; +text: .text%__1cFArenaIArealloc6MpvII_1_; +text: .text%__1cRMachSpillCopyNodeKin_RegMask6kMI_rknHRegMask__: ad_i486.o; +text: .text%__1cKTypeOopPtrEhash6kM_i_; +text: .text%__1cRMachSpillCopyNodeLout_RegMask6kM_rknHRegMask__: ad_i486.o; +text: .text%__1cIMachNodeMcisc_operand6kM_i_: ad_i486.o; +text: .text%__1cIMachNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: ad_i486_misc.o; +text: .text%__1cKIfTrueNodeGOpcode6kM_i_; +text: .text%__1cIAddPNodeGOpcode6kM_i_; +text: .text%__1cENodeHdel_out6Mp0_v_: graphKit.o; +text: .text%__1cPDictionaryEntrybDprotection_domain_set_oops_do6MpnKOopClosure__v_: dictionary.o; 
+text: .text%__1cHTypeIntEhash6kM_i_; +text: .text%__1cSPSPromotionManagerUflush_prefetch_queue6M_v_: psPromotionManager.o; +text: .text%__1cMMachProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cMPhaseIterGVNNtransform_old6MpnENode__2_; +text: .text%__1cMMachProjNodeGOpcode6kM_i_; +text: .text%__1cETypeJsingleton6kM_i_; +text: .text%__1cRMachSpillCopyNodePoper_input_base6kM_I_: ad_i486.o; +text: .text%__1cJleaP8NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJleaP8NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMclr_live_bit6Fpii_v_: buildOopMap.o; +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: cfgnode.o; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__: ad_i486.o; +text: .text%__1cIMachNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cJPhaseLiveGgetset6MpnFBlock__pnIIndexSet__; +text: .text%__1cHConNodeGOpcode6kM_i_; +text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cJloadINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSis_single_register6FI_i_: postaloc.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: relocInfo.o; +text: .text%__1cILRG_ListGextend6MII_v_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: classes.o; +text: .text%__1cLIfFalseNodeGOpcode6kM_i_; +text: .text%__1cECopyYconjoint_words_to_higher6FpnIHeapWord_2I_v_: node.o; +text: .text%__1cHTypeIntJsingleton6kM_i_; +text: .text%__1cSCallStaticJavaNodeGOpcode6kM_i_; +text: .text%__1cMPhaseIterGVNWadd_users_to_worklist06MpnENode__v_; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopEsort6MpnNIdealLoopTree_2_2_; +text: .text%__1cJPhaseLiveLadd_liveout6MpnFBlock_pnIIndexSet_rnJVectorSet__v_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: cfgnode.o; +text: .text%__1cMMutableSpaceMcas_allocate6MI_pnIHeapWord__; +text: .text%__1cHNTarjanICOMPRESS6M_v_; +text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cIciObjectGequals6Mp0_i_; +text: .text%__1cKNode_ArrayGinsert6MIpnENode__v_; +text: .text%__1cKTypeOopPtrCeq6kMpknEType__i_; +text: .text%__1cIBoolNodeGOpcode6kM_i_; +text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__; +text: .text%__1cENodeEgrow6MI_v_; +text: .text%__1cHTypePtrCeq6kMpknEType__i_; +text: .text%__1cYCallStaticJavaDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNRelocIteratorEnext6M_i_: codeBlob.o; +text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_; +text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_; +text: .text%__1cJeRegIOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cLemit_opcode6FrnKCodeBuffer_i_v_; +text: .text%__1cMMachProjNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cENodeNrematerialize6kM_i_: cfgnode.o; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: ad_i486_misc.o; +text: .text%__1cKup_one_dom6FpnENode__1_: ifnode.o; +text: .text%__1cJMultiNodeIis_Multi6M_p0_; +text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cIIndexSetKinitialize6MIpnFArena__v_; +text: .text%__1cJPhaseLiveKgetfreeset6M_pnIIndexSet__; +text: .text%__1cHnmethodbHfollow_root_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_ppnHoopDesc_iri_v_; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: classes.o; +text: .text%__1cMMachProjNodeLout_RegMask6kM_rknHRegMask__: classes.o; +text: .text%__1cOPhaseIdealLoopUbuild_loop_tree_impl6MpnENode_i_i_; +text: .text%__1cFState2T6M_v_; 
+text: .text%__1cIParmNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cHTypeInt2t6Miii_v_; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: ad_i486_misc.o; +text: .text%__1cECopyXconjoint_words_to_lower6FpnIHeapWord_2I_v_: node.o; +text: .text%__1cEUTF8Enext6FpkcpH_pc_; +text: .text%__1cHemit_rm6FrnKCodeBuffer_iii_v_; +text: .text%__1cIciObjectEhash6M_i_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: typeArrayKlass.o; +text: .text%__1cKRegionNodeGOpcode6kM_i_; +text: .text%__1cWNode_Backward_IteratorEnext6M_pnENode__; +text: .text%__1cIMachNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopOget_early_ctrl6MpnENode__2_; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: typeArrayKlass.o; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cUParallelScavengeHeapVlarge_typearray_limit6M_I_: parallelScavengeHeap.o; +text: .text%__1cOPhaseIdealLoopOset_early_ctrl6MpnENode__v_; +text: .text%__1cHPhiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: cfgnode.o; +text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cHNTarjanELINK6Mp01_v_; +text: .text%__1cENodeHdel_out6Mp0_v_: node.o; +text: .text%__1cLuse_dom_lca6FpnFBlock_pnENode_3rnLBlock_Array__1_: gcm.o; +text: .text%__1cIPhaseGVNJtransform6MpnENode__2_; +text: .text%__1cIIndexSetFclear6M_v_: live.o; +text: .text%__1cOPSPromotionLABKinitialize6MnJMemRegion__v_; +text: .text%__1cOPSPromotionLABFflush6M_v_; +text: .text%__1cITypeNodeEhash6kM_I_; +text: .text%__1cJVectorSet2F6kMI_i_; +text: .text%__1cJPhaseLiveHfreeset6MpknFBlock__v_; +text: .text%__1cENodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOPhaseIdealLoopThas_local_phi_input6MpnENode__2_; +text: .text%__1cIBoolNodeHis_Bool6M_p0_: subnode.o; +text: .text%__1cTleaPIdxScaleOffNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNPhaseRegAllocUreg2offset_unchecked6kMnHOptoRegEName__i_; +text: .text%__1cNPhaseRegAllocKreg2offset6kMnHOptoRegEName__i_; +text: .text%__1cTleaPIdxScaleOffNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachNode2t6M_v_; +text: .text%__1cITypeNodeJideal_reg6kM_I_; +text: .text%__1cLTypeInstPtrEhash6kM_i_; +text: .text%__1cFStateRMachOperGenerator6MipnIMachNode_pnHCompile__pnIMachOper__; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: classes.o; +text: .text%__1cHdom_lca6FpnFBlock_1_1_: gcm.o; +text: .text%__1cENodeNis_block_proj6kM_pk0_; +text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_; +text: .text%__1cPClassFileParserOcheck_property6MipkcipnGThread__v_; +text: .text%__1cKRegionNodeGpinned6kM_i_: classes.o; +text: .text%__1cFMutexGunlock6M_v_; +text: .text%__1cHPhiNodeGpinned6kM_i_: cfgnode.o; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: ad_i486.o; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: ad_i486.o; +text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__; +text: .text%__1cFStateDDFA6MipknENode__i_; +text: .text%__1cFState2t6M_v_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: classes.o; +text: .text%__1cKRelocationLunpack_data6M_v_: ad_i486.o; +text: .text%__1cHRegMaskMSmearToPairs6M_v_; +text: 
.text%__1cKjmpDirNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIIndexSet2t6Mp0_v_; +text: .text%__1cENodeFclone6kM_p0_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: cfgnode.o; +text: .text%__1cRmethodDataOopDescHdata_at6Mi_pnLProfileData__; +text: .text%__1cETypeFxmeet6kMpk0_2_; +text: .text%__1cOPhaseIdealLoopZremix_address_expressions6MpnENode__2_; +text: .text%__1cENodeKmatch_edge6kMI_I_; +text: .text%__1cIPhaseCCPOtransform_once6MpnENode__2_; +text: .text%__1cICallNodeLbottom_type6kM_pknEType__; +text: .text%__1cKTypeAryPtrEhash6kM_i_; +text: .text%__1cENodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cbAfinal_graph_reshaping_impl6FpnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cOPhaseIdealLoopNget_late_ctrl6MpnENode_2_2_; +text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cOMethodLivenessKBasicBlockXcompute_gen_kill_single6MpnQciByteCodeStream__v_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: callnode.o; +text: .text%__1cICallNodeHis_Call6M_p0_: callnode.o; +text: .text%__1cRMachSpillCopyNodeOimplementation6kMpnKCodeBuffer_pnNPhaseRegAlloc_i_I_; +text: .text%__1cIProjNodeEhash6kM_I_; +text: .text%__1cHemit_d86FrnKCodeBuffer_i_v_; +text: .text%__1cENodeIIdentity6MpnOPhaseTransform__p0_; +text: .text%__1cRMachSafePointNodeEjvms6kM_pnIJVMState__: ad_i486_misc.o; +text: .text%__1cENodeFIdeal6MpnIPhaseGVN_i_p0_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfo.o; +text: .text%__1cGIfNodeGpinned6kM_i_: classes.o; +text: .text%__1cRSignatureIteratorGexpect6Mc_v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: memnode.o; +text: .text%__1cENodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: instanceKlass.o; +text: .text%__1cNeFlagsRegOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cQUnique_Node_ListGremove6MpnENode__v_; +text: .text%__1cICmpPNodeGOpcode6kM_i_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: location.o; +text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cHPhiNodeEhash6kM_I_; +text: .text%__1cRmethodDataOopDescJnext_data6MpnLProfileData__2_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: callnode.o; +text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cTCreateExceptionNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cGciTypeEmake6FnJBasicType__p0_; +text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: cfgnode.o; +text: .text%__1cOis_diamond_phi6FpnENode__i_: cfgnode.o; +text: .text%__1cHCompileMFillLocArray6MpnENode_pnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: ad_i486.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: ad_i486.o; +text: .text%__1cENodeQIdeal_DU_postCCP6MpnIPhaseCCP__p0_; +text: .text%__1cLTypeInstPtrCeq6kMpknEType__i_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: memnode.o; +text: .text%__1cMPhaseChaitinSuse_prior_register6MpnENode_I2pnFBlock_rnJNode_List_6_i_; +text: .text%__1cLimpl_helper6FpnKCodeBuffer_iiiiipkci_i_: ad_i486.o; +text: .text%__1cKTypeOopPtrJsingleton6kM_i_; +text: .text%__1cENodeHsize_of6kM_I_; +text: .text%__1cMOopMapStreamJfind_next6M_v_; +text: .text%__1cKJavaThreadPcook_last_frame6MnFframe__1_; +text: .text%__1cIAddINodeGOpcode6kM_i_; +text: .text%__1cIGraphKitHstopped6M_i_; +text: .text%__1cJStartNodeLbottom_type6kM_pknEType__; +text: .text%__1cGIfNodeFis_If6M_p0_: classes.o; +text: 
.text%__1cMPhaseChaitinHnew_lrg6MpknENode_I_v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: callnode.o; +text: .text%__1cFMutexElock6MpnGThread__v_; +text: .text%__1cTconstantPoolOopDescbAname_and_type_ref_index_at6Mi_i_; +text: .text%__1cENodeSremove_dead_region6MpnIPhaseGVN_i_i_; +text: .text%__1cKTypeOopPtrLxadd_offset6kMi_i_; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_: coalesce.o; +text: .text%__1cHMatcherKLabel_Root6MpknENode_pnFState_p16_6_; +text: .text%__1cJAssemblerOlocate_operand6FpCn0AMWhichOperand__1_; +text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_; +text: .text%__1cHTypeIntFxmeet6kMpknEType__3_; +text: .text%__1cKRelocationSpd_address_in_code6M_ppC_; +text: .text%__1cNSafePointNodeGpinned6kM_i_: callnode.o; +text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__; +text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cICmpINodeGOpcode6kM_i_; +text: .text%__1cFBlockLis_uncommon6kMrnLBlock_Array__i_; +text: .text%__1cKRelocationNunpack_2_ints6Mri1_v_: relocInfo.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_i486_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: classes.o; +text: .text%JVM_CurrentTimeMillis; +text: .text%__1cSCallStaticJavaNodeEhash6kM_I_: callnode.o; +text: .text%__1cMPhaseIterGVNVadd_users_to_worklist6MpnENode__v_; +text: .text%__1cIMachNodeSalignment_required6kM_i_: ad_i486_misc.o; +text: .text%__1cOoop_RelocationLunpack_data6M_v_; +text: .text%__1cIHaltNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: classes.o; +text: .text%__1cNSharedRuntimeDd2i6Fd_i_; +text: .text%__1cGcmpkey6Fpkv1_i_; +text: .text%__1cMMergeMemNodeGOpcode6kM_i_; +text: .text%__1cIMachNodeIpeephole6MpnFBlock_ipnNPhaseRegAlloc_ri_p0_; +text: .text%__1cJCProjNodeGis_CFG6kM_i_: cfgnode.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: cfgnode.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: classes.o; +text: .text%__1cPciObjectFactoryEfind6MpnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cOmatch_into_reg6FpnENode_iii1_i_: matcher.o; +text: .text%__1cENodeHdel_out6Mp0_v_: reg_split.o; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfoRec.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_i486.o; +text: .text%__1cIMachNodeSalignment_required6kM_i_: ad_i486.o; +text: .text%__1cINodeHashLhash_insert6MpnENode__v_; +text: .text%__1cKTypeAryPtrCeq6kMpknEType__i_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: callnode.o; +text: .text%__1cOindOffset8OperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cPciObjectFactoryLis_found_at6MipnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cLTypeInstPtr2t6MnHTypePtrDPTR_pnHciKlass_ipnIciObject_i_v_; +text: .text%__1cENode2t6Mp0_v_; +text: .text%__1cIimmIOperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cLSymbolTableGlookup6MipkciI_pnNsymbolOopDesc__; +text: .text%__1cENodeFis_If6M_pnGIfNode__: classes.o; +text: .text%__1cIProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cETypeKhas_memory6kM_i_; +text: .text%__1cNloadRangeNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLTypeInstPtrEmake6FnHTypePtrDPTR_pnHciKlass_ipnIciObject_i_pk0_; +text: .text%__1cKjmpConNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIJVMStateIof_depth6kMi_p0_; +text: .text%__1cKNode_ArrayEgrow6MI_v_; 
+text: .text%__1cJStartNodeGpinned6kM_i_: callnode.o; +text: .text%__1cRPSOldPromotionLABFflush6M_v_; +text: .text%__1cNCatchProjNodeGOpcode6kM_i_; +text: .text%__1cENodeGis_CFG6kM_i_: connode.o; +text: .text%__1cHMatcherKReduceOper6MpnFState_ipnIMachNode_rpnENode__v_; +text: .text%__1cWShouldNotReachHereNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIPhaseIFGMtest_edge_sq6kMII_i_; +text: .text%__1cGIfNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIConPNodeGOpcode6kM_i_; +text: .text%__1cHTypeIntEmake6Fiii_pk0_; +text: .text%__1cRMachSpillCopyNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFframeVoopmapreg_to_location6kMnFVMRegEName_pknLRegisterMap__ppnHoopDesc__; +text: .text%__1cHPhiNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cICallNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: instanceKlass.o; +text: .text%__1cJTypeTupleJsingleton6kM_i_; +text: .text%__1cJLoadPNodeGOpcode6kM_i_; +text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffer_ipnLOopRecorder_pkcii_v_; +text: .text%__1cLOopRecorder2t6MpnFArena__v_; +text: .text%__1cKCodeBuffer2T6M_v_; +text: .text%__1cKCodeBufferQalloc_relocation6MI_v_; +text: .text%__1cIAddPNodeKmatch_edge6kMI_I_; +text: .text%__1cIMachNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIMachNodeJemit_size6kMpnNPhaseRegAlloc__I_; +text: .text%__1cZPhaseConservativeCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cHCmpNodeGis_Cmp6kM_pk0_: classes.o; +text: .text%__1cFMutexElock6M_v_; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: instanceKlass.o; +text: .text%__1cHTypeIntEmake6Fi_pk0_; +text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__; +text: .text%__1cENodeRis_cisc_alternate6kM_i_: ad_i486.o; +text: .text%__1cENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNencode_RegMem6FrnKCodeBuffer_iiiiii_v_; +text: .text%__1cMloadConINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cILoadNodeEhash6kM_I_; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pnIciObject_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cENodeMcisc_operand6kM_i_: cfgnode.o; +text: .text%__1cJeRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cICmpUNodeGOpcode6kM_i_; +text: .text%__1cJHashtableLhash_symbol6Fpkci_I_: symbolTable.o; +text: .text%__1cFframeUis_interpreted_frame6kM_i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: cfgnode.o; +text: .text%__1cJCProjNodeEhash6kM_I_: classes.o; +text: .text%__1cHPhiNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__; +text: .text%__1cJMultiNodeEhash6kM_I_: classes.o; +text: .text%__1cENodeHdel_req6MI_v_; +text: .text%__1cHCompileJcan_alias6MpknHTypePtr_i_i_; +text: .text%__1cSPSPromotionManagerMdrain_stacks6M_v_; +text: .text%__1cETypeEhash6kM_i_; +text: .text%__1cLOptoRuntimeXdeoptimize_caller_frame6FpnKJavaThread_i_v_; +text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cIHaltNodeGOpcode6kM_i_; +text: .text%__1cZPhaseConservativeCoalesceJcopy_copy6MpnENode_2pnFBlock_I_i_; +text: .text%__1cENodeGis_CFG6kM_i_: subnode.o; +text: .text%__1cMPhaseIterGVNZremove_globally_dead_node6MpnENode__v_; +text: .text%__1cIParmNodeGOpcode6kM_i_; +text: .text%__1cIJVMStateLdebug_start6kM_I_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: classes.o; +text: 
.text%__1cENodeGis_Phi6M_pnHPhiNode__: multnode.o; +text: .text%__1cGTarjanEEVAL6M_p0_; +text: .text%__1cOThreadCritical2T6M_v_; +text: .text%__1cOThreadCritical2t6M_v_; +text: .text%__1cRSignatureIteratorKparse_type6M_i_; +text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_; +text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_; +text: .text%__1cOPhaseIdealLoopbIdom_lca_for_get_late_ctrl_internal6MpnENode_22_2_; +text: .text%__1cTconstantPoolOopDescSklass_ref_index_at6Mi_i_; +text: .text%__1cOeFlagsRegUOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_: classes.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: cfgnode.o; +text: .text%__1cFBlockGselect6MrnJNode_List_rnLBlock_Array_pirnJVectorSet_IrnNGrowableArray4CI___pnENode__; +text: .text%__1cKMachIfNodeJis_MachIf6kM_pk0_: ad_i486_misc.o; +text: .text%__1cEDict2F6kMpkv_pv_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nmethod.o; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: multnode.o; +text: .text%__1cICodeHeapKfind_start6kMpv_1_; +text: .text%__1cKTypeAryPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cIMachNodeHtwo_adr6kM_I_: ad_i486.o; +text: .text%__1cHhashptr6Fpkv_i_; +text: .text%__1cIAddPNodeLbottom_type6kM_pknEType__; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cENodeHget_int6kM_i_; +text: .text%__1cMMachTypeNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cJCatchNodeGOpcode6kM_i_; +text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_; +text: .text%__1cOoop_RelocationJoop_value6M_pnHoopDesc__; +text: .text%__1cHConNodeGis_Con6kM_I_: classes.o; +text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__; +text: .text%__1cFBlockIis_Empty6kM_i_; +text: .text%__1cWThreadLocalAllocBufferFreset6M_v_; +text: .text%__1cENodeGis_Con6kM_I_: classes.o; +text: .text%__1cGBitMapUclear_range_of_words6MII_v_: bitMap.o; +text: .text%__1cGBitMapFclear6M_v_; +text: .text%__1cbFCompressedLineNumberWriteStreamKwrite_pair6Mii_v_; +text: .text%__1cOPhaseIdealLoopOidom_no_update6kMpnENode__2_: split_if.o; +text: .text%__1cFframeOis_entry_frame6kM_i_; +text: .text%__1cYDebugInformationRecorderLcheck_phase6Mn0AFPhase__v_; +text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cMMergeMemNodeLbottom_type6kM_pknEType__: memnode.o; +text: .text%__1cLLShiftINodeGOpcode6kM_i_; +text: .text%__1cRCardTableModRefBSEkind6M_nKBarrierSetEName__: cardTableExtension.o; +text: .text%__1cFBlockOcode_alignment6M_I_; +text: .text%__1cMPhaseChaitinLinsert_proj6MpnFBlock_IpnENode_I_v_; +text: .text%__1cKCastPPNodeGOpcode6kM_i_; +text: .text%__1cMMachCallNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cFStateRMachNodeGenerator6MipnHCompile__pnIMachNode__; +text: .text%__1cHMatcherKReduceInst6MpnFState_irpnENode__pnIMachNode__; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: oopMap.o; +text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_I_pnIHeapWord__; +text: .text%__1cENodeHis_Copy6kM_I_: memnode.o; +text: .text%__1cNsymbolOopDescGequals6kMpkci_i_; +text: .text%__1cUParallelScavengeHeapVunsafe_max_tlab_alloc6kM_I_; +text: .text%__1cJMultiNodeIproj_out6kMI_pnIProjNode__; +text: .text%__1cILoadNodeLbottom_type6kM_pknEType__; +text: .text%__1cIMachNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__: classes.o; +text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_; 
+text: .text%__1cENodeGpinned6kM_i_: connode.o; +text: .text%__1cWThreadLocalAllocBufferKinitialize6MpnIHeapWord_22_v_; +text: .text%__1cUParallelScavengeHeapRallocate_new_tlab6MI_pnIHeapWord__; +text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2I_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: collectedHeap.o; +text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: sharedHeap.o; +text: .text%__1cKSharedHeapXfill_region_with_object6FnJMemRegion__v_; +text: .text%__1cIBoolNodeEhash6kM_I_; +text: .text%__1cQciByteCodeStreamEjava6MnJBytecodesECode__2_; +text: .text%__1cSinstanceKlassKlassMoop_is_klass6kM_i_: instanceKlassKlass.o; +text: .text%__1cOindOffset8OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKjmpDirNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKCodeBufferIrelocate6MpCrknQRelocationHolder_i_v_; +text: .text%__1cGIfNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: subnode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: cfgnode.o; +text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_; +text: .text%__1cPClassFileParserbEparse_constant_pool_utf8_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMMachHaltNodeEjvms6kM_pnIJVMState__; +text: .text%__1cIMachNodeHis_Mach6M_p0_: machnode.o; +text: .text%__1cKjmpConNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cUGenericGrowableArray2t6Mii_v_; +text: .text%__1cOPhaseIdealLoopOidom_no_update6kMpnENode__2_: loopopts.o; +text: .text%__1cNsymbolOopDescLas_C_string6kMpci_1_; +text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_; +text: .text%__1cHMatcherTReduceInst_Interior6MpnFState_ipnIMachNode_IrpnENode__I_; +text: .text%__1cENodeJis_Branch6kM_I_: ad_i486.o; +text: .text%__1cIAddPNodeHis_AddP6M_p0_: classes.o; +text: .text%__1cIMachNodeNis_MachEpilog6M_pnOMachEpilogNode__: ad_i486.o; +text: .text%__1cPSignatureStreamEnext6M_v_; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode__i_; +text: .text%__1cJloadSNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJPSPermGenSallocate_permanent6MI_pnIHeapWord__; +text: .text%__1cMMutableSpaceIallocate6MI_pnIHeapWord__; +text: .text%__1cUParallelScavengeHeapWpermanent_mem_allocate6MI_pnIHeapWord__; +text: .text%__1cHCompileRprobe_alias_cache6MpknHTypePtr__pn0APAliasCacheEntry__; +text: .text%__1cENodeIdestruct6M_v_; +text: .text%__1cMloadConINodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cIMachNodeNoperand_index6kMI_i_; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cKRegionNodeEhash6kM_I_: classes.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: multnode.o; +text: .text%__1cOMachReturnNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cGBitMapJset_union6M0_v_; +text: .text%__1cIMachNodeGExpand6MpnFState_rnJNode_List__p0_: ad_i486_misc.o; +text: .text%__1cIAddPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cMPhaseIterGVNKis_IterGVN6M_p0_: phaseX.o; +text: .text%__1cENodeOis_block_start6kM_i_; +text: .text%__1cPciInstanceKlassMis_interface6M_i_: ciInstanceKlass.o; +text: .text%__1cJeRegLOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cHPhiNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: 
.text%__1cJTypeTupleEhash6kM_i_; +text: .text%__1cNCellTypeStateFmerge6kM0i_0_; +text: .text%__1cHPhiNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKjmpConNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFParseNdo_exceptions6M_v_; +text: .text%__1cFParsePdo_one_bytecode6M_v_; +text: .text%__1cFBlockJfind_node6kMpknENode__I_; +text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_; +text: .text%__1cLjmpConUNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cGBitMap2t6MpII_v_; +text: .text%__1cLOptoRuntimeFnew_C6FpnMklassOopDesc_pnKJavaThread__v_; +text: .text%method_compare: methodOop.o; +text: .text%__1cIHaltNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: classes.o; +text: .text%__1cKis_x2logic6FpnIPhaseGVN_pnENode__3_: cfgnode.o; +text: .text%__1cHAbsNodeLis_absolute6FpnIPhaseGVN_pnENode__4_; +text: .text%__1cJloadPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNLoadRangeNodeGOpcode6kM_i_; +text: .text%__1cLPhaseValuesGintcon6Mi_pnIConINode__; +text: .text%__1cHCompilePfind_alias_type6MpknHTypePtr_i_pn0AJAliasType__; +text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_; +text: .text%__1cKHandleMark2T6M_v_; +text: .text%__1cJloadLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOMachReturnNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cGTarjanICOMPRESS6M_v_; +text: .text%__1cTconstantPoolOopDescQsignature_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cHRegMaskMClearToPairs6M_v_; +text: .text%__1cENodeHdel_out6Mp0_v_: block.o; +text: .text%__1cENodeHdel_out6Mp0_v_: parse1.o; +text: .text%__1cMMachCallNodeLis_MachCall6M_p0_: ad_i486_misc.o; +text: .text%__1cNSafePointNodebBneeds_polling_address_input6F_i_; +text: .text%__1cIJVMStateJdebug_end6kM_I_; +text: .text%__1cIMachNodeKconst_size6kM_i_: ad_i486.o; +text: .text%__1cHSubNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJLoadINodeGOpcode6kM_i_; +text: .text%__1cPjava_lang_ClassMis_primitive6FpnHoopDesc__i_; +text: .text%__1cIMachNodeFreloc6kM_i_: ad_i486.o; +text: .text%__1cIProjNodeHsize_of6kM_I_; +text: .text%__1cHMatcherQis_save_on_entry6Mi_i_; +text: .text%__1cLBoxLockNodeNrematerialize6kM_i_: classes.o; +text: .text%__1cRMachSpillCopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPVirtualCallDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cHMatcherKmatch_tree6MpknENode__pnIMachNode__; +text: .text%__1cOindOffset8OperFscale6kM_i_: ad_i486.o; +text: .text%__1cNloadConI0NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIIndexSetSpopulate_free_list6F_v_; +text: .text%__1cLPCTableNodeGpinned6kM_i_: classes.o; +text: .text%__1cNnew_loc_value6FpnNPhaseRegAlloc_nHOptoRegEName_nILocationEType__pnNLocationValue__: output.o; +text: .text%__1cMloadConINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUGenericGrowableArray2t6MpnFArena_iipnEGrET__v_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: cfgnode.o; +text: .text%__1cENodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%JVM_ReleaseUTF; +text: .text%__1cKutf8_write6FpCH_0_: utf8.o; +text: .text%__1cKNode_ArrayGremove6MI_v_; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOis_range_check6FpnENode_r12ri_i_: ifnode.o; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: methodDataOop.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: multnode.o; +text: .text%__1cMPhaseIterGVNMsubsume_node6MpnENode_2_v_; +text: .text%__1cJrelocInfo2t6Mn0AJrelocType_ii_v_; +text: .text%__1cMciMethodDataHdata_at6Mi_pnLProfileData__; 
+text: .text%__1cENodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachNodeNis_MachEpilog6M_pnOMachEpilogNode__: ad_i486_misc.o; +text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_; +text: .text%__1cMCreateExNodeGOpcode6kM_i_; +text: .text%__1cSloadL_volatileNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cGOopMapbEmap_compiler_reg_to_oopmap_reg6MnHOptoRegEName_ii_nFVMRegEName__; +text: .text%__1cHhashkey6Fpkv_i_; +text: .text%__1cLTypeInstPtrFxmeet6kMpknEType__3_; +text: .text%__1cIJVMState2t6MpnIciMethod_p0_v_; +text: .text%__1cGOopMapHset_xxx6MnHOptoRegEName_nLOopMapValueJoop_types_ii2_v_; +text: .text%__1cLTypeInstPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cENodeHdel_out6Mp0_v_: coalesce.o; +text: .text%__1cGBitMapGat_put6MIi_v_; +text: .text%__1cJloadBNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHdel_out6Mp0_v_: parse2.o; +text: .text%__1cHMatcherTcollect_null_checks6MpnENode__v_; +text: .text%__1cNloadConI0NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENode2t6Mp011_v_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: generateOopMap.o; +text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cHMemNodeMIdeal_common6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cITypeLongCeq6kMpknEType__i_; +text: .text%__1cHCmpNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMMachCallNodeLbottom_type6kM_pknEType__; +text: .text%__1cRMachSafePointNodeQis_MachSafePoint6M_p0_: ad_i486_misc.o; +text: .text%__1cOMethodLivenessKBasicBlockIload_one6Mi_v_; +text: .text%__1cNSafePointNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: ad_i486_misc.o; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: classes.o; +text: .text%__1cIemit_d326FrnKCodeBuffer_i_v_; +text: .text%__1cFDictI2i6M_v_; +text: .text%__1cIJVMStateNclone_shallow6kM_p0_; +text: .text%__1cNloadConI0NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: subnode.o; +text: .text%__1cIMachNodeFreloc6kM_i_: ad_i486_misc.o; +text: .text%__1cFciEnvIis_in_vm6F_i_; +text: .text%__1cNSafePointNodeHsize_of6kM_I_; +text: .text%__1cPCheckCastPPNodeGOpcode6kM_i_; +text: .text%__1cJCodeCacheQfind_blob_unsafe6Fpv_pnICodeBlob__; +text: .text%__1cHTypePtrLmeet_offset6kMi_i_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: connode.o; +text: .text%__1cIHaltNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFChunk2t6MI_v_; +text: .text%__1cFChunk2n6FII_pv_; +text: .text%__1cKciTypeFlowLStateVectorSapply_one_bytecode6MpnQciByteCodeStream__i_; +text: .text%__1cGOopMapJset_value6MnHOptoRegEName_ii_v_; +text: .text%__1cIMachOperLdisp_is_oop6kM_i_; +text: .text%__1cJloadPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFChunk2k6Fpv_v_; +text: .text%__1cOcompU_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeRdisconnect_inputs6Mp0_i_; +text: .text%__1cIIndexSetFclear6M_v_: indexSet.o; +text: .text%__1cIIndexSetJlrg_union6MIIkIpknIPhaseIFG_rknHRegMask__I_; +text: .text%JVM_GetMethodIxExceptionTableLength; +text: .text%__1cETypeFempty6kM_i_; +text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__; +text: .text%__1cJTypeTupleCeq6kMpknEType__i_; +text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: split_if.o; +text: .text%__1cENodeHdel_out6Mp0_v_: memnode.o; +text: .text%__1cNPhaseCoalesceRcombine_these_two6MpnENode_2_v_; +text: 
.text%__1cFMutexbClock_without_safepoint_check6M_v_; +text: .text%__1cPSignatureStreamHis_done6kM_i_; +text: .text%__1cJStoreNodeKmatch_edge6kMI_I_; +text: .text%__1cIBoolNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIBoolTestKcc2logical6kMpknEType__3_; +text: .text%__1cIAddPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFArenaEgrow6MI_pv_; +text: .text%__1cQCompressedStream2t6MpCi_v_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: multnode.o; +text: .text%__1cJeRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cUParallelScavengeHeapPis_in_permanent6kMpkv_i_: parallelScavengeHeap.o; +text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__; +text: .text%__1cXruntime_call_RelocationEtype6M_nJrelocInfoJrelocType__: ad_i486.o; +text: .text%__1cENodeHdel_out6Mp0_v_: loopopts.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: cfgnode.o; +text: .text%__1cKBranchDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cENodeGis_CFG6kM_i_: memnode.o; +text: .text%__1cKjmpDirNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_; +text: .text%__1cFBlockUneeded_for_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_; +text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlass.o; +text: .text%__1cILoadNodeKmatch_edge6kMI_I_; +text: .text%__1cYCallStaticJavaDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHMemNodeGis_Mem6M_p0_: classes.o; +text: .text%__1cMPhaseIterGVNbGregister_new_node_with_optimizer6MpnENode__2_; +text: .text%__1cENodeKreplace_by6Mp0_v_; +text: .text%__1cNPhaseRegAllocGis_oop6kMpknENode__i_; +text: .text%__1cGIfNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: classes.o; +text: .text%__1cSCountedLoopEndNodeGOpcode6kM_i_; +text: .text%__1cKjmpDirNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHPhiNodeIadr_type6kM_pknHTypePtr__: cfgnode.o; +text: .text%__1cHPhiNodeHsize_of6kM_I_: cfgnode.o; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6FnTobjArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cJTypeTupleGfields6FI_ppknEType__; +text: .text%__1cMPhaseChaitinFUnion6MpknENode_3_v_; +text: .text%__1cJdo_method6FpnNmethodOopDesc__v_: recompilationMonitor.o; +text: .text%__1cZload_can_see_stored_value6FpnILoadNode_pnENode_pnOPhaseTransform__3_: memnode.o; +text: .text%__1cFframeGsender6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cNtestP_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKRegionNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLrecord_bias6FpknIPhaseIFG_ii_v_: coalesce.o; +text: .text%__1cMPhaseChaitinSget_spillcopy_wide6MpnENode_2I_2_; +text: .text%__1cKNativeCallLdestination6kM_pC_; +text: .text%__1cNinstanceKlassQarray_klass_impl6FnTinstanceKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: memnode.o; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: callnode.o; +text: .text%__1cSCallStaticJavaNodeRis_CallStaticJava6kM_pk0_: callnode.o; +text: .text%__1cIAddPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cMMergeMemNodeEhash6kM_I_; +text: .text%__1cJcmpOpOperJnum_edges6kM_I_: ad_i486_clone.o; 
+text: .text%__1cUPSMarkSweepDecoratorQinsert_deadspace6MripnIHeapWord_I_i_; +text: .text%__1cMMergeMemNodeLis_MergeMem6M_p0_: memnode.o; +text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cPciInstanceKlassRis_instance_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cILoadNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIBoolNodeLbottom_type6kM_pknEType__: subnode.o; +text: .text%__1cMPhaseChaitinNFind_compress6MI_I_; +text: .text%__1cKStorePNodeGOpcode6kM_i_; +text: .text%__1cTconstantPoolOopDescLname_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: methodOop.o; +text: .text%__1cNmethodOopDescPis_empty_method6kM_i_; +text: .text%__1cLjmpConUNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__; +text: .text%__1cOno_flip_branch6FpnFBlock__i_: block.o; +text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_; +text: .text%__1cNmethodOopDescRis_not_compilable6kMi_i_; +text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_; +text: .text%__1cKRelocationJpack_data6M_i_: ad_i486.o; +text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__; +text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_; +text: .text%__1cNmethodOopDescLis_accessor6kM_i_; +text: .text%__1cILoadNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cISubINodeGOpcode6kM_i_; +text: .text%__1cKStoreINodeGOpcode6kM_i_; +text: .text%__1cNeFlagsRegOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cQciByteCodeStreamMreset_to_bci6Mi_v_; +text: .text%__1cHRetNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: lcm.o; +text: .text%__1cJStoreNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: machnode.o; +text: .text%__1cRSignatureIteratorTcheck_signature_end6M_v_; +text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_; +text: .text%__1cENodeHis_Goto6kM_I_: classes.o; +text: .text%__1cILoadNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceKlass.o; +text: .text%__1cITypeLongEhash6kM_i_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: block.o; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: relocInfo.o; +text: .text%__1cJloadPNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cMPhaseIterGVNFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cLis_cond_add6FpnIPhaseGVN_pnHPhiNode__pnENode__; +text: .text%__1cPsplit_flow_path6FpnIPhaseGVN_pnHPhiNode__pnENode__: cfgnode.o; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: classes.o; +text: .text%__1cKRegionNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKRelocationLunpack_data6M_v_: relocInfo.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: block.o; +text: .text%__1cITypeNodeHsize_of6kM_I_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: memnode.o; +text: .text%__1cFMutexNowned_by_self6kM_i_; +text: .text%__1cIMachNodeRget_base_and_disp6kMrirpknHTypePtr__pknENode__; +text: .text%__1cIAddINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIIndexSetFclear6M_v_: chaitin.o; +text: .text%__1cXPhaseAggressiveCoalesceIcoalesce6MpnFBlock__v_; +text: 
.text%__1cFBlockOschedule_local6MrnHMatcher_rnLBlock_Array_pirnJVectorSet_rnNGrowableArray4CI___i_; +text: .text%__1cFBlockScall_catch_cleanup6MrnLBlock_Array__v_; +text: .text%__1cLBlock_StackXmost_frequent_successor6MpnFBlock__I_; +text: .text%__1cNidealize_test6FpnIPhaseGVN_pnGIfNode__3_: ifnode.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: symbolKlass.o; +text: .text%__1cENodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQSystemDictionaryXcheck_signature_loaders6FnMsymbolHandle_nGHandle_2ipnGThread__v_; +text: .text%__1cENodeGpinned6kM_i_: subnode.o; +text: .text%__1cLLShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLCounterDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cKRegionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGTarjanELINK6Mp01_v_; +text: .text%__1cMPhaseChaitinMyank_if_dead6MpnENode_pnFBlock_pnJNode_List_6_i_; +text: .text%__1cYDebugInformationRecorderWserialize_scope_values6MpnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__; +text: .text%__1cIGraphKitJsync_jvms6kM_pnIJVMState__; +text: .text%__1cENodeGis_Con6kM_I_: cfgnode.o; +text: .text%__1cMciMethodDataJnext_data6MpnLProfileData__2_; +text: .text%__1cENodeGpinned6kM_i_: memnode.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: subnode.o; +text: .text%__1cIBoolNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNmethodOopDescIbci_from6kMpC_i_; +text: .text%__1cNSafePointNodeSset_next_exception6Mp0_v_; +text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_: bytecode.o; +text: .text%__1cOcompU_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__; +text: .text%__1cMPhaseChaitinNFind_compress6MpknENode__I_; +text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__; +text: .text%__1cFStateM_sub_Op_ConI6MpknENode__v_; +text: .text%__1cNRelocIteratorKset_limits6MpC1_v_; +text: .text%__1cNRelocIteratorKinitialize6MipnICodeBlob_pC3_v_; +text: .text%__1cLsymbolKlassNoop_is_symbol6kM_i_: symbolKlass.o; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: parse1.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: ad_i486.o; +text: .text%__1cIsplit_if6FpnGIfNode_pnMPhaseIterGVN__pnENode__: ifnode.o; +text: .text%__1cIGraphKitEstop6M_v_; +text: .text%__1cTremove_useless_bool6FpnGIfNode_pnIPhaseGVN__pnENode__: ifnode.o; +text: .text%__1cPciObjectFactorySget_unloaded_klass6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cJeRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: subnode.o; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: systemDictionary.o; +text: .text%__1cMloadConLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cILoadNodeHis_Load6M_p0_: classes.o; +text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_; +text: .text%__1cKjmpConNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHAddNodeEhash6kM_I_; +text: .text%__1cNSafePointNodeLbottom_type6kM_pknEType__: callnode.o; +text: .text%__1cOemit_d32_reloc6FrnKCodeBuffer_irknQRelocationHolder_i_v_; +text: .text%__1cNSafePointNodeKmatch_edge6kMI_I_; +text: .text%__1cMURShiftINodeGOpcode6kM_i_; +text: .text%__1cOcompI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLjmpConUNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cIRootNodeGOpcode6kM_i_; +text: .text%__1cFChunkEchop6M_v_; +text: .text%__1cIMachOperOindex_position6kM_i_; +text: 
.text%__1cMloadConPNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cIimmPOperEtype6kM_pknEType__: ad_i486_clone.o; +text: .text%__1cPcheckCastPPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOindOffset8OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOindOffset8OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOindOffset8OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cHRegMaskQis_aligned_Pairs6kM_i_; +text: .text%__1cMloadConLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompU_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cHSubNodeGis_Sub6M_p0_: classes.o; +text: .text%__1cJTypeTupleEmake6FIppknEType__pk0_; +text: .text%__1cYCallStaticJavaDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNeFlagsRegOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKReflectionTverify_field_access6FpnMklassOopDesc_22nLAccessFlags_ii_i_; +text: .text%__1cIregDOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cSInterpreterRuntimeLcache_entry6FpnKJavaThread__pnWConstantPoolCacheEntry__: interpreterRuntime.o; +text: .text%__1cJStoreNodeIis_Store6kM_pk0_: classes.o; +text: .text%__1cKDictionaryJget_entry6MiInMsymbolHandle_nGHandle__pnPDictionaryEntry__; +text: .text%__1cHCmpNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_; +text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__; +text: .text%__1cHi2sNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNtestI_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__; +text: .text%__1cJloadPNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPimpl_mov_helper6FpnKCodeBuffer_iiii_i_: ad_i486.o; +text: .text%__1cHConNodeEhash6kM_I_; +text: .text%__1cKjmpDirNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cKstoreINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%Unsafe_CompareAndSwapLong; +text: .text%__1cJStoreNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPciObjectFactoryNfind_non_perm6MpnHoopDesc__rpn0ANNonPermObject__; +text: .text%__1cFBlockLfind_remove6MpknENode__v_; +text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cKTypeRawPtrJsingleton6kM_i_; +text: .text%__1cMloadConDNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConDNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJStoreNodeEhash6kM_I_; +text: .text%__1cMPhaseChaitinJsplit_USE6MpnENode_pnFBlock_2IIiinNGrowableArray4CI__i_I_; +text: .text%__1cMloadConDNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENode2t6Mp0111_v_; +text: .text%__1cXindIndexScaleOffsetOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cWShouldNotReachHereNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKTypeRawPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__; +text: .text%__1cGOopMapHset_oop6MnHOptoRegEName_ii_v_; +text: .text%__1cHMatcherXadjust_outgoing_stk_arg6MinHOptoRegEName_r2_2_; +text: .text%__1cMMergeMemNodePiteration_setup6Mpk0_v_; +text: .text%__1cJeRegLOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cJMultiNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cJCProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstorePNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_; +text: 
.text%__1cLRegisterMapFclear6Mpi_v_; +text: .text%__1cNtestI_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFframeOis_first_frame6kM_i_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: cfgnode.o; +text: .text%__1cENodeMcisc_operand6kM_i_: memnode.o; +text: .text%__1cNtestP_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFChunkJnext_chop6M_v_; +text: .text%__1cHTypeAryRary_must_be_exact6kM_i_; +text: .text%__1cRMachNullCheckNodeQis_MachNullCheck6M_p0_: machnode.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: callnode.o; +text: .text%__1cKIfTrueNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJCodeCacheFalive6FpnICodeBlob__2_; +text: .text%__1cLRShiftINodeGOpcode6kM_i_; +text: .text%__1cOcompU_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_: reg_split.o; +text: .text%__1cLRegisterMap2t6MpnKJavaThread_i_v_; +text: .text%__1cHTypeIntFempty6kM_i_; +text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__; +text: .text%__1cJloadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJchar2type6Fc_nJBasicType__: fieldType.o; +text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNmethodOopDescMintrinsic_id6kM_n0ALIntrinsicId__; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceKlass.o; +text: .text%__1cMMergeMemNodeJmemory_at6kMI_pnENode__; +text: .text%__1cNloadRangeNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: subnode.o; +text: .text%__1cITypeNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__; +text: .text%__1cFciEnvXget_klass_by_index_impl6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cNExceptionMark2T6M_v_; +text: .text%__1cNExceptionMark2t6MrpnGThread__v_; +text: .text%__1cMgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cMloadConPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHOopFlowNcompute_reach6MpnNPhaseRegAlloc_ipnEDict__v_; +text: .text%__1cRInvocationCounterFreset6M_v_; +text: .text%__1cRInvocationCounterJset_state6Mn0AFState__v_; +text: .text%__1cLOptoRuntimePnew_typeArray_C6FnJBasicType_ipnKJavaThread__v_; +text: .text%__1cRInvocationCounterEinit6M_v_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: multnode.o; +text: .text%__1cRMachSafePointNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cUGenericGrowableArrayPraw_at_put_grow6MipknEGrET_3_v_; +text: .text%__1cMMergeMemNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%JVM_GetClassModifiers; +text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__; +text: .text%JVM_GetClassAccessFlags; +text: .text%__1cOcompU_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTCreateExceptionNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJCatchNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLIfFalseNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: codeBlob.o; +text: .text%__1cKstoreINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKjmpDirNodeHis_Goto6kM_I_: ad_i486_misc.o; +text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cSObjectSynchronizerKfast_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cNSafePointNodeOnext_exception6kM_p0_; +text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_; +text: 
.text%__1cMMergeMemNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cENodeHdel_out6Mp0_v_: split_if.o; +text: .text%__1cLMachNopNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKRegionNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLPhaseValuesFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: connode.o; +text: .text%__1cOcompU_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNloadKlassNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cICallNodeLis_CallLeaf6kM_pknMCallLeafNode__: callnode.o; +text: .text%__1cMObjectLocker2t6MnGHandle_pnGThread__v_; +text: .text%__1cMObjectLocker2T6M_v_; +text: .text%__1cKmethodOperGmethod6kM_i_: ad_i486.o; +text: .text%__1cWConstantPoolCacheEntryRset_initial_state6Mi_v_; +text: .text%__1cPSignatureStreamJis_object6kM_i_; +text: .text%__1cJloadINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopOidom_no_update6kMpnENode__2_: loopTransform.o; +text: .text%__1cLProfileDataPfollow_contents6M_v_: methodDataOop.o; +text: .text%__1cLProfileDataPadjust_pointers6M_v_: methodDataOop.o; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: classes.o; +text: .text%__1cOindOffset8OperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cOcompI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNaddI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRmethodDataOopDescPinitialize_data6MpnOBytecodeStream_i_i_; +text: .text%__1cRmethodDataOopDescTbytecode_cell_count6FnJBytecodesECode__i_; +text: .text%__1cRmethodDataOopDescRcompute_data_size6FpnOBytecodeStream__i_; +text: .text%__1cMMergeMemNodeQclone_all_memory6FpnENode__p0_; +text: .text%__1cIGraphKitJclone_map6M_pnNSafePointNode__; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: multnode.o; +text: .text%__1cGOopMapQset_callee_saved6MnHOptoRegEName_ii2_v_; +text: .text%__1cOPhaseIdealLoopIsplit_up6MpnENode_22_i_; +text: .text%__1cKTypeOopPtrWmake_from_klass_common6FpnHciKlass_ii_pk0_; +text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_; +text: .text%__1cENodeHdel_out6Mp0_v_: postaloc.o; +text: .text%__1cOcompU_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: subnode.o; +text: .text%__1cNtestI_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIBoolNodeKmatch_edge6kMI_I_: subnode.o; +text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_; +text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__; +text: .text%__1cNCompileBrokerLmaybe_block6F_v_; +text: .text%__1cFStateM_sub_Op_RegP6MpknENode__v_; +text: .text%__1cFPhase2t6Mn0ALPhaseNumber__v_; +text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__; +text: .text%__1cIJumpDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cKstorePNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: symbolKlass.o; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: symbolKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: symbolKlass.o; +text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__; +text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: callnode.o; +text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_: machnode.o; +text: 
.text%__1cHRegMaskPfind_first_pair6kM_nHOptoRegEName__; +text: .text%__1cIHaltNodeLbottom_type6kM_pknEType__; +text: .text%__1cRMachSafePointNodeRis_safepoint_node6kM_i_: ad_i486_misc.o; +text: .text%__1cKRegionNodeOis_block_start6kM_i_: classes.o; +text: .text%__1cKTypeAryPtrFxmeet6kMpknEType__3_; +text: .text%__1cMMergeMemNodeNset_memory_at6MIpnENode__v_; +text: .text%__1cRMemBarReleaseNodeGOpcode6kM_i_; +text: .text%__1cOeFlagsRegUOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cKcmpOpUOperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cENodeHdel_out6Mp0_v_: cfgnode.o; +text: .text%__1cCosPelapsed_counter6F_x_; +text: .text%__1cMtlsLoadPNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPClassFileParserbLparse_constant_pool_nameandtype_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cGBitMapOset_difference6M0_v_; +text: .text%__1cPcheckCastPPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKMemBarNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cYCallStaticJavaDirectNodeSalignment_required6kM_i_: ad_i486_misc.o; +text: .text%__1cYCallStaticJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNtestP_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: memnode.o; +text: .text%__1cKstorePNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOindOffset8OperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cXindIndexScaleOffsetOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cRaddI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKstoreINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: memnode.o; +text: .text%__1cOcompI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cKRegionNodeGis_CFG6kM_i_: loopnode.o; +text: .text%__1cNloadRangeNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQciByteCodeStreamFEOBCs6M_nJBytecodesECode__; +text: .text%__1cITypeNodeDcmp6kMrknENode__I_; +text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_: frame.o; +text: .text%__1cMPhaseChaitinPset_was_spilled6MpnENode__v_; +text: .text%__1cHTypeAryEhash6kM_i_; +text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_; +text: .text%__1cNsubI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverYlookup_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cPVirtualCallDataPadjust_pointers6M_v_; +text: .text%__1cPVirtualCallDataPfollow_contents6M_v_; +text: .text%__1cMMergeMemNodePset_base_memory6MpnENode__v_; +text: .text%__1cSsafePoint_pollNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNLoadKlassNodeGOpcode6kM_i_; +text: .text%__1cLOopRecorderOallocate_index6MpnI_jobject__i_; +text: .text%__1cYDebugInformationRecorderNappend_handle6MpnI_jobject__i_; +text: .text%__1cKTypeRawPtrEhash6kM_i_; +text: .text%__1cIciObjectIencoding6M_pnI_jobject__; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: machnode.o; +text: .text%__1cPPerfLongVariantGsample6M_v_; +text: .text%__1cENodeHis_Copy6kM_I_: machnode.o; +text: .text%__1cIAndINodeGOpcode6kM_i_; +text: .text%__1cRCompilationPolicyNcanBeCompiled6FnMmethodHandle__i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: multnode.o; +text: 
.text%__1cVCompressedWriteStream2t6Mi_v_; +text: .text%__1cHAddNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJlabelOperFclone6kM_pnIMachOper__; +text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_; +text: .text%__1cNCatchProjNodeMis_CatchProj6kM_pk0_: cfgnode.o; +text: .text%__1cENode2t6Mp01_v_; +text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cNtestI_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: subnode.o; +text: .text%__1cKTypeOopPtrHget_con6kM_i_; +text: .text%__1cMloadConINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMUniverseOperFclone6kM_pnIMachOper__; +text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: multnode.o; +text: .text%__1cJloadINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMTypeKlassPtrEhash6kM_i_; +text: .text%__1cKRegionNodeHhas_phi6kM_pnHPhiNode__; +text: .text%__1cIMachNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cENodeHins_req6MIp0_v_; +text: .text%__1cPconvI2L_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIciMethodbCinterpreter_invocation_count6M_i_; +text: .text%__1cMciMethodDataLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cKStoreCNodeGOpcode6kM_i_; +text: .text%JVM_GetCPMethodSignatureUTF; +text: .text%__1cOcompI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cRshrI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerJemit_data6MirknQRelocationHolder_i_v_; +text: .text%__1cFframeQoops_do_internal6MpnKOopClosure_pnLRegisterMap_i_v_; +text: .text%__1cRInterpreterOopMapLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cOeFlagsRegUOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLjmpConUNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNSafePointNodeGOpcode6kM_i_; +text: .text%__1cJVectorSet2L6MI_rnDSet__; +text: .text%__1cJVectorSetEgrow6MI_v_; +text: .text%__1cOMethodLivenessKBasicBlockWcompute_gen_kill_range6MpnQciByteCodeStream__v_; +text: .text%__1cHnmethodJis_zombie6kM_i_: nmethod.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: gcm.o; +text: .text%__1cKStoreBNodeGOpcode6kM_i_; +text: .text%__1cPClassFileParserbJparse_constant_pool_methodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: connode.o; +text: .text%__1cOFastUnlockNodeGOpcode6kM_i_; +text: .text%__1cHPhiNodeEmake6FpnENode_2pknEType_pknHTypePtr__p0_; +text: .text%__1cLeAXRegPOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_: frame.o; +text: .text%__1cENodeHdel_out6Mp0_v_: gcm.o; +text: .text%__1cETypeOget_const_type6FpnGciType__pk0_; +text: .text%__1cPcheckCastPPNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJVectorSet2t6MpnFArena__v_; +text: .text%__1cIGraphKitGmemory6MI_pnENode__; +text: .text%__1cITypeLong2t6Mxxi_v_; +text: .text%__1cKCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQciByteCodeStreamPget_field_index6M_i_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: memnode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: memnode.o; +text: .text%__1cCosMvm_page_size6F_i_; +text: .text%__1cENodeDcmp6kMrk0_I_; +text: .text%__1cKReflectionTverify_class_access6FpnMklassOopDesc_2i_i_; +text: .text%__1cNloadRangeNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_RegI6MpknENode__v_; +text: .text%__1cJloadINodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: 
.text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: methodOop.o; +text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cKjmpConNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_AddP6MpknENode__v_; +text: .text%__1cMPhaseChaitinVmay_be_copy_of_callee6kMpnENode__i_; +text: .text%__1cXroundDouble_mem_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitMsaved_ex_oop6FpnNSafePointNode__pnENode__; +text: .text%__1cIPhaseCCPFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cFParseKensure_phi6Mii_pnHPhiNode__; +text: .text%__1cJloadFNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: postaloc.o; +text: .text%__1cOMachReturnNodeNis_MachReturn6M_p0_: ad_i486_misc.o; +text: .text%__1cIMachOperMdisp_as_type6kM_pknHTypePtr__: ad_i486.o; +text: .text%__1cPcheckCastPPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIciObjectJset_ident6MI_v_; +text: .text%__1cPciObjectFactoryNinit_ident_of6MpnIciObject__v_; +text: .text%__1cQPreserveJVMState2T6M_v_; +text: .text%__1cQPreserveJVMState2t6MpnIGraphKit_i_v_; +text: .text%__1cNtestP_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__; +text: .text%__1cIGraphKitLclean_stack6Mi_v_; +text: .text%__1cYDebugInformationRecorderYserialize_monitor_values6MpnNGrowableArray4CpnMMonitorValue____i_; +text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__; +text: .text%__1cYDebugInformationRecorderOdescribe_scope6MpnIciMethod_ipnKDebugToken_44_v_; +text: .text%__1cWMutableSpaceUsedHelperLtake_sample6M_x_: spaceCounters.o; +text: .text%__1cICallNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cFciEnvSget_klass_by_index6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cLcastP2INodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRCompilationPolicyOmustBeCompiled6FnMmethodHandle__i_; +text: .text%__1cLBoxLockNodeGOpcode6kM_i_; +text: .text%__1cHTypePtrHget_con6kM_i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: callnode.o; +text: .text%__1cITypeFuncEhash6kM_i_; +text: .text%__1cJAssemblerJemit_data6MinJrelocInfoJrelocType_i_v_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_22nHAddressLScaleFactor_irknQRelocationHolder__v_; +text: .text%__1cLBoxLockNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMLinkResolverNresolve_klass6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__; +text: .text%__1cNloadRangeNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cLPhaseValuesHmakecon6MpknEType__pnHConNode__; +text: .text%__1cOcompI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIciMethodPliveness_at_bci6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessPget_liveness_at6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessKBasicBlockPget_liveness_at6MpnIciMethod_i_nGBitMap__; +text: .text%__1cPSignatureStream2t6MnMsymbolHandle_i_v_; +text: .text%__1cITypeLongJsingleton6kM_i_; +text: .text%__1cOcompI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cRMemBarAcquireNodeGOpcode6kM_i_; +text: .text%__1cJOopMapSetSfind_map_at_offset6kMii_pnGOopMap__; +text: .text%__1cICodeBlobbAoop_map_for_return_address6MpCi_pnGOopMap__; +text: .text%__1cLCounterDataOis_CounterData6M_i_: ciMethodData.o; +text: .text%__1cHnmethodKis_nmethod6kM_i_: nmethod.o; 
+text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_; +text: .text%__1cLCastP2INodeGOpcode6kM_i_; +text: .text%__1cKCodeBufferOadd_stub_reloc6MpCrknQRelocationHolder_i_v_; +text: .text%__1cKCodeBufferOalloc_relocate6M_pnORelocateBuffer__; +text: .text%__1cMCallLeafNodeGOpcode6kM_i_; +text: .text%__1cJLoadSNodeGOpcode6kM_i_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: subnode.o; +text: .text%__1cKjmpDirNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHTypePtrJsingleton6kM_i_; +text: .text%__1cJAssemblerEcall6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_; +text: .text%__1cIGraphKitObasic_plus_adr6MpnENode_2i_2_; +text: .text%__1cKMemBarNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIimmPOperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cMmerge_region6FpnKRegionNode_pnIPhaseGVN__pnENode__: cfgnode.o; +text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cTCreateExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRshrI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMPhaseIterGVNHmakecon6MpknEType__pnHConNode__; +text: .text%__1cMTypeKlassPtrCeq6kMpknEType__i_; +text: .text%__1cRaddI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%JVM_GetMethodIxLocalsCount; +text: .text%__1cRNativeInstructionFwrote6Mi_v_; +text: .text%__1cWShouldNotReachHereNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cICmpPNodeDsub6kMpknEType_3_3_; +text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryIas_flags6MnITosState_iiiii_i_; +text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cQMachCallJavaNodePis_MachCallJava6M_p0_: ad_i486_misc.o; +text: .text%__1cWShouldNotReachHereNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJimmI0OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cNloadConI0NodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cHMatcherWis_short_branch_offset6Mi_i_; +text: .text%__1cWConstantPoolCacheEntryGverify6kMpnMoutputStream__v_; +text: .text%__1cNloadKlassNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cITypeFuncCeq6kMpknEType__i_; +text: .text%__1cNSafePointNode2t6MIpnIJVMState__v_; +text: .text%__1cOGenerateOopMapPjump_targets_do6MpnOBytecodeStream_pFp0ipi_v4_i_; +text: .text%__1cHcommute6FpnENode_ii_i_: addnode.o; +text: .text%__1cJeRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: cfgnode.o; +text: .text%__1cGOopMapJheap_size6kM_i_; +text: .text%__1cHAddNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLMachNopNodeMideal_Opcode6kM_i_: ad_i486.o; +text: .text%__1cLMachNopNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHCompileYout_preserve_stack_slots6F_I_; +text: .text%__1cIAddINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKjmpDirNodeFclone6kM_pnENode__; +text: .text%__1cIciObject2t6MnGHandle__v_; +text: .text%__1cJTraceTime2T6M_v_; +text: .text%__1cPciObjectFactoryGinsert6MipnIciObject_pnNGrowableArray4C2___v_; +text: .text%__1cWShouldNotReachHereNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKRelocationTpd_call_destination6M_pC_; +text: .text%__1cOCallRelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cKRelocationXpd_set_call_destination6MpCi_v_; +text: 
.text%__1cOCallRelocationPset_destination6MpCi_v_; +text: .text%__1cJlabelOperFlabel6kM_pnFLabel__: ad_i486.o; +text: .text%__1cIHaltNodeGpinned6kM_i_: classes.o; +text: .text%__1cITypeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIRootNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cWMachCallStaticJavaNodePret_addr_offset6M_i_; +text: .text%__1cYCallStaticJavaDirectNodePcompute_padding6kMi_i_; +text: .text%__1cYCallStaticJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cJStartNodeGpinned6kM_i_: classes.o; +text: .text%__1cMTypeKlassPtr2t6MnHTypePtrDPTR_pnHciKlass_i_v_; +text: .text%__1cJloadPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%JVM_IsNaN; +text: .text%__1cJStoreNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLBoxLockNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQciByteCodeStreamQget_method_index6M_i_; +text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__; +text: .text%__1cRInterpretedRFrameKtop_method6kM_nMmethodHandle__: rframe.o; +text: .text%__1cITypeFuncEmake6FpknJTypeTuple_3_pk0_; +text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__; +text: .text%__1cIGraphKitQkill_dead_locals6M_v_; +text: .text%__1cCosVcurrent_stack_pointer6F_pC_; +text: .text%__1cICallNodeHis_Call6M_p0_: classes.o; +text: .text%__1cJTraceTime2t6MpkcpnMelapsedTimer_iipnMoutputStream__v_; +text: .text%__1cNaddI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cXmembar_acquire_lockNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cJLoadPNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIGraphKitMreset_memory6M_pnENode__; +text: .text%__1cLklassVtableTupdate_super_vtable6MpnNinstanceKlass_pnNmethodOopDesc_i_i_; +text: .text%__1cNinstanceKlassQfind_local_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__i_; +text: .text%__1cNaddI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeQlatency_from_use6kMrnLBlock_Array_rnNGrowableArray4CI__pk0p0_i_; +text: .text%__1cMPhaseChaitinKprompt_use6MpnFBlock_I_i_; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2ipnGThread__v_; +text: .text%__1cOkill_dead_code6FpnENode_pnMPhaseIterGVN__i_: node.o; +text: .text%__1cFParsePload_state_from6Mpn0AFBlock__v_; +text: .text%__1cFParseMmerge_common6Mpn0AFBlock_i_v_; +text: .text%__1cNloadConI0NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJStartNodeIis_Start6M_p0_: callnode.o; +text: .text%__1cICmpINodeDsub6kMpknEType_3_3_; +text: .text%__1cHMemNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cIAddINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLProfileDataPfollow_contents6M_v_: ciMethodData.o; +text: .text%__1cLProfileDataPadjust_pointers6M_v_: ciMethodData.o; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cNsubI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cICodeBlobJis_zombie6kM_i_: codeBlob.o; +text: .text%__1cJStoreNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNmethodOopDescbGupdate_compiled_code_entry_point6Mi_v_; +text: .text%__1cSvframeStreamCommonPfill_from_frame6M_i_; +text: .text%__1cNloadRangeNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFBlockUhoist_LCA_above_defs6Mp01IrnLBlock_Array__1_; +text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__; +text: .text%__1cIciMethodLscale_count6Mi_i_; +text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_; +text: .text%__1cHMonitorKnotify_all6M_i_; +text: 
.text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_; +text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cOCallRelocationFvalue6M_pC_: ad_i486.o; +text: .text%__1cNtestP_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMTypeKlassPtrEmake6FnHTypePtrDPTR_pnHciKlass_i_pk0_; +text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cScompP_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRSignatureIteratorSiterate_parameters6MX_v_; +text: .text%__1cOPhaseIdealLoopIset_idom6MpnENode_2I_v_; +text: .text%__1cNSafePointNodeGpinned6kM_i_: classes.o; +text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__; +text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLklassVtableToop_follow_contents6M_v_; +text: .text%__1cLklassVtableToop_adjust_pointers6M_v_; +text: .text%__1cIRootNodeNis_block_proj6kM_pknENode__: classes.o; +text: .text%__1cENodeHis_Type6M_pnITypeNode__: classes.o; +text: .text%__1cNmethodOopDescPis_final_method6kM_i_; +text: .text%__1cOPhaseIdealLoopQconditional_move6MpnENode__2_; +text: .text%__1cNSignatureInfoHdo_void6M_v_: bytecode.o; +text: .text%__1cNloadKlassNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitRnull_check_common6MpnENode_nJBasicType_i_2_; +text: .text%__1cICmpLNodeGOpcode6kM_i_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: constMethodKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: constMethodKlass.o; +text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_; +text: .text%__1cKoopFactoryPnew_constMethod6FiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cPClassFileParserMparse_method6MnSconstantPoolHandle_ipnLAccessFlags_pnPtypeArrayHandle_55pnGThread__nMmethodHandle__; +text: .text%__1cKoopFactoryKnew_method6FinLAccessFlags_iiipnGThread__pnNmethodOopDesc__; +text: .text%__1cLklassVtableWneeds_new_vtable_entry6FpnNmethodOopDesc_pnMklassOopDesc_pnHoopDesc_pnNsymbolOopDesc_nLAccessFlags__i_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: constMethodKlass.o; +text: .text%__1cQconstMethodKlassIallocate6MiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: methodKlass.o; +text: .text%__1cNmethodOopDescLobject_size6Fi_i_; +text: .text%__1cNmethodOopDescJinit_code6M_v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: methodKlass.o; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: methodKlass.o; +text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__; +text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_; +text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_; +text: .text%__1cOcompU_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTciConstantPoolCacheEfind6Mi_i_; +text: .text%__1cKciTypeFlowGJsrSetJcopy_into6Mp1_v_; +text: .text%__1cJLoadLNodeGOpcode6kM_i_; +text: .text%__1cHOrINodeGOpcode6kM_i_; +text: .text%__1cILoadNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cJMarkSweepNpreserve_mark6FpnHoopDesc_pnLmarkOopDesc__v_; +text: 
.text%__1cIRewriterOrewrite_method6FnMmethodHandle_rnIintArray_pnGThread__1_; +text: .text%__1cNmethodOopDescLlink_method6FnMmethodHandle__v_; +text: .text%__1cPClassFileParserbDverify_legal_method_modifiers6MiinMsymbolHandle_pnGThread__v_; +text: .text%__1cJloadINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cFParseFBlockRsuccessor_for_bci6Mi_p1_; +text: .text%__1cQleaPIdxScaleNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKTypeOopPtrFempty6kM_i_; +text: .text%__1cJleaP8NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%JVM_CurrentThread; +text: .text%__1cPindOffset32OperFscale6kM_i_: ad_i486.o; +text: .text%__1cKBranchDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cLOopMapCacheIentry_at6kMi_pnQOopMapCacheEntry__; +text: .text%__1cRInlineCacheBufferIcontains6FpC_i_; +text: .text%__1cKCompiledICWis_in_transition_state6kM_i_; +text: .text%__1cHOopFlowEmake6FpnFArena_i_p0_; +text: .text%__1cOGenerateOopMapKcheck_type6MnNCellTypeState_1_v_; +text: .text%__1cMMergeMemNodeNgrow_to_match6Mpk0_v_; +text: .text%__1cVeADXRegL_low_onlyOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cVloadConL_low_onlyNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQleaPIdxScaleNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHTypeIntFxdual6kM_pknEType__; +text: .text%__1cVloadConL_low_onlyNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNMachIdealNodePoper_input_base6kM_I_: machnode.o; +text: .text%__1cNSharedRuntimeDf2i6Ff_i_; +text: .text%__1cLLShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cVloadConL_low_onlyNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKTypeAryPtrFklass6kM_pnHciKlass__; +text: .text%__1cRaddI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cRRawBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cRInterpretedRFrameEinit6M_v_; +text: .text%__1cMMergeMemNodeRmake_empty_memory6F_pnENode__; +text: .text%__1cMMergeMemNode2t6MpnENode__v_; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_: callnode.o; +text: .text%__1cHciFieldJwill_link6MpnPciInstanceKlass_nJBytecodesECode__i_; +text: .text%__1cFciEnvXget_field_by_index_impl6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__; +text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cQciByteCodeStreamJget_field6Mri_pnHciField__; +text: .text%__1cKBlock_ListGremove6MI_v_; +text: .text%__1cECopyXconjoint_words_to_lower6FpnIHeapWord_2I_v_: block.o; +text: .text%__1cXvirtual_call_RelocationIparse_ic6FrpnICodeBlob_rpC5rppnHoopDesc_pi_nNRelocIterator__; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_16MnJBytecodesECode__v_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: connode.o; +text: .text%__1cIPhaseGVNUtransform_no_reclaim6MpnENode__2_; +text: .text%__1cMLinkResolverbFlinktime_resolve_virtual_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%JVM_GetCPMethodClassNameUTF; +text: .text%__1cKCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNstoreImmBNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHOopFlowFclone6Mp0i_v_; +text: .text%__1cSCallLeafDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKCompiledICOic_destination6kM_pC_; +text: .text%__1cMPhaseChaitinJsplit_DEF6MpnENode_pnFBlock_iIp25nNGrowableArray4CI__i_I_; +text: .text%__1cNGCTaskManagerNresource_flag6MI_i_; +text: 
.text%__1cNGCTaskManagerYshould_release_resources6MI_i_; +text: .text%__1cNtestP_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLStringTableGlookup6MipHiI_pnHoopDesc__; +text: .text%__1cIBoolNodeJideal_reg6kM_I_: subnode.o; +text: .text%__1cFStateM_sub_Op_Bool6MpknENode__v_; +text: .text%__1cHCmpNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__; +text: .text%__1cPciInstanceKlassYunique_concrete_subklass6M_p0_; +text: .text%__1cRcmpFastUnlockNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cHAddNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cENodeFis_If6M_pnGIfNode__: subnode.o; +text: .text%__1cKciTypeFlowNmake_range_at6Mi_pn0AFRange__; +text: .text%__1cTCreateExceptionNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKjmpDirNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNaddI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNinstanceKlassVshould_be_initialized6kM_i_; +text: .text%__1cGThreadLis_in_stack6kMpC_i_; +text: .text%__1cKJavaThreadNis_lock_owned6kMpC_i_; +text: .text%__1cJStartNodeGOpcode6kM_i_; +text: .text%__1cPconvF2D_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: subnode.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: subnode.o; +text: .text%__1cOMethodLivenessKBasicBlockJstore_one6Mi_v_; +text: .text%__1cbFCompressedLineNumberWriteStream2t6Mi_v_; +text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_; +text: .text%__1cPClassFileParserWparse_linenumber_table6MIIpipnGThread__pC_; +text: .text%__1cNsubI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cMWarmCallInfoHis_cold6kM_i_; +text: .text%__1cFStateK_sub_Op_If6MpknENode__v_; +text: .text%__1cJleaP8NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFParseFBlockJinit_node6Mp0i_v_; +text: .text%__1cFParseFBlockKinit_graph6Mp0_v_; +text: .text%__1cGBitMapVset_union_with_result6M0_i_; +text: .text%__1cOPhaseIdealLoopRregister_new_node6MpnENode_2_v_; +text: .text%__1cNtestI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIPhaseIFGFUnion6MII_v_; +text: .text%__1cIGraphKit2t6MpnIJVMState__v_; +text: .text%__1cKCompiledIC2t6MpnKRelocation__v_; +text: .text%__1cLcastP2INodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNloadKlassNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_26MnJBytecodesECode__v_; +text: .text%__1cJStartNodeGis_CFG6kM_i_: callnode.o; +text: .text%__1cLBoxLockNodeHsize_of6kM_I_; +text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_; +text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_; +text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLklassItableToop_adjust_pointers6M_v_; +text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLklassItableToop_follow_contents6M_v_; +text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cICmpUNodeDsub6kMpknEType_3_3_; +text: .text%__1cNincI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKjmpConNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: graphKit.o; +text: 
.text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cTciConstantPoolCacheDget6Mi_pv_; +text: .text%__1cScompI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeHis_Goto6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: memnode.o; +text: .text%__1cOGenerateOopMapFppop16MnNCellTypeState__v_; +text: .text%__1cNLoadRangeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cScompI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: subnode.o; +text: .text%__1cHTypePtrLdual_offset6kM_i_; +text: .text%__1cNGCTaskManagerIget_task6MI_pnGGCTask__; +text: .text%__1cGGCTaskKinitialize6M_v_; +text: .text%__1cNGCTaskManagerWdecrement_busy_workers6M_I_; +text: .text%__1cLGCTaskQdDueueGremove6M_pnGGCTask__; +text: .text%__1cNGCTaskManagerWincrement_busy_workers6M_I_; +text: .text%__1cLGCTaskQdDueueHdequeue6M_pnGGCTask__; +text: .text%__1cLGCTaskQdDueueHenqueue6MpnGGCTask__v_; +text: .text%__1cNGCTaskManagerPnote_completion6MI_v_; +text: .text%__1cENodeHlatency6MI_I_; +text: .text%__1cNsubI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNtestI_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFframeZsender_for_compiled_frame6kMpnLRegisterMap_pnICodeBlob_i_0_; +text: .text%__1cPClassFileParserbFparse_constant_pool_class_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNstoreImmPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cICodeBlobLlink_offset6M_i_; +text: .text%__1cFParseFBlockMrecord_state6Mp0_v_; +text: .text%__1cFParseMdo_one_block6M_v_; +text: .text%__1cJCatchNodeIis_Catch6kM_pk0_: classes.o; +text: .text%__1cENodeHdel_out6Mp0_v_: addnode.o; +text: .text%__1cOMethodLivenessKBasicBlockMmerge_normal6MnGBitMap__i_; +text: .text%__1cLConvI2LNodeGOpcode6kM_i_; +text: .text%__1cPClassFileParserZskip_over_field_signature6MpciIpnGThread__1_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: callnode.o; +text: .text%__1cIGraphKitTadd_safepoint_edges6MpnNSafePointNode_i_v_; +text: .text%__1cIJVMStateKclone_deep6kM_p0_; +text: .text%__1cENodeNadd_req_batch6Mp0I_v_; +text: .text%__1cIJVMStateLdebug_depth6kM_I_; +text: .text%__1cSvframeStreamCommonbBfill_from_interpreter_frame6M_v_; +text: .text%__1cJloadINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFParseFmerge6Mi_v_; +text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_; +text: .text%__1cPFieldAccessInfoDset6MnLKlassHandle_nMsymbolHandle_iinJBasicType_nLAccessFlags__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_iipnGThread__v_; +text: .text%__1cNaddI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cICmpPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cKBufferBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cFframebFinterpreter_frame_monitor_begin6kM_pnPBasicObjectLock__; +text: .text%__1cGGCTask2t6M_v_; +text: .text%__1cOMacroAssemblerZneeds_explicit_null_check6Fi_i_; +text: .text%__1cNstoreImmBNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cETypeFwiden6kMpk0_2_: type.o; +text: .text%__1cIAddPNodeQmach_bottom_type6FpknIMachNode__pknEType__; +text: 
.text%__1cJLoadBNodeGOpcode6kM_i_; +text: .text%__1cRsalI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQleaPIdxScaleNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cFframeNis_java_frame6kM_i_; +text: .text%__1cOPhaseIdealLoopGspinup6MpnENode_2222pnLsmall_cache__2_; +text: .text%__1cRcmpFastUnlockNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: callnode.o; +text: .text%__1cJloadCNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNSafePointNodeEhash6kM_I_: callnode.o; +text: .text%__1cMnabxRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cPCountedLoopNodeGOpcode6kM_i_; +text: .text%__1cKciTypeFlowIblock_at6Mipn0AGJsrSet_n0AMCreateOption__pn0AFBlock__; +text: .text%__1cKciTypeFlowFRangeNget_block_for6Mpn0AGJsrSet_n0AMCreateOption__pn0AFBlock__; +text: .text%__1cHTypeAryFxmeet6kMpknEType__3_; +text: .text%__1cLjmpConUNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNaddI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTCreateExceptionNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cGvframe2t6MpknFframe_pknLRegisterMap_pnKJavaThread__v_; +text: .text%__1cLRegisterMap2t6Mpk0_v_; +text: .text%__1cUGenericGrowableArrayMraw_contains6kMpknEGrET__i_; +text: .text%__1cSvframeStreamCommonEnext6M_v_; +text: .text%__1cIMachNodeMcisc_operand6kM_i_: machnode.o; +text: .text%__1cITypeFuncEmake6FpnIciMethod__pk0_; +text: .text%__1cENodeLnonnull_req6kM_p0_; +text: .text%__1cSPSPromotionManagerbBgc_thread_promotion_manager6Fi_p0_; +text: .text%__1cITypeLongFxmeet6kMpknEType__3_; +text: .text%__1cRcmpFastUnlockNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIAddINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKstoreCNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowLStateVector2t6Mp0_v_; +text: .text%__1cOMethodLivenessNwork_list_get6M_pn0AKBasicBlock__; +text: .text%__1cIIndexSetFclear6M_v_: coalesce.o; +text: .text%__1cOPhaseIdealLoopKhandle_use6MpnENode_2pnLsmall_cache_22222_v_; +text: .text%__1cOPhaseIdealLoopOfind_use_block6MpnENode_22222_2_; +text: .text%__1cXmembar_release_lockNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cGvframeKnew_vframe6FpknFframe_pknLRegisterMap_pnKJavaThread__p0_; +text: .text%__1cCosGmalloc6FI_pv_; +text: .text%__1cNmethodOopDescWwas_executed_more_than6kMi_i_; +text: .text%__1cRInterpreterOopMapKinitialize6M_v_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: machnode.o; +text: .text%__1cRMachNullCheckNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cILoopNodeGOpcode6kM_i_; +text: .text%__1cICmpINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: bytecode.o; +text: .text%__1cHCompileKTracePhase2t6MpkcpnMelapsedTimer_i_v_; +text: .text%__1cHCompileKTracePhase2T6M_v_; +text: .text%__1cILoadNodeHsize_of6kM_I_; +text: .text%__1cKciTypeFlowFBlockPis_simpler_than6Mp1_i_; +text: .text%__1cRshrI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_; +text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cQciByteCodeStreamKget_method6Mri_pnIciMethod__; +text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__; +text: .text%__1cQleaPIdxScaleNodeJnum_opnds6kM_I_: 
ad_i486_misc.o; +text: .text%__1cNCatchProjNodeLbottom_type6kM_pknEType__: cfgnode.o; +text: .text%__1cXindIndexScaleOffsetOperFscale6kM_i_: ad_i486.o; +text: .text%__1cNCatchProjNodeHsize_of6kM_I_: cfgnode.o; +text: .text%__1cKciTypeFlowLStateVectorEmeet6Mpk1_i_; +text: .text%__1cTOopMapForCacheEntryRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cRMachSafePointNode2t6M_v_; +text: .text%__1cHMatcherKmatch_sfpt6MpnNSafePointNode__pnIMachNode__; +text: .text%__1cFframeVinterpreter_frame_bci6kM_i_; +text: .text%__1cQleaPIdxScaleNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOcompI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHemit_cc6FrnKCodeBuffer_ii_v_; +text: .text%__1cJloadLNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cNCatchProjNodeEhash6kM_I_; +text: .text%__1cNincI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cURethrowExceptionNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopHdom_lca6kMpnENode_2_2_; +text: .text%__1cHMatcherScalling_convention6FpnLRegPair_Ii_v_; +text: .text%__1cIGraphKitTadd_exception_state6MpnNSafePointNode__v_; +text: .text%__1cQindOffset32XOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cICallNodeOis_CallRuntime6kM_pknPCallRuntimeNode__: callnode.o; +text: .text%__1cXinsert_anti_dependences6FrpnFBlock_pnENode_rnLBlock_Array__i_: gcm.o; +text: .text%__1cIimmLOperJconstantL6kM_x_: ad_i486_clone.o; +text: .text%__1cWflagsReg_long_LTGEOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cENodeHdel_out6Mp0_v_: lcm.o; +text: .text%__1cXAdaptiveWeightedAverageYcompute_adaptive_average6Mff_f_; +text: .text%__1cIGraphKitOset_all_memory6MpnENode__v_; +text: .text%__1cFKlassIsubklass6kM_p0_; +text: .text%__1cFframebDinterpreter_frame_monitor_end6kM_pnPBasicObjectLock__; +text: .text%__1cHMatcherVReduceInst_Chain_Rule6MpnFState_ipnIMachNode_rpnENode__v_; +text: .text%__1cMciMethodDataLhas_trap_at6MpnLProfileData_i_i_; +text: .text%__1cICodeBlobTfix_oop_relocations6MpC1_v_; +text: .text%__1cICodeBlobTfix_oop_relocations6M_v_; +text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_; +text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cRInterpreterOopMap2t6M_v_; +text: .text%__1cRInterpreterOopMap2T6M_v_; +text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_; +text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_; +text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cFframeToops_interpreted_do6MpnKOopClosure_pknLRegisterMap_i_v_; +text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_; +text: .text%__1cHMatcherPc_frame_pointer6kM_nHOptoRegEName__; +text: .text%__1cMMachCallNode2t6M_v_; +text: .text%__1cFBlockKsched_call6MrnHMatcher_rnLBlock_Array_IrnJNode_List_pipnMMachCallNode_rnJVectorSet__I_; +text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_; +text: .text%__1cHTypeIntEmake6Fii_pk0_; +text: .text%__1cOMethodLivenessKBasicBlockJpropagate6Mp0_v_; +text: .text%__1cNsubI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJOopMapSetTupdate_register_map6FpknFframe_pnICodeBlob_pnLRegisterMap__v_; +text: .text%__1cNsubI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cQSystemDictionaryKfind_class6FiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: 
.text%__1cIGraphKitbLset_predefined_input_for_runtime_call6MpnNSafePointNode__v_; +text: .text%JVM_GetCPFieldClassNameUTF; +text: .text%__1cLcastP2INodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: ciStreams.o; +text: .text%__1cHRetNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHBitDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cScompP_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowLStateVectorOpush_translate6MpnGciType__v_; +text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cKciTypeFlowGJsrSet2t6MpnFArena_i_v_; +text: .text%__1cENodeJset_req_X6MIp0pnMPhaseIterGVN__v_; +text: .text%__1cOcompU_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNstoreImmPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRcmpFastUnlockNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cScompI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: connode.o; +text: .text%__1cPindOffset32OperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: frame.o; +text: .text%__1cLBoxLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHnmethodIis_alive6kM_i_: nmethod.o; +text: .text%__1cPClassFileParserbGparse_constant_pool_string_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNloadConI0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: connode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: connode.o; +text: .text%__1cHCompileSflatten_alias_type6kMpknHTypePtr__3_; +text: .text%__1cNincI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: callnode.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: memnode.o; +text: .text%__1cLBoxLockNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cMorI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cGOopMap2t6Mii_v_; +text: .text%__1cJOopMapSetKadd_gc_map6MiipnGOopMap__v_; +text: .text%__1cYDebugInformationRecorderKadd_oopmap6MiipnGOopMap__v_; +text: .text%__1cMoutputStreamPupdate_position6MpkcI_v_; +text: .text%__1cMstringStreamFwrite6MpkcI_v_; +text: .text%__1cMFastLockNodeGOpcode6kM_i_; +text: .text%__1cIciMethodRhas_compiled_code6M_i_; +text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_; +text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_; +text: .text%__1cICallInfoDset6MnLKlassHandle_nMmethodHandle_pnGThread__v_; +text: .text%__1cKstoreCNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNloadKlassNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvL2INodeGOpcode6kM_i_; +text: .text%__1cRMachSafePointNodeSis_MachCallRuntime6M_pnTMachCallRuntimeNode__: ad_i486_misc.o; +text: .text%__1cHnmethodOis_not_entrant6kM_i_: nmethod.o; +text: .text%__1cIGraphKitbDtransfer_exceptions_into_jvms6M_pnIJVMState__; +text: .text%__1cITypeLongEmake6Fx_pk0_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: ciTypeFlow.o; +text: .text%__1cJloadSNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cICallNodeJideal_reg6kM_I_: callnode.o; +text: 
.text%__1cRMachSafePointNodePis_MachCallLeaf6M_pnQMachCallLeafNode__: ad_i486_misc.o; +text: .text%__1cRMachSafePointNodeLset_oop_map6MpnGOopMap__v_: ad_i486_misc.o; +text: .text%__1cYDebugInformationRecorderNadd_safepoint6MiipnGOopMap__v_; +text: .text%__1cHOopFlowNbuild_oop_map6MpnENode_ipnNPhaseRegAlloc_pi_pnGOopMap__; +text: .text%__1cHCompileTProcess_OopMap_Node6MpnIMachNode_i_v_; +text: .text%__1cENodeGis_Con6kM_I_: callnode.o; +text: .text%__1cILoopNodeHis_Loop6M_p0_: classes.o; +text: .text%__1cOoop_RelocationJpack_data6M_i_; +text: .text%__1cJVectorSetFClear6M_v_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: cfgnode.o; +text: .text%__1cMCallJavaNodeLis_CallJava6kM_pk0_: callnode.o; +text: .text%__1cICallNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_ipnGThread__v_; +text: .text%__1cWConstantPoolCacheEntryJset_field6MnJBytecodesECode_2nLKlassHandle_iinITosState_ii_v_; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cYCallStaticJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMachOperIconstant6kM_i_; +text: .text%__1cRaddI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_; +text: .text%__1cWMachCallStaticJavaNodeVis_MachCallStaticJava6M_p0_: ad_i486_misc.o; +text: .text%__1cFStateW_sub_Op_CallStaticJava6MpknENode__v_; +text: .text%__1cRinterpretedVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cLklassVtableNput_method_at6MpnNmethodOopDesc_i_v_; +text: .text%__1cMloadConPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNdecI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPVirtualCallDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cENodeHdel_out6Mp0_v_: parse3.o; +text: .text%__1cMloadConLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNloadKlassNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJleaP8NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKRelocationLunpack_data6M_v_: codeBlob.o; +text: .text%__1cJimmI8OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cKstoreCNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cGOopMapHcopy_to6MpC_v_; +text: .text%__1cRMachSafePointNodeWis_MachCallInterpreter6M_pnXMachCallInterpreterNode__: ad_i486_misc.o; +text: .text%__1cLPhaseValuesHzerocon6MnJBasicType__pnHConNode__; +text: .text%__1cScompI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: classes.o; +text: .text%__1cOGenerateOopMapGppush16MnNCellTypeState__v_; +text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: cpCacheOop.o; +text: .text%__1cMPhaseChaitinLclone_projs6MpnFBlock_IpnENode_4rI_i_; +text: .text%__1cCosEfree6Fpv_v_; +text: .text%__1cKInlineTreeJcallee_at6kMipnIciMethod__p0_; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: thread.o; +text: .text%__1cRcmpFastUnlockNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOleaPIdxOffNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJCodeCacheIcontains6Fpv_i_; +text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cMVirtualSpaceOcommitted_size6kM_I_; +text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMkpnNmethodOopDesc_i_pnHnmethod__; +text: 
.text%__1cGRFrame2t6MnFframe_pnKJavaThread_kp0_v_; +text: .text%__1cYCallStaticJavaDirectNodeFreloc6kM_i_; +text: .text%__1cKciTypeFlowLStateVectorJcopy_into6kMp1_v_; +text: .text%__1cPThreadLocalNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJLoadCNodeGOpcode6kM_i_; +text: .text%__1cKciTypeFlowFBlockKsuccessors6MpnQciByteCodeStream_pn0ALStateVector_pn0AGJsrSet__pnNGrowableArray4Cp1___; +text: .text%__1cKciTypeFlowQadd_to_work_list6Mpn0AFBlock__v_; +text: .text%__1cKciTypeFlowOwork_list_next6M_pn0AFBlock__; +text: .text%__1cKciTypeFlowKflow_block6Mpn0AFBlock_pn0ALStateVector_pn0AGJsrSet__v_; +text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_; +text: .text%__1cJOopMapSetGall_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure_pFppnHoopDesc_9E_v9B9B_v_; +text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cIGraphKitNuncommon_trap6MipnHciKlass_pkci_v_; +text: .text%__1cJOopMapSetHoops_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cIGraphKitJmake_load6MpnENode_2pknEType_nJBasicType_i_2_; +text: .text%__1cILoadNodeEmake6FpnENode_22pknHTypePtr_pknEType_nJBasicType__p0_; +text: .text%__1cOleaPIdxOffNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLOptoRuntimeSuncommon_trap_Type6F_pknITypeFunc__; +text: .text%__1cIHaltNode2t6MpnENode_2_v_; +text: .text%__1cMindirectOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cKciTypeFlowFBlock2t6Mp0pn0AFRange_pn0AGJsrSet__v_; +text: .text%__1cJloadLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLPCTableNodeLbottom_type6kM_pknEType__; +text: .text%__1cMCreateExNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cYciExceptionHandlerStreamFcount6M_i_; +text: .text%__1cKciTypeFlowFBlockScompute_exceptions6M_v_; +text: .text%__1cScompU_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFciEnvWget_klass_by_name_impl6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: psTasks.o; +text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cIimmPOperPconstant_is_oop6kM_i_: ad_i486_clone.o; +text: .text%__1cLanyRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cLanyRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cOMethodLivenessNwork_list_add6Mpn0AKBasicBlock__v_; +text: .text%__1cMURShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFParsePdo_field_access6Mii_v_; +text: .text%__1cPCountedLoopNodeOis_CountedLoop6M_p0_: classes.o; +text: .text%__1cIAndLNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitQset_saved_ex_oop6FpnNSafePointNode_pnENode__v_; +text: .text%__1cKciTypeFlowPflow_successors6MpnNGrowableArray4Cpn0AFBlock___pn0ALStateVector__v_; +text: .text%__1cIGraphKitUmake_exception_state6MpnENode__pnNSafePointNode__; +text: .text%__1cLcastP2INodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRshrI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGPcDesc2t6Miii_v_; +text: .text%__1cHnmethodKcopy_pc_at6MipnGPcDesc__v_; +text: .text%__1cIPipelineXfunctional_unit_latency6kMIpk0_I_; +text: .text%__1cLTypeInstPtrFxdual6kM_pknEType__; +text: .text%__1cIJumpDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cNdecI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFTypeDCeq6kMpknEType__i_; +text: .text%__1cSindIndexOffsetOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cIMulLNodeGOpcode6kM_i_; +text: .text%__1cOMethodLivenessNmake_block_at6Mipn0AKBasicBlock__2_; +text: .text%__1cENodeHis_Goto6kM_I_: 
cfgnode.o; +text: .text%__1cHCompileKalias_type6MpnHciField__pn0AJAliasType__; +text: .text%__1cINodeHashJhash_find6MpknENode__p1_; +text: .text%__1cNloadConL0NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_nGHandle_2ipnGThread__pnMklassOopDesc__; +text: .text%__1cLLShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRsalI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJLoadINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLnaxRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cFParseFBlockNlocal_type_at6kMi_pknEType__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodKlass.o; +text: .text%__1cXjava_lang_ref_ReferenceNreferent_addr6FpnHoopDesc__p2_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: connode.o; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__: loopnode.o; +text: .text%__1cMelapsedTimerFstart6M_v_; +text: .text%__1cMelapsedTimerEstop6M_v_; +text: .text%__1cNaddI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: methodLiveness.o; +text: .text%__1cNloadConL0NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cLStringTableGintern6FnGHandle_pHipnGThread__pnHoopDesc__; +text: .text%__1cLStringTableLhash_string6FpHi_i_; +text: .text%__1cMMergeMemNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cEUTF8Hstrrchr6FpWiW_1_; +text: .text%__1cPshlI_eReg_1NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cMCreateExNodeGpinned6kM_i_: classes.o; +text: .text%__1cIXorINodeGOpcode6kM_i_; +text: .text%__1cRindIndexScaleOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cNloadConL0NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cVLoaderConstraintTableWfind_loader_constraint6MnMsymbolHandle_nGHandle__ppnVLoaderConstraintEntry__; +text: .text%__1cMPhaseIterGVNJtransform6MpnENode__2_; +text: .text%__1cHi2sNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLLShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRindIndexScaleOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMPhaseChaitinTsplit_Rematerialize6MpnENode_pnFBlock_IrInNGrowableArray4CI__ipIp2i_2_; +text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_; +text: .text%__1cENodeHis_Call6M_pnICallNode__: ad_i486_misc.o; +text: .text%__1cIConLNodeGOpcode6kM_i_; +text: .text%__1cHCompileZintrinsic_insertion_index6MpnIciMethod_i_i_; +text: .text%__1cNstoreImmBNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cILoopNodeHis_Loop6M_p0_: loopnode.o; +text: .text%__1cRandI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKstoreINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cGciTypeMis_classless6kM_i_: ciType.o; +text: .text%__1cTleaPIdxScaleOffNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHTypeIntFwiden6kMpknEType__3_; +text: .text%__1cNCallGenerator2t6MpnIciMethod__v_; +text: .text%__1cIGraphKit2t6M_v_; +text: .text%__1cHMulNodeEhash6kM_I_; +text: .text%__1cFStateM_sub_Op_ConP6MpknENode__v_; +text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: codeBlob.o; +text: .text%__1cIAddLNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitNset_map_clone6MpnNSafePointNode__v_; +text: .text%__1cIGraphKitOreplace_in_map6MpnENode_2_v_; +text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_; +text: 
.text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cScompU_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHCompilebAallow_range_check_smearing6kM_i_; +text: .text%__1cITypeLongEmake6Fxxi_pk0_; +text: .text%JVM_GetCPMethodNameUTF; +text: .text%__1cPmethodDataKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNmodI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLjmpConUNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNLoadKlassNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJeRegIOperFclone6kM_pnIMachOper__; +text: .text%__1cQciByteCodeStreamZget_declared_field_holder6M_pnPciInstanceKlass__; +text: .text%__1cQciByteCodeStreamWget_field_holder_index6M_i_; +text: .text%__1cNSafePointNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: classes.o; +text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cNdecI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLOptoRuntimeOnew_objArray_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cQjava_lang_StringQbasic_create_oop6FpnQtypeArrayOopDesc_ipnGThread__pnHoopDesc__; +text: .text%__1cPcheckCastPPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadSNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKReturnNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cMLinkResolverbNlinktime_resolve_virtual_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cNmethodOopDescbHhas_unloaded_classes_in_signature6FnMmethodHandle_pnGThread__i_; +text: .text%__1cPindOffset32OperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cPindOffset32OperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cQmark_inner_loops6FpnIPhaseCFG_pnFBlock__v_: block.o; +text: .text%__1cPindOffset32OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cPindOffset32OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cPindOffset32OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOemit_d32_reloc6FrnKCodeBuffer_inJrelocInfoJrelocType_i_v_; +text: .text%__1cJloadLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadSNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_; +text: .text%__1cKjmpConNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeFis_If6M_pnGIfNode__: memnode.o; +text: .text%__1cLBuildCutout2t6MpnIGraphKit_pnENode_ff_v_; +text: .text%__1cLBuildCutout2T6M_v_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: graphKit.o; +text: .text%__1cPDictionaryEntrybAcontains_protection_domain6kMpnHoopDesc__i_; +text: .text%__1cNPhaseRegAllocKoffset2reg6kMi_nHOptoRegEName__; +text: .text%__1cFParseRensure_memory_phi6Mii_pnHPhiNode__; +text: .text%__1cHTypeAryCeq6kMpknEType__i_; +text: .text%__1cMorI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNloadRangeNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIAddINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOPhaseIdealLoopIsink_use6MpnENode_2_v_; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: memnode.o; +text: 
.text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_; +text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_; +text: .text%__1cTStackWalkCompPolicyRcompilation_level6MnMmethodHandle_i_i_; +text: .text%__1cIMachNodeTmay_be_short_branch6kM_i_: ad_i486_misc.o; +text: .text%__1cKRegionNodeGpinned6kM_i_: loopnode.o; +text: .text%__1cJloadINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKBranchDataNis_BranchData6M_i_: ciMethodData.o; +text: .text%__1cJloadBNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKMemBarNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIJumpDataLis_JumpData6M_i_: ciMethodData.o; +text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: frame.o; +text: .text%__1cJxRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__; +text: .text%__1cUSafepointSynchronizeOsafepoint_safe6FpnKJavaThread_nPJavaThreadState__i_; +text: .text%__1cUThreadSafepointStateXexamine_state_of_thread6Mi_v_; +text: .text%__1cNsubI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMethodLivenessKBasicBlockQcompute_gen_kill6MpnIciMethod__v_; +text: .text%__1cOMethodLivenessKBasicBlock2t6Mp0ii_v_; +text: .text%__1cOcompI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cJcmpOpOperFccode6kM_i_: ad_i486_clone.o; +text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__; +text: .text%__1cIciMethodRget_flow_analysis6M_pnKciTypeFlow__; +text: .text%__1cENodeHdel_out6Mp0_v_: ifnode.o; +text: .text%__1cEUTF8Ounicode_length6Fpkci_i_; +text: .text%__1cRSignatureIteratorSskip_optional_size6M_v_; +text: .text%__1cKjmpDirNodeOis_pc_relative6kM_i_: ad_i486_misc.o; +text: .text%__1cKjmpDirNodeTmay_be_short_branch6kM_i_: ad_i486_misc.o; +text: .text%__1cKRegionNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFParseKdo_get_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cScompP_mem_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapSget_basic_block_at6kMi_pnKBasicBlock__; +text: .text%__1cJleaP8NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cISubINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cRaddI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKReturnNodeKmatch_edge6kMI_I_; +text: .text%__1cRshrI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPcmpFastLockNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowGJsrSetNapply_control6Mp0pnQciByteCodeStream_pn0ALStateVector__v_; +text: .text%__1cPClassFileParserWparse_field_attributes6MnSconstantPoolHandle_iHpHpi2pnPtypeArrayHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_modifiers6MiipnGThread__v_; +text: .text%__1cKReturnNodeGOpcode6kM_i_; +text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_; +text: .text%__1cNchunk_oops_do6FpnKOopClosure_pnFChunk_pc_I_: handles.o; +text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_virtual_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cMLinkResolverUresolve_virtual_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cKstorePNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_; +text: 
.text%__1cFKlassXcan_be_statically_bound6FpnNmethodOopDesc__i_; +text: .text%__1cQjava_lang_StringMbasic_create6FpnQtypeArrayOopDesc_ipnGThread__nGHandle__; +text: .text%__1cIMachNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIMachNodeOpipeline_class6F_pknIPipeline__; +text: .text%__1cUParallelScavengeHeapNtlab_capacity6kM_I_; +text: .text%__1cKjmpConNodeOis_pc_relative6kM_i_: ad_i486_misc.o; +text: .text%__1cKjmpConNodeTmay_be_short_branch6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: connode.o; +text: .text%__1cPshlI_eReg_1NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachNodeHtwo_adr6kM_I_: machnode.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: machnode.o; +text: .text%__1cHi2sNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: memnode.o; +text: .text%__1cENodeRis_cisc_alternate6kM_i_: machnode.o; +text: .text%__1cICallNodeSis_CallDynamicJava6kM_pknTCallDynamicJavaNode__: callnode.o; +text: .text%__1cKTypeRawPtrHget_con6kM_i_; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: machnode.o; +text: .text%__1cKStoreLNodeGOpcode6kM_i_; +text: .text%__1cPClassFileParserbIparse_constant_pool_fieldref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cScompU_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cHConNodeLout_RegMask6kM_rknHRegMask__: classes.o; +text: .text%__1cETypeRget_typeflow_type6FpnGciType__pk0_; +text: .text%__1cHPhiNodeEmake6FpnENode_2_p0_; +text: .text%__1cHAddNodePadd_of_identity6kMpknEType_3_3_; +text: .text%__1cQindOffset32XOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cRandI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMWarmCallInfoKalways_hot6F_p0_; +text: .text%__1cMWarmCallInfoGis_hot6kM_i_; +text: .text%__1cMTypeKlassPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cMPhaseChaitinKFind_const6kMI_I_; +text: .text%__1cMPhaseChaitinKFind_const6kMpknENode__I_; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopTransform.o; +text: .text%__1cHCompileFstart6kM_pnJStartNode__; +text: .text%JVM_GetCPFieldSignatureUTF; +text: .text%__1cTemit_java_to_interp6FrnKCodeBuffer__v_; +text: .text%__1cKRelocationJpack_data6M_i_: relocInfo.o; +text: .text%__1cKCodeBufferMstart_a_stub6M_v_; +text: .text%__1cKCodeBufferKend_a_stub6M_v_; +text: .text%__1cPThreadRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cPThreadRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cIBoolNodeHsize_of6kM_I_; +text: .text%__1cFParseUprofile_taken_branch6Mi_v_; +text: .text%__1cPmethodDataKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cPmethodDataKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMloadConLNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cTleaPIdxScaleOffNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFframeNis_glue_frame6kM_i_; +text: .text%__1cPcmpFastLockNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cXjava_lang_ref_ReferenceJnext_addr6FpnHoopDesc__p2_; +text: .text%__1cKstoreLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRMachNullCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: machnode.o; +text: .text%__1cICodeHeapLheader_size6F_I_; +text: .text%__1cScompU_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_: methodDataOop.o; +text: .text%__1cXjava_lang_ref_ReferencePdiscovered_addr6FpnHoopDesc__p2_; +text: 
.text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cTleaPIdxScaleOffNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJTypeTupleKmake_range6FpnLciSignature__pk0_; +text: .text%__1cPcheckCastPPNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJStoreNodeSIdeal_masked_input6MpnIPhaseGVN_I_pnENode__; +text: .text%__1cIAddINodeIadd_ring6kMpknEType_3_3_; +text: .text%jni_DeleteLocalRef: jni.o; +text: .text%__1cJTypeTupleLmake_domain6FpnPciInstanceKlass_pnLciSignature__pk0_; +text: .text%__1cSMemBarCPUOrderNodeGOpcode6kM_i_; +text: .text%__1cWShouldNotReachHereNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_Halt6MpknENode__v_; +text: .text%__1cIHaltNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cWflagsReg_long_EQdDNEOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cMoutputStreamDput6Mc_v_; +text: .text%__1cJStoreNodeZIdeal_sign_extended_input6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cWThreadLocalAllocBufferVinitialize_statistics6M_v_; +text: .text%__1cWThreadLocalAllocBufferImax_size6F_I_; +text: .text%__1cWThreadLocalAllocBufferFclear6M_v_; +text: .text%__1cSInterpreterRuntimeDldc6FpnKJavaThread_i_v_; +text: .text%__1cLProfileDataOtranslate_from6Mp0_v_: ciMethodData.o; +text: .text%__1cFframeLreal_sender6kMpnLRegisterMap__0_; +text: .text%__1cTStackWalkCompPolicyIsenderOf6MpnGRFrame_pnNGrowableArray4C2___2_; +text: .text%__1cENodeLbottom_type6kM_pknEType__; +text: .text%__1cFframeTis_first_java_frame6kM_i_; +text: .text%__1cGRFrameGcaller6M_p0_; +text: .text%__1cGRFrameMset_distance6Mi_v_; +text: .text%__1cGRFrameKnew_RFrame6FnFframe_pnKJavaThread_kp0_4_; +text: .text%__1cHConNodeEmake6FpknEType__p0_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlassKlass.o; +text: .text%__1cWThreadLocalAllocBufferVaccumulate_statistics6MIi_v_; +text: .text%__1cWThreadLocalAllocBufferGresize6M_v_; +text: .text%__1cJloadPNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIGraphKitOtoo_many_traps6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cWflagsReg_long_LEGTOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__; +text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cQindOffset32XOperFscale6kM_i_: ad_i486.o; +text: .text%__1cUThreadSafepointStateMroll_forward6Mn0AMsuspend_type_pnHnmethod_i_v_; +text: .text%__1cGThreadQunboost_priority6Fp0_v_; +text: .text%__1cUThreadSafepointStateHrestart6M_v_; +text: .text%__1cIGraphKitTtoo_many_recompiles6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cGIfNodeHsize_of6kM_I_: classes.o; +text: .text%__1cZPhaseConservativeCoalesceKupdate_ifg6MIIpnIIndexSet_2_v_; +text: .text%__1cIIndexSetEswap6Mp0_v_; +text: .text%__1cZPhaseConservativeCoalesceMunion_helper6MpnENode_2II222pnFBlock_I_v_; +text: .text%__1cLcastP2INodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitZadd_exception_states_from6MpnIJVMState__v_; +text: .text%__1cScompP_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__; +text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__; +text: .text%__1cPcmpFastLockNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cScompP_mem_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__; +text: .text%__1cXcmpL_reg_flags_LTGENodeErule6kM_I_: ad_i486_misc.o; +text: 
.text%__1cKRelocationWfix_relocation_at_move6Mi_v_: relocInfo.o; +text: .text%__1cQCallLeafNoFPNodeGOpcode6kM_i_; +text: .text%__1cHCompileOcall_generator6MpnIciMethod_ipnIJVMState_if_pnNCallGenerator__; +text: .text%__1cNciCallProfileRapply_prof_factor6Mf_v_; +text: .text%__1cIciMethodTcall_profile_at_bci6Mi_nNciCallProfile__; +text: .text%__1cHCompileOfind_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cXmembar_acquire_lockNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: multnode.o; +text: .text%__1cENodeHdel_out6Mp0_v_: compile.o; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_: codeBlob.o; +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_kpnGRFrame__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_special_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_4ipnGThread__v_; +text: .text%__1cMLinkResolverbFlinktime_resolve_special_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverUresolve_special_call6FrnICallInfo_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cXindIndexScaleOffsetOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cXindIndexScaleOffsetOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cXindIndexScaleOffsetOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cPBytecode_invokeFindex6kM_i_; +text: .text%__1cNCatchProjNode2t6MpnENode_Ii_v_; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_: ciMethodData.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: cfgnode.o; +text: .text%__1cQciByteCodeStreamXget_method_holder_index6M_i_; +text: .text%__1cFParseHdo_call6M_v_; +text: .text%__1cIGraphKitWround_double_arguments6MpnIciMethod__v_; +text: .text%__1cIGraphKitTround_double_result6MpnIciMethod__v_; +text: .text%__1cFParseZcan_not_compile_call_site6MpnIciMethod_pnPciInstanceKlass__i_; +text: .text%__1cQciByteCodeStreambAget_declared_method_holder6M_pnHciKlass__; +text: .text%__1cFParseMprofile_call6MpnENode__v_; +text: .text%__1cKstorePNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMMachProjNodeHsize_of6kM_I_: classes.o; +text: .text%__1cPciInstanceKlassFsuper6M_p0_; +text: .text%__1cNCatchProjNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cFKlassOis_subclass_of6kMpnMklassOopDesc__i_; +text: .text%__1cSindIndexOffsetOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMeADXRegLOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cKciTypeFlowLStateVectorJdo_invoke6MpnQciByteCodeStream_i_v_; +text: .text%__1cHMulNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMtlsLoadPNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cNstoreImmPNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLPCTableNodeHsize_of6kM_I_: classes.o; +text: .text%__1cLPCTableNodeKis_PCTable6kM_pk0_: classes.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: cfgnode.o; +text: .text%__1cJStartNodeIis_Start6M_p0_: classes.o; +text: .text%__1cLPCTableNodeEhash6kM_I_; +text: .text%__1cMURShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNObjectMonitorEexit6MpnGThread__v_; +text: .text%__1cIProjNodeDcmp6kMrknENode__I_; +text: .text%__1cXindIndexScaleOffsetOperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cSmembar_acquireNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKDataLayoutKinitialize6MCHi_v_; +text: .text%__1cKDataLayoutPneeds_array_len6FC_i_; +text: 
.text%__1cScompP_mem_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNincI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMindirectOperKin_RegMask6kMi_pknHRegMask__; +text: .text%jni_GetObjectField: jni.o; +text: .text%__1cSCompareAndSwapNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cRInvocationCounterJset_carry6M_v_; +text: .text%__1cJloadBNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRAbstractAssemblerHbind_to6MrnFLabel_i_v_; +text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_; +text: .text%__1cOCompilerOracleOshould_exclude6FnMmethodHandle__i_; +text: .text%__1cMnabxRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNObjectMonitorFenter6MpnGThread__v_; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: ad_i486_misc.o; +text: .text%__1cFframeUentry_frame_is_first6kM_i_; +text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_; +text: .text%__1cHPhiNodeMslice_memory6kMpknHTypePtr__p0_; +text: .text%__1cKMemBarNodeEhash6kM_I_; +text: .text%__1cPstoreImmI16NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIciSymbolJmake_impl6Fpkc_p0_; +text: .text%__1cIciSymbolEmake6Fpkc_p0_; +text: .text%__1cXcmpL_reg_flags_LTGENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNloadConI0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNtestI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitNcast_not_null6MpnENode__2_; +text: .text%__1cKEntryPointFentry6kMnITosState__pC_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: nmethod.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: loopnode.o; +text: .text%__1cJleaP8NodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cKGCStatInfoMset_gc_usage6MinLMemoryUsage_i_v_; +text: .text%__1cWCallLeafNoFPDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cGRFrameLis_compiled6kM_i_: rframe.o; +text: .text%__1cRInterpretedRFrameOis_interpreted6kM_i_: rframe.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: callnode.o; +text: .text%__1cRsalI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPciInstanceKlassUget_canonical_holder6Mi_p0_; +text: .text%__1cENodeHdel_out6Mp0_v_: callnode.o; +text: .text%__1cISubINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cILoadNodeDcmp6kMrknENode__I_; +text: .text%__1cMDisplacementEbind6MrnFLabel_ipnRAbstractAssembler__v_; +text: .text%__1cTStackWalkCompPolicyMshouldInline6FnMmethodHandle_fi_pkc_; +text: .text%__1cTconstantPoolOopDescMklass_at_put6MipnMklassOopDesc__v_: constantPoolOop.o; +text: .text%__1cFTypeDEhash6kM_i_; +text: .text%__1cIGraphKitHjava_bc6kM_nJBytecodesECode__; +text: .text%__1cKarrayKlassLobject_size6kMi_i_; +text: .text%__1cIGraphKitNbuiltin_throw6MnODeoptimizationLDeoptReason_pnENode__v_; +text: .text%__1cIciMethodOshould_exclude6M_i_; +text: .text%__1cKInlineTreeWfind_subtree_from_root6Fp0pnIJVMState_pnIciMethod_i_1_; +text: .text%__1cIciMethodNshould_inline6M_i_; +text: .text%__1cKInlineTreeMshouldInline6kMpnIciMethod_irnNciCallProfile_pnMWarmCallInfo__pkc_; +text: .text%__1cTpass_initial_checks6FpnIciMethod_i1_i_; +text: .text%__1cOCompilerOracleNshould_inline6FnMmethodHandle__i_; +text: .text%__1cIciMethodbAinterpreter_throwout_count6kM_i_; +text: .text%__1cKInlineTreeMok_to_inline6MpnIciMethod_pnIJVMState_rnNciCallProfile_pnMWarmCallInfo__8_; +text: 
.text%__1cKInlineTreeNtry_to_inline6MpnIciMethod_irnNciCallProfile_pnMWarmCallInfo__pkc_; +text: .text%__1cVExceptionHandlerTableJadd_entry6MnRHandlerTableEntry__v_; +text: .text%__1cKstoreINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLRShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRaddI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTCreateExceptionNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOjmpLoopEndNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKstoreLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cGThreadXclear_pending_exception6M_v_; +text: .text%__1cLRethrowNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_; +text: .text%__1cFStateM_sub_Op_CmpP6MpknENode__v_; +text: .text%__1cMorI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cLTypeInstPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cIciMethodWwas_executed_more_than6Mi_i_; +text: .text%__1cRshrI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConINodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cKstoreLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHMonitorEwait6Mil_i_; +text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_; +text: .text%__1cTCompareAndSwapLNodeGOpcode6kM_i_; +text: .text%__1cScompP_mem_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cJAssemblerEmovl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cLOpaque1NodeGOpcode6kM_i_; +text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cPcmpFastLockNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLnaxRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNCompileBrokerYcheck_compilation_result6FnMmethodHandle_iippnHnmethod__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlass.o; +text: .text%__1cJloadCNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKJavaThreadbHcheck_and_handle_async_exceptions6Mi_v_; +text: .text%__1cbAjni_check_async_exceptions6FpnKJavaThread__v_: jni.o; +text: .text%__1cKciTypeFlowLStateVectorStype_meet_internal6FpnGciType_3p0_3_; +text: .text%__1cLeDXRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cJFieldTypeSskip_optional_size6FpnNsymbolOopDesc_pi_v_; +text: .text%__1cQjmpCon_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQjmpCon_shortNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_: objArrayKlassKlass.o; +text: .text%__1cIGraphKitZset_all_rewritable_memory6MpnENode__v_; +text: .text%__1cIGraphKitTset_all_memory_call6MpnENode__v_; +text: .text%__1cJloadSNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKjmpDirNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cKjmpDirNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIBoolNodeDcmp6kMrknENode__I_; +text: .text%__1cKjmpDirNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cScompI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cICodeBlobLoop_addr_at6kMi_ppnHoopDesc__; +text: .text%__1cRaddI_eReg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cNloadRangeNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJFieldTypeYis_valid_array_signature6FpnNsymbolOopDesc__i_; 
+text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__; +text: .text%__1cFKlassMnext_sibling6kM_p0_; +text: .text%__1cPCheckCastPPNodeJideal_reg6kM_I_: connode.o; +text: .text%__1cJloadCNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cOClearArrayNodeGOpcode6kM_i_; +text: .text%__1cPThreadLocalNodeGOpcode6kM_i_; +text: .text%__1cRcmpFastUnlockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMeADXRegLOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cFParseRbranch_prediction6Mrf_f_; +text: .text%__1cRandI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLeAXRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLeAXRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cPshlI_eReg_1NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowLStateVectorMdo_getstatic6MpnQciByteCodeStream__v_; +text: .text%__1cISubINodeDsub6kMpknEType_3_3_; +text: .text%__1cNCompileBrokerXcompilation_is_in_queue6FnMmethodHandle_i_i_; +text: .text%__1cSInterpreterRuntimebAfrequency_counter_overflow6FpnKJavaThread_pC_x_; +text: .text%__1cRindIndexScaleOperFscale6kM_i_: ad_i486.o; +text: .text%__1cNaddI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cLmethodKlassNoop_is_method6kM_i_: methodKlass.o; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceKlass.o; +text: .text%__1cENodeHdel_out6Mp0_v_: loopnode.o; +text: .text%__1cJxRegIOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_: loopnode.o; +text: .text%__1cIParmNodeJideal_reg6kM_I_; +text: .text%__1cICodeHeapSallocated_capacity6kM_I_; +text: .text%__1cHciKlassNis_subtype_of6Mp0_i_; +text: .text%__1cNtestP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICHeapObj2n6FI_pv_; +text: .text%__1cENodeHdel_out6Mp0_v_: loopTransform.o; +text: .text%__1cQleaPIdxScaleNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cNloadConI0NodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cHTypeAryFempty6kM_i_; +text: .text%__1cKTypeAryPtrFempty6kM_i_; +text: .text%__1cCosFsleep6FpnGThread_xi_i_; +text: .text%__1cNgetTimeMillis6F_x_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: scopeDesc.o; +text: .text%__1cIos_sleep6Fxi_i_: os_solaris.o; +text: .text%__1cOPhaseIdealLoopOsplit_thru_phi6MpnENode_2i_2_; +text: .text%__1cENodeRlatency_from_uses6kMrnLBlock_Array_rnNGrowableArray4CI___i_; +text: .text%__1cXPhaseAggressiveCoalesceYinsert_copy_with_overlap6MpnFBlock_pnENode_II_v_; +text: .text%__1cCosOis_interrupted6FpnGThread_i_i_; +text: .text%__1cNaddI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_i_v_; +text: .text%__1cNloadKlassNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateN_sub_Op_LoadP6MpknENode__v_; +text: .text%__1cNmethodOopDescWload_signature_classes6FnMmethodHandle_pnGThread__i_; +text: .text%__1cTconstantPoolOopDescbDresolve_string_constants_impl6FnSconstantPoolHandle_pnGThread__v_; +text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__; +text: .text%__1cNCompileBrokerTcompile_method_base6FnMmethodHandle_ii1ipkcpnGThread__pnHnmethod__; +text: .text%__1cITypeLongFempty6kM_i_; +text: .text%__1cJloadINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cFArenaRdestruct_contents6M_v_; +text: .text%__1cJloadSNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_CmpI6MpknENode__v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodKlass.o; +text: .text%__1cIAddPNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_rnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cPcmpFastLockNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNmulL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJStoreNodeEmake6FpnENode_22pknHTypePtr_2nJBasicType__p0_; +text: .text%__1cIGraphKitPstore_to_memory6MpnENode_22nJBasicType_i_2_; +text: .text%__1cHi2sNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_; +text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__; +text: .text%__1cLcastP2INodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompP_mem_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNincI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNdecI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIciObjectFklass6M_pnHciKlass__; +text: .text%__1cQPSGenerationPoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cQPSGenerationPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cQPSGenerationPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cFParseYprofile_not_taken_branch6M_v_; +text: .text%__1cLRShiftLNodeGOpcode6kM_i_; +text: .text%__1cKMemBarNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cMURShiftLNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: parse2.o; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: cfgnode.o; +text: .text%__1cIMulINodeGOpcode6kM_i_; +text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__; +text: .text%__1cIConDNodeGOpcode6kM_i_; +text: .text%__1cKInlineTreePshouldNotInline6kMpnIciMethod_pnMWarmCallInfo__pkc_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: bytecode.o; +text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__; +text: .text%__1cNandL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6FnUtypeArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cKciTypeFlowIcan_trap6MrnQciByteCodeStream__i_; +text: .text%__1cNxorI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cZload_long_indOffset32OperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cFParseNthrow_to_exit6MpnNSafePointNode__v_; +text: .text%__1cJloadLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKjmpConNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cKjmpConNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKjmpConNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cMLinkResolverbElinktime_resolve_static_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cLencode_Copy6FrnKCodeBuffer_ii_v_; +text: .text%__1cQjmpDir_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQjmpDir_shortNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_; +text: .text%__1cENodeIadd_prec6Mp0_v_; +text: .text%__1cLjmpConUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMachNodeSalignment_required6kM_i_: machnode.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: machnode.o; +text: 
.text%__1cKType_ArrayEgrow6MI_v_; +text: .text%__1cMorI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMorI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cUmembar_cpu_orderNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSconstMethodOopDescbEchecked_exceptions_length_addr6kM_pH_; +text: .text%__1cFParseFdo_if6MpnENode_2nIBoolTestEmask_2_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: ciMethodData.o; +text: .text%__1cLBoxLockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJxRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_; +text: .text%__1cIciMethod2t6MnMmethodHandle__v_; +text: .text%__1cIConINodeHget_int6kMpi_i_: classes.o; +text: .text%__1cIciMethodbHhas_unloaded_classes_in_signature6M_i_; +text: .text%__1cNinstanceKlassVis_same_class_package6FpnHoopDesc_pnNsymbolOopDesc_24_i_; +text: .text%__1cNloadRangeNodeIpipeline6kM_pknIPipeline__; +text: .text%jni_ExceptionOccurred: jni.o; +text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cHTypeAryEmake6FpknEType_pknHTypeInt__pk0_; +text: .text%__1cJloadCNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNsubI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cEUTF8Fequal6FpWi1i_i_; +text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_: statSampler.o; +text: .text%__1cLStatSamplerLsample_data6FpnMPerfDataList__v_; +text: .text%__1cRis_error_reported6F_i_; +text: .text%__1cMPeriodicTaskOreal_time_tick6FI_v_; +text: .text%__1cLStatSamplerOcollect_sample6F_v_; +text: .text%__1cPStatSamplerTaskEtask6M_v_: statSampler.o; +text: .text%__1cNWatcherThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cMPeriodicTaskMtime_to_wait6F_I_: thread.o; +text: .text%jni_GetByteArrayRegion: jni.o; +text: .text%__1cSCallLeafDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2I_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cOoop_RelocationHoops_do6MpFppnHoopDesc__v_v_; +text: .text%__1cKBufferBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cSdivD_reg_roundNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQleaPIdxScaleNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cKstorePNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateP_sub_Op_LShiftI6MpknENode__v_; +text: .text%jni_GetArrayLength: jni.o; +text: .text%__1cICodeHeapIcapacity6kM_I_; +text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cMCodeHeapPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cRresolve_and_patch6FppnHoopDesc__v_; +text: .text%__1cMorI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIPipelinePoperand_latency6kMIpk0_I_; +text: .text%__1cIMachOperEtype6kM_pknEType__; +text: .text%__1cVCompressedWriteStreamEgrow6M_v_; +text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_; +text: .text%__1cIregFOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cNloadRangeNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJAssemblerFpushl6MpnMRegisterImpl__v_; +text: .text%JVM_Write; +text: .text%__1cDhpiFwrite6FipkvI_I_: jvm.o; +text: .text%__1cPstoreImmI16NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: 
.text%__1cPCheckCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJStartNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cKciTypeFlowLStateVectorLdo_getfield6MpnQciByteCodeStream__v_; +text: .text%__1cFParseMvisit_blocks6M_v_; +text: .text%__1cNsubI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMLinkResolverTresolve_static_call6FrnICallInfo_rnLKlassHandle_nMsymbolHandle_53iipnGThread__v_; +text: .text%__1cRsalI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvD2I_reg_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowGJsrSetSis_compatible_with6Mp1_i_; +text: .text%__1cOcompU_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadBNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLOpaque1NodeEhash6kM_I_; +text: .text%__1cXcmpL_reg_flags_LTGENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_: classes.o; +text: .text%__1cJcmpOpOperGnegate6M_v_: ad_i486_clone.o; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_2_v_; +text: .text%__1cGBitMapIset_from6M0_v_; +text: .text%__1cGThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%__1cKMemBarNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKjmpConNodeGnegate6M_v_: ad_i486_misc.o; +text: .text%__1cFMutexMjvm_raw_lock6M_v_; +text: .text%__1cFMutexOjvm_raw_unlock6M_v_; +text: .text%JVM_RawMonitorEnter; +text: .text%JVM_RawMonitorExit; +text: .text%__1cXmembar_release_lockNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cITypeLongEmake6Fxx_pk0_; +text: .text%__1cNaddL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKcmpOpUOperFccode6kM_i_: ad_i486_clone.o; +text: .text%__1cPClassFileParserUskip_over_field_name6MpciI_1_; +text: .text%__1cLjmpConUNodeOis_pc_relative6kM_i_: ad_i486_misc.o; +text: .text%__1cLjmpConUNodeTmay_be_short_branch6kM_i_: ad_i486_misc.o; +text: .text%__1cNstoreImmPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cURethrowExceptionNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFParseOreturn_current6MpnENode__v_; +text: .text%__1cETypeCeq6kMpk0_i_; +text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o; +text: .text%__1cHSubNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKReflectionVis_same_class_package6FpnMklassOopDesc_2_i_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnMklassOopDesc__i_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: bytecode.o; +text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_; +text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_; +text: .text%__1cRaddI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLeCXRegIOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_; +text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cLBoxLockNodeKis_BoxLock6kM_pk0_: classes.o; +text: .text%__1cLBoxLockNodeKstack_slot6FpnENode__nHOptoRegEName__; +text: .text%__1cKCastPPNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cFParseRoptimize_inlining6MpnIciMethod_ipnPciInstanceKlass_24irnKInlineTreeLInlineStyle_r2_v_; +text: .text%__1cQimprove_receiver6FpnPciInstanceKlass_pknLTypeInstPtr_ri_1_; +text: .text%__1cJloadPNodeFreloc6kM_i_; +text: .text%__1cQStackFrameStream2t6MpnKJavaThread_i_v_; +text: .text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__; +text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: 
.text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__; +text: .text%__1cLRShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFParseSmerge_memory_edges6MpnMMergeMemNode_ii_v_; +text: .text%__1cJloadBNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cFBlockTimplicit_null_check6MrnLBlock_Array_rnNGrowableArray4CI__pnENode_6_v_; +text: .text%__1cJTypeTupleFxdual6kM_pknEType__; +text: .text%__1cJTimeStampJupdate_to6Mx_v_; +text: .text%__1cJTimeStampGupdate6M_v_; +text: .text%__1cMloadConLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmulL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSmembar_releaseNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRaddI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cISubINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: loopnode.o; +text: .text%__1cRsarI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_AddI6MpknENode__v_; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: loopnode.o; +text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_; +text: .text%__1cLCastP2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIciMethodLis_accessor6kM_i_; +text: .text%__1cRScavengeRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cRScavengeRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_; +text: .text%__1cPsarI_eReg_1NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cXindIndexScaleOffsetOperOindex_position6kM_i_: ad_i486.o; +text: .text%__1cXindIndexScaleOffsetOperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cNSafePointNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRmethodDataOopDescLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cZload_long_indOffset32OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMloadConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cScompU_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHUNICODELutf8_length6FpHi_i_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: memnode.o; +text: .text%__1cRInterpretedRFrameKtop_vframe6kM_pnKjavaVFrame__: rframe.o; +text: .text%__1cRinterpretedVFrameDbcp6kM_pC_; +text: .text%__1cRinterpretedVFrameDbci6kM_i_; +text: .text%__1cMCallLeafNodeLis_CallLeaf6kM_pk0_: classes.o; +text: .text%__1cJAssemblerEpopl6MpnMRegisterImpl__v_; +text: .text%__1cKStoreCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLRethrowNodeGOpcode6kM_i_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: loopnode.o; +text: .text%__1cPsarI_eReg_1NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cZload_long_indOffset32OperFscale6kM_i_: ad_i486.o; +text: .text%__1cIciMethodVhas_balanced_monitors6M_i_; +text: .text%__1cFArenaEused6kM_I_; +text: .text%__1cFParseNdo_all_blocks6M_v_; +text: .text%__1cOParseGeneratorJcan_parse6FpnIciMethod_i_i_; +text: .text%__1cFParseLbuild_exits6M_v_; +text: .text%__1cFParseIdo_exits6M_v_; +text: .text%__1cFParse2t6MpnIJVMState_pnIciMethod_f_v_; +text: .text%__1cFParseQcreate_entry_map6M_pnNSafePointNode__; +text: .text%__1cOParseGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cFParseLinit_blocks6M_v_; +text: .text%__1cIciMethodPcan_be_compiled6M_i_; +text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_; 
+text: .text%__1cRRawBytecodeStream2t6MnMmethodHandle__v_; +text: .text%__1cNCallGeneratorKfor_inline6FpnIciMethod_f_p0_; +text: .text%__1cFParsePdo_method_entry6M_v_; +text: .text%__1cKstoreCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQComputeCallStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cTStackWalkCompPolicyPshouldNotInline6FnMmethodHandle__pkc_; +text: .text%__1cRaddI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTcompareAndSwapLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPSignatureStreamRas_symbol_or_null6M_pnNsymbolOopDesc__; +text: .text%__1cYDebugInformationRecorderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: .text%__1cHi2sNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIget_long6kM_x_; +text: .text%__1cQciByteCodeStreamSget_constant_index6kM_i_; +text: .text%__1cICHeapObj2k6Fpv_v_; +text: .text%__1cFArena2T6M_v_; +text: .text%jni_GetSuperclass: jni.o; +text: .text%__1cLklassVtableXvtable_accessibility_at6Mi_n0AKAccessType__; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmBNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: cfgnode.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: cfgnode.o; +text: .text%__1cNdecI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cScompU_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNstoreImmINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cPJavaCallWrapper2t6MnMmethodHandle_nGHandle_pnJJavaValue_pnGThread__v_; +text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_; +text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cRJavaCallArgumentsKparameters6M_pi_; +text: .text%__1cCosbCstack_shadow_pages_available6FpnGThread_nMmethodHandle__i_; +text: .text%__1cRruntime_type_from6FpnJJavaValue__nJBasicType__: javaCalls.o; +text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_; +text: .text%__1cJMultiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPJavaCallWrapper2T6M_v_; +text: .text%__1cUGenericGrowableArrayEgrow6Mi_v_; +text: .text%__1cSCallLeafDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowFBlockPclone_loop_head6Mp0ip1pn0AGJsrSet__3_; +text: .text%__1cITypeFuncFxdual6kM_pknEType__; +text: .text%__1cNxorI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHMatcherPstack_alignment6F_I_; +text: .text%__1cKInlineTree2t6MpnHCompile_pk0pnIciMethod_pnIJVMState_if_v_; +text: .text%__1cMLinkResolverXresolve_klass_no_update6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_; +text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cMURShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLeSIRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cQSystemDictionaryVadd_loader_constraint6FnMsymbolHandle_nGHandle_2pnGThread__v_; +text: .text%__1cVLoaderConstraintTableJadd_entry6MnMsymbolHandle_pnMklassOopDesc_nGHandle_34pnGThread__i_; +text: .text%__1cNloadKlassNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSloadL_volatileNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: 
.text%__1cOGenerateOopMapCpp6MpnNCellTypeState_2_v_; +text: .text%__1cDCHANprocess_class6FnLKlassHandle_pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__i_; +text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_; +text: .text%__1cHoopDescSslow_identity_hash6M_i_; +text: .text%__1cQindOffset32XOperLdisp_is_oop6kM_i_: ad_i486.o; +text: .text%__1cQindOffset32XOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cQindOffset32XOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cQindOffset32XOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cIMulINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cGciType2t6MnLKlassHandle__v_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodDataKlass.o; +text: .text%__1cHciKlass2t6MnLKlassHandle__v_; +text: .text%__1cLRShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKInlineTreeYcompute_callee_frequency6kMi_f_; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cKInlineTreebCbuild_inline_tree_for_callee6MpnIciMethod_pnIJVMState_i_p0_; +text: .text%__1cIciMethodbBinterpreter_call_site_count6Mi_i_; +text: .text%__1cHciKlassOis_subclass_of6Mp0_i_; +text: .text%__1cMtlsLoadPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_; +text: .text%__1cJGC_lockerNlock_critical6FpnKJavaThread__v_: jni.o; +text: .text%__1cJAssemblerElock6M_v_; +text: .text%__1cRindIndexScaleOperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cKRegionNodeEhash6kM_I_: loopnode.o; +text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__; +text: .text%__1cXmembar_release_lockNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHnmethodbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cLklassVtableKis_miranda6FpnNmethodOopDesc_pnPobjArrayOopDesc_pnMklassOopDesc__i_; +text: .text%__1cENodeHdel_out6Mp0_v_: library_call.o; +text: .text%__1cRandI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNIdealLoopTreeIset_nest6MI_i_; +text: .text%__1cNIdealLoopTreeMcounted_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cPshlI_eReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlass.o; +text: .text%__1cRalign_code_offset6Fi_I_; +text: .text%__1cRciVirtualCallDataOtranslate_from6MpnLProfileData__v_; +text: .text%__1cMLinkResolverVresolve_invokespecial6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cJAssemblerHcmpxchg6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: nmethod.o; +text: .text%__1cONMethodSweeperPprocess_nmethod6FpnHnmethod__v_; +text: .text%__1cKTypeAryPtrFxdual6kM_pknEType__; +text: .text%__1cRmethodDataOopDescJbci_to_dp6Mi_pC_; +text: .text%__1cOjmpLoopEndNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: ad_i486.o; +text: .text%__1cMVM_OperationPevaluation_mode6kM_n0AEMode__: vm_operations.o; +text: .text%__1cRsalI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cbAPSEvacuateFollowersClosureHdo_void6M_v_: psScavenge.o; +text: .text%__1cHnmethodVcleanup_inline_caches6M_v_; +text: .text%__1cIDivINodeGOpcode6kM_i_; +text: .text%__1cMURShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o; +text: .text%__1cIimmDOperJconstantD6kM_d_: ad_i486_clone.o; +text: 
.text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2I_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cScompU_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cScompU_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_; +text: .text%jni_GetPrimitiveArrayCritical: jni.o; +text: .text%jni_ReleasePrimitiveArrayCritical: jni.o; +text: .text%__1cXcmpL_reg_flags_LEGTNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMrep_stosNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJStartNodeOis_block_start6kM_i_: callnode.o; +text: .text%__1cSsafePoint_pollNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_: cpCacheOop.o; +text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_: cpCacheOop.o; +text: .text%__1cOMethodLivenessKBasicBlockIload_two6Mi_v_; +text: .text%__1cIGraphKitMarray_length6MpnENode__2_; +text: .text%__1cJloadCNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXmembar_release_lockNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cMCreateExNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNincI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFArena2t6M_v_; +text: .text%__1cMindirectOperFscale6kM_i_: ad_i486.o; +text: .text%__1cHMulNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLsymbolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cLPCTableNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMCreateExNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cLRShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJcmpOpOperFclone6kM_pnIMachOper__; +text: .text%__1cIJVMState2t6Mi_v_; +text: .text%__1cKPerfStringKset_string6Mpkc_v_; +text: .text%__1cJStartNodeOis_block_start6kM_i_: classes.o; +text: .text%__1cPciObjArrayKlassSis_obj_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cNeFlagsRegOperFclone6kM_pnIMachOper__; +text: .text%__1cMMachCallNodeHis_Call6M_pnICallNode__: ad_i486_misc.o; +text: .text%__1cKTypeRawPtrCeq6kMpknEType__i_; +text: .text%__1cFStateQ_sub_Op_CreateEx6MpknENode__v_; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjectFactory.o; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciObjectFactory.o; +text: .text%__1cLjmpConUNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cLjmpConUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLjmpConUNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cLOpaque1NodeLbottom_type6kM_pknEType__: connode.o; +text: .text%__1cEDict2t6MpFpkv2_ipF2_i_v_; +text: .text%__1cEDict2T6M_v_; +text: .text%__1cNCatchProjNodeDcmp6kMrknENode__I_; +text: .text%__1cIGraphKitRmake_slow_call_ex6MpnENode_pnPciInstanceKlass__v_; +text: .text%__1cSCountedLoopEndNodeKstride_con6kM_i_; +text: .text%__1cRmethodDataOopDescKmileage_of6FpnNmethodOopDesc__i_; +text: .text%__1cQconstMethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cHCompileXin_preserve_stack_slots6M_I_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: symbolKlass.o; +text: 
.text%__1cFKlassMoop_is_array6kM_i_: symbolKlass.o; +text: .text%__1cPconvL2I_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLmethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cKTypeRawPtrFxmeet6kMpknEType__3_; +text: .text%__1cTJvmtiEventCollectorYunset_jvmti_thread_state6M_v_; +text: .text%__1cZload_long_indOffset32OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cZload_long_indOffset32OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cZload_long_indOffset32OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_; +text: .text%__1cIMinINodeGOpcode6kM_i_; +text: .text%__1cHMemNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cHMemNodeScalculate_adr_type6FpknEType_pknHTypePtr__6_; +text: .text%__1cHMatcherXadjust_incoming_stk_arg6MnHOptoRegEName__2_; +text: .text%__1cFStateM_sub_Op_CmpU6MpknENode__v_; +text: .text%__1cSPSKeepAliveClosureGdo_oop6MppnHoopDesc__v_: psScavenge.o; +text: .text%__1cFTypeDEmake6Fd_pk0_; +text: .text%jni_IsSameObject: jni.o; +text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_; +text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_; +text: .text%__1cQindOffset32XOperMdisp_as_type6kM_pknHTypePtr__: ad_i486.o; +text: .text%__1cQindOffset32XOperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cQindOffset32XOperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cNstoreImmBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTcompareAndSwapLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLlog2_intptr6Fi_i_: mulnode.o; +text: .text%__1cHOrINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMloadConDNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cScompP_mem_eRegNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cHMulNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRaddI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LEGTNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKJavaThreadQlast_java_vframe6MpnLRegisterMap__pnKjavaVFrame__; +text: .text%__1cKjavaVFrameNis_java_frame6kM_i_: vframe.o; +text: .text%__1cTStackWalkCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_; +text: .text%__1cTStackWalkCompPolicyVfindTopInlinableFrame6MpnNGrowableArray4CpnGRFrame____2_; +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_nMmethodHandle__v_; +text: .text%__1cRMachSafePointNodeLis_MachCall6M_pnMMachCallNode__: ad_i486_misc.o; +text: .text%__1cKStoreBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNandL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLCounterDataOis_CounterData6M_i_: methodDataOop.o; +text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_; +text: .text%__1cENodeHget_int6kMpi_i_; +text: .text%__1cOcompI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNinstanceKlassbDcheck_valid_for_instantiation6MipnGThread__v_; +text: .text%__1cYSurvivorMutableSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cYSurvivorMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: 
.text%__1cYSurvivorMutableSpacePoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cTleaPIdxScaleOffNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJcmpOpOperFequal6kM_i_: ad_i486_clone.o; +text: .text%__1cMorI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_; +text: .text%__1cKRegionNodeOhas_unique_phi6kM_pnHPhiNode__; +text: .text%__1cNnegI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRsarI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPsarI_eReg_1NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLRethrowNodeKmatch_edge6kMI_I_; +text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cPshrI_eReg_1NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_; +text: .text%__1cQVMOperationQdDueueLqueue_empty6Mi_i_; +text: .text%__1cTCreateExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFTypeFEhash6kM_i_; +text: .text%__1cVExceptionHandlerTableMadd_subtable6MipnNGrowableArray4Ci__2_v_; +text: .text%__1cQLibraryIntrinsicKis_virtual6kM_i_: library_call.o; +text: .text%__1cScompP_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNxorI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cScompI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cScompI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cUCompressedReadStreamMraw_read_int6FrpC_i_: vframe.o; +text: .text%__1cRMachSafePointNodePis_MachCallJava6M_pnQMachCallJavaNode__: ad_i486_misc.o; +text: .text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_: classes.o; +text: .text%__1cIimmIOperFclone6kM_pnIMachOper__; +text: .text%__1cJleaP8NodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cIciObject2t6M_v_; +text: .text%__1cMloadConINodeFclone6kM_pnENode__; +text: .text%__1cIMulLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIGraphKitYcombine_exception_states6MpnNSafePointNode_2_v_; +text: .text%__1cNinstanceKlassPlink_class_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cMciMethodData2t6M_v_; +text: .text%__1cPciObjectFactoryUget_empty_methodData6M_pnMciMethodData__; +text: .text%__1cRitableMethodEntryKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cRaddL_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cJloadBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIHaltNodeEhash6kM_I_: classes.o; +text: .text%__1cNmulL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQinit_input_masks6FIrnHRegMask_1_p0_: matcher.o; +text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_; +text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_; +text: .text%__1cNIdealLoopTreeObeautify_loops6MpnOPhaseIdealLoop__i_; +text: .text%__1cOjmpLoopEndNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_; +text: .text%__1cMrep_stosNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_; +text: .text%__1cFStateO_sub_Op_StoreP6MpknENode__v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: phaseX.o; +text: 
.text%__1cENodeHis_Loop6M_pnILoopNode__: callnode.o; +text: .text%__1cPshlI_eReg_1NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__; +text: .text%__1cSindIndexOffsetOperFscale6kM_i_: ad_i486.o; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: cfgnode.o; +text: .text%__1cNaddL_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_: jni.o; +text: .text%__1cMMutableSpaceFclear6M_v_; +text: .text%__1cQjmpCon_shortNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cQjmpCon_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cQjmpCon_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cKjmpConNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cPshrI_eReg_1NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMindirectOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cMindirectOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cMindirectOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cYmulI_imm_RShift_highNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPstoreImmI16NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_; +text: .text%__1cUParallelScavengeHeapMmem_allocate6MIii_pnIHeapWord__; +text: .text%__1cNstoreImmBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_; +text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_; +text: .text%__1cIGraphKitXset_edges_for_java_call6MpnMCallJavaNode_i_v_; +text: .text%__1cQciTypeArrayKlassTis_type_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cIVerifierRshould_verify_for6FpnHoopDesc__i_; +text: .text%__1cKcmpOpUOperGnegate6M_v_: ad_i486_clone.o; +text: .text%__1cLjmpConUNodeGnegate6M_v_: ad_i486_misc.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: loopnode.o; +text: .text%__1cRandI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKstoreBNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapTmerge_state_vectors6MpnNCellTypeState_2_i_; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_2_v_; +text: .text%__1cNnegI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitZset_results_for_java_call6MpnMCallJavaNode__pnENode__; +text: .text%__1cOstackSlotDOperEtype6kM_pknEType__: ad_i486.o; +text: .text%JVM_GetMethodIxModifiers; +text: .text%__1cNSCMemProjNodeGOpcode6kM_i_; +text: .text%__1cNandL_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cJleaP8NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQjava_lang_StringGequals6FpnHoopDesc_pHi_i_; +text: .text%__1cENodeGis_Sub6M_pnHSubNode__: classes.o; +text: .text%__1cTcompareAndSwapLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cScompU_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cScompU_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cScompU_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: 
.text%__1cYmulI_imm_RShift_highNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPcmpFastLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNinstanceKlassbCfind_local_field_from_offset6kMiipnPfieldDescriptor__i_; +text: .text%__1cFStateO_sub_Op_StoreI6MpknENode__v_; +text: .text%__1cJLoadSNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%JVM_IsInterface; +text: .text%__1cNinstanceKlassWfind_field_from_offset6kMiipnPfieldDescriptor__i_; +text: .text%__1cPciInstanceKlassTget_field_by_offset6Mii_pnHciField__; +text: .text%__1cFStateM_sub_Op_RegL6MpknENode__v_; +text: .text%__1cRMachNullCheckNodeLout_RegMask6kM_rknHRegMask__: machnode.o; +text: .text%__1cRshrI_eReg_immNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cScompP_mem_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadSNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cXconvI2L_reg_reg_zexNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMvframeStream2t6MpnKJavaThread_i_v_; +text: .text%__1cRsalI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJimmI0OperFclone6kM_pnIMachOper__; +text: .text%__1cNloadConI0NodeFclone6kM_pnENode__; +text: .text%__1cWflagsReg_long_LTGEOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cQciByteCodeStreamMget_constant6M_nKciConstant__; +text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cRMachNullCheckNode2t6MpnENode_2I_v_; +text: .text%__1cNMachIdealNodeJnum_opnds6kM_I_: machnode.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: machnode.o; +text: .text%__1cXconvI2L_reg_reg_zexNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLLShiftLNodeGOpcode6kM_i_; +text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_; +text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cEhash6Fpkc1_I_; +text: .text%__1cQput_after_lookup6FnMsymbolHandle_0ppnLNameSigHash__i_; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cZresource_reallocate_bytes6FpcII_0_; +text: .text%__1cLeCXRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cScompI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLCastP2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cUreloc_java_to_interp6F_I_; +text: .text%__1cTsize_java_to_interp6F_I_; +text: .text%__1cKstoreINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompU_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOstackSlotLOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: graphKit.o; +text: .text%__1cSCallLeafDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCallLeafDirectNodeRis_safepoint_node6kM_i_: ad_i486_misc.o; +text: .text%__1cQciByteCodeStreamPget_klass_index6M_i_; +text: .text%__1cQComputeCallStackHdo_void6M_v_: generateOopMap.o; +text: .text%__1cMnadxRegIOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cJCHAResultOis_monomorphic6kM_i_; +text: .text%__1cJCHAResult2t6MnLKlassHandle_nMsymbolHandle_2pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___n0E_i_v_; +text: .text%__1cIciMethodXfind_monomorphic_target6MpnHciKlass_22_p0_; +text: .text%__1cDCHAManalyze_call6FnLKlassHandle_11nMsymbolHandle_2_pnJCHAResult__; +text: .text%__1cRaddL_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKMemBarNode2t6M_v_; 
+text: .text%__1cHciField2t6MpnPfieldDescriptor__v_; +text: .text%__1cKMemBarNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMloadConFNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKPSYoungGenNused_in_bytes6kM_I_; +text: .text%__1cNSafepointBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_; +text: .text%__1cbCCompiledCodeSafepointHandlerYcaller_must_gc_arguments6kM_i_: safepoint.o; +text: .text%__1cUThreadSafepointStateYcaller_must_gc_arguments6kM_i_; +text: .text%__1cWstatic_stub_RelocationJpack_data6M_i_; +text: .text%__1cMloadConFNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSReferenceProcessorOprocess_phase36MppnHoopDesc_ipnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: .text%__1cSReferenceProcessorOprocess_phase26MppnHoopDesc_pnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSReferenceProcessorbAprocess_discovered_reflist6MppnHoopDesc_pnPReferencePolicy_i_v_; +text: .text%__1cSReferenceProcessorbAenqueue_discovered_reflist6MpnHoopDesc_p2_v_; +text: .text%__1cNandL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLLShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOFastUnlockNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_; +text: .text%__1cMnegF_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIModINodeGOpcode6kM_i_; +text: .text%__1cHUNICODEHas_utf86FpHi_pc_; +text: .text%__1cKcmpOpUOperNgreater_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cIMaxINodeGOpcode6kM_i_; +text: .text%__1cQjmpDir_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKjmpDirNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cQjmpDir_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cQjmpDir_shortNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConFNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKMemBarNodeJis_MemBar6kM_pk0_: classes.o; +text: .text%__1cIMachNodeKconst_size6kM_i_: machnode.o; +text: .text%__1cIMachNodeFreloc6kM_i_: machnode.o; +text: .text%__1cIMachNodeTmay_be_short_branch6kM_i_: machnode.o; +text: .text%__1cWImplicitExceptionTableGappend6MII_v_; +text: .text%__1cIGraphKitOinsert_mem_bar6MpnKMemBarNode__v_; +text: .text%__1cIGraphKitbBset_arguments_for_java_call6MpnMCallJavaNode__v_; +text: .text%__1cIGraphKitJpush_node6MnJBasicType_pnENode__v_: callGenerator.o; +text: .text%__1cNCallGeneratorCtf6kM_pknITypeFunc__; +text: .text%__1cNCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cLOptoRuntimebCcomplete_monitor_unlocking_C6FpnHoopDesc_pnJBasicLock__v_; +text: .text%__1cITypeNodeHis_Type6M_p0_: classes.o; +text: .text%__1cNdecI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHdel_out6Mp0_v_: chaitin.o; +text: .text%__1cYmulI_imm_RShift_highNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_; +text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_; +text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__; +text: .text%__1cLBoxLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTleaPIdxScaleOffNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNGCTaskManagerRset_resource_flag6MIi_v_; +text: .text%__1cScompI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: 
.text%__1cScompI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cScompI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFParseQarray_addressing6MnJBasicType_ippknEType__pnENode__; +text: .text%__1cHCompileSregister_intrinsic6MpnNCallGenerator__v_; +text: .text%__1cNmulL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMeADXRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cRcmpFastUnlockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQleaPIdxScaleNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cXcopy_u2_with_conversion6FpH0i_v_: classFileParser.o; +text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_; +text: .text%__1cLOptoRuntimebAcomplete_monitor_locking_C6FpnHoopDesc_pnJBasicLock_pnKJavaThread__v_; +text: .text%__1cSvframeStreamCommonbHskip_method_invoke_and_aux_frames6M_v_; +text: .text%__1cIGraphKitbMset_predefined_output_for_runtime_call6MpnENode_pnMMergeMemNode__v_; +text: .text%__1cNminI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cPciInstanceKlassLfind_method6MpnIciSymbol_2_pnIciMethod__; +text: .text%__1cJloadLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitJpush_node6MnJBasicType_pnENode__v_: parse1.o; +text: .text%__1cJCHAResultSmonomorphic_target6kM_nMmethodHandle__; +text: .text%__1cNCallGeneratorPfor_direct_call6FpnIciMethod__p0_; +text: .text%__1cTDirectCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cMWarmCallInfoLalways_cold6F_p0_; +text: .text%__1cNaddL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cMrep_stosNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFciEnvRfind_system_klass6MpnIciSymbol__pnHciKlass__; +text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_; +text: .text%__1cZCallInterpreterDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOClearArrayNodeKmatch_edge6kMI_I_; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: methodDataOop.o; +text: .text%__1cLConvI2LNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPThreadLocalNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLBlock_ArrayEgrow6MI_v_; +text: .text%__1cLConvL2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQciByteCodeStreamJget_klass6Mri_pnHciKlass__; +text: .text%__1cNandL_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNandL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHi2sNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMjniIdSupportNto_method_oop6FpnK_jmethodID__pnNmethodOopDesc__; +text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_; +text: .text%__1cIAndINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_; +text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: callnode.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: callnode.o; +text: .text%__1cOGenerateOopMapNrestore_state6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_; +text: .text%__1cPCheckCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cISubINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIAndINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cTCallDynamicJavaNodeGOpcode6kM_i_; +text: 
.text%__1cXcmpL_reg_flags_LEGTNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPCheckCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKRegionNodeOis_block_start6kM_i_: loopnode.o; +text: .text%__1cSloadL_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_; +text: .text%__1cJPhaseLiveHcompute6MI_v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: live.o; +text: .text%__1cIPhaseIFGEinit6MI_v_; +text: .text%__1cMPhaseChaitinQgather_lrg_masks6Mi_v_; +text: .text%__1cRjmpConU_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRjmpConU_shortNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cVmerge_point_too_heavy6FpnHCompile_pnENode__i_: loopopts.o; +text: .text%__1cMloadConPNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHRetNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: loopnode.o; +text: .text%__1cNandI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIAddLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFStateN_sub_Op_LoadI6MpknENode__v_; +text: .text%__1cPsarI_eReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXmembar_acquire_lockNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHCmpNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKstoreBNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXconvI2L_reg_reg_zexNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: connode.o; +text: .text%__1cNinstanceKlassXmark_dependent_nmethods6MpnMklassOopDesc__i_; +text: .text%__1cOPhaseIdealLoopPis_counted_loop6MpnENode_pnNIdealLoopTree__2_; +text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: oopFactory.o; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: oopFactory.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: oopFactory.o; +text: .text%__1cOGenerateOopMapHset_var6MinNCellTypeState__v_; +text: .text%__1cOleaPIdxOffNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNmethodOopDescOis_initializer6kM_i_; +text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__; +text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_: universe.o; +text: .text%__1cNloadKlassNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIGraphKitPpush_pair_local6Mi_v_: parse2.o; +text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_; +text: .text%__1cPshlI_eReg_1NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPClassFileParserbHparse_constant_pool_integer_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKstoreBNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerFpushl6Mi_v_; +text: .text%__1cKmethodOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: reg_split.o; +text: .text%__1cMLinkResolverUresolve_invokestatic6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKklassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNxorI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNxorI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHMatcherMreturn_value6Fii_nLRegPair__; +text: 
.text%__1cNinstanceKlassScopy_static_fields6MpnSPSPromotionManager__v_; +text: .text%__1cSinstanceKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cbACallCompiledJavaDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRandI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSsafePoint_pollNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSloadL_volatileNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cLcastP2INodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: frame.o; +text: .text%__1cFStateR_sub_Op_LoadRange6MpknENode__v_; +text: .text%__1cOcompU_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cICmpLNodeDsub6kMpknEType_3_3_; +text: .text%__1cHMemNodeHsize_of6kM_I_; +text: .text%__1cYCallStaticJavaDirectNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cYCallStaticJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNRelocIteratorJset_limit6MpC_v_; +text: .text%__1cIMachNodeTmay_be_short_branch6kM_i_: ad_i486.o; +text: .text%__1cRsarI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cHPhiNodeDcmp6kMrknENode__I_; +text: .text%jni_SetObjectArrayElement: jni.o; +text: .text%__1cNnmethodLocker2T6M_v_; +text: .text%__1cVjava_lang_ClassLoaderGparent6FpnHoopDesc__2_; +text: .text%__1cLOpaque2NodeGOpcode6kM_i_; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: bytecode.o; +text: .text%__1cMloadConLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLCastP2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIGraphKitSclear_saved_ex_oop6FpnNSafePointNode__pnENode__; +text: .text%__1cIGraphKitTuse_exception_state6MpnNSafePointNode__pnENode__; +text: .text%__1cKstoreINodeFreloc6kM_i_; +text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_; +text: .text%__1cNnmethodLocker2t6MpnHnmethod__v_; +text: .text%__1cNaddL_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNaddL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitOmake_slow_call6MpknITypeFunc_pCpkcpnENode_88_8_; +text: .text%__1cPcheckCastPPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUGenericGrowableArray2t6MiipnEGrET_i_v_; +text: .text%__1cSReferenceProcessorSdiscover_reference6MpnHoopDesc_nNReferenceType__i_; +text: .text%__1cTStackWalkCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_; +text: .text%__1cRCompilationPolicybJreset_counter_for_back_branch_event6MnMmethodHandle__v_; +text: .text%__1cNnegI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJStartNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cKciTypeFlowHdo_flow6M_v_; +text: .text%__1cKciTypeFlowKmap_blocks6M_v_; +text: .text%__1cKciTypeFlowKflow_types6M_v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_i_v_; +text: .text%__1cKciTypeFlowLfind_ranges6M_v_; +text: .text%__1cKciTypeFlowXmark_known_range_starts6M_v_; +text: .text%__1cKciTypeFlow2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cKciTypeFlowPget_start_state6M_pkn0ALStateVector__; +text: .text%__1cXjava_lang_ref_ReferenceIset_next6FpnHoopDesc_2_v_; +text: .text%__1cOeFlagsRegUOperFclone6kM_pnIMachOper__; +text: .text%__1cSReferenceProcessorTget_discovered_list6MnNReferenceType__ppnHoopDesc__; +text: .text%__1cOPhaseIdealLoopRsplit_thru_region6MpnENode_2_2_; +text: .text%__1cOPhaseTransform2t6MnFPhaseLPhaseNumber__v_; +text: 
.text%__1cFKlassQup_cast_abstract6M_p0_; +text: .text%__1cFframebHnext_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_; +text: .text%__1cNsubL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMorI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_; +text: .text%__1cVPreserveExceptionMark2T6M_v_; +text: .text%__1cIciMethodJload_code6M_v_; +text: .text%__1cMciMethodDataJload_data6M_v_; +text: .text%__1cPconvL2I_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJimmI0OperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: multnode.o; +text: .text%__1cOCallRelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cPconvI2L_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJCMoveNodeLis_cmove_id6FpnOPhaseTransform_pnENode_44pnIBoolNode__4_; +text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_; +text: .text%JVM_GetCPClassNameUTF; +text: .text%__1cLConvI2LNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNIdealLoopTreeNDCE_loop_body6M_v_; +text: .text%__1cNIdealLoopTreeVadjust_loop_exit_prob6MpnOPhaseIdealLoop__v_; +text: .text%__1cJAssemblerEleal6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cKstoreCNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%jni_GetStringLength: jni.o; +text: .text%__1cICodeHeapPsearch_freelist6MI_pnJFreeBlock__; +text: .text%__1cICodeHeapIallocate6MI_pv_; +text: .text%__1cKstorePNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQSystemDictionarybAcompute_loader_lock_object6FnGHandle_pnGThread__1_; +text: .text%__1cLeAXRegIOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cFParseKdo_put_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cPmethodDataKlassRoop_is_methodData6kM_i_: methodDataKlass.o; +text: .text%__1cMciMethodData2t6MnQmethodDataHandle__v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodDataKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: methodDataKlass.o; +text: .text%__1cTcompareAndSwapLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOLibraryCallKitOgenerate_guard6MpnENode_pnKRegionNode_f_v_; +text: .text%__1cLOpaque1NodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKJNIHandlesLmake_global6FnGHandle_i_pnI_jobject__; +text: .text%__1cOClearArrayNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: jvm.o; +text: .text%__1cUPSGenerationCountersKupdate_all6M_v_: psGenerationCounters.o; +text: .text%__1cMoutputStream2t6Mi_v_; +text: .text%__1cWflagsReg_long_LTGEOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNloadRangeNodeFreloc6kM_i_; +text: .text%__1cFParseNpush_constant6MnKciConstant__i_; +text: .text%__1cMstringStream2t6MI_v_; +text: .text%__1cMstringStreamJas_string6M_pc_; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_: classes.o; +text: .text%__1cMstringStream2T6M_v_; +text: .text%__1cNaddL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMURShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMloadConDNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJrelocInfoKset_format6Mi_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: objArrayKlass.o; +text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_; +text: .text%__1cJTimeStampSticks_since_update6kM_x_; +text: .text%__1cISubLNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse2.o; +text: .text%__1cNminI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: 
.text%__1cOcompP_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRconstantPoolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_; +text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__; +text: .text%__1cQVMOperationQdDueueSqueue_remove_front6Mi_pnMVM_Operation__; +text: .text%__1cLProfileDataSis_VirtualCallData6M_i_: ciMethodData.o; +text: .text%__1cJScopeDescGis_top6kM_i_; +text: .text%__1cMorI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cOjmpLoopEndNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKTypeRawPtrFempty6kM_i_; +text: .text%__1cTCallInterpreterNodeGOpcode6kM_i_; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cScompI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRxorI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_; +text: .text%__1cXmembar_acquire_lockNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNPrefetchQdDueueFclear6M_v_: psPromotionManager.o; +text: .text%__1cSPSPromotionManagerFreset6M_v_; +text: .text%__1cXconvI2L_reg_reg_zexNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cSPSPromotionManagerKflush_labs6M_v_; +text: .text%__1cRaddI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cIAndINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJloadSNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIregDOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cKBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__; +text: .text%__1cSCompareAndSwapNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cICodeHeapPfollowing_block6MpnJFreeBlock__2_; +text: .text%__1cLRuntimeStubIis_alive6kM_i_: codeBlob.o; +text: .text%__1cFStateQ_sub_Op_URShiftI6MpknENode__v_; +text: .text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_; +text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fi_v_; +text: .text%__1cIciObjectMis_obj_array6M_i_: ciInstanceKlass.o; +text: .text%__1cFciEnvZcheck_klass_accessibility6MpnHciKlass_pnMklassOopDesc__i_; +text: .text%__1cCosRcurrent_thread_id6F_i_; +text: .text%__1cMTypeKlassPtrFxdual6kM_pknEType__; +text: .text%__1cHciKlassMis_interface6M_i_: ciObjArrayKlass.o; +text: .text%__1cNtestP_regNodeFreloc6kM_i_; +text: .text%__1cHCompileTset_cached_top_node6MpnENode__v_; +text: .text%__1cIGotoNodeGOpcode6kM_i_; +text: .text%__1cENodeMsetup_is_top6M_v_; +text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cOleaPIdxOffNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPconvL2I_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHCompilePneed_stack_bang6kMi_i_; +text: .text%__1cOMachPrologNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cULinearLeastSquareFitGupdate6Mdd_v_; +text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_; +text: .text%__1cNsubI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSmembar_acquireNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHRetNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cNobjArrayKlassPoop_is_objArray6kM_i_: objArrayKlass.o; +text: .text%__1cHRetNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKimmL32OperJconstantL6kM_x_: ad_i486_clone.o; +text: .text%__1cRindIndexScaleOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; 
+text: .text%__1cRindIndexScaleOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cHRetNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRindIndexScaleOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cQleaPIdxScaleNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLklassVtableRinitialize_vtable6MpnGThread__v_; +text: .text%__1cOPhaseIdealLoopQset_subtree_ctrl6MpnENode__v_; +text: .text%__1cRxorI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOMethodLivenessKBasicBlockJstore_two6Mi_v_; +text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cJloadINodeIpeephole6MpnFBlock_ipnNPhaseRegAlloc_ri_pnIMachNode__; +text: .text%__1cJloadINodeFreloc6kM_i_; +text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cKReturnNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOleaPIdxOffNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__; +text: .text%__1cLklassVtableVinitialize_from_super6MnLKlassHandle__i_; +text: .text%__1cLklassVtableOcopy_vtable_to6MpnLvtableEntry__v_; +text: .text%__1cJloadBNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSCallLeafDirectNodeKmethod_set6Mi_v_; +text: .text%__1cKReturnNodeEhash6kM_I_: classes.o; +text: .text%__1cTleaPIdxScaleOffNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_; +text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cIPhaseIFGISquareUp6M_v_; +text: .text%__1cYmulI_imm_RShift_highNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LEGTNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNxorI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKKlass_vtbl2n6FIrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: klass.o; +text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: klass.o; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: klass.o; +text: .text%jni_GetStringUTFLength: jni.o; +text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_; +text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_; +text: .text%jni_GetStringUTFRegion: jni.o; +text: .text%__1cIAndINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: classes.o; +text: .text%__1cQPlaceholderTableJnew_entry6MipnNsymbolOopDesc_pnHoopDesc__pnQPlaceholderEntry__; +text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cQPlaceholderTableJadd_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cJcmpOpOperJnot_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cIAndINodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cKCMoveINodeGOpcode6kM_i_; +text: .text%__1cOMachEpilogNodeEsize6kMpnNPhaseRegAlloc__I_; +text: 
.text%__1cPCountedLoopNodeNstride_is_con6kM_i_: loopnode.o; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopnode.o; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: loopnode.o; +text: .text%__1cMPhaseChaitinSbuild_ifg_physical6MpnMResourceArea__I_; +text: .text%__1cMPhaseChaitinMreset_uf_map6MI_v_; +text: .text%__1cPsarI_eReg_1NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNPhaseCoalescePcoalesce_driver6M_v_; +text: .text%__1cHnmethodKpc_desc_at6MpCi_pnGPcDesc__; +text: .text%__1cHCompileQsync_stack_slots6kM_i_; +text: .text%__1cPindOffset32OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cJloadLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cUDebugInfoWriteStreamMwrite_handle6MpnI_jobject__v_; +text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cPconvI2D_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNIdealLoopTreeTcheck_inner_safepts6MpnOPhaseIdealLoop__v_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: assembler_i486.o; +text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_; +text: .text%__1cRaddI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNIdealLoopTreeUiteration_split_impl6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cNIdealLoopTreeOpolicy_peeling6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreebBpolicy_do_remove_empty_loop6MpnOPhaseIdealLoop__i_; +text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_; +text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_; +text: .text%JVM_InternString; +text: .text%__1cXcmpL_reg_flags_LEGTNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_; +text: .text%__1cCosGrandom6F_l_; +text: .text%__1cNget_next_hash6F_i_: synchronizer.o; +text: .text%__1cPshrI_eReg_1NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cUimpl_fp_store_helper6FpnKCodeBuffer_iiiiiii_i_: ad_i486.o; +text: .text%__1cVloadConL_low_onlyNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cXmembar_release_lockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOMacroAssemblerJincrement6MpnMRegisterImpl_i_v_; +text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cENodeHdel_out6Mp0_v_: callGenerator.o; +text: .text%__1cJCmpL3NodeGOpcode6kM_i_; +text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_; +text: .text%__1cPclear_hashtable6FppnLNameSigHash__v_; +text: .text%__1cWflagsReg_long_LEGTOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cEDictIdoubhash6M_v_; +text: .text%__1cUinitialize_hashtable6FppnLNameSigHash__v_; +text: .text%__1cRcmpOp_commuteOperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cScompU_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSshrL_eReg_1_31NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPCountedLoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRMachSafePointNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cUmembar_cpu_orderNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__; +text: .text%__1cNmodI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLRShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMMutableSpaceKinitialize6MnJMemRegion_i_v_; +text: 
.text%__1cKstoreLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%jni_SetIntField: jni.o; +text: .text%__1cENodeHis_Copy6kM_I_: ad_i486.o; +text: .text%__1cKstorePNodeFreloc6kM_i_; +text: .text%__1cNincI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPCountedLoopNodeHsize_of6kM_I_: classes.o; +text: .text%__1cNcmovI_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cTresource_free_bytes6FpcI_v_; +text: .text%__1cVLoaderConstraintTablePcheck_or_update6MnTinstanceKlassHandle_nGHandle_nMsymbolHandle__pkc_; +text: .text%__1cQSystemDictionaryRupdate_dictionary6FiIiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: dictionary.o; +text: .text%__1cQSystemDictionaryRcheck_constraints6FiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cQSystemDictionaryQfind_placeholder6FiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cIProjNodeJideal_reg6kM_I_; +text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_; +text: .text%__1cKDictionaryJnew_entry6MIpnMklassOopDesc_pnHoopDesc__pnPDictionaryEntry__; +text: .text%__1cVloadConL_low_onlyNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPstoreImmI16NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: instanceKlass.o; +text: .text%__1cWflagsReg_long_EQdDNEOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cIAndINodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cIAndINodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cRandI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLRuntimeStubYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cTGeneratePairingInfoRpossible_gc_point6MpnOBytecodeStream__i_: ciMethod.o; +text: .text%__1cJLoadBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFKlassXsearch_secondary_supers6kMpnMklassOopDesc__i_; +text: .text%__1cNmodI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRshrI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJLoadSNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOGenerateOopMapOset_bbmark_bit6Mi_v_; +text: .text%__1cIGraphKitOhas_ex_handler6M_i_; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: ciMethodData.o; +text: .text%__1cRaddI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRsubI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopMdominated_by6MpnENode_2_v_; +text: .text%__1cPRoundDoubleNodeGOpcode6kM_i_; +text: .text%__1cRsarI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cICallNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_i_v_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: doCall.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: connode.o; +text: .text%__1cLklassVtableQfill_in_mirandas6Mri_v_; +text: .text%__1cLklassItableRinitialize_itable6M_v_; +text: .text%__1cZCallDynamicJavaDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSshlL_eReg_1_31NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cCosXthread_local_storage_at6Fi_pv_; +text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__; +text: .text%__1cLregFPR1OperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cQinstanceRefKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cSmembar_releaseNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: 
.text%__1cLeDIRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cLregFPR1OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cSmembar_acquireNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitRmerge_fast_memory6MpnENode_2i_v_; +text: .text%__1cFKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cSCompareAndSwapNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cSindIndexOffsetOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cSindIndexOffsetOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cSindIndexOffsetOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cNstoreImmBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIGraphKitOmake_merge_mem6MpnENode_22_v_; +text: .text%__1cFKlassWappend_to_sibling_list6M_v_; +text: .text%__1cIGraphKitHopt_iff6MpnENode_2_2_; +text: .text%__1cOcompP_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__; +text: .text%__1cSmembar_releaseNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: classes.o; +text: .text%__1cJLoadFNodeGOpcode6kM_i_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_2_v_; +text: .text%__1cFframeRis_compiled_frame6kMpi_i_; +text: .text%__1cTjava_lang_ThrowableNset_backtrace6FpnHoopDesc_2_v_; +text: .text%__1cKstoreCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMStartOSRNodeGOpcode6kM_i_; +text: .text%__1cHPhiNodeKmake_blank6FpnENode_2_p0_; +text: .text%__1cGThreadMis_VM_thread6kM_i_: thread.o; +text: .text%__1cTClassLoadingServiceScompute_class_size6FpnNinstanceKlass__I_; +text: .text%__1cTClassLoadingServiceTnotify_class_loaded6FpnNinstanceKlass_i_v_; +text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNmethodOopDescMsort_methods6FpnPobjArrayOopDesc_222_v_; +text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIVerifierQrelax_verify_for6FpnHoopDesc__i_; +text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_; +text: .text%__1cPClassFileStream2t6MpCipc_v_; +text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_; +text: .text%__1cNinstanceKlassQinit_implementor6M_v_; +text: .text%__1cNinstanceKlassbBdo_local_static_fields_impl6FnTinstanceKlassHandle_pFpnPfieldDescriptor_pnGThread__v5_v_; +text: .text%__1cRconstantPoolKlassIallocate6MipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cLklassVtableQget_num_mirandas6FpnMklassOopDesc_pnPobjArrayOopDesc_4_i_; +text: .text%__1cKoopFactoryQnew_constantPool6FipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_; +text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_; +text: .text%__1cLklassVtablebKcompute_vtable_size_and_num_mirandas6Fri1pnMklassOopDesc_pnPobjArrayOopDesc_nLAccessFlags_pnHoopDesc_pnNsymbolOopDesc_5_v_; +text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_; +text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_; +text: 
.text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: constantPoolKlass.o; +text: .text%__1cVjava_lang_ClassLoaderRis_trusted_loader6FpnHoopDesc__i_; +text: .text%__1cQSystemDictionaryQadd_to_hierarchy6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserNparse_methods6MnSconstantPoolHandle_ipnLAccessFlags_ppnPobjArrayOopDesc_66pnGThread__nOobjArrayHandle__; +text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: constantPoolKlass.o; +text: .text%__1cPClassFileParserUcompute_oop_map_size6MnTinstanceKlassHandle_ii_i_; +text: .text%__1cPClassFileParserNfill_oop_maps6MnTinstanceKlassHandle_ii_v_; +text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__; +text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cPClassFileParserYcheck_super_class_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserMparse_fields6MnSconstantPoolHandle_ipnUFieldAllocationCount_pnOobjArrayHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: constantPoolKlass.o; +text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__; +text: .text%__1cPClassFileParserQparse_interfaces6MnSconstantPoolHandle_nGHandle_2pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserOparseClassFile6MnMsymbolHandle_nGHandle_2r1pnGThread__nTinstanceKlassHandle__; +text: .text%__1cQSystemDictionaryVresolve_super_or_fail6FnMsymbolHandle_1nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cLloadSSFNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNObjectMonitorHis_busy6kM_i_; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceKlass.o; +text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlass.o; +text: .text%__1cSindIndexOffsetOperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cSThreadProfilerMark2T6M_v_; +text: .text%__1cJEventMark2t6MpkcE_v_: classLoader.o; +text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_; +text: .text%__1cFVTuneOend_class_load6F_v_; +text: .text%__1cFVTuneQstart_class_load6F_v_; +text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cENodeHis_Call6M_pnICallNode__: loopnode.o; +text: .text%__1cQSystemDictionaryRfind_shared_class6FnMsymbolHandle__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryRload_shared_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_; +text: .text%__1cHMatcherNfind_receiver6Fi_nFVMRegEName__; +text: .text%__1cNnegI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframeZinterpreter_frame_set_bcx6Mi_v_; +text: 
.text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cENodeHdel_out6Mp0_v_: generateOptoStub.o; +text: .text%__1cRsubI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNandI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cILoopNode2t6MpnENode_2_v_; +text: .text%__1cVeADXRegL_low_onlyOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cENodeGis_Con6kM_I_: memnode.o; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: debugInfo.o; +text: .text%__1cMPhaseIterGVNIoptimize6M_v_; +text: .text%__1cJloadFNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQPackageHashtableMcompute_hash6Mpkci_I_: classLoader.o; +text: .text%__1cOMethodLivenessRinit_basic_blocks6M_v_; +text: .text%__1cOMethodLivenessNinit_gen_kill6M_v_; +text: .text%__1cWCallLeafNoFPDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOMethodLivenessSpropagate_liveness6M_v_; +text: .text%__1cOMethodLiveness2t6MpnFArena_pnIciMethod__v_; +text: .text%__1cOMethodLivenessQcompute_liveness6M_v_; +text: .text%__1cHCompilebAvarargs_C_out_slots_killed6kM_I_; +text: .text%__1cSMemBarVolatileNodeGOpcode6kM_i_; +text: .text%__1cTMachCallRuntimeNodeSis_MachCallRuntime6M_p0_: ad_i486_misc.o; +text: .text%__1cMloadConPNodeFreloc6kM_i_; +text: .text%__1cSTailCalljmpIndNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: cpCacheKlass.o; +text: .text%__1cNinstanceKlassLverify_code6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIVerifierRverify_byte_codes6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_; +text: .text%__1cNinstanceKlassWadd_loader_constraints6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_; +text: .text%__1cIRewriterScompute_index_maps6FnSconstantPoolHandle_rpnIintArray_rpnIintStack__v_; +text: .text%__1cKoopFactoryVnew_constantPoolCache6FipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cIintArray2t6Mki1_v_: rewriter.o; +text: .text%__1cWconstantPoolCacheKlassIallocate6MipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: cpCacheKlass.o; +text: .text%__1cIRewriterXnew_constant_pool_cache6FrnIintArray_pnGThread__nXconstantPoolCacheHandle__; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: cpCacheKlass.o; +text: .text%__1cRmethodDataOopDescJis_mature6kM_i_; +text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__; +text: .text%__1cQciByteCodeStreamUis_unresolved_string6kM_i_; +text: .text%__1cFciEnvUis_unresolved_string6kMpnPciInstanceKlass_i_i_; +text: .text%JVM_GetMethodIxExceptionIndexes; +text: .text%JVM_GetMethodIxSignatureUTF; +text: .text%JVM_GetMethodIxExceptionsCount; +text: .text%JVM_GetMethodIxMaxStack; +text: .text%JVM_GetMethodIxByteCode; +text: .text%JVM_GetMethodIxArgsSize; +text: .text%JVM_GetMethodIxByteCodeLength; +text: .text%__1cFciEnvZis_unresolved_string_impl6kMpnNinstanceKlass_i_i_; +text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: 
.text%__1cKTypeRawPtrEmake6FpC_pk0_; +text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cFParseXcatch_inline_exceptions6MpnNSafePointNode__v_; +text: .text%__1cNmulL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNminI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__: classes.o; +text: .text%__1cYciExceptionHandlerStreamPcount_remaining6M_i_; +text: .text%__1cPconvI2L_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSshrL_eReg_1_31NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJLoadLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cPshrI_eReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cWconstantPoolCacheKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cYmulI_imm_RShift_highNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHMatcherUc_calling_convention6FpnLRegPair_I_v_; +text: .text%__1cPCallRuntimeNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__: classes.o; +text: .text%__1cHAddNodeGis_Add6kM_pk0_: classes.o; +text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_; +text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_; +text: .text%__1cCosRelapsed_frequency6F_x_; +text: .text%__1cRshrI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMelapsedTimerHseconds6kM_d_; +text: .text%__1cRaddL_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNaddI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIciObjectMhas_encoding6M_i_; +text: .text%__1cSloadL_volatileNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNandI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cQPackageHashtableJget_entry6MiIpkcI_pnLPackageInfo__: classLoader.o; +text: .text%__1cLClassLoaderOlookup_package6Fpkc_pnLPackageInfo__; +text: .text%__1cLeDIRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cENodeUdepends_only_on_test6kM_i_: connode.o; +text: .text%__1cFStateR_sub_Op_LoadKlass6MpknENode__v_; +text: .text%__1cQVMOperationQdDueueNqueue_oops_do6MipnKOopClosure__v_; +text: .text%__1cUCallCompiledJavaNodeGOpcode6kM_i_; +text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_; +text: .text%__1cUmembar_cpu_orderNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPClassFileParserbSparse_constant_pool_interfacemethodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cRsalI_eReg_immNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLClassLoaderLadd_package6Fpkci_i_; +text: .text%__1cNloadKlassNodeFreloc6kM_i_; +text: .text%__1cFStateV_sub_Op_MemBarRelease6MpknENode__v_; +text: .text%__1cMloadConLNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cHMatcherQpost_fast_unlock6FpknENode__i_; +text: .text%__1cZInterpreterMacroAssemblerKverify_oop6MpnMRegisterImpl_nITosState__v_; +text: .text%__1cFStateM_sub_Op_SubI6MpknENode__v_; +text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_; +text: .text%__1cLeSIRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKciTypeFlowLStateVectorMdo_putstatic6MpnQciByteCodeStream__v_; +text: .text%__1cIRootNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cIXorINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNaddP_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: 
.text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cLregFPR1OperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: instanceKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlass.o; +text: .text%__1cKciTypeFlowLStateVectorGdo_ldc6MpnQciByteCodeStream__v_; +text: .text%__1cbDReferenceProcessorInitializerIis_clean6kM_v_: concurrentMarkSweepGeneration.o; +text: .text%__1cKManagementJtimestamp6F_x_; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: cfgnode.o; +text: .text%__1cIPSOldGenPupdate_counters6M_v_; +text: .text%__1cNSharedRuntimebOraw_exception_handler_for_return_address6FpC_1_; +text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_; +text: .text%__1cNaddI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cHOrINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cMnegF_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMnegF_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompP_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowLStateVectorLdo_putfield6MpnQciByteCodeStream__v_; +text: .text%__1cXjava_lang_ref_ReferenceWpending_list_lock_addr6F_ppnHoopDesc__; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstance.o; +text: .text%__1cSshlL_eReg_1_31NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%signalHandler; +text: .text%JVM_handle_solaris_signal; +text: .text%__1cMnegF_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassRclass_initializer6M_pnNmethodOopDesc__; +text: .text%__1cNinstanceKlassbOset_initialization_state_and_notify_impl6FnTinstanceKlassHandle_n0AKClassState_pnGThread__v_; +text: .text%__1cNinstanceKlassbJset_initialization_state_and_notify6Mn0AKClassState_pnGThread__v_; +text: .text%__1cNinstanceKlassWcall_class_initializer6MpnGThread__v_; +text: .text%__1cKcopy_table6FppC1i_v_: interpreter.o; +text: .text%__1cMtlsLoadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQVMOperationQdDueueLremove_next6M_pnMVM_Operation__; +text: .text%__1cOPhaseIdealLoopNreorg_offsets6MpnNIdealLoopTree__v_; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopopts.o; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_i_v_; +text: .text%__1cPcmpFastLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKBufferBlob2t6Mpkci_v_; +text: .text%__1cKBufferBlobGcreate6Fpkci_p0_; +text: .text%__1cNaddL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKBufferBlob2n6FII_pv_; +text: .text%__1cICodeBlob2t6Mpkcii_v_; +text: .text%__1cMeBCXRegLOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_; +text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: compile.o; +text: .text%__1cScompP_mem_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCCompiledCodeSafepointHandlerbDhandle_polling_page_exception6M_pC_; +text: .text%__1cKJavaThreadUin_stack_yellow_zone6MpC_i_: os_solaris_i486.o; +text: .text%__1cFframebDsender_for_raw_compiled_frame6kMpnLRegisterMap__0_; +text: .text%__1cMVM_OperationSis_cheap_allocated6kM_i_: vm_operations.o; +text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__pC_; +text: .text%__1cKloadUBNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: 
.text%__1cLGCTaskQdDueueKinitialize6M_v_; +text: .text%__1cSCardTableExtensionbAscavenge_contents_parallel6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager_I_v_; +text: .text%__1cTOldToYoungRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cJStealTaskEname6M_pc_: psTasks.o; +text: .text%__1cTOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cJStealTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cJStealTask2t6Mi_v_; +text: .text%__1cNGCTaskManagerMnote_release6MI_v_; +text: .text%__1cHnmethodJcode_size6kM_i_: nmethod.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: memnode.o; +text: .text%__1cJcmpOpOperEless6kM_i_: ad_i486_clone.o; +text: .text%__1cSaddD_reg_roundNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSmembar_releaseNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cICodeHeapTmark_segmap_as_used6MII_v_; +text: .text%__1cJloadCNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cNsubL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJimmL0OperJconstantL6kM_x_: ad_i486_clone.o; +text: .text%__1cFParseWensure_phis_everywhere6M_v_; +text: .text%__1cLConvL2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNxorI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNandL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJStartNodeHsize_of6kM_I_; +text: .text%__1cHMatcherFxform6MpnENode_i_2_; +text: .text%__1cHMatcherLfind_shared6MpnENode__v_; +text: .text%__1cTMachCallRuntimeNodePret_addr_offset6M_i_; +text: .text%__1cILRG_List2t6MI_v_; +text: .text%__1cILoopNodeHsize_of6kM_I_: classes.o; +text: .text%__1cEDict2t6MpFpkv2_ipF2_ipnFArena_i_v_; +text: .text%__1cGBundlePinitialize_nops6FppnIMachNode__v_; +text: .text%__1cOMachPrologNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNstoreImmPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLeAXRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cNsubL_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cHi2bNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cRmethodDataOopDescYcompute_extra_data_count6Fii_i_; +text: .text%__1cWCallLeafNoFPDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse3.o; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6MX_v_: jni.o; +text: .text%__1cNFingerprinterLfingerprint6M_X_: jni.o; +text: .text%__1cKimmI16OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cSshrL_eReg_1_31NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPstoreImmI16NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIemit_d166FrnKCodeBuffer_i_v_; +text: .text%__1cRxorI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRsubI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJeRegPOperFclone6kM_pnIMachOper__; +text: .text%__1cJAssemblerEmovl6MnHAddress_i_v_; +text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cFStateT_sub_Op_CheckCastPP6MpknENode__v_; +text: .text%__1cKciTypeFlowLStateVectorJhalf_type6FpnGciType__3_: ciTypeFlow.o; +text: .text%__1cJAssemblerDnop6M_v_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: connode.o; +text: 
.text%__1cMFastLockNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%JVM_GetFieldIxModifiers; +text: .text%__1cNmodI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKReturnNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRsarI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFParseKarray_load6MnJBasicType__v_; +text: .text%__1cRjmpConU_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cRjmpConU_shortNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cLjmpConUNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cRjmpConU_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKcmpOpUOperFclone6kM_pnIMachOper__; +text: .text%__1cFStateO_sub_Op_StoreB6MpknENode__v_; +text: .text%__1cFTypeFCeq6kMpknEType__i_; +text: .text%__1cFParseNadd_safepoint6M_v_; +text: .text%__1cLOpaque2NodeEhash6kM_I_; +text: .text%JVM_IsConstructorIx; +text: .text%__1cIimmPOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConPNodeFclone6kM_pnENode__; +text: .text%__1cSshrL_eReg_1_31NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: connode.o; +text: .text%__1cRxorI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXmembar_release_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__; +text: .text%__1cKadd_n_reqs6FpnENode_1_v_: graphKit.o; +text: .text%__1cNdecI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cILoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cICodeHeapLmerge_right6MpnJFreeBlock__v_; +text: .text%__1cIregDOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNsubI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXconvI2L_reg_reg_zexNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmethodOopDescTset_native_function6MpC_v_; +text: .text%__1cIPhaseIFGYCompute_Effective_Degree6M_v_; +text: .text%__1cMPhaseChaitinISimplify6M_v_; +text: .text%__1cMPhaseChaitinGSelect6M_I_; +text: .text%__1cMPhaseChaitinOcache_lrg_info6M_v_; +text: .text%__1cNtestU_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJScopeDescJstream_at6kMi_pnTDebugInfoReadStream__; +text: .text%__1cTDebugInfoReadStream2t6MpknHnmethod_i_v_; +text: .text%__1cLOptoRuntimeJstub_name6FpC_pkc_; +text: .text%__1cNSignatureInfoHdo_long6M_v_: frame.o; +text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_; +text: .text%__1cENodeHrm_prec6MI_v_; +text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cKciTypeFlowLStateVectorOmeet_exception6MpnPciInstanceKlass_pk1_i_; +text: .text%__1cKloadUBNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cWflagsReg_long_EQdDNEOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKciTypeFlowPflow_exceptions6MpnNGrowableArray4Cpn0AFBlock___pnNGrowableArray4CpnPciInstanceKlass___pn0ALStateVector__v_; +text: .text%__1cIBoolNodeZis_counted_loop_exit_test6M_i_; +text: .text%__1cPciInstanceKlassTis_java_lang_Object6M_i_; +text: .text%__1cRaddL_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXmembar_release_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cQsalI_eReg_CLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNnegI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompP_eRegNodeMcisc_operand6kM_i_: 
ad_i486_misc.o; +text: .text%__1cILoopNodeHsize_of6kM_I_: loopnode.o; +text: .text%__1cMPhaseChaitinFSplit6MI_I_; +text: .text%__1cZPhaseConservativeCoalesce2t6MrnMPhaseChaitin__v_; +text: .text%__1cMPhaseChaitinHcompact6M_v_; +text: .text%__1cMPhaseChaitinZcompress_uf_map_for_nodes6M_v_; +text: .text%__1cZPhaseConservativeCoalesceGverify6M_v_; +text: .text%__1cHTypePtrFxmeet6kMpknEType__3_; +text: .text%__1cOcompP_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cHMatcherQis_spillable_arg6Fi_i_; +text: .text%__1cSsafePoint_pollNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cHMatcherKcan_be_arg6Fi_i_; +text: .text%__1cSTailCalljmpIndNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIregFOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cTDebugInfoReadStreamLread_handle6M_nGHandle__; +text: .text%__1cJScopeDesc2t6MpknHnmethod_i_v_; +text: .text%__1cYcmpL_zero_flags_LEGTNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOcompiledVFrame2t6MpknFframe_pknLRegisterMap_pnKJavaThread_pnJScopeDesc__v_; +text: .text%__1cNSignatureInfoHdo_long6M_v_: bytecode.o; +text: .text%__1cXvirtual_call_RelocationJfirst_oop6M_pC_; +text: .text%__1cXvirtual_call_RelocationJoop_limit6M_pC_; +text: .text%__1cQComputeCallStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cHnmethodOis_java_method6kM_i_: nmethod.o; +text: .text%__1cNminI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSCallLeafDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cYcmpL_zero_flags_LEGTNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompiledVFrameGis_top6kM_i_; +text: .text%__1cFStateQ_sub_Op_CallLeaf6MpknENode__v_; +text: .text%__1cKJNIHandlesOdestroy_global6FpnI_jobject_i_v_; +text: .text%__1cHi2sNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRNativeGeneralJumpQjump_destination6kM_pC_; +text: .text%__1cMoutputStreamFprint6MpkcE_v_; +text: .text%__1cIciObjectIis_klass6M_i_: ciInstance.o; +text: .text%__1cMoutputStreamMdo_vsnprintf6FpcIpkcpvirI_3_; +text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_; +text: .text%__1cRaddI_eReg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cITypeLongFxdual6kM_pknEType__; +text: .text%__1cSloadL_volatileNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_immNodeFreloc6kM_i_; +text: .text%__1cPcheckCastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSvframeStreamCommonYfill_from_compiled_frame6MpnHnmethod_i_v_; +text: .text%__1cHnmethodQis_native_method6kM_i_: nmethod.o; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: cfgnode.o; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: cfgnode.o; +text: .text%__1cTshrL_eReg_32_63NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: objArrayKlass.o; +text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_; +text: .text%__1cPshrI_eReg_1NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_; +text: .text%__1cJNode_ListEyank6MpnENode__v_; +text: .text%__1cQLRUMaxHeapPolicyWshould_clear_reference6MpnHoopDesc__i_; +text: .text%__1cIGraphKitNstore_barrier6MpnENode_22_v_; +text: .text%__1cMrep_stosNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cWflagsReg_long_LEGTOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cSThreadLocalStorageTpd_getTlsAccessMode6F_n0AQpd_tlsAccessMode__; +text: .text%__1cOMacroAssemblerKget_thread6MpnMRegisterImpl__v_; +text: .text%__1cKloadUBNodeErule6kM_I_: ad_i486_misc.o; 
+text: .text%__1cLRethrowNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHMonitorGnotify6M_i_; +text: .text%__1cIRootNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRcmpFastUnlockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIGraphKitNshared_unlock6MpnENode_2_v_; +text: .text%__1cNSafePointNodeLpop_monitor6M_v_; +text: .text%__1cLOptoRuntimebAcomplete_monitor_exit_Type6F_pknITypeFunc__; +text: .text%__1cIRootNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cFStateS_sub_Op_FastUnlock6MpknENode__v_; +text: .text%__1cJloadFNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOstackSlotFOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cSstore_to_stackslot6FrnKCodeBuffer_iii_v_; +text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_; +text: .text%__1cMnadxRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_: callnode.o; +text: .text%__1cIAndINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cUmembar_cpu_orderNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMloadConINodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: cfgnode.o; +text: .text%__1cPconvI2L_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_; +text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cOjmpLoopEndNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRmulI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPsarI_eReg_1NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%jni_ExceptionCheck: jni.o; +text: .text%__1cURethrowExceptionNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cKstoreLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateP_sub_Op_RShiftI6MpknENode__v_; +text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_; +text: .text%__1cIMulLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cOcompP_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMvalue_of_loc6FppnHoopDesc__i_; +text: .text%__1cNSafePointNodeMpush_monitor6MpknMFastLockNode__v_; +text: .text%__1cTcompareAndSwapLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNandI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cTshrL_eReg_32_63NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSshlL_eReg_1_31NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKcmpOpUOperJnot_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cOcompP_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNSCMemProjNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cNandI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNaddP_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNstoreImmPNodeFreloc6kM_i_; +text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKtype2basic6FpknEType__nJBasicType__; +text: .text%__1cIAndLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cIAndLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cNstoreImmBNodeFreloc6kM_i_; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: jvm.o; +text: .text%JVM_Clone; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: jvm.o; +text: .text%__1cNcmovI_regNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: 
.text%__1cNloadKlassNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVeADXRegL_low_onlyOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: objArrayKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: objArrayKlass.o; +text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: objArrayKlass.o; +text: .text%__1cMeBCXRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNimmI_1_31OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cLPhaseValues2T5B6M_v_; +text: .text%__1cNtestU_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJLoadCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRsubI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIJumpDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cMalloc_object6FpnH_jclass_pnGThread__pnPinstanceOopDesc__: jni.o; +text: .text%__1cIGraphKitOnull_check_oop6MpnKRegionNode_pnENode_i_4_; +text: .text%__1cRmulI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHOrINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKTypeRawPtrEmake6FnHTypePtrDPTR__pk0_; +text: .text%__1cKTypeAryPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cFKlassDLCA6Mp0_1_; +text: .text%__1cHciKlassVleast_common_ancestor6Mp0_1_; +text: .text%__1cPconvL2I_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJeRegLOperFclone6kM_pnIMachOper__; +text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__; +text: .text%__1cNstoreImmINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPshlI_eReg_1NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNmaxI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIMinINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cHnmethodVis_dependent_on_entry6MpnMklassOopDesc_2pnNmethodOopDesc__i_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlass.o; +text: .text%__1cKloadUBNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMTypeKlassPtrFxmeet6kMpknEType__3_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cICodeHeapPadd_to_freelist6MpnJHeapBlock__v_; +text: .text%__1cICodeHeapKdeallocate6Mpv_v_; +text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_; +text: .text%__1cJVectorSetGslamin6Mrk0_v_; +text: .text%__1cQsalI_eReg_CLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKNode_ArrayFclear6M_v_; +text: .text%__1cKBufferBlobEfree6Fp0_v_; +text: .text%__1cIRootNodeHis_Root6M_p0_: classes.o; +text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_: jni.o; +text: .text%__1cOMethodLivenessKBasicBlockPmerge_exception6MnGBitMap__i_; +text: .text%__1cNminI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNsubI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_; +text: .text%__1cMorI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNtestU_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%jni_NewObject: jni.o; +text: .text%__1cMTailCallNodeKmatch_edge6kMI_I_; +text: .text%__1cIciMethodRinstructions_size6M_i_; +text: .text%__1cRsarI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPshrI_eReg_1NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKStoreFNodeGOpcode6kM_i_; +text: .text%__1cFStateO_sub_Op_StoreC6MpknENode__v_; +text: 
.text%__1cNobjArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cFciEnvKcompile_id6M_I_; +text: .text%__1cRAbstractAssemblerFflush6M_v_; +text: .text%__1cJloadINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRaddL_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNloadConL0NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNsubL_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNsubL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompU_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cScompU_eReg_memNodeFreloc6kM_i_; +text: .text%__1cTleaPIdxScaleOffNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMURShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNSharedRuntimeQfind_callee_info6FpnKJavaThread_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cNsubL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSCallLeafDirectNodeFreloc6kM_i_; +text: .text%__1cNSharedRuntimeOresolve_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cNSharedRuntimeSresolve_sub_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_; +text: .text%__1cNcmovI_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNcmovI_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cGIfNodeMdominated_by6MpnENode_pnMPhaseIterGVN__v_; +text: .text%__1cIMulDNodeGOpcode6kM_i_; +text: .text%__1cHMatcherPprior_fast_lock6FpknENode__i_; +text: .text%__1cFStateV_sub_Op_MemBarAcquire6MpknENode__v_; +text: .text%__1cLConvL2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMachEpilogNodeFreloc6kM_i_; +text: .text%__1cOMachEpilogNodeNis_MachEpilog6M_p0_: ad_i486.o; +text: .text%__1cOMachEpilogNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivLNodeGOpcode6kM_i_; +text: .text%__1cSstring_compareNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cSsafePoint_pollNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitNgen_checkcast6MpnENode_2p2_2_; +text: .text%__1cSindIndexOffsetOperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cLStrCompNodeGOpcode6kM_i_; +text: .text%__1cSindIndexOffsetOperOindex_position6kM_i_: ad_i486.o; +text: .text%__1cKRelocationJpack_data6M_i_: codeBlob.o; +text: .text%__1cGciTypeMis_classless6kM_i_: ciInstanceKlass.o; +text: .text%__1cLPcDescCacheKpc_desc_at6kMpnHnmethod_pCi_pnGPcDesc__; +text: .text%__1cXmembar_acquire_lockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLloadSSFNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNaddP_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cYcmpL_zero_flags_LEGTNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEtemp6F_pnMRegisterImpl__; +text: .text%__1cPstoreImmI16NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframeRretrieve_receiver6MpnLRegisterMap__pnHoopDesc__; +text: .text%__1cJcmpOpOperNgreater_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__; +text: .text%__1cNmodL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmulL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIDivINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJAssemblerDjmp6MrnFLabel_nJrelocInfoJrelocType__v_; 
+text: .text%__1cSloadL_volatileNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitRgen_subtype_check6MpnENode_2_2_; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_; +text: .text%__1cENodeHdel_out6Mp0_v_: parseHelper.o; +text: .text%__1cNandI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJTraceTime2t6MpkciipnMoutputStream__v_; +text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_; +text: .text%__1cOPhaseIdealLoopPbuild_loop_tree6M_v_; +text: .text%__1cLConvD2INodeGOpcode6kM_i_; +text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_; +text: .text%__1cOPhaseIdealLoop2t6MrnMPhaseIterGVN_pk0i_v_; +text: .text%__1cKciTypeFlowFRangeSprivate_copy_count6kMpn0AGJsrSet__i_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cXmembar_release_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXcmpL_reg_flags_LTGENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: frame.o; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: codeBlob.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: callnode.o; +text: .text%__1cNIdealLoopTreeMis_loop_exit6kMpnENode_pnOPhaseIdealLoop__2_; +text: .text%__1cJloadBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cZload_long_indOffset32OperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cZload_long_indOffset32OperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cNincI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJloadLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKstoreFNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cYcmpL_zero_flags_LEGTNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNxorI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNandL_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cNtestU_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: onStackReplacement.o; +text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__; +text: .text%__1cXcmpL_reg_flags_LEGTNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNincI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: loopnode.o; +text: .text%__1cNSafePointNodeKgrow_stack6MpnIJVMState_I_v_; +text: .text%__1cNxorI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cLRShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRsubI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cKBlock_ListGinsert6MIpnFBlock__v_; +text: .text%__1cECopyYconjoint_words_to_higher6FpnIHeapWord_2I_v_: block.o; +text: .text%__1cLRShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: loopnode.o; +text: .text%__1cFStateP_sub_Op_CastP2I6MpknENode__v_; +text: .text%__1cLCastP2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNaddL_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cKstoreCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_NewGlobalRef: jni.o; +text: .text%__1cKPSYoungGenRcapacity_in_bytes6kM_I_; +text: .text%__1cMtlsLoadPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNmaxI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNandL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: 
.text%__1cMVirtualSpaceNreserved_size6kM_I_; +text: .text%__1cHNTarjanIsetdepth6MIpI_v_; +text: .text%__1cOPhaseIdealLoopPbuild_loop_late6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cOPhaseIdealLoopQbuild_loop_early6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cOPhaseIdealLoopKDominators6M_v_; +text: .text%__1cHNTarjanDDFS6Fp0rnJVectorSet_pnOPhaseIdealLoop_pI_i_; +text: .text%__1cOPhaseIdealLoopRinit_dom_lca_tags6M_v_; +text: .text%__1cNaddP_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvI2L_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLeCXRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cJcmpOpOperHgreater6kM_i_: ad_i486_clone.o; +text: .text%__1cSmembar_acquireNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLklassItablebFinitialize_itable_for_interface6MpnMklassOopDesc_pnRitableMethodEntry__v_; +text: .text%__1cScheck_phi_clipping6FpnHPhiNode_rpnHConNode_rI45rpnENode_5_i_: cfgnode.o; +text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_; +text: .text%__1cbFunnecessary_membar_volatileNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKCompiledICIis_clean6kM_i_; +text: .text%__1cMPhaseChaitinVfind_base_for_derived6MppnENode_2rI_2_; +text: .text%__1cKciTypeFlowLStateVectorGdo_new6MpnQciByteCodeStream__v_; +text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cENodeGOpcode6kM_i_; +text: .text%__1cKJavaThreadGactive6F_p0_; +text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_; +text: .text%__1cKstoreLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKJavaThreadNreguard_stack6MpC_i_; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_; +text: .text%JVM_FillInStackTrace; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_; +text: .text%__1cSInterpreterRuntimePset_bcp_and_mdp6FpCpnKJavaThread__v_; +text: .text%__1cTjava_lang_ThrowableQclear_stacktrace6FpnHoopDesc__v_; +text: .text%__1cIUniverseWis_out_of_memory_error6FnGHandle__i_; +text: .text%__1cJloadSNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSshlL_eReg_1_31NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_; +text: .text%__1cLPcDescCacheLadd_pc_desc6MpnGPcDesc__v_; +text: .text%__1cNcmovI_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOstackSlotFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cOstackSlotFOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cLloadSSFNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateN_sub_Op_LoadL6MpknENode__v_; +text: .text%__1cNnegI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEcmpl6MnHAddress_i_v_; +text: .text%__1cJloadFNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cIModINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_; +text: .text%__1cKCompiledICZcompute_monomorphic_entry6FnMmethodHandle_nLKlassHandle_iirnOCompiledICInfo_pnGThread__v_; +text: .text%__1cObox_handleNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cbBInterpreterCodeletInterfaceRcode_size_to_size6kMi_i_: interpreter.o; +text: .text%__1cbBInterpreterCodeletInterfaceKinitialize6MpnEStub_i_v_: interpreter.o; +text: .text%__1cIPhaseCFGOinsert_goto_at6MII_v_; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: memnode.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: memnode.o; +text: 
.text%__1cUParallelScavengeHeapEused6kM_I_; +text: .text%__1cOmulIS_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cLOpaque2NodeLbottom_type6kM_pknEType__: connode.o; +text: .text%__1cTshrL_eReg_32_63NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerGmovzxb6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cHciKlassSsuper_check_offset6M_I_; +text: .text%__1cOMacroAssemblerSload_unsigned_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cICmpDNodeGOpcode6kM_i_; +text: .text%__1cLRuntimeStubbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: parse2.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: loopnode.o; +text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_; +text: .text%__1cFParseJdo_ifnull6MnIBoolTestEmask__v_; +text: .text%__1cNmethodOopDescVclear_native_function6M_v_; +text: .text%__1cFframeLnmethods_do6M_v_; +text: .text%__1cJLoadBNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cLeAXRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%jni_NewLocalRef: jni.o; +text: .text%__1cKstoreFNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHMatcherQinline_cache_reg6F_nHOptoRegEName__; +text: .text%__1cLloadSSFNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_; +text: .text%__1cGOopMapPset_derived_oop6MnHOptoRegEName_ii2_v_; +text: .text%__1cOimmI_32_63OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cLOptoRuntimebAresolve_opt_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cPClassFileParserbEparse_constant_pool_long_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cLConvF2DNodeGOpcode6kM_i_; +text: .text%__1cIMulINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cPconvL2I_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitOset_pair_local6MipnENode__v_: parse2.o; +text: .text%__1cFStateM_sub_Op_AndI6MpknENode__v_; +text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_; +text: .text%__1cICodeHeapMmax_capacity6kM_I_; +text: .text%__1cHciKlassGloader6M_pnHoopDesc__: ciTypeArrayKlass.o; +text: .text%__1cKloadUBNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%jni_DeleteGlobalRef: jni.o; +text: .text%__1cJLoadCNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOPhaseIdealLoopLdo_split_if6MpnENode__v_; +text: .text%__1cRsubI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJcmpOpOperKless_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cNaddI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKTypeOopPtrSmake_from_constant6FpnIciObject__pk0_; +text: .text%__1cJArrayDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cMURShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%jni_GetObjectArrayElement: jni.o; +text: .text%__1cRmulI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_; +text: .text%__1cNloadConI0NodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_; +text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_; +text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_; +text: 
.text%__1cIUniverseHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cOJNIHandleBlockMweak_oops_do6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cQVMOperationQdDueueHoops_do6MpnKOopClosure__v_; +text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_; +text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_; +text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_; +text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollectorXoops_do_for_all_threads6FpnKOopClosure__v_; +text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_; +text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_; +text: .text%__1cNstoreImmINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_; +text: .text%JVM_GetCPMethodModifiers; +text: .text%__1cIModLNodeGOpcode6kM_i_; +text: .text%__1cNSCMemProjNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cHnmethodNscope_desc_at6MpCi_pnJScopeDesc__; +text: .text%__1cTcompareAndSwapLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIAndLNodeGmul_id6kM_pknEType__: classes.o; +text: .text%jni_SetLongField: jni.o; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: multnode.o; +text: .text%__1cIAndLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%jio_vsnprintf; +text: .text%__1cLRethrowNodeEhash6kM_I_: classes.o; +text: .text%__1cWCallLeafNoFPDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsalI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRsalI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWCallLeafNoFPDirectNodeRis_safepoint_node6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerDjmp6MnHAddress__v_; +text: .text%__1cIimmLOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConLNodeFclone6kM_pnENode__; +text: .text%jio_snprintf; +text: .text%__1cTCallDynamicJavaNodeEhash6kM_I_: callnode.o; +text: .text%__1cNSignatureInfoJdo_double6M_v_: frame.o; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: subnode.o; +text: .text%__1cMstoreSSINodeHis_Copy6kM_I_: ad_i486_misc.o; +text: .text%__1cYmulI_imm_RShift_highNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQsalL_eReg_CLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLConvL2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIMulLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOjmpLoopEndNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOmulIS_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_; +text: .text%__1cMdecI_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompiledVFrameEcode6kM_pnHnmethod__; +text: .text%__1cOleaPIdxOffNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReturnNode2t6MpnENode_2222_v_; +text: .text%__1cKReturnNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQComputeCallStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cPGlobalTLABStatsKinitialize6M_v_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_; +text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersbBupdate_counters_from_policy6M_v_; +text: .text%__1cSReferenceProcessorOprocess_phase16MppnHoopDesc_pnPReferencePolicy_pnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: 
.text%__1cSReferenceProcessorbBenqueue_discovered_reflists6MppnHoopDesc__v_; +text: .text%__1cSReferenceProcessorbDenqueue_discovered_references6M_i_; +text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_; +text: .text%__1cUParallelScavengeHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cPGCMemoryManagerIgc_begin6M_v_; +text: .text%__1cPGCMemoryManagerGgc_end6M_v_; +text: .text%__1cUParallelScavengeHeapTensure_parseability6M_v_; +text: .text%__1cUParallelScavengeHeapOfill_all_tlabs6M_v_; +text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_; +text: .text%__1cNMemoryServiceIgc_begin6Fi_v_; +text: .text%__1cNMemoryServiceGgc_end6Fi_v_; +text: .text%__1cXTraceMemoryManagerStats2t6Mi_v_; +text: .text%__1cXTraceMemoryManagerStats2T6M_v_; +text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_; +text: .text%__1cRaddL_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_; +text: .text%__1cXjava_lang_ref_ReferenceRpending_list_addr6F_ppnHoopDesc__; +text: .text%__1cUParallelScavengeHeapQresize_all_tlabs6M_v_; +text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cUParallelScavengeHeapPupdate_counters6M_v_; +text: .text%__1cNCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersPupdate_counters6M_v_; +text: .text%__1cKPSYoungGenPupdate_counters6M_v_; +text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_; +text: .text%__1cTDerivedPointerTableFclear6F_v_; +text: .text%__1cNCollectedHeapOfill_all_tlabs6M_v_; +text: .text%__1cQLRUMaxHeapPolicy2t6M_v_; +text: .text%__1cTDerivedPointerTablePupdate_pointers6F_v_; +text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_; +text: .text%__1cSReferenceProcessorQprocess_phaseJNI6M_v_; +text: .text%__1cSReferenceProcessorbDprocess_discovered_references6M_v_; +text: .text%__1cJOopMapSet2t6M_v_; +text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_; +text: .text%__1cRInlineCacheBufferIis_empty6F_i_; +text: .text%__1cUSafepointSynchronizeRis_cleanup_needed6F_i_; +text: .text%__1cOPhaseIdealLoopUsplit_if_with_blocks6MrnJVectorSet_rnKNode_Stack__v_; +text: .text%__1cIMachOperFscale6kM_i_; +text: .text%__1cMtlsLoadPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachOperNconstant_disp6kM_i_; +text: .text%__1cPVM_GC_OperationQgc_count_changed6kM_i_; +text: .text%__1cPVM_GC_OperationZacquire_pending_list_lock6M_v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constantPoolKlass.o; +text: .text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_; +text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_; +text: .text%__1cPVM_GC_OperationNdoit_prologue6M_i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constantPoolKlass.o; +text: .text%__1cPVM_GC_OperationbKrelease_and_notify_pending_list_lock6M_v_; +text: .text%__1cIAndLNodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cIAndLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cURethrowExceptionNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cIAndLNodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cJCodeCacheXmark_for_deoptimization6FpnMklassOopDesc__i_; +text: .text%__1cRmulI_eReg_immNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cNmodI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUSafepointSynchronizeFbegin6F_v_; +text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_; +text: 
.text%__1cCosbCmake_polling_page_unreadable6F_v_; +text: .text%__1cUSafepointSynchronizeQdo_cleanup_tasks6F_v_; +text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_; +text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_; +text: .text%__1cJloadFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMCounterDecayFdecay6F_v_; +text: .text%__1cTAbstractInterpreterRnotice_safepoints6F_v_; +text: .text%__1cONMethodSweeperFsweep6F_v_; +text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_; +text: .text%__1cUSafepointSynchronizeDend6F_v_; +text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_; +text: .text%__1cTAbstractInterpreterRignore_safepoints6F_v_; +text: .text%__1cCosbAmake_polling_page_readable6F_v_; +text: .text%__1cURethrowExceptionNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOCompiledRFrameEinit6M_v_; +text: .text%__1cGvframeDtop6kM_p0_; +text: .text%__1cOCompilerOraclePshould_break_at6FnMmethodHandle__i_; +text: .text%__1cJOopMapSetJheap_size6kM_i_; +text: .text%__1cJOopMapSetHcopy_to6MpC_v_; +text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_; +text: .text%__1cKCodeBufferJcopy_code6MpnICodeBlob__v_; +text: .text%__1cNSignatureInfoJdo_double6M_v_: bytecode.o; +text: .text%__1cICodeBlobWfix_relocation_at_move6Mi_v_; +text: .text%JVM_DoPrivileged; +text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_; +text: .text%__1cVPatchingRelocIteratorHprepass6M_v_; +text: .text%__1cICodeBlobPallocation_size6FpnKCodeBuffer_ii_I_; +text: .text%__1cNRelocIteratorMcreate_index6FpnKCodeBuffer_pnJrelocInfo_4_4_; +text: .text%__1cICodeBlob2t6MpkcpnKCodeBuffer_iiipnJOopMapSet_i_v_; +text: .text%__1cKCodeBufferPcopy_relocation6MpnICodeBlob__v_; +text: .text%__1cIAddLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_base6MnITosState_ppCi_v_; +text: .text%__1cZInterpreterMacroAssemblerKverify_FPU6MinITosState__v_; +text: .text%__1cNmodL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cURethrowExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstoreFNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQVMOperationQdDueueDadd6MpnMVM_Operation__i_; +text: .text%__1cJEventMark2t6MpkcE_v_: vmThread.o; +text: .text%__1cGThreadMget_priority6Fkpk0_nOThreadPriority__; +text: .text%__1cIVMThreadSevaluate_operation6MpnMVM_Operation__v_; +text: .text%__1cQVMOperationQdDueueGunlink6MpnMVM_Operation__v_; +text: .text%__1cQVMOperationQdDueueOqueue_add_back6MipnMVM_Operation__v_; +text: .text%__1cMVM_OperationIevaluate6M_v_; +text: .text%__1cCosTget_native_priority6FkpknGThread_pi_nIOSReturn__; +text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_; +text: .text%__1cQVMOperationQdDueueGinsert6MpnMVM_Operation_2_v_; +text: .text%__1cCosMget_priority6FkpknGThread_rnOThreadPriority__nIOSReturn__; +text: .text%__1cScompI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: jvm.o; +text: .text%__1cOcompI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cGGCTask2t6Mn0AEKindEkind__v_; +text: .text%__1cFParseGdo_new6M_v_; +text: .text%__1cLGCTaskQdDueue2t6Mi_v_; +text: .text%__1cJloadFNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNJvmtiGCMarker2t6Mi_v_; +text: .text%__1cNJvmtiGCMarker2T6M_v_; +text: .text%__1cKPSScavengeXshould_attempt_scavenge6F_i_; +text: .text%__1cUWaitForBarrierGCTask2t6Mi_v_; +text: 
.text%__1cUPSAdaptiveSizePolicyPupdate_averages6MiII_v_; +text: .text%__1cNBarrierGCTaskIdestruct6M_v_; +text: .text%__1cNBarrierGCTaskOdo_it_internal6MpnNGCTaskManager_I_v_; +text: .text%__1cGGCTaskIdestruct6M_v_; +text: .text%__1cLGCTaskQdDueueGcreate6F_p0_; +text: .text%__1cKPSYoungGenLswap_spaces6M_v_; +text: .text%__1cKPSYoungGenNresize_spaces6MII_v_; +text: .text%__1cKPSScavengeQinvoke_no_policy6Fpi_i_; +text: .text%__1cLGCTaskQdDueueHenqueue6Mp0_v_; +text: .text%__1cKPSYoungGenRresize_generation6MII_i_; +text: .text%__1cKPSYoungGenGresize6MII_v_; +text: .text%__1cZSerialOldToYoungRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cUParallelScavengeHeapQresize_young_gen6MII_v_; +text: .text%__1cZSerialOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cNGCTaskManagerIadd_list6MpnLGCTaskQdDueue__v_; +text: .text%__1cNGCTaskManagerVrelease_all_resources6M_v_; +text: .text%__1cUWaitForBarrierGCTaskEname6M_pc_: gcTaskManager.o; +text: .text%__1cUWaitForBarrierGCTaskGcreate6F_p0_; +text: .text%__1cSPSPromotionManagerMpre_scavenge6F_v_; +text: .text%__1cSPSPromotionManagerNpost_scavenge6F_v_; +text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_; +text: .text%__1cSPSPromotionManagerbBvm_thread_promotion_manager6F_p0_; +text: .text%__1cUPSAdaptiveSizePolicyUminor_collection_end6MnHGCCauseFCause__v_; +text: .text%__1cSCardTableExtensionRscavenge_contents6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager__v_; +text: .text%__1cUPSAdaptiveSizePolicyWminor_collection_begin6M_v_; +text: .text%__1cNMonitorSupplyHrelease6FpnHMonitor__v_; +text: .text%__1cNMonitorSupplyHreserve6F_pnHMonitor__; +text: .text%__1cUWaitForBarrierGCTaskIwait_for6M_v_; +text: .text%__1cUWaitForBarrierGCTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cUWaitForBarrierGCTaskIdestruct6M_v_; +text: .text%__1cUWaitForBarrierGCTaskHdestroy6Fp0_v_; +text: .text%__1cUPSAdaptiveSizePolicybPcompute_survivor_space_size_and_threshold6MiiI_i_; +text: .text%__1cHThreadsZcreate_thread_roots_tasks6FpnLGCTaskQdDueue__v_; +text: .text%__1cPGlobalTLABStatsHpublish6M_v_; +text: .text%__1cMindirectOperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cMindirectOperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cbDVM_ParallelGCFailedAllocation2t6MIiiI_v_; +text: .text%__1cJloadSNodeFreloc6kM_i_; +text: .text%__1cMrep_stosNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateN_sub_Op_LoadS6MpknENode__v_; +text: .text%__1cQorI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQshrI_eReg_CLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_; +text: .text%__1cNIdealLoopTreePiteration_split6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cHnmethodPscopes_pcs_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodQscopes_data_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodJstub_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodOexception_size6kM_i_: nmethod.o; +text: .text%__1cIMulLNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cLOopRecorderIoop_size6M_i_; +text: .text%__1cYDebugInformationRecorderJdata_size6M_i_; +text: .text%__1cHnmethodKtotal_size6kM_i_; +text: .text%__1cVCallRuntimeDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNstoreImmINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cINodeHash2t6Mp0_v_; +text: .text%__1cOPhaseTransform2t6Mp0nFPhaseLPhaseNumber__v_; +text: .text%__1cLPhaseValues2t6Mp0_v_; +text: .text%__1cZCallInterpreterDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: 
.text%__1cNmethodOopDescbDbuild_interpreter_method_data6FnMmethodHandle_pnGThread__v_; +text: .text%__1cOJNIHandleBlockRrebuild_free_list6M_v_; +text: .text%__1cFStateM_sub_Op_Goto6MpknENode__v_; +text: .text%__1cFDictIFreset6MpknEDict__v_; +text: .text%__1cIPhaseCFGQFind_Inner_Loops6M_v_; +text: .text%__1cMPhaseChaitinRRegister_Allocate6M_v_; +text: .text%__1cIPhaseCFGJbuild_cfg6M_I_; +text: .text%__1cIPhaseCFG2t6MpnFArena_pnIRootNode_rnHMatcher__v_; +text: .text%__1cHMatcherZnumber_of_saved_registers6F_i_; +text: .text%__1cMPhaseChaitin2t6MIrnIPhaseCFG_rnHMatcher__v_; +text: .text%__1cOCompileWrapper2t6MpnHCompile__v_; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: cfgnode.o; +text: .text%__1cETypeKInitialize6FpnHCompile__v_; +text: .text%__1cIPhaseCFGKDominators6M_v_; +text: .text%__1cHCompileEInit6Mi_v_; +text: .text%__1cIPhaseCFGDDFS6MpnGTarjan__I_; +text: .text%__1cIPhaseCFGVschedule_pinned_nodes6MrnJVectorSet__v_; +text: .text%__1cIPhaseCFGOschedule_early6MrnJVectorSet_rnJNode_List_rnLBlock_Array__i_; +text: .text%__1cXPhaseAggressiveCoalesceGverify6M_v_: coalesce.o; +text: .text%__1cWNode_Backward_Iterator2t6MpnENode_rnJVectorSet_rnJNode_List_rnLBlock_Array__v_; +text: .text%__1cIPhaseCFGNschedule_late6MrnJVectorSet_rnJNode_List_rnNGrowableArray4CI___v_; +text: .text%__1cIPhaseCFGQGlobalCodeMotion6MrnHMatcher_IrnJNode_List__v_; +text: .text%__1cVExceptionHandlerTable2t6Mi_v_; +text: .text%__1cGTarjanIsetdepth6MI_v_; +text: .text%__1cHCompileICode_Gen6M_v_; +text: .text%__1cIPhaseCFGYEstimate_Block_Frequency6M_v_; +text: .text%__1cXPhaseAggressiveCoalesceNinsert_copies6MrnHMatcher__v_; +text: .text%__1cMPhaseChaitinbGstretch_base_pointer_live_ranges6MpnMResourceArea__i_; +text: .text%__1cNPhaseRegAllocTpd_preallocate_hook6M_v_; +text: .text%__1cQorI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: matcher.o; +text: .text%__1cHMatcherUvalidate_null_checks6M_v_; +text: .text%__1cHMatcherPinit_spill_mask6MpnENode__v_; +text: .text%__1cHMatcherTFixup_Save_On_Entry6M_v_; +text: .text%__1cHMatcherVinit_first_stack_mask6M_v_; +text: .text%__1cHMatcherFmatch6M_v_; +text: .text%__1cHCompileOcompute_old_SP6M_nHOptoRegEName__; +text: .text%__1cHMatcher2t6MrnJNode_List__v_; +text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_; +text: .text%__1cOleaPIdxOffNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cHMatcherLreturn_addr6kM_nHOptoRegEName__; +text: .text%__1cHCompilebBregister_library_intrinsics6M_v_; +text: .text%__1cMURShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cMPhaseChaitinRbuild_ifg_virtual6M_v_; +text: .text%__1cIAndLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJPhaseLive2t6MrknIPhaseCFG_rnILRG_List_pnFArena__v_; +text: .text%__1cIPhaseIFG2t6MpnFArena__v_; +text: .text%__1cMPhaseChaitinGde_ssa6M_v_; +text: .text%__1cFArenaNmove_contents6Mp0_1_; +text: .text%__1cNPhaseRegAlloc2t6MIrnIPhaseCFG_rnHMatcher_pF_v_v_; +text: .text%__1cFArena2t6MI_v_; +text: .text%__1cWsize_exception_handler6F_I_; +text: .text%__1cHCompileYinit_scratch_buffer_blob6M_v_; +text: .text%__1cOCompileWrapper2T6M_v_; +text: .text%__1cUPSAdaptiveSizePolicyZdecay_supplemental_growth6Mi_v_; +text: .text%__1cNPhasePeepholeMdo_transform6M_v_; +text: .text%__1cHCompileTframe_size_in_words6kM_i_; +text: .text%__1cNPhasePeephole2T6M_v_; +text: .text%__1cNPhasePeephole2t6MpnNPhaseRegAlloc_rnIPhaseCFG__v_; +text: 
.text%__1cLBlock_Array2t6MpnFArena__v_: buildOopMap.o; +text: .text%__1cHCompileMBuildOopMaps6M_v_; +text: .text%__1cWemit_exception_handler6FrnKCodeBuffer__v_; +text: .text%__1cLdo_liveness6FpnNPhaseRegAlloc_pnIPhaseCFG_pnKBlock_List_ipnFArena_pnEDict__v_: buildOopMap.o; +text: .text%__1cMPhaseChaitin2T6M_v_; +text: .text%__1cMPhaseChaitinbApost_allocate_copy_removal6M_v_; +text: .text%__1cIPhaseCFGLRemoveEmpty6M_v_; +text: .text%__1cNPhaseRegAllocPalloc_node_regs6Mi_v_; +text: .text%__1cMPhaseChaitinMfixup_spills6M_v_; +text: .text%__1cHCompileGOutput6M_v_; +text: .text%__1cHCompileQShorten_branches6MpnFLabel_ri333_v_; +text: .text%__1cHCompileLFill_buffer6M_v_; +text: .text%__1cHCompileTFillExceptionTables6MIpI1pnFLabel__v_; +text: .text%__1cHCompileRScheduleAndBundle6M_v_; +text: .text%__1cKCodeBufferOrelocate_stubs6M_v_; +text: .text%__1cUPSAdaptiveSizePolicybPeden_increment_with_supplement_aligned_up6MI_I_; +text: .text%__1cUPSAdaptiveSizePolicyOeden_increment6MII_I_; +text: .text%__1cOMachPrologNodeFreloc6kM_i_; +text: .text%__1cUPSAdaptiveSizePolicyVadjust_for_throughput6MipI1_v_; +text: .text%__1cWImplicitExceptionTableIset_size6MI_v_; +text: .text%__1cJPhaseLive2T6M_v_; +text: .text%__1cUPSAdaptiveSizePolicybDcompute_generation_free_space6MIIIIIIIi_v_; +text: .text%__1cIPSOldGenMmax_gen_size6M_I_: psOldGen.o; +text: .text%__1cUPSAdaptiveSizePolicybHclear_generation_free_space_flags6M_v_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: loopnode.o; +text: .text%__1cUPSAdaptiveSizePolicyQdecaying_gc_cost6kM_d_; +text: .text%__1cHCompileYinit_scratch_locs_memory6M_v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: methodDataKlass.o; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_words6FpnNmethodOopDesc__i_; +text: .text%__1cRmethodDataOopDescPpost_initialize6MpnOBytecodeStream__v_; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_bytes6FpnNmethodOopDesc__i_; +text: .text%__1cRmethodDataOopDescKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: methodDataOop.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: methodDataKlass.o; +text: .text%__1cNmulL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbDVM_ParallelGCFailedAllocationEname6kM_pkc_: vm_operations.o; +text: .text%__1cbDVM_ParallelGCFailedAllocationEdoit6M_v_; +text: .text%__1cKoopFactoryOnew_methodData6FnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: methodDataKlass.o; +text: .text%__1cKPSScavengeGinvoke6Fpi_v_; +text: .text%__1cUParallelScavengeHeapTfailed_mem_allocate6MpiIii_pnIHeapWord__; +text: .text%__1cUPSAdaptiveSizePolicyOshould_full_GC6MI_i_; +text: .text%__1cPmethodDataKlassIallocate6MnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cOcmpD_cc_P6NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: loopTransform.o; +text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cHRetNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateO_sub_Op_Return6MpknENode__v_; +text: .text%__1cLlog2_intptr6Fi_i_: divnode.o; +text: .text%__1cbLtransform_int_divide_to_long_multiply6FpnIPhaseGVN_pnENode_i_3_: divnode.o; +text: .text%__1cFStateM_sub_Op_ConL6MpknENode__v_; +text: .text%__1cSmembar_acquireNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cNcmovI_regNodeErule6kM_I_: ad_i486_misc.o; +text: 
.text%__1cHRetNodeFreloc6kM_i_; +text: .text%__1cOClearArrayNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciObjectOis_method_data6M_i_: ciInstance.o; +text: .text%__1cIciObjectJis_method6M_i_: ciInstance.o; +text: .text%__1cNaddP_eRegNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cIConFNodeGOpcode6kM_i_; +text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cJCodeCacheNalive_nmethod6FpnICodeBlob__pnHnmethod__; +text: .text%__1cIGraphKitNallocate_heap6MpnENode_222pknITypeFunc_pC22ipknKTypeOopPtr__2_; +text: .text%__1cPciInstanceKlassbBcompute_shared_has_subklass6M_i_; +text: .text%__1cOLibraryCallKitNtry_to_inline6M_i_; +text: .text%__1cQLibraryIntrinsicIgenerate6MpnIJVMState__2_; +text: .text%__1cLRuntimeStubbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cOcompP_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_CmpL6MpknENode__v_; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_: library_call.o; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_pCnJrelocInfoJrelocType__v_; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopTransform.o; +text: .text%__1cKCompiledICSset_ic_destination6MpC_v_; +text: .text%__1cLConvL2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cHBitDataKis_BitData6M_i_: ciMethodData.o; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_; +text: .text%__1cRxorI_eReg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNaddL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPshlI_eReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cGThreadOis_interrupted6Fp0i_i_; +text: .text%__1cSmembar_releaseNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNdecI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNandL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQshrI_eReg_CLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNdecI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJLoadDNodeGOpcode6kM_i_; +text: .text%__1cMstoreSSINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopOplace_near_use6kMpnENode__2_; +text: .text%__1cSInterpreterRuntimeOprofile_method6FpnKJavaThread_pC_i_; +text: .text%__1cOcmpD_cc_P6NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadLNodeFreloc6kM_i_; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: multnode.o; +text: .text%__1cRxorI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLBoxLockNodeEhash6kM_I_: classes.o; +text: .text%__1cIAddLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRaddI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__: ad_i486_misc.o; +text: .text%__1cJlabelOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cNsubL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulLNodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cIMulLNodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cTconvF2I_reg_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: loopopts.o; +text: .text%__1cXmembar_acquire_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmethodOopDescThas_native_function6kM_i_; +text: .text%__1cRxorI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cFStateR_sub_Op_SafePoint6MpknENode__v_; +text: .text%__1cMciMethodDataStrap_recompiled_at6MpnLProfileData__i_; +text: .text%__1cSsafePoint_pollNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSsafePoint_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSsafePoint_pollNodeFreloc6kM_i_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlass.o; +text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cLOptoRuntimeRmultianewarray1_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cNmethodOopDescWis_vanilla_constructor6kM_i_; +text: .text%__1cKCodeBuffer2t6MpCi_v_; +text: .text%__1cNandI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSshlL_eReg_1_31NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: memnode.o; +text: .text%__1cOmulIS_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQsalI_eReg_CLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopKclone_loop6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cQsalL_eReg_CLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cXroundDouble_mem_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRandI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKciTypeFlowFBlockQset_private_copy6Mi_v_; +text: .text%__1cLimmI_16OperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cHi2sNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cHi2sNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRxorI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvI2L_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateL_sub_Op_OrI6MpknENode__v_; +text: .text%JVM_GetClassNameUTF; +text: .text%__1cSshrL_eReg_1_31NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKStoreDNodeGOpcode6kM_i_; +text: .text%__1cNcmovP_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: loopTransform.o; +text: .text%__1cIXorINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cOcmpD_cc_P6NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNmaxI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFframeMpd_gc_epilog6M_v_; +text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_epilogue6M_v_; +text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_prologue6M_v_; +text: .text%__1cHTypePtrEmake6FnETypeFTYPES_n0ADPTR_i_pk0_; +text: .text%__1cIAddLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%JVM_FindLoadedClass; +text: .text%__1cIMulFNodeGOpcode6kM_i_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: vmThread.o; +text: .text%__1cCosJyield_all6Fi_v_; +text: .text%__1cNstoreImmINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJBytecodesRspecial_length_at6FpC_i_; +text: .text%__1cOstackSlotLOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cRaddI_eReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvI2DNodeGOpcode6kM_i_; +text: .text%__1cTcompareAndSwapLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSloadL_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUjmpLoopEnd_shortNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cUjmpLoopEnd_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMjniIdMapBaseHoops_do6MpnKOopClosure__v_; +text: 
.text%__1cNmodL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEfrom6F_pnMRegisterImpl__; +text: .text%__1cIjniIdMapHoops_do6MpnKOopClosure__v_; +text: .text%__1cTDerivedPointerTableDadd6FppnHoopDesc_3_v_; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: classes.o; +text: .text%__1cPadd_derived_oop6FppnHoopDesc_2_v_: oopMap.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: loopnode.o; +text: .text%__1cKstoreCNodeFreloc6kM_i_; +text: .text%__1cKRegionNodeJideal_reg6kM_I_: loopnode.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: loopnode.o; +text: .text%__1cMincI_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Copy6kM_I_: loopnode.o; +text: .text%__1cXconvI2L_reg_reg_zexNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJOopMapSetMgrow_om_data6M_v_; +text: .text%__1cScompP_mem_eRegNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKReflectionGinvoke6FnTinstanceKlassHandle_nMmethodHandle_nGHandle_inOobjArrayHandle_nJBasicType_4ipnGThread__pnHoopDesc__; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_; +text: .text%__1cKReflectionDbox6FpnGjvalue_nJBasicType_pnGThread__pnHoopDesc__; +text: .text%__1cJAssemblerDret6Mi_v_; +text: .text%__1cTcmovII_reg_LEGTNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJAssemblerEcall6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cLBoxLockNode2t6Mi_v_; +text: .text%__1cIGraphKitMnext_monitor6M_i_; +text: .text%__1cTshrL_eReg_32_63NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cLOptoRuntimebBcomplete_monitor_enter_Type6F_pknITypeFunc__; +text: .text%__1cMdecI_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIMaxINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFStateQ_sub_Op_FastLock6MpknENode__v_; +text: .text%__1cPcmpFastLockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIGraphKitLshared_lock6MpnENode__pnMFastLockNode__; +text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__; +text: .text%__1cKciTypeFlowLStateVectorJdo_aaload6MpnQciByteCodeStream__v_; +text: .text%__1cNaddI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cEUTF8Ounicode_length6Fpkc_i_; +text: .text%__1cPCallRuntimeNodeGOpcode6kM_i_; +text: .text%__1cFParseFBlockMadd_new_path6M_i_; +text: .text%__1cYcmpL_zero_flags_LEGTNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%JVM_FindClassFromClass; +text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc_ii_v_: nativeLookup.o; +text: .text%__1cIMulINodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cMdecI_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPshlI_eReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_; +text: .text%__1cNSignatureInfoIdo_float6M_v_: frame.o; +text: .text%JVM_NewInstanceFromConstructor; +text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_; +text: .text%__1cLConvI2LNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cNminI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMorI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: 
.text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc__v_: nativeLookup.o; +text: .text%__1cNIdealLoopTreeMpolicy_align6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeNpolicy_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeSpolicy_range_check6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeQpolicy_peel_only6kMpnOPhaseIdealLoop__i_; +text: .text%__1cLloadSSFNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMtlsLoadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYmulI_imm_RShift_highNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFParseFBlockNstack_type_at6kMi_pknEType__; +text: .text%__1cJAssemblerGmovzxw6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cFStateN_sub_Op_LoadB6MpknENode__v_; +text: .text%__1cIMulLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOMacroAssemblerSload_unsigned_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cKstoreBNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cZCallInterpreterDirectNodeSalignment_required6kM_i_: ad_i486_misc.o; +text: .text%__1cZCallInterpreterDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNloadConL0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZCallInterpreterDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cTsarL_eReg_32_63NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__; +text: .text%__1cLLShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMorI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLLShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: callnode.o; +text: .text%__1cNtestU_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMURShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLMachUEPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cWflagsReg_long_LTGEOperFclone6kM_pnIMachOper__; +text: .text%__1cIAddLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRsarI_eReg_immNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIJVMStateNmonitor_depth6kM_i_: graphKit.o; +text: .text%__1cWCallLeafNoFPDirectNodeKmethod_set6Mi_v_; +text: .text%__1cIregFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlassKlass.o; +text: .text%__1cHnmethodUnumber_of_dependents6kM_i_: nmethod.o; +text: .text%__1cNSingletonBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: node.o; +text: .text%lwp_mutex_init: os_solaris.o; +text: .text%__1cFParseLarray_store6MnJBasicType__v_; +text: .text%__1cLRShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLRethrowNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPsarI_eReg_1NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKcmpOpUOperEless6kM_i_: ad_i486_clone.o; +text: .text%__1cFParseMdo_checkcast6M_v_; +text: .text%__1cMloadConDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOPhaseIdealLoopVclone_up_backedge_goo6MpnENode_22_2_; +text: .text%__1cbACallCompiledJavaDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__; +text: .text%__1cJStubQdDueueGcommit6Mi_v_; +text: 
.text%__1cMLinkResolverbHlinktime_resolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnHoopDesc_pnNsymbolOopDesc__i_; +text: .text%__1cENodeHdel_out6Mp0_v_: doCall.o; +text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__; +text: .text%__1cKRegionNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%jni_NewString: jni.o; +text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__; +text: .text%__1cMincI_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHTypePtrFempty6kM_i_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_i_v_; +text: .text%__1cTshrL_eReg_32_63NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXmembar_acquire_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMMergeMemNodeIadr_type6kM_pknHTypePtr__: memnode.o; +text: .text%__1cITypeLongFwiden6kMpknEType__3_; +text: .text%__1cIplus_adr6FpnENode_i_1_: generateOptoStub.o; +text: .text%__1cIModINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cScompP_mem_eRegNodeFreloc6kM_i_; +text: .text%__1cKciTypeFlowLStateVectorMdo_checkcast6MpnQciByteCodeStream__v_; +text: .text%__1cNSignatureInfoIdo_short6M_v_: frame.o; +text: .text%__1cMadjust_check6FpnENode_11iipnMPhaseIterGVN__v_: ifnode.o; +text: .text%__1cNmaxI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNtestU_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateP_sub_Op_ConvI2L6MpknENode__v_; +text: .text%__1cNdivL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJScopeDescGsender6kM_p0_; +text: .text%__1cOcompiledVFrameGsender6kM_pnGvframe__; +text: .text%__1cOstackSlotIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_; +text: .text%__1cLPhaseValuesKis_IterGVN6M_pnMPhaseIterGVN__: phaseX.o; +text: .text%__1cQsalI_eReg_CLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQciTypeArrayKlassJmake_impl6FnJBasicType__p0_; +text: .text%__1cJloadBNodeFreloc6kM_i_; +text: .text%__1cOcmpD_cc_P6NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOleaPIdxOffNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodKlass.o; +text: .text%__1cKJavaThreadJframes_do6MpFpnFframe_pknLRegisterMap__v_v_; +text: .text%__1cSmembar_acquireNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cKEntryPoint2t6MpC11111111_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: compiledIC.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constMethodKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constMethodKlass.o; +text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__; +text: .text%__1cRmulI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_: typeArrayKlassKlass.o; +text: .text%__1cPstoreImmI16NodeFreloc6kM_i_; +text: .text%__1cFStateP_sub_Op_ConvL2I6MpknENode__v_; +text: .text%__1cLloadSSDNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIXorINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cYcmpL_zero_flags_LEGTNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNcmovI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOmulIS_eRegNodeMideal_Opcode6kM_i_: 
ad_i486_misc.o; +text: .text%__1cVloadConL_low_onlyNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRsubI_eReg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: typeArrayKlass.o; +text: .text%__1cOcmpD_cc_P6NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceRefKlass.o; +text: .text%__1cObox_handleNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadFNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSmembar_acquireNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCompileBrokerNallocate_task6F_pnLCompileTask__; +text: .text%__1cNCompileBrokerTcreate_compile_task6FpnMCompileQdDueue_inMmethodHandle_i3ipkcii_pnLCompileTask__; +text: .text%__1cMCompileQdDueueDadd6MpnLCompileTask__v_; +text: .text%__1cLCompileTaskKinitialize6MinMmethodHandle_i1ipkcii_v_; +text: .text%jni_GetObjectClass: jni.o; +text: .text%__1cMCompileQdDueueDget6M_pnLCompileTask__; +text: .text%__1cSmembar_acquireNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLBoxLockNodeDcmp6kMrknENode__I_; +text: .text%__1cXmembar_acquire_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cPconvF2D_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cMincI_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cSCompileTaskWrapper2t6MpnLCompileTask__v_; +text: .text%__1cCosPhint_no_preempt6F_v_; +text: .text%__1cUGenericGrowableArrayNraw_appendAll6Mpk0_v_; +text: .text%__1cFStateM_sub_Op_MulL6MpknENode__v_; +text: .text%__1cOstackSlotFOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cLCompileTaskEfree6M_v_; +text: .text%__1cFciEnv2t6MpnHJNIEnv__iii_v_; +text: .text%__1cSCompileTaskWrapper2T6M_v_; +text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_; +text: .text%__1cNCompileBrokerJfree_task6FpnLCompileTask__v_; +text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_; +text: .text%__1cNCompileBrokerVpush_jni_handle_block6F_v_; +text: .text%__1cTmembar_volatileNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__; +text: .text%__1cSshrL_eReg_1_31NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFciEnv2T6M_v_; +text: .text%__1cNmodI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cNCompileBrokerUpop_jni_handle_block6F_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: arrayKlass.o; +text: .text%__1cNaddP_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_; +text: .text%__1cSstring_compareNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIimmFOperJconstantF6kM_f_: ad_i486_clone.o; +text: .text%__1cOcompP_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cOMacroAssemblerFalign6Mi_v_; +text: .text%__1cOstackSlotPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cObox_handleNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOstackSlotPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLCodeletMark2t6MrpnZInterpreterMacroAssembler_pkcinJBytecodesECode__v_: interpreter.o; +text: .text%__1cSInterpreterCodeletKinitialize6MpkcinJBytecodesECode__v_; +text: .text%__1cNandI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; 
+text: .text%__1cLloadSSFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmodL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQmulD_reg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIMinINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cNandI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_and_dispatch6MpnITemplate_nITosState__v_; +text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_; +text: .text%JVM_FindClassFromClassLoader; +text: .text%JVM_FindClassFromBootLoader; +text: .text%__1cPshrI_eReg_1NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cHi2bNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cMmatch_option6FpknMJavaVMOption_pkcp4_i_: arguments.o; +text: .text%__1cNnegI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSstring_compareNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOPhaseTransform2t6MpnFArena_nFPhaseLPhaseNumber__v_; +text: .text%__1cHCompileKinit_start6MpnJStartNode__v_; +text: .text%__1cMstoreSSINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLPhaseValues2t6MpnFArena_I_v_; +text: .text%__1cINodeHash2t6MpnFArena_I_v_; +text: .text%__1cINodeHashIround_up6FI_I_; +text: .text%__1cbACallCompiledJavaDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKScopeValueJread_from6FpnTDebugInfoReadStream__p0_; +text: .text%JVM_IHashCode; +text: .text%lwp_cond_init: os_solaris.o; +text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: jvm.o; +text: .text%__1cIVMThreadMis_VM_thread6kM_i_: vmThread.o; +text: .text%__1cOcompiledVFrameScreate_stack_value6kMpnKScopeValue__pnKStackValue__; +text: .text%__1cGThreadOis_Java_thread6kM_i_: vmThread.o; +text: .text%__1cKstoreBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorCto6F_pnMRegisterImpl__; +text: .text%__1cKDictionarybAis_valid_protection_domain6MiInMsymbolHandle_nGHandle_2_i_; +text: .text%__1cPconvI2L_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLStrCompNodeKmatch_edge6kMI_I_; +text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_; +text: .text%__1cRtestI_reg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachNodeJis_MachIf6kM_pknKMachIfNode__: ad_i486_misc.o; +text: .text%__1cNmaxI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_AddL6MpknENode__v_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_i_v_; +text: .text%__1cFForteNregister_stub6FpkcpC3_v_; +text: .text%__1cXroundDouble_mem_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_; +text: .text%__1cENodeEgetd6kM_d_; +text: .text%__1cNSignatureInfoHdo_char6M_v_: frame.o; +text: .text%__1cMstoreSSPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: nmethod.o; +text: .text%__1cFStateM_sub_Op_AndL6MpknENode__v_; +text: .text%__1cKConv2BNodeGOpcode6kM_i_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii1i_v_; +text: .text%JVM_GetClassLoader; +text: .text%__1cRmulI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cbCAbstractInterpreterGeneratorQset_entry_points6MnJBytecodesECode__v_; +text: .text%__1cbCAbstractInterpreterGeneratorWset_short_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cOjmpLoopEndNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cFStateW_sub_Op_CountedLoopEnd6MpknENode__v_; +text: .text%__1cXconvI2L_reg_reg_zexNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMinINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSCompareAndSwapNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKBinaryNodeGOpcode6kM_i_; +text: .text%__1cUmembar_cpu_orderNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQsalL_eReg_CLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNSCMemProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFStateW_sub_Op_MemBarCPUOrder6MpknENode__v_; +text: .text%__1cFStateO_sub_Op_Binary6MpknENode__v_; +text: .text%__1cNSignatureInfoIdo_float6M_v_: bytecode.o; +text: .text%__1cOMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_; +text: .text%__1cPClassFileParserbGparse_constant_pool_double_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cRaddI_eReg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadDNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSleaP_eReg_immINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOjmpLoopEndNodeOis_pc_relative6kM_i_: ad_i486_misc.o; +text: .text%__1cOjmpLoopEndNodeTmay_be_short_branch6kM_i_: ad_i486_misc.o; +text: .text%__1cIModINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRmulI_eReg_immNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cKNativeJumpbEcheck_verified_entry_alignment6FpC1_v_; +text: .text%__1cTbasictype2arraycopy6FnJBasicType_i_pC_; +text: .text%__1cFStateU_sub_Op_CallLeafNoFP6MpknENode__v_; +text: .text%__1cZInterpreterMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cLOptoRuntimeOarraycopy_Type6F_pknITypeFunc__; +text: .text%__1cMMachCallNodeXreturns_float_or_double6kM_i_; +text: .text%__1cOLibraryCallKitQinline_arraycopy6M_i_; +text: .text%__1cOMacroAssemblerOcall_VM_helper6MpnMRegisterImpl_pCii_v_; +text: .text%__1cWCallLeafNoFPDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTcmovII_reg_LTGENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cbACallCompiledJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: compiledIC.o; +text: .text%__1cNLocationValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cILocation2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: location.o; +text: .text%__1cNLocationValueLis_location6kM_i_: debugInfo.o; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_2_v_; +text: .text%__1cFParseRarray_store_check6M_v_; +text: .text%__1cQshrI_eReg_CLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNCompileBrokerTis_compile_blocking6FnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerRassign_compile_id6FnMmethodHandle_i_I_; +text: .text%__1cNCompileBrokerZcompilation_is_prohibited6FnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerTis_not_compile_only6FnMmethodHandle__i_; +text: .text%__1cNsubL_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cQsalL_eReg_CLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constMethodKlass.o; +text: .text%__1cFMutex2t6Mipkci_v_; +text: .text%__1cJAssemblerLemit_farith6Miii_v_; +text: .text%__1cIciMethodJhas_loops6kM_i_; 
+text: .text%__1cRtestI_reg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJStartNodeJideal_reg6kM_I_: callnode.o; +text: .text%__1cNCompileBrokerQset_last_compile6FpnOCompilerThread_nMmethodHandle_ii_v_; +text: .text%__1cNCompileBrokerOcheck_break_at6FnMmethodHandle_iii_i_; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: callnode.o; +text: .text%__1cJStartNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cKC2CompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_; +text: .text%__1cNSignatureInfoIdo_short6M_v_: bytecode.o; +text: .text%__1cOCompiledRFrameKtop_method6kM_nMmethodHandle__: rframe.o; +text: .text%__1cOCompiledRFrameLis_compiled6kM_i_: rframe.o; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_: nmethod.o; +text: .text%__1cNminI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOCompiledRFrame2t6MnFframe_pnKJavaThread_kpnGRFrame__v_; +text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_; +text: .text%__1cMelapsedTimerDadd6M0_v_; +text: .text%__1cSmembar_releaseNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFciEnvPregister_method6MpnIciMethod_iiiiiipnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler_ii_v_; +text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_; +text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_; +text: .text%__1cFciEnvbUsystem_dictionary_modification_counter_changed6M_i_; +text: .text%__1cLConvI2FNodeGOpcode6kM_i_; +text: .text%__1cIciMethodQbreak_at_execute6M_i_; +text: .text%__1cQComputeCallStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cICodeHeapMinsert_after6MpnJFreeBlock_2_v_; +text: .text%__1cHnmFlagsFclear6M_v_; +text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_; +text: .text%__1cHnmethodSresolve_JNIHandles6M_v_; +text: .text%__1cVExceptionHandlerTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cHnmethod2t6MpnNmethodOopDesc_iiiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__v_; +text: .text%__1cHnmethod2n6FIi_pv_; +text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cHnmethodLnew_nmethod6FnMmethodHandle_iiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__p0_; +text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_; +text: .text%__1cLPcDescCache2t6M_v_; +text: .text%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_; +text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_; +text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_; +text: .text%__1cICodeBlobJcopy_oops6MppnI_jobject_i_v_; +text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_; +text: .text%__1cTcmovII_reg_LEGTNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_; +text: .text%__1cZCallDynamicJavaDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLOptoRuntimeInew_Type6F_pknITypeFunc__; +text: .text%__1cQorI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_3pnRJavaCallArguments_nGHandle_6_6_; +text: .text%__1cNsubL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: 
.text%__1cIGraphKitMnew_instance6MpnPciInstanceKlass__pnENode__; +text: .text%__1cHCompileUremove_useless_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cIciMethodRbuild_method_data6MnMmethodHandle__v_; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod_ii_v_; +text: .text%__1cHCompileWprint_compile_messages6M_v_; +text: .text%__1cQUnique_Node_ListUremove_useless_nodes6MrnJVectorSet__v_; +text: .text%__1cHCompileVidentify_useful_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cINodeHashUremove_useless_nodes6MrnJVectorSet__v_; +text: .text%__1cSPhaseRemoveUseless2t6MpnIPhaseGVN_pnQUnique_Node_List__v_; +text: .text%__1cIciMethodRbuild_method_data6M_v_; +text: .text%__1cHCompileLInline_Warm6M_i_; +text: .text%__1cMPhaseIterGVN2t6MpnIPhaseGVN__v_; +text: .text%__1cKloadUBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIPhaseCCP2t6MpnMPhaseIterGVN__v_; +text: .text%__1cHCompileRbuild_start_state6MpnJStartNode_pknITypeFunc__pnIJVMState__; +text: .text%__1cIPhaseCCP2T6M_v_; +text: .text%__1cIPhaseCCPHanalyze6M_v_; +text: .text%__1cIPhaseCCPMdo_transform6M_v_; +text: .text%__1cHCompileNreturn_values6MpnIJVMState__v_; +text: .text%__1cKInlineTreeWbuild_inline_tree_root6F_p0_; +text: .text%__1cHCompileVfinal_graph_reshaping6M_i_; +text: .text%__1cbAfinal_graph_reshaping_walk6FrnKNode_Stack_pnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cIPhaseCCPJtransform6MpnENode__2_; +text: .text%__1cHCompileLFinish_Warm6M_v_; +text: .text%__1cMPhaseIterGVN2t6Mp0_v_; +text: .text%__1cJStartNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHCompileIOptimize6M_v_; +text: .text%__1cXMachCallInterpreterNodePret_addr_offset6M_i_; +text: .text%__1cZCallInterpreterDirectNodePcompute_padding6kMi_i_; +text: .text%__1cQComputeCallStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cZCallInterpreterDirectNodeKmethod_set6Mi_v_; +text: .text%__1cTsarL_eReg_32_63NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOMachEpilogNodeQsafepoint_offset6kM_i_; +text: .text%__1cWpoll_return_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_; +text: .text%__1cQComputeCallStackJdo_double6M_v_: generateOopMap.o; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2pnGThread__v_; +text: .text%__1cMLinkResolverWresolve_interface_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverbGruntime_resolve_interface_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cKcmpOpUOperKless_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cRaddL_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNmethodOopDescIset_code6MpnHnmethod__v_; +text: .text%__1cJAssemblerEincl6MpnMRegisterImpl__v_; +text: .text%__1cIDivINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKTypeRawPtrFxdual6kM_pknEType__; +text: .text%__1cMTailCallNodeGOpcode6kM_i_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_iii_v_; +text: .text%__1cMstoreSSPNodeHis_Copy6kM_I_: ad_i486_misc.o; +text: .text%__1cZCallDynamicJavaDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cISubLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: 
.text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_; +text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_; +text: .text%__1cXSignatureHandlerLibraryKinitialize6F_v_; +text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_; +text: .text%__1cNGrowableArray4CX_Efind6kMkX_i_: interpreterRuntime.o; +text: .text%__1cNFingerprinterLfingerprint6M_X_: interpreterRuntime.o; +text: .text%__1cGEventsDlog6FpkcE_v_: exceptions.o; +text: .text%__1cIAddFNodeGOpcode6kM_i_; +text: .text%__1cRaddL_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerFffree6Mi_v_; +text: .text%__1cGThreadVset_pending_exception6MpnHoopDesc_pkci_v_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinGHandle__i_; +text: .text%__1cKExceptionsG_throw6FpnGThread_pkcinGHandle__v_; +text: .text%__1cSstring_compareNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__; +text: .text%__1cJloadDNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapIppop_any6Mi_v_; +text: .text%__1cXroundDouble_mem_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJStartNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSshlL_eReg_1_31NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKPerfMemoryMmark_updated6F_v_; +text: .text%__1cKPerfMemoryFalloc6FI_pc_; +text: .text%__1cPPerfDataManagerIadd_item6FpnIPerfData_i_v_; +text: .text%__1cIPerfDataMcreate_entry6MnJBasicType_II_v_; +text: .text%__1cIPerfData2T6M_v_; +text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_; +text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cFStateM_sub_Op_XorI6MpknENode__v_; +text: .text%__1cCosLelapsedTime6F_d_; +text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse1.o; +text: .text%jni_GetStringUTFChars: jni.o; +text: .text%jni_ReleaseStringUTFChars; +text: .text%__1cTconvD2I_reg_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNmulI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cObox_handleNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNmethodOopDescWcompute_has_loops_flag6M_i_; +text: .text%__1cNloadConI0NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_; +text: .text%__1cXcmpL_reg_flags_LEGTNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOClearArrayNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%JVM_IsInterrupted; +text: .text%__1cJStoreNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cSmembar_releaseNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinMsymbolHandle_4_i_; +text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_; +text: .text%__1cNSignatureInfoHdo_char6M_v_: bytecode.o; +text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_; +text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_; +text: .text%__1cLOptoRuntimeSnew_typeArray_Type6F_pknITypeFunc__; +text: .text%__1cNdivL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_FindLibraryEntry; +text: .text%__1cPCountedLoopNodeGstride6kM_pnENode__: 
loopTransform.o; +text: .text%__1cIGraphKitJnew_array6MpnENode_nJBasicType_pknEType_pknMTypeKlassPtr__2_; +text: .text%__1cSleaP_eReg_immINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_; +text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_; +text: .text%__1cLlog2_intptr6Fi_i_: graphKit.o; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cJloadCNodeFreloc6kM_i_; +text: .text%__1cFStateN_sub_Op_LoadC6MpknENode__v_; +text: .text%__1cLLShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadDNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_; +text: .text%__1cTcmovII_reg_LEGTNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQmulD_reg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cTcmovII_reg_LEGTNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cFParsebLincrement_and_test_invocation_counter6Mi_v_; +text: .text%__1cMTypeKlassPtrRcast_to_exactness6kMi_pknEType__; +text: .text%__1cQsalI_eReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPconvI2F_SSFNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSshrL_eReg_1_31NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJleaP8NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cObox_handleNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNcmpL_LTGENodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbFunnecessary_membar_volatileNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNcmpL_LTGENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFParsePmerge_exception6Mi_v_; +text: .text%__1cSaddF24_reg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopJdo_unroll6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cFStateS_sub_Op_ClearArray6MpknENode__v_; +text: .text%__1cMrep_stosNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMrep_stosNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOcompI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFTypeFEmake6Ff_pk0_; +text: .text%__1cKstoreLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_; +text: .text%__1cIDivINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPmethodDataKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cLStrCompNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_: jni.o; +text: .text%__1cNtestP_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_; +text: .text%__1cSstring_compareNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHMatcherOc_return_value6Fii_nLRegPair__; +text: .text%__1cRandL_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIDivLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSleaP_eReg_immINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: cpCacheKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: cpCacheKlass.o; +text: .text%__1cHi2bNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHCompile2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cOPhaseIdealLoopOadd_constraint6MiipnENode_22p23_v_; +text: .text%__1cKcmpOpUOperHgreater6kM_i_: 
ad_i486_clone.o; +text: .text%__1cFciEnvZcall_has_multiple_targets6FpnNinstanceKlass_nMsymbolHandle_3ri_i_; +text: .text%__1cRtestI_reg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMmulD_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMNativeLookupGlookup6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cJCMoveNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%jni_SetByteArrayRegion: jni.o; +text: .text%__1cENodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cUjmpLoopEnd_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cJloadFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOjmpLoopEndNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cUjmpLoopEnd_shortNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvF2D_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cUjmpLoopEnd_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHi2bNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQPSIsAliveClosureLdo_object_b6MpnHoopDesc__i_: psScavenge.o; +text: .text%__1cFStateO_sub_Op_StoreL6MpknENode__v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cFParseLdo_newarray6MnJBasicType__v_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: ad_i486_misc.o; +text: .text%__1cSsafePoint_pollNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreLNodeFreloc6kM_i_; +text: .text%__1cENodeHdel_out6Mp0_v_: divnode.o; +text: .text%__1cGThreadLnmethods_do6M_v_; +text: .text%__1cSsafePoint_pollNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQorI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRsubI_eReg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSReferenceProcessorZadd_to_discovered_list_mt6MppnHoopDesc_23_v_; +text: .text%__1cPRoundDoubleNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cCosTnative_java_library6F_pv_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2ipnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cMloadConFNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__; +text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_; +text: .text%__1cIModLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cScompP_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__; +text: .text%__1cWflagsReg_long_EQdDNEOperFclone6kM_pnIMachOper__; +text: .text%__1cWflagsReg_long_LEGTOperFclone6kM_pnIMachOper__; +text: .text%__1cTcmovII_reg_EQdDNENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_; +text: .text%__1cMNativeLookupMlookup_style6FnMmethodHandle_pcpkciiripnGThread__pC_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodKlass.o; +text: .text%__1cLServiceUtilLvisible_oop6FpnHoopDesc__i_: objectMonitor_solaris.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodKlass.o; +text: .text%__1cRmulI_eReg_immNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_; +text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_; +text: .text%__1cRandI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cVlookup_special_native6Fpc_pC_: nativeLookup.o; +text: 
.text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__: ciObjArrayKlass.o; +text: .text%__1cRandI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPDictionaryEntryVadd_protection_domain6MpnHoopDesc__v_; +text: .text%__1cRmulI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTsarL_eReg_32_63NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cPshrI_eReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__; +text: .text%__1cRxorI_eReg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_; +text: .text%__1cJMarkSweepMfollow_stack6F_v_; +text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cMloadConFNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLregDPR1OperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cLregDPR1OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMNativeLookupLlookup_base6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__; +text: .text%__1cJAssemblerSemit_arith_operand6MipnMRegisterImpl_nHAddress_i_v_; +text: .text%__1cISubLNodeDsub6kMpknEType_3_3_; +text: .text%__1cMNativeLookupNpure_jni_name6FnMmethodHandle__pc_; +text: .text%JVM_GetMethodIxExceptionTableEntry; +text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cMnegD_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMstoreSSINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSTailCalljmpIndNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRcmpOp_commuteOperFclone6kM_pnIMachOper__; +text: .text%__1cJLoadINodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cScompU_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJAssemblerGfstp_d6MnHAddress__v_; +text: .text%__1cKciTypeFlowLStateVectorEtrap6MpnQciByteCodeStream_pnHciKlass_i_v_; +text: .text%__1cOmulF24_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQleaPIdxScaleNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRtestI_reg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_; +text: .text%__1cRsubI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cKstoreBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJLoadPNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cCosHSolarisFEventEpark6M_v_: objectMonitor_solaris.o; +text: .text%__1cKJavaThreadLnmethods_do6M_v_; +text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cXvirtual_call_RelocationJpack_data6M_i_; +text: .text%__1cNcmovI_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cZCallDynamicJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZCallDynamicJavaDirectNodeSalignment_required6kM_i_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLMachUEPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNObjectMonitorREntryQdDueue_insert6MpnMObjectWaiter_i_v_; +text: .text%__1cQorI_eReg_immNodeJnum_opnds6kM_I_: 
ad_i486_misc.o; +text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_; +text: .text%__1cPClassFileParserbFparse_constant_pool_float_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cbFunnecessary_membar_volatileNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmINodeFreloc6kM_i_; +text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNobjArrayKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cNObjectMonitorbAEntryQdDueue_SelectSuccessor6M_pnMObjectWaiter__; +text: .text%__1cRsarI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsarI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: objArrayKlass.o; +text: .text%__1cKstoreDNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cWCallLeafNoFPDirectNodeFreloc6kM_i_; +text: .text%__1cLlog2_intptr6Fi_i_: objArrayKlassKlass.o; +text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlass.o; +text: .text%jni_GetStringCritical: jni.o; +text: .text%jni_ReleaseStringCritical: jni.o; +text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cPsarI_eReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOaddF24_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvI2D_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPsarI_eReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__: callnode.o; +text: .text%__1cbACallCompiledJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cODeoptimizationYtrap_state_is_recompiled6Fi_i_; +text: .text%__1cPconvF2D_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cYMachCallCompiledJavaNodePret_addr_offset6M_i_; +text: .text%__1cNdivI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_; +text: .text%__1cRaddL_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSInterpreterRuntimeTnmethod_entry_point6FpnKJavaThread_pnNmethodOopDesc_pnHnmethod__pC_; +text: .text%__1cRandL_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHnmethodXinterpreter_entry_point6M_pC_; +text: .text%__1cFTypeDJsingleton6kM_i_; +text: .text%__1cHCompileSrethrow_exceptions6MpnIJVMState__v_; +text: .text%__1cLRethrowNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: frame.o; +text: .text%__1cIGraphKitbKcombine_and_pop_all_exception_states6M_pnNSafePointNode__: parse1.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cQshrL_eReg_CLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cURethrowExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOstackSlotIOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cMmulD_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJloadDNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateP_sub_Op_Rethrow6MpknENode__v_; +text: .text%__1cOstackSlotIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLRethrowNode2t6MpnENode_22222_v_; +text: .text%__1cFTypeDFxmeet6kMpknEType__3_; +text: .text%__1cHOrINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cKloadUBNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTcmovII_reg_LEGTNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNsubI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: 
.text%__1cZget_mirror_from_signature6FnMmethodHandle_pnPSignatureStream_pnGThread__pnHoopDesc__; +text: .text%__1cNaddP_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cURethrowExceptionNodeFreloc6kM_i_; +text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_; +text: .text%__1cQorI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cLRShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cHMonitor2t6Mipkci_v_; +text: .text%__1cQshrI_eReg_CLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEshll6MpnMRegisterImpl_i_v_; +text: .text%__1cKJavaThreadLgc_prologue6M_v_; +text: .text%__1cRaddL_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_; +text: .text%__1cLloadSSDNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPconvI2F_SSFNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFParseTprofile_switch_case6Mi_v_; +text: .text%__1cKJavaThreadLgc_epilogue6M_v_; +text: .text%__1cFParseOmerge_new_path6Mi_v_; +text: .text%__1cFParseSjump_switch_ranges6MpnENode_pnLSwitchRange_4i_v_; +text: .text%__1cTCallDynamicJavaNodeSis_CallDynamicJava6kM_pk0_: callnode.o; +text: .text%__1cNxorI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%jni_NewByteArray: jni.o; +text: .text%__1cNmulL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: cfgnode.o; +text: .text%__1cMstoreSSINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConPNodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cNmethodOopDescLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__; +text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__; +text: .text%__1cNcmovP_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKReflectionTget_exception_types6FnMmethodHandle_pnGThread__nOobjArrayHandle__; +text: .text%__1cNmethodOopDescVparameter_annotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cNinstanceKlassQmethod_index_for6kMpnNmethodOopDesc_pnGThread__i_; +text: .text%__1cQorI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOstackSlotDOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLloadSSDNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMstoreSSPNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cOstackSlotDOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cMstoreSSINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFMutexbLwait_for_lock_blocking_implementation6MpnKJavaThread__v_; +text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cRxorI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOleaPIdxOffNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRaddL_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFframeZinterpreter_frame_set_mdx6Mi_v_; +text: .text%__1cSObjectSynchronizerHinflate6FpnHoopDesc__pnNObjectMonitor__; +text: .text%__1cPstoreImmI16NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOcmpD_cc_P6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTshrL_eReg_32_63NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRandI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cRandI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: 
.text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: loopnode.o; +text: .text%__1cNObjectMonitorHRecycle6M_v_; +text: .text%__1cRandI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNandL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFBlockNset_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_; +text: .text%__1cTcmovII_reg_LTGENodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cSleaP_eReg_immINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadSNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOtypeArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cPregister_native6FnLKlassHandle_nMsymbolHandle_1pCpnGThread__i_: jni.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEmove6Mii_v_; +text: .text%__1cNcmovI_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENode2t6Mp0111111_v_; +text: .text%__1cFStateP_sub_Op_RShiftL6MpknENode__v_; +text: .text%__1cMloadConLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIAddDNodeGOpcode6kM_i_; +text: .text%__1cNmodL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cTconvD2I_reg_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: arrayKlass.o; +text: .text%__1cFParseQjump_if_fork_int6MpnENode_2nIBoolTestEmask__pnGIfNode__; +text: .text%__1cOcompP_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXis_positive_zero_double6Fd_i_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: arrayKlass.o; +text: .text%__1cNmaxI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: arrayKlass.o; +text: .text%__1cKstorePNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodDataKlass.o; +text: .text%__1cNmulI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodDataKlass.o; +text: .text%__1cHTypeAryFxdual6kM_pknEType__; +text: .text%__1cOMethodLivenessKBasicBlockFsplit6Mi_p1_; +text: .text%__1cLloadSSFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cTcmovII_reg_LTGENodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cOrepush_if_args6FpnFParse_pnENode_3_v_: parse2.o; +text: .text%__1cNaddL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOstackSlotFOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cTcmovII_reg_LTGENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOstackSlotFOperFscale6kM_i_: ad_i486.o; +text: .text%__1cSshlL_eReg_1_31NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOstackSlotFOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cJAssemblerGpushad6M_v_; +text: .text%JVM_GetCallerClass; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: bytecode.o; +text: .text%__1cIMaxINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cQorI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapXdo_return_monitor_check6M_v_; +text: .text%__1cQsalL_eReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKciTypeFlowOsplit_range_at6Mi_pn0AFRange__; +text: .text%__1cPshrI_eReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; 
+text: .text%__1cMsubD_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%jni_GetFieldID: jni.o; +text: .text%jni_IsAssignableFrom: jni.o; +text: .text%__1cISubLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNnegI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNnegI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSobjArrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cFParseXfetch_interpreter_state6MipknEType_pnENode__5_; +text: .text%__1cFParseWcheck_interpreter_type6MpnENode_pknEType_rpnNSafePointNode__2_; +text: .text%__1cJAssemblerEaddl6MnHAddress_i_v_; +text: .text%__1cQmulD_reg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTsarL_eReg_32_63NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOstackSlotDOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cJAssemblerGfstp_s6MnHAddress__v_; +text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cXroundDouble_mem_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLloadSSDNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQsalI_eReg_CLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKCMovePNodeGOpcode6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_; +text: .text%__1cNmodI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRcmpOp_commuteOperFccode6kM_i_: ad_i486_clone.o; +text: .text%__1cJloadDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSCountedLoopEndNode2t6MpnENode_2ff_v_; +text: .text%__1cPCountedLoopNode2t6MpnENode_2_v_; +text: .text%__1cNcmovP_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pCi_v_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_pkcnGHandle_6_6_; +text: .text%__1cNSCMemProjNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOmulF24_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFStateX_sub_Op_CompareAndSwapL6MpknENode__v_; +text: .text%__1cSloadL_volatileNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNtestU_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTCompareAndSwapLNode2t6MpnENode_2222_v_; +text: .text%__1cSCompareAndSwapNode2t6MpnENode_2222_v_; +text: .text%__1cOLibraryCallKitRinline_unsafe_CAS6MnJBasicType__i_; +text: .text%__1cSloadL_volatileNodeFreloc6kM_i_; +text: .text%__1cTcompareAndSwapLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cObox_handleNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cISubLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cFframeVnmethods_code_blob_do6M_v_; +text: .text%__1cYcmpL_zero_flags_LTGENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cVCallRuntimeDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_ThreadJis_daemon6FpnHoopDesc__i_; +text: .text%__1cJloadDNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopUpeeled_dom_test_elim6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cJAssemblerDhlt6M_v_; +text: .text%__1cKstoreDNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cYcmpL_zero_flags_LEGTNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerEstop6Mpkc_v_; +text: 
.text%__1cTcmovII_reg_LTGENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cQshrI_eReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframeZinterpreter_frame_set_mdp6MpC_v_; +text: .text%__1cQciByteCodeStreamFtable6MnJBytecodesECode__2_; +text: .text%__1cYinternal_word_RelocationJpack_data6M_i_; +text: .text%__1cIDivLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%jni_SetBooleanField: jni.o; +text: .text%__1cSstoreD_roundedNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cTconvD2I_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreFNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: machnode.o; +text: .text%__1cHMatcherbDinterpreter_frame_pointer_reg6F_nHOptoRegEName__; +text: .text%__1cFStateT_sub_Op_ThreadLocal6MpknENode__v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF_vc_v_; +text: .text%__1cMtlsLoadPNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cWResolveOopMapConflictsRpossible_gc_point6MpnOBytecodeStream__i_: rewriter.o; +text: .text%__1cIciObject2t6MpnHciKlass__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorDbox6Mii_v_; +text: .text%__1cSsarL_eReg_1_31NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKsplit_once6FpnMPhaseIterGVN_pnENode_333_v_: cfgnode.o; +text: .text%__1cYmulI_imm_RShift_highNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLLShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPRoundDoubleNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKstoreDNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKloadUBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMURShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cPRoundDoubleNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIGraphKitPdstore_rounding6MpnENode__2_; +text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cTCallInterpreterNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cLPhaseValuesHlongcon6Mx_pnIConLNode__; +text: .text%__1cFStateX_sub_Op_CallInterpreter6MpknENode__v_; +text: .text%__1cZCallInterpreterDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZCallInterpreterDirectNodeFreloc6kM_i_; +text: .text%__1cTCallInterpreterNodeSis_CallInterpreter6kM_pk0_: classes.o; +text: .text%__1cHMatcherbAinterpreter_method_oop_reg6F_nHOptoRegEName__; +text: .text%__1cGciType2t6MpnHciKlass__v_; +text: .text%__1cHCompilebMGenerate_Compiled_To_Interpreter_Graph6MpknITypeFunc_pC_v_; +text: .text%__1cMdecI_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHMatcherXcompiler_method_oop_reg6F_nHOptoRegEName__; +text: .text%__1cXMachCallInterpreterNodeWis_MachCallInterpreter6M_p0_: ad_i486_misc.o; +text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__: classes.o; +text: .text%__1cIciMethodRinterpreter_entry6M_pC_; +text: .text%__1cXjvm_define_class_common6FpnHJNIEnv__pkcpnI_jobject_pkWi53pnGThread__pnH_jclass__: jvm.o; +text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__; +text: .text%__1cUciInstanceKlassKlassEmake6F_p0_; +text: 
.text%__1cIciObjectUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cLLShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSTailCalljmpIndNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOCallNativeNodeGOpcode6kM_i_; +text: .text%__1cOstackSlotIOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cIciSymbolHbyte_at6Mi_i_; +text: .text%__1cLregDPR1OperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cZCallDynamicJavaDirectNodePcompute_padding6kMi_i_; +text: .text%__1cZCallDynamicJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cSTailCalljmpIndNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cIModINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSTailCalljmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQorI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXMachCallDynamicJavaNodePret_addr_offset6M_i_; +text: .text%__1cJLoadPNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cNcmpL_EQdDNENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNcmpL_EQdDNENodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXroundDouble_mem_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_; +text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__; +text: .text%__1cOstoreF_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cOmulIS_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSalign_to_page_size6FI_I_: heap.o; +text: .text%__1cOaddF24_regNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cMstoreSSPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvI2D_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcinMsymbolHandle_4nGHandle_6_v_; +text: .text%__1cNmulI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_; +text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__; +text: .text%__1cNmulI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRandI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__; +text: .text%__1cTcmovII_reg_EQdDNENodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cTcmovII_reg_LEGTNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJChunkPoolMfree_all_but6MI_v_: allocation.o; +text: .text%__1cRandL_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNxorI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOaddF24_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%JVM_MonitorWait; +text: .text%__1cNObjectMonitorEwait6MxipnGThread__v_; +text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlassKlass.o; +text: .text%__1cZInterpreterMacroAssemblerYtest_method_data_pointer6MpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cMmulD_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQshrL_eReg_CLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNdivI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_; +text: 
.text%__1cIMulDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__; +text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_; +text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_: markSweep.o; +text: .text%__1cNSingletonBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_: icBuffer.o; +text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_: icBuffer.o; +text: .text%__1cSleaP_eReg_immINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHciKlassMis_interface6M_i_: ciTypeArrayKlass.o; +text: .text%__1cNcmpL_LEGTNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateQ_sub_Op_URShiftL6MpknENode__v_; +text: .text%__1cNcmpL_LEGTNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIModLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_i_v_; +text: .text%__1cHMatcherXinterpreter_arg_ptr_reg6F_nHOptoRegEName__; +text: .text%__1cFciEnvWis_dependence_violated6FpnMklassOopDesc_pnNmethodOopDesc__i_; +text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_; +text: .text%__1cSCompiledStaticCallIis_clean6kM_i_; +text: .text%__1cJloadLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLOptoRuntimeVresolve_static_call_C6FpnKJavaThread__pC_; +text: .text%__1cJAssemblerEnegl6MpnMRegisterImpl__v_; +text: .text%__1cOmulF24_regNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cRsubL_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNloadConL0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsubL_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRmulI_imm_highNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_; +text: .text%__1cFParseHdo_irem6M_v_; +text: .text%__1cHi2bNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHBoxNodeGOpcode6kM_i_; +text: .text%__1cQmulD_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXjava_lang_ref_ReferenceOset_discovered6FpnHoopDesc_2_v_; +text: .text%__1cTcmovII_reg_EQdDNENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cTcmovII_reg_EQdDNENodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cRtestI_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNtestI_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJAssemblerFpopfd6M_v_; +text: .text%__1cUParallelScavengeHeapIcapacity6kM_I_; +text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__; +text: .text%__1cQset_lwp_priority6Fiii_i_; +text: .text%__1cbDreorder_based_on_method_index6FpnPobjArrayOopDesc_1ppnHoopDesc__v_: methodOop.o; +text: .text%__1cNmaxI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMLinkResolverbPlinktime_resolve_interface_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__; +text: .text%__1cPconvF2D_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNIdealLoopTreeXpolicy_maximally_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cbCAbstractInterpreterGeneratorVset_vtos_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cKciTypeFlowLStateVectorLdo_newarray6MpnQciByteCodeStream__v_; +text: 
.text%__1cTmembar_volatileNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cNxorI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cPconvI2F_SSFNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMjniIdSupportNto_jmethod_id6FpnNmethodOopDesc__pnK_jmethodID__; +text: .text%__1cMjniIdPrivateGid_for6FnTinstanceKlassHandle_i_i_: jniId.o; +text: .text%__1cNget_method_id6FpnHJNIEnv__pnH_jclass_pkc5ipnGThread__pnK_jmethodID__: jni.o; +text: .text%__1cICmpFNodeGOpcode6kM_i_; +text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__; +text: .text%__1cRmulI_imm_highNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSstring_compareNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIAddLNodeIadd_ring6kMpknEType_3_3_; +text: .text%jni_NewStringUTF: jni.o; +text: .text%__1cQorI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVloadConL_low_onlyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIUniverseWreinitialize_vtable_of6FpnFKlass_pnGThread__v_; +text: .text%__1cPshlI_eReg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%JVM_SetClassSigners; +text: .text%JVM_DefineClassWithSource; +text: .text%__1cJAssemblerGpushfd6M_v_; +text: .text%__1cGEventsDlog6FpkcE_v_: sharedRuntime.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cNloadConL0NodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cNmulI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulINodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cIMulINodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cNmulI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cHAddress2t6MinJrelocInfoJrelocType__v_; +text: .text%__1cTsarL_eReg_32_63NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cTconvF2I_reg_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceRefKlass.o; +text: .text%__1cNsubL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVCallRuntimeDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRaddI_mem_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cODeoptimizationVtrap_state_has_reason6Fii_i_; +text: .text%__1cNaddL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cVCallRuntimeDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRmulI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLloadSSDNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cS__ieee754_rem_pio26Fdpd_i_: sharedRuntimeTrig.o; +text: .text%__1cQshrL_eReg_CLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cObox_handleNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cTshlL_eReg_32_63NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cTmembar_volatileNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRtestI_reg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_; +text: .text%__1cMstoreSSINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_; +text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FIi_pnGThread__; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: connode.o; +text: .text%__1cCosbBthread_local_storage_at_put6Fipv_v_; +text: .text%__1cLConvF2INodeGOpcode6kM_i_; +text: 
.text%__1cENodeHis_Bool6M_pnIBoolNode__: connode.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: cfgnode.o; +text: .text%get_thread; +text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_; +text: .text%jni_CallIntMethod: jni.o; +text: .text%__1cPconvF2D_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOjmpLoopEndNodeGnegate6M_v_: ad_i486_misc.o; +text: .text%__1cHCompileRmake_vm_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: node.o; +text: .text%__1cJCMoveNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRsubI_eReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLloadSSFNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: node.o; +text: .text%__1cMnegD_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHnmethodPis_dependent_on6MpnMklassOopDesc__i_; +text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: node.o; +text: .text%__1cSsarL_eReg_1_31NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cTshrL_eReg_32_63NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPhaseIdealLoopOdo_range_check6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cNRelocIteratorEnext6M_i_: sharedRuntime.o; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_deopt_entry_for6MnITosState_i_pC_; +text: .text%__1cLOptoRuntimeThandle_wrong_method6FpnKJavaThread__pC_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_return_entry_for6MnITosState_i_pC_; +text: .text%__1cTmembar_volatileNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cKstoreFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUPSMarkSweepDecoratorHcompact6Mi_v_; +text: .text%__1cUPSMarkSweepDecoratorPadjust_pointers6M_v_; +text: .text%__1cTcmovII_reg_EQdDNENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPconvI2F_SSFNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cUPSMarkSweepDecoratorVdestination_decorator6F_p0_; +text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_; +text: .text%__1cUPSMarkSweepDecoratorKprecompact6M_v_; +text: .text%__1cQshrI_eReg_CLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadDNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cICmpDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOaddF24_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cIDivDNodeGOpcode6kM_i_; +text: .text%__1cOmulF24_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cOaddF24_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOmulF24_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHCompileQgrow_alias_types6M_v_; +text: .text%__1cFParseScreate_jump_tables6MpnENode_pnLSwitchRange_4_i_; +text: .text%__1cQjava_lang_ThreadMis_stillborn6FpnHoopDesc__i_; +text: .text%JVM_GetClassCPTypes; +text: .text%__1cUverify_byte_codes_fn6F_pv_: verifier.o; +text: .text%JVM_GetClassMethodsCount; +text: .text%JVM_GetClassCPEntriesCount; +text: .text%__1cOstoreF_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%JVM_GetClassFieldsCount; +text: .text%__1cQshrL_eReg_CLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKstoreBNodeFreloc6kM_i_; +text: .text%__1cPconvI2F_SSFNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_; +text: .text%__1cMdecI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGEventsDlog6FpkcE_v_: thread.o; +text: .text%__1cPconvI2L_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOMacroAssemblerKverify_FPU6Mipkc_v_; +text: .text%__1cJLoadFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMloadConDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstoreBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQaddD_reg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerGmembar6M_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6MX_v_; +text: .text%__1cXSignatureHandlerLibraryLset_handler6FpnKCodeBuffer__pC_; +text: .text%__1cNcmovP_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_; +text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_; +text: .text%__1cFJNIidEfind6Mi_p0_; +text: .text%__1cbBinitialize_itable_for_klass6FpnMklassOopDesc__v_; +text: .text%__1cJCmpD3NodeGOpcode6kM_i_; +text: .text%__1cXconvI2L_reg_reg_zexNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbFunnecessary_membar_volatileNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%JVM_IsPrimitiveClass; +text: .text%__1cIMinINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRsubL_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cNSharedRuntimeDd2l6Fd_x_; +text: .text%__1cRsubL_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_; +text: .text%__1cRsubL_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%jni_FindClass: jni.o; +text: .text%__1cPmovI_nocopyNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cbDjava_lang_reflect_ConstructorThas_signature_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorVhas_annotations_field6F_i_; +text: .text%__1cHCompilebMGenerate_Interpreter_To_Compiled_Graph6MpknITypeFunc__v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorbFhas_parameter_annotations_field6F_i_; +text: .text%__1cFStateM_sub_Op_RegD6MpknENode__v_; +text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cUCallCompiledJavaNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cGThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cGThreadVis_jvmti_agent_thread6kM_i_: thread.o; +text: .text%__1cbACallCompiledJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cbACallCompiledJavaDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKCompiledICMset_to_clean6M_v_; +text: .text%__1cTcmovII_reg_LEGTNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_2_v_; +text: .text%__1cJloadFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__; +text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__; +text: .text%__1cFParseSjump_if_false_fork6MpnGIfNode_ii_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cbACallCompiledJavaDirectNodeFreloc6kM_i_; +text: .text%__1cUPipeline_Use_Element2t6MIIIinXPipeline_Use_Cycle_Mask__v_: ad_i486_pipeline.o; +text: 
.text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cFStateY_sub_Op_CallCompiledJava6MpknENode__v_; +text: .text%__1cXPipeline_Use_Cycle_Mask2t6MI_v_: ad_i486_pipeline.o; +text: .text%__1cMsubD_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIDivLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSleaP_eReg_immINodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cKstoreFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTshlL_eReg_32_63NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_2_v_; +text: .text%__1cNcmovI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRandL_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSstoreD_roundedNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSstoreD_roundedNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_MinI6MpknENode__v_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cNminI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFMutex2T6M_v_; +text: .text%lwp_mutex_destroy: os_solaris.o; +text: .text%__1cQmulD_reg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHi2bNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHMonitor2T6M_v_; +text: .text%__1cIimmIOperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cIMinINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cIAddFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%lwp_cond_destroy: os_solaris.o; +text: .text%__1cLConvD2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMmulD_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMstoreSSPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cISubLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cINodeHashEgrow6M_v_; +text: .text%__1cMnegD_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_; +text: .text%__1cIMulINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cICodeBlobOis_java_method6kM_i_: codeBlob.o; +text: .text%__1cLConvI2FNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOstackSlotPOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cJCMoveNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKLoadPCNodeGOpcode6kM_i_; +text: .text%__1cRaddI_mem_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKCMoveINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRcmpOp_commuteOperHgreater6kM_i_: ad_i486_clone.o; +text: .text%__1cNObjectMonitorGenter26MpnGThread__v_; +text: .text%__1cNmodL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRcmpOp_commuteOperGnegate6M_v_: ad_i486_clone.o; +text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_; +text: .text%__1cQaddD_reg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFTypeFFxmeet6kMpknEType__3_; +text: .text%__1cYcmpL_zero_flags_LTGENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRmulI_imm_highNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJCMoveNode2t6MpnENode_22pknEType__v_: connode.o; +text: 
.text%__1cJCMoveNodeEmake6FpnENode_222pknEType__p0_; +text: .text%__1cOPhaseIdealLoopVinsert_pre_post_loops6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cLloadSSDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKCodeBufferWinsert_double_constant6Md_pC_; +text: .text%__1cXroundDouble_mem_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubD_regNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__; +text: .text%__1cICodeBlobJis_zombie6kM_i_: onStackReplacement.o; +text: .text%__1cWis_positive_one_double6Fd_i_; +text: .text%__1cNaddP_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__; +text: .text%__1cXcmpL_reg_flags_LEGTNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPciObjectFactoryPinsert_non_perm6Mrpn0ANNonPermObject_pnHoopDesc_pnIciObject__v_; +text: .text%__1cFframeVshould_be_deoptimized6kM_i_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlass.o; +text: .text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_: callnode.o; +text: .text%__1cMincI_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlass.o; +text: .text%__1cSleaP_eReg_immINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJEventMark2t6MpkcE_v_: psMarkSweep.o; +text: .text%__1cKloadUBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJAssemblerDjmp6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cISubDNodeGOpcode6kM_i_; +text: .text%__1cNObjectMonitor2t6M_v_; +text: .text%__1cTconvD2I_reg_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_; +text: .text%__1cOcmpD_cc_P6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvD2FNodeGOpcode6kM_i_; +text: .text%__1cTmembar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSshrL_eReg_1_31NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%JVM_MonitorNotify; +text: .text%__1cHMatcherXpost_store_load_barrier6FpknENode__i_; +text: .text%__1cFParseNdo_instanceof6M_v_; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRuntime.o; +text: .text%__1cSsarL_eReg_1_31NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl_i_v_; +text: .text%__1cSmulF24_reg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIGraphKitOgen_instanceof6MpnENode_2_2_; +text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cLTypeInstPtrRcast_to_exactness6kMi_pknEType__; +text: .text%__1cYjava_lang_reflect_MethodbFhas_parameter_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbChas_annotation_default_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodVhas_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodThas_signature_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_; +text: .text%__1cNCallGeneratorSfor_predicted_call6FpnHciKlass_p03_3_; +text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_; +text: 
.text%__1cZCallDynamicJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFStateN_sub_Op_LoadF6MpknENode__v_; +text: .text%__1cNandI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKReflectionKnew_method6FnMmethodHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cTconvD2I_reg_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cWPredictedCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cWPredictedCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cWPredictedCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cNmethodOopDescSannotation_default6kM_pnQtypeArrayOopDesc__; +text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__; +text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cMstoreSSPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%jni_GetMethodID: jni.o; +text: .text%__1cOstackSlotPOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cOGenerateOopMapKpp_new_ref6MpnNCellTypeState_i_v_; +text: .text%__1cRmulI_imm_highNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFParseTjump_if_always_fork6Mii_v_; +text: .text%__1cMnegD_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFStateW_sub_Op_MemBarVolatile6MpknENode__v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: typeArrayKlass.o; +text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_; +text: .text%__1cHRegMask2t6M_v_: matcher.o; +text: .text%__1cJLoadSNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cPconvI2D_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_GetClassDeclaredConstructors; +text: .text%__1cINegDNodeGOpcode6kM_i_; +text: .text%__1cIciObjectOis_method_data6M_i_: ciObjectFactory.o; +text: .text%__1cNCallGeneratorRfor_uncommon_trap6FpnIciMethod_nODeoptimizationLDeoptReason_n0CLDeoptAction__p0_; +text: .text%__1cIciObjectJis_method6M_i_: ciObjectFactory.o; +text: .text%__1cNmodI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateP_sub_Op_LShiftL6MpknENode__v_; +text: .text%__1cNmodI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLConvI2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: callGenerator.o; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceRefKlass.o; +text: .text%__1cKcmpOpUOperFequal6kM_i_: ad_i486_clone.o; +text: .text%__1cZUncommonTrapCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciObjectFactory.o; +text: .text%__1cFStateM_sub_Op_ModI6MpknENode__v_; +text: .text%__1cMloadConDNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cPmovI_nocopyNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIci2bNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMVirtualSpaceQuncommitted_size6kM_I_; +text: .text%__1cMVirtualSpaceJexpand_by6MI_i_; +text: .text%__1cJAssemblerFfld_d6MnHAddress__v_; +text: .text%__1cJloadFNodeFreloc6kM_i_; +text: .text%__1cOaddF24_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMincI_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cVCallRuntimeDirectNodeKmethod_set6Mi_v_; +text: .text%__1cRaddI_eReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOGenerateOopMapKinit_state6M_v_; +text: .text%__1cTcmovII_reg_LTGENodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; 
+text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_; +text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_; +text: .text%__1cSTailCalljmpIndNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNinstanceKlassSregister_finalizer6FpnPinstanceOopDesc_pnGThread__2_; +text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_; +text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_; +text: .text%__1cMstoreSSPNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_; +text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_; +text: .text%__1cOmulF24_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapTmethodsig_to_effect6MpnNsymbolOopDesc_ipnNCellTypeState__i_; +text: .text%__1cQsalI_eReg_CLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNdivI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOGenerateOopMapKinterp_all6M_v_; +text: .text%__1cNandI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPClassFileParserXverify_unqualified_name6MpcIi_i_; +text: .text%__1cUGenericGrowableArrayKraw_remove6MpknEGrET__v_; +text: .text%__1cOcmovI_regUNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMmulD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_SystemSin_offset_in_bytes6F_i_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cFTypeFJsingleton6kM_i_; +text: .text%__1cRandL_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_SystemTout_offset_in_bytes6F_i_; +text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_; +text: .text%__1cPconvI2D_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: memnode.o; +text: .text%__1cNmulI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_; +text: .text%__1cQsalL_eReg_CLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_; +text: .text%__1cNdivL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMaxINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_; +text: .text%__1cRsubL_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvF2D_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegD_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTAbstractInterpreterLdeopt_entry6FnITosState_i_pC_; +text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_; +text: .text%__1cQshrL_eReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_: interpreterRuntime.o; +text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_; +text: .text%__1cNcmovP_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQorI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceRefKlass.o; +text: .text%__1cMdecI_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSstring_compareNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cRInterpreterOopMapIis_empty6M_i_; +text: .text%__1cJMarkSweepSMarkAndPushClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cTsarL_eReg_32_63NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNcmovP_regNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_; 
+text: .text%__1cIMulDNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cOmulF24_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMsubD_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_; +text: .text%__1cRjni_invoke_static6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cMsubD_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOjmpLoopEndNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cJAssemblerFbswap6MpnMRegisterImpl__v_; +text: .text%__1cTconvF2I_reg_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMstoreSSPNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOjmpLoopEndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_; +text: .text%__1cOjmpLoopEndNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cbFunnecessary_membar_volatileNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cIGraphKitXinsert_mem_bar_volatile6MpnKMemBarNode_i_v_; +text: .text%__1cRsubI_eReg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_ThreadIis_alive6FpnHoopDesc__i_; +text: .text%__1cFStateX_sub_Op_CallDynamicJava6MpknENode__v_; +text: .text%JVM_IsThreadAlive; +text: .text%__1cQMachCallJavaNodeVis_MachCallStaticJava6M_pnWMachCallStaticJavaNode__: ad_i486_misc.o; +text: .text%__1cZCallDynamicJavaDirectNodeFreloc6kM_i_; +text: .text%__1cTcmovII_reg_LTGENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cYcmpL_zero_flags_LTGENodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEdecl6MpnMRegisterImpl__v_; +text: .text%__1cVLoaderConstraintTableYextend_loader_constraint6MpnVLoaderConstraintEntry_nGHandle_pnMklassOopDesc__v_; +text: .text%__1cUVirtualCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNCallGeneratorQfor_virtual_call6FpnIciMethod__p0_; +text: .text%__1cVLoaderConstraintTablebHensure_loader_constraint_capacity6MpnVLoaderConstraintEntry_i_v_; +text: .text%__1cIModLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cQOopMapCacheEntryFflush6M_v_; +text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_; +text: .text%__1cQOopMapCacheEntryTdeallocate_bit_mask6M_v_; +text: .text%__1cQOopMapCacheEntryRallocate_bit_mask6M_v_; +text: .text%__1cOPSPromotionLABRunallocate_object6MpnHoopDesc__i_; +text: .text%__1cICodeHeapTmark_segmap_as_free6MII_v_; +text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cNsubI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cICodeHeapJexpand_by6MI_i_; +text: .text%__1cTcmovII_reg_LEGTNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cJAssemblerFfld_s6MnHAddress__v_; +text: .text%__1cOstoreF_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cVscale_to_lwp_priority6Fiii_i_: os_solaris.o; +text: .text%__1cMdivD_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddI_mem_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cMdivD_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNxorI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQmulI_mem_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQmulI_mem_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQorI_eReg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cMsubD_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cUVirtualCallGeneratorKis_virtual6kM_i_: 
callGenerator.o; +text: .text%__1cPconvI2F_SSFNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRcmpOp_commuteOperKless_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cIci2bNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJMemRegionMintersection6kMk0_0_; +text: .text%__1cIDivLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMincI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__; +text: .text%__1cQmulD_reg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_ConD6MpknENode__v_; +text: .text%__1cSaddD_reg_roundNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNmodL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNRelocIteratorEnext6M_i_: output.o; +text: .text%__1cYinternal_word_RelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cNaddI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cCosHSolarisKvm_signals6F_pnIsigset_t__; +text: .text%__1cODataRelocationGoffset6M_i_: relocInfo.o; +text: .text%__1cODataRelocationJset_value6MpC_v_: relocInfo.o; +text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_: icBuffer.o; +text: .text%__1cFStateO_sub_Op_CMoveI6MpknENode__v_; +text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_: icBuffer.o; +text: .text%__1cSdivD_reg_roundNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cTOopMapForCacheEntry2t6MnMmethodHandle_ipnQOopMapCacheEntry__v_; +text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_; +text: .text%__1cGICStubIfinalize6M_v_; +text: .text%__1cCosScurrent_stack_base6F_pC_; +text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_; +text: .text%__1cGThread2t6M_v_; +text: .text%__1cCosScurrent_stack_size6F_I_; +text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_; +text: .text%__1cJAssemblerExorl6MpnMRegisterImpl_2_v_; +text: .text%__1cCosHSolarisRunblocked_signals6F_pnIsigset_t__; +text: .text%__1cJStubQdDueueMremove_first6M_v_; +text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_; +text: .text%__1cLMoveL2DNodeGOpcode6kM_i_; +text: .text%__1cIOSThreadNpd_initialize6M_v_; +text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_; +text: .text%__1cCosScurrent_process_id6F_i_; +text: .text%__1cIOSThread2t6MpFpv_i1_v_; +text: .text%__1cKRelocationRpd_set_data_value6MpCi_v_; +text: .text%__1cTshlL_eReg_32_63NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKConv2BNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_; +text: .text%__1cTOopMapForCacheEntryOreport_results6kM_i_: oopMapCache.o; +text: .text%__1cCosRinitialize_thread6F_v_; +text: .text%__1cFTypeDGis_nan6kM_i_; +text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_; +text: .text%__1cCosMstart_thread6FpnGThread__v_; +text: .text%_start: os_solaris.o; +text: .text%__1cYcmpL_zero_flags_LTGENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_SubL6MpknENode__v_; +text: .text%__1cLStrCompNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%JVM_SetThreadPriority; +text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_; +text: .text%jni_NewObjectArray: jni.o; +text: .text%__1cKCompiledICKcached_oop6kM_pnHoopDesc__; +text: .text%__1cTcmovII_reg_EQdDNENodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_MulI6MpknENode__v_; +text: .text%__1cMPipeline_Use2t6MIIIpnUPipeline_Use_Element__v_: ad_i486_pipeline.o; +text: .text%__1cCosPpd_start_thread6FpnGThread__v_; +text: .text%__1cNsubL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; 
+text: .text%__1cCosNcreate_thread6FpnGThread_n0AKThreadType_I_i_; +text: .text%__1cPRoundDoubleNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cFParseScan_rerun_bytecode6M_i_; +text: .text%__1cXjava_lang_reflect_FieldThas_signature_field6F_i_; +text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__; +text: .text%__1cDhpiEread6FipvI_I_: jvm.o; +text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cXjava_lang_reflect_FieldVhas_annotations_field6F_i_; +text: .text%__1cMstoreSSINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cSmembar_releaseNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__; +text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_; +text: .text%__1cKReflectionJnew_field6FpnPfieldDescriptor_ipnGThread__pnHoopDesc__; +text: .text%__1cRtestI_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPhaseIdealLoopKdo_peeling6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%JVM_GetStackAccessControlContext; +text: .text%__1cIPipeline2t6MIIiIIiiiikpnSmachPipelineStages_3kpInMPipeline_Use__v_: ad_i486_pipeline.o; +text: .text%JVM_Read; +text: .text%__1cOJavaAssertionsNmatch_package6Fpkc_pn0AKOptionList__; +text: .text%__1cHciKlassOsuper_of_depth6MI_p0_; +text: .text%JVM_DesiredAssertionStatus; +text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_; +text: .text%__1cOcmpF_cc_P6NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOJavaAssertionsHenabled6Fpkci_i_; +text: .text%__1cOcmpF_cc_P6NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeFreloc6kM_i_; +text: .text%__1cWThreadLocalAllocBufferMinitial_size6F_I_; +text: .text%__1cQaddD_reg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%jni_GetStaticFieldID: jni.o; +text: .text%__1cOJavaAssertionsLmatch_class6Fpkc_pn0AKOptionList__: javaAssertions.o; +text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__; +text: .text%__1cENodeEgetf6kM_f_; +text: .text%__1cLConvL2FNodeGOpcode6kM_i_; +text: .text%__1cMFlatProfilerJis_active6F_i_; +text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_; +text: .text%__1cIjniIdMapGcreate6FnTinstanceKlassHandle__p0_; +text: .text%__1cQorI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_; +text: .text%__1cKJavaThreadKinitialize6M_v_; +text: .text%__1cLConvL2DNodeGOpcode6kM_i_; +text: .text%__1cNloadConL0NodeFclone6kM_pnENode__; +text: .text%__1cLConvF2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cGParker2t6M_v_; +text: .text%__1cQThreadStatistics2t6M_v_; +text: .text%__1cIAddDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJimmL0OperFclone6kM_pnIMachOper__; +text: .text%__1cENodeGis_Con6kM_I_: loopnode.o; +text: .text%__1cNjni_functions6F_pknTJNINativeInterface___; +text: .text%__1cLConvF2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIjniIdMap2t6MpnMklassOopDesc_i_v_; +text: .text%__1cIjniIdMapRcompute_index_cnt6FnTinstanceKlassHandle__i_; +text: 
.text%__1cLjniIdBucket2t6MpnIjniIdMap_p0_v_; +text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: loopnode.o; +text: .text%__1cUThreadSafepointState2t6MpnKJavaThread__v_; +text: .text%__1cIGraphKitSprecision_rounding6MpnENode__2_; +text: .text%__1cCosMguard_memory6FpcI_i_; +text: .text%__1cKJavaThreadYcreate_stack_guard_pages6M_v_; +text: .text%__1cNaddL_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cOaddF24_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKJavaThread2t6MpFp0pnGThread__vI_v_; +text: .text%__1cSmulF24_reg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQorl_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__; +text: .text%jni_GetFloatArrayRegion: jni.o; +text: .text%__1cKJavaThreadDrun6M_v_; +text: .text%__1cKJavaThreadRthread_main_inner6M_v_; +text: .text%__1cQSystemDictionaryQjava_mirror_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cKReflectionbFbasic_type_mirror_to_basic_type6FpnHoopDesc_pnGThread__nJBasicType__; +text: .text%__1cM__kernel_cos6Fdd_d_: sharedRuntimeTrig.o; +text: .text%__1cJAssemblerFcmovl6Mn0AJCondition_pnMRegisterImpl_3_v_; +text: .text%__1cM__kernel_sin6Fddi_d_: sharedRuntimeTrig.o; +text: .text%__1cSleaP_eReg_immINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPhaseIdealLoopJclone_iff6MpnHPhiNode_pnNIdealLoopTree__pnIBoolNode__; +text: .text%__1cGThreadFstart6Fp0_v_; +text: .text%__1cNSharedRuntimeEdsin6Fd_d_; +text: .text%__1cNSharedRuntimeEdcos6Fd_d_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: multnode.o; +text: .text%jni_CallStaticVoidMethod: jni.o; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__; +text: .text%__1cSshlL_eReg_1_31NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_; +text: .text%__1cNdecI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTconvF2I_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNcmovP_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEsbbl6MnHAddress_i_v_; +text: .text%__1cFKlassNexternal_name6kM_pkc_; +text: .text%__1cHRegMask2t6Miiiii_v_: ad_i486_expand.o; +text: .text%__1cLConvD2INodeJideal_reg6kM_I_: classes.o; +text: .text%JVM_IsArrayClass; +text: .text%__1cVloadConL_low_onlyNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cHTypePtrFxdual6kM_pknEType__; +text: .text%__1cSMachBreakpointNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlass.o; +text: .text%__1cICmpDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cVMoveL2D_reg_stackNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQshrI_eReg_CLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRmulI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: output.o; +text: .text%JVM_GetClassName; +text: .text%__1cSsarL_eReg_1_31NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cQorl_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2D_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__; +text: .text%__1cFJNIid2t6MpnMklassOopDesc_ip0_v_; +text: .text%__1cENodeHdel_out6Mp0_v_: mulnode.o; +text: 
.text%__1cSstring_compareNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateM_sub_Op_RegF6MpknENode__v_; +text: .text%__1cPmovI_nocopyNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulFNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cNinstanceKlassPjni_id_for_impl6FnTinstanceKlassHandle_i_pnFJNIid__; +text: .text%__1cOcmpF_cc_P6NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKRelocationYpd_get_address_from_code6M_pC_; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRuntime.o; +text: .text%__1cSmulF24_reg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cbFunnecessary_membar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmulF24_reg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNtestU_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cbFunnecessary_membar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerDjmp6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cJArrayDataKcell_count6M_i_: ciMethodData.o; +text: .text%JVM_Open; +text: .text%__1cOmulIS_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__; +text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_; +text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_; +text: .text%JVM_StartThread; +text: .text%__1cQComputeCallStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_: interpreterRuntime.o; +text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_; +text: .text%__1cGICStubLdestination6kM_pC_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cYcmpL_zero_flags_LEGTNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLConvD2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRxorI_eReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_; +text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_; +text: .text%jni_GetStaticObjectField: jni.o; +text: .text%__1cQmulI_mem_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cScompI_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOstackSlotDOperFscale6kM_i_: ad_i486.o; +text: .text%__1cOstackSlotDOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cMmulD_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOstackSlotDOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cXPartialSubtypeCheckNodeGOpcode6kM_i_; +text: .text%__1cQmulD_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOstackSlotPOperFscale6kM_i_: ad_i486.o; +text: .text%__1cOstackSlotPOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOstackSlotPOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOGenerateOopMapMdo_checkcast6M_v_; +text: .text%__1cQmulI_mem_immNodeOmemory_operand6kM_pknIMachOper__; +text: 
.text%__1cPconvI2D_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRandL_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTAbstractInterpreterWlayout_activation_impl6FpnNmethodOopDesc_iiiipnFframe_4i_i_; +text: .text%JVM_TotalMemory; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: generateOptoStub.o; +text: .text%JVM_FreeMemory; +text: .text%__1cObox_handleNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMaxINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cTshlL_eReg_32_63NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQmulI_mem_immNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cQmulI_mem_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvI2F_SSFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJScopeDescTdecode_scope_values6Mi_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_; +text: .text%__1cKJavaThread2T6M_v_; +text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_; +text: .text%__1cIOSThreadKpd_destroy6M_v_; +text: .text%__1cIOSThread2T6M_v_; +text: .text%__1cCosLfree_thread6FpnIOSThread__v_; +text: .text%__1cCosOunguard_memory6FpcI_i_; +text: .text%__1cHRetDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cGParker2T6M_v_; +text: .text%__1cQorI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRmulI_imm_highNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHThreadsGremove6FpnKJavaThread__v_; +text: .text%__1cKJavaThreadEexit6Mi_v_; +text: .text%__1cKloadUBNodeFreloc6kM_i_; +text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o; +text: .text%__1cGThread2T5B6M_v_; +text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_; +text: .text%__1cNThreadServiceNremove_thread6FpnKJavaThread_i_v_; +text: .text%__SLIP.DELETER__A: thread.o; +text: .text%__1cUThreadSafepointState2T6M_v_; +text: .text%__1cFStateT_sub_Op_RoundDouble6MpknENode__v_; +text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: klassKlass.o; +text: .text%__1cIGraphKitTdprecision_rounding6MpnENode__2_; +text: .text%__1cHOrLNodeGOpcode6kM_i_; +text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_: cpCacheKlass.o; +text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_: constantPoolKlass.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: library_call.o; +text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_: objArrayKlassKlass.o; +text: .text%__1cLmethodKlassOklass_oop_size6kM_i_: methodKlass.o; +text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_: methodDataKlass.o; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: generateOopMap.o; +text: .text%__1cTcmovII_reg_EQdDNENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cSTailCalljmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: arrayKlassKlass.o; +text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_: instanceKlassKlass.o; +text: .text%__1cNcmovL_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_: constMethodKlass.o; +text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_: symbolKlass.o; +text: .text%__1cMTailCallNode2t6MpnENode_222222_v_; +text: .text%__1cKstoreLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cFStateQ_sub_Op_TailCall6MpknENode__v_; +text: 
.text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_: compiledICHolderKlass.o; +text: .text%__1cLeDIRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cPmovI_nocopyNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSTailCalljmpIndNodeFreloc6kM_i_; +text: .text%__1cQChunkPoolCleanerEtask6M_v_: allocation.o; +text: .text%__1cNandI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cFStateO_sub_Op_StoreF6MpknENode__v_; +text: .text%__1cQaddD_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTsarL_eReg_32_63NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_CallObjectMethod: jni.o; +text: .text%__1cNaddP_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNaddP_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovI_regUNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLOptoRuntimeMrethrow_Type6F_pknITypeFunc__; +text: .text%__1cENodeJis_MemBar6kM_pknKMemBarNode__: classes.o; +text: .text%__1cMstoreSSPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUGenericGrowableArrayUclear_and_deallocate6M_v_; +text: .text%__1cRandI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cSsarL_eReg_1_31NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cRCardTableModRefBSPclear_MemRegion6MnJMemRegion__v_; +text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_; +text: .text%__1cRInlineCacheBufferLnew_ic_stub6F_pnGICStub__; +text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cVMoveL2D_reg_stackNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cObox_handleNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_; +text: .text%__1cGICStubIset_stub6MpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cTcmovII_reg_LEGTNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: onStackReplacement.o; +text: .text%__1cObox_handleNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: interpreterRuntime.o; +text: .text%__1cSaddD_reg_roundNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQorI_eReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmulF24_reg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSaddD_reg_roundNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOmulF24_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHi2bNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQjava_lang_StringbHcreate_from_platform_depended_str6FpkcpnGThread__nGHandle__; +text: .text%__1cOLibraryCallKitYinline_native_time_funcs6Mi_i_; +text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_; +text: .text%__1cLOptoRuntimeYcurrent_time_millis_Type6F_pknITypeFunc__; +text: .text%__1cIPSOldGenHcompact6M_v_; +text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_; +text: .text%__1cOcmpF_cc_P6NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIPSOldGenPadjust_pointers6M_v_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: ciTypeFlow.o; +text: .text%__1cNmulI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cKdirectOperFscale6kM_i_: ad_i486_clone.o; +text: .text%__1cQorI_eReg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQorI_eReg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQObjectStartArrayFreset6M_v_; +text: .text%__1cFParseVcatch_call_exceptions6MrnYciExceptionHandlerStream__v_; +text: 
.text%__1cMsubD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJvmSymbolsHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cSReferenceProcessorHoops_do6MpnKOopClosure__v_; +text: .text%__1cIci2bNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIregDOperFclone6kM_pnIMachOper__; +text: .text%JVM_MonitorNotifyAll; +text: .text%__1cLConvI2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKConv2BNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNmaxI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMelapsedTimer2t6M_v_: phase.o; +text: .text%__1cSvframeArrayElementDbci6kM_i_; +text: .text%__1cIDivDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMloadConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cODeoptimizationYquery_update_method_data6FnQmethodDataHandle_in0ALDeoptReason_rIri4_pnLProfileData__; +text: .text%__1cKstoreDNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNminI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMnegD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNPhaseRegAllocHset_oop6MpknENode_i_v_; +text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cFStateM_sub_Op_MaxI6MpknENode__v_; +text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_: jni.o; +text: .text%__1cWis_positive_zero_float6Ff_i_; +text: .text%__1cTcmovII_reg_LTGENodeQuse_cisc_RegMask6M_v_; +text: .text%__1cSPerfStringConstant2t6MnJCounterNS_pkc3_v_; +text: .text%__1cIMulDNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cQorI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__; +text: .text%__1cOMacroAssemblerFleave6M_v_; +text: .text%__1cMloadConDNodeFclone6kM_pnENode__; +text: .text%JVM_GetInheritedAccessControlContext; +text: .text%JVM_NativePath; +text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_; +text: .text%__1cVLoaderConstraintTableJnew_entry6MIpnNsymbolOopDesc_pnMklassOopDesc_ii_pnVLoaderConstraintEntry__; +text: .text%__1cVloadConL_low_onlyNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIimmDOperFclone6kM_pnIMachOper__; +text: .text%__1cIMachNodeOmemory_operand6kM_pknIMachOper__: ad_i486_misc.o; +text: .text%__1cbIjava_security_AccessControlContextGcreate6FnOobjArrayHandle_inGHandle_pnGThread__pnHoopDesc__; +text: .text%__1cHnmethodNis_osr_method6kM_i_: nmethod.o; +text: .text%__1cNmulI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQorl_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQsalI_eReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOaddF24_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_: callnode.o; +text: .text%__1cFStateT_sub_Op_CallRuntime6MpknENode__v_; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__: callnode.o; +text: .text%__1cOcmovI_regUNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cCosXnon_memory_address_word6F_pc_; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodDataKlass.o; +text: .text%__1cQsalI_eReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodDataKlass.o; +text: 
.text%__1cVCallRuntimeDirectNodeFreloc6kM_i_; +text: .text%__1cSdivD_reg_roundNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cISubFNodeGOpcode6kM_i_; +text: .text%__1cNandI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveL2D_reg_stackNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cVCallRuntimeDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOmulF24_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cOcmovI_regUNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRandL_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTcmovII_reg_LTGENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTshrL_eReg_32_63NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOcmpF_cc_P6NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOaddF24_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cSaddF24_reg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNcmovP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovI_regUNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cSmulF24_reg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_; +text: .text%__1cCosHSolarisKmmap_chunk6FpcIii_2_; +text: .text%__1cKstoreFNodeFreloc6kM_i_; +text: .text%jni_GetStaticMethodID: jni.o; +text: .text%__1cZInterpreterMacroAssemblerWupdate_mdp_by_constant6MpnMRegisterImpl_i_v_; +text: .text%__1cKCompiledICMstub_address6kM_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorRset_unimplemented6Mi_v_; +text: .text%__1cSstoreD_roundedNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_i_v_; +text: .text%__1cKstoreFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKCodeBufferVinsert_float_constant6Mf_pC_; +text: .text%__1cWroundFloat_mem_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOsubF24_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSaddF24_reg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSaddD_reg_roundNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMdecI_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTconvF2I_reg_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cISubDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKReflectionTunbox_for_primitive6FpnHoopDesc_pnGjvalue_pnGThread__nJBasicType__; +text: .text%__1cMloadConDNodeFreloc6kM_i_; +text: .text%__1cPMultiBranchDataScompute_cell_count6FpnOBytecodeStream__i_; +text: .text%__1cJloadDNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQaddD_reg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMdecI_memNodeFreloc6kM_i_; +text: .text%__1cTshlL_eReg_32_63NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFTypeDFxdual6kM_pknEType__; +text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__; +text: .text%__1cWCallLeafNoFPDirectNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConDNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cOtailjmpIndNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cWCallLeafNoFPDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMdecI_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOptoRuntimeWresolve_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cLTypeInstPtrLmirror_type6kM_pnGciType__; +text: 
.text%__1cSmulF24_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationVtrap_state_add_reason6Fii_i_; +text: .text%__1cKstoreINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMdivD_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJLoadBNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF3_v3_v_; +text: .text%__1cOCompilerThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%__1cYcmpL_zero_flags_LTGENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVcompiledICHolderKlassSoop_being_unloaded6MpnRBoolObjectClosure_pnHoopDesc__i_; +text: .text%__1cLRuntimeStub2n6FII_pv_; +text: .text%__1cYcmpL_zero_flags_LTGENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLRuntimeStubQnew_runtime_stub6FpkcpnKCodeBuffer_ipnJOopMapSet_i_p0_; +text: .text%__1cLRuntimeStub2t6MpkcpnKCodeBuffer_iipnJOopMapSet_i_v_; +text: .text%__1cOstoreF_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2F_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLloadSSDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvI2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQshrL_eReg_CLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQsalL_eReg_CLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNSafePointNodeQpeek_monitor_obj6kM_pnENode__; +text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_; +text: .text%__1cFTypeDFempty6kM_i_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: methodDataOop.o; +text: .text%__1cPconvL2D_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKdirectOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486_clone.o; +text: .text%__1cKdirectOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486_clone.o; +text: .text%__1cPoldgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cKdirectOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486_clone.o; +text: .text%__1cKdirectOperLdisp_is_oop6kM_i_: ad_i486_clone.o; +text: .text%__1cLConvD2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cPconvI2F_SSFNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cRmulI_imm_highNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIDivLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cXpartialSubtypeCheckNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJOperation__v4_v_; +text: .text%__1cYinternal_word_RelocationGtarget6M_pC_; +text: .text%__1cRxorI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXpartialSubtypeCheckNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl_i_v_; +text: .text%__1cIModLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cDhpiFclose6Fi_i_: jvm.o; +text: .text%__1cFParsePdo_monitor_exit6M_v_; +text: .text%__1cFStateN_sub_Op_LoadD6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_MulD6MpknENode__v_; +text: .text%__1cSMachCallNativeNodePret_addr_offset6M_i_; +text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__; +text: .text%__1cOsubF24_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: deoptimization.o; +text: .text%__1cTAbstractInterpreterMreturn_entry6FnITosState_i_pC_; +text: .text%JVM_Close; +text: 
.text%__1cLOptoRuntimeRnew_objArray_Type6F_pknITypeFunc__; +text: .text%__1cLConvD2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNSafePointNodeQpeek_monitor_box6kM_pnENode__; +text: .text%__1cIMulDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOmulIS_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovL_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_i_v_; +text: .text%__1cNdivI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulFNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cTunsafe_intrinsic_id6FpnNsymbolOopDesc_1_nNmethodOopDescLIntrinsicId__; +text: .text%__1cSaddF24_reg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQComputeCallStackIdo_float6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackHdo_byte6M_v_: generateOopMap.o; +text: .text%__1cFParseMdo_anewarray6M_v_; +text: .text%__1cIAddFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJLoadDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cTinc_decompile_count6FpnHnmethod__v_: nmethod.o; +text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_; +text: .text%__1cMsubD_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cOcmpD_cc_P6NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTcmovII_reg_EQdDNENodeQuse_cisc_RegMask6M_v_; +text: .text%__1cFStateM_sub_Op_CmpD6MpknENode__v_; +text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_; +text: .text%__1cJAssemblerEmovb6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_only6MnITosState__v_; +text: .text%__1cSCallLeafDirectNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPmovI_nocopyNodeErule6kM_I_: ad_i486_misc.o; +text: .text%jni_NewObjectV: jni.o; +text: .text%__1cSCallLeafDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIci2bNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSsarL_eReg_1_31NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRcmpFastUnlockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovI_regUNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOtailjmpIndNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLOopMapCache2t6M_v_; +text: .text%__1cGICStubKcached_oop6kM_pnHoopDesc__; +text: .text%__1cJSubFPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__; +text: .text%__1cMdivD_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTconvF2I_reg_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSmulF24_reg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cTmembar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_; +text: .text%jni_EnsureLocalCapacity; +text: .text%__1cTcmovII_reg_LEGTNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_; +text: .text%__1cQAbstractCompilerMsupports_osr6M_i_: c2compiler.o; +text: .text%__1cKEntryPoint2t6M_v_; +text: .text%__1cKstoreINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNTemplateTableGbranch6Fii_v_; +text: .text%__1cXcmpL_reg_flags_LTGENodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSMachBreakpointNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQshrI_eReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cHCompile2t6MpnFciEnv_pF_pknITypeFunc_pCpkciiii_v_; +text: .text%__1cKemit_break6FrnKCodeBuffer__v_; +text: 
.text%__1cQshrI_eReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvI2F_SSF_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIGraphKitIgen_stub6MpCpkciii_v_; +text: .text%__1cIciMethodVget_osr_flow_analysis6Mi_pnKciTypeFlow__; +text: .text%__1cLloadSSINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQmulD_reg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLResourceObj2n6FIn0APallocation_type__pv_; +text: .text%__1cMloadConFNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cQmulD_reg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIAddFNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cINegDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSmulF24_reg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJlog2_long6Fx_i_: mulnode.o; +text: .text%__1cJAssemblerEsbbl6MpnMRegisterImpl_i_v_; +text: .text%__1cHnmethodbCcan_not_entrant_be_converted6M_i_; +text: .text%__1cMStartOSRNodeKosr_domain6F_pknJTypeTuple__; +text: .text%__1cNCallGeneratorHfor_osr6FpnIciMethod_i_p0_; +text: .text%__1cINegFNodeGOpcode6kM_i_; +text: .text%__1cQorl_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVeADXRegL_low_onlyOperFclone6kM_pnIMachOper__; +text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_; +text: .text%__1cKimmL32OperFclone6kM_pnIMachOper__; +text: .text%__1cLOptoRuntimeNgenerate_stub6FpnFciEnv_pF_pknITypeFunc_pCpkciiii_8_; +text: .text%__1cVloadConL_low_onlyNodeFclone6kM_pnENode__; +text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cFParseWload_interpreter_state6MpnENode_2_v_; +text: .text%__1cVMoveL2D_reg_stackNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMStartOSRNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cPconvI2D_regNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cTshlL_eReg_32_63NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateO_sub_Op_StoreD6MpknENode__v_; +text: .text%__1cPmovI_nocopyNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHThreadsYis_supported_jni_version6Fi_C_; +text: .text%__1cFStateM_sub_Op_ConF6MpknENode__v_; +text: .text%__1cNcmovI_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKConv2BNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJLoadLNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC22i_v_; +text: .text%__1cNcmovI_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cIci2bNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSdivD_reg_roundNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cWroundFloat_mem_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOmulF24_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcompiledVFrameHraw_bci6kM_i_; +text: .text%__1cFTypeFFxdual6kM_pknEType__; +text: .text%__1cODeoptimizationbJupdate_method_data_from_interpreter6FnQmethodDataHandle_ii_v_; +text: .text%__1cOcompiledVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cSvframeArrayElementHfill_in6MpnOcompiledVFrame__v_; +text: .text%__1cRComputeEntryStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cTcmovII_reg_LTGENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQorI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cFframebHinterpreter_frame_set_monitor_end6MpnPBasicObjectLock__v_; +text: 
.text%__1cOstoreF_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvF2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cPmovP_nocopyNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFframebBinterpreter_frame_sender_sp6kM_pi_; +text: .text%__1cNcmovL_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cTAbstractInterpreterPsize_activation6FpnNmethodOopDesc_iiiii_i_; +text: .text%__1cJScopeDescImonitors6M_pnNGrowableArray4CpnMMonitorValue____; +text: .text%__1cJScopeDescLexpressions6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cJScopeDescGlocals6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cJScopeDescVdecode_monitor_values6Mi_pnNGrowableArray4CpnMMonitorValue____; +text: .text%__1cOcompiledVFrameGlocals6kM_pnUStackValueCollection__; +text: .text%__1cOcompiledVFrameLexpressions6kM_pnUStackValueCollection__; +text: .text%__1cTcmovII_reg_EQdDNENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSvframeArrayElementPunpack_on_stack6MiipnFframe_ii_v_; +text: .text%__1cTconvD2I_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFframebCinterpreter_frame_set_locals6Mpi_v_; +text: .text%__1cFframebCinterpreter_frame_set_method6MpnNmethodOopDesc__v_; +text: .text%__1cTAbstractInterpreterQcontinuation_for6FpnNmethodOopDesc_pCiiri_3_; +text: .text%__1cTAbstractInterpreterRTosState_as_index6FnITosState__i_; +text: .text%__1cTAbstractInterpreterRlayout_activation6FpnNmethodOopDesc_iiiipnFframe_4i_v_; +text: .text%__1cSvframeArrayElementNon_stack_size6kMiiii_i_; +text: .text%__1cSInterpreterRuntimeJnote_trap6FpnKJavaThread_ipnGThread__v_; +text: .text%__1cICmpFNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRmulI_imm_highNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcompiledVFrameImonitors6kM_pnNGrowableArray4CpnLMonitorInfo____; +text: .text%__1cRsubL_eReg_memNodeFreloc6kM_i_; +text: .text%__1cMTailJumpNodeKmatch_edge6kMI_I_; +text: .text%__1cFStateP_sub_Op_ConvF2D6MpknENode__v_; +text: .text%__1cCosNcommit_memory6FpcI_i_; +text: .text%__1cScompP_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cXpartialSubtypeCheckNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_; +text: .text%__1cFStateM_sub_Op_DivL6MpknENode__v_; +text: .text%__1cNsubL_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cLimmI_32OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cNTemplateTableLindex_check6FpnMRegisterImpl_2_v_; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: klassKlass.o; +text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_; +text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_; +text: .text%Unsafe_DefineClass1; +text: .text%JVM_GetComponentType; +text: .text%JVM_DefineClass; +text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_; +text: .text%__1cNmodI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLOptoRuntimeNfetch_monitor6FipnJBasicLock_pC_pnHoopDesc__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cLStrCompNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRaddD_reg_imm1NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNTemplateTableOpatch_bytecode6FnJBytecodesECode_pnMRegisterImpl_4i_v_; +text: .text%__1cJAssemblerGfrstor6MnHAddress__v_; 
+text: .text%JVM_GetCPFieldModifiers; +text: .text%__1cSUnsafe_DefineClass6FpnHJNIEnv__pnI_jstring_pnL_jbyteArray_iipnI_jobject_7_pnH_jclass__: unsafe.o; +text: .text%__1cJBasicLockHmove_to6MpnHoopDesc_p0_v_; +text: .text%__1cSObjectSynchronizerOinflate_helper6FpnHoopDesc__pnNObjectMonitor__: synchronizer.o; +text: .text%__1cSdivD_reg_roundNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvI2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNSharedRuntimebJcontinuation_for_implicit_exception6FpnKJavaThread_pCn0AVImplicitExceptionKind__3_; +text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_; +text: .text%__1cOsubF24_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2D_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLimmI_24OperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_; +text: .text%__1cLloadSSDNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cKPSYoungGenKprecompact6M_v_; +text: .text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_; +text: .text%__1cKPSYoungGenPadjust_pointers6M_v_; +text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_; +text: .text%__1cOstackSlotPOperFclone6kM_pnIMachOper__; +text: .text%__1cLPSMarkSweepRmark_sweep_phase26F_v_; +text: .text%__1cKPSYoungGenHcompact6M_v_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase16Frii_v_; +text: .text%Unsafe_AllocateInstance; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceRefKlass.o; +text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase36F_v_; +text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase46F_v_; +text: .text%__1cPconvL2F_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_; +text: .text%__1cLPSMarkSweepbAreset_millis_since_last_gc6F_v_; +text: .text%__1cUPSAdaptiveSizePolicyUmajor_collection_end6MInHGCCauseFCause__v_; +text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cMsubD_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJCodeCacheLgc_epilogue6F_v_; +text: .text%__1cJMarkSweepNrestore_marks6F_v_; +text: .text%__1cQUncommonTrapBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cNExceptionBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cJCodeCacheMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cYinternal_word_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_; +text: .text%__1cRCardTableModRefBSEis_a6MnKBarrierSetEName__i_: cardTableExtension.o; +text: .text%__1cMset_property6FnGHandle_pkc2pnGThread__v_: jvm.o; +text: .text%__1cJCodeCacheLgc_prologue6F_v_; +text: .text%__1cHBoxNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSReferenceProcessorPoops_do_statics6FpnKOopClosure__v_; +text: .text%__1cQorl_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLPSMarkSweepQinvoke_no_policy6Fpii_v_; +text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__: jni.o; +text: .text%__1cObox_handleNodeFclone6kM_pnENode__; +text: .text%__1cLPSMarkSweepPallocate_stacks6F_v_; +text: .text%jni_NewDirectByteBuffer; +text: .text%__1cMdivD_regNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cLPSMarkSweepRdeallocate_stacks6F_v_; +text: .text%__1cRInlineCacheBufferOinit_next_stub6F_v_; +text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cMincI_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUPSAdaptiveSizePolicyWmajor_collection_begin6M_v_; +text: .text%__1cJMarkSweepMadjust_marks6F_v_; +text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_; +text: .text%__1cQSystemDictionaryMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: symbolKlass.o; +text: .text%__1cNmodL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%jni_AllocObject: jni.o; +text: .text%__1cZInterpreterMacroAssemblerQtest_mdp_data_at6MpnMRegisterImpl_i2rnFLabel__v_; +text: .text%__1cPcmpFastLockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQSystemDictionaryYalways_strong_classes_do6FpnKOopClosure__v_; +text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_; +text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cUPSMarkSweepDecoratorbIset_destination_decorator_perm_gen6F_v_; +text: .text%__1cFStateL_sub_Op_Box6MpknENode__v_; +text: .text%__1cUPSMarkSweepDecoratorbHset_destination_decorator_tenured6F_v_; +text: .text%JVM_InvokeMethod; +text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_; +text: .text%__1cSmulF24_reg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: symbolKlass.o; +text: .text%__1cKDictionaryMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cHThreadsLnmethods_do6F_v_; +text: .text%__1cIPSOldGenKprecompact6M_v_; +text: .text%__1cHThreadsLgc_epilogue6F_v_; +text: .text%__1cHThreadsLgc_prologue6F_v_; +text: .text%__1cJPSPermGenQcompute_new_size6MI_v_; +text: .text%__1cXpartialSubtypeCheckNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cZInterpreterMacroAssemblerYprofile_not_taken_branch6MpnMRegisterImpl__v_; +text: .text%__1cHi2bNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQSystemDictionaryPplaceholders_do6FpnKOopClosure__v_; +text: .text%__1cFStateM_sub_Op_ModL6MpknENode__v_; +text: .text%__1cSaddF24_reg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJPSPermGenKprecompact6M_v_; +text: .text%__1cHi2bNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTleaPIdxScaleOffNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFj_not6FnNTemplateTableJCondition__nJAssemblerJCondition__: templateTable_i486.o; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cVMoveF2I_reg_stackNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cMincI_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_; +text: .text%__1cQaddD_reg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJCondition__v4_v_; +text: .text%__1cVMoveL2D_reg_stackNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMincI_memNodeFreloc6kM_i_; +text: .text%__1cOaddF24_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovI_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_pnNsymbolOopDesc_pkc_nGHandle__; +text: 
.text%__1cMnegF_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJStubQdDueueMremove_first6Mi_v_; +text: .text%__1cSsarL_eReg_1_31NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%JVM_NewArray; +text: .text%__1cLConvF2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOcmpF_cc_P6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlass.o; +text: .text%__1cFStateM_sub_Op_MulF6MpknENode__v_; +text: .text%__1cTconvF2I_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTshlL_eReg_32_63NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerFpopad6M_v_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cNTemplateTableHconvert6F_v_; +text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_; +text: .text%__1cSmulF24_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovI_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cRmethodDataOopDescRbci_to_extra_data6Mii_pnLProfileData__; +text: .text%__1cIDivDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cTconvD2I_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNcmovI_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFStateP_sub_Op_ConvD2I6MpknENode__v_; +text: .text%__1cLMoveF2INodeGOpcode6kM_i_; +text: .text%__1cPconvI2D_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJStubQdDueueKremove_all6M_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: compiledICHolderKlass.o; +text: .text%__1cVVM_ParallelGCSystemGC2t6MI_v_; +text: .text%__1cOcmovI_regUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIregFOperFclone6kM_pnIMachOper__; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cLCastP2INodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cPconvL2D_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPmovP_nocopyNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_; +text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_; +text: .text%__1cSaddF24_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cMciArrayKlassRbase_element_type6M_pnGciType__; +text: .text%__1cHCompileWget_MethodAccessorImpl6M_pnPciInstanceKlass__; +text: .text%__1cSInterpreterRuntimeQcreate_exception6FpnKJavaThread_pc3_v_; +text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_; +text: .text%__1cSmulF24_reg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cLTypeInstPtrOxmeet_unloaded6kMpk0_2_; +text: .text%__1cScompP_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cHCompileRget_Method_invoke6M_pnIciMethod__; +text: .text%__1cLPSMarkSweepGinvoke6Fpii_v_; +text: .text%JVM_GC; +text: .text%__1cVVM_ParallelGCSystemGCEdoit6M_v_; +text: 
.text%__1cXpartialSubtypeCheckNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLClassLoaderSget_system_package6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cWroundFloat_mem_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQinitialize_class6FnMsymbolHandle_pnGThread__v_: thread.o; +text: .text%__1cKScopeValueLis_location6kM_i_: debugInfo.o; +text: .text%__1cVVM_ParallelGCSystemGCEname6kM_pkc_: vm_operations.o; +text: .text%__1cRfind_field_offset6FpnI_jobject_ipnGThread__i_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: compiledICHolderKlass.o; +text: .text%__1cOLibraryCallKitbDis_method_invoke_or_aux_frame6MpnIJVMState__i_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: objArrayKlass.o; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cUParallelScavengeHeapHcollect6MnHGCCauseFCause__v_; +text: .text%JVM_GetSystemPackage; +text: .text%__1cLStatSamplerTget_system_property6FpkcpnGThread__2_; +text: .text%__1cPconvL2D_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cETypeJis_finite6kM_i_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: compiledICHolderKlass.o; +text: .text%__1cXPartialSubtypeCheckNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSaddF24_reg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cCosNcommit_memory6FpcII_i_; +text: .text%__1cOresolve_symbol6Fpkc_pC_: os_solaris.o; +text: .text%__1cLloadSSINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: klassKlass.o; +text: .text%__1cNloadConL0NodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cPMultiBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%JVM_RawMonitorCreate; +text: .text%__1cImulINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: interpreter.o; +text: .text%__1cQConstantIntValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cOLibraryCallKitXinline_string_compareTo6M_i_; +text: .text%__1cImulINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOMacroAssemblerVreset_last_Java_frame6MpnMRegisterImpl_i_v_; +text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_22pC_v_; +text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cSstring_compareNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cISubFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFStateP_sub_Op_StrComp6MpknENode__v_; +text: .text%__1cIcp2bNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateP_sub_Op_ConvI2F6MpknENode__v_; +text: .text%__1cOMacroAssemblerPempty_FPU_stack6M_v_; +text: .text%jni_GetStringRegion: jni.o; +text: .text%__1cQConstantIntValuePis_constant_int6kM_i_: debugInfo.o; +text: .text%__1cICodeBlobZis_at_poll_or_poll_return6MpC_i_; +text: .text%__1cSmulF24_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQaddD_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceRefKlass.o; +text: .text%__1cIAddFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cHOrLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cScompP_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHBoxNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvL2FNodeLbottom_type6kM_pknEType__: classes.o; 
+text: .text%__1cLConvL2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cTcmovII_reg_EQdDNENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceRefKlass.o; +text: .text%__1cLvframeArrayPunpack_to_stack6MrnFframe_i_v_; +text: .text%__1cLvframeArrayZdeallocate_monitor_chunks6M_v_; +text: .text%__1cLvframeArrayHfill_in6MpnKJavaThread_ipnNGrowableArray4CpnOcompiledVFrame___pknLRegisterMap_i_v_; +text: .text%__1cLvframeArrayIallocate6FpnKJavaThread_ipnNGrowableArray4CpnOcompiledVFrame___pnLRegisterMap_nFframe_9A9A9A_p0_; +text: .text%__1cOstoreF_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVcompiledICHolderKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNcmovI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cImulINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsalL_eReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cbCAbstractInterpreterGeneratorUset_wide_entry_point6MpnITemplate_rpC_v_; +text: .text%Unsafe_CompareAndSwapInt; +text: .text%__1cZInterpreterMacroAssemblerPset_mdp_data_at6MpnMRegisterImpl_i2_v_; +text: .text%__1cRsubI_eReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cQsalL_eReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYcmpL_zero_flags_LTGENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cODeoptimizationLUnrollBlock2t6MiiiiipippCnJBasicType__v_; +text: .text%__1cNTemplateTableRlocals_index_wide6FpnMRegisterImpl__v_; +text: .text%__1cKstorePNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2F_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConDNodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LEGTNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cSmulF24_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosHSolarisVcleanup_interruptible6FpnKJavaThread__v_; +text: .text%__1cCosHSolarisTsetup_interruptible6F_pnKJavaThread__; +text: .text%__1cQmulI_mem_immNodeFreloc6kM_i_; +text: .text%__1cJloadFNodeIpipeline6kM_pknIPipeline__; +text: .text%JVM_Sleep; +text: .text%JVM_Lseek; +text: .text%__1cNnmethodLocker2t6MpC_v_; +text: .text%__1cRmulI_eReg_immNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNTemplateTableQvolatile_barrier6F_v_; +text: .text%__1cHnmethodVmark_as_seen_on_stack6M_v_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_ppkc5i_i_: arguments.o; +text: .text%__1cQmulD_reg_immNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cKVtableStubSpd_code_size_limit6Fi_i_; +text: .text%__1cMdivD_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_; +text: .text%__1cQmulD_reg_immNodeFreloc6kM_i_; +text: .text%__1cODeoptimizationRlast_frame_adjust6Fii_i_; +text: .text%__1cFParseQdo_monitor_enter6M_v_; +text: .text%__1cCosHSolarisTsetup_interruptible6FpnKJavaThread__v_; +text: .text%__1cODeoptimizationScreate_vframeArray6FpnKJavaThread_nFframe_pnLRegisterMap__pnLvframeArray__; +text: .text%__1cLConvF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRaddI_mem_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: 
.text%__1cODeoptimizationNuncommon_trap6FpnKJavaThread_i_pn0ALUnrollBlock__; +text: .text%__1cODeoptimizationTuncommon_trap_inner6FpnKJavaThread_i_v_; +text: .text%__1cNcmovL_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: deoptimization.o; +text: .text%__1cODeoptimizationRgather_statistics6Fn0ALDeoptReason_n0ALDeoptAction_nJBytecodesECode__v_; +text: .text%__1cIcp2bNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cODeoptimizationPget_method_data6FpnKJavaThread_nMmethodHandle_i_pnRmethodDataOopDesc__; +text: .text%__1cODeoptimizationNunpack_frames6FpnKJavaThread_i_nJBasicType__; +text: .text%__1cIci2bNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cODeoptimizationYfetch_unroll_info_helper6FpnKJavaThread__pn0ALUnrollBlock__; +text: .text%__1cOMacroAssemblerFenter6M_v_; +text: .text%Unsafe_GetNativeByte; +text: .text%__1cOMacroAssemblerNpop_FPU_state6M_v_; +text: .text%__1cTsarL_eReg_32_63NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPconvL2F_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQmulD_reg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRuntime.o; +text: .text%__1cFTypeFGis_nan6kM_i_; +text: .text%__1cSaddF24_reg_memNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cKCMoveLNodeGOpcode6kM_i_; +text: .text%JVM_NanoTime; +text: .text%__1cIXorINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cFParseOdo_tableswitch6M_v_; +text: .text%__1cCosNjavaTimeNanos6F_x_; +text: .text%__1cETypeEmake6Fn0AFTYPES__pk0_; +text: .text%__1cVMoveF2I_reg_stackNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerFpushl6MnHAddress__v_; +text: .text%jni_GetEnv; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: methodLiveness.o; +text: .text%__1cKstoreCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cGICStubFclear6M_v_; +text: .text%__1cODeoptimizationLUnrollBlock2T6M_v_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_i486.o; +text: .text%__1cGciType2t6MnJBasicType__v_; +text: .text%__1cKstoreDNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRComputeEntryStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cMloadConFNodeFreloc6kM_i_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlassKlass.o; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCi_v_; +text: .text%__1cHTypePtrKadd_offset6kMi_pk0_; +text: .text%__1cMorI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cFStateP_sub_Op_ConvI2D6MpknENode__v_; +text: .text%__1cJAssemblerEmovw6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cQshrL_eReg_CLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIimmFOperFclone6kM_pnIMachOper__; +text: .text%__1cFKlassMoop_is_array6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFStateM_sub_Op_AddF6MpknENode__v_; +text: .text%__1cOcompI_eRegNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConFNodeFclone6kM_pnENode__; +text: .text%__1cTconvI2F_SSF_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cParrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%jint_cmp: parse2.o; +text: .text%__1cJAssemblerExorl6MpnMRegisterImpl_i_v_; +text: .text%__1cPBytecode_invokeLresult_type6kMpnGThread__nJBasicType__; +text: .text%jni_CallVoidMethod: jni.o; +text: .text%__1cOsubF24_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOstoreF_immNodeOmemory_operand6kM_pknIMachOper__; +text: 
.text%__1cENodeHis_Call6M_pnICallNode__: machnode.o; +text: .text%__1cLloadSSINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLloadSSINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_; +text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_; +text: .text%__1cMnegF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConFNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%Unsafe_GetNativeFloat; +text: .text%__1cKstoreDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSaddD_reg_roundNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddI_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWroundFloat_mem_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_; +text: .text%__1cPmovP_nocopyNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: callnode.o; +text: .text%__1cOsubF24_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLMoveL2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNstoreImmPNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cMStubCodeMark2T6M_v_; +text: .text%__1cRSignatureIteratorHiterate6M_v_; +text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_; +text: .text%__1cFStateO_sub_Op_CMoveP6MpknENode__v_; +text: .text%__1cOcmovI_regUNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddD_reg_imm1NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPmovI_nocopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerFfwait6M_v_; +text: .text%__1cJAssemblerKrepne_scan6M_v_; +text: .text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cPmovP_nocopyNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNRegisterSaverWrestore_live_registers6FpnOMacroAssembler__v_; +text: .text%__1cRandL_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNcmovL_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_; +text: .text%__1cZInterpreterMacroAssemblerRremove_activation6MnITosState_pnMRegisterImpl_iii_v_; +text: .text%__1cImulINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cImulINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSaddF24_reg_memNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cImulINodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cJname2type6Fpkc_nJBasicType__; +text: .text%__1cOcmpF_cc_P6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_continuation_for6MnITosState__pC_; +text: .text%__1cLvframeArrayRregister_location6kMi_pC_; +text: .text%JVM_FindPrimitiveClass; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constantPoolKlass.o; +text: .text%__1cVis_positive_one_float6Ff_i_; +text: .text%__1cRaddI_mem_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmulF24_reg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cSdivD_reg_roundNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSaddF24_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_; +text: .text%__1cSaddF24_reg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: 
.text%__1cIDivINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSstoreD_roundedNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMdivD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAddDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNRelocIterator2t6MpnKCodeBuffer_pC3_v_; +text: .text%__1cFStateO_sub_Op_LoadPC6MpknENode__v_; +text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cSmulF24_reg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJCmpF3NodeGOpcode6kM_i_; +text: .text%__1cIAddDNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cHMulNodeGis_Mul6kM_pk0_: classes.o; +text: .text%__1cJAssemblerFtestb6MpnMRegisterImpl_i_v_; +text: .text%__1cIGraphKitSgen_native_wrapper6MpnIciMethod__v_; +text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_; +text: .text%__1cOmulIS_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNtestI_regNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%Unsafe_StaticFieldBaseFromField; +text: .text%__1cSaddF24_reg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cOsubF24_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_; +text: .text%__1cKLoadPCNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_; +text: .text%__1cXpartialSubtypeCheckNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%Unsafe_StaticFieldOffset; +text: .text%__1cVMoveL2D_reg_stackNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_; +text: .text%__1cKdirectOperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%Unsafe_GetObjectVolatile; +text: .text%__1cHnmethodFflush6M_v_; +text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_; +text: .text%__1cTconvI2F_SSF_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cICodeBlobFflush6M_v_; +text: .text%__1cJAssemblerMemit_arith_b6MiipnMRegisterImpl_i_v_; +text: .text%__1cFTypeFFempty6kM_i_; +text: .text%__1cKdirectOperNconstant_disp6kM_i_: ad_i486_clone.o; +text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cNReservedSpace2t6MpcI_v_; +text: .text%__1cNaddP_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJEventMark2t6MpkcE_v_: nmethod.o; +text: .text%__1cIMachOperNbase_position6kM_i_; +text: .text%__1cQorl_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2F_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2F_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cWroundFloat_mem_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cYinternal_word_RelocationMforce_target6MpC_v_: relocInfo.o; +text: .text%__1cQmulD_reg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cZInterpreterMacroAssemblerGf2ieee6M_v_; +text: .text%__1cQmulD_reg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_; +text: .text%__1cPconvL2D_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachOperMdisp_as_type6kM_pknHTypePtr__: ad_i486_clone.o; +text: .text%Unsafe_EnsureClassInitialized; +text: .text%__1cFStateS_sub_Op_CallNative6MpknENode__v_; +text: .text%__1cIciSymbolHas_utf86M_pkc_; +text: .text%__1cOtypeArrayKlassNexternal_name6FnJBasicType__pkc_; +text: .text%__1cENodeHdel_out6Mp0_v_: 
connode.o; +text: .text%JVM_GetClassContext; +text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__; +text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlass.o; +text: .text%__1cIciMethodMnative_entry6M_pC_; +text: .text%__1cNdivI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJAssemblerHfincstp6M_v_; +text: .text%__1cZInterpreterMacroAssemblerSnotify_method_exit6MnITosState__v_; +text: .text%__1cETypeFxdual6kM_pk0_; +text: .text%__1cQorI_eReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOMacroAssemblerEfpop6M_v_; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassMcreate_klass6FnJBasicType_ipnGThread__pnMklassOopDesc__; +text: .text%__1cLlog2_intptr6Fi_i_: typeArrayKlass.o; +text: .text%__1cLOptoRuntimeTmultianewarray_Type6Fi_pknITypeFunc__; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod__v_; +text: .text%__1cQAbstractCompilerPsupports_native6M_i_: c2compiler.o; +text: .text%__1cOGenerateOopMapGdo_jsr6Mi_v_; +text: .text%__1cZInterpreterMacroAssemblerbGget_unsigned_2_byte_index_at_bcp6MpnMRegisterImpl_i_v_; +text: .text%__1cSaddF24_reg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIAddDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cbCAbstractInterpreterGeneratorbBgenerate_result_handler_for6MnJBasicType__pC_; +text: .text%__1cOMacroAssemblerOpush_FPU_state6M_v_; +text: .text%__1cZInterpreterMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cZInterpreterMacroAssemblerGd2ieee6M_v_; +text: .text%jni_RegisterNatives: jni.o; +text: .text%__1cSmulF24_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSaddF24_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%JVM_GetClassDeclaredFields; +text: .text%stat: os_solaris.o; +text: .text%JVM_IsSameClassPackage; +text: .text%__1cWroundFloat_mem_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cCosMuser_handler6F_pv_; +text: .text%__1cLConvF2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cLClassLoaderbCupdate_class_path_entry_list6Fpkc_v_; +text: .text%__1cOcmovI_regUNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cJArgumentsRverify_percentage6FIpkc_i_; +text: .text%jni_SetStaticObjectField: jni.o; +text: .text%__1cNTemplateTableGiconst6Fi_v_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cFStateM_sub_Op_AddD6MpknENode__v_; +text: .text%__1cLConvD2FNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJAssemblerEcall6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cRComputeEntryStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cFTypeFJis_finite6kM_i_; +text: .text%__1cCosEstat6FpkcpnEstat__i_; +text: .text%__1cLOptoRuntimeRresolve_call_Type6F_pknITypeFunc__; +text: .text%__1cCosHSolarisSset_signal_handler6Fiii_v_; +text: .text%__1cOPhaseIdealLoopTdo_maximally_unroll6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_2_v_; +text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_; +text: .text%__1cSaddF24_reg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerGfnsave6MnHAddress__v_; +text: .text%__1cVMoveF2I_reg_stackNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMdecI_memNodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_; +text: .text%__1cJloadDNodeFreloc6kM_i_; +text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_; +text: .text%__1cMloadConLNodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_; +text: .text%__1cSaddF24_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMloadConFNodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cISubDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLloadSSINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cISubDNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cQorl_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_MonitorExit: jni.o; +text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_; +text: .text%jni_MonitorEnter: jni.o; +text: .text%__1cSaddD_reg_roundNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvL2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_; +text: .text%__1cKReturnNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cPcheckCastPPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFframebFset_interpreter_frame_sender_sp6Mpi_v_; +text: .text%__1cRmulI_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovI_regUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cULinearLeastSquareFit2t6MI_v_; +text: .text%__1cNTemplateTableH_return6FnITosState__v_; +text: .text%__1cOMacroAssemblerEfcmp6MpnMRegisterImpl__v_; +text: .text%__1cMTailJumpNodeGOpcode6kM_i_; +text: .text%__1cOsubF24_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWroundFloat_mem_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_; +text: .text%jni_GetJavaVM; +text: .text%__1cHciKlassIis_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cLConvL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJOopMapSetQsingular_oop_map6M_pnGOopMap__; +text: .text%__1cSaddF24_reg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLConvF2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cTconvI2F_SSF_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTconvI2F_SSF_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cCosZvm_allocation_granularity6F_i_; +text: .text%__1cNReservedSpaceKinitialize6MIIipc_v_; +text: .text%JVM_LoadLibrary; +text: .text%__1cCosOreserve_memory6FIpc_1_; +text: .text%JVM_IsSupportedJNIVersion; +text: .text%__1cOstoreF_immNodeFreloc6kM_i_; +text: .text%__1cVMoveF2I_reg_stackNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_; +text: .text%__1cNmulI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerGfild_d6MnHAddress__v_; +text: .text%__1cRmulI_imm_highNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerGmovsxw6MpnMRegisterImpl_nHAddress__v_; +text: .text%Unsafe_ObjectFieldOffset; +text: .text%__1cIcp2bNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIcp2bNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEincl6MnHAddress__v_; +text: .text%__1cXpartialSubtypeCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cPmovP_nocopyNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cVMoveL2D_reg_stackNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosHSolarisOis_sig_ignored6Fi_i_; +text: .text%__1cRaddI_mem_eRegNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOLibraryCallKitbNinline_native_Reflection_getCallerClass6M_i_; +text: .text%__1cOMacroAssemblerQload_signed_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cOLibraryCallKitZinline_native_Class_query6MnIciMethodLIntrinsicId__i_; +text: .text%__1cOstoreF_immNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cPconvL2D_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKciTypeFlowLStateVectorOdo_null_assert6MpnHciKlass__v_; +text: .text%__1cPconvL2F_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIci2bNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cXcmpL_reg_flags_LEGTNodeFclone6kM_pnENode__; +text: .text%__1cJMemRegion2t6M_v_: cardTableModRefBS.o; +text: .text%__1cQmulD_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerHfucomip6Mi_v_; +text: .text%__1cTGeneratePairingInfoOreport_results6kM_i_: ciMethod.o; +text: .text%__1cFciEnvbNArrayIndexOutOfBoundsException_instance6M_pnKciInstance__; +text: .text%__1cRsalI_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%jni_SetObjectField: jni.o; +text: .text%Unsafe_AllocateMemory; +text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: callnode.o; +text: .text%__1cTconvI2F_SSF_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMelapsedTimer2t6M_v_: methodLiveness.o; +text: .text%__1cISubDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvI2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvF2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKMemoryPool2t6Mpkcn0AIPoolType_IIii_v_; +text: .text%__1cRaddD_reg_imm1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosHSolarisOset_mpss_range6FpcII_i_; +text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_; +text: .text%__1cPciInstanceKlassbDcompute_shared_is_initialized6M_i_; +text: .text%jni_Throw: jni.o; +text: .text%__1cNSpaceCounters2t6MpkciIpnMMutableSpace_pnSGenerationCounters__v_; +text: .text%__1cOPSVirtualSpaceJexpand_by6MI_i_; +text: .text%__1cSaddF24_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableFaload6Fi_v_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_; +text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_; +text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_; +text: .text%__1cPmovP_nocopyNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_; +text: .text%__1cIcp2bNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLklassVtableTis_miranda_entry_at6Mi_i_; +text: .text%__1cHnmethodbJcontinuation_for_implicit_exception6MpC_1_; +text: .text%__1cLVtableStubsIcontains6FpC_i_; +text: .text%__1cWImplicitExceptionTable2t6MpknHnmethod__v_; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__; +text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_; +text: .text%__1cOMacroAssemblerQload_signed_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cNdivI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXNativeSignatureIteratorLpass_double6M_v_: interpreterRuntime.o; +text: 
.text%__1cLVtableStubsPstub_containing6FpC_pnKVtableStub__; +text: .text%__1cOtailjmpIndNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_; +text: .text%__1cFStateP_sub_Op_ConvF2I6MpknENode__v_; +text: .text%__1cODeoptimizationYreset_invocation_counter6FpnJScopeDesc_i_v_; +text: .text%__1cQshrL_eReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvF2I_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_: interpreterRuntime.o; +text: .text%__1cOcompI_eRegNodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerNsuper_call_VM6MpnMRegisterImpl_2pC22_v_; +text: .text%__1cLVtableStubsLcreate_stub6FiipnNmethodOopDesc__pC_; +text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_; +text: .text%__1cNCellTypeStateImake_any6Fi_0_: generateOopMap.o; +text: .text%__1cJAssemblerEmovb6MnHAddress_i_v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_; +text: .text%__1cNRegisterSaverTsave_live_registers6FpnOMacroAssembler_ipi_pnGOopMap__; +text: .text%__1cTshlL_eReg_32_63NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cWImplicitExceptionTableCat6kMI_I_; +text: .text%__1cQshrL_eReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLVtableStubsGlookup6Fiii_pnKVtableStub__; +text: .text%__1cSsarL_eReg_1_31NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNReservedSpaceKfirst_part6MIii_0_; +text: .text%__1cJAssemblerFfinit6M_v_; +text: .text%__1cLVtableStubsOis_entry_point6FpC_i_; +text: .text%__1cFParseRjump_if_true_fork6MpnGIfNode_ii_v_; +text: .text%__1cKVtableStubRpd_code_alignment6F_i_; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: subnode.o; +text: .text%__1cUInterpreterGeneratorUgenerate_fixed_frame6Mi_v_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerSstore_check_part_26MpnMRegisterImpl__v_; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: frame.o; +text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_i486.o; +text: .text%__1cGEventsDlog6FpkcE_v_: compiledIC.o; +text: .text%__1cLOptoRuntimebBhandle_wrong_method_ic_miss6FpnKJavaThread__pC_; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_; +text: .text%__1cNReservedSpace2t6MI_v_; +text: .text%__1cVMoveF2I_reg_stackNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationZtrap_state_set_recompiled6Fii_i_; +text: .text%__1cINegFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSmulF24_reg_immNodeFreloc6kM_i_; +text: .text%__1cLloadSSINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerSstore_check_part_16MpnMRegisterImpl__v_; +text: .text%__1cKCompiledICOis_megamorphic6kM_i_; +text: .text%__1cUInterpreterGeneratorbAgenerate_run_compiled_code6M_v_; +text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MpC_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_; +text: .text%__1cIDivDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_; +text: .text%JVM_GetLastErrorString; +text: .text%__1cIDivDNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_; +text: .text%__1cSmulF24_reg_immNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_DivD6MpknENode__v_; 
+text: .text%__1cFStateM_sub_Op_DivI6MpknENode__v_; +text: .text%__1cRComputeEntryStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cINegDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNTemplateTableGdstore6Fi_v_; +text: .text%__1cNTemplateTableFdload6Fi_v_; +text: .text%__1cNTemplateTableOprepare_invoke6FpnMRegisterImpl_2inJBytecodesECode__v_; +text: .text%__1cLConvD2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTableGlstore6Fi_v_; +text: .text%__1cOcompP_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNTemplateTableGfstore6Fi_v_; +text: .text%__1cNTemplateTableGastore6Fi_v_; +text: .text%__1cKConv2BNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNGCTaskManagerGthread6MI_pnMGCTaskThread__; +text: .text%__1cPdouble_quadword6Fpxxx_0_: ad_i486.o; +text: .text%__1cFStateM_sub_Op_SubD6MpknENode__v_; +text: .text%__1cNTemplateTableZload_field_cp_cache_entry6FipnMRegisterImpl_22i_v_; +text: .text%__1cSmulF24_reg_memNodeFreloc6kM_i_; +text: .text%__1cScompP_eReg_memNodeFreloc6kM_i_; +text: .text%__1cLMoveL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTablebAload_invoke_cp_cache_entry6FipnMRegisterImpl_22ii_v_; +text: .text%__1cFStateM_sub_Op_NegD6MpknENode__v_; +text: .text%__1cGThreadMis_VM_thread6kM_i_: gcTaskThread.o; +text: .text%__1cJlog2_long6Fx_i_: divnode.o; +text: .text%__1cMelapsedTimer2t6M_v_: compileBroker.o; +text: .text%__1cNcmovI_regNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cQaddD_reg_immNodeFreloc6kM_i_; +text: .text%__1cIRetTableHadd_jsr6Mii_v_; +text: .text%__1cNTemplateTableFfload6Fi_v_; +text: .text%__1cNTemplateTableFlload6Fi_v_; +text: .text%__1cNcmovI_memNodeFreloc6kM_i_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: unsafe.o; +text: .text%__1cPaddress_of_flag6FnXCommandLineFlagWithType__pnEFlag__: globals.o; +text: .text%__1cNTemplateTableFiload6Fi_v_; +text: .text%__1cOGenerateOopMapTret_jump_targets_do6MpnOBytecodeStream_pFp0ipi_vi4_v_; +text: .text%__1cNdivI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cImulINodeFreloc6kM_i_; +text: .text%__1cNmulI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFStateO_sub_Op_Conv2B6MpknENode__v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_; +text: .text%__1cIRetTableUfind_jsrs_for_target6Mi_pnNRetTableEntry__; +text: .text%__1cNmulI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNTemplateTableGistore6Fi_v_; +text: .text%__1cRClassPathZipEntry2t6Mppvpc_v_; +text: .text%__1cLClassLoaderXcreate_class_path_entry6FpcnEstat_ppnOClassPathEntry__v_; +text: .text%__1cLClassLoaderLadd_to_list6FpnOClassPathEntry__v_; +text: .text%__1cOLibraryCallKitbBinline_native_currentThread6M_i_; +text: .text%__1cOLibraryCallKitXgenerate_current_thread6MrpnENode__2_; +text: .text%__1cTconvI2F_SSF_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQaddD_reg_immNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cLClassLoaderSget_canonical_path6Fpc1i_i_; +text: .text%__1cSmembar_acquireNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMstoreSSINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRaddD_reg_imm1NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerEcmpb6MnHAddress_i_v_; +text: .text%__1cINegDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJAssemblerEdecl6MnHAddress__v_; +text: .text%__1cOstackSlotFOperFclone6kM_pnIMachOper__; +text: .text%__1cSaddF24_reg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: 
.text%__1cCosGsignal6Fipv_1_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%jni_CallStaticObjectMethod: jni.o; +text: .text%__1cMNativeLookupNlong_jni_name6FnMmethodHandle__pc_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceRefKlass.o; +text: .text%__1cZInterpreterMacroAssemblerRprofile_checkcast6MipnMRegisterImpl__v_; +text: .text%__1cHRetNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQmulD_reg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompP_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMPerfDataList2T6M_v_; +text: .text%__1cHRetNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeFclone6kM_pnENode__; +text: .text%__1cNtestI_regNodeFclone6kM_pnENode__; +text: .text%__1cOClassPathEntry2t6M_v_; +text: .text%__1cVMoveF2I_reg_stackNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_; +text: .text%Unsafe_SetMemory; +text: .text%__1cRaddL_eReg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIcp2bNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFciEnvOrecord_failure6Mpkc_v_; +text: .text%__1cNcmovL_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cbCAbstractInterpreterGeneratorbHgenerate_exception_handler_common6Mpkc2i_pC_; +text: .text%__1cENodeHdel_out6Mp0_v_: ifg.o; +text: .text%__1cJAssemblerEfld16M_v_; +text: .text%__1cSmembar_acquireNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerFfld_x6MnHAddress__v_; +text: .text%__1cLConvD2FNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJAssemblerHfistp_d6MnHAddress__v_; +text: .text%__1cKstoreFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJAssemblerFimull6MpnMRegisterImpl_2_v_; +text: .text%__1cRInvocationCounterDdef6Fn0AFState_ipFnMmethodHandle_pnGThread__pC_v_; +text: .text%__1cOMacroAssemblerIfcmp2int6MpnMRegisterImpl_i_v_; +text: .text%__1cFciEnvXget_or_create_exception6MrpnI_jobject_nMsymbolHandle__pnKciInstance__; +text: .text%__1cMPerfDataList2t6Mi_v_; +text: .text%__1cLloadSSINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLVtableStubsScreate_vtable_stub6Fii_pnKVtableStub__; +text: .text%__1cRaddI_mem_eRegNodeFreloc6kM_i_; +text: .text%__1cUInterpreterGeneratorTgenerate_math_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cCosFyield6F_v_; +text: .text%__1cOCompilerThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cRComputeEntryStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cIDivFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSCardTableExtensionVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cSCardTableExtensionbEresize_covered_region_by_start6MnJMemRegion__v_; +text: .text%__1cJArgumentsMbuild_string6Fppcpkc_v_; +text: .text%__1cJArgumentsObuild_jvm_args6Fpkc_v_; +text: .text%__1cJArgumentsMadd_property6Fpkc_i_; +text: .text%__1cJAssemblerFfmulp6Mi_v_; +text: .text%__1cUInterpreterGeneratorbEgenerate_asm_interpreter_entry6Mi_pC_; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstanceKlass.o; +text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_22_v_; +text: .text%__1cNcmovL_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_; +text: 
.text%__1cNTemplateTableMfast_xaccess6FnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_22_v_; +text: .text%__1cRaddI_mem_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_FindSignal; +text: .text%__1cKVtableStub2n6FIi_pv_; +text: .text%__1cSInterpreterRuntimebKthrow_ArrayIndexOutOfBoundsException6FpnKJavaThread_pci_v_; +text: .text%__1cOGenerateOopMapRdo_multianewarray6Mii_v_; +text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_: interpreterRuntime.o; +text: .text%__1cLOptoRuntimeVgenerate_handler_blob6FpCi_pnNSafepointBlob__; +text: .text%JVM_RegisterSignal; +text: .text%__1cFParsePdo_lookupswitch6M_v_; +text: .text%__1cSaddF24_reg_memNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_2_v_; +text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__; +text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_; +text: .text%__1cOcmpF_cc_P6NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerEcdql6M_v_; +text: .text%__1cMVM_OperationNdoit_prologue6M_i_: vm_operations.o; +text: .text%__1cUInterpreterGeneratorbDgenerate_stack_overflow_check6M_v_; +text: .text%__1cUInterpreterGeneratorXcheck_for_compiled_code6MrnFLabel__v_; +text: .text%__1cRCardTableModRefBSbCfind_covering_region_by_base6MpnIHeapWord__i_; +text: .text%__1cIci2bNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRCardTableModRefBSbAlargest_prev_committed_end6kMi_pnIHeapWord__; +text: .text%__1cMSysClassPathNreset_item_at6Mi_v_: arguments.o; +text: .text%__1cLconvI2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateM_sub_Op_CmpF6MpknENode__v_; +text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cOLibraryCallKitVinline_fp_conversions6MnIciMethodLIntrinsicId__i_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cHCompilePget_invoke_name6M_pnIciSymbol__; +text: .text%__1cFParseRdo_multianewarray6M_v_; +text: .text%__1cJAssemblerGfmul_d6MnHAddress__v_; +text: .text%__1cLVtableStubsFenter6FiiipnKVtableStub__v_; +text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl__v_; +text: .text%__1cLMoveF2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cRaddD_reg_imm1NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pIi_v_: oopMapCache.o; +text: .text%__1cNTemplateTableLwide_istore6F_v_; +text: .text%__1cRaddD_reg_imm1NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cISubFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNandI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOtailjmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o; +text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_; +text: .text%__1cNReservedSpaceJlast_part6MI_0_; +text: .text%__1cNSafepointBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cJlookupOne6FpnHJNIEnv__pkcpnGThread__pnH_jclass__: jni.o; +text: .text%__1cUPSGenerationCounters2t6MpkciipnOPSVirtualSpace__v_; +text: .text%__1cNSafepointBlob2n6FII_pv_; +text: .text%__1cQmulD_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvI2F_SSF_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPSVirtualSpace2t6MnNReservedSpace_I_v_; +text: 
.text%__1cFTypeDJis_finite6kM_i_; +text: .text%JVM_GetInterfaceVersion; +text: .text%__1cQOopMapCacheEntryPfill_for_native6M_v_; +text: .text%__1cNMemoryManager2t6M_v_; +text: .text%__1cSPSPromotionManager2t6M_v_; +text: .text%__1cOstackSlotIOperFscale6kM_i_: ad_i486.o; +text: .text%__1cOstackSlotIOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOstackSlotIOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cNFingerprinterLfingerprint6M_X_: oopMapCache.o; +text: .text%__1cMnegF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveF2I_reg_stackNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSaddF24_reg_memNodeFreloc6kM_i_; +text: .text%__1cNTemplateTableGfconst6Fi_v_; +text: .text%__1cSdivD_reg_roundNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cPmovP_nocopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_CallStaticObjectMethodV: jni.o; +text: .text%__1cIci2bNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMOopTaskQdDueue2t6M_v_; +text: .text%__1cSCommandLineFlagsExJboolAtPut6FnXCommandLineFlagWithType_i_v_; +text: .text%__1cSCommandLineFlagsExKis_default6FnPCommandLineFlag__i_; +text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cRaddI_mem_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%JVM_Available; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MpnMRegisterImpl_2i_v_; +text: .text%__1cCosIjvm_path6Fpci_v_; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_2i_v_; +text: .text%__1cMOopTaskQdDueueKinitialize6M_v_; +text: .text%__1cPmovI_nocopyNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOtailjmpIndNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIci2bNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cCosGgetenv6Fpkcpci_i_; +text: .text%__1cOtailjmpIndNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%jni_GetDoubleArrayRegion: jni.o; +text: .text%__1cIcp2bNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLconvI2BNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeFclone6kM_pnENode__; +text: .text%__1cKciTypeFlowLStateVectorRdo_multianewarray6MpnQciByteCodeStream__v_; +text: .text%__1cJAssemblerGfild_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfstp_d6Mi_v_; +text: .text%__1cTMaskFillerForNativeLpass_object6M_v_: oopMapCache.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_; +text: .text%__1cLMoveF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNStubGeneratorUgenerate_d2i_wrapper6MpC_1_: stubGenerator_i486.o; +text: .text%__1cTMaskFillerForNativeJpass_long6M_v_: oopMapCache.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_; +text: .text%__1cNTemplateTableUinvokevirtual_helper6FpnMRegisterImpl_22_v_; +text: .text%__1cJAssemblerEfxch6Mi_v_; +text: .text%__1cJAssemblerFfprem6M_v_; +text: .text%__1cSvframeStreamCommonbFfill_in_compiled_inlined_sender6M_i_; +text: .text%__1cJAssemblerJfnstsw_ax6M_v_; +text: .text%__1cJAssemblerEsahf6M_v_; +text: .text%__1cQObjectStartArraySset_covered_region6MnJMemRegion__v_; +text: .text%__1cQObjectStartArrayKinitialize6MnJMemRegion__v_; +text: .text%__1cNVM_DeoptimizeEname6kM_pkc_: 
vm_operations.o; +text: .text%__1cOtailjmpIndNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMVM_OperationNdoit_epilogue6M_v_: vm_operations.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC222i_v_; +text: .text%__1cJAssemblerEfchs6M_v_; +text: .text%__1cNVM_DeoptimizeEdoit6M_v_; +text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_; +text: .text%__1cSaddF24_reg_immNodeFreloc6kM_i_; +text: .text%__1cJAssemblerEfabs6M_v_; +text: .text%__1cJStubQdDueueOregister_queue6Fp0_v_; +text: .text%__1cOMacroAssemblerPcorrected_idivl6MpnMRegisterImpl__i_; +text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_; +text: .text%__1cRsubI_eReg_memNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFStatebB_sub_Op_PartialSubtypeCheck6MpknENode__v_; +text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_; +text: .text%__1cFStateL_sub_Op_OrL6MpknENode__v_; +text: .text%__1cPconvF2D_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFStateM_sub_Op_SubF6MpknENode__v_; +text: .text%__1cPOopTaskQdDueueSetOregister_queue6MipnMOopTaskQdDueue__v_; +text: .text%__1cMPeriodicTaskGenroll6M_v_; +text: .text%__1cNTemplateTableSgetfield_or_static6Fii_v_; +text: .text%__1cMPeriodicTask2t6MI_v_; +text: .text%__1cSestimate_path_freq6FpnENode__f_: loopnode.o; +text: .text%__1cFStateP_sub_Op_MoveL2D6MpknENode__v_; +text: .text%__1cNincI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNTemplateTableHcastore6F_v_; +text: .text%__1cUGcThreadCountClosureJdo_thread6MpnGThread__v_; +text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: memoryService.o; +text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_; +text: .text%__1cNTemplateTableLwide_lstore6F_v_; +text: .text%__1cPGCMemoryManager2t6M_v_; +text: .text%__1cKGCStatInfo2t6Mi_v_; +text: .text%__1cJMarkSweepUAdjustPointerClosure2t6Mi_v_: markSweep.o; +text: .text%__1cNstoreImmBNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLloadSSINodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cNTemplateTableDldc6Fi_v_; +text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableGdconst6Fi_v_; +text: .text%__1cNTemplateTableGlconst6Fi_v_; +text: .text%__1cNTemplateTableKinitialize6F_v_; +text: .text%__1cNTemplateTableUjvmti_post_field_mod6Fii_v_; +text: .text%__1cJAssemblerEaddl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerFidivl6MpnMRegisterImpl__v_; +text: .text%__1cOmulF24_regNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cJAssemblerEmull6MnHAddress__v_; +text: .text%__1cJAssemblerDorl6MnHAddress_i_v_; +text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEshll6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerFshrdl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl__v_; +text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_; +text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_; +text: .text%__1cJAssemblerFcpuid6M_v_; +text: .text%__1cJAssemblerEfldz6M_v_; +text: .text%__1cJAssemblerFfld_s6Mi_v_; +text: .text%__1cJAssemblerFfst_s6MnHAddress__v_; +text: .text%__1cJAssemblerFfst_d6MnHAddress__v_; +text: .text%__1cOaddF24_regNodeMcisc_version6Mi_pnIMachNode__; +text: 
.text%__1cNTemplateTableSputfield_or_static6Fii_v_; +text: .text%__1cSaddF24_reg_immNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cHMatcherQconvL2FSupported6F_ki_; +text: .text%__1cFStateP_sub_Op_ConvL2F6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_ConvL2D6MpknENode__v_; +text: .text%__1cGatomll6Fpkcpx_i_: arguments.o; +text: .text%__1cJArgumentsRcheck_memory_size6Fxx_n0AJArgsRange__; +text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_; +text: .text%__1cPconvD2F_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJArgumentsRparse_memory_size6Fpkcpxx_n0AJArgsRange__; +text: .text%__1cFStateP_sub_Op_ConvD2F6MpknENode__v_; +text: .text%__1cHnmethodVinvalidate_osr_method6M_v_; +text: .text%__1cJArgumentsXPropertyList_unique_add6FppnOSystemProperty_pkcpc_v_; +text: .text%__1cQAgentLibraryList2t6M_v_: arguments.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: arrayKlassKlass.o; +text: .text%__1cJAssemblerEmovb6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_; +text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_; +text: .text%__1cPconvL2D_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: parse1.o; +text: .text%__1cPconvD2F_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNmulI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cINegFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMFastLockNodeLis_FastLock6kM_pk0_: classes.o; +text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_; +text: .text%__1cOGenerateOopMapTadd_to_ref_init_set6Mi_v_; +text: .text%__1cIPSOldGenOgen_size_limit6M_I_; +text: .text%__1cIciObjectMis_classless6kM_i_: ciMethod.o; +text: .text%__1cIPSOldGenGresize6MI_v_; +text: .text%__1cMVirtualSpaceKinitialize6MnNReservedSpace_I_i_; +text: .text%__1cQorl_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNciMethodKlassEmake6F_p0_; +text: .text%__1cPconvL2F_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cUParallelScavengeHeapItop_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o; +text: .text%__1cRInvocationCounterMreinitialize6Fi_v_; +text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_; +text: .text%__1cFParseNfetch_monitor6MipnENode_2_2_; +text: .text%__1cMGCTaskThread2t6MpnNGCTaskManager_II_v_; +text: .text%__1cMGCTaskThreadFstart6M_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_native_entry6Mi_pC_; +text: .text%__1cMGCTaskThreadDrun6M_v_; +text: .text%__1cJLoadFNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cGThreadOis_Java_thread6kM_i_: gcTaskThread.o; +text: .text%__1cUInterpreterGeneratorLlock_method6M_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: universe.o; +text: .text%__1cXpartialSubtypeCheckNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cISubFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cISubFNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cIPSOldGenPinitialize_work6Mpkci_v_; +text: .text%lstat: perfMemory_solaris.o; +text: .text%__1cRComputeEntryStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cPfilename_to_pid6Fpkc_l_: perfMemory_solaris.o; +text: .text%Unsafe_CompareAndSwapObject; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cJCodeCachebGmake_marked_nmethods_not_entrant6F_v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cTis_directory_secure6Fpkc_i_: 
perfMemory_solaris.o; +text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerWdispatch_only_noverify6MnITosState__v_; +text: .text%__1cRCollectorCounters2t6Mpkci_v_; +text: .text%__1cZInterpreterMacroAssemblerPset_mdp_flag_at6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerSupdate_mdp_for_ret6MpnMRegisterImpl__v_; +text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_; +text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MpnMRegisterImpl_2_v_; +text: .text%__1cJTimeStamp2t6M_v_: runtimeService.o; +text: .text%__1cIPSOldGenYinitialize_virtual_space6MnNReservedSpace_I_v_; +text: .text%__1cIPSOldGenKinitialize6MnNReservedSpace_Ipkci_v_; +text: .text%__1cMVirtualSpace2t6M_v_; +text: .text%__1cLOptoRuntimeSfetch_monitor_Type6F_pknITypeFunc__; +text: .text%__1cbCAbstractInterpreterGeneratorTgenerate_error_exit6Mpkc_pC_; +text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__; +text: .text%__1cNinstanceKlassSremove_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cTAbstractInterpreterKinitialize6F_v_; +text: .text%__1cLNamedThread2t6M_v_; +text: .text%__1cRMachSpillCopyNodeHsize_of6kM_I_: ad_i486.o; +text: .text%__1cLNamedThreadIset_name6MpkcE_v_; +text: .text%__1cMelapsedTimer2t6M_v_: psAdaptiveSizePolicy.o; +text: .text%__1cUPSAdaptiveSizePolicybQpromo_increment_with_supplement_aligned_up6MI_I_; +text: .text%__1cUPSAdaptiveSizePolicyPpromo_increment6MII_I_; +text: .text%__1cOMacroAssemblerElshr6MpnMRegisterImpl_2i_v_; +text: .text%__1cCosbCis_thread_cpu_time_supported6F_i_; +text: .text%__1cLConvD2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOMacroAssemblerIsave_eax6MpnMRegisterImpl__v_; +text: .text%jni_IsInstanceOf: jni.o; +text: .text%jni_CallStaticBooleanMethod: jni.o; +text: .text%__1cRCardTableModRefBSbCpar_chunk_heapword_alignment6F_I_: tenuredGeneration.o; +text: .text%jni_CallStaticVoidMethodV: jni.o; +text: .text%__1cSReferenceProcessor2t6MnJMemRegion_iii_v_; +text: .text%__1cCosWactive_processor_count6F_i_; +text: .text%jni_NewWeakGlobalRef: jni.o; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_; +text: .text%__1cOMacroAssemblerLrestore_eax6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerFfremr6MpnMRegisterImpl__v_; +text: .text%__1cLMoveL2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__; +text: .text%__1cHOrLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKReflectionbFbasic_type_arrayklass_to_mirror6FpnMklassOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cCosHrealloc6FpvI_1_; +text: .text%__1cOMacroAssemblerGsincos6Miii_v_; +text: .text%__1cEMIN24CI_6FTA0_0_: tenuredGeneration.o; +text: .text%__1cXPartialSubtypeCheckNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cCosNsigexitnum_pd6F_i_; +text: .text%__1cNGCTaskManagerKset_thread6MIpnMGCTaskThread__v_; +text: .text%__1cLConvL2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOPSVirtualSpace2t6M_v_; +text: .text%__1cNdefaultStreamMhas_log_file6M_i_; +text: .text%__1cUGenericGrowableArrayGgrow646Mi_v_; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: oopMapCache.o; +text: .text%__1cOPSVirtualSpaceKinitialize6MnNReservedSpace_I_i_; +text: .text%__1cYalign_to_allocation_size6FI_I_: heap.o; +text: .text%__1cWcheck_compare_clipping6FipnGIfNode_pnHConNode_rpnENode__i_: cfgnode.o; +text: 
.text%__1cLConvL2FNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cCosVatomic_xchg_bootstrap6Fipoi_i_; +text: .text%__1cUParallelScavengeHeapOresize_old_gen6MI_v_; +text: .text%__1cIciObjectOis_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cRcheck_if_clipping6FpknKRegionNode_rpnGIfNode_5_i_: cfgnode.o; +text: .text%__1cHOrLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cNTemplateTableEdup26F_v_; +text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_; +text: .text%__1cOThreadCriticalKinitialize6F_v_; +text: .text%__1cIUniverseHgenesis6FpnGThread__v_; +text: .text%__1cKJavaThread2t6M_v_; +text: .text%__1cNTemplateTableHdup2_x16F_v_; +text: .text%__1cNTemplateTableHdup2_x26F_v_; +text: .text%Unsafe_SetNativeLong; +text: .text%__1cNTemplateTableEswap6F_v_; +text: .text%__cplus_fini_at_exit: CCrti.o; +text: .text%__1cNTemplateTableGdup_x16F_v_; +text: .text%__1cNTemplateTableDpop6F_v_; +text: .text%__1cIUniverseUreinitialize_itables6F_v_; +text: .text%__1cNReservedSpace2t6MIIipc_v_; +text: .text%__1cNuniverse_init6F_i_; +text: .text%__1cIUniversePinitialize_heap6F_i_; +text: .text%__1cOuniverse2_init6F_v_; +text: .text%__1cNTemplateTableEpop26F_v_; +text: .text%__1cNTemplateTableIgetfield6Fi_v_; +text: .text%__1cNTemplateTableDdup6F_v_; +text: .text%__1cSuniverse_post_init6F_v_; +text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_; +text: .text%__1cNTemplateTableGdup_x26F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o; +text: .text%__1cNTemplateTableElmul6F_v_; +text: .text%__1cNTemplateTableEldiv6F_v_; +text: .text%__1cNTemplateTableElrem6F_v_; +text: .text%__1cNTemplateTableElshl6F_v_; +text: .text%__1cNTemplateTableElshr6F_v_; +text: .text%__1cNTemplateTableFlushr6F_v_; +text: .text%__1cNTemplateTableElcmp6F_v_; +text: .text%__1cNTemplateTableJwide_iinc6F_v_; +text: .text%__1cNTemplateTableEiinc6F_v_; +text: .text%__1cNTemplateTableEineg6F_v_; +text: .text%__1cNTemplateTableElneg6F_v_; +text: .text%__1cNTemplateTableEdneg6F_v_; +text: .text%__1cNTemplateTableEfneg6F_v_; +text: .text%__1cNTemplateTableEirem6F_v_; +text: .text%__1cIUniverseYcompute_base_vtable_size6F_v_; +text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_; +text: .text%__1cNReservedSpaceUpage_align_size_down6FI_I_; +text: .text%__1cQVMOperationQdDueue2t6M_v_; +text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_; +text: .text%__1cNReservedSpaceYallocation_align_size_up6FI_I_; +text: .text%__1cNTemplateTableRfast_linearswitch6F_v_; +text: .text%__1cNTemplateTableMlookupswitch6F_v_; +text: .text%__1cOvmStructs_init6F_v_; +text: .text%__1cNTemplateTableLtableswitch6F_v_; +text: .text%__1cNTemplateTableIwide_ret6F_v_; +text: .text%Unsafe_PageSize; +text: .text%__1cNTemplateTableDret6F_v_; +text: .text%Unsafe_FreeMemory; +text: .text%__1cNTemplateTableEidiv6F_v_; +text: .text%__1cJTimeStampMmilliseconds6kM_x_; +text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cHVM_ExitEdoit6M_v_; +text: .text%__1cHVM_ExitEname6kM_pkc_: vm_operations.o; +text: .text%__1cNThreadServiceEinit6F_v_; +text: .text%__1cSThreadLocalStorageEinit6F_v_; +text: .text%__1cNCollectedHeapYlarge_typearray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_; +text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_; +text: .text%__1cIVMThreadEloop6M_v_; +text: .text%__1cPGlobalTLABStats2t6M_v_; +text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_; +text: .text%__1cIVMThreadDrun6M_v_; +text: 
.text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_; +text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cTtypeArrayKlassKlassVoop_is_typeArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cKTypeOopPtrFxdual6kM_pknEType__; +text: .text%__1cKTypeOopPtrEmake6FnHTypePtrDPTR_i_pk0_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cKVM_VersionWget_processor_features6F_v_; +text: .text%__1cKVM_VersionKinitialize6F_v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: tenuredGeneration.o; +text: .text%__1cNTemplateTableKinstanceof6F_v_; +text: .text%__1cNTemplateTableJcheckcast6F_v_; +text: .text%__1cNTemplateTableLarraylength6F_v_; +text: .text%__1cNTemplateTableJanewarray6F_v_; +text: .text%__1cNTemplateTableInewarray6F_v_; +text: .text%__1cNTemplateTableE_new6F_v_; +text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_; +text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_; +text: .text%__1cNTemplateTableMinvokestatic6Fi_v_; +text: .text%__1cNTemplateTableNinvokespecial6Fi_v_; +text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_; +text: .text%__1cNTemplateTableL_breakpoint6F_v_; +text: .text%__1cNTemplateTableGathrow6F_v_; +text: .text%__1cNTemplateTableMmonitorenter6F_v_; +text: .text%__1cNTemplateTableLmonitorexit6F_v_; +text: .text%__1cNTemplateTableEwide6F_v_; +text: .text%__1cNTemplateTableOmultianewarray6F_v_; +text: .text%__1cETypeRInitialize_shared6FpnHCompile__v_; +text: .text%__1cHVM_ExitNset_vm_exited6F_i_; +text: .text%__1cHVM_ExitbJwait_for_threads_in_native_to_block6F_i_; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cNTemplateTableJputstatic6Fi_v_; +text: .text%__1cNWatcherThread2t6M_v_; +text: .text%__1cNWatcherThreadDrun6M_v_; +text: .text%__1cNWatcherThreadFstart6F_v_; +text: .text%__1cVverificationType_exit6F_v_; +text: .text%__1cQVerificationTypeKinitialize6F_v_; +text: .text%__1cQVerificationTypeIfinalize6F_v_; +text: .text%__1cNTemplateTableIputfield6Fi_v_; +text: .text%__1cNWatcherThreadEstop6F_v_; +text: .text%__1cNTemplateTableJgetstatic6Fi_v_; +text: .text%__1cPVM_Version_init6F_v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cRAllocateTLSOffset6F_v_: threadLS_solaris_i486.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cYVM_Version_StubGeneratorTgenerate_getPsrInfo6M_pC_: vm_version_i486.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cLVtableStubsKinitialize6F_v_; +text: .text%__1cVverificationType_init6F_v_; +text: .text%__1cIVMThread2t6M_v_; +text: .text%__1cGThreadWset_as_starting_thread6M_i_; +text: .text%__1cIVMThreadGcreate6F_v_; +text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o; +text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o; +text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o; +text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o; +text: .text%JVM_RegisterUnsafeMethods; +text: .text%__1cQvtableStubs_init6F_v_; +text: .text%__1cFVTuneEexit6F_v_; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: typeArrayKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlassKlass.o; +text: .text%__1cKvtune_init6F_v_; 
+text: .text%__1cIUniversePcheck_alignment6FIIpkc_v_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: constantPoolKlass.o; +text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFJNIidKdeallocate6Fp0_v_; +text: .text%__1cNinstanceKlassZrelease_C_heap_structures6M_v_; +text: .text%__1cSset_init_completed6F_v_; +text: .text%__1cMexit_globals6F_v_; +text: .text%__1cMinit_globals6F_i_; +text: .text%__1cPvm_init_globals6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: indexSet.o; +text: .text%__1cLicache_init6F_v_; +text: .text%__1cWInlineCacheBuffer_init6F_v_; +text: .text%__1cRInlineCacheBufferKinitialize6F_v_; +text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_; +text: .text%__1cDhpiZinitialize_socket_library6F_i_; +text: .text%__1cDhpiKinitialize6F_i_; +text: .text%__1cLlog2_intptr6Fi_i_: heap.o; +text: .text%__1cICodeHeapFclear6M_v_; +text: .text%__1cICodeHeapHreserve6MIII_i_; +text: .text%__1cICodeHeap2t6M_v_; +text: .text%__1cSinstanceKlassKlassUoop_is_instanceKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cSInterpreterRuntimeSupdate_mdp_for_ret6FpnKJavaThread_i_v_; +text: .text%__1cSInterpreterRuntimeWcreate_klass_exception6FpnKJavaThread_pcpnHoopDesc__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o; +text: .text%__1cbCAbstractInterpreterGeneratorbCset_safepoints_for_all_bytes6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbEset_entry_points_for_all_bytes6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_; +text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cQinterpreter_init6F_v_; +text: .text%__1cZInterpreterMacroAssemblerbFset_method_data_pointer_for_bcp6M_v_; +text: .text%__1cZInterpreterMacroAssemblerUdispatch_only_normal6MnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl_33_v_; +text: .text%__1cVInterfaceSupport_init6F_v_; +text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_; +text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: instanceKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cSCommandLineFlagsExKuintxAtPut6FnXCommandLineFlagWithType_I_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o; +text: .text%__1cMelapsedTimer2t6M_v_: fprofiler.o; +text: .text%__1cScheck_ThreadShadow6F_v_; +text: .text%__1cNeventlog_init6F_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: dictionary.o; +text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_; +text: .text%__1cKDictionaryKfree_entry6MpnPDictionaryEntry__v_; +text: .text%__1cKDictionary2t6Mi_v_; +text: .text%__1cFStateQ_sub_Op_TailJump6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_NegF6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_MoveF2I6MpknENode__v_; +text: .text%__1cFStateO_sub_Op_CMoveL6MpknENode__v_; +text: .text%__1cODeoptimizationTload_class_by_index6FnSconstantPoolHandle_i_v_; +text: .text%__1cODeoptimizationTload_class_by_index6FnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKScopeValuePis_constant_int6kM_i_: debugInfo.o; +text: .text%__1cUConstantOopReadValuePis_constant_oop6kM_i_: debugInfo.o; +text: .text%__1cUConstantOopReadValue2t6MpnTDebugInfoReadStream__v_; +text: 
.text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: cpCacheKlass.o; +text: .text%__1cFframeVinterpreter_frame_mdp6kM_pC_; +text: .text%__1cRcheck_basic_types6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o; +text: .text%__1cRComputeEntryStackIdo_float6M_v_: generateOopMap.o; +text: .text%__1cNCellTypeStateLmake_bottom6F_0_: generateOopMap.o; +text: .text%__1cNCellTypeStateImake_top6F_0_: generateOopMap.o; +text: .text%__1cMelapsedTimer2t6M_v_: generateOopMap.o; +text: .text%__1cWResolveOopMapConflictsUdo_potential_rewrite6MpnGThread__nMmethodHandle__; +text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__I_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cRAlwaysTrueClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cKNoopGCTaskQcreate_on_c_heap6F_p0_; +text: .text%__1cNGCTaskManagerKthreads_do6MpnNThreadClosure__v_; +text: .text%__1cNGCTaskManagerKinitialize6M_v_; +text: .text%__1cNGCTaskManager2t6MI_v_; +text: .text%__1cXSynchronizedGCTaskQdDueue2t6MpnLGCTaskQdDueue_pnFMutex__v_; +text: .text%__1cLGCTaskQdDueueQcreate_on_c_heap6F_p0_; +text: .text%__1cHGCStats2t6M_v_; +text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_; +text: .text%__1cXSignatureHandlerLibraryQset_handler_blob6F_pC_; +text: .text%JVM_MaxMemory; +text: .text%JVM_Halt; +text: .text%JVM_InitProperties; +text: .text%__1cQjni_handles_init6F_v_; +text: .text%__1cKJNIHandlesKinitialize6F_v_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_; +text: .text%JNI_CreateJavaVM; +text: .text%__1cVquicken_jni_functions6F_v_; +text: .text%__1cbDinitializeDirectBufferSupport6FpnHJNIEnv___i_: jni.o; +text: .text%lookupDirectBufferClasses: jni.o; +text: .text%__1cXjni_GetDoubleField_addr6F_pC_; +text: .text%JVM_GetClassDeclaredMethods; +text: .text%__1cLJvmtiExportNpost_vm_start6F_v_; +text: .text%__1cLJvmtiExportQenter_live_phase6F_v_; +text: .text%__1cLJvmtiExportRenter_start_phase6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o; +text: .text%__1cUJvmtiEventControllerIvm_death6F_v_; +text: .text%__1cUJvmtiEventControllerHvm_init6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_start6F_v_; +text: .text%__1cRJvmtiEventEnabledFclear6M_v_; +text: .text%__1cRJvmtiEventEnabled2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o; +text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiEnvBase.o; +text: .text%__1cNGrowableArray4CpnMJvmtiEnvBase__2t6Mii_v_: jvmtiEnvBase.o; +text: .text%JVM_SupportsCX8; +text: .text%__1cbEinitialize_converter_functions6F_v_; +text: .text%JVM_Socket; +text: .text%JVM_InitializeSocketLibrary; +text: .text%__1cWjni_GetFloatField_addr6F_pC_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectPcompute_offsets6F_v_; +text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_; +text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_; +text: 
.text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cHvm_exit6Fi_v_; +text: .text%__1cLbefore_exit6FpnKJavaThread__v_; +text: .text%__1cQprint_statistics6F_v_; +text: .text%__1cWinvocationCounter_init6F_v_; +text: .text%__1cLlog2_intptr6Fi_i_: interpreter_i486.o; +text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_throw_exception6M_v_; +text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_; +text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_; +text: .text%__1cUInterpreterGeneratorXgenerate_abstract_entry6M_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_StackOverflowError_handler6M_pC_; +text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_: interpreterRuntime.o; +text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_; +text: .text%__1cVjni_GetLongField_addr6F_pC_; +text: .text%__1cUjni_GetIntField_addr6F_pC_; +text: .text%__1cWjni_GetShortField_addr6F_pC_; +text: .text%__1cVjni_GetCharField_addr6F_pC_; +text: .text%__1cVjni_GetByteField_addr6F_pC_; +text: .text%__1cYjni_GetBooleanField_addr6F_pC_; +text: .text%jni_ToReflectedMethod: jni.o; +text: .text%__1cQjavaClasses_init6F_v_; +text: .text%__1cLJavaClassesPcompute_offsets6F_v_; +text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_; +text: .text%__1cZsun_misc_AtomicLongCSImplPcompute_offsets6F_v_; +text: .text%__1cPjava_nio_BufferPcompute_offsets6F_v_; +text: .text%__1cQjava_lang_SystemPcompute_offsets6F_v_; +text: .text%__1cbIjava_security_AccessControlContextPcompute_offsets6F_v_; +text: .text%__1cYsun_reflect_ConstantPoolPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_; +text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: cpCacheKlass.o; +text: .text%__1cJAssemblerEmull6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEadcl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEadcl6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerGmovsxw6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_2_v_; +text: .text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: arrayKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: arrayKlassKlass.o; +text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o; +text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_; +text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_; +text: .text%__1cJArgumentsbSparse_java_tool_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsbNparse_java_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsVfinalize_vm_init_args6FpnMSysClassPath_i_i_; +text: .text%__1cJArgumentsWparse_each_vm_init_arg6FpknOJavaVMInitArgs_pnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_; +text: .text%__1cJArgumentsZcheck_vm_args_consistency6F_i_; +text: .text%__1cJArgumentsbOparse_java_compiler_environment_variable6F_v_; 
+text: .text%__1cJAssemblerEsbbl6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerJdecrement6MpnMRegisterImpl_i_v_; +text: .text%__1cOMacroAssemblerLextend_sign6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerFfaddp6Mi_v_; +text: .text%__1cJAssemblerGfdivrp6Mi_v_; +text: .text%__1cJAssemblerHfdivr_d6MnHAddress__v_; +text: .text%__1cJAssemblerHfdivr_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfmul_s6MnHAddress__v_; +text: .text%__1cJAssemblerHfsubr_d6MnHAddress__v_; +text: .text%__1cJAssemblerHfsubr_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfadd_d6MnHAddress__v_; +text: .text%__1cJAssemblerGfadd_s6MnHAddress__v_; +text: .text%__1cJAssemblerFfsqrt6M_v_; +text: .text%__1cJAssemblerEfcos6M_v_; +text: .text%__1cJAssemblerEfsin6M_v_; +text: .text%__1cJAssemblerEsetb6Mn0AJCondition_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerExchg6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerEsubl6MnHAddress_i_v_; +text: .text%__1cJAssemblerFshldl6MpnMRegisterImpl_2_v_; +text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_; +text: .text%__1cHi2sNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHi2bNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreBNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLconvP2BNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOtailjmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_i486_expand.o; +text: .text%__1cRaddL_eReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLconvP2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIcp2bNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_i486.o; +text: .text%__1cTconvI2F_SSF_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQmulD_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOtailjmpIndNodeFreloc6kM_i_; +text: .text%__1cTconvI2F_SSF_memNodeFreloc6kM_i_; +text: .text%__1cQmulD_reg_memNodeFreloc6kM_i_; +text: .text%__1cQaccessFlags_init6F_v_; +text: .text%__1cPconvI2L_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_; +text: .text%__1cQno_shared_spaces6F_v_: arguments.o; +text: .text%__1cJArgumentsMget_property6Fpkc_2_; +text: .text%__1cJArgumentsVprocess_settings_file6Fpkcii_i_; +text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_; +text: .text%__1cMSysClassPathPexpand_endorsed6M_v_; +text: .text%__1cMSysClassPath2T6M_v_; +text: .text%__1cMSysClassPath2t6Mpkc_v_; +text: .text%__1cJArgumentsWinit_system_properties6F_v_; +text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_; +text: .text%__1cOchunkpool_init6F_v_; +text: .text%__1cWAdjoiningVirtualSpaces2t6MnNReservedSpace_III_v_; +text: .text%__1cUAdjoiningGenerations2t6MnNReservedSpace_IIIIIII_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_i486_pipeline.o; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cOcompiler2_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o; +text: .text%__1cMelapsedTimer2t6M_v_: compilationPolicy.o; +text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_; +text: .text%__1cWcompilationPolicy_init6F_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_; +text: 
.text%__1cNCollectedHeap2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o; +text: .text%__1cOcodeCache_init6F_v_; +text: .text%__1cJCodeCacheKinitialize6F_v_; +text: .text%__1cNExceptionBlob2n6FII_pv_; +text: .text%__1cNExceptionBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNExceptionBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cQUncommonTrapBlob2n6FII_pv_; +text: .text%__1cQUncommonTrapBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cQUncommonTrapBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cSDeoptimizationBlob2n6FII_pv_; +text: .text%__1cNCompileBrokerQcompilation_init6FpnQAbstractCompiler__v_; +text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constantPoolKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: constantPoolKlass.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: constantPoolKlass.o; +text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cTcompilerOracle_init6F_v_; +text: .text%__1cOCompilerOracleRparse_from_string6Fpkc_v_; +text: .text%__1cOCompilerOraclePparse_from_file6F_v_; +text: .text%__1cHcc_file6F_pkc_: compilerOracle.o; +text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_; +text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o; +text: .text%__1cNCompileBrokerQset_should_block6F_v_; +text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_; +text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: cfgnode.o; +text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_I_; +text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_; +text: .text%__1cMTailJumpNode2t6MpnENode_22222_v_; +text: .text%__1cKC2CompilerKinitialize6M_v_; +text: .text%__1cHCompileRpd_compiler2_init6F_v_; +text: .text%__1cJBytecodesNpd_initialize6F_v_; +text: .text%__1cObytecodes_init6F_v_; +text: .text%__1cJBytecodesKinitialize6F_v_; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_; +text: .text%__1cOMacroAssemblerQsign_extend_byte6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerRsign_extend_short6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerIlcmp2int6MpnMRegisterImpl_222_v_; +text: .text%__1cOMacroAssemblerElshl6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerElmul6Mii_v_; +text: .text%__1cOMacroAssemblerElneg6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerGc2bool6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl_3_v_; +text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_; +text: .text%__1cSDeoptimizationBlob2t6MpnKCodeBuffer_ipnJOopMapSet_iiii_v_; +text: .text%__1cLMoveF2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cTClassLoadingServiceVnotify_class_unloaded6FpnNinstanceKlass_i_v_; +text: 
.text%__1cTClassLoadingServiceEinit6F_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: classLoader.o; +text: .text%__1cQclassLoader_init6F_v_; +text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_; +text: .text%__1cLClassLoaderKinitialize6F_v_; +text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_; +text: .text%__1cLClassLoaderQload_zip_library6F_v_; +text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_; +text: .text%__1cPClassFileParserYjava_lang_Class_fix_post6Mpi_v_; +text: .text%__1cPClassFileParserXjava_lang_Class_fix_pre6MpnOobjArrayHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cRciArrayKlassKlassUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_; +text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_; +text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_; +text: .text%__1cMciKlassKlassEmake6F_p0_; +text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_; +text: .text%__1cNRegisterSaverYrestore_result_registers6FpnOMacroAssembler__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o; +text: .text%__1cORuntimeServiceYrecord_application_start6F_v_; +text: .text%__1cORuntimeServiceEinit6F_v_; +text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray5_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray4_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray3_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray2_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray1_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeIgenerate6FpnFciEnv__v_; +text: .text%__1cWResolveOopMapConflictsOreport_results6kM_i_: rewriter.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o; +text: .text%__1cQRelocationHolder2t6M_v_: relocInfo.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: regmask.o; +text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_; +text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_; +text: .text%__1cLStatSamplerHdestroy6F_v_; +text: .text%__1cLStatSamplerJdisengage6F_v_; +text: .text%__1cLStatSamplerGengage6F_v_; +text: .text%__1cLStatSamplerKinitialize6F_v_; +text: .text%__1cNcarSpace_init6F_v_; +text: .text%__1cICarSpaceEinit6F_v_; +text: .text%__1cNSharedRuntimebIinitialize_StrictMath_entry_points6F_v_; +text: .text%__1cNSharedRuntimeUlookup_function_DD_D6FrpFpnHJNIEnv__pnH_jclass_dd_dpkc_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: sharedHeap.o; +text: .text%__1cLOptoRuntimeYgenerate_arraycopy_stubs6F_v_; +text: .text%__1cLOptoRuntimebPgenerate_polling_page_return_handler_blob6F_v_; +text: .text%__1cLOptoRuntimebSgenerate_polling_page_safepoint_handler_blob6F_v_; +text: .text%__1cLOptoRuntimebPgenerate_illegal_instruction_handler_blob6F_v_; +text: .text%__1cLOptoRuntimeUsetup_exception_blob6F_v_; +text: .text%__1cLOptoRuntimeWfill_in_exception_blob6F_v_; +text: .text%__1cLOptoRuntimebBgenerate_uncommon_trap_blob6F_v_; +text: .text%__1cHRegMask2t6Miiiii_v_: regmask.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o; +text: .text%__1cMelapsedTimer2t6M_v_: psMarkSweep.o; +text: .text%__1cTPSAlwaysTrueClosure2t6M_v_: psMarkSweep.o; +text: .text%__1cLPSMarkSweepKinitialize6F_v_; +text: 
.text%__1cbAPSGCAdaptivePolicyCounters2t6MpkciipnUPSAdaptiveSizePolicy__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psAdaptiveSizePolicy.o; +text: .text%__1cUPSAdaptiveSizePolicy2t6MIIIIIddI_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: placeholders.o; +text: .text%__1cQPlaceholderTable2t6Mi_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: phase.o; +text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_; +text: .text%__1cKPerfMemoryUcreate_memory_region6FI_v_; +text: .text%__1cUdelete_shared_memory6FpcI_v_: perfMemory_solaris.o; +text: .text%__1cUcreate_shared_memory6FI_pc_: perfMemory_solaris.o; +text: .text%__1cSmmap_create_shared6FI_pc_: perfMemory_solaris.o; +text: .text%__1cbAcreate_sharedmem_resources6Fpkc1I_i_: perfMemory_solaris.o; +text: .text%__1cRmake_user_tmp_dir6Fpkc_i_: perfMemory_solaris.o; +text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cIPSOldGen2t6MnNReservedSpace_IIIIpkci_v_; +text: .text%__1cSReferenceProcessorMinit_statics6F_v_; +text: .text%__1cXreferenceProcessor_init6F_v_; +text: .text%__1cURecompilationMonitorbGstart_recompilation_monitor_task6F_v_; +text: .text%__1cKPSYoungGenUset_space_boundaries6MII_v_; +text: .text%__1cKPSYoungGenbGcompute_initial_space_boundaries6M_v_; +text: .text%__1cKPSYoungGenPinitialize_work6M_v_; +text: .text%__1cKPSYoungGenKinitialize6MnNReservedSpace_I_v_; +text: .text%__1cKPSYoungGenYinitialize_virtual_space6MnNReservedSpace_I_v_; +text: .text%__1cKPSYoungGen2t6MIII_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o; +text: .text%__1cMelapsedTimer2t6M_v_: psScavenge.o; +text: .text%__1cKPSScavengeKinitialize6F_v_; +text: .text%__1cPOopTaskQdDueueSet2t6Mi_v_: psPromotionManager.o; +text: .text%__1cSPSPromotionManagerKinitialize6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psPromotionLAB.o; +text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: psPromotionLAB.o; +text: .text%__1cRalign_object_size6Fi_i_: psPromotionLAB.o; +text: .text%__1cJPSPermGen2t6MnNReservedSpace_IIIIpkci_v_; +text: .text%__1cIPSOldGen2t6MIIIpkci_v_; +text: .text%__1cLStatSamplerUcreate_misc_perfdata6F_v_; +text: .text%__1cNTemplateTableGbaload6F_v_; +text: .text%__1cNTemplateTableGaaload6F_v_; +text: .text%__1cNTemplateTableGdaload6F_v_; +text: .text%__1cNTemplateTableGfaload6F_v_; +text: .text%__1cNTemplateTableGlaload6F_v_; +text: .text%__1cNTemplateTableGiaload6F_v_; +text: .text%__1cNTemplateTableKwide_aload6F_v_; +text: .text%__1cNTemplateTableKwide_dload6F_v_; +text: .text%__1cNTemplateTableKwide_fload6F_v_; +text: .text%__1cNTemplateTableKwide_lload6F_v_; +text: .text%__1cNTemplateTableKwide_iload6F_v_; +text: .text%__1cNTemplateTableFaload6F_v_; +text: .text%__1cNTemplateTableFdload6F_v_; +text: .text%__1cNTemplateTableFfload6F_v_; +text: .text%__1cNTemplateTableFlload6F_v_; +text: .text%__1cNTemplateTableKfast_iload6F_v_; +text: .text%__1cNTemplateTableLfast_iload26F_v_; +text: .text%__1cNTemplateTableFiload6F_v_; +text: .text%__1cNTemplateTableGcaload6F_v_; +text: .text%__1cNTemplateTableHsastore6F_v_; +text: .text%__1cNTemplateTableHbastore6F_v_; +text: .text%__1cNTemplateTableHaastore6F_v_; +text: .text%__1cNTemplateTableHdastore6F_v_; +text: .text%__1cNTemplateTableHfastore6F_v_; +text: .text%__1cNTemplateTableHlastore6F_v_; +text: .text%__1cNTemplateTableHiastore6F_v_; +text: .text%__1cNTemplateTableLwide_astore6F_v_; +text: .text%__1cNTemplateTableLwide_dstore6F_v_; +text: .text%__1cNTemplateTableLwide_fstore6F_v_; +text: .text%__1cNTemplateTableGastore6F_v_; +text: 
.text%__1cNTemplateTableGdstore6F_v_; +text: .text%__1cNTemplateTableGfstore6F_v_; +text: .text%__1cNTemplateTableGlstore6F_v_; +text: .text%__1cNTemplateTableGistore6F_v_; +text: .text%__1cNTemplateTableHaload_06F_v_; +text: .text%__1cNTemplateTableGsaload6F_v_; +text: .text%__1cNTemplateTableMfast_icaload6F_v_; +text: .text%__1cNTemplateTableGldc2_w6F_v_; +text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: symbolKlass.o; +text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_: symbolKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: symbolKlass.o; +text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cINegFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cISubFNodeDsub6kMpknEType_3_3_; +text: .text%__1cSstubRoutines_init26F_v_; +text: .text%__1cSstubRoutines_init16F_v_; +text: .text%__1cMStubRoutinesLinitialize26F_v_; +text: .text%__1cMStubRoutinesLinitialize16F_v_; +text: .text%__1cNStubGeneratorTgenerate_verify_oop6M_pC_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorYgenerate_get_previous_fp6M_pC_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorUcreate_control_words6M_v_: stubGenerator_i486.o; +text: .text%__1cLStatSamplerXcreate_sampled_perfdata6F_v_; +text: .text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_; +text: .text%__1cNTemplateTableGsipush6F_v_; +text: .text%__1cNTemplateTableGbipush6F_v_; +text: .text%__1cNTemplateTableLaconst_null6F_v_; +text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_; +text: .text%__1cNTemplateTableDnop6F_v_; +text: .text%__1cNTemplateTableNpd_initialize6F_v_; +text: .text%__1cStemplateTable_init6F_v_; +text: .text%__1cNTemplateTableDjsr6F_v_; +text: .text%__1cNTemplateTableFjsr_w6F_v_; +text: .text%__1cNTemplateTableGgoto_w6F_v_; +text: .text%__1cNTemplateTableF_goto6F_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_; +text: .text%__1cMPeriodicTaskJdisenroll6M_v_; +text: .text%__1cMPeriodicTaskLis_enrolled6kM_i_; +text: .text%__1cMPeriodicTask2T5B6M_v_; +text: .text%__1cQSystemDictionarybDinitialize_basic_type_mirrors6FpnGThread__v_; +text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_; +text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_; +text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_; +text: .text%__1cLremove_file6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cNMemoryServicebFadd_parallel_scavenge_heap_info6FpnUParallelScavengeHeap__v_; +text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_; +text: .text%__1cMCodeHeapPool2t6MpnICodeHeap_pkci_v_; +text: .text%__1cYSurvivorMutableSpacePool2t6MpnKPSYoungGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cUEdenMutableSpacePool2t6MpnKPSYoungGen_pnMMutableSpace_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cQPSGenerationPool2t6MpnJPSPermGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cQPSGenerationPool2t6MpnIPSOldGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cNMemoryManagerbEget_psMarkSweep_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cNMemoryManagerbDget_psScavenge_memory_manager6F_pnPGCMemoryManager__; +text: 
.text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: matcher.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o; +text: .text%__1cJMarkSweepSMarkAndPushClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepRFollowRootClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepSFollowStackClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepOIsAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepQKeepAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cOmarksweep_init6F_v_; +text: .text%__1cNMemoryServiceXadd_psYoung_memory_pool6FpnKPSYoungGen_pnNMemoryManager_4_v_; +text: .text%__1cKmutex_init6F_v_; +text: .text%__1cMMutableSpaceOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodLiveness.o; +text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodKlass.o; +text: .text%__1cLmethodKlassOset_alloc_size6MI_v_: methodKlass.o; +text: .text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cHRetDataKis_RetData6M_i_: methodDataOop.o; +text: .text%__1cHRetDataJfixup_ret6MinQmethodDataHandle__pC_; +text: .text%__1cHRetDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodDataKlass.o; +text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_: methodDataKlass.o; +text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o; +text: .text%__1cNGrowableArray4CpnKMemoryPool__2t6Mii_v_: memoryService.o; +text: .text%__1cNGrowableArray4CpnNMemoryManager__2t6Mii_v_: memoryService.o; +text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_; +text: .text%__1cNMemoryServiceWadd_psPerm_memory_pool6FpnJPSPermGen_pnNMemoryManager__v_; +text: .text%__1cNMemoryServiceVadd_psOld_memory_pool6FpnIPSOldGen_pnNMemoryManager__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: klassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: klassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: klassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: klassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassUoop_is_instanceKlass6kM_i_: klassKlass.o; +text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o; +text: .text%__1cJMemRegion2t6M_v_: jvmtiTagMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o; +text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiImpl.o; +text: .text%__1cNGrowableArray4CpnPJvmtiRawMonitor__2t6Mii_v_: jvmtiImpl.o; +text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_; +text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_death6F_v_; +text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: klassKlass.o; +text: .text%__1cJTimeStamp2t6M_v_: management.o; +text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_; +text: .text%__1cKManagementKinitialize6FpnGThread__v_; +text: .text%__1cKManagementEinit6F_v_; +text: 
.text%__1cPmanagement_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: machnode.o; +text: .text%__1cGThreadMis_VM_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cXLowMemoryDetectorThreadbCis_hidden_from_external_view6kM_i_: lowMemoryDetector.o; +text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_; +text: .text%__1cRLowMemoryDetectorUhas_pending_requests6F_i_; +text: .text%__1cRLowMemoryDetectorKinitialize6F_v_; +text: .text%__1cKRegionNodeUdepends_only_on_test6kM_i_: loopnode.o; +text: .text%__1cNIdealLoopTreeUmerge_many_backedges6MpnOPhaseIdealLoop__v_; +text: .text%__1cKfix_parent6FpnNIdealLoopTree_1_v_: loopnode.o; +text: .text%__1cNIdealLoopTreeQsplit_outer_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: loaderConstraints.o; +text: .text%__1cVLoaderConstraintTable2t6Mi_v_; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: library_call.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: objArrayKlass.o; +text: .text%__1cUParallelScavengeHeapMmax_capacity6kM_I_; +text: .text%__1cUParallelScavengeHeapPpost_initialize6M_v_; +text: .text%__1cUParallelScavengeHeapKinitialize6M_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o; +text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: parGCAllocBuffer.o; +text: .text%__1cRalign_object_size6Fi_i_: parGCAllocBuffer.o; +text: .text%__1cHoopDescLheader_size6F_i_: parGCAllocBuffer.o; +text: .text%__SLIP.DELETER__C: ostream.o; +text: .text%__1cMostream_exit6F_v_; +text: .text%__1cQostream_init_log6F_v_; +text: .text%__1cMostream_init6F_v_; +text: .text%__1cNdefaultStreamEinit6M_v_; +text: .text%__1cCosMsupports_sse6F_i_; +text: .text%__1cVcheck_for_sse_support6F_v_: os_solaris_i486.o; +text: .text%__1cCosGinit_26F_i_; +text: .text%__1cCosEinit6F_v_; +text: .text%__1cCosHSolarisUsynchronization_init6F_v_; +text: .text%__1cCosHSolarisOlibthread_init6F_v_; +text: .text%__1cUParallelScavengeHeapYpermanent_object_iterate6MpnNObjectClosure__v_; +text: .text%__1cWget_sharedmem_filename6Fpkci_pc_: perfMemory_solaris.o; +text: .text%__1cNget_user_name6Fl_pc_: perfMemory_solaris.o; +text: .text%__1cQget_user_tmp_dir6Fpkc_pc_: perfMemory_solaris.o; +text: .text%__1cKPerfMemoryHdestroy6F_v_; +text: .text%__1cKPerfMemoryKinitialize6F_v_; +text: .text%__1cPperfMemory_exit6F_v_; +text: .text%__1cPperfMemory_init6F_v_; +text: .text%__1cMPerfDataListFclone6M_p0_; +text: .text%__1cMPerfDataList2t6Mp0_v_; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__; +text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__; +text: .text%__1cPPerfDataManagerHdestroy6F_v_; +text: .text%__1cFParseMjump_if_join6MpnENode_2_2_; +text: .text%__1cPGenerationSizerQinitialize_flags6M_v_: parallelScavengeHeap.o; +text: .text%__1cbCTwoGenerationCollectorPolicyMrem_set_name6M_nJGenRemSetEName__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapEkind6M_nNCollectedHeapEName__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapIend_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapEheap6F_p0_; +text: .text%__1cUParallelScavengeHeapNgc_threads_do6kMpnNThreadClosure__v_; +text: .text%__1cOisT2_libthread6F_i_; +text: .text%__1cCosNset_boot_path6Fcc_i_; +text: .text%__1cCosLinit_random6Fl_v_; +text: .text%__1cCosGstrdup6Fpkc_pc_; +text: 
.text%__1cCosXterminate_signal_thread6F_v_; +text: .text%__1cCosLsignal_init6F_v_; +text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o; +text: .text%__1cQDoNothingClosure2t6M_v_: oopMap.o; +text: .text%__1cXonStackReplacement_init6F_v_; +text: .text%__1cSOnStackReplacementKinitialize6F_v_; +text: .text%__1cNObjectMonitorREntryQdDueue_unlink6MpnMObjectWaiter__v_; +text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: objArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassUoop_is_objArrayKlass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlass.o; +text: .text%__1cCosXis_server_class_machine6F_i_; +text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_; +text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o; +text: .text%__1cNpriocntl_stub6FinGidtype_lipc_l_: os_solaris.o; +text: .text%__1cCosHSolarisRmpss_sanity_check6F_v_; +text: .text%__1cCosPuncommit_memory6FpcI_i_; +text: .text%__1cCosLsignal_wait6F_i_; +text: .text%__1cVcheck_pending_signals6Fi_i_: os_solaris.o; +text: .text%__1cCosNsignal_notify6Fi_v_; +text: .text%__1cCosOsignal_init_pd6F_v_; +text: .text%__1cCosHSolarisPinit_signal_mem6F_v_; +text: .text%__1cCosSget_temp_directory6F_pkc_; +text: .text%__1cCosbDallocate_thread_local_storage6F_i_; +text: .text%__1cCosHSolarisQsignal_sets_init6F_v_; +text: .text%__1cCosScreate_main_thread6FpnGThread__i_; +text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o; +text: .text%__1cCosbDinit_system_properties_values6F_v_; +text: .text%__1cCosHSolarisWinitialize_system_info6F_v_; +text: .text%__1cCosPphysical_memory6F_X_; +text: .text%__1cSThreadLocalStorageHpd_init6F_v_;
--- /dev/null 2009-07-29 04:07:07.141863912 +0100
+++ new/hotspot/make/solaris/makefiles/reorder_COMPILER2_sparc 2009-08-01 04:16:54.119050331 +0100
@@ -0,0 +1,7113 @@
+data = R0x2000; +text = LOAD ?RXO; + + +text: .text%__1cLOptoRuntimeLjshort_copy6Fph1I_v_; +text: .text%__1cLOptoRuntimeTarrayof_jshort_copy6FpnIHeapWord_2I_v_; +text: .text%__1cSPSPromotionManagerWcopy_to_survivor_space6MpnHoopDesc__2_; +text: .text%__1cCosOjavaTimeMillis6F_x_; +text: .text%__1cQIndexSetIteratorQadvance_and_next6M_I_; +text: .text%__1cNinstanceKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cJMarkSweepO_mark_and_push6FppnHoopDesc__v_; +text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMOopTaskQdDueueKpop_global6MrpnHoopDesc__i_; +text: .text%__1cIMachNodeHis_Mach6M_p0_: ad_sparc_misc.o; +text: .text%__1cIPhaseIFGIadd_edge6MII_i_; +text: .text%__1cQIndexSetIterator2t6MpnIIndexSet__v_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: ifg.o; +text: .text%__1cJMarkSweepPmark_and_follow6FppnHoopDesc__v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: ad_sparc_misc.o; +text: .text%__1cENodeEjvms6kM_pnIJVMState__; +text: .text%__1cHRegMaskFis_UP6kM_i_; +text: 
.text%__1cUGenericGrowableArrayLraw_at_grow6MipknEGrET__pv_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: ad_sparc_misc.o; +text: .text%__1cIMachNodeNrematerialize6kM_i_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: classes.o; +text: .text%__1cIProjNodeHis_Proj6M_p0_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: classes.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: classes.o; +text: .text%__1cIIndexSetWalloc_block_containing6MI_pn0AIBitBlock__; +text: .text%__1cENodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cETypeDcmp6Fkpk03_i_; +text: .text%__1cENodeHis_Copy6kM_I_: classes.o; +text: .text%__1cENodeHlatency6MI_I_; +text: .text%__1cHRegMaskJis_bound16kM_i_; +text: .text%__1cDff16FI_i_; +text: .text%__1cHRegMaskESize6kM_I_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: ad_sparc_misc.o; +text: .text%__1cXresource_allocate_bytes6FI_pc_; +text: .text%__1cIMachNodeJideal_reg6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: classes.o; +text: .text%__1cRMachSpillCopyNodeMis_SpillCopy6M_p0_: ad_sparc.o; +text: .text%__1cENodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJVectorSet2R6MI_rnDSet__; +text: .text%__1cHRegMaskJis_bound26kM_i_; +text: .text%__1cNSharedRuntimeElmul6Fxx_x_; +text: .text%__1cPOopTaskQdDueueSetFsteal6MipirpnHoopDesc__i_; +text: .text%__1cIMachNodeGOpcode6kM_i_; +text: .text%__1cENodeGpinned6kM_i_: classes.o; +text: .text%__1cJiRegIOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cIIndexSetKinitialize6MI_v_; +text: .text%__1cITypeNodeLbottom_type6kM_pknEType__; +text: .text%__1cNRelocIteratorEnext6M_i_: relocInfo.o; +text: .text%__1cRMachSpillCopyNodeHis_Copy6kM_I_: ad_sparc.o; +text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_; +text: .text%__1cKTypeOopPtrFklass6kM_pnHciKlass__: type.o; +text: .text%__1cHPhiNodeGis_Phi6M_p0_: cfgnode.o; +text: .text%__1cETypeFuhash6Fkpk0_i_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: chaitin.o; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_: callnode.o; +text: .text%__1cENodeIout_grow6MI_v_; +text: .text%__1cIMachNodeHis_Mach6M_p0_: ad_sparc.o; +text: .text%__1cJCProjNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cOloadConI13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNobjArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cENodeHadd_req6Mp0_v_; +text: .text%__1cEDictGInsert6Mpv1i_1_; +text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cJMultiNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeGis_CFG6kM_i_: cfgnode.o; +text: .text%__1cOloadConI13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOloadConI13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJMarkSweepOIsAliveClosureLdo_object_b6MpnHoopDesc__i_: markSweep.o; +text: .text%__1cKRegionNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cIProjNodeGis_CFG6kM_i_; +text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cINodeHashLhash_delete6MpknENode__i_; +text: .text%__1cFArenaIcontains6kMpkv_i_; +text: .text%__1cOloadConI13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cICallNodeKmatch_edge6kMI_I_; +text: .text%__1cINodeHashQhash_find_insert6MpnENode__2_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: classes.o; +text: .text%__1cHPhiNodeGOpcode6kM_i_; +text: .text%__1cHTypeIntCeq6kMpknEType__i_; +text: 
.text%__1cKbranchNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cIProjNodeGpinned6kM_i_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: classes.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: classes.o; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__: ad_sparc_misc.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: cfgnode.o; +text: .text%__1cIProjNodeGOpcode6kM_i_; +text: .text%__1cETypeIhashcons6M_pk0_; +text: .text%__1cOPhaseIdealLoopUbuild_loop_late_post6MpnENode_pk0_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: nmethod.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: ad_sparc.o; +text: .text%__1cMPhaseChaitinTinterfere_with_live6MIpnIIndexSet__v_; +text: .text%__1cWNode_Backward_IteratorEnext6M_pnENode__; +text: .text%__1cNIdealLoopTreeJis_member6kMpk0_i_; +text: .text%__1cMPhaseChaitinKelide_copy6MpnENode_ipnFBlock_rnJNode_List_6i_i_; +text: .text%__1cQObjectStartArrayMobject_start6MpnIHeapWord__2_: cardTableExtension.o; +text: .text%__1cHCompileRvalid_bundle_info6MpknENode__i_; +text: .text%__1cENodeNrematerialize6kM_i_: classes.o; +text: .text%__1cMMachCallNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cHCompileNnode_bundling6MpknENode__pnGBundle__; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopopts.o; +text: .text%__1cOlower_pressure6FpnDLRG_IpnFBlock_pI4_v_: ifg.o; +text: .text%__1cGIfNodeGOpcode6kM_i_; +text: .text%__1cOPhaseIdealLoopYsplit_if_with_blocks_pre6MpnENode__2_; +text: .text%__1cOPhaseIdealLoopZsplit_if_with_blocks_post6MpnENode__v_; +text: .text%__1cIUniverseMnon_oop_word6F_pv_; +text: .text%__1cDLRGOcompute_degree6kMr0_i_; +text: .text%__1cFArenaIArealloc6MpvII_1_; +text: .text%__1cIConINodeGOpcode6kM_i_; +text: .text%__1cETypeEmeet6kMpk0_2_; +text: .text%__1cENode2t6MI_v_; +text: .text%__1cRMachSpillCopyNodeJideal_reg6kM_I_: ad_sparc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cIPipelineXfunctional_unit_latency6kMIpk0_I_; +text: .text%__1cWPSScavengeRootsClosureGdo_oop6MppnHoopDesc__v_: psTasks.o; +text: .text%__1cENodeHis_Copy6kM_I_: cfgnode.o; +text: .text%__1cKSchedulingLanti_do_def6MpnFBlock_pnENode_nHOptoRegEName_i_v_; +text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: cfgnode.o; +text: .text%__1cKIfTrueNodeGOpcode6kM_i_; +text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_: ad_sparc.o; +text: .text%__1cIMachNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cETypeJsingleton6kM_i_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: coalesce.o; +text: .text%__1cIMachNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cJloadPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIPhaseIFGQeffective_degree6kMI_i_; +text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_; +text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_; +text: .text%__1cIAddPNodeGOpcode6kM_i_; +text: .text%__1cIPhaseIFGJre_insert6MI_v_; +text: .text%__1cIPhaseIFGLremove_node6MI_pnIIndexSet__; +text: .text%__1cKNode_ArrayGinsert6MIpnENode__v_; +text: .text%__1cHTypeIntEhash6kM_i_; +text: .text%__1cETypeLisa_oop_ptr6kM_i_; +text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cMPhaseIterGVNNtransform_old6MpnENode__2_; +text: .text%__1cDfh16FI_i_; +text: .text%__1cNMachIdealNodeErule6kM_I_: ad_sparc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: 
ad_sparc_misc.o; +text: .text%__1cIciObjectGequals6Mp0_i_; +text: .text%__1cIIndexSetKfree_block6MI_v_; +text: .text%__1cWShouldNotReachHereNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cHTypeIntJsingleton6kM_i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: classes.o; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: cfgnode.o; +text: .text%__1cLIfFalseNodeGOpcode6kM_i_; +text: .text%__1cSCallStaticJavaNodeGOpcode6kM_i_; +text: .text%__1cENodeEhash6kM_I_; +text: .text%__1cOPhaseIdealLoopEsort6MpnNIdealLoopTree_2_2_; +text: .text%__1cMMachProjNodeLbottom_type6kM_pknEType__; +text: .text%JVM_ArrayCopy; +text: .text%__1cOtypeArrayKlassQoop_is_typeArray6kM_i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cNSharedRuntimeDl2f6Fx_f_; +text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: classes.o; +text: .text%__1cIParmNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cMPhaseChaitinKbias_color6MrnDLRG_i_nHOptoRegEName__; +text: .text%__1cHConNodeGOpcode6kM_i_; +text: .text%__1cMPhaseIterGVNWadd_users_to_worklist06MpnENode__v_; +text: .text%__1cMMachProjNodeGOpcode6kM_i_; +text: .text%__1cJiRegPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: classes.o; +text: .text%__1cJiRegIOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cENodeXpartial_latency_of_defs6MrnLBlock_Array_rnNGrowableArray4CI___v_; +text: .text%__1cXPipeline_Use_Cycle_Mask2L6Mi_r0_: ad_sparc_pipeline.o; +text: .text%__1cIBoolNodeGOpcode6kM_i_; +text: .text%__1cJMultiNodeIis_Multi6M_p0_; +text: .text%__1cYCallStaticJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeEgrow6MI_v_; +text: .text%__1cIciObjectEhash6M_i_; +text: .text%__1cKRegionNodeGOpcode6kM_i_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: relocInfo.o; +text: .text%__1cOPhaseIdealLoopUbuild_loop_tree_impl6MpnENode_i_i_; +text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cRMachSpillCopyNodeLbottom_type6kM_pknEType__: ad_sparc.o; +text: .text%__1cOPhaseIdealLoopOget_early_ctrl6MpnENode__2_; +text: .text%__1cIIndexSetKinitialize6MIpnFArena__v_; +text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cFMutexGunlock6M_v_; +text: .text%__1cIPhaseGVNJtransform6MpnENode__2_; +text: .text%__1cFStateRMachOperGenerator6MipnIMachNode_pnHCompile__pnIMachOper__; +text: .text%__1cKRelocationNunpack_2_ints6Mri1_v_: relocInfo.o; +text: .text%__1cOoop_RelocationLunpack_data6M_v_; +text: .text%__1cRmethodDataOopDescHdata_at6Mi_pnLProfileData__; +text: .text%__1cNRelocIteratorEnext6M_i_: codeBlob.o; +text: .text%__1cPJavaFrameAnchorNmake_walkable6MpnKJavaThread__v_; +text: .text%__1cENodeNis_block_proj6kM_pk0_; +text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__; +text: .text%__1cKJavaThreadPcook_last_frame6MnFframe__1_; +text: .text%__1cIProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cUGenericGrowableArrayPraw_at_put_grow6MipknEGrET_3_v_; +text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_; +text: .text%__1cKRegionNodeGpinned6kM_i_: classes.o; +text: .text%__1cLTypeInstPtrEhash6kM_i_; +text: .text%__1cYCallStaticJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOPhaseIdealLoopThas_local_phi_input6MpnENode__2_; +text: .text%__1cJloadINodeErule6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cRMachSpillCopyNodeLout_RegMask6kM_rknHRegMask__: ad_sparc.o; +text: .text%__1cKbranchNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFStateDDFA6MipknENode__i_; +text: .text%__1cMMachProjNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cENodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cMMachProjNodeLout_RegMask6kM_rknHRegMask__: classes.o; +text: .text%__1cOMethodLivenessKBasicBlockXcompute_gen_kill_single6MpnQciByteCodeStream__v_; +text: .text%__1cRMachSpillCopyNodeKin_RegMask6kMI_rknHRegMask__: ad_sparc.o; +text: .text%__1cbAfinal_graph_reshaping_impl6FpnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: typeArrayKlass.o; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cUParallelScavengeHeapVlarge_typearray_limit6M_I_: parallelScavengeHeap.o; +text: .text%__1cIPhaseCCPOtransform_once6MpnENode__2_; +text: .text%__1cGciTypeEmake6FnJBasicType__p0_; +text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cENodeFclone6kM_p0_; +text: .text%__1cITypeNodeEhash6kM_I_; +text: .text%__1cIBoolNodeHis_Bool6M_p0_: subnode.o; +text: .text%__1cMPipeline_UseMfull_latency6kMIrk0_I_; +text: .text%__1cRMachSpillCopyNodePoper_input_base6kM_I_: ad_sparc.o; +text: .text%__1cENodeKmatch_edge6kMI_I_; +text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cHPhiNodeGpinned6kM_i_: cfgnode.o; +text: .text%__1cOPhaseIdealLoopZremix_address_expressions6MpnENode__2_; +text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_; +text: .text%__1cICallNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: ad_sparc_misc.o; +text: .text%__1cRMachSafePointNodeEjvms6kM_pnIJVMState__: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopNget_late_ctrl6MpnENode_2_2_; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopnode.o; +text: .text%JVM_CurrentTimeMillis; +text: .text%__1cIMachNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cENodeIIdentity6MpnOPhaseTransform__p0_; +text: .text%__1cIPipelinePoperand_latency6kMIpk0_I_; +text: .text%__1cENodeFIdeal6MpnIPhaseGVN_i_p0_; +text: .text%__1cKTypeAryPtrEhash6kM_i_; +text: .text%__1cICallNodeHis_Call6M_p0_: callnode.o; +text: .text%__1cETypeFxmeet6kMpk0_2_; +text: .text%__1cILRG_ListGextend6MII_v_; +text: .text%__1cJVectorSet2F6kMI_i_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: ad_sparc.o; +text: .text%__1cENodeQIdeal_DU_postCCP6MpnIPhaseCCP__p0_; +text: .text%__1cOtypeArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cIProjNodeEhash6kM_I_; +text: .text%__1cGIfNodeGpinned6kM_i_: classes.o; +text: .text%__1cIAddINodeGOpcode6kM_i_; +text: .text%__1cIIndexSet2t6Mp0_v_; +text: .text%__1cRmethodDataOopDescJnext_data6MpnLProfileData__2_; +text: .text%__1cITypeNodeJideal_reg6kM_I_; +text: .text%__1cYCallStaticJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLTypeInstPtrCeq6kMpknEType__i_; +text: .text%__1cMloadConPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: 
classes.o; +text: .text%__1cHPhiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMachNode2t6M_v_; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__: ad_sparc.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: callnode.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: callnode.o; +text: .text%__1cOis_diamond_phi6FpnENode__i_: cfgnode.o; +text: .text%__1cHMatcherKLabel_Root6MpknENode_pnFState_p16_6_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: memnode.o; +text: .text%__1cENodeHsize_of6kM_I_; +text: .text%__1cICmpPNodeGOpcode6kM_i_; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: classes.o; +text: .text%__1cKNode_ArrayGremove6MI_v_; +text: .text%__1cNSafePointNodeGpinned6kM_i_: callnode.o; +text: .text%__1cMOopTaskQdDueueOpop_local_slow6MInOTaskQdDueueSuperDAge__i_; +text: .text%__1cHPhiNodeEhash6kM_I_; +text: .text%__1cKTypeOopPtrJsingleton6kM_i_; +text: .text%__1cPindOffset13OperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cFMutexElock6M_v_; +text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cJCProjNodeGis_CFG6kM_i_: cfgnode.o; +text: .text%__1cKmethodOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cGIfNodeFis_If6M_p0_: classes.o; +text: .text%__1cENodeNrematerialize6kM_i_: cfgnode.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nmethod.o; +text: .text%__1cJStartNodeLbottom_type6kM_pknEType__; +text: .text%__1cHTypeIntFxmeet6kMpknEType__3_; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: ad_sparc_misc.o; +text: .text%__1cOmatch_into_reg6FpnENode_iii1_i_: matcher.o; +text: .text%__1cENodeSremove_dead_region6MpnIPhaseGVN_i_i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: classes.o; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: memnode.o; +text: .text%__1cIProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__; +text: .text%__1cKTypeAryPtrCeq6kMpknEType__i_; +text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cICmpINodeGOpcode6kM_i_; +text: .text%Unsafe_CompareAndSwapLong; +text: .text%__1cNCatchProjNodeGOpcode6kM_i_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: cfgnode.o; +text: .text%__1cSinstanceKlassKlassMoop_is_klass6kM_i_: instanceKlassKlass.o; +text: .text%__1cQUnique_Node_ListGremove6MpnENode__v_; +text: .text%__1cENode2t6Mp0_v_; +text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: classes.o; +text: .text%__1cMloadConPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitHstopped6M_i_; +text: .text%__1cETypeKhas_memory6kM_i_; +text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: cfgnode.o; +text: .text%__1cMloadConPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJStartNodeGpinned6kM_i_: callnode.o; +text: .text%__1cTCreateExceptionNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompileMFillLocArray6MpnENode_pnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cIHaltNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: ad_sparc_misc.o; +text: .text%__1cLTypeInstPtrEmake6FnHTypePtrDPTR_pnHciKlass_ipnIciObject_i_pk0_; +text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: cfgnode.o; +text: .text%__1cHRegMaskMSmearToPairs6M_v_; +text: 
.text%__1cYCallStaticJavaDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMPhaseIterGVNVadd_users_to_worklist6MpnENode__v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: callnode.o; +text: .text%__1cKMachIfNodeJis_MachIf6kM_pk0_: ad_sparc_misc.o; +text: .text%__1cMloadConPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: classes.o; +text: .text%__1cbFCompressedLineNumberWriteStreamKwrite_pair6Mii_v_; +text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__; +text: .text%__1cSCallStaticJavaNodeEhash6kM_I_: callnode.o; +text: .text%__1cHMatcherKReduceOper6MpnFState_ipnIMachNode_rpnENode__v_; +text: .text%__1cMPipeline_UseJadd_usage6Mrk0_v_; +text: .text%__1cRCardTableModRefBSEkind6M_nKBarrierSetEName__: cardTableExtension.o; +text: .text%__1cIAddPNodeKmatch_edge6kMI_I_; +text: .text%__1cJiRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cGIfNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cGcmpkey6Fpkv1_i_; +text: .text%__1cNsymbolOopDescGequals6kMpkci_i_; +text: .text%__1cMPhaseChaitinMchoose_color6MrnDLRG_i_nHOptoRegEName__; +text: .text%__1cMMergeMemNodeGOpcode6kM_i_; +text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__; +text: .text%__1cJTypeTupleJsingleton6kM_i_; +text: .text%__1cIParmNodeGOpcode6kM_i_; +text: .text%__1cJiRegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_; +text: .text%__1cPClassFileParserbEparse_constant_pool_utf8_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cHTypeIntEmake6Fiii_pk0_; +text: .text%__1cENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_; +text: .text%__1cKSchedulingWAddNodeToAvailableList6MpnENode__v_; +text: .text%__1cKSchedulingSChooseNodeToBundle6M_pnENode__; +text: .text%__1cKSchedulingPAddNodeToBundle6MpnENode_pknFBlock__v_; +text: .text%__1cKRelocationLunpack_data6M_v_: codeBlob.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: memnode.o; +text: .text%__1cICallNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: classes.o; +text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cJLoadPNodeGOpcode6kM_i_; +text: .text%__1cMMutableSpaceIallocate6MI_pnIHeapWord__; +text: .text%__1cJPSPermGenSallocate_permanent6MI_pnIHeapWord__; +text: .text%__1cUParallelScavengeHeapWpermanent_mem_allocate6MI_pnIHeapWord__; +text: .text%__1cENodeGis_CFG6kM_i_: connode.o; +text: .text%__1cIMachNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMMutableSpaceMcas_allocate6MI_pnIHeapWord__; +text: .text%__1cNflagsRegPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cHPhiNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMMachTypeNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cJCatchNodeGOpcode6kM_i_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: cfgnode.o; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pnIciObject_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cHCmpNodeGis_Cmp6kM_pk0_: classes.o; +text: .text%__1cIJVMStateLdebug_start6kM_I_; +text: .text%__1cTconstantPoolOopDescSklass_ref_index_at6Mi_i_; +text: .text%__1cENodeHdel_req6MI_v_; +text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_; +text: .text%__1cOAbstractICachePinvalidate_word6FpC_v_; +text: .text%__1cFBlockIis_Empty6kM_i_; +text: .text%__1cOThreadCritical2T6M_v_; +text: .text%__1cOThreadCritical2t6M_v_; 
+text: .text%method_compare: methodOop.o; +text: .text%__1cENodeGis_CFG6kM_i_: subnode.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: multnode.o; +text: .text%__1cICodeHeapKfind_start6kMpv_1_; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_: classes.o; +text: .text%__1cETypeEhash6kM_i_; +text: .text%__1cRNativeInstructionLset_long_at6Mii_v_; +text: .text%__1cJMultiNodeEhash6kM_I_: classes.o; +text: .text%__1cIAddPNodeLbottom_type6kM_pknEType__; +text: .text%__1cQciByteCodeStreamEjava6MnJBytecodesECode__2_; +text: .text%__1cJCProjNodeEhash6kM_I_: classes.o; +text: .text%__1cIHaltNodeGOpcode6kM_i_; +text: .text%__1cMMachCallNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cFBlockGselect6MrnJNode_List_rnLBlock_Array_pirnJVectorSet_IrnNGrowableArray4CI___pnENode__; +text: .text%__1cFStateRMachNodeGenerator6MipnHCompile__pnIMachNode__; +text: .text%__1cHMatcherKReduceInst6MpnFState_irpnENode__pnIMachNode__; +text: .text%__1cICmpUNodeGOpcode6kM_i_; +text: .text%__1cOPhaseIdealLoopbIdom_lca_for_get_late_ctrl_internal6MpnENode_22_2_; +text: .text%__1cXPipeline_Use_Cycle_MaskCOr6Mrk0_v_; +text: .text%__1cTconstantPoolOopDescQsignature_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cILoadNodeEhash6kM_I_; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_sparc_misc.o; +text: .text%__1cKTypeAryPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cIMachNodeGExpand6MpnFState_rnJNode_List__p0_: ad_sparc_misc.o; +text: .text%__1cGBitMapFclear6M_v_; +text: .text%__1cHConNodeGis_Con6kM_I_: classes.o; +text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_; +text: .text%__1cKHandleMark2T6M_v_; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlass.o; +text: .text%__1cFBlockLis_uncommon6kMrnLBlock_Array__i_; +text: .text%__1cZPhaseConservativeCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cMPhaseIterGVNZremove_globally_dead_node6MpnENode__v_; +text: .text%__1cWShouldNotReachHereNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHPhiNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cILoadNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: cfgnode.o; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: multnode.o; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6FnTobjArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%JVM_ReleaseUTF; +text: .text%__1cJloadPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJTypeTupleEhash6kM_i_; +text: .text%__1cFframeVoopmapreg_to_location6kMnFVMRegEName_pknLRegisterMap__ppnHoopDesc__; +text: .text%__1cENodeHget_int6kM_i_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: methodDataOop.o; +text: .text%__1cHMatcherTReduceInst_Interior6MpnFState_ipnIMachNode_IrpnENode__I_; +text: .text%__1cNinstanceKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6FnTinstanceKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cMflagsRegOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: codeBlob.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: cfgnode.o; +text: .text%__1cICodeBlobLoop_addr_at6kMi_ppnHoopDesc__; +text: .text%__1cObranchConPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cMOopMapStreamJfind_next6M_v_; +text: .text%__1cFDictI2i6M_v_; 
+text: .text%__1cIMachNodeSalignment_required6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__: classes.o; +text: .text%__1cKNode_ArrayEgrow6MI_v_; +text: .text%__1cHTypeIntEmake6Fi_pk0_; +text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cJloadPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMMergeMemNodeLbottom_type6kM_pknEType__: memnode.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: multnode.o; +text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_; +text: .text%__1cOPSPromotionLABKinitialize6MnJMemRegion__v_; +text: .text%__1cPciInstanceKlassMis_interface6M_i_: ciInstanceKlass.o; +text: .text%__1cJMultiNodeIproj_out6kMI_pnIProjNode__; +text: .text%__1cPindOffset13OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_: coalesce.o; +text: .text%__1cUcompI_iReg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cODataRelocationJset_value6MpC_v_: relocInfo.o; +text: .text%__1cKRelocationRpd_set_data_value6MpCi_v_; +text: .text%__1cKCastPPNodeGOpcode6kM_i_; +text: .text%__1cOoop_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cOoop_RelocationGoffset6M_i_: relocInfo.o; +text: .text%__1cPSignatureStreamEnext6M_v_; +text: .text%__1cLLShiftINodeGOpcode6kM_i_; +text: .text%__1cENodeOis_block_start6kM_i_; +text: .text%__1cMPhaseChaitinSuse_prior_register6MpnENode_I2pnFBlock_rnJNode_List_6_i_; +text: .text%__1cGIfNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKCodeBufferIrelocate6MpCrknQRelocationHolder_i_v_; +text: .text%__1cKbranchNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPjava_lang_ClassMis_primitive6FpnHoopDesc__i_; +text: .text%__1cGBitMapJset_union6M0_v_; +text: .text%__1cPVirtualCallDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cIAddPNodeHis_AddP6M_p0_: classes.o; +text: .text%__1cIConPNodeGOpcode6kM_i_; +text: .text%__1cJLoadINodeGOpcode6kM_i_; +text: .text%__1cUGenericGrowableArray2t6Mii_v_; +text: .text%JVM_GetMethodIxExceptionTableLength; +text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__; +text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__; +text: .text%__1cNSharedRuntimeDd2i6Fd_i_; +text: .text%__1cVcompP_iRegP_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKRegionNodeEhash6kM_I_: classes.o; +text: .text%__1cNbranchConNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: subnode.o; +text: .text%__1cENodeHis_Copy6kM_I_: memnode.o; +text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_; +text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_; +text: .text%__1cIAddPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cGBitMap2t6MpII_v_; +text: .text%__1cUGenericGrowableArray2t6MpnFArena_iipnEGrET__v_; +text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_; +text: .text%__1cMMachCallNodeLbottom_type6kM_pknEType__; +text: .text%__1cFParsePdo_one_bytecode6M_v_; +text: .text%__1cFParseNdo_exceptions6M_v_; +text: .text%__1cITypeLongCeq6kMpknEType__i_; +text: .text%__1cLPCTableNodeGpinned6kM_i_: classes.o; +text: .text%__1cTconstantPoolOopDescbAname_and_type_ref_index_at6Mi_i_; +text: .text%__1cHPhiNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cENodeGpinned6kM_i_: connode.o; +text: .text%__1cHPhiNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cENodeGis_Con6kM_I_: classes.o; +text: 
.text%__1cHMatcherKmatch_tree6MpknENode__pnIMachNode__; +text: .text%__1cUParallelScavengeHeapPis_in_permanent6kMpkv_i_: parallelScavengeHeap.o; +text: .text%__1cMPhaseIterGVNKis_IterGVN6M_p0_: phaseX.o; +text: .text%__1cHCompileJcan_alias6MpknHTypePtr_i_i_; +text: .text%__1cKimmI13OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cCosVcurrent_stack_pointer6F_pC_; +text: .text%__1cENodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cEDict2F6kMpkv_pv_; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cENodeIdestruct6M_v_; +text: .text%__1cIMachNodeNis_MachEpilog6M_pnOMachEpilogNode__: ad_sparc_misc.o; +text: .text%__1cMCreateExNodeGOpcode6kM_i_; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: cfgnode.o; +text: .text%__1cIBoolNodeEhash6kM_I_; +text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cLTypeInstPtrFxmeet6kMpknEType__3_; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: classes.o; +text: .text%__1cKNode_ArrayFclear6M_v_; +text: .text%__1cObranchConPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIProjNodeHsize_of6kM_I_; +text: .text%__1cKis_x2logic6FpnIPhaseGVN_pnENode__3_: cfgnode.o; +text: .text%__1cHAbsNodeLis_absolute6FpnIPhaseGVN_pnENode__4_; +text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_; +text: .text%__1cMloadConINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cGBitMapGat_put6MIi_v_; +text: .text%__1cIHaltNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cLPhaseValuesGintcon6Mi_pnIConINode__; +text: .text%__1cJloadBNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: multnode.o; +text: .text%__1cGThreadLis_in_stack6kMpC_i_; +text: .text%__1cKJavaThreadNis_lock_owned6kMpC_i_; +text: .text%__1cFMutexElock6MpnGThread__v_; +text: .text%__1cKciTypeFlowLStateVectorSapply_one_bytecode6MpnQciByteCodeStream__i_; +text: .text%__1cHhashptr6Fpkv_i_; +text: .text%__1cMMachHaltNodeEjvms6kM_pnIJVMState__; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: classes.o; +text: .text%__1cGOopMapJset_value6MnHOptoRegEName_ii_v_; +text: .text%__1cHhashkey6Fpkv_i_; +text: .text%__1cMPhaseChaitinHnew_lrg6MpknENode_I_v_; +text: .text%__1cIJVMStateJdebug_end6kM_I_; +text: .text%__1cIPhaseIFGMtest_edge_sq6kMII_i_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: classes.o; +text: .text%__1cTconstantPoolOopDescLname_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cLBoxLockNodeNrematerialize6kM_i_: classes.o; +text: .text%__1cKBranchDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cJloadPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHSubNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: callnode.o; +text: .text%__1cJTypeTupleCeq6kMpknEType__i_; +text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_; +text: .text%__1cSaddP_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMachNodeHtwo_adr6kM_I_: ad_sparc.o; +text: .text%__1cNSafePointNodeHsize_of6kM_I_; +text: .text%__1cObranchConPNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cLTypeInstPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cHCmpNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPcheckCastPPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNLoadRangeNodeGOpcode6kM_i_; +text: .text%__1cNbranchConNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENode2t6Mp011_v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: symbolKlass.o; +text: 
.text%__1cHCompilePfind_alias_type6MpknHTypePtr_i_pn0AJAliasType__; +text: .text%__1cJStoreNodeKmatch_edge6kMI_I_; +text: .text%__1cKbranchNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cOPSPromotionLABFflush6M_v_; +text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_: bytecode.o; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: generateOopMap.o; +text: .text%__1cOcompU_iRegNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__; +text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_; +text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__; +text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_; +text: .text%__1cQSystemDictionaryXcheck_signature_loaders6FnMsymbolHandle_nGHandle_2ipnGThread__v_; +text: .text%__1cKbranchNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: ad_sparc_misc.o; +text: .text%__1cJloadPNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cSCountedLoopEndNodeGOpcode6kM_i_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: cfgnode.o; +text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__; +text: .text%__1cHMemNodeMIdeal_common6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPcheckCastPPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNCellTypeStateFmerge6kM0i_0_; +text: .text%__1cMPhaseIterGVNMsubsume_node6MpnENode_2_v_; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceKlass.o; +text: .text%__1cILoadNodeKmatch_edge6kMI_I_; +text: .text%__1cENodeGis_Con6kM_I_: subnode.o; +text: .text%__1cFframeUis_interpreted_frame6kM_i_; +text: .text%__1cLsymbolKlassNoop_is_symbol6kM_i_: symbolKlass.o; +text: .text%__1cJloadINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cETypeFempty6kM_i_; +text: .text%__1cNExceptionMark2T6M_v_; +text: .text%__1cNExceptionMark2t6MrpnGThread__v_; +text: .text%__1cMMachCallNodeLis_MachCall6M_p0_: ad_sparc_misc.o; +text: .text%__1cIMachNodeHis_Mach6M_p0_: machnode.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: connode.o; +text: .text%__1cITypeLongEhash6kM_i_; +text: .text%__1cNSafePointNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__; +text: .text%__1cJiRegLOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: multnode.o; +text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__; +text: .text%__1cMPhaseIterGVNbGregister_new_node_with_optimizer6MpnENode__2_; +text: .text%__1cPciInstanceKlassRis_instance_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cOloadConI13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOloadConI13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cObranchConPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKStoreINodeGOpcode6kM_i_; +text: .text%__1cJcmpOpOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_; +text: .text%__1cOno_flip_branch6FpnFBlock__i_: block.o; +text: .text%__1cMloadConINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJiRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKRegionNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKstorePNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHPhiNodeHsize_of6kM_I_: cfgnode.o; +text: .text%__1cJrelocInfo2t6Mn0AJrelocType_ii_v_; +text: 
.text%__1cPSignatureStreamHis_done6kM_i_; +text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_; +text: .text%__1cIAddPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cQaddP_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescPis_empty_method6kM_i_; +text: .text%__1cSaddI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKReflectionTverify_field_access6FpnMklassOopDesc_22nLAccessFlags_ii_i_; +text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMflagsRegOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cIBoolNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLCounterDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cHRegMaskMClearToPairs6M_v_; +text: .text%__1cJiRegLOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cRshlI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cZPhaseConservativeCoalesceJcopy_copy6MpnENode_2pnFBlock_I_i_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: lcm.o; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode__i_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: relocInfo.o; +text: .text%__1cNPhaseCoalesceRcombine_these_two6MpnENode_2_v_; +text: .text%__1cNflagsRegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cKcmpOpPOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cKTypeRawPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cMloadConINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescRis_not_compilable6kMi_i_; +text: .text%__1cNmethodOopDescLis_accessor6kM_i_; +text: .text%__1cFArenaEgrow6MI_pv_; +text: .text%__1cMPhaseChaitinLinsert_proj6MpnFBlock_IpnENode_I_v_; +text: .text%__1cILoadNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJStoreNodeLbottom_type6kM_pknEType__; +text: .text%__1cKTypeRawPtrJsingleton6kM_i_; +text: .text%__1cGIfNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHMemNodeGis_Mem6M_p0_: classes.o; +text: .text%__1cSCallStaticJavaNodeRis_CallStaticJava6kM_pk0_: callnode.o; +text: .text%__1cIBoolNodeLbottom_type6kM_pknEType__: subnode.o; +text: .text%__1cENodeHis_Goto6kM_I_: classes.o; +text: .text%__1cPciObjectFactorySget_unloaded_klass6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cNSafePointNodeSset_next_exception6Mp0_v_; +text: .text%__1cNmethodOopDescMintrinsic_id6kM_n0ALIntrinsicId__; +text: .text%__1cQaddP_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIHaltNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPCheckCastPPNodeGOpcode6kM_i_; +text: .text%__1cKStorePNodeGOpcode6kM_i_; +text: .text%__1cKRelocationLunpack_data6M_v_: relocInfo.o; +text: .text%__1cNflagsRegUOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cQciByteCodeStreamMreset_to_bci6Mi_v_; +text: .text%__1cPcheckCastPPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAddPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRInvocationCounterEinit6M_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: block.o; +text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cMMergeMemNodeLis_MergeMem6M_p0_: memnode.o; +text: .text%__1cFBlockOschedule_local6MrnHMatcher_rnLBlock_Array_pirnJVectorSet_rnNGrowableArray4CI___i_; +text: .text%__1cXPhaseAggressiveCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cFBlockScall_catch_cleanup6MrnLBlock_Array__v_; +text: .text%__1cObranchConUNodeMideal_Opcode6kM_i_: 
ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQCompressedStream2t6MpCi_v_; +text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_; +text: .text%__1cIAddINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cHRetNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cKRegionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstorePNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMObjectLocker2t6MnGHandle_pnGThread__v_; +text: .text%__1cMObjectLocker2T6M_v_; +text: .text%__1cNSafePointNodebBneeds_polling_address_input6F_i_; +text: .text%__1cKRegionNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOcompI_iRegNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cObranchConPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: callnode.o; +text: .text%__1cMURShiftINodeGOpcode6kM_i_; +text: .text%__1cRmethodDataOopDescPinitialize_data6MpnOBytecodeStream_i_i_; +text: .text%__1cENodeGis_CFG6kM_i_: memnode.o; +text: .text%__1cNRelocIteratorKset_limits6MpC1_v_; +text: .text%__1cIRootNodeGOpcode6kM_i_; +text: .text%__1cOloadConI13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cILoadNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cTCreateExceptionNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFStateM_sub_Op_ConI6MpknENode__v_; +text: .text%__1cRMachSafePointNodeQis_MachSafePoint6M_p0_: ad_sparc_misc.o; +text: .text%__1cENodeHdel_out6Mp0_v_: parse2.o; +text: .text%__1cPcheckCastPPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: classes.o; +text: .text%__1cISubINodeGOpcode6kM_i_; +text: .text%__1cNbranchConNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFframeZsender_with_pc_adjustment6kMpnLRegisterMap_pnICodeBlob_i_0_; +text: .text%__1cJTypeTupleEmake6FIppknEType__pk0_; +text: .text%__1cJTypeTupleGfields6FI_ppknEType__; +text: .text%__1cFframeGsender6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cENodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__; +text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__; +text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_; +text: .text%__1cILoadNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cENodeGpinned6kM_i_: subnode.o; +text: .text%__1cKbranchNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cHPhiNodeIadr_type6kM_pknHTypePtr__: cfgnode.o; +text: .text%__1cHAddNodeEhash6kM_I_; +text: .text%__1cMPhaseIterGVNFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cNbranchConNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSPSPromotionManagerMdrain_stacks6M_v_; +text: .text%__1cENodeRdisconnect_inputs6Mp0_i_; +text: .text%__1cLis_cond_add6FpnIPhaseGVN_pnHPhiNode__pnENode__; +text: .text%__1cPsplit_flow_path6FpnIPhaseGVN_pnHPhiNode__pnENode__: cfgnode.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: cfgnode.o; +text: .text%__1cNbranchConNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cSaddI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcompU_iRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cHConNodeEhash6kM_I_; +text: .text%__1cLLShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: 
.text%__1cNmethodOopDescIbci_from6kMpC_i_; +text: .text%__1cOMachReturnNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cJdo_method6FpnNmethodOopDesc__v_: recompilationMonitor.o; +text: .text%__1cNidealize_test6FpnIPhaseGVN_pnGIfNode__3_: ifnode.o; +text: .text%__1cILoadNodeHis_Load6M_p0_: classes.o; +text: .text%__1cYCallStaticJavaDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: subnode.o; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: split_if.o; +text: .text%__1cITypeNodeHsize_of6kM_I_; +text: .text%__1cVcompP_iRegP_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: parse1.o; +text: .text%__1cENodeGpinned6kM_i_: memnode.o; +text: .text%__1cNSafePointNodeLbottom_type6kM_pknEType__: callnode.o; +text: .text%__1cFciEnvXget_klass_by_index_impl6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__; +text: .text%__1cLProfileDataPadjust_pointers6M_v_: methodDataOop.o; +text: .text%__1cLProfileDataPfollow_contents6M_v_: methodDataOop.o; +text: .text%__1cQaddP_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJStoreNodeIis_Store6kM_pk0_: classes.o; +text: .text%__1cJloadINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstorePNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetClassModifiers; +text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__; +text: .text%__1cNSafePointNodeOnext_exception6kM_p0_; +text: .text%JVM_GetClassAccessFlags; +text: .text%__1cKbranchNodeHis_Goto6kM_I_: ad_sparc_misc.o; +text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_; +text: .text%__1cIsplit_if6FpnGIfNode_pnMPhaseIterGVN__pnENode__: ifnode.o; +text: .text%__1cHTypeAryEhash6kM_i_; +text: .text%__1cTremove_useless_bool6FpnGIfNode_pnIPhaseGVN__pnENode__: ifnode.o; +text: .text%__1cGOopMapHset_xxx6MnHOptoRegEName_nLOopMapValueJoop_types_ii2_v_; +text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_; +text: .text%__1cJMultiNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cJCProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPPerfLongVariantGsample6M_v_; +text: .text%__1cJStoreNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMPhaseChaitinMyank_if_dead6MpnENode_pnFBlock_pnJNode_List_6_i_; +text: .text%__1cJloadINodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cPSignatureStreamJis_object6kM_i_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: callnode.o; +text: .text%__1cIBoolNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJCatchNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: multnode.o; +text: .text%__1cIMachOperNconstant_disp6kM_i_; +text: .text%__1cIMachOperFscale6kM_i_; +text: .text%__1cENode2t6Mp0111_v_; +text: .text%__1cFPhase2t6Mn0ALPhaseNumber__v_; +text: .text%__1cNCompileBrokerLmaybe_block6F_v_; +text: .text%__1cFBlockOcode_alignment6M_I_; +text: .text%__1cMgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__; +text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: subnode.o; +text: .text%__1cFStateM_sub_Op_RegP6MpknENode__v_; +text: .text%JVM_GetCPMethodSignatureUTF; +text: .text%__1cFChunkJnext_chop6M_v_; +text: .text%__1cMMergeMemNodeEhash6kM_I_; +text: .text%__1cGOopMapHset_oop6MnHOptoRegEName_ii_v_; +text: 
.text%__1cKSchedulingbFComputeRegisterAntidependencies6MpnFBlock__v_; +text: .text%__1cKSchedulingPComputeUseCount6MpknFBlock__v_; +text: .text%__1cITypeNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cHTypePtrHget_con6kM_i_; +text: .text%__1cUcompI_iReg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceKlass.o; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: ad_sparc.o; +text: .text%__1cIJumpDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: ad_sparc.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_sparc.o; +text: .text%__1cIMachNodeSalignment_required6kM_i_: ad_sparc.o; +text: .text%__1cENodeJis_Branch6kM_I_: ad_sparc.o; +text: .text%__1cMPhaseChaitinSget_spillcopy_wide6MpnENode_2I_2_; +text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__; +text: .text%__1cVcompP_iRegP_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cHSubNodeGis_Sub6M_p0_: classes.o; +text: .text%__1cNPhaseRegAllocGis_oop6kMpknENode__i_; +text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_; +text: .text%__1cQaddI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cObranchConUNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: multnode.o; +text: .text%__1cUParallelScavengeHeapVunsafe_max_tlab_alloc6kM_I_; +text: .text%__1cFBlockJfind_node6kMpknENode__I_; +text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_: frame.o; +text: .text%__1cFMutexbClock_without_safepoint_check6M_v_; +text: .text%__1cHCmpNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_I_pnIHeapWord__; +text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_; +text: .text%__1cHTypePtrEhash6kM_i_; +text: .text%__1cIMachNodeNis_MachEpilog6M_pnOMachEpilogNode__: ad_sparc.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: codeBlob.o; +text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2I_v_; +text: .text%__1cUParallelScavengeHeapRallocate_new_tlab6MI_pnIHeapWord__; +text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_; +text: .text%__1cKSharedHeapXfill_region_with_object6FnJMemRegion__v_; +text: .text%__1cFBlockLfind_remove6MpknENode__v_; +text: .text%__1cIIndexSetJlrg_union6MIIkIpknIPhaseIFG_rknHRegMask__I_; +text: .text%__1cKMemBarNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cUcompI_iReg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_; +text: .text%__1cNObjectMonitorEexit6MpnGThread__v_; +text: .text%__1cIimmPOperEtype6kM_pknEType__: ad_sparc_clone.o; +text: .text%__1cMloadConPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cLMachNopNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadRangeNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cVCompressedWriteStream2t6Mi_v_; +text: .text%__1cNObjectMonitorFenter6MpnGThread__v_; +text: 
.text%__1cRlock_ptr_RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cPVirtualCallDataPadjust_pointers6M_v_; +text: .text%__1cPVirtualCallDataPfollow_contents6M_v_; +text: .text%__1cIJVMStateNclone_shallow6kM_p0_; +text: .text%__1cENodeKreplace_by6Mp0_v_; +text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cMMergeMemNodePiteration_setup6Mpk0_v_; +text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cKRegionNodeOis_block_start6kM_i_: classes.o; +text: .text%__1cRMachSpillCopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRMachSpillCopyNodeOimplementation6kMpnKCodeBuffer_pnNPhaseRegAlloc_i_I_; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: classes.o; +text: .text%__1cKRegionNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJStoreNodeEhash6kM_I_; +text: .text%__1cHMatcherQis_save_on_entry6Mi_i_; +text: .text%__1cSaddP_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQaddI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKTypeOopPtrWmake_from_klass_common6FpnHciKlass_ii_pk0_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: machnode.o; +text: .text%__1cIGraphKitJclone_map6M_pnNSafePointNode__; +text: .text%__1cMMergeMemNodeQclone_all_memory6FpnENode__p0_; +text: .text%__1cOcompU_iRegNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKIfTrueNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRMemBarReleaseNodeGOpcode6kM_i_; +text: .text%__1cHTypeIntFempty6kM_i_; +text: .text%__1cKbranchNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLPhaseValuesFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cIMachOperIconstant6kM_i_; +text: .text%__1cNCatchProjNodeMis_CatchProj6kM_pk0_: cfgnode.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: subnode.o; +text: .text%__1cWMutableSpaceUsedHelperLtake_sample6M_x_: spaceCounters.o; +text: .text%__1cQaddI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_; +text: .text%__1cOcompU_iRegNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNRelocIteratorKinitialize6MipnICodeBlob_pC3_v_; +text: .text%__1cRPSOldPromotionLABFflush6M_v_; +text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__; +text: .text%__1cPcompP_iRegPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKCodeBufferRtransform_address6kMrk0pC_3_; +text: .text%__1cLBoxLockNodeGOpcode6kM_i_; +text: .text%__1cIciObjectJset_ident6MI_v_; +text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__; +text: .text%__1cKTypeRawPtrEhash6kM_i_; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: subnode.o; +text: .text%__1cIBoolNodeKmatch_edge6kMI_I_: subnode.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: memnode.o; +text: .text%__1cMMergeMemNodePset_base_memory6MpnENode__v_; +text: .text%__1cOcompI_iRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLIfFalseNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cCosPelapsed_counter6F_x_; +text: .text%__1cNPhaseRegAllocKreg2offset6kMnHOptoRegEName__i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: multnode.o; +text: .text%__1cKReflectionTverify_class_access6FpnMklassOopDesc_2i_i_; +text: .text%__1cICallNodeLis_CallLeaf6kM_pknMCallLeafNode__: callnode.o; +text: .text%__1cRCompilationPolicyOmustBeCompiled6FnMmethodHandle__i_; +text: .text%__1cMMergeMemNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: 
.text%__1cGBitMapOset_difference6M0_v_; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: callnode.o; +text: .text%__1cMPhaseChaitinJsplit_USE6MpnENode_pnFBlock_2IIiinNGrowableArray4CI__i_I_; +text: .text%__1cENodeGis_Con6kM_I_: cfgnode.o; +text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cRshlI_reg_imm5NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMMergeMemNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%JVM_GetMethodIxLocalsCount; +text: .text%__1cNloadRangeNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: multnode.o; +text: .text%JVM_CurrentThread; +text: .text%__1cENodeHget_ptr6kM_i_; +text: .text%__1cQciByteCodeStreamFEOBCs6M_nJBytecodesECode__; +text: .text%__1cRcmpFastUnlockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIAndINodeGOpcode6kM_i_; +text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cENodeHins_req6MIp0_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: codeBuffer.o; +text: .text%__1cSaddI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMPhaseChaitinFUnion6MpknENode_3_v_; +text: .text%__1cMloadConLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHAddNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: codeBuffer.o; +text: .text%__1cPBoundRelocationMupdate_addrs6MpCrknKCodeBuffer_4_1_; +text: .text%__1cKstoreINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOFastUnlockNodeGOpcode6kM_i_; +text: .text%__1cLOptoRuntimeFnew_C6FpnMklassOopDesc_pnKJavaThread__v_; +text: .text%__1cITypeNodeDcmp6kMrknENode__I_; +text: .text%__1cIHaltNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: connode.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: gcm.o; +text: .text%__1cKstorePNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNflagsRegUOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cKcmpOpUOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cLstoreI0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIciObject2t6MnGHandle__v_; +text: .text%__1cNSafePointNodeKmatch_edge6kMI_I_; +text: .text%__1cFframeOis_entry_frame6kM_i_; +text: .text%__1cIMachOperOindex_position6kM_i_; +text: .text%__1cLklassVtableTupdate_super_vtable6MpnNinstanceKlass_pnNmethodOopDesc_i_i_; +text: .text%__1cXmembar_release_lockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJVectorSet2L6MI_rnDSet__; +text: .text%__1cLOopRecorderOallocate_index6MpnI_jobject__i_; +text: .text%__1cOcompU_iRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMMergeMemNodeJmemory_at6kMI_pnENode__; +text: .text%__1cSaddP_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPindOffset13OperNconstant_disp6kM_i_: ad_sparc.o; +text: .text%__1cPindOffset13OperFscale6kM_i_: ad_sparc.o; +text: .text%__1cPindOffset13OperNbase_position6kM_i_: ad_sparc.o; +text: .text%__1cWShouldNotReachHereNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__; +text: .text%__1cUcompI_iReg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVcompP_iRegP_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQaddP_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQaddP_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSvframeStreamCommonPfill_from_frame6M_i_; 
+text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: classes.o; +text: .text%__1cIJVMStateIof_depth6kMi_p0_; +text: .text%__1cNSharedRuntimeElrem6Fxx_x_; +text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cMciMethodDataLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cRMemBarAcquireNodeGOpcode6kM_i_; +text: .text%__1cRcmpFastUnlockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKo0RegPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cSaddI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cObranchConUNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJVectorSet2t6MpnFArena__v_; +text: .text%__1cKRegionNodeGis_CFG6kM_i_: loopnode.o; +text: .text%__1cKTypeAryPtrFxmeet6kMpknEType__3_; +text: .text%__1cVcompP_iRegP_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRSignatureIteratorSiterate_parameters6MX_v_; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: relocInfo.o; +text: .text%__1cICallNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cKRelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: postaloc.o; +text: .text%__1cOMethodLivenessKBasicBlockWcompute_gen_kill_range6MpnQciByteCodeStream__v_; +text: .text%__1cJTraceTime2T6M_v_; +text: .text%__1cITypeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cObranchConUNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cPcheckCastPPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKMemBarNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJloadPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cITypeLongJsingleton6kM_i_; +text: .text%__1cJTraceTime2t6MpkcpnMelapsedTimer_iipnMoutputStream__v_; +text: .text%__1cKoopFactoryKnew_method6FinLAccessFlags_iiipnGThread__pnNmethodOopDesc__; +text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_; +text: .text%__1cNmethodOopDescLobject_size6Fi_i_; +text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_; +text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_; +text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__; +text: .text%__1cLklassVtableWneeds_new_vtable_entry6FpnNmethodOopDesc_pnMklassOopDesc_pnHoopDesc_pnNsymbolOopDesc_nLAccessFlags__i_; +text: .text%__1cNmethodOopDescJinit_code6M_v_; +text: .text%__1cQconstMethodKlassIallocate6MiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cPClassFileParserMparse_method6MnSconstantPoolHandle_ipnLAccessFlags_pnPtypeArrayHandle_55pnGThread__nMmethodHandle__; +text: .text%__1cObranchConUNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMMergeMemNodeNset_memory_at6MIpnENode__v_; +text: .text%__1cLstoreI0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFciEnvSget_klass_by_index6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cNSignatureInfoHdo_void6M_v_: bytecode.o; +text: .text%__1cQaddI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENode2t6Mp01_v_; +text: .text%__1cNmethodOopDescPis_final_method6kM_i_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cKstoreINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIRewriterOrewrite_method6FnMmethodHandle_rnIintArray_pnGThread__1_; +text: .text%__1cNmethodOopDescLlink_method6FnMmethodHandle__v_; +text: 
.text%__1cPClassFileParserbDverify_legal_method_modifiers6MiinMsymbolHandle_pnGThread__v_; +text: .text%__1cHTypeAryRary_must_be_exact6kM_i_; +text: .text%__1cRshrI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFStateM_sub_Op_AddP6MpknENode__v_; +text: .text%__1cTCreateExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJCatchNodeIis_Catch6kM_pk0_: classes.o; +text: .text%__1cIGraphKitEstop6M_v_; +text: .text%__1cOcompI_iRegNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIPhaseCCPFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cKCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPcompP_iRegPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRMachNullCheckNodeQis_MachNullCheck6M_p0_: machnode.o; +text: .text%__1cITypeFuncEhash6kM_i_; +text: .text%__1cLBoxLockNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_: machnode.o; +text: .text%__1cMTypeKlassPtrEhash6kM_i_; +text: .text%__1cMCallLeafNodeGOpcode6kM_i_; +text: .text%__1cENodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCallLeafDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHnmethodKis_nmethod6kM_i_: nmethod.o; +text: .text%__1cOcompI_iRegNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHPhiNodeEmake6FpnENode_2pknEType_pknHTypePtr__p0_; +text: .text%__1cIAddPNodeQmach_bottom_type6FpknIMachNode__pknEType__; +text: .text%__1cOcompU_iRegNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: connode.o; +text: .text%__1cOCallRelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cKRelocationXpd_set_call_destination6MpCi_v_; +text: .text%__1cKRelocationTpd_call_destination6M_pC_; +text: .text%__1cJiRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNflagsRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cJStartNodeGis_CFG6kM_i_: callnode.o; +text: .text%__1cHOrINodeGOpcode6kM_i_; +text: .text%__1cXmembar_acquire_lockNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%JVM_GetCPMethodClassNameUTF; +text: .text%__1cMloadConDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMflagsRegOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cHnmethodJis_zombie6kM_i_: nmethod.o; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: callnode.o; +text: .text%__1cKSchedulingQNodeFitsInBundle6MpnENode__i_; +text: .text%__1cLProfileDataPfollow_contents6M_v_: ciMethodData.o; +text: .text%__1cLProfileDataPadjust_pointers6M_v_: ciMethodData.o; +text: .text%__1cFStateM_sub_Op_RegI6MpknENode__v_; +text: .text%__1cOMachReturnNodeNis_MachReturn6M_p0_: ad_sparc_misc.o; +text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_; +text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassVshould_be_initialized6kM_i_; +text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cHCompileYout_preserve_stack_slots6F_I_; +text: .text%__1cPSignatureStream2t6MnMsymbolHandle_i_v_; +text: .text%__1cIGraphKitLclean_stack6Mi_v_; +text: .text%__1cKStoreBNodeGOpcode6kM_i_; +text: .text%__1cLklassVtableToop_adjust_pointers6M_v_; +text: .text%__1cLklassVtableToop_follow_contents6M_v_; +text: .text%__1cVcompP_iRegP_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_; +text: 
.text%__1cPClassFileParserWparse_linenumber_table6MIIpipnGThread__pC_; +text: .text%__1cbFCompressedLineNumberWriteStream2t6Mi_v_; +text: .text%__1cITypeFuncCeq6kMpknEType__i_; +text: .text%__1cUcompI_iReg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKbranchNodeFclone6kM_pnENode__; +text: .text%__1cMUniverseOperFclone6kM_pnIMachOper__; +text: .text%__1cJlabelOperFclone6kM_pnIMachOper__; +text: .text%__1cJlabelOperFlabel6kM_pnFLabel__: ad_sparc.o; +text: .text%__1cICallNodeHis_Call6M_p0_: classes.o; +text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__; +text: .text%__1cIMachNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cIMachOperMdisp_as_type6kM_pknHTypePtr__: ad_sparc.o; +text: .text%__1cRshlI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRelocationJpack_data6M_i_: codeBlob.o; +text: .text%__1cOPhaseIdealLoopIsplit_up6MpnENode_22_i_; +text: .text%__1cLCounterDataOis_CounterData6M_i_: ciMethodData.o; +text: .text%__1cJStartNodeGpinned6kM_i_: classes.o; +text: .text%__1cHAddNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOis_range_check6FpnENode_r12ri_i_: ifnode.o; +text: .text%JVM_IsNaN; +text: .text%__1cNloadRangeNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKbranchNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJStartNodeGOpcode6kM_i_; +text: .text%__1cOMethodLivenessKBasicBlockPget_liveness_at6MpnIciMethod_i_nGBitMap__; +text: .text%__1cIciMethodPliveness_at_bci6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessPget_liveness_at6Mi_nGBitMap__; +text: .text%__1cQregF_to_stkINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: memnode.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: subnode.o; +text: .text%__1cENodeDcmp6kMrk0_I_; +text: .text%__1cFParseKensure_phi6Mii_pnHPhiNode__; +text: .text%__1cOoop_RelocationJpack_data6M_i_; +text: .text%__1cHTypeIntFxdual6kM_pknEType__; +text: .text%__1cIciObjectIencoding6M_pnI_jobject__; +text: .text%__1cNSafePointNode2t6MIpnIJVMState__v_; +text: .text%__1cHTypePtrJsingleton6kM_i_; +text: .text%__1cMmerge_region6FpnKRegionNode_pnIPhaseGVN__pnENode__: cfgnode.o; +text: .text%__1cIGraphKitObasic_plus_adr6MpnENode_2i_2_; +text: .text%__1cJAssemblerOpatched_branch6Fiii_i_; +text: .text%__1cJAssemblerSbranch_destination6Fii_i_; +text: .text%__1cRshlI_reg_imm5NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPClassFileParserZskip_over_field_signature6MpciIpnGThread__1_; +text: .text%__1cENodeIadd_prec6Mp0_v_; +text: .text%__1cLBoxLockNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMloadConDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__; +text: .text%__1cSaddP_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWMachCallStaticJavaNodePret_addr_offset6M_i_; +text: .text%__1cICodeBlobJis_zombie6kM_i_: codeBlob.o; +text: .text%__1cITypeFuncEmake6FpknJTypeTuple_3_pk0_; +text: .text%__1cMloadConDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCallLeafDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLRegisterMap2t6MpnKJavaThread_i_v_; +text: .text%__1cKTypeOopPtrHget_con6kM_i_; +text: .text%__1cQsubI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIRootNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLPhaseValuesHmakecon6MpknEType__pnHConNode__; 
+text: .text%__1cJloadLNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_iipnGThread__v_; +text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_; +text: .text%__1cJLoadBNodeGOpcode6kM_i_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: subnode.o; +text: .text%__1cLOptoRuntimebCcomplete_monitor_unlocking_C6FpnHoopDesc_pnJBasicLock__v_; +text: .text%__1cLOptoRuntimebAcomplete_monitor_locking_C6FpnHoopDesc_pnJBasicLock_pnKJavaThread__v_; +text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_; +text: .text%__1cLRegisterMapLpd_location6kMnFVMRegEName__pC_; +text: .text%__1cSvframeStreamCommonEnext6M_v_; +text: .text%__1cIAddINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cIRootNodeNis_block_proj6kM_pknENode__: classes.o; +text: .text%__1cMMergeMemNode2t6MpnENode__v_; +text: .text%__1cOcompI_iRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRMachSafePointNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cJloadINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXruntime_call_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cQsubI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPindOffset13OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cPindOffset13OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: machnode.o; +text: .text%__1cPindOffset13OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cICmpPNodeDsub6kMpknEType_3_3_; +text: .text%__1cKBufferBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cHMemNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cIAddINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIGraphKitQkill_dead_locals6M_v_; +text: .text%__1cCosMvm_page_size6F_i_; +text: .text%__1cHRegMaskPfind_first_pair6kM_nHOptoRegEName__; +text: .text%__1cMloadConLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cWShouldNotReachHereNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cRlock_ptr_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cVcompP_iRegP_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUcompI_iReg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: bytecode.o; +text: .text%__1cNloadRangeNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_; +text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLklassItableToop_adjust_pointers6M_v_; +text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_; +text: .text%__1cLklassItableToop_follow_contents6M_v_; +text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNSafePointNodeGOpcode6kM_i_; +text: .text%__1cWShouldNotReachHereNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: ad_sparc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: memnode.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: memnode.o; +text: .text%__1cOGenerateOopMapPjump_targets_do6MpnOBytecodeStream_pFp0ipi_v4_i_; +text: .text%__1cPcompP_iRegPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; 
+text: .text%__1cRshlI_reg_imm5NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadPNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMPhaseChaitinPset_was_spilled6MpnENode__v_; +text: .text%__1cYDebugInformationRecorderOdescribe_scope6MpnIciMethod_ipnKDebugToken_44_v_; +text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfoRec.o; +text: .text%__1cIHaltNodeGpinned6kM_i_: classes.o; +text: .text%__1cMloadConPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNMachIdealNodeJnum_opnds6kM_I_: ad_sparc.o; +text: .text%__1cIGraphKit2t6MpnIJVMState__v_; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: cfgnode.o; +text: .text%__1cPsp_ptr_RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cPconvI2L_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKbranchNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cQPreserveJVMState2T6M_v_; +text: .text%__1cQPreserveJVMState2t6MpnIGraphKit_i_v_; +text: .text%__1cIGraphKitRnull_check_common6MpnENode_nJBasicType_i_2_; +text: .text%__1cPcompP_iRegPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMWarmCallInfoHis_cold6kM_i_; +text: .text%__1cLCastP2INodeGOpcode6kM_i_; +text: .text%__1cRshrI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cTCreateExceptionNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddP_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cXmembar_release_lockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMloadConLNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Copy6kM_I_: machnode.o; +text: .text%__1cFMutexNowned_by_self6kM_i_; +text: .text%__1cLConvI2LNodeGOpcode6kM_i_; +text: .text%__1cITypeLongFxmeet6kMpknEType__3_; +text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_; +text: .text%__1cSaddI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowNmake_range_at6Mi_pn0AFRange__; +text: .text%__1cFParseMmerge_common6Mpn0AFBlock_i_v_; +text: .text%__1cOCallRelocationFvalue6M_pC_: codeBlob.o; +text: .text%__1cENodeHis_Type6M_pnITypeNode__: classes.o; +text: .text%__1cPciInstanceKlassYunique_concrete_subklass6M_p0_; +text: .text%__1cENodeQlatency_from_use6kMrnLBlock_Array_rnNGrowableArray4CI__pk0p0_i_; +text: .text%__1cLBoxLockNodeHsize_of6kM_I_; +text: .text%__1cOPhaseIdealLoopIset_idom6MpnENode_2I_v_; +text: .text%__1cJStoreNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHTypeAryCeq6kMpknEType__i_; +text: .text%__1cJStartNodeIis_Start6M_p0_: callnode.o; +text: .text%JVM_GetCPFieldClassNameUTF; +text: .text%__1cHCompileKTracePhase2t6MpkcpnMelapsedTimer_i_v_; +text: .text%__1cMPhaseIterGVNHmakecon6MpknEType__pnHConNode__; +text: .text%__1cSaddI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNLoadKlassNodeGOpcode6kM_i_; +text: .text%__1cRcmpFastUnlockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJLoadCNodeGOpcode6kM_i_; +text: .text%__1cMTypeKlassPtrCeq6kMpknEType__i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: memnode.o; +text: .text%__1cHciFieldJwill_link6MpnPciInstanceKlass_nJBytecodesECode__i_; +text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: node.o; +text: .text%__1cQciByteCodeStreamJget_field6Mri_pnHciField__; +text: 
.text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cOcompI_iRegNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: graphKit.o; +text: .text%__1cRshlI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_; +text: .text%__1cTCreateExceptionNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cGBitMapVset_union_with_result6M0_i_; +text: .text%__1cICmpINodeDsub6kMpknEType_3_3_; +text: .text%__1cLRShiftINodeGOpcode6kM_i_; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: memnode.o; +text: .text%__1cSCallLeafDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: frame.o; +text: .text%__1cPcheckCastPPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOPhaseIdealLoopQconditional_move6MpnENode__2_; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_: callnode.o; +text: .text%__1cFframeOis_first_frame6kM_i_; +text: .text%__1cJStoreNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cITypeFuncEmake6FpnIciMethod__pk0_; +text: .text%__1cICodeBlobTfix_oop_relocations6M_v_; +text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_; +text: .text%__1cJloadSNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKStoreCNodeGOpcode6kM_i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: subnode.o; +text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_; +text: .text%__1cMstringStreamFwrite6MpkcI_v_; +text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__; +text: .text%__1cHRetNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPcmpFastLockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_; +text: .text%__1cIBoolNodeJideal_reg6kM_I_: subnode.o; +text: .text%__1cHCmpNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRcmpFastUnlockNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRloadConP_pollNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cETypeFwiden6kMpk0_2_: type.o; +text: .text%__1cRcmpFastUnlockNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cILoadNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cLstoreI0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cQciByteCodeStreamKget_method6Mri_pnIciMethod__; +text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cMloadConPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__; +text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cNSafePointNodeGpinned6kM_i_: classes.o; +text: .text%__1cPcompP_iRegPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cObranchConPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCodeCacheQfind_blob_unsafe6Fpv_pnICodeBlob__; +text: .text%__1cNloadRangeNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCatchProjNodeLbottom_type6kM_pknEType__: cfgnode.o; +text: .text%__1cNCatchProjNodeHsize_of6kM_I_: 
cfgnode.o; +text: .text%__1cFStateK_sub_Op_If6MpknENode__v_; +text: .text%__1cIciMethodbCinterpreter_invocation_count6M_i_; +text: .text%__1cTciConstantPoolCacheDget6Mi_pv_; +text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: subnode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: subnode.o; +text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cWConstantPoolCacheEntryJset_field6MnJBytecodesECode_2nLKlassHandle_iinITosState_ii_v_; +text: .text%__1cKCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_ipnGThread__v_; +text: .text%__1cKciTypeFlowFRangeNget_block_for6Mpn0AGJsrSet_n0AMCreateOption__pn0AFBlock__; +text: .text%__1cQsubI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cXmembar_acquire_lockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQaddP_reg_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cKCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPCountedLoopNodeGOpcode6kM_i_; +text: .text%__1cUGenericGrowableArrayMraw_contains6kMpknEGrET__i_; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: systemDictionary.o; +text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: subnode.o; +text: .text%__1cIAndLNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitOset_all_memory6MpnENode__v_; +text: .text%__1cENodeHis_Goto6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_; +text: .text%__1cHnmethodIis_alive6kM_i_: nmethod.o; +text: .text%__1cFParseFBlockKinit_graph6Mp0_v_; +text: .text%__1cMTypeKlassPtrEmake6FnHTypePtrDPTR_pnHciKlass_i_pk0_; +text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__; +text: .text%__1cRshrI_reg_imm5NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOkill_dead_code6FpnENode_pnMPhaseIterGVN__i_: node.o; +text: .text%__1cMPrefetchNodeGOpcode6kM_i_; +text: .text%__1cCosGmalloc6FI_pv_; +text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cIGraphKitTadd_exception_state6MpnNSafePointNode__v_; +text: .text%__1cIimmPOperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: cfgnode.o; +text: .text%__1cRRawBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cIregDOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cKstoreINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cICodeHeapLheader_size6F_I_; +text: .text%__1cQsubI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: connode.o; +text: .text%__1cFciEnvIis_in_vm6F_i_; +text: .text%__1cIMachNodeJis_MachIf6kM_pknKMachIfNode__: ad_sparc_misc.o; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: thread.o; +text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_; +text: .text%__1cKciTypeFlowLStateVectorEmeet6Mpk1_i_; +text: .text%__1cNbranchConNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadRangeNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFParseMdo_one_block6M_v_; +text: .text%__1cOPhaseIdealLoopRregister_new_node6MpnENode_2_v_; +text: 
.text%__1cLstoreB0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: callnode.o; +text: .text%__1cIAddINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIJVMStateKclone_deep6kM_p0_; +text: .text%__1cIJVMStateLdebug_depth6kM_I_; +text: .text%__1cENodeNadd_req_batch6Mp0I_v_; +text: .text%__1cIGraphKitTadd_safepoint_edges6MpnNSafePointNode_i_v_; +text: .text%__1cKciTypeFlowLStateVectorOpush_translate6MpnGciType__v_; +text: .text%__1cJloadFNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: node.o; +text: .text%__1cMMachCallNodeHis_Call6M_pnICallNode__: ad_sparc_misc.o; +text: .text%__1cPVirtualCallDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cIMachNodeOpipeline_class6F_pknIPipeline__; +text: .text%__1cNloadRangeNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMindirectOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cHMatcherScalling_convention6FpnLRegPair_Ii_v_; +text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cIPhaseGVNUtransform_no_reclaim6MpnENode__2_; +text: .text%__1cIAddLNodeGOpcode6kM_i_; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_nGHandle_2ipnGThread__pnMklassOopDesc__; +text: .text%__1cLLShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOMethodLivenessKBasicBlockJpropagate6Mp0_v_; +text: .text%__1cFciEnvWget_klass_by_name_impl6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cKciTypeFlowGJsrSet2t6MpnFArena_i_v_; +text: .text%__1cRMachSafePointNode2t6M_v_; +text: .text%__1cHMatcherKmatch_sfpt6MpnNSafePointNode__pnIMachNode__; +text: .text%__1cMFastLockNodeGOpcode6kM_i_; +text: .text%__1cRInlineCacheBufferIcontains6FpC_i_; +text: .text%__1cLConvL2INodeGOpcode6kM_i_; +text: .text%__1cIXorINodeGOpcode6kM_i_; +text: .text%__1cICallNodeOis_CallRuntime6kM_pknPCallRuntimeNode__: callnode.o; +text: .text%__1cMVirtualSpaceOcommitted_size6kM_I_; +text: .text%__1cJloadCNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cXinsert_anti_dependences6FrpnFBlock_pnENode_rnLBlock_Array__i_: gcm.o; +text: .text%__1cPorI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcompU_iRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPorI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKTypeAryPtrFklass6kM_pnHciKlass__; +text: .text%__1cIGraphKitbDtransfer_exceptions_into_jvms6M_pnIJVMState__; +text: .text%__1cOPhaseIdealLoopHdom_lca6kMpnENode_2_2_; +text: .text%__1cLTypeInstPtrFxdual6kM_pknEType__; +text: .text%__1cNLoadRangeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHMatcherPc_frame_pointer6kM_nHOptoRegEName__; +text: .text%__1cFBlockKsched_call6MrnHMatcher_rnLBlock_Array_IrnJNode_List_pipnMMachCallNode_rnJVectorSet__I_; +text: .text%__1cSsafePoint_pollNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMMachCallNode2t6M_v_; +text: .text%__1cILoadNodeHsize_of6kM_I_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: methodLiveness.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodKlass.o; +text: .text%__1cRInterpretedRFrameKtop_method6kM_nMmethodHandle__: rframe.o; +text: .text%__1cICmpPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIGraphKitJsync_jvms6kM_pnIJVMState__; +text: .text%__1cICmpUNodeDsub6kMpknEType_3_3_; +text: .text%__1cHnmethodOis_not_entrant6kM_i_: nmethod.o; +text: .text%__1cNprefetch2NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cEUTF8Hstrrchr6FpWiW_1_; +text: .text%__1cIciMethodRhas_compiled_code6M_i_; +text: .text%__1cPcompP_iRegPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPsp_ptr_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_; +text: .text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cLOptoRuntimePnew_typeArray_C6FnJBasicType_ipnKJavaThread__v_; +text: .text%__1cRshrP_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLBoxLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXvirtual_call_RelocationIparse_ic6FrpnICodeBlob_rpC5rppnHoopDesc_pi_nNRelocIterator__; +text: .text%__1cITypeLongEmake6Fxxi_pk0_; +text: .text%__1cRloadConP_pollNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadKlassNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%JVM_GetCPMethodNameUTF; +text: .text%__1cMtlsLoadPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreB0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRloadConP_pollNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIimmIOperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cLstoreI0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: cfgnode.o; +text: .text%__1cNSharedRuntimeEldiv6Fxx_x_; +text: .text%__1cHBitDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cURethrowExceptionNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cQsubI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: ciTypeFlow.o; +text: .text%__1cKReturnNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cJloadBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_; +text: .text%__1cQaddP_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKmethodOperGmethod6kM_i_: ad_sparc.o; +text: .text%__1cQjava_lang_StringQbasic_create_oop6FpnQtypeArrayOopDesc_ipnGThread__pnHoopDesc__; +text: .text%__1cFKlassIsubklass6kM_p0_; +text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_: frame.o; +text: .text%__1cENodeHis_Goto6kM_I_: cfgnode.o; +text: .text%__1cICmpINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPemit_call_reloc6FrnKCodeBuffer_inJrelocInfoJrelocType_iii_v_; +text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_; +text: .text%__1cIMulLNodeGOpcode6kM_i_; +text: .text%__1cKReturnNodeKmatch_edge6kMI_I_; +text: .text%__1cENodeHis_Call6M_pnICallNode__: callnode.o; +text: .text%__1cILoopNodeHis_Loop6M_p0_: classes.o; +text: .text%__1cGOopMap2t6Mii_v_; +text: .text%__1cJOopMapSetKadd_gc_map6MiipnGOopMap__v_; +text: .text%__1cNloadConP0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJLoadSNodeGOpcode6kM_i_; +text: .text%__1cLPCTableNodeLbottom_type6kM_pknEType__; +text: .text%__1cKBranchDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cMCreateExNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cRloadConP_pollNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_; +text: 
.text%__1cENodeJis_Region6kM_pknKRegionNode__: connode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: connode.o; +text: .text%__1cRcmpFastUnlockNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadLNodeGOpcode6kM_i_; +text: .text%__1cMciMethodDataLhas_trap_at6MpnLProfileData_i_i_; +text: .text%__1cPThreadLocalNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOMacroAssemblerZneeds_explicit_null_check6Fi_i_; +text: .text%__1cSaddI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKReturnNodeGOpcode6kM_i_; +text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: frame.o; +text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: callnode.o; +text: .text%__1cNflagsRegUOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIGraphKitbLset_predefined_input_for_runtime_call6MpnNSafePointNode__v_; +text: .text%__1cRshlI_reg_imm5NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConINodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cNCatchProjNodeEhash6kM_I_; +text: .text%__1cEUTF8Ounicode_length6Fpkci_i_; +text: .text%__1cHCompileZintrinsic_insertion_index6MpnIciMethod_i_i_; +text: .text%__1cTOopMapForCacheEntryRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cYDebugInformationRecorderNadd_safepoint6MiipnGOopMap__v_; +text: .text%__1cHCompileTProcess_OopMap_Node6MpnIMachNode_i_v_; +text: .text%__1cRMachSafePointNodePis_MachCallLeaf6M_pnQMachCallLeafNode__: ad_sparc_misc.o; +text: .text%__1cRMachSafePointNodeLset_oop_map6MpnGOopMap__v_: ad_sparc_misc.o; +text: .text%__1cXjava_lang_ref_ReferenceNreferent_addr6FpnHoopDesc__p2_; +text: .text%__1cNCallGenerator2t6MpnIciMethod__v_; +text: .text%__1cRMachSafePointNodeSis_MachCallRuntime6M_pnTMachCallRuntimeNode__: ad_sparc_misc.o; +text: .text%__1cKCompiledIC2t6MpnKRelocation__v_; +text: .text%__1cNMachIdealNodePoper_input_base6kM_I_: machnode.o; +text: .text%__1cSCallLeafDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHRegMaskQis_aligned_Pairs6kM_i_; +text: .text%__1cKCompiledICOic_destination6kM_pC_; +text: .text%__1cHTypeAryFxmeet6kMpknEType__3_; +text: .text%__1cPClassFileParserbCverify_legal_field_modifiers6MiipnGThread__v_; +text: .text%__1cPClassFileParserWparse_field_attributes6MnSconstantPoolHandle_iHpHpi2pnPtypeArrayHandle_pnGThread__v_; +text: .text%__1cICallNodeJideal_reg6kM_I_: callnode.o; +text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_; +text: .text%__1cPmethodDataKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cKstorePNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowQadd_to_work_list6Mpn0AFBlock__v_; +text: .text%__1cKciTypeFlowKflow_block6Mpn0AFBlock_pn0ALStateVector_pn0AGJsrSet__v_; +text: .text%__1cKciTypeFlowFBlockKsuccessors6MpnQciByteCodeStream_pn0ALStateVector_pn0AGJsrSet__pnNGrowableArray4Cp1___; +text: .text%__1cRMachSafePointNodeRis_safepoint_node6kM_i_: ad_sparc_misc.o; +text: .text%__1cEUTF8Enext6FpkcpH_pc_; +text: .text%__1cJVectorSetFClear6M_v_; +text: .text%__1cHCompileSflatten_alias_type6kMpknHTypePtr__3_; +text: .text%__1cMCallJavaNodeLis_CallJava6kM_pk0_: callnode.o; +text: .text%__1cQMachCallJavaNodePis_MachCallJava6M_p0_: ad_sparc_misc.o; +text: .text%__1cCosEfree6Fpv_v_; +text: .text%__1cICallNodeScalling_convention6kMpnLRegPair_I_v_; +text: 
.text%__1cKTypeOopPtrFempty6kM_i_; +text: .text%__1cKciTypeFlowFBlock2t6Mp0pn0AFRange_pn0AGJsrSet__v_; +text: .text%__1cRshrI_reg_imm5NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPcmpFastLockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cYciExceptionHandlerStreamFcount6M_i_; +text: .text%__1cKciTypeFlowFBlockScompute_exceptions6M_v_; +text: .text%__1cIPhaseIFGFUnion6MII_v_; +text: .text%__1cLstoreB0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cYCallStaticJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cWMachCallStaticJavaNodeVis_MachCallStaticJava6M_p0_: ad_sparc_misc.o; +text: .text%__1cILoopNodeGOpcode6kM_i_; +text: .text%__1cRMachSafePointNodeWis_MachCallInterpreter6M_pnXMachCallInterpreterNode__: ad_sparc_misc.o; +text: .text%__1cICmpLNodeGOpcode6kM_i_; +text: .text%__1cOPhaseIdealLoopGspinup6MpnENode_2222pnLsmall_cache__2_; +text: .text%__1cQaddI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMindIndexOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cIConLNodeGOpcode6kM_i_; +text: .text%__1cMLinkResolverbNlinktime_resolve_virtual_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cJloadCNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%JVM_GetCPFieldSignatureUTF; +text: .text%__1cFframeQoops_do_internal6MpnKOopClosure_pnLRegisterMap_i_v_; +text: .text%__1cMLinkResolverbEruntime_resolve_virtual_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cPCountedLoopNodeOis_CountedLoop6M_p0_: classes.o; +text: .text%__1cENodeLnonnull_req6kM_p0_; +text: .text%__1cGciTypeMis_classless6kM_i_: ciType.o; +text: .text%__1cFKlassXcan_be_statically_bound6FpnNmethodOopDesc__i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: connode.o; +text: .text%__1cHnmethodZsize_of_exception_handler6F_i_; +text: .text%__1cYCallStaticJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYCallStaticJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cMelapsedTimerFstart6M_v_; +text: .text%__1cMelapsedTimerEstop6M_v_; +text: .text%__1cQandL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMURShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSaddP_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cObranchConPNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopOfind_use_block6MpnENode_22222_2_; +text: .text%__1cOPhaseIdealLoopKhandle_use6MpnENode_2pnLsmall_cache_22222_v_; +text: .text%__1cOMethodLivenessNmake_block_at6Mipn0AKBasicBlock__2_; +text: .text%__1cPorI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%jni_DeleteLocalRef: jni.o; +text: .text%__1cIGraphKit2t6M_v_; +text: .text%__1cMoutputStreamDput6Mc_v_; +text: .text%__1cIGraphKitNset_map_clone6MpnNSafePointNode__v_; +text: .text%__1cRInterpretedRFrameEinit6M_v_; +text: .text%__1cHMulNodeEhash6kM_I_; +text: .text%__1cENodeJset_req_X6MIp0pnMPhaseIterGVN__v_; +text: .text%__1cSInterpreterRuntimeDldc6FpnKJavaThread_i_v_; +text: .text%__1cJLoadINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cINodeHashLhash_insert6MpnENode__v_; +text: .text%__1cHTypeIntEmake6Fii_pk0_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: ad_sparc.o; +text: .text%__1cKstoreCNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlassKlass.o; +text: .text%__1cNSafePointNodeEhash6kM_I_: callnode.o; +text: .text%__1cENodeLbottom_type6kM_pknEType__; +text: 
.text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__; +text: .text%__1cKstoreCNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMCreateExNodeGpinned6kM_i_: classes.o; +text: .text%__1cIAddPNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__; +text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cFParsePdo_field_access6Mii_v_; +text: .text%__1cKMemBarNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cGvframe2t6MpknFframe_pknLRegisterMap_pnKJavaThread__v_; +text: .text%__1cLRegisterMap2t6Mpk0_v_; +text: .text%__1cXmembar_acquire_lockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNloadKlassNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cQmark_inner_loops6FpnIPhaseCFG_pnFBlock__v_: block.o; +text: .text%__1cILoadNodeEmake6FpnENode_22pknHTypePtr_pknEType_nJBasicType__p0_; +text: .text%__1cICallNodeSis_CallDynamicJava6kM_pknTCallDynamicJavaNode__: callnode.o; +text: .text%__1cJOopMapSetSfind_map_at_offset6kMii_pnGOopMap__; +text: .text%__1cICodeBlobbAoop_map_for_return_address6MpCi_pnGOopMap__; +text: .text%__1cNmethodOopDescWwas_executed_more_than6kMi_i_; +text: .text%__1cRshrI_reg_imm5NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcompI_iRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIciSymbolEmake6Fpkc_p0_; +text: .text%__1cPorI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLPhaseValuesHzerocon6MnJBasicType__pnHConNode__; +text: .text%__1cGPcDesc2t6Miii_v_; +text: .text%__1cHnmethodKcopy_pc_at6MipnGPcDesc__v_; +text: .text%__1cHCompileKalias_type6MpnHciField__pn0AJAliasType__; +text: .text%__1cGvframeKnew_vframe6FpknFframe_pknLRegisterMap_pnKJavaThread__p0_; +text: .text%__1cPconvI2L_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMtlsLoadPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cYcompareAndSwapL_boolNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAddINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIciMethodRget_flow_analysis6M_pnKciTypeFlow__; +text: .text%__1cWCallLeafNoFPDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSmembar_acquireNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKbranchNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cKbranchNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXjava_lang_ref_ReferencePdiscovered_addr6FpnHoopDesc__p2_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: loopnode.o; +text: .text%__1cOloadConI13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSaddL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%jni_GetObjectField: jni.o; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_: methodDataOop.o; +text: .text%__1cSMemBarCPUOrderNodeGOpcode6kM_i_; +text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__; +text: .text%__1cJFieldTypeYis_valid_array_signature6FpnNsymbolOopDesc__i_; +text: .text%__1cQandL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cQaddL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadKlassNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPmethodDataKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cPmethodDataKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cJloadBNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cNbranchConNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRegionNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: ad_sparc_misc.o; +text: .text%__1cYcompareAndSwapL_boolNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: machnode.o; +text: .text%__1cRMachNullCheckNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cYinlineCallClearArrayNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConP0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitJmake_load6MpnENode_2pknEType_nJBasicType_i_2_; +text: .text%__1cOPhaseIdealLoopIsink_use6MpnENode_2_v_; +text: .text%__1cIGraphKitOreplace_in_map6MpnENode_2_v_; +text: .text%__1cENodeGis_Con6kM_I_: callnode.o; +text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cIGraphKitNuncommon_trap6MipnHciKlass_pkci_v_; +text: .text%__1cHCompileKTracePhase2T6M_v_; +text: .text%__1cMPhaseChaitinLclone_projs6MpnFBlock_IpnENode_4rI_i_; +text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMkpnNmethodOopDesc_i_pnHnmethod__; +text: .text%__1cIJVMState2t6MpnIciMethod_p0_v_; +text: .text%__1cMLinkResolverbEruntime_resolve_special_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_4ipnGThread__v_; +text: .text%__1cMLinkResolverbFlinktime_resolve_special_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cIHaltNode2t6MpnENode_2_v_; +text: .text%__1cLOptoRuntimeSuncommon_trap_Type6F_pknITypeFunc__; +text: .text%__1cJloadLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNloadConP0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowGJsrSetNapply_control6Mp0pnQciByteCodeStream_pn0ALStateVector__v_; +text: .text%__1cSsafePoint_pollNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cINodeHashJhash_find6MpknENode__p1_; +text: .text%__1cQmulL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSaddP_reg_imm13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cOMethodLivenessKBasicBlock2t6Mp0ii_v_; +text: .text%__1cOMethodLivenessKBasicBlockQcompute_gen_kill6MpnIciMethod__v_; +text: .text%__1cQciByteCodeStreamZget_declared_field_holder6M_pnPciInstanceKlass__; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: classes.o; +text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_; +text: .text%__1cJTypeTupleKmake_range6FpnLciSignature__pk0_; +text: .text%__1cJTypeTupleLmake_domain6FpnPciInstanceKlass_pnLciSignature__pk0_; +text: .text%__1cSmembar_acquireNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cObranchConUNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cXjava_lang_ref_ReferenceJnext_addr6FpnHoopDesc__p2_; +text: .text%__1cMWarmCallInfoGis_hot6kM_i_; +text: .text%__1cMWarmCallInfoKalways_hot6F_p0_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6FnUtypeArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cSCompareAndSwapNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cLRethrowNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cTmembar_CPUOrderNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPcmpFastLockNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cTCreateExceptionNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKciTypeFlowLStateVectorJdo_invoke6MpnQciByteCodeStream_i_v_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: connode.o; +text: .text%__1cQmulL_reg_regNodePoper_input_base6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cPcmpFastLockNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreB0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadBNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cQaddP_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMtlsLoadPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLBoxLockNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cFTypeDCeq6kMpknEType__i_; +text: .text%__1cITypeLongEmake6Fx_pk0_; +text: .text%__1cKJavaThreadbHcheck_and_handle_async_exceptions6Mi_v_; +text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: memnode.o; +text: .text%__1cMPhaseChaitinTsplit_Rematerialize6MpnENode_pnFBlock_IrInNGrowableArray4CI__ipIp2i_2_; +text: .text%__1cKimmI13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cJloadBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIGraphKitZadd_exception_states_from6MpnIJVMState__v_; +text: .text%__1cQandL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseKdo_get_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cMPhaseChaitinNFind_compress6MpknENode__I_; +text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNCatchProjNode2t6MpnENode_Ii_v_; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_: ciMethodData.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: cfgnode.o; +text: .text%__1cHPhiNodeEmake6FpnENode_2_p0_; +text: .text%__1cNCatchProjNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cWCallLeafNoFPDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassOis_subclass_of6kMpnMklassOopDesc__i_; +text: .text%__1cLPCTableNodeHsize_of6kM_I_: classes.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: cfgnode.o; +text: .text%__1cLPCTableNodeKis_PCTable6kM_pk0_: classes.o; +text: .text%__1cNciCallProfileRapply_prof_factor6Mf_v_; +text: .text%__1cIciMethodTcall_profile_at_bci6Mi_nNciCallProfile__; +text: .text%__1cHCompileOcall_generator6MpnIciMethod_ipnIJVMState_if_pnNCallGenerator__; +text: .text%__1cHCompileOfind_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cIProjNodeDcmp6kMrknENode__I_; +text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_; +text: .text%__1cLLShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFParseMprofile_call6MpnENode__v_; +text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__; +text: .text%__1cIGraphKitWround_double_arguments6MpnIciMethod__v_; +text: .text%__1cQciByteCodeStreambAget_declared_method_holder6M_pnHciKlass__; +text: .text%__1cIGraphKitTround_double_result6MpnIciMethod__v_; +text: .text%__1cFParseHdo_call6M_v_; +text: .text%__1cNloadConP0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIregFOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cHMulNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMPhaseIterGVNJtransform6MpnENode__2_; +text: .text%__1cHTypeIntFwiden6kMpknEType__3_; +text: .text%__1cQxorI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsafePoint_pollNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadSNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKarrayKlassLobject_size6kMi_i_; +text: .text%__1cIciMethodLscale_count6Mi_i_; +text: .text%__1cKMemBarNodeEhash6kM_I_; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: 
.text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlass.o; +text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_; +text: .text%__1cMURShiftLNodeGOpcode6kM_i_; +text: .text%__1cOCompilerOracleOshould_exclude6FnMmethodHandle__i_; +text: .text%__1cIGraphKitUmake_exception_state6MpnENode__pnNSafePointNode__; +text: .text%__1cLProfileDataOtranslate_from6Mp0_v_: ciMethodData.o; +text: .text%__1cLstoreI0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKBranchDataNis_BranchData6M_i_: ciMethodData.o; +text: .text%__1cKRegionNodeGpinned6kM_i_: loopnode.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: memnode.o; +text: .text%__1cLBuildCutout2t6MpnIGraphKit_pnENode_ff_v_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: graphKit.o; +text: .text%__1cTCompareAndSwapLNodeGOpcode6kM_i_; +text: .text%__1cNloadRangeNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQxorI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIJumpDataLis_JumpData6M_i_: ciMethodData.o; +text: .text%__1cMMergeMemNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLLShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNflagsRegLOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cCosOis_interrupted6FpnGThread_i_i_; +text: .text%__1cLmethodKlassNoop_is_method6kM_i_: methodKlass.o; +text: .text%__1cQsubI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXmembar_release_lockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: memnode.o; +text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cRshrI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__: loopnode.o; +text: .text%__1cTcan_branch_register6FpnENode_1_i_; +text: .text%__1cQCallLeafNoFPNodeGOpcode6kM_i_; +text: .text%__1cMURShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKstoreCNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHConNodeEmake6FpknEType__p0_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: memnode.o; +text: .text%__1cFStateM_sub_Op_ConP6MpknENode__v_; +text: .text%__1cRshrP_reg_imm5NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitMsaved_ex_oop6FpnNSafePointNode__pnENode__; +text: .text%__1cISubINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPciInstanceKlassFsuper6M_p0_; +text: .text%__1cIBoolNodeHsize_of6kM_I_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: callnode.o; +text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_: objArrayKlassKlass.o; +text: .text%__1cPcompP_iRegPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSconvI2D_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotLOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cJloadPNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSsafePoint_pollNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__; +text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__; +text: .text%__1cHMonitorKnotify_all6M_i_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: bytecode.o; +text: .text%__1cOstackSlotLOperKin_RegMask6kMi_pknHRegMask__; +text: 
.text%__1cIciMethodbAinterpreter_throwout_count6kM_i_; +text: .text%__1cOCompilerOracleNshould_inline6FnMmethodHandle__i_; +text: .text%__1cIciMethodOshould_exclude6M_i_; +text: .text%__1cKInlineTreeMok_to_inline6MpnIciMethod_pnIJVMState_rnNciCallProfile_pnMWarmCallInfo__8_; +text: .text%__1cKInlineTreeWfind_subtree_from_root6Fp0pnIJVMState_pnIciMethod_i_1_; +text: .text%__1cIciMethodNshould_inline6M_i_; +text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__; +text: .text%__1cGThreadXclear_pending_exception6M_v_; +text: .text%__1cICodeHeapSallocated_capacity6kM_I_; +text: .text%__1cSstkL_to_regD_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescbHhas_unloaded_classes_in_signature6FnMmethodHandle_pnGThread__i_; +text: .text%__1cTmembar_CPUOrderNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: connode.o; +text: .text%__1cNprefetch2NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cICHeapObj2n6FI_pv_; +text: .text%__1cQaddI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJStartNodeIis_Start6M_p0_: classes.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodKlass.o; +text: .text%__1cWCallLeafNoFPDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cCosFsleep6FpnGThread_xi_i_; +text: .text%__1cIos_sleep6Fxi_i_: os_solaris.o; +text: .text%__1cSstkL_to_regD_2NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddP_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFTypeDEhash6kM_i_; +text: .text%__1cKTypeRawPtrHget_con6kM_i_; +text: .text%__1cJStartNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%jni_ExceptionOccurred: jni.o; +text: .text%__1cPconvI2L_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowLStateVectorStype_meet_internal6FpnGciType_3p0_3_; +text: .text%__1cMloadConINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGIfNodeHsize_of6kM_I_: classes.o; +text: .text%__1cPconvL2I_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIimmLOperJconstantL6kM_x_: ad_sparc_clone.o; +text: .text%__1cTStackWalkCompPolicyRcompilation_level6MnMmethodHandle_i_i_; +text: .text%jni_GetByteArrayRegion: jni.o; +text: .text%__1cIGraphKitTset_all_memory_call6MpnENode__v_; +text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_: statSampler.o; +text: .text%__1cHCompileFstart6kM_pnJStartNode__; +text: .text%__1cRis_error_reported6F_i_; +text: .text%__1cNWatcherThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cPStatSamplerTaskEtask6M_v_: statSampler.o; +text: .text%__1cMPeriodicTaskOreal_time_tick6FI_v_; +text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cIParmNodeJideal_reg6kM_I_; +text: .text%__1cQandL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIMachNodeRget_base_and_disp6kMrirpknHTypePtr__pknENode__; +text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cIregFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cHCompilebAallow_range_check_smearing6kM_i_; +text: .text%__1cRbranchLoopEndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cGRFrame2t6MnFframe_pnKJavaThread_kp0_v_; +text: .text%__1cIciMethodWwas_executed_more_than6Mi_i_; +text: .text%jni_GetArrayLength: jni.o; +text: .text%__1cIMachNodeHtwo_adr6kM_I_: machnode.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: machnode.o; 
+text: .text%__1cPciInstanceKlassUget_canonical_holder6Mi_p0_; +text: .text%__1cJloadLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOClearArrayNodeGOpcode6kM_i_; +text: .text%__1cWCallLeafNoFPDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_; +text: .text%__1cVCompressedWriteStreamEgrow6M_v_; +text: .text%__1cPcheckCastPPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_Write; +text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_; +text: .text%__1cIciMethod2t6MnMmethodHandle__v_; +text: .text%__1cIHaltNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cWShouldNotReachHereNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKRelocationJpack_data6M_i_: relocInfo.o; +text: .text%__1cNinstanceKlassVis_same_class_package6FpnHoopDesc_pnNsymbolOopDesc_24_i_; +text: .text%__1cLOpaque1NodeGOpcode6kM_i_; +text: .text%__1cQmulL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cEUTF8Fequal6FpWi1i_i_; +text: .text%__1cSbranchCon_longNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKstoreCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHAddNodePadd_of_identity6kMpknEType_3_3_; +text: .text%__1cUcompU_iReg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cGThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%JVM_RawMonitorEnter; +text: .text%__1cFMutexMjvm_raw_lock6M_v_; +text: .text%JVM_RawMonitorExit; +text: .text%__1cFMutexOjvm_raw_unlock6M_v_; +text: .text%__1cKg1RegIOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: loopnode.o; +text: .text%__1cOMachReturnNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cPClassFileParserUskip_over_field_name6MpciI_1_; +text: .text%__1cMTypeKlassPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cIGraphKitNcast_not_null6MpnENode__2_; +text: .text%__1cWShouldNotReachHereNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitTtoo_many_recompiles6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cPcmpFastLockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cETypeRget_typeflow_type6FpnGciType__pk0_; +text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlass.o; +text: .text%__1cRcmpFastUnlockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o; +text: .text%__1cURethrowExceptionNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_; +text: .text%__1cIGraphKitOtoo_many_traps6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: bytecode.o; +text: .text%__1cKBufferBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cSandI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIAddINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cLTypeInstPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cFParseRensure_memory_phi6Mii_pnHPhiNode__; +text: .text%__1cMloadConLNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cFParseFmerge6Mi_v_; +text: .text%__1cFParseUprofile_taken_branch6Mi_v_; +text: .text%__1cNSafePointNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cYcompareAndSwapL_boolNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cWstatic_stub_RelocationJpack_data6M_i_; +text: 
.text%__1cILoopNodeHis_Loop6M_p0_: loopnode.o; +text: .text%__1cNCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cQxorI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJTypeTupleFxdual6kM_pknEType__; +text: .text%__1cNLoadKlassNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPorI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRethrowNodeGOpcode6kM_i_; +text: .text%__1cYinlineCallClearArrayNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadSNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cYinlineCallClearArrayNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeHeapIcapacity6kM_I_; +text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cMCodeHeapPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cPcmpFastLockNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cNloadKlassNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFArena2T6M_v_; +text: .text%__1cMLinkResolverTresolve_static_call6FrnICallInfo_rnLKlassHandle_nMsymbolHandle_53iipnGThread__v_; +text: .text%__1cKMemBarNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cOCallRelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cLRuntimeStubYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cHoopDescSslow_identity_hash6M_i_; +text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__i_; +text: .text%__1cJloadCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIimmPOperPconstant_is_oop6kM_i_: ad_sparc_clone.o; +text: .text%__1cLPCTableNodeEhash6kM_I_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnMklassOopDesc__i_; +text: .text%__1cHConNodeLout_RegMask6kM_rknHRegMask__: classes.o; +text: .text%__1cXPhaseAggressiveCoalesceYinsert_copy_with_overlap6MpnFBlock_pnENode_II_v_; +text: .text%__1cOloadConI13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMtlsLoadPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeGis_Con6kM_I_: multnode.o; +text: .text%__1cQandI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMPhaseChaitinNFind_compress6MI_I_; +text: .text%__1cITypeLongEmake6Fxx_pk0_; +text: .text%__1cMindIndexOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFStateN_sub_Op_LoadP6MpknENode__v_; +text: .text%__1cFframeVinterpreter_frame_bci6kM_i_; +text: .text%__1cJOopMapSetTupdate_register_map6FpknFframe_pnICodeBlob_pnLRegisterMap__v_; +text: .text%__1cNGCTaskManagerIget_task6MI_pnGGCTask__; +text: .text%__1cLGCTaskQdDueueGremove6M_pnGGCTask__; +text: .text%__1cNGCTaskManagerYshould_release_resources6MI_i_; +text: .text%__1cLGCTaskQdDueueHenqueue6MpnGGCTask__v_; +text: .text%__1cNGCTaskManagerPnote_completion6MI_v_; +text: .text%__1cITypeLongFempty6kM_i_; +text: .text%__1cJloadBNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQandI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLklassVtableXvtable_accessibility_at6Mi_n0AKAccessType__; +text: .text%__1cIGraphKitHjava_bc6kM_nJBytecodesECode__; +text: .text%__1cIGraphKitNbuiltin_throw6MnODeoptimizationLDeoptReason_pnENode__v_; +text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__; +text: .text%__1cRinterpretedVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cSmembar_acquireNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%jni_GetSuperclass: jni.o; +text: 
.text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_; +text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_; +text: .text%__1cCosbCstack_shadow_pages_available6FpnGThread_nMmethodHandle__i_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: ad_sparc.o; +text: .text%__1cIMulINodeGOpcode6kM_i_; +text: .text%__1cKInlineTreePshouldNotInline6kMpnIciMethod_pnMWarmCallInfo__pkc_; +text: .text%__1cRcompL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNloadKlassNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadCNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciMethodbHhas_unloaded_classes_in_signature6M_i_; +text: .text%__1cJloadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGGCTask2t6M_v_; +text: .text%__1cJloadSNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_: classes.o; +text: .text%__1cIJumpDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cObranchConUNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cObranchConPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cITypeFuncFxdual6kM_pknEType__; +text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: nmethod.o; +text: .text%__1cKstoreINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKMemBarNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFStateM_sub_Op_CmpI6MpknENode__v_; +text: .text%__1cJcmpOpOperFccode6kM_i_: ad_sparc_clone.o; +text: .text%__1cKTypeRawPtrCeq6kMpknEType__i_; +text: .text%__1cGciType2t6MnLKlassHandle__v_; +text: .text%__1cHciKlass2t6MnLKlassHandle__v_; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cMindirectOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFParseRoptimize_inlining6MpnIciMethod_ipnPciInstanceKlass_24irnKInlineTreeLInlineStyle_r2_v_; +text: .text%__1cSPSPromotionManagerbBgc_thread_promotion_manager6Fi_p0_; +text: .text%__1cQxorI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIregFOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cKcmpOpPOperFccode6kM_i_: ad_sparc_clone.o; +text: .text%__1cNloadKlassNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHPhiNodeMslice_memory6kMpknHTypePtr__p0_; +text: .text%__1cPCheckCastPPNodeJideal_reg6kM_I_: connode.o; +text: .text%__1cObranchConPNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cObranchConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICHeapObj2k6Fpv_v_; +text: .text%__1cSaddL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRmethodDataOopDescJbci_to_dp6Mi_pC_; +text: .text%__1cMloadConFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRInvocationCounterJset_carry6M_v_; +text: .text%__1cFArena2t6M_v_; +text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_; +text: .text%__1cRInterpreterOopMap2T6M_v_; +text: .text%__1cFframeToops_interpreted_do6MpnKOopClosure_pknLRegisterMap_i_v_; +text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_; +text: 
.text%__1cRInterpreterOopMap2t6M_v_; +text: .text%__1cJloadCNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadSNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cISubINodeDsub6kMpknEType_3_3_; +text: .text%__1cFParseOreturn_current6MpnENode__v_; +text: .text%__1cRsarI_reg_imm5NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIGraphKitPstore_to_memory6MpnENode_22nJBasicType_i_2_; +text: .text%__1cJStoreNodeEmake6FpnENode_22pknHTypePtr_2nJBasicType__p0_; +text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_; +text: .text%__1cLBoxLockNodeKis_BoxLock6kM_pk0_: classes.o; +text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cLBoxLockNodeKstack_slot6FpnENode__nHOptoRegEName__; +text: .text%__1cMloadConLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJStoreNodeSIdeal_masked_input6MpnIPhaseGVN_I_pnENode__; +text: .text%__1cHMatcherPstack_alignment6F_I_; +text: .text%jni_GetPrimitiveArrayCritical: jni.o; +text: .text%jni_ReleasePrimitiveArrayCritical: jni.o; +text: .text%__1cPconvI2L_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeIget_long6kM_x_; +text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_; +text: .text%__1cSmembar_releaseNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJimmU5OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_; +text: .text%__1cLOpaque1NodeEhash6kM_I_; +text: .text%__1cJStoreNodeZIdeal_sign_extended_input6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSbranchCon_longNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_; +text: .text%__1cSmembar_releaseNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNbranchConNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRshrI_reg_imm5NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cKciTypeFlowGJsrSetSis_compatible_with6Mp1_i_; +text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__; +text: .text%__1cZCallDynamicJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJMultiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKStoreLNodeGOpcode6kM_i_; +text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_; +text: .text%__1cNloadKlassNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cETypeCeq6kMpk0_i_; +text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_; +text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_; +text: .text%__1cFParseRbranch_prediction6Mrf_f_; +text: .text%__1cRsarI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cHTypeAryFempty6kM_i_; +text: .text%__1cKTypeAryPtrFempty6kM_i_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodDataKlass.o; +text: .text%__1cOMacroAssemblerFjumpl6MrnHAddress_pnMRegisterImpl_ipkci_v_; +text: .text%__1cOMacroAssemblerEjump6MrnHAddress_ipkci_v_; +text: .text%__1cIciMethodLis_accessor6kM_i_; +text: .text%__1cRbranchLoopEndNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlass.o; +text: .text%__1cQmulL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFframeUentry_frame_is_first6kM_i_; +text: .text%__1cJiRegIOperFclone6kM_pnIMachOper__; +text: .text%__1cLstoreP0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cSCallLeafDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRethrowNodeKmatch_edge6kMI_I_; +text: .text%__1cFTypeFEhash6kM_i_; +text: .text%__1cHnmethodbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_; +text: .text%__1cFStateM_sub_Op_AddI6MpknENode__v_; +text: .text%__1cIciMethodPcan_be_compiled6M_i_; +text: .text%__1cOParseGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cFParseQcreate_entry_map6M_pnNSafePointNode__; +text: .text%__1cFArenaEused6kM_I_; +text: .text%__1cFParseLbuild_exits6M_v_; +text: .text%__1cFParseIdo_exits6M_v_; +text: .text%__1cIciMethodVhas_balanced_monitors6M_i_; +text: .text%__1cFParse2t6MpnIJVMState_pnIciMethod_f_v_; +text: .text%__1cIBoolNodeDcmp6kMrknENode__I_; +text: .text%__1cQsubI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParsePdo_method_entry6M_v_; +text: .text%__1cNCallGeneratorKfor_inline6FpnIciMethod_f_p0_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_; +text: .text%__1cQconstMethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQSystemDictionaryVadd_loader_constraint6FnMsymbolHandle_nGHandle_2pnGThread__v_; +text: .text%__1cVLoaderConstraintTableJadd_entry6MnMsymbolHandle_pnMklassOopDesc_nGHandle_34pnGThread__i_; +text: .text%__1cRciVirtualCallDataOtranslate_from6MpnLProfileData__v_; +text: .text%jni_IsSameObject: jni.o; +text: .text%__1cMloadConINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNbranchConNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cNbranchConNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQandL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cLmethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cLsymbolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQaddL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassbDcheck_valid_for_instantiation6MipnGThread__v_; +text: .text%__1cJStartNodeOis_block_start6kM_i_: callnode.o; +text: .text%__1cRsarI_reg_imm5NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciObjectFklass6M_pnHciKlass__; +text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__; +text: .text%__1cSInterpreterRuntimebAfrequency_counter_overflow6FpnKJavaThread_pC_x_; +text: .text%__1cPThreadLocalNodeGOpcode6kM_i_; +text: .text%__1cENodeRlatency_from_uses6kMrnLBlock_Array_rnNGrowableArray4CI___i_; +text: .text%__1cPconvL2I_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cZPhaseConservativeCoalesceKupdate_ifg6MIIpnIIndexSet_2_v_; +text: .text%__1cZPhaseConservativeCoalesceMunion_helper6MpnENode_2II222pnFBlock_I_v_; +text: .text%__1cOMethodLivenessKBasicBlockJstore_one6Mi_v_; +text: .text%__1cIIndexSetEswap6Mp0_v_; +text: .text%__1cHTypeAryEmake6FpknEType_pknHTypeInt__pk0_; +text: .text%__1cXmembar_release_lockNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_; +text: .text%__1cJloadLNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cRshrP_reg_imm5NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKTypeAryPtrFxdual6kM_pknEType__; +text: .text%__1cFBlockTimplicit_null_check6MrnLBlock_Array_rnNGrowableArray4CI__pnENode_6_v_; +text: .text%__1cQandI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFframeNis_glue_frame6kM_i_; +text: 
.text%__1cLAccessFlagsPatomic_set_bits6Mi_v_; +text: .text%__1cFParseYprofile_not_taken_branch6M_v_; +text: .text%__1cQComputeCallStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cHciKlassNis_subtype_of6Mp0_i_; +text: .text%__1cbACallCompiledJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: cfgnode.o; +text: .text%__1cRRawBytecodeStream2t6MnMmethodHandle__v_; +text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cKstoreBNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQLibraryIntrinsicKis_virtual6kM_i_: library_call.o; +text: .text%__1cMPrefetchNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKCastPPNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: callnode.o; +text: .text%__1cKstorePNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOPhaseIdealLoopOsplit_thru_phi6MpnENode_2i_2_; +text: .text%__1cYDebugInformationRecorderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: .text%__1cENodeGOpcode6kM_i_; +text: .text%__1cRshrP_reg_imm5NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQandI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMURShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciMethodbBinterpreter_call_site_count6Mi_i_; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cGBitMapIset_from6M0_v_; +text: .text%__1cNmethodOopDescWload_signature_classes6FnMmethodHandle_pnGThread__i_; +text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__; +text: .text%__1cNCompileBrokerTcompile_method_base6FnMmethodHandle_ii1ipkcpnGThread__pnHnmethod__; +text: .text%__1cTconstantPoolOopDescbDresolve_string_constants_impl6FnSconstantPoolHandle_pnGThread__v_; +text: .text%__1cHSubNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNinstanceKlassPlink_class_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cMloadConFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFChunk2n6FII_pv_; +text: .text%__1cbACallCompiledJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2L_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cTCallDynamicJavaNodeGOpcode6kM_i_; +text: .text%__1cKstoreBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cILoadNodeDcmp6kMrknENode__I_; +text: .text%__1cIciObject2t6M_v_; +text: .text%__1cENodeHdel_out6Mp0_v_: library_call.o; +text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: cfgnode.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: cfgnode.o; +text: .text%__1cRcompL_reg_conNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHCompileXin_preserve_stack_slots6M_I_; +text: .text%__1cPciObjectFactoryUget_empty_methodData6M_pnMciMethodData__; +text: .text%__1cMciMethodData2t6M_v_; +text: .text%__1cLOopRecorderKfind_index6MpnI_jobject__i_; +text: .text%__1cJStartNodeOis_block_start6kM_i_: classes.o; +text: .text%__1cHOrINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFframeLreal_sender6kMpnLRegisterMap__0_; +text: .text%__1cFframeTis_first_java_frame6kM_i_; +text: .text%__1cGRFrameGcaller6M_p0_; +text: .text%__1cFframeNis_java_frame6kM_i_; +text: .text%__1cNprefetch2NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowFBlockPclone_loop_head6Mp0ip1pn0AGJsrSet__3_; +text: .text%__1cPCheckCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: 
.text%__1cRshrP_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_: loopnode.o; +text: .text%__1cFParseFdo_if6MpnENode_2nIBoolTestEmask_2_v_; +text: .text%__1cLCastP2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cXmembar_release_lockNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cJloadINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSandI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: callnode.o; +text: .text%__1cMPrefetchNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_: jni.o; +text: .text%__1cMloadConFNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMCreateExNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJCodeCacheIcontains6Fpv_i_; +text: .text%__1cQaddL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMCallLeafNodeLis_CallLeaf6kM_pk0_: classes.o; +text: .text%__1cLPCTableNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMCreateExNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIMachNodeJis_MachIf6kM_pknKMachIfNode__: ad_sparc.o; +text: .text%__1cISubINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_; +text: .text%__1cQdivD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNCatchProjNodeDcmp6kMrknENode__I_; +text: .text%__1cIGraphKitRmake_slow_call_ex6MpnENode_pnPciInstanceKlass__v_; +text: .text%__1cKTypeOopPtrEhash6kM_i_; +text: .text%__1cIMinINodeGOpcode6kM_i_; +text: .text%__1cYinlineCallClearArrayNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNflagsRegLOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cMURShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIConINodeHget_int6kMpi_i_: classes.o; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: psTasks.o; +text: .text%__1cJOopMapSetGall_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure_pFppnHoopDesc_9E_v9B9B_v_; +text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cJOopMapSetHoops_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cKTypeRawPtrFxmeet6kMpknEType__3_; +text: .text%__1cFBlockUhoist_LCA_above_defs6Mp01IrnLBlock_Array__1_; +text: .text%JVM_GetMethodIxModifiers; +text: .text%__1cMvframeStream2t6MpnKJavaThread_i_v_; +text: .text%__1cQjava_lang_StringGequals6FpnHoopDesc_pHi_i_; +text: .text%__1cIMulLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_: codeBlob.o; +text: .text%__1cVExceptionHandlerTableMadd_subtable6MipnNGrowableArray4Ci__2_v_; +text: .text%__1cPconvI2L_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLLShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cHMulNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cTCreateExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_IsInterface; +text: .text%__1cHciKlassOis_subclass_of6Mp0_i_; +text: .text%__1cJloadCNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRInterpretedRFrameOis_interpreted6kM_i_: rframe.o; +text: .text%__1cGRFrameLis_compiled6kM_i_: rframe.o; +text: .text%__1cPorI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIDivINodeGOpcode6kM_i_; +text: .text%__1cbACallCompiledJavaDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_; +text: .text%__1cICodeHeapIallocate6MI_pv_; +text: .text%__1cICodeHeapPsearch_freelist6MI_pnJFreeBlock__; +text: .text%__1cRcompL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLOpaque1NodeLbottom_type6kM_pknEType__: connode.o; +text: .text%__1cNloadRangeNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQput_after_lookup6FnMsymbolHandle_0ppnLNameSigHash__i_; +text: .text%__1cSconvI2D_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHMatcherQis_spillable_arg6Fi_i_; +text: .text%__1cLRegisterMapFclear6Mpi_v_; +text: .text%__1cLRShiftLNodeGOpcode6kM_i_; +text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__; +text: .text%__1cSCountedLoopEndNodeKstride_con6kM_i_; +text: .text%__1cUPipeline_Use_Element2t6M_v_: output.o; +text: .text%__1cRshrL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHCompileSregister_intrinsic6MpnNCallGenerator__v_; +text: .text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_: classes.o; +text: .text%__1cFParseSmerge_memory_edges6MpnMMergeMemNode_ii_v_; +text: .text%__1cNSCMemProjNodeGOpcode6kM_i_; +text: .text%__1cNimmP_pollOperEtype6kM_pknEType__: ad_sparc_clone.o; +text: .text%__1cRloadConP_pollNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSconvI2D_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_; +text: .text%__1cIMulINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPciInstanceKlassLfind_method6MpnIciSymbol_2_pnIciMethod__; +text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_; +text: .text%__1cFciEnvRfind_system_klass6MpnIciSymbol__pnHciKlass__; +text: .text%__1cLRegisterMapIpd_clear6M_v_; +text: .text%__1cSstkL_to_regD_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHUNICODEHas_utf86FpHi_pc_; +text: .text%__1cLstoreP0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_2NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMnegF_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKo0RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cSaddL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffer_ipnLOopRecorder_pkcii_v_; +text: .text%__1cIGraphKitYcombine_exception_states6MpnNSafePointNode_2_v_; +text: .text%__1cSstkL_to_regD_2NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_ltNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassWfind_field_from_offset6kMiipnPfieldDescriptor__i_; +text: .text%__1cPciInstanceKlassTget_field_by_offset6Mii_pnHciField__; +text: .text%__1cRshrP_reg_imm5NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQaddL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__; +text: .text%__1cSstkL_to_regD_2NodeHtwo_adr6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cWstatic_stub_RelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cSstkL_to_regD_2NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: codeBlob.o; +text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_; +text: .text%__1cKstoreLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSvframeStreamCommonbHskip_method_invoke_and_aux_frames6M_v_; +text: .text%__1cRbranchLoopEndNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSconvI2D_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassXmark_dependent_nmethods6MpnMklassOopDesc__i_; +text: .text%__1cDCHANprocess_class6FnLKlassHandle_pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cSconvI2D_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_; +text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: nmethod.o; +text: .text%__1cUcompU_iReg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitXset_edges_for_java_call6MpnMCallJavaNode_i_v_; +text: .text%__1cOMacroAssemblerNverify_thread6M_v_; +text: .text%__1cJloadLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitZset_results_for_java_call6MpnMCallJavaNode__pnENode__; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: methodDataOop.o; +text: .text%__1cSbranchCon_longNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHnmethodVcleanup_inline_caches6M_v_; +text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_; +text: .text%__1cIAddLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNloadKlassNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFStateO_sub_Op_StoreI6MpknENode__v_; +text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_; +text: .text%__1cRbranchLoopEndNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHciField2t6MpnPfieldDescriptor__v_; +text: .text%__1cMjniIdSupportNto_method_oop6FpnK_jmethodID__pnNmethodOopDesc__; +text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_; +text: .text%__1cMloadConLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZCallInterpreterDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadSNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o; +text: .text%__1cYcompareAndSwapL_boolNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFKlassMnext_sibling6kM_p0_; +text: .text%__1cSbranchCon_longNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__; +text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: .text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJimmU5OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cRbranchLoopEndNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: ad_sparc.o; +text: .text%__1cLBlock_ArrayEgrow6MI_v_; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: loopnode.o; +text: .text%__1cbCcatch_cleanup_fix_all_inputs6FpnENode_11_v_: lcm.o; +text: 
.text%__1cYinternal_word_RelocationLunpack_data6M_v_; +text: .text%__1cOMacroAssemblerUallocate_oop_address6MpnI_jobject_pnMRegisterImpl__nHAddress__; +text: .text%__1cUThreadSafepointStateXexamine_state_of_thread6Mi_v_; +text: .text%__1cUSafepointSynchronizeOsafepoint_safe6FpnKJavaThread_nPJavaThreadState__i_; +text: .text%__1cUParallelScavengeHeapNtlab_capacity6kM_I_; +text: .text%__1cKcmpOpPOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cObranchConPNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cKstoreBNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_; +text: .text%__1cLBoxLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmpFastLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHRetNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRegionNodeEhash6kM_I_: loopnode.o; +text: .text%__1cPconvL2I_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNnmethodLocker2T6M_v_; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNnmethodLocker2t6MpnHnmethod__v_; +text: .text%__1cLstoreB0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescOis_initializer6kM_i_; +text: .text%__1cbDcatch_cleanup_find_cloned_def6FpnFBlock_pnENode_1rnLBlock_Array_i_3_: lcm.o; +text: .text%__1cQxorI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCodeBufferMstart_a_stub6M_v_; +text: .text%__1cKstoreLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKCodeBufferKend_a_stub6M_v_; +text: .text%__1cFTypeFCeq6kMpknEType__i_; +text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cQciByteCodeStreamMget_constant6M_nKciConstant__; +text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cOClearArrayNodeKmatch_edge6kMI_I_; +text: .text%__1cPconvL2I_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadSNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSbranchCon_longNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cOFastUnlockNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_: universe.o; +text: .text%__1cFStateP_sub_Op_LShiftI6MpknENode__v_; +text: .text%__1cQandL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJiRegPOperFclone6kM_pnIMachOper__; +text: .text%__1cPSignatureStreamRas_symbol_or_null6M_pnNsymbolOopDesc__; +text: .text%__1cIregDOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cKMemBarNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cKMemBarNode2t6M_v_; +text: .text%__1cNIdealLoopTreeObeautify_loops6MpnOPhaseIdealLoop__i_; +text: .text%__1cRsarI_reg_imm5NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: frame.o; +text: .text%__1cKimmI13OperFclone6kM_pnIMachOper__; +text: .text%__1cIGraphKitbBset_arguments_for_java_call6MpnMCallJavaNode__v_; +text: .text%__1cIGraphKitJpush_node6MnJBasicType_pnENode__v_: callGenerator.o; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: bytecode.o; +text: .text%__1cNCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cJcmpOpOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cNprefetch2NodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cRcompL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_SetObjectArrayElement: jni.o; +text: .text%__1cSandI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPThreadLocalNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNSafePointNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKMemBarNodeJis_MemBar6kM_pk0_: classes.o; +text: .text%__1cIciMethodXfind_monomorphic_target6MpnHciKlass_22_p0_; +text: .text%__1cJCHAResultOis_monomorphic6kM_i_; +text: .text%__1cDCHAManalyze_call6FnLKlassHandle_11nMsymbolHandle_2_pnJCHAResult__; +text: .text%__1cOloadConI13NodeFclone6kM_pnENode__; +text: .text%__1cObranchConUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitOinsert_mem_bar6MpnKMemBarNode__v_; +text: .text%__1cLOptoRuntimeOnew_objArray_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cRshlL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cbACallCompiledJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQandI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_; +text: .text%__1cZresource_reallocate_bytes6FpcII_0_; +text: .text%__1cLConvL2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOAbstractICacheQinvalidate_range6FpCi_v_; +text: .text%__1cKstorePNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMaxINodeGOpcode6kM_i_; +text: .text%__1cIGraphKitMarray_length6MpnENode__2_; +text: .text%__1cIGraphKitbMset_predefined_output_for_runtime_call6MpnENode_pnMMergeMemNode__v_; +text: .text%__1cWThreadLocalAllocBufferFclear6M_v_; +text: .text%__1cTDirectCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cNCallGeneratorPfor_direct_call6FpnIciMethod__p0_; +text: .text%__1cMWarmCallInfoLalways_cold6F_p0_; +text: .text%__1cMPhaseChaitinQgather_lrg_masks6Mi_v_; +text: .text%__1cIimmDOperJconstantD6kM_d_: ad_sparc_clone.o; +text: .text%__1cIPhaseIFGEinit6MI_v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: live.o; +text: .text%__1cJPhaseLiveHcompute6MI_v_; +text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cSaddI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRcompL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cZnoG3_iRegI_64bit_safeOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cFTypeDEmake6Fd_pk0_; +text: .text%__1cFKlassMoop_is_array6kM_i_: symbolKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: symbolKlass.o; +text: .text%__1cPThreadRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cPThreadRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cRshlI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvL2I_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQciTypeArrayKlassTis_type_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cQaddL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHMatcherMreturn_value6Fii_nLRegPair__; +text: .text%__1cGThreadQunboost_priority6Fp0_v_; +text: .text%__1cMloadConDNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cFStateN_sub_Op_LoadI6MpknENode__v_; +text: 
.text%__1cIMachOperEtype6kM_pknEType__; +text: .text%JVM_GetCPClassNameUTF; +text: .text%__1cUcompU_iReg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cITypeNodeHis_Type6M_p0_: classes.o; +text: .text%__1cICodeBlob2t6Mpkcii_v_; +text: .text%__1cKBufferBlobGcreate6Fpkci_p0_; +text: .text%__1cTmembar_CPUOrderNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadBNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKcmpOpUOperFccode6kM_i_: ad_sparc_clone.o; +text: .text%__1cObranchConUNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cObranchConUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmembar_acquireNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKJNIHandlesLmake_global6FnGHandle_i_pnI_jobject__; +text: .text%__1cTCallInterpreterNodeGOpcode6kM_i_; +text: .text%jni_GetStringLength: jni.o; +text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cRshrP_reg_imm5NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCHAResultSmonomorphic_target6kM_nMmethodHandle__; +text: .text%__1cLConvI2LNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHMonitorEwait6Mil_i_; +text: .text%__1cJloadPNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cMoutputStream2t6Mi_v_; +text: .text%__1cMstringStreamJas_string6M_pc_; +text: .text%__1cMstringStream2T6M_v_; +text: .text%__1cMstringStream2t6MI_v_; +text: .text%__1cIGraphKitMreset_memory6M_pnENode__; +text: .text%__1cZCallDynamicJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKstorePNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstoreLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNprefetch2NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeMsetup_is_top6M_v_; +text: .text%__1cIGotoNodeGOpcode6kM_i_; +text: .text%__1cPorI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_; +text: .text%__1cSReferenceProcessorSdiscover_reference6MpnHoopDesc_nNReferenceType__i_; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: callnode.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: callnode.o; +text: .text%__1cHRetNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cNbranchConNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cHRetNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjectFactory.o; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciObjectFactory.o; +text: .text%__1cKcmpOpPOperFequal6kM_i_: ad_sparc_clone.o; +text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_; +text: .text%__1cKReturnNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSandI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cXjava_lang_ref_ReferenceIset_next6FpnHoopDesc_2_v_; +text: .text%__1cSReferenceProcessorTget_discovered_list6MnNReferenceType__ppnHoopDesc__; +text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_; +text: .text%__1cXmembar_acquire_lockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_; +text: .text%__1cKStoreBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJloadINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmembar_releaseNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cSaddL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cLRShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cEDict2t6MpFpkv2_ipF2_i_v_; +text: .text%__1cEDict2T6M_v_; +text: .text%__1cKBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cLOopRecorder2t6MpnFArena__v_; +text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__; +text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cIModINodeGOpcode6kM_i_; +text: .text%__1cUGenericGrowableArray2t6MiipnEGrET_i_v_; +text: .text%__1cJloadCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeHget_int6kMpi_i_; +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_nMmethodHandle__v_; +text: .text%__1cKJavaThreadQlast_java_vframe6MpnLRegisterMap__pnKjavaVFrame__; +text: .text%__1cTStackWalkCompPolicyVfindTopInlinableFrame6MpnNGrowableArray4CpnGRFrame____2_; +text: .text%__1cTStackWalkCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_; +text: .text%__1cKjavaVFrameNis_java_frame6kM_i_: vframe.o; +text: .text%__1cISubLNodeGOpcode6kM_i_; +text: .text%__1cKciTypeFlowXmark_known_range_starts6M_v_; +text: .text%__1cKciTypeFlow2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cKciTypeFlowLfind_ranges6M_v_; +text: .text%__1cKciTypeFlowPget_start_state6M_pkn0ALStateVector__; +text: .text%__1cKciTypeFlowHdo_flow6M_v_; +text: .text%__1cKciTypeFlowKflow_types6M_v_; +text: .text%__1cKciTypeFlowKmap_blocks6M_v_; +text: .text%__1cMloadConPNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_ltNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cKStoreCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciMethodJload_code6M_v_; +text: .text%__1cMciMethodDataJload_data6M_v_; +text: .text%__1cIGraphKitTuse_exception_state6MpnNSafePointNode__pnENode__; +text: .text%__1cOcompU_iRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitGmemory6MI_pnENode__; +text: .text%__1cIHaltNodeEhash6kM_I_: classes.o; +text: .text%__1cFKlassQup_cast_abstract6M_p0_; +text: .text%__1cKReturnNodeEhash6kM_I_: classes.o; +text: .text%__1cLklassVtableRinitialize_vtable6MpnGThread__v_; +text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cNobjArrayKlassPoop_is_objArray6kM_i_: objArrayKlass.o; +text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__; +text: .text%__1cIAndINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodDataKlass.o; +text: .text%__1cPmethodDataKlassRoop_is_methodData6kM_i_: methodDataKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: methodDataKlass.o; +text: .text%__1cMciMethodData2t6MnQmethodDataHandle__v_; +text: .text%__1cIGraphKitOmake_slow_call6MpknITypeFunc_pCpkcpnENode_88_8_; +text: .text%__1cUGenericGrowableArrayEgrow6Mi_v_; +text: .text%__1cIAndINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cLOpaque2NodeGOpcode6kM_i_; +text: .text%__1cOClearArrayNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_; +text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cOPhaseIdealLoopPis_counted_loop6MpnENode_pnNIdealLoopTree__2_; +text: .text%__1cKstoreCNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackHdo_void6M_v_: 
generateOopMap.o; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: machnode.o; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: klass.o; +text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_; +text: .text%__1cKKlass_vtbl2n6FIrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: klass.o; +text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__; +text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_; +text: .text%jni_GetStringUTFLength: jni.o; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_; +text: .text%jni_GetStringUTFRegion: jni.o; +text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_; +text: .text%__1cHUNICODELutf8_length6FpHi_i_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: loopnode.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: loopnode.o; +text: .text%__1cQPlaceholderTableJadd_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cENodeHis_Copy6kM_I_: ad_sparc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: memnode.o; +text: .text%__1cKstoreBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cRsarI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUcompU_iReg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHAddNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cUcompU_iReg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKPerfStringKset_string6Mpkc_v_; +text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_; +text: .text%JVM_InternString; +text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_; +text: .text%__1cOGenerateOopMapTmerge_state_vectors6MpnNCellTypeState_2_i_; +text: .text%__1cRcompL_reg_conNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshrL_reg_imm6NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosGrandom6F_l_; +text: .text%__1cXmembar_acquire_lockNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKimmP13OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cVcompP_iRegP_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cENodeGis_Con6kM_I_: ad_sparc.o; +text: .text%__1cSaddL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRcompL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: machnode.o; +text: .text%__1cNMachIdealNodeJnum_opnds6kM_I_: machnode.o; +text: .text%__1cRMachNullCheckNodeLout_RegMask6kM_rknHRegMask__: machnode.o; +text: .text%__1cRMachNullCheckNode2t6MpnENode_2I_v_; +text: .text%__1cSTailCalljmpIndNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cIGraphKitPpush_pair_local6Mi_v_: parse2.o; +text: .text%__1cICodeHeapKdeallocate6Mpv_v_; +text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_: classes.o; +text: .text%__1cSCompareAndSwapNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFParseNpush_constant6MnKciConstant__i_; +text: .text%__1cKTypeRawPtrEmake6FpC_pk0_; 
+text: .text%jni_SetIntField: jni.o; +text: .text%__1cENodeGis_Sub6M_pnHSubNode__: classes.o; +text: .text%__1cNIdealLoopTreeMcounted_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cNIdealLoopTreeTcheck_inner_safepts6MpnOPhaseIdealLoop__v_; +text: .text%__1cIciObjectMis_obj_array6M_i_: ciInstanceKlass.o; +text: .text%__1cKBufferBlobEfree6Fp0_v_; +text: .text%__1cIRootNodeHis_Root6M_p0_: classes.o; +text: .text%__1cPconvL2I_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLRuntimeStubIis_alive6kM_i_: codeBlob.o; +text: .text%__1cLCastP2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__; +text: .text%__1cHnmethodKpc_desc_at6MpCi_pnGPcDesc__; +text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_; +text: .text%__1cQSystemDictionaryRcheck_constraints6FiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cVLoaderConstraintTablePcheck_or_update6MnTinstanceKlassHandle_nGHandle_nMsymbolHandle__pkc_; +text: .text%__1cVshrL_reg_imm6_L2INodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQmodI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassXsearch_secondary_supers6kMpnMklassOopDesc__i_; +text: .text%__1cZCallDynamicJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: graphKit.o; +text: .text%__1cKTypeRawPtrFempty6kM_i_; +text: .text%__1cLConvI2LNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIMachNodeSalignment_required6kM_i_: machnode.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: machnode.o; +text: .text%__1cWImplicitExceptionTableGappend6MII_v_; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: machnode.o; +text: .text%__1cRMachNullCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: ciMethodData.o; +text: .text%__1cQxorI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNIdealLoopTreeVadjust_loop_exit_prob6MpnOPhaseIdealLoop__v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: assembler_sparc.o; +text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cOLibraryCallKitOgenerate_guard6MpnENode_pnKRegionNode_f_v_; +text: .text%__1cSandI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLstoreP0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIPhaseIFGISquareUp6M_v_; +text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_; +text: .text%__1cKCodeBuffer2T6M_v_; +text: .text%__1cLklassItableRinitialize_itable6M_v_; +text: .text%__1cQPSGenerationPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cQPSGenerationPoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cQPSGenerationPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cKNode_Array2t6MpnFArena__v_: reg_split.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: multnode.o; +text: .text%__1cLOpaque1NodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPconvI2L_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMURShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJLoadBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRcompL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_; +text: .text%__1cFKlassZcan_be_primary_super_slow6kM_i_; +text: 
.text%__1cQciByteCodeStreamJget_klass6Mri_pnHciKlass__; +text: .text%__1cFKlassMoop_is_klass6kM_i_: instanceKlass.o; +text: .text%__1cFKlassWappend_to_sibling_list6M_v_; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: classes.o; +text: .text%__1cQcmovI_reg_ltNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__; +text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_; +text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_; +text: .text%__1cUCallCompiledJavaNodeGOpcode6kM_i_; +text: .text%__1cMTypeKlassPtrFxdual6kM_pknEType__; +text: .text%__1cNprefetch2NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_; +text: .text%__1cNinstanceKlassQinit_implementor6M_v_; +text: .text%__1cPClassFileParserNfill_oop_maps6MnTinstanceKlassHandle_ii_v_; +text: .text%__1cPClassFileStream2t6MpCipc_v_; +text: .text%__1cRconstantPoolKlassIallocate6MipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_; +text: .text%__1cNmethodOopDescMsort_methods6FpnPobjArrayOopDesc_222_v_; +text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cKoopFactoryQnew_constantPool6FipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cVjava_lang_ClassLoaderRis_trusted_loader6FpnHoopDesc__i_; +text: .text%__1cPClassFileParserOparseClassFile6MnMsymbolHandle_nGHandle_2r1pnGThread__nTinstanceKlassHandle__; +text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_; +text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__; +text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__; +text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__; +text: .text%__1cTClassLoadingServiceTnotify_class_loaded6FpnNinstanceKlass_i_v_; +text: .text%__1cPClassFileParserQparse_interfaces6MnSconstantPoolHandle_nGHandle_2pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cIVerifierRshould_verify_for6FpnHoopDesc__i_; +text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_; +text: .text%__1cIVerifierQrelax_verify_for6FpnHoopDesc__i_; +text: .text%__1cPClassFileParserMparse_fields6MnSconstantPoolHandle_ipnUFieldAllocationCount_pnOobjArrayHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_; +text: .text%__1cLklassVtablebKcompute_vtable_size_and_num_mirandas6Fri1pnMklassOopDesc_pnPobjArrayOopDesc_nLAccessFlags_pnHoopDesc_pnNsymbolOopDesc_5_v_; +text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_; +text: 
.text%__1cPClassFileParserNparse_methods6MnSconstantPoolHandle_ipnLAccessFlags_ppnPobjArrayOopDesc_66pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cQSystemDictionaryVresolve_super_or_fail6FnMsymbolHandle_1nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cKcmpOpPOperJnot_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cRMachSafePointNodePis_MachCallJava6M_pnQMachCallJavaNode__: ad_sparc_misc.o; +text: .text%__1cMPhaseIterGVNIoptimize6M_v_; +text: .text%__1cOPhaseTransform2t6MnFPhaseLPhaseNumber__v_; +text: .text%__1cISubINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceKlass.o; +text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlass.o; +text: .text%__1cHMemNodeHsize_of6kM_I_; +text: .text%__1cFVTuneQstart_class_load6F_v_; +text: .text%__1cSThreadProfilerMark2T6M_v_; +text: .text%__1cFVTuneOend_class_load6F_v_; +text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cJEventMark2t6MpkcE_v_: classLoader.o; +text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_; +text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_; +text: .text%__1cKklassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cQmodI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLRShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKCMoveINodeGOpcode6kM_i_; +text: .text%__1cLLShiftLNodeGOpcode6kM_i_; +text: .text%__1cYcompareAndSwapL_boolNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSinstanceKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNinstanceKlassScopy_static_fields6MpnSPSPromotionManager__v_; +text: .text%__1cMtlsLoadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateQ_sub_Op_URShiftI6MpknENode__v_; +text: .text%__1cKcmpOpUOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cObranchConUNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cQaddP_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_; +text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_; +text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_; +text: .text%__1cKoopFactoryVnew_constantPoolCache6FipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_; +text: .text%__1cWconstantPoolCacheKlassIallocate6MipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cIVerifierRverify_byte_codes6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassWadd_loader_constraints6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%JVM_GetMethodIxSignatureUTF; +text: .text%JVM_GetMethodIxMaxStack; +text: .text%JVM_GetMethodIxArgsSize; +text: .text%JVM_GetMethodIxByteCodeLength; +text: .text%JVM_GetMethodIxExceptionIndexes; +text: .text%JVM_GetMethodIxByteCode; +text: .text%JVM_GetMethodIxExceptionsCount; +text: 
.text%__1cPCheckCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLstoreP0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHCmpNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cMPhaseChaitinSbuild_ifg_physical6MpnMResourceArea__I_; +text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cQmulD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cNPhaseCoalescePcoalesce_driver6M_v_; +text: .text%__1cLProfileDataSis_VirtualCallData6M_i_: ciMethodData.o; +text: .text%__1cLBuildCutout2T6M_v_; +text: .text%__1cNloadConL0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cITypeFuncMreturns_long6kM_i_; +text: .text%__1cJloadSNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse2.o; +text: .text%__1cENodeGis_Con6kM_I_: connode.o; +text: .text%__1cNloadConP0NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cJimmP0OperEtype6kM_pknEType__: ad_sparc_clone.o; +text: .text%__1cLstoreI0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPCheckCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_; +text: .text%__1cIAndINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cTGeneratePairingInfoRpossible_gc_point6MpnOBytecodeStream__i_: ciMethod.o; +text: .text%__1cQandL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRegionNodeOis_block_start6kM_i_: loopnode.o; +text: .text%__1cHNTarjanICOMPRESS6M_v_; +text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_; +text: .text%__1cQsubL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQStackFrameStream2t6MpnKJavaThread_i_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: compile.o; +text: .text%__1cLClassLoaderLadd_package6Fpkci_i_; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: loopnode.o; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopnode.o; +text: .text%__1cOcompI_iRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: loopnode.o; +text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_; +text: .text%__1cHTypePtrCeq6kMpknEType__i_; +text: .text%__1cQandI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIXorINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cRmethodDataOopDescLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cSCompareAndSwapNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cRcompL_reg_conNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmembar_acquireNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPindOffset13OperFclone6kM_pnIMachOper__; +text: .text%__1cICallNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFframeZinterpreter_frame_set_bcx6Mi_v_; +text: .text%__1cJloadCNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHnmethodJcode_size6kM_i_: nmethod.o; +text: .text%__1cMnegF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: loopnode.o; +text: .text%__1cLstoreI0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cPciObjArrayKlassSis_obj_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cSaddL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQshrL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_; +text: .text%__1cILoopNodeHsize_of6kM_I_: classes.o; +text: .text%__1cHMatcherLfind_shared6MpnENode__v_; +text: .text%__1cJStartNodeHsize_of6kM_I_; +text: .text%__1cHMatcherFxform6MpnENode_i_2_; +text: .text%__1cEDict2t6MpFpkv2_ipF2_ipnFArena_i_v_; +text: .text%__1cRInterpretedRFrameKtop_vframe6kM_pnKjavaVFrame__: rframe.o; +text: .text%__1cQmodI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRinterpretedVFrameDbci6kM_i_; +text: .text%__1cRreturn_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cQmodI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarI_reg_imm5NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseKdo_put_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cIAndINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cMnegF_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNIdealLoopTreeOpolicy_peeling6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeUiteration_split_impl6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cNIdealLoopTreebBpolicy_do_remove_empty_loop6MpnOPhaseIdealLoop__i_; +text: .text%__1cQsubL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAndINodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNloadRangeNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitRmerge_fast_memory6MpnENode_2i_v_; +text: .text%__1cRcompL_reg_conNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cMnegF_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: instanceKlass.o; +text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_; +text: .text%__1cTStackWalkCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlass.o; +text: .text%__1cbCCompiledCodeSafepointHandlerYcaller_must_gc_arguments6kM_i_: safepoint.o; +text: .text%__1cUThreadSafepointStateYcaller_must_gc_arguments6kM_i_; +text: .text%__1cRCompilationPolicybJreset_counter_for_back_branch_event6MnMmethodHandle__v_; +text: .text%__1cNSafepointBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_; +text: .text%__1cOMethodLivenessQcompute_liveness6M_v_; +text: .text%__1cOMethodLiveness2t6MpnFArena_pnIciMethod__v_; +text: .text%__1cOMethodLivenessNinit_gen_kill6M_v_; +text: .text%__1cOMethodLivenessSpropagate_liveness6M_v_; +text: .text%__1cOMethodLivenessRinit_basic_blocks6M_v_; +text: .text%__1cIGraphKitHopt_iff6MpnENode_2_2_; +text: .text%__1cLCastP2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLRShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cUcompU_iReg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJTimeStampGupdate6M_v_; +text: .text%__1cRmethodDataOopDescJis_mature6kM_i_; +text: 
.text%__1cRmethodDataOopDescKmileage_of6FpnNmethodOopDesc__i_; +text: .text%__1cWconstantPoolCacheKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cMMachCallNodeMreturns_long6kM_i_; +text: .text%__1cIGraphKitOhas_ex_handler6M_i_; +text: .text%__1cMloadConDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJStartNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cFParseQarray_addressing6MnJBasicType_ippknEType__pnENode__; +text: .text%__1cNloadConP0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQaddL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReturnNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPCountedLoopNodeHsize_of6kM_I_: classes.o; +text: .text%__1cIProjNodeJideal_reg6kM_I_; +text: .text%__1cQaddI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQcmovI_reg_ltNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQcmovI_reg_gtNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRMachSafePointNodeLis_MachCall6M_pnMMachCallNode__: ad_sparc_misc.o; +text: .text%__1cSsafePoint_pollNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJcmpOpOperFequal6kM_i_: ad_sparc_clone.o; +text: .text%__1cHCompilebAvarargs_C_out_slots_killed6kM_I_; +text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6MX_v_: jni.o; +text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_; +text: .text%__1cTMachCallRuntimeNodeSis_MachCallRuntime6M_p0_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopQset_subtree_ctrl6MpnENode__v_; +text: .text%__1cFciEnvUis_unresolved_string6kMpnPciInstanceKlass_i_i_; +text: .text%__1cQciByteCodeStreamUis_unresolved_string6kM_i_; +text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cNflagsRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cZCallDynamicJavaDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerOcas_under_lock6MpnMRegisterImpl_22pCi_v_; +text: .text%__1cYciExceptionHandlerStreamPcount_remaining6M_i_; +text: .text%__1cFParseXcatch_inline_exceptions6MpnNSafePointNode__v_; +text: .text%__1cRconstantPoolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cTmembar_CPUOrderNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKcmpOpUOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%JVM_GetFieldIxModifiers; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: connode.o; +text: .text%__1cRScavengeRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cRScavengeRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cQmulL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cILoopNode2t6MpnENode_2_v_; +text: .text%JVM_IsConstructorIx; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__: classes.o; +text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_; +text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cSaddP_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHMatcherUc_calling_convention6FpnLRegPair_I_v_; +text: .text%__1cPCallRuntimeNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__; +text: .text%__1cHnmethodQis_native_method6kM_i_: nmethod.o; +text: .text%__1cPCountedLoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: 
.text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__: classes.o; +text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_; +text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_; +text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__; +text: .text%__1cLRShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKg1RegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNSignatureInfoHdo_long6M_v_: frame.o; +text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_; +text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cIciObjectMhas_encoding6M_i_; +text: .text%__1cNinstanceKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cIAndINodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cIAndINodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cTMachCallRuntimeNodePret_addr_offset6M_i_; +text: .text%__1cLConvL2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKo0RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIregDOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cVshrL_reg_imm6_L2INodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescbGupdate_compiled_code_entry_point6Mi_v_; +text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_; +text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cPBytecode_invokeFindex6kM_i_; +text: .text%__1cLRethrowNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKJNIHandlesOdestroy_global6FpnI_jobject_i_v_; +text: .text%__1cSPSKeepAliveClosureGdo_oop6MppnHoopDesc__v_: psScavenge.o; +text: .text%__1cFParseFBlockRsuccessor_for_bci6Mi_p1_; +text: .text%__1cVPreserveExceptionMark2T6M_v_; +text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_; +text: .text%__1cHRetNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHMatcherQpost_fast_unlock6FpknENode__i_; +text: .text%__1cIRootNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cMoutputStreamFprint6MpkcE_v_; +text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_; +text: .text%__1cHAddNodeGis_Add6kM_pk0_: classes.o; +text: .text%__1cHCompileQsync_stack_slots6kM_i_; +text: .text%__1cJLoadCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHMulNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJLoadFNodeGOpcode6kM_i_; +text: .text%__1cNSignatureInfoHdo_long6M_v_: bytecode.o; +text: .text%__1cHPhiNodeDcmp6kMrknENode__I_; +text: .text%__1cKciTypeFlowLStateVectorMdo_putstatic6MpnQciByteCodeStream__v_; +text: .text%__1cHOrINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: objArrayKlass.o; +text: .text%__1cKciTypeFlowLStateVectorGdo_ldc6MpnQciByteCodeStream__v_; +text: .text%__1cSTailCalljmpIndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_; +text: .text%__1cKstoreINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRloadConP_pollNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_; +text: .text%__1cQxorI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLMachNopNodeMideal_Opcode6kM_i_: ad_sparc.o; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstance.o; +text: .text%__1cLMachNopNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMFastLockNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: 
.text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopopts.o; +text: .text%__1cOPhaseIdealLoopNreorg_offsets6MpnNIdealLoopTree__v_; +text: .text%__1cSmembar_releaseNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMVM_OperationPevaluation_mode6kM_n0AEMode__: vm_operations.o; +text: .text%__1cRshrL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse3.o; +text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_; +text: .text%__1cQLRUMaxHeapPolicyWshould_clear_reference6MpnHoopDesc__i_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_; +text: .text%__1cPcompP_iRegPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSxorI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopRsplit_thru_region6MpnENode_2_2_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: output.o; +text: .text%__1cIAndLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cbAPSEvacuateFollowersClosureHdo_void6M_v_: psScavenge.o; +text: .text%jni_ExceptionCheck: jni.o; +text: .text%__1cIAndLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__; +text: .text%__1cOPhaseIdealLoopMdominated_by6MpnENode_2_v_; +text: .text%__1cQshlI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseNthrow_to_exit6MpnNSafePointNode__v_; +text: .text%__1cWCallLeafNoFPDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvL2I_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLCounterDataOis_CounterData6M_i_: methodDataOop.o; +text: .text%__1cJloadPNodeFclone6kM_pnENode__; +text: .text%__1cQinstanceRefKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cYDebugInformationRecorderNappend_handle6MpnI_jobject__i_; +text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cJVectorSetGslamin6Mrk0_v_; +text: .text%JVM_Clone; +text: .text%__1cRAbstractAssemblerFflush6M_v_; +text: .text%__1cYinlineCallClearArrayNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPciInstanceKlassTis_java_lang_Object6M_i_; +text: .text%__1cITypeLongFxdual6kM_pknEType__; +text: .text%__1cSmembar_releaseNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIJumpDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cRshrL_reg_imm6NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_; +text: .text%__1cOstackSlotLOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cURethrowExceptionNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshrL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXmembar_release_lockNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLOpaque2NodeEhash6kM_I_; +text: .text%__1cJloadFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUcompU_iReg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYSurvivorMutableSpacePoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cNSharedRuntimeOresolve_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cUEdenMutableSpacePoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: 
.text%__1cNSharedRuntimeSresolve_sub_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cYSurvivorMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cYSurvivorMutableSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cLOptoRuntimeJstub_name6FpC_pkc_; +text: .text%__1cLstoreP0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHOrINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cICmpLNodeDsub6kMpknEType_3_3_; +text: .text%__1cHPhiNodeKmake_blank6FpnENode_2_p0_; +text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_: jni.o; +text: .text%__1cHMatcherNfind_receiver6Fi_nFVMRegEName__; +text: .text%__1cIMulINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cQandI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMachEpilogNodeNis_MachEpilog6M_p0_: ad_sparc.o; +text: .text%__1cOMachEpilogNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIimmPOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConPNodeFclone6kM_pnENode__; +text: .text%__1cNSCMemProjNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cFStateM_sub_Op_SubI6MpknENode__v_; +text: .text%__1cFframeRretrieve_receiver6MpnLRegisterMap__pnHoopDesc__; +text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__; +text: .text%__1cNloadKlassNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRMachSafePointNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cMTailCallNodeKmatch_edge6kMI_I_; +text: .text%jni_NewObject: jni.o; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: assembler_sparc.o; +text: .text%__1cIPhaseIFGYCompute_Effective_Degree6M_v_; +text: .text%__1cHMemNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cXmembar_release_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cNobjArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cJNode_ListEyank6MpnENode__v_; +text: .text%__1cMPhaseChaitinISimplify6M_v_; +text: .text%__1cNIdealLoopTreeIset_nest6MI_i_; +text: .text%__1cQcmovI_reg_gtNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCallLeafDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIMulLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cMStartOSRNodeGOpcode6kM_i_; +text: .text%__1cSCallLeafDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cLcmpD_ccNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJLoadSNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJcmpOpOperEless6kM_i_: ad_sparc_clone.o; +text: .text%__1cKciTypeFlowPflow_exceptions6MpnNGrowableArray4Cpn0AFBlock___pnNGrowableArray4CpnPciInstanceKlass___pn0ALStateVector__v_; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__: ad_sparc_misc.o; +text: .text%__1cKType_ArrayEgrow6MI_v_; +text: .text%__1cSaddL_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConP0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKCompiledICWis_in_transition_state6kM_i_; +text: .text%__1cXmembar_release_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPconvF2D_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadCNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJScopeDescGis_top6kM_i_; +text: .text%__1cRshrL_reg_imm6NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKciTypeFlowLStateVectorOmeet_exception6MpnPciInstanceKlass_pk1_i_; +text: .text%__1cMURShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; 
+text: .text%__1cNloadConL0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cVshrL_reg_imm6_L2INodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSMemBarVolatileNodeGOpcode6kM_i_; +text: .text%__1cLstoreB0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRshrI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGThreadMis_VM_thread6kM_i_: thread.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: onStackReplacement.o; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_; +text: .text%__1cRcmpFastUnlockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNSafePointNodeLpop_monitor6M_v_; +text: .text%__1cMPhaseChaitinVfind_base_for_derived6MppnENode_2rI_2_; +text: .text%__1cLOptoRuntimebAcomplete_monitor_exit_Type6F_pknITypeFunc__; +text: .text%__1cOstackSlotIOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cIGraphKitNshared_unlock6MpnENode_2_v_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: connode.o; +text: .text%__1cFStateT_sub_Op_CheckCastPP6MpknENode__v_; +text: .text%__1cTCallDynamicJavaNodeEhash6kM_I_: callnode.o; +text: .text%__1cQsubI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassDLCA6Mp0_1_; +text: .text%__1cKTypeRawPtrEmake6FnHTypePtrDPTR__pk0_; +text: .text%__1cHciKlassVleast_common_ancestor6Mp0_1_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: frame.o; +text: .text%__1cOPhaseIdealLoopPbuild_loop_tree6M_v_; +text: .text%__1cRcompL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeGis_Con6kM_I_: memnode.o; +text: .text%__1cRshlL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoop2t6MrnMPhaseIterGVN_pk0i_v_; +text: .text%__1cIciObjectIis_klass6M_i_: ciInstance.o; +text: .text%__1cRloadConP_pollNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQshlL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMindirectOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindirectOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindirectOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cNSafePointNodeMpush_monitor6MpknMFastLockNode__v_; +text: .text%__1cSCallLeafDirectNodeRis_safepoint_node6kM_i_: ad_sparc_misc.o; +text: .text%__1cSCallLeafDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCallLeafDirectNodeKmethod_set6Mi_v_; +text: .text%__1cIDivINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJLoadBNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSstring_compareNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cFBytesNget_native_u46FpC_I_: bytecodes.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlass.o; +text: .text%__1cRcompL_reg_conNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverUresolve_special_call6FrnICallInfo_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cJloadBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: cfgnode.o; +text: .text%__1cRcompL_reg_conNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: cfgnode.o; +text: .text%__1cPcheckCastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: connode.o; +text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_; +text: .text%__1cJCMoveNodeLis_cmove_id6FpnOPhaseTransform_pnENode_44pnIBoolNode__4_; +text: 
.text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interp_masm_sparc.o; +text: .text%__1cKTypeAryPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cGOopMapPset_derived_oop6MnHOptoRegEName_ii2_v_; +text: .text%__1cLConvL2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOPhaseIdealLoopKDominators6M_v_; +text: .text%__1cOPhaseIdealLoopPbuild_loop_late6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cOPhaseIdealLoopQbuild_loop_early6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cKCompiledICIis_clean6kM_i_; +text: .text%jni_NewGlobalRef: jni.o; +text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_; +text: .text%__1cIAndINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cGOopMapQset_callee_saved6MnHOptoRegEName_ii2_v_; +text: .text%__1cYcompareAndSwapL_boolNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRbranchLoopEndNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: loopnode.o; +text: .text%__1cKCompiledICZcompute_monomorphic_entry6FnMmethodHandle_nLKlassHandle_iirnOCompiledICInfo_pnGThread__v_; +text: .text%__1cKtype2basic6FpknEType__nJBasicType__; +text: .text%__1cMPhaseChaitinFSplit6MI_I_; +text: .text%__1cMPhaseChaitinHcompact6M_v_; +text: .text%__1cZPhaseConservativeCoalesce2t6MrnMPhaseChaitin__v_; +text: .text%__1cNIdealLoopTreeMis_loop_exit6kMpnENode_pnOPhaseIdealLoop__2_; +text: .text%__1cMPhaseChaitinZcompress_uf_map_for_nodes6M_v_; +text: .text%__1cZPhaseConservativeCoalesceGverify6M_v_; +text: .text%__1cRcmpFastUnlockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQshlI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerWcompiler_unlock_object6MpnMRegisterImpl_222rnFLabel__v_; +text: .text%__1cXmembar_release_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: interpreter_sparc.o; +text: .text%__1cKPSYoungGenNused_in_bytes6kM_I_; +text: .text%__1cOMachEpilogNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_; +text: .text%__1cLklassItablebFinitialize_itable_for_interface6MpnMklassOopDesc_pnRitableMethodEntry__v_; +text: .text%__1cJloadFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIRootNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cIRootNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHMatcherPprior_fast_lock6FpknENode__i_; +text: .text%__1cJLoadLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSReferenceProcessorbAenqueue_discovered_reflist6MpnHoopDesc_p2_v_; +text: .text%__1cSReferenceProcessorbAprocess_discovered_reflist6MppnHoopDesc_pnPReferencePolicy_i_v_; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_; +text: .text%__1cKJavaThreadNreguard_stack6MpC_i_; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_; +text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_; +text: .text%__1cIUniverseWis_out_of_memory_error6FnGHandle__i_; +text: .text%JVM_FillInStackTrace; +text: .text%__1cKJavaThreadGactive6F_p0_; +text: .text%__1cKstoreFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHnmethodOis_java_method6kM_i_: nmethod.o; +text: .text%__1cMtlsLoadPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_; +text: .text%__1cENodeUdepends_only_on_test6kM_i_: connode.o; +text: 
.text%__1cMVirtualSpaceNreserved_size6kM_I_; +text: .text%__1cICodeHeapMmax_capacity6kM_I_; +text: .text%__1cSbranchCon_longNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cHTypePtrFxmeet6kMpknEType__3_; +text: .text%__1cbBInterpreterCodeletInterfaceRcode_size_to_size6kMi_i_: interpreter.o; +text: .text%__1cbBInterpreterCodeletInterfaceKinitialize6MpnEStub_i_v_: interpreter.o; +text: .text%__1cNflagsRegFOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cIMinINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFParseWensure_phis_everywhere6M_v_; +text: .text%__1cVshrL_reg_imm6_L2INodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadFNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cTDebugInfoReadStream2t6MpknHnmethod_i_v_; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: ad_sparc.o; +text: .text%__1cLRethrowNodeEhash6kM_I_: classes.o; +text: .text%__1cIDivLNodeGOpcode6kM_i_; +text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_; +text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_; +text: .text%__1cNmethodOopDescVclear_native_function6M_v_; +text: .text%__1cOloadConL13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_; +text: .text%__1cNloadConL0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%jio_snprintf; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: node.o; +text: .text%__1cMloadConINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cSmulI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLOptoRuntimebAresolve_opt_virtual_call_C6FpnKJavaThread__pC_; +text: .text%jni_NewLocalRef: jni.o; +text: .text%__1cRsubI_zero_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulDNodeGOpcode6kM_i_; +text: .text%__1cLStrCompNodeGOpcode6kM_i_; +text: .text%__1cJCodeCacheXmark_for_deoptimization6FpnMklassOopDesc__i_; +text: .text%__1cQcmovI_reg_gtNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cURethrowExceptionNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: callnode.o; +text: .text%__1cKstoreBNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_; +text: .text%__1cKStoreFNodeGOpcode6kM_i_; +text: .text%__1cLConvD2INodeGOpcode6kM_i_; +text: .text%__1cURethrowExceptionNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLPhaseValues2T5B6M_v_; +text: .text%__1cIAddLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKReturnNode2t6MpnENode_2222_v_; +text: .text%__1cMURShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKReturnNodeJideal_reg6kM_I_: classes.o; +text: .text%jni_DeleteGlobalRef: jni.o; +text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_; +text: .text%__1cVPatchingRelocIteratorHprepass6M_v_; +text: .text%__1cKCodeBufferPcopy_relocation6MpnICodeBlob__v_; +text: .text%__1cKCodeBufferJcopy_code6MpnICodeBlob__v_; +text: .text%__1cZnoG3_iRegI_64bit_safeOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNRelocIteratorMcreate_index6FpnKCodeBuffer_pnJrelocInfo_4_4_; +text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_; +text: 
.text%__1cQcmovI_reg_gtNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cICodeBlobWfix_relocation_at_move6Mi_v_; +text: .text%__1cICodeBlob2t6MpkcpnKCodeBuffer_iiipnJOopMapSet_i_v_; +text: .text%__1cIAndLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIAndLNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cJOopMapSet2t6M_v_; +text: .text%__1cNSCMemProjNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%JVM_GetCPMethodModifiers; +text: .text%jni_GetObjectArrayElement: jni.o; +text: .text%__1cOCompilerOraclePshould_break_at6FnMmethodHandle__i_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: memnode.o; +text: .text%__1cFParseKarray_load6MnJBasicType__v_; +text: .text%jni_SetLongField: jni.o; +text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_; +text: .text%__1cJOopMapSetHcopy_to6MpC_v_; +text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_; +text: .text%__1cJOopMapSetJheap_size6kM_i_; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: classes.o; +text: .text%__1cNSafePointNodeKgrow_stack6MpnIJVMState_I_v_; +text: .text%__1cIJVMState2t6Mi_v_; +text: .text%__1cIAndLNodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: interp_masm_sparc.o; +text: .text%__1cIAndLNodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cIAndLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNSignatureInfoJdo_double6M_v_: frame.o; +text: .text%__1cJLoadSNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cNRelocIterator2t6MpnKCodeBuffer_pC3_v_; +text: .text%__1cMMachProjNodeHsize_of6kM_I_: classes.o; +text: .text%__1cTDebugInfoReadStreamLread_handle6M_nGHandle__; +text: .text%__1cOPhaseIdealLoopUsplit_if_with_blocks6MrnJVectorSet_rnKNode_Stack__v_; +text: .text%__1cMtlsLoadPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_; +text: .text%__1cLOopRecorderIoop_size6M_i_; +text: .text%__1cYDebugInformationRecorderJdata_size6M_i_; +text: .text%__1cHnmethodPscopes_pcs_size6kM_i_: nmethod.o; +text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_; +text: .text%__1cHnmethodQscopes_data_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodJstub_size6kM_i_: nmethod.o; +text: .text%__1cOPhaseIdealLoopOset_early_ctrl6MpnENode__v_; +text: .text%__1cHnmethodKtotal_size6kM_i_; +text: .text%__1cHnmethodOexception_size6kM_i_: nmethod.o; +text: .text%__1cbFunnecessary_membar_volatileNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMloadConLNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cJiRegLOperFclone6kM_pnIMachOper__; +text: .text%__1cFParseNadd_safepoint6M_v_; +text: .text%__1cOPhaseTransform2t6Mp0nFPhaseLPhaseNumber__v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: phaseX.o; +text: .text%__1cLPhaseValues2t6Mp0_v_; +text: .text%__1cQcmovI_reg_ltNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cXPhaseAggressiveCoalesceGverify6M_v_: coalesce.o; +text: .text%__1cHCompilebBregister_library_intrinsics6M_v_; +text: .text%__1cXPhaseAggressiveCoalesceNinsert_copies6MrnHMatcher__v_; +text: .text%__1cNPhaseRegAlloc2t6MIrnIPhaseCFG_rnHMatcher_pF_v_v_; +text: .text%__1cIPhaseCFGJbuild_cfg6M_I_; +text: .text%__1cHCompileEInit6Mi_v_; +text: .text%__1cVExceptionHandlerTable2t6Mi_v_; +text: .text%__1cHMatcherLreturn_addr6kM_nHOptoRegEName__; +text: .text%__1cMPhaseChaitin2t6MIrnIPhaseCFG_rnHMatcher__v_; +text: .text%__1cMPhaseChaitinRRegister_Allocate6M_v_; +text: 
.text%__1cHCompileTset_cached_top_node6MpnENode__v_; +text: .text%__1cIPhaseCFGQFind_Inner_Loops6M_v_; +text: .text%__1cHMatcherZnumber_of_saved_registers6F_i_; +text: .text%__1cNPhaseRegAllocTpd_preallocate_hook6M_v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: block.o; +text: .text%__1cMPhaseChaitinMreset_uf_map6MI_v_; +text: .text%__1cMPhaseChaitinRbuild_ifg_virtual6M_v_; +text: .text%__1cIPhaseCFGQGlobalCodeMotion6MrnHMatcher_IrnJNode_List__v_; +text: .text%__1cHMatcherTFixup_Save_On_Entry6M_v_; +text: .text%__1cHMatcherPinit_spill_mask6MpnENode__v_; +text: .text%__1cHCompileICode_Gen6M_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: matcher.o; +text: .text%__1cFArena2t6MI_v_; +text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_; +text: .text%__1cHMatcherVinit_first_stack_mask6M_v_; +text: .text%__1cFArenaNmove_contents6Mp0_1_; +text: .text%__1cKCodeBufferGresize6Miiii_v_; +text: .text%__1cFArenaRdestruct_contents6M_v_; +text: .text%__1cIPhaseIFG2t6MpnFArena__v_; +text: .text%__1cFDictIFreset6MpknEDict__v_; +text: .text%__1cHMatcherFmatch6M_v_; +text: .text%__1cHMatcher2t6MrnJNode_List__v_; +text: .text%__1cIPhaseCFGVschedule_pinned_nodes6MrnJVectorSet__v_; +text: .text%__1cIPhaseCFGOschedule_early6MrnJVectorSet_rnJNode_List_rnLBlock_Array__i_; +text: .text%__1cETypeKInitialize6FpnHCompile__v_; +text: .text%__1cIPhaseCFGNschedule_late6MrnJVectorSet_rnJNode_List_rnNGrowableArray4CI___v_; +text: .text%__1cIPhaseCFGYEstimate_Block_Frequency6M_v_; +text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_; +text: .text%__1cOCompileWrapper2t6MpnHCompile__v_; +text: .text%__1cIPhaseCFGKDominators6M_v_; +text: .text%__1cIPhaseCFG2t6MpnFArena_pnIRootNode_rnHMatcher__v_; +text: .text%__1cMPhaseChaitinbGstretch_base_pointer_live_ranges6MpnMResourceArea__i_; +text: .text%__1cJPhaseLive2t6MrknIPhaseCFG_rnILRG_List_pnFArena__v_; +text: .text%__1cJPhaseLive2T6M_v_; +text: .text%__1cWemit_exception_handler6FrnKCodeBuffer__v_; +text: .text%__1cHCompileYinit_scratch_buffer_blob6M_v_; +text: .text%__1cKCodeBufferOrelocate_stubs6M_v_; +text: .text%__1cOMachPrologNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMPhaseChaitin2T6M_v_; +text: .text%__1cHMatcherWis_short_branch_offset6Mi_i_; +text: .text%__1cHCompileTFillExceptionTables6MIpI1pnFLabel__v_; +text: .text%__1cMPhaseChaitinbApost_allocate_copy_removal6M_v_; +text: .text%__1cIPhaseCFGLRemoveEmpty6M_v_; +text: .text%__1cHCompileGOutput6M_v_; +text: .text%__1cWImplicitExceptionTableIset_size6MI_v_; +text: .text%__1cHCompileMBuildOopMaps6M_v_; +text: .text%__1cHCompilePneed_stack_bang6kMi_i_; +text: .text%__1cLdo_liveness6FpnNPhaseRegAlloc_pnIPhaseCFG_pnKBlock_List_ipnFArena_pnEDict__v_: buildOopMap.o; +text: .text%__1cGBundlePinitialize_nops6FppnIMachNode__v_; +text: .text%__1cMPhaseChaitinMfixup_spills6M_v_; +text: .text%__1cNPhaseRegAllocPalloc_node_regs6Mi_v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: buildOopMap.o; +text: .text%__1cHCompileLFill_buffer6M_v_; +text: .text%__1cVCallRuntimeDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cZCallInterpreterDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSignatureInfoJdo_double6M_v_: bytecode.o; +text: .text%__1cWpoll_return_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cENodeHrm_prec6MI_v_; +text: .text%__1cLcmpD_ccNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cGciTypeMis_classless6kM_i_: ciInstanceKlass.o; +text: .text%__1cIMulLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: 
.text%__1cHRetNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIGraphKitOset_pair_local6MipnENode__v_: parse2.o; +text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_; +text: .text%JVM_DoPrivileged; +text: .text%__1cOcompiledVFrameGis_top6kM_i_; +text: .text%__1cRsubI_zero_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHRetNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICodeBlobPallocation_size6FpnKCodeBuffer_ii_I_; +text: .text%__1cQaddL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_; +text: .text%__1cHciKlassMis_interface6M_i_: ciObjArrayKlass.o; +text: .text%__1cIConDNodeGOpcode6kM_i_; +text: .text%__1cObranchConFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cTresource_free_bytes6FpcI_v_; +text: .text%__1cNmethodOopDescbDbuild_interpreter_method_data6FnMmethodHandle_pnGThread__v_; +text: .text%__1cIAddLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRcompL_reg_conNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_; +text: .text%__1cPconvL2I_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFciEnvKcompile_id6M_I_; +text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cRmethodDataOopDescKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_bytes6FpnNmethodOopDesc__i_; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_words6FpnNmethodOopDesc__i_; +text: .text%__1cIDivINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cILoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPmethodDataKlassIallocate6MnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cKoopFactoryOnew_methodData6FnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cNprefetch2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cILoopNodeHsize_of6kM_I_: loopnode.o; +text: .text%__1cIAndLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKCodeBuffer2t6MpCi_v_; +text: .text%__1cVshrL_reg_imm6_L2INodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nativeInst_sparc.o; +text: .text%__1cIConPNodeEmake6FpC_p0_; +text: .text%__1cIGraphKitNstore_barrier6MpnENode_22_v_; +text: .text%__1cOcmovII_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvL2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: callnode.o; +text: .text%__1cIciMethodRinstructions_size6M_i_; +text: .text%__1cSmulI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovII_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: memnode.o; +text: .text%__1cCosXthread_local_storage_at6Fi_pv_; +text: .text%__1cMindIndexOperNconstant_disp6kM_i_: ad_sparc.o; +text: .text%__1cMindIndexOperOindex_position6kM_i_: ad_sparc.o; +text: .text%__1cMindIndexOperFscale6kM_i_: ad_sparc.o; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: assembler_sparc.o; +text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_; +text: .text%__1cMindIndexOperNbase_position6kM_i_: ad_sparc.o; +text: .text%__1cNloadKlassNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateR_sub_Op_LoadKlass6MpknENode__v_; +text: .text%__1cGTarjanICOMPRESS6M_v_; +text: 
.text%__1cKstoreCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKloadUBNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cHciKlassGloader6M_pnHoopDesc__: ciTypeArrayKlass.o; +text: .text%__1cICmpDNodeGOpcode6kM_i_; +text: .text%__1cNloadConL0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulLNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cOPhaseIdealLoopOplace_near_use6kMpnENode__2_; +text: .text%__1cVCallRuntimeDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOloadConL13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cYcompareAndSwapL_boolNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cZCallInterpreterDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLstoreB0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSInterpreterRuntimeOprofile_method6FpnKJavaThread_pC_i_; +text: .text%__1cHMonitorGnotify6M_i_; +text: .text%__1cNIdealLoopTreePiteration_split6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cOloadConL13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMURShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNRelocIteratorEnext6M_i_: compiledIC.o; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cLRShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLOptoRuntimeRmultianewarray1_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cIMachNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLOpaque2NodeLbottom_type6kM_pknEType__: connode.o; +text: .text%__1cGThreadOis_interrupted6Fp0i_i_; +text: .text%__1cSconvI2D_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: subnode.o; +text: .text%__1cUPSGenerationCountersKupdate_all6M_v_: psGenerationCounters.o; +text: .text%__1cQComputeCallStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cKTypeOopPtrSmake_from_constant6FpnIciObject__pk0_; +text: .text%__1cQregP_to_stkPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cJTimeStampSticks_since_update6kM_x_; +text: .text%__1cJArrayDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cQmodI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlass.o; +text: .text%__1cNmethodOopDescThas_native_function6kM_i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constantPoolKlass.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: loopnode.o; +text: .text%__1cIMulINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cURethrowExceptionNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNmethodOopDescWis_vanilla_constructor6kM_i_; +text: .text%__1cIAddLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cQcmovI_reg_ltNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLstoreB0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRshrL_reg_imm6NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIModINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKstoreFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cJcmpOpOperHgreater6kM_i_: ad_sparc_clone.o; +text: 
.text%__1cQstkI_to_regFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJimmL0OperJconstantL6kM_x_: ad_sparc_clone.o; +text: .text%__1cFParseJdo_ifnull6MnIBoolTestEmask__v_; +text: .text%__1cQmulD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJimmI0OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cFStateM_sub_Op_ConL6MpknENode__v_; +text: .text%__1cOloadConL13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNObjectMonitorHis_busy6kM_i_; +text: .text%JVM_GetClassNameUTF; +text: .text%__1cKloadUBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIXorINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cFStateM_sub_Op_AndI6MpknENode__v_; +text: .text%__1cVshrL_reg_imm6_L2INodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: multnode.o; +text: .text%__1cKcmpOpFOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cLRuntimeStubbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cNflagsRegFOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cTmembar_volatileNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRshlL_reg_imm6NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_: callnode.o; +text: .text%__1cJloadFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: memnode.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: memnode.o; +text: .text%__1cFStateL_sub_Op_OrI6MpknENode__v_; +text: .text%__1cJCmpL3NodeGOpcode6kM_i_; +text: .text%__1cIciObjectOis_method_data6M_i_: ciInstance.o; +text: .text%__1cIciObjectJis_method6M_i_: ciInstance.o; +text: .text%JVM_FindLoadedClass; +text: .text%__1cLCastP2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIMulLNodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cIMulLNodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cRbranchLoopEndNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_; +text: .text%__1cMMutableSpaceFclear6M_v_; +text: .text%__1cIConFNodeGOpcode6kM_i_; +text: .text%__1cOClearArrayNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIGraphKitNallocate_heap6MpnENode_222pknITypeFunc_pC22ipknKTypeOopPtr__2_; +text: .text%__1cPciInstanceKlassbBcompute_shared_has_subklass6M_i_; +text: .text%__1cSmembar_acquireNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cMPrefetchNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQmulD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNprefetch2NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIModLNodeGOpcode6kM_i_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_; +text: .text%__1cKReflectionGinvoke6FnTinstanceKlassHandle_nMmethodHandle_nGHandle_inOobjArrayHandle_nJBasicType_4ipnGThread__pnHoopDesc__; +text: .text%__1cSbranchCon_longNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cQLibraryIntrinsicIgenerate6MpnIJVMState__2_; +text: .text%__1cOLibraryCallKitNtry_to_inline6M_i_; +text: .text%__1cLRShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKTypeRawPtrFxdual6kM_pknEType__; +text: .text%__1cNloadConL0NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_: library_call.o; +text: .text%__1cFTypeFEmake6Ff_pk0_; +text: .text%__1cIimmFOperJconstantF6kM_f_: ad_sparc_clone.o; +text: .text%__1cEUTF8Ounicode_length6Fpkc_i_; +text: 
.text%__1cCosRcurrent_thread_id6F_i_; +text: .text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_; +text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_; +text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fi_v_; +text: .text%__1cbACallCompiledJavaDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmembar_acquireNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cPorI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIPhaseCFGOinsert_goto_at6MII_v_; +text: .text%__1cENodeHis_Copy6kM_I_: node.o; +text: .text%__1cZCallInterpreterDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: loopTransform.o; +text: .text%__1cITypeLongFwiden6kMpknEType__3_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: loopnode.o; +text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__; +text: .text%__1cSxorI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPCallRuntimeNodeGOpcode6kM_i_; +text: .text%__1cJcmpOpOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cLcmpD_ccNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMindIndexOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindIndexOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindIndexOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%JVM_FindClassFromClass; +text: .text%__1cRshrP_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cObranchConFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cLRethrowNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMMutableSpaceKinitialize6MnJMemRegion_i_v_; +text: .text%__1cKstoreLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQshrI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: loopnode.o; +text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_; +text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_; +text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_; +text: .text%JVM_NewInstanceFromConstructor; +text: .text%__1cNSignatureInfoIdo_float6M_v_: frame.o; +text: .text%__1cObox_handleNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLBoxLockNodeEhash6kM_I_: classes.o; +text: .text%__1cFParseFBlockMadd_new_path6M_i_; +text: .text%__1cJBytecodesRspecial_length_at6FpC_i_; +text: .text%__1cIimmPOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cMloadConPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: loopnode.o; +text: .text%__1cQsubL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvF2DNodeGOpcode6kM_i_; +text: .text%__1cOstackSlotIOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cLConvI2DNodeGOpcode6kM_i_; +text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__; +text: .text%__1cMloadConFNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cKcmpOpPOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: interp_masm_sparc.o; +text: .text%__1cLRShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKimmL13OperJconstantL6kM_x_: ad_sparc_clone.o; +text: 
.text%__1cSTailCalljmpIndNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlassKlass.o; +text: .text%__1cGIfNodeMdominated_by6MpnENode_pnMPhaseIterGVN__v_; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cMURShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRsubI_zero_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcompiledVFrame2t6MpknFframe_pknLRegisterMap_pnKJavaThread_pnJScopeDesc__v_; +text: .text%__1cJScopeDesc2t6MpknHnmethod_i_v_; +text: .text%__1cQshlI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHnmethodNscope_desc_at6MpCi_pnJScopeDesc__; +text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_; +text: .text%__1cbFunnecessary_membar_volatileNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHnmethodUnumber_of_dependents6kM_i_: nmethod.o; +text: .text%__1cSmulI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: multnode.o; +text: .text%__1cIGraphKitOnull_check_oop6MpnKRegionNode_pnENode_i_4_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: compiledIC.o; +text: .text%__1cULinearLeastSquareFitGupdate6Mdd_v_; +text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__; +text: .text%__1cKstoreCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnHoopDesc_pnNsymbolOopDesc__i_; +text: .text%__1cMjniIdMapBaseHoops_do6MpnKOopClosure__v_; +text: .text%__1cJcmpOpOperKless_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cOcmovII_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cXmembar_acquire_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__; +text: .text%__1cOMacroAssemblerEsetx6MxpnMRegisterImpl_2nJrelocInfoJrelocType__v_; +text: .text%__1cMloadConLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMaxINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMloadConDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMindirectOperNconstant_disp6kM_i_: ad_sparc.o; +text: .text%__1cMindirectOperNbase_position6kM_i_: ad_sparc.o; +text: .text%__1cIAddLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMindirectOperFscale6kM_i_: ad_sparc.o; +text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cSsubL_reg_reg_2NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescTset_native_function6MpC_v_; +text: .text%jni_NewString: jni.o; +text: .text%__1cLConvL2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__; +text: .text%__1cQshlI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cHMatcherQinline_cache_reg6F_nHOptoRegEName__; +text: .text%__1cOcompiledVFrameEcode6kM_pnHnmethod__; +text: .text%__1cIGraphKitMnext_monitor6M_i_; +text: .text%__1cOloadConI13NodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cLBoxLockNode2t6Mi_v_; +text: .text%__1cPconvF2D_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLOptoRuntimebBcomplete_monitor_enter_Type6F_pknITypeFunc__; +text: .text%__1cIGraphKitLshared_lock6MpnENode__pnMFastLockNode__; +text: 
.text%__1cPcmpFastLockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNloadConP0NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cLOptoRuntimeKjbyte_copy6FpW1I_v_; +text: .text%__1cRorI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNSignatureInfoIdo_short6M_v_: frame.o; +text: .text%__1cKcmpOpUOperEless6kM_i_: ad_sparc_clone.o; +text: .text%__1cQaddF_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_; +text: .text%lwp_mutex_init: os_solaris.o; +text: .text%__1cObox_handleNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframeLnmethods_do6M_v_; +text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__; +text: .text%__1cQnotemp_iRegIOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__; +text: .text%__1cODataRelocationGoffset6M_i_: relocInfo.o; +text: .text%__1cYinternal_word_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cCosPhint_no_preempt6F_v_; +text: .text%__1cYinternal_word_RelocationJpack_data6M_i_; +text: .text%__1cOcmovII_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cUGenericGrowableArrayNraw_appendAll6Mpk0_v_; +text: .text%__1cIMulLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cYinternal_word_RelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cIMulINodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_; +text: .text%__1cFciEnv2t6MpnHJNIEnv__iii_v_; +text: .text%__1cRsarL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: cfgnode.o; +text: .text%__1cSstring_compareNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFciEnv2T6M_v_; +text: .text%__1cIGraphKitNgen_checkcast6MpnENode_2p2_2_; +text: .text%__1cMMergeMemNodeIadr_type6kM_pknHTypePtr__: memnode.o; +text: .text%__1cbLtransform_int_divide_to_long_multiply6FpnIPhaseGVN_pnENode_i_3_: divnode.o; +text: .text%__1cJcmpOpOperJnot_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cQmulD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cGvframeDtop6kM_p0_; +text: .text%__1cOCompiledRFrameEinit6M_v_; +text: .text%__1cXmembar_acquire_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadSNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cVCallRuntimeDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPcmpFastLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerUcompiler_lock_object6MpnMRegisterImpl_222rnFLabel__v_; +text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_; +text: .text%__1cIXorINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIGraphKitRgen_subtype_check6MpnENode_2_2_; +text: .text%__1cQregF_to_stkINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNSingletonBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cOMacroAssemblerLsave_thread6MkpnMRegisterImpl__v_; +text: .text%__1cOcmovII_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constMethodKlass.o; +text: .text%__1cLcmpD_ccNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: loopopts.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constMethodKlass.o; +text: .text%__1cMloadConINodeHsize_of6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cScheck_phi_clipping6FpnHPhiNode_rpnHConNode_rI45rpnENode_5_i_: cfgnode.o; +text: .text%__1cOcmovII_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: ad_sparc.o; +text: .text%__1cRshlL_reg_imm6NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFParseGdo_new6M_v_; +text: .text%__1cZCallDynamicJavaDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQshrI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIimmIOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cQmodI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvI2LNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOPhaseIdealLoopKclone_loop6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%jni_GetObjectClass: jni.o; +text: .text%__1cSxorI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOMacroAssemblerFalign6Mi_v_; +text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_; +text: .text%__1cbDReferenceProcessorInitializerIis_clean6kM_v_: concurrentMarkSweepGeneration.o; +text: .text%__1cKManagementJtimestamp6F_x_; +text: .text%__1cIPSOldGenPupdate_counters6M_v_; +text: .text%__1cQshrI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNIdealLoopTreeNpolicy_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cFForteNregister_stub6FpkcpC3_v_; +text: .text%__1cNIdealLoopTreeSpolicy_range_check6kMpnOPhaseIdealLoop__i_; +text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: loopTransform.o; +text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cTloadL_unalignedNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOMacroAssemblerVreset_last_Java_frame6M_v_; +text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_2_v_; +text: .text%__1cSstring_compareNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceRefKlass.o; +text: .text%__1cJloadFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cQregF_to_stkINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cINodeHash2t6MpnFArena_I_v_; +text: .text%__1cPconvI2L_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPhaseTransform2t6MpnFArena_nFPhaseLPhaseNumber__v_; +text: .text%__1cLPhaseValues2t6MpnFArena_I_v_; +text: .text%__1cLCodeletMark2t6MrpnZInterpreterMacroAssembler_pkcinJBytecodesECode__v_: interpreter.o; +text: .text%__1cJStubQdDueueGcommit6Mi_v_; +text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__; +text: .text%__1cGEventsDlog6FpkcE_v_: nmethod.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: typeArrayKlass.o; +text: .text%__1cOcmovII_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOMacroAssemblerKsave_frame6Mi_v_; +text: .text%__1cVshrL_reg_imm6_L2INodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLstoreC0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_and_dispatch6MpnITemplate_nITosState__v_; +text: .text%__1cOPhaseIdealLoopVclone_up_backedge_goo6MpnENode_22_2_; +text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_; +text: 
.text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_; +text: .text%__1cQregI_to_stkINodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%JVM_FindClassFromClassLoader; +text: .text%JVM_FindClassFromBootLoader; +text: .text%signalHandler; +text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_: typeArrayKlassKlass.o; +text: .text%JVM_handle_solaris_signal; +text: .text%__1cRInlineCacheBufferIis_empty6F_i_; +text: .text%__1cUSafepointSynchronizeRis_cleanup_needed6F_i_; +text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__; +text: .text%__1cNSignatureInfoIdo_float6M_v_: bytecode.o; +text: .text%__1cFStateM_sub_Op_AndL6MpknENode__v_; +text: .text%__1cKConv2BNodeGOpcode6kM_i_; +text: .text%__1cSstring_compareNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFframeRis_compiled_frame6kMpi_i_; +text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_; +text: .text%JVM_IHashCode; +text: .text%__1cSconvI2D_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJStartNodeJideal_reg6kM_I_: callnode.o; +text: .text%__1cJStartNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cOMacroAssemblerbBcheck_and_forward_exception6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cQshlI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_ltNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKDictionarybAis_valid_protection_domain6MiInMsymbolHandle_nGHandle_2_i_; +text: .text%__1cJloadCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQandL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovPP_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMVM_OperationSis_cheap_allocated6kM_i_: vm_operations.o; +text: .text%__1cNSignatureInfoHdo_char6M_v_: frame.o; +text: .text%__1cNCompileBrokerZcompilation_is_prohibited6FnMmethodHandle_i_i_; +text: .text%__1cLPhaseValuesKis_IterGVN6M_pnMPhaseIterGVN__: phaseX.o; +text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverbGruntime_resolve_interface_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cKC2CompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_; +text: .text%JVM_GetClassLoader; +text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_; +text: .text%__1cNCompileBrokerQset_last_compile6FpnOCompilerThread_nMmethodHandle_ii_v_; +text: .text%__1cbCAbstractInterpreterGeneratorQset_entry_points6MnJBytecodesECode__v_; +text: .text%__1cbCAbstractInterpreterGeneratorWset_short_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cJTraceTime2t6MpkciipnMoutputStream__v_; +text: .text%__1cIciMethodJhas_loops6kM_i_; +text: .text%__1cSconvD2I_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cCosRelapsed_frequency6F_x_; +text: .text%__1cFStateP_sub_Op_ConvL2I6MpknENode__v_; +text: .text%__1cIciMethodQbreak_at_execute6M_i_; +text: .text%__1cOPhaseIdealLoopLdo_split_if6MpnENode__v_; +text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_; +text: .text%__1cKScheduling2t6MpnFArena_rnHCompile__v_; +text: .text%__1cKSchedulingMDoScheduling6M_v_; +text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_; +text: 
.text%__1cFciEnvPregister_method6MpnIciMethod_iiiiiipnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler_ii_v_; +text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_; +text: .text%__1cNCompileBrokerYcheck_compilation_result6FnMmethodHandle_iippnHnmethod__i_; +text: .text%__1cZInterpreterMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cSCardTableExtensionbAscavenge_contents_parallel6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager_I_v_; +text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeMpd_gc_epilog6M_v_; +text: .text%__1cMelapsedTimerHseconds6kM_d_; +text: .text%__1cJStealTaskEname6M_pc_: psTasks.o; +text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cJStealTask2t6Mi_v_; +text: .text%__1cFframeLgc_epilogue6M_v_; +text: .text%__1cFframeLgc_prologue6M_v_; +text: .text%__1cTOldToYoungRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cJStealTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cTOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cNGCTaskManagerMnote_release6MI_v_; +text: .text%__1cMciMethodDataStrap_recompiled_at6MpnLProfileData__i_; +text: .text%__1cXjava_lang_ref_ReferenceWpending_list_lock_addr6F_ppnHoopDesc__; +text: .text%__1cNmethodOopDescIset_code6MpnHnmethod__v_; +text: .text%__1cQshrI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmembar_acquireNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmembar_acquireNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICodeBlobJcopy_oops6MppnI_jobject_i_v_; +text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_; +text: .text%__1cVExceptionHandlerTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cHnmethodSresolve_JNIHandles6M_v_; +text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_; +text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_; +text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_; +text: .text%__1cHnmethod2t6MpnNmethodOopDesc_iiiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__v_; +text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_; +text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cHnmethodLnew_nmethod6FnMmethodHandle_iiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__p0_; +text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_; +text: .text%__1cIimmLOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConLNodeFclone6kM_pnENode__; +text: .text%__1cHCompileVfinal_graph_reshaping6M_i_; +text: .text%__1cIciMethodRbuild_method_data6M_v_; +text: .text%__1cHCompileIOptimize6M_v_; +text: .text%__1cHCompileLFinish_Warm6M_v_; +text: .text%__1cbAfinal_graph_reshaping_walk6FrnKNode_Stack_pnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cHCompileLInline_Warm6M_i_; +text: .text%__1cSPhaseRemoveUseless2t6MpnIPhaseGVN_pnQUnique_Node_List__v_; +text: .text%__1cJStartNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKInlineTreeWbuild_inline_tree_root6F_p0_; +text: .text%__1cHCompileRbuild_start_state6MpnJStartNode_pknITypeFunc__pnIJVMState__; +text: .text%__1cIPhaseCCPHanalyze6M_v_; +text: 
.text%__1cIPhaseCCPMdo_transform6M_v_; +text: .text%__1cIPhaseCCPJtransform6MpnENode__2_; +text: .text%__1cIPhaseCCP2T6M_v_; +text: .text%__1cIPhaseCCP2t6MpnMPhaseIterGVN__v_; +text: .text%__1cHCompileVidentify_useful_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cHCompileUremove_useless_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cQUnique_Node_ListUremove_useless_nodes6MrnJVectorSet__v_; +text: .text%__1cMPhaseIterGVN2t6MpnIPhaseGVN__v_; +text: .text%__1cMPhaseIterGVN2t6Mp0_v_; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod_ii_v_; +text: .text%__1cQmulI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_; +text: .text%__1cHCompileNreturn_values6MpnIJVMState__v_; +text: .text%__1cOcmovII_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cXMachCallInterpreterNodePret_addr_offset6M_i_; +text: .text%__1cOMachEpilogNodeQsafepoint_offset6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_; +text: .text%__1cYDebugInformationRecorderKadd_oopmap6MiipnGOopMap__v_; +text: .text%__1cIModINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSxorI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFStateP_sub_Op_RShiftI6MpknENode__v_; +text: .text%__1cRsarI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%lwp_cond_init: os_solaris.o; +text: .text%__1cTmembar_volatileNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNloadConL0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cbFunnecessary_membar_volatileNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: callnode.o; +text: .text%__1cRshlL_reg_imm6NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXmembar_acquire_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cKPSYoungGenRcapacity_in_bytes6kM_I_; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: interp_masm_sparc.o; +text: .text%__1cMloadConDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulF_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cbCCompiledCodeSafepointHandlerbDhandle_polling_page_exception6M_pC_; +text: .text%__1cZInterpreterMacroAssemblerTdispatch_Lbyte_code6MnITosState_ppCii_v_; +text: .text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cFframebDsender_for_raw_compiled_frame6kMpnLRegisterMap__0_; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_: reg_split.o; +text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__pC_; +text: .text%__1cOloadConI13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSconvI2F_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadSNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQaddF_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAddFNodeGOpcode6kM_i_; +text: .text%__1cObranchConFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowLStateVectorJdo_aaload6MpnQciByteCodeStream__v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cFStateO_sub_Op_Binary6MpknENode__v_; +text: .text%__1cKBinaryNodeGOpcode6kM_i_; +text: 
.text%__1cNSignatureInfoIdo_short6M_v_: bytecode.o; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_3pnRJavaCallArguments_nGHandle_6_6_; +text: .text%__1cNmethodOopDescWcompute_has_loops_flag6M_i_; +text: .text%__1cQstkI_to_regFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLBoxLockNodeDcmp6kMrknENode__I_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constMethodKlass.o; +text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_; +text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_; +text: .text%__1cRNativeMovConstRegIset_data6Mi_v_; +text: .text%__1cFParsebLincrement_and_test_invocation_counter6Mi_v_; +text: .text%__1cJStartNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSsafePoint_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMTailCallNodeGOpcode6kM_i_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii1i_v_; +text: .text%__1cQregP_to_stkPNodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_; +text: .text%__1cHTypePtrFempty6kM_i_; +text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_; +text: .text%__1cSsafePoint_pollNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_; +text: .text%__1cIMulFNodeGOpcode6kM_i_; +text: .text%__1cISubLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQmulD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPconvF2D_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOloadConI13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: exceptions.o; +text: .text%__1cTmembar_CPUOrderNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTmembar_CPUOrderNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodKlass.o; +text: .text%__1cSCompareAndSwapNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNSCMemProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSThreadLocalStorageGthread6F_pnGThread__: assembler_sparc.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodKlass.o; +text: .text%__1cGThreadVset_pending_exception6MpnHoopDesc_pkci_v_; +text: .text%jni_SetByteArrayRegion: jni.o; +text: .text%__1cFMutex2t6Mipkci_v_; +text: .text%__1cQregI_to_stkINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__; +text: .text%__1cSdivL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFStateM_sub_Op_XorI6MpknENode__v_; +text: .text%__1cHTypePtrEmake6FnETypeFTYPES_n0ADPTR_i_pk0_; +text: .text%__1cCosLelapsedTime6F_d_; +text: .text%__1cKScopeValueJread_from6FpnTDebugInfoReadStream__p0_; +text: .text%__1cKPerfMemoryMmark_updated6F_v_; +text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_; +text: .text%__1cKPerfMemoryFalloc6FI_pc_; +text: .text%__1cLStrCompNodeKmatch_edge6kMI_I_; +text: .text%__1cQmulL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cILocation2t6MpnTDebugInfoReadStream__v_; +text: 
.text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__; +text: .text%jni_ReleaseStringUTFChars; +text: .text%jni_GetStringUTFChars: jni.o; +text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cFParseLarray_store6MnJBasicType__v_; +text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_; +text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_; +text: .text%__1cSmulI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_IsInterrupted; +text: .text%__1cLLShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNSignatureInfoHdo_char6M_v_: bytecode.o; +text: .text%JVM_FindLibraryEntry; +text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_; +text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: assembler_sparc.o; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cRshlL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHCompile2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cQshlL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: classes.o; +text: .text%__1cHBitDataKis_BitData6M_i_: ciMethodData.o; +text: .text%__1cNLocationValueLis_location6kM_i_: debugInfo.o; +text: .text%__1cPconvF2D_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLRShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSstring_compareNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIMinINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMacroAssemblerRload_ptr_contents6MrnHAddress_pnMRegisterImpl_i_v_: assembler_sparc.o; +text: .text%__1cOMacroAssemblerEstop6Mpkc_v_; +text: .text%__1cKRegionNodeJideal_reg6kM_I_: loopnode.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: loopnode.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: loopnode.o; +text: .text%__1cSconvI2F_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cObranchConFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHMatcherOc_return_value6Fii_nLRegPair__; +text: .text%__1cENodeHis_Copy6kM_I_: loopnode.o; +text: .text%__1cKloadUBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKg3RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cSsubL_reg_reg_2NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddP_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cTloadL_unalignedNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cLcmpD_ccNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__: callnode.o; +text: .text%__1cMregD_lowOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cTCallDynamicJavaNodeSis_CallDynamicJava6kM_pk0_: callnode.o; +text: .text%__1cTloadL_unalignedNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLLShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_; +text: .text%__1cObranchConFNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cObox_handleNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_: jni.o; +text: .text%__1cQmodI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRbranchLoopEndNodeLout_RegMask6kM_rknHRegMask__; 
+text: .text%__1cQaddF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKcmpOpUOperHgreater6kM_i_: ad_sparc_clone.o; +text: .text%__1cUParallelScavengeHeapEused6kM_I_; +text: .text%__1cIDivINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cVCallRuntimeDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQxorI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLOptoRuntimeOarraycopy_Type6F_pknITypeFunc__; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodKlass.o; +text: .text%__1cWCallLeafNoFPDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTbasictype2arraycopy6FnJBasicType_i_pC_; +text: .text%__1cOLibraryCallKitQinline_arraycopy6M_i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodKlass.o; +text: .text%__1cLcmpD_ccNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWCallLeafNoFPDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLOptoRuntimeSnew_typeArray_Type6F_pknITypeFunc__; +text: .text%__1cJloadINodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitJnew_array6MpnENode_nJBasicType_pknEType_pknMTypeKlassPtr__2_; +text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_; +text: .text%__1cIMinINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cTmembar_CPUOrderNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cOClearArrayNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRbranchLoopEndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRbranchLoopEndNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cLMachUEPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTmembar_volatileNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKo1RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cCosTnative_java_library6F_pv_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_; +text: .text%__1cSxorI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cYinlineCallClearArrayNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cYinlineCallClearArrayNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMPhaseChaitinGSelect6M_I_; +text: .text%__1cLOptoRuntimeInew_Type6F_pknITypeFunc__; +text: .text%__1cFParseSjump_switch_ranges6MpnENode_pnLSwitchRange_4i_v_; +text: .text%__1cSbranchCon_longNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cSbranchCon_longNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSbranchCon_longNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_; +text: .text%__1cIModINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_; +text: .text%__1cIGraphKitMnew_instance6MpnPciInstanceKlass__pnENode__; +text: .text%__1cLstoreP0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cMloadConLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseTprofile_switch_case6Mi_v_; +text: .text%__1cOcmovIL_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMNativeLookupGlookup6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cSandI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIimmLOperJnum_edges6kM_I_: 
ad_sparc_clone.o; +text: .text%__1cFParseOmerge_new_path6Mi_v_; +text: .text%__1cYMachCallCompiledJavaNodePret_addr_offset6M_i_; +text: .text%__1cQregP_to_stkPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cQmulF_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2ipnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: templateTable_sparc.o; +text: .text%__1cHnmethodXinterpreter_entry_point6M_pC_; +text: .text%__1cSmembar_releaseNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQPSIsAliveClosureLdo_object_b6MpnHoopDesc__i_: psScavenge.o; +text: .text%jni_NewByteArray: jni.o; +text: .text%__1cQdivL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_2NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_; +text: .text%__1cUdivL_reg_imm13_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSInterpreterRuntimeTnmethod_entry_point6FpnKJavaThread_pnNmethodOopDesc_pnHnmethod__pC_; +text: .text%__1cQaddF_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cFParseLdo_newarray6MnJBasicType__v_; +text: .text%__1cPmethodDataKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cMNativeLookupLlookup_base6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__; +text: .text%__1cKRegionNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cSconvI2D_helperNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreP0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHRetNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cSmembar_releaseNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_; +text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__; +text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cSTailCalljmpIndNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQcmovI_reg_gtNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cObox_handleNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetMethodIxExceptionTableEntry; +text: .text%__1cSReferenceProcessorZadd_to_discovered_list_mt6MppnHoopDesc_23_v_; +text: .text%__1cIDivINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLstoreP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQstkI_to_regFNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRethrowNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKloadUBNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompileSrethrow_exceptions6MpnIJVMState__v_; +text: .text%__1cURethrowExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cLRethrowNode2t6MpnENode_22222_v_; +text: .text%__1cTLoadL_unalignedNodeGOpcode6kM_i_; +text: .text%__1cSmulI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerZget_2_byte_integer_at_bcp6MipnMRegisterImpl_2n0ALsignedOrNot_n0AKsetCCOrNot__v_; +text: .text%__1cQcmovI_reg_gtNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cURethrowExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_; +text: .text%__1cISubLNodeDsub6kMpknEType_3_3_; +text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cJLoadINodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cQandI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cQmulI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFParsePmerge_exception6Mi_v_; +text: .text%__1cLStrCompNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cYinlineCallClearArrayNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlass.o; +text: .text%__1cNloadConP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_ReleaseStringCritical: jni.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: objArrayKlass.o; +text: .text%__1cNobjArrayKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cJCMoveNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%jni_GetStringCritical: jni.o; +text: .text%__1cUmulL_reg_imm13_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHciKlassSsuper_check_offset6M_I_; +text: .text%__1cSTailCalljmpIndNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__: ciObjArrayKlass.o; +text: .text%__1cRorI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregF_to_stkINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cWCallLeafNoFPDirectNodeKmethod_set6Mi_v_; +text: .text%__1cWCallLeafNoFPDirectNodeRis_safepoint_node6kM_i_: ad_sparc_misc.o; +text: .text%__1cWCallLeafNoFPDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse1.o; +text: .text%__1cIDivLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_: icBuffer.o; +text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_: icBuffer.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: frame.o; +text: .text%__1cMloadConFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMadjust_check6FpnENode_11iipnMPhaseIterGVN__v_: ifnode.o; +text: .text%__1cJScopeDescGsender6kM_p0_; +text: .text%__1cNloadConP0NodeFclone6kM_pnENode__; +text: .text%__1cSxorI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJimmP0OperFclone6kM_pnIMachOper__; +text: .text%__1cOcompiledVFrameGsender6kM_pnGvframe__; +text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_; +text: .text%__1cNloadConPCNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_2NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_; +text: .text%__1cKciTypeFlowLStateVectorEtrap6MpnQciByteCodeStream_pnHciKlass_i_v_; +text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_; +text: 
.text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cQregI_to_stkINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_; +text: .text%__1cIregDOperFclone6kM_pnIMachOper__; +text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_; +text: .text%__1cIUniverseHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_; +text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_; +text: .text%__1cOPhaseIdealLoopOadd_constraint6MiipnENode_22p23_v_; +text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_; +text: .text%__1cYinternal_word_RelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cIregFOperFclone6kM_pnIMachOper__; +text: .text%__1cJloadDNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cKstoreBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQshlL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_; +text: .text%__1cOcmovPI_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cObox_handleNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreI0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQciByteCodeStreamPget_klass_index6M_i_; +text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_; +text: .text%__1cNinstanceKlassQmethod_index_for6kMpnNmethodOopDesc_pnGThread__i_; +text: .text%__1cNmethodOopDescVparameter_annotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cNmethodOopDescLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cRtestI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__; +text: .text%__1cFParseMdo_checkcast6M_v_; +text: .text%__1cOCompiledRFrameLis_compiled6kM_i_: rframe.o; +text: .text%__1cOCompiledRFrameKtop_method6kM_nMmethodHandle__: rframe.o; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_: nmethod.o; +text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__; +text: .text%__1cRtestI_reg_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOcmovIL_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJimmU6OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cHRegMask2t6M_v_: matcher.o; +text: .text%__1cOPhaseIdealLoopJdo_unroll6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_; +text: .text%__1cLServiceUtilLvisible_oop6FpnHoopDesc__i_: objectMonitor_solaris.o; +text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_; +text: .text%__1cIimmIOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConINodeFclone6kM_pnENode__; +text: .text%__1cSmulL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPstoreI_FregNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcmpD_ccNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXMachCallDynamicJavaNodePret_addr_offset6M_i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: ad_sparc_misc.o; +text: .text%__1cNflagsRegFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: 
.text%__1cXvirtual_call_RelocationJpack_data6M_i_; +text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_; +text: .text%__1cSconvD2I_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHMonitor2t6Mipkci_v_; +text: .text%__1cIModLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMloadConFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOtypeArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersbBupdate_counters_from_policy6M_v_; +text: .text%__1cXTraceMemoryManagerStats2T6M_v_; +text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_; +text: .text%__1cQLRUMaxHeapPolicy2t6M_v_; +text: .text%__1cSReferenceProcessorbDprocess_discovered_references6M_v_; +text: .text%__1cSReferenceProcessorQprocess_phaseJNI6M_v_; +text: .text%__1cSReferenceProcessorbDenqueue_discovered_references6M_i_; +text: .text%__1cSReferenceProcessorbBenqueue_discovered_reflists6MppnHoopDesc__v_; +text: .text%__1cUParallelScavengeHeapQresize_all_tlabs6M_v_; +text: .text%__1cUParallelScavengeHeapPupdate_counters6M_v_; +text: .text%__1cUParallelScavengeHeapTensure_parseability6M_v_; +text: .text%__1cUParallelScavengeHeapOfill_all_tlabs6M_v_; +text: .text%__1cUParallelScavengeHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cXTraceMemoryManagerStats2t6Mi_v_; +text: .text%__1cTDerivedPointerTablePupdate_pointers6F_v_; +text: .text%__1cNCollectedHeapOfill_all_tlabs6M_v_; +text: .text%__1cNCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_; +text: .text%__1cMTypeKlassPtrFxmeet6kMpknEType__3_; +text: .text%__1cXjava_lang_ref_ReferenceRpending_list_addr6F_ppnHoopDesc__; +text: .text%__1cKPSYoungGenPupdate_counters6M_v_; +text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_; +text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_; +text: .text%__1cPGCMemoryManagerIgc_begin6M_v_; +text: .text%__1cPGCMemoryManagerGgc_end6M_v_; +text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_; +text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersPupdate_counters6M_v_; +text: .text%__1cTDerivedPointerTableFclear6F_v_; +text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_; +text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_; +text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_; +text: .text%__1cMCounterDecayFdecay6F_v_; +text: .text%__1cCosbCmake_polling_page_unreadable6F_v_; +text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_; +text: .text%__1cLConvI2FNodeGOpcode6kM_i_; +text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_; +text: .text%__1cQaddF_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRshlL_reg_imm6NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUSafepointSynchronizeFbegin6F_v_; +text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cONMethodSweeperFsweep6F_v_; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: arrayKlass.o; +text: .text%__1cTAbstractInterpreterRnotice_safepoints6F_v_; +text: .text%__1cCosbAmake_polling_page_readable6F_v_; +text: .text%__1cUSafepointSynchronizeDend6F_v_; +text: .text%__1cOcmovII_immNodeErule6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cTAbstractInterpreterRignore_safepoints6F_v_; +text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_; +text: .text%__1cJStoreNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cPconvF2D_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKimmU13OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cQshlL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cUcompU_iReg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_GetCallerClass; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: bytecode.o; +text: .text%__1cOcmovPP_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSobjArrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cLstoreC0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cTloadL_unalignedNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cICmpFNodeGOpcode6kM_i_; +text: .text%__1cSdivL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cOstackSlotPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cQregF_to_stkINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJLoadDNodeGOpcode6kM_i_; +text: .text%__1cQmulD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSconvI2F_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%jni_IsAssignableFrom: jni.o; +text: .text%jni_GetFieldID: jni.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pCii_v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: cpCacheKlass.o; +text: .text%__1cJLoadPNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: cpCacheKlass.o; +text: .text%__1cLstoreB0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_; +text: .text%__1cHTypeAryFxdual6kM_pknEType__; +text: .text%__1cMtlsLoadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJEventMark2t6MpkcE_v_: vmThread.o; +text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_; +text: .text%__1cCosMget_priority6FkpknGThread_rnOThreadPriority__nIOSReturn__; +text: .text%__1cGThreadMget_priority6Fkpk0_nOThreadPriority__; +text: .text%__1cMVM_OperationIevaluate6M_v_; +text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_; +text: .text%__1cCosTget_native_priority6FkpknGThread_pi_nIOSReturn__; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: interp_masm_sparc.o; +text: .text%__1cQdivL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMnegD_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cUParallelScavengeHeapMmem_allocate6MIii_pnIHeapWord__; +text: .text%__1cQregP_to_stkPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_gtNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cWloadConI_x43300000NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cGGCTask2t6Mn0AEKindEkind__v_; +text: .text%__1cNGCTaskManagerVrelease_all_resources6M_v_; +text: .text%__1cUPSAdaptiveSizePolicyWminor_collection_begin6M_v_; +text: .text%__1cLGCTaskQdDueueHenqueue6Mp0_v_; +text: .text%__1cSCardTableExtensionRscavenge_contents6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager__v_; +text: .text%__1cUWaitForBarrierGCTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cNGCTaskManagerIadd_list6MpnLGCTaskQdDueue__v_; +text: 
.text%__1cHThreadsZcreate_thread_roots_tasks6FpnLGCTaskQdDueue__v_; +text: .text%__1cUWaitForBarrierGCTaskGcreate6F_p0_; +text: .text%__1cUWaitForBarrierGCTaskIdestruct6M_v_; +text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cSObjectSynchronizerKfast_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cUPSAdaptiveSizePolicyUminor_collection_end6MnHGCCauseFCause__v_; +text: .text%__1cSPSPromotionManagerNpost_scavenge6F_v_; +text: .text%__1cNBarrierGCTaskOdo_it_internal6MpnNGCTaskManager_I_v_; +text: .text%__1cNJvmtiGCMarker2T6M_v_; +text: .text%__1cUWaitForBarrierGCTaskHdestroy6Fp0_v_; +text: .text%__1cLGCTaskQdDueueGcreate6F_p0_; +text: .text%__1cSPSPromotionManagerMpre_scavenge6F_v_; +text: .text%__1cZSerialOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_; +text: .text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_; +text: .text%__1cZSerialOldToYoungRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cKPSYoungGenLswap_spaces6M_v_; +text: .text%__1cUParallelScavengeHeapQresize_young_gen6MII_v_; +text: .text%__1cODeoptimizationYtrap_state_is_recompiled6Fi_i_; +text: .text%__1cKPSYoungGenGresize6MII_v_; +text: .text%__1cKPSYoungGenNresize_spaces6MII_v_; +text: .text%__1cUPSAdaptiveSizePolicyPupdate_averages6MiII_v_; +text: .text%__1cUPSAdaptiveSizePolicybPcompute_survivor_space_size_and_threshold6MiiI_i_; +text: .text%__1cSPSPromotionManagerbBvm_thread_promotion_manager6F_p0_; +text: .text%__1cUWaitForBarrierGCTaskIwait_for6M_v_; +text: .text%__1cKPSScavengeQinvoke_no_policy6Fpi_i_; +text: .text%__1cPVM_GC_OperationQgc_count_changed6kM_i_; +text: .text%__1cKPSYoungGenRresize_generation6MII_i_; +text: .text%__1cPVM_GC_OperationNdoit_prologue6M_i_; +text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_; +text: .text%__1cNMonitorSupplyHreserve6F_pnHMonitor__; +text: .text%__1cNJvmtiGCMarker2t6Mi_v_; +text: .text%__1cNMonitorSupplyHrelease6FpnHMonitor__v_; +text: .text%__1cUWaitForBarrierGCTaskEname6M_pc_: gcTaskManager.o; +text: .text%__1cTmembar_volatileNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cQjava_lang_ThreadJis_daemon6FpnHoopDesc__i_; +text: .text%__1cVCallRuntimeDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cTloadL_unalignedNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovII_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQComputeCallStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cMLinkResolverbPlinktime_resolve_interface_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cMURShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSCompiledStaticCallIis_clean6kM_i_; +text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_; +text: .text%__1cUmulL_reg_imm13_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cKstoreBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_; +text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_; +text: .text%__1cFframeZinterpreter_frame_set_mdp6MpC_v_; +text: .text%__1cLOptoRuntimeVresolve_static_call_C6FpnKJavaThread__pC_; +text: 
.text%__1cZInterpreterMacroAssemblerIpush_ptr6MpnMRegisterImpl__v_; +text: .text%__1cHMatcherbAinterpreter_method_oop_reg6F_nHOptoRegEName__; +text: .text%__1cTCallInterpreterNodeSis_CallInterpreter6kM_pk0_: classes.o; +text: .text%__1cTCallInterpreterNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cHCompilebMGenerate_Compiled_To_Interpreter_Graph6MpknITypeFunc_pC_v_; +text: .text%__1cISubLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cZCallInterpreterDirectNodeKmethod_set6Mi_v_; +text: .text%__1cIciMethodRinterpreter_entry6M_pC_; +text: .text%__1cQmulF_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cXMachCallInterpreterNodeWis_MachCallInterpreter6M_p0_: ad_sparc_misc.o; +text: .text%__1cZCallInterpreterDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPconvF2D_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZCallInterpreterDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRcompL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWloadConI_x41f00000NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadBNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%jni_SetBooleanField: jni.o; +text: .text%__1cQciByteCodeStreamFtable6MnJBytecodesECode__2_; +text: .text%__1cKimmL13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cRsarL_reg_imm6NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: sharedRuntime.o; +text: .text%__1cLcmpF_ccNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKloadUBNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRuntimeStubbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cRorI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cWResolveOopMapConflictsRpossible_gc_point6MpnOBytecodeStream__i_: rewriter.o; +text: .text%__1cRsarL_reg_imm6NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQmulI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMnegD_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_i6MpnMRegisterImpl__v_; +text: .text%__1cUdivL_reg_imm13_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cLLShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOcmovIL_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2D_memNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackJdo_double6M_v_: generateOopMap.o; +text: .text%__1cJloadSNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cRloadConP_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSstkL_to_regD_0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNObjectMonitorHRecycle6M_v_; +text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cQshlL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOMacroAssemblerWstore_unaligned_double6MpnRFloatRegisterImpl_pnMRegisterImpl_i_v_; +text: .text%__1cJloadDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__; 
+text: .text%__1cQstkI_to_regFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQregP_to_stkPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerFpop_i6MpnMRegisterImpl__v_; +text: .text%__1cSTailCalljmpIndNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSTailCalljmpIndNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cIMaxINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cLOptoRuntimeThandle_wrong_method6FpnKJavaThread__pC_; +text: .text%__1cOMacroAssemblerUstore_unaligned_long6MpnMRegisterImpl_2i_v_; +text: .text%__1cSmulL_reg_reg_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cOLibraryCallKitRinline_unsafe_CAS6MnJBasicType__i_; +text: .text%__1cTCompareAndSwapLNode2t6MpnENode_2222_v_; +text: .text%__1cYcompareAndSwapL_boolNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCompareAndSwapNode2t6MpnENode_2222_v_; +text: .text%__1cNSCMemProjNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cYcompareAndSwapL_boolNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUPSAdaptiveSizePolicyOshould_full_GC6MI_i_; +text: .text%__1cIPSOldGenMmax_gen_size6M_I_: psOldGen.o; +text: .text%__1cUPSAdaptiveSizePolicyQdecaying_gc_cost6kM_d_; +text: .text%__1cUPSAdaptiveSizePolicybDcompute_generation_free_space6MIIIIIIIi_v_; +text: .text%__1cSmulL_reg_reg_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKPSScavengeGinvoke6Fpi_v_; +text: .text%__1cUPSAdaptiveSizePolicyVadjust_for_throughput6MipI1_v_; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_; +text: .text%__1cSsubL_reg_reg_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUPSAdaptiveSizePolicyZdecay_supplemental_growth6Mi_v_; +text: .text%__1cSdivL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cUParallelScavengeHeapTfailed_mem_allocate6MpiIii_pnIHeapWord__; +text: .text%__1cbDVM_ParallelGCFailedAllocation2t6MIiiI_v_; +text: .text%__1cbDVM_ParallelGCFailedAllocationEdoit6M_v_; +text: .text%__1cbDVM_ParallelGCFailedAllocationEname6kM_pkc_: vm_operations.o; +text: .text%__1cQaddL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPadd_derived_oop6FppnHoopDesc_2_v_: oopMap.o; +text: .text%__1cMregD_lowOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cHOrINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cOMethodLivenessKBasicBlockFsplit6Mi_p1_; +text: .text%__1cOcmovII_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseScreate_jump_tables6MpnENode_pnLSwitchRange_4_i_; +text: .text%__1cENodeEgetd6kM_d_; +text: .text%__1cZInterpreterMacroAssemblerKverify_oop6MpnMRegisterImpl_nITosState__v_; +text: .text%__1cOcmovIL_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_; +text: .text%__1cZInterpreterMacroAssemblerGpush_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cIciObject2t6MpnHciKlass__v_; +text: .text%__1cRtestI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSxorI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__; +text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_; +text: .text%__1cKstoreFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cHMatcherXinterpreter_arg_ptr_reg6F_nHOptoRegEName__; +text: .text%__1cPstoreI_FregNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKCMovePNodeGOpcode6kM_i_; +text: .text%__1cLstoreC0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKciTypeFlowOsplit_range_at6Mi_pn0AFRange__; +text: .text%__1cNObjectMonitorEwait6MxipnGThread__v_; +text: .text%__1cSmulI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%JVM_MonitorWait; +text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_; +text: .text%__1cNloadConPCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIAddLNodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_; +text: .text%__1cGciType2t6MpnHciKlass__v_; +text: .text%__1cQshlI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQdivD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciObjectUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cFParseSjump_if_false_fork6MpnGIfNode_ii_v_; +text: .text%__1cNloadConL0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHBoxNodeGOpcode6kM_i_; +text: .text%__1cRshrL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMflagsRegOperFclone6kM_pnIMachOper__; +text: .text%__1cSconvI2F_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregF_to_stkINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cbFunnecessary_membar_volatileNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUciInstanceKlassKlassEmake6F_p0_; +text: .text%__1cENode2t6Mp0111111_v_; +text: .text%__1cIDivLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cZInterpreterMacroAssemblerGpush_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cPstoreI_FregNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseRarray_store_check6M_v_; +text: .text%__1cQsubF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIciSymbolHbyte_at6Mi_i_; +text: .text%__1cKCompiledICSset_ic_destination6MpC_v_; +text: .text%__1cbDreorder_based_on_method_index6FpnPobjArrayOopDesc_1ppnHoopDesc__v_: methodOop.o; +text: .text%__1cQaddD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__; +text: .text%__1cQshlI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQset_lwp_priority6Fiii_i_; +text: .text%__1cSstkL_to_regD_0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cbCAbstractInterpreterGeneratorVset_vtos_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cQregI_to_stkINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%jni_NewStringUTF: jni.o; +text: .text%__1cZInterpreterMacroAssemblerGpush_l6MpnMRegisterImpl__v_; +text: .text%__1cQsubI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cZInterpreterMacroAssemblerXget_constant_pool_cache6MpnMRegisterImpl__v_; +text: .text%__1cSbranchCon_longNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cXjava_lang_ref_ReferenceOset_discovered6FpnHoopDesc_2_v_; +text: .text%__1cKcmpOpUOperKless_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cMLinkResolverUresolve_virtual_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__; +text: 
.text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__; +text: .text%__1cUParallelScavengeHeapIcapacity6kM_I_; +text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__; +text: .text%__1cSsubL_reg_reg_2NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQaddD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNget_method_id6FpnHJNIEnv__pnH_jclass_pkc5ipnGThread__pnK_jmethodID__: jni.o; +text: .text%__1cMjniIdSupportNto_jmethod_id6FpnNmethodOopDesc__pnK_jmethodID__; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceRefKlass.o; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: cfgnode.o; +text: .text%JVM_DefineClassWithSource; +text: .text%__1cLstoreF0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadINodeFclone6kM_pnENode__; +text: .text%JVM_SetClassSigners; +text: .text%__1cQdivL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cXconvI2D_regDHi_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCompiledICMset_to_clean6M_v_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cSandL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRbranchLoopEndNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cLRShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNSingletonBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_: markSweep.o; +text: .text%__1cFParseXfetch_interpreter_state6MipknEType_pnENode__5_; +text: .text%__1cFParseWcheck_interpreter_type6MpnENode_pknEType_rpnNSafePointNode__2_; +text: .text%__1cOcmovPP_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_; +text: .text%get_thread; +text: .text%__1cKstoreCNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FIi_pnGThread__; +text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_; +text: .text%jni_CallIntMethod: jni.o; +text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_; +text: .text%__1cKloadUBNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregL_to_stkLNodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cUCallCompiledJavaNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cSconvD2I_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHCompilebMGenerate_Interpreter_To_Compiled_Graph6MpknITypeFunc__v_; +text: .text%__1cbACallCompiledJavaDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbACallCompiledJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSconvD2I_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cbACallCompiledJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cIMulDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cbACallCompiledJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_return_entry_for6MnITosState_i_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_deopt_entry_for6MnITosState_i_pC_; +text: .text%__1cSaddP_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIAddDNodeGOpcode6kM_i_; +text: .text%__1cOcmovPP_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvI2D_helperNodeFclone6kM_pnENode__; +text: .text%__1cOloadI_fregNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHCompileRmake_vm_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: 
.text%__1cOloadI_fregNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCountedLoopEndNode2t6MpnENode_2ff_v_; +text: .text%__1cQmulD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosJyield_all6Fi_v_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: vmThread.o; +text: .text%__1cKstoreLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPstoreI_FregNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFTypeDJsingleton6kM_i_; +text: .text%__1cLstoreC0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetClassMethodsCount; +text: .text%__1cKstoreINodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%JVM_GetClassFieldsCount; +text: .text%__1cLconvI2BNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_ThreadMis_stillborn6FpnHoopDesc__i_; +text: .text%__1cRorI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetClassCPEntriesCount; +text: .text%JVM_GetClassCPTypes; +text: .text%__1cGEventsDlog6FpkcE_v_: thread.o; +text: .text%__1cHciKlassMis_interface6M_i_: ciTypeArrayKlass.o; +text: .text%__1cQmulI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerKverify_FPU6Mipkc_v_; +text: .text%__1cVinline_cache_regPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__; +text: .text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_; +text: .text%__1cIAddFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPconvF2D_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregI_to_stkINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulF_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cbBinitialize_itable_for_klass6FpnMklassOopDesc__v_; +text: .text%__1cTloadL_unalignedNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interpreterRT_sparc.o; +text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6MX_v_; +text: .text%JVM_IsPrimitiveClass; +text: .text%__1cJimmU6OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cOPhaseIdealLoopUpeeled_dom_test_elim6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cIDivDNodeGOpcode6kM_i_; +text: .text%__1cObox_handleNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRorI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: cfgnode.o; +text: .text%__1cKloadUBNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cTmembar_volatileNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_reg_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__; +text: .text%__1cIModLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cXconvI2D_regDHi_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%jni_FindClass: jni.o; +text: 
.text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cOMacroAssemblerOstore_argument6MpnMRegisterImpl_rnIArgument__v_: interpreterRT_sparc.o; +text: .text%__1cFParseHdo_irem6M_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorThas_signature_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorVhas_annotations_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorbFhas_parameter_annotations_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__; +text: .text%__1cGThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cGThreadVis_jvmti_agent_thread6kM_i_: thread.o; +text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: machnode.o; +text: .text%__1cNloadConP0NodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarL_reg_imm6NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovII_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSdivL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTloadL_unalignedNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNSharedRuntimeDd2l6Fd_x_; +text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__; +text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_; +text: .text%__1cFStateP_sub_Op_RShiftL6MpknENode__v_; +text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_: icBuffer.o; +text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_: icBuffer.o; +text: .text%__1cObox_handleNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cQshrI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cQregI_to_stkINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRorI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__; +text: .text%__1cMregD_lowOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cLConvD2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSconvI2F_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHMonitor2T6M_v_; +text: .text%__1cFTypeDFxmeet6kMpknEType__3_; +text: .text%__1cFMutex2T6M_v_; +text: .text%__1cRtestI_reg_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%lwp_cond_destroy: os_solaris.o; +text: .text%lwp_mutex_destroy: os_solaris.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: multnode.o; +text: .text%__1cQdivI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLPhaseValuesHlongcon6Mx_pnIConLNode__; +text: .text%__1cQregP_to_stkPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQstkI_to_regFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQregI_to_stkINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQRelocationHolderEplus6kMi_0_; 
+text: .text%__1cUPSMarkSweepDecoratorHcompact6Mi_v_; +text: .text%__1cUPSMarkSweepDecoratorPadjust_pointers6M_v_; +text: .text%__1cUPSMarkSweepDecoratorKprecompact6M_v_; +text: .text%__1cTloadL_unalignedNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_; +text: .text%__1cQregF_to_stkINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompileQgrow_alias_types6M_v_; +text: .text%__1cLLShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cbFunnecessary_membar_volatileNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOcmovII_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNiRegIsafeOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_; +text: .text%__1cUmulL_reg_imm13_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKloadUBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHnmethodPis_dependent_on6MpnMklassOopDesc__i_; +text: .text%__1cTloadD_unalignedNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cZInterpreterMacroAssemblerHpop_ptr6MpnMRegisterImpl__v_; +text: .text%__1cQdivD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: connode.o; +text: .text%__1cKLoadPCNodeGOpcode6kM_i_; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: connode.o; +text: .text%__1cOloadConL13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRcompL_reg_conNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQaddF_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodDataKlass.o; +text: .text%__1cICmpDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodDataKlass.o; +text: .text%__1cLcmpF_ccNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKsplit_once6FpnMPhaseIterGVN_pnENode_333_v_: cfgnode.o; +text: .text%__1cLLShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJloadFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJCMoveNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cFciEnvWis_dependence_violated6FpnMklassOopDesc_pnNmethodOopDesc__i_; +text: .text%__1cOPhaseIdealLoopOdo_range_check6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cQaddD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceRefKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlassKlass.o; +text: .text%__1cObox_handleNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSconvD2I_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitPdstore_rounding6MpnENode__2_; +text: .text%__1cJloadINodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSdivL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQregP_to_stkPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRloadConP_pollNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIModINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cZCallDynamicJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cFframeVshould_be_deoptimized6kM_i_; +text: .text%__1cZCallDynamicJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXvirtual_call_RelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cUdivL_reg_imm13_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; 
+text: .text%__1cSconvD2I_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCMoveINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_: callnode.o; +text: .text%__1cZCallDynamicJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsubF_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeBlobOis_java_method6kM_i_: codeBlob.o; +text: .text%__1cQMachCallJavaNodeVis_MachCallStaticJava6M_pnWMachCallStaticJavaNode__: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cUmulL_reg_imm13_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQdivL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__: classes.o; +text: .text%__1cOcmovPI_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cUdivL_reg_imm13_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMTypeKlassPtrRcast_to_exactness6kMi_pknEType__; +text: .text%__1cUmulL_reg_imm13_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNCallGeneratorQfor_virtual_call6FpnIciMethod__p0_; +text: .text%__1cUVirtualCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cNObjectMonitor2t6M_v_; +text: .text%__1cTmembar_volatileNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulINodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cIMulINodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cQdivD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJCmpD3NodeGOpcode6kM_i_; +text: .text%__1cJloadDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMinINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__; +text: .text%__1cUVirtualCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cQmulF_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_MonitorNotify; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: templateTable_sparc.o; +text: .text%__1cFBlockNset_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_; +text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_sparc.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: interpreterRT_sparc.o; +text: .text%__1cKstoreFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSstring_compareNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cYjava_lang_reflect_MethodVhas_annotations_field6F_i_; +text: .text%__1cVshrL_reg_imm6_L2INodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cOloadConL13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__; +text: .text%__1cYjava_lang_reflect_MethodbFhas_parameter_annotations_field6F_i_; +text: .text%__1cINegDNodeGOpcode6kM_i_; +text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_; +text: 
.text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodThas_signature_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodbChas_annotation_default_field6F_i_; +text: .text%__1cOimmI_32_63OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_; +text: .text%__1cNmethodOopDescSannotation_default6kM_pnQtypeArrayOopDesc__; +text: .text%__1cKReflectionKnew_method6FnMmethodHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNIdealLoopTreeXpolicy_maximally_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cSsubL_reg_reg_2NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQregP_to_stkPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNRelocIteratorEnext6M_i_: output.o; +text: .text%__1cOcmovII_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%jni_GetMethodID: jni.o; +text: .text%__1cTloadD_unalignedNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cQshlL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceRefKlass.o; +text: .text%__1cObranchConFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNminI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRshlI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNminI_eRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOloadConL13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_; +text: .text%__1cOMacroAssemblerDjmp6MpnMRegisterImpl_ipkci_v_; +text: .text%__1cIDivLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSsubD_regD_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetClassDeclaredConstructors; +text: .text%__1cUdivL_reg_imm13_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSmulD_regD_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_; +text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_; +text: .text%__1cOcmovIF_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUmulL_reg_imm13_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQsubD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cUdivL_reg_imm13_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSandL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConPCNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cMmatch_option6FpknMJavaVMOption_pkcp4_i_: arguments.o; +text: .text%__1cZregDHi_regDLo_to_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJEventMark2t6MpkcE_v_: psMarkSweep.o; +text: .text%__1cQregP_to_stkPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCMoveNodeEmake6FpnENode_222pknEType__p0_; +text: .text%__1cJCMoveNode2t6MpnENode_22pknEType__v_: connode.o; +text: .text%__1cSconvI2F_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_; +text: .text%__1cOcmovIF_regNodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MipnMRegisterImpl__v_; +text: .text%__1cQcmovI_reg_ltNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cKo1RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cRtestI_reg_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_; +text: .text%__1cQshrL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRsarL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadBNodeFclone6kM_pnENode__; +text: .text%__1cJloadFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSaddD_regD_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_: interpreterRT_sparc.o; +text: .text%__1cRorI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassQfind_local_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__i_; +text: .text%__1cQshrL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQshrI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOimmI_32_63OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cSstkL_to_regD_0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cUGenericGrowableArrayKraw_remove6MpknEGrET__v_; +text: .text%__1cOloadI_fregNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMnegD_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cTAbstractInterpreterLdeopt_entry6FnITosState_i_pC_; +text: .text%__1cLConvI2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJloadSNodeFclone6kM_pnENode__; +text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_; +text: .text%__1cMnegD_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCompiledICKcached_oop6kM_pnHoopDesc__; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: output.o; +text: .text%__1cZregDHi_regDLo_to_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cISubFNodeGOpcode6kM_i_; +text: .text%JVM_IsThreadAlive; +text: .text%__1cQstkI_to_regINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQshrL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_ThreadIis_alive6FpnHoopDesc__i_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: typeArrayKlass.o; +text: .text%__1cXPartialSubtypeCheckNodeGOpcode6kM_i_; +text: .text%__1cSsubD_regD_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSmulD_regD_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLconvI2BNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovIF_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRsarL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQaddI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRtestI_reg_immNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregI_to_stkINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cRsubI_zero_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFTypeFJsingleton6kM_i_; +text: .text%__1cLconvI2BNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_reg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSconvD2I_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubF_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopVinsert_pre_post_loops6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cQaddD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovPI_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKConv2BNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSstring_compareNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQregL_to_stkLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_SystemTout_offset_in_bytes6F_i_; +text: .text%__1cQjava_lang_SystemSin_offset_in_bytes6F_i_; +text: .text%__1cLTypeInstPtrRcast_to_exactness6kMi_pknEType__; +text: .text%__1cWPredictedCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cWPredictedCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cLcmpF_ccNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cWPredictedCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cNCallGeneratorSfor_predicted_call6FpnHciKlass_p03_3_; +text: .text%__1cSconvI2F_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXconvI2D_regDHi_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cObox_handleNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cNCallGeneratorRfor_uncommon_trap6FpnIciMethod_nODeoptimizationLDeoptReason_n0CLDeoptAction__p0_; +text: .text%__1cOcmovPP_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZUncommonTrapCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSaddD_regD_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIVMThreadMis_VM_thread6kM_i_: vmThread.o; +text: .text%__1cIMulFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQmulF_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcmpF_ccNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregL_to_stkLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cGThread2t6M_v_; +text: .text%__1cOcmovLI_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_; +text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_; +text: .text%__1cFTypeFFxmeet6kMpknEType__3_; +text: .text%__1cCosScurrent_stack_size6F_I_; +text: .text%__1cOcmovLL_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIOSThreadNpd_initialize6M_v_; +text: .text%__1cCosScurrent_stack_base6F_pC_; +text: .text%__1cIOSThread2t6MpFpv_i1_v_; +text: .text%__1cIMulDNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cCosRinitialize_thread6F_v_; +text: .text%__1cSdivL_reg_reg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cGThreadOis_Java_thread6kM_i_: vmThread.o; +text: .text%__1cCosPpd_start_thread6FpnGThread__v_; +text: .text%__1cLConvI2FNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlass.o; +text: .text%__1cFTypeDGis_nan6kM_i_; 
+text: .text%jni_NewObjectArray: jni.o; +text: .text%__1cSsubL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%JVM_SetThreadPriority; +text: .text%__1cQaddF_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosMstart_thread6FpnGThread__v_; +text: .text%__1cCosNcreate_thread6FpnGThread_n0AKThreadType_I_i_; +text: .text%_start: os_solaris.o; +text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_; +text: .text%JVM_GetStackAccessControlContext; +text: .text%__1cXjava_lang_reflect_FieldThas_signature_field6F_i_; +text: .text%__1cXjava_lang_reflect_FieldVhas_annotations_field6F_i_; +text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cQsubD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerTload_unaligned_long6MpnMRegisterImpl_i2_v_; +text: .text%__1cFStateM_sub_Op_ModI6MpknENode__v_; +text: .text%JVM_Read; +text: .text%__1cOcmovPI_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_; +text: .text%__1cFStateM_sub_Op_SubL6MpknENode__v_; +text: .text%__1cKCompiledICMstub_address6kM_pC_; +text: .text%__1cFciEnvZcall_has_multiple_targets6FpnNinstanceKlass_nMsymbolHandle_3ri_i_; +text: .text%__1cKReflectionJnew_field6FpnPfieldDescriptor_ipnGThread__pnHoopDesc__; +text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cQsubL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQmodI_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cISubDNodeGOpcode6kM_i_; +text: .text%__1cQmodI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cRsarI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: callGenerator.o; +text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cLOptoRuntimeWresolve_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_; +text: .text%__1cSmulD_regD_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cMloadConINodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%jni_GetStaticFieldID: jni.o; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadKlassNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSstring_compareNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreF0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerIjumpl_to6MrnHAddress_pnMRegisterImpl_i_v_: interp_masm_sparc.o; +text: .text%__1cOJavaAssertionsHenabled6Fpkci_i_; +text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_; +text: .text%__1cSsubD_regD_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__; +text: .text%__1cSstkL_to_regD_0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFStateO_sub_Op_CMoveI6MpknENode__v_; +text: .text%__1cRsarL_reg_imm6NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cIModLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cENodeEgetf6kM_f_; +text: .text%JVM_DesiredAssertionStatus; +text: .text%__1cKJavaThreadKinitialize6M_v_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: node.o; +text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_; +text: .text%__1cUregI_to_stkLHi_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLConvL2DNodeGOpcode6kM_i_; +text: .text%__1cNjni_functions6F_pknTJNINativeInterface___; +text: .text%__1cCosMguard_memory6FpcI_i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: node.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: node.o; +text: .text%__1cQThreadStatistics2t6M_v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC22i_v_; +text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_; +text: .text%__1cQshrL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGParker2t6M_v_; +text: .text%__1cIDivLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSdivL_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: interp_masm_sparc.o; +text: .text%__1cMFlatProfilerJis_active6F_i_; +text: .text%__1cOMacroAssemblerNload_contents6MrnHAddress_pnMRegisterImpl_i_v_: interp_masm_sparc.o; +text: .text%__1cGThreadFstart6Fp0_v_; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: interp_masm_sparc.o; +text: .text%__1cPconvI2D_memNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%jni_GetFloatArrayRegion: jni.o; +text: .text%__1cJMarkSweepMfollow_stack6F_v_; +text: .text%__1cNimmP_pollOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cRtestI_reg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZregDHi_regDLo_to_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJMemRegionMintersection6kMk0_0_; +text: .text%__1cMVirtualSpaceJexpand_by6MI_i_; +text: .text%__1cUregI_to_stkLHi_0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQdivI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKJavaThread2t6MpFp0pnGThread__vI_v_; +text: .text%__1cOcmovIF_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryQjava_mirror_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cRloadConP_pollNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPPerfDataManagerIadd_item6FpnIPerfData_i_v_; +text: .text%__1cKJavaThreadDrun6M_v_; +text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__; +text: .text%JVM_IsArrayClass; +text: .text%jni_CallStaticVoidMethod: jni.o; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__; +text: .text%__1cJloadDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNObjectMonitorGenter26MpnGThread__v_; +text: .text%__1cLConvF2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_; +text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cKstoreBNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFKlassNexternal_name6kM_pkc_; +text: .text%__1cOloadI_fregNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHdel_out6Mp0_v_: generateOptoStub.o; +text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_; +text: 
.text%__1cKstoreLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNloadConPCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLstoreC0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeHeapJexpand_by6MI_i_; +text: .text%__1cOGenerateOopMapKinterp_all6M_v_; +text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_; +text: .text%__1cObranchConFNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cTloadD_unalignedNodeIpipeline6kM_pknIPipeline__; +text: .text%JVM_GetClassName; +text: .text%__1cOGenerateOopMapTmethodsig_to_effect6MpnNsymbolOopDesc_ipnNCellTypeState__i_; +text: .text%__1cOloadI_fregNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlass.o; +text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_; +text: .text%__1cOGenerateOopMapKinit_state6M_v_; +text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_; +text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_; +text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_; +text: .text%__1cSaddD_regD_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cOcmovIF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: loopnode.o; +text: .text%__1cQshrL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHMatcherXpost_store_load_barrier6FpknENode__i_; +text: .text%__1cLConvD2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQstkI_to_regFNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cOcmovIL_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVcompiledICHolderKlassSoop_being_unloaded6MpnRBoolObjectClosure_pnHoopDesc__i_; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_sparc.o; +text: .text%__1cINodeHashEgrow6M_v_; +text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_; +text: .text%__1cOcmovPP_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cMloadConDNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLStrCompNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMacroAssemblerVload_unaligned_double6MpnMRegisterImpl_ipnRFloatRegisterImpl__v_; +text: .text%__1cIMaxINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cJloadSNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_; +text: .text%__1cHnmethodNis_osr_method6kM_i_: nmethod.o; +text: .text%__1cLConvF2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQsubI_reg_regNodeFclone6kM_pnENode__; +text: .text%JVM_Open; +text: .text%__1cRInvocationCounterFreset6M_v_; +text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_; +text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_; +text: .text%__1cSsubL_reg_reg_2NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_; +text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_; +text: .text%__1cICodeBlobJis_zombie6kM_i_: onStackReplacement.o; +text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_; +text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_; +text: .text%__1cSmulL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_; +text: .text%JVM_StartThread; +text: 
.text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o; +text: .text%jni_GetStaticObjectField: jni.o; +text: .text%__1cJArrayDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cIGraphKitSprecision_rounding6MpnENode__2_; +text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_; +text: .text%__1cHAddress2t6Mn0AJaddr_type_i_v_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: generateOptoStub.o; +text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__; +text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_; +text: .text%__1cIciObjectJis_method6M_i_: ciObjectFactory.o; +text: .text%__1cNloadConPCNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cTLoadD_unalignedNodeGOpcode6kM_i_; +text: .text%__1cSstkL_to_regD_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQshrI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%JVM_FreeMemory; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlass.o; +text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%JVM_TotalMemory; +text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cMloadConDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciObjectFactory.o; +text: .text%__1cUmulL_reg_imm13_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCodeBufferWinsert_double_constant6Md_pC_; +text: .text%__1cTAbstractInterpreterWlayout_activation_impl6FpnNmethodOopDesc_iiiipnFframe_4i_i_; +text: .text%__1cQdivL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIciObjectOis_method_data6M_i_: ciObjectFactory.o; +text: .text%__1cOcmovIL_immNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2D_memNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: interpreter_sparc.o; +text: .text%__1cUdivL_reg_imm13_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHThreadsGremove6FpnKJavaThread__v_; +text: .text%__1cIOSThread2T6M_v_; +text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_; +text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_; +text: .text%__1cQandI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_; +text: .text%__1cGParker2T6M_v_; +text: .text%__1cSandL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_; +text: .text%__SLIP.DELETER__A: thread.o; +text: .text%__1cCosOunguard_memory6FpcI_i_; +text: .text%__1cKJavaThreadEexit6Mi_v_; +text: .text%__1cLConvD2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIOSThreadKpd_destroy6M_v_; +text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cODeoptimizationYquery_update_method_data6FnQmethodDataHandle_in0ALDeoptReason_rIri4_pnLProfileData__; +text: .text%__1cKJavaThread2T6M_v_; +text: .text%__1cGThread2T5B6M_v_; +text: .text%__1cCosLfree_thread6FpnIOSThread__v_; +text: .text%__1cFStateM_sub_Op_MulI6MpknENode__v_; +text: .text%__1cNThreadServiceNremove_thread6FpnKJavaThread_i_v_; +text: 
.text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_; +text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o; +text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_; +text: .text%__1cUregI_to_stkLHi_0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSTailCalljmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_; +text: .text%__1cUregI_to_stkLHi_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSTailCalljmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsubF_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cICmpDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLconvI2BNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRNativeMovConstRegEdata6kM_i_; +text: .text%__1cbFunnecessary_membar_volatileNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cLcmpF_ccNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_; +text: .text%jni_CallObjectMethod: jni.o; +text: .text%__1cMTailCallNode2t6MpnENode_222222_v_; +text: .text%__1cQaddD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: deoptimization.o; +text: .text%__1cPconvD2F_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNimmP_pollOperFclone6kM_pnIMachOper__; +text: .text%__1cQdivD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRtestI_reg_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerbEset_method_data_pointer_offset6MpnMRegisterImpl__v_; +text: .text%__1cSconvF2I_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIMaxINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQChunkPoolCleanerEtask6M_v_: allocation.o; +text: .text%__1cJLoadPNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cRloadConP_pollNodeFclone6kM_pnENode__; +text: .text%__1cHTypeInt2t6Miii_v_; +text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_; +text: .text%__1cNloadConPCNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConPCNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIL_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cTOopMapForCacheEntryOreport_results6kM_i_: oopMapCache.o; +text: .text%__1cKConv2BNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cXconvI2D_regDHi_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: library_call.o; +text: .text%__1cSandL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovLI_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadRangeNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_StringbHcreate_from_platform_depended_str6FpkcpnGThread__nGHandle__; +text: .text%__1cRshlI_reg_imm5NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_; +text: .text%__1cQregL_to_stkLNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: interpreterRT_sparc.o; +text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: klassKlass.o; +text: 
.text%__1cLOptoRuntimeYcurrent_time_millis_Type6F_pknITypeFunc__; +text: .text%__1cHTypePtrFxdual6kM_pknEType__; +text: .text%__1cURethrowExceptionNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcastP2INodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotIOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotIOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOcmovLL_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_MonitorNotifyAll; +text: .text%__1cJloadDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOstackSlotIOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cIGraphKitXinsert_mem_bar_volatile6MpnKMemBarNode_i_v_; +text: .text%__1cKCMoveLNodeGOpcode6kM_i_; +text: .text%__1cRshlL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOLibraryCallKitYinline_native_time_funcs6Mi_i_; +text: .text%__1cMnegD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationVtrap_state_has_reason6Fii_i_; +text: .text%__1cVMoveL2D_stack_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHRetDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cTloadD_unalignedNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNiRegIsafeOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNloadConP0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: ciTypeFlow.o; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: stubGenerator_sparc.o; +text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_: instanceKlassKlass.o; +text: .text%__1cZInterpreterMacroAssemblerFpop_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cIAddDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMnegD_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_: methodDataKlass.o; +text: .text%__1cKimmL13OperFclone6kM_pnIMachOper__; +text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cLmethodKlassOklass_oop_size6kM_i_: methodKlass.o; +text: .text%__1cLstoreF0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_: cpCacheKlass.o; +text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_: constMethodKlass.o; +text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_: jni.o; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: arrayKlassKlass.o; +text: .text%__1cQshlL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_: objArrayKlassKlass.o; +text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_: symbolKlass.o; +text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_: compiledICHolderKlass.o; +text: .text%__1cSsubL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmulL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSdivL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_: constantPoolKlass.o; +text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cOloadI_fregNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRtestI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_; +text: .text%__1cJLoadSNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cbIjava_security_AccessControlContextGcreate6FnOobjArrayHandle_inGHandle_pnGThread__pnHoopDesc__; +text: .text%__1cLstoreF0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cUGenericGrowableArrayUclear_and_deallocate6M_v_; +text: .text%__1cIMinINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cNmaxI_eRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetInheritedAccessControlContext; +text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__; +text: .text%__1cNmaxI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%JVM_NativePath; +text: .text%__1cOMacroAssemblerNflush_windows6M_v_; +text: .text%__1cNloadConL0NodeFclone6kM_pnENode__; +text: .text%__1cSsubD_regD_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVCallRuntimeDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_; +text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_; +text: .text%__1cSReferenceProcessorHoops_do6MpnKOopClosure__v_; +text: .text%__1cHCompileKinit_start6MpnJStartNode__v_; +text: .text%__1cKg3RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cVinline_cache_regPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cWloadConI_x41f00000NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstorePNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIGraphKitbAgen_stub_or_native_wrapper6MpCpkcpnIciMethod_iiiii_v_; +text: .text%__1cQObjectStartArrayFreset6M_v_; +text: .text%__1cPconvI2D_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_; +text: .text%__1cQaddD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvF2INodeGOpcode6kM_i_; +text: .text%__1cJimmL0OperFclone6kM_pnIMachOper__; +text: .text%__1cVCallRuntimeDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_; +text: .text%__1cIPSOldGenPadjust_pointers6M_v_; +text: .text%__1cVCallRuntimeDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_: callnode.o; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__: callnode.o; +text: .text%__1cOcmovPI_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIPSOldGenHcompact6M_v_; +text: .text%__1cMtlsLoadPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJvmSymbolsHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cLcmpF_ccNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregL_to_stkLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cVCallRuntimeDirectNodeKmethod_set6Mi_v_; +text: .text%__1cKimmI11OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cSstkL_to_regD_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQcmovI_reg_gtNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQstkI_to_regINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreP0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovIF_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovLL_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%jni_GetStaticMethodID: jni.o; +text: .text%__1cIUniverseWreinitialize_vtable_of6FpnFKlass_pnGThread__v_; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MipnMRegisterImpl__v_; +text: .text%__1cRtestI_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_; +text: 
.text%__1cPconvF2D_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodDataKlass.o; +text: .text%__1cOcmovDF_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopKdo_peeling6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cOcmovLL_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%jint_cmp: parse2.o; +text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__; +text: .text%__1cNloadConL0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_; +text: .text%__1cVMoveL2D_stack_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIMulDNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cIGraphKitTdprecision_rounding6MpnENode__2_; +text: .text%__1cSconvF2I_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHnmethodbCcan_not_entrant_be_converted6M_i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodDataKlass.o; +text: .text%__1cJloadCNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOloadI_fregNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cOcmovLL_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLConvD2FNodeGOpcode6kM_i_; +text: .text%__1cIMulFNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cWloadConI_x41f00000NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKcmpOpFOperFccode6kM_i_: ad_sparc_clone.o; +text: .text%__1cLstoreC0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQregL_to_stkLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLRuntimeStubQnew_runtime_stub6FpkcpnKCodeBuffer_ipnJOopMapSet_i_p0_; +text: .text%__1cIAddFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLcastP2INodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cZregDHi_regDLo_to_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKo2RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cOcmovIF_immNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovDF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQaddL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cZregDHi_regDLo_to_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%JVM_Close; +text: .text%__1cSmulD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKstoreFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSsubD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddP_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cXconvI2D_regDHi_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cTloadD_unalignedNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__; +text: .text%__1cOMacroAssemblerNget_vm_result6MpnMRegisterImpl__v_; +text: .text%__1cQsubF_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerbIcompute_extra_locals_size_in_bytes6MpnMRegisterImpl_22_v_; +text: .text%__1cLcmpF_ccNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPMultiBranchDataScompute_cell_count6FpnOBytecodeStream__i_; +text: .text%__1cPorI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUregI_to_stkLHi_1NodeErule6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cSxorI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvI2D_memNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: templateTable_sparc.o; +text: .text%__1cQdivI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2D_memNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLconvI2BNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_2NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cISubFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cWloadConI_x43300000NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cWloadConI_x41f00000NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: loopnode.o; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: templateTable_sparc.o; +text: .text%__1cSmulI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOtailjmpIndNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: loopnode.o; +text: .text%__1cENodeJis_MemBar6kM_pknKMemBarNode__: classes.o; +text: .text%__1cbFunnecessary_membar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJSubFPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFParseNdo_instanceof6M_v_; +text: .text%__1cLconvI2BNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitOgen_instanceof6MpnENode_2_2_; +text: .text%__1cbFunnecessary_membar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRshrL_reg_imm6NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJloadBNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJloadDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIDivLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvI2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSmulD_regD_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOstackSlotLOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cUregI_to_stkLHi_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKConv2BNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQshlI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_; +text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_; +text: .text%__1cPconvD2F_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovPP_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQsubF_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_NewObjectV: jni.o; +text: .text%__1cOcmovLI_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cCosHSolarisKmmap_chunk6FpcIii_2_; +text: .text%__1cXPartialSubtypeCheckNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%jni_EnsureLocalCapacity; +text: .text%__1cLstoreI0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIAddFNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cTunsafe_intrinsic_id6FpnNsymbolOopDesc_1_nNmethodOopDescLIntrinsicId__; +text: .text%__1cLConvD2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLConvD2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: 
.text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cPorL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_; +text: .text%__1cIDivDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cUregI_to_stkLHi_0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cKVtableStubSpd_code_size_limit6Fi_i_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cSaddD_regD_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_; +text: .text%__1cQsubD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovPP_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableGbranch6Fii_v_; +text: .text%__1cSsubL_reg_reg_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interpreter_sparc.o; +text: .text%__1cNSafePointNodeQpeek_monitor_obj6kM_pnENode__; +text: .text%__1cJloadFNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddI_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandL_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParsePdo_monitor_exit6M_v_; +text: .text%__1cSdivL_reg_reg_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cObranchConFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cObranchConFNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cSconvF2I_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSmembar_releaseNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cOloadI_fregNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cObranchConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPstoreI_FregNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcmpD_ccNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadLNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: stubGenerator_sparc.o; +text: .text%__1cSmulL_reg_reg_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_; +text: .text%__1cHCompile2t6MpnFciEnv_pF_pknITypeFunc_pCpkciiii_v_; +text: .text%__1cLResourceObj2n6FIn0APallocation_type__pv_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_sparc.o; +text: .text%__1cNSafePointNodeQpeek_monitor_box6kM_pnENode__; +text: .text%__1cUregI_to_stkLHi_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitIgen_stub6MpCpkciii_v_; +text: .text%__1cNTemplateTableOpatch_bytecode6FnJBytecodesECode_pnMRegisterImpl_4i_v_; +text: .text%__1cFTypeFFxdual6kM_pknEType__; +text: .text%__1cICmpFNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlass.o; +text: .text%__1cKVtableStubRpd_code_alignment6F_i_; +text: .text%__1cSstkL_to_regD_2NodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cSstkL_to_regD_0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIMachNodeOmemory_operand6kM_pknIMachOper__: ad_sparc_misc.o; +text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cKloadUBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQaddD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cTloadL_unalignedNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cINegDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cVMoveF2I_stack_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvI2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOcmovLL_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRorI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTloadL_unalignedNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTloadL_unalignedNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKloadUBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFMutexbLwait_for_lock_blocking_implementation6MpnKJavaThread__v_; +text: .text%__1cXconvI2D_regDHi_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSvframeArrayElementPunpack_on_stack6MiipnFframe_ii_v_; +text: .text%__1cSconvF2I_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerbFtest_invocation_counter_for_mdp6MpnMRegisterImpl_22rnFLabel__v_; +text: .text%__1cXconvI2D_regDHi_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSvframeArrayElementHfill_in6MpnOcompiledVFrame__v_; +text: .text%__1cFTypeDFxdual6kM_pknEType__; +text: .text%__1cSaddD_regD_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerbAincrement_backedge_counter6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerbBtest_backedge_count_for_osr6MpnMRegisterImpl_22_v_; +text: .text%__1cSmulL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOcmovPI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKEntryPoint2t6M_v_; +text: .text%__1cTloadD_unalignedNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZregDHi_regDLo_to_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcompiledVFrameImonitors6kM_pnNGrowableArray4CpnLMonitorInfo____; +text: .text%__1cOcompiledVFrameLexpressions6kM_pnUStackValueCollection__; +text: .text%__1cHciKlassOsuper_of_depth6MI_p0_; +text: .text%__1cOcompiledVFrameGlocals6kM_pnUStackValueCollection__; +text: .text%__1cOcompiledVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cJimmP0OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cOcompiledVFrameHraw_bci6kM_i_; +text: .text%__1cQshrI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveL2D_stack_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cWloadConI_x43300000NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_; +text: .text%__1cMTailJumpNodeKmatch_edge6kMI_I_; +text: .text%__1cSvframeArrayElementNon_stack_size6kMiiii_i_; +text: .text%__1cWloadConI_x41f00000NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationbJupdate_method_data_from_interpreter6FnQmethodDataHandle_ii_v_; +text: .text%__1cIimmDOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cFframeZinterpreter_frame_set_mdx6Mi_v_; +text: .text%__1cOstackSlotLOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotLOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cTloadD_unalignedNodeErule6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cTloadD_unalignedNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIModLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJimmU5OperFclone6kM_pnIMachOper__; +text: .text%__1cTAbstractInterpreterPsize_activation6FpnNmethodOopDesc_iiiii_i_; +text: .text%__1cOtailjmpIndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSmulD_regD_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSstkL_to_regD_0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovDF_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cTAbstractInterpreterQcontinuation_for6FpnNmethodOopDesc_pCiiri_3_; +text: .text%__1cUregI_to_stkLHi_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cINegFNodeGOpcode6kM_i_; +text: .text%__1cSsubD_regD_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTAbstractInterpreterRlayout_activation6FpnNmethodOopDesc_iiiipnFframe_4i_v_; +text: .text%__1cJScopeDescImonitors6M_pnNGrowableArray4CpnMMonitorValue____; +text: .text%__1cUregI_to_stkLHi_0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJScopeDescLexpressions6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cJScopeDescGlocals6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: klassKlass.o; +text: .text%JVM_GetComponentType; +text: .text%__1cQdivI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%Unsafe_DefineClass1; +text: .text%__1cOcmovII_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLvframeArrayPunpack_to_stack6MrnFframe_i_v_; +text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_; +text: .text%__1cLConvF2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSvframeArrayElementDbci6kM_i_; +text: .text%__1cVMoveF2I_stack_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cICodeBlobZis_at_poll_or_poll_return6MpC_i_; +text: .text%__1cLvframeArrayIallocate6FpnKJavaThread_ipnNGrowableArray4CpnOcompiledVFrame___pnLRegisterMap_nFframe_9A9A9A_p0_; +text: .text%JVM_GetCPFieldModifiers; +text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_; +text: .text%__1cLcastP2INodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNnmethodLocker2t6MpC_v_; +text: .text%__1cNSharedRuntimebJcontinuation_for_implicit_exception6FpnKJavaThread_pCn0AVImplicitExceptionKind__3_; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: deoptimization.o; +text: .text%__1cODeoptimizationNuncommon_trap6FpnKJavaThread_i_pn0ALUnrollBlock__; +text: .text%__1cNloadConL0NodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cODeoptimizationTuncommon_trap_inner6FpnKJavaThread_i_v_; +text: .text%__1cODeoptimizationScreate_vframeArray6FpnKJavaThread_nFframe_pnLRegisterMap__pnLvframeArray__; +text: .text%__1cODeoptimizationNunpack_frames6FpnKJavaThread_i_nJBasicType__; +text: .text%__1cODeoptimizationYfetch_unroll_info_helper6FpnKJavaThread__pn0ALUnrollBlock__; +text: .text%__1cZInterpreterMacroAssemblerXindex_check_without_pop6MpnMRegisterImpl_2i22_v_; +text: .text%__1cRSignatureIteratorKparse_type6M_i_; +text: .text%__1cPconvD2F_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cODeoptimizationRlast_frame_adjust6Fii_i_; +text: .text%__1cQsubD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_DefineClass; +text: .text%JVM_InvokeMethod; +text: .text%__1cOcmovPP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; 
+text: .text%jni_NewDirectByteBuffer; +text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__: jni.o; +text: .text%__1cHBoxNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%jni_AllocObject: jni.o; +text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_; +text: .text%__1cFStateL_sub_Op_Box6MpknENode__v_; +text: .text%__1cTmembar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKCodeBufferVinsert_float_constant6Mf_pC_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: jniFastGetField_sparc.o; +text: .text%__1cMnegD_regNodeIpipeline6kM_pknIPipeline__; +text: .text%Unsafe_AllocateInstance; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: generateOopMap.o; +text: .text%__1cQComputeCallStackHdo_byte6M_v_: generateOopMap.o; +text: .text%__1cQstkI_to_regINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_; +text: .text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_; +text: .text%__1cYinternal_word_RelocationGtarget6M_pC_; +text: .text%__1cYinternal_word_RelocationMforce_target6MpC_v_: relocInfo.o; +text: .text%__1cJStubQdDueueKremove_all6M_v_; +text: .text%__1cMloadConFNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cETypeJis_finite6kM_i_; +text: .text%__1cPconvI2D_memNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLconvI2BNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPorL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPorL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerGif_cmp6MnJAssemblerJCondition_i_v_; +text: .text%__1cZInterpreterMacroAssemblerLindex_check6MpnMRegisterImpl_2i22_v_; +text: .text%__1cOMacroAssemblerPcasx_under_lock6MpnMRegisterImpl_22pCi_v_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cQsubF_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshlI_reg_imm5NodeFclone6kM_pnENode__; +text: .text%__1cNloadRangeNodeFclone6kM_pnENode__; +text: .text%__1cSaddL_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovPI_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cKstfSSFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMloadConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_; +text: .text%__1cTmembar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvI2L_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovII_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_; +text: .text%__1cJCmpF3NodeGOpcode6kM_i_; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceRefKlass.o; +text: .text%__1cLMoveL2DNodeGOpcode6kM_i_; +text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cLTypeInstPtrLmirror_type6kM_pnGciType__; +text: .text%__1cOstackSlotIOperFclone6kM_pnIMachOper__; +text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_pkcnGHandle_6_6_; +text: .text%__1cOcmovII_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerHbr_null6MpnMRegisterImpl_inJAssemblerHPredict_rnFLabel__v_; +text: 
.text%__1cFParseScan_rerun_bytecode6M_i_; +text: .text%__1cQshrL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_pnNsymbolOopDesc_pkc_nGHandle__; +text: .text%__1cIAddFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKstfSSFNodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cHdom_lca6FpnFBlock_1_1_: gcm.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlass.o; +text: .text%JVM_NewArray; +text: .text%__1cHOrLNodeGOpcode6kM_i_; +text: .text%__1cLStrCompNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cLOopMapCache2t6M_v_; +text: .text%__1cNTemplateTableHconvert6F_v_; +text: .text%__1cOcmovDF_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerFpop_l6MpnMRegisterImpl__v_; +text: .text%__1cOcmovLI_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSMachBreakpointNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSInterpreterRuntimeQcreate_exception6FpnKJavaThread_pc3_v_; +text: .text%__1cFParseWload_interpreter_state6MpnENode_2_v_; +text: .text%__1cQComputeCallStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cKPSYoungGenKprecompact6M_v_; +text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_; +text: .text%__1cSconvD2I_helperNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMnegF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHThreadsLgc_prologue6F_v_; +text: .text%__1cHThreadsLgc_epilogue6F_v_; +text: .text%__1cPconvI2L_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPconvD2I_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_; +text: .text%__1cUParallelScavengeHeapHcollect6MnHGCCauseFCause__v_; +text: .text%__1cJCodeCacheMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cMStartOSRNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: callnode.o; +text: .text%__1cRCardTableModRefBSEis_a6MnKBarrierSetEName__i_: cardTableExtension.o; +text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_; +text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_; +text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cLconvP2BNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cIciMethodVget_osr_flow_analysis6Mi_pnKciTypeFlow__; +text: .text%__1cPconvD2F_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvD2I_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLMoveF2INodeGOpcode6kM_i_; +text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_; +text: .text%__1cSReferenceProcessorPoops_do_statics6FpnKOopClosure__v_; +text: .text%__1cMStartOSRNodeKosr_domain6F_pknJTypeTuple__; +text: .text%__1cVVM_ParallelGCSystemGCEname6kM_pkc_: vm_operations.o; +text: .text%__1cVVM_ParallelGCSystemGCEdoit6M_v_; +text: .text%__1cVVM_ParallelGCSystemGC2t6MI_v_; +text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_; +text: .text%__1cOMacroAssemblerPbreakpoint_trap6M_v_; +text: .text%__1cJBasicLockHmove_to6MpnHoopDesc_p0_v_; +text: .text%__1cJMarkSweepNrestore_marks6F_v_; +text: .text%__1cJMarkSweepMadjust_marks6F_v_; +text: 
.text%__1cJMarkSweepXfollow_weak_klass_links6F_v_; +text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_; +text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_; +text: .text%__1cMStubCodeMark2T6M_v_; +text: .text%__1cQAbstractCompilerMsupports_osr6M_i_: c2compiler.o; +text: .text%__1cNCallGeneratorHfor_osr6FpnIciMethod_i_p0_; +text: .text%__1cLClassLoaderSget_system_package6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cJPSPermGenKprecompact6M_v_; +text: .text%JVM_GC; +text: .text%__1cIPSOldGenKprecompact6M_v_; +text: .text%__1cUPSMarkSweepDecoratorbIset_destination_decorator_perm_gen6F_v_; +text: .text%__1cUPSMarkSweepDecoratorbHset_destination_decorator_tenured6F_v_; +text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cKDictionaryMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cLPSMarkSweepQinvoke_no_policy6Fpii_v_; +text: .text%__1cLPSMarkSweepGinvoke6Fpii_v_; +text: .text%__1cQmulL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cWloadConI_x43300000NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUPSAdaptiveSizePolicyUmajor_collection_end6MInHGCCauseFCause__v_; +text: .text%__1cUPSAdaptiveSizePolicyWmajor_collection_begin6M_v_; +text: .text%__1cWloadConI_x41f00000NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_; +text: .text%__1cJPSPermGenQcompute_new_size6MI_v_; +text: .text%__1cKPSYoungGenHcompact6M_v_; +text: .text%JVM_GetSystemPackage; +text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_; +text: .text%__1cKPSYoungGenPadjust_pointers6M_v_; +text: .text%__1cQUncommonTrapBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cNExceptionBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: objArrayKlass.o; +text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_; +text: .text%__1cJCodeCacheLgc_prologue6F_v_; +text: .text%__1cJCodeCacheLgc_epilogue6F_v_; +text: .text%__1cIXorINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNStubGeneratorFalign6Mi_v_: stubGenerator_sparc.o; +text: .text%__1cQregL_to_stkLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLcastP2INodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKcmpOpFOperKless_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cOcmovPI_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCMoveNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cQdivD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcmovIF_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKCMoveDNodeGOpcode6kM_i_; +text: .text%__1cJLoadDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIMulFNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cNStubGeneratorLstub_prolog6MpnMStubCodeDesc__v_: stubGenerator_sparc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: klassKlass.o; +text: .text%__1cQaddL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%jni_GetStringRegion: jni.o; +text: .text%JVM_RawMonitorCreate; +text: .text%__1cJloadLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMloadConFNodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_; +text: 
.text%__1cMmatch_option6FpknMJavaVMOption_ppkc5i_i_: arguments.o; +text: .text%__1cNloadConPCNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotPOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotPOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotPOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_; +text: .text%__1cSaddP_reg_imm13NodeFclone6kM_pnENode__; +text: .text%__1cOstackSlotFOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%JVM_Sleep; +text: .text%__1cHBoxNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cObox_handleNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvL2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLstoreF0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceRefKlass.o; +text: .text%__1cQstkI_to_regFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceRefKlass.o; +text: .text%__1cRorI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cVMoveF2I_stack_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotLOperFclone6kM_pnIMachOper__; +text: .text%Unsafe_CompareAndSwapInt; +text: .text%JVM_Lseek; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: templateTable_sparc.o; +text: .text%__1cNloadRangeNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPconvD2F_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRComputeEntryStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cPconvF2D_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cQmulI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQmulF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSconvF2I_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRmethodDataOopDescRbci_to_extra_data6Mii_pnLProfileData__; +text: .text%__1cOcmovLI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPMultiBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cQregL_to_stkLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregP_to_stkPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvI2D_memNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddL_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerQtest_mdp_data_at6MipnMRegisterImpl_rnFLabel_2_v_; +text: .text%__1cVMoveL2D_stack_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: loopnode.o; +text: .text%__1cPconvD2F_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovLI_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOLibraryCallKitXinline_string_compareTo6M_i_; +text: .text%__1cQdivI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cGciType2t6MnJBasicType__v_; +text: .text%__1cOMacroAssemblerKbr_notnull6MpnMRegisterImpl_inJAssemblerHPredict_rnFLabel__v_; +text: .text%__1cJLoadBNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: 
machnode.o; +text: .text%__1cLOptoRuntimeRnew_objArray_Type6F_pknITypeFunc__; +text: .text%__1cQaddF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cETypeEmake6Fn0AFTYPES__pk0_; +text: .text%__1cSconvF2I_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRsarL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSstring_compareNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_GetEnv; +text: .text%__1cJloadDNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQstkI_to_regINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSstring_compareNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_sparc.o; +text: .text%Unsafe_GetNativeByte; +text: .text%JVM_NanoTime; +text: .text%__1cCosNjavaTimeNanos6F_x_; +text: .text%__1cOMacroAssemblerOrestore_thread6MkpnMRegisterImpl__v_; +text: .text%__1cVcompiledICHolderKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQandL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIimmFOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cHThreadsLnmethods_do6F_v_; +text: .text%__1cKcmpOpFOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cICodeBlobFflush6M_v_; +text: .text%__1cZInterpreterMacroAssemblerFpop_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cFParseMdo_anewarray6M_v_; +text: .text%__1cSdivL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_CallVoidMethod: jni.o; +text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__; +text: .text%__1cObranchConFNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cFParseOdo_tableswitch6M_v_; +text: .text%__1cOcmovIF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvI2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSaddL_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLstoreC0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%Unsafe_GetNativeFloat; +text: .text%__1cOMacroAssemblerGmembar6MnJAssemblerQMembar_mask_bits__v_: templateTable_sparc.o; +text: .text%__1cNTemplateTableXjvmti_post_field_access6Fiii_v_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: methodLiveness.o; +text: .text%__1cbCAbstractInterpreterGeneratorbBgenerate_result_handler_for6MnJBasicType__pC_; +text: .text%__1cOstackSlotFOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cHnmethodFflush6M_v_; +text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_; +text: .text%__1cKo2RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cQregI_to_stkINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMloadConFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cMregD_lowOperFclone6kM_pnIMachOper__; +text: .text%__1cJloadFNodeFclone6kM_pnENode__; +text: .text%__1cJEventMark2t6MpkcE_v_: nmethod.o; +text: .text%__1cCosNcommit_memory6FpcII_i_; +text: .text%__1cJloadLNodeFclone6kM_pnENode__; +text: .text%__1cParrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cSaddI_reg_imm13NodeFclone6kM_pnENode__; +text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_; +text: .text%__1cLOptoRuntimeNgenerate_stub6FpnFciEnv_pF_pknITypeFunc_pCpkciiii_8_; +text: .text%__1cWloadConI_x43300000NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFParseQdo_monitor_enter6M_v_; +text: .text%__1cPorL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cLstoreC0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPSPromotionLABRunallocate_object6MpnHoopDesc__i_; +text: .text%JVM_FindPrimitiveClass; +text: .text%__1cVMoveL2D_stack_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_; +text: .text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_; +text: .text%__1cSmodL_reg_imm13NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRshrI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cSsubL_reg_reg_2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUmulL_reg_imm13_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPconvI2F_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cOstackSlotFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cUdivL_reg_imm13_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_continuation_for6MnITosState__pC_; +text: .text%__1cRSignatureIteratorHiterate6M_v_; +text: .text%__1cOcmovLL_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcastP2INodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constantPoolKlass.o; +text: .text%__1cJname2type6Fpkc_nJBasicType__; +text: .text%__1cSmulL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_; +text: .text%__1cLcastP2INodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPBytecode_invokeLresult_type6kMpnGThread__nJBasicType__; +text: .text%__1cLCastP2INodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cOloadConL13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmodL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFTypeFJis_finite6kM_i_; +text: .text%__1cKcmpOpFOperHgreater6kM_i_: ad_sparc_clone.o; +text: .text%__1cIDivDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOMacroAssemblerKget_thread6M_v_; +text: .text%__1cPconvI2F_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovDF_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvI2F_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSaddD_regD_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKVtableStub2n6FIi_pv_; +text: .text%__1cNloadConPCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWloadConI_x41f00000NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKScopeValueLis_location6kM_i_: debugInfo.o; +text: .text%__1cLVtableStubsLcreate_stub6FiipnNmethodOopDesc__pC_; +text: .text%__1cLOptoRuntimebBhandle_wrong_method_ic_miss6FpnKJavaThread__pC_; +text: .text%__1cSmulD_regD_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_; +text: .text%__1cZregDHi_regDLo_to_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsubD_regD_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLVtableStubsOis_entry_point6FpC_i_; +text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlass.o; +text: 
.text%__1cPconvD2F_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciMethodMnative_entry6M_pC_; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cLVtableStubsScreate_vtable_stub6Fii_pnKVtableStub__; +text: .text%__1cVMoveF2I_stack_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__; +text: .text%__1cQAbstractCompilerPsupports_native6M_i_: c2compiler.o; +text: .text%__1cPorL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvD2F_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIciSymbolHas_utf86M_pkc_; +text: .text%__1cQandI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cMnegD_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cFStateO_sub_Op_CMoveP6MpknENode__v_; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: typeArrayKlass.o; +text: .text%__1cSconvF2I_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulD_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerZtotal_frame_size_in_bytes6Mi_i_; +text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_; +text: .text%__1cKCompiledICOis_megamorphic6kM_i_; +text: .text%__1cVMoveF2I_stack_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_; +text: .text%Unsafe_StaticFieldOffset; +text: .text%__1cQmulI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_; +text: .text%__1cQaddI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovLI_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetClassContext; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod__v_; +text: .text%Unsafe_StaticFieldBaseFromField; +text: .text%Unsafe_EnsureClassInitialized; +text: .text%__1cOcmovIF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pCi_v_; +text: .text%__1cKCodeBufferQalloc_relocation6MI_v_; +text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_; +text: .text%__1cWloadConI_x43300000NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%Unsafe_GetObjectVolatile; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlassKlass.o; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_; +text: .text%__1cKstoreFNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFKlassMoop_is_array6kM_i_: objArrayKlassKlass.o; +text: .text%__1cVMoveL2D_stack_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadLNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cSmulL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHMulNodeGis_Mul6kM_pk0_: classes.o; +text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cOloadConL13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cKLoadPCNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_; +text: .text%__1cGEventsDlog6FpkcE_v_: compiledIC.o; +text: .text%__1cLstoreF0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPconvI2D_memNodeOmemory_operand6kM_pknIMachOper__; +text: 
.text%__1cZInterpreterMacroAssemblerNsuper_call_VM6MpnMRegisterImpl_22pC22i_v_; +text: .text%__1cNloadConPCNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cETypeFxdual6kM_pk0_; +text: .text%__1cJOopMapSetQsingular_oop_map6M_pnGOopMap__; +text: .text%__1cKimmU13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_; +text: .text%__1cZInterpreterMacroAssemblerSnotify_method_exit6MinITosState__v_; +text: .text%__1cSaddL_reg_imm13NodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_; +text: .text%__1cZInterpreterMacroAssemblerbCincrement_invocation_counter6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerQaccess_local_int6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerbDunlock_if_synchronized_method6MnITosState_ii_v_; +text: .text%__1cOtypeArrayKlassMcreate_klass6FnJBasicType_ipnGThread__pnMklassOopDesc__; +text: .text%__1cIGraphKitSgen_native_wrapper6MpnIciMethod__v_; +text: .text%__1cPconvI2L_regNodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerWempty_expression_stack6M_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_; +text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MirnFLabel__v_; +text: .text%__1cOcmovIL_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUInterpreterGeneratorbAgenerate_run_compiled_code6M_v_; +text: .text%__1cOCompilerThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%__1cUInterpreterGeneratorUgenerate_fixed_frame6Mi_v_; +text: .text%__1cCosHSolarisSset_signal_handler6Fiii_v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_; +text: .text%__1cLOptoRuntimeRresolve_call_Type6F_pknITypeFunc__; +text: .text%__1cOtailjmpIndNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_; +text: .text%__1cCosEstat6FpkcpnEstat__i_; +text: .text%__1cQregF_to_stkINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRComputeEntryStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cRComputeEntryStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cMMonitorChunk2t6Mi_v_; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cOPhaseIdealLoopJclone_iff6MpnHPhiNode_pnNIdealLoopTree__pnIBoolNode__; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: callnode.o; +text: .text%__1cKReflectionVis_same_class_package6FpnMklassOopDesc_2_i_; +text: .text%__1cQComputeCallStackIdo_float6M_v_: generateOopMap.o; +text: .text%__1cMMonitorValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_; +text: .text%__1cPorL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOptoRuntimeMrethrow_Type6F_pknITypeFunc__; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: methodDataOop.o; +text: .text%jni_SetStaticObjectField: jni.o; +text: .text%jni_RegisterNatives: jni.o; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: templateTable_sparc.o; +text: .text%__1cFframebLprevious_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_; +text: .text%__1cQshlL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_GetClassDeclaredFields; +text: .text%__1cCosMuser_handler6F_pv_; +text: .text%JVM_IsSameClassPackage; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: interpreter.o; +text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_; +text: 
.text%__1cKJavaThreadRadd_monitor_chunk6MpnMMonitorChunk__v_; +text: .text%__1cKJavaThreadUremove_monitor_chunk6MpnMMonitorChunk__v_; +text: .text%__1cLcastP2INodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVMoveL2D_stack_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableGiconst6Fi_v_; +text: .text%__1cLConvF2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%JVM_LoadLibrary; +text: .text%JVM_IsSupportedJNIVersion; +text: .text%Unsafe_ObjectFieldOffset; +text: .text%__1cZInterpreterMacroAssemblerYtest_method_data_pointer6MrnFLabel__v_; +text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_; +text: .text%__1cHCompileRget_Method_invoke6M_pnIciMethod__; +text: .text%__1cZInterpreterMacroAssemblerSget_cpool_and_tags6MpnMRegisterImpl_2_v_; +text: .text%__1cIAddDNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cHCompileWget_MethodAccessorImpl6M_pnPciInstanceKlass__; +text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableH_return6FnITosState__v_; +text: .text%__1cOPSVirtualSpaceJexpand_by6MI_i_; +text: .text%__1cHOrLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKimmP13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cLConvD2FNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_; +text: .text%__1cHnmethodbJcontinuation_for_implicit_exception6MpC_1_; +text: .text%__1cNSharedRuntimeEdrem6Fdd_d_; +text: .text%__1cQstkI_to_regINodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cUInterpreterGeneratorbEgenerate_asm_interpreter_entry6Mi_pC_; +text: .text%__1cSstkL_to_regD_2NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cIAddDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSstkL_to_regD_0NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cPstoreI_FregNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTloadD_unalignedNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLVtableStubsIcontains6FpC_i_; +text: .text%__1cOloadI_fregNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLconvP2BNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cUInterpreterGeneratorbCgenerate_check_compiled_code6MrnFLabel__v_; +text: .text%__1cUInterpreterGeneratorbDgenerate_stack_overflow_check6MpnMRegisterImpl_22_v_; +text: .text%__1cCosZvm_allocation_granularity6F_i_; +text: .text%__1cMTailJumpNodeGOpcode6kM_i_; +text: .text%__1cPconvF2D_regNodeFclone6kM_pnENode__; +text: .text%__1cOLibraryCallKitbDis_method_invoke_or_aux_frame6MpnIJVMState__i_; +text: .text%__1cTloadD_unalignedNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_; +text: .text%__1cJMemRegion2t6M_v_: cardTableModRefBS.o; +text: .text%__1cSestimate_path_freq6FpnENode__f_: loopnode.o; +text: .text%__1cCosOreserve_memory6FIpc_1_; +text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_; +text: .text%__1cSmulL_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNRegisterSaverWrestore_live_registers6FpnOMacroAssembler__v_; +text: .text%__1cKstfSSFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLTypeInstPtrOxmeet_unloaded6kMpk0_2_; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: subnode.o; +text: .text%__1cRtestI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosNcommit_memory6FpcI_i_; +text: 
.text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_; +text: .text%__1cWImplicitExceptionTable2t6MpknHnmethod__v_; +text: .text%__1cWImplicitExceptionTableCat6kMI_I_; +text: .text%__1cFParseVcatch_call_exceptions6MrnYciExceptionHandlerStream__v_; +text: .text%jni_GetJavaVM; +text: .text%__1cOcmovDF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_MonitorEnter: jni.o; +text: .text%__1cQConstantIntValuePis_constant_int6kM_i_: debugInfo.o; +text: .text%jni_MonitorExit: jni.o; +text: .text%__1cOMacroAssemblerDret6Mi_v_: templateTable_sparc.o; +text: .text%__1cLConvL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cULinearLeastSquareFit2t6MI_v_; +text: .text%__1cQdivL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cOLibraryCallKitbBinline_native_currentThread6M_i_; +text: .text%__1cIciObjectMis_classless6kM_i_: ciMethod.o; +text: .text%__1cTGeneratePairingInfoOreport_results6kM_i_: ciMethod.o; +text: .text%__1cNReservedSpaceKfirst_part6MIii_0_; +text: .text%__1cNReservedSpace2t6MI_v_; +text: .text%__1cSCardTableExtensionVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cOloadI_fregNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cIAddDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJloadFNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKConv2BNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvI2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSconvD2I_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_Throw: jni.o; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_; +text: .text%__1cFTypeFGis_nan6kM_i_; +text: .text%__1cLMoveL2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKReturnNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cIDivINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cISubDNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cPstoreI_FregNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cINegFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQciByteCodeStreamXget_method_holder_index6M_i_; +text: .text%__1cOLibraryCallKitXgenerate_current_thread6MrpnENode__2_; +text: .text%__1cOMacroAssemblerEfneg6MnRFloatRegisterImplFWidth_p13_v_; +text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_: interpreterRT_sparc.o; +text: .text%__1cRtestI_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNSpaceCounters2t6MpkciIpnMMutableSpace_pnSGenerationCounters__v_; +text: .text%__1cLcmpF_ccNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_; +text: .text%jni_SetObjectField: jni.o; +text: .text%__1cISubDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cISubFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__; +text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_; +text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_sparc.o; +text: .text%bootstrap_flush_windows; +text: .text%__1cMloadConPNodeGis_Con6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cSdivL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerbCverify_oop_or_return_address6MpnMRegisterImpl_2_v_; +text: .text%__1cFStateO_sub_Op_Conv2B6MpknENode__v_; +text: .text%__1cQmodL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNRegisterSaverTsave_live_registers6FpnOMacroAssembler_ipi_pnGOopMap__; +text: .text%__1cSmulL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: symbolKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: symbolKlass.o; +text: .text%__1cIDivFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKMemoryPool2t6Mpkcn0AIPoolType_IIii_v_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_; +text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_; +text: .text%__1cSsubL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_double6M_v_; +text: .text%__1cPciInstanceKlassbDcompute_shared_is_initialized6M_i_; +text: .text%__1cQmulD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%Unsafe_AllocateMemory; +text: .text%__1cSandL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_GetLastErrorString; +text: .text%__1cQmodL_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_; +text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_; +text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_; +text: .text%__1cPstoreI_FregNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_; +text: .text%__1cSandI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNTemplateTablebAload_invoke_cp_cache_entry6FipnMRegisterImpl_22ii_v_; +text: .text%__1cMnegD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNciMethodKlassEmake6F_p0_; +text: .text%__1cOLibraryCallKitZinline_native_Class_query6MnIciMethodLIntrinsicId__i_; +text: .text%__1cNTemplateTableGlstore6Fi_v_; +text: .text%__1cOLibraryCallKitbNinline_native_Reflection_getCallerClass6M_i_; +text: .text%__1cLConvF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQaddI_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: frame.o; +text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_; +text: .text%__1cRcompL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cLconvI2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLConvD2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_2_v_; +text: .text%__1cSconvD2I_helperNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2D_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cLClassLoaderXcreate_class_path_entry6FpcnEstat_ppnOClassPathEntry__v_; +text: .text%__1cKstfSSFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOClassPathEntry2t6M_v_; +text: .text%__1cZInterpreterMacroAssemblerRprofile_checkcast6MipnMRegisterImpl_2_v_; +text: .text%__1cINegDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerQaccess_local_ptr6MpnMRegisterImpl_2_v_; +text: .text%__1cOloadConL13NodeFclone6kM_pnENode__; +text: .text%__1cNTemplateTableGistore6Fi_v_; +text: .text%__1cIRetTableUfind_jsrs_for_target6Mi_pnNRetTableEntry__; +text: .text%__1cPconvL2I_regNodeHsize_of6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarI_reg_imm5NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_2NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableGastore6Fi_v_; +text: .text%__1cNTemplateTableZload_field_cp_cache_entry6FipnMRegisterImpl_22i_v_; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: assembler_sparc.o; +text: .text%__1cIRetTableHadd_jsr6Mii_v_; +text: .text%__1cMnegF_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cQregF_to_stkINodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cRComputeEntryStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cPorL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulD_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cNTemplateTableGdstore6Fi_v_; +text: .text%__1cNTemplateTableGfstore6Fi_v_; +text: .text%jni_CallStaticObjectMethod: jni.o; +text: .text%__1cPconvD2F_regNodeFclone6kM_pnENode__; +text: .text%__1cbCAbstractInterpreterGeneratorbHgenerate_exception_handler_common6Mpkc2i_pC_; +text: .text%__1cLconvP2BNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLconvP2BNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovLL_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQandI_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cMnegD_regNodeFclone6kM_pnENode__; +text: .text%__1cOMacroAssemblerJfloat_cmp6MiipnRFloatRegisterImpl_2pnMRegisterImpl__v_; +text: .text%__1cLconvI2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cODeoptimizationLUnrollBlockOsize_of_frames6kM_i_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceRefKlass.o; +text: .text%__1cCosGsignal6Fipv_1_; +text: .text%__1cQaddD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cISubDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cISubFNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cISubFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNflagsRegLOperFclone6kM_pnIMachOper__; +text: .text%__1cNTemplateTableFlload6Fi_v_; +text: .text%__1cNTemplateTableFiload6Fi_v_; +text: .text%__1cJcmpOpOperFclone6kM_pnIMachOper__; +text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_; +text: .text%__1cFTypeFFempty6kM_i_; +text: .text%__1cLconvP2BNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVMoveF2I_stack_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC22_v_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC2_v_; +text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_; +text: .text%__1cKstfSSFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapTret_jump_targets_do6MpnOBytecodeStream_pFp0ipi_vi4_v_; +text: .text%__1cPconvI2D_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%Unsafe_SetMemory; +text: .text%__1cSstkL_to_regD_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstfSSFNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_x6MnJAssemblerJCondition_pCpnMRegisterImpl__v_; +text: .text%__1cVMoveF2I_stack_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHTypePtrKadd_offset6kMi_pk0_; +text: .text%__1cOcmovLI_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConL0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKg1RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: 
.text%__1cOcmovPI_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovDF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_; +text: .text%__1cQsubF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSstkL_to_regD_1NodeFclone6kM_pnENode__; +text: .text%__1cFParseRjump_if_true_fork6MpnGIfNode_ii_v_; +text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_icc6MnJAssemblerJCondition_pCpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableFfload6Fi_v_; +text: .text%__1cFParsePdo_lookupswitch6M_v_; +text: .text%__1cNTemplateTableFdload6Fi_v_; +text: .text%__1cNgen_new_frame6FpnOMacroAssembler_i_v_: runtime_sparc.o; +text: .text%__1cKstfSSFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cINegDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNTemplateTableFaload6Fi_v_; +text: .text%__1cRMachSpillCopyNodeHsize_of6kM_I_: ad_sparc.o; +text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_; +text: .text%__1cOGenerateOopMapRdo_multianewarray6Mii_v_; +text: .text%__1cQOopMapCacheEntryPfill_for_native6M_v_; +text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%jni_CallStaticObjectMethodV: jni.o; +text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_; +text: .text%__1cIDivDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMloadConDNodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cJMemRegionFminus6kMk0_0_; +text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__; +text: .text%__1cJArgumentsMbuild_string6Fppcpkc_v_; +text: .text%__1cSInterpreterRuntimebKthrow_ArrayIndexOutOfBoundsException6FpnKJavaThread_pci_v_; +text: .text%__1cOtailjmpIndNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNMemoryManager2t6M_v_; +text: .text%__1cFStatebB_sub_Op_PartialSubtypeCheck6MpknENode__v_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_22_v_; +text: .text%__1cVMoveL2D_stack_regNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cJArgumentsMadd_property6Fpkc_i_; +text: .text%__1cOtailjmpIndNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSObjectSynchronizerHinflate6FpnHoopDesc__pnNObjectMonitor__; +text: .text%__1cFStateM_sub_Op_DivI6MpknENode__v_; +text: .text%__1cCosHSolarisOis_sig_ignored6Fi_i_; +text: .text%__1cUPSGenerationCounters2t6MpkciipnOPSVirtualSpace__v_; +text: .text%__1cCosFyield6F_v_; +text: .text%__1cLOptoRuntimeVgenerate_handler_blob6FpCi_pnNSafepointBlob__; +text: .text%__1cKstfSSFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: runtime_sparc.o; +text: .text%__1cQsubD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_: interpreterRT_sparc.o; +text: .text%__1cOtailjmpIndNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cIDivDNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cFParseRdo_multianewarray6M_v_; +text: .text%__1cLOptoRuntimeTmultianewarray_Type6Fi_pknITypeFunc__; +text: .text%__1cZInterpreterMacroAssemblerRget_constant_pool6MpnMRegisterImpl__v_; +text: .text%__1cXPartialSubtypeCheckNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOcmovIF_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNFingerprinterLfingerprint6M_X_: oopMapCache.o; +text: .text%__1cLMoveF2INodeLbottom_type6kM_pknEType__: classes.o; +text: 
.text%__1cSconvI2D_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveF2I_stack_regNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cLstoreF0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl_2_v_; +text: .text%__1cPstoreI_FregNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSCommandLineFlagsExJboolAtPut6FnXCommandLineFlagWithType_i_v_; +text: .text%__1cSCommandLineFlagsExKis_default6FnPCommandLineFlag__i_; +text: .text%__1cOcmovLL_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_i2_v_; +text: .text%__1cRNativeInstructionPis_ic_miss_trap6M_i_; +text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cMciArrayKlassRbase_element_type6M_pnGciType__; +text: .text%JVM_GetInterfaceVersion; +text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_2222rnFLabel__v_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableGfconst6Fi_v_; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: stubGenerator_sparc.o; +text: .text%__1cOCompilerThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_; +text: .text%__1cOcmovPI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_; +text: .text%JVM_RegisterSignal; +text: .text%JVM_FindSignal; +text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pIi_v_: oopMapCache.o; +text: .text%jio_vsnprintf; +text: .text%__1cQshrL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MpnMRegisterImpl_i22_v_; +text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_222_v_; +text: .text%__1cNReservedSpaceJlast_part6MI_0_; +text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_; +text: .text%__1cOPSVirtualSpace2t6MnNReservedSpace_I_v_; +text: .text%__1cFTypeDFempty6kM_i_; +text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o; +text: .text%__1cNIdealLoopTreeUmerge_many_backedges6MpnOPhaseIdealLoop__v_; +text: .text%__1cODeoptimizationLUnrollBlock2T6M_v_; +text: .text%jni_GetDoubleArrayRegion: jni.o; +text: .text%__1cHciKlassIis_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cLconvP2BNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKfix_parent6FpnNIdealLoopTree_1_v_: loopnode.o; +text: .text%__1cZInterpreterMacroAssemblerUadd_monitor_to_stack6MipnMRegisterImpl_2_v_; +text: .text%JVM_Available; +text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_; +text: .text%__1cQshlL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMOopTaskQdDueueKinitialize6M_v_; +text: .text%__1cMOopTaskQdDueue2t6M_v_; +text: .text%__1cRNativeInstructionKis_illegal6M_i_; +text: .text%__1cZInterpreterMacroAssemblerQtop_most_monitor6M_nHAddress__; +text: .text%__1cLstoreF0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosGgetenv6Fpkcpci_i_; +text: .text%__1cMVM_OperationNdoit_prologue6M_i_: 
vm_operations.o; +text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_; +text: .text%__1cKi0RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_; +text: .text%__1cPOopTaskQdDueueSetOregister_queue6MipnMOopTaskQdDueue__v_; +text: .text%__1cMVM_OperationNdoit_epilogue6M_v_: vm_operations.o; +text: .text%__1cNVM_DeoptimizeEname6kM_pkc_: vm_operations.o; +text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_; +text: .text%__1cSconvF2I_helperNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNloadConP0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_; +text: .text%__1cbAconvL2D_reg_slow_fxtofNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOstackSlotFOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cNTemplateTableSputfield_or_static6Fii_v_; +text: .text%__1cOstackSlotFOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotFOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cPconvF2I_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableSgetfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableUjvmti_post_field_mod6Fii_v_; +text: .text%__1cHMatcherQconvL2FSupported6F_ki_; +text: .text%__1cNTemplateTableGlconst6Fi_v_; +text: .text%__1cLstoreC0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRcompL_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cUcompI_iReg_imm13NodeFclone6kM_pnENode__; +text: .text%__1cMPeriodicTaskGenroll6M_v_; +text: .text%__1cMPeriodicTask2t6MI_v_; +text: .text%__1cPconvF2I_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFTypeDJis_finite6kM_i_; +text: .text%__1cPconvL2I_regNodeFclone6kM_pnENode__; +text: .text%__1cNTemplateTableHcastore6F_v_; +text: .text%Unsafe_CompareAndSwapObject; +text: .text%__1cLNamedThread2t6M_v_; +text: .text%__1cSconvD2I_helperNodeFclone6kM_pnENode__; +text: .text%__1cLNamedThreadIset_name6MpkcE_v_; +text: .text%__1cJloadDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQdivD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWloadConI_x43300000NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableKinitialize6F_v_; +text: .text%__1cKcmpOpFOperJnot_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cPconvD2F_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_; +text: .text%__1cNTemplateTableGdconst6Fi_v_; +text: .text%__1cNTemplateTableDldc6Fi_v_; +text: .text%__1cSconvF2I_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovIF_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovIF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: universe.o; +text: .text%__1cJimmL0OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cLcastP2INodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovLL_regNodeFclone6kM_pnENode__; +text: .text%__1cbAconvL2D_reg_slow_fxtofNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLconvP2BNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSsubD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cQregF_to_stkINodeFclone6kM_pnENode__; +text: .text%__1cSstkL_to_regD_2NodeFclone6kM_pnENode__; +text: .text%__1cQregF_to_stkINodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNTemplateTableTinvokevfinal_helper6FpnMRegisterImpl_2_v_; +text: .text%__1cSmulD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableUgenerate_vtable_call6FpnMRegisterImpl_22_v_; +text: .text%__1cSstkL_to_regD_2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSstkL_to_regD_0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_; +text: .text%__1cSmembar_releaseNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJimmI0OperFclone6kM_pnIMachOper__; +text: .text%__1cNVM_DeoptimizeEdoit6M_v_; +text: .text%__1cNloadConL0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeFclone6kM_pnENode__; +text: .text%__1cQsubL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMVirtualSpace2t6M_v_; +text: .text%__1cRsarI_reg_imm5NodeFclone6kM_pnENode__; +text: .text%__1cWloadConI_x41f00000NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQdivI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMVirtualSpaceKinitialize6MnNReservedSpace_I_i_; +text: .text%__1cZregDHi_regDLo_to_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsubI_zero_regNodeFclone6kM_pnENode__; +text: .text%__1cXconvI2D_regDHi_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKloadUBNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNTemplateTableEidiv6F_v_; +text: .text%__1cOcmovLI_regNodeFclone6kM_pnENode__; +text: .text%__1cQstkI_to_regINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUregI_to_stkLHi_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLMoveL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLConvD2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLConvF2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLConvF2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_; +text: .text%__1cLMoveF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLOptoRuntimeIl2f_Type6F_pknITypeFunc__; +text: .text%__1cOMacroAssemblerUcalc_mem_param_words6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MnITosState_pnMRegisterImpl_3_v_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_; +text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_; +text: .text%__1cZInterpreterMacroAssemblerSupdate_mdp_for_ret6MnITosState_pnMRegisterImpl__v_; +text: .text%__1cOLibraryCallKitVinline_fp_conversions6MnIciMethodLIntrinsicId__i_; +text: .text%__1cZInterpreterMacroAssemblerPset_mdp_flag_at6MipnMRegisterImpl_2_v_; +text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_; +text: .text%__1cUInterpreterGeneratorLlock_method6M_v_; +text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_26MpCpnMRegisterImpl_rnFLabel__v_; +text: 
.text%__1cZInterpreterMacroAssemblerQthrow_if_not_1_x6MnJAssemblerJCondition_rnFLabel__v_; +text: .text%__1cZInterpreterMacroAssemblerZget_4_byte_integer_at_bcp6MipnMRegisterImpl_2n0AKsetCCOrNot__v_; +text: .text%__1cUInterpreterGeneratorVgenerate_native_entry6Mi_pC_; +text: .text%__1cCosHrealloc6FpvI_1_; +text: .text%__1cUConstantOopReadValuePis_constant_oop6kM_i_: debugInfo.o; +text: .text%__1cKScopeValuePis_constant_int6kM_i_: debugInfo.o; +text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: arrayKlassKlass.o; +text: .text%__1cFStateO_sub_Op_CMoveL6MpknENode__v_; +text: .text%__1cZInterpreterMacroAssemblerRaccess_local_long6MpnMRegisterImpl_2_v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cIPSOldGenPinitialize_work6Mpkci_v_; +text: .text%__1cCosIjvm_path6Fpci_v_; +text: .text%__1cCosNsigexitnum_pd6F_i_; +text: .text%__1cCosScurrent_process_id6F_i_; +text: .text%__1cINegFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cCosPuncommit_memory6FpcI_i_; +text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_; +text: .text%__1cLConvL2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvF2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvD2FNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__; +text: .text%__1cZInterpreterMacroAssemblerSaccess_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerTaccess_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerPstore_local_int6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerPstore_local_ptr6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerQstore_local_long6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerRstore_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerSstore_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cCosWactive_processor_count6F_i_; +text: .text%__1cSReferenceProcessor2t6MnJMemRegion_iii_v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: jniFastGetField_sparc.o; +text: .text%__1cRcheck_if_clipping6FpknKRegionNode_rpnGIfNode_5_i_: cfgnode.o; +text: .text%__1cTAbstractInterpreterKinitialize6F_v_; +text: .text%__1cIciObjectOis_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstanceKlass.o; +text: .text%__1cPmake_new_frames6FpnOMacroAssembler_i_v_: runtime_sparc.o; +text: .text%jni_NewWeakGlobalRef: jni.o; +text: .text%__1cGatomll6Fpkcpx_i_: arguments.o; +text: .text%__1cRComputeEntryStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cTMaskFillerForNativeLpass_object6M_v_: oopMapCache.o; +text: .text%__1cOGenerateOopMapTadd_to_ref_init_set6Mi_v_; +text: .text%__1cUGcThreadCountClosureJdo_thread6MpnGThread__v_; +text: .text%__1cNinstanceKlassSremove_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cOPSVirtualSpace2t6M_v_; +text: .text%jni_IsInstanceOf: jni.o; +text: .text%__1cGThreadMis_VM_thread6kM_i_: gcTaskThread.o; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: oopMapCache.o; +text: .text%__1cMGCTaskThreadDrun6M_v_; +text: .text%__1cJCodeCachebGmake_marked_nmethods_not_entrant6F_v_; +text: 
.text%__1cTMaskFillerForNativeJpass_long6M_v_: oopMapCache.o; +text: .text%jni_CallStaticVoidMethodV: jni.o; +text: .text%__1cKCodeBufferGresize6M_v_; +text: .text%jni_CallStaticBooleanMethod: jni.o; +text: .text%__1cMGCTaskThread2t6MpnNGCTaskManager_II_v_; +text: .text%__1cOtailjmpIndNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMGCTaskThreadFstart6M_v_; +text: .text%__1cNStubGenerator2t6MpnKCodeBuffer_i_v_: stubGenerator_sparc.o; +text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_; +text: .text%__1cRFloatRegisterImplIencoding6kMn0AFWidth__i_: interpreter_sparc.o; +text: .text%__1cCosbCis_thread_cpu_time_supported6F_i_; +text: .text%__1cJArgumentsXPropertyList_unique_add6FppnOSystemProperty_pkcpc_v_; +text: .text%__1cQObjectStartArrayKinitialize6MnJMemRegion__v_; +text: .text%__1cQObjectStartArraySset_covered_region6MnJMemRegion__v_; +text: .text%__1cUGenericGrowableArrayGgrow646Mi_v_; +text: .text%__1cZInterpreterMacroAssemblerbAdispatch_next_noverify_oop6MnITosState_i_v_; +text: .text%__1cOMacroAssemblerDret6Mi_v_: stubGenerator_sparc.o; +text: .text%__1cRCollectorCounters2t6Mpkci_v_; +text: .text%__1cFParseDl2f6M_v_; +text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_; +text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_; +text: .text%__1cPGCMemoryManager2t6M_v_; +text: .text%__1cRComputeEntryStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cOPSVirtualSpaceKinitialize6MnNReservedSpace_I_i_; +text: .text%__1cSInterpreterRuntimeWcreate_klass_exception6FpnKJavaThread_pcpnHoopDesc__v_; +text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o; +text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_; +text: .text%__1cSInterpreterRuntimeSupdate_mdp_for_ret6FpnKJavaThread_i_v_; +text: .text%__1cPorL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_; +text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_: interpreterRT_sparc.o; +text: .text%__1cCosHSolarisQsignal_sets_init6F_v_; +text: .text%__1cCosScreate_main_thread6FpnGThread__i_; +text: .text%__1cCosbDallocate_thread_local_storage6F_i_; +text: .text%__1cUInterpreterGeneratorVrestore_native_result6M_v_; +text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_; +text: .text%__1cHMatcherbDinterpreter_frame_pointer_reg6F_nHOptoRegEName__; +text: .text%__1cLconvP2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVshrL_reg_imm6_L2INodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cCosGstrdup6Fpkc_pc_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_throw_exception6M_v_; +text: .text%__1cCosLinit_random6Fl_v_; +text: .text%__1cCosNset_boot_path6Fcc_i_; +text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_; +text: .text%__1cCosXis_server_class_machine6F_i_; +text: .text%__1cCosXterminate_signal_thread6F_v_; +text: .text%__1cCosLsignal_init6F_v_; +text: .text%__1cKTypeOopPtrCeq6kMpknEType__i_; +text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o; +text: .text%__1cOtailjmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_; +text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_StackOverflowError_handler6M_pC_; +text: 
.text%__1cCosbDinit_system_properties_values6F_v_; +text: .text%__1cCosPphysical_memory6F_X_; +text: .text%__1cHvm_exit6Fi_v_; +text: .text%__1cLbefore_exit6FpnKJavaThread__v_; +text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_; +text: .text%__1cSThreadLocalStorageHpd_init6F_v_; +text: .text%__1cVMoveF2I_stack_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveL2D_stack_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWinvocationCounter_init6F_v_; +text: .text%__1cRInvocationCounterMreinitialize6Fi_v_; +text: .text%__1cKTypeOopPtrEmake6FnHTypePtrDPTR_i_pk0_; +text: .text%__1cKTypeOopPtrFxdual6kM_pknEType__; +text: .text%__1cFParseMjump_if_join6MpnENode_2_2_; +text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cSinstanceKlassKlassUoop_is_instanceKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlassKlass.o; +text: .text%__1cLconvP2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cETypeRInitialize_shared6FpnHCompile__v_; +text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_; +text: .text%__1cVInterfaceSupport_init6F_v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpnMRegisterImpl_pC2_v_; +text: .text%__1cFParseNfetch_monitor6MipnENode_2_2_; +text: .text%__1cPGenerationSizerQinitialize_flags6M_v_: parallelScavengeHeap.o; +text: .text%__1cbCTwoGenerationCollectorPolicyMrem_set_name6M_nJGenRemSetEName__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapEkind6M_nNCollectedHeapEName__: parallelScavengeHeap.o; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_normal6MnITosState__v_; +text: .text%__1cJTimeStampMmilliseconds6kM_x_; +text: .text%__1cDhpiZinitialize_socket_library6F_i_; +text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_; +text: .text%__1cWInlineCacheBuffer_init6F_v_; +text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_; +text: .text%__1cPGlobalTLABStats2t6M_v_; +text: .text%__1cLicache_init6F_v_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: parse1.o; +text: .text%__1cSThreadLocalStorageEinit6F_v_; +text: .text%__1cNThreadServiceEinit6F_v_; +text: .text%__1cTICacheStubGeneratorVgenerate_icache_flush6MppFpCii_i_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: indexSet.o; +text: .text%__1cPvm_init_globals6F_v_; +text: .text%__1cMinit_globals6F_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_sparc_expand.o; +text: .text%__1cMexit_globals6F_v_; +text: .text%__1cSset_init_completed6F_v_; +text: .text%__1cNinstanceKlassZrelease_C_heap_structures6M_v_; +text: .text%__1cJTimeStampJupdate_to6Mx_v_; +text: .text%__1cUParallelScavengeHeapItop_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o; +text: .text%__1cOisT2_libthread6F_i_; +text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interp_masm_sparc.o; +text: .text%__1cQinterpreter_init6F_v_; +text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o; +text: .text%__1cNpriocntl_stub6FinGidtype_lipc_l_: 
os_solaris.o; +text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbEset_entry_points_for_all_bytes6M_v_; +text: .text%__1cCosHSolarisRmpss_sanity_check6F_v_; +text: .text%__1cCosLsignal_wait6F_i_; +text: .text%__1cVcheck_pending_signals6Fi_i_: os_solaris.o; +text: .text%__1cCosNsignal_notify6Fi_v_; +text: .text%__1cCosOsignal_init_pd6F_v_; +text: .text%__1cCosHSolarisPinit_signal_mem6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o; +text: .text%__1cCosSget_temp_directory6F_pkc_; +text: .text%__1cCosHSolarisOlibthread_init6F_v_; +text: .text%__1cUParallelScavengeHeapIend_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapEheap6F_p0_; +text: .text%__1cUParallelScavengeHeapNgc_threads_do6kMpnNThreadClosure__v_; +text: .text%__1cUParallelScavengeHeapYpermanent_object_iterate6MpnNObjectClosure__v_; +text: .text%__1cKcmpOpFOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cUParallelScavengeHeapMmax_capacity6kM_I_; +text: .text%__1cUParallelScavengeHeapPpost_initialize6M_v_; +text: .text%__1cUParallelScavengeHeapKinitialize6M_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o; +text: .text%__1cZInterpreterMacroAssemblerbFset_method_data_pointer_for_bcp6M_v_; +text: .text%__SLIP.DELETER__C: ostream.o; +text: .text%__1cMostream_exit6F_v_; +text: .text%__1cQostream_init_log6F_v_; +text: .text%__1cMostream_init6F_v_; +text: .text%__1cCosXnon_memory_address_word6F_pc_; +text: .text%__1cCosGinit_26F_i_; +text: .text%__1cCosEinit6F_v_; +text: .text%__1cCosHSolarisUsynchronization_init6F_v_; +text: .text%__1cVjni_GetLongField_addr6F_pC_; +text: .text%__1cNIdealLoopTreeQsplit_outer_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cRLowMemoryDetectorKinitialize6F_v_; +text: .text%__1cNReservedSpace2t6MIIipc_v_; +text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_; +text: .text%__1cXLowMemoryDetectorThreadbCis_hidden_from_external_view6kM_i_: lowMemoryDetector.o; +text: .text%__1cNReservedSpaceUpage_align_size_down6FI_I_; +text: .text%__1cNReservedSpaceYallocation_align_size_up6FI_I_; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cGThreadMis_VM_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cTloadL_unalignedNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: machnode.o; +text: .text%__1cPmanagement_init6F_v_; +text: .text%__1cOvmStructs_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o; +text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_; +text: .text%__1cKManagementKinitialize6FpnGThread__v_; +text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_; +text: .text%__1cIVMThreadGcreate6F_v_; +text: .text%__1cIVMThreadDrun6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o; +text: .text%__1cOBasicHashtable2t6Mii_v_: loaderConstraints.o; +text: .text%__1cLJvmtiExportNpost_vm_start6F_v_; +text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_death6F_v_; +text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_; +text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o; +text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassUoop_is_instanceKlass6kM_i_: klassKlass.o; +text: 
.text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: klassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: klassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: klassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: klassKlass.o; +text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: klassKlass.o; +text: .text%__1cOLibraryCallKitWinline_native_hashcode6Mii_i_; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: library_call.o; +text: .text%__1cVLoaderConstraintTable2t6Mi_v_; +text: .text%__1cQregL_to_stkLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHRetDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_; +text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o; +text: .text%__1cPVM_Version_init6F_v_; +text: .text%__1cKVM_VersionKinitialize6F_v_; +text: .text%__1cHRetDataJfixup_ret6MinQmethodDataHandle__pC_; +text: .text%__1cHRetDataKis_RetData6M_i_: methodDataOop.o; +text: .text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cLmethodKlassOset_alloc_size6MI_v_: methodKlass.o; +text: .text%__1cQvtableStubs_init6F_v_; +text: .text%__1cKi0RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKg1RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFVTuneEexit6F_v_; +text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodKlass.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodLiveness.o; +text: .text%__1cMMutableSpaceOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cKvtune_init6F_v_; +text: .text%__1cKmutex_init6F_v_; +text: .text%__1cQaccessFlags_init6F_v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC222_v_; +text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_; +text: .text%__1cOmarksweep_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o; +text: .text%__1cHMatcherVfind_callee_arguments6FpnNsymbolOopDesc_ipi_pnLRegPair__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: matcher.o; +text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_; +text: .text%__1cNMemoryManagerbDget_psScavenge_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cNMemoryManagerbEget_psMarkSweep_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cQPSGenerationPool2t6MpnIPSOldGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cQPSGenerationPool2t6MpnJPSPermGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cUEdenMutableSpacePool2t6MpnKPSYoungGen_pnMMutableSpace_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cYSurvivorMutableSpacePool2t6MpnKPSYoungGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cMCodeHeapPool2t6MpnICodeHeap_pkci_v_; +text: .text%__1cHVM_ExitEdoit6M_v_; +text: .text%__1cHVM_ExitEname6kM_pkc_: vm_operations.o; +text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_; +text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o; +text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_: methodDataKlass.o; +text: 
.text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodDataKlass.o; +text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_; +text: .text%__1cLstoreF0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%JNI_CreateJavaVM; +text: .text%__1cXonStackReplacement_init6F_v_; +text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_; +text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cTtypeArrayKlassKlassVoop_is_typeArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: typeArrayKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlassKlass.o; +text: .text%__1cIUniversePcheck_alignment6FIIpkc_v_; +text: .text%__1cIUniverseHgenesis6FpnGThread__v_; +text: .text%__1cVquicken_jni_functions6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o; +text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cQdivD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_; +text: .text%__1cQjavaClasses_init6F_v_; +text: .text%jni_ToReflectedMethod: jni.o; +text: .text%__1cQsubD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cYjni_GetBooleanField_addr6F_pC_; +text: .text%__1cVjni_GetByteField_addr6F_pC_; +text: .text%__1cQaddF_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cVjni_GetCharField_addr6F_pC_; +text: .text%__1cWjni_GetShortField_addr6F_pC_; +text: .text%__1cUjni_GetIntField_addr6F_pC_; +text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cWjni_GetFloatField_addr6F_pC_; +text: .text%__1cRsarL_reg_imm6NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cXjni_GetDoubleField_addr6F_pC_; +text: .text%__1cQshlI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: objArrayKlass.o; +text: .text%JVM_InitializeSocketLibrary; +text: .text%JVM_RegisterUnsafeMethods; +text: .text%__1cOcmovLI_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovLI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovDF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_Socket; +text: .text%__1cbEinitialize_converter_functions6F_v_; +text: .text%JVM_SupportsCX8; +text: .text%__1cOcmovIF_immNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o; +text: 
.text%__1cUJvmtiEventControllerIvm_start6F_v_; +text: .text%__1cUJvmtiEventControllerHvm_init6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_death6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o; +text: .text%__1cKstfSSFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVverificationType_init6F_v_; +text: .text%__1cVverificationType_exit6F_v_; +text: .text%__1cLJvmtiExportRenter_start_phase6F_v_; +text: .text%__1cLJvmtiExportQenter_live_phase6F_v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlass.o; +text: .text%__1cSmulL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_; +text: .text%__1cSmulI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_; +text: .text%__1cNuniverse_init6F_i_; +text: .text%__1cOuniverse2_init6F_v_; +text: .text%__1cSuniverse_post_init6F_v_; +text: .text%__1cQjni_handles_init6F_v_; +text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: objArrayKlassKlass.o; +text: .text%Unsafe_SetNativeLong; +text: .text%JVM_InitProperties; +text: .text%JVM_Halt; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlassKlass.o; +text: .text%Unsafe_FreeMemory; +text: .text%Unsafe_PageSize; +text: .text%__1cSobjArrayKlassKlassUoop_is_objArrayKlass6kM_i_: objArrayKlassKlass.o; +text: .text%JVM_MaxMemory; +text: .text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%JVM_GetClassDeclaredMethods; +text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__; +text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_; +text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_; +text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_; +text: .text%__1cQSystemDictionarybDinitialize_basic_type_mirrors6FpnGThread__v_; +text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_; +text: .text%__1cRciArrayKlassKlassUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_; +text: .text%__1cLClassLoaderQload_zip_library6F_v_; +text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_; +text: .text%__1cLClassLoaderKinitialize6F_v_; +text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_; +text: .text%__1cMPeriodicTask2T5B6M_v_; +text: .text%__1cQclassLoader_init6F_v_; +text: .text%__1cMPeriodicTaskJdisenroll6M_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: classLoader.o; +text: .text%__1cTClassLoadingServiceEinit6F_v_; +text: .text%__1cTClassLoadingServiceVnotify_class_unloaded6FpnNinstanceKlass_i_v_; +text: .text%__1cMFastLockNodeLis_FastLock6kM_pk0_: classes.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: regmask.o; +text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_; +text: .text%__1cLOptoRuntimeUmultianewarray1_Type6F_pknITypeFunc__; +text: .text%__1cVRegistersForDebuggingRrestore_registers6FpnOMacroAssembler_pnMRegisterImpl__v_: assembler_sparc.o; +text: .text%__1cVRegistersForDebuggingOsave_registers6FpnOMacroAssembler__v_: assembler_sparc.o; +text: .text%__1cJBytecodesKinitialize6F_v_; +text: 
.text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_; +text: .text%__1cObytecodes_init6F_v_; +text: .text%__1cLOptoRuntimeIgenerate6FpnFciEnv__v_; +text: .text%__1cJBytecodesNpd_initialize6F_v_; +text: .text%__1cHCompileRpd_compiler2_init6F_v_; +text: .text%__1cKC2CompilerKinitialize6M_v_; +text: .text%__1cMTailJumpNode2t6MpnENode_22222_v_; +text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_; +text: .text%__1cWResolveOopMapConflictsOreport_results6kM_i_: rewriter.o; +text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_I_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: cfgnode.o; +text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o; +text: .text%__1cMciKlassKlassEmake6F_p0_; +text: .text%__1cIciMethodMvtable_index6M_i_; +text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_; +text: .text%__1cJLoadFNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cNTemplateTableGsipush6F_v_; +text: .text%__1cQUncommonTrapBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNTemplateTableGldc2_w6F_v_; +text: .text%__1cNExceptionBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNTemplateTableFiload6F_v_; +text: .text%__1cNTemplateTableLfast_iload26F_v_; +text: .text%__1cNTemplateTableKfast_iload6F_v_; +text: .text%__1cNTemplateTableFlload6F_v_; +text: .text%__1cNTemplateTableFfload6F_v_; +text: .text%__1cNTemplateTableFdload6F_v_; +text: .text%__1cNTemplateTableFaload6F_v_; +text: .text%__1cNTemplateTableKwide_iload6F_v_; +text: .text%__1cNTemplateTableKwide_lload6F_v_; +text: .text%__1cNTemplateTableKwide_fload6F_v_; +text: .text%__1cNTemplateTableKwide_dload6F_v_; +text: .text%__1cNTemplateTableKwide_aload6F_v_; +text: .text%__1cNTemplateTableGiaload6F_v_; +text: .text%__1cNTemplateTableGlaload6F_v_; +text: .text%__1cNTemplateTableGfaload6F_v_; +text: .text%__1cNTemplateTableGdaload6F_v_; +text: .text%__1cNTemplateTableGbipush6F_v_; +text: .text%__1cLMoveF2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLMoveL2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_; +text: .text%__1cHOrLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cHOrLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNTemplateTableF_goto6F_v_; +text: .text%__1cNTemplateTableGgoto_w6F_v_; +text: .text%__1cNTemplateTableFjsr_w6F_v_; +text: .text%__1cNTemplateTableDjsr6F_v_; +text: .text%__1cXreferenceProcessor_init6F_v_; +text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_; +text: .text%__1cStemplateTable_init6F_v_; +text: .text%__1cNTemplateTableNpd_initialize6F_v_; +text: .text%__1cURecompilationMonitorbGstart_recompilation_monitor_task6F_v_; +text: .text%__1cNTemplateTableDnop6F_v_; +text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_; +text: .text%__1cNTemplateTableLaconst_null6F_v_; +text: .text%__1cKPSYoungGenbCreset_survivors_after_shrink6M_v_; +text: .text%__1cKPSYoungGenQlimit_gen_shrink6MI_I_; +text: .text%__1cKPSYoungGenRavailable_to_live6M_I_; +text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_; +text: .text%__1cLOptoRuntimeUmultianewarray2_Type6F_pknITypeFunc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_sparc_pipeline.o; +text: .text%__1cNcarSpace_init6F_v_; +text: .text%__1cUAdjoiningGenerations2t6MnNReservedSpace_IIIIIII_v_; +text: .text%__1cWAdjoiningVirtualSpaces2t6MnNReservedSpace_III_v_; +text: .text%__1cOchunkpool_init6F_v_; +text: 
.text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_; +text: .text%__1cJArgumentsWinit_system_properties6F_v_; +text: .text%__1cINegFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMSysClassPathPexpand_endorsed6M_v_; +text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_; +text: .text%__1cJArgumentsVprocess_settings_file6Fpkcii_i_; +text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_; +text: .text%__1cJArgumentsZcheck_vm_args_consistency6F_i_; +text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_; +text: .text%__1cJArgumentsWparse_each_vm_init_arg6FpknOJavaVMInitArgs_pnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsVfinalize_vm_init_args6FpnMSysClassPath_i_i_; +text: .text%__1cLStatSamplerGengage6F_v_; +text: .text%__1cNStubGeneratorbNgenerate_flush_callers_register_windows6M_pC_: stubGenerator_sparc.o; +text: .text%__1cSstubRoutines_init16F_v_; +text: .text%__1cSstubRoutines_init26F_v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerIjumpl_to6MrnHAddress_pnMRegisterImpl_i_v_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbIgenerate_handler_for_unsafe_access6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorSgenerate_test_stop6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbIgenerate_copy_words_aligned8_lower6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbJgenerate_copy_words_aligned8_higher6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbBgenerate_set_words_aligned86M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbCgenerate_zero_words_aligned86M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbEgenerate_partial_subtype_check6M_pC_: stubGenerator_sparc.o; +text: .text%__1cISubFNodeDsub6kMpknEType_3_3_; +text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_; +text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_; +text: .text%__1cLStatSamplerHdestroy6F_v_; +text: .text%__1cLStatSamplerJdisengage6F_v_; +text: .text%__1cJArgumentsbNparse_java_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cNRegisterSaverYrestore_result_registers6FpnOMacroAssembler__v_; +text: .text%__1cLOptoRuntimeYgenerate_arraycopy_stubs6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o; +text: .text%__1cORuntimeServiceYrecord_application_start6F_v_; +text: .text%__1cOMacroAssemblerNset_vm_result6MpnMRegisterImpl__v_; +text: .text%__1cORuntimeServiceEinit6F_v_; +text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeSfetch_monitor_Type6F_pknITypeFunc__; +text: .text%__1cOMacroAssemblerVverify_oop_subroutine6M_v_; +text: .text%__1cOMacroAssemblerPstop_subroutine6M_v_; +text: .text%__1cOMacroAssemblerElcmp6MpnMRegisterImpl_2222_v_; +text: .text%__1cOMacroAssemblerElneg6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerElshl6MpnMRegisterImpl_22222_v_; +text: .text%__1cOMacroAssemblerElshr6MpnMRegisterImpl_22222_v_; +text: 
.text%__1cOMacroAssemblerFlushr6MpnMRegisterImpl_22222_v_; +text: .text%__1cLOptoRuntimeUmultianewarray5_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray4_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray3_Type6F_pknITypeFunc__; +text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: symbolKlass.o; +text: .text%__1cJArgumentsbSparse_java_tool_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_; +text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_; +text: .text%__1cNSharedRuntimebIinitialize_StrictMath_entry_points6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o; +text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: arrayKlassKlass.o; +text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: arrayKlassKlass.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC222i_v_; +text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpnMRegisterImpl_pCi_v_; +text: .text%__1cLOptoRuntimebPgenerate_polling_page_return_handler_blob6F_v_; +text: .text%__1cLOptoRuntimebSgenerate_polling_page_safepoint_handler_blob6F_v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: symbolKlass.o; +text: .text%__1cLOptoRuntimebPgenerate_illegal_instruction_handler_blob6F_v_; +text: .text%__1cLOptoRuntimebBgenerate_uncommon_trap_blob6F_v_; +text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_; +text: .text%__1cLOptoRuntimeWfill_in_exception_blob6F_v_; +text: .text%__1cLOptoRuntimeUsetup_exception_blob6F_v_; +text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_: symbolKlass.o; +text: .text%__1cNTemplateTableGaaload6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psAdaptiveSizePolicy.o; +text: .text%__1cKCMoveDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMacroAssemblerCfb6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: templateTable_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: tenuredGeneration.o; +text: .text%__1cUPSAdaptiveSizePolicy2t6MIIIIIddI_v_; +text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constMethodKlass.o; +text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_; +text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: constantPoolKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: constantPoolKlass.o; +text: .text%__1cOBasicHashtable2t6Mii_v_: placeholders.o; +text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constantPoolKlass.o; +text: .text%__1cQPlaceholderTable2t6Mi_v_; +text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o; +text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o; +text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o; +text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o; +text: .text%__1cbAPSGCAdaptivePolicyCounters2t6MpkciipnUPSAdaptiveSizePolicy__v_; +text: 
.text%__1cNTemplateTableRfast_invokevfinal6Fi_v_; +text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: compiledICHolderKlass.o; +text: .text%__1cNTemplateTableNinvokespecial6Fi_v_; +text: .text%__1cNTemplateTableMinvokestatic6Fi_v_; +text: .text%__1cNTemplateTablebDinvokeinterface_object_method6FpnMRegisterImpl_222_v_; +text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_; +text: .text%__1cNTemplateTableE_new6F_v_; +text: .text%__1cNTemplateTableInewarray6F_v_; +text: .text%__1cNTemplateTableJanewarray6F_v_; +text: .text%__1cNTemplateTableLarraylength6F_v_; +text: .text%__1cNTemplateTableJcheckcast6F_v_; +text: .text%__1cNTemplateTableKinstanceof6F_v_; +text: .text%__1cNTemplateTableL_breakpoint6F_v_; +text: .text%__1cNTemplateTableGathrow6F_v_; +text: .text%__1cNTemplateTableMmonitorenter6F_v_; +text: .text%__1cNTemplateTableLmonitorexit6F_v_; +text: .text%__1cNTemplateTableEwide6F_v_; +text: .text%__1cNTemplateTableOmultianewarray6F_v_; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: templateTable_sparc.o; +text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_; +text: .text%__1cTcompilerOracle_init6F_v_; +text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__; +text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_; +text: .text%__1cZCompiledArgumentOopFinderRhandle_oop_offset6M_v_: frame.o; +text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_; +text: .text%__1cHGCStats2t6M_v_; +text: .text%__1cNGCTaskManager2t6MI_v_; +text: .text%__1cNGCTaskManagerKinitialize6M_v_; +text: .text%__1cNGCTaskManagerKthreads_do6MpnNThreadClosure__v_; +text: .text%__1cPPerfDataManagerHdestroy6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o; +text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__I_; +text: .text%__1cWResolveOopMapConflictsUdo_potential_rewrite6MpnGThread__nMmethodHandle__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o; +text: .text%__1cRcheck_basic_types6F_v_; +text: .text%__1cSCommandLineFlagsExKuintxAtPut6FnXCommandLineFlagWithType_I_v_; +text: .text%__1cOThreadCriticalKinitialize6F_v_; +text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_; +text: .text%__1cICodeHeap2t6M_v_; +text: .text%__1cICodeHeapHreserve6MIII_i_; +text: .text%__1cDhpiKinitialize6F_i_; +text: .text%__1cMPerfDataList2T6M_v_; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: cpCacheKlass.o; +text: .text%__1cNWatcherThreadDrun6M_v_; +text: .text%__1cNWatcherThreadEstop6F_v_; +text: .text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: cpCacheKlass.o; +text: .text%__1cFStateO_sub_Op_CMoveD6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_MoveF2I6MpknENode__v_; +text: .text%__1cKDictionary2t6Mi_v_; +text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: dictionary.o; +text: .text%__1cNeventlog_init6F_v_; +text: .text%__1cScheck_ThreadShadow6F_v_; +text: .text%__1cOtailjmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o; +text: .text%__1cFframeVinterpreter_frame_mdp6kM_pC_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: phase.o; +text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_; +text: .text%__1cKPerfMemoryUcreate_memory_region6FI_v_; +text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: 
perfMemory_solaris.o; +text: .text%__1cFframebAoops_compiled_arguments_do6MnMsymbolHandle_ipknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cPperfMemory_exit6F_v_; +text: .text%__1cPperfMemory_init6F_v_; +text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_; +text: .text%__1cNTemplateTableHfastore6F_v_; +text: .text%__1cNTemplateTableHdastore6F_v_; +text: .text%__1cNTemplateTableHaastore6F_v_; +text: .text%__1cNTemplateTableHbastore6F_v_; +text: .text%__1cNTemplateTableHsastore6F_v_; +text: .text%__1cOcodeCache_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o; +text: .text%__1cNTemplateTableDpop6F_v_; +text: .text%__1cNTemplateTableEpop26F_v_; +text: .text%__1cNTemplateTableDdup6F_v_; +text: .text%__1cNTemplateTableGdup_x16F_v_; +text: .text%__1cNTemplateTableGdup_x26F_v_; +text: .text%__1cNTemplateTableEdup26F_v_; +text: .text%__1cNTemplateTableHdup2_x16F_v_; +text: .text%__1cNTemplateTableHdup2_x26F_v_; +text: .text%__1cNTemplateTableEswap6F_v_; +text: .text%__1cNCollectedHeap2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o; +text: .text%__1cNTemplateTableEirem6F_v_; +text: .text%__1cNTemplateTableElmul6F_v_; +text: .text%__1cNTemplateTableHlastore6F_v_; +text: .text%__1cNTemplateTableGbaload6F_v_; +text: .text%__1cNTemplateTableGcaload6F_v_; +text: .text%__1cNTemplateTableMfast_icaload6F_v_; +text: .text%__1cNTemplateTableGsaload6F_v_; +text: .text%__1cKPSYoungGenPinitialize_work6M_v_; +text: .text%__1cKPSYoungGenKinitialize6MnNReservedSpace_I_v_; +text: .text%__1cKPSYoungGenYinitialize_virtual_space6MnNReservedSpace_I_v_; +text: .text%__1cKPSYoungGen2t6MIII_v_; +text: .text%__1cOPSVirtualSpaceJshrink_by6MI_i_; +text: .text%__1cNTemplateTableHaload_06F_v_; +text: .text%__1cNTemplateTableGistore6F_v_; +text: .text%__1cNTemplateTableGlstore6F_v_; +text: .text%__1cNTemplateTableGfstore6F_v_; +text: .text%__1cNTemplateTableGdstore6F_v_; +text: .text%__1cNTemplateTableGastore6F_v_; +text: .text%__1cNTemplateTableLwide_istore6F_v_; +text: .text%__1cNTemplateTableLwide_lstore6F_v_; +text: .text%__1cNTemplateTableLwide_fstore6F_v_; +text: .text%__1cNTemplateTableLwide_dstore6F_v_; +text: .text%__1cNTemplateTableLwide_astore6F_v_; +text: .text%__1cNTemplateTableHiastore6F_v_; +text: .text%__1cNTemplateTableEldiv6F_v_; +text: .text%__1cNTemplateTableLtableswitch6F_v_; +text: .text%__1cNTemplateTableMlookupswitch6F_v_; +text: .text%__1cNTemplateTableRfast_linearswitch6F_v_; +text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_; +text: .text%__1cNCompileBrokerQcompilation_init6FpnQAbstractCompiler__v_; +text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_; +text: .text%__1cJPSPermGen2t6MnNReservedSpace_IIIIpkci_v_; +text: .text%__1cNCompileBrokerQset_should_block6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o; +text: .text%__1cNTemplateTableIgetfield6Fi_v_; +text: .text%__1cNTemplateTableJgetstatic6Fi_v_; +text: .text%__1cIPSOldGenKinitialize6MnNReservedSpace_Ipkci_v_; +text: .text%__1cIPSOldGen2t6MIIIpkci_v_; +text: .text%__1cIPSOldGen2t6MnNReservedSpace_IIIIpkci_v_; +text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o; +text: .text%__1cNTemplateTableIputfield6Fi_v_; +text: .text%__1cNTemplateTableJputstatic6Fi_v_; +text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_: compiledICHolderKlass.o; +text: .text%__1cLPSMarkSweepKinitialize6F_v_; +text: .text%__1cNTemplateTableIwide_ret6F_v_; +text: .text%__1cNTemplateTableElrem6F_v_; +text: 
.text%__1cNTemplateTableElshl6F_v_; +text: .text%__1cNTemplateTableElshr6F_v_; +text: .text%__1cNTemplateTableFlushr6F_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_; +text: .text%__1cNTemplateTableEineg6F_v_; +text: .text%__1cNTemplateTableElneg6F_v_; +text: .text%__1cNTemplateTableEfneg6F_v_; +text: .text%__1cNTemplateTableEdneg6F_v_; +text: .text%__1cNTemplateTableEiinc6F_v_; +text: .text%__1cNTemplateTableJwide_iinc6F_v_; +text: .text%__1cKPSScavengeKinitialize6F_v_; +text: .text%__1cNTemplateTableElcmp6F_v_; +text: .text%__1cWcompilationPolicy_init6F_v_; +text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o; +text: .text%__1cOcompiler2_init6F_v_; +text: .text%__1cSPSPromotionManagerKinitialize6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psPromotionLAB.o; +text: .text%__1cNTemplateTableDret6F_v_; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/reorder_COMPILER2_sparcv9 2009-08-01 04:16:54.867278001 +0100 @@ -0,0 +1,7131 @@ +data = R0x2000; +text = LOAD ?RXO; + + +text: .text%__1cLOptoRuntimeLjshort_copy6Fph1L_v_; +text: .text%__1cLOptoRuntimeTarrayof_jshort_copy6FpnIHeapWord_2L_v_; +text: .text%__1cSPSPromotionManagerWcopy_to_survivor_space6MpnHoopDesc__2_; +text: .text%__1cNinstanceKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQIndexSetIteratorQadvance_and_next6M_I_; +text: .text%__1cJMarkSweepO_mark_and_push6FppnHoopDesc__v_; +text: .text%__1cCosOjavaTimeMillis6F_x_; +text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cIMachNodeHis_Mach6M_p0_: ad_sparc_misc.o; +text: .text%__1cIPhaseIFGIadd_edge6MII_i_; +text: .text%__1cJMarkSweepPmark_and_follow6FppnHoopDesc__v_; +text: .text%__1cMOopTaskQdDueueKpop_global6MrpnHoopDesc__i_; +text: .text%__1cQIndexSetIterator2t6MpnIIndexSet__v_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: ifg.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: ad_sparc_misc.o; +text: .text%__1cUGenericGrowableArrayLraw_at_grow6MipknEGrET__pv_; +text: .text%__1cENodeEjvms6kM_pnIJVMState__; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: ad_sparc_misc.o; +text: .text%__1cHRegMaskESize6kM_I_; +text: .text%__1cHRegMaskFis_UP6kM_i_; +text: .text%__1cIMachNodeNrematerialize6kM_i_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: classes.o; +text: .text%__1cIProjNodeHis_Proj6M_p0_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: classes.o; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: classes.o; +text: .text%__1cETypeDcmp6Fkpk03_i_; +text: .text%__1cENodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHlatency6MI_I_; +text: .text%__1cENodeHis_Copy6kM_I_: classes.o; +text: .text%__1cIIndexSetWalloc_block_containing6MI_pn0AIBitBlock__; +text: .text%__1cHRegMaskJis_bound16kM_i_; +text: .text%__1cDff16FI_i_; +text: .text%__1cXresource_allocate_bytes6FL_pc_; +text: .text%__1cIMachNodeJideal_reg6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: ad_sparc_misc.o; +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: classes.o; +text: .text%__1cHRegMaskJis_bound26kM_i_; +text: 
.text%__1cJVectorSet2R6MI_rnDSet__; +text: .text%__1cENodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRMachSpillCopyNodeMis_SpillCopy6M_p0_: ad_sparc.o; +text: .text%__1cIMachNodeGOpcode6kM_i_; +text: .text%__1cENodeGpinned6kM_i_: classes.o; +text: .text%__1cQIndexSetIteratorEnext6M_I_: chaitin.o; +text: .text%__1cITypeNodeLbottom_type6kM_pknEType__; +text: .text%__1cETypeFuhash6Fkpk0_i_; +text: .text%__1cJiRegIOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cNobjArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cHPhiNodeGis_Phi6M_p0_: cfgnode.o; +text: .text%__1cKTypeOopPtrFklass6kM_pnHciKlass__: type.o; +text: .text%__1cENodeIout_grow6MI_v_; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_: callnode.o; +text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_; +text: .text%__1cIIndexSetKinitialize6MI_v_; +text: .text%__1cQObjectStartArrayMobject_start6MpnIHeapWord__2_: cardTableExtension.o; +text: .text%__1cNRelocIteratorEnext6M_i_: relocInfo.o; +text: .text%__1cRMachSpillCopyNodeHis_Copy6kM_I_: ad_sparc.o; +text: .text%__1cEDictGInsert6Mpv1i_1_; +text: .text%__1cPOopTaskQdDueueSetFsteal6MipirpnHoopDesc__i_; +text: .text%__1cJCProjNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cOloadConI13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cJMultiNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeHadd_req6Mp0_v_; +text: .text%__1cIMachNodeHis_Mach6M_p0_: ad_sparc.o; +text: .text%__1cINodeHashQhash_find_insert6MpnENode__2_; +text: .text%__1cENodeGis_CFG6kM_i_: cfgnode.o; +text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cKRegionNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cJMarkSweepOIsAliveClosureLdo_object_b6MpnHoopDesc__i_: markSweep.o; +text: .text%__1cOloadConI13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHPhiNodeGOpcode6kM_i_; +text: .text%__1cOloadConI13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIProjNodeGis_CFG6kM_i_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: classes.o; +text: .text%__1cETypeIhashcons6M_pk0_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: classes.o; +text: .text%__1cOloadConI13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFArenaIcontains6kMpkv_i_; +text: .text%__1cENodeHis_Call6M_pnICallNode__: classes.o; +text: .text%__1cIProjNodeGpinned6kM_i_; +text: .text%__1cICallNodeKmatch_edge6kMI_I_; +text: .text%__1cOPhaseIdealLoopUbuild_loop_late_post6MpnENode_pk0_v_; +text: .text%__1cIProjNodeGOpcode6kM_i_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: cfgnode.o; +text: .text%__1cKbranchNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__: ad_sparc_misc.o; +text: .text%__1cNRelocIteratorEnext6M_i_: nmethod.o; +text: .text%__1cNIdealLoopTreeJis_member6kMpk0_i_; +text: .text%__1cWPSScavengeRootsClosureGdo_oop6MppnHoopDesc__v_: psTasks.o; +text: .text%__1cMPhaseChaitinKelide_copy6MpnENode_ipnFBlock_rnJNode_List_6i_i_; +text: .text%__1cWNode_Backward_IteratorEnext6M_pnENode__; +text: .text%__1cMPhaseChaitinTinterfere_with_live6MIpnIIndexSet__v_; +text: .text%__1cHCompileRvalid_bundle_info6MpknENode__i_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: ad_sparc.o; +text: .text%__1cGIfNodeGOpcode6kM_i_; +text: .text%__1cHCompileNnode_bundling6MpknENode__pnGBundle__; +text: .text%__1cIUniverseMnon_oop_word6F_pv_; +text: 
.text%__1cOPhaseIdealLoopYsplit_if_with_blocks_pre6MpnENode__2_; +text: .text%__1cOPhaseIdealLoopZsplit_if_with_blocks_post6MpnENode__v_; +text: .text%__1cETypeEmeet6kMpk0_2_; +text: .text%__1cDLRGOcompute_degree6kMr0_i_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: coalesce.o; +text: .text%__1cENode2t6MI_v_; +text: .text%__1cFArenaIArealloc6MpvLL_1_; +text: .text%__1cHTypeIntCeq6kMpknEType__i_; +text: .text%__1cENodeNrematerialize6kM_i_: classes.o; +text: .text%__1cMMachCallNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cKSchedulingLanti_do_def6MpnFBlock_pnENode_nHOptoRegEName_i_v_; +text: .text%__1cENodeHis_Copy6kM_I_: cfgnode.o; +text: .text%__1cOlower_pressure6FpnDLRG_IpnFBlock_pI4_v_: ifg.o; +text: .text%__1cIAddPNodeGOpcode6kM_i_; +text: .text%__1cKIfTrueNodeGOpcode6kM_i_; +text: .text%__1cIPipelineXfunctional_unit_latency6kMIpk0_I_; +text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cETypeLisa_oop_ptr6kM_i_; +text: .text%__1cITypeLongCeq6kMpknEType__i_; +text: .text%__1cETypeJsingleton6kM_i_; +text: .text%__1cMPhaseIterGVNNtransform_old6MpnENode__2_; +text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_; +text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_; +text: .text%__1cRMachSpillCopyNodeJideal_reg6kM_I_: ad_sparc.o; +text: .text%__1cHRegMaskMClearToPairs6M_v_; +text: .text%__1cKNode_ArrayGinsert6MIpnENode__v_; +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: cfgnode.o; +text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_: ad_sparc.o; +text: .text%__1cJloadPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMachNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%JVM_ArrayCopy; +text: .text%__1cIMachNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cOtypeArrayKlassQoop_is_typeArray6kM_i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: classes.o; +text: .text%__1cLIfFalseNodeGOpcode6kM_i_; +text: .text%__1cENodeEhash6kM_I_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: ad_sparc_misc.o; +text: .text%__1cSCallStaticJavaNodeGOpcode6kM_i_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: cfgnode.o; +text: .text%__1cIPhaseIFGQeffective_degree6kMI_i_; +text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_; +text: .text%__1cIPhaseIFGJre_insert6MI_v_; +text: .text%__1cIPhaseIFGLremove_node6MI_pnIIndexSet__; +text: .text%__1cIciObjectGequals6Mp0_i_; +text: .text%__1cWShouldNotReachHereNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cMMachProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cOPhaseIdealLoopEsort6MpnNIdealLoopTree_2_2_; +text: .text%__1cJiRegPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopopts.o; +text: .text%__1cMPhaseIterGVNWadd_users_to_worklist06MpnENode__v_; +text: .text%__1cDfh16FI_i_; +text: .text%__1cIIndexSetKfree_block6MI_v_; +text: .text%__1cHTypeIntJsingleton6kM_i_; +text: .text%__1cIParmNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cNMachIdealNodeErule6kM_I_: ad_sparc.o; +text: .text%__1cIBoolNodeGOpcode6kM_i_; +text: .text%__1cIConINodeGOpcode6kM_i_; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: classes.o; +text: .text%__1cNSharedRuntimeDl2f6Fx_f_; +text: .text%__1cHTypeIntEhash6kM_i_; +text: 
.text%__1cOPhaseIdealLoopOget_early_ctrl6MpnENode__2_; +text: .text%__1cJMultiNodeIis_Multi6M_p0_; +text: .text%__1cIPhaseGVNJtransform6MpnENode__2_; +text: .text%__1cHConNodeGOpcode6kM_i_; +text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__; +text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cYCallStaticJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: classes.o; +text: .text%__1cENodeXpartial_latency_of_defs6MrnLBlock_Array_rnNGrowableArray4CI___v_; +text: .text%__1cIciObjectEhash6M_i_; +text: .text%__1cXPipeline_Use_Cycle_Mask2L6Mi_r0_: ad_sparc_pipeline.o; +text: .text%__1cOPhaseIdealLoopUbuild_loop_tree_impl6MpnENode_i_i_; +text: .text%__1cENodeEgrow6MI_v_; +text: .text%__1cKRegionNodeGOpcode6kM_i_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: relocInfo.o; +text: .text%__1cMMachProjNodeGOpcode6kM_i_; +text: .text%__1cITypeNodeEhash6kM_I_; +text: .text%__1cITypeLongEhash6kM_i_; +text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cMPhaseChaitinKbias_color6MrnDLRG_i_nHOptoRegEName__; +text: .text%__1cHRegMaskQis_aligned_Pairs6kM_i_; +text: .text%__1cRMachSpillCopyNodeLbottom_type6kM_pknEType__: ad_sparc.o; +text: .text%__1cFMutexGunlock6M_v_; +text: .text%__1cFStateRMachOperGenerator6MipnIMachNode_pnHCompile__pnIMachOper__; +text: .text%__1cIProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOPhaseIdealLoopThas_local_phi_input6MpnENode__2_; +text: .text%__1cJiRegIOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cRmethodDataOopDescHdata_at6Mi_pnLProfileData__; +text: .text%__1cNRelocIteratorEnext6M_i_: codeBlob.o; +text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__; +text: .text%__1cIIndexSetKinitialize6MIpnFArena__v_; +text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cKRegionNodeGpinned6kM_i_: classes.o; +text: .text%__1cENodeIIdentity6MpnOPhaseTransform__p0_; +text: .text%__1cENodeNis_block_proj6kM_pk0_; +text: .text%__1cPJavaFrameAnchorNmake_walkable6MpnKJavaThread__v_; +text: .text%__1cUGenericGrowableArrayPraw_at_put_grow6MipknEGrET_3_v_; +text: .text%__1cKJavaThreadPcook_last_frame6MnFframe__1_; +text: .text%__1cGcmpkey6Fpkv1_i_; +text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cUParallelScavengeHeapVlarge_typearray_limit6M_L_: parallelScavengeHeap.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FLipnGThread__pnIHeapWord__: typeArrayKlass.o; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cLTypeInstPtrEhash6kM_i_; +text: .text%__1cIBoolNodeHis_Bool6M_p0_: subnode.o; +text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cIPhaseCCPOtransform_once6MpnENode__2_; +text: .text%__1cOMethodLivenessKBasicBlockXcompute_gen_kill_single6MpnQciByteCodeStream__v_; +text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_; +text: .text%__1cbAfinal_graph_reshaping_impl6FpnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cKTypeAryPtrEhash6kM_i_; +text: .text%__1cENodeFIdeal6MpnIPhaseGVN_i_p0_; +text: .text%__1cYCallStaticJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFStateDDFA6MipknENode__i_; +text: .text%__1cENodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_; +text: 
.text%__1cOPhaseIdealLoopZremix_address_expressions6MpnENode__2_; +text: .text%__1cOtypeArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cMPipeline_UseMfull_latency6kMIrk0_I_; +text: .text%__1cOPhaseIdealLoopNget_late_ctrl6MpnENode_2_2_; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopnode.o; +text: .text%__1cHPhiNodeGpinned6kM_i_: cfgnode.o; +text: .text%__1cKbranchNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cENodeFclone6kM_p0_; +text: .text%__1cMloadConPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMMachProjNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cGciTypeEmake6FnJBasicType__p0_; +text: .text%__1cENodeKmatch_edge6kMI_I_; +text: .text%__1cMMachProjNodeLout_RegMask6kM_rknHRegMask__: classes.o; +text: .text%__1cICallNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: ad_sparc.o; +text: .text%__1cRMachSpillCopyNodeLout_RegMask6kM_rknHRegMask__: ad_sparc.o; +text: .text%__1cETypeFxmeet6kMpk0_2_; +text: .text%__1cJVectorSet2F6kMI_i_; +text: .text%__1cMOopTaskQdDueueOpop_local_slow6MInOTaskQdDueueSuperDAge__i_; +text: .text%__1cKRelocationNunpack_2_ints6Mri1_v_: relocInfo.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: ad_sparc_misc.o; +text: .text%__1cENodeQIdeal_DU_postCCP6MpnIPhaseCCP__p0_; +text: .text%__1cOoop_RelocationLunpack_data6M_v_; +text: .text%__1cICallNodeHis_Call6M_p0_: callnode.o; +text: .text%__1cIProjNodeEhash6kM_I_; +text: .text%__1cGIfNodeGpinned6kM_i_: classes.o; +text: .text%__1cRMachSpillCopyNodeKin_RegMask6kMI_rknHRegMask__: ad_sparc.o; +text: .text%__1cILRG_ListGextend6MII_v_; +text: .text%__1cIPipelinePoperand_latency6kMIpk0_I_; +text: .text%__1cRMachSafePointNodeEjvms6kM_pnIJVMState__: ad_sparc_misc.o; +text: .text%__1cICmpPNodeGOpcode6kM_i_; +text: .text%__1cIMachNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cIConLNodeGOpcode6kM_i_; +text: .text%__1cJStartNodeLbottom_type6kM_pknEType__; +text: .text%__1cRMachSpillCopyNodePoper_input_base6kM_I_: ad_sparc.o; +text: .text%__1cRmethodDataOopDescJnext_data6MpnLProfileData__2_; +text: .text%__1cOis_diamond_phi6FpnENode__i_: cfgnode.o; +text: .text%__1cHPhiNodeEhash6kM_I_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: callnode.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: callnode.o; +text: .text%__1cKTypeOopPtrJsingleton6kM_i_; +text: .text%__1cITypeNodeJideal_reg6kM_I_; +text: .text%__1cMloadConPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cGIfNodeFis_If6M_p0_: classes.o; +text: .text%__1cIMachNode2t6M_v_; +text: .text%__1cIAddINodeGOpcode6kM_i_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: memnode.o; +text: .text%__1cENodeHsize_of6kM_I_; +text: .text%__1cYCallStaticJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKNode_ArrayGremove6MI_v_; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: classes.o; +text: .text%__1cIIndexSet2t6Mp0_v_; +text: .text%__1cHMatcherKLabel_Root6MpknENode_pnFState_p16_6_; +text: .text%__1cHPhiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: classes.o; +text: 
.text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: cfgnode.o; +text: .text%__1cLTypeInstPtrCeq6kMpknEType__i_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: classes.o; +text: .text%__1cENodeSremove_dead_region6MpnIPhaseGVN_i_i_; +text: .text%__1cNSafePointNodeGpinned6kM_i_: callnode.o; +text: .text%__1cQUnique_Node_ListGremove6MpnENode__v_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: memnode.o; +text: .text%__1cJCProjNodeGis_CFG6kM_i_: cfgnode.o; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: ad_sparc_misc.o; +text: .text%__1cFMutexElock6M_v_; +text: .text%__1cIProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: classes.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: classes.o; +text: .text%__1cOmatch_into_reg6FpnENode_iii1_i_: matcher.o; +text: .text%__1cKTypeAryPtrCeq6kMpknEType__i_; +text: .text%__1cMPhaseIterGVNVadd_users_to_worklist6MpnENode__v_; +text: .text%__1cIHaltNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cKmethodOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: callnode.o; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__: ad_sparc.o; +text: .text%__1cPindOffset13OperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cENodeNrematerialize6kM_i_: cfgnode.o; +text: .text%__1cMMergeMemNodeGOpcode6kM_i_; +text: .text%__1cHCompileMFillLocArray6MpnENode_pnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cETypeKhas_memory6kM_i_; +text: .text%__1cSCallStaticJavaNodeEhash6kM_I_: callnode.o; +text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: cfgnode.o; +text: .text%__1cENode2t6Mp0_v_; +text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__; +text: .text%__1cNCatchProjNodeGOpcode6kM_i_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: cfgnode.o; +text: .text%__1cIGraphKitHstopped6M_i_; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: ad_sparc_misc.o; +text: .text%__1cICmpINodeGOpcode6kM_i_; +text: .text%__1cJStartNodeGpinned6kM_i_: callnode.o; +text: .text%__1cHRegMaskPfind_first_pair6kM_nHOptoRegEName__; +text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_; +text: .text%__1cKMachIfNodeJis_MachIf6kM_pk0_: ad_sparc_misc.o; +text: .text%__1cLTypeInstPtrEmake6FnHTypePtrDPTR_pnHciKlass_ipnIciObject_i_pk0_; +text: .text%__1cMPipeline_UseJadd_usage6Mrk0_v_; +text: .text%__1cGIfNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSinstanceKlassKlassMoop_is_klass6kM_i_: instanceKlassKlass.o; +text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_; +text: .text%__1cJloadINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cYCallStaticJavaDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIget_long6kM_x_; +text: .text%__1cHMatcherKReduceOper6MpnFState_ipnIMachNode_rpnENode__v_; +text: .text%__1cMPhaseChaitinMchoose_color6MrnDLRG_i_nHOptoRegEName__; +text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cbFCompressedLineNumberWriteStreamKwrite_pair6Mii_v_; +text: .text%__1cIAddPNodeKmatch_edge6kMI_I_; +text: .text%__1cJTypeTupleJsingleton6kM_i_; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pnIciObject_pknHTypeAry_pnHciKlass_ii_pk0_; +text: 
.text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__; +text: .text%__1cENodeGis_CFG6kM_i_: connode.o; +text: .text%__1cRCardTableModRefBSEkind6M_nKBarrierSetEName__: cardTableExtension.o; +text: .text%__1cKCodeBufferIrelocate6MpCrknQRelocationHolder_i_v_; +text: .text%__1cJLoadPNodeGOpcode6kM_i_; +text: .text%__1cHConNodeGis_Con6kM_I_: classes.o; +text: .text%__1cHRegMaskMSmearToPairs6M_v_; +text: .text%__1cMOopMapStreamJfind_next6M_v_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: memnode.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nmethod.o; +text: .text%__1cJiRegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cMMachTypeNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cIParmNodeGOpcode6kM_i_; +text: .text%__1cKSchedulingPAddNodeToBundle6MpnENode_pknFBlock__v_; +text: .text%__1cKSchedulingWAddNodeToAvailableList6MpnENode__v_; +text: .text%__1cKSchedulingSChooseNodeToBundle6M_pnENode__; +text: .text%__1cICodeHeapKfind_start6kMpv_1_; +text: .text%__1cOThreadCritical2T6M_v_; +text: .text%__1cOThreadCritical2t6M_v_; +text: .text%__1cENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICallNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHPhiNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: classes.o; +text: .text%__1cIMachNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNsymbolOopDescGequals6kMpkci_i_; +text: .text%__1cICmpUNodeGOpcode6kM_i_; +text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__; +text: .text%__1cENodeHdel_req6MI_v_; +text: .text%__1cMMutableSpaceMcas_allocate6ML_pnIHeapWord__; +text: .text%__1cETypeEhash6kM_i_; +text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_; +text: .text%__1cPClassFileParserbEparse_constant_pool_utf8_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_; +text: .text%__1cJiRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cHCmpNodeGis_Cmp6kM_pk0_: classes.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: cfgnode.o; +text: .text%__1cKTypeAryPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cTCreateExceptionNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAddPNodeLbottom_type6kM_pknEType__; +text: .text%__1cUParallelScavengeHeapWpermanent_mem_allocate6ML_pnIHeapWord__; +text: .text%__1cJPSPermGenSallocate_permanent6ML_pnIHeapWord__; +text: .text%__1cMMutableSpaceIallocate6ML_pnIHeapWord__; +text: .text%__1cNSharedRuntimeDd2i6Fd_i_; +text: .text%__1cOPhaseIdealLoopbIdom_lca_for_get_late_ctrl_internal6MpnENode_22_2_; +text: .text%__1cJMultiNodeEhash6kM_I_: classes.o; +text: .text%__1cITypeLongJsingleton6kM_i_; +text: .text%JVM_CurrentTimeMillis; +text: .text%__1cENodeGis_CFG6kM_i_: subnode.o; +text: .text%__1cFBlockIis_Empty6kM_i_; +text: .text%__1cILoadNodeEhash6kM_I_; +text: .text%__1cJCProjNodeEhash6kM_I_: classes.o; +text: .text%__1cQciByteCodeStreamEjava6MnJBytecodesECode__2_; +text: .text%__1cJCatchNodeGOpcode6kM_i_; +text: .text%__1cIHaltNodeGOpcode6kM_i_; +text: .text%__1cMloadConINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHTypeIntFxmeet6kMpknEType__3_; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_: classes.o; +text: .text%__1cNflagsRegPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cLConvI2LNodeGOpcode6kM_i_; +text: 
.text%__1cFframeVoopmapreg_to_location6kMnFVMRegEName_pknLRegisterMap__ppnHoopDesc__; +text: .text%__1cTconstantPoolOopDescSklass_ref_index_at6Mi_i_; +text: .text%__1cKRelocationLunpack_data6M_v_: ad_sparc.o; +text: .text%__1cMPhaseIterGVNZremove_globally_dead_node6MpnENode__v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: multnode.o; +text: .text%__1cIJVMStateLdebug_start6kM_I_; +text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: cfgnode.o; +text: .text%__1cFBlockGselect6MrnJNode_List_rnLBlock_Array_pirnJVectorSet_IrnNGrowableArray4CI___pnENode__; +text: .text%__1cFStateRMachNodeGenerator6MipnHCompile__pnIMachNode__; +text: .text%__1cHMatcherKReduceInst6MpnFState_irpnENode__pnIMachNode__; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_sparc_misc.o; +text: .text%method_compare: methodOop.o; +text: .text%__1cXPipeline_Use_Cycle_MaskCOr6Mrk0_v_; +text: .text%__1cMMachCallNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cIMachNodeGExpand6MpnFState_rnJNode_List__p0_: ad_sparc_misc.o; +text: .text%__1cILoadNodeLbottom_type6kM_pknEType__; +text: .text%__1cWShouldNotReachHereNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cGBitMapFclear6M_v_; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: classes.o; +text: .text%__1cTconstantPoolOopDescQsignature_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_; +text: .text%__1cKHandleMark2T6M_v_; +text: .text%__1cENodeHis_Call6M_pnICallNode__: cfgnode.o; +text: .text%__1cJiRegLOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: multnode.o; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__: classes.o; +text: .text%__1cMMergeMemNodeLbottom_type6kM_pknEType__: memnode.o; +text: .text%__1cHMatcherTReduceInst_Interior6MpnFState_ipnIMachNode_IrpnENode__I_; +text: .text%__1cJTypeTupleEhash6kM_i_; +text: .text%__1cJloadPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKCastPPNodeGOpcode6kM_i_; +text: .text%__1cGOopMapJset_value6MnHOptoRegEName_ii_v_; +text: .text%__1cITypeLongFxmeet6kMpknEType__3_; +text: .text%__1cIMachNodeSalignment_required6kM_i_: ad_sparc_misc.o; +text: .text%__1cHPhiNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cMloadConINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2L_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadPNodeIpipeline6kM_pknIPipeline__; +text: .text%JVM_ReleaseUTF; +text: .text%__1cObranchConPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKNode_ArrayEgrow6MI_v_; +text: .text%__1cJMultiNodeIproj_out6kMI_pnIProjNode__; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: methodDataOop.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlass.o; +text: .text%__1cFBlockLis_uncommon6kMrnLBlock_Array__i_; +text: .text%__1cINodeHashLhash_delete6MpknENode__i_; +text: .text%__1cZPhaseConservativeCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: multnode.o; +text: .text%__1cOAbstractICachePinvalidate_word6FpC_v_; +text: .text%__1cMloadConINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cGIfNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIAddPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRNativeInstructionLset_long_at6Mii_v_; +text: .text%__1cPciInstanceKlassMis_interface6M_i_: ciInstanceKlass.o; +text: .text%__1cFDictI2i6M_v_; +text: 
.text%__1cNobjArrayKlassQarray_klass_impl6FnTobjArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cIAddPNodeHis_AddP6M_p0_: classes.o; +text: .text%__1cMPhaseChaitinSuse_prior_register6MpnENode_I2pnFBlock_rnJNode_List_6_i_; +text: .text%__1cNinstanceKlassQarray_klass_impl6FnTinstanceKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cHCompileJcan_alias6MpknHTypePtr_i_i_; +text: .text%__1cPSignatureStreamEnext6M_v_; +text: .text%__1cENodeGpinned6kM_i_: connode.o; +text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cITypeLongEmake6Fxxi_pk0_; +text: .text%__1cENodeOis_block_start6kM_i_; +text: .text%__1cGBitMapJset_union6M0_v_; +text: .text%__1cJiRegLOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: subnode.o; +text: .text%__1cPVirtualCallDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cIConPNodeGOpcode6kM_i_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: ad_sparc.o; +text: .text%__1cUGenericGrowableArray2t6Mii_v_; +text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cHTypeIntEmake6Fi_pk0_; +text: .text%__1cKRegionNodeEhash6kM_I_: classes.o; +text: .text%__1cHPhiNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMflagsRegOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cPindOffset13OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cENodeHis_Copy6kM_I_: memnode.o; +text: .text%__1cPjava_lang_ClassMis_primitive6FpnHoopDesc__i_; +text: .text%__1cOPSPromotionLABKinitialize6MnJMemRegion__v_; +text: .text%__1cENodeGis_Con6kM_I_: classes.o; +text: .text%__1cJLoadINodeGOpcode6kM_i_; +text: .text%__1cHPhiNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cHTypeIntEmake6Fiii_pk0_; +text: .text%__1cENodeIdestruct6M_v_; +text: .text%__1cIBoolNodeEhash6kM_I_; +text: .text%JVM_GetMethodIxExceptionTableLength; +text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_; +text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__; +text: .text%__1cMPhaseIterGVNKis_IterGVN6M_p0_: phaseX.o; +text: .text%__1cKbranchNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cGBitMap2t6MpLL_v_; +text: .text%__1cENodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__; +text: .text%__1cEDict2F6kMpkv_pv_; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_: coalesce.o; +text: .text%__1cIMachNodeNis_MachEpilog6M_pnOMachEpilogNode__: ad_sparc_misc.o; +text: .text%__1cFParsePdo_one_bytecode6M_v_; +text: .text%__1cFParseNdo_exceptions6M_v_; +text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_; +text: .text%__1cNbranchConNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cICodeBlobLoop_addr_at6kMi_ppnHoopDesc__; +text: .text%__1cKis_x2logic6FpnIPhaseGVN_pnENode__3_: cfgnode.o; +text: .text%__1cHAbsNodeLis_absolute6FpnIPhaseGVN_pnENode__4_; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cVcompP_iRegP_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNLoadRangeNodeGOpcode6kM_i_; +text: 
.text%__1cLPCTableNodeGpinned6kM_i_: classes.o; +text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: cfgnode.o; +text: .text%__1cUGenericGrowableArray2t6MpnFArena_iipnEGrET__v_; +text: .text%__1cGBitMapGat_put6MLi_v_; +text: .text%__1cHMatcherKmatch_tree6MpknENode__pnIMachNode__; +text: .text%__1cTconstantPoolOopDescbAname_and_type_ref_index_at6Mi_i_; +text: .text%__1cLTypeInstPtrFxmeet6kMpknEType__3_; +text: .text%__1cKNode_ArrayFclear6M_v_; +text: .text%__1cIHaltNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: classes.o; +text: .text%__1cIProjNodeHsize_of6kM_I_; +text: .text%__1cUParallelScavengeHeapPis_in_permanent6kMpkv_i_: parallelScavengeHeap.o; +text: .text%__1cMCreateExNodeGOpcode6kM_i_; +text: .text%__1cENodeHis_Call6M_pnICallNode__: multnode.o; +text: .text%__1cHhashptr6Fpkv_i_; +text: .text%__1cLLShiftLNodeGOpcode6kM_i_; +text: .text%__1cMMachCallNodeLbottom_type6kM_pknEType__; +text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cHhashkey6Fpkv_i_; +text: .text%__1cHSubNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cObranchConPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_; +text: .text%__1cKRelocationRpd_set_data_value6MpCl_v_; +text: .text%__1cODataRelocationJset_value6MpC_v_: relocInfo.o; +text: .text%__1cKciTypeFlowLStateVectorSapply_one_bytecode6MpnQciByteCodeStream__i_; +text: .text%__1cOoop_RelocationGoffset6M_i_: relocInfo.o; +text: .text%__1cOoop_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cKRelocationLunpack_data6M_v_: relocInfo.o; +text: .text%__1cOcompU_iRegNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKBranchDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cMMachHaltNodeEjvms6kM_pnIJVMState__; +text: .text%__1cITypeNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cHCompilePfind_alias_type6MpknHTypePtr_i_pn0AJAliasType__; +text: .text%__1cHConNodeEhash6kM_I_; +text: .text%__1cKimmI13OperIconstant6kM_l_: ad_sparc_clone.o; +text: .text%__1cMPhaseChaitinHnew_lrg6MpknENode_I_v_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: classes.o; +text: .text%__1cFMutexElock6MpnGThread__v_; +text: .text%__1cIMachNodeHtwo_adr6kM_I_: ad_sparc.o; +text: .text%__1cJTypeTupleCeq6kMpknEType__i_; +text: .text%__1cHCmpNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: callnode.o; +text: .text%__1cHMemNodeMIdeal_common6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMPhaseIterGVNMsubsume_node6MpnENode_2_v_; +text: .text%__1cENode2t6Mp011_v_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: relocInfo.o; +text: .text%__1cTconstantPoolOopDescLname_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cIPhaseIFGMtest_edge_sq6kMII_i_; +text: .text%__1cObranchConPNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_; +text: .text%__1cNSafePointNodeHsize_of6kM_I_; +text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_; +text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__; +text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_; +text: .text%__1cFframeUis_interpreted_frame6kM_i_; +text: .text%__1cLTypeInstPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: cfgnode.o; +text: .text%__1cIMachNodeHis_Mach6M_p0_: machnode.o; +text: .text%__1cENodeGis_Con6kM_I_: 
subnode.o; +text: .text%__1cJloadPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJStoreNodeKmatch_edge6kMI_I_; +text: .text%__1cETypeFempty6kM_i_; +text: .text%__1cENodeHget_int6kM_i_; +text: .text%__1cIAddPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMPhaseIterGVNbGregister_new_node_with_optimizer6MpnENode__2_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: generateOopMap.o; +text: .text%__1cIJVMStateJdebug_end6kM_I_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: connode.o; +text: .text%__1cNCellTypeStateFmerge6kM0i_0_; +text: .text%__1cJloadPNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: ad_sparc_misc.o; +text: .text%__1cLBoxLockNodeNrematerialize6kM_i_: classes.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: symbolKlass.o; +text: .text%__1cNSafePointNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPcheckCastPPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFArenaEgrow6ML_pv_; +text: .text%__1cJloadBNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKbranchNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKbranchNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_: bytecode.o; +text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceKlass.o; +text: .text%__1cQSystemDictionaryXcheck_signature_loaders6FnMsymbolHandle_nGHandle_2ipnGThread__v_; +text: .text%__1cMMachCallNodeLis_MachCall6M_p0_: ad_sparc_misc.o; +text: .text%__1cILoadNodeKmatch_edge6kMI_I_; +text: .text%__1cCosVcurrent_stack_pointer6F_pC_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: multnode.o; +text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__; +text: .text%__1cNbranchConNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_CFG6kM_i_: memnode.o; +text: .text%__1cIAddPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cQaddP_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cObranchConPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIBoolNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKRegionNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSaddI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cPciInstanceKlassRis_instance_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cOno_flip_branch6FpnFBlock__i_: block.o; +text: .text%__1cLsymbolKlassNoop_is_symbol6kM_i_: symbolKlass.o; +text: .text%__1cPSignatureStreamHis_done6kM_i_; +text: .text%__1cKStorePNodeGOpcode6kM_i_; +text: .text%__1cJiRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKStoreINodeGOpcode6kM_i_; +text: .text%__1cHMemNodeGis_Mem6M_p0_: classes.o; +text: .text%__1cHPhiNodeHsize_of6kM_I_: cfgnode.o; +text: .text%__1cNmethodOopDescRis_not_compilable6kMi_i_; +text: .text%__1cObranchConUNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOloadConI13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__; +text: .text%__1cQaddP_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIHaltNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cJrelocInfo2t6Mn0AJrelocType_ii_v_; +text: .text%__1cOloadConI13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cPcheckCastPPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cJStoreNodeLbottom_type6kM_pknEType__; +text: .text%__1cQCompressedStream2t6MpCi_v_; +text: .text%__1cITypeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_; +text: .text%__1cGIfNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNflagsRegUOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cOPSPromotionLABFflush6M_v_; +text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__; +text: .text%__1cSPSPromotionManagerMdrain_stacks6M_v_; +text: .text%__1cENodeHis_Goto6kM_I_: classes.o; +text: .text%__1cJdo_method6FpnNmethodOopDesc__v_: recompilationMonitor.o; +text: .text%__1cGThreadLis_in_stack6kMpC_i_; +text: .text%__1cKJavaThreadNis_lock_owned6kMpC_i_; +text: .text%__1cSCountedLoopEndNodeGOpcode6kM_i_; +text: .text%__1cITypeNodeDcmp6kMrknENode__I_; +text: .text%__1cKTypeRawPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cILoadNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJloadINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIBoolNodeLbottom_type6kM_pknEType__: subnode.o; +text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_; +text: .text%__1cNSafePointNodebBneeds_polling_address_input6F_i_; +text: .text%__1cNmethodOopDescPis_empty_method6kM_i_; +text: .text%__1cSCallStaticJavaNodeRis_CallStaticJava6kM_pk0_: callnode.o; +text: .text%__1cMMergeMemNodeLis_MergeMem6M_p0_: memnode.o; +text: .text%__1cLCounterDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__; +text: .text%__1cKReflectionTverify_field_access6FpnMklassOopDesc_22nLAccessFlags_ii_i_; +text: .text%__1cOMachReturnNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__; +text: .text%__1cPCheckCastPPNodeGOpcode6kM_i_; +text: .text%__1cNPhaseCoalesceRcombine_these_two6MpnENode_2_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: lcm.o; +text: .text%__1cKcmpOpPOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cNflagsRegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cKTypeRawPtrJsingleton6kM_i_; +text: .text%__1cMPhaseChaitinLinsert_proj6MpnFBlock_IpnENode_I_v_; +text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: callnode.o; +text: .text%__1cJcmpOpOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cOAbstractICacheQinvalidate_range6FpCi_v_; +text: .text%__1cNmethodOopDescLis_accessor6kM_i_; +text: .text%__1cHPhiNodeIadr_type6kM_pknHTypePtr__: cfgnode.o; +text: .text%__1cZPhaseConservativeCoalesceJcopy_copy6MpnENode_2pnFBlock_I_i_; +text: .text%__1cKstorePNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSafePointNodeSset_next_exception6Mp0_v_; +text: .text%__1cNRelocIteratorKset_limits6MpC1_v_; +text: .text%__1cILoadNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cUParallelScavengeHeapVunsafe_max_tlab_alloc6kM_L_; +text: .text%__1cPciObjectFactorySget_unloaded_klass6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_L_pnIHeapWord__; +text: .text%__1cNmethodOopDescMintrinsic_id6kM_n0ALIntrinsicId__; +text: .text%__1cQciByteCodeStreamMreset_to_bci6Mi_v_; +text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: classes.o; +text: .text%__1cMPhaseIterGVNFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cFframeZsender_with_pc_adjustment6kMpnLRegisterMap_pnICodeBlob_i_0_; +text: 
.text%__1cPsplit_flow_path6FpnIPhaseGVN_pnHPhiNode__pnENode__: cfgnode.o; +text: .text%__1cLis_cond_add6FpnIPhaseGVN_pnHPhiNode__pnENode__; +text: .text%__1cUParallelScavengeHeapRallocate_new_tlab6ML_pnIHeapWord__; +text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2L_v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_; +text: .text%__1cKSharedHeapXfill_region_with_object6FnJMemRegion__v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode__i_; +text: .text%__1cKCodeBuffer2t6MpCi_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: block.o; +text: .text%__1cFframeGsender6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cMMergeMemNodeEhash6kM_I_; +text: .text%__1cMflagsRegOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cENodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cILoadNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKRegionNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRNativeInstructionQset_data64_sethi6FpCl_v_; +text: .text%__1cOMacroAssemblerFsethi6MrnHAddress_i_v_: nativeInst_sparc.o; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: nativeInst_sparc.o; +text: .text%__1cHRetNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cRInvocationCounterEinit6M_v_; +text: .text%__1cFBlockOschedule_local6MrnHMatcher_rnLBlock_Array_pirnJVectorSet_rnNGrowableArray4CI___i_; +text: .text%__1cXPhaseAggressiveCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cFBlockScall_catch_cleanup6MrnLBlock_Array__v_; +text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cITypeNodeHsize_of6kM_I_; +text: .text%__1cKRegionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstorePNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOcompU_iRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_; +text: .text%__1cNmethodOopDescIbci_from6kMpC_i_; +text: .text%__1cRMachSafePointNodeQis_MachSafePoint6M_p0_: ad_sparc_misc.o; +text: .text%__1cIRootNodeGOpcode6kM_i_; +text: .text%__1cLPhaseValuesGintcon6Mi_pnIConINode__; +text: .text%__1cOloadConI13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cObranchConPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGpinned6kM_i_: subnode.o; +text: .text%__1cFMutexbClock_without_safepoint_check6M_v_; +text: .text%__1cENodeRdisconnect_inputs6Mp0_i_; +text: .text%__1cMObjectLocker2T6M_v_; +text: .text%__1cMObjectLocker2t6MnGHandle_pnGThread__v_; +text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cIAddINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cENodeGpinned6kM_i_: memnode.o; +text: .text%__1cJTypeTupleEmake6FIppknEType__pk0_; +text: .text%__1cJTypeTupleGfields6FI_ppknEType__; +text: .text%__1cENodeHdel_out6Mp0_v_: parse2.o; +text: .text%__1cOcompI_iRegNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPcheckCastPPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNidealize_test6FpnIPhaseGVN_pnGIfNode__3_: ifnode.o; +text: .text%__1cHAddNodeEhash6kM_I_; +text: .text%__1cRmethodDataOopDescPinitialize_data6MpnOBytecodeStream_i_i_; +text: .text%__1cRmethodDataOopDescTbytecode_cell_count6FnJBytecodesECode__i_; +text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: subnode.o; +text: .text%__1cQaddP_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cNExceptionMark2T6M_v_; +text: .text%__1cNExceptionMark2t6MrpnGThread__v_; +text: .text%__1cHTypeAryRary_must_be_exact6kM_i_; +text: .text%__1cPconvI2L_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLLShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cILoadNodeHis_Load6M_p0_: classes.o; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: split_if.o; +text: .text%__1cTCreateExceptionNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJStoreNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__; +text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__; +text: .text%__1cYCallStaticJavaDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNbranchConNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIMachOperNconstant_disp6kM_i_; +text: .text%__1cIMachOperFscale6kM_i_; +text: .text%__1cLProfileDataPadjust_pointers6M_v_: methodDataOop.o; +text: .text%__1cLProfileDataPfollow_contents6M_v_: methodDataOop.o; +text: .text%__1cIsplit_if6FpnGIfNode_pnMPhaseIterGVN__pnENode__: ifnode.o; +text: .text%__1cKbranchNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cTremove_useless_bool6FpnGIfNode_pnIPhaseGVN__pnENode__: ifnode.o; +text: .text%__1cJloadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNSafePointNodeLbottom_type6kM_pknEType__: callnode.o; +text: .text%__1cIBoolNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLLShiftINodeGOpcode6kM_i_; +text: .text%__1cJStoreNodeIis_Store6kM_pk0_: classes.o; +text: .text%__1cUcompI_iReg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPSignatureStreamJis_object6kM_i_; +text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_; +text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cKbranchNodeHis_Goto6kM_I_: ad_sparc_misc.o; +text: .text%__1cFChunkJnext_chop6M_v_; +text: .text%__1cENode2t6Mp0111_v_; +text: .text%__1cGOopMapHset_oop6MnHOptoRegEName_ii_v_; +text: .text%__1cObranchConUNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPcheckCastPPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMPhaseChaitinMyank_if_dead6MpnENode_pnFBlock_pnJNode_List_6_i_; +text: .text%__1cNPhaseRegAllocGis_oop6kMpknENode__i_; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: parse1.o; +text: .text%__1cISubINodeGOpcode6kM_i_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: cfgnode.o; +text: .text%__1cHTypeAryEhash6kM_i_; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: classes.o; +text: .text%__1cHSubNodeGis_Sub6M_p0_: classes.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: codeBlob.o; +text: .text%__1cNbranchConNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: ad_sparc.o; +text: .text%__1cENodeJis_Branch6kM_I_: ad_sparc.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_sparc.o; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: ad_sparc.o; +text: .text%__1cIMachNodeSalignment_required6kM_i_: ad_sparc.o; +text: .text%__1cFciEnvXget_klass_by_index_impl6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: subnode.o; +text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__; +text: .text%__1cNbranchConNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cJStoreNodeEhash6kM_I_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: memnode.o; +text: 
.text%__1cNSafePointNodeOnext_exception6kM_p0_; +text: .text%JVM_GetClassModifiers; +text: .text%__1cJCProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJMultiNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%JVM_GetClassAccessFlags; +text: .text%__1cITypeLongEmake6Fx_pk0_; +text: .text%__1cFBlockOcode_alignment6M_I_; +text: .text%__1cJloadPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKSchedulingPComputeUseCount6MpknFBlock__v_; +text: .text%__1cKSchedulingbFComputeRegisterAntidependencies6MpnFBlock__v_; +text: .text%__1cJCatchNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLOptoRuntimeFnew_C6FpnMklassOopDesc_pnKJavaThread__v_; +text: .text%__1cOis_range_check6FpnENode_r12ri_i_: ifnode.o; +text: .text%__1cENodeKreplace_by6Mp0_v_; +text: .text%__1cHTypePtrHget_con6kM_l_; +text: .text%__1cHCmpNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cMgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cMloadConPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cIimmPOperEtype6kM_pknEType__: ad_sparc_clone.o; +text: .text%__1cIMachNodeNis_MachEpilog6M_pnOMachEpilogNode__: ad_sparc.o; +text: .text%Unsafe_CompareAndSwapLong; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceKlass.o; +text: .text%__1cIJumpDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_; +text: .text%__1cPPerfLongVariantGsample6M_v_; +text: .text%__1cFStateM_sub_Op_RegP6MpknENode__v_; +text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__; +text: .text%__1cVcompP_iRegP_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddP_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHTypePtrEhash6kM_i_; +text: .text%__1cFBlockLfind_remove6MpknENode__v_; +text: .text%__1cNCompileBrokerLmaybe_block6F_v_; +text: .text%__1cFPhase2t6Mn0ALPhaseNumber__v_; +text: .text%__1cKTypeRawPtrEhash6kM_i_; +text: .text%__1cPcompP_iRegPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOcompU_iRegNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: multnode.o; +text: .text%__1cRMachSpillCopyNodeOimplementation6kMpnKCodeBuffer_pnNPhaseRegAlloc_i_I_; +text: .text%__1cRMachSpillCopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: callnode.o; +text: .text%JVM_GetCPMethodSignatureUTF; +text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_; +text: .text%__1cHMatcherQis_save_on_entry6Mi_i_; +text: .text%__1cOcompU_iRegNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMachOperIconstant6kM_l_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: multnode.o; +text: .text%__1cJloadINodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cMPhaseChaitinSget_spillcopy_wide6MpnENode_2I_2_; +text: .text%__1cPVirtualCallDataPadjust_pointers6M_v_; +text: .text%__1cPVirtualCallDataPfollow_contents6M_v_; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cKstorePNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMURShiftINodeGOpcode6kM_i_; +text: .text%__1cKMemBarNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: machnode.o; +text: .text%__1cKIfTrueNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: 
.text%__1cMMergeMemNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKRegionNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_: frame.o; +text: .text%__1cHTypeIntFempty6kM_i_; +text: .text%__1cSaddI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFframeOis_entry_frame6kM_i_; +text: .text%__1cLMachNopNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIJVMStateNclone_shallow6kM_p0_; +text: .text%__1cITypeLongEmake6Fxx_pk0_; +text: .text%__1cIIndexSetJlrg_union6MIIkIpknIPhaseIFG_rknHRegMask__I_; +text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cVCompressedWriteStream2t6Mi_v_; +text: .text%__1cJiRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKRegionNodeOis_block_start6kM_i_: classes.o; +text: .text%__1cQaddI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHnmethodJis_zombie6kM_i_: nmethod.o; +text: .text%__1cMMergeMemNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: subnode.o; +text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_; +text: .text%__1cVcompP_iRegP_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: cfgnode.o; +text: .text%__1cNRelocIteratorKinitialize6MlpnICodeBlob_pC3_v_; +text: .text%__1cMMergeMemNodePiteration_setup6Mpk0_v_; +text: .text%__1cKCodeBufferRtransform_address6kMrk0pC_3_; +text: .text%__1cMMergeMemNodeQclone_all_memory6FpnENode__p0_; +text: .text%__1cIGraphKitJclone_map6M_pnNSafePointNode__; +text: .text%__1cKTypeOopPtrWmake_from_klass_common6FpnHciKlass_ii_pk0_; +text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cRMemBarReleaseNodeGOpcode6kM_i_; +text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_; +text: .text%__1cNSafePointNodeKmatch_edge6kMI_I_; +text: .text%__1cFBlockJfind_node6kMpknENode__I_; +text: .text%__1cRPSOldPromotionLABFflush6M_v_; +text: .text%__1cLIfFalseNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJStartNodeGpinned6kM_i_: classes.o; +text: .text%__1cLPhaseValuesFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cNPhaseRegAllocKreg2offset6kMnHOptoRegEName__i_; +text: .text%__1cKbranchNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLRegisterMapLpd_location6kMnFVMRegEName__pC_; +text: .text%__1cICallNodeLis_CallLeaf6kM_pknMCallLeafNode__: callnode.o; +text: .text%__1cOcompU_iRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: connode.o; +text: .text%__1cNloadRangeNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRlock_ptr_RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: subnode.o; +text: .text%__1cIBoolNodeKmatch_edge6kMI_I_: subnode.o; +text: .text%__1cNCatchProjNodeMis_CatchProj6kM_pk0_: cfgnode.o; +text: .text%__1cWMutableSpaceUsedHelperLtake_sample6M_x_: spaceCounters.o; +text: .text%__1cCosPelapsed_counter6F_x_; +text: .text%__1cNflagsRegUOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cKcmpOpUOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cLRegisterMapFclear6Mpl_v_; +text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__; +text: .text%__1cENodeFis_If6M_pnGIfNode__: multnode.o; +text: .text%__1cMMergeMemNodePset_base_memory6MpnENode__v_; +text: .text%__1cIciObjectJset_ident6MI_v_; +text: 
.text%__1cGBitMapOset_difference6M0_v_; +text: .text%__1cMPhaseChaitinFUnion6MpknENode_3_v_; +text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__; +text: .text%__1cNLoadKlassNodeGOpcode6kM_i_; +text: .text%__1cLstoreI0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cObranchConUNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: callnode.o; +text: .text%__1cSaddI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: multnode.o; +text: .text%__1cENodeHget_ptr6kM_l_; +text: .text%__1cLBoxLockNodeGOpcode6kM_i_; +text: .text%__1cKTypeAryPtrFxmeet6kMpknEType__3_; +text: .text%__1cQciByteCodeStreamFEOBCs6M_nJBytecodesECode__; +text: .text%__1cHAddNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIHaltNodeLbottom_type6kM_pknEType__; +text: .text%__1cQaddP_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKReflectionTverify_class_access6FpnMklassOopDesc_2i_i_; +text: .text%__1cENodeHins_req6MIp0_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: codeBuffer.o; +text: .text%__1cRCompilationPolicyOmustBeCompiled6FnMmethodHandle__i_; +text: .text%__1cRshlL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMTypeKlassPtrEhash6kM_i_; +text: .text%__1cLPhaseValuesHlongcon6Mx_pnIConLNode__; +text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cQaddI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetMethodIxLocalsCount; +text: .text%__1cSaddP_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: classes.o; +text: .text%__1cOcompI_iRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cPBoundRelocationMupdate_addrs6MpCrknKCodeBuffer_4_1_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: codeBuffer.o; +text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cObranchConUNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cObranchConUNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMMergeMemNodeJmemory_at6kMI_pnENode__; +text: .text%__1cOFastUnlockNodeGOpcode6kM_i_; +text: .text%__1cIMachOperOindex_position6kM_i_; +text: .text%__1cMPhaseChaitinJsplit_USE6MpnENode_pnFBlock_2IIiinNGrowableArray4CI__i_I_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: gcm.o; +text: .text%__1cRcmpFastUnlockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIAndINodeGOpcode6kM_i_; +text: .text%JVM_CurrentThread; +text: .text%__1cRMemBarAcquireNodeGOpcode6kM_i_; +text: .text%__1cIJVMStateIof_depth6kMi_p0_; +text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLOopRecorderOallocate_index6MpnI_jobject__i_; +text: .text%__1cRMachNullCheckNodeQis_MachNullCheck6M_p0_: machnode.o; +text: .text%__1cKRelocationWfix_relocation_at_move6Ml_v_: relocInfo.o; +text: .text%__1cHnmethodKis_nmethod6kM_i_: nmethod.o; +text: .text%__1cPindOffset13OperNbase_position6kM_i_: ad_sparc.o; +text: .text%__1cPindOffset13OperNconstant_disp6kM_i_: ad_sparc.o; +text: .text%__1cPindOffset13OperFscale6kM_i_: ad_sparc.o; +text: .text%__1cKRegionNodeGis_CFG6kM_i_: loopnode.o; +text: 
.text%__1cHPhiNodeEmake6FpnENode_2pknEType_pknHTypePtr__p0_; +text: .text%__1cLstoreI0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshlI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJVectorSet2L6MI_rnDSet__; +text: .text%__1cNloadRangeNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciObject2t6MnGHandle__v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: postaloc.o; +text: .text%__1cMciMethodDataLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cLklassVtableTupdate_super_vtable6MpnNinstanceKlass_pnNmethodOopDesc_i_i_; +text: .text%__1cXmembar_release_lockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cITypeLongFempty6kM_i_; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_: machnode.o; +text: .text%__1cIPhaseCCPFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cWShouldNotReachHereNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMethodLivenessKBasicBlockWcompute_gen_kill_range6MpnQciByteCodeStream__v_; +text: .text%__1cOCallRelocationWfix_relocation_at_move6Ml_v_; +text: .text%__1cKRelocationXpd_set_call_destination6MpCl_v_; +text: .text%__1cKRelocationTpd_call_destination6M_pC_; +text: .text%__1cKMemBarNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cVcompP_iRegP_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENode2t6Mp01_v_; +text: .text%__1cMloadConLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__; +text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_; +text: .text%__1cSvframeStreamCommonPfill_from_frame6M_i_; +text: .text%__1cKstorePNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNObjectMonitorEexit6MpnGThread__v_; +text: .text%__1cIAddPNodeQmach_bottom_type6FpknIMachNode__pknEType__; +text: .text%__1cJVectorSet2t6MpnFArena__v_; +text: .text%__1cOcompU_iRegNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRSignatureIteratorSiterate_parameters6ML_v_; +text: .text%__1cNObjectMonitorFenter6MpnGThread__v_; +text: .text%__1cMMergeMemNodeNset_memory_at6MIpnENode__v_; +text: .text%__1cICallNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cPconvI2L_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cJTraceTime2T6M_v_; +text: .text%__1cPcompP_iRegPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeDcmp6kMrk0_I_; +text: .text%__1cFStateM_sub_Op_ConI6MpknENode__v_; +text: .text%__1cJloadPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPcheckCastPPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNSignatureInfoHdo_void6M_v_: bytecode.o; +text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_; +text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_; +text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_; +text: .text%__1cPClassFileParserMparse_method6MnSconstantPoolHandle_ipnLAccessFlags_pnPtypeArrayHandle_55pnGThread__nMmethodHandle__; +text: .text%__1cNmethodOopDescLobject_size6Fi_i_; +text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__; +text: 
.text%__1cLklassVtableWneeds_new_vtable_entry6FpnNmethodOopDesc_pnMklassOopDesc_pnHoopDesc_pnNsymbolOopDesc_nLAccessFlags__i_; +text: .text%__1cQconstMethodKlassIallocate6MiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cKoopFactoryKnew_method6FinLAccessFlags_iiipnGThread__pnNmethodOopDesc__; +text: .text%__1cNmethodOopDescJinit_code6M_v_; +text: .text%__1cFStateM_sub_Op_AddP6MpknENode__v_; +text: .text%__1cFciEnvSget_klass_by_index6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cKo0RegPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cJTraceTime2t6MpkcpnMelapsedTimer_iipnMoutputStream__v_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: connode.o; +text: .text%__1cITypeFuncEhash6kM_i_; +text: .text%__1cRcmpFastUnlockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKStoreBNodeGOpcode6kM_i_; +text: .text%__1cNmethodOopDescPis_final_method6kM_i_; +text: .text%__1cNmethodOopDescLlink_method6FnMmethodHandle__v_; +text: .text%__1cIRewriterOrewrite_method6FnMmethodHandle_rnIintArray_pnGThread__1_; +text: .text%__1cLRShiftINodeGOpcode6kM_i_; +text: .text%__1cMURShiftLNodeGOpcode6kM_i_; +text: .text%__1cLCastP2LNodeGOpcode6kM_i_; +text: .text%__1cPClassFileParserbDverify_legal_method_modifiers6MiinMsymbolHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cQaddI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRshrI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_; +text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cFMutexNowned_by_self6kM_i_; +text: .text%__1cMCallLeafNodeGOpcode6kM_i_; +text: .text%__1cKRelocationLunpack_data6M_v_: codeBlob.o; +text: .text%__1cKstoreINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cOMachReturnNodeNis_MachReturn6M_p0_: ad_sparc_misc.o; +text: .text%__1cJCatchNodeIis_Catch6kM_pk0_: classes.o; +text: .text%__1cIGraphKitEstop6M_v_; +text: .text%__1cLklassVtableToop_adjust_pointers6M_v_; +text: .text%__1cLklassVtableToop_follow_contents6M_v_; +text: .text%__1cLProfileDataPadjust_pointers6M_v_: ciMethodData.o; +text: .text%__1cLProfileDataPfollow_contents6M_v_: ciMethodData.o; +text: .text%__1cLConvI2LNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPSignatureStream2t6MnMsymbolHandle_i_v_; +text: .text%__1cITypeFuncCeq6kMpknEType__i_; +text: .text%__1cSaddP_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: subnode.o; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: callnode.o; +text: .text%__1cLRegisterMap2t6MpnKJavaThread_i_v_; +text: .text%__1cIMachNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cIMachOperMdisp_as_type6kM_pknHTypePtr__: ad_sparc.o; +text: .text%__1cMloadConDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSCallLeafDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTCreateExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOPhaseIdealLoopIsplit_up6MpnENode_22_i_; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: memnode.o; +text: .text%__1cQaddI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cICallNodeHis_Call6M_p0_: classes.o; +text: .text%__1cOcompI_iRegNodeHtwo_adr6kM_I_: ad_sparc_misc.o; 
+text: .text%JVM_GetCPMethodClassNameUTF; +text: .text%__1cENodeIadd_prec6Mp0_v_; +text: .text%__1cHCompileYout_preserve_stack_slots6F_I_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: machnode.o; +text: .text%__1cOcompI_iRegNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFStateM_sub_Op_ConL6MpknENode__v_; +text: .text%__1cENodeHis_Call6M_pnICallNode__: memnode.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: memnode.o; +text: .text%__1cIAddLNodeGOpcode6kM_i_; +text: .text%__1cVcompP_iRegP_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJlabelOperFlabel6kM_pnFLabel__: ad_sparc.o; +text: .text%__1cSaddP_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: memnode.o; +text: .text%__1cMmerge_region6FpnKRegionNode_pnIPhaseGVN__pnENode__: cfgnode.o; +text: .text%__1cNloadRangeNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciMethodPliveness_at_bci6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessPget_liveness_at6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessKBasicBlockPget_liveness_at6MpnIciMethod_i_nGBitMap__; +text: .text%__1cHAddNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMTypeKlassPtrCeq6kMpknEType__i_; +text: .text%__1cHTypePtrJsingleton6kM_i_; +text: .text%__1cIGraphKitObasic_plus_adr6MpnENode_2l_2_; +text: .text%__1cNinstanceKlassVshould_be_initialized6kM_i_; +text: .text%__1cXmembar_acquire_lockNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cLBoxLockNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__; +text: .text%__1cFStateM_sub_Op_RegI6MpknENode__v_; +text: .text%__1cPClassFileParserWparse_linenumber_table6MIIpipnGThread__pC_; +text: .text%__1cbFCompressedLineNumberWriteStream2t6Mi_v_; +text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: memnode.o; +text: .text%__1cNSafePointNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitLclean_stack6Mi_v_; +text: .text%__1cNflagsRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKbranchNodeFclone6kM_pnENode__; +text: .text%__1cMUniverseOperFclone6kM_pnIMachOper__; +text: .text%__1cJlabelOperFclone6kM_pnIMachOper__; +text: .text%__1cNSafePointNode2t6MIpnIJVMState__v_; +text: .text%__1cWMachCallStaticJavaNodePret_addr_offset6M_i_; +text: .text%__1cJStartNodeGis_CFG6kM_i_: callnode.o; +text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__; +text: .text%__1cLConvI2LNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cICmpPNodeDsub6kMpknEType_3_3_; +text: .text%__1cFParseKensure_phi6Mii_pnHPhiNode__; +text: .text%__1cNloadRangeNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cMloadConDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJAssemblerOpatched_branch6Fiii_i_; +text: .text%__1cJAssemblerSbranch_destination6Fii_i_; +text: .text%JVM_IsNaN; +text: .text%__1cIRootNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: subnode.o; +text: .text%__1cKstoreINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLCounterDataOis_CounterData6M_i_: ciMethodData.o; +text: .text%__1cQaddI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cVcompP_iRegP_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cITypeFuncEmake6FpknJTypeTuple_3_pk0_; +text: .text%__1cICodeBlobJis_zombie6kM_i_: codeBlob.o; +text: .text%__1cXruntime_call_RelocationEtype6M_nJrelocInfoJrelocType__: ad_sparc.o; +text: .text%__1cKbranchNodeLout_RegMask6kM_rknHRegMask__; 
+text: .text%__1cMloadConDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddP_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFframeOis_first_frame6kM_i_; +text: .text%__1cHMemNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cPcompP_iRegPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserZskip_over_field_signature6MpciIpnGThread__1_; +text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_; +text: .text%__1cIHaltNodeGpinned6kM_i_: classes.o; +text: .text%__1cKBufferBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cJStoreNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOPhaseIdealLoopIset_idom6MpnENode_2I_v_; +text: .text%__1cWShouldNotReachHereNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cWShouldNotReachHereNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cMflagsRegOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cJLoadPNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRMachSafePointNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cKRelocationJpack_data6M_i_: ad_sparc.o; +text: .text%__1cIGraphKitQkill_dead_locals6M_v_; +text: .text%__1cLklassItableToop_adjust_pointers6M_v_; +text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_; +text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLklassItableToop_follow_contents6M_v_; +text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_; +text: .text%__1cPindOffset13OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cPindOffset13OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cPindOffset13OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cNloadKlassNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cIAddINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cSaddP_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHOrINodeGOpcode6kM_i_; +text: .text%__1cLBoxLockNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOGenerateOopMapPjump_targets_do6MpnOBytecodeStream_pFp0ipi_v4_i_; +text: .text%__1cMMergeMemNode2t6MpnENode__v_; +text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_iipnGThread__v_; +text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cMPhaseIterGVNHmakecon6MpknEType__pnHConNode__; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: ad_sparc.o; +text: .text%__1cIRootNodeNis_block_proj6kM_pknENode__: classes.o; +text: .text%__1cOCallRelocationFvalue6M_pC_: ad_sparc.o; +text: .text%__1cSCallLeafDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cCosMvm_page_size6F_i_; +text: .text%__1cNloadKlassNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPconvI2L_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJStartNodeGOpcode6kM_i_; +text: .text%__1cOcompI_iRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJCodeCacheQfind_blob_unsafe6Fpv_pnICodeBlob__; +text: 
.text%__1cUcompI_iReg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMTypeKlassPtrEmake6FnHTypePtrDPTR_pnHciKlass_i_pk0_; +text: .text%__1cSvframeStreamCommonEnext6M_v_; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: cfgnode.o; +text: .text%__1cYDebugInformationRecorderOdescribe_scope6MpnIciMethod_ipnKDebugToken_44_v_; +text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfoRec.o; +text: .text%__1cJStoreNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSaddI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNMachIdealNodeJnum_opnds6kM_I_: ad_sparc.o; +text: .text%__1cENodeQlatency_from_use6kMrnLBlock_Array_rnNGrowableArray4CI__pk0p0_i_; +text: .text%__1cPcompP_iRegPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Copy6kM_I_: machnode.o; +text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQPreserveJVMState2t6MpnIGraphKit_i_v_; +text: .text%__1cQPreserveJVMState2T6M_v_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: codeBlob.o; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: bytecode.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: subnode.o; +text: .text%__1cMPhaseChaitinPset_was_spilled6MpnENode__v_; +text: .text%__1cQsubI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: node.o; +text: .text%__1cRshlL_reg_imm6NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeHis_Type6M_pnITypeNode__: classes.o; +text: .text%__1cKStoreCNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitRnull_check_common6MpnENode_nJBasicType_i_2_; +text: .text%__1cMloadConLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRloadConP_pollNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLstoreI0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIAddINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMloadConLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cVcompP_iRegP_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHTypeAryCeq6kMpknEType__i_; +text: .text%__1cIGraphKit2t6MpnIJVMState__v_; +text: .text%__1cKbranchNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cETypeFwiden6kMpk0_2_: type.o; +text: .text%__1cJloadINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMWarmCallInfoHis_cold6kM_i_; +text: .text%__1cSaddI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__; +text: .text%__1cHciFieldJwill_link6MpnPciInstanceKlass_nJBytecodesECode__i_; +text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cQciByteCodeStreamJget_field6Mri_pnHciField__; +text: .text%__1cCosGmalloc6FL_pv_; +text: .text%__1cPciInstanceKlassYunique_concrete_subklass6M_p0_; +text: .text%__1cXmembar_release_lockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_; +text: .text%__1cRlock_ptr_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cOoop_RelocationJpack_data6M_i_; +text: .text%__1cKstoreINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowNmake_range_at6Mi_pn0AFRange__; +text: .text%__1cOPhaseIdealLoopQconditional_move6MpnENode__2_; +text: .text%__1cMloadConLNodeErule6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cFParseMmerge_common6Mpn0AFBlock_i_v_; +text: .text%__1cJloadSNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cILoadNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cICodeBlobTfix_oop_relocations6M_v_; +text: .text%__1cRcmpFastUnlockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHCompileKTracePhase2t6MpkcpnMelapsedTimer_i_v_; +text: .text%JVM_GetCPFieldClassNameUTF; +text: .text%__1cMtlsLoadPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_: callnode.o; +text: .text%__1cTCreateExceptionNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSafePointNodeGpinned6kM_i_: classes.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: subnode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: subnode.o; +text: .text%__1cQsubI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: graphKit.o; +text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_; +text: .text%__1cKCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciObjectIencoding6M_pnI_jobject__; +text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__; +text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_; +text: .text%__1cTCreateExceptionNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cITypeLongFxdual6kM_pknEType__; +text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cGBitMapVset_union_with_result6M0_i_; +text: .text%__1cQaddP_reg_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cKCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: frame.o; +text: .text%__1cLBoxLockNodeHsize_of6kM_I_; +text: .text%__1cICmpINodeDsub6kMpknEType_3_3_; +text: .text%__1cIBoolNodeJideal_reg6kM_I_: subnode.o; +text: .text%__1cHCmpNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJloadINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTciConstantPoolCacheDget6Mi_pv_; +text: .text%__1cJLoadSNodeGOpcode6kM_i_; +text: .text%__1cITypeFuncEmake6FpnIciMethod__pk0_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: subnode.o; +text: .text%__1cHRetNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPcompP_iRegPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeHis_Goto6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadRangeNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLPhaseValuesHmakecon6MpknEType__pnHConNode__; +text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_; +text: .text%__1cObranchConPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMstringStreamFwrite6MpkcL_v_; +text: .text%__1cOkill_dead_code6FpnENode_pnMPhaseIterGVN__i_: node.o; +text: .text%__1cIMachNodeJis_MachIf6kM_pknKMachIfNode__: ad_sparc_misc.o; +text: .text%__1cFStateK_sub_Op_If6MpknENode__v_; +text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_; +text: .text%__1cRshlI_reg_imm5NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cKTypeOopPtrHget_con6kM_l_; +text: .text%__1cPconvI2L_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: connode.o; +text: 
.text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_; +text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_: frame.o; +text: .text%__1cRInlineCacheBufferIcontains6FpC_i_; +text: .text%__1cIAndLNodeGOpcode6kM_i_; +text: .text%__1cICmpUNodeDsub6kMpknEType_3_3_; +text: .text%__1cSCallLeafDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcompI_iRegNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNCatchProjNodeLbottom_type6kM_pknEType__: cfgnode.o; +text: .text%__1cQciByteCodeStreamKget_method6Mri_pnIciMethod__; +text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__; +text: .text%__1cKTypeAryPtrFklass6kM_pnHciKlass__; +text: .text%__1cJloadFNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPcheckCastPPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__; +text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_; +text: .text%__1cHnmethodIis_alive6kM_i_: nmethod.o; +text: .text%__1cNloadRangeNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSsafePoint_pollNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIciMethodbCinterpreter_invocation_count6M_i_; +text: .text%__1cNCatchProjNodeHsize_of6kM_I_: cfgnode.o; +text: .text%__1cKciTypeFlowFRangeNget_block_for6Mpn0AGJsrSet_n0AMCreateOption__pn0AFBlock__; +text: .text%__1cWConstantPoolCacheEntryJset_field6MnJBytecodesECode_2nLKlassHandle_iinITosState_ii_v_; +text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_ipnGThread__v_; +text: .text%__1cPcmpFastLockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cXvirtual_call_RelocationIparse_ic6FrpnICodeBlob_rpC5rppnHoopDesc_pi_nNRelocIterator__; +text: .text%__1cXmembar_acquire_lockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJLoadBNodeGOpcode6kM_i_; +text: .text%__1cRshlL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitOset_all_memory6MpnENode__v_; +text: .text%__1cRcmpFastUnlockNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJStartNodeIis_Start6M_p0_: callnode.o; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: node.o; +text: .text%__1cUGenericGrowableArrayMraw_contains6kMpknEGrET__i_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: callnode.o; +text: .text%__1cRcmpFastUnlockNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHMatcherScalling_convention6FpnLRegPair_Ii_v_; +text: .text%__1cHBitDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cIJVMStateKclone_deep6kM_p0_; +text: .text%__1cENodeNadd_req_batch6Mp0I_v_; +text: .text%__1cIGraphKitTadd_safepoint_edges6MpnNSafePointNode_i_v_; +text: .text%__1cIJVMStateLdebug_depth6kM_I_; +text: .text%__1cNLoadRangeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJLoadLNodeGOpcode6kM_i_; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: systemDictionary.o; +text: .text%__1cRshlL_reg_imm6NodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cRshrP_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIGraphKitTadd_exception_state6MpnNSafePointNode__v_; +text: .text%__1cOcompU_iRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRInterpretedRFrameKtop_method6kM_nMmethodHandle__: rframe.o; +text: .text%__1cFframeQoops_do_internal6MpnKOopClosure_pnLRegisterMap_i_v_; +text: .text%__1cRshrI_reg_imm5NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPsp_ptr_RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cPconvI2L_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLstoreB0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMMachCallNodeHis_Call6M_pnICallNode__: ad_sparc_misc.o; +text: .text%__1cICmpPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFParseFBlockKinit_graph6Mp0_v_; +text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cRRawBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cIMachNodeOpipeline_class6F_pknIPipeline__; +text: .text%__1cMPrefetchNodeGOpcode6kM_i_; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: thread.o; +text: .text%__1cNloadRangeNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreB0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowLStateVectorOpush_translate6MpnGciType__v_; +text: .text%__1cICodeHeapLheader_size6F_L_; +text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMkpnNmethodOopDesc_i_pnHnmethod__; +text: .text%__1cRloadConP_pollNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPcompP_iRegPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadKlassNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHnmethodOis_not_entrant6kM_i_: nmethod.o; +text: .text%__1cKciTypeFlowLStateVectorEmeet6Mpk1_i_; +text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_; +text: .text%__1cRloadConP_pollNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIPhaseGVNUtransform_no_reclaim6MpnENode__2_; +text: .text%__1cIimmIOperIconstant6kM_l_: ad_sparc_clone.o; +text: .text%__1cKCompiledICOic_destination6kM_pC_; +text: .text%__1cKCompiledIC2t6MpnKRelocation__v_; +text: .text%__1cCosEfree6Fpv_v_; +text: .text%__1cLConvL2INodeGOpcode6kM_i_; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: cfgnode.o; +text: .text%__1cIMulLNodeGOpcode6kM_i_; +text: .text%__1cJOopMapSetSfind_map_at_offset6kMii_pnGOopMap__; +text: .text%__1cICodeBlobbAoop_map_for_return_address6MpCi_pnGOopMap__; +text: .text%__1cFciEnvIis_in_vm6F_i_; +text: .text%__1cFParseMdo_one_block6M_v_; +text: .text%__1cILoadNodeHsize_of6kM_I_; +text: .text%__1cQsubI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cPCountedLoopNodeGOpcode6kM_i_; +text: .text%__1cQaddP_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMVirtualSpaceOcommitted_size6kM_L_; +text: .text%__1cKTypeOopPtrFempty6kM_i_; +text: .text%__1cRMachSafePointNode2t6M_v_; +text: .text%__1cHMatcherKmatch_sfpt6MpnNSafePointNode__pnIMachNode__; +text: .text%__1cLOptoRuntimePnew_typeArray_C6FnJBasicType_ipnKJavaThread__v_; +text: .text%__1cNbranchConNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_nGHandle_2ipnGThread__pnMklassOopDesc__; +text: .text%__1cPVirtualCallDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cMindirectOperJnum_edges6kM_I_: ad_sparc.o; +text: 
.text%__1cICallNodeOis_CallRuntime6kM_pknPCallRuntimeNode__: callnode.o; +text: .text%__1cLstoreI0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAddINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cENodeFis_If6M_pnGIfNode__: cfgnode.o; +text: .text%__1cFciEnvWget_klass_by_name_impl6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cXinsert_anti_dependences6FrpnFBlock_pnENode_rnLBlock_Array__i_: gcm.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: callnode.o; +text: .text%__1cNloadConP0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRloadConP_pollNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMethodLivenessKBasicBlockJpropagate6Mp0_v_; +text: .text%__1cKciTypeFlowGJsrSet2t6MpnFArena_i_v_; +text: .text%__1cMFastLockNodeGOpcode6kM_i_; +text: .text%__1cHMatcherPc_frame_pointer6kM_nHOptoRegEName__; +text: .text%__1cOPhaseIdealLoopIsink_use6MpnENode_2_v_; +text: .text%__1cMMachCallNode2t6M_v_; +text: .text%__1cFBlockKsched_call6MrnHMatcher_rnLBlock_Array_IrnJNode_List_pipnMMachCallNode_rnJVectorSet__I_; +text: .text%__1cMloadConINodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cHTypeAryFxmeet6kMpknEType__3_; +text: .text%__1cKmethodOperGmethod6kM_l_: ad_sparc.o; +text: .text%__1cIGraphKitbDtransfer_exceptions_into_jvms6M_pnIJVMState__; +text: .text%__1cOPhaseIdealLoopRregister_new_node6MpnENode_2_v_; +text: .text%__1cIciMethodRhas_compiled_code6M_i_; +text: .text%__1cNflagsRegUOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFKlassIsubklass6kM_p0_; +text: .text%__1cMciMethodDataLhas_trap_at6MpnLProfileData_i_i_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: connode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: connode.o; +text: .text%__1cURethrowExceptionNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cIGraphKitJsync_jvms6kM_pnIJVMState__; +text: .text%__1cLTypeInstPtrFxdual6kM_pknEType__; +text: .text%__1cPemit_call_reloc6FrnKCodeBuffer_lnJrelocInfoJrelocType_iii_v_; +text: .text%__1cGOopMap2t6Mii_v_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodKlass.o; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: methodLiveness.o; +text: .text%__1cEUTF8Hstrrchr6FpWiW_1_; +text: .text%__1cSaddI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_; +text: .text%__1cJOopMapSetKadd_gc_map6MiipnGOopMap__v_; +text: .text%__1cTOopMapForCacheEntryRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cQsubI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNprefetch2NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%JVM_GetCPMethodNameUTF; +text: .text%__1cKstoreINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cGvframe2t6MpknFframe_pknLRegisterMap_pnKJavaThread__v_; +text: .text%__1cLRegisterMap2t6Mpk0_v_; +text: .text%__1cIGraphKitbLset_predefined_input_for_runtime_call6MpnNSafePointNode__v_; +text: .text%__1cKReturnNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeHis_Goto6kM_I_: cfgnode.o; +text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cJLoadCNodeGOpcode6kM_i_; +text: .text%__1cHConNodeEmake6FpknEType__p0_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: ciTypeFlow.o; +text: .text%__1cGvframeKnew_vframe6FpknFframe_pknLRegisterMap_pnKJavaThread__p0_; +text: 
.text%__1cHOopFlowNbuild_oop_map6MpnENode_ipnNPhaseRegAlloc_pi_pnGOopMap__; +text: .text%__1cRMachSafePointNodeLset_oop_map6MpnGOopMap__v_: ad_sparc_misc.o; +text: .text%__1cRMachSafePointNodePis_MachCallLeaf6M_pnQMachCallLeafNode__: ad_sparc_misc.o; +text: .text%__1cNMachIdealNodePoper_input_base6kM_I_: machnode.o; +text: .text%__1cQjava_lang_StringQbasic_create_oop6FpnQtypeArrayOopDesc_ipnGThread__pnHoopDesc__; +text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_; +text: .text%__1cILoopNodeGOpcode6kM_i_; +text: .text%__1cYDebugInformationRecorderNadd_safepoint6MiipnGOopMap__v_; +text: .text%__1cHCompileTProcess_OopMap_Node6MpnIMachNode_i_v_; +text: .text%__1cPsp_ptr_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cOMacroAssemblerZneeds_explicit_null_check6Fl_i_; +text: .text%__1cRshlL_reg_imm6NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: callnode.o; +text: .text%__1cHPhiNodeEmake6FpnENode_2_p0_; +text: .text%__1cICmpINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_; +text: .text%__1cHTypeIntFxdual6kM_pknEType__; +text: .text%__1cLPCTableNodeLbottom_type6kM_pknEType__; +text: .text%__1cXjava_lang_ref_ReferenceNreferent_addr6FpnHoopDesc__p2_; +text: .text%__1cRshrI_reg_imm5NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPorI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKReturnNodeKmatch_edge6kMI_I_; +text: .text%__1cKBranchDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cNCatchProjNodeEhash6kM_I_; +text: .text%__1cRMachSafePointNodeSis_MachCallRuntime6M_pnTMachCallRuntimeNode__: ad_sparc_misc.o; +text: .text%__1cRMachSafePointNodeRis_safepoint_node6kM_i_: ad_sparc_misc.o; +text: .text%__1cQsubI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKReturnNodeGOpcode6kM_i_; +text: .text%__1cOPhaseIdealLoopGspinup6MpnENode_2222pnLsmall_cache__2_; +text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cMCreateExNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__: callnode.o; +text: .text%__1cNSafePointNodeEhash6kM_I_: callnode.o; +text: .text%__1cRMachSafePointNodeWis_MachCallInterpreter6M_pnXMachCallInterpreterNode__: ad_sparc_misc.o; +text: .text%__1cNSignatureInfoGdo_int6M_v_: frame.o; +text: .text%__1cKstoreCNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cICallNodeJideal_reg6kM_I_: callnode.o; +text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__; +text: .text%__1cPmethodDataKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLBoxLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerEcall6MpCnJrelocInfoJrelocType__v_: ad_sparc.o; +text: .text%__1cHCompileZintrinsic_insertion_index6MpnIciMethod_i_i_; +text: .text%__1cMPhaseIterGVNJtransform6MpnENode__2_; +text: .text%__1cEUTF8Ounicode_length6Fpkci_i_; +text: .text%__1cIPhaseIFGFUnion6MII_v_; +text: .text%__1cKStoreLNodeGOpcode6kM_i_; +text: .text%__1cMCallJavaNodeLis_CallJava6kM_pk0_: callnode.o; +text: .text%__1cQMachCallJavaNodePis_MachCallJava6M_p0_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopHdom_lca6kMpnENode_2_2_; +text: 
.text%__1cRshlI_reg_imm5NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cICallNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cYCallStaticJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCallGenerator2t6MpnIciMethod__v_; +text: .text%__1cWMachCallStaticJavaNodeVis_MachCallStaticJava6M_p0_: ad_sparc_misc.o; +text: .text%__1cPThreadLocalNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKstoreCNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: connode.o; +text: .text%__1cPClassFileParserbCverify_legal_field_modifiers6MiipnGThread__v_; +text: .text%__1cPClassFileParserWparse_field_attributes6MnSconstantPoolHandle_iHpHpi2pnPtypeArrayHandle_pnGThread__v_; +text: .text%__1cYCallStaticJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYCallStaticJavaDirectNodeKmethod_set6Ml_v_; +text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_; +text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cOPhaseIdealLoopKhandle_use6MpnENode_2pnLsmall_cache_22222_v_; +text: .text%__1cOPhaseIdealLoopOfind_use_block6MpnENode_22222_2_; +text: .text%__1cKciTypeFlowKflow_block6Mpn0AFBlock_pn0ALStateVector_pn0AGJsrSet__v_; +text: .text%__1cKciTypeFlowFBlockKsuccessors6MpnQciByteCodeStream_pn0ALStateVector_pn0AGJsrSet__pnNGrowableArray4Cp1___; +text: .text%__1cKciTypeFlowQadd_to_work_list6Mpn0AFBlock__v_; +text: .text%__1cINodeHashLhash_insert6MpnENode__v_; +text: .text%__1cILoopNodeHis_Loop6M_p0_: classes.o; +text: .text%__1cSCallLeafDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJVectorSetFClear6M_v_; +text: .text%__1cEUTF8Enext6FpkcpH_pc_; +text: .text%__1cKstorePNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeJset_req_X6MIp0pnMPhaseIterGVN__v_; +text: .text%__1cJloadLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPorI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowFBlock2t6Mp0pn0AFRange_pn0AGJsrSet__v_; +text: .text%__1cKciTypeFlowFBlockScompute_exceptions6M_v_; +text: .text%__1cYciExceptionHandlerStreamFcount6M_i_; +text: .text%__1cMindIndexOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cSmembar_acquireNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSaddP_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLLShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRcmpFastUnlockNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cObranchConPNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: memnode.o; +text: .text%__1cLstoreB0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMelapsedTimerFstart6M_v_; +text: .text%__1cMelapsedTimerEstop6M_v_; +text: .text%__1cGciTypeMis_classless6kM_i_: ciType.o; +text: .text%__1cQmark_inner_loops6FpnIPhaseCFG_pnFBlock__v_: block.o; +text: .text%__1cHCompileSflatten_alias_type6kMpknHTypePtr__3_; +text: .text%__1cRInterpretedRFrameEinit6M_v_; +text: .text%__1cENodeLnonnull_req6kM_p0_; +text: .text%__1cRshrI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverbNlinktime_resolve_virtual_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%JVM_GetCPFieldSignatureUTF; +text: .text%__1cYcompareAndSwapL_boolNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMLinkResolverbEruntime_resolve_virtual_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cPconvI2L_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cICmpLNodeGOpcode6kM_i_; +text: 
.text%__1cHnmethodZsize_of_exception_handler6F_i_; +text: .text%__1cFKlassXcan_be_statically_bound6FpnNmethodOopDesc__i_; +text: .text%__1cSMemBarCPUOrderNodeGOpcode6kM_i_; +text: .text%__1cKMemBarNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHMulNodeEhash6kM_I_; +text: .text%__1cHnmethodKcopy_pc_at6MipnGPcDesc__v_; +text: .text%__1cGPcDesc2t6Miii_v_; +text: .text%__1cLPhaseValuesHzerocon6MnJBasicType__pnHConNode__; +text: .text%__1cRshlI_reg_imm5NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIGraphKit2t6M_v_; +text: .text%__1cYcompareAndSwapL_boolNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: loopnode.o; +text: .text%jni_DeleteLocalRef: jni.o; +text: .text%__1cIGraphKitNset_map_clone6MpnNSafePointNode__v_; +text: .text%__1cOMethodLivenessNmake_block_at6Mipn0AKBasicBlock__2_; +text: .text%__1cMoutputStreamDput6Mc_v_; +text: .text%__1cMtlsLoadPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cENodeLbottom_type6kM_pknEType__; +text: .text%__1cIAddPNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cPcmpFastLockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cICallNodeSis_CallDynamicJava6kM_pknTCallDynamicJavaNode__: callnode.o; +text: .text%__1cMCreateExNodeGpinned6kM_i_: classes.o; +text: .text%__1cSInterpreterRuntimeDldc6FpnKJavaThread_i_v_; +text: .text%__1cIXorINodeGOpcode6kM_i_; +text: .text%__1cPCountedLoopNodeOis_CountedLoop6M_p0_: classes.o; +text: .text%__1cObranchConUNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlassKlass.o; +text: .text%__1cRshlL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__; +text: .text%__1cFParsePdo_field_access6Mii_v_; +text: .text%__1cILoadNodeEmake6FpnENode_22pknHTypePtr_pknEType_nJBasicType__p0_; +text: .text%__1cOloadConI13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXjava_lang_ref_ReferencePdiscovered_addr6FpnHoopDesc__p2_; +text: .text%__1cSmembar_acquireNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSaddP_reg_imm13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cRshrI_reg_imm5NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddP_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: callnode.o; +text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__; +text: .text%__1cIGraphKitNuncommon_trap6MipnHciKlass_pkci_v_; +text: .text%__1cIregDOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cTmembar_CPUOrderNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNloadConP0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompileKalias_type6MpnHciField__pn0AJAliasType__; +text: .text%__1cLOptoRuntimeSuncommon_trap_Type6F_pknITypeFunc__; +text: .text%__1cIHaltNode2t6MpnENode_2_v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: ad_sparc.o; +text: .text%__1cRMachNullCheckNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: machnode.o; +text: .text%__1cPmethodDataKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cPmethodDataKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: 
.text%__1cTStackWalkCompPolicyRcompilation_level6MnMmethodHandle_i_i_; +text: .text%__1cRsarI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cXmembar_acquire_lockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: ad_sparc.o; +text: .text%__1cQaddI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cHMonitorKnotify_all6M_i_; +text: .text%__1cNloadKlassNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cWCallLeafNoFPDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIciSymbolEmake6Fpkc_p0_; +text: .text%__1cRshlI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCompareAndSwapNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cQmulL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: ad_sparc_misc.o; +text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__; +text: .text%__1cJFieldTypeYis_valid_array_signature6FpnNsymbolOopDesc__i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: callnode.o; +text: .text%__1cNloadConP0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerFsethi6MrnHAddress_i_v_: ad_sparc.o; +text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cMPhaseChaitinNFind_compress6MpknENode__I_; +text: .text%__1cIGraphKitJmake_load6MpnENode_2pknEType_nJBasicType_i_2_; +text: .text%__1cPorI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciMethodRget_flow_analysis6M_pnKciTypeFlow__; +text: .text%__1cSsafePoint_pollNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKbranchNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cKbranchNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNLoadKlassNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%jni_GetObjectField: jni.o; +text: .text%__1cNbranchConNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRethrowNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cNmethodOopDescWwas_executed_more_than6kMi_i_; +text: .text%__1cOcompI_iRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitOreplace_in_map6MpnENode_2_v_; +text: .text%__1cKRegionNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIAddINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: connode.o; +text: .text%__1cNloadKlassNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQandL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cINodeHashJhash_find6MpknENode__p1_; +text: .text%__1cHCompileKTracePhase2T6M_v_; +text: .text%__1cXjava_lang_ref_ReferenceJnext_addr6FpnHoopDesc__p2_; +text: .text%__1cJloadSNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: nmethod.o; +text: .text%__1cJloadBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_: methodDataOop.o; +text: .text%__1cYinlineCallClearArrayNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFChunk2n6FLL_pv_; +text: .text%__1cMPhaseChaitinLclone_projs6MpnFBlock_IpnENode_4rI_i_; +text: .text%__1cQciByteCodeStreamZget_declared_field_holder6M_pnPciInstanceKlass__; +text: .text%__1cHTypeIntFwiden6kMpknEType__3_; +text: 
.text%__1cKciTypeFlowGJsrSetNapply_control6Mp0pnQciByteCodeStream_pn0ALStateVector__v_; +text: .text%__1cMLinkResolverbFlinktime_resolve_special_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_special_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_4ipnGThread__v_; +text: .text%__1cIJVMState2t6MpnIciMethod_p0_v_; +text: .text%__1cOMethodLivenessKBasicBlock2t6Mp0ii_v_; +text: .text%__1cOMethodLivenessKBasicBlockQcompute_gen_kill6MpnIciMethod__v_; +text: .text%__1cHAddNodePadd_of_identity6kMpknEType_3_3_; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: memnode.o; +text: .text%__1cJTypeTupleKmake_range6FpnLciSignature__pk0_; +text: .text%__1cTCompareAndSwapLNodeGOpcode6kM_i_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6FnUtypeArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cJTypeTupleLmake_domain6FpnPciInstanceKlass_pnLciSignature__pk0_; +text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_; +text: .text%__1cMTypeKlassPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cLstoreI0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshrP_reg_imm6NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cTCreateExceptionNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMtlsLoadPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNloadConP0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQandL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_; +text: .text%__1cHTypeIntEmake6Fii_pk0_; +text: .text%__1cHCompilebAallow_range_check_smearing6kM_i_; +text: .text%__1cSsafePoint_pollNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_: ciMethodData.o; +text: .text%__1cMWarmCallInfoGis_hot6kM_i_; +text: .text%__1cMWarmCallInfoKalways_hot6F_p0_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: graphKit.o; +text: .text%__1cLBuildCutout2t6MpnIGraphKit_pnENode_ff_v_; +text: .text%__1cRshlI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreCNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: memnode.o; +text: .text%__1cJloadLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHMulNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMURShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFParseKdo_get_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cKarrayKlassLobject_size6kMi_i_; +text: .text%__1cIProjNodeDcmp6kMrknENode__I_; +text: .text%__1cKMemBarNodeEhash6kM_I_; +text: .text%__1cPcompP_iRegPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__: loopnode.o; +text: .text%__1cKRegionNodeGpinned6kM_i_: loopnode.o; +text: .text%__1cLstoreB0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKJavaThreadbHcheck_and_handle_async_exceptions6Mi_v_; +text: .text%__1cLBoxLockNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cRsarI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJOopMapSetTupdate_register_map6FpknFframe_pnICodeBlob_pnLRegisterMap__v_; +text: .text%__1cNloadKlassNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPorI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKciTypeFlowLStateVectorJdo_invoke6MpnQciByteCodeStream_i_v_; +text: .text%__1cMMergeMemNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKRelocationJpack_data6M_i_: relocInfo.o; 
+text: .text%__1cENodeHis_Root6M_pnIRootNode__: memnode.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: cfgnode.o; +text: .text%__1cNCatchProjNode2t6MpnENode_Ii_v_; +text: .text%__1cNCatchProjNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cFKlassOis_subclass_of6kMpnMklassOopDesc__i_; +text: .text%__1cIGraphKitZadd_exception_states_from6MpnIJVMState__v_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: cfgnode.o; +text: .text%__1cLPCTableNodeKis_PCTable6kM_pk0_: classes.o; +text: .text%__1cNloadRangeNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_; +text: .text%__1cGRFrame2t6MnFframe_pnKJavaThread_kp0_v_; +text: .text%__1cIciMethodTcall_profile_at_bci6Mi_nNciCallProfile__; +text: .text%__1cHCompileOcall_generator6MpnIciMethod_ipnIJVMState_if_pnNCallGenerator__; +text: .text%__1cNciCallProfileRapply_prof_factor6Mf_v_; +text: .text%__1cHCompileOfind_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cWCallLeafNoFPDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMPhaseChaitinTsplit_Rematerialize6MpnENode_pnFBlock_IrInNGrowableArray4CI__ipIp2i_2_; +text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__; +text: .text%__1cFParseHdo_call6M_v_; +text: .text%__1cIGraphKitWround_double_arguments6MpnIciMethod__v_; +text: .text%__1cQciByteCodeStreambAget_declared_method_holder6M_pnHciKlass__; +text: .text%__1cIGraphKitTround_double_result6MpnIciMethod__v_; +text: .text%__1cFParseMprofile_call6MpnENode__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlass.o; +text: .text%__1cQmulL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cTmembar_CPUOrderNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_; +text: .text%__1cNloadKlassNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLPCTableNodeHsize_of6kM_I_: classes.o; +text: .text%__1cNGCTaskManagerIget_task6MI_pnGGCTask__; +text: .text%__1cLGCTaskQdDueueGremove6M_pnGGCTask__; +text: .text%__1cNGCTaskManagerYshould_release_resources6MI_i_; +text: .text%__1cLGCTaskQdDueueHenqueue6MpnGGCTask__v_; +text: .text%__1cNGCTaskManagerPnote_completion6MI_v_; +text: .text%__1cPcmpFastLockNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitUmake_exception_state6MpnENode__pnNSafePointNode__; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceKlass.o; +text: .text%__1cPcmpFastLockNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMloadConFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_: objArrayKlassKlass.o; +text: .text%__1cKstoreCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLProfileDataOtranslate_from6Mp0_v_: ciMethodData.o; +text: .text%__1cFStateM_sub_Op_ConP6MpknENode__v_; +text: .text%__1cIAddINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cFframeVinterpreter_frame_bci6kM_i_; +text: .text%__1cSaddP_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMachNodeHtwo_adr6kM_I_: machnode.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: machnode.o; +text: .text%__1cRsarI_reg_imm5NodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cOCompilerOracleOshould_exclude6FnMmethodHandle__i_; +text: .text%__1cIciMethodLscale_count6Mi_i_; +text: .text%__1cIBoolNodeHsize_of6kM_I_; +text: .text%__1cGGCTask2t6M_v_; +text: .text%__1cSsafePoint_pollNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cGIfNodeHsize_of6kM_I_: classes.o; +text: .text%__1cICodeHeapSallocated_capacity6kM_L_; +text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__; +text: .text%__1cOCallRelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cCosOis_interrupted6FpnGThread_i_i_; +text: .text%__1cJloadCNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cOstackSlotLOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cLLShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIGraphKitMsaved_ex_oop6FpnNSafePointNode__pnENode__; +text: .text%__1cKBranchDataNis_BranchData6M_i_: ciMethodData.o; +text: .text%__1cLmethodKlassNoop_is_method6kM_i_: methodKlass.o; +text: .text%__1cKimmL13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cOstackSlotLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNSafePointNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIJumpDataLis_JumpData6M_i_: ciMethodData.o; +text: .text%__1cILoopNodeHis_Loop6M_p0_: loopnode.o; +text: .text%__1cSstkL_to_regD_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshlL_reg_imm6NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cTcan_branch_register6FpnENode_1_i_; +text: .text%__1cQCallLeafNoFPNodeGOpcode6kM_i_; +text: .text%__1cGThreadXclear_pending_exception6M_v_; +text: .text%__1cSPSPromotionManagerbBgc_thread_promotion_manager6Fi_p0_; +text: .text%__1cSstkL_to_regD_2NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadPNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__; +text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__; +text: .text%__1cNSignatureInfoGdo_int6M_v_: bytecode.o; +text: .text%__1cKInlineTreeMok_to_inline6MpnIciMethod_pnIJVMState_rnNciCallProfile_pnMWarmCallInfo__8_; +text: .text%__1cIciMethodbAinterpreter_throwout_count6kM_i_; +text: .text%__1cOCompilerOracleNshould_inline6FnMmethodHandle__i_; +text: .text%__1cIciMethodOshould_exclude6M_i_; +text: .text%__1cIciMethodNshould_inline6M_i_; +text: .text%__1cKInlineTreeWfind_subtree_from_root6Fp0pnIJVMState_pnIciMethod_i_1_; +text: .text%__1cRshrI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXmembar_release_lockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cICHeapObj2n6FL_pv_; +text: .text%__1cIGraphKitTset_all_memory_call6MpnENode__v_; +text: .text%__1cFTypeDCeq6kMpknEType__i_; +text: .text%__1cIciMethodWwas_executed_more_than6Mi_i_; +text: .text%__1cWShouldNotReachHereNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIHaltNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIGraphKitOtoo_many_traps6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cWShouldNotReachHereNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitTtoo_many_recompiles6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cJloadBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMloadConINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: connode.o; +text: .text%__1cCosFsleep6FpnGThread_xi_i_; +text: .text%__1cIos_sleep6Fxi_i_: os_solaris.o; +text: .text%__1cRInvocationCounterJset_carry6M_v_; +text: .text%__1cKimmL13OperIconstant6kM_l_: ad_sparc_clone.o; +text: .text%__1cFTypeDEhash6kM_i_; +text: 
.text%__1cPconvL2I_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRInterpreterOopMap2T6M_v_; +text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_; +text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cRInterpreterOopMap2t6M_v_; +text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_; +text: .text%__1cFframeToops_interpreted_do6MpnKOopClosure_pknLRegisterMap_i_v_; +text: .text%__1cOClearArrayNodeGOpcode6kM_i_; +text: .text%__1cJStartNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cJloadSNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQandL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIMachNodeRget_base_and_disp6kMrlrpknHTypePtr__pknENode__; +text: .text%__1cJloadLNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodKlass.o; +text: .text%jni_ExceptionOccurred: jni.o; +text: .text%__1cJloadSNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cNNativeFarCallKis_call_at6FpC_i_; +text: .text%__1cHoopDescSslow_identity_hash6M_l_; +text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__l_; +text: .text%__1cYcompareAndSwapL_boolNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%jni_GetByteArrayRegion: jni.o; +text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cLstoreP0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cObranchConUNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cUcompU_iReg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cKBufferBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_: statSampler.o; +text: .text%__1cRis_error_reported6F_i_; +text: .text%__1cMPeriodicTaskOreal_time_tick6FL_v_; +text: .text%__1cPStatSamplerTaskEtask6M_v_: statSampler.o; +text: .text%__1cNWatcherThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cURethrowExceptionNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPciInstanceKlassFsuper6M_p0_; +text: .text%__1cKciTypeFlowLStateVectorStype_meet_internal6FpnGciType_3p0_3_; +text: .text%__1cETypeRget_typeflow_type6FpnGciType__pk0_; +text: .text%__1cNmethodOopDescbHhas_unloaded_classes_in_signature6FnMmethodHandle_pnGThread__i_; +text: .text%__1cWCallLeafNoFPDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%jni_GetArrayLength: jni.o; +text: .text%__1cIGraphKitNcast_not_null6MpnENode__2_; +text: .text%__1cJStartNodeIis_Start6M_p0_: classes.o; +text: .text%__1cLLShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSInterpreterRuntimebAfrequency_counter_overflow6FpnKJavaThread_pC_n0AJIcoResult__; +text: .text%__1cQsubI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQaddI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMindIndexOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cVCompressedWriteStreamEgrow6M_v_; +text: .text%__1cWCallLeafNoFPDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQandL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_Write; +text: 
.text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_; +text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_; +text: .text%__1cOMachReturnNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cPcheckCastPPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassVis_same_class_package6FpnHoopDesc_pnNsymbolOopDesc_24_i_; +text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_; +text: .text%__1cIciMethod2t6MnMmethodHandle__v_; +text: .text%__1cFParseRensure_memory_phi6Mii_pnHPhiNode__; +text: .text%__1cEUTF8Fequal6FpWi1i_i_; +text: .text%__1cICodeHeapIcapacity6kM_L_; +text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cMCodeHeapPoolNused_in_bytes6M_L_: memoryPool.o; +text: .text%__1cKMemoryPoolImax_size6kM_L_: memoryPool.o; +text: .text%__1cENodeGis_Con6kM_I_: multnode.o; +text: .text%__1cGThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%JVM_RawMonitorExit; +text: .text%__1cFMutexMjvm_raw_lock6M_v_; +text: .text%JVM_RawMonitorEnter; +text: .text%__1cIParmNodeJideal_reg6kM_I_; +text: .text%__1cFMutexOjvm_raw_unlock6M_v_; +text: .text%__1cLRethrowNodeGOpcode6kM_i_; +text: .text%__1cPClassFileParserUskip_over_field_name6MpciI_1_; +text: .text%__1cRbranchLoopEndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLTypeInstPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cSmembar_acquireNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cYinlineCallClearArrayNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMloadConFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNmethodOopDescWload_signature_classes6FnMmethodHandle_pnGThread__i_; +text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cTconstantPoolOopDescbDresolve_string_constants_impl6FnSconstantPoolHandle_pnGThread__v_; +text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__; +text: .text%__1cNCompileBrokerTcompile_method_base6FnMmethodHandle_ii1ipkcpnGThread__pnHnmethod__; +text: .text%__1cWstatic_stub_RelocationJpack_data6M_i_; +text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_; +text: .text%__1cNprefetch2NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o; +text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: cfgnode.o; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: classes.o; +text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_; +text: .text%__1cKg1RegIOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cIimmLOperJconstantL6kM_x_: ad_sparc_clone.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: loopnode.o; +text: .text%__1cLLShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJCodeCacheIcontains6Fpv_i_; +text: .text%__1cQmulL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerFsethi6MrnHAddress_i_v_: assembler_sparc.o; +text: .text%__1cPcmpFastLockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIGraphKitNbuiltin_throw6MnODeoptimizationLDeoptReason_pnENode__v_; +text: .text%__1cFParseUprofile_taken_branch6Mi_v_; +text: .text%__1cFParseFmerge6Mi_v_; +text: .text%__1cHConNodeLout_RegMask6kM_rknHRegMask__: classes.o; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: bytecode.o; 
+text: .text%__1cIGraphKitHjava_bc6kM_nJBytecodesECode__; +text: .text%__1cJloadBNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cMloadConFNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKMemBarNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cPciInstanceKlassUget_canonical_holder6Mi_p0_; +text: .text%__1cNCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cLLShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cMLinkResolverTresolve_static_call6FrnICallInfo_rnLKlassHandle_nMsymbolHandle_53iipnGThread__v_; +text: .text%__1cLPCTableNodeEhash6kM_I_; +text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cJOopMapSetGall_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure_pFppnHoopDesc_9E_v9B9B_v_; +text: .text%__1cJOopMapSetHoops_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cOloadConI13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlass.o; +text: .text%__1cJloadLNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cSandI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFArena2T6M_v_; +text: .text%__1cSbranchCon_longNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLOptoRuntimebCcomplete_monitor_unlocking_C6FpnHoopDesc_pnJBasicLock__v_; +text: .text%__1cQandI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLOptoRuntimebAcomplete_monitor_locking_C6FpnHoopDesc_pnJBasicLock_pnKJavaThread__v_; +text: .text%__1cNflagsRegLOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cMPhaseChaitinNFind_compress6MI_I_; +text: .text%__1cJTypeTupleFxdual6kM_pknEType__; +text: .text%__1cQandI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLRuntimeStubYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cMloadConLNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cYinlineCallClearArrayNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJStoreNodeSIdeal_masked_input6MpnIPhaseGVN_I_pnENode__; +text: .text%__1cKTypeAryPtrFempty6kM_i_; +text: .text%__1cHTypeAryFempty6kM_i_; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: psTasks.o; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnMklassOopDesc__i_; +text: .text%__1cRcmpFastUnlockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__; +text: .text%__1cJStoreNodeZIdeal_sign_extended_input6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKTypeRawPtrCeq6kMpknEType__i_; +text: .text%__1cRshlI_reg_imm5NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFStateN_sub_Op_LoadP6MpknENode__v_; +text: .text%__1cIAddLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cJloadCNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadSNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsarI_reg_imm5NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadBNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKMemBarNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLklassVtableXvtable_accessibility_at6Mi_n0AKAccessType__; +text: .text%__1cSaddL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: 
.text%jni_GetSuperclass: jni.o; +text: .text%__1cKInlineTreePshouldNotInline6kMpnIciMethod_pnMWarmCallInfo__pkc_; +text: .text%__1cLOpaque1NodeGOpcode6kM_i_; +text: .text%__1cIJumpDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cQxorI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosbCstack_shadow_pages_available6FpnGThread_nMmethodHandle__i_; +text: .text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_; +text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_; +text: .text%__1cIciMethodbHhas_unloaded_classes_in_signature6M_i_; +text: .text%__1cFframeNis_glue_frame6kM_i_; +text: .text%__1cObranchConPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMtlsLoadPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMindirectOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKcmpOpPOperFccode6kM_i_: ad_sparc_clone.o; +text: .text%__1cRshrP_reg_imm6NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPcmpFastLockNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cObranchConPNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cObranchConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIimmPOperIconstant6kM_l_: ad_sparc_clone.o; +text: .text%__1cIciObjectFklass6M_pnHciKlass__; +text: .text%__1cISubINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLOptoRuntimeOnew_objArray_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cFframeUentry_frame_is_first6kM_i_; +text: .text%__1cGciType2t6MnLKlassHandle__v_; +text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_; +text: .text%__1cIGraphKitPstore_to_memory6MpnENode_22nJBasicType_i_2_; +text: .text%__1cJStoreNodeEmake6FpnENode_22pknHTypePtr_2nJBasicType__p0_; +text: .text%__1cHciKlass2t6MnLKlassHandle__v_; +text: .text%__1cKstoreLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRshrP_reg_imm6NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseRoptimize_inlining6MpnIciMethod_ipnPciInstanceKlass_24irnKInlineTreeLInlineStyle_r2_v_; +text: .text%__1cJcmpOpOperFccode6kM_i_: ad_sparc_clone.o; +text: .text%__1cQmulL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNloadKlassNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRinterpretedVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cITypeFuncFxdual6kM_pknEType__; +text: .text%__1cICHeapObj2k6Fpv_v_; +text: .text%__1cHPhiNodeMslice_memory6kMpknHTypePtr__p0_; +text: .text%__1cXPhaseAggressiveCoalesceYinsert_copy_with_overlap6MpnFBlock_pnENode_II_v_; +text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_; +text: .text%__1cFTypeFEhash6kM_i_; +text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__; +text: .text%__1cFArena2t6M_v_; +text: .text%__1cRmethodDataOopDescJbci_to_dp6Mi_pC_; +text: .text%__1cPorI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_: classes.o; +text: .text%__1cFParseOreturn_current6MpnENode__v_; +text: .text%__1cLstoreP0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMURShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_; +text: .text%__1cILoadNodeDcmp6kMrknENode__I_; +text: .text%__1cPCheckCastPPNodeJideal_reg6kM_I_: connode.o; +text: .text%__1cENodeRlatency_from_uses6kMrnLBlock_Array_rnNGrowableArray4CI___i_; +text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_; +text: 
.text%__1cQxorI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHMatcherPstack_alignment6F_I_; +text: .text%__1cFStateM_sub_Op_CmpI6MpknENode__v_; +text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_; +text: .text%__1cIBoolNodeDcmp6kMrknENode__I_; +text: .text%jni_ReleasePrimitiveArrayCritical: jni.o; +text: .text%jni_GetPrimitiveArrayCritical: jni.o; +text: .text%__1cETypeCeq6kMpk0_i_; +text: .text%__1cSmembar_releaseNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowGJsrSetSis_compatible_with6Mp1_i_; +text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_; +text: .text%__1cWstatic_stub_RelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cRloadConP_pollNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cNimmP_pollOperEtype6kM_pknEType__: ad_sparc_clone.o; +text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_; +text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cLBoxLockNodeKstack_slot6FpnENode__nHOptoRegEName__; +text: .text%__1cLBoxLockNodeKis_BoxLock6kM_pk0_: classes.o; +text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cHnmethodbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cLRethrowNodeKmatch_edge6kMI_I_; +text: .text%__1cIregFOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cKTypeAryPtrFxdual6kM_pknEType__; +text: .text%__1cOMacroAssemblerEjump6MrnHAddress_ipkci_v_; +text: .text%__1cOMacroAssemblerFjumpl6MrnHAddress_pnMRegisterImpl_ipkci_v_; +text: .text%__1cIMulINodeGOpcode6kM_i_; +text: .text%__1cHTypeAryEmake6FpknEType_pknHTypeInt__pk0_; +text: .text%__1cHciKlassNis_subtype_of6Mp0_i_; +text: .text%__1cRcompL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRshrI_reg_imm5NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciMethodLis_accessor6kM_i_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlass.o; +text: .text%__1cKCastPPNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cFParseRbranch_prediction6Mrf_f_; +text: .text%__1cFStateM_sub_Op_AddI6MpknENode__v_; +text: .text%__1cMURShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKstorePNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJMultiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: callnode.o; +text: .text%__1cMloadConINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_; +text: .text%__1cKstoreINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGOpcode6kM_i_; +text: .text%__1cIMachNodeJis_MachIf6kM_pknKMachIfNode__: ad_sparc.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: cfgnode.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: cfgnode.o; +text: .text%__1cOPhaseIdealLoopOsplit_thru_phi6MpnENode_2i_2_; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_: loopnode.o; +text: .text%__1cRshrP_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMURShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSCallLeafDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cLCastP2LNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cISubINodeDsub6kMpknEType_3_3_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_; +text: 
.text%__1cFParseLbuild_exits6M_v_; +text: .text%__1cIciMethodPcan_be_compiled6M_i_; +text: .text%__1cFParseIdo_exits6M_v_; +text: .text%__1cFParseQcreate_entry_map6M_pnNSafePointNode__; +text: .text%__1cIciMethodVhas_balanced_monitors6M_i_; +text: .text%__1cFParse2t6MpnIJVMState_pnIciMethod_f_v_; +text: .text%__1cFArenaEused6kM_L_; +text: .text%__1cOParseGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: callnode.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: callnode.o; +text: .text%__1cNbranchConNodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_IsSameObject: jni.o; +text: .text%__1cNCallGeneratorKfor_inline6FpnIciMethod_f_p0_; +text: .text%__1cFParsePdo_method_entry6M_v_; +text: .text%__1cGRFrameGcaller6M_p0_; +text: .text%__1cFframeLreal_sender6kMpnLRegisterMap__0_; +text: .text%__1cFframeTis_first_java_frame6kM_i_; +text: .text%__1cFframeNis_java_frame6kM_i_; +text: .text%__1cFBlockTimplicit_null_check6MrnLBlock_Array_rnNGrowableArray4CI__pnENode_6_v_; +text: .text%__1cNinstanceKlassbDcheck_valid_for_instantiation6MipnGThread__v_; +text: .text%__1cKstoreLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQandI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNbranchConNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cNbranchConNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmembar_releaseNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVLoaderConstraintTableJadd_entry6MnMsymbolHandle_pnMklassOopDesc_nGHandle_34pnGThread__i_; +text: .text%__1cbACallCompiledJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeHdel_out6Mp0_v_: library_call.o; +text: .text%__1cJiRegIOperFclone6kM_pnIMachOper__; +text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__; +text: .text%__1cKTypeRawPtrFxmeet6kMpknEType__3_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodDataKlass.o; +text: .text%__1cQandI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_; +text: .text%__1cPCheckCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: nmethod.o; +text: .text%__1cQLibraryIntrinsicKis_virtual6kM_i_: library_call.o; +text: .text%__1cRciVirtualCallDataOtranslate_from6MpnLProfileData__v_; +text: .text%__1cZPhaseConservativeCoalesceKupdate_ifg6MIIpnIIndexSet_2_v_; +text: .text%__1cZPhaseConservativeCoalesceMunion_helper6MpnENode_2II222pnFBlock_I_v_; +text: .text%__1cIIndexSetEswap6Mp0_v_; +text: .text%__1cXmembar_release_lockNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRRawBytecodeStream2t6MnMmethodHandle__v_; +text: .text%__1cJStartNodeOis_block_start6kM_i_: callnode.o; +text: .text%__1cLOpaque1NodeEhash6kM_I_; +text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_; +text: .text%__1cFTypeFCeq6kMpknEType__i_; +text: .text%__1cbACallCompiledJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_; +text: .text%__1cHCompileFstart6kM_pnJStartNode__; +text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cMCallLeafNodeLis_CallLeaf6kM_pk0_: classes.o; +text: .text%__1cPThreadLocalNodeGOpcode6kM_i_; +text: .text%__1cHnmethodVcleanup_inline_caches6M_v_; +text: .text%__1cSbranchCon_longNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__; +text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: 
.text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__; +text: .text%__1cJStartNodeOis_block_start6kM_i_: classes.o; +text: .text%__1cUThreadSafepointStateXexamine_state_of_thread6Mi_v_; +text: .text%__1cUSafepointSynchronizeOsafepoint_safe6FpnKJavaThread_nPJavaThreadState__i_; +text: .text%__1cQxorI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cYDebugInformationRecorderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: .text%__1cFParseYprofile_not_taken_branch6M_v_; +text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o; +text: .text%__1cLRegisterMapIpd_clear6M_v_; +text: .text%__1cIciObject2t6M_v_; +text: .text%__1cJloadCNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cUParallelScavengeHeapNtlab_capacity6kM_L_; +text: .text%__1cRbranchLoopEndNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMciMethodData2t6M_v_; +text: .text%__1cPciObjectFactoryUget_empty_methodData6M_pnMciMethodData__; +text: .text%__1cMPrefetchNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIregFOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cGBitMapIset_from6M0_v_; +text: .text%__1cIMulLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cIciMethodbBinterpreter_call_site_count6Mi_i_; +text: .text%__1cNinstanceKlassPlink_class_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cFParseFdo_if6MpnENode_2nIBoolTestEmask_2_v_; +text: .text%__1cZCallDynamicJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKciTypeFlowFBlockPclone_loop_head6Mp0ip1pn0AGJsrSet__3_; +text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_; +text: .text%__1cHCompileXin_preserve_stack_slots6M_I_; +text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cKTypeOopPtrEhash6kM_i_; +text: .text%__1cIMulINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLPCTableNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMCreateExNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_: jni.o; +text: .text%__1cNCatchProjNodeDcmp6kMrknENode__I_; +text: .text%__1cIGraphKitRmake_slow_call_ex6MpnENode_pnPciInstanceKlass__v_; +text: .text%__1cIAddLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cJloadLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_; +text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cHMulNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cYinlineCallClearArrayNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__; +text: .text%__1cMCreateExNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNloadRangeNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJloadBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSandI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshrP_reg_imm6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cbACallCompiledJavaDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: callnode.o; +text: .text%__1cFKlassMnext_sibling6kM_p0_; +text: .text%__1cPconvL2I_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_; +text: .text%__1cFBlockUhoist_LCA_above_defs6Mp01IrnLBlock_Array__1_; +text: .text%__1cQaddL_reg_regNodeErule6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%JVM_GetMethodIxModifiers; +text: .text%__1cHciKlassOis_subclass_of6Mp0_i_; +text: .text%__1cQjava_lang_StringGequals6FpnHoopDesc_pHi_i_; +text: .text%__1cMnegF_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHSubNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cVExceptionHandlerTableMadd_subtable6MipnNGrowableArray4Cl__2_v_; +text: .text%__1cTCreateExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRshrL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNSCMemProjNodeGOpcode6kM_i_; +text: .text%__1cMvframeStream2t6MpnKJavaThread_i_v_; +text: .text%__1cLRShiftLNodeGOpcode6kM_i_; +text: .text%__1cWThreadLocalAllocBufferFclear6M_v_; +text: .text%__1cDCHANprocess_class6FnLKlassHandle_pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: .text%JVM_IsInterface; +text: .text%__1cPThreadRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cPThreadRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cLsymbolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cIMulLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cQaddL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHMatcherQis_spillable_arg6Fi_i_; +text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cRsarI_reg_imm5NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_; +text: .text%__1cNprefetch2NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cGThreadQunboost_priority6Fp0_v_; +text: .text%__1cSstkL_to_regD_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHOrINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJloadINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cICodeHeapIallocate6ML_pv_; +text: .text%__1cICodeHeapPsearch_freelist6ML_pnJFreeBlock__; +text: .text%__1cFParseSmerge_memory_edges6MpnMMergeMemNode_ii_v_; +text: .text%__1cMPrefetchNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cQput_after_lookup6FnMsymbolHandle_0ppnLNameSigHash__i_; +text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_: codeBlob.o; +text: .text%__1cKstoreLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRcompL_reg_conNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQxorI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cUPipeline_Use_Element2t6M_v_: output.o; +text: .text%__1cKRelocationJpack_data6M_i_: codeBlob.o; +text: .text%__1cSstkL_to_regD_2NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cXmembar_release_lockNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cYcompareAndSwapL_boolNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadBNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNflagsRegLOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cJloadCNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompileSregister_intrinsic6MpnNCallGenerator__v_; +text: .text%__1cRInterpretedRFrameOis_interpreted6kM_i_: rframe.o; +text: .text%__1cGRFrameLis_compiled6kM_i_: rframe.o; +text: 
.text%__1cSstkL_to_regD_2NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKTypeRawPtrHget_con6kM_l_; +text: .text%__1cKRegionNodeEhash6kM_I_: loopnode.o; +text: .text%__1cSstkL_to_regD_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRelocationWfix_relocation_at_move6Ml_v_: codeBlob.o; +text: .text%__1cSstkL_to_regD_2NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cTCallDynamicJavaNodeGOpcode6kM_i_; +text: .text%__1cJloadCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSstkL_to_regD_2NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: loopnode.o; +text: .text%__1cMloadConLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFciEnvRfind_system_klass6MpnIciSymbol__pnHciKlass__; +text: .text%__1cPciInstanceKlassLfind_method6MpnIciSymbol_2_pnIciMethod__; +text: .text%__1cIGraphKitYcombine_exception_states6MpnNSafePointNode_2_v_; +text: .text%__1cQaddL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHUNICODEHas_utf86FpHi_pc_; +text: .text%__1cLstoreB0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPciInstanceKlassTget_field_by_offset6Mii_pnHciField__; +text: .text%__1cNinstanceKlassWfind_field_from_offset6kMiipnPfieldDescriptor__i_; +text: .text%__1cIDivINodeGOpcode6kM_i_; +text: .text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_: classes.o; +text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cKo0RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciObjectFactory.o; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjectFactory.o; +text: .text%__1cHnmethodKpc_desc_at6MpCi_pnGPcDesc__; +text: .text%__1cHMonitorEwait6Mil_i_; +text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__; +text: .text%__1cUcompU_iReg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQconstMethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cPorI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadSNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMinINodeGOpcode6kM_i_; +text: .text%__1cTStackWalkCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_; +text: .text%__1cKJavaThreadQlast_java_vframe6MpnLRegisterMap__pnKjavaVFrame__; +text: .text%__1cKjavaVFrameNis_java_frame6kM_i_: vframe.o; +text: .text%__1cTStackWalkCompPolicyVfindTopInlinableFrame6MpnNGrowableArray4CpnGRFrame____2_; +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_nMmethodHandle__v_; +text: .text%__1cLLShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_; +text: .text%__1cSvframeStreamCommonbHskip_method_invoke_and_aux_frames6M_v_; +text: .text%__1cNNativeFarCallPset_destination6MpC_v_; +text: .text%__1cIGraphKitXset_edges_for_java_call6MpnMCallJavaNode_i_v_; +text: .text%__1cJStartNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cObranchConUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitZset_results_for_java_call6MpnMCallJavaNode__pnENode__; +text: .text%__1cISubLNodeGOpcode6kM_i_; +text: .text%__1cZCallInterpreterDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLOpaque1NodeLbottom_type6kM_pknEType__: connode.o; +text: .text%__1cKcmpOpUOperFccode6kM_i_: ad_sparc_clone.o; +text: 
.text%__1cObranchConUNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cObranchConUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_; +text: .text%__1cLmethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cOMacroAssemblerNverify_thread6M_v_; +text: .text%__1cRbranchLoopEndNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIGraphKitMarray_length6MpnENode__2_; +text: .text%__1cLBlock_ArrayEgrow6MI_v_; +text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_; +text: .text%__1cSaddL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: methodDataOop.o; +text: .text%__1cSCountedLoopEndNodeKstride_con6kM_i_; +text: .text%__1cMjniIdSupportNto_method_oop6FpnK_jmethodID__pnNmethodOopDesc__; +text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_; +text: .text%__1cNSafePointNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPciObjArrayKlassSis_obj_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cHciField2t6MpnPfieldDescriptor__v_; +text: .text%__1cJloadLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLLShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNinstanceKlassXmark_dependent_nmethods6MpnMklassOopDesc__i_; +text: .text%__1cOMacroAssemblerUallocate_oop_address6MpnI_jobject_pnMRegisterImpl__nHAddress__; +text: .text%__1cKstoreBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLBoxLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvL2I_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSbranchCon_longNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQciByteCodeStreamMget_constant6M_nKciConstant__; +text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cJimmU5OperIconstant6kM_l_: ad_sparc_clone.o; +text: .text%__1cKMemBarNode2t6M_v_; +text: .text%__1cJloadSNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_; +text: .text%__1cSmembar_acquireNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIimmPOperPconstant_is_oop6kM_i_: ad_sparc_clone.o; +text: .text%__1cHRetNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIregFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cSaddI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRshrP_reg_imm6NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCodeBufferMstart_a_stub6M_v_; +text: .text%__1cTemit_java_to_interp6FrnKCodeBuffer__v_; +text: .text%__1cKCodeBufferKend_a_stub6M_v_; +text: .text%__1cObranchConPNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cKcmpOpPOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cLstoreP0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJiRegPOperFclone6kM_pnIMachOper__; +text: .text%__1cKMemBarNodeJis_MemBar6kM_pk0_: classes.o; +text: .text%__1cQandL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitOinsert_mem_bar6MpnKMemBarNode__v_; +text: .text%__1cRcompL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_; +text: .text%__1cLRShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: 
.text%__1cOMethodLivenessKBasicBlockJstore_one6Mi_v_; +text: .text%__1cKstoreBNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: memnode.o; +text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cOcompU_iRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOFastUnlockNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cLConvL2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cRbranchLoopEndNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescOis_initializer6kM_i_; +text: .text%__1cKStoreCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKMemBarNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMloadConPNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cKimmI13OperFclone6kM_pnIMachOper__; +text: .text%__1cNnmethodLocker2T6M_v_; +text: .text%__1cOloadConI13NodeFclone6kM_pnENode__; +text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffer_ipnLOopRecorder_pkcii_v_; +text: .text%__1cIGraphKitJpush_node6MnJBasicType_pnENode__v_: callGenerator.o; +text: .text%jni_SetObjectArrayElement: jni.o; +text: .text%__1cIGraphKitbBset_arguments_for_java_call6MpnMCallJavaNode__v_; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_: universe.o; +text: .text%__1cNCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cRbranchLoopEndNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: bytecode.o; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: frame.o; +text: .text%__1cPSignatureStreamRas_symbol_or_null6M_pnNsymbolOopDesc__; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__: classes.o; +text: .text%__1cHnmethodOis_java_method6kM_i_: nmethod.o; +text: .text%__1cNnmethodLocker2t6MpnHnmethod__v_; +text: .text%__1cOClearArrayNodeKmatch_edge6kMI_I_; +text: .text%__1cSbranchCon_longNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_ltNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cbACallCompiledJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cLstoreP0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cPcmpFastLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitbMset_predefined_output_for_runtime_call6MpnENode_pnMMergeMemNode__v_; +text: .text%__1cIGraphKitGmemory6MI_pnENode__; +text: .text%__1cZresource_reallocate_bytes6FpcLL_0_; +text: .text%__1cSbranchCon_longNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNprefetch2NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTDirectCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cNCallGeneratorPfor_direct_call6FpnIciMethod__p0_; +text: .text%__1cMWarmCallInfoLalways_cold6F_p0_; +text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_; +text: .text%__1cIciMethodXfind_monomorphic_target6MpnHciKlass_22_p0_; +text: .text%__1cDCHAManalyze_call6FnLKlassHandle_11nMsymbolHandle_2_pnJCHAResult__; +text: .text%__1cJCHAResultOis_monomorphic6kM_i_; +text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_; 
+text: .text%__1cTStackWalkCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_; +text: .text%__1cIAddLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLLShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRCompilationPolicybJreset_counter_for_back_branch_event6MnMmethodHandle__v_; +text: .text%__1cTmembar_CPUOrderNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQciTypeArrayKlassTis_type_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cMPhaseChaitinQgather_lrg_masks6Mi_v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: live.o; +text: .text%__1cJPhaseLiveHcompute6MI_v_; +text: .text%__1cIPhaseIFGEinit6MI_v_; +text: .text%__1cKStoreBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSandI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNIdealLoopTreeObeautify_loops6MpnOPhaseIdealLoop__i_; +text: .text%__1cLRShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLConvI2LNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: symbolKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: symbolKlass.o; +text: .text%__1cUcompU_iReg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cITypeNodeHis_Type6M_p0_: classes.o; +text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cJimmU6OperIconstant6kM_l_: ad_sparc_clone.o; +text: .text%__1cJcmpOpOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cKstorePNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cQmulL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitMreset_memory6M_pnENode__; +text: .text%__1cQPSGenerationPoolImax_size6kM_L_: memoryPool.o; +text: .text%__1cQPSGenerationPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cQPSGenerationPoolNused_in_bytes6M_L_: memoryPool.o; +text: .text%__1cPThreadLocalNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQaddL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: machnode.o; +text: .text%__1cSReferenceProcessorSdiscover_reference6MpnHoopDesc_nNReferenceType__i_; +text: .text%__1cKBufferBlobGcreate6Fpkci_p0_; +text: .text%__1cICodeBlob2t6Mpkcii_v_; +text: .text%__1cKstorePNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTCallInterpreterNodeGOpcode6kM_i_; +text: .text%JVM_GetCPClassNameUTF; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: loopnode.o; +text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_; +text: .text%__1cKimmL13OperJconstantL6kM_x_: ad_sparc_clone.o; +text: .text%__1cKJNIHandlesLmake_global6FnGHandle_i_pnI_jobject__; +text: .text%__1cSReferenceProcessorTget_discovered_list6MnNReferenceType__ppnHoopDesc__; +text: .text%__1cXjava_lang_ref_ReferenceIset_next6FpnHoopDesc_2_v_; +text: .text%__1cIimmDOperJconstantD6kM_d_: ad_sparc_clone.o; +text: .text%__1cbCcatch_cleanup_fix_all_inputs6FpnENode_11_v_: lcm.o; +text: .text%__1cQaddL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAddLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cHMatcherMreturn_value6Fii_nLRegPair__; +text: .text%jni_GetStringLength: jni.o; +text: .text%__1cJCHAResultSmonomorphic_target6kM_nMmethodHandle__; +text: .text%__1cFTypeDEmake6Fd_pk0_; +text: .text%__1cIHaltNodeEhash6kM_I_: classes.o; +text: .text%__1cLCastP2LNodeFValue6kMpnOPhaseTransform__pknEType__; +text: 
.text%__1cKReturnNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_; +text: .text%__1cMoutputStream2t6Mi_v_; +text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_; +text: .text%__1cMstringStream2t6ML_v_; +text: .text%__1cMstringStreamJas_string6M_pc_; +text: .text%__1cMstringStream2T6M_v_; +text: .text%__1cMURShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cIModINodeGOpcode6kM_i_; +text: .text%__1cMloadConDNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cZnoG3_iRegI_64bit_safeOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cENodeMsetup_is_top6M_v_; +text: .text%__1cIGotoNodeGOpcode6kM_i_; +text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_; +text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_; +text: .text%__1cKstoreCNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMURShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIMachOperEtype6kM_pknEType__; +text: .text%__1cKPerfStringKset_string6Mpkc_v_; +text: .text%__1cMURShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cEDict2t6MpFpkv2_ipF2_i_v_; +text: .text%__1cEDict2T6M_v_; +text: .text%__1cSCompareAndSwapNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cbDcatch_cleanup_find_cloned_def6FpnFBlock_pnENode_1rnLBlock_Array_i_3_: lcm.o; +text: .text%__1cHRetNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHRetNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: loopnode.o; +text: .text%__1cPconvI2L_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJimmU5OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cKimmI13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: graphKit.o; +text: .text%__1cObranchConUNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cKcmpOpUOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cIGraphKitTuse_exception_state6MpnNSafePointNode__pnENode__; +text: .text%__1cKBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__; +text: .text%__1cKcmpOpPOperFequal6kM_i_: ad_sparc_clone.o; +text: .text%__1cHMemNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cOClearArrayNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cNMachIdealNodeJnum_opnds6kM_I_: machnode.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: machnode.o; +text: .text%__1cRMachNullCheckNodeLout_RegMask6kM_rknHRegMask__: machnode.o; +text: .text%__1cRMachNullCheckNode2t6MpnENode_2I_v_; +text: .text%__1cSandI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKReturnNodeEhash6kM_I_: classes.o; +text: .text%__1cWImplicitExceptionTableGappend6MII_v_; +text: .text%__1cIMachNodeSalignment_required6kM_i_: machnode.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: machnode.o; +text: .text%__1cRMachNullCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: machnode.o; +text: .text%__1cQComputeCallStackHdo_void6M_v_: generateOopMap.o; +text: .text%__1cSTailCalljmpIndNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cPconvL2I_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cSaddL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Sub6M_pnHSubNode__: classes.o; +text: .text%__1cNloadConL0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopPis_counted_loop6MpnENode_pnNIdealLoopTree__2_; +text: .text%__1cNobjArrayKlassPoop_is_objArray6kM_i_: objArrayKlass.o; +text: .text%__1cIMaxINodeGOpcode6kM_i_; +text: .text%__1cQStackFrameStream2t6MpnKJavaThread_i_v_; +text: .text%__1cFStateN_sub_Op_LoadI6MpknENode__v_; +text: .text%__1cQandL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUGenericGrowableArray2t6MiipnEGrET_i_v_; +text: .text%__1cNprefetch2NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUGenericGrowableArrayEgrow6Mi_v_; +text: .text%__1cQaddP_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRcompL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNbranchConNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cLklassVtableRinitialize_vtable6MpnGThread__v_; +text: .text%__1cSaddL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQandI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassQup_cast_abstract6M_p0_; +text: .text%__1cIGraphKitOmake_slow_call6MpknITypeFunc_pCpkcpnENode_88_8_; +text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__; +text: .text%__1cLBuildCutout2T6M_v_; +text: .text%__1cFParseNpush_constant6MnKciConstant__i_; +text: .text%__1cUcompU_iReg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKciTypeFlow2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cKciTypeFlowPget_start_state6M_pkn0ALStateVector__; +text: .text%__1cKciTypeFlowXmark_known_range_starts6M_v_; +text: .text%__1cKciTypeFlowKflow_types6M_v_; +text: .text%__1cKciTypeFlowHdo_flow6M_v_; +text: .text%__1cKciTypeFlowLfind_ranges6M_v_; +text: .text%__1cKciTypeFlowKmap_blocks6M_v_; +text: .text%__1cJloadLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cIAndINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKTypeRawPtrFempty6kM_i_; +text: .text%__1cJloadCNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cVshrL_reg_imm6_L2INodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIGraphKitPpush_pair_local6Mi_v_: parse2.o; +text: .text%__1cJScopeDescGis_top6kM_i_; +text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_; +text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cMURShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cUcompU_iReg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapTmerge_state_vectors6MpnNCellTypeState_2_i_; +text: .text%__1cMciMethodDataJload_data6M_v_; +text: .text%__1cIciMethodJload_code6M_v_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: klass.o; +text: .text%__1cXmembar_acquire_lockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKKlass_vtbl2n6FLrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__; +text: .text%__1cIAndINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cRcompL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: klass.o; +text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_; +text: .text%__1cHUNICODELutf8_length6FpHi_i_; +text: .text%jni_GetStringUTFLength: jni.o; +text: .text%jni_GetStringUTFRegion: jni.o; +text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_; +text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_; +text: .text%__1cENodeHis_Copy6kM_I_: ad_sparc.o; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_; +text: .text%__1cLRuntimeStubIis_alive6kM_i_: codeBlob.o; +text: .text%__1cKTypeRawPtrEmake6FpC_pk0_; +text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cQPlaceholderTableJadd_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cKstoreBNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKcmpOpPOperJnot_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cPorI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMTypeKlassPtrFxdual6kM_pknEType__; +text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cUThreadSafepointStateYcaller_must_gc_arguments6kM_i_; +text: .text%__1cbCCompiledCodeSafepointHandlerYcaller_must_gc_arguments6kM_i_: safepoint.o; +text: .text%__1cNSafepointBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_; +text: .text%__1cTDebugInfoReadStream2t6MpknHnmethod_i_v_; +text: .text%__1cPconvL2I_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJTimeStampGupdate6M_v_; +text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_; +text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_; +text: .text%JVM_InternString; +text: .text%__1cOLibraryCallKitOgenerate_guard6MpnENode_pnKRegionNode_f_v_; +text: .text%__1cRsarI_reg_imm5NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosGrandom6F_l_; +text: .text%__1cNIdealLoopTreeTcheck_inner_safepts6MpnOPhaseIdealLoop__v_; +text: .text%__1cNIdealLoopTreeMcounted_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_; +text: .text%__1cICodeHeapKdeallocate6Mpv_v_; +text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cLOopRecorderKfind_index6MpnI_jobject__i_; +text: .text%__1cHCmpNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cHciKlassMis_interface6M_i_: ciObjArrayKlass.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodDataKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: methodDataKlass.o; +text: .text%__1cMciMethodData2t6MnQmethodDataHandle__v_; +text: .text%__1cPmethodDataKlassRoop_is_methodData6kM_i_: methodDataKlass.o; +text: .text%__1cKBufferBlobEfree6Fp0_v_; +text: .text%jni_SetIntField: jni.o; +text: .text%__1cQmulL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadPNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciObjectMis_obj_array6M_i_: ciInstanceKlass.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: memnode.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: memnode.o; +text: .text%__1cNloadConP0NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cJimmP0OperEtype6kM_pknEType__: ad_sparc_clone.o; +text: .text%__1cIRootNodeHis_Root6M_p0_: classes.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: multnode.o; +text: .text%__1cIregDOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cXmembar_acquire_lockNodeHtwo_adr6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cIMulLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_: classes.o; +text: .text%__1cTDebugInfoReadStreamLread_handle6M_nGHandle__; +text: .text%__1cQSystemDictionaryRcheck_constraints6FiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cVLoaderConstraintTablePcheck_or_update6MnTinstanceKlassHandle_nGHandle_nMsymbolHandle__pkc_; +text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_; +text: .text%__1cRcompL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQxorI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZCallDynamicJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRScavengeRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cKimmP13OperIconstant6kM_l_: ad_sparc_clone.o; +text: .text%__1cVcompP_iRegP_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRScavengeRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: reg_split.o; +text: .text%__1cFKlassXsearch_secondary_supers6kMpnMklassOopDesc__i_; +text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__; +text: .text%__1cJloadSNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__; +text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_; +text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_; +text: .text%__1cMtlsLoadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_; +text: .text%__1cKcmpOpUOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cHPhiNodeDcmp6kMrknENode__I_; +text: .text%__1cLstoreI0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcompiledVFrameGis_top6kM_i_; +text: .text%__1cIPhaseIFGISquareUp6M_v_; +text: .text%__1cPCheckCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHTypePtrCeq6kMpknEType__i_; +text: .text%__1cSmembar_acquireNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmodI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQcmovI_reg_ltNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLklassItableRinitialize_itable6M_v_; +text: .text%__1cLstoreI0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_; +text: .text%__1cQciByteCodeStreamJget_klass6Mri_pnHciKlass__; +text: .text%__1cSCompareAndSwapNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cIGraphKitOhas_ex_handler6M_i_; +text: .text%__1cLCastP2LNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: ciMethodData.o; +text: .text%__1cENodeGis_Con6kM_I_: connode.o; +text: .text%__1cPCheckCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cFKlassMoop_is_klass6kM_i_: instanceKlass.o; +text: .text%__1cSaddL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRegionNodeOis_block_start6kM_i_: loopnode.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse2.o; +text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cFKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: connode.o; +text: .text%__1cFParseQarray_addressing6MnJBasicType_ippknEType__pnENode__; +text: .text%__1cRshrL_reg_imm6NodePoper_input_base6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cFKlassWappend_to_sibling_list6M_v_; +text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_; +text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__; +text: .text%__1cSandI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOopRecorder2t6MpnFArena__v_; +text: .text%__1cKklassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_; +text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_; +text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cIVerifierQrelax_verify_for6FpnHoopDesc__i_; +text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserNfill_oop_maps6MnTinstanceKlassHandle_ii_v_; +text: .text%__1cPClassFileParserQparse_interfaces6MnSconstantPoolHandle_nGHandle_2pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserUcompute_oop_map_size6MnTinstanceKlassHandle_ii_i_; +text: .text%__1cPClassFileParserOparseClassFile6MnMsymbolHandle_nGHandle_2r1pnGThread__nTinstanceKlassHandle__; +text: .text%__1cPClassFileParserNparse_methods6MnSconstantPoolHandle_ipnLAccessFlags_ppnPobjArrayOopDesc_66pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__; +text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cKoopFactoryQnew_constantPool6FipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__; +text: .text%__1cPClassFileParserMparse_fields6MnSconstantPoolHandle_ipnUFieldAllocationCount_pnOobjArrayHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cNmethodOopDescMsort_methods6FpnPobjArrayOopDesc_222_v_; +text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_; +text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_; +text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_; +text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_; +text: .text%__1cVjava_lang_ClassLoaderRis_trusted_loader6FpnHoopDesc__i_; +text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cLklassVtablebKcompute_vtable_size_and_num_mirandas6Fri1pnMklassOopDesc_pnPobjArrayOopDesc_nLAccessFlags_pnHoopDesc_pnNsymbolOopDesc_5_v_; +text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_; +text: .text%__1cTClassLoadingServiceTnotify_class_loaded6FpnNinstanceKlass_i_v_; +text: .text%__1cNinstanceKlassQinit_implementor6M_v_; +text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_; +text: .text%__1cPClassFileStream2t6MpCipc_v_; +text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__; +text: .text%__1cIVerifierRshould_verify_for6FpnHoopDesc__i_; +text: 
.text%__1cRconstantPoolKlassIallocate6MipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cQSystemDictionaryVresolve_super_or_fail6FnMsymbolHandle_1nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNIdealLoopTreeVadjust_loop_exit_prob6MpnOPhaseIdealLoop__v_; +text: .text%__1cSinstanceKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNinstanceKlassScopy_static_fields6MpnSPSPromotionManager__v_; +text: .text%__1cOPhaseTransform2t6MnFPhaseLPhaseNumber__v_; +text: .text%__1cMPhaseIterGVNIoptimize6M_v_; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cQmodI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadBNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceKlass.o; +text: .text%__1cNloadKlassNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQandL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlass.o; +text: .text%__1cFVTuneOend_class_load6F_v_; +text: .text%__1cJEventMark2t6MpkcE_v_: classLoader.o; +text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_; +text: .text%__1cFVTuneQstart_class_load6F_v_; +text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cSThreadProfilerMark2T6M_v_; +text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_; +text: .text%__1cKstoreINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: loopnode.o; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: classes.o; +text: .text%__1cUCallCompiledJavaNodeGOpcode6kM_i_; +text: .text%__1cQmodI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNloadRangeNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHMemNodeHsize_of6kM_I_; +text: .text%__1cNPhaseCoalescePcoalesce_driver6M_v_; +text: .text%__1cMPhaseChaitinSbuild_ifg_physical6MpnMResourceArea__I_; +text: .text%__1cISubINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOMacroAssemblerDset6MlpnMRegisterImpl_rknQRelocationHolder__v_: ad_sparc.o; +text: .text%__1cJloadFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRMachSafePointNodePis_MachCallJava6M_pnQMachCallJavaNode__: ad_sparc_misc.o; +text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_; +text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_; +text: .text%__1cNinstanceKlassWadd_loader_constraints6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cJScopeDesc2t6MpknHnmethod_i_v_; +text: .text%JVM_GetMethodIxExceptionIndexes; +text: .text%JVM_GetMethodIxExceptionsCount; +text: .text%__1cIVerifierRverify_byte_codes6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_; +text: .text%JVM_GetMethodIxArgsSize; +text: 
.text%__1cKoopFactoryVnew_constantPoolCache6FipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%JVM_GetMethodIxByteCode; +text: .text%JVM_GetMethodIxByteCodeLength; +text: .text%__1cWconstantPoolCacheKlassIallocate6MipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cHAddNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cHnmethodNscope_desc_at6MpCi_pnJScopeDesc__; +text: .text%JVM_GetMethodIxSignatureUTF; +text: .text%__1cOcompiledVFrame2t6MpknFframe_pknLRegisterMap_pnKJavaThread_pnJScopeDesc__v_; +text: .text%JVM_GetMethodIxMaxStack; +text: .text%__1cMnegF_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cMnegF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFframeZinterpreter_frame_set_bcx6Ml_v_; +text: .text%__1cPconvL2I_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeGis_Con6kM_I_: ad_sparc.o; +text: .text%__1cMnegF_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSsafePoint_pollNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRMachSafePointNodeLis_MachCall6M_pnMMachCallNode__: ad_sparc_misc.o; +text: .text%__1cMVM_OperationPevaluation_mode6kM_n0AEMode__: vm_operations.o; +text: .text%__1cOcompiledVFrameEcode6kM_pnHnmethod__; +text: .text%__1cMStartOSRNodeGOpcode6kM_i_; +text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_; +text: .text%__1cSPSKeepAliveClosureGdo_oop6MppnHoopDesc__v_: psScavenge.o; +text: .text%__1cRcompL_reg_conNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAndINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cTGeneratePairingInfoRpossible_gc_point6MpnOBytecodeStream__i_: ciMethod.o; +text: .text%__1cOcompI_iRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTmembar_CPUOrderNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConP0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSaddP_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbAPSEvacuateFollowersClosureHdo_void6M_v_: psScavenge.o; +text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_; +text: .text%__1cLCastP2LNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_; +text: .text%__1cQcmovI_reg_ltNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cYcompareAndSwapL_boolNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNprefetch2NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cLProfileDataSis_VirtualCallData6M_i_: ciMethodData.o; +text: .text%__1cLOpaque1NodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOCompiledRFrameEinit6M_v_; +text: .text%__1cGvframeDtop6kM_p0_; +text: .text%__1cMURShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: compile.o; +text: .text%__1cOPhaseIdealLoopMdominated_by6MpnENode_2_v_; +text: .text%__1cLClassLoaderLadd_package6Fpkci_i_; +text: .text%__1cOGenerateOopMapHset_var6MinNCellTypeState__v_; +text: .text%__1cQandI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_; +text: 
.text%__1cJLoadBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIGraphKitRmerge_fast_memory6MpnENode_2i_v_; +text: .text%__1cRshlL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQmulL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHnmethodJcode_size6kM_i_: nmethod.o; +text: .text%__1cFParseKdo_put_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_; +text: .text%__1cFciEnvUis_unresolved_string6kMpnPciInstanceKlass_i_i_; +text: .text%__1cQciByteCodeStreamUis_unresolved_string6kM_i_; +text: .text%__1cRreturn_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cIGraphKitHopt_iff6MpnENode_2_2_; +text: .text%__1cEDict2t6MpFpkv2_ipF2_ipnFArena_i_v_; +text: .text%__1cJloadCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHMatcherFxform6MpnENode_i_2_; +text: .text%__1cJStartNodeHsize_of6kM_I_; +text: .text%__1cHMatcherLreturn_addr6kM_nHOptoRegEName__; +text: .text%__1cHMatcherLfind_shared6MpnENode__v_; +text: .text%__1cILoopNodeHsize_of6kM_I_: classes.o; +text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cJLoadSNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLstoreP0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOloadConL13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: instanceKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlass.o; +text: .text%__1cUcompU_iReg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cLOpaque2NodeGOpcode6kM_i_; +text: .text%__1cIProjNodeJideal_reg6kM_I_; +text: .text%__1cICallNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_; +text: .text%__1cHNTarjanICOMPRESS6M_v_; +text: .text%__1cQdivD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAndINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNRelocIteratorEnext6M_i_: nativeInst_sparc.o; +text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_; +text: .text%__1cIAndINodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cUEdenMutableSpacePoolImax_size6kM_L_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cYSurvivorMutableSpacePoolImax_size6kM_L_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolNused_in_bytes6M_L_: memoryPool.o; +text: .text%__1cYSurvivorMutableSpacePoolNused_in_bytes6M_L_: memoryPool.o; +text: .text%__1cRmethodDataOopDescLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cYSurvivorMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cKReturnNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMethodLivenessSpropagate_liveness6M_v_; +text: .text%__1cOMethodLivenessRinit_basic_blocks6M_v_; +text: .text%__1cOMethodLivenessNinit_gen_kill6M_v_; +text: .text%__1cOMethodLivenessQcompute_liveness6M_v_; +text: .text%__1cOMethodLiveness2t6MpnFArena_pnIciMethod__v_; +text: .text%__1cPcompP_iRegPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRloadConP_pollNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWconstantPoolCacheKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQmodI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cZCallDynamicJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cSaddL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNIdealLoopTreeOpolicy_peeling6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreebBpolicy_do_remove_empty_loop6MpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeUiteration_split_impl6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cVPreserveExceptionMark2T6M_v_; +text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_; +text: .text%__1cFParseXcatch_inline_exceptions6MpnNSafePointNode__v_; +text: .text%__1cJLoadFNodeGOpcode6kM_i_; +text: .text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__: classes.o; +text: .text%__1cYciExceptionHandlerStreamPcount_remaining6M_i_; +text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6ML_v_: jni.o; +text: .text%__1cQaddI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRInterpretedRFrameKtop_vframe6kM_pnKjavaVFrame__: rframe.o; +text: .text%__1cRinterpretedVFrameDbci6kM_i_; +text: .text%__1cILoopNode2t6MpnENode_2_v_; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: assembler_sparc.o; +text: .text%__1cOPhaseIdealLoopQset_subtree_ctrl6MpnENode__v_; +text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cKCodeBuffer2T6M_v_; +text: .text%__1cRcompL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopnode.o; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: loopnode.o; +text: .text%JVM_GetFieldIxModifiers; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: loopnode.o; +text: .text%__1cMloadConLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLstoreB0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLLShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRmethodDataOopDescKmileage_of6FpnNmethodOopDesc__i_; +text: .text%__1cRmethodDataOopDescJis_mature6kM_i_; +text: .text%__1cIciObjectMhas_encoding6M_i_; +text: .text%__1cQsubL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRethrowNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRconstantPoolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%JVM_IsConstructorIx; +text: .text%__1cTMachCallRuntimeNodePret_addr_offset6M_i_; +text: .text%__1cKCMoveINodeGOpcode6kM_i_; +text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cQcmovI_reg_ltNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__; +text: .text%__1cTMachCallRuntimeNodeSis_MachCallRuntime6M_p0_: ad_sparc_misc.o; +text: .text%__1cHCompilebAvarargs_C_out_slots_killed6kM_I_; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: onStackReplacement.o; +text: .text%__1cHMulNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_; +text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse3.o; +text: .text%__1cNSignatureInfoHdo_long6M_v_: frame.o; +text: .text%__1cIRootNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cRcompL_reg_conNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQxorI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cPCountedLoopNodeHsize_of6kM_I_: classes.o; +text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_; +text: .text%__1cSTailCalljmpIndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMloadConPNodeFclone6kM_pnENode__; +text: .text%__1cIimmPOperFclone6kM_pnIMachOper__; +text: .text%__1cYDebugInformationRecorderNappend_handle6MpnI_jobject__i_; +text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cNinstanceKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cHMatcherUc_calling_convention6FpnLRegPair_I_v_; +text: .text%__1cPCallRuntimeNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cVshrL_reg_imm6_L2INodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKJNIHandlesOdestroy_global6FpnI_jobject_i_v_; +text: .text%__1cIAndINodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cIConINodeHget_int6kMpi_i_: classes.o; +text: .text%__1cIAndINodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cHnmethodQis_native_method6kM_i_: nmethod.o; +text: .text%__1cKciTypeFlowLStateVectorMdo_putstatic6MpnQciByteCodeStream__v_; +text: .text%__1cJcmpOpOperFequal6kM_i_: ad_sparc_clone.o; +text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_; +text: .text%__1cFStateP_sub_Op_LShiftI6MpknENode__v_; +text: .text%__1cFParseFBlockRsuccessor_for_bci6Mi_p1_; +text: .text%__1cMoutputStreamFprint6MpkcE_v_; +text: .text%__1cIAndLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cHCompileQsync_stack_slots6kM_i_; +text: .text%__1cIAndLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cQcmovI_reg_gtNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKo0RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLMachNopNodeMideal_Opcode6kM_i_: ad_sparc.o; +text: .text%__1cLMachNopNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvL2I_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHRetNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPBytecode_invokeFindex6kM_i_; +text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cNmethodOopDescbGupdate_compiled_code_entry_point6Mi_v_; +text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_; +text: .text%__1cRloadConP_pollNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNSignatureInfoHdo_long6M_v_: bytecode.o; +text: .text%__1cRshlI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPCountedLoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHMatcherQpost_fast_unlock6FpknENode__i_; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstance.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: objArrayKlass.o; +text: .text%__1cRMachSafePointNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowLStateVectorGdo_ldc6MpnQciByteCodeStream__v_; +text: .text%__1cENodeGis_Con6kM_I_: memnode.o; +text: .text%__1cQsubL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIXorINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_; +text: .text%__1cMFastLockNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cHAddNodeGis_Add6kM_pk0_: classes.o; +text: .text%__1cPindOffset13OperFclone6kM_pnIMachOper__; +text: .text%__1cKg1RegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNSCMemProjNodeGis_CFG6kM_i_: 
classes.o; +text: .text%__1cLConvL2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRshrL_reg_imm6NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_; +text: .text%__1cKPSYoungGenNused_in_bytes6kM_L_; +text: .text%__1cOPhaseIdealLoopRsplit_thru_region6MpnENode_2_2_; +text: .text%__1cFParseNthrow_to_exit6MpnNSafePointNode__v_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_; +text: .text%__1cQLRUMaxHeapPolicyWshould_clear_reference6MpnHoopDesc__i_; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__: ad_sparc_misc.o; +text: .text%__1cRshrL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKNode_Array2t6MpnFArena__v_: output.o; +text: .text%__1cSxorI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%jni_ExceptionCheck: jni.o; +text: .text%__1cSReferenceProcessorbAprocess_discovered_reflist6MppnHoopDesc_pnPReferencePolicy_i_v_; +text: .text%__1cSReferenceProcessorbAenqueue_discovered_reflist6MpnHoopDesc_p2_v_; +text: .text%__1cHPhiNodeKmake_blank6FpnENode_2_p0_; +text: .text%__1cXmembar_release_lockNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPciInstanceKlassTis_java_lang_Object6M_i_; +text: .text%__1cNflagsRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cSmembar_releaseNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRcompL_reg_conNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%JVM_Clone; +text: .text%__1cNloadKlassNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateR_sub_Op_LoadKlass6MpknENode__v_; +text: .text%__1cQandI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJVectorSetGslamin6Mrk0_v_; +text: .text%__1cUcompU_iReg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cURethrowExceptionNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cYinlineCallClearArrayNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAddLNodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cHTypePtrFxmeet6kMpknEType__3_; +text: .text%__1cSMemBarVolatileNodeGOpcode6kM_i_; +text: .text%__1cIJumpDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cNloadConP0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJcmpOpOperEless6kM_i_: ad_sparc_clone.o; +text: .text%__1cWCallLeafNoFPDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMindirectOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindirectOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindirectOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindIndexOperOindex_position6kM_i_: ad_sparc.o; +text: .text%__1cMindIndexOperFscale6kM_i_: ad_sparc.o; +text: .text%__1cMindIndexOperNbase_position6kM_i_: ad_sparc.o; +text: .text%__1cMindIndexOperNconstant_disp6kM_i_: ad_sparc.o; +text: .text%__1cRAbstractAssemblerFflush6M_v_; +text: .text%__1cOstackSlotLOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cXmembar_release_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cKType_ArrayEgrow6MI_v_; +text: .text%__1cSmembar_releaseNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddL_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMTailCallNodeKmatch_edge6kMI_I_; +text: .text%__1cGThreadMis_VM_thread6kM_i_: thread.o; +text: .text%__1cOMachEpilogNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cOMachEpilogNodeNis_MachEpilog6M_p0_: ad_sparc.o; +text: .text%__1cJLoadLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJloadCNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLOptoRuntimeJstub_name6FpC_pkc_; +text: .text%__1cOloadConL13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_: jni.o; +text: .text%__1cHOrINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cVshrL_reg_imm6_L2INodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_; +text: .text%__1cKstoreLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_; +text: .text%__1cIPhaseIFGYCompute_Effective_Degree6M_v_; +text: .text%__1cMPhaseChaitinISimplify6M_v_; +text: .text%__1cOloadConL13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRshrL_reg_imm6NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cICmpLNodeDsub6kMpknEType_3_3_; +text: .text%jni_NewObject: jni.o; +text: .text%__1cOPhaseIdealLoopNreorg_offsets6MpnNIdealLoopTree__v_; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopopts.o; +text: .text%__1cSCallLeafDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKTypeRawPtrEmake6FnHTypePtrDPTR__pk0_; +text: .text%__1cJLoadCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNSharedRuntimeOresolve_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cNSharedRuntimeSresolve_sub_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cNobjArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cFStateQ_sub_Op_URShiftI6MpknENode__v_; +text: .text%__1cLCounterDataOis_CounterData6M_i_: methodDataOop.o; +text: .text%__1cZCallDynamicJavaDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHget_int6kMpi_i_; +text: .text%__1cKTypeAryPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cHMatcherNfind_receiver6Fi_nFVMRegEName__; +text: .text%__1cQaddL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLRShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIConFNodeGOpcode6kM_i_; +text: .text%__1cKstoreCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cENodeUdepends_only_on_test6kM_i_: connode.o; +text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_; +text: .text%__1cFframeRretrieve_receiver6MpnLRegisterMap__pnHoopDesc__; +text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cJNode_ListEyank6MpnENode__v_; +text: .text%__1cNSafePointNodeLpop_monitor6M_v_; +text: .text%__1cIGraphKitNshared_unlock6MpnENode_2_v_; +text: .text%__1cLOptoRuntimebAcomplete_monitor_exit_Type6F_pknITypeFunc__; +text: .text%__1cRshrI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: connode.o; +text: .text%__1cKciTypeFlowPflow_exceptions6MpnNGrowableArray4Cpn0AFBlock___pnNGrowableArray4CpnPciInstanceKlass___pn0ALStateVector__v_; +text: .text%__1cENodeHis_Call6M_pnICallNode__: loopnode.o; +text: .text%__1cKstoreINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCallLeafDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNSafePointNodeMpush_monitor6MpknMFastLockNode__v_; +text: .text%__1cMPhaseChaitinVfind_base_for_derived6MppnENode_2rI_2_; 
+text: .text%__1cJLoadSNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQinstanceRefKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKciTypeFlowLStateVectorOmeet_exception6MpnPciInstanceKlass_pk1_i_; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_; +text: .text%__1cSCallLeafDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCallLeafDirectNodeKmethod_set6Ml_v_; +text: .text%__1cSCallLeafDirectNodeRis_safepoint_node6kM_i_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopPbuild_loop_tree6M_v_; +text: .text%__1cXmembar_release_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOPhaseIdealLoop2t6MrnMPhaseIterGVN_pk0i_v_; +text: .text%__1cKtype2basic6FpknEType__nJBasicType__; +text: .text%__1cFStateM_sub_Op_SubI6MpknENode__v_; +text: .text%__1cFStateT_sub_Op_CheckCastPP6MpknENode__v_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: frame.o; +text: .text%__1cRshrL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_; +text: .text%__1cIciObjectIis_klass6M_i_: ciInstance.o; +text: .text%__1cPcheckCastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerFsethi6MrnHAddress_i_v_: interp_masm_sparc.o; +text: .text%__1cFframeRis_compiled_frame6kMpi_i_; +text: .text%__1cNIdealLoopTreeIset_nest6MI_i_; +text: .text%__1cKCompiledICWis_in_transition_state6kM_i_; +text: .text%__1cKstoreLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: classes.o; +text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_; +text: .text%__1cIAndINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRcompL_reg_conNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadPNodeFclone6kM_pnENode__; +text: .text%__1cOloadConL13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRcmpFastUnlockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKstoreFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassDLCA6Mp0_1_; +text: .text%__1cHciKlassVleast_common_ancestor6Mp0_1_; +text: .text%__1cGOopMapPset_derived_oop6MnHOptoRegEName_ii2_v_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlass.o; +text: .text%__1cIRootNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cMLinkResolverUresolve_special_call6FrnICallInfo_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cIRootNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOPhaseIdealLoopKDominators6M_v_; +text: .text%__1cOPhaseIdealLoopQbuild_loop_early6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cOPhaseIdealLoopPbuild_loop_late6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cIMulLNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cMindirectOperNbase_position6kM_i_: ad_sparc.o; +text: .text%__1cMindirectOperNconstant_disp6kM_i_: ad_sparc.o; +text: .text%__1cFBytesNget_native_u46FpC_I_: bytecodes.o; +text: .text%__1cMindirectOperFscale6kM_i_: ad_sparc.o; +text: .text%__1cZPhaseConservativeCoalesce2t6MrnMPhaseChaitin__v_; +text: .text%__1cYcompareAndSwapL_boolNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZPhaseConservativeCoalesceGverify6M_v_; +text: .text%__1cMPhaseChaitinZcompress_uf_map_for_nodes6M_v_; +text: .text%__1cMPhaseChaitinHcompact6M_v_; +text: .text%__1cMPhaseChaitinFSplit6MI_I_; +text: 
.text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: connode.o; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interp_masm_sparc.o; +text: .text%__1cNSCMemProjNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMloadConPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKStoreFNodeGOpcode6kM_i_; +text: .text%__1cIimmPOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cHMatcherPprior_fast_lock6FpknENode__i_; +text: .text%__1cRcmpFastUnlockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXmembar_release_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerWcompiler_unlock_object6MpnMRegisterImpl_222rnFLabel__v_; +text: .text%__1cQcmovI_reg_gtNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%jni_NewGlobalRef: jni.o; +text: .text%__1cNloadConL0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopOset_early_ctrl6MpnENode__v_; +text: .text%__1cOMachEpilogNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJCMoveNodeLis_cmove_id6FpnOPhaseTransform_pnENode_44pnIBoolNode__4_; +text: .text%__1cLRethrowNodeEhash6kM_I_: classes.o; +text: .text%__1cQsubI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmulI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFParseWensure_phis_everywhere6M_v_; +text: .text%__1cIregDOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: cfgnode.o; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: cfgnode.o; +text: .text%__1cMloadConINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKCompiledICIis_clean6kM_i_; +text: .text%__1cHOrINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLklassItablebFinitialize_itable_for_interface6MpnMklassOopDesc_pnRitableMethodEntry__v_; +text: .text%__1cMURShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIDivINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cRbranchLoopEndNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCompiledICZcompute_monomorphic_entry6FnMmethodHandle_nLKlassHandle_iirnOCompiledICInfo_pnGThread__v_; +text: .text%__1cQxorI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMVirtualSpaceNreserved_size6kM_L_; +text: .text%__1cKJavaThreadGactive6F_p0_; +text: .text%JVM_FillInStackTrace; +text: .text%__1cIUniverseWis_out_of_memory_error6FnGHandle__i_; +text: .text%__1cFParseKarray_load6MnJBasicType__v_; +text: .text%__1cKJavaThreadNreguard_stack6MpC_i_; +text: .text%__1cUPSGenerationCountersKupdate_all6M_v_: psGenerationCounters.o; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_; +text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_; +text: .text%__1cICodeHeapMmax_capacity6kM_L_; +text: .text%__1cJTimeStampSticks_since_update6kM_x_; +text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_; +text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_; +text: .text%__1cVCallRuntimeDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cILoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: node.o; +text: .text%__1cJloadFNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cILoopNodeHsize_of6kM_I_: loopnode.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: memnode.o; +text: 
.text%__1cFStateQ_sub_Op_URShiftL6MpknENode__v_; +text: .text%__1cNloadConL0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitNstore_barrier6MpnENode_22_v_; +text: .text%__1cOMacroAssemblerEcall6MpCnJrelocInfoJrelocType__v_: assembler_sparc.o; +text: .text%__1cMtlsLoadPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cbBInterpreterCodeletInterfaceRcode_size_to_size6kMi_i_: interpreter.o; +text: .text%__1cbBInterpreterCodeletInterfaceKinitialize6MpnEStub_i_v_: interpreter.o; +text: .text%__1cIAndLNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cIAndLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: interpreter_sparc.o; +text: .text%__1cNIdealLoopTreeMis_loop_exit6kMpnENode_pnOPhaseIdealLoop__2_; +text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_; +text: .text%__1cSbranchCon_longNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constantPoolKlass.o; +text: .text%__1cPlocal_vsnprintf6FpcLpkcpv_i_; +text: .text%__1cVshrL_reg_imm6_L2INodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cURethrowExceptionNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cURethrowExceptionNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescVclear_native_function6M_v_; +text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_; +text: .text%__1cIAndLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIAndLNodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cIAndLNodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cKloadUBNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cSstring_compareNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cbFunnecessary_membar_volatileNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cCosXthread_local_storage_at6Fi_pv_; +text: .text%__1cFTypeFEmake6Ff_pk0_; +text: .text%jio_snprintf; +text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_; +text: .text%__1cLOptoRuntimebAresolve_opt_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%jni_NewLocalRef: jni.o; +text: .text%__1cIimmFOperJconstantF6kM_f_: ad_sparc_clone.o; +text: .text%__1cGOopMapQset_callee_saved6MnHOptoRegEName_ii2_v_; +text: .text%__1cFParseNadd_safepoint6M_v_; +text: .text%__1cRcompL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIDivLNodeGOpcode6kM_i_; +text: .text%__1cQregF_to_stkINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRcompL_reg_conNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIMinINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_; +text: .text%__1cTCallDynamicJavaNodeEhash6kM_I_: callnode.o; +text: .text%__1cKReturnNode2t6MpnENode_2222_v_; +text: .text%__1cKReturnNodeJideal_reg6kM_I_: classes.o; +text: .text%jni_GetObjectArrayElement: jni.o; +text: .text%__1cLPhaseValues2T5B6M_v_; +text: .text%jni_DeleteGlobalRef: jni.o; +text: .text%__1cYcompareAndSwapL_boolNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_; +text: .text%__1cMloadConFNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: 
.text%__1cENodeHrm_prec6MI_v_; +text: .text%__1cMMutableSpaceFclear6M_v_; +text: .text%__1cICodeBlob2t6MpkcpnKCodeBuffer_iiipnJOopMapSet_i_v_; +text: .text%__1cICodeBlobWfix_relocation_at_move6Ml_v_; +text: .text%__1cKCodeBufferPcopy_relocation6MpnICodeBlob__v_; +text: .text%__1cKCodeBufferJcopy_code6MpnICodeBlob__v_; +text: .text%__1cLConvL2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_; +text: .text%__1cVPatchingRelocIteratorHprepass6M_v_; +text: .text%__1cJOopMapSet2t6M_v_; +text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_; +text: .text%__1cNRelocIteratorMcreate_index6FpnKCodeBuffer_pnJrelocInfo_4_4_; +text: .text%__1cIGraphKitOset_pair_local6MipnENode__v_: parse2.o; +text: .text%__1cJCodeCacheXmark_for_deoptimization6FpnMklassOopDesc__i_; +text: .text%__1cLcmpD_ccNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNRelocIterator2t6MpnKCodeBuffer_pC3_v_; +text: .text%__1cMtlsLoadPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLStrCompNodeGOpcode6kM_i_; +text: .text%__1cOCompiledRFrameLis_compiled6kM_i_: rframe.o; +text: .text%__1cOCompiledRFrameKtop_method6kM_nMmethodHandle__: rframe.o; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_: nmethod.o; +text: .text%JVM_GetCPMethodModifiers; +text: .text%__1cOCompilerOraclePshould_break_at6FnMmethodHandle__i_; +text: .text%__1cIConDNodeGOpcode6kM_i_; +text: .text%__1cJOopMapSetHcopy_to6MpC_v_; +text: .text%jni_SetLongField: jni.o; +text: .text%__1cJOopMapSetJheap_size6kM_i_; +text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_; +text: .text%__1cIAndLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQregP_to_stkPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIJVMState2t6Mi_v_; +text: .text%__1cNSignatureInfoJdo_double6M_v_: frame.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: callnode.o; +text: .text%__1cLOpaque2NodeEhash6kM_I_; +text: .text%__1cKCodeBufferGresize6Miiii_v_; +text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_; +text: .text%__1cLstoreB0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulDNodeGOpcode6kM_i_; +text: .text%__1cYDebugInformationRecorderJdata_size6M_i_; +text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_; +text: .text%__1cHnmethodPscopes_pcs_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodJstub_size6kM_i_: nmethod.o; +text: .text%__1cOPhaseIdealLoopUsplit_if_with_blocks6MrnJVectorSet_rnKNode_Stack__v_; +text: .text%__1cZCallInterpreterDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLOopRecorderIoop_size6M_i_; +text: .text%__1cHnmethodOexception_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodKtotal_size6kM_i_; +text: .text%__1cIConPNodeEmake6FpC_p0_; +text: .text%__1cHMonitorGnotify6M_i_; +text: .text%__1cHnmethodQscopes_data_size6kM_i_: nmethod.o; +text: .text%__1cNObjectMonitorHis_busy6kM_l_; +text: .text%__1cCosRcurrent_thread_id6F_l_; +text: .text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_; +text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fl_v_; +text: .text%__1cHCompileICode_Gen6M_v_; +text: .text%__1cOCompileWrapper2t6MpnHCompile__v_; +text: .text%__1cHCompileEInit6Mi_v_; +text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_; +text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_; +text: .text%__1cETypeKInitialize6FpnHCompile__v_; +text: .text%__1cFDictIFreset6MpknEDict__v_; +text: .text%__1cHMatcher2t6MrnJNode_List__v_; +text: .text%__1cHCompilebBregister_library_intrinsics6M_v_; +text: 
.text%__1cVExceptionHandlerTable2t6Mi_v_; +text: .text%__1cFArenaNmove_contents6Mp0_1_; +text: .text%__1cHCompileGOutput6M_v_; +text: .text%__1cFArenaRdestruct_contents6M_v_; +text: .text%__1cFArena2t6ML_v_; +text: .text%__1cHCompileLFill_buffer6M_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: phaseX.o; +text: .text%__1cHMatcherFmatch6M_v_; +text: .text%__1cIPhaseCFGYEstimate_Block_Frequency6M_v_; +text: .text%__1cHCompileTset_cached_top_node6MpnENode__v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: block.o; +text: .text%__1cIPhaseCFGOschedule_early6MrnJVectorSet_rnJNode_List_rnLBlock_Array__i_; +text: .text%__1cLdo_liveness6FpnNPhaseRegAlloc_pnIPhaseCFG_pnKBlock_List_ipnFArena_pnEDict__v_: buildOopMap.o; +text: .text%__1cHCompileMBuildOopMaps6M_v_; +text: .text%__1cIPhaseCFGLRemoveEmpty6M_v_; +text: .text%__1cIPhaseCFGQFind_Inner_Loops6M_v_; +text: .text%__1cIPhaseCFGJbuild_cfg6M_I_; +text: .text%__1cIPhaseCFG2t6MpnFArena_pnIRootNode_rnHMatcher__v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: buildOopMap.o; +text: .text%__1cIPhaseCFGKDominators6M_v_; +text: .text%__1cOPhaseTransform2t6Mp0nFPhaseLPhaseNumber__v_; +text: .text%__1cLPhaseValues2t6Mp0_v_; +text: .text%__1cHCompileYinit_scratch_buffer_blob6M_v_; +text: .text%__1cIPhaseCFGVschedule_pinned_nodes6MrnJVectorSet__v_; +text: .text%__1cIPhaseCFGNschedule_late6MrnJVectorSet_rnJNode_List_rnNGrowableArray4CI___v_; +text: .text%__1cIPhaseCFGQGlobalCodeMotion6MrnHMatcher_IrnJNode_List__v_; +text: .text%__1cMPhaseChaitinbApost_allocate_copy_removal6M_v_; +text: .text%__1cJPhaseLive2T6M_v_; +text: .text%__1cMPhaseChaitin2t6MIrnIPhaseCFG_rnHMatcher__v_; +text: .text%__1cIPhaseIFG2t6MpnFArena__v_; +text: .text%__1cHMatcherWis_short_branch_offset6Mi_i_; +text: .text%__1cMPhaseChaitinRbuild_ifg_virtual6M_v_; +text: .text%__1cXPhaseAggressiveCoalesceNinsert_copies6MrnHMatcher__v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: matcher.o; +text: .text%__1cNPhaseRegAllocPalloc_node_regs6Mi_v_; +text: .text%__1cJPhaseLive2t6MrknIPhaseCFG_rnILRG_List_pnFArena__v_; +text: .text%__1cNPhaseRegAlloc2t6MIrnIPhaseCFG_rnHMatcher_pF_v_v_; +text: .text%__1cMPhaseChaitinRRegister_Allocate6M_v_; +text: .text%__1cHMatcherPinit_spill_mask6MpnENode__v_; +text: .text%__1cHMatcherVinit_first_stack_mask6M_v_; +text: .text%__1cMPhaseChaitinMfixup_spills6M_v_; +text: .text%__1cMPhaseChaitinMreset_uf_map6MI_v_; +text: .text%__1cHMatcherTFixup_Save_On_Entry6M_v_; +text: .text%__1cHMatcherZnumber_of_saved_registers6F_i_; +text: .text%__1cMPhaseChaitinbGstretch_base_pointer_live_ranges6MpnMResourceArea__i_; +text: .text%__1cNPhaseRegAllocTpd_preallocate_hook6M_v_; +text: .text%__1cXPhaseAggressiveCoalesceGverify6M_v_: coalesce.o; +text: .text%__1cGBundlePinitialize_nops6FppnIMachNode__v_; +text: .text%__1cWemit_exception_handler6FrnKCodeBuffer__v_; +text: .text%__1cWImplicitExceptionTableIset_size6MI_v_; +text: .text%__1cMPhaseChaitin2T6M_v_; +text: .text%__1cHCompileTFillExceptionTables6MIpI1pnFLabel__v_; +text: .text%__1cNSignatureInfoJdo_double6M_v_: bytecode.o; +text: .text%__1cKCodeBufferOrelocate_stubs6M_v_; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_: callnode.o; +text: .text%__1cOMachPrologNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHCompilePneed_stack_bang6kMi_i_; +text: .text%__1cWpoll_return_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: multnode.o; +text: .text%__1cLConvL2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: 
.text%__1cOcmovII_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: callnode.o; +text: .text%__1cLstoreB0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHRetNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cNSafePointNodeKgrow_stack6MpnIJVMState_I_v_; +text: .text%__1cICodeBlobPallocation_size6FpnKCodeBuffer_ii_I_; +text: .text%__1cHRetNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTresource_free_bytes6FpcL_v_; +text: .text%__1cMMutableSpaceKinitialize6MnJMemRegion_i_v_; +text: .text%__1cJimmL0OperJconstantL6kM_x_: ad_sparc_clone.o; +text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nativeInst_sparc.o; +text: .text%JVM_DoPrivileged; +text: .text%__1cKloadUBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cGciTypeMis_classless6kM_i_: ciInstanceKlass.o; +text: .text%__1cULinearLeastSquareFitGupdate6Mdd_v_; +text: .text%__1cQcmovI_reg_gtNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIGraphKitOnull_check_oop6MpnKRegionNode_pnENode_i_4_; +text: .text%__1cVCallRuntimeDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQaddL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadCNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQcmovI_reg_gtNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFciEnvKcompile_id6M_I_; +text: .text%__1cVshrL_reg_imm6_L2INodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerDset6MlpnMRegisterImpl_rknQRelocationHolder__v_: assembler_sparc.o; +text: .text%__1cSmembar_acquireNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cNprefetch2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJimmU6OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cNmethodOopDescbDbuild_interpreter_method_data6FnMmethodHandle_pnGThread__v_; +text: .text%__1cRshrL_reg_imm6NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: loopnode.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: loopnode.o; +text: .text%__1cSmembar_acquireNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_words6FpnNmethodOopDesc__i_; +text: .text%__1cRmethodDataOopDescKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cKoopFactoryOnew_methodData6FnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cPmethodDataKlassIallocate6MnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cZCallInterpreterDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cObox_handleNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHciKlassGloader6M_pnHoopDesc__: ciTypeArrayKlass.o; +text: .text%__1cNIdealLoopTreePiteration_split6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__; +text: .text%__1cIMulLNodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cIMulLNodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cURethrowExceptionNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cICmpDNodeGOpcode6kM_i_; +text: .text%__1cKTypeOopPtrSmake_from_constant6FpnIciObject__pk0_; +text: .text%__1cLConvD2INodeGOpcode6kM_i_; +text: .text%__1cTmembar_volatileNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLCastP2LNodeJideal_reg6kM_I_: classes.o; +text: 
.text%__1cIModINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cObox_handleNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMindIndexOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindIndexOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindIndexOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_; +text: .text%__1cKstoreFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIDivINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRshrP_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitNgen_checkcast6MpnENode_2p2_2_; +text: .text%__1cNloadConL0NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cGThreadOis_interrupted6Fp0i_i_; +text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cJLoadBNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJArrayDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cSInterpreterRuntimeOprofile_method6FpnKJavaThread_pC_i_; +text: .text%__1cGTarjanICOMPRESS6M_v_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlass.o; +text: .text%__1cLOptoRuntimeRmultianewarray1_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cLRuntimeStubbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cNmethodOopDescThas_native_function6kM_i_; +text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_; +text: .text%__1cQmerge_point_safe6FpnENode__i_: loopopts.o; +text: .text%__1cOstackSlotIOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cNmethodOopDescWis_vanilla_constructor6kM_i_; +text: .text%__1cFframeLnmethods_do6M_v_; +text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_; +text: .text%__1cJcmpOpOperKless_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cVshrL_reg_imm6_L2INodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQLibraryIntrinsicIgenerate6MpnIJVMState__2_; +text: .text%__1cOLibraryCallKitNtry_to_inline6M_i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: loopnode.o; +text: .text%__1cFStateM_sub_Op_AndI6MpknENode__v_; +text: .text%__1cKstoreBNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConL0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_: library_call.o; +text: .text%__1cIciObjectJis_method6M_i_: ciInstance.o; +text: .text%__1cQsubL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRcompL_reg_conNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIciObjectOis_method_data6M_i_: ciInstance.o; +text: .text%__1cQaddF_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: loopnode.o; +text: .text%__1cJloadFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZnoG3_iRegI_64bit_safeOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cJcmpOpOperHgreater6kM_i_: ad_sparc_clone.o; +text: .text%__1cRsubI_zero_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetClassNameUTF; +text: .text%__1cIGraphKitRgen_subtype_check6MpnENode_2_2_; +text: .text%__1cKTypeRawPtrFxdual6kM_pknEType__; +text: .text%__1cGIfNodeMdominated_by6MpnENode_pnMPhaseIterGVN__v_; +text: .text%__1cIPhaseCFGOinsert_goto_at6MII_v_; +text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_; +text: 
.text%__1cQComputeCallStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cQshlI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%JVM_FindLoadedClass; +text: .text%__1cNRelocIteratorEnext6M_i_: compiledIC.o; +text: .text%__1cENodeHis_Copy6kM_I_: node.o; +text: .text%__1cIModLNodeGOpcode6kM_i_; +text: .text%__1cRbranchLoopEndNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLRethrowNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLRShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cObranchConFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIPSOldGenPupdate_counters6M_v_; +text: .text%__1cJCmpL3NodeGOpcode6kM_i_; +text: .text%__1cMloadConINodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cbDReferenceProcessorInitializerIis_clean6kM_v_: concurrentMarkSweepGeneration.o; +text: .text%__1cLOptoRuntimeKjbyte_copy6FpW1L_v_; +text: .text%__1cKManagementJtimestamp6F_x_; +text: .text%__1cPCallRuntimeNodeGOpcode6kM_i_; +text: .text%__1cbFunnecessary_membar_volatileNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKReflectionGinvoke6FnTinstanceKlassHandle_nMmethodHandle_nGHandle_inOobjArrayHandle_nJBasicType_4ipnGThread__pnHoopDesc__; +text: .text%__1cNloadConP0NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cITypeLongFwiden6kMpknEType__3_; +text: .text%__1cZCallInterpreterDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: subnode.o; +text: .text%__1cKcmpOpFOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cNflagsRegFOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cPciInstanceKlassbBcompute_shared_has_subklass6M_i_; +text: .text%__1cIGraphKitNallocate_heap6MpnENode_222pknITypeFunc_pC22ipknKTypeOopPtr__2_; +text: .text%__1cOClearArrayNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSbranchCon_longNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cEUTF8Ounicode_length6Fpkc_i_; +text: .text%__1cKstoreCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNflagsRegFOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cOPhaseIdealLoopOplace_near_use6kMpnENode__2_; +text: .text%__1cLBoxLockNodeEhash6kM_I_: classes.o; +text: .text%JVM_FindClassFromClass; +text: .text%__1cJloadLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKstoreCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmulI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQshrL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_ltNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcmpD_ccNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cbACallCompiledJavaDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSignatureInfoIdo_float6M_v_: frame.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlassKlass.o; +text: .text%__1cScheck_phi_clipping6FpnHPhiNode_rpnHConNode_rI45rpnENode_5_i_: cfgnode.o; +text: .text%__1cMjniIdMapBaseHoops_do6MpnKOopClosure__v_; +text: .text%__1cLConvL2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_; +text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_; +text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cSxorI_reg_imm13NodePoper_input_base6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_; +text: .text%JVM_NewInstanceFromConstructor; +text: .text%__1cFParseFBlockMadd_new_path6M_i_; +text: .text%__1cKScopeValueJread_from6FpnTDebugInfoReadStream__p0_; +text: .text%__1cOcompiledVFrameScreate_stack_value6kMpnKScopeValue__pnKStackValue__; +text: .text%signalHandler; +text: .text%JVM_handle_solaris_signal; +text: .text%__1cCosRelapsed_frequency6F_x_; +text: .text%__1cMelapsedTimerHseconds6kM_d_; +text: .text%__1cMVM_OperationSis_cheap_allocated6kM_i_: vm_operations.o; +text: .text%__1cHnmethodUnumber_of_dependents6kM_i_: nmethod.o; +text: .text%__1cJBytecodesRspecial_length_at6FpC_i_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cFStateL_sub_Op_OrI6MpknENode__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constMethodKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constMethodKlass.o; +text: .text%__1cIAddLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cTOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cQcmovI_reg_ltNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSCardTableExtensionbAscavenge_contents_parallel6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager_I_v_; +text: .text%__1cXjava_lang_ref_ReferenceWpending_list_lock_addr6F_ppnHoopDesc__; +text: .text%__1cKcmpOpUOperEless6kM_i_: ad_sparc_clone.o; +text: .text%__1cJStealTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cJStealTask2t6Mi_v_; +text: .text%__1cTOldToYoungRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cNGCTaskManagerMnote_release6MI_v_; +text: .text%__1cJStealTaskEname6M_pc_: psTasks.o; +text: .text%__1cJloadSNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cMPrefetchNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNprefetch2NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvF2DNodeGOpcode6kM_i_; +text: .text%__1cMMergeMemNodeIadr_type6kM_pknHTypePtr__: memnode.o; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: multnode.o; +text: .text%__1cIMulLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cILocation2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cJcmpOpOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cFParseJdo_ifnull6MnIBoolTestEmask__v_; +text: .text%__1cQmodI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIimmIOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cMloadConINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKcmpOpPOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cSaddI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFframeLgc_prologue6M_v_; +text: .text%__1cFframeLgc_epilogue6M_v_; +text: .text%__1cJiRegLOperFclone6kM_pnIMachOper__; +text: .text%__1cOloadConI13NodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cFframeMpd_gc_epilog6M_v_; +text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cKciTypeFlowLStateVectorJdo_aaload6MpnQciByteCodeStream__v_; +text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__; +text: .text%__1cVCallRuntimeDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%jni_NewString: jni.o; +text: .text%__1cNLocationValueLis_location6kM_i_: debugInfo.o; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnHoopDesc_pnNsymbolOopDesc__i_; 
+text: .text%__1cJloadLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSTailCalljmpIndNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__; +text: .text%__1cHMatcherQinline_cache_reg6F_nHOptoRegEName__; +text: .text%__1cNmethodOopDescTset_native_function6MpC_v_; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: loopTransform.o; +text: .text%__1cPorI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitMnext_monitor6M_i_; +text: .text%__1cLOptoRuntimebBcomplete_monitor_enter_Type6F_pknITypeFunc__; +text: .text%__1cLBoxLockNode2t6Mi_v_; +text: .text%__1cIGraphKitLshared_lock6MpnENode__pnMFastLockNode__; +text: .text%__1cRorI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHTypePtrEmake6FnETypeFTYPES_n0ADPTR_i_pk0_; +text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cSmulI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSmulI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cXmembar_acquire_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__; +text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_; +text: .text%__1cLOpaque2NodeLbottom_type6kM_pknEType__: connode.o; +text: .text%__1cOcmovII_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cUGenericGrowableArrayNraw_appendAll6Mpk0_v_; +text: .text%__1cNSignatureInfoIdo_short6M_v_: frame.o; +text: .text%__1cFciEnv2t6MpnHJNIEnv__iii_v_; +text: .text%__1cRInlineCacheBufferIis_empty6F_i_; +text: .text%__1cSstring_compareNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosPhint_no_preempt6F_v_; +text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_; +text: .text%lwp_mutex_init: os_solaris.o; +text: .text%__1cUSafepointSynchronizeRis_cleanup_needed6F_i_; +text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__; +text: .text%__1cNSingletonBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cOcmovPP_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFciEnv2T6M_v_; +text: .text%__1cFframebDsender_for_raw_compiled_frame6kMpnLRegisterMap__0_; +text: .text%__1cbCCompiledCodeSafepointHandlerbDhandle_polling_page_exception6M_pC_; +text: .text%__1cFStateP_sub_Op_RShiftI6MpknENode__v_; +text: .text%__1cRsarI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__pC_; +text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__; +text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: compiledIC.o; +text: .text%__1cOcmovII_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQnotemp_iRegIOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__; +text: .text%__1cJTraceTime2t6MpkciipnMoutputStream__v_; +text: .text%__1cVshrL_reg_imm6_L2INodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPcmpFastLockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJimmI0OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cSmembar_releaseNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: 
ad_sparc.o; +text: .text%__1cMloadConDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQregP_to_stkPNodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMaxINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPcmpFastLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXmembar_acquire_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciMethodRinstructions_size6M_i_; +text: .text%__1cQsubL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerUcompiler_lock_object6MpnMRegisterImpl_222rnFLabel__v_; +text: .text%__1cRsarL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cISubLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJloadFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseGdo_new6M_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: loopopts.o; +text: .text%__1cUParallelScavengeHeapMmem_allocate6MLii_pnIHeapWord__; +text: .text%__1cIXorINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cSmembar_acquireNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmembar_acquireNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerDbpr6Mn0AKRCondition_in0AHPredict_pnMRegisterImpl_rnFLabel__v_: assembler_sparc.o; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceRefKlass.o; +text: .text%__1cGEventsDlog6FpkcE_v_: nmethod.o; +text: .text%jni_GetObjectClass: jni.o; +text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_: typeArrayKlassKlass.o; +text: .text%__1cOPhaseIdealLoopKclone_loop6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cSxorI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKPSYoungGenRcapacity_in_bytes6kM_L_; +text: .text%__1cOMacroAssemblerFalign6Mi_v_; +text: .text%__1cSstring_compareNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPconvL2I_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cbLtransform_int_divide_to_long_multiply6FpnIPhaseGVN_pnENode_i_3_: divnode.o; +text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_; +text: .text%__1cFForteNregister_stub6FpkcpC3_v_; +text: .text%__1cFStateM_sub_Op_AndL6MpknENode__v_; +text: .text%__1cTmembar_volatileNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_; +text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerVreset_last_Java_frame6M_v_; +text: .text%__1cLPhaseValuesKis_IterGVN6M_pnMPhaseIterGVN__: phaseX.o; +text: .text%__1cMciMethodDataStrap_recompiled_at6MpnLProfileData__i_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: memnode.o; +text: .text%__1cQandL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQstkI_to_regFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLPhaseValues2t6MpnFArena_I_v_; +text: .text%__1cLCodeletMark2t6MrpnZInterpreterMacroAssembler_pkcinJBytecodesECode__v_: interpreter.o; +text: .text%__1cLcmpD_ccNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOPhaseTransform2t6MpnFArena_nFPhaseLPhaseNumber__v_; +text: .text%__1cJStubQdDueueGcommit6Mi_v_; +text: .text%__1cINodeHash2t6MpnFArena_I_v_; +text: .text%__1cFStateP_sub_Op_ConvL2I6MpknENode__v_; +text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: classes.o; +text: .text%__1cJloadSNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cKarrayKlassMoop_is_array6kM_i_: typeArrayKlass.o; +text: .text%__1cSstring_compareNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: cfgnode.o; +text: .text%__1cQmulI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_; +text: .text%__1cPconvF2D_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_and_dispatch6MpnITemplate_nITosState__v_; +text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_; +text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__; +text: .text%__1cIMulINodeGadd_id6kM_pknEType__: classes.o; +text: .text%JVM_FindClassFromClassLoader; +text: .text%JVM_FindClassFromBootLoader; +text: .text%__1cHTypePtrFempty6kM_i_; +text: .text%__1cQaddP_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cbFunnecessary_membar_volatileNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cSsafePoint_pollNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerKsave_frame6Mi_v_; +text: .text%__1cSsafePoint_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerbBcheck_and_forward_exception6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cIModINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_; +text: .text%__1cJcmpOpOperJnot_equal6kM_i_: ad_sparc_clone.o; +text: .text%JVM_IHashCode; +text: .text%__1cKDictionarybAis_valid_protection_domain6MiInMsymbolHandle_nGHandle_2_i_; +text: .text%__1cQmodI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRsubI_zero_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcmovII_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSCompareAndSwapNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cTmembar_CPUOrderNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNSCMemProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_: reg_split.o; +text: .text%__1cTmembar_CPUOrderNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOPhaseIdealLoopVclone_up_backedge_goo6MpnENode_22_2_; +text: .text%__1cQComputeCallStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cJloadBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNSignatureInfoHdo_char6M_v_: frame.o; +text: .text%__1cNCompileBrokerZcompilation_is_prohibited6FnMmethodHandle_i_i_; +text: .text%__1cbCAbstractInterpreterGeneratorWset_short_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cbCAbstractInterpreterGeneratorQset_entry_points6MnJBytecodesECode__v_; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: loopTransform.o; +text: .text%__1cXmembar_acquire_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cKC2CompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_; +text: .text%JVM_GetClassLoader; +text: .text%__1cIciMethodJhas_loops6kM_i_; +text: .text%__1cNCompileBrokerQset_last_compile6FpnOCompilerThread_nMmethodHandle_ii_v_; +text: .text%__1cOPhaseIdealLoopLdo_split_if6MpnENode__v_; +text: 
.text%__1cKScheduling2t6MpnFArena_rnHCompile__v_; +text: .text%__1cMTypeKlassPtrFxmeet6kMpknEType__3_; +text: .text%__1cNSignatureInfoIdo_float6M_v_: bytecode.o; +text: .text%__1cKSchedulingMDoScheduling6M_v_; +text: .text%__1cIciMethodQbreak_at_execute6M_i_; +text: .text%__1cZInterpreterMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cNCompileBrokerYcheck_compilation_result6FnMmethodHandle_iippnHnmethod__i_; +text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_; +text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_; +text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_; +text: .text%__1cFciEnvPregister_method6MpnIciMethod_iiiiiipnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler_ii_v_; +text: .text%__1cJStartNodeJideal_reg6kM_I_: callnode.o; +text: .text%__1cQstkI_to_regFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJStartNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cOstackSlotPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cObox_handleNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreP0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQmulD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQmulD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescIset_code6MpnHnmethod__v_; +text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cNIdealLoopTreeNpolicy_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_; +text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_; +text: .text%__1cTCallInterpreterNodeSis_CallInterpreter6kM_pk0_: classes.o; +text: .text%__1cVExceptionHandlerTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cNIdealLoopTreeSpolicy_range_check6kMpnOPhaseIdealLoop__i_; +text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_; +text: .text%__1cQmulL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHnmethodLnew_nmethod6FnMmethodHandle_iiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__p0_; +text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_; +text: .text%__1cHnmethod2t6MpnNmethodOopDesc_iiiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__v_; +text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_; +text: .text%__1cICodeBlobJcopy_oops6MppnI_jobject_i_v_; +text: .text%__1cXMachCallInterpreterNodePret_addr_offset6M_i_; +text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_; +text: .text%__1cHnmethodSresolve_JNIHandles6M_v_; +text: .text%__1cUParallelScavengeHeapEused6kM_L_; +text: .text%__1cQcmovI_reg_ltNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: interp_masm_sparc.o; +text: .text%__1cIPhaseCCP2T6M_v_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: assembler_sparc.o; +text: .text%__1cKInlineTreeWbuild_inline_tree_root6F_p0_; +text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cIPhaseCCPMdo_transform6M_v_; +text: .text%__1cSPhaseRemoveUseless2t6MpnIPhaseGVN_pnQUnique_Node_List__v_; +text: 
.text%__1cHCompileLFinish_Warm6M_v_; +text: .text%__1cHCompileNreturn_values6MpnIJVMState__v_; +text: .text%__1cHCompileIOptimize6M_v_; +text: .text%__1cQUnique_Node_ListUremove_useless_nodes6MrnJVectorSet__v_; +text: .text%__1cIPhaseCCPJtransform6MpnENode__2_; +text: .text%__1cIPhaseCCPHanalyze6M_v_; +text: .text%__1cHCompileLInline_Warm6M_i_; +text: .text%__1cMPhaseIterGVN2t6Mp0_v_; +text: .text%__1cIciMethodRbuild_method_data6M_v_; +text: .text%__1cHCompileRbuild_start_state6MpnJStartNode_pknITypeFunc__pnIJVMState__; +text: .text%__1cJStartNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIPhaseCCP2t6MpnMPhaseIterGVN__v_; +text: .text%__1cHCompileVfinal_graph_reshaping6M_i_; +text: .text%__1cTmembar_CPUOrderNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cMPhaseIterGVN2t6MpnIPhaseGVN__v_; +text: .text%lwp_cond_init: os_solaris.o; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod_ii_v_; +text: .text%__1cHCompileUremove_useless_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cbAfinal_graph_reshaping_walk6FrnKNode_Stack_pnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cHCompileVidentify_useful_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_; +text: .text%__1cObranchConFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_; +text: .text%__1cSmulI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMachEpilogNodeQsafepoint_offset6kM_i_; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: callnode.o; +text: .text%__1cYDebugInformationRecorderKadd_oopmap6MiipnGOopMap__v_; +text: .text%__1cHBitDataKis_BitData6M_i_: ciMethodData.o; +text: .text%__1cSxorI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLBoxLockNodeDcmp6kMrknENode__I_; +text: .text%__1cKloadUBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_; +text: .text%__1cNSignatureInfoIdo_short6M_v_: bytecode.o; +text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_; +text: .text%__1cRNativeMovConstRegIset_data6Ml_v_; +text: .text%__1cZInterpreterMacroAssemblerTdispatch_Lbyte_code6MnITosState_ppCii_v_; +text: .text%__1cQregP_to_stkPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cISubLNodeDsub6kMpknEType_3_3_; +text: .text%__1cJStartNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverbGruntime_resolve_interface_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cKBinaryNodeGOpcode6kM_i_; +text: .text%__1cFStateO_sub_Op_Binary6MpknENode__v_; +text: .text%__1cMTailCallNodeGOpcode6kM_i_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_3pnRJavaCallArguments_nGHandle_6_6_; +text: .text%__1cQregF_to_stkINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constMethodKlass.o; +text: .text%__1cZCallDynamicJavaDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: loopnode.o; +text: .text%__1cENodeHis_Copy6kM_I_: loopnode.o; +text: 
.text%__1cKRegionNodeJideal_reg6kM_I_: loopnode.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: loopnode.o; +text: .text%__1cUGenericGrowableArrayIraw_find6kMpknEGrET__i_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii1i_v_; +text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_; +text: .text%__1cIAddFNodeGOpcode6kM_i_; +text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_; +text: .text%__1cLstoreC0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cODeoptimizationVtrap_state_has_reason6Fii_i_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_; +text: .text%__1cLRShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_; +text: .text%__1cTmembar_volatileNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFMutex2t6Mipkci_v_; +text: .text%jni_SetByteArrayRegion: jni.o; +text: .text%__1cObox_handleNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFParseLarray_store6MnJBasicType__v_; +text: .text%__1cGThreadVset_pending_exception6MpnHoopDesc_pkci_v_; +text: .text%__1cSThreadLocalStorageGthread6F_pnGThread__: assembler_sparc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: exceptions.o; +text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__; +text: .text%__1cKcmpOpUOperHgreater6kM_i_: ad_sparc_clone.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodKlass.o; +text: .text%__1cKPerfMemoryMmark_updated6F_v_; +text: .text%__1cCosLelapsedTime6F_d_; +text: .text%__1cKPerfMemoryFalloc6FL_pc_; +text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_; +text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodKlass.o; +text: .text%__1cLStrCompNodeKmatch_edge6kMI_I_; +text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__; +text: .text%jni_GetStringUTFChars: jni.o; +text: .text%__1cFParsebLincrement_and_test_invocation_counter6Mi_v_; +text: .text%jni_ReleaseStringUTFChars; +text: .text%__1cRsubI_zero_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulFNodeGOpcode6kM_i_; +text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_; +text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_; +text: .text%__1cNmethodOopDescWcompute_has_loops_flag6M_i_; +text: .text%JVM_IsInterrupted; +text: .text%__1cIModINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNSignatureInfoHdo_char6M_v_: bytecode.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: cpCacheKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: cpCacheKlass.o; +text: .text%JVM_FindLibraryEntry; +text: .text%__1cKloadUBNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_; +text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlass.o; +text: .text%__1cQmodI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodKlass.o; +text: 
.text%__1cHCompile2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: assembler_sparc.o; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cIMulINodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cQaddF_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLOptoRuntimeOarraycopy_Type6F_pknITypeFunc__; +text: .text%__1cOLibraryCallKitQinline_arraycopy6M_i_; +text: .text%__1cTbasictype2arraycopy6FnJBasicType_i_pC_; +text: .text%__1cJStoreNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cOstackSlotIOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cQregI_to_stkINodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cIXorINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOMacroAssemblerEstop6Mpkc_v_; +text: .text%__1cQshlI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcmpD_ccNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstring_compareNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQshlL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOloadConI13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cObox_handleNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_; +text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_; +text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_; +text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_; +text: .text%__1cHMatcherOc_return_value6Fii_nLRegPair__; +text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_; +text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_; +text: .text%__1cQmulI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_; +text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_; +text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cIUniverseHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cVCallRuntimeDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cYinternal_word_RelocationJpack_data6M_i_; +text: .text%__1cYinternal_word_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cYinternal_word_RelocationWfix_relocation_at_move6Ml_v_; +text: .text%__1cODataRelocationGoffset6M_i_: relocInfo.o; +text: .text%__1cPconvF2D_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOstackSlotPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cQshlI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMPhaseChaitinGSelect6M_I_; +text: .text%__1cPconvF2D_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_; +text: .text%__1cOClearArrayNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLOptoRuntimeSnew_typeArray_Type6F_pknITypeFunc__; +text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_: jni.o; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_; +text: .text%__1cIGraphKitJnew_array6MpnENode_nJBasicType_pknEType_pknMTypeKlassPtr__2_; +text: .text%__1cPconvL2I_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; 
+text: .text%__1cOloadConI13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulF_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIL_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cQshrI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMadjust_check6FpnENode_11iipnMPhaseIterGVN__v_: ifnode.o; +text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_; +text: .text%__1cKRegionNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cQmulD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKConv2BNodeGOpcode6kM_i_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_; +text: .text%__1cFParseSjump_switch_ranges6MpnENode_pnLSwitchRange_4i_v_; +text: .text%__1cNloadConP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: cfgnode.o; +text: .text%__1cFParseLdo_newarray6MnJBasicType__v_; +text: .text%__1cFParseOmerge_new_path6Mi_v_; +text: .text%__1cFParseTprofile_switch_case6Mi_v_; +text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cRbranchLoopEndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRbranchLoopEndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQregI_to_stkINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRbranchLoopEndNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_; +text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_; +text: .text%__1cCosTnative_java_library6F_pv_; +text: .text%__1cSReferenceProcessorQprocess_phaseJNI6M_v_; +text: .text%__1cIimmIOperFclone6kM_pnIMachOper__; +text: .text%__1cCosbCmake_polling_page_unreadable6F_v_; +text: .text%__1cSReferenceProcessorbBenqueue_discovered_reflists6MppnHoopDesc__v_; +text: .text%__1cPGCMemoryManagerIgc_begin6M_v_; +text: .text%__1cPGCMemoryManagerGgc_end6M_v_; +text: .text%__1cUParallelScavengeHeapQresize_all_tlabs6M_v_; +text: .text%__1cUParallelScavengeHeapPupdate_counters6M_v_; +text: .text%__1cUSafepointSynchronizeFbegin6F_v_; +text: .text%__1cUParallelScavengeHeapTensure_parseability6M_v_; +text: .text%__1cMloadConINodeFclone6kM_pnENode__; +text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_; +text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_; +text: .text%__1cUParallelScavengeHeapOfill_all_tlabs6M_v_; +text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_; +text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_; +text: .text%__1cUParallelScavengeHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__: ciObjArrayKlass.o; +text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_; +text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_; +text: .text%__1cSReferenceProcessorbDenqueue_discovered_references6M_i_; +text: .text%__1cIDivINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cTDerivedPointerTablePupdate_pointers6F_v_; +text: .text%__1cXjava_lang_ref_ReferenceRpending_list_addr6F_ppnHoopDesc__; +text: .text%__1cONMethodSweeperFsweep6F_v_; +text: .text%__1cKPSYoungGenPupdate_counters6M_v_; +text: .text%__1cMCounterDecayFdecay6F_v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: 
.text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_; +text: .text%__1cTAbstractInterpreterRnotice_safepoints6F_v_; +text: .text%__1cXTraceMemoryManagerStats2T6M_v_; +text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_; +text: .text%__1cNCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapOfill_all_tlabs6M_v_; +text: .text%__1cSReferenceProcessorbDprocess_discovered_references6M_v_; +text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_; +text: .text%__1cNloadConPCNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cbAPSGCAdaptivePolicyCountersbBupdate_counters_from_policy6M_v_; +text: .text%__1cQLRUMaxHeapPolicy2t6M_v_; +text: .text%__1cTDerivedPointerTableFclear6F_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersPupdate_counters6M_v_; +text: .text%__1cSsubL_reg_reg_2NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cXTraceMemoryManagerStats2t6Mi_v_; +text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_; +text: .text%__1cUSafepointSynchronizeDend6F_v_; +text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_; +text: .text%__1cQmulD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cCosbAmake_polling_page_readable6F_v_; +text: .text%__1cNloadConL0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTAbstractInterpreterRignore_safepoints6F_v_; +text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cFParseRarray_store_check6M_v_; +text: .text%__1cNloadConP0NodeFclone6kM_pnENode__; +text: .text%__1cJimmP0OperFclone6kM_pnIMachOper__; +text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_; +text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_; +text: .text%__1cIMinINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMNativeLookupGlookup6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cObranchConFNodeIpipeline6kM_pknIPipeline__; +text: .text%jni_NewByteArray: jni.o; +text: .text%__1cSmembar_releaseNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQdivL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2ipnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cSmembar_releaseNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLcmpD_ccNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__; +text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__; +text: .text%__1cLRethrowNode2t6MpnENode_22222_v_; +text: .text%__1cRsubI_zero_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompileSrethrow_exceptions6MpnIJVMState__v_; +text: .text%__1cQandI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSbranchCon_longNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSbranchCon_longNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cSbranchCon_longNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYinlineCallClearArrayNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_; +text: .text%__1cYinlineCallClearArrayNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cURethrowExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__; +text: .text%__1cLRethrowNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cObranchConFNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cMNativeLookupLlookup_base6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__; +text: .text%__1cYinlineCallClearArrayNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cURethrowExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cISubLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_; +text: .text%__1cQPSIsAliveClosureLdo_object_b6MpnHoopDesc__i_: psScavenge.o; +text: .text%__1cSconvI2F_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cWCallLeafNoFPDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cYMachCallCompiledJavaNodePret_addr_offset6M_i_; +text: .text%__1cWCallLeafNoFPDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse1.o; +text: .text%JVM_GetMethodIxExceptionTableEntry; +text: .text%__1cWCallLeafNoFPDirectNodeRis_safepoint_node6kM_i_: ad_sparc_misc.o; +text: .text%__1cWCallLeafNoFPDirectNodeKmethod_set6Ml_v_; +text: .text%__1cWCallLeafNoFPDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cSandI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLstoreP0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQmulF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLMachUEPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRorI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSxorI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_; +text: .text%__1cMVM_OperationIevaluate6M_v_; +text: .text%__1cCosTget_native_priority6FkpknGThread_pi_nIOSReturn__; +text: .text%__1cIMinINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cGThreadMget_priority6Fkpk0_nOThreadPriority__; +text: .text%__1cJEventMark2t6MpkcE_v_: vmThread.o; +text: .text%__1cOloadConL13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cUdivL_reg_imm13_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_; +text: .text%__1cCosMget_priority6FkpknGThread_rnOThreadPriority__nIOSReturn__; +text: .text%__1cSReferenceProcessorZadd_to_discovered_list_mt6MppnHoopDesc_23_v_; +text: .text%__1cMloadConLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerEsetx6MxpnMRegisterImpl_2nJrelocInfoJrelocType__v_; +text: .text%__1cZInterpreterMacroAssemblerZget_2_byte_integer_at_bcp6MipnMRegisterImpl_2n0ALsignedOrNot_n0AKsetCCOrNot__v_; +text: .text%__1cQmulI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_; +text: .text%__1cGGCTask2t6Mn0AEKindEkind__v_; +text: .text%__1cSconvI2F_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; 
+text: .text%__1cUParallelScavengeHeapQresize_young_gen6MLL_v_; +text: .text%__1cPVM_GC_OperationQgc_count_changed6kM_i_; +text: .text%__1cKPSYoungGenLswap_spaces6M_v_; +text: .text%__1cPVM_GC_OperationNdoit_prologue6M_i_; +text: .text%__1cUPSAdaptiveSizePolicyWminor_collection_begin6M_v_; +text: .text%__1cKPSYoungGenNresize_spaces6MLL_v_; +text: .text%__1cSPSPromotionManagerbBvm_thread_promotion_manager6F_p0_; +text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_; +text: .text%__1cSPSPromotionManagerMpre_scavenge6F_v_; +text: .text%__1cSPSPromotionManagerNpost_scavenge6F_v_; +text: .text%__1cKPSScavengeQinvoke_no_policy6Fpi_i_; +text: .text%__1cUPSAdaptiveSizePolicyPupdate_averages6MiLL_v_; +text: .text%__1cNJvmtiGCMarker2T6M_v_; +text: .text%__1cUPSAdaptiveSizePolicybPcompute_survivor_space_size_and_threshold6MiiL_i_; +text: .text%__1cNJvmtiGCMarker2t6Mi_v_; +text: .text%__1cSsubL_reg_reg_2NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cZSerialOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cZSerialOldToYoungRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cUPSAdaptiveSizePolicyUminor_collection_end6MnHGCCauseFCause__v_; +text: .text%__1cKPSYoungGenGresize6MLL_v_; +text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_; +text: .text%__1cKPSYoungGenRresize_generation6MLL_i_; +text: .text%__1cSmulI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_; +text: .text%__1cSCardTableExtensionRscavenge_contents6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager__v_; +text: .text%__1cHRetNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cUWaitForBarrierGCTaskHdestroy6Fp0_v_; +text: .text%__1cLGCTaskQdDueueGcreate6F_p0_; +text: .text%__1cLGCTaskQdDueueHenqueue6Mp0_v_; +text: .text%__1cUWaitForBarrierGCTaskEname6M_pc_: gcTaskManager.o; +text: .text%__1cNGCTaskManagerIadd_list6MpnLGCTaskQdDueue__v_; +text: .text%__1cObranchConFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cUWaitForBarrierGCTaskGcreate6F_p0_; +text: .text%__1cSaddL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNBarrierGCTaskOdo_it_internal6MpnNGCTaskManager_I_v_; +text: .text%__1cNMonitorSupplyHrelease6FpnHMonitor__v_; +text: .text%__1cNMonitorSupplyHreserve6F_pnHMonitor__; +text: .text%__1cUWaitForBarrierGCTaskIwait_for6M_v_; +text: .text%__1cNGCTaskManagerVrelease_all_resources6M_v_; +text: .text%__1cUWaitForBarrierGCTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cUWaitForBarrierGCTaskIdestruct6M_v_; +text: .text%__1cHThreadsZcreate_thread_roots_tasks6FpnLGCTaskQdDueue__v_; +text: .text%__1cSObjectSynchronizerKfast_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cQstkI_to_regFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvF2D_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreB0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMloadConDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cUmulL_reg_imm13_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cISubLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: ad_sparc_misc.o; +text: .text%jni_GetStringCritical: jni.o; +text: .text%jni_ReleaseStringCritical: 
jni.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: objArrayKlass.o; +text: .text%__1cFStateM_sub_Op_XorI6MpknENode__v_; +text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlass.o; +text: .text%__1cNobjArrayKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cLOptoRuntimeInew_Type6F_pknITypeFunc__; +text: .text%__1cIDivINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLStrCompNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__: callnode.o; +text: .text%__1cIGraphKitMnew_instance6MpnPciInstanceKlass__pnENode__; +text: .text%__1cHnmethodXinterpreter_entry_point6M_pC_; +text: .text%__1cOcmovIL_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSInterpreterRuntimeTnmethod_entry_point6FpnKJavaThread_pnNmethodOopDesc_pnHnmethod__pC_; +text: .text%__1cQshlI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_; +text: .text%__1cTCallDynamicJavaNodeSis_CallDynamicJava6kM_pk0_: callnode.o; +text: .text%__1cLRuntimeStubbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cQshlL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKg3RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cQaddF_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParsePmerge_exception6Mi_v_; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: frame.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFParseMdo_checkcast6M_v_; +text: .text%__1cIDivLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cRtestI_reg_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQregF_to_stkINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_; +text: .text%__1cKstoreBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cUPSAdaptiveSizePolicybDcompute_generation_free_space6MLLLLLLLi_v_; +text: .text%__1cIPSOldGenMmax_gen_size6M_L_: psOldGen.o; +text: .text%__1cUPSAdaptiveSizePolicyQdecaying_gc_cost6kM_d_; +text: .text%__1cUPSAdaptiveSizePolicyZdecay_supplemental_growth6Mi_v_; +text: .text%__1cUPSAdaptiveSizePolicyVadjust_for_throughput6MipL1_v_; +text: .text%__1cKPSScavengeGinvoke6Fpi_v_; +text: .text%__1cbDVM_ParallelGCFailedAllocationEname6kM_pkc_: vm_operations.o; +text: .text%__1cUPSAdaptiveSizePolicyOshould_full_GC6ML_i_; +text: .text%__1cUParallelScavengeHeapTfailed_mem_allocate6MpiLii_pnIHeapWord__; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cbDVM_ParallelGCFailedAllocation2t6MLiiI_v_; +text: .text%__1cJCMoveNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cbDVM_ParallelGCFailedAllocationEdoit6M_v_; +text: .text%__1cMTypeKlassPtrRcast_to_exactness6kMi_pknEType__; +text: .text%__1cIMachNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: templateTable_sparc.o; +text: .text%__1cKstoreLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSxorI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQshlL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulF_reg_regNodeJnum_opnds6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cENodeHdel_out6Mp0_v_: generateOptoStub.o; +text: .text%__1cRloadConP_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKo1RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cQcmovI_reg_gtNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmethodOopDescLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cPstoreI_FregNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_; +text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__; +text: .text%__1cNinstanceKlassQmethod_index_for6kMpnNmethodOopDesc_pnGThread__i_; +text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__; +text: .text%__1cLServiceUtilLvisible_oop6FpnHoopDesc__i_: objectMonitor_solaris.o; +text: .text%__1cNmethodOopDescVparameter_annotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cJLoadDNodeGOpcode6kM_i_; +text: .text%__1cHRegMask2t6M_v_: matcher.o; +text: .text%__1cQcmovI_reg_gtNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cENodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cOPhaseIdealLoopOadd_constraint6MiipnENode_22p23_v_; +text: .text%__1cJLoadPNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cQshrI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKloadUBNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKimmU13OperIconstant6kM_l_: ad_sparc_clone.o; +text: .text%__1cUcompU_iReg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHciKlassSsuper_check_offset6M_I_; +text: .text%__1cQciByteCodeStreamPget_klass_index6M_i_; +text: .text%__1cKciTypeFlowLStateVectorEtrap6MpnQciByteCodeStream_pnHciKlass_i_v_; +text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_; +text: .text%__1cHMonitor2t6Mipkci_v_; +text: .text%__1cIModLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLConvI2DNodeGOpcode6kM_i_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: interp_masm_sparc.o; +text: .text%__1cOtypeArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cQregP_to_stkPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cOcmovPI_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMregD_lowOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cQxorI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMMachProjNodeHsize_of6kM_I_: classes.o; +text: .text%__1cQshrI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFParseWcheck_interpreter_type6MpnENode_pknEType_rpnNSafePointNode__2_; +text: .text%__1cTmembar_volatileNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FLipnGThread__pnIHeapWord__: arrayKlass.o; +text: .text%__1cOcmovII_immNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseXfetch_interpreter_state6MipknEType_pnENode__5_; +text: .text%__1cSsubL_reg_reg_2NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPmethodDataKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cSTailCalljmpIndNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRloadConP_pollNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: interpreter_sparc.o; +text: .text%__1cOcmovII_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddF_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLstoreI0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovPP_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFframeZinterpreter_frame_set_mdp6MpC_v_; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: bytecode.o; +text: .text%JVM_GetCallerClass; +text: .text%__1cMloadConLNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: ad_sparc.o; +text: .text%__1cObox_handleNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJScopeDescGsender6kM_p0_; +text: .text%__1cICmpFNodeGOpcode6kM_i_; +text: .text%jni_GetFieldID: jni.o; +text: .text%__1cOcompiledVFrameGsender6kM_pnGvframe__; +text: .text%__1cSobjArrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%jni_IsAssignableFrom: jni.o; +text: .text%__1cLConvI2FNodeGOpcode6kM_i_; +text: .text%__1cCosJyield_all6Fi_v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pCii_v_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: vmThread.o; +text: .text%__1cQdivL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQciByteCodeStreamFtable6MnJBytecodesECode__2_; +text: .text%__1cGEventsDlog6FpkcE_v_: sharedRuntime.o; +text: .text%__1cOcmovII_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_; +text: .text%__1cUmulL_reg_imm13_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cYcompareAndSwapL_boolNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOLibraryCallKitRinline_unsafe_CAS6MnJBasicType__i_; +text: .text%__1cTCompareAndSwapLNode2t6MpnENode_2222_v_; +text: .text%__1cIProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: interp_masm_sparc.o; +text: .text%__1cNSCMemProjNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cVCallRuntimeDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cYcompareAndSwapL_boolNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSCompareAndSwapNode2t6MpnENode_2222_v_; +text: .text%__1cOCallNativeNodeGOpcode6kM_i_; +text: .text%__1cQshlL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHTypeAryFxdual6kM_pknEType__; +text: .text%__1cPconvF2D_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarL_reg_imm6NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOPhaseIdealLoopJdo_unroll6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cMtlsLoadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQregP_to_stkPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJAssemblerDbpr6Mn0AKRCondition_in0AHPredict_pnMRegisterImpl_rnFLabel__v_: interp_masm_sparc.o; +text: .text%__1cMloadConLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cQshrI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIL_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cTCallInterpreterNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cHMatcherbAinterpreter_method_oop_reg6F_nHOptoRegEName__; +text: .text%__1cZCallInterpreterDirectNodeKmethod_set6Ml_v_; +text: .text%__1cOloadConL13NodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cSTailCalljmpIndNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompilebMGenerate_Compiled_To_Interpreter_Graph6MpknITypeFunc_pC_v_; +text: .text%__1cPadd_derived_oop6FppnHoopDesc_2_v_: oopMap.o; +text: .text%__1cIciMethodRinterpreter_entry6M_pC_; +text: .text%__1cZCallInterpreterDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXMachCallInterpreterNodeWis_MachCallInterpreter6M_p0_: ad_sparc_misc.o; +text: .text%__1cZCallInterpreterDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cQjava_lang_ThreadJis_daemon6FpnHoopDesc__i_; +text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cRorI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNloadConPCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerIpush_ptr6MpnMRegisterImpl__v_; +text: .text%__1cODeoptimizationYtrap_state_is_recompiled6Fi_i_; +text: .text%__1cSmulI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_: icBuffer.o; +text: .text%__1cJloadINodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIL_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_: icBuffer.o; +text: .text%__1cKstoreFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%jni_SetBooleanField: jni.o; +text: .text%__1cQregI_to_stkINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cWResolveOopMapConflictsRpossible_gc_point6MpnOBytecodeStream__i_: rewriter.o; +text: .text%__1cRtestI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cObox_handleNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerGpush_i6MpnMRegisterImpl__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_; +text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cSTailCalljmpIndNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSTailCalljmpIndNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNObjectMonitorHRecycle6M_v_; +text: .text%__1cRsarL_reg_imm6NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cRcompL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmulL_reg_reg_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cXvirtual_call_RelocationJpack_data6M_i_; +text: .text%__1cQcmovI_reg_gtNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cXMachCallDynamicJavaNodePret_addr_offset6M_i_; +text: .text%__1cOMacroAssemblerWstore_unaligned_double6MpnRFloatRegisterImpl_pnMRegisterImpl_i_v_; +text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cLcmpD_ccNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshrL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__; +text: .text%__1cHMatcherXinterpreter_arg_ptr_reg6F_nHOptoRegEName__; +text: .text%__1cQshlL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOloadConL13NodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cHciKlassMis_interface6M_i_: ciTypeArrayKlass.o; +text: .text%__1cQmulF_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLOptoRuntimeThandle_wrong_method6FpnKJavaThread__pC_; +text: .text%__1cOMacroAssemblerUstore_unaligned_long6MpnMRegisterImpl_2i_v_; +text: .text%__1cQaddL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHBoxNodeGOpcode6kM_i_; +text: .text%__1cSCompiledStaticCallIis_clean6kM_i_; +text: .text%__1cFParseScreate_jump_tables6MpnENode_pnLSwitchRange_4_i_; +text: .text%__1cbFunnecessary_membar_volatileNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_; +text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_; +text: .text%__1cLOptoRuntimeVresolve_static_call_C6FpnKJavaThread__pC_; +text: .text%__1cKsplit_once6FpnMPhaseIterGVN_pnENode_333_v_: cfgnode.o; +text: .text%__1cSconvD2I_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_; +text: .text%__1cSdivL_reg_reg_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_reg_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPstoreI_FregNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFciEnvZcall_has_multiple_targets6FpnNinstanceKlass_nMsymbolHandle_3ri_i_; +text: .text%__1cQComputeCallStackJdo_double6M_v_: generateOopMap.o; +text: .text%__1cSsubL_reg_reg_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopUpeeled_dom_test_elim6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cZInterpreterMacroAssemblerKverify_oop6MpnMRegisterImpl_nITosState__v_; +text: .text%__1cSsubL_reg_reg_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerGpush_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_; +text: .text%__1cZInterpreterMacroAssemblerFpop_i6MpnMRegisterImpl__v_; +text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__; +text: .text%__1cPconvF2D_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_; +text: .text%__1cSandL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSxorI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNObjectMonitorEwait6MxipnGThread__v_; +text: .text%__1cQSystemDictionaryVadd_loader_constraint6FnMsymbolHandle_nGHandle_2pnGThread__v_; +text: .text%__1cOMethodLivenessKBasicBlockFsplit6Mi_p1_; +text: .text%JVM_MonitorWait; +text: .text%__1cKcmpOpUOperKless_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_; +text: .text%__1cENodeEgetd6kM_d_; +text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_; +text: .text%__1cUdivL_reg_imm13_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcmpD_ccNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIciObject2t6MpnHciKlass__v_; +text: .text%__1cKloadUBNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseSjump_if_false_fork6MpnGIfNode_ii_v_; +text: .text%__1cLRShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLcmpF_ccNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cQstkI_to_regFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKCMovePNodeGOpcode6kM_i_; +text: .text%__1cKciTypeFlowOsplit_range_at6Mi_pn0AFRange__; +text: .text%__1cIMaxINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cNloadConP0NodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cWloadConI_x43300000NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPstoreI_FregNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSdivL_reg_reg_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKloadUBNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvI2F_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cYinternal_word_RelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cENode2t6Mp0111111_v_; +text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_; +text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cGciType2t6MpnHciKlass__v_; +text: .text%__1cRbranchLoopEndNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cKstoreBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQset_lwp_priority6Fiii_i_; +text: .text%__1cbDreorder_based_on_method_index6FpnPobjArrayOopDesc_1ppnHoopDesc__v_: methodOop.o; +text: .text%__1cSaddP_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__; +text: .text%__1cUciInstanceKlassKlassEmake6F_p0_; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__; +text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_: markSweep.o; +text: .text%__1cIciObjectUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cOcmovPP_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSingletonBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cXjava_lang_ref_ReferenceOset_discovered6FpnHoopDesc_2_v_; +text: .text%__1cIciSymbolHbyte_at6Mi_i_; +text: .text%__1cKstoreCNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUParallelScavengeHeapIcapacity6kM_L_; +text: .text%__1cbCAbstractInterpreterGeneratorVset_vtos_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cZInterpreterMacroAssemblerXget_constant_pool_cache6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_l6MpnMRegisterImpl__v_; +text: .text%__1cOcmovPP_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cIDivLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOcmovII_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNflagsRegFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cQregP_to_stkPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSdivL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMjniIdSupportNto_jmethod_id6FpnNmethodOopDesc__pnK_jmethodID__; +text: .text%__1cMLinkResolverUresolve_virtual_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cLstoreC0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cWloadConI_x41f00000NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKCompiledICMset_to_clean6M_v_; +text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__; +text: .text%__1cNget_method_id6FpnHJNIEnv__pnH_jclass_pkc5ipnGThread__pnK_jmethodID__: jni.o; +text: 
.text%__1cKstoreINodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%jni_NewStringUTF: jni.o; +text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cTAbstractInterpreterLdeopt_entry6FnITosState_i_pC_; +text: .text%JVM_SetClassSigners; +text: .text%__1cQregP_to_stkPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRorI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__; +text: .text%JVM_DefineClassWithSource; +text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceRefKlass.o; +text: .text%__1cQRelocationHolderEplus6kMi_0_; +text: .text%__1cISubLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cTmembar_volatileNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKloadUBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMflagsRegOperFclone6kM_pnIMachOper__; +text: .text%__1cObox_handleNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadSNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cMnegD_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHOrINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cOloadI_fregNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOloadI_fregNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovDF_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%jni_CallIntMethod: jni.o; +text: .text%__1cFParseHdo_irem6M_v_; +text: .text%__1cVinline_cache_regPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FLi_pnGThread__; +text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_; +text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_; +text: .text%get_thread; +text: .text%__1cJLoadINodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_; +text: .text%__1cRtestI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConPCNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_return_entry_for6MnITosState_i_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_deopt_entry_for6MnITosState_i_pC_; +text: .text%__1cHCompilebMGenerate_Interpreter_To_Compiled_Graph6MpknITypeFunc__v_; +text: .text%__1cbACallCompiledJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUCallCompiledJavaNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cFTypeDJsingleton6kM_i_; +text: .text%__1cSCountedLoopEndNode2t6MpnENode_2ff_v_; +text: .text%__1cSbranchCon_longNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cPstoreI_FregNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLstoreF0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cbACallCompiledJavaDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cQmulI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateP_sub_Op_RShiftL6MpknENode__v_; +text: .text%__1cbACallCompiledJavaDirectNodeKmethod_set6Ml_v_; +text: .text%__1cHCompileRmake_vm_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cbACallCompiledJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_GetClassMethodsCount; +text: .text%JVM_GetClassFieldsCount; +text: 
.text%JVM_GetClassCPTypes; +text: .text%__1cQjava_lang_ThreadMis_stillborn6FpnHoopDesc__i_; +text: .text%__1cOcmovII_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%JVM_GetClassCPEntriesCount; +text: .text%__1cOMacroAssemblerKverify_FPU6Mipkc_v_; +text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__; +text: .text%__1cKstoreLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIAddFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIMulDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQshlI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_reg_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadDNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cQregF_to_stkINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: thread.o; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_; +text: .text%__1cIAddDNodeGOpcode6kM_i_; +text: .text%__1cRtestI_reg_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6ML_v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interpreterRT_sparc.o; +text: .text%__1cbBinitialize_itable_for_klass6FpnMklassOopDesc__v_; +text: .text%__1cOMacroAssemblerFsethi6MrnHAddress_i_v_: interpreterRT_sparc.o; +text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_; +text: .text%__1cMloadConLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIimmLOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: machnode.o; +text: .text%__1cQmulD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRorI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUPSMarkSweepDecoratorKprecompact6M_v_; +text: .text%jni_FindClass: jni.o; +text: .text%__1cbFunnecessary_membar_volatileNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverbPlinktime_resolve_interface_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__; +text: .text%__1cUPSMarkSweepDecoratorPadjust_pointers6M_v_; +text: .text%__1cPconvI2L_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUPSMarkSweepDecoratorHcompact6Mi_v_; +text: .text%__1cMloadConFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_2NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHnmethodPis_dependent_on6MpnMklassOopDesc__i_; +text: .text%__1cRshlL_reg_imm6NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJLoadSNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cIModLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKLoadPCNodeGOpcode6kM_i_; +text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__; +text: .text%__1cPconvF2D_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%JVM_IsPrimitiveClass; +text: .text%__1cRsarL_reg_imm6NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cUmulL_reg_imm13_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_; +text: 
.text%__1cbDjava_lang_reflect_ConstructorVhas_annotations_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorThas_signature_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cGThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__; +text: .text%__1cGThreadVis_jvmti_agent_thread6kM_i_: thread.o; +text: .text%__1cOcmovII_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cbDjava_lang_reflect_ConstructorbFhas_parameter_annotations_field6F_i_; +text: .text%__1cRorI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSconvI2F_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNimmP_pollOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cTmembar_volatileNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFTypeDFxmeet6kMpknEType__3_; +text: .text%__1cRloadConP_pollNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvI2D_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cQaddD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cIDivDNodeGOpcode6kM_i_; +text: .text%__1cJloadINodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLstoreC0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSdivL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSsubL_reg_reg_2NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNSharedRuntimeDd2l6Fd_x_; +text: .text%__1cIMulINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMnegD_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUmulL_reg_imm13_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cUdivL_reg_imm13_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompileQgrow_alias_types6M_v_; +text: .text%lwp_cond_destroy: os_solaris.o; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceRefKlass.o; +text: .text%__1cHMonitor2T6M_v_; +text: .text%__1cQdivD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIModINodeJideal_reg6kM_I_: classes.o; +text: .text%lwp_mutex_destroy: os_solaris.o; +text: .text%__1cVshrL_reg_imm6_L2INodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSconvI2D_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOimmI_32_63OperIconstant6kM_l_: ad_sparc_clone.o; +text: .text%__1cFMutex2T6M_v_; +text: .text%__1cQregI_to_stkINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRcompL_reg_conNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerHpop_ptr6MpnMRegisterImpl__v_; +text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_; +text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_; +text: .text%__1cOcmovII_regNodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cICmpDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_: callnode.o; +text: .text%__1cQaddD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIGraphKitPdstore_rounding6MpnENode__2_; +text: .text%__1cSdivL_reg_reg_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregI_to_stkINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFframeVshould_be_deoptimized6kM_i_; +text: .text%__1cUmulL_reg_imm13_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cICodeBlobOis_java_method6kM_i_: codeBlob.o; +text: .text%__1cQregP_to_stkPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cFciEnvWis_dependence_violated6FpnMklassOopDesc_pnNmethodOopDesc__i_; +text: .text%__1cNObjectMonitor2t6M_v_; +text: .text%__1cQregP_to_stkPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFBlockNset_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_; +text: .text%__1cRtestI_reg_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCompiledICSset_ic_destination6MpC_v_; +text: .text%JVM_MonitorNotify; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: multnode.o; +text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_; +text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceRefKlass.o; +text: .text%__1cSstkL_to_regD_0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__: classes.o; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_sparc.o; +text: .text%__1cNimmP_pollOperFclone6kM_pnIMachOper__; +text: .text%__1cOPhaseIdealLoopOdo_range_check6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cRloadConP_pollNodeFclone6kM_pnENode__; +text: .text%__1cSstring_compareNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlassKlass.o; +text: .text%__1cOMacroAssemblerDset6MlpnMRegisterImpl_rknQRelocationHolder__v_: templateTable_sparc.o; +text: .text%__1cSconvI2F_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSdivL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRtestI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNloadKlassNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: interpreterRT_sparc.o; +text: .text%__1cQregL_to_stkLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: instanceKlassKlass.o; +text: .text%__1cNRelocIteratorEnext6M_i_: output.o; +text: .text%__1cINegDNodeGOpcode6kM_i_; +text: .text%__1cFStateM_sub_Op_SubL6MpknENode__v_; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodDataKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodDataKlass.o; +text: .text%__1cMloadConLNodeFclone6kM_pnENode__; +text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_; +text: .text%__1cQaddF_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZget_mirror_from_signature6FnMmethodHandle_pnPSignatureStream_pnGThread__pnHoopDesc__; +text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cQregL_to_stkLNodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_; +text: 
.text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__; +text: .text%__1cJCmpD3NodeGOpcode6kM_i_; +text: .text%__1cQsubL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cKstoreFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKReflectionKnew_method6FnMmethodHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbFhas_parameter_annotations_field6F_i_; +text: .text%__1cNmethodOopDescSannotation_default6kM_pnQtypeArrayOopDesc__; +text: .text%__1cIimmLOperFclone6kM_pnIMachOper__; +text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodVhas_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbChas_annotation_default_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodThas_signature_field6F_i_; +text: .text%__1cIregDOperFclone6kM_pnIMachOper__; +text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_; +text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__; +text: .text%__1cNIdealLoopTreeXpolicy_maximally_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_; +text: .text%__1cIMulINodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cJEventMark2t6MpkcE_v_: psMarkSweep.o; +text: .text%jni_GetMethodID: jni.o; +text: .text%__1cUdivL_reg_imm13_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTAbstractInterpreterWlayout_activation_impl6FpnNmethodOopDesc_iiiipnFframe_4i_i_; +text: .text%__1cOcmovPP_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_2NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__; +text: .text%__1cUmulL_reg_imm13_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulINodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cJCMoveNodeEmake6FpnENode_222pknEType__p0_; +text: .text%__1cFTypeFJsingleton6kM_i_; +text: .text%__1cIMinINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJCMoveNode2t6MpnENode_22pknEType__v_: connode.o; +text: .text%__1cSandL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerDjmp6MpnMRegisterImpl_ipkci_v_; +text: .text%__1cRorI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: connode.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: connode.o; +text: .text%__1cQshrL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%JVM_GetClassDeclaredConstructors; +text: .text%__1cQsubF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLcmpF_ccNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_immNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOcmovDF_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: deoptimization.o; +text: .text%__1cMmatch_option6FpknMJavaVMOption_pkcp4_i_: arguments.o; +text: .text%__1cQregI_to_stkINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMregD_lowOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cQstkI_to_regFNodeErule6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cQdivL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_; +text: .text%__1cXconvI2D_regDHi_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJCMoveNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cQmulD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQshlI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MipnMRegisterImpl__v_; +text: .text%__1cQmulF_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIDivLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_; +text: .text%__1cNminI_eRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_ltNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvD2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNminI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cQaddD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreC0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeBlobJis_zombie6kM_i_: onStackReplacement.o; +text: .text%__1cObranchConFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cUdivL_reg_imm13_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSsubL_reg_reg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNloadConPCNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConPCNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadINodeFclone6kM_pnENode__; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: node.o; +text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_; +text: .text%__1cQregI_to_stkINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQshlI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSconvD2I_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: node.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: node.o; +text: .text%__1cNloadConPCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__; +text: .text%__1cNinstanceKlassQfind_local_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__i_; +text: .text%__1cUGenericGrowableArrayKraw_remove6MpknEGrET__v_; +text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_: icBuffer.o; +text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_: icBuffer.o; +text: .text%__1cQshrL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOloadI_fregNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRsarI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSconvD2I_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQregF_to_stkINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cINodeHashEgrow6M_v_; +text: .text%__1cZCallDynamicJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXvirtual_call_RelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cQMachCallJavaNodeVis_MachCallStaticJava6M_pnWMachCallStaticJavaNode__: ad_sparc_misc.o; +text: .text%__1cZCallDynamicJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQdivL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_; +text: .text%__1cSdivL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: output.o; +text: .text%__1cOcmovDF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cZCallDynamicJavaDirectNodeKmethod_set6Ml_v_; +text: .text%__1cSmulL_reg_reg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovIF_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovIF_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIVMThreadMis_VM_thread6kM_i_: vmThread.o; +text: .text%__1cQsubD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%JVM_IsThreadAlive; +text: .text%__1cQjava_lang_ThreadIis_alive6FpnHoopDesc__i_; +text: .text%__1cOMacroAssemblerOstore_argument6MpnMRegisterImpl_rnIArgument__v_: interpreterRT_sparc.o; +text: .text%__1cKo1RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_: interpreterRT_sparc.o; +text: .text%__1cHnmethodNis_osr_method6kM_i_: nmethod.o; +text: .text%__1cObox_handleNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cGThreadOis_Java_thread6kM_i_: vmThread.o; +text: .text%__1cENodeEgetf6kM_f_; +text: .text%__1cQmulF_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFKlassMoop_is_klass6kM_i_: typeArrayKlass.o; +text: .text%__1cLstoreP0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUVirtualCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cNCallGeneratorQfor_virtual_call6FpnIciMethod__p0_; +text: .text%__1cSsubL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubF_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRInvocationCounterFreset6M_v_; +text: .text%__1cUdivL_reg_imm13_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovPP_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cNiRegIsafeOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cKCMoveINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cXconvI2D_regDHi_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConL0NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIregFOperFclone6kM_pnIMachOper__; +text: .text%__1cHTypePtrFxdual6kM_pknEType__; +text: .text%__1cUVirtualCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cJloadDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstring_compareNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcmovII_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cISubDNodeGOpcode6kM_i_; +text: .text%__1cOPhaseIdealLoopKdo_peeling6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cQsubD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRsarL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_ModI6MpknENode__v_; +text: .text%__1cOPhaseIdealLoopVinsert_pre_post_loops6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cQmodI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFTypeFFxmeet6kMpknEType__3_; +text: .text%__1cOcmovPI_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKVtableStubSpd_code_size_limit6Fi_i_; +text: .text%__1cQmodI_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cCosScurrent_stack_base6F_pC_; +text: .text%__1cIOSThread2t6MpFpv_i1_v_; +text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_; +text: .text%__1cGThread2t6M_v_; +text: 
.text%__1cIOSThreadNpd_initialize6M_v_; +text: .text%__1cCosRinitialize_thread6F_v_; +text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_; +text: .text%__1cCosScurrent_stack_size6F_L_; +text: .text%__1cIMulDNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cODeoptimizationYquery_update_method_data6FnQmethodDataHandle_in0ALDeoptReason_rIri4_pnLProfileData__; +text: .text%JVM_SetThreadPriority; +text: .text%__1cFTypeDGis_nan6kM_i_; +text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cQjava_lang_SystemSin_offset_in_bytes6F_i_; +text: .text%__1cQregF_to_stkINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosPpd_start_thread6FpnGThread__v_; +text: .text%__1cQaddF_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_SystemTout_offset_in_bytes6F_i_; +text: .text%__1cQshlL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%_start: os_solaris.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlass.o; +text: .text%__1cQdivD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMloadConINodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosNcreate_thread6FpnGThread_n0AKThreadType_L_i_; +text: .text%jni_NewObjectArray: jni.o; +text: .text%__1cCosMstart_thread6FpnGThread__v_; +text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cOcmovPI_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQshrL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJMarkSweepMfollow_stack6F_v_; +text: .text%__1cHnmethodbCcan_not_entrant_be_converted6M_i_; +text: .text%__1cJloadLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovLL_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJMemRegionMintersection6kMk0_0_; +text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__; +text: .text%JVM_Read; +text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldVhas_annotations_field6F_i_; +text: .text%__1cXjava_lang_reflect_FieldThas_signature_field6F_i_; +text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_; +text: .text%JVM_GetStackAccessControlContext; +text: .text%__1cIMulFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_; +text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cOMacroAssemblerTload_unaligned_long6MpnMRegisterImpl_i2_v_; +text: .text%__1cKReflectionJnew_field6FpnPfieldDescriptor_ipnGThread__pnHoopDesc__; +text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cQstkI_to_regINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_; +text: .text%__1cQaddD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: callGenerator.o; +text: .text%__1cZregDHi_regDLo_to_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQregI_to_stkINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%jni_GetStaticFieldID: jni.o; +text: .text%__1cENodeGis_Con6kM_I_: loopnode.o; +text: .text%__1cIModLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: loopnode.o; +text: .text%__1cSsubD_regD_regDNodeMideal_Opcode6kM_i_: 
ad_sparc_misc.o; +text: .text%__1cQmulF_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvI2D_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__; +text: .text%__1cKimmL13OperFclone6kM_pnIMachOper__; +text: .text%__1cRsarL_reg_imm6NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_26MpCpnMRegisterImpl_rnFLabel__v_; +text: .text%JVM_DesiredAssertionStatus; +text: .text%__1cSmulD_regD_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSstring_compareNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOJavaAssertionsHenabled6Fpkci_i_; +text: .text%__1cKJavaThreadKinitialize6M_v_; +text: .text%__1cMFlatProfilerJis_active6F_i_; +text: .text%__1cIDivLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMacroAssemblerDset6MlpnMRegisterImpl_rknQRelocationHolder__v_: interp_masm_sparc.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC22i_v_; +text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_; +text: .text%__1cNjni_functions6F_pknTJNINativeInterface___; +text: .text%__1cLConvI2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_; +text: .text%__1cNloadConL0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_; +text: .text%__1cGParker2t6M_v_; +text: .text%__1cCosMguard_memory6FpcL_i_; +text: .text%__1cQThreadStatistics2t6M_v_; +text: .text%__1cHMatcherXpost_store_load_barrier6FpknENode__i_; +text: .text%__1cNObjectMonitorGenter26MpnGThread__v_; +text: .text%__1cPPerfDataManagerIadd_item6FpnIPerfData_i_v_; +text: .text%__1cSandL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cGThreadFstart6Fp0_v_; +text: .text%__1cKCMoveDNodeGOpcode6kM_i_; +text: .text%__1cOcmovIF_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLcmpF_ccNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryQjava_mirror_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cRtestI_reg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOloadI_fregNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKJavaThread2t6MpFp0pnGThread__vL_v_; +text: .text%__1cJloadSNodeFclone6kM_pnENode__; +text: .text%__1cKJavaThreadDrun6M_v_; +text: .text%jni_GetFloatArrayRegion: jni.o; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: interp_masm_sparc.o; +text: .text%__1cOloadI_fregNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_; +text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_; +text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_; +text: .text%jni_CallStaticVoidMethod: jni.o; +text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_; +text: .text%__1cOGenerateOopMapTmethodsig_to_effect6MpnNsymbolOopDesc_ipnNCellTypeState__i_; +text: .text%__1cOGenerateOopMapKinterp_all6M_v_; +text: .text%__1cJLoadPNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_; +text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_; +text: 
.text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__; +text: .text%__1cOGenerateOopMapKinit_state6M_v_; +text: .text%__1cLConvI2FNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFKlassNexternal_name6kM_pkc_; +text: .text%__1cOcmovIL_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: cfgnode.o; +text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_; +text: .text%__1cSaddD_regD_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQsubD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvF2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJloadBNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvI2F_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJArrayDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cQsubD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlass.o; +text: .text%__1cQshrI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcmpF_ccNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvL2DNodeGOpcode6kM_i_; +text: .text%__1cFStateM_sub_Op_MulI6MpknENode__v_; +text: .text%JVM_GetClassName; +text: .text%__1cOMacroAssemblerVload_unaligned_double6MpnMRegisterImpl_ipnRFloatRegisterImpl__v_; +text: .text%JVM_IsArrayClass; +text: .text%__1cQdivI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubFNodeGOpcode6kM_i_; +text: .text%__1cSsubD_regD_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHAddress2t6Mn0AJaddr_type_i_v_; +text: .text%__1cSmulD_regD_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cMnegD_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2L_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_; +text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_; +text: .text%__1cLTypeInstPtrRcast_to_exactness6kMi_pknEType__; +text: .text%__1cbFunnecessary_membar_volatileNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cQsubF_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_; +text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_; +text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__; +text: .text%__1cQdivL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: generateOptoStub.o; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_sparc.o; +text: .text%__1cRtestI_reg_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvD2F_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_; +text: .text%__1cLstoreF0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNCallGeneratorSfor_predicted_call6FpnHciKlass_p03_3_; +text: .text%__1cWPredictedCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cWPredictedCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cWPredictedCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: 
.text%__1cKOopClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cOMacroAssemblerFsethi6MrnHAddress_i_v_: interpreter_sparc.o; +text: .text%__1cJMarkSweepSMarkAndPushClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cJloadSNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cZregDHi_regDLo_to_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cZUncommonTrapCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%JVM_Open; +text: .text%__1cLconvI2BNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNCallGeneratorRfor_uncommon_trap6FpnIciMethod_nODeoptimizationLDeoptReason_n0CLDeoptAction__p0_; +text: .text%__1cSandL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: loopnode.o; +text: .text%__1cSconvI2F_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKConv2BNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMloadConDNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: loopnode.o; +text: .text%jni_GetStaticObjectField: jni.o; +text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_; +text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o; +text: .text%__1cQandI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIMaxINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cKCodeBufferWinsert_double_constant6Md_pC_; +text: .text%__1cSstkL_to_regD_0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_; +text: .text%JVM_StartThread; +text: .text%__1cOcmovDF_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__; +text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_; +text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_; +text: .text%__1cMloadConDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_; +text: .text%__1cQdivD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerFsethi6MrnHAddress_i_v_: icBuffer_sparc.o; +text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_; +text: .text%__1cSaddD_regD_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNloadConPCNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_2NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_; +text: .text%__1cObranchConFNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciObjectFactory.o; +text: .text%__1cOcmovIL_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUdivL_reg_imm13_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvF2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIciObjectOis_method_data6M_i_: ciObjectFactory.o; +text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_; +text: .text%__1cLStrCompNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciObjectJis_method6M_i_: ciObjectFactory.o; +text: .text%JVM_TotalMemory; +text: .text%JVM_FreeMemory; +text: 
.text%__1cUmulL_reg_imm13_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cURethrowExceptionNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_; +text: .text%__1cFStateO_sub_Op_CMoveI6MpknENode__v_; +text: .text%__1cMTailCallNode2t6MpnENode_222222_v_; +text: .text%__1cLConvD2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIGraphKitSprecision_rounding6MpnENode__2_; +text: .text%__1cQsubI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSconvD2I_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_; +text: .text%__1cSTailCalljmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMnegD_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSTailCalljmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosOunguard_memory6FpcL_i_; +text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_; +text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_; +text: .text%__1cOcmovPI_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodDataKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodDataKlass.o; +text: .text%__1cQaddI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cCosLfree_thread6FpnIOSThread__v_; +text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_; +text: .text%__1cICmpDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cGParker2T6M_v_; +text: .text%__1cNThreadServiceNremove_thread6FpnKJavaThread_i_v_; +text: .text%__1cQregF_to_stkINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKJavaThread2T6M_v_; +text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o; +text: .text%__1cKJavaThreadEexit6Mi_v_; +text: .text%__1cIGraphKitXinsert_mem_bar_volatile6MpnKMemBarNode_i_v_; +text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_; +text: .text%__1cOcmovLI_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIOSThreadKpd_destroy6M_v_; +text: .text%__1cHThreadsGremove6FpnKJavaThread__v_; +text: .text%__1cIOSThread2T6M_v_; +text: .text%__1cGThread2T5B6M_v_; +text: .text%__SLIP.DELETER__A: thread.o; +text: .text%__1cQshrL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regFNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFMutexbLwait_for_lock_blocking_implementation6MpnKJavaThread__v_; +text: .text%__1cHRetDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_; +text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_; +text: .text%__1cQregL_to_stkLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%jni_CallObjectMethod: jni.o; +text: .text%__1cQChunkPoolCleanerEtask6M_v_: allocation.o; +text: .text%__1cTOopMapForCacheEntryOreport_results6kM_i_: oopMapCache.o; +text: .text%__1cOcmovIL_immNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_; +text: .text%__1cLcmpF_ccNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSconvI2D_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerbEset_method_data_pointer_offset6MpnMRegisterImpl__v_; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: klassKlass.o; +text: .text%__1cHTypeInt2t6Miii_v_; +text: .text%__1cOtailjmpIndNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; 
+text: .text%__1cSsubD_regD_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvF2I_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_: objArrayKlassKlass.o; +text: .text%__1cSmulD_regD_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLmethodKlassOklass_oop_size6kM_i_: methodKlass.o; +text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_: methodDataKlass.o; +text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cNloadRangeNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_: constMethodKlass.o; +text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_: constantPoolKlass.o; +text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_: cpCacheKlass.o; +text: .text%__1cOcmovIF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseNdo_instanceof6M_v_; +text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_: compiledICHolderKlass.o; +text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_: instanceKlassKlass.o; +text: .text%__1cSdivL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerFpop_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cSsubL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitOgen_instanceof6MpnENode_2_2_; +text: .text%__1cSmulL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: interpreterRT_sparc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_; +text: .text%__1cLstoreC0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQshrL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: arrayKlassKlass.o; +text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_: symbolKlass.o; +text: .text%__1cQjava_lang_StringbHcreate_from_platform_depended_str6FpkcpnGThread__nGHandle__; +text: .text%__1cJloadDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: library_call.o; +text: .text%__1cUGenericGrowableArrayUclear_and_deallocate6M_v_; +text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_; +text: .text%__1cKCMoveLNodeGOpcode6kM_i_; +text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_; +text: .text%JVM_MonitorNotifyAll; +text: .text%__1cOloadI_fregNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreBNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovLL_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cVMoveL2D_stack_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXconvI2D_regDHi_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSReferenceProcessorHoops_do6MpnKOopClosure__v_; +text: .text%__1cJScopeDescGlocals6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cLConvD2FNodeGOpcode6kM_i_; +text: .text%__1cTAbstractInterpreterQcontinuation_for6FpnNmethodOopDesc_pCiiri_3_; +text: .text%__1cJScopeDescLexpressions6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cJScopeDescImonitors6M_pnNGrowableArray4CpnMMonitorValue____; +text: .text%__1cTAbstractInterpreterPsize_activation6FpnNmethodOopDesc_iiiii_i_; +text: .text%__1cIPSOldGenPadjust_pointers6M_v_; +text: 
.text%__1cJvmSymbolsHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cIPSOldGenHcompact6M_v_; +text: .text%__1cTAbstractInterpreterRlayout_activation6FpnNmethodOopDesc_iiiipnFframe_4i_v_; +text: .text%__1cLOptoRuntimeYcurrent_time_millis_Type6F_pknITypeFunc__; +text: .text%__1cOcompiledVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cOcompiledVFrameHraw_bci6kM_i_; +text: .text%jint_cmp: parse2.o; +text: .text%__1cSvframeArrayElementPunpack_on_stack6MiipnFframe_ii_v_; +text: .text%__1cIMaxINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cZregDHi_regDLo_to_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSvframeArrayElementNon_stack_size6kMiiii_i_; +text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_; +text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_: jni.o; +text: .text%__1cOcompiledVFrameGlocals6kM_pnUStackValueCollection__; +text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_; +text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_; +text: .text%__1cOcompiledVFrameLexpressions6kM_pnUStackValueCollection__; +text: .text%__1cOcompiledVFrameImonitors6kM_pnNGrowableArray4CpnLMonitorInfo____; +text: .text%__1cSvframeArrayElementHfill_in6MpnOcompiledVFrame__v_; +text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: ciTypeFlow.o; +text: .text%__1cOcmovLL_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOLibraryCallKitYinline_native_time_funcs6Mi_i_; +text: .text%__1cOMacroAssemblerFsethi6MrnHAddress_i_v_: templateTable_sparc.o; +text: .text%__1cOMacroAssemblerNflush_windows6M_v_; +text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cKstorePNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFframeZinterpreter_frame_set_mdx6Ml_v_; +text: .text%__1cQdivI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerDset6MlpnMRegisterImpl_rknQRelocationHolder__v_: stubGenerator_sparc.o; +text: .text%__1cQshrI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQObjectStartArrayFreset6M_v_; +text: .text%__1cKg3RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIAddDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQshlL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotIOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cLstoreF0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_: callnode.o; +text: .text%__1cKimmI11OperIconstant6kM_l_: ad_sparc_clone.o; +text: .text%__1cbIjava_security_AccessControlContextGcreate6FnOobjArrayHandle_inGHandle_pnGThread__pnHoopDesc__; +text: .text%__1cOstackSlotIOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cVCallRuntimeDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerDset6MlpnMRegisterImpl_rknQRelocationHolder__v_: interpreter_sparc.o; +text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_; +text: .text%__1cHCompileKinit_start6MpnJStartNode__v_; +text: .text%__1cRtestI_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_; +text: .text%__1cVCallRuntimeDirectNodeKmethod_set6Ml_v_; +text: .text%__1cOstackSlotIOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cIGraphKitbAgen_stub_or_native_wrapper6MpCpkcpnIciMethod_iiiii_v_; +text: .text%__1cIMinINodeIadd_ring6kMpknEType_3_3_; 
+text: .text%__1cVCallRuntimeDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_GetInheritedAccessControlContext; +text: .text%__1cOcmovLL_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVinline_cache_regPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cVCallRuntimeDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%JVM_NativePath; +text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__; +text: .text%__1cQdivD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSvframeArrayElementDbci6kM_i_; +text: .text%__1cQaddD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNnmethodLocker2t6MpC_v_; +text: .text%__1cICodeBlobZis_at_poll_or_poll_return6MpC_i_; +text: .text%__1cODeoptimizationRlast_frame_adjust6Fii_i_; +text: .text%__1cNloadConP0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSconvI2D_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovPI_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddD_regD_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cODeoptimizationYfetch_unroll_info_helper6FpnKJavaThread__pn0ALUnrollBlock__; +text: .text%__1cLvframeArrayIallocate6FpnKJavaThread_ipnNGrowableArray4CpnOcompiledVFrame___pnLRegisterMap_nFframe_9A9A9A_p0_; +text: .text%__1cSandL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cODeoptimizationNuncommon_trap6FpnKJavaThread_i_pn0ALUnrollBlock__; +text: .text%__1cODeoptimizationNunpack_frames6FpnKJavaThread_i_nJBasicType__; +text: .text%__1cLvframeArrayPunpack_to_stack6MrnFframe_i_v_; +text: .text%__1cODeoptimizationTuncommon_trap_inner6FpnKJavaThread_i_v_; +text: .text%__1cLConvF2INodeGOpcode6kM_i_; +text: .text%__1cLOptoRuntimeWresolve_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cSsubD_regD_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: deoptimization.o; +text: .text%__1cODeoptimizationScreate_vframeArray6FpnKJavaThread_nFframe_pnLRegisterMap__pnLvframeArray__; +text: .text%__1cRSignatureIteratorKparse_type6M_i_; +text: .text%__1cIUniverseWreinitialize_vtable_of6FpnFKlass_pnGThread__v_; +text: .text%__1cMtlsLoadPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cVMoveL2D_stack_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_; +text: .text%__1cVCompressedWriteStreamKwrite_long6Mx_v_; +text: .text%__1cbCAbstractInterpreterGeneratorRset_unimplemented6Mi_v_; +text: .text%__1cNSharedRuntimebJcontinuation_for_implicit_exception6FpnKJavaThread_pCn0AVImplicitExceptionKind__3_; +text: .text%__1cLstoreI0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MipnMRegisterImpl__v_; +text: .text%__1cKstoreFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmulI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSstkL_to_regD_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSMachCallNativeNodePret_addr_offset6M_i_; +text: .text%__1cKstoreFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSconvD2I_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_GetStaticMethodID: jni.o; +text: .text%__1cJloadDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSstkL_to_regD_0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__; +text: .text%__1cRNativeMovConstRegEdata6kM_l_; +text: .text%__1cLconvI2BNodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cKConv2BNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmaxI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__; +text: .text%__1cIGraphKitTdprecision_rounding6MpnENode__2_; +text: .text%__1cNmaxI_eRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRConstantLongValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cOcmovIF_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_gtNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMulDNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cPorI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovPP_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_; +text: .text%__1cENodeJis_MemBar6kM_pknKMemBarNode__: classes.o; +text: .text%__1cVcompiledICHolderKlassSoop_being_unloaded6MpnRBoolObjectClosure_pnHoopDesc__i_; +text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cUregI_to_stkLHi_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZregDHi_regDLo_to_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovLL_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cbFunnecessary_membar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOloadI_fregNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cLconvI2BNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cbFunnecessary_membar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcmovII_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMnegD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSconvD2I_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConL0NodeFclone6kM_pnENode__; +text: .text%__1cLRuntimeStubQnew_runtime_stub6FpkcpnKCodeBuffer_ipnJOopMapSet_i_p0_; +text: .text%__1cKcmpOpFOperFccode6kM_i_: ad_sparc_clone.o; +text: .text%__1cJimmL0OperFclone6kM_pnIMachOper__; +text: .text%__1cSaddP_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cPMultiBranchDataScompute_cell_count6FpnOBytecodeStream__i_; +text: .text%__1cIAddFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__; +text: .text%JVM_Close; +text: .text%__1cMtlsLoadPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOstackSlotLOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cJloadFNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerbIcompute_extra_locals_size_in_bytes6MpnMRegisterImpl_22_v_; +text: .text%__1cQsubF_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPconvF2D_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOimmI_32_63OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cQdivL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerNget_vm_result6MpnMRegisterImpl__v_; +text: .text%__1cQregL_to_stkLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQaddL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSxorI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cSandL_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshrL_reg_imm6NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKloadUBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: templateTable_sparc.o; +text: .text%__1cIMulFNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cLcastP2LNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKloadUBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLstoreF0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvD2I_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKcastPPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQstkI_to_regINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_; +text: .text%__1cMStartOSRNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cFParseWload_interpreter_state6MpnENode_2_v_; +text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cNCallGeneratorHfor_osr6FpnIciMethod_i_p0_; +text: .text%__1cQAbstractCompilerMsupports_osr6M_i_: c2compiler.o; +text: .text%__1cJloadCNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOMacroAssemblerPbreakpoint_trap6M_v_; +text: .text%__1cIciMethodVget_osr_flow_analysis6Mi_pnKciTypeFlow__; +text: .text%__1cSconvF2I_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%jni_NewObjectV: jni.o; +text: .text%__1cUregI_to_stkLHi_0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvD2F_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvI2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMStartOSRNodeKosr_domain6F_pknJTypeTuple__; +text: .text%__1cSmulD_regD_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSMachBreakpointNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSaddD_regD_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIDivLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQaddD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcmovLI_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_; +text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_; +text: .text%__1cLConvD2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKCompiledICMstub_address6kM_pC_; +text: .text%__1cIAddFNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%jni_EnsureLocalCapacity; +text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_; +text: .text%__1cTunsafe_intrinsic_id6FpnNsymbolOopDesc_1_nNmethodOopDescLIntrinsicId__; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cHciKlassOsuper_of_depth6MI_p0_; +text: .text%__1cJSubFPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: methodDataOop.o; +text: .text%__1cFTypeFFxdual6kM_pknEType__; +text: .text%__1cLcastP2LNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPorL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvD2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNiRegIsafeOperKin_RegMask6kMi_pknHRegMask__; +text: 
.text%__1cNTemplateTableGbranch6Fii_v_; +text: .text%__1cNSafePointNodeQpeek_monitor_obj6kM_pnENode__; +text: .text%__1cISubDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIMulDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKScopeValueLis_location6kM_i_: debugInfo.o; +text: .text%__1cRshlI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQdivI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQshrI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovPP_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompile2t6MpnFciEnv_pF_pknITypeFunc_pCpkciiii_v_; +text: .text%__1cSmulL_reg_reg_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParsePdo_monitor_exit6M_v_; +text: .text%__1cNSafePointNodeQpeek_monitor_box6kM_pnENode__; +text: .text%__1cSaddI_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLcmpF_ccNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPstoreI_FregNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_sparc.o; +text: .text%__1cIGraphKitIgen_stub6MpCpkciii_v_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_; +text: .text%__1cOtailjmpIndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQregL_to_stkLNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cXconvI2D_regDHi_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotLOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotLOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cSconvF2I_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadBNodeFclone6kM_pnENode__; +text: .text%__1cOloadI_fregNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQregL_to_stkLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJimmP0OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cLcmpF_ccNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2L_regNodeFclone6kM_pnENode__; +text: .text%__1cLResourceObj2n6FLn0APallocation_type__pv_; +text: .text%__1cLcmpD_ccNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interpreter_sparc.o; +text: .text%__1cFTypeDFxdual6kM_pknEType__; +text: .text%__1cLMoveL2DNodeGOpcode6kM_i_; +text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cRorI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: typeArrayKlass.o; +text: .text%__1cOcmovDF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableOpatch_bytecode6FnJBytecodesECode_pnMRegisterImpl_4i_v_; +text: .text%__1cINegDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSstkL_to_regD_2NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVMoveF2I_stack_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMachNodeOmemory_operand6kM_pknIMachOper__: 
ad_sparc_misc.o; +text: .text%__1cXconvI2D_regDHi_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cCosHSolarisKmmap_chunk6FpcLii_2_; +text: .text%__1cINegFNodeGOpcode6kM_i_; +text: .text%__1cICmpFNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQregL_to_stkLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcmovDF_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerbAincrement_backedge_counter6MpnMRegisterImpl_2_v_; +text: .text%__1cKCodeBufferVinsert_float_constant6Mf_pC_; +text: .text%__1cSstkL_to_regD_0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKEntryPoint2t6M_v_; +text: .text%__1cMregD_lowOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cYinternal_word_RelocationMforce_target6MpC_v_: relocInfo.o; +text: .text%__1cObranchConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYinternal_word_RelocationGtarget6M_pC_; +text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cZInterpreterMacroAssemblerbFtest_invocation_counter_for_mdp6MpnMRegisterImpl_22rnFLabel__v_; +text: .text%__1cZInterpreterMacroAssemblerbBtest_backedge_count_for_osr6MpnMRegisterImpl_22_v_; +text: .text%__1cObranchConFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadBNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMloadConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovPP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovPI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIModLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMnegD_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cODeoptimizationbJupdate_method_data_from_interpreter6FnQmethodDataHandle_ii_v_; +text: .text%__1cTmembar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_; +text: .text%__1cObranchConFNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cOcmovPI_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cNloadConPCNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMTailJumpNodeKmatch_edge6kMI_I_; +text: .text%__1cQsubD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cTmembar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQstkI_to_regINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLconvP2BNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cVMoveL2D_stack_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConFNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOcmovII_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegD_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSstkL_to_regD_2NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_; +text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_; +text: .text%__1cPconvD2F_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_; +text: .text%__1cMVirtualSpaceJexpand_by6ML_i_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cOloadConL13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKVtableStubRpd_code_alignment6F_i_; +text: .text%__1cLConvI2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLconvI2BNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPorL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cHBoxNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%JVM_GetComponentType; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: klassKlass.o; +text: .text%JVM_GetCPFieldModifiers; +text: .text%__1cXPartialSubtypeCheckNodeGOpcode6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerXindex_check_without_pop6MpnMRegisterImpl_2i22_v_; +text: .text%__1cVMoveF2I_stack_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJEventMark2t6MpkcE_v_: nmethod.o; +text: .text%__1cOcmovLI_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_; +text: .text%__1cIDivDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cHnmethodFflush6M_v_; +text: .text%__1cQConstantIntValuePis_constant_int6kM_i_: debugInfo.o; +text: .text%__1cFStateL_sub_Op_Box6MpknENode__v_; +text: .text%__1cICodeBlobFflush6M_v_; +text: .text%Unsafe_DefineClass1; +text: .text%__1cPorL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstfSSFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQsubF_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSconvI2D_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovII_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvF2I_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cSmulL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJStubQdDueueKremove_all6M_v_; +text: .text%__1cLOopMapCache2t6M_v_; +text: .text%__1cNloadRangeNodeFclone6kM_pnENode__; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: generateOopMap.o; +text: .text%__1cSaddL_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerLindex_check6MpnMRegisterImpl_2i22_v_; +text: .text%__1cKcastPPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerGif_cmp6MnJAssemblerJCondition_i_v_; +text: .text%__1cOcmovIF_immNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceRefKlass.o; +text: .text%__1cUregI_to_stkLHi_0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvI2D_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerPcasx_under_lock6MpnMRegisterImpl_22pCi_v_; +text: .text%__1cWImplicitExceptionTable2t6MpknHnmethod__v_; +text: .text%__1cJloadLNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvF2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__: jni.o; +text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cLstoreF0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubF_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%JVM_DefineClass; +text: .text%__1cNloadConL0NodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cHOrLNodeGOpcode6kM_i_; +text: .text%__1cWImplicitExceptionTableCat6kMI_I_; +text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_; +text: .text%JVM_InvokeMethod; +text: .text%__1cLVtableStubsIcontains6FpC_i_; +text: .text%__1cHnmethodbJcontinuation_for_implicit_exception6MpC_1_; +text: .text%jni_NewDirectByteBuffer; +text: .text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cQsubD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_; +text: .text%__1cQstkI_to_regINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_; +text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cOMacroAssemblerHbr_null6MpnMRegisterImpl_inJAssemblerHPredict_rnFLabel__v_; +text: .text%__1cKcastPPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeHeapJexpand_by6ML_i_; +text: .text%__1cHThreadsLgc_prologue6F_v_; +text: .text%__1cHThreadsLgc_epilogue6F_v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlass.o; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_pkcnGHandle_6_6_; +text: .text%__1cNTemplateTableHconvert6F_v_; +text: .text%__1cOcmovDF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_pnNsymbolOopDesc_pkc_nGHandle__; +text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_; +text: .text%__1cRCardTableModRefBSEis_a6MnKBarrierSetEName__i_: cardTableExtension.o; +text: .text%__1cQSystemDictionaryMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cSaddL_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_; +text: .text%__1cJMarkSweepNrestore_marks6F_v_; +text: .text%__1cKDictionaryMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cJMarkSweepMadjust_marks6F_v_; +text: .text%__1cHdom_lca6FpnFBlock_1_1_: gcm.o; +text: .text%__1cNloadRangeNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKcmpOpFOperKless_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cJPSPermGenKprecompact6M_v_; +text: .text%__1cJPSPermGenQcompute_new_size6ML_v_; +text: .text%__1cQUncommonTrapBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cNExceptionBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cIPSOldGenKprecompact6M_v_; +text: .text%__1cKstfSSFNodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_; +text: .text%__1cUPSMarkSweepDecoratorbIset_destination_decorator_perm_gen6F_v_; +text: .text%__1cUPSMarkSweepDecoratorbHset_destination_decorator_tenured6F_v_; +text: .text%__1cLPSMarkSweepQinvoke_no_policy6Fpii_v_; +text: .text%Unsafe_AllocateInstance; +text: .text%__1cUPSAdaptiveSizePolicyUmajor_collection_end6MLnHGCCauseFCause__v_; +text: .text%__1cUPSAdaptiveSizePolicyWmajor_collection_begin6M_v_; +text: .text%jni_AllocObject: jni.o; +text: .text%__1cIAddFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKConv2BNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFParseScan_rerun_bytecode6M_i_; +text: .text%JVM_NewArray; +text: .text%__1cLStrCompNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_; +text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cJCodeCacheLgc_prologue6F_v_; +text: .text%__1cLTypeInstPtrLmirror_type6kM_pnGciType__; +text: .text%__1cKPSYoungGenKprecompact6M_v_; +text: .text%__1cKPSYoungGenPadjust_pointers6M_v_; +text: .text%__1cKPSYoungGenHcompact6M_v_; +text: .text%__1cJCodeCacheMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: 
.text%__1cSReferenceProcessorPoops_do_statics6FpnKOopClosure__v_; +text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_; +text: .text%__1cZInterpreterMacroAssemblerFpop_l6MpnMRegisterImpl__v_; +text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__; +text: .text%__1cJCodeCacheLgc_epilogue6F_v_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_; +text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_; +text: .text%__1cMloadConPNodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GC; +text: .text%JVM_GetSystemPackage; +text: .text%__1cUregI_to_stkLHi_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreC0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLPSMarkSweepGinvoke6Fpii_v_; +text: .text%__1cRSignatureIteratorHiterate6M_v_; +text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_; +text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_; +text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_; +text: .text%__1cMStubCodeMark2T6M_v_; +text: .text%__1cUregI_to_stkLHi_0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_; +text: .text%__1cSInterpreterRuntimeQcreate_exception6FpnKJavaThread_pc3_v_; +text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cUParallelScavengeHeapHcollect6MnHGCCauseFCause__v_; +text: .text%__1cPconvD2F_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cETypeJis_finite6kM_i_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: callnode.o; +text: .text%__1cLClassLoaderSget_system_package6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cQaddL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cVVM_ParallelGCSystemGC2t6MI_v_; +text: .text%__1cQshlI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPBytecode_invokeLresult_type6kMpnGThread__nJBasicType__; +text: .text%__1cLMoveF2INodeGOpcode6kM_i_; +text: .text%__1cJCmpF3NodeGOpcode6kM_i_; +text: .text%__1cVVM_ParallelGCSystemGCEdoit6M_v_; +text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cLConvD2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cVVM_ParallelGCSystemGCEname6kM_pkc_: vm_operations.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: objArrayKlass.o; +text: .text%__1cJBasicLockHmove_to6MpnHoopDesc_p0_v_; +text: .text%__1cOstackSlotIOperFclone6kM_pnIMachOper__; +text: .text%jni_GetStringRegion: jni.o; +text: .text%__1cNStubGeneratorLstub_prolog6MpnMStubCodeDesc__v_: stubGenerator_sparc.o; +text: .text%__1cOstackSlotPOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cHBoxNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: klassKlass.o; +text: .text%__1cOcmovPI_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackHdo_byte6M_v_: generateOopMap.o; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOstackSlotPOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cJCMoveNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOstackSlotPOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cLcastP2LNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cIciObjectTis_type_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cObox_handleNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQregP_to_stkPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQshrL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHThreadsLnmethods_do6F_v_; +text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__; +text: .text%JVM_RawMonitorCreate; +text: .text%__1cLOptoRuntimeNgenerate_stub6FpnFciEnv_pF_pknITypeFunc_pCpkciiii_8_; +text: .text%__1cIimmDOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%Unsafe_CompareAndSwapInt; +text: .text%__1cWloadConI_x43300000NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotLOperFclone6kM_pnIMachOper__; +text: .text%__1cXconvI2D_regDHi_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOPSPromotionLABRunallocate_object6MpnHoopDesc__i_; +text: .text%__1cZInterpreterMacroAssemblerQtest_mdp_data_at6MipnMRegisterImpl_rnFLabel_2_v_; +text: .text%__1cXconvI2D_regDHi_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZregDHi_regDLo_to_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVMoveL2D_stack_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKcastPPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSaddD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotFOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cMnegF_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVMoveF2I_stack_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPorL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadFNodeFclone6kM_pnENode__; +text: .text%__1cUregI_to_stkLHi_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvD2F_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZregDHi_regDLo_to_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerFpop_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cSsubD_regD_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQsubL_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cPMultiBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceRefKlass.o; +text: .text%__1cQregL_to_stkLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRmethodDataOopDescRbci_to_extra_data6Mii_pnLProfileData__; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: interpreter.o; +text: .text%__1cPconvF2D_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCodeBufferGresize6M_v_; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceRefKlass.o; +text: .text%__1cQaddL_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_; +text: .text%__1cSaddD_regD_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cWloadConI_x41f00000NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddP_reg_imm13NodeFclone6kM_pnENode__; +text: .text%__1cLConvL2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cWloadConI_x43300000NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMnegF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmulD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_Sleep; +text: .text%__1cMmatch_option6FpknMJavaVMOption_ppkc5i_i_: arguments.o; +text: .text%__1cUregI_to_stkLHi_0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLconvI2BNodeErule6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cRorI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUregI_to_stkLHi_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_Lseek; +text: .text%__1cQmulD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOloadConL13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulFNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cJOopMapSetQsingular_oop_map6M_pnGOopMap__; +text: .text%__1cSmulD_regD_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: templateTable_sparc.o; +text: .text%__1cSsubD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: templateTable_sparc.o; +text: .text%__1cQsubF_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmembar_releaseNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cMloadConDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackIdo_float6M_v_: generateOopMap.o; +text: .text%__1cMloadConFNodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cRComputeEntryStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cWloadConI_x41f00000NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_sparc.o; +text: .text%__1cOLibraryCallKitXinline_string_compareTo6M_i_; +text: .text%Unsafe_GetNativeByte; +text: .text%__1cQmulL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLconvI2BNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerKbr_notnull6MpnMRegisterImpl_inJAssemblerHPredict_rnFLabel__v_; +text: .text%__1cQmulF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsarL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQstkI_to_regFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQaddF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJLoadBNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cGciType2t6MnJBasicType__v_; +text: .text%__1cQandL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%jni_GetEnv; +text: .text%__1cHTypePtrKadd_offset6kMi_pk0_; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cOcmovLI_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSstring_compareNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMciArrayKlassRbase_element_type6M_pnGciType__; +text: .text%__1cSstring_compareNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerEcall6MpCnJrelocInfoJrelocType__v_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: stubGenerator_sparc.o; +text: .text%JVM_NanoTime; +text: .text%__1cOMacroAssemblerOrestore_thread6MkpnMRegisterImpl__v_; +text: .text%__1cIMulFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cETypeEmake6Fn0AFTYPES__pk0_; +text: .text%__1cPconvD2F_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerLsave_thread6MkpnMRegisterImpl__v_; +text: .text%__1cCosNjavaTimeNanos6F_x_; +text: .text%__1cOcmovIF_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLCastP2LNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%Unsafe_GetNativeFloat; +text: .text%__1cENodeHis_Call6M_pnICallNode__: machnode.o; +text: .text%jni_CallVoidMethod: jni.o; 
+text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cKimmU13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cNloadConL0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJimmL0OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cFParseOdo_tableswitch6M_v_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableXjvmti_post_field_access6Fiii_v_; +text: .text%__1cKstoreFNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cWloadConI_x41f00000NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWloadConI_x43300000NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKcastPPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFTypeFJis_finite6kM_i_; +text: .text%__1cOstackSlotFOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_; +text: .text%__1cOPhaseIdealLoopJclone_iff6MpnHPhiNode_pnNIdealLoopTree__pnIBoolNode__; +text: .text%__1cSmulL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFParseQdo_monitor_enter6M_v_; +text: .text%__1cLconvP2BNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMregD_lowOperFclone6kM_pnIMachOper__; +text: .text%__1cSaddI_reg_imm13NodeFclone6kM_pnENode__; +text: .text%__1cParrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cOMacroAssemblerGmembar6MnJAssemblerQMembar_mask_bits__v_: templateTable_sparc.o; +text: .text%__1cLcastP2LNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: methodLiveness.o; +text: .text%__1cbCAbstractInterpreterGeneratorbBgenerate_result_handler_for6MnJBasicType__pC_; +text: .text%__1cQshrI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerEfmov6MnRFloatRegisterImplFWidth_p13_v_; +text: .text%__1cOcmovLL_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSdivL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQstkI_to_regINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIXorINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_; +text: .text%__1cLConvI2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cQAbstractCompilerPsupports_native6M_i_: c2compiler.o; +text: .text%__1cIDivDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%JVM_FindPrimitiveClass; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constantPoolKlass.o; +text: .text%__1cOMacroAssemblerKget_thread6M_v_; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: stubGenerator_sparc.o; +text: .text%__1cUdivL_reg_imm13_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUmulL_reg_imm13_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSsubL_reg_reg_2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKLoadPCNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOcmovIL_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_continuation_for6MnITosState__pC_; +text: .text%__1cVMoveL2D_stack_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIciSymbolHas_utf86M_pkc_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_; +text: .text%__1cQregI_to_stkINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciMethodMnative_entry6M_pC_; 
+text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cOstackSlotFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKCodeBufferQalloc_relocation6MI_v_; +text: .text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_; +text: .text%__1cNloadConPCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cLcastP2LNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLOptoRuntimeRnew_objArray_Type6F_pknITypeFunc__; +text: .text%__1cNloadConPCNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSmodL_reg_imm13NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod__v_; +text: .text%__1cJname2type6Fpkc_nJBasicType__; +text: .text%__1cIGraphKitSgen_native_wrapper6MpnIciMethod__v_; +text: .text%__1cKReturnNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cSmodL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQdivD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKcastPPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSconvF2I_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLconvI2BNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cZregDHi_regDLo_to_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cWloadConI_x41f00000NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarI_reg_imm5NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosNcommit_memory6FpcLL_i_; +text: .text%__1cUregI_to_stkLHi_0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__; +text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cWloadConI_x41f00000NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cWloadConI_x43300000NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFTypeFFempty6kM_i_; +text: .text%__1cETypeFxdual6kM_pk0_; +text: .text%__1cZInterpreterMacroAssemblerbCincrement_invocation_counter6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: jniFastGetField_sparc.o; +text: .text%__1cOMacroAssemblerFsethi6MrnHAddress_i_v_: jniFastGetField_sparc.o; +text: .text%__1cOtailjmpIndNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerEcall6MpCnJrelocInfoJrelocType__v_: jniFastGetField_sparc.o; +text: .text%__1cZInterpreterMacroAssemblerSnotify_method_exit6MinITosState__v_; +text: .text%__1cFParseMdo_anewarray6M_v_; +text: .text%__1cPorL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvI2F_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_; +text: .text%__1cSaddD_regD_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: objArrayKlassKlass.o; +text: .text%__1cOcmovLI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%Unsafe_StaticFieldBaseFromField; +text: 
.text%__1cVMoveF2I_stack_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_; +text: .text%__1cPconvI2F_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MirnFLabel__v_; +text: .text%__1cQaddI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOtypeArrayKlassMcreate_klass6FnJBasicType_ipnGThread__pnMklassOopDesc__; +text: .text%__1cLstoreC0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUInterpreterGeneratorUgenerate_fixed_frame6Mi_v_; +text: .text%__1cKcmpOpFOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cSconvF2I_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvF2I_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvD2F_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: typeArrayKlass.o; +text: .text%__1cWloadConI_x43300000NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%Unsafe_EnsureClassInitialized; +text: .text%Unsafe_StaticFieldOffset; +text: .text%__1cOMacroAssemblerZtotal_frame_size_in_bytes6Mi_i_; +text: .text%__1cUregI_to_stkLHi_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cVMoveL2D_stack_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%Unsafe_GetObjectVolatile; +text: .text%__1cRshlL_reg_imm6NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadDNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRshlI_reg_imm5NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvD2F_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVMoveF2I_stack_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlass.o; +text: .text%__1cPconvI2F_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateO_sub_Op_CMoveP6MpknENode__v_; +text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerQaccess_local_int6MpnMRegisterImpl_2_v_; +text: .text%__1cOcmovIF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJimmU5OperFclone6kM_pnIMachOper__; +text: .text%__1cZInterpreterMacroAssemblerNsuper_call_VM6MpnMRegisterImpl_22pC22i_v_; +text: .text%__1cSmulL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cObranchConFNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cLstoreC0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_GetClassContext; +text: .text%__1cZInterpreterMacroAssemblerWempty_expression_stack6M_v_; +text: .text%__1cJloadLNodeFclone6kM_pnENode__; +text: .text%__1cHMulNodeGis_Mul6kM_pk0_: classes.o; +text: .text%__1cMnegD_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQandI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulD_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerbDunlock_if_synchronized_method6MnITosState_ii_v_; +text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_; +text: .text%__1cSstkL_to_regD_1NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadLNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cOcmovIF_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadDNodeJideal_reg6kM_I_: classes.o; +text: 
.text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_; +text: .text%__1cUInterpreterGeneratorbAgenerate_run_compiled_code6M_v_; +text: .text%__1cSsubD_regD_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulD_regD_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_; +text: .text%__1cSaddL_reg_imm13NodeFclone6kM_pnENode__; +text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_; +text: .text%JVM_IsSameClassPackage; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cRComputeEntryStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cGEventsDlog6FpkcE_v_: compiledIC.o; +text: .text%jni_SetStaticObjectField: jni.o; +text: .text%__1cRComputeEntryStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cCosHSolarisSset_signal_handler6Fiii_v_; +text: .text%__1cLVtableStubsOis_entry_point6FpC_i_; +text: .text%__1cKCompiledICOis_megamorphic6kM_i_; +text: .text%__1cKVtableStub2n6FLi_pv_; +text: .text%__1cLVtableStubsLcreate_stub6FiipnNmethodOopDesc__pC_; +text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_; +text: .text%__1cKcmpOpFOperHgreater6kM_i_: ad_sparc_clone.o; +text: .text%__1cQregF_to_stkINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: callnode.o; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_; +text: .text%__1cLVtableStubsScreate_vtable_stub6Fii_pnKVtableStub__; +text: .text%__1cLOptoRuntimeRresolve_call_Type6F_pknITypeFunc__; +text: .text%__1cQshlL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovLI_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPorL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosMuser_handler6F_pv_; +text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_; +text: .text%__1cNTemplateTableGiconst6Fi_v_; +text: .text%__1cXPartialSubtypeCheckNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cCosEstat6FpkcpnEstat__i_; +text: .text%__1cKReflectionVis_same_class_package6FpnMklassOopDesc_2_i_; +text: .text%__1cLOptoRuntimeMrethrow_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimebBhandle_wrong_method_ic_miss6FpnKJavaThread__pC_; +text: .text%jni_RegisterNatives: jni.o; +text: .text%__1cLconvP2BNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetClassDeclaredFields; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_; +text: .text%__1cVMoveL2D_stack_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_; +text: .text%__1cOcmovLL_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUInterpreterGeneratorbCgenerate_check_compiled_code6MrnFLabel__v_; +text: .text%__1cNSharedRuntimeEdrem6Fdd_d_; +text: .text%__1cUInterpreterGeneratorbEgenerate_asm_interpreter_entry6Mi_pC_; +text: .text%__1cHCompileRget_Method_invoke6M_pnIciMethod__; +text: .text%__1cLTypeInstPtrOxmeet_unloaded6kMpk0_2_; +text: .text%__1cHCompileWget_MethodAccessorImpl6M_pnPciInstanceKlass__; +text: .text%__1cSstkL_to_regD_2NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_; +text: .text%__1cSstkL_to_regD_1NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cUInterpreterGeneratorbDgenerate_stack_overflow_check6MpnMRegisterImpl_22_v_; +text: .text%__1cLConvL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFParseVcatch_call_exceptions6MrnYciExceptionHandlerStream__v_; +text: .text%__1cODeoptimizationLUnrollBlockOsize_of_frames6kM_i_; +text: .text%__1cMMonitorValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cOLibraryCallKitbBinline_native_currentThread6M_i_; +text: .text%__1cQdivL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOloadConL13NodeFclone6kM_pnENode__; +text: .text%__1cCosZvm_allocation_granularity6F_i_; +text: .text%__1cCosNcommit_memory6FpcL_i_; +text: .text%__1cOPSVirtualSpaceJexpand_by6ML_i_; +text: .text%__1cNRegisterSaverWrestore_live_registers6FpnOMacroAssembler__v_; +text: .text%__1cKJavaThreadUremove_monitor_chunk6MpnMMonitorChunk__v_; +text: .text%__1cKJavaThreadRadd_monitor_chunk6MpnMMonitorChunk__v_; +text: .text%__1cPconvF2D_regNodeFclone6kM_pnENode__; +text: .text%__1cOLibraryCallKitXgenerate_current_thread6MrpnENode__2_; +text: .text%__1cLcastP2LNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovII_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRMachSpillCopyNodeHsize_of6kM_I_: ad_sparc.o; +text: .text%__1cKstfSSFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_0NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerDret6Mi_v_: templateTable_sparc.o; +text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_; +text: .text%__1cQstkI_to_regINodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cJMemRegion2t6M_v_: cardTableModRefBS.o; +text: .text%__1cZInterpreterMacroAssemblerSget_cpool_and_tags6MpnMRegisterImpl_2_v_; +text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_; +text: .text%__1cOLibraryCallKitbDis_method_invoke_or_aux_frame6MpnIJVMState__i_; +text: .text%__1cZInterpreterMacroAssemblerYtest_method_data_pointer6MrnFLabel__v_; +text: .text%__1cNTemplateTableH_return6FnITosState__v_; +text: .text%__1cKimmP13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cULinearLeastSquareFit2t6MI_v_; +text: .text%__1cLcastP2LNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframebLprevious_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cPstoreI_FregNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRtestI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvF2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLConvD2FNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cHOrLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIAddDNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cMMonitorChunk2t6Mi_v_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_; +text: .text%jni_GetJavaVM; +text: .text%jni_MonitorExit: jni.o; +text: .text%jni_MonitorEnter: jni.o; +text: .text%__1cIAddDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOloadI_fregNodeOmemory_operand6kM_pknIMachOper__; +text: .text%Unsafe_ObjectFieldOffset; +text: .text%__1cSandL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_; +text: .text%JVM_IsSupportedJNIVersion; +text: 
.text%__1cMTailJumpNodeGOpcode6kM_i_; +text: .text%__1cNLoadKlassNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cCosOreserve_memory6FLpc_1_; +text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_; +text: .text%__1cOloadConL13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_LoadLibrary; +text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_: interpreterRT_sparc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_double6M_v_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_; +text: .text%__1cISubFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cISubDNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cSdivL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSconvD2I_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTablebAload_invoke_cp_cache_entry6FipnMRegisterImpl_22ii_v_; +text: .text%__1cNRegisterSaverTsave_live_registers6FpnOMacroAssembler_ipi_pnGOopMap__; +text: .text%__1cNSpaceCounters2t6MpkciLpnMMutableSpace_pnSGenerationCounters__v_; +text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_; +text: .text%__1cSmulL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%bootstrap_flush_windows; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__; +text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_; +text: .text%__1cSCardTableExtensionVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cSmulL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cISubDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMacroAssemblerEfneg6MnRFloatRegisterImplFWidth_p13_v_; +text: .text%__1cPstoreI_FregNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_GetLastErrorString; +text: .text%__1cLcmpF_ccNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNReservedSpaceKfirst_part6MLii_0_; +text: .text%__1cFParseRjump_if_true_fork6MpnGIfNode_ii_v_; +text: .text%__1cOcmovLI_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNReservedSpace2t6ML_v_; +text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_; +text: .text%__1cFParsePdo_lookupswitch6M_v_; +text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_sparc.o; +text: .text%__1cLMoveL2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_; +text: .text%__1cRtestI_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerbCverify_oop_or_return_address6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: jniFastGetField_sparc.o; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_; +text: .text%__1cINegFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKMemoryPool2t6Mpkcn0AIPoolType_LLii_v_; +text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_; +text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_; +text: .text%__1cSsubL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_SetObjectField: jni.o; +text: .text%__1cPconvD2I_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLconvP2BNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKcastPPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cQmodL_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLconvP2BNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pCi_v_; +text: .text%__1cMnegD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTGeneratePairingInfoOreport_results6kM_i_: ciMethod.o; +text: .text%__1cPciInstanceKlassbDcompute_shared_is_initialized6M_i_; +text: .text%__1cSandI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQmodL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQmulD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPstoreI_FregNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_Throw: jni.o; +text: .text%__1cOloadI_fregNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadFNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSconvD2I_helperNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOMacroAssemblerEcall6MpCnJrelocInfoJrelocType__v_: runtime_sparc.o; +text: .text%__1cPconvD2I_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%Unsafe_AllocateMemory; +text: .text%__1cOLibraryCallKitbNinline_native_Reflection_getCallerClass6M_i_; +text: .text%__1cNTemplateTableGlstore6Fi_v_; +text: .text%__1cUcompI_iReg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerJfloat_cmp6MiipnRFloatRegisterImpl_2pnMRegisterImpl__v_; +text: .text%__1cNTemplateTableGfstore6Fi_v_; +text: .text%__1cSstkL_to_regD_1NodeFclone6kM_pnENode__; +text: .text%__1cNTemplateTableGdstore6Fi_v_; +text: .text%__1cJcmpOpOperFclone6kM_pnIMachOper__; +text: .text%__1cMindirectOperFclone6kM_pnIMachOper__; +text: .text%__1cOLibraryCallKitZinline_native_Class_query6MnIciMethodLIntrinsicId__i_; +text: .text%__1cNTemplateTableGastore6Fi_v_; +text: .text%__1cNTemplateTableZload_field_cp_cache_entry6FipnMRegisterImpl_22i_v_; +text: .text%__1cRcompL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableFaload6Fi_v_; +text: .text%__1cNTemplateTableGistore6Fi_v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC2_v_; +text: .text%__1cKstfSSFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableFdload6Fi_v_; +text: .text%__1cZInterpreterMacroAssemblerRprofile_checkcast6MipnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableFfload6Fi_v_; +text: .text%__1cOcmovDF_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovLI_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableFlload6Fi_v_; +text: .text%__1cZInterpreterMacroAssemblerQaccess_local_ptr6MpnMRegisterImpl_2_v_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_x6MnJAssemblerJCondition_pCpnMRegisterImpl__v_; +text: .text%__1cMnegF_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cKg1RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cIRetTableUfind_jsrs_for_target6Mi_pnNRetTableEntry__; +text: .text%__1cIRetTableHadd_jsr6Mii_v_; +text: .text%__1cSstkL_to_regD_2NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cJimmU6OperFclone6kM_pnIMachOper__; +text: .text%__1cNinstanceKlassSremove_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cSmulL_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIimmFOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: 
.text%__1cOcmovLL_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerDret6Mi_v_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerFsethi6MrnHAddress_i_v_: stubGenerator_sparc.o; +text: .text%__1cISubDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKstfSSFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_; +text: .text%__1cKo2RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cIDivDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceRefKlass.o; +text: .text%__1cIDivFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSconvD2I_helperNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIDivINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQregF_to_stkINodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cNflagsRegLOperFclone6kM_pnIMachOper__; +text: .text%__1cNflagsRegFOperFclone6kM_pnIMachOper__; +text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_; +text: .text%__1cINegDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLcmpD_ccNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%Unsafe_SetMemory; +text: .text%__1cRshlI_reg_imm5NodeFclone6kM_pnENode__; +text: .text%__1cRshlL_reg_imm6NodeFclone6kM_pnENode__; +text: .text%__1cSconvI2D_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsarI_reg_imm5NodeFclone6kM_pnENode__; +text: .text%__1cQsubI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQmulD_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cbCAbstractInterpreterGeneratorbHgenerate_exception_handler_common6Mpkc2i_pC_; +text: .text%__1cLConvD2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOcmovPI_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_2_v_; +text: .text%__1cIAddDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cKstfSSFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFTypeFGis_nan6kM_i_; +text: .text%__1cOGenerateOopMapTret_jump_targets_do6MpnOBytecodeStream_pFp0ipi_vi4_v_; +text: .text%__1cOClassPathEntry2t6M_v_; +text: .text%__1cLClassLoaderXcreate_class_path_entry6FpcnEstat_ppnOClassPathEntry__v_; +text: .text%__1cPconvD2F_regNodeFclone6kM_pnENode__; +text: .text%__1cQandI_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cNgen_new_frame6FpnOMacroAssembler_i_v_: runtime_sparc.o; +text: .text%__1cVMoveF2I_stack_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_; +text: .text%__1cRsubI_zero_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cMnegD_regNodeFclone6kM_pnENode__; +text: .text%__1cPorL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableFiload6Fi_v_; +text: .text%__1cSstkL_to_regD_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQaddI_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cMloadConFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConDNodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cLconvP2BNodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_CallStaticObjectMethod: jni.o; +text: .text%__1cCosGsignal6Fipv_1_; +text: 
.text%__1cLstoreF0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvL2I_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cINegDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC22_v_; +text: .text%__1cLstoreF0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcmovII_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveF2I_stack_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: subnode.o; +text: .text%__1cQmulI_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cKstfSSFNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCommandLineFlagsExKis_default6FnPCommandLineFlag__i_; +text: .text%__1cJArgumentsMadd_property6Fpkc_i_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: symbolKlass.o; +text: .text%__1cRshrI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLstoreF0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJMemRegionFminus6kMk0_0_; +text: .text%__1cQsubD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRComputeEntryStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cQsubF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_22_v_; +text: .text%__1cLOptoRuntimeVgenerate_handler_blob6FpCi_pnNSafepointBlob__; +text: .text%__1cQOopMapCacheEntryPfill_for_native6M_v_; +text: .text%__1cJArgumentsMbuild_string6Fppcpkc_v_; +text: .text%__1cOMacroAssemblerFsethi6MrnHAddress_i_v_: runtime_sparc.o; +text: .text%__1cNFingerprinterLfingerprint6M_L_: oopMapCache.o; +text: .text%__1cRComputeEntryStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cSObjectSynchronizerHinflate6FpnHoopDesc__pnNObjectMonitor__; +text: .text%__1cMOopTaskQdDueueKinitialize6M_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_; +text: .text%__1cLOptoRuntimeTmultianewarray_Type6Fi_pknITypeFunc__; +text: .text%__1cQshrL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: symbolKlass.o; +text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pLi_v_: oopMapCache.o; +text: .text%__1cNTemplateTableGfconst6Fi_v_; +text: .text%__1cMOopTaskQdDueue2t6M_v_; +text: .text%__1cCosGgetenv6Fpkcpci_i_; +text: .text%__1cGThreadMis_VM_thread6kM_i_: gcTaskThread.o; +text: .text%__1cSCommandLineFlagsExJboolAtPut6FnXCommandLineFlagWithType_i_v_; +text: .text%__1cOGenerateOopMapRdo_multianewarray6Mii_v_; +text: .text%JVM_FindSignal; +text: .text%__1cOcmovIF_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKConv2BNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOcmovPI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovDF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosFyield6F_v_; +text: .text%__1cOtailjmpIndNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOtailjmpIndNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFTypeDFempty6kM_i_; +text: .text%__1cKstfSSFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseRdo_multianewarray6M_v_; +text: .text%__1cSInterpreterRuntimebKthrow_ArrayIndexOutOfBoundsException6FpnKJavaThread_pci_v_; +text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_: interpreterRT_sparc.o; +text: .text%__1cOPSVirtualSpace2t6MnNReservedSpace_L_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorKpass_float6M_v_; +text: .text%__1cOCompilerThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cOtailjmpIndNodeGpinned6kM_i_: 
ad_sparc_misc.o; +text: .text%__1cKScopeValuePis_constant_int6kM_i_: debugInfo.o; +text: .text%__1cZInterpreterMacroAssemblerQtop_most_monitor6M_nHAddress__; +text: .text%__1cODeoptimizationLUnrollBlock2T6M_v_; +text: .text%JVM_GetInterfaceVersion; +text: .text%__1cNReservedSpaceJlast_part6ML_0_; +text: .text%__1cKCMoveDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cVMoveF2I_stack_regNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%jni_CallStaticObjectMethodV: jni.o; +text: .text%__1cPconvI2D_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__; +text: .text%__1cVMoveL2D_stack_regNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cQregL_to_stkLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNciMethodKlassEmake6F_p0_; +text: .text%__1cIciObjectMis_classless6kM_i_: ciMethod.o; +text: .text%__1cKloadUBNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_; +text: .text%__1cQciByteCodeStreamXget_method_holder_index6M_i_; +text: .text%__1cHciKlassIis_klass6M_i_: ciInstanceKlass.o; +text: .text%jni_GetDoubleArrayRegion: jni.o; +text: .text%__1cLconvP2BNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_; +text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__; +text: .text%__1cUPSGenerationCounters2t6MpkciipnOPSVirtualSpace__v_; +text: .text%__1cKo2RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cPstoreI_FregNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPconvI2D_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOCompilerThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_; +text: .text%__1cFStateO_sub_Op_Conv2B6MpknENode__v_; +text: .text%JVM_Available; +text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_; +text: .text%jio_vsnprintf; +text: .text%__1cISubFNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_i2_v_; +text: .text%__1cISubFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MpnMRegisterImpl_i22_v_; +text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_; +text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerRget_constant_pool6MpnMRegisterImpl__v_; +text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_; +text: .text%__1cNMemoryManager2t6M_v_; +text: .text%__1cIDivDNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIDivDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_2222rnFLabel__v_; +text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cMVM_OperationNdoit_prologue6M_i_: vm_operations.o; +text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_; +text: .text%JVM_RegisterSignal; +text: .text%__1cCosHSolarisOis_sig_ignored6Fi_i_; +text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_; +text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_; +text: 
.text%__1cFStateO_sub_Op_CMoveD6MpknENode__v_; +text: .text%__1cQshlL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o; +text: .text%__1cZInterpreterMacroAssemblerUadd_monitor_to_stack6MipnMRegisterImpl_2_v_; +text: .text%__1cLMoveF2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_222_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: universe.o; +text: .text%__1cMVirtualSpaceKinitialize6MnNReservedSpace_L_i_; +text: .text%__1cMVirtualSpace2t6M_v_; +text: .text%__1cOcmovLL_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRNativeInstructionPis_ic_miss_trap6M_i_; +text: .text%Unsafe_CompareAndSwapObject; +text: .text%__1cbAconvL2D_reg_slow_fxtofNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMVM_OperationNdoit_epilogue6M_v_: vm_operations.o; +text: .text%__1cMGCTaskThread2t6MpnNGCTaskManager_II_v_; +text: .text%__1cMGCTaskThreadDrun6M_v_; +text: .text%__1cJArgumentsXPropertyList_unique_add6FppnOSystemProperty_pkcpc_v_; +text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: arrayKlassKlass.o; +text: .text%__1cRNativeInstructionKis_illegal6M_i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cMGCTaskThreadFstart6M_v_; +text: .text%__1cUConstantOopReadValuePis_constant_oop6kM_i_: debugInfo.o; +text: .text%__1cLConvD2FNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_; +text: .text%__1cWloadConI_x43300000NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cINegFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cWloadConI_x41f00000NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSconvF2I_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cISubFNodeDsub6kMpknEType_3_3_; +text: .text%__1cZregDHi_regDLo_to_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXconvI2D_regDHi_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSaddD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSsubD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvD2F_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNVM_DeoptimizeEdoit6M_v_; +text: .text%__1cHMatcherQconvL2FSupported6F_ki_; +text: .text%__1cSmulD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvF2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLconvI2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNVM_DeoptimizeEname6kM_pkc_: vm_operations.o; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cLConvL2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_; +text: .text%__1cPOopTaskQdDueueSetOregister_queue6MipnMOopTaskQdDueue__v_; +text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_; +text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_; +text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_; +text: .text%__1cNTemplateTableKinitialize6F_v_; +text: .text%__1cNTemplateTableGlconst6Fi_v_; +text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_; +text: .text%__1cNTemplateTableGdconst6Fi_v_; +text: .text%__1cNTemplateTableDldc6Fi_v_; +text: 
.text%__1cQObjectStartArrayKinitialize6MnJMemRegion__v_; +text: .text%__1cFStateO_sub_Op_CMoveL6MpknENode__v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: assembler_sparc.o; +text: .text%__1cNTemplateTableSgetfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableUjvmti_post_field_mod6Fii_v_; +text: .text%__1cNTemplateTableSputfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableUgenerate_vtable_call6FpnMRegisterImpl_22_v_; +text: .text%__1cNTemplateTableTinvokevfinal_helper6FpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerUcalc_mem_param_words6MpnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableEidiv6F_v_; +text: .text%__1cLNamedThread2t6M_v_; +text: .text%__1cLNamedThreadIset_name6MpkcE_v_; +text: .text%__1cPGCMemoryManager2t6M_v_; +text: .text%__1cNTemplateTableHcastore6F_v_; +text: .text%__1cQObjectStartArraySset_covered_region6MnJMemRegion__v_; +text: .text%__1cFStateM_sub_Op_DivI6MpknENode__v_; +text: .text%__1cQdivI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVcompiledICHolderKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQdivD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciObjectOis_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cRCollectorCounters2t6Mpkci_v_; +text: .text%__1cJCodeCachebGmake_marked_nmethods_not_entrant6F_v_; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstanceKlass.o; +text: .text%__1cPconvF2I_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUregI_to_stkLHi_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUregI_to_stkLHi_0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSstkL_to_regD_2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSstkL_to_regD_0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cLcastP2LNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMPeriodicTaskGenroll6M_v_; +text: .text%__1cKCastPPNodeJideal_reg6kM_I_: connode.o; +text: .text%__1cLMoveF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMPeriodicTask2t6ML_v_; +text: .text%__1cLMoveL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cUGcThreadCountClosureJdo_thread6MpnGThread__v_; +text: .text%__1cRcheck_if_clipping6FpknKRegionNode_rpnGIfNode_5_i_: cfgnode.o; +text: .text%__1cFTypeDJis_finite6kM_i_; +text: .text%__1cLConvF2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLConvF2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLConvD2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOcmovIF_immNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovIF_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovIF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: frame.o; +text: .text%__1cLcmpD_ccNodeFclone6kM_pnENode__; +text: .text%__1cUInterpreterGeneratorVgenerate_native_entry6Mi_pC_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_; +text: .text%__1cRcompL_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cUcompI_iReg_imm13NodeFclone6kM_pnENode__; +text: 
.text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__; +text: .text%__1cPconvL2I_regNodeFclone6kM_pnENode__; +text: .text%__1cFParseDl2f6M_v_; +text: .text%__1cCosPuncommit_memory6FpcL_i_; +text: .text%__1cRComputeEntryStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cKcmpOpFOperFclone6kM_pnIMachOper__; +text: .text%__1cZInterpreterMacroAssemblerPset_mdp_flag_at6MipnMRegisterImpl_2_v_; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: oopMapCache.o; +text: .text%__1cSconvD2I_helperNodeFclone6kM_pnENode__; +text: .text%__1cSstkL_to_regD_2NodeFclone6kM_pnENode__; +text: .text%__1cQregF_to_stkINodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerSupdate_mdp_for_ret6MnITosState_pnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_; +text: .text%__1cOPSVirtualSpaceKinitialize6MnNReservedSpace_L_i_; +text: .text%__1cNloadConP0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cTMaskFillerForNativeJpass_long6M_v_: oopMapCache.o; +text: .text%__1cOGenerateOopMapTadd_to_ref_init_set6Mi_v_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MnITosState_pnMRegisterImpl_3_v_; +text: .text%__1cCosIjvm_path6Fpci_v_; +text: .text%__1cOcmovLL_regNodeFclone6kM_pnENode__; +text: .text%__1cKi0RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cZInterpreterMacroAssemblerbAdispatch_next_noverify_oop6MnITosState_i_v_; +text: .text%__1cJimmI0OperFclone6kM_pnIMachOper__; +text: .text%__1cTis_directory_secure6Fpkc_i_: perfMemory_solaris.o; +text: .text%__1cPfilename_to_pid6Fpkc_i_: perfMemory_solaris.o; +text: .text%__1cSconvF2I_helperNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPmake_new_frames6FpnOMacroAssembler_i_v_: runtime_sparc.o; +text: .text%__1cLconvI2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%jni_NewWeakGlobalRef: jni.o; +text: .text%__1cSestimate_path_freq6FpnENode__f_: loopnode.o; +text: .text%__1cIPSOldGenPinitialize_work6Mpkci_v_; +text: .text%__1cOstackSlotFOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cUInterpreterGeneratorLlock_method6M_v_; +text: .text%__1cKcmpOpFOperJnot_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cPconvF2I_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOLibraryCallKitVinline_fp_conversions6MnIciMethodLIntrinsicId__i_; +text: .text%__1cCosHrealloc6FpvL_1_; +text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_1_x6MnJAssemblerJCondition_rnFLabel__v_; +text: .text%__1cOtailjmpIndNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_icc6MnJAssemblerJCondition_pCpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_xcc6MnJAssemblerJCondition_pCpnMRegisterImpl__v_; +text: .text%__1cbAconvL2D_reg_slow_fxtofNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLOptoRuntimeIl2f_Type6F_pknITypeFunc__; +text: .text%__1cOstackSlotFOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotFOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOPSVirtualSpace2t6M_v_; +text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_; +text: .text%__1cZInterpreterMacroAssemblerQstore_local_long6MpnMRegisterImpl_2_v_; +text: .text%__1cTMaskFillerForNativeLpass_object6M_v_: oopMapCache.o; +text: .text%__1cZInterpreterMacroAssemblerRstore_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: 
.text%__1cZInterpreterMacroAssemblerSstore_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cRsubI_zero_regNodeFclone6kM_pnENode__; +text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_; +text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_; +text: .text%jni_CallStaticBooleanMethod: jni.o; +text: .text%__1cJAssemblerCbp6Mn0AJCondition_in0ACCC_n0AHPredict_rnFLabel__v_: interp_masm_sparc.o; +text: .text%jni_IsInstanceOf: jni.o; +text: .text%__1cCosbCis_thread_cpu_time_supported6F_i_; +text: .text%__1cTAbstractInterpreterKinitialize6F_v_; +text: .text%__1cMnegF_regNodeFclone6kM_pnENode__; +text: .text%__1cCosWactive_processor_count6F_i_; +text: .text%__1cSReferenceProcessor2t6MnJMemRegion_iii_v_; +text: .text%__1cNStubGenerator2t6MpnKCodeBuffer_i_v_: stubGenerator_sparc.o; +text: .text%__1cZInterpreterMacroAssemblerRaccess_local_long6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerZget_4_byte_integer_at_bcp6MipnMRegisterImpl_2n0AKsetCCOrNot__v_; +text: .text%__1cNPhaseRegAllocHset_oop6MpknENode_i_v_; +text: .text%__1cGatomll6Fpkcpx_i_: arguments.o; +text: .text%__1cOcmovDF_regNodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerSaccess_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cOcmovLI_regNodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerTaccess_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerPstore_local_int6MpnMRegisterImpl_2_v_; +text: .text%jni_CallStaticVoidMethodV: jni.o; +text: .text%__1cOloadConL13NodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubI_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerPstore_local_ptr6MpnMRegisterImpl_2_v_; +text: .text%__1cHRetDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o; +text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlass.o; +text: .text%__1cLPSMarkSweepKinitialize6F_v_; +text: .text%__1cNCompileBrokerQset_should_block6F_v_; +text: .text%__1cHRetDataJfixup_ret6MinQmethodDataHandle__pC_; +text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_; +text: .text%__1cNCompileBrokerQcompilation_init6FpnQAbstractCompiler__v_; +text: .text%__1cLstoreC0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodDataKlass.o; +text: .text%__1cHRetDataKis_RetData6M_i_: methodDataOop.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: klassKlass.o; +text: .text%__1cOBasicHashtable2t6Mii_v_: placeholders.o; +text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_; +text: .text%__1cCosHSolarisUsynchronization_init6F_v_; +text: .text%__1cCosHSolarisOlibthread_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o; +text: .text%__1cOisT2_libthread6F_i_; +text: .text%__1cMciKlassKlassEmake6F_p0_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: klassKlass.o; +text: .text%__1cQPlaceholderTable2t6Mi_v_; +text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: objArrayKlass.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o; +text: 
.text%__1cUPSAdaptiveSizePolicy2t6MLLLLLddI_v_; +text: .text%__1cKTypeOopPtrEmake6FnHTypePtrDPTR_i_pk0_; +text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cKTypeOopPtrFxdual6kM_pknEType__; +text: .text%__1cCosEinit6F_v_; +text: .text%__1cCosGinit_26F_i_; +text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_; +text: .text%__1cKTypeOopPtrCeq6kMpknEType__i_; +text: .text%__1cWcompilationPolicy_init6F_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_; +text: .text%__1cOloadConL13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_throw_exception6M_v_; +text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_; +text: .text%__1cbAPSGCAdaptivePolicyCounters2t6MpkciipnUPSAdaptiveSizePolicy__v_; +text: .text%__1cHvm_exit6Fi_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psAdaptiveSizePolicy.o; +text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_: methodDataKlass.o; +text: .text%__1cLbefore_exit6FpnKJavaThread__v_; +text: .text%__1cOcodeCache_init6F_v_; +text: .text%__1cWinvocationCounter_init6F_v_; +text: .text%__1cRInvocationCounterMreinitialize6Fi_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o; +text: .text%__1cOcompiler2_init6F_v_; +text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: klassKlass.o; +text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_; +text: .text%__1cNCollectedHeap2t6M_v_; +text: .text%__1cOMacroAssemblerEcall6MpCnJrelocInfoJrelocType__v_: interpreter_sparc.o; +text: .text%__1cNThreadServiceEinit6F_v_; +text: .text%__1cKPSYoungGenRavailable_to_live6M_L_; +text: .text%__1cETypeRInitialize_shared6FpnHCompile__v_; +text: .text%__1cMTailJumpNode2t6MpnENode_22222_v_; +text: .text%__1cQsubL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_: constMethodKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlassKlass.o; +text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o; +text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_: interpreterRT_sparc.o; +text: .text%__1cKPSYoungGenPinitialize_work6M_v_; +text: .text%__1cSobjArrayKlassKlassUoop_is_objArrayKlass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cKPSYoungGenKinitialize6MnNReservedSpace_L_v_; +text: .text%__1cKPSYoungGenYinitialize_virtual_space6MnNReservedSpace_L_v_; +text: .text%__1cKPSYoungGen2t6MLLL_v_; +text: .text%__1cOPSVirtualSpaceJshrink_by6ML_i_; +text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: cpCacheKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: cpCacheKlass.o; +text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlassKlass.o; +text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constantPoolKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: objArrayKlassKlass.o; +text: .text%__1cURecompilationMonitorbGstart_recompilation_monitor_task6F_v_; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: constantPoolKlass.o; +text: 
.text%__1cJTimeStampJupdate_to6Mx_v_; +text: .text%__1cCosLsignal_wait6F_i_; +text: .text%__1cJTimeStampMmilliseconds6kM_x_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: constantPoolKlass.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: constantPoolKlass.o; +text: .text%__1cSmulL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMCodeHeapPool2t6MpnICodeHeap_pkci_v_; +text: .text%__1cKPSYoungGenbCreset_survivors_after_shrink6M_v_; +text: .text%__1cKPSYoungGenQlimit_gen_shrink6ML_L_; +text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_StackOverflowError_handler6M_pC_; +text: .text%__1cIPSOldGenOgen_size_limit6M_L_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: cfgnode.o; +text: .text%__1cIPSOldGenGresize6ML_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_; +text: .text%__1cKstfSSFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIPSOldGenKinitialize6MnNReservedSpace_Lpkci_v_; +text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cUCompressedReadStreamJread_long6M_x_; +text: .text%__1cTcompilerOracle_init6F_v_; +text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_; +text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_: compiledICHolderKlass.o; +text: .text%__1cIPSOldGen2t6MLLLpkci_v_; +text: .text%__1cIPSOldGen2t6MnNReservedSpace_LLLLpkci_v_; +text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o; +text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_; +text: .text%__1cCosHSolarisRmpss_sanity_check6F_v_; +text: .text%__1cSInterpreterRuntimeWcreate_klass_exception6FpnKJavaThread_pcpnHoopDesc__v_; +text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_L_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o; +text: .text%__1cKPSScavengeKinitialize6F_v_; +text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_; +text: .text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__; +text: .text%__1cKcastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSInterpreterRuntimeSupdate_mdp_for_ret6FpnKJavaThread_i_v_; +text: .text%__1cSPSPromotionManagerKinitialize6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psPromotionLAB.o; +text: .text%__1cOcmovLI_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovLI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKcmpOpFOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cNpriocntl_stub6FinGidtype_iipc_l_: os_solaris.o; +text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o; +text: .text%__1cJPSPermGen2t6MnNReservedSpace_LLLLpkci_v_; +text: .text%__1cQjavaClasses_init6F_v_; +text: .text%JVM_InitProperties; +text: .text%JVM_Halt; +text: .text%__1cNReservedSpace2t6MLLipc_v_; +text: .text%__1cFParseMjump_if_join6MpnENode_2_2_; +text: .text%JVM_MaxMemory; +text: .text%__1cNReservedSpaceUpage_align_size_down6FL_L_; +text: .text%__1cNReservedSpaceYallocation_align_size_up6FL_L_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: parse1.o; +text: .text%JVM_GetClassDeclaredMethods; +text: .text%__1cLMoveL2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLMoveF2INodeJideal_reg6kM_I_: 
classes.o; +text: .text%__1cOvmStructs_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o; +text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_; +text: .text%__1cMloadConLNodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_; +text: .text%__1cIVMThreadGcreate6F_v_; +text: .text%__1cIVMThreadDrun6M_v_; +text: .text%__1cLJvmtiExportNpost_vm_death6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o; +text: .text%__1cMMutableSpaceOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cXPartialSubtypeCheckNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cVquicken_jni_functions6F_v_; +text: .text%__1cKg1RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%JNI_CreateJavaVM; +text: .text%__1cSmembar_releaseNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_; +text: .text%__1cHOrLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cHOrLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_sparc_expand.o; +text: .text%__1cQjni_handles_init6F_v_; +text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_; +text: .text%__1cUParallelScavengeHeapKinitialize6M_i_; +text: .text%__1cUJvmtiEventControllerIvm_start6F_v_; +text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_; +text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o; +text: .text%__1cPVM_Version_init6F_v_; +text: .text%__1cKVM_VersionKinitialize6F_v_; +text: .text%__1cbEinitialize_converter_functions6F_v_; +text: .text%__1cUParallelScavengeHeapItop_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapIend_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapOresize_old_gen6ML_v_; +text: .text%__1cQvtableStubs_init6F_v_; +text: .text%__1cQaccessFlags_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o; +text: .text%__1cFVTuneEexit6F_v_; +text: .text%__1cUParallelScavengeHeapEheap6F_p0_; +text: .text%__1cUParallelScavengeHeapNgc_threads_do6kMpnNThreadClosure__v_; +text: .text%__1cUParallelScavengeHeapYpermanent_object_iterate6MpnNObjectClosure__v_; +text: .text%__1cKvtune_init6F_v_; +text: .text%JVM_SupportsCX8; +text: .text%__1cLOptoRuntimebSgenerate_polling_page_safepoint_handler_blob6F_v_; +text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_; +text: .text%__1cMFastLockNodeLis_FastLock6kM_pk0_: classes.o; +text: .text%__1cUParallelScavengeHeapPpost_initialize6M_v_; +text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_start6F_v_; +text: 
.text%__1cLJvmtiExportQenter_live_phase6F_v_; +text: .text%__1cUParallelScavengeHeapMmax_capacity6kM_L_; +text: .text%__1cLJvmtiExportRenter_start_phase6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o; +text: .text%__1cKmutex_init6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_death6F_v_; +text: .text%JVM_InitializeSocketLibrary; +text: .text%__1cHVM_ExitEdoit6M_v_; +text: .text%__1cHVM_ExitEname6kM_pkc_: vm_operations.o; +text: .text%JVM_Socket; +text: .text%__1cFParseNfetch_monitor6MipnENode_2_2_; +text: .text%__1cUJvmtiEventControllerHvm_init6F_v_; +text: .text%__1cJLoadFNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cPGenerationSizerQinitialize_flags6M_v_: parallelScavengeHeap.o; +text: .text%__1cbCTwoGenerationCollectorPolicyMrem_set_name6M_nJGenRemSetEName__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapEkind6M_nNCollectedHeapEName__: parallelScavengeHeap.o; +text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_; +text: .text%__1cKi0RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cMostream_init6F_v_; +text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_; +text: .text%__1cYjni_GetBooleanField_addr6F_pC_; +text: .text%__1cVjni_GetByteField_addr6F_pC_; +text: .text%__1cVjni_GetCharField_addr6F_pC_; +text: .text%__1cWjni_GetShortField_addr6F_pC_; +text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cTtypeArrayKlassKlassVoop_is_typeArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cUjni_GetIntField_addr6F_pC_; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_; +text: .text%__1cVjni_GetLongField_addr6F_pC_; +text: .text%__1cWjni_GetFloatField_addr6F_pC_; +text: .text%__1cFKlassMoop_is_array6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cNExceptionBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cQUncommonTrapBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: typeArrayKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlassKlass.o; +text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodKlass.o; +text: .text%__1cIciMethodMvtable_index6M_i_; +text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_; +text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_; +text: .text%__1cCosXnon_memory_address_word6F_pc_; +text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_; +text: .text%__1cFKlassMoop_is_array6kM_i_: klassKlass.o; +text: .text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: klassKlass.o; +text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_; +text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: klassKlass.o; +text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cQregF_to_stkINodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_; +text: .text%jni_ToReflectedMethod: jni.o; +text: 
.text%__1cFKlassQoop_is_typeArray6kM_i_: klassKlass.o; +text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cLmethodKlassOset_alloc_size6MI_v_: methodKlass.o; +text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_; +text: .text%__1cNget_user_name6Fi_pc_: perfMemory_solaris.o; +text: .text%__1cPperfMemory_exit6F_v_; +text: .text%__1cPperfMemory_init6F_v_; +text: .text%__1cMPerfDataList2T6M_v_; +text: .text%JVM_RegisterUnsafeMethods; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__; +text: .text%__SLIP.DELETER__C: ostream.o; +text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_; +text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__; +text: .text%__1cFKlassUoop_is_instanceKlass6kM_i_: klassKlass.o; +text: .text%__1cLClassLoaderQload_zip_library6F_v_; +text: .text%__1cPPerfDataManagerHdestroy6F_v_; +text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_; +text: .text%__1cLClassLoaderKinitialize6F_v_; +text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_; +text: .text%__1cQclassLoader_init6F_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: classLoader.o; +text: .text%__1cVverificationType_init6F_v_; +text: .text%__1cVverificationType_exit6F_v_; +text: .text%__1cTClassLoadingServiceEinit6F_v_; +text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cIUniversePcheck_alignment6FLLpkc_v_; +text: .text%__1cIUniverseHgenesis6FpnGThread__v_; +text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_; +text: .text%__1cXjni_GetDoubleField_addr6F_pC_; +text: .text%__1cRciArrayKlassKlassUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cQostream_init_log6F_v_; +text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_; +text: .text%__1cNuniverse_init6F_i_; +text: .text%__1cOuniverse2_init6F_v_; +text: .text%__1cSuniverse_post_init6F_v_; +text: .text%__1cMostream_exit6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: phase.o; +text: .text%__1cOtailjmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodLiveness.o; +text: .text%Unsafe_SetNativeLong; +text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_; +text: .text%__1cKPerfMemoryUcreate_memory_region6FL_v_; +text: .text%__1cSmmap_create_shared6FL_pc_: perfMemory_solaris.o; +text: .text%Unsafe_FreeMemory; +text: .text%__1cbAcreate_sharedmem_resources6Fpkc1L_i_: perfMemory_solaris.o; +text: .text%Unsafe_PageSize; +text: .text%__1cRmake_user_tmp_dir6Fpkc_i_: perfMemory_solaris.o; +text: .text%__1cKScopeValuePis_constant_oop6kM_i_: debugInfo.o; +text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_; +text: .text%__1cQSystemDictionarybDinitialize_basic_type_mirrors6FpnGThread__v_; +text: .text%__1cLOptoRuntimebPgenerate_polling_page_return_handler_blob6F_v_; +text: .text%__1cLOptoRuntimebPgenerate_illegal_instruction_handler_blob6F_v_; +text: .text%__1cLOptoRuntimebBgenerate_uncommon_trap_blob6F_v_; +text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_; +text: .text%__1cLOptoRuntimeWfill_in_exception_blob6F_v_; +text: .text%__1cQshrL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLOptoRuntimeUsetup_exception_blob6F_v_; +text: .text%__1cMPeriodicTask2T5B6M_v_; +text: 
.text%__1cLOptoRuntimeYgenerate_arraycopy_stubs6F_v_; +text: .text%__1cMPeriodicTaskJdisenroll6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o; +text: .text%__1cORuntimeServiceYrecord_application_start6F_v_; +text: .text%__1cCosXis_server_class_machine6F_i_; +text: .text%__1cDhpiKinitialize6F_i_; +text: .text%__1cDhpiZinitialize_socket_library6F_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: matcher.o; +text: .text%__1cScheck_ThreadShadow6F_v_; +text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_; +text: .text%__1cSCommandLineFlagsExKuintxAtPut6FnXCommandLineFlagWithType_L_v_; +text: .text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_; +text: .text%__1cHMatcherbDinterpreter_frame_pointer_reg6F_nHOptoRegEName__; +text: .text%__1cQdivD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cCosGstrdup6Fpkc_pc_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_sparc_pipeline.o; +text: .text%__1cOmarksweep_init6F_v_; +text: .text%__1cQsubD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC222_v_; +text: .text%__1cCosLinit_random6Fl_v_; +text: .text%__1cNIdealLoopTreeUmerge_many_backedges6MpnOPhaseIdealLoop__v_; +text: .text%__1cICodeHeap2t6M_v_; +text: .text%__1cQaddF_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cICodeHeapHreserve6MLLL_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o; +text: .text%__1cOMacroAssemblerNset_vm_result6MpnMRegisterImpl__v_; +text: .text%__1cCosNset_boot_path6Fcc_i_; +text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_; +text: .text%__1cORuntimeServiceEinit6F_v_; +text: .text%__1cNTemplateTableGbipush6F_v_; +text: .text%__1cNTemplateTableGsipush6F_v_; +text: .text%__1cNTemplateTableGldc2_w6F_v_; +text: .text%__1cNTemplateTableFiload6F_v_; +text: .text%__1cNTemplateTableLfast_iload26F_v_; +text: .text%__1cNTemplateTableKfast_iload6F_v_; +text: .text%__1cNTemplateTableFlload6F_v_; +text: .text%__1cNTemplateTableFfload6F_v_; +text: .text%__1cNTemplateTableFdload6F_v_; +text: .text%__1cNTemplateTableFaload6F_v_; +text: .text%__1cNTemplateTableKwide_iload6F_v_; +text: .text%__1cNTemplateTableKwide_lload6F_v_; +text: .text%__1cNTemplateTableKwide_fload6F_v_; +text: .text%__1cNTemplateTableKwide_dload6F_v_; +text: .text%__1cNTemplateTableKwide_aload6F_v_; +text: .text%__1cNTemplateTableGiaload6F_v_; +text: .text%__1cNTemplateTableGlaload6F_v_; +text: .text%__1cNTemplateTableGfaload6F_v_; +text: .text%__1cNTemplateTableGdaload6F_v_; +text: .text%__1cNTemplateTableGaaload6F_v_; +text: .text%__1cWInlineCacheBuffer_init6F_v_; +text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_; +text: .text%__1cLOptoRuntimeNfetch_monitor6FipnJBasicLock_pC_pnHoopDesc__; +text: .text%__1cLOptoRuntimeSfetch_monitor_Type6F_pknITypeFunc__; +text: .text%__1cNTemplateTableF_goto6F_v_; +text: .text%__1cNTemplateTableGgoto_w6F_v_; +text: .text%__1cNTemplateTableFjsr_w6F_v_; +text: .text%__1cNTemplateTableDjsr6F_v_; +text: .text%__1cNeventlog_init6F_v_; +text: .text%__1cOMacroAssemblerVverify_oop_subroutine6M_v_; +text: .text%__1cKfix_parent6FpnNIdealLoopTree_1_v_: loopnode.o; +text: .text%__1cStemplateTable_init6F_v_; +text: .text%__1cNTemplateTableNpd_initialize6F_v_; +text: .text%__1cNTemplateTableDnop6F_v_; +text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_; +text: .text%__1cNTemplateTableLaconst_null6F_v_; +text: 
.text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_; +text: .text%__1cNIdealLoopTreeQsplit_outer_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o; +text: .text%__1cGThreadOis_Java_thread6kM_i_: gcTaskThread.o; +text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorSgenerate_test_stop6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbIgenerate_copy_words_aligned8_lower6M_pC_: stubGenerator_sparc.o; +text: .text%__1cINegFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNStubGeneratorbJgenerate_copy_words_aligned8_higher6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbEgenerate_partial_subtype_check6M_pC_: stubGenerator_sparc.o; +text: .text%__1cJArgumentsVfinalize_vm_init_args6FpnMSysClassPath_i_i_; +text: .text%__1cJArgumentsbNparse_java_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsbSparse_java_tool_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cNGCTaskManagerKthreads_do6MpnNThreadClosure__v_; +text: .text%__1cNGCTaskManagerKinitialize6M_v_; +text: .text%__1cNGCTaskManager2t6MI_v_; +text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_; +text: .text%__1cMSysClassPathPexpand_endorsed6M_v_; +text: .text%__1cOtailjmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_; +text: .text%__1cLStatSamplerHdestroy6F_v_; +text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_; +text: .text%__1cLStatSamplerJdisengage6F_v_; +text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_; +text: .text%__1cJArgumentsVprocess_settings_file6Fpkcii_i_; +text: .text%__1cRLowMemoryDetectorKinitialize6F_v_; +text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_; +text: .text%__1cSstubRoutines_init26F_v_; +text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__L_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o; +text: .text%__1cSstubRoutines_init16F_v_; +text: .text%__1cNStubGeneratorbNgenerate_flush_callers_register_windows6M_pC_: stubGenerator_sparc.o; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: stubGenerator_sparc.o; +text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_; +text: .text%__1cNStubGeneratorbIgenerate_handler_for_unsafe_access6M_pC_: stubGenerator_sparc.o; +text: .text%__1cJArgumentsZcheck_vm_args_consistency6F_i_; +text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_; +text: .text%__1cXLowMemoryDetectorThreadbCis_hidden_from_external_view6kM_i_: lowMemoryDetector.o; +text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_sparc.o; +text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_sparc.o; +text: .text%__1cJArgumentsWparse_each_vm_init_arg6FpknOJavaVMInitArgs_pnMSysClassPath_pi_i_; +text: .text%__1cVMoveL2D_stack_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o; +text: .text%__1cWResolveOopMapConflictsUdo_potential_rewrite6MpnGThread__nMmethodHandle__; +text: .text%__1cKRegionNodeUdepends_only_on_test6kM_i_: loopnode.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: arrayKlassKlass.o; +text: 
.text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: arrayKlassKlass.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: machnode.o; +text: .text%__1cPmanagement_init6F_v_; +text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o; +text: .text%__1cFframeVinterpreter_frame_mdp6kM_pC_; +text: .text%__1cKManagementKinitialize6FpnGThread__v_; +text: .text%__1cCosLsignal_init6F_v_; +text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_; +text: .text%__1cCosXterminate_signal_thread6F_v_; +text: .text%__1cRcheck_basic_types6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o; +text: .text%__1cNSharedRuntimebIinitialize_StrictMath_entry_points6F_v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC222i_v_; +text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpnMRegisterImpl_pCi_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o; +text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cVMoveF2I_stack_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLStatSamplerGengage6F_v_; +text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_; +text: .text%__1cGThreadMis_VM_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cJArgumentsWinit_system_properties6F_v_; +text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHGCStats2t6M_v_; +text: .text%__1cOchunkpool_init6F_v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: symbolKlass.o; +text: .text%__1cWAdjoiningVirtualSpaces2t6MnNReservedSpace_LLL_v_; +text: .text%__1cLconvP2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUAdjoiningGenerations2t6MnNReservedSpace_LLLLLLL_v_; +text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_; +text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_: symbolKlass.o; +text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: symbolKlass.o; +text: .text%__1cNcarSpace_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o; +text: .text%__1cPorL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableGbaload6F_v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpnMRegisterImpl_pC2_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_normal6MnITosState__v_; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: library_call.o; +text: .text%__1cOMacroAssemblerCfb6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: templateTable_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: tenuredGeneration.o; +text: .text%__1cCosScurrent_process_id6F_i_; +text: .text%__1cCosbDallocate_thread_local_storage6F_i_; +text: .text%__1cZInterpreterMacroAssemblerbFset_method_data_pointer_for_bcp6M_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: dictionary.o; +text: .text%__1cOLibraryCallKitWinline_native_hashcode6Mii_i_; +text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_; +text: .text%__1cRsarL_reg_imm6NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_; +text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_; +text: .text%__1cXonStackReplacement_init6F_v_; +text: .text%__1cNMemoryManagerbDget_psScavenge_memory_manager6F_pnPGCMemoryManager__; +text: 
.text%__1cNMemoryManagerbEget_psMarkSweep_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cKDictionary2t6Mi_v_; +text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o; +text: .text%__1cNTemplateTableOmultianewarray6F_v_; +text: .text%__1cNTemplateTableIputfield6Fi_v_; +text: .text%__1cNTemplateTableJputstatic6Fi_v_; +text: .text%__1cVInterfaceSupport_init6F_v_; +text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_; +text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_; +text: .text%__1cNTemplateTableNinvokespecial6Fi_v_; +text: .text%__1cNTemplateTableMinvokestatic6Fi_v_; +text: .text%__1cNTemplateTablebDinvokeinterface_object_method6FpnMRegisterImpl_222_v_; +text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_; +text: .text%__1cNTemplateTableE_new6F_v_; +text: .text%__1cNTemplateTableInewarray6F_v_; +text: .text%__1cNTemplateTableJanewarray6F_v_; +text: .text%__1cNTemplateTableLarraylength6F_v_; +text: .text%__1cNTemplateTableJcheckcast6F_v_; +text: .text%__1cNTemplateTableKinstanceof6F_v_; +text: .text%__1cNTemplateTableL_breakpoint6F_v_; +text: .text%__1cNTemplateTableGathrow6F_v_; +text: .text%__1cNTemplateTableMmonitorenter6F_v_; +text: .text%__1cNTemplateTableLmonitorexit6F_v_; +text: .text%__1cNTemplateTableEwide6F_v_; +text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o; +text: .text%__1cQPSGenerationPool2t6MpnIPSOldGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cQPSGenerationPool2t6MpnJPSPermGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cRConstantLongValueQis_constant_long6kM_i_: debugInfo.o; +text: .text%__1cUEdenMutableSpacePool2t6MpnKPSYoungGen_pnMMutableSpace_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cVcheck_pending_signals6Fi_i_: os_solaris.o; +text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cOThreadCriticalKinitialize6F_v_; +text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_; +text: .text%__1cXreferenceProcessor_init6F_v_; +text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_; +text: .text%__1cYSurvivorMutableSpacePool2t6MpnKPSYoungGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cTConstantDoubleValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cJBytecodesKinitialize6F_v_; +text: .text%__1cObytecodes_init6F_v_; +text: .text%__1cJBytecodesNpd_initialize6F_v_; +text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_; +text: .text%__1cPGlobalTLABStats2t6M_v_; +text: .text%__1cHCompileRpd_compiler2_init6F_v_; +text: .text%__1cKC2CompilerKinitialize6M_v_; +text: .text%__1cSThreadLocalStorageEinit6F_v_; +text: .text%__1cQinterpreter_init6F_v_; +text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o; +text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o; +text: .text%__1cNWatcherThreadDrun6M_v_; +text: .text%__1cNWatcherThreadEstop6F_v_; +text: .text%__1cOMacroAssemblerElcmp6MpnMRegisterImpl_22_v_; +text: .text%__1cFStatebB_sub_Op_PartialSubtypeCheck6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_MoveF2I6MpknENode__v_; +text: .text%__1cVRegistersForDebuggingRrestore_registers6FpnOMacroAssembler_pnMRegisterImpl__v_: assembler_sparc.o; +text: .text%__1cVRegistersForDebuggingOsave_registers6FpnOMacroAssembler__v_: assembler_sparc.o; +text: .text%__1cCosSget_temp_directory6F_pkc_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: regmask.o; +text: .text%__1cFStateO_sub_Op_CastPP6MpknENode__v_; +text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_; +text: 
.text%__1cCosNsigexitnum_pd6F_i_; +text: .text%__1cCosHSolarisPinit_signal_mem6F_v_; +text: .text%__1cCosOsignal_init_pd6F_v_; +text: .text%__1cCosNsignal_notify6Fi_v_; +text: .text%__1cKScopeValueSis_constant_double6kM_i_: debugInfo.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interp_masm_sparc.o; +text: .text%__1cQshlI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cCosHSolarisQsignal_sets_init6F_v_; +text: .text%__1cNTemplateTableHfastore6F_v_; +text: .text%__1cNTemplateTableHdastore6F_v_; +text: .text%__1cNTemplateTableHaastore6F_v_; +text: .text%__1cNTemplateTableHbastore6F_v_; +text: .text%__1cLOptoRuntimeUmultianewarray3_Type6F_pknITypeFunc__; +text: .text%__1cNTemplateTableHsastore6F_v_; +text: .text%__1cLOptoRuntimeUmultianewarray2_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray1_Type6F_pknITypeFunc__; +text: .text%__1cSThreadLocalStorageHpd_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: indexSet.o; +text: .text%__1cPvm_init_globals6F_v_; +text: .text%__1cNTemplateTableDpop6F_v_; +text: .text%__1cNTemplateTableEpop26F_v_; +text: .text%__1cNTemplateTableDdup6F_v_; +text: .text%__1cNTemplateTableGdup_x16F_v_; +text: .text%__1cNTemplateTableGdup_x26F_v_; +text: .text%__1cNTemplateTableEdup26F_v_; +text: .text%__1cNTemplateTableHdup2_x16F_v_; +text: .text%__1cNTemplateTableHdup2_x26F_v_; +text: .text%__1cNTemplateTableEswap6F_v_; +text: .text%__1cNTemplateTableHlastore6F_v_; +text: .text%__1cNTemplateTableGcaload6F_v_; +text: .text%__1cNTemplateTableMfast_icaload6F_v_; +text: .text%__1cNTemplateTableGsaload6F_v_; +text: .text%__1cLicache_init6F_v_; +text: .text%__1cTICacheStubGeneratorVgenerate_icache_flush6MppFpCii_i_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: loaderConstraints.o; +text: .text%__1cLOptoRuntimeUmultianewarray5_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray4_Type6F_pknITypeFunc__; +text: .text%__1cNTemplateTableHaload_06F_v_; +text: .text%__1cNTemplateTableGistore6F_v_; +text: .text%__1cNTemplateTableGlstore6F_v_; +text: .text%__1cNTemplateTableGfstore6F_v_; +text: .text%__1cNTemplateTableGdstore6F_v_; +text: .text%__1cNTemplateTableGastore6F_v_; +text: .text%__1cNTemplateTableLwide_istore6F_v_; +text: .text%__1cNTemplateTableLwide_lstore6F_v_; +text: .text%__1cNTemplateTableLwide_fstore6F_v_; +text: .text%__1cNTemplateTableLwide_dstore6F_v_; +text: .text%__1cNTemplateTableLwide_astore6F_v_; +text: .text%__1cNTemplateTableHiastore6F_v_; +text: .text%__1cMinit_globals6F_i_; +text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o; +text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cSinstanceKlassKlassUoop_is_instanceKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cNTemplateTableDret6F_v_; +text: .text%__1cNTemplateTableIwide_ret6F_v_; +text: .text%__1cNTemplateTableLtableswitch6F_v_; +text: .text%__1cNTemplateTableMlookupswitch6F_v_; +text: .text%__1cNTemplateTableRfast_linearswitch6F_v_; +text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cWResolveOopMapConflictsOreport_results6kM_i_: rewriter.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlassKlass.o; +text: 
.text%__1cNTemplateTableIgetfield6Fi_v_; +text: .text%__1cNTemplateTableJgetstatic6Fi_v_; +text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_; +text: .text%__1cCosScreate_main_thread6FpnGThread__i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o; +text: .text%__1cVLoaderConstraintTable2t6Mi_v_; +text: .text%__1cCosPphysical_memory6F_X_; +text: .text%__1cNTemplateTableEirem6F_v_; +text: .text%__1cNTemplateTableElmul6F_v_; +text: .text%__1cNTemplateTableEldiv6F_v_; +text: .text%__1cNTemplateTableElrem6F_v_; +text: .text%__1cNTemplateTableElshl6F_v_; +text: .text%__1cNTemplateTableElshr6F_v_; +text: .text%__1cNTemplateTableFlushr6F_v_; +text: .text%__1cMexit_globals6F_v_; +text: .text%__1cCosbDinit_system_properties_values6F_v_; +text: .text%__1cNTemplateTableEineg6F_v_; +text: .text%__1cNTemplateTableElneg6F_v_; +text: .text%__1cNTemplateTableEfneg6F_v_; +text: .text%__1cNTemplateTableEdneg6F_v_; +text: .text%__1cNTemplateTableEiinc6F_v_; +text: .text%__1cNTemplateTableJwide_iinc6F_v_; +text: .text%__1cLOptoRuntimeIgenerate6FpnFciEnv__v_; +text: .text%__1cNTemplateTableElcmp6F_v_; +text: .text%__1cSset_init_completed6F_v_; +text: .text%__1cOMacroAssemblerPstop_subroutine6M_v_; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/reorder_CORE_i486 2009-08-01 04:16:55.931745040 +0100 @@ -0,0 +1 @@ +# reordering not supported for CORE builds --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/reorder_CORE_sparc 2009-08-01 04:16:56.344289152 +0100 @@ -0,0 +1 @@ +# reordering not supported for CORE builds --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/reorder_CORE_sparcv9 2009-08-01 04:16:56.753103275 +0100 @@ -0,0 +1 @@ +# reordering not supported for CORE builds --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/reorder_TIERED_amd64 2009-08-01 04:16:57.204462707 +0100 @@ -0,0 +1,8115 @@ +data = R0x2000; +text = LOAD ?RXO; + + +text: .text%__1cECopyRpd_disjoint_words6FpnIHeapWord_2L_v_; +text: .text%__1cSPSPromotionManagerWcopy_to_survivor_space6MpnHoopDesc__2_; +text: .text%__1cNinstanceKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cJMarkSweepO_mark_and_push6FppnHoopDesc__v_; +text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQIndexSetIteratorEnext6M_I_; +text: .text%__1cJMarkSweepPmark_and_follow6FppnHoopDesc__v_; +text: .text%__1cCosOjavaTimeMillis6F_x_; +text: .text%__1cNRelocIteratorEnext6M_i_; +text: .text%__1cQIndexSetIteratorQadvance_and_next6M_I_; +text: .text%__1cIUniverseMnon_oop_word6F_pv_; +text: .text%__1cNobjArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQObjectStartArrayMobject_start6kMpnIHeapWord__2_; +text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cJMarkSweepOIsAliveClosureLdo_object_b6MpnHoopDesc__i_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__; +text: .text%__1cIPhaseIFGIadd_edge6MII_i_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_; +text: .text%__1cIMachNodeHis_Mach6M_p0_;
+text: .text%__1cENodeHis_Copy6kM_I_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_; +text: .text%__1cQIndexSetIterator2t6MpnIIndexSet__v_; +text: .text%__1cOPhaseIdealLoopOidom_no_update6kMpnENode__2_; +text: .text%__1cHnmethodKcan_unload6MpnRBoolObjectClosure_pnKOopClosure_ppnHoopDesc_i_i_; +text: .text%__1cIMachNodeNrematerialize6kM_i_; +text: .text%__1cHRegMaskFis_UP6kM_i_; +text: .text%__1cXresource_allocate_bytes6FL_pc_; +text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__; +text: .text%__1cHRegMaskESize6kM_I_; +text: .text%__1cIIndexSetLalloc_block6M_pn0AIBitBlock__; +text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_; +text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_; +text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cDLRGOcompute_degree6kMr0_i_; +text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cWPSScavengeRootsClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cYPSPromotionFailedClosureJdo_object6MpnHoopDesc__v_; +text: .text%__1cENodeEjvms6kM_pnIJVMState__; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_; +text: .text%__1cIIndexSetWalloc_block_containing6MI_pn0AIBitBlock__; +text: .text%__1cENodeHdel_out6Mp0_v_; +text: .text%__1cKRelocationLunpack_data6M_v_; +text: .text%__1cIMachNodeJideal_reg6kM_I_; +text: .text%__1cJAssemblerOlocate_operand6FpCn0AMWhichOperand__1_; +text: .text%__1cKRelocationSpd_address_in_code6M_ppC_; +text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__; +text: .text%__1cRMachSpillCopyNodeMis_SpillCopy6M_p0_; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_; +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__; +text: .text%__1cENodeGis_CFG6kM_i_; +text: .text%__1cKRelocationNunpack_2_ints6Mri1_v_; +text: .text%__1cETypeDcmp6Fpk02_i_; +text: .text%__1cQObjectStartArrayWobject_starts_in_range6kMpnIHeapWord_2_i_; +text: .text%__1cOoop_RelocationLunpack_data6M_v_; +text: .text%__1cHRegMaskJis_bound16kM_i_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__; +text: .text%__1cOtypeArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cHRegMaskJis_bound26kM_i_; +text: .text%__1cRmethodDataOopDescHdata_at6Mi_pnLProfileData__; +text: .text%__1cNGrowableArray4CI_Hat_grow6MirkI_I_; +text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cRmethodDataOopDescJnext_data6MpnLProfileData__2_; +text: .text%__1cMOopTaskQdDueueOpop_local_slow6MInOTaskQdDueueSuperDAge__i_; +text: .text%__1cRMachSpillCopyNodeHis_Copy6kM_I_; +text: .text%__1cENodeGpinned6kM_i_; +text: .text%__1cOoop_RelocationJoop_value6M_pnHoopDesc__; +text: .text%__1cJrRegPOperEtype6kM_pknEType__; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_LI_v_; +text: .text%__1cMOopTaskQdDueueKpop_global6MrpnHoopDesc__i_; +text: .text%__1cPOopTaskQdDueueSetPsteal_best_of_26MipirpnHoopDesc__i_; +text: .text%__1cJVectorSet2R6MI_rnDSet__; +text: 
.text%__1cNRelocIteratorTadvance_over_prefix6M_v_; +text: .text%__1cSPSPromotionManagerUflush_prefetch_queue6M_v_; +text: .text%__1cIProjNodeHis_Proj6M_p0_; +text: .text%__1cPDictionaryEntrybDprotection_domain_set_oops_do6MpnKOopClosure__v_; +text: .text%__1cMOopMapStreamJfind_next6M_v_; +text: .text%__1cMloadConPNodePoper_input_base6kM_I_; +text: .text%__1cMloadConPNodeHtwo_adr6kM_I_; +text: .text%__1cMloadConPNodeErule6kM_I_; +text: .text%__1cHPhiNodeGis_Phi6M_p0_; +text: .text%__1cITypeNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__; +text: .text%__1cDff16FI_i_; +text: .text%__1cENodeNrematerialize6kM_i_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_; +text: .text%__1cFframeVoopmapreg_to_location6kMipknLRegisterMap__ppnHoopDesc__; +text: .text%__1cIIndexSetKinitialize6MI_v_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__; +text: .text%__1cMMutableSpaceMcas_allocate6ML_pnIHeapWord__; +text: .text%__1cIMachNodeGOpcode6kM_i_; +text: .text%__1cMget_live_bit6Fpii_i_: buildOopMap.o; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__; +text: .text%__1cENodeHadd_req6Mp0_v_; +text: .text%__1cENodeIout_grow6MI_v_; +text: .text%__1cMset_live_bit6Fpii_v_: buildOopMap.o; +text: .text%__1cKTypeOopPtrFklass6kM_pnHciKlass__; +text: .text%__1cIIndexSetKfree_block6MI_v_; +text: .text%__1cJCProjNodeGis_CFG6kM_i_; +text: .text%__1cETypeFuhash6Fpk0_i_; +text: .text%__1cJrRegIOperEtype6kM_pknEType__; +text: .text%__1cMPhaseChaitinLskip_copies6MpnENode__2_; +text: .text%__1cIMachNodeMcisc_operand6kM_i_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_; +text: .text%__1cICallNodeKmatch_edge6kMI_I_; +text: .text%__1cENodeHis_Call6M_pnICallNode__; +text: .text%__1cNCollectedHeapbDcheck_for_bad_heap_word_value6MpnIHeapWord_L_v_; +text: .text%__1cEDictGInsert6Mpv1i_1_; +text: .text%JVM_CurrentTimeMillis; +text: .text%__1cINodeHashLhash_delete6MpknENode__i_; +text: .text%__1cETypeJtype_dict6F_pnEDict__; +text: .text%__1cOPSPromotionLABKinitialize6MnJMemRegion__v_; +text: .text%__1cOPSPromotionLABFflush6M_v_; +text: .text%__1cIrc_class6Fi_nCRC__: ad_amd64.o; +text: .text%__1cMPhaseChaitinTinterfere_with_live6MIpnIIndexSet__v_; +text: .text%__1cINodeHashQhash_find_insert6MpnENode__2_; +text: .text%__1cNCollectedHeapbAcommon_mem_allocate_noinit6FLipnGThread__pnIHeapWord__; +text: .text%__1cENodeHis_Load6M_pnILoadNode__; +text: .text%__1cFArenaIcontains6kMpkv_i_; +text: .text%__1cJMultiNodeGis_CFG6kM_i_; +text: .text%__1cHPhiNodeGOpcode6kM_i_; +text: .text%__1cMPhaseChaitinKelide_copy6MpnENode_ipnFBlock_rnJNode_List_6i_i_; +text: .text%__1cKjmpDirNodeNis_block_proj6kM_pknENode__; +text: .text%__1cIProjNodeGis_CFG6kM_i_; +text: .text%__1cRMachSpillCopyNodeJideal_reg6kM_I_; +text: .text%__1cETypeIhashcons6M_pk0_; +text: .text%__1cENodeEhash6kM_I_; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_; +text: .text%__1cOlower_pressure6FpnDLRG_IpnFBlock_pI4_v_: ifg.o; +text: .text%__1cPOopTaskQdDueueSetFsteal6MipirpnHoopDesc__i_; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cKRegionNodeGis_CFG6kM_i_; +text: .text%__1cHCompileRvalid_bundle_info6MpknENode__i_; +text: .text%__1cIProjNodeGOpcode6kM_i_; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__; +text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_; +text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__; +text: 
.text%__1cPVirtualCallDataKcell_count6M_i_; +text: .text%__1cIProjNodeGpinned6kM_i_; +text: .text%__1cENodeMcisc_operand6kM_i_; +text: .text%__1cRInterpreterOopMapLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cMMachCallNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cMloadConINodePoper_input_base6kM_I_; +text: .text%__1cMloadConINodeHtwo_adr6kM_I_; +text: .text%__1cHNTarjanEEVAL6M_p0_; +text: .text%__1cNMachIdealNodeErule6kM_I_; +text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_; +text: .text%__1cKHandleMark2T6M_v_; +text: .text%__1cETypeLisa_oop_ptr6kM_i_; +text: .text%__1cOPhaseIdealLoopUbuild_loop_late_post6MpnENode_pk0_v_; +text: .text%__1cENode2t6MI_v_; +text: .text%__1cJloadPNodeErule6kM_I_; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__; +text: .text%__1cMloadConINodeErule6kM_I_; +text: .text%__1cICodeHeapKfind_start6kMpv_1_; +text: .text%__1cWShouldNotReachHereNodeNis_block_proj6kM_pknENode__; +text: .text%__1cHTypeIntCeq6kMpknEType__i_; +text: .text%__1cLProfileDataPfollow_contents6M_v_; +text: .text%__1cLProfileDataPadjust_pointers6M_v_; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__; +text: .text%__1cHRegMaskMClearToPairs6M_v_; +text: .text%__1cJPhaseLiveLadd_liveout6MpnFBlock_IrnJVectorSet__v_; +text: .text%__1cLemit_opcode6FrnKCodeBuffer_i_v_; +text: .text%__1cIPhaseIFGQeffective_degree6kMI_i_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__; +text: .text%__1cFArenaIArealloc6MpvLL_1_; +text: .text%__1cGIfNodeGOpcode6kM_i_; +text: .text%__1cHTypePtrEhash6kM_i_; +text: .text%__1cIPhaseIFGLremove_node6MI_pnIIndexSet__; +text: .text%__1cIPhaseIFGJre_insert6MI_v_; +text: .text%__1cRMachSpillCopyNodeLbottom_type6kM_pknEType__; +text: .text%__1cMclr_live_bit6Fpii_v_: buildOopMap.o; +text: .text%__1cETypeEmeet6kMpk0_2_; +text: .text%__1cMPhaseChaitinQis_high_pressure6MpnFBlock_pnDLRG_I_i_; +text: .text%__1cKBranchDataKcell_count6M_i_; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cOPhaseIdealLoopYsplit_if_with_blocks_pre6MpnENode__2_; +text: .text%__1cNIdealLoopTreeJis_member6kMpk0_i_; +text: .text%__1cOPhaseIdealLoopZsplit_if_with_blocks_post6MpnENode__v_; +text: .text%__1cDfh16FI_i_; +text: .text%__1cJraw_score6Fdd_d_: chaitin.o; +text: .text%__1cDLRGFscore6kM_d_; +text: .text%__1cKTypeOopPtrEhash6kM_i_; +text: .text%__1cIAddPNodeGOpcode6kM_i_; +text: .text%__1cKIfTrueNodeGOpcode6kM_i_; +text: .text%__1cMPhaseChaitinMchoose_color6MrnDLRG_i_i_; +text: .text%__1cMPhaseIterGVNNtransform_old6MpnENode__2_; +text: .text%__1cGcmpkey6Fpkv1_i_; +text: .text%__1cETypeJsingleton6kM_i_; +text: .text%__1cIMachNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cSPSPromotionManagerMdrain_stacks6M_v_; +text: .text%__1cHConNodeGOpcode6kM_i_; +text: .text%__1cITypeLongCeq6kMpknEType__i_; +text: .text%__1cUParallelScavengeHeapVlarge_typearray_limit6M_L_; +text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cMPhaseChaitinKbias_color6MrnDLRG_i_i_; +text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_; +text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_; +text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__; +text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_; +text: .text%__1cRMachSpillCopyNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cMMachProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cLOptoRuntimeXdeoptimize_caller_frame6FpnKJavaThread_i_v_; 
+text: .text%__1cSCallStaticJavaNodeGOpcode6kM_i_; +text: .text%__1cYCallStaticJavaDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cJCodeCacheFalive6FpnICodeBlob__2_; +text: .text%__1cHRegMaskQis_aligned_Pairs6kM_i_; +text: .text%__1cSis_single_register6FI_i_: postaloc.o; +text: .text%__1cRMachSpillCopyNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMPhaseIterGVNWadd_users_to_worklist06MpnENode__v_; +text: .text%__1cECopyYconjoint_words_to_higher6FpnIHeapWord_2L_v_; +text: .text%__1cILRG_ListGextend6MII_v_; +text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_; +text: .text%__1cIciObjectGequals6Mp0_i_; +text: .text%__1cJPhaseLiveGgetset6MpnFBlock__pnIIndexSet__; +text: .text%__1cIConINodeGOpcode6kM_i_; +text: .text%__1cENodeRis_cisc_alternate6kM_i_; +text: .text%__1cLIfFalseNodeGOpcode6kM_i_; +text: .text%__1cKNode_ArrayGinsert6MIpnENode__v_; +text: .text%__1cLCounterDataKcell_count6M_i_; +text: .text%__1cWThreadLocalAllocBufferFreset6M_v_; +text: .text%__1cMMachProjNodeGOpcode6kM_i_; +text: .text%__1cENodeEgrow6MI_v_; +text: .text%__1cIMachNodePcompute_padding6kMi_i_; +text: .text%__1cKup_one_dom6FpnENode__1_: ifnode.o; +text: .text%__1cIMachNodeSalignment_required6kM_i_; +text: .text%__1cOPhaseIdealLoopEsort6MpnNIdealLoopTree_2_2_; +text: .text%__1cFframeOis_entry_frame6kM_i_; +text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_L_pnIHeapWord__; +text: .text%__1cUParallelScavengeHeapVunsafe_max_tlab_alloc6kM_L_; +text: .text%__1cHNTarjanICOMPRESS6M_v_; +text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_; +text: .text%__1cFMutexGunlock6M_v_; +text: .text%__1cJMultiNodeIis_Multi6M_p0_; +text: .text%__1cIBoolNodeGOpcode6kM_i_; +text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_; +text: .text%__1cUParallelScavengeHeapRallocate_new_tlab6ML_pnIHeapWord__; +text: .text%__1cKSharedHeapXfill_region_with_object6FnJMemRegion__v_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6MpnIHeapWord_22_v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_; +text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2L_v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_; +text: .text%__1cKTypeOopPtrCeq6kMpknEType__i_; +text: .text%__1cHTypePtrCeq6kMpknEType__i_; +text: .text%__1cRMachSpillCopyNodePoper_input_base6kM_I_; +text: .text%__1cIIndexSetFclear6M_v_; +text: .text%__1cHTypeIntJsingleton6kM_i_; +text: .text%__1cJPhaseLiveLadd_liveout6MpnFBlock_pnIIndexSet_rnJVectorSet__v_; +text: .text%__1cECopyXconjoint_words_to_lower6FpnIHeapWord_2L_v_; +text: .text%__1cNRelocIteratorKset_limits6MpC1_v_; +text: .text%__1cNRelocIteratorKinitialize6MlpnICodeBlob_pC3_v_; +text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cITypeNodeJideal_reg6kM_I_; +text: .text%__1cFframeUis_interpreted_frame6kM_i_; +text: .text%__1cHTypeIntEhash6kM_i_; +text: .text%__1cIPhaseGVNJtransform6MpnENode__2_; +text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__; +text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_; +text: .text%__1cIMachNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cIciObjectEhash6M_i_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__; +text: .text%__1cFState2T6M_v_; +text: .text%__1cPVirtualCallDataPfollow_contents6M_v_; +text: .text%__1cPVirtualCallDataPadjust_pointers6M_v_; +text: .text%__1cENodeGis_Con6kM_I_; +text: .text%__1cJrRegIOperJnum_edges6kM_I_; +text: .text%__1cENodeIget_long6kM_x_; +text: .text%__1cNCellTypeStateFmerge6kM0i_0_; 
+text: .text%__1cOPhaseIdealLoopUbuild_loop_tree_impl6MpnENode_i_i_; +text: .text%__1cWNode_Backward_IteratorEnext6M_pnENode__; +text: .text%__1cHemit_rm6FrnKCodeBuffer_iii_v_; +text: .text%__1cHPhiNodeGpinned6kM_i_; +text: .text%__1cITypeNodeEhash6kM_I_; +text: .text%__1cIIndexSetKinitialize6MIpnFArena__v_; +text: .text%__1cJPhaseLiveKgetfreeset6M_pnIIndexSet__; +text: .text%__1cMMachProjNodeJideal_reg6kM_I_; +text: .text%__1cHPhiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIParmNodeGis_CFG6kM_i_; +text: .text%__1cENodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cKRegionNodeGOpcode6kM_i_; +text: .text%__1cMMachProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOptoRuntimeFnew_C6FpnMklassOopDesc_pnKJavaThread__v_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__; +text: .text%__1cIProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_; +text: .text%__1cLTypeInstPtrEhash6kM_i_; +text: .text%__1cLuse_dom_lca6FpnFBlock_pnENode_3rnLBlock_Array__1_: gcm.o; +text: .text%__1cITypeLongEhash6kM_i_; +text: .text%__1cIBoolNodeHis_Bool6M_p0_; +text: .text%__1cOPhaseIdealLoopOget_early_ctrl6MpnENode__2_; +text: .text%__1cOPhaseIdealLoopOset_early_ctrl6MpnENode__v_; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__; +text: .text%__1cIJumpDataKcell_count6M_i_; +text: .text%__1cHNTarjanELINK6Mp01_v_; +text: .text%__1cENodeIIdentity6MpnOPhaseTransform__p0_; +text: .text%__1cIMachNode2t6M_v_; +text: .text%__1cFStateRMachOperGenerator6MipnIMachNode_pnHCompile__pnIMachOper__; +text: .text%__1cRPSOldPromotionLABFflush6M_v_; +text: .text%__1cICallNodeHis_Call6M_p0_; +text: .text%__1cENodeFIdeal6MpnIPhaseGVN_i_p0_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__; +text: .text%__1cENodeNis_block_proj6kM_pk0_; +text: .text%__1cUParallelScavengeHeapPis_in_permanent6kMpkv_i_; +text: .text%__1cHdom_lca6FpnFBlock_1_1_: gcm.o; +text: .text%__1cOPhaseIdealLoopThas_local_phi_input6MpnENode__2_; +text: .text%__1cJVectorSet2F6kMI_i_; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cICallNodeLbottom_type6kM_pknEType__; +text: .text%JVM_ArrayCopy; +text: .text%__1cOtypeArrayKlassQoop_is_typeArray6kM_i_; +text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cIMachNodeLbottom_type6kM_pknEType__; +text: .text%__1cPClassFileParserOcheck_property6MipkcipnGThread__v_; +text: .text%__1cFState2t6M_v_; +text: .text%__1cFStateDDFA6MipknENode__i_; +text: .text%__1cJPhaseLiveHfreeset6MpknFBlock__v_; +text: .text%__1cOPhaseIdealLoopbIdom_lca_for_get_late_ctrl_internal6MpnENode_22_2_; +text: .text%__1cKRegionNodeGpinned6kM_i_; +text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__; +text: .text%__1cRSignatureIteratorGexpect6Mc_v_; +text: .text%__1cIProjNodeEhash6kM_I_; +text: .text%__1cENodeFclone6kM_p0_; +text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cIPhaseCCPOtransform_once6MpnENode__2_; +text: .text%__1cHCompileMFillLocArray6MpnENode_pnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cHRegMaskPfind_first_pair6kM_i_; +text: .text%__1cENodeKmatch_edge6kMI_I_; +text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__; +text: 
.text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cNPhaseRegAllocKreg2offset6kMi_i_; +text: .text%__1cNPhaseRegAllocUreg2offset_unchecked6kMi_i_; +text: .text%__1cbAfinal_graph_reshaping_impl6FpnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cETypeFxmeet6kMpk0_2_; +text: .text%__1cENodeFis_If6M_pnGIfNode__; +text: .text%__1cJStartNodeLbottom_type6kM_pknEType__; +text: .text%__1cICmpPNodeGOpcode6kM_i_; +text: .text%__1cOMethodLivenessKBasicBlockXcompute_gen_kill_single6MpnQciBytecodeStream__v_; +text: .text%__1cKjmpDirNodeMideal_Opcode6kM_i_; +text: .text%__1cOPhaseIdealLoopZremix_address_expressions6MpnENode__2_; +text: .text%__1cFMutexElock6MpnGThread__v_; +text: .text%__1cILoadNodeHis_Load6M_p0_; +text: .text%__1cGciTypeEmake6FnJBasicType__p0_; +text: .text%__1cQUnique_Node_ListGremove6MpnENode__v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_; +text: .text%__1cRMachSafePointNodeEjvms6kM_pnIJVMState__; +text: .text%__1cNinstanceKlassMclass_loader6kM_pnHoopDesc__; +text: .text%__1cOPhaseIdealLoopNget_late_ctrl6MpnENode_2_2_; +text: .text%__1cRMachSpillCopyNodeOimplementation6kMpnKCodeBuffer_pnNPhaseRegAlloc_i_I_; +text: .text%__1cKTypeAryPtrEhash6kM_i_; +text: .text%__1cIIndexSet2t6Mp0_v_; +text: .text%__1cNrFlagsRegOperEtype6kM_pknEType__; +text: .text%__1cHTypeInt2t6Miii_v_; +text: .text%__1cTconstantPoolOopDescbAname_and_type_ref_index_at6Mi_i_; +text: .text%__1cMPhaseChaitinSuse_prior_register6MpnENode_I2pnFBlock_rnJNode_List_6_i_; +text: .text%__1cIGraphKitHstopped6M_i_; +text: .text%__1cKTypeOopPtrJsingleton6kM_i_; +text: .text%__1cENodeQIdeal_DU_postCCP6MpnIPhaseCCP__p0_; +text: .text%__1cNSafePointNodeGpinned6kM_i_; +text: .text%__1cQCompressedStream2t6MpCi_v_; +text: .text%__1cIConLNodeGOpcode6kM_i_; +text: .text%__1cHPhiNodeEhash6kM_I_; +text: .text%__1cGIfNodeGpinned6kM_i_; +text: .text%__1cOis_diamond_phi6FpnENode__i_: cfgnode.o; +text: .text%__1cLTypeInstPtrCeq6kMpknEType__i_; +text: .text%__1cENodeHsize_of6kM_I_; +text: .text%__1cENodeSremove_dead_region6MpnIPhaseGVN_i_i_; +text: .text%__1cHRegMaskMSmearToPairs6M_v_; +text: .text%__1cSCallStaticJavaNodeEhash6kM_I_; +text: .text%jni_GetObjectField: jni.o; +text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_; +text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cKNode_ArrayEgrow6MI_v_; +text: .text%__1cLklassVtableToop_adjust_pointers6M_v_; +text: .text%__1cLklassVtableToop_follow_contents6M_v_; +text: .text%__1cCosOis_interrupted6FpnGThread_i_i_; +text: .text%__1cICmpINodeGOpcode6kM_i_; +text: .text%__1cHMatcherKLabel_Root6MpknENode_pnFState_p16_6_; +text: .text%__1cMPhaseChaitinHnew_lrg6MpknENode_I_v_; +text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__; +text: .text%__1cUGenericGrowableArrayMraw_allocate6Mi_pv_; +text: .text%__1cJMarkSweepNpreserve_mark6FpnHoopDesc_pnLmarkOopDesc__v_; +text: .text%__1cIMachNodeKconst_size6kM_i_; +text: .text%__1cGIfNodeFis_If6M_p0_; +text: .text%__1cINodeHashLhash_insert6MpnENode__v_; +text: .text%__1cOPhaseIdealLoopMis_dominator6MpnENode_2_i_; +text: .text%__1cKTypeOopPtrLxadd_offset6kMi_i_; +text: .text%JVM_Read; +text: .text%__1cDhpiEread6FipvI_L_; +text: .text%__1cIMachNodeIpeephole6MpnFBlock_ipnNPhaseRegAlloc_ri_p0_; +text: .text%__1cMPhaseIterGVNVadd_users_to_worklist6MpnENode__v_; +text: .text%__1cIHaltNodeGis_CFG6kM_i_; +text: 
.text%__1cENodeKis_PCTable6kM_pknLPCTableNode__; +text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_; +text: .text%__1cMMergeMemNodeGOpcode6kM_i_; +text: .text%__1cPciObjectFactoryEfind6MpnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cJCodeCacheQfind_blob_unsafe6Fpv_pnICodeBlob__; +text: .text%__1cHemit_d86FrnKCodeBuffer_i_v_; +text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffer_ipnLOopRecorder_pkcii_v_; +text: .text%__1cLOopRecorder2t6MpnFArena__v_; +text: .text%__1cKCodeBuffer2T6M_v_; +text: .text%__1cKCodeBufferQalloc_relocation6MI_v_; +text: .text%__1cIMachNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIMachNodeJemit_size6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLTypeInstPtr2t6MnHTypePtrDPTR_pnHciKlass_ipnIciObject_i_v_; +text: .text%__1cLOptoRuntimeKjbyte_copy6FpW1L_v_; +text: .text%__1cJloadINodeErule6kM_I_; +text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_; +text: .text%__1cPciObjectFactoryLis_found_at6MipnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode__i_; +text: .text%__1cIAddINodeGOpcode6kM_i_; +text: .text%__1cWShouldNotReachHereNodeMideal_Opcode6kM_i_; +text: .text%__1cOmatch_into_reg6FpnENode_iii1_i_: matcher.o; +text: .text%__1cENodeJis_Branch6kM_I_; +text: .text%__1cITypeLong2t6Mxxi_v_; +text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cIJVMStateIof_depth6kMi_p0_; +text: .text%__1cFMutexElock6M_v_; +text: .text%__1cJrRegLOperEtype6kM_pknEType__; +text: .text%__1cENode2t6Mp0_v_; +text: .text%__1cLTypeInstPtrEmake6FnHTypePtrDPTR_pnHciKlass_ipnIciObject_i_pk0_; +text: .text%__1cJStartNodeGpinned6kM_i_; +text: .text%__1cHhashptr6Fpkv_i_; +text: .text%__1cLklassItableToop_adjust_pointers6M_v_; +text: .text%__1cLklassItableToop_follow_contents6M_v_; +text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_; +text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_; +text: .text%__1cFBlockLis_uncommon6kMrnLBlock_Array__i_; +text: .text%__1cEDict2F6kMpkv_pv_; +text: .text%__1cIProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cNCatchProjNodeGOpcode6kM_i_; +text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__; +text: .text%__1cICallNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJdo_method6FpnNmethodOopDesc__v_: recompilationMonitor.o; +text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cKjmpConNodeMideal_Opcode6kM_i_; +text: .text%__1cJLoadPNodeGOpcode6kM_i_; +text: .text%__1cLSymbolTableGlookup6MipkciI_pnNsymbolOopDesc__; +text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cOindOffset8OperJnum_edges6kM_I_; +text: .text%__1cKBufferBlobIis_alive6kM_i_; +text: .text%__1cFframeGsender6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cJTypeTupleJsingleton6kM_i_; +text: .text%__1cKTypeAryPtrCeq6kMpknEType__i_; +text: .text%__1cHPhiNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cETypeKhas_memory6kM_i_; +text: .text%__1cHMatcherKReduceOper6MpnFState_ipnIMachNode_rpnENode__v_; +text: 
.text%__1cGIfNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMachNodeFreloc6kM_i_; +text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__; +text: .text%__1cOThreadCritical2T6M_v_; +text: .text%__1cOThreadCritical2t6M_v_; +text: .text%__1cFframeOis_first_frame6kM_i_; +text: .text%__1cICodeBlobOis_osr_adapter6kM_i_; +text: .text%__1cHConNodeGis_Con6kM_I_; +text: .text%__1cIAddPNodeKmatch_edge6kMI_I_; +text: .text%__1cILoadNodeEhash6kM_I_; +text: .text%__1cRMachSpillCopyNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cICodeBlobTfix_oop_relocations6MpC1_v_; +text: .text%__1cICodeBlobTfix_oop_relocations6M_v_; +text: .text%__1cIimmIOperIconstant6kM_l_; +text: .text%__1cHCmpNodeGis_Cmp6kM_pk0_; +text: .text%__1cFMutexbClock_without_safepoint_check6M_v_; +text: .text%__1cENodeHdel_req6MI_v_; +text: .text%__1cJCProjNodeEhash6kM_I_; +text: .text%__1cHCompileJcan_alias6MpknHTypePtr_i_i_; +text: .text%__1cJMultiNodeEhash6kM_I_; +text: .text%__1cRSignatureIteratorKparse_type6M_i_; +text: .text%__1cHnmethodIis_alive6kM_i_; +text: .text%__1cIHaltNodeGOpcode6kM_i_; +text: .text%__1cMMergeMemNodeLis_MergeMem6M_p0_; +text: .text%__1cUPSMarkSweepDecoratorQinsert_deadspace6MrlpnIHeapWord_L_i_; +text: .text%__1cHPhiNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMPhaseIterGVNZremove_globally_dead_node6MpnENode__v_; +text: .text%__1cETypeEhash6kM_i_; +text: .text%__1cJHashtableLhash_symbol6Fpkci_I_; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pnIciObject_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cYDebugInformationRecorderLcheck_phase6Mn0AFPhase__v_; +text: .text%__1cOMachReturnNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_; +text: .text%__1cICodeBlobKis_nmethod6kM_i_; +text: .text%__1cICmpUNodeGOpcode6kM_i_; +text: .text%__1cJOopMapSetSfind_map_at_offset6kMi_pnGOopMap__; +text: .text%__1cICodeBlobbAoop_map_for_return_address6MpC_pnGOopMap__; +text: .text%__1cIConPNodeGOpcode6kM_i_; +text: .text%__1cIMachNodeHtwo_adr6kM_I_; +text: .text%__1cIParmNodeGOpcode6kM_i_; +text: .text%__1cTconvI2L_reg_regNodeErule6kM_I_; +text: .text%__1cHTypeIntFxmeet6kMpknEType__3_; +text: .text%__1cLAdapterInfoFequal6kMp0_i_; +text: .text%__1cGOopMapbEmap_compiler_reg_to_oopmap_reg6Miii_i_; +text: .text%__1cTconstantPoolOopDescSklass_ref_index_at6Mi_i_; +text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cMloadConINodeMideal_Opcode6kM_i_; +text: .text%__1cGTarjanEEVAL6M_p0_; +text: .text%__1cKRelocationYindex_to_runtime_address6Fl_pC_; +text: .text%__1cYexternal_word_RelocationLunpack_data6M_v_; +text: .text%__1cJCatchNodeGOpcode6kM_i_; +text: .text%__1cZPhaseConservativeCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cITypeLongJsingleton6kM_i_; +text: .text%__1cNencode_RegMem6FrnKCodeBuffer_iiiiii_v_; +text: .text%__1cFBlockGselect6MrnJNode_List_rnLBlock_Array_pirnJVectorSet_IrnNGrowableArray4CI___pnENode__; +text: .text%__1cGOopMapHset_xxx6MinLOopMapValueJoop_types_iii_v_; +text: .text%__1cFMutexNowned_by_self6kM_i_; +text: .text%__1cTCreateExceptionNodeErule6kM_I_; +text: .text%__1cIJVMStateLdebug_start6kM_I_; +text: .text%__1cRCardTableModRefBSEkind6M_nKBarrierSetEName__; +text: .text%__1cMloadConPNodeMideal_Opcode6kM_i_; +text: .text%__1cIMachNodeNoperand_index6kMI_i_; +text: .text%__1cKMachIfNodeJis_MachIf6kM_pk0_; +text: .text%__1cFBlockIis_Empty6kM_i_; +text: 
.text%__1cMMachTypeNodeLbottom_type6kM_pknEType__; +text: .text%__1cIAddPNodeLbottom_type6kM_pknEType__; +text: .text%__1cFBlockOcode_alignment6M_I_; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_; +text: .text%__1cGBitMapUclear_range_of_words6MLL_v_; +text: .text%__1cGBitMapFclear6M_v_; +text: .text%__1cSinstanceKlassKlassMoop_is_klass6kM_i_; +text: .text%__1cJrRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKCastPPNodeGOpcode6kM_i_; +text: .text%__1cIPhaseIFGMtest_edge_sq6kMII_i_; +text: .text%__1cFframeQoops_do_internal6MpnKOopClosure_pnLRegisterMap_i_v_; +text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__; +text: .text%__1cFStateRMachNodeGenerator6MipnHCompile__pnIMachNode__; +text: .text%__1cHMatcherKReduceInst6MpnFState_irpnENode__pnIMachNode__; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__; +text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_; +text: .text%__1cMMachCallNodeGpinned6kM_i_; +text: .text%__1cHnmethodJis_zombie6kM_i_; +text: .text%__1cMMergeMemNodeLbottom_type6kM_pknEType__; +text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cKTypeAryPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cKCodeBufferIrelocate6MpCrknQRelocationHolder_i_v_; +text: .text%__1cILoadNodeLbottom_type6kM_pknEType__; +text: .text%__1cLConvI2LNodeGOpcode6kM_i_; +text: .text%__1cHTypeIntEmake6Fi_pk0_; +text: .text%__1cbFCompressedLineNumberWriteStreamKwrite_pair6Mii_v_; +text: .text%__1cJMultiNodeIproj_out6kMI_pnIProjNode__; +text: .text%__1cFKlassMoop_is_array6kM_i_; +text: .text%__1cIBoolNodeEhash6kM_I_; +text: .text%__1cIimmPOperEtype6kM_pknEType__; +text: .text%__1cMloadConPNodeLbottom_type6kM_pknEType__; +text: .text%__1cJrRegPOperJnum_edges6kM_I_; +text: .text%__1cOrFlagsRegUOperEtype6kM_pknEType__; +text: .text%__1cGIfNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_; +text: .text%__1cNsymbolOopDescLas_C_string6kMpci_1_; +text: .text%__1cGOopMapJset_value6Miii_v_; +text: .text%__1cITypeLongFxmeet6kMpknEType__3_; +text: .text%__1cNCollectedHeapbHcheck_for_non_bad_heap_word_value6MpnIHeapWord_L_v_; +text: .text%__1cHMatcherTReduceInst_Interior6MpnFState_ipnIMachNode_IrpnENode__I_; +text: .text%__1cNsymbolOopDescGequals6kMpkci_i_; +text: .text%__1cMPhaseChaitinLinsert_proj6MpnFBlock_IpnENode_I_v_; +text: .text%__1cKjmpConNodeGpinned6kM_i_; +text: .text%__1cNSafePointNodebBneeds_polling_address_input6F_i_; +text: .text%__1cHCompileRprobe_alias_cache6MpknHTypePtr__pn0APAliasCacheEntry__; +text: .text%__1cHnmethodOis_not_entrant6kM_i_; +text: .text%__1cIAddPNodeHis_AddP6M_p0_; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6FnTobjArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cIMachNodeGExpand6MpnFState_rnJNode_List__p0_; +text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_; +text: .text%__1cNinstanceKlassQarray_klass_impl6FnTinstanceKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cENodeIdestruct6M_v_; +text: .text%__1cIAddPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPClassFileParserbEparse_constant_pool_utf8_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_; +text: .text%__1cMMachHaltNodeEjvms6kM_pnIJVMState__; +text: 
.text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cGBitMapJset_union6M0_v_; +text: .text%__1cPciInstanceKlassMis_interface6M_i_; +text: .text%__1cHTypeIntEmake6Fiii_pk0_; +text: .text%__1cJTypeTupleEhash6kM_i_; +text: .text%__1cFParsePdo_one_bytecode6M_v_; +text: .text%__1cFParseNdo_exceptions6M_v_; +text: .text%__1cKRegionNodeEhash6kM_I_; +text: .text%__1cMMutableSpaceIallocate6ML_pnIHeapWord__; +text: .text%__1cUParallelScavengeHeapWpermanent_mem_allocate6ML_pnIHeapWord__; +text: .text%__1cJPSPermGenSallocate_permanent6ML_pnIHeapWord__; +text: .text%__1cPmethodDataKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cIHaltNodeKmatch_edge6kMI_I_; +text: .text%__1cENodeOis_block_start6kM_i_; +text: .text%__1cHMatcherQis_save_on_entry6Mi_i_; +text: .text%__1cLsymbolKlassNoop_is_symbol6kM_i_; +text: .text%__1cITypeLongEmake6Fxxi_pk0_; +text: .text%__1cPmethodDataKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cPmethodDataKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMPhaseIterGVNKis_IterGVN6M_p0_; +text: .text%__1cGBitMap2t6MpLL_v_; +text: .text%__1cLOptoRuntimePnew_typeArray_C6FnJBasicType_ipnKJavaThread__v_; +text: .text%__1cHCompilePfind_alias_type6MpknHTypePtr_i_pn0AJAliasType__; +text: .text%__1cNnew_loc_value6FpnNPhaseRegAlloc_inILocationEType__pnNLocationValue__: output.o; +text: .text%__1cKjmpDirNodePoper_input_base6kM_I_; +text: .text%__1cMMachCallNodeLis_MachCall6M_p0_; +text: .text%__1cHPhiNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cXjava_lang_ref_ReferenceNreferent_addr6FpnHoopDesc__p2_; +text: .text%__1cOindOffset8OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cOindOffset8OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cZPhaseConservativeCoalesceJcopy_copy6MpnENode_2pnFBlock_I_i_; +text: .text%__1cKutf8_write6FpCH_0_: utf8.o; +text: .text%__1cKNode_ArrayGremove6MI_v_; +text: .text%__1cTconstantPoolOopDescQsignature_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cHPhiNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_; +text: .text%__1cJloadPNodeMideal_Opcode6kM_i_; +text: .text%__1cPSignatureStreamEnext6M_v_; +text: .text%__1cOindOffset8OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLjmpConUNodeMideal_Opcode6kM_i_; +text: .text%__1cFBlockJfind_node6kMpknENode__I_; +text: .text%__1cOis_range_check6FpnENode_r12ri_i_: ifnode.o; +text: .text%__1cENodeIis_Start6M_pnJStartNode__; +text: .text%__1cHhashkey6Fpkv_i_; +text: .text%__1cNLoadRangeNodeGOpcode6kM_i_; +text: .text%__1cJLoadINodeGOpcode6kM_i_; +text: .text%__1cKis_x2logic6FpnIPhaseGVN_pnENode__3_: cfgnode.o; +text: .text%__1cHAbsNodeLis_absolute6FpnIPhaseGVN_pnENode__4_; +text: .text%__1cGTarjanICOMPRESS6M_v_; +text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cENodeHis_Goto6kM_I_; +text: .text%__1cLPCTableNodeGpinned6kM_i_; +text: .text%JVM_ReleaseUTF; +text: .text%__1cHSubNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFChunk2t6ML_v_; +text: .text%__1cFChunk2n6FLL_pv_; +text: .text%__1cOindOffset8OperFscale6kM_i_; +text: .text%__1cMCreateExNodeGOpcode6kM_i_; +text: .text%__1cFframebCsender_for_interpreter_frame6kMpnLRegisterMap__0_; +text: .text%__1cFChunk2k6Fpv_v_; +text: .text%__1cOMachReturnNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cJrelocInfo2t6Mn0AJrelocType_ii_v_; +text: 
.text%__1cHMatcherKmatch_tree6MpknENode__pnIMachNode__; +text: .text%__1cLTypeInstPtrFxmeet6kMpknEType__3_; +text: .text%__1cKjmpConNodePoper_input_base6kM_I_; +text: .text%__1cIJVMState2t6MpnIciMethod_p0_v_; +text: .text%__1cMciMethodDataHdata_at6Mi_pnLProfileData__; +text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_; +text: .text%__1cIProjNodeHsize_of6kM_I_; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__; +text: .text%__1cLTypeInstPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cHBitDataKcell_count6M_i_; +text: .text%__1cFframeZsender_for_compiled_frame6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cFArenaEgrow6ML_pv_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cMPhaseIterGVNMsubsume_node6MpnENode_2_v_; +text: .text%__1cLBoxLockNodeNrematerialize6kM_i_; +text: .text%__1cRMachSafePointNodeQis_MachSafePoint6M_p0_; +text: .text%__1cJloadBNodeErule6kM_I_; +text: .text%__1cITypeNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cHConNodeEhash6kM_I_; +text: .text%__1cICodeBlobLlink_offset6M_i_; +text: .text%__1cENodeRdisconnect_inputs6Mp0_i_; +text: .text%__1cIHaltNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cICodeBlobOis_i2c_adapter6kM_i_; +text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_; +text: .text%__1cIJVMStateNclone_shallow6kM_p0_; +text: .text%__1cRMachSpillCopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGOopMapQset_callee_saved6Miiii_v_; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__; +text: .text%__1cJOopMapSetTupdate_register_map6FpknFframe_pnICodeBlob_pnLRegisterMap__v_; +text: .text%__1cIJVMStateJdebug_end6kM_I_; +text: .text%__1cXjava_lang_ref_ReferenceJnext_addr6FpnHoopDesc__p2_; +text: .text%__1cENode2t6Mp011_v_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__; +text: .text%__1cHMatcherTcollect_null_checks6MpnENode__v_; +text: .text%__1cNSafePointNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHMemNodeMIdeal_common6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cGBitMapGat_put6MLi_v_; +text: .text%__1cFciEnvIis_in_vm6F_i_; +text: .text%__1cNSafePointNodeHsize_of6kM_I_; +text: .text%__1cMgetTimeNanos6F_x_; +text: .text%__1cKciTypeFlowLStateVectorSapply_one_bytecode6MpnQciBytecodeStream__i_; +text: .text%__1cKPSScavengeUoop_promotion_failed6FpnHoopDesc_pnLmarkOopDesc__v_; +text: .text%__1cHTypePtrLmeet_offset6kMi_i_; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_; +text: .text%__1cNPhaseRegAllocGis_oop6kMpknENode__i_; +text: .text%__1cSPSPromotionManagerUoop_promotion_failed6MpnHoopDesc_pnLmarkOopDesc__2_; +text: .text%__1cIMachOperLdisp_is_oop6kM_i_; +text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_; +text: .text%__1cNmethodOopDescIbci_from6kMpC_i_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_; +text: .text%__1cLLShiftLNodeGOpcode6kM_i_; +text: .text%__1cOMethodLivenessKBasicBlockIload_one6Mi_v_; +text: .text%__1cJTypeTupleCeq6kMpknEType__i_; +text: .text%__1cHCmpNodeLbottom_type6kM_pknEType__; +text: .text%__1cFDictI2i6M_v_; +text: .text%__1cPjava_lang_ClassMis_primitive6FpnHoopDesc__i_; +text: .text%JVM_GetMethodIxExceptionTableLength; +text: .text%__1cKC2IAdapterIis_alive6kM_i_; +text: .text%__1cENodeHget_int6kM_i_; +text: .text%__1cMMachCallNodeLbottom_type6kM_pknEType__; +text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_; +text: .text%__1cIAddPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMloadConINodeLbottom_type6kM_pknEType__; +text: 
.text%__1cPCheckCastPPNodeGOpcode6kM_i_; +text: .text%__1cMloadConINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeXis_iteratively_computed6M_i_; +text: .text%__1cNloadConI0NodePoper_input_base6kM_I_; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_; +text: .text%__1cENodeKreplace_by6Mp0_v_; +text: .text%__1cHPhiNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cIBoolTestKcc2logical6kMpknEType__3_; +text: .text%__1cIBoolNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJrelocInfoKset_format6Mi_v_; +text: .text%__1cXruntime_call_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cKRelocationJpack_data6M_i_; +text: .text%__1cXjava_lang_ref_ReferencePdiscovered_addr6FpnHoopDesc__p2_; +text: .text%__1cNPhaseCoalesceRcombine_these_two6MpnENode_2_v_; +text: .text%__1cNtestP_regNodeMideal_Opcode6kM_i_; +text: .text%__1cETypeFempty6kM_i_; +text: .text%__1cHMemNodeGis_Mem6M_p0_; +text: .text%__1cZload_can_see_stored_value6FpnILoadNode_pnENode_pnOPhaseTransform__3_: memnode.o; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cICodeBlobJis_zombie6kM_i_; +text: .text%__1cJTypeTupleGfields6FI_ppknEType__; +text: .text%__1cOGenerateOopMapPjump_targets_do6MpnOBytecodeStream_pFp0ipi_v4_i_; +text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__; +text: .text%method_compare: methodOop.o; +text: .text%__1cMMergeMemNodeEhash6kM_I_; +text: .text%__1cJloadPNodePoper_input_base6kM_I_; +text: .text%__1cILoadNodeKmatch_edge6kMI_I_; +text: .text%__1cFBlockUneeded_for_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_; +text: .text%__1cIIndexSetSpopulate_free_list6F_v_; +text: .text%__1cGIfNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cCosPelapsed_counter6F_x_; +text: .text%__1cIAddPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIemit_d326FrnKCodeBuffer_i_v_; +text: .text%__1cNloadConI0NodeHtwo_adr6kM_I_; +text: .text%__1cJStoreNodeKmatch_edge6kMI_I_; +text: .text%__1cITypeNodeDcmp6kMrknENode__I_; +text: .text%__1cMPhaseChaitinFUnion6MpknENode_3_v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_; +text: .text%__1cRSignatureIteratorTcheck_signature_end6M_v_; +text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_; +text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cHnmethodKis_nmethod6kM_i_; +text: .text%__1cILoadNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNSafePointNodeKmatch_edge6kMI_I_; +text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_; +text: .text%__1cSCallStaticJavaNodeRis_CallStaticJava6kM_pk0_; +text: .text%__1cKarrayKlassLobject_size6kMi_i_; +text: .text%__1cPciInstanceKlassRis_instance_klass6M_i_; +text: .text%__1cOno_flip_branch6FpnFBlock__i_: block.o; +text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cYCallStaticJavaDirectNodePoper_input_base6kM_I_; +text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_; +text: .text%__1cKjmpDirNodeGpinned6kM_i_; +text: .text%__1cTconstantPoolOopDescLname_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cKjmpDirNodeHtwo_adr6kM_I_; +text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__; +text: .text%__1cJrRegLOperJnum_edges6kM_I_; +text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__; +text: .text%__1cNGrowableArray4CpnKScopeValue__2t6Mii_v_; +text: .text%__1cYDebugInformationRecorderWserialize_scope_values6MpnNGrowableArray4CpnKScopeValue____i_; +text: 
.text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__; +text: .text%__1cITypeNodeHsize_of6kM_I_; +text: .text%__1cILoadNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPPerfLongVariantGsample6M_v_; +text: .text%__1cNloadConI0NodeErule6kM_I_; +text: .text%__1cJCodeCacheIcontains6Fpv_i_; +text: .text%__1cQciBytecodeStreamMreset_to_bci6Mi_v_; +text: .text%__1cJOopMapSetGall_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure_pFppnHoopDesc_9E_v9B9B_v_; +text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cJOopMapSetHoops_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cHPhiNodeHsize_of6kM_I_; +text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_; +text: .text%__1cMPhaseChaitinNFind_compress6MI_I_; +text: .text%__1cIMachNodeRget_base_and_disp6kMrlrpknHTypePtr__pknENode__; +text: .text%__1cLOopMapCacheIentry_at6kMi_pnQOopMapCacheEntry__; +text: .text%__1cOGenerateOopMapKcheck_type6MnNCellTypeState_1_v_; +text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cILoadNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMPhaseIterGVNbGregister_new_node_with_optimizer6MpnENode__2_; +text: .text%__1cKBufferBlobMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_; +text: .text%__1cLRegisterMapFclear6M_v_; +text: .text%__1cIBoolNodeLbottom_type6kM_pknEType__; +text: .text%__1cITypeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKRegionNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLBlock_Array2t6MpnFArena__v_; +text: .text%__1cPSignatureStreamHis_done6kM_i_; +text: .text%__1cNrFlagsRegOperJnum_edges6kM_I_; +text: .text%__1cLPhaseValuesGintcon6Mi_pnIConINode__; +text: .text%__1cKStoreINodeGOpcode6kM_i_; +text: .text%__1cLRegisterMap2t6MpnKJavaThread_i_v_; +text: .text%__1cNmethodOopDescPis_empty_method6kM_i_; +text: .text%__1cJcmpOpOperJnum_edges6kM_I_; +text: .text%__1cNExceptionMark2t6MrpnGThread__v_; +text: .text%__1cNExceptionMark2T6M_v_; +text: .text%__1cCosMvm_page_size6F_i_; +text: .text%__1cJloadPNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cOcompU_rRegNodePoper_input_base6kM_I_; +text: .text%__1cNmethodOopDescLis_accessor6kM_i_; +text: .text%__1cPloadConUL32NodePoper_input_base6kM_I_; +text: .text%__1cMPhaseChaitinSget_spillcopy_wide6MpnENode_2I_2_; +text: .text%__1cKI2CAdapterIis_alive6kM_i_; +text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_; +text: .text%__1cIGraphKitJsync_jvms6kM_pnIJVMState__; +text: .text%__1cFframebFinterpreter_frame_monitor_begin6kM_pnPBasicObjectLock__; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__; +text: .text%__1cQSystemDictionaryXcheck_signature_loaders6FnMsymbolHandle_nGHandle_2ipnGThread__v_; +text: .text%__1cIGraphKitEstop6M_v_; +text: .text%__1cNidealize_test6FpnIPhaseGVN_pnGIfNode__3_: ifnode.o; +text: .text%__1cPloadConUL32NodeHtwo_adr6kM_I_; +text: .text%__1cNSafePointNodeLbottom_type6kM_pknEType__; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_; +text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_; +text: .text%__1cKStorePNodeGOpcode6kM_i_; +text: .text%__1cHMonitorKnotify_all6M_i_; +text: .text%__1cFframeVinterpreter_frame_bci6kM_i_; +text: .text%__1cHRetNodeNis_block_proj6kM_pknENode__; +text: 
.text%__1cMPhaseChaitinMyank_if_dead6MpnENode_pnFBlock_pnJNode_List_6_i_; +text: .text%__1cENodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFBlockLfind_remove6MpknENode__v_; +text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_; +text: .text%__1cGOopMapHset_oop6Miii_v_; +text: .text%__1cNSafePointNodeSset_next_exception6Mp0_v_; +text: .text%__1cPloadConUL32NodeErule6kM_I_; +text: .text%__1cFChunkEchop6M_v_; +text: .text%__1cMciMethodDataJnext_data6MpnLProfileData__2_; +text: .text%__1cJStoreNodeLbottom_type6kM_pknEType__; +text: .text%__1cPciObjectFactorySget_unloaded_klass6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cLBlock_StackXmost_frequent_successor6MpnFBlock__I_; +text: .text%__1cFBlockScall_catch_cleanup6MrnLBlock_Array__v_; +text: .text%__1cFBlockOschedule_local6MrnHMatcher_rnLBlock_Array_pirnJVectorSet_rnNGrowableArray4CI___i_; +text: .text%__1cXPhaseAggressiveCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cOGenerateOopMapFppop16MnNCellTypeState__v_; +text: .text%__1cKstorePNodePoper_input_base6kM_I_; +text: .text%__1cMPhaseIterGVNFwiden6kMpknEType_3_3_; +text: .text%__1cKRegionNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNmethodOopDescRis_not_compilable6kMi_i_; +text: .text%__1cLis_cond_add6FpnIPhaseGVN_pnHPhiNode__pnENode__; +text: .text%__1cPsplit_flow_path6FpnIPhaseGVN_pnHPhiNode__pnENode__: cfgnode.o; +text: .text%__1cKRegionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGTarjanELINK6Mp01_v_; +text: .text%__1cKTypeRawPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cWMutableSpaceUsedHelperLtake_sample6M_x_; +text: .text%__1cIBoolNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIsplit_if6FpnGIfNode_pnMPhaseIterGVN__pnENode__: ifnode.o; +text: .text%__1cTremove_useless_bool6FpnGIfNode_pnIPhaseGVN__pnENode__: ifnode.o; +text: .text%__1cMPhaseChaitinNFind_compress6MpknENode__I_; +text: .text%__1cRInterpreterOopMapKinitialize6M_v_; +text: .text%__1cJTypeTupleEmake6FIppknEType__pk0_; +text: .text%__1cYCallStaticJavaDirectNodeHtwo_adr6kM_I_; +text: .text%__1cKDictionaryJget_entry6MiInMsymbolHandle_nGHandle__pnPDictionaryEntry__; +text: .text%__1cLLShiftINodeGOpcode6kM_i_; +text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_; +text: .text%__1cHnmethodMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__; +text: .text%__1cIMachOperOindex_position6kM_i_; +text: .text%__1cKReflectionTverify_field_access6FpnMklassOopDesc_22nLAccessFlags_ii_i_; +text: .text%__1cOemit_d32_reloc6FrnKCodeBuffer_irknQRelocationHolder_i_v_; +text: .text%__1cLjmpConUNodePoper_input_base6kM_I_; +text: .text%__1cHAddNodeEhash6kM_I_; +text: .text%__1cNtestP_regNodePoper_input_base6kM_I_; +text: .text%__1cHSubNodeGis_Sub6M_p0_; +text: .text%__1cPcheckCastPPNodePoper_input_base6kM_I_; +text: .text%__1cIRootNodeGOpcode6kM_i_; +text: .text%__1cFframebDinterpreter_frame_monitor_end6kM_pnPBasicObjectLock__; +text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_; +text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_; +text: .text%__1cRInterpreterOopMap2T6M_v_; +text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_; +text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cRInterpreterOopMap2t6M_v_; +text: .text%__1cFframeToops_interpreted_do6MpnKOopClosure_pknLRegisterMap_i_v_; 
+text: .text%__1cSInterpreterRuntimeLcache_entry6FpnKJavaThread__pnWConstantPoolCacheEntry__; +text: .text%__1cOindOffset8OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_; +text: .text%__1cKstorePNodeMideal_Opcode6kM_i_; +text: .text%__1cFChunkJnext_chop6M_v_; +text: .text%__1cENode2t6Mp0111_v_; +text: .text%__1cITypeLongEmake6Fx_pk0_; +text: .text%__1cLjmpConUNodeGpinned6kM_i_; +text: .text%__1cPciObjectFactoryNfind_non_perm6MpnHoopDesc__rpn0ANNonPermObject__; +text: .text%__1cHCmpNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKTypeRawPtrJsingleton6kM_i_; +text: .text%__1cNGCTaskManagerNresource_flag6MI_i_; +text: .text%__1cNGCTaskManagerYshould_release_resources6MI_i_; +text: .text%__1cIAddINodeLbottom_type6kM_pknEType__; +text: .text%__1cNloadRangeNodeErule6kM_I_; +text: .text%__1cNrFlagsRegOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIIndexSetJlrg_union6MIIIpknIPhaseIFG_rknHRegMask__I_; +text: .text%__1cISubINodeGOpcode6kM_i_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_; +text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__; +text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__; +text: .text%__1cSCountedLoopEndNodeGOpcode6kM_i_; +text: .text%__1cTOopMapForCacheEntryRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cKstoreINodePoper_input_base6kM_I_; +text: .text%__1cNtestP_regNodeHtwo_adr6kM_I_; +text: .text%__1cCosVcurrent_stack_pointer6F_pC_; +text: .text%__1cMMergeMemNodePiteration_setup6Mpk0_v_; +text: .text%__1cRMachSafePointNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cJStoreNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMMergeMemNodeJmemory_at6kMI_pnENode__; +text: .text%__1cJloadSNodeErule6kM_I_; +text: .text%__1cLLShiftLNodeLbottom_type6kM_pknEType__; +text: .text%__1cMBasicAdapterHoops_do6MpnKOopClosure__v_; +text: .text%__1cKjmpConNodeJnum_opnds6kM_I_; +text: .text%__1cLOptoRuntimeOnew_objArray_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__; +text: .text%__1cFciEnvXget_klass_by_index_impl6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cKjmpDirNodeHsize_of6kM_I_; +text: .text%__1cJloadPNodeJnum_opnds6kM_I_; +text: .text%__1cJCProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJMultiNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cHOopFlowNcompute_reach6MpnNPhaseRegAlloc_ipnEDict__v_; +text: .text%__1cJCatchNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cGGCTaskKinitialize6M_v_; +text: .text%__1cNGCTaskManagerIget_task6MI_pnGGCTask__; +text: .text%__1cNGCTaskManagerWdecrement_busy_workers6M_I_; +text: .text%__1cLGCTaskQdDueueGremove6M_pnGGCTask__; +text: .text%__1cLGCTaskQdDueueHenqueue6MpnGGCTask__v_; +text: .text%__1cNGCTaskManagerWincrement_busy_workers6M_I_; +text: .text%__1cLGCTaskQdDueueHdequeue6M_pnGGCTask__; +text: .text%__1cNGCTaskManagerPnote_completion6MI_v_; +text: .text%__1cHMatcherXadjust_outgoing_stk_arg6Miiri_i_; +text: .text%__1cTCreateExceptionNodeMideal_Opcode6kM_i_; +text: .text%__1cKIfTrueNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_; +text: .text%__1cMBasicAdapterMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cXindIndexScaleOffsetOperJnum_edges6kM_I_; +text: .text%__1cLRShiftINodeGOpcode6kM_i_; +text: .text%__1cJStoreNodeIis_Store6kM_pk0_; +text: .text%Unsafe_CompareAndSwapLong; +text: 
.text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cNtestI_regNodeMideal_Opcode6kM_i_; +text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__; +text: .text%__1cJStoreNodeEhash6kM_I_; +text: .text%__1cOcompI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cKmethodOperGmethod6kM_l_; +text: .text%__1cHTypeAryRary_must_be_exact6kM_i_; +text: .text%__1cKstoreINodeMideal_Opcode6kM_i_; +text: .text%__1cGGCTask2t6M_v_; +text: .text%__1cOcompU_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cNSafePointNodeOnext_exception6kM_p0_; +text: .text%__1cOindOffset8OperNbase_position6kM_i_; +text: .text%__1cOindOffset8OperNconstant_disp6kM_i_; +text: .text%__1cKjmpDirNodeHis_Goto6kM_I_; +text: .text%__1cENodeHset_req6MIp0_v_; +text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cSObjectSynchronizerKfast_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cWShouldNotReachHereNodePoper_input_base6kM_I_; +text: .text%__1cMMergeMemNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cGThreadLis_in_stack6kMpC_i_; +text: .text%__1cLIfFalseNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKJavaThreadNis_lock_owned6kMpC_i_; +text: .text%__1cXAdaptiveWeightedAverageYcompute_adaptive_average6Mff_f_; +text: .text%__1cOcompU_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cJloadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvI2L_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cSPSPromotionManagerbBgc_thread_promotion_manager6Fi_p0_; +text: .text%__1cNmethodOopDescMintrinsic_id6kM_nMvmIntrinsicsCID__; +text: .text%__1cKstorePNodeJnum_opnds6kM_I_; +text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMMergeMemNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__; +text: .text%__1cMMergeMemNodeQclone_all_memory6FpnENode__p0_; +text: .text%__1cIGraphKitJclone_map6M_pnNSafePointNode__; +text: .text%__1cLMachNopNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJchar2type6Fc_nJBasicType__; +text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__; +text: .text%__1cICallNodeLis_CallLeaf6kM_pknMCallLeafNode__; +text: .text%__1cLrecord_bias6FpknIPhaseIFG_ii_v_: coalesce.o; +text: .text%__1cJrRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMObjectLocker2t6MnGHandle_pnGThread__v_; +text: .text%__1cMObjectLocker2T6M_v_; +text: .text%__1cNloadRangeNodeMideal_Opcode6kM_i_; +text: .text%__1cRMachSafePointNodeRis_safepoint_node6kM_i_; +text: .text%__1cHTypeIntFempty6kM_i_; +text: .text%__1cRInvocationCounterJset_state6Mn0AFState__v_; +text: .text%__1cRInvocationCounterFreset6M_v_; +text: .text%__1cKTypeOopPtrWmake_from_klass_common6FpnHciKlass_ii_pk0_; +text: .text%__1cRInvocationCounterEinit6M_v_; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_; +text: .text%__1cLPhaseValuesFwiden6kMpknEType_3_3_; +text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_; +text: .text%__1cIHaltNodeLbottom_type6kM_pknEType__; +text: .text%__1cRRawBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cNtestI_regNodeHtwo_adr6kM_I_; +text: .text%__1cSsafePoint_pollNodeMideal_Opcode6kM_i_; +text: 
.text%__1cLOopRecorderOallocate_index6MpnI_jobject__i_; +text: .text%__1cYDebugInformationRecorderNappend_handle6MpnI_jobject__i_; +text: .text%__1cYCallStaticJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYCallStaticJavaDirectNodeSalignment_required6kM_i_; +text: .text%__1cMloadConPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_; +text: .text%__1cKRegionNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOPhaseIdealLoopIsplit_up6MpnENode_22_i_; +text: .text%__1cWConstantPoolCacheEntryRset_initial_state6Mi_v_; +text: .text%__1cKNativeCallGverify6M_v_; +text: .text%__1cNCompileBrokerLmaybe_block6F_v_; +text: .text%__1cFPhase2t6Mn0ALPhaseNumber__v_; +text: .text%__1cJloadINodeMideal_Opcode6kM_i_; +text: .text%__1cNLoadKlassNodeGOpcode6kM_i_; +text: .text%__1cIBoolNodeKmatch_edge6kMI_I_; +text: .text%__1cITypeLongEmake6Fxx_pk0_; +text: .text%__1cKRegionNodeOis_block_start6kM_i_; +text: .text%__1cCosGmalloc6FL_pv_; +text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_; +text: .text%__1cFStateM_sub_Op_RegP6MpknENode__v_; +text: .text%__1cPcheckCastPPNodeHtwo_adr6kM_I_; +text: .text%__1cRMachNullCheckNodeQis_MachNullCheck6M_p0_; +text: .text%__1cOGenerateOopMapGppush16MnNCellTypeState__v_; +text: .text%__1cGBitMapOset_difference6M0_v_; +text: .text%__1cMvalue_of_loc6FppnHoopDesc__l_; +text: .text%__1cRmethodDataOopDescTbytecode_cell_count6FnJBytecodesECode__i_; +text: .text%__1cRmethodDataOopDescRcompute_data_size6FpnOBytecodeStream__i_; +text: .text%__1cRmethodDataOopDescPinitialize_data6MpnOBytecodeStream_i_i_; +text: .text%__1cOcompI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__; +text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__; +text: .text%__1cNGrowableArray4CpnKciTypeFlowFBlock__2t6MpnFArena_iirk2_v_; +text: .text%__1cLPhaseValuesHlongcon6Mx_pnIConLNode__; +text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cOcompI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cKTypeAryPtrFxmeet6kMpknEType__3_; +text: .text%__1cMPhaseChaitinJsplit_USE6MpnENode_pnFBlock_2IIiinNGrowableArray4CI__i_I_; +text: .text%__1cNCatchProjNodeMis_CatchProj6kM_pk0_; +text: .text%__1cOcompU_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cNSafePointNodeGOpcode6kM_i_; +text: .text%__1cTconvI2L_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cMTypeKlassPtrEhash6kM_i_; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverYlookup_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cMciMethodDataLbci_to_data6Mi_pnLProfileData__; +text: .text%JVM_GetCPMethodSignatureUTF; +text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_; +text: .text%__1cIciMethodbCinterpreter_invocation_count6M_i_; +text: .text%__1cMMergeMemNodeNset_memory_at6MIpnENode__v_; +text: .text%JVM_CurrentThread; +text: .text%__1cPClassFileParserbLparse_constant_pool_nameandtype_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKstoreINodeJnum_opnds6kM_I_; +text: .text%__1cHTypeAryEhash6kM_i_; +text: .text%JVM_GetClassModifiers; +text: .text%JVM_GetClassAccessFlags; +text: .text%__1cRAbstractAssemblerGa_byte6Mi_v_; +text: .text%__1cJAssemblerGprefix6Mn0AGPrefix__v_; +text: .text%__1cKMemBarNodeKmatch_edge6kMI_I_; +text: .text%__1cMMergeMemNodePset_base_memory6MpnENode__v_; +text: 
.text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cCosEfree6Fpv_v_; +text: .text%__1cENodeDcmp6kMrk0_I_; +text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_; +text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMrax_RegPOperEtype6kM_pknEType__; +text: .text%__1cKTypeRawPtrEhash6kM_i_; +text: .text%__1cLmethodKlassNoop_is_method6kM_i_; +text: .text%__1cXindIndexScaleOffsetOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cPSignatureStreamJis_object6kM_i_; +text: .text%__1cVCompressedWriteStream2t6Mi_v_; +text: .text%__1cENode2t6Mp01_v_; +text: .text%__1cHAddNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJStartNodeIis_Start6M_p0_; +text: .text%__1cMURShiftINodeGOpcode6kM_i_; +text: .text%__1cNGrowableArray4CpnMMonitorValue__2t6Mii_v_; +text: .text%__1cYDebugInformationRecorderOdescribe_scope6MpnIciMethod_ipnKDebugToken_44_v_; +text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__; +text: .text%__1cYDebugInformationRecorderYserialize_monitor_values6MpnNGrowableArray4CpnMMonitorValue____i_; +text: .text%__1cMloadConINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMachNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cKReflectionTverify_class_access6FpnMklassOopDesc_2i_i_; +text: .text%__1cOMethodLivenessKBasicBlockWcompute_gen_kill_range6MpnQciBytecodeStream__v_; +text: .text%__1cJAssemblerJemit_data6MirknQRelocationHolder_i_v_; +text: .text%__1cIMachOperMdisp_as_type6kM_pknHTypePtr__; +text: .text%__1cKCompiledICWis_in_transition_state6kM_i_; +text: .text%__1cRInlineCacheBufferIcontains6FpC_i_; +text: .text%__1cOcompU_rRegNodeErule6kM_I_; +text: .text%__1cRMemBarReleaseNodeGOpcode6kM_i_; +text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cETypeOget_const_type6FpnGciType__pk0_; +text: .text%__1cMPhaseChaitinPset_was_spilled6MpnENode__v_; +text: .text%__1cXvirtual_call_RelocationIparse_ic6FrpnICodeBlob_rpC5rppnHoopDesc_pi_nNRelocIterator__; +text: .text%__1cLCounterDataOis_CounterData6M_i_; +text: .text%__1cRCompilationPolicyNcanBeCompiled6FnMmethodHandle__i_; +text: .text%__1cOcompU_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cKcmpOpUOperJnum_edges6kM_I_; +text: .text%__1cOrFlagsRegUOperJnum_edges6kM_I_; +text: .text%__1cKCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIGraphKitMsaved_ex_oop6FpnNSafePointNode__pnENode__; +text: .text%__1cMtlsLoadPNodeErule6kM_I_; +text: .text%__1cIGraphKitGmemory6MI_pnENode__; +text: .text%__1cNRelocIteratorJset_limit6MpC_v_; +text: .text%__1cPcheckCastPPNodeErule6kM_I_; +text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_; +text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQciBytecodeStreamPget_field_index6M_i_; +text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_; +text: .text%__1cNchunk_oops_do6FpnKOopClosure_pnFChunk_pc_L_: handles.o; +text: .text%__1cKCompiledIC2t6MpnKRelocation__v_; +text: .text%__1cMUniverseOperFclone6kM_pnIMachOper__; +text: .text%__1cJlabelOperFclone6kM_pnIMachOper__; +text: .text%__1cRaddI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cLPhaseValuesHmakecon6MpknEType__pnHConNode__; +text: .text%__1cENodeHins_req6MIp0_v_; +text: 
.text%__1cIGraphKitLclean_stack6Mi_v_; +text: .text%__1cKRegionNodeHhas_phi6kM_pnHPhiNode__; +text: .text%__1cNloadRangeNodePoper_input_base6kM_I_; +text: .text%__1cOMachReturnNodeNis_MachReturn6M_p0_; +text: .text%__1cNaddI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cQPreserveJVMState2t6MpnIGraphKit_i_v_; +text: .text%__1cNtestP_regNodeMcisc_operand6kM_i_; +text: .text%__1cKjmpConNodeHtwo_adr6kM_I_; +text: .text%__1cPClassFileParserbJparse_constant_pool_methodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_; +text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_; +text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_; +text: .text%__1cMPhaseChaitinVmay_be_copy_of_callee6kMpnENode__i_; +text: .text%__1cNtestP_regNodeErule6kM_I_; +text: .text%__1cUThreadSafepointStateXexamine_state_of_thread6Mi_v_; +text: .text%__1cUSafepointSynchronizeOsafepoint_safe6FpnKJavaThread_nPJavaThreadState__i_; +text: .text%__1cFStateM_sub_Op_ConI6MpknENode__v_; +text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_; +text: .text%__1cOcompU_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cHPhiNodeEmake6FpnENode_2pknEType_pknHTypePtr__p0_; +text: .text%__1cKCodeBufferOadd_stub_reloc6MpCrknQRelocationHolder_i_v_; +text: .text%__1cKCodeBufferOalloc_relocate6M_pnORelocateBuffer__; +text: .text%__1cNtestI_regNodeErule6kM_I_; +text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__; +text: .text%__1cFParseKensure_phi6Mii_pnHPhiNode__; +text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_; +text: .text%__1cOGenerateOopMapSget_basic_block_at6kMi_pnKBasicBlock__; +text: .text%__1cHAddress2t6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cFciEnvSget_klass_by_index6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cFStateM_sub_Op_AddP6MpknENode__v_; +text: .text%__1cICallNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cOcompI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cPciObjectFactoryNinit_ident_of6MpnIciObject__v_; +text: .text%__1cIciObjectJset_ident6MI_v_; +text: .text%__1cLMachNopNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLMachNopNodeMideal_Opcode6kM_i_; +text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__; +text: .text%__1cITypeFuncEhash6kM_i_; +text: .text%__1cOcompI_rRegNodeErule6kM_I_; +text: .text%__1cIciMethodPliveness_at_bci6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessPget_liveness_at6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessKBasicBlockPget_liveness_at6MpnIciMethod_i_nGBitMap__; +text: .text%__1cITypeLongFempty6kM_i_; +text: .text%__1cMTypeKlassPtrCeq6kMpknEType__i_; +text: .text%__1cJAssemblerJemit_data6MinJrelocInfoJrelocType_i_v_; +text: .text%__1cJLoadSNodeGOpcode6kM_i_; +text: .text%__1cRMemBarAcquireNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitObasic_plus_adr6MpnENode_2l_2_; +text: .text%__1cJVectorSet2L6MI_rnDSet__; +text: .text%__1cJVectorSetEgrow6MI_v_; +text: .text%__1cLCastP2LNodeGOpcode6kM_i_; +text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cJAssemblerEcall6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cPcheckCastPPNodeMideal_Opcode6kM_i_; +text: .text%__1cJLoadLNodeGOpcode6kM_i_; +text: .text%__1cMLinkResolverNresolve_klass6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__; +text: 
.text%__1cRCompilationPolicyOmustBeCompiled6FnMmethodHandle__i_; +text: .text%__1cIAndINodeGOpcode6kM_i_; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_; +text: .text%__1cUParallelScavengeHeapNtlab_capacity6kM_L_; +text: .text%__1cTCreateExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIPhaseCCPFwiden6kMpknEType_3_3_; +text: .text%__1cHTypePtrJsingleton6kM_i_; +text: .text%__1cNArgumentCountDset6MinJBasicType__v_; +text: .text%__1cWShouldNotReachHereNodeGpinned6kM_i_; +text: .text%__1cNsubI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cNtestI_regNodePoper_input_base6kM_I_; +text: .text%__1cLBoxLockNodeGOpcode6kM_i_; +text: .text%__1cWShouldNotReachHereNodeHtwo_adr6kM_I_; +text: .text%__1cQMachCallJavaNodePis_MachCallJava6M_p0_; +text: .text%__1cKStoreBNodeGOpcode6kM_i_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_22nHAddressLScaleFactor_ipCrknQRelocationHolder__v_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__; +text: .text%__1cITypeFuncCeq6kMpknEType__i_; +text: .text%__1cMloadConLNodePoper_input_base6kM_I_; +text: .text%__1cMPhaseIterGVNHmakecon6MpknEType__pnHConNode__; +text: .text%__1cWShouldNotReachHereNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHoopDescGverify6M_v_; +text: .text%__1cQconstMethodKlassSoop_is_constMethod6kM_i_; +text: .text%__1cRconstantPoolKlassToop_is_constantPool6kM_i_; +text: .text%__1cOcompP_rRegNodePoper_input_base6kM_I_; +text: .text%__1cHTypePtrHget_con6kM_l_; +text: .text%__1cHMatcherWis_short_branch_offset6Mi_i_; +text: .text%__1cMloadConLNodeHtwo_adr6kM_I_; +text: .text%__1cMTypeKlassPtr2t6MnHTypePtrDPTR_pnHciKlass_i_v_; +text: .text%__1cYCallStaticJavaDirectNodeKmethod_set6Ml_v_; +text: .text%__1cWMachCallStaticJavaNodePret_addr_offset6M_i_; +text: .text%__1cYCallStaticJavaDirectNodePcompute_padding6kMi_i_; +text: .text%__1cJVectorSet2t6MpnFArena__v_; +text: .text%__1cFStateM_sub_Op_RegI6MpknENode__v_; +text: .text%__1cICmpPNodeDsub6kMpknEType_3_3_; +text: .text%__1cNloadConP0NodePoper_input_base6kM_I_; +text: .text%__1cOAbstractICachePinvalidate_word6FpC_v_; +text: .text%__1cRNativeInstructionFwrote6Mi_v_; +text: .text%__1cMURShiftLNodeGOpcode6kM_i_; +text: .text%__1cOrFlagsRegUOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIciMethodLscale_count6Mi_i_; +text: .text%__1cLjmpConUNodeJnum_opnds6kM_I_; +text: .text%__1cQciBytecodeStreamQget_method_index6M_i_; +text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__; +text: .text%__1cPSignatureStream2t6MnMsymbolHandle_i_v_; +text: .text%JVM_GetMethodIxLocalsCount; +text: .text%__1cJlabelOperFlabel6kM_pnFLabel__; +text: .text%__1cGOopMapJheap_size6kM_i_; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cMloadConLNodeErule6kM_I_; +text: .text%__1cOMacroAssemblerZneeds_explicit_null_check6Fi_i_; +text: .text%__1cNSafePointNode2t6MIpnIJVMState__v_; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__; +text: .text%__1cNloadKlassNodeMideal_Opcode6kM_i_; +text: .text%__1cWConstantPoolCacheEntryIas_flags6MnITosState_iiiii_i_; +text: .text%__1cNloadConP0NodeHtwo_adr6kM_I_; +text: .text%__1cWThreadLocalAllocBufferVinitialize_statistics6M_v_; +text: .text%__1cWThreadLocalAllocBufferFclear6M_v_; +text: .text%__1cWThreadLocalAllocBufferVaccumulate_statistics6MLi_v_; +text: .text%__1cWThreadLocalAllocBufferImax_size6F_L_; +text: .text%__1cWThreadLocalAllocBufferGresize6M_v_; +text: 
.text%__1cJloadINodePoper_input_base6kM_I_; +text: .text%__1cIRootNodeLbottom_type6kM_pknEType__; +text: .text%__1cWConstantPoolCacheEntryGverify6kMpnMoutputStream__v_; +text: .text%__1cHCompileYout_preserve_stack_slots6F_I_; +text: .text%__1cFParsePload_state_from6Mpn0AFBlock__v_; +text: .text%__1cFParseMmerge_common6Mpn0AFBlock_i_v_; +text: .text%__1cNloadRangeNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cIGraphKitQkill_dead_locals6M_v_; +text: .text%__1cKMemBarNodeLbottom_type6kM_pknEType__; +text: .text%__1cKimmL32OperJconstantL6kM_x_; +text: .text%__1cIciObject2t6MnGHandle__v_; +text: .text%__1cITypeFuncEmake6FpknJTypeTuple_3_pk0_; +text: .text%__1cLConvI2LNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPciObjectFactoryGinsert6MipnIciObject_pnNGrowableArray4C2___v_; +text: .text%__1cRaddP_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cLBoxLockNodeJideal_reg6kM_I_; +text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cICHeapObj2n6FL_pv_; +text: .text%__1cFframeUentry_frame_is_first6kM_i_; +text: .text%__1cKInlineTreeJcallee_at6kMipnIciMethod__p0_; +text: .text%__1cOFastUnlockNodeGOpcode6kM_i_; +text: .text%__1cFStateM_sub_Op_ConL6MpknENode__v_; +text: .text%__1cIHaltNodeGpinned6kM_i_; +text: .text%__1cMTypeKlassPtrEmake6FnHTypePtrDPTR_pnHciKlass_i_pk0_; +text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cOCallRelocationFvalue6M_pC_; +text: .text%__1cKRelocationXpd_set_call_destination6MpCl_v_; +text: .text%__1cOCallRelocationWfix_relocation_at_move6Ml_v_; +text: .text%__1cKRelocationTpd_call_destination6M_pC_; +text: .text%__1cOCallRelocationPset_destination6MpCl_v_; +text: .text%__1cHcommute6FpnENode_ii_i_: addnode.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__; +text: .text%__1cENodeQlatency_from_use6kMrnLBlock_Array_rnNGrowableArray4CI__pk0p0_i_; +text: .text%__1cHAddNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNinstanceKlassQfind_local_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__i_; +text: .text%__1cMmerge_region6FpnKRegionNode_pnIPhaseGVN__pnENode__: cfgnode.o; +text: .text%__1cNloadConP0NodeErule6kM_I_; +text: .text%__1cIGraphKitMreset_memory6M_pnENode__; +text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__; +text: .text%__1cNloadRangeNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNloadConI0NodeMideal_Opcode6kM_i_; +text: .text%__1cJloadPNodeHtwo_adr6kM_I_; +text: .text%__1cNSignatureInfoGdo_int6M_v_; +text: .text%__1cKjmpDirNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPThreadRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cPThreadRootsTaskEname6M_pc_; +text: .text%__1cUThreadSafepointStateMroll_forward6Mn0AMsuspend_type__v_; +text: .text%__1cUThreadSafepointStateHrestart6M_v_; +text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__; +text: .text%__1cJrRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIAddLNodeGOpcode6kM_i_; +text: .text%__1cJLoadPNodeJideal_reg6kM_I_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cOkill_dead_code6FpnENode_pnMPhaseIterGVN__i_: node.o; +text: .text%__1cKjmpDirNodeFclone6kM_pnENode__; +text: .text%__1cOcompI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cFParseFBlockRsuccessor_for_bci6Mi_p1_; +text: .text%__1cNGrowableArray4CpnOMethodLivenessKBasicBlock__2t6MpnFArena_iirk2_v_; +text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: 
.text%__1cNtestP_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNincI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2ipnGThread__v_; +text: .text%__1cHOrINodeGOpcode6kM_i_; +text: .text%__1cOcompI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMPhaseChaitinJsplit_DEF6MpnENode_pnFBlock_iIp25nNGrowableArray4CI__i_I_; +text: .text%__1cENodeHis_Type6M_pnITypeNode__; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_; +text: .text%__1cICHeapObj2k6Fpv_v_; +text: .text%__1cRaddP_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cICmpINodeDsub6kMpknEType_3_3_; +text: .text%__1cENode2n6FLi_pv_; +text: .text%__1cJTraceTime2t6MpkcpnMelapsedTimer_iipnMoutputStream__v_; +text: .text%__1cIAddINodeGadd_id6kM_pknEType__; +text: .text%__1cKStoreCNodeGOpcode6kM_i_; +text: .text%__1cTconvI2L_reg_regNodeMcisc_operand6kM_i_; +text: .text%__1cHMemNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cSvframeStreamCommonPfill_from_frame6M_i_; +text: .text%__1cILoadNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cJStoreNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNFingerprinterLfingerprint6M_L_; +text: .text%__1cLConvI2LNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_; +text: .text%__1cIGraphKitRnull_check_common6MpnENode_nJBasicType_i_2_; +text: .text%__1cKBlock_ListGremove6MI_v_; +text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_; +text: .text%__1cKciTypeFlowGJsrSetJcopy_into6Mp1_v_; +text: .text%__1cICmpLNodeGOpcode6kM_i_; +text: .text%__1cLklassVtableTupdate_super_vtable6MpnNinstanceKlass_pnNmethodOopDesc_i_i_; +text: .text%__1cLcastP2LNodePoper_input_base6kM_I_; +text: .text%__1cQStackFrameStream2t6MpnKJavaThread_i_v_; +text: .text%__1cTciConstantPoolCacheEfind6Mi_i_; +text: .text%__1cOPhaseIdealLoopGspinup6MpnENode_2222pnLsmall_cache__2_; +text: .text%__1cMVirtualSpaceOcommitted_size6kM_L_; +text: .text%__1cNGrowableArray4CpnIciObject__Praw_at_put_grow6Mirk14_v_; +text: .text%__1cIRootNodeNis_block_proj6kM_pknENode__; +text: .text%__1cHOopFlowEmake6FpnFArena_i_p0_; +text: .text%__1cJloadLNodeErule6kM_I_; +text: .text%__1cNloadConI0NodeLbottom_type6kM_pknEType__; +text: .text%__1cJimmI0OperIconstant6kM_l_; +text: .text%__1cScompI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cLBoxLockNodeLbottom_type6kM_pknEType__; +text: .text%__1cNaddI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cOPhaseIdealLoopKhandle_use6MpnENode_2pnLsmall_cache_22222_v_; +text: .text%__1cOPhaseIdealLoopOfind_use_block6MpnENode_22222_2_; +text: .text%__1cNmethodOopDescbGupdate_compiled_code_entry_point6Mi_v_; +text: .text%__1cJStoreNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNSignatureInfoHdo_void6M_v_; +text: .text%__1cLAdapterInfoKhash_value6kM_l_; +text: .text%JVM_GetCPMethodClassNameUTF; +text: .text%__1cHOopFlowFclone6Mp0i_v_; +text: .text%__1cRSignatureIteratorSiterate_parameters6ML_v_; +text: .text%__1cILoopNodeHis_Loop6M_p0_; +text: .text%__1cPindOffset32OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cPindOffset32OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMCallLeafNodeGOpcode6kM_i_; +text: .text%__1cNmethodOopDescPis_final_method6kM_i_; +text: .text%__1cSComputeAdapterInfoHcompute6Mplii_v_; +text: .text%__1cLAdapterInfoHcompute6MnMmethodHandle_i_v_; +text: .text%__1cLAdapterInfo2T6M_v_; +text: .text%__1cSComputeAdapterInfoLreturn_type6MnJBasicType__i_; +text: .text%__1cSComputeAdapterInfoMsize_in_bits6FnMmethodHandle__i_; +text: 
.text%__1cMAdapterCacheGlookup6MpnLAdapterInfo__pnMBasicAdapter__; +text: .text%__1cJloadINodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cPadd_derived_oop6FppnHoopDesc_2_v_: oopMap.o; +text: .text%__1cTDerivedPointerTableDadd6FppnHoopDesc_3_v_; +text: .text%__1cFParseFBlockJinit_node6Mp0i_v_; +text: .text%__1cFParseFBlockKinit_graph6Mp0_v_; +text: .text%__1cOcompP_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cKjmpDirNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMLinkResolverbFlinktime_resolve_virtual_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_; +text: .text%__1cKCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJTraceTime2T6M_v_; +text: .text%__1cKciTypeFlowNmake_range_at6Mi_pn0AFRange__; +text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_; +text: .text%__1cKoopFactoryKnew_method6FinLAccessFlags_iiipnGThread__pnNmethodOopDesc__; +text: .text%__1cNmethodOopDescLobject_size6Fi_i_; +text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_; +text: .text%__1cLklassVtableWneeds_new_vtable_entry6FpnNmethodOopDesc_pnMklassOopDesc_pnHoopDesc_pnNsymbolOopDesc_nLAccessFlags__i_; +text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_; +text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__; +text: .text%__1cQconstMethodKlassIallocate6MiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cNmethodOopDescJinit_code6M_v_; +text: .text%__1cKoopFactoryPnew_constMethod6FiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cPClassFileParserMparse_method6MnSconstantPoolHandle_ipnLAccessFlags_pnPtypeArrayHandle_55pnGThread__nMmethodHandle__; +text: .text%__1cFBlockUhoist_LCA_above_defs6Mp01IrnLBlock_Array__1_; +text: .text%__1cScompI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__; +text: .text%__1cHciFieldJwill_link6MpnPciInstanceKlass_nJBytecodesECode__i_; +text: .text%__1cFciEnvXget_field_by_index_impl6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cQciBytecodeStreamJget_field6Mri_pnHciField__; +text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cKTypeOopPtrFempty6kM_i_; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_16MnJBytecodesECode__v_; +text: .text%__1cKCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cMMergeMemNodeNgrow_to_match6Mpk0_v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_; +text: .text%__1cTCreateExceptionNodePoper_input_base6kM_I_; +text: .text%__1cPciInstanceKlassYunique_concrete_subklass6M_p0_; +text: .text%__1cLStringTableGlookup6MipHiI_pnHoopDesc__; +text: .text%__1cLBoxLockNodeHsize_of6kM_I_; +text: .text%__1cIciObjectIencoding6M_pnI_jobject__; +text: .text%__1cPClassFileParserbDverify_legal_method_modifiers6MiinMsymbolHandle_pnGThread__v_; +text: .text%__1cIPhaseGVNUtransform_no_reclaim6MpnENode__2_; +text: .text%__1cIRewriterOrewrite_method6FnMmethodHandle_rnIintArray_pnGThread__1_; +text: .text%__1cNmethodOopDescLlink_method6FnMmethodHandle__v_; +text: .text%__1cNloadKlassNodePoper_input_base6kM_I_; +text: .text%__1cNObjectMonitorEexit6MpnGThread__v_; +text: .text%__1cPClassFileParserZskip_over_field_signature6MpciIpnGThread__1_; +text: .text%__1cMMergeMemNode2t6MpnENode__v_; +text: .text%__1cMMergeMemNodeRmake_empty_memory6F_pnENode__; +text: 
.text%__1cNinstanceKlassVshould_be_initialized6kM_i_; +text: .text%__1cNtestP_regNodeJnum_opnds6kM_I_; +text: .text%__1cJStartNodeGis_CFG6kM_i_; +text: .text%__1cRaddI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_; +text: .text%__1cMindIndexOperJnum_edges6kM_I_; +text: .text%__1cRInterpretedRFrameKtop_method6kM_nMmethodHandle__; +text: .text%__1cKGCStatInfoMset_gc_usage6MinLMemoryUsage_i_v_; +text: .text%__1cXmembar_acquire_lockNodeLbottom_type6kM_pknEType__; +text: .text%__1cQPreserveJVMState2T6M_v_; +text: .text%__1cLRuntimeStubIis_alive6kM_i_; +text: .text%__1cMWarmCallInfoHis_cold6kM_i_; +text: .text%__1cNObjectMonitorFenter6MpnGThread__v_; +text: .text%__1cKjmpConNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIemit_d646FrnKCodeBuffer_l_v_; +text: .text%__1cFParseFmerge6Mi_v_; +text: .text%__1cOPhaseIdealLoopIset_idom6MpnENode_2I_v_; +text: .text%__1cIAddINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFParseMdo_one_block6M_v_; +text: .text%__1cFParseFBlockMrecord_state6Mp0_v_; +text: .text%__1cNtestP_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cJloadLNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cIPhaseIFGFUnion6MII_v_; +text: .text%__1cNloadRangeNodeJnum_opnds6kM_I_; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_26MnJBytecodesECode__v_; +text: .text%__1cOcompP_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cIBoolNodeJideal_reg6kM_I_; +text: .text%__1cHCmpNodeJideal_reg6kM_I_; +text: .text%__1cFStateM_sub_Op_Bool6MpknENode__v_; +text: .text%__1cJCatchNodeIis_Catch6kM_pk0_; +text: .text%__1cJLoadBNodeGOpcode6kM_i_; +text: .text%__1cENodeHlatency6MI_I_; +text: .text%__1cIGraphKit2t6MpnIJVMState__v_; +text: .text%__1cKTypeAryPtrFklass6kM_pnHciKlass__; +text: .text%__1cTciConstantPoolCacheDget6Mi_pv_; +text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_; +text: .text%__1cKTypeOopPtrHget_con6kM_l_; +text: .text%__1cMPhaseChaitinKprompt_use6MpnFBlock_I_i_; +text: .text%__1cIJVMStateLdebug_depth6kM_I_; +text: .text%__1cIGraphKitTadd_safepoint_edges6MpnNSafePointNode_i_v_; +text: .text%__1cENodeNadd_req_batch6Mp0I_v_; +text: .text%__1cIJVMStateKclone_deep6kM_p0_; +text: .text%__1cFStateK_sub_Op_If6MpknENode__v_; +text: .text%__1cXindIndexScaleOffsetOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cXindIndexScaleOffsetOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cGBitMapVset_union_with_result6M0_i_; +text: .text%__1cNSafePointNodeEhash6kM_I_; +text: .text%__1cMelapsedTimerFstart6M_v_; +text: .text%__1cJStartNodeGOpcode6kM_i_; +text: .text%__1cMelapsedTimerEstop6M_v_; +text: .text%__1cOPhaseIdealLoopQconditional_move6MpnENode__2_; +text: .text%__1cJloadLNodeMideal_Opcode6kM_i_; +text: .text%__1cOMethodLivenessKBasicBlockJstore_one6Mi_v_; +text: .text%__1cTC2IAdapterGeneratorXlazy_std_verified_entry6FnMmethodHandle__pC_; +text: .text%__1cPindOffset32OperJnum_edges6kM_I_; +text: .text%__1cPFieldAccessInfoDset6MnLKlassHandle_nMsymbolHandle_iinJBasicType_nLAccessFlags__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_iipnGThread__v_; +text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_; +text: .text%__1cNsubI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cTCreateExceptionNodeHtwo_adr6kM_I_; +text: .text%__1cPindOffset32OperFscale6kM_i_; +text: 
.text%__1cHAddNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cICmpPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cbFCompressedLineNumberWriteStream2t6Mi_v_; +text: .text%__1cPClassFileParserWparse_linenumber_table6MIIpipnGThread__pC_; +text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_; +text: .text%__1cHTypePtrLdual_offset6kM_i_; +text: .text%__1cNMachIdealNodePoper_input_base6kM_I_; +text: .text%__1cSObjectSynchronizerOinflate_helper6FpnHoopDesc__pnNObjectMonitor__; +text: .text%__1cKciTypeFlowIblock_at6Mipn0AGJsrSet_n0AMCreateOption__pn0AFBlock__; +text: .text%__1cKciTypeFlowFRangeNget_block_for6Mpn0AGJsrSet_n0AMCreateOption__pn0AFBlock__; +text: .text%__1cSvframeStreamCommonbBfill_from_interpreter_frame6M_v_; +text: .text%__1cHMonitorEwait6Mil_i_; +text: .text%__1cLcastP2LNodeMideal_Opcode6kM_i_; +text: .text%__1cNloadKlassNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_; +text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cXindIndexScaleOffsetOperFscale6kM_i_; +text: .text%__1cQciBytecodeStreamKget_method6Mri_pnIciMethod__; +text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__; +text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_; +text: .text%__1cNstoreImmBNodePoper_input_base6kM_I_; +text: .text%__1cNLoadRangeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cITypeFuncEmake6FpnIciMethod__pk0_; +text: .text%__1cMindirectOperJnum_edges6kM_I_; +text: .text%__1cKCompiledICOic_destination6kM_pC_; +text: .text%__1cTconvI2L_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cICmpUNodeDsub6kMpknEType_3_3_; +text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__; +text: .text%__1cRshrL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__; +text: .text%__1cPClassFileParserbFparse_constant_pool_class_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cOMethodLivenessKBasicBlockMmerge_normal6MnGBitMap__i_; +text: .text%__1cTleaPIdxScaleOffNodeHtwo_adr6kM_I_; +text: .text%__1cETypeFwiden6kMpk0_2_; +text: .text%__1cKciTypeFlowLStateVector2t6Mp0_v_; +text: .text%__1cNCatchProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cOcompU_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCatchProjNodeHsize_of6kM_I_; +text: .text%__1cNCatchProjNodeEhash6kM_I_; +text: .text%__1cSvframeStreamCommonEnext6M_v_; +text: .text%__1cFKlassIsubklass6kM_p0_; +text: .text%__1cKciTypeFlowFBlockPis_simpler_than6Mp1_i_; +text: .text%__1cJimmI8OperIconstant6kM_l_; +text: .text%__1cIAddPNodeQmach_bottom_type6FpknIMachNode__pknEType__; +text: .text%__1cILoadNodeHsize_of6kM_I_; +text: .text%__1cHMatcherVReduceInst_Chain_Rule6MpnFState_ipnIMachNode_rpnENode__v_; +text: .text%__1cURethrowExceptionNodeNis_block_proj6kM_pknENode__; +text: .text%__1cNincI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cLjmpConUNodeHtwo_adr6kM_I_; +text: .text%__1cHMatcherScalling_convention6FpnLOptoRegPair_Ii_v_; +text: .text%__1cKPerfStringKset_string6Mpkc_v_; +text: .text%__1cENodeLnonnull_req6kM_p0_; +text: .text%__1cICmpINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: 
.text%__1cHTypeAryCeq6kMpknEType__i_; +text: .text%__1cQSystemDictionaryKfind_class6FiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cQUnique_Node_ListEpush6MpnENode__v_; +text: .text%__1cILoopNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitTadd_exception_state6MpnNSafePointNode__v_; +text: .text%__1cJloadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__; +text: .text%__1cOPhaseIdealLoopRregister_new_node6MpnENode_2_v_; +text: .text%__1cQPSGenerationPoolImax_size6kM_L_; +text: .text%__1cQPSGenerationPoolNused_in_bytes6M_L_; +text: .text%__1cQPSGenerationPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cOMethodLivenessNwork_list_get6M_pn0AKBasicBlock__; +text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: .text%__1cICallNodeOis_CallRuntime6kM_pknPCallRuntimeNode__; +text: .text%__1cHTypeAryFxmeet6kMpknEType__3_; +text: .text%__1cNstoreImmBNodeMideal_Opcode6kM_i_; +text: .text%__1cKciTypeFlowLStateVectorEmeet6Mpk1_i_; +text: .text%__1cIMachNodeTmay_be_short_branch6kM_i_; +text: .text%__1cJloadINodeJnum_opnds6kM_I_; +text: .text%__1cNaddI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cRMachSafePointNode2t6M_v_; +text: .text%__1cHMatcherKmatch_sfpt6MpnNSafePointNode__pnIMachNode__; +text: .text%__1cOcompP_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cMPhaseChaitinKFind_const6kMpknENode__I_; +text: .text%__1cMPhaseChaitinKFind_const6kMI_I_; +text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_; +text: .text%__1cRInterpretedRFrameEinit6M_v_; +text: .text%__1cHemit_cc6FrnKCodeBuffer_ii_v_; +text: .text%__1cNtestI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframeMpd_gc_epilog6M_v_; +text: .text%__1cFframeLgc_epilogue6M_v_; +text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_prologue6M_v_; +text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_; +text: .text%__1cXvirtual_call_RelocationJfirst_oop6M_pC_; +text: .text%__1cXvirtual_call_RelocationJoop_limit6M_pC_; +text: .text%__1cMciMethodDataLhas_trap_at6MpnLProfileData_i_i_; +text: .text%__1cKciTypeFlowLStateVectorOpush_translate6MpnGciType__v_; +text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_; +text: .text%__1cMloadConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIimmPOperIconstant6kM_l_; +text: .text%__1cIimmPOperPconstant_is_oop6kM_i_; +text: .text%__1cOleaPIdxOffNodeHtwo_adr6kM_I_; +text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_; +text: .text%__1cTleaPIdxScaleOffNodeErule6kM_I_; +text: .text%JVM_IsNaN; +text: .text%__1cXinsert_anti_dependences6FrpnFBlock_pnENode_rnLBlock_Array__i_: gcm.o; +text: .text%__1cLOptoRuntimebCcomplete_monitor_unlocking_C6FpnHoopDesc_pnJBasicLock__v_; +text: .text%__1cJloadINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOptoRuntimebAcomplete_monitor_locking_C6FpnHoopDesc_pnJBasicLock_pnKJavaThread__v_; +text: .text%__1cHCompileKTracePhase2t6MpkcpnMelapsedTimer_i_v_; +text: .text%__1cHCompileKTracePhase2T6M_v_; +text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_; +text: .text%__1cHMatcherPc_frame_pointer6kM_i_; +text: .text%__1cFBlockKsched_call6MrnHMatcher_rnLBlock_Array_IrnJNode_List_pipnMMachCallNode_rnJVectorSet__I_; +text: .text%__1cMMachCallNode2t6M_v_; +text: .text%__1cICallNodeJideal_reg6kM_I_; +text: 
.text%__1cOleaPIdxOffNodeErule6kM_I_; +text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_; +text: .text%__1cSCallLeafDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cOcompP_rRegNodeErule6kM_I_; +text: .text%__1cMany_RegPOperJnum_edges6kM_I_; +text: .text%__1cIGraphKitbLset_predefined_input_for_runtime_call6MpnNSafePointNode__v_; +text: .text%__1cMany_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cScompI_rReg_immNodeErule6kM_I_; +text: .text%__1cLBoxLockNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cIGraphKitOset_all_memory6MpnENode__v_; +text: .text%__1cLRegisterMap2t6Mpk0_v_; +text: .text%__1cGvframe2t6MpknFframe_pknLRegisterMap_pnKJavaThread__v_; +text: .text%__1cNmethodOopDescWwas_executed_more_than6kMi_i_; +text: .text%__1cKstoreCNodePoper_input_base6kM_I_; +text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_; +text: .text%__1cHi2sNodeErule6kM_I_; +text: .text%__1cIMulLNodeGOpcode6kM_i_; +text: .text%__1cOPhaseIdealLoopHdom_lca6kMpnENode_2_2_; +text: .text%__1cMPrefetchNodeGOpcode6kM_i_; +text: .text%__1cSReferenceProcessorSdiscover_reference6MpnHoopDesc_nNReferenceType__i_; +text: .text%__1cSReferenceProcessorTget_discovered_list6MnNReferenceType__ppnHoopDesc__; +text: .text%__1cXjava_lang_ref_ReferenceIset_next6FpnHoopDesc_2_v_; +text: .text%__1cKciTypeFlowGJsrSet2t6MpnFArena_i_v_; +text: .text%__1cNtestI_regNodeJnum_opnds6kM_I_; +text: .text%__1cIAddINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIMachOperNconstant_disp6kM_i_; +text: .text%__1cIMachOperFscale6kM_i_; +text: .text%__1cFframeNis_java_frame6kM_i_; +text: .text%__1cOMethodLivenessKBasicBlockJpropagate6Mp0_v_; +text: .text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__; +text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__; +text: .text%__1cNloadKlassNodeErule6kM_I_; +text: .text%__1cIciMethodRhas_compiled_code6M_i_; +text: .text%__1cOoop_RelocationJpack_data6M_i_; +text: .text%__1cKCompiledICKcached_oop6kM_pnHoopDesc__; +text: .text%__1cGOopMap2t6Mii_v_; +text: .text%__1cJOopMapSetKadd_gc_map6MipnGOopMap__v_; +text: .text%__1cNincI_rRegNodeErule6kM_I_; +text: .text%__1cRMachSafePointNodePis_MachCallLeaf6M_pnQMachCallLeafNode__; +text: .text%__1cRMachSafePointNodeLset_oop_map6MpnGOopMap__v_; +text: .text%__1cYDebugInformationRecorderNadd_safepoint6MipnGOopMap__v_; +text: .text%__1cHCompileTProcess_OopMap_Node6MpnIMachNode_i_v_; +text: .text%__1cYDebugInformationRecorderKadd_oopmap6MipnGOopMap__v_; +text: .text%__1cHOopFlowNbuild_oop_map6MpnENode_ipnNPhaseRegAlloc_pi_pnGOopMap__; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNsubI_rRegNodeMcisc_operand6kM_i_; +text: .text%JVM_GetCPFieldClassNameUTF; +text: .text%__1cRaddI_rReg_immNodeErule6kM_I_; +text: .text%__1cRMachNullCheckNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cHRetNodeMideal_Opcode6kM_i_; +text: .text%__1cGvframeKnew_vframe6FpknFframe_pknLRegisterMap_pnKJavaThread__p0_; +text: .text%__1cNsubI_rRegNodeErule6kM_I_; +text: .text%__1cRaddP_rReg_immNodeErule6kM_I_; +text: .text%__1cPClassFileParserbGparse_constant_pool_string_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cJloadLNodePoper_input_base6kM_I_; +text: .text%__1cRshrL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cITypeLongFxdual6kM_pknEType__; +text: .text%__1cRMachSafePointNodeSis_MachCallRuntime6M_pnTMachCallRuntimeNode__; +text: .text%__1cNaddI_rRegNodeJnum_opnds6kM_I_; +text: 
.text%__1cENodeJset_req_X6MIp0pnMPhaseIterGVN__v_; +text: .text%__1cOcompP_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_ipnGThread__v_; +text: .text%__1cWConstantPoolCacheEntryJset_field6MnJBytecodesECode_2nLKlassHandle_iinITosState_ii_v_; +text: .text%__1cIAndLNodeGOpcode6kM_i_; +text: .text%__1cMindIndexOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cOGenerateOopMapCpp6MpnNCellTypeState_2_v_; +text: .text%__1cMCallJavaNodeLis_CallJava6kM_pk0_; +text: .text%__1cICallNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cHCompileSflatten_alias_type6kMpknHTypePtr__3_; +text: .text%__1cRcmpFastUnlockNodePoper_input_base6kM_I_; +text: .text%__1cYCallStaticJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcompP_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cFStateW_sub_Op_CallStaticJava6MpknENode__v_; +text: .text%__1cWMachCallStaticJavaNodeVis_MachCallStaticJava6M_p0_; +text: .text%__1cRaddP_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cICallInfoDset6MnLKlassHandle_nMmethodHandle_pnGThread__v_; +text: .text%__1cSComputeAdapterInfoJdo_object6Mii_v_; +text: .text%__1cRMachSafePointNodeWis_MachCallInterpreter6M_pnXMachCallInterpreterNode__; +text: .text%__1cIGraphKitbDtransfer_exceptions_into_jvms6M_pnIJVMState__; +text: .text%__1cLConvL2INodeGOpcode6kM_i_; +text: .text%__1cOcompI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNaddI_rRegNodeErule6kM_I_; +text: .text%__1cHConNodeEmake6FpknEType__p0_; +text: .text%__1cScompI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cJLoadCNodeGOpcode6kM_i_; +text: .text%__1cYCallStaticJavaDirectNodeFreloc6kM_i_; +text: .text%__1cRcmpFastUnlockNodeMideal_Opcode6kM_i_; +text: .text%__1cJTraceTime2t6MpkciipnMoutputStream__v_; +text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cIGraphKitQset_saved_ex_oop6FpnNSafePointNode_pnENode__v_; +text: .text%__1cIGraphKitUmake_exception_state6MpnENode__pnNSafePointNode__; +text: .text%__1cJloadBNodeMideal_Opcode6kM_i_; +text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_; +text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cGOopMapHcopy_to6MpC_v_; +text: .text%__1cNstoreImmBNodeJnum_opnds6kM_I_; +text: .text%__1cVLoaderConstraintTableWfind_loader_constraint6MnMsymbolHandle_nGHandle__ppnVLoaderConstraintEntry__; +text: .text%__1cJTimeStampJupdate_to6Mx_v_; +text: .text%__1cJTimeStampGupdate6M_v_; +text: .text%__1cFframeZinterpreter_frame_set_bcx6Ml_v_; +text: .text%__1cCosFsleep6FpnGThread_xi_i_; +text: .text%__1cNgetTimeMillis6F_x_; +text: .text%__1cRaddP_rReg_immNodeLbottom_type6kM_pknEType__; +text: .text%__1cIos_sleep6Fxi_i_: os_solaris.o; +text: .text%__1cLPhaseValuesHzerocon6MnJBasicType__pnHConNode__; +text: .text%__1cMCreateExNodeKmatch_edge6kMI_I_; +text: .text%__1cTconvI2L_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cIGraphKitNuncommon_trap6MipnHciKlass_pkci_v_; +text: .text%__1cILoadNodeEmake6FpnENode_22pknHTypePtr_pknEType_nJBasicType__p0_; +text: 
.text%__1cIGraphKitJmake_load6MpnENode_2pknEType_nJBasicType_i_2_; +text: .text%__1cTconvI2L_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cTno_rax_rbx_RegPOperJnum_edges6kM_I_; +text: .text%__1cLPCTableNodeLbottom_type6kM_pknEType__; +text: .text%__1cLOptoRuntimeSuncommon_trap_Type6F_pknITypeFunc__; +text: .text%__1cIHaltNode2t6MpnENode_2_v_; +text: .text%__1cNSafePointNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNaddI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cGPcDesc2t6Mii_v_; +text: .text%__1cHnmethodKcopy_pc_at6MipnGPcDesc__v_; +text: .text%__1cKciTypeFlowLStateVectorJcopy_into6kMp1_v_; +text: .text%__1cXmembar_release_lockNodeMideal_Opcode6kM_i_; +text: .text%__1cOcompL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cMoutputStreamPupdate_position6MpkcL_v_; +text: .text%__1cMstringStreamFwrite6MpkcL_v_; +text: .text%__1cKciTypeFlowQadd_to_work_list6Mpn0AFBlock__v_; +text: .text%__1cKciTypeFlowKflow_block6Mpn0AFBlock_pn0ALStateVector_pn0AGJsrSet__v_; +text: .text%__1cKciTypeFlowFBlockKsuccessors6MpnQciBytecodeStream_pn0ALStateVector_pn0AGJsrSet__pnNGrowableArray4Cp1___; +text: .text%__1cKciTypeFlowOwork_list_next6M_pn0AFBlock__; +text: .text%__1cIPipelineXfunctional_unit_latency6kMIpk0_I_; +text: .text%__1cMPhaseIterGVNJtransform6MpnENode__2_; +text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cSCompareAndSwapNodeGis_CFG6kM_i_; +text: .text%__1cNGrowableArray4CpnOMethodLivenessKBasicBlock__Icontains6kMrk2_i_; +text: .text%__1cKciTypeFlowFBlock2t6Mp0pn0AFRange_pn0AGJsrSet__v_; +text: .text%__1cNGrowableArray4CpnKciTypeFlowJJsrRecord__2t6MpnFArena_iirk2_v_; +text: .text%__1cKstoreCNodeJnum_opnds6kM_I_; +text: .text%__1cNmodI_rRegNodeErule6kM_I_; +text: .text%__1cKInlineTreeWfind_subtree_from_root6Fp0pnIJVMState_pnIciMethod_i_1_; +text: .text%__1cNGrowableArray4CpnPciInstanceKlass__2t6MpnFArena_iirk1_v_; +text: .text%__1cKciTypeFlowFBlockScompute_exceptions6M_v_; +text: .text%__1cYciExceptionHandlerStreamFcount6M_i_; +text: .text%__1cINodeHashJhash_find6MpknENode__p1_; +text: .text%__1cFParsePdo_field_access6Mii_v_; +text: .text%__1cPThreadLocalNodeLbottom_type6kM_pknEType__; +text: .text%__1cOMethodLivenessNmake_block_at6Mipn0AKBasicBlock__2_; +text: .text%__1cKstorePNodeHtwo_adr6kM_I_; +text: .text%__1cKciTypeFlowPflow_successors6MpnNGrowableArray4Cpn0AFBlock___pn0ALStateVector__v_; +text: .text%__1cGciTypeMis_classless6kM_i_; +text: .text%__1cRsalI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cJloadFNodeErule6kM_I_; +text: .text%__1cKBranchDataNis_BranchData6M_i_; +text: .text%__1cIJumpDataLis_JumpData6M_i_; +text: .text%__1cSMemBarCPUOrderNodeGOpcode6kM_i_; +text: .text%__1cLklassVtableNput_method_at6MpnNmethodOopDesc_i_v_; +text: .text%__1cHi2sNodeMideal_Opcode6kM_i_; +text: .text%__1cKstoreCNodeMideal_Opcode6kM_i_; +text: .text%__1cRshrI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNloadConI0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNloadKlassNodeJnum_opnds6kM_I_; +text: .text%__1cHCompileKalias_type6MpnHciField__pn0AJAliasType__; +text: .text%__1cLStringTableGintern6FnGHandle_pHipnGThread__pnHoopDesc__; +text: .text%__1cLStringTableLhash_string6FpHi_i_; +text: .text%__1cMCreateExNodeGpinned6kM_i_; +text: .text%__1cFciEnvWget_klass_by_name_impl6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cNloadKlassNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRScavengeRootsTaskEname6M_pc_; +text: 
.text%__1cRScavengeRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cNtestP_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cKstoreINodeHtwo_adr6kM_I_; +text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cPCountedLoopNodeGOpcode6kM_i_; +text: .text%__1cHTypeIntEmake6Fii_pk0_; +text: .text%__1cRcmpFastUnlockNodeHtwo_adr6kM_I_; +text: .text%__1cJloadSNodeMideal_Opcode6kM_i_; +text: .text%__1cPDictionaryEntrybAcontains_protection_domain6kMpnHoopDesc__i_; +text: .text%__1cIregFOperEtype6kM_pknEType__; +text: .text%__1cLLShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%JVM_GetCPMethodNameUTF; +text: .text%__1cMLinkResolverbNlinktime_resolve_virtual_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cNsubI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cLTypeInstPtrFxdual6kM_pknEType__; +text: .text%__1cNsubI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cIGraphKitOreplace_in_map6MpnENode_2_v_; +text: .text%__1cMPhaseChaitinLclone_projs6MpnFBlock_IpnENode_4rI_i_; +text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cLcastP2LNodeJnum_opnds6kM_I_; +text: .text%__1cOMethodLivenessNwork_list_add6Mpn0AKBasicBlock__v_; +text: .text%__1cFParseFBlockNlocal_type_at6kMi_pknEType__; +text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_; +text: .text%__1cHTypeIntFxdual6kM_pknEType__; +text: .text%__1cEUTF8Hstrrchr6FpWiW_1_; +text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_nGHandle_2ipnGThread__pnMklassOopDesc__; +text: .text%__1cNLoadKlassNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHCompileZintrinsic_insertion_index6MpnIciMethod_i_i_; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_; +text: .text%__1cJVectorSetFClear6M_v_; +text: .text%__1cMMergeMemNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_; +text: .text%__1cICodeHeapSallocated_capacity6kM_L_; +text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_; +text: .text%__1cIMachOperEtype6kM_pknEType__; +text: .text%__1cLjmpConUNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cNCallGenerator2t6MpnIciMethod__v_; +text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_; +text: .text%__1cRis_error_reported6F_i_; +text: .text%__1cLStatSamplerLsample_data6FpnMPerfDataList__v_; +text: .text%__1cPStatSamplerTaskEtask6M_v_; +text: .text%__1cMPeriodicTaskMtime_to_wait6F_L_; +text: .text%__1cMPeriodicTaskOreal_time_tick6FL_v_; +text: .text%__1cNWatcherThreadRis_Watcher_thread6kM_i_; +text: .text%__1cLStatSamplerOcollect_sample6F_v_; +text: .text%__1cJloadBNodePoper_input_base6kM_I_; +text: .text%__1cMVM_OperationPevaluation_mode6kM_n0AEMode__; +text: .text%__1cIGraphKit2t6M_v_; +text: .text%__1cOemit_d64_reloc6FrnKCodeBuffer_lnJrelocInfoJrelocType_i_v_; +text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_; +text: .text%__1cIGraphKitNset_map_clone6MpnNSafePointNode__v_; +text: .text%__1cRRawBytecodeStream2t6MnMmethodHandle__v_; +text: .text%__1cENodeHget_ptr6kM_l_; +text: .text%__1cFStateM_sub_Op_ConP6MpknENode__v_; +text: .text%__1cJloadPNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKI2CAdapterOis_i2c_adapter6kM_i_; +text: 
.text%__1cOcompU_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cQciBytecodeStreamWget_field_holder_index6M_i_; +text: .text%__1cQciBytecodeStreamZget_declared_field_holder6M_pnPciInstanceKlass__; +text: .text%__1cRinterpretedVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cMorI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_; +text: .text%__1cFParseRensure_memory_phi6Mii_pnHPhiNode__; +text: .text%__1cNdecI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cJLoadINodeJideal_reg6kM_I_; +text: .text%__1cKRelocationWfix_relocation_at_move6Ml_v_; +text: .text%__1cPindOffset32OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cHAddNodePadd_of_identity6kMpknEType_3_3_; +text: .text%__1cMFastLockNodeGOpcode6kM_i_; +text: .text%__1cScompU_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cHCompilebAallow_range_check_smearing6kM_i_; +text: .text%__1cLBuildCutout2T6M_v_; +text: .text%__1cLBuildCutout2t6MpnIGraphKit_pnENode_ff_v_; +text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cQjava_lang_StringQbasic_create_oop6FpnQtypeArrayOopDesc_ipnGThread__pnHoopDesc__; +text: .text%__1cICodeHeapIcapacity6kM_L_; +text: .text%__1cKMemoryPoolImax_size6kM_L_; +text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cMCodeHeapPoolNused_in_bytes6M_L_; +text: .text%__1cMPhaseChaitinTsplit_Rematerialize6MpnENode_pnFBlock_IrInNGrowableArray4CI__ipIp2i_2_; +text: .text%__1cJcmpOpOperFccode6kM_i_; +text: .text%__1cKjmpDirNodeTmay_be_short_branch6kM_i_; +text: .text%__1cKjmpDirNodeOis_pc_relative6kM_i_; +text: .text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_; +text: .text%__1cEUTF8Enext6FpkcpH_pc_; +text: .text%__1cOcompL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cbAPSEvacuateFollowersClosureHdo_void6M_v_; +text: .text%__1cFParseKdo_get_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_; +text: .text%__1cHMulNodeEhash6kM_I_; +text: .text%__1cGRFrame2t6MnFframe_pnKJavaThread_p0_v_; +text: .text%__1cTconvI2L_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_; +text: .text%__1cScompU_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNFingerprinterJdo_object6Mii_v_; +text: .text%__1cMloadConFNodePoper_input_base6kM_I_; +text: .text%__1cOGenerateOopMapTmerge_state_vectors6MpnNCellTypeState_2_i_; +text: .text%__1cMloadConFNodeHtwo_adr6kM_I_; +text: .text%__1cICallNodeSis_CallDynamicJava6kM_pknTCallDynamicJavaNode__; +text: .text%__1cRcmpFastUnlockNodeJnum_opnds6kM_I_; +text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_; +text: .text%__1cUParallelScavengeHeapMmem_allocate6MLii_pnIHeapWord__; +text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cKjmpConNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMethodLivenessKBasicBlockQcompute_gen_kill6MpnIciMethod__v_; +text: .text%__1cOMethodLivenessKBasicBlock2t6Mp0ii_v_; +text: .text%__1cMloadConFNodeErule6kM_I_; +text: .text%__1cLcastP2LNodeHtwo_adr6kM_I_; +text: .text%__1cIMachOperIconstant6kM_l_; +text: .text%__1cJloadSNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cPcheckCastPPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMTypeKlassPtrKadd_offset6kMi_pknHTypePtr__; +text: 
.text%__1cUEdenMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cYSurvivorMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cYSurvivorMutableSpacePoolImax_size6kM_L_; +text: .text%__1cUEdenMutableSpacePoolNused_in_bytes6M_L_; +text: .text%__1cUEdenMutableSpacePoolImax_size6kM_L_; +text: .text%__1cYSurvivorMutableSpacePoolNused_in_bytes6M_L_; +text: .text%__1cKjmpConNodeTmay_be_short_branch6kM_i_; +text: .text%__1cKjmpConNodeOis_pc_relative6kM_i_; +text: .text%__1cHConNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKCodeBufferKend_a_stub6M_v_; +text: .text%__1cTemit_java_to_interp6FrnKCodeBuffer__v_; +text: .text%__1cKCodeBufferMstart_a_stub6M_v_; +text: .text%__1cFParseUprofile_taken_branch6Mi_v_; +text: .text%__1cKciTypeFlowGJsrSetNapply_control6Mp0pnQciBytecodeStream_pn0ALStateVector__v_; +text: .text%__1cKReturnNodeGis_CFG6kM_i_; +text: .text%__1cRSignatureIteratorSskip_optional_size6M_v_; +text: .text%__1cRaddI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHTypeIntFwiden6kMpknEType__3_; +text: .text%__1cTCompareAndSwapLNodeGOpcode6kM_i_; +text: .text%__1cNmethodOopDescbHhas_unloaded_classes_in_signature6FnMmethodHandle_pnGThread__i_; +text: .text%__1cIciObjectRis_instance_klass6M_i_; +text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_; +text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_; +text: .text%__1cPloadConUL32NodeMideal_Opcode6kM_i_; +text: .text%__1cEUTF8Ounicode_length6Fpkci_i_; +text: .text%__1cIciMethodRget_flow_analysis6M_pnKciTypeFlow__; +text: .text%__1cICodeBlobLoop_addr_at6kMi_ppnHoopDesc__; +text: .text%__1cNloadRangeNodeHtwo_adr6kM_I_; +text: .text%__1cJloadLNodeJnum_opnds6kM_I_; +text: .text%__1cSmembar_acquireNodeMideal_Opcode6kM_i_; +text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__l_; +text: .text%__1cHoopDescSslow_identity_hash6M_l_; +text: .text%__1cKMemBarNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_; +text: .text%__1cIMachNodeOpipeline_class6F_pknIPipeline__; +text: .text%__1cIMachNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadCNodeErule6kM_I_; +text: .text%__1cKOSRAdapterIis_alive6kM_i_; +text: .text%__1cQjava_lang_StringMbasic_create6FpnQtypeArrayOopDesc_ipnGThread__nGHandle__; +text: .text%__1cRMachNullCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGThreadXclear_pending_exception6M_v_; +text: .text%__1cXindIndexScaleOffsetOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cOcompL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__; +text: .text%__1cETypeRget_typeflow_type6FpnGciType__pk0_; +text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_; +text: .text%__1cKciTypeFlowLStateVectorJdo_invoke6MpnQciBytecodeStream_i_v_; +text: .text%__1cKstorePNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKReturnNodeKmatch_edge6kMI_I_; +text: .text%__1cKRegionNodeJideal_reg6kM_I_; +text: .text%__1cJloadINodeHtwo_adr6kM_I_; +text: .text%__1cQmark_inner_loops6FpnIPhaseCFG_pnFBlock__v_: block.o; +text: .text%__1cIHaltNodeJideal_reg6kM_I_; +text: .text%__1cFStateM_sub_Op_Halt6MpknENode__v_; +text: .text%__1cWShouldNotReachHereNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReturnNodeGOpcode6kM_i_; +text: .text%__1cJTypeTupleKmake_range6FpnLciSignature__pk0_; +text: .text%__1cKStoreLNodeGOpcode6kM_i_; +text: 
.text%__1cPCountedLoopNodeOis_CountedLoop6M_p0_; +text: .text%__1cJTypeTupleLmake_domain6FpnPciInstanceKlass_pnLciSignature__pk0_; +text: .text%__1cPClassFileParserWparse_field_attributes6MnSconstantPoolHandle_iHpHpi2pnPtypeArrayHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_modifiers6MiipnGThread__v_; +text: .text%__1cMindirectOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMindirectOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMLinkResolverUresolve_virtual_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_virtual_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cFKlassXcan_be_statically_bound6FpnNmethodOopDesc__i_; +text: .text%__1cLProfileDataOtranslate_from6Mp0_v_; +text: .text%__1cKstorePNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKstoreINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSPSKeepAliveClosureGdo_oop6MppnHoopDesc__v_; +text: .text%__1cNloadConI0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvI2L_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMWarmCallInfoKalways_hot6F_p0_; +text: .text%__1cMWarmCallInfoGis_hot6kM_i_; +text: .text%__1cNprefetchwNodeMideal_Opcode6kM_i_; +text: .text%__1cIAddINodeJideal_reg6kM_I_; +text: .text%__1cNCatchProjNode2t6MpnENode_Ii_v_; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__; +text: .text%__1cLBoxLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmulL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cKciTypeFlowIcan_trap6MrnQciBytecodeStream__i_; +text: .text%__1cQVMOperationQdDueueLqueue_empty6Mi_i_; +text: .text%__1cIProjNodeDcmp6kMrknENode__I_; +text: .text%__1cSComputeAdapterInfoGdo_int6M_v_; +text: .text%__1cNCatchProjNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%JVM_GetCPFieldSignatureUTF; +text: .text%__1cIGraphKitTtoo_many_recompiles6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cHCompileFstart6kM_pnJStartNode__; +text: .text%__1cNmulL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cLPCTableNodeEhash6kM_I_; +text: .text%__1cIGraphKitZadd_exception_states_from6MpnIJVMState__v_; +text: .text%__1cJStartNodeOis_block_start6kM_i_; +text: .text%__1cQComputeCallStackHdo_void6M_v_; +text: .text%__1cNaddI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitOtoo_many_traps6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cNciCallProfileRapply_prof_factor6Mf_v_; +text: .text%__1cIciMethodTcall_profile_at_bci6Mi_nNciCallProfile__; +text: .text%__1cHCompileOcall_generator6MpnIciMethod_ipnIJVMState_if_pnNCallGenerator__; +text: .text%__1cHCompileOfind_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cMindIndexOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMindIndexOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%jni_GetPrimitiveArrayCritical: jni.o; +text: .text%jni_ReleasePrimitiveArrayCritical: jni.o; +text: .text%__1cSInterpreterRuntimeDldc6FpnKJavaThread_i_v_; +text: .text%__1cPClassFileParserbIparse_constant_pool_fieldref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNdecI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cFKlassOis_subclass_of6kMpnMklassOopDesc__i_; +text: .text%__1cNGrowableArray4Cl_2t6Mii_v_; +text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMpnNmethodOopDesc_i_pnHnmethod__; +text: .text%__1cIAddINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cLPCTableNodeKis_PCTable6kM_pk0_; +text: 
.text%__1cLPCTableNodeHsize_of6kM_I_; +text: .text%__1cNincI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__; +text: .text%__1cQciBytecodeStreamXget_method_holder_index6M_i_; +text: .text%__1cMorI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cLimmUL32OperJconstantL6kM_x_; +text: .text%__1cIGraphKitWround_double_arguments6MpnIciMethod__v_; +text: .text%__1cFParseHdo_call6M_v_; +text: .text%__1cIGraphKitTround_double_result6MpnIciMethod__v_; +text: .text%__1cFParseZcan_not_compile_call_site6MpnIciMethod_pnPciInstanceKlass__i_; +text: .text%__1cQciBytecodeStreambAget_declared_method_holder6M_pnHciKlass__; +text: .text%__1cFParseMprofile_call6MpnENode__v_; +text: .text%__1cScompP_mem_rRegNodePoper_input_base6kM_I_; +text: .text%__1cICodeHeapLheader_size6F_L_; +text: .text%__1cJloadBNodeJnum_opnds6kM_I_; +text: .text%__1cENodeLbottom_type6kM_pknEType__; +text: .text%__1cXindIndexScaleOffsetOperNconstant_disp6kM_i_; +text: .text%__1cSindIndexOffsetOperJnum_edges6kM_I_; +text: .text%__1cFKlassMnext_sibling6kM_p0_; +text: .text%__1cOGenerateOopMapNrestore_state6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_; +text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_; +text: .text%__1cKciTypeFlowLStateVectorStype_meet_internal6FpnGciType_3p0_3_; +text: .text%__1cRcmpFastUnlockNodeErule6kM_I_; +text: .text%__1cIBoolNodeHsize_of6kM_I_; +text: .text%__1cLLShiftINodeLbottom_type6kM_pknEType__; +text: .text%jni_DeleteLocalRef: jni.o; +text: .text%__1cJloadSNodePoper_input_base6kM_I_; +text: .text%__1cPno_rax_RegPOperJnum_edges6kM_I_; +text: .text%__1cOcompI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%jni_SetIntField: jni.o; +text: .text%__1cMURShiftLNodeLbottom_type6kM_pknEType__; +text: .text%__1cMMutableSpaceFclear6M_v_; +text: .text%__1cNtestI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__; +text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__; +text: .text%__1cNprefetchwNodePoper_input_base6kM_I_; +text: .text%__1cTCreateExceptionNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cHMulNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cVExceptionHandlerTableJadd_entry6MnRHandlerTableEntry__v_; +text: .text%__1cPsalI_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cRaddP_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMLinkResolverbFlinktime_resolve_special_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverUresolve_special_call6FrnICallInfo_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_special_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_4ipnGThread__v_; +text: .text%__1cQComputeCallStackJdo_object6Mii_v_; +text: .text%__1cIGraphKitNcast_not_null6MpnENode__2_; +text: .text%__1cTconvL2I_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cKPSYoungGenNused_in_bytes6kM_L_; +text: .text%__1cOCompilerOracleOshould_exclude6FnMmethodHandle__i_; +text: .text%__1cOGenerateOopMapHset_var6MinNCellTypeState__v_; +text: .text%__1cPcheckCastPPNodeJnum_opnds6kM_I_; +text: .text%__1cLLShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cGIfNodeHsize_of6kM_I_; +text: .text%__1cPciInstanceKlassFsuper6M_p0_; +text: .text%__1cOcompL_rRegNodeMcisc_operand6kM_i_; +text: 
.text%__1cLLShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cScompI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMMergeMemNodeJideal_reg6kM_I_; +text: .text%__1cNandL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cIciMethodWwas_executed_more_than6Mi_i_; +text: .text%__1cSReferenceProcessorbAprocess_discovered_reflist6MppnHoopDesc_pnPReferencePolicy_i_v_; +text: .text%__1cSReferenceProcessorOprocess_phase36MppnHoopDesc_ipnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: .text%__1cSReferenceProcessorOprocess_phase26MppnHoopDesc_pnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSReferenceProcessorbAenqueue_discovered_reflist6MpnHoopDesc_p2_v_; +text: .text%__1cTleaPIdxScaleOffNodeMideal_Opcode6kM_i_; +text: .text%__1cTleaPIdxScaleOffNodePoper_input_base6kM_I_; +text: .text%__1cFLabelJadd_patch6Mi_v_; +text: .text%__1cKMemBarNodeEhash6kM_I_; +text: .text%__1cOcompP_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNloadConL0NodePoper_input_base6kM_I_; +text: .text%__1cNsubI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJMarkSweepSMarkAndPushClosureLdo_nmethods6kM_ki_; +text: .text%__1cIXorINodeGOpcode6kM_i_; +text: .text%__1cTStackWalkCompPolicyRcompilation_level6MnMmethodHandle_i_i_; +text: .text%__1cMoutputStreamDput6Mc_v_; +text: .text%__1cPindOffset32OperNbase_position6kM_i_; +text: .text%__1cPindOffset32OperNconstant_disp6kM_i_; +text: .text%__1cOcompU_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeHtwo_adr6kM_I_; +text: .text%__1cPBytecode_invokeFindex6kM_i_; +text: .text%__1cFframeNis_glue_frame6kM_i_; +text: .text%__1cLRShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cZPhaseConservativeCoalesceKupdate_ifg6MIIpnIIndexSet_2_v_; +text: .text%__1cZPhaseConservativeCoalesceMunion_helper6MpnENode_2II222pnFBlock_I_v_; +text: .text%__1cIIndexSetEswap6Mp0_v_; +text: .text%__1cOPhaseIdealLoopIsink_use6MpnENode_2_v_; +text: .text%__1cRshrL_rReg_immNodeErule6kM_I_; +text: .text%__1cKInlineTreeMok_to_inline6MpnIciMethod_pnIJVMState_rnNciCallProfile_pnMWarmCallInfo__8_; +text: .text%__1cTpass_initial_checks6FpnIciMethod_i1_i_; +text: .text%__1cKInlineTreeMshouldInline6kMpnIciMethod_irnNciCallProfile_pnMWarmCallInfo__pkc_; +text: .text%__1cOCompilerOracleNshould_inline6FnMmethodHandle__i_; +text: .text%__1cKInlineTreeNtry_to_inline6MpnIciMethod_irnNciCallProfile_pnMWarmCallInfo__pkc_; +text: .text%__1cIciMethodbAinterpreter_throwout_count6kM_i_; +text: .text%__1cIciMethodNshould_inline6M_i_; +text: .text%__1cIciMethodOshould_exclude6M_i_; +text: .text%__1cScompU_rReg_immNodeErule6kM_I_; +text: .text%__1cKjmpDirNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cKjmpDirNodeJis_Branch6kM_I_; +text: .text%__1cKjmpDirNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMindirectOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cILoadNodeDcmp6kMrknENode__I_; +text: .text%__1cLTypeInstPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cIGraphKitHjava_bc6kM_nJBytecodesECode__; +text: .text%__1cFLabelSpatch_instructions6MpnRAbstractAssembler__v_; +text: .text%__1cRAbstractAssemblerHbind_to6MrnFLabel_i_v_; +text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_; +text: .text%__1cNloadConL0NodeErule6kM_I_; +text: .text%__1cJStoreNodeSIdeal_masked_input6MpnIPhaseGVN_I_pnENode__; +text: .text%__1cIGraphKitNbuiltin_throw6MnODeoptimizationLDeoptReason_pnENode__v_; +text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__; +text: 
.text%__1cJrRegIOperFclone6kM_pnIMachOper__; +text: .text%__1cMindIndexOperFscale6kM_i_; +text: .text%__1cScompP_mem_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cFKlassPoop_is_instance6kM_i_; +text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_; +text: .text%__1cRandI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cMMachProjNodeHsize_of6kM_I_; +text: .text%__1cJStoreNodeZIdeal_sign_extended_input6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cScompP_mem_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cTconvF2D_reg_memNodeErule6kM_I_; +text: .text%__1cPindOffset32OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFStateM_sub_Op_CmpP6MpknENode__v_; +text: .text%__1cPciInstanceKlassUget_canonical_holder6Mi_p0_; +text: .text%__1cMloadConLNodeMideal_Opcode6kM_i_; +text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_; +text: .text%__1cIciSymbolEmake6Fpkc_p0_; +text: .text%__1cIciSymbolJmake_impl6Fpkc_p0_; +text: .text%__1cScompU_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cKimmL32OperIconstant6kM_l_; +text: .text%__1cHi2sNodePoper_input_base6kM_I_; +text: .text%__1cKimmL32OperJnum_edges6kM_I_; +text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_; +text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cLBoxLockNodeKstack_slot6FpnENode__i_; +text: .text%__1cLBoxLockNodeKis_BoxLock6kM_pk0_; +text: .text%__1cIBoolNodeDcmp6kMrknENode__I_; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_; +text: .text%__1cKDataLayoutPneeds_array_len6FC_i_; +text: .text%__1cKDataLayoutKinitialize6MCHi_v_; +text: .text%__1cJloadLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframebHnext_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_; +text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_; +text: .text%__1cFParseRbranch_prediction6Mrf_f_; +text: .text%__1cPshrI_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cHciKlassNis_subtype_of6Mp0_i_; +text: .text%__1cOcompL_rRegNodeErule6kM_I_; +text: .text%__1cNGrowableArray4Cpv_Praw_at_put_grow6Mirk03_v_; +text: .text%__1cNGrowableArray4Cl_Praw_at_put_grow6Mirkl2_v_; +text: .text%__1cISubINodeLbottom_type6kM_pknEType__; +text: .text%__1cIGraphKitZset_all_rewritable_memory6MpnENode__v_; +text: .text%__1cIGraphKitTset_all_memory_call6MpnENode__v_; +text: .text%__1cMtlsLoadPNodeLbottom_type6kM_pknEType__; +text: .text%__1cJAssemblerEmovq6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cRsalI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cNloadRangeNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_; +text: .text%__1cGRFrameMset_distance6Mi_v_; +text: .text%__1cICodeBlobOis_c2i_adapter6kM_i_; +text: .text%__1cFframeTis_first_java_frame6kM_i_; +text: .text%__1cFframeLreal_sender6kMpnLRegisterMap__0_; +text: .text%__1cGRFrameGcaller6M_p0_; +text: .text%__1cTStackWalkCompPolicyIsenderOf6MpnGRFrame_pnNGrowableArray4C2___2_; +text: .text%__1cGRFrameKnew_RFrame6FnFframe_pnKJavaThread_p0_4_; +text: .text%__1cKstoreLNodePoper_input_base6kM_I_; +text: .text%__1cTconstantPoolOopDescMklass_at_put6MipnMklassOopDesc__v_; +text: .text%__1cNFingerprinterGdo_int6M_v_; +text: .text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cRaddI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cRshrL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHPhiNodeEmake6FpnENode_2_p0_; +text: .text%__1cScompI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__; +text: 
.text%__1cWstatic_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cLRethrowNodeGis_CFG6kM_i_; +text: .text%__1cIciObjectFklass6M_pnHciKlass__; +text: .text%__1cNloadConP0NodeMideal_Opcode6kM_i_; +text: .text%__1cOPhaseIdealLoopOsplit_thru_phi6MpnENode_2i_2_; +text: .text%__1cNGCTaskManagerRset_resource_flag6MIi_v_; +text: .text%__1cRshrI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cUmembar_cpu_orderNodeMideal_Opcode6kM_i_; +text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_; +text: .text%__1cKEntryPointFentry6kMnITosState__pC_; +text: .text%__1cJloadCNodeMideal_Opcode6kM_i_; +text: .text%__1cKJavaThreadJframes_do6MpFpnFframe_pknLRegisterMap__v_v_; +text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_; +text: .text%__1cRInvocationCounterJset_carry6M_v_; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cFStateM_sub_Op_RegL6MpknENode__v_; +text: .text%__1cNdecI_rRegNodeErule6kM_I_; +text: .text%__1cKjmpConNodeJis_Branch6kM_I_; +text: .text%__1cKjmpConNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmpFastLockNodeMideal_Opcode6kM_i_; +text: .text%__1cKjmpConNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fl_v_; +text: .text%__1cNCallGeneratorJis_inline6kM_i_; +text: .text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_; +text: .text%__1cCosRcurrent_thread_id6F_l_; +text: .text%__1cKciTypeFlowLStateVectorMdo_getstatic6MpnQciBytecodeStream__v_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_; +text: .text%__1cLLShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKstoreLNodeMideal_Opcode6kM_i_; +text: .text%__1cNloadKlassNodeHtwo_adr6kM_I_; +text: .text%__1cFParseYprofile_not_taken_branch6M_v_; +text: .text%__1cHPhiNodeMslice_memory6kMpknHTypePtr__p0_; +text: .text%__1cOcompL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cLRuntimeStubMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__; +text: .text%__1cOcompL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cPcmpFastLockNodePoper_input_base6kM_I_; +text: .text%__1cNCallGeneratorKis_virtual6kM_i_; +text: .text%__1cKInlineTreePshouldNotInline6kMpnIciMethod_pnMWarmCallInfo__pkc_; +text: .text%__1cLcastP2LNodeErule6kM_I_; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cNPhaseRegAllocKoffset2reg6kMi_i_; +text: .text%__1cQjmpCon_shortNodeMideal_Opcode6kM_i_; +text: .text%__1cQjmpCon_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_pnGRFrame__v_; +text: .text%__1cTconvI2L_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cENodeRlatency_from_uses6kMrnLBlock_Array_rnNGrowableArray4CI___i_; +text: .text%__1cNGrowableArray4CI_Praw_at_put_grow6MirkI2_v_; +text: .text%__1cFParseFdo_if6MpnENode_2nIBoolTestEmask_2_v_; +text: .text%__1cXmembar_acquire_lockNodeMideal_Opcode6kM_i_; +text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cXindIndexScaleOffsetOperOindex_position6kM_i_; +text: .text%__1cXindIndexScaleOffsetOperNbase_position6kM_i_; +text: .text%__1cPsalI_rReg_1NodePoper_input_base6kM_I_; +text: .text%__1cIGraphKitJpush_node6MnJBasicType_pnENode__v_; +text: .text%__1cISubINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSconstMethodOopDescbEchecked_exceptions_length_addr6kM_pH_; +text: .text%__1cPThreadLocalNodeGOpcode6kM_i_; +text: 
.text%__1cRsubI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cJloadCNodePoper_input_base6kM_I_; +text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__; +text: .text%__1cCosXthread_local_storage_at6Fi_pv_; +text: .text%__1cLRuntimeStubYcaller_must_gc_arguments6kMpnKJavaThread__i_; +text: .text%__1cIAddPNodeJideal_reg6kM_I_; +text: .text%__1cTleaPIdxScaleOffNodeJnum_opnds6kM_I_; +text: .text%__1cRaddI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cFParseNthrow_to_exit6MpnNSafePointNode__v_; +text: .text%__1cQCallLeafNoFPNodeGOpcode6kM_i_; +text: .text%__1cKTypeRawPtrHget_con6kM_l_; +text: .text%__1cOClearArrayNodeGOpcode6kM_i_; +text: .text%__1cOoop_RelocationHoops_do6MpFppnHoopDesc__v_v_; +text: .text%__1cIciMethodbHhas_unloaded_classes_in_signature6M_i_; +text: .text%__1cScompP_mem_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cFStateM_sub_Op_CmpI6MpknENode__v_; +text: .text%__1cNincI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cJimmP0OperEtype6kM_pknEType__; +text: .text%__1cNloadConP0NodeLbottom_type6kM_pknEType__; +text: .text%__1cPloadConUL32NodeLbottom_type6kM_pknEType__; +text: .text%__1cNloadConI0NodeHsize_of6kM_I_; +text: .text%__1cRaddI_rReg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cPshrI_rReg_1NodePoper_input_base6kM_I_; +text: .text%JVM_handle_solaris_signal; +text: .text%signalHandler; +text: .text%__1cQJNI_FastGetFieldQfind_slowcase_pc6FpC_1_; +text: .text%__1cMLinkResolverbElinktime_resolve_static_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cRresolve_and_patch6FppnHoopDesc__v_; +text: .text%__1cFStateN_sub_Op_LoadP6MpknENode__v_; +text: .text%__1cISubINodeDsub6kMpknEType_3_3_; +text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_; +text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cRInterpretedRFrameOis_interpreted6kM_i_; +text: .text%__1cGRFrameLis_compiled6kM_i_; +text: .text%__1cUPSGenerationCountersKupdate_all6M_v_; +text: .text%__1cTStackWalkCompPolicyMshouldInline6FnMmethodHandle_fi_pkc_; +text: .text%__1cFArenaRdestruct_contents6M_v_; +text: .text%__1cIGraphKitPstore_to_memory6MpnENode_22nJBasicType_i_2_; +text: .text%__1cJStoreNodeEmake6FpnENode_22pknHTypePtr_2nJBasicType__p0_; +text: .text%__1cXPhaseAggressiveCoalesceYinsert_copy_with_overlap6MpnFBlock_pnENode_II_v_; +text: .text%__1cULinearLeastSquareFitGupdate6Mdd_v_; +text: .text%__1cKciTypeFlowGJsrSetSis_compatible_with6Mp1_i_; +text: .text%__1cENodeIadd_prec6Mp0_v_; +text: .text%__1cKOSRAdapterOis_osr_adapter6kM_i_; +text: .text%__1cIMulINodeGOpcode6kM_i_; +text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_; +text: .text%__1cNGrowableArray4CpnGciType__2t6MpnFArena_iirk1_v_; +text: .text%__1cKTypeAryPtrFempty6kM_i_; +text: .text%__1cHTypeAryFempty6kM_i_; +text: .text%__1cJloadCNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cIciMethod2t6MnMmethodHandle__v_; +text: .text%__1cRandI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_rnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cCosRelapsed_frequency6F_x_; +text: .text%__1cNinstanceKlassVis_same_class_package6FpnHoopDesc_pnNsymbolOopDesc_24_i_; +text: .text%__1cMelapsedTimerHseconds6kM_d_; +text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_; +text: .text%__1cJAssemblerDnop6M_v_; +text: .text%__1cDCHANprocess_class6FnLKlassHandle_pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: 
.text%__1cEUTF8Fequal6FpWi1i_i_; +text: .text%__1cKstoreLNodeJnum_opnds6kM_I_; +text: .text%__1cIjniIdMapHoops_do6MpnKOopClosure__v_; +text: .text%__1cMjniIdMapBaseHoops_do6MpnKOopClosure__v_; +text: .text%__1cJArrayDataKcell_count6M_i_; +text: .text%__1cGBitMapIset_from6M0_v_; +text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__; +text: .text%__1cKType_ArrayEgrow6MI_v_; +text: .text%JVM_Write; +text: .text%__1cDhpiFwrite6FipkvI_L_; +text: .text%__1cMStartC2INodeGOpcode6kM_i_; +text: .text%__1cSindIndexOffsetOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__; +text: .text%__1cIAddLNodeLbottom_type6kM_pknEType__; +text: .text%__1cKMemBarNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cIParmNodeJideal_reg6kM_I_; +text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_; +text: .text%__1cMtlsLoadPNodeMideal_Opcode6kM_i_; +text: .text%__1cRshrI_rReg_immNodeErule6kM_I_; +text: .text%__1cJcmpOpOperGnegate6M_v_; +text: .text%__1cHTypeAryEmake6FpknEType_pknHTypeInt__pk0_; +text: .text%__1cFParseRoptimize_inlining6MpnIciMethod_ipnPciInstanceKlass_24irnKInlineTreeLInlineStyle_r2_v_; +text: .text%__1cQimprove_receiver6FpnPciInstanceKlass_pknLTypeInstPtr_ri_1_; +text: .text%__1cPcmpFastLockNodeHtwo_adr6kM_I_; +text: .text%__1cMLinkResolverTresolve_static_call6FrnICallInfo_rnLKlassHandle_nMsymbolHandle_53iipnGThread__v_; +text: .text%__1cPCheckCastPPNodeJideal_reg6kM_I_; +text: .text%__1cKJavaThreadUin_stack_yellow_zone6MpC_i_; +text: .text%__1cFParseSmerge_memory_edges6MpnMMergeMemNode_ii_v_; +text: .text%__1cJAssemblerEmovq6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__v_; +text: .text%__1cUThreadSafepointStatebDhandle_polling_page_exception6M_v_; +text: .text%__1cLjmpConUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNandL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cKcmpOpUOperFccode6kM_i_; +text: .text%__1cLjmpConUNodeTmay_be_short_branch6kM_i_; +text: .text%__1cLjmpConUNodeOis_pc_relative6kM_i_; +text: .text%__1cIPipelinePoperand_latency6kMIpk0_I_; +text: .text%__1cWCallLeafNoFPDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cJTimeStampSticks_since_update6kM_x_; +text: .text%__1cURethrowExceptionNodeMideal_Opcode6kM_i_; +text: .text%__1cJloadPNodeFreloc6kM_i_; +text: .text%__1cTno_rax_rbx_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNprefetchwNodeJnum_opnds6kM_I_; +text: .text%__1cKjmpConNodeGnegate6M_v_; +text: .text%__1cMindirectOperFscale6kM_i_; +text: .text%__1cQSystemDictionaryVadd_loader_constraint6FnMsymbolHandle_nGHandle_2pnGThread__v_; +text: .text%__1cVLoaderConstraintTableJadd_entry6MnMsymbolHandle_pnMklassOopDesc_nGHandle_34pnGThread__i_; +text: .text%__1cRsubI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cQComputeCallStackGdo_int6M_v_; +text: .text%__1cNloadRangeNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNtestP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadRangeNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPCheckCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFParseMvisit_blocks6M_v_; +text: .text%__1cQjmpDir_shortNodeMideal_Opcode6kM_i_; +text: .text%__1cQjmpDir_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: 
.text%__1cKciTypeFlowLStateVectorLdo_getfield6MpnQciBytecodeStream__v_; +text: .text%__1cNaddI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cScompP_mem_rRegNodeErule6kM_I_; +text: .text%__1cPSignatureStreamRas_symbol_or_null6M_pnNsymbolOopDesc__; +text: .text%__1cNSafePointNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadSNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKMemBarNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOjmpLoopEndNodeMideal_Opcode6kM_i_; +text: .text%__1cFBlockTimplicit_null_check6MrnLBlock_Array_rnNGrowableArray4CI__pnENode_6_v_; +text: .text%__1cKCastPPNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_; +text: .text%__1cGThreadSis_Compiler_thread6kM_i_; +text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_; +text: .text%__1cJloadBNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cQVMOperationQdDueueSqueue_remove_front6Mi_pnMVM_Operation__; +text: .text%__1cOcompI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVCompressedWriteStreamEgrow6M_v_; +text: .text%__1cNCompileBrokerYcheck_compilation_result6FnMmethodHandle_iippnHnmethod__i_; +text: .text%__1cKReflectionVis_same_class_package6FpnMklassOopDesc_2_i_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnMklassOopDesc__i_; +text: .text%JVM_RawMonitorEnter; +text: .text%__1cFMutexMjvm_raw_lock6M_v_; +text: .text%JVM_RawMonitorExit; +text: .text%__1cFMutexOjvm_raw_unlock6M_v_; +text: .text%__1cHUNICODELutf8_length6FpHi_i_; +text: .text%__1cRaddP_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cIciMethodLis_accessor6kM_i_; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__; +text: .text%__1cLBoxLockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSmembar_releaseNodeMideal_Opcode6kM_i_; +text: .text%__1cQciBytecodeStreamSget_constant_index6kM_i_; +text: .text%__1cOGenerateOopMapOset_bbmark_bit6Mi_v_; +text: .text%__1cFParseOreturn_current6MpnENode__v_; +text: .text%__1cLConvI2LNodeJideal_reg6kM_I_; +text: .text%__1cJStartNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cMorI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cMloadConPNodeFreloc6kM_i_; +text: .text%__1cGThreadMis_VM_thread6kM_i_; +text: .text%__1cSPSPromotionManagerFreset6M_v_; +text: .text%__1cNPrefetchQdDueueFclear6M_v_; +text: .text%__1cSPSPromotionManagerKflush_labs6M_v_; +text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_; +text: .text%__1cNincI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cTJvmtiEventCollectorYunset_jvmti_thread_state6M_v_; +text: .text%__1cLRShiftINodeLbottom_type6kM_pknEType__; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_; +text: .text%__1cJTypeTupleFxdual6kM_pknEType__; +text: .text%__1cOcompP_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cHi2sNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_2_v_; +text: .text%__1cLcastP2LNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRciVirtualCallDataOtranslate_from6MpnLProfileData__v_; +text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o; +text: .text%__1cKstoreCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNCompileBrokerXcompilation_is_in_queue6FnMmethodHandle_i_i_; +text: .text%__1cRsubI_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cETypeCeq6kMpk0_i_; +text: .text%__1cHMatcherPstack_alignment6F_I_; +text: 
.text%__1cNloadKlassNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadSNodeJnum_opnds6kM_I_; +text: .text%__1cJMultiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRshrL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cTconvI2L_reg_memNodeErule6kM_I_; +text: .text%__1cMURShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRcmpFastUnlockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSInterpreterRuntimebAfrequency_counter_overflow6FpnKJavaThread_pC_n0AJIcoResult__; +text: .text%__1cHSubNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_; +text: .text%__1cLRethrowNodeGOpcode6kM_i_; +text: .text%__1cPcmpFastLockNodeJnum_opnds6kM_I_; +text: .text%__1cIciMethodPcan_be_compiled6M_i_; +text: .text%__1cFParseQcreate_entry_map6M_pnNSafePointNode__; +text: .text%__1cFParseIdo_exits6M_v_; +text: .text%__1cFParseLbuild_exits6M_v_; +text: .text%__1cFParseLinit_blocks6M_v_; +text: .text%__1cFParse2t6MpnIJVMState_pnIciMethod_f_v_; +text: .text%__1cIciMethodVhas_balanced_monitors6M_i_; +text: .text%__1cFParseNdo_all_blocks6M_v_; +text: .text%__1cOParseGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cOParseGeneratorJcan_parse6FpnIciMethod_i_i_; +text: .text%__1cFArenaEused6kM_L_; +text: .text%__1cRandI_rReg_immNodeErule6kM_I_; +text: .text%jni_GetSuperclass: jni.o; +text: .text%__1cPno_rax_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFStateM_sub_Op_AddI6MpknENode__v_; +text: .text%__1cPClassFileParserUskip_over_field_name6MpciI_1_; +text: .text%__1cFParsePdo_method_entry6M_v_; +text: .text%__1cNCallGeneratorKfor_inline6FpnIciMethod_f_p0_; +text: .text%__1cKciTypeFlowFBlockPclone_loop_head6Mp0ip1pn0AGJsrSet__3_; +text: .text%__1cLOpaque1NodeGOpcode6kM_i_; +text: .text%__1cHciKlassOis_subclass_of6Mp0_i_; +text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_; +text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_; +text: .text%__1cRruntime_type_from6FpnJJavaValue__nJBasicType__: javaCalls.o; +text: .text%__1cCosbCstack_shadow_pages_available6FpnGThread_nMmethodHandle__i_; +text: .text%__1cPJavaCallWrapper2t6MnMmethodHandle_nGHandle_pnJJavaValue_pnGThread__v_; +text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_; +text: .text%__1cRJavaCallArgumentsKparameters6M_pl_; +text: .text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cLCastP2LNodeLbottom_type6kM_pknEType__; +text: .text%__1cPJavaCallWrapper2T6M_v_; +text: .text%__1cVPreserveExceptionMark2T6M_v_; +text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_; +text: .text%__1cMrax_RegPOperJnum_edges6kM_I_; +text: .text%__1cMrax_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cITypeFuncFxdual6kM_pknEType__; +text: .text%__1cIimmLOperJconstantL6kM_x_; +text: .text%__1cIMulLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLRShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNCompileBrokerTcompile_method_base6FnMmethodHandle_ii1ipkcpnGThread__pnHnmethod__; +text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__; +text: .text%__1cNmethodOopDescWload_signature_classes6FnMmethodHandle_pnGThread__i_; +text: 
.text%__1cTconstantPoolOopDescbDresolve_string_constants_impl6FnSconstantPoolHandle_pnGThread__v_; +text: .text%__1cYDebugInformationRecorderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_; +text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cMLinkResolverXresolve_klass_no_update6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNaddL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cISubINodeGadd_id6kM_pknEType__; +text: .text%__1cNsubI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cGciType2t6MnLKlassHandle__v_; +text: .text%__1cMMutableSpaceKinitialize6MnJMemRegion_i_v_; +text: .text%__1cHciKlass2t6MnLKlassHandle__v_; +text: .text%__1cKInlineTree2t6MpnHCompile_pk0pnIciMethod_pnIJVMState_if_v_; +text: .text%__1cJEventMark2t6MpkcE_v_; +text: .text%__1cJloadCNodeJnum_opnds6kM_I_; +text: .text%__1cNaddI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cQComputeCallStackHdo_long6M_v_; +text: .text%__1cMindirectOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cRaddI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_; +text: .text%__1cQVMOperationQdDueueNqueue_oops_do6MipnKOopClosure__v_; +text: .text%__1cMCreateExNodeJideal_reg6kM_I_; +text: .text%__1cMorI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMorI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cRmethodDataOopDescLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cNSCMemProjNodeGOpcode6kM_i_; +text: .text%__1cNSignatureInfoHdo_long6M_v_; +text: .text%__1cLPCTableNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMCreateExNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRinterpretedVFrameDbci6kM_i_; +text: .text%__1cKInlineTreeYcompute_callee_frequency6kMi_f_; +text: .text%__1cKInlineTreebCbuild_inline_tree_for_callee6MpnIciMethod_pnIJVMState_i_p0_; +text: .text%__1cRinterpretedVFrameDbcp6kM_pC_; +text: .text%__1cRInterpretedRFrameKtop_vframe6kM_pnKjavaVFrame__; +text: .text%__1cIciMethodbBinterpreter_call_site_count6Mi_i_; +text: .text%__1cLRShiftLNodeGOpcode6kM_i_; +text: .text%__1cPsarI_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cNsubI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cOMethodLivenessKBasicBlockIload_two6Mi_v_; +text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__; +text: .text%__1cNmulL_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cNobjArrayKlassPoop_is_objArray6kM_i_; +text: .text%__1cLklassVtableXvtable_accessibility_at6Mi_n0AKAccessType__; +text: .text%__1cNrFlagsRegOperFclone6kM_pnIMachOper__; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_; +text: .text%__1cNCatchProjNodeDcmp6kMrknENode__I_; +text: .text%__1cIGraphKitRmake_slow_call_ex6MpnENode_pnPciInstanceKlass__v_; +text: .text%__1cTcompareAndSwapLNodePoper_input_base6kM_I_; +text: .text%__1cMloadConINodeHsize_of6kM_I_; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_i_v_; +text: .text%__1cRMachSafePointNodeLis_MachCall6M_pnMMachCallNode__; +text: .text%__1cNstoreImmINodeMideal_Opcode6kM_i_; +text: .text%__1cJScopeDescGis_top6kM_i_; +text: .text%__1cHOrINodeLbottom_type6kM_pknEType__; +text: .text%__1cPstoreImmI16NodeMideal_Opcode6kM_i_; +text: .text%__1cMindIndexOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cFStateQ_sub_Op_CreateEx6MpknENode__v_; +text: .text%__1cRshrL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cLjmpConUNodeJis_Branch6kM_I_; +text: 
.text%__1cLjmpConUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLjmpConUNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cRaddI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_; +text: .text%__1cHCompileXin_preserve_stack_slots6M_I_; +text: .text%__1cMMachCallNodeHis_Call6M_pnICallNode__; +text: .text%__1cNdecI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKStoreCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLLShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLklassVtableKis_miranda6FpnNmethodOopDesc_pnPobjArrayOopDesc_pnMklassOopDesc__i_; +text: .text%__1cTconvL2I_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cRalign_code_offset6Fi_I_; +text: .text%__1cMURShiftINodeLbottom_type6kM_pknEType__; +text: .text%__1cMorI_rRegNodeErule6kM_I_; +text: .text%__1cMLinkResolverVresolve_invokespecial6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cHMulNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cTconvL2I_reg_regNodeErule6kM_I_; +text: .text%__1cRmethodDataOopDescJbci_to_dp6Mi_pC_; +text: .text%__1cIAddLNodeGadd_id6kM_pknEType__; +text: .text%__1cRaddL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cLRShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFTypeFEhash6kM_i_; +text: .text%__1cIGraphKitMarray_length6MpnENode__2_; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_; +text: .text%__1cPsalI_rReg_1NodeErule6kM_I_; +text: .text%__1cIJVMState2t6Mi_v_; +text: .text%__1cNstoreImmBNodeHtwo_adr6kM_I_; +text: .text%__1cLLShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cScompU_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cNGrowableArray4Cl_Icontains6kMrkl_i_; +text: .text%__1cScompU_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_StringGequals6FpnHoopDesc_pHi_i_; +text: .text%__1cOcompP_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTCreateExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMURShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKRegionNodeOhas_unique_phi6kM_pnHPhiNode__; +text: .text%__1cIMulINodeLbottom_type6kM_pknEType__; +text: .text%__1cKTypeAryPtrFxdual6kM_pknEType__; +text: .text%__1cVExceptionHandlerTableMadd_subtable6MipnNGrowableArray4Cl__22_v_; +text: .text%__1cNandL_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cNObjectMonitorHis_busy6kM_l_; +text: .text%__1cEDict2t6MpFpkv2_ipF2_i_v_; +text: .text%__1cJAssemblerElock6M_v_; +text: .text%__1cJAssemblerIcmpxchgq6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cSsafePoint_pollNodePoper_input_base6kM_I_; +text: .text%__1cLRuntimeStubbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cPshrI_rReg_1NodeErule6kM_I_; +text: .text%__1cRmethodDataOopDescKmileage_of6FpnNmethodOopDesc__i_; +text: .text%__1cKManagementJtimestamp6F_x_; +text: .text%__1cbDReferenceProcessorInitializerIis_clean6kM_v_; +text: .text%__1cIPSOldGenPupdate_counters6M_v_; +text: .text%__1cNSingletonBlobIis_alive6kM_i_; +text: .text%__1cKTypeRawPtrCeq6kMpknEType__i_; +text: .text%__1cIregDOperEtype6kM_pknEType__; +text: .text%__1cQleaPIdxScaleNodeHtwo_adr6kM_I_; +text: .text%__1cTStackWalkCompPolicyPshouldNotInline6FnMmethodHandle__pkc_; +text: .text%__1cMPrefetchNodeLbottom_type6kM_pknEType__; +text: .text%__1cPcmpFastLockNodeErule6kM_I_; +text: 
.text%__1cFArena2t6M_v_; +text: .text%__1cSCallLeafDirectNodePoper_input_base6kM_I_; +text: .text%__1cMCallLeafNodeLis_CallLeaf6kM_pk0_; +text: .text%__1cQleaPIdxScaleNodeMideal_Opcode6kM_i_; +text: .text%__1cJcmpOpOperFequal6kM_i_; +text: .text%__1cScompI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_IsSameObject: jni.o; +text: .text%__1cNmulL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNmulL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cIGraphKitYcombine_exception_states6MpnNSafePointNode_2_v_; +text: .text%__1cJloadBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMrcx_RegIOperJnum_edges6kM_I_; +text: .text%__1cFKlassNoop_is_method6kM_i_; +text: .text%__1cIMulLNodeLbottom_type6kM_pknEType__; +text: .text%__1cHnmethodPis_locked_by_vm6kM_i_; +text: .text%__1cONMethodSweeperPprocess_nmethod6FpnHnmethod__v_; +text: .text%__1cRaddP_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXjava_lang_ref_ReferenceWpending_list_lock_addr6F_ppnHoopDesc__; +text: .text%__1cJloadLNodeHtwo_adr6kM_I_; +text: .text%__1cHMulNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMrep_stosNodePoper_input_base6kM_I_; +text: .text%__1cRsalI_rReg_immNodeErule6kM_I_; +text: .text%__1cJFieldTypeSskip_optional_size6FpnNsymbolOopDesc_pi_v_; +text: .text%__1cMloadConPNodeHsize_of6kM_I_; +text: .text%__1cSCallLeafDirectNodeHtwo_adr6kM_I_; +text: .text%__1cHnmethodVcleanup_inline_caches6M_v_; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__; +text: .text%__1cQsolaris_mprotect6FpcLi_i_: os_solaris.o; +text: .text%__1cRaddI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cHnmethodLis_unloaded6kM_i_; +text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_; +text: .text%__1cIGraphKitXset_edges_for_java_call6MpnMCallJavaNode_i_v_; +text: .text%__1cTconvI2L_reg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cHi2sNodeJnum_opnds6kM_I_; +text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_; +text: .text%__1cHMatcherXadjust_incoming_stk_arg6Mi_i_; +text: .text%__1cNIdealLoopTreeIset_nest6MI_i_; +text: .text%__1cNIdealLoopTreeMcounted_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cRsubI_rReg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cIGraphKitZset_results_for_java_call6MpnMCallJavaNode__pnENode__; +text: .text%__1cTconvI2L_reg_memNodePoper_input_base6kM_I_; +text: .text%__1cFStateM_sub_Op_CmpU6MpknENode__v_; +text: .text%__1cLRethrowNodeKmatch_edge6kMI_I_; +text: .text%__1cKcopy_table6FppC1i_v_: interpreter.o; +text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_; +text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cKTypeRawPtrFxmeet6kMpknEType__3_; +text: .text%__1cMVM_OperationVevaluate_at_safepoint6kM_i_; +text: .text%__1cMVM_OperationVevaluate_concurrently6kM_i_; +text: .text%__1cMVM_OperationSis_cheap_allocated6kM_i_; +text: .text%__1cXmembar_release_lockNodePoper_input_base6kM_I_; +text: .text%__1cRaddL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cScompP_mem_rRegNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__; +text: .text%__1cJFieldTypeYis_valid_array_signature6FpnNsymbolOopDesc__i_; +text: .text%__1cNincI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciObject2t6M_v_; +text: .text%__1cPstoreImmI16NodePoper_input_base6kM_I_; +text: .text%__1cNinstanceKlassbDcheck_valid_for_instantiation6MipnGThread__v_; +text: 
.text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_; +text: .text%__1cQLibraryIntrinsicKis_virtual6kM_i_; +text: .text%__1cPciObjectFactoryUget_empty_methodData6M_pnMciMethodData__; +text: .text%__1cMciMethodData2t6M_v_; +text: .text%__1cPsarI_rReg_1NodePoper_input_base6kM_I_; +text: .text%__1cNstoreImmBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFTypeDEhash6kM_i_; +text: .text%__1cMPrefetchNodeKmatch_edge6kMI_I_; +text: .text%__1cHCompileQcan_generate_C2I6MpnIciMethod_i_i_; +text: .text%__1cPloadConUL32NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPciObjArrayKlassSis_obj_array_klass6M_i_; +text: .text%__1cLOpaque1NodeEhash6kM_I_; +text: .text%__1cXmembar_release_lockNodeHtwo_adr6kM_I_; +text: .text%JVM_GetMethodIxModifiers; +text: .text%__1cJloadBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNandL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cNandL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_; +text: .text%__1cTCallDynamicJavaNodeGOpcode6kM_i_; +text: .text%__1cJloadINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKklassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cUCompressedReadStreamMraw_read_int6FrpC_i_; +text: .text%__1cIHaltNodeEhash6kM_I_; +text: .text%__1cNstoreImmINodePoper_input_base6kM_I_; +text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_; +text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cNinstanceKlassScopy_static_fields6MpnSPSPromotionManager__v_; +text: .text%__1cSinstanceKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cOcompL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQVMOperationQdDueueLremove_next6M_pnMVM_Operation__; +text: .text%__1cQciTypeArrayKlassTis_type_array_klass6M_i_; +text: .text%__1cRsubI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFStateP_sub_Op_LShiftL6MpknENode__v_; +text: .text%__1cLjmpConUNodeGnegate6M_v_; +text: .text%__1cKcmpOpUOperGnegate6M_v_; +text: .text%__1cMrax_RegLOperJnum_edges6kM_I_; +text: .text%__1cLGCTaskQdDueueKinitialize6M_v_; +text: .text%__1cJStealTask2t6Mi_v_; +text: .text%__1cJStealTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cTOldToYoungRootsTaskEname6M_pc_; +text: .text%__1cTOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cNGCTaskManagerMnote_release6MI_v_; +text: .text%__1cJStealTaskEname6M_pc_; +text: .text%__1cSCardTableExtensionbAscavenge_contents_parallel6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager_I_v_; +text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cQciBytecodeStreamMget_constant6M_nKciConstant__; +text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cJcmpOpOperFclone6kM_pnIMachOper__; +text: .text%__1cMrep_stosNodeMideal_Opcode6kM_i_; +text: .text%__1cEhash6Fpkc1_I_; +text: .text%__1cQput_after_lookup6FnMsymbolHandle_0ppnLNameSigHash__i_; +text: .text%__1cKJavaThreadLgc_epilogue6M_v_; +text: .text%__1cKJavaThreadLgc_prologue6M_v_; +text: .text%__1cTsize_java_to_interp6F_I_; +text: .text%__1cUreloc_java_to_interp6F_I_; +text: .text%__1cQinit_input_masks6FIrnHRegMask_1_p0_: matcher.o; +text: .text%__1cKOSRAdapterHoops_do6MpnKOopClosure__v_; +text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_; +text: .text%__1cRitableMethodEntryKinitialize6MpnNmethodOopDesc__v_; +text: 
.text%__1cTcompareAndSwapLNodeMideal_Opcode6kM_i_; +text: .text%__1cNinstanceKlassPlink_class_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIGraphKitbBset_arguments_for_java_call6MpnMCallJavaNode__v_; +text: .text%__1cNCallGeneratorCtf6kM_pknITypeFunc__; +text: .text%__1cMloadConLNodeLbottom_type6kM_pknEType__; +text: .text%__1cKStoreBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNaddL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cTcompareAndSwapLNodeJnum_opnds6kM_I_; +text: .text%__1cFStateO_sub_Op_StoreI6MpknENode__v_; +text: .text%__1cQleaPIdxScaleNodePoper_input_base6kM_I_; +text: .text%__1cNGrowableArray4CpnNmethodOopDesc__2t6Mii_v_; +text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_; +text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_; +text: .text%__1cNloadConP0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__; +text: .text%__1cWstatic_stub_RelocationJpack_data6M_i_; +text: .text%__1cNsubL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cbAjni_check_async_exceptions6FpnKJavaThread__v_: jni.o; +text: .text%__1cKJavaThreadbHcheck_and_handle_async_exceptions6Mi_v_; +text: .text%__1cRsalI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMindIndexOperNbase_position6kM_i_; +text: .text%__1cMindIndexOperOindex_position6kM_i_; +text: .text%__1cMindIndexOperNconstant_disp6kM_i_; +text: .text%__1cJLoadSNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNGrowableArray4CpnOMethodLivenessKBasicBlock__2t6Mii_v_; +text: .text%__1cKstoreINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCallGeneratorPfor_direct_call6FpnIciMethod__p0_; +text: .text%__1cTDirectCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cMWarmCallInfoLalways_cold6F_p0_; +text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_; +text: .text%JVM_IsInterface; +text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_; +text: .text%__1cRshrL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQjmpCon_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQjmpCon_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cKjmpConNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cQjmpCon_shortNodeJis_Branch6kM_I_; +text: .text%__1cKJavaThreadNpd_last_frame6M_nFframe__; +text: .text%__1cTStackWalkCompPolicyVfindTopInlinableFrame6MpnNGrowableArray4CpnGRFrame____2_; +text: .text%__1cKJavaThreadQlast_java_vframe6MpnLRegisterMap__pnKjavaVFrame__; +text: .text%__1cTStackWalkCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_; +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_nMmethodHandle__v_; +text: .text%__1cNGrowableArray4CpnGRFrame__2t6Mii_v_; +text: .text%__1cKjavaVFrameNis_java_frame6kM_i_; +text: .text%__1cIVerifierRshould_verify_for6FpnHoopDesc__i_; +text: .text%__1cQciBytecodeStreamPget_klass_index6M_i_; +text: .text%__1cRMachNullCheckNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRMachNullCheckNode2t6MpnENode_2I_v_; +text: .text%__1cRsarI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNMachIdealNodeJnum_opnds6kM_I_; +text: .text%__1cRMachSafePointNodePis_MachCallJava6M_pnQMachCallJavaNode__; +text: .text%__1cKstorePNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cWImplicitExceptionTableGappend6MII_v_; +text: .text%__1cHUNICODEHas_utf86FpHi_pc_; +text: .text%__1cMStartI2CNodeGOpcode6kM_i_; +text: 
.text%__1cKOSRAdapterMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cSvframeStreamCommonbHskip_method_invoke_and_aux_frames6M_v_; +text: .text%__1cNdecI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cIMinINodeGOpcode6kM_i_; +text: .text%__1cNinstanceKlassbCfind_local_field_from_offset6kMiipnPfieldDescriptor__i_; +text: .text%__1cNinstanceKlassWfind_field_from_offset6kMiipnPfieldDescriptor__i_; +text: .text%__1cPciInstanceKlassTget_field_by_offset6Mii_pnHciField__; +text: .text%__1cFArena2T6M_v_; +text: .text%__1cKmethodOperJnum_edges6kM_I_; +text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__; +text: .text%__1cISubLNodeGOpcode6kM_i_; +text: .text%__1cFStateO_sub_Op_StoreP6MpknENode__v_; +text: .text%__1cRshrI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRsarL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNstoreImmBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstorePNodeFreloc6kM_i_; +text: .text%__1cYCallStaticJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cYCallStaticJavaDirectNodeJnum_opnds6kM_I_; +text: .text%__1cQleaPIdxScaleNodeErule6kM_I_; +text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_; +text: .text%__1cNinstanceKlassXmark_dependent_nmethods6MpnMklassOopDesc__i_; +text: .text%__1cMvframeStream2t6MpnKJavaThread_i_v_; +text: .text%__1cIGraphKitTuse_exception_state6MpnNSafePointNode__pnENode__; +text: .text%__1cIGraphKitSclear_saved_ex_oop6FpnNSafePointNode__pnENode__; +text: .text%__1cNloadConI0NodeFclone6kM_pnENode__; +text: .text%__1cJimmI0OperFclone6kM_pnIMachOper__; +text: .text%__1cLCastP2LNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cKcmpOpUOperNgreater_equal6kM_i_; +text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_; +text: .text%__1cXcopy_u2_with_conversion6FpH0i_v_: classFileParser.o; +text: .text%__1cENodeGis_Sub6M_pnHSubNode__; +text: .text%__1cJAssemblerFtestq6MpnMRegisterImpl_2_v_; +text: .text%__1cJCMoveNodeLis_cmove_id6FpnOPhaseTransform_pnENode_44pnIBoolNode__4_; +text: .text%__1cZresource_reallocate_bytes6FpcLL_0_; +text: .text%__1cKstoreINodeFreloc6kM_i_; +text: .text%__1cLsymbolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQciBytecodeStreamJget_klass6Mri_pnHciKlass__; +text: .text%__1cKMemBarNode2t6M_v_; +text: .text%__1cIDivINodeGOpcode6kM_i_; +text: .text%__1cFframeRis_compiled_frame6kMpi_i_; +text: .text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_; +text: .text%__1cPshrI_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMorI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFParseQarray_addressing6MnJBasicType_ippknEType__pnENode__; +text: .text%__1cPsalI_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHciField2t6MpnPfieldDescriptor__v_; +text: .text%__1cIAddLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIModINodeGOpcode6kM_i_; +text: .text%__1cNmulL_rRegNodeErule6kM_I_; +text: .text%__1cSsafePoint_pollNodeHtwo_adr6kM_I_; +text: .text%__1cDCHAManalyze_call6FnLKlassHandle_11nMsymbolHandle_2_pnJCHAResult__; +text: .text%__1cJCHAResult2t6MnLKlassHandle_nMsymbolHandle_2pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___n0E_i_v_; +text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cJCHAResultOis_monomorphic6kM_i_; +text: 
.text%__1cIciMethodXfind_monomorphic_target6MpnHciKlass_22_p0_; +text: .text%__1cQconstMethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cKMemBarNodeJis_MemBar6kM_pk0_; +text: .text%__1cIGraphKitOinsert_mem_bar6MpnKMemBarNode__v_; +text: .text%__1cHi2sNodeHtwo_adr6kM_I_; +text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__; +text: .text%__1cLConvL2INodeLbottom_type6kM_pknEType__; +text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cNIdealLoopTreeObeautify_loops6MpnOPhaseIdealLoop__i_; +text: .text%__1cScompP_mem_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreBNodePoper_input_base6kM_I_; +text: .text%__1cRandI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSCallLeafDirectNodeRis_safepoint_node6kM_i_; +text: .text%__1cJloadLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCallLeafDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKMemBarNodeJideal_reg6kM_I_; +text: .text%__1cJloadSNodeHtwo_adr6kM_I_; +text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cUDebugInfoWriteStreamMwrite_handle6MpnI_jobject__v_; +text: .text%__1cLmethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNaddI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJlog2_long6Fx_i_; +text: .text%__1cTconvL2I_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitPpush_pair_local6Mi_v_; +text: .text%__1cOjmpLoopEndNodePoper_input_base6kM_I_; +text: .text%__1cMURShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cHCompileSregister_intrinsic6MpnNCallGenerator__v_; +text: .text%__1cIAddLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNloadKlassNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_; +text: .text%__1cHCmpNodeGadd_id6kM_pknEType__; +text: .text%JVM_InternString; +text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_; +text: .text%__1cFKlassQup_cast_abstract6M_p0_; +text: .text%__1cNGrowableArray4CpnENode__2t6Mii_v_; +text: .text%__1cPCheckCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cSCountedLoopEndNodeKstride_con6kM_i_; +text: .text%__1cTconvI2L_reg_memNodeJnum_opnds6kM_I_; +text: .text%__1cPCheckCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_; +text: .text%__1cLOpaque1NodeLbottom_type6kM_pknEType__; +text: .text%__1cOPhaseIdealLoopRsplit_thru_region6MpnENode_2_2_; +text: .text%__1cFTypeFCeq6kMpknEType__i_; +text: .text%__1cNmethodOopDescOis_initializer6kM_i_; +text: .text%__1cFciEnvRfind_system_klass6MpnIciSymbol__pnHciKlass__; +text: .text%__1cNandL_rRegNodeErule6kM_I_; +text: .text%__1cQjmpDir_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cQjmpDir_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKjmpDirNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cQjmpDir_shortNodeJis_Branch6kM_I_; +text: .text%__1cLBlock_ArrayEgrow6MI_v_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cSCompareAndSwapNodeLbottom_type6kM_pknEType__; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6FnUtypeArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: 
.text%__1cMPhaseChaitinVfind_base_for_derived6MppnENode_2rI_2_; +text: .text%__1cSindIndexOffsetOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cSindIndexOffsetOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cNGrowableArray4CI_Egrow6Mi_v_; +text: .text%__1cHMatcherMreturn_value6Fii_nLOptoRegPair__; +text: .text%__1cFStateP_sub_Op_ConvI2L6MpknENode__v_; +text: .text%__1cOjmpLoopEndNodeGpinned6kM_i_; +text: .text%__1cNxorI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cJCHAResultSmonomorphic_target6kM_nMmethodHandle__; +text: .text%__1cNsubI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_; +text: .text%__1cRcmpFastUnlockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_; +text: .text%__1cQLRUMaxHeapPolicyWshould_clear_reference6MpnHoopDesc__i_; +text: .text%__1cLcastP2LNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcheckCastPPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPciInstanceKlassLfind_method6MpnIciSymbol_2_pnIciMethod__; +text: .text%__1cZCallInterpreterDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cILoopNodeHsize_of6kM_I_; +text: .text%__1cSindIndexOffsetOperFscale6kM_i_; +text: .text%__1cMjniIdSupportNto_method_oop6FpnK_jmethodID__pnNmethodOopDesc__; +text: .text%__1cLBoxLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_; +text: .text%__1cRaddI_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_; +text: .text%__1cGOopMapPset_derived_oop6Miiii_v_; +text: .text%__1cKstoreBNodeMideal_Opcode6kM_i_; +text: .text%__1cHi2bNodeErule6kM_I_; +text: .text%__1cFStateN_sub_Op_LoadI6MpknENode__v_; +text: .text%__1cMloadConDNodePoper_input_base6kM_I_; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__; +text: .text%__1cICmpLNodeDsub6kMpknEType_3_3_; +text: .text%__1cRjmpConU_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRjmpConU_shortNodeMideal_Opcode6kM_i_; +text: .text%__1cMloadConDNodeHtwo_adr6kM_I_; +text: .text%__1cHnmethodKpc_desc_at6MpC_pnGPcDesc__; +text: .text%__1cJrRegPOperFclone6kM_pnIMachOper__; +text: .text%__1cFParseNpush_constant6MnKciConstant__i_; +text: .text%__1cMrep_stosNodeJnum_opnds6kM_I_; +text: .text%__1cOClearArrayNodeKmatch_edge6kMI_I_; +text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_; +text: .text%__1cXmembar_release_lockNodeLbottom_type6kM_pknEType__; +text: .text%__1cPThreadLocalNodeJideal_reg6kM_I_; +text: .text%__1cPstoreImmI16NodeJnum_opnds6kM_I_; +text: .text%__1cTleaPIdxScaleOffNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitbMset_predefined_output_for_runtime_call6MpnENode_pnMMergeMemNode__v_; +text: .text%__1cFKlassXsearch_secondary_supers6kMpnMklassOopDesc__i_; +text: .text%__1cPsarI_rReg_1NodeErule6kM_I_; +text: .text%__1cOPhaseIdealLoopPis_counted_loop6MpnENode_pnNIdealLoopTree__2_; +text: .text%__1cIGraphKitOhas_ex_handler6M_i_; +text: .text%__1cMloadConDNodeErule6kM_I_; +text: .text%__1cHCompileQsync_stack_slots6kM_i_; +text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_; +text: .text%__1cMURShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNdecI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cPClassFileParserbHparse_constant_pool_integer_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: 
.text%__1cTDebugInfoReadStream2t6MpknHnmethod_i_v_; +text: .text%__1cRsalI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cJScopeDescJstream_at6kMi_pnTDebugInfoReadStream__; +text: .text%__1cVjava_lang_ClassLoaderGparent6FpnHoopDesc__2_; +text: .text%__1cIPhaseIFGEinit6MI_v_; +text: .text%__1cMPhaseChaitinQgather_lrg_masks6Mi_v_; +text: .text%__1cJPhaseLiveHcompute6MI_v_; +text: .text%JVM_GetCPClassNameUTF; +text: .text%__1cMLinkResolverUresolve_invokestatic6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNstoreImmINodeJnum_opnds6kM_I_; +text: .text%__1cITypeNodeHis_Type6M_p0_; +text: .text%__1cHRetNodePoper_input_base6kM_I_; +text: .text%__1cLCastP2LNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%jni_GetStringLength: jni.o; +text: .text%__1cPloadConUL32NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOFastUnlockNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__; +text: .text%__1cNprefetchwNodeHtwo_adr6kM_I_; +text: .text%__1cNnmethodLocker2T6M_v_; +text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cKstoreCNodeHtwo_adr6kM_I_; +text: .text%__1cQleaPIdxScaleNodeJnum_opnds6kM_I_; +text: .text%__1cNaddL_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cOcompL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cTDebugInfoReadStreamLread_handle6M_nGHandle__; +text: .text%__1cJScopeDesc2t6MpknHnmethod_i_v_; +text: .text%__1cFStateR_sub_Op_LoadRange6MpknENode__v_; +text: .text%__1cOcompiledVFrame2t6MpknFframe_pknLRegisterMap_pnKJavaThread_pnJScopeDesc__v_; +text: .text%__1cOcompU_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPcmovI_reg_gNodePoper_input_base6kM_I_; +text: .text%__1cLProfileDataSis_VirtualCallData6M_i_; +text: .text%__1cSmembar_acquireNodePoper_input_base6kM_I_; +text: .text%__1cNsubL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cJMarkSweepMfollow_stack6F_v_; +text: .text%__1cNnmethodLocker2t6MpnHnmethod__v_; +text: .text%__1cNloadRangeNodeFreloc6kM_i_; +text: .text%__1cNGrowableArray4CpnKciTypeFlowJJsrRecord__2t6Miirk2i_v_; +text: .text%__1cTcompareAndSwapLNodeErule6kM_I_; +text: .text%__1cZCallDynamicJavaDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cMURShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOcompiledVFrameGis_top6kM_i_; +text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cNxorI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cRshrI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cKciTypeFlow2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cKciTypeFlowXmark_known_range_starts6M_v_; +text: .text%__1cKciTypeFlowLfind_ranges6M_v_; +text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_; +text: .text%__1cKciTypeFlowKmap_blocks6M_v_; +text: .text%__1cKciTypeFlowHdo_flow6M_v_; +text: .text%__1cKciTypeFlowPget_start_state6M_pkn0ALStateVector__; +text: .text%__1cKciTypeFlowKflow_types6M_v_; +text: .text%__1cIAndINodeGadd_id6kM_pknEType__; +text: .text%__1cMURShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadBNodeHtwo_adr6kM_I_; +text: .text%__1cKPSYoungGenRcapacity_in_bytes6kM_L_; +text: .text%__1cHMonitorGnotify6M_i_; +text: .text%__1cYciExceptionHandlerStreamPcount_remaining6M_i_; +text: .text%__1cFParseXcatch_inline_exceptions6MpnNSafePointNode__v_; +text: .text%__1cHMatcherNfind_receiver6Fi_i_; +text: .text%__1cMciMethodDataJload_data6M_v_; +text: .text%__1cIciMethodJload_code6M_v_; +text: .text%__1cJCmpL3NodeGOpcode6kM_i_; +text: 
.text%__1cIAndINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cUParallelScavengeHeapEused6kM_L_; +text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_; +text: .text%__1cMTypeKlassPtrFxdual6kM_pknEType__; +text: .text%__1cIMaxINodeGOpcode6kM_i_; +text: .text%__1cOPhaseTransform2t6MnFPhaseLPhaseNumber__v_; +text: .text%__1cPsalI_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cQSystemDictionarybAcompute_loader_lock_object6FnGHandle_pnGThread__1_; +text: .text%__1cHciKlassMis_interface6M_i_; +text: .text%__1cPmethodDataKlassRoop_is_methodData6kM_i_; +text: .text%__1cIMulLNodeGadd_id6kM_pknEType__; +text: .text%__1cJloadCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_; +text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_; +text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_; +text: .text%__1cQVMOperationQdDueueHoops_do6MpnKOopClosure__v_; +text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cOJNIHandleBlockMweak_oops_do6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_; +text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_; +text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_; +text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_; +text: .text%__1cIUniverseHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollectorXoops_do_for_all_threads6FpnKOopClosure__v_; +text: .text%__1cRindIndexScaleOperJnum_edges6kM_I_; +text: .text%__1cRindIndexScaleOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKstoreBNodeJnum_opnds6kM_I_; +text: .text%__1cNSignatureInfoJdo_double6M_v_; +text: .text%__1cJAssemblerEmovl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cRsalI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cMrdx_RegIOperEtype6kM_pknEType__; +text: .text%__1cMciMethodData2t6MnQmethodDataHandle__v_; +text: .text%__1cSmembar_acquireNodeHtwo_adr6kM_I_; +text: .text%__1cRshrI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cKJNIHandlesLmake_global6FnGHandle_i_pnI_jobject__; +text: .text%jni_ExceptionOccurred: jni.o; +text: .text%jni_SetObjectArrayElement: jni.o; +text: .text%__1cSCompareAndSwapNodeKmatch_edge6kMI_I_; +text: .text%__1cISubINodeJideal_reg6kM_I_; +text: .text%__1cRMachSafePointNodeGpinned6kM_i_; +text: .text%__1cIimmIOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConINodeFclone6kM_pnENode__; +text: .text%__1cICodeHeapIallocate6ML_pv_; +text: .text%__1cICodeHeapPsearch_freelist6ML_pnJFreeBlock__; +text: .text%__1cbACallCompiledJavaDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cPcmpFastLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLCastP2LNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNmulL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJLoadBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cVmerge_point_too_heavy6FpnHCompile_pnENode__i_: loopopts.o; +text: .text%jni_GetByteArrayRegion: jni.o; +text: .text%__1cFParseKdo_put_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cHnmethodOis_java_method6kM_i_; +text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_; +text: 
.text%jni_GetStringUTFRegion: jni.o; +text: .text%jni_GetStringUTFLength: jni.o; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_; +text: .text%__1cRsarL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cScompU_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFciEnvZcheck_klass_accessibility6MpnHciKlass_pnMklassOopDesc__i_; +text: .text%__1cIciObjectMis_obj_array6M_i_; +text: .text%__1cOLibraryCallKitOgenerate_guard6MpnENode_pnKRegionNode_f_v_; +text: .text%__1cMoutputStream2t6Mi_v_; +text: .text%__1cMstringStreamJas_string6M_pc_; +text: .text%__1cMstringStream2t6ML_v_; +text: .text%__1cJloadINodeFreloc6kM_i_; +text: .text%__1cMstringStream2T6M_v_; +text: .text%__1cOMethodLivenessKBasicBlockJstore_two6Mi_v_; +text: .text%__1cJloadINodeIpeephole6MpnFBlock_ipnNPhaseRegAlloc_ri_pnIMachNode__; +text: .text%__1cTconvL2I_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cRandI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cOAbstractICacheQinvalidate_range6FpCi_v_; +text: .text%__1cOAbstractICachePcall_flush_stub6FpCi_v_; +text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_; +text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__; +text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__; +text: .text%__1cIGraphKitOmake_slow_call6MpknITypeFunc_pCpkcpnENode_88_8_; +text: .text%__1cICodeHeapPfollowing_block6MpnJFreeBlock__2_; +text: .text%__1cOClearArrayNodeLbottom_type6kM_pknEType__; +text: .text%__1cPshrI_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cEDictIdoubhash6M_v_; +text: .text%__1cTleaPIdxScaleOffNodeLbottom_type6kM_pknEType__; +text: .text%__1cIProjNodeJideal_reg6kM_I_; +text: .text%__1cHi2sNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLimmI_16OperJnum_edges6kM_I_; +text: .text%__1cUmembar_cpu_orderNodePoper_input_base6kM_I_; +text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_; +text: .text%__1cTCallInterpreterNodeGOpcode6kM_i_; +text: .text%__1cMloadConLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRaddL_rReg_immNodeErule6kM_I_; +text: .text%__1cJLoadLNodeJideal_reg6kM_I_; +text: .text%__1cTleaPIdxScaleOffNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cHCompileTset_cached_top_node6MpnENode__v_; +text: .text%__1cENodeMsetup_is_top6M_v_; +text: .text%__1cIGotoNodeGOpcode6kM_i_; +text: .text%__1cOMachPrologNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHCompilePneed_stack_bang6kMi_i_; +text: .text%__1cKBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cNFingerprinterIdo_array6Mii_v_; +text: .text%jni_GetArrayLength: jni.o; +text: .text%__1cJloadSNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKReturnNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_; +text: .text%__1cMorI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cKTypeRawPtrFempty6kM_i_; +text: .text%__1cHRetNodeGpinned6kM_i_; +text: .text%__1cHRetNodeHtwo_adr6kM_I_; +text: .text%__1cPsalI_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cHRetNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPGlobalTLABStatsKinitialize6M_v_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_; +text: .text%__1cUParallelScavengeHeapTensure_parseability6M_v_; +text: .text%__1cTDerivedPointerTableFclear6F_v_; +text: 
.text%__1cNMemoryServiceGgc_end6Fi_v_; +text: .text%__1cSReferenceProcessorQprocess_phaseJNI6M_v_; +text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_; +text: .text%__1cSReferenceProcessorbDenqueue_discovered_references6M_i_; +text: .text%__1cSReferenceProcessorbDprocess_discovered_references6M_v_; +text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cTDerivedPointerTablePupdate_pointers6F_v_; +text: .text%__1cNCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapOfill_all_tlabs6M_v_; +text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_; +text: .text%__1cSReferenceProcessorbBenqueue_discovered_reflists6MppnHoopDesc__v_; +text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_; +text: .text%__1cSReferenceProcessorOprocess_phase16MppnHoopDesc_pnPReferencePolicy_pnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: .text%__1cQLRUMaxHeapPolicy2t6M_v_; +text: .text%__1cPGCMemoryManagerIgc_begin6M_v_; +text: .text%__1cPGCMemoryManagerGgc_end6M_v_; +text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cUParallelScavengeHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cKPSYoungGenPupdate_counters6M_v_; +text: .text%__1cXjava_lang_ref_ReferenceRpending_list_addr6F_ppnHoopDesc__; +text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_; +text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_; +text: .text%__1cNMemoryServiceIgc_begin6Fi_v_; +text: .text%__1cUParallelScavengeHeapOfill_all_tlabs6M_v_; +text: .text%__1cXTraceMemoryManagerStats2T6M_v_; +text: .text%__1cXTraceMemoryManagerStats2t6Mi_v_; +text: .text%__1cUParallelScavengeHeapPupdate_counters6M_v_; +text: .text%__1cQPlaceholderTableJnew_entry6MipnNsymbolOopDesc_pnHoopDesc__pnQPlaceholderEntry__; +text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cQPlaceholderTableJadd_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_; +text: .text%__1cUParallelScavengeHeapQresize_all_tlabs6M_v_; +text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersPupdate_counters6M_v_; +text: .text%__1cYGCAdaptivePolicyCountersbBupdate_counters_from_policy6M_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersbBupdate_counters_from_policy6M_v_; +text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_; +text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cNaddL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cNaddL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cKstoreLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cUPSAdaptiveSizePolicyZdecay_supplemental_growth6Mi_v_; +text: .text%__1cUPSAdaptiveSizePolicybPeden_increment_with_supplement_aligned_up6ML_L_; +text: .text%__1cUPSAdaptiveSizePolicyQdecaying_gc_cost6kM_d_; +text: .text%__1cUPSAdaptiveSizePolicybDcompute_generation_free_space6MLLLLLLLi_v_; +text: .text%__1cIPSOldGenMmax_gen_size6M_L_; +text: .text%__1cUPSAdaptiveSizePolicybHclear_generation_free_space_flags6M_v_; +text: .text%__1cUPSAdaptiveSizePolicyOeden_increment6MLI_L_; +text: .text%__1cUPSAdaptiveSizePolicyVadjust_for_throughput6MipL1_v_; +text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cUmembar_cpu_orderNodeHtwo_adr6kM_I_; +text: 
.text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__; +text: .text%__1cLklassVtableRinitialize_vtable6MpnGThread__v_; +text: .text%__1cJAssemblerDjmp6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cPshrI_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cRmulI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNandI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cOMachEpilogNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLklassVtableVinitialize_from_super6MnLKlassHandle__i_; +text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_; +text: .text%__1cLklassVtableOcopy_vtable_to6MpnLvtableEntry__v_; +text: .text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_; +text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_; +text: .text%__1cPVM_GC_OperationbKrelease_and_notify_pending_list_lock6M_v_; +text: .text%__1cPVM_GC_OperationOskip_operation6kM_i_; +text: .text%__1cPVM_GC_OperationNdoit_prologue6M_i_; +text: .text%__1cPVM_GC_OperationZacquire_pending_list_lock6M_v_; +text: .text%__1cMTypeKlassPtrFxmeet6kMpknEType__3_; +text: .text%__1cKReturnNodeEhash6kM_I_; +text: .text%__1cHnmethodVis_dependent_on_entry6MpnMklassOopDesc_2pnNmethodOopDesc__i_; +text: .text%__1cbDVM_ParallelGCFailedAllocation2t6MLiiI_v_; +text: .text%__1cLlog2_intptr6Fl_i_; +text: .text%__1cKKlass_vtbl2n6FLrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_; +text: .text%__1cMloadConPNodeFclone6kM_pnENode__; +text: .text%__1cIimmPOperFclone6kM_pnIMachOper__; +text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_; +text: .text%__1cSCallLeafDirectNodeKmethod_set6Ml_v_; +text: .text%__1cJcmpOpOperJnot_equal6kM_i_; +text: .text%__1cJloadLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_; +text: .text%__1cCosOunguard_memory6FpcL_i_; +text: .text%__1cNandL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLLShiftINodeJideal_reg6kM_I_; +text: .text%__1cRsarI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_; +text: .text%__1cJLoadSNodeJideal_reg6kM_I_; +text: .text%__1cTconvL2I_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cIPhaseIFGISquareUp6M_v_; +text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__; +text: .text%__1cCosJyield_all6Fi_v_; +text: .text%__1cKciTypeFlowLStateVectorOmeet_exception6MpnPciInstanceKlass_pk1_i_; +text: .text%__1cCosbCmake_polling_page_unreadable6F_v_; +text: .text%__1cONMethodSweeperFsweep6F_v_; +text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_; +text: .text%__1cMCounterDecayFdecay6F_v_; +text: .text%__1cCosOprotect_memory6FpcL_i_; +text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_; +text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_; +text: .text%__1cCosXserialize_thread_states6F_v_; +text: .text%__1cUSafepointSynchronizeFbegin6F_v_; +text: .text%__1cUSafepointSynchronizeRis_cleanup_needed6F_i_; +text: .text%__1cUSafepointSynchronizeQdo_cleanup_tasks6F_v_; +text: .text%__1cRInlineCacheBufferIis_empty6F_i_; +text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_; +text: .text%__1cTAbstractInterpreterRnotice_safepoints6F_v_; +text: .text%__1cNloadConP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosbAmake_polling_page_readable6F_v_; +text: 
.text%__1cORuntimeServiceUrecord_safepoint_end6F_v_; +text: .text%__1cUSafepointSynchronizeDend6F_v_; +text: .text%__1cTAbstractInterpreterRignore_safepoints6F_v_; +text: .text%__1cQVMOperationQdDueueGinsert6MpnMVM_Operation_2_v_; +text: .text%__1cQVMOperationQdDueueGunlink6MpnMVM_Operation__v_; +text: .text%__1cMVM_OperationIevaluate6M_v_; +text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_; +text: .text%__1cCosMget_priority6FpknGThread_rnOThreadPriority__nIOSReturn__; +text: .text%__1cQVMOperationQdDueueOqueue_add_back6MipnMVM_Operation__v_; +text: .text%__1cGThreadMget_priority6Fpk0_nOThreadPriority__; +text: .text%__1cCosTget_native_priority6FpknGThread_pi_nIOSReturn__; +text: .text%__1cIVMThreadSevaluate_operation6MpnMVM_Operation__v_; +text: .text%__1cQVMOperationQdDueueDadd6MpnMVM_Operation__i_; +text: .text%__1cSmembar_releaseNodeLbottom_type6kM_pknEType__; +text: .text%__1cCosGrandom6F_l_; +text: .text%__1cNget_next_hash6F_l_: synchronizer.o; +text: .text%__1cNJvmtiGCMarker2t6Mi_v_; +text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_; +text: .text%__1cKPSScavengeXshould_attempt_scavenge6F_i_; +text: .text%__1cKPSScavengeQinvoke_no_policy6Fpi_i_; +text: .text%__1cPGlobalTLABStatsHpublish6M_v_; +text: .text%__1cUinitialize_hashtable6FppnLNameSigHash__v_; +text: .text%__1cPclear_hashtable6FppnLNameSigHash__v_; +text: .text%__1cQciBytecodeStreamUis_unresolved_string6kM_i_; +text: .text%__1cFciEnvUis_unresolved_string6kMpnPciInstanceKlass_i_i_; +text: .text%__1cFciEnvZis_unresolved_string_impl6kMpnNinstanceKlass_i_i_; +text: .text%__1cNtestP_regNodeFreloc6kM_i_; +text: .text%__1cNSCMemProjNodeGis_CFG6kM_i_; +text: .text%__1cKPSScavengeGinvoke6Fpi_v_; +text: .text%__1cUParallelScavengeHeapTfailed_mem_allocate6MpiLii_pnIHeapWord__; +text: .text%__1cbDVM_ParallelGCFailedAllocationEname6kM_pkc_; +text: .text%__1cbDVM_ParallelGCFailedAllocationEdoit6M_v_; +text: .text%__1cKDictionaryJnew_entry6MIpnMklassOopDesc_pnHoopDesc__pnPDictionaryEntry__; +text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_; +text: .text%__1cQSystemDictionaryRupdate_dictionary6FiIiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cQSystemDictionaryRcheck_constraints6FiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cQSystemDictionaryQfind_placeholder6FiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cVLoaderConstraintTablePcheck_or_update6MnTinstanceKlassHandle_nGHandle_nMsymbolHandle__pkc_; +text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNIdealLoopTreeTcheck_inner_safepts6MpnOPhaseIdealLoop__v_; +text: .text%__1cPsarI_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKciTypeFlowPflow_exceptions6MpnNGrowableArray4Cpn0AFBlock___pnNGrowableArray4CpnPciInstanceKlass___pn0ALStateVector__v_; +text: .text%__1cIAndINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciObjectOis_null_object6kM_i_; +text: .text%__1cNIdealLoopTreeNDCE_loop_body6M_v_; +text: .text%__1cNprefetchwNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXmembar_release_lockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFStateP_sub_Op_LShiftI6MpknENode__v_; +text: .text%__1cNIdealLoopTreeVadjust_loop_exit_prob6MpnOPhaseIdealLoop__v_; +text: .text%__1cKstoreCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMPhaseChaitinMreset_uf_map6MI_v_; +text: .text%__1cMPhaseChaitinSbuild_ifg_physical6MpnMResourceArea__I_; +text: 
.text%__1cNPhaseCoalescePcoalesce_driver6M_v_; +text: .text%__1cNdecI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSComputeAdapterInfoHdo_long6M_v_; +text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_; +text: .text%__1cSTailCalljmpIndNodeNis_block_proj6kM_pknENode__; +text: .text%__1cIciObjectMhas_encoding6M_i_; +text: .text%__1cMrcx_RegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIMulLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cHPhiNodeKmake_blank6FpnENode_2_p0_; +text: .text%JVM_GetMethodIxSignatureUTF; +text: .text%JVM_GetMethodIxMaxStack; +text: .text%JVM_GetMethodIxArgsSize; +text: .text%JVM_GetMethodIxByteCodeLength; +text: .text%JVM_GetMethodIxByteCode; +text: .text%JVM_GetMethodIxExceptionIndexes; +text: .text%JVM_GetMethodIxExceptionsCount; +text: .text%__1cENodeUdepends_only_on_test6kM_i_; +text: .text%__1cXmembar_acquire_lockNodePoper_input_base6kM_I_; +text: .text%__1cOPhaseIdealLoopMdominated_by6MpnENode_2_v_; +text: .text%__1cNGrowableArray4nLKlassHandle__Icontains6kMrkn0A__i_; +text: .text%__1cLGCTaskQdDueue2t6Mi_v_; +text: .text%__1cNaddL_rRegNodeErule6kM_I_; +text: .text%__1cGGCTask2t6Mn0AEKindEkind__v_; +text: .text%__1cZSerialOldToYoungRootsTaskEname6M_pc_; +text: .text%__1cZSerialOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cSPSPromotionManagerNpost_scavenge6F_v_; +text: .text%__1cSPSPromotionManagerMpre_scavenge6F_v_; +text: .text%__1cUWaitForBarrierGCTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cSPSPromotionManagerbBvm_thread_promotion_manager6F_p0_; +text: .text%__1cSAdaptiveSizePolicyWminor_collection_begin6M_v_; +text: .text%__1cSAdaptiveSizePolicyUminor_collection_end6MnHGCCauseFCause__v_; +text: .text%__1cUWaitForBarrierGCTaskEname6M_pc_; +text: .text%__1cNMonitorSupplyHrelease6FpnHMonitor__v_; +text: .text%__1cNMonitorSupplyHreserve6F_pnHMonitor__; +text: .text%__1cUWaitForBarrierGCTaskIwait_for6M_v_; +text: .text%__1cUWaitForBarrierGCTaskIdestruct6M_v_; +text: .text%__1cUWaitForBarrierGCTaskHdestroy6Fp0_v_; +text: .text%__1cUWaitForBarrierGCTask2t6Mi_v_; +text: .text%__1cUWaitForBarrierGCTaskGcreate6F_p0_; +text: .text%__1cNBarrierGCTaskIdestruct6M_v_; +text: .text%__1cNBarrierGCTaskOdo_it_internal6MpnNGCTaskManager_I_v_; +text: .text%__1cNGCTaskManagerVrelease_all_resources6M_v_; +text: .text%__1cNGCTaskManagerIadd_list6MpnLGCTaskQdDueue__v_; +text: .text%__1cLGCTaskQdDueueHenqueue6Mp0_v_; +text: .text%__1cLGCTaskQdDueueGcreate6F_p0_; +text: .text%__1cGGCTaskIdestruct6M_v_; +text: .text%__1cSCardTableExtensionRscavenge_contents6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager__v_; +text: .text%__1cHThreadsZcreate_thread_roots_tasks6FpnLGCTaskQdDueue__v_; +text: .text%__1cKPSYoungGenLswap_spaces6M_v_; +text: .text%__1cUPSAdaptiveSizePolicybPcompute_survivor_space_size_and_threshold6MiiL_i_; +text: .text%__1cUParallelScavengeHeapQresize_young_gen6MLL_v_; +text: .text%__1cUPSAdaptiveSizePolicyPupdate_averages6MiLL_v_; +text: .text%__1cKPSYoungGenRresize_generation6MLL_i_; +text: .text%__1cKPSYoungGenGresize6MLL_v_; +text: .text%__1cKPSYoungGenNresize_spaces6MLL_v_; +text: .text%__1cHMatcherKcan_be_arg6Fi_i_; +text: .text%__1cHMatcherQis_spillable_arg6Fi_i_; +text: .text%__1cUPSAdaptiveSizePolicyOshould_full_GC6ML_i_; +text: .text%__1cSAdaptiveSizePolicybIupdate_minor_pause_young_estimator6Md_v_; +text: .text%__1cUPSAdaptiveSizePolicybGupdate_minor_pause_old_estimator6Md_v_; +text: .text%__1cNsubL_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cMStartOSRNodeGOpcode6kM_i_; +text: 
.text%__1cRsubI_rReg_memNodeErule6kM_I_; +text: .text%__1cQinstanceRefKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cXmembar_acquire_lockNodeHtwo_adr6kM_I_; +text: .text%__1cNandI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cNcmovI_regNodePoper_input_base6kM_I_; +text: .text%__1cMURShiftINodeJideal_reg6kM_I_; +text: .text%__1cMorI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cLRShiftINodeJideal_reg6kM_I_; +text: .text%__1cLklassItableRinitialize_itable6M_v_; +text: .text%__1cLklassVtableQfill_in_mirandas6Mri_v_; +text: .text%__1cRandI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cSmembar_releaseNodePoper_input_base6kM_I_; +text: .text%__1cFKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cJrRegLOperFclone6kM_pnIMachOper__; +text: .text%__1cFStateR_sub_Op_LoadKlass6MpknENode__v_; +text: .text%__1cRmethodDataOopDescJis_mature6kM_i_; +text: .text%__1cJcmpOpOperEless6kM_i_; +text: .text%__1cFKlassWappend_to_sibling_list6M_v_; +text: .text%__1cOcompL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__; +text: .text%__1cNloadKlassNodeFreloc6kM_i_; +text: .text%__1cRshrI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAndINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cTjava_lang_ThrowableNset_backtrace6FpnHoopDesc_2_v_; +text: .text%__1cPcmovI_reg_gNodeMideal_Opcode6kM_i_; +text: .text%__1cIAndINodeGmul_id6kM_pknEType__; +text: .text%__1cTClassLoadingServiceScompute_class_size6FpnNinstanceKlass__L_; +text: .text%__1cLklassVtableQget_num_mirandas6FpnMklassOopDesc_pnPobjArrayOopDesc_4_i_; +text: .text%__1cIVerifierQrelax_verify_for6FpnHoopDesc__i_; +text: .text%__1cLklassVtablebKcompute_vtable_size_and_num_mirandas6Fri1pnMklassOopDesc_pnPobjArrayOopDesc_nLAccessFlags_pnHoopDesc_pnNsymbolOopDesc_5_v_; +text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cRaddI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cVjava_lang_ClassLoaderRis_trusted_loader6FpnHoopDesc__i_; +text: .text%__1cNmethodOopDescMsort_methods6FpnPobjArrayOopDesc_222_v_; +text: .text%__1cQSystemDictionaryQadd_to_hierarchy6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserUcompute_oop_map_size6MnTinstanceKlassHandle_ii_i_; +text: .text%__1cPClassFileParserOparseClassFile6MnMsymbolHandle_nGHandle_2r1pnGThread__nTinstanceKlassHandle__; +text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserNfill_oop_maps6MnTinstanceKlassHandle_ii_v_; +text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserYcheck_super_class_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cPClassFileParserQparse_interfaces6MnSconstantPoolHandle_nGHandle_2pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_; +text: 
.text%__1cTClassLoadingServiceTnotify_class_loaded6FpnNinstanceKlass_i_v_; +text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserMparse_fields6MnSconstantPoolHandle_ipnUFieldAllocationCount_pnOobjArrayHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cPClassFileParserNparse_methods6MnSconstantPoolHandle_ipnLAccessFlags_ppnPobjArrayOopDesc_66pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__; +text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_; +text: .text%__1cKoopFactoryQnew_constantPool6FipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cRconstantPoolKlassIallocate6MipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cPClassFileStream2t6MpCipc_v_; +text: .text%__1cNinstanceKlassbBdo_local_static_fields_impl6FnTinstanceKlassHandle_pFpnPfieldDescriptor_pnGThread__v5_v_; +text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_; +text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_; +text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_; +text: .text%__1cNinstanceKlassQinit_implementor6M_v_; +text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_; +text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryVresolve_super_or_fail6FnMsymbolHandle_1nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cKTypeRawPtrEmake6FpC_pk0_; +text: .text%__1cScompI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cScompI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cOMethodLiveness2t6MpnFArena_pnIciMethod__v_; +text: .text%__1cOMethodLivenessSpropagate_liveness6M_v_; +text: .text%__1cOMethodLivenessRinit_basic_blocks6M_v_; +text: .text%__1cOMethodLivenessNinit_gen_kill6M_v_; +text: .text%__1cOMethodLivenessQcompute_liveness6M_v_; +text: .text%__1cFKlassRoop_is_methodData6kM_i_; +text: .text%__1cFVTuneQstart_class_load6F_v_; +text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_; +text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cFVTuneOend_class_load6F_v_; +text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_; +text: .text%__1cQSystemDictionaryRload_shared_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cQSystemDictionaryRfind_shared_class6FnMsymbolHandle__pnMklassOopDesc__; +text: .text%__1cSThreadProfilerMark2T6M_v_; +text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cSMemBarVolatileNodeGOpcode6kM_i_; +text: .text%__1cRaddL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cKciTypeFlowLStateVectorGdo_ldc6MpnQciBytecodeStream__v_; +text: .text%__1cMPhaseIterGVNIoptimize6M_v_; +text: .text%__1cOrFlagsRegUOperFclone6kM_pnIMachOper__; +text: .text%__1cNmulL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cMrdi_RegPOperJnum_edges6kM_I_; +text: .text%__1cRsalI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; 
+text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_; +text: .text%__1cQPackageHashtableMcompute_hash6Mpkci_I_; +text: .text%__1cWconstantPoolCacheKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cRsalL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cIConINodeHget_int6kMpi_i_; +text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_; +text: .text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__; +text: .text%__1cLOpaque2NodeGOpcode6kM_i_; +text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_; +text: .text%__1cILoopNode2t6MpnENode_2_v_; +text: .text%__1cJloadBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_; +text: .text%__1cKstoreINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTconvI2L_reg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cScompP_mem_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIVerifierRverify_byte_codes6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIRewriterScompute_index_maps6FnSconstantPoolHandle_rpnIintArray_rpnIintStack__v_; +text: .text%__1cIRewriterXnew_constant_pool_cache6FrnIintArray_pnGThread__nXconstantPoolCacheHandle__; +text: .text%__1cIintArray2t6Mii_v_; +text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_; +text: .text%__1cKoopFactoryVnew_constantPoolCache6FipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cNinstanceKlassWadd_loader_constraints6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassLverify_code6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cWconstantPoolCacheKlassIallocate6MipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_; +text: .text%__1cFframeWsender_for_entry_frame6kMpnLRegisterMap__0_; +text: .text%__1cHPhiNodeDcmp6kMrknENode__I_; +text: .text%__1cSmembar_releaseNodeHtwo_adr6kM_I_; +text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_; +text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cPClassFileParserbSparse_constant_pool_interfacemethodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_; +text: .text%__1cMtlsLoadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmodI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cNtestL_regNodeMideal_Opcode6kM_i_; +text: .text%__1cRaddI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIConFNodeGOpcode6kM_i_; +text: .text%__1cLOpaque1NodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cTconvI2L_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNSharedRuntimebOraw_exception_handler_for_return_address6FpC_1_; +text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_; +text: .text%__1cOMethodLivenessKBasicBlockPmerge_exception6MnGBitMap__i_; +text: .text%__1cTconvI2L_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAndINodeKmul_opcode6kM_i_; +text: .text%__1cIAndINodeKadd_opcode6kM_i_; +text: .text%__1cPcmovI_reg_gNodeJnum_opnds6kM_I_; +text: .text%__1cKCMoveINodeGOpcode6kM_i_; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_; +text: 
.text%__1cIRootNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPloadConUL32NodeHsize_of6kM_I_; +text: .text%__1cJAssemblerEandq6MpnMRegisterImpl_i_v_; +text: .text%__1cLClassLoaderOlookup_package6Fpkc_pnLPackageInfo__; +text: .text%__1cQPackageHashtableJget_entry6MiIpkcL_pnLPackageInfo__; +text: .text%__1cIGraphKitRmerge_fast_memory6MpnENode_2i_v_; +text: .text%JVM_Clone; +text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_; +text: .text%__1cUCallCompiledJavaNodeGOpcode6kM_i_; +text: .text%__1cPsalI_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKadd_n_reqs6FpnENode_1_v_: graphKit.o; +text: .text%__1cSTailCalljmpIndNodeMideal_Opcode6kM_i_; +text: .text%__1cQComputeCallStackJdo_double6M_v_; +text: .text%__1cKciTypeFlowLStateVectorMdo_putstatic6MpnQciBytecodeStream__v_; +text: .text%__1cLClassLoaderLadd_package6Fpkci_i_; +text: .text%__1cIGraphKitHopt_iff6MpnENode_2_2_; +text: .text%__1cIGraphKitOmake_merge_mem6MpnENode_22_v_; +text: .text%__1cGEventsDlog6FpkcE_v_; +text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cSsafePoint_pollNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_; +text: .text%__1cPshrI_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFParseWensure_phis_everywhere6M_v_; +text: .text%__1cNsubL_rRegNodeErule6kM_I_; +text: .text%__1cNIdealLoopTreeUiteration_split_impl6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cNIdealLoopTreebBpolicy_do_remove_empty_loop6MpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeOpolicy_peeling6kMpnOPhaseIdealLoop__i_; +text: .text%__1cIBoolNodeZis_counted_loop_exit_test6M_i_; +text: .text%__1cJloadCNodeHtwo_adr6kM_I_; +text: .text%__1cUPSMarkSweepDecoratorVdestination_decorator6F_p0_; +text: .text%__1cTGeneratePairingInfoRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cENode2n6FL_pv_; +text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_; +text: .text%__1cKBufferBlob2n6FLI_pv_; +text: .text%__1cFParseKarray_load6MnJBasicType__v_; +text: .text%__1cICodeBlob2t6Mpkcii_v_; +text: .text%__1cKBufferBlob2t6Mpkci_v_; +text: .text%__1cKBufferBlobGcreate6Fpkci_p0_; +text: .text%__1cKciTypeFlowLStateVectorLdo_putfield6MpnQciBytecodeStream__v_; +text: .text%__1cHnmethodNscope_desc_at6MpC_pnJScopeDesc__; +text: .text%__1cHnmethodJcode_size6kM_i_; +text: .text%__1cRtestP_reg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cRtestP_reg_memNodePoper_input_base6kM_I_; +text: .text%__1cOPhaseIdealLoopQset_subtree_ctrl6MpnENode__v_; +text: .text%__1cOjmpLoopEndNodeJnum_opnds6kM_I_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_i_v_; +text: .text%__1cRconstantPoolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassRclass_initializer6M_pnNmethodOopDesc__; +text: .text%__1cNinstanceKlassWcall_class_initializer6MpnGThread__v_; +text: .text%__1cNinstanceKlassbOset_initialization_state_and_notify_impl6FnTinstanceKlassHandle_n0AKClassState_pnGThread__v_; +text: .text%__1cNinstanceKlassbJset_initialization_state_and_notify6Mn0AKClassState_pnGThread__v_; +text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_; +text: .text%__1cMrdi_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: 
.text%__1cTStackWalkCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_; +text: .text%__1cFTypeDCeq6kMpknEType__i_; +text: .text%__1cJLoadCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNtestL_regNodeHtwo_adr6kM_I_; +text: .text%__1cTconvL2I_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRCompilationPolicybJreset_counter_for_back_branch_event6MnMmethodHandle__v_; +text: .text%__1cMrax_RegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNmodI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cNSignatureInfoIdo_short6M_v_; +text: .text%JVM_GetFieldIxModifiers; +text: .text%__1cNsubL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNandL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cNsubL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cTMachCallRuntimeNodePret_addr_offset6M_i_; +text: .text%__1cOcompiledVFrameEcode6kM_pnHnmethod__; +text: .text%__1cICodeHeapTmark_segmap_as_used6MLL_v_; +text: .text%__1cMorI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAddLNodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cHOrINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%JVM_IsConstructorIx; +text: .text%__1cEDict2t6MpFpkv2_ipF2_ipnFArena_i_v_; +text: .text%__1cHMatcherLfind_shared6MpnENode__v_; +text: .text%__1cHMatcherFxform6MpnENode_i_2_; +text: .text%__1cJStartNodeHsize_of6kM_I_; +text: .text%__1cILRG_List2t6MI_v_; +text: .text%__1cHMatcherLreturn_addr6kM_i_; +text: .text%__1cSindIndexOffsetOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cGBundlePinitialize_nops6FppnIMachNode__v_; +text: .text%__1cOMachPrologNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cHMemNodeHsize_of6kM_I_; +text: .text%__1cNSignatureInfoIdo_float6M_v_; +text: .text%__1cRaddI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRmulI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cFParseNadd_safepoint6M_v_; +text: .text%__1cFStateT_sub_Op_CheckCastPP6MpknENode__v_; +text: .text%__1cRaddI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOCompiledRFrameEinit6M_v_; +text: .text%__1cGvframeDtop6kM_p0_; +text: .text%__1cPsarI_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cRmethodDataOopDescYcompute_extra_data_count6Fii_i_; +text: .text%__1cPcheckCastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciObjectIis_klass6M_i_; +text: .text%__1cFStateM_sub_Op_SubI6MpknENode__v_; +text: .text%__1cRxorI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNloadConP0NodeHsize_of6kM_I_; +text: .text%__1cJAssemblerEaddq6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerEsubq6MpnMRegisterImpl_2_v_; +text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6ML_v_; +text: .text%__1cTresource_free_bytes6FpcL_v_; +text: .text%__1cNSingletonBlobMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cUPSMarkSweepDecoratorPadjust_pointers6M_v_; +text: .text%__1cUPSMarkSweepDecoratorHcompact6Mi_v_; +text: .text%__1cUPSMarkSweepDecoratorKprecompact6M_v_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeMideal_Opcode6kM_i_; +text: .text%__1cRindIndexScaleOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cRindIndexScaleOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cPCountedLoopNodeHsize_of6kM_I_; +text: .text%__1cENodeHrm_prec6MI_v_; +text: .text%__1cHAddNodeGis_Add6kM_pk0_; +text: .text%__1cHCompilebAvarargs_C_out_slots_killed6kM_I_; +text: .text%__1cTMachCallRuntimeNodeSis_MachCallRuntime6M_p0_; +text: .text%__1cMrax_RegIOperJnum_edges6kM_I_; +text: 
.text%__1cICodeHeapLmerge_right6MpnJFreeBlock__v_; +text: .text%__1cNaddI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNmulL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cWandI_rReg_imm65535NodeMideal_Opcode6kM_i_; +text: .text%__1cKReturnNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRjmpConU_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLjmpConUNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cRjmpConU_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cRjmpConU_shortNodeJis_Branch6kM_I_; +text: .text%__1cKcmpOpUOperFclone6kM_pnIMachOper__; +text: .text%__1cRtestP_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFMutexbLwait_for_lock_blocking_implementation6MpnKJavaThread__v_; +text: .text%__1cIregDOperJnum_edges6kM_I_; +text: .text%__1cPciInstanceKlassTis_java_lang_Object6M_i_; +text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__; +text: .text%__1cSindIndexOffsetOperNconstant_disp6kM_i_; +text: .text%__1cIAndLNodeGadd_id6kM_pknEType__; +text: .text%__1cLConvL2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQleaPIdxScaleNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIAndLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cRaddP_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMloadConLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHMatcherQpost_fast_unlock6FpknENode__i_; +text: .text%__1cFStateV_sub_Op_MemBarRelease6MpknENode__v_; +text: .text%__1cOleaPIdxOffNodeMideal_Opcode6kM_i_; +text: .text%__1cILoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cScompI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cScompI_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cJAssemblerDorq6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cScompI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKJNIHandlesOdestroy_global6FpnI_jobject_i_v_; +text: .text%__1cPcmpFastLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXmembar_release_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTcompareAndSwapLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmethodOopDescTset_native_function6MpC_v_; +text: .text%__1cKciTypeFlowLStateVectorJhalf_type6FpnGciType__3_; +text: .text%__1cQmerge_point_safe6FpnENode__i_: loopopts.o; +text: .text%__1cRaddL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cHMatcherUc_calling_convention6FpnLOptoRegPair_I_v_; +text: .text%__1cPCallRuntimeNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_; +text: .text%__1cNaddL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNxorI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cWCallLeafNoFPDirectNodePoper_input_base6kM_I_; +text: .text%__1cENodeHget_int6kMpi_i_; +text: .text%__1cPCountedLoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJLoadFNodeGOpcode6kM_i_; +text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNincI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPClassFileParserbEparse_constant_pool_long_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPcmovI_reg_lNodePoper_input_base6kM_I_; +text: .text%__1cJAssemblerEleaq6MpnMRegisterImpl_nHAddress__v_; +text: 
.text%__1cNinstanceKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cJloadINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHTypePtrFxmeet6kMpknEType__3_; +text: .text%__1cNprefetchwNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__; +text: .text%__1cIPhaseIFGYCompute_Effective_Degree6M_v_; +text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__; +text: .text%__1cMPhaseChaitinOcache_lrg_info6M_v_; +text: .text%__1cMPhaseChaitinISimplify6M_v_; +text: .text%__1cMPhaseChaitinGSelect6M_I_; +text: .text%__1cRsarL_rReg_immNodeErule6kM_I_; +text: .text%__1cKcmpOpUOperJnot_equal6kM_i_; +text: .text%__1cScompU_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cScompU_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cRNativeGeneralJumpQjump_destination6kM_pC_; +text: .text%__1cLRethrowNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cWCallLeafNoFPDirectNodeHtwo_adr6kM_I_; +text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_; +text: .text%__1cSvframeStreamCommonYfill_from_compiled_frame6MpnHnmethod_i_v_; +text: .text%__1cNandL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cHnmethodQis_native_method6kM_i_; +text: .text%__1cTleaPIdxScaleOffNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNmulL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cODataRelocationGoffset6M_i_; +text: .text%__1cODataRelocationJset_value6MpC_v_; +text: .text%__1cKRelocationRpd_set_data_value6MpCl_v_; +text: .text%__1cLOptoRuntimeJstub_name6FpC_pkc_; +text: .text%__1cIMulINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFStateO_sub_Op_StoreB6MpknENode__v_; +text: .text%__1cRaddL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cIregFOperJnum_edges6kM_I_; +text: .text%__1cRandI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIRootNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIRootNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOleaPIdxOffNodePoper_input_base6kM_I_; +text: .text%__1cJcmpOpOperKless_equal6kM_i_; +text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_; +text: .text%__1cIMulLNodeGmul_id6kM_pknEType__; +text: .text%__1cMrep_stosNodeHtwo_adr6kM_I_; +text: .text%__1cHMemNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cNsubI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cHMemNodeScalculate_adr_type6FpknEType_pknHTypePtr__6_; +text: .text%__1cRmulI_rReg_immNodeErule6kM_I_; +text: .text%__1cURethrowExceptionNodePoper_input_base6kM_I_; +text: .text%__1cNaddP_rRegNodePoper_input_base6kM_I_; +text: .text%__1cFStateP_sub_Op_RShiftI6MpknENode__v_; +text: .text%__1cKstoreLNodeHtwo_adr6kM_I_; +text: .text%__1cNnegI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodePoper_input_base6kM_I_; +text: .text%__1cbFloadConL_0x6666666666666667NodeErule6kM_I_; +text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cIGraphKitNstore_barrier6MpnENode_22_v_; +text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_; +text: .text%__1cNloadKlassNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSmembar_acquireNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMoutputStreamMdo_vsnprintf6FpcLpkcpnR__va_list_element_irL_3_; +text: .text%__1cMoutputStreamFprint6MpkcE_v_; +text: .text%__1cMURShiftLNodeJideal_reg6kM_I_; +text: 
.text%__1cZPhaseConservativeCoalesce2t6MrnMPhaseChaitin__v_; +text: .text%__1cMPhaseChaitinFSplit6MI_I_; +text: .text%__1cMPhaseChaitinHcompact6M_v_; +text: .text%__1cMPhaseChaitinZcompress_uf_map_for_nodes6M_v_; +text: .text%__1cZPhaseConservativeCoalesceGverify6M_v_; +text: .text%__1cQComputeCallStackIdo_short6M_v_; +text: .text%__1cNFingerprinterHdo_long6M_v_; +text: .text%__1cIciMethodRinstructions_size6M_i_; +text: .text%__1cSsafePoint_pollNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNloadConL0NodeLbottom_type6kM_pknEType__; +text: .text%__1cJimmL0OperJconstantL6kM_x_; +text: .text%__1cWandI_rReg_imm65535NodePoper_input_base6kM_I_; +text: .text%__1cIAndINodeJideal_reg6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerKverify_oop6MpnMRegisterImpl_nITosState__v_; +text: .text%__1cYexternal_word_RelocationJpack_data6M_i_; +text: .text%__1cJimmP0OperFclone6kM_pnIMachOper__; +text: .text%__1cKRelocationYruntime_address_to_index6FpC_l_; +text: .text%__1cOemit_d32_reloc6FrnKCodeBuffer_inJrelocInfoJrelocType_i_v_; +text: .text%__1cYexternal_word_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cRsalL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cLPhaseValues2T5B6M_v_; +text: .text%__1cNstoreImmBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateQ_sub_Op_URShiftL6MpknENode__v_; +text: .text%__1cJNode_ListEyank6MpnENode__v_; +text: .text%__1cNxorI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNxorI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cJAssemblerEmovq6MpnMRegisterImpl_l_v_; +text: .text%jni_ExceptionCheck: jni.o; +text: .text%__1cMFastLockNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cTCallDynamicJavaNodeEhash6kM_I_; +text: .text%__1cMalloc_object6FpnH_jclass_pnGThread__pnPinstanceOopDesc__: jni.o; +text: .text%__1cRshrL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIXorINodeLbottom_type6kM_pknEType__; +text: .text%__1cJAssemblerEsubq6MpnMRegisterImpl_i_v_; +text: .text%__1cNloadConL0NodeMideal_Opcode6kM_i_; +text: .text%__1cLPcDescCacheKpc_desc_at6kMpnHnmethod_pC_pnGPcDesc__; +text: .text%__1cKBlock_ListGinsert6MIpnFBlock__v_; +text: .text%__1cKtype2basic6FpknEType__nJBasicType__; +text: .text%__1cQleaPIdxScaleNodeLbottom_type6kM_pknEType__; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_; +text: .text%__1cIGraphKitOnull_check_oop6MpnKRegionNode_pnENode_i_4_; +text: .text%__1cJloadCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRxorI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cKTypeAryPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cKTypeRawPtrEmake6FnHTypePtrDPTR__pk0_; +text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_; +text: .text%__1cICodeHeapPadd_to_freelist6MpnJHeapBlock__v_; +text: .text%__1cICodeHeapKdeallocate6Mpv_v_; +text: .text%__1cFframeLnmethods_do6M_v_; +text: .text%__1cJVectorSetGslamin6Mrk0_v_; +text: .text%__1cFStateQ_sub_Op_URShiftI6MpknENode__v_; +text: .text%__1cScompI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRaddI_rReg_memNodeErule6kM_I_; +text: .text%__1cYexternal_word_RelocationWfix_relocation_at_move6Ml_v_; +text: .text%__1cKRelocationYpd_get_address_from_code6M_pC_; +text: .text%__1cRxorI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_; +text: .text%__1cNandL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOCompilerOracleMshould_print6FnMmethodHandle__i_; +text: .text%__1cNstoreImmBNodeFreloc6kM_i_; +text: 
.text%__1cJcmpOpOperNgreater_equal6kM_i_; +text: .text%__1cKBufferBlobEfree6Fp0_v_; +text: .text%__1cZInterpreterMacroAssemblerKverify_FPU6MinITosState__v_; +text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_; +text: .text%__1cOGenerateOopMapIppop_any6Mi_v_; +text: .text%__1cKNode_ArrayFclear6M_v_; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_; +text: .text%__1cJAssemblerFpushq6MpnMRegisterImpl__v_; +text: .text%__1cIRootNodeHis_Root6M_p0_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cIJumpDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cRsalL_rReg_immNodeErule6kM_I_; +text: .text%__1cPstoreImmI16NodeHtwo_adr6kM_I_; +text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_; +text: .text%jni_NewObject: jni.o; +text: .text%__1cNaddP_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cPsarI_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cFciEnvKcompile_id6M_I_; +text: .text%__1cOPhaseIdealLoopNreorg_offsets6MpnNIdealLoopTree__v_; +text: .text%__1cNtestL_regNodeErule6kM_I_; +text: .text%__1cLOptoRuntimebAcomplete_monitor_exit_Type6F_pknITypeFunc__; +text: .text%__1cNstoreImmINodeHtwo_adr6kM_I_; +text: .text%__1cIGraphKitNshared_unlock6MpnENode_2_v_; +text: .text%__1cNSafePointNodeLpop_monitor6M_v_; +text: .text%__1cRsarI_rReg_immNodeErule6kM_I_; +text: .text%__1cNtestL_regNodePoper_input_base6kM_I_; +text: .text%__1cRsarL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMindirectOperNbase_position6kM_i_; +text: .text%__1cMindirectOperNconstant_disp6kM_i_; +text: .text%__1cMTailCallNodeKmatch_edge6kMI_I_; +text: .text%__1cKciTypeFlowLStateVectorGdo_new6MpnQciBytecodeStream__v_; +text: .text%__1cHMatcherPprior_fast_lock6FpknENode__i_; +text: .text%__1cGIfNodeMdominated_by6MpnENode_pnMPhaseIterGVN__v_; +text: .text%__1cFStateV_sub_Op_MemBarAcquire6MpknENode__v_; +text: .text%__1cNSharedRuntimeQfind_callee_info6FpnKJavaThread_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cFKlassDLCA6Mp0_1_; +text: .text%__1cRtestP_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRtestP_reg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cRtestP_reg_memNodeJnum_opnds6kM_I_; +text: .text%__1cHciKlassVleast_common_ancestor6Mp0_1_; +text: .text%__1cUmembar_cpu_orderNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUmembar_cpu_orderNodeLbottom_type6kM_pknEType__; +text: .text%__1cTcompareAndSwapLNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cNSCMemProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cTcompareAndSwapLNodeHtwo_adr6kM_I_; +text: .text%__1cJScopeDescGsender6kM_p0_; +text: .text%__1cSindIndexOffsetOperOindex_position6kM_i_; +text: .text%__1cSindIndexOffsetOperNbase_position6kM_i_; +text: .text%__1cNSharedRuntimeOresolve_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cNSafePointNodeMpush_monitor6MpknMFastLockNode__v_; +text: .text%__1cNSharedRuntimeSresolve_sub_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cOcompiledVFrameGsender6kM_pnGvframe__; +text: .text%__1cNtestU_regNodeHtwo_adr6kM_I_; +text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_; +text: .text%__1cNGrowableArray4Cpv_2t6MpnFArena_iirk0_v_; +text: .text%__1cKstoreFNodePoper_input_base6kM_I_; +text: .text%__1cNGrowableArray4Cl_2t6MpnFArena_iirkl_v_; +text: .text%__1cNstoreImmINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOMachEpilogNodeFreloc6kM_i_; +text: 
.text%__1cOMachEpilogNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovI_regNodeMideal_Opcode6kM_i_; +text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_; +text: .text%__1cFStateN_sub_Op_LoadL6MpknENode__v_; +text: .text%__1cNmodI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cSCallLeafDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNSignatureInfoHdo_char6M_v_; +text: .text%__1cNtestU_regNodeMideal_Opcode6kM_i_; +text: .text%__1cFStateQ_sub_Op_CallLeaf6MpknENode__v_; +text: .text%__1cRAbstractAssemblerFflush6M_v_; +text: .text%__1cJloadLNodeIpeephole6MpnFBlock_ipnNPhaseRegAlloc_ri_pnIMachNode__; +text: .text%__1cJloadLNodeFreloc6kM_i_; +text: .text%__1cSCallLeafDirectNodeFreloc6kM_i_; +text: .text%__1cIGraphKitNgen_checkcast6MpnENode_2p2_2_; +text: .text%__1cIGraphKitRgen_subtype_check6MpnENode_2_2_; +text: .text%__1cNsubL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXmembar_release_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cJAssemblerEmovq6MpnMRegisterImpl_2_v_; +text: .text%__1cRmulL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cRsubI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cFframeRretrieve_receiver6MpnLRegisterMap__pnHoopDesc__; +text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__; +text: .text%jni_NewGlobalRef: jni.o; +text: .text%__1cKciTypeFlowFRangeSprivate_copy_count6kMpn0AGJsrSet__i_; +text: .text%__1cOleaPIdxOffNodeJnum_opnds6kM_I_; +text: .text%__1cOPhaseIdealLoopLdo_split_if6MpnENode__v_; +text: .text%__1cNandI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cHOrINodeGadd_id6kM_pknEType__; +text: .text%__1cIPhaseCFGOinsert_goto_at6MII_v_; +text: .text%__1cOPhaseIdealLoop2t6MrnMPhaseIterGVN_pk0i_v_; +text: .text%__1cOPhaseIdealLoopPbuild_loop_tree6M_v_; +text: .text%__1cRsubI_rReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMinINodeLbottom_type6kM_pknEType__; +text: .text%__1cOjmpLoopEndNodeHtwo_adr6kM_I_; +text: .text%__1cJLoadBNodeJideal_reg6kM_I_; +text: .text%__1cNnegI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cFStateS_sub_Op_FastUnlock6MpknENode__v_; +text: .text%__1cXmembar_release_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRcmpFastUnlockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMVirtualSpaceNreserved_size6kM_L_; +text: .text%__1cScompU_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsarI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKStoreFNodeGOpcode6kM_i_; +text: .text%__1cLCastP2LNodeJideal_reg6kM_I_; +text: .text%__1cPcmovI_reg_gNodeErule6kM_I_; +text: .text%__1cFStateP_sub_Op_CastP2L6MpknENode__v_; +text: .text%__1cScompU_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cScompU_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cScompU_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cWCallLeafNoFPDirectNodeRis_safepoint_node6kM_i_; +text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_; +text: .text%__1cWCallLeafNoFPDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_pCnJrelocInfoJrelocType__v_; +text: .text%__1cKstoreFNodeMideal_Opcode6kM_i_; +text: .text%__1cIimmFOperJconstantF6kM_f_; +text: .text%__1cNcmovI_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: 
.text%__1cKTypeOopPtrSmake_from_constant6FpnIciObject__pk0_; +text: .text%__1cNcmovI_regNodeJnum_opnds6kM_I_; +text: .text%__1cJAssemblerEmovq6MnHAddress_i_v_; +text: .text%__1cIciObjectJis_method6M_i_; +text: .text%__1cIciObjectOis_method_data6M_i_; +text: .text%__1cIDivINodeLbottom_type6kM_pknEType__; +text: .text%__1cHOrINodeJideal_reg6kM_I_; +text: .text%__1cNcmovI_regNodeMcisc_operand6kM_i_; +text: .text%__1cIAndLNodeGmul_id6kM_pknEType__; +text: .text%__1cIAndLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cJloadFNodeMideal_Opcode6kM_i_; +text: .text%__1cbFunnecessary_membar_volatileNodeMideal_Opcode6kM_i_; +text: .text%__1cSmembar_acquireNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cIAndLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIAndLNodeKadd_opcode6kM_i_; +text: .text%__1cFStateO_sub_Op_StoreC6MpknENode__v_; +text: .text%__1cIAndLNodeKmul_opcode6kM_i_; +text: .text%__1cRaddL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMrep_stosNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXmembar_acquire_lockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFParseJdo_ifnull6MnIBoolTestEmask__v_; +text: .text%__1cMtlsLoadPNodeHtwo_adr6kM_I_; +text: .text%__1cIGraphKitOset_pair_local6MipnENode__v_; +text: .text%__1cJLoadCNodeJideal_reg6kM_I_; +text: .text%__1cPcmovI_reg_lNodeMideal_Opcode6kM_i_; +text: .text%__1cJCodeCacheXmark_for_deoptimization6FpnMklassOopDesc__i_; +text: .text%__1cMrcx_RegIOperEtype6kM_pknEType__; +text: .text%__1cLConvL2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOPhaseIdealLoopKDominators6M_v_; +text: .text%__1cHNTarjanDDFS6Fp0rnJVectorSet_pnOPhaseIdealLoop_pI_i_; +text: .text%__1cHNTarjanIsetdepth6MIpI_v_; +text: .text%__1cIMulLNodeKmul_opcode6kM_i_; +text: .text%__1cIMulLNodeKadd_opcode6kM_i_; +text: .text%jni_SetLongField: jni.o; +text: .text%__1cOPhaseIdealLoopQbuild_loop_early6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cOPhaseIdealLoopPbuild_loop_late6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cOPhaseIdealLoopRinit_dom_lca_tags6M_v_; +text: .text%__1cKstoreLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLPcDescCacheLadd_pc_desc6MpnGPcDesc__v_; +text: .text%__1cScheck_phi_clipping6FpnHPhiNode_rpnHConNode_rI45rpnENode_5_i_: cfgnode.o; +text: .text%__1cJloadSNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTconvI2L_reg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cRsubI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMloadConFNodeMideal_Opcode6kM_i_; +text: .text%__1cTC2IAdapterGeneratorUgenerate_c2i_adapter6FnMmethodHandle__pnKC2IAdapter__; +text: .text%__1cKCompiledICIis_clean6kM_i_; +text: .text%__1cNaddP_rRegNodeErule6kM_I_; +text: .text%__1cRmulL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cNmethodOopDescVclear_native_function6M_v_; +text: .text%__1cICodeBlobOis_java_method6kM_i_; +text: .text%__1cKVtableStubSpd_code_size_limit6Fi_i_; +text: .text%__1cIUniverseWis_out_of_memory_error6FnGHandle__i_; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_; +text: .text%__1cTjava_lang_ThrowableQclear_stacktrace6FpnHoopDesc__v_; +text: .text%__1cKJavaThreadGactive6F_p0_; +text: .text%JVM_FillInStackTrace; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_; +text: 
.text%__1cSInterpreterRuntimePset_bcp_and_mdp6FpCpnKJavaThread__v_; +text: .text%__1cKJavaThreadNreguard_stack6MpC_i_; +text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_; +text: .text%jni_DeleteGlobalRef: jni.o; +text: .text%__1cKCompiledICZcompute_monomorphic_entry6FnMmethodHandle_nLKlassHandle_iirnOCompiledICInfo_pnGThread__v_; +text: .text%__1cNGrowableArray4nMmethodHandle__Icontains6kMrkn0A__i_; +text: .text%__1cLOpaque2NodeEhash6kM_I_; +text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__; +text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__; +text: .text%__1cJBytecodesRspecial_length_at6FpC_i_; +text: .text%__1cFParseGdo_new6M_v_; +text: .text%__1cFParseFBlockMadd_new_path6M_i_; +text: .text%__1cLklassItablebFinitialize_itable_for_interface6MpnMklassOopDesc_pnRitableMethodEntry__v_; +text: .text%__1cJimmI0OperJnum_edges6kM_I_; +text: .text%__1cRmulI_rReg_immNodeMcisc_operand6kM_i_; +text: .text%__1cICodeHeapMmax_capacity6kM_L_; +text: .text%__1cMciMethodDataStrap_recompiled_at6MpnLProfileData__i_; +text: .text%__1cRindIndexScaleOperFscale6kM_i_; +text: .text%__1cNxorI_rRegNodeErule6kM_I_; +text: .text%__1cFParseFBlockNstack_type_at6kMi_pknEType__; +text: .text%__1cLConvL2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cFTypeFEmake6Ff_pk0_; +text: .text%__1cIModINodeLbottom_type6kM_pknEType__; +text: .text%__1cJcmpOpOperHgreater6kM_i_; +text: .text%__1cQComputeCallStackHdo_bool6M_v_; +text: .text%__1cJMemRegionMintersection6kM0_0_; +text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cRmulI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHciKlassGloader6M_pnHoopDesc__; +text: .text%__1cIConDNodeGOpcode6kM_i_; +text: .text%__1cNandI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cLRethrowNodeEhash6kM_I_; +text: .text%__1cTC2IAdapterGeneratorSstd_verified_entry6FnMmethodHandle__pC_; +text: .text%__1cIDivLNodeGOpcode6kM_i_; +text: .text%__1cNandI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cGThreadOis_Java_thread6kM_i_; +text: .text%__1cSmembar_releaseNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cHMatcherQinline_cache_reg6F_i_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeJnum_opnds6kM_I_; +text: .text%__1cbBInterpreterCodeletInterfaceRcode_size_to_size6kMi_i_; +text: .text%__1cbBInterpreterCodeletInterfaceKinitialize6MpnEStub_i_v_; +text: .text%jni_NewLocalRef: jni.o; +text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_; +text: .text%__1cLOptoRuntimebAresolve_opt_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cPstoreImmI16NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLRShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIemit_d166FrnKCodeBuffer_i_v_; +text: .text%__1cKimmI16OperIconstant6kM_l_; +text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_; +text: .text%__1cMloadConFNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__; +text: .text%__1cJCodeCacheNalive_nmethod6FpnICodeBlob__pnHnmethod__; +text: .text%__1cJAssemblerGmovzbl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cIVMThreadMis_VM_thread6kM_i_; +text: .text%__1cPcmovI_reg_lNodeJnum_opnds6kM_I_; +text: .text%__1cMloadConLNodeHsize_of6kM_I_; +text: .text%__1cOMacroAssemblerSload_unsigned_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cTconvI2L_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNaddL_rRegNodeHtwo_adr6kM_I_; +text: 
.text%__1cKstoreFNodeJnum_opnds6kM_I_; +text: .text%__1cNaddL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cSComputeAdapterInfoJdo_double6M_v_; +text: .text%__1cLimmUL32OperFclone6kM_pnIMachOper__; +text: .text%__1cPloadConUL32NodeFclone6kM_pnENode__; +text: .text%__1cLLShiftLNodeJideal_reg6kM_I_; +text: .text%__1cMtlsLoadPNodePoper_input_base6kM_I_; +text: .text%__1cPlocal_vsnprintf6FpcLpkcpnR__va_list_element__i_; +text: .text%__1cSComputeAdapterInfoHdo_bool6M_v_; +text: .text%jio_vsnprintf; +text: .text%__1cURethrowExceptionNodeGpinned6kM_i_; +text: .text%__1cNstoreImmINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAndLNodeJideal_reg6kM_I_; +text: .text%__1cURethrowExceptionNodeHtwo_adr6kM_I_; +text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_; +text: .text%jio_snprintf; +text: .text%__1cNSafePointNodeKgrow_stack6MpnIJVMState_I_v_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeErule6kM_I_; +text: .text%__1cURethrowExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsarL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cTcompareAndSwapLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_GetCPMethodModifiers; +text: .text%__1cFStateR_sub_Op_SafePoint6MpknENode__v_; +text: .text%__1cSsafePoint_pollNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOCompilerOraclePshould_break_at6FnMmethodHandle__i_; +text: .text%__1cJloadCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQorI_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cPsarI_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLRShiftLNodeLbottom_type6kM_pknEType__; +text: .text%__1cKReturnNode2t6MpnENode_2222_v_; +text: .text%__1cKReturnNodeJideal_reg6kM_I_; +text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_; +text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_; +text: .text%JVM_DoPrivileged; +text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_; +text: .text%__1cNIdealLoopTreeMis_loop_exit6kMpnENode_pnOPhaseIdealLoop__2_; +text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cSsafePoint_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSsafePoint_pollNodeFreloc6kM_i_; +text: .text%__1cLStrCompNodeGOpcode6kM_i_; +text: .text%__1cJOopMapSet2t6M_v_; +text: .text%__1cKloadUBNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cNobjArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cKstoreCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeGOpcode6kM_i_; +text: .text%__1cOLibraryCallKitNtry_to_inline6M_i_; +text: .text%__1cNFingerprinterHdo_bool6M_v_; +text: .text%__1cOPhaseIdealLoopUsplit_if_with_blocks6MrnJVectorSet_rnKNode_Stack__v_; +text: .text%__1cNmethodOopDescbDbuild_interpreter_method_data6FnMmethodHandle_pnGThread__v_; +text: .text%__1cQLibraryIntrinsicIgenerate6MpnIJVMState__2_; +text: .text%__1cLOopRecorderIoop_size6M_i_; +text: .text%__1cHnmethodOexception_size6kM_i_; +text: .text%__1cHnmethodPscopes_pcs_size6kM_i_; +text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_; +text: .text%__1cYDebugInformationRecorderJdata_size6M_i_; +text: .text%__1cHnmethodQscopes_data_size6kM_i_; +text: .text%__1cHnmethodJstub_size6kM_i_; +text: .text%__1cHnmethodKtotal_size6kM_i_; +text: .text%__1cNtestU_regNodeErule6kM_I_; +text: .text%__1cJOopMapSetJheap_size6kM_i_; +text: .text%__1cICodeBlobWfix_relocation_at_move6Ml_v_; +text: .text%__1cKCodeBufferJcopy_code6MpnICodeBlob__v_; 
+text: .text%__1cNRelocIteratorMcreate_index6FpnKCodeBuffer_pnJrelocInfo_4_4_; +text: .text%__1cICodeBlobPallocation_size6FpnKCodeBuffer_ii_I_; +text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_; +text: .text%__1cICodeBlob2t6MpkcpnKCodeBuffer_iiipnJOopMapSet_i_v_; +text: .text%__1cMrdx_RegLOperEtype6kM_pknEType__; +text: .text%__1cKCodeBufferPcopy_relocation6MpnICodeBlob__v_; +text: .text%__1cVPatchingRelocIteratorHprepass6M_v_; +text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_; +text: .text%__1cJOopMapSetHcopy_to6MpC_v_; +text: .text%__1cLPhaseValues2t6Mp0_v_; +text: .text%__1cINodeHash2t6Mp0_v_; +text: .text%__1cOPhaseTransform2t6Mp0nFPhaseLPhaseNumber__v_; +text: .text%__1cJAssemblerDjmp6MnHAddress__v_; +text: .text%__1cOJNIHandleBlockRrebuild_free_list6M_v_; +text: .text%__1cSstring_compareNodeZcheck_for_anti_dependence6kM_i_; +text: .text%jni_GetObjectArrayElement: jni.o; +text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_; +text: .text%__1cIDivINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cVCallRuntimeDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cICmpDNodeGOpcode6kM_i_; +text: .text%__1cPcmovI_reg_gNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_AndI6MpknENode__v_; +text: .text%__1cHCompilebBregister_library_intrinsics6M_v_; +text: .text%__1cNGrowableArray4CpnNCallGenerator__2t6Mii_v_; +text: .text%__1cETypeKInitialize6FpnHCompile__v_; +text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_; +text: .text%__1cOCompileWrapper2t6MpnHCompile__v_; +text: .text%__1cHCompileEInit6Mi_v_; +text: .text%__1cVExceptionHandlerTable2t6Mi_v_; +text: .text%__1cFDictIFreset6MpknEDict__v_; +text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_; +text: .text%JVM_FindLoadedClass; +text: .text%__1cHMatcherZnumber_of_saved_registers6F_i_; +text: .text%__1cXPhaseAggressiveCoalesceNinsert_copies6MrnHMatcher__v_; +text: .text%__1cIPhaseCFGDDFS6MpnGTarjan__I_; +text: .text%__1cFArenaNmove_contents6Mp0_1_; +text: .text%__1cIPhaseIFG2t6MpnFArena__v_; +text: .text%__1cHMatcherUvalidate_null_checks6M_v_; +text: .text%__1cHCompileOcompute_old_SP6M_i_; +text: .text%__1cJPhaseLive2t6MrknIPhaseCFG_rnILRG_List_pnFArena__v_; +text: .text%__1cMPhaseChaitinRbuild_ifg_virtual6M_v_; +text: .text%__1cXPhaseAggressiveCoalesceGverify6M_v_; +text: .text%__1cJStartNodeJideal_reg6kM_I_; +text: .text%__1cHMatcher2t6MrnJNode_List__v_; +text: .text%__1cFArena2t6ML_v_; +text: .text%__1cIPhaseCFGOschedule_early6MrnJVectorSet_rnJNode_List_rnLBlock_Array__i_; +text: .text%__1cWNode_Backward_Iterator2t6MpnENode_rnJVectorSet_rnJNode_List_rnLBlock_Array__v_; +text: .text%__1cHMatcherFmatch6M_v_; +text: .text%__1cFStateM_sub_Op_Goto6MpknENode__v_; +text: .text%__1cIPhaseCFGNschedule_late6MrnJVectorSet_rnJNode_List_rnNGrowableArray4CI___v_; +text: .text%__1cIPhaseCFGQFind_Inner_Loops6M_v_; +text: .text%__1cIPhaseCFGQGlobalCodeMotion6MrnHMatcher_IrnJNode_List__v_; +text: .text%__1cIPhaseCFGYEstimate_Block_Frequency6M_v_; +text: .text%__1cIPhaseCFGJbuild_cfg6M_I_; +text: .text%__1cHCompileICode_Gen6M_v_; +text: .text%__1cMPhaseChaitin2t6MIrnIPhaseCFG_rnHMatcher__v_; +text: .text%__1cMPhaseChaitinRRegister_Allocate6M_v_; +text: .text%__1cMPhaseChaitinGde_ssa6M_v_; +text: .text%__1cMPhaseChaitinbGstretch_base_pointer_live_ranges6MpnMResourceArea__i_; +text: .text%__1cNPhaseRegAllocTpd_preallocate_hook6M_v_; +text: .text%__1cHMatcherPinit_spill_mask6MpnENode__v_; +text: .text%__1cHMatcherTFixup_Save_On_Entry6M_v_; +text: 
.text%__1cHMatcherVinit_first_stack_mask6M_v_; +text: .text%__1cIPhaseCFG2t6MpnFArena_pnIRootNode_rnHMatcher__v_; +text: .text%__1cGTarjanIsetdepth6MI_v_; +text: .text%__1cIPhaseCFGKDominators6M_v_; +text: .text%__1cNPhaseRegAlloc2t6MIrnIPhaseCFG_rnHMatcher_pF_v_v_; +text: .text%__1cIPhaseCFGVschedule_pinned_nodes6MrnJVectorSet__v_; +text: .text%__1cHCompileTframe_size_in_words6kM_i_; +text: .text%__1cOCompileWrapper2T6M_v_; +text: .text%__1cHCompileYinit_scratch_buffer_blob6M_v_; +text: .text%__1cHCompileYinit_scratch_locs_memory6M_v_; +text: .text%__1cNPhasePeephole2t6MpnNPhaseRegAlloc_rnIPhaseCFG__v_; +text: .text%__1cJPhaseLive2T6M_v_; +text: .text%__1cNPhasePeephole2T6M_v_; +text: .text%__1cHCompileGOutput6M_v_; +text: .text%__1cHCompileQShorten_branches6MpnFLabel_ri333_v_; +text: .text%__1cHCompileLFill_buffer6M_v_; +text: .text%__1cHCompileTFillExceptionTables6MIpI1pnFLabel__v_; +text: .text%__1cHCompileRScheduleAndBundle6M_v_; +text: .text%__1cOMachPrologNodeFreloc6kM_i_; +text: .text%__1cNtestU_regNodePoper_input_base6kM_I_; +text: .text%__1cWemit_exception_handler6FrnKCodeBuffer__v_; +text: .text%__1cWsize_exception_handler6F_I_; +text: .text%__1cWImplicitExceptionTableIset_size6MI_v_; +text: .text%__1cNPhasePeepholeMdo_transform6M_v_; +text: .text%__1cMPhaseChaitinMfixup_spills6M_v_; +text: .text%__1cMPhaseChaitin2T6M_v_; +text: .text%__1cNPhaseRegAllocPalloc_node_regs6Mi_v_; +text: .text%__1cKCodeBufferOrelocate_stubs6M_v_; +text: .text%__1cIPhaseCFGLRemoveEmpty6M_v_; +text: .text%__1cLdo_liveness6FpnNPhaseRegAlloc_pnIPhaseCFG_pnKBlock_List_ipnFArena_pnEDict__v_: buildOopMap.o; +text: .text%__1cHCompileMBuildOopMaps6M_v_; +text: .text%__1cMPhaseChaitinbApost_allocate_copy_removal6M_v_; +text: .text%__1cNGrowableArray4CpnJNode_List__2t6Mii_v_; +text: .text%__1cRsarL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_; +text: .text%__1cFStateM_sub_Op_CmpL6MpknENode__v_; +text: .text%__1cJloadSNodeFreloc6kM_i_; +text: .text%__1cFStateN_sub_Op_LoadS6MpknENode__v_; +text: .text%__1cSInterpreterRuntimeOprofile_method6FpnKJavaThread_pC_i_; +text: .text%__1cOCompiledRFrame2t6MnFframe_pnKJavaThread_pnGRFrame__v_; +text: .text%__1cKC2IAdapterOis_c2i_adapter6kM_i_; +text: .text%__1cOCompiledRFrameKtop_method6kM_nMmethodHandle__; +text: .text%__1cOCompiledRFrameLis_compiled6kM_i_; +text: .text%__1cRmethodDataOopDescKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cKoopFactoryOnew_methodData6FnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_bytes6FpnNmethodOopDesc__i_; +text: .text%__1cPmethodDataKlassIallocate6MnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cNxorI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_words6FpnNmethodOopDesc__i_; +text: .text%__1cRmethodDataOopDescPpost_initialize6MpnOBytecodeStream__v_; +text: .text%__1cHRetNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateO_sub_Op_Return6MpknENode__v_; +text: .text%__1cHRetNodeFreloc6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_base6MnITosState_ppCi_v_; +text: .text%__1cZCallInterpreterDirectNodeHtwo_adr6kM_I_; +text: .text%__1cNloadConP0NodeFclone6kM_pnENode__; +text: .text%__1cOClearArrayNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKScopeValueJread_from6FpnTDebugInfoReadStream__p0_; +text: .text%__1cOcompiledVFrameScreate_stack_value6kMpnKScopeValue__pnKStackValue__; +text: 
.text%__1cQleaPIdxScaleNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRindIndexScaleOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cIGraphKitNallocate_heap6MpnENode_222pknITypeFunc_pC22ipknKTypeOopPtr__2_; +text: .text%__1cPciInstanceKlassbBcompute_shared_has_subklass6M_i_; +text: .text%__1cNSignatureInfoHdo_byte6M_v_; +text: .text%__1cQorI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cKCompiledICSset_ic_destination6MpC_v_; +text: .text%__1cNIdealLoopTreePiteration_split6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cNandI_rRegNodeErule6kM_I_; +text: .text%__1cRsarI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cIMulINodeGadd_id6kM_pknEType__; +text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNmodI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cKloadUBNodeMideal_Opcode6kM_i_; +text: .text%__1cHBitDataKis_BitData6M_i_; +text: .text%__1cQsalI_rReg_CLNodeMideal_Opcode6kM_i_; +text: .text%__1cNaddP_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cJAssemblerEcmpq6MnHAddress_i_v_; +text: .text%__1cNloadConP0NodeFreloc6kM_i_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cSmembar_acquireNodeLbottom_type6kM_pknEType__; +text: .text%__1cOMacroAssemblerKincrementq6MpnMRegisterImpl_i_v_; +text: .text%__1cRsarI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_; +text: .text%__1cNGrowableArray4nMmethodHandle__2t6Mii_v_; +text: .text%__1cLConvL2INodeJideal_reg6kM_I_; +text: .text%__1cNGrowableArray4nLKlassHandle__2t6Mii_v_; +text: .text%__1cNmethodOopDescThas_native_function6kM_i_; +text: .text%JVM_GetClassNameUTF; +text: .text%__1cMPrefetchNodeJideal_reg6kM_I_; +text: .text%__1cKCodeBuffer2t6MpCi_v_; +text: .text%__1cNprefetchwNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateQ_sub_Op_Prefetch6MpknENode__v_; +text: .text%__1cOjmpLoopEndNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNprefetchwNodeFreloc6kM_i_; +text: .text%__1cIAddLNodeJideal_reg6kM_I_; +text: .text%__1cILocation2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cKstoreCNodeFreloc6kM_i_; +text: .text%__1cNdecI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNmethodOopDescWis_vanilla_constructor6kM_i_; +text: .text%__1cWCallLeafNoFPDirectNodeKmethod_set6Ml_v_; +text: .text%__1cOPhaseIdealLoopOplace_near_use6kMpnENode__2_; +text: .text%__1cHi2bNodeMideal_Opcode6kM_i_; +text: .text%__1cNLocationValueLis_location6kM_i_; +text: .text%__1cNLocationValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cIMulLNodeJideal_reg6kM_I_; +text: .text%__1cNsubL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cFStateN_sub_Op_LoadB6MpknENode__v_; +text: .text%__1cNnegI_rRegNodeErule6kM_I_; +text: .text%__1cNFingerprinterJdo_double6M_v_; +text: .text%JVM_FindClassFromClass; +text: .text%__1cKcmpOpUOperEless6kM_i_; +text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cKReflectionGinvoke6FnTinstanceKlassHandle_nMmethodHandle_nGHandle_inOobjArrayHandle_nJBasicType_4ipnGThread__pnHoopDesc__; +text: .text%__1cITypeLongFwiden6kMpknEType__3_; +text: .text%__1cQsalI_rReg_CLNodePoper_input_base6kM_I_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_; +text: 
.text%__1cKReflectionDbox6FpnGjvalue_nJBasicType_pnGThread__pnHoopDesc__; +text: .text%__1cMLinkResolverbHlinktime_resolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cLBoxLockNodeEhash6kM_I_; +text: .text%__1cJOopMapSetMgrow_om_data6M_v_; +text: .text%__1cRxorI_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cKciTypeFlowFBlockQset_private_copy6Mi_v_; +text: .text%__1cWandI_rReg_imm65535NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cWandI_rReg_imm65535NodeErule6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerGpush_i6MpnMRegisterImpl__v_; +text: .text%__1cNcmovI_regNodeErule6kM_I_; +text: .text%__1cRsalL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNGrowableArray4CpnKInlineTree__Egrow6Mi_v_; +text: .text%__1cSComputeAdapterInfoIdo_short6M_v_; +text: .text%__1cNtestL_regNodeJnum_opnds6kM_I_; +text: .text%__1cLConvF2DNodeGOpcode6kM_i_; +text: .text%__1cISubLNodeLbottom_type6kM_pknEType__; +text: .text%__1cSmembar_acquireNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmembar_acquireNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNaddP_rRegNodeLbottom_type6kM_pknEType__; +text: .text%__1cNmodL_rRegNodeErule6kM_I_; +text: .text%__1cRsalI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerDret6Mi_v_; +text: .text%__1cRshrI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_; +text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_; +text: .text%JVM_NewInstanceFromConstructor; +text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_; +text: .text%__1cFStateL_sub_Op_OrI6MpknENode__v_; +text: .text%JVM_IHashCode; +text: .text%__1cMtlsLoadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cEUTF8Ounicode_length6Fpkc_i_; +text: .text%__1cHTypePtrEmake6FnETypeFTYPES_n0ADPTR_i_pk0_; +text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__; +text: .text%__1cNxorI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cIregFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLMachUEPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLOpaque2NodeLbottom_type6kM_pknEType__; +text: .text%__1cNtestL_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadFNodePoper_input_base6kM_I_; +text: .text%__1cHRetDataKcell_count6M_i_; +text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_; +text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_; +text: .text%__1cPCallRuntimeNodeGOpcode6kM_i_; +text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_; +text: .text%__1cLmethodKlassOklass_oop_size6kM_i_; +text: .text%__1cMdecI_memNodePoper_input_base6kM_I_; +text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_; +text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_; +text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_; +text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_; +text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_; +text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_; +text: .text%__1cGThreadOis_interrupted6Fp0i_i_; +text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_; +text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_; +text: .text%__1cSsafePoint_pollNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIPSOldGenHcompact6M_v_; +text: .text%__1cSsafePoint_pollNodeJnum_opnds6kM_I_; +text: 
.text%__1cFJNIidHoops_do6MpnKOopClosure__v_; +text: .text%__1cJvmSymbolsHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_; +text: .text%__1cSReferenceProcessorHoops_do6MpnKOopClosure__v_; +text: .text%__1cQObjectStartArrayFreset6M_v_; +text: .text%__1cIPSOldGenPadjust_pointers6M_v_; +text: .text%__1cScompP_mem_rRegNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJloadBNodeFreloc6kM_i_; +text: .text%__1cUandI_rReg_imm255NodeMideal_Opcode6kM_i_; +text: .text%__1cNGrowableArray4CpnKciTypeFlowFBlock__Icontains6kMrk2_i_; +text: .text%__1cScompP_mem_rRegNodeFreloc6kM_i_; +text: .text%__1cNcmovP_regNodePoper_input_base6kM_I_; +text: .text%__1cTno_rax_rdx_RegIOperJnum_edges6kM_I_; +text: .text%__1cKciTypeFlowLStateVectorJdo_aaload6MpnQciBytecodeStream__v_; +text: .text%__1cJAssemblerMemit_operand6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerMemit_operand6MpnRFloatRegisterImpl_pnMRegisterImpl_4nHAddressLScaleFactor_ipCrknQRelocationHolder__v_; +text: .text%__1cNaddL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLRethrowNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRaddI_rReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRsubI_rReg_memNodeHtwo_adr6kM_I_; +text: .text%__1cIModLNodeGOpcode6kM_i_; +text: .text%__1cIMaxINodeLbottom_type6kM_pknEType__; +text: .text%__1cFParseMdo_checkcast6M_v_; +text: .text%__1cIMulINodeGmul_id6kM_pknEType__; +text: .text%__1cMloadConINodeGis_Con6kM_I_; +text: .text%__1cIregDOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIMulDNodeGOpcode6kM_i_; +text: .text%__1cRsarL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNsubL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cHnmethodUnumber_of_dependents6kM_i_; +text: .text%__1cTconvI2L_reg_memNodeFreloc6kM_i_; +text: .text%__1cSComputeAdapterInfoIdo_float6M_v_; +text: .text%__1cFParseLarray_store6MnJBasicType__v_; +text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc_ii_v_: nativeLookup.o; +text: .text%JVM_FindClassFromClassLoader; +text: .text%JVM_FindClassFromBootLoader; +text: .text%__1cZCallInterpreterDirectNodeSalignment_required6kM_i_; +text: .text%__1cZCallInterpreterDirectNodePoper_input_base6kM_I_; +text: .text%__1cZCallInterpreterDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRmulL_rReg_immNodeMcisc_operand6kM_i_; +text: .text%__1cNloadConI0NodeGis_Con6kM_I_; +text: .text%__1cKstoreBNodeHtwo_adr6kM_I_; +text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc__v_: nativeLookup.o; +text: .text%__1cKRegionNodeUdepends_only_on_test6kM_i_; +text: .text%__1cMMergeMemNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cFciEnvZcall_has_multiple_targets6FpnNinstanceKlass_nMsymbolHandle_3ri_i_; +text: .text%__1cMadjust_check6FpnENode_11iipnMPhaseIterGVN__v_: ifnode.o; +text: .text%__1cPsalI_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTconvI2L_reg_memNodeHtwo_adr6kM_I_; +text: .text%__1cLPhaseValuesKis_IterGVN6M_pnMPhaseIterGVN__; +text: .text%__1cQciTypeArrayKlassJmake_impl6FnJBasicType__p0_; +text: .text%__1cFStateM_sub_Op_AddL6MpknENode__v_; +text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_; +text: .text%__1cUmembar_cpu_orderNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUmembar_cpu_orderNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNSCMemProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSCompareAndSwapNodeJideal_reg6kM_I_; +text: 
.text%__1cFStateW_sub_Op_MemBarCPUOrder6MpknENode__v_; +text: .text%__1cKciTypeFlowLStateVectorMdo_checkcast6MpnQciBytecodeStream__v_; +text: .text%__1cMorI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMrax_RegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%lwp_mutex_init: os_solaris.o; +text: .text%__1cJStubQdDueueGcommit6Mi_v_; +text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__; +text: .text%__1cNmodL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__; +text: .text%__1cMincI_memNodePoper_input_base6kM_I_; +text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_; +text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__; +text: .text%__1cJAssemblerEmovl6MnHAddress_i_v_; +text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cPshrI_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cRmulI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cNandI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cbACallCompiledJavaDirectNodeHtwo_adr6kM_I_; +text: .text%__1cIModINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnHoopDesc_pnNsymbolOopDesc__i_; +text: .text%__1cbLtransform_int_divide_to_long_multiply6FpnIPhaseGVN_pnENode_i_3_: divnode.o; +text: .text%__1cTno_rax_rdx_RegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cJAssemblerGmovzwl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cRmulL_rReg_immNodeErule6kM_I_; +text: .text%__1cZCallDynamicJavaDirectNodePoper_input_base6kM_I_; +text: .text%__1cHTypePtrFempty6kM_i_; +text: .text%__1cOMacroAssemblerSload_unsigned_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cOGenerateOopMapXdo_return_monitor_check6M_v_; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cFStateP_sub_Op_ConvL2I6MpknENode__v_; +text: .text%__1cLOptoRuntimebBcomplete_monitor_enter_Type6F_pknITypeFunc__; +text: .text%__1cIGraphKitMnext_monitor6M_i_; +text: .text%__1cLBoxLockNode2t6Mi_v_; +text: .text%__1cRmulI_rReg_immNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cJloadFNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cIplus_adr6FpnENode_l_1_: generateOptoStub.o; +text: .text%__1cIGraphKitLshared_lock6MpnENode__pnMFastLockNode__; +text: .text%__1cHConNode2t6MpknEType__v_; +text: .text%__1cMloadConDNodeMideal_Opcode6kM_i_; +text: .text%__1cNCompileBrokerTcreate_compile_task6FpnMCompileQdDueue_inMmethodHandle_i3ipkcii_pnLCompileTask__; +text: .text%__1cLCompileTaskKinitialize6MinMmethodHandle_i1ipkcii_v_; +text: .text%__1cNCompileBrokerNallocate_task6F_pnLCompileTask__; +text: .text%__1cMCompileQdDueueDadd6MpnLCompileTask__v_; +text: .text%__1cRxorI_rReg_memNodeErule6kM_I_; +text: .text%__1cMCompileQdDueueDget6M_pnLCompileTask__; +text: .text%__1cRsarI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQleaPIdxScaleNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cSCompileTaskWrapper2t6MpnLCompileTask__v_; +text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__; +text: .text%__1cXmembar_acquire_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_MulL6MpknENode__v_; +text: .text%__1cCosPhint_no_preempt6F_v_; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_i_v_; +text: .text%__1cObox_handleNodePoper_input_base6kM_I_; +text: 
.text%__1cNCompileBrokerJfree_task6FpnLCompileTask__v_; +text: .text%__1cSCompileTaskWrapper2T6M_v_; +text: .text%__1cLCompileTaskEfree6M_v_; +text: .text%__1cNnegI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKDictionarybAis_valid_protection_domain6MiInMsymbolHandle_nGHandle_2_i_; +text: .text%__1cMincI_memNodeMideal_Opcode6kM_i_; +text: .text%__1cRandL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cRaddI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__; +text: .text%jni_NewString: jni.o; +text: .text%__1cRxorI_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cFStateM_sub_Op_AndL6MpknENode__v_; +text: .text%__1cKloadUBNodePoper_input_base6kM_I_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVcompiledICHolderKlassXoop_is_compiledICHolder6kM_i_; +text: .text%__1cJStoreNodeUdepends_only_on_test6kM_i_; +text: .text%__1cPcmovI_reg_lNodeErule6kM_I_; +text: .text%__1cOloadConL32NodePoper_input_base6kM_I_; +text: .text%__1cNSharedRuntimebJcontinuation_for_implicit_exception6FpnKJavaThread_pCn0AVImplicitExceptionKind__3_; +text: .text%__1cRtestI_reg_immNodeHtwo_adr6kM_I_; +text: .text%__1cIimmDOperJconstantD6kM_d_; +text: .text%__1cFParsePmerge_exception6Mi_v_; +text: .text%__1cXmembar_acquire_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cNGrowableArray4CpnIciObject__2t6MpnFArena_iirk1_v_; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2pnGThread__v_; +text: .text%__1cZCallDynamicJavaDirectNodeHtwo_adr6kM_I_; +text: .text%__1cMLinkResolverbGruntime_resolve_interface_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cMLinkResolverWresolve_interface_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cNGrowableArray4CpnIciObject__JappendAll6Mpk2_v_; +text: .text%__1cFciEnv2t6MpnHJNIEnv__iii_v_; +text: .text%__1cRtestP_reg_memNodeFreloc6kM_i_; +text: .text%__1cNtestP_regNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNGrowableArray4CpnIciMethod__2t6MpnFArena_iirk1_v_; +text: .text%__1cNGrowableArray4CpnHciKlass__2t6MpnFArena_iirk1_v_; +text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_; +text: .text%__1cNGrowableArray4CpnPciReturnAddress__2t6MpnFArena_iirk1_v_; +text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__; +text: .text%__1cNCompileBrokerVpush_jni_handle_block6F_v_; +text: .text%__1cIJVMStateNmonitor_depth6kM_i_; +text: .text%__1cHciKlassSsuper_check_offset6M_I_; +text: .text%__1cFciEnv2T6M_v_; +text: .text%__1cKstoreBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNCompileBrokerUpop_jni_handle_block6F_v_; +text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cPcmpFastLockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateQ_sub_Op_FastLock6MpknENode__v_; +text: .text%__1cXmembar_acquire_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvD2I_reg_regNodeErule6kM_I_; +text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__; +text: .text%__1cLOptoRuntimeRmultianewarray1_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cWImplicitExceptionTableCat6kMI_I_; +text: .text%__1cWImplicitExceptionTable2t6MpknHnmethod__v_; +text: .text%__1cLVtableStubsPstub_containing6FpC_pnKVtableStub__; +text: .text%__1cLVtableStubsIcontains6FpC_i_; +text: 
.text%__1cNFingerprinterIdo_float6M_v_; +text: .text%__1cHnmethodbJcontinuation_for_implicit_exception6MpC_1_; +text: .text%__1cLRShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cUjmpLoopEnd_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNmodI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cUjmpLoopEnd_shortNodeMideal_Opcode6kM_i_; +text: .text%__1cKEntryPoint2t6MpC11111111_v_; +text: .text%jni_GetObjectClass: jni.o; +text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_; +text: .text%__1cRandI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cISubLNodeDsub6kMpknEType_3_3_; +text: .text%__1cJloadSNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRtestI_reg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cOloadConL32NodeHtwo_adr6kM_I_; +text: .text%__1cQshrI_rReg_CLNodePoper_input_base6kM_I_; +text: .text%__1cSstring_compareNodePoper_input_base6kM_I_; +text: .text%__1cNcmovI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMdecI_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMrax_RegLOperEtype6kM_pknEType__; +text: .text%__1cRmulI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cIXorINodeGadd_id6kM_pknEType__; +text: .text%__1cNtestP_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPcmovI_reg_gNodeHtwo_adr6kM_I_; +text: .text%__1cOPhaseIdealLoopKclone_loop6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cHi2bNodePoper_input_base6kM_I_; +text: .text%__1cRsalL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cKBinaryNodeGOpcode6kM_i_; +text: .text%__1cNxorI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cFStateO_sub_Op_Binary6MpknENode__v_; +text: .text%JVM_GetClassLoader; +text: .text%__1cMstoreSSPNodeMideal_Opcode6kM_i_; +text: .text%__1cNmulL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRxorI_rReg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cINodeHashIround_up6FI_I_; +text: .text%__1cHCompileKinit_start6MpnJStartNode__v_; +text: .text%__1cOPhaseTransform2t6MpnFArena_nFPhaseLPhaseNumber__v_; +text: .text%__1cLPhaseValues2t6MpnFArena_I_v_; +text: .text%__1cRaddP_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cINodeHash2t6MpnFArena_I_v_; +text: .text%__1cRaddI_rReg_memNodeHtwo_adr6kM_I_; +text: .text%__1cIJVMState2n6FL_pv_; +text: .text%__1cOMacroAssemblerFalign6Mi_v_; +text: .text%__1cOleaPIdxOffNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_; +text: .text%__1cFForteNregister_stub6FpkcpC3_v_; +text: .text%__1cMdecI_memNodeJnum_opnds6kM_I_; +text: .text%__1cIModINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%lwp_cond_init: os_solaris.o; +text: .text%__1cSmembar_releaseNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmembar_releaseNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOPhaseIdealLoopVclone_up_backedge_goo6MpnENode_22_2_; +text: .text%__1cSInterpreterCodeletKinitialize6MpkcnJBytecodesECode__v_; +text: .text%__1cTconvI2L_reg_regNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNxorI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNaddP_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_; +text: .text%__1cOloadConL32NodeErule6kM_I_; +text: .text%__1cOMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cFframeVnmethods_code_blob_do6M_v_; +text: 
.text%__1cHi2bNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKcmpOpUOperKless_equal6kM_i_; +text: .text%__1cWandI_rReg_imm65535NodeJnum_opnds6kM_I_; +text: .text%__1cFParseTprofile_switch_case6Mi_v_; +text: .text%__1cKNativeJumpbEcheck_verified_entry_alignment6FpC1_v_; +text: .text%__1cFParseSjump_switch_ranges6MpnENode_pnLSwitchRange_4i_v_; +text: .text%__1cFParseOmerge_new_path6Mi_v_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_; +text: .text%__1cNandI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cNmodL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cMloadConLNodeFclone6kM_pnENode__; +text: .text%__1cNtestU_regNodeJnum_opnds6kM_I_; +text: .text%__1cIimmLOperFclone6kM_pnIMachOper__; +text: .text%__1cRandL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cOleaPIdxOffNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_pkcp4_i_: arguments.o; +text: .text%__1cNCompileBrokerTis_not_compile_only6FnMmethodHandle__i_; +text: .text%__1cNCompileBrokerRassign_compile_id6FnMmethodHandle_i_I_; +text: .text%__1cNCompileBrokerTis_compile_blocking6FnMmethodHandle_i_i_; +text: .text%__1cIMulFNodeGOpcode6kM_i_; +text: .text%__1cNIdealLoopTreeQpolicy_peel_only6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeSpolicy_range_check6kMpnOPhaseIdealLoop__i_; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cNIdealLoopTreeMpolicy_align6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeNpolicy_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNtestU_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCompileBrokerZcompilation_is_prohibited6FnMmethodHandle_i_i_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_and_dispatch6MpnITemplate_nITosState__v_; +text: .text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_; +text: .text%__1cKC2CompilerOneeds_adapters6M_i_; +text: .text%__1cLServiceUtilLvisible_oop6FpnHoopDesc__i_; +text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_; +text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_; +text: .text%__1cIciMethodJhas_loops6kM_i_; +text: .text%__1cIciMethodVshould_print_assembly6M_i_; +text: .text%__1cOMacroAssemblerOcall_VM_helper6MpnMRegisterImpl_pCii_v_; +text: .text%__1cNloadConL0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cMincI_memNodeJnum_opnds6kM_I_; +text: .text%__1cNCompileBrokerOcheck_break_at6FnMmethodHandle_iii_i_; +text: .text%__1cJAssemblerEcall6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cNCompileBrokerQset_last_compile6FpnOCompilerThread_nMmethodHandle_ii_v_; +text: .text%__1cNCompileBrokerbAeager_compile_c2i_adapters6FpnFciEnv_pnIciMethod__v_; +text: .text%__1cNCompileBrokerbAeager_compile_i2c_adapters6FpnFciEnv_pnIciMethod__v_; +text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_; +text: .text%__1cKC2CompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cMstoreSSPNodeHis_Copy6kM_I_; +text: .text%__1cQshrI_rReg_CLNodeMideal_Opcode6kM_i_; +text: .text%__1cFciEnvPregister_method6MpnIciMethod_iiiiiipnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnQAbstractCompiler_ii_v_; +text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_; +text: .text%__1cIciMethodQbreak_at_execute6M_i_; +text: 
.text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_; +text: .text%__1cFciEnvbUsystem_dictionary_modification_counter_changed6M_i_; +text: .text%__1cMelapsedTimerDadd6M0_v_; +text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_; +text: .text%__1cJStartNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cICodeHeapMinsert_after6MpnJFreeBlock_2_v_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_3pnRJavaCallArguments_nGHandle_6_6_; +text: .text%__1cFMutex2t6Mipkci_v_; +text: .text%__1cKloadUBNodeErule6kM_I_; +text: .text%__1cQsalL_rReg_CLNodeMideal_Opcode6kM_i_; +text: .text%__1cbACallCompiledJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbACallCompiledJavaDirectNodePoper_input_base6kM_I_; +text: .text%__1cTbasictype2arraycopy6FnJBasicType_i_pC_; +text: .text%__1cOLibraryCallKitQinline_arraycopy6M_i_; +text: .text%__1cPstoreImmI16NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLOptoRuntimeOarraycopy_Type6F_pknITypeFunc__; +text: .text%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_; +text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_; +text: .text%__1cLPcDescCache2t6M_v_; +text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_; +text: .text%__1cHnmethodSresolve_JNIHandles6M_v_; +text: .text%__1cRmulL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVExceptionHandlerTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_; +text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_; +text: .text%__1cHnmethodLnew_nmethod6FnMmethodHandle_iiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnQAbstractCompiler__p0_; +text: .text%__1cPcmovI_reg_lNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHnmFlagsFclear6M_v_; +text: .text%__1cHnmethod2n6FLi_pv_; +text: .text%__1cHnmethod2t6MpnNmethodOopDesc_iiiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnQAbstractCompiler__v_; +text: .text%__1cNaddI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cKTypeRawPtrFxdual6kM_pknEType__; +text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_; +text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_; +text: .text%__1cICodeBlobJcopy_oops6MppnI_jobject_i_v_; +text: .text%__1cFStateN_sub_Op_LoadC6MpknENode__v_; +text: .text%__1cJloadCNodeFreloc6kM_i_; +text: .text%__1cFParseQjump_if_fork_int6MpnENode_2nIBoolTestEmask__pnGIfNode__; +text: .text%__1cWandI_rReg_imm65535NodeHtwo_adr6kM_I_; +text: .text%__1cNdivL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cNmethodOopDescIset_code6MpnHnmethod__v_; +text: .text%__1cINodeHashUremove_useless_nodes6MrnJVectorSet__v_; +text: .text%__1cQUnique_Node_ListUremove_useless_nodes6MrnJVectorSet__v_; +text: .text%__1cKInlineTreeWbuild_inline_tree_root6F_p0_; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod_ii_v_; +text: .text%__1cHCompileWprint_compile_messages6M_v_; +text: .text%__1cPClassFileParserbGparse_constant_pool_double_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cQsalI_rReg_CLNodeErule6kM_I_; +text: .text%__1cHCompileRbuild_start_state6MpnJStartNode_pknITypeFunc__pnIJVMState__; +text: .text%__1cHCompileVidentify_useful_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cIciMethodRbuild_method_data6M_v_; +text: .text%__1cHCompileIOptimize6M_v_; +text: 
.text%__1cHCompileUremove_useless_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cSPhaseRemoveUseless2t6MpnIPhaseGVN_pnQUnique_Node_List__v_; +text: .text%__1cHCompileLFinish_Warm6M_v_; +text: .text%__1cHCompileLInline_Warm6M_i_; +text: .text%__1cPno_rax_RegLOperJnum_edges6kM_I_; +text: .text%__1cMPhaseIterGVN2t6MpnIPhaseGVN__v_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii1_v_; +text: .text%__1cIciMethodRbuild_method_data6MnMmethodHandle__v_; +text: .text%__1cSstring_compareNodeErule6kM_I_; +text: .text%__1cbAfinal_graph_reshaping_walk6FrnKNode_Stack_pnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cHCompileVfinal_graph_reshaping6M_i_; +text: .text%__1cOcompI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cScompI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cJStartNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_; +text: .text%__1cIPhaseCCPJtransform6MpnENode__2_; +text: .text%__1cHCompileNreturn_values6MpnIJVMState__v_; +text: .text%__1cbCAbstractInterpreterGeneratorQset_entry_points6MnJBytecodesECode__v_; +text: .text%__1cbCAbstractInterpreterGeneratorWset_short_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cMPhaseIterGVN2t6Mp0_v_; +text: .text%__1cIPhaseCCP2t6MpnMPhaseIterGVN__v_; +text: .text%__1cIPhaseCCP2T6M_v_; +text: .text%__1cIPhaseCCPHanalyze6M_v_; +text: .text%__1cIPhaseCCPMdo_transform6M_v_; +text: .text%__1cOcompI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNsubL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConPcNodeMideal_Opcode6kM_i_; +text: .text%__1cKExceptionsG_throw6FpnGThread_pkcinGHandle__v_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinGHandle__i_; +text: .text%__1cGThreadVset_pending_exception6MpnHoopDesc_pkci_v_; +text: .text%__1cNandL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_; +text: .text%__1cWpoll_return_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_; +text: .text%__1cJloadFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_; +text: .text%__1cQsalI_rReg_CLNodeJnum_opnds6kM_I_; +text: .text%__1cIMachNodeJis_MachIf6kM_pknKMachIfNode__; +text: .text%__1cPsalL_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinMsymbolHandle_4_i_; +text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__; +text: .text%__1cOClearArrayNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cHMatcherbDinterpreter_frame_pointer_reg6F_i_; +text: .text%__1cQorI_rReg_immNodeErule6kM_I_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_; +text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_; +text: .text%__1cKcmpOpUOperHgreater6kM_i_; +text: .text%__1cNCompileBrokerUcheck_adapter_result6FnMmethodHandle_ippnMBasicAdapter__i_; +text: .text%__1cJloadFNodeJnum_opnds6kM_I_; +text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cSMachC2IEntriesNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cbFunnecessary_membar_volatileNodePoper_input_base6kM_I_; +text: .text%__1cRmulI_rReg_immNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cQPSIsAliveClosureLdo_object_b6MpnHoopDesc__i_; +text: 
.text%__1cTCallInterpreterNodeSis_CallInterpreter6kM_pk0_; +text: .text%__1cZCallInterpreterDirectNodePcompute_padding6kMi_i_; +text: .text%__1cSMachC2IcheckICNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_; +text: .text%__1cZCallInterpreterDirectNodeKmethod_set6Ml_v_; +text: .text%__1cXMachCallInterpreterNodePret_addr_offset6M_i_; +text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__; +text: .text%__1cLOptoRuntimeInew_Type6F_pknITypeFunc__; +text: .text%__1cLBoxLockNodeDcmp6kMrknENode__I_; +text: .text%__1cMTailCallNodeGOpcode6kM_i_; +text: .text%__1cJChunkPoolMfree_all_but6ML_v_; +text: .text%__1cIGraphKitMnew_instance6MpnPciInstanceKlass__pnENode__; +text: .text%__1cPcmpD_cc_regNodePoper_input_base6kM_I_; +text: .text%__1cRsalL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cLOptoRuntimeSnew_typeArray_Type6F_pknITypeFunc__; +text: .text%__1cObox_handleNodeMideal_Opcode6kM_i_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cIGraphKitJnew_array6MpnENode_nJBasicType_pknEType_pknMTypeKlassPtr__2_; +text: .text%__1cNdecL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__; +text: .text%__1cZCallDynamicJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOemit_d64_reloc6FrnKCodeBuffer_lrknQRelocationHolder_i_v_; +text: .text%__1cRtestI_reg_immNodeErule6kM_I_; +text: .text%__1cIAddFNodeGOpcode6kM_i_; +text: .text%__1cSstring_compareNodeMideal_Opcode6kM_i_; +text: .text%__1cNmethodOopDescWcompute_has_loops_flag6M_i_; +text: .text%__1cKloadUBNodeJnum_opnds6kM_I_; +text: .text%__1cNGrowableArray4CpnHoopDesc__2t6Mii_v_; +text: .text%__1cZCallDynamicJavaDirectNodeSalignment_required6kM_i_; +text: .text%__1cXvirtual_call_RelocationJpack_data6M_i_; +text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__; +text: .text%__1cQorI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUParallelScavengeHeapIcapacity6kM_L_; +text: .text%__1cJAssemblerEcmpq6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cNnegI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cYinternal_word_RelocationJpack_data6M_i_; +text: .text%__1cKsplit_once6FpnMPhaseIterGVN_pnENode_333_v_: cfgnode.o; +text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cPDictionaryEntryVadd_protection_domain6MpnHoopDesc__v_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_; +text: .text%__1cRCardTableModRefBSPclear_MemRegion6MnJMemRegion__v_; +text: .text%__1cOleaPIdxOffNodeLbottom_type6kM_pknEType__; +text: .text%__1cNdivL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cFParsebLincrement_and_test_invocation_counter6Mi_v_; +text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%JVM_IsInterrupted; +text: .text%__1cFParseRarray_store_check6M_v_; +text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cScompL_rReg_immNodeMideal_Opcode6kM_i_; +text: .text%__1cTmembar_volatileNodeMideal_Opcode6kM_i_; +text: .text%__1cTCallDynamicJavaNodeSis_CallDynamicJava6kM_pk0_; +text: .text%__1cCosHSolarisFEventEpark6M_v_; +text: .text%__1cIMinINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cODeoptimizationYtrap_state_is_recompiled6Fi_i_; +text: 
.text%__1cXSignatureHandlerLibraryKinitialize6F_v_; +text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_; +text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_; +text: .text%__1cNGrowableArray4CL_Efind6kMrkL_i_; +text: .text%__1cUandI_rReg_imm255NodePoper_input_base6kM_I_; +text: .text%__1cSReferenceProcessorZadd_to_discovered_list_mt6MppnHoopDesc_23_v_; +text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii_v_; +text: .text%__1cHOrINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cNObjectMonitorbAEntryQdDueue_SelectSuccessor6M_pnMObjectWaiter__; +text: .text%__1cNObjectMonitorREntryQdDueue_insert6MpnMObjectWaiter_i_v_; +text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cJAssemblerEpopq6MpnMRegisterImpl__v_; +text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cISubLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMTypeKlassPtrRcast_to_exactness6kMi_pknEType__; +text: .text%__1cNloadConI0NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSComputeAdapterInfoHdo_char6M_v_; +text: .text%__1cKPerfMemoryFalloc6FL_pc_; +text: .text%__1cIPerfData2T6M_v_; +text: .text%__1cIPerfDataMcreate_entry6MnJBasicType_LL_v_; +text: .text%__1cKPerfMemoryMmark_updated6F_v_; +text: .text%__1cPcmovI_reg_gNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEfrom6F_pnMRegisterImpl__; +text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_; +text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_; +text: .text%__1cPPerfDataManagerIadd_item6FpnIPerfData_i_v_; +text: .text%__1cLStrCompNodeKmatch_edge6kMI_I_; +text: .text%__1cOjmpLoopEndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOjmpLoopEndNodeOis_pc_relative6kM_i_; +text: .text%__1cOjmpLoopEndNodeTmay_be_short_branch6kM_i_; +text: .text%jni_ReleaseStringUTFChars: jni.o; +text: .text%jni_GetStringUTFChars: jni.o; +text: .text%__1cFStateW_sub_Op_CountedLoopEnd6MpknENode__v_; +text: .text%__1cNFingerprinterIdo_short6M_v_; +text: .text%__1cOcompU_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cJAssemblerEincq6MpnMRegisterImpl__v_; +text: .text%__1cFTypeDEmake6Fd_pk0_; +text: .text%__1cScompU_rReg_memNodeFreloc6kM_i_; +text: .text%__1cNtestL_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFParseLdo_newarray6MnJBasicType__v_; +text: .text%__1cWCallLeafNoFPDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMNativeLookupGlookup6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cWCallLeafNoFPDirectNodeFreloc6kM_i_; +text: .text%__1cSstring_compareNodeJnum_opnds6kM_I_; +text: .text%__1cFStateU_sub_Op_CallLeafNoFP6MpknENode__v_; +text: .text%JVM_FindLibraryEntry; +text: .text%__1cSObjectSynchronizerHinflate6FpnHoopDesc__pnNObjectMonitor__; +text: .text%JVM_GetMethodIxExceptionTableEntry; +text: .text%__1cNObjectMonitorHRecycle6M_v_; +text: .text%__1cISubLNodeGadd_id6kM_pknEType__; +text: .text%__1cNmodI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHMatcherOc_return_value6Fii_nLOptoRegPair__; +text: .text%__1cRxorI_rReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMachNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQsarL_rReg_63NodeMideal_Opcode6kM_i_; +text: 
.text%__1cRmulI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMachOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cIMachOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_; +text: .text%__1cNandI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cIDivINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNnegI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cFStateS_sub_Op_ClearArray6MpknENode__v_; +text: .text%__1cRaddL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFciEnvWis_dependence_violated6FpnMklassOopDesc_pnNmethodOopDesc__i_; +text: .text%__1cHCompile2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cIXorINodeJideal_reg6kM_I_; +text: .text%__1cMrep_stosNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRtestI_reg_immNodePoper_input_base6kM_I_; +text: .text%__1cKC2CompilerPcompile_adapter6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cMrep_stosNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMAdapterCacheGinsert6MpnLAdapterInfo_pnMBasicAdapter__v_; +text: .text%__1cFStateO_sub_Op_StoreL6MpknENode__v_; +text: .text%__1cLAdapterInfoHcopy_to6Mp0_v_; +text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__; +text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__; +text: .text%__1cIMinINodeGadd_id6kM_pknEType__; +text: .text%__1cNdecL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_; +text: .text%__1cKstoreLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGThreadLnmethods_do6M_v_; +text: .text%__1cKmul_hiNodeMideal_Opcode6kM_i_; +text: .text%__1cKstoreLNodeFreloc6kM_i_; +text: .text%__1cMstoreSSPNodeLbottom_type6kM_pknEType__; +text: .text%__1cRsubI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cPsarL_rReg_2NodeMideal_Opcode6kM_i_; +text: .text%__1cTconvF2D_reg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cRmulL_rReg_immNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNmodL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cRmulL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeHtwo_adr6kM_I_; +text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_; +text: .text%__1cScompU_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRInterpreterOopMapIis_empty6M_i_; +text: .text%__1cNFingerprinterHdo_char6M_v_; +text: .text%__1cOrepush_if_args6FpnFParse_pnENode_3_v_: parse2.o; +text: .text%__1cMloadConDNodeLbottom_type6kM_pknEType__; +text: .text%__1cNGrowableArray4CpnHoopDesc__Uclear_and_deallocate6M_v_; +text: .text%__1cMrdx_RegLOperJnum_edges6kM_I_; +text: .text%__1cLMachUEPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZget_mirror_from_signature6FnMmethodHandle_pnPSignatureStream_pnGThread__pnHoopDesc__; +text: .text%__1cNGrowableArray4CpnJNode_List__Egrow6Mi_v_; +text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cNObjectMonitorGenter26MpnGThread__v_; +text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_; +text: .text%__1cJCodeCacheLgc_prologue6F_v_; +text: .text%__1cJCodeCacheLgc_epilogue6F_v_; +text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_; +text: .text%__1cJMarkSweepMadjust_marks6F_v_; +text: .text%__1cJMarkSweepNrestore_marks6F_v_; +text: .text%__1cHThreadsLgc_epilogue6F_v_; +text: .text%__1cKDictionaryMdo_unloading6MpnRBoolObjectClosure__i_; +text: 
.text%__1cHThreadsLgc_prologue6F_v_; +text: .text%__1cNGrowableArray4CpnFKlass__2t6Mii_v_; +text: .text%__1cQSystemDictionaryMdo_unloading6FpnRBoolObjectClosure__i_; +text: .text%__1cQSystemDictionaryPplaceholders_do6FpnKOopClosure__v_; +text: .text%__1cSReferenceProcessorPoops_do_statics6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryYalways_strong_classes_do6FpnKOopClosure__v_; +text: .text%__1cUPSAdaptiveSizePolicyWmajor_collection_begin6M_v_; +text: .text%__1cUPSAdaptiveSizePolicyUmajor_collection_end6MLnHGCCauseFCause__v_; +text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_; +text: .text%__1cNGrowableArray4CpnFKlass__Uclear_and_deallocate6M_v_; +text: .text%__1cKPSYoungGenHcompact6M_v_; +text: .text%__1cKPSYoungGenPadjust_pointers6M_v_; +text: .text%__1cKPSYoungGenKprecompact6M_v_; +text: .text%__1cLPSMarkSweepQinvoke_no_policy6Fpii_v_; +text: .text%__1cLPSMarkSweepPallocate_stacks6F_v_; +text: .text%__1cLPSMarkSweepRdeallocate_stacks6F_v_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase16Fi_v_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase26F_v_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase36F_v_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase46F_v_; +text: .text%__1cLPSMarkSweepbAreset_millis_since_last_gc6F_v_; +text: .text%__1cUPSMarkSweepDecoratorbHset_destination_decorator_tenured6F_v_; +text: .text%__1cUPSMarkSweepDecoratorbIset_destination_decorator_perm_gen6F_v_; +text: .text%__1cNExceptionBlobHoops_do6MpnKOopClosure__v_; +text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cQUncommonTrapBlobHoops_do6MpnKOopClosure__v_; +text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_; +text: .text%__1cIPSOldGenKprecompact6M_v_; +text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_; +text: .text%__1cRCardTableModRefBSEis_a6MnKBarrierSetEName__i_; +text: .text%__1cJPSPermGenQcompute_new_size6ML_v_; +text: .text%__1cJPSPermGenKprecompact6M_v_; +text: .text%__1cJCodeCacheMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure_i_v_; +text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_; +text: .text%__1cPcmpD_cc_regNodeHtwo_adr6kM_I_; +text: .text%__1cbFunnecessary_membar_volatileNodeHtwo_adr6kM_I_; +text: .text%__1cIDivINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIciSymbolHas_utf86M_pkc_; +text: .text%__1cQorI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_; +text: .text%__1cKJavaThreadLnmethods_do6M_v_; +text: .text%__1cCosTnative_java_library6F_pv_; +text: .text%__1cSTailCalljmpIndNodePoper_input_base6kM_I_; +text: .text%__1cOstackSlotPOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cOstackSlotPOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cScompL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cNandI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2ipnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cPstoreImmI16NodeFreloc6kM_i_; +text: .text%__1cPstoreImmI16NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_; +text: .text%__1cVlookup_special_native6Fpc_pC_: nativeLookup.o; +text: .text%jni_ReleaseStringCritical: jni.o; +text: .text%__1cMNativeLookupMlookup_style6FnMmethodHandle_pcpkciiripnGThread__pC_; +text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_; +text: .text%jni_GetStringCritical: jni.o; 
+text: .text%__1cSInterpreterRuntimeTnmethod_entry_point6FpnKJavaThread_pnNmethodOopDesc_pnHnmethod__pC_; +text: .text%__1cIPSOldGenOgen_size_limit6M_L_; +text: .text%__1cTI2CAdapterGeneratorSstd_verified_entry6FnMmethodHandle__pC_; +text: .text%__1cTI2CAdapterGeneratorUgenerate_i2c_adapter6FnMmethodHandle__pnKI2CAdapter__; +text: .text%__1cUPSAdaptiveSizePolicybQpromo_increment_with_supplement_aligned_up6ML_L_; +text: .text%__1cHnmethodXinterpreter_entry_point6M_pC_; +text: .text%__1cUParallelScavengeHeapOresize_old_gen6ML_v_; +text: .text%__1cUPSAdaptiveSizePolicyPpromo_increment6MLI_L_; +text: .text%__1cWandI_rReg_imm65535NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIPSOldGenGresize6ML_v_; +text: .text%__1cKConv2BNodeGOpcode6kM_i_; +text: .text%__1cObox_handleNodeHtwo_adr6kM_I_; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_; +text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__; +text: .text%__1cNstoreImmINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cLConvI2FNodeGOpcode6kM_i_; +text: .text%__1cNmethodOopDescVparameter_annotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cNmethodOopDescLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cNcmovI_regNodeHtwo_adr6kM_I_; +text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__; +text: .text%__1cIGraphKitbKcombine_and_pop_all_exception_states6M_pnNSafePointNode__; +text: .text%__1cLRethrowNode2t6MpnENode_22222_v_; +text: .text%__1cHCompileSrethrow_exceptions6MpnIJVMState__v_; +text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__; +text: .text%__1cKReflectionTget_exception_types6FnMmethodHandle_pnGThread__nOobjArrayHandle__; +text: .text%__1cNinstanceKlassQmethod_index_for6kMpnNmethodOopDesc_pnGThread__i_; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_i_v_; +text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__; +text: .text%__1cFStateP_sub_Op_Rethrow6MpknENode__v_; +text: .text%__1cQComputeCallStackIdo_array6Mii_v_; +text: .text%__1cQsalL_rReg_CLNodePoper_input_base6kM_I_; +text: .text%__1cNdecL_rRegNodeErule6kM_I_; +text: .text%__1cLRethrowNodeJideal_reg6kM_I_; +text: .text%__1cMNativeLookupLlookup_base6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cNstoreImmINodeFreloc6kM_i_; +text: .text%__1cURethrowExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMNativeLookupNpure_jni_name6FnMmethodHandle__pc_; +text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_; +text: .text%__1cURethrowExceptionNodeFreloc6kM_i_; +text: .text%__1cTCompareAndSwapLNode2t6MpnENode_2222_v_; +text: .text%__1cQshrI_rReg_CLNodeJnum_opnds6kM_I_; +text: .text%__1cSCompareAndSwapNode2t6MpnENode_2222_v_; +text: .text%__1cTcompareAndSwapLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTcompareAndSwapLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOLibraryCallKitRinline_unsafe_CAS6MnJBasicType__i_; +text: .text%__1cIProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateX_sub_Op_CompareAndSwapL6MpknENode__v_; +text: .text%__1cNSCMemProjNodeJideal_reg6kM_I_; +text: .text%__1cTcompareAndSwapLNodeFreloc6kM_i_; +text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_; +text: .text%__1cOcompP_rRegNodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cPsarI_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_; +text: .text%__1cMmulD_immNodeMideal_Opcode6kM_i_; +text: .text%__1cPmethodDataKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cMaddF_regNodePoper_input_base6kM_I_; +text: .text%__1cPcmpD_cc_regNodeMideal_Opcode6kM_i_; +text: .text%__1cJLoadPNodeMstore_Opcode6kM_i_; +text: .text%__1cbACallCompiledJavaDirectNodeKmethod_set6Ml_v_; +text: .text%__1cYMachCallCompiledJavaNodePret_addr_offset6M_i_; +text: .text%__1cJCMoveNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKciTypeFlowLStateVectorEtrap6MpnQciBytecodeStream_pnHciKlass_i_v_; +text: .text%__1cIDivLNodeLbottom_type6kM_pknEType__; +text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cJloadFNodeHtwo_adr6kM_I_; +text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_; +text: .text%__1cNobjArrayKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cOGenerateOopMapKinterp_all6M_v_; +text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_; +text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_; +text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_; +text: .text%__1cFframeZinterpreter_frame_set_mdx6Ml_v_; +text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_; +text: .text%__1cQorI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cOGenerateOopMapTmethodsig_to_effect6MpnNsymbolOopDesc_ipnNCellTypeState__i_; +text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_; +text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cOGenerateOopMapKinit_state6M_v_; +text: .text%__1cPClassFileParserbFparse_constant_pool_float_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cRmulL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_; +text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_; +text: .text%__1cQciBytecodeStreamFtable6MnJBytecodesECode__2_; +text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_; +text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_; +text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_; +text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_; +text: .text%__1cRxorI_rReg_memNodeHtwo_adr6kM_I_; +text: .text%__1cNmulI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cFStateM_sub_Op_XorI6MpknENode__v_; +text: .text%__1cISubLNodeJideal_reg6kM_I_; +text: .text%__1cLStrCompNodeLbottom_type6kM_pknEType__; +text: .text%__1cQOopMapCacheEntryFflush6M_v_; +text: .text%__1cQOopMapCacheEntryRallocate_bit_mask6M_v_; +text: .text%__1cQOopMapCacheEntryTdeallocate_bit_mask6M_v_; +text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_; +text: .text%__1cRsalL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPsalL_rReg_1NodePoper_input_base6kM_I_; +text: .text%__1cOstackSlotPOperJnum_edges6kM_I_; +text: .text%__1cOPhaseIdealLoopOadd_constraint6MiipnENode_22p23_v_; +text: .text%jni_IsAssignableFrom: jni.o; +text: .text%__1cOstackSlotPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFParseWcheck_interpreter_type6MpnENode_pknEType_rpnNSafePointNode__2_; +text: .text%__1cFParseXfetch_interpreter_state6MipknEType_pnENode__5_; +text: .text%__1cObox_handleNodeJnum_opnds6kM_I_; +text: .text%__1cNaddP_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cSComputeAdapterInfoHdo_byte6M_v_; +text: 
.text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_; +text: .text%__1cOGenerateOopMapKpp_new_ref6MpnNCellTypeState_i_v_; +text: .text%__1cNcmovI_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_; +text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_; +text: .text%__1cTOopMapForCacheEntry2t6MnMmethodHandle_ipnQOopMapCacheEntry__v_; +text: .text%__1cPcmpD_cc_immNodeMideal_Opcode6kM_i_; +text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_; +text: .text%__1cTOopMapForCacheEntryOreport_results6kM_i_; +text: .text%__1cTconvF2D_reg_memNodePoper_input_base6kM_I_; +text: .text%__1cNdivL_rRegNodeErule6kM_I_; +text: .text%__1cRmulL_rReg_immNodeQuse_cisc_RegMask6M_v_; +text: .text%JVM_GetCallerClass; +text: .text%__1cQsalL_rReg_CLNodeErule6kM_I_; +text: .text%__1cJloadLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNloadConP0NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXMachCallDynamicJavaNodePret_addr_offset6M_i_; +text: .text%__1cRxorI_rReg_immNodeErule6kM_I_; +text: .text%__1cZCallDynamicJavaDirectNodePcompute_padding6kMi_i_; +text: .text%__1cFParseScreate_jump_tables6MpnENode_pnLSwitchRange_4_i_; +text: .text%__1cZCallDynamicJavaDirectNodeKmethod_set6Ml_v_; +text: .text%__1cPcmovI_reg_lNodeHtwo_adr6kM_I_; +text: .text%__1cLConvD2INodeGOpcode6kM_i_; +text: .text%__1cNcmovP_regNodeMideal_Opcode6kM_i_; +text: .text%__1cTconvI2F_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cKstorePNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWPredictedCallGeneratorJis_inline6kM_i_; +text: .text%__1cUjmpLoopEnd_shortNodeJis_Branch6kM_I_; +text: .text%__1cQorI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_; +text: .text%__1cUjmpLoopEnd_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cFParseSjump_if_false_fork6MpnGIfNode_ii_v_; +text: .text%__1cbFloadConL_0x6666666666666667NodeMideal_Opcode6kM_i_; +text: .text%__1cJAssemblerEshll6MpnMRegisterImpl_i_v_; +text: .text%__1cOjmpLoopEndNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cUjmpLoopEnd_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPCountedLoopNodeGstride6kM_pnENode__; +text: .text%__1cHi2bNodeJnum_opnds6kM_I_; +text: .text%__1cHTypeAryFxdual6kM_pknEType__; +text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_; +text: .text%__1cKstoreFNodeHtwo_adr6kM_I_; +text: .text%__1cNnegI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLencode_copy6FrnKCodeBuffer_ii_v_; +text: .text%__1cbBconvI2L_reg_reg_reg_zexNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvI2F_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cUCallNativeDirectNodeMideal_Opcode6kM_i_; +text: .text%__1cKmul_hiNodePoper_input_base6kM_I_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_pkcnGHandle_6_6_; +text: .text%__1cPcmpD_cc_regNodeMcisc_operand6kM_i_; +text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_; +text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_; +text: .text%__1cPcmpD_cc_regNodeErule6kM_I_; +text: .text%__1cNtestU_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPSPromotionLABRunallocate_object6MpnHoopDesc__i_; +text: .text%__1cPcmpD_cc_immNodeHtwo_adr6kM_I_; +text: .text%__1cOPhaseIdealLoopJdo_unroll6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cRandL_rReg_immNodeErule6kM_I_; +text: .text%__1cNloadConP0NodeGis_Con6kM_I_; +text: .text%__1cIMulINodeKmul_opcode6kM_i_; +text: 
.text%__1cNdivL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cIMulINodeKadd_opcode6kM_i_; +text: .text%__1cRxorI_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPregister_native6FnLKlassHandle_nMsymbolHandle_1pCpnGThread__i_: jni.o; +text: .text%__1cTno_rax_rdx_RegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cOtypeArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cTno_rax_rdx_RegLOperJnum_edges6kM_I_; +text: .text%__1cOCallNativeNodeGOpcode6kM_i_; +text: .text%__1cQsalI_rReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSobjArrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cJAssemblerDjmp6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cLRShiftLNodeJideal_reg6kM_I_; +text: .text%jni_SetBooleanField: jni.o; +text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cIModLNodeLbottom_type6kM_pknEType__; +text: .text%__1cRxorI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHMonitor2t6Mipkci_v_; +text: .text%__1cGHandle2t6MpnGThread_pnHoopDesc__v_; +text: .text%__1cQorI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cODeoptimizationVtrap_state_has_reason6Fii_i_; +text: .text%__1cOloadConL32NodeMideal_Opcode6kM_i_; +text: .text%__1cNGrowableArray4Cpv_Egrow6Mi_v_; +text: .text%jni_GetFieldID: jni.o; +text: .text%__1cNGrowableArray4Cl_Egrow6Mi_v_; +text: .text%__1cXjava_lang_ref_ReferenceOset_discovered6FpnHoopDesc_2_v_; +text: .text%__1cLResourceObj2n6FLn0APallocation_type__pv_; +text: .text%__1cFframeZinterpreter_frame_set_mdp6MpC_v_; +text: .text%__1cJLoadPNodeUdepends_only_on_test6kM_i_; +text: .text%__1cFBlockNset_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_; +text: .text%__1cIciObject2t6MpnHciKlass__v_; +text: .text%__1cScompL_rReg_immNodeErule6kM_I_; +text: .text%__1cQshrI_rReg_CLNodeErule6kM_I_; +text: .text%__1cNaddL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateT_sub_Op_ThreadLocal6MpknENode__v_; +text: .text%__1cVCallRuntimeDirectNodeHtwo_adr6kM_I_; +text: .text%__1cKciTypeFlowOsplit_range_at6Mi_pn0AFRange__; +text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__; +text: .text%__1cXjvm_define_class_common6FpnHJNIEnv__pkcpnI_jobject_pkWi53pnGThread__pnH_jclass__: jvm.o; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcinMsymbolHandle_4nGHandle_6_v_; +text: .text%__1cMmulD_immNodePoper_input_base6kM_I_; +text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_; +text: .text%__1cMmulF_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNGrowableArray4CpnKStackValue__2t6Mii_v_; +text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_; +text: .text%__1cRandL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pCi_v_; +text: .text%__1cUandI_rReg_imm255NodeErule6kM_I_; +text: .text%__1cRmulL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPsarL_rReg_2NodePoper_input_base6kM_I_; +text: .text%__1cJAssemblerGpushaq6M_v_; +text: .text%__1cOMethodLivenessKBasicBlockFsplit6Mi_p1_; +text: 
.text%__1cFStateP_sub_Op_RShiftL6MpknENode__v_; +text: .text%__1cMrsi_RegPOperJnum_edges6kM_I_; +text: .text%__1cMstoreSSPNodePoper_input_base6kM_I_; +text: .text%__1cScompL_rReg_immNodePoper_input_base6kM_I_; +text: .text%__1cKCodeBufferWinsert_double_constant6Md_pC_; +text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNaddP_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFParseHdo_irem6M_v_; +text: .text%__1cRsarL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_2_v_; +text: .text%__1cHi2bNodeHtwo_adr6kM_I_; +text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cJAssemblerFmovsd6MnHAddress_pnRFloatRegisterImpl__v_; +text: .text%__1cTCallInterpreterNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cXMachCallInterpreterNodeWis_MachCallInterpreter6M_p0_; +text: .text%__1cFStateX_sub_Op_CallInterpreter6MpknENode__v_; +text: .text%__1cZCallInterpreterDirectNodeFreloc6kM_i_; +text: .text%__1cMStartC2INodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cZInterpreterMacroAssemblerYtest_method_data_pointer6MpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cMStartC2INodeKc2i_domain6FpknJTypeTuple__3_; +text: .text%__1cHCompilebMGenerate_Compiled_To_Interpreter_Graph6MpknITypeFunc_pC_v_; +text: .text%__1cZCallInterpreterDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFciEnvUregister_c2i_adapter6MpnIciMethod_pnJOopMapSet_pnKCodeBuffer_ii_v_; +text: .text%__1cSMachC2IcheckICNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSMachC2IEntriesNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRtestI_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHMatcherbAinterpreter_method_oop_reg6F_i_; +text: .text%__1cHMatcherXcompiler_method_oop_reg6F_i_; +text: .text%__1cIciMethodRinterpreter_entry6M_pC_; +text: .text%__1cSTailCalljmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmpD_cc_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cSTailCalljmpIndNodeGpinned6kM_i_; +text: .text%__1cSTailCalljmpIndNodeHtwo_adr6kM_I_; +text: .text%jni_SetByteArrayRegion: jni.o; +text: .text%__1cHBoxNodeGOpcode6kM_i_; +text: .text%__1cPcmpD_cc_regNodeJnum_opnds6kM_I_; +text: .text%__1cKC2IAdapter2t6MpnKCodeBuffer_iIpnJOopMapSet_i_v_; +text: .text%__1cKC2IAdapterPnew_c2i_adapter6FpnKCodeBuffer_IpnJOopMapSet_i_p0_; +text: .text%__1cKC2IAdapter2n6FLI_pv_; +text: .text%__1cJAssemblerFmovss6MnHAddress_pnRFloatRegisterImpl__v_; +text: .text%__1cIMulINodeJideal_reg6kM_I_; +text: .text%__1cKCMovePNodeGOpcode6kM_i_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF_vc_v_; +text: .text%__1cGciType2t6MpnHciKlass__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_; +text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_; +text: .text%__1cFParseTjump_if_always_fork6Mii_v_; +text: .text%__1cKloadUBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMmulF_immNodePoper_input_base6kM_I_; +text: .text%__1cPcmpD_cc_immNodePoper_input_base6kM_I_; +text: .text%__1cUciInstanceKlassKlassEmake6F_p0_; +text: .text%__1cJAssemblerDhlt6M_v_; +text: .text%__1cOMacroAssemblerEstop6Mpkc_v_; +text: .text%__1cQComputeCallStackIdo_float6M_v_; +text: .text%__1cIciObjectUis_array_klass_klass6M_i_; +text: .text%__1cKciTypeFlowLStateVectorLdo_newarray6MpnQciBytecodeStream__v_; +text: 
.text%__1cWResolveOopMapConflictsRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cJloadFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmovI_reg_lNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciSymbolHbyte_at6Mi_i_; +text: .text%__1cNFingerprinterHdo_byte6M_v_; +text: .text%__1cENode2t6Mp0111111_v_; +text: .text%__1cIMaxINodeGadd_id6kM_pknEType__; +text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_; +text: .text%__1cMmulD_immNodeErule6kM_I_; +text: .text%__1cMnegD_regNodePoper_input_base6kM_I_; +text: .text%__1cFframeVshould_be_deoptimized6kM_i_; +text: .text%__1cMaddF_regNodeMideal_Opcode6kM_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorThas_signature_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorVhas_annotations_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorbFhas_parameter_annotations_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__; +text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_; +text: .text%JVM_DefineClassWithSource; +text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__; +text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__; +text: .text%__1cKmul_hiNodeJnum_opnds6kM_I_; +text: .text%__1cKstoreFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstoreBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRxorI_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cNsubI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cJAssemblerExorl6MpnMRegisterImpl_2_v_; +text: .text%__1cHMatcherXinterpreter_arg_ptr_reg6F_i_; +text: .text%__1cINegDNodeGOpcode6kM_i_; +text: .text%__1cNdecL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__; +text: .text%__1cRsarI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cNcmovP_regNodeJnum_opnds6kM_I_; +text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_; +text: .text%__1cMloadConDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOptoRuntimeVresolve_static_call_C6FpnKJavaThread__pC_; +text: .text%__1cSCompiledStaticCallIis_clean6kM_i_; +text: .text%__1cMrsi_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cTAbstractInterpreterLdeopt_entry6FnITosState_i_pC_; +text: .text%__1cRindIndexScaleOperNconstant_disp6kM_i_; +text: .text%__1cQorI_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cRtestI_reg_immNodeJnum_opnds6kM_I_; +text: .text%jni_NewStringUTF: jni.o; +text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_; +text: 
.text%__1cNtestI_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerGpush_l6MpnMRegisterImpl__v_; +text: .text%__1cRxorI_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerFpop_i6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cObox_handleNodeErule6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerIpush_ptr6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cOleaPIdxOffNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsarL_rReg_63NodePoper_input_base6kM_I_; +text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_i_v_; +text: .text%__1cHCompileRmake_vm_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cNmulI_rRegNodeErule6kM_I_; +text: .text%__1cNGrowableArray4Ci_2t6Mii_v_; +text: .text%__1cQsalL_rReg_CLNodeJnum_opnds6kM_I_; +text: .text%__1cNGrowableArray4Ci_Uclear_and_deallocate6M_v_; +text: .text%__1cPCountedLoopNode2t6MpnENode_2_v_; +text: .text%__1cSCountedLoopEndNode2t6MpnENode_2ff_v_; +text: .text%__1cJloadDNodeMideal_Opcode6kM_i_; +text: .text%__1cENodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIDivLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMmulD_regNodePoper_input_base6kM_I_; +text: .text%__1cTconvF2D_reg_memNodeJnum_opnds6kM_I_; +text: .text%__1cIModINodeJideal_reg6kM_I_; +text: .text%__1cYinternal_word_RelocationGtarget6M_pC_; +text: .text%__1cObox_handleNodeLbottom_type6kM_pknEType__; +text: .text%__1cbDreorder_based_on_method_index6FpnPobjArrayOopDesc_1ppnHoopDesc__v_: methodOop.o; +text: .text%__1cPshrL_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cUverify_byte_codes_fn6F_pv_: verifier.o; +text: .text%__1cMLinkResolverbPlinktime_resolve_interface_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%JVM_GetClassCPTypes; +text: .text%__1cQComputeCallStackHdo_byte6M_v_; +text: .text%JVM_GetClassCPEntriesCount; +text: .text%JVM_GetClassFieldsCount; +text: .text%JVM_GetClassMethodsCount; +text: .text%__1cINodeHashEgrow6M_v_; +text: .text%__1cJloadBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKmul_hiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerEnegq6MpnMRegisterImpl__v_; +text: .text%__1cNmodL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKloadUBNodeHtwo_adr6kM_I_; +text: .text%__1cRxorI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cVCallRuntimeDirectNodePoper_input_base6kM_I_; +text: .text%__1cSstring_compareNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__; +text: .text%__1cNsubL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOjmpLoopEndNodeGnegate6M_v_; +text: .text%__1cQorI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVCallRuntimeDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPsalL_rReg_1NodeErule6kM_I_; +text: .text%__1cPcmpD_cc_immNodeErule6kM_I_; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_2_v_; +text: .text%__1cbCAbstractInterpreterGeneratorVset_vtos_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cHCompileQgrow_alias_types6M_v_; +text: .text%__1cUandI_rReg_imm255NodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_CallIntMethod: jni.o; +text: 
.text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cPno_rax_RegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMrdx_RegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNmulI_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cNxorI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTconvF2D_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIUniverseWreinitialize_vtable_of6FpnFKlass_pnGThread__v_; +text: .text%__1cJLoadINodeMstore_Opcode6kM_i_; +text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cbFunnecessary_membar_volatileNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cENodeEgetd6kM_d_; +text: .text%__1cICmpFNodeGOpcode6kM_i_; +text: .text%__1cLOptoRuntimeThandle_wrong_method6FpnKJavaThread__pC_; +text: .text%__1cNGrowableArray4CpnOMethodLivenessKBasicBlock__Egrow6Mi_v_; +text: .text%__1cKstoreFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%JVM_SetClassSigners; +text: .text%__1cNdivL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompI_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZCallDynamicJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRandL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cOstackSlotPOperFscale6kM_i_; +text: .text%__1cHnmethodPis_dependent_on6MpnMklassOopDesc__i_; +text: .text%__1cMdecI_memNodeHtwo_adr6kM_I_; +text: .text%__1cSalign_to_page_size6FL_L_: heap.o; +text: .text%__1cNmulI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cOstackSlotPOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cPsarL_rReg_2NodeErule6kM_I_; +text: .text%__1cOPhaseIdealLoopUpeeled_dom_test_elim6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%jni_NewByteArray: jni.o; +text: .text%__1cYinternal_word_RelocationFvalue6M_pC_; +text: .text%__1cYinternal_word_RelocationWfix_relocation_at_move6Ml_v_; +text: .text%__1cFStateM_sub_Op_SubL6MpknENode__v_; +text: .text%__1cJAssemblerSemit_arith_operand6MipnMRegisterImpl_nHAddress_i_v_; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cMaddF_regNodeMcisc_operand6kM_i_; +text: .text%__1cRsubI_rReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNloadConPcNodeHtwo_adr6kM_I_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_deopt_entry_for6MnITosState_i_pC_; +text: .text%__1cKLoadPCNodeGOpcode6kM_i_; +text: .text%__1cTconvI2F_reg_regNodeMcisc_operand6kM_i_; +text: .text%__1cOstackSlotPOperEtype6kM_pknEType__; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_return_entry_for6MnITosState_i_pC_; +text: .text%__1cTconvD2I_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cMstoreSSPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNGrowableArray4Ci_Icontains6kMrki_i_; +text: .text%__1cKstoreBNodeFreloc6kM_i_; +text: .text%__1cObox_handleNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMjniIdPrivateGid_for6FnTinstanceKlassHandle_i_l_; +text: .text%__1cNget_method_id6FpnHJNIEnv__pnH_jclass_pkc5ipnGThread__pnK_jmethodID__: jni.o; +text: .text%__1cQshrI_rReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_SystemTout_offset_in_bytes6F_i_; +text: .text%__1cMjniIdSupportNto_jmethod_id6FpnNmethodOopDesc__pnK_jmethodID__; +text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__; +text: .text%__1cQjava_lang_SystemSin_offset_in_bytes6F_i_; +text: .text%__1cRandL_rReg_immNodeHtwo_adr6kM_I_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_2_v_; +text: 
.text%__1cbACallCompiledJavaDirectNodeFreloc6kM_i_; +text: .text%__1cIAddFNodeLbottom_type6kM_pknEType__; +text: .text%__1cUCallCompiledJavaNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cFStateY_sub_Op_CallCompiledJava6MpknENode__v_; +text: .text%__1cFciEnvUregister_i2c_adapter6MpnIciMethod_pnJOopMapSet_pnKCodeBuffer_i_v_; +text: .text%__1cbACallCompiledJavaDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMStartI2CNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cHCompilebMGenerate_Interpreter_To_Compiled_Graph6MpknITypeFunc__v_; +text: .text%__1cPciObjectFactoryPinsert_non_perm6Mrpn0ANNonPermObject_pnHoopDesc_pnIciObject__v_; +text: .text%__1cKI2CAdapter2n6FLI_pv_; +text: .text%__1cKCodeBufferVinsert_float_constant6Mf_pC_; +text: .text%__1cbACallCompiledJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKI2CAdapterPnew_i2c_adapter6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cKmul_hiNodeErule6kM_I_; +text: .text%__1cKI2CAdapter2t6MpnKCodeBuffer_pnJOopMapSet_ii_v_; +text: .text%__1cbBinitialize_itable_for_klass6FpnMklassOopDesc__v_; +text: .text%__1cFJNIidEfind6Mi_p0_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6ML_v_; +text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_; +text: .text%__1cILogDNodeGOpcode6kM_i_; +text: .text%__1cXSignatureHandlerLibraryLset_handler6FpnKCodeBuffer__pC_; +text: .text%JVM_IsPrimitiveClass; +text: .text%__1cIDivDNodeGOpcode6kM_i_; +text: .text%__1cMnegD_regNodeMideal_Opcode6kM_i_; +text: .text%__1cJCMoveNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_; +text: .text%__1cQjava_lang_ThreadJis_daemon6FpnHoopDesc__i_; +text: .text%__1cJAssemblerEcmpl6MnHAddress_i_v_; +text: .text%__1cHi2bNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLimmI_24OperJnum_edges6kM_I_; +text: .text%__1cRxorI_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFTypeDJsingleton6kM_i_; +text: .text%__1cPsalI_rReg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIModLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cUPipeline_Use_Element2t6MIIIinXPipeline_Use_Cycle_Mask__v_; +text: .text%__1cTmembar_volatileNodePoper_input_base6kM_I_; +text: .text%__1cNloadConPcNodePoper_input_base6kM_I_; +text: .text%__1cXPipeline_Use_Cycle_Mask2t6MI_v_; +text: .text%__1cKCompiledICMset_to_clean6M_v_; +text: .text%__1cNdecL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cIciMethodOresolve_invoke6MpnHciKlass_2_p0_; +text: .text%__1cQChunkPoolCleanerEtask6M_v_; +text: .text%__1cICmpDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPsalL_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_MulI6MpknENode__v_; +text: .text%__1cZCallDynamicJavaDirectNodeFreloc6kM_i_; +text: .text%__1cQMachCallJavaNodeVis_MachCallStaticJava6M_pnWMachCallStaticJavaNode__; +text: .text%__1cUandI_rReg_imm255NodeJnum_opnds6kM_I_; +text: .text%__1cFStateX_sub_Op_CallDynamicJava6MpknENode__v_; +text: .text%__1cNObjectMonitorEwait6MxipnGThread__v_; +text: .text%jni_FindClass: jni.o; +text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cIMinINodeJideal_reg6kM_I_; +text: .text%__1cJCMoveNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_; +text: 
.text%__1cNCallGeneratorSfor_predicted_call6FpnHciKlass_p03_3_; +text: .text%__1cLTypeInstPtrRcast_to_exactness6kMi_pknEType__; +text: .text%__1cTconvI2F_reg_regNodeErule6kM_I_; +text: .text%__1cWPredictedCallGeneratorKis_virtual6kM_i_; +text: .text%__1cTconvF2D_reg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cNcmovP_regNodeErule6kM_I_; +text: .text%__1cMaddF_regNodeJnum_opnds6kM_I_; +text: .text%__1cWPredictedCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%JVM_MonitorWait; +text: .text%__1cPshrL_rReg_1NodePoper_input_base6kM_I_; +text: .text%__1cMaddF_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNCallGeneratorQfor_virtual_call6FpnIciMethod__p0_; +text: .text%__1cUVirtualCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cLPSMarkSweepbAabsorb_live_data_from_eden6FpnUPSAdaptiveSizePolicy_pnKPSYoungGen_pnIPSOldGen__i_; +text: .text%__1cUPSMarkSweepDecoratorbDadvance_destination_decorator6F_v_; +text: .text%__1cNmulI_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNmulI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cSOnStackReplacementPget_osr_adapter6FnFframe_nMmethodHandle__pnKOSRAdapter__; +text: .text%__1cNGrowableArray4CpnKOSRAdapter__Hat_grow6Mirk1_1_; +text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_; +text: .text%JVM_GetClassDeclaredConstructors; +text: .text%__1cRCardTableModRefBSKinvalidate6MnJMemRegion__v_; +text: .text%__1cJLoadFNodeJideal_reg6kM_I_; +text: .text%__1cJAssemblerEaddq6MpnMRegisterImpl_2_v_; +text: .text%__1cFTypeFJsingleton6kM_i_; +text: .text%__1cTconvF2D_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cMstoreSSPNodeErule6kM_I_; +text: .text%__1cOloadConL32NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMstoreSSPNodeHtwo_adr6kM_I_; +text: .text%__1cMincI_memNodeHtwo_adr6kM_I_; +text: .text%__1cKcmpOpUOperFequal6kM_i_; +text: .text%__1cTconvF2D_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cHRegMask2t6M_v_; +text: .text%__1cIGraphKitPdstore_rounding6MpnENode__2_; +text: .text%__1cNGrowableArray4Ci_2t6MpnFArena_iirki_v_; +text: .text%__1cNloadConL0NodeHsize_of6kM_I_; +text: .text%__1cQset_lwp_priority6Fiii_i_; +text: .text%__1cJCmpD3NodeGOpcode6kM_i_; +text: .text%__1cKloadUBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmpD_cc_immNodeJnum_opnds6kM_I_; +text: .text%__1cLConvL2DNodeGOpcode6kM_i_; +text: .text%__1cRmulI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerFcmovq6Mn0AJCondition_pnMRegisterImpl_3_v_; +text: .text%__1cNminI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMmulD_regNodeMideal_Opcode6kM_i_; +text: .text%__1cNminI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cFStateM_sub_Op_MinI6MpknENode__v_; +text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__; +text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__; +text: .text%__1cScompL_rReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGICStubIfinalize6M_v_; +text: .text%__1cNCallGeneratorRfor_uncommon_trap6FpnIciMethod_nODeoptimizationLDeoptReason_n0CLDeoptAction__p0_; +text: .text%__1cUandI_rReg_imm255NodeHtwo_adr6kM_I_; +text: .text%__1cJStubQdDueueMremove_first6M_v_; +text: .text%__1cOPhaseIdealLoopOdo_range_check6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cUVirtualCallGeneratorKis_virtual6kM_i_; +text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_; +text: .text%__1cZUncommonTrapCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_; +text: 
.text%__1cJAssemblerEcmpq6MpnMRegisterImpl_2_v_; +text: .text%__1cNGrowableArray4CpnIciObject__Egrow6Mi_v_; +text: .text%__1cFStateM_sub_Op_ModI6MpknENode__v_; +text: .text%__1cNObjectMonitor2t6M_v_; +text: .text%__1cNmodI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFParseNdo_instanceof6M_v_; +text: .text%__1cPcmpD_cc_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNIdealLoopTreeXpolicy_maximally_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cTmembar_volatileNodeHtwo_adr6kM_I_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cIGraphKitOgen_instanceof6MpnENode_2_2_; +text: .text%__1cHciKlassOsuper_of_depth6MI_p0_; +text: .text%__1cJLoadDNodeGOpcode6kM_i_; +text: .text%__1cNcmovL_regNodePoper_input_base6kM_I_; +text: .text%__1cMdecI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRaddI_rReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_; +text: .text%__1cQsalL_rReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNandI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbFunnecessary_membar_volatileNodeLbottom_type6kM_pknEType__; +text: .text%__1cHMatcherXpost_store_load_barrier6FpknENode__i_; +text: .text%__1cYjava_lang_reflect_MethodThas_signature_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodbFhas_parameter_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbChas_annotation_default_field6F_i_; +text: .text%__1cJloadDNodePoper_input_base6kM_I_; +text: .text%__1cENodeEgetf6kM_f_; +text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_; +text: .text%__1cKReflectionKnew_method6FnMmethodHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cYjava_lang_reflect_MethodVhas_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cNmethodOopDescSannotation_default6kM_pnQtypeArrayOopDesc__; +text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cVCallRuntimeDirectNodeKmethod_set6Ml_v_; +text: .text%__1cTconvD2I_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cSTailCalljmpIndNodeJnum_opnds6kM_I_; +text: .text%__1cTconvI2D_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cQorI_rReg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cKstoreFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRaddI_mem_rRegNodePoper_input_base6kM_I_; +text: .text%__1cMmulF_immNodeErule6kM_I_; +text: .text%__1cJAssemblerGmovlpd6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cPcmpF_cc_regNodePoper_input_base6kM_I_; +text: .text%__1cNCompileBrokerTcompile_adapter_for6FnMmethodHandle_ii_pnMBasicAdapter__; +text: .text%__1cCosbBthread_local_storage_at_put6Fipv_v_; +text: 
.text%__1cNCompileBrokerbBwait_for_adapter_completion6FpnLCompileTask__pnMBasicAdapter__; +text: .text%__1cOjmpLoopEndNodeJis_Branch6kM_I_; +text: .text%__1cOjmpLoopEndNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cNinstanceKlassSregister_finalizer6FpnPinstanceOopDesc_pnGThread__2_; +text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_; +text: .text%__1cKCMoveINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FLi_pnGThread__; +text: .text%__1cMrax_RegIOperEtype6kM_pknEType__; +text: .text%__1cOjmpLoopEndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateW_sub_Op_MemBarVolatile6MpknENode__v_; +text: .text%jni_GetMethodID: jni.o; +text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_; +text: .text%get_thread; +text: .text%__1cMincI_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIGraphKitXinsert_mem_bar_volatile6MpnKMemBarNode_i_v_; +text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_; +text: .text%__1cCosHSolarisKmmap_chunk6FpcLii_2_; +text: .text%__1cbFloadConL_0x6666666666666667NodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_MonitorNotifyAll; +text: .text%__1cOloadConL32NodeLbottom_type6kM_pknEType__; +text: .text%__1cRxorI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cMmulD_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNcmovP_regNodeLbottom_type6kM_pknEType__; +text: .text%__1cJScopeDescTdecode_scope_values6Mi_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cTAbstractInterpreterWlayout_activation_impl6FpnNmethodOopDesc_iiiipnFframe_4i_i_; +text: .text%__1cLconvI2BNodeMideal_Opcode6kM_i_; +text: .text%__1cIDivLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cQsalI_rReg_CLNodeHtwo_adr6kM_I_; +text: .text%__1cPsarL_rReg_2NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmodL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cScompL_rReg_immNodeJnum_opnds6kM_I_; +text: .text%__1cQshrL_rReg_CLNodePoper_input_base6kM_I_; +text: .text%__1cJCMoveNode2t6MpnENode_22pknEType__v_; +text: .text%__1cJCMoveNodeEmake6FpnENode_222pknEType__p0_; +text: .text%__1cVLoaderConstraintTableYextend_loader_constraint6MpnVLoaderConstraintEntry_nGHandle_pnMklassOopDesc__v_; +text: .text%__1cIimmIOperJnum_edges6kM_I_; +text: .text%__1cJAssemblerFmovss6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cRtestI_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRandL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNGrowableArray4CpnKciTypeFlowFBlock__Gremove6Mrk2_v_; +text: .text%__1cVLoaderConstraintTablebHensure_loader_constraint_capacity6MpnVLoaderConstraintEntry_i_v_; +text: .text%__1cPsalL_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cMsubD_regNodePoper_input_base6kM_I_; +text: .text%__1cMstoreSSPNodeJnum_opnds6kM_I_; +text: .text%__1cMnegD_regNodeHtwo_adr6kM_I_; +text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_; +text: .text%__1cPClassFileParserXverify_unqualified_name6MpcIi_i_; +text: .text%__1cMdivD_immNodeMideal_Opcode6kM_i_; +text: .text%__1cTconvI2F_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_ThreadMis_stillborn6FpnHoopDesc__i_; +text: .text%__1cQsarL_rReg_63NodeErule6kM_I_; +text: .text%__1cRsubL_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMVirtualSpaceQuncommitted_size6kM_L_; +text: .text%__1cRsubL_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cMVirtualSpaceJexpand_by6ML_i_; +text: .text%__1cNstoreImmPNodeMideal_Opcode6kM_i_; +text: 
.text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_; +text: .text%__1cLOopMapCache2t6M_v_; +text: .text%__1cJloadDNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cQComputeCallStackHdo_char6M_v_; +text: .text%__1cNdivI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cOcmovI_regUNodePoper_input_base6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_; +text: .text%__1cSvframeArrayElementDbci6kM_i_; +text: .text%__1cMaddF_regNodeErule6kM_I_; +text: .text%__1cTconvF2D_reg_memNodeHtwo_adr6kM_I_; +text: .text%__1cNdecL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cMdecI_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFStateN_sub_Op_LoadF6MpknENode__v_; +text: .text%__1cIMulDNodeLbottom_type6kM_pknEType__; +text: .text%__1cNaddI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJAssemblerEdecl6MpnMRegisterImpl__v_; +text: .text%__1cOPhaseIdealLoopVinsert_pre_post_loops6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cJAssemblerGbswapl6MpnMRegisterImpl__v_; +text: .text%__1cRInlineCacheBufferLnew_ic_stub6F_pnGICStub__; +text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cIAddDNodeGOpcode6kM_i_; +text: .text%__1cMincI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_; +text: .text%__1cNloadConPcNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGICStubIset_stub6MpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_; +text: .text%__1cTconvD2I_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerHpop_ptr6MpnMRegisterImpl__v_; +text: .text%__1cGThreadbCis_hidden_from_external_view6kM_i_; +text: .text%__1cMelapsedTimer2t6M_v_; +text: .text%__1cGThreadVis_jvmti_agent_thread6kM_i_; +text: .text%__1cMdivD_immNodeErule6kM_I_; +text: .text%__1cTconvI2D_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cFTypeFFxmeet6kMpknEType__3_; +text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl_i_v_; +text: .text%__1cGICStubLdestination6kM_pC_; +text: .text%__1cRsalL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_; +text: .text%__1cPcmpD_cc_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMaddD_immNodeMideal_Opcode6kM_i_; +text: .text%__1cNdivI_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_; +text: .text%jni_NewObjectArray: jni.o; +text: .text%__1cIMulFNodeLbottom_type6kM_pknEType__; +text: .text%__1cTmembar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovP_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUandI_rReg_imm255NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNdivL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cKcastPPNodePoper_input_base6kM_I_; +text: .text%__1cMaddD_immNodePoper_input_base6kM_I_; +text: .text%__1cFTypeDFxmeet6kMpknEType__3_; +text: .text%__1cKloadUBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIDivLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJloadDNodeErule6kM_I_; +text: .text%__1cRaddI_mem_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cPsarL_rReg_1NodeMideal_Opcode6kM_i_; +text: .text%__1cKCompiledICMstub_address6kM_pC_; +text: .text%__1cMmulD_regNodeMcisc_operand6kM_i_; +text: .text%__1cMmulF_memNodePoper_input_base6kM_I_; 
+text: .text%lwp_cond_destroy: os_solaris.o; +text: .text%__1cHnmethodNis_osr_method6kM_i_; +text: .text%lwp_mutex_destroy: os_solaris.o; +text: .text%__1cFParseScan_rerun_bytecode6M_i_; +text: .text%__1cISubFNodeGOpcode6kM_i_; +text: .text%__1cRjni_invoke_static6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cFTypeDGis_nan6kM_i_; +text: .text%__1cTconvI2F_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cOJavaAssertionsNmatch_package6Fpkc_pn0AKOptionList__; +text: .text%__1cOJavaAssertionsHenabled6Fpkci_i_; +text: .text%__1cOJavaAssertionsLmatch_class6Fpkc_pn0AKOptionList__; +text: .text%JVM_DesiredAssertionStatus; +text: .text%__1cHTypePtrFxdual6kM_pknEType__; +text: .text%__1cTconvI2F_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_; +text: .text%__1cLConvI2FNodeLbottom_type6kM_pknEType__; +text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cMaddF_immNodeMideal_Opcode6kM_i_; +text: .text%__1cQshrL_rReg_CLNodeMideal_Opcode6kM_i_; +text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_; +text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cMloadConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNnegI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPcmpF_cc_regNodeHtwo_adr6kM_I_; +text: .text%__1cKReflectionJnew_field6FpnPfieldDescriptor_ipnGThread__pnHoopDesc__; +text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_; +text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_; +text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldVhas_annotations_field6F_i_; +text: .text%__1cISubDNodeGOpcode6kM_i_; +text: .text%__1cJloadFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSstring_compareNodeHtwo_adr6kM_I_; +text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cRaddI_mem_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cKScopeValueLis_location6kM_i_; +text: .text%__1cXjava_lang_reflect_FieldThas_signature_field6F_i_; +text: .text%__1cMmulF_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_; +text: .text%JVM_MonitorNotify; +text: .text%__1cQsarL_rReg_63NodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_GetStaticFieldID: jni.o; +text: .text%__1cIModLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cIjniIdMapGcreate6FnTinstanceKlassHandle__p0_; +text: .text%__1cPsarL_rReg_2NodeJnum_opnds6kM_I_; +text: .text%__1cKReflectionbFbasic_type_mirror_to_basic_type6FpnHoopDesc_pnGThread__nJBasicType__; +text: .text%__1cPcmpF_cc_regNodeMideal_Opcode6kM_i_; +text: .text%__1cQSystemDictionaryQjava_mirror_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cIjniIdMap2t6MpnMklassOopDesc_i_v_; +text: .text%__1cIjniIdMapRcompute_index_cnt6FnTinstanceKlassHandle__i_; +text: .text%__1cLjniIdBucket2t6MpnIjniIdMap_p0_v_; +text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__; +text: 
.text%__1cJLoadSNodeMstore_Opcode6kM_i_; +text: .text%__1cLTypeInstPtrLmirror_type6kM_pnGciType__; +text: .text%__1cMsubF_regNodePoper_input_base6kM_I_; +text: .text%__1cPcmpD_cc_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMlogD_regNodeMideal_Opcode6kM_i_; +text: .text%__1cbFunnecessary_membar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvL2FNodeGOpcode6kM_i_; +text: .text%__1cFKlassNexternal_name6kM_pkc_; +text: .text%__1cMPipeline_Use2t6MIIIpnUPipeline_Use_Element__v_; +text: .text%__1cKstorePNodeErule6kM_I_; +text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_; +text: .text%__1cbFunnecessary_membar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConPcNodeErule6kM_I_; +text: .text%__1cIPipeline2t6MIIiIIiiiipnSmachPipelineStages_2pInMPipeline_Use__v_; +text: .text%__1cRComputeEntryStackGdo_int6M_v_; +text: .text%__1cMstoreSSPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSMachBreakpointNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRsubL_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNtestU_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPsalL_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cNmodL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvF2D_reg_regNodeErule6kM_I_; +text: .text%__1cJAssemblerDjmp6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cObox_handleNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsalI_rReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQorI_rReg_memNodeErule6kM_I_; +text: .text%__1cLloadSSDNodePoper_input_base6kM_I_; +text: .text%__1cNCompileBrokerbAinvoke_compiler_on_adapter6FpnLCompileTask__v_; +text: .text%__1cLConvF2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMaddF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRxorI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cMaddF_immNodePoper_input_base6kM_I_; +text: .text%__1cKCMoveLNodeGOpcode6kM_i_; +text: .text%__1cICodeHeapTmark_segmap_as_free6MLL_v_; +text: .text%__1cRaddL_rReg_memNodePoper_input_base6kM_I_; +text: .text%JVM_IsArrayClass; +text: .text%__1cJAssemblerEsbbq6MnHAddress_i_v_; +text: .text%__1cZInterpreterMacroAssemblerFpop_l6MpnMRegisterImpl__v_; +text: .text%__1cMmulD_regNodeJnum_opnds6kM_I_; +text: .text%__1cODeoptimizationYquery_update_method_data6FnQmethodDataHandle_in0ALDeoptReason_rIri4_pnLProfileData__; +text: .text%__1cICodeHeapJexpand_by6ML_i_; +text: .text%__1cMmulD_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cLConvF2DNodeLbottom_type6kM_pknEType__; +text: .text%__1cJAssemblerEaddq6MnHAddress_i_v_; +text: .text%JVM_GetClassName; +text: .text%__1cTconvF2D_reg_regNodeMcisc_operand6kM_i_; +text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cMmulD_immNodeJnum_opnds6kM_I_; +text: .text%__1cNmulI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQorI_rReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__; +text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_; +text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_; +text: .text%__1cRsubL_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRaddL_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cRsubL_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cRsubL_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cPshrL_rReg_1NodeErule6kM_I_; +text: 
.text%__1cQshrI_rReg_CLNodeHtwo_adr6kM_I_; +text: .text%__1cFStateO_sub_Op_CMoveI6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_RegD6MpknENode__v_; +text: .text%__1cQorI_rReg_memNodeHtwo_adr6kM_I_; +text: .text%__1cUCallNativeDirectNodeHtwo_adr6kM_I_; +text: .text%__1cTconvI2D_reg_regNodeMcisc_operand6kM_i_; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_; +text: .text%__1cIMaxINodeJideal_reg6kM_I_; +text: .text%__1cFJNIid2t6MpnMklassOopDesc_ip0_v_; +text: .text%__1cNinstanceKlassPjni_id_for_impl6FnTinstanceKlassHandle_i_pnFJNIid__; +text: .text%__1cJAssemblerEaddq6MpnMRegisterImpl_nHAddress__v_; +text: .text%JVM_Open; +text: .text%__1cHRegMask2t6Miiiiiii_v_; +text: .text%__1cbFloadConL_0x6666666666666667NodeHtwo_adr6kM_I_; +text: .text%__1cNsubI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMmulF_regNodePoper_input_base6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MpnMRegisterImpl_i_v_; +text: .text%__1cQConstantIntValuePis_constant_int6kM_i_; +text: .text%__1cRmulL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPsarL_rReg_2NodeHtwo_adr6kM_I_; +text: .text%__1cKmul_hiNodeHtwo_adr6kM_I_; +text: .text%__1cQConstantIntValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cRxorI_rReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateM_sub_Op_ConD6MpknENode__v_; +text: .text%__1cLConvI2DNodeGOpcode6kM_i_; +text: .text%__1cVLoaderConstraintTableJnew_entry6MIpnNsymbolOopDesc_pnMklassOopDesc_ii_pnVLoaderConstraintEntry__; +text: .text%__1cNaddP_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPcmpF_cc_regNodeMcisc_operand6kM_i_; +text: .text%__1cJAssemblerFcmovl6Mn0AJCondition_pnMRegisterImpl_3_v_; +text: .text%__1cVscale_to_lwp_priority6Fiii_i_: os_solaris.o; +text: .text%__1cLOptoRuntimeWresolve_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_; +text: .text%__1cLStrCompNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMmulF_memNodeMideal_Opcode6kM_i_; +text: .text%__1cKConv2BNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJloadDNodeJnum_opnds6kM_I_; +text: .text%__1cFStateM_sub_Op_RegF6MpknENode__v_; +text: .text%__1cMmulF_immNodeJnum_opnds6kM_I_; +text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_; +text: .text%__1cNcmovP_regNodeHtwo_adr6kM_I_; +text: .text%jni_GetStaticObjectField: jni.o; +text: .text%__1cIGraphKitSprecision_rounding6MpnENode__2_; +text: .text%__1cScompL_rReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSTailCalljmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvD2F_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cLCastP2LNodeUdepends_only_on_test6kM_i_; +text: .text%__1cTconvF2D_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulD_immNodeHtwo_adr6kM_I_; +text: .text%__1cOMacroAssemblerFleave6M_v_; +text: .text%__1cMloadConDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMTailCallNode2t6MpnENode_222222_v_; +text: .text%__1cICmpDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cXPartialSubtypeCheckNodeGOpcode6kM_i_; +text: .text%__1cSTailCalljmpIndNodeFreloc6kM_i_; +text: .text%__1cObox_handleNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cOloadConL32NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMlogD_regNodePoper_input_base6kM_I_; +text: .text%__1cTconvI2F_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cMnegD_regNodeErule6kM_I_; +text: 
.text%__1cLvframeArrayRregister_location6kMi_pC_; +text: .text%__1cQorI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateQ_sub_Op_TailCall6MpknENode__v_; +text: .text%__1cIOSThreadNpd_initialize6M_v_; +text: .text%__1cCosScurrent_process_id6F_i_; +text: .text%__1cMaddD_immNodeErule6kM_I_; +text: .text%__1cNmaxI_rRegNodePoper_input_base6kM_I_; +text: .text%__1cPshrL_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvI2F_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cNmaxI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIMaxINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cCosRinitialize_thread6F_v_; +text: .text%__1cCosHSolarisKvm_signals6F_pnIsigset_t__; +text: .text%__1cCosHSolarisRunblocked_signals6F_pnIsigset_t__; +text: .text%__1cMaddF_immNodeErule6kM_I_; +text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_; +text: .text%__1cFStateM_sub_Op_MaxI6MpknENode__v_; +text: .text%__1cGThread2t6M_v_; +text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_; +text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_; +text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_; +text: .text%__1cIOSThread2t6MpFpv_i1_v_; +text: .text%jni_CallStaticVoidMethod: jni.o; +text: .text%__1cCosScurrent_stack_size6F_L_; +text: .text%__1cNPhaseRegAllocHset_oop6MpknENode_i_v_; +text: .text%__1cCosScurrent_stack_base6F_pC_; +text: .text%__1cJloadFNodeFreloc6kM_i_; +text: .text%__1cCosMstart_thread6FpnGThread__v_; +text: .text%__1cKstoreFNodeFreloc6kM_i_; +text: .text%__1cKstoreLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_; +text: .text%__1cNcmovL_memNodeErule6kM_I_; +text: .text%__1cFStateO_sub_Op_StoreF6MpknENode__v_; +text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_; +text: .text%__1cCosPpd_start_thread6FpnGThread__v_; +text: .text%__1cNcmovL_regNodeMcisc_operand6kM_i_; +text: .text%__1cCosNcreate_thread6FpnGThread_n0AKThreadType_L_i_; +text: .text%__1cLconvI2BNodePoper_input_base6kM_I_; +text: .text%__1cOGenerateOopMapMdo_checkcast6M_v_; +text: .text%JVM_SetThreadPriority; +text: .text%__1cG_start6Fpv_0_: os_solaris.o; +text: .text%__1cLOptoRuntimeMrethrow_Type6F_pknITypeFunc__; +text: .text%JVM_GetStackAccessControlContext; +text: .text%JVM_IsThreadAlive; +text: .text%__1cTconvL2D_reg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cSstring_compareNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNdivI_rRegNodeErule6kM_I_; +text: .text%__1cNdecL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOLibraryCallKitYinline_native_time_funcs6Mi_i_; +text: .text%__1cNGrowableArray4CpknEType__2t6MpnFArena_iirk2_v_; +text: .text%__1cFParseVcatch_call_exceptions6MrnYciExceptionHandlerStream__v_; +text: .text%__1cTconvL2F_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cQjava_lang_ThreadIis_alive6FpnHoopDesc__i_; +text: .text%jni_CallObjectMethod: jni.o; +text: .text%__1cJAssemblerExorq6MpnMRegisterImpl_2_v_; +text: .text%__1cNcmovL_regNodeJnum_opnds6kM_I_; +text: .text%__1cLOptoRuntimeYcurrent_time_millis_Type6F_pknITypeFunc__; +text: .text%__1cOcmovI_regUNodeMideal_Opcode6kM_i_; +text: .text%__1cNcmovL_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_; +text: .text%__1cMsubD_regNodeMideal_Opcode6kM_i_; +text: .text%__1cNloadConL0NodeFclone6kM_pnENode__; +text: .text%__1cPcmpF_cc_regNodeErule6kM_I_; +text: .text%__1cJimmL0OperFclone6kM_pnIMachOper__; +text: 
.text%__1cNmodI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPcmpF_cc_regNodeJnum_opnds6kM_I_; +text: .text%__1cPcmpF_cc_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cbFloadConL_0x6666666666666667NodePoper_input_base6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerFpop_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cTconvL2D_reg_memNodePoper_input_base6kM_I_; +text: .text%__1cLConvD2FNodeGOpcode6kM_i_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_; +text: .text%__1cJloadDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEtemp6F_pnMRegisterImpl__; +text: .text%__1cMmulF_immNodeHtwo_adr6kM_I_; +text: .text%__1cQsarL_rReg_63NodeHtwo_adr6kM_I_; +text: .text%__1cQsarL_rReg_63NodeJnum_opnds6kM_I_; +text: .text%__1cQjava_lang_StringbHcreate_from_platform_depended_str6FpkcpnGThread__nGHandle__; +text: .text%__1cMsubF_regNodeMideal_Opcode6kM_i_; +text: .text%__1cTconvI2L_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWThreadLocalAllocBufferMinitial_size6F_L_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_; +text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_; +text: .text%__1cTconvF2I_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cRandI_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cRandI_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cCosNcommit_memory6FpcL_i_; +text: .text%__1cNdivI_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cENodeJis_MemBar6kM_pknKMemBarNode__; +text: .text%__1cNjni_functions6F_pknTJNINativeInterface___; +text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_; +text: .text%JVM_NativePath; +text: .text%__1cKJavaThreadKinitialize6M_v_; +text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_; +text: .text%__1cGParker2t6M_v_; +text: .text%__1cOPhaseIdealLoopKdo_peeling6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_; +text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cKJavaThreadYcreate_stack_guard_pages6M_v_; +text: .text%__1cUThreadSafepointState2t6MpnKJavaThread__v_; +text: .text%__1cCosMguard_memory6FpcL_i_; +text: .text%__1cMnegD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUCallNativeDirectNodePoper_input_base6kM_I_; +text: .text%__1cHnmethodTinc_decompile_count6M_v_; +text: .text%__1cIMinINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cMResourceMarkNreset_to_mark6M_v_; +text: .text%__1cQThreadStatistics2t6M_v_; +text: .text%__1cMFlatProfilerJis_active6F_i_; +text: .text%__1cNloadConPcNodeLbottom_type6kM_pknEType__; +text: .text%__1cMmulD_regNodeErule6kM_I_; +text: .text%__1cMdivD_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKJavaThread2t6MpFp0pnGThread__vL_v_; +text: .text%__1cPcmpD_cc_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvI2D_reg_regNodeErule6kM_I_; +text: .text%__1cQshrL_rReg_CLNodeJnum_opnds6kM_I_; +text: .text%__1cNcmovL_memNodePoper_input_base6kM_I_; +text: .text%__1cNdivL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmpD_cc_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateT_sub_Op_CallRuntime6MpknENode__v_; +text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_; +text: .text%__1cKcastPPNodeHtwo_adr6kM_I_; +text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cMsubD_regNodeMcisc_operand6kM_i_; +text: .text%__1cGThreadFstart6Fp0_v_; +text: .text%__1cVCallRuntimeDirectNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cQsalL_rReg_CLNodeHtwo_adr6kM_I_; +text: .text%__1cVCallRuntimeDirectNodeFreloc6kM_i_; +text: .text%__1cIGraphKitIset_jvms6MpnIJVMState__v_; +text: .text%__1cKJavaThreadDrun6M_v_; +text: .text%__1cTconvD2I_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cOsalI_mem_1NodePoper_input_base6kM_I_; +text: .text%__1cSMachCallNativeNodePret_addr_offset6M_i_; +text: .text%__1cMLinkResolverbEresolve_interface_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cZInterpreterMacroAssemblerFpop_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cMrdi_RegIOperEtype6kM_pknEType__; +text: .text%__1cVThreadStateTransitionKtransition6FpnKJavaThread_nPJavaThreadState_3_v_; +text: .text%__1cUCallNativeDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKJavaThreadRthread_main_inner6M_v_; +text: .text%__1cQorI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIGraphKitbAgen_stub_or_native_wrapper6MpCpkcpnIciMethod_iiiii_v_; +text: .text%__1cPsalL_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMResourceMark2t6M_v_; +text: .text%__1cQshrI_rReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJScopeDescGlocals6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cJScopeDescVdecode_monitor_values6Mi_pnNGrowableArray4CpnMMonitorValue____; +text: .text%__1cSvframeArrayElementPunpack_on_stack6MiipnFframe_ii_v_; +text: .text%__1cTAbstractInterpreterRlayout_activation6FpnNmethodOopDesc_iiiipnFframe_4i_v_; +text: .text%__1cOcompiledVFrameLexpressions6kM_pnUStackValueCollection__; +text: .text%__1cOcompiledVFrameImonitors6kM_pnNGrowableArray4CpnLMonitorInfo____; +text: .text%__1cOcompiledVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cSvframeArrayElementHfill_in6MpnOcompiledVFrame__v_; +text: .text%__1cOcompiledVFrameHraw_bci6kM_i_; +text: .text%__1cNGrowableArray4CpnLMonitorInfo__2t6Mii_v_; +text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__; +text: .text%__1cOMacroAssemblerKincrementl6MpnMRegisterImpl_i_v_; +text: .text%__1cFframebCinterpreter_frame_set_locals6Mpl_v_; +text: .text%__1cFframebHinterpreter_frame_set_monitor_end6MpnPBasicObjectLock__v_; +text: .text%__1cTAbstractInterpreterPsize_activation6FpnNmethodOopDesc_iiiii_i_; +text: .text%__1cSPerfStringConstant2t6MnJCounterNS_pkc3_v_; +text: .text%__1cTAbstractInterpreterQcontinuation_for6FpnNmethodOopDesc_pCiiri_3_; +text: .text%__1cZInterpreterMacroAssemblerLcall_VM_Ico6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cFframebCinterpreter_frame_set_method6MpnNmethodOopDesc__v_; +text: .text%__1cMmulF_regNodeMideal_Opcode6kM_i_; +text: .text%__1cFframebBinterpreter_frame_sender_sp6kM_pl_; +text: .text%__1cMaddF_regNodeHtwo_adr6kM_I_; +text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_; +text: .text%__1cKstoreINodeErule6kM_I_; +text: .text%__1cJScopeDescImonitors6M_pnNGrowableArray4CpnMMonitorValue____; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_i_v_; +text: .text%__1cOcompiledVFrameGlocals6kM_pnUStackValueCollection__; +text: .text%__1cJScopeDescLexpressions6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cTconvF2D_reg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cSvframeArrayElementNon_stack_size6kMiiii_i_; +text: .text%__1cMaddD_regNodePoper_input_base6kM_I_; +text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__; +text: .text%__1cMorL_rRegNodePoper_input_base6kM_I_; +text: .text%__1cOcmovD_regUNodePoper_input_base6kM_I_; +text: 
.text%__1cPcmovI_reg_gNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMdivD_immNodePoper_input_base6kM_I_; +text: .text%__1cJloadDNodeHtwo_adr6kM_I_; +text: .text%__1cKReflectionTunbox_for_primitive6FpnHoopDesc_pnGjvalue_pnGThread__nJBasicType__; +text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__; +text: .text%__1cMmulF_memNodeJnum_opnds6kM_I_; +text: .text%__1cIMulDNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cODeoptimizationVtrap_state_add_reason6Fii_i_; +text: .text%__1cDhpiFclose6Fi_i_; +text: .text%__1cJMemRegionFminus6kM0_0_; +text: .text%__1cMmulD_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_i_v_; +text: .text%__1cNcmovL_regNodeMideal_Opcode6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerWupdate_mdp_by_constant6MpnMRegisterImpl_i_v_; +text: .text%__1cOtailjmpIndNodeNis_block_proj6kM_pknENode__; +text: .text%__1cRaddL_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cTconvI2F_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvL2F_reg_regNodeMideal_Opcode6kM_i_; +text: .text%__1cGICStubKcached_oop6kM_pnHoopDesc__; +text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__; +text: .text%__1cTconvD2F_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cJAssemblerFpushq6Mi_v_; +text: .text%JVM_Close; +text: .text%__1cMnegF_regNodePoper_input_base6kM_I_; +text: .text%__1cOcmovI_regUNodeJnum_opnds6kM_I_; +text: .text%__1cbCAbstractInterpreterGeneratorRset_unimplemented6Mi_v_; +text: .text%__1cRComputeEntryStackJdo_object6Mii_v_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cOcmovI_regUNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_; +text: .text%jni_NewObjectV: jni.o; +text: .text%__1cKConv2BNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cQshrL_rReg_CLNodeErule6kM_I_; +text: .text%__1cTconvF2D_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHnmethodbCcan_not_entrant_be_converted6M_i_; +text: .text%__1cNSafePointNodeQpeek_monitor_obj6kM_pnENode__; +text: .text%__1cOcmovI_regUNodeMcisc_operand6kM_i_; +text: .text%__1cNSafePointNodeQpeek_monitor_box6kM_pnENode__; +text: .text%__1cJLoadBNodeMstore_Opcode6kM_i_; +text: .text%__1cVCompressedWriteStreamKwrite_long6Mx_v_; +text: .text%__1cTconvF2I_reg_regNodePoper_input_base6kM_I_; +text: .text%__1cLConvF2INodeGOpcode6kM_i_; +text: .text%__1cPMultiBranchDataScompute_cell_count6FpnOBytecodeStream__i_; +text: .text%__1cFParsePdo_monitor_exit6M_v_; +text: .text%__1cMStubCodeMark2T6M_v_; +text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_; +text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_; +text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_; +text: .text%__1cKcastPPNodeErule6kM_I_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF3_v3_v_; +text: .text%__1cOsalI_mem_1NodeJnum_opnds6kM_I_; +text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_; +text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_; +text: .text%__1cPshrL_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cRandI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cRandI_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cQorI_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cJAssemblerGmovslq6MpnMRegisterImpl_2_v_; +text: 
.text%__1cRandI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_; +text: .text%__1cRConstantLongValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%JVM_StartThread; +text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o; +text: .text%__1cTconvF2D_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMsubD_regNodeErule6kM_I_; +text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__; +text: .text%__1cNmulI_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cIMulFNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cLRuntimeStub2n6FLI_pv_; +text: .text%__1cLRuntimeStubQnew_runtime_stub6FpkcpnKCodeBuffer_ipnJOopMapSet_i_p0_; +text: .text%__1cLRuntimeStub2t6MpkcpnKCodeBuffer_iipnJOopMapSet_i_v_; +text: .text%__1cTconvF2D_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cRxorI_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMmulF_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOLibraryCallKitbDis_method_invoke_or_aux_frame6MpnIJVMState__i_; +text: .text%__1cbFloadConL_0x6666666666666667NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAddFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOloadConL32NodeHsize_of6kM_I_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJOperation__v4_v_; +text: .text%__1cRaddL_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivLNodeJideal_reg6kM_I_; +text: .text%__1cGICStubFclear6M_v_; +text: .text%__1cTconvI2D_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMsubD_regNodeJnum_opnds6kM_I_; +text: .text%__1cMsubD_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cHCompileWget_MethodAccessorImpl6M_pnPciInstanceKlass__; +text: .text%__1cHCompileRget_Method_invoke6M_pnIciMethod__; +text: .text%__1cNdecI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFTypeFFxdual6kM_pknEType__; +text: .text%__1cTconvL2D_reg_memNodeJnum_opnds6kM_I_; +text: .text%__1cTconvI2D_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cTunsafe_intrinsic_id6FpnNsymbolOopDesc_1_nMvmIntrinsicsCID__; +text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__; +text: .text%__1cFStateM_sub_Op_ConF6MpknENode__v_; +text: .text%__1cMloadConFNodeHsize_of6kM_I_; +text: .text%__1cPsarL_rReg_2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsarL_rReg_63NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPoldgetTimeNanos6F_x_; +text: .text%__1cPno_rax_RegLOperFclone6kM_pnIMachOper__; +text: .text%__1cTAbstractInterpreterMreturn_entry6FnITosState_i_pC_; +text: .text%__1cPsarL_rReg_1NodePoper_input_base6kM_I_; +text: .text%__1cMnegD_regNodeJnum_opnds6kM_I_; +text: .text%__1cKmul_hiNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerEjccb6Mn0AJCondition_rnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cNcmovP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHMonitor2T6M_v_; +text: .text%__1cINegDNodeLbottom_type6kM_pknEType__; +text: .text%__1cLvframeArrayIallocate6FpnKJavaThread_ipnNGrowableArray4CpnOcompiledVFrame___pnLRegisterMap_nFframe_9A9A9A_p0_; +text: .text%__1cNThreadServiceNremove_thread6FpnKJavaThread_i_v_; +text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_; +text: .text%__1cODeoptimizationScreate_vframeArray6FpnKJavaThread_nFframe_pnLRegisterMap__pnLvframeArray__; +text: 
.text%__1cHThreadsGremove6FpnKJavaThread__v_; +text: .text%__1cODeoptimizationTuncommon_trap_inner6FpnKJavaThread_i_v_; +text: .text%__1cODeoptimizationPget_method_data6FpnKJavaThread_nMmethodHandle_i_pnRmethodDataOopDesc__; +text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o; +text: .text%__1cIOSThread2T6M_v_; +text: .text%__1cODeoptimizationNuncommon_trap6FpnKJavaThread_i_pn0ALUnrollBlock__; +text: .text%__1cTconvI2D_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationRgather_statistics6Fn0ALDeoptReason_n0ALDeoptAction_nJBytecodesECode__v_; +text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_; +text: .text%__1cIGraphKitTdprecision_rounding6MpnENode__2_; +text: .text%__1cNGrowableArray4CpnOcompiledVFrame__2t6Mii_v_; +text: .text%__1cOcmovI_regUNodeErule6kM_I_; +text: .text%__1cKJavaThreadEexit6Mi_v_; +text: .text%__1cGParker2T6M_v_; +text: .text%__1cCosLfree_thread6FpnIOSThread__v_; +text: .text%__1cXpartialSubtypeCheckNodeMideal_Opcode6kM_i_; +text: .text%JVM_GetInheritedAccessControlContext; +text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_; +text: .text%__1cLvframeArrayZdeallocate_monitor_chunks6M_v_; +text: .text%__1cGThread2T5B6M_v_; +text: .text%__1cIAddDNodeLbottom_type6kM_pknEType__; +text: .text%__1cKJavaThread2T6M_v_; +text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_; +text: .text%__1cMmulD_memNodePoper_input_base6kM_I_; +text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_; +text: .text%__1cLvframeArrayPunpack_to_stack6MrnFframe_i_v_; +text: .text%__1cOcompL_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cODeoptimizationLUnrollBlock2t6MiiiiiplppCnJBasicType__v_; +text: .text%__1cLvframeArrayHfill_in6MpnKJavaThread_ipnNGrowableArray4CpnOcompiledVFrame___pknLRegisterMap_i_v_; +text: .text%__SLIP.DELETER__A: thread.o; +text: .text%__1cbIjava_security_AccessControlContextGcreate6FnOobjArrayHandle_inGHandle_pnGThread__pnHoopDesc__; +text: .text%__1cIOSThreadKpd_destroy6M_v_; +text: .text%__1cMaddF_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationYfetch_unroll_info_helper6FpnKJavaThread__pn0ALUnrollBlock__; +text: .text%__1cODeoptimizationRlast_frame_adjust6Fii_i_; +text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_; +text: .text%__1cODeoptimizationNunpack_frames6FpnKJavaThread_i_nJBasicType__; +text: .text%__1cNnmethodLocker2t6MpC_v_; +text: .text%__1cTconvD2I_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cOtailjmpIndNodeMideal_Opcode6kM_i_; +text: .text%__1cLconvI2BNodeErule6kM_I_; +text: .text%__1cTconvF2I_reg_regNodeErule6kM_I_; +text: .text%__1cIciMethodVget_osr_flow_analysis6Mi_pnKciTypeFlow__; +text: .text%__1cSCallLeafDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQAbstractCompilerMsupports_osr6M_i_; +text: .text%__1cRaddL_mem_rRegNodePoper_input_base6kM_I_; +text: .text%__1cSCallLeafDirectNodeJnum_opnds6kM_I_; +text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_; +text: .text%__1cMmulL_memNodePoper_input_base6kM_I_; +text: .text%__1cODeoptimizationLUnrollBlock2T6M_v_; +text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cMaddF_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_; +text: .text%__1cMincL_memNodePoper_input_base6kM_I_; +text: .text%jni_NewDirectByteBuffer; +text: .text%__1cMmulL_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMaddD_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__; +text: 
.text%__1cJAssemblerEmovb6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cJSubFPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_only6MnITosState__v_; +text: .text%__1cRcmpFastUnlockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKloadUBNodeFreloc6kM_i_; +text: .text%__1cMStartOSRNodeScalling_convention6kMpnLOptoRegPair_I_v_; +text: .text%__1cMStartOSRNodeKosr_domain6F_pknJTypeTuple__; +text: .text%__1cMloadConPNodeGis_Con6kM_I_; +text: .text%__1cMmulD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_; +text: .text%__1cOPSVirtualSpaceJexpand_by6ML_i_; +text: .text%__1cNCallGeneratorHfor_osr6FpnIciMethod_i_p0_; +text: .text%__1cFParseWload_interpreter_state6MpnENode_2_v_; +text: .text%__1cKstoreINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOstackSlotDOperKin_RegMask6kMi_pknHRegMask__; +text: .text%jni_GetFloatArrayRegion: jni.o; +text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_; +text: .text%__1cKEntryPoint2t6M_v_; +text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl_i_v_; +text: .text%__1cSCardTableExtensionbEresize_covered_region_by_start6MnJMemRegion__v_; +text: .text%__1cQshrL_rReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRaddL_mem_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cIAddFNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMlogD_regNodeErule6kM_I_; +text: .text%__1cXpartialSubtypeCheckNodePoper_input_base6kM_I_; +text: .text%__1cNmulI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cMdecI_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQsalL_rReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKemit_break6FrnKCodeBuffer__v_; +text: .text%jni_GetStaticMethodID: jni.o; +text: .text%__1cOstackSlotDOperJnum_edges6kM_I_; +text: .text%__1cMsubF_regNodeMcisc_operand6kM_i_; +text: .text%__1cMdecI_memNodeFreloc6kM_i_; +text: .text%__1cMdecI_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRCardTableModRefBSbCfind_covering_region_by_base6MpnIHeapWord__i_; +text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cINegFNodeGOpcode6kM_i_; +text: .text%__1cRCardTableModRefBSbAlargest_prev_committed_end6kMi_pnIHeapWord__; +text: .text%__1cLloadSSDNodeJnum_opnds6kM_I_; +text: .text%__1cSMachBreakpointNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCardTableExtensionVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cLconvI2BNodeJnum_opnds6kM_I_; +text: .text%__1cNstoreImmPNodePoper_input_base6kM_I_; +text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_; +text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cHCompile2t6MpnFciEnv_pF_pknITypeFunc_pCpkciiii_v_; +text: .text%__1cTconvL2F_reg_regNodeMcisc_operand6kM_i_; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_; +text: .text%__1cNloadConPcNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cFStateM_sub_Op_CmpD6MpknENode__v_; +text: .text%__1cNloadConL0NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cUCallNativeDirectNodeKmethod_set6Ml_v_; +text: .text%__1cKcastPPNodeMideal_Opcode6kM_i_; +text: .text%__1cNcmovL_memNodeJnum_opnds6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_; 
+text: .text%__1cPshrL_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cIGraphKitIgen_stub6MpCpkciii_v_; +text: .text%__1cbDcatch_cleanup_find_cloned_def6FpnFBlock_pnENode_1rnLBlock_Array_i_3_: lcm.o; +text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cTC2IAdapterGeneratorUmkh_unverified_entry6FnMmethodHandle__pC_; +text: .text%__1cRaddL_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cLOptoRuntimeNgenerate_stub6FpnFciEnv_pF_pknITypeFunc_pCpkciiii_8_; +text: .text%__1cISubDNodeLbottom_type6kM_pknEType__; +text: .text%__1cbCcatch_cleanup_fix_all_inputs6FpnENode_11_v_: lcm.o; +text: .text%__1cISubFNodeLbottom_type6kM_pknEType__; +text: .text%__1cNdivI_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableGbranch6Fii_v_; +text: .text%__1cNstoreImmPNodeHtwo_adr6kM_I_; +text: .text%__1cLOptoRuntimeRnew_objArray_Type6F_pknITypeFunc__; +text: .text%JVM_GetComponentType; +text: .text%__1cIMulDNodeJideal_reg6kM_I_; +text: .text%__1cTconvF2D_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cJAssemblerEsbbq6MpnMRegisterImpl_i_v_; +text: .text%__1cNcmovL_memNodeMideal_Opcode6kM_i_; +text: .text%jni_GetStringRegion: jni.o; +text: .text%jni_EnsureLocalCapacity: jni.o; +text: .text%__1cLloadSSDNodeHtwo_adr6kM_I_; +text: .text%__1cMaddF_memNodePoper_input_base6kM_I_; +text: .text%__1cFParseMdo_anewarray6M_v_; +text: .text%__1cLConvI2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLconvI2BNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC22i_v_; +text: .text%__1cHThreadsYis_supported_jni_version6Fi_C_; +text: .text%__1cMincL_memNodeJnum_opnds6kM_I_; +text: .text%__1cRandL_rReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cRaddL_mem_rRegNodeJnum_opnds6kM_I_; +text: .text%JVM_NewArray; +text: .text%JVM_FreeMemory; +text: .text%JVM_TotalMemory; +text: .text%__1cMaddD_immNodeJnum_opnds6kM_I_; +text: .text%__1cMsubF_regNodeJnum_opnds6kM_I_; +text: .text%__1cLloadSSINodePoper_input_base6kM_I_; +text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cMincI_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__; +text: .text%__1cMsubF_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMmulF_memNodeErule6kM_I_; +text: .text%__1cODeoptimizationbJupdate_method_data_from_interpreter6FnQmethodDataHandle_ii_v_; +text: .text%__1cLClassLoaderSget_system_package6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cMTailJumpNodeKmatch_edge6kMI_I_; +text: .text%__1cFStateL_sub_Op_Box6MpknENode__v_; +text: .text%__1cRaddL_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTconvL2F_reg_regNodeErule6kM_I_; +text: .text%__1cKPSYoungGenLpost_resize6M_v_; +text: .text%__1cNcmovL_regNodeErule6kM_I_; +text: .text%__1cOcmovD_regUNodeJnum_opnds6kM_I_; +text: .text%__1cRandI_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeHtwo_adr6kM_I_; +text: .text%__1cTAbstractInterpreterRTosState_as_index6FnITosState__i_; +text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_; +text: .text%__1cKstoreBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPBytecode_invokeLresult_type6kMpnGThread__nJBasicType__; +text: .text%__1cMincL_memNodeMideal_Opcode6kM_i_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_nHAddress__v_; +text: 
.text%__1cJloadCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cCosPuncommit_memory6FpcL_i_; +text: .text%__1cSInterpreterRuntimeJnote_trap6FpnKJavaThread_ipnGThread__v_; +text: .text%__1cRSignatureIteratorHiterate6M_v_; +text: .text%__1cIModLNodeJideal_reg6kM_I_; +text: .text%__1cNTemplateTableOpatch_bytecode6FnJBytecodesECode_pnMRegisterImpl_4i_v_; +text: .text%__1cLConvD2INodeLbottom_type6kM_pknEType__; +text: .text%__1cMaddF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHBoxNodeLbottom_type6kM_pknEType__; +text: .text%__1cFStateM_sub_Op_DivL6MpknENode__v_; +text: .text%__1cTconvL2D_reg_memNodeErule6kM_I_; +text: .text%JVM_GetSystemPackage; +text: .text%__1cCosNcommit_memory6FpcLL_i_; +text: .text%__1cOMacroAssemblerFenter6M_v_; +text: .text%__1cLConvF2DNodeJideal_reg6kM_I_; +text: .text%__1cNTemplateTableLindex_check6FpnMRegisterImpl_2_v_; +text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_; +text: .text%__1cLMoveL2DNodeGOpcode6kM_i_; +text: .text%__1cMincI_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateP_sub_Op_ConvF2D6MpknENode__v_; +text: .text%__1cMmulL_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%Unsafe_DefineClass1; +text: .text%__1cSUnsafe_DefineClass6FpnHJNIEnv__pnI_jstring_pnL_jbyteArray_iipnI_jobject_7_pnH_jclass__: unsafe.o; +text: .text%__1cFTypeDFxdual6kM_pknEType__; +text: .text%__1cMincI_memNodeFreloc6kM_i_; +text: .text%__1cPcmpF_cc_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMsubF_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMsubF_memNodePoper_input_base6kM_I_; +text: .text%__1cTconvF2D_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%JVM_DefineClass; +text: .text%__1cMaddF_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMmulL_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cMmulL_memNodeJnum_opnds6kM_I_; +text: .text%__1cJAssemblerEshrq6MpnMRegisterImpl_i_v_; +text: .text%__1cTC2IAdapterGeneratorLadapter_for6FnMmethodHandle__pnKC2IAdapter__; +text: .text%__1cPcmpFastLockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerQtest_mdp_data_at6MpnMRegisterImpl_i2rnFLabel__v_; +text: .text%__1cZInterpreterMacroAssemblerYprofile_not_taken_branch6MpnMRegisterImpl__v_; +text: .text%__1cTleaPIdxScaleOffNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_; +text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_; +text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_; +text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_; +text: .text%__1cNloadConL0NodeGis_Con6kM_I_; +text: .text%__1cMset_property6FnGHandle_pkc2pnGThread__v_: jvm.o; +text: .text%JVM_GetCPFieldModifiers; +text: .text%JVM_InvokeMethod; +text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cZcatch_cleanup_inter_block6FpnENode_pnFBlock_13rnLBlock_Array_i_v_: lcm.o; +text: .text%__1cOsalI_mem_1NodeMideal_Opcode6kM_i_; +text: .text%__1cMaddF_immNodeJnum_opnds6kM_I_; +text: .text%__1cMsubD_immNodePoper_input_base6kM_I_; +text: .text%__1cMmulF_regNodeMcisc_operand6kM_i_; +text: .text%__1cMmulF_regNodeJnum_opnds6kM_I_; +text: .text%__1cMmulF_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cMmulD_regNodeHtwo_adr6kM_I_; +text: .text%__1cTconvD2F_reg_regNodeMcisc_operand6kM_i_; +text: .text%jni_AllocObject: jni.o; +text: .text%__1cCosHSolarisOset_mpss_range6FpcLL_i_; +text: 
.text%__1cTconvF2I_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFParseOdo_tableswitch6M_v_; +text: .text%__1cTmembar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cMrdx_RegLOperFclone6kM_pnIMachOper__; +text: .text%__1cICmpFNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJCondition__v4_v_; +text: .text%__1cFj_not6FnNTemplateTableJCondition__nJAssemblerJCondition__: templateTable_amd64.o; +text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_; +text: .text%__1cTconvF2D_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMmulF_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%Unsafe_AllocateInstance; +text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_; +text: .text%__1cOcmovD_regUNodeMideal_Opcode6kM_i_; +text: .text%__1cIciObjectMis_classless6kM_i_; +text: .text%__1cMsubD_immNodeMideal_Opcode6kM_i_; +text: .text%__1cRInlineCacheBufferOinit_next_stub6F_v_; +text: .text%__1cPshrL_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubD_immNodeErule6kM_I_; +text: .text%__1cHTypePtrKadd_offset6kMi_pk0_; +text: .text%__1cNTemplateTableHconvert6F_v_; +text: .text%__1cMnegD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQorI_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLMoveF2INodeGOpcode6kM_i_; +text: .text%__1cNcmovL_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_2_v_; +text: .text%__1cOcmovD_regUNodeErule6kM_I_; +text: .text%__1cKConv2BNodeLbottom_type6kM_pknEType__; +text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_; +text: .text%__1cMorL_rRegNodeMideal_Opcode6kM_i_; +text: .text%__1cTconvD2I_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cScompL_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cXpartialSubtypeCheckNodeErule6kM_I_; +text: .text%__1cOstackSlotDOperEtype6kM_pknEType__; +text: .text%__1cHThreadsLnmethods_do6F_v_; +text: .text%__1cLloadSSDNodeErule6kM_I_; +text: .text%__1cMsubD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRComputeEntryStackIdo_short6M_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorCto6F_pnMRegisterImpl__; +text: .text%__1cTconvF2D_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulL_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__; +text: .text%__1cLTypeInstPtrOxmeet_unloaded6kMpk0_2_; +text: .text%__1cMloadConFNodeKconst_size6kM_i_; +text: .text%__1cMorL_rRegNodeMcisc_operand6kM_i_; +text: .text%__1cMmulD_memNodeMideal_Opcode6kM_i_; +text: .text%__1cMaddD_regNodeMideal_Opcode6kM_i_; +text: .text%__1cTconvI2D_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMnegF_regNodeMideal_Opcode6kM_i_; +text: .text%__1cMloadConFNodeFreloc6kM_i_; +text: .text%__1cILogDNodeLbottom_type6kM_pknEType__; +text: .text%__1cLConvI2DNodeLbottom_type6kM_pknEType__; +text: .text%__1cNstoreImmPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLStrCompNodeJideal_reg6kM_I_; +text: .text%__1cMlogD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_; +text: .text%__1cMaddD_regNodeMcisc_operand6kM_i_; +text: .text%__1cMaddD_regNodeErule6kM_I_; +text: .text%__1cScompL_rReg_memNodePoper_input_base6kM_I_; 
+text: .text%__1cIAddFNodeJideal_reg6kM_I_; +text: .text%__1cJimmP0OperPconstant_is_oop6kM_i_; +text: .text%__1cJimmP0OperIconstant6kM_l_; +text: .text%__1cNciMethodKlassEmake6F_p0_; +text: .text%__1cETypeJis_finite6kM_i_; +text: .text%__1cHnmethodFflush6M_v_; +text: .text%JVM_GetClassContext; +text: .text%__1cIciObjectTis_type_array_klass6M_i_; +text: .text%__1cNsubL_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cIregFOperFclone6kM_pnIMachOper__; +text: .text%__1cRfind_field_offset6FpnI_jobject_ipnGThread__i_; +text: .text%__1cHBoxNodeJideal_reg6kM_I_; +text: .text%__1cXPartialSubtypeCheckNodeLbottom_type6kM_pknEType__; +text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_; +text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_; +text: .text%__1cLloadSSDNodeMideal_Opcode6kM_i_; +text: .text%__1cMsubF_regNodeErule6kM_I_; +text: .text%__1cRsubL_rReg_memNodeFreloc6kM_i_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_pnNsymbolOopDesc_pkc_nGHandle__; +text: .text%__1cTconvL2F_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMmulF_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJStubQdDueueKremove_all6M_v_; +text: .text%__1cIDivDNodeLbottom_type6kM_pknEType__; +text: .text%__1cLStatSamplerTget_system_property6FpkcpnGThread__2_; +text: .text%__1cRmethodDataOopDescRbci_to_extra_data6Mii_pnLProfileData__; +text: .text%__1cICodeBlobFflush6M_v_; +text: .text%__1cVMoveF2I_reg_stackNodeMideal_Opcode6kM_i_; +text: .text%__1cNmodL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_; +text: .text%__1cKstoreCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSmembar_releaseNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cQsalI_rReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSInterpreterRuntimeQcreate_exception6FpnKJavaThread_pc3_v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_2_v_; +text: .text%__1cJStubQdDueueMremove_first6Mi_v_; +text: .text%__1cQinitialize_class6FnMsymbolHandle_pnGThread__v_: thread.o; +text: .text%__1cJAssemblerFcmovq6Mn0AJCondition_pnMRegisterImpl_nHAddress__v_; +text: .text%__1cXpartialSubtypeCheckNodeJnum_opnds6kM_I_; +text: .text%__1cMmulD_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMaddF_immNodeHtwo_adr6kM_I_; +text: .text%__1cIMulDNodeGmul_id6kM_pknEType__; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cTconvL2D_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvD2F_reg_regNodeErule6kM_I_; +text: .text%__1cVMoveL2D_reg_stackNodeMideal_Opcode6kM_i_; +text: .text%__1cFStateM_sub_Op_MulD6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_ModL6MpknENode__v_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_; +text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_; +text: .text%__1cPloadConUL32NodeGis_Con6kM_I_; +text: .text%__1cQshrL_rReg_CLNodeHtwo_adr6kM_I_; +text: .text%__1cKJavaThreadbOcheck_special_condition_for_native_trans6Fp0_v_; +text: .text%__1cODeoptimizationYreset_invocation_counter6FpnJScopeDesc_i_v_; +text: .text%__1cZCallDynamicJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTconvF2I_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cMmulD_memNodeJnum_opnds6kM_I_; +text: .text%__1cHOrLNodeGOpcode6kM_i_; +text: .text%__1cIMulFNodeGmul_id6kM_pknEType__; +text: .text%__1cMnegF_regNodeErule6kM_I_; +text: .text%__1cMsubF_regNodeLout_RegMask6kM_rknHRegMask__; 
+text: .text%__1cTconvD2F_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_RawMonitorCreate; +text: .text%__1cOresolve_symbol6Fpkc_pC_: os_solaris.o; +text: .text%__1cMMutableSpaceOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cKCMoveDNodeGOpcode6kM_i_; +text: .text%__1cFParseQdo_monitor_enter6M_v_; +text: .text%__1cPMultiBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cXpartialSubtypeCheckNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvD2INodeJideal_reg6kM_I_; +text: .text%__1cKcastPPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZCallDynamicJavaDirectNodeJnum_opnds6kM_I_; +text: .text%__1cMlogD_regNodeJnum_opnds6kM_I_; +text: .text%Unsafe_CompareAndSwapInt; +text: .text%__1cOstackSlotIOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cOstackSlotIOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_ppkc5i_i_: arguments.o; +text: .text%__1cMmulD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNmulI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKimmL32OperFclone6kM_pnIMachOper__; +text: .text%__1cIimmFOperFclone6kM_pnIMachOper__; +text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_22pC_v_; +text: .text%__1cOindOffset8OperFclone6kM_pnIMachOper__; +text: .text%__1cOMacroAssemblerVreset_last_Java_frame6MpnMRegisterImpl_i_v_; +text: .text%__1cOloadConL32NodeFclone6kM_pnENode__; +text: .text%__1cMloadConFNodeFclone6kM_pnENode__; +text: .text%__1cScompL_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cNTemplateTableRlocals_index_wide6FpnMRegisterImpl__v_; +text: .text%__1cVMoveL2D_reg_stackNodePoper_input_base6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerPset_mdp_data_at6MpnMRegisterImpl_i2_v_; +text: .text%__1cKOSRAdapter2n6FLI_pv_; +text: .text%__1cKOSRAdapterPnew_osr_adapter6FpnKCodeBuffer_pnJOopMapSet_ii_p0_; +text: .text%__1cJAssemblerEincl6MnHAddress__v_; +text: .text%__1cKOSRAdapter2t6MpnKCodeBuffer_pnJOopMapSet_iii_v_; +text: .text%__1cTconvI2D_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cNSharedRuntimeRgenerate_osr_blob6Fi_pnKOSRAdapter__; +text: .text%__1cMaddD_regNodeJnum_opnds6kM_I_; +text: .text%__1cbCAbstractInterpreterGeneratorUset_wide_entry_point6MpnITemplate_rpC_v_; +text: .text%__1cMmulF_regNodeErule6kM_I_; +text: .text%__1cIMulFNodeJideal_reg6kM_I_; +text: .text%__1cFStateM_sub_Op_MulF6MpknENode__v_; +text: .text%__1cJOopMapSetQsingular_oop_map6M_pnGOopMap__; +text: .text%__1cHnmethodVmark_as_seen_on_stack6M_v_; +text: .text%__1cMloadConDNodeHsize_of6kM_I_; +text: .text%__1cOcmovI_regUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLconvI2BNodeHtwo_adr6kM_I_; +text: .text%__1cMorL_rRegNodeJnum_opnds6kM_I_; +text: .text%__1cQorI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cMaddD_immNodeHtwo_adr6kM_I_; +text: .text%__1cMloadConDNodeKconst_size6kM_i_; +text: .text%__1cLConvL2FNodeLbottom_type6kM_pknEType__; +text: .text%__1cLConvL2DNodeLbottom_type6kM_pknEType__; +text: .text%__1cLloadSSINodeMideal_Opcode6kM_i_; +text: .text%__1cOstackSlotDOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cTconvF2D_reg_memNodeFreloc6kM_i_; +text: .text%__1cLConvD2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cTconvL2D_reg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cMloadConDNodeFreloc6kM_i_; +text: .text%JVM_Lseek; +text: .text%__1cPsarL_rReg_1NodeErule6kM_I_; 
+text: .text%__1cPsarL_rReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVcompiledICHolderKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cMaddD_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cOstackSlotDOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMorL_rRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMmulF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMlogD_regNodeHtwo_adr6kM_I_; +text: .text%__1cRaddI_mem_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cFStateM_sub_Op_AddF6MpknENode__v_; +text: .text%__1cIXorINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cTconvL2F_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cSstring_compareNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNGrowableArray4CpnKOSRAdapter__Praw_at_put_grow6Mirk14_v_; +text: .text%__1cFStateP_sub_Op_StrComp6MpknENode__v_; +text: .text%__1cTconvL2F_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cUInterpreterGeneratorUgenerate_fixed_frame6Mi_v_; +text: .text%__1cGciType2t6MnJBasicType__v_; +text: .text%__1cMaddF_memNodeJnum_opnds6kM_I_; +text: .text%__1cUInterpreterGeneratorbAgenerate_run_compiled_code6M_v_; +text: .text%__1cScompL_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MpC_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_; +text: .text%__1cPcmpF_cc_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_; +text: .text%__1cQmulI_mem_immNodePoper_input_base6kM_I_; +text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_; +text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_; +text: .text%__1cNdecL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOLibraryCallKitXinline_string_compareTo6M_i_; +text: .text%__1cScompL_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cJAssemblerEsubq6MpnMRegisterImpl_nHAddress__v_; +text: .text%jni_GetEnv; +text: .text%JVM_NanoTime; +text: .text%__1cCosNjavaTimeNanos6F_x_; +text: .text%__1cZInterpreterMacroAssemblerNsuper_call_VM6MpnMRegisterImpl_2pC22_v_; +text: .text%__1cFTypeFJis_finite6kM_i_; +text: .text%__1cRmulI_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cScompL_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cHMulNodeGis_Mul6kM_pk0_; +text: .text%__1cETypeEmake6Fn0AFTYPES__pk0_; +text: .text%__1cQmulI_mem_immNodeMideal_Opcode6kM_i_; +text: .text%__1cParrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cJAssemblerLemit_data646MxnJrelocInfoJrelocType_i_v_; +text: .text%__1cJAssemblerFpushq6MnHAddress__v_; +text: .text%__1cIGraphKitSgen_native_wrapper6MpnIciMethod__v_; +text: .text%__1cRComputeEntryStackIdo_array6Mii_v_; +text: .text%__1cPcmpD_cc_immNodeKconst_size6kM_i_; +text: .text%__1cKLoadPCNodeJideal_reg6kM_I_; +text: .text%__1cMorL_rRegNodeErule6kM_I_; +text: .text%__1cUCallNativeDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompP_rReg_memNodePoper_input_base6kM_I_; +text: .text%__1cScompP_rReg_memNodeMideal_Opcode6kM_i_; +text: .text%__1cSvframeStreamCommonbFfill_in_compiled_inlined_sender6M_i_; +text: .text%__1cNdivI_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cKcastPPNodeJnum_opnds6kM_I_; +text: .text%__1cTconvL2D_reg_memNodeHtwo_adr6kM_I_; +text: .text%__1cOLibraryCallKitbNinline_native_Reflection_getCallerClass6M_i_; +text: 
.text%__1cOLibraryCallKitZinline_native_Class_query6MnMvmIntrinsicsCID__i_; +text: .text%__1cMnegF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod__v_; +text: .text%__1cKciTypeFlowLStateVectorOdo_null_assert6MpnHciKlass__v_; +text: .text%__1cMsubD_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cJLoadLNodeMstore_Opcode6kM_i_; +text: .text%__1cNGrowableArray4CpnGciType__Egrow6Mi_v_; +text: .text%__1cMdivD_immNodeJnum_opnds6kM_I_; +text: .text%__1cNstoreImmPNodeJnum_opnds6kM_I_; +text: .text%__1cMdivD_immNodeHtwo_adr6kM_I_; +text: .text%__1cLloadSSINodeHtwo_adr6kM_I_; +text: .text%__1cLConvI2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIciMethodMnative_entry6M_pC_; +text: .text%__1cPcmpD_cc_immNodeFreloc6kM_i_; +text: .text%__1cUCallNativeDirectNodeFreloc6kM_i_; +text: .text%__1cNloadConPcNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulD_memNodeErule6kM_I_; +text: .text%jni_CallVoidMethod: jni.o; +text: .text%__1cFStateS_sub_Op_CallNative6MpknENode__v_; +text: .text%__1cFStateO_sub_Op_LoadPC6MpknENode__v_; +text: .text%__1cQAbstractCompilerPsupports_native6M_i_; +text: .text%__1cQorI_rReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbCAbstractInterpreterGeneratorbBgenerate_result_handler_for6MnJBasicType__pC_; +text: .text%__1cMmulF_regNodeHtwo_adr6kM_I_; +text: .text%__1cPsalL_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cQshrI_rReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableQvolatile_barrier6FnJAssemblerQMembar_mask_bits__v_; +text: .text%__1cNdivL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cVMoveL2D_reg_stackNodeErule6kM_I_; +text: .text%__1cRsalI_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNRelocIterator2t6MpnKCodeBuffer_pC3_v_; +text: .text%__1cJBasicLockHmove_to6MpnHoopDesc_p0_v_; +text: .text%__1cYinternal_word_RelocationMforce_target6MpC_v_; +text: .text%__1cOstackSlotIOperEtype6kM_pknEType__; +text: .text%__1cLloadSSINodeJnum_opnds6kM_I_; +text: .text%__1cKPSYoungGenRavailable_to_live6M_L_; +text: .text%__1cOstackSlotIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNcmovL_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cSstore_to_stackslot6FrnKCodeBuffer_iii_v_; +text: .text%__1cFTypeFGis_nan6kM_i_; +text: .text%__1cQshrL_rReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvD2F_reg_regNodeJnum_opnds6kM_I_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_; +text: .text%__1cUInterpreterGeneratorbDgenerate_stack_overflow_check6M_v_; +text: .text%__1cRComputeEntryStackHdo_bool6M_v_; +text: .text%__1cMmulD_immNodeFreloc6kM_i_; +text: .text%__1cQmulI_mem_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_continuation_for6MnITosState__pC_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%JVM_FindPrimitiveClass; +text: .text%__1cOstackSlotIOperJnum_edges6kM_I_; +text: .text%JVM_IsSameClassPackage; +text: .text%__1cUInterpreterGeneratorXcheck_for_compiled_code6MrnFLabel__v_; +text: .text%__1cRaddI_mem_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMaddF_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__; +text: 
.text%__1cLconvI2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMaddD_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMlogD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPSVirtualSpaceJshrink_by6ML_i_; +text: .text%__1cTconvD2F_reg_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cRCardTableModRefBSYcommitted_unique_to_self6kMinJMemRegion__1_; +text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_; +text: .text%__1cFStateN_sub_Op_LoadD6MpknENode__v_; +text: .text%__1cTconvL2F_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cZInterpreterMacroAssemblerRremove_activation6MnITosState_pnMRegisterImpl_iii_v_; +text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_; +text: .text%__1cUInterpreterGeneratorbEgenerate_asm_interpreter_entry6Mi_pC_; +text: .text%__1cPcmpF_cc_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubF_memNodeJnum_opnds6kM_I_; +text: .text%__1cKPSYoungGenUavailable_to_min_gen6M_L_; +text: .text%__1cJAssemblerKrepne_scan6M_v_; +text: .text%__1cJname2type6Fpkc_nJBasicType__; +text: .text%__1cKPSYoungGenbCreset_survivors_after_shrink6M_v_; +text: .text%__1cKPSYoungGenQlimit_gen_shrink6ML_L_; +text: .text%__1cTconvI2D_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateP_sub_Op_ConvI2F6MpknENode__v_; +text: .text%__1cMmulD_immNodeKconst_size6kM_i_; +text: .text%__1cMmulD_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cMmulF_immNodeFreloc6kM_i_; +text: .text%__1cJloadBNodeHsize_of6kM_I_; +text: .text%__1cOcompI_rRegNodeHsize_of6kM_I_; +text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__; +text: .text%__1cJloadPNodeHsize_of6kM_I_; +text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cOtypeArrayKlassNexternal_name6FnJBasicType__pkc_; +text: .text%Unsafe_StaticFieldOffset; +text: .text%__1cFTypeFFempty6kM_i_; +text: .text%__1cNcmovL_regNodeHtwo_adr6kM_I_; +text: .text%__1cLloadSSDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateO_sub_Op_CMoveP6MpknENode__v_; +text: .text%__1cVVM_ParallelGCSystemGCEdoit6M_v_; +text: .text%__1cETypeFxdual6kM_pk0_; +text: .text%__1cVVM_ParallelGCSystemGC2t6MIInHGCCauseFCause__v_; +text: .text%__1cJCmpF3NodeGOpcode6kM_i_; +text: .text%Unsafe_GetObjectVolatile; +text: .text%__1cMsubD_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLdivL_10NodePoper_input_base6kM_I_; +text: .text%__1cVVM_ParallelGCSystemGCEname6kM_pkc_; +text: .text%__1cOtypeArrayKlassMcreate_klass6FnJBasicType_ipnGThread__pnMklassOopDesc__; +text: .text%Unsafe_EnsureClassInitialized; +text: .text%__1cJAssemblerEjmpb6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cOcmovI_regUNodeHtwo_adr6kM_I_; +text: .text%__1cMaddD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerEmovw6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerGmovsbl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cMrax_RegLOperFclone6kM_pnIMachOper__; +text: .text%__1cMorL_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvD2F_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cUParallelScavengeHeapHcollect6MnHGCCauseFCause__v_; +text: .text%__1cJLoadDNodeJideal_reg6kM_I_; +text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cQmulI_mem_immNodeOmemory_operand6kM_pknIMachOper__; +text: 
.text%__1cOMacroAssemblerMcall_VM_leaf6MpCi_v_; +text: .text%__1cPcheckCastPPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJAssemblerFpopaq6M_v_; +text: .text%__1cSmembar_acquireNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKimmL10OperJnum_edges6kM_I_; +text: .text%Unsafe_StaticFieldBaseFromField; +text: .text%__1cLcastP2LNodeHsize_of6kM_I_; +text: .text%__1cQmulI_mem_immNodeRis_cisc_alternate6kM_i_; +text: .text%__1cMsubD_regNodeHtwo_adr6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerSnotify_method_exit6MnITosState__v_; +text: .text%__1cRsubI_rReg_memNodeHsize_of6kM_I_; +text: .text%__1cTconvL2F_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cNReservedSpace2t6MpcL_v_; +text: .text%__1cKmul_hiNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSmembar_acquireNodeJnum_opnds6kM_I_; +text: .text%__1cQsarL_rReg_63NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerMemit_arith_b6MiipnMRegisterImpl_i_v_; +text: .text%__1cPsarL_rReg_2NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLdivL_10NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_; +text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_; +text: .text%JVM_GC; +text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_; +text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_; +text: .text%__1cScompP_rReg_memNodeJnum_opnds6kM_I_; +text: .text%__1cIPSOldGenSexpand_to_reserved6M_i_; +text: .text%__1cQmulI_mem_immNodeJnum_opnds6kM_I_; +text: .text%__1cIPSOldGenJexpand_by6ML_i_; +text: .text%__1cIPSOldGenGexpand6ML_v_; +text: .text%__1cIPSOldGenXexpand_and_cas_allocate6ML_pnIHeapWord__; +text: .text%__1cPsarL_rReg_1NodeHtwo_adr6kM_I_; +text: .text%__1cJAssemblerFtestb6MpnMRegisterImpl_i_v_; +text: .text%__1cXpartialSubtypeCheckNodeHtwo_adr6kM_I_; +text: .text%__1cMsubF_regNodeHtwo_adr6kM_I_; +text: .text%__1cZInterpreterMacroAssemblerRget_constant_pool6MpnMRegisterImpl__v_; +text: .text%__1cRaddL_rReg_memNodeFreloc6kM_i_; +text: .text%__1cVMoveL2D_reg_stackNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompP_rReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPsarL_rReg_1NodeJnum_opnds6kM_I_; +text: .text%__1cOGenerateOopMapGdo_jsr6Mi_v_; +text: .text%__1cMmulF_memNodeHtwo_adr6kM_I_; +text: .text%__1cScompP_rReg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cLPSMarkSweepGinvoke6Fpii_v_; +text: .text%__1cOcmovD_regUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNcmovL_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvF2I_reg_regNodeHtwo_adr6kM_I_; +text: .text%__1cMmulF_immNodeKconst_size6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerbGget_unsigned_2_byte_index_at_bcp6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cNTemplateTableGiconst6Fi_v_; +text: .text%__1cMdecI_memNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cCosMuser_handler6F_pv_; +text: .text%__1cJloadDNodeFreloc6kM_i_; +text: .text%__1cMincL_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNaddL_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cCosHSolarisSset_signal_handler6Fiii_v_; +text: .text%__1cNinstanceKlassSremove_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cTconvD2F_reg_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_; +text: .text%__1cMmulD_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLClassLoaderbCupdate_class_path_entry_list6Fpkc_v_; 
+text: .text%__1cMsubF_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcmovI_regUNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cTconvL2D_reg_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cLOptoRuntimeRresolve_call_Type6F_pknITypeFunc__; +text: .text%__1cHciKlassIis_klass6M_i_; +text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_; +text: .text%__1cKScopeValuePis_constant_int6kM_i_; +text: .text%jni_RegisterNatives: jni.o; +text: .text%Unsafe_GetNativeFloat; +text: .text%__1cMsubF_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%JVM_GetClassDeclaredFields; +text: .text%__1cMsubF_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cJMemRegion2t6M_v_; +text: .text%jni_SetStaticObjectField: jni.o; +text: .text%__1cQsalL_rReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cCosEstat6FpkcpnEstat__i_; +text: .text%__1cJArgumentsRverify_percentage6FLpkc_i_; +text: .text%__1cLOptoRuntimeTmultianewarray_Type6Fi_pknITypeFunc__; +text: .text%__1cRComputeEntryStackHdo_long6M_v_; +text: .text%__1cHnmethodVinvalidate_osr_method6M_v_; +text: .text%__1cMaddF_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_; +text: .text%jni_SetObjectField: jni.o; +text: .text%__1cLConvD2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJAssemblerEcall6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cJloadDNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLConvD2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOBasicHashtable2t6Mii_v_; +text: .text%__1cCosHSolarisOis_sig_ignored6Fi_i_; +text: .text%__1cNandI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNcmovL_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cOPhaseIdealLoopJclone_iff6MpnHPhiNode_pnNIdealLoopTree__pnIBoolNode__; +text: .text%__1cMTailJumpNodeGOpcode6kM_i_; +text: .text%__1cCosHSolarisVcleanup_interruptible6FpnKJavaThread__v_; +text: .text%__1cCosHSolarisTsetup_interruptible6F_pnKJavaThread__; +text: .text%__1cCosHSolarisTsetup_interruptible6FpnKJavaThread__v_; +text: .text%__1cMdivD_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_IsSupportedJNIVersion; +text: .text%JVM_LoadLibrary; +text: .text%JVM_Sleep; +text: .text%__1cNReservedSpaceKinitialize6MLLipc_v_; +text: .text%__1cHOrLNodeLbottom_type6kM_pknEType__; +text: .text%__1cOstackSlotIOperFscale6kM_i_; +text: .text%__1cLConvD2FNodeLbottom_type6kM_pknEType__; +text: .text%__1cLConvF2INodeLbottom_type6kM_pknEType__; +text: .text%jint_cmp: parse2.o; +text: .text%__1cOstackSlotIOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cLloadSSINodeErule6kM_I_; +text: .text%__1cLConvI2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cVMoveF2I_reg_stackNodePoper_input_base6kM_I_; +text: .text%__1cLConvL2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIDivDNodeJideal_reg6kM_I_; +text: .text%__1cRandI_rReg_memNodeFreloc6kM_i_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_; +text: .text%jni_GetJavaVM: jni.o; +text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_; +text: .text%jni_MonitorExit: jni.o; +text: .text%jni_MonitorEnter: jni.o; +text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_; +text: .text%__1cPciInstanceKlassbDcompute_shared_is_initialized6M_i_; +text: .text%__1cNGrowableArray4CpnIPerfData__Praw_at_put_grow6Mirk14_v_; +text: .text%__1cFciEnvOrecord_failure6Mpkc_v_; +text: 
.text%__1cMciArrayKlassRbase_element_type6M_pnGciType__; +text: .text%__1cLConvL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOstackSlotDOperFscale6kM_i_; +text: .text%__1cOstackSlotDOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cOcmovI_regUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKReturnNodeUdepends_only_on_test6kM_i_; +text: .text%__1cNcmovL_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvD2F_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cTconvF2I_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvL2F_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubD_immNodeJnum_opnds6kM_I_; +text: .text%__1cVMoveL2D_reg_stackNodeJnum_opnds6kM_I_; +text: .text%__1cRaddI_mem_rRegNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNTemplateTableH_return6FnITosState__v_; +text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_; +text: .text%__1cTconvL2D_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXpartialSubtypeCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNSharedRuntimeEdrem6Fdd_d_; +text: .text%__1cRaddI_rReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMloadConDNodeFclone6kM_pnENode__; +text: .text%__1cScompP_rReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKC2IAdapterXreturn_from_interpreter6M_pC_; +text: .text%__1cKC2IAdapterRsetup_stack_frame6MnFframe_pnLvframeArray__v_; +text: .text%__1cIregDOperFclone6kM_pnIMachOper__; +text: .text%__1cJAssemblerGmovswl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cMsubF_memNodeErule6kM_I_; +text: .text%__1cIimmDOperFclone6kM_pnIMachOper__; +text: .text%__1cOMacroAssemblerQload_signed_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cKC2IAdapterSunpack_c2i_adapter6MnFframe_1pnLvframeArray__v_; +text: .text%__1cNdivI_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRMachSpillCopyNodeHsize_of6kM_I_; +text: .text%__1cFframebFset_interpreter_frame_sender_sp6Mpl_v_; +text: .text%__1cPsarL_rReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cULinearLeastSquareFit2t6MI_v_; +text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_; +text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_; +text: .text%__1cMaddF_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulF_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosZvm_allocation_granularity6F_i_; +text: .text%__1cJAssemblerFpopfq6M_v_; +text: .text%__1cCosOreserve_memory6FLpc_1_; +text: .text%Unsafe_ObjectFieldOffset; +text: .text%__1cUParallelScavengeHeapEkind6M_nNCollectedHeapEName__; +text: .text%__1cKMemoryPool2t6Mpkcn0AIPoolType_LLii_v_; +text: .text%__1cNSpaceCounters2t6MpkciLpnMMutableSpace_pnSGenerationCounters__v_; +text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_; +text: .text%__1cMaddF_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cOcompiledVFrameUresolve_monitor_lock6kMnILocation__pnJBasicLock__; +text: .text%__1cTconvD2I_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: 
.text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_; +text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_; +text: .text%__1cNReservedSpaceKfirst_part6MLii_0_; +text: .text%__1cNCellTypeStateImake_any6Fi_0_; +text: .text%__1cMorL_rRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cNTemplateTableFaload6Fi_v_; +text: .text%__1cISubFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMnegF_regNodeJnum_opnds6kM_I_; +text: .text%__1cINegDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKCompiledICOis_megamorphic6kM_i_; +text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_; +text: .text%__1cISubDNodeGadd_id6kM_pknEType__; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_double6M_v_; +text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_; +text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_; +text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_; +text: .text%__1cMaddD_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMMonitorChunk2t6Mi_v_; +text: .text%__1cLMoveL2DNodeLbottom_type6kM_pknEType__; +text: .text%__1cZCompiledArgumentOopFinderDset6MinJBasicType__v_; +text: .text%__1cNstoreImmPNodeFreloc6kM_i_; +text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cLOptoRuntimebBhandle_wrong_method_ic_miss6FpnKJavaThread__pC_; +text: .text%__1cKJavaThreadUremove_monitor_chunk6MpnMMonitorChunk__v_; +text: .text%__1cKJavaThreadRadd_monitor_chunk6MpnMMonitorChunk__v_; +text: .text%__1cNReservedSpace2t6ML_v_; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__; +text: .text%__1cNmulL_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNmulI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTGeneratePairingInfoOreport_results6kM_i_; +text: .text%Unsafe_GetNativeByte; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_; +text: .text%__1cFframebLprevious_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_; +text: .text%__1cOMacroAssemblerQload_signed_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cQOopMapCacheEntryPfill_for_native6M_v_; +text: .text%__1cFStateP_sub_Op_ConvD2I6MpknENode__v_; +text: .text%__1cJAssemblerGpushfq6M_v_; +text: .text%__1cKVtableStubRpd_code_alignment6F_i_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_2_v_; +text: .text%__1cIDivFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cINegDNodeJideal_reg6kM_I_; +text: .text%__1cODeoptimizationZtrap_state_set_recompiled6Fii_i_; +text: .text%__1cPshrL_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTconvF2D_reg_memNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_; +text: .text%__1cNandI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pLi_v_; +text: .text%__1cMsubF_memNodeHtwo_adr6kM_I_; +text: .text%__1cINegFNodeLbottom_type6kM_pknEType__; +text: .text%__1cRaddL_rReg_memNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cLVtableStubsLcreate_stub6FiipnNmethodOopDesc__pC_; +text: .text%__1cMmulL_memNodeFreloc6kM_i_; +text: .text%__1cLVtableStubsGlookup6Fiii_pnKVtableStub__; +text: .text%__1cMMonitorValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cFStateM_sub_Op_NegD6MpknENode__v_; +text: .text%__1cOtailjmpIndNodePoper_input_base6kM_I_; +text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_; +text: 
.text%__1cISubDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_; +text: .text%__1cLVtableStubsOis_entry_point6FpC_i_; +text: .text%__1cNstoreImmPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHRetNodeJnum_opnds6kM_I_; +text: .text%__1cIDivINodeJideal_reg6kM_I_; +text: .text%__1cRInvocationCounterDdef6Fn0AFState_ipFnMmethodHandle_pnGThread__pC_v_; +text: .text%__1cMNativeLookupNlong_jni_name6FnMmethodHandle__pc_; +text: .text%__1cMaddF_memNodeErule6kM_I_; +text: .text%__1cOcmovD_regUNodeHtwo_adr6kM_I_; +text: .text%__1cMaddF_memNodeHtwo_adr6kM_I_; +text: .text%__1cOGenerateOopMapTret_jump_targets_do6MpnOBytecodeStream_pFp0ipi_vi4_v_; +text: .text%__1cOClassPathEntry2t6M_v_; +text: .text%__1cMorL_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cOMacroAssemblerNpop_CPU_state6M_v_; +text: .text%__1cMmulF_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cOMacroAssemblerOpush_CPU_state6M_v_; +text: .text%__1cOMacroAssemblerNpop_FPU_state6M_v_; +text: .text%__1cOMacroAssemblerOpush_FPU_state6M_v_; +text: .text%__1cOMacroAssemblerMpop_IU_state6M_v_; +text: .text%__1cOMacroAssemblerNpush_IU_state6M_v_; +text: .text%__1cOMacroAssemblerSstore_check_part_26MpnMRegisterImpl__v_; +text: .text%__1cTconvL2D_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOMacroAssemblerSstore_check_part_16MpnMRegisterImpl__v_; +text: .text%__1cRaddL_rReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMaddF_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRClassPathZipEntry2t6Mppvpc_v_; +text: .text%__1cNTemplateTableOprepare_invoke6FpnMRegisterImpl_2inJBytecodesECode__v_; +text: .text%__1cVMoveF2I_reg_stackNodeErule6kM_I_; +text: .text%__1cJAssemblerEandq6MpnMRegisterImpl_2_v_; +text: .text%__1cFParsePdo_lookupswitch6M_v_; +text: .text%__1cLConvF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFParseRjump_if_true_fork6MpnGIfNode_ii_v_; +text: .text%__1cIAddDNodeJideal_reg6kM_I_; +text: .text%__1cJloadFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRComputeEntryStackJdo_double6M_v_; +text: .text%__1cMaddD_regNodeHtwo_adr6kM_I_; +text: .text%__1cLConvD2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTablebAload_invoke_cp_cache_entry6FipnMRegisterImpl_22ii_v_; +text: .text%__1cNTemplateTableZload_field_cp_cache_entry6FipnMRegisterImpl_22i_v_; +text: .text%__1cJAssemblerEcmpb6MnHAddress_i_v_; +text: .text%__1cCosGsignal6Fipv_1_; +text: .text%__1cLClassLoaderSget_canonical_path6Fpc1i_i_; +text: .text%__1cLClassLoaderXcreate_class_path_entry6FpcnEstat_ppnOClassPathEntry__v_; +text: .text%__1cMsubD_immNodeHtwo_adr6kM_I_; +text: .text%__1cLklassVtableTis_miranda_entry_at6Mi_i_; +text: .text%__1cKPSScavengeZclean_up_failed_promotion6F_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_; +text: .text%JVM_Available; +text: .text%__1cJAssemblerHucomiss6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerRprofile_checkcast6MipnMRegisterImpl__v_; +text: .text%__1cIAddDNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJAssemblerFimulq6MpnMRegisterImpl_2_v_; +text: .text%__1cIRetTableUfind_jsrs_for_target6Mi_pnNRetTableEntry__; +text: .text%__1cNRegisterSaverWrestore_live_registers6FpnOMacroAssembler__v_; +text: .text%__1cLClassLoaderLadd_to_list6FpnOClassPathEntry__v_; +text: .text%__1cNTemplateTableGastore6Fi_v_; +text: .text%__1cNTemplateTableGdstore6Fi_v_; +text: .text%__1cNTemplateTableGfstore6Fi_v_; +text: .text%__1cNTemplateTableGlstore6Fi_v_; +text: 
.text%__1cNRegisterSaverTsave_live_registers6FpnOMacroAssembler_ipi_pnGOopMap__; +text: .text%__1cNTemplateTableGistore6Fi_v_; +text: .text%__1cIRetTableHadd_jsr6Mii_v_; +text: .text%__1cMincL_memNodeHtwo_adr6kM_I_; +text: .text%__1cKPSYoungGenOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cNGrowableArray4CpnLmarkOopDesc__2t6Mii_v_; +text: .text%__1cUCompressedReadStreamJread_long6M_x_; +text: .text%__1cISubDNodeJideal_reg6kM_I_; +text: .text%__1cWNonPrintingResourceObj2n6FLnLResourceObjPallocation_type__pv_; +text: .text%__1cNTemplateTableFdload6Fi_v_; +text: .text%__1cNTemplateTableFfload6Fi_v_; +text: .text%__1cPaddress_of_flag6FnXCommandLineFlagWithType__pnEFlag__: globals.o; +text: .text%__1cNTemplateTableFlload6Fi_v_; +text: .text%__1cNTemplateTableFiload6Fi_v_; +text: .text%__1cMmulI_memNodePoper_input_base6kM_I_; +text: .text%__1cOcompL_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNGrowableArray4CpnLmarkOopDesc__Uclear_and_deallocate6M_v_; +text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cOcompI_rRegNodeFclone6kM_pnENode__; +text: .text%__1cRsubI_rReg_memNodeFclone6kM_pnENode__; +text: .text%__1cLcastP2LNodeFclone6kM_pnENode__; +text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_; +text: .text%__1cRaddL_rReg_memNodeErule6kM_I_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl__v_; +text: .text%__1cOsalI_mem_1NodeHtwo_adr6kM_I_; +text: .text%__1cHRetNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVMoveL2D_reg_stackNodeHtwo_adr6kM_I_; +text: .text%__1cRaddL_mem_rRegNodeHtwo_adr6kM_I_; +text: .text%__1cJloadPNodeFclone6kM_pnENode__; +text: .text%__1cJloadBNodeFclone6kM_pnENode__; +text: .text%__1cRaddL_rReg_memNodeHtwo_adr6kM_I_; +text: .text%__1cMmulF_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cMaddF_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerEmovb6MnHAddress_i_v_; +text: .text%__1cIAddDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%Unsafe_AllocateMemory; +text: .text%__1cVMoveF2I_reg_stackNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerHfxrstor6MnHAddress__v_; +text: .text%__1cJAssemblerGfxsave6MnHAddress__v_; +text: .text%__1cHCompilePget_invoke_name6M_pnIciSymbol__; +text: .text%__1cJAssemblerEsetb6Mn0AJCondition_pnMRegisterImpl__v_; +text: .text%__1cNxorI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cMVM_OperationNdoit_prologue6M_i_; +text: .text%__1cNGCTaskManagerGthread6MI_pnMGCTaskThread__; +text: .text%__1cRConstantLongValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cRConstantLongValueQis_constant_long6kM_i_; +text: .text%__1cKScopeValuePis_constant_oop6kM_i_; +text: .text%__1cKScopeValueSis_constant_double6kM_i_; +text: .text%__1cMmulD_memNodeHtwo_adr6kM_I_; +text: .text%__1cVMoveF2I_reg_stackNodeHtwo_adr6kM_I_; +text: .text%jni_CallStaticObjectMethod: jni.o; +text: .text%__1cNcmovL_memNodeHtwo_adr6kM_I_; +text: .text%__1cFStateM_sub_Op_AddD6MpknENode__v_; +text: .text%__1cMmulI_memNodeMideal_Opcode6kM_i_; +text: .text%__1cScompL_rReg_memNodeFreloc6kM_i_; +text: .text%__1cLloadSSINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNGrowableArray4CpnIPerfData__2t6Mii_v_; +text: .text%__1cOCompilerThreadSis_Compiler_thread6kM_i_; +text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_; +text: .text%__1cMPerfDataList2T6M_v_; +text: .text%__1cLVtableStubsFenter6FiiipnKVtableStub__v_; +text: .text%__1cMmulI_memNodeOmemory_operand6kM_pknIMachOper__; +text: 
.text%__1cOcmovD_regUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__; +text: .text%__1cNnegI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cbCAbstractInterpreterGeneratorbHgenerate_exception_handler_common6Mpkc2i_pC_; +text: .text%__1cSCommandLineFlagsExKis_default6FnPCommandLineFlag__i_; +text: .text%__1cJAssemblerEnegl6MpnMRegisterImpl__v_; +text: .text%__1cUConstantOopReadValuePis_constant_oop6kM_i_; +text: .text%__1cHMatcherNlogDSupported6F_ki_; +text: .text%__1cOGenerateOopMapRdo_multianewarray6Mii_v_; +text: .text%__1cLconvI2BNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cUPSGenerationCounters2t6MpkciipnOPSVirtualSpace__v_; +text: .text%__1cMPerfDataList2t6Mi_v_; +text: .text%__1cFStateP_sub_Op_ConvI2D6MpknENode__v_; +text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_; +text: .text%__1cJCodeCachebCmake_marked_nmethods_zombies6F_v_; +text: .text%__1cNVM_DeoptimizeEname6kM_pkc_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_; +text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_; +text: .text%__1cSCommandLineFlagsExJboolAtPut6FnXCommandLineFlagWithType_i_v_; +text: .text%__1cMmulI_memNodeJnum_opnds6kM_I_; +text: .text%__1cFStateM_sub_Op_CmpF6MpknENode__v_; +text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_; +text: .text%__1cOtailjmpIndNodeGpinned6kM_i_; +text: .text%__1cQshrL_rReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerGmovzbl6MpnMRegisterImpl_2_v_; +text: .text%__1cILogDNodeJideal_reg6kM_I_; +text: .text%__1cMmulI_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cRaddL_mem_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMincL_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateO_sub_Op_Conv2B6MpknENode__v_; +text: .text%__1cNcmovL_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_CallStaticObjectMethodV: jni.o; +text: .text%__1cMOopTaskQdDueueKinitialize6M_v_; +text: .text%__1cMmulD_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMOopTaskQdDueue2t6M_v_; +text: .text%__1cOLibraryCallKitbBinline_native_currentThread6M_i_; +text: .text%__1cNMemoryManager2t6M_v_; +text: .text%__1cMaddF_immNodeKconst_size6kM_i_; +text: .text%__1cVMoveL2D_reg_stackNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOLibraryCallKitSinline_math_native6MnMvmIntrinsicsCID__i_; +text: .text%__1cFciEnvbNArrayIndexOutOfBoundsException_instance6M_pnKciInstance__; +text: .text%__1cMsubD_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMaddF_immNodeFreloc6kM_i_; +text: .text%__1cMaddD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOCompilerThreadbCis_hidden_from_external_view6kM_i_; +text: .text%__1cNReservedSpaceJlast_part6ML_0_; +text: .text%__1cMnegF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMaddD_immNodeFreloc6kM_i_; +text: .text%jni_IsInstanceOf: jni.o; +text: .text%__1cMaddD_immNodeKconst_size6kM_i_; +text: .text%jni_Throw: jni.o; +text: .text%__1cRmulI_rReg_immNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cOsalI_mem_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSPSPromotionManager2t6M_v_; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_nHAddress__v_; +text: 
.text%__1cOLibraryCallKitXgenerate_current_thread6MrpnENode__2_; +text: .text%__1cMsubF_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerFmovss6MpnRFloatRegisterImpl_2_v_; +text: .text%JVM_GetLastErrorString; +text: .text%__1cJAssemblerFmovsd6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_22_v_; +text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_; +text: .text%__1cFStateM_sub_Op_SubF6MpknENode__v_; +text: .text%__1cLVtableStubsScreate_vtable_stub6Fii_pnKVtableStub__; +text: .text%JVM_GetInterfaceVersion; +text: .text%__1cKstoreBNodeErule6kM_I_; +text: .text%__1cKVtableStub2n6FLi_pv_; +text: .text%__1cJAssemblerEdecq6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_22_v_; +text: .text%__1cOtailjmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosFyield6F_v_; +text: .text%__1cRaddI_mem_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOPSVirtualSpace2t6MnNReservedSpace_L_v_; +text: .text%__1cOLibraryCallKitMinline_trans6MnMvmIntrinsicsCID__i_; +text: .text%Unsafe_SetMemory; +text: .text%__1cCosIjvm_path6Fpci_v_; +text: .text%__1cJTimeStamp2t6M_v_; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_2i_v_; +text: .text%__1cLconvI2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorKpass_float6M_v_; +text: .text%__1cISubFNodeJideal_reg6kM_I_; +text: .text%__1cNGrowableArray4CpnIPerfData__Egrow6Mi_v_; +text: .text%__1cMSysClassPathNreset_item_at6Mi_v_; +text: .text%__1cFStateM_sub_Op_LogD6MpknENode__v_; +text: .text%__1cFTypeDFempty6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MpnMRegisterImpl_2i_v_; +text: .text%__1cJlookupOne6FpnHJNIEnv__pkcpnGThread__pnH_jclass__: jni.o; +text: .text%__1cLloadSSINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosGgetenv6Fpkcpci_i_; +text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl__v_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cISubFNodeGadd_id6kM_pknEType__; +text: .text%__1cJArgumentsMbuild_string6Fppcpkc_v_; +text: .text%__1cFStateM_sub_Op_SubD6MpknENode__v_; +text: .text%JVM_RegisterSignal; +text: .text%JVM_FindSignal; +text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o; +text: .text%__1cMorL_rRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_; +text: .text%__1cKConv2BNodeJideal_reg6kM_I_; +text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_; +text: .text%jni_GetDoubleArrayRegion: jni.o; +text: .text%__1cLloadSSDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveF2I_reg_stackNodeJnum_opnds6kM_I_; +text: .text%__1cJArgumentsObuild_jvm_args6Fpkc_v_; +text: .text%__1cOLibraryCallKitMpop_math_arg6M_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cFStateO_sub_Op_CMoveL6MpknENode__v_; +text: .text%__1cJArgumentsMadd_property6Fpkc_i_; +text: .text%__1cMVM_OperationNdoit_epilogue6M_v_; +text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_; +text: .text%__1cRaddI_mem_rRegNodeFreloc6kM_i_; +text: .text%__1cSInterpreterRuntimebKthrow_ArrayIndexOutOfBoundsException6FpnKJavaThread_pci_v_; +text: 
.text%__1cVMoveF2I_reg_stackNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableGfconst6Fi_v_; +text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_; +text: .text%__1cOtailjmpIndNodeHtwo_adr6kM_I_; +text: .text%__1cQmulI_mem_immNodeFreloc6kM_i_; +text: .text%__1cNincI_rRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNVM_DeoptimizeEdoit6M_v_; +text: .text%__1cLMoveF2INodeLbottom_type6kM_pknEType__; +text: .text%__1cUConstantOopReadValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cRaddI_mem_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMdivD_immNodeKconst_size6kM_i_; +text: .text%__1cMmulD_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cQObjectStartArrayKinitialize6MnJMemRegion__v_; +text: .text%__1cQObjectStartArraySset_covered_region6MnJMemRegion__v_; +text: .text%__1cMsubF_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cUGcThreadCountClosureJdo_thread6MpnGThread__v_; +text: .text%__1cNGrowableArray4CpnTDerivedPointerEntry__Egrow6Mi_v_; +text: .text%__1cOtailjmpIndNodeJnum_opnds6kM_I_; +text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_; +text: .text%__1cPGCMemoryManager2t6M_v_; +text: .text%__1cKGCStatInfo2t6Mi_v_; +text: .text%__1cTMaskFillerForNativeLpass_object6M_v_; +text: .text%__1cTMaskFillerForNativeJpass_long6M_v_; +text: .text%__1cJMarkSweepUAdjustPointerClosure2t6Mi_v_; +text: .text%__1cCosHrealloc6FpvL_1_; +text: .text%__1cCosWactive_processor_count6F_i_; +text: .text%__1cSestimate_path_freq6FpnENode__f_: loopnode.o; +text: .text%__1cCosNsigexitnum_pd6F_i_; +text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_; +text: .text%__1cOLibraryCallKitVinline_fp_conversions6MnMvmIntrinsicsCID__i_; +text: .text%__1cZcatch_cleanup_intra_block6FpnENode_1pnFBlock_ii_v_: lcm.o; +text: .text%__1cCosbCis_thread_cpu_time_supported6F_i_; +text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_; +text: .text%__1cNdefaultStreamMhas_log_file6M_i_; +text: .text%__1cNcmovL_memNodeRis_cisc_alternate6kM_i_; +text: .text%__1cRalign_object_size6Fl_l_; +text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_; +text: .text%__1cNstoreImmBNodeErule6kM_I_; +text: .text%__1cNstoreImmINodeErule6kM_I_; +text: .text%__1cLloadSSDNodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cFParseMjump_if_join6MpnENode_2_2_; +text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__; +text: .text%__1cLloadSSINodeZcheck_for_anti_dependence6kM_i_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_; +text: .text%__1cFParseRdo_multianewarray6M_v_; +text: .text%__1cMloadConDNodeGis_Con6kM_I_; +text: .text%jni_NewWeakGlobalRef: jni.o; +text: .text%__1cPfilename_to_pid6Fpkc_i_: perfMemory_solaris.o; +text: .text%__1cTis_directory_secure6Fpkc_i_: perfMemory_solaris.o; +text: .text%jni_CallStaticVoidMethodV: jni.o; +text: .text%jni_CallStaticBooleanMethod: jni.o; +text: .text%__1cNGrowableArray4CpnNmethodOopDesc__Egrow6Mi_v_; +text: .text%__1cRInvocationCounterMreinitialize6Fi_v_; +text: .text%__1cXpartialSubtypeCheckNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTconvF2I_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cUInterpreterGeneratorVgenerate_native_entry6Mi_pC_; +text: .text%__1cUInterpreterGeneratorLlock_method6M_v_; +text: .text%__1cNGrowableArray4CpC_Egrow6Mi_v_; +text: .text%__1cNGrowableArray4CL_Egrow6Mi_v_; +text: .text%__1cObox_handleNodeHsize_of6kM_I_; +text: 
.text%__1cPsarL_rReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIPSOldGenKinitialize6MnNReservedSpace_Lpkci_v_; +text: .text%__1cIPSOldGenYinitialize_virtual_space6MnNReservedSpace_L_v_; +text: .text%__1cIPSOldGenPinitialize_work6Mpkci_v_; +text: .text%__1cNdivI_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbCAbstractInterpreterGeneratorTgenerate_error_exit6Mpkc_pC_; +text: .text%__1cTAbstractInterpreterKinitialize6F_v_; +text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_; +text: .text%__1cOPSVirtualSpace2t6M_v_; +text: .text%__1cOPSVirtualSpaceKinitialize6MnNReservedSpace_L_i_; +text: .text%__1cZInterpreterMacroAssemblerSupdate_mdp_for_ret6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerPset_mdp_flag_at6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerWdispatch_only_noverify6MnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cKReflectionbFbasic_type_arrayklass_to_mirror6FpnMklassOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cMAdapterCache2t6M_v_; +text: .text%__1cSComputeAdapterInfoIdo_array6Mii_v_; +text: .text%__1cGatomll6Fpkcpx_i_: arguments.o; +text: .text%__1cJArgumentsRcheck_memory_size6Fxx_n0AJArgsRange__; +text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_; +text: .text%__1cYalign_to_allocation_size6FL_L_: heap.o; +text: .text%__1cJArgumentsRparse_memory_size6Fpkcpxx_n0AJArgsRange__; +text: .text%__1cJArgumentsXPropertyList_unique_add6FppnOSystemProperty_pkcpc_v_; +text: .text%__1cQAgentLibraryList2t6M_v_; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_; +text: .text%__1cMmulF_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJAssemblerGmovsbl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerGmovswl6MpnMRegisterImpl_2_v_; +text: .text%__1cLOptoRuntimebDlazy_c2i_adapter_generation_C6FpnKJavaThread__pC_; +text: .text%__1cLOptoRuntimeVgenerate_handler_blob6FpCi_pnNSafepointBlob__; +text: .text%__1cRaddL_mem_rRegNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJAssemblerGmovzwl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerFmovdq6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cRComputeEntryStackIdo_float6M_v_; +text: .text%__1cJAssemblerFcmovl6Mn0AJCondition_pnMRegisterImpl_nHAddress__v_; +text: .text%__1cSReferenceProcessor2t6MnJMemRegion_iii_v_; +text: .text%__1cJAssemblerEaddl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cOGenerateOopMapTadd_to_ref_init_set6Mi_v_; +text: .text%__1cJAssemblerEcmpq6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerHucomisd6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cJAssemblerFidivl6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerFidivq6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEcdql6M_v_; +text: .text%__1cJAssemblerEcdqq6M_v_; +text: .text%__1cJAssemblerEleal6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerDorq6MnHAddress_i_v_; +text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_; +text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_; +text: .text%__1cMGCTaskThreadDrun6M_v_; +text: .text%__1cMGCTaskThreadFstart6M_v_; +text: .text%__1cMGCTaskThread2t6MpnNGCTaskManager_II_v_; +text: .text%__1cJStubQdDueueOregister_queue6Fp0_v_; +text: .text%__1cISubFNodeDsub6kMpknEType_3_3_; +text: 
.text%__1cJAssemblerFxaddl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cNGCTaskManagerKset_thread6MIpnMGCTaskThread__v_; +text: .text%__1cJAssemblerHldmxcsr6MnHAddress__v_; +text: .text%__1cJAssemblerFxorps6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cKcastPPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMPeriodicTask2t6ML_v_; +text: .text%__1cMPeriodicTaskGenroll6M_v_; +text: .text%__1cPOopTaskQdDueueSetOregister_queue6MipnMOopTaskQdDueue__v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC222i_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_; +text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_; +text: .text%__1cNTemplateTableKinitialize6F_v_; +text: .text%__1cNTemplateTableGlconst6Fi_v_; +text: .text%__1cNTemplateTableGdconst6Fi_v_; +text: .text%__1cNTemplateTableDldc6Fi_v_; +text: .text%__1cNTemplateTableHcastore6F_v_; +text: .text%__1cPdouble_quadword6Fpxxx_0_: templateTable_amd64.o; +text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableSgetfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableUjvmti_post_field_mod6Fii_v_; +text: .text%__1cNTemplateTableSputfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableUinvokevirtual_helper6FpnMRegisterImpl_22_v_; +text: .text%__1cEMIN24CL_6FTA0_0_; +text: .text%__1cRCardTableModRefBSbCpar_chunk_heapword_alignment6F_L_; +text: .text%__1cOMacroAssemblerPcorrected_idivl6MpnMRegisterImpl__i_; +text: .text%__1cOMacroAssemblerPcorrected_idivq6MpnMRegisterImpl__i_; +text: .text%__1cLNamedThread2t6M_v_; +text: .text%__1cLNamedThreadIset_name6MpkcE_v_; +text: .text%__1cOMacroAssemblerQserialize_memory6MpnMRegisterImpl_22_v_; +text: .text%__1cIDivDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIDivDNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cFStatebB_sub_Op_PartialSubtypeCheck6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_DivI6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_DivD6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_ConvL2F6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_ConvL2D6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_ConvF2I6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_ConvD2F6MpknENode__v_; +text: .text%__1cRcheck_if_clipping6FpknKRegionNode_rpnGIfNode_5_i_: cfgnode.o; +text: .text%__1cWcheck_compare_clipping6FipnGIfNode_pnHConNode_rpnENode__i_: cfgnode.o; +text: .text%__1cIciObjectOis_array_klass6M_i_; +text: .text%__1cScompP_rReg_memNodeFreloc6kM_i_; +text: .text%__1cKCastPPNodeJideal_reg6kM_I_; +text: .text%__1cLMoveF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFTypeDJis_finite6kM_i_; +text: .text%__1cLMoveL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cTconvL2D_reg_memNodeFreloc6kM_i_; +text: .text%__1cMdivD_immNodeFreloc6kM_i_; +text: .text%__1cMmulF_memNodeFreloc6kM_i_; +text: .text%__1cMaddF_memNodeFreloc6kM_i_; +text: .text%__1cLConvF2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLConvF2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLConvD2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOcompP_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cKciTypeFlowLStateVectorRdo_multianewarray6MpnQciBytecodeStream__v_; +text: .text%__1cMorI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cRCollectorCounters2t6Mpkci_v_; +text: .text%Unsafe_CompareAndSwapObject; +text: 
.text%__1cNSafepointBlob2n6FLI_pv_; +text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNSafepointBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cINegFNodeJideal_reg6kM_I_; +text: .text%__1cMVirtualSpace2t6M_v_; +text: .text%__1cMVirtualSpaceKinitialize6MnNReservedSpace_L_i_; +text: .text%__1cHMatcherQconvL2FSupported6F_ki_; +text: .text%__1cLConvD2FNodeJideal_reg6kM_I_; +text: .text%__1cLConvF2INodeJideal_reg6kM_I_; +text: .text%__1cLConvL2DNodeJideal_reg6kM_I_; +text: .text%__1cLConvL2FNodeJideal_reg6kM_I_; +text: .text%__1cXPartialSubtypeCheckNodeJideal_reg6kM_I_; +text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_; +text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_; +text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_; +text: .text%__1cJAssemblerEshrq6MpnMRegisterImpl__v_; +text: .text%__1cMsubF_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerEsubq6MnHAddress_i_v_; +text: .text%__1cLOptoRuntimeIgenerate6FpnFciEnv__v_; +text: .text%__1cMmulD_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cYSurvivorMutableSpacePool2t6MpnKPSYoungGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassUoop_is_instanceKlass6kM_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o; +text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_; +text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_; +text: .text%__1cKNoopGCTaskQcreate_on_c_heap6F_p0_; +text: .text%__1cJArgumentsbSparse_java_tool_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cWResolveOopMapConflictsOreport_results6kM_i_; +text: .text%__1cMCodeHeapPool2t6MpnICodeHeap_pkci_v_; +text: .text%__1cJAssemblerFxchgl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJArgumentsbNparse_java_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJAssemblerFxchgq6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJArgumentsVfinalize_vm_init_args6FpnMSysClassPath_i_i_; +text: .text%__1cJArgumentsWparse_each_vm_init_arg6FpknOJavaVMInitArgs_pnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_; +text: .text%__1cJAssemblerIcmpxchgl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cINegFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJArgumentsZcheck_vm_args_consistency6F_i_; +text: .text%__1cICodeHeap2t6M_v_; +text: .text%__1cJArgumentsbOparse_java_compiler_environment_variable6F_v_; +text: .text%__1cHVM_ExitNset_vm_exited6F_i_; +text: .text%__1cICodeHeapHreserve6MLLL_i_; +text: .text%__1cQRelocationHolder2t6M_v_; +text: .text%__1cICodeHeapFclear6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o; +text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cJArgumentsSset_bytecode_flags6F_v_; +text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_; +text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_; +text: .text%__1cQno_shared_spaces6F_v_: arguments.o; +text: .text%__1cJArgumentsMget_property6Fpkc_2_; +text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_; +text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cJArgumentsVprocess_settings_file6Fpkcii_i_; +text: 
.text%__1cNGCTaskManagerKthreads_do6MpnNThreadClosure__v_; +text: .text%__1cNGCTaskManagerKinitialize6M_v_; +text: .text%__1cNGCTaskManager2t6MI_v_; +text: .text%__1cXSynchronizedGCTaskQdDueue2t6MpnLGCTaskQdDueue_pnFMutex__v_; +text: .text%__1cDhpiKinitialize6F_i_; +text: .text%__1cDhpiZinitialize_socket_library6F_i_; +text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_; +text: .text%__1cRInlineCacheBufferKinitialize6F_v_; +text: .text%__1cWInlineCacheBuffer_init6F_v_; +text: .text%__1cJAssemblerHclflush6MnHAddress__v_; +text: .text%__1cOAbstractICacheKinitialize6F_v_; +text: .text%__1cLGCTaskQdDueueQcreate_on_c_heap6F_p0_; +text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_; +text: .text%__1cHGCStats2t6M_v_; +text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_; +text: .text%__1cMSysClassPathPexpand_endorsed6M_v_; +text: .text%__1cMSysClassPath2T6M_v_; +text: .text%__1cLicache_init6F_v_; +text: .text%__1cYGCAdaptivePolicyCounters2t6MpkciipnSAdaptiveSizePolicy__v_; +text: .text%__1cHVM_ExitbJwait_for_threads_in_native_to_block6F_i_; +text: .text%__1cJAssemblerHstmxcsr6MnHAddress__v_; +text: .text%__1cJAssemblerFaddss6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerFsubss6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cTICacheStubGeneratorVgenerate_icache_flush6MppFpCii_i_v_; +text: .text%__1cMSysClassPath2t6Mpkc_v_; +text: .text%__1cJArgumentsWinit_system_properties6F_v_; +text: .text%__1cJAssemblerFmulss6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerFdivss6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cJAssemblerFaddsd6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_; +text: .text%__1cJAssemblerFsubsd6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cOchunkpool_init6F_v_; +text: .text%__1cJAssemblerFmulsd6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerFdivsd6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_; +text: .text%__1cJAssemblerGsqrtsd6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cHVM_ExitEdoit6M_v_; +text: .text%__1cRArgumentOopFinderDset6MinJBasicType__v_; +text: .text%__1cWAdjoiningVirtualSpaces2t6MnNReservedSpace_LLL_v_; +text: .text%__1cUAdjoiningGenerations2t6MnNReservedSpace_LLLLLLL_v_; +text: .text%__1cHOrLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cZCompiledArgumentOopFinderRhandle_oop_offset6M_v_; +text: .text%__1cJAssemblerFxorps6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerFxorpd6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cJAssemblerFxorpd6MpnRFloatRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerJcvtsi2ssl6MpnRFloatRegisterImpl_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerJcvtsi2ssq6MpnRFloatRegisterImpl_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerJcvtsi2sdl6MpnRFloatRegisterImpl_pnMRegisterImpl__v_; +text: .text%__1cFframebAoops_compiled_arguments_do6MnMsymbolHandle_ipknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cJAssemblerJcvtsi2sdq6MpnRFloatRegisterImpl_pnMRegisterImpl__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: adaptiveSizePolicy.o; +text: .text%__1cSAdaptiveSizePolicy2t6ML_v_; +text: .text%__1cFframebDoops_interpreted_arguments_do6MnMsymbolHandle_ipnKOopClosure__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: regmask.o; +text: .text%__1cJAssemblerKcvttss2sil6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cJAssemblerKcvttss2siq6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: indexSet.o; 
+text: .text%__1cJAssemblerKcvttsd2sil6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cFframeVinterpreter_frame_mdp6kM_pC_; +text: .text%__1cPvm_init_globals6F_v_; +text: .text%__1cJAssemblerKcvttsd2siq6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_; +text: .text%__1cQSystemDictionaryKmethods_do6FpFpnNmethodOopDesc__v_v_; +text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_; +text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_; +text: .text%__1cQSystemDictionarybDinitialize_basic_type_mirrors6FpnGThread__v_; +text: .text%__1cJAssemblerIcvtss2sd6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cJAssemblerIcvtsd2ss6MpnRFloatRegisterImpl_2_v_; +text: .text%__1cMinit_globals6F_i_; +text: .text%__1cMexit_globals6F_v_; +text: .text%__1cOMacroAssemblerKdecrementl6MpnMRegisterImpl_i_v_; +text: .text%__1cHVM_ExitEname6kM_pkc_; +text: .text%__1cKcastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o; +text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_; +text: .text%__1cMPeriodicTask2T5B6M_v_; +text: .text%__1cMPeriodicTaskLis_enrolled6kM_i_; +text: .text%__1cNMemoryServicebFadd_parallel_scavenge_heap_info6FpnUParallelScavengeHeap__v_; +text: .text%__1cMPeriodicTaskJdisenroll6M_v_; +text: .text%__1cSset_init_completed6F_v_; +text: .text%__1cMadapter_init6F_v_; +text: .text%__1cTI2CAdapterGeneratorKinitialize6F_v_; +text: .text%__1cNMemoryServiceXadd_psYoung_memory_pool6FpnKPSYoungGen_pnNMemoryManager_4_v_; +text: .text%__1cTC2IAdapterGeneratorKinitialize6F_v_; +text: .text%__1cOstackSlotPOperFclone6kM_pnIMachOper__; +text: .text%__1cObox_handleNodeFclone6kM_pnENode__; +text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_; +text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_amd64_pipeline.o; +text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_; +text: .text%__1cKmutex_init6F_v_; +text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cNTemplateTableF_goto6F_v_; +text: .text%__1cNTemplateTableGgoto_w6F_v_; +text: .text%__1cNTemplateTableFjsr_w6F_v_; +text: .text%__1cNTemplateTableDjsr6F_v_; +text: .text%__1cNinstanceKlassZrelease_C_heap_structures6M_v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl_3_v_; +text: .text%__1cFJNIidKdeallocate6Fp0_v_; +text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o; +text: .text%__1cStemplateTable_init6F_v_; +text: .text%__1cNTemplateTableNpd_initialize6F_v_; +text: .text%__1cSinstanceKlassKlassUoop_is_instanceKlass6kM_i_; +text: .text%__1cNTemplateTableDnop6F_v_; +text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_; +text: .text%__1cNTemplateTableLaconst_null6F_v_; +text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cKVM_VersionKinitialize6F_v_; +text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_; +text: .text%__1cPVM_Version_init6F_v_; +text: .text%__1cNTemplateTableGbipush6F_v_; +text: .text%__1cNTemplateTableGsipush6F_v_; +text: .text%__1cYVM_Version_StubGeneratorTgenerate_getPsrInfo6M_pC_; +text: 
.text%__1cNTemplateTableGldc2_w6F_v_; +text: .text%__1cNTemplateTableFiload6F_v_; +text: .text%__1cNTemplateTableLfast_iload26F_v_; +text: .text%__1cNTemplateTableKfast_iload6F_v_; +text: .text%__1cNTemplateTableFlload6F_v_; +text: .text%__1cNTemplateTableFfload6F_v_; +text: .text%__1cNTemplateTableFdload6F_v_; +text: .text%__1cNTemplateTableFaload6F_v_; +text: .text%__1cVInterfaceSupport_init6F_v_; +text: .text%__1cNTemplateTableKwide_iload6F_v_; +text: .text%__1cNTemplateTableKwide_lload6F_v_; +text: .text%__1cNTemplateTableKwide_fload6F_v_; +text: .text%__1cNTemplateTableKwide_dload6F_v_; +text: .text%__1cNTemplateTableKwide_aload6F_v_; +text: .text%__1cScheck_ThreadShadow6F_v_; +text: .text%__1cNTemplateTableGiaload6F_v_; +text: .text%__1cNTemplateTableGlaload6F_v_; +text: .text%__1cNTemplateTableGfaload6F_v_; +text: .text%__1cNTemplateTableGdaload6F_v_; +text: .text%__1cNTemplateTableGaaload6F_v_; +text: .text%__1cNTemplateTableGbaload6F_v_; +text: .text%__1cNTemplateTableGcaload6F_v_; +text: .text%__1cNTemplateTableMfast_icaload6F_v_; +text: .text%__1cNTemplateTableGsaload6F_v_; +text: .text%__1cSReferenceProcessorMinit_statics6F_v_; +text: .text%__1cXreferenceProcessor_init6F_v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl_33_v_; +text: .text%__1cURecompilationMonitorbGstart_recompilation_monitor_task6F_v_; +text: .text%__1cZInterpreterMacroAssemblerUdispatch_only_normal6MnITosState__v_; +text: .text%__1cNTemplateTableHaload_06F_v_; +text: .text%__1cNTemplateTableGistore6F_v_; +text: .text%__1cNTemplateTableGlstore6F_v_; +text: .text%__1cNTemplateTableGfstore6F_v_; +text: .text%__1cNTemplateTableGdstore6F_v_; +text: .text%__1cNTemplateTableGastore6F_v_; +text: .text%__1cNTemplateTableLwide_istore6F_v_; +text: .text%__1cNTemplateTableLwide_lstore6F_v_; +text: .text%__1cNTemplateTableLwide_fstore6F_v_; +text: .text%__1cNTemplateTableLwide_dstore6F_v_; +text: .text%__1cNTemplateTableLwide_astore6F_v_; +text: .text%__1cNTemplateTableHiastore6F_v_; +text: .text%__1cNTemplateTableHlastore6F_v_; +text: .text%__1cNTemplateTableHfastore6F_v_; +text: .text%__1cNTemplateTableHdastore6F_v_; +text: .text%__1cNTemplateTableHaastore6F_v_; +text: .text%__1cNTemplateTableHbastore6F_v_; +text: .text%__1cNMemoryServiceVadd_psOld_memory_pool6FpnIPSOldGen_pnNMemoryManager__v_; +text: .text%__1cNTemplateTableHsastore6F_v_; +text: .text%__1cZInterpreterMacroAssemblerbFset_method_data_pointer_for_bcp6M_v_; +text: .text%__1cKPSYoungGenUset_space_boundaries6MLL_v_; +text: .text%__1cKPSYoungGenbGcompute_initial_space_boundaries6M_v_; +text: .text%__1cKPSYoungGenPinitialize_work6M_v_; +text: .text%__1cKPSYoungGenKinitialize6MnNReservedSpace_L_v_; +text: .text%__1cNTemplateTableDpop6F_v_; +text: .text%__1cNTemplateTableEpop26F_v_; +text: .text%__1cNTemplateTableDdup6F_v_; +text: .text%__1cNTemplateTableGdup_x16F_v_; +text: .text%__1cNTemplateTableGdup_x26F_v_; +text: .text%__1cNTemplateTableEdup26F_v_; +text: .text%__1cNTemplateTableHdup2_x16F_v_; +text: .text%__1cNTemplateTableHdup2_x26F_v_; +text: .text%__1cNTemplateTableEswap6F_v_; +text: .text%__1cKPSYoungGenYinitialize_virtual_space6MnNReservedSpace_L_v_; +text: .text%__1cKPSYoungGen2t6MLLL_v_; +text: .text%__1cNTemplateTableEidiv6F_v_; +text: .text%__1cNTemplateTableEirem6F_v_; +text: .text%__1cNTemplateTableElmul6F_v_; +text: .text%__1cNTemplateTableEldiv6F_v_; +text: .text%__1cNTemplateTableElrem6F_v_; +text: .text%__1cNTemplateTableElshl6F_v_; +text: .text%__1cNTemplateTableElshr6F_v_; +text: 
.text%__1cNTemplateTableFlushr6F_v_; +text: .text%__1cRaddL_rReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRaddL_mem_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableEineg6F_v_; +text: .text%__1cNTemplateTableElneg6F_v_; +text: .text%__1cLVtableStubsKinitialize6F_v_; +text: .text%__1cNTemplateTableEfneg6F_v_; +text: .text%__1cNTemplateTableEdneg6F_v_; +text: .text%__1cNTemplateTableEiinc6F_v_; +text: .text%__1cNTemplateTableJwide_iinc6F_v_; +text: .text%__1cMincL_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableElcmp6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o; +text: .text%__1cKPSScavengeKinitialize6F_v_; +text: .text%__1cQinterpreter_init6F_v_; +text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cNMemoryServiceWadd_psPerm_memory_pool6FpnJPSPermGen_pnNMemoryManager__v_; +text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_; +text: .text%__1cNTemplateTableDret6F_v_; +text: .text%__1cNTemplateTableIwide_ret6F_v_; +text: .text%__1cNTemplateTableLtableswitch6F_v_; +text: .text%__1cNTemplateTableMlookupswitch6F_v_; +text: .text%__1cNTemplateTableRfast_linearswitch6F_v_; +text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_; +text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbEset_entry_points_for_all_bytes6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbCset_safepoints_for_all_bytes6M_v_; +text: .text%__1cOsalI_mem_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSPSPromotionManagerKinitialize6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o; +text: .text%__1cNTemplateTableIgetfield6Fi_v_; +text: .text%__1cNTemplateTableJgetstatic6Fi_v_; +text: .text%__1cNGrowableArray4CpnNMemoryManager__2t6Mii_v_; +text: .text%__1cNGrowableArray4CpnKMemoryPool__2t6Mii_v_; +text: .text%__1cNTemplateTableIputfield6Fi_v_; +text: .text%__1cNTemplateTableJputstatic6Fi_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psPromotionLAB.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o; +text: .text%__1cJPSPermGen2t6MnNReservedSpace_LLLLpkci_v_; +text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_; +text: .text%__1cIPSOldGen2t6MLLLpkci_v_; +text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_; +text: .text%__1cNTemplateTableNinvokespecial6Fi_v_; +text: .text%__1cNTemplateTableMinvokestatic6Fi_v_; +text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_; +text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_; +text: .text%__1cNTemplateTableE_new6F_v_; +text: .text%__1cNTemplateTableInewarray6F_v_; +text: .text%__1cNTemplateTableJanewarray6F_v_; +text: .text%__1cNTemplateTableLarraylength6F_v_; +text: .text%__1cNTemplateTableJcheckcast6F_v_; +text: .text%__1cNTemplateTableKinstanceof6F_v_; +text: .text%__1cNTemplateTableL_breakpoint6F_v_; +text: .text%__1cNTemplateTableGathrow6F_v_; +text: .text%__1cNTemplateTableMmonitorenter6F_v_; +text: .text%__1cNTemplateTableLmonitorexit6F_v_; +text: .text%__1cNTemplateTableEwide6F_v_; +text: .text%__1cNTemplateTableOmultianewarray6F_v_; +text: .text%__1cIPSOldGen2t6MnNReservedSpace_LLLLpkci_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: tenuredGeneration.o; +text: .text%__1cQvtableStubs_init6F_v_; +text: .text%__1cQaccessFlags_init6F_v_; +text: 
.text%__1cSInterpreterRuntimeYthrow_ClassCastException6FpnKJavaThread_pnHoopDesc__v_; +text: .text%__1cSInterpreterRuntimeSupdate_mdp_for_ret6FpnKJavaThread_i_v_; +text: .text%__1cNeventlog_init6F_v_; +text: .text%__1cOMacroAssemblerGc2bool6MpnMRegisterImpl__v_; +text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_; +text: .text%__1cFVTuneEexit6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o; +text: .text%__1cTPSAlwaysTrueClosure2t6M_v_: psMarkSweep.o; +text: .text%__1cXSignatureHandlerLibraryQset_handler_blob6F_pC_; +text: .text%__1cNGrowableArray4CpC_2t6Mii_v_; +text: .text%__1cNGrowableArray4CL_2t6Mii_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_StackOverflowError_handler6M_pC_; +text: .text%__1cOMacroAssemblerRsign_extend_short6MpnMRegisterImpl__v_; +text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_ClassCastException_handler6M_pC_; +text: .text%__1cGThreadWset_as_starting_thread6M_i_; +text: .text%__1cLPSMarkSweepKinitialize6F_v_; +text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o; +text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o; +text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o; +text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o; +text: .text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cHRetDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cNWatcherThread2t6M_v_; +text: .text%__1cNWatcherThreadDrun6M_v_; +text: .text%__1cNWatcherThreadFstart6F_v_; +text: .text%__1cNWatcherThreadEstop6F_v_; +text: .text%__1cOMacroAssemblerQsign_extend_byte6MpnMRegisterImpl__v_; +text: .text%__1cKJavaThread2t6M_v_; +text: .text%__1cHRetDataJfixup_ret6MinQmethodDataHandle__pC_; +text: .text%__1cKvtune_init6F_v_; +text: .text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cbAPSGCAdaptivePolicyCounters2t6MpkciipnUPSAdaptiveSizePolicy__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psAdaptiveSizePolicy.o; +text: .text%__1cKDictionaryKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_; +text: .text%__1cUInterpreterGeneratorTgenerate_math_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cUInterpreterGeneratorXgenerate_abstract_entry6M_pC_; +text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_; +text: .text%__1cKDictionaryKfree_entry6MpnPDictionaryEntry__v_; +text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_; +text: .text%__1cKDictionary2t6Mi_v_; +text: .text%__1cJBytecodesKinitialize6F_v_; +text: .text%__1cObytecodes_init6F_v_; +text: .text%__1cUPSAdaptiveSizePolicy2t6MLLLLLddI_v_; +text: .text%__1cJBytecodesNpd_initialize6F_v_; +text: .text%__1cHCompileRpd_compiler2_init6F_v_; +text: .text%__1cKC2CompilerKinitialize6M_v_; +text: .text%__1cFStateQ_sub_Op_TailJump6MpknENode__v_; +text: .text%__1cMorL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_throw_exception6M_v_; +text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cWinvocationCounter_init6F_v_; +text: .text%__1cQPlaceholderTable2t6Mi_v_; +text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_; +text: 
.text%__1cFStateL_sub_Op_OrL6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_NegF6MpknENode__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_amd64_expand.o; +text: .text%__1cQprint_statistics6F_v_; +text: .text%__1cLbefore_exit6FpnKJavaThread__v_; +text: .text%__1cFStateP_sub_Op_MoveL2D6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_MoveF2I6MpknENode__v_; +text: .text%__1cHvm_exit6Fi_v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_; +text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectPcompute_offsets6F_v_; +text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_; +text: .text%__1cOThreadCriticalKinitialize6F_v_; +text: .text%__1cRAllocateTLSOffset6F_v_: threadLS_solaris_amd64.o; +text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_; +text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cYsun_reflect_ConstantPoolPcompute_offsets6F_v_; +text: .text%__1cbIjava_security_AccessControlContextPcompute_offsets6F_v_; +text: .text%__1cQjava_lang_SystemPcompute_offsets6F_v_; +text: .text%__1cPjava_nio_BufferPcompute_offsets6F_v_; +text: .text%__1cFStateO_sub_Op_CMoveD6MpknENode__v_; +text: .text%__1cZsun_misc_AtomicLongCSImplPcompute_offsets6F_v_; +text: .text%__1cFStateO_sub_Op_CastPP6MpknENode__v_; +text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_; +text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_; +text: .text%__1cLJavaClassesPcompute_offsets6F_v_; +text: .text%__1cPGlobalTLABStats2t6M_v_; +text: .text%__1cQjavaClasses_init6F_v_; +text: .text%__1cMTailJumpNode2t6MpnENode_22222_v_; +text: .text%jni_ToReflectedMethod: jni.o; +text: .text%__1cSThreadLocalStorageEinit6F_v_; +text: .text%__1cNThreadServiceEinit6F_v_; +text: .text%__1cYjni_GetBooleanField_addr6F_pC_; +text: .text%__1cVjni_GetByteField_addr6F_pC_; +text: .text%__1cVjni_GetCharField_addr6F_pC_; +text: .text%__1cWjni_GetShortField_addr6F_pC_; +text: .text%__1cUjni_GetIntField_addr6F_pC_; +text: .text%__1cVjni_GetLongField_addr6F_pC_; +text: .text%__1cWjni_GetFloatField_addr6F_pC_; +text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_; +text: .text%__1cXjni_GetDoubleField_addr6F_pC_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: phase.o; +text: .text%__1cJTimeStampMmilliseconds6kM_x_; +text: .text%__1cTConstantDoubleValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_; +text: .text%__1cKPerfMemoryUcreate_memory_region6FL_v_; +text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_L_; +text: .text%__1cUdelete_shared_memory6FpcL_v_: perfMemory_solaris.o; +text: .text%__1cUcreate_shared_memory6FL_pc_: perfMemory_solaris.o; +text: .text%__1cOtailjmpIndNodeFreloc6kM_i_; +text: .text%__1cSmmap_create_shared6FL_pc_: perfMemory_solaris.o; +text: .text%__1cETypeRInitialize_shared6FpnHCompile__v_; +text: .text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cbAcreate_sharedmem_resources6Fpkc1L_i_: perfMemory_solaris.o; +text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cRmake_user_tmp_dir6Fpkc_i_: 
perfMemory_solaris.o; +text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_; +text: .text%__1cLremove_file6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cWget_sharedmem_filename6Fpkci_pc_: perfMemory_solaris.o; +text: .text%__1cNget_user_name6Fi_pc_: perfMemory_solaris.o; +text: .text%__1cQget_user_tmp_dir6Fpkc_pc_: perfMemory_solaris.o; +text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cFciEnvXget_or_create_exception6MrpnI_jobject_nMsymbolHandle__pnKciInstance__; +text: .text%__1cMloadConFNodeGis_Con6kM_I_; +text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cKPerfMemoryHdestroy6F_v_; +text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cKPerfMemoryKinitialize6F_v_; +text: .text%__1cPperfMemory_exit6F_v_; +text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_; +text: .text%__1cPperfMemory_init6F_v_; +text: .text%__1cNGrowableArray4CpnIPerfData__JappendAll6Mpk2_v_; +text: .text%__1cMPerfDataListFclone6M_p0_; +text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cMciKlassKlassEmake6F_p0_; +text: .text%__1cMPerfDataList2t6Mp0_v_; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__; +text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__; +text: .text%__1cMmulD_memNodeFreloc6kM_i_; +text: .text%__1cPPerfDataManagerHdestroy6F_v_; +text: .text%__1cMsubD_immNodeFreloc6kM_i_; +text: .text%__1cMsubF_memNodeFreloc6kM_i_; +text: .text%lookupDirectBufferClasses: jni.o; +text: .text%__1cbDinitializeDirectBufferSupport6FpnHJNIEnv___i_: jni.o; +text: .text%__1cVquicken_jni_functions6F_v_; +text: .text%JNI_CreateJavaVM; +text: .text%__1cFParseWprofile_null_checkcast6M_v_; +text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_; +text: .text%__1cOsalI_mem_1NodeFreloc6kM_i_; +text: .text%__1cIciMethodMvtable_index6M_i_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_; +text: .text%__1cMmulI_memNodeFreloc6kM_i_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_; +text: .text%__1cMincL_memNodeFreloc6kM_i_; +text: .text%__1cRaddL_mem_rRegNodeFreloc6kM_i_; +text: .text%__1cRaddL_mem_rRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMincL_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jniFastGetField_amd64.o; +text: .text%__1cNcmovL_memNodeFreloc6kM_i_; +text: .text%__1cKJNIHandlesKinitialize6F_v_; +text: .text%__1cQjni_handles_init6F_v_; +text: .text%JVM_InitProperties; +text: .text%JVM_Halt; +text: .text%JVM_MaxMemory; +text: .text%JVM_GetClassDeclaredMethods; +text: .text%__1cKCMoveDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOsalI_mem_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHRetDataKis_RetData6M_i_; +text: .text%JVM_InitializeSocketLibrary; +text: .text%JVM_Socket; +text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_; +text: 
.text%__1cPOopTaskQdDueueSet2t6Mi_v_; +text: .text%__1cbEinitialize_converter_functions6F_v_; +text: .text%JVM_SupportsCX8; +text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_; +text: .text%__1cTcompilerOracle_init6F_v_; +text: .text%__1cOCompilerOracleRparse_from_string6Fpkc_v_; +text: .text%__1cOCompilerOraclePparse_from_file6F_v_; +text: .text%__1cHcc_file6F_pkc_: compilerOracle.o; +text: .text%__1cKTypeOopPtrEmake6FnHTypePtrDPTR_i_pk0_; +text: .text%__1cKTypeOopPtrFxdual6kM_pknEType__; +text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_; +text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_; +text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o; +text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cNGrowableArray4CpnMJvmtiEnvBase__2t6Mii_v_; +text: .text%__1cRJvmtiEventEnabled2t6M_v_; +text: .text%__1cRciArrayKlassKlassUis_array_klass_klass6M_i_; +text: .text%__1cRJvmtiEventEnabledFclear6M_v_; +text: .text%__1cNGrowableArray4CpnOCompilerThread__2t6Mii_v_; +text: .text%__1cFParseNfetch_monitor6MipnENode_2_2_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o; +text: .text%__1cNGrowableArray4CpnIciMethod__Egrow6Mi_v_; +text: .text%__1cNCompileBrokerQset_should_block6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_start6F_v_; +text: .text%__1cPGenerationSizerQinitialize_flags6M_v_; +text: .text%__1cUJvmtiEventControllerHvm_init6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_death6F_v_; +text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_; +text: .text%__1cUParallelScavengeHeapbCsupports_inline_contig_alloc6kM_i_; +text: .text%__1cUParallelScavengeHeapItop_addr6kM_ppnIHeapWord__; +text: .text%__1cNCompileBrokerQcompilation_init6FpnQAbstractCompiler__v_; +text: .text%__1cUParallelScavengeHeapIend_addr6kM_ppnIHeapWord__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o; +text: .text%__1cLJvmtiExportRenter_start_phase6F_v_; +text: .text%__1cUParallelScavengeHeapEheap6F_p0_; +text: .text%__1cUParallelScavengeHeapNgc_threads_do6kMpnNThreadClosure__v_; +text: .text%__1cUParallelScavengeHeapYpermanent_object_iterate6MpnNObjectClosure__v_; +text: .text%__1cLJvmtiExportQenter_live_phase6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_start6F_v_; +text: .text%__1cOcompiler2_init6F_v_; +text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_death6F_v_; +text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_; +text: .text%__1cUParallelScavengeHeapMmax_capacity6kM_L_; +text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_; +text: .text%__1cMaddF_regNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cUParallelScavengeHeapPpost_initialize6M_v_; +text: .text%__1cUParallelScavengeHeapKinitialize6M_i_; +text: .text%__1cHoopDescLheader_size6F_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o; +text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cPClassFileParserXjava_lang_Class_fix_pre6MpnOobjArrayHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cPClassFileParserYjava_lang_Class_fix_post6Mpi_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o; +text: 
.text%__1cNGrowableArray4CpnPJvmtiRawMonitor__2t6Mii_v_; +text: .text%__1cNGrowableArray4CpnPJvmtiRawMonitor__Uclear_and_deallocate6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o; +text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_; +text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_; +text: .text%__SLIP.DELETER__C: ostream.o; +text: .text%__1cWcompilationPolicy_init6F_v_; +text: .text%__1cMostream_exit6F_v_; +text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cTtypeArrayKlassKlassVoop_is_typeArrayKlass6kM_i_; +text: .text%__1cbCTwoGenerationCollectorPolicyMrem_set_name6M_nJGenRemSetEName__; +text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cQostream_init_log6F_v_; +text: .text%__1cIUniversePcheck_alignment6FLLpkc_v_; +text: .text%__1cIUniverseHgenesis6FpnGThread__v_; +text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_; +text: .text%__1cMostream_init6F_v_; +text: .text%__1cNdefaultStreamEinit6M_v_; +text: .text%__1cIUniverseUreinitialize_itables6F_v_; +text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cNuniverse_init6F_i_; +text: .text%__1cIUniversePinitialize_heap6F_i_; +text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_; +text: .text%__1cOuniverse2_init6F_v_; +text: .text%__1cSuniverse_post_init6F_v_; +text: .text%__1cIUniverseYcompute_base_vtable_size6F_v_; +text: .text%__1cCosXnon_memory_address_word6F_pc_; +text: .text%__1cCosGinit_26F_i_; +text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_; +text: .text%__1cCosEinit6F_v_; +text: .text%__1cCosHSolarisUsynchronization_init6F_v_; +text: .text%Unsafe_SetNativeLong; +text: .text%__1cCosHSolarisOlibthread_init6F_v_; +text: .text%__1cJLoadFNodeMstore_Opcode6kM_i_; +text: .text%__1cOisT2_libthread6F_i_; +text: .text%Unsafe_FreeMemory; +text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_; +text: .text%Unsafe_PageSize; +text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o; +text: .text%__1cNpriocntl_stub6FinGidtype_iipc_l_: os_solaris.o; +text: .text%__1cCosHSolarisRmpss_sanity_check6F_v_; +text: .text%__1cOLibraryCallKitWinline_native_hashcode6Mii_i_; +text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_; +text: .text%__1cCosOrelease_memory6FpcL_i_; +text: .text%__1cCosLsignal_wait6F_i_; +text: .text%JVM_RegisterUnsafeMethods; +text: .text%__1cVcheck_pending_signals6Fi_i_: os_solaris.o; +text: .text%__1cCosNsignal_notify6Fi_v_; +text: .text%__1cCosOsignal_init_pd6F_v_; +text: .text%__1cLClassLoaderQload_zip_library6F_v_; +text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_; +text: .text%__1cNmulI_rRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cLClassLoaderKinitialize6F_v_; +text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_; +text: .text%__1cCosHSolarisPinit_signal_mem6F_v_; +text: .text%__1cNCollectedHeap2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o; +text: .text%__1cQclassLoader_init6F_v_; +text: .text%__1cCosSget_temp_directory6F_pkc_; +text: .text%__1cVLoaderConstraintTable2t6Mi_v_; +text: .text%__1cCosbDallocate_thread_local_storage6F_i_; +text: .text%__1cOcodeCache_init6F_v_; +text: .text%__1cVverificationType_init6F_v_; +text: .text%__1cVverificationType_exit6F_v_; +text: .text%__1cQVerificationTypeKinitialize6F_v_; +text: .text%__1cQVerificationTypeIfinalize6F_v_; +text: 
.text%__1cJCodeCacheKinitialize6F_v_; +text: .text%__1cNIdealLoopTreeQsplit_outer_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cKfix_parent6FpnNIdealLoopTree_1_v_: loopnode.o; +text: .text%__1cCosHSolarisQsignal_sets_init6F_v_; +text: .text%__1cTClassLoadingServiceEinit6F_v_; +text: .text%__1cTClassLoadingServiceVnotify_class_unloaded6FpnNinstanceKlass_i_v_; +text: .text%__1cCosScreate_main_thread6FpnGThread__i_; +text: .text%__1cNIdealLoopTreeUmerge_many_backedges6MpnOPhaseIdealLoop__v_; +text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o; +text: .text%__1cRLowMemoryDetectorKinitialize6F_v_; +text: .text%__1cLmethodKlassOset_alloc_size6MI_v_; +text: .text%__1cNExceptionBlob2n6FLI_pv_; +text: .text%__1cNExceptionBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNExceptionBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cQUncommonTrapBlob2n6FLI_pv_; +text: .text%__1cQUncommonTrapBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cQUncommonTrapBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cSDeoptimizationBlob2n6FLI_pv_; +text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_; +text: .text%__1cSDeoptimizationBlob2t6MpnKCodeBuffer_ipnJOopMapSet_iiii_v_; +text: .text%__1cRLowMemoryDetectorUhas_pending_requests6F_i_; +text: .text%__1cCosbDinit_system_properties_values6F_v_; +text: .text%__1cCosHSolarisWinitialize_system_info6F_v_; +text: .text%__1cCosPphysical_memory6F_X_; +text: .text%__1cMFastLockNodeLis_FastLock6kM_pk0_; +text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_; +text: .text%__1cXLowMemoryDetectorThreadbCis_hidden_from_external_view6kM_i_; +text: .text%__1cSThreadLocalStorageHpd_init6F_v_; +text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: machnode.o; +text: .text%__1cPmanagement_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: cmsAdaptiveSizePolicy.o; +text: .text%__1cKManagementEinit6F_v_; +text: .text%__1cKManagementKinitialize6FpnGThread__v_; +text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o; +text: .text%__1cOmarksweep_init6F_v_; +text: .text%__1cCosXis_server_class_machine6F_i_; +text: .text%__1cJMarkSweepQKeepAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cNReservedSpace2t6MLLipc_v_; +text: .text%__1cJMarkSweepOIsAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cCosZset_memory_serialize_page6FpC_v_; +text: .text%__1cCosNset_boot_path6Fcc_i_; +text: .text%__1cJMarkSweepSFollowStackClosure2t6M_v_: markSweep.o; +text: .text%__1cNReservedSpaceUpage_align_size_down6FL_L_; +text: .text%__1cNReservedSpaceYallocation_align_size_up6FL_L_; +text: .text%__1cNGrowableArray4CpnKOSRAdapter__2t6Mii_v_; +text: .text%__1cXonStackReplacement_init6F_v_; +text: .text%__1cSOnStackReplacementKinitialize6F_v_; +text: .text%__1cJMarkSweepRFollowRootClosure2t6M_v_: markSweep.o; +text: .text%__1cCosLinit_random6Fl_v_; +text: .text%__1cHOrLNodeJideal_reg6kM_I_; +text: .text%__1cOvmStructs_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o; +text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_; +text: .text%__1cJMarkSweepSMarkAndPushClosure2t6M_v_: markSweep.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o; +text: .text%__1cQVMOperationQdDueue2t6M_v_; +text: .text%__1cCosGstrdup6Fpkc_pc_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodLiveness.o; +text: 
.text%__1cCosXterminate_signal_thread6F_v_; +text: .text%__1cCosLsignal_init6F_v_; +text: .text%__1cMsubD_immNodeKconst_size6kM_i_; +text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o; +text: .text%__1cOtailjmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cHMatcherVfind_callee_arguments6FpnNsymbolOopDesc_ipi_pnLOptoRegPair__; +text: .text%__1cIVMThreadGcreate6F_v_; +text: .text%__1cIVMThread2t6M_v_; +text: .text%__1cNSharedRuntimeUlookup_function_DD_D6FrpFpnHJNIEnv__pnH_jclass_dd_dpkc_v_; +text: .text%__1cNSharedRuntimebIinitialize_StrictMath_entry_points6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: sharedHeap.o; +text: .text%__1cIVMThreadDrun6M_v_; +text: .text%__1cWResolveOopMapConflictsUdo_potential_rewrite6MpnGThread__nMmethodHandle__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o; +text: .text%__1cNCellTypeStateImake_top6F_0_; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_2_v_; +text: .text%__1cNCellTypeStateLmake_bottom6F_0_; +text: .text%__1cNcmovL_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRcheck_basic_types6F_v_; +text: .text%__1cLOptoRuntimeYgenerate_arraycopy_stubs6F_v_; +text: .text%__1cSobjArrayKlassKlassUoop_is_objArrayKlass6kM_i_; +text: .text%__1cNSharedRuntimebBgenerate_class_cast_message6FpnKJavaThread_pkc_pc_; +text: .text%__1cNSharedRuntimebBgenerate_class_cast_message6Fpkc2_pc_; +text: .text%__1cLOptoRuntimebPgenerate_polling_page_return_handler_blob6F_v_; +text: .text%__1cIVMThreadEloop6M_v_; +text: .text%__1cLOptoRuntimebSgenerate_polling_page_safepoint_handler_blob6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: matcher.o; +text: .text%__1cLOptoRuntimeUsetup_exception_blob6F_v_; +text: .text%__1cLOptoRuntimeWfill_in_exception_blob6F_v_; +text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_; +text: .text%__1cNMemoryManagerbDget_psScavenge_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cNMemoryManagerbEget_psMarkSweep_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cQPSGenerationPool2t6MpnIPSOldGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cJAssemblerFimull6MpnMRegisterImpl_2_v_; +text: .text%__1cLOptoRuntimebBgenerate_uncommon_trap_blob6F_v_; +text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_; +text: .text%__1cQPSGenerationPool2t6MpnJPSPermGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cNRegisterSaverYrestore_result_registers6FpnOMacroAssembler__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o; +text: .text%__1cORuntimeServiceYrecord_application_start6F_v_; +text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__L_; +text: .text%__1cICarSpaceEinit6F_v_; +text: .text%__1cNcarSpace_init6F_v_; +text: .text%__1cORuntimeServiceEinit6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cRAlwaysTrueClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeSfetch_monitor_Type6F_pknITypeFunc__; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_i_v_; +text: .text%__1cLStatSamplerKinitialize6F_v_; +text: .text%__1cLStatSamplerGengage6F_v_; +text: .text%__1cLStatSamplerJdisengage6F_v_; +text: .text%__1cLStatSamplerHdestroy6F_v_; +text: .text%__1cSCommandLineFlagsExKuintxAtPut6FnXCommandLineFlagWithType_L_v_; +text: 
.text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__;
+text: .text%__1cLOptoRuntimeUmultianewarray5_Type6F_pknITypeFunc__;
+text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_;
+text: .text%__1cLStatSamplerUcreate_misc_perfdata6F_v_;
+text: .text%__1cLStatSamplerXcreate_sampled_perfdata6F_v_;
+text: .text%__1cJAssemblerDorq6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerEsarq6MpnMRegisterImpl__v_;
+text: .text%__1cLOptoRuntimeUmultianewarray4_Type6F_pknITypeFunc__;
+text: .text%__1cJAssemblerEshll6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerEshlq6MpnMRegisterImpl__v_;
+text: .text%__1cUEdenMutableSpacePool2t6MpnKPSYoungGen_pnMMutableSpace_pkcnKMemoryPoolIPoolType_i_v_;
+text: .text%__1cNStubGeneratorQgenerate_initial6M_v_;
+text: .text%__1cNStubGeneratorXgenerate_atomic_add_ptr6M_pC_;
+text: .text%__1cNStubGeneratorTgenerate_atomic_add6M_pC_;
+text: .text%__1cNStubGeneratorbCgenerate_atomic_cmpxchg_long6M_pC_;
+text: .text%__1cNStubGeneratorXgenerate_atomic_cmpxchg6M_pC_;
+text: .text%__1cNStubGeneratorYgenerate_atomic_xchg_ptr6M_pC_;
+text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_;
+text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_;
+text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_;
+text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_;
+text: .text%__1cNStubGeneratorMgenerate_all6M_v_;
+text: .text%__1cNStubGeneratorSgenerate_d2l_fixup6M_pC_;
+text: .text%__1cNStubGeneratorSgenerate_d2i_fixup6M_pC_;
+text: .text%__1cNStubGeneratorSgenerate_f2l_fixup6M_pC_;
+text: .text%__1cNStubGeneratorSgenerate_f2i_fixup6M_pC_;
+text: .text%__1cLOptoRuntimeUmultianewarray3_Type6F_pknITypeFunc__;
+text: .text%__1cNStubGeneratorTgenerate_verify_oop6M_pC_;
+text: .text%__1cNStubGeneratorVgenerate_verify_mxcsr6M_pC_;
+text: .text%__1cNStubGeneratorYgenerate_get_previous_fp6M_pC_;
+text: .text%__1cNStubGeneratorbIgenerate_handler_for_unsafe_access6M_pC_;
+text: .text%__1cMStubRoutinesLinitialize16F_v_;
+text: .text%__1cMStubRoutinesLinitialize26F_v_;
+text: .text%__1cSstubRoutines_init16F_v_;
+text: .text%__1cSstubRoutines_init26F_v_;
+text: .text%__1cLMoveL2DNodeJideal_reg6kM_I_;
+text: .text%__1cNGrowableArray4CpnTDerivedPointerEntry__2t6Mii_v_;
+text: .text%__1cLMoveF2INodeJideal_reg6kM_I_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o;
+text: .text%__1cNGrowableArray4CpnHMonitor__2t6Mii_v_;
+text: .text%__1cLOptoRuntimeUmultianewarray2_Type6F_pknITypeFunc__;
+text: .text%__1cLOptoRuntimeUmultianewarray1_Type6F_pknITypeFunc__;
+text: .text%__1cQDoNothingClosure2t6M_v_: oopMap.o;
+text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl__v_;
--- /dev/null 2009-07-29 04:07:07.141863912 +0100
+++ new/hotspot/make/solaris/makefiles/reorder_TIERED_i486 2009-08-01 04:16:57.931550112 +0100
@@ -0,0 +1,8395 @@
+data = R0x2000;
+text = LOAD ?RXO;
+
+
+text: .text%__1cQIndexSetIteratorEnext6M_I_: ifg.o;
+text: .text%__1cSPSPromotionManagerWcopy_to_survivor_space6MpnHoopDesc__2_;
+text: .text%__1cNinstanceKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cQIndexSetIteratorQadvance_and_next6M_I_;
+text: .text%__1cQIndexSetIteratorEnext6M_I_: indexSet.o;
+text: .text%__1cNSharedRuntimeElrem6Fxx_x_;
+text: .text%__1cCosOjavaTimeMillis6F_x_;
+text: .text%__1cJMarkSweepO_mark_and_push6FppnHoopDesc__v_;
+text: .text%__1cQIndexSetIteratorEnext6M_I_: chaitin.o;
+text: .text%__1cNSharedRuntimeEldiv6Fxx_x_;
+text: 
.text%__1cIPhaseIFGIadd_edge6MII_i_; +text: .text%__1cQIndexSetIterator2t6MpnIIndexSet__v_; +text: .text%__1cIMachNodeNrematerialize6kM_i_; +text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMOopTaskQdDueueKpop_global6MrpnHoopDesc__i_; +text: .text%__1cPOopTaskQdDueueSetPsteal_best_of_26MipirpnHoopDesc__i_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: live.o; +text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cIIndexSetLalloc_block6M_pn0AIBitBlock__; +text: .text%__1cHRegMaskFis_UP6kM_i_; +text: .text%__1cIMachNodeHis_Mach6M_p0_: ad_i486_misc.o; +text: .text%__1cDLRGOcompute_degree6kMr0_i_; +text: .text%__1cIIndexSetWalloc_block_containing6MI_pn0AIBitBlock__; +text: .text%__1cRMachSpillCopyNodeMis_SpillCopy6M_p0_: ad_i486.o; +text: .text%__1cENodeEjvms6kM_pnIJVMState__; +text: .text%__1cIMachNodeJideal_reg6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: classes.o; +text: .text%__1cJMarkSweepPmark_and_follow6FppnHoopDesc__v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: ad_i486_misc.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: classes.o; +text: .text%__1cHRegMaskJis_bound16kM_i_; +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: classes.o; +text: .text%__1cENodeHis_Copy6kM_I_: classes.o; +text: .text%__1cQObjectStartArrayMobject_start6MpnIHeapWord__2_: cardTableExtension.o; +text: .text%__1cRMachSpillCopyNodeHis_Copy6kM_I_: ad_i486.o; +text: .text%__1cNobjArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cENodeHis_Copy6kM_I_: ad_i486_misc.o; +text: .text%__1cETypeDcmp6Fkpk03_i_; +text: .text%__1cOPhaseIdealLoopOidom_no_update6kMpnENode__2_: loopnode.o; +text: .text%__1cHRegMaskJis_bound26kM_i_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: ad_i486_misc.o; +text: .text%__1cHRegMaskESize6kM_I_; +text: .text%__1cNRelocIteratorEnext6M_i_: relocInfo.o; +text: .text%__1cJeRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cUGenericGrowableArrayLraw_at_grow6MipknEGrET__pv_; +text: .text%__1cJVectorSet2R6MI_rnDSet__; +text: .text%__1cIMachNodeHis_Mach6M_p0_: ad_i486.o; +text: .text%__1cXresource_allocate_bytes6FI_pc_; +text: .text%__1cDff16FI_i_; +text: .text%__1cJeRegPOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cIProjNodeHis_Proj6M_p0_; +text: .text%__1cENodeGis_CFG6kM_i_: classes.o; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopopts.o; +text: .text%__1cIIndexSetKinitialize6MI_v_; +text: .text%__1cMloadConINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPOopTaskQdDueueSetFsteal6MipirpnHoopDesc__i_; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopnode.o; +text: .text%__1cMloadConINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cHPhiNodeGis_Phi6M_p0_: cfgnode.o; +text: .text%__1cENodeGpinned6kM_i_: classes.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: ad_i486.o; +text: .text%__1cIIndexSetKfree_block6MI_v_; +text: .text%__1cIMachNodeGOpcode6kM_i_; +text: .text%__1cWPSScavengeRootsClosureGdo_oop6MppnHoopDesc__v_: psTasks.o; +text: .text%__1cITypeNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: ad_i486_misc.o; +text: .text%__1cENodeIout_grow6MI_v_; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_: callnode.o; +text: .text%__1cENodeHadd_req6Mp0_v_; 
+text: .text%__1cKTypeOopPtrFklass6kM_pnHciKlass__: type.o; +text: .text%__1cMPhaseChaitinTinterfere_with_live6MIpnIIndexSet__v_; +text: .text%__1cNRelocIteratorEnext6M_i_: nmethod.o; +text: .text%__1cETypeFuhash6Fkpk0_i_; +text: .text%__1cJCProjNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeNrematerialize6kM_i_: classes.o; +text: .text%__1cJloadPNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIrc_class6FnHOptoRegEName__nCRC__: ad_i486.o; +text: .text%__1cNMachIdealNodeErule6kM_I_: ad_i486.o; +text: .text%__1cKjmpDirNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cRMachSpillCopyNodeJideal_reg6kM_I_: ad_i486.o; +text: .text%__1cOlower_pressure6FpnDLRG_IpnFBlock_pI4_v_: ifg.o; +text: .text%__1cMget_live_bit6Fpii_i_: buildOopMap.o; +text: .text%__1cMPhaseChaitinLskip_copies6MpnENode__2_; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__: ad_i486_misc.o; +text: .text%__1cINodeHashLhash_delete6MpknENode__i_; +text: .text%__1cEDictGInsert6Mpv1i_1_; +text: .text%__1cICallNodeKmatch_edge6kMI_I_; +text: .text%__1cJMultiNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cHTypeIntCeq6kMpknEType__i_; +text: .text%__1cENodeGis_CFG6kM_i_: cfgnode.o; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_: ad_i486.o; +text: .text%__1cETypeJtype_dict6F_pnEDict__; +text: .text%__1cHPhiNodeGOpcode6kM_i_; +text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cENodeHdel_out6Mp0_v_: matcher.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: classes.o; +text: .text%__1cFArenaIcontains6kMpkv_i_; +text: .text%__1cINodeHashQhash_find_insert6MpnENode__2_; +text: .text%__1cKRegionNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cIPhaseIFGQeffective_degree6kMI_i_; +text: .text%__1cIProjNodeGis_CFG6kM_i_; +text: .text%__1cIUniverseMnon_oop_word6F_pv_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: classes.o; +text: .text%__1cIPhaseIFGLremove_node6MI_pnIIndexSet__; +text: .text%__1cIPhaseIFGJre_insert6MI_v_; +text: .text%__1cJraw_score6Fdd_d_: chaitin.o; +text: .text%__1cDLRGFscore6kM_d_; +text: .text%__1cJMarkSweepOIsAliveClosureLdo_object_b6MpnHoopDesc__i_: markSweep.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: classes.o; +text: .text%__1cETypeIhashcons6M_pk0_; +text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_; +text: .text%__1cENodeEhash6kM_I_; +text: .text%__1cIProjNodeGpinned6kM_i_; +text: .text%__1cMloadConPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMPhaseChaitinKelide_copy6MpnENode_ipnFBlock_rnJNode_List_6i_i_; +text: .text%__1cHNTarjanEEVAL6M_p0_; +text: .text%__1cMloadConPNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: classes.o; +text: .text%__1cQIndexSetIteratorEnext6M_I_: coalesce.o; +text: .text%__1cWShouldNotReachHereNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cMMachCallNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cMset_live_bit6Fpii_v_: buildOopMap.o; +text: .text%__1cHCompileRvalid_bundle_info6MpknENode__i_; +text: .text%__1cOPhaseIdealLoopUbuild_loop_late_post6MpnENode_pk0_v_; +text: .text%__1cIProjNodeGOpcode6kM_i_; +text: .text%__1cENodeHdel_out6Mp0_v_: phaseX.o; +text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJPhaseLiveLadd_liveout6MpnFBlock_IrnJVectorSet__v_; +text: .text%__1cDfh16FI_i_; +text: .text%__1cRMachSpillCopyNodeLbottom_type6kM_pknEType__: 
ad_i486.o; +text: .text%__1cMPhaseChaitinMchoose_color6MrnDLRG_i_nHOptoRegEName__; +text: .text%__1cNIdealLoopTreeJis_member6kMpk0_i_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: oopMap.o; +text: .text%__1cENodeHis_Copy6kM_I_: cfgnode.o; +text: .text%__1cIMachNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cIConINodeGOpcode6kM_i_; +text: .text%__1cGIfNodeGOpcode6kM_i_; +text: .text%__1cOtypeArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cHTypePtrEhash6kM_i_; +text: .text%__1cMPhaseChaitinQis_high_pressure6MpnFBlock_pnDLRG_I_i_; +text: .text%__1cENode2t6MI_v_; +text: .text%__1cMPhaseChaitinKbias_color6MrnDLRG_i_nHOptoRegEName__; +text: .text%__1cENodeMcisc_operand6kM_i_: classes.o; +text: .text%__1cMOopTaskQdDueueOpop_local_slow6MInOTaskQdDueueSuperDAge__i_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: typeArrayKlass.o; +text: .text%JVM_ArrayCopy; +text: .text%__1cOtypeArrayKlassQoop_is_typeArray6kM_i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cOPhaseIdealLoopYsplit_if_with_blocks_pre6MpnENode__2_; +text: .text%__1cOPhaseIdealLoopZsplit_if_with_blocks_post6MpnENode__v_; +text: .text%__1cETypeEmeet6kMpk0_2_; +text: .text%__1cETypeLisa_oop_ptr6kM_i_; +text: .text%__1cFArenaIArealloc6MpvII_1_; +text: .text%__1cRMachSpillCopyNodeKin_RegMask6kMI_rknHRegMask__: ad_i486.o; +text: .text%__1cKTypeOopPtrEhash6kM_i_; +text: .text%__1cRMachSpillCopyNodeLout_RegMask6kM_rknHRegMask__: ad_i486.o; +text: .text%__1cIMachNodeMcisc_operand6kM_i_: ad_i486.o; +text: .text%__1cIMachNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: ad_i486_misc.o; +text: .text%__1cKIfTrueNodeGOpcode6kM_i_; +text: .text%__1cIAddPNodeGOpcode6kM_i_; +text: .text%__1cENodeHdel_out6Mp0_v_: graphKit.o; +text: .text%__1cPDictionaryEntrybDprotection_domain_set_oops_do6MpnKOopClosure__v_: dictionary.o; +text: .text%__1cHTypeIntEhash6kM_i_; +text: .text%__1cSPSPromotionManagerUflush_prefetch_queue6M_v_: psPromotionManager.o; +text: .text%__1cMMachProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cMPhaseIterGVNNtransform_old6MpnENode__2_; +text: .text%__1cMMachProjNodeGOpcode6kM_i_; +text: .text%__1cETypeJsingleton6kM_i_; +text: .text%__1cRMachSpillCopyNodePoper_input_base6kM_I_: ad_i486.o; +text: .text%__1cJleaP8NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJleaP8NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMclr_live_bit6Fpii_v_: buildOopMap.o; +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: cfgnode.o; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__: ad_i486.o; +text: .text%__1cIMachNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cJPhaseLiveGgetset6MpnFBlock__pnIIndexSet__; +text: .text%__1cHConNodeGOpcode6kM_i_; +text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cJloadINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSis_single_register6FI_i_: postaloc.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: relocInfo.o; +text: .text%__1cILRG_ListGextend6MII_v_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: classes.o; +text: .text%__1cLIfFalseNodeGOpcode6kM_i_; +text: .text%__1cECopyYconjoint_words_to_higher6FpnIHeapWord_2I_v_: node.o; +text: .text%__1cHTypeIntJsingleton6kM_i_; +text: .text%__1cSCallStaticJavaNodeGOpcode6kM_i_; +text: .text%__1cMPhaseIterGVNWadd_users_to_worklist06MpnENode__v_; +text: 
.text%__1cENodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopEsort6MpnNIdealLoopTree_2_2_; +text: .text%__1cJPhaseLiveLadd_liveout6MpnFBlock_pnIIndexSet_rnJVectorSet__v_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: cfgnode.o; +text: .text%__1cMMutableSpaceMcas_allocate6MI_pnIHeapWord__; +text: .text%__1cHNTarjanICOMPRESS6M_v_; +text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cIciObjectGequals6Mp0_i_; +text: .text%__1cKNode_ArrayGinsert6MIpnENode__v_; +text: .text%__1cKTypeOopPtrCeq6kMpknEType__i_; +text: .text%__1cIBoolNodeGOpcode6kM_i_; +text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__; +text: .text%__1cENodeEgrow6MI_v_; +text: .text%__1cHTypePtrCeq6kMpknEType__i_; +text: .text%__1cYCallStaticJavaDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNRelocIteratorEnext6M_i_: codeBlob.o; +text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_; +text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_; +text: .text%__1cJeRegIOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cLemit_opcode6FrnKCodeBuffer_i_v_; +text: .text%__1cMMachProjNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cENodeNrematerialize6kM_i_: cfgnode.o; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: ad_i486_misc.o; +text: .text%__1cKup_one_dom6FpnENode__1_: ifnode.o; +text: .text%__1cJMultiNodeIis_Multi6M_p0_; +text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cIIndexSetKinitialize6MIpnFArena__v_; +text: .text%__1cJPhaseLiveKgetfreeset6M_pnIIndexSet__; +text: .text%__1cHnmethodbHfollow_root_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_ppnHoopDesc_iri_v_; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: classes.o; +text: .text%__1cMMachProjNodeLout_RegMask6kM_rknHRegMask__: classes.o; +text: .text%__1cOPhaseIdealLoopUbuild_loop_tree_impl6MpnENode_i_i_; +text: .text%__1cFState2T6M_v_; +text: .text%__1cIParmNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cHTypeInt2t6Miii_v_; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: ad_i486_misc.o; +text: .text%__1cECopyXconjoint_words_to_lower6FpnIHeapWord_2I_v_: node.o; +text: .text%__1cEUTF8Enext6FpkcpH_pc_; +text: .text%__1cHemit_rm6FrnKCodeBuffer_iii_v_; +text: .text%__1cIciObjectEhash6M_i_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: typeArrayKlass.o; +text: .text%__1cKRegionNodeGOpcode6kM_i_; +text: .text%__1cWNode_Backward_IteratorEnext6M_pnENode__; +text: .text%__1cIMachNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopOget_early_ctrl6MpnENode__2_; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: typeArrayKlass.o; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cUParallelScavengeHeapVlarge_typearray_limit6M_I_: parallelScavengeHeap.o; +text: .text%__1cOPhaseIdealLoopOset_early_ctrl6MpnENode__v_; +text: .text%__1cHPhiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: cfgnode.o; +text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cHNTarjanELINK6Mp01_v_; +text: .text%__1cENodeHdel_out6Mp0_v_: node.o; +text: .text%__1cLuse_dom_lca6FpnFBlock_pnENode_3rnLBlock_Array__1_: gcm.o; +text: 
.text%__1cIPhaseGVNJtransform6MpnENode__2_; +text: .text%__1cIIndexSetFclear6M_v_: live.o; +text: .text%__1cOPSPromotionLABKinitialize6MnJMemRegion__v_; +text: .text%__1cOPSPromotionLABFflush6M_v_; +text: .text%__1cITypeNodeEhash6kM_I_; +text: .text%__1cJVectorSet2F6kMI_i_; +text: .text%__1cJPhaseLiveHfreeset6MpknFBlock__v_; +text: .text%__1cENodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOPhaseIdealLoopThas_local_phi_input6MpnENode__2_; +text: .text%__1cIBoolNodeHis_Bool6M_p0_: subnode.o; +text: .text%__1cTleaPIdxScaleOffNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNPhaseRegAllocUreg2offset_unchecked6kMnHOptoRegEName__i_; +text: .text%__1cNPhaseRegAllocKreg2offset6kMnHOptoRegEName__i_; +text: .text%__1cTleaPIdxScaleOffNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachNode2t6M_v_; +text: .text%__1cITypeNodeJideal_reg6kM_I_; +text: .text%__1cLTypeInstPtrEhash6kM_i_; +text: .text%__1cFStateRMachOperGenerator6MipnIMachNode_pnHCompile__pnIMachOper__; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: classes.o; +text: .text%__1cHdom_lca6FpnFBlock_1_1_: gcm.o; +text: .text%__1cENodeNis_block_proj6kM_pk0_; +text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_; +text: .text%__1cPClassFileParserOcheck_property6MipkcipnGThread__v_; +text: .text%__1cKRegionNodeGpinned6kM_i_: classes.o; +text: .text%__1cFMutexGunlock6M_v_; +text: .text%__1cHPhiNodeGpinned6kM_i_: cfgnode.o; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: ad_i486.o; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: ad_i486.o; +text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__; +text: .text%__1cFStateDDFA6MipknENode__i_; +text: .text%__1cFState2t6M_v_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: classes.o; +text: .text%__1cKRelocationLunpack_data6M_v_: ad_i486.o; +text: .text%__1cHRegMaskMSmearToPairs6M_v_; +text: .text%__1cKjmpDirNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIIndexSet2t6Mp0_v_; +text: .text%__1cENodeFclone6kM_p0_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: cfgnode.o; +text: .text%__1cRmethodDataOopDescHdata_at6Mi_pnLProfileData__; +text: .text%__1cETypeFxmeet6kMpk0_2_; +text: .text%__1cOPhaseIdealLoopZremix_address_expressions6MpnENode__2_; +text: .text%__1cENodeKmatch_edge6kMI_I_; +text: .text%__1cIPhaseCCPOtransform_once6MpnENode__2_; +text: .text%__1cICallNodeLbottom_type6kM_pknEType__; +text: .text%__1cKTypeAryPtrEhash6kM_i_; +text: .text%__1cENodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cbAfinal_graph_reshaping_impl6FpnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cOPhaseIdealLoopNget_late_ctrl6MpnENode_2_2_; +text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cOMethodLivenessKBasicBlockXcompute_gen_kill_single6MpnQciByteCodeStream__v_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: callnode.o; +text: .text%__1cICallNodeHis_Call6M_p0_: callnode.o; +text: .text%__1cRMachSpillCopyNodeOimplementation6kMpnKCodeBuffer_pnNPhaseRegAlloc_i_I_; +text: .text%__1cIProjNodeEhash6kM_I_; +text: .text%__1cHemit_d86FrnKCodeBuffer_i_v_; +text: .text%__1cENodeIIdentity6MpnOPhaseTransform__p0_; +text: .text%__1cRMachSafePointNodeEjvms6kM_pnIJVMState__: ad_i486_misc.o; +text: .text%__1cENodeFIdeal6MpnIPhaseGVN_i_p0_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfo.o; +text: .text%__1cGIfNodeGpinned6kM_i_: classes.o; +text: 
.text%__1cRSignatureIteratorGexpect6Mc_v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: memnode.o; +text: .text%__1cENodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: instanceKlass.o; +text: .text%__1cNeFlagsRegOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cQUnique_Node_ListGremove6MpnENode__v_; +text: .text%__1cICmpPNodeGOpcode6kM_i_; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: location.o; +text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cHPhiNodeEhash6kM_I_; +text: .text%__1cRmethodDataOopDescJnext_data6MpnLProfileData__2_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: callnode.o; +text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cTCreateExceptionNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cGciTypeEmake6FnJBasicType__p0_; +text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: cfgnode.o; +text: .text%__1cOis_diamond_phi6FpnENode__i_: cfgnode.o; +text: .text%__1cHCompileMFillLocArray6MpnENode_pnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: ad_i486.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: ad_i486.o; +text: .text%__1cENodeQIdeal_DU_postCCP6MpnIPhaseCCP__p0_; +text: .text%__1cLTypeInstPtrCeq6kMpknEType__i_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: memnode.o; +text: .text%__1cMPhaseChaitinSuse_prior_register6MpnENode_I2pnFBlock_rnJNode_List_6_i_; +text: .text%__1cLimpl_helper6FpnKCodeBuffer_iiiiipkci_i_: ad_i486.o; +text: .text%__1cKTypeOopPtrJsingleton6kM_i_; +text: .text%__1cENodeHsize_of6kM_I_; +text: .text%__1cMOopMapStreamJfind_next6M_v_; +text: .text%__1cKJavaThreadPcook_last_frame6MnFframe__1_; +text: .text%__1cIAddINodeGOpcode6kM_i_; +text: .text%__1cIGraphKitHstopped6M_i_; +text: .text%__1cJStartNodeLbottom_type6kM_pknEType__; +text: .text%__1cGIfNodeFis_If6M_p0_: classes.o; +text: .text%__1cMPhaseChaitinHnew_lrg6MpknENode_I_v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: callnode.o; +text: .text%__1cFMutexElock6MpnGThread__v_; +text: .text%__1cTconstantPoolOopDescbAname_and_type_ref_index_at6Mi_i_; +text: .text%__1cENodeSremove_dead_region6MpnIPhaseGVN_i_i_; +text: .text%__1cKTypeOopPtrLxadd_offset6kMi_i_; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_: coalesce.o; +text: .text%__1cHMatcherKLabel_Root6MpknENode_pnFState_p16_6_; +text: .text%__1cJAssemblerOlocate_operand6FpCn0AMWhichOperand__1_; +text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_; +text: .text%__1cHTypeIntFxmeet6kMpknEType__3_; +text: .text%__1cKRelocationSpd_address_in_code6M_ppC_; +text: .text%__1cNSafePointNodeGpinned6kM_i_: callnode.o; +text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__; +text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cICmpINodeGOpcode6kM_i_; +text: .text%__1cFBlockLis_uncommon6kMrnLBlock_Array__i_; +text: .text%__1cKRelocationNunpack_2_ints6Mri1_v_: relocInfo.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_i486_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: classes.o; +text: .text%JVM_CurrentTimeMillis; +text: .text%__1cSCallStaticJavaNodeEhash6kM_I_: callnode.o; +text: .text%__1cMPhaseIterGVNVadd_users_to_worklist6MpnENode__v_; +text: 
.text%__1cIMachNodeSalignment_required6kM_i_: ad_i486_misc.o; +text: .text%__1cOoop_RelocationLunpack_data6M_v_; +text: .text%__1cIHaltNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: classes.o; +text: .text%__1cNSharedRuntimeDd2i6Fd_i_; +text: .text%__1cGcmpkey6Fpkv1_i_; +text: .text%__1cMMergeMemNodeGOpcode6kM_i_; +text: .text%__1cIMachNodeIpeephole6MpnFBlock_ipnNPhaseRegAlloc_ri_p0_; +text: .text%__1cJCProjNodeGis_CFG6kM_i_: cfgnode.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: cfgnode.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: classes.o; +text: .text%__1cPciObjectFactoryEfind6MpnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cOmatch_into_reg6FpnENode_iii1_i_: matcher.o; +text: .text%__1cENodeHdel_out6Mp0_v_: reg_split.o; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfoRec.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_i486.o; +text: .text%__1cIMachNodeSalignment_required6kM_i_: ad_i486.o; +text: .text%__1cINodeHashLhash_insert6MpnENode__v_; +text: .text%__1cKTypeAryPtrCeq6kMpknEType__i_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: callnode.o; +text: .text%__1cOindOffset8OperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cPciObjectFactoryLis_found_at6MipnHoopDesc_pnNGrowableArray4CpnIciObject____i_; +text: .text%__1cLTypeInstPtr2t6MnHTypePtrDPTR_pnHciKlass_ipnIciObject_i_v_; +text: .text%__1cENode2t6Mp0_v_; +text: .text%__1cIimmIOperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cLSymbolTableGlookup6MipkciI_pnNsymbolOopDesc__; +text: .text%__1cENodeFis_If6M_pnGIfNode__: classes.o; +text: .text%__1cIProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cETypeKhas_memory6kM_i_; +text: .text%__1cNloadRangeNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLTypeInstPtrEmake6FnHTypePtrDPTR_pnHciKlass_ipnIciObject_i_pk0_; +text: .text%__1cKjmpConNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIJVMStateIof_depth6kMi_p0_; +text: .text%__1cKNode_ArrayEgrow6MI_v_; +text: .text%__1cJStartNodeGpinned6kM_i_: callnode.o; +text: .text%__1cRPSOldPromotionLABFflush6M_v_; +text: .text%__1cNCatchProjNodeGOpcode6kM_i_; +text: .text%__1cENodeGis_CFG6kM_i_: connode.o; +text: .text%__1cHMatcherKReduceOper6MpnFState_ipnIMachNode_rpnENode__v_; +text: .text%__1cWShouldNotReachHereNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIPhaseIFGMtest_edge_sq6kMII_i_; +text: .text%__1cGIfNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIConPNodeGOpcode6kM_i_; +text: .text%__1cHTypeIntEmake6Fiii_pk0_; +text: .text%__1cRMachSpillCopyNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFframeVoopmapreg_to_location6kMnFVMRegEName_pknLRegisterMap__ppnHoopDesc__; +text: .text%__1cHPhiNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cICallNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: instanceKlass.o; +text: .text%__1cJTypeTupleJsingleton6kM_i_; +text: .text%__1cJLoadPNodeGOpcode6kM_i_; +text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffer_ipnLOopRecorder_pkcii_v_; +text: .text%__1cLOopRecorder2t6MpnFArena__v_; +text: .text%__1cKCodeBuffer2T6M_v_; +text: .text%__1cKCodeBufferQalloc_relocation6MI_v_; +text: .text%__1cIAddPNodeKmatch_edge6kMI_I_; +text: .text%__1cIMachNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIMachNodeJemit_size6kMpnNPhaseRegAlloc__I_; +text: .text%__1cZPhaseConservativeCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cHCmpNodeGis_Cmp6kM_pk0_: classes.o; +text: 
.text%__1cFMutexElock6M_v_; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: instanceKlass.o; +text: .text%__1cHTypeIntEmake6Fi_pk0_; +text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__; +text: .text%__1cENodeRis_cisc_alternate6kM_i_: ad_i486.o; +text: .text%__1cENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNencode_RegMem6FrnKCodeBuffer_iiiiii_v_; +text: .text%__1cMloadConINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cILoadNodeEhash6kM_I_; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pnIciObject_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cENodeMcisc_operand6kM_i_: cfgnode.o; +text: .text%__1cJeRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cICmpUNodeGOpcode6kM_i_; +text: .text%__1cJHashtableLhash_symbol6Fpkci_I_: symbolTable.o; +text: .text%__1cFframeUis_interpreted_frame6kM_i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: cfgnode.o; +text: .text%__1cJCProjNodeEhash6kM_I_: classes.o; +text: .text%__1cHPhiNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__; +text: .text%__1cJMultiNodeEhash6kM_I_: classes.o; +text: .text%__1cENodeHdel_req6MI_v_; +text: .text%__1cHCompileJcan_alias6MpknHTypePtr_i_i_; +text: .text%__1cSPSPromotionManagerMdrain_stacks6M_v_; +text: .text%__1cETypeEhash6kM_i_; +text: .text%__1cLOptoRuntimeXdeoptimize_caller_frame6FpnKJavaThread_i_v_; +text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cIHaltNodeGOpcode6kM_i_; +text: .text%__1cZPhaseConservativeCoalesceJcopy_copy6MpnENode_2pnFBlock_I_i_; +text: .text%__1cENodeGis_CFG6kM_i_: subnode.o; +text: .text%__1cMPhaseIterGVNZremove_globally_dead_node6MpnENode__v_; +text: .text%__1cIParmNodeGOpcode6kM_i_; +text: .text%__1cIJVMStateLdebug_start6kM_I_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: classes.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: multnode.o; +text: .text%__1cGTarjanEEVAL6M_p0_; +text: .text%__1cOThreadCritical2T6M_v_; +text: .text%__1cOThreadCritical2t6M_v_; +text: .text%__1cRSignatureIteratorKparse_type6M_i_; +text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_; +text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_; +text: .text%__1cOPhaseIdealLoopbIdom_lca_for_get_late_ctrl_internal6MpnENode_22_2_; +text: .text%__1cTconstantPoolOopDescSklass_ref_index_at6Mi_i_; +text: .text%__1cOeFlagsRegUOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_: classes.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: cfgnode.o; +text: .text%__1cFBlockGselect6MrnJNode_List_rnLBlock_Array_pirnJVectorSet_IrnNGrowableArray4CI___pnENode__; +text: .text%__1cKMachIfNodeJis_MachIf6kM_pk0_: ad_i486_misc.o; +text: .text%__1cEDict2F6kMpkv_pv_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nmethod.o; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: multnode.o; +text: .text%__1cICodeHeapKfind_start6kMpv_1_; +text: .text%__1cKTypeAryPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cIMachNodeHtwo_adr6kM_I_: ad_i486.o; +text: .text%__1cHhashptr6Fpkv_i_; +text: .text%__1cIAddPNodeLbottom_type6kM_pknEType__; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cENodeHget_int6kM_i_; +text: .text%__1cMMachTypeNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cJCatchNodeGOpcode6kM_i_; +text: 
.text%__1cPClassFileStreamHskip_u16MipnGThread__v_; +text: .text%__1cOoop_RelocationJoop_value6M_pnHoopDesc__; +text: .text%__1cHConNodeGis_Con6kM_I_: classes.o; +text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__; +text: .text%__1cFBlockIis_Empty6kM_i_; +text: .text%__1cWThreadLocalAllocBufferFreset6M_v_; +text: .text%__1cENodeGis_Con6kM_I_: classes.o; +text: .text%__1cGBitMapUclear_range_of_words6MII_v_: bitMap.o; +text: .text%__1cGBitMapFclear6M_v_; +text: .text%__1cbFCompressedLineNumberWriteStreamKwrite_pair6Mii_v_; +text: .text%__1cOPhaseIdealLoopOidom_no_update6kMpnENode__2_: split_if.o; +text: .text%__1cFframeOis_entry_frame6kM_i_; +text: .text%__1cYDebugInformationRecorderLcheck_phase6Mn0AFPhase__v_; +text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cMMergeMemNodeLbottom_type6kM_pknEType__: memnode.o; +text: .text%__1cLLShiftINodeGOpcode6kM_i_; +text: .text%__1cRCardTableModRefBSEkind6M_nKBarrierSetEName__: cardTableExtension.o; +text: .text%__1cFBlockOcode_alignment6M_I_; +text: .text%__1cMPhaseChaitinLinsert_proj6MpnFBlock_IpnENode_I_v_; +text: .text%__1cKCastPPNodeGOpcode6kM_i_; +text: .text%__1cMMachCallNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cFStateRMachNodeGenerator6MipnHCompile__pnIMachNode__; +text: .text%__1cHMatcherKReduceInst6MpnFState_irpnENode__pnIMachNode__; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: oopMap.o; +text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_I_pnIHeapWord__; +text: .text%__1cENodeHis_Copy6kM_I_: memnode.o; +text: .text%__1cNsymbolOopDescGequals6kMpkci_i_; +text: .text%__1cUParallelScavengeHeapVunsafe_max_tlab_alloc6kM_I_; +text: .text%__1cJMultiNodeIproj_out6kMI_pnIProjNode__; +text: .text%__1cILoadNodeLbottom_type6kM_pknEType__; +text: .text%__1cIMachNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__: classes.o; +text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_; +text: .text%__1cENodeGpinned6kM_i_: connode.o; +text: .text%__1cWThreadLocalAllocBufferKinitialize6MpnIHeapWord_22_v_; +text: .text%__1cUParallelScavengeHeapRallocate_new_tlab6MI_pnIHeapWord__; +text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2I_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: collectedHeap.o; +text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: sharedHeap.o; +text: .text%__1cKSharedHeapXfill_region_with_object6FnJMemRegion__v_; +text: .text%__1cIBoolNodeEhash6kM_I_; +text: .text%__1cQciByteCodeStreamEjava6MnJBytecodesECode__2_; +text: .text%__1cSinstanceKlassKlassMoop_is_klass6kM_i_: instanceKlassKlass.o; +text: .text%__1cOindOffset8OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKjmpDirNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKCodeBufferIrelocate6MpCrknQRelocationHolder_i_v_; +text: .text%__1cGIfNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: subnode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: cfgnode.o; +text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_; +text: .text%__1cPClassFileParserbEparse_constant_pool_utf8_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMMachHaltNodeEjvms6kM_pnIJVMState__; +text: .text%__1cIMachNodeHis_Mach6M_p0_: machnode.o; +text: .text%__1cKjmpConNodeGpinned6kM_i_: 
ad_i486_misc.o; +text: .text%__1cUGenericGrowableArray2t6Mii_v_; +text: .text%__1cOPhaseIdealLoopOidom_no_update6kMpnENode__2_: loopopts.o; +text: .text%__1cNsymbolOopDescLas_C_string6kMpci_1_; +text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_; +text: .text%__1cHMatcherTReduceInst_Interior6MpnFState_ipnIMachNode_IrpnENode__I_; +text: .text%__1cENodeJis_Branch6kM_I_: ad_i486.o; +text: .text%__1cIAddPNodeHis_AddP6M_p0_: classes.o; +text: .text%__1cIMachNodeNis_MachEpilog6M_pnOMachEpilogNode__: ad_i486.o; +text: .text%__1cPSignatureStreamEnext6M_v_; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode__i_; +text: .text%__1cJloadSNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJPSPermGenSallocate_permanent6MI_pnIHeapWord__; +text: .text%__1cMMutableSpaceIallocate6MI_pnIHeapWord__; +text: .text%__1cUParallelScavengeHeapWpermanent_mem_allocate6MI_pnIHeapWord__; +text: .text%__1cHCompileRprobe_alias_cache6MpknHTypePtr__pn0APAliasCacheEntry__; +text: .text%__1cENodeIdestruct6M_v_; +text: .text%__1cMloadConINodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cIMachNodeNoperand_index6kMI_i_; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cKRegionNodeEhash6kM_I_: classes.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: multnode.o; +text: .text%__1cOMachReturnNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cGBitMapJset_union6M0_v_; +text: .text%__1cIMachNodeGExpand6MpnFState_rnJNode_List__p0_: ad_i486_misc.o; +text: .text%__1cIAddPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cMPhaseIterGVNKis_IterGVN6M_p0_: phaseX.o; +text: .text%__1cENodeOis_block_start6kM_i_; +text: .text%__1cPciInstanceKlassMis_interface6M_i_: ciInstanceKlass.o; +text: .text%__1cJeRegLOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cHPhiNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJTypeTupleEhash6kM_i_; +text: .text%__1cNCellTypeStateFmerge6kM0i_0_; +text: .text%__1cHPhiNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKjmpConNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFParseNdo_exceptions6M_v_; +text: .text%__1cFParsePdo_one_bytecode6M_v_; +text: .text%__1cFBlockJfind_node6kMpknENode__I_; +text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_; +text: .text%__1cLjmpConUNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cGBitMap2t6MpII_v_; +text: .text%__1cLOptoRuntimeFnew_C6FpnMklassOopDesc_pnKJavaThread__v_; +text: .text%method_compare: methodOop.o; +text: .text%__1cIHaltNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: classes.o; +text: .text%__1cKis_x2logic6FpnIPhaseGVN_pnENode__3_: cfgnode.o; +text: .text%__1cHAbsNodeLis_absolute6FpnIPhaseGVN_pnENode__4_; +text: .text%__1cJloadPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNLoadRangeNodeGOpcode6kM_i_; +text: .text%__1cLPhaseValuesGintcon6Mi_pnIConINode__; +text: .text%__1cHCompilePfind_alias_type6MpknHTypePtr_i_pn0AJAliasType__; +text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_; +text: .text%__1cKHandleMark2T6M_v_; +text: .text%__1cJloadLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOMachReturnNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cGTarjanICOMPRESS6M_v_; +text: .text%__1cTconstantPoolOopDescQsignature_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cHRegMaskMClearToPairs6M_v_; +text: 
.text%__1cENodeHdel_out6Mp0_v_: block.o; +text: .text%__1cENodeHdel_out6Mp0_v_: parse1.o; +text: .text%__1cMMachCallNodeLis_MachCall6M_p0_: ad_i486_misc.o; +text: .text%__1cNSafePointNodebBneeds_polling_address_input6F_i_; +text: .text%__1cIJVMStateJdebug_end6kM_I_; +text: .text%__1cIMachNodeKconst_size6kM_i_: ad_i486.o; +text: .text%__1cHSubNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJLoadINodeGOpcode6kM_i_; +text: .text%__1cPjava_lang_ClassMis_primitive6FpnHoopDesc__i_; +text: .text%__1cIMachNodeFreloc6kM_i_: ad_i486.o; +text: .text%__1cIProjNodeHsize_of6kM_I_; +text: .text%__1cHMatcherQis_save_on_entry6Mi_i_; +text: .text%__1cLBoxLockNodeNrematerialize6kM_i_: classes.o; +text: .text%__1cRMachSpillCopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPVirtualCallDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cHMatcherKmatch_tree6MpknENode__pnIMachNode__; +text: .text%__1cOindOffset8OperFscale6kM_i_: ad_i486.o; +text: .text%__1cNloadConI0NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIIndexSetSpopulate_free_list6F_v_; +text: .text%__1cLPCTableNodeGpinned6kM_i_: classes.o; +text: .text%__1cNnew_loc_value6FpnNPhaseRegAlloc_nHOptoRegEName_nILocationEType__pnNLocationValue__: output.o; +text: .text%__1cMloadConINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUGenericGrowableArray2t6MpnFArena_iipnEGrET__v_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: cfgnode.o; +text: .text%__1cENodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%JVM_ReleaseUTF; +text: .text%__1cKutf8_write6FpCH_0_: utf8.o; +text: .text%__1cKNode_ArrayGremove6MI_v_; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOis_range_check6FpnENode_r12ri_i_: ifnode.o; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: methodDataOop.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: multnode.o; +text: .text%__1cMPhaseIterGVNMsubsume_node6MpnENode_2_v_; +text: .text%__1cJrelocInfo2t6Mn0AJrelocType_ii_v_; +text: .text%__1cMciMethodDataHdata_at6Mi_pnLProfileData__; +text: .text%__1cENodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachNodeNis_MachEpilog6M_pnOMachEpilogNode__: ad_i486_misc.o; +text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_; +text: .text%__1cMCreateExNodeGOpcode6kM_i_; +text: .text%__1cSloadL_volatileNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cGOopMapbEmap_compiler_reg_to_oopmap_reg6MnHOptoRegEName_ii_nFVMRegEName__; +text: .text%__1cHhashkey6Fpkv_i_; +text: .text%__1cLTypeInstPtrFxmeet6kMpknEType__3_; +text: .text%__1cIJVMState2t6MpnIciMethod_p0_v_; +text: .text%__1cGOopMapHset_xxx6MnHOptoRegEName_nLOopMapValueJoop_types_ii2_v_; +text: .text%__1cLTypeInstPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cENodeHdel_out6Mp0_v_: coalesce.o; +text: .text%__1cGBitMapGat_put6MIi_v_; +text: .text%__1cJloadBNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHdel_out6Mp0_v_: parse2.o; +text: .text%__1cHMatcherTcollect_null_checks6MpnENode__v_; +text: .text%__1cNloadConI0NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENode2t6Mp011_v_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: generateOopMap.o; +text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cHMemNodeMIdeal_common6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cITypeLongCeq6kMpknEType__i_; +text: .text%__1cHCmpNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMMachCallNodeLbottom_type6kM_pknEType__; +text: .text%__1cRMachSafePointNodeQis_MachSafePoint6M_p0_: 
ad_i486_misc.o; +text: .text%__1cOMethodLivenessKBasicBlockIload_one6Mi_v_; +text: .text%__1cNSafePointNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: ad_i486_misc.o; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: classes.o; +text: .text%__1cIemit_d326FrnKCodeBuffer_i_v_; +text: .text%__1cFDictI2i6M_v_; +text: .text%__1cIJVMStateNclone_shallow6kM_p0_; +text: .text%__1cNloadConI0NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: subnode.o; +text: .text%__1cIMachNodeFreloc6kM_i_: ad_i486_misc.o; +text: .text%__1cFciEnvIis_in_vm6F_i_; +text: .text%__1cNSafePointNodeHsize_of6kM_I_; +text: .text%__1cPCheckCastPPNodeGOpcode6kM_i_; +text: .text%__1cJCodeCacheQfind_blob_unsafe6Fpv_pnICodeBlob__; +text: .text%__1cHTypePtrLmeet_offset6kMi_i_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: connode.o; +text: .text%__1cIHaltNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFChunk2t6MI_v_; +text: .text%__1cFChunk2n6FII_pv_; +text: .text%__1cKciTypeFlowLStateVectorSapply_one_bytecode6MpnQciByteCodeStream__i_; +text: .text%__1cGOopMapJset_value6MnHOptoRegEName_ii_v_; +text: .text%__1cIMachOperLdisp_is_oop6kM_i_; +text: .text%__1cJloadPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFChunk2k6Fpv_v_; +text: .text%__1cOcompU_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeRdisconnect_inputs6Mp0_i_; +text: .text%__1cIIndexSetFclear6M_v_: indexSet.o; +text: .text%__1cIIndexSetJlrg_union6MIIkIpknIPhaseIFG_rknHRegMask__I_; +text: .text%JVM_GetMethodIxExceptionTableLength; +text: .text%__1cETypeFempty6kM_i_; +text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__; +text: .text%__1cJTypeTupleCeq6kMpknEType__i_; +text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: split_if.o; +text: .text%__1cENodeHdel_out6Mp0_v_: memnode.o; +text: .text%__1cNPhaseCoalesceRcombine_these_two6MpnENode_2_v_; +text: .text%__1cFMutexbClock_without_safepoint_check6M_v_; +text: .text%__1cPSignatureStreamHis_done6kM_i_; +text: .text%__1cJStoreNodeKmatch_edge6kMI_I_; +text: .text%__1cIBoolNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIBoolTestKcc2logical6kMpknEType__3_; +text: .text%__1cIAddPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFArenaEgrow6MI_pv_; +text: .text%__1cQCompressedStream2t6MpCi_v_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: multnode.o; +text: .text%__1cJeRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cUParallelScavengeHeapPis_in_permanent6kMpkv_i_: parallelScavengeHeap.o; +text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__; +text: .text%__1cXruntime_call_RelocationEtype6M_nJrelocInfoJrelocType__: ad_i486.o; +text: .text%__1cENodeHdel_out6Mp0_v_: loopopts.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: cfgnode.o; +text: .text%__1cKBranchDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cENodeGis_CFG6kM_i_: memnode.o; +text: .text%__1cKjmpDirNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_; +text: .text%__1cFBlockUneeded_for_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_; +text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlass.o; +text: .text%__1cILoadNodeKmatch_edge6kMI_I_; +text: .text%__1cYCallStaticJavaDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHMemNodeGis_Mem6M_p0_: classes.o; +text: 
.text%__1cMPhaseIterGVNbGregister_new_node_with_optimizer6MpnENode__2_; +text: .text%__1cENodeKreplace_by6Mp0_v_; +text: .text%__1cNPhaseRegAllocGis_oop6kMpknENode__i_; +text: .text%__1cGIfNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: classes.o; +text: .text%__1cSCountedLoopEndNodeGOpcode6kM_i_; +text: .text%__1cKjmpDirNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHPhiNodeIadr_type6kM_pknHTypePtr__: cfgnode.o; +text: .text%__1cHPhiNodeHsize_of6kM_I_: cfgnode.o; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6FnTobjArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cJTypeTupleGfields6FI_ppknEType__; +text: .text%__1cMPhaseChaitinFUnion6MpknENode_3_v_; +text: .text%__1cJdo_method6FpnNmethodOopDesc__v_: recompilationMonitor.o; +text: .text%__1cZload_can_see_stored_value6FpnILoadNode_pnENode_pnOPhaseTransform__3_: memnode.o; +text: .text%__1cFframeGsender6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cNtestP_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKRegionNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLrecord_bias6FpknIPhaseIFG_ii_v_: coalesce.o; +text: .text%__1cMPhaseChaitinSget_spillcopy_wide6MpnENode_2I_2_; +text: .text%__1cKNativeCallLdestination6kM_pC_; +text: .text%__1cNinstanceKlassQarray_klass_impl6FnTinstanceKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: memnode.o; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: callnode.o; +text: .text%__1cSCallStaticJavaNodeRis_CallStaticJava6kM_pk0_: callnode.o; +text: .text%__1cIAddPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cMMergeMemNodeEhash6kM_I_; +text: .text%__1cJcmpOpOperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cUPSMarkSweepDecoratorQinsert_deadspace6MripnIHeapWord_I_i_; +text: .text%__1cMMergeMemNodeLis_MergeMem6M_p0_: memnode.o; +text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cPciInstanceKlassRis_instance_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cILoadNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIBoolNodeLbottom_type6kM_pknEType__: subnode.o; +text: .text%__1cMPhaseChaitinNFind_compress6MI_I_; +text: .text%__1cKStorePNodeGOpcode6kM_i_; +text: .text%__1cTconstantPoolOopDescLname_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: methodOop.o; +text: .text%__1cNmethodOopDescPis_empty_method6kM_i_; +text: .text%__1cLjmpConUNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__; +text: .text%__1cOno_flip_branch6FpnFBlock__i_: block.o; +text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_; +text: .text%__1cNmethodOopDescRis_not_compilable6kMi_i_; +text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_; +text: .text%__1cKRelocationJpack_data6M_i_: ad_i486.o; +text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__; +text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_; +text: 
.text%__1cNmethodOopDescLis_accessor6kM_i_; +text: .text%__1cILoadNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cISubINodeGOpcode6kM_i_; +text: .text%__1cKStoreINodeGOpcode6kM_i_; +text: .text%__1cNeFlagsRegOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cQciByteCodeStreamMreset_to_bci6Mi_v_; +text: .text%__1cHRetNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: lcm.o; +text: .text%__1cJStoreNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: machnode.o; +text: .text%__1cRSignatureIteratorTcheck_signature_end6M_v_; +text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_; +text: .text%__1cENodeHis_Goto6kM_I_: classes.o; +text: .text%__1cILoadNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceKlass.o; +text: .text%__1cITypeLongEhash6kM_i_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: block.o; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: relocInfo.o; +text: .text%__1cJloadPNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cMPhaseIterGVNFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cLis_cond_add6FpnIPhaseGVN_pnHPhiNode__pnENode__; +text: .text%__1cPsplit_flow_path6FpnIPhaseGVN_pnHPhiNode__pnENode__: cfgnode.o; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: classes.o; +text: .text%__1cKRegionNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKRelocationLunpack_data6M_v_: relocInfo.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: block.o; +text: .text%__1cITypeNodeHsize_of6kM_I_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: memnode.o; +text: .text%__1cFMutexNowned_by_self6kM_i_; +text: .text%__1cIMachNodeRget_base_and_disp6kMrirpknHTypePtr__pknENode__; +text: .text%__1cIAddINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIIndexSetFclear6M_v_: chaitin.o; +text: .text%__1cXPhaseAggressiveCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cFBlockOschedule_local6MrnHMatcher_rnLBlock_Array_pirnJVectorSet_rnNGrowableArray4CI___i_; +text: .text%__1cFBlockScall_catch_cleanup6MrnLBlock_Array__v_; +text: .text%__1cLBlock_StackXmost_frequent_successor6MpnFBlock__I_; +text: .text%__1cNidealize_test6FpnIPhaseGVN_pnGIfNode__3_: ifnode.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: symbolKlass.o; +text: .text%__1cENodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQSystemDictionaryXcheck_signature_loaders6FnMsymbolHandle_nGHandle_2ipnGThread__v_; +text: .text%__1cENodeGpinned6kM_i_: subnode.o; +text: .text%__1cLLShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLCounterDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cKRegionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGTarjanELINK6Mp01_v_; +text: .text%__1cMPhaseChaitinMyank_if_dead6MpnENode_pnFBlock_pnJNode_List_6_i_; +text: .text%__1cYDebugInformationRecorderWserialize_scope_values6MpnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__; +text: .text%__1cIGraphKitJsync_jvms6kM_pnIJVMState__; +text: .text%__1cENodeGis_Con6kM_I_: cfgnode.o; +text: .text%__1cMciMethodDataJnext_data6MpnLProfileData__2_; +text: .text%__1cENodeGpinned6kM_i_: memnode.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: subnode.o; +text: .text%__1cIBoolNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNmethodOopDescIbci_from6kMpC_i_; +text: .text%__1cNSafePointNodeSset_next_exception6Mp0_v_; 
+text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_: bytecode.o; +text: .text%__1cOcompU_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__; +text: .text%__1cMPhaseChaitinNFind_compress6MpknENode__I_; +text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__; +text: .text%__1cFStateM_sub_Op_ConI6MpknENode__v_; +text: .text%__1cNRelocIteratorKset_limits6MpC1_v_; +text: .text%__1cNRelocIteratorKinitialize6MipnICodeBlob_pC3_v_; +text: .text%__1cLsymbolKlassNoop_is_symbol6kM_i_: symbolKlass.o; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: parse1.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: ad_i486.o; +text: .text%__1cIsplit_if6FpnGIfNode_pnMPhaseIterGVN__pnENode__: ifnode.o; +text: .text%__1cIGraphKitEstop6M_v_; +text: .text%__1cTremove_useless_bool6FpnGIfNode_pnIPhaseGVN__pnENode__: ifnode.o; +text: .text%__1cPciObjectFactorySget_unloaded_klass6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cJeRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: subnode.o; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: systemDictionary.o; +text: .text%__1cMloadConLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cILoadNodeHis_Load6M_p0_: classes.o; +text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_; +text: .text%__1cKjmpConNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHAddNodeEhash6kM_I_; +text: .text%__1cNSafePointNodeLbottom_type6kM_pknEType__: callnode.o; +text: .text%__1cOemit_d32_reloc6FrnKCodeBuffer_irknQRelocationHolder_i_v_; +text: .text%__1cNSafePointNodeKmatch_edge6kMI_I_; +text: .text%__1cMURShiftINodeGOpcode6kM_i_; +text: .text%__1cOcompI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLjmpConUNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cIRootNodeGOpcode6kM_i_; +text: .text%__1cFChunkEchop6M_v_; +text: .text%__1cIMachOperOindex_position6kM_i_; +text: .text%__1cMloadConPNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cIimmPOperEtype6kM_pknEType__: ad_i486_clone.o; +text: .text%__1cPcheckCastPPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOindOffset8OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOindOffset8OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOindOffset8OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cHRegMaskQis_aligned_Pairs6kM_i_; +text: .text%__1cMloadConLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompU_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cHSubNodeGis_Sub6M_p0_: classes.o; +text: .text%__1cJTypeTupleEmake6FIppknEType__pk0_; +text: .text%__1cYCallStaticJavaDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNeFlagsRegOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKReflectionTverify_field_access6FpnMklassOopDesc_22nLAccessFlags_ii_i_; +text: .text%__1cIregDOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cSInterpreterRuntimeLcache_entry6FpnKJavaThread__pnWConstantPoolCacheEntry__: interpreterRuntime.o; +text: .text%__1cJStoreNodeIis_Store6kM_pk0_: classes.o; +text: .text%__1cKDictionaryJget_entry6MiInMsymbolHandle_nGHandle__pnPDictionaryEntry__; +text: .text%__1cHCmpNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_; +text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__; +text: .text%__1cHi2sNodeErule6kM_I_: ad_i486_misc.o; +text: 
.text%__1cNtestI_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__; +text: .text%__1cJloadPNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPimpl_mov_helper6FpnKCodeBuffer_iiii_i_: ad_i486.o; +text: .text%__1cHConNodeEhash6kM_I_; +text: .text%__1cKjmpDirNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cKstoreINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%Unsafe_CompareAndSwapLong; +text: .text%__1cJStoreNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPciObjectFactoryNfind_non_perm6MpnHoopDesc__rpn0ANNonPermObject__; +text: .text%__1cFBlockLfind_remove6MpknENode__v_; +text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cKTypeRawPtrJsingleton6kM_i_; +text: .text%__1cMloadConDNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConDNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJStoreNodeEhash6kM_I_; +text: .text%__1cMPhaseChaitinJsplit_USE6MpnENode_pnFBlock_2IIiinNGrowableArray4CI__i_I_; +text: .text%__1cMloadConDNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENode2t6Mp0111_v_; +text: .text%__1cXindIndexScaleOffsetOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cWShouldNotReachHereNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKTypeRawPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__; +text: .text%__1cGOopMapHset_oop6MnHOptoRegEName_ii_v_; +text: .text%__1cHMatcherXadjust_outgoing_stk_arg6MinHOptoRegEName_r2_2_; +text: .text%__1cMMergeMemNodePiteration_setup6Mpk0_v_; +text: .text%__1cJeRegLOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cJMultiNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cJCProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstorePNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_; +text: .text%__1cLRegisterMapFclear6Mpi_v_; +text: .text%__1cNtestI_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFframeOis_first_frame6kM_i_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: cfgnode.o; +text: .text%__1cENodeMcisc_operand6kM_i_: memnode.o; +text: .text%__1cNtestP_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFChunkJnext_chop6M_v_; +text: .text%__1cHTypeAryRary_must_be_exact6kM_i_; +text: .text%__1cRMachNullCheckNodeQis_MachNullCheck6M_p0_: machnode.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: callnode.o; +text: .text%__1cKIfTrueNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJCodeCacheFalive6FpnICodeBlob__2_; +text: .text%__1cLRShiftINodeGOpcode6kM_i_; +text: .text%__1cOcompU_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_: reg_split.o; +text: .text%__1cLRegisterMap2t6MpnKJavaThread_i_v_; +text: .text%__1cHTypeIntFempty6kM_i_; +text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__; +text: .text%__1cJloadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJchar2type6Fc_nJBasicType__: fieldType.o; +text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNmethodOopDescMintrinsic_id6kM_n0ALIntrinsicId__; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceKlass.o; +text: .text%__1cMMergeMemNodeJmemory_at6kMI_pnENode__; +text: .text%__1cNloadRangeNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: 
.text%__1cENodeHis_Proj6M_pnIProjNode__: subnode.o; +text: .text%__1cITypeNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__; +text: .text%__1cFciEnvXget_klass_by_index_impl6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cNExceptionMark2T6M_v_; +text: .text%__1cNExceptionMark2t6MrpnGThread__v_; +text: .text%__1cMgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cMloadConPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHOopFlowNcompute_reach6MpnNPhaseRegAlloc_ipnEDict__v_; +text: .text%__1cRInvocationCounterFreset6M_v_; +text: .text%__1cRInvocationCounterJset_state6Mn0AFState__v_; +text: .text%__1cLOptoRuntimePnew_typeArray_C6FnJBasicType_ipnKJavaThread__v_; +text: .text%__1cRInvocationCounterEinit6M_v_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: multnode.o; +text: .text%__1cRMachSafePointNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cUGenericGrowableArrayPraw_at_put_grow6MipknEGrET_3_v_; +text: .text%__1cMMergeMemNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%JVM_GetClassModifiers; +text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__; +text: .text%JVM_GetClassAccessFlags; +text: .text%__1cOcompU_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTCreateExceptionNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJCatchNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLIfFalseNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: codeBlob.o; +text: .text%__1cKstoreINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKjmpDirNodeHis_Goto6kM_I_: ad_i486_misc.o; +text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cSObjectSynchronizerKfast_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cNSafePointNodeOnext_exception6kM_p0_; +text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_; +text: .text%__1cMMergeMemNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cENodeHdel_out6Mp0_v_: split_if.o; +text: .text%__1cLMachNopNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKRegionNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLPhaseValuesFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: connode.o; +text: .text%__1cOcompU_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNloadKlassNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cICallNodeLis_CallLeaf6kM_pknMCallLeafNode__: callnode.o; +text: .text%__1cMObjectLocker2t6MnGHandle_pnGThread__v_; +text: .text%__1cMObjectLocker2T6M_v_; +text: .text%__1cKmethodOperGmethod6kM_i_: ad_i486.o; +text: .text%__1cWConstantPoolCacheEntryRset_initial_state6Mi_v_; +text: .text%__1cPSignatureStreamJis_object6kM_i_; +text: .text%__1cJloadINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopOidom_no_update6kMpnENode__2_: loopTransform.o; +text: .text%__1cLProfileDataPfollow_contents6M_v_: methodDataOop.o; +text: .text%__1cLProfileDataPadjust_pointers6M_v_: methodDataOop.o; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: classes.o; +text: .text%__1cOindOffset8OperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cOcompI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNaddI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRmethodDataOopDescPinitialize_data6MpnOBytecodeStream_i_i_; +text: .text%__1cRmethodDataOopDescTbytecode_cell_count6FnJBytecodesECode__i_; +text: 
.text%__1cRmethodDataOopDescRcompute_data_size6FpnOBytecodeStream__i_; +text: .text%__1cMMergeMemNodeQclone_all_memory6FpnENode__p0_; +text: .text%__1cIGraphKitJclone_map6M_pnNSafePointNode__; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: multnode.o; +text: .text%__1cGOopMapQset_callee_saved6MnHOptoRegEName_ii2_v_; +text: .text%__1cOPhaseIdealLoopIsplit_up6MpnENode_22_i_; +text: .text%__1cKTypeOopPtrWmake_from_klass_common6FpnHciKlass_ii_pk0_; +text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_; +text: .text%__1cENodeHdel_out6Mp0_v_: postaloc.o; +text: .text%__1cOcompU_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: subnode.o; +text: .text%__1cNtestI_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIBoolNodeKmatch_edge6kMI_I_: subnode.o; +text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_; +text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__; +text: .text%__1cNCompileBrokerLmaybe_block6F_v_; +text: .text%__1cFStateM_sub_Op_RegP6MpknENode__v_; +text: .text%__1cFPhase2t6Mn0ALPhaseNumber__v_; +text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__; +text: .text%__1cIJumpDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cKstorePNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: symbolKlass.o; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: symbolKlass.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: symbolKlass.o; +text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__; +text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: callnode.o; +text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_: machnode.o; +text: .text%__1cHRegMaskPfind_first_pair6kM_nHOptoRegEName__; +text: .text%__1cIHaltNodeLbottom_type6kM_pknEType__; +text: .text%__1cRMachSafePointNodeRis_safepoint_node6kM_i_: ad_i486_misc.o; +text: .text%__1cKRegionNodeOis_block_start6kM_i_: classes.o; +text: .text%__1cKTypeAryPtrFxmeet6kMpknEType__3_; +text: .text%__1cMMergeMemNodeNset_memory_at6MIpnENode__v_; +text: .text%__1cRMemBarReleaseNodeGOpcode6kM_i_; +text: .text%__1cOeFlagsRegUOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cKcmpOpUOperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cENodeHdel_out6Mp0_v_: cfgnode.o; +text: .text%__1cCosPelapsed_counter6F_x_; +text: .text%__1cMtlsLoadPNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPClassFileParserbLparse_constant_pool_nameandtype_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cGBitMapOset_difference6M0_v_; +text: .text%__1cPcheckCastPPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKMemBarNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cYCallStaticJavaDirectNodeSalignment_required6kM_i_: ad_i486_misc.o; +text: .text%__1cYCallStaticJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNtestP_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: memnode.o; +text: .text%__1cKstorePNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOindOffset8OperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cXindIndexScaleOffsetOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cRaddI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: 
.text%__1cKstoreINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: memnode.o; +text: .text%__1cOcompI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cKRegionNodeGis_CFG6kM_i_: loopnode.o; +text: .text%__1cNloadRangeNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQciByteCodeStreamFEOBCs6M_nJBytecodesECode__; +text: .text%__1cITypeNodeDcmp6kMrknENode__I_; +text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_: frame.o; +text: .text%__1cMPhaseChaitinPset_was_spilled6MpnENode__v_; +text: .text%__1cHTypeAryEhash6kM_i_; +text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_; +text: .text%__1cNsubI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverYlookup_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cPVirtualCallDataPadjust_pointers6M_v_; +text: .text%__1cPVirtualCallDataPfollow_contents6M_v_; +text: .text%__1cMMergeMemNodePset_base_memory6MpnENode__v_; +text: .text%__1cSsafePoint_pollNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNLoadKlassNodeGOpcode6kM_i_; +text: .text%__1cLOopRecorderOallocate_index6MpnI_jobject__i_; +text: .text%__1cYDebugInformationRecorderNappend_handle6MpnI_jobject__i_; +text: .text%__1cKTypeRawPtrEhash6kM_i_; +text: .text%__1cIciObjectIencoding6M_pnI_jobject__; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: machnode.o; +text: .text%__1cPPerfLongVariantGsample6M_v_; +text: .text%__1cENodeHis_Copy6kM_I_: machnode.o; +text: .text%__1cIAndINodeGOpcode6kM_i_; +text: .text%__1cRCompilationPolicyNcanBeCompiled6FnMmethodHandle__i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: multnode.o; +text: .text%__1cVCompressedWriteStream2t6Mi_v_; +text: .text%__1cHAddNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJlabelOperFclone6kM_pnIMachOper__; +text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_; +text: .text%__1cNCatchProjNodeMis_CatchProj6kM_pk0_: cfgnode.o; +text: .text%__1cENode2t6Mp01_v_; +text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cNtestI_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: subnode.o; +text: .text%__1cKTypeOopPtrHget_con6kM_i_; +text: .text%__1cMloadConINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMUniverseOperFclone6kM_pnIMachOper__; +text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: multnode.o; +text: .text%__1cJloadINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMTypeKlassPtrEhash6kM_i_; +text: .text%__1cKRegionNodeHhas_phi6kM_pnHPhiNode__; +text: .text%__1cIMachNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cENodeHins_req6MIp0_v_; +text: .text%__1cPconvI2L_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIciMethodbCinterpreter_invocation_count6M_i_; +text: .text%__1cMciMethodDataLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cKStoreCNodeGOpcode6kM_i_; +text: .text%JVM_GetCPMethodSignatureUTF; +text: .text%__1cOcompI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cRshrI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: 
.text%__1cJAssemblerJemit_data6MirknQRelocationHolder_i_v_; +text: .text%__1cFframeQoops_do_internal6MpnKOopClosure_pnLRegisterMap_i_v_; +text: .text%__1cRInterpreterOopMapLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cOeFlagsRegUOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLjmpConUNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNSafePointNodeGOpcode6kM_i_; +text: .text%__1cJVectorSet2L6MI_rnDSet__; +text: .text%__1cJVectorSetEgrow6MI_v_; +text: .text%__1cOMethodLivenessKBasicBlockWcompute_gen_kill_range6MpnQciByteCodeStream__v_; +text: .text%__1cHnmethodJis_zombie6kM_i_: nmethod.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: gcm.o; +text: .text%__1cKStoreBNodeGOpcode6kM_i_; +text: .text%__1cPClassFileParserbJparse_constant_pool_methodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: connode.o; +text: .text%__1cOFastUnlockNodeGOpcode6kM_i_; +text: .text%__1cHPhiNodeEmake6FpnENode_2pknEType_pknHTypePtr__p0_; +text: .text%__1cLeAXRegPOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_: frame.o; +text: .text%__1cENodeHdel_out6Mp0_v_: gcm.o; +text: .text%__1cETypeOget_const_type6FpnGciType__pk0_; +text: .text%__1cPcheckCastPPNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJVectorSet2t6MpnFArena__v_; +text: .text%__1cIGraphKitGmemory6MI_pnENode__; +text: .text%__1cITypeLong2t6Mxxi_v_; +text: .text%__1cKCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQciByteCodeStreamPget_field_index6M_i_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: memnode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: memnode.o; +text: .text%__1cCosMvm_page_size6F_i_; +text: .text%__1cENodeDcmp6kMrk0_I_; +text: .text%__1cKReflectionTverify_class_access6FpnMklassOopDesc_2i_i_; +text: .text%__1cNloadRangeNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_RegI6MpknENode__v_; +text: .text%__1cJloadINodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: methodOop.o; +text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cKjmpConNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_AddP6MpknENode__v_; +text: .text%__1cMPhaseChaitinVmay_be_copy_of_callee6kMpnENode__i_; +text: .text%__1cXroundDouble_mem_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitMsaved_ex_oop6FpnNSafePointNode__pnENode__; +text: .text%__1cIPhaseCCPFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cFParseKensure_phi6Mii_pnHPhiNode__; +text: .text%__1cJloadFNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: postaloc.o; +text: .text%__1cOMachReturnNodeNis_MachReturn6M_p0_: ad_i486_misc.o; +text: .text%__1cIMachOperMdisp_as_type6kM_pknHTypePtr__: ad_i486.o; +text: .text%__1cPcheckCastPPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIciObjectJset_ident6MI_v_; +text: .text%__1cPciObjectFactoryNinit_ident_of6MpnIciObject__v_; +text: .text%__1cQPreserveJVMState2T6M_v_; +text: .text%__1cQPreserveJVMState2t6MpnIGraphKit_i_v_; +text: .text%__1cNtestP_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__; +text: .text%__1cIGraphKitLclean_stack6Mi_v_; +text: .text%__1cYDebugInformationRecorderYserialize_monitor_values6MpnNGrowableArray4CpnMMonitorValue____i_; +text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__; +text: 
.text%__1cYDebugInformationRecorderOdescribe_scope6MpnIciMethod_ipnKDebugToken_44_v_; +text: .text%__1cWMutableSpaceUsedHelperLtake_sample6M_x_: spaceCounters.o; +text: .text%__1cICallNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cFciEnvSget_klass_by_index6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cLcastP2INodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRCompilationPolicyOmustBeCompiled6FnMmethodHandle__i_; +text: .text%__1cLBoxLockNodeGOpcode6kM_i_; +text: .text%__1cHTypePtrHget_con6kM_i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: callnode.o; +text: .text%__1cITypeFuncEhash6kM_i_; +text: .text%__1cJAssemblerJemit_data6MinJrelocInfoJrelocType_i_v_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_22nHAddressLScaleFactor_irknQRelocationHolder__v_; +text: .text%__1cLBoxLockNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMLinkResolverNresolve_klass6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__; +text: .text%__1cNloadRangeNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cLPhaseValuesHmakecon6MpknEType__pnHConNode__; +text: .text%__1cOcompI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIciMethodPliveness_at_bci6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessPget_liveness_at6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessKBasicBlockPget_liveness_at6MpnIciMethod_i_nGBitMap__; +text: .text%__1cPSignatureStream2t6MnMsymbolHandle_i_v_; +text: .text%__1cITypeLongJsingleton6kM_i_; +text: .text%__1cOcompI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cRMemBarAcquireNodeGOpcode6kM_i_; +text: .text%__1cJOopMapSetSfind_map_at_offset6kMii_pnGOopMap__; +text: .text%__1cICodeBlobbAoop_map_for_return_address6MpCi_pnGOopMap__; +text: .text%__1cLCounterDataOis_CounterData6M_i_: ciMethodData.o; +text: .text%__1cHnmethodKis_nmethod6kM_i_: nmethod.o; +text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_; +text: .text%__1cLCastP2INodeGOpcode6kM_i_; +text: .text%__1cKCodeBufferOadd_stub_reloc6MpCrknQRelocationHolder_i_v_; +text: .text%__1cKCodeBufferOalloc_relocate6M_pnORelocateBuffer__; +text: .text%__1cMCallLeafNodeGOpcode6kM_i_; +text: .text%__1cJLoadSNodeGOpcode6kM_i_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: subnode.o; +text: .text%__1cKjmpDirNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHTypePtrJsingleton6kM_i_; +text: .text%__1cJAssemblerEcall6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_; +text: .text%__1cIGraphKitObasic_plus_adr6MpnENode_2i_2_; +text: .text%__1cKMemBarNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIimmPOperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cMmerge_region6FpnKRegionNode_pnIPhaseGVN__pnENode__: cfgnode.o; +text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cTCreateExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRshrI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMPhaseIterGVNHmakecon6MpknEType__pnHConNode__; +text: .text%__1cMTypeKlassPtrCeq6kMpknEType__i_; +text: .text%__1cRaddI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%JVM_GetMethodIxLocalsCount; +text: .text%__1cRNativeInstructionFwrote6Mi_v_; +text: .text%__1cWShouldNotReachHereNodeGpinned6kM_i_: ad_i486_misc.o; +text: 
.text%__1cICmpPNodeDsub6kMpknEType_3_3_; +text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_: interpreterRuntime.o; +text: .text%__1cWConstantPoolCacheEntryIas_flags6MnITosState_iiiii_i_; +text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cQMachCallJavaNodePis_MachCallJava6M_p0_: ad_i486_misc.o; +text: .text%__1cWShouldNotReachHereNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJimmI0OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cNloadConI0NodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cHMatcherWis_short_branch_offset6Mi_i_; +text: .text%__1cWConstantPoolCacheEntryGverify6kMpnMoutputStream__v_; +text: .text%__1cNloadKlassNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cITypeFuncCeq6kMpknEType__i_; +text: .text%__1cNSafePointNode2t6MIpnIJVMState__v_; +text: .text%__1cOGenerateOopMapPjump_targets_do6MpnOBytecodeStream_pFp0ipi_v4_i_; +text: .text%__1cHcommute6FpnENode_ii_i_: addnode.o; +text: .text%__1cJeRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: cfgnode.o; +text: .text%__1cGOopMapJheap_size6kM_i_; +text: .text%__1cHAddNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLMachNopNodeMideal_Opcode6kM_i_: ad_i486.o; +text: .text%__1cLMachNopNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHCompileYout_preserve_stack_slots6F_I_; +text: .text%__1cIAddINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKjmpDirNodeFclone6kM_pnENode__; +text: .text%__1cIciObject2t6MnGHandle__v_; +text: .text%__1cJTraceTime2T6M_v_; +text: .text%__1cPciObjectFactoryGinsert6MipnIciObject_pnNGrowableArray4C2___v_; +text: .text%__1cWShouldNotReachHereNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKRelocationTpd_call_destination6M_pC_; +text: .text%__1cOCallRelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cKRelocationXpd_set_call_destination6MpCi_v_; +text: .text%__1cOCallRelocationPset_destination6MpCi_v_; +text: .text%__1cJlabelOperFlabel6kM_pnFLabel__: ad_i486.o; +text: .text%__1cIHaltNodeGpinned6kM_i_: classes.o; +text: .text%__1cITypeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIRootNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cWMachCallStaticJavaNodePret_addr_offset6M_i_; +text: .text%__1cYCallStaticJavaDirectNodePcompute_padding6kMi_i_; +text: .text%__1cYCallStaticJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cJStartNodeGpinned6kM_i_: classes.o; +text: .text%__1cMTypeKlassPtr2t6MnHTypePtrDPTR_pnHciKlass_i_v_; +text: .text%__1cJloadPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%JVM_IsNaN; +text: .text%__1cJStoreNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLBoxLockNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQciByteCodeStreamQget_method_index6M_i_; +text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__; +text: .text%__1cRInterpretedRFrameKtop_method6kM_nMmethodHandle__: rframe.o; +text: .text%__1cITypeFuncEmake6FpknJTypeTuple_3_pk0_; +text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__; +text: .text%__1cIGraphKitQkill_dead_locals6M_v_; +text: .text%__1cCosVcurrent_stack_pointer6F_pC_; +text: .text%__1cICallNodeHis_Call6M_p0_: classes.o; +text: .text%__1cJTraceTime2t6MpkcpnMelapsedTimer_iipnMoutputStream__v_; +text: .text%__1cNaddI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; 
+text: .text%__1cXmembar_acquire_lockNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cJLoadPNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIGraphKitMreset_memory6M_pnENode__; +text: .text%__1cLklassVtableTupdate_super_vtable6MpnNinstanceKlass_pnNmethodOopDesc_i_i_; +text: .text%__1cNinstanceKlassQfind_local_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__i_; +text: .text%__1cNaddI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeQlatency_from_use6kMrnLBlock_Array_rnNGrowableArray4CI__pk0p0_i_; +text: .text%__1cMPhaseChaitinKprompt_use6MpnFBlock_I_i_; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2ipnGThread__v_; +text: .text%__1cOkill_dead_code6FpnENode_pnMPhaseIterGVN__i_: node.o; +text: .text%__1cFParsePload_state_from6Mpn0AFBlock__v_; +text: .text%__1cFParseMmerge_common6Mpn0AFBlock_i_v_; +text: .text%__1cNloadConI0NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJStartNodeIis_Start6M_p0_: callnode.o; +text: .text%__1cICmpINodeDsub6kMpknEType_3_3_; +text: .text%__1cHMemNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cIAddINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLProfileDataPfollow_contents6M_v_: ciMethodData.o; +text: .text%__1cLProfileDataPadjust_pointers6M_v_: ciMethodData.o; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cNsubI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cICodeBlobJis_zombie6kM_i_: codeBlob.o; +text: .text%__1cJStoreNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNmethodOopDescbGupdate_compiled_code_entry_point6Mi_v_; +text: .text%__1cSvframeStreamCommonPfill_from_frame6M_i_; +text: .text%__1cNloadRangeNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFBlockUhoist_LCA_above_defs6Mp01IrnLBlock_Array__1_; +text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__; +text: .text%__1cIciMethodLscale_count6Mi_i_; +text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_; +text: .text%__1cHMonitorKnotify_all6M_i_; +text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_; +text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cOCallRelocationFvalue6M_pC_: ad_i486.o; +text: .text%__1cNtestP_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMTypeKlassPtrEmake6FnHTypePtrDPTR_pnHciKlass_i_pk0_; +text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cScompP_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRSignatureIteratorSiterate_parameters6MX_v_; +text: .text%__1cOPhaseIdealLoopIset_idom6MpnENode_2I_v_; +text: .text%__1cNSafePointNodeGpinned6kM_i_: classes.o; +text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__; +text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLklassVtableToop_follow_contents6M_v_; +text: .text%__1cLklassVtableToop_adjust_pointers6M_v_; +text: .text%__1cIRootNodeNis_block_proj6kM_pknENode__: classes.o; +text: .text%__1cENodeHis_Type6M_pnITypeNode__: classes.o; +text: .text%__1cNmethodOopDescPis_final_method6kM_i_; +text: .text%__1cOPhaseIdealLoopQconditional_move6MpnENode__2_; +text: .text%__1cNSignatureInfoHdo_void6M_v_: bytecode.o; +text: .text%__1cNloadKlassNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitRnull_check_common6MpnENode_nJBasicType_i_2_; +text: 
.text%__1cICmpLNodeGOpcode6kM_i_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: constMethodKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: constMethodKlass.o; +text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_; +text: .text%__1cKoopFactoryPnew_constMethod6FiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cPClassFileParserMparse_method6MnSconstantPoolHandle_ipnLAccessFlags_pnPtypeArrayHandle_55pnGThread__nMmethodHandle__; +text: .text%__1cKoopFactoryKnew_method6FinLAccessFlags_iiipnGThread__pnNmethodOopDesc__; +text: .text%__1cLklassVtableWneeds_new_vtable_entry6FpnNmethodOopDesc_pnMklassOopDesc_pnHoopDesc_pnNsymbolOopDesc_nLAccessFlags__i_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: constMethodKlass.o; +text: .text%__1cQconstMethodKlassIallocate6MiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: methodKlass.o; +text: .text%__1cNmethodOopDescLobject_size6Fi_i_; +text: .text%__1cNmethodOopDescJinit_code6M_v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: methodKlass.o; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: methodKlass.o; +text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__; +text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_; +text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_; +text: .text%__1cOcompU_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTciConstantPoolCacheEfind6Mi_i_; +text: .text%__1cKciTypeFlowGJsrSetJcopy_into6Mp1_v_; +text: .text%__1cJLoadLNodeGOpcode6kM_i_; +text: .text%__1cHOrINodeGOpcode6kM_i_; +text: .text%__1cILoadNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cJMarkSweepNpreserve_mark6FpnHoopDesc_pnLmarkOopDesc__v_; +text: .text%__1cIRewriterOrewrite_method6FnMmethodHandle_rnIintArray_pnGThread__1_; +text: .text%__1cNmethodOopDescLlink_method6FnMmethodHandle__v_; +text: .text%__1cPClassFileParserbDverify_legal_method_modifiers6MiinMsymbolHandle_pnGThread__v_; +text: .text%__1cJloadINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cFParseFBlockRsuccessor_for_bci6Mi_p1_; +text: .text%__1cQleaPIdxScaleNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKTypeOopPtrFempty6kM_i_; +text: .text%__1cJleaP8NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%JVM_CurrentThread; +text: .text%__1cPindOffset32OperFscale6kM_i_: ad_i486.o; +text: .text%__1cKBranchDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cLOopMapCacheIentry_at6kMi_pnQOopMapCacheEntry__; +text: .text%__1cRInlineCacheBufferIcontains6FpC_i_; +text: .text%__1cKCompiledICWis_in_transition_state6kM_i_; +text: .text%__1cHOopFlowEmake6FpnFArena_i_p0_; +text: .text%__1cOGenerateOopMapKcheck_type6MnNCellTypeState_1_v_; +text: .text%__1cMMergeMemNodeNgrow_to_match6Mpk0_v_; +text: .text%__1cVeADXRegL_low_onlyOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cVloadConL_low_onlyNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQleaPIdxScaleNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHTypeIntFxdual6kM_pknEType__; +text: .text%__1cVloadConL_low_onlyNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNMachIdealNodePoper_input_base6kM_I_: machnode.o; 
+text: .text%__1cNSharedRuntimeDf2i6Ff_i_; +text: .text%__1cLLShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cVloadConL_low_onlyNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKTypeAryPtrFklass6kM_pnHciKlass__; +text: .text%__1cRaddI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cRRawBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cRInterpretedRFrameEinit6M_v_; +text: .text%__1cMMergeMemNodeRmake_empty_memory6F_pnENode__; +text: .text%__1cMMergeMemNode2t6MpnENode__v_; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_: callnode.o; +text: .text%__1cHciFieldJwill_link6MpnPciInstanceKlass_nJBytecodesECode__i_; +text: .text%__1cFciEnvXget_field_by_index_impl6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__; +text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cQciByteCodeStreamJget_field6Mri_pnHciField__; +text: .text%__1cKBlock_ListGremove6MI_v_; +text: .text%__1cECopyXconjoint_words_to_lower6FpnIHeapWord_2I_v_: block.o; +text: .text%__1cXvirtual_call_RelocationIparse_ic6FrpnICodeBlob_rpC5rppnHoopDesc_pi_nNRelocIterator__; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_16MnJBytecodesECode__v_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: connode.o; +text: .text%__1cIPhaseGVNUtransform_no_reclaim6MpnENode__2_; +text: .text%__1cMLinkResolverbFlinktime_resolve_virtual_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%JVM_GetCPMethodClassNameUTF; +text: .text%__1cKCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNstoreImmBNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHOopFlowFclone6Mp0i_v_; +text: .text%__1cSCallLeafDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKCompiledICOic_destination6kM_pC_; +text: .text%__1cMPhaseChaitinJsplit_DEF6MpnENode_pnFBlock_iIp25nNGrowableArray4CI__i_I_; +text: .text%__1cNGCTaskManagerNresource_flag6MI_i_; +text: .text%__1cNGCTaskManagerYshould_release_resources6MI_i_; +text: .text%__1cNtestP_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLStringTableGlookup6MipHiI_pnHoopDesc__; +text: .text%__1cIBoolNodeJideal_reg6kM_I_: subnode.o; +text: .text%__1cFStateM_sub_Op_Bool6MpknENode__v_; +text: .text%__1cHCmpNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__; +text: .text%__1cPciInstanceKlassYunique_concrete_subklass6M_p0_; +text: .text%__1cRcmpFastUnlockNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cHAddNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cENodeFis_If6M_pnGIfNode__: subnode.o; +text: .text%__1cKciTypeFlowNmake_range_at6Mi_pn0AFRange__; +text: .text%__1cTCreateExceptionNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKjmpDirNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNaddI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNinstanceKlassVshould_be_initialized6kM_i_; +text: .text%__1cGThreadLis_in_stack6kMpC_i_; +text: .text%__1cKJavaThreadNis_lock_owned6kMpC_i_; +text: .text%__1cJStartNodeGOpcode6kM_i_; +text: .text%__1cPconvF2D_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: subnode.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: subnode.o; +text: .text%__1cOMethodLivenessKBasicBlockJstore_one6Mi_v_; +text: .text%__1cbFCompressedLineNumberWriteStream2t6Mi_v_; +text: 
.text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_; +text: .text%__1cPClassFileParserWparse_linenumber_table6MIIpipnGThread__pC_; +text: .text%__1cNsubI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cMWarmCallInfoHis_cold6kM_i_; +text: .text%__1cFStateK_sub_Op_If6MpknENode__v_; +text: .text%__1cJleaP8NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFParseFBlockJinit_node6Mp0i_v_; +text: .text%__1cFParseFBlockKinit_graph6Mp0_v_; +text: .text%__1cGBitMapVset_union_with_result6M0_i_; +text: .text%__1cOPhaseIdealLoopRregister_new_node6MpnENode_2_v_; +text: .text%__1cNtestI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIPhaseIFGFUnion6MII_v_; +text: .text%__1cIGraphKit2t6MpnIJVMState__v_; +text: .text%__1cKCompiledIC2t6MpnKRelocation__v_; +text: .text%__1cLcastP2INodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNloadKlassNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cWConstantPoolCacheEntryOset_bytecode_26MnJBytecodesECode__v_; +text: .text%__1cJStartNodeGis_CFG6kM_i_: callnode.o; +text: .text%__1cLBoxLockNodeHsize_of6kM_I_; +text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_; +text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_; +text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLklassItableToop_adjust_pointers6M_v_; +text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLklassItableToop_follow_contents6M_v_; +text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cICmpUNodeDsub6kMpknEType_3_3_; +text: .text%__1cNincI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKjmpConNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: graphKit.o; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cTciConstantPoolCacheDget6Mi_pv_; +text: .text%__1cScompI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeHis_Goto6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: memnode.o; +text: .text%__1cOGenerateOopMapFppop16MnNCellTypeState__v_; +text: .text%__1cNLoadRangeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cScompI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: subnode.o; +text: .text%__1cHTypePtrLdual_offset6kM_i_; +text: .text%__1cNGCTaskManagerIget_task6MI_pnGGCTask__; +text: .text%__1cGGCTaskKinitialize6M_v_; +text: .text%__1cNGCTaskManagerWdecrement_busy_workers6M_I_; +text: .text%__1cLGCTaskQdDueueGremove6M_pnGGCTask__; +text: .text%__1cNGCTaskManagerWincrement_busy_workers6M_I_; +text: .text%__1cLGCTaskQdDueueHdequeue6M_pnGGCTask__; +text: .text%__1cLGCTaskQdDueueHenqueue6MpnGGCTask__v_; +text: .text%__1cNGCTaskManagerPnote_completion6MI_v_; +text: .text%__1cENodeHlatency6MI_I_; +text: .text%__1cNsubI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNtestI_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFframeZsender_for_compiled_frame6kMpnLRegisterMap_pnICodeBlob_i_0_; +text: .text%__1cPClassFileParserbFparse_constant_pool_class_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: 
.text%__1cNstoreImmPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cICodeBlobLlink_offset6M_i_; +text: .text%__1cFParseFBlockMrecord_state6Mp0_v_; +text: .text%__1cFParseMdo_one_block6M_v_; +text: .text%__1cJCatchNodeIis_Catch6kM_pk0_: classes.o; +text: .text%__1cENodeHdel_out6Mp0_v_: addnode.o; +text: .text%__1cOMethodLivenessKBasicBlockMmerge_normal6MnGBitMap__i_; +text: .text%__1cLConvI2LNodeGOpcode6kM_i_; +text: .text%__1cPClassFileParserZskip_over_field_signature6MpciIpnGThread__1_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: callnode.o; +text: .text%__1cIGraphKitTadd_safepoint_edges6MpnNSafePointNode_i_v_; +text: .text%__1cIJVMStateKclone_deep6kM_p0_; +text: .text%__1cENodeNadd_req_batch6Mp0I_v_; +text: .text%__1cIJVMStateLdebug_depth6kM_I_; +text: .text%__1cSvframeStreamCommonbBfill_from_interpreter_frame6M_v_; +text: .text%__1cJloadINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFParseFmerge6Mi_v_; +text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_; +text: .text%__1cPFieldAccessInfoDset6MnLKlassHandle_nMsymbolHandle_iinJBasicType_nLAccessFlags__v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_iipnGThread__v_; +text: .text%__1cNaddI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cICmpPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cKBufferBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cFframebFinterpreter_frame_monitor_begin6kM_pnPBasicObjectLock__; +text: .text%__1cGGCTask2t6M_v_; +text: .text%__1cOMacroAssemblerZneeds_explicit_null_check6Fi_i_; +text: .text%__1cNstoreImmBNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cETypeFwiden6kMpk0_2_: type.o; +text: .text%__1cIAddPNodeQmach_bottom_type6FpknIMachNode__pknEType__; +text: .text%__1cJLoadBNodeGOpcode6kM_i_; +text: .text%__1cRsalI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQleaPIdxScaleNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cFframeNis_java_frame6kM_i_; +text: .text%__1cOPhaseIdealLoopGspinup6MpnENode_2222pnLsmall_cache__2_; +text: .text%__1cRcmpFastUnlockNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: callnode.o; +text: .text%__1cJloadCNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNSafePointNodeEhash6kM_I_: callnode.o; +text: .text%__1cMnabxRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cPCountedLoopNodeGOpcode6kM_i_; +text: .text%__1cKciTypeFlowIblock_at6Mipn0AGJsrSet_n0AMCreateOption__pn0AFBlock__; +text: .text%__1cKciTypeFlowFRangeNget_block_for6Mpn0AGJsrSet_n0AMCreateOption__pn0AFBlock__; +text: .text%__1cHTypeAryFxmeet6kMpknEType__3_; +text: .text%__1cLjmpConUNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNaddI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTCreateExceptionNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cGvframe2t6MpknFframe_pknLRegisterMap_pnKJavaThread__v_; +text: .text%__1cLRegisterMap2t6Mpk0_v_; +text: .text%__1cUGenericGrowableArrayMraw_contains6kMpknEGrET__i_; +text: .text%__1cSvframeStreamCommonEnext6M_v_; +text: .text%__1cIMachNodeMcisc_operand6kM_i_: machnode.o; +text: .text%__1cITypeFuncEmake6FpnIciMethod__pk0_; +text: .text%__1cENodeLnonnull_req6kM_p0_; +text: 
.text%__1cSPSPromotionManagerbBgc_thread_promotion_manager6Fi_p0_; +text: .text%__1cITypeLongFxmeet6kMpknEType__3_; +text: .text%__1cRcmpFastUnlockNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIAddINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKstoreCNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowLStateVector2t6Mp0_v_; +text: .text%__1cOMethodLivenessNwork_list_get6M_pn0AKBasicBlock__; +text: .text%__1cIIndexSetFclear6M_v_: coalesce.o; +text: .text%__1cOPhaseIdealLoopKhandle_use6MpnENode_2pnLsmall_cache_22222_v_; +text: .text%__1cOPhaseIdealLoopOfind_use_block6MpnENode_22222_2_; +text: .text%__1cXmembar_release_lockNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cGvframeKnew_vframe6FpknFframe_pknLRegisterMap_pnKJavaThread__p0_; +text: .text%__1cCosGmalloc6FI_pv_; +text: .text%__1cNmethodOopDescWwas_executed_more_than6kMi_i_; +text: .text%__1cRInterpreterOopMapKinitialize6M_v_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: machnode.o; +text: .text%__1cRMachNullCheckNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cILoopNodeGOpcode6kM_i_; +text: .text%__1cICmpINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: bytecode.o; +text: .text%__1cHCompileKTracePhase2t6MpkcpnMelapsedTimer_i_v_; +text: .text%__1cHCompileKTracePhase2T6M_v_; +text: .text%__1cILoadNodeHsize_of6kM_I_; +text: .text%__1cKciTypeFlowFBlockPis_simpler_than6Mp1_i_; +text: .text%__1cRshrI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_; +text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cQciByteCodeStreamKget_method6Mri_pnIciMethod__; +text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__; +text: .text%__1cQleaPIdxScaleNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNCatchProjNodeLbottom_type6kM_pknEType__: cfgnode.o; +text: .text%__1cXindIndexScaleOffsetOperFscale6kM_i_: ad_i486.o; +text: .text%__1cNCatchProjNodeHsize_of6kM_I_: cfgnode.o; +text: .text%__1cKciTypeFlowLStateVectorEmeet6Mpk1_i_; +text: .text%__1cTOopMapForCacheEntryRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cRMachSafePointNode2t6M_v_; +text: .text%__1cHMatcherKmatch_sfpt6MpnNSafePointNode__pnIMachNode__; +text: .text%__1cFframeVinterpreter_frame_bci6kM_i_; +text: .text%__1cQleaPIdxScaleNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOcompI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHemit_cc6FrnKCodeBuffer_ii_v_; +text: .text%__1cJloadLNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cNCatchProjNodeEhash6kM_I_; +text: .text%__1cNincI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cURethrowExceptionNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopHdom_lca6kMpnENode_2_2_; +text: .text%__1cHMatcherScalling_convention6FpnLRegPair_Ii_v_; +text: .text%__1cIGraphKitTadd_exception_state6MpnNSafePointNode__v_; +text: .text%__1cQindOffset32XOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cICallNodeOis_CallRuntime6kM_pknPCallRuntimeNode__: callnode.o; +text: .text%__1cXinsert_anti_dependences6FrpnFBlock_pnENode_rnLBlock_Array__i_: gcm.o; +text: .text%__1cIimmLOperJconstantL6kM_x_: 
ad_i486_clone.o; +text: .text%__1cWflagsReg_long_LTGEOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cENodeHdel_out6Mp0_v_: lcm.o; +text: .text%__1cXAdaptiveWeightedAverageYcompute_adaptive_average6Mff_f_; +text: .text%__1cIGraphKitOset_all_memory6MpnENode__v_; +text: .text%__1cFKlassIsubklass6kM_p0_; +text: .text%__1cFframebDinterpreter_frame_monitor_end6kM_pnPBasicObjectLock__; +text: .text%__1cHMatcherVReduceInst_Chain_Rule6MpnFState_ipnIMachNode_rpnENode__v_; +text: .text%__1cMciMethodDataLhas_trap_at6MpnLProfileData_i_i_; +text: .text%__1cICodeBlobTfix_oop_relocations6MpC1_v_; +text: .text%__1cICodeBlobTfix_oop_relocations6M_v_; +text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_; +text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cRInterpreterOopMap2t6M_v_; +text: .text%__1cRInterpreterOopMap2T6M_v_; +text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_; +text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_; +text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cFframeToops_interpreted_do6MpnKOopClosure_pknLRegisterMap_i_v_; +text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_; +text: .text%__1cHMatcherPc_frame_pointer6kM_nHOptoRegEName__; +text: .text%__1cMMachCallNode2t6M_v_; +text: .text%__1cFBlockKsched_call6MrnHMatcher_rnLBlock_Array_IrnJNode_List_pipnMMachCallNode_rnJVectorSet__I_; +text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_; +text: .text%__1cHTypeIntEmake6Fii_pk0_; +text: .text%__1cOMethodLivenessKBasicBlockJpropagate6Mp0_v_; +text: .text%__1cNsubI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJOopMapSetTupdate_register_map6FpknFframe_pnICodeBlob_pnLRegisterMap__v_; +text: .text%__1cNsubI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cQSystemDictionaryKfind_class6FiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cIGraphKitbLset_predefined_input_for_runtime_call6MpnNSafePointNode__v_; +text: .text%JVM_GetCPFieldClassNameUTF; +text: .text%__1cLcastP2INodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: ciStreams.o; +text: .text%__1cHRetNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHBitDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cScompP_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowLStateVectorOpush_translate6MpnGciType__v_; +text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cKciTypeFlowGJsrSet2t6MpnFArena_i_v_; +text: .text%__1cENodeJset_req_X6MIp0pnMPhaseIterGVN__v_; +text: .text%__1cOcompU_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNstoreImmPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRcmpFastUnlockNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cScompI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: connode.o; +text: .text%__1cPindOffset32OperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: frame.o; +text: .text%__1cLBoxLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHnmethodIis_alive6kM_i_: 
nmethod.o; +text: .text%__1cPClassFileParserbGparse_constant_pool_string_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNloadConI0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: connode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: connode.o; +text: .text%__1cHCompileSflatten_alias_type6kMpknHTypePtr__3_; +text: .text%__1cNincI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: callnode.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: memnode.o; +text: .text%__1cLBoxLockNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cMorI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cGOopMap2t6Mii_v_; +text: .text%__1cJOopMapSetKadd_gc_map6MiipnGOopMap__v_; +text: .text%__1cYDebugInformationRecorderKadd_oopmap6MiipnGOopMap__v_; +text: .text%__1cMoutputStreamPupdate_position6MpkcI_v_; +text: .text%__1cMstringStreamFwrite6MpkcI_v_; +text: .text%__1cMFastLockNodeGOpcode6kM_i_; +text: .text%__1cIciMethodRhas_compiled_code6M_i_; +text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_; +text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_; +text: .text%__1cICallInfoDset6MnLKlassHandle_nMmethodHandle_pnGThread__v_; +text: .text%__1cKstoreCNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNloadKlassNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvL2INodeGOpcode6kM_i_; +text: .text%__1cRMachSafePointNodeSis_MachCallRuntime6M_pnTMachCallRuntimeNode__: ad_i486_misc.o; +text: .text%__1cHnmethodOis_not_entrant6kM_i_: nmethod.o; +text: .text%__1cIGraphKitbDtransfer_exceptions_into_jvms6M_pnIJVMState__; +text: .text%__1cITypeLongEmake6Fx_pk0_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: ciTypeFlow.o; +text: .text%__1cJloadSNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cICallNodeJideal_reg6kM_I_: callnode.o; +text: .text%__1cRMachSafePointNodePis_MachCallLeaf6M_pnQMachCallLeafNode__: ad_i486_misc.o; +text: .text%__1cRMachSafePointNodeLset_oop_map6MpnGOopMap__v_: ad_i486_misc.o; +text: .text%__1cYDebugInformationRecorderNadd_safepoint6MiipnGOopMap__v_; +text: .text%__1cHOopFlowNbuild_oop_map6MpnENode_ipnNPhaseRegAlloc_pi_pnGOopMap__; +text: .text%__1cHCompileTProcess_OopMap_Node6MpnIMachNode_i_v_; +text: .text%__1cENodeGis_Con6kM_I_: callnode.o; +text: .text%__1cILoopNodeHis_Loop6M_p0_: classes.o; +text: .text%__1cOoop_RelocationJpack_data6M_i_; +text: .text%__1cJVectorSetFClear6M_v_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: cfgnode.o; +text: .text%__1cMCallJavaNodeLis_CallJava6kM_pk0_: callnode.o; +text: .text%__1cICallNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_ipnGThread__v_; +text: .text%__1cWConstantPoolCacheEntryJset_field6MnJBytecodesECode_2nLKlassHandle_iinITosState_ii_v_; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cYCallStaticJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMachOperIconstant6kM_i_; +text: .text%__1cRaddI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_; +text: .text%__1cWMachCallStaticJavaNodeVis_MachCallStaticJava6M_p0_: ad_i486_misc.o; +text: 
.text%__1cFStateW_sub_Op_CallStaticJava6MpknENode__v_; +text: .text%__1cRinterpretedVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cLklassVtableNput_method_at6MpnNmethodOopDesc_i_v_; +text: .text%__1cMloadConPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNdecI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPVirtualCallDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cENodeHdel_out6Mp0_v_: parse3.o; +text: .text%__1cMloadConLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNloadKlassNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJleaP8NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKRelocationLunpack_data6M_v_: codeBlob.o; +text: .text%__1cJimmI8OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cKstoreCNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cGOopMapHcopy_to6MpC_v_; +text: .text%__1cRMachSafePointNodeWis_MachCallInterpreter6M_pnXMachCallInterpreterNode__: ad_i486_misc.o; +text: .text%__1cLPhaseValuesHzerocon6MnJBasicType__pnHConNode__; +text: .text%__1cScompI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: classes.o; +text: .text%__1cOGenerateOopMapGppush16MnNCellTypeState__v_; +text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: cpCacheOop.o; +text: .text%__1cMPhaseChaitinLclone_projs6MpnFBlock_IpnENode_4rI_i_; +text: .text%__1cCosEfree6Fpv_v_; +text: .text%__1cKInlineTreeJcallee_at6kMipnIciMethod__p0_; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: thread.o; +text: .text%__1cRcmpFastUnlockNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOleaPIdxOffNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJCodeCacheIcontains6Fpv_i_; +text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cMVirtualSpaceOcommitted_size6kM_I_; +text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMkpnNmethodOopDesc_i_pnHnmethod__; +text: .text%__1cGRFrame2t6MnFframe_pnKJavaThread_kp0_v_; +text: .text%__1cYCallStaticJavaDirectNodeFreloc6kM_i_; +text: .text%__1cKciTypeFlowLStateVectorJcopy_into6kMp1_v_; +text: .text%__1cPThreadLocalNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJLoadCNodeGOpcode6kM_i_; +text: .text%__1cKciTypeFlowFBlockKsuccessors6MpnQciByteCodeStream_pn0ALStateVector_pn0AGJsrSet__pnNGrowableArray4Cp1___; +text: .text%__1cKciTypeFlowQadd_to_work_list6Mpn0AFBlock__v_; +text: .text%__1cKciTypeFlowOwork_list_next6M_pn0AFBlock__; +text: .text%__1cKciTypeFlowKflow_block6Mpn0AFBlock_pn0ALStateVector_pn0AGJsrSet__v_; +text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_; +text: .text%__1cJOopMapSetGall_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure_pFppnHoopDesc_9E_v9B9B_v_; +text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cIGraphKitNuncommon_trap6MipnHciKlass_pkci_v_; +text: .text%__1cJOopMapSetHoops_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cIGraphKitJmake_load6MpnENode_2pknEType_nJBasicType_i_2_; +text: .text%__1cILoadNodeEmake6FpnENode_22pknHTypePtr_pknEType_nJBasicType__p0_; +text: .text%__1cOleaPIdxOffNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLOptoRuntimeSuncommon_trap_Type6F_pknITypeFunc__; +text: .text%__1cIHaltNode2t6MpnENode_2_v_; +text: .text%__1cMindirectOperJnum_edges6kM_I_: ad_i486.o; +text: 
.text%__1cKciTypeFlowFBlock2t6Mp0pn0AFRange_pn0AGJsrSet__v_; +text: .text%__1cJloadLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLPCTableNodeLbottom_type6kM_pknEType__; +text: .text%__1cMCreateExNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cYciExceptionHandlerStreamFcount6M_i_; +text: .text%__1cKciTypeFlowFBlockScompute_exceptions6M_v_; +text: .text%__1cScompU_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFciEnvWget_klass_by_name_impl6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: psTasks.o; +text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cIimmPOperPconstant_is_oop6kM_i_: ad_i486_clone.o; +text: .text%__1cLanyRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cLanyRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cOMethodLivenessNwork_list_add6Mpn0AKBasicBlock__v_; +text: .text%__1cMURShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFParsePdo_field_access6Mii_v_; +text: .text%__1cPCountedLoopNodeOis_CountedLoop6M_p0_: classes.o; +text: .text%__1cIAndLNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitQset_saved_ex_oop6FpnNSafePointNode_pnENode__v_; +text: .text%__1cKciTypeFlowPflow_successors6MpnNGrowableArray4Cpn0AFBlock___pn0ALStateVector__v_; +text: .text%__1cIGraphKitUmake_exception_state6MpnENode__pnNSafePointNode__; +text: .text%__1cLcastP2INodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRshrI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGPcDesc2t6Miii_v_; +text: .text%__1cHnmethodKcopy_pc_at6MipnGPcDesc__v_; +text: .text%__1cIPipelineXfunctional_unit_latency6kMIpk0_I_; +text: .text%__1cLTypeInstPtrFxdual6kM_pknEType__; +text: .text%__1cIJumpDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cNdecI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFTypeDCeq6kMpknEType__i_; +text: .text%__1cSindIndexOffsetOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cIMulLNodeGOpcode6kM_i_; +text: .text%__1cOMethodLivenessNmake_block_at6Mipn0AKBasicBlock__2_; +text: .text%__1cENodeHis_Goto6kM_I_: cfgnode.o; +text: .text%__1cHCompileKalias_type6MpnHciField__pn0AJAliasType__; +text: .text%__1cINodeHashJhash_find6MpknENode__p1_; +text: .text%__1cNloadConL0NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_nGHandle_2ipnGThread__pnMklassOopDesc__; +text: .text%__1cLLShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRsalI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJLoadINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLnaxRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cFParseFBlockNlocal_type_at6kMi_pknEType__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodKlass.o; +text: .text%__1cXjava_lang_ref_ReferenceNreferent_addr6FpnHoopDesc__p2_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: connode.o; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__: loopnode.o; +text: .text%__1cMelapsedTimerFstart6M_v_; +text: .text%__1cMelapsedTimerEstop6M_v_; +text: .text%__1cNaddI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: methodLiveness.o; +text: .text%__1cNloadConL0NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cLStringTableGintern6FnGHandle_pHipnGThread__pnHoopDesc__; +text: .text%__1cLStringTableLhash_string6FpHi_i_; +text: .text%__1cMMergeMemNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cEUTF8Hstrrchr6FpWiW_1_; +text: .text%__1cPshlI_eReg_1NodeMideal_Opcode6kM_i_: 
ad_i486_misc.o; +text: .text%__1cMCreateExNodeGpinned6kM_i_: classes.o; +text: .text%__1cIXorINodeGOpcode6kM_i_; +text: .text%__1cRindIndexScaleOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cNloadConL0NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cVLoaderConstraintTableWfind_loader_constraint6MnMsymbolHandle_nGHandle__ppnVLoaderConstraintEntry__; +text: .text%__1cMPhaseIterGVNJtransform6MpnENode__2_; +text: .text%__1cHi2sNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLLShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRindIndexScaleOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMPhaseChaitinTsplit_Rematerialize6MpnENode_pnFBlock_IrInNGrowableArray4CI__ipIp2i_2_; +text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_; +text: .text%__1cENodeHis_Call6M_pnICallNode__: ad_i486_misc.o; +text: .text%__1cIConLNodeGOpcode6kM_i_; +text: .text%__1cHCompileZintrinsic_insertion_index6MpnIciMethod_i_i_; +text: .text%__1cNstoreImmBNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cILoopNodeHis_Loop6M_p0_: loopnode.o; +text: .text%__1cRandI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKstoreINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cGciTypeMis_classless6kM_i_: ciType.o; +text: .text%__1cTleaPIdxScaleOffNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHTypeIntFwiden6kMpknEType__3_; +text: .text%__1cNCallGenerator2t6MpnIciMethod__v_; +text: .text%__1cIGraphKit2t6M_v_; +text: .text%__1cHMulNodeEhash6kM_I_; +text: .text%__1cFStateM_sub_Op_ConP6MpknENode__v_; +text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: codeBlob.o; +text: .text%__1cIAddLNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitNset_map_clone6MpnNSafePointNode__v_; +text: .text%__1cIGraphKitOreplace_in_map6MpnENode_2_v_; +text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_; +text: .text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cScompU_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHCompilebAallow_range_check_smearing6kM_i_; +text: .text%__1cITypeLongEmake6Fxxi_pk0_; +text: .text%JVM_GetCPMethodNameUTF; +text: .text%__1cPmethodDataKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNmodI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLjmpConUNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNLoadKlassNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJeRegIOperFclone6kM_pnIMachOper__; +text: .text%__1cQciByteCodeStreamZget_declared_field_holder6M_pnPciInstanceKlass__; +text: .text%__1cQciByteCodeStreamWget_field_holder_index6M_i_; +text: .text%__1cNSafePointNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: classes.o; +text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cNdecI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLOptoRuntimeOnew_objArray_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cQjava_lang_StringQbasic_create_oop6FpnQtypeArrayOopDesc_ipnGThread__pnHoopDesc__; +text: .text%__1cPcheckCastPPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadSNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: 
.text%__1cKReturnNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cMLinkResolverbNlinktime_resolve_virtual_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cNmethodOopDescbHhas_unloaded_classes_in_signature6FnMmethodHandle_pnGThread__i_; +text: .text%__1cPindOffset32OperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cPindOffset32OperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cQmark_inner_loops6FpnIPhaseCFG_pnFBlock__v_: block.o; +text: .text%__1cPindOffset32OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cPindOffset32OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cPindOffset32OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOemit_d32_reloc6FrnKCodeBuffer_inJrelocInfoJrelocType_i_v_; +text: .text%__1cJloadLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadSNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_; +text: .text%__1cKjmpConNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeFis_If6M_pnGIfNode__: memnode.o; +text: .text%__1cLBuildCutout2t6MpnIGraphKit_pnENode_ff_v_; +text: .text%__1cLBuildCutout2T6M_v_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: graphKit.o; +text: .text%__1cPDictionaryEntrybAcontains_protection_domain6kMpnHoopDesc__i_; +text: .text%__1cNPhaseRegAllocKoffset2reg6kMi_nHOptoRegEName__; +text: .text%__1cFParseRensure_memory_phi6Mii_pnHPhiNode__; +text: .text%__1cHTypeAryCeq6kMpknEType__i_; +text: .text%__1cMorI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNloadRangeNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIAddINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOPhaseIdealLoopIsink_use6MpnENode_2_v_; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: memnode.o; +text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_; +text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_; +text: .text%__1cTStackWalkCompPolicyRcompilation_level6MnMmethodHandle_i_i_; +text: .text%__1cIMachNodeTmay_be_short_branch6kM_i_: ad_i486_misc.o; +text: .text%__1cKRegionNodeGpinned6kM_i_: loopnode.o; +text: .text%__1cJloadINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKBranchDataNis_BranchData6M_i_: ciMethodData.o; +text: .text%__1cJloadBNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKMemBarNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIJumpDataLis_JumpData6M_i_: ciMethodData.o; +text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: frame.o; +text: .text%__1cJxRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__; +text: .text%__1cUSafepointSynchronizeOsafepoint_safe6FpnKJavaThread_nPJavaThreadState__i_; +text: .text%__1cUThreadSafepointStateXexamine_state_of_thread6Mi_v_; +text: .text%__1cNsubI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMethodLivenessKBasicBlockQcompute_gen_kill6MpnIciMethod__v_; +text: .text%__1cOMethodLivenessKBasicBlock2t6Mp0ii_v_; +text: .text%__1cOcompI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cJcmpOpOperFccode6kM_i_: ad_i486_clone.o; +text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__; +text: 
.text%__1cIciMethodRget_flow_analysis6M_pnKciTypeFlow__; +text: .text%__1cENodeHdel_out6Mp0_v_: ifnode.o; +text: .text%__1cEUTF8Ounicode_length6Fpkci_i_; +text: .text%__1cRSignatureIteratorSskip_optional_size6M_v_; +text: .text%__1cKjmpDirNodeOis_pc_relative6kM_i_: ad_i486_misc.o; +text: .text%__1cKjmpDirNodeTmay_be_short_branch6kM_i_: ad_i486_misc.o; +text: .text%__1cKRegionNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFParseKdo_get_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cScompP_mem_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapSget_basic_block_at6kMi_pnKBasicBlock__; +text: .text%__1cJleaP8NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cISubINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cRaddI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKReturnNodeKmatch_edge6kMI_I_; +text: .text%__1cRshrI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPcmpFastLockNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowGJsrSetNapply_control6Mp0pnQciByteCodeStream_pn0ALStateVector__v_; +text: .text%__1cPClassFileParserWparse_field_attributes6MnSconstantPoolHandle_iHpHpi2pnPtypeArrayHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbCverify_legal_field_modifiers6MiipnGThread__v_; +text: .text%__1cKReturnNodeGOpcode6kM_i_; +text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_; +text: .text%__1cNchunk_oops_do6FpnKOopClosure_pnFChunk_pc_I_: handles.o; +text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_virtual_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cMLinkResolverUresolve_virtual_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cKstorePNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_; +text: .text%__1cFKlassXcan_be_statically_bound6FpnNmethodOopDesc__i_; +text: .text%__1cQjava_lang_StringMbasic_create6FpnQtypeArrayOopDesc_ipnGThread__nGHandle__; +text: .text%__1cIMachNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIMachNodeOpipeline_class6F_pknIPipeline__; +text: .text%__1cUParallelScavengeHeapNtlab_capacity6kM_I_; +text: .text%__1cKjmpConNodeOis_pc_relative6kM_i_: ad_i486_misc.o; +text: .text%__1cKjmpConNodeTmay_be_short_branch6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: connode.o; +text: .text%__1cPshlI_eReg_1NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachNodeHtwo_adr6kM_I_: machnode.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: machnode.o; +text: .text%__1cHi2sNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: memnode.o; +text: .text%__1cENodeRis_cisc_alternate6kM_i_: machnode.o; +text: .text%__1cICallNodeSis_CallDynamicJava6kM_pknTCallDynamicJavaNode__: callnode.o; +text: .text%__1cKTypeRawPtrHget_con6kM_i_; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: machnode.o; +text: .text%__1cKStoreLNodeGOpcode6kM_i_; +text: .text%__1cPClassFileParserbIparse_constant_pool_fieldref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cScompU_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cHConNodeLout_RegMask6kM_rknHRegMask__: classes.o; +text: .text%__1cETypeRget_typeflow_type6FpnGciType__pk0_; +text: 
.text%__1cHPhiNodeEmake6FpnENode_2_p0_; +text: .text%__1cHAddNodePadd_of_identity6kMpknEType_3_3_; +text: .text%__1cQindOffset32XOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cRandI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMWarmCallInfoKalways_hot6F_p0_; +text: .text%__1cMWarmCallInfoGis_hot6kM_i_; +text: .text%__1cMTypeKlassPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cMPhaseChaitinKFind_const6kMI_I_; +text: .text%__1cMPhaseChaitinKFind_const6kMpknENode__I_; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopTransform.o; +text: .text%__1cHCompileFstart6kM_pnJStartNode__; +text: .text%JVM_GetCPFieldSignatureUTF; +text: .text%__1cTemit_java_to_interp6FrnKCodeBuffer__v_; +text: .text%__1cKRelocationJpack_data6M_i_: relocInfo.o; +text: .text%__1cKCodeBufferMstart_a_stub6M_v_; +text: .text%__1cKCodeBufferKend_a_stub6M_v_; +text: .text%__1cPThreadRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cPThreadRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cIBoolNodeHsize_of6kM_I_; +text: .text%__1cFParseUprofile_taken_branch6Mi_v_; +text: .text%__1cPmethodDataKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cPmethodDataKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMloadConLNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cTleaPIdxScaleOffNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFframeNis_glue_frame6kM_i_; +text: .text%__1cPcmpFastLockNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cXjava_lang_ref_ReferenceJnext_addr6FpnHoopDesc__p2_; +text: .text%__1cKstoreLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRMachNullCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: machnode.o; +text: .text%__1cICodeHeapLheader_size6F_I_; +text: .text%__1cScompU_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_: methodDataOop.o; +text: .text%__1cXjava_lang_ref_ReferencePdiscovered_addr6FpnHoopDesc__p2_; +text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cTleaPIdxScaleOffNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJTypeTupleKmake_range6FpnLciSignature__pk0_; +text: .text%__1cPcheckCastPPNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJStoreNodeSIdeal_masked_input6MpnIPhaseGVN_I_pnENode__; +text: .text%__1cIAddINodeIadd_ring6kMpknEType_3_3_; +text: .text%jni_DeleteLocalRef: jni.o; +text: .text%__1cJTypeTupleLmake_domain6FpnPciInstanceKlass_pnLciSignature__pk0_; +text: .text%__1cSMemBarCPUOrderNodeGOpcode6kM_i_; +text: .text%__1cWShouldNotReachHereNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_Halt6MpknENode__v_; +text: .text%__1cIHaltNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cWflagsReg_long_EQdDNEOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cMoutputStreamDput6Mc_v_; +text: .text%__1cJStoreNodeZIdeal_sign_extended_input6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cWThreadLocalAllocBufferVinitialize_statistics6M_v_; +text: .text%__1cWThreadLocalAllocBufferImax_size6F_I_; +text: .text%__1cWThreadLocalAllocBufferFclear6M_v_; +text: .text%__1cSInterpreterRuntimeDldc6FpnKJavaThread_i_v_; +text: .text%__1cLProfileDataOtranslate_from6Mp0_v_: ciMethodData.o; +text: .text%__1cFframeLreal_sender6kMpnLRegisterMap__0_; +text: .text%__1cTStackWalkCompPolicyIsenderOf6MpnGRFrame_pnNGrowableArray4C2___2_; +text: .text%__1cENodeLbottom_type6kM_pknEType__; +text: .text%__1cFframeTis_first_java_frame6kM_i_; 
+text: .text%__1cGRFrameGcaller6M_p0_; +text: .text%__1cGRFrameMset_distance6Mi_v_; +text: .text%__1cGRFrameKnew_RFrame6FnFframe_pnKJavaThread_kp0_4_; +text: .text%__1cHConNodeEmake6FpknEType__p0_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlassKlass.o; +text: .text%__1cWThreadLocalAllocBufferVaccumulate_statistics6MIi_v_; +text: .text%__1cWThreadLocalAllocBufferGresize6M_v_; +text: .text%__1cJloadPNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIGraphKitOtoo_many_traps6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cWflagsReg_long_LEGTOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__; +text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cQindOffset32XOperFscale6kM_i_: ad_i486.o; +text: .text%__1cUThreadSafepointStateMroll_forward6Mn0AMsuspend_type_pnHnmethod_i_v_; +text: .text%__1cGThreadQunboost_priority6Fp0_v_; +text: .text%__1cUThreadSafepointStateHrestart6M_v_; +text: .text%__1cIGraphKitTtoo_many_recompiles6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cGIfNodeHsize_of6kM_I_: classes.o; +text: .text%__1cZPhaseConservativeCoalesceKupdate_ifg6MIIpnIIndexSet_2_v_; +text: .text%__1cIIndexSetEswap6Mp0_v_; +text: .text%__1cZPhaseConservativeCoalesceMunion_helper6MpnENode_2II222pnFBlock_I_v_; +text: .text%__1cLcastP2INodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitZadd_exception_states_from6MpnIJVMState__v_; +text: .text%__1cScompP_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__; +text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__; +text: .text%__1cPcmpFastLockNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cScompP_mem_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__; +text: .text%__1cXcmpL_reg_flags_LTGENodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: relocInfo.o; +text: .text%__1cQCallLeafNoFPNodeGOpcode6kM_i_; +text: .text%__1cHCompileOcall_generator6MpnIciMethod_ipnIJVMState_if_pnNCallGenerator__; +text: .text%__1cNciCallProfileRapply_prof_factor6Mf_v_; +text: .text%__1cIciMethodTcall_profile_at_bci6Mi_nNciCallProfile__; +text: .text%__1cHCompileOfind_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cXmembar_acquire_lockNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: multnode.o; +text: .text%__1cENodeHdel_out6Mp0_v_: compile.o; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_: codeBlob.o; +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_kpnGRFrame__v_; +text: .text%__1cMLinkResolverbEruntime_resolve_special_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_4ipnGThread__v_; +text: .text%__1cMLinkResolverbFlinktime_resolve_special_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cMLinkResolverUresolve_special_call6FrnICallInfo_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cXindIndexScaleOffsetOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cXindIndexScaleOffsetOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cXindIndexScaleOffsetOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cPBytecode_invokeFindex6kM_i_; 
+text: .text%__1cNCatchProjNode2t6MpnENode_Ii_v_; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_: ciMethodData.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: cfgnode.o; +text: .text%__1cQciByteCodeStreamXget_method_holder_index6M_i_; +text: .text%__1cFParseHdo_call6M_v_; +text: .text%__1cIGraphKitWround_double_arguments6MpnIciMethod__v_; +text: .text%__1cIGraphKitTround_double_result6MpnIciMethod__v_; +text: .text%__1cFParseZcan_not_compile_call_site6MpnIciMethod_pnPciInstanceKlass__i_; +text: .text%__1cQciByteCodeStreambAget_declared_method_holder6M_pnHciKlass__; +text: .text%__1cFParseMprofile_call6MpnENode__v_; +text: .text%__1cKstorePNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMMachProjNodeHsize_of6kM_I_: classes.o; +text: .text%__1cPciInstanceKlassFsuper6M_p0_; +text: .text%__1cNCatchProjNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cFKlassOis_subclass_of6kMpnMklassOopDesc__i_; +text: .text%__1cSindIndexOffsetOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMeADXRegLOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cKciTypeFlowLStateVectorJdo_invoke6MpnQciByteCodeStream_i_v_; +text: .text%__1cHMulNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMtlsLoadPNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cNstoreImmPNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLPCTableNodeHsize_of6kM_I_: classes.o; +text: .text%__1cLPCTableNodeKis_PCTable6kM_pk0_: classes.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: cfgnode.o; +text: .text%__1cJStartNodeIis_Start6M_p0_: classes.o; +text: .text%__1cLPCTableNodeEhash6kM_I_; +text: .text%__1cMURShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNObjectMonitorEexit6MpnGThread__v_; +text: .text%__1cIProjNodeDcmp6kMrknENode__I_; +text: .text%__1cXindIndexScaleOffsetOperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cSmembar_acquireNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKDataLayoutKinitialize6MCHi_v_; +text: .text%__1cKDataLayoutPneeds_array_len6FC_i_; +text: .text%__1cScompP_mem_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNincI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMindirectOperKin_RegMask6kMi_pknHRegMask__; +text: .text%jni_GetObjectField: jni.o; +text: .text%__1cSCompareAndSwapNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cRInvocationCounterJset_carry6M_v_; +text: .text%__1cJloadBNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRAbstractAssemblerHbind_to6MrnFLabel_i_v_; +text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_; +text: .text%__1cOCompilerOracleOshould_exclude6FnMmethodHandle__i_; +text: .text%__1cMnabxRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNObjectMonitorFenter6MpnGThread__v_; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: ad_i486_misc.o; +text: .text%__1cFframeUentry_frame_is_first6kM_i_; +text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_; +text: .text%__1cHPhiNodeMslice_memory6kMpknHTypePtr__p0_; +text: .text%__1cKMemBarNodeEhash6kM_I_; +text: .text%__1cPstoreImmI16NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIciSymbolJmake_impl6Fpkc_p0_; +text: .text%__1cIciSymbolEmake6Fpkc_p0_; +text: .text%__1cXcmpL_reg_flags_LTGENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNloadConI0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNtestI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cIGraphKitNcast_not_null6MpnENode__2_; +text: .text%__1cKEntryPointFentry6kMnITosState__pC_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: nmethod.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: loopnode.o; +text: .text%__1cJleaP8NodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cKGCStatInfoMset_gc_usage6MinLMemoryUsage_i_v_; +text: .text%__1cWCallLeafNoFPDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cGRFrameLis_compiled6kM_i_: rframe.o; +text: .text%__1cRInterpretedRFrameOis_interpreted6kM_i_: rframe.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: callnode.o; +text: .text%__1cRsalI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPciInstanceKlassUget_canonical_holder6Mi_p0_; +text: .text%__1cENodeHdel_out6Mp0_v_: callnode.o; +text: .text%__1cISubINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cILoadNodeDcmp6kMrknENode__I_; +text: .text%__1cMDisplacementEbind6MrnFLabel_ipnRAbstractAssembler__v_; +text: .text%__1cTStackWalkCompPolicyMshouldInline6FnMmethodHandle_fi_pkc_; +text: .text%__1cTconstantPoolOopDescMklass_at_put6MipnMklassOopDesc__v_: constantPoolOop.o; +text: .text%__1cFTypeDEhash6kM_i_; +text: .text%__1cIGraphKitHjava_bc6kM_nJBytecodesECode__; +text: .text%__1cKarrayKlassLobject_size6kMi_i_; +text: .text%__1cIGraphKitNbuiltin_throw6MnODeoptimizationLDeoptReason_pnENode__v_; +text: .text%__1cIciMethodOshould_exclude6M_i_; +text: .text%__1cKInlineTreeWfind_subtree_from_root6Fp0pnIJVMState_pnIciMethod_i_1_; +text: .text%__1cIciMethodNshould_inline6M_i_; +text: .text%__1cKInlineTreeMshouldInline6kMpnIciMethod_irnNciCallProfile_pnMWarmCallInfo__pkc_; +text: .text%__1cTpass_initial_checks6FpnIciMethod_i1_i_; +text: .text%__1cOCompilerOracleNshould_inline6FnMmethodHandle__i_; +text: .text%__1cIciMethodbAinterpreter_throwout_count6kM_i_; +text: .text%__1cKInlineTreeMok_to_inline6MpnIciMethod_pnIJVMState_rnNciCallProfile_pnMWarmCallInfo__8_; +text: .text%__1cKInlineTreeNtry_to_inline6MpnIciMethod_irnNciCallProfile_pnMWarmCallInfo__pkc_; +text: .text%__1cVExceptionHandlerTableJadd_entry6MnRHandlerTableEntry__v_; +text: .text%__1cKstoreINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLRShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRaddI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTCreateExceptionNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOjmpLoopEndNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKstoreLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cGThreadXclear_pending_exception6M_v_; +text: .text%__1cLRethrowNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_; +text: .text%__1cFStateM_sub_Op_CmpP6MpknENode__v_; +text: .text%__1cMorI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cLTypeInstPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cIciMethodWwas_executed_more_than6Mi_i_; +text: .text%__1cRshrI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConINodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cKstoreLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHMonitorEwait6Mil_i_; +text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_; +text: .text%__1cTCompareAndSwapLNodeGOpcode6kM_i_; +text: .text%__1cScompP_mem_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__; +text: 
.text%__1cJAssemblerEmovl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cLOpaque1NodeGOpcode6kM_i_; +text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cPcmpFastLockNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLnaxRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNCompileBrokerYcheck_compilation_result6FnMmethodHandle_iippnHnmethod__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlass.o; +text: .text%__1cJloadCNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKJavaThreadbHcheck_and_handle_async_exceptions6Mi_v_; +text: .text%__1cbAjni_check_async_exceptions6FpnKJavaThread__v_: jni.o; +text: .text%__1cKciTypeFlowLStateVectorStype_meet_internal6FpnGciType_3p0_3_; +text: .text%__1cLeDXRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cJFieldTypeSskip_optional_size6FpnNsymbolOopDesc_pi_v_; +text: .text%__1cQjmpCon_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQjmpCon_shortNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_: objArrayKlassKlass.o; +text: .text%__1cIGraphKitZset_all_rewritable_memory6MpnENode__v_; +text: .text%__1cIGraphKitTset_all_memory_call6MpnENode__v_; +text: .text%__1cJloadSNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKjmpDirNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cKjmpDirNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIBoolNodeDcmp6kMrknENode__I_; +text: .text%__1cKjmpDirNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cScompI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cICodeBlobLoop_addr_at6kMi_ppnHoopDesc__; +text: .text%__1cRaddI_eReg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cNloadRangeNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJFieldTypeYis_valid_array_signature6FpnNsymbolOopDesc__i_; +text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__; +text: .text%__1cFKlassMnext_sibling6kM_p0_; +text: .text%__1cPCheckCastPPNodeJideal_reg6kM_I_: connode.o; +text: .text%__1cJloadCNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cOClearArrayNodeGOpcode6kM_i_; +text: .text%__1cPThreadLocalNodeGOpcode6kM_i_; +text: .text%__1cRcmpFastUnlockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMeADXRegLOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cFParseRbranch_prediction6Mrf_f_; +text: .text%__1cRandI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLeAXRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLeAXRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cPshlI_eReg_1NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowLStateVectorMdo_getstatic6MpnQciByteCodeStream__v_; +text: .text%__1cISubINodeDsub6kMpknEType_3_3_; +text: .text%__1cNCompileBrokerXcompilation_is_in_queue6FnMmethodHandle_i_i_; +text: .text%__1cSInterpreterRuntimebAfrequency_counter_overflow6FpnKJavaThread_pC_x_; +text: .text%__1cRindIndexScaleOperFscale6kM_i_: ad_i486.o; +text: .text%__1cNaddI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cLmethodKlassNoop_is_method6kM_i_: methodKlass.o; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceKlass.o; +text: .text%__1cENodeHdel_out6Mp0_v_: loopnode.o; +text: .text%__1cJxRegIOperJnum_edges6kM_I_: ad_i486.o; +text: 
.text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_: loopnode.o; +text: .text%__1cIParmNodeJideal_reg6kM_I_; +text: .text%__1cICodeHeapSallocated_capacity6kM_I_; +text: .text%__1cHciKlassNis_subtype_of6Mp0_i_; +text: .text%__1cNtestP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICHeapObj2n6FI_pv_; +text: .text%__1cENodeHdel_out6Mp0_v_: loopTransform.o; +text: .text%__1cQleaPIdxScaleNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cNloadConI0NodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cHTypeAryFempty6kM_i_; +text: .text%__1cKTypeAryPtrFempty6kM_i_; +text: .text%__1cCosFsleep6FpnGThread_xi_i_; +text: .text%__1cNgetTimeMillis6F_x_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: scopeDesc.o; +text: .text%__1cIos_sleep6Fxi_i_: os_solaris.o; +text: .text%__1cOPhaseIdealLoopOsplit_thru_phi6MpnENode_2i_2_; +text: .text%__1cENodeRlatency_from_uses6kMrnLBlock_Array_rnNGrowableArray4CI___i_; +text: .text%__1cXPhaseAggressiveCoalesceYinsert_copy_with_overlap6MpnFBlock_pnENode_II_v_; +text: .text%__1cCosOis_interrupted6FpnGThread_i_i_; +text: .text%__1cNaddI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_i_v_; +text: .text%__1cNloadKlassNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateN_sub_Op_LoadP6MpknENode__v_; +text: .text%__1cNmethodOopDescWload_signature_classes6FnMmethodHandle_pnGThread__i_; +text: .text%__1cTconstantPoolOopDescbDresolve_string_constants_impl6FnSconstantPoolHandle_pnGThread__v_; +text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__; +text: .text%__1cNCompileBrokerTcompile_method_base6FnMmethodHandle_ii1ipkcpnGThread__pnHnmethod__; +text: .text%__1cITypeLongFempty6kM_i_; +text: .text%__1cJloadINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFArenaRdestruct_contents6M_v_; +text: .text%__1cJloadSNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_CmpI6MpknENode__v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodKlass.o; +text: .text%__1cIAddPNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_rnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cPcmpFastLockNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNmulL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJStoreNodeEmake6FpnENode_22pknHTypePtr_2nJBasicType__p0_; +text: .text%__1cIGraphKitPstore_to_memory6MpnENode_22nJBasicType_i_2_; +text: .text%__1cHi2sNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_; +text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__; +text: .text%__1cLcastP2INodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompP_mem_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNincI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNdecI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIciObjectFklass6M_pnHciKlass__; +text: .text%__1cQPSGenerationPoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cQPSGenerationPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cQPSGenerationPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cFParseYprofile_not_taken_branch6M_v_; +text: .text%__1cLRShiftLNodeGOpcode6kM_i_; +text: .text%__1cKMemBarNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: 
.text%__1cMURShiftLNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: parse2.o; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: cfgnode.o; +text: .text%__1cIMulINodeGOpcode6kM_i_; +text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__; +text: .text%__1cIConDNodeGOpcode6kM_i_; +text: .text%__1cKInlineTreePshouldNotInline6kMpnIciMethod_pnMWarmCallInfo__pkc_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: bytecode.o; +text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__; +text: .text%__1cNandL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6FnUtypeArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cKciTypeFlowIcan_trap6MrnQciByteCodeStream__i_; +text: .text%__1cNxorI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cZload_long_indOffset32OperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cFParseNthrow_to_exit6MpnNSafePointNode__v_; +text: .text%__1cJloadLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKjmpConNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cKjmpConNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKjmpConNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cMLinkResolverbElinktime_resolve_static_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cLencode_Copy6FrnKCodeBuffer_ii_v_; +text: .text%__1cQjmpDir_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQjmpDir_shortNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_; +text: .text%__1cENodeIadd_prec6Mp0_v_; +text: .text%__1cLjmpConUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMachNodeSalignment_required6kM_i_: machnode.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: machnode.o; +text: .text%__1cKType_ArrayEgrow6MI_v_; +text: .text%__1cMorI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMorI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cUmembar_cpu_orderNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSconstMethodOopDescbEchecked_exceptions_length_addr6kM_pH_; +text: .text%__1cFParseFdo_if6MpnENode_2nIBoolTestEmask_2_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: ciMethodData.o; +text: .text%__1cLBoxLockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJxRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_; +text: .text%__1cIciMethod2t6MnMmethodHandle__v_; +text: .text%__1cIConINodeHget_int6kMpi_i_: classes.o; +text: .text%__1cIciMethodbHhas_unloaded_classes_in_signature6M_i_; +text: .text%__1cNinstanceKlassVis_same_class_package6FpnHoopDesc_pnNsymbolOopDesc_24_i_; +text: .text%__1cNloadRangeNodeIpipeline6kM_pknIPipeline__; +text: .text%jni_ExceptionOccurred: jni.o; +text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cHTypeAryEmake6FpknEType_pknHTypeInt__pk0_; +text: .text%__1cJloadCNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNsubI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cEUTF8Fequal6FpWi1i_i_; +text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_: statSampler.o; +text: .text%__1cLStatSamplerLsample_data6FpnMPerfDataList__v_; +text: .text%__1cRis_error_reported6F_i_; +text: 
.text%__1cMPeriodicTaskOreal_time_tick6FI_v_; +text: .text%__1cLStatSamplerOcollect_sample6F_v_; +text: .text%__1cPStatSamplerTaskEtask6M_v_: statSampler.o; +text: .text%__1cNWatcherThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cMPeriodicTaskMtime_to_wait6F_I_: thread.o; +text: .text%jni_GetByteArrayRegion: jni.o; +text: .text%__1cSCallLeafDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2I_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cOoop_RelocationHoops_do6MpFppnHoopDesc__v_v_; +text: .text%__1cKBufferBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cSdivD_reg_roundNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQleaPIdxScaleNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cKstorePNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateP_sub_Op_LShiftI6MpknENode__v_; +text: .text%jni_GetArrayLength: jni.o; +text: .text%__1cICodeHeapIcapacity6kM_I_; +text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cMCodeHeapPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cRresolve_and_patch6FppnHoopDesc__v_; +text: .text%__1cMorI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIPipelinePoperand_latency6kMIpk0_I_; +text: .text%__1cIMachOperEtype6kM_pknEType__; +text: .text%__1cVCompressedWriteStreamEgrow6M_v_; +text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_; +text: .text%__1cIregFOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cNloadRangeNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJAssemblerFpushl6MpnMRegisterImpl__v_; +text: .text%JVM_Write; +text: .text%__1cDhpiFwrite6FipkvI_I_: jvm.o; +text: .text%__1cPstoreImmI16NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPCheckCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJStartNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cKciTypeFlowLStateVectorLdo_getfield6MpnQciByteCodeStream__v_; +text: .text%__1cFParseMvisit_blocks6M_v_; +text: .text%__1cNsubI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMLinkResolverTresolve_static_call6FrnICallInfo_rnLKlassHandle_nMsymbolHandle_53iipnGThread__v_; +text: .text%__1cRsalI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTconvD2I_reg_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowGJsrSetSis_compatible_with6Mp1_i_; +text: .text%__1cOcompU_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadBNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLOpaque1NodeEhash6kM_I_; +text: .text%__1cXcmpL_reg_flags_LTGENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_: classes.o; +text: .text%__1cJcmpOpOperGnegate6M_v_: ad_i486_clone.o; +text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_2_v_; +text: .text%__1cGBitMapIset_from6M0_v_; +text: .text%__1cGThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%__1cKMemBarNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKjmpConNodeGnegate6M_v_: ad_i486_misc.o; +text: .text%__1cFMutexMjvm_raw_lock6M_v_; +text: .text%__1cFMutexOjvm_raw_unlock6M_v_; +text: .text%JVM_RawMonitorEnter; +text: .text%JVM_RawMonitorExit; +text: 
.text%__1cXmembar_release_lockNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cITypeLongEmake6Fxx_pk0_; +text: .text%__1cNaddL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKcmpOpUOperFccode6kM_i_: ad_i486_clone.o; +text: .text%__1cPClassFileParserUskip_over_field_name6MpciI_1_; +text: .text%__1cLjmpConUNodeOis_pc_relative6kM_i_: ad_i486_misc.o; +text: .text%__1cLjmpConUNodeTmay_be_short_branch6kM_i_: ad_i486_misc.o; +text: .text%__1cNstoreImmPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cURethrowExceptionNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFParseOreturn_current6MpnENode__v_; +text: .text%__1cETypeCeq6kMpk0_i_; +text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o; +text: .text%__1cHSubNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKReflectionVis_same_class_package6FpnMklassOopDesc_2_i_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnMklassOopDesc__i_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: bytecode.o; +text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_; +text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_; +text: .text%__1cRaddI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLeCXRegIOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_; +text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cLBoxLockNodeKis_BoxLock6kM_pk0_: classes.o; +text: .text%__1cLBoxLockNodeKstack_slot6FpnENode__nHOptoRegEName__; +text: .text%__1cKCastPPNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cFParseRoptimize_inlining6MpnIciMethod_ipnPciInstanceKlass_24irnKInlineTreeLInlineStyle_r2_v_; +text: .text%__1cQimprove_receiver6FpnPciInstanceKlass_pknLTypeInstPtr_ri_1_; +text: .text%__1cJloadPNodeFreloc6kM_i_; +text: .text%__1cQStackFrameStream2t6MpnKJavaThread_i_v_; +text: .text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__; +text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__; +text: .text%__1cLRShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFParseSmerge_memory_edges6MpnMMergeMemNode_ii_v_; +text: .text%__1cJloadBNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cFBlockTimplicit_null_check6MrnLBlock_Array_rnNGrowableArray4CI__pnENode_6_v_; +text: .text%__1cJTypeTupleFxdual6kM_pknEType__; +text: .text%__1cJTimeStampJupdate_to6Mx_v_; +text: .text%__1cJTimeStampGupdate6M_v_; +text: .text%__1cMloadConLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmulL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSmembar_releaseNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRaddI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cISubINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: loopnode.o; +text: .text%__1cRsarI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_AddI6MpknENode__v_; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: loopnode.o; +text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_; +text: .text%__1cLCastP2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIciMethodLis_accessor6kM_i_; +text: .text%__1cRScavengeRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cRScavengeRootsTaskFdo_it6MpnNGCTaskManager_I_v_; 
+text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_; +text: .text%__1cPsarI_eReg_1NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cXindIndexScaleOffsetOperOindex_position6kM_i_: ad_i486.o; +text: .text%__1cXindIndexScaleOffsetOperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cNSafePointNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRmethodDataOopDescLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cZload_long_indOffset32OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMloadConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cScompU_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHUNICODELutf8_length6FpHi_i_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: memnode.o; +text: .text%__1cRInterpretedRFrameKtop_vframe6kM_pnKjavaVFrame__: rframe.o; +text: .text%__1cRinterpretedVFrameDbcp6kM_pC_; +text: .text%__1cRinterpretedVFrameDbci6kM_i_; +text: .text%__1cMCallLeafNodeLis_CallLeaf6kM_pk0_: classes.o; +text: .text%__1cJAssemblerEpopl6MpnMRegisterImpl__v_; +text: .text%__1cKStoreCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLRethrowNodeGOpcode6kM_i_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: loopnode.o; +text: .text%__1cPsarI_eReg_1NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cZload_long_indOffset32OperFscale6kM_i_: ad_i486.o; +text: .text%__1cIciMethodVhas_balanced_monitors6M_i_; +text: .text%__1cFArenaEused6kM_I_; +text: .text%__1cFParseNdo_all_blocks6M_v_; +text: .text%__1cOParseGeneratorJcan_parse6FpnIciMethod_i_i_; +text: .text%__1cFParseLbuild_exits6M_v_; +text: .text%__1cFParseIdo_exits6M_v_; +text: .text%__1cFParse2t6MpnIJVMState_pnIciMethod_f_v_; +text: .text%__1cFParseQcreate_entry_map6M_pnNSafePointNode__; +text: .text%__1cOParseGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cFParseLinit_blocks6M_v_; +text: .text%__1cIciMethodPcan_be_compiled6M_i_; +text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_; +text: .text%__1cRRawBytecodeStream2t6MnMmethodHandle__v_; +text: .text%__1cNCallGeneratorKfor_inline6FpnIciMethod_f_p0_; +text: .text%__1cFParsePdo_method_entry6M_v_; +text: .text%__1cKstoreCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQComputeCallStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cTStackWalkCompPolicyPshouldNotInline6FnMmethodHandle__pkc_; +text: .text%__1cRaddI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTcompareAndSwapLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPSignatureStreamRas_symbol_or_null6M_pnNsymbolOopDesc__; +text: .text%__1cYDebugInformationRecorderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: .text%__1cHi2sNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIget_long6kM_x_; +text: .text%__1cQciByteCodeStreamSget_constant_index6kM_i_; +text: .text%__1cICHeapObj2k6Fpv_v_; +text: .text%__1cFArena2T6M_v_; +text: .text%jni_GetSuperclass: jni.o; +text: .text%__1cLklassVtableXvtable_accessibility_at6Mi_n0AKAccessType__; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmBNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: cfgnode.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: cfgnode.o; +text: .text%__1cNdecI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cScompU_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cNstoreImmINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cPJavaCallWrapper2t6MnMmethodHandle_nGHandle_pnJJavaValue_pnGThread__v_; +text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_; +text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cRJavaCallArgumentsKparameters6M_pi_; +text: .text%__1cCosbCstack_shadow_pages_available6FpnGThread_nMmethodHandle__i_; +text: .text%__1cRruntime_type_from6FpnJJavaValue__nJBasicType__: javaCalls.o; +text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_; +text: .text%__1cJMultiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPJavaCallWrapper2T6M_v_; +text: .text%__1cUGenericGrowableArrayEgrow6Mi_v_; +text: .text%__1cSCallLeafDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowFBlockPclone_loop_head6Mp0ip1pn0AGJsrSet__3_; +text: .text%__1cITypeFuncFxdual6kM_pknEType__; +text: .text%__1cNxorI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHMatcherPstack_alignment6F_I_; +text: .text%__1cKInlineTree2t6MpnHCompile_pk0pnIciMethod_pnIJVMState_if_v_; +text: .text%__1cMLinkResolverXresolve_klass_no_update6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_; +text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cMURShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLeSIRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cQSystemDictionaryVadd_loader_constraint6FnMsymbolHandle_nGHandle_2pnGThread__v_; +text: .text%__1cVLoaderConstraintTableJadd_entry6MnMsymbolHandle_pnMklassOopDesc_nGHandle_34pnGThread__i_; +text: .text%__1cNloadKlassNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSloadL_volatileNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapCpp6MpnNCellTypeState_2_v_; +text: .text%__1cDCHANprocess_class6FnLKlassHandle_pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__i_; +text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_; +text: .text%__1cHoopDescSslow_identity_hash6M_i_; +text: .text%__1cQindOffset32XOperLdisp_is_oop6kM_i_: ad_i486.o; +text: .text%__1cQindOffset32XOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cQindOffset32XOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cQindOffset32XOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cIMulINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cGciType2t6MnLKlassHandle__v_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodDataKlass.o; +text: .text%__1cHciKlass2t6MnLKlassHandle__v_; +text: .text%__1cLRShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKInlineTreeYcompute_callee_frequency6kMi_f_; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cKInlineTreebCbuild_inline_tree_for_callee6MpnIciMethod_pnIJVMState_i_p0_; +text: .text%__1cIciMethodbBinterpreter_call_site_count6Mi_i_; +text: .text%__1cHciKlassOis_subclass_of6Mp0_i_; +text: .text%__1cMtlsLoadPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_; +text: 
.text%__1cJGC_lockerNlock_critical6FpnKJavaThread__v_: jni.o; +text: .text%__1cJAssemblerElock6M_v_; +text: .text%__1cRindIndexScaleOperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cKRegionNodeEhash6kM_I_: loopnode.o; +text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__; +text: .text%__1cXmembar_release_lockNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHnmethodbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cLklassVtableKis_miranda6FpnNmethodOopDesc_pnPobjArrayOopDesc_pnMklassOopDesc__i_; +text: .text%__1cENodeHdel_out6Mp0_v_: library_call.o; +text: .text%__1cRandI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNIdealLoopTreeIset_nest6MI_i_; +text: .text%__1cNIdealLoopTreeMcounted_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cPshlI_eReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlass.o; +text: .text%__1cRalign_code_offset6Fi_I_; +text: .text%__1cRciVirtualCallDataOtranslate_from6MpnLProfileData__v_; +text: .text%__1cMLinkResolverVresolve_invokespecial6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cJAssemblerHcmpxchg6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: nmethod.o; +text: .text%__1cONMethodSweeperPprocess_nmethod6FpnHnmethod__v_; +text: .text%__1cKTypeAryPtrFxdual6kM_pknEType__; +text: .text%__1cRmethodDataOopDescJbci_to_dp6Mi_pC_; +text: .text%__1cOjmpLoopEndNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: ad_i486.o; +text: .text%__1cMVM_OperationPevaluation_mode6kM_n0AEMode__: vm_operations.o; +text: .text%__1cRsalI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cbAPSEvacuateFollowersClosureHdo_void6M_v_: psScavenge.o; +text: .text%__1cHnmethodVcleanup_inline_caches6M_v_; +text: .text%__1cIDivINodeGOpcode6kM_i_; +text: .text%__1cMURShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o; +text: .text%__1cIimmDOperJconstantD6kM_d_: ad_i486_clone.o; +text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2I_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cScompU_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cScompU_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_; +text: .text%jni_GetPrimitiveArrayCritical: jni.o; +text: .text%jni_ReleasePrimitiveArrayCritical: jni.o; +text: .text%__1cXcmpL_reg_flags_LEGTNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMrep_stosNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJStartNodeOis_block_start6kM_i_: callnode.o; +text: .text%__1cSsafePoint_pollNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_: cpCacheOop.o; +text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_: cpCacheOop.o; +text: .text%__1cOMethodLivenessKBasicBlockIload_two6Mi_v_; +text: .text%__1cIGraphKitMarray_length6MpnENode__2_; +text: .text%__1cJloadCNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXmembar_release_lockNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cMCreateExNodeJideal_reg6kM_I_: classes.o; +text: 
.text%__1cNincI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFArena2t6M_v_; +text: .text%__1cMindirectOperFscale6kM_i_: ad_i486.o; +text: .text%__1cHMulNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLsymbolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cLPCTableNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMCreateExNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cLRShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJcmpOpOperFclone6kM_pnIMachOper__; +text: .text%__1cIJVMState2t6Mi_v_; +text: .text%__1cKPerfStringKset_string6Mpkc_v_; +text: .text%__1cJStartNodeOis_block_start6kM_i_: classes.o; +text: .text%__1cPciObjArrayKlassSis_obj_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cNeFlagsRegOperFclone6kM_pnIMachOper__; +text: .text%__1cMMachCallNodeHis_Call6M_pnICallNode__: ad_i486_misc.o; +text: .text%__1cKTypeRawPtrCeq6kMpknEType__i_; +text: .text%__1cFStateQ_sub_Op_CreateEx6MpknENode__v_; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjectFactory.o; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciObjectFactory.o; +text: .text%__1cLjmpConUNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cLjmpConUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLjmpConUNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cLOpaque1NodeLbottom_type6kM_pknEType__: connode.o; +text: .text%__1cEDict2t6MpFpkv2_ipF2_i_v_; +text: .text%__1cEDict2T6M_v_; +text: .text%__1cNCatchProjNodeDcmp6kMrknENode__I_; +text: .text%__1cIGraphKitRmake_slow_call_ex6MpnENode_pnPciInstanceKlass__v_; +text: .text%__1cSCountedLoopEndNodeKstride_con6kM_i_; +text: .text%__1cRmethodDataOopDescKmileage_of6FpnNmethodOopDesc__i_; +text: .text%__1cQconstMethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cHCompileXin_preserve_stack_slots6M_I_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: symbolKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: symbolKlass.o; +text: .text%__1cPconvL2I_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLmethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cKTypeRawPtrFxmeet6kMpknEType__3_; +text: .text%__1cTJvmtiEventCollectorYunset_jvmti_thread_state6M_v_; +text: .text%__1cZload_long_indOffset32OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cZload_long_indOffset32OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cZload_long_indOffset32OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_; +text: .text%__1cIMinINodeGOpcode6kM_i_; +text: .text%__1cHMemNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cHMemNodeScalculate_adr_type6FpknEType_pknHTypePtr__6_; +text: .text%__1cHMatcherXadjust_incoming_stk_arg6MnHOptoRegEName__2_; +text: .text%__1cFStateM_sub_Op_CmpU6MpknENode__v_; +text: .text%__1cSPSKeepAliveClosureGdo_oop6MppnHoopDesc__v_: psScavenge.o; +text: .text%__1cFTypeDEmake6Fd_pk0_; +text: .text%jni_IsSameObject: jni.o; +text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_; +text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_; +text: .text%__1cQindOffset32XOperMdisp_as_type6kM_pknHTypePtr__: ad_i486.o; +text: 
.text%__1cQindOffset32XOperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cQindOffset32XOperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cNstoreImmBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTcompareAndSwapLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLlog2_intptr6Fi_i_: mulnode.o; +text: .text%__1cHOrINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMloadConDNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cScompP_mem_eRegNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cHMulNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRaddI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LEGTNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKJavaThreadQlast_java_vframe6MpnLRegisterMap__pnKjavaVFrame__; +text: .text%__1cKjavaVFrameNis_java_frame6kM_i_: vframe.o; +text: .text%__1cTStackWalkCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_; +text: .text%__1cTStackWalkCompPolicyVfindTopInlinableFrame6MpnNGrowableArray4CpnGRFrame____2_; +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_nMmethodHandle__v_; +text: .text%__1cRMachSafePointNodeLis_MachCall6M_pnMMachCallNode__: ad_i486_misc.o; +text: .text%__1cKStoreBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNandL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLCounterDataOis_CounterData6M_i_: methodDataOop.o; +text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_; +text: .text%__1cENodeHget_int6kMpi_i_; +text: .text%__1cOcompI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNinstanceKlassbDcheck_valid_for_instantiation6MipnGThread__v_; +text: .text%__1cYSurvivorMutableSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cYSurvivorMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cYSurvivorMutableSpacePoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cTleaPIdxScaleOffNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJcmpOpOperFequal6kM_i_: ad_i486_clone.o; +text: .text%__1cMorI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_; +text: .text%__1cKRegionNodeOhas_unique_phi6kM_pnHPhiNode__; +text: .text%__1cNnegI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRsarI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPsarI_eReg_1NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLRethrowNodeKmatch_edge6kMI_I_; +text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cPshrI_eReg_1NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_; +text: .text%__1cQVMOperationQdDueueLqueue_empty6Mi_i_; +text: .text%__1cTCreateExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFTypeFEhash6kM_i_; +text: .text%__1cVExceptionHandlerTableMadd_subtable6MipnNGrowableArray4Ci__2_v_; +text: .text%__1cQLibraryIntrinsicKis_virtual6kM_i_: library_call.o; +text: .text%__1cScompP_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNxorI_eRegNodeMcisc_operand6kM_i_: 
ad_i486_misc.o; +text: .text%__1cScompI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cScompI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cUCompressedReadStreamMraw_read_int6FrpC_i_: vframe.o; +text: .text%__1cRMachSafePointNodePis_MachCallJava6M_pnQMachCallJavaNode__: ad_i486_misc.o; +text: .text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_: classes.o; +text: .text%__1cIimmIOperFclone6kM_pnIMachOper__; +text: .text%__1cJleaP8NodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cIciObject2t6M_v_; +text: .text%__1cMloadConINodeFclone6kM_pnENode__; +text: .text%__1cIMulLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIGraphKitYcombine_exception_states6MpnNSafePointNode_2_v_; +text: .text%__1cNinstanceKlassPlink_class_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cMciMethodData2t6M_v_; +text: .text%__1cPciObjectFactoryUget_empty_methodData6M_pnMciMethodData__; +text: .text%__1cRitableMethodEntryKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cRaddL_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cJloadBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIHaltNodeEhash6kM_I_: classes.o; +text: .text%__1cNmulL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQinit_input_masks6FIrnHRegMask_1_p0_: matcher.o; +text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_; +text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_; +text: .text%__1cNIdealLoopTreeObeautify_loops6MpnOPhaseIdealLoop__i_; +text: .text%__1cOjmpLoopEndNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_; +text: .text%__1cMrep_stosNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_; +text: .text%__1cFStateO_sub_Op_StoreP6MpknENode__v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: phaseX.o; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: callnode.o; +text: .text%__1cPshlI_eReg_1NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__; +text: .text%__1cSindIndexOffsetOperFscale6kM_i_: ad_i486.o; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: cfgnode.o; +text: .text%__1cNaddL_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_: jni.o; +text: .text%__1cMMutableSpaceFclear6M_v_; +text: .text%__1cQjmpCon_shortNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cQjmpCon_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cQjmpCon_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cKjmpConNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cPshrI_eReg_1NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMindirectOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cMindirectOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cMindirectOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cYmulI_imm_RShift_highNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPstoreImmI16NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: 
.text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_; +text: .text%__1cUParallelScavengeHeapMmem_allocate6MIii_pnIHeapWord__; +text: .text%__1cNstoreImmBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_; +text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_; +text: .text%__1cIGraphKitXset_edges_for_java_call6MpnMCallJavaNode_i_v_; +text: .text%__1cQciTypeArrayKlassTis_type_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cIVerifierRshould_verify_for6FpnHoopDesc__i_; +text: .text%__1cKcmpOpUOperGnegate6M_v_: ad_i486_clone.o; +text: .text%__1cLjmpConUNodeGnegate6M_v_: ad_i486_misc.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: loopnode.o; +text: .text%__1cRandI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKstoreBNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapTmerge_state_vectors6MpnNCellTypeState_2_i_; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_2_v_; +text: .text%__1cNnegI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitZset_results_for_java_call6MpnMCallJavaNode__pnENode__; +text: .text%__1cOstackSlotDOperEtype6kM_pknEType__: ad_i486.o; +text: .text%JVM_GetMethodIxModifiers; +text: .text%__1cNSCMemProjNodeGOpcode6kM_i_; +text: .text%__1cNandL_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cJleaP8NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQjava_lang_StringGequals6FpnHoopDesc_pHi_i_; +text: .text%__1cENodeGis_Sub6M_pnHSubNode__: classes.o; +text: .text%__1cTcompareAndSwapLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cScompU_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cScompU_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cScompU_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cYmulI_imm_RShift_highNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPcmpFastLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNinstanceKlassbCfind_local_field_from_offset6kMiipnPfieldDescriptor__i_; +text: .text%__1cFStateO_sub_Op_StoreI6MpknENode__v_; +text: .text%__1cJLoadSNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%JVM_IsInterface; +text: .text%__1cNinstanceKlassWfind_field_from_offset6kMiipnPfieldDescriptor__i_; +text: .text%__1cPciInstanceKlassTget_field_by_offset6Mii_pnHciField__; +text: .text%__1cFStateM_sub_Op_RegL6MpknENode__v_; +text: .text%__1cRMachNullCheckNodeLout_RegMask6kM_rknHRegMask__: machnode.o; +text: .text%__1cRshrI_eReg_immNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cScompP_mem_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadSNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cXconvI2L_reg_reg_zexNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMvframeStream2t6MpnKJavaThread_i_v_; +text: .text%__1cRsalI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJimmI0OperFclone6kM_pnIMachOper__; +text: .text%__1cNloadConI0NodeFclone6kM_pnENode__; +text: .text%__1cWflagsReg_long_LTGEOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cQciByteCodeStreamMget_constant6M_nKciConstant__; +text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__; +text: 
.text%__1cRMachNullCheckNode2t6MpnENode_2I_v_; +text: .text%__1cNMachIdealNodeJnum_opnds6kM_I_: machnode.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: machnode.o; +text: .text%__1cXconvI2L_reg_reg_zexNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLLShiftLNodeGOpcode6kM_i_; +text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_; +text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cEhash6Fpkc1_I_; +text: .text%__1cQput_after_lookup6FnMsymbolHandle_0ppnLNameSigHash__i_; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cZresource_reallocate_bytes6FpcII_0_; +text: .text%__1cLeCXRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cScompI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLCastP2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cUreloc_java_to_interp6F_I_; +text: .text%__1cTsize_java_to_interp6F_I_; +text: .text%__1cKstoreINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cScompU_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOstackSlotLOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: graphKit.o; +text: .text%__1cSCallLeafDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCallLeafDirectNodeRis_safepoint_node6kM_i_: ad_i486_misc.o; +text: .text%__1cQciByteCodeStreamPget_klass_index6M_i_; +text: .text%__1cQComputeCallStackHdo_void6M_v_: generateOopMap.o; +text: .text%__1cMnadxRegIOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cJCHAResultOis_monomorphic6kM_i_; +text: .text%__1cJCHAResult2t6MnLKlassHandle_nMsymbolHandle_2pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___n0E_i_v_; +text: .text%__1cIciMethodXfind_monomorphic_target6MpnHciKlass_22_p0_; +text: .text%__1cDCHAManalyze_call6FnLKlassHandle_11nMsymbolHandle_2_pnJCHAResult__; +text: .text%__1cRaddL_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKMemBarNode2t6M_v_; +text: .text%__1cHciField2t6MpnPfieldDescriptor__v_; +text: .text%__1cKMemBarNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMloadConFNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKPSYoungGenNused_in_bytes6kM_I_; +text: .text%__1cNSafepointBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_; +text: .text%__1cbCCompiledCodeSafepointHandlerYcaller_must_gc_arguments6kM_i_: safepoint.o; +text: .text%__1cUThreadSafepointStateYcaller_must_gc_arguments6kM_i_; +text: .text%__1cWstatic_stub_RelocationJpack_data6M_i_; +text: .text%__1cMloadConFNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSReferenceProcessorOprocess_phase36MppnHoopDesc_ipnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: .text%__1cSReferenceProcessorOprocess_phase26MppnHoopDesc_pnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSReferenceProcessorbAprocess_discovered_reflist6MppnHoopDesc_pnPReferencePolicy_i_v_; +text: .text%__1cSReferenceProcessorbAenqueue_discovered_reflist6MpnHoopDesc_p2_v_; +text: .text%__1cNandL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLLShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOFastUnlockNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_; +text: .text%__1cMnegF_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIModINodeGOpcode6kM_i_; +text: .text%__1cHUNICODEHas_utf86FpHi_pc_; +text: .text%__1cKcmpOpUOperNgreater_equal6kM_i_: 
ad_i486_clone.o; +text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cIMaxINodeGOpcode6kM_i_; +text: .text%__1cQjmpDir_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKjmpDirNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cQjmpDir_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cQjmpDir_shortNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConFNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKMemBarNodeJis_MemBar6kM_pk0_: classes.o; +text: .text%__1cIMachNodeKconst_size6kM_i_: machnode.o; +text: .text%__1cIMachNodeFreloc6kM_i_: machnode.o; +text: .text%__1cIMachNodeTmay_be_short_branch6kM_i_: machnode.o; +text: .text%__1cWImplicitExceptionTableGappend6MII_v_; +text: .text%__1cIGraphKitOinsert_mem_bar6MpnKMemBarNode__v_; +text: .text%__1cIGraphKitbBset_arguments_for_java_call6MpnMCallJavaNode__v_; +text: .text%__1cIGraphKitJpush_node6MnJBasicType_pnENode__v_: callGenerator.o; +text: .text%__1cNCallGeneratorCtf6kM_pknITypeFunc__; +text: .text%__1cNCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cLOptoRuntimebCcomplete_monitor_unlocking_C6FpnHoopDesc_pnJBasicLock__v_; +text: .text%__1cITypeNodeHis_Type6M_p0_: classes.o; +text: .text%__1cNdecI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHdel_out6Mp0_v_: chaitin.o; +text: .text%__1cYmulI_imm_RShift_highNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_; +text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_; +text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__; +text: .text%__1cLBoxLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTleaPIdxScaleOffNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNGCTaskManagerRset_resource_flag6MIi_v_; +text: .text%__1cScompI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cScompI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cScompI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFParseQarray_addressing6MnJBasicType_ippknEType__pnENode__; +text: .text%__1cHCompileSregister_intrinsic6MpnNCallGenerator__v_; +text: .text%__1cNmulL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMeADXRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cRcmpFastUnlockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQleaPIdxScaleNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cXcopy_u2_with_conversion6FpH0i_v_: classFileParser.o; +text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_; +text: .text%__1cLOptoRuntimebAcomplete_monitor_locking_C6FpnHoopDesc_pnJBasicLock_pnKJavaThread__v_; +text: .text%__1cSvframeStreamCommonbHskip_method_invoke_and_aux_frames6M_v_; +text: .text%__1cIGraphKitbMset_predefined_output_for_runtime_call6MpnENode_pnMMergeMemNode__v_; +text: .text%__1cNminI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cPciInstanceKlassLfind_method6MpnIciSymbol_2_pnIciMethod__; +text: .text%__1cJloadLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitJpush_node6MnJBasicType_pnENode__v_: parse1.o; +text: .text%__1cJCHAResultSmonomorphic_target6kM_nMmethodHandle__; +text: 
.text%__1cNCallGeneratorPfor_direct_call6FpnIciMethod__p0_; +text: .text%__1cTDirectCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cMWarmCallInfoLalways_cold6F_p0_; +text: .text%__1cNaddL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cMrep_stosNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFciEnvRfind_system_klass6MpnIciSymbol__pnHciKlass__; +text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_; +text: .text%__1cZCallInterpreterDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOClearArrayNodeKmatch_edge6kMI_I_; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: methodDataOop.o; +text: .text%__1cLConvI2LNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPThreadLocalNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLBlock_ArrayEgrow6MI_v_; +text: .text%__1cLConvL2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQciByteCodeStreamJget_klass6Mri_pnHciKlass__; +text: .text%__1cNandL_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNandL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHi2sNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMjniIdSupportNto_method_oop6FpnK_jmethodID__pnNmethodOopDesc__; +text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_; +text: .text%__1cIAndINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_; +text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: callnode.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: callnode.o; +text: .text%__1cOGenerateOopMapNrestore_state6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_; +text: .text%__1cPCheckCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cISubINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIAndINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cTCallDynamicJavaNodeGOpcode6kM_i_; +text: .text%__1cXcmpL_reg_flags_LEGTNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPCheckCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKRegionNodeOis_block_start6kM_i_: loopnode.o; +text: .text%__1cSloadL_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_; +text: .text%__1cJPhaseLiveHcompute6MI_v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: live.o; +text: .text%__1cIPhaseIFGEinit6MI_v_; +text: .text%__1cMPhaseChaitinQgather_lrg_masks6Mi_v_; +text: .text%__1cRjmpConU_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRjmpConU_shortNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cVmerge_point_too_heavy6FpnHCompile_pnENode__i_: loopopts.o; +text: .text%__1cMloadConPNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHRetNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: loopnode.o; +text: .text%__1cNandI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIAddLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFStateN_sub_Op_LoadI6MpknENode__v_; +text: .text%__1cPsarI_eReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXmembar_acquire_lockNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHCmpNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKstoreBNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: 
.text%__1cXconvI2L_reg_reg_zexNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: connode.o; +text: .text%__1cNinstanceKlassXmark_dependent_nmethods6MpnMklassOopDesc__i_; +text: .text%__1cOPhaseIdealLoopPis_counted_loop6MpnENode_pnNIdealLoopTree__2_; +text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: oopFactory.o; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: oopFactory.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: oopFactory.o; +text: .text%__1cOGenerateOopMapHset_var6MinNCellTypeState__v_; +text: .text%__1cOleaPIdxOffNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNmethodOopDescOis_initializer6kM_i_; +text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__; +text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_: universe.o; +text: .text%__1cNloadKlassNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIGraphKitPpush_pair_local6Mi_v_: parse2.o; +text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_; +text: .text%__1cPshlI_eReg_1NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPClassFileParserbHparse_constant_pool_integer_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKstoreBNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerFpushl6Mi_v_; +text: .text%__1cKmethodOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: reg_split.o; +text: .text%__1cMLinkResolverUresolve_invokestatic6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKklassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNxorI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNxorI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHMatcherMreturn_value6Fii_nLRegPair__; +text: .text%__1cNinstanceKlassScopy_static_fields6MpnSPSPromotionManager__v_; +text: .text%__1cSinstanceKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cbACallCompiledJavaDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRandI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSsafePoint_pollNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSloadL_volatileNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cLcastP2INodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: frame.o; +text: .text%__1cFStateR_sub_Op_LoadRange6MpknENode__v_; +text: .text%__1cOcompU_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cICmpLNodeDsub6kMpknEType_3_3_; +text: .text%__1cHMemNodeHsize_of6kM_I_; +text: .text%__1cYCallStaticJavaDirectNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cYCallStaticJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNRelocIteratorJset_limit6MpC_v_; +text: .text%__1cIMachNodeTmay_be_short_branch6kM_i_: ad_i486.o; +text: .text%__1cRsarI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cHPhiNodeDcmp6kMrknENode__I_; +text: .text%jni_SetObjectArrayElement: jni.o; +text: .text%__1cNnmethodLocker2T6M_v_; +text: .text%__1cVjava_lang_ClassLoaderGparent6FpnHoopDesc__2_; +text: .text%__1cLOpaque2NodeGOpcode6kM_i_; +text: 
.text%__1cNSignatureInfoIdo_array6Mii_v_: bytecode.o; +text: .text%__1cMloadConLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLCastP2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIGraphKitSclear_saved_ex_oop6FpnNSafePointNode__pnENode__; +text: .text%__1cIGraphKitTuse_exception_state6MpnNSafePointNode__pnENode__; +text: .text%__1cKstoreINodeFreloc6kM_i_; +text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_; +text: .text%__1cNnmethodLocker2t6MpnHnmethod__v_; +text: .text%__1cNaddL_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNaddL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitOmake_slow_call6MpknITypeFunc_pCpkcpnENode_88_8_; +text: .text%__1cPcheckCastPPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUGenericGrowableArray2t6MiipnEGrET_i_v_; +text: .text%__1cSReferenceProcessorSdiscover_reference6MpnHoopDesc_nNReferenceType__i_; +text: .text%__1cTStackWalkCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_; +text: .text%__1cRCompilationPolicybJreset_counter_for_back_branch_event6MnMmethodHandle__v_; +text: .text%__1cNnegI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJStartNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cKciTypeFlowHdo_flow6M_v_; +text: .text%__1cKciTypeFlowKmap_blocks6M_v_; +text: .text%__1cKciTypeFlowKflow_types6M_v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_i_v_; +text: .text%__1cKciTypeFlowLfind_ranges6M_v_; +text: .text%__1cKciTypeFlowXmark_known_range_starts6M_v_; +text: .text%__1cKciTypeFlow2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cKciTypeFlowPget_start_state6M_pkn0ALStateVector__; +text: .text%__1cXjava_lang_ref_ReferenceIset_next6FpnHoopDesc_2_v_; +text: .text%__1cOeFlagsRegUOperFclone6kM_pnIMachOper__; +text: .text%__1cSReferenceProcessorTget_discovered_list6MnNReferenceType__ppnHoopDesc__; +text: .text%__1cOPhaseIdealLoopRsplit_thru_region6MpnENode_2_2_; +text: .text%__1cOPhaseTransform2t6MnFPhaseLPhaseNumber__v_; +text: .text%__1cFKlassQup_cast_abstract6M_p0_; +text: .text%__1cFframebHnext_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_; +text: .text%__1cNsubL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMorI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_; +text: .text%__1cVPreserveExceptionMark2T6M_v_; +text: .text%__1cIciMethodJload_code6M_v_; +text: .text%__1cMciMethodDataJload_data6M_v_; +text: .text%__1cPconvL2I_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJimmI0OperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: multnode.o; +text: .text%__1cOCallRelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cPconvI2L_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJCMoveNodeLis_cmove_id6FpnOPhaseTransform_pnENode_44pnIBoolNode__4_; +text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_; +text: .text%JVM_GetCPClassNameUTF; +text: .text%__1cLConvI2LNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNIdealLoopTreeNDCE_loop_body6M_v_; +text: .text%__1cNIdealLoopTreeVadjust_loop_exit_prob6MpnOPhaseIdealLoop__v_; +text: .text%__1cJAssemblerEleal6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cKstoreCNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%jni_GetStringLength: jni.o; +text: .text%__1cICodeHeapPsearch_freelist6MI_pnJFreeBlock__; +text: .text%__1cICodeHeapIallocate6MI_pv_; +text: 
.text%__1cKstorePNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQSystemDictionarybAcompute_loader_lock_object6FnGHandle_pnGThread__1_; +text: .text%__1cLeAXRegIOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cFParseKdo_put_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cPmethodDataKlassRoop_is_methodData6kM_i_: methodDataKlass.o; +text: .text%__1cMciMethodData2t6MnQmethodDataHandle__v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodDataKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: methodDataKlass.o; +text: .text%__1cTcompareAndSwapLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOLibraryCallKitOgenerate_guard6MpnENode_pnKRegionNode_f_v_; +text: .text%__1cLOpaque1NodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKJNIHandlesLmake_global6FnGHandle_i_pnI_jobject__; +text: .text%__1cOClearArrayNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: jvm.o; +text: .text%__1cUPSGenerationCountersKupdate_all6M_v_: psGenerationCounters.o; +text: .text%__1cMoutputStream2t6Mi_v_; +text: .text%__1cWflagsReg_long_LTGEOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNloadRangeNodeFreloc6kM_i_; +text: .text%__1cFParseNpush_constant6MnKciConstant__i_; +text: .text%__1cMstringStream2t6MI_v_; +text: .text%__1cMstringStreamJas_string6M_pc_; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_: classes.o; +text: .text%__1cMstringStream2T6M_v_; +text: .text%__1cNaddL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMURShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMloadConDNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJrelocInfoKset_format6Mi_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: objArrayKlass.o; +text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_; +text: .text%__1cJTimeStampSticks_since_update6kM_x_; +text: .text%__1cISubLNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse2.o; +text: .text%__1cNminI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOcompP_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRconstantPoolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_; +text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__; +text: .text%__1cQVMOperationQdDueueSqueue_remove_front6Mi_pnMVM_Operation__; +text: .text%__1cLProfileDataSis_VirtualCallData6M_i_: ciMethodData.o; +text: .text%__1cJScopeDescGis_top6kM_i_; +text: .text%__1cMorI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cOjmpLoopEndNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKTypeRawPtrFempty6kM_i_; +text: .text%__1cTCallInterpreterNodeGOpcode6kM_i_; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cScompI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRxorI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_; +text: .text%__1cXmembar_acquire_lockNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNPrefetchQdDueueFclear6M_v_: psPromotionManager.o; +text: .text%__1cSPSPromotionManagerFreset6M_v_; +text: .text%__1cXconvI2L_reg_reg_zexNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cSPSPromotionManagerKflush_labs6M_v_; +text: .text%__1cRaddI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cIAndINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJloadSNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIregDOperJnum_edges6kM_I_: 
ad_i486.o; +text: .text%__1cKBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__; +text: .text%__1cSCompareAndSwapNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cICodeHeapPfollowing_block6MpnJFreeBlock__2_; +text: .text%__1cLRuntimeStubIis_alive6kM_i_: codeBlob.o; +text: .text%__1cFStateQ_sub_Op_URShiftI6MpknENode__v_; +text: .text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_; +text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fi_v_; +text: .text%__1cIciObjectMis_obj_array6M_i_: ciInstanceKlass.o; +text: .text%__1cFciEnvZcheck_klass_accessibility6MpnHciKlass_pnMklassOopDesc__i_; +text: .text%__1cCosRcurrent_thread_id6F_i_; +text: .text%__1cMTypeKlassPtrFxdual6kM_pknEType__; +text: .text%__1cHciKlassMis_interface6M_i_: ciObjArrayKlass.o; +text: .text%__1cNtestP_regNodeFreloc6kM_i_; +text: .text%__1cHCompileTset_cached_top_node6MpnENode__v_; +text: .text%__1cIGotoNodeGOpcode6kM_i_; +text: .text%__1cENodeMsetup_is_top6M_v_; +text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cOleaPIdxOffNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPconvL2I_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHCompilePneed_stack_bang6kMi_i_; +text: .text%__1cOMachPrologNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cULinearLeastSquareFitGupdate6Mdd_v_; +text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_; +text: .text%__1cNsubI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSmembar_acquireNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHRetNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cNobjArrayKlassPoop_is_objArray6kM_i_: objArrayKlass.o; +text: .text%__1cHRetNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKimmL32OperJconstantL6kM_x_: ad_i486_clone.o; +text: .text%__1cRindIndexScaleOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cRindIndexScaleOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cHRetNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRindIndexScaleOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cQleaPIdxScaleNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLklassVtableRinitialize_vtable6MpnGThread__v_; +text: .text%__1cOPhaseIdealLoopQset_subtree_ctrl6MpnENode__v_; +text: .text%__1cRxorI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOMethodLivenessKBasicBlockJstore_two6Mi_v_; +text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cJloadINodeIpeephole6MpnFBlock_ipnNPhaseRegAlloc_ri_pnIMachNode__; +text: .text%__1cJloadINodeFreloc6kM_i_; +text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cKReturnNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOleaPIdxOffNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__; +text: .text%__1cLklassVtableVinitialize_from_super6MnLKlassHandle__i_; +text: .text%__1cLklassVtableOcopy_vtable_to6MpnLvtableEntry__v_; +text: .text%__1cJloadBNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSCallLeafDirectNodeKmethod_set6Mi_v_; +text: .text%__1cKReturnNodeEhash6kM_I_: classes.o; +text: 
.text%__1cTleaPIdxScaleOffNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_; +text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cIPhaseIFGISquareUp6M_v_; +text: .text%__1cYmulI_imm_RShift_highNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LEGTNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNxorI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKKlass_vtbl2n6FIrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: klass.o; +text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: klass.o; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: klass.o; +text: .text%jni_GetStringUTFLength: jni.o; +text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_; +text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_; +text: .text%jni_GetStringUTFRegion: jni.o; +text: .text%__1cIAndINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: classes.o; +text: .text%__1cQPlaceholderTableJnew_entry6MipnNsymbolOopDesc_pnHoopDesc__pnQPlaceholderEntry__; +text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cQPlaceholderTableJadd_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cJcmpOpOperJnot_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cIAndINodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cKCMoveINodeGOpcode6kM_i_; +text: .text%__1cOMachEpilogNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: loopnode.o; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopnode.o; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: loopnode.o; +text: .text%__1cMPhaseChaitinSbuild_ifg_physical6MpnMResourceArea__I_; +text: .text%__1cMPhaseChaitinMreset_uf_map6MI_v_; +text: .text%__1cPsarI_eReg_1NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNPhaseCoalescePcoalesce_driver6M_v_; +text: .text%__1cHnmethodKpc_desc_at6MpCi_pnGPcDesc__; +text: .text%__1cHCompileQsync_stack_slots6kM_i_; +text: .text%__1cPindOffset32OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cJloadLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cUDebugInfoWriteStreamMwrite_handle6MpnI_jobject__v_; +text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cPconvI2D_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNIdealLoopTreeTcheck_inner_safepts6MpnOPhaseIdealLoop__v_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: assembler_i486.o; +text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_; +text: .text%__1cRaddI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNIdealLoopTreeUiteration_split_impl6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: 
.text%__1cNIdealLoopTreeOpolicy_peeling6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreebBpolicy_do_remove_empty_loop6MpnOPhaseIdealLoop__i_; +text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_; +text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_; +text: .text%JVM_InternString; +text: .text%__1cXcmpL_reg_flags_LEGTNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_; +text: .text%__1cCosGrandom6F_l_; +text: .text%__1cNget_next_hash6F_i_: synchronizer.o; +text: .text%__1cPshrI_eReg_1NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cUimpl_fp_store_helper6FpnKCodeBuffer_iiiiiii_i_: ad_i486.o; +text: .text%__1cVloadConL_low_onlyNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cXmembar_release_lockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOMacroAssemblerJincrement6MpnMRegisterImpl_i_v_; +text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cENodeHdel_out6Mp0_v_: callGenerator.o; +text: .text%__1cJCmpL3NodeGOpcode6kM_i_; +text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_; +text: .text%__1cPclear_hashtable6FppnLNameSigHash__v_; +text: .text%__1cWflagsReg_long_LEGTOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cEDictIdoubhash6M_v_; +text: .text%__1cUinitialize_hashtable6FppnLNameSigHash__v_; +text: .text%__1cRcmpOp_commuteOperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cScompU_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSshrL_eReg_1_31NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPCountedLoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRMachSafePointNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cUmembar_cpu_orderNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__; +text: .text%__1cNmodI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLRShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMMutableSpaceKinitialize6MnJMemRegion_i_v_; +text: .text%__1cKstoreLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%jni_SetIntField: jni.o; +text: .text%__1cENodeHis_Copy6kM_I_: ad_i486.o; +text: .text%__1cKstorePNodeFreloc6kM_i_; +text: .text%__1cNincI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPCountedLoopNodeHsize_of6kM_I_: classes.o; +text: .text%__1cNcmovI_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cTresource_free_bytes6FpcI_v_; +text: .text%__1cVLoaderConstraintTablePcheck_or_update6MnTinstanceKlassHandle_nGHandle_nMsymbolHandle__pkc_; +text: .text%__1cQSystemDictionaryRupdate_dictionary6FiIiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: dictionary.o; +text: .text%__1cQSystemDictionaryRcheck_constraints6FiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cQSystemDictionaryQfind_placeholder6FiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cIProjNodeJideal_reg6kM_I_; +text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_; +text: .text%__1cKDictionaryJnew_entry6MIpnMklassOopDesc_pnHoopDesc__pnPDictionaryEntry__; +text: .text%__1cVloadConL_low_onlyNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPstoreImmI16NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: instanceKlass.o; +text: .text%__1cWflagsReg_long_EQdDNEOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cIAndINodeKadd_opcode6kM_i_: classes.o; 
+text: .text%__1cIAndINodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cRandI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLRuntimeStubYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cTGeneratePairingInfoRpossible_gc_point6MpnOBytecodeStream__i_: ciMethod.o; +text: .text%__1cJLoadBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFKlassXsearch_secondary_supers6kMpnMklassOopDesc__i_; +text: .text%__1cNmodI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRshrI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJLoadSNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOGenerateOopMapOset_bbmark_bit6Mi_v_; +text: .text%__1cIGraphKitOhas_ex_handler6M_i_; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: ciMethodData.o; +text: .text%__1cRaddI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRsubI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopMdominated_by6MpnENode_2_v_; +text: .text%__1cPRoundDoubleNodeGOpcode6kM_i_; +text: .text%__1cRsarI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cICallNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_i_v_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: doCall.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: connode.o; +text: .text%__1cLklassVtableQfill_in_mirandas6Mri_v_; +text: .text%__1cLklassItableRinitialize_itable6M_v_; +text: .text%__1cZCallDynamicJavaDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSshlL_eReg_1_31NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cCosXthread_local_storage_at6Fi_pv_; +text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__; +text: .text%__1cLregFPR1OperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cQinstanceRefKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cSmembar_releaseNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cLeDIRegPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cLregFPR1OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cSmembar_acquireNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitRmerge_fast_memory6MpnENode_2i_v_; +text: .text%__1cFKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cSCompareAndSwapNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cSindIndexOffsetOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cSindIndexOffsetOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cSindIndexOffsetOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cNstoreImmBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIGraphKitOmake_merge_mem6MpnENode_22_v_; +text: .text%__1cFKlassWappend_to_sibling_list6M_v_; +text: .text%__1cIGraphKitHopt_iff6MpnENode_2_2_; +text: .text%__1cOcompP_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__; +text: .text%__1cSmembar_releaseNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: classes.o; +text: .text%__1cJLoadFNodeGOpcode6kM_i_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_2_v_; +text: .text%__1cFframeRis_compiled_frame6kMpi_i_; +text: .text%__1cTjava_lang_ThrowableNset_backtrace6FpnHoopDesc_2_v_; +text: 
.text%__1cKstoreCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMStartOSRNodeGOpcode6kM_i_; +text: .text%__1cHPhiNodeKmake_blank6FpnENode_2_p0_; +text: .text%__1cGThreadMis_VM_thread6kM_i_: thread.o; +text: .text%__1cTClassLoadingServiceScompute_class_size6FpnNinstanceKlass__I_; +text: .text%__1cTClassLoadingServiceTnotify_class_loaded6FpnNinstanceKlass_i_v_; +text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNmethodOopDescMsort_methods6FpnPobjArrayOopDesc_222_v_; +text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIVerifierQrelax_verify_for6FpnHoopDesc__i_; +text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_; +text: .text%__1cPClassFileStream2t6MpCipc_v_; +text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_; +text: .text%__1cNinstanceKlassQinit_implementor6M_v_; +text: .text%__1cNinstanceKlassbBdo_local_static_fields_impl6FnTinstanceKlassHandle_pFpnPfieldDescriptor_pnGThread__v5_v_; +text: .text%__1cRconstantPoolKlassIallocate6MipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cLklassVtableQget_num_mirandas6FpnMklassOopDesc_pnPobjArrayOopDesc_4_i_; +text: .text%__1cKoopFactoryQnew_constantPool6FipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_; +text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_; +text: .text%__1cLklassVtablebKcompute_vtable_size_and_num_mirandas6Fri1pnMklassOopDesc_pnPobjArrayOopDesc_nLAccessFlags_pnHoopDesc_pnNsymbolOopDesc_5_v_; +text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_; +text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: constantPoolKlass.o; +text: .text%__1cVjava_lang_ClassLoaderRis_trusted_loader6FpnHoopDesc__i_; +text: .text%__1cQSystemDictionaryQadd_to_hierarchy6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserNparse_methods6MnSconstantPoolHandle_ipnLAccessFlags_ppnPobjArrayOopDesc_66pnGThread__nOobjArrayHandle__; +text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: constantPoolKlass.o; +text: .text%__1cPClassFileParserUcompute_oop_map_size6MnTinstanceKlassHandle_ii_i_; +text: .text%__1cPClassFileParserNfill_oop_maps6MnTinstanceKlassHandle_ii_v_; +text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__; +text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cPClassFileParserYcheck_super_class_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserMparse_fields6MnSconstantPoolHandle_ipnUFieldAllocationCount_pnOobjArrayHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_; +text: 
.text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: constantPoolKlass.o; +text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__; +text: .text%__1cPClassFileParserQparse_interfaces6MnSconstantPoolHandle_nGHandle_2pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserOparseClassFile6MnMsymbolHandle_nGHandle_2r1pnGThread__nTinstanceKlassHandle__; +text: .text%__1cQSystemDictionaryVresolve_super_or_fail6FnMsymbolHandle_1nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cLloadSSFNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNObjectMonitorHis_busy6kM_i_; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceKlass.o; +text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlass.o; +text: .text%__1cSindIndexOffsetOperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cSThreadProfilerMark2T6M_v_; +text: .text%__1cJEventMark2t6MpkcE_v_: classLoader.o; +text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_; +text: .text%__1cFVTuneOend_class_load6F_v_; +text: .text%__1cFVTuneQstart_class_load6F_v_; +text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cENodeHis_Call6M_pnICallNode__: loopnode.o; +text: .text%__1cQSystemDictionaryRfind_shared_class6FnMsymbolHandle__pnMklassOopDesc__; +text: .text%__1cQSystemDictionaryRload_shared_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_; +text: .text%__1cHMatcherNfind_receiver6Fi_nFVMRegEName__; +text: .text%__1cNnegI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframeZinterpreter_frame_set_bcx6Mi_v_; +text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cENodeHdel_out6Mp0_v_: generateOptoStub.o; +text: .text%__1cRsubI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNandI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cILoopNode2t6MpnENode_2_v_; +text: .text%__1cVeADXRegL_low_onlyOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cENodeGis_Con6kM_I_: memnode.o; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: debugInfo.o; +text: .text%__1cMPhaseIterGVNIoptimize6M_v_; +text: .text%__1cJloadFNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQPackageHashtableMcompute_hash6Mpkci_I_: classLoader.o; +text: .text%__1cOMethodLivenessRinit_basic_blocks6M_v_; +text: .text%__1cOMethodLivenessNinit_gen_kill6M_v_; +text: .text%__1cWCallLeafNoFPDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOMethodLivenessSpropagate_liveness6M_v_; +text: .text%__1cOMethodLiveness2t6MpnFArena_pnIciMethod__v_; +text: .text%__1cOMethodLivenessQcompute_liveness6M_v_; +text: .text%__1cHCompilebAvarargs_C_out_slots_killed6kM_I_; +text: .text%__1cSMemBarVolatileNodeGOpcode6kM_i_; +text: .text%__1cTMachCallRuntimeNodeSis_MachCallRuntime6M_p0_: ad_i486_misc.o; +text: .text%__1cMloadConPNodeFreloc6kM_i_; +text: .text%__1cSTailCalljmpIndNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_; +text: 
.text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: cpCacheKlass.o; +text: .text%__1cNinstanceKlassLverify_code6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cIVerifierRverify_byte_codes6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_; +text: .text%__1cNinstanceKlassWadd_loader_constraints6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_; +text: .text%__1cIRewriterScompute_index_maps6FnSconstantPoolHandle_rpnIintArray_rpnIintStack__v_; +text: .text%__1cKoopFactoryVnew_constantPoolCache6FipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cIintArray2t6Mki1_v_: rewriter.o; +text: .text%__1cWconstantPoolCacheKlassIallocate6MipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: cpCacheKlass.o; +text: .text%__1cIRewriterXnew_constant_pool_cache6FrnIintArray_pnGThread__nXconstantPoolCacheHandle__; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: cpCacheKlass.o; +text: .text%__1cRmethodDataOopDescJis_mature6kM_i_; +text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__; +text: .text%__1cQciByteCodeStreamUis_unresolved_string6kM_i_; +text: .text%__1cFciEnvUis_unresolved_string6kMpnPciInstanceKlass_i_i_; +text: .text%JVM_GetMethodIxExceptionIndexes; +text: .text%JVM_GetMethodIxSignatureUTF; +text: .text%JVM_GetMethodIxExceptionsCount; +text: .text%JVM_GetMethodIxMaxStack; +text: .text%JVM_GetMethodIxByteCode; +text: .text%JVM_GetMethodIxArgsSize; +text: .text%JVM_GetMethodIxByteCodeLength; +text: .text%__1cFciEnvZis_unresolved_string_impl6kMpnNinstanceKlass_i_i_; +text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cKTypeRawPtrEmake6FpC_pk0_; +text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cFParseXcatch_inline_exceptions6MpnNSafePointNode__v_; +text: .text%__1cNmulL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNminI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__: classes.o; +text: .text%__1cYciExceptionHandlerStreamPcount_remaining6M_i_; +text: .text%__1cPconvI2L_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSshrL_eReg_1_31NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJLoadLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cPshrI_eReg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cWconstantPoolCacheKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cYmulI_imm_RShift_highNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHMatcherUc_calling_convention6FpnLRegPair_I_v_; +text: .text%__1cPCallRuntimeNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__: classes.o; +text: .text%__1cHAddNodeGis_Add6kM_pk0_: classes.o; +text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_; +text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_; +text: .text%__1cCosRelapsed_frequency6F_x_; +text: 
.text%__1cRshrI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMelapsedTimerHseconds6kM_d_; +text: .text%__1cRaddL_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNaddI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIciObjectMhas_encoding6M_i_; +text: .text%__1cSloadL_volatileNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNandI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cQPackageHashtableJget_entry6MiIpkcI_pnLPackageInfo__: classLoader.o; +text: .text%__1cLClassLoaderOlookup_package6Fpkc_pnLPackageInfo__; +text: .text%__1cLeDIRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cENodeUdepends_only_on_test6kM_i_: connode.o; +text: .text%__1cFStateR_sub_Op_LoadKlass6MpknENode__v_; +text: .text%__1cQVMOperationQdDueueNqueue_oops_do6MipnKOopClosure__v_; +text: .text%__1cUCallCompiledJavaNodeGOpcode6kM_i_; +text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_; +text: .text%__1cUmembar_cpu_orderNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPClassFileParserbSparse_constant_pool_interfacemethodref_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cRsalI_eReg_immNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLClassLoaderLadd_package6Fpkci_i_; +text: .text%__1cNloadKlassNodeFreloc6kM_i_; +text: .text%__1cFStateV_sub_Op_MemBarRelease6MpknENode__v_; +text: .text%__1cMloadConLNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cHMatcherQpost_fast_unlock6FpknENode__i_; +text: .text%__1cZInterpreterMacroAssemblerKverify_oop6MpnMRegisterImpl_nITosState__v_; +text: .text%__1cFStateM_sub_Op_SubI6MpknENode__v_; +text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_; +text: .text%__1cLeSIRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKciTypeFlowLStateVectorMdo_putstatic6MpnQciByteCodeStream__v_; +text: .text%__1cIRootNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cIXorINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNaddP_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cLregFPR1OperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: instanceKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlass.o; +text: .text%__1cKciTypeFlowLStateVectorGdo_ldc6MpnQciByteCodeStream__v_; +text: .text%__1cbDReferenceProcessorInitializerIis_clean6kM_v_: concurrentMarkSweepGeneration.o; +text: .text%__1cKManagementJtimestamp6F_x_; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: cfgnode.o; +text: .text%__1cIPSOldGenPupdate_counters6M_v_; +text: .text%__1cNSharedRuntimebOraw_exception_handler_for_return_address6FpC_1_; +text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_; +text: .text%__1cNaddI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cHOrINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cMnegF_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMnegF_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompP_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKciTypeFlowLStateVectorLdo_putfield6MpnQciByteCodeStream__v_; +text: .text%__1cXjava_lang_ref_ReferenceWpending_list_lock_addr6F_ppnHoopDesc__; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstance.o; +text: .text%__1cSshlL_eReg_1_31NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%signalHandler; +text: 
.text%JVM_handle_solaris_signal; +text: .text%__1cMnegF_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassRclass_initializer6M_pnNmethodOopDesc__; +text: .text%__1cNinstanceKlassbOset_initialization_state_and_notify_impl6FnTinstanceKlassHandle_n0AKClassState_pnGThread__v_; +text: .text%__1cNinstanceKlassbJset_initialization_state_and_notify6Mn0AKClassState_pnGThread__v_; +text: .text%__1cNinstanceKlassWcall_class_initializer6MpnGThread__v_; +text: .text%__1cKcopy_table6FppC1i_v_: interpreter.o; +text: .text%__1cMtlsLoadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQVMOperationQdDueueLremove_next6M_pnMVM_Operation__; +text: .text%__1cOPhaseIdealLoopNreorg_offsets6MpnNIdealLoopTree__v_; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopopts.o; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_i_v_; +text: .text%__1cPcmpFastLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKBufferBlob2t6Mpkci_v_; +text: .text%__1cKBufferBlobGcreate6Fpkci_p0_; +text: .text%__1cNaddL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKBufferBlob2n6FII_pv_; +text: .text%__1cICodeBlob2t6Mpkcii_v_; +text: .text%__1cMeBCXRegLOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_; +text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: compile.o; +text: .text%__1cScompP_mem_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCCompiledCodeSafepointHandlerbDhandle_polling_page_exception6M_pC_; +text: .text%__1cKJavaThreadUin_stack_yellow_zone6MpC_i_: os_solaris_i486.o; +text: .text%__1cFframebDsender_for_raw_compiled_frame6kMpnLRegisterMap__0_; +text: .text%__1cMVM_OperationSis_cheap_allocated6kM_i_: vm_operations.o; +text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__pC_; +text: .text%__1cKloadUBNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cLGCTaskQdDueueKinitialize6M_v_; +text: .text%__1cSCardTableExtensionbAscavenge_contents_parallel6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager_I_v_; +text: .text%__1cTOldToYoungRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cJStealTaskEname6M_pc_: psTasks.o; +text: .text%__1cTOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cJStealTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cJStealTask2t6Mi_v_; +text: .text%__1cNGCTaskManagerMnote_release6MI_v_; +text: .text%__1cHnmethodJcode_size6kM_i_: nmethod.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: memnode.o; +text: .text%__1cJcmpOpOperEless6kM_i_: ad_i486_clone.o; +text: .text%__1cSaddD_reg_roundNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSmembar_releaseNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cICodeHeapTmark_segmap_as_used6MII_v_; +text: .text%__1cJloadCNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cNsubL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJimmL0OperJconstantL6kM_x_: ad_i486_clone.o; +text: .text%__1cFParseWensure_phis_everywhere6M_v_; +text: .text%__1cLConvL2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNxorI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNandL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cJStartNodeHsize_of6kM_I_; +text: .text%__1cHMatcherFxform6MpnENode_i_2_; +text: .text%__1cHMatcherLfind_shared6MpnENode__v_; +text: .text%__1cTMachCallRuntimeNodePret_addr_offset6M_i_; +text: .text%__1cILRG_List2t6MI_v_; +text: .text%__1cILoopNodeHsize_of6kM_I_: classes.o; +text: .text%__1cEDict2t6MpFpkv2_ipF2_ipnFArena_i_v_; +text: .text%__1cGBundlePinitialize_nops6FppnIMachNode__v_; +text: .text%__1cOMachPrologNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNstoreImmPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLeAXRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cNsubL_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cHi2bNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cRmethodDataOopDescYcompute_extra_data_count6Fii_i_; +text: .text%__1cWCallLeafNoFPDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse3.o; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6MX_v_: jni.o; +text: .text%__1cNFingerprinterLfingerprint6M_X_: jni.o; +text: .text%__1cKimmI16OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cSshrL_eReg_1_31NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPstoreImmI16NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIemit_d166FrnKCodeBuffer_i_v_; +text: .text%__1cRxorI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRsubI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJeRegPOperFclone6kM_pnIMachOper__; +text: .text%__1cJAssemblerEmovl6MnHAddress_i_v_; +text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cFStateT_sub_Op_CheckCastPP6MpknENode__v_; +text: .text%__1cKciTypeFlowLStateVectorJhalf_type6FpnGciType__3_: ciTypeFlow.o; +text: .text%__1cJAssemblerDnop6M_v_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: connode.o; +text: .text%__1cMFastLockNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%JVM_GetFieldIxModifiers; +text: .text%__1cNmodI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKReturnNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRsarI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFParseKarray_load6MnJBasicType__v_; +text: .text%__1cRjmpConU_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cRjmpConU_shortNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cLjmpConUNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cRjmpConU_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKcmpOpUOperFclone6kM_pnIMachOper__; +text: .text%__1cFStateO_sub_Op_StoreB6MpknENode__v_; +text: .text%__1cFTypeFCeq6kMpknEType__i_; +text: .text%__1cFParseNadd_safepoint6M_v_; +text: .text%__1cLOpaque2NodeEhash6kM_I_; +text: .text%JVM_IsConstructorIx; +text: .text%__1cIimmPOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConPNodeFclone6kM_pnENode__; +text: .text%__1cSshrL_eReg_1_31NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: connode.o; +text: .text%__1cRxorI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXmembar_release_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNinstanceKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__; 
+text: .text%__1cKadd_n_reqs6FpnENode_1_v_: graphKit.o; +text: .text%__1cNdecI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cILoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cICodeHeapLmerge_right6MpnJFreeBlock__v_; +text: .text%__1cIregDOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNsubI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXconvI2L_reg_reg_zexNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmethodOopDescTset_native_function6MpC_v_; +text: .text%__1cIPhaseIFGYCompute_Effective_Degree6M_v_; +text: .text%__1cMPhaseChaitinISimplify6M_v_; +text: .text%__1cMPhaseChaitinGSelect6M_I_; +text: .text%__1cMPhaseChaitinOcache_lrg_info6M_v_; +text: .text%__1cNtestU_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJScopeDescJstream_at6kMi_pnTDebugInfoReadStream__; +text: .text%__1cTDebugInfoReadStream2t6MpknHnmethod_i_v_; +text: .text%__1cLOptoRuntimeJstub_name6FpC_pkc_; +text: .text%__1cNSignatureInfoHdo_long6M_v_: frame.o; +text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_; +text: .text%__1cENodeHrm_prec6MI_v_; +text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cKciTypeFlowLStateVectorOmeet_exception6MpnPciInstanceKlass_pk1_i_; +text: .text%__1cKloadUBNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cWflagsReg_long_EQdDNEOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKciTypeFlowPflow_exceptions6MpnNGrowableArray4Cpn0AFBlock___pnNGrowableArray4CpnPciInstanceKlass___pn0ALStateVector__v_; +text: .text%__1cIBoolNodeZis_counted_loop_exit_test6M_i_; +text: .text%__1cPciInstanceKlassTis_java_lang_Object6M_i_; +text: .text%__1cRaddL_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXmembar_release_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cQsalI_eReg_CLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNnegI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompP_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cILoopNodeHsize_of6kM_I_: loopnode.o; +text: .text%__1cMPhaseChaitinFSplit6MI_I_; +text: .text%__1cZPhaseConservativeCoalesce2t6MrnMPhaseChaitin__v_; +text: .text%__1cMPhaseChaitinHcompact6M_v_; +text: .text%__1cMPhaseChaitinZcompress_uf_map_for_nodes6M_v_; +text: .text%__1cZPhaseConservativeCoalesceGverify6M_v_; +text: .text%__1cHTypePtrFxmeet6kMpknEType__3_; +text: .text%__1cOcompP_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cHMatcherQis_spillable_arg6Fi_i_; +text: .text%__1cSsafePoint_pollNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cHMatcherKcan_be_arg6Fi_i_; +text: .text%__1cSTailCalljmpIndNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIregFOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cTDebugInfoReadStreamLread_handle6M_nGHandle__; +text: .text%__1cJScopeDesc2t6MpknHnmethod_i_v_; +text: .text%__1cYcmpL_zero_flags_LEGTNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOcompiledVFrame2t6MpknFframe_pknLRegisterMap_pnKJavaThread_pnJScopeDesc__v_; +text: .text%__1cNSignatureInfoHdo_long6M_v_: bytecode.o; +text: .text%__1cXvirtual_call_RelocationJfirst_oop6M_pC_; +text: .text%__1cXvirtual_call_RelocationJoop_limit6M_pC_; +text: .text%__1cQComputeCallStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cHnmethodOis_java_method6kM_i_: nmethod.o; +text: .text%__1cNminI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSCallLeafDirectNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cYcmpL_zero_flags_LEGTNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompiledVFrameGis_top6kM_i_; +text: .text%__1cFStateQ_sub_Op_CallLeaf6MpknENode__v_; +text: .text%__1cKJNIHandlesOdestroy_global6FpnI_jobject_i_v_; +text: .text%__1cHi2sNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRNativeGeneralJumpQjump_destination6kM_pC_; +text: .text%__1cMoutputStreamFprint6MpkcE_v_; +text: .text%__1cIciObjectIis_klass6M_i_: ciInstance.o; +text: .text%__1cMoutputStreamMdo_vsnprintf6FpcIpkcpvirI_3_; +text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_; +text: .text%__1cRaddI_eReg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cITypeLongFxdual6kM_pknEType__; +text: .text%__1cSloadL_volatileNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_immNodeFreloc6kM_i_; +text: .text%__1cPcheckCastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSvframeStreamCommonYfill_from_compiled_frame6MpnHnmethod_i_v_; +text: .text%__1cHnmethodQis_native_method6kM_i_: nmethod.o; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: cfgnode.o; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: cfgnode.o; +text: .text%__1cTshrL_eReg_32_63NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: objArrayKlass.o; +text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_; +text: .text%__1cPshrI_eReg_1NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_; +text: .text%__1cJNode_ListEyank6MpnENode__v_; +text: .text%__1cQLRUMaxHeapPolicyWshould_clear_reference6MpnHoopDesc__i_; +text: .text%__1cIGraphKitNstore_barrier6MpnENode_22_v_; +text: .text%__1cMrep_stosNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cWflagsReg_long_LEGTOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cSThreadLocalStorageTpd_getTlsAccessMode6F_n0AQpd_tlsAccessMode__; +text: .text%__1cOMacroAssemblerKget_thread6MpnMRegisterImpl__v_; +text: .text%__1cKloadUBNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLRethrowNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHMonitorGnotify6M_i_; +text: .text%__1cIRootNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRcmpFastUnlockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIGraphKitNshared_unlock6MpnENode_2_v_; +text: .text%__1cNSafePointNodeLpop_monitor6M_v_; +text: .text%__1cLOptoRuntimebAcomplete_monitor_exit_Type6F_pknITypeFunc__; +text: .text%__1cIRootNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cFStateS_sub_Op_FastUnlock6MpknENode__v_; +text: .text%__1cJloadFNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOstackSlotFOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cSstore_to_stackslot6FrnKCodeBuffer_iii_v_; +text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_; +text: .text%__1cMnadxRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_: callnode.o; +text: .text%__1cIAndINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cUmembar_cpu_orderNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMloadConINodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: cfgnode.o; +text: .text%__1cPconvI2L_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_; +text: 
.text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cOjmpLoopEndNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRmulI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPsarI_eReg_1NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%jni_ExceptionCheck: jni.o; +text: .text%__1cURethrowExceptionNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cKstoreLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateP_sub_Op_RShiftI6MpknENode__v_; +text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_; +text: .text%__1cIMulLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cOcompP_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMvalue_of_loc6FppnHoopDesc__i_; +text: .text%__1cNSafePointNodeMpush_monitor6MpknMFastLockNode__v_; +text: .text%__1cTcompareAndSwapLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNandI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cTshrL_eReg_32_63NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSshlL_eReg_1_31NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKcmpOpUOperJnot_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cOcompP_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNSCMemProjNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cNandI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNaddP_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNstoreImmPNodeFreloc6kM_i_; +text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKtype2basic6FpknEType__nJBasicType__; +text: .text%__1cIAndLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cIAndLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cNstoreImmBNodeFreloc6kM_i_; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: jvm.o; +text: .text%JVM_Clone; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: jvm.o; +text: .text%__1cNcmovI_regNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cNloadKlassNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVeADXRegL_low_onlyOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: objArrayKlass.o; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: objArrayKlass.o; +text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: objArrayKlass.o; +text: .text%__1cMeBCXRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNimmI_1_31OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cLPhaseValues2T5B6M_v_; +text: .text%__1cNtestU_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJLoadCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRsubI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIJumpDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cMalloc_object6FpnH_jclass_pnGThread__pnPinstanceOopDesc__: jni.o; +text: .text%__1cIGraphKitOnull_check_oop6MpnKRegionNode_pnENode_i_4_; +text: .text%__1cRmulI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHOrINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKTypeRawPtrEmake6FnHTypePtrDPTR__pk0_; +text: 
.text%__1cKTypeAryPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cFKlassDLCA6Mp0_1_; +text: .text%__1cHciKlassVleast_common_ancestor6Mp0_1_; +text: .text%__1cPconvL2I_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJeRegLOperFclone6kM_pnIMachOper__; +text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__; +text: .text%__1cNstoreImmINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPshlI_eReg_1NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNmaxI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIMinINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cHnmethodVis_dependent_on_entry6MpnMklassOopDesc_2pnNmethodOopDesc__i_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlass.o; +text: .text%__1cKloadUBNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMTypeKlassPtrFxmeet6kMpknEType__3_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cICodeHeapPadd_to_freelist6MpnJHeapBlock__v_; +text: .text%__1cICodeHeapKdeallocate6Mpv_v_; +text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_; +text: .text%__1cJVectorSetGslamin6Mrk0_v_; +text: .text%__1cQsalI_eReg_CLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKNode_ArrayFclear6M_v_; +text: .text%__1cKBufferBlobEfree6Fp0_v_; +text: .text%__1cIRootNodeHis_Root6M_p0_: classes.o; +text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_: jni.o; +text: .text%__1cOMethodLivenessKBasicBlockPmerge_exception6MnGBitMap__i_; +text: .text%__1cNminI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNsubI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_; +text: .text%__1cMorI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNtestU_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%jni_NewObject: jni.o; +text: .text%__1cMTailCallNodeKmatch_edge6kMI_I_; +text: .text%__1cIciMethodRinstructions_size6M_i_; +text: .text%__1cRsarI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPshrI_eReg_1NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKStoreFNodeGOpcode6kM_i_; +text: .text%__1cFStateO_sub_Op_StoreC6MpknENode__v_; +text: .text%__1cNobjArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cFciEnvKcompile_id6M_I_; +text: .text%__1cRAbstractAssemblerFflush6M_v_; +text: .text%__1cJloadINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRaddL_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNloadConL0NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNsubL_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNsubL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompU_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cScompU_eReg_memNodeFreloc6kM_i_; +text: .text%__1cTleaPIdxScaleOffNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMURShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNSharedRuntimeQfind_callee_info6FpnKJavaThread_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cNsubL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSCallLeafDirectNodeFreloc6kM_i_; +text: .text%__1cNSharedRuntimeOresolve_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cNSharedRuntimeSresolve_sub_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_; +text: 
.text%__1cNcmovI_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cNcmovI_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cGIfNodeMdominated_by6MpnENode_pnMPhaseIterGVN__v_; +text: .text%__1cIMulDNodeGOpcode6kM_i_; +text: .text%__1cHMatcherPprior_fast_lock6FpknENode__i_; +text: .text%__1cFStateV_sub_Op_MemBarAcquire6MpknENode__v_; +text: .text%__1cLConvL2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMachEpilogNodeFreloc6kM_i_; +text: .text%__1cOMachEpilogNodeNis_MachEpilog6M_p0_: ad_i486.o; +text: .text%__1cOMachEpilogNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivLNodeGOpcode6kM_i_; +text: .text%__1cSstring_compareNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cSsafePoint_pollNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitNgen_checkcast6MpnENode_2p2_2_; +text: .text%__1cSindIndexOffsetOperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cLStrCompNodeGOpcode6kM_i_; +text: .text%__1cSindIndexOffsetOperOindex_position6kM_i_: ad_i486.o; +text: .text%__1cKRelocationJpack_data6M_i_: codeBlob.o; +text: .text%__1cGciTypeMis_classless6kM_i_: ciInstanceKlass.o; +text: .text%__1cLPcDescCacheKpc_desc_at6kMpnHnmethod_pCi_pnGPcDesc__; +text: .text%__1cXmembar_acquire_lockNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLloadSSFNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNaddP_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cYcmpL_zero_flags_LEGTNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEtemp6F_pnMRegisterImpl__; +text: .text%__1cPstoreImmI16NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframeRretrieve_receiver6MpnLRegisterMap__pnHoopDesc__; +text: .text%__1cJcmpOpOperNgreater_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__; +text: .text%__1cNmodL_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmulL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIDivINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJAssemblerDjmp6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cSloadL_volatileNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitRgen_subtype_check6MpnENode_2_2_; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_; +text: .text%__1cENodeHdel_out6Mp0_v_: parseHelper.o; +text: .text%__1cNandI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJTraceTime2t6MpkciipnMoutputStream__v_; +text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_; +text: .text%__1cOPhaseIdealLoopPbuild_loop_tree6M_v_; +text: .text%__1cLConvD2INodeGOpcode6kM_i_; +text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_; +text: .text%__1cOPhaseIdealLoop2t6MrnMPhaseIterGVN_pk0i_v_; +text: .text%__1cKciTypeFlowFRangeSprivate_copy_count6kMpn0AGJsrSet__i_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cXmembar_release_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXcmpL_reg_flags_LTGENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: frame.o; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: codeBlob.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: callnode.o; +text: .text%__1cNIdealLoopTreeMis_loop_exit6kMpnENode_pnOPhaseIdealLoop__2_; +text: 
.text%__1cJloadBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cZload_long_indOffset32OperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cZload_long_indOffset32OperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cNincI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJloadLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKstoreFNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cYcmpL_zero_flags_LEGTNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNxorI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNandL_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cNtestU_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: onStackReplacement.o; +text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__; +text: .text%__1cXcmpL_reg_flags_LEGTNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNincI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: loopnode.o; +text: .text%__1cNSafePointNodeKgrow_stack6MpnIJVMState_I_v_; +text: .text%__1cNxorI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cLRShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRsubI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cKBlock_ListGinsert6MIpnFBlock__v_; +text: .text%__1cECopyYconjoint_words_to_higher6FpnIHeapWord_2I_v_: block.o; +text: .text%__1cLRShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: loopnode.o; +text: .text%__1cFStateP_sub_Op_CastP2I6MpknENode__v_; +text: .text%__1cLCastP2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNaddL_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cKstoreCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_NewGlobalRef: jni.o; +text: .text%__1cKPSYoungGenRcapacity_in_bytes6kM_I_; +text: .text%__1cMtlsLoadPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNmaxI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNandL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMVirtualSpaceNreserved_size6kM_I_; +text: .text%__1cHNTarjanIsetdepth6MIpI_v_; +text: .text%__1cOPhaseIdealLoopPbuild_loop_late6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cOPhaseIdealLoopQbuild_loop_early6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cOPhaseIdealLoopKDominators6M_v_; +text: .text%__1cHNTarjanDDFS6Fp0rnJVectorSet_pnOPhaseIdealLoop_pI_i_; +text: .text%__1cOPhaseIdealLoopRinit_dom_lca_tags6M_v_; +text: .text%__1cNaddP_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvI2L_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLeCXRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cJcmpOpOperHgreater6kM_i_: ad_i486_clone.o; +text: .text%__1cSmembar_acquireNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLklassItablebFinitialize_itable_for_interface6MpnMklassOopDesc_pnRitableMethodEntry__v_; +text: .text%__1cScheck_phi_clipping6FpnHPhiNode_rpnHConNode_rI45rpnENode_5_i_: cfgnode.o; +text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_; +text: .text%__1cbFunnecessary_membar_volatileNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKCompiledICIis_clean6kM_i_; +text: 
.text%__1cMPhaseChaitinVfind_base_for_derived6MppnENode_2rI_2_; +text: .text%__1cKciTypeFlowLStateVectorGdo_new6MpnQciByteCodeStream__v_; +text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cENodeGOpcode6kM_i_; +text: .text%__1cKJavaThreadGactive6F_p0_; +text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_; +text: .text%__1cKstoreLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKJavaThreadNreguard_stack6MpC_i_; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_; +text: .text%JVM_FillInStackTrace; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_; +text: .text%__1cSInterpreterRuntimePset_bcp_and_mdp6FpCpnKJavaThread__v_; +text: .text%__1cTjava_lang_ThrowableQclear_stacktrace6FpnHoopDesc__v_; +text: .text%__1cIUniverseWis_out_of_memory_error6FnGHandle__i_; +text: .text%__1cJloadSNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSshlL_eReg_1_31NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_; +text: .text%__1cLPcDescCacheLadd_pc_desc6MpnGPcDesc__v_; +text: .text%__1cNcmovI_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOstackSlotFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cOstackSlotFOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cLloadSSFNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateN_sub_Op_LoadL6MpknENode__v_; +text: .text%__1cNnegI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEcmpl6MnHAddress_i_v_; +text: .text%__1cJloadFNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cIModINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_; +text: .text%__1cKCompiledICZcompute_monomorphic_entry6FnMmethodHandle_nLKlassHandle_iirnOCompiledICInfo_pnGThread__v_; +text: .text%__1cObox_handleNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cbBInterpreterCodeletInterfaceRcode_size_to_size6kMi_i_: interpreter.o; +text: .text%__1cbBInterpreterCodeletInterfaceKinitialize6MpnEStub_i_v_: interpreter.o; +text: .text%__1cIPhaseCFGOinsert_goto_at6MII_v_; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: memnode.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: memnode.o; +text: .text%__1cUParallelScavengeHeapEused6kM_I_; +text: .text%__1cOmulIS_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cLOpaque2NodeLbottom_type6kM_pknEType__: connode.o; +text: .text%__1cTshrL_eReg_32_63NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerGmovzxb6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cHciKlassSsuper_check_offset6M_I_; +text: .text%__1cOMacroAssemblerSload_unsigned_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cICmpDNodeGOpcode6kM_i_; +text: .text%__1cLRuntimeStubbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: parse2.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: loopnode.o; +text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_; +text: .text%__1cFParseJdo_ifnull6MnIBoolTestEmask__v_; +text: .text%__1cNmethodOopDescVclear_native_function6M_v_; +text: .text%__1cFframeLnmethods_do6M_v_; +text: .text%__1cJLoadBNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: 
.text%__1cLeAXRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%jni_NewLocalRef: jni.o; +text: .text%__1cKstoreFNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHMatcherQinline_cache_reg6F_nHOptoRegEName__; +text: .text%__1cLloadSSFNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_; +text: .text%__1cGOopMapPset_derived_oop6MnHOptoRegEName_ii2_v_; +text: .text%__1cOimmI_32_63OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cLOptoRuntimebAresolve_opt_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cPClassFileParserbEparse_constant_pool_long_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cLConvF2DNodeGOpcode6kM_i_; +text: .text%__1cIMulINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cPconvL2I_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitOset_pair_local6MipnENode__v_: parse2.o; +text: .text%__1cFStateM_sub_Op_AndI6MpknENode__v_; +text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_; +text: .text%__1cICodeHeapMmax_capacity6kM_I_; +text: .text%__1cHciKlassGloader6M_pnHoopDesc__: ciTypeArrayKlass.o; +text: .text%__1cKloadUBNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%jni_DeleteGlobalRef: jni.o; +text: .text%__1cJLoadCNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOPhaseIdealLoopLdo_split_if6MpnENode__v_; +text: .text%__1cRsubI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJcmpOpOperKless_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cNaddI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKTypeOopPtrSmake_from_constant6FpnIciObject__pk0_; +text: .text%__1cJArrayDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cMURShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%jni_GetObjectArrayElement: jni.o; +text: .text%__1cRmulI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_; +text: .text%__1cNloadConI0NodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_; +text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_; +text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cIUniverseHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cOJNIHandleBlockMweak_oops_do6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cQVMOperationQdDueueHoops_do6MpnKOopClosure__v_; +text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_; +text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_; +text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_; +text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollectorXoops_do_for_all_threads6FpnKOopClosure__v_; +text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_; +text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_; +text: .text%__1cNstoreImmINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_; +text: .text%JVM_GetCPMethodModifiers; +text: .text%__1cIModLNodeGOpcode6kM_i_; +text: .text%__1cNSCMemProjNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cHnmethodNscope_desc_at6MpCi_pnJScopeDesc__; +text: .text%__1cTcompareAndSwapLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: 
.text%__1cIAndLNodeGmul_id6kM_pknEType__: classes.o; +text: .text%jni_SetLongField: jni.o; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: multnode.o; +text: .text%__1cIAndLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%jio_vsnprintf; +text: .text%__1cLRethrowNodeEhash6kM_I_: classes.o; +text: .text%__1cWCallLeafNoFPDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsalI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRsalI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWCallLeafNoFPDirectNodeRis_safepoint_node6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerDjmp6MnHAddress__v_; +text: .text%__1cIimmLOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConLNodeFclone6kM_pnENode__; +text: .text%jio_snprintf; +text: .text%__1cTCallDynamicJavaNodeEhash6kM_I_: callnode.o; +text: .text%__1cNSignatureInfoJdo_double6M_v_: frame.o; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: subnode.o; +text: .text%__1cMstoreSSINodeHis_Copy6kM_I_: ad_i486_misc.o; +text: .text%__1cYmulI_imm_RShift_highNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQsalL_eReg_CLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLConvL2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIMulLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOjmpLoopEndNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOmulIS_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_; +text: .text%__1cMdecI_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompiledVFrameEcode6kM_pnHnmethod__; +text: .text%__1cOleaPIdxOffNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReturnNode2t6MpnENode_2222_v_; +text: .text%__1cKReturnNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQComputeCallStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cPGlobalTLABStatsKinitialize6M_v_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_; +text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersbBupdate_counters_from_policy6M_v_; +text: .text%__1cSReferenceProcessorOprocess_phase16MppnHoopDesc_pnPReferencePolicy_pnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_; +text: .text%__1cSReferenceProcessorbBenqueue_discovered_reflists6MppnHoopDesc__v_; +text: .text%__1cSReferenceProcessorbDenqueue_discovered_references6M_i_; +text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_; +text: .text%__1cUParallelScavengeHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cPGCMemoryManagerIgc_begin6M_v_; +text: .text%__1cPGCMemoryManagerGgc_end6M_v_; +text: .text%__1cUParallelScavengeHeapTensure_parseability6M_v_; +text: .text%__1cUParallelScavengeHeapOfill_all_tlabs6M_v_; +text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_; +text: .text%__1cNMemoryServiceIgc_begin6Fi_v_; +text: .text%__1cNMemoryServiceGgc_end6Fi_v_; +text: .text%__1cXTraceMemoryManagerStats2t6Mi_v_; +text: .text%__1cXTraceMemoryManagerStats2T6M_v_; +text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_; +text: .text%__1cRaddL_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_; +text: .text%__1cXjava_lang_ref_ReferenceRpending_list_addr6F_ppnHoopDesc__; +text: .text%__1cUParallelScavengeHeapQresize_all_tlabs6M_v_; +text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cUParallelScavengeHeapPupdate_counters6M_v_; +text: 
.text%__1cNCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersPupdate_counters6M_v_; +text: .text%__1cKPSYoungGenPupdate_counters6M_v_; +text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_; +text: .text%__1cTDerivedPointerTableFclear6F_v_; +text: .text%__1cNCollectedHeapOfill_all_tlabs6M_v_; +text: .text%__1cQLRUMaxHeapPolicy2t6M_v_; +text: .text%__1cTDerivedPointerTablePupdate_pointers6F_v_; +text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_; +text: .text%__1cSReferenceProcessorQprocess_phaseJNI6M_v_; +text: .text%__1cSReferenceProcessorbDprocess_discovered_references6M_v_; +text: .text%__1cJOopMapSet2t6M_v_; +text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_; +text: .text%__1cRInlineCacheBufferIis_empty6F_i_; +text: .text%__1cUSafepointSynchronizeRis_cleanup_needed6F_i_; +text: .text%__1cOPhaseIdealLoopUsplit_if_with_blocks6MrnJVectorSet_rnKNode_Stack__v_; +text: .text%__1cIMachOperFscale6kM_i_; +text: .text%__1cMtlsLoadPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachOperNconstant_disp6kM_i_; +text: .text%__1cPVM_GC_OperationQgc_count_changed6kM_i_; +text: .text%__1cPVM_GC_OperationZacquire_pending_list_lock6M_v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constantPoolKlass.o; +text: .text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_; +text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_; +text: .text%__1cPVM_GC_OperationNdoit_prologue6M_i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constantPoolKlass.o; +text: .text%__1cPVM_GC_OperationbKrelease_and_notify_pending_list_lock6M_v_; +text: .text%__1cIAndLNodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cIAndLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cURethrowExceptionNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cIAndLNodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cJCodeCacheXmark_for_deoptimization6FpnMklassOopDesc__i_; +text: .text%__1cRmulI_eReg_immNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cNmodI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUSafepointSynchronizeFbegin6F_v_; +text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_; +text: .text%__1cCosbCmake_polling_page_unreadable6F_v_; +text: .text%__1cUSafepointSynchronizeQdo_cleanup_tasks6F_v_; +text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_; +text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_; +text: .text%__1cJloadFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMCounterDecayFdecay6F_v_; +text: .text%__1cTAbstractInterpreterRnotice_safepoints6F_v_; +text: .text%__1cONMethodSweeperFsweep6F_v_; +text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_; +text: .text%__1cUSafepointSynchronizeDend6F_v_; +text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_; +text: .text%__1cTAbstractInterpreterRignore_safepoints6F_v_; +text: .text%__1cCosbAmake_polling_page_readable6F_v_; +text: .text%__1cURethrowExceptionNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOCompiledRFrameEinit6M_v_; +text: .text%__1cGvframeDtop6kM_p0_; +text: .text%__1cOCompilerOraclePshould_break_at6FnMmethodHandle__i_; +text: .text%__1cJOopMapSetJheap_size6kM_i_; +text: .text%__1cJOopMapSetHcopy_to6MpC_v_; +text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_; +text: .text%__1cKCodeBufferJcopy_code6MpnICodeBlob__v_; +text: .text%__1cNSignatureInfoJdo_double6M_v_: bytecode.o; +text: 
.text%__1cICodeBlobWfix_relocation_at_move6Mi_v_; +text: .text%JVM_DoPrivileged; +text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_; +text: .text%__1cVPatchingRelocIteratorHprepass6M_v_; +text: .text%__1cICodeBlobPallocation_size6FpnKCodeBuffer_ii_I_; +text: .text%__1cNRelocIteratorMcreate_index6FpnKCodeBuffer_pnJrelocInfo_4_4_; +text: .text%__1cICodeBlob2t6MpkcpnKCodeBuffer_iiipnJOopMapSet_i_v_; +text: .text%__1cKCodeBufferPcopy_relocation6MpnICodeBlob__v_; +text: .text%__1cIAddLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_base6MnITosState_ppCi_v_; +text: .text%__1cZInterpreterMacroAssemblerKverify_FPU6MinITosState__v_; +text: .text%__1cNmodL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cURethrowExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstoreFNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQVMOperationQdDueueDadd6MpnMVM_Operation__i_; +text: .text%__1cJEventMark2t6MpkcE_v_: vmThread.o; +text: .text%__1cGThreadMget_priority6Fkpk0_nOThreadPriority__; +text: .text%__1cIVMThreadSevaluate_operation6MpnMVM_Operation__v_; +text: .text%__1cQVMOperationQdDueueGunlink6MpnMVM_Operation__v_; +text: .text%__1cQVMOperationQdDueueOqueue_add_back6MipnMVM_Operation__v_; +text: .text%__1cMVM_OperationIevaluate6M_v_; +text: .text%__1cCosTget_native_priority6FkpknGThread_pi_nIOSReturn__; +text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_; +text: .text%__1cQVMOperationQdDueueGinsert6MpnMVM_Operation_2_v_; +text: .text%__1cCosMget_priority6FkpknGThread_rnOThreadPriority__nIOSReturn__; +text: .text%__1cScompI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: jvm.o; +text: .text%__1cOcompI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cGGCTask2t6Mn0AEKindEkind__v_; +text: .text%__1cFParseGdo_new6M_v_; +text: .text%__1cLGCTaskQdDueue2t6Mi_v_; +text: .text%__1cJloadFNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNJvmtiGCMarker2t6Mi_v_; +text: .text%__1cNJvmtiGCMarker2T6M_v_; +text: .text%__1cKPSScavengeXshould_attempt_scavenge6F_i_; +text: .text%__1cUWaitForBarrierGCTask2t6Mi_v_; +text: .text%__1cUPSAdaptiveSizePolicyPupdate_averages6MiII_v_; +text: .text%__1cNBarrierGCTaskIdestruct6M_v_; +text: .text%__1cNBarrierGCTaskOdo_it_internal6MpnNGCTaskManager_I_v_; +text: .text%__1cGGCTaskIdestruct6M_v_; +text: .text%__1cLGCTaskQdDueueGcreate6F_p0_; +text: .text%__1cKPSYoungGenLswap_spaces6M_v_; +text: .text%__1cKPSYoungGenNresize_spaces6MII_v_; +text: .text%__1cKPSScavengeQinvoke_no_policy6Fpi_i_; +text: .text%__1cLGCTaskQdDueueHenqueue6Mp0_v_; +text: .text%__1cKPSYoungGenRresize_generation6MII_i_; +text: .text%__1cKPSYoungGenGresize6MII_v_; +text: .text%__1cZSerialOldToYoungRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cUParallelScavengeHeapQresize_young_gen6MII_v_; +text: .text%__1cZSerialOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cNGCTaskManagerIadd_list6MpnLGCTaskQdDueue__v_; +text: .text%__1cNGCTaskManagerVrelease_all_resources6M_v_; +text: .text%__1cUWaitForBarrierGCTaskEname6M_pc_: gcTaskManager.o; +text: .text%__1cUWaitForBarrierGCTaskGcreate6F_p0_; +text: .text%__1cSPSPromotionManagerMpre_scavenge6F_v_; +text: .text%__1cSPSPromotionManagerNpost_scavenge6F_v_; +text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_; +text: 
.text%__1cSPSPromotionManagerbBvm_thread_promotion_manager6F_p0_; +text: .text%__1cUPSAdaptiveSizePolicyUminor_collection_end6MnHGCCauseFCause__v_; +text: .text%__1cSCardTableExtensionRscavenge_contents6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager__v_; +text: .text%__1cUPSAdaptiveSizePolicyWminor_collection_begin6M_v_; +text: .text%__1cNMonitorSupplyHrelease6FpnHMonitor__v_; +text: .text%__1cNMonitorSupplyHreserve6F_pnHMonitor__; +text: .text%__1cUWaitForBarrierGCTaskIwait_for6M_v_; +text: .text%__1cUWaitForBarrierGCTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cUWaitForBarrierGCTaskIdestruct6M_v_; +text: .text%__1cUWaitForBarrierGCTaskHdestroy6Fp0_v_; +text: .text%__1cUPSAdaptiveSizePolicybPcompute_survivor_space_size_and_threshold6MiiI_i_; +text: .text%__1cHThreadsZcreate_thread_roots_tasks6FpnLGCTaskQdDueue__v_; +text: .text%__1cPGlobalTLABStatsHpublish6M_v_; +text: .text%__1cMindirectOperNconstant_disp6kM_i_: ad_i486.o; +text: .text%__1cMindirectOperNbase_position6kM_i_: ad_i486.o; +text: .text%__1cbDVM_ParallelGCFailedAllocation2t6MIiiI_v_; +text: .text%__1cJloadSNodeFreloc6kM_i_; +text: .text%__1cMrep_stosNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateN_sub_Op_LoadS6MpknENode__v_; +text: .text%__1cQorI_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQshrI_eReg_CLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_; +text: .text%__1cNIdealLoopTreePiteration_split6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cHnmethodPscopes_pcs_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodQscopes_data_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodJstub_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodOexception_size6kM_i_: nmethod.o; +text: .text%__1cIMulLNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cLOopRecorderIoop_size6M_i_; +text: .text%__1cYDebugInformationRecorderJdata_size6M_i_; +text: .text%__1cHnmethodKtotal_size6kM_i_; +text: .text%__1cVCallRuntimeDirectNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNstoreImmINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cINodeHash2t6Mp0_v_; +text: .text%__1cOPhaseTransform2t6Mp0nFPhaseLPhaseNumber__v_; +text: .text%__1cLPhaseValues2t6Mp0_v_; +text: .text%__1cZCallInterpreterDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNmethodOopDescbDbuild_interpreter_method_data6FnMmethodHandle_pnGThread__v_; +text: .text%__1cOJNIHandleBlockRrebuild_free_list6M_v_; +text: .text%__1cFStateM_sub_Op_Goto6MpknENode__v_; +text: .text%__1cFDictIFreset6MpknEDict__v_; +text: .text%__1cIPhaseCFGQFind_Inner_Loops6M_v_; +text: .text%__1cMPhaseChaitinRRegister_Allocate6M_v_; +text: .text%__1cIPhaseCFGJbuild_cfg6M_I_; +text: .text%__1cIPhaseCFG2t6MpnFArena_pnIRootNode_rnHMatcher__v_; +text: .text%__1cHMatcherZnumber_of_saved_registers6F_i_; +text: .text%__1cMPhaseChaitin2t6MIrnIPhaseCFG_rnHMatcher__v_; +text: .text%__1cOCompileWrapper2t6MpnHCompile__v_; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: cfgnode.o; +text: .text%__1cETypeKInitialize6FpnHCompile__v_; +text: .text%__1cIPhaseCFGKDominators6M_v_; +text: .text%__1cHCompileEInit6Mi_v_; +text: .text%__1cIPhaseCFGDDFS6MpnGTarjan__I_; +text: .text%__1cIPhaseCFGVschedule_pinned_nodes6MrnJVectorSet__v_; +text: .text%__1cIPhaseCFGOschedule_early6MrnJVectorSet_rnJNode_List_rnLBlock_Array__i_; +text: .text%__1cXPhaseAggressiveCoalesceGverify6M_v_: coalesce.o; +text: .text%__1cWNode_Backward_Iterator2t6MpnENode_rnJVectorSet_rnJNode_List_rnLBlock_Array__v_; +text: 
.text%__1cIPhaseCFGNschedule_late6MrnJVectorSet_rnJNode_List_rnNGrowableArray4CI___v_; +text: .text%__1cIPhaseCFGQGlobalCodeMotion6MrnHMatcher_IrnJNode_List__v_; +text: .text%__1cVExceptionHandlerTable2t6Mi_v_; +text: .text%__1cGTarjanIsetdepth6MI_v_; +text: .text%__1cHCompileICode_Gen6M_v_; +text: .text%__1cIPhaseCFGYEstimate_Block_Frequency6M_v_; +text: .text%__1cXPhaseAggressiveCoalesceNinsert_copies6MrnHMatcher__v_; +text: .text%__1cMPhaseChaitinbGstretch_base_pointer_live_ranges6MpnMResourceArea__i_; +text: .text%__1cNPhaseRegAllocTpd_preallocate_hook6M_v_; +text: .text%__1cQorI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: matcher.o; +text: .text%__1cHMatcherUvalidate_null_checks6M_v_; +text: .text%__1cHMatcherPinit_spill_mask6MpnENode__v_; +text: .text%__1cHMatcherTFixup_Save_On_Entry6M_v_; +text: .text%__1cHMatcherVinit_first_stack_mask6M_v_; +text: .text%__1cHMatcherFmatch6M_v_; +text: .text%__1cHCompileOcompute_old_SP6M_nHOptoRegEName__; +text: .text%__1cHMatcher2t6MrnJNode_List__v_; +text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_; +text: .text%__1cOleaPIdxOffNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cHMatcherLreturn_addr6kM_nHOptoRegEName__; +text: .text%__1cHCompilebBregister_library_intrinsics6M_v_; +text: .text%__1cMURShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cMPhaseChaitinRbuild_ifg_virtual6M_v_; +text: .text%__1cIAndLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJPhaseLive2t6MrknIPhaseCFG_rnILRG_List_pnFArena__v_; +text: .text%__1cIPhaseIFG2t6MpnFArena__v_; +text: .text%__1cMPhaseChaitinGde_ssa6M_v_; +text: .text%__1cFArenaNmove_contents6Mp0_1_; +text: .text%__1cNPhaseRegAlloc2t6MIrnIPhaseCFG_rnHMatcher_pF_v_v_; +text: .text%__1cFArena2t6MI_v_; +text: .text%__1cWsize_exception_handler6F_I_; +text: .text%__1cHCompileYinit_scratch_buffer_blob6M_v_; +text: .text%__1cOCompileWrapper2T6M_v_; +text: .text%__1cUPSAdaptiveSizePolicyZdecay_supplemental_growth6Mi_v_; +text: .text%__1cNPhasePeepholeMdo_transform6M_v_; +text: .text%__1cHCompileTframe_size_in_words6kM_i_; +text: .text%__1cNPhasePeephole2T6M_v_; +text: .text%__1cNPhasePeephole2t6MpnNPhaseRegAlloc_rnIPhaseCFG__v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: buildOopMap.o; +text: .text%__1cHCompileMBuildOopMaps6M_v_; +text: .text%__1cWemit_exception_handler6FrnKCodeBuffer__v_; +text: .text%__1cLdo_liveness6FpnNPhaseRegAlloc_pnIPhaseCFG_pnKBlock_List_ipnFArena_pnEDict__v_: buildOopMap.o; +text: .text%__1cMPhaseChaitin2T6M_v_; +text: .text%__1cMPhaseChaitinbApost_allocate_copy_removal6M_v_; +text: .text%__1cIPhaseCFGLRemoveEmpty6M_v_; +text: .text%__1cNPhaseRegAllocPalloc_node_regs6Mi_v_; +text: .text%__1cMPhaseChaitinMfixup_spills6M_v_; +text: .text%__1cHCompileGOutput6M_v_; +text: .text%__1cHCompileQShorten_branches6MpnFLabel_ri333_v_; +text: .text%__1cHCompileLFill_buffer6M_v_; +text: .text%__1cHCompileTFillExceptionTables6MIpI1pnFLabel__v_; +text: .text%__1cHCompileRScheduleAndBundle6M_v_; +text: .text%__1cKCodeBufferOrelocate_stubs6M_v_; +text: .text%__1cUPSAdaptiveSizePolicybPeden_increment_with_supplement_aligned_up6MI_I_; +text: .text%__1cUPSAdaptiveSizePolicyOeden_increment6MII_I_; +text: .text%__1cOMachPrologNodeFreloc6kM_i_; +text: .text%__1cUPSAdaptiveSizePolicyVadjust_for_throughput6MipI1_v_; +text: .text%__1cWImplicitExceptionTableIset_size6MI_v_; +text: .text%__1cJPhaseLive2T6M_v_; +text: 
.text%__1cUPSAdaptiveSizePolicybDcompute_generation_free_space6MIIIIIIIi_v_; +text: .text%__1cIPSOldGenMmax_gen_size6M_I_: psOldGen.o; +text: .text%__1cUPSAdaptiveSizePolicybHclear_generation_free_space_flags6M_v_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: loopnode.o; +text: .text%__1cUPSAdaptiveSizePolicyQdecaying_gc_cost6kM_d_; +text: .text%__1cHCompileYinit_scratch_locs_memory6M_v_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: methodDataKlass.o; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_words6FpnNmethodOopDesc__i_; +text: .text%__1cRmethodDataOopDescPpost_initialize6MpnOBytecodeStream__v_; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_bytes6FpnNmethodOopDesc__i_; +text: .text%__1cRmethodDataOopDescKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: methodDataOop.o; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: methodDataKlass.o; +text: .text%__1cNmulL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbDVM_ParallelGCFailedAllocationEname6kM_pkc_: vm_operations.o; +text: .text%__1cbDVM_ParallelGCFailedAllocationEdoit6M_v_; +text: .text%__1cKoopFactoryOnew_methodData6FnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: methodDataKlass.o; +text: .text%__1cKPSScavengeGinvoke6Fpi_v_; +text: .text%__1cUParallelScavengeHeapTfailed_mem_allocate6MpiIii_pnIHeapWord__; +text: .text%__1cUPSAdaptiveSizePolicyOshould_full_GC6MI_i_; +text: .text%__1cPmethodDataKlassIallocate6MnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cOcmpD_cc_P6NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: loopTransform.o; +text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cHRetNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateO_sub_Op_Return6MpknENode__v_; +text: .text%__1cLlog2_intptr6Fi_i_: divnode.o; +text: .text%__1cbLtransform_int_divide_to_long_multiply6FpnIPhaseGVN_pnENode_i_3_: divnode.o; +text: .text%__1cFStateM_sub_Op_ConL6MpknENode__v_; +text: .text%__1cSmembar_acquireNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cNcmovI_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cHRetNodeFreloc6kM_i_; +text: .text%__1cOClearArrayNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciObjectOis_method_data6M_i_: ciInstance.o; +text: .text%__1cIciObjectJis_method6M_i_: ciInstance.o; +text: .text%__1cNaddP_eRegNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cIConFNodeGOpcode6kM_i_; +text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cJCodeCacheNalive_nmethod6FpnICodeBlob__pnHnmethod__; +text: .text%__1cIGraphKitNallocate_heap6MpnENode_222pknITypeFunc_pC22ipknKTypeOopPtr__2_; +text: .text%__1cPciInstanceKlassbBcompute_shared_has_subklass6M_i_; +text: .text%__1cOLibraryCallKitNtry_to_inline6M_i_; +text: .text%__1cQLibraryIntrinsicIgenerate6MpnIJVMState__2_; +text: .text%__1cLRuntimeStubbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cOcompP_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_CmpL6MpknENode__v_; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_: library_call.o; +text: .text%__1cJAssemblerDjcc6Mn0AJCondition_pCnJrelocInfoJrelocType__v_; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopTransform.o; +text: 
.text%__1cKCompiledICSset_ic_destination6MpC_v_; +text: .text%__1cLConvL2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cHBitDataKis_BitData6M_i_: ciMethodData.o; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_; +text: .text%__1cRxorI_eReg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNaddL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivL_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPshlI_eReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cGThreadOis_interrupted6Fp0i_i_; +text: .text%__1cSmembar_releaseNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNdecI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNandL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQshrI_eReg_CLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNdecI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJLoadDNodeGOpcode6kM_i_; +text: .text%__1cMstoreSSINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopOplace_near_use6kMpnENode__2_; +text: .text%__1cSInterpreterRuntimeOprofile_method6FpnKJavaThread_pC_i_; +text: .text%__1cOcmpD_cc_P6NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadLNodeFreloc6kM_i_; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: multnode.o; +text: .text%__1cRxorI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLBoxLockNodeEhash6kM_I_: classes.o; +text: .text%__1cIAddLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRaddI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__: ad_i486_misc.o; +text: .text%__1cJlabelOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cNsubL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulLNodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cIMulLNodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cTconvF2I_reg_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: loopopts.o; +text: .text%__1cXmembar_acquire_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmethodOopDescThas_native_function6kM_i_; +text: .text%__1cRxorI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateR_sub_Op_SafePoint6MpknENode__v_; +text: .text%__1cMciMethodDataStrap_recompiled_at6MpnLProfileData__i_; +text: .text%__1cSsafePoint_pollNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSsafePoint_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSsafePoint_pollNodeFreloc6kM_i_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlass.o; +text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cLOptoRuntimeRmultianewarray1_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cNmethodOopDescWis_vanilla_constructor6kM_i_; +text: .text%__1cKCodeBuffer2t6MpCi_v_; +text: .text%__1cNandI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSshlL_eReg_1_31NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: memnode.o; +text: .text%__1cOmulIS_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQsalI_eReg_CLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopKclone_loop6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cQsalL_eReg_CLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cXroundDouble_mem_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: 
.text%__1cRandI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKciTypeFlowFBlockQset_private_copy6Mi_v_; +text: .text%__1cLimmI_16OperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cHi2sNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cHi2sNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRxorI_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvI2L_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateL_sub_Op_OrI6MpknENode__v_; +text: .text%JVM_GetClassNameUTF; +text: .text%__1cSshrL_eReg_1_31NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKStoreDNodeGOpcode6kM_i_; +text: .text%__1cNcmovP_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: loopTransform.o; +text: .text%__1cIXorINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cOcmpD_cc_P6NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNmaxI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFframeMpd_gc_epilog6M_v_; +text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_epilogue6M_v_; +text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeLgc_prologue6M_v_; +text: .text%__1cHTypePtrEmake6FnETypeFTYPES_n0ADPTR_i_pk0_; +text: .text%__1cIAddLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%JVM_FindLoadedClass; +text: .text%__1cIMulFNodeGOpcode6kM_i_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: vmThread.o; +text: .text%__1cCosJyield_all6Fi_v_; +text: .text%__1cNstoreImmINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJBytecodesRspecial_length_at6FpC_i_; +text: .text%__1cOstackSlotLOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cRaddI_eReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvI2DNodeGOpcode6kM_i_; +text: .text%__1cTcompareAndSwapLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSloadL_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUjmpLoopEnd_shortNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cUjmpLoopEnd_shortNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMjniIdMapBaseHoops_do6MpnKOopClosure__v_; +text: .text%__1cNmodL_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEfrom6F_pnMRegisterImpl__; +text: .text%__1cIjniIdMapHoops_do6MpnKOopClosure__v_; +text: .text%__1cTDerivedPointerTableDadd6FppnHoopDesc_3_v_; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: classes.o; +text: .text%__1cPadd_derived_oop6FppnHoopDesc_2_v_: oopMap.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: loopnode.o; +text: .text%__1cKstoreCNodeFreloc6kM_i_; +text: .text%__1cKRegionNodeJideal_reg6kM_I_: loopnode.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: loopnode.o; +text: .text%__1cMincI_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Copy6kM_I_: loopnode.o; +text: .text%__1cXconvI2L_reg_reg_zexNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJOopMapSetMgrow_om_data6M_v_; +text: .text%__1cScompP_mem_eRegNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKReflectionGinvoke6FnTinstanceKlassHandle_nMmethodHandle_nGHandle_inOobjArrayHandle_nJBasicType_4ipnGThread__pnHoopDesc__; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_; +text: .text%__1cKReflectionDbox6FpnGjvalue_nJBasicType_pnGThread__pnHoopDesc__; +text: 
.text%__1cJAssemblerDret6Mi_v_; +text: .text%__1cTcmovII_reg_LEGTNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJAssemblerEcall6MrnFLabel_nJrelocInfoJrelocType__v_; +text: .text%__1cLBoxLockNode2t6Mi_v_; +text: .text%__1cIGraphKitMnext_monitor6M_i_; +text: .text%__1cTshrL_eReg_32_63NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cLOptoRuntimebBcomplete_monitor_enter_Type6F_pknITypeFunc__; +text: .text%__1cMdecI_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIMaxINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFStateQ_sub_Op_FastLock6MpknENode__v_; +text: .text%__1cPcmpFastLockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIGraphKitLshared_lock6MpnENode__pnMFastLockNode__; +text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__; +text: .text%__1cKciTypeFlowLStateVectorJdo_aaload6MpnQciByteCodeStream__v_; +text: .text%__1cNaddI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cEUTF8Ounicode_length6Fpkc_i_; +text: .text%__1cPCallRuntimeNodeGOpcode6kM_i_; +text: .text%__1cFParseFBlockMadd_new_path6M_i_; +text: .text%__1cYcmpL_zero_flags_LEGTNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%JVM_FindClassFromClass; +text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc_ii_v_: nativeLookup.o; +text: .text%__1cIMulINodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cMdecI_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPshlI_eReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_; +text: .text%__1cNSignatureInfoIdo_float6M_v_: frame.o; +text: .text%JVM_NewInstanceFromConstructor; +text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_; +text: .text%__1cLConvI2LNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cNminI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cMorI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc__v_: nativeLookup.o; +text: .text%__1cNIdealLoopTreeMpolicy_align6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeNpolicy_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeSpolicy_range_check6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeQpolicy_peel_only6kMpnOPhaseIdealLoop__i_; +text: .text%__1cLloadSSFNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMtlsLoadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYmulI_imm_RShift_highNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFParseFBlockNstack_type_at6kMi_pknEType__; +text: .text%__1cJAssemblerGmovzxw6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cFStateN_sub_Op_LoadB6MpknENode__v_; +text: .text%__1cIMulLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOMacroAssemblerSload_unsigned_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cKstoreBNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cZCallInterpreterDirectNodeSalignment_required6kM_i_: ad_i486_misc.o; +text: .text%__1cZCallInterpreterDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNloadConL0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZCallInterpreterDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cTsarL_eReg_32_63NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__; +text: .text%__1cLLShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMorI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLLShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: callnode.o; +text: .text%__1cNtestU_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMURShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLMachUEPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cWflagsReg_long_LTGEOperFclone6kM_pnIMachOper__; +text: .text%__1cIAddLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRsarI_eReg_immNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIJVMStateNmonitor_depth6kM_i_: graphKit.o; +text: .text%__1cWCallLeafNoFPDirectNodeKmethod_set6Mi_v_; +text: .text%__1cIregFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlassKlass.o; +text: .text%__1cHnmethodUnumber_of_dependents6kM_i_: nmethod.o; +text: .text%__1cNSingletonBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: node.o; +text: .text%lwp_mutex_init: os_solaris.o; +text: .text%__1cFParseLarray_store6MnJBasicType__v_; +text: .text%__1cLRShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLRethrowNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPsarI_eReg_1NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKcmpOpUOperEless6kM_i_: ad_i486_clone.o; +text: .text%__1cFParseMdo_checkcast6M_v_; +text: .text%__1cMloadConDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOPhaseIdealLoopVclone_up_backedge_goo6MpnENode_22_2_; +text: .text%__1cbACallCompiledJavaDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__; +text: .text%__1cJStubQdDueueGcommit6Mi_v_; +text: .text%__1cMLinkResolverbHlinktime_resolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnHoopDesc_pnNsymbolOopDesc__i_; +text: .text%__1cENodeHdel_out6Mp0_v_: doCall.o; +text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__; +text: .text%__1cKRegionNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%jni_NewString: jni.o; +text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__; +text: .text%__1cMincI_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cHTypePtrFempty6kM_i_; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_i_v_; +text: .text%__1cTshrL_eReg_32_63NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXmembar_acquire_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMMergeMemNodeIadr_type6kM_pknHTypePtr__: memnode.o; +text: .text%__1cITypeLongFwiden6kMpknEType__3_; +text: .text%__1cIplus_adr6FpnENode_i_1_: generateOptoStub.o; +text: .text%__1cIModINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__; +text: 
.text%__1cScompP_mem_eRegNodeFreloc6kM_i_; +text: .text%__1cKciTypeFlowLStateVectorMdo_checkcast6MpnQciByteCodeStream__v_; +text: .text%__1cNSignatureInfoIdo_short6M_v_: frame.o; +text: .text%__1cMadjust_check6FpnENode_11iipnMPhaseIterGVN__v_: ifnode.o; +text: .text%__1cNmaxI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNtestU_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateP_sub_Op_ConvI2L6MpknENode__v_; +text: .text%__1cNdivL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJScopeDescGsender6kM_p0_; +text: .text%__1cOcompiledVFrameGsender6kM_pnGvframe__; +text: .text%__1cOstackSlotIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_; +text: .text%__1cLPhaseValuesKis_IterGVN6M_pnMPhaseIterGVN__: phaseX.o; +text: .text%__1cQsalI_eReg_CLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQciTypeArrayKlassJmake_impl6FnJBasicType__p0_; +text: .text%__1cJloadBNodeFreloc6kM_i_; +text: .text%__1cOcmpD_cc_P6NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOleaPIdxOffNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodKlass.o; +text: .text%__1cKJavaThreadJframes_do6MpFpnFframe_pknLRegisterMap__v_v_; +text: .text%__1cSmembar_acquireNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cKEntryPoint2t6MpC11111111_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: compiledIC.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constMethodKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constMethodKlass.o; +text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__; +text: .text%__1cRmulI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_: typeArrayKlassKlass.o; +text: .text%__1cPstoreImmI16NodeFreloc6kM_i_; +text: .text%__1cFStateP_sub_Op_ConvL2I6MpknENode__v_; +text: .text%__1cLloadSSDNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIXorINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cYcmpL_zero_flags_LEGTNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNcmovI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOmulIS_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cVloadConL_low_onlyNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRsubI_eReg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: typeArrayKlass.o; +text: .text%__1cOcmpD_cc_P6NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceRefKlass.o; +text: .text%__1cObox_handleNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadFNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSmembar_acquireNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCompileBrokerNallocate_task6F_pnLCompileTask__; +text: .text%__1cNCompileBrokerTcreate_compile_task6FpnMCompileQdDueue_inMmethodHandle_i3ipkcii_pnLCompileTask__; +text: .text%__1cMCompileQdDueueDadd6MpnLCompileTask__v_; +text: .text%__1cLCompileTaskKinitialize6MinMmethodHandle_i1ipkcii_v_; +text: .text%jni_GetObjectClass: jni.o; +text: .text%__1cMCompileQdDueueDget6M_pnLCompileTask__; +text: .text%__1cSmembar_acquireNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLBoxLockNodeDcmp6kMrknENode__I_; +text: .text%__1cXmembar_acquire_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cPconvF2D_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: 
.text%__1cMincI_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cSCompileTaskWrapper2t6MpnLCompileTask__v_; +text: .text%__1cCosPhint_no_preempt6F_v_; +text: .text%__1cUGenericGrowableArrayNraw_appendAll6Mpk0_v_; +text: .text%__1cFStateM_sub_Op_MulL6MpknENode__v_; +text: .text%__1cOstackSlotFOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cLCompileTaskEfree6M_v_; +text: .text%__1cFciEnv2t6MpnHJNIEnv__iii_v_; +text: .text%__1cSCompileTaskWrapper2T6M_v_; +text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_; +text: .text%__1cNCompileBrokerJfree_task6FpnLCompileTask__v_; +text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_; +text: .text%__1cNCompileBrokerVpush_jni_handle_block6F_v_; +text: .text%__1cTmembar_volatileNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__; +text: .text%__1cSshrL_eReg_1_31NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFciEnv2T6M_v_; +text: .text%__1cNmodI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cNCompileBrokerUpop_jni_handle_block6F_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: arrayKlass.o; +text: .text%__1cNaddP_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_; +text: .text%__1cSstring_compareNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIimmFOperJconstantF6kM_f_: ad_i486_clone.o; +text: .text%__1cOcompP_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cOMacroAssemblerFalign6Mi_v_; +text: .text%__1cOstackSlotPOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cObox_handleNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOstackSlotPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLCodeletMark2t6MrpnZInterpreterMacroAssembler_pkcinJBytecodesECode__v_: interpreter.o; +text: .text%__1cSInterpreterCodeletKinitialize6MpkcinJBytecodesECode__v_; +text: .text%__1cNandI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cLloadSSFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmodL_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQmulD_reg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIMinINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cNandI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_and_dispatch6MpnITemplate_nITosState__v_; +text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_; +text: .text%JVM_FindClassFromClassLoader; +text: .text%JVM_FindClassFromBootLoader; +text: .text%__1cPshrI_eReg_1NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cHi2bNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cMmatch_option6FpknMJavaVMOption_pkcp4_i_: arguments.o; +text: .text%__1cNnegI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSstring_compareNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOPhaseTransform2t6MpnFArena_nFPhaseLPhaseNumber__v_; +text: .text%__1cHCompileKinit_start6MpnJStartNode__v_; +text: .text%__1cMstoreSSINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: 
.text%__1cLPhaseValues2t6MpnFArena_I_v_; +text: .text%__1cINodeHash2t6MpnFArena_I_v_; +text: .text%__1cINodeHashIround_up6FI_I_; +text: .text%__1cbACallCompiledJavaDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKScopeValueJread_from6FpnTDebugInfoReadStream__p0_; +text: .text%JVM_IHashCode; +text: .text%lwp_cond_init: os_solaris.o; +text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: jvm.o; +text: .text%__1cIVMThreadMis_VM_thread6kM_i_: vmThread.o; +text: .text%__1cOcompiledVFrameScreate_stack_value6kMpnKScopeValue__pnKStackValue__; +text: .text%__1cGThreadOis_Java_thread6kM_i_: vmThread.o; +text: .text%__1cKstoreBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorCto6F_pnMRegisterImpl__; +text: .text%__1cKDictionarybAis_valid_protection_domain6MiInMsymbolHandle_nGHandle_2_i_; +text: .text%__1cPconvI2L_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLStrCompNodeKmatch_edge6kMI_I_; +text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_; +text: .text%__1cRtestI_reg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachNodeJis_MachIf6kM_pknKMachIfNode__: ad_i486_misc.o; +text: .text%__1cNmaxI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_AddL6MpknENode__v_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_i_v_; +text: .text%__1cFForteNregister_stub6FpkcpC3_v_; +text: .text%__1cXroundDouble_mem_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_; +text: .text%__1cENodeEgetd6kM_d_; +text: .text%__1cNSignatureInfoHdo_char6M_v_: frame.o; +text: .text%__1cMstoreSSPNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: nmethod.o; +text: .text%__1cFStateM_sub_Op_AndL6MpknENode__v_; +text: .text%__1cKConv2BNodeGOpcode6kM_i_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii1i_v_; +text: .text%JVM_GetClassLoader; +text: .text%__1cRmulI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cbCAbstractInterpreterGeneratorQset_entry_points6MnJBytecodesECode__v_; +text: .text%__1cbCAbstractInterpreterGeneratorWset_short_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cOjmpLoopEndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateW_sub_Op_CountedLoopEnd6MpknENode__v_; +text: .text%__1cXconvI2L_reg_reg_zexNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMinINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSCompareAndSwapNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKBinaryNodeGOpcode6kM_i_; +text: .text%__1cUmembar_cpu_orderNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQsalL_eReg_CLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNSCMemProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFStateW_sub_Op_MemBarCPUOrder6MpknENode__v_; +text: .text%__1cFStateO_sub_Op_Binary6MpknENode__v_; +text: .text%__1cNSignatureInfoIdo_float6M_v_: bytecode.o; +text: .text%__1cOMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_; +text: .text%__1cPClassFileParserbGparse_constant_pool_double_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cRaddI_eReg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadDNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSleaP_eReg_immINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: 
.text%__1cOjmpLoopEndNodeOis_pc_relative6kM_i_: ad_i486_misc.o; +text: .text%__1cOjmpLoopEndNodeTmay_be_short_branch6kM_i_: ad_i486_misc.o; +text: .text%__1cIModINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRmulI_eReg_immNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cKNativeJumpbEcheck_verified_entry_alignment6FpC1_v_; +text: .text%__1cTbasictype2arraycopy6FnJBasicType_i_pC_; +text: .text%__1cFStateU_sub_Op_CallLeafNoFP6MpknENode__v_; +text: .text%__1cZInterpreterMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cLOptoRuntimeOarraycopy_Type6F_pknITypeFunc__; +text: .text%__1cMMachCallNodeXreturns_float_or_double6kM_i_; +text: .text%__1cOLibraryCallKitQinline_arraycopy6M_i_; +text: .text%__1cOMacroAssemblerOcall_VM_helper6MpnMRegisterImpl_pCii_v_; +text: .text%__1cWCallLeafNoFPDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTcmovII_reg_LTGENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cbACallCompiledJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: compiledIC.o; +text: .text%__1cNLocationValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cILocation2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cUCompressedReadStreamIread_int6M_i_: location.o; +text: .text%__1cNLocationValueLis_location6kM_i_: debugInfo.o; +text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_2_v_; +text: .text%__1cFParseRarray_store_check6M_v_; +text: .text%__1cQshrI_eReg_CLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNCompileBrokerTis_compile_blocking6FnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerRassign_compile_id6FnMmethodHandle_i_I_; +text: .text%__1cNCompileBrokerZcompilation_is_prohibited6FnMmethodHandle_i_i_; +text: .text%__1cNCompileBrokerTis_not_compile_only6FnMmethodHandle__i_; +text: .text%__1cNsubL_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cQsalL_eReg_CLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constMethodKlass.o; +text: .text%__1cFMutex2t6Mipkci_v_; +text: .text%__1cJAssemblerLemit_farith6Miii_v_; +text: .text%__1cIciMethodJhas_loops6kM_i_; +text: .text%__1cRtestI_reg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJStartNodeJideal_reg6kM_I_: callnode.o; +text: .text%__1cNCompileBrokerQset_last_compile6FpnOCompilerThread_nMmethodHandle_ii_v_; +text: .text%__1cNCompileBrokerOcheck_break_at6FnMmethodHandle_iii_i_; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: callnode.o; +text: .text%__1cJStartNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cKC2CompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_; +text: .text%__1cNSignatureInfoIdo_short6M_v_: bytecode.o; +text: .text%__1cOCompiledRFrameKtop_method6kM_nMmethodHandle__: rframe.o; +text: .text%__1cOCompiledRFrameLis_compiled6kM_i_: rframe.o; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_: nmethod.o; +text: .text%__1cNminI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOCompiledRFrame2t6MnFframe_pnKJavaThread_kpnGRFrame__v_; +text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_; +text: .text%__1cMelapsedTimerDadd6M0_v_; +text: .text%__1cSmembar_releaseNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cFciEnvPregister_method6MpnIciMethod_iiiiiipnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler_ii_v_; +text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_; +text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_; +text: .text%__1cFciEnvbUsystem_dictionary_modification_counter_changed6M_i_; +text: .text%__1cLConvI2FNodeGOpcode6kM_i_; +text: .text%__1cIciMethodQbreak_at_execute6M_i_; +text: .text%__1cQComputeCallStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cICodeHeapMinsert_after6MpnJFreeBlock_2_v_; +text: .text%__1cHnmFlagsFclear6M_v_; +text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_; +text: .text%__1cHnmethodSresolve_JNIHandles6M_v_; +text: .text%__1cVExceptionHandlerTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cHnmethod2t6MpnNmethodOopDesc_iiiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__v_; +text: .text%__1cHnmethod2n6FIi_pv_; +text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cHnmethodLnew_nmethod6FnMmethodHandle_iiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__p0_; +text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_; +text: .text%__1cLPcDescCache2t6M_v_; +text: .text%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_; +text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_; +text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_; +text: .text%__1cICodeBlobJcopy_oops6MppnI_jobject_i_v_; +text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_; +text: .text%__1cTcmovII_reg_LEGTNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_; +text: .text%__1cZCallDynamicJavaDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLOptoRuntimeInew_Type6F_pknITypeFunc__; +text: .text%__1cQorI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_3pnRJavaCallArguments_nGHandle_6_6_; +text: .text%__1cNsubL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cIGraphKitMnew_instance6MpnPciInstanceKlass__pnENode__; +text: .text%__1cHCompileUremove_useless_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cIciMethodRbuild_method_data6MnMmethodHandle__v_; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod_ii_v_; +text: .text%__1cHCompileWprint_compile_messages6M_v_; +text: .text%__1cQUnique_Node_ListUremove_useless_nodes6MrnJVectorSet__v_; +text: .text%__1cHCompileVidentify_useful_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cINodeHashUremove_useless_nodes6MrnJVectorSet__v_; +text: .text%__1cSPhaseRemoveUseless2t6MpnIPhaseGVN_pnQUnique_Node_List__v_; +text: .text%__1cIciMethodRbuild_method_data6M_v_; +text: .text%__1cHCompileLInline_Warm6M_i_; +text: .text%__1cMPhaseIterGVN2t6MpnIPhaseGVN__v_; +text: .text%__1cKloadUBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIPhaseCCP2t6MpnMPhaseIterGVN__v_; +text: .text%__1cHCompileRbuild_start_state6MpnJStartNode_pknITypeFunc__pnIJVMState__; +text: .text%__1cIPhaseCCP2T6M_v_; +text: .text%__1cIPhaseCCPHanalyze6M_v_; +text: .text%__1cIPhaseCCPMdo_transform6M_v_; +text: .text%__1cHCompileNreturn_values6MpnIJVMState__v_; +text: 
.text%__1cKInlineTreeWbuild_inline_tree_root6F_p0_; +text: .text%__1cHCompileVfinal_graph_reshaping6M_i_; +text: .text%__1cbAfinal_graph_reshaping_walk6FrnKNode_Stack_pnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cIPhaseCCPJtransform6MpnENode__2_; +text: .text%__1cHCompileLFinish_Warm6M_v_; +text: .text%__1cMPhaseIterGVN2t6Mp0_v_; +text: .text%__1cJStartNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHCompileIOptimize6M_v_; +text: .text%__1cXMachCallInterpreterNodePret_addr_offset6M_i_; +text: .text%__1cZCallInterpreterDirectNodePcompute_padding6kMi_i_; +text: .text%__1cQComputeCallStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cZCallInterpreterDirectNodeKmethod_set6Mi_v_; +text: .text%__1cTsarL_eReg_32_63NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOMachEpilogNodeQsafepoint_offset6kM_i_; +text: .text%__1cWpoll_return_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_; +text: .text%__1cQComputeCallStackJdo_double6M_v_: generateOopMap.o; +text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2pnGThread__v_; +text: .text%__1cMLinkResolverWresolve_interface_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverbGruntime_resolve_interface_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cKcmpOpUOperKless_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cRaddL_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNmethodOopDescIset_code6MpnHnmethod__v_; +text: .text%__1cJAssemblerEincl6MpnMRegisterImpl__v_; +text: .text%__1cIDivINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKTypeRawPtrFxdual6kM_pknEType__; +text: .text%__1cMTailCallNodeGOpcode6kM_i_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_iii_v_; +text: .text%__1cMstoreSSPNodeHis_Copy6kM_I_: ad_i486_misc.o; +text: .text%__1cZCallDynamicJavaDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cISubLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_; +text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_; +text: .text%__1cXSignatureHandlerLibraryKinitialize6F_v_; +text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_; +text: .text%__1cNGrowableArray4CX_Efind6kMkX_i_: interpreterRuntime.o; +text: .text%__1cNFingerprinterLfingerprint6M_X_: interpreterRuntime.o; +text: .text%__1cGEventsDlog6FpkcE_v_: exceptions.o; +text: .text%__1cIAddFNodeGOpcode6kM_i_; +text: .text%__1cRaddL_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerFffree6Mi_v_; +text: .text%__1cGThreadVset_pending_exception6MpnHoopDesc_pkci_v_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinGHandle__i_; +text: .text%__1cKExceptionsG_throw6FpnGThread_pkcinGHandle__v_; +text: .text%__1cSstring_compareNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__; +text: .text%__1cJloadDNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapIppop_any6Mi_v_; +text: 
.text%__1cXroundDouble_mem_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJStartNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSshlL_eReg_1_31NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKPerfMemoryMmark_updated6F_v_; +text: .text%__1cKPerfMemoryFalloc6FI_pc_; +text: .text%__1cPPerfDataManagerIadd_item6FpnIPerfData_i_v_; +text: .text%__1cIPerfDataMcreate_entry6MnJBasicType_II_v_; +text: .text%__1cIPerfData2T6M_v_; +text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_; +text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cFStateM_sub_Op_XorI6MpknENode__v_; +text: .text%__1cCosLelapsedTime6F_d_; +text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse1.o; +text: .text%jni_GetStringUTFChars: jni.o; +text: .text%jni_ReleaseStringUTFChars; +text: .text%__1cTconvD2I_reg_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNmulI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cObox_handleNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNmethodOopDescWcompute_has_loops_flag6M_i_; +text: .text%__1cNloadConI0NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_; +text: .text%__1cXcmpL_reg_flags_LEGTNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOClearArrayNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%JVM_IsInterrupted; +text: .text%__1cJStoreNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cSmembar_releaseNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinMsymbolHandle_4_i_; +text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_; +text: .text%__1cNSignatureInfoHdo_char6M_v_: bytecode.o; +text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_; +text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_; +text: .text%__1cLOptoRuntimeSnew_typeArray_Type6F_pknITypeFunc__; +text: .text%__1cNdivL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_FindLibraryEntry; +text: .text%__1cPCountedLoopNodeGstride6kM_pnENode__: loopTransform.o; +text: .text%__1cIGraphKitJnew_array6MpnENode_nJBasicType_pknEType_pknMTypeKlassPtr__2_; +text: .text%__1cSleaP_eReg_immINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_; +text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_; +text: .text%__1cLlog2_intptr6Fi_i_: graphKit.o; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cJloadCNodeFreloc6kM_i_; +text: .text%__1cFStateN_sub_Op_LoadC6MpknENode__v_; +text: .text%__1cLLShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadDNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_; +text: .text%__1cTcmovII_reg_LEGTNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQmulD_reg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cTcmovII_reg_LEGTNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cFParsebLincrement_and_test_invocation_counter6Mi_v_; +text: .text%__1cMTypeKlassPtrRcast_to_exactness6kMi_pknEType__; +text: 
.text%__1cQsalI_eReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPconvI2F_SSFNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSshrL_eReg_1_31NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJleaP8NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cObox_handleNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNcmpL_LTGENodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbFunnecessary_membar_volatileNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNcmpL_LTGENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFParsePmerge_exception6Mi_v_; +text: .text%__1cSaddF24_reg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopJdo_unroll6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cFStateS_sub_Op_ClearArray6MpknENode__v_; +text: .text%__1cMrep_stosNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMrep_stosNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOcompI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFTypeFEmake6Ff_pk0_; +text: .text%__1cKstoreLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_; +text: .text%__1cIDivINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPmethodDataKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cLStrCompNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_: jni.o; +text: .text%__1cNtestP_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_; +text: .text%__1cSstring_compareNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHMatcherOc_return_value6Fii_nLRegPair__; +text: .text%__1cRandL_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIDivLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSleaP_eReg_immINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: cpCacheKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: cpCacheKlass.o; +text: .text%__1cHi2bNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHCompile2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cOPhaseIdealLoopOadd_constraint6MiipnENode_22p23_v_; +text: .text%__1cKcmpOpUOperHgreater6kM_i_: ad_i486_clone.o; +text: .text%__1cFciEnvZcall_has_multiple_targets6FpnNinstanceKlass_nMsymbolHandle_3ri_i_; +text: .text%__1cRtestI_reg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMmulD_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMNativeLookupGlookup6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cJCMoveNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%jni_SetByteArrayRegion: jni.o; +text: .text%__1cENodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cUjmpLoopEnd_shortNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cJloadFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOjmpLoopEndNodeUshort_branch_version6M_pnIMachNode__; +text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cUjmpLoopEnd_shortNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvF2D_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cUjmpLoopEnd_shortNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHi2bNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQPSIsAliveClosureLdo_object_b6MpnHoopDesc__i_: psScavenge.o; +text: 
.text%__1cFStateO_sub_Op_StoreL6MpknENode__v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cFParseLdo_newarray6MnJBasicType__v_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: ad_i486_misc.o; +text: .text%__1cSsafePoint_pollNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreLNodeFreloc6kM_i_; +text: .text%__1cENodeHdel_out6Mp0_v_: divnode.o; +text: .text%__1cGThreadLnmethods_do6M_v_; +text: .text%__1cSsafePoint_pollNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQorI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRsubI_eReg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSReferenceProcessorZadd_to_discovered_list_mt6MppnHoopDesc_23_v_; +text: .text%__1cPRoundDoubleNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cCosTnative_java_library6F_pv_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2ipnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cMloadConFNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__; +text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_; +text: .text%__1cIModLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cScompP_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__; +text: .text%__1cWflagsReg_long_EQdDNEOperFclone6kM_pnIMachOper__; +text: .text%__1cWflagsReg_long_LEGTOperFclone6kM_pnIMachOper__; +text: .text%__1cTcmovII_reg_EQdDNENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_; +text: .text%__1cMNativeLookupMlookup_style6FnMmethodHandle_pcpkciiripnGThread__pC_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodKlass.o; +text: .text%__1cLServiceUtilLvisible_oop6FpnHoopDesc__i_: objectMonitor_solaris.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodKlass.o; +text: .text%__1cRmulI_eReg_immNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_; +text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_; +text: .text%__1cRandI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cVlookup_special_native6Fpc_pC_: nativeLookup.o; +text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__: ciObjArrayKlass.o; +text: .text%__1cRandI_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPDictionaryEntryVadd_protection_domain6MpnHoopDesc__v_; +text: .text%__1cRmulI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTsarL_eReg_32_63NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cPshrI_eReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__; +text: .text%__1cRxorI_eReg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_; +text: 
.text%__1cJMarkSweepMfollow_stack6F_v_; +text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cMloadConFNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLregDPR1OperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cLregDPR1OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cMNativeLookupLlookup_base6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__; +text: .text%__1cJAssemblerSemit_arith_operand6MipnMRegisterImpl_nHAddress_i_v_; +text: .text%__1cISubLNodeDsub6kMpknEType_3_3_; +text: .text%__1cMNativeLookupNpure_jni_name6FnMmethodHandle__pc_; +text: .text%JVM_GetMethodIxExceptionTableEntry; +text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cMnegD_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMstoreSSINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSTailCalljmpIndNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRcmpOp_commuteOperFclone6kM_pnIMachOper__; +text: .text%__1cJLoadINodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cScompU_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJAssemblerGfstp_d6MnHAddress__v_; +text: .text%__1cKciTypeFlowLStateVectorEtrap6MpnQciByteCodeStream_pnHciKlass_i_v_; +text: .text%__1cOmulF24_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQleaPIdxScaleNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRtestI_reg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_; +text: .text%__1cRsubI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cKstoreBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJLoadPNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cCosHSolarisFEventEpark6M_v_: objectMonitor_solaris.o; +text: .text%__1cKJavaThreadLnmethods_do6M_v_; +text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cXvirtual_call_RelocationJpack_data6M_i_; +text: .text%__1cNcmovI_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cZCallDynamicJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZCallDynamicJavaDirectNodeSalignment_required6kM_i_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLMachUEPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNObjectMonitorREntryQdDueue_insert6MpnMObjectWaiter_i_v_; +text: .text%__1cQorI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_; +text: .text%__1cPClassFileParserbFparse_constant_pool_float_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cbFunnecessary_membar_volatileNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmINodeFreloc6kM_i_; +text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNobjArrayKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cNObjectMonitorbAEntryQdDueue_SelectSuccessor6M_pnMObjectWaiter__; +text: .text%__1cRsarI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsarI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: objArrayKlass.o; +text: .text%__1cKstoreDNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cWCallLeafNoFPDirectNodeFreloc6kM_i_; +text: .text%__1cLlog2_intptr6Fi_i_: objArrayKlassKlass.o; +text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlass.o; +text: 
.text%jni_GetStringCritical: jni.o; +text: .text%jni_ReleaseStringCritical: jni.o; +text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cPsarI_eReg_1NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOaddF24_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvI2D_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPsarI_eReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__: callnode.o; +text: .text%__1cbACallCompiledJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cODeoptimizationYtrap_state_is_recompiled6Fi_i_; +text: .text%__1cPconvF2D_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cYMachCallCompiledJavaNodePret_addr_offset6M_i_; +text: .text%__1cNdivI_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_; +text: .text%__1cRaddL_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSInterpreterRuntimeTnmethod_entry_point6FpnKJavaThread_pnNmethodOopDesc_pnHnmethod__pC_; +text: .text%__1cRandL_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cHnmethodXinterpreter_entry_point6M_pC_; +text: .text%__1cFTypeDJsingleton6kM_i_; +text: .text%__1cHCompileSrethrow_exceptions6MpnIJVMState__v_; +text: .text%__1cLRethrowNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: frame.o; +text: .text%__1cIGraphKitbKcombine_and_pop_all_exception_states6M_pnNSafePointNode__: parse1.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cQshrL_eReg_CLNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cURethrowExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOstackSlotIOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cMmulD_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJloadDNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateP_sub_Op_Rethrow6MpknENode__v_; +text: .text%__1cOstackSlotIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLRethrowNode2t6MpnENode_22222_v_; +text: .text%__1cFTypeDFxmeet6kMpknEType__3_; +text: .text%__1cHOrINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cKloadUBNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTcmovII_reg_LEGTNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNsubI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cZget_mirror_from_signature6FnMmethodHandle_pnPSignatureStream_pnGThread__pnHoopDesc__; +text: .text%__1cNaddP_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cURethrowExceptionNodeFreloc6kM_i_; +text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_; +text: .text%__1cQorI_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cLRShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cHMonitor2t6Mipkci_v_; +text: .text%__1cQshrI_eReg_CLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEshll6MpnMRegisterImpl_i_v_; +text: .text%__1cKJavaThreadLgc_prologue6M_v_; +text: .text%__1cRaddL_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_; +text: .text%__1cLloadSSDNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPconvI2F_SSFNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFParseTprofile_switch_case6Mi_v_; +text: .text%__1cKJavaThreadLgc_epilogue6M_v_; +text: .text%__1cFParseOmerge_new_path6Mi_v_; +text: 
.text%__1cFParseSjump_switch_ranges6MpnENode_pnLSwitchRange_4i_v_; +text: .text%__1cTCallDynamicJavaNodeSis_CallDynamicJava6kM_pk0_: callnode.o; +text: .text%__1cNxorI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%jni_NewByteArray: jni.o; +text: .text%__1cNmulL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: cfgnode.o; +text: .text%__1cMstoreSSINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConPNodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cNmethodOopDescLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__; +text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__; +text: .text%__1cNcmovP_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKReflectionTget_exception_types6FnMmethodHandle_pnGThread__nOobjArrayHandle__; +text: .text%__1cNmethodOopDescVparameter_annotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cNinstanceKlassQmethod_index_for6kMpnNmethodOopDesc_pnGThread__i_; +text: .text%__1cQorI_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOstackSlotDOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cLloadSSDNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMstoreSSPNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cOstackSlotDOperJnum_edges6kM_I_: ad_i486.o; +text: .text%__1cMstoreSSINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFMutexbLwait_for_lock_blocking_implementation6MpnKJavaThread__v_; +text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cRxorI_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOleaPIdxOffNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRaddL_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFframeZinterpreter_frame_set_mdx6Mi_v_; +text: .text%__1cSObjectSynchronizerHinflate6FpnHoopDesc__pnNObjectMonitor__; +text: .text%__1cPstoreImmI16NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOcmpD_cc_P6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTshrL_eReg_32_63NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRandI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cRandI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: loopnode.o; +text: .text%__1cNObjectMonitorHRecycle6M_v_; +text: .text%__1cRandI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNandL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFBlockNset_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_; +text: .text%__1cTcmovII_reg_LTGENodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cSleaP_eReg_immINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadSNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOtypeArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cPregister_native6FnLKlassHandle_nMsymbolHandle_1pCpnGThread__i_: jni.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEmove6Mii_v_; +text: .text%__1cNcmovI_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENode2t6Mp0111111_v_; +text: .text%__1cFStateP_sub_Op_RShiftL6MpknENode__v_; +text: .text%__1cMloadConLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIAddDNodeGOpcode6kM_i_; +text: .text%__1cNmodL_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cTconvD2I_reg_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: arrayKlass.o; +text: .text%__1cFParseQjump_if_fork_int6MpnENode_2nIBoolTestEmask__pnGIfNode__; +text: .text%__1cOcompP_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXis_positive_zero_double6Fd_i_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: arrayKlass.o; +text: .text%__1cNmaxI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: arrayKlass.o; +text: .text%__1cKstorePNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodDataKlass.o; +text: .text%__1cNmulI_eRegNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodDataKlass.o; +text: .text%__1cHTypeAryFxdual6kM_pknEType__; +text: .text%__1cOMethodLivenessKBasicBlockFsplit6Mi_p1_; +text: .text%__1cLloadSSFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cTcmovII_reg_LTGENodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cOrepush_if_args6FpnFParse_pnENode_3_v_: parse2.o; +text: .text%__1cNaddL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOstackSlotFOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cTcmovII_reg_LTGENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOstackSlotFOperFscale6kM_i_: ad_i486.o; +text: .text%__1cSshlL_eReg_1_31NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOstackSlotFOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cJAssemblerGpushad6M_v_; +text: .text%JVM_GetCallerClass; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: bytecode.o; +text: .text%__1cIMaxINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cQorI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapXdo_return_monitor_check6M_v_; +text: .text%__1cQsalL_eReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKciTypeFlowOsplit_range_at6Mi_pn0AFRange__; +text: .text%__1cPshrI_eReg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubD_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%jni_GetFieldID: jni.o; +text: .text%jni_IsAssignableFrom: jni.o; +text: .text%__1cISubLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNnegI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNnegI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cSobjArrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cFParseXfetch_interpreter_state6MipknEType_pnENode__5_; +text: .text%__1cFParseWcheck_interpreter_type6MpnENode_pknEType_rpnNSafePointNode__2_; +text: .text%__1cJAssemblerEaddl6MnHAddress_i_v_; +text: .text%__1cQmulD_reg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTsarL_eReg_32_63NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOstackSlotDOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cJAssemblerGfstp_s6MnHAddress__v_; +text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cXroundDouble_mem_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: 
.text%__1cLloadSSDNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQsalI_eReg_CLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKCMovePNodeGOpcode6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_; +text: .text%__1cNmodI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRcmpOp_commuteOperFccode6kM_i_: ad_i486_clone.o; +text: .text%__1cJloadDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSCountedLoopEndNode2t6MpnENode_2ff_v_; +text: .text%__1cPCountedLoopNode2t6MpnENode_2_v_; +text: .text%__1cNcmovP_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pCi_v_; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_pkcnGHandle_6_6_; +text: .text%__1cNSCMemProjNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOmulF24_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFStateX_sub_Op_CompareAndSwapL6MpknENode__v_; +text: .text%__1cSloadL_volatileNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cNtestU_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTCompareAndSwapLNode2t6MpnENode_2222_v_; +text: .text%__1cSCompareAndSwapNode2t6MpnENode_2222_v_; +text: .text%__1cOLibraryCallKitRinline_unsafe_CAS6MnJBasicType__i_; +text: .text%__1cSloadL_volatileNodeFreloc6kM_i_; +text: .text%__1cTcompareAndSwapLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cObox_handleNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cISubLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cFframeVnmethods_code_blob_do6M_v_; +text: .text%__1cYcmpL_zero_flags_LTGENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cVCallRuntimeDirectNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_ThreadJis_daemon6FpnHoopDesc__i_; +text: .text%__1cJloadDNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOPhaseIdealLoopUpeeled_dom_test_elim6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cJAssemblerDhlt6M_v_; +text: .text%__1cKstoreDNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cYcmpL_zero_flags_LEGTNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerEstop6Mpkc_v_; +text: .text%__1cTcmovII_reg_LTGENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cQshrI_eReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframeZinterpreter_frame_set_mdp6MpC_v_; +text: .text%__1cQciByteCodeStreamFtable6MnJBytecodesECode__2_; +text: .text%__1cYinternal_word_RelocationJpack_data6M_i_; +text: .text%__1cIDivLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%jni_SetBooleanField: jni.o; +text: .text%__1cSstoreD_roundedNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cTconvD2I_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreFNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: machnode.o; +text: .text%__1cHMatcherbDinterpreter_frame_pointer_reg6F_nHOptoRegEName__; +text: .text%__1cFStateT_sub_Op_ThreadLocal6MpknENode__v_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF_vc_v_; +text: .text%__1cMtlsLoadPNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: 
.text%__1cWResolveOopMapConflictsRpossible_gc_point6MpnOBytecodeStream__i_: rewriter.o; +text: .text%__1cIciObject2t6MpnHciKlass__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_: interpreterRuntime.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorDbox6Mii_v_; +text: .text%__1cSsarL_eReg_1_31NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKsplit_once6FpnMPhaseIterGVN_pnENode_333_v_: cfgnode.o; +text: .text%__1cYmulI_imm_RShift_highNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLLShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPRoundDoubleNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKstoreDNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKloadUBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMURShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cPRoundDoubleNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIGraphKitPdstore_rounding6MpnENode__2_; +text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cTCallInterpreterNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cLPhaseValuesHlongcon6Mx_pnIConLNode__; +text: .text%__1cFStateX_sub_Op_CallInterpreter6MpknENode__v_; +text: .text%__1cZCallInterpreterDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZCallInterpreterDirectNodeFreloc6kM_i_; +text: .text%__1cTCallInterpreterNodeSis_CallInterpreter6kM_pk0_: classes.o; +text: .text%__1cHMatcherbAinterpreter_method_oop_reg6F_nHOptoRegEName__; +text: .text%__1cGciType2t6MpnHciKlass__v_; +text: .text%__1cHCompilebMGenerate_Compiled_To_Interpreter_Graph6MpknITypeFunc_pC_v_; +text: .text%__1cMdecI_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHMatcherXcompiler_method_oop_reg6F_nHOptoRegEName__; +text: .text%__1cXMachCallInterpreterNodeWis_MachCallInterpreter6M_p0_: ad_i486_misc.o; +text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__: classes.o; +text: .text%__1cIciMethodRinterpreter_entry6M_pC_; +text: .text%__1cXjvm_define_class_common6FpnHJNIEnv__pkcpnI_jobject_pkWi53pnGThread__pnH_jclass__: jvm.o; +text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__; +text: .text%__1cUciInstanceKlassKlassEmake6F_p0_; +text: .text%__1cIciObjectUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cLLShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSTailCalljmpIndNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOCallNativeNodeGOpcode6kM_i_; +text: .text%__1cOstackSlotIOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cIciSymbolHbyte_at6Mi_i_; +text: .text%__1cLregDPR1OperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cZCallDynamicJavaDirectNodePcompute_padding6kMi_i_; +text: .text%__1cZCallDynamicJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cSTailCalljmpIndNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%__1cIModINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSTailCalljmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQorI_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXMachCallDynamicJavaNodePret_addr_offset6M_i_; +text: .text%__1cJLoadPNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cNcmpL_EQdDNENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNcmpL_EQdDNENodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: 
.text%__1cXroundDouble_mem_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_; +text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__; +text: .text%__1cOstoreF_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cOmulIS_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSalign_to_page_size6FI_I_: heap.o; +text: .text%__1cOaddF24_regNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cMstoreSSPNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvI2D_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcinMsymbolHandle_4nGHandle_6_v_; +text: .text%__1cNmulI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_; +text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__; +text: .text%__1cNmulI_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRandI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__; +text: .text%__1cTcmovII_reg_EQdDNENodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cTcmovII_reg_LEGTNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJChunkPoolMfree_all_but6MI_v_: allocation.o; +text: .text%__1cRandL_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNxorI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOaddF24_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%JVM_MonitorWait; +text: .text%__1cNObjectMonitorEwait6MxipnGThread__v_; +text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlassKlass.o; +text: .text%__1cZInterpreterMacroAssemblerYtest_method_data_pointer6MpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cMmulD_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQshrL_eReg_CLNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNdivI_eRegNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_; +text: .text%__1cIMulDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__; +text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_; +text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_: markSweep.o; +text: .text%__1cNSingletonBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_: icBuffer.o; +text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_: icBuffer.o; +text: .text%__1cSleaP_eReg_immINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHciKlassMis_interface6M_i_: ciTypeArrayKlass.o; +text: .text%__1cNcmpL_LEGTNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateQ_sub_Op_URShiftL6MpknENode__v_; +text: .text%__1cNcmpL_LEGTNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cIModLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_i_v_; +text: .text%__1cHMatcherXinterpreter_arg_ptr_reg6F_nHOptoRegEName__; +text: 
.text%__1cFciEnvWis_dependence_violated6FpnMklassOopDesc_pnNmethodOopDesc__i_; +text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_; +text: .text%__1cSCompiledStaticCallIis_clean6kM_i_; +text: .text%__1cJloadLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLOptoRuntimeVresolve_static_call_C6FpnKJavaThread__pC_; +text: .text%__1cJAssemblerEnegl6MpnMRegisterImpl__v_; +text: .text%__1cOmulF24_regNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cRsubL_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNloadConL0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsubL_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRmulI_imm_highNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_; +text: .text%__1cFParseHdo_irem6M_v_; +text: .text%__1cHi2bNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHBoxNodeGOpcode6kM_i_; +text: .text%__1cQmulD_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXjava_lang_ref_ReferenceOset_discovered6FpnHoopDesc_2_v_; +text: .text%__1cTcmovII_reg_EQdDNENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cTcmovII_reg_EQdDNENodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cRtestI_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNtestI_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJAssemblerFpopfd6M_v_; +text: .text%__1cUParallelScavengeHeapIcapacity6kM_I_; +text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__; +text: .text%__1cQset_lwp_priority6Fiii_i_; +text: .text%__1cbDreorder_based_on_method_index6FpnPobjArrayOopDesc_1ppnHoopDesc__v_: methodOop.o; +text: .text%__1cNmaxI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMLinkResolverbPlinktime_resolve_interface_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__; +text: .text%__1cPconvF2D_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNIdealLoopTreeXpolicy_maximally_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cbCAbstractInterpreterGeneratorVset_vtos_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cKciTypeFlowLStateVectorLdo_newarray6MpnQciByteCodeStream__v_; +text: .text%__1cTmembar_volatileNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cNxorI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cPconvI2F_SSFNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMjniIdSupportNto_jmethod_id6FpnNmethodOopDesc__pnK_jmethodID__; +text: .text%__1cMjniIdPrivateGid_for6FnTinstanceKlassHandle_i_i_: jniId.o; +text: .text%__1cNget_method_id6FpnHJNIEnv__pnH_jclass_pkc5ipnGThread__pnK_jmethodID__: jni.o; +text: .text%__1cICmpFNodeGOpcode6kM_i_; +text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__; +text: .text%__1cRmulI_imm_highNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSstring_compareNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIAddLNodeIadd_ring6kMpknEType_3_3_; +text: .text%jni_NewStringUTF: jni.o; +text: .text%__1cQorI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVloadConL_low_onlyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cIUniverseWreinitialize_vtable_of6FpnFKlass_pnGThread__v_; +text: .text%__1cPshlI_eReg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%JVM_SetClassSigners; +text: .text%JVM_DefineClassWithSource; +text: .text%__1cJAssemblerGpushfd6M_v_; +text: .text%__1cGEventsDlog6FpkcE_v_: sharedRuntime.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cNloadConL0NodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cNmulI_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulINodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cIMulINodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cNmulI_eRegNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cHAddress2t6MinJrelocInfoJrelocType__v_; +text: .text%__1cTsarL_eReg_32_63NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cTconvF2I_reg_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceRefKlass.o; +text: .text%__1cNsubL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVCallRuntimeDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRaddI_mem_eRegNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cODeoptimizationVtrap_state_has_reason6Fii_i_; +text: .text%__1cNaddL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cVCallRuntimeDirectNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRmulI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLloadSSDNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cS__ieee754_rem_pio26Fdpd_i_: sharedRuntimeTrig.o; +text: .text%__1cQshrL_eReg_CLNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cObox_handleNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cTshlL_eReg_32_63NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cTmembar_volatileNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRtestI_reg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_; +text: .text%__1cMstoreSSINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_; +text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FIi_pnGThread__; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: connode.o; +text: .text%__1cCosbBthread_local_storage_at_put6Fipv_v_; +text: .text%__1cLConvF2INodeGOpcode6kM_i_; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: connode.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: cfgnode.o; +text: .text%get_thread; +text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_; +text: .text%jni_CallIntMethod: jni.o; +text: .text%__1cPconvF2D_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOjmpLoopEndNodeGnegate6M_v_: ad_i486_misc.o; +text: .text%__1cHCompileRmake_vm_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: node.o; +text: .text%__1cJCMoveNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRsubI_eReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLloadSSFNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: node.o; +text: .text%__1cMnegD_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHnmethodPis_dependent_on6MpnMklassOopDesc__i_; +text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: 
.text%__1cENodeIis_Multi6M_pnJMultiNode__: node.o; +text: .text%__1cSsarL_eReg_1_31NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cTshrL_eReg_32_63NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPhaseIdealLoopOdo_range_check6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cNRelocIteratorEnext6M_i_: sharedRuntime.o; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_deopt_entry_for6MnITosState_i_pC_; +text: .text%__1cLOptoRuntimeThandle_wrong_method6FpnKJavaThread__pC_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_return_entry_for6MnITosState_i_pC_; +text: .text%__1cTmembar_volatileNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cKstoreFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUPSMarkSweepDecoratorHcompact6Mi_v_; +text: .text%__1cUPSMarkSweepDecoratorPadjust_pointers6M_v_; +text: .text%__1cTcmovII_reg_EQdDNENodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPconvI2F_SSFNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cUPSMarkSweepDecoratorVdestination_decorator6F_p0_; +text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_; +text: .text%__1cUPSMarkSweepDecoratorKprecompact6M_v_; +text: .text%__1cQshrI_eReg_CLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadDNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cICmpDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOaddF24_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cIDivDNodeGOpcode6kM_i_; +text: .text%__1cOmulF24_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cOaddF24_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOmulF24_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHCompileQgrow_alias_types6M_v_; +text: .text%__1cFParseScreate_jump_tables6MpnENode_pnLSwitchRange_4_i_; +text: .text%__1cQjava_lang_ThreadMis_stillborn6FpnHoopDesc__i_; +text: .text%JVM_GetClassCPTypes; +text: .text%__1cUverify_byte_codes_fn6F_pv_: verifier.o; +text: .text%JVM_GetClassMethodsCount; +text: .text%JVM_GetClassCPEntriesCount; +text: .text%__1cOstoreF_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%JVM_GetClassFieldsCount; +text: .text%__1cQshrL_eReg_CLNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKstoreBNodeFreloc6kM_i_; +text: .text%__1cPconvI2F_SSFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_; +text: .text%__1cMdecI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGEventsDlog6FpkcE_v_: thread.o; +text: .text%__1cPconvI2L_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOMacroAssemblerKverify_FPU6Mipkc_v_; +text: .text%__1cJLoadFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMloadConDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstoreBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQaddD_reg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerGmembar6M_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6MX_v_; +text: .text%__1cXSignatureHandlerLibraryLset_handler6FpnKCodeBuffer__pC_; +text: .text%__1cNcmovP_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_; +text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_; +text: .text%__1cFJNIidEfind6Mi_p0_; +text: 
.text%__1cbBinitialize_itable_for_klass6FpnMklassOopDesc__v_; +text: .text%__1cJCmpD3NodeGOpcode6kM_i_; +text: .text%__1cXconvI2L_reg_reg_zexNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbFunnecessary_membar_volatileNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%JVM_IsPrimitiveClass; +text: .text%__1cIMinINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRsubL_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cNSharedRuntimeDd2l6Fd_x_; +text: .text%__1cRsubL_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_; +text: .text%__1cRsubL_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%jni_FindClass: jni.o; +text: .text%__1cPmovI_nocopyNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cbDjava_lang_reflect_ConstructorThas_signature_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorVhas_annotations_field6F_i_; +text: .text%__1cHCompilebMGenerate_Interpreter_To_Compiled_Graph6MpknITypeFunc__v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorbFhas_parameter_annotations_field6F_i_; +text: .text%__1cFStateM_sub_Op_RegD6MpknENode__v_; +text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cUCallCompiledJavaNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cGThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cGThreadVis_jvmti_agent_thread6kM_i_: thread.o; +text: .text%__1cbACallCompiledJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cbACallCompiledJavaDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKCompiledICMset_to_clean6M_v_; +text: .text%__1cTcmovII_reg_LEGTNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_2_v_; +text: .text%__1cJloadFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__; +text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__; +text: .text%__1cFParseSjump_if_false_fork6MpnGIfNode_ii_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cbACallCompiledJavaDirectNodeFreloc6kM_i_; +text: .text%__1cUPipeline_Use_Element2t6MIIIinXPipeline_Use_Cycle_Mask__v_: ad_i486_pipeline.o; +text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cFStateY_sub_Op_CallCompiledJava6MpknENode__v_; +text: .text%__1cXPipeline_Use_Cycle_Mask2t6MI_v_: ad_i486_pipeline.o; +text: .text%__1cMsubD_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIDivLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSleaP_eReg_immINodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cKstoreFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTshlL_eReg_32_63NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRxorI_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_2_v_; +text: .text%__1cNcmovI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRandL_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cSstoreD_roundedNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSstoreD_roundedNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateM_sub_Op_MinI6MpknENode__v_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cNminI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFMutex2T6M_v_; +text: .text%lwp_mutex_destroy: os_solaris.o; +text: .text%__1cQmulD_reg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cHi2bNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHMonitor2T6M_v_; +text: .text%__1cIimmIOperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cIMinINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cIAddFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%lwp_cond_destroy: os_solaris.o; +text: .text%__1cLConvD2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMmulD_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMstoreSSPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cISubLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cINodeHashEgrow6M_v_; +text: .text%__1cMnegD_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_; +text: .text%__1cIMulINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cICodeBlobOis_java_method6kM_i_: codeBlob.o; +text: .text%__1cLConvI2FNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOstackSlotPOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cJCMoveNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKLoadPCNodeGOpcode6kM_i_; +text: .text%__1cRaddI_mem_eRegNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKCMoveINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRcmpOp_commuteOperHgreater6kM_i_: ad_i486_clone.o; +text: .text%__1cNObjectMonitorGenter26MpnGThread__v_; +text: .text%__1cNmodL_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRcmpOp_commuteOperGnegate6M_v_: ad_i486_clone.o; +text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_; +text: .text%__1cQaddD_reg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFTypeFFxmeet6kMpknEType__3_; +text: .text%__1cYcmpL_zero_flags_LTGENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRmulI_imm_highNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJCMoveNode2t6MpnENode_22pknEType__v_: connode.o; +text: .text%__1cJCMoveNodeEmake6FpnENode_222pknEType__p0_; +text: .text%__1cOPhaseIdealLoopVinsert_pre_post_loops6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cLloadSSDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKCodeBufferWinsert_double_constant6Md_pC_; +text: .text%__1cXroundDouble_mem_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubD_regNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__; +text: .text%__1cICodeBlobJis_zombie6kM_i_: onStackReplacement.o; +text: .text%__1cWis_positive_one_double6Fd_i_; +text: .text%__1cNaddP_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__; +text: .text%__1cXcmpL_reg_flags_LEGTNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPciObjectFactoryPinsert_non_perm6Mrpn0ANNonPermObject_pnHoopDesc_pnIciObject__v_; +text: .text%__1cFframeVshould_be_deoptimized6kM_i_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlass.o; +text: .text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_: callnode.o; +text: 
.text%__1cMincI_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlass.o; +text: .text%__1cSleaP_eReg_immINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJEventMark2t6MpkcE_v_: psMarkSweep.o; +text: .text%__1cKloadUBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJAssemblerDjmp6MpCnJrelocInfoJrelocType__v_; +text: .text%__1cISubDNodeGOpcode6kM_i_; +text: .text%__1cNObjectMonitor2t6M_v_; +text: .text%__1cTconvD2I_reg_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_; +text: .text%__1cOcmpD_cc_P6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvD2FNodeGOpcode6kM_i_; +text: .text%__1cTmembar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSshrL_eReg_1_31NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%JVM_MonitorNotify; +text: .text%__1cHMatcherXpost_store_load_barrier6FpknENode__i_; +text: .text%__1cFParseNdo_instanceof6M_v_; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRuntime.o; +text: .text%__1cSsarL_eReg_1_31NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl_i_v_; +text: .text%__1cSmulF24_reg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIGraphKitOgen_instanceof6MpnENode_2_2_; +text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cLTypeInstPtrRcast_to_exactness6kMi_pknEType__; +text: .text%__1cYjava_lang_reflect_MethodbFhas_parameter_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodbChas_annotation_default_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodVhas_annotations_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodThas_signature_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_; +text: .text%__1cNCallGeneratorSfor_predicted_call6FpnHciKlass_p03_3_; +text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_; +text: .text%__1cZCallDynamicJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFStateN_sub_Op_LoadF6MpknENode__v_; +text: .text%__1cNandI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKReflectionKnew_method6FnMmethodHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cTconvD2I_reg_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cWPredictedCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cWPredictedCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cWPredictedCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cNmethodOopDescSannotation_default6kM_pnQtypeArrayOopDesc__; +text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__; +text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cMstoreSSPNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%jni_GetMethodID: jni.o; +text: .text%__1cOstackSlotPOperEtype6kM_pknEType__: ad_i486.o; +text: 
.text%__1cOGenerateOopMapKpp_new_ref6MpnNCellTypeState_i_v_; +text: .text%__1cRmulI_imm_highNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFParseTjump_if_always_fork6Mii_v_; +text: .text%__1cMnegD_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFStateW_sub_Op_MemBarVolatile6MpknENode__v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: typeArrayKlass.o; +text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_; +text: .text%__1cHRegMask2t6M_v_: matcher.o; +text: .text%__1cJLoadSNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cPconvI2D_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_GetClassDeclaredConstructors; +text: .text%__1cINegDNodeGOpcode6kM_i_; +text: .text%__1cIciObjectOis_method_data6M_i_: ciObjectFactory.o; +text: .text%__1cNCallGeneratorRfor_uncommon_trap6FpnIciMethod_nODeoptimizationLDeoptReason_n0CLDeoptAction__p0_; +text: .text%__1cIciObjectJis_method6M_i_: ciObjectFactory.o; +text: .text%__1cNmodI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateP_sub_Op_LShiftL6MpknENode__v_; +text: .text%__1cNmodI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLConvI2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: callGenerator.o; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceRefKlass.o; +text: .text%__1cKcmpOpUOperFequal6kM_i_: ad_i486_clone.o; +text: .text%__1cZUncommonTrapCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciObjectFactory.o; +text: .text%__1cFStateM_sub_Op_ModI6MpknENode__v_; +text: .text%__1cMloadConDNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cPmovI_nocopyNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIci2bNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMVirtualSpaceQuncommitted_size6kM_I_; +text: .text%__1cMVirtualSpaceJexpand_by6MI_i_; +text: .text%__1cJAssemblerFfld_d6MnHAddress__v_; +text: .text%__1cJloadFNodeFreloc6kM_i_; +text: .text%__1cOaddF24_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMincI_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cVCallRuntimeDirectNodeKmethod_set6Mi_v_; +text: .text%__1cRaddI_eReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOGenerateOopMapKinit_state6M_v_; +text: .text%__1cTcmovII_reg_LTGENodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_; +text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_; +text: .text%__1cSTailCalljmpIndNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNinstanceKlassSregister_finalizer6FpnPinstanceOopDesc_pnGThread__2_; +text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_; +text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_; +text: .text%__1cMstoreSSPNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_; +text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_; +text: .text%__1cOmulF24_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapTmethodsig_to_effect6MpnNsymbolOopDesc_ipnNCellTypeState__i_; +text: .text%__1cQsalI_eReg_CLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNdivI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOGenerateOopMapKinterp_all6M_v_; +text: .text%__1cNandI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPClassFileParserXverify_unqualified_name6MpcIi_i_; +text: 
.text%__1cUGenericGrowableArrayKraw_remove6MpknEGrET__v_; +text: .text%__1cOcmovI_regUNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMmulD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_SystemSin_offset_in_bytes6F_i_; +text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cFTypeFJsingleton6kM_i_; +text: .text%__1cRandL_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_SystemTout_offset_in_bytes6F_i_; +text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_; +text: .text%__1cPconvI2D_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: memnode.o; +text: .text%__1cNmulI_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_; +text: .text%__1cQsalL_eReg_CLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_; +text: .text%__1cNdivL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMaxINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_; +text: .text%__1cRsubL_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvF2D_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegD_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTAbstractInterpreterLdeopt_entry6FnITosState_i_pC_; +text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_; +text: .text%__1cQshrL_eReg_CLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_: interpreterRuntime.o; +text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_; +text: .text%__1cNcmovP_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQorI_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceRefKlass.o; +text: .text%__1cMdecI_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSstring_compareNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cRInterpreterOopMapIis_empty6M_i_; +text: .text%__1cJMarkSweepSMarkAndPushClosureLdo_nmethods6kM_ki_: markSweep.o; +text: .text%__1cTsarL_eReg_32_63NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNcmovP_regNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_; +text: .text%__1cIMulDNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cOmulF24_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMsubD_regNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_; +text: .text%__1cRjni_invoke_static6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cMsubD_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOjmpLoopEndNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cJAssemblerFbswap6MpnMRegisterImpl__v_; +text: .text%__1cTconvF2I_reg_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMstoreSSPNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cOjmpLoopEndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_; +text: .text%__1cOjmpLoopEndNodeJis_Branch6kM_I_: ad_i486_misc.o; +text: .text%__1cbFunnecessary_membar_volatileNodeLbottom_type6kM_pknEType__: ad_i486_misc.o; +text: 
.text%__1cIGraphKitXinsert_mem_bar_volatile6MpnKMemBarNode_i_v_; +text: .text%__1cRsubI_eReg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_ThreadIis_alive6FpnHoopDesc__i_; +text: .text%__1cFStateX_sub_Op_CallDynamicJava6MpknENode__v_; +text: .text%JVM_IsThreadAlive; +text: .text%__1cQMachCallJavaNodeVis_MachCallStaticJava6M_pnWMachCallStaticJavaNode__: ad_i486_misc.o; +text: .text%__1cZCallDynamicJavaDirectNodeFreloc6kM_i_; +text: .text%__1cTcmovII_reg_LTGENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cYcmpL_zero_flags_LTGENodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEdecl6MpnMRegisterImpl__v_; +text: .text%__1cVLoaderConstraintTableYextend_loader_constraint6MpnVLoaderConstraintEntry_nGHandle_pnMklassOopDesc__v_; +text: .text%__1cUVirtualCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cNCallGeneratorQfor_virtual_call6FpnIciMethod__p0_; +text: .text%__1cVLoaderConstraintTablebHensure_loader_constraint_capacity6MpnVLoaderConstraintEntry_i_v_; +text: .text%__1cIModLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cQOopMapCacheEntryFflush6M_v_; +text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_; +text: .text%__1cQOopMapCacheEntryTdeallocate_bit_mask6M_v_; +text: .text%__1cQOopMapCacheEntryRallocate_bit_mask6M_v_; +text: .text%__1cOPSPromotionLABRunallocate_object6MpnHoopDesc__i_; +text: .text%__1cICodeHeapTmark_segmap_as_free6MII_v_; +text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cNsubI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cICodeHeapJexpand_by6MI_i_; +text: .text%__1cTcmovII_reg_LEGTNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cJAssemblerFfld_s6MnHAddress__v_; +text: .text%__1cOstoreF_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cVscale_to_lwp_priority6Fiii_i_: os_solaris.o; +text: .text%__1cMdivD_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddI_mem_eRegNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cMdivD_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNxorI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQmulI_mem_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQmulI_mem_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQorI_eReg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cMsubD_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cUVirtualCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cPconvI2F_SSFNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRcmpOp_commuteOperKless_equal6kM_i_: ad_i486_clone.o; +text: .text%__1cIci2bNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJMemRegionMintersection6kMk0_0_; +text: .text%__1cIDivLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMincI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__; +text: .text%__1cQmulD_reg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_ConD6MpknENode__v_; +text: .text%__1cSaddD_reg_roundNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNmodL_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNRelocIteratorEnext6M_i_: output.o; +text: .text%__1cYinternal_word_RelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cNaddI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cCosHSolarisKvm_signals6F_pnIsigset_t__; +text: .text%__1cODataRelocationGoffset6M_i_: 
relocInfo.o; +text: .text%__1cODataRelocationJset_value6MpC_v_: relocInfo.o; +text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_: icBuffer.o; +text: .text%__1cFStateO_sub_Op_CMoveI6MpknENode__v_; +text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_: icBuffer.o; +text: .text%__1cSdivD_reg_roundNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cTOopMapForCacheEntry2t6MnMmethodHandle_ipnQOopMapCacheEntry__v_; +text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_; +text: .text%__1cGICStubIfinalize6M_v_; +text: .text%__1cCosScurrent_stack_base6F_pC_; +text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_; +text: .text%__1cGThread2t6M_v_; +text: .text%__1cCosScurrent_stack_size6F_I_; +text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_; +text: .text%__1cJAssemblerExorl6MpnMRegisterImpl_2_v_; +text: .text%__1cCosHSolarisRunblocked_signals6F_pnIsigset_t__; +text: .text%__1cJStubQdDueueMremove_first6M_v_; +text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_; +text: .text%__1cLMoveL2DNodeGOpcode6kM_i_; +text: .text%__1cIOSThreadNpd_initialize6M_v_; +text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_; +text: .text%__1cCosScurrent_process_id6F_i_; +text: .text%__1cIOSThread2t6MpFpv_i1_v_; +text: .text%__1cKRelocationRpd_set_data_value6MpCi_v_; +text: .text%__1cTshlL_eReg_32_63NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cKConv2BNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_; +text: .text%__1cTOopMapForCacheEntryOreport_results6kM_i_: oopMapCache.o; +text: .text%__1cCosRinitialize_thread6F_v_; +text: .text%__1cFTypeDGis_nan6kM_i_; +text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_; +text: .text%__1cCosMstart_thread6FpnGThread__v_; +text: .text%_start: os_solaris.o; +text: .text%__1cYcmpL_zero_flags_LTGENodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_SubL6MpknENode__v_; +text: .text%__1cLStrCompNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%JVM_SetThreadPriority; +text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_; +text: .text%jni_NewObjectArray: jni.o; +text: .text%__1cKCompiledICKcached_oop6kM_pnHoopDesc__; +text: .text%__1cTcmovII_reg_EQdDNENodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_MulI6MpknENode__v_; +text: .text%__1cMPipeline_Use2t6MIIIpnUPipeline_Use_Element__v_: ad_i486_pipeline.o; +text: .text%__1cCosPpd_start_thread6FpnGThread__v_; +text: .text%__1cNsubL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cCosNcreate_thread6FpnGThread_n0AKThreadType_I_i_; +text: .text%__1cPRoundDoubleNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cFParseScan_rerun_bytecode6M_i_; +text: .text%__1cXjava_lang_reflect_FieldThas_signature_field6F_i_; +text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__; +text: .text%__1cDhpiEread6FipvI_I_: jvm.o; +text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cXjava_lang_reflect_FieldVhas_annotations_field6F_i_; +text: .text%__1cMstoreSSINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cSmembar_releaseNodeIadr_type6kM_pknHTypePtr__; +text: 
.text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__; +text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_; +text: .text%__1cKReflectionJnew_field6FpnPfieldDescriptor_ipnGThread__pnHoopDesc__; +text: .text%__1cRtestI_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPhaseIdealLoopKdo_peeling6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%JVM_GetStackAccessControlContext; +text: .text%__1cIPipeline2t6MIIiIIiiiikpnSmachPipelineStages_3kpInMPipeline_Use__v_: ad_i486_pipeline.o; +text: .text%JVM_Read; +text: .text%__1cOJavaAssertionsNmatch_package6Fpkc_pn0AKOptionList__; +text: .text%__1cHciKlassOsuper_of_depth6MI_p0_; +text: .text%JVM_DesiredAssertionStatus; +text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_; +text: .text%__1cOcmpF_cc_P6NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOJavaAssertionsHenabled6Fpkci_i_; +text: .text%__1cOcmpF_cc_P6NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeFreloc6kM_i_; +text: .text%__1cWThreadLocalAllocBufferMinitial_size6F_I_; +text: .text%__1cQaddD_reg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%jni_GetStaticFieldID: jni.o; +text: .text%__1cOJavaAssertionsLmatch_class6Fpkc_pn0AKOptionList__: javaAssertions.o; +text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__; +text: .text%__1cENodeEgetf6kM_f_; +text: .text%__1cLConvL2FNodeGOpcode6kM_i_; +text: .text%__1cMFlatProfilerJis_active6F_i_; +text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_; +text: .text%__1cIjniIdMapGcreate6FnTinstanceKlassHandle__p0_; +text: .text%__1cQorI_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_; +text: .text%__1cKJavaThreadKinitialize6M_v_; +text: .text%__1cLConvL2DNodeGOpcode6kM_i_; +text: .text%__1cNloadConL0NodeFclone6kM_pnENode__; +text: .text%__1cLConvF2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cGParker2t6M_v_; +text: .text%__1cQThreadStatistics2t6M_v_; +text: .text%__1cIAddDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJimmL0OperFclone6kM_pnIMachOper__; +text: .text%__1cENodeGis_Con6kM_I_: loopnode.o; +text: .text%__1cNjni_functions6F_pknTJNINativeInterface___; +text: .text%__1cLConvF2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cIjniIdMap2t6MpnMklassOopDesc_i_v_; +text: .text%__1cIjniIdMapRcompute_index_cnt6FnTinstanceKlassHandle__i_; +text: .text%__1cLjniIdBucket2t6MpnIjniIdMap_p0_v_; +text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: loopnode.o; +text: .text%__1cUThreadSafepointState2t6MpnKJavaThread__v_; +text: .text%__1cIGraphKitSprecision_rounding6MpnENode__2_; +text: .text%__1cCosMguard_memory6FpcI_i_; +text: .text%__1cKJavaThreadYcreate_stack_guard_pages6M_v_; +text: .text%__1cNaddL_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cOaddF24_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKJavaThread2t6MpFp0pnGThread__vI_v_; +text: .text%__1cSmulF24_reg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQorl_eReg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__; +text: .text%jni_GetFloatArrayRegion: jni.o; +text: .text%__1cKJavaThreadDrun6M_v_; +text: .text%__1cKJavaThreadRthread_main_inner6M_v_; +text: 
.text%__1cQSystemDictionaryQjava_mirror_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cKReflectionbFbasic_type_mirror_to_basic_type6FpnHoopDesc_pnGThread__nJBasicType__; +text: .text%__1cM__kernel_cos6Fdd_d_: sharedRuntimeTrig.o; +text: .text%__1cJAssemblerFcmovl6Mn0AJCondition_pnMRegisterImpl_3_v_; +text: .text%__1cM__kernel_sin6Fddi_d_: sharedRuntimeTrig.o; +text: .text%__1cSleaP_eReg_immINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPhaseIdealLoopJclone_iff6MpnHPhiNode_pnNIdealLoopTree__pnIBoolNode__; +text: .text%__1cGThreadFstart6Fp0_v_; +text: .text%__1cNSharedRuntimeEdsin6Fd_d_; +text: .text%__1cNSharedRuntimeEdcos6Fd_d_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: multnode.o; +text: .text%jni_CallStaticVoidMethod: jni.o; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__; +text: .text%__1cSshlL_eReg_1_31NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_; +text: .text%__1cNdecI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTconvF2I_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNcmovP_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEsbbl6MnHAddress_i_v_; +text: .text%__1cFKlassNexternal_name6kM_pkc_; +text: .text%__1cHRegMask2t6Miiiii_v_: ad_i486_expand.o; +text: .text%__1cLConvD2INodeJideal_reg6kM_I_: classes.o; +text: .text%JVM_IsArrayClass; +text: .text%__1cVloadConL_low_onlyNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cHTypePtrFxdual6kM_pknEType__; +text: .text%__1cSMachBreakpointNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlass.o; +text: .text%__1cICmpDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cVMoveL2D_reg_stackNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQshrI_eReg_CLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRmulI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: output.o; +text: .text%JVM_GetClassName; +text: .text%__1cSsarL_eReg_1_31NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cQorl_eReg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2D_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__; +text: .text%__1cFJNIid2t6MpnMklassOopDesc_ip0_v_; +text: .text%__1cENodeHdel_out6Mp0_v_: mulnode.o; +text: .text%__1cSstring_compareNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateM_sub_Op_RegF6MpknENode__v_; +text: .text%__1cPmovI_nocopyNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulFNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cNinstanceKlassPjni_id_for_impl6FnTinstanceKlassHandle_i_pnFJNIid__; +text: .text%__1cOcmpF_cc_P6NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKRelocationYpd_get_address_from_code6M_pC_; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRuntime.o; +text: .text%__1cSmulF24_reg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cbFunnecessary_membar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmulF24_reg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNtestU_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cbFunnecessary_membar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_; 
+text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerDjmp6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cJArrayDataKcell_count6M_i_: ciMethodData.o; +text: .text%JVM_Open; +text: .text%__1cOmulIS_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__; +text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_; +text: .text%__1cYcmpL_zero_flags_EQdDNENodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_; +text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_; +text: .text%JVM_StartThread; +text: .text%__1cQComputeCallStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_: interpreterRuntime.o; +text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_; +text: .text%__1cGICStubLdestination6kM_pC_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cYcmpL_zero_flags_LEGTNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLConvD2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRxorI_eReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_; +text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_; +text: .text%jni_GetStaticObjectField: jni.o; +text: .text%__1cQmulI_mem_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cScompI_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOstackSlotDOperFscale6kM_i_: ad_i486.o; +text: .text%__1cOstackSlotDOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cMmulD_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOstackSlotDOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cXPartialSubtypeCheckNodeGOpcode6kM_i_; +text: .text%__1cQmulD_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOstackSlotPOperFscale6kM_i_: ad_i486.o; +text: .text%__1cOstackSlotPOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOstackSlotPOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOGenerateOopMapMdo_checkcast6M_v_; +text: .text%__1cQmulI_mem_immNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPconvI2D_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRandL_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTAbstractInterpreterWlayout_activation_impl6FpnNmethodOopDesc_iiiipnFframe_4i_i_; +text: .text%JVM_TotalMemory; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: generateOptoStub.o; +text: .text%JVM_FreeMemory; +text: .text%__1cObox_handleNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMaxINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cTshlL_eReg_32_63NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQmulI_mem_immNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cQmulI_mem_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvI2F_SSFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJScopeDescTdecode_scope_values6Mi_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_; +text: .text%__1cKJavaThread2T6M_v_; +text: 
.text%__1cKJavaThreadYremove_stack_guard_pages6M_v_; +text: .text%__1cIOSThreadKpd_destroy6M_v_; +text: .text%__1cIOSThread2T6M_v_; +text: .text%__1cCosLfree_thread6FpnIOSThread__v_; +text: .text%__1cCosOunguard_memory6FpcI_i_; +text: .text%__1cHRetDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cGParker2T6M_v_; +text: .text%__1cQorI_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRmulI_imm_highNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHThreadsGremove6FpnKJavaThread__v_; +text: .text%__1cKJavaThreadEexit6Mi_v_; +text: .text%__1cKloadUBNodeFreloc6kM_i_; +text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o; +text: .text%__1cGThread2T5B6M_v_; +text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_; +text: .text%__1cNThreadServiceNremove_thread6FpnKJavaThread_i_v_; +text: .text%__SLIP.DELETER__A: thread.o; +text: .text%__1cUThreadSafepointState2T6M_v_; +text: .text%__1cFStateT_sub_Op_RoundDouble6MpknENode__v_; +text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: klassKlass.o; +text: .text%__1cIGraphKitTdprecision_rounding6MpnENode__2_; +text: .text%__1cHOrLNodeGOpcode6kM_i_; +text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_: cpCacheKlass.o; +text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_: constantPoolKlass.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: library_call.o; +text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_: objArrayKlassKlass.o; +text: .text%__1cLmethodKlassOklass_oop_size6kM_i_: methodKlass.o; +text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_: methodDataKlass.o; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: generateOopMap.o; +text: .text%__1cTcmovII_reg_EQdDNENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cSTailCalljmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: arrayKlassKlass.o; +text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_: instanceKlassKlass.o; +text: .text%__1cNcmovL_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_: constMethodKlass.o; +text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_: symbolKlass.o; +text: .text%__1cMTailCallNode2t6MpnENode_222222_v_; +text: .text%__1cKstoreLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cFStateQ_sub_Op_TailCall6MpknENode__v_; +text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_: compiledICHolderKlass.o; +text: .text%__1cLeDIRegIOperEtype6kM_pknEType__: ad_i486.o; +text: .text%__1cPmovI_nocopyNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSTailCalljmpIndNodeFreloc6kM_i_; +text: .text%__1cQChunkPoolCleanerEtask6M_v_: allocation.o; +text: .text%__1cNandI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cFStateO_sub_Op_StoreF6MpknENode__v_; +text: .text%__1cQaddD_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTsarL_eReg_32_63NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_CallObjectMethod: jni.o; +text: .text%__1cNaddP_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNaddP_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovI_regUNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLOptoRuntimeMrethrow_Type6F_pknITypeFunc__; +text: .text%__1cENodeJis_MemBar6kM_pknKMemBarNode__: classes.o; +text: 
.text%__1cMstoreSSPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUGenericGrowableArrayUclear_and_deallocate6M_v_; +text: .text%__1cRandI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cSsarL_eReg_1_31NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cRCardTableModRefBSPclear_MemRegion6MnJMemRegion__v_; +text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_; +text: .text%__1cRInlineCacheBufferLnew_ic_stub6F_pnGICStub__; +text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cVMoveL2D_reg_stackNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cObox_handleNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_; +text: .text%__1cGICStubIset_stub6MpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cTcmovII_reg_LEGTNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: onStackReplacement.o; +text: .text%__1cObox_handleNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: interpreterRuntime.o; +text: .text%__1cSaddD_reg_roundNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cQorI_eReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmulF24_reg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSaddD_reg_roundNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOmulF24_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cHi2bNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQjava_lang_StringbHcreate_from_platform_depended_str6FpkcpnGThread__nGHandle__; +text: .text%__1cOLibraryCallKitYinline_native_time_funcs6Mi_i_; +text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_; +text: .text%__1cLOptoRuntimeYcurrent_time_millis_Type6F_pknITypeFunc__; +text: .text%__1cIPSOldGenHcompact6M_v_; +text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_; +text: .text%__1cOcmpF_cc_P6NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIPSOldGenPadjust_pointers6M_v_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: ciTypeFlow.o; +text: .text%__1cNmulI_eRegNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cKdirectOperFscale6kM_i_: ad_i486_clone.o; +text: .text%__1cQorI_eReg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQorI_eReg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQObjectStartArrayFreset6M_v_; +text: .text%__1cFParseVcatch_call_exceptions6MrnYciExceptionHandlerStream__v_; +text: .text%__1cMsubD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJvmSymbolsHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cSReferenceProcessorHoops_do6MpnKOopClosure__v_; +text: .text%__1cIci2bNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIregDOperFclone6kM_pnIMachOper__; +text: .text%JVM_MonitorNotifyAll; +text: .text%__1cLConvI2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKConv2BNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNmaxI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMelapsedTimer2t6M_v_: phase.o; +text: .text%__1cSvframeArrayElementDbci6kM_i_; +text: .text%__1cIDivDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMloadConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cODeoptimizationYquery_update_method_data6FnQmethodDataHandle_in0ALDeoptReason_rIri4_pnLProfileData__; +text: 
.text%__1cKstoreDNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNminI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMnegD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNPhaseRegAllocHset_oop6MpknENode_i_v_; +text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cFStateM_sub_Op_MaxI6MpknENode__v_; +text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_: jni.o; +text: .text%__1cWis_positive_zero_float6Ff_i_; +text: .text%__1cTcmovII_reg_LTGENodeQuse_cisc_RegMask6M_v_; +text: .text%__1cSPerfStringConstant2t6MnJCounterNS_pkc3_v_; +text: .text%__1cIMulDNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cQorI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__; +text: .text%__1cOMacroAssemblerFleave6M_v_; +text: .text%__1cMloadConDNodeFclone6kM_pnENode__; +text: .text%JVM_GetInheritedAccessControlContext; +text: .text%JVM_NativePath; +text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_; +text: .text%__1cVLoaderConstraintTableJnew_entry6MIpnNsymbolOopDesc_pnMklassOopDesc_ii_pnVLoaderConstraintEntry__; +text: .text%__1cVloadConL_low_onlyNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIimmDOperFclone6kM_pnIMachOper__; +text: .text%__1cIMachNodeOmemory_operand6kM_pknIMachOper__: ad_i486_misc.o; +text: .text%__1cbIjava_security_AccessControlContextGcreate6FnOobjArrayHandle_inGHandle_pnGThread__pnHoopDesc__; +text: .text%__1cHnmethodNis_osr_method6kM_i_: nmethod.o; +text: .text%__1cNmulI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQorl_eReg_immNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cQsalI_eReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOaddF24_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_: callnode.o; +text: .text%__1cFStateT_sub_Op_CallRuntime6MpknENode__v_; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__: callnode.o; +text: .text%__1cOcmovI_regUNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cCosXnon_memory_address_word6F_pc_; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodDataKlass.o; +text: .text%__1cQsalI_eReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodDataKlass.o; +text: .text%__1cVCallRuntimeDirectNodeFreloc6kM_i_; +text: .text%__1cSdivD_reg_roundNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cISubFNodeGOpcode6kM_i_; +text: .text%__1cNandI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveL2D_reg_stackNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cVCallRuntimeDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOmulF24_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cOcmovI_regUNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJloadCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRandL_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTcmovII_reg_LTGENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTshrL_eReg_32_63NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOcmpF_cc_P6NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOaddF24_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cSaddF24_reg_immNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: 
.text%__1cNcmovP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovI_regUNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cSmulF24_reg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_; +text: .text%__1cCosHSolarisKmmap_chunk6FpcIii_2_; +text: .text%__1cKstoreFNodeFreloc6kM_i_; +text: .text%jni_GetStaticMethodID: jni.o; +text: .text%__1cZInterpreterMacroAssemblerWupdate_mdp_by_constant6MpnMRegisterImpl_i_v_; +text: .text%__1cKCompiledICMstub_address6kM_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorRset_unimplemented6Mi_v_; +text: .text%__1cSstoreD_roundedNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_i_v_; +text: .text%__1cKstoreFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKCodeBufferVinsert_float_constant6Mf_pC_; +text: .text%__1cWroundFloat_mem_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOsubF24_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cSaddF24_reg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cSaddD_reg_roundNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMdecI_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTconvF2I_reg_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cISubDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKReflectionTunbox_for_primitive6FpnHoopDesc_pnGjvalue_pnGThread__nJBasicType__; +text: .text%__1cMloadConDNodeFreloc6kM_i_; +text: .text%__1cPMultiBranchDataScompute_cell_count6FpnOBytecodeStream__i_; +text: .text%__1cJloadDNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQaddD_reg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMdecI_memNodeFreloc6kM_i_; +text: .text%__1cTshlL_eReg_32_63NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cFTypeDFxdual6kM_pknEType__; +text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__; +text: .text%__1cWCallLeafNoFPDirectNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConDNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cOtailjmpIndNodeNis_block_proj6kM_pknENode__: ad_i486_misc.o; +text: .text%__1cWCallLeafNoFPDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMdecI_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOptoRuntimeWresolve_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cLTypeInstPtrLmirror_type6kM_pnGciType__; +text: .text%__1cSmulF24_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationVtrap_state_add_reason6Fii_i_; +text: .text%__1cKstoreINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMdivD_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJLoadBNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF3_v3_v_; +text: .text%__1cOCompilerThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%__1cYcmpL_zero_flags_LTGENodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVcompiledICHolderKlassSoop_being_unloaded6MpnRBoolObjectClosure_pnHoopDesc__i_; +text: .text%__1cLRuntimeStub2n6FII_pv_; +text: .text%__1cYcmpL_zero_flags_LTGENodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLRuntimeStubQnew_runtime_stub6FpkcpnKCodeBuffer_ipnJOopMapSet_i_p0_; +text: .text%__1cLRuntimeStub2t6MpkcpnKCodeBuffer_iipnJOopMapSet_i_v_; +text: .text%__1cOstoreF_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2F_regNodeMideal_Opcode6kM_i_: 
ad_i486_misc.o; +text: .text%__1cLloadSSDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvI2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQshrL_eReg_CLNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQsalL_eReg_CLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNSafePointNodeQpeek_monitor_obj6kM_pnENode__; +text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_; +text: .text%__1cFTypeDFempty6kM_i_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: methodDataOop.o; +text: .text%__1cPconvL2D_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKdirectOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486_clone.o; +text: .text%__1cKdirectOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486_clone.o; +text: .text%__1cPoldgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cKdirectOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486_clone.o; +text: .text%__1cKdirectOperLdisp_is_oop6kM_i_: ad_i486_clone.o; +text: .text%__1cLConvD2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cPconvI2F_SSFNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cRmulI_imm_highNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIDivLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cXpartialSubtypeCheckNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJOperation__v4_v_; +text: .text%__1cYinternal_word_RelocationGtarget6M_pC_; +text: .text%__1cRxorI_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXpartialSubtypeCheckNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl_i_v_; +text: .text%__1cIModLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cDhpiFclose6Fi_i_: jvm.o; +text: .text%__1cFParsePdo_monitor_exit6M_v_; +text: .text%__1cFStateN_sub_Op_LoadD6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_MulD6MpknENode__v_; +text: .text%__1cSMachCallNativeNodePret_addr_offset6M_i_; +text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__; +text: .text%__1cOsubF24_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: deoptimization.o; +text: .text%__1cTAbstractInterpreterMreturn_entry6FnITosState_i_pC_; +text: .text%JVM_Close; +text: .text%__1cLOptoRuntimeRnew_objArray_Type6F_pknITypeFunc__; +text: .text%__1cLConvD2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNSafePointNodeQpeek_monitor_box6kM_pnENode__; +text: .text%__1cIMulDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOmulIS_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovL_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_i_v_; +text: .text%__1cNdivI_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulFNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cTunsafe_intrinsic_id6FpnNsymbolOopDesc_1_nNmethodOopDescLIntrinsicId__; +text: .text%__1cSaddF24_reg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cQComputeCallStackIdo_float6M_v_: generateOopMap.o; +text: .text%__1cQComputeCallStackHdo_byte6M_v_: generateOopMap.o; +text: .text%__1cFParseMdo_anewarray6M_v_; +text: .text%__1cIAddFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJLoadDNodeJideal_reg6kM_I_: classes.o; +text: 
.text%__1cTinc_decompile_count6FpnHnmethod__v_: nmethod.o; +text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_; +text: .text%__1cMsubD_regNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cOcmpD_cc_P6NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTcmovII_reg_EQdDNENodeQuse_cisc_RegMask6M_v_; +text: .text%__1cFStateM_sub_Op_CmpD6MpknENode__v_; +text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_; +text: .text%__1cJAssemblerEmovb6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_only6MnITosState__v_; +text: .text%__1cSCallLeafDirectNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPmovI_nocopyNodeErule6kM_I_: ad_i486_misc.o; +text: .text%jni_NewObjectV: jni.o; +text: .text%__1cSCallLeafDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIci2bNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cSsarL_eReg_1_31NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRcmpFastUnlockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovI_regUNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOtailjmpIndNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cLOopMapCache2t6M_v_; +text: .text%__1cGICStubKcached_oop6kM_pnHoopDesc__; +text: .text%__1cJSubFPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__; +text: .text%__1cMdivD_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTconvF2I_reg_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSmulF24_reg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cTmembar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_; +text: .text%jni_EnsureLocalCapacity; +text: .text%__1cTcmovII_reg_LEGTNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_; +text: .text%__1cQAbstractCompilerMsupports_osr6M_i_: c2compiler.o; +text: .text%__1cKEntryPoint2t6M_v_; +text: .text%__1cKstoreINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNTemplateTableGbranch6Fii_v_; +text: .text%__1cXcmpL_reg_flags_LTGENodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSMachBreakpointNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQshrI_eReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cHCompile2t6MpnFciEnv_pF_pknITypeFunc_pCpkciiii_v_; +text: .text%__1cKemit_break6FrnKCodeBuffer__v_; +text: .text%__1cQshrI_eReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvI2F_SSF_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIGraphKitIgen_stub6MpCpkciii_v_; +text: .text%__1cIciMethodVget_osr_flow_analysis6Mi_pnKciTypeFlow__; +text: .text%__1cLloadSSINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQmulD_reg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLResourceObj2n6FIn0APallocation_type__pv_; +text: .text%__1cMloadConFNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cQmulD_reg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cIAddFNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cINegDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSmulF24_reg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJlog2_long6Fx_i_: mulnode.o; +text: .text%__1cJAssemblerEsbbl6MpnMRegisterImpl_i_v_; +text: .text%__1cHnmethodbCcan_not_entrant_be_converted6M_i_; +text: .text%__1cMStartOSRNodeKosr_domain6F_pknJTypeTuple__; +text: 
.text%__1cNCallGeneratorHfor_osr6FpnIciMethod_i_p0_; +text: .text%__1cINegFNodeGOpcode6kM_i_; +text: .text%__1cQorl_eReg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVeADXRegL_low_onlyOperFclone6kM_pnIMachOper__; +text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_; +text: .text%__1cKimmL32OperFclone6kM_pnIMachOper__; +text: .text%__1cLOptoRuntimeNgenerate_stub6FpnFciEnv_pF_pknITypeFunc_pCpkciiii_8_; +text: .text%__1cVloadConL_low_onlyNodeFclone6kM_pnENode__; +text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cFParseWload_interpreter_state6MpnENode_2_v_; +text: .text%__1cVMoveL2D_reg_stackNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMStartOSRNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cPconvI2D_regNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cTshlL_eReg_32_63NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateO_sub_Op_StoreD6MpknENode__v_; +text: .text%__1cPmovI_nocopyNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHThreadsYis_supported_jni_version6Fi_C_; +text: .text%__1cFStateM_sub_Op_ConF6MpknENode__v_; +text: .text%__1cNcmovI_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cKConv2BNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJLoadLNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC22i_v_; +text: .text%__1cNcmovI_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cIci2bNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSdivD_reg_roundNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cWroundFloat_mem_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOmulF24_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcompiledVFrameHraw_bci6kM_i_; +text: .text%__1cFTypeFFxdual6kM_pknEType__; +text: .text%__1cODeoptimizationbJupdate_method_data_from_interpreter6FnQmethodDataHandle_ii_v_; +text: .text%__1cOcompiledVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cSvframeArrayElementHfill_in6MpnOcompiledVFrame__v_; +text: .text%__1cRComputeEntryStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cTcmovII_reg_LTGENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQorI_eReg_memNodeFreloc6kM_i_; +text: .text%__1cFframebHinterpreter_frame_set_monitor_end6MpnPBasicObjectLock__v_; +text: .text%__1cOstoreF_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvF2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cPmovP_nocopyNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFframebBinterpreter_frame_sender_sp6kM_pi_; +text: .text%__1cNcmovL_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cTAbstractInterpreterPsize_activation6FpnNmethodOopDesc_iiiii_i_; +text: .text%__1cJScopeDescImonitors6M_pnNGrowableArray4CpnMMonitorValue____; +text: .text%__1cJScopeDescLexpressions6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cJScopeDescGlocals6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cJScopeDescVdecode_monitor_values6Mi_pnNGrowableArray4CpnMMonitorValue____; +text: .text%__1cOcompiledVFrameGlocals6kM_pnUStackValueCollection__; +text: .text%__1cOcompiledVFrameLexpressions6kM_pnUStackValueCollection__; +text: .text%__1cTcmovII_reg_EQdDNENodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSvframeArrayElementPunpack_on_stack6MiipnFframe_ii_v_; +text: 
.text%__1cTconvD2I_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFframebCinterpreter_frame_set_locals6Mpi_v_; +text: .text%__1cFframebCinterpreter_frame_set_method6MpnNmethodOopDesc__v_; +text: .text%__1cTAbstractInterpreterQcontinuation_for6FpnNmethodOopDesc_pCiiri_3_; +text: .text%__1cTAbstractInterpreterRTosState_as_index6FnITosState__i_; +text: .text%__1cTAbstractInterpreterRlayout_activation6FpnNmethodOopDesc_iiiipnFframe_4i_v_; +text: .text%__1cSvframeArrayElementNon_stack_size6kMiiii_i_; +text: .text%__1cSInterpreterRuntimeJnote_trap6FpnKJavaThread_ipnGThread__v_; +text: .text%__1cICmpFNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRmulI_imm_highNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcompiledVFrameImonitors6kM_pnNGrowableArray4CpnLMonitorInfo____; +text: .text%__1cRsubL_eReg_memNodeFreloc6kM_i_; +text: .text%__1cMTailJumpNodeKmatch_edge6kMI_I_; +text: .text%__1cFStateP_sub_Op_ConvF2D6MpknENode__v_; +text: .text%__1cCosNcommit_memory6FpcI_i_; +text: .text%__1cScompP_eReg_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_memNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cXpartialSubtypeCheckNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNdivL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_; +text: .text%__1cFStateM_sub_Op_DivL6MpknENode__v_; +text: .text%__1cNsubL_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cLimmI_32OperIconstant6kM_i_: ad_i486_clone.o; +text: .text%__1cNTemplateTableLindex_check6FpnMRegisterImpl_2_v_; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: klassKlass.o; +text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_; +text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_; +text: .text%Unsafe_DefineClass1; +text: .text%JVM_GetComponentType; +text: .text%JVM_DefineClass; +text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_; +text: .text%__1cNmodI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLOptoRuntimeNfetch_monitor6FipnJBasicLock_pC_pnHoopDesc__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cLStrCompNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRaddD_reg_imm1NodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cNTemplateTableOpatch_bytecode6FnJBytecodesECode_pnMRegisterImpl_4i_v_; +text: .text%__1cJAssemblerGfrstor6MnHAddress__v_; +text: .text%JVM_GetCPFieldModifiers; +text: .text%__1cSUnsafe_DefineClass6FpnHJNIEnv__pnI_jstring_pnL_jbyteArray_iipnI_jobject_7_pnH_jclass__: unsafe.o; +text: .text%__1cJBasicLockHmove_to6MpnHoopDesc_p0_v_; +text: .text%__1cSObjectSynchronizerOinflate_helper6FpnHoopDesc__pnNObjectMonitor__: synchronizer.o; +text: .text%__1cSdivD_reg_roundNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvI2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNSharedRuntimebJcontinuation_for_implicit_exception6FpnKJavaThread_pCn0AVImplicitExceptionKind__3_; +text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_; +text: .text%__1cOsubF24_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2D_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLimmI_24OperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_; +text: .text%__1cLloadSSDNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cKPSYoungGenKprecompact6M_v_; +text: 
.text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_; +text: .text%__1cKPSYoungGenPadjust_pointers6M_v_; +text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_; +text: .text%__1cOstackSlotPOperFclone6kM_pnIMachOper__; +text: .text%__1cLPSMarkSweepRmark_sweep_phase26F_v_; +text: .text%__1cKPSYoungGenHcompact6M_v_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase16Frii_v_; +text: .text%Unsafe_AllocateInstance; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceRefKlass.o; +text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase36F_v_; +text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_; +text: .text%__1cLPSMarkSweepRmark_sweep_phase46F_v_; +text: .text%__1cPconvL2F_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_; +text: .text%__1cLPSMarkSweepbAreset_millis_since_last_gc6F_v_; +text: .text%__1cUPSAdaptiveSizePolicyUmajor_collection_end6MInHGCCauseFCause__v_; +text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cMsubD_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJCodeCacheLgc_epilogue6F_v_; +text: .text%__1cJMarkSweepNrestore_marks6F_v_; +text: .text%__1cQUncommonTrapBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cNExceptionBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cJCodeCacheMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cYinternal_word_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_; +text: .text%__1cRCardTableModRefBSEis_a6MnKBarrierSetEName__i_: cardTableExtension.o; +text: .text%__1cMset_property6FnGHandle_pkc2pnGThread__v_: jvm.o; +text: .text%__1cJCodeCacheLgc_prologue6F_v_; +text: .text%__1cHBoxNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSReferenceProcessorPoops_do_statics6FpnKOopClosure__v_; +text: .text%__1cQorl_eReg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLPSMarkSweepQinvoke_no_policy6Fpii_v_; +text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__: jni.o; +text: .text%__1cObox_handleNodeFclone6kM_pnENode__; +text: .text%__1cLPSMarkSweepPallocate_stacks6F_v_; +text: .text%jni_NewDirectByteBuffer; +text: .text%__1cMdivD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLPSMarkSweepRdeallocate_stacks6F_v_; +text: .text%__1cRInlineCacheBufferOinit_next_stub6F_v_; +text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cMincI_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUPSAdaptiveSizePolicyWmajor_collection_begin6M_v_; +text: .text%__1cJMarkSweepMadjust_marks6F_v_; +text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_; +text: .text%__1cQSystemDictionaryMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: symbolKlass.o; +text: .text%__1cNmodL_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%jni_AllocObject: jni.o; +text: .text%__1cZInterpreterMacroAssemblerQtest_mdp_data_at6MpnMRegisterImpl_i2rnFLabel__v_; +text: .text%__1cPcmpFastLockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQSystemDictionaryYalways_strong_classes_do6FpnKOopClosure__v_; +text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_; +text: 
.text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cUPSMarkSweepDecoratorbIset_destination_decorator_perm_gen6F_v_; +text: .text%__1cFStateL_sub_Op_Box6MpknENode__v_; +text: .text%__1cUPSMarkSweepDecoratorbHset_destination_decorator_tenured6F_v_; +text: .text%JVM_InvokeMethod; +text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_; +text: .text%__1cSmulF24_reg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: symbolKlass.o; +text: .text%__1cKDictionaryMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cHThreadsLnmethods_do6F_v_; +text: .text%__1cIPSOldGenKprecompact6M_v_; +text: .text%__1cHThreadsLgc_epilogue6F_v_; +text: .text%__1cHThreadsLgc_prologue6F_v_; +text: .text%__1cJPSPermGenQcompute_new_size6MI_v_; +text: .text%__1cXpartialSubtypeCheckNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cZInterpreterMacroAssemblerYprofile_not_taken_branch6MpnMRegisterImpl__v_; +text: .text%__1cHi2bNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQSystemDictionaryPplaceholders_do6FpnKOopClosure__v_; +text: .text%__1cFStateM_sub_Op_ModL6MpknENode__v_; +text: .text%__1cSaddF24_reg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJPSPermGenKprecompact6M_v_; +text: .text%__1cHi2bNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTleaPIdxScaleOffNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFj_not6FnNTemplateTableJCondition__nJAssemblerJCondition__: templateTable_i486.o; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cVMoveF2I_reg_stackNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cMincI_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_; +text: .text%__1cQaddD_reg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJCondition__v4_v_; +text: .text%__1cVMoveL2D_reg_stackNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMincI_memNodeFreloc6kM_i_; +text: .text%__1cOaddF24_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovI_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_pnNsymbolOopDesc_pkc_nGHandle__; +text: .text%__1cMnegF_regNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cJStubQdDueueMremove_first6Mi_v_; +text: .text%__1cSsarL_eReg_1_31NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%JVM_NewArray; +text: .text%__1cLConvF2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOcmpF_cc_P6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlass.o; +text: .text%__1cFStateM_sub_Op_MulF6MpknENode__v_; +text: .text%__1cTconvF2I_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTshlL_eReg_32_63NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerFpopad6M_v_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cNTemplateTableHconvert6F_v_; +text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_; +text: 
.text%__1cSmulF24_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNcmovI_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cRmethodDataOopDescRbci_to_extra_data6Mii_pnLProfileData__; +text: .text%__1cIDivDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cTconvD2I_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNcmovI_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIMulFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFStateP_sub_Op_ConvD2I6MpknENode__v_; +text: .text%__1cLMoveF2INodeGOpcode6kM_i_; +text: .text%__1cPconvI2D_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJStubQdDueueKremove_all6M_v_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: compiledICHolderKlass.o; +text: .text%__1cVVM_ParallelGCSystemGC2t6MI_v_; +text: .text%__1cOcmovI_regUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIregFOperFclone6kM_pnIMachOper__; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cLCastP2INodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cPconvL2D_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPmovP_nocopyNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_; +text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_; +text: .text%__1cSaddF24_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cMciArrayKlassRbase_element_type6M_pnGciType__; +text: .text%__1cHCompileWget_MethodAccessorImpl6M_pnPciInstanceKlass__; +text: .text%__1cSInterpreterRuntimeQcreate_exception6FpnKJavaThread_pc3_v_; +text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_; +text: .text%__1cSmulF24_reg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cLTypeInstPtrOxmeet_unloaded6kMpk0_2_; +text: .text%__1cScompP_eReg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cScompP_eReg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cHCompileRget_Method_invoke6M_pnIciMethod__; +text: .text%__1cLPSMarkSweepGinvoke6Fpii_v_; +text: .text%JVM_GC; +text: .text%__1cVVM_ParallelGCSystemGCEdoit6M_v_; +text: .text%__1cXpartialSubtypeCheckNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLClassLoaderSget_system_package6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cWroundFloat_mem_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cQinitialize_class6FnMsymbolHandle_pnGThread__v_: thread.o; +text: .text%__1cKScopeValueLis_location6kM_i_: debugInfo.o; +text: .text%__1cVVM_ParallelGCSystemGCEname6kM_pkc_: vm_operations.o; +text: .text%__1cRfind_field_offset6FpnI_jobject_ipnGThread__i_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: compiledICHolderKlass.o; +text: .text%__1cOLibraryCallKitbDis_method_invoke_or_aux_frame6MpnIJVMState__i_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: objArrayKlass.o; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cUParallelScavengeHeapHcollect6MnHGCCauseFCause__v_; +text: .text%JVM_GetSystemPackage; +text: 
.text%__1cLStatSamplerTget_system_property6FpkcpnGThread__2_; +text: .text%__1cPconvL2D_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cETypeJis_finite6kM_i_; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: compiledICHolderKlass.o; +text: .text%__1cXPartialSubtypeCheckNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSaddF24_reg_immNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cCosNcommit_memory6FpcII_i_; +text: .text%__1cOresolve_symbol6Fpkc_pC_: os_solaris.o; +text: .text%__1cLloadSSINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: klassKlass.o; +text: .text%__1cNloadConL0NodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cPMultiBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%JVM_RawMonitorCreate; +text: .text%__1cImulINodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: interpreter.o; +text: .text%__1cQConstantIntValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cOLibraryCallKitXinline_string_compareTo6M_i_; +text: .text%__1cImulINodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cOMacroAssemblerVreset_last_Java_frame6MpnMRegisterImpl_i_v_; +text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_22pC_v_; +text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cSstring_compareNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cISubFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFStateP_sub_Op_StrComp6MpknENode__v_; +text: .text%__1cIcp2bNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cFStateP_sub_Op_ConvI2F6MpknENode__v_; +text: .text%__1cOMacroAssemblerPempty_FPU_stack6M_v_; +text: .text%jni_GetStringRegion: jni.o; +text: .text%__1cQConstantIntValuePis_constant_int6kM_i_: debugInfo.o; +text: .text%__1cICodeBlobZis_at_poll_or_poll_return6MpC_i_; +text: .text%__1cSmulF24_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQaddD_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceRefKlass.o; +text: .text%__1cIAddFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cHOrLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cScompP_eReg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHBoxNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvL2FNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLConvL2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cTcmovII_reg_EQdDNENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceRefKlass.o; +text: .text%__1cLvframeArrayPunpack_to_stack6MrnFframe_i_v_; +text: .text%__1cLvframeArrayZdeallocate_monitor_chunks6M_v_; +text: .text%__1cLvframeArrayHfill_in6MpnKJavaThread_ipnNGrowableArray4CpnOcompiledVFrame___pknLRegisterMap_i_v_; +text: .text%__1cLvframeArrayIallocate6FpnKJavaThread_ipnNGrowableArray4CpnOcompiledVFrame___pnLRegisterMap_nFframe_9A9A9A_p0_; +text: .text%__1cOstoreF_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVcompiledICHolderKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNcmovI_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cImulINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsalL_eReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: 
.text%__1cMciArrayKlassOis_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cbCAbstractInterpreterGeneratorUset_wide_entry_point6MpnITemplate_rpC_v_; +text: .text%Unsafe_CompareAndSwapInt; +text: .text%__1cZInterpreterMacroAssemblerPset_mdp_data_at6MpnMRegisterImpl_i2_v_; +text: .text%__1cRsubI_eReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cQsalL_eReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMsubD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYcmpL_zero_flags_LTGENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cODeoptimizationLUnrollBlock2t6MiiiiipippCnJBasicType__v_; +text: .text%__1cNTemplateTableRlocals_index_wide6FpnMRegisterImpl__v_; +text: .text%__1cKstorePNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2F_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConDNodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LEGTNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cSmulF24_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosHSolarisVcleanup_interruptible6FpnKJavaThread__v_; +text: .text%__1cCosHSolarisTsetup_interruptible6F_pnKJavaThread__; +text: .text%__1cQmulI_mem_immNodeFreloc6kM_i_; +text: .text%__1cJloadFNodeIpipeline6kM_pknIPipeline__; +text: .text%JVM_Sleep; +text: .text%JVM_Lseek; +text: .text%__1cNnmethodLocker2t6MpC_v_; +text: .text%__1cRmulI_eReg_immNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNTemplateTableQvolatile_barrier6F_v_; +text: .text%__1cHnmethodVmark_as_seen_on_stack6M_v_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_ppkc5i_i_: arguments.o; +text: .text%__1cQmulD_reg_immNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cKVtableStubSpd_code_size_limit6Fi_i_; +text: .text%__1cMdivD_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_; +text: .text%__1cQmulD_reg_immNodeFreloc6kM_i_; +text: .text%__1cODeoptimizationRlast_frame_adjust6Fii_i_; +text: .text%__1cFParseQdo_monitor_enter6M_v_; +text: .text%__1cCosHSolarisTsetup_interruptible6FpnKJavaThread__v_; +text: .text%__1cODeoptimizationScreate_vframeArray6FpnKJavaThread_nFframe_pnLRegisterMap__pnLvframeArray__; +text: .text%__1cLConvF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRaddI_mem_eRegNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cODeoptimizationNuncommon_trap6FpnKJavaThread_i_pn0ALUnrollBlock__; +text: .text%__1cODeoptimizationTuncommon_trap_inner6FpnKJavaThread_i_v_; +text: .text%__1cNcmovL_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: deoptimization.o; +text: .text%__1cODeoptimizationRgather_statistics6Fn0ALDeoptReason_n0ALDeoptAction_nJBytecodesECode__v_; +text: .text%__1cIcp2bNodeMideal_Opcode6kM_i_: ad_i486_misc.o; +text: .text%__1cODeoptimizationPget_method_data6FpnKJavaThread_nMmethodHandle_i_pnRmethodDataOopDesc__; +text: .text%__1cODeoptimizationNunpack_frames6FpnKJavaThread_i_nJBasicType__; +text: .text%__1cIci2bNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cODeoptimizationYfetch_unroll_info_helper6FpnKJavaThread__pn0ALUnrollBlock__; +text: .text%__1cOMacroAssemblerFenter6M_v_; +text: .text%Unsafe_GetNativeByte; +text: .text%__1cOMacroAssemblerNpop_FPU_state6M_v_; +text: 
.text%__1cTsarL_eReg_32_63NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPconvL2F_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQmulD_reg_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRuntime.o; +text: .text%__1cFTypeFGis_nan6kM_i_; +text: .text%__1cSaddF24_reg_memNodeMcisc_operand6kM_i_: ad_i486_misc.o; +text: .text%__1cKCMoveLNodeGOpcode6kM_i_; +text: .text%JVM_NanoTime; +text: .text%__1cIXorINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cFParseOdo_tableswitch6M_v_; +text: .text%__1cCosNjavaTimeNanos6F_x_; +text: .text%__1cETypeEmake6Fn0AFTYPES__pk0_; +text: .text%__1cVMoveF2I_reg_stackNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerFpushl6MnHAddress__v_; +text: .text%jni_GetEnv; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: methodLiveness.o; +text: .text%__1cKstoreCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cGICStubFclear6M_v_; +text: .text%__1cODeoptimizationLUnrollBlock2T6M_v_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_i486.o; +text: .text%__1cGciType2t6MnJBasicType__v_; +text: .text%__1cKstoreDNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRComputeEntryStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cMloadConFNodeFreloc6kM_i_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlassKlass.o; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCi_v_; +text: .text%__1cHTypePtrKadd_offset6kMi_pk0_; +text: .text%__1cMorI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cFStateP_sub_Op_ConvI2D6MpknENode__v_; +text: .text%__1cJAssemblerEmovw6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cQshrL_eReg_CLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIimmFOperFclone6kM_pnIMachOper__; +text: .text%__1cFKlassMoop_is_array6kM_i_: objArrayKlassKlass.o; +text: .text%__1cFStateM_sub_Op_AddF6MpknENode__v_; +text: .text%__1cOcompI_eRegNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cMloadConFNodeFclone6kM_pnENode__; +text: .text%__1cTconvI2F_SSF_memNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cParrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%jint_cmp: parse2.o; +text: .text%__1cJAssemblerExorl6MpnMRegisterImpl_i_v_; +text: .text%__1cPBytecode_invokeLresult_type6kMpnGThread__nJBasicType__; +text: .text%jni_CallVoidMethod: jni.o; +text: .text%__1cOsubF24_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOstoreF_immNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cENodeHis_Call6M_pnICallNode__: machnode.o; +text: .text%__1cLloadSSINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cLloadSSINodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_; +text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_; +text: .text%__1cMnegF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConFNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%Unsafe_GetNativeFloat; +text: .text%__1cKstoreDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSaddD_reg_roundNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddI_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWroundFloat_mem_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_; +text: .text%__1cPmovP_nocopyNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: 
.text%__1cENodeIis_Catch6kM_pknJCatchNode__: callnode.o; +text: .text%__1cOsubF24_regNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLMoveL2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNstoreImmPNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cMStubCodeMark2T6M_v_; +text: .text%__1cRSignatureIteratorHiterate6M_v_; +text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_; +text: .text%__1cFStateO_sub_Op_CMoveP6MpknENode__v_; +text: .text%__1cOcmovI_regUNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddD_reg_imm1NodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cPmovI_nocopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJAssemblerFfwait6M_v_; +text: .text%__1cJAssemblerKrepne_scan6M_v_; +text: .text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cPmovP_nocopyNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cNRegisterSaverWrestore_live_registers6FpnOMacroAssembler__v_; +text: .text%__1cRandL_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNcmovL_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_; +text: .text%__1cZInterpreterMacroAssemblerRremove_activation6MnITosState_pnMRegisterImpl_iii_v_; +text: .text%__1cImulINodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cImulINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSaddF24_reg_memNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cImulINodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cJname2type6Fpkc_nJBasicType__; +text: .text%__1cOcmpF_cc_P6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_continuation_for6MnITosState__pC_; +text: .text%__1cLvframeArrayRregister_location6kMi_pC_; +text: .text%JVM_FindPrimitiveClass; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constantPoolKlass.o; +text: .text%__1cVis_positive_one_float6Ff_i_; +text: .text%__1cRaddI_mem_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmulF24_reg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cSdivD_reg_roundNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cSaddF24_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_; +text: .text%__1cSaddF24_reg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cIDivINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSstoreD_roundedNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMdivD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMmulD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIAddDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNRelocIterator2t6MpnKCodeBuffer_pC3_v_; +text: .text%__1cFStateO_sub_Op_LoadPC6MpknENode__v_; +text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cSmulF24_reg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cJCmpF3NodeGOpcode6kM_i_; +text: .text%__1cIAddDNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cHMulNodeGis_Mul6kM_pk0_: classes.o; +text: .text%__1cJAssemblerFtestb6MpnMRegisterImpl_i_v_; +text: .text%__1cIGraphKitSgen_native_wrapper6MpnIciMethod__v_; +text: 
.text%__1cNTemplateTableQfast_accessfield6FnITosState__v_; +text: .text%__1cOmulIS_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNtestI_regNodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%Unsafe_StaticFieldBaseFromField; +text: .text%__1cSaddF24_reg_memNodeRis_cisc_alternate6kM_i_: ad_i486_misc.o; +text: .text%__1cOsubF24_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_; +text: .text%__1cKLoadPCNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_; +text: .text%__1cXpartialSubtypeCheckNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%Unsafe_StaticFieldOffset; +text: .text%__1cVMoveL2D_reg_stackNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_; +text: .text%__1cKdirectOperJnum_edges6kM_I_: ad_i486_clone.o; +text: .text%Unsafe_GetObjectVolatile; +text: .text%__1cHnmethodFflush6M_v_; +text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_; +text: .text%__1cTconvI2F_SSF_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cICodeBlobFflush6M_v_; +text: .text%__1cJAssemblerMemit_arith_b6MiipnMRegisterImpl_i_v_; +text: .text%__1cFTypeFFempty6kM_i_; +text: .text%__1cKdirectOperNconstant_disp6kM_i_: ad_i486_clone.o; +text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cNReservedSpace2t6MpcI_v_; +text: .text%__1cNaddP_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJEventMark2t6MpkcE_v_: nmethod.o; +text: .text%__1cIMachOperNbase_position6kM_i_; +text: .text%__1cQorl_eReg_immNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2F_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cPconvL2F_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cWroundFloat_mem_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cYinternal_word_RelocationMforce_target6MpC_v_: relocInfo.o; +text: .text%__1cQmulD_reg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cZInterpreterMacroAssemblerGf2ieee6M_v_; +text: .text%__1cQmulD_reg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_; +text: .text%__1cPconvL2D_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIMachOperMdisp_as_type6kM_pknHTypePtr__: ad_i486_clone.o; +text: .text%Unsafe_EnsureClassInitialized; +text: .text%__1cFStateS_sub_Op_CallNative6MpknENode__v_; +text: .text%__1cIciSymbolHas_utf86M_pkc_; +text: .text%__1cOtypeArrayKlassNexternal_name6FnJBasicType__pkc_; +text: .text%__1cENodeHdel_out6Mp0_v_: connode.o; +text: .text%JVM_GetClassContext; +text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__; +text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlass.o; +text: .text%__1cIciMethodMnative_entry6M_pC_; +text: .text%__1cNdivI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJAssemblerHfincstp6M_v_; +text: .text%__1cZInterpreterMacroAssemblerSnotify_method_exit6MnITosState__v_; +text: .text%__1cETypeFxdual6kM_pk0_; +text: .text%__1cQorI_eReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOMacroAssemblerEfpop6M_v_; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassMcreate_klass6FnJBasicType_ipnGThread__pnMklassOopDesc__; +text: .text%__1cLlog2_intptr6Fi_i_: typeArrayKlass.o; +text: .text%__1cLOptoRuntimeTmultianewarray_Type6Fi_pknITypeFunc__; +text: 
.text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod__v_; +text: .text%__1cQAbstractCompilerPsupports_native6M_i_: c2compiler.o; +text: .text%__1cOGenerateOopMapGdo_jsr6Mi_v_; +text: .text%__1cZInterpreterMacroAssemblerbGget_unsigned_2_byte_index_at_bcp6MpnMRegisterImpl_i_v_; +text: .text%__1cSaddF24_reg_immNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cIAddDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cbCAbstractInterpreterGeneratorbBgenerate_result_handler_for6MnJBasicType__pC_; +text: .text%__1cOMacroAssemblerOpush_FPU_state6M_v_; +text: .text%__1cZInterpreterMacroAssemblerRcall_VM_leaf_base6MpCi_v_; +text: .text%__1cZInterpreterMacroAssemblerGd2ieee6M_v_; +text: .text%jni_RegisterNatives: jni.o; +text: .text%__1cSmulF24_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSaddF24_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%JVM_GetClassDeclaredFields; +text: .text%stat: os_solaris.o; +text: .text%JVM_IsSameClassPackage; +text: .text%__1cWroundFloat_mem_regNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cCosMuser_handler6F_pv_; +text: .text%__1cLConvF2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cLClassLoaderbCupdate_class_path_entry_list6Fpkc_v_; +text: .text%__1cOcmovI_regUNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cJArgumentsRverify_percentage6FIpkc_i_; +text: .text%jni_SetStaticObjectField: jni.o; +text: .text%__1cNTemplateTableGiconst6Fi_v_; +text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cFStateM_sub_Op_AddD6MpknENode__v_; +text: .text%__1cLConvD2FNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJAssemblerEcall6MpnMRegisterImpl_nJrelocInfoJrelocType__v_; +text: .text%__1cRComputeEntryStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cFTypeFJis_finite6kM_i_; +text: .text%__1cCosEstat6FpkcpnEstat__i_; +text: .text%__1cLOptoRuntimeRresolve_call_Type6F_pknITypeFunc__; +text: .text%__1cCosHSolarisSset_signal_handler6Fiii_v_; +text: .text%__1cOPhaseIdealLoopTdo_maximally_unroll6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_2_v_; +text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_; +text: .text%__1cSaddF24_reg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cJAssemblerGfnsave6MnHAddress__v_; +text: .text%__1cVMoveF2I_reg_stackNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMdecI_memNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_; +text: .text%__1cJloadDNodeFreloc6kM_i_; +text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_; +text: .text%__1cMloadConLNodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_; +text: .text%__1cSaddF24_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMloadConFNodeGis_Con6kM_I_: ad_i486_misc.o; +text: .text%__1cISubDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLloadSSINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cISubDNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cQorl_eReg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_MonitorExit: jni.o; +text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_; +text: .text%jni_MonitorEnter: jni.o; +text: .text%__1cSaddD_reg_roundNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cLConvL2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_; +text: .text%__1cKReturnNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cPcheckCastPPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFframebFset_interpreter_frame_sender_sp6Mpi_v_; +text: .text%__1cRmulI_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovI_regUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cULinearLeastSquareFit2t6MI_v_; +text: .text%__1cNTemplateTableH_return6FnITosState__v_; +text: .text%__1cOMacroAssemblerEfcmp6MpnMRegisterImpl__v_; +text: .text%__1cMTailJumpNodeGOpcode6kM_i_; +text: .text%__1cOsubF24_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWroundFloat_mem_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_; +text: .text%jni_GetJavaVM; +text: .text%__1cHciKlassIis_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cLConvL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJOopMapSetQsingular_oop_map6M_pnGOopMap__; +text: .text%__1cSaddF24_reg_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLConvF2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cTconvI2F_SSF_memNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cTconvI2F_SSF_memNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cCosZvm_allocation_granularity6F_i_; +text: .text%__1cNReservedSpaceKinitialize6MIIipc_v_; +text: .text%JVM_LoadLibrary; +text: .text%__1cCosOreserve_memory6FIpc_1_; +text: .text%JVM_IsSupportedJNIVersion; +text: .text%__1cOstoreF_immNodeFreloc6kM_i_; +text: .text%__1cVMoveF2I_reg_stackNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_; +text: .text%__1cNmulI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerGfild_d6MnHAddress__v_; +text: .text%__1cRmulI_imm_highNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerGmovsxw6MpnMRegisterImpl_nHAddress__v_; +text: .text%Unsafe_ObjectFieldOffset; +text: .text%__1cIcp2bNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIcp2bNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeHsize_of6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerEincl6MnHAddress__v_; +text: .text%__1cXpartialSubtypeCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPmovP_nocopyNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cVMoveL2D_reg_stackNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosHSolarisOis_sig_ignored6Fi_i_; +text: .text%__1cRaddI_mem_eRegNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOLibraryCallKitbNinline_native_Reflection_getCallerClass6M_i_; +text: .text%__1cOMacroAssemblerQload_signed_byte6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cOLibraryCallKitZinline_native_Class_query6MnIciMethodLIntrinsicId__i_; +text: .text%__1cOstoreF_immNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cPconvL2D_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKciTypeFlowLStateVectorOdo_null_assert6MpnHciKlass__v_; +text: .text%__1cPconvL2F_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIci2bNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cXcmpL_reg_flags_LEGTNodeFclone6kM_pnENode__; +text: .text%__1cJMemRegion2t6M_v_: 
cardTableModRefBS.o; +text: .text%__1cQmulD_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJAssemblerHfucomip6Mi_v_; +text: .text%__1cTGeneratePairingInfoOreport_results6kM_i_: ciMethod.o; +text: .text%__1cFciEnvbNArrayIndexOutOfBoundsException_instance6M_pnKciInstance__; +text: .text%__1cRsalI_eReg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%jni_SetObjectField: jni.o; +text: .text%Unsafe_AllocateMemory; +text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: callnode.o; +text: .text%__1cTconvI2F_SSF_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMelapsedTimer2t6M_v_: methodLiveness.o; +text: .text%__1cISubDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvI2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvF2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKMemoryPool2t6Mpkcn0AIPoolType_IIii_v_; +text: .text%__1cRaddD_reg_imm1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosHSolarisOset_mpss_range6FpcII_i_; +text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_; +text: .text%__1cPciInstanceKlassbDcompute_shared_is_initialized6M_i_; +text: .text%jni_Throw: jni.o; +text: .text%__1cNSpaceCounters2t6MpkciIpnMMutableSpace_pnSGenerationCounters__v_; +text: .text%__1cOPSVirtualSpaceJexpand_by6MI_i_; +text: .text%__1cSaddF24_reg_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableFaload6Fi_v_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_; +text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_; +text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_; +text: .text%__1cPmovP_nocopyNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_; +text: .text%__1cIcp2bNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLklassVtableTis_miranda_entry_at6Mi_i_; +text: .text%__1cHnmethodbJcontinuation_for_implicit_exception6MpC_1_; +text: .text%__1cLVtableStubsIcontains6FpC_i_; +text: .text%__1cWImplicitExceptionTable2t6MpknHnmethod__v_; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__; +text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_; +text: .text%__1cOMacroAssemblerQload_signed_word6MpnMRegisterImpl_nHAddress__i_; +text: .text%__1cNdivI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXNativeSignatureIteratorLpass_double6M_v_: interpreterRuntime.o; +text: .text%__1cLVtableStubsPstub_containing6FpC_pnKVtableStub__; +text: .text%__1cOtailjmpIndNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_; +text: .text%__1cFStateP_sub_Op_ConvF2I6MpknENode__v_; +text: .text%__1cODeoptimizationYreset_invocation_counter6FpnJScopeDesc_i_v_; +text: .text%__1cQshrL_eReg_CLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvF2I_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_: interpreterRuntime.o; +text: .text%__1cOcompI_eRegNodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerNsuper_call_VM6MpnMRegisterImpl_2pC22_v_; +text: .text%__1cLVtableStubsLcreate_stub6FiipnNmethodOopDesc__pC_; +text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_; +text: .text%__1cNCellTypeStateImake_any6Fi_0_: generateOopMap.o; +text: .text%__1cJAssemblerEmovb6MnHAddress_i_v_; +text: 
.text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_; +text: .text%__1cNRegisterSaverTsave_live_registers6FpnOMacroAssembler_ipi_pnGOopMap__; +text: .text%__1cTshlL_eReg_32_63NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cWImplicitExceptionTableCat6kMI_I_; +text: .text%__1cQshrL_eReg_CLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLVtableStubsGlookup6Fiii_pnKVtableStub__; +text: .text%__1cSsarL_eReg_1_31NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNReservedSpaceKfirst_part6MIii_0_; +text: .text%__1cJAssemblerFfinit6M_v_; +text: .text%__1cLVtableStubsOis_entry_point6FpC_i_; +text: .text%__1cFParseRjump_if_true_fork6MpnGIfNode_ii_v_; +text: .text%__1cKVtableStubRpd_code_alignment6F_i_; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: subnode.o; +text: .text%__1cUInterpreterGeneratorUgenerate_fixed_frame6Mi_v_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerSstore_check_part_26MpnMRegisterImpl__v_; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: frame.o; +text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_i486.o; +text: .text%__1cGEventsDlog6FpkcE_v_: compiledIC.o; +text: .text%__1cLOptoRuntimebBhandle_wrong_method_ic_miss6FpnKJavaThread__pC_; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_; +text: .text%__1cNReservedSpace2t6MI_v_; +text: .text%__1cVMoveF2I_reg_stackNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationZtrap_state_set_recompiled6Fii_i_; +text: .text%__1cINegFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSmulF24_reg_immNodeFreloc6kM_i_; +text: .text%__1cLloadSSINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerSstore_check_part_16MpnMRegisterImpl__v_; +text: .text%__1cKCompiledICOis_megamorphic6kM_i_; +text: .text%__1cUInterpreterGeneratorbAgenerate_run_compiled_code6M_v_; +text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MpC_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_; +text: .text%__1cIDivDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_; +text: .text%JVM_GetLastErrorString; +text: .text%__1cIDivDNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_; +text: .text%__1cSmulF24_reg_immNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cFStateM_sub_Op_DivD6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_DivI6MpknENode__v_; +text: .text%__1cRComputeEntryStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cINegDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNTemplateTableGdstore6Fi_v_; +text: .text%__1cNTemplateTableFdload6Fi_v_; +text: .text%__1cNTemplateTableOprepare_invoke6FpnMRegisterImpl_2inJBytecodesECode__v_; +text: .text%__1cLConvD2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTableGlstore6Fi_v_; +text: .text%__1cOcompP_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNTemplateTableGfstore6Fi_v_; +text: .text%__1cNTemplateTableGastore6Fi_v_; +text: .text%__1cKConv2BNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNGCTaskManagerGthread6MI_pnMGCTaskThread__; +text: .text%__1cPdouble_quadword6Fpxxx_0_: ad_i486.o; +text: .text%__1cFStateM_sub_Op_SubD6MpknENode__v_; +text: .text%__1cNTemplateTableZload_field_cp_cache_entry6FipnMRegisterImpl_22i_v_; +text: 
.text%__1cSmulF24_reg_memNodeFreloc6kM_i_; +text: .text%__1cScompP_eReg_memNodeFreloc6kM_i_; +text: .text%__1cLMoveL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNTemplateTablebAload_invoke_cp_cache_entry6FipnMRegisterImpl_22ii_v_; +text: .text%__1cFStateM_sub_Op_NegD6MpknENode__v_; +text: .text%__1cGThreadMis_VM_thread6kM_i_: gcTaskThread.o; +text: .text%__1cJlog2_long6Fx_i_: divnode.o; +text: .text%__1cMelapsedTimer2t6M_v_: compileBroker.o; +text: .text%__1cNcmovI_regNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cQaddD_reg_immNodeFreloc6kM_i_; +text: .text%__1cIRetTableHadd_jsr6Mii_v_; +text: .text%__1cNTemplateTableFfload6Fi_v_; +text: .text%__1cNTemplateTableFlload6Fi_v_; +text: .text%__1cNcmovI_memNodeFreloc6kM_i_; +text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: unsafe.o; +text: .text%__1cPaddress_of_flag6FnXCommandLineFlagWithType__pnEFlag__: globals.o; +text: .text%__1cNTemplateTableFiload6Fi_v_; +text: .text%__1cOGenerateOopMapTret_jump_targets_do6MpnOBytecodeStream_pFp0ipi_vi4_v_; +text: .text%__1cNdivI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cImulINodeFreloc6kM_i_; +text: .text%__1cNmulI_eRegNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFStateO_sub_Op_Conv2B6MpknENode__v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_; +text: .text%__1cIRetTableUfind_jsrs_for_target6Mi_pnNRetTableEntry__; +text: .text%__1cNmulI_eRegNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNTemplateTableGistore6Fi_v_; +text: .text%__1cRClassPathZipEntry2t6Mppvpc_v_; +text: .text%__1cLClassLoaderXcreate_class_path_entry6FpcnEstat_ppnOClassPathEntry__v_; +text: .text%__1cLClassLoaderLadd_to_list6FpnOClassPathEntry__v_; +text: .text%__1cOLibraryCallKitbBinline_native_currentThread6M_i_; +text: .text%__1cOLibraryCallKitXgenerate_current_thread6MrpnENode__2_; +text: .text%__1cTconvI2F_SSF_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cQaddD_reg_immNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cLClassLoaderSget_canonical_path6Fpc1i_i_; +text: .text%__1cSmembar_acquireNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMstoreSSINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRaddD_reg_imm1NodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerEcmpb6MnHAddress_i_v_; +text: .text%__1cINegDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJAssemblerEdecl6MnHAddress__v_; +text: .text%__1cOstackSlotFOperFclone6kM_pnIMachOper__; +text: .text%__1cSaddF24_reg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cCosGsignal6Fipv_1_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%jni_CallStaticObjectMethod: jni.o; +text: .text%__1cMNativeLookupNlong_jni_name6FnMmethodHandle__pc_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceRefKlass.o; +text: .text%__1cZInterpreterMacroAssemblerRprofile_checkcast6MipnMRegisterImpl__v_; +text: .text%__1cHRetNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQmulD_reg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cOcompP_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMPerfDataList2T6M_v_; +text: .text%__1cHRetNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_LTGENodeFclone6kM_pnENode__; +text: .text%__1cNtestI_regNodeFclone6kM_pnENode__; +text: .text%__1cOClassPathEntry2t6M_v_; +text: .text%__1cVMoveF2I_reg_stackNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_; +text: 
.text%Unsafe_SetMemory; +text: .text%__1cRaddL_eReg_memNodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cRaddL_eReg_memNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIcp2bNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cFciEnvOrecord_failure6Mpkc_v_; +text: .text%__1cNcmovL_regNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cbCAbstractInterpreterGeneratorbHgenerate_exception_handler_common6Mpkc2i_pC_; +text: .text%__1cENodeHdel_out6Mp0_v_: ifg.o; +text: .text%__1cJAssemblerEfld16M_v_; +text: .text%__1cSmembar_acquireNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cJAssemblerFfld_x6MnHAddress__v_; +text: .text%__1cLConvD2FNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJAssemblerHfistp_d6MnHAddress__v_; +text: .text%__1cKstoreFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJAssemblerFimull6MpnMRegisterImpl_2_v_; +text: .text%__1cRInvocationCounterDdef6Fn0AFState_ipFnMmethodHandle_pnGThread__pC_v_; +text: .text%__1cOMacroAssemblerIfcmp2int6MpnMRegisterImpl_i_v_; +text: .text%__1cFciEnvXget_or_create_exception6MrpnI_jobject_nMsymbolHandle__pnKciInstance__; +text: .text%__1cMPerfDataList2t6Mi_v_; +text: .text%__1cLloadSSINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLVtableStubsScreate_vtable_stub6Fii_pnKVtableStub__; +text: .text%__1cRaddI_mem_eRegNodeFreloc6kM_i_; +text: .text%__1cUInterpreterGeneratorTgenerate_math_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cCosFyield6F_v_; +text: .text%__1cOCompilerThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cRComputeEntryStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cIDivFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSCardTableExtensionVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cSCardTableExtensionbEresize_covered_region_by_start6MnJMemRegion__v_; +text: .text%__1cJArgumentsMbuild_string6Fppcpkc_v_; +text: .text%__1cJArgumentsObuild_jvm_args6Fpkc_v_; +text: .text%__1cJArgumentsMadd_property6Fpkc_i_; +text: .text%__1cJAssemblerFfmulp6Mi_v_; +text: .text%__1cUInterpreterGeneratorbEgenerate_asm_interpreter_entry6Mi_pC_; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstanceKlass.o; +text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_22_v_; +text: .text%__1cNcmovL_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_22_v_; +text: .text%__1cRaddI_mem_eRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_FindSignal; +text: .text%__1cKVtableStub2n6FIi_pv_; +text: .text%__1cSInterpreterRuntimebKthrow_ArrayIndexOutOfBoundsException6FpnKJavaThread_pci_v_; +text: .text%__1cOGenerateOopMapRdo_multianewarray6Mii_v_; +text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_: interpreterRuntime.o; +text: .text%__1cLOptoRuntimeVgenerate_handler_blob6FpCi_pnNSafepointBlob__; +text: .text%JVM_RegisterSignal; +text: .text%__1cFParsePdo_lookupswitch6M_v_; +text: .text%__1cSaddF24_reg_memNodeQuse_cisc_RegMask6M_v_; +text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_; +text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_2_v_; +text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__; 
+text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_; +text: .text%__1cOcmpF_cc_P6NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJAssemblerEcdql6M_v_; +text: .text%__1cMVM_OperationNdoit_prologue6M_i_: vm_operations.o; +text: .text%__1cUInterpreterGeneratorbDgenerate_stack_overflow_check6M_v_; +text: .text%__1cUInterpreterGeneratorXcheck_for_compiled_code6MrnFLabel__v_; +text: .text%__1cRCardTableModRefBSbCfind_covering_region_by_base6MpnIHeapWord__i_; +text: .text%__1cIci2bNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRCardTableModRefBSbAlargest_prev_committed_end6kMi_pnIHeapWord__; +text: .text%__1cMSysClassPathNreset_item_at6Mi_v_: arguments.o; +text: .text%__1cLconvI2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cFStateM_sub_Op_CmpF6MpknENode__v_; +text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cOLibraryCallKitVinline_fp_conversions6MnIciMethodLIntrinsicId__i_; +text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cHCompilePget_invoke_name6M_pnIciSymbol__; +text: .text%__1cFParseRdo_multianewarray6M_v_; +text: .text%__1cJAssemblerGfmul_d6MnHAddress__v_; +text: .text%__1cLVtableStubsFenter6FiiipnKVtableStub__v_; +text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl__v_; +text: .text%__1cLMoveF2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cRaddD_reg_imm1NodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pIi_v_: oopMapCache.o; +text: .text%__1cNTemplateTableLwide_istore6F_v_; +text: .text%__1cRaddD_reg_imm1NodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cISubFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNandI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOtailjmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o; +text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_; +text: .text%__1cNReservedSpaceJlast_part6MI_0_; +text: .text%__1cNSafepointBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cJlookupOne6FpnHJNIEnv__pkcpnGThread__pnH_jclass__: jni.o; +text: .text%__1cUPSGenerationCounters2t6MpkciipnOPSVirtualSpace__v_; +text: .text%__1cNSafepointBlob2n6FII_pv_; +text: .text%__1cQmulD_reg_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTconvI2F_SSF_memNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPSVirtualSpace2t6MnNReservedSpace_I_v_; +text: .text%__1cFTypeDJis_finite6kM_i_; +text: .text%JVM_GetInterfaceVersion; +text: .text%__1cQOopMapCacheEntryPfill_for_native6M_v_; +text: .text%__1cNMemoryManager2t6M_v_; +text: .text%__1cSPSPromotionManager2t6M_v_; +text: .text%__1cOstackSlotIOperFscale6kM_i_: ad_i486.o; +text: .text%__1cOstackSlotIOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cOstackSlotIOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_i486.o; +text: .text%__1cNFingerprinterLfingerprint6M_X_: oopMapCache.o; +text: .text%__1cMnegF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveF2I_reg_stackNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSaddF24_reg_memNodeFreloc6kM_i_; +text: .text%__1cNTemplateTableGfconst6Fi_v_; +text: .text%__1cSdivD_reg_roundNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cPmovP_nocopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; 
+text: .text%jni_CallStaticObjectMethodV: jni.o; +text: .text%__1cIci2bNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMOopTaskQdDueue2t6M_v_; +text: .text%__1cSCommandLineFlagsExJboolAtPut6FnXCommandLineFlagWithType_i_v_; +text: .text%__1cSCommandLineFlagsExKis_default6FnPCommandLineFlag__i_; +text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cRaddI_mem_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%JVM_Available; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MpnMRegisterImpl_2i_v_; +text: .text%__1cCosIjvm_path6Fpci_v_; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_2i_v_; +text: .text%__1cMOopTaskQdDueueKinitialize6M_v_; +text: .text%__1cPmovI_nocopyNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOtailjmpIndNodeHtwo_adr6kM_I_: ad_i486_misc.o; +text: .text%__1cIci2bNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cCosGgetenv6Fpkcpci_i_; +text: .text%__1cOtailjmpIndNodeGpinned6kM_i_: ad_i486_misc.o; +text: .text%jni_GetDoubleArrayRegion: jni.o; +text: .text%__1cIcp2bNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLconvI2BNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cXcmpL_reg_flags_EQdDNENodeFclone6kM_pnENode__; +text: .text%__1cKciTypeFlowLStateVectorRdo_multianewarray6MpnQciByteCodeStream__v_; +text: .text%__1cJAssemblerGfild_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfstp_d6Mi_v_; +text: .text%__1cTMaskFillerForNativeLpass_object6M_v_: oopMapCache.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_; +text: .text%__1cLMoveF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNStubGeneratorUgenerate_d2i_wrapper6MpC_1_: stubGenerator_i486.o; +text: .text%__1cTMaskFillerForNativeJpass_long6M_v_: oopMapCache.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_; +text: .text%__1cNTemplateTableUinvokevirtual_helper6FpnMRegisterImpl_22_v_; +text: .text%__1cJAssemblerEfxch6Mi_v_; +text: .text%__1cJAssemblerFfprem6M_v_; +text: .text%__1cSvframeStreamCommonbFfill_in_compiled_inlined_sender6M_i_; +text: .text%__1cJAssemblerJfnstsw_ax6M_v_; +text: .text%__1cJAssemblerEsahf6M_v_; +text: .text%__1cQObjectStartArraySset_covered_region6MnJMemRegion__v_; +text: .text%__1cQObjectStartArrayKinitialize6MnJMemRegion__v_; +text: .text%__1cNVM_DeoptimizeEname6kM_pkc_: vm_operations.o; +text: .text%__1cOtailjmpIndNodeJnum_opnds6kM_I_: ad_i486_misc.o; +text: .text%__1cMVM_OperationNdoit_epilogue6M_v_: vm_operations.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC222i_v_; +text: .text%__1cJAssemblerEfchs6M_v_; +text: .text%__1cNVM_DeoptimizeEdoit6M_v_; +text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_; +text: .text%__1cSaddF24_reg_immNodeFreloc6kM_i_; +text: .text%__1cJAssemblerEfabs6M_v_; +text: .text%__1cJStubQdDueueOregister_queue6Fp0_v_; +text: .text%__1cOMacroAssemblerPcorrected_idivl6MpnMRegisterImpl__i_; +text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_; +text: .text%__1cRsubI_eReg_memNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFStatebB_sub_Op_PartialSubtypeCheck6MpknENode__v_; +text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_; +text: 
.text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_; +text: .text%__1cFStateL_sub_Op_OrL6MpknENode__v_; +text: .text%__1cPconvF2D_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFStateM_sub_Op_SubF6MpknENode__v_; +text: .text%__1cPOopTaskQdDueueSetOregister_queue6MipnMOopTaskQdDueue__v_; +text: .text%__1cMPeriodicTaskGenroll6M_v_; +text: .text%__1cNTemplateTableSgetfield_or_static6Fii_v_; +text: .text%__1cMPeriodicTask2t6MI_v_; +text: .text%__1cSestimate_path_freq6FpnENode__f_: loopnode.o; +text: .text%__1cFStateP_sub_Op_MoveL2D6MpknENode__v_; +text: .text%__1cNincI_eRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNTemplateTableHcastore6F_v_; +text: .text%__1cUGcThreadCountClosureJdo_thread6MpnGThread__v_; +text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: memoryService.o; +text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_; +text: .text%__1cNTemplateTableLwide_lstore6F_v_; +text: .text%__1cPGCMemoryManager2t6M_v_; +text: .text%__1cKGCStatInfo2t6Mi_v_; +text: .text%__1cJMarkSweepUAdjustPointerClosure2t6Mi_v_: markSweep.o; +text: .text%__1cNstoreImmBNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLloadSSINodeZcheck_for_anti_dependence6kM_i_: ad_i486_misc.o; +text: .text%__1cNTemplateTableDldc6Fi_v_; +text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableGdconst6Fi_v_; +text: .text%__1cNTemplateTableGlconst6Fi_v_; +text: .text%__1cNTemplateTableKinitialize6F_v_; +text: .text%__1cNTemplateTableUjvmti_post_field_mod6Fii_v_; +text: .text%__1cJAssemblerEaddl6MnHAddress_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerFidivl6MpnMRegisterImpl__v_; +text: .text%__1cOmulF24_regNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cJAssemblerEmull6MnHAddress__v_; +text: .text%__1cJAssemblerDorl6MnHAddress_i_v_; +text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEshll6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerFshrdl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl__v_; +text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_; +text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_; +text: .text%__1cJAssemblerFcpuid6M_v_; +text: .text%__1cJAssemblerEfldz6M_v_; +text: .text%__1cJAssemblerFfld_s6Mi_v_; +text: .text%__1cJAssemblerFfst_s6MnHAddress__v_; +text: .text%__1cJAssemblerFfst_d6MnHAddress__v_; +text: .text%__1cOaddF24_regNodeMcisc_version6Mi_pnIMachNode__; +text: .text%__1cNTemplateTableSputfield_or_static6Fii_v_; +text: .text%__1cSaddF24_reg_immNodeKconst_size6kM_i_: ad_i486_misc.o; +text: .text%__1cHMatcherQconvL2FSupported6F_ki_; +text: .text%__1cFStateP_sub_Op_ConvL2F6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_ConvL2D6MpknENode__v_; +text: .text%__1cGatomll6Fpkcpx_i_: arguments.o; +text: .text%__1cJArgumentsRcheck_memory_size6Fxx_n0AJArgsRange__; +text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_; +text: .text%__1cPconvD2F_regNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cJArgumentsRparse_memory_size6Fpkcpxx_n0AJArgsRange__; +text: .text%__1cFStateP_sub_Op_ConvD2F6MpknENode__v_; +text: .text%__1cHnmethodVinvalidate_osr_method6M_v_; +text: .text%__1cJArgumentsXPropertyList_unique_add6FppnOSystemProperty_pkcpc_v_; +text: .text%__1cQAgentLibraryList2t6M_v_: arguments.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: arrayKlassKlass.o; +text: .text%__1cJAssemblerEmovb6MpnMRegisterImpl_nHAddress__v_; +text: 
.text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_; +text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_; +text: .text%__1cPconvL2D_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: parse1.o; +text: .text%__1cPconvD2F_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNmulI_eRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cINegFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMFastLockNodeLis_FastLock6kM_pk0_: classes.o; +text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_; +text: .text%__1cOGenerateOopMapTadd_to_ref_init_set6Mi_v_; +text: .text%__1cIPSOldGenOgen_size_limit6M_I_; +text: .text%__1cIciObjectMis_classless6kM_i_: ciMethod.o; +text: .text%__1cIPSOldGenGresize6MI_v_; +text: .text%__1cMVirtualSpaceKinitialize6MnNReservedSpace_I_i_; +text: .text%__1cQorl_eReg_immNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNciMethodKlassEmake6F_p0_; +text: .text%__1cPconvL2F_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cUParallelScavengeHeapItop_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o; +text: .text%__1cRInvocationCounterMreinitialize6Fi_v_; +text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_; +text: .text%__1cFParseNfetch_monitor6MipnENode_2_2_; +text: .text%__1cMGCTaskThread2t6MpnNGCTaskManager_II_v_; +text: .text%__1cMGCTaskThreadFstart6M_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_native_entry6Mi_pC_; +text: .text%__1cMGCTaskThreadDrun6M_v_; +text: .text%__1cJLoadFNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cGThreadOis_Java_thread6kM_i_: gcTaskThread.o; +text: .text%__1cUInterpreterGeneratorLlock_method6M_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: universe.o; +text: .text%__1cXpartialSubtypeCheckNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cISubFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cISubFNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cIPSOldGenPinitialize_work6Mpkci_v_; +text: .text%lstat: perfMemory_solaris.o; +text: .text%__1cRComputeEntryStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cPfilename_to_pid6Fpkc_l_: perfMemory_solaris.o; +text: .text%Unsafe_CompareAndSwapObject; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cJCodeCachebGmake_marked_nmethods_not_entrant6F_v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cTis_directory_secure6Fpkc_i_: perfMemory_solaris.o; +text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerWdispatch_only_noverify6MnITosState__v_; +text: .text%__1cRCollectorCounters2t6Mpkci_v_; +text: .text%__1cZInterpreterMacroAssemblerPset_mdp_flag_at6MpnMRegisterImpl_i_v_; +text: .text%__1cZInterpreterMacroAssemblerSupdate_mdp_for_ret6MpnMRegisterImpl__v_; +text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_; +text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MpnMRegisterImpl_2_v_; +text: .text%__1cJTimeStamp2t6M_v_: runtimeService.o; +text: .text%__1cIPSOldGenYinitialize_virtual_space6MnNReservedSpace_I_v_; +text: .text%__1cIPSOldGenKinitialize6MnNReservedSpace_Ipkci_v_; +text: .text%__1cMVirtualSpace2t6M_v_; +text: .text%__1cLOptoRuntimeSfetch_monitor_Type6F_pknITypeFunc__; +text: 
.text%__1cbCAbstractInterpreterGeneratorTgenerate_error_exit6Mpkc_pC_; +text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__; +text: .text%__1cNinstanceKlassSremove_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cTAbstractInterpreterKinitialize6F_v_; +text: .text%__1cLNamedThread2t6M_v_; +text: .text%__1cRMachSpillCopyNodeHsize_of6kM_I_: ad_i486.o; +text: .text%__1cLNamedThreadIset_name6MpkcE_v_; +text: .text%__1cMelapsedTimer2t6M_v_: psAdaptiveSizePolicy.o; +text: .text%__1cUPSAdaptiveSizePolicybQpromo_increment_with_supplement_aligned_up6MI_I_; +text: .text%__1cUPSAdaptiveSizePolicyPpromo_increment6MII_I_; +text: .text%__1cOMacroAssemblerElshr6MpnMRegisterImpl_2i_v_; +text: .text%__1cCosbCis_thread_cpu_time_supported6F_i_; +text: .text%__1cLConvD2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOMacroAssemblerIsave_eax6MpnMRegisterImpl__v_; +text: .text%jni_IsInstanceOf: jni.o; +text: .text%jni_CallStaticBooleanMethod: jni.o; +text: .text%__1cRCardTableModRefBSbCpar_chunk_heapword_alignment6F_I_: tenuredGeneration.o; +text: .text%jni_CallStaticVoidMethodV: jni.o; +text: .text%__1cSReferenceProcessor2t6MnJMemRegion_iii_v_; +text: .text%__1cCosWactive_processor_count6F_i_; +text: .text%jni_NewWeakGlobalRef: jni.o; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_; +text: .text%__1cOMacroAssemblerLrestore_eax6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerFfremr6MpnMRegisterImpl__v_; +text: .text%__1cLMoveL2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__; +text: .text%__1cHOrLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKReflectionbFbasic_type_arrayklass_to_mirror6FpnMklassOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cCosHrealloc6FpvI_1_; +text: .text%__1cOMacroAssemblerGsincos6Miii_v_; +text: .text%__1cEMIN24CI_6FTA0_0_: tenuredGeneration.o; +text: .text%__1cXPartialSubtypeCheckNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cCosNsigexitnum_pd6F_i_; +text: .text%__1cNGCTaskManagerKset_thread6MIpnMGCTaskThread__v_; +text: .text%__1cLConvL2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOPSVirtualSpace2t6M_v_; +text: .text%__1cNdefaultStreamMhas_log_file6M_i_; +text: .text%__1cUGenericGrowableArrayGgrow646Mi_v_; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: oopMapCache.o; +text: .text%__1cOPSVirtualSpaceKinitialize6MnNReservedSpace_I_i_; +text: .text%__1cYalign_to_allocation_size6FI_I_: heap.o; +text: .text%__1cWcheck_compare_clipping6FipnGIfNode_pnHConNode_rpnENode__i_: cfgnode.o; +text: .text%__1cLConvL2FNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cCosVatomic_xchg_bootstrap6Fipoi_i_; +text: .text%__1cUParallelScavengeHeapOresize_old_gen6MI_v_; +text: .text%__1cIciObjectOis_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cRcheck_if_clipping6FpknKRegionNode_rpnGIfNode_5_i_: cfgnode.o; +text: .text%__1cHOrLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cNTemplateTableEdup26F_v_; +text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_; +text: .text%__1cOThreadCriticalKinitialize6F_v_; +text: .text%__1cIUniverseHgenesis6FpnGThread__v_; +text: .text%__1cKJavaThread2t6M_v_; +text: .text%__1cNTemplateTableHdup2_x16F_v_; +text: .text%__1cNTemplateTableHdup2_x26F_v_; +text: .text%Unsafe_SetNativeLong; +text: .text%__1cNTemplateTableEswap6F_v_; +text: .text%__cplus_fini_at_exit: CCrti.o; +text: .text%__1cNTemplateTableGdup_x16F_v_; +text: .text%__1cNTemplateTableDpop6F_v_; +text: 
.text%__1cIUniverseUreinitialize_itables6F_v_; +text: .text%__1cNReservedSpace2t6MIIipc_v_; +text: .text%__1cNuniverse_init6F_i_; +text: .text%__1cIUniversePinitialize_heap6F_i_; +text: .text%__1cOuniverse2_init6F_v_; +text: .text%__1cNTemplateTableEpop26F_v_; +text: .text%__1cNTemplateTableIgetfield6Fi_v_; +text: .text%__1cNTemplateTableDdup6F_v_; +text: .text%__1cSuniverse_post_init6F_v_; +text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_; +text: .text%__1cNTemplateTableGdup_x26F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o; +text: .text%__1cNTemplateTableElmul6F_v_; +text: .text%__1cNTemplateTableEldiv6F_v_; +text: .text%__1cNTemplateTableElrem6F_v_; +text: .text%__1cNTemplateTableElshl6F_v_; +text: .text%__1cNTemplateTableElshr6F_v_; +text: .text%__1cNTemplateTableFlushr6F_v_; +text: .text%__1cNTemplateTableElcmp6F_v_; +text: .text%__1cNTemplateTableJwide_iinc6F_v_; +text: .text%__1cNTemplateTableEiinc6F_v_; +text: .text%__1cNTemplateTableEineg6F_v_; +text: .text%__1cNTemplateTableElneg6F_v_; +text: .text%__1cNTemplateTableEdneg6F_v_; +text: .text%__1cNTemplateTableEfneg6F_v_; +text: .text%__1cNTemplateTableEirem6F_v_; +text: .text%__1cIUniverseYcompute_base_vtable_size6F_v_; +text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_; +text: .text%__1cNReservedSpaceUpage_align_size_down6FI_I_; +text: .text%__1cQVMOperationQdDueue2t6M_v_; +text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_; +text: .text%__1cNReservedSpaceYallocation_align_size_up6FI_I_; +text: .text%__1cNTemplateTableRfast_linearswitch6F_v_; +text: .text%__1cNTemplateTableMlookupswitch6F_v_; +text: .text%__1cOvmStructs_init6F_v_; +text: .text%__1cNTemplateTableLtableswitch6F_v_; +text: .text%__1cNTemplateTableIwide_ret6F_v_; +text: .text%Unsafe_PageSize; +text: .text%__1cNTemplateTableDret6F_v_; +text: .text%Unsafe_FreeMemory; +text: .text%__1cNTemplateTableEidiv6F_v_; +text: .text%__1cJTimeStampMmilliseconds6kM_x_; +text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cHVM_ExitEdoit6M_v_; +text: .text%__1cHVM_ExitEname6kM_pkc_: vm_operations.o; +text: .text%__1cNThreadServiceEinit6F_v_; +text: .text%__1cSThreadLocalStorageEinit6F_v_; +text: .text%__1cNCollectedHeapYlarge_typearray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_; +text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_; +text: .text%__1cIVMThreadEloop6M_v_; +text: .text%__1cPGlobalTLABStats2t6M_v_; +text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_; +text: .text%__1cIVMThreadDrun6M_v_; +text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_; +text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cTtypeArrayKlassKlassVoop_is_typeArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cKTypeOopPtrFxdual6kM_pknEType__; +text: .text%__1cKTypeOopPtrEmake6FnHTypePtrDPTR_i_pk0_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cKVM_VersionWget_processor_features6F_v_; +text: .text%__1cKVM_VersionKinitialize6F_v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: tenuredGeneration.o; +text: .text%__1cNTemplateTableKinstanceof6F_v_; +text: .text%__1cNTemplateTableJcheckcast6F_v_; +text: .text%__1cNTemplateTableLarraylength6F_v_; +text: .text%__1cNTemplateTableJanewarray6F_v_; +text: 
.text%__1cNTemplateTableInewarray6F_v_; +text: .text%__1cNTemplateTableE_new6F_v_; +text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_; +text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_; +text: .text%__1cNTemplateTableMinvokestatic6Fi_v_; +text: .text%__1cNTemplateTableNinvokespecial6Fi_v_; +text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_; +text: .text%__1cNTemplateTableL_breakpoint6F_v_; +text: .text%__1cNTemplateTableGathrow6F_v_; +text: .text%__1cNTemplateTableMmonitorenter6F_v_; +text: .text%__1cNTemplateTableLmonitorexit6F_v_; +text: .text%__1cNTemplateTableEwide6F_v_; +text: .text%__1cNTemplateTableOmultianewarray6F_v_; +text: .text%__1cETypeRInitialize_shared6FpnHCompile__v_; +text: .text%__1cHVM_ExitNset_vm_exited6F_i_; +text: .text%__1cHVM_ExitbJwait_for_threads_in_native_to_block6F_i_; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cNTemplateTableJputstatic6Fi_v_; +text: .text%__1cNWatcherThread2t6M_v_; +text: .text%__1cNWatcherThreadDrun6M_v_; +text: .text%__1cNWatcherThreadFstart6F_v_; +text: .text%__1cVverificationType_exit6F_v_; +text: .text%__1cQVerificationTypeKinitialize6F_v_; +text: .text%__1cQVerificationTypeIfinalize6F_v_; +text: .text%__1cNTemplateTableIputfield6Fi_v_; +text: .text%__1cNWatcherThreadEstop6F_v_; +text: .text%__1cNTemplateTableJgetstatic6Fi_v_; +text: .text%__1cPVM_Version_init6F_v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cRAllocateTLSOffset6F_v_: threadLS_solaris_i486.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cYVM_Version_StubGeneratorTgenerate_getPsrInfo6M_pC_: vm_version_i486.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cLVtableStubsKinitialize6F_v_; +text: .text%__1cVverificationType_init6F_v_; +text: .text%__1cIVMThread2t6M_v_; +text: .text%__1cGThreadWset_as_starting_thread6M_i_; +text: .text%__1cIVMThreadGcreate6F_v_; +text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o; +text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o; +text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o; +text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o; +text: .text%JVM_RegisterUnsafeMethods; +text: .text%__1cQvtableStubs_init6F_v_; +text: .text%__1cFVTuneEexit6F_v_; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: typeArrayKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlassKlass.o; +text: .text%__1cKvtune_init6F_v_; +text: .text%__1cIUniversePcheck_alignment6FIIpkc_v_; +text: .text%__1cFKlassPoop_is_instance6kM_i_: constantPoolKlass.o; +text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFJNIidKdeallocate6Fp0_v_; +text: .text%__1cNinstanceKlassZrelease_C_heap_structures6M_v_; +text: .text%__1cSset_init_completed6F_v_; +text: .text%__1cMexit_globals6F_v_; +text: .text%__1cMinit_globals6F_i_; +text: .text%__1cPvm_init_globals6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: indexSet.o; +text: .text%__1cLicache_init6F_v_; +text: .text%__1cWInlineCacheBuffer_init6F_v_; +text: .text%__1cRInlineCacheBufferKinitialize6F_v_; +text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_; +text: .text%__1cDhpiZinitialize_socket_library6F_i_; +text: .text%__1cDhpiKinitialize6F_i_; +text: .text%__1cLlog2_intptr6Fi_i_: heap.o; +text: .text%__1cICodeHeapFclear6M_v_; +text: 
.text%__1cICodeHeapHreserve6MIII_i_; +text: .text%__1cICodeHeap2t6M_v_; +text: .text%__1cSinstanceKlassKlassUoop_is_instanceKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cSInterpreterRuntimeSupdate_mdp_for_ret6FpnKJavaThread_i_v_; +text: .text%__1cSInterpreterRuntimeWcreate_klass_exception6FpnKJavaThread_pcpnHoopDesc__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o; +text: .text%__1cbCAbstractInterpreterGeneratorbCset_safepoints_for_all_bytes6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbEset_entry_points_for_all_bytes6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_; +text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cQinterpreter_init6F_v_; +text: .text%__1cZInterpreterMacroAssemblerbFset_method_data_pointer_for_bcp6M_v_; +text: .text%__1cZInterpreterMacroAssemblerUdispatch_only_normal6MnITosState__v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl_33_v_; +text: .text%__1cVInterfaceSupport_init6F_v_; +text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_; +text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: instanceKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cSCommandLineFlagsExKuintxAtPut6FnXCommandLineFlagWithType_I_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o; +text: .text%__1cMelapsedTimer2t6M_v_: fprofiler.o; +text: .text%__1cScheck_ThreadShadow6F_v_; +text: .text%__1cNeventlog_init6F_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: dictionary.o; +text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_; +text: .text%__1cKDictionaryKfree_entry6MpnPDictionaryEntry__v_; +text: .text%__1cKDictionary2t6Mi_v_; +text: .text%__1cFStateQ_sub_Op_TailJump6MpknENode__v_; +text: .text%__1cFStateM_sub_Op_NegF6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_MoveF2I6MpknENode__v_; +text: .text%__1cFStateO_sub_Op_CMoveL6MpknENode__v_; +text: .text%__1cODeoptimizationTload_class_by_index6FnSconstantPoolHandle_i_v_; +text: .text%__1cODeoptimizationTload_class_by_index6FnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKScopeValuePis_constant_int6kM_i_: debugInfo.o; +text: .text%__1cUConstantOopReadValuePis_constant_oop6kM_i_: debugInfo.o; +text: .text%__1cUConstantOopReadValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: cpCacheKlass.o; +text: .text%__1cFframeVinterpreter_frame_mdp6kM_pC_; +text: .text%__1cRcheck_basic_types6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o; +text: .text%__1cRComputeEntryStackIdo_float6M_v_: generateOopMap.o; +text: .text%__1cNCellTypeStateLmake_bottom6F_0_: generateOopMap.o; +text: .text%__1cNCellTypeStateImake_top6F_0_: generateOopMap.o; +text: .text%__1cMelapsedTimer2t6M_v_: generateOopMap.o; +text: .text%__1cWResolveOopMapConflictsUdo_potential_rewrite6MpnGThread__nMmethodHandle__; +text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__I_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cRAlwaysTrueClosure2t6M_v_: genCollectedHeap.o; +text: .text%__1cKNoopGCTaskQcreate_on_c_heap6F_p0_; 
+text: .text%__1cNGCTaskManagerKthreads_do6MpnNThreadClosure__v_; +text: .text%__1cNGCTaskManagerKinitialize6M_v_; +text: .text%__1cNGCTaskManager2t6MI_v_; +text: .text%__1cXSynchronizedGCTaskQdDueue2t6MpnLGCTaskQdDueue_pnFMutex__v_; +text: .text%__1cLGCTaskQdDueueQcreate_on_c_heap6F_p0_; +text: .text%__1cHGCStats2t6M_v_; +text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_; +text: .text%__1cXSignatureHandlerLibraryQset_handler_blob6F_pC_; +text: .text%JVM_MaxMemory; +text: .text%JVM_Halt; +text: .text%JVM_InitProperties; +text: .text%__1cQjni_handles_init6F_v_; +text: .text%__1cKJNIHandlesKinitialize6F_v_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_; +text: .text%JNI_CreateJavaVM; +text: .text%__1cVquicken_jni_functions6F_v_; +text: .text%__1cbDinitializeDirectBufferSupport6FpnHJNIEnv___i_: jni.o; +text: .text%lookupDirectBufferClasses: jni.o; +text: .text%__1cXjni_GetDoubleField_addr6F_pC_; +text: .text%JVM_GetClassDeclaredMethods; +text: .text%__1cLJvmtiExportNpost_vm_start6F_v_; +text: .text%__1cLJvmtiExportQenter_live_phase6F_v_; +text: .text%__1cLJvmtiExportRenter_start_phase6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o; +text: .text%__1cUJvmtiEventControllerIvm_death6F_v_; +text: .text%__1cUJvmtiEventControllerHvm_init6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_start6F_v_; +text: .text%__1cRJvmtiEventEnabledFclear6M_v_; +text: .text%__1cRJvmtiEventEnabled2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o; +text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiEnvBase.o; +text: .text%__1cNGrowableArray4CpnMJvmtiEnvBase__2t6Mii_v_: jvmtiEnvBase.o; +text: .text%JVM_SupportsCX8; +text: .text%__1cbEinitialize_converter_functions6F_v_; +text: .text%JVM_Socket; +text: .text%JVM_InitializeSocketLibrary; +text: .text%__1cWjni_GetFloatField_addr6F_pC_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectPcompute_offsets6F_v_; +text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_; +text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cHvm_exit6Fi_v_; +text: .text%__1cLbefore_exit6FpnKJavaThread__v_; +text: .text%__1cQprint_statistics6F_v_; +text: .text%__1cWinvocationCounter_init6F_v_; +text: .text%__1cLlog2_intptr6Fi_i_: interpreter_i486.o; +text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_throw_exception6M_v_; +text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_; +text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_; +text: .text%__1cUInterpreterGeneratorXgenerate_abstract_entry6M_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_StackOverflowError_handler6M_pC_; 
+text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_: interpreterRuntime.o; +text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_; +text: .text%__1cVjni_GetLongField_addr6F_pC_; +text: .text%__1cUjni_GetIntField_addr6F_pC_; +text: .text%__1cWjni_GetShortField_addr6F_pC_; +text: .text%__1cVjni_GetCharField_addr6F_pC_; +text: .text%__1cVjni_GetByteField_addr6F_pC_; +text: .text%__1cYjni_GetBooleanField_addr6F_pC_; +text: .text%jni_ToReflectedMethod: jni.o; +text: .text%__1cQjavaClasses_init6F_v_; +text: .text%__1cLJavaClassesPcompute_offsets6F_v_; +text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_; +text: .text%__1cZsun_misc_AtomicLongCSImplPcompute_offsets6F_v_; +text: .text%__1cPjava_nio_BufferPcompute_offsets6F_v_; +text: .text%__1cQjava_lang_SystemPcompute_offsets6F_v_; +text: .text%__1cbIjava_security_AccessControlContextPcompute_offsets6F_v_; +text: .text%__1cYsun_reflect_ConstantPoolPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_; +text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: cpCacheKlass.o; +text: .text%__1cJAssemblerEmull6MpnMRegisterImpl__v_; +text: .text%__1cJAssemblerEadcl6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerEadcl6MpnMRegisterImpl_i_v_; +text: .text%__1cJAssemblerGmovsxw6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_2_v_; +text: .text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: arrayKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: arrayKlassKlass.o; +text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o; +text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_; +text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_; +text: .text%__1cJArgumentsbSparse_java_tool_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsbNparse_java_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsVfinalize_vm_init_args6FpnMSysClassPath_i_i_; +text: .text%__1cJArgumentsWparse_each_vm_init_arg6FpknOJavaVMInitArgs_pnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_; +text: .text%__1cJArgumentsZcheck_vm_args_consistency6F_i_; +text: .text%__1cJArgumentsbOparse_java_compiler_environment_variable6F_v_; +text: .text%__1cJAssemblerEsbbl6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerJdecrement6MpnMRegisterImpl_i_v_; +text: .text%__1cOMacroAssemblerLextend_sign6MpnMRegisterImpl_2_v_; +text: .text%__1cJAssemblerFfaddp6Mi_v_; +text: .text%__1cJAssemblerGfdivrp6Mi_v_; +text: .text%__1cJAssemblerHfdivr_d6MnHAddress__v_; +text: .text%__1cJAssemblerHfdivr_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfmul_s6MnHAddress__v_; +text: .text%__1cJAssemblerHfsubr_d6MnHAddress__v_; +text: .text%__1cJAssemblerHfsubr_s6MnHAddress__v_; +text: .text%__1cJAssemblerGfadd_d6MnHAddress__v_; +text: .text%__1cJAssemblerGfadd_s6MnHAddress__v_; +text: .text%__1cJAssemblerFfsqrt6M_v_; +text: .text%__1cJAssemblerEfcos6M_v_; +text: .text%__1cJAssemblerEfsin6M_v_; +text: .text%__1cJAssemblerEsetb6Mn0AJCondition_pnMRegisterImpl__v_; +text: .text%__1cJAssemblerExchg6MpnMRegisterImpl_nHAddress__v_; +text: .text%__1cJAssemblerEsubl6MnHAddress_i_v_; +text: 
.text%__1cJAssemblerFshldl6MpnMRegisterImpl_2_v_; +text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_; +text: .text%__1cHi2sNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHi2bNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreBNodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cNstoreImmINodeErule6kM_I_: ad_i486_misc.o; +text: .text%__1cLconvP2BNodePoper_input_base6kM_I_: ad_i486_misc.o; +text: .text%__1cOtailjmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_i486_expand.o; +text: .text%__1cRaddL_eReg_memNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLconvP2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIcp2bNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_i486.o; +text: .text%__1cTconvI2F_SSF_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQmulD_reg_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOtailjmpIndNodeFreloc6kM_i_; +text: .text%__1cTconvI2F_SSF_memNodeFreloc6kM_i_; +text: .text%__1cQmulD_reg_memNodeFreloc6kM_i_; +text: .text%__1cQaccessFlags_init6F_v_; +text: .text%__1cPconvI2L_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_; +text: .text%__1cQno_shared_spaces6F_v_: arguments.o; +text: .text%__1cJArgumentsMget_property6Fpkc_2_; +text: .text%__1cJArgumentsVprocess_settings_file6Fpkcii_i_; +text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_; +text: .text%__1cMSysClassPathPexpand_endorsed6M_v_; +text: .text%__1cMSysClassPath2T6M_v_; +text: .text%__1cMSysClassPath2t6Mpkc_v_; +text: .text%__1cJArgumentsWinit_system_properties6F_v_; +text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_; +text: .text%__1cOchunkpool_init6F_v_; +text: .text%__1cWAdjoiningVirtualSpaces2t6MnNReservedSpace_III_v_; +text: .text%__1cUAdjoiningGenerations2t6MnNReservedSpace_IIIIIII_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_i486_pipeline.o; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl__v_; +text: .text%__1cOcompiler2_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o; +text: .text%__1cMelapsedTimer2t6M_v_: compilationPolicy.o; +text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_; +text: .text%__1cWcompilationPolicy_init6F_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_; +text: .text%__1cNCollectedHeap2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o; +text: .text%__1cOcodeCache_init6F_v_; +text: .text%__1cJCodeCacheKinitialize6F_v_; +text: .text%__1cNExceptionBlob2n6FII_pv_; +text: .text%__1cNExceptionBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNExceptionBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cQUncommonTrapBlob2n6FII_pv_; +text: .text%__1cQUncommonTrapBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cQUncommonTrapBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_; +text: .text%__1cSDeoptimizationBlob2n6FII_pv_; +text: .text%__1cNCompileBrokerQcompilation_init6FpnQAbstractCompiler__v_; +text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constantPoolKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: constantPoolKlass.o; +text: 
.text%__1cKarrayKlassMoop_is_array6kM_i_: constantPoolKlass.o; +text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cTcompilerOracle_init6F_v_; +text: .text%__1cOCompilerOracleRparse_from_string6Fpkc_v_; +text: .text%__1cOCompilerOraclePparse_from_file6F_v_; +text: .text%__1cHcc_file6F_pkc_: compilerOracle.o; +text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_; +text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_: compiledICHolderKlass.o; +text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o; +text: .text%__1cNCompileBrokerQset_should_block6F_v_; +text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_; +text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: cfgnode.o; +text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_I_; +text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_; +text: .text%__1cMTailJumpNode2t6MpnENode_22222_v_; +text: .text%__1cKC2CompilerKinitialize6M_v_; +text: .text%__1cHCompileRpd_compiler2_init6F_v_; +text: .text%__1cJBytecodesNpd_initialize6F_v_; +text: .text%__1cObytecodes_init6F_v_; +text: .text%__1cJBytecodesKinitialize6F_v_; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_; +text: .text%__1cOMacroAssemblerQsign_extend_byte6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerRsign_extend_short6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerIlcmp2int6MpnMRegisterImpl_222_v_; +text: .text%__1cOMacroAssemblerElshl6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerElmul6Mii_v_; +text: .text%__1cOMacroAssemblerElneg6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerGc2bool6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl_3_v_; +text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_; +text: .text%__1cSDeoptimizationBlob2t6MpnKCodeBuffer_ipnJOopMapSet_iiii_v_; +text: .text%__1cLMoveF2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cTClassLoadingServiceVnotify_class_unloaded6FpnNinstanceKlass_i_v_; +text: .text%__1cTClassLoadingServiceEinit6F_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: classLoader.o; +text: .text%__1cQclassLoader_init6F_v_; +text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_; +text: .text%__1cLClassLoaderKinitialize6F_v_; +text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_; +text: .text%__1cLClassLoaderQload_zip_library6F_v_; +text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_; +text: .text%__1cPClassFileParserYjava_lang_Class_fix_post6Mpi_v_; +text: .text%__1cPClassFileParserXjava_lang_Class_fix_pre6MpnOobjArrayHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cRciArrayKlassKlassUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_; +text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_; +text: 
.text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_; +text: .text%__1cMciKlassKlassEmake6F_p0_; +text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_; +text: .text%__1cNRegisterSaverYrestore_result_registers6FpnOMacroAssembler__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o; +text: .text%__1cORuntimeServiceYrecord_application_start6F_v_; +text: .text%__1cORuntimeServiceEinit6F_v_; +text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray5_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray4_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray3_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray2_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray1_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeIgenerate6FpnFciEnv__v_; +text: .text%__1cWResolveOopMapConflictsOreport_results6kM_i_: rewriter.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o; +text: .text%__1cQRelocationHolder2t6M_v_: relocInfo.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: regmask.o; +text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_; +text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_; +text: .text%__1cLStatSamplerHdestroy6F_v_; +text: .text%__1cLStatSamplerJdisengage6F_v_; +text: .text%__1cLStatSamplerGengage6F_v_; +text: .text%__1cLStatSamplerKinitialize6F_v_; +text: .text%__1cNcarSpace_init6F_v_; +text: .text%__1cICarSpaceEinit6F_v_; +text: .text%__1cNSharedRuntimebIinitialize_StrictMath_entry_points6F_v_; +text: .text%__1cNSharedRuntimeUlookup_function_DD_D6FrpFpnHJNIEnv__pnH_jclass_dd_dpkc_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o; +text: .text%__1cTAssertIsPermClosure2t6M_v_: sharedHeap.o; +text: .text%__1cLOptoRuntimeYgenerate_arraycopy_stubs6F_v_; +text: .text%__1cLOptoRuntimebPgenerate_polling_page_return_handler_blob6F_v_; +text: .text%__1cLOptoRuntimebSgenerate_polling_page_safepoint_handler_blob6F_v_; +text: .text%__1cLOptoRuntimebPgenerate_illegal_instruction_handler_blob6F_v_; +text: .text%__1cLOptoRuntimeUsetup_exception_blob6F_v_; +text: .text%__1cLOptoRuntimeWfill_in_exception_blob6F_v_; +text: .text%__1cLOptoRuntimebBgenerate_uncommon_trap_blob6F_v_; +text: .text%__1cHRegMask2t6Miiiii_v_: regmask.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o; +text: .text%__1cMelapsedTimer2t6M_v_: psMarkSweep.o; +text: .text%__1cTPSAlwaysTrueClosure2t6M_v_: psMarkSweep.o; +text: .text%__1cLPSMarkSweepKinitialize6F_v_; +text: .text%__1cbAPSGCAdaptivePolicyCounters2t6MpkciipnUPSAdaptiveSizePolicy__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psAdaptiveSizePolicy.o; +text: .text%__1cUPSAdaptiveSizePolicy2t6MIIIIIddI_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: placeholders.o; +text: .text%__1cQPlaceholderTable2t6Mi_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: phase.o; +text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_; +text: .text%__1cKPerfMemoryUcreate_memory_region6FI_v_; +text: .text%__1cUdelete_shared_memory6FpcI_v_: perfMemory_solaris.o; +text: .text%__1cUcreate_shared_memory6FI_pc_: perfMemory_solaris.o; +text: .text%__1cSmmap_create_shared6FI_pc_: perfMemory_solaris.o; +text: .text%__1cbAcreate_sharedmem_resources6Fpkc1I_i_: perfMemory_solaris.o; +text: .text%__1cRmake_user_tmp_dir6Fpkc_i_: perfMemory_solaris.o; +text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cIPSOldGen2t6MnNReservedSpace_IIIIpkci_v_; +text: 
.text%__1cSReferenceProcessorMinit_statics6F_v_; +text: .text%__1cXreferenceProcessor_init6F_v_; +text: .text%__1cURecompilationMonitorbGstart_recompilation_monitor_task6F_v_; +text: .text%__1cKPSYoungGenUset_space_boundaries6MII_v_; +text: .text%__1cKPSYoungGenbGcompute_initial_space_boundaries6M_v_; +text: .text%__1cKPSYoungGenPinitialize_work6M_v_; +text: .text%__1cKPSYoungGenKinitialize6MnNReservedSpace_I_v_; +text: .text%__1cKPSYoungGenYinitialize_virtual_space6MnNReservedSpace_I_v_; +text: .text%__1cKPSYoungGen2t6MIII_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o; +text: .text%__1cMelapsedTimer2t6M_v_: psScavenge.o; +text: .text%__1cKPSScavengeKinitialize6F_v_; +text: .text%__1cPOopTaskQdDueueSet2t6Mi_v_: psPromotionManager.o; +text: .text%__1cSPSPromotionManagerKinitialize6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psPromotionLAB.o; +text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: psPromotionLAB.o; +text: .text%__1cRalign_object_size6Fi_i_: psPromotionLAB.o; +text: .text%__1cJPSPermGen2t6MnNReservedSpace_IIIIpkci_v_; +text: .text%__1cIPSOldGen2t6MIIIpkci_v_; +text: .text%__1cLStatSamplerUcreate_misc_perfdata6F_v_; +text: .text%__1cNTemplateTableGbaload6F_v_; +text: .text%__1cNTemplateTableGaaload6F_v_; +text: .text%__1cNTemplateTableGdaload6F_v_; +text: .text%__1cNTemplateTableGfaload6F_v_; +text: .text%__1cNTemplateTableGlaload6F_v_; +text: .text%__1cNTemplateTableGiaload6F_v_; +text: .text%__1cNTemplateTableKwide_aload6F_v_; +text: .text%__1cNTemplateTableKwide_dload6F_v_; +text: .text%__1cNTemplateTableKwide_fload6F_v_; +text: .text%__1cNTemplateTableKwide_lload6F_v_; +text: .text%__1cNTemplateTableKwide_iload6F_v_; +text: .text%__1cNTemplateTableFaload6F_v_; +text: .text%__1cNTemplateTableFdload6F_v_; +text: .text%__1cNTemplateTableFfload6F_v_; +text: .text%__1cNTemplateTableFlload6F_v_; +text: .text%__1cNTemplateTableKfast_iload6F_v_; +text: .text%__1cNTemplateTableLfast_iload26F_v_; +text: .text%__1cNTemplateTableFiload6F_v_; +text: .text%__1cNTemplateTableGcaload6F_v_; +text: .text%__1cNTemplateTableHsastore6F_v_; +text: .text%__1cNTemplateTableHbastore6F_v_; +text: .text%__1cNTemplateTableHaastore6F_v_; +text: .text%__1cNTemplateTableHdastore6F_v_; +text: .text%__1cNTemplateTableHfastore6F_v_; +text: .text%__1cNTemplateTableHlastore6F_v_; +text: .text%__1cNTemplateTableHiastore6F_v_; +text: .text%__1cNTemplateTableLwide_astore6F_v_; +text: .text%__1cNTemplateTableLwide_dstore6F_v_; +text: .text%__1cNTemplateTableLwide_fstore6F_v_; +text: .text%__1cNTemplateTableGastore6F_v_; +text: .text%__1cNTemplateTableGdstore6F_v_; +text: .text%__1cNTemplateTableGfstore6F_v_; +text: .text%__1cNTemplateTableGlstore6F_v_; +text: .text%__1cNTemplateTableGistore6F_v_; +text: .text%__1cNTemplateTableHaload_06F_v_; +text: .text%__1cNTemplateTableGsaload6F_v_; +text: .text%__1cNTemplateTableMfast_icaload6F_v_; +text: .text%__1cNTemplateTableGldc2_w6F_v_; +text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: symbolKlass.o; +text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_: symbolKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: symbolKlass.o; +text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cINegFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cISubFNodeDsub6kMpknEType_3_3_; +text: .text%__1cSstubRoutines_init26F_v_; +text: .text%__1cSstubRoutines_init16F_v_; +text: .text%__1cMStubRoutinesLinitialize26F_v_; +text: .text%__1cMStubRoutinesLinitialize16F_v_; +text: 
.text%__1cNStubGeneratorTgenerate_verify_oop6M_pC_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorYgenerate_get_previous_fp6M_pC_: stubGenerator_i486.o; +text: .text%__1cNStubGeneratorUcreate_control_words6M_v_: stubGenerator_i486.o; +text: .text%__1cLStatSamplerXcreate_sampled_perfdata6F_v_; +text: .text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_; +text: .text%__1cNTemplateTableGsipush6F_v_; +text: .text%__1cNTemplateTableGbipush6F_v_; +text: .text%__1cNTemplateTableLaconst_null6F_v_; +text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_; +text: .text%__1cNTemplateTableDnop6F_v_; +text: .text%__1cNTemplateTableNpd_initialize6F_v_; +text: .text%__1cStemplateTable_init6F_v_; +text: .text%__1cNTemplateTableDjsr6F_v_; +text: .text%__1cNTemplateTableFjsr_w6F_v_; +text: .text%__1cNTemplateTableGgoto_w6F_v_; +text: .text%__1cNTemplateTableF_goto6F_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_; +text: .text%__1cMPeriodicTaskJdisenroll6M_v_; +text: .text%__1cMPeriodicTaskLis_enrolled6kM_i_; +text: .text%__1cMPeriodicTask2T5B6M_v_; +text: .text%__1cQSystemDictionarybDinitialize_basic_type_mirrors6FpnGThread__v_; +text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_; +text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_; +text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_; +text: .text%__1cLremove_file6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cNMemoryServicebFadd_parallel_scavenge_heap_info6FpnUParallelScavengeHeap__v_; +text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_; +text: .text%__1cMCodeHeapPool2t6MpnICodeHeap_pkci_v_; +text: .text%__1cYSurvivorMutableSpacePool2t6MpnKPSYoungGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cUEdenMutableSpacePool2t6MpnKPSYoungGen_pnMMutableSpace_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cQPSGenerationPool2t6MpnJPSPermGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cQPSGenerationPool2t6MpnIPSOldGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cNMemoryManagerbEget_psMarkSweep_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cNMemoryManagerbDget_psScavenge_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: matcher.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o; +text: .text%__1cJMarkSweepSMarkAndPushClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepRFollowRootClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepSFollowStackClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepOIsAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cJMarkSweepQKeepAliveClosure2t6M_v_: markSweep.o; +text: .text%__1cOmarksweep_init6F_v_; +text: .text%__1cNMemoryServiceXadd_psYoung_memory_pool6FpnKPSYoungGen_pnNMemoryManager_4_v_; +text: .text%__1cKmutex_init6F_v_; +text: .text%__1cMMutableSpaceOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodLiveness.o; +text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodKlass.o; +text: .text%__1cLmethodKlassOset_alloc_size6MI_v_: methodKlass.o; +text: 
.text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cHRetDataKis_RetData6M_i_: methodDataOop.o; +text: .text%__1cHRetDataJfixup_ret6MinQmethodDataHandle__pC_; +text: .text%__1cHRetDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodDataKlass.o; +text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_: methodDataKlass.o; +text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o; +text: .text%__1cNGrowableArray4CpnKMemoryPool__2t6Mii_v_: memoryService.o; +text: .text%__1cNGrowableArray4CpnNMemoryManager__2t6Mii_v_: memoryService.o; +text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_; +text: .text%__1cNMemoryServiceWadd_psPerm_memory_pool6FpnJPSPermGen_pnNMemoryManager__v_; +text: .text%__1cNMemoryServiceVadd_psOld_memory_pool6FpnIPSOldGen_pnNMemoryManager__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: klassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: klassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: klassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: klassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassUoop_is_instanceKlass6kM_i_: klassKlass.o; +text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o; +text: .text%__1cJMemRegion2t6M_v_: jvmtiTagMap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o; +text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiImpl.o; +text: .text%__1cNGrowableArray4CpnPJvmtiRawMonitor__2t6Mii_v_: jvmtiImpl.o; +text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_; +text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_death6F_v_; +text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: klassKlass.o; +text: .text%__1cJTimeStamp2t6M_v_: management.o; +text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_; +text: .text%__1cKManagementKinitialize6FpnGThread__v_; +text: .text%__1cKManagementEinit6F_v_; +text: .text%__1cPmanagement_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: machnode.o; +text: .text%__1cGThreadMis_VM_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cXLowMemoryDetectorThreadbCis_hidden_from_external_view6kM_i_: lowMemoryDetector.o; +text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_; +text: .text%__1cRLowMemoryDetectorUhas_pending_requests6F_i_; +text: .text%__1cRLowMemoryDetectorKinitialize6F_v_; +text: .text%__1cKRegionNodeUdepends_only_on_test6kM_i_: loopnode.o; +text: .text%__1cNIdealLoopTreeUmerge_many_backedges6MpnOPhaseIdealLoop__v_; +text: .text%__1cKfix_parent6FpnNIdealLoopTree_1_v_: loopnode.o; +text: .text%__1cNIdealLoopTreeQsplit_outer_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: loaderConstraints.o; +text: .text%__1cVLoaderConstraintTable2t6Mi_v_; +text: 
.text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: library_call.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: objArrayKlass.o; +text: .text%__1cUParallelScavengeHeapMmax_capacity6kM_I_; +text: .text%__1cUParallelScavengeHeapPpost_initialize6M_v_; +text: .text%__1cUParallelScavengeHeapKinitialize6M_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o; +text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: parGCAllocBuffer.o; +text: .text%__1cRalign_object_size6Fi_i_: parGCAllocBuffer.o; +text: .text%__1cHoopDescLheader_size6F_i_: parGCAllocBuffer.o; +text: .text%__SLIP.DELETER__C: ostream.o; +text: .text%__1cMostream_exit6F_v_; +text: .text%__1cQostream_init_log6F_v_; +text: .text%__1cMostream_init6F_v_; +text: .text%__1cNdefaultStreamEinit6M_v_; +text: .text%__1cCosMsupports_sse6F_i_; +text: .text%__1cVcheck_for_sse_support6F_v_: os_solaris_i486.o; +text: .text%__1cCosGinit_26F_i_; +text: .text%__1cCosEinit6F_v_; +text: .text%__1cCosHSolarisUsynchronization_init6F_v_; +text: .text%__1cCosHSolarisOlibthread_init6F_v_; +text: .text%__1cUParallelScavengeHeapYpermanent_object_iterate6MpnNObjectClosure__v_; +text: .text%__1cWget_sharedmem_filename6Fpkci_pc_: perfMemory_solaris.o; +text: .text%__1cNget_user_name6Fl_pc_: perfMemory_solaris.o; +text: .text%__1cQget_user_tmp_dir6Fpkc_pc_: perfMemory_solaris.o; +text: .text%__1cKPerfMemoryHdestroy6F_v_; +text: .text%__1cKPerfMemoryKinitialize6F_v_; +text: .text%__1cPperfMemory_exit6F_v_; +text: .text%__1cPperfMemory_init6F_v_; +text: .text%__1cMPerfDataListFclone6M_p0_; +text: .text%__1cMPerfDataList2t6Mp0_v_; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__; +text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__; +text: .text%__1cPPerfDataManagerHdestroy6F_v_; +text: .text%__1cFParseMjump_if_join6MpnENode_2_2_; +text: .text%__1cPGenerationSizerQinitialize_flags6M_v_: parallelScavengeHeap.o; +text: .text%__1cbCTwoGenerationCollectorPolicyMrem_set_name6M_nJGenRemSetEName__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapEkind6M_nNCollectedHeapEName__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapIend_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapEheap6F_p0_; +text: .text%__1cUParallelScavengeHeapNgc_threads_do6kMpnNThreadClosure__v_; +text: .text%__1cOisT2_libthread6F_i_; +text: .text%__1cCosNset_boot_path6Fcc_i_; +text: .text%__1cCosLinit_random6Fl_v_; +text: .text%__1cCosGstrdup6Fpkc_pc_; +text: .text%__1cCosXterminate_signal_thread6F_v_; +text: .text%__1cCosLsignal_init6F_v_; +text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o; +text: .text%__1cQDoNothingClosure2t6M_v_: oopMap.o; +text: .text%__1cXonStackReplacement_init6F_v_; +text: .text%__1cSOnStackReplacementKinitialize6F_v_; +text: .text%__1cNObjectMonitorREntryQdDueue_unlink6MpnMObjectWaiter__v_; +text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: objArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassUoop_is_objArrayKlass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; 
+text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlass.o; +text: .text%__1cCosXis_server_class_machine6F_i_; +text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_; +text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o; +text: .text%__1cNpriocntl_stub6FinGidtype_lipc_l_: os_solaris.o; +text: .text%__1cCosHSolarisRmpss_sanity_check6F_v_; +text: .text%__1cCosPuncommit_memory6FpcI_i_; +text: .text%__1cCosLsignal_wait6F_i_; +text: .text%__1cVcheck_pending_signals6Fi_i_: os_solaris.o; +text: .text%__1cCosNsignal_notify6Fi_v_; +text: .text%__1cCosOsignal_init_pd6F_v_; +text: .text%__1cCosHSolarisPinit_signal_mem6F_v_; +text: .text%__1cCosSget_temp_directory6F_pkc_; +text: .text%__1cCosbDallocate_thread_local_storage6F_i_; +text: .text%__1cCosHSolarisQsignal_sets_init6F_v_; +text: .text%__1cCosScreate_main_thread6FpnGThread__i_; +text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o; +text: .text%__1cCosbDinit_system_properties_values6F_v_; +text: .text%__1cCosHSolarisWinitialize_system_info6F_v_; +text: .text%__1cCosPphysical_memory6F_X_; +text: .text%__1cSThreadLocalStorageHpd_init6F_v_; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/reorder_TIERED_sparc 2009-08-01 04:16:58.663305826 +0100 @@ -0,0 +1,7113 @@ +data = R0x2000; +text = LOAD ?RXO; + + +text: .text%__1cLOptoRuntimeLjshort_copy6Fph1I_v_; +text: .text%__1cLOptoRuntimeTarrayof_jshort_copy6FpnIHeapWord_2I_v_; +text: .text%__1cSPSPromotionManagerWcopy_to_survivor_space6MpnHoopDesc__2_; +text: .text%__1cCosOjavaTimeMillis6F_x_; +text: .text%__1cQIndexSetIteratorQadvance_and_next6M_I_; +text: .text%__1cNinstanceKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cJMarkSweepO_mark_and_push6FppnHoopDesc__v_; +text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMOopTaskQdDueueKpop_global6MrpnHoopDesc__i_; +text: .text%__1cIMachNodeHis_Mach6M_p0_: ad_sparc_misc.o; +text: .text%__1cIPhaseIFGIadd_edge6MII_i_; +text: .text%__1cQIndexSetIterator2t6MpnIIndexSet__v_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: ifg.o; +text: .text%__1cJMarkSweepPmark_and_follow6FppnHoopDesc__v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: ad_sparc_misc.o; +text: .text%__1cENodeEjvms6kM_pnIJVMState__; +text: .text%__1cHRegMaskFis_UP6kM_i_; +text: .text%__1cUGenericGrowableArrayLraw_at_grow6MipknEGrET__pv_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: ad_sparc_misc.o; +text: .text%__1cIMachNodeNrematerialize6kM_i_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: classes.o; +text: .text%__1cIProjNodeHis_Proj6M_p0_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: classes.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: classes.o; +text: .text%__1cIIndexSetWalloc_block_containing6MI_pn0AIBitBlock__; +text: .text%__1cENodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cETypeDcmp6Fkpk03_i_; +text: .text%__1cENodeHis_Copy6kM_I_: classes.o; +text: .text%__1cENodeHlatency6MI_I_; +text: .text%__1cHRegMaskJis_bound16kM_i_; +text: .text%__1cDff16FI_i_; +text: .text%__1cHRegMaskESize6kM_I_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: ad_sparc_misc.o; +text: .text%__1cXresource_allocate_bytes6FI_pc_; +text: .text%__1cIMachNodeJideal_reg6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: 
classes.o; +text: .text%__1cRMachSpillCopyNodeMis_SpillCopy6M_p0_: ad_sparc.o; +text: .text%__1cENodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJVectorSet2R6MI_rnDSet__; +text: .text%__1cHRegMaskJis_bound26kM_i_; +text: .text%__1cNSharedRuntimeElmul6Fxx_x_; +text: .text%__1cPOopTaskQdDueueSetFsteal6MipirpnHoopDesc__i_; +text: .text%__1cIMachNodeGOpcode6kM_i_; +text: .text%__1cENodeGpinned6kM_i_: classes.o; +text: .text%__1cJiRegIOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cIIndexSetKinitialize6MI_v_; +text: .text%__1cITypeNodeLbottom_type6kM_pknEType__; +text: .text%__1cNRelocIteratorEnext6M_i_: relocInfo.o; +text: .text%__1cRMachSpillCopyNodeHis_Copy6kM_I_: ad_sparc.o; +text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_; +text: .text%__1cKTypeOopPtrFklass6kM_pnHciKlass__: type.o; +text: .text%__1cHPhiNodeGis_Phi6M_p0_: cfgnode.o; +text: .text%__1cETypeFuhash6Fkpk0_i_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: chaitin.o; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_: callnode.o; +text: .text%__1cENodeIout_grow6MI_v_; +text: .text%__1cIMachNodeHis_Mach6M_p0_: ad_sparc.o; +text: .text%__1cJCProjNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cOloadConI13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNobjArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cENodeHadd_req6Mp0_v_; +text: .text%__1cEDictGInsert6Mpv1i_1_; +text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cJMultiNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeGis_CFG6kM_i_: cfgnode.o; +text: .text%__1cOloadConI13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOloadConI13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJMarkSweepOIsAliveClosureLdo_object_b6MpnHoopDesc__i_: markSweep.o; +text: .text%__1cKRegionNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cIProjNodeGis_CFG6kM_i_; +text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cINodeHashLhash_delete6MpknENode__i_; +text: .text%__1cFArenaIcontains6kMpkv_i_; +text: .text%__1cOloadConI13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cICallNodeKmatch_edge6kMI_I_; +text: .text%__1cINodeHashQhash_find_insert6MpnENode__2_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: classes.o; +text: .text%__1cHPhiNodeGOpcode6kM_i_; +text: .text%__1cHTypeIntCeq6kMpknEType__i_; +text: .text%__1cKbranchNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cIProjNodeGpinned6kM_i_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: classes.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: classes.o; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__: ad_sparc_misc.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: cfgnode.o; +text: .text%__1cIProjNodeGOpcode6kM_i_; +text: .text%__1cETypeIhashcons6M_pk0_; +text: .text%__1cOPhaseIdealLoopUbuild_loop_late_post6MpnENode_pk0_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: nmethod.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: ad_sparc.o; +text: .text%__1cMPhaseChaitinTinterfere_with_live6MIpnIIndexSet__v_; +text: .text%__1cWNode_Backward_IteratorEnext6M_pnENode__; +text: .text%__1cNIdealLoopTreeJis_member6kMpk0_i_; +text: .text%__1cMPhaseChaitinKelide_copy6MpnENode_ipnFBlock_rnJNode_List_6i_i_; +text: .text%__1cQObjectStartArrayMobject_start6MpnIHeapWord__2_: cardTableExtension.o; +text: .text%__1cHCompileRvalid_bundle_info6MpknENode__i_; 
+text: .text%__1cENodeNrematerialize6kM_i_: classes.o; +text: .text%__1cMMachCallNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cHCompileNnode_bundling6MpknENode__pnGBundle__; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopopts.o; +text: .text%__1cOlower_pressure6FpnDLRG_IpnFBlock_pI4_v_: ifg.o; +text: .text%__1cGIfNodeGOpcode6kM_i_; +text: .text%__1cOPhaseIdealLoopYsplit_if_with_blocks_pre6MpnENode__2_; +text: .text%__1cOPhaseIdealLoopZsplit_if_with_blocks_post6MpnENode__v_; +text: .text%__1cIUniverseMnon_oop_word6F_pv_; +text: .text%__1cDLRGOcompute_degree6kMr0_i_; +text: .text%__1cFArenaIArealloc6MpvII_1_; +text: .text%__1cIConINodeGOpcode6kM_i_; +text: .text%__1cETypeEmeet6kMpk0_2_; +text: .text%__1cENode2t6MI_v_; +text: .text%__1cRMachSpillCopyNodeJideal_reg6kM_I_: ad_sparc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cIPipelineXfunctional_unit_latency6kMIpk0_I_; +text: .text%__1cWPSScavengeRootsClosureGdo_oop6MppnHoopDesc__v_: psTasks.o; +text: .text%__1cENodeHis_Copy6kM_I_: cfgnode.o; +text: .text%__1cKSchedulingLanti_do_def6MpnFBlock_pnENode_nHOptoRegEName_i_v_; +text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: cfgnode.o; +text: .text%__1cKIfTrueNodeGOpcode6kM_i_; +text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_: ad_sparc.o; +text: .text%__1cIMachNodePin_oper_RegMask6kMIII_pknHRegMask__; +text: .text%__1cETypeJsingleton6kM_i_; +text: .text%__1cQIndexSetIteratorEnext6M_I_: coalesce.o; +text: .text%__1cIMachNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cJloadPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIPhaseIFGQeffective_degree6kMI_i_; +text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_; +text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_; +text: .text%__1cIAddPNodeGOpcode6kM_i_; +text: .text%__1cIPhaseIFGJre_insert6MI_v_; +text: .text%__1cIPhaseIFGLremove_node6MI_pnIIndexSet__; +text: .text%__1cKNode_ArrayGinsert6MIpnENode__v_; +text: .text%__1cHTypeIntEhash6kM_i_; +text: .text%__1cETypeLisa_oop_ptr6kM_i_; +text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cMPhaseIterGVNNtransform_old6MpnENode__2_; +text: .text%__1cDfh16FI_i_; +text: .text%__1cNMachIdealNodeErule6kM_I_: ad_sparc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: ad_sparc_misc.o; +text: .text%__1cIciObjectGequals6Mp0_i_; +text: .text%__1cIIndexSetKfree_block6MI_v_; +text: .text%__1cWShouldNotReachHereNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cHTypeIntJsingleton6kM_i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: classes.o; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: cfgnode.o; +text: .text%__1cLIfFalseNodeGOpcode6kM_i_; +text: .text%__1cSCallStaticJavaNodeGOpcode6kM_i_; +text: .text%__1cENodeEhash6kM_I_; +text: .text%__1cOPhaseIdealLoopEsort6MpnNIdealLoopTree_2_2_; +text: .text%__1cMMachProjNodeLbottom_type6kM_pknEType__; +text: .text%JVM_ArrayCopy; +text: .text%__1cOtypeArrayKlassQoop_is_typeArray6kM_i_: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cNSharedRuntimeDl2f6Fx_f_; +text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: classes.o; +text: .text%__1cIParmNodeGis_CFG6kM_i_: classes.o; +text: 
.text%__1cMPhaseChaitinKbias_color6MrnDLRG_i_nHOptoRegEName__; +text: .text%__1cHConNodeGOpcode6kM_i_; +text: .text%__1cMPhaseIterGVNWadd_users_to_worklist06MpnENode__v_; +text: .text%__1cMMachProjNodeGOpcode6kM_i_; +text: .text%__1cJiRegPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: classes.o; +text: .text%__1cJiRegIOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cENodeXpartial_latency_of_defs6MrnLBlock_Array_rnNGrowableArray4CI___v_; +text: .text%__1cXPipeline_Use_Cycle_Mask2L6Mi_r0_: ad_sparc_pipeline.o; +text: .text%__1cIBoolNodeGOpcode6kM_i_; +text: .text%__1cJMultiNodeIis_Multi6M_p0_; +text: .text%__1cYCallStaticJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeEgrow6MI_v_; +text: .text%__1cIciObjectEhash6M_i_; +text: .text%__1cKRegionNodeGOpcode6kM_i_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: relocInfo.o; +text: .text%__1cOPhaseIdealLoopUbuild_loop_tree_impl6MpnENode_i_i_; +text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_: markSweep.o; +text: .text%__1cRMachSpillCopyNodeLbottom_type6kM_pknEType__: ad_sparc.o; +text: .text%__1cOPhaseIdealLoopOget_early_ctrl6MpnENode__2_; +text: .text%__1cIIndexSetKinitialize6MIpnFArena__v_; +text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cFMutexGunlock6M_v_; +text: .text%__1cIPhaseGVNJtransform6MpnENode__2_; +text: .text%__1cFStateRMachOperGenerator6MipnIMachNode_pnHCompile__pnIMachOper__; +text: .text%__1cKRelocationNunpack_2_ints6Mri1_v_: relocInfo.o; +text: .text%__1cOoop_RelocationLunpack_data6M_v_; +text: .text%__1cRmethodDataOopDescHdata_at6Mi_pnLProfileData__; +text: .text%__1cNRelocIteratorEnext6M_i_: codeBlob.o; +text: .text%__1cPJavaFrameAnchorNmake_walkable6MpnKJavaThread__v_; +text: .text%__1cENodeNis_block_proj6kM_pk0_; +text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__; +text: .text%__1cKJavaThreadPcook_last_frame6MnFframe__1_; +text: .text%__1cIProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cUGenericGrowableArrayPraw_at_put_grow6MipknEGrET_3_v_; +text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_; +text: .text%__1cKRegionNodeGpinned6kM_i_: classes.o; +text: .text%__1cLTypeInstPtrEhash6kM_i_; +text: .text%__1cYCallStaticJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOPhaseIdealLoopThas_local_phi_input6MpnENode__2_; +text: .text%__1cJloadINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRMachSpillCopyNodeLout_RegMask6kM_rknHRegMask__: ad_sparc.o; +text: .text%__1cKbranchNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFStateDDFA6MipknENode__i_; +text: .text%__1cMMachProjNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cENodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cMMachProjNodeLout_RegMask6kM_rknHRegMask__: classes.o; +text: .text%__1cOMethodLivenessKBasicBlockXcompute_gen_kill_single6MpnQciByteCodeStream__v_; +text: .text%__1cRMachSpillCopyNodeKin_RegMask6kMI_rknHRegMask__: ad_sparc.o; +text: .text%__1cbAfinal_graph_reshaping_impl6FpnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: typeArrayKlass.o; +text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cUParallelScavengeHeapVlarge_typearray_limit6M_I_: parallelScavengeHeap.o; +text: 
.text%__1cIPhaseCCPOtransform_once6MpnENode__2_; +text: .text%__1cGciTypeEmake6FnJBasicType__p0_; +text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cENodeFclone6kM_p0_; +text: .text%__1cITypeNodeEhash6kM_I_; +text: .text%__1cIBoolNodeHis_Bool6M_p0_: subnode.o; +text: .text%__1cMPipeline_UseMfull_latency6kMIrk0_I_; +text: .text%__1cRMachSpillCopyNodePoper_input_base6kM_I_: ad_sparc.o; +text: .text%__1cENodeKmatch_edge6kMI_I_; +text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cHPhiNodeGpinned6kM_i_: cfgnode.o; +text: .text%__1cOPhaseIdealLoopZremix_address_expressions6MpnENode__2_; +text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_; +text: .text%__1cICallNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: ad_sparc_misc.o; +text: .text%__1cRMachSafePointNodeEjvms6kM_pnIJVMState__: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopNget_late_ctrl6MpnENode_2_2_; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: loopnode.o; +text: .text%JVM_CurrentTimeMillis; +text: .text%__1cIMachNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cENodeIIdentity6MpnOPhaseTransform__p0_; +text: .text%__1cIPipelinePoperand_latency6kMIpk0_I_; +text: .text%__1cENodeFIdeal6MpnIPhaseGVN_i_p0_; +text: .text%__1cKTypeAryPtrEhash6kM_i_; +text: .text%__1cICallNodeHis_Call6M_p0_: callnode.o; +text: .text%__1cETypeFxmeet6kMpk0_2_; +text: .text%__1cILRG_ListGextend6MII_v_; +text: .text%__1cJVectorSet2F6kMI_i_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: ad_sparc.o; +text: .text%__1cENodeQIdeal_DU_postCCP6MpnIPhaseCCP__p0_; +text: .text%__1cOtypeArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cIProjNodeEhash6kM_I_; +text: .text%__1cGIfNodeGpinned6kM_i_: classes.o; +text: .text%__1cIAddINodeGOpcode6kM_i_; +text: .text%__1cIIndexSet2t6Mp0_v_; +text: .text%__1cRmethodDataOopDescJnext_data6MpnLProfileData__2_; +text: .text%__1cITypeNodeJideal_reg6kM_I_; +text: .text%__1cYCallStaticJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLTypeInstPtrCeq6kMpknEType__i_; +text: .text%__1cMloadConPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: classes.o; +text: .text%__1cHPhiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMachNode2t6M_v_; +text: .text%__1cIMachNodeQis_MachNullCheck6M_pnRMachNullCheckNode__: ad_sparc.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: callnode.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: callnode.o; +text: .text%__1cOis_diamond_phi6FpnENode__i_: cfgnode.o; +text: .text%__1cHMatcherKLabel_Root6MpknENode_pnFState_p16_6_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: memnode.o; +text: .text%__1cENodeHsize_of6kM_I_; +text: .text%__1cICmpPNodeGOpcode6kM_i_; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: classes.o; +text: .text%__1cKNode_ArrayGremove6MI_v_; +text: .text%__1cNSafePointNodeGpinned6kM_i_: callnode.o; +text: .text%__1cMOopTaskQdDueueOpop_local_slow6MInOTaskQdDueueSuperDAge__i_; +text: .text%__1cHPhiNodeEhash6kM_I_; +text: .text%__1cKTypeOopPtrJsingleton6kM_i_; +text: .text%__1cPindOffset13OperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cFMutexElock6M_v_; +text: 
.text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__; +text: .text%__1cJCProjNodeGis_CFG6kM_i_: cfgnode.o; +text: .text%__1cKmethodOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cGIfNodeFis_If6M_p0_: classes.o; +text: .text%__1cENodeNrematerialize6kM_i_: cfgnode.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nmethod.o; +text: .text%__1cJStartNodeLbottom_type6kM_pknEType__; +text: .text%__1cHTypeIntFxmeet6kMpknEType__3_; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: ad_sparc_misc.o; +text: .text%__1cOmatch_into_reg6FpnENode_iii1_i_: matcher.o; +text: .text%__1cENodeSremove_dead_region6MpnIPhaseGVN_i_i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: classes.o; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: memnode.o; +text: .text%__1cIProjNodeLbottom_type6kM_pknEType__; +text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__; +text: .text%__1cKTypeAryPtrCeq6kMpknEType__i_; +text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cICmpINodeGOpcode6kM_i_; +text: .text%Unsafe_CompareAndSwapLong; +text: .text%__1cNCatchProjNodeGOpcode6kM_i_; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: cfgnode.o; +text: .text%__1cSinstanceKlassKlassMoop_is_klass6kM_i_: instanceKlassKlass.o; +text: .text%__1cQUnique_Node_ListGremove6MpnENode__v_; +text: .text%__1cENode2t6Mp0_v_; +text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: classes.o; +text: .text%__1cMloadConPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitHstopped6M_i_; +text: .text%__1cETypeKhas_memory6kM_i_; +text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: cfgnode.o; +text: .text%__1cMloadConPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJStartNodeGpinned6kM_i_: callnode.o; +text: .text%__1cTCreateExceptionNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompileMFillLocArray6MpnENode_pnNGrowableArray4CpnKScopeValue____i_; +text: .text%__1cIHaltNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: ad_sparc_misc.o; +text: .text%__1cLTypeInstPtrEmake6FnHTypePtrDPTR_pnHciKlass_ipnIciObject_i_pk0_; +text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: cfgnode.o; +text: .text%__1cHRegMaskMSmearToPairs6M_v_; +text: .text%__1cYCallStaticJavaDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMPhaseIterGVNVadd_users_to_worklist6MpnENode__v_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: callnode.o; +text: .text%__1cKMachIfNodeJis_MachIf6kM_pk0_: ad_sparc_misc.o; +text: .text%__1cMloadConPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: classes.o; +text: .text%__1cbFCompressedLineNumberWriteStreamKwrite_pair6Mii_v_; +text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__; +text: .text%__1cSCallStaticJavaNodeEhash6kM_I_: callnode.o; +text: .text%__1cHMatcherKReduceOper6MpnFState_ipnIMachNode_rpnENode__v_; +text: .text%__1cMPipeline_UseJadd_usage6Mrk0_v_; +text: .text%__1cRCardTableModRefBSEkind6M_nKBarrierSetEName__: cardTableExtension.o; +text: .text%__1cIAddPNodeKmatch_edge6kMI_I_; +text: .text%__1cJiRegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cGIfNodeLbottom_type6kM_pknEType__: classes.o; +text: 
.text%__1cGcmpkey6Fpkv1_i_; +text: .text%__1cNsymbolOopDescGequals6kMpkci_i_; +text: .text%__1cMPhaseChaitinMchoose_color6MrnDLRG_i_nHOptoRegEName__; +text: .text%__1cMMergeMemNodeGOpcode6kM_i_; +text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__; +text: .text%__1cJTypeTupleJsingleton6kM_i_; +text: .text%__1cIParmNodeGOpcode6kM_i_; +text: .text%__1cJiRegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_; +text: .text%__1cPClassFileParserbEparse_constant_pool_utf8_entry6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cHTypeIntEmake6Fiii_pk0_; +text: .text%__1cENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_; +text: .text%__1cKSchedulingWAddNodeToAvailableList6MpnENode__v_; +text: .text%__1cKSchedulingSChooseNodeToBundle6M_pnENode__; +text: .text%__1cKSchedulingPAddNodeToBundle6MpnENode_pknFBlock__v_; +text: .text%__1cKRelocationLunpack_data6M_v_: codeBlob.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: memnode.o; +text: .text%__1cICallNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: classes.o; +text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cJLoadPNodeGOpcode6kM_i_; +text: .text%__1cMMutableSpaceIallocate6MI_pnIHeapWord__; +text: .text%__1cJPSPermGenSallocate_permanent6MI_pnIHeapWord__; +text: .text%__1cUParallelScavengeHeapWpermanent_mem_allocate6MI_pnIHeapWord__; +text: .text%__1cENodeGis_CFG6kM_i_: connode.o; +text: .text%__1cIMachNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMMutableSpaceMcas_allocate6MI_pnIHeapWord__; +text: .text%__1cNflagsRegPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cHPhiNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMMachTypeNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cJCatchNodeGOpcode6kM_i_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: cfgnode.o; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pnIciObject_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cHCmpNodeGis_Cmp6kM_pk0_: classes.o; +text: .text%__1cIJVMStateLdebug_start6kM_I_; +text: .text%__1cTconstantPoolOopDescSklass_ref_index_at6Mi_i_; +text: .text%__1cENodeHdel_req6MI_v_; +text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_; +text: .text%__1cOAbstractICachePinvalidate_word6FpC_v_; +text: .text%__1cFBlockIis_Empty6kM_i_; +text: .text%__1cOThreadCritical2T6M_v_; +text: .text%__1cOThreadCritical2t6M_v_; +text: .text%method_compare: methodOop.o; +text: .text%__1cENodeGis_CFG6kM_i_: subnode.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: multnode.o; +text: .text%__1cICodeHeapKfind_start6kMpv_1_; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_: classes.o; +text: .text%__1cETypeEhash6kM_i_; +text: .text%__1cRNativeInstructionLset_long_at6Mii_v_; +text: .text%__1cJMultiNodeEhash6kM_I_: classes.o; +text: .text%__1cIAddPNodeLbottom_type6kM_pknEType__; +text: .text%__1cQciByteCodeStreamEjava6MnJBytecodesECode__2_; +text: .text%__1cJCProjNodeEhash6kM_I_: classes.o; +text: .text%__1cIHaltNodeGOpcode6kM_i_; +text: .text%__1cMMachCallNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cFBlockGselect6MrnJNode_List_rnLBlock_Array_pirnJVectorSet_IrnNGrowableArray4CI___pnENode__; +text: .text%__1cFStateRMachNodeGenerator6MipnHCompile__pnIMachNode__; +text: .text%__1cHMatcherKReduceInst6MpnFState_irpnENode__pnIMachNode__; +text: .text%__1cICmpUNodeGOpcode6kM_i_; +text: 
.text%__1cOPhaseIdealLoopbIdom_lca_for_get_late_ctrl_internal6MpnENode_22_2_; +text: .text%__1cXPipeline_Use_Cycle_MaskCOr6Mrk0_v_; +text: .text%__1cTconstantPoolOopDescQsignature_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cILoadNodeEhash6kM_I_; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_sparc_misc.o; +text: .text%__1cKTypeAryPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cIMachNodeGExpand6MpnFState_rnJNode_List__p0_: ad_sparc_misc.o; +text: .text%__1cGBitMapFclear6M_v_; +text: .text%__1cHConNodeGis_Con6kM_I_: classes.o; +text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_; +text: .text%__1cKHandleMark2T6M_v_; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlass.o; +text: .text%__1cFBlockLis_uncommon6kMrnLBlock_Array__i_; +text: .text%__1cZPhaseConservativeCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cMPhaseIterGVNZremove_globally_dead_node6MpnENode__v_; +text: .text%__1cWShouldNotReachHereNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHPhiNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cILoadNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: cfgnode.o; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: multnode.o; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNobjArrayKlassQarray_klass_impl6FnTobjArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%JVM_ReleaseUTF; +text: .text%__1cJloadPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJTypeTupleEhash6kM_i_; +text: .text%__1cFframeVoopmapreg_to_location6kMnFVMRegEName_pknLRegisterMap__ppnHoopDesc__; +text: .text%__1cENodeHget_int6kM_i_; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: methodDataOop.o; +text: .text%__1cHMatcherTReduceInst_Interior6MpnFState_ipnIMachNode_IrpnENode__I_; +text: .text%__1cNinstanceKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassQarray_klass_impl6FnTinstanceKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cMflagsRegOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: codeBlob.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: cfgnode.o; +text: .text%__1cICodeBlobLoop_addr_at6kMi_ppnHoopDesc__; +text: .text%__1cObranchConPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cMOopMapStreamJfind_next6M_v_; +text: .text%__1cFDictI2i6M_v_; +text: .text%__1cIMachNodeSalignment_required6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__: classes.o; +text: .text%__1cKNode_ArrayEgrow6MI_v_; +text: .text%__1cHTypeIntEmake6Fi_pk0_; +text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_; +text: .text%__1cJloadPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMMergeMemNodeLbottom_type6kM_pknEType__: memnode.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: multnode.o; +text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_; +text: .text%__1cOPSPromotionLABKinitialize6MnJMemRegion__v_; +text: .text%__1cPciInstanceKlassMis_interface6M_i_: ciInstanceKlass.o; +text: .text%__1cJMultiNodeIproj_out6kMI_pnIProjNode__; +text: .text%__1cPindOffset13OperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_: coalesce.o; +text: .text%__1cUcompI_iReg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cODataRelocationJset_value6MpC_v_: relocInfo.o; +text: .text%__1cKRelocationRpd_set_data_value6MpCi_v_; +text: .text%__1cKCastPPNodeGOpcode6kM_i_; +text: .text%__1cOoop_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cOoop_RelocationGoffset6M_i_: relocInfo.o; +text: .text%__1cPSignatureStreamEnext6M_v_; +text: .text%__1cLLShiftINodeGOpcode6kM_i_; +text: .text%__1cENodeOis_block_start6kM_i_; +text: .text%__1cMPhaseChaitinSuse_prior_register6MpnENode_I2pnFBlock_rnJNode_List_6_i_; +text: .text%__1cGIfNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKCodeBufferIrelocate6MpCrknQRelocationHolder_i_v_; +text: .text%__1cKbranchNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPjava_lang_ClassMis_primitive6FpnHoopDesc__i_; +text: .text%__1cGBitMapJset_union6M0_v_; +text: .text%__1cPVirtualCallDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cIAddPNodeHis_AddP6M_p0_: classes.o; +text: .text%__1cIConPNodeGOpcode6kM_i_; +text: .text%__1cJLoadINodeGOpcode6kM_i_; +text: .text%__1cUGenericGrowableArray2t6Mii_v_; +text: .text%JVM_GetMethodIxExceptionTableLength; +text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__; +text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__; +text: .text%__1cNSharedRuntimeDd2i6Fd_i_; +text: .text%__1cVcompP_iRegP_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKRegionNodeEhash6kM_I_: classes.o; +text: .text%__1cNbranchConNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNSafePointNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: subnode.o; +text: .text%__1cENodeHis_Copy6kM_I_: memnode.o; +text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_; +text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_; +text: .text%__1cIAddPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cGBitMap2t6MpII_v_; +text: .text%__1cUGenericGrowableArray2t6MpnFArena_iipnEGrET__v_; +text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_; +text: .text%__1cMMachCallNodeLbottom_type6kM_pknEType__; +text: .text%__1cFParsePdo_one_bytecode6M_v_; +text: .text%__1cFParseNdo_exceptions6M_v_; +text: .text%__1cITypeLongCeq6kMpknEType__i_; +text: .text%__1cLPCTableNodeGpinned6kM_i_: classes.o; +text: .text%__1cTconstantPoolOopDescbAname_and_type_ref_index_at6Mi_i_; +text: .text%__1cHPhiNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cENodeGpinned6kM_i_: connode.o; +text: .text%__1cHPhiNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cENodeGis_Con6kM_I_: classes.o; +text: .text%__1cHMatcherKmatch_tree6MpknENode__pnIMachNode__; +text: .text%__1cUParallelScavengeHeapPis_in_permanent6kMpkv_i_: parallelScavengeHeap.o; +text: .text%__1cMPhaseIterGVNKis_IterGVN6M_p0_: phaseX.o; +text: .text%__1cHCompileJcan_alias6MpknHTypePtr_i_i_; +text: .text%__1cKimmI13OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cCosVcurrent_stack_pointer6F_pC_; +text: .text%__1cENodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cEDict2F6kMpkv_pv_; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cENodeIdestruct6M_v_; +text: .text%__1cIMachNodeNis_MachEpilog6M_pnOMachEpilogNode__: ad_sparc_misc.o; +text: .text%__1cMCreateExNodeGOpcode6kM_i_; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: cfgnode.o; +text: .text%__1cIBoolNodeEhash6kM_I_; +text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cLTypeInstPtrFxmeet6kMpknEType__3_; +text: 
.text%__1cENodeHis_Root6M_pnIRootNode__: classes.o; +text: .text%__1cKNode_ArrayFclear6M_v_; +text: .text%__1cObranchConPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIProjNodeHsize_of6kM_I_; +text: .text%__1cKis_x2logic6FpnIPhaseGVN_pnENode__3_: cfgnode.o; +text: .text%__1cHAbsNodeLis_absolute6FpnIPhaseGVN_pnENode__4_; +text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_; +text: .text%__1cMloadConINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cGBitMapGat_put6MIi_v_; +text: .text%__1cIHaltNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cLPhaseValuesGintcon6Mi_pnIConINode__; +text: .text%__1cJloadBNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: multnode.o; +text: .text%__1cGThreadLis_in_stack6kMpC_i_; +text: .text%__1cKJavaThreadNis_lock_owned6kMpC_i_; +text: .text%__1cFMutexElock6MpnGThread__v_; +text: .text%__1cKciTypeFlowLStateVectorSapply_one_bytecode6MpnQciByteCodeStream__i_; +text: .text%__1cHhashptr6Fpkv_i_; +text: .text%__1cMMachHaltNodeEjvms6kM_pnIJVMState__; +text: .text%__1cENodeLis_MergeMem6M_pnMMergeMemNode__: classes.o; +text: .text%__1cGOopMapJset_value6MnHOptoRegEName_ii_v_; +text: .text%__1cHhashkey6Fpkv_i_; +text: .text%__1cMPhaseChaitinHnew_lrg6MpknENode_I_v_; +text: .text%__1cIJVMStateJdebug_end6kM_I_; +text: .text%__1cIPhaseIFGMtest_edge_sq6kMII_i_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: classes.o; +text: .text%__1cTconstantPoolOopDescLname_ref_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cLBoxLockNodeNrematerialize6kM_i_: classes.o; +text: .text%__1cKBranchDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cJloadPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHSubNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: callnode.o; +text: .text%__1cJTypeTupleCeq6kMpknEType__i_; +text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_; +text: .text%__1cSaddP_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMachNodeHtwo_adr6kM_I_: ad_sparc.o; +text: .text%__1cNSafePointNodeHsize_of6kM_I_; +text: .text%__1cObranchConPNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cLTypeInstPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cHCmpNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPcheckCastPPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNLoadRangeNodeGOpcode6kM_i_; +text: .text%__1cNbranchConNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENode2t6Mp011_v_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: symbolKlass.o; +text: .text%__1cHCompilePfind_alias_type6MpknHTypePtr_i_pn0AJAliasType__; +text: .text%__1cJStoreNodeKmatch_edge6kMI_I_; +text: .text%__1cKbranchNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cOPSPromotionLABFflush6M_v_; +text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_: bytecode.o; +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: generateOopMap.o; +text: .text%__1cOcompU_iRegNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__; +text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_; +text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__; +text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_; +text: .text%__1cQSystemDictionaryXcheck_signature_loaders6FnMsymbolHandle_nGHandle_2ipnGThread__v_; +text: .text%__1cKbranchNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: ad_sparc_misc.o; +text: .text%__1cJloadPNodeZcheck_for_anti_dependence6kM_i_: 
ad_sparc_misc.o; +text: .text%__1cSCountedLoopEndNodeGOpcode6kM_i_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: cfgnode.o; +text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__; +text: .text%__1cHMemNodeMIdeal_common6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPcheckCastPPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNCellTypeStateFmerge6kM0i_0_; +text: .text%__1cMPhaseIterGVNMsubsume_node6MpnENode_2_v_; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceKlass.o; +text: .text%__1cILoadNodeKmatch_edge6kMI_I_; +text: .text%__1cENodeGis_Con6kM_I_: subnode.o; +text: .text%__1cFframeUis_interpreted_frame6kM_i_; +text: .text%__1cLsymbolKlassNoop_is_symbol6kM_i_: symbolKlass.o; +text: .text%__1cJloadINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cETypeFempty6kM_i_; +text: .text%__1cNExceptionMark2T6M_v_; +text: .text%__1cNExceptionMark2t6MrpnGThread__v_; +text: .text%__1cMMachCallNodeLis_MachCall6M_p0_: ad_sparc_misc.o; +text: .text%__1cIMachNodeHis_Mach6M_p0_: machnode.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: connode.o; +text: .text%__1cITypeLongEhash6kM_i_; +text: .text%__1cNSafePointNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__; +text: .text%__1cJiRegLOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: multnode.o; +text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__; +text: .text%__1cMPhaseIterGVNbGregister_new_node_with_optimizer6MpnENode__2_; +text: .text%__1cPciInstanceKlassRis_instance_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__; +text: .text%__1cOloadConI13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOloadConI13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cObranchConPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKStoreINodeGOpcode6kM_i_; +text: .text%__1cJcmpOpOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_; +text: .text%__1cOno_flip_branch6FpnFBlock__i_: block.o; +text: .text%__1cMloadConINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJiRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKRegionNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKstorePNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHPhiNodeHsize_of6kM_I_: cfgnode.o; +text: .text%__1cJrelocInfo2t6Mn0AJrelocType_ii_v_; +text: .text%__1cPSignatureStreamHis_done6kM_i_; +text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_; +text: .text%__1cIAddPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cQaddP_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescPis_empty_method6kM_i_; +text: .text%__1cSaddI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKReflectionTverify_field_access6FpnMklassOopDesc_22nLAccessFlags_ii_i_; +text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode_i_i_; +text: .text%__1cMflagsRegOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cIBoolNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLCounterDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cHRegMaskMClearToPairs6M_v_; +text: .text%__1cJiRegLOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cRshlI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cZPhaseConservativeCoalesceJcopy_copy6MpnENode_2pnFBlock_I_i_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: lcm.o; +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode__i_; +text: .text%__1cKRelocationSfix_oop_relocation6M_v_: relocInfo.o; +text: .text%__1cNPhaseCoalesceRcombine_these_two6MpnENode_2_v_; +text: .text%__1cNflagsRegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cKcmpOpPOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cKTypeRawPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cMloadConINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescRis_not_compilable6kMi_i_; +text: .text%__1cNmethodOopDescLis_accessor6kM_i_; +text: .text%__1cFArenaEgrow6MI_pv_; +text: .text%__1cMPhaseChaitinLinsert_proj6MpnFBlock_IpnENode_I_v_; +text: .text%__1cILoadNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cJStoreNodeLbottom_type6kM_pknEType__; +text: .text%__1cKTypeRawPtrJsingleton6kM_i_; +text: .text%__1cGIfNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHMemNodeGis_Mem6M_p0_: classes.o; +text: .text%__1cSCallStaticJavaNodeRis_CallStaticJava6kM_pk0_: callnode.o; +text: .text%__1cIBoolNodeLbottom_type6kM_pknEType__: subnode.o; +text: .text%__1cENodeHis_Goto6kM_I_: classes.o; +text: .text%__1cPciObjectFactorySget_unloaded_klass6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cNSafePointNodeSset_next_exception6Mp0_v_; +text: .text%__1cNmethodOopDescMintrinsic_id6kM_n0ALIntrinsicId__; +text: .text%__1cQaddP_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIHaltNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPCheckCastPPNodeGOpcode6kM_i_; +text: .text%__1cKStorePNodeGOpcode6kM_i_; +text: .text%__1cKRelocationLunpack_data6M_v_: relocInfo.o; +text: .text%__1cNflagsRegUOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cQciByteCodeStreamMreset_to_bci6Mi_v_; +text: .text%__1cPcheckCastPPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAddPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRInvocationCounterEinit6M_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: block.o; +text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__; +text: .text%__1cMMergeMemNodeLis_MergeMem6M_p0_: memnode.o; +text: .text%__1cFBlockOschedule_local6MrnHMatcher_rnLBlock_Array_pirnJVectorSet_rnNGrowableArray4CI___i_; +text: .text%__1cXPhaseAggressiveCoalesceIcoalesce6MpnFBlock__v_; +text: .text%__1cFBlockScall_catch_cleanup6MrnLBlock_Array__v_; +text: .text%__1cObranchConUNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQCompressedStream2t6MpCi_v_; +text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_; +text: .text%__1cIAddINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cHRetNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cKRegionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstorePNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMObjectLocker2t6MnGHandle_pnGThread__v_; +text: .text%__1cMObjectLocker2T6M_v_; +text: .text%__1cNSafePointNodebBneeds_polling_address_input6F_i_; +text: .text%__1cKRegionNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOcompI_iRegNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cObranchConPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: callnode.o; +text: .text%__1cMURShiftINodeGOpcode6kM_i_; +text: 
.text%__1cRmethodDataOopDescPinitialize_data6MpnOBytecodeStream_i_i_; +text: .text%__1cENodeGis_CFG6kM_i_: memnode.o; +text: .text%__1cNRelocIteratorKset_limits6MpC1_v_; +text: .text%__1cIRootNodeGOpcode6kM_i_; +text: .text%__1cOloadConI13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cILoadNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cTCreateExceptionNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFStateM_sub_Op_ConI6MpknENode__v_; +text: .text%__1cRMachSafePointNodeQis_MachSafePoint6M_p0_: ad_sparc_misc.o; +text: .text%__1cENodeHdel_out6Mp0_v_: parse2.o; +text: .text%__1cPcheckCastPPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: classes.o; +text: .text%__1cISubINodeGOpcode6kM_i_; +text: .text%__1cNbranchConNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFframeZsender_with_pc_adjustment6kMpnLRegisterMap_pnICodeBlob_i_0_; +text: .text%__1cJTypeTupleEmake6FIppknEType__pk0_; +text: .text%__1cJTypeTupleGfields6FI_ppknEType__; +text: .text%__1cFframeGsender6kMpnLRegisterMap_pnICodeBlob__0_; +text: .text%__1cENodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__; +text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__; +text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_; +text: .text%__1cILoadNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cENodeGpinned6kM_i_: subnode.o; +text: .text%__1cKbranchNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cHPhiNodeIadr_type6kM_pknHTypePtr__: cfgnode.o; +text: .text%__1cHAddNodeEhash6kM_I_; +text: .text%__1cMPhaseIterGVNFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cNbranchConNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSPSPromotionManagerMdrain_stacks6M_v_; +text: .text%__1cENodeRdisconnect_inputs6Mp0_i_; +text: .text%__1cLis_cond_add6FpnIPhaseGVN_pnHPhiNode__pnENode__; +text: .text%__1cPsplit_flow_path6FpnIPhaseGVN_pnHPhiNode__pnENode__: cfgnode.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: cfgnode.o; +text: .text%__1cNbranchConNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cSaddI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcompU_iRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cHConNodeEhash6kM_I_; +text: .text%__1cLLShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNmethodOopDescIbci_from6kMpC_i_; +text: .text%__1cOMachReturnNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cJdo_method6FpnNmethodOopDesc__v_: recompilationMonitor.o; +text: .text%__1cNidealize_test6FpnIPhaseGVN_pnGIfNode__3_: ifnode.o; +text: .text%__1cILoadNodeHis_Load6M_p0_: classes.o; +text: .text%__1cYCallStaticJavaDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: subnode.o; +text: .text%__1cOPhaseIdealLoopSget_ctrl_no_update6kMpnENode__2_: split_if.o; +text: .text%__1cITypeNodeHsize_of6kM_I_; +text: .text%__1cVcompP_iRegP_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: parse1.o; +text: .text%__1cENodeGpinned6kM_i_: memnode.o; +text: .text%__1cNSafePointNodeLbottom_type6kM_pknEType__: callnode.o; +text: .text%__1cFciEnvXget_klass_by_index_impl6MpnPciInstanceKlass_iri_pnHciKlass__; +text: 
.text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__; +text: .text%__1cLProfileDataPadjust_pointers6M_v_: methodDataOop.o; +text: .text%__1cLProfileDataPfollow_contents6M_v_: methodDataOop.o; +text: .text%__1cQaddP_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJStoreNodeIis_Store6kM_pk0_: classes.o; +text: .text%__1cJloadINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstorePNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetClassModifiers; +text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__; +text: .text%__1cNSafePointNodeOnext_exception6kM_p0_; +text: .text%JVM_GetClassAccessFlags; +text: .text%__1cKbranchNodeHis_Goto6kM_I_: ad_sparc_misc.o; +text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_; +text: .text%__1cIsplit_if6FpnGIfNode_pnMPhaseIterGVN__pnENode__: ifnode.o; +text: .text%__1cHTypeAryEhash6kM_i_; +text: .text%__1cTremove_useless_bool6FpnGIfNode_pnIPhaseGVN__pnENode__: ifnode.o; +text: .text%__1cGOopMapHset_xxx6MnHOptoRegEName_nLOopMapValueJoop_types_ii2_v_; +text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_; +text: .text%__1cJMultiNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cJCProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPPerfLongVariantGsample6M_v_; +text: .text%__1cJStoreNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMPhaseChaitinMyank_if_dead6MpnENode_pnFBlock_pnJNode_List_6_i_; +text: .text%__1cJloadINodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cPSignatureStreamJis_object6kM_i_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: callnode.o; +text: .text%__1cIBoolNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJCatchNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: multnode.o; +text: .text%__1cIMachOperNconstant_disp6kM_i_; +text: .text%__1cIMachOperFscale6kM_i_; +text: .text%__1cENode2t6Mp0111_v_; +text: .text%__1cFPhase2t6Mn0ALPhaseNumber__v_; +text: .text%__1cNCompileBrokerLmaybe_block6F_v_; +text: .text%__1cFBlockOcode_alignment6M_I_; +text: .text%__1cMgetTimeNanos6F_x_: os_solaris.o; +text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__; +text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: subnode.o; +text: .text%__1cFStateM_sub_Op_RegP6MpknENode__v_; +text: .text%JVM_GetCPMethodSignatureUTF; +text: .text%__1cFChunkJnext_chop6M_v_; +text: .text%__1cMMergeMemNodeEhash6kM_I_; +text: .text%__1cGOopMapHset_oop6MnHOptoRegEName_ii_v_; +text: .text%__1cKSchedulingbFComputeRegisterAntidependencies6MpnFBlock__v_; +text: .text%__1cKSchedulingPComputeUseCount6MpknFBlock__v_; +text: .text%__1cITypeNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cHTypePtrHget_con6kM_i_; +text: .text%__1cUcompI_iReg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceKlass.o; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: ad_sparc.o; +text: .text%__1cIJumpDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: ad_sparc.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_sparc.o; +text: .text%__1cIMachNodeSalignment_required6kM_i_: ad_sparc.o; +text: .text%__1cENodeJis_Branch6kM_I_: ad_sparc.o; +text: .text%__1cMPhaseChaitinSget_spillcopy_wide6MpnENode_2I_2_; +text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__; +text: 
.text%__1cVcompP_iRegP_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cHSubNodeGis_Sub6M_p0_: classes.o; +text: .text%__1cNPhaseRegAllocGis_oop6kMpknENode__i_; +text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_; +text: .text%__1cQaddI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cObranchConUNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: multnode.o; +text: .text%__1cUParallelScavengeHeapVunsafe_max_tlab_alloc6kM_I_; +text: .text%__1cFBlockJfind_node6kMpknENode__I_; +text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_: frame.o; +text: .text%__1cFMutexbClock_without_safepoint_check6M_v_; +text: .text%__1cHCmpNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_I_pnIHeapWord__; +text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_; +text: .text%__1cHTypePtrEhash6kM_i_; +text: .text%__1cIMachNodeNis_MachEpilog6M_pnOMachEpilogNode__: ad_sparc.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: codeBlob.o; +text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2I_v_; +text: .text%__1cUParallelScavengeHeapRallocate_new_tlab6MI_pnIHeapWord__; +text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_; +text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_; +text: .text%__1cKSharedHeapXfill_region_with_object6FnJMemRegion__v_; +text: .text%__1cFBlockLfind_remove6MpknENode__v_; +text: .text%__1cIIndexSetJlrg_union6MIIkIpknIPhaseIFG_rknHRegMask__I_; +text: .text%__1cKMemBarNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cUcompI_iReg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_; +text: .text%__1cNObjectMonitorEexit6MpnGThread__v_; +text: .text%__1cIimmPOperEtype6kM_pknEType__: ad_sparc_clone.o; +text: .text%__1cMloadConPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cLMachNopNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadRangeNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cVCompressedWriteStream2t6Mi_v_; +text: .text%__1cNObjectMonitorFenter6MpnGThread__v_; +text: .text%__1cRlock_ptr_RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cPVirtualCallDataPadjust_pointers6M_v_; +text: .text%__1cPVirtualCallDataPfollow_contents6M_v_; +text: .text%__1cIJVMStateNclone_shallow6kM_p0_; +text: .text%__1cENodeKreplace_by6Mp0_v_; +text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cMMergeMemNodePiteration_setup6Mpk0_v_; +text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cKRegionNodeOis_block_start6kM_i_: classes.o; +text: .text%__1cRMachSpillCopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRMachSpillCopyNodeOimplementation6kMpnKCodeBuffer_pnNPhaseRegAlloc_i_I_; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: classes.o; +text: .text%__1cKRegionNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJStoreNodeEhash6kM_I_; +text: 
.text%__1cHMatcherQis_save_on_entry6Mi_i_; +text: .text%__1cSaddP_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQaddI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKTypeOopPtrWmake_from_klass_common6FpnHciKlass_ii_pk0_; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: machnode.o; +text: .text%__1cIGraphKitJclone_map6M_pnNSafePointNode__; +text: .text%__1cMMergeMemNodeQclone_all_memory6FpnENode__p0_; +text: .text%__1cOcompU_iRegNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKIfTrueNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRMemBarReleaseNodeGOpcode6kM_i_; +text: .text%__1cHTypeIntFempty6kM_i_; +text: .text%__1cKbranchNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLPhaseValuesFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cIMachOperIconstant6kM_i_; +text: .text%__1cNCatchProjNodeMis_CatchProj6kM_pk0_: cfgnode.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: subnode.o; +text: .text%__1cWMutableSpaceUsedHelperLtake_sample6M_x_: spaceCounters.o; +text: .text%__1cQaddI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_; +text: .text%__1cOcompU_iRegNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNRelocIteratorKinitialize6MipnICodeBlob_pC3_v_; +text: .text%__1cRPSOldPromotionLABFflush6M_v_; +text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__; +text: .text%__1cPcompP_iRegPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKCodeBufferRtransform_address6kMrk0pC_3_; +text: .text%__1cLBoxLockNodeGOpcode6kM_i_; +text: .text%__1cIciObjectJset_ident6MI_v_; +text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__; +text: .text%__1cKTypeRawPtrEhash6kM_i_; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: subnode.o; +text: .text%__1cIBoolNodeKmatch_edge6kMI_I_: subnode.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: memnode.o; +text: .text%__1cMMergeMemNodePset_base_memory6MpnENode__v_; +text: .text%__1cOcompI_iRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLIfFalseNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cCosPelapsed_counter6F_x_; +text: .text%__1cNPhaseRegAllocKreg2offset6kMnHOptoRegEName__i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: multnode.o; +text: .text%__1cKReflectionTverify_class_access6FpnMklassOopDesc_2i_i_; +text: .text%__1cICallNodeLis_CallLeaf6kM_pknMCallLeafNode__: callnode.o; +text: .text%__1cRCompilationPolicyOmustBeCompiled6FnMmethodHandle__i_; +text: .text%__1cMMergeMemNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cGBitMapOset_difference6M0_v_; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: callnode.o; +text: .text%__1cMPhaseChaitinJsplit_USE6MpnENode_pnFBlock_2IIiinNGrowableArray4CI__i_I_; +text: .text%__1cENodeGis_Con6kM_I_: cfgnode.o; +text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cRshlI_reg_imm5NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMMergeMemNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%JVM_GetMethodIxLocalsCount; +text: .text%__1cNloadRangeNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: multnode.o; +text: .text%JVM_CurrentThread; +text: .text%__1cENodeHget_ptr6kM_i_; +text: .text%__1cQciByteCodeStreamFEOBCs6M_nJBytecodesECode__; +text: .text%__1cRcmpFastUnlockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIAndINodeGOpcode6kM_i_; +text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_; 
+text: .text%__1cENodeHins_req6MIp0_v_; +text: .text%__1cNRelocIteratorEnext6M_i_: codeBuffer.o; +text: .text%__1cSaddI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMPhaseChaitinFUnion6MpknENode_3_v_; +text: .text%__1cMloadConLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHAddNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: codeBuffer.o; +text: .text%__1cPBoundRelocationMupdate_addrs6MpCrknKCodeBuffer_4_1_; +text: .text%__1cKstoreINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOFastUnlockNodeGOpcode6kM_i_; +text: .text%__1cLOptoRuntimeFnew_C6FpnMklassOopDesc_pnKJavaThread__v_; +text: .text%__1cITypeNodeDcmp6kMrknENode__I_; +text: .text%__1cIHaltNodeLbottom_type6kM_pknEType__; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: connode.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: gcm.o; +text: .text%__1cKstorePNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNflagsRegUOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cKcmpOpUOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cLstoreI0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIciObject2t6MnGHandle__v_; +text: .text%__1cNSafePointNodeKmatch_edge6kMI_I_; +text: .text%__1cFframeOis_entry_frame6kM_i_; +text: .text%__1cIMachOperOindex_position6kM_i_; +text: .text%__1cLklassVtableTupdate_super_vtable6MpnNinstanceKlass_pnNmethodOopDesc_i_i_; +text: .text%__1cXmembar_release_lockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJVectorSet2L6MI_rnDSet__; +text: .text%__1cLOopRecorderOallocate_index6MpnI_jobject__i_; +text: .text%__1cOcompU_iRegNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMMergeMemNodeJmemory_at6kMI_pnENode__; +text: .text%__1cSaddP_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPindOffset13OperNconstant_disp6kM_i_: ad_sparc.o; +text: .text%__1cPindOffset13OperFscale6kM_i_: ad_sparc.o; +text: .text%__1cPindOffset13OperNbase_position6kM_i_: ad_sparc.o; +text: .text%__1cWShouldNotReachHereNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__; +text: .text%__1cUcompI_iReg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVcompP_iRegP_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQaddP_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQaddP_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSvframeStreamCommonPfill_from_frame6M_i_; +text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: classes.o; +text: .text%__1cIJVMStateIof_depth6kMi_p0_; +text: .text%__1cNSharedRuntimeElrem6Fxx_x_; +text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cMciMethodDataLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cRMemBarAcquireNodeGOpcode6kM_i_; +text: .text%__1cRcmpFastUnlockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKo0RegPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cSaddI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cObranchConUNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJVectorSet2t6MpnFArena__v_; +text: .text%__1cKRegionNodeGis_CFG6kM_i_: loopnode.o; +text: .text%__1cKTypeAryPtrFxmeet6kMpknEType__3_; +text: .text%__1cVcompP_iRegP_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRSignatureIteratorSiterate_parameters6MX_v_; +text: 
.text%__1cKRelocationWfix_relocation_at_move6Mi_v_: relocInfo.o; +text: .text%__1cICallNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cKRelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: postaloc.o; +text: .text%__1cOMethodLivenessKBasicBlockWcompute_gen_kill_range6MpnQciByteCodeStream__v_; +text: .text%__1cJTraceTime2T6M_v_; +text: .text%__1cITypeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cObranchConUNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cPcheckCastPPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKMemBarNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJloadPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cITypeLongJsingleton6kM_i_; +text: .text%__1cJTraceTime2t6MpkcpnMelapsedTimer_iipnMoutputStream__v_; +text: .text%__1cKoopFactoryKnew_method6FinLAccessFlags_iiipnGThread__pnNmethodOopDesc__; +text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_; +text: .text%__1cNmethodOopDescLobject_size6Fi_i_; +text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_; +text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_; +text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__; +text: .text%__1cLklassVtableWneeds_new_vtable_entry6FpnNmethodOopDesc_pnMklassOopDesc_pnHoopDesc_pnNsymbolOopDesc_nLAccessFlags__i_; +text: .text%__1cNmethodOopDescJinit_code6M_v_; +text: .text%__1cQconstMethodKlassIallocate6MiiiipnGThread__pnSconstMethodOopDesc__; +text: .text%__1cPClassFileParserMparse_method6MnSconstantPoolHandle_ipnLAccessFlags_pnPtypeArrayHandle_55pnGThread__nMmethodHandle__; +text: .text%__1cObranchConUNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMMergeMemNodeNset_memory_at6MIpnENode__v_; +text: .text%__1cLstoreI0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFciEnvSget_klass_by_index6MpnPciInstanceKlass_iri_pnHciKlass__; +text: .text%__1cNSignatureInfoHdo_void6M_v_: bytecode.o; +text: .text%__1cQaddI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENode2t6Mp01_v_; +text: .text%__1cNmethodOopDescPis_final_method6kM_i_; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cKstoreINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIRewriterOrewrite_method6FnMmethodHandle_rnIintArray_pnGThread__1_; +text: .text%__1cNmethodOopDescLlink_method6FnMmethodHandle__v_; +text: .text%__1cPClassFileParserbDverify_legal_method_modifiers6MiinMsymbolHandle_pnGThread__v_; +text: .text%__1cHTypeAryRary_must_be_exact6kM_i_; +text: .text%__1cRshrI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFStateM_sub_Op_AddP6MpknENode__v_; +text: .text%__1cTCreateExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJCatchNodeIis_Catch6kM_pk0_: classes.o; +text: .text%__1cIGraphKitEstop6M_v_; +text: .text%__1cOcompI_iRegNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIPhaseCCPFwiden6kMpknEType_3_3_: phaseX.o; +text: .text%__1cKCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPcompP_iRegPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRMachNullCheckNodeQis_MachNullCheck6M_p0_: machnode.o; +text: .text%__1cITypeFuncEhash6kM_i_; +text: .text%__1cLBoxLockNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNMachIdealNodeMideal_Opcode6kM_i_: machnode.o; +text: .text%__1cMTypeKlassPtrEhash6kM_i_; +text: 
.text%__1cMCallLeafNodeGOpcode6kM_i_; +text: .text%__1cENodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCallLeafDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHnmethodKis_nmethod6kM_i_: nmethod.o; +text: .text%__1cOcompI_iRegNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHPhiNodeEmake6FpnENode_2pknEType_pknHTypePtr__p0_; +text: .text%__1cIAddPNodeQmach_bottom_type6FpknIMachNode__pknEType__; +text: .text%__1cOcompU_iRegNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: connode.o; +text: .text%__1cOCallRelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cKRelocationXpd_set_call_destination6MpCi_v_; +text: .text%__1cKRelocationTpd_call_destination6M_pC_; +text: .text%__1cJiRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNflagsRegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cJStartNodeGis_CFG6kM_i_: callnode.o; +text: .text%__1cHOrINodeGOpcode6kM_i_; +text: .text%__1cXmembar_acquire_lockNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%JVM_GetCPMethodClassNameUTF; +text: .text%__1cMloadConDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMflagsRegOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cHnmethodJis_zombie6kM_i_: nmethod.o; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: callnode.o; +text: .text%__1cKSchedulingQNodeFitsInBundle6MpnENode__i_; +text: .text%__1cLProfileDataPfollow_contents6M_v_: ciMethodData.o; +text: .text%__1cLProfileDataPadjust_pointers6M_v_: ciMethodData.o; +text: .text%__1cFStateM_sub_Op_RegI6MpknENode__v_; +text: .text%__1cOMachReturnNodeNis_MachReturn6M_p0_: ad_sparc_misc.o; +text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_; +text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNinstanceKlassVshould_be_initialized6kM_i_; +text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cHCompileYout_preserve_stack_slots6F_I_; +text: .text%__1cPSignatureStream2t6MnMsymbolHandle_i_v_; +text: .text%__1cIGraphKitLclean_stack6Mi_v_; +text: .text%__1cKStoreBNodeGOpcode6kM_i_; +text: .text%__1cLklassVtableToop_adjust_pointers6M_v_; +text: .text%__1cLklassVtableToop_follow_contents6M_v_; +text: .text%__1cVcompP_iRegP_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_; +text: .text%__1cPClassFileParserWparse_linenumber_table6MIIpipnGThread__pC_; +text: .text%__1cbFCompressedLineNumberWriteStream2t6Mi_v_; +text: .text%__1cITypeFuncCeq6kMpknEType__i_; +text: .text%__1cUcompI_iReg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKbranchNodeFclone6kM_pnENode__; +text: .text%__1cMUniverseOperFclone6kM_pnIMachOper__; +text: .text%__1cJlabelOperFclone6kM_pnIMachOper__; +text: .text%__1cJlabelOperFlabel6kM_pnFLabel__: ad_sparc.o; +text: .text%__1cICallNodeHis_Call6M_p0_: classes.o; +text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__; +text: .text%__1cIMachNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cIMachOperMdisp_as_type6kM_pknHTypePtr__: ad_sparc.o; +text: .text%__1cRshlI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRelocationJpack_data6M_i_: codeBlob.o; +text: .text%__1cOPhaseIdealLoopIsplit_up6MpnENode_22_i_; +text: .text%__1cLCounterDataOis_CounterData6M_i_: ciMethodData.o; +text: .text%__1cJStartNodeGpinned6kM_i_: classes.o; +text: 
.text%__1cHAddNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOis_range_check6FpnENode_r12ri_i_: ifnode.o; +text: .text%JVM_IsNaN; +text: .text%__1cNloadRangeNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKbranchNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJStartNodeGOpcode6kM_i_; +text: .text%__1cOMethodLivenessKBasicBlockPget_liveness_at6MpnIciMethod_i_nGBitMap__; +text: .text%__1cIciMethodPliveness_at_bci6Mi_nGBitMap__; +text: .text%__1cOMethodLivenessPget_liveness_at6Mi_nGBitMap__; +text: .text%__1cQregF_to_stkINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: memnode.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: subnode.o; +text: .text%__1cENodeDcmp6kMrk0_I_; +text: .text%__1cFParseKensure_phi6Mii_pnHPhiNode__; +text: .text%__1cOoop_RelocationJpack_data6M_i_; +text: .text%__1cHTypeIntFxdual6kM_pknEType__; +text: .text%__1cIciObjectIencoding6M_pnI_jobject__; +text: .text%__1cNSafePointNode2t6MIpnIJVMState__v_; +text: .text%__1cHTypePtrJsingleton6kM_i_; +text: .text%__1cMmerge_region6FpnKRegionNode_pnIPhaseGVN__pnENode__: cfgnode.o; +text: .text%__1cIGraphKitObasic_plus_adr6MpnENode_2i_2_; +text: .text%__1cJAssemblerOpatched_branch6Fiii_i_; +text: .text%__1cJAssemblerSbranch_destination6Fii_i_; +text: .text%__1cRshlI_reg_imm5NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPClassFileParserZskip_over_field_signature6MpciIpnGThread__1_; +text: .text%__1cENodeIadd_prec6Mp0_v_; +text: .text%__1cLBoxLockNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMloadConDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__; +text: .text%__1cSaddP_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWMachCallStaticJavaNodePret_addr_offset6M_i_; +text: .text%__1cICodeBlobJis_zombie6kM_i_: codeBlob.o; +text: .text%__1cITypeFuncEmake6FpknJTypeTuple_3_pk0_; +text: .text%__1cMloadConDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCallLeafDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLRegisterMap2t6MpnKJavaThread_i_v_; +text: .text%__1cKTypeOopPtrHget_con6kM_i_; +text: .text%__1cQsubI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIRootNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLPhaseValuesHmakecon6MpknEType__pnHConNode__; +text: .text%__1cJloadLNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_iipnGThread__v_; +text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_; +text: .text%__1cJLoadBNodeGOpcode6kM_i_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: subnode.o; +text: .text%__1cLOptoRuntimebCcomplete_monitor_unlocking_C6FpnHoopDesc_pnJBasicLock__v_; +text: .text%__1cLOptoRuntimebAcomplete_monitor_locking_C6FpnHoopDesc_pnJBasicLock_pnKJavaThread__v_; +text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_; +text: .text%__1cLRegisterMapLpd_location6kMnFVMRegEName__pC_; +text: .text%__1cSvframeStreamCommonEnext6M_v_; +text: .text%__1cIAddINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cIRootNodeNis_block_proj6kM_pknENode__: classes.o; +text: .text%__1cMMergeMemNode2t6MpnENode__v_; +text: .text%__1cOcompI_iRegNodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cRMachSafePointNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cJloadINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXruntime_call_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cQsubI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPindOffset13OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cPindOffset13OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: machnode.o; +text: .text%__1cPindOffset13OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cICmpPNodeDsub6kMpknEType_3_3_; +text: .text%__1cKBufferBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cHMemNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cIAddINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIGraphKitQkill_dead_locals6M_v_; +text: .text%__1cCosMvm_page_size6F_i_; +text: .text%__1cHRegMaskPfind_first_pair6kM_nHOptoRegEName__; +text: .text%__1cMloadConLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cWShouldNotReachHereNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cRlock_ptr_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cVcompP_iRegP_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUcompI_iReg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: bytecode.o; +text: .text%__1cNloadRangeNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_; +text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cLklassItableToop_adjust_pointers6M_v_; +text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_; +text: .text%__1cLklassItableToop_follow_contents6M_v_; +text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cNSafePointNodeGOpcode6kM_i_; +text: .text%__1cWShouldNotReachHereNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: ad_sparc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: memnode.o; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: memnode.o; +text: .text%__1cOGenerateOopMapPjump_targets_do6MpnOBytecodeStream_pFp0ipi_v4_i_; +text: .text%__1cPcompP_iRegPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshlI_reg_imm5NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadPNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMPhaseChaitinPset_was_spilled6MpnENode__v_; +text: .text%__1cYDebugInformationRecorderOdescribe_scope6MpnIciMethod_ipnKDebugToken_44_v_; +text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__; +text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfoRec.o; +text: .text%__1cIHaltNodeGpinned6kM_i_: classes.o; +text: .text%__1cMloadConPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNMachIdealNodeJnum_opnds6kM_I_: ad_sparc.o; +text: .text%__1cIGraphKit2t6MpnIJVMState__v_; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: cfgnode.o; +text: .text%__1cPsp_ptr_RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cPconvI2L_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKbranchNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cQPreserveJVMState2T6M_v_; +text: 
.text%__1cQPreserveJVMState2t6MpnIGraphKit_i_v_; +text: .text%__1cIGraphKitRnull_check_common6MpnENode_nJBasicType_i_2_; +text: .text%__1cPcompP_iRegPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMWarmCallInfoHis_cold6kM_i_; +text: .text%__1cLCastP2INodeGOpcode6kM_i_; +text: .text%__1cRshrI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cTCreateExceptionNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddP_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cXmembar_release_lockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMloadConLNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Copy6kM_I_: machnode.o; +text: .text%__1cFMutexNowned_by_self6kM_i_; +text: .text%__1cLConvI2LNodeGOpcode6kM_i_; +text: .text%__1cITypeLongFxmeet6kMpknEType__3_; +text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_; +text: .text%__1cSaddI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowNmake_range_at6Mi_pn0AFRange__; +text: .text%__1cFParseMmerge_common6Mpn0AFBlock_i_v_; +text: .text%__1cOCallRelocationFvalue6M_pC_: codeBlob.o; +text: .text%__1cENodeHis_Type6M_pnITypeNode__: classes.o; +text: .text%__1cPciInstanceKlassYunique_concrete_subklass6M_p0_; +text: .text%__1cENodeQlatency_from_use6kMrnLBlock_Array_rnNGrowableArray4CI__pk0p0_i_; +text: .text%__1cLBoxLockNodeHsize_of6kM_I_; +text: .text%__1cOPhaseIdealLoopIset_idom6MpnENode_2I_v_; +text: .text%__1cJStoreNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHTypeAryCeq6kMpknEType__i_; +text: .text%__1cJStartNodeIis_Start6M_p0_: callnode.o; +text: .text%JVM_GetCPFieldClassNameUTF; +text: .text%__1cHCompileKTracePhase2t6MpkcpnMelapsedTimer_i_v_; +text: .text%__1cMPhaseIterGVNHmakecon6MpknEType__pnHConNode__; +text: .text%__1cSaddI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNLoadKlassNodeGOpcode6kM_i_; +text: .text%__1cRcmpFastUnlockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJLoadCNodeGOpcode6kM_i_; +text: .text%__1cMTypeKlassPtrCeq6kMpknEType__i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: memnode.o; +text: .text%__1cHciFieldJwill_link6MpnPciInstanceKlass_nJBytecodesECode__i_; +text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: node.o; +text: .text%__1cQciByteCodeStreamJget_field6Mri_pnHciField__; +text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__; +text: .text%__1cOcompI_iRegNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMergeMemStreamOnext_non_empty6Mi_i_: graphKit.o; +text: .text%__1cRshlI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_; +text: .text%__1cTCreateExceptionNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cGBitMapVset_union_with_result6M0_i_; +text: .text%__1cICmpINodeDsub6kMpknEType_3_3_; +text: .text%__1cLRShiftINodeGOpcode6kM_i_; +text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o; +text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: memnode.o; +text: .text%__1cSCallLeafDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: frame.o; +text: .text%__1cPcheckCastPPNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cOPhaseIdealLoopQconditional_move6MpnENode__2_; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_: callnode.o; +text: .text%__1cFframeOis_first_frame6kM_i_; +text: .text%__1cJStoreNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cITypeFuncEmake6FpnIciMethod__pk0_; +text: .text%__1cICodeBlobTfix_oop_relocations6M_v_; +text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_; +text: .text%__1cJloadSNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKStoreCNodeGOpcode6kM_i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: subnode.o; +text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_; +text: .text%__1cMstringStreamFwrite6MpkcI_v_; +text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__; +text: .text%__1cHRetNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPcmpFastLockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_; +text: .text%__1cIBoolNodeJideal_reg6kM_I_: subnode.o; +text: .text%__1cHCmpNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cRcmpFastUnlockNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRloadConP_pollNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cETypeFwiden6kMpk0_2_: type.o; +text: .text%__1cRcmpFastUnlockNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cILoadNodeRraise_bottom_type6MpknEType__v_; +text: .text%__1cLstoreI0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cQciByteCodeStreamKget_method6Mri_pnIciMethod__; +text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__; +text: .text%__1cMloadConPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__; +text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cNSafePointNodeGpinned6kM_i_: classes.o; +text: .text%__1cPcompP_iRegPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cObranchConPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCodeCacheQfind_blob_unsafe6Fpv_pnICodeBlob__; +text: .text%__1cNloadRangeNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNCatchProjNodeLbottom_type6kM_pknEType__: cfgnode.o; +text: .text%__1cNCatchProjNodeHsize_of6kM_I_: cfgnode.o; +text: .text%__1cFStateK_sub_Op_If6MpknENode__v_; +text: .text%__1cIciMethodbCinterpreter_invocation_count6M_i_; +text: .text%__1cTciConstantPoolCacheDget6Mi_pv_; +text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: subnode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: subnode.o; +text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cWConstantPoolCacheEntryJset_field6MnJBytecodesECode_2nLKlassHandle_iinITosState_ii_v_; +text: .text%__1cKCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_ipnGThread__v_; +text: .text%__1cKciTypeFlowFRangeNget_block_for6Mpn0AGJsrSet_n0AMCreateOption__pn0AFBlock__; +text: .text%__1cQsubI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cXmembar_acquire_lockNodeIpipeline6kM_pknIPipeline__; +text: 
.text%__1cQaddP_reg_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cKCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPCountedLoopNodeGOpcode6kM_i_; +text: .text%__1cUGenericGrowableArrayMraw_contains6kMpknEGrET__i_; +text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: systemDictionary.o; +text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: subnode.o; +text: .text%__1cIAndLNodeGOpcode6kM_i_; +text: .text%__1cIGraphKitOset_all_memory6MpnENode__v_; +text: .text%__1cENodeHis_Goto6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_; +text: .text%__1cHnmethodIis_alive6kM_i_: nmethod.o; +text: .text%__1cFParseFBlockKinit_graph6Mp0_v_; +text: .text%__1cMTypeKlassPtrEmake6FnHTypePtrDPTR_pnHciKlass_i_pk0_; +text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__; +text: .text%__1cRshrI_reg_imm5NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOkill_dead_code6FpnENode_pnMPhaseIterGVN__i_: node.o; +text: .text%__1cMPrefetchNodeGOpcode6kM_i_; +text: .text%__1cCosGmalloc6FI_pv_; +text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_; +text: .text%__1cIGraphKitTadd_exception_state6MpnNSafePointNode__v_; +text: .text%__1cIimmPOperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: cfgnode.o; +text: .text%__1cRRawBytecodeStreamMset_interval6Mii_v_; +text: .text%__1cIregDOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cKstoreINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cICodeHeapLheader_size6F_I_; +text: .text%__1cQsubI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: connode.o; +text: .text%__1cFciEnvIis_in_vm6F_i_; +text: .text%__1cIMachNodeJis_MachIf6kM_pknKMachIfNode__: ad_sparc_misc.o; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: thread.o; +text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_; +text: .text%__1cKciTypeFlowLStateVectorEmeet6Mpk1_i_; +text: .text%__1cNbranchConNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadRangeNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFParseMdo_one_block6M_v_; +text: .text%__1cOPhaseIdealLoopRregister_new_node6MpnENode_2_v_; +text: .text%__1cLstoreB0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: callnode.o; +text: .text%__1cIAddINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIJVMStateKclone_deep6kM_p0_; +text: .text%__1cIJVMStateLdebug_depth6kM_I_; +text: .text%__1cENodeNadd_req_batch6Mp0I_v_; +text: .text%__1cIGraphKitTadd_safepoint_edges6MpnNSafePointNode_i_v_; +text: .text%__1cKciTypeFlowLStateVectorOpush_translate6MpnGciType__v_; +text: .text%__1cJloadFNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: node.o; +text: .text%__1cMMachCallNodeHis_Call6M_pnICallNode__: ad_sparc_misc.o; +text: .text%__1cPVirtualCallDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cIMachNodeOpipeline_class6F_pknIPipeline__; +text: .text%__1cNloadRangeNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMindirectOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cHMatcherScalling_convention6FpnLRegPair_Ii_v_; +text: 
.text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cIPhaseGVNUtransform_no_reclaim6MpnENode__2_; +text: .text%__1cIAddLNodeGOpcode6kM_i_; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_nGHandle_2ipnGThread__pnMklassOopDesc__; +text: .text%__1cLLShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOMethodLivenessKBasicBlockJpropagate6Mp0_v_; +text: .text%__1cFciEnvWget_klass_by_name_impl6MpnHciKlass_pnIciSymbol_i_2_; +text: .text%__1cKciTypeFlowGJsrSet2t6MpnFArena_i_v_; +text: .text%__1cRMachSafePointNode2t6M_v_; +text: .text%__1cHMatcherKmatch_sfpt6MpnNSafePointNode__pnIMachNode__; +text: .text%__1cMFastLockNodeGOpcode6kM_i_; +text: .text%__1cRInlineCacheBufferIcontains6FpC_i_; +text: .text%__1cLConvL2INodeGOpcode6kM_i_; +text: .text%__1cIXorINodeGOpcode6kM_i_; +text: .text%__1cICallNodeOis_CallRuntime6kM_pknPCallRuntimeNode__: callnode.o; +text: .text%__1cMVirtualSpaceOcommitted_size6kM_I_; +text: .text%__1cJloadCNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cXinsert_anti_dependences6FrpnFBlock_pnENode_rnLBlock_Array__i_: gcm.o; +text: .text%__1cPorI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcompU_iRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPorI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKTypeAryPtrFklass6kM_pnHciKlass__; +text: .text%__1cIGraphKitbDtransfer_exceptions_into_jvms6M_pnIJVMState__; +text: .text%__1cOPhaseIdealLoopHdom_lca6kMpnENode_2_2_; +text: .text%__1cLTypeInstPtrFxdual6kM_pknEType__; +text: .text%__1cNLoadRangeNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHMatcherPc_frame_pointer6kM_nHOptoRegEName__; +text: .text%__1cFBlockKsched_call6MrnHMatcher_rnLBlock_Array_IrnJNode_List_pipnMMachCallNode_rnJVectorSet__I_; +text: .text%__1cSsafePoint_pollNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMMachCallNode2t6M_v_; +text: .text%__1cILoadNodeHsize_of6kM_I_; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: methodLiveness.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodKlass.o; +text: .text%__1cRInterpretedRFrameKtop_method6kM_nMmethodHandle__: rframe.o; +text: .text%__1cICmpPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIGraphKitJsync_jvms6kM_pnIJVMState__; +text: .text%__1cICmpUNodeDsub6kMpknEType_3_3_; +text: .text%__1cHnmethodOis_not_entrant6kM_i_: nmethod.o; +text: .text%__1cNprefetch2NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cEUTF8Hstrrchr6FpWiW_1_; +text: .text%__1cIciMethodRhas_compiled_code6M_i_; +text: .text%__1cPcompP_iRegPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPsp_ptr_RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_; +text: .text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cLOptoRuntimePnew_typeArray_C6FnJBasicType_ipnKJavaThread__v_; +text: .text%__1cRshrP_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLBoxLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXvirtual_call_RelocationIparse_ic6FrpnICodeBlob_rpC5rppnHoopDesc_pi_nNRelocIterator__; +text: .text%__1cITypeLongEmake6Fxxi_pk0_; +text: .text%__1cRloadConP_pollNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadKlassNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%JVM_GetCPMethodNameUTF; +text: .text%__1cMtlsLoadPNodeErule6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cLstoreB0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRloadConP_pollNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIimmIOperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cLstoreI0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: cfgnode.o; +text: .text%__1cNSharedRuntimeEldiv6Fxx_x_; +text: .text%__1cHBitDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cURethrowExceptionNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cQsubI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: ciTypeFlow.o; +text: .text%__1cKReturnNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cJloadBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_; +text: .text%__1cQaddP_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKmethodOperGmethod6kM_i_: ad_sparc.o; +text: .text%__1cQjava_lang_StringQbasic_create_oop6FpnQtypeArrayOopDesc_ipnGThread__pnHoopDesc__; +text: .text%__1cFKlassIsubklass6kM_p0_; +text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o; +text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_: frame.o; +text: .text%__1cENodeHis_Goto6kM_I_: cfgnode.o; +text: .text%__1cICmpINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPemit_call_reloc6FrnKCodeBuffer_inJrelocInfoJrelocType_iii_v_; +text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__; +text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_; +text: .text%__1cIMulLNodeGOpcode6kM_i_; +text: .text%__1cKReturnNodeKmatch_edge6kMI_I_; +text: .text%__1cENodeHis_Call6M_pnICallNode__: callnode.o; +text: .text%__1cILoopNodeHis_Loop6M_p0_: classes.o; +text: .text%__1cGOopMap2t6Mii_v_; +text: .text%__1cJOopMapSetKadd_gc_map6MiipnGOopMap__v_; +text: .text%__1cNloadConP0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJLoadSNodeGOpcode6kM_i_; +text: .text%__1cLPCTableNodeLbottom_type6kM_pknEType__; +text: .text%__1cKBranchDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cMCreateExNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cRloadConP_pollNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_; +text: .text%__1cENodeJis_Region6kM_pknKRegionNode__: connode.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: connode.o; +text: .text%__1cRcmpFastUnlockNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadLNodeGOpcode6kM_i_; +text: .text%__1cMciMethodDataLhas_trap_at6MpnLProfileData_i_i_; +text: .text%__1cPThreadLocalNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOMacroAssemblerZneeds_explicit_null_check6Fi_i_; +text: .text%__1cSaddI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKReturnNodeGOpcode6kM_i_; +text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: frame.o; +text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: callnode.o; +text: .text%__1cNflagsRegUOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIGraphKitbLset_predefined_input_for_runtime_call6MpnNSafePointNode__v_; +text: .text%__1cRshlI_reg_imm5NodeHtwo_adr6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cMloadConINodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cNCatchProjNodeEhash6kM_I_; +text: .text%__1cEUTF8Ounicode_length6Fpkci_i_; +text: .text%__1cHCompileZintrinsic_insertion_index6MpnIciMethod_i_i_; +text: .text%__1cTOopMapForCacheEntryRpossible_gc_point6MpnOBytecodeStream__i_; +text: .text%__1cYDebugInformationRecorderNadd_safepoint6MiipnGOopMap__v_; +text: .text%__1cHCompileTProcess_OopMap_Node6MpnIMachNode_i_v_; +text: .text%__1cRMachSafePointNodePis_MachCallLeaf6M_pnQMachCallLeafNode__: ad_sparc_misc.o; +text: .text%__1cRMachSafePointNodeLset_oop_map6MpnGOopMap__v_: ad_sparc_misc.o; +text: .text%__1cXjava_lang_ref_ReferenceNreferent_addr6FpnHoopDesc__p2_; +text: .text%__1cNCallGenerator2t6MpnIciMethod__v_; +text: .text%__1cRMachSafePointNodeSis_MachCallRuntime6M_pnTMachCallRuntimeNode__: ad_sparc_misc.o; +text: .text%__1cKCompiledIC2t6MpnKRelocation__v_; +text: .text%__1cNMachIdealNodePoper_input_base6kM_I_: machnode.o; +text: .text%__1cSCallLeafDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHRegMaskQis_aligned_Pairs6kM_i_; +text: .text%__1cKCompiledICOic_destination6kM_pC_; +text: .text%__1cHTypeAryFxmeet6kMpknEType__3_; +text: .text%__1cPClassFileParserbCverify_legal_field_modifiers6MiipnGThread__v_; +text: .text%__1cPClassFileParserWparse_field_attributes6MnSconstantPoolHandle_iHpHpi2pnPtypeArrayHandle_pnGThread__v_; +text: .text%__1cICallNodeJideal_reg6kM_I_: callnode.o; +text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__; +text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_; +text: .text%__1cPmethodDataKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cKstorePNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowQadd_to_work_list6Mpn0AFBlock__v_; +text: .text%__1cKciTypeFlowKflow_block6Mpn0AFBlock_pn0ALStateVector_pn0AGJsrSet__v_; +text: .text%__1cKciTypeFlowFBlockKsuccessors6MpnQciByteCodeStream_pn0ALStateVector_pn0AGJsrSet__pnNGrowableArray4Cp1___; +text: .text%__1cRMachSafePointNodeRis_safepoint_node6kM_i_: ad_sparc_misc.o; +text: .text%__1cEUTF8Enext6FpkcpH_pc_; +text: .text%__1cJVectorSetFClear6M_v_; +text: .text%__1cHCompileSflatten_alias_type6kMpknHTypePtr__3_; +text: .text%__1cMCallJavaNodeLis_CallJava6kM_pk0_: callnode.o; +text: .text%__1cQMachCallJavaNodePis_MachCallJava6M_p0_: ad_sparc_misc.o; +text: .text%__1cCosEfree6Fpv_v_; +text: .text%__1cICallNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cKTypeOopPtrFempty6kM_i_; +text: .text%__1cKciTypeFlowFBlock2t6Mp0pn0AFRange_pn0AGJsrSet__v_; +text: .text%__1cRshrI_reg_imm5NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPcmpFastLockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cYciExceptionHandlerStreamFcount6M_i_; +text: .text%__1cKciTypeFlowFBlockScompute_exceptions6M_v_; +text: .text%__1cIPhaseIFGFUnion6MII_v_; +text: .text%__1cLstoreB0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cYCallStaticJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cWMachCallStaticJavaNodeVis_MachCallStaticJava6M_p0_: ad_sparc_misc.o; +text: .text%__1cILoopNodeGOpcode6kM_i_; +text: .text%__1cRMachSafePointNodeWis_MachCallInterpreter6M_pnXMachCallInterpreterNode__: ad_sparc_misc.o; +text: .text%__1cICmpLNodeGOpcode6kM_i_; +text: .text%__1cOPhaseIdealLoopGspinup6MpnENode_2222pnLsmall_cache__2_; +text: .text%__1cQaddI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMindIndexOperJnum_edges6kM_I_: ad_sparc.o; +text: 
.text%__1cIConLNodeGOpcode6kM_i_; +text: .text%__1cMLinkResolverbNlinktime_resolve_virtual_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cJloadCNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%JVM_GetCPFieldSignatureUTF; +text: .text%__1cFframeQoops_do_internal6MpnKOopClosure_pnLRegisterMap_i_v_; +text: .text%__1cMLinkResolverbEruntime_resolve_virtual_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cPCountedLoopNodeOis_CountedLoop6M_p0_: classes.o; +text: .text%__1cENodeLnonnull_req6kM_p0_; +text: .text%__1cGciTypeMis_classless6kM_i_: ciType.o; +text: .text%__1cFKlassXcan_be_statically_bound6FpnNmethodOopDesc__i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: connode.o; +text: .text%__1cHnmethodZsize_of_exception_handler6F_i_; +text: .text%__1cYCallStaticJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYCallStaticJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cMelapsedTimerFstart6M_v_; +text: .text%__1cMelapsedTimerEstop6M_v_; +text: .text%__1cQandL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMURShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSaddP_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cObranchConPNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopOfind_use_block6MpnENode_22222_2_; +text: .text%__1cOPhaseIdealLoopKhandle_use6MpnENode_2pnLsmall_cache_22222_v_; +text: .text%__1cOMethodLivenessNmake_block_at6Mipn0AKBasicBlock__2_; +text: .text%__1cPorI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%jni_DeleteLocalRef: jni.o; +text: .text%__1cIGraphKit2t6M_v_; +text: .text%__1cMoutputStreamDput6Mc_v_; +text: .text%__1cIGraphKitNset_map_clone6MpnNSafePointNode__v_; +text: .text%__1cRInterpretedRFrameEinit6M_v_; +text: .text%__1cHMulNodeEhash6kM_I_; +text: .text%__1cENodeJset_req_X6MIp0pnMPhaseIterGVN__v_; +text: .text%__1cSInterpreterRuntimeDldc6FpnKJavaThread_i_v_; +text: .text%__1cJLoadINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cINodeHashLhash_insert6MpnENode__v_; +text: .text%__1cHTypeIntEmake6Fii_pk0_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: ad_sparc.o; +text: .text%__1cKstoreCNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlassKlass.o; +text: .text%__1cNSafePointNodeEhash6kM_I_: callnode.o; +text: .text%__1cENodeLbottom_type6kM_pknEType__; +text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__; +text: .text%__1cKstoreCNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMCreateExNodeGpinned6kM_i_: classes.o; +text: .text%__1cIAddPNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__; +text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cFParsePdo_field_access6Mii_v_; +text: .text%__1cKMemBarNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cGvframe2t6MpknFframe_pknLRegisterMap_pnKJavaThread__v_; +text: .text%__1cLRegisterMap2t6Mpk0_v_; +text: .text%__1cXmembar_acquire_lockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNloadKlassNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cQmark_inner_loops6FpnIPhaseCFG_pnFBlock__v_: block.o; +text: .text%__1cILoadNodeEmake6FpnENode_22pknHTypePtr_pknEType_nJBasicType__p0_; +text: 
.text%__1cICallNodeSis_CallDynamicJava6kM_pknTCallDynamicJavaNode__: callnode.o; +text: .text%__1cJOopMapSetSfind_map_at_offset6kMii_pnGOopMap__; +text: .text%__1cICodeBlobbAoop_map_for_return_address6MpCi_pnGOopMap__; +text: .text%__1cNmethodOopDescWwas_executed_more_than6kMi_i_; +text: .text%__1cRshrI_reg_imm5NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcompI_iRegNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIciSymbolEmake6Fpkc_p0_; +text: .text%__1cPorI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLPhaseValuesHzerocon6MnJBasicType__pnHConNode__; +text: .text%__1cGPcDesc2t6Miii_v_; +text: .text%__1cHnmethodKcopy_pc_at6MipnGPcDesc__v_; +text: .text%__1cHCompileKalias_type6MpnHciField__pn0AJAliasType__; +text: .text%__1cGvframeKnew_vframe6FpknFframe_pknLRegisterMap_pnKJavaThread__p0_; +text: .text%__1cPconvI2L_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMtlsLoadPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cYcompareAndSwapL_boolNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAddINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIciMethodRget_flow_analysis6M_pnKciTypeFlow__; +text: .text%__1cWCallLeafNoFPDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSmembar_acquireNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKbranchNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cKbranchNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXjava_lang_ref_ReferencePdiscovered_addr6FpnHoopDesc__p2_; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: loopnode.o; +text: .text%__1cOloadConI13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSaddL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%jni_GetObjectField: jni.o; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_: methodDataOop.o; +text: .text%__1cSMemBarCPUOrderNodeGOpcode6kM_i_; +text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__; +text: .text%__1cJFieldTypeYis_valid_array_signature6FpnNsymbolOopDesc__i_; +text: .text%__1cQandL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cQaddL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadKlassNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPmethodDataKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cPmethodDataKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cJloadBNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNbranchConNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRegionNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: ad_sparc_misc.o; +text: .text%__1cYcompareAndSwapL_boolNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: machnode.o; +text: .text%__1cRMachNullCheckNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cYinlineCallClearArrayNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConP0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitJmake_load6MpnENode_2pknEType_nJBasicType_i_2_; +text: .text%__1cOPhaseIdealLoopIsink_use6MpnENode_2_v_; +text: .text%__1cIGraphKitOreplace_in_map6MpnENode_2_v_; +text: .text%__1cENodeGis_Con6kM_I_: callnode.o; +text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cIGraphKitNuncommon_trap6MipnHciKlass_pkci_v_; +text: .text%__1cHCompileKTracePhase2T6M_v_; +text: 
.text%__1cMPhaseChaitinLclone_projs6MpnFBlock_IpnENode_4rI_i_; +text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMkpnNmethodOopDesc_i_pnHnmethod__; +text: .text%__1cIJVMState2t6MpnIciMethod_p0_v_; +text: .text%__1cMLinkResolverbEruntime_resolve_special_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_4ipnGThread__v_; +text: .text%__1cMLinkResolverbFlinktime_resolve_special_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cIHaltNode2t6MpnENode_2_v_; +text: .text%__1cLOptoRuntimeSuncommon_trap_Type6F_pknITypeFunc__; +text: .text%__1cJloadLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNloadConP0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowGJsrSetNapply_control6Mp0pnQciByteCodeStream_pn0ALStateVector__v_; +text: .text%__1cSsafePoint_pollNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cINodeHashJhash_find6MpknENode__p1_; +text: .text%__1cQmulL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSaddP_reg_imm13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cOMethodLivenessKBasicBlock2t6Mp0ii_v_; +text: .text%__1cOMethodLivenessKBasicBlockQcompute_gen_kill6MpnIciMethod__v_; +text: .text%__1cQciByteCodeStreamZget_declared_field_holder6M_pnPciInstanceKlass__; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: classes.o; +text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_; +text: .text%__1cJTypeTupleKmake_range6FpnLciSignature__pk0_; +text: .text%__1cJTypeTupleLmake_domain6FpnPciInstanceKlass_pnLciSignature__pk0_; +text: .text%__1cSmembar_acquireNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cObranchConUNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cXjava_lang_ref_ReferenceJnext_addr6FpnHoopDesc__p2_; +text: .text%__1cMWarmCallInfoGis_hot6kM_i_; +text: .text%__1cMWarmCallInfoKalways_hot6F_p0_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6FnUtypeArrayKlassHandle_iipnGThread__pnMklassOopDesc__; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__; +text: .text%__1cSCompareAndSwapNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cLRethrowNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cTmembar_CPUOrderNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPcmpFastLockNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cTCreateExceptionNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKciTypeFlowLStateVectorJdo_invoke6MpnQciByteCodeStream_i_v_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: connode.o; +text: .text%__1cQmulL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPcmpFastLockNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreB0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadBNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cQaddP_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMtlsLoadPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLBoxLockNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cFTypeDCeq6kMpknEType__i_; +text: .text%__1cITypeLongEmake6Fx_pk0_; +text: .text%__1cKJavaThreadbHcheck_and_handle_async_exceptions6Mi_v_; +text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: memnode.o; +text: .text%__1cMPhaseChaitinTsplit_Rematerialize6MpnENode_pnFBlock_IrInNGrowableArray4CI__ipIp2i_2_; +text: .text%__1cKimmI13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cJloadBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIGraphKitZadd_exception_states_from6MpnIJVMState__v_; +text: 
.text%__1cQandL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseKdo_get_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cMPhaseChaitinNFind_compress6MpknENode__I_; +text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNCatchProjNode2t6MpnENode_Ii_v_; +text: .text%__1cPVirtualCallDataSis_VirtualCallData6M_i_: ciMethodData.o; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: cfgnode.o; +text: .text%__1cHPhiNodeEmake6FpnENode_2_p0_; +text: .text%__1cNCatchProjNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cWCallLeafNoFPDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassOis_subclass_of6kMpnMklassOopDesc__i_; +text: .text%__1cLPCTableNodeHsize_of6kM_I_: classes.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: cfgnode.o; +text: .text%__1cLPCTableNodeKis_PCTable6kM_pk0_: classes.o; +text: .text%__1cNciCallProfileRapply_prof_factor6Mf_v_; +text: .text%__1cIciMethodTcall_profile_at_bci6Mi_nNciCallProfile__; +text: .text%__1cHCompileOcall_generator6MpnIciMethod_ipnIJVMState_if_pnNCallGenerator__; +text: .text%__1cHCompileOfind_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cIProjNodeDcmp6kMrknENode__I_; +text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_; +text: .text%__1cLLShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFParseMprofile_call6MpnENode__v_; +text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__; +text: .text%__1cIGraphKitWround_double_arguments6MpnIciMethod__v_; +text: .text%__1cQciByteCodeStreambAget_declared_method_holder6M_pnHciKlass__; +text: .text%__1cIGraphKitTround_double_result6MpnIciMethod__v_; +text: .text%__1cFParseHdo_call6M_v_; +text: .text%__1cNloadConP0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIregFOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cHMulNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMPhaseIterGVNJtransform6MpnENode__2_; +text: .text%__1cHTypeIntFwiden6kMpknEType__3_; +text: .text%__1cQxorI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsafePoint_pollNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadSNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKarrayKlassLobject_size6kMi_i_; +text: .text%__1cIciMethodLscale_count6Mi_i_; +text: .text%__1cKMemBarNodeEhash6kM_I_; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlass.o; +text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_; +text: .text%__1cMURShiftLNodeGOpcode6kM_i_; +text: .text%__1cOCompilerOracleOshould_exclude6FnMmethodHandle__i_; +text: .text%__1cIGraphKitUmake_exception_state6MpnENode__pnNSafePointNode__; +text: .text%__1cLProfileDataOtranslate_from6Mp0_v_: ciMethodData.o; +text: .text%__1cLstoreI0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKBranchDataNis_BranchData6M_i_: ciMethodData.o; +text: .text%__1cKRegionNodeGpinned6kM_i_: loopnode.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: memnode.o; +text: .text%__1cLBuildCutout2t6MpnIGraphKit_pnENode_ff_v_; +text: 
.text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: graphKit.o; +text: .text%__1cTCompareAndSwapLNodeGOpcode6kM_i_; +text: .text%__1cNloadRangeNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQxorI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIJumpDataLis_JumpData6M_i_: ciMethodData.o; +text: .text%__1cMMergeMemNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLLShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNflagsRegLOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cCosOis_interrupted6FpnGThread_i_i_; +text: .text%__1cLmethodKlassNoop_is_method6kM_i_: methodKlass.o; +text: .text%__1cQsubI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXmembar_release_lockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: memnode.o; +text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__; +text: .text%__1cRshrI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeOis_CountedLoop6M_pnPCountedLoopNode__: loopnode.o; +text: .text%__1cTcan_branch_register6FpnENode_1_i_; +text: .text%__1cQCallLeafNoFPNodeGOpcode6kM_i_; +text: .text%__1cMURShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKstoreCNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHConNodeEmake6FpknEType__p0_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: memnode.o; +text: .text%__1cFStateM_sub_Op_ConP6MpknENode__v_; +text: .text%__1cRshrP_reg_imm5NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitMsaved_ex_oop6FpnNSafePointNode__pnENode__; +text: .text%__1cISubINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPciInstanceKlassFsuper6M_p0_; +text: .text%__1cIBoolNodeHsize_of6kM_I_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: callnode.o; +text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_: objArrayKlassKlass.o; +text: .text%__1cPcompP_iRegPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSconvI2D_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotLOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cJloadPNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSsafePoint_pollNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__; +text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__; +text: .text%__1cHMonitorKnotify_all6M_i_; +text: .text%__1cNSignatureInfoGdo_int6M_v_: bytecode.o; +text: .text%__1cOstackSlotLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIciMethodbAinterpreter_throwout_count6kM_i_; +text: .text%__1cOCompilerOracleNshould_inline6FnMmethodHandle__i_; +text: .text%__1cIciMethodOshould_exclude6M_i_; +text: .text%__1cKInlineTreeMok_to_inline6MpnIciMethod_pnIJVMState_rnNciCallProfile_pnMWarmCallInfo__8_; +text: .text%__1cKInlineTreeWfind_subtree_from_root6Fp0pnIJVMState_pnIciMethod_i_1_; +text: .text%__1cIciMethodNshould_inline6M_i_; +text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__; +text: .text%__1cGThreadXclear_pending_exception6M_v_; +text: .text%__1cICodeHeapSallocated_capacity6kM_I_; +text: .text%__1cSstkL_to_regD_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescbHhas_unloaded_classes_in_signature6FnMmethodHandle_pnGThread__i_; +text: .text%__1cTmembar_CPUOrderNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: connode.o; +text: .text%__1cNprefetch2NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cICHeapObj2n6FI_pv_; +text: .text%__1cQaddI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJStartNodeIis_Start6M_p0_: classes.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodKlass.o; +text: .text%__1cWCallLeafNoFPDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cCosFsleep6FpnGThread_xi_i_; +text: .text%__1cIos_sleep6Fxi_i_: os_solaris.o; +text: .text%__1cSstkL_to_regD_2NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddP_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFTypeDEhash6kM_i_; +text: .text%__1cKTypeRawPtrHget_con6kM_i_; +text: .text%__1cJStartNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%jni_ExceptionOccurred: jni.o; +text: .text%__1cPconvI2L_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowLStateVectorStype_meet_internal6FpnGciType_3p0_3_; +text: .text%__1cMloadConINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGIfNodeHsize_of6kM_I_: classes.o; +text: .text%__1cPconvL2I_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIimmLOperJconstantL6kM_x_: ad_sparc_clone.o; +text: .text%__1cTStackWalkCompPolicyRcompilation_level6MnMmethodHandle_i_i_; +text: .text%jni_GetByteArrayRegion: jni.o; +text: .text%__1cIGraphKitTset_all_memory_call6MpnENode__v_; +text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_: statSampler.o; +text: .text%__1cHCompileFstart6kM_pnJStartNode__; +text: .text%__1cRis_error_reported6F_i_; +text: .text%__1cNWatcherThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cPStatSamplerTaskEtask6M_v_: statSampler.o; +text: .text%__1cMPeriodicTaskOreal_time_tick6FI_v_; +text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__; +text: .text%__1cIParmNodeJideal_reg6kM_I_; +text: .text%__1cQandL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIMachNodeRget_base_and_disp6kMrirpknHTypePtr__pknENode__; +text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cIregFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cHCompilebAallow_range_check_smearing6kM_i_; +text: .text%__1cRbranchLoopEndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cGRFrame2t6MnFframe_pnKJavaThread_kp0_v_; +text: .text%__1cIciMethodWwas_executed_more_than6Mi_i_; +text: .text%jni_GetArrayLength: jni.o; +text: .text%__1cIMachNodeHtwo_adr6kM_I_: machnode.o; +text: .text%__1cENodeHis_Proj6M_pnIProjNode__: machnode.o; +text: .text%__1cPciInstanceKlassUget_canonical_holder6Mi_p0_; +text: .text%__1cJloadLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOClearArrayNodeGOpcode6kM_i_; +text: .text%__1cWCallLeafNoFPDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_; +text: .text%__1cVCompressedWriteStreamEgrow6M_v_; +text: .text%__1cPcheckCastPPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_Write; +text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_; +text: .text%__1cIciMethod2t6MnMmethodHandle__v_; +text: .text%__1cIHaltNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cWShouldNotReachHereNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKRelocationJpack_data6M_i_: relocInfo.o; +text: .text%__1cNinstanceKlassVis_same_class_package6FpnHoopDesc_pnNsymbolOopDesc_24_i_; +text: .text%__1cLOpaque1NodeGOpcode6kM_i_; +text: .text%__1cQmulL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cEUTF8Fequal6FpWi1i_i_; +text: .text%__1cSbranchCon_longNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKstoreCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHAddNodePadd_of_identity6kMpknEType_3_3_; +text: .text%__1cUcompU_iReg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cGThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%JVM_RawMonitorEnter; +text: .text%__1cFMutexMjvm_raw_lock6M_v_; +text: .text%JVM_RawMonitorExit; +text: .text%__1cFMutexOjvm_raw_unlock6M_v_; +text: .text%__1cKg1RegIOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: loopnode.o; +text: .text%__1cOMachReturnNodeKin_RegMask6kMI_rknHRegMask__; +text: .text%__1cPClassFileParserUskip_over_field_name6MpciI_1_; +text: .text%__1cMTypeKlassPtrKadd_offset6kMi_pknHTypePtr__; +text: .text%__1cIGraphKitNcast_not_null6MpnENode__2_; +text: .text%__1cWShouldNotReachHereNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitTtoo_many_recompiles6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cPcmpFastLockNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cETypeRget_typeflow_type6FpnGciType__pk0_; +text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlass.o; +text: .text%__1cRcmpFastUnlockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o; +text: .text%__1cURethrowExceptionNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_; +text: .text%__1cIGraphKitOtoo_many_traps6MnODeoptimizationLDeoptReason__i_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: bytecode.o; +text: .text%__1cKBufferBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cSandI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIAddINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cLTypeInstPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cFParseRensure_memory_phi6Mii_pnHPhiNode__; +text: .text%__1cMloadConLNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cFParseFmerge6Mi_v_; +text: .text%__1cFParseUprofile_taken_branch6Mi_v_; +text: .text%__1cNSafePointNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cYcompareAndSwapL_boolNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cWstatic_stub_RelocationJpack_data6M_i_; +text: .text%__1cILoopNodeHis_Loop6M_p0_: loopnode.o; +text: .text%__1cNCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cQxorI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJTypeTupleFxdual6kM_pknEType__; +text: .text%__1cNLoadKlassNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPorI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRethrowNodeGOpcode6kM_i_; +text: .text%__1cYinlineCallClearArrayNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadSNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cYinlineCallClearArrayNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeHeapIcapacity6kM_I_; +text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cMCodeHeapPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cPcmpFastLockNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cNloadKlassNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cFArena2T6M_v_; +text: .text%__1cMLinkResolverTresolve_static_call6FrnICallInfo_rnLKlassHandle_nMsymbolHandle_53iipnGThread__v_; +text: .text%__1cKMemBarNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__; +text: .text%__1cOCallRelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cLRuntimeStubYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cHoopDescSslow_identity_hash6M_i_; +text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__i_; +text: .text%__1cJloadCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIimmPOperPconstant_is_oop6kM_i_: ad_sparc_clone.o; +text: .text%__1cLPCTableNodeEhash6kM_I_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnMklassOopDesc__i_; +text: .text%__1cHConNodeLout_RegMask6kM_rknHRegMask__: classes.o; +text: .text%__1cXPhaseAggressiveCoalesceYinsert_copy_with_overlap6MpnFBlock_pnENode_II_v_; +text: .text%__1cOloadConI13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMtlsLoadPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeGis_Con6kM_I_: multnode.o; +text: .text%__1cQandI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMPhaseChaitinNFind_compress6MI_I_; +text: .text%__1cITypeLongEmake6Fxx_pk0_; +text: .text%__1cMindIndexOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFStateN_sub_Op_LoadP6MpknENode__v_; +text: .text%__1cFframeVinterpreter_frame_bci6kM_i_; +text: .text%__1cJOopMapSetTupdate_register_map6FpknFframe_pnICodeBlob_pnLRegisterMap__v_; +text: .text%__1cNGCTaskManagerIget_task6MI_pnGGCTask__; +text: .text%__1cLGCTaskQdDueueGremove6M_pnGGCTask__; +text: .text%__1cNGCTaskManagerYshould_release_resources6MI_i_; +text: .text%__1cLGCTaskQdDueueHenqueue6MpnGGCTask__v_; +text: .text%__1cNGCTaskManagerPnote_completion6MI_v_; +text: .text%__1cITypeLongFempty6kM_i_; +text: .text%__1cJloadBNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQandI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLklassVtableXvtable_accessibility_at6Mi_n0AKAccessType__; +text: .text%__1cIGraphKitHjava_bc6kM_nJBytecodesECode__; +text: .text%__1cIGraphKitNbuiltin_throw6MnODeoptimizationLDeoptReason_pnENode__v_; +text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__; +text: .text%__1cRinterpretedVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cSmembar_acquireNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%jni_GetSuperclass: jni.o; +text: .text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_; +text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_; +text: .text%__1cCosbCstack_shadow_pages_available6FpnGThread_nMmethodHandle__i_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: ad_sparc.o; +text: .text%__1cIMulINodeGOpcode6kM_i_; +text: .text%__1cKInlineTreePshouldNotInline6kMpnIciMethod_pnMWarmCallInfo__pkc_; +text: .text%__1cRcompL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNloadKlassNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadCNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciMethodbHhas_unloaded_classes_in_signature6M_i_; +text: .text%__1cJloadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGGCTask2t6M_v_; +text: 
.text%__1cJloadSNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_: classes.o; +text: .text%__1cIJumpDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cObranchConUNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cObranchConPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cITypeFuncFxdual6kM_pknEType__; +text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: nmethod.o; +text: .text%__1cKstoreINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKMemBarNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cFStateM_sub_Op_CmpI6MpknENode__v_; +text: .text%__1cJcmpOpOperFccode6kM_i_: ad_sparc_clone.o; +text: .text%__1cKTypeRawPtrCeq6kMpknEType__i_; +text: .text%__1cGciType2t6MnLKlassHandle__v_; +text: .text%__1cHciKlass2t6MnLKlassHandle__v_; +text: .text%__1cKTypeAryPtrEmake6FnHTypePtrDPTR_pknHTypeAry_pnHciKlass_ii_pk0_; +text: .text%__1cMindirectOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFParseRoptimize_inlining6MpnIciMethod_ipnPciInstanceKlass_24irnKInlineTreeLInlineStyle_r2_v_; +text: .text%__1cSPSPromotionManagerbBgc_thread_promotion_manager6Fi_p0_; +text: .text%__1cQxorI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJloadLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIregFOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cKcmpOpPOperFccode6kM_i_: ad_sparc_clone.o; +text: .text%__1cNloadKlassNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHPhiNodeMslice_memory6kMpknHTypePtr__p0_; +text: .text%__1cPCheckCastPPNodeJideal_reg6kM_I_: connode.o; +text: .text%__1cObranchConPNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cObranchConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICHeapObj2k6Fpv_v_; +text: .text%__1cSaddL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRmethodDataOopDescJbci_to_dp6Mi_pC_; +text: .text%__1cMloadConFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRInvocationCounterJset_carry6M_v_; +text: .text%__1cFArena2t6M_v_; +text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_; +text: .text%__1cRInterpreterOopMap2T6M_v_; +text: .text%__1cFframeToops_interpreted_do6MpnKOopClosure_pknLRegisterMap_i_v_; +text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_; +text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_; +text: .text%__1cRInterpreterOopMap2t6M_v_; +text: .text%__1cJloadCNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadSNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cISubINodeDsub6kMpknEType_3_3_; +text: .text%__1cFParseOreturn_current6MpnENode__v_; +text: .text%__1cRsarI_reg_imm5NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIGraphKitPstore_to_memory6MpnENode_22nJBasicType_i_2_; +text: .text%__1cJStoreNodeEmake6FpnENode_22pknHTypePtr_2nJBasicType__p0_; +text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_; +text: .text%__1cLBoxLockNodeKis_BoxLock6kM_pk0_: classes.o; +text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cLBoxLockNodeKstack_slot6FpnENode__nHOptoRegEName__; +text: .text%__1cMloadConLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJStoreNodeSIdeal_masked_input6MpnIPhaseGVN_I_pnENode__; +text: .text%__1cHMatcherPstack_alignment6F_I_; +text: .text%jni_GetPrimitiveArrayCritical: jni.o; +text: .text%jni_ReleasePrimitiveArrayCritical: 
jni.o; +text: .text%__1cPconvI2L_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeIget_long6kM_x_; +text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_; +text: .text%__1cSmembar_releaseNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJimmU5OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_; +text: .text%__1cLOpaque1NodeEhash6kM_I_; +text: .text%__1cJStoreNodeZIdeal_sign_extended_input6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSbranchCon_longNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_; +text: .text%__1cSmembar_releaseNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNbranchConNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRshrI_reg_imm5NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_; +text: .text%__1cKciTypeFlowGJsrSetSis_compatible_with6Mp1_i_; +text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__; +text: .text%__1cZCallDynamicJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJMultiNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKStoreLNodeGOpcode6kM_i_; +text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__; +text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_; +text: .text%__1cNloadKlassNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cETypeCeq6kMpk0_i_; +text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_; +text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_; +text: .text%__1cFParseRbranch_prediction6Mrf_f_; +text: .text%__1cRsarI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cHTypeAryFempty6kM_i_; +text: .text%__1cKTypeAryPtrFempty6kM_i_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodDataKlass.o; +text: .text%__1cOMacroAssemblerFjumpl6MrnHAddress_pnMRegisterImpl_ipkci_v_; +text: .text%__1cOMacroAssemblerEjump6MrnHAddress_ipkci_v_; +text: .text%__1cIciMethodLis_accessor6kM_i_; +text: .text%__1cRbranchLoopEndNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlass.o; +text: .text%__1cQmulL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFframeUentry_frame_is_first6kM_i_; +text: .text%__1cJiRegIOperFclone6kM_pnIMachOper__; +text: .text%__1cLstoreP0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSCallLeafDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRethrowNodeKmatch_edge6kMI_I_; +text: .text%__1cFTypeFEhash6kM_i_; +text: .text%__1cHnmethodbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_; +text: .text%__1cFStateM_sub_Op_AddI6MpknENode__v_; +text: .text%__1cIciMethodPcan_be_compiled6M_i_; +text: .text%__1cOParseGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cFParseQcreate_entry_map6M_pnNSafePointNode__; +text: .text%__1cFArenaEused6kM_I_; +text: .text%__1cFParseLbuild_exits6M_v_; +text: .text%__1cFParseIdo_exits6M_v_; +text: .text%__1cIciMethodVhas_balanced_monitors6M_i_; +text: .text%__1cFParse2t6MpnIJVMState_pnIciMethod_f_v_; +text: .text%__1cIBoolNodeDcmp6kMrknENode__I_; +text: .text%__1cQsubI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParsePdo_method_entry6M_v_; +text: .text%__1cNCallGeneratorKfor_inline6FpnIciMethod_f_p0_; +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_; +text: 
.text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_; +text: .text%__1cQconstMethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQSystemDictionaryVadd_loader_constraint6FnMsymbolHandle_nGHandle_2pnGThread__v_; +text: .text%__1cVLoaderConstraintTableJadd_entry6MnMsymbolHandle_pnMklassOopDesc_nGHandle_34pnGThread__i_; +text: .text%__1cRciVirtualCallDataOtranslate_from6MpnLProfileData__v_; +text: .text%jni_IsSameObject: jni.o; +text: .text%__1cMloadConINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNbranchConNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cNbranchConNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQandL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cLmethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cLsymbolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQaddL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassbDcheck_valid_for_instantiation6MipnGThread__v_; +text: .text%__1cJStartNodeOis_block_start6kM_i_: callnode.o; +text: .text%__1cRsarI_reg_imm5NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciObjectFklass6M_pnHciKlass__; +text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__; +text: .text%__1cSInterpreterRuntimebAfrequency_counter_overflow6FpnKJavaThread_pC_x_; +text: .text%__1cPThreadLocalNodeGOpcode6kM_i_; +text: .text%__1cENodeRlatency_from_uses6kMrnLBlock_Array_rnNGrowableArray4CI___i_; +text: .text%__1cPconvL2I_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cZPhaseConservativeCoalesceKupdate_ifg6MIIpnIIndexSet_2_v_; +text: .text%__1cZPhaseConservativeCoalesceMunion_helper6MpnENode_2II222pnFBlock_I_v_; +text: .text%__1cOMethodLivenessKBasicBlockJstore_one6Mi_v_; +text: .text%__1cIIndexSetEswap6Mp0_v_; +text: .text%__1cHTypeAryEmake6FpknEType_pknHTypeInt__pk0_; +text: .text%__1cXmembar_release_lockNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_; +text: .text%__1cJloadLNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cRshrP_reg_imm5NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKTypeAryPtrFxdual6kM_pknEType__; +text: .text%__1cFBlockTimplicit_null_check6MrnLBlock_Array_rnNGrowableArray4CI__pnENode_6_v_; +text: .text%__1cQandI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFframeNis_glue_frame6kM_i_; +text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_; +text: .text%__1cFParseYprofile_not_taken_branch6M_v_; +text: .text%__1cQComputeCallStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cHciKlassNis_subtype_of6Mp0_i_; +text: .text%__1cbACallCompiledJavaDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: cfgnode.o; +text: .text%__1cRRawBytecodeStream2t6MnMmethodHandle__v_; +text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cKstoreBNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQLibraryIntrinsicKis_virtual6kM_i_: library_call.o; +text: .text%__1cMPrefetchNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKCastPPNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: callnode.o; +text: .text%__1cKstorePNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOPhaseIdealLoopOsplit_thru_phi6MpnENode_2i_2_; +text: .text%__1cYDebugInformationRecorderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_; +text: 
.text%__1cENodeGOpcode6kM_i_; +text: .text%__1cRshrP_reg_imm5NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQandI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMURShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciMethodbBinterpreter_call_site_count6Mi_i_; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cGBitMapIset_from6M0_v_; +text: .text%__1cNmethodOopDescWload_signature_classes6FnMmethodHandle_pnGThread__i_; +text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__; +text: .text%__1cNCompileBrokerTcompile_method_base6FnMmethodHandle_ii1ipkcpnGThread__pnHnmethod__; +text: .text%__1cTconstantPoolOopDescbDresolve_string_constants_impl6FnSconstantPoolHandle_pnGThread__v_; +text: .text%__1cHSubNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cNinstanceKlassPlink_class_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cMloadConFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFChunk2n6FII_pv_; +text: .text%__1cbACallCompiledJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2L_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cTCallDynamicJavaNodeGOpcode6kM_i_; +text: .text%__1cKstoreBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cILoadNodeDcmp6kMrknENode__I_; +text: .text%__1cIciObject2t6M_v_; +text: .text%__1cENodeHdel_out6Mp0_v_: library_call.o; +text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: cfgnode.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: cfgnode.o; +text: .text%__1cRcompL_reg_conNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHCompileXin_preserve_stack_slots6M_I_; +text: .text%__1cPciObjectFactoryUget_empty_methodData6M_pnMciMethodData__; +text: .text%__1cMciMethodData2t6M_v_; +text: .text%__1cLOopRecorderKfind_index6MpnI_jobject__i_; +text: .text%__1cJStartNodeOis_block_start6kM_i_: classes.o; +text: .text%__1cHOrINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFframeLreal_sender6kMpnLRegisterMap__0_; +text: .text%__1cFframeTis_first_java_frame6kM_i_; +text: .text%__1cGRFrameGcaller6M_p0_; +text: .text%__1cFframeNis_java_frame6kM_i_; +text: .text%__1cNprefetch2NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowFBlockPclone_loop_head6Mp0ip1pn0AGJsrSet__3_; +text: .text%__1cPCheckCastPPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cRshrP_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cKRegionNodeJis_Region6kM_pk0_: loopnode.o; +text: .text%__1cFParseFdo_if6MpnENode_2nIBoolTestEmask_2_v_; +text: .text%__1cLCastP2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cXmembar_release_lockNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cJloadINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSandI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: callnode.o; +text: .text%__1cMPrefetchNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_: jni.o; +text: .text%__1cMloadConFNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMCreateExNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJCodeCacheIcontains6Fpv_i_; +text: .text%__1cQaddL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cMCallLeafNodeLis_CallLeaf6kM_pk0_: classes.o; +text: .text%__1cLPCTableNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMCreateExNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIMachNodeJis_MachIf6kM_pknKMachIfNode__: ad_sparc.o; +text: .text%__1cISubINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_; +text: .text%__1cQdivD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNCatchProjNodeDcmp6kMrknENode__I_; +text: .text%__1cIGraphKitRmake_slow_call_ex6MpnENode_pnPciInstanceKlass__v_; +text: .text%__1cKTypeOopPtrEhash6kM_i_; +text: .text%__1cIMinINodeGOpcode6kM_i_; +text: .text%__1cYinlineCallClearArrayNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNflagsRegLOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cMURShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIConINodeHget_int6kMpi_i_: classes.o; +text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: psTasks.o; +text: .text%__1cJOopMapSetGall_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure_pFppnHoopDesc_9E_v9B9B_v_; +text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cJOopMapSetHoops_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cKTypeRawPtrFxmeet6kMpknEType__3_; +text: .text%__1cFBlockUhoist_LCA_above_defs6Mp01IrnLBlock_Array__1_; +text: .text%JVM_GetMethodIxModifiers; +text: .text%__1cMvframeStream2t6MpnKJavaThread_i_v_; +text: .text%__1cQjava_lang_StringGequals6FpnHoopDesc_pHi_i_; +text: .text%__1cIMulLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_: codeBlob.o; +text: .text%__1cVExceptionHandlerTableMadd_subtable6MipnNGrowableArray4Ci__2_v_; +text: .text%__1cPconvI2L_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLLShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cHMulNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cTCreateExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_IsInterface; +text: .text%__1cHciKlassOis_subclass_of6Mp0_i_; +text: .text%__1cJloadCNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRInterpretedRFrameOis_interpreted6kM_i_: rframe.o; +text: .text%__1cGRFrameLis_compiled6kM_i_: rframe.o; +text: .text%__1cPorI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIDivINodeGOpcode6kM_i_; +text: .text%__1cbACallCompiledJavaDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_; +text: .text%__1cICodeHeapIallocate6MI_pv_; +text: .text%__1cICodeHeapPsearch_freelist6MI_pnJFreeBlock__; +text: .text%__1cRcompL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLOpaque1NodeLbottom_type6kM_pknEType__: connode.o; +text: .text%__1cNloadRangeNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQput_after_lookup6FnMsymbolHandle_0ppnLNameSigHash__i_; +text: .text%__1cSconvI2D_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHMatcherQis_spillable_arg6Fi_i_; +text: .text%__1cLRegisterMapFclear6Mpi_v_; +text: .text%__1cLRShiftLNodeGOpcode6kM_i_; +text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__; +text: .text%__1cSCountedLoopEndNodeKstride_con6kM_i_; +text: .text%__1cUPipeline_Use_Element2t6M_v_: output.o; +text: .text%__1cRshrL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHCompileSregister_intrinsic6MpnNCallGenerator__v_; +text: 
.text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_: classes.o; +text: .text%__1cFParseSmerge_memory_edges6MpnMMergeMemNode_ii_v_; +text: .text%__1cNSCMemProjNodeGOpcode6kM_i_; +text: .text%__1cNimmP_pollOperEtype6kM_pknEType__: ad_sparc_clone.o; +text: .text%__1cRloadConP_pollNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSconvI2D_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_; +text: .text%__1cIMulINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPciInstanceKlassLfind_method6MpnIciSymbol_2_pnIciMethod__; +text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_; +text: .text%__1cFciEnvRfind_system_klass6MpnIciSymbol__pnHciKlass__; +text: .text%__1cLRegisterMapIpd_clear6M_v_; +text: .text%__1cSstkL_to_regD_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHUNICODEHas_utf86FpHi_pc_; +text: .text%__1cLstoreP0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_2NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cMnegF_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKo0RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cSaddL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffer_ipnLOopRecorder_pkcii_v_; +text: .text%__1cIGraphKitYcombine_exception_states6MpnNSafePointNode_2_v_; +text: .text%__1cSstkL_to_regD_2NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_ltNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassWfind_field_from_offset6kMiipnPfieldDescriptor__i_; +text: .text%__1cPciInstanceKlassTget_field_by_offset6Mii_pnHciField__; +text: .text%__1cRshrP_reg_imm5NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQaddL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__; +text: .text%__1cSstkL_to_regD_2NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cWstatic_stub_RelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cSstkL_to_regD_2NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: codeBlob.o; +text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_; +text: .text%__1cKstoreLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSvframeStreamCommonbHskip_method_invoke_and_aux_frames6M_v_; +text: .text%__1cRbranchLoopEndNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSconvI2D_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassXmark_dependent_nmethods6MpnMklassOopDesc__i_; +text: .text%__1cDCHANprocess_class6FnLKlassHandle_pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_; +text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cSconvI2D_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_; +text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: 
nmethod.o; +text: .text%__1cUcompU_iReg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitXset_edges_for_java_call6MpnMCallJavaNode_i_v_; +text: .text%__1cOMacroAssemblerNverify_thread6M_v_; +text: .text%__1cJloadLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitZset_results_for_java_call6MpnMCallJavaNode__pnENode__; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: methodDataOop.o; +text: .text%__1cSbranchCon_longNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHnmethodVcleanup_inline_caches6M_v_; +text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_; +text: .text%__1cIAddLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNloadKlassNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFStateO_sub_Op_StoreI6MpknENode__v_; +text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_; +text: .text%__1cRbranchLoopEndNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHciField2t6MpnPfieldDescriptor__v_; +text: .text%__1cMjniIdSupportNto_method_oop6FpnK_jmethodID__pnNmethodOopDesc__; +text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_; +text: .text%__1cMloadConLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZCallInterpreterDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadSNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o; +text: .text%__1cYcompareAndSwapL_boolNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFKlassMnext_sibling6kM_p0_; +text: .text%__1cSbranchCon_longNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__; +text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_; +text: .text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%__1cJimmU5OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cRbranchLoopEndNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: ad_sparc.o; +text: .text%__1cLBlock_ArrayEgrow6MI_v_; +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: loopnode.o; +text: .text%__1cbCcatch_cleanup_fix_all_inputs6FpnENode_11_v_: lcm.o; +text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_; +text: .text%__1cOMacroAssemblerUallocate_oop_address6MpnI_jobject_pnMRegisterImpl__nHAddress__; +text: .text%__1cUThreadSafepointStateXexamine_state_of_thread6Mi_v_; +text: .text%__1cUSafepointSynchronizeOsafepoint_safe6FpnKJavaThread_nPJavaThreadState__i_; +text: .text%__1cUParallelScavengeHeapNtlab_capacity6kM_I_; +text: .text%__1cKcmpOpPOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cObranchConPNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cKstoreBNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_; +text: .text%__1cLBoxLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPcmpFastLockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHRetNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRegionNodeEhash6kM_I_: loopnode.o; +text: .text%__1cPconvL2I_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNnmethodLocker2T6M_v_; +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: 
.text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNnmethodLocker2t6MpnHnmethod__v_; +text: .text%__1cLstoreB0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescOis_initializer6kM_i_; +text: .text%__1cbDcatch_cleanup_find_cloned_def6FpnFBlock_pnENode_1rnLBlock_Array_i_3_: lcm.o; +text: .text%__1cQxorI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCodeBufferMstart_a_stub6M_v_; +text: .text%__1cKstoreLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKCodeBufferKend_a_stub6M_v_; +text: .text%__1cFTypeFCeq6kMpknEType__i_; +text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cQciByteCodeStreamMget_constant6M_nKciConstant__; +text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__; +text: .text%__1cOClearArrayNodeKmatch_edge6kMI_I_; +text: .text%__1cPconvL2I_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadSNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSbranchCon_longNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cOFastUnlockNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_: universe.o; +text: .text%__1cFStateP_sub_Op_LShiftI6MpknENode__v_; +text: .text%__1cQandL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJiRegPOperFclone6kM_pnIMachOper__; +text: .text%__1cPSignatureStreamRas_symbol_or_null6M_pnNsymbolOopDesc__; +text: .text%__1cIregDOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cKMemBarNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cKMemBarNode2t6M_v_; +text: .text%__1cNIdealLoopTreeObeautify_loops6MpnOPhaseIdealLoop__i_; +text: .text%__1cRsarI_reg_imm5NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: frame.o; +text: .text%__1cKimmI13OperFclone6kM_pnIMachOper__; +text: .text%__1cIGraphKitbBset_arguments_for_java_call6MpnMCallJavaNode__v_; +text: .text%__1cIGraphKitJpush_node6MnJBasicType_pnENode__v_: callGenerator.o; +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: bytecode.o; +text: .text%__1cNCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cJcmpOpOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cNprefetch2NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRcompL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_SetObjectArrayElement: jni.o; +text: .text%__1cSandI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPThreadLocalNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNSafePointNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cKMemBarNodeJis_MemBar6kM_pk0_: classes.o; +text: .text%__1cIciMethodXfind_monomorphic_target6MpnHciKlass_22_p0_; +text: .text%__1cJCHAResultOis_monomorphic6kM_i_; +text: .text%__1cDCHAManalyze_call6FnLKlassHandle_11nMsymbolHandle_2_pnJCHAResult__; +text: .text%__1cOloadConI13NodeFclone6kM_pnENode__; +text: .text%__1cObranchConUNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitOinsert_mem_bar6MpnKMemBarNode__v_; +text: .text%__1cLOptoRuntimeOnew_objArray_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cRshlL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cbACallCompiledJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQandI_reg_regNodeErule6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cSandI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_; +text: .text%__1cZresource_reallocate_bytes6FpcII_0_; +text: .text%__1cLConvL2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOAbstractICacheQinvalidate_range6FpCi_v_; +text: .text%__1cKstorePNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMaxINodeGOpcode6kM_i_; +text: .text%__1cIGraphKitMarray_length6MpnENode__2_; +text: .text%__1cIGraphKitbMset_predefined_output_for_runtime_call6MpnENode_pnMMergeMemNode__v_; +text: .text%__1cWThreadLocalAllocBufferFclear6M_v_; +text: .text%__1cTDirectCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cNCallGeneratorPfor_direct_call6FpnIciMethod__p0_; +text: .text%__1cMWarmCallInfoLalways_cold6F_p0_; +text: .text%__1cMPhaseChaitinQgather_lrg_masks6Mi_v_; +text: .text%__1cIimmDOperJconstantD6kM_d_: ad_sparc_clone.o; +text: .text%__1cIPhaseIFGEinit6MI_v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: live.o; +text: .text%__1cJPhaseLiveHcompute6MI_v_; +text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cSaddI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRcompL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cZnoG3_iRegI_64bit_safeOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cFTypeDEmake6Fd_pk0_; +text: .text%__1cFKlassMoop_is_array6kM_i_: symbolKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: symbolKlass.o; +text: .text%__1cPThreadRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cPThreadRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cRshlI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvL2I_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQciTypeArrayKlassTis_type_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cQaddL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHMatcherMreturn_value6Fii_nLRegPair__; +text: .text%__1cGThreadQunboost_priority6Fp0_v_; +text: .text%__1cMloadConDNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cFStateN_sub_Op_LoadI6MpknENode__v_; +text: .text%__1cIMachOperEtype6kM_pknEType__; +text: .text%JVM_GetCPClassNameUTF; +text: .text%__1cUcompU_iReg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cITypeNodeHis_Type6M_p0_: classes.o; +text: .text%__1cICodeBlob2t6Mpkcii_v_; +text: .text%__1cKBufferBlobGcreate6Fpkci_p0_; +text: .text%__1cTmembar_CPUOrderNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadBNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKcmpOpUOperFccode6kM_i_: ad_sparc_clone.o; +text: .text%__1cObranchConUNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cObranchConUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmembar_acquireNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKJNIHandlesLmake_global6FnGHandle_i_pnI_jobject__; +text: .text%__1cTCallInterpreterNodeGOpcode6kM_i_; +text: .text%jni_GetStringLength: jni.o; +text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cRshrP_reg_imm5NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCHAResultSmonomorphic_target6kM_nMmethodHandle__; +text: 
.text%__1cLConvI2LNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cHMonitorEwait6Mil_i_; +text: .text%__1cJloadPNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cMoutputStream2t6Mi_v_; +text: .text%__1cMstringStreamJas_string6M_pc_; +text: .text%__1cMstringStream2T6M_v_; +text: .text%__1cMstringStream2t6MI_v_; +text: .text%__1cIGraphKitMreset_memory6M_pnENode__; +text: .text%__1cZCallDynamicJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKstorePNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstoreLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNprefetch2NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeMsetup_is_top6M_v_; +text: .text%__1cIGotoNodeGOpcode6kM_i_; +text: .text%__1cPorI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_; +text: .text%__1cSReferenceProcessorSdiscover_reference6MpnHoopDesc_nNReferenceType__i_; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: callnode.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: callnode.o; +text: .text%__1cHRetNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cNbranchConNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cHRetNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjectFactory.o; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciObjectFactory.o; +text: .text%__1cKcmpOpPOperFequal6kM_i_: ad_sparc_clone.o; +text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_; +text: .text%__1cKReturnNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSandI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cXjava_lang_ref_ReferenceIset_next6FpnHoopDesc_2_v_; +text: .text%__1cSReferenceProcessorTget_discovered_list6MnNReferenceType__ppnHoopDesc__; +text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_; +text: .text%__1cXmembar_acquire_lockNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_; +text: .text%__1cKStoreBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJloadINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmembar_releaseNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cSaddL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLRShiftINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cEDict2t6MpFpkv2_ipF2_i_v_; +text: .text%__1cEDict2T6M_v_; +text: .text%__1cKBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cLOopRecorder2t6MpnFArena__v_; +text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__; +text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__; +text: .text%__1cIModINodeGOpcode6kM_i_; +text: .text%__1cUGenericGrowableArray2t6MiipnEGrET_i_v_; +text: .text%__1cJloadCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeHget_int6kMpi_i_; +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_nMmethodHandle__v_; +text: .text%__1cKJavaThreadQlast_java_vframe6MpnLRegisterMap__pnKjavaVFrame__; +text: .text%__1cTStackWalkCompPolicyVfindTopInlinableFrame6MpnNGrowableArray4CpnGRFrame____2_; +text: .text%__1cTStackWalkCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_; +text: .text%__1cKjavaVFrameNis_java_frame6kM_i_: vframe.o; +text: .text%__1cISubLNodeGOpcode6kM_i_; +text: 
.text%__1cKciTypeFlowXmark_known_range_starts6M_v_; +text: .text%__1cKciTypeFlow2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cKciTypeFlowLfind_ranges6M_v_; +text: .text%__1cKciTypeFlowPget_start_state6M_pkn0ALStateVector__; +text: .text%__1cKciTypeFlowHdo_flow6M_v_; +text: .text%__1cKciTypeFlowKflow_types6M_v_; +text: .text%__1cKciTypeFlowKmap_blocks6M_v_; +text: .text%__1cMloadConPNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_ltNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cKStoreCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciMethodJload_code6M_v_; +text: .text%__1cMciMethodDataJload_data6M_v_; +text: .text%__1cIGraphKitTuse_exception_state6MpnNSafePointNode__pnENode__; +text: .text%__1cOcompU_iRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitGmemory6MI_pnENode__; +text: .text%__1cIHaltNodeEhash6kM_I_: classes.o; +text: .text%__1cFKlassQup_cast_abstract6M_p0_; +text: .text%__1cKReturnNodeEhash6kM_I_: classes.o; +text: .text%__1cLklassVtableRinitialize_vtable6MpnGThread__v_; +text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_; +text: .text%__1cNobjArrayKlassPoop_is_objArray6kM_i_: objArrayKlass.o; +text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__; +text: .text%__1cIAndINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: methodDataKlass.o; +text: .text%__1cPmethodDataKlassRoop_is_methodData6kM_i_: methodDataKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: methodDataKlass.o; +text: .text%__1cMciMethodData2t6MnQmethodDataHandle__v_; +text: .text%__1cIGraphKitOmake_slow_call6MpknITypeFunc_pCpkcpnENode_88_8_; +text: .text%__1cUGenericGrowableArrayEgrow6Mi_v_; +text: .text%__1cIAndINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cLOpaque2NodeGOpcode6kM_i_; +text: .text%__1cOClearArrayNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_; +text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_; +text: .text%__1cOPhaseIdealLoopPis_counted_loop6MpnENode_pnNIdealLoopTree__2_; +text: .text%__1cKstoreCNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackHdo_void6M_v_: generateOopMap.o; +text: .text%__1cIMachNodeQis_MachSafePoint6M_pnRMachSafePointNode__: machnode.o; +text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: klass.o; +text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_; +text: .text%__1cKKlass_vtbl2n6FIrnLKlassHandle_ipnGThread__pv_; +text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: klass.o; +text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__; +text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_; +text: .text%jni_GetStringUTFLength: jni.o; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_; +text: .text%jni_GetStringUTFRegion: jni.o; +text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_; +text: .text%__1cHUNICODELutf8_length6FpHi_i_; +text: .text%__1cENodeHis_Mach6M_pnIMachNode__: loopnode.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: loopnode.o; +text: .text%__1cQPlaceholderTableJadd_entry6MiInMsymbolHandle_nGHandle__v_; +text: 
.text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_; +text: .text%__1cENodeHis_Copy6kM_I_: ad_sparc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: memnode.o; +text: .text%__1cKstoreBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreINodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cRsarI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUcompU_iReg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHAddNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cUcompU_iReg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKPerfStringKset_string6Mpkc_v_; +text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_; +text: .text%JVM_InternString; +text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_; +text: .text%__1cOGenerateOopMapTmerge_state_vectors6MpnNCellTypeState_2_i_; +text: .text%__1cRcompL_reg_conNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshrL_reg_imm6NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosGrandom6F_l_; +text: .text%__1cXmembar_acquire_lockNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKimmP13OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cVcompP_iRegP_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cENodeGis_Con6kM_I_: ad_sparc.o; +text: .text%__1cSaddL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRcompL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: machnode.o; +text: .text%__1cNMachIdealNodeJnum_opnds6kM_I_: machnode.o; +text: .text%__1cRMachNullCheckNodeLout_RegMask6kM_rknHRegMask__: machnode.o; +text: .text%__1cRMachNullCheckNode2t6MpnENode_2I_v_; +text: .text%__1cSTailCalljmpIndNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cIGraphKitPpush_pair_local6Mi_v_: parse2.o; +text: .text%__1cICodeHeapKdeallocate6Mpv_v_; +text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_; +text: .text%__1cNSafePointNodeMis_SafePoint6M_p0_: classes.o; +text: .text%__1cSCompareAndSwapNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFParseNpush_constant6MnKciConstant__i_; +text: .text%__1cKTypeRawPtrEmake6FpC_pk0_; +text: .text%jni_SetIntField: jni.o; +text: .text%__1cENodeGis_Sub6M_pnHSubNode__: classes.o; +text: .text%__1cNIdealLoopTreeMcounted_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cNIdealLoopTreeTcheck_inner_safepts6MpnOPhaseIdealLoop__v_; +text: .text%__1cIciObjectMis_obj_array6M_i_: ciInstanceKlass.o; +text: .text%__1cKBufferBlobEfree6Fp0_v_; +text: .text%__1cIRootNodeHis_Root6M_p0_: classes.o; +text: .text%__1cPconvL2I_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLRuntimeStubIis_alive6kM_i_: codeBlob.o; +text: .text%__1cLCastP2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__; +text: .text%__1cHnmethodKpc_desc_at6MpCi_pnGPcDesc__; +text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_; +text: .text%__1cQSystemDictionaryRcheck_constraints6FiInTinstanceKlassHandle_nGHandle_pnGThread__v_; +text: .text%__1cVLoaderConstraintTablePcheck_or_update6MnTinstanceKlassHandle_nGHandle_nMsymbolHandle__pkc_; +text: .text%__1cVshrL_reg_imm6_L2INodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cQmodI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassXsearch_secondary_supers6kMpnMklassOopDesc__i_; +text: .text%__1cZCallDynamicJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: graphKit.o; +text: .text%__1cKTypeRawPtrFempty6kM_i_; +text: .text%__1cLConvI2LNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIMachNodeSalignment_required6kM_i_: machnode.o; +text: .text%__1cIMachNodePcompute_padding6kMi_i_: machnode.o; +text: .text%__1cWImplicitExceptionTableGappend6MII_v_; +text: .text%__1cIMachNodeLis_MachCall6M_pnMMachCallNode__: machnode.o; +text: .text%__1cRMachNullCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: ciMethodData.o; +text: .text%__1cQxorI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNIdealLoopTreeVadjust_loop_exit_prob6MpnOPhaseIdealLoop__v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: assembler_sparc.o; +text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cOLibraryCallKitOgenerate_guard6MpnENode_pnKRegionNode_f_v_; +text: .text%__1cSandI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLstoreP0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIPhaseIFGISquareUp6M_v_; +text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_; +text: .text%__1cKCodeBuffer2T6M_v_; +text: .text%__1cLklassItableRinitialize_itable6M_v_; +text: .text%__1cQPSGenerationPoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cQPSGenerationPoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cQPSGenerationPoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cKNode_Array2t6MpnFArena__v_: reg_split.o; +text: .text%__1cENodeHis_Load6M_pnILoadNode__: multnode.o; +text: .text%__1cLOpaque1NodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cPconvI2L_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMURShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJLoadBNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRcompL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_; +text: .text%__1cFKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cQciByteCodeStreamJget_klass6Mri_pnHciKlass__; +text: .text%__1cFKlassMoop_is_klass6kM_i_: instanceKlass.o; +text: .text%__1cFKlassWappend_to_sibling_list6M_v_; +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: classes.o; +text: .text%__1cQcmovI_reg_ltNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__; +text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_; +text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_; +text: .text%__1cUCallCompiledJavaNodeGOpcode6kM_i_; +text: .text%__1cMTypeKlassPtrFxdual6kM_pknEType__; +text: .text%__1cNprefetch2NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_; +text: .text%__1cNinstanceKlassQinit_implementor6M_v_; +text: 
.text%__1cPClassFileParserNfill_oop_maps6MnTinstanceKlassHandle_ii_v_; +text: .text%__1cPClassFileStream2t6MpCipc_v_; +text: .text%__1cRconstantPoolKlassIallocate6MipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_; +text: .text%__1cNmethodOopDescMsort_methods6FpnPobjArrayOopDesc_222_v_; +text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cKoopFactoryQnew_constantPool6FipnGThread__pnTconstantPoolOopDesc__; +text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__; +text: .text%__1cVjava_lang_ClassLoaderRis_trusted_loader6FpnHoopDesc__i_; +text: .text%__1cPClassFileParserOparseClassFile6MnMsymbolHandle_nGHandle_2r1pnGThread__nTinstanceKlassHandle__; +text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_; +text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__; +text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__; +text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__; +text: .text%__1cTClassLoadingServiceTnotify_class_loaded6FpnNinstanceKlass_i_v_; +text: .text%__1cPClassFileParserQparse_interfaces6MnSconstantPoolHandle_nGHandle_2pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cIVerifierRshould_verify_for6FpnHoopDesc__i_; +text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_; +text: .text%__1cIVerifierQrelax_verify_for6FpnHoopDesc__i_; +text: .text%__1cPClassFileParserMparse_fields6MnSconstantPoolHandle_ipnUFieldAllocationCount_pnOobjArrayHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_; +text: .text%__1cLklassVtablebKcompute_vtable_size_and_num_mirandas6Fri1pnMklassOopDesc_pnPobjArrayOopDesc_nLAccessFlags_pnHoopDesc_pnNsymbolOopDesc_5_v_; +text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_; +text: .text%__1cPClassFileParserNparse_methods6MnSconstantPoolHandle_ipnLAccessFlags_ppnPobjArrayOopDesc_66pnGThread__nOobjArrayHandle__; +text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cQSystemDictionaryVresolve_super_or_fail6FnMsymbolHandle_1nGHandle_2pnGThread__pnMklassOopDesc__; +text: .text%__1cKcmpOpPOperJnot_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cRMachSafePointNodePis_MachCallJava6M_pnQMachCallJavaNode__: ad_sparc_misc.o; +text: .text%__1cMPhaseIterGVNIoptimize6M_v_; +text: .text%__1cOPhaseTransform2t6MnFPhaseLPhaseNumber__v_; +text: .text%__1cISubINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceKlass.o; +text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlass.o; +text: .text%__1cHMemNodeHsize_of6kM_I_; +text: .text%__1cFVTuneQstart_class_load6F_v_; +text: .text%__1cSThreadProfilerMark2T6M_v_; +text: .text%__1cFVTuneOend_class_load6F_v_; +text: 
.text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__; +text: .text%__1cJEventMark2t6MpkcE_v_: classLoader.o; +text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_; +text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_; +text: .text%__1cKklassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cQmodI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLRShiftINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKCMoveINodeGOpcode6kM_i_; +text: .text%__1cLLShiftLNodeGOpcode6kM_i_; +text: .text%__1cYcompareAndSwapL_boolNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSinstanceKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNinstanceKlassScopy_static_fields6MpnSPSPromotionManager__v_; +text: .text%__1cMtlsLoadPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFStateQ_sub_Op_URShiftI6MpknENode__v_; +text: .text%__1cKcmpOpUOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cObranchConUNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cQaddP_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_; +text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_; +text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_; +text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_; +text: .text%__1cKoopFactoryVnew_constantPoolCache6FipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_; +text: .text%__1cWconstantPoolCacheKlassIallocate6MipnGThread__pnYconstantPoolCacheOopDesc__; +text: .text%__1cIVerifierRverify_byte_codes6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNinstanceKlassWadd_loader_constraints6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%JVM_GetMethodIxSignatureUTF; +text: .text%JVM_GetMethodIxMaxStack; +text: .text%JVM_GetMethodIxArgsSize; +text: .text%JVM_GetMethodIxByteCodeLength; +text: .text%JVM_GetMethodIxExceptionIndexes; +text: .text%JVM_GetMethodIxByteCode; +text: .text%JVM_GetMethodIxExceptionsCount; +text: .text%__1cPCheckCastPPNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLstoreP0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHCmpNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cMPhaseChaitinSbuild_ifg_physical6MpnMResourceArea__I_; +text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cQmulD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__; +text: .text%__1cNPhaseCoalescePcoalesce_driver6M_v_; +text: .text%__1cLProfileDataSis_VirtualCallData6M_i_: ciMethodData.o; +text: .text%__1cLBuildCutout2T6M_v_; +text: .text%__1cNloadConL0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cITypeFuncMreturns_long6kM_i_; +text: .text%__1cJloadSNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse2.o; +text: .text%__1cENodeGis_Con6kM_I_: connode.o; 
+text: .text%__1cNloadConP0NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cJimmP0OperEtype6kM_pknEType__: ad_sparc_clone.o; +text: .text%__1cLstoreI0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPCheckCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJloadLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_; +text: .text%__1cIAndINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cTGeneratePairingInfoRpossible_gc_point6MpnOBytecodeStream__i_: ciMethod.o; +text: .text%__1cQandL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKRegionNodeOis_block_start6kM_i_: loopnode.o; +text: .text%__1cHNTarjanICOMPRESS6M_v_; +text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_; +text: .text%__1cQsubL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQStackFrameStream2t6MpnKJavaThread_i_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: compile.o; +text: .text%__1cLClassLoaderLadd_package6Fpkci_i_; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: loopnode.o; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopnode.o; +text: .text%__1cOcompI_iRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: loopnode.o; +text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_; +text: .text%__1cHTypePtrCeq6kMpknEType__i_; +text: .text%__1cQandI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIXorINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cRmethodDataOopDescLbci_to_data6Mi_pnLProfileData__; +text: .text%__1cSCompareAndSwapNodeKmatch_edge6kMI_I_: classes.o; +text: .text%__1cRcompL_reg_conNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmembar_acquireNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPindOffset13OperFclone6kM_pnIMachOper__; +text: .text%__1cICallNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFframeZinterpreter_frame_set_bcx6Mi_v_; +text: .text%__1cJloadCNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHnmethodJcode_size6kM_i_: nmethod.o; +text: .text%__1cMnegF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: loopnode.o; +text: .text%__1cLstoreI0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_; +text: .text%__1cPciObjArrayKlassSis_obj_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cSaddL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQshrL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_; +text: .text%__1cILoopNodeHsize_of6kM_I_: classes.o; +text: .text%__1cHMatcherLfind_shared6MpnENode__v_; +text: .text%__1cJStartNodeHsize_of6kM_I_; +text: .text%__1cHMatcherFxform6MpnENode_i_2_; +text: .text%__1cEDict2t6MpFpkv2_ipF2_ipnFArena_i_v_; +text: .text%__1cRInterpretedRFrameKtop_vframe6kM_pnKjavaVFrame__: rframe.o; +text: .text%__1cQmodI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRinterpretedVFrameDbci6kM_i_; +text: .text%__1cRreturn_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cQmodI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarI_reg_imm5NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cFParseKdo_put_xxx6MpknHTypePtr_pnENode_pnHciField_i_v_; +text: .text%__1cIAndINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cMnegF_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNIdealLoopTreeOpolicy_peeling6kMpnOPhaseIdealLoop__i_; +text: .text%__1cNIdealLoopTreeUiteration_split_impl6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cNIdealLoopTreebBpolicy_do_remove_empty_loop6MpnOPhaseIdealLoop__i_; +text: .text%__1cQsubL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAndINodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cNloadRangeNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitRmerge_fast_memory6MpnENode_2i_v_; +text: .text%__1cRcompL_reg_conNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cMnegF_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: instanceKlass.o; +text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_; +text: .text%__1cTStackWalkCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlass.o; +text: .text%__1cbCCompiledCodeSafepointHandlerYcaller_must_gc_arguments6kM_i_: safepoint.o; +text: .text%__1cUThreadSafepointStateYcaller_must_gc_arguments6kM_i_; +text: .text%__1cRCompilationPolicybJreset_counter_for_back_branch_event6MnMmethodHandle__v_; +text: .text%__1cNSafepointBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_; +text: .text%__1cOMethodLivenessQcompute_liveness6M_v_; +text: .text%__1cOMethodLiveness2t6MpnFArena_pnIciMethod__v_; +text: .text%__1cOMethodLivenessNinit_gen_kill6M_v_; +text: .text%__1cOMethodLivenessSpropagate_liveness6M_v_; +text: .text%__1cOMethodLivenessRinit_basic_blocks6M_v_; +text: .text%__1cIGraphKitHopt_iff6MpnENode_2_2_; +text: .text%__1cLCastP2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLRShiftINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cUcompU_iReg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJTimeStampGupdate6M_v_; +text: .text%__1cRmethodDataOopDescJis_mature6kM_i_; +text: .text%__1cRmethodDataOopDescKmileage_of6FpnNmethodOopDesc__i_; +text: .text%__1cWconstantPoolCacheKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cMMachCallNodeMreturns_long6kM_i_; +text: .text%__1cIGraphKitOhas_ex_handler6M_i_; +text: .text%__1cMloadConDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJStartNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cFParseQarray_addressing6MnJBasicType_ippknEType__pnENode__; +text: .text%__1cNloadConP0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQaddL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKReturnNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPCountedLoopNodeHsize_of6kM_I_: classes.o; +text: .text%__1cIProjNodeJideal_reg6kM_I_; +text: .text%__1cQaddI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQcmovI_reg_ltNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQcmovI_reg_gtNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRMachSafePointNodeLis_MachCall6M_pnMMachCallNode__: ad_sparc_misc.o; +text: .text%__1cSsafePoint_pollNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cRsubI_zero_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJcmpOpOperFequal6kM_i_: ad_sparc_clone.o; +text: .text%__1cHCompilebAvarargs_C_out_slots_killed6kM_I_; +text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6MX_v_: jni.o; +text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_; +text: .text%__1cTMachCallRuntimeNodeSis_MachCallRuntime6M_p0_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopQset_subtree_ctrl6MpnENode__v_; +text: .text%__1cFciEnvUis_unresolved_string6kMpnPciInstanceKlass_i_i_; +text: .text%__1cQciByteCodeStreamUis_unresolved_string6kM_i_; +text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cNflagsRegLOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cZCallDynamicJavaDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerOcas_under_lock6MpnMRegisterImpl_22pCi_v_; +text: .text%__1cYciExceptionHandlerStreamPcount_remaining6M_i_; +text: .text%__1cFParseXcatch_inline_exceptions6MpnNSafePointNode__v_; +text: .text%__1cRconstantPoolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_; +text: .text%__1cTmembar_CPUOrderNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKcmpOpUOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%JVM_GetFieldIxModifiers; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cENodeFis_If6M_pnGIfNode__: connode.o; +text: .text%__1cRScavengeRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cRScavengeRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cQmulL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cILoopNode2t6MpnENode_2_v_; +text: .text%JVM_IsConstructorIx; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__: classes.o; +text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_; +text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_; +text: .text%__1cSaddP_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHMatcherUc_calling_convention6FpnLRegPair_I_v_; +text: .text%__1cPCallRuntimeNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__; +text: .text%__1cHnmethodQis_native_method6kM_i_: nmethod.o; +text: .text%__1cPCountedLoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__: classes.o; +text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_; +text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_; +text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__; +text: .text%__1cLRShiftINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKg1RegIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNSignatureInfoHdo_long6M_v_: frame.o; +text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_; +text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o; +text: .text%__1cIciObjectMhas_encoding6M_i_; +text: .text%__1cNinstanceKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cIAndINodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cIAndINodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cTMachCallRuntimeNodePret_addr_offset6M_i_; +text: .text%__1cLConvL2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKo0RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cIregDOperKin_RegMask6kMi_pknHRegMask__; +text: 
.text%__1cVshrL_reg_imm6_L2INodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescbGupdate_compiled_code_entry_point6Mi_v_; +text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_; +text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__; +text: .text%__1cPBytecode_invokeFindex6kM_i_; +text: .text%__1cLRethrowNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKJNIHandlesOdestroy_global6FpnI_jobject_i_v_; +text: .text%__1cSPSKeepAliveClosureGdo_oop6MppnHoopDesc__v_: psScavenge.o; +text: .text%__1cFParseFBlockRsuccessor_for_bci6Mi_p1_; +text: .text%__1cVPreserveExceptionMark2T6M_v_; +text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_; +text: .text%__1cHRetNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHMatcherQpost_fast_unlock6FpknENode__i_; +text: .text%__1cIRootNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cMoutputStreamFprint6MpkcE_v_; +text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_; +text: .text%__1cHAddNodeGis_Add6kM_pk0_: classes.o; +text: .text%__1cHCompileQsync_stack_slots6kM_i_; +text: .text%__1cJLoadCNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHMulNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cJLoadFNodeGOpcode6kM_i_; +text: .text%__1cNSignatureInfoHdo_long6M_v_: bytecode.o; +text: .text%__1cHPhiNodeDcmp6kMrknENode__I_; +text: .text%__1cKciTypeFlowLStateVectorMdo_putstatic6MpnQciByteCodeStream__v_; +text: .text%__1cHOrINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: objArrayKlass.o; +text: .text%__1cKciTypeFlowLStateVectorGdo_ldc6MpnQciByteCodeStream__v_; +text: .text%__1cSTailCalljmpIndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_; +text: .text%__1cKstoreINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRloadConP_pollNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_; +text: .text%__1cQxorI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLMachNopNodeMideal_Opcode6kM_i_: ad_sparc.o; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstance.o; +text: .text%__1cLMachNopNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMFastLockNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o; +text: .text%__1cPCountedLoopNodeDphi6kM_pnENode__: loopopts.o; +text: .text%__1cOPhaseIdealLoopNreorg_offsets6MpnNIdealLoopTree__v_; +text: .text%__1cSmembar_releaseNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMVM_OperationPevaluation_mode6kM_n0AEMode__: vm_operations.o; +text: .text%__1cRshrL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse3.o; +text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_; +text: .text%__1cQLRUMaxHeapPolicyWshould_clear_reference6MpnHoopDesc__i_; +text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_; +text: .text%__1cPcompP_iRegPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSxorI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopRsplit_thru_region6MpnENode_2_2_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: output.o; +text: .text%__1cIAndLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cbAPSEvacuateFollowersClosureHdo_void6M_v_: psScavenge.o; +text: .text%jni_ExceptionCheck: jni.o; +text: .text%__1cIAndLNodeImul_ring6kMpknEType_3_3_; +text: 
.text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__; +text: .text%__1cOPhaseIdealLoopMdominated_by6MpnENode_2_v_; +text: .text%__1cQshlI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseNthrow_to_exit6MpnNSafePointNode__v_; +text: .text%__1cWCallLeafNoFPDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvL2I_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLCounterDataOis_CounterData6M_i_: methodDataOop.o; +text: .text%__1cJloadPNodeFclone6kM_pnENode__; +text: .text%__1cQinstanceRefKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cYDebugInformationRecorderNappend_handle6MpnI_jobject__i_; +text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_; +text: .text%__1cJVectorSetGslamin6Mrk0_v_; +text: .text%JVM_Clone; +text: .text%__1cRAbstractAssemblerFflush6M_v_; +text: .text%__1cYinlineCallClearArrayNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPciInstanceKlassTis_java_lang_Object6M_i_; +text: .text%__1cITypeLongFxdual6kM_pknEType__; +text: .text%__1cSmembar_releaseNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIJumpDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cRshrL_reg_imm6NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_; +text: .text%__1cOstackSlotLOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cURethrowExceptionNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshrL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXmembar_release_lockNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLOpaque2NodeEhash6kM_I_; +text: .text%__1cJloadFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUcompU_iReg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYSurvivorMutableSpacePoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cNSharedRuntimeOresolve_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cUEdenMutableSpacePoolImax_size6kM_I_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cUEdenMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cNSharedRuntimeSresolve_sub_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__; +text: .text%__1cYSurvivorMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__; +text: .text%__1cYSurvivorMutableSpacePoolNused_in_bytes6M_I_: memoryPool.o; +text: .text%__1cLOptoRuntimeJstub_name6FpC_pkc_; +text: .text%__1cLstoreP0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHOrINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cICmpLNodeDsub6kMpknEType_3_3_; +text: .text%__1cHPhiNodeKmake_blank6FpnENode_2_p0_; +text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_: jni.o; +text: .text%__1cHMatcherNfind_receiver6Fi_nFVMRegEName__; +text: .text%__1cIMulINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cQandI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMachEpilogNodeNis_MachEpilog6M_p0_: ad_sparc.o; +text: .text%__1cOMachEpilogNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIimmPOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConPNodeFclone6kM_pnENode__; +text: .text%__1cNSCMemProjNodeGis_CFG6kM_i_: classes.o; +text: .text%__1cFStateM_sub_Op_SubI6MpknENode__v_; +text: 
.text%__1cFframeRretrieve_receiver6MpnLRegisterMap__pnHoopDesc__; +text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__; +text: .text%__1cNloadKlassNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRMachSafePointNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cMTailCallNodeKmatch_edge6kMI_I_; +text: .text%jni_NewObject: jni.o; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: assembler_sparc.o; +text: .text%__1cIPhaseIFGYCompute_Effective_Degree6M_v_; +text: .text%__1cHMemNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cXmembar_release_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cNobjArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cJNode_ListEyank6MpnENode__v_; +text: .text%__1cMPhaseChaitinISimplify6M_v_; +text: .text%__1cNIdealLoopTreeIset_nest6MI_i_; +text: .text%__1cQcmovI_reg_gtNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCallLeafDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIMulLNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cMStartOSRNodeGOpcode6kM_i_; +text: .text%__1cSCallLeafDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cLcmpD_ccNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJLoadSNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cJcmpOpOperEless6kM_i_: ad_sparc_clone.o; +text: .text%__1cKciTypeFlowPflow_exceptions6MpnNGrowableArray4Cpn0AFBlock___pnNGrowableArray4CpnPciInstanceKlass___pn0ALStateVector__v_; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__: ad_sparc_misc.o; +text: .text%__1cKType_ArrayEgrow6MI_v_; +text: .text%__1cSaddL_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConP0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKCompiledICWis_in_transition_state6kM_i_; +text: .text%__1cXmembar_release_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPconvF2D_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadCNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJScopeDescGis_top6kM_i_; +text: .text%__1cRshrL_reg_imm6NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKciTypeFlowLStateVectorOmeet_exception6MpnPciInstanceKlass_pk1_i_; +text: .text%__1cMURShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cNloadConL0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cVshrL_reg_imm6_L2INodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSMemBarVolatileNodeGOpcode6kM_i_; +text: .text%__1cLstoreB0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cRshrI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGThreadMis_VM_thread6kM_i_: thread.o; +text: .text%__1cICodeBlobKis_nmethod6kM_i_: onStackReplacement.o; +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_; +text: .text%__1cRcmpFastUnlockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNSafePointNodeLpop_monitor6M_v_; +text: .text%__1cMPhaseChaitinVfind_base_for_derived6MppnENode_2rI_2_; +text: .text%__1cLOptoRuntimebAcomplete_monitor_exit_Type6F_pknITypeFunc__; +text: .text%__1cOstackSlotIOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cIGraphKitNshared_unlock6MpnENode_2_v_; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: connode.o; +text: .text%__1cFStateT_sub_Op_CheckCastPP6MpknENode__v_; +text: .text%__1cTCallDynamicJavaNodeEhash6kM_I_: callnode.o; +text: 
.text%__1cQsubI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassDLCA6Mp0_1_; +text: .text%__1cKTypeRawPtrEmake6FnHTypePtrDPTR__pk0_; +text: .text%__1cHciKlassVleast_common_ancestor6Mp0_1_; +text: .text%__1cNSignatureInfoHdo_bool6M_v_: frame.o; +text: .text%__1cOPhaseIdealLoopPbuild_loop_tree6M_v_; +text: .text%__1cRcompL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeGis_Con6kM_I_: memnode.o; +text: .text%__1cRshlL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoop2t6MrnMPhaseIterGVN_pk0i_v_; +text: .text%__1cIciObjectIis_klass6M_i_: ciInstance.o; +text: .text%__1cRloadConP_pollNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQshlL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMindirectOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindirectOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindirectOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cNSafePointNodeMpush_monitor6MpknMFastLockNode__v_; +text: .text%__1cSCallLeafDirectNodeRis_safepoint_node6kM_i_: ad_sparc_misc.o; +text: .text%__1cSCallLeafDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCallLeafDirectNodeKmethod_set6Mi_v_; +text: .text%__1cIDivINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJLoadBNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSstring_compareNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cFBytesNget_native_u46FpC_I_: bytecodes.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlass.o; +text: .text%__1cRcompL_reg_conNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMLinkResolverUresolve_special_call6FrnICallInfo_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cJloadBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: cfgnode.o; +text: .text%__1cRcompL_reg_conNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: cfgnode.o; +text: .text%__1cPcheckCastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: connode.o; +text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_; +text: .text%__1cJCMoveNodeLis_cmove_id6FpnOPhaseTransform_pnENode_44pnIBoolNode__4_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interp_masm_sparc.o; +text: .text%__1cKTypeAryPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__; +text: .text%__1cGOopMapPset_derived_oop6MnHOptoRegEName_ii2_v_; +text: .text%__1cLConvL2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOPhaseIdealLoopKDominators6M_v_; +text: .text%__1cOPhaseIdealLoopPbuild_loop_late6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cOPhaseIdealLoopQbuild_loop_early6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_; +text: .text%__1cKCompiledICIis_clean6kM_i_; +text: .text%jni_NewGlobalRef: jni.o; +text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_; +text: .text%__1cIAndINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cGOopMapQset_callee_saved6MnHOptoRegEName_ii2_v_; +text: .text%__1cYcompareAndSwapL_boolNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRbranchLoopEndNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: loopnode.o; +text: .text%__1cKCompiledICZcompute_monomorphic_entry6FnMmethodHandle_nLKlassHandle_iirnOCompiledICInfo_pnGThread__v_; +text: .text%__1cKtype2basic6FpknEType__nJBasicType__; +text: 
.text%__1cMPhaseChaitinFSplit6MI_I_; +text: .text%__1cMPhaseChaitinHcompact6M_v_; +text: .text%__1cZPhaseConservativeCoalesce2t6MrnMPhaseChaitin__v_; +text: .text%__1cNIdealLoopTreeMis_loop_exit6kMpnENode_pnOPhaseIdealLoop__2_; +text: .text%__1cMPhaseChaitinZcompress_uf_map_for_nodes6M_v_; +text: .text%__1cZPhaseConservativeCoalesceGverify6M_v_; +text: .text%__1cRcmpFastUnlockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQshlI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerWcompiler_unlock_object6MpnMRegisterImpl_222rnFLabel__v_; +text: .text%__1cXmembar_release_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: interpreter_sparc.o; +text: .text%__1cKPSYoungGenNused_in_bytes6kM_I_; +text: .text%__1cOMachEpilogNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_; +text: .text%__1cLklassItablebFinitialize_itable_for_interface6MpnMklassOopDesc_pnRitableMethodEntry__v_; +text: .text%__1cJloadFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIRootNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cIRootNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHMatcherPprior_fast_lock6FpknENode__i_; +text: .text%__1cJLoadLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSReferenceProcessorbAenqueue_discovered_reflist6MpnHoopDesc_p2_v_; +text: .text%__1cSReferenceProcessorbAprocess_discovered_reflist6MppnHoopDesc_pnPReferencePolicy_i_v_; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_; +text: .text%__1cKJavaThreadNreguard_stack6MpC_i_; +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_; +text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_; +text: .text%__1cIUniverseWis_out_of_memory_error6FnGHandle__i_; +text: .text%JVM_FillInStackTrace; +text: .text%__1cKJavaThreadGactive6F_p0_; +text: .text%__1cKstoreFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHnmethodOis_java_method6kM_i_: nmethod.o; +text: .text%__1cMtlsLoadPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_; +text: .text%__1cENodeUdepends_only_on_test6kM_i_: connode.o; +text: .text%__1cMVirtualSpaceNreserved_size6kM_I_; +text: .text%__1cICodeHeapMmax_capacity6kM_I_; +text: .text%__1cSbranchCon_longNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cHTypePtrFxmeet6kMpknEType__3_; +text: .text%__1cbBInterpreterCodeletInterfaceRcode_size_to_size6kMi_i_: interpreter.o; +text: .text%__1cbBInterpreterCodeletInterfaceKinitialize6MpnEStub_i_v_: interpreter.o; +text: .text%__1cNflagsRegFOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cIMinINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cFParseWensure_phis_everywhere6M_v_; +text: .text%__1cVshrL_reg_imm6_L2INodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadFNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cTDebugInfoReadStream2t6MpknHnmethod_i_v_; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: ad_sparc.o; +text: .text%__1cLRethrowNodeEhash6kM_I_: classes.o; +text: .text%__1cIDivLNodeGOpcode6kM_i_; +text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_; +text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_; +text: .text%__1cNmethodOopDescVclear_native_function6M_v_; +text: 
.text%__1cOloadConL13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_; +text: .text%__1cNloadConL0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%jio_snprintf; +text: .text%__1cENodeGis_Phi6M_pnHPhiNode__: node.o; +text: .text%__1cMloadConINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o; +text: .text%__1cSmulI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLOptoRuntimebAresolve_opt_virtual_call_C6FpnKJavaThread__pC_; +text: .text%jni_NewLocalRef: jni.o; +text: .text%__1cRsubI_zero_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulDNodeGOpcode6kM_i_; +text: .text%__1cLStrCompNodeGOpcode6kM_i_; +text: .text%__1cJCodeCacheXmark_for_deoptimization6FpnMklassOopDesc__i_; +text: .text%__1cQcmovI_reg_gtNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cURethrowExceptionNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: callnode.o; +text: .text%__1cKstoreBNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_; +text: .text%__1cKStoreFNodeGOpcode6kM_i_; +text: .text%__1cLConvD2INodeGOpcode6kM_i_; +text: .text%__1cURethrowExceptionNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLPhaseValues2T5B6M_v_; +text: .text%__1cIAddLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKReturnNode2t6MpnENode_2222_v_; +text: .text%__1cMURShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKReturnNodeJideal_reg6kM_I_: classes.o; +text: .text%jni_DeleteGlobalRef: jni.o; +text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_; +text: .text%__1cVPatchingRelocIteratorHprepass6M_v_; +text: .text%__1cKCodeBufferPcopy_relocation6MpnICodeBlob__v_; +text: .text%__1cKCodeBufferJcopy_code6MpnICodeBlob__v_; +text: .text%__1cZnoG3_iRegI_64bit_safeOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNRelocIteratorMcreate_index6FpnKCodeBuffer_pnJrelocInfo_4_4_; +text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_; +text: .text%__1cQcmovI_reg_gtNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cICodeBlobWfix_relocation_at_move6Mi_v_; +text: .text%__1cICodeBlob2t6MpkcpnKCodeBuffer_iiipnJOopMapSet_i_v_; +text: .text%__1cIAndLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cIAndLNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cJOopMapSet2t6M_v_; +text: .text%__1cNSCMemProjNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%JVM_GetCPMethodModifiers; +text: .text%jni_GetObjectArrayElement: jni.o; +text: .text%__1cOCompilerOraclePshould_break_at6FnMmethodHandle__i_; +text: .text%__1cENodeRraise_bottom_type6MpknEType__v_: memnode.o; +text: .text%__1cFParseKarray_load6MnJBasicType__v_; +text: .text%jni_SetLongField: jni.o; +text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_; +text: .text%__1cJOopMapSetHcopy_to6MpC_v_; +text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_; +text: .text%__1cJOopMapSetJheap_size6kM_i_; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: classes.o; +text: .text%__1cNSafePointNodeKgrow_stack6MpnIJVMState_I_v_; +text: .text%__1cIJVMState2t6Mi_v_; +text: .text%__1cIAndLNodeKadd_opcode6kM_i_: classes.o; +text: 
.text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: interp_masm_sparc.o; +text: .text%__1cIAndLNodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cIAndLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cNSignatureInfoJdo_double6M_v_: frame.o; +text: .text%__1cJLoadSNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cNRelocIterator2t6MpnKCodeBuffer_pC3_v_; +text: .text%__1cMMachProjNodeHsize_of6kM_I_: classes.o; +text: .text%__1cTDebugInfoReadStreamLread_handle6M_nGHandle__; +text: .text%__1cOPhaseIdealLoopUsplit_if_with_blocks6MrnJVectorSet_rnKNode_Stack__v_; +text: .text%__1cMtlsLoadPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_; +text: .text%__1cLOopRecorderIoop_size6M_i_; +text: .text%__1cYDebugInformationRecorderJdata_size6M_i_; +text: .text%__1cHnmethodPscopes_pcs_size6kM_i_: nmethod.o; +text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_; +text: .text%__1cHnmethodQscopes_data_size6kM_i_: nmethod.o; +text: .text%__1cHnmethodJstub_size6kM_i_: nmethod.o; +text: .text%__1cOPhaseIdealLoopOset_early_ctrl6MpnENode__v_; +text: .text%__1cHnmethodKtotal_size6kM_i_; +text: .text%__1cHnmethodOexception_size6kM_i_: nmethod.o; +text: .text%__1cbFunnecessary_membar_volatileNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMloadConLNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cJiRegLOperFclone6kM_pnIMachOper__; +text: .text%__1cFParseNadd_safepoint6M_v_; +text: .text%__1cOPhaseTransform2t6Mp0nFPhaseLPhaseNumber__v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: phaseX.o; +text: .text%__1cLPhaseValues2t6Mp0_v_; +text: .text%__1cQcmovI_reg_ltNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cXPhaseAggressiveCoalesceGverify6M_v_: coalesce.o; +text: .text%__1cHCompilebBregister_library_intrinsics6M_v_; +text: .text%__1cXPhaseAggressiveCoalesceNinsert_copies6MrnHMatcher__v_; +text: .text%__1cNPhaseRegAlloc2t6MIrnIPhaseCFG_rnHMatcher_pF_v_v_; +text: .text%__1cIPhaseCFGJbuild_cfg6M_I_; +text: .text%__1cHCompileEInit6Mi_v_; +text: .text%__1cVExceptionHandlerTable2t6Mi_v_; +text: .text%__1cHMatcherLreturn_addr6kM_nHOptoRegEName__; +text: .text%__1cMPhaseChaitin2t6MIrnIPhaseCFG_rnHMatcher__v_; +text: .text%__1cMPhaseChaitinRRegister_Allocate6M_v_; +text: .text%__1cHCompileTset_cached_top_node6MpnENode__v_; +text: .text%__1cIPhaseCFGQFind_Inner_Loops6M_v_; +text: .text%__1cHMatcherZnumber_of_saved_registers6F_i_; +text: .text%__1cNPhaseRegAllocTpd_preallocate_hook6M_v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: block.o; +text: .text%__1cMPhaseChaitinMreset_uf_map6MI_v_; +text: .text%__1cMPhaseChaitinRbuild_ifg_virtual6M_v_; +text: .text%__1cIPhaseCFGQGlobalCodeMotion6MrnHMatcher_IrnJNode_List__v_; +text: .text%__1cHMatcherTFixup_Save_On_Entry6M_v_; +text: .text%__1cHMatcherPinit_spill_mask6MpnENode__v_; +text: .text%__1cHCompileICode_Gen6M_v_; +text: .text%__1cKNode_Array2t6MpnFArena__v_: matcher.o; +text: .text%__1cFArena2t6MI_v_; +text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_; +text: .text%__1cHMatcherVinit_first_stack_mask6M_v_; +text: .text%__1cFArenaNmove_contents6Mp0_1_; +text: .text%__1cKCodeBufferGresize6Miiii_v_; +text: .text%__1cFArenaRdestruct_contents6M_v_; +text: .text%__1cIPhaseIFG2t6MpnFArena__v_; +text: .text%__1cFDictIFreset6MpknEDict__v_; +text: .text%__1cHMatcherFmatch6M_v_; +text: .text%__1cHMatcher2t6MrnJNode_List__v_; +text: 
.text%__1cIPhaseCFGVschedule_pinned_nodes6MrnJVectorSet__v_; +text: .text%__1cIPhaseCFGOschedule_early6MrnJVectorSet_rnJNode_List_rnLBlock_Array__i_; +text: .text%__1cETypeKInitialize6FpnHCompile__v_; +text: .text%__1cIPhaseCFGNschedule_late6MrnJVectorSet_rnJNode_List_rnNGrowableArray4CI___v_; +text: .text%__1cIPhaseCFGYEstimate_Block_Frequency6M_v_; +text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_; +text: .text%__1cOCompileWrapper2t6MpnHCompile__v_; +text: .text%__1cIPhaseCFGKDominators6M_v_; +text: .text%__1cIPhaseCFG2t6MpnFArena_pnIRootNode_rnHMatcher__v_; +text: .text%__1cMPhaseChaitinbGstretch_base_pointer_live_ranges6MpnMResourceArea__i_; +text: .text%__1cJPhaseLive2t6MrknIPhaseCFG_rnILRG_List_pnFArena__v_; +text: .text%__1cJPhaseLive2T6M_v_; +text: .text%__1cWemit_exception_handler6FrnKCodeBuffer__v_; +text: .text%__1cHCompileYinit_scratch_buffer_blob6M_v_; +text: .text%__1cKCodeBufferOrelocate_stubs6M_v_; +text: .text%__1cOMachPrologNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMPhaseChaitin2T6M_v_; +text: .text%__1cHMatcherWis_short_branch_offset6Mi_i_; +text: .text%__1cHCompileTFillExceptionTables6MIpI1pnFLabel__v_; +text: .text%__1cMPhaseChaitinbApost_allocate_copy_removal6M_v_; +text: .text%__1cIPhaseCFGLRemoveEmpty6M_v_; +text: .text%__1cHCompileGOutput6M_v_; +text: .text%__1cWImplicitExceptionTableIset_size6MI_v_; +text: .text%__1cHCompileMBuildOopMaps6M_v_; +text: .text%__1cHCompilePneed_stack_bang6kMi_i_; +text: .text%__1cLdo_liveness6FpnNPhaseRegAlloc_pnIPhaseCFG_pnKBlock_List_ipnFArena_pnEDict__v_: buildOopMap.o; +text: .text%__1cGBundlePinitialize_nops6FppnIMachNode__v_; +text: .text%__1cMPhaseChaitinMfixup_spills6M_v_; +text: .text%__1cNPhaseRegAllocPalloc_node_regs6Mi_v_; +text: .text%__1cLBlock_Array2t6MpnFArena__v_: buildOopMap.o; +text: .text%__1cHCompileLFill_buffer6M_v_; +text: .text%__1cVCallRuntimeDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cZCallInterpreterDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNSignatureInfoJdo_double6M_v_: bytecode.o; +text: .text%__1cWpoll_return_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cENodeHrm_prec6MI_v_; +text: .text%__1cLcmpD_ccNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cGciTypeMis_classless6kM_i_: ciInstanceKlass.o; +text: .text%__1cIMulLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cHRetNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIGraphKitOset_pair_local6MipnENode__v_: parse2.o; +text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_; +text: .text%JVM_DoPrivileged; +text: .text%__1cOcompiledVFrameGis_top6kM_i_; +text: .text%__1cRsubI_zero_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHRetNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICodeBlobPallocation_size6FpnKCodeBuffer_ii_I_; +text: .text%__1cQaddL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_; +text: .text%__1cHciKlassMis_interface6M_i_: ciObjArrayKlass.o; +text: .text%__1cIConDNodeGOpcode6kM_i_; +text: .text%__1cObranchConFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cTresource_free_bytes6FpcI_v_; +text: .text%__1cNmethodOopDescbDbuild_interpreter_method_data6FnMmethodHandle_pnGThread__v_; +text: .text%__1cIAddLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRcompL_reg_conNodeLout_RegMask6kM_rknHRegMask__; +text: 
.text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_; +text: .text%__1cPconvL2I_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFciEnvKcompile_id6M_I_; +text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o; +text: .text%__1cRmethodDataOopDescKinitialize6MpnNmethodOopDesc__v_; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_bytes6FpnNmethodOopDesc__i_; +text: .text%__1cRmethodDataOopDescbGcompute_allocation_size_in_words6FpnNmethodOopDesc__i_; +text: .text%__1cIDivINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cILoopNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPmethodDataKlassIallocate6MnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cKoopFactoryOnew_methodData6FnMmethodHandle_pnGThread__pnRmethodDataOopDesc__; +text: .text%__1cNprefetch2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cILoopNodeHsize_of6kM_I_: loopnode.o; +text: .text%__1cIAndLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKCodeBuffer2t6MpCi_v_; +text: .text%__1cVshrL_reg_imm6_L2INodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nativeInst_sparc.o; +text: .text%__1cIConPNodeEmake6FpC_p0_; +text: .text%__1cIGraphKitNstore_barrier6MpnENode_22_v_; +text: .text%__1cOcmovII_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvL2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: callnode.o; +text: .text%__1cIciMethodRinstructions_size6M_i_; +text: .text%__1cSmulI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovII_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: memnode.o; +text: .text%__1cCosXthread_local_storage_at6Fi_pv_; +text: .text%__1cMindIndexOperNconstant_disp6kM_i_: ad_sparc.o; +text: .text%__1cMindIndexOperOindex_position6kM_i_: ad_sparc.o; +text: .text%__1cMindIndexOperFscale6kM_i_: ad_sparc.o; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: assembler_sparc.o; +text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_; +text: .text%__1cMindIndexOperNbase_position6kM_i_: ad_sparc.o; +text: .text%__1cNloadKlassNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFStateR_sub_Op_LoadKlass6MpknENode__v_; +text: .text%__1cGTarjanICOMPRESS6M_v_; +text: .text%__1cKstoreCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKloadUBNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cHciKlassGloader6M_pnHoopDesc__: ciTypeArrayKlass.o; +text: .text%__1cICmpDNodeGOpcode6kM_i_; +text: .text%__1cNloadConL0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulLNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cOPhaseIdealLoopOplace_near_use6kMpnENode__2_; +text: .text%__1cVCallRuntimeDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOloadConL13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cYcompareAndSwapL_boolNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cZCallInterpreterDirectNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLstoreB0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSInterpreterRuntimeOprofile_method6FpnKJavaThread_pC_i_; +text: .text%__1cHMonitorGnotify6M_i_; +text: .text%__1cNIdealLoopTreePiteration_split6MpnOPhaseIdealLoop_rnJNode_List__v_; +text: .text%__1cOloadConL13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cMURShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: 
.text%__1cJloadPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNRelocIteratorEnext6M_i_: compiledIC.o; +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_; +text: .text%__1cLRShiftINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLOptoRuntimeRmultianewarray1_C6FpnMklassOopDesc_ipnKJavaThread__v_; +text: .text%__1cIMachNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLOpaque2NodeLbottom_type6kM_pknEType__: connode.o; +text: .text%__1cGThreadOis_interrupted6Fp0i_i_; +text: .text%__1cSconvI2D_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: subnode.o; +text: .text%__1cUPSGenerationCountersKupdate_all6M_v_: psGenerationCounters.o; +text: .text%__1cQComputeCallStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cKTypeOopPtrSmake_from_constant6FpnIciObject__pk0_; +text: .text%__1cQregP_to_stkPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_; +text: .text%__1cJTimeStampSticks_since_update6kM_x_; +text: .text%__1cJArrayDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cQmodI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlass.o; +text: .text%__1cNmethodOopDescThas_native_function6kM_i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constantPoolKlass.o; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: loopnode.o; +text: .text%__1cIMulINodeImul_ring6kMpknEType_3_3_; +text: .text%__1cURethrowExceptionNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNmethodOopDescWis_vanilla_constructor6kM_i_; +text: .text%__1cIAddLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cQcmovI_reg_ltNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLstoreB0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRshrL_reg_imm6NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIModINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKstoreFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cJcmpOpOperHgreater6kM_i_: ad_sparc_clone.o; +text: .text%__1cQstkI_to_regFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJimmL0OperJconstantL6kM_x_: ad_sparc_clone.o; +text: .text%__1cFParseJdo_ifnull6MnIBoolTestEmask__v_; +text: .text%__1cQmulD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJimmI0OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cFStateM_sub_Op_ConL6MpknENode__v_; +text: .text%__1cOloadConL13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNObjectMonitorHis_busy6kM_i_; +text: .text%JVM_GetClassNameUTF; +text: .text%__1cKloadUBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIXorINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cFStateM_sub_Op_AndI6MpknENode__v_; +text: .text%__1cVshrL_reg_imm6_L2INodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: multnode.o; +text: .text%__1cKcmpOpFOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cLRuntimeStubbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: 
.text%__1cNflagsRegFOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cTmembar_volatileNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRshlL_reg_imm6NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJMultiNodeUdepends_only_on_test6kM_i_: callnode.o; +text: .text%__1cJloadFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: memnode.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: memnode.o; +text: .text%__1cFStateL_sub_Op_OrI6MpknENode__v_; +text: .text%__1cJCmpL3NodeGOpcode6kM_i_; +text: .text%__1cIciObjectOis_method_data6M_i_: ciInstance.o; +text: .text%__1cIciObjectJis_method6M_i_: ciInstance.o; +text: .text%JVM_FindLoadedClass; +text: .text%__1cLCastP2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIMulLNodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cIMulLNodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cRbranchLoopEndNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_; +text: .text%__1cMMutableSpaceFclear6M_v_; +text: .text%__1cIConFNodeGOpcode6kM_i_; +text: .text%__1cOClearArrayNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cIGraphKitNallocate_heap6MpnENode_222pknITypeFunc_pC22ipknKTypeOopPtr__2_; +text: .text%__1cPciInstanceKlassbBcompute_shared_has_subklass6M_i_; +text: .text%__1cSmembar_acquireNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cMPrefetchNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQmulD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNprefetch2NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIModLNodeGOpcode6kM_i_; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_; +text: .text%__1cKReflectionGinvoke6FnTinstanceKlassHandle_nMmethodHandle_nGHandle_inOobjArrayHandle_nJBasicType_4ipnGThread__pnHoopDesc__; +text: .text%__1cSbranchCon_longNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cQLibraryIntrinsicIgenerate6MpnIJVMState__2_; +text: .text%__1cOLibraryCallKitNtry_to_inline6M_i_; +text: .text%__1cLRShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKTypeRawPtrFxdual6kM_pknEType__; +text: .text%__1cNloadConL0NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cTInlineCallGeneratorJis_inline6kM_i_: library_call.o; +text: .text%__1cFTypeFEmake6Ff_pk0_; +text: .text%__1cIimmFOperJconstantF6kM_f_: ad_sparc_clone.o; +text: .text%__1cEUTF8Ounicode_length6Fpkc_i_; +text: .text%__1cCosRcurrent_thread_id6F_i_; +text: .text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_; +text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_; +text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fi_v_; +text: .text%__1cbACallCompiledJavaDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmembar_acquireNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cPorI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIPhaseCFGOinsert_goto_at6MII_v_; +text: .text%__1cENodeHis_Copy6kM_I_: node.o; +text: .text%__1cZCallInterpreterDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPCountedLoopNodeNstride_is_con6kM_i_: loopTransform.o; +text: .text%__1cITypeLongFwiden6kMpknEType__3_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: loopnode.o; +text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__; +text: .text%__1cSxorI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPCallRuntimeNodeGOpcode6kM_i_; +text: .text%__1cJcmpOpOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cLcmpD_ccNodeJnum_opnds6kM_I_: ad_sparc_misc.o; 
+text: .text%__1cMindIndexOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindIndexOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cMindIndexOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%JVM_FindClassFromClass; +text: .text%__1cRshrP_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cObranchConFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: objArrayKlassKlass.o; +text: .text%__1cLRethrowNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMMutableSpaceKinitialize6MnJMemRegion_i_v_; +text: .text%__1cKstoreLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQshrI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: loopnode.o; +text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_; +text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_; +text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_; +text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_; +text: .text%JVM_NewInstanceFromConstructor; +text: .text%__1cNSignatureInfoIdo_float6M_v_: frame.o; +text: .text%__1cObox_handleNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLBoxLockNodeEhash6kM_I_: classes.o; +text: .text%__1cFParseFBlockMadd_new_path6M_i_; +text: .text%__1cJBytecodesRspecial_length_at6FpC_i_; +text: .text%__1cIimmPOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cMloadConPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: loopnode.o; +text: .text%__1cQsubL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJloadBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvF2DNodeGOpcode6kM_i_; +text: .text%__1cOstackSlotIOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cLConvI2DNodeGOpcode6kM_i_; +text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__; +text: .text%__1cMloadConFNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cKcmpOpPOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: interp_masm_sparc.o; +text: .text%__1cLRShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKimmL13OperJconstantL6kM_x_: ad_sparc_clone.o; +text: .text%__1cSTailCalljmpIndNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlassKlass.o; +text: .text%__1cGIfNodeMdominated_by6MpnENode_pnMPhaseIterGVN__v_; +text: .text%__1cNobjArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cMURShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cRsubI_zero_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcompiledVFrame2t6MpknFframe_pknLRegisterMap_pnKJavaThread_pnJScopeDesc__v_; +text: .text%__1cJScopeDesc2t6MpknHnmethod_i_v_; +text: .text%__1cQshlI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHnmethodNscope_desc_at6MpCi_pnJScopeDesc__; +text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_; +text: .text%__1cbFunnecessary_membar_volatileNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHnmethodUnumber_of_dependents6kM_i_: nmethod.o; +text: .text%__1cSmulI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: multnode.o; +text: 
.text%__1cIGraphKitOnull_check_oop6MpnKRegionNode_pnENode_i_4_; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: compiledIC.o; +text: .text%__1cULinearLeastSquareFitGupdate6Mdd_v_; +text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__; +text: .text%__1cKstoreCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKstoreCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNinstanceKlassVis_same_class_package6MpnHoopDesc_pnNsymbolOopDesc__i_; +text: .text%__1cMjniIdMapBaseHoops_do6MpnKOopClosure__v_; +text: .text%__1cJcmpOpOperKless_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cOcmovII_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cXmembar_acquire_lockNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__; +text: .text%__1cOMacroAssemblerEsetx6MxpnMRegisterImpl_2nJrelocInfoJrelocType__v_; +text: .text%__1cMloadConLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIMaxINodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMloadConDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMindirectOperNconstant_disp6kM_i_: ad_sparc.o; +text: .text%__1cMindirectOperNbase_position6kM_i_: ad_sparc.o; +text: .text%__1cIAddLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMindirectOperFscale6kM_i_: ad_sparc.o; +text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cSsubL_reg_reg_2NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescTset_native_function6MpC_v_; +text: .text%jni_NewString: jni.o; +text: .text%__1cLConvL2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__; +text: .text%__1cQshlI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__; +text: .text%__1cHMatcherQinline_cache_reg6F_nHOptoRegEName__; +text: .text%__1cOcompiledVFrameEcode6kM_pnHnmethod__; +text: .text%__1cIGraphKitMnext_monitor6M_i_; +text: .text%__1cOloadConI13NodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cLBoxLockNode2t6Mi_v_; +text: .text%__1cPconvF2D_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLOptoRuntimebBcomplete_monitor_enter_Type6F_pknITypeFunc__; +text: .text%__1cIGraphKitLshared_lock6MpnENode__pnMFastLockNode__; +text: .text%__1cPcmpFastLockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNloadConP0NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cLOptoRuntimeKjbyte_copy6FpW1I_v_; +text: .text%__1cRorI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNSignatureInfoIdo_short6M_v_: frame.o; +text: .text%__1cKcmpOpUOperEless6kM_i_: ad_sparc_clone.o; +text: .text%__1cQaddF_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_; +text: .text%lwp_mutex_init: os_solaris.o; +text: .text%__1cObox_handleNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFframeLnmethods_do6M_v_; +text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__; +text: .text%__1cQnotemp_iRegIOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__; +text: .text%__1cODataRelocationGoffset6M_i_: relocInfo.o; +text: .text%__1cYinternal_word_RelocationFvalue6M_pC_: relocInfo.o; +text: .text%__1cCosPhint_no_preempt6F_v_; +text: 
.text%__1cYinternal_word_RelocationJpack_data6M_i_; +text: .text%__1cOcmovII_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cUGenericGrowableArrayNraw_appendAll6Mpk0_v_; +text: .text%__1cIMulLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cYinternal_word_RelocationWfix_relocation_at_move6Mi_v_; +text: .text%__1cIMulINodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_; +text: .text%__1cFciEnv2t6MpnHJNIEnv__iii_v_; +text: .text%__1cRsarL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: cfgnode.o; +text: .text%__1cSstring_compareNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFciEnv2T6M_v_; +text: .text%__1cIGraphKitNgen_checkcast6MpnENode_2p2_2_; +text: .text%__1cMMergeMemNodeIadr_type6kM_pknHTypePtr__: memnode.o; +text: .text%__1cbLtransform_int_divide_to_long_multiply6FpnIPhaseGVN_pnENode_i_3_: divnode.o; +text: .text%__1cJcmpOpOperJnot_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cQmulD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cGvframeDtop6kM_p0_; +text: .text%__1cOCompiledRFrameEinit6M_v_; +text: .text%__1cXmembar_acquire_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadSNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cVCallRuntimeDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPcmpFastLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerUcompiler_lock_object6MpnMRegisterImpl_222rnFLabel__v_; +text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_; +text: .text%__1cIXorINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIGraphKitRgen_subtype_check6MpnENode_2_2_; +text: .text%__1cQregF_to_stkINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNSingletonBlobIis_alive6kM_i_: codeBlob.o; +text: .text%__1cOMacroAssemblerLsave_thread6MkpnMRegisterImpl__v_; +text: .text%__1cOcmovII_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: constMethodKlass.o; +text: .text%__1cLcmpD_ccNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKNode_Array2t6MpnFArena__v_: loopopts.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constMethodKlass.o; +text: .text%__1cMloadConINodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cScheck_phi_clipping6FpnHPhiNode_rpnHConNode_rI45rpnENode_5_i_: cfgnode.o; +text: .text%__1cOcmovII_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeMis_SpillCopy6M_pnRMachSpillCopyNode__: ad_sparc.o; +text: .text%__1cRshlL_reg_imm6NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFParseGdo_new6M_v_; +text: .text%__1cZCallDynamicJavaDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQshrI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIimmIOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cQmodI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvI2LNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOPhaseIdealLoopKclone_loop6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%jni_GetObjectClass: jni.o; +text: .text%__1cSxorI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOMacroAssemblerFalign6Mi_v_; +text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_; +text: 
.text%__1cbDReferenceProcessorInitializerIis_clean6kM_v_: concurrentMarkSweepGeneration.o; +text: .text%__1cKManagementJtimestamp6F_x_; +text: .text%__1cIPSOldGenPupdate_counters6M_v_; +text: .text%__1cQshrI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNIdealLoopTreeNpolicy_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cFForteNregister_stub6FpkcpC3_v_; +text: .text%__1cNIdealLoopTreeSpolicy_range_check6kMpnOPhaseIdealLoop__i_; +text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_; +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: loopTransform.o; +text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cTloadL_unalignedNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadLNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cOMacroAssemblerVreset_last_Java_frame6M_v_; +text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_2_v_; +text: .text%__1cSstring_compareNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceRefKlass.o; +text: .text%__1cJloadFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotIOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cQregF_to_stkINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cINodeHash2t6MpnFArena_I_v_; +text: .text%__1cPconvI2L_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPhaseTransform2t6MpnFArena_nFPhaseLPhaseNumber__v_; +text: .text%__1cLPhaseValues2t6MpnFArena_I_v_; +text: .text%__1cLCodeletMark2t6MrpnZInterpreterMacroAssembler_pkcinJBytecodesECode__v_: interpreter.o; +text: .text%__1cJStubQdDueueGcommit6Mi_v_; +text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__; +text: .text%__1cGEventsDlog6FpkcE_v_: nmethod.o; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: typeArrayKlass.o; +text: .text%__1cOcmovII_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOMacroAssemblerKsave_frame6Mi_v_; +text: .text%__1cVshrL_reg_imm6_L2INodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLstoreC0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_and_dispatch6MpnITemplate_nITosState__v_; +text: .text%__1cOPhaseIdealLoopVclone_up_backedge_goo6MpnENode_22_2_; +text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_; +text: .text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_; +text: .text%__1cQregI_to_stkINodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%JVM_FindClassFromClassLoader; +text: .text%JVM_FindClassFromBootLoader; +text: .text%signalHandler; +text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_: typeArrayKlassKlass.o; +text: .text%JVM_handle_solaris_signal; +text: .text%__1cRInlineCacheBufferIis_empty6F_i_; +text: .text%__1cUSafepointSynchronizeRis_cleanup_needed6F_i_; +text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__; +text: .text%__1cNSignatureInfoIdo_float6M_v_: bytecode.o; +text: .text%__1cFStateM_sub_Op_AndL6MpknENode__v_; +text: .text%__1cKConv2BNodeGOpcode6kM_i_; +text: .text%__1cSstring_compareNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFframeRis_compiled_frame6kMpi_i_; +text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_; +text: .text%JVM_IHashCode; +text: 
.text%__1cSconvI2D_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJStartNodeJideal_reg6kM_I_: callnode.o; +text: .text%__1cJStartNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cOMacroAssemblerbBcheck_and_forward_exception6MpnMRegisterImpl__v_; +text: .text%__1cOMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cQshlI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_ltNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKDictionarybAis_valid_protection_domain6MiInMsymbolHandle_nGHandle_2_i_; +text: .text%__1cJloadCNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQandL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovPP_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMVM_OperationSis_cheap_allocated6kM_i_: vm_operations.o; +text: .text%__1cNSignatureInfoHdo_char6M_v_: frame.o; +text: .text%__1cNCompileBrokerZcompilation_is_prohibited6FnMmethodHandle_i_i_; +text: .text%__1cLPhaseValuesKis_IterGVN6M_pnMPhaseIterGVN__: phaseX.o; +text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cMLinkResolverbGruntime_resolve_interface_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_; +text: .text%__1cKC2CompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_; +text: .text%JVM_GetClassLoader; +text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_; +text: .text%__1cNCompileBrokerQset_last_compile6FpnOCompilerThread_nMmethodHandle_ii_v_; +text: .text%__1cbCAbstractInterpreterGeneratorQset_entry_points6MnJBytecodesECode__v_; +text: .text%__1cbCAbstractInterpreterGeneratorWset_short_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cJTraceTime2t6MpkciipnMoutputStream__v_; +text: .text%__1cIciMethodJhas_loops6kM_i_; +text: .text%__1cSconvD2I_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cCosRelapsed_frequency6F_x_; +text: .text%__1cFStateP_sub_Op_ConvL2I6MpknENode__v_; +text: .text%__1cIciMethodQbreak_at_execute6M_i_; +text: .text%__1cOPhaseIdealLoopLdo_split_if6MpnENode__v_; +text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_; +text: .text%__1cKScheduling2t6MpnFArena_rnHCompile__v_; +text: .text%__1cKSchedulingMDoScheduling6M_v_; +text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_; +text: .text%__1cFciEnvPregister_method6MpnIciMethod_iiiiiipnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler_ii_v_; +text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_; +text: .text%__1cNCompileBrokerYcheck_compilation_result6FnMmethodHandle_iippnHnmethod__i_; +text: .text%__1cZInterpreterMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_; +text: .text%__1cSCardTableExtensionbAscavenge_contents_parallel6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager_I_v_; +text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cFframeMpd_gc_epilog6M_v_; +text: .text%__1cMelapsedTimerHseconds6kM_d_; +text: .text%__1cJStealTaskEname6M_pc_: psTasks.o; +text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o; +text: .text%__1cJStealTask2t6Mi_v_; +text: .text%__1cFframeLgc_epilogue6M_v_; +text: .text%__1cFframeLgc_prologue6M_v_; +text: .text%__1cTOldToYoungRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cJStealTaskFdo_it6MpnNGCTaskManager_I_v_; +text: 
.text%__1cTOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cNGCTaskManagerMnote_release6MI_v_; +text: .text%__1cMciMethodDataStrap_recompiled_at6MpnLProfileData__i_; +text: .text%__1cXjava_lang_ref_ReferenceWpending_list_lock_addr6F_ppnHoopDesc__; +text: .text%__1cNmethodOopDescIset_code6MpnHnmethod__v_; +text: .text%__1cQshrI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmembar_acquireNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmembar_acquireNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cICodeBlobJcopy_oops6MppnI_jobject_i_v_; +text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_; +text: .text%__1cVExceptionHandlerTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cHnmethodSresolve_JNIHandles6M_v_; +text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_; +text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_; +text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_; +text: .text%__1cHnmethod2t6MpnNmethodOopDesc_iiiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__v_; +text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_; +text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_; +text: .text%__1cHnmethodLnew_nmethod6FnMmethodHandle_iiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__p0_; +text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_; +text: .text%__1cIimmLOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConLNodeFclone6kM_pnENode__; +text: .text%__1cHCompileVfinal_graph_reshaping6M_i_; +text: .text%__1cIciMethodRbuild_method_data6M_v_; +text: .text%__1cHCompileIOptimize6M_v_; +text: .text%__1cHCompileLFinish_Warm6M_v_; +text: .text%__1cbAfinal_graph_reshaping_walk6FrnKNode_Stack_pnENode_rnUFinal_Reshape_Counts__v_: compile.o; +text: .text%__1cHCompileLInline_Warm6M_i_; +text: .text%__1cSPhaseRemoveUseless2t6MpnIPhaseGVN_pnQUnique_Node_List__v_; +text: .text%__1cJStartNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cKInlineTreeWbuild_inline_tree_root6F_p0_; +text: .text%__1cHCompileRbuild_start_state6MpnJStartNode_pknITypeFunc__pnIJVMState__; +text: .text%__1cIPhaseCCPHanalyze6M_v_; +text: .text%__1cIPhaseCCPMdo_transform6M_v_; +text: .text%__1cIPhaseCCPJtransform6MpnENode__2_; +text: .text%__1cIPhaseCCP2T6M_v_; +text: .text%__1cIPhaseCCP2t6MpnMPhaseIterGVN__v_; +text: .text%__1cHCompileVidentify_useful_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cHCompileUremove_useless_nodes6MrnQUnique_Node_List__v_; +text: .text%__1cQUnique_Node_ListUremove_useless_nodes6MrnJVectorSet__v_; +text: .text%__1cMPhaseIterGVN2t6MpnIPhaseGVN__v_; +text: .text%__1cMPhaseIterGVN2t6Mp0_v_; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod_ii_v_; +text: .text%__1cQmulI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_; +text: .text%__1cHCompileNreturn_values6MpnIJVMState__v_; +text: .text%__1cOcmovII_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cXMachCallInterpreterNodePret_addr_offset6M_i_; +text: .text%__1cOMachEpilogNodeQsafepoint_offset6kM_i_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_; +text: 
.text%__1cYDebugInformationRecorderKadd_oopmap6MiipnGOopMap__v_; +text: .text%__1cIModINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSxorI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFStateP_sub_Op_RShiftI6MpknENode__v_; +text: .text%__1cRsarI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%lwp_cond_init: os_solaris.o; +text: .text%__1cTmembar_volatileNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNloadConL0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cbFunnecessary_membar_volatileNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cENodeIis_Start6M_pnJStartNode__: callnode.o; +text: .text%__1cRshlL_reg_imm6NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXmembar_acquire_lockNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cKPSYoungGenRcapacity_in_bytes6kM_I_; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: interp_masm_sparc.o; +text: .text%__1cMloadConDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulF_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cbCCompiledCodeSafepointHandlerbDhandle_polling_page_exception6M_pC_; +text: .text%__1cZInterpreterMacroAssemblerTdispatch_Lbyte_code6MnITosState_ppCii_v_; +text: .text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cFframebDsender_for_raw_compiled_frame6kMpnLRegisterMap__0_; +text: .text%__1cIJVMStateOis_monitor_use6kMI_i_: reg_split.o; +text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__pC_; +text: .text%__1cOloadConI13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSconvI2F_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadSNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQaddF_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIAddFNodeGOpcode6kM_i_; +text: .text%__1cObranchConFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKciTypeFlowLStateVectorJdo_aaload6MpnQciByteCodeStream__v_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_; +text: .text%__1cFStateO_sub_Op_Binary6MpknENode__v_; +text: .text%__1cKBinaryNodeGOpcode6kM_i_; +text: .text%__1cNSignatureInfoIdo_short6M_v_: bytecode.o; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_3pnRJavaCallArguments_nGHandle_6_6_; +text: .text%__1cNmethodOopDescWcompute_has_loops_flag6M_i_; +text: .text%__1cQstkI_to_regFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLBoxLockNodeDcmp6kMrknENode__I_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constMethodKlass.o; +text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_; +text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_; +text: .text%__1cRNativeMovConstRegIset_data6Mi_v_; +text: .text%__1cFParsebLincrement_and_test_invocation_counter6Mi_v_; +text: .text%__1cJStartNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSsafePoint_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMTailCallNodeGOpcode6kM_i_; +text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii1i_v_; +text: .text%__1cQregP_to_stkPNodeHis_Copy6kM_I_: ad_sparc_misc.o; 
+text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_; +text: .text%__1cHTypePtrFempty6kM_i_; +text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_; +text: .text%__1cSsafePoint_pollNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_; +text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_; +text: .text%__1cIMulFNodeGOpcode6kM_i_; +text: .text%__1cISubLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQmulD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPconvF2D_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOloadConI13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: exceptions.o; +text: .text%__1cTmembar_CPUOrderNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTmembar_CPUOrderNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodKlass.o; +text: .text%__1cSCompareAndSwapNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNSCMemProjNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSThreadLocalStorageGthread6F_pnGThread__: assembler_sparc.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodKlass.o; +text: .text%__1cGThreadVset_pending_exception6MpnHoopDesc_pkci_v_; +text: .text%jni_SetByteArrayRegion: jni.o; +text: .text%__1cFMutex2t6Mipkci_v_; +text: .text%__1cQregI_to_stkINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__; +text: .text%__1cSdivL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cFStateM_sub_Op_XorI6MpknENode__v_; +text: .text%__1cHTypePtrEmake6FnETypeFTYPES_n0ADPTR_i_pk0_; +text: .text%__1cCosLelapsedTime6F_d_; +text: .text%__1cKScopeValueJread_from6FpnTDebugInfoReadStream__p0_; +text: .text%__1cKPerfMemoryMmark_updated6F_v_; +text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_; +text: .text%__1cKPerfMemoryFalloc6FI_pc_; +text: .text%__1cLStrCompNodeKmatch_edge6kMI_I_; +text: .text%__1cQmulL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cILocation2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__; +text: .text%jni_ReleaseStringUTFChars; +text: .text%jni_GetStringUTFChars: jni.o; +text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cFParseLarray_store6MnJBasicType__v_; +text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_; +text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_; +text: .text%__1cSmulI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_IsInterrupted; +text: .text%__1cLLShiftLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNSignatureInfoHdo_char6M_v_: bytecode.o; +text: .text%JVM_FindLibraryEntry; +text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_; +text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: assembler_sparc.o; +text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_ipnGThread__pnMklassOopDesc__; +text: 
.text%__1cRshlL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHCompile2t6MpnFciEnv_pnIciMethod_i_v_; +text: .text%__1cQshlL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: classes.o; +text: .text%__1cHBitDataKis_BitData6M_i_: ciMethodData.o; +text: .text%__1cNLocationValueLis_location6kM_i_: debugInfo.o; +text: .text%__1cPconvF2D_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLRShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSstring_compareNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIMinINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMacroAssemblerRload_ptr_contents6MrnHAddress_pnMRegisterImpl_i_v_: assembler_sparc.o; +text: .text%__1cOMacroAssemblerEstop6Mpkc_v_; +text: .text%__1cKRegionNodeJideal_reg6kM_I_: loopnode.o; +text: .text%__1cENodeKis_PCTable6kM_pknLPCTableNode__: loopnode.o; +text: .text%__1cENodeHis_Root6M_pnIRootNode__: loopnode.o; +text: .text%__1cSconvI2F_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cObranchConFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHMatcherOc_return_value6Fii_nLRegPair__; +text: .text%__1cENodeHis_Copy6kM_I_: loopnode.o; +text: .text%__1cKloadUBNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKg3RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cSsubL_reg_reg_2NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddP_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cTloadL_unalignedNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cLcmpD_ccNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cICallNodeRis_CallStaticJava6kM_pknSCallStaticJavaNode__: callnode.o; +text: .text%__1cMregD_lowOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cTCallDynamicJavaNodeSis_CallDynamicJava6kM_pk0_: callnode.o; +text: .text%__1cTloadL_unalignedNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLLShiftLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_; +text: .text%__1cObranchConFNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cObox_handleNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_: jni.o; +text: .text%__1cQmodI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRbranchLoopEndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQaddF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKcmpOpUOperHgreater6kM_i_: ad_sparc_clone.o; +text: .text%__1cUParallelScavengeHeapEused6kM_I_; +text: .text%__1cIDivINodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cVCallRuntimeDirectNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQxorI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLOptoRuntimeOarraycopy_Type6F_pknITypeFunc__; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodKlass.o; +text: .text%__1cWCallLeafNoFPDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTbasictype2arraycopy6FnJBasicType_i_pC_; +text: .text%__1cOLibraryCallKitQinline_arraycopy6M_i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodKlass.o; +text: .text%__1cLcmpD_ccNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cWCallLeafNoFPDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLOptoRuntimeSnew_typeArray_Type6F_pknITypeFunc__; +text: 
.text%__1cJloadINodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitJnew_array6MpnENode_nJBasicType_pknEType_pknMTypeKlassPtr__2_; +text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_; +text: .text%__1cIMinINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__; +text: .text%__1cTmembar_CPUOrderNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cOClearArrayNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cRbranchLoopEndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRbranchLoopEndNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cLMachUEPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTmembar_volatileNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKo1RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cCosTnative_java_library6F_pv_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_; +text: .text%__1cSxorI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cYinlineCallClearArrayNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cYinlineCallClearArrayNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMPhaseChaitinGSelect6M_I_; +text: .text%__1cLOptoRuntimeInew_Type6F_pknITypeFunc__; +text: .text%__1cFParseSjump_switch_ranges6MpnENode_pnLSwitchRange_4i_v_; +text: .text%__1cSbranchCon_longNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cSbranchCon_longNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSbranchCon_longNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_; +text: .text%__1cIModINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_; +text: .text%__1cIGraphKitMnew_instance6MpnPciInstanceKlass__pnENode__; +text: .text%__1cLstoreP0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cMloadConLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseTprofile_switch_case6Mi_v_; +text: .text%__1cOcmovIL_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMNativeLookupGlookup6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cSandI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIimmLOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cFParseOmerge_new_path6Mi_v_; +text: .text%__1cYMachCallCompiledJavaNodePret_addr_offset6M_i_; +text: .text%__1cQregP_to_stkPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cQmulF_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_; +text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__; +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2ipnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: templateTable_sparc.o; +text: .text%__1cHnmethodXinterpreter_entry_point6M_pC_; +text: .text%__1cSmembar_releaseNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQPSIsAliveClosureLdo_object_b6MpnHoopDesc__i_: psScavenge.o; +text: .text%jni_NewByteArray: jni.o; +text: .text%__1cQdivL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cSsubL_reg_reg_2NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_; +text: .text%__1cUdivL_reg_imm13_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSInterpreterRuntimeTnmethod_entry_point6FpnKJavaThread_pnNmethodOopDesc_pnHnmethod__pC_; +text: .text%__1cQaddF_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_; +text: .text%__1cFParseLdo_newarray6MnJBasicType__v_; +text: .text%__1cPmethodDataKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cMNativeLookupLlookup_base6FnMmethodHandle_ripnGThread__pC_; +text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__; +text: .text%__1cKRegionNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cSconvI2D_helperNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreP0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHRetNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cSmembar_releaseNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_; +text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__; +text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_; +text: .text%__1cSTailCalljmpIndNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQcmovI_reg_gtNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cObox_handleNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetMethodIxExceptionTableEntry; +text: .text%__1cSReferenceProcessorZadd_to_discovered_list_mt6MppnHoopDesc_23_v_; +text: .text%__1cIDivINodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLstoreP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQstkI_to_regFNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRethrowNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKloadUBNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompileSrethrow_exceptions6MpnIJVMState__v_; +text: .text%__1cURethrowExceptionNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLRethrowNode2t6MpnENode_22222_v_; +text: .text%__1cTLoadL_unalignedNodeGOpcode6kM_i_; +text: .text%__1cSmulI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerZget_2_byte_integer_at_bcp6MipnMRegisterImpl_2n0ALsignedOrNot_n0AKsetCCOrNot__v_; +text: .text%__1cQcmovI_reg_gtNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cURethrowExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_; +text: .text%__1cISubLNodeDsub6kMpknEType_3_3_; +text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cJLoadINodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cQandI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cQmulI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFParsePmerge_exception6Mi_v_; +text: .text%__1cLStrCompNodeLbottom_type6kM_pknEType__: classes.o; +text: 
.text%__1cYinlineCallClearArrayNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlass.o; +text: .text%__1cNloadConP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_ReleaseStringCritical: jni.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: objArrayKlass.o; +text: .text%__1cNobjArrayKlassZcan_be_primary_super_slow6kM_i_; +text: .text%__1cJCMoveNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%jni_GetStringCritical: jni.o; +text: .text%__1cUmulL_reg_imm13_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHciKlassSsuper_check_offset6M_I_; +text: .text%__1cSTailCalljmpIndNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__: ciObjArrayKlass.o; +text: .text%__1cRorI_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregF_to_stkINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cWCallLeafNoFPDirectNodeKmethod_set6Mi_v_; +text: .text%__1cWCallLeafNoFPDirectNodeRis_safepoint_node6kM_i_: ad_sparc_misc.o; +text: .text%__1cWCallLeafNoFPDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: parse1.o; +text: .text%__1cIDivLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_: icBuffer.o; +text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_: icBuffer.o; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: frame.o; +text: .text%__1cMloadConFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMadjust_check6FpnENode_11iipnMPhaseIterGVN__v_: ifnode.o; +text: .text%__1cJScopeDescGsender6kM_p0_; +text: .text%__1cNloadConP0NodeFclone6kM_pnENode__; +text: .text%__1cSxorI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJimmP0OperFclone6kM_pnIMachOper__; +text: .text%__1cOcompiledVFrameGsender6kM_pnGvframe__; +text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_; +text: .text%__1cNloadConPCNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_2NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_; +text: .text%__1cKciTypeFlowLStateVectorEtrap6MpnQciByteCodeStream_pnHciKlass_i_v_; +text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_; +text: .text%__1cQregI_to_stkINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_; +text: .text%__1cIregDOperFclone6kM_pnIMachOper__; +text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_; +text: .text%__1cIUniverseHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_; +text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_; +text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_; +text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_; +text: .text%__1cOPhaseIdealLoopOadd_constraint6MiipnENode_22p23_v_; +text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_; +text: .text%__1cYinternal_word_RelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cIregFOperFclone6kM_pnIMachOper__; +text: .text%__1cJloadDNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cKstoreBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQshlL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_; +text: .text%__1cOcmovPI_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cObox_handleNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreI0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQciByteCodeStreamPget_klass_index6M_i_; +text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_; +text: .text%__1cNinstanceKlassQmethod_index_for6kMpnNmethodOopDesc_pnGThread__i_; +text: .text%__1cNmethodOopDescVparameter_annotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cNmethodOopDescLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cRtestI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__; +text: .text%__1cFParseMdo_checkcast6M_v_; +text: .text%__1cOCompiledRFrameLis_compiled6kM_i_: rframe.o; +text: .text%__1cOCompiledRFrameKtop_method6kM_nMmethodHandle__: rframe.o; +text: .text%__1cICodeBlobPis_runtime_stub6kM_i_: nmethod.o; +text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__; +text: .text%__1cRtestI_reg_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOcmovIL_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJimmU6OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cHRegMask2t6M_v_: matcher.o; +text: .text%__1cOPhaseIdealLoopJdo_unroll6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_; +text: .text%__1cLServiceUtilLvisible_oop6FpnHoopDesc__i_: objectMonitor_solaris.o; +text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_; +text: .text%__1cIimmIOperFclone6kM_pnIMachOper__; +text: .text%__1cMloadConINodeFclone6kM_pnENode__; +text: .text%__1cSmulL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPstoreI_FregNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcmpD_ccNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXMachCallDynamicJavaNodePret_addr_offset6M_i_; +text: .text%__1cENodeFis_If6M_pnGIfNode__: ad_sparc_misc.o; +text: .text%__1cNflagsRegFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o; +text: .text%__1cXvirtual_call_RelocationJpack_data6M_i_; +text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_; +text: .text%__1cSconvD2I_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHMonitor2t6Mipkci_v_; +text: .text%__1cIModLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMloadConFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOtypeArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__; +text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersbBupdate_counters_from_policy6M_v_; +text: .text%__1cXTraceMemoryManagerStats2T6M_v_; +text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_; +text: .text%__1cQLRUMaxHeapPolicy2t6M_v_; +text: .text%__1cSReferenceProcessorbDprocess_discovered_references6M_v_; +text: .text%__1cSReferenceProcessorQprocess_phaseJNI6M_v_; +text: .text%__1cSReferenceProcessorbDenqueue_discovered_references6M_i_; +text: 
.text%__1cSReferenceProcessorbBenqueue_discovered_reflists6MppnHoopDesc__v_; +text: .text%__1cUParallelScavengeHeapQresize_all_tlabs6M_v_; +text: .text%__1cUParallelScavengeHeapPupdate_counters6M_v_; +text: .text%__1cUParallelScavengeHeapTensure_parseability6M_v_; +text: .text%__1cUParallelScavengeHeapOfill_all_tlabs6M_v_; +text: .text%__1cUParallelScavengeHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_; +text: .text%__1cXTraceMemoryManagerStats2t6Mi_v_; +text: .text%__1cTDerivedPointerTablePupdate_pointers6F_v_; +text: .text%__1cNCollectedHeapOfill_all_tlabs6M_v_; +text: .text%__1cNCollectedHeapTensure_parseability6M_v_; +text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_; +text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_; +text: .text%__1cMTypeKlassPtrFxmeet6kMpknEType__3_; +text: .text%__1cXjava_lang_ref_ReferenceRpending_list_addr6F_ppnHoopDesc__; +text: .text%__1cKPSYoungGenPupdate_counters6M_v_; +text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_; +text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_; +text: .text%__1cPGCMemoryManagerIgc_begin6M_v_; +text: .text%__1cPGCMemoryManagerGgc_end6M_v_; +text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_; +text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_; +text: .text%__1cbAPSGCAdaptivePolicyCountersPupdate_counters6M_v_; +text: .text%__1cTDerivedPointerTableFclear6F_v_; +text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_; +text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_; +text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_; +text: .text%__1cMCounterDecayFdecay6F_v_; +text: .text%__1cCosbCmake_polling_page_unreadable6F_v_; +text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_; +text: .text%__1cLConvI2FNodeGOpcode6kM_i_; +text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_; +text: .text%__1cQaddF_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRshlL_reg_imm6NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUSafepointSynchronizeFbegin6F_v_; +text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cONMethodSweeperFsweep6F_v_; +text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: arrayKlass.o; +text: .text%__1cTAbstractInterpreterRnotice_safepoints6F_v_; +text: .text%__1cCosbAmake_polling_page_readable6F_v_; +text: .text%__1cUSafepointSynchronizeDend6F_v_; +text: .text%__1cOcmovII_immNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cTAbstractInterpreterRignore_safepoints6F_v_; +text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_; +text: .text%__1cJStoreNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cPconvF2D_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKimmU13OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cQshlL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cUcompU_iReg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_GetCallerClass; +text: .text%__1cNSignatureInfoHdo_byte6M_v_: bytecode.o; +text: .text%__1cOcmovPP_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKstoreBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSobjArrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cLstoreC0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cTloadL_unalignedNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cICmpFNodeGOpcode6kM_i_; +text: 
.text%__1cSdivL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cOstackSlotPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cQregF_to_stkINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJLoadDNodeGOpcode6kM_i_; +text: .text%__1cQmulD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSconvI2F_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%jni_IsAssignableFrom: jni.o; +text: .text%jni_GetFieldID: jni.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pCii_v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: cpCacheKlass.o; +text: .text%__1cJLoadPNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: cpCacheKlass.o; +text: .text%__1cLstoreB0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_; +text: .text%__1cHTypeAryFxdual6kM_pknEType__; +text: .text%__1cMtlsLoadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJEventMark2t6MpkcE_v_: vmThread.o; +text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_; +text: .text%__1cCosMget_priority6FkpknGThread_rnOThreadPriority__nIOSReturn__; +text: .text%__1cGThreadMget_priority6Fkpk0_nOThreadPriority__; +text: .text%__1cMVM_OperationIevaluate6M_v_; +text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_; +text: .text%__1cCosTget_native_priority6FkpknGThread_pi_nIOSReturn__; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: interp_masm_sparc.o; +text: .text%__1cQdivL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cMnegD_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cUParallelScavengeHeapMmem_allocate6MIii_pnIHeapWord__; +text: .text%__1cQregP_to_stkPNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQcmovI_reg_gtNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cWloadConI_x43300000NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cGGCTask2t6Mn0AEKindEkind__v_; +text: .text%__1cNGCTaskManagerVrelease_all_resources6M_v_; +text: .text%__1cUPSAdaptiveSizePolicyWminor_collection_begin6M_v_; +text: .text%__1cLGCTaskQdDueueHenqueue6Mp0_v_; +text: .text%__1cSCardTableExtensionRscavenge_contents6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager__v_; +text: .text%__1cUWaitForBarrierGCTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cNGCTaskManagerIadd_list6MpnLGCTaskQdDueue__v_; +text: .text%__1cHThreadsZcreate_thread_roots_tasks6FpnLGCTaskQdDueue__v_; +text: .text%__1cUWaitForBarrierGCTaskGcreate6F_p0_; +text: .text%__1cUWaitForBarrierGCTaskIdestruct6M_v_; +text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_; +text: .text%__1cSObjectSynchronizerKfast_enter6FnGHandle_pnJBasicLock_pnGThread__v_; +text: .text%__1cUPSAdaptiveSizePolicyUminor_collection_end6MnHGCCauseFCause__v_; +text: .text%__1cSPSPromotionManagerNpost_scavenge6F_v_; +text: .text%__1cNBarrierGCTaskOdo_it_internal6MpnNGCTaskManager_I_v_; +text: .text%__1cNJvmtiGCMarker2T6M_v_; +text: .text%__1cUWaitForBarrierGCTaskHdestroy6Fp0_v_; +text: .text%__1cLGCTaskQdDueueGcreate6F_p0_; +text: .text%__1cSPSPromotionManagerMpre_scavenge6F_v_; +text: .text%__1cZSerialOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_; +text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_; +text: 
.text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_; +text: .text%__1cZSerialOldToYoungRootsTaskEname6M_pc_: psTasks.o; +text: .text%__1cKPSYoungGenLswap_spaces6M_v_; +text: .text%__1cUParallelScavengeHeapQresize_young_gen6MII_v_; +text: .text%__1cODeoptimizationYtrap_state_is_recompiled6Fi_i_; +text: .text%__1cKPSYoungGenGresize6MII_v_; +text: .text%__1cKPSYoungGenNresize_spaces6MII_v_; +text: .text%__1cUPSAdaptiveSizePolicyPupdate_averages6MiII_v_; +text: .text%__1cUPSAdaptiveSizePolicybPcompute_survivor_space_size_and_threshold6MiiI_i_; +text: .text%__1cSPSPromotionManagerbBvm_thread_promotion_manager6F_p0_; +text: .text%__1cUWaitForBarrierGCTaskIwait_for6M_v_; +text: .text%__1cKPSScavengeQinvoke_no_policy6Fpi_i_; +text: .text%__1cPVM_GC_OperationQgc_count_changed6kM_i_; +text: .text%__1cKPSYoungGenRresize_generation6MII_i_; +text: .text%__1cPVM_GC_OperationNdoit_prologue6M_i_; +text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_; +text: .text%__1cNMonitorSupplyHreserve6F_pnHMonitor__; +text: .text%__1cNJvmtiGCMarker2t6Mi_v_; +text: .text%__1cNMonitorSupplyHrelease6FpnHMonitor__v_; +text: .text%__1cUWaitForBarrierGCTaskEname6M_pc_: gcTaskManager.o; +text: .text%__1cTmembar_volatileNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__; +text: .text%__1cQjava_lang_ThreadJis_daemon6FpnHoopDesc__i_; +text: .text%__1cVCallRuntimeDirectNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cTloadL_unalignedNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovII_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQComputeCallStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cMLinkResolverbPlinktime_resolve_interface_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__; +text: .text%__1cMURShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSCompiledStaticCallIis_clean6kM_i_; +text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_; +text: .text%__1cUmulL_reg_imm13_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cKstoreBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_; +text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_; +text: .text%__1cFframeZinterpreter_frame_set_mdp6MpC_v_; +text: .text%__1cLOptoRuntimeVresolve_static_call_C6FpnKJavaThread__pC_; +text: .text%__1cZInterpreterMacroAssemblerIpush_ptr6MpnMRegisterImpl__v_; +text: .text%__1cHMatcherbAinterpreter_method_oop_reg6F_nHOptoRegEName__; +text: .text%__1cTCallInterpreterNodeSis_CallInterpreter6kM_pk0_: classes.o; +text: .text%__1cTCallInterpreterNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cHCompilebMGenerate_Compiled_To_Interpreter_Graph6MpknITypeFunc_pC_v_; +text: .text%__1cISubLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cZCallInterpreterDirectNodeKmethod_set6Mi_v_; +text: .text%__1cIciMethodRinterpreter_entry6M_pC_; +text: .text%__1cQmulF_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cXMachCallInterpreterNodeWis_MachCallInterpreter6M_p0_: ad_sparc_misc.o; +text: .text%__1cZCallInterpreterDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPconvF2D_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZCallInterpreterDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cRcompL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWloadConI_x41f00000NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadBNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%jni_SetBooleanField: jni.o; +text: .text%__1cQciByteCodeStreamFtable6MnJBytecodesECode__2_; +text: .text%__1cKimmL13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cRsarL_reg_imm6NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: sharedRuntime.o; +text: .text%__1cLcmpF_ccNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKloadUBNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLRuntimeStubbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o; +text: .text%__1cRorI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cWResolveOopMapConflictsRpossible_gc_point6MpnOBytecodeStream__i_: rewriter.o; +text: .text%__1cRsarL_reg_imm6NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQmulI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMnegD_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_; +text: .text%__1cZInterpreterMacroAssemblerGpush_i6MpnMRegisterImpl__v_; +text: .text%__1cUdivL_reg_imm13_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_; +text: .text%__1cLLShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cOcmovIL_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2D_memNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQComputeCallStackJdo_double6M_v_: generateOopMap.o; +text: .text%__1cJloadSNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cRloadConP_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSstkL_to_regD_0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNObjectMonitorHRecycle6M_v_; +text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cQshlL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cMloadConLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cOMacroAssemblerWstore_unaligned_double6MpnRFloatRegisterImpl_pnMRegisterImpl_i_v_; +text: .text%__1cJloadDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__; +text: .text%__1cQstkI_to_regFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQregP_to_stkPNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerFpop_i6MpnMRegisterImpl__v_; +text: .text%__1cSTailCalljmpIndNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSTailCalljmpIndNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cIMaxINodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cLOptoRuntimeThandle_wrong_method6FpnKJavaThread__pC_; +text: .text%__1cOMacroAssemblerUstore_unaligned_long6MpnMRegisterImpl_2i_v_; +text: .text%__1cSmulL_reg_reg_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cOLibraryCallKitRinline_unsafe_CAS6MnJBasicType__i_; +text: .text%__1cTCompareAndSwapLNode2t6MpnENode_2222_v_; +text: 
.text%__1cYcompareAndSwapL_boolNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSCompareAndSwapNode2t6MpnENode_2222_v_; +text: .text%__1cNSCMemProjNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cYcompareAndSwapL_boolNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cIProjNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cUPSAdaptiveSizePolicyOshould_full_GC6MI_i_; +text: .text%__1cIPSOldGenMmax_gen_size6M_I_: psOldGen.o; +text: .text%__1cUPSAdaptiveSizePolicyQdecaying_gc_cost6kM_d_; +text: .text%__1cUPSAdaptiveSizePolicybDcompute_generation_free_space6MIIIIIIIi_v_; +text: .text%__1cSmulL_reg_reg_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKPSScavengeGinvoke6Fpi_v_; +text: .text%__1cUPSAdaptiveSizePolicyVadjust_for_throughput6MipI1_v_; +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_; +text: .text%__1cSsubL_reg_reg_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUPSAdaptiveSizePolicyZdecay_supplemental_growth6Mi_v_; +text: .text%__1cSdivL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cUParallelScavengeHeapTfailed_mem_allocate6MpiIii_pnIHeapWord__; +text: .text%__1cbDVM_ParallelGCFailedAllocation2t6MIiiI_v_; +text: .text%__1cbDVM_ParallelGCFailedAllocationEdoit6M_v_; +text: .text%__1cbDVM_ParallelGCFailedAllocationEname6kM_pkc_: vm_operations.o; +text: .text%__1cQaddL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPadd_derived_oop6FppnHoopDesc_2_v_: oopMap.o; +text: .text%__1cMregD_lowOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cHOrINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cOMethodLivenessKBasicBlockFsplit6Mi_p1_; +text: .text%__1cOcmovII_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseScreate_jump_tables6MpnENode_pnLSwitchRange_4_i_; +text: .text%__1cENodeEgetd6kM_d_; +text: .text%__1cZInterpreterMacroAssemblerKverify_oop6MpnMRegisterImpl_nITosState__v_; +text: .text%__1cOcmovIL_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_; +text: .text%__1cZInterpreterMacroAssemblerGpush_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cIciObject2t6MpnHciKlass__v_; +text: .text%__1cRtestI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSxorI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__; +text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_; +text: .text%__1cKstoreFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHMatcherXinterpreter_arg_ptr_reg6F_nHOptoRegEName__; +text: .text%__1cPstoreI_FregNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cKCMovePNodeGOpcode6kM_i_; +text: .text%__1cLstoreC0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKciTypeFlowOsplit_range_at6Mi_pn0AFRange__; +text: .text%__1cNObjectMonitorEwait6MxipnGThread__v_; +text: .text%__1cSmulI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%JVM_MonitorWait; +text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_; +text: .text%__1cNloadConPCNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIAddLNodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_; +text: .text%__1cGciType2t6MpnHciKlass__v_; +text: .text%__1cQshlI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQdivD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cIciObjectUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cFParseSjump_if_false_fork6MpnGIfNode_ii_v_; +text: .text%__1cNloadConL0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHBoxNodeGOpcode6kM_i_; +text: .text%__1cRshrL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMflagsRegOperFclone6kM_pnIMachOper__; +text: .text%__1cSconvI2F_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregF_to_stkINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cbFunnecessary_membar_volatileNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUciInstanceKlassKlassEmake6F_p0_; +text: .text%__1cENode2t6Mp0111111_v_; +text: .text%__1cIDivLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cZInterpreterMacroAssemblerGpush_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cPstoreI_FregNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParseRarray_store_check6M_v_; +text: .text%__1cQsubF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIciSymbolHbyte_at6Mi_i_; +text: .text%__1cKCompiledICSset_ic_destination6MpC_v_; +text: .text%__1cbDreorder_based_on_method_index6FpnPobjArrayOopDesc_1ppnHoopDesc__v_: methodOop.o; +text: .text%__1cQaddD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__; +text: .text%__1cQshlI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQset_lwp_priority6Fiii_i_; +text: .text%__1cSstkL_to_regD_0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cbCAbstractInterpreterGeneratorVset_vtos_entry_points6MpnITemplate_rpC44444444_v_; +text: .text%__1cQregI_to_stkINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%jni_NewStringUTF: jni.o; +text: .text%__1cZInterpreterMacroAssemblerGpush_l6MpnMRegisterImpl__v_; +text: .text%__1cQsubI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cZInterpreterMacroAssemblerXget_constant_pool_cache6MpnMRegisterImpl__v_; +text: .text%__1cSbranchCon_longNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cXjava_lang_ref_ReferenceOset_discovered6FpnHoopDesc_2_v_; +text: .text%__1cKcmpOpUOperKless_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cMLinkResolverUresolve_virtual_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_; +text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__; +text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__; +text: .text%__1cUParallelScavengeHeapIcapacity6kM_I_; +text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__; +text: .text%__1cSsubL_reg_reg_2NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQaddD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNget_method_id6FpnHJNIEnv__pnH_jclass_pkc5ipnGThread__pnK_jmethodID__: jni.o; +text: .text%__1cMjniIdSupportNto_jmethod_id6FpnNmethodOopDesc__pnK_jmethodID__; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceRefKlass.o; +text: .text%__1cENodeGis_Mem6M_pnHMemNode__: cfgnode.o; +text: .text%JVM_DefineClassWithSource; +text: .text%__1cLstoreF0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cJloadINodeFclone6kM_pnENode__; +text: .text%JVM_SetClassSigners; +text: .text%__1cQdivL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cXconvI2D_regDHi_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cKCompiledICMset_to_clean6M_v_; +text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o; +text: .text%__1cSandL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRbranchLoopEndNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cLRShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNSingletonBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o; +text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_: markSweep.o; +text: .text%__1cFParseXfetch_interpreter_state6MipknEType_pnENode__5_; +text: .text%__1cFParseWcheck_interpreter_type6MpnENode_pknEType_rpnNSafePointNode__2_; +text: .text%__1cOcmovPP_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_; +text: .text%get_thread; +text: .text%__1cKstoreCNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FIi_pnGThread__; +text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_; +text: .text%jni_CallIntMethod: jni.o; +text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_; +text: .text%__1cKloadUBNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregL_to_stkLNodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cUCallCompiledJavaNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cSconvD2I_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHCompilebMGenerate_Interpreter_To_Compiled_Graph6MpknITypeFunc__v_; +text: .text%__1cbACallCompiledJavaDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cbACallCompiledJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSconvD2I_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cbACallCompiledJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cIMulDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cbACallCompiledJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_return_entry_for6MnITosState_i_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_deopt_entry_for6MnITosState_i_pC_; +text: .text%__1cSaddP_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIAddDNodeGOpcode6kM_i_; +text: .text%__1cOcmovPP_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvI2D_helperNodeFclone6kM_pnENode__; +text: .text%__1cOloadI_fregNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cHCompileRmake_vm_intrinsic6MpnIciMethod_i_pnNCallGenerator__; +text: .text%__1cOloadI_fregNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCountedLoopEndNode2t6MpnENode_2ff_v_; +text: .text%__1cQmulD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosJyield_all6Fi_v_; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: vmThread.o; +text: .text%__1cKstoreLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPstoreI_FregNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFTypeDJsingleton6kM_i_; +text: .text%__1cLstoreC0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetClassMethodsCount; +text: .text%__1cKstoreINodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%JVM_GetClassFieldsCount; +text: .text%__1cLconvI2BNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_ThreadMis_stillborn6FpnHoopDesc__i_; +text: .text%__1cRorI_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: 
.text%JVM_GetClassCPEntriesCount; +text: .text%JVM_GetClassCPTypes; +text: .text%__1cGEventsDlog6FpkcE_v_: thread.o; +text: .text%__1cHciKlassMis_interface6M_i_: ciTypeArrayKlass.o; +text: .text%__1cQmulI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOMacroAssemblerKverify_FPU6Mipkc_v_; +text: .text%__1cVinline_cache_regPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__; +text: .text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_; +text: .text%__1cIAddFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cPconvF2D_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregI_to_stkINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulF_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cbBinitialize_itable_for_klass6FpnMklassOopDesc__v_; +text: .text%__1cTloadL_unalignedNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interpreterRT_sparc.o; +text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6MX_v_; +text: .text%JVM_IsPrimitiveClass; +text: .text%__1cJimmU6OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cOPhaseIdealLoopUpeeled_dom_test_elim6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cIDivDNodeGOpcode6kM_i_; +text: .text%__1cObox_handleNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRorI_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Loop6M_pnILoopNode__: cfgnode.o; +text: .text%__1cKloadUBNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cTmembar_volatileNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_reg_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__; +text: .text%__1cIModLNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cXconvI2D_regDHi_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%jni_FindClass: jni.o; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cOMacroAssemblerOstore_argument6MpnMRegisterImpl_rnIArgument__v_: interpreterRT_sparc.o; +text: .text%__1cFParseHdo_irem6M_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorThas_signature_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorVhas_annotations_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorbFhas_parameter_annotations_field6F_i_; +text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__; +text: 
.text%__1cGThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cGThreadVis_jvmti_agent_thread6kM_i_: thread.o; +text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__; +text: .text%__1cENodeZcheck_for_anti_dependence6kM_i_: machnode.o; +text: .text%__1cNloadConP0NodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarL_reg_imm6NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovII_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSdivL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cTloadL_unalignedNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNSharedRuntimeDd2l6Fd_x_; +text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__; +text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_; +text: .text%__1cFStateP_sub_Op_RShiftL6MpknENode__v_; +text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_: icBuffer.o; +text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_: icBuffer.o; +text: .text%__1cObox_handleNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cQshrI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cQregI_to_stkINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRorI_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__; +text: .text%__1cMregD_lowOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cLConvD2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSconvI2F_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHMonitor2T6M_v_; +text: .text%__1cFTypeDFxmeet6kMpknEType__3_; +text: .text%__1cFMutex2T6M_v_; +text: .text%__1cRtestI_reg_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%lwp_cond_destroy: os_solaris.o; +text: .text%lwp_mutex_destroy: os_solaris.o; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: multnode.o; +text: .text%__1cQdivI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_; +text: .text%__1cLPhaseValuesHlongcon6Mx_pnIConLNode__; +text: .text%__1cQregP_to_stkPNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQstkI_to_regFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQregI_to_stkINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQRelocationHolderEplus6kMi_0_; +text: .text%__1cUPSMarkSweepDecoratorHcompact6Mi_v_; +text: .text%__1cUPSMarkSweepDecoratorPadjust_pointers6M_v_; +text: .text%__1cUPSMarkSweepDecoratorKprecompact6M_v_; +text: .text%__1cTloadL_unalignedNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_; +text: .text%__1cQregF_to_stkINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cHCompileQgrow_alias_types6M_v_; +text: .text%__1cLLShiftLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cbFunnecessary_membar_volatileNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOcmovII_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNiRegIsafeOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_; +text: .text%__1cUmulL_reg_imm13_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKloadUBNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHnmethodPis_dependent_on6MpnMklassOopDesc__i_; +text: 
.text%__1cTloadD_unalignedNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cZInterpreterMacroAssemblerHpop_ptr6MpnMRegisterImpl__v_; +text: .text%__1cQdivD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHis_Bool6M_pnIBoolNode__: connode.o; +text: .text%__1cKLoadPCNodeGOpcode6kM_i_; +text: .text%__1cENodeIis_CMove6M_pnJCMoveNode__: connode.o; +text: .text%__1cOloadConL13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRcompL_reg_conNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQaddF_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFKlassMoop_is_array6kM_i_: methodDataKlass.o; +text: .text%__1cICmpDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFKlassPoop_is_instance6kM_i_: methodDataKlass.o; +text: .text%__1cLcmpF_ccNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKsplit_once6FpnMPhaseIterGVN_pnENode_333_v_: cfgnode.o; +text: .text%__1cLLShiftLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJloadFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cJCMoveNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cFciEnvWis_dependence_violated6FpnMklassOopDesc_pnNmethodOopDesc__i_; +text: .text%__1cOPhaseIdealLoopOdo_range_check6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cQaddD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceRefKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlassKlass.o; +text: .text%__1cObox_handleNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSconvD2I_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitPdstore_rounding6MpnENode__2_; +text: .text%__1cJloadINodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSdivL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQregP_to_stkPNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRloadConP_pollNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIModINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cZCallDynamicJavaDirectNodeKmethod_set6Mi_v_; +text: .text%__1cFframeVshould_be_deoptimized6kM_i_; +text: .text%__1cZCallDynamicJavaDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXvirtual_call_RelocationMupdate_addrs6MrknKCodeBuffer_3_v_; +text: .text%__1cUdivL_reg_imm13_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvD2I_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCMoveINodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPCallRuntimeNodeOis_CallRuntime6kM_pk0_: callnode.o; +text: .text%__1cZCallDynamicJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsubF_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeBlobOis_java_method6kM_i_: codeBlob.o; +text: .text%__1cQMachCallJavaNodeVis_MachCallStaticJava6M_pnWMachCallStaticJavaNode__: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC2i_v_; +text: .text%__1cUmulL_reg_imm13_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQdivL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cENodeMis_CatchProj6kM_pknNCatchProjNode__: classes.o; +text: .text%__1cOcmovPI_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cUdivL_reg_imm13_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cMTypeKlassPtrRcast_to_exactness6kMi_pknEType__; +text: 
.text%__1cUmulL_reg_imm13_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNCallGeneratorQfor_virtual_call6FpnIciMethod__p0_; +text: .text%__1cUVirtualCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cNObjectMonitor2t6M_v_; +text: .text%__1cTmembar_volatileNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulINodeKadd_opcode6kM_i_: classes.o; +text: .text%__1cIMulINodeKmul_opcode6kM_i_: classes.o; +text: .text%__1cQdivD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cJCmpD3NodeGOpcode6kM_i_; +text: .text%__1cJloadDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMinINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__; +text: .text%__1cUVirtualCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cQmulF_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_MonitorNotify; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: templateTable_sparc.o; +text: .text%__1cFBlockNset_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_; +text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_; +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_sparc.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: interpreterRT_sparc.o; +text: .text%__1cKstoreFNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cSstring_compareNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cYjava_lang_reflect_MethodVhas_annotations_field6F_i_; +text: .text%__1cVshrL_reg_imm6_L2INodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cOloadConL13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__; +text: .text%__1cYjava_lang_reflect_MethodbFhas_parameter_annotations_field6F_i_; +text: .text%__1cINegDNodeGOpcode6kM_i_; +text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_; +text: .text%__1cYjava_lang_reflect_MethodThas_signature_field6F_i_; +text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_; +text: .text%__1cYjava_lang_reflect_MethodbChas_annotation_default_field6F_i_; +text: .text%__1cOimmI_32_63OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_; +text: .text%__1cNmethodOopDescSannotation_default6kM_pnQtypeArrayOopDesc__; +text: .text%__1cKReflectionKnew_method6FnMmethodHandle_iipnGThread__pnHoopDesc__; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cNIdealLoopTreeXpolicy_maximally_unroll6kMpnOPhaseIdealLoop__i_; +text: .text%__1cSsubL_reg_reg_2NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQregP_to_stkPNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNRelocIteratorEnext6M_i_: output.o; +text: .text%__1cOcmovII_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cOstackSlotPOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%jni_GetMethodID: jni.o; +text: .text%__1cTloadD_unalignedNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cQshlL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIMulINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceRefKlass.o; +text: .text%__1cObranchConFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNminI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRshlI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNminI_eRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOloadConL13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_; +text: .text%__1cOMacroAssemblerDjmp6MpnMRegisterImpl_ipkci_v_; +text: .text%__1cIDivLNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSsubD_regD_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetClassDeclaredConstructors; +text: .text%__1cUdivL_reg_imm13_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSmulD_regD_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_; +text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_; +text: .text%__1cOcmovIF_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUmulL_reg_imm13_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQsubD_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cUdivL_reg_imm13_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSandL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConPCNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cMmatch_option6FpknMJavaVMOption_pkcp4_i_: arguments.o; +text: .text%__1cZregDHi_regDLo_to_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJEventMark2t6MpkcE_v_: psMarkSweep.o; +text: .text%__1cQregP_to_stkPNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCMoveNodeEmake6FpnENode_222pknEType__p0_; +text: .text%__1cJCMoveNode2t6MpnENode_22pknEType__v_: connode.o; +text: .text%__1cSconvI2F_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_; +text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_; +text: .text%__1cOcmovIF_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MipnMRegisterImpl__v_; +text: .text%__1cQcmovI_reg_ltNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNloadConL0NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cKo1RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cRtestI_reg_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivI_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsubL_reg_reg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_; +text: .text%__1cQshrL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRsarL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadBNodeFclone6kM_pnENode__; +text: .text%__1cJloadFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSaddD_regD_regDNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__; +text: 
.text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_: interpreterRT_sparc.o; +text: .text%__1cRorI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassQfind_local_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__i_; +text: .text%__1cQshrL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQshrI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOimmI_32_63OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cSstkL_to_regD_0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cUGenericGrowableArrayKraw_remove6MpknEGrET__v_; +text: .text%__1cOloadI_fregNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMnegD_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cTAbstractInterpreterLdeopt_entry6FnITosState_i_pC_; +text: .text%__1cLConvI2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cJloadSNodeFclone6kM_pnENode__; +text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_; +text: .text%__1cMnegD_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCompiledICKcached_oop6kM_pnHoopDesc__; +text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: output.o; +text: .text%__1cZregDHi_regDLo_to_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cISubFNodeGOpcode6kM_i_; +text: .text%JVM_IsThreadAlive; +text: .text%__1cQstkI_to_regINodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQshrL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_ThreadIis_alive6FpnHoopDesc__i_; +text: .text%__1cFKlassMoop_is_klass6kM_i_: typeArrayKlass.o; +text: .text%__1cXPartialSubtypeCheckNodeGOpcode6kM_i_; +text: .text%__1cSsubD_regD_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSmulD_regD_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLconvI2BNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovIF_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cRsarL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQaddI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRtestI_reg_immNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRtestI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregI_to_stkINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFTypeFJsingleton6kM_i_; +text: .text%__1cLconvI2BNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_reg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSconvD2I_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubF_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_immNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopVinsert_pre_post_loops6MpnNIdealLoopTree_rnJNode_List_i_v_; +text: .text%__1cQaddD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovPI_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKConv2BNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSstring_compareNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQregL_to_stkLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_SystemTout_offset_in_bytes6F_i_; +text: .text%__1cQjava_lang_SystemSin_offset_in_bytes6F_i_; +text: .text%__1cLTypeInstPtrRcast_to_exactness6kMi_pknEType__; 
+text: .text%__1cWPredictedCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cWPredictedCallGeneratorJis_inline6kM_i_: callGenerator.o; +text: .text%__1cLcmpF_ccNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cWPredictedCallGeneratorKis_virtual6kM_i_: callGenerator.o; +text: .text%__1cNCallGeneratorSfor_predicted_call6FpnHciKlass_p03_3_; +text: .text%__1cSconvI2F_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cXconvI2D_regDHi_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cObox_handleNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cNCallGeneratorRfor_uncommon_trap6FpnIciMethod_nODeoptimizationLDeoptReason_n0CLDeoptAction__p0_; +text: .text%__1cOcmovPP_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZUncommonTrapCallGeneratorIgenerate6MpnIJVMState__2_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSaddD_regD_regDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIVMThreadMis_VM_thread6kM_i_: vmThread.o; +text: .text%__1cIMulFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQmulF_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcmpF_ccNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregL_to_stkLNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cGThread2t6M_v_; +text: .text%__1cOcmovLI_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regINodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_; +text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_; +text: .text%__1cFTypeFFxmeet6kMpknEType__3_; +text: .text%__1cCosScurrent_stack_size6F_I_; +text: .text%__1cOcmovLL_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cIOSThreadNpd_initialize6M_v_; +text: .text%__1cCosScurrent_stack_base6F_pC_; +text: .text%__1cIOSThread2t6MpFpv_i1_v_; +text: .text%__1cIMulDNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cCosRinitialize_thread6F_v_; +text: .text%__1cSdivL_reg_reg_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cGThreadOis_Java_thread6kM_i_: vmThread.o; +text: .text%__1cCosPpd_start_thread6FpnGThread__v_; +text: .text%__1cLConvI2FNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlass.o; +text: .text%__1cFTypeDGis_nan6kM_i_; +text: .text%jni_NewObjectArray: jni.o; +text: .text%__1cSsubL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%JVM_SetThreadPriority; +text: .text%__1cQaddF_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cCosMstart_thread6FpnGThread__v_; +text: .text%__1cCosNcreate_thread6FpnGThread_n0AKThreadType_I_i_; +text: .text%_start: os_solaris.o; +text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_; +text: .text%JVM_GetStackAccessControlContext; +text: .text%__1cXjava_lang_reflect_FieldThas_signature_field6F_i_; +text: .text%__1cXjava_lang_reflect_FieldVhas_annotations_field6F_i_; +text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_; +text: .text%__1cQsubD_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerTload_unaligned_long6MpnMRegisterImpl_i2_v_; +text: .text%__1cFStateM_sub_Op_ModI6MpknENode__v_; +text: .text%JVM_Read; +text: .text%__1cOcmovPI_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_; +text: .text%__1cFStateM_sub_Op_SubL6MpknENode__v_; +text: .text%__1cKCompiledICMstub_address6kM_pC_; +text: .text%__1cFciEnvZcall_has_multiple_targets6FpnNinstanceKlass_nMsymbolHandle_3ri_i_; +text: .text%__1cKReflectionJnew_field6FpnPfieldDescriptor_ipnGThread__pnHoopDesc__; +text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__; +text: .text%__1cQsubL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQmodI_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cISubDNodeGOpcode6kM_i_; +text: .text%__1cQmodI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__; +text: .text%__1cRsarI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: callGenerator.o; +text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_; +text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_; +text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__; +text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_; +text: .text%__1cLOptoRuntimeWresolve_virtual_call_C6FpnKJavaThread__pC_; +text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_; +text: .text%__1cSmulD_regD_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_; +text: .text%__1cMloadConINodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%jni_GetStaticFieldID: jni.o; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadKlassNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSstring_compareNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreF0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerIjumpl_to6MrnHAddress_pnMRegisterImpl_i_v_: interp_masm_sparc.o; +text: .text%__1cOJavaAssertionsHenabled6Fpkci_i_; +text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_; +text: .text%__1cSsubD_regD_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__; +text: .text%__1cSstkL_to_regD_0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFStateO_sub_Op_CMoveI6MpknENode__v_; +text: .text%__1cRsarL_reg_imm6NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIModLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cENodeEgetf6kM_f_; +text: .text%JVM_DesiredAssertionStatus; +text: .text%__1cKJavaThreadKinitialize6M_v_; +text: .text%__1cENodeIis_Multi6M_pnJMultiNode__: node.o; +text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_; +text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_; +text: .text%__1cUregI_to_stkLHi_1NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cLConvL2DNodeGOpcode6kM_i_; +text: .text%__1cNjni_functions6F_pknTJNINativeInterface___; +text: .text%__1cCosMguard_memory6FpcI_i_; +text: .text%__1cENodeIis_Store6kM_pknJStoreNode__: node.o; +text: .text%__1cENodeGis_Cmp6kM_pknHCmpNode__: node.o; +text: .text%__1cQThreadStatistics2t6M_v_; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC22i_v_; +text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_; +text: .text%__1cQshrL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cGParker2t6M_v_; +text: .text%__1cIDivLNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSdivL_reg_imm13NodeHtwo_adr6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cQsubD_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: interp_masm_sparc.o; +text: .text%__1cMFlatProfilerJis_active6F_i_; +text: .text%__1cOMacroAssemblerNload_contents6MrnHAddress_pnMRegisterImpl_i_v_: interp_masm_sparc.o; +text: .text%__1cGThreadFstart6Fp0_v_; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: interp_masm_sparc.o; +text: .text%__1cPconvI2D_memNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%jni_GetFloatArrayRegion: jni.o; +text: .text%__1cJMarkSweepMfollow_stack6F_v_; +text: .text%__1cNimmP_pollOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cRtestI_reg_immNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZregDHi_regDLo_to_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJMemRegionMintersection6kMk0_0_; +text: .text%__1cMVirtualSpaceJexpand_by6MI_i_; +text: .text%__1cUregI_to_stkLHi_0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQdivI_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKJavaThread2t6MpFp0pnGThread__vI_v_; +text: .text%__1cOcmovIF_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryQjava_mirror_type6FpnHoopDesc__nJBasicType__; +text: .text%__1cRloadConP_pollNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cPPerfDataManagerIadd_item6FpnIPerfData_i_v_; +text: .text%__1cKJavaThreadDrun6M_v_; +text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__; +text: .text%JVM_IsArrayClass; +text: .text%jni_CallStaticVoidMethod: jni.o; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__; +text: .text%__1cJloadDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNObjectMonitorGenter26MpnGThread__v_; +text: .text%__1cLConvF2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_; +text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cKstoreBNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFKlassNexternal_name6kM_pkc_; +text: .text%__1cOloadI_fregNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeHdel_out6Mp0_v_: generateOptoStub.o; +text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_; +text: .text%__1cKstoreLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNloadConPCNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLstoreC0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cICodeHeapJexpand_by6MI_i_; +text: .text%__1cOGenerateOopMapKinterp_all6M_v_; +text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_; +text: .text%__1cObranchConFNodeJis_Branch6kM_I_: ad_sparc_misc.o; +text: .text%__1cTloadD_unalignedNodeIpipeline6kM_pknIPipeline__; +text: .text%JVM_GetClassName; +text: .text%__1cOGenerateOopMapTmethodsig_to_effect6MpnNsymbolOopDesc_ipnNCellTypeState__i_; +text: .text%__1cOloadI_fregNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlass.o; +text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_; +text: .text%__1cOGenerateOopMapKinit_state6M_v_; +text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_; +text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_; +text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_; +text: .text%__1cSaddD_regD_regDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cOcmovIF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: loopnode.o; +text: .text%__1cQshrL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHMatcherXpost_store_load_barrier6FpknENode__i_; +text: .text%__1cLConvD2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQstkI_to_regFNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cOcmovIL_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVcompiledICHolderKlassSoop_being_unloaded6MpnRBoolObjectClosure_pnHoopDesc__i_; +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_sparc.o; +text: .text%__1cINodeHashEgrow6M_v_; +text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_; +text: .text%__1cOcmovPP_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cMloadConDNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLStrCompNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMacroAssemblerVload_unaligned_double6MpnMRegisterImpl_ipnRFloatRegisterImpl__v_; +text: .text%__1cIMaxINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cJloadSNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_; +text: .text%__1cHnmethodNis_osr_method6kM_i_: nmethod.o; +text: .text%__1cLConvF2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQsubI_reg_regNodeFclone6kM_pnENode__; +text: .text%JVM_Open; +text: .text%__1cRInvocationCounterFreset6M_v_; +text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_; +text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_; +text: .text%__1cSsubL_reg_reg_2NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_; +text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_; +text: .text%__1cICodeBlobJis_zombie6kM_i_: onStackReplacement.o; +text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_; +text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_; +text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_; +text: .text%__1cSmulL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_; +text: .text%JVM_StartThread; +text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o; +text: .text%jni_GetStaticObjectField: jni.o; +text: .text%__1cJArrayDataKcell_count6M_i_: ciMethodData.o; +text: .text%__1cIGraphKitSprecision_rounding6MpnENode__2_; +text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_; +text: .text%__1cHAddress2t6Mn0AJaddr_type_i_v_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: generateOptoStub.o; +text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__; +text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_; +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_; +text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_; +text: .text%__1cIciObjectJis_method6M_i_: ciObjectFactory.o; +text: .text%__1cNloadConPCNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cTLoadD_unalignedNodeGOpcode6kM_i_; +text: .text%__1cSstkL_to_regD_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQshrI_reg_regNodeErule6kM_I_: 
ad_sparc_misc.o; +text: .text%JVM_FreeMemory; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlass.o; +text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_; +text: .text%JVM_TotalMemory; +text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_; +text: .text%__1cMloadConDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciObjectFactory.o; +text: .text%__1cUmulL_reg_imm13_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCodeBufferWinsert_double_constant6Md_pC_; +text: .text%__1cTAbstractInterpreterWlayout_activation_impl6FpnNmethodOopDesc_iiiipnFframe_4i_i_; +text: .text%__1cQdivL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIciObjectOis_method_data6M_i_: ciObjectFactory.o; +text: .text%__1cOcmovIL_immNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2D_memNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: interpreter_sparc.o; +text: .text%__1cUdivL_reg_imm13_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cHThreadsGremove6FpnKJavaThread__v_; +text: .text%__1cIOSThread2T6M_v_; +text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_; +text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_; +text: .text%__1cQandI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_; +text: .text%__1cGParker2T6M_v_; +text: .text%__1cSandL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_; +text: .text%__SLIP.DELETER__A: thread.o; +text: .text%__1cCosOunguard_memory6FpcI_i_; +text: .text%__1cKJavaThreadEexit6Mi_v_; +text: .text%__1cLConvD2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cIOSThreadKpd_destroy6M_v_; +text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_; +text: .text%__1cODeoptimizationYquery_update_method_data6FnQmethodDataHandle_in0ALDeoptReason_rIri4_pnLProfileData__; +text: .text%__1cKJavaThread2T6M_v_; +text: .text%__1cGThread2T5B6M_v_; +text: .text%__1cCosLfree_thread6FpnIOSThread__v_; +text: .text%__1cFStateM_sub_Op_MulI6MpknENode__v_; +text: .text%__1cNThreadServiceNremove_thread6FpnKJavaThread_i_v_; +text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_; +text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o; +text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_; +text: .text%__1cUregI_to_stkLHi_0NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_0NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSTailCalljmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_; +text: .text%__1cUregI_to_stkLHi_1NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSTailCalljmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQsubF_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cICmpDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLconvI2BNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cRNativeMovConstRegEdata6kM_i_; +text: .text%__1cbFunnecessary_membar_volatileNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cLcmpF_ccNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_; +text: .text%jni_CallObjectMethod: jni.o; +text: 
.text%__1cMTailCallNode2t6MpnENode_222222_v_; +text: .text%__1cQaddD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cGEventsDlog6FpkcE_v_: deoptimization.o; +text: .text%__1cPconvD2F_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNimmP_pollOperFclone6kM_pnIMachOper__; +text: .text%__1cQdivD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRtestI_reg_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_immNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerbEset_method_data_pointer_offset6MpnMRegisterImpl__v_; +text: .text%__1cSconvF2I_helperNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIMaxINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cQChunkPoolCleanerEtask6M_v_: allocation.o; +text: .text%__1cJLoadPNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cRloadConP_pollNodeFclone6kM_pnENode__; +text: .text%__1cHTypeInt2t6Miii_v_; +text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_; +text: .text%__1cNloadConPCNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConPCNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIL_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cTOopMapForCacheEntryOreport_results6kM_i_: oopMapCache.o; +text: .text%__1cKConv2BNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cXconvI2D_regDHi_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: library_call.o; +text: .text%__1cSandL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovLI_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadRangeNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQjava_lang_StringbHcreate_from_platform_depended_str6FpkcpnGThread__nGHandle__; +text: .text%__1cRshlI_reg_imm5NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_; +text: .text%__1cQregL_to_stkLNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: interpreterRT_sparc.o; +text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: klassKlass.o; +text: .text%__1cLOptoRuntimeYcurrent_time_millis_Type6F_pknITypeFunc__; +text: .text%__1cHTypePtrFxdual6kM_pknEType__; +text: .text%__1cURethrowExceptionNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcastP2INodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotIOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotIOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOcmovLL_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%JVM_MonitorNotifyAll; +text: .text%__1cJloadDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOstackSlotIOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cIGraphKitXinsert_mem_bar_volatile6MpnKMemBarNode_i_v_; +text: .text%__1cKCMoveLNodeGOpcode6kM_i_; +text: .text%__1cRshlL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOLibraryCallKitYinline_native_time_funcs6Mi_i_; +text: .text%__1cMnegD_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationVtrap_state_has_reason6Fii_i_; +text: .text%__1cVMoveL2D_stack_regNodePoper_input_base6kM_I_: 
ad_sparc_misc.o; +text: .text%__1cHRetDataKcell_count6M_i_: methodDataOop.o; +text: .text%__1cTloadD_unalignedNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cNiRegIsafeOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cNloadConP0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: ciTypeFlow.o; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: stubGenerator_sparc.o; +text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_: instanceKlassKlass.o; +text: .text%__1cZInterpreterMacroAssemblerFpop_f6MpnRFloatRegisterImpl__v_; +text: .text%__1cIAddDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cMnegD_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_: methodDataKlass.o; +text: .text%__1cKimmL13OperFclone6kM_pnIMachOper__; +text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__; +text: .text%__1cLmethodKlassOklass_oop_size6kM_i_: methodKlass.o; +text: .text%__1cLstoreF0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_: cpCacheKlass.o; +text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_: constMethodKlass.o; +text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_: jni.o; +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: arrayKlassKlass.o; +text: .text%__1cQshlL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_: objArrayKlassKlass.o; +text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_: symbolKlass.o; +text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_: compiledICHolderKlass.o; +text: .text%__1cSsubL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSmulL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSdivL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_: constantPoolKlass.o; +text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cOloadI_fregNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRtestI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_; +text: .text%__1cJLoadSNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cbIjava_security_AccessControlContextGcreate6FnOobjArrayHandle_inGHandle_pnGThread__pnHoopDesc__; +text: .text%__1cLstoreF0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cUGenericGrowableArrayUclear_and_deallocate6M_v_; +text: .text%__1cIMinINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cNmaxI_eRegNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetInheritedAccessControlContext; +text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__; +text: .text%__1cNmaxI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%JVM_NativePath; +text: .text%__1cOMacroAssemblerNflush_windows6M_v_; +text: .text%__1cNloadConL0NodeFclone6kM_pnENode__; +text: .text%__1cSsubD_regD_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cVCallRuntimeDirectNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_; +text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_; +text: .text%__1cSReferenceProcessorHoops_do6MpnKOopClosure__v_; 
+text: .text%__1cHCompileKinit_start6MpnJStartNode__v_; +text: .text%__1cKg3RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cVinline_cache_regPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cWloadConI_x41f00000NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstorePNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIGraphKitbAgen_stub_or_native_wrapper6MpCpkcpnIciMethod_iiiii_v_; +text: .text%__1cQObjectStartArrayFreset6M_v_; +text: .text%__1cPconvI2D_memNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_; +text: .text%__1cQaddD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvF2INodeGOpcode6kM_i_; +text: .text%__1cJimmL0OperFclone6kM_pnIMachOper__; +text: .text%__1cVCallRuntimeDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_; +text: .text%__1cIPSOldGenPadjust_pointers6M_v_; +text: .text%__1cVCallRuntimeDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cPCallRuntimeNodeEhash6kM_I_: callnode.o; +text: .text%__1cICallNodeSis_CallInterpreter6kM_pknTCallInterpreterNode__: callnode.o; +text: .text%__1cOcmovPI_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cIPSOldGenHcompact6M_v_; +text: .text%__1cMtlsLoadPNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJvmSymbolsHoops_do6FpnKOopClosure_i_v_; +text: .text%__1cLcmpF_ccNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregL_to_stkLNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cVCallRuntimeDirectNodeKmethod_set6Mi_v_; +text: .text%__1cKimmI11OperIconstant6kM_i_: ad_sparc_clone.o; +text: .text%__1cSstkL_to_regD_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cQcmovI_reg_gtNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQstkI_to_regINodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLstoreP0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovIF_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovLL_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%jni_GetStaticMethodID: jni.o; +text: .text%__1cIUniverseWreinitialize_vtable_of6FpnFKlass_pnGThread__v_; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MipnMRegisterImpl__v_; +text: .text%__1cRtestI_reg_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_; +text: .text%__1cPconvF2D_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodDataKlass.o; +text: .text%__1cOcmovDF_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOPhaseIdealLoopKdo_peeling6MpnNIdealLoopTree_rnJNode_List__v_; +text: .text%__1cOcmovLL_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%jint_cmp: parse2.o; +text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__; +text: .text%__1cNloadConL0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_; +text: .text%__1cVMoveL2D_stack_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cIMulDNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cIGraphKitTdprecision_rounding6MpnENode__2_; +text: .text%__1cSconvF2I_helperNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cHnmethodbCcan_not_entrant_be_converted6M_i_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodDataKlass.o; +text: .text%__1cJloadCNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOloadI_fregNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: 
.text%__1cOcmovLL_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLConvD2FNodeGOpcode6kM_i_; +text: .text%__1cIMulFNodeImul_ring6kMpknEType_3_3_; +text: .text%__1cWloadConI_x41f00000NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKcmpOpFOperFccode6kM_i_: ad_sparc_clone.o; +text: .text%__1cLstoreC0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQregL_to_stkLNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLRuntimeStubQnew_runtime_stub6FpkcpnKCodeBuffer_ipnJOopMapSet_i_p0_; +text: .text%__1cIAddFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cLcastP2INodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cZregDHi_regDLo_to_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKo2RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cOcmovIF_immNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovDF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQaddL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cZregDHi_regDLo_to_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%JVM_Close; +text: .text%__1cSmulD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKstoreFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSsubD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddP_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cXconvI2D_regDHi_regDNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cTloadD_unalignedNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKstoreFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__; +text: .text%__1cOMacroAssemblerNget_vm_result6MpnMRegisterImpl__v_; +text: .text%__1cQsubF_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerbIcompute_extra_locals_size_in_bytes6MpnMRegisterImpl_22_v_; +text: .text%__1cLcmpF_ccNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPMultiBranchDataScompute_cell_count6FpnOBytecodeStream__i_; +text: .text%__1cPorI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cUregI_to_stkLHi_1NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSxorI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvI2D_memNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: templateTable_sparc.o; +text: .text%__1cQdivI_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2D_memNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cLconvI2BNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_2NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cISubFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cWloadConI_x43300000NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cWloadConI_x41f00000NodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeGis_Con6kM_I_: loopnode.o; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: templateTable_sparc.o; +text: .text%__1cSmulI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOtailjmpIndNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o; +text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_; +text: 
.text%__1cENodeRraise_bottom_type6MpknEType__v_: loopnode.o; +text: .text%__1cENodeJis_MemBar6kM_pknKMemBarNode__: classes.o; +text: .text%__1cbFunnecessary_membar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJSubFPNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFParseNdo_instanceof6M_v_; +text: .text%__1cLconvI2BNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cIGraphKitOgen_instanceof6MpnENode_2_2_; +text: .text%__1cbFunnecessary_membar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRshrL_reg_imm6NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJloadBNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJloadDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQdivI_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIDivLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvI2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cSmulD_regD_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOstackSlotLOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cUregI_to_stkLHi_1NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cKConv2BNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQshlI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_; +text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_; +text: .text%__1cPconvD2F_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJloadDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOcmovPP_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cQsubF_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_NewObjectV: jni.o; +text: .text%__1cOcmovLI_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cCosHSolarisKmmap_chunk6FpcIii_2_; +text: .text%__1cXPartialSubtypeCheckNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%jni_EnsureLocalCapacity; +text: .text%__1cLstoreI0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cIAddFNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: .text%__1cTunsafe_intrinsic_id6FpnNsymbolOopDesc_1_nNmethodOopDescLIntrinsicId__; +text: .text%__1cLConvD2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLConvD2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cPorL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_; +text: .text%__1cIDivDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cUregI_to_stkLHi_0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__; +text: .text%__1cKVtableStubSpd_code_size_limit6Fi_i_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlassKlass.o; +text: .text%__1cSaddD_regD_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_; +text: .text%__1cQsubD_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovPP_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableGbranch6Fii_v_; +text: .text%__1cSsubL_reg_reg_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: 
.text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: interpreter_sparc.o; +text: .text%__1cNSafePointNodeQpeek_monitor_obj6kM_pnENode__; +text: .text%__1cJloadFNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddI_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSandL_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cFParsePdo_monitor_exit6M_v_; +text: .text%__1cSdivL_reg_reg_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cObranchConFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cObranchConFNodeJlabel_set6MrnFLabel_I_v_; +text: .text%__1cSconvF2I_helperNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSmembar_releaseNodeIadr_type6kM_pknHTypePtr__; +text: .text%__1cOloadI_fregNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cObranchConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPstoreI_FregNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcmpD_ccNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cJloadLNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cISubDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: stubGenerator_sparc.o; +text: .text%__1cSmulL_reg_reg_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_; +text: .text%__1cHCompile2t6MpnFciEnv_pF_pknITypeFunc_pCpkciiii_v_; +text: .text%__1cLResourceObj2n6FIn0APallocation_type__pv_; +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_sparc.o; +text: .text%__1cNSafePointNodeQpeek_monitor_box6kM_pnENode__; +text: .text%__1cUregI_to_stkLHi_1NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regINodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cIGraphKitIgen_stub6MpCpkciii_v_; +text: .text%__1cNTemplateTableOpatch_bytecode6FnJBytecodesECode_pnMRegisterImpl_4i_v_; +text: .text%__1cFTypeFFxdual6kM_pknEType__; +text: .text%__1cICmpFNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: typeArrayKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlass.o; +text: .text%__1cKVtableStubRpd_code_alignment6F_i_; +text: .text%__1cSstkL_to_regD_2NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSstkL_to_regD_0NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIMachNodeOmemory_operand6kM_pknIMachOper__: ad_sparc_misc.o; +text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__; +text: .text%__1cKloadUBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQaddD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cTloadL_unalignedNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cINegDNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cVMoveF2I_stack_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLConvI2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOcmovLL_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRorI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cTloadL_unalignedNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cTloadL_unalignedNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cKloadUBNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cFMutexbLwait_for_lock_blocking_implementation6MpnKJavaThread__v_; +text: 
.text%__1cXconvI2D_regDHi_regDNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSvframeArrayElementPunpack_on_stack6MiipnFframe_ii_v_; +text: .text%__1cSconvF2I_helperNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerbFtest_invocation_counter_for_mdp6MpnMRegisterImpl_22rnFLabel__v_; +text: .text%__1cXconvI2D_regDHi_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSvframeArrayElementHfill_in6MpnOcompiledVFrame__v_; +text: .text%__1cFTypeDFxdual6kM_pknEType__; +text: .text%__1cSaddD_regD_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZInterpreterMacroAssemblerbAincrement_backedge_counter6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerbBtest_backedge_count_for_osr6MpnMRegisterImpl_22_v_; +text: .text%__1cSmulL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cOcmovPI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKEntryPoint2t6M_v_; +text: .text%__1cTloadD_unalignedNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cZregDHi_regDLo_to_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcompiledVFrameImonitors6kM_pnNGrowableArray4CpnLMonitorInfo____; +text: .text%__1cOcompiledVFrameLexpressions6kM_pnUStackValueCollection__; +text: .text%__1cHciKlassOsuper_of_depth6MI_p0_; +text: .text%__1cOcompiledVFrameGlocals6kM_pnUStackValueCollection__; +text: .text%__1cOcompiledVFrameGmethod6kM_pnNmethodOopDesc__; +text: .text%__1cJimmP0OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cOcompiledVFrameHraw_bci6kM_i_; +text: .text%__1cQshrI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveL2D_stack_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cWloadConI_x43300000NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_; +text: .text%__1cMTailJumpNodeKmatch_edge6kMI_I_; +text: .text%__1cSvframeArrayElementNon_stack_size6kMiiii_i_; +text: .text%__1cWloadConI_x41f00000NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cODeoptimizationbJupdate_method_data_from_interpreter6FnQmethodDataHandle_ii_v_; +text: .text%__1cIimmDOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cFframeZinterpreter_frame_set_mdx6Mi_v_; +text: .text%__1cOstackSlotLOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotLOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cTloadD_unalignedNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cTloadD_unalignedNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIModLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJimmU5OperFclone6kM_pnIMachOper__; +text: .text%__1cTAbstractInterpreterPsize_activation6FpnNmethodOopDesc_iiiii_i_; +text: .text%__1cOtailjmpIndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cSmulD_regD_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSstkL_to_regD_0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cMloadConDNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovDF_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cTAbstractInterpreterQcontinuation_for6FpnNmethodOopDesc_pCiiri_3_; +text: .text%__1cUregI_to_stkLHi_1NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cINegFNodeGOpcode6kM_i_; +text: .text%__1cSsubD_regD_regDNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cTAbstractInterpreterRlayout_activation6FpnNmethodOopDesc_iiiipnFframe_4i_v_; +text: .text%__1cJScopeDescImonitors6M_pnNGrowableArray4CpnMMonitorValue____; +text: 
.text%__1cUregI_to_stkLHi_0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cJScopeDescLexpressions6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cJScopeDescGlocals6M_pnNGrowableArray4CpnKScopeValue____; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: klassKlass.o; +text: .text%JVM_GetComponentType; +text: .text%__1cQdivI_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%Unsafe_DefineClass1; +text: .text%__1cOcmovII_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLvframeArrayPunpack_to_stack6MrnFframe_i_v_; +text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_; +text: .text%__1cLConvF2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSvframeArrayElementDbci6kM_i_; +text: .text%__1cVMoveF2I_stack_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cICodeBlobZis_at_poll_or_poll_return6MpC_i_; +text: .text%__1cLvframeArrayIallocate6FpnKJavaThread_ipnNGrowableArray4CpnOcompiledVFrame___pnLRegisterMap_nFframe_9A9A9A_p0_; +text: .text%JVM_GetCPFieldModifiers; +text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_; +text: .text%__1cLcastP2INodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNnmethodLocker2t6MpC_v_; +text: .text%__1cNSharedRuntimebJcontinuation_for_implicit_exception6FpnKJavaThread_pCn0AVImplicitExceptionKind__3_; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: deoptimization.o; +text: .text%__1cODeoptimizationNuncommon_trap6FpnKJavaThread_i_pn0ALUnrollBlock__; +text: .text%__1cNloadConL0NodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cODeoptimizationTuncommon_trap_inner6FpnKJavaThread_i_v_; +text: .text%__1cODeoptimizationScreate_vframeArray6FpnKJavaThread_nFframe_pnLRegisterMap__pnLvframeArray__; +text: .text%__1cODeoptimizationNunpack_frames6FpnKJavaThread_i_nJBasicType__; +text: .text%__1cODeoptimizationYfetch_unroll_info_helper6FpnKJavaThread__pn0ALUnrollBlock__; +text: .text%__1cZInterpreterMacroAssemblerXindex_check_without_pop6MpnMRegisterImpl_2i22_v_; +text: .text%__1cRSignatureIteratorKparse_type6M_i_; +text: .text%__1cPconvD2F_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__; +text: .text%__1cODeoptimizationRlast_frame_adjust6Fii_i_; +text: .text%__1cQsubD_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%JVM_DefineClass; +text: .text%JVM_InvokeMethod; +text: .text%__1cOcmovPP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_NewDirectByteBuffer; +text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__: jni.o; +text: .text%__1cHBoxNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%jni_AllocObject: jni.o; +text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_; +text: .text%__1cFStateL_sub_Op_Box6MpknENode__v_; +text: .text%__1cTmembar_volatileNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKCodeBufferVinsert_float_constant6Mf_pC_; +text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: jniFastGetField_sparc.o; +text: .text%__1cMnegD_regNodeIpipeline6kM_pknIPipeline__; +text: .text%Unsafe_AllocateInstance; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: generateOopMap.o; +text: .text%__1cQComputeCallStackHdo_byte6M_v_: generateOopMap.o; +text: .text%__1cQstkI_to_regINodeIpipeline6kM_pknIPipeline__; +text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_; +text: .text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_; +text: 
.text%__1cYinternal_word_RelocationGtarget6M_pC_; +text: .text%__1cYinternal_word_RelocationMforce_target6MpC_v_: relocInfo.o; +text: .text%__1cJStubQdDueueKremove_all6M_v_; +text: .text%__1cMloadConFNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cETypeJis_finite6kM_i_; +text: .text%__1cPconvI2D_memNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLconvI2BNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPorL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cPorL_reg_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerGif_cmp6MnJAssemblerJCondition_i_v_; +text: .text%__1cZInterpreterMacroAssemblerLindex_check6MpnMRegisterImpl_2i22_v_; +text: .text%__1cOMacroAssemblerPcasx_under_lock6MpnMRegisterImpl_22pCi_v_; +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_; +text: .text%__1cQsubF_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cRshlI_reg_imm5NodeFclone6kM_pnENode__; +text: .text%__1cNloadRangeNodeFclone6kM_pnENode__; +text: .text%__1cSaddL_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovPI_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o; +text: .text%__1cKstfSSFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cMloadConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_; +text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_; +text: .text%__1cTmembar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvI2L_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovII_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_; +text: .text%__1cJCmpF3NodeGOpcode6kM_i_; +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceRefKlass.o; +text: .text%__1cLMoveL2DNodeGOpcode6kM_i_; +text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_; +text: .text%__1cLTypeInstPtrLmirror_type6kM_pnGciType__; +text: .text%__1cOstackSlotIOperFclone6kM_pnIMachOper__; +text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_pkcnGHandle_6_6_; +text: .text%__1cOcmovII_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOMacroAssemblerHbr_null6MpnMRegisterImpl_inJAssemblerHPredict_rnFLabel__v_; +text: .text%__1cFParseScan_rerun_bytecode6M_i_; +text: .text%__1cQshrL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKExceptionsNnew_exception6FpnGThread_pnNsymbolOopDesc_pkc_nGHandle__; +text: .text%__1cIAddFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKstfSSFNodeHis_Copy6kM_I_: ad_sparc_misc.o; +text: .text%__1cHdom_lca6FpnFBlock_1_1_: gcm.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlass.o; +text: .text%JVM_NewArray; +text: .text%__1cHOrLNodeGOpcode6kM_i_; +text: .text%__1cLStrCompNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cGThreadRis_Watcher_thread6kM_i_: thread.o; +text: .text%__1cLOopMapCache2t6M_v_; +text: .text%__1cNTemplateTableHconvert6F_v_; +text: .text%__1cOcmovDF_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cZInterpreterMacroAssemblerFpop_l6MpnMRegisterImpl__v_; +text: .text%__1cOcmovLI_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSMachBreakpointNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cSInterpreterRuntimeQcreate_exception6FpnKJavaThread_pc3_v_; +text: .text%__1cFParseWload_interpreter_state6MpnENode_2_v_; +text: .text%__1cQComputeCallStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cKPSYoungGenKprecompact6M_v_; +text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_; +text: .text%__1cSconvD2I_helperNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cMnegF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHThreadsLgc_prologue6F_v_; +text: .text%__1cHThreadsLgc_epilogue6F_v_; +text: .text%__1cPconvI2L_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPconvD2I_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_; +text: .text%__1cUParallelScavengeHeapHcollect6MnHGCCauseFCause__v_; +text: .text%__1cJCodeCacheMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure_iri_v_; +text: .text%__1cMStartOSRNodeScalling_convention6kMpnLRegPair_I_v_; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: callnode.o; +text: .text%__1cRCardTableModRefBSEis_a6MnKBarrierSetEName__i_: cardTableExtension.o; +text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_; +text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_; +text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cLconvP2BNodeMideal_Opcode6kM_i_: ad_sparc_misc.o; +text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_; +text: .text%__1cQSystemDictionaryMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cIciMethodVget_osr_flow_analysis6Mi_pnKciTypeFlow__; +text: .text%__1cPconvD2F_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvD2I_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLMoveF2INodeGOpcode6kM_i_; +text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_; +text: .text%__1cSReferenceProcessorPoops_do_statics6FpnKOopClosure__v_; +text: .text%__1cMStartOSRNodeKosr_domain6F_pknJTypeTuple__; +text: .text%__1cVVM_ParallelGCSystemGCEname6kM_pkc_: vm_operations.o; +text: .text%__1cVVM_ParallelGCSystemGCEdoit6M_v_; +text: .text%__1cVVM_ParallelGCSystemGC2t6MI_v_; +text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_; +text: .text%__1cOMacroAssemblerPbreakpoint_trap6M_v_; +text: .text%__1cJBasicLockHmove_to6MpnHoopDesc_p0_v_; +text: .text%__1cJMarkSweepNrestore_marks6F_v_; +text: .text%__1cJMarkSweepMadjust_marks6F_v_; +text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_; +text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_; +text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_; +text: .text%__1cMStubCodeMark2T6M_v_; +text: .text%__1cQAbstractCompilerMsupports_osr6M_i_: c2compiler.o; +text: .text%__1cNCallGeneratorHfor_osr6FpnIciMethod_i_p0_; +text: .text%__1cLClassLoaderSget_system_package6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cJPSPermGenKprecompact6M_v_; +text: .text%JVM_GC; +text: .text%__1cIPSOldGenKprecompact6M_v_; +text: .text%__1cUPSMarkSweepDecoratorbIset_destination_decorator_perm_gen6F_v_; +text: .text%__1cUPSMarkSweepDecoratorbHset_destination_decorator_tenured6F_v_; +text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_; +text: .text%__1cKDictionaryMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure__i_; +text: .text%__1cLPSMarkSweepQinvoke_no_policy6Fpii_v_; +text: .text%__1cLPSMarkSweepGinvoke6Fpii_v_; +text: 
.text%__1cQmulL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cWloadConI_x43300000NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cUPSAdaptiveSizePolicyUmajor_collection_end6MInHGCCauseFCause__v_; +text: .text%__1cUPSAdaptiveSizePolicyWmajor_collection_begin6M_v_; +text: .text%__1cWloadConI_x41f00000NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_; +text: .text%__1cJPSPermGenQcompute_new_size6MI_v_; +text: .text%__1cKPSYoungGenHcompact6M_v_; +text: .text%JVM_GetSystemPackage; +text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_; +text: .text%__1cKPSYoungGenPadjust_pointers6M_v_; +text: .text%__1cQUncommonTrapBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cNExceptionBlobHoops_do6MpnKOopClosure__v_: codeBlob.o; +text: .text%__1cFKlassMoop_is_klass6kM_i_: objArrayKlass.o; +text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_; +text: .text%__1cJCodeCacheLgc_prologue6F_v_; +text: .text%__1cJCodeCacheLgc_epilogue6F_v_; +text: .text%__1cIXorINodeIadd_ring6kMpknEType_3_3_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNStubGeneratorFalign6Mi_v_: stubGenerator_sparc.o; +text: .text%__1cQregL_to_stkLNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLcastP2INodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKcmpOpFOperKless_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cOcmovPI_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cJCMoveNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cQdivD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSmulL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cOcmovIF_immNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cKCMoveDNodeGOpcode6kM_i_; +text: .text%__1cJLoadDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cIMulFNodeGmul_id6kM_pknEType__: classes.o; +text: .text%__1cNStubGeneratorLstub_prolog6MpnMStubCodeDesc__v_: stubGenerator_sparc.o; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: klassKlass.o; +text: .text%__1cQaddL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%jni_GetStringRegion: jni.o; +text: .text%JVM_RawMonitorCreate; +text: .text%__1cJloadLNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMloadConFNodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cIMulFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cMmatch_option6FpknMJavaVMOption_ppkc5i_i_: arguments.o; +text: .text%__1cNloadConPCNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotPOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotPOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotPOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_; +text: .text%__1cSaddP_reg_imm13NodeFclone6kM_pnENode__; +text: .text%__1cOstackSlotFOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%JVM_Sleep; +text: .text%__1cHBoxNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cObox_handleNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLConvL2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cLstoreF0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceRefKlass.o; +text: 
.text%__1cQstkI_to_regFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceRefKlass.o; +text: .text%__1cRorI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cVMoveF2I_stack_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOstackSlotLOperFclone6kM_pnIMachOper__; +text: .text%Unsafe_CompareAndSwapInt; +text: .text%JVM_Lseek; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: templateTable_sparc.o; +text: .text%__1cNloadRangeNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cPconvD2F_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cRComputeEntryStackJdo_object6Mii_v_: generateOopMap.o; +text: .text%__1cPconvF2D_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubL_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cQmulI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQmulF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cSconvF2I_helperNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRmethodDataOopDescRbci_to_extra_data6Mii_pnLProfileData__; +text: .text%__1cOcmovLI_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cPMultiBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; +text: .text%__1cQregL_to_stkLNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQsubD_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQregP_to_stkPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cPconvI2D_memNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQaddL_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerQtest_mdp_data_at6MipnMRegisterImpl_rnFLabel_2_v_; +text: .text%__1cVMoveL2D_stack_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQstkI_to_regINodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cENodeIis_Catch6kM_pknJCatchNode__: loopnode.o; +text: .text%__1cPconvD2F_regNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovLI_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOLibraryCallKitXinline_string_compareTo6M_i_; +text: .text%__1cQdivI_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cGciType2t6MnJBasicType__v_; +text: .text%__1cOMacroAssemblerKbr_notnull6MpnMRegisterImpl_inJAssemblerHPredict_rnFLabel__v_; +text: .text%__1cJLoadBNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cENodeHis_Call6M_pnICallNode__: machnode.o; +text: .text%__1cLOptoRuntimeRnew_objArray_Type6F_pknITypeFunc__; +text: .text%__1cQaddF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cETypeEmake6Fn0AFTYPES__pk0_; +text: .text%__1cSconvF2I_helperNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cRsarL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSstring_compareNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_GetEnv; +text: .text%__1cJloadDNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cQstkI_to_regINodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cSstring_compareNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_sparc.o; +text: .text%Unsafe_GetNativeByte; +text: .text%JVM_NanoTime; +text: .text%__1cCosNjavaTimeNanos6F_x_; +text: .text%__1cOMacroAssemblerOrestore_thread6MkpnMRegisterImpl__v_; +text: 
.text%__1cVcompiledICHolderKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cQandL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIimmFOperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cHThreadsLnmethods_do6F_v_; +text: .text%__1cKcmpOpFOperGnegate6M_v_: ad_sparc_clone.o; +text: .text%__1cICodeBlobFflush6M_v_; +text: .text%__1cZInterpreterMacroAssemblerFpop_d6MpnRFloatRegisterImpl__v_; +text: .text%__1cFParseMdo_anewarray6M_v_; +text: .text%__1cSdivL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_CallVoidMethod: jni.o; +text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__; +text: .text%__1cObranchConFNodeGnegate6M_v_: ad_sparc_misc.o; +text: .text%__1cFParseOdo_tableswitch6M_v_; +text: .text%__1cOcmovIF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLConvI2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cSaddL_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLstoreC0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%Unsafe_GetNativeFloat; +text: .text%__1cOMacroAssemblerGmembar6MnJAssemblerQMembar_mask_bits__v_: templateTable_sparc.o; +text: .text%__1cNTemplateTableXjvmti_post_field_access6Fiii_v_; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: methodLiveness.o; +text: .text%__1cbCAbstractInterpreterGeneratorbBgenerate_result_handler_for6MnJBasicType__pC_; +text: .text%__1cOstackSlotFOperEtype6kM_pknEType__: ad_sparc.o; +text: .text%__1cHnmethodFflush6M_v_; +text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_; +text: .text%__1cKo2RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cQregI_to_stkINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMloadConFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_; +text: .text%__1cMregD_lowOperFclone6kM_pnIMachOper__; +text: .text%__1cJloadFNodeFclone6kM_pnENode__; +text: .text%__1cJEventMark2t6MpkcE_v_: nmethod.o; +text: .text%__1cCosNcommit_memory6FpcII_i_; +text: .text%__1cJloadLNodeFclone6kM_pnENode__; +text: .text%__1cParrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_; +text: .text%__1cSaddI_reg_imm13NodeFclone6kM_pnENode__; +text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_; +text: .text%__1cLOptoRuntimeNgenerate_stub6FpnFciEnv_pF_pknITypeFunc_pCpkciiii_8_; +text: .text%__1cWloadConI_x43300000NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFParseQdo_monitor_enter6M_v_; +text: .text%__1cPorL_reg_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cLstoreC0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOPSPromotionLABRunallocate_object6MpnHoopDesc__i_; +text: .text%JVM_FindPrimitiveClass; +text: .text%__1cVMoveL2D_stack_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_; +text: .text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__; +text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_; +text: .text%__1cSmodL_reg_imm13NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cRshrI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cSsubL_reg_reg_2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUmulL_reg_imm13_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cIDivDNodeFValue6kMpnOPhaseTransform__pknEType__; +text: 
.text%__1cPconvI2F_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__; +text: .text%__1cOstackSlotFOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cUdivL_reg_imm13_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_continuation_for6MnITosState__pC_; +text: .text%__1cRSignatureIteratorHiterate6M_v_; +text: .text%__1cOcmovLL_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLcastP2INodeIpipeline6kM_pknIPipeline__; +text: .text%__1cFKlassMoop_is_klass6kM_i_: constantPoolKlass.o; +text: .text%__1cJname2type6Fpkc_nJBasicType__; +text: .text%__1cSmulL_reg_imm13NodeIpipeline6kM_pknIPipeline__; +text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_; +text: .text%__1cLcastP2INodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPBytecode_invokeLresult_type6kMpnGThread__nJBasicType__; +text: .text%__1cLCastP2INodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cOloadConL13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSmodL_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFTypeFJis_finite6kM_i_; +text: .text%__1cKcmpOpFOperHgreater6kM_i_: ad_sparc_clone.o; +text: .text%__1cIDivDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOMacroAssemblerKget_thread6M_v_; +text: .text%__1cPconvI2F_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovDF_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovIF_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSconvI2F_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSaddD_regD_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKVtableStub2n6FIi_pv_; +text: .text%__1cNloadConPCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWloadConI_x41f00000NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKScopeValueLis_location6kM_i_: debugInfo.o; +text: .text%__1cLVtableStubsLcreate_stub6FiipnNmethodOopDesc__pC_; +text: .text%__1cLOptoRuntimebBhandle_wrong_method_ic_miss6FpnKJavaThread__pC_; +text: .text%__1cSmulD_regD_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_; +text: .text%__1cZregDHi_regDLo_to_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSsubD_regD_regDNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLVtableStubsOis_entry_point6FpC_i_; +text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlass.o; +text: .text%__1cPconvD2F_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cIciMethodMnative_entry6M_pC_; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cLVtableStubsScreate_vtable_stub6Fii_pnKVtableStub__; +text: .text%__1cVMoveF2I_stack_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__; +text: .text%__1cQAbstractCompilerPsupports_native6M_i_: c2compiler.o; +text: .text%__1cPorL_reg_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvD2F_regNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cIciSymbolHas_utf86M_pkc_; +text: .text%__1cQandI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_; +text: .text%__1cMnegD_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cFStateO_sub_Op_CMoveP6MpknENode__v_; +text: 
.text%__1cKarrayKlassOset_alloc_size6MI_v_: typeArrayKlass.o; +text: .text%__1cSconvF2I_helperNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulD_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_1NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerZtotal_frame_size_in_bytes6Mi_i_; +text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_; +text: .text%__1cKCompiledICOis_megamorphic6kM_i_; +text: .text%__1cVMoveF2I_stack_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_; +text: .text%Unsafe_StaticFieldOffset; +text: .text%__1cQmulI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_; +text: .text%__1cQaddI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovLI_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%JVM_GetClassContext; +text: .text%__1cHCompile2t6MpnFciEnv_pnKC2Compiler_pnIciMethod__v_; +text: .text%Unsafe_StaticFieldBaseFromField; +text: .text%Unsafe_EnsureClassInitialized; +text: .text%__1cOcmovIF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pCi_v_; +text: .text%__1cKCodeBufferQalloc_relocation6MI_v_; +text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_; +text: .text%__1cWloadConI_x43300000NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_0NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%Unsafe_GetObjectVolatile; +text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlassKlass.o; +text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_; +text: .text%__1cKstoreFNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cFKlassMoop_is_array6kM_i_: objArrayKlassKlass.o; +text: .text%__1cVMoveL2D_stack_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cJLoadLNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cSmulL_reg_imm13NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHMulNodeGis_Mul6kM_pk0_: classes.o; +text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__; +text: .text%__1cOloadConL13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cKLoadPCNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_; +text: .text%__1cGEventsDlog6FpkcE_v_: compiledIC.o; +text: .text%__1cLstoreF0NodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cPconvI2D_memNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cZInterpreterMacroAssemblerNsuper_call_VM6MpnMRegisterImpl_22pC22i_v_; +text: .text%__1cNloadConPCNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cETypeFxdual6kM_pk0_; +text: .text%__1cJOopMapSetQsingular_oop_map6M_pnGOopMap__; +text: .text%__1cKimmU13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_; +text: .text%__1cZInterpreterMacroAssemblerSnotify_method_exit6MinITosState__v_; +text: .text%__1cSaddL_reg_imm13NodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_; +text: .text%__1cZInterpreterMacroAssemblerbCincrement_invocation_counter6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerQaccess_local_int6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerbDunlock_if_synchronized_method6MnITosState_ii_v_; +text: 
.text%__1cOtypeArrayKlassMcreate_klass6FnJBasicType_ipnGThread__pnMklassOopDesc__; +text: .text%__1cIGraphKitSgen_native_wrapper6MpnIciMethod__v_; +text: .text%__1cPconvI2L_regNodeFclone6kM_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerWempty_expression_stack6M_v_; +text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_; +text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MirnFLabel__v_; +text: .text%__1cOcmovIL_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUInterpreterGeneratorbAgenerate_run_compiled_code6M_v_; +text: .text%__1cOCompilerThreadSis_Compiler_thread6kM_i_: thread.o; +text: .text%__1cUInterpreterGeneratorUgenerate_fixed_frame6Mi_v_; +text: .text%__1cCosHSolarisSset_signal_handler6Fiii_v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_; +text: .text%__1cLOptoRuntimeRresolve_call_Type6F_pknITypeFunc__; +text: .text%__1cOtailjmpIndNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_; +text: .text%__1cCosEstat6FpkcpnEstat__i_; +text: .text%__1cQregF_to_stkINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRComputeEntryStackIdo_short6M_v_: generateOopMap.o; +text: .text%__1cRComputeEntryStackGdo_int6M_v_: generateOopMap.o; +text: .text%__1cMMonitorChunk2t6Mi_v_; +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__; +text: .text%__1cOPhaseIdealLoopJclone_iff6MpnHPhiNode_pnNIdealLoopTree__pnIBoolNode__; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: callnode.o; +text: .text%__1cKReflectionVis_same_class_package6FpnMklassOopDesc_2_i_; +text: .text%__1cQComputeCallStackIdo_float6M_v_: generateOopMap.o; +text: .text%__1cMMonitorValue2t6MpnTDebugInfoReadStream__v_; +text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_; +text: .text%__1cPorL_reg_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cLOptoRuntimeMrethrow_Type6F_pknITypeFunc__; +text: .text%__1cUBytecode_tableswitchGlength6M_i_: methodDataOop.o; +text: .text%jni_SetStaticObjectField: jni.o; +text: .text%jni_RegisterNatives: jni.o; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: templateTable_sparc.o; +text: .text%__1cFframebLprevious_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_; +text: .text%__1cQshlL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_GetClassDeclaredFields; +text: .text%__1cCosMuser_handler6F_pv_; +text: .text%JVM_IsSameClassPackage; +text: .text%__1cLas_TosState6FnJBasicType__nITosState__: interpreter.o; +text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_; +text: .text%__1cKJavaThreadRadd_monitor_chunk6MpnMMonitorChunk__v_; +text: .text%__1cKJavaThreadUremove_monitor_chunk6MpnMMonitorChunk__v_; +text: .text%__1cLcastP2INodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVMoveL2D_stack_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableGiconst6Fi_v_; +text: .text%__1cLConvF2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%JVM_LoadLibrary; +text: .text%JVM_IsSupportedJNIVersion; +text: .text%Unsafe_ObjectFieldOffset; +text: .text%__1cZInterpreterMacroAssemblerYtest_method_data_pointer6MrnFLabel__v_; +text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_; +text: .text%__1cHCompileRget_Method_invoke6M_pnIciMethod__; +text: .text%__1cZInterpreterMacroAssemblerSget_cpool_and_tags6MpnMRegisterImpl_2_v_; +text: .text%__1cIAddDNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o; +text: 
.text%__1cHCompileWget_MethodAccessorImpl6M_pnPciInstanceKlass__; +text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableH_return6FnITosState__v_; +text: .text%__1cOPSVirtualSpaceJexpand_by6MI_i_; +text: .text%__1cHOrLNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKimmP13OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cLConvD2FNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_; +text: .text%__1cHnmethodbJcontinuation_for_implicit_exception6MpC_1_; +text: .text%__1cNSharedRuntimeEdrem6Fdd_d_; +text: .text%__1cQstkI_to_regINodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cUInterpreterGeneratorbEgenerate_asm_interpreter_entry6Mi_pC_; +text: .text%__1cSstkL_to_regD_2NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_1NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cIAddDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cSstkL_to_regD_0NodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cPstoreI_FregNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cTloadD_unalignedNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLVtableStubsIcontains6FpC_i_; +text: .text%__1cOloadI_fregNodeOmemory_operand6kM_pknIMachOper__; +text: .text%__1cLconvP2BNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cUInterpreterGeneratorbCgenerate_check_compiled_code6MrnFLabel__v_; +text: .text%__1cUInterpreterGeneratorbDgenerate_stack_overflow_check6MpnMRegisterImpl_22_v_; +text: .text%__1cCosZvm_allocation_granularity6F_i_; +text: .text%__1cMTailJumpNodeGOpcode6kM_i_; +text: .text%__1cPconvF2D_regNodeFclone6kM_pnENode__; +text: .text%__1cOLibraryCallKitbDis_method_invoke_or_aux_frame6MpnIJVMState__i_; +text: .text%__1cTloadD_unalignedNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_; +text: .text%__1cJMemRegion2t6M_v_: cardTableModRefBS.o; +text: .text%__1cSestimate_path_freq6FpnENode__f_: loopnode.o; +text: .text%__1cCosOreserve_memory6FIpc_1_; +text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_; +text: .text%__1cSmulL_reg_imm13NodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNRegisterSaverWrestore_live_registers6FpnOMacroAssembler__v_; +text: .text%__1cKstfSSFNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLTypeInstPtrOxmeet_unloaded6kMpk0_2_; +text: .text%__1cENodeHis_AddP6M_pnIAddPNode__: subnode.o; +text: .text%__1cRtestI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cCosNcommit_memory6FpcI_i_; +text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_; +text: .text%__1cWImplicitExceptionTable2t6MpknHnmethod__v_; +text: .text%__1cWImplicitExceptionTableCat6kMI_I_; +text: .text%__1cFParseVcatch_call_exceptions6MrnYciExceptionHandlerStream__v_; +text: .text%jni_GetJavaVM; +text: .text%__1cOcmovDF_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%jni_MonitorEnter: jni.o; +text: .text%__1cQConstantIntValuePis_constant_int6kM_i_: debugInfo.o; +text: .text%jni_MonitorExit: jni.o; +text: .text%__1cOMacroAssemblerDret6Mi_v_: templateTable_sparc.o; +text: .text%__1cLConvL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cULinearLeastSquareFit2t6MI_v_; +text: .text%__1cQdivL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: 
.text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciObjArrayKlass.o; +text: .text%__1cOLibraryCallKitbBinline_native_currentThread6M_i_; +text: .text%__1cIciObjectMis_classless6kM_i_: ciMethod.o; +text: .text%__1cTGeneratePairingInfoOreport_results6kM_i_: ciMethod.o; +text: .text%__1cNReservedSpaceKfirst_part6MIii_0_; +text: .text%__1cNReservedSpace2t6MI_v_; +text: .text%__1cSCardTableExtensionVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cOloadI_fregNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_; +text: .text%__1cIAddDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cJloadFNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKConv2BNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvI2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cSconvD2I_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%jni_Throw: jni.o; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_; +text: .text%__1cFTypeFGis_nan6kM_i_; +text: .text%__1cLMoveL2DNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cKReturnNodeUdepends_only_on_test6kM_i_: classes.o; +text: .text%__1cIDivINodeJideal_reg6kM_I_: classes.o; +text: .text%__1cISubDNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cPstoreI_FregNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cINegFNodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cQciByteCodeStreamXget_method_holder_index6M_i_; +text: .text%__1cOLibraryCallKitXgenerate_current_thread6MrpnENode__2_; +text: .text%__1cOMacroAssemblerEfneg6MnRFloatRegisterImplFWidth_p13_v_; +text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_: interpreterRT_sparc.o; +text: .text%__1cRtestI_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNSpaceCounters2t6MpkciIpnMMutableSpace_pnSGenerationCounters__v_; +text: .text%__1cLcmpF_ccNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_; +text: .text%jni_SetObjectField: jni.o; +text: .text%__1cISubDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cISubFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__; +text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_; +text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_; +text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_sparc.o; +text: .text%bootstrap_flush_windows; +text: .text%__1cMloadConPNodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cSdivL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerbCverify_oop_or_return_address6MpnMRegisterImpl_2_v_; +text: .text%__1cFStateO_sub_Op_Conv2B6MpknENode__v_; +text: .text%__1cQmodL_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cNRegisterSaverTsave_live_registers6FpnOMacroAssembler_ipi_pnGOopMap__; +text: .text%__1cSmulL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: symbolKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: symbolKlass.o; +text: .text%__1cIDivFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cKMemoryPool2t6Mpkcn0AIPoolType_IIii_v_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_; +text: 
.text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_; +text: .text%__1cSsubL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_double6M_v_; +text: .text%__1cPciInstanceKlassbDcompute_shared_is_initialized6M_i_; +text: .text%__1cQmulD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%Unsafe_AllocateMemory; +text: .text%__1cSandL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_GetLastErrorString; +text: .text%__1cQmodL_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_; +text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_; +text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_; +text: .text%__1cPstoreI_FregNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_; +text: .text%__1cSandI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNTemplateTablebAload_invoke_cp_cache_entry6FipnMRegisterImpl_22ii_v_; +text: .text%__1cMnegD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNciMethodKlassEmake6F_p0_; +text: .text%__1cOLibraryCallKitZinline_native_Class_query6MnIciMethodLIntrinsicId__i_; +text: .text%__1cNTemplateTableGlstore6Fi_v_; +text: .text%__1cOLibraryCallKitbNinline_native_Reflection_getCallerClass6M_i_; +text: .text%__1cLConvF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cQaddI_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cPBytecode_invokeIis_valid6kM_i_: frame.o; +text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_; +text: .text%__1cRcompL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cLconvI2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cLConvD2FNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_2_v_; +text: .text%__1cSconvD2I_helperNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cPconvI2D_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsubI_zero_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cLClassLoaderXcreate_class_path_entry6FpcnEstat_ppnOClassPathEntry__v_; +text: .text%__1cKstfSSFNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cOClassPathEntry2t6M_v_; +text: .text%__1cZInterpreterMacroAssemblerRprofile_checkcast6MipnMRegisterImpl_2_v_; +text: .text%__1cINegDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cZInterpreterMacroAssemblerQaccess_local_ptr6MpnMRegisterImpl_2_v_; +text: .text%__1cOloadConL13NodeFclone6kM_pnENode__; +text: .text%__1cNTemplateTableGistore6Fi_v_; +text: .text%__1cIRetTableUfind_jsrs_for_target6Mi_pnNRetTableEntry__; +text: .text%__1cPconvL2I_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cUcompI_iReg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cRsarI_reg_imm5NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cSstkL_to_regD_2NodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableGastore6Fi_v_; +text: .text%__1cNTemplateTableZload_field_cp_cache_entry6FipnMRegisterImpl_22i_v_; +text: .text%__1cOMacroAssemblerMload_address6MrnHAddress_i_v_: assembler_sparc.o; +text: .text%__1cIRetTableHadd_jsr6Mii_v_; +text: .text%__1cMnegF_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQmulI_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cQregF_to_stkINodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cRComputeEntryStackHdo_bool6M_v_: generateOopMap.o; +text: .text%__1cPorL_reg_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; 
+text: .text%__1cQmulD_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cNTemplateTableGdstore6Fi_v_; +text: .text%__1cNTemplateTableGfstore6Fi_v_; +text: .text%jni_CallStaticObjectMethod: jni.o; +text: .text%__1cPconvD2F_regNodeFclone6kM_pnENode__; +text: .text%__1cbCAbstractInterpreterGeneratorbHgenerate_exception_handler_common6Mpkc2i_pC_; +text: .text%__1cLconvP2BNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cLconvP2BNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cOcmovLL_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cQandI_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cMnegD_regNodeFclone6kM_pnENode__; +text: .text%__1cOMacroAssemblerJfloat_cmp6MiipnRFloatRegisterImpl_2pnMRegisterImpl__v_; +text: .text%__1cLconvI2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cODeoptimizationLUnrollBlockOsize_of_frames6kM_i_; +text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceRefKlass.o; +text: .text%__1cCosGsignal6Fipv_1_; +text: .text%__1cQaddD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cISubDNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cISubFNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cISubFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNflagsRegLOperFclone6kM_pnIMachOper__; +text: .text%__1cNTemplateTableFlload6Fi_v_; +text: .text%__1cNTemplateTableFiload6Fi_v_; +text: .text%__1cJcmpOpOperFclone6kM_pnIMachOper__; +text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_; +text: .text%__1cFTypeFFempty6kM_i_; +text: .text%__1cLconvP2BNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cVMoveF2I_stack_regNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC22_v_; +text: .text%__1cOtypeArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC2_v_; +text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_; +text: .text%__1cKstfSSFNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cOGenerateOopMapTret_jump_targets_do6MpnOBytecodeStream_pFp0ipi_vi4_v_; +text: .text%__1cPconvI2D_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%Unsafe_SetMemory; +text: .text%__1cSstkL_to_regD_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKstfSSFNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_x6MnJAssemblerJCondition_pCpnMRegisterImpl__v_; +text: .text%__1cVMoveF2I_stack_regNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cHTypePtrKadd_offset6kMi_pk0_; +text: .text%__1cOcmovLI_regNodeHsize_of6kM_I_: ad_sparc_misc.o; +text: .text%__1cNloadConL0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cKg1RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cOcmovPI_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovDF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_; +text: .text%__1cQsubF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSstkL_to_regD_1NodeFclone6kM_pnENode__; +text: .text%__1cFParseRjump_if_true_fork6MpnGIfNode_ii_v_; +text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_icc6MnJAssemblerJCondition_pCpnMRegisterImpl__v_; +text: .text%__1cNTemplateTableFfload6Fi_v_; +text: .text%__1cFParsePdo_lookupswitch6M_v_; +text: .text%__1cNTemplateTableFdload6Fi_v_; +text: .text%__1cNgen_new_frame6FpnOMacroAssembler_i_v_: runtime_sparc.o; +text: .text%__1cKstfSSFNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cINegDNodeJideal_reg6kM_I_: classes.o; +text: 
.text%__1cNTemplateTableFaload6Fi_v_; +text: .text%__1cRMachSpillCopyNodeHsize_of6kM_I_: ad_sparc.o; +text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_; +text: .text%__1cOGenerateOopMapRdo_multianewarray6Mii_v_; +text: .text%__1cQOopMapCacheEntryPfill_for_native6M_v_; +text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%jni_CallStaticObjectMethodV: jni.o; +text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_; +text: .text%__1cIDivDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMloadConDNodeGis_Con6kM_I_: ad_sparc_misc.o; +text: .text%__1cJMemRegionFminus6kMk0_0_; +text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__; +text: .text%__1cJArgumentsMbuild_string6Fppcpkc_v_; +text: .text%__1cSInterpreterRuntimebKthrow_ArrayIndexOutOfBoundsException6FpnKJavaThread_pci_v_; +text: .text%__1cOtailjmpIndNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cNMemoryManager2t6M_v_; +text: .text%__1cFStatebB_sub_Op_PartialSubtypeCheck6MpknENode__v_; +text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_22_v_; +text: .text%__1cVMoveL2D_stack_regNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cJArgumentsMadd_property6Fpkc_i_; +text: .text%__1cOtailjmpIndNodeHtwo_adr6kM_I_: ad_sparc_misc.o; +text: .text%__1cSObjectSynchronizerHinflate6FpnHoopDesc__pnNObjectMonitor__; +text: .text%__1cFStateM_sub_Op_DivI6MpknENode__v_; +text: .text%__1cCosHSolarisOis_sig_ignored6Fi_i_; +text: .text%__1cUPSGenerationCounters2t6MpkciipnOPSVirtualSpace__v_; +text: .text%__1cCosFyield6F_v_; +text: .text%__1cLOptoRuntimeVgenerate_handler_blob6FpCi_pnNSafepointBlob__; +text: .text%__1cKstfSSFNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: runtime_sparc.o; +text: .text%__1cQsubD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_: interpreterRT_sparc.o; +text: .text%__1cOtailjmpIndNodeGpinned6kM_i_: ad_sparc_misc.o; +text: .text%__1cIDivDNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cFParseRdo_multianewarray6M_v_; +text: .text%__1cLOptoRuntimeTmultianewarray_Type6Fi_pknITypeFunc__; +text: .text%__1cZInterpreterMacroAssemblerRget_constant_pool6MpnMRegisterImpl__v_; +text: .text%__1cXPartialSubtypeCheckNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cOcmovIF_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNFingerprinterLfingerprint6M_X_: oopMapCache.o; +text: .text%__1cLMoveF2INodeLbottom_type6kM_pknEType__: classes.o; +text: .text%__1cSconvI2D_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveF2I_stack_regNodeZcheck_for_anti_dependence6kM_i_: ad_sparc_misc.o; +text: .text%__1cLstoreF0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl_2_v_; +text: .text%__1cPstoreI_FregNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cSCommandLineFlagsExJboolAtPut6FnXCommandLineFlagWithType_i_v_; +text: .text%__1cSCommandLineFlagsExKis_default6FnPCommandLineFlag__i_; +text: .text%__1cOcmovLL_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_i2_v_; +text: .text%__1cRNativeInstructionPis_ic_miss_trap6M_i_; +text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: 
.text%__1cMciArrayKlassRbase_element_type6M_pnGciType__; +text: .text%JVM_GetInterfaceVersion; +text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_2222rnFLabel__v_; +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableGfconst6Fi_v_; +text: .text%__1cOMacroAssemblerDbrx6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: stubGenerator_sparc.o; +text: .text%__1cOCompilerThreadbCis_hidden_from_external_view6kM_i_: thread.o; +text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_; +text: .text%__1cOcmovPI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_; +text: .text%JVM_RegisterSignal; +text: .text%JVM_FindSignal; +text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pIi_v_: oopMapCache.o; +text: .text%jio_vsnprintf; +text: .text%__1cQshrL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cZInterpreterMacroAssemblerVincrement_mdp_data_at6MpnMRegisterImpl_i22_v_; +text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_222_v_; +text: .text%__1cNReservedSpaceJlast_part6MI_0_; +text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_; +text: .text%__1cOPSVirtualSpace2t6MnNReservedSpace_I_v_; +text: .text%__1cFTypeDFempty6kM_i_; +text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o; +text: .text%__1cNIdealLoopTreeUmerge_many_backedges6MpnOPhaseIdealLoop__v_; +text: .text%__1cODeoptimizationLUnrollBlock2T6M_v_; +text: .text%jni_GetDoubleArrayRegion: jni.o; +text: .text%__1cHciKlassIis_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_; +text: .text%__1cLconvP2BNodeErule6kM_I_: ad_sparc_misc.o; +text: .text%__1cKfix_parent6FpnNIdealLoopTree_1_v_: loopnode.o; +text: .text%__1cZInterpreterMacroAssemblerUadd_monitor_to_stack6MipnMRegisterImpl_2_v_; +text: .text%JVM_Available; +text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_; +text: .text%__1cQshlL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMOopTaskQdDueueKinitialize6M_v_; +text: .text%__1cMOopTaskQdDueue2t6M_v_; +text: .text%__1cRNativeInstructionKis_illegal6M_i_; +text: .text%__1cZInterpreterMacroAssemblerQtop_most_monitor6M_nHAddress__; +text: .text%__1cLstoreF0NodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cCosGgetenv6Fpkcpci_i_; +text: .text%__1cMVM_OperationNdoit_prologue6M_i_: vm_operations.o; +text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_; +text: .text%__1cKi0RegPOperJnum_edges6kM_I_: ad_sparc.o; +text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_; +text: .text%__1cPOopTaskQdDueueSetOregister_queue6MipnMOopTaskQdDueue__v_; +text: .text%__1cMVM_OperationNdoit_epilogue6M_v_: vm_operations.o; +text: .text%__1cNVM_DeoptimizeEname6kM_pkc_: vm_operations.o; +text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_; +text: .text%__1cSconvF2I_helperNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNloadConP0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_; +text: .text%__1cbAconvL2D_reg_slow_fxtofNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: 
.text%__1cOstackSlotFOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cNTemplateTableSputfield_or_static6Fii_v_; +text: .text%__1cOstackSlotFOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cOstackSlotFOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o; +text: .text%__1cPconvF2I_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cNTemplateTableSgetfield_or_static6Fii_v_; +text: .text%__1cNTemplateTableUjvmti_post_field_mod6Fii_v_; +text: .text%__1cHMatcherQconvL2FSupported6F_ki_; +text: .text%__1cNTemplateTableGlconst6Fi_v_; +text: .text%__1cLstoreC0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cRcompL_reg_regNodeFclone6kM_pnENode__; +text: .text%__1cUcompI_iReg_imm13NodeFclone6kM_pnENode__; +text: .text%__1cMPeriodicTaskGenroll6M_v_; +text: .text%__1cMPeriodicTask2t6MI_v_; +text: .text%__1cPconvF2I_regNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cFTypeDJis_finite6kM_i_; +text: .text%__1cPconvL2I_regNodeFclone6kM_pnENode__; +text: .text%__1cNTemplateTableHcastore6F_v_; +text: .text%Unsafe_CompareAndSwapObject; +text: .text%__1cLNamedThread2t6M_v_; +text: .text%__1cSconvD2I_helperNodeFclone6kM_pnENode__; +text: .text%__1cLNamedThreadIset_name6MpkcE_v_; +text: .text%__1cJloadDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQdivD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWloadConI_x43300000NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableKinitialize6F_v_; +text: .text%__1cKcmpOpFOperJnot_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cPconvD2F_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_; +text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_; +text: .text%__1cNTemplateTableGdconst6Fi_v_; +text: .text%__1cNTemplateTableDldc6Fi_v_; +text: .text%__1cSconvF2I_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovIF_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovIF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: universe.o; +text: .text%__1cJimmL0OperJnum_edges6kM_I_: ad_sparc_clone.o; +text: .text%__1cLcastP2INodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovLL_regNodeFclone6kM_pnENode__; +text: .text%__1cbAconvL2D_reg_slow_fxtofNodePoper_input_base6kM_I_: ad_sparc_misc.o; +text: .text%__1cLconvP2BNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cSaddD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSsubD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQregF_to_stkINodeFclone6kM_pnENode__; +text: .text%__1cSstkL_to_regD_2NodeFclone6kM_pnENode__; +text: .text%__1cQregF_to_stkINodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNTemplateTableTinvokevfinal_helper6FpnMRegisterImpl_2_v_; +text: .text%__1cSmulD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableUgenerate_vtable_call6FpnMRegisterImpl_22_v_; +text: .text%__1cSstkL_to_regD_2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cSstkL_to_regD_0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_; +text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_; +text: .text%__1cSmembar_releaseNodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cJimmI0OperFclone6kM_pnIMachOper__; +text: 
.text%__1cNVM_DeoptimizeEdoit6M_v_; +text: .text%__1cNloadConL0NodeJnum_opnds6kM_I_: ad_sparc_misc.o; +text: .text%__1cUregI_to_stkLHi_0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMnegF_regNodeFclone6kM_pnENode__; +text: .text%__1cQsubL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cMVirtualSpace2t6M_v_; +text: .text%__1cRsarI_reg_imm5NodeFclone6kM_pnENode__; +text: .text%__1cWloadConI_x41f00000NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQdivI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cMVirtualSpaceKinitialize6MnNReservedSpace_I_i_; +text: .text%__1cZregDHi_regDLo_to_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cRsubI_zero_regNodeFclone6kM_pnENode__; +text: .text%__1cXconvI2D_regDHi_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cKloadUBNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cNTemplateTableEidiv6F_v_; +text: .text%__1cOcmovLI_regNodeFclone6kM_pnENode__; +text: .text%__1cQstkI_to_regINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUregI_to_stkLHi_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cLMoveL2DNodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLConvD2FNodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLConvF2INodeIIdentity6MpnOPhaseTransform__pnENode__; +text: .text%__1cLConvF2INodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_; +text: .text%__1cLMoveF2INodeFValue6kMpnOPhaseTransform__pknEType__; +text: .text%__1cLOptoRuntimeIl2f_Type6F_pknITypeFunc__; +text: .text%__1cOMacroAssemblerUcalc_mem_param_words6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MnITosState_pnMRegisterImpl_3_v_; +text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_; +text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_; +text: .text%__1cZInterpreterMacroAssemblerSupdate_mdp_for_ret6MnITosState_pnMRegisterImpl__v_; +text: .text%__1cOLibraryCallKitVinline_fp_conversions6MnIciMethodLIntrinsicId__i_; +text: .text%__1cZInterpreterMacroAssemblerPset_mdp_flag_at6MipnMRegisterImpl_2_v_; +text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_; +text: .text%__1cUInterpreterGeneratorLlock_method6M_v_; +text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_26MpCpnMRegisterImpl_rnFLabel__v_; +text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_1_x6MnJAssemblerJCondition_rnFLabel__v_; +text: .text%__1cZInterpreterMacroAssemblerZget_4_byte_integer_at_bcp6MipnMRegisterImpl_2n0AKsetCCOrNot__v_; +text: .text%__1cUInterpreterGeneratorVgenerate_native_entry6Mi_pC_; +text: .text%__1cCosHrealloc6FpvI_1_; +text: .text%__1cUConstantOopReadValuePis_constant_oop6kM_i_: debugInfo.o; +text: .text%__1cKScopeValuePis_constant_int6kM_i_: debugInfo.o; +text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_; +text: .text%__1cKklassKlassMoop_is_klass6kM_i_: arrayKlassKlass.o; +text: .text%__1cFStateO_sub_Op_CMoveL6MpknENode__v_; +text: .text%__1cZInterpreterMacroAssemblerRaccess_local_long6MpnMRegisterImpl_2_v_; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cIPSOldGenPinitialize_work6Mpkci_v_; +text: .text%__1cCosIjvm_path6Fpci_v_; +text: 
.text%__1cCosNsigexitnum_pd6F_i_; +text: .text%__1cCosScurrent_process_id6F_i_; +text: .text%__1cINegFNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciTypeArrayKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: objArrayKlassKlass.o; +text: .text%__1cCosPuncommit_memory6FpcI_i_; +text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_; +text: .text%__1cLConvL2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvF2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLConvD2FNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__; +text: .text%__1cZInterpreterMacroAssemblerSaccess_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerTaccess_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerPstore_local_int6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerPstore_local_ptr6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerQstore_local_long6MpnMRegisterImpl_2_v_; +text: .text%__1cZInterpreterMacroAssemblerRstore_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cZInterpreterMacroAssemblerSstore_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_; +text: .text%__1cCosWactive_processor_count6F_i_; +text: .text%__1cSReferenceProcessor2t6MnJMemRegion_iii_v_; +text: .text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: jniFastGetField_sparc.o; +text: .text%__1cRcheck_if_clipping6FpknKRegionNode_rpnGIfNode_5_i_: cfgnode.o; +text: .text%__1cTAbstractInterpreterKinitialize6F_v_; +text: .text%__1cIciObjectOis_array_klass6M_i_: ciInstanceKlass.o; +text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstanceKlass.o; +text: .text%__1cPmake_new_frames6FpnOMacroAssembler_i_v_: runtime_sparc.o; +text: .text%jni_NewWeakGlobalRef: jni.o; +text: .text%__1cGatomll6Fpkcpx_i_: arguments.o; +text: .text%__1cRComputeEntryStackIdo_array6Mii_v_: generateOopMap.o; +text: .text%__1cTMaskFillerForNativeLpass_object6M_v_: oopMapCache.o; +text: .text%__1cOGenerateOopMapTadd_to_ref_init_set6Mi_v_; +text: .text%__1cUGcThreadCountClosureJdo_thread6MpnGThread__v_; +text: .text%__1cNinstanceKlassSremove_osr_nmethod6MpnHnmethod__v_; +text: .text%__1cOPSVirtualSpace2t6M_v_; +text: .text%jni_IsInstanceOf: jni.o; +text: .text%__1cGThreadMis_VM_thread6kM_i_: gcTaskThread.o; +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: oopMapCache.o; +text: .text%__1cMGCTaskThreadDrun6M_v_; +text: .text%__1cJCodeCachebGmake_marked_nmethods_not_entrant6F_v_; +text: .text%__1cTMaskFillerForNativeJpass_long6M_v_: oopMapCache.o; +text: .text%jni_CallStaticVoidMethodV: jni.o; +text: .text%__1cKCodeBufferGresize6M_v_; +text: .text%jni_CallStaticBooleanMethod: jni.o; +text: .text%__1cMGCTaskThread2t6MpnNGCTaskManager_II_v_; +text: .text%__1cOtailjmpIndNodeIpipeline6kM_pknIPipeline__; +text: .text%__1cMGCTaskThreadFstart6M_v_; +text: .text%__1cNStubGenerator2t6MpnKCodeBuffer_i_v_: stubGenerator_sparc.o; +text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_; +text: .text%__1cRFloatRegisterImplIencoding6kMn0AFWidth__i_: interpreter_sparc.o; +text: .text%__1cCosbCis_thread_cpu_time_supported6F_i_; +text: .text%__1cJArgumentsXPropertyList_unique_add6FppnOSystemProperty_pkcpc_v_; +text: .text%__1cQObjectStartArrayKinitialize6MnJMemRegion__v_; +text: .text%__1cQObjectStartArraySset_covered_region6MnJMemRegion__v_; +text: .text%__1cUGenericGrowableArrayGgrow646Mi_v_; 
+text: .text%__1cZInterpreterMacroAssemblerbAdispatch_next_noverify_oop6MnITosState_i_v_; +text: .text%__1cOMacroAssemblerDret6Mi_v_: stubGenerator_sparc.o; +text: .text%__1cRCollectorCounters2t6Mpkci_v_; +text: .text%__1cFParseDl2f6M_v_; +text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_; +text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_; +text: .text%__1cPGCMemoryManager2t6M_v_; +text: .text%__1cRComputeEntryStackHdo_long6M_v_: generateOopMap.o; +text: .text%__1cOPSVirtualSpaceKinitialize6MnNReservedSpace_I_i_; +text: .text%__1cSInterpreterRuntimeWcreate_klass_exception6FpnKJavaThread_pcpnHoopDesc__v_; +text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o; +text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_; +text: .text%__1cSInterpreterRuntimeSupdate_mdp_for_ret6FpnKJavaThread_i_v_; +text: .text%__1cPorL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_; +text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_: interpreterRT_sparc.o; +text: .text%__1cCosHSolarisQsignal_sets_init6F_v_; +text: .text%__1cCosScreate_main_thread6FpnGThread__i_; +text: .text%__1cCosbDallocate_thread_local_storage6F_i_; +text: .text%__1cUInterpreterGeneratorVrestore_native_result6M_v_; +text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_; +text: .text%__1cHMatcherbDinterpreter_frame_pointer_reg6F_nHOptoRegEName__; +text: .text%__1cLconvP2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVshrL_reg_imm6_L2INodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_; +text: .text%__1cCosGstrdup6Fpkc_pc_; +text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_throw_exception6M_v_; +text: .text%__1cCosLinit_random6Fl_v_; +text: .text%__1cCosNset_boot_path6Fcc_i_; +text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_; +text: .text%__1cCosXis_server_class_machine6F_i_; +text: .text%__1cCosXterminate_signal_thread6F_v_; +text: .text%__1cCosLsignal_init6F_v_; +text: .text%__1cKTypeOopPtrCeq6kMpknEType__i_; +text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o; +text: .text%__1cOtailjmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_; +text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_; +text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_StackOverflowError_handler6M_pC_; +text: .text%__1cCosbDinit_system_properties_values6F_v_; +text: .text%__1cCosPphysical_memory6F_X_; +text: .text%__1cHvm_exit6Fi_v_; +text: .text%__1cLbefore_exit6FpnKJavaThread__v_; +text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_; +text: .text%__1cSThreadLocalStorageHpd_init6F_v_; +text: .text%__1cVMoveF2I_stack_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVMoveL2D_stack_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cWinvocationCounter_init6F_v_; +text: .text%__1cRInvocationCounterMreinitialize6Fi_v_; +text: .text%__1cKTypeOopPtrEmake6FnHTypePtrDPTR_i_pk0_; +text: .text%__1cKTypeOopPtrFxdual6kM_pknEType__; +text: .text%__1cFParseMjump_if_join6MpnENode_2_2_; +text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cSinstanceKlassKlassUoop_is_instanceKlass6kM_i_: instanceKlassKlass.o; +text: 
.text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: instanceKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: instanceKlassKlass.o; +text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlassKlass.o; +text: .text%__1cLconvP2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__; +text: .text%__1cETypeRInitialize_shared6FpnHCompile__v_; +text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_; +text: .text%__1cVInterfaceSupport_init6F_v_; +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpnMRegisterImpl_pC2_v_; +text: .text%__1cFParseNfetch_monitor6MipnENode_2_2_; +text: .text%__1cPGenerationSizerQinitialize_flags6M_v_: parallelScavengeHeap.o; +text: .text%__1cbCTwoGenerationCollectorPolicyMrem_set_name6M_nJGenRemSetEName__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapEkind6M_nNCollectedHeapEName__: parallelScavengeHeap.o; +text: .text%__1cZInterpreterMacroAssemblerPdispatch_normal6MnITosState__v_; +text: .text%__1cJTimeStampMmilliseconds6kM_x_; +text: .text%__1cDhpiZinitialize_socket_library6F_i_; +text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_; +text: .text%__1cWInlineCacheBuffer_init6F_v_; +text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_; +text: .text%__1cPGlobalTLABStats2t6M_v_; +text: .text%__1cLicache_init6F_v_; +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: parse1.o; +text: .text%__1cSThreadLocalStorageEinit6F_v_; +text: .text%__1cNThreadServiceEinit6F_v_; +text: .text%__1cTICacheStubGeneratorVgenerate_icache_flush6MppFpCii_i_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: indexSet.o; +text: .text%__1cPvm_init_globals6F_v_; +text: .text%__1cMinit_globals6F_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_sparc_expand.o; +text: .text%__1cMexit_globals6F_v_; +text: .text%__1cSset_init_completed6F_v_; +text: .text%__1cNinstanceKlassZrelease_C_heap_structures6M_v_; +text: .text%__1cJTimeStampJupdate_to6Mx_v_; +text: .text%__1cUParallelScavengeHeapItop_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o; +text: .text%__1cOisT2_libthread6F_i_; +text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interp_masm_sparc.o; +text: .text%__1cQinterpreter_init6F_v_; +text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_; +text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o; +text: .text%__1cNpriocntl_stub6FinGidtype_lipc_l_: os_solaris.o; +text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_; +text: .text%__1cbCAbstractInterpreterGeneratorbEset_entry_points_for_all_bytes6M_v_; +text: .text%__1cCosHSolarisRmpss_sanity_check6F_v_; +text: .text%__1cCosLsignal_wait6F_i_; +text: .text%__1cVcheck_pending_signals6Fi_i_: os_solaris.o; +text: .text%__1cCosNsignal_notify6Fi_v_; +text: .text%__1cCosOsignal_init_pd6F_v_; +text: .text%__1cCosHSolarisPinit_signal_mem6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o; +text: .text%__1cCosSget_temp_directory6F_pkc_; +text: .text%__1cCosHSolarisOlibthread_init6F_v_; +text: .text%__1cUParallelScavengeHeapIend_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o; +text: .text%__1cUParallelScavengeHeapEheap6F_p0_; +text: .text%__1cUParallelScavengeHeapNgc_threads_do6kMpnNThreadClosure__v_; +text: .text%__1cUParallelScavengeHeapYpermanent_object_iterate6MpnNObjectClosure__v_; +text: 
.text%__1cKcmpOpFOperNgreater_equal6kM_i_: ad_sparc_clone.o; +text: .text%__1cUParallelScavengeHeapMmax_capacity6kM_I_; +text: .text%__1cUParallelScavengeHeapPpost_initialize6M_v_; +text: .text%__1cUParallelScavengeHeapKinitialize6M_i_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o; +text: .text%__1cZInterpreterMacroAssemblerbFset_method_data_pointer_for_bcp6M_v_; +text: .text%__SLIP.DELETER__C: ostream.o; +text: .text%__1cMostream_exit6F_v_; +text: .text%__1cQostream_init_log6F_v_; +text: .text%__1cMostream_init6F_v_; +text: .text%__1cCosXnon_memory_address_word6F_pc_; +text: .text%__1cCosGinit_26F_i_; +text: .text%__1cCosEinit6F_v_; +text: .text%__1cCosHSolarisUsynchronization_init6F_v_; +text: .text%__1cVjni_GetLongField_addr6F_pC_; +text: .text%__1cNIdealLoopTreeQsplit_outer_loop6MpnOPhaseIdealLoop__v_; +text: .text%__1cRLowMemoryDetectorKinitialize6F_v_; +text: .text%__1cNReservedSpace2t6MIIipc_v_; +text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_; +text: .text%__1cXLowMemoryDetectorThreadbCis_hidden_from_external_view6kM_i_: lowMemoryDetector.o; +text: .text%__1cNReservedSpaceUpage_align_size_down6FI_I_; +text: .text%__1cNReservedSpaceYallocation_align_size_up6FI_I_; +text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cGThreadMis_VM_thread6kM_i_: lowMemoryDetector.o; +text: .text%__1cTloadL_unalignedNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: machnode.o; +text: .text%__1cPmanagement_init6F_v_; +text: .text%__1cOvmStructs_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o; +text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_; +text: .text%__1cKManagementKinitialize6FpnGThread__v_; +text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_; +text: .text%__1cIVMThreadGcreate6F_v_; +text: .text%__1cIVMThreadDrun6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o; +text: .text%__1cOBasicHashtable2t6Mii_v_: loaderConstraints.o; +text: .text%__1cLJvmtiExportNpost_vm_start6F_v_; +text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_; +text: .text%__1cLJvmtiExportNpost_vm_death6F_v_; +text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_; +text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o; +text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cFKlassUoop_is_instanceKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: klassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: klassKlass.o; +text: .text%__1cFKlassPoop_is_objArray6kM_i_: klassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: klassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: klassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: klassKlass.o; +text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: klassKlass.o; +text: .text%__1cOLibraryCallKitWinline_native_hashcode6Mii_i_; +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: library_call.o; +text: .text%__1cVLoaderConstraintTable2t6Mi_v_; +text: .text%__1cQregL_to_stkLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cHRetDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_; 
+text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_; +text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o; +text: .text%__1cPVM_Version_init6F_v_; +text: .text%__1cKVM_VersionKinitialize6F_v_; +text: .text%__1cHRetDataJfixup_ret6MinQmethodDataHandle__pC_; +text: .text%__1cHRetDataKis_RetData6M_i_: methodDataOop.o; +text: .text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cLmethodKlassOset_alloc_size6MI_v_: methodKlass.o; +text: .text%__1cQvtableStubs_init6F_v_; +text: .text%__1cKi0RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cKg1RegPOperKin_RegMask6kMi_pknHRegMask__; +text: .text%__1cFVTuneEexit6F_v_; +text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodKlass.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodLiveness.o; +text: .text%__1cMMutableSpaceOobject_iterate6MpnNObjectClosure__v_; +text: .text%__1cKvtune_init6F_v_; +text: .text%__1cKmutex_init6F_v_; +text: .text%__1cQaccessFlags_init6F_v_; +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC222_v_; +text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_; +text: .text%__1cOmarksweep_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o; +text: .text%__1cHMatcherVfind_callee_arguments6FpnNsymbolOopDesc_ipi_pnLRegPair__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: matcher.o; +text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_; +text: .text%__1cNMemoryManagerbDget_psScavenge_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cNMemoryManagerbEget_psMarkSweep_memory_manager6F_pnPGCMemoryManager__; +text: .text%__1cQPSGenerationPool2t6MpnIPSOldGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cQPSGenerationPool2t6MpnJPSPermGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cUEdenMutableSpacePool2t6MpnKPSYoungGen_pnMMutableSpace_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cYSurvivorMutableSpacePool2t6MpnKPSYoungGen_pkcnKMemoryPoolIPoolType_i_v_; +text: .text%__1cMCodeHeapPool2t6MpnICodeHeap_pkci_v_; +text: .text%__1cHVM_ExitEdoit6M_v_; +text: .text%__1cHVM_ExitEname6kM_pkc_: vm_operations.o; +text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_; +text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o; +text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_: methodDataKlass.o; +text: .text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodDataKlass.o; +text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_; +text: .text%__1cLstoreF0NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%JNI_CreateJavaVM; +text: .text%__1cXonStackReplacement_init6F_v_; +text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_; +text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cTtypeArrayKlassKlassVoop_is_typeArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassQoop_is_typeArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_; +text: 
.text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassMoop_is_array6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlassKlass.o; +text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_; +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: typeArrayKlassKlass.o; +text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlassKlass.o; +text: .text%__1cIUniversePcheck_alignment6FIIpkc_v_; +text: .text%__1cIUniverseHgenesis6FpnGThread__v_; +text: .text%__1cVquicken_jni_functions6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o; +text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_; +text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_; +text: .text%__1cQdivD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_; +text: .text%__1cQjavaClasses_init6F_v_; +text: .text%jni_ToReflectedMethod: jni.o; +text: .text%__1cQsubD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cYjni_GetBooleanField_addr6F_pC_; +text: .text%__1cVjni_GetByteField_addr6F_pC_; +text: .text%__1cQaddF_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cVjni_GetCharField_addr6F_pC_; +text: .text%__1cWjni_GetShortField_addr6F_pC_; +text: .text%__1cUjni_GetIntField_addr6F_pC_; +text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_; +text: .text%__1cWjni_GetFloatField_addr6F_pC_; +text: .text%__1cRsarL_reg_imm6NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cXjni_GetDoubleField_addr6F_pC_; +text: .text%__1cQshlI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_; +text: .text%__1cFKlassRoop_is_methodData6kM_i_: objArrayKlass.o; +text: .text%JVM_InitializeSocketLibrary; +text: .text%JVM_RegisterUnsafeMethods; +text: .text%__1cOcmovLI_regNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cOcmovLI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cOcmovDF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%JVM_Socket; +text: .text%__1cbEinitialize_converter_functions6F_v_; +text: .text%JVM_SupportsCX8; +text: .text%__1cOcmovIF_immNodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o; +text: .text%__1cUJvmtiEventControllerIvm_start6F_v_; +text: .text%__1cUJvmtiEventControllerHvm_init6F_v_; +text: .text%__1cUJvmtiEventControllerIvm_death6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o; +text: .text%__1cKstfSSFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cVverificationType_init6F_v_; +text: .text%__1cVverificationType_exit6F_v_; +text: .text%__1cLJvmtiExportRenter_start_phase6F_v_; +text: .text%__1cLJvmtiExportQenter_live_phase6F_v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlass.o; +text: .text%__1cSmulL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_; +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_; +text: .text%__1cSmulI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_; +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_; +text: .text%__1cNuniverse_init6F_i_; +text: .text%__1cOuniverse2_init6F_v_; +text: .text%__1cSuniverse_post_init6F_v_; +text: .text%__1cQjni_handles_init6F_v_; 
+text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlassKlass.o; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: objArrayKlassKlass.o; +text: .text%Unsafe_SetNativeLong; +text: .text%JVM_InitProperties; +text: .text%JVM_Halt; +text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlassKlass.o; +text: .text%Unsafe_FreeMemory; +text: .text%Unsafe_PageSize; +text: .text%__1cSobjArrayKlassKlassUoop_is_objArrayKlass6kM_i_: objArrayKlassKlass.o; +text: .text%JVM_MaxMemory; +text: .text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__; +text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%JVM_GetClassDeclaredMethods; +text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__; +text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_; +text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_; +text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_; +text: .text%__1cQSystemDictionarybDinitialize_basic_type_mirrors6FpnGThread__v_; +text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_; +text: .text%__1cRciArrayKlassKlassUis_array_klass_klass6M_i_: ciObjectFactory.o; +text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_; +text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_; +text: .text%__1cLClassLoaderQload_zip_library6F_v_; +text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_; +text: .text%__1cLClassLoaderKinitialize6F_v_; +text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_; +text: .text%__1cMPeriodicTask2T5B6M_v_; +text: .text%__1cQclassLoader_init6F_v_; +text: .text%__1cMPeriodicTaskJdisenroll6M_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: classLoader.o; +text: .text%__1cTClassLoadingServiceEinit6F_v_; +text: .text%__1cTClassLoadingServiceVnotify_class_unloaded6FpnNinstanceKlass_i_v_; +text: .text%__1cMFastLockNodeLis_FastLock6kM_pk0_: classes.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: regmask.o; +text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_; +text: .text%__1cLOptoRuntimeUmultianewarray1_Type6F_pknITypeFunc__; +text: .text%__1cVRegistersForDebuggingRrestore_registers6FpnOMacroAssembler_pnMRegisterImpl__v_: assembler_sparc.o; +text: .text%__1cVRegistersForDebuggingOsave_registers6FpnOMacroAssembler__v_: assembler_sparc.o; +text: .text%__1cJBytecodesKinitialize6F_v_; +text: .text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_; +text: .text%__1cObytecodes_init6F_v_; +text: .text%__1cLOptoRuntimeIgenerate6FpnFciEnv__v_; +text: .text%__1cJBytecodesNpd_initialize6F_v_; +text: .text%__1cHCompileRpd_compiler2_init6F_v_; +text: .text%__1cKC2CompilerKinitialize6M_v_; +text: .text%__1cMTailJumpNode2t6MpnENode_22222_v_; +text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_; +text: .text%__1cWResolveOopMapConflictsOreport_results6kM_i_: rewriter.o; +text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_I_; +text: .text%__1cENodeMis_SafePoint6M_pnNSafePointNode__: cfgnode.o; +text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o; +text: .text%__1cMciKlassKlassEmake6F_p0_; +text: .text%__1cIciMethodMvtable_index6M_i_; +text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_; +text: .text%__1cJLoadFNodeMstore_Opcode6kM_i_: classes.o; +text: .text%__1cNTemplateTableGsipush6F_v_; +text: 
.text%__1cQUncommonTrapBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNTemplateTableGldc2_w6F_v_; +text: .text%__1cNExceptionBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_; +text: .text%__1cNTemplateTableFiload6F_v_; +text: .text%__1cNTemplateTableLfast_iload26F_v_; +text: .text%__1cNTemplateTableKfast_iload6F_v_; +text: .text%__1cNTemplateTableFlload6F_v_; +text: .text%__1cNTemplateTableFfload6F_v_; +text: .text%__1cNTemplateTableFdload6F_v_; +text: .text%__1cNTemplateTableFaload6F_v_; +text: .text%__1cNTemplateTableKwide_iload6F_v_; +text: .text%__1cNTemplateTableKwide_lload6F_v_; +text: .text%__1cNTemplateTableKwide_fload6F_v_; +text: .text%__1cNTemplateTableKwide_dload6F_v_; +text: .text%__1cNTemplateTableKwide_aload6F_v_; +text: .text%__1cNTemplateTableGiaload6F_v_; +text: .text%__1cNTemplateTableGlaload6F_v_; +text: .text%__1cNTemplateTableGfaload6F_v_; +text: .text%__1cNTemplateTableGdaload6F_v_; +text: .text%__1cNTemplateTableGbipush6F_v_; +text: .text%__1cLMoveF2INodeJideal_reg6kM_I_: classes.o; +text: .text%__1cLMoveL2DNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_; +text: .text%__1cHOrLNodeGadd_id6kM_pknEType__: classes.o; +text: .text%__1cHOrLNodeJideal_reg6kM_I_: classes.o; +text: .text%__1cNTemplateTableF_goto6F_v_; +text: .text%__1cNTemplateTableGgoto_w6F_v_; +text: .text%__1cNTemplateTableFjsr_w6F_v_; +text: .text%__1cNTemplateTableDjsr6F_v_; +text: .text%__1cXreferenceProcessor_init6F_v_; +text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_; +text: .text%__1cStemplateTable_init6F_v_; +text: .text%__1cNTemplateTableNpd_initialize6F_v_; +text: .text%__1cURecompilationMonitorbGstart_recompilation_monitor_task6F_v_; +text: .text%__1cNTemplateTableDnop6F_v_; +text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_; +text: .text%__1cNTemplateTableLaconst_null6F_v_; +text: .text%__1cKPSYoungGenbCreset_survivors_after_shrink6M_v_; +text: .text%__1cKPSYoungGenQlimit_gen_shrink6MI_I_; +text: .text%__1cKPSYoungGenRavailable_to_live6M_I_; +text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_; +text: .text%__1cLOptoRuntimeUmultianewarray2_Type6F_pknITypeFunc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_sparc_pipeline.o; +text: .text%__1cNcarSpace_init6F_v_; +text: .text%__1cUAdjoiningGenerations2t6MnNReservedSpace_IIIIIII_v_; +text: .text%__1cWAdjoiningVirtualSpaces2t6MnNReservedSpace_III_v_; +text: .text%__1cOchunkpool_init6F_v_; +text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_; +text: .text%__1cJArgumentsWinit_system_properties6F_v_; +text: .text%__1cINegFNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cMSysClassPathPexpand_endorsed6M_v_; +text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_; +text: .text%__1cJArgumentsVprocess_settings_file6Fpkcii_i_; +text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_; +text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_; +text: .text%__1cJArgumentsZcheck_vm_args_consistency6F_i_; +text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_; +text: .text%__1cJArgumentsWparse_each_vm_init_arg6FpknOJavaVMInitArgs_pnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsVfinalize_vm_init_args6FpnMSysClassPath_i_i_; +text: .text%__1cLStatSamplerGengage6F_v_; +text: .text%__1cNStubGeneratorbNgenerate_flush_callers_register_windows6M_pC_: stubGenerator_sparc.o; +text: .text%__1cSstubRoutines_init16F_v_; +text: .text%__1cSstubRoutines_init26F_v_; +text: 
.text%__1cHAddressQrspec_from_rtype6MnJrelocInfoJrelocType_pC_nQRelocationHolder__: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerIjumpl_to6MrnHAddress_pnMRegisterImpl_i_v_: stubGenerator_sparc.o; +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbIgenerate_handler_for_unsafe_access6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorSgenerate_test_stop6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbIgenerate_copy_words_aligned8_lower6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbJgenerate_copy_words_aligned8_higher6M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbBgenerate_set_words_aligned86M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbCgenerate_zero_words_aligned86M_pC_: stubGenerator_sparc.o; +text: .text%__1cNStubGeneratorbEgenerate_partial_subtype_check6M_pC_: stubGenerator_sparc.o; +text: .text%__1cISubFNodeDsub6kMpknEType_3_3_; +text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_; +text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_; +text: .text%__1cLStatSamplerHdestroy6F_v_; +text: .text%__1cLStatSamplerJdisengage6F_v_; +text: .text%__1cJArgumentsbNparse_java_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cNRegisterSaverYrestore_result_registers6FpnOMacroAssembler__v_; +text: .text%__1cLOptoRuntimeYgenerate_arraycopy_stubs6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o; +text: .text%__1cORuntimeServiceYrecord_application_start6F_v_; +text: .text%__1cOMacroAssemblerNset_vm_result6MpnMRegisterImpl__v_; +text: .text%__1cORuntimeServiceEinit6F_v_; +text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeSfetch_monitor_Type6F_pknITypeFunc__; +text: .text%__1cOMacroAssemblerVverify_oop_subroutine6M_v_; +text: .text%__1cOMacroAssemblerPstop_subroutine6M_v_; +text: .text%__1cOMacroAssemblerElcmp6MpnMRegisterImpl_2222_v_; +text: .text%__1cOMacroAssemblerElneg6MpnMRegisterImpl_2_v_; +text: .text%__1cOMacroAssemblerElshl6MpnMRegisterImpl_22222_v_; +text: .text%__1cOMacroAssemblerElshr6MpnMRegisterImpl_22222_v_; +text: .text%__1cOMacroAssemblerFlushr6MpnMRegisterImpl_22222_v_; +text: .text%__1cLOptoRuntimeUmultianewarray5_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray4_Type6F_pknITypeFunc__; +text: .text%__1cLOptoRuntimeUmultianewarray3_Type6F_pknITypeFunc__; +text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: symbolKlass.o; +text: .text%__1cJArgumentsbSparse_java_tool_options_environment_variable6FpnMSysClassPath_pi_i_; +text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_; +text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_; +text: .text%__1cNSharedRuntimebIinitialize_StrictMath_entry_points6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o; +text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: arrayKlassKlass.o; +text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: 
.text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: arrayKlassKlass.o; +text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC222i_v_; +text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpnMRegisterImpl_pCi_v_; +text: .text%__1cLOptoRuntimebPgenerate_polling_page_return_handler_blob6F_v_; +text: .text%__1cLOptoRuntimebSgenerate_polling_page_safepoint_handler_blob6F_v_; +text: .text%__1cFKlassNoop_is_method6kM_i_: symbolKlass.o; +text: .text%__1cLOptoRuntimebPgenerate_illegal_instruction_handler_blob6F_v_; +text: .text%__1cLOptoRuntimebBgenerate_uncommon_trap_blob6F_v_; +text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_; +text: .text%__1cLOptoRuntimeWfill_in_exception_blob6F_v_; +text: .text%__1cLOptoRuntimeUsetup_exception_blob6F_v_; +text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_: symbolKlass.o; +text: .text%__1cNTemplateTableGaaload6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psAdaptiveSizePolicy.o; +text: .text%__1cKCMoveDNodeFIdeal6MpnIPhaseGVN_i_pnENode__; +text: .text%__1cOMacroAssemblerCfb6MnJAssemblerJCondition_in0BHPredict_pCnJrelocInfoJrelocType__v_: templateTable_sparc.o; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: tenuredGeneration.o; +text: .text%__1cUPSAdaptiveSizePolicy2t6MIIIIIddI_v_; +text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_: constMethodKlass.o; +text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constMethodKlass.o; +text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_; +text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cKarrayKlassMoop_is_array6kM_i_: constantPoolKlass.o; +text: .text%__1cFKlassPoop_is_instance6kM_i_: constantPoolKlass.o; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: constantPoolKlass.o; +text: .text%__1cOBasicHashtable2t6Mii_v_: placeholders.o; +text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constantPoolKlass.o; +text: .text%__1cQPlaceholderTable2t6Mi_v_; +text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o; +text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o; +text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o; +text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o; +text: .text%__1cbAPSGCAdaptivePolicyCounters2t6MpkciipnUPSAdaptiveSizePolicy__v_; +text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_; +text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: compiledICHolderKlass.o; +text: .text%__1cNTemplateTableNinvokespecial6Fi_v_; +text: .text%__1cNTemplateTableMinvokestatic6Fi_v_; +text: .text%__1cNTemplateTablebDinvokeinterface_object_method6FpnMRegisterImpl_222_v_; +text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_; +text: .text%__1cNTemplateTableE_new6F_v_; +text: .text%__1cNTemplateTableInewarray6F_v_; +text: .text%__1cNTemplateTableJanewarray6F_v_; +text: .text%__1cNTemplateTableLarraylength6F_v_; +text: .text%__1cNTemplateTableJcheckcast6F_v_; +text: .text%__1cNTemplateTableKinstanceof6F_v_; +text: .text%__1cNTemplateTableL_breakpoint6F_v_; +text: .text%__1cNTemplateTableGathrow6F_v_; +text: .text%__1cNTemplateTableMmonitorenter6F_v_; +text: .text%__1cNTemplateTableLmonitorexit6F_v_; +text: .text%__1cNTemplateTableEwide6F_v_; +text: .text%__1cNTemplateTableOmultianewarray6F_v_; +text: 
.text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: templateTable_sparc.o; +text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_; +text: .text%__1cTcompilerOracle_init6F_v_; +text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__; +text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_; +text: .text%__1cZCompiledArgumentOopFinderRhandle_oop_offset6M_v_: frame.o; +text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_; +text: .text%__1cHGCStats2t6M_v_; +text: .text%__1cNGCTaskManager2t6MI_v_; +text: .text%__1cNGCTaskManagerKinitialize6M_v_; +text: .text%__1cNGCTaskManagerKthreads_do6MpnNThreadClosure__v_; +text: .text%__1cPPerfDataManagerHdestroy6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o; +text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__I_; +text: .text%__1cWResolveOopMapConflictsUdo_potential_rewrite6MpnGThread__nMmethodHandle__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o; +text: .text%__1cRcheck_basic_types6F_v_; +text: .text%__1cSCommandLineFlagsExKuintxAtPut6FnXCommandLineFlagWithType_I_v_; +text: .text%__1cOThreadCriticalKinitialize6F_v_; +text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_; +text: .text%__1cICodeHeap2t6M_v_; +text: .text%__1cICodeHeapHreserve6MIII_i_; +text: .text%__1cDhpiKinitialize6F_i_; +text: .text%__1cMPerfDataList2T6M_v_; +text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: cpCacheKlass.o; +text: .text%__1cNWatcherThreadDrun6M_v_; +text: .text%__1cNWatcherThreadEstop6F_v_; +text: .text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: cpCacheKlass.o; +text: .text%__1cFStateO_sub_Op_CMoveD6MpknENode__v_; +text: .text%__1cFStateP_sub_Op_MoveF2I6MpknENode__v_; +text: .text%__1cKDictionary2t6Mi_v_; +text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_; +text: .text%__1cOBasicHashtable2t6Mii_v_: dictionary.o; +text: .text%__1cNeventlog_init6F_v_; +text: .text%__1cScheck_ThreadShadow6F_v_; +text: .text%__1cOtailjmpIndNodeLout_RegMask6kM_rknHRegMask__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o; +text: .text%__1cFframeVinterpreter_frame_mdp6kM_pC_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: phase.o; +text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_; +text: .text%__1cKPerfMemoryUcreate_memory_region6FI_v_; +text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o; +text: .text%__1cFframebAoops_compiled_arguments_do6MnMsymbolHandle_ipknLRegisterMap_pnKOopClosure__v_; +text: .text%__1cPperfMemory_exit6F_v_; +text: .text%__1cPperfMemory_init6F_v_; +text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_; +text: .text%__1cNTemplateTableHfastore6F_v_; +text: .text%__1cNTemplateTableHdastore6F_v_; +text: .text%__1cNTemplateTableHaastore6F_v_; +text: .text%__1cNTemplateTableHbastore6F_v_; +text: .text%__1cNTemplateTableHsastore6F_v_; +text: .text%__1cOcodeCache_init6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o; +text: .text%__1cNTemplateTableDpop6F_v_; +text: .text%__1cNTemplateTableEpop26F_v_; +text: .text%__1cNTemplateTableDdup6F_v_; +text: .text%__1cNTemplateTableGdup_x16F_v_; +text: .text%__1cNTemplateTableGdup_x26F_v_; +text: .text%__1cNTemplateTableEdup26F_v_; +text: .text%__1cNTemplateTableHdup2_x16F_v_; +text: .text%__1cNTemplateTableHdup2_x26F_v_; +text: .text%__1cNTemplateTableEswap6F_v_; +text: 
.text%__1cNCollectedHeap2t6M_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o; +text: .text%__1cNTemplateTableEirem6F_v_; +text: .text%__1cNTemplateTableElmul6F_v_; +text: .text%__1cNTemplateTableHlastore6F_v_; +text: .text%__1cNTemplateTableGbaload6F_v_; +text: .text%__1cNTemplateTableGcaload6F_v_; +text: .text%__1cNTemplateTableMfast_icaload6F_v_; +text: .text%__1cNTemplateTableGsaload6F_v_; +text: .text%__1cKPSYoungGenPinitialize_work6M_v_; +text: .text%__1cKPSYoungGenKinitialize6MnNReservedSpace_I_v_; +text: .text%__1cKPSYoungGenYinitialize_virtual_space6MnNReservedSpace_I_v_; +text: .text%__1cKPSYoungGen2t6MIII_v_; +text: .text%__1cOPSVirtualSpaceJshrink_by6MI_i_; +text: .text%__1cNTemplateTableHaload_06F_v_; +text: .text%__1cNTemplateTableGistore6F_v_; +text: .text%__1cNTemplateTableGlstore6F_v_; +text: .text%__1cNTemplateTableGfstore6F_v_; +text: .text%__1cNTemplateTableGdstore6F_v_; +text: .text%__1cNTemplateTableGastore6F_v_; +text: .text%__1cNTemplateTableLwide_istore6F_v_; +text: .text%__1cNTemplateTableLwide_lstore6F_v_; +text: .text%__1cNTemplateTableLwide_fstore6F_v_; +text: .text%__1cNTemplateTableLwide_dstore6F_v_; +text: .text%__1cNTemplateTableLwide_astore6F_v_; +text: .text%__1cNTemplateTableHiastore6F_v_; +text: .text%__1cNTemplateTableEldiv6F_v_; +text: .text%__1cNTemplateTableLtableswitch6F_v_; +text: .text%__1cNTemplateTableMlookupswitch6F_v_; +text: .text%__1cNTemplateTableRfast_linearswitch6F_v_; +text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_; +text: .text%__1cNCompileBrokerQcompilation_init6FpnQAbstractCompiler__v_; +text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_; +text: .text%__1cJPSPermGen2t6MnNReservedSpace_IIIIpkci_v_; +text: .text%__1cNCompileBrokerQset_should_block6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o; +text: .text%__1cNTemplateTableIgetfield6Fi_v_; +text: .text%__1cNTemplateTableJgetstatic6Fi_v_; +text: .text%__1cIPSOldGenKinitialize6MnNReservedSpace_Ipkci_v_; +text: .text%__1cIPSOldGen2t6MIIIpkci_v_; +text: .text%__1cIPSOldGen2t6MnNReservedSpace_IIIIpkci_v_; +text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o; +text: .text%__1cNTemplateTableIputfield6Fi_v_; +text: .text%__1cNTemplateTableJputstatic6Fi_v_; +text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_: compiledICHolderKlass.o; +text: .text%__1cLPSMarkSweepKinitialize6F_v_; +text: .text%__1cNTemplateTableIwide_ret6F_v_; +text: .text%__1cNTemplateTableElrem6F_v_; +text: .text%__1cNTemplateTableElshl6F_v_; +text: .text%__1cNTemplateTableElshr6F_v_; +text: .text%__1cNTemplateTableFlushr6F_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_; +text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_; +text: .text%__1cNTemplateTableEineg6F_v_; +text: .text%__1cNTemplateTableElneg6F_v_; +text: .text%__1cNTemplateTableEfneg6F_v_; +text: .text%__1cNTemplateTableEdneg6F_v_; +text: .text%__1cNTemplateTableEiinc6F_v_; +text: .text%__1cNTemplateTableJwide_iinc6F_v_; +text: .text%__1cKPSScavengeKinitialize6F_v_; +text: .text%__1cNTemplateTableElcmp6F_v_; +text: .text%__1cWcompilationPolicy_init6F_v_; +text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o; +text: .text%__1cOcompiler2_init6F_v_; +text: .text%__1cSPSPromotionManagerKinitialize6F_v_; +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psPromotionLAB.o; +text: 
.text%__1cNTemplateTableDret6F_v_; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/rules.make 2009-08-01 04:16:59.353729503 +0100 @@ -0,0 +1,198 @@ +# +# Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Common rules/macros for the vm, adlc. + +# Tell make that .cpp is important +.SUFFIXES: .cpp $(SUFFIXES) + +# For now. Other makefiles use CPP as the c++ compiler, but that should really +# name the preprocessor. +ifeq ($(CCC),) +CCC = $(CPP) +endif + +DEMANGLER = c++filt +DEMANGLE = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@ + +# $(CC) is the c compiler (cc/gcc), $(CCC) is the c++ compiler (CC/g++). +C_COMPILE = $(CC) $(CPPFLAGS) $(CFLAGS) +CC_COMPILE = $(CCC) $(CPPFLAGS) $(CFLAGS) + +AS.S = $(AS) $(ASFLAGS) + +COMPILE.c = $(C_COMPILE) -c +GENASM.c = $(C_COMPILE) -S +LINK.c = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS) +LINK_LIB.c = $(CC) $(LFLAGS) $(SHARED_FLAG) +PREPROCESS.c = $(C_COMPILE) -E + +COMPILE.CC = $(CC_COMPILE) -c +GENASM.CC = $(CC_COMPILE) -S +LINK.CC = $(CCC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS) +LINK_NOPROF.CC = $(CCC) $(LFLAGS) $(AOUT_FLAGS) +LINK_LIB.CC = $(CCC) $(LFLAGS) $(SHARED_FLAG) +PREPROCESS.CC = $(CC_COMPILE) -E + +# Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k". +REMOVE_TARGET = rm -f $@ + +# Synonyms. +COMPILE.cpp = $(COMPILE.CC) +GENASM.cpp = $(GENASM.CC) +LINK.cpp = $(LINK.CC) +LINK_LIB.cpp = $(LINK_LIB.CC) +PREPROCESS.cpp = $(PREPROCESS.CC) + +# Note use of ALT_BOOTDIR to explicitly specify location of java and +# javac; this is the same environment variable used in the J2SE build +# process for overriding the default spec, which is BOOTDIR. +# Note also that we fall back to using JAVA_HOME if neither of these is +# specified. 
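The bootstrap-JDK fallback described above (ALT_BOOTDIR, then BOOTDIR, then JAVA_HOME, and finally the tools found on the PATH) is spread over the nested ifdef blocks that follow. The lines below are a minimal standalone sketch of the same fallback, not part of the patch; the jdk path in the usage note is a placeholder, and "else ifdef" needs GNU make 3.81 or later.

# Sketch only: the rules.make bootstrap-JDK fallback collapsed into one place.
ifdef ALT_BOOTDIR
  BOOT_JAVA_HOME = $(ALT_BOOTDIR)
else ifdef BOOTDIR
  BOOT_JAVA_HOME = $(BOOTDIR)
else ifdef JAVA_HOME
  BOOT_JAVA_HOME = $(JAVA_HOME)
endif

ifdef BOOT_JAVA_HOME
  RUN.JAVA      = $(BOOT_JAVA_HOME)/bin/java
  COMPILE.JAVAC = $(BOOT_JAVA_HOME)/bin/javac
else
  # Nothing was set; fall back to whatever java/javac are on the PATH.
  RUN.JAVA      = java
  COMPILE.JAVAC = javac
endif

show-boot-jdk:
	@echo "RUN.JAVA=$(RUN.JAVA) COMPILE.JAVAC=$(COMPILE.JAVAC)"

Run as, for example, "gmake -f boot.make ALT_BOOTDIR=/path/to/jdk show-boot-jdk" (the path is hypothetical); it prints the java and javac that the SA build rules further down would pick up through BOOT_JAVA_HOME.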
+ +ifdef ALT_BOOTDIR + +RUN.JAVA = $(ALT_BOOTDIR)/bin/java +RUN.JAVAP = $(ALT_BOOTDIR)/bin/javap +RUN.JAVAH = $(ALT_BOOTDIR)/bin/javah +RUN.JAR = $(ALT_BOOTDIR)/bin/jar +COMPILE.JAVAC = $(ALT_BOOTDIR)/bin/javac +COMPILE.RMIC = $(ALT_BOOTDIR)/bin/rmic +BOOT_JAVA_HOME = $(ALT_BOOTDIR) + +else + +ifdef BOOTDIR + +RUN.JAVA = $(BOOTDIR)/bin/java +RUN.JAVAP = $(BOOTDIR)/bin/javap +RUN.JAVAH = $(BOOTDIR)/bin/javah +RUN.JAR = $(BOOTDIR)/bin/jar +COMPILE.JAVAC = $(BOOTDIR)/bin/javac +COMPILE.RMIC = $(BOOTDIR)/bin/rmic +BOOT_JAVA_HOME = $(BOOTDIR) + +else + +ifdef JAVA_HOME + +RUN.JAVA = $(JAVA_HOME)/bin/java +RUN.JAVAP = $(JAVA_HOME)/bin/javap +RUN.JAVAH = $(JAVA_HOME)/bin/javah +RUN.JAR = $(JAVA_HOME)/bin/jar +COMPILE.JAVAC = $(JAVA_HOME)/bin/javac +COMPILE.RMIC = $(JAVA_HOME)/bin/rmic +BOOT_JAVA_HOME = $(JAVA_HOME) + +else + +# take from the PATH, if ALT_BOOTDIR, BOOTDIR and JAVA_HOME are not defined +# note that this is to support hotspot build without SA. To build +# SA along with hotspot, you need to define ALT_BOOTDIR, BOOTDIR or JAVA_HOME + +RUN.JAVA = java +RUN.JAVAP = javap +RUN.JAVAH = javah +RUN.JAR = jar +COMPILE.JAVAC = javac +COMPILE.RMIC = rmic + +endif +endif +endif + +SUM = /usr/bin/sum + +# 'gmake MAKE_VERBOSE=y' gives all the gory details. +QUIETLY$(MAKE_VERBOSE) = @ +RUN.JAR$(MAKE_VERBOSE) += >/dev/null + +# With parallel makes, print a message at the end of compilation. +ifeq ($(findstring j,$(MFLAGS)),j) +COMPILE_DONE = && { echo Done with $<; } +endif + +# Include NONPIC_OBJ_FILES definition +ifndef LP64 +include $(GAMMADIR)/make/pic.make +endif + +# Sun compiler for 64 bit Solaris does not support building non-PIC object files. +ifdef LP64 +%.o: %.cpp + @echo Compiling $< + $(QUIETLY) $(REMOVE_TARGET) + $(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE) +else +%.o: %.cpp + @echo Compiling $< + $(QUIETLY) $(REMOVE_TARGET) + $(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \ + $(subst $(VM_PICFLAG), ,$(COMPILE.CC)) -o $@ $< $(COMPILE_DONE), \ + $(COMPILE.CC) -o $@ $< $(COMPILE_DONE)) +endif + +%.o: %.s + @echo Assembling $< + $(QUIETLY) $(REMOVE_TARGET) + $(QUIETLY) $(AS.S) -o $@ $< $(COMPILE_DONE) + +%.s: %.cpp + @echo Generating assembly for $< + $(QUIETLY) $(GENASM.CC) -o $@ $< + $(QUIETLY) $(DEMANGLE) $(COMPILE_DONE) + +# Intermediate files (for debugging macros) +%.i: %.cpp + @echo Preprocessing $< to $@ + $(QUIETLY) $(PREPROCESS.CC) $< > $@ $(COMPILE_DONE) + +# Override gnumake built-in rules which do sccs get operations badly. +# (They put the checked out code in the current directory, not in the +# directory of the original file.) Since this is a symptom of a teamware +# failure, and since not all problems can be detected by gnumake due +# to incomplete dependency checking... just complain and stop. +%:: s.% + @echo "=========================================================" + @echo File $@ + @echo is out of date with respect to its SCCS file. + @echo This file may be from an unresolved Teamware conflict. + @echo This is also a symptom of a Teamware bringover/putback failure + @echo in which SCCS files are updated but not checked out. + @echo Check for other out of date files in your workspace. + @echo "=========================================================" + @exit 666 + +%:: SCCS/s.% + @echo "=========================================================" + @echo File $@ + @echo is out of date with respect to its SCCS file. + @echo This file may be from an unresolved Teamware conflict. 
+ @echo This is also a symptom of a Teamware bringover/putback failure + @echo in which SCCS files are updated but not checked out. + @echo Check for other out of date files in your workspace. + @echo "=========================================================" + @exit 666 + +.PHONY: default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/sa.make 2009-08-01 04:17:00.103917294 +0100 @@ -0,0 +1,87 @@ +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This makefile (sa.make) is included from the sa.make in the +# build directories. + +# This makefile is used to build Serviceability Agent java code +# and generate JNI header file for native methods. + +include $(GAMMADIR)/make/solaris/makefiles/rules.make +AGENT_DIR = $(GAMMADIR)/agent +include $(GAMMADIR)/make/sa.files +GENERATED = ../generated + +# tools.jar is needed by the JDI - SA binding +SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar + +# gnumake 3.78.1 does not accept the *s that +# are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them +AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1)) +AGENT_FILES2 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES2)) + +SA_CLASSDIR = $(GENERATED)/saclasses + +SA_BUILD_VERSION_PROP = "sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VERSION)" + +SA_PROPERTIES = $(SA_CLASSDIR)/sa.properties + +# if $(AGENT_DIR) does not exist, we don't build SA. +all: + $(QUIETLY) if [ -d $(AGENT_DIR) ] ; then \ + $(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \ + fi + +$(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2) + $(QUIETLY) echo "Making $@"; + $(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \ + echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \ + exit 1; \ + fi + $(QUIETLY) if [ ! -f $(SA_CLASSPATH) ] ; then \ + echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\ + echo ""; \ + exit 1; \ + fi + $(QUIETLY) if [ ! 
-d $(SA_CLASSDIR) ] ; then \ + mkdir -p $(SA_CLASSDIR); \ + fi + $(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1) + $(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2) + + $(QUIETLY) $(COMPILE.RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer + $(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES) + $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js + $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql + $(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources + $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/* + $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/ + $(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/ + $(QUIETLY) $(RUN.JAR) cf $@ -C $(SA_CLASSDIR)/ . + $(QUIETLY) $(RUN.JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector + $(QUIETLY) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal + +clean: + rm -rf $(SA_CLASSDIR) + rm -rf $(GENERATED)/sa-jdi.jar --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/saproc.make 2009-08-01 04:17:00.537331776 +0100 @@ -0,0 +1,79 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Rules to build serviceability agent library, used by vm.make + +# libsaproc[_g].so: serviceability agent +SAPROC = saproc$(G_SUFFIX) +LIBSAPROC = lib$(SAPROC).so + +AGENT_DIR = $(GAMMADIR)/agent + +SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)/proc + +SASRCFILES = $(SASRCDIR)/saproc.cpp + +SAMAPFILE = $(SASRCDIR)/mapfile + +DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC) + +# if $(AGENT_DIR) does not exist, we don't build SA + +checkAndBuildSA: + $(QUIETLY) if [ -d $(AGENT_DIR) ] ; then \ + $(MAKE) -f vm.make $(LIBSAPROC); \ + fi + +SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE)) + +ifdef USE_GCC +SA_LFLAGS += -D_REENTRANT +else +SA_LFLAGS += -mt -xnolib -norunpath +endif + +$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE) + $(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \ + echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \ + exit 1; \ + fi + @echo Making SA debugger back-end... 
+ $(QUIETLY) $(CPP) \ + $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \ + -I$(SASRCDIR) \ + -I$(GENERATED) \ + -I$(BOOT_JAVA_HOME)/include \ + -I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \ + $(SASRCFILES) \ + $(SA_LFLAGS) \ + -o $@ \ + -ldl -ldemangle -lthread -lc + +install_saproc: checkAndBuildSA + $(QUIETLY) if [ -f $(LIBSAPROC) ] ; then \ + echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)"; \ + cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done"; \ + fi + +.PHONY: checkAndBuildSA install_saproc --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/sparc.make 2009-08-01 04:17:00.946067678 +0100 @@ -0,0 +1,124 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +Obj_Files += solaris_sparc.o +ASFLAGS += $(AS_ARCHFLAG) + +ifeq ("${Platform_compiler}", "sparcWorks") +ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 505), 1) +# For 5.2 ad_sparc file is compiled with -O2 %%%% remove when adlc is fixed +OPT_CFLAGS/ad_sparc.o = $(OPT_CFLAGS/SLOWER) +OPT_CFLAGS/dfa_sparc.o = $(OPT_CFLAGS/SLOWER) +# CC drops core on systemDictionary.o in -xO4 mode +OPT_CFLAGS/systemDictionary.o = $(OPT_CFLAGS/SLOWER) +# SC5.0 bug 4284168 +OPT_CFLAGS/carRememberedSet.o = $(OPT_CFLAGS/O2) +# Temporarily drop the optimization level for compiling +# jniHandles.cpp to O3 from O4; see bug 4309181 +OPT_CFLAGS/jniHandles.o = $(OPT_CFLAGS/O2) +# CC brings an US-II to its knees compiling the vmStructs asserts under -xO4 +OPT_CFLAGS/vmStructs.o = $(OPT_CFLAGS/O2) +endif # COMPILER_REV_NUMERIC < 505 +else +# Options for gcc +OPT_CFLAGS/ad_sparc.o = $(OPT_CFLAGS/SLOWER) +OPT_CFLAGS/dfa_sparc.o = $(OPT_CFLAGS/SLOWER) +OPT_CFLAGS/systemDictionary.o = $(OPT_CFLAGS/SLOWER) +OPT_CFLAGS/carRememberedSet.o = $(OPT_CFLAGS/O2) +OPT_CFLAGS/jniHandles.o = $(OPT_CFLAGS/O2) +OPT_CFLAGS/vmStructs.o = $(OPT_CFLAGS/O2) +endif + +# File-specific adjustments to the PICFLAG, applicable only to SPARC, +# which has a very tight limit on global constant references. 
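sparc.make assigns variables of the form OPT_CFLAGS/<object> and PICFLAG/<object> for individual files; the consuming rules select them by target name through a computed variable name (the PICFLAG/BYFILE definition appears later, in sparcWorks.make). The following is a minimal sketch of that lookup idiom, not part of the patch; frame.o is used only as an illustrative file name.

# Sketch only: per-file flag override via a computed variable name.
PICFLAG         = -KPIC
PICFLAG/DEFAULT = $(PICFLAG)
# Hypothetical per-file override, standing in for the assignments in sparc.make.
PICFLAG/frame.o = -pic

# If PICFLAG/$@ is set, the second term names the empty, undefined variable
# PICFLAG/DEFAULT-pic and vanishes; if PICFLAG/$@ is unset, the first term
# is empty and PICFLAG/DEFAULT supplies the value.
PICFLAG/BYFILE  = $(PICFLAG/$@)$(PICFLAG/DEFAULT$(PICFLAG/$@))

%.o: %.cpp
	@echo "CC $(PICFLAG/BYFILE) -c $< -o $@"

With this sketch, frame.o is compiled with -pic and every other object with -KPIC, which is how the per-file assignments in sparc.make take effect without touching the generic pattern rule.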
+ +# Old files which seemed hot at one point or another: +#PICFLAG/runtime.o = $(PICFLAG/BETTER) +#PICFLAG/generateOopMap.o = $(PICFLAG/BETTER) +#PICFLAG/thread.o = $(PICFLAG/BETTER) +#PICFLAG/parse2.o = $(PICFLAG/BETTER) +#PICFLAG/parse1.o = $(PICFLAG/BETTER) +#PICFLAG/universe.o = $(PICFLAG/BETTER) +#PICFLAG/safepoint.o = $(PICFLAG/BETTER) +#PICFLAG/parse3.o = $(PICFLAG/BETTER) +#PICFLAG/compile.o = $(PICFLAG/BETTER) +#PICFLAG/codeBlob.o = $(PICFLAG/BETTER) +#PICFLAG/mutexLocker.o = $(PICFLAG/BETTER) +#PICFLAG/nativeInst_sparc.o = $(PICFLAG/BETTER) +#PICFLAG/methodLiveness.o = $(PICFLAG/BETTER) +#PICFLAG/synchronizer.o = $(PICFLAG/BETTER) +#PICFLAG/methodOop.o = $(PICFLAG/BETTER) +#PICFLAG/space.o = $(PICFLAG/BETTER) +#PICFLAG/interpreterRT_sparc.o = $(PICFLAG/BETTER) +#PICFLAG/generation.o = $(PICFLAG/BETTER) +#PICFLAG/markSweep.o = $(PICFLAG/BETTER) +#PICFLAG/parseHelper.o = $(PICFLAG/BETTER) + +# Confirmed by function-level profiling: +PICFLAG/scavenge.o = $(PICFLAG/BETTER) +PICFLAG/instanceKlass.o = $(PICFLAG/BETTER) +PICFLAG/frame.o = $(PICFLAG/BETTER) +PICFLAG/phaseX.o = $(PICFLAG/BETTER) +PICFLAG/lookupCache.o = $(PICFLAG/BETTER) +PICFLAG/chaitin.o = $(PICFLAG/BETTER) +PICFLAG/type.o = $(PICFLAG/BETTER) +PICFLAG/jvm.o = $(PICFLAG/BETTER) +PICFLAG/jni.o = $(PICFLAG/BETTER) +PICFLAG/matcher.o = $(PICFLAG/BETTER) + +# New from module-level profiling (trustworthy?): +PICFLAG/rememberedSet.o = $(PICFLAG/BETTER) +PICFLAG/frame_sparc.o = $(PICFLAG/BETTER) +PICFLAG/live.o = $(PICFLAG/BETTER) +PICFLAG/vectset.o = $(PICFLAG/BETTER) +PICFLAG/objArrayKlass.o = $(PICFLAG/BETTER) +PICFLAG/do_call.o = $(PICFLAG/BETTER) +PICFLAG/loopnode.o = $(PICFLAG/BETTER) +PICFLAG/cfgnode.o = $(PICFLAG/BETTER) +PICFLAG/ifg.o = $(PICFLAG/BETTER) +PICFLAG/vframe.o = $(PICFLAG/BETTER) +PICFLAG/postaloc.o = $(PICFLAG/BETTER) +PICFLAG/carRememberedSet.o = $(PICFLAG/BETTER) +PICFLAG/gcm.o = $(PICFLAG/BETTER) +PICFLAG/coalesce.o = $(PICFLAG/BETTER) +PICFLAG/oop.o = $(PICFLAG/BETTER) +PICFLAG/oopMap.o = $(PICFLAG/BETTER) +PICFLAG/resourceArea.o = $(PICFLAG/BETTER) +PICFLAG/node.o = $(PICFLAG/BETTER) +PICFLAG/dict.o = $(PICFLAG/BETTER) +PICFLAG/domgraph.o = $(PICFLAG/BETTER) +PICFLAG/dfa_sparc.o = $(PICFLAG/BETTER) +PICFLAG/block.o = $(PICFLAG/BETTER) +PICFLAG/javaClasses.o = $(PICFLAG/BETTER) + +# New hot files: +PICFLAG/classes.o = $(PICFLAG/BETTER) +#PICFLAG/ad_sparc.o = $(PICFLAG/BETTER) +PICFLAG/nmethod.o = $(PICFLAG/BETTER) +PICFLAG/relocInfo.o = $(PICFLAG/BETTER) +PICFLAG/codeBuffer_sparc.o = $(PICFLAG/BETTER) +PICFLAG/callnode.o = $(PICFLAG/BETTER) +PICFLAG/multnode.o = $(PICFLAG/BETTER) +PICFLAG/os_solaris.o = $(PICFLAG/BETTER) +PICFLAG/typeArrayKlass.o = $(PICFLAG/BETTER) --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/sparcWorks.make 2009-08-01 04:17:01.380712838 +0100 @@ -0,0 +1,548 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). 
+# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Compiler-specific flags for sparcworks. + +# tell make which C and C++ compilers to use +CC = cc +CPP = CC + +# Note that this 'as' is an older version of the Sun Studio 'fbe', and will +# use the older style options. The 'fbe' options will match 'cc' and 'CC'. +AS = /usr/ccs/bin/as + +NM = /usr/ccs/bin/nm +NAWK = /bin/nawk + +REORDER_FLAG = -xF + +# Check for the versions of C++ and C compilers ($CPP and $CC) used. + +# Get the last thing on the line that looks like x.x+ (x is a digit). +COMPILER_REV := \ +$(shell $(CPP) -V 2>&1 | sed -n 's/^.*[ ,\t]C++[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p') +C_COMPILER_REV := \ +$(shell $(CC) -V 2>&1 | sed -n 's/^.*[ ,\t]C[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p') + +# Pick which compiler is validated +ifeq ($(JDK_MINOR_VERSION),6) + # Validated compiler for JDK6 is SS11 (5.8) + VALIDATED_COMPILER_REV := 5.8 + VALIDATED_C_COMPILER_REV := 5.8 +else + # Validated compiler for JDK7 is SS12 (5.9) + VALIDATED_COMPILER_REV := 5.9 + VALIDATED_C_COMPILER_REV := 5.9 +endif + +# Warning messages about not using the above validated version +ENFORCE_COMPILER_REV${ENFORCE_COMPILER_REV} := ${VALIDATED_COMPILER_REV} +ifneq (${COMPILER_REV},${ENFORCE_COMPILER_REV}) +dummy_target_to_enforce_compiler_rev:=\ +$(shell echo >&2 WARNING: You are using CC version ${COMPILER_REV} \ +and should be using version ${ENFORCE_COMPILER_REV}. Set ENFORCE_COMPILER_REV=${COMPILER_REV} to avoid this warning.) +endif + +ENFORCE_C_COMPILER_REV${ENFORCE_C_COMPILER_REV} := ${VALIDATED_C_COMPILER_REV} +ifneq (${C_COMPILER_REV},${ENFORCE_C_COMPILER_REV}) +dummy_target_to_enforce_c_compiler_rev:=\ +$(shell echo >&2 WARNING: You are using cc version ${C_COMPILER_REV} \ +and should be using version ${ENFORCE_C_COMPILER_REV}. Set ENFORCE_C_COMPILER_REV=${C_COMPILER_REV} to avoid this warning.) +endif + +COMPILER_REV_NUMERIC := $(shell echo $(COMPILER_REV) | awk -F. '{ print $$1 * 100 + $$2 }') + +# Fail the build if __fabsf is used. __fabsf exists only in Solaris 8 2/04 +# and newer; objects with a dependency on this symbol will not run on older +# Solaris 8. +JVM_FAIL_IF_UNDEFINED = __fabsf + +JVM_CHECK_SYMBOLS = $(NM) -u -p $(LIBJVM.o) | \ + $(NAWK) -v f="${JVM_FAIL_IF_UNDEFINED}" \ + 'BEGIN { c=split(f,s); rc=0; } \ + /:$$/ { file = $$1; } \ + /[^:]$$/ { for(n=1;n<=c;++n) { \ + if($$1==s[n]) { \ + printf("JVM_CHECK_SYMBOLS: %s contains illegal symbol %s\n", \ + file,$$1); \ + rc=1; \ + } \ + } \ + } \ + END { exit rc; }' + +LINK_LIB.CC/PRE_HOOK += $(JVM_CHECK_SYMBOLS) || exit 1; + +# Some interfaces (_lwp_create) changed with LP64 and Solaris 7 +SOLARIS_7_OR_LATER := \ +$(shell uname -r | awk -F. '{ if ($$2 >= 7) print "-DSOLARIS_7_OR_LATER"; }') +CFLAGS += ${SOLARIS_7_OR_LATER} + +# New architecture options started in SS12 (5.9), we need both styles to build. +# The older arch options for SS11 (5.8) or older and also for /usr/ccs/bin/as. +# Note: SS12 default for 32bit sparc is now the same as v8plus, so the +# settings below have changed all SS12 32bit sparc builds to be v8plus. +# The older SS11 (5.8) settings have remained as they always have been. 
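Because the architecture-flag selection further down keys off COMPILER_REV_NUMERIC, it may help to see the version probe applied to one concrete banner. The sketch below feeds a hard-coded sample line through the same sed and awk pipeline instead of running the real compiler; the banner text is only assumed to be representative of a Sun Studio 12 "CC -V" line and is not from the patch.

# Sketch only: how a "CC -V" banner becomes COMPILER_REV_NUMERIC.
SAMPLE_BANNER := CC: Sun C++ 5.9 SunOS_sparc Patch 124863-01 2007/07/25
COMPILER_REV := $(shell echo '$(SAMPLE_BANNER)' | \
    sed -n 's/^.*[ ,\t]C++[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p')
COMPILER_REV_NUMERIC := $(shell echo $(COMPILER_REV) | \
    awk -F. '{ print $$1 * 100 + $$2 }')

show-rev:
	@echo "COMPILER_REV=$(COMPILER_REV)"
	@echo "COMPILER_REV_NUMERIC=$(COMPILER_REV_NUMERIC)"

For this sample the target prints 5.9 and 509, so the "expr $(COMPILER_REV_NUMERIC) \>= 509" tests below would choose the newer -m32/-m64 style architecture flags.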
+ifeq ($(TYPE),COMPILER2) + ARCHFLAG_OLD/sparc = -xarch=v8plus +else + ifeq ($(TYPE),TIERED) + ARCHFLAG_OLD/sparc = -xarch=v8plus + else + ARCHFLAG_OLD/sparc = -xarch=v8 + endif +endif +ARCHFLAG_NEW/sparc = -m32 -xarch=sparc +ARCHFLAG_OLD/sparcv9 = -xarch=v9 +ARCHFLAG_NEW/sparcv9 = -m64 -xarch=sparc +ARCHFLAG_OLD/i486 = +ARCHFLAG_NEW/i486 = -m32 +ARCHFLAG_OLD/amd64 = -xarch=amd64 +ARCHFLAG_NEW/amd64 = -m64 + +# Select the ARCHFLAGs and other SS12 (5.9) options +ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1) + ARCHFLAG/sparc = $(ARCHFLAG_NEW/sparc) + ARCHFLAG/sparcv9 = $(ARCHFLAG_NEW/sparcv9) + ARCHFLAG/i486 = $(ARCHFLAG_NEW/i486) + ARCHFLAG/amd64 = $(ARCHFLAG_NEW/amd64) +else + ARCHFLAG/sparc = $(ARCHFLAG_OLD/sparc) + ARCHFLAG/sparcv9 = $(ARCHFLAG_OLD/sparcv9) + ARCHFLAG/i486 = $(ARCHFLAG_OLD/i486) + ARCHFLAG/amd64 = $(ARCHFLAG_OLD/amd64) +endif + +# ARCHFLAGS for the current build arch +ARCHFLAG = $(ARCHFLAG/$(BUILDARCH)) +AS_ARCHFLAG = $(ARCHFLAG_OLD/$(BUILDARCH)) + +# Optional sub-directory in /usr/lib where BUILDARCH libraries are kept. +ISA_DIR=$(ISA_DIR/$(BUILDARCH)) +ISA_DIR/sparcv9=/sparcv9 +ISA_DIR/amd64=/amd64 + +# Use these to work around compiler bugs: +OPT_CFLAGS/SLOWER=-xO3 +OPT_CFLAGS/O2=-xO2 +OPT_CFLAGS/NOOPT=-xO1 + +################################################# +# Begin current (>=5.6) Forte compiler options # +################################################# + +ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 506), 1) + +ifeq ("${Platform_arch}", "sparc") + +# We MUST allow data alignment of 4 for sparc (sparcv9 is ok at 8s) +ifndef LP64 +CFLAGS += -xmemalign=4s +endif + +endif + +endif + +################################################# +# Begin current (>=5.5) Forte compiler options # +################################################# + +ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 505), 1) + +CFLAGS += $(ARCHFLAG) +AOUT_FLAGS += $(ARCHFLAG) +LIB_FLAGS += $(ARCHFLAG) +LFLAGS += $(ARCHFLAG) + +ifeq ("${Platform_arch}", "sparc") + +# Flags for Optimization + +# [phh] Commented out pending verification that we do indeed want +# to potentially bias against u1 and u3 targets. +#CFLAGS += -xchip=ultra2 + +OPT_CFLAGS=-xO4 $(EXTRA_OPT_CFLAGS) + +endif # sparc + +ifeq ("${Platform_arch_model}", "x86_32") + +OPT_CFLAGS=-xtarget=pentium $(EXTRA_OPT_CFLAGS) + +# UBE (CC 5.5) has bug 4923569 with -xO4 +OPT_CFLAGS+=-xO3 + +endif # 32bit x86 + +ifeq ("${Platform_arch_model}", "x86_64") + +ASFLAGS += $(AS_ARCHFLAG) +CFLAGS += $(ARCHFLAG/amd64) +# this one seemed useless +LFLAGS_VM += $(ARCHFLAG/amd64) +# this one worked +LFLAGS += $(ARCHFLAG/amd64) +AOUT_FLAGS += $(ARCHFLAG/amd64) + +# -xO3 is faster than -xO4 on specjbb with SS10 compiler +OPT_CFLAGS=-xO4 $(EXTRA_OPT_CFLAGS) + +endif # 64bit x86 + +# Inline functions +CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_${Platform_arch}/vm/solaris_${Platform_arch_model}.il + +# no more exceptions +CFLAGS/NOEX=-features=no%except + + +# avoid compilation problems arising from fact that C++ compiler tries +# to search for external template definition by just compiling additional +# source files in th same context +CFLAGS += -template=no%extdef + +# Reduce code bloat by reverting back to 5.0 behavior for static initializers +CFLAGS += -features=no%split_init + +# Use -D_Crun_inline_placement so we don't get references to +# __1c2n6FIpv_0_ or void*operator new(unsigned,void*) +# This avoids the hard requirement of the newer Solaris C++ runtime patches. +# NOTE: This is an undocumented feature of the SS10 compiler. See 6306698. 
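Taking one concrete case of the selection above: for a 64-bit SPARC build (BUILDARCH=sparcv9) with an SS12 compiler, the flags reduce as in this sketch, which is not part of the patch. The assembler keeps the old-style option because, as noted earlier, /usr/ccs/bin/as only understands that form.

# Sketch only: ARCHFLAG resolution for BUILDARCH=sparcv9 with CC 5.9.
BUILDARCH            = sparcv9
COMPILER_REV_NUMERIC = 509
ARCHFLAG_OLD/sparcv9 = -xarch=v9
ARCHFLAG_NEW/sparcv9 = -m64 -xarch=sparc

ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
  ARCHFLAG/sparcv9 = $(ARCHFLAG_NEW/sparcv9)
else
  ARCHFLAG/sparcv9 = $(ARCHFLAG_OLD/sparcv9)
endif

ARCHFLAG    = $(ARCHFLAG/$(BUILDARCH))
AS_ARCHFLAG = $(ARCHFLAG_OLD/$(BUILDARCH))

show-archflag:
	@echo "CC/cc gets: $(ARCHFLAG)"
	@echo "as gets:    $(AS_ARCHFLAG)"

Here the compilers are driven with -m64 -xarch=sparc while the assembler still sees -xarch=v9, matching the note that the older options remain in use for /usr/ccs/bin/as.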
+CFLAGS += -D_Crun_inline_placement + +# PIC is safer for SPARC, and is considerably slower +# a file foo.o which wants to compile -pic can set "PICFLAG/foo.o = -PIC" +PICFLAG = -KPIC +PICFLAG/DEFAULT = $(PICFLAG) +# [RGV] Need to figure which files to remove to get link to work +#PICFLAG/BETTER = -pic +PICFLAG/BETTER = $(PICFLAG/DEFAULT) +PICFLAG/BYFILE = $(PICFLAG/$@)$(PICFLAG/DEFAULT$(PICFLAG/$@)) + +# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file. +MAPFLAG = -M FILENAME + +# Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj +SONAMEFLAG = -h SONAME + +# Build shared library +SHARED_FLAG = -G + +# We don't need libCstd.so and librwtools7.so, only libCrun.so +CFLAGS += -library=%none +LFLAGS += -library=%none + +LFLAGS += -mt + +endif # COMPILER_REV_NUMERIC >= 505 + +###################################### +# End 5.5 Forte compiler options # +###################################### + +###################################### +# Begin 5.2 Forte compiler options # +###################################### + +ifeq ($(COMPILER_REV_NUMERIC), 502) + +CFLAGS += $(ARCHFLAG) +AOUT_FLAGS += $(ARCHFLAG) +LIB_FLAGS += $(ARCHFLAG) +LFLAGS += $(ARCHFLAG) + +ifeq ("${Platform_arch}", "sparc") + +# Flags for Optimization + +# [phh] Commented out pending verification that we do indeed want +# to potentially bias against u1 and u3 targets. +#CFLAGS += -xchip=ultra2 + +ifdef LP64 +# SC5.0 tools on v9 are flakey at -xO4 +# [phh] Is this still true for 6.1? +OPT_CFLAGS=-xO3 $(EXTRA_OPT_CFLAGS) +else +OPT_CFLAGS=-xO4 $(EXTRA_OPT_CFLAGS) +endif + +CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_sparc/vm/solaris_sparc.il + +endif # sparc + +ifeq ("${Platform_arch_model}", "x86_32") + +OPT_CFLAGS=-xtarget=pentium $(EXTRA_OPT_CFLAGS) + +# SC5.0 tools on x86 are flakey at -xO4 +# [phh] Is this still true for 6.1? +OPT_CFLAGS+=-xO3 + +CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_x86/vm/solaris_x86_32.il + +endif # 32bit x86 + +# no more exceptions +CFLAGS/NOEX=-noex + +# Reduce code bloat by reverting back to 5.0 behavior for static initializers +CFLAGS += -Qoption ccfe -one_static_init + +# PIC is safer for SPARC, and is considerably slower +# a file foo.o which wants to compile -pic can set "PICFLAG/foo.o = -PIC" +PICFLAG = -KPIC +PICFLAG/DEFAULT = $(PICFLAG) +# [RGV] Need to figure which files to remove to get link to work +#PICFLAG/BETTER = -pic +PICFLAG/BETTER = $(PICFLAG/DEFAULT) +PICFLAG/BYFILE = $(PICFLAG/$@)$(PICFLAG/DEFAULT$(PICFLAG/$@)) + +# Would be better if these weren't needed, since we link with CC, but +# at present removing them causes run-time errors +LFLAGS += -library=Crun +LIBS += -library=Crun -lCrun + +endif # COMPILER_REV_NUMERIC == 502 + +################################## +# End 5.2 Forte compiler options # +################################## + +################################## +# Begin old 5.1 compiler options # +################################## +ifeq ($(COMPILER_REV_NUMERIC), 501) + +_JUNK_ := $(shell echo >&2 \ + "*** ERROR: sparkWorks.make incomplete for 5.1 compiler") + @exit 1 +endif +################################## +# End old 5.1 compiler options # +################################## + +################################## +# Begin old 5.0 compiler options # +################################## + +ifeq (${COMPILER_REV_NUMERIC}, 500) + +# Had to hoist this higher apparently because of other changes. Must +# come before -xarch specification. +# NOTE: native says optimize for the machine doing the compile, bad news. 
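The MAPFLAG and SONAMEFLAG templates defined above are consumed through makefile substitution references; saproc.make builds SA_LFLAGS this way from $(MAPFLAG:FILENAME=$(SAMAPFILE)). The lines below are a minimal sketch, not part of the patch; the map file name and the pairing of SONAMEFLAG with libsaproc.so are invented for the example.

# Sketch only: filling in the MAPFLAG/SONAMEFLAG placeholders.
MAPFLAG    = -M FILENAME
SONAMEFLAG = -h SONAME
# Hypothetical map file; the real one comes from $(SAMAPFILE) in saproc.make.
SAMAPFILE  = mapfile-vers

# The substitution reference rewrites the word FILENAME (or SONAME) at the
# end of each word in the value, leaving the -M / -h switches untouched.
SA_LFLAGS  = $(MAPFLAG:FILENAME=$(SAMAPFILE)) $(SONAMEFLAG:SONAME=libsaproc.so)

show-sa-lflags:
	@echo "$(SA_LFLAGS)"

The target prints "-M mapfile-vers -h libsaproc.so", that is, the linker options selecting the version map and the intrinsic soname for the shared library being built.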
+CFLAGS += -xtarget=native + +CFLAGS += $(ARCHFLAG) +AOUT_FLAGS += $(ARCHFLAG) +LIB_FLAGS += $(ARCHFLAG) +LFLAGS += $(ARCHFLAG) + +CFLAGS += -library=iostream +LFLAGS += -library=iostream -library=Crun +LIBS += -library=iostream -library=Crun -lCrun + +# Flags for Optimization +ifdef LP64 +# SC5.0 tools on v9 are flakey at -xO4 +OPT_CFLAGS=-xO3 $(EXTRA_OPT_CFLAGS) +else +OPT_CFLAGS=-xO4 $(EXTRA_OPT_CFLAGS) +endif + +ifeq ("${Platform_arch}", "sparc") + +CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.il + +endif # sparc + +ifeq ("${Platform_arch_model}", "x86_32") +OPT_CFLAGS=-xtarget=pentium $(EXTRA_OPT_CFLAGS) +ifeq ("${COMPILER_REV_NUMERIC}", "500") +# SC5.0 tools on x86 are flakey at -xO4 +OPT_CFLAGS+=-xO3 +else +OPT_CFLAGS+=-xO4 +endif + +CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_x86/vm/solaris_x86_32.il + +endif # 32bit x86 + +# The following options run into misaligned ldd problem (raj) +#OPT_CFLAGS = -fast -O4 $(ARCHFLAG/sparc) -xchip=ultra + +# no more exceptions +CFLAGS/NOEX=-noex + +# PIC is safer for SPARC, and is considerably slower +# a file foo.o which wants to compile -pic can set "PICFLAG/foo.o = -PIC" +PICFLAG = -PIC +PICFLAG/DEFAULT = $(PICFLAG) +# [RGV] Need to figure which files to remove to get link to work +#PICFLAG/BETTER = -pic +PICFLAG/BETTER = $(PICFLAG/DEFAULT) +PICFLAG/BYFILE = $(PICFLAG/$@)$(PICFLAG/DEFAULT$(PICFLAG/$@)) + +endif # COMPILER_REV_NUMERIC = 500 + +################################ +# End old 5.0 compiler options # +################################ + +ifeq ("${COMPILER_REV_NUMERIC}", "402") +# 4.2 COMPILERS SHOULD NO LONGER BE USED +_JUNK_ := $(shell echo >&2 \ + "*** ERROR: SC4.2 compilers are not supported by this code base!") + @exit 1 +endif + +# do not include shared lib path in a.outs +AOUT_FLAGS += -norunpath +LFLAGS_VM = -norunpath -z noversion + +# need position-indep-code for shared libraries +# (ild appears to get errors on PIC code, so we'll try non-PIC for debug) +ifeq ($(PICFLAGS),DEFAULT) +VM_PICFLAG/LIBJVM = $(PICFLAG/DEFAULT) +else +VM_PICFLAG/LIBJVM = $(PICFLAG/BYFILE) +endif +VM_PICFLAG/AOUT = + +VM_PICFLAG = $(VM_PICFLAG/$(LINK_INTO)) +CFLAGS += $(VM_PICFLAG) + +# less dynamic linking (no PLTs, please) +#LIB_FLAGS += $(LINK_MODE) +# %%%%% despite -znodefs, -Bsymbolic gets link errors -- Rose + +LINK_MODE = $(LINK_MODE/$(VERSION)) +LINK_MODE/debug = +LINK_MODE/optimized = -Bsymbolic -znodefs + +# Have thread local errnos +ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 505), 1) +CFLAGS += -mt +else +CFLAGS += -D_REENTRANT +endif + +ifdef CC_INTERP +# C++ Interpreter +CFLAGS += -DCC_INTERP +endif + +# Flags for Debugging +DEBUG_CFLAGS = -g +FASTDEBUG_CFLAGS = -g0 +# The -g0 setting allows the C++ frontend to inline, which is a big win. + +# Special global options for SS12 +ifeq ($(COMPILER_REV_NUMERIC),509) + # There appears to be multiple issues with the new Dwarf2 debug format, so + # we tell the compiler to use the older 'stabs' debug format all the time. + # Note that this needs to be used in optimized compiles too to be 100%. + # This is a workaround for SS12 (5.9) bug 6694600 + CFLAGS += -xdebugformat=stabs +endif + +# Enable the following CFLAGS additions if you need to compare the +# built ELF objects. +# +# The -g option makes static data global and the "-Qoption ccfe +# -xglobalstatic" option tells the compiler to not globalize static +# data using a unique globalization prefix. 
Instead force the use of +# a static globalization prefix based on the source filepath so the +# objects from two identical compilations are the same. +#DEBUG_CFLAGS += -Qoption ccfe -xglobalstatic +#FASTDEBUG_CFLAGS += -Qoption ccfe -xglobalstatic + +ifeq (${COMPILER_REV_NUMERIC}, 502) +COMPILER_DATE := $(shell $(CPP) -V 2>&1 | sed -n '/^.*[ ]C++[ ]\([1-9]\.[0-9][0-9]*\)/p' | awk '{ print $$NF; }') +ifeq (${COMPILER_DATE}, 2001/01/31) +# disable -g0 in fastdebug since SC6.1 dated 2001/01/31 seems to be buggy +# use an innocuous value because it will get -g if it's empty +FASTDEBUG_CFLAGS = -c +endif +endif + +# Uncomment or 'gmake CFLAGS_BROWSE=-sbfast' to get source browser information. +# CFLAGS_BROWSE = -sbfast +CFLAGS += $(CFLAGS_BROWSE) + +# ILD is gone as of SS11 (5.8), not supportted in SS10 (5.7) +ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 507), 1) + # use ild when debugging (but when optimizing we want reproducible results) + ILDFLAG = $(ILDFLAG/$(VERSION)) + ILDFLAG/debug = -xildon + ILDFLAG/optimized = + AOUT_FLAGS += $(ILDFLAG) +endif + +# Where to put the *.o files (a.out, or shared library)? +LINK_INTO = $(LINK_INTO/$(VERSION)) +LINK_INTO/debug = LIBJVM +LINK_INTO/optimized = LIBJVM + +# We link the debug version into the a.out because: +# 1. ild works on a.out but not shared libraries, and using ild +# can cut rebuild times by 25% for small changes. (ILD is gone in SS11) +# 2. dbx cannot gracefully set breakpoints in shared libraries +# + +# apply this setting to link into the shared library even in the debug version: +ifdef LP64 +LINK_INTO = LIBJVM +else +#LINK_INTO = LIBJVM +endif + +MCS = /usr/ccs/bin/mcs +STRIP = /usr/ccs/bin/strip + +# Solaris platforms collect lots of redundant file-ident lines, +# to the point of wasting a significant percentage of file space. +# (The text is stored in ELF .comment sections, contributed by +# all "#pragma ident" directives in header and source files.) +# This command "compresses" the .comment sections simply by +# removing repeated lines. The data can be extracted from +# binaries in the field by using "mcs -p libjvm.so" or the older +# command "what libjvm.so". +LINK_LIB.CC/POST_HOOK += $(MCS) -c $@ || exit 1; +# (The exit 1 is necessary to cause a build failure if the command fails and +# multiple commands are strung together, and the final semicolon is necessary +# since the hook must terminate itself as a valid command.) + +# Also, strip debug and line number information (worth about 1.7Mb). +STRIP_LIB.CC/POST_HOOK = $(STRIP) -x $@ || exit 1; +# STRIP_LIB.CC/POST_HOOK is incorporated into LINK_LIB.CC/POST_HOOK +# in certain configurations, such as product.make. Other configurations, +# such as debug.make, do not include the strip operation. + +# Enable "#pragma ident" directives. They are conditionally compiled because +# redundant copies from header files can bloat the binaries on some platforms. +SYSDEFS += -DUSE_PRAGMA_IDENT_HDR -DUSE_PRAGMA_IDENT_SRC --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/sparcv9.make 2009-08-01 04:17:01.814897101 +0100 @@ -0,0 +1,49 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +Obj_Files += solaris_sparc.o +ASFLAGS += $(AS_ARCHFLAG) + +ifeq ("${Platform_compiler}", "sparcWorks") +ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 505), 1) +# When optimized fully, stubGenerator_sparc.cpp +# has bogus code for the routine +# StubGenerator::generate_flush_callers_register_windows() +OPT_CFLAGS/stubGenerator_sparc.o = $(OPT_CFLAGS/SLOWER) + +# For now ad_sparc file is compiled with -O2 %%%% remove when adlc is fixed +OPT_CFLAGS/ad_sparc.o = $(OPT_CFLAGS/SLOWER) +OPT_CFLAGS/dfa_sparc.o = $(OPT_CFLAGS/SLOWER) + +# CC brings an US-II to its knees compiling the vmStructs asserts under -xO4 +OPT_CFLAGS/vmStructs.o = $(OPT_CFLAGS/O2) +endif + +else +#Options for gcc +OPT_CFLAGS/stubGenerator_sparc.o = $(OPT_CFLAGS/SLOWER) +OPT_CFLAGS/ad_sparc.o = $(OPT_CFLAGS/SLOWER) +OPT_CFLAGS/dfa_sparc.o = $(OPT_CFLAGS/SLOWER) +OPT_CFLAGS/vmStructs.o = $(OPT_CFLAGS/O2) +endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/tiered.make 2009-08-01 04:17:02.248827690 +0100 @@ -0,0 +1,31 @@ +# +# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Sets make macros for making tiered version of VM + +TYPE=TIERED + +VM_SUBDIR = server + +CFLAGS += -DCOMPILER2 -DCOMPILER1 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/top.make 2009-08-01 04:17:02.673889390 +0100 @@ -0,0 +1,185 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+# CA 95054 USA or visit www.sun.com if you need additional information or
+# have any questions.
+#
+#
+
+# top.make is included in the Makefile in the build directories.
+# It DOES NOT include the vm dependency info in order to be faster.
+# Its main job is to implement the incremental form of make lists.
+# It also:
+# -builds and runs adlc via adlc.make
+# -generates JVMTI source and docs via jvmti.make (JSR-163)
+# -generates sa-jdi.jar (JDI binding to core files)
+
+# It assumes the following flags are set:
+# CFLAGS Platform_file, Src_Dirs, SYSDEFS, AOUT, Jvm_Obj_Files
+
+# -- D. Ungar (5/97) from a file by Bill Bush
+
+# Don't override the built-in $(MAKE).
+# Instead, use "gmake" (or "gnumake") from the command line. --Rose
+#MAKE = gmake
+
+GENERATED = ../generated
+VM = $(GAMMADIR)/src/share/vm
+Plat_File = $(Platform_file)
+CDG = cd $(GENERATED);
+
+# Pick up MakeDeps' sources and definitions
+include $(GAMMADIR)/make/$(Platform_os_family)/makefiles/makedeps.make
+MakeDepsClass = MakeDeps.class
+MakeDeps = $(RUN.JAVA) -classpath . MakeDeps
+
+Include_DBs/GC = $(VM)/includeDB_gc \
+ $(VM)/includeDB_gc_parallel \
+ $(VM)/gc_implementation/includeDB_gc_parallelScavenge \
+ $(VM)/gc_implementation/includeDB_gc_concurrentMarkSweep \
+ $(VM)/gc_implementation/includeDB_gc_parNew \
+ $(VM)/gc_implementation/includeDB_gc_g1 \
+ $(VM)/gc_implementation/includeDB_gc_serial \
+ $(VM)/gc_implementation/includeDB_gc_shared
+
+
+Include_DBs/KERNEL = $(VM)/includeDB_core $(VM)/includeDB_gc \
+ $(VM)/gc_implementation/includeDB_gc_serial \
+ $(VM)/includeDB_jvmti \
+ $(VM)/includeDB_compiler1
+
+Include_DBs/CORE = $(VM)/includeDB_core $(Include_DBs/GC) \
+ $(VM)/includeDB_jvmti \
+ $(VM)/includeDB_features
+Include_DBs/COMPILER1 = $(Include_DBs/CORE) $(VM)/includeDB_compiler1
+Include_DBs/COMPILER2 = $(Include_DBs/CORE) $(VM)/includeDB_compiler2
+Include_DBs/TIERED = $(Include_DBs/CORE) $(VM)/includeDB_compiler1 \
+ $(VM)/includeDB_compiler2
+
+Include_DBs = $(Include_DBs/$(TYPE))
+
+Cached_plat = platform.current
+Cached_db = includeDB.current
+
+Incremental_Lists =$(GENERATED)/$(Cached_db)
+# list generation also creates $(GENERATED)/$(Cached_plat)
+
+
+AD_Dir = $(GENERATED)/adfiles
+ADLC = $(AD_Dir)/adlc
+AD_Spec = $(GAMMADIR)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad
+AD_Src = $(GAMMADIR)/src/share/vm/adlc
+AD_Names = ad_$(Platform_arch_model).hpp ad_$(Platform_arch_model).cpp
+AD_Files = $(AD_Names:%=$(AD_Dir)/%)
+
+# AD_Files_If_Required/COMPILER1 = ad_stuff
+AD_Files_If_Required/COMPILER2 = ad_stuff
+AD_Files_If_Required/TIERED = ad_stuff
+AD_Files_If_Required = $(AD_Files_If_Required/$(TYPE))
+
+# Weird argument adjustment for "gnumake -j..."
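[Editor's note -- illustrative sketch, not part of this change] The adjustment defined next feeds the current MFLAGS and HOTSPOT_BUILD_JOBS through the adjust-mflags script so that the parallelism requested at the top level survives the recursive make invocations described later in this file. A typical invocation under that scheme:

    gnumake HOTSPOT_BUILD_JOBS=4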
+adjust-mflags = $(GENERATED)/adjust-mflags +MFLAGS-adjusted = -r `$(adjust-mflags) "$(MFLAGS)" "$(HOTSPOT_BUILD_JOBS)"` + + +# default target: make makeDeps, update lists, make vm +# done in stages to force sequential order with parallel make +# + +default: vm_build_preliminaries the_vm + @echo All done. + +# This is an explicit dependency for the sake of parallel makes. +vm_build_preliminaries: checks $(Incremental_Lists) $(AD_Files_If_Required) jvmti_stuff sa_stuff + @# We need a null action here, so implicit rules don't get consulted. + +# make makeDeps: (and zap the cached db files to force a nonincremental run) + +$(GENERATED)/$(MakeDepsClass): $(MakeDepsSources) + @$(COMPILE.JAVAC) -classpath $(GAMMADIR)/src/share/tools/MakeDeps -g -d $(GENERATED) $(MakeDepsSources) + @echo Removing $(Incremental_Lists) to force regeneration. + @rm -f $(Incremental_Lists) + @$(CDG) echo >$(Cached_plat) + +# make incremental_lists, if cached files out of date, run makeDeps + +$(Incremental_Lists): $(Include_DBs) $(Plat_File) $(GENERATED)/$(MakeDepsClass) + $(CDG) cat $(Include_DBs) > includeDB + $(CDG) if [ ! -r incls ] ; then \ + mkdir incls ; \ + fi + $(CDG) $(MakeDeps) diffs UnixPlatform $(Cached_plat) $(Cached_db) $(Plat_File) includeDB $(MakeDepsOptions) + $(CDG) cp includeDB $(Cached_db) + $(CDG) cp $(Plat_File) $(Cached_plat) + +# symbolic target for command lines +lists: $(Incremental_Lists) + @: lists are now up to date + +# make AD files as necessary +ad_stuff: $(Incremental_Lists) $(adjust-mflags) + @$(MAKE) -f adlc.make $(MFLAGS-adjusted) + +# generate JVMTI files from the spec +jvmti_stuff: $(Incremental_Lists) $(adjust-mflags) + @$(MAKE) -f jvmti.make $(MFLAGS-adjusted) + +# generate SA jar files and native header +sa_stuff: + @$(MAKE) -f sa.make $(MFLAGS-adjusted) + +# and the VM: must use other makefile with dependencies included + +# We have to go to great lengths to get control over the -jN argument +# to the recursive invocation of vm.make. The problem is that gnumake +# resets -jN to -j1 for recursive runs. (How helpful.) +# Note that the user must specify the desired parallelism level via a +# command-line or environment variable name HOTSPOT_BUILD_JOBS. +$(adjust-mflags): $(GAMMADIR)/make/$(Platform_os_family)/makefiles/adjust-mflags.sh + @+rm -f $@ $@+ + @+cat $< > $@+ + @+chmod +x $@+ + @+mv $@+ $@ + +the_vm: vm_build_preliminaries $(adjust-mflags) + @$(MAKE) -f vm.make $(MFLAGS-adjusted) + +install: the_vm + @$(MAKE) -f vm.make install + +# next rules support "make foo.[oi]" + +%.o %.i %.s: + $(MAKE) -f vm.make $(MFLAGS) $@ + #$(MAKE) -f vm.make $@ + +# this should force everything to be rebuilt +clean: + rm -f $(GENERATED)/*.class + $(MAKE) $(MFLAGS) $(GENERATED)/$(MakeDepsClass) + $(MAKE) -f vm.make $(MFLAGS) clean + +# just in case it doesn't, this should do it +realclean: + $(MAKE) -f vm.make $(MFLAGS) clean + rm -fr $(GENERATED) + +.PHONY: default vm_build_preliminaries +.PHONY: lists ad_stuff jvmti_stuff sa_stuff the_vm clean realclean +.PHONY: checks check_os_version install --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/makefiles/vm.make 2009-08-01 04:17:03.109474160 +0100 @@ -0,0 +1,208 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Rules to build JVM and related libraries, included from vm.make in the build +# directory. + +# Common build rules. +MAKEFILES_DIR=$(GAMMADIR)/make/$(Platform_os_family)/makefiles +include $(MAKEFILES_DIR)/rules.make + +default: build + +#---------------------------------------------------------------------- +# Defs + +GENERATED = ../generated + +# read a generated file defining the set of .o's and the .o .h dependencies +include $(GENERATED)/Dependencies + +# read machine-specific adjustments (%%% should do this via buildtree.make?) +include $(MAKEFILES_DIR)/$(BUILDARCH).make + +# set VPATH so make knows where to look for source files +# Src_Dirs is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm +# The incls directory contains generated header file lists for inclusion. +# The adfiles directory contains ad_.[ch]pp. +# The jvmtifiles directory contains jvmti*.[ch]pp +Src_Dirs_V = $(GENERATED)/adfiles $(GENERATED)/jvmtifiles ${Src_Dirs} $(GENERATED)/incls +VPATH += $(Src_Dirs_V:%=%:) + +# set INCLUDES for C preprocessor +Src_Dirs_I = $(GENERATED)/adfiles $(GENERATED)/jvmtifiles ${Src_Dirs} $(GENERATED) +INCLUDES += $(Src_Dirs_I:%=-I%) + +ifeq (${VERSION}, debug) + SYMFLAG = -g +else + SYMFLAG = +endif + +# The following variables are defined in the generated flags.make file. +BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HS_BUILD_VER)\"" +JRE_VERSION = -DJRE_RELEASE_VERSION="\"$(JRE_RELEASE_VER)\"" +HS_LIB_ARCH = -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\" +BUILD_TARGET = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\"" +BUILD_USER = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\"" +VM_DISTRO = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\"" + +CPPFLAGS = \ + ${SYSDEFS} \ + ${INCLUDES} \ + ${BUILD_VERSION} \ + ${BUILD_TARGET} \ + ${BUILD_USER} \ + ${HS_LIB_ARCH} \ + ${JRE_VERSION} \ + ${VM_DISTRO} + +# CFLAGS_WARN holds compiler options to suppress/enable warnings. +CFLAGS += $(CFLAGS_WARN) + +# Do not use C++ exception handling +CFLAGS += $(CFLAGS/NOEX) + +# Extra flags from gnumake's invocation or environment +CFLAGS += $(EXTRA_CFLAGS) + +# Math Library (libm.so), do not use -lm. +# There might be two versions of libm.so on the build system: +# libm.so.1 and libm.so.2, and we want libm.so.1. +# Depending on the Solaris release being used to build with, +# /usr/lib/libm.so could point at a libm.so.2, so we are +# explicit here so that the libjvm.so you have built will work on an +# older Solaris release that might not have libm.so.2. +# This is a critical factor in allowing builds on Solaris 10 or newer +# to run on Solaris 8 or 9. 
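[Editor's note -- illustrative sketch, not part of this change] Assuming the freshly built libjvm.so is at hand, a quick way to check that the explicit path set below was honoured:

    ldd libjvm.so | grep libm

The output should name libm.so.1 rather than libm.so.2.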
+# +LIBM=/usr/lib$(ISA_DIR)/libm.so.1 + +ifeq ("${Platform_compiler}", "sparcWorks") +# The whole megilla: +ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 505), 1) +# Old Comment: List the libraries in the order the compiler was designed for +# Not sure what the 'designed for' comment is referring too above. +# The order may not be too significant anymore, but I have placed this +# older libm before libCrun, just to make sure it's found and used first. +LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc +else +LIBS += -ldl -lthread -lsocket $(LIBM) -lsched -ldoor +endif +else +LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc +endif + +# By default, link the *.o into the library, not the executable. +LINK_INTO$(LINK_INTO) = LIBJVM + +JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH) + +#---------------------------------------------------------------------- +# jvm_db & dtrace +include $(MAKEFILES_DIR)/dtrace.make + +#---------------------------------------------------------------------- +# JVM + +JVM = jvm$(G_SUFFIX) +LIBJVM = lib$(JVM).so + +JVM_OBJ_FILES = $(Obj_Files) $(DTRACE_OBJS) + +vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES)) + +mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) + rm -f $@ + cat $^ > $@ + +mapfile_reorder : mapfile $(MAPFILE_DTRACE_OPT) $(REORDERFILE) + rm -f $@ + cat $^ > $@ + +ifeq ($(LINK_INTO),AOUT) + LIBJVM.o = + LIBJVM_MAPFILE = + LIBS_VM = $(LIBS) +else + LIBJVM.o = $(JVM_OBJ_FILES) + LIBJVM_MAPFILE$(LDNOMAP) = mapfile_reorder + LFLAGS_VM$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE)) + LFLAGS_VM += $(SONAMEFLAG:SONAME=$(LIBJVM)) +ifndef USE_GCC + LIBS_VM = $(LIBS) +else + # JVM is statically linked with libgcc[_s] and libstdc++; this is needed to + # get around library dependency and compatibility issues. Must use gcc not + # g++ to link. 
+ LFLAGS_VM += $(STATIC_LIBGCC) + LIBS_VM += $(STATIC_STDCXX) $(LIBS) +endif +endif + +ifdef USE_GCC +LINK_VM = $(LINK_LIB.c) +else +LINK_VM = $(LINK_LIB.CC) +endif +# making the library: +$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) + $(QUIETLY) \ + case "$(CFLAGS_BROWSE)" in \ + -sbfast|-xsbfast) \ + ;; \ + *) \ + echo Linking vm...; \ + $(LINK_LIB.CC/PRE_HOOK) \ + $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \ + $(LINK_LIB.CC/POST_HOOK) \ + rm -f $@.1; ln -s $@ $@.1; \ + ;; \ + esac + +DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM) + +install_jvm: $(LIBJVM) + @echo "Copying $(LIBJVM) to $(DEST_JVM)" + $(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done" + +#---------------------------------------------------------------------- +# Other files + +# Gamma launcher +include $(MAKEFILES_DIR)/launcher.make + +# Signal interposition library +include $(MAKEFILES_DIR)/jsig.make + +# Serviceability agent +include $(MAKEFILES_DIR)/saproc.make + +#---------------------------------------------------------------------- + +build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(LIBJVM_DTRACE) checkAndBuildSA dtraceCheck + +install: install_jvm install_jsig install_saproc + +.PHONY: default build install install_jvm --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/platform_amd64 2009-08-01 04:17:03.559777658 +0100 @@ -0,0 +1,15 @@ +os_family = solaris + +arch = x86 + +arch_model = x86_64 + +os_arch = solaris_x86 + +os_arch_model = solaris_x86_64 + +lib_arch = amd64 + +compiler = sparcWorks + +sysdefs = -DSOLARIS -DSPARC_WORKS -DAMD64 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/platform_amd64.gcc 2009-08-01 04:17:03.968083675 +0100 @@ -0,0 +1,15 @@ +os_family = solaris + +arch = x86 + +arch_model = x86_64 + +os_arch = solaris_x86 + +os_arch_model = solaris_x86_64 + +lib_arch = amd64 + +compiler = gcc + +sysdefs = -DSOLARIS -D_GNU_SOURCE -DAMD64 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/platform_i486 2009-08-01 04:17:04.381124649 +0100 @@ -0,0 +1,15 @@ +os_family = solaris + +arch = x86 + +arch_model = x86_32 + +os_arch = solaris_x86 + +os_arch_model = solaris_x86_32 + +lib_arch = i386 + +compiler = sparcWorks + +sysdefs = -DSOLARIS -DSPARC_WORKS -DIA32 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/platform_i486.gcc 2009-08-01 04:17:04.795172810 +0100 @@ -0,0 +1,15 @@ +os_family = solaris + +arch = x86 + +arch_model = x86_32 + +os_arch = solaris_x86 + +os_arch_model = solaris_x86_32 + +lib_arch = i386 + +compiler = gcc + +sysdefs = -DSOLARIS -D_GNU_SOURCE -DIA32 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/platform_sparc 2009-08-01 04:17:05.203958929 +0100 @@ -0,0 +1,15 @@ +os_family = solaris + +arch = sparc + +arch_model = sparc + +os_arch = solaris_sparc + +os_arch_model = solaris_sparc + +lib_arch = sparc + +compiler = sparcWorks + +sysdefs = -DSOLARIS -DSPARC_WORKS -DSPARC --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/platform_sparc.gcc 2009-08-01 04:17:05.616519798 +0100 @@ -0,0 +1,15 @@ +os_family = solaris + +arch = sparc + +arch_model = sparc + +os_arch = solaris_sparc + +os_arch_model = solaris_sparc + +lib_arch = sparc + +compiler = gcc + +sysdefs = -DSOLARIS -D_GNU_SOURCE -DSPARC --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/platform_sparcv9 2009-08-01 04:17:06.025560303 +0100 @@ -0,0 +1,15 @@ +os_family = solaris + +arch = sparc + +arch_model = sparc + 
+os_arch = solaris_sparc + +os_arch_model = solaris_sparc + +lib_arch = sparcv9 + +compiler = sparcWorks + +sysdefs = -DSOLARIS -DSPARC_WORKS -DSPARC --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/platform_sparcv9.gcc 2009-08-01 04:17:06.434607251 +0100 @@ -0,0 +1,15 @@ +os_family = solaris + +arch = sparc + +arch_model = sparc + +os_arch = solaris_sparc + +os_arch_model = solaris_sparc + +lib_arch = sparcv9 + +compiler = gcc + +sysdefs = -DSOLARIS -D_GNU_SOURCE -DSPARC --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/solaris/reorder.sh 2009-08-01 04:17:06.848127984 +0100 @@ -0,0 +1,355 @@ +#!/bin/sh -x +# +# Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Generate the reorder data for hotspot. +# +# Usage: +# +# sh reorder.sh +# +# is a *built* SDK workspace which contains the +# reordering tools for the SDK. This script relies on lib_mcount.so +# from this workspace. +# +# is a working SDK which you can use to run the profiled +# JVMs in to collect data. You must be able to write to this SDK. +# +# is a directory containing JBB test jar files and properties +# which will be used to run the JBB test to provide reordering data +# for the server VM. +# +# Profiled builds of the VM are needed (before running this script), +# build with PROFILE_PRODUCT=1: +# +# gnumake profiled1 profiled PROFILE_PRODUCT=1 +# +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +test_setup() { + + # $1 = "client" or "server" + # $2 = name of reorder file to be generated. + + echo "" + echo "TEST_SETUP $1 $2" + echo "" + libreldir=${ALT_OUTPUTDIR:-../../../make/solaris-$arch5}/reorder + libabsdir=${ALT_OUTPUTDIR:-$sdk_ws/make/solaris-$arch5}/reorder + ( cd $sdk_ws/make/tools/reorder ; gnumake $libreldir/$arch5/libmcount.so ) + if [ "${arch3}" = "i386" ] ; then + # On Solaris/x86 we need to remove the symbol _mcount from the command + ( cd $sdk_ws/make/tools/reorder ; \ + gnumake $libreldir/$arch5/remove_mcount ) + echo Remove _mcount from java command. + $libabsdir/$arch5/remove_mcount $jre/bin/java + fi + ( cd $sdk_ws/make/tools/reorder ; gnumake tool_classes ) + ( cd $sdk_ws/make/tools/reorder ; gnumake test_classes ) + + tests="Null Exit Hello Sleep IntToString \ + LoadToolkit LoadFrame LoadJFrame JHello" + swingset=$sdk/demo/jfc/SwingSet2/SwingSet2.jar + java=$jre/bin/java + if [ "X$LP64" != "X" ] ; then + testjava="$jre/bin/${arch3}/java" + else + testjava="$jre/bin/java" + fi + mcount=$libabsdir/$arch5/libmcount.so + + if [ ! 
-x $mcount ] ; then + echo $mcount is missing! + exit 1 + fi + + if [ "X$1" = "client" ] ; then + if [ "X$NO_SHARING" = "X" ] ; then + echo "Dumping shared file." + LD_PRELOAD=$mcount \ + JDK_ALTERNATE_VM=jvm_profiled \ + $testjava -Xshare:dump -Xint -XX:PermSize=16m -version 2> /dev/null + shared_client="-Xshare:on" + echo "Shared file dump completed." + else + shared_client="-Xshare:off" + echo "NO_SHARING defined, not using sharing." + fi + else + echo "Server: no sharing" + shared_server="-Xshare:off" + fi + + testpath=$libabsdir/classes + + reorder_file=$2 + + rm -f ${reorder_file} + rm -f ${reorder_file}_tmp2 + rm -f ${reorder_file}_tmp1 + + echo "data = R0x2000;" > ${reorder_file} + echo "text = LOAD ?RXO;" >> ${reorder_file} + echo "" >> ${reorder_file} + echo "" >> ${reorder_file} +} + +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +test_client() { + + # Run each of a set of tests, extract the methods called, + # append the new functions to the reorder list. + # $1 = "client" or "server" + # $2 = name of reorder file to be generated. + + echo "TEST_CLIENT $1 $2." + test_setup $1 $2 + echo "TEST_CLIENT $1 $2." + + for f in $tests ; do + echo Running test $f. + rm -f ${reorder_file}_tmp1 + echo "# Test $f" >> ${reorder_file} + + echo "Using LD_PRELOAD=$mcount" + echo $testjava ${shared_client} -classpath $testpath $f + + LD_PRELOAD=$mcount \ + JDK_ALTERNATE_VM=jvm_profiled \ + $testjava ${shared_client} -classpath $testpath $f 2> ${reorder_file}_tmp1 + + echo "Done." + sed -n -e '/^text:/p' ${reorder_file}_tmp1 > ${reorder_file}_tmp2 + sed -e '/^text:/d' ${reorder_file}_tmp1 + LD_LIBRARY_PATH=$lib/server \ + $java -classpath $testpath Combine ${reorder_file} \ + ${reorder_file}_tmp2 \ + > ${reorder_file}_tmp3 + mv ${reorder_file}_tmp3 ${reorder_file} + rm -f ${reorder_file}_tmp2 + rm -f ${reorder_file}_tmp1 + done + + # Run SwingSet, extract the methods called, + # append the new functions to the reorder list. + + echo "# SwingSet" >> ${reorder_file} + + echo "" + echo "" + echo "When SwingSet has finished drawing, " \ + "you may terminate it (with your mouse)." + echo "Otherwise, it should be automatically terminated in 3 minutes." + echo "" + echo "" + + echo "Using LD_PRELOAD=$mcount, JDK_ALTERNATE=jvm_profiled." + echo $testjava ${shared_client} -classpath $testpath MaxTime $swingset 60 + LD_PRELOAD=$mcount \ + JDK_ALTERNATE_VM=jvm_profiled \ + $testjava ${shared_client} -classpath $testpath MaxTime \ + $swingset 60 2> ${reorder_file}_tmp1 + + sed -n -e '/^text:/p' ${reorder_file}_tmp1 > ${reorder_file}_tmp2 + + LD_LIBRARY_PATH=$lib/server \ + $java -server -classpath $testpath Combine ${reorder_file} ${reorder_file}_tmp2 \ + > ${reorder_file}_tmp3 + echo mv ${reorder_file}_tmp3 ${reorder_file} + mv ${reorder_file}_tmp3 ${reorder_file} + echo rm -f ${reorder_file}_tmp2 + rm -f ${reorder_file}_tmp2 + echo rm -f ${reorder_file}_tmp1 + rm -f ${reorder_file}_tmp1 +} + +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +test_server() { + + # Run the JBB script, collecting data on the way. + # $1 = "client" or "server" + # $2 = name of reorder file to be generated. + + echo "TEST_SERVER $1 $2." + test_setup $1 $2 + echo "TEST_SERVER $1 $2." + + echo Running JBB. 
+ + rm -f ${reorder_file}_tmp1 + rm -f ${reorder_file}_tmp2 + heap=200m + + CLASSPATH=jbb.jar:jbb_no_precompile.jar:check.jar:reporter.jar + + ( cd $jbb_dir; LD_PRELOAD=$mcount MCOUNT_ORDER_BY_COUNT=1 \ + JDK_ALTERNATE_VM=jvm_profiled \ + $testjava ${shared_server} -classpath $CLASSPATH -Xms${heap} -Xmx${heap} \ + spec.jbb.JBBmain -propfile SPECjbb.props ) 2> ${reorder_file}_tmp1 + + sed -n -e '/^text:/p' ${reorder_file}_tmp1 > ${reorder_file}_tmp2 + sed -e '/^text:/d' ${reorder_file}_tmp1 + cat ${reorder_file}_tmp2 >> ${reorder_file} + rm -f ${reorder_file}_tmp2 + rm -f ${reorder_file}_tmp1 +} + +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +# Rename the old VMs, copy the new in, run the test, and put the +# old one back. + +copy_and_test() { + + # $1 = "client" or "server" + # $2 = name of reorder file to be generated. + # $3 = profiled jvm to copy in + + echo "COPY_AND_TEST ($1, $2, $3)." + # $2 = name of reorder file to be generated. + # $3 = profiled jvm to copy in + + rm -rf $lib/jvm_profiled + mkdir $lib/jvm_profiled + cp $3 $lib/jvm_profiled + test_$1 $1 $2 + rm -rf $lib/jvm_profiled +} + +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +# Check arguments: + +if [ $# != 3 ] ; then + echo "" + echo "Usage:" + echo " sh reorder.sh " + echo "" + exit 1 +fi + +sdk_ws=$1 +if [ ! -r $sdk_ws/make/tools/reorder/Makefile ] ; then + echo "" + echo "test workspace "$sdk_ws" does not contain the reordering tools." + echo "" + exit 1 +fi + +sdk=$2 +jre=$sdk/jre + +# Set up architecture names as needed by various components. +# Why couldn't we just use x86 for everything? + +# Arch name as used in JRE runtime (eg. i386): +# .../jre/lib/${arch3}/server +arch3=`uname -p` + +# Arch name as used in Hotspot build: (eg. i486) +# /export/hotspot/make/solaris/solaris_${arch4}_compiler1 +arch4=$arch3 + +# Arch name as used in SDK build (eg. i586): +# /export/tiger/make/solaris-${arch3} +arch5=$arch3 + +# Tweak for 64-bit sparc builds. At least they all agree. +if [ $arch3 = sparc -a "X$LP64" != "X" ] ; then + arch3=sparcv9 + arch4=sparcv9 + arch5=sparcv9 +fi + +# Tweak for 64-bit i386 == amd64 builds. At least they all agree. +if [ $arch3 = i386 -a "X$LP64" != "X" ] ; then + arch3=amd64 + arch4=amd64 + arch5=amd64 +fi + +# Tweak for x86 builds. All different. +if [ $arch3 = i386 ] ; then + arch4=i486 + arch5=i586 +fi + +lib=$jre/lib/$arch3 +if [ ! -r $jre/lib/rt.jar ] ; then + echo "" + echo "test SDK "$sdk" is not a suitable SDK." + echo "" + exit 1 +fi + +jbb_dir=$3 +if [ ! -r $jbb_dir/jbb.jar ] ; then + echo "" + echo "jbb.jar not present in $jbb_dir" + echo "" + exit 1 +fi + + +# Were profiled VMs built? + +if [ "X$LP64" != "X" ] ; then + if [ ! -r solaris_${arch4}_compiler2/profiled/libjvm.so ] ; then + echo "" + echo "Profiled builds of compiler2 are needed first." + echo ' -- build with "make profiled PROFILE_PRODUCT=1" -- ' + echo "" + exit 1 + fi +else + if [ ! -r solaris_${arch4}_compiler1/profiled/libjvm.so \ + -o ! -r solaris_${arch4}_compiler2/profiled/libjvm.so ] ; then + echo "" + echo "Profiled builds of compiler1 and compiler2 are needed first." + echo ' -- build with "make profiled{,1} PROFILE_PRODUCT=1" -- ' + exit 1 + fi +fi + + +# Compiler1 - not supported in 64-bit (b69 java launcher rejects it). + +if [ "X$LP64" = "X" ] ; then + #gnumake profiled1 + echo Using profiled client VM. 
+ echo + copy_and_test client \ + reorder_COMPILER1_$arch4 \ + solaris_${arch4}_compiler1/profiled/libjvm.so +fi + +#gnumake profiled +echo Using profiled server VM. +echo +copy_and_test server \ + reorder_COMPILER2_$arch4 \ + solaris_${arch4}_compiler2/profiled/libjvm.so --- old/hotspot/build/test/Queens.java 2009-08-01 04:17:07.393115582 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,86 +0,0 @@ -/* - * Copyright 2006 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -import java.util.*; - -// Copyright 1996, Animorphic Systems -// gri 28 Aug 92 / 15 Jan 93 / 8 Dec 95 - -class Queens { - - static void try_i(boolean a[], boolean b[], boolean c[], int x[], int i) { - int adj = 7; - - for (int j = 1; j <= 8; j++) { - if (b[j] && a[i+j] && c[adj+i-j]) { - x[i] = j; - b[j] = false; - a[i+j] = false; - c[adj+i-j] = false; - if (i < 8) try_i(a, b, c, x, i+1); - else print(x); - b[j] = true; - a[i+j] = true; - c[adj+i-j] = true; - } - } - } - - public static void main(String s[]) { - boolean a[] = new boolean[16+1]; - boolean b[] = new boolean[ 8+1]; - boolean c[] = new boolean[14+1]; - int x[] = new int[8+1]; - int adj = 7; - - for (int i = -7; i <= 16; i++) { - if (i >= 1 && i <= 8) b[i] = true; - if (i >= 2) a[i] = true; - if (i <= 7) c[adj+i] = true; - } - - x[0] = 0; // solution counter - - try_i(a, b, c, x, 1); - } - - static void print(int x[]) { - // first correct solution: A1 B5 C8 D6 E3 F7 G2 H4 - - char LF = (char)0xA; - char CR = (char)0xD; - - x[0]++; - if (x[0] < 10) - System.out.print(" "); - System.out.print(x[0] + ". "); - for (int i = 1; i <= 8; i++) { - char p = (char)('A' + i - 1); - System.out.print(p); - System.out.print (x[i] + " "); - } - System.out.println(); - } - -}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/test/Queens.java 2009-08-01 04:17:07.314981863 +0100 @@ -0,0 +1,86 @@ +/* + * Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +import java.util.*; + +// Copyright 1996, Animorphic Systems +// gri 28 Aug 92 / 15 Jan 93 / 8 Dec 95 + +class Queens { + + static void try_i(boolean a[], boolean b[], boolean c[], int x[], int i) { + int adj = 7; + + for (int j = 1; j <= 8; j++) { + if (b[j] && a[i+j] && c[adj+i-j]) { + x[i] = j; + b[j] = false; + a[i+j] = false; + c[adj+i-j] = false; + if (i < 8) try_i(a, b, c, x, i+1); + else print(x); + b[j] = true; + a[i+j] = true; + c[adj+i-j] = true; + } + } + } + + public static void main(String s[]) { + boolean a[] = new boolean[16+1]; + boolean b[] = new boolean[ 8+1]; + boolean c[] = new boolean[14+1]; + int x[] = new int[8+1]; + int adj = 7; + + for (int i = -7; i <= 16; i++) { + if (i >= 1 && i <= 8) b[i] = true; + if (i >= 2) a[i] = true; + if (i <= 7) c[adj+i] = true; + } + + x[0] = 0; // solution counter + + try_i(a, b, c, x, 1); + } + + static void print(int x[]) { + // first correct solution: A1 B5 C8 D6 E3 F7 G2 H4 + + char LF = (char)0xA; + char CR = (char)0xD; + + x[0]++; + if (x[0] < 10) + System.out.print(" "); + System.out.print(x[0] + ". "); + for (int i = 1; i <= 8; i++) { + char p = (char)('A' + i - 1); + System.out.print(p); + System.out.print (x[i] + " "); + } + System.out.println(); + } + +}; --- old/hotspot/build/windows/README 2009-08-01 04:17:08.262213531 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,214 +0,0 @@ -Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved. -DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - -This code is free software; you can redistribute it and/or modify it -under the terms of the GNU General Public License version 2 only, as -published by the Free Software Foundation. - -This code is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -version 2 for more details (a copy is included in the LICENSE file that -accompanied this code). - -You should have received a copy of the GNU General Public License version -2 along with this work; if not, write to the Free Software Foundation, -Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - -Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, -CA 95054 USA or visit www.sun.com if you need additional information or -have any questions. - -________________________________________________________________________________ - -@(#)README 1.29 07/03/14 16:32:09 - -__Introduction__________________________________________________________________ - -This readme file should provide all the information needed to build -the HotSpot VM for Windows 95/Windows NT from its teamware workspace. -It is intended as a starting point for people who want to learn how -to work with the current HotSpot source workspace and who need to -build the VM locally. It is not intended as a tutorial for licensees. 
- -Last update: 03/28/05 - - -__Platform______________________________________________________________________ - -The VM builds under the following platforms: -- Windows NT 4.0 on Intel x486 or greater -- x486 PC (or greater), 32MByte or more - - -__Tools_________________________________________________________________________ - -For building/testing the following tools need to be available: -- Microsoft Visual C++ 6.0 (with nmake version 1.62.7022 or greater) -- MKS Toolkit 6.1 or greater - see: /net/reinstall/export/vol0/pc-archive/software/mks6.1 (NFS) - or: \\reinstall\pc-archive\software\mks6.1 (NT) - - -__JDK___________________________________________________________________________ - -The workspace works with the following version of the JDK: -(NOTE: these are out of date) -- JDK1.2FCS "V" build - see: /usr/local/java/jdk1.2/win32 - -and the following version(s) of HotJava: -- hjb1.1.4 -- hjb1.1.5 - see /usr/local/java/hjb1.1.x/win32 - - -__Environment variables_________________________________________________________ - -The following environment variables need to be set up for the IDE -build process. For batch builds these do not need to be set. - -HotSpotMksHome points to the (NFS or PC-local) directory where the MKS - executables (like sh.exe and grep.exe) are installed - -Optionally you may set the following variables in your environment and they -will be picked up by the create.bat script used to generate the vm.vcproj files. -See the section on building within MS Developer Studio for more details. - -HotSpotWorkSpace points to the (NFS) directory where the workspace is located -HotSpotBuildSpace points to the (PC-local) directory where the vm is built -HotSpotReleaseBinDest points to the (NFS or PC-local) directory where the product DLL is - written -HotSpotDebugBinDest points to the (NFS or PC-local) directory where the debug DLL is - written - -NOTE: For both batch and IDE builds, java and javac must be in your -PATH, and the versions found by default must work. (If this turns out -to be a problem, we can define HotSpotJava and HotSpotJavaC for -bootstrapping...) - -__Building the JVM from the command line________________________________________ - -1) choose a directory in which you want to build the vm - (the build process will create a subdirectory) - -2) To build the 'core' version (debug || optimized) - %HotSpotWorkSpace%\build\windows\build core %HotSpotWorkSpace% - To build the 'compiler2' version (debug || optimized) - %HotSpotWorkSpace%\build\windows\build compiler2 %HotSpotWorkSpace% - - where is a full path to a JDK in which bin/java and - bin/javac are present and working. - -3) If you have problems with building, first try: - vcvars32 (sets path for VC++) - -4) In addition to jvm.dll, the Serviceability Agent (SA) based JDI connector - and command line tools are built if dbgeng.h and dbgeng.lib - can be located, and BUILD_WIN_SA=1 is specified. We look for dbgeng.h here: - $(MSVCDIR)\PlatformSDK\Include - $(SYSTEMROOT)\..\Program Files\Microsoft SDK\include - - The first directory is part of Visual Studio VC .NET 2003. - The second is used on Windows-amd64. - - -__Building the JVM from within MS Developer Studio______________________________ - -0) Set environment variables as described above - -1) Run the following script: - %HotSpotWorkSpace%\build\windows\create { } - where type is one of core, compiler1, compiler2. 
If you leave off the - " " part, the script expects to find their - values in the HotSpotWorkSpace, HotSpotBuildSpace, HotSpotReleaseBinDest, and HotSpotDebugBinDest environment - variables. The resulting vm.vcproj does not depend on these values in the environment. - - This will populate the build space with the appropriate makefiles - and run nmake in it. This builds and runs makedeps, which now - generates the appropriate vm.vcproj into the build space. It also - builds and runs adlc. - - To regenerate the .incl and .dsp files after changing the include - databases, just run nmake in the build space. - - The build process now relies on java and javac. For the IDE builds, - the full path to a JDK (in which bin/java and bin/javac are present - and working) can be specified either explicitly with the - ALT_BOOTDIR environment variable (like the JDK build process), via - the JDK build's default BOOTDIR environment variable, via JAVA_HOME, - or implicitly via the PATH. - - (Note that there are now many more command line options to MakeDeps - on the Windows platform than before. These have been bundled into - makefiles/makedeps.make, but it is still necessary to keep this in - sync with the batch makefiles, in vm/generated.) - - If you have problems with building (i.e,. finding nmake), first try: - vcvars32 (sets path for VC++) - -2) Double-click the vm.vcproj file in the %HotSpotBuildSpace% directory - to open MS Developer Studio. - -3) build desired or all versions: - menu Build -> Batch Build... -> Build (or Rebuild All) - -4) jvm.dll is in the %HotSpotReleaseBinDest% or %HotSpotDebugBinDest% directory - depending on which configuration you built (release or debug). - -Note: do not edit any of the files (especially the vm.vcproj file) in the -build space, since they are all either autogenerated or copied from -the work space. If necessary, modify the original Makefiles in -%HotSpotWorkSpace%\build\windows\projectfiles, or the shared -makedeps arguments in -%HotSpotWorkSpace%\build\windows\makefiles\makedeps.make. - -Note that it appears that some options set in the IDE (for example, -the default executable) show up not in the .dsp file, but in the .opt -file, so the automatic regeneration of the .dsp file should not -destroy the project settings. However, makedeps.make should be edited -to supply per-file compiler options. - -To build adlc from within the IDE for debugging purposes: - -1) in MS Developer Studio, open ADLCompiler.dsw: - menu File -> Open Workspace... - select & double-click ADLCompiler.dsw - -2) rebuild all (debug mode is enough) - menu Build -> Rebuild All (make sure Win32 Debug version is selected) - - -__Testing the VM________________________________________________________________ - -To test the VM using the Tonga Testsuite, use testlook. testlook is a very -simple testing framework on top of Tonga which allows us to use one (Tonga) -test file, that can be extended with attributes. - -1) copy %HotSpotWorkSpace%\test\testlook.bat onto PC (preferably - %HotSpotBuildSpace%\bin, which should ideally be in the path) - -2) run testlook or testlook help for details - -3) to run testlook you need to have Tonga mounted: - net use T: \\tapas\export1\psqe - - -__HotJava under HotSpot_________________________________________________________ - -To run HotJava, use the .bat file %HotSpotWorkSpace%\test\h.bat. Copy -it into %HotSpotBuildSpace%/ (which ideally is in the path) and run -HotJava: h java (e.g., h java_g -Xint). 
- - -__Preferred directory setup under Windows NT____________________________________ - -Within the HotSpot group we are using the following directory setup: - -D:\jdk1.2 - where we install the JDK - -The following drives are mounted for testing/putbacks/etc.: - -net use T: \\tapas\export1\psqe -net use Y: \\rschmidt\GammaBase -net use Z: \\animorphic\animorphic --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/README 2009-08-01 04:17:08.184654479 +0100 @@ -0,0 +1,214 @@ +Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved. +DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + +This code is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License version 2 only, as +published by the Free Software Foundation. + +This code is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +version 2 for more details (a copy is included in the LICENSE file that +accompanied this code). + +You should have received a copy of the GNU General Public License version +2 along with this work; if not, write to the Free Software Foundation, +Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + +Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +CA 95054 USA or visit www.sun.com if you need additional information or +have any questions. + +________________________________________________________________________________ + +@(#)README 1.29 07/03/14 16:32:09 + +__Introduction__________________________________________________________________ + +This readme file should provide all the information needed to build +the HotSpot VM for Windows 95/Windows NT from its teamware workspace. +It is intended as a starting point for people who want to learn how +to work with the current HotSpot source workspace and who need to +build the VM locally. It is not intended as a tutorial for licensees. + +Last update: 03/28/05 + + +__Platform______________________________________________________________________ + +The VM builds under the following platforms: +- Windows NT 4.0 on Intel x486 or greater +- x486 PC (or greater), 32MByte or more + + +__Tools_________________________________________________________________________ + +For building/testing the following tools need to be available: +- Microsoft Visual C++ 6.0 (with nmake version 1.62.7022 or greater) +- MKS Toolkit 6.1 or greater + see: /net/reinstall/export/vol0/pc-archive/software/mks6.1 (NFS) + or: \\reinstall\pc-archive\software\mks6.1 (NT) + + +__JDK___________________________________________________________________________ + +The workspace works with the following version of the JDK: +(NOTE: these are out of date) +- JDK1.2FCS "V" build + see: /usr/local/java/jdk1.2/win32 + +and the following version(s) of HotJava: +- hjb1.1.4 +- hjb1.1.5 + see /usr/local/java/hjb1.1.x/win32 + + +__Environment variables_________________________________________________________ + +The following environment variables need to be set up for the IDE +build process. For batch builds these do not need to be set. + +HotSpotMksHome points to the (NFS or PC-local) directory where the MKS + executables (like sh.exe and grep.exe) are installed + +Optionally you may set the following variables in your environment and they +will be picked up by the create.bat script used to generate the vm.vcproj files. 
+See the section on building within MS Developer Studio for more details. + +HotSpotWorkSpace points to the (NFS) directory where the workspace is located +HotSpotBuildSpace points to the (PC-local) directory where the vm is built +HotSpotReleaseBinDest points to the (NFS or PC-local) directory where the product DLL is + written +HotSpotDebugBinDest points to the (NFS or PC-local) directory where the debug DLL is + written + +NOTE: For both batch and IDE builds, java and javac must be in your +PATH, and the versions found by default must work. (If this turns out +to be a problem, we can define HotSpotJava and HotSpotJavaC for +bootstrapping...) + +__Building the JVM from the command line________________________________________ + +1) choose a directory in which you want to build the vm + (the build process will create a subdirectory) + +2) To build the 'core' version (debug || optimized) + %HotSpotWorkSpace%\build\windows\build core %HotSpotWorkSpace% + To build the 'compiler2' version (debug || optimized) + %HotSpotWorkSpace%\build\windows\build compiler2 %HotSpotWorkSpace% + + where is a full path to a JDK in which bin/java and + bin/javac are present and working. + +3) If you have problems with building, first try: + vcvars32 (sets path for VC++) + +4) In addition to jvm.dll, the Serviceability Agent (SA) based JDI connector + and command line tools are built if dbgeng.h and dbgeng.lib + can be located, and BUILD_WIN_SA=1 is specified. We look for dbgeng.h here: + $(MSVCDIR)\PlatformSDK\Include + $(SYSTEMROOT)\..\Program Files\Microsoft SDK\include + + The first directory is part of Visual Studio VC .NET 2003. + The second is used on Windows-amd64. + + +__Building the JVM from within MS Developer Studio______________________________ + +0) Set environment variables as described above + +1) Run the following script: + %HotSpotWorkSpace%\build\windows\create { } + where type is one of core, compiler1, compiler2. If you leave off the + " " part, the script expects to find their + values in the HotSpotWorkSpace, HotSpotBuildSpace, HotSpotReleaseBinDest, and HotSpotDebugBinDest environment + variables. The resulting vm.vcproj does not depend on these values in the environment. + + This will populate the build space with the appropriate makefiles + and run nmake in it. This builds and runs makedeps, which now + generates the appropriate vm.vcproj into the build space. It also + builds and runs adlc. + + To regenerate the .incl and .dsp files after changing the include + databases, just run nmake in the build space. + + The build process now relies on java and javac. For the IDE builds, + the full path to a JDK (in which bin/java and bin/javac are present + and working) can be specified either explicitly with the + ALT_BOOTDIR environment variable (like the JDK build process), via + the JDK build's default BOOTDIR environment variable, via JAVA_HOME, + or implicitly via the PATH. + + (Note that there are now many more command line options to MakeDeps + on the Windows platform than before. These have been bundled into + makefiles/makedeps.make, but it is still necessary to keep this in + sync with the batch makefiles, in vm/generated.) + + If you have problems with building (i.e,. finding nmake), first try: + vcvars32 (sets path for VC++) + +2) Double-click the vm.vcproj file in the %HotSpotBuildSpace% directory + to open MS Developer Studio. + +3) build desired or all versions: + menu Build -> Batch Build... 
-> Build (or Rebuild All) + +4) jvm.dll is in the %HotSpotReleaseBinDest% or %HotSpotDebugBinDest% directory + depending on which configuration you built (release or debug). + +Note: do not edit any of the files (especially the vm.vcproj file) in the +build space, since they are all either autogenerated or copied from +the work space. If necessary, modify the original Makefiles in +%HotSpotWorkSpace%\build\windows\projectfiles, or the shared +makedeps arguments in +%HotSpotWorkSpace%\build\windows\makefiles\makedeps.make. + +Note that it appears that some options set in the IDE (for example, +the default executable) show up not in the .dsp file, but in the .opt +file, so the automatic regeneration of the .dsp file should not +destroy the project settings. However, makedeps.make should be edited +to supply per-file compiler options. + +To build adlc from within the IDE for debugging purposes: + +1) in MS Developer Studio, open ADLCompiler.dsw: + menu File -> Open Workspace... + select & double-click ADLCompiler.dsw + +2) rebuild all (debug mode is enough) + menu Build -> Rebuild All (make sure Win32 Debug version is selected) + + +__Testing the VM________________________________________________________________ + +To test the VM using the Tonga Testsuite, use testlook. testlook is a very +simple testing framework on top of Tonga which allows us to use one (Tonga) +test file, that can be extended with attributes. + +1) copy %HotSpotWorkSpace%\test\testlook.bat onto PC (preferably + %HotSpotBuildSpace%\bin, which should ideally be in the path) + +2) run testlook or testlook help for details + +3) to run testlook you need to have Tonga mounted: + net use T: \\tapas\export1\psqe + + +__HotJava under HotSpot_________________________________________________________ + +To run HotJava, use the .bat file %HotSpotWorkSpace%\test\h.bat. Copy +it into %HotSpotBuildSpace%/ (which ideally is in the path) and run +HotJava: h java (e.g., h java_g -Xint). + + +__Preferred directory setup under Windows NT____________________________________ + +Within the HotSpot group we are using the following directory setup: + +D:\jdk1.2 - where we install the JDK + +The following drives are mounted for testing/putbacks/etc.: + +net use T: \\tapas\export1\psqe +net use Y: \\rschmidt\GammaBase +net use Z: \\animorphic\animorphic --- old/hotspot/build/windows/build.bat 2009-08-01 04:17:08.620624329 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,107 +0,0 @@ -@echo off -REM -REM Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. -REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -REM -REM This code is free software; you can redistribute it and/or modify it -REM under the terms of the GNU General Public License version 2 only, as -REM published by the Free Software Foundation. -REM -REM This code is distributed in the hope that it will be useful, but WITHOUT -REM ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -REM FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -REM version 2 for more details (a copy is included in the LICENSE file that -REM accompanied this code). -REM -REM You should have received a copy of the GNU General Public License version -REM 2 along with this work; if not, write to the Free Software Foundation, -REM Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
-REM -REM Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, -REM CA 95054 USA or visit www.sun.com if you need additional information or -REM have any questions. -REM -REM - - -REM -REM Since we don't have uname and we could be cross-compiling, -REM Use the compiler to determine which ARCH we are building -REM -cl 2>&1 | grep "IA-64" >NUL -if %errorlevel% == 0 goto isia64 -cl 2>&1 | grep "AMD64" >NUL -if %errorlevel% == 0 goto amd64 -set ARCH=x86 -set BUILDARCH=i486 -set Platform_arch=x86 -set Platform_arch_model=x86_32 -goto end -:amd64 -set LP64=1 -set ARCH=x86 -set BUILDARCH=amd64 -set Platform_arch=x86 -set Platform_arch_model=x86_64 -goto end -:isia64 -set LP64=1 -set ARCH=ia64 -set Platform_arch=ia64 -set Platform_arch_model=ia64 -:end - -if "%4" == "" goto usage -if not "%7" == "" goto usage - -if "%1" == "product" goto test1 -if "%1" == "debug" goto test1 -if "%1" == "fastdebug" goto test1 -goto usage - -:test1 -if "%2" == "core" goto test2 -if "%2" == "kernel" goto test2 -if "%2" == "compiler1" goto test2 -if "%2" == "compiler2" goto test2 -if "%2" == "tiered" goto test2 -if "%2" == "adlc" goto build_adlc - -goto usage - -:test2 -REM check_j2se_version -REM jvmti.make requires J2SE 1.4.x or newer. -REM If not found then fail fast. -%4\bin\javap javax.xml.transform.TransformerFactory >NUL -if %errorlevel% == 0 goto build -echo. -echo J2SE version found at %4\bin\java: -%4\bin\java -version -echo. -echo An XSLT processor (J2SE 1.4.x or newer) is required to -echo bootstrap this build -echo. - -goto usage - -:build -nmake -f %3/build/windows/build.make Variant=%2 WorkSpace=%3 BootStrapDir=%4 BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION="%5" %1 -goto end - -:build_adlc -nmake -f %3/build/windows/build.make Variant=compiler2 WorkSpace=%3 BootStrapDir=%4 BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION=%5 ADLC_ONLY=1 %1 -goto end - -:usage -echo Usage: build flavor version workspace bootstrap_dir [build_id] [windbg_home] -echo. -echo where: -echo flavor is "product", "debug" or "fastdebug", -echo version is "core", "kernel", "compiler1", "compiler2", or "tiered", -echo workspace is source directory without trailing slash, -echo bootstrap_dir is a full path to echo a JDK in which bin/java -echo and bin/javac are present and working, and echo build_id is an -echo optional build identifier displayed by java -version - -:end --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/build.bat 2009-08-01 04:17:08.544914031 +0100 @@ -0,0 +1,107 @@ +@echo off +REM +REM Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. +REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +REM +REM This code is free software; you can redistribute it and/or modify it +REM under the terms of the GNU General Public License version 2 only, as +REM published by the Free Software Foundation. +REM +REM This code is distributed in the hope that it will be useful, but WITHOUT +REM ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +REM FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +REM version 2 for more details (a copy is included in the LICENSE file that +REM accompanied this code). +REM +REM You should have received a copy of the GNU General Public License version +REM 2 along with this work; if not, write to the Free Software Foundation, +REM Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+REM +REM Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +REM CA 95054 USA or visit www.sun.com if you need additional information or +REM have any questions. +REM +REM + + +REM +REM Since we don't have uname and we could be cross-compiling, +REM Use the compiler to determine which ARCH we are building +REM +cl 2>&1 | grep "IA-64" >NUL +if %errorlevel% == 0 goto isia64 +cl 2>&1 | grep "AMD64" >NUL +if %errorlevel% == 0 goto amd64 +set ARCH=x86 +set BUILDARCH=i486 +set Platform_arch=x86 +set Platform_arch_model=x86_32 +goto end +:amd64 +set LP64=1 +set ARCH=x86 +set BUILDARCH=amd64 +set Platform_arch=x86 +set Platform_arch_model=x86_64 +goto end +:isia64 +set LP64=1 +set ARCH=ia64 +set Platform_arch=ia64 +set Platform_arch_model=ia64 +:end + +if "%4" == "" goto usage +if not "%7" == "" goto usage + +if "%1" == "product" goto test1 +if "%1" == "debug" goto test1 +if "%1" == "fastdebug" goto test1 +goto usage + +:test1 +if "%2" == "core" goto test2 +if "%2" == "kernel" goto test2 +if "%2" == "compiler1" goto test2 +if "%2" == "compiler2" goto test2 +if "%2" == "tiered" goto test2 +if "%2" == "adlc" goto build_adlc + +goto usage + +:test2 +REM check_j2se_version +REM jvmti.make requires J2SE 1.4.x or newer. +REM If not found then fail fast. +%4\bin\javap javax.xml.transform.TransformerFactory >NUL +if %errorlevel% == 0 goto build +echo. +echo J2SE version found at %4\bin\java: +%4\bin\java -version +echo. +echo An XSLT processor (J2SE 1.4.x or newer) is required to +echo bootstrap this build +echo. + +goto usage + +:build +nmake -f %3/make/windows/build.make Variant=%2 WorkSpace=%3 BootStrapDir=%4 BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION="%5" %1 +goto end + +:build_adlc +nmake -f %3/make/windows/build.make Variant=compiler2 WorkSpace=%3 BootStrapDir=%4 BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION=%5 ADLC_ONLY=1 %1 +goto end + +:usage +echo Usage: build flavor version workspace bootstrap_dir [build_id] [windbg_home] +echo. +echo where: +echo flavor is "product", "debug" or "fastdebug", +echo version is "core", "kernel", "compiler1", "compiler2", or "tiered", +echo workspace is source directory without trailing slash, +echo bootstrap_dir is a full path to echo a JDK in which bin/java +echo and bin/javac are present and working, and echo build_id is an +echo optional build identifier displayed by java -version + +:end --- old/hotspot/build/windows/build.make 2009-08-01 04:17:09.426023499 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,309 +0,0 @@ -# -# Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, -# CA 95054 USA or visit www.sun.com if you need additional information or -# have any questions. -# -# - -# Note: this makefile is invoked both from build.bat and from the J2SE -# control workspace in exactly the same manner; the required -# environment variables (Variant, WorkSpace, BootStrapDir, BuildUser, HOTSPOT_BUILD_VERSION) -# are passed in as command line arguments. - -# SA components are built if BUILD_WIN_SA=1 is specified. -# See notes in README. This produces files: -# 1. sa-jdi.jar - This is built before building jvm.dll -# 2. sawindbg[_g].dll - Native library for SA - This is built after jvm.dll -# - Also, .lib, .map, .pdb. -# -# Please refer to ./makefiles/sa.make - -# If we haven't set an ARCH yet use x86 -# create.bat and build.bat will set it, if used. -!ifndef ARCH -ARCH=x86 -!endif - - -# Must be one of these values (if value comes in from env, can't trust it) -!if "$(ARCH)" != "x86" -!if "$(ARCH)" != "ia64" -ARCH=x86 -!endif -!endif - -# At this point we should be certain that ARCH has a definition -# now determine the BUILDARCH -# - -# the default BUILDARCH -BUILDARCH=i486 - -# Allow control workspace to force Itanium or AMD64 builds with LP64 -ARCH_TEXT= -!ifdef LP64 -!if "$(LP64)" == "1" -ARCH_TEXT=64-Bit -!if "$(ARCH)" == "x86" -BUILDARCH=amd64 -!else -BUILDARCH=ia64 -!endif -!endif -!endif - -!if "$(BUILDARCH)" != "amd64" -!if "$(BUILDARCH)" != "ia64" -!ifndef CC_INTERP -FORCE_TIERED=1 -!endif -!endif -!endif - -!if "$(BUILDARCH)" == "amd64" -Platform_arch=x86 -Platform_arch_model=x86_64 -!endif -!if "$(BUILDARCH)" == "i486" -Platform_arch=x86 -Platform_arch_model=x86_32 -!endif - -# Supply these from the command line or the environment -# It doesn't make sense to default this one -Variant= -# It doesn't make sense to default this one -WorkSpace= - -variantDir = windows_$(BUILDARCH)_$(Variant) - -realVariant=$(Variant) -VARIANT_TEXT=Core -!if "$(Variant)" == "compiler1" -VARIANT_TEXT=Client -!elseif "$(Variant)" == "compiler2" -!ifdef FORCE_TIERED -VARIANT_TEXT=Server -realVariant=tiered -!else -VARIANT_TEXT=Server -!endif -!elseif "$(Variant)" == "tiered" -VARIANT_TEXT=Tiered -!elseif "$(Variant)" == "kernel" -VARIANT_TEXT=Kernel -!endif - -######################################################################### -# Parameters for VERSIONINFO resource for jvm[_g].dll. -# These can be overridden via the nmake.exe command line. -# They are overridden by RE during the control builds. -# -!include "$(WorkSpace)/make/hotspot_version" - -# Define HOTSPOT_VM_DISTRO based on settings in build/hotspot_distro -# or build/closed/hotspot_distro. -!ifndef HOTSPOT_VM_DISTRO -!if exists($(WorkSpace)\build\closed) -!include $(WorkSpace)\build\closed\hotspot_distro -!else -!include $(WorkSpace)\build\hotspot_distro -!endif -!endif - -# Following the Web Start / Plugin model here.... -# We can have update versions like "01a", but Windows requires -# we use only integers in the file version field. 
So: -# JDK_UPDATE_VER = JDK_UPDATE_VERSION * 10 + EXCEPTION_VERSION -# -JDK_UPDATE_VER=0 -JDK_BUILD_NUMBER=0 - -HS_FILEDESC=$(HOTSPOT_VM_DISTRO) $(ARCH_TEXT) $(VARIANT_TEXT) VM - -# JDK ProductVersion: -# 1.5.0_-b will have DLL version 5.0.wx*10.yz -# Thus, 1.5.0_10-b04 will be 5.0.100.4 -# 1.6.0-b01 will be 6.0.0.1 -# 1.6.0_01a-b02 will be 6.0.11.2 -# -# JDK_* variables are defined in make/hotspot_version or on command line -# -JDK_VER=$(JDK_MINOR_VER),$(JDK_MICRO_VER),$(JDK_UPDATE_VER),$(JDK_BUILD_NUMBER) -JDK_DOTVER=$(JDK_MINOR_VER).$(JDK_MICRO_VER).$(JDK_UPDATE_VER).$(JDK_BUILD_NUMBER) -!if "$(JRE_RELEASE_VERSION)" == "" -JRE_RELEASE_VER=$(JDK_MAJOR_VER).$(JDK_MINOR_VER).$(JDK_MICRO_VER) -!else -JRE_RELEASE_VER=$(JRE_RELEASE_VERSION) -!endif -!if "$(JDK_MKTG_VERSION)" == "" -JDK_MKTG_VERSION=$(JDK_MINOR_VER).$(JDK_MICRO_VER) -!endif - -# Hotspot Express VM FileVersion: -# 10.0-b will have DLL version 10.0.0.yz (need 4 numbers). -# -# HS_* variables are defined in make/hotspot_version -# -HS_VER=$(HS_MAJOR_VER),$(HS_MINOR_VER),0,$(HS_BUILD_NUMBER) -HS_DOTVER=$(HS_MAJOR_VER).$(HS_MINOR_VER).0.$(HS_BUILD_NUMBER) - -!if "$(HOTSPOT_RELEASE_VERSION)" == "" -HOTSPOT_RELEASE_VERSION=$(HS_MAJOR_VER).$(HS_MINOR_VER)-b$(HS_BUILD_NUMBER) -!endif - -!if "$(HOTSPOT_BUILD_VERSION)" == "" -HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION) -!else -HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION) -!endif - -# End VERSIONINFO parameters - - -# We don't support SA on ia64, and we can't -# build it if we are using a version of Vis Studio -# older than .Net 2003. -# SA_INCLUDE and SA_LIB are hold-overs from a previous -# implementation in which we could build SA using -# Debugging Tools For Windows, in which the .h/.lib files -# and the .dlls are in different places than -# they are for Vis Studio .Net 2003. -# If that code ever needs to be resurrected, these vars -# can be set here. They are used in makefiles/sa.make. - -checkSA:: - -!if "$(BUILD_WIN_SA)" != "1" -checkSA:: - @echo Not building SA: BUILD_WIN_SA != 1 - -!elseif "$(ARCH)" == "ia64" -BUILD_WIN_SA = 0 -checkSA:: - @echo Not building SA: ARCH = ia64 - -!elseif exist("$(MSVCDIR)\PlatformSDK\Include\dbgeng.h") -# These don't have to be set because the default -# setting of INCLUDE and LIB already contain the needed dirs. -SA_INCLUDE = -SA_LIB = - -!elseif exist("$(SYSTEMROOT)\..\Program Files\Microsoft SDK\include\dbgeng.h") -# These don't have to be set because the default -# setting of INCLUDE and LIB already contain the needed dirs. -SA_INCLUDE = -SA_LIB = - -!else -checkSA:: - @echo . - @echo ERROR: Can't build SA because dbgeng.h does not exist here: - @echo $(MSVCDIR)\PlatformSDK\Include\dbgeng.h - @echo nor here: - @echo $(SYSTEMROOT)\..\Program Files\Microsoft SDK\include\dbgeng.h - @echo You must use Vis. Studio .Net 2003 on Win 32, and you must - @echo have the Microsoft SDK installed on Win amd64. - @echo You can disable building of SA by specifying BUILD_WIN_SA = 0 - @echo . && false -!endif # ! "$(BUILD_WIN_SA)" != "1" - -######################################################################### - -# With the jvm_g.dll now being named jvm.dll, we can't build both and place -# the dll's in the same directory, so we only build one at a time, -# re-directing the output to different output directories (done by user -# of this makefile). 
-# -defaultTarget: product - -# The product or release build is an optimized build, and is the default - -# note that since all the build targets depend on local.make that BUILDARCH -# and Platform_arch and Platform_arch_model will get set in local.make -# and there is no need to pass them thru here on the command line -# -product release optimized: checks $(variantDir) $(variantDir)\local.make sanity - cd $(variantDir) - nmake -nologo -f $(WorkSpace)\build\windows\makefiles\top.make BUILD_FLAVOR=product ARCH=$(ARCH) - -# The debug or jvmg (all the same thing) is an optional build -debug jvmg: checks $(variantDir) $(variantDir)\local.make sanity - cd $(variantDir) - nmake -nologo -f $(WorkSpace)\build\windows\makefiles\top.make BUILD_FLAVOR=debug ARCH=$(ARCH) -fastdebug: checks $(variantDir) $(variantDir)\local.make sanity - cd $(variantDir) - nmake -nologo -f $(WorkSpace)\build\windows\makefiles\top.make BUILD_FLAVOR=fastdebug ARCH=$(ARCH) - -develop: checks $(variantDir) $(variantDir)\local.make sanity - cd $(variantDir) - nmake -nologo -f $(WorkSpace)\build\windows\makefiles\top.make BUILD_FLAVOR=product DEVELOP=1 ARCH=$(ARCH) - -sanity: - @ echo; - @ cd $(variantDir) - @ nmake -nologo -f $(WorkSpace)\build\windows\makefiles\sanity.make - @ cd .. - @ echo; - -clean: checkVariant - - rm -r -f $(variantDir) - -$(variantDir): - mkdir $(variantDir) - -$(variantDir)\local.make: checks - @ echo # Generated file > $@ - @ echo Variant=$(realVariant) >> $@ - @ echo WorkSpace=$(WorkSpace) >> $@ - @ echo BootStrapDir=$(BootStrapDir) >> $@ - @ if "$(USERNAME)" NEQ "" echo BuildUser=$(USERNAME) >> $@ - @ echo HS_VER=$(HS_VER) >> $@ - @ echo HS_DOTVER=$(HS_DOTVER) >> $@ - @ echo HS_COMPANY=$(COMPANY_NAME) >> $@ - @ echo HS_FILEDESC=$(HS_FILEDESC) >> $@ - @ echo HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO) >> $@ - @ echo HS_COPYRIGHT=$(HOTSPOT_VM_COPYRIGHT) >> $@ - @ echo HS_NAME=$(PRODUCT_NAME) $(JDK_MKTG_VERSION) >> $@ - @ echo HS_BUILD_VER=$(HS_BUILD_VER) >> $@ - @ echo BUILD_WIN_SA=$(BUILD_WIN_SA) >> $@ - @ echo SA_BUILD_VERSION=$(HS_BUILD_VER) >> $@ - @ echo SA_INCLUDE=$(SA_INCLUDE) >> $@ - @ echo SA_LIB=$(SA_LIB) >> $@ - @ echo JDK_VER=$(JDK_VER) >> $@ - @ echo JDK_DOTVER=$(JDK_DOTVER) >> $@ - @ echo JRE_RELEASE_VER=$(JRE_RELEASE_VER) >> $@ - @ echo BUILDARCH=$(BUILDARCH) >> $@ - @ echo Platform_arch=$(Platform_arch) >> $@ - @ echo Platform_arch_model=$(Platform_arch_model) >> $@ - @ sh $(WorkSpace)/build/windows/get_msc_ver.sh >> $@ - -checks: checkVariant checkWorkSpace checkSA - -checkVariant: - @ if "$(Variant)"=="" echo Need to specify "Variant=[tiered|compiler2|compiler1|kernel|core]" && false - @ if "$(Variant)" NEQ "tiered" if "$(Variant)" NEQ "compiler2" if "$(Variant)" NEQ "compiler1" if "$(Variant)" NEQ "kernel" if "$(Variant)" NEQ "core" \ - echo Need to specify "Variant=[tiered|compiler2|compiler1|kernel|core]" && false - -checkWorkSpace: - @ if "$(WorkSpace)"=="" echo Need to specify "WorkSpace=..." && false - -checkBuildID: - @ if "$(BuildID)"=="" echo Need to specify "BuildID=..." && false --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/build.make 2009-08-01 04:17:09.356323266 +0100 @@ -0,0 +1,286 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Note: this makefile is invoked both from build.bat and from the J2SE +# control workspace in exactly the same manner; the required +# environment variables (Variant, WorkSpace, BootStrapDir, BuildUser, HOTSPOT_BUILD_VERSION) +# are passed in as command line arguments. + +# SA components are built if BUILD_WIN_SA=1 is specified. +# See notes in README. This produces files: +# 1. sa-jdi.jar - This is built before building jvm.dll +# 2. sawindbg[_g].dll - Native library for SA - This is built after jvm.dll +# - Also, .lib, .map, .pdb. +# +# Please refer to ./makefiles/sa.make + +# If we haven't set an ARCH yet use x86 +# create.bat and build.bat will set it, if used. +!ifndef ARCH +ARCH=x86 +!endif + + +# Must be one of these values (if value comes in from env, can't trust it) +!if "$(ARCH)" != "x86" +!if "$(ARCH)" != "ia64" +ARCH=x86 +!endif +!endif + +# At this point we should be certain that ARCH has a definition +# now determine the BUILDARCH +# + +# the default BUILDARCH +BUILDARCH=i486 + +# Allow control workspace to force Itanium or AMD64 builds with LP64 +ARCH_TEXT= +!ifdef LP64 +!if "$(LP64)" == "1" +ARCH_TEXT=64-Bit +!if "$(ARCH)" == "x86" +BUILDARCH=amd64 +!else +BUILDARCH=ia64 +!endif +!endif +!endif + +!if "$(BUILDARCH)" != "amd64" +!if "$(BUILDARCH)" != "ia64" +!ifndef CC_INTERP +FORCE_TIERED=1 +!endif +!endif +!endif + +!if "$(BUILDARCH)" == "amd64" +Platform_arch=x86 +Platform_arch_model=x86_64 +!endif +!if "$(BUILDARCH)" == "i486" +Platform_arch=x86 +Platform_arch_model=x86_32 +!endif + +# Supply these from the command line or the environment +# It doesn't make sense to default this one +Variant= +# It doesn't make sense to default this one +WorkSpace= + +variantDir = windows_$(BUILDARCH)_$(Variant) + +realVariant=$(Variant) +VARIANT_TEXT=Core +!if "$(Variant)" == "compiler1" +VARIANT_TEXT=Client +!elseif "$(Variant)" == "compiler2" +!ifdef FORCE_TIERED +VARIANT_TEXT=Server +realVariant=tiered +!else +VARIANT_TEXT=Server +!endif +!elseif "$(Variant)" == "tiered" +VARIANT_TEXT=Tiered +!elseif "$(Variant)" == "kernel" +VARIANT_TEXT=Kernel +!endif + +######################################################################### +# Parameters for VERSIONINFO resource for jvm[_g].dll. +# These can be overridden via the nmake.exe command line. +# They are overridden by RE during the control builds. +# +!include "$(WorkSpace)/make/hotspot_version" + +# Define HOTSPOT_VM_DISTRO based on settings in make/openjdk_distro +# or make/hotspot_distro. +!ifndef HOTSPOT_VM_DISTRO +!if exists($(WorkSpace)\src\closed) +!include $(WorkSpace)\make\hotspot_distro +!else +!include $(WorkSpace)\make\openjdk_distro +!endif +!endif + +# Following the Web Start / Plugin model here.... 
+# We can have update versions like "01a", but Windows requires +# we use only integers in the file version field. So: +# JDK_UPDATE_VER = JDK_UPDATE_VERSION * 10 + EXCEPTION_VERSION +# +JDK_UPDATE_VER=0 +JDK_BUILD_NUMBER=0 + +HS_FILEDESC=$(HOTSPOT_VM_DISTRO) $(ARCH_TEXT) $(VARIANT_TEXT) VM + +# JDK ProductVersion: +# 1.5.0_-b will have DLL version 5.0.wx*10.yz +# Thus, 1.5.0_10-b04 will be 5.0.100.4 +# 1.6.0-b01 will be 6.0.0.1 +# 1.6.0_01a-b02 will be 6.0.11.2 +# +# JDK_* variables are defined in make/hotspot_version or on command line +# +JDK_VER=$(JDK_MINOR_VER),$(JDK_MICRO_VER),$(JDK_UPDATE_VER),$(JDK_BUILD_NUMBER) +JDK_DOTVER=$(JDK_MINOR_VER).$(JDK_MICRO_VER).$(JDK_UPDATE_VER).$(JDK_BUILD_NUMBER) +!if "$(JRE_RELEASE_VERSION)" == "" +JRE_RELEASE_VER=$(JDK_MAJOR_VER).$(JDK_MINOR_VER).$(JDK_MICRO_VER) +!else +JRE_RELEASE_VER=$(JRE_RELEASE_VERSION) +!endif +!if "$(JDK_MKTG_VERSION)" == "" +JDK_MKTG_VERSION=$(JDK_MINOR_VER).$(JDK_MICRO_VER) +!endif + +# Hotspot Express VM FileVersion: +# 10.0-b will have DLL version 10.0.0.yz (need 4 numbers). +# +# HS_* variables are defined in make/hotspot_version +# +HS_VER=$(HS_MAJOR_VER),$(HS_MINOR_VER),0,$(HS_BUILD_NUMBER) +HS_DOTVER=$(HS_MAJOR_VER).$(HS_MINOR_VER).0.$(HS_BUILD_NUMBER) + +!if "$(HOTSPOT_RELEASE_VERSION)" == "" +HOTSPOT_RELEASE_VERSION=$(HS_MAJOR_VER).$(HS_MINOR_VER)-b$(HS_BUILD_NUMBER) +!endif + +!if "$(HOTSPOT_BUILD_VERSION)" == "" +HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION) +!else +HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION) +!endif + +# End VERSIONINFO parameters + + +# We don't support SA on ia64, and we can't +# build it if we are using a version of Vis Studio +# older than .Net 2003. +# SA_INCLUDE and SA_LIB are hold-overs from a previous +# implementation in which we could build SA using +# Debugging Tools For Windows, in which the .h/.lib files +# and the .dlls are in different places than +# they are for Vis Studio .Net 2003. +# If that code ever needs to be resurrected, these vars +# can be set here. They are used in makefiles/sa.make. + +checkSA:: + +!if "$(BUILD_WIN_SA)" != "1" +checkSA:: + @echo Not building SA: BUILD_WIN_SA != 1 + +!elseif "$(ARCH)" == "ia64" +BUILD_WIN_SA = 0 +checkSA:: + @echo Not building SA: ARCH = ia64 + +!endif # ! "$(BUILD_WIN_SA)" != "1" + +######################################################################### + +# With the jvm_g.dll now being named jvm.dll, we can't build both and place +# the dll's in the same directory, so we only build one at a time, +# re-directing the output to different output directories (done by user +# of this makefile). 
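# For illustration only -- the paths and the variant shown are placeholders,
# not defaults: build.bat ends up driving this makefile with a command line
# roughly like
#
#   nmake -f D:\hs\make\windows\build.make Variant=compiler2 WorkSpace=D:\hs BootStrapDir=D:\jdk BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION="" product
#
# after which the product/debug/fastdebug targets below change into the
# windows_<arch>_<variant> build directory and re-invoke nmake on
# makefiles/top.make.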
+# +defaultTarget: product + +# The product or release build is an optimized build, and is the default + +# note that since all the build targets depend on local.make that BUILDARCH +# and Platform_arch and Platform_arch_model will get set in local.make +# and there is no need to pass them thru here on the command line +# +product release optimized: checks $(variantDir) $(variantDir)\local.make sanity + cd $(variantDir) + nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product ARCH=$(ARCH) + +# The debug or jvmg (all the same thing) is an optional build +debug jvmg: checks $(variantDir) $(variantDir)\local.make sanity + cd $(variantDir) + nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=debug ARCH=$(ARCH) +fastdebug: checks $(variantDir) $(variantDir)\local.make sanity + cd $(variantDir) + nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=fastdebug ARCH=$(ARCH) + +develop: checks $(variantDir) $(variantDir)\local.make sanity + cd $(variantDir) + nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product DEVELOP=1 ARCH=$(ARCH) + +sanity: + @ echo; + @ cd $(variantDir) + @ nmake -nologo -f $(WorkSpace)\make\windows\makefiles\sanity.make + @ cd .. + @ echo; + +clean: checkVariant + - rm -r -f $(variantDir) + +$(variantDir): + mkdir $(variantDir) + +$(variantDir)\local.make: checks + @ echo # Generated file > $@ + @ echo Variant=$(realVariant) >> $@ + @ echo WorkSpace=$(WorkSpace) >> $@ + @ echo BootStrapDir=$(BootStrapDir) >> $@ + @ if "$(USERNAME)" NEQ "" echo BuildUser=$(USERNAME) >> $@ + @ echo HS_VER=$(HS_VER) >> $@ + @ echo HS_DOTVER=$(HS_DOTVER) >> $@ + @ echo HS_COMPANY=$(COMPANY_NAME) >> $@ + @ echo HS_FILEDESC=$(HS_FILEDESC) >> $@ + @ echo HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO) >> $@ + @ echo HS_COPYRIGHT=$(HOTSPOT_VM_COPYRIGHT) >> $@ + @ echo HS_NAME=$(PRODUCT_NAME) $(JDK_MKTG_VERSION) >> $@ + @ echo HS_BUILD_VER=$(HS_BUILD_VER) >> $@ + @ echo BUILD_WIN_SA=$(BUILD_WIN_SA) >> $@ + @ echo SA_BUILD_VERSION=$(HS_BUILD_VER) >> $@ + @ echo SA_INCLUDE=$(SA_INCLUDE) >> $@ + @ echo SA_LIB=$(SA_LIB) >> $@ + @ echo JDK_VER=$(JDK_VER) >> $@ + @ echo JDK_DOTVER=$(JDK_DOTVER) >> $@ + @ echo JRE_RELEASE_VER=$(JRE_RELEASE_VER) >> $@ + @ echo BUILDARCH=$(BUILDARCH) >> $@ + @ echo Platform_arch=$(Platform_arch) >> $@ + @ echo Platform_arch_model=$(Platform_arch_model) >> $@ + @ sh $(WorkSpace)/make/windows/get_msc_ver.sh >> $@ + +checks: checkVariant checkWorkSpace checkSA + +checkVariant: + @ if "$(Variant)"=="" echo Need to specify "Variant=[tiered|compiler2|compiler1|kernel|core]" && false + @ if "$(Variant)" NEQ "tiered" if "$(Variant)" NEQ "compiler2" if "$(Variant)" NEQ "compiler1" if "$(Variant)" NEQ "kernel" if "$(Variant)" NEQ "core" \ + echo Need to specify "Variant=[tiered|compiler2|compiler1|kernel|core]" && false + +checkWorkSpace: + @ if "$(WorkSpace)"=="" echo Need to specify "WorkSpace=..." && false + +checkBuildID: + @ if "$(BuildID)"=="" echo Need to specify "BuildID=..." && false --- old/hotspot/build/windows/build_vm_def.sh 2009-08-01 04:17:10.347229694 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,57 +0,0 @@ -# -# Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. 
-# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, -# CA 95054 USA or visit www.sun.com if you need additional information or -# have any questions. -# -# - -# This shell script builds a vm.def file for the current VM variant. -# The .def file exports vtbl symbols which allow the Serviceability -# Agent to run on Windows. See build/windows/projectfiles/*/vm.def -# for more information. -# -# The script expects to be executed in the directory containing all of -# the object files. - -# Note that we currently do not have a way to set HotSpotMksHome in -# the batch build, but so far this has not seemed to be a problem. The -# reason this environment variable is necessary is that it seems that -# Windows truncates very long PATHs when executing shells like MKS's -# sh, and it has been found that sometimes `which sh` fails. -if [ "x$HOTSPOTMKSHOME" != "x" ]; then - MKS_HOME="$HOTSPOTMKSHOME" -else - SH=`which sh` - MKS_HOME=`dirname "$SH"` -fi - -echo "EXPORTS" > vm1.def - -AWK="$MKS_HOME/awk.exe" -GREP="$MKS_HOME/grep.exe" -SORT="$MKS_HOME/sort.exe" -UNIQ="$MKS_HOME/uniq.exe" -CAT="$MKS_HOME/cat.exe" -RM="$MKS_HOME/rm.exe" -DUMPBIN="link.exe /dump" - -$DUMPBIN /symbols *.obj | "$GREP" "??_7.*@@6B@" | "$AWK" '{print $7}' | "$SORT" | "$UNIQ" > vm2.def -"$CAT" vm1.def vm2.def > vm.def -"$RM" -f vm1.def vm2.def --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/build_vm_def.sh 2009-08-01 04:17:10.275991679 +0100 @@ -0,0 +1,57 @@ +# +# Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This shell script builds a vm.def file for the current VM variant. +# The .def file exports vtbl symbols which allow the Serviceability +# Agent to run on Windows. See make/windows/projectfiles/*/vm.def +# for more information. +# +# The script expects to be executed in the directory containing all of +# the object files. 
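# As an illustration only (the build directory name is a placeholder), a
# by-hand run would look something like
#
#   cd <variant_build_dir> && sh build_vm_def.sh
#
# The resulting vm.def consists of an EXPORTS line followed by one decorated
# vtable symbol (matching ??_7*@@6B@) per line, as produced by the
# dumpbin/grep/awk pipeline at the end of this script.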
+ +# Note that we currently do not have a way to set HotSpotMksHome in +# the batch build, but so far this has not seemed to be a problem. The +# reason this environment variable is necessary is that it seems that +# Windows truncates very long PATHs when executing shells like MKS's +# sh, and it has been found that sometimes `which sh` fails. +if [ "x$HOTSPOTMKSHOME" != "x" ]; then + MKS_HOME="$HOTSPOTMKSHOME" +else + SH=`which sh` + MKS_HOME=`dirname "$SH"` +fi + +echo "EXPORTS" > vm1.def + +AWK="$MKS_HOME/awk.exe" +GREP="$MKS_HOME/grep.exe" +SORT="$MKS_HOME/sort.exe" +UNIQ="$MKS_HOME/uniq.exe" +CAT="$MKS_HOME/cat.exe" +RM="$MKS_HOME/rm.exe" +DUMPBIN="link.exe /dump" + +$DUMPBIN /symbols *.obj | "$GREP" "??_7.*@@6B@" | "$AWK" '{print $7}' | "$SORT" | "$UNIQ" > vm2.def +"$CAT" vm1.def vm2.def > vm.def +"$RM" -f vm1.def vm2.def --- old/hotspot/build/windows/create.bat 2009-08-01 04:17:11.228444325 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,179 +0,0 @@ -@echo off -REM -REM Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. -REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -REM -REM This code is free software; you can redistribute it and/or modify it -REM under the terms of the GNU General Public License version 2 only, as -REM published by the Free Software Foundation. -REM -REM This code is distributed in the hope that it will be useful, but WITHOUT -REM ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -REM FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -REM version 2 for more details (a copy is included in the LICENSE file that -REM accompanied this code). -REM -REM You should have received a copy of the GNU General Public License version -REM 2 along with this work; if not, write to the Free Software Foundation, -REM Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -REM -REM Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, -REM CA 95054 USA or visit www.sun.com if you need additional information or -REM have any questions. -REM -REM - -REM This is the interactive build setup script (as opposed to the batch -REM build execution script). It creates $HotSpotBuildSpace if necessary, -REM copies the appropriate files out of $HotSpotWorkSpace into it, and -REM builds and runs MakeDeps in it. This has the side-effect of creating -REM the vm.vcproj file in the buildspace, which is then used in Visual C++. -REM -REM The generated project file depends upon the include databases. If -REM those are changed then MakeDeps is rerun. 
- -REM -REM Since we don't have uname and we could be cross-compiling, -REM Use the compiler to determine which ARCH we are building -REM -cl 2>&1 | grep "IA-64" >NUL -if %errorlevel% == 0 goto isia64 -cl 2>&1 | grep "AMD64" >NUL -if %errorlevel% == 0 goto amd64 -set ARCH=x86 -set BUILDARCH=i486 -set Platform_arch=x86 -set Platform_arch_model=x86_32 -goto end -:amd64 -set ARCH=x86 -set BUILDARCH=amd64 -set Platform_arch=x86 -set Platform_arch_model=x86_64 -goto end -:isia64 -set ARCH=ia64 -set BUILDARCH=ia64 -set Platform_arch=ia64 -set Platform_arch_model=ia64 -:end - -setlocal - -if "%1" == "" goto usage - -if not "%4" == "" goto usage - -set HotSpotWorkSpace=%1 -set HotSpotBuildSpace=%2 -set HotSpotJDKDist=%3 - -REM figure out MSC version -for /F %%i in ('sh %HotSpotWorkSpace%/build/windows/get_msc_ver.sh') do set %%i - -echo ************************************************************** -if "%MSC_VER%" == "1200" ( -set ProjectFile=vm.dsp -echo Will generate VC6 project {unsupported} -) else ( -set ProjectFile=vm.vcproj -echo Will generate VC7 project -) -echo %ProjectFile% -echo ************************************************************** - -REM Test all variables to see whether the directories they -REM reference exist - -if exist %HotSpotWorkSpace% goto test1 - -echo Error: directory pointed to by HotSpotWorkSpace -echo does not exist, or the variable is not set. -echo. -goto usage - -:test1 -if exist %HotSpotBuildSpace% goto test2 -if not "%HotSpotBuildSpace%" == "" mkdir %HotSpotBuildSpace% -if exist %HotSpotBuildSpace% goto test2 -echo Error: directory pointed to by HotSpotBuildSpace -echo does not exist, or the variable is not set. -echo. -goto usage - -:test2 -if exist %HotSpotJDKDist% goto test3 -echo Error: directory pointed to by %HotSpotJDKDist% -echo does not exist, or the variable is not set. -echo. -goto usage - -:test3 -if not "%HOTSPOTMKSHOME%" == "" goto makedir -echo Warning: please set variable HOTSPOTMKSHOME to place where -echo your MKS/Cygwin installation is -echo. -goto usage - -:makedir -echo NOTE: Using the following settings: -echo HotSpotWorkSpace=%HotSpotWorkSpace% -echo HotSpotBuildSpace=%HotSpotBuildSpace% -echo HotSpotJDKDist=%HotSpotJDKDist% - - -REM This is now safe to do. -:copyfiles -for /D %%i in (compiler1, compiler2, tiered, core, kernel) do ( -if NOT EXIST %HotSpotBuildSpace%\%%i mkdir %HotSpotBuildSpace%\%%i -copy %HotSpotWorkSpace%\build\windows\projectfiles\%%i\* %HotSpotBuildSpace%\%%i\ > NUL -) - -REM force regneration of ProjectFile -if exist %HotSpotBuildSpace%\%ProjectFile% del %HotSpotBuildSpace%\%ProjectFile% - -for /D %%i in (compiler1, compiler2, tiered, core, kernel) do ( - -echo # Generated file! > %HotSpotBuildSpace%\%%i\local.make -echo # Changing a variable below and then deleting %ProjectFile% will cause >> %HotSpotBuildSpace%\%%i\local.make -echo # %ProjectFile% to be regenerated with the new values. Changing the >> %HotSpotBuildSpace%\%%i\local.make -echo # version requires rerunning create.bat. >> %HotSpotBuildSpace%\%%i\local.make -echo. 
>> %HotSpotBuildSpace%\%%i\local.make -echo HOTSPOTWORKSPACE=%HotSpotWorkSpace% >> %HotSpotBuildSpace%\%%i\local.make -echo HOTSPOTBUILDSPACE=%HotSpotBuildSpace% >> %HotSpotBuildSpace%\%%i\local.make -echo HOTSPOTJDKDIST=%HotSpotJDKDist% >> %HotSpotBuildSpace%\%%i\local.make -echo ARCH=%ARCH% >> %HotSpotBuildSpace%\%%i\local.make -echo BUILDARCH=%BUILDARCH% >> %HotSpotBuildSpace%\%%i\local.make -echo Platform_arch=%Platform_arch% >> %HotSpotBuildSpace%\%%i\local.make -echo Platform_arch_model=%Platform_arch_model% >> %HotSpotBuildSpace%\%%i\local.make - -REM build config specific stuff - -pushd %HotSpotBuildSpace%\%%i -nmake /nologo -popd -) - -goto end - -:usage -echo Usage: create HotSpotWorkSpace HotSpotBuildSpace HotSpotJDKDist -echo. -echo This is the interactive build setup script (as opposed to the batch -echo build execution script). It creates HotSpotBuildSpace if necessary, -echo copies the appropriate files out of HotSpotWorkSpace into it, and -echo builds and runs MakeDeps in it. This has the side-effect of creating -echo the %ProjectFile% file in the build space, which is then used in Visual C++. -echo The HotSpotJDKDist defines place where JVM binaries should be placed. -echo Environment variable FORCE_MSC_VER allows to override MSVC version autodetection. -echo. -echo The generated project file depends upon the include databases. If -echo those are changed then MakeDeps is rerun. -echo. -echo NOTE that it is now NOT safe to modify any of the files in the build -echo space, since they may be overwritten whenever this script is run or -echo nmake is run in that directory. - -:end - -endlocal --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/create.bat 2009-08-01 04:17:11.148405633 +0100 @@ -0,0 +1,179 @@ +@echo off +REM +REM Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +REM +REM This code is free software; you can redistribute it and/or modify it +REM under the terms of the GNU General Public License version 2 only, as +REM published by the Free Software Foundation. +REM +REM This code is distributed in the hope that it will be useful, but WITHOUT +REM ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +REM FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +REM version 2 for more details (a copy is included in the LICENSE file that +REM accompanied this code). +REM +REM You should have received a copy of the GNU General Public License version +REM 2 along with this work; if not, write to the Free Software Foundation, +REM Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +REM +REM Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +REM CA 95054 USA or visit www.sun.com if you need additional information or +REM have any questions. +REM +REM + +REM This is the interactive build setup script (as opposed to the batch +REM build execution script). It creates $HotSpotBuildSpace if necessary, +REM copies the appropriate files out of $HotSpotWorkSpace into it, and +REM builds and runs MakeDeps in it. This has the side-effect of creating +REM the vm.vcproj file in the buildspace, which is then used in Visual C++. +REM +REM The generated project file depends upon the include databases. If +REM those are changed then MakeDeps is rerun. 
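REM As an illustration only (the directories are placeholders, not required
REM locations), an interactive run from a shell where cl.exe and the
REM MKS/Cygwin tools are on the PATH might be:
REM
REM   create.bat D:\hotspot D:\hotspot_build D:\jdk_dist
REM
REM i.e. the three arguments are HotSpotWorkSpace, HotSpotBuildSpace and
REM HotSpotJDKDist, in the order given by the usage text at the end of this
REM script.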
+ +REM +REM Since we don't have uname and we could be cross-compiling, +REM Use the compiler to determine which ARCH we are building +REM +cl 2>&1 | grep "IA-64" >NUL +if %errorlevel% == 0 goto isia64 +cl 2>&1 | grep "AMD64" >NUL +if %errorlevel% == 0 goto amd64 +set ARCH=x86 +set BUILDARCH=i486 +set Platform_arch=x86 +set Platform_arch_model=x86_32 +goto end +:amd64 +set ARCH=x86 +set BUILDARCH=amd64 +set Platform_arch=x86 +set Platform_arch_model=x86_64 +goto end +:isia64 +set ARCH=ia64 +set BUILDARCH=ia64 +set Platform_arch=ia64 +set Platform_arch_model=ia64 +:end + +setlocal + +if "%1" == "" goto usage + +if not "%4" == "" goto usage + +set HotSpotWorkSpace=%1 +set HotSpotBuildSpace=%2 +set HotSpotJDKDist=%3 + +REM figure out MSC version +for /F %%i in ('sh %HotSpotWorkSpace%/make/windows/get_msc_ver.sh') do set %%i + +echo ************************************************************** +if "%MSC_VER%" == "1200" ( +set ProjectFile=vm.dsp +echo Will generate VC6 project {unsupported} +) else ( +set ProjectFile=vm.vcproj +echo Will generate VC7 project +) +echo %ProjectFile% +echo ************************************************************** + +REM Test all variables to see whether the directories they +REM reference exist + +if exist %HotSpotWorkSpace% goto test1 + +echo Error: directory pointed to by HotSpotWorkSpace +echo does not exist, or the variable is not set. +echo. +goto usage + +:test1 +if exist %HotSpotBuildSpace% goto test2 +if not "%HotSpotBuildSpace%" == "" mkdir %HotSpotBuildSpace% +if exist %HotSpotBuildSpace% goto test2 +echo Error: directory pointed to by HotSpotBuildSpace +echo does not exist, or the variable is not set. +echo. +goto usage + +:test2 +if exist %HotSpotJDKDist% goto test3 +echo Error: directory pointed to by %HotSpotJDKDist% +echo does not exist, or the variable is not set. +echo. +goto usage + +:test3 +if not "%HOTSPOTMKSHOME%" == "" goto makedir +echo Warning: please set variable HOTSPOTMKSHOME to place where +echo your MKS/Cygwin installation is +echo. +goto usage + +:makedir +echo NOTE: Using the following settings: +echo HotSpotWorkSpace=%HotSpotWorkSpace% +echo HotSpotBuildSpace=%HotSpotBuildSpace% +echo HotSpotJDKDist=%HotSpotJDKDist% + + +REM This is now safe to do. +:copyfiles +for /D %%i in (compiler1, compiler2, tiered, core, kernel) do ( +if NOT EXIST %HotSpotBuildSpace%\%%i mkdir %HotSpotBuildSpace%\%%i +copy %HotSpotWorkSpace%\make\windows\projectfiles\%%i\* %HotSpotBuildSpace%\%%i\ > NUL +) + +REM force regneration of ProjectFile +if exist %HotSpotBuildSpace%\%ProjectFile% del %HotSpotBuildSpace%\%ProjectFile% + +for /D %%i in (compiler1, compiler2, tiered, core, kernel) do ( + +echo # Generated file! > %HotSpotBuildSpace%\%%i\local.make +echo # Changing a variable below and then deleting %ProjectFile% will cause >> %HotSpotBuildSpace%\%%i\local.make +echo # %ProjectFile% to be regenerated with the new values. Changing the >> %HotSpotBuildSpace%\%%i\local.make +echo # version requires rerunning create.bat. >> %HotSpotBuildSpace%\%%i\local.make +echo. 
>> %HotSpotBuildSpace%\%%i\local.make +echo HOTSPOTWORKSPACE=%HotSpotWorkSpace% >> %HotSpotBuildSpace%\%%i\local.make +echo HOTSPOTBUILDSPACE=%HotSpotBuildSpace% >> %HotSpotBuildSpace%\%%i\local.make +echo HOTSPOTJDKDIST=%HotSpotJDKDist% >> %HotSpotBuildSpace%\%%i\local.make +echo ARCH=%ARCH% >> %HotSpotBuildSpace%\%%i\local.make +echo BUILDARCH=%BUILDARCH% >> %HotSpotBuildSpace%\%%i\local.make +echo Platform_arch=%Platform_arch% >> %HotSpotBuildSpace%\%%i\local.make +echo Platform_arch_model=%Platform_arch_model% >> %HotSpotBuildSpace%\%%i\local.make + +REM build config specific stuff + +pushd %HotSpotBuildSpace%\%%i +nmake /nologo +popd +) + +goto end + +:usage +echo Usage: create HotSpotWorkSpace HotSpotBuildSpace HotSpotJDKDist +echo. +echo This is the interactive build setup script (as opposed to the batch +echo build execution script). It creates HotSpotBuildSpace if necessary, +echo copies the appropriate files out of HotSpotWorkSpace into it, and +echo builds and runs MakeDeps in it. This has the side-effect of creating +echo the %ProjectFile% file in the build space, which is then used in Visual C++. +echo The HotSpotJDKDist defines place where JVM binaries should be placed. +echo Environment variable FORCE_MSC_VER allows to override MSVC version autodetection. +echo. +echo The generated project file depends upon the include databases. If +echo those are changed then MakeDeps is rerun. +echo. +echo NOTE that it is now NOT safe to modify any of the files in the build +echo space, since they may be overwritten whenever this script is run or +echo nmake is run in that directory. + +:end + +endlocal --- old/hotspot/build/windows/cross_build.bat 2009-08-01 04:17:12.073288688 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,61 +0,0 @@ -@echo off -REM -REM Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. -REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -REM -REM This code is free software; you can redistribute it and/or modify it -REM under the terms of the GNU General Public License version 2 only, as -REM published by the Free Software Foundation. -REM -REM This code is distributed in the hope that it will be useful, but WITHOUT -REM ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -REM FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -REM version 2 for more details (a copy is included in the LICENSE file that -REM accompanied this code). -REM -REM You should have received a copy of the GNU General Public License version -REM 2 along with this work; if not, write to the Free Software Foundation, -REM Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -REM -REM Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, -REM CA 95054 USA or visit www.sun.com if you need additional information or -REM have any questions. 
-REM -REM - -REM Cross compile IA64 compiler2 VM -REM Usage: -REM cross_compile flavor workspace bootstrap_dir [build_id] -REM %1 %2 %3 %4 -REM -REM Set current directory -for /F %%i in ('cd') do set CD=%%i -echo Setting up Visual C++ Compilation Environment -if "%MSVCDir%" == "" goto setdir1 -goto setenv1 -:setdir1 -SET MSVCDir=C:\Program Files\Microsoft Visual Studio\VC98 -:setenv1 -SET OLDINCLUDE=%INCLUDE% -SET OLDLIB=%LIB% -SET OLDPATH=%PATH% -call "%MSVCDir%\Bin\VCVARS32" -call %2\build\windows\build %1 adlc %2 %3 %4 -SET INCLUDE=%OLDINCLUDE% -SET LIB=%OLDLIB% -SET PATH=%OLDPATH% -echo Setting up 64-BIT Compilation Environment -if "%MSSdk%" == "" goto setdir2 -goto setenv2 -:setdir2 -SET MSSdk=C:\Program Files\Microsoft SDK -:setenv2 -call "%MSSdk%\SetEnv.bat" /XP64 -SET ALT_ADLC_PATH=%CD%\windows_i486_compiler2\generated -call %2\build\windows\build %1 compiler2 %2 %3 %4 -SET INCLUDE=%OLDINCLUDE% -SET LIB=%OLDLIB% -SET PATH=%OLDPATH% -SET OLDINCLUDE= -SET OLDLIB= -SET OLDPATH= --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/cross_build.bat 2009-08-01 04:17:11.999355830 +0100 @@ -0,0 +1,61 @@ +@echo off +REM +REM Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +REM +REM This code is free software; you can redistribute it and/or modify it +REM under the terms of the GNU General Public License version 2 only, as +REM published by the Free Software Foundation. +REM +REM This code is distributed in the hope that it will be useful, but WITHOUT +REM ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +REM FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +REM version 2 for more details (a copy is included in the LICENSE file that +REM accompanied this code). +REM +REM You should have received a copy of the GNU General Public License version +REM 2 along with this work; if not, write to the Free Software Foundation, +REM Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +REM +REM Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +REM CA 95054 USA or visit www.sun.com if you need additional information or +REM have any questions. +REM +REM + +REM Cross compile IA64 compiler2 VM +REM Usage: +REM cross_compile flavor workspace bootstrap_dir [build_id] +REM %1 %2 %3 %4 +REM +REM Set current directory +for /F %%i in ('cd') do set CD=%%i +echo Setting up Visual C++ Compilation Environment +if "%MSVCDir%" == "" goto setdir1 +goto setenv1 +:setdir1 +SET MSVCDir=C:\Program Files\Microsoft Visual Studio\VC98 +:setenv1 +SET OLDINCLUDE=%INCLUDE% +SET OLDLIB=%LIB% +SET OLDPATH=%PATH% +call "%MSVCDir%\Bin\VCVARS32" +call %2\make\windows\build %1 adlc %2 %3 %4 +SET INCLUDE=%OLDINCLUDE% +SET LIB=%OLDLIB% +SET PATH=%OLDPATH% +echo Setting up 64-BIT Compilation Environment +if "%MSSdk%" == "" goto setdir2 +goto setenv2 +:setdir2 +SET MSSdk=C:\Program Files\Microsoft SDK +:setenv2 +call "%MSSdk%\SetEnv.bat" /XP64 +SET ALT_ADLC_PATH=%CD%\windows_i486_compiler2\generated +call %2\make\windows\build %1 compiler2 %2 %3 %4 +SET INCLUDE=%OLDINCLUDE% +SET LIB=%OLDLIB% +SET PATH=%OLDPATH% +SET OLDINCLUDE= +SET OLDLIB= +SET OLDPATH= --- old/hotspot/build/windows/get_msc_ver.sh 2009-08-01 04:17:12.967220289 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,79 +0,0 @@ -# -# Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
-# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, -# CA 95054 USA or visit www.sun.com if you need additional information or -# have any questions. -# -# - -# This shell script echoes "MSC_VER=" -# It ignores the micro version component. -# Examples: -# cl version 12.00.8804 returns "MSC_VER=1200" -# cl version 13.10.3077 returns "MSC_VER=1310" -# cl version 14.00.30701 returns "MSC_VER=1399" (OLD_MSSDK version) -# cl version 14.00.40310.41 returns "MSC_VER=1400" - -# Note that we currently do not have a way to set HotSpotMksHome in -# the batch build, but so far this has not seemed to be a problem. The -# reason this environment variable is necessary is that it seems that -# Windows truncates very long PATHs when executing shells like MKS's -# sh, and it has been found that sometimes `which sh` fails. - -if [ "x$HotSpotMksHome" != "x" ]; then - MKS_HOME="$HotSpotMksHome" -else - SH=`which sh` - MKS_HOME=`dirname "$SH"` -fi - -HEAD="$MKS_HOME/head" -ECHO="$MKS_HOME/echo" -EXPR="$MKS_HOME/expr" -CUT="$MKS_HOME/cut" -SED="$MKS_HOME/sed" - -if [ "x$FORCE_MSC_VER" != "x" ]; then - echo "MSC_VER=$FORCE_MSC_VER" -else - MSC_VER_RAW=`cl 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'` - MSC_VER_MAJOR=`"$ECHO" $MSC_VER_RAW | "$CUT" -d'.' -f1` - MSC_VER_MINOR=`"$ECHO" $MSC_VER_RAW | "$CUT" -d'.' -f2` - MSC_VER_MICRO=`"$ECHO" $MSC_VER_RAW | "$CUT" -d'.' -f3` - if [ "${MSC_VER_MAJOR}" -eq 14 -a "${MSC_VER_MINOR}" -eq 0 -a "${MSC_VER_MICRO}" -eq 30701 ] ; then - # This said 1400 but it was really more like VS2003 (VC7) in terms of options - MSC_VER=1399 - else - MSC_VER=`"$EXPR" $MSC_VER_MAJOR \* 100 + $MSC_VER_MINOR` - fi - echo "MSC_VER=$MSC_VER" - echo "MSC_VER_RAW=$MSC_VER_RAW" -fi - -if [ "x$FORCE_LINK_VER" != "x" ]; then - echo "LINK_VER=$FORCE_LINK_VER" -else - LINK_VER_RAW=`link 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'` - LINK_VER_MAJOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f1` - LINK_VER_MINOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f2` - LINK_VER_MICRO=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f3` - LINK_VER=`"$EXPR" $LINK_VER_MAJOR \* 100 + $LINK_VER_MINOR` - echo "LINK_VER=$LINK_VER" - echo "LINK_VER_RAW=$LINK_VER_RAW" -fi --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/get_msc_ver.sh 2009-08-01 04:17:12.892803171 +0100 @@ -0,0 +1,79 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This shell script echoes "MSC_VER=" +# It ignores the micro version component. +# Examples: +# cl version 12.00.8804 returns "MSC_VER=1200" +# cl version 13.10.3077 returns "MSC_VER=1310" +# cl version 14.00.30701 returns "MSC_VER=1399" (OLD_MSSDK version) +# cl version 14.00.40310.41 returns "MSC_VER=1400" + +# Note that we currently do not have a way to set HotSpotMksHome in +# the batch build, but so far this has not seemed to be a problem. The +# reason this environment variable is necessary is that it seems that +# Windows truncates very long PATHs when executing shells like MKS's +# sh, and it has been found that sometimes `which sh` fails. + +if [ "x$HotSpotMksHome" != "x" ]; then + MKS_HOME="$HotSpotMksHome" +else + SH=`which sh` + MKS_HOME=`dirname "$SH"` +fi + +HEAD="$MKS_HOME/head" +ECHO="$MKS_HOME/echo" +EXPR="$MKS_HOME/expr" +CUT="$MKS_HOME/cut" +SED="$MKS_HOME/sed" + +if [ "x$FORCE_MSC_VER" != "x" ]; then + echo "MSC_VER=$FORCE_MSC_VER" +else + MSC_VER_RAW=`cl 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'` + MSC_VER_MAJOR=`"$ECHO" $MSC_VER_RAW | "$CUT" -d'.' -f1` + MSC_VER_MINOR=`"$ECHO" $MSC_VER_RAW | "$CUT" -d'.' -f2` + MSC_VER_MICRO=`"$ECHO" $MSC_VER_RAW | "$CUT" -d'.' -f3` + if [ "${MSC_VER_MAJOR}" -eq 14 -a "${MSC_VER_MINOR}" -eq 0 -a "${MSC_VER_MICRO}" -eq 30701 ] ; then + # This said 1400 but it was really more like VS2003 (VC7) in terms of options + MSC_VER=1399 + else + MSC_VER=`"$EXPR" $MSC_VER_MAJOR \* 100 + $MSC_VER_MINOR` + fi + echo "MSC_VER=$MSC_VER" + echo "MSC_VER_RAW=$MSC_VER_RAW" +fi + +if [ "x$FORCE_LINK_VER" != "x" ]; then + echo "LINK_VER=$FORCE_LINK_VER" +else + LINK_VER_RAW=`link 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'` + LINK_VER_MAJOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f1` + LINK_VER_MINOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f2` + LINK_VER_MICRO=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' 
-f3` + LINK_VER=`"$EXPR" $LINK_VER_MAJOR \* 100 + $LINK_VER_MINOR` + echo "LINK_VER=$LINK_VER" + echo "LINK_VER_RAW=$LINK_VER_RAW" +fi --- old/hotspot/build/windows/jvmexp.lcf 2009-08-01 04:17:13.831690100 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,10 +0,0 @@ --export:JNI_GetDefaultJavaVMInitArgs --export:JNI_CreateJavaVM --export:JNI_GetCreatedJavaVMs - --export:jio_snprintf --export:jio_printf --export:jio_fprintf --export:jio_vfprintf --export:jio_vsnprintf - --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/jvmexp.lcf 2009-08-01 04:17:13.761002023 +0100 @@ -0,0 +1,10 @@ +-export:JNI_GetDefaultJavaVMInitArgs +-export:JNI_CreateJavaVM +-export:JNI_GetCreatedJavaVMs + +-export:jio_snprintf +-export:jio_printf +-export:jio_fprintf +-export:jio_vfprintf +-export:jio_vsnprintf + --- old/hotspot/build/windows/jvmexp_g.lcf 2009-08-01 04:17:14.140718662 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,10 +0,0 @@ --export:JNI_GetDefaultJavaVMInitArgs --export:JNI_CreateJavaVM --export:JNI_GetCreatedJavaVMs - --export:jio_snprintf --export:jio_printf --export:jio_fprintf --export:jio_vfprintf --export:jio_vsnprintf - --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/jvmexp_g.lcf 2009-08-01 04:17:14.071592017 +0100 @@ -0,0 +1,10 @@ +-export:JNI_GetDefaultJavaVMInitArgs +-export:JNI_CreateJavaVM +-export:JNI_GetCreatedJavaVMs + +-export:jio_snprintf +-export:jio_printf +-export:jio_fprintf +-export:jio_vfprintf +-export:jio_vsnprintf + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/adlc.make 2009-08-01 04:17:14.406207483 +0100 @@ -0,0 +1,120 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +!include $(WorkSpace)/make/windows/makefiles/compile.make + +# Rules for building adlc.exe + +# Need exception handling support here +# $(MS_RUNTIME_OPTION) ( with /D_STATIC_CPPLIB) +# causes adlc.exe to link with the static +# multithread Standard C++ library (libcpmt.lib) instead of +# the dynamic version (msvcprt.lib), which is not included +# in any of the free tools. 
+EXH_FLAGS=$(GX_OPTION) $(MS_RUNTIME_OPTION) + +!ifdef ALT_ADLC_PATH +ADLC=$(ALT_ADLC_PATH)\adlc.exe +!else +ADLC=adlc +!endif + +!ifdef LP64 +ADLCFLAGS=-q -T -D_LP64 +!else +ADLCFLAGS=-q -T -U_LP64 +!endif + + +CPP_INCLUDE_DIRS=\ + /I "..\generated" \ + /I "$(WorkSpace)\src\share\vm\compiler" \ + /I "$(WorkSpace)\src\share\vm\code" \ + /I "$(WorkSpace)\src\share\vm\interpreter" \ + /I "$(WorkSpace)\src\share\vm\classfile" \ + /I "$(WorkSpace)\src\share\vm\asm" \ + /I "$(WorkSpace)\src\share\vm\memory" \ + /I "$(WorkSpace)\src\share\vm\oops" \ + /I "$(WorkSpace)\src\share\vm\prims" \ + /I "$(WorkSpace)\src\share\vm\runtime" \ + /I "$(WorkSpace)\src\share\vm\utilities" \ + /I "$(WorkSpace)\src\share\vm\libadt" \ + /I "$(WorkSpace)\src\share\vm\opto" \ + /I "$(WorkSpace)\src\os\windows\vm" \ + /I "$(WorkSpace)\src\cpu\$(Platform_arch)\vm" + +# NOTE! If you add any files here, you must also update GENERATED_NAMES_IN_INCL +# and MakeDepsIDEOptions in makedeps.make. +GENERATED_NAMES=\ + ad_$(Platform_arch_model).cpp \ + ad_$(Platform_arch_model).hpp \ + ad_$(Platform_arch_model)_clone.cpp \ + ad_$(Platform_arch_model)_expand.cpp \ + ad_$(Platform_arch_model)_format.cpp \ + ad_$(Platform_arch_model)_gen.cpp \ + ad_$(Platform_arch_model)_misc.cpp \ + ad_$(Platform_arch_model)_peephole.cpp \ + ad_$(Platform_arch_model)_pipeline.cpp \ + adGlobals_$(Platform_arch_model).hpp \ + dfa_$(Platform_arch_model).cpp + +# NOTE! This must be kept in sync with GENERATED_NAMES +GENERATED_NAMES_IN_INCL=\ + incls/ad_$(Platform_arch_model).cpp \ + incls/ad_$(Platform_arch_model).hpp \ + incls/ad_$(Platform_arch_model)_clone.cpp \ + incls/ad_$(Platform_arch_model)_expand.cpp \ + incls/ad_$(Platform_arch_model)_format.cpp \ + incls/ad_$(Platform_arch_model)_gen.cpp \ + incls/ad_$(Platform_arch_model)_misc.cpp \ + incls/ad_$(Platform_arch_model)_peephole.cpp \ + incls/ad_$(Platform_arch_model)_pipeline.cpp \ + incls/adGlobals_$(Platform_arch_model).hpp \ + incls/dfa_$(Platform_arch_model).cpp + +{$(WorkSpace)\src\share\vm\adlc}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $< + +{$(WorkSpace)\src\share\vm\opto}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $< + +adlc.exe: main.obj adlparse.obj archDesc.obj arena.obj dfa.obj dict2.obj filebuff.obj \ + forms.obj formsopt.obj formssel.obj opcodes.obj output_c.obj output_h.obj + $(LINK) $(LINK_FLAGS) /subsystem:console /out:$@ $** +!if "$(MT)" != "" +# The previous link command created a .manifest file that we want to +# insert into the linked artifact so we do not need to track it +# separately. Use ";#2" for .dll and ";#1" for .exe: + $(MT) /manifest $@.manifest /outputresource:$@;#1 +!endif + +$(GENERATED_NAMES_IN_INCL): $(Platform_arch_model).ad adlc.exe includeDB.current + rm -f $(GENERATED_NAMES) + $(ADLC) $(ADLCFLAGS) $(Platform_arch_model).ad + mv $(GENERATED_NAMES) incls/ + +$(Platform_arch_model).ad: $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad + rm -f $(Platform_arch_model).ad + cat $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \ + $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad >$(Platform_arch_model).ad --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/compile.make 2009-08-01 04:17:14.848241402 +0100 @@ -0,0 +1,232 @@ +# +# Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. 
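Spelled out for one concrete case, the last two rules of adlc.make above amount to the following sequence (x86_32 names are used purely for illustration, $WorkSpace stands for the workspace root, and -U_LP64 is the 32-bit ADLCFLAGS setting shown above):

  cat $WorkSpace/src/cpu/x86/vm/x86_32.ad \
      $WorkSpace/src/os_cpu/windows_x86/vm/windows_x86_32.ad > x86_32.ad
  adlc -q -T -U_LP64 x86_32.ad   # emits ad_x86_32*.cpp/.hpp, adGlobals_x86_32.hpp, dfa_x86_32.cpp
  mv ad_x86_32* adGlobals_x86_32.hpp dfa_x86_32.cpp incls/

The generated sources land in incls/, matching the GENERATED_NAMES_IN_INCL list above.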
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Generic compiler settings +CPP=cl.exe + +# CPP Flags: (these vary slightly from VC6->VS2003->VS2005 compilers) +# /nologo Supress copyright message at every cl.exe startup +# /W3 Warning level 3 +# /Zi Include debugging information +# /WX Treat any warning error as a fatal error +# /MD Use dynamic multi-threaded runtime (msvcrt.dll or msvc*NN.dll) +# /MTd Use static multi-threaded runtime debug versions +# /O1 Optimize for size (/Os), skips /Oi +# /O2 Optimize for speed (/Ot), adds /Oi to /O1 +# /Ox Old "all optimizations flag" for VC6 (in /O1) +# /Oy Use frame pointer register as GP reg (in /Ox and /O1) +# /GF Merge string constants and put in read-only memory (in /O1) +# /Gy Func level link (in /O1, allows for link-time func ordering) +# /Gs Inserts stack probes (in /O1) +# /GS Inserts security stack checks in some functions (VS2005 default) +# /Oi Use intrinsics (in /O2) +# /Od Disable all optimizations +# +# NOTE: Normally following any of the above with a '-' will turn off that flag +# +# 6655385: For VS2003/2005 we now specify /Oy- (disable frame pointer +# omission.) This has little to no effect on performance while vastly +# improving the quality of crash log stack traces involving jvm.dll. + +# These are always used in all compiles +CPP_FLAGS=/nologo /W3 /WX + +# Let's add debug information always too. +CPP_FLAGS=$(CPP_FLAGS) /Zi + +# Based on BUILDARCH we add some flags and select the default compiler name +!if "$(BUILDARCH)" == "ia64" +MACHINE=IA64 +DEFAULT_COMPILER_NAME=VS2003 +CPP_FLAGS=$(CPP_FLAGS) /D "CC_INTERP" /D "_LP64" /D "IA64" +!endif + +!if "$(BUILDARCH)" == "amd64" +MACHINE=AMD64 +DEFAULT_COMPILER_NAME=VS2005 +CPP_FLAGS=$(CPP_FLAGS) /D "_LP64" /D "AMD64" +LP64=1 +!endif + +!if "$(BUILDARCH)" == "i486" +MACHINE=I386 +DEFAULT_COMPILER_NAME=VS2003 +CPP_FLAGS=$(CPP_FLAGS) /D "IA32" +!endif + +# Sanity check, this is the default if not amd64, ia64, or i486 +!ifndef DEFAULT_COMPILER_NAME +CPP=ARCH_ERROR +!endif + +# MSC_VER is a 4 digit number that tells us what compiler is being used +# and is generated when the local.make file is created by build.make +# via the script get_msc_ver.sh +# +# If MSC_VER is set, it overrides the above default setting. +# But it should be set. +# Possible values: +# 1200 is for VC6 +# 1300 and 1310 is VS2003 or VC7 +# 1399 is our fake number for the VS2005 compiler that really isn't 1400 +# 1400 is for VS2005 +# 1500 is for VS2008 +# Do not confuse this MSC_VER with the predefined macro _MSC_VER that the +# compiler provides, when MSC_VER==1399, _MSC_VER will be 1400. 
+# Normally they are the same, but a pre-release of the VS2005 compilers +# in the Windows 64bit Platform SDK said it was 1400 when it was really +# closer to VS2003 in terms of option spellings, so we use 1399 for that +# 1400 version that really isn't 1400. +# See the file get_msc_ver.sh for more info. +!if "x$(MSC_VER)" == "x" +COMPILER_NAME=$(DEFAULT_COMPILER_NAME) +!else +!if "$(MSC_VER)" == "1200" +COMPILER_NAME=VC6 +!endif +!if "$(MSC_VER)" == "1300" +COMPILER_NAME=VS2003 +!endif +!if "$(MSC_VER)" == "1310" +COMPILER_NAME=VS2003 +!endif +!if "$(MSC_VER)" == "1399" +# Compiler might say 1400, but if it's 14.00.30701, it isn't really VS2005 +COMPILER_NAME=VS2003 +!endif +!if "$(MSC_VER)" == "1400" +COMPILER_NAME=VS2005 +!endif +!if "$(MSC_VER)" == "1500" +COMPILER_NAME=VS2008 +!endif +!endif + +# Add what version of the compiler we think this is to the compile line +CPP_FLAGS=$(CPP_FLAGS) /D "MSC_VER=$(MSC_VER)" + +# By default, we do not want to use the debug version of the msvcrt.dll file +# but if MFC_DEBUG is defined in the environment it will be used. +MS_RUNTIME_OPTION = /MD +!if "$(MFC_DEBUG)" == "true" +MS_RUNTIME_OPTION = /MTd /D "_DEBUG" +!endif + +# Always add the _STATIC_CPPLIB flag +STATIC_CPPLIB_OPTION = /D _STATIC_CPPLIB +MS_RUNTIME_OPTION = $(MS_RUNTIME_OPTION) $(STATIC_CPPLIB_OPTION) +CPP_FLAGS=$(CPP_FLAGS) $(MS_RUNTIME_OPTION) + +# How /GX option is spelled +GX_OPTION = /GX + +# Optimization settings for various versions of the compilers and types of +# builds. Three basic sets of settings: product, fastdebug, and debug. +# These get added into CPP_FLAGS as needed by other makefiles. +!if "$(COMPILER_NAME)" == "VC6" +PRODUCT_OPT_OPTION = /Ox /Os /Gy /GF +FASTDEBUG_OPT_OPTION = /Ox /Os /Gy /GF +DEBUG_OPT_OPTION = /Od +!endif + +!if "$(COMPILER_NAME)" == "VS2003" +PRODUCT_OPT_OPTION = /O2 /Oy- +FASTDEBUG_OPT_OPTION = /O2 /Oy- +DEBUG_OPT_OPTION = /Od +!endif + +!if "$(COMPILER_NAME)" == "VS2005" +PRODUCT_OPT_OPTION = /O2 /Oy- +FASTDEBUG_OPT_OPTION = /O2 /Oy- +DEBUG_OPT_OPTION = /Od +GX_OPTION = /EHsc +# This VS2005 compiler has /GS as a default and requires bufferoverflowU.lib +# on the link command line, otherwise we get missing __security_check_cookie +# externals at link time. Even with /GS-, you need bufferoverflowU.lib. +# NOTE: Currently we decided to not use /GS- +BUFFEROVERFLOWLIB = bufferoverflowU.lib +LINK_FLAGS = /manifest $(LINK_FLAGS) $(BUFFEROVERFLOWLIB) +# Manifest Tool - used in VS2005 and later to adjust manifests stored +# as resources inside build artifacts. +MT=mt.exe +!if "$(BUILDARCH)" == "i486" +# VS2005 on x86 restricts the use of certain libc functions without this +CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_DEPRECATE +!endif +!endif + +!if "$(COMPILER_NAME)" == "VS2008" +PRODUCT_OPT_OPTION = /O2 /Oy- +FASTDEBUG_OPT_OPTION = /O2 /Oy- +DEBUG_OPT_OPTION = /Od +GX_OPTION = /EHsc +LINK_FLAGS = /manifest $(LINK_FLAGS) +# Manifest Tool - used in VS2005 and later to adjust manifests stored +# as resources inside build artifacts. +MT=mt.exe +!if "$(BUILDARCH)" == "i486" +# VS2005 on x86 restricts the use of certain libc functions without this +CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_DEPRECATE +!endif +!endif + +# Compile for space above time. 
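Restated outside nmake syntax, the MSC_VER-to-COMPILER_NAME selection above behaves like this sh case; a non-empty but unrecognized MSC_VER leaves COMPILER_NAME unset, exactly as in the makefile:

  case "$MSC_VER" in
    "")        COMPILER_NAME=$DEFAULT_COMPILER_NAME ;;  # fall back to the BUILDARCH default
    1200)      COMPILER_NAME=VC6 ;;
    1300|1310) COMPILER_NAME=VS2003 ;;
    1399)      COMPILER_NAME=VS2003 ;;  # 14.00.30701 pre-release; VS2003-style options
    1400)      COMPILER_NAME=VS2005 ;;
    1500)      COMPILER_NAME=VS2008 ;;
  esac

The "compile for space above time" comment just above introduces the kernel-variant block that follows, which overrides the per-compiler optimization options with /O1.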
+!if "$(Variant)" == "kernel" +PRODUCT_OPT_OPTION = /O1 /Oy- +FASTDEBUG_OPT_OPTION = /O1 /Oy- +DEBUG_OPT_OPTION = /Od +!endif + +# If NO_OPTIMIZATIONS is defined in the environment, turn everything off +!ifdef NO_OPTIMIZATIONS +PRODUCT_OPT_OPTION = $(DEBUG_OPT_OPTION) +FASTDEBUG_OPT_OPTION = $(DEBUG_OPT_OPTION) +!endif + +# Generic linker settings +LINK=link.exe +LINK_FLAGS= $(LINK_FLAGS) kernel32.lib user32.lib gdi32.lib winspool.lib \ + comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib \ + uuid.lib Wsock32.lib winmm.lib /nologo /machine:$(MACHINE) /opt:REF \ + /opt:ICF,8 /map /debug + +# Resource compiler settings +RC=rc.exe +RC_FLAGS=/D "HS_VER=$(HS_VER)" \ + /D "HS_DOTVER=$(HS_DOTVER)" \ + /D "HS_BUILD_ID=$(HS_BUILD_ID)" \ + /D "JDK_VER=$(JDK_VER)" \ + /D "JDK_DOTVER=$(JDK_DOTVER)" \ + /D "HS_COMPANY=$(HS_COMPANY)" \ + /D "HS_FILEDESC=$(HS_FILEDESC)" \ + /D "HS_COPYRIGHT=$(HS_COPYRIGHT)" \ + /D "HS_FNAME=$(HS_FNAME)" \ + /D "HS_INTERNAL_NAME=$(HS_INTERNAL_NAME)" \ + /D "HS_NAME=$(HS_NAME)" + +# Need this to match the CPP_FLAGS settings +!if "$(MFC_DEBUG)" == "true" +RC_FLAGS = $(RC_FLAGS) /D "_DEBUG" +!endif + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/debug.make 2009-08-01 04:17:15.265749433 +0100 @@ -0,0 +1,61 @@ +# +# Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +HS_INTERNAL_NAME=jvm +HS_FNAME=$(HS_INTERNAL_NAME).dll +AOUT=$(HS_FNAME) +SAWINDBG=sawindbg.dll +GENERATED=../generated + +default:: _build_pch_file.obj $(AOUT) checkAndBuildSA + +!include ../local.make +!include compile.make + +CPP_FLAGS=$(CPP_FLAGS) $(DEBUG_OPT_OPTION) + +!include $(WorkSpace)/make/windows/makefiles/vm.make +!include local.make + +!include $(GENERATED)/Dependencies + +HS_BUILD_ID=$(HS_BUILD_VER)-debug + +# Force resources to be rebuilt every time +$(Res_Files): FORCE + +$(AOUT): $(Res_Files) $(Obj_Files) + sh $(WorkSpace)/make/windows/build_vm_def.sh + $(LINK) @<< + $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files) +<< +!if "$(MT)" != "" +# The previous link command created a .manifest file that we want to +# insert into the linked artifact so we do not need to track it +# separately. 
Use ";#2" for .dll and ";#1" for .exe: + $(MT) /manifest $@.manifest /outputresource:$@;#2 +!endif + +!include $(WorkSpace)/make/windows/makefiles/shared.make +!include $(WorkSpace)/make/windows/makefiles/sa.make --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/defs.make 2009-08-01 04:17:15.674611999 +0100 @@ -0,0 +1,160 @@ +# +# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# The common definitions for hotspot windows builds. +# Include the top level defs.make under make directory instead of this one. +# This file is included into make/defs.make. +# On windows it is only used to construct parameters for +# make/windows/build.make when make/Makefile is used to build VM. + +SLASH_JAVA ?= J: +PATH_SEP = ; + +# Need PLATFORM (os-arch combo names) for jdk and hotspot, plus libarch name +ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) x86),) + ARCH_DATA_MODEL=32 + PLATFORM=windows-i586 + VM_PLATFORM=windows_i486 + HS_ARCH=x86 + MAKE_ARGS += ARCH=x86 + MAKE_ARGS += BUILDARCH=i486 + MAKE_ARGS += Platform_arch=x86 + MAKE_ARGS += Platform_arch_model=x86_32 +endif + +ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) ia64),) + ARCH_DATA_MODEL=64 + PLATFORM=windows-ia64 + VM_PLATFORM=windows_ia64 + HS_ARCH=ia64 + MAKE_ARGS += LP64=1 + MAKE_ARGS += ARCH=ia64 + MAKE_ARGS += BUILDARCH=ia64 + MAKE_ARGS += Platform_arch=ia64 + MAKE_ARGS += Platform_arch_model=ia64 +endif + +ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) AMD64),) + ARCH_DATA_MODEL=64 + PLATFORM=windows-amd64 + VM_PLATFORM=windows_amd64 + HS_ARCH=x86 + MAKE_ARGS += LP64=1 + MAKE_ARGS += ARCH=x86 + MAKE_ARGS += BUILDARCH=amd64 + MAKE_ARGS += Platform_arch=x86 + MAKE_ARGS += Platform_arch_model=x86_64 +endif + +JDK_INCLUDE_SUBDIR=win32 + +# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined +# and added to MAKE_ARGS list in $(GAMMADIR)/make/defs.make. + +# next parameters are defined in $(GAMMADIR)/make/defs.make. +MAKE_ARGS += JDK_MKTG_VERSION=$(JDK_MKTG_VERSION) +MAKE_ARGS += JDK_MAJOR_VER=$(JDK_MAJOR_VERSION) +MAKE_ARGS += JDK_MINOR_VER=$(JDK_MINOR_VERSION) +MAKE_ARGS += JDK_MICRO_VER=$(JDK_MICRO_VERSION) + +ifdef COOKED_JDK_UPDATE_VERSION + MAKE_ARGS += JDK_UPDATE_VER=$(COOKED_JDK_UPDATE_VERSION) +endif + +# COOKED_BUILD_NUMBER should only be set if we have a numeric +# build number. It must not be zero padded. 
+ifdef COOKED_BUILD_NUMBER + MAKE_ARGS += JDK_BUILD_NUMBER=$(COOKED_BUILD_NUMBER) +endif + +NMAKE= MAKEFLAGS= MFLAGS= nmake /NOLOGO + +# Check for CYGWIN +ifneq (,$(findstring CYGWIN,$(shell uname))) + USING_CYGWIN=true +else + USING_CYGWIN=false +endif +# FIXUP: The subdirectory for a debug build is NOT the same on all platforms +VM_DEBUG=debug + +# Windows wants particular paths due to nmake (must be after macros defined) +# It is important that gnumake invokes nmake with C:\\...\\ formated +# strings so that nmake gets C:\...\ style strings. +# Check for CYGWIN +ifeq ($(USING_CYGWIN), true) + ABS_OUTPUTDIR := $(subst /,\\,$(shell /bin/cygpath -m -a "$(OUTPUTDIR)")) + ABS_BOOTDIR := $(subst /,\\,$(shell /bin/cygpath -m -a "$(BOOTDIR)")) + ABS_GAMMADIR := $(subst /,\\,$(shell /bin/cygpath -m -a "$(GAMMADIR)")) + ABS_OS_MAKEFILE := $(shell /bin/cygpath -m -a "$(HS_MAKE_DIR)/$(OSNAME)")/build.make +else + ABS_OUTPUTDIR := $(subst /,\\,$(shell $(CD) $(OUTPUTDIR);$(PWD))) + ABS_BOOTDIR := $(subst /,\\,$(shell $(CD) $(BOOTDIR);$(PWD))) + ABS_GAMMADIR := $(subst /,\\,$(shell $(CD) $(GAMMADIR);$(PWD))) + ABS_OS_MAKEFILE := $(subst /,\\,$(shell $(CD) $(HS_MAKE_DIR)/$(OSNAME);$(PWD))/build.make) +endif + +# Disable building SA on windows until we are sure +# we want to release it. If we build it here, +# the SDK makefiles will copy it over and put it into +# the created image. +BUILD_WIN_SA = 1 +ifneq ($(ALT_BUILD_WIN_SA),) + BUILD_WIN_SA = $(ALT_BUILD_WIN_SA) +endif + +ifeq ($(BUILD_WIN_SA), 1) + ifeq ($(ARCH),ia64) + BUILD_WIN_SA = 0 + endif +endif + +EXPORT_SERVER_DIR = $(EXPORT_JRE_BIN_DIR)/server +EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt +EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.dll +EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.pdb +EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.map +EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib +ifeq ($(ARCH_DATA_MODEL), 32) + EXPORT_CLIENT_DIR = $(EXPORT_JRE_BIN_DIR)/client + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/jvm.dll + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/jvm.pdb + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/jvm.map + # kernel vm + EXPORT_KERNEL_DIR = $(EXPORT_JRE_BIN_DIR)/kernel + EXPORT_LIST += $(EXPORT_KERNEL_DIR)/Xusage.txt + EXPORT_LIST += $(EXPORT_KERNEL_DIR)/jvm.dll + EXPORT_LIST += $(EXPORT_KERNEL_DIR)/jvm.pdb + EXPORT_LIST += $(EXPORT_KERNEL_DIR)/jvm.map +endif + +ifeq ($(BUILD_WIN_SA), 1) + EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.dll + EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.pdb + EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.map + EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar + # Must pass this down to nmake. + MAKE_ARGS += BUILD_WIN_SA=1 +endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/fastdebug.make 2009-08-01 04:17:16.099851401 +0100 @@ -0,0 +1,62 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). 
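The Cygwin branch above resolves each directory to an absolute mixed-mode path and then doubles the slashes into backslashes, so that by the time the string has passed from gnumake to nmake it reads C:\...\ as nmake expects. For a single variable the transformation looks like this (the OUTPUTDIR value is hypothetical):

  OUTPUTDIR=/cygdrive/c/hotspot/build             # hypothetical
  ABS=`/bin/cygpath -m -a "$OUTPUTDIR"`           # -> C:/hotspot/build
  printf '%s\n' "$ABS" | sed 's,/,\\\\,g'         # -> C:\\hotspot\\build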
+# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +HS_INTERNAL_NAME=jvm +HS_FNAME=$(HS_INTERNAL_NAME).dll +AOUT=$(HS_FNAME) +SAWINDBG=sawindbg.dll +GENERATED=../generated + +default:: _build_pch_file.obj $(AOUT) checkAndBuildSA + +!include ../local.make +!include compile.make + +CPP_FLAGS=$(CPP_FLAGS) $(FASTDEBUG_OPT_OPTION) + +!include $(WorkSpace)/make/windows/makefiles/vm.make +!include local.make + +!include $(GENERATED)/Dependencies + +HS_BUILD_ID=$(HS_BUILD_VER)-fastdebug + +# Force resources to be rebuilt every time +$(Res_Files): FORCE + +$(AOUT): $(Res_Files) $(Obj_Files) + sh $(WorkSpace)/make/windows/build_vm_def.sh + $(LINK) @<< + $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files) +<< +!if "$(MT)" != "" +# The previous link command created a .manifest file that we want to +# insert into the linked artifact so we do not need to track it +# separately. Use ";#2" for .dll and ";#1" for .exe: + $(MT) /manifest $@.manifest /outputresource:$@;#2 +!endif + + +!include $(WorkSpace)/make/windows/makefiles/shared.make +!include $(WorkSpace)/make/windows/makefiles/sa.make --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/generated.make 2009-08-01 04:17:16.517283667 +0100 @@ -0,0 +1,102 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. 
+# +# + +!include ../local.make +!include $(WorkSpace)/make/windows/makefiles/makedeps.make +!include local.make + +# Pick up rules for building JVMTI (JSR-163) +JvmtiOutDir=jvmtifiles +!include $(WorkSpace)/make/windows/makefiles/jvmti.make + +# Pick up rules for building SA +!include $(WorkSpace)/make/windows/makefiles/sa.make + +!if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered") +default:: includeDB.current Dependencies incls/ad_$(Platform_arch_model).cpp incls/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) +!else +default:: includeDB.current Dependencies $(JvmtiGeneratedFiles) +!endif + +# core plus serial gc +IncludeDBs_base=$(WorkSpace)/src/share/vm/includeDB_core \ + $(WorkSpace)/src/share/vm/includeDB_jvmti \ + $(WorkSpace)/src/share/vm/includeDB_gc \ + $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_serial + +# parallel gc +IncludeDBs_gc= $(WorkSpace)/src/share/vm/includeDB_gc_parallel \ + $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge \ + $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_shared \ + $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_parNew \ + $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep \ + $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_g1 + +IncludeDBs_core=$(IncludeDBs_base) $(IncludeDBs_gc) \ + $(WorkSpace)/src/share/vm/includeDB_features + +!if "$(Variant)" == "core" +IncludeDBs=$(IncludeDBs_core) +!endif + +!if "$(Variant)" == "kernel" +IncludeDBs=$(IncludeDBs_base) $(WorkSpace)/src/share/vm/includeDB_compiler1 +!endif + +!if "$(Variant)" == "compiler1" +IncludeDBs=$(IncludeDBs_core) $(WorkSpace)/src/share/vm/includeDB_compiler1 +!endif + + +!if "$(Variant)" == "compiler2" +IncludeDBs=$(IncludeDBs_core) $(WorkSpace)/src/share/vm/includeDB_compiler2 +!endif + +!if "$(Variant)" == "tiered" +IncludeDBs=$(IncludeDBs_core) $(WorkSpace)/src/share/vm/includeDB_compiler1 \ + $(WorkSpace)/src/share/vm/includeDB_compiler2 +!endif + +# Note we don't generate a Visual C++ project file using MakeDeps for +# the batch build. +includeDB.current Dependencies: classes/MakeDeps.class $(IncludeDBs) + cat $(IncludeDBs) > includeDB + if exist incls rmdir /s /q incls + mkdir incls + $(RUN_JAVA) -Djava.class.path=classes MakeDeps WinGammaPlatform$(VcVersion) $(WorkSpace)/make/windows/platform_$(BUILDARCH) includeDB $(MakeDepsOptions) + rm -f includeDB.current + cp includeDB includeDB.current + +classes/MakeDeps.class: $(MakeDepsSources) + if exist classes rmdir /s /q classes + mkdir classes + $(COMPILE_JAVAC) -classpath $(WorkSpace)\src\share\tools\MakeDeps -g -d classes $(MakeDepsSources) + +!if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered") + +!include $(WorkSpace)/make/windows/makefiles/adlc.make + +!endif + +!include $(WorkSpace)/make/windows/makefiles/shared.make --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/jvmti.make 2009-08-01 04:17:16.951165365 +0100 @@ -0,0 +1,114 @@ +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This makefile (jvmti.make) is included from the jvmti.make in the +# build directories. +# +# It knows how to build and run the tools to generate jvmti. + +!include $(WorkSpace)/make/windows/makefiles/rules.make + +# ######################################################################### + +JvmtiSrcDir = $(WorkSpace)/src/share/vm/prims +InterpreterSrcDir = $(WorkSpace)/src/share/vm/interpreter + +JvmtiGeneratedNames = \ + jvmtiEnv.hpp \ + jvmtiEnter.cpp \ + jvmtiEnterTrace.cpp \ + jvmtiEnvRecommended.cpp \ + bytecodeInterpreterWithChecks.cpp \ + jvmti.h \ + +JvmtiEnvFillSource = $(JvmtiSrcDir)/jvmtiEnvFill.java +JvmtiEnvFillClass = $(JvmtiOutDir)/jvmtiEnvFill.class + +JvmtiGenSource = $(JvmtiSrcDir)/jvmtiGen.java +JvmtiGenClass = $(JvmtiOutDir)/jvmtiGen.class + +#Note: JvmtiGeneratedFiles must be kept in sync with JvmtiGeneratedNames by hand. +#Should be equivalent #to "JvmtiGeneratedFiles = $(JvmtiGeneratedNames:%=$(JvmtiOutDir)/%)" +JvmtiGeneratedFiles = \ + $(JvmtiOutDir)/jvmtiEnv.hpp \ + $(JvmtiOutDir)/jvmtiEnter.cpp \ + $(JvmtiOutDir)/jvmtiEnterTrace.cpp \ + $(JvmtiOutDir)/jvmtiEnvRecommended.cpp\ + $(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp\ + $(JvmtiOutDir)/jvmti.h \ + +XSLT = $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen + +# ######################################################################### + +both = $(JvmtiGenClass) $(JvmtiSrcDir)/jvmti.xml $(JvmtiSrcDir)/jvmtiLib.xsl + +default:: + @if not exist $(JvmtiOutDir) mkdir $(JvmtiOutDir) + +$(JvmtiGenClass): $(JvmtiGenSource) + $(COMPILE_JAVAC) -g -d $(JvmtiOutDir) $(JvmtiGenSource) + +$(JvmtiEnvFillClass): $(JvmtiEnvFillSource) + @$(COMPILE_JAVAC) -g -d $(JvmtiOutDir) $(JvmtiEnvFillSource) + +$(JvmtiOutDir)/jvmtiEnter.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl + @echo Generating $@ + @$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnter.cpp -PARAM interface jvmti + +$(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp: $(JvmtiGenClass) $(InterpreterSrcDir)/bytecodeInterpreter.cpp $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl + @echo Generating $@ + @$(XSLT) -IN $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml -XSL $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl -OUT $(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp + +$(JvmtiOutDir)/jvmtiEnterTrace.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl + @echo Generating $@ + @$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnterTrace.cpp -PARAM interface jvmti -PARAM trace Trace + +$(JvmtiOutDir)/jvmtiEnvRecommended.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnv.xsl $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiEnvFillClass) + @echo Generating $@ + @$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnv.xsl -OUT $(JvmtiOutDir)/jvmtiEnvStub.cpp + @$(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiEnvFill $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiOutDir)/jvmtiEnvStub.cpp 
$(JvmtiOutDir)/jvmtiEnvRecommended.cpp + +$(JvmtiOutDir)/jvmtiEnv.hpp: $(both) $(JvmtiSrcDir)/jvmtiHpp.xsl + @echo Generating $@ + @$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiHpp.xsl -OUT $(JvmtiOutDir)/jvmtiEnv.hpp + +$(JvmtiOutDir)/jvmti.h: $(both) $(JvmtiSrcDir)/jvmtiH.xsl + @echo Generating $@ + @$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiH.xsl -OUT $(JvmtiOutDir)/jvmti.h + +jvmtidocs: $(JvmtiOutDir)/jvmti.html + +$(JvmtiOutDir)/jvmti.html: $(both) $(JvmtiSrcDir)/jvmti.xsl + @echo Generating $@ + @$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmti.xsl -OUT $(JvmtiOutDir)/jvmti.html + +# ######################################################################### + +cleanall : + rm $(JvmtiGenClass) $(JvmtiEnvFillClass) $(JvmtiGeneratedFiles) + +# ######################################################################### + +.PHONY: jvmtidocs cleanall --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/makedeps.make 2009-08-01 04:17:17.402050176 +0100 @@ -0,0 +1,175 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +!include $(WorkSpace)/make/windows/makefiles/rules.make + +# This is used externally by both batch and IDE builds, so can't +# reference any of the HOTSPOTWORKSPACE, HOTSPOTBUILDSPACE, +# HOTSPOTRELEASEBINDEST, or HOTSPOTDEBUGBINDEST environment variables. +# +# NOTE: unfortunately the MakeDepsSources list must be kept +# synchronized between this and the Solaris version +# (make/solaris/makefiles/makedeps.make). 
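Each generation rule in jvmti.make above is the XSLT macro expanded with a different stylesheet. One of them, written out with workspace-relative paths for illustration (RUN_JAVA resolves to the boot JDK's java, or plain java, per rules.make):

  java -classpath jvmtifiles jvmtiGen \
       -IN  src/share/vm/prims/jvmti.xml \
       -XSL src/share/vm/prims/jvmtiEnter.xsl \
       -OUT jvmtifiles/jvmtiEnter.cpp \
       -PARAM interface jvmti

Adding -PARAM trace Trace to the same command is what produces jvmtiEnterTrace.cpp.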
+ +MakeDepsSources=\ + $(WorkSpace)\src\share\tools\MakeDeps\Database.java \ + $(WorkSpace)\src\share\tools\MakeDeps\DirectoryTree.java \ + $(WorkSpace)\src\share\tools\MakeDeps\DirectoryTreeNode.java \ + $(WorkSpace)\src\share\tools\MakeDeps\FileFormatException.java \ + $(WorkSpace)\src\share\tools\MakeDeps\FileList.java \ + $(WorkSpace)\src\share\tools\MakeDeps\FileName.java \ + $(WorkSpace)\src\share\tools\MakeDeps\Macro.java \ + $(WorkSpace)\src\share\tools\MakeDeps\MacroDefinitions.java \ + $(WorkSpace)\src\share\tools\MakeDeps\MakeDeps.java \ + $(WorkSpace)\src\share\tools\MakeDeps\MetroWerksMacPlatform.java \ + $(WorkSpace)\src\share\tools\MakeDeps\Platform.java \ + $(WorkSpace)\src\share\tools\MakeDeps\UnixPlatform.java \ + $(WorkSpace)\src\share\tools\MakeDeps\WinGammaPlatform.java \ + $(WorkSpace)\src\share\tools\MakeDeps\WinGammaPlatformVC6.java \ + $(WorkSpace)\src\share\tools\MakeDeps\WinGammaPlatformVC7.java \ + $(WorkSpace)\src\share\tools\MakeDeps\Util.java \ + $(WorkSpace)\src\share\tools\MakeDeps\BuildConfig.java \ + $(WorkSpace)\src\share\tools\MakeDeps\ArgsParser.java + +# This is only used internally +MakeDepsIncludesPRIVATE=\ + -relativeInclude src\share\vm\c1 \ + -relativeInclude src\share\vm\compiler \ + -relativeInclude src\share\vm\code \ + -relativeInclude src\share\vm\interpreter \ + -relativeInclude src\share\vm\ci \ + -relativeInclude src\share\vm\classfile \ + -relativeInclude src\share\vm\gc_implementation\parallelScavenge \ + -relativeInclude src\share\vm\gc_implementation\shared \ + -relativeInclude src\share\vm\gc_implementation\parNew \ + -relativeInclude src\share\vm\gc_implementation\concurrentMarkSweep \ + -relativeInclude src\share\vm\gc_implementation\g1 \ + -relativeInclude src\share\vm\gc_interface \ + -relativeInclude src\share\vm\asm \ + -relativeInclude src\share\vm\memory \ + -relativeInclude src\share\vm\oops \ + -relativeInclude src\share\vm\prims \ + -relativeInclude src\share\vm\runtime \ + -relativeInclude src\share\vm\services \ + -relativeInclude src\share\vm\utilities \ + -relativeInclude src\share\vm\libadt \ + -relativeInclude src\share\vm\opto \ + -relativeInclude src\os\windows\vm \ + -relativeInclude src\os_cpu\windows_$(Platform_arch)\vm \ + -relativeInclude src\cpu\$(Platform_arch)\vm + +# This is referenced externally by both the IDE and batch builds +MakeDepsOptions= + +# This is used externally, but only by the IDE builds, so we can +# reference environment variables which aren't defined in the batch +# build process. 
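For the batch build, these sources are compiled and run by the classes/MakeDeps.class and Dependencies rules in generated.make shown earlier. Condensed into plain commands (VC7 and i486 are example values; the real ones come from rules.make and the BUILDARCH setting):

  javac -classpath $WorkSpace/src/share/tools/MakeDeps -g -d classes \
        $WorkSpace/src/share/tools/MakeDeps/*.java
  cat $IncludeDBs > includeDB        # IncludeDBs = the per-Variant includeDB_* list
  java -Djava.class.path=classes MakeDeps WinGammaPlatformVC7 \
       $WorkSpace/make/windows/platform_i486 includeDB $MakeDepsOptions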
+ +MakeDepsIDEOptions = \ + -useToGeneratePch java.cpp \ + -disablePch os_windows.cpp \ + -disablePch os_windows_$(Platform_arch).cpp \ + -disablePch osThread_windows.cpp \ + -disablePch bytecodeInterpreter.cpp \ + -disablePch bytecodeInterpreterWithChecks.cpp \ + -disablePch getThread_windows_$(Platform_arch).cpp \ + -disablePch_compiler2 opcodes.cpp + +# Common options for the IDE builds for core, c1, and c2 +MakeDepsIDEOptions=\ + $(MakeDepsIDEOptions) \ + -sourceBase $(HOTSPOTWORKSPACE) \ + -buildBase $(HOTSPOTBUILDSPACE)\%f\%b \ + -startAt src \ + -compiler $(VcVersion) \ + -projectFileName $(HOTSPOTBUILDSPACE)\$(ProjectFile) \ + -jdkTargetRoot $(HOTSPOTJDKDIST) \ + -define ALIGN_STACK_FRAMES \ + -define VM_LITTLE_ENDIAN \ + -additionalFile includeDB_compiler1 \ + -additionalFile includeDB_compiler2 \ + -additionalFile includeDB_core \ + -additionalFile includeDB_features \ + -additionalFile includeDB_jvmti \ + -additionalFile includeDB_gc \ + -additionalFile includeDB_gc_parallel \ + -additionalFile includeDB_gc_parallelScavenge \ + -additionalFile includeDB_gc_concurrentMarkSweep \ + -additionalFile includeDB_gc_g1 \ + -additionalFile includeDB_gc_parNew \ + -additionalFile includeDB_gc_shared \ + -additionalFile includeDB_gc_serial \ + -additionalGeneratedFile $(HOTSPOTBUILDSPACE)\%f\%b vm.def \ + -prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh" \ + $(MakeDepsIncludesPRIVATE) + +# Add in build-specific options +!if "$(BUILDARCH)" == "i486" +MakeDepsIDEOptions=$(MakeDepsIDEOptions) -define IA32 +!endif + +################################################## +# JKERNEL specific options +################################################## +MakeDepsIDEOptions=$(MakeDepsIDEOptions) \ + -define_kernel KERNEL \ + +################################################## +# Client(C1) compiler specific options +################################################## +MakeDepsIDEOptions=$(MakeDepsIDEOptions) \ + -define_compiler1 COMPILER1 \ + +################################################## +# Server(C2) compiler specific options +################################################## +#NOTE! This list must be kept in sync with GENERATED_NAMES in adlc.make. 
+MakeDepsIDEOptions=$(MakeDepsIDEOptions) \ + -define_compiler2 COMPILER2 \ + -absoluteInclude_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls \ + -additionalFile_compiler2 $(Platform_arch_model).ad \ + -additionalGeneratedFile_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls ad_$(Platform_arch_model).cpp \ + -additionalGeneratedFile_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls ad_$(Platform_arch_model).hpp \ + -additionalGeneratedFile_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls ad_$(Platform_arch_model)_clone.cpp \ + -additionalGeneratedFile_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls ad_$(Platform_arch_model)_expand.cpp \ + -additionalGeneratedFile_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls ad_$(Platform_arch_model)_format.cpp \ + -additionalGeneratedFile_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls ad_$(Platform_arch_model)_gen.cpp \ + -additionalGeneratedFile_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls ad_$(Platform_arch_model)_misc.cpp \ + -additionalGeneratedFile_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls ad_$(Platform_arch_model)_peephole.cpp \ + -additionalGeneratedFile_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls ad_$(Platform_arch_model)_pipeline.cpp \ + -additionalGeneratedFile_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls adGlobals_$(Platform_arch_model).hpp \ + -additionalGeneratedFile_compiler2 $(HOTSPOTBUILDSPACE)/%f/incls dfa_$(Platform_arch_model).cpp + +# Add in the jvmti (JSR-163) options +# NOTE: do not pull in jvmtiEnvRecommended.cpp. This file is generated +# so the programmer can diff it with jvmtiEnv.cpp to be sure the +# code merge was done correctly (@see jvmti.make and jvmtiEnvFill.java). +# If so, they would then check it in as a new version of jvmtiEnv.cpp. +MakeDepsIDEOptions=$(MakeDepsIDEOptions) \ + -absoluteInclude $(HOTSPOTBUILDSPACE)/jvmtifiles \ + -additionalGeneratedFile $(HOTSPOTBUILDSPACE)/jvmtifiles jvmtiEnv.hpp \ + -additionalGeneratedFile $(HOTSPOTBUILDSPACE)/jvmtifiles jvmtiEnter.cpp \ + -additionalGeneratedFile $(HOTSPOTBUILDSPACE)/jvmtifiles jvmtiEnterTrace.cpp \ + -additionalGeneratedFile $(HOTSPOTBUILDSPACE)/jvmtifiles jvmti.h \ + -additionalGeneratedFile $(HOTSPOTBUILDSPACE)/jvmtifiles bytecodeInterpreterWithChecks.cpp --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/product.make 2009-08-01 04:17:17.837979431 +0100 @@ -0,0 +1,72 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. 
+# +# + +HS_INTERNAL_NAME=jvm +HS_FNAME=$(HS_INTERNAL_NAME).dll +AOUT=$(HS_FNAME) +GENERATED=../generated + +default:: _build_pch_file.obj $(AOUT) checkAndBuildSA + +!include ../local.make +!include compile.make + +CPP_FLAGS=$(CPP_FLAGS) $(PRODUCT_OPT_OPTION) + +RELEASE= + +RC_FLAGS=$(RC_FLAGS) /D "NDEBUG" + +!include $(WorkSpace)/make/windows/makefiles/vm.make +!include local.make + +!include $(GENERATED)/Dependencies + +HS_BUILD_ID=$(HS_BUILD_VER) + +# Force resources to be rebuilt every time +$(Res_Files): FORCE + +# Kernel doesn't need exported vtbl symbols. +!if "$(Variant)" == "kernel" +$(AOUT): $(Res_Files) $(Obj_Files) + $(LINK) @<< + $(LINK_FLAGS) /out:$@ /implib:$*.lib $(Obj_Files) $(Res_Files) +<< +!else +$(AOUT): $(Res_Files) $(Obj_Files) + sh $(WorkSpace)/make/windows/build_vm_def.sh + $(LINK) @<< + $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files) +<< +!endif +!if "$(MT)" != "" +# The previous link command created a .manifest file that we want to +# insert into the linked artifact so we do not need to track it +# separately. Use ";#2" for .dll and ";#1" for .exe: + $(MT) /manifest $@.manifest /outputresource:$@;#2 +!endif + +!include $(WorkSpace)/make/windows/makefiles/shared.make +!include $(WorkSpace)/make/windows/makefiles/sa.make --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/rules.make 2009-08-01 04:17:18.278450566 +0100 @@ -0,0 +1,51 @@ +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# These are the commands used externally to compile and run. + +!ifdef BootStrapDir +RUN_JAVA=$(BootStrapDir)\bin\java +RUN_JAVAP=$(BootStrapDir)\bin\javap +RUN_JAVAH=$(BootStrapDir)\bin\javah +RUN_JAR=$(BootStrapDir)\bin\jar +COMPILE_JAVAC=$(BootStrapDir)\bin\javac +COMPILE_RMIC=$(BootStrapDir)\bin\rmic +BOOT_JAVA_HOME=$(BootStrapDir) +!else +RUN_JAVA=java +RUN_JAVAP=javap +RUN_JAVAH=javah +RUN_JAR=jar +COMPILE_JAVAC=javac +COMPILE_RMIC=rmic +BOOT_JAVA_HOME= +!endif + +!if "$(MSC_VER)" == "1200" +VcVersion=VC6 +ProjectFile=vm.dsp +!else +VcVersion=VC7 +ProjectFile=vm.vcproj +!endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/sa.make 2009-08-01 04:17:18.754115114 +0100 @@ -0,0 +1,128 @@ +# +# Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
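The mt.exe step recurs in adlc.make, debug.make, fastdebug.make and product.make: with VS2005/VS2008 the linker leaves a side-by-side .manifest file next to the artifact, and mt.exe folds it back in as a resource so it does not need to be tracked separately. Written out for both cases (quoting added only because an sh-style shell would otherwise eat the semicolon):

  mt.exe /manifest jvm.dll.manifest  /outputresource:"jvm.dll;#2"     # ;#2 for a .dll
  mt.exe /manifest adlc.exe.manifest /outputresource:"adlc.exe;#1"    # ;#1 for an .exe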
+# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# This makefile is used to build Serviceability Agent code +# and generate JNI header file for native methods. + +AGENT_DIR = $(WorkSpace)/agent +checkAndBuildSA:: + +!if "$(BUILD_WIN_SA)" != "1" +# Already warned about this in build.make +!else + +# This first part is used to build sa-jdi.jar +!include $(WorkSpace)/make/windows/makefiles/rules.make +!include $(WorkSpace)/make/sa.files + +GENERATED = ..\generated + +# tools.jar is needed by the JDI - SA binding +SA_CLASSPATH = $(BOOT_JAVA_HOME)\lib\tools.jar + +SA_CLASSDIR = $(GENERATED)\saclasses + +SA_BUILD_VERSION_PROP = sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VERSION) + +SA_PROPERTIES = $(SA_CLASSDIR)\sa.properties + +default:: $(GENERATED)\sa-jdi.jar + +# Remove the space between $(SA_BUILD_VERSION_PROP) and > below as it adds a white space +# at the end of SA version string and causes a version mismatch with the target VM version. + +$(GENERATED)\sa-jdi.jar: $(AGENT_FILES1:/=\) $(AGENT_FILES2:/=\) + @if not exist $(SA_CLASSDIR) mkdir $(SA_CLASSDIR) + @echo ...Building sa-jdi.jar + @echo ...$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -g -d $(SA_CLASSDIR) .... + @$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\) + @$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\) + $(COMPILE_RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer + $(QUIETLY) echo $(SA_BUILD_VERSION_PROP)> $(SA_PROPERTIES) + $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js + $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql + $(QUIETLY) rm -rf $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources + $(QUIETLY) mkdir $(SA_CLASSDIR)\sun\jvm\hotspot\ui\resources + $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources + $(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR) + $(RUN_JAR) cf $@ -C saclasses . 
+ $(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector + $(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal + $(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext + $(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext + $(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.amd64.AMD64ThreadContext + + + +# This second part is used to build sawindbg.dll +# We currently build it the same way for product, debug, and fastdebug. + +SAWINDBG=sawindbg.dll + +checkAndBuildSA:: $(SAWINDBG) + +# These do not need to be optimized (don't run a lot of code) and it +# will be useful to have the assertion checks in place + +!if "$(BUILDARCH)" == "ia64" +SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 $(GX_OPTION) /Od /D "WIN32" /D "WIN64" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +!elseif "$(BUILDARCH)" == "amd64" +SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 $(GX_OPTION) /Od /D "WIN32" /D "WIN64" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# On amd64, VS2005 compiler requires bufferoverflowU.lib on the link command line, +# otherwise we get missing __security_check_cookie externals at link time. +SA_LINK_FLAGS = bufferoverflowU.lib +!else +SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 /Gm $(GX_OPTION) /ZI /Od /D "WIN32" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c +!endif +!if "$(MT)" != "" +SA_LINK_FLAGS = /manifest $(SA_LINK_FLAGS) +!endif +SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp +SA_LFLAGS = $(SA_LINK_FLAGS) /nologo /subsystem:console /map /debug /machine:$(MACHINE) + +# Note that we do not keep sawindbj.obj around as it would then +# get included in the dumpbin command in build_vm_def.sh + +# In VS2005 or VS2008 the link command creates a .manifest file that we want +# to insert into the linked artifact so we do not need to track it separately. +# Use ";#2" for .dll and ";#1" for .exe in the MT command below: +$(SAWINDBG): $(SASRCFILE) + set INCLUDE=$(SA_INCLUDE)$(INCLUDE) + $(CPP) @<< + /I"$(BootStrapDir)/include" /I"$(BootStrapDir)/include/win32" + /I"$(GENERATED)" $(SA_CFLAGS) + $(SASRCFILE) + /out:sawindbg.obj +<< + set LIB=$(SA_LIB)$(LIB) + $(LINK) /out:$@ /DLL sawindbg.obj dbgeng.lib $(SA_LFLAGS) +!if "$(MT)" != "" + $(MT) /manifest $(@F).manifest /outputresource:$(@F);#2 +!endif + -@rm -f sawindbg.obj + +cleanall : + rm -rf $(GENERATED:\=/)/saclasses + rm -rf $(GENERATED:\=/)/sa-jdi.jar +!endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/sanity.make 2009-08-01 04:17:19.187530815 +0100 @@ -0,0 +1,35 @@ +# +# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). 
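sanity.make only warns, it never fails the build. The two checks above are equivalent to the following sh fragment, with MSC_VER and LINK_VER coming from the generated local.make:

  case "$MSC_VER" in
    1310|1399|1400) ;;   # recognized cl.exe versions
    *) echo "*** WARNING *** unrecognized cl.exe version $MSC_VER; FORCE_MSC_VER overrides detection" ;;
  esac
  case "$LINK_VER" in
    710|800) ;;          # recognized link.exe versions
    *) echo "*** WARNING *** unrecognized link.exe version $LINK_VER; FORCE_LINK_VER overrides detection" ;;
  esac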
+# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +!include local.make + +all: checkCL checkLink + +checkCL: + @ if "$(MSC_VER)" NEQ "1310" if "$(MSC_VER)" NEQ "1399" if "$(MSC_VER)" NEQ "1400" \ + echo *** WARNING *** unrecognized cl.exe version $(MSC_VER) ($(RAW_MSC_VER)). Use FORCE_MSC_VER to override automatic detection. + +checkLink: + @ if "$(LINK_VER)" NEQ "710" if "$(LINK_VER)" NEQ "800" \ + echo *** WARNING *** unrecognized link.exe version $(LINK_VER) ($(RAW_LINK_VER)). Use FORCE_LINK_VER to override automatic detection. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/shared.make 2009-08-01 04:17:19.615577923 +0100 @@ -0,0 +1,70 @@ +# +# Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +DEFAULTACTIONS=clean post_update create + +default:: $(SUBDIRS) + +!ifndef DIR +DIR=. +!endif + +!ifndef CPP +CPP=cl.exe +!endif + + +!ifdef SUBDIRS +$(SUBDIRS): FORCE + @if not exist $@ mkdir $@ + @if not exist $@\local.make echo # Empty > $@\local.make + @echo nmake $(ACTION) in $(DIR)\$@ + cd $@ && $(MAKE) /NOLOGO /f $(WorkSpace)\make\windows\makefiles\$@.make $(ACTION) DIR=$(DIR)\$@ BUILD_FLAVOR=$(BUILD_FLAVOR) +!endif + +# Creates the needed directory +create:: +!if "$(DIR)" != "." + @echo mkdir $(DIR) +!endif + +# Epilog to update for generating derived files +post_update:: + +# Removes scrap files +clean:: FORCE + -@rm -f *.OLD *.publish + +# Remove all scrap files and all generated files +pure:: clean + -@rm -f *.OLD *.publish + +$(DEFAULTACTIONS) $(ACTIONS):: +!ifdef SUBDIRS + @$(MAKE) -nologo ACTION=$@ DIR=$(DIR) +!endif + +FORCE: + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/top.make 2009-08-01 04:17:20.041348732 +0100 @@ -0,0 +1,34 @@ +# +# Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
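The $(SUBDIRS) rule above drives the batch build's recursion: each subdirectory is created on demand, seeded with an empty local.make if it has none, and then built with its own makefile. One expansion, for the generated subdirectory (product is just an example BUILD_FLAVOR, and $WorkSpace again stands for the workspace root):

  mkdir generated
  echo "# Empty" > generated/local.make
  cd generated && nmake /NOLOGO /f $WorkSpace/make/windows/makefiles/generated.make \
      DIR=./generated BUILD_FLAVOR=product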
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +!include local.make + +!ifdef ADLC_ONLY +SUBDIRS=generated +!else +SUBDIRS=generated $(BUILD_FLAVOR) +!endif + +!include $(WorkSpace)/make/windows/makefiles/shared.make + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/makefiles/vm.make 2009-08-01 04:17:20.456223982 +0100 @@ -0,0 +1,284 @@ +# +# Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Resource file containing VERSIONINFO +Res_Files=.\version.res + +!ifdef RELEASE +!ifdef DEVELOP +CPP_FLAGS=$(CPP_FLAGS) /D "DEBUG" +!else +CPP_FLAGS=$(CPP_FLAGS) /D "PRODUCT" +!endif +!else +CPP_FLAGS=$(CPP_FLAGS) /D "ASSERT" +!endif + +!if "$(Variant)" == "core" +# No need to define anything, CORE is defined as !COMPILER1 && !COMPILER2 +!endif + +!if "$(Variant)" == "kernel" +CPP_FLAGS=$(CPP_FLAGS) /D "KERNEL" +!endif + +!if "$(Variant)" == "compiler1" +CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER1" +!endif + +!if "$(Variant)" == "compiler2" +CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER2" +!endif + +!if "$(Variant)" == "tiered" +CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER1" /D "COMPILER2" +!endif + +# The following variables are defined in the generated local.make file. 
+CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_RELEASE_VERSION=\"$(HS_BUILD_VER)\"" +CPP_FLAGS=$(CPP_FLAGS) /D "JRE_RELEASE_VERSION=\"$(JRE_RELEASE_VER)\"" +CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_LIB_ARCH=\"$(BUILDARCH)\"" +CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_TARGET=\"$(BUILD_FLAVOR)\"" +CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\"" +CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\"" + +CPP_FLAGS=$(CPP_FLAGS) /D "WIN32" /D "_WINDOWS" $(CPP_INCLUDE_DIRS) + +# Must specify this for sharedRuntimeTrig.cpp +CPP_FLAGS=$(CPP_FLAGS) /D "VM_LITTLE_ENDIAN" + +# Define that so jni.h is on correct side +CPP_FLAGS=$(CPP_FLAGS) /D "_JNI_IMPLEMENTATION_" + +!if "$(BUILDARCH)" == "ia64" +STACK_SIZE="/STACK:1048576,262144" +!else +STACK_SIZE= +!endif + +!if "$(BUILDARCH)" == "ia64" +# AsyncGetCallTrace is not supported on IA64 yet +AGCT_EXPORT= +!else +!if "$(Variant)" == "kernel" +AGCT_EXPORT= +!else +AGCT_EXPORT=/export:AsyncGetCallTrace +!endif +!endif + +LINK_FLAGS=$(LINK_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \ + /export:JNI_GetDefaultJavaVMInitArgs \ + /export:JNI_CreateJavaVM \ + /export:JVM_FindClassFromBootLoader \ + /export:JNI_GetCreatedJavaVMs \ + /export:jio_snprintf \ + /export:jio_printf \ + /export:jio_fprintf \ + /export:jio_vfprintf \ + /export:jio_vsnprintf \ + $(AGCT_EXPORT) \ + /export:JVM_GetVersionInfo \ + /export:JVM_GetThreadStateNames \ + /export:JVM_GetThreadStateValues \ + /export:JVM_InitAgentProperties + +CPP_INCLUDE_DIRS=\ + /I "..\generated" \ + /I "..\generated\jvmtifiles" \ + /I "$(WorkSpace)\src\share\vm\c1" \ + /I "$(WorkSpace)\src\share\vm\compiler" \ + /I "$(WorkSpace)\src\share\vm\code" \ + /I "$(WorkSpace)\src\share\vm\interpreter" \ + /I "$(WorkSpace)\src\share\vm\ci" \ + /I "$(WorkSpace)\src\share\vm\classfile" \ + /I "$(WorkSpace)\src\share\vm\gc_implementation\parallelScavenge"\ + /I "$(WorkSpace)\src\share\vm\gc_implementation\shared"\ + /I "$(WorkSpace)\src\share\vm\gc_implementation\parNew"\ + /I "$(WorkSpace)\src\share\vm\gc_implementation\concurrentMarkSweep"\ + /I "$(WorkSpace)\src\share\vm\gc_implementation\g1"\ + /I "$(WorkSpace)\src\share\vm\gc_interface"\ + /I "$(WorkSpace)\src\share\vm\asm" \ + /I "$(WorkSpace)\src\share\vm\memory" \ + /I "$(WorkSpace)\src\share\vm\oops" \ + /I "$(WorkSpace)\src\share\vm\prims" \ + /I "$(WorkSpace)\src\share\vm\runtime" \ + /I "$(WorkSpace)\src\share\vm\services" \ + /I "$(WorkSpace)\src\share\vm\utilities" \ + /I "$(WorkSpace)\src\share\vm\libadt" \ + /I "$(WorkSpace)\src\share\vm\opto" \ + /I "$(WorkSpace)\src\os\windows\vm" \ + /I "$(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm" \ + /I "$(WorkSpace)\src\cpu\$(Platform_arch)\vm" + +CPP_USE_PCH=/Fp"vm.pch" /Yu"incls/_precompiled.incl" + +# Where to find the source code for the virtual machine +VM_PATH=../generated/incls +VM_PATH=$(VM_PATH);../generated/jvmtifiles +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/c1 +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/compiler +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/code +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/interpreter +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/ci +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/classfile +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/parallelScavenge +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/shared +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/parNew +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/concurrentMarkSweep 
+VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/g1 +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_interface +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/asm +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/memory +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/oops +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/prims +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/runtime +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/services +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/utilities +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/libadt +VM_PATH=$(VM_PATH);$(WorkSpace)/src/os/windows/vm +VM_PATH=$(VM_PATH);$(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm +VM_PATH=$(VM_PATH);$(WorkSpace)/src/cpu/$(Platform_arch)/vm +VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto + +VM_PATH={$(VM_PATH)} + +# Special case files not using precompiled header files. + +c1_RInfo_$(Platform_arch).obj: $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp + $(CPP) $(CPP_FLAGS) /c $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp + +os_windows.obj: $(WorkSpace)\src\os\windows\vm\os_windows.cpp + $(CPP) $(CPP_FLAGS) /c $(WorkSpace)\src\os\windows\vm\os_windows.cpp + +os_windows_$(Platform_arch).obj: $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\os_windows_$(Platform_arch).cpp + $(CPP) $(CPP_FLAGS) /c $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\os_windows_$(Platform_arch).cpp + +osThread_windows.obj: $(WorkSpace)\src\os\windows\vm\osThread_windows.cpp + $(CPP) $(CPP_FLAGS) /c $(WorkSpace)\src\os\windows\vm\osThread_windows.cpp + +conditionVar_windows.obj: $(WorkSpace)\src\os\windows\vm\conditionVar_windows.cpp + $(CPP) $(CPP_FLAGS) /c $(WorkSpace)\src\os\windows\vm\conditionVar_windows.cpp + +getThread_windows_$(Platform_arch).obj: $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\getThread_windows_$(Platform_arch).cpp + $(CPP) $(CPP_FLAGS) /c $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\getThread_windows_$(Platform_arch).cpp + +opcodes.obj: $(WorkSpace)\src\share\vm\opto\opcodes.cpp + $(CPP) $(CPP_FLAGS) /c $(WorkSpace)\src\share\vm\opto\opcodes.cpp + +bytecodeInterpreter.obj: $(WorkSpace)\src\share\vm\interpreter\bytecodeInterpreter.cpp + $(CPP) $(CPP_FLAGS) /c $(WorkSpace)\src\share\vm\interpreter\bytecodeInterpreter.cpp + +bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp + $(CPP) $(CPP_FLAGS) /c ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp + +# Default rules for the Virtual Machine +{$(WorkSpace)\src\share\vm\c1}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\compiler}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\code}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\interpreter}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\ci}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\classfile}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\gc_implementation\parallelScavenge}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\gc_implementation\shared}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\gc_implementation\parNew}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\gc_implementation\concurrentMarkSweep}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + 
+{$(WorkSpace)\src\share\vm\gc_implementation\g1}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\gc_interface}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\asm}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\memory}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\oops}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\prims}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\runtime}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\services}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\utilities}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\libadt}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\share\vm\opto}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\os\windows\vm}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +# This guy should remain a single colon rule because +# otherwise we can't specify the output filename. +{$(WorkSpace)\src\os\windows\vm}.rc.res: + @$(RC) $(RC_FLAGS) /fo"$@" $< + +{$(WorkSpace)\src\cpu\$(Platform_arch)\vm}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{$(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{..\generated\incls}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +{..\generated\jvmtifiles}.cpp.obj:: + $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $< + +default:: + +_build_pch_file.obj: + @echo #include "incls/_precompiled.incl" > ../generated/_build_pch_file.cpp + $(CPP) $(CPP_FLAGS) /Fp"vm.pch" /Yc"incls/_precompiled.incl" /c ../generated/_build_pch_file.cpp --- old/hotspot/build/windows/platform_amd64 2009-08-01 04:17:20.978668794 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,15 +0,0 @@ -// Platform file for win32 NT platform @(#)platform_amd64 1.2 07/09/17 09:41:33 - -os_family = windows - -arch = x86 - -arch_model = x86_64 - -os_arch = windows_x86 - -os_arch_model = windows_x86_64 - -compiler = visCPP - -gnu_dis_arch = amd64 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/platform_amd64 2009-08-01 04:17:20.905962860 +0100 @@ -0,0 +1,15 @@ +// Platform file for win32 NT platform @(#)platform_amd64 1.2 07/09/17 09:41:33 + +os_family = windows + +arch = x86 + +arch_model = x86_64 + +os_arch = windows_x86 + +os_arch_model = windows_x86_64 + +lib_arch = amd64 + +compiler = visCPP --- old/hotspot/build/windows/platform_i486 2009-08-01 04:17:21.776820880 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,16 +0,0 @@ -// Platform file for windows platform @(#)platform_i486 1.7 07/09/17 09:41:33 - -os_family = windows - -arch = x86 - -arch_model = x86_32 - -os_arch = windows_x86 - -os_arch_model = windows_x86_32 - -compiler = visCPP - -gnu_dis_arch = i386 - --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/platform_i486 2009-08-01 04:17:21.691068968 +0100 @@ -0,0 +1,15 @@ +// Platform file for windows platform @(#)platform_i486 1.7 07/09/17 09:41:33 + +os_family = windows + +arch = x86 + +arch_model = x86_32 + +os_arch = windows_x86 + +os_arch_model = windows_x86_32 + +lib_arch = i386 + +compiler = visCPP --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/platform_ia64 2009-08-01 04:17:22.494411931 +0100 
@@ -0,0 +1,12 @@ +// Platform file for Itanium Windows platform $Revision: 1.0 $ + +os_family = win32 + +arch = ia64 + +os_arch = win32_ia64 + +compiler = visCPP + +gnu_dis_arch = ia64 + --- old/hotspot/build/windows/projectfiles/common/Makefile 2009-08-01 04:17:23.037418443 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,183 +0,0 @@ -# -# Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, -# CA 95054 USA or visit www.sun.com if you need additional information or -# have any questions. -# -# - -WorkSpace=$(HOTSPOTWORKSPACE) - -!ifdef ALT_BOOTDIR -BootStrapDir=$(ALT_BOOTDIR) -!else -!ifdef BOOTDIR -BootStrapDir=$(BOOTDIR) -!else -!ifdef JAVA_HOME -BootStrapDir=$(JAVA_HOME) -!endif -!endif -!endif - -!include $(HOTSPOTWORKSPACE)/build/windows/makefiles/makedeps.make - -# Pick up rules for building JVMTI (JSR-163) -JvmtiOutDir=$(HOTSPOTBUILDSPACE)\jvmtifiles -!include $(HOTSPOTWORKSPACE)/build/windows/makefiles/jvmti.make - -Platform=$(HOTSPOTWORKSPACE)/build/windows/platform_$(BUILDARCH) - -default:: $(AdditionalTargets) $(JvmtiGeneratedFiles) - -IncludeDBs_base=$(HOTSPOTWORKSPACE)/src/share/vm/includeDB_core \ - $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_jvmti \ - $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_gc \ - $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_serial - -# Parallel gc files -IncludeDBs_gc=$(HOTSPOTWORKSPACE)/src/share/vm/includeDB_gc_parallel \ - $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_shared \ - $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_parNew \ - $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge \ - $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep - - -IncludeDBs_kernel =$(IncludeDBs_base) \ - $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_compiler1 - -IncludeDBs_core =$(IncludeDBs_base) $(IncludeDBs_gc) \ - $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_features - -IncludeDBs_compiler1=$(IncludeDBs_core) \ - $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_compiler1 - -IncludeDBs_compiler2=$(IncludeDBs_core) \ - $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_compiler2 - -IncludeDBs_tiered=$(IncludeDBs_core) \ - $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_compiler1 \ - $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_compiler2 - - -!if "$(Variant)" == "compiler1" -IncludeDBs = $(IncludeDBs_compiler1) -!endif - -!if "$(Variant)" == "compiler2" -IncludeDBs = $(IncludeDBs_compiler2) -# Pick up rules for building adlc -!include $(HOTSPOTWORKSPACE)/build/windows/makefiles/adlc.make -!endif - -!if "$(Variant)" == "tiered" -IncludeDBs = $(IncludeDBs_tiered) -# Pick up rules for building 
adlc -!include $(HOTSPOTWORKSPACE)/build/windows/makefiles/adlc.make -!endif - -!if "$(Variant)" == "core" -IncludeDBs = $(IncludeDBs_core) -!endif - -!if "$(Variant)" == "kernel" -IncludeDBs = $(IncludeDBs_kernel) -!endif - -!include $(HOTSPOTWORKSPACE)/make/hotspot_version - -!if "$(HOTSPOT_RELEASE_VERSION)" != "" -HOTSPOT_RELEASE_VERSION="$(HOTSPOT_RELEASE_VERSION)" -!else -HOTSPOT_RELEASE_VERSION="$(HS_MAJOR_VER).$(HS_MINOR_VER)-b$(HS_BUILD_NUMBER)" -!endif -HOTSPOT_BUILD_VERSION$(HOTSPOT_BUILD_VERSION) = internal -!if "$(HOTSPOT_BUILD_VERSION)" != "" -HOTSPOT_RELEASE_VERSION="$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)" -!endif -!if "$(JRE_RELEASE_VERSION)" != "" -JRE_RELEASE_VERSION="$(JRE_RELEASE_VERSION)" -!else -JRE_RELEASE_VERSION="$(JDK_MAJOR_VER).$(JDK_MINOR_VER).$(JDK_MICRO_VER)" -!endif - -# Define HOTSPOT_VM_DISTRO if HOTSPOT_VM_DISTRO is set, -# and if it is not see if we have the src/closed directory -!if "$(HOTSPOT_VM_DISTRO)" != "" -HOTSPOT_VM_DISTRO="$(HOTSPOT_VM_DISTRO)" -!else -!if exists($(HOTSPOTWORKSPACE)\src\closed) -HOTSPOT_VM_DISTRO="Java HotSpot(TM)" -!else -HOTSPOT_VM_DISTRO="OpenJDK" -!endif -!endif - -MakeDepsIDEOptions = $(MakeDepsIDEOptions) \ - -includeDB_kernel $(HOTSPOTBUILDSPACE)\includeDB_kernel \ - -includeDB_core $(HOTSPOTBUILDSPACE)\includeDB_core \ - -includeDB_compiler1 $(HOTSPOTBUILDSPACE)\includeDB_compiler1 \ - -includeDB_compiler2 $(HOTSPOTBUILDSPACE)\includeDB_compiler2 \ - -includeDB_tiered $(HOTSPOTBUILDSPACE)\includeDB_tiered \ - -platform $(Platform) \ - -define HOTSPOT_RELEASE_VERSION=\\\"$(HOTSPOT_RELEASE_VERSION)\\\" \ - -define JRE_RELEASE_VERSION=\\\"$(JRE_RELEASE_VERSION)\\\" \ - -define HOTSPOT_VM_DISTRO=\\\"$(HOTSPOT_VM_DISTRO)\\\" - -incls: - @mkdir incls - -includeDB.current $(ProjectFile) Dependencies: local.make $(HOTSPOTBUILDSPACE)/classes/MakeDeps.class \ - $(IncludeDBs) incls - @rm -f includeDB $(HOTSPOTBUILDSPACE)\includeDB_kernel \ - $(HOTSPOTBUILDSPACE)\includeDB_core \ - $(HOTSPOTBUILDSPACE)\includeDB_compiler1 \ - $(HOTSPOTBUILDSPACE)\includeDB_compiler2 \ - $(HOTSPOTBUILDSPACE)\includeDB_tiered - @cat $(IncludeDBs_kernel) > $(HOTSPOTBUILDSPACE)\includeDB_kernel - @cat $(IncludeDBs_core) > $(HOTSPOTBUILDSPACE)\includeDB_core - @cat $(IncludeDBs_compiler1) > $(HOTSPOTBUILDSPACE)\includeDB_compiler1 - @cat $(IncludeDBs_compiler2) > $(HOTSPOTBUILDSPACE)\includeDB_compiler2 - @cat $(IncludeDBs_tiered) > $(HOTSPOTBUILDSPACE)\includeDB_tiered - @echo java.cpp jni.h > includeDB - @$(RUN_JAVA) -Djava.class.path=$(HOTSPOTBUILDSPACE)/classes MakeDeps diffs WinGammaPlatform$(VcVersion) \ - $(Platform) includeDB.current $(Platform) includeDB $(MakeDepsOptions) $(MakeDepsIDEOptions) - @rm -f includeDB.current - @cp includeDB includeDB.current - -lists: $(HOTSPOTBUILDSPACE)/classes/MakeDeps.class FORCE - @if exist incls rmdir /s /q incls - @rm -f includeDB - @cat $(IncludeDBs) > includeDB - @mkdir incls - @$(RUN_JAVA) -Djava.class.path=$(HOTSPOTBUILDSPACE)/classes MakeDeps WinGammaPlatform$(VcVersion) \ - $(Platform) includeDB $(MakeDepsOptions) $(MakeDepsIDEOptions) - @rm -f includeDB.current - @cp includeDB includeDB.current - -clean: - @rm -rf incls $(HOTSPOTBUILDSPACE)/classes - @rm -f includeDB includeDB.current $(ProjectFile) Dependencies - -$(HOTSPOTBUILDSPACE)/classes/MakeDeps.class: $(MakeDepsSources) - @if exist $(HOTSPOTBUILDSPACE)\classes rmdir /s /q $(HOTSPOTBUILDSPACE)\classes - @mkdir $(HOTSPOTBUILDSPACE)\classes - @$(COMPILE_JAVAC) -classpath $(HOTSPOTWORKSPACE)\src\share\tools\MakeDeps -g -d 
$(HOTSPOTBUILDSPACE)/classes $(MakeDepsSources) - -FORCE: --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/common/Makefile 2009-08-01 04:17:22.953897509 +0100 @@ -0,0 +1,184 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +WorkSpace=$(HOTSPOTWORKSPACE) + +!ifdef ALT_BOOTDIR +BootStrapDir=$(ALT_BOOTDIR) +!else +!ifdef BOOTDIR +BootStrapDir=$(BOOTDIR) +!else +!ifdef JAVA_HOME +BootStrapDir=$(JAVA_HOME) +!endif +!endif +!endif + +!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/makedeps.make + +# Pick up rules for building JVMTI (JSR-163) +JvmtiOutDir=$(HOTSPOTBUILDSPACE)\jvmtifiles +!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/jvmti.make + +Platform=$(HOTSPOTWORKSPACE)/make/windows/platform_$(BUILDARCH) + +default:: $(AdditionalTargets) $(JvmtiGeneratedFiles) + +IncludeDBs_base=$(HOTSPOTWORKSPACE)/src/share/vm/includeDB_core \ + $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_jvmti \ + $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_gc \ + $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_serial + +# Parallel gc files +IncludeDBs_gc=$(HOTSPOTWORKSPACE)/src/share/vm/includeDB_gc_parallel \ + $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_shared \ + $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_parNew \ + $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge \ + $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep \ + $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_g1 + + +IncludeDBs_kernel =$(IncludeDBs_base) \ + $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_compiler1 + +IncludeDBs_core =$(IncludeDBs_base) $(IncludeDBs_gc) \ + $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_features + +IncludeDBs_compiler1=$(IncludeDBs_core) \ + $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_compiler1 + +IncludeDBs_compiler2=$(IncludeDBs_core) \ + $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_compiler2 + +IncludeDBs_tiered=$(IncludeDBs_core) \ + $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_compiler1 \ + $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_compiler2 + + +!if "$(Variant)" == "compiler1" +IncludeDBs = $(IncludeDBs_compiler1) +!endif + +!if "$(Variant)" == "compiler2" +IncludeDBs = $(IncludeDBs_compiler2) +# Pick up rules for building adlc +!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/adlc.make +!endif + +!if "$(Variant)" == "tiered" +IncludeDBs = $(IncludeDBs_tiered) +# Pick up rules for building adlc +!include 
$(HOTSPOTWORKSPACE)/make/windows/makefiles/adlc.make +!endif + +!if "$(Variant)" == "core" +IncludeDBs = $(IncludeDBs_core) +!endif + +!if "$(Variant)" == "kernel" +IncludeDBs = $(IncludeDBs_kernel) +!endif + +!include $(HOTSPOTWORKSPACE)/make/hotspot_version + +!if "$(HOTSPOT_RELEASE_VERSION)" != "" +HOTSPOT_RELEASE_VERSION="$(HOTSPOT_RELEASE_VERSION)" +!else +HOTSPOT_RELEASE_VERSION="$(HS_MAJOR_VER).$(HS_MINOR_VER)-b$(HS_BUILD_NUMBER)" +!endif +HOTSPOT_BUILD_VERSION$(HOTSPOT_BUILD_VERSION) = internal +!if "$(HOTSPOT_BUILD_VERSION)" != "" +HOTSPOT_RELEASE_VERSION="$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)" +!endif +!if "$(JRE_RELEASE_VERSION)" != "" +JRE_RELEASE_VERSION="$(JRE_RELEASE_VERSION)" +!else +JRE_RELEASE_VERSION="$(JDK_MAJOR_VER).$(JDK_MINOR_VER).$(JDK_MICRO_VER)" +!endif + +# Define HOTSPOT_VM_DISTRO if HOTSPOT_VM_DISTRO is set, +# and if it is not see if we have the src/closed directory +!if "$(HOTSPOT_VM_DISTRO)" != "" +HOTSPOT_VM_DISTRO="$(HOTSPOT_VM_DISTRO)" +!else +!if exists($(HOTSPOTWORKSPACE)\src\closed) +HOTSPOT_VM_DISTRO="Java HotSpot(TM)" +!else +HOTSPOT_VM_DISTRO="OpenJDK" +!endif +!endif + +MakeDepsIDEOptions = $(MakeDepsIDEOptions) \ + -includeDB_kernel $(HOTSPOTBUILDSPACE)\includeDB_kernel \ + -includeDB_core $(HOTSPOTBUILDSPACE)\includeDB_core \ + -includeDB_compiler1 $(HOTSPOTBUILDSPACE)\includeDB_compiler1 \ + -includeDB_compiler2 $(HOTSPOTBUILDSPACE)\includeDB_compiler2 \ + -includeDB_tiered $(HOTSPOTBUILDSPACE)\includeDB_tiered \ + -platform $(Platform) \ + -define HOTSPOT_RELEASE_VERSION=\\\"$(HOTSPOT_RELEASE_VERSION)\\\" \ + -define JRE_RELEASE_VERSION=\\\"$(JRE_RELEASE_VERSION)\\\" \ + -define HOTSPOT_VM_DISTRO=\\\"$(HOTSPOT_VM_DISTRO)\\\" + +incls: + @mkdir incls + +includeDB.current $(ProjectFile) Dependencies: local.make $(HOTSPOTBUILDSPACE)/classes/MakeDeps.class \ + $(IncludeDBs) incls + @rm -f includeDB $(HOTSPOTBUILDSPACE)\includeDB_kernel \ + $(HOTSPOTBUILDSPACE)\includeDB_core \ + $(HOTSPOTBUILDSPACE)\includeDB_compiler1 \ + $(HOTSPOTBUILDSPACE)\includeDB_compiler2 \ + $(HOTSPOTBUILDSPACE)\includeDB_tiered + @cat $(IncludeDBs_kernel) > $(HOTSPOTBUILDSPACE)\includeDB_kernel + @cat $(IncludeDBs_core) > $(HOTSPOTBUILDSPACE)\includeDB_core + @cat $(IncludeDBs_compiler1) > $(HOTSPOTBUILDSPACE)\includeDB_compiler1 + @cat $(IncludeDBs_compiler2) > $(HOTSPOTBUILDSPACE)\includeDB_compiler2 + @cat $(IncludeDBs_tiered) > $(HOTSPOTBUILDSPACE)\includeDB_tiered + @echo java.cpp jni.h > includeDB + @$(RUN_JAVA) -Djava.class.path=$(HOTSPOTBUILDSPACE)/classes MakeDeps diffs WinGammaPlatform$(VcVersion) \ + $(Platform) includeDB.current $(Platform) includeDB $(MakeDepsOptions) $(MakeDepsIDEOptions) + @rm -f includeDB.current + @cp includeDB includeDB.current + +lists: $(HOTSPOTBUILDSPACE)/classes/MakeDeps.class FORCE + @if exist incls rmdir /s /q incls + @rm -f includeDB + @cat $(IncludeDBs) > includeDB + @mkdir incls + @$(RUN_JAVA) -Djava.class.path=$(HOTSPOTBUILDSPACE)/classes MakeDeps WinGammaPlatform$(VcVersion) \ + $(Platform) includeDB $(MakeDepsOptions) $(MakeDepsIDEOptions) + @rm -f includeDB.current + @cp includeDB includeDB.current + +clean: + @rm -rf incls $(HOTSPOTBUILDSPACE)/classes + @rm -f includeDB includeDB.current $(ProjectFile) Dependencies + +$(HOTSPOTBUILDSPACE)/classes/MakeDeps.class: $(MakeDepsSources) + @if exist $(HOTSPOTBUILDSPACE)\classes rmdir /s /q $(HOTSPOTBUILDSPACE)\classes + @mkdir $(HOTSPOTBUILDSPACE)\classes + @$(COMPILE_JAVAC) -classpath $(HOTSPOTWORKSPACE)\src\share\tools\MakeDeps -g -d $(HOTSPOTBUILDSPACE)/classes 
$(MakeDepsSources) + +FORCE: --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/compiler1/Makefile 2009-08-01 04:17:23.815937494 +0100 @@ -0,0 +1,28 @@ +# +# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +Variant=compiler1 +!include local.make + +!include $(HOTSPOTWORKSPACE)/make/windows/projectfiles/common/Makefile --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/compiler1/vm.def 2009-08-01 04:17:24.224817093 +0100 @@ -0,0 +1,7 @@ +; +; This .DEF file is a placeholder for one which is automatically +; generated during the build process. See +; make\windows\build_vm_def.sh and +; make\windows\makefiles\makedeps.make (esp. the "-prelink" +; options). +; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/compiler1/vm.dsw 2009-08-01 04:17:24.633686927 +0100 @@ -0,0 +1,29 @@ +Microsoft Developer Studio Workspace File, Format Version 6.00 +# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE! + +############################################################################### + +Project: "vm"=.\vm.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ +}}} + +############################################################################### + +Global: + +Package=<5> +{{{ +}}} + +Package=<3> +{{{ +}}} + +############################################################################### + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/compiler2/ADLCompiler.dsp 2009-08-01 04:17:25.068520389 +0100 @@ -0,0 +1,142 @@ +# Microsoft Developer Studio Project File - Name="ADLCompiler" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=ADLCompiler - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "ADLCompiler.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. 
For example: +!MESSAGE +!MESSAGE NMAKE /f "ADLCompiler.mak" CFG="ADLCompiler - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "ADLCompiler - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "ADLCompiler - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "ADLCompiler - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir ".\adlc\Release" +# PROP Intermediate_Dir ".\adlc\Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /W3 /GX /O2 /I "." /I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /FR /FD /c +# SUBTRACT CPP /YX /Yc /Yu +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo /o".\adlc\Release\adlc.bsc" +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 /out:".\bin\adlc.exe" + +!ELSEIF "$(CFG)" == "ADLCompiler - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir ".\adlc\Debug" +# PROP Intermediate_Dir ".\adlc\Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." 
/I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "DEBUG" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c +# ADD BASE RSC /l 0x409 +# ADD RSC /l 0x409 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /o".\adlc\Debug\adlc_g.bsc" +# SUBTRACT BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib uuid.lib /nologo /subsystem:console /debug /machine:I386 /out:".\bin\adlc_g.exe" + +!ENDIF + +# Begin Target + +# Name "ADLCompiler - Win32 Release" +# Name "ADLCompiler - Win32 Debug" +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\adlparse.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\archDesc.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\arena.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\dfa.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\dict2.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\filebuff.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\forms.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\formsopt.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\formssel.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\main.cpp" +# SUBTRACT CPP /YX /Yc +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\opto\opcodes.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\output_c.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\output_h.cpp" +# End Source File +# End Target +# End Project + \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/compiler2/ADLCompiler.dsw 2009-08-01 04:17:25.518000969 +0100 @@ -0,0 +1,29 @@ +Microsoft Developer Studio Workspace File, Format Version 6.00 +# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE! 
+ +############################################################################### + +Project: "ADLCompiler"=".\ADLCompiler.dsp" - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ +}}} + +############################################################################### + +Global: + +Package=<5> +{{{ +}}} + +Package=<3> +{{{ +}}} + +############################################################################### + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/compiler2/Makefile 2009-08-01 04:17:25.978926686 +0100 @@ -0,0 +1,29 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +Variant=compiler2 +!include local.make +AdditionalTargets=incls/ad_$(Platform_arch_model).cpp incls/dfa_$(Platform_arch_model).cpp + +!include $(HOTSPOTWORKSPACE)/make/windows/projectfiles/common/Makefile --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/compiler2/vm.def 2009-08-01 04:17:26.386778309 +0100 @@ -0,0 +1,7 @@ +; +; This .DEF file is a placeholder for one which is automatically +; generated during the build process. See +; make\windows\build_vm_def.sh and +; make\windows\makefiles\makedeps.make (esp. the "-prelink" +; options). +; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/compiler2/vm.dsw 2009-08-01 04:17:26.788390184 +0100 @@ -0,0 +1,29 @@ +Microsoft Developer Studio Workspace File, Format Version 5.00 +# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE! + +############################################################################### + +Project: "vm"=.\vm.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ +}}} + +############################################################################### + +Global: + +Package=<5> +{{{ +}}} + +Package=<3> +{{{ +}}} + +############################################################################### + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/core/Makefile 2009-08-01 04:17:27.222797673 +0100 @@ -0,0 +1,28 @@ +# +# Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. 
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +Variant=core +!include local.make + +!include $(HOTSPOTWORKSPACE)/make/windows/projectfiles/common/Makefile --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/core/vm.def 2009-08-01 04:17:27.654677516 +0100 @@ -0,0 +1,7 @@ +; +; This .DEF file is a placeholder for one which is automatically +; generated during the build process. See +; make\windows\build_vm_def.sh and +; make\windows\makefiles\makedeps.make (esp. the "-prelink" +; options). +; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/core/vm.dsw 2009-08-01 04:17:28.075268112 +0100 @@ -0,0 +1,29 @@ +Microsoft Developer Studio Workspace File, Format Version 6.00 +# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE! + +############################################################################### + +Project: "vm"=.\vm.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ +}}} + +############################################################################### + +Global: + +Package=<5> +{{{ +}}} + +Package=<3> +{{{ +}}} + +############################################################################### + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/kernel/Makefile 2009-08-01 04:17:28.518201884 +0100 @@ -0,0 +1,28 @@ +# +# Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +Variant=compiler1 +!include local.make + +!include $(HOTSPOTWORKSPACE)/make/windows/projectfiles/common/Makefile --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/kernel/vm.def 2009-08-01 04:17:28.931055888 +0100 @@ -0,0 +1,7 @@ +; +; This .DEF file is a placeholder for one which is automatically +; generated during the build process. 
See +; make\windows\build_vm_def.sh and +; make\windows\makefiles\makedeps.make (esp. the "-prelink" +; options). +; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/kernel/vm.dsw 2009-08-01 04:17:29.344412451 +0100 @@ -0,0 +1,29 @@ +Microsoft Developer Studio Workspace File, Format Version 6.00 +# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE! + +############################################################################### + +Project: "vm"=.\vm.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ +}}} + +############################################################################### + +Global: + +Package=<5> +{{{ +}}} + +Package=<3> +{{{ +}}} + +############################################################################### + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/tiered/ADLCompiler.dsp 2009-08-01 04:17:30.451963517 +0100 @@ -0,0 +1,142 @@ +# Microsoft Developer Studio Project File - Name="ADLCompiler" - Package Owner=<4> +# Microsoft Developer Studio Generated Build File, Format Version 6.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +CFG=ADLCompiler - Win32 Debug +!MESSAGE This is not a valid makefile. To build this project using NMAKE, +!MESSAGE use the Export Makefile command and run +!MESSAGE +!MESSAGE NMAKE /f "ADLCompiler.mak". +!MESSAGE +!MESSAGE You can specify a configuration when running NMAKE +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "ADLCompiler.mak" CFG="ADLCompiler - Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "ADLCompiler - Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "ADLCompiler - Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE + +# Begin Project +# PROP AllowPerConfigDependencies 0 +# PROP Scc_ProjName "" +# PROP Scc_LocalPath "" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "ADLCompiler - Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "Release" +# PROP BASE Intermediate_Dir "Release" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir ".\adlc\Release" +# PROP Intermediate_Dir ".\adlc\Release" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /W3 /GX /O2 /I "." 
/I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /FR /FD /c +# SUBTRACT CPP /YX /Yc /Yu +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo /o".\adlc\Release\adlc.bsc" +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 /out:".\bin\adlc.exe" + +!ELSEIF "$(CFG)" == "ADLCompiler - Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "Debug" +# PROP BASE Intermediate_Dir "Debug" +# PROP BASE Target_Dir "" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir ".\adlc\Debug" +# PROP Intermediate_Dir ".\adlc\Debug" +# PROP Ignore_Export_Lib 0 +# PROP Target_Dir "" +# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_CONSOLE" /D "_MBCS" /YX /FD /c +# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." 
/I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "DEBUG" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c +# ADD BASE RSC /l 0x409 +# ADD RSC /l 0x409 +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /o".\adlc\Debug\adlc_g.bsc" +# SUBTRACT BSC32 /nologo +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib uuid.lib /nologo /subsystem:console /debug /machine:I386 /out:".\bin\adlc_g.exe" + +!ENDIF + +# Begin Target + +# Name "ADLCompiler - Win32 Release" +# Name "ADLCompiler - Win32 Debug" +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\adlparse.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\archDesc.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\arena.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\dfa.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\dict2.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\filebuff.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\forms.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\formsopt.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\formssel.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\main.cpp" +# SUBTRACT CPP /YX /Yc +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\opto\opcodes.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\output_c.cpp" +# End Source File +# Begin Source File + +SOURCE="$(HotSpotWorkSpace)\src\share\vm\adlc\output_h.cpp" +# End Source File +# End Target +# End Project + \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/tiered/ADLCompiler.dsw 2009-08-01 04:17:30.887425278 +0100 @@ -0,0 +1,29 @@ +Microsoft Developer Studio Workspace File, Format Version 6.00 +# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE! 
+ +############################################################################### + +Project: "ADLCompiler"=".\ADLCompiler.dsp" - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ +}}} + +############################################################################### + +Global: + +Package=<5> +{{{ +}}} + +Package=<3> +{{{ +}}} + +############################################################################### + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/tiered/Makefile 2009-08-01 04:17:31.288099200 +0100 @@ -0,0 +1,29 @@ +# +# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +Variant=tiered +!include local.make +AdditionalTargets=incls/ad_$(Platform_arch_model).cpp incls/dfa_$(Platform_arch_model).cpp + +!include $(HOTSPOTWORKSPACE)/make/windows/projectfiles/common/Makefile --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/tiered/vm.def 2009-08-01 04:17:31.721978329 +0100 @@ -0,0 +1,7 @@ +; +; This .DEF file is a placeholder for one which is automatically +; generated during the build process. See +; make\windows\build_vm_def.sh and +; make\windows\makefiles\makedeps.make (esp. the "-prelink" +; options). +; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/make/windows/projectfiles/tiered/vm.dsw 2009-08-01 04:17:32.130070530 +0100 @@ -0,0 +1,29 @@ +Microsoft Developer Studio Workspace File, Format Version 5.00 +# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE! + +############################################################################### + +Project: "vm"=.\vm.dsp - Package Owner=<4> + +Package=<5> +{{{ +}}} + +Package=<4> +{{{ +}}} + +############################################################################### + +Global: + +Package=<5> +{{{ +}}} + +Package=<3> +{{{ +}}} + +############################################################################### + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/cpu/x86/vm/assembler_x86.cpp 2009-08-01 04:17:32.649851399 +0100 @@ -0,0 +1,7713 @@ +/* + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_assembler_x86.cpp.incl" + +// Implementation of AddressLiteral + +AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) { + _is_lval = false; + _target = target; + switch (rtype) { + case relocInfo::oop_type: + // Oops are a special case. Normally they would be their own section + // but in cases like icBuffer they are literals in the code stream that + // we don't have a section for. We use none so that we get a literal address + // which is always patchable. + break; + case relocInfo::external_word_type: + _rspec = external_word_Relocation::spec(target); + break; + case relocInfo::internal_word_type: + _rspec = internal_word_Relocation::spec(target); + break; + case relocInfo::opt_virtual_call_type: + _rspec = opt_virtual_call_Relocation::spec(); + break; + case relocInfo::static_call_type: + _rspec = static_call_Relocation::spec(); + break; + case relocInfo::runtime_call_type: + _rspec = runtime_call_Relocation::spec(); + break; + case relocInfo::poll_type: + case relocInfo::poll_return_type: + _rspec = Relocation::spec_simple(rtype); + break; + case relocInfo::none: + break; + default: + ShouldNotReachHere(); + break; + } +} + +// Implementation of Address + +#ifdef _LP64 + +Address Address::make_array(ArrayAddress adr) { + // Not implementable on 64bit machines + // Should have been handled higher up the call chain. + ShouldNotReachHere(); + return Address(); +} + +// exceedingly dangerous constructor +Address::Address(int disp, address loc, relocInfo::relocType rtype) { + _base = noreg; + _index = noreg; + _scale = no_scale; + _disp = disp; + switch (rtype) { + case relocInfo::external_word_type: + _rspec = external_word_Relocation::spec(loc); + break; + case relocInfo::internal_word_type: + _rspec = internal_word_Relocation::spec(loc); + break; + case relocInfo::runtime_call_type: + // HMM + _rspec = runtime_call_Relocation::spec(); + break; + case relocInfo::poll_type: + case relocInfo::poll_return_type: + _rspec = Relocation::spec_simple(rtype); + break; + case relocInfo::none: + break; + default: + ShouldNotReachHere(); + } +} +#else // LP64 + +Address Address::make_array(ArrayAddress adr) { + AddressLiteral base = adr.base(); + Address index = adr.index(); + assert(index._disp == 0, "must not have disp"); // maybe it can? + Address array(index._base, index._index, index._scale, (intptr_t) base.target()); + array._rspec = base._rspec; + return array; +} + +// exceedingly dangerous constructor +Address::Address(address loc, RelocationHolder spec) { + _base = noreg; + _index = noreg; + _scale = no_scale; + _disp = (intptr_t) loc; + _rspec = spec; +} + +#endif // _LP64 + + + +// Convert the raw encoding form into the form expected by the constructor for +// Address. 
An index of 4 (rsp) corresponds to having no index, so convert +// that to noreg for the Address constructor. +Address Address::make_raw(int base, int index, int scale, int disp) { + bool valid_index = index != rsp->encoding(); + if (valid_index) { + Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp)); + return madr; + } else { + Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp)); + return madr; + } +} + +// Implementation of Assembler + +int AbstractAssembler::code_fill_byte() { + return (u_char)'\xF4'; // hlt +} + +// make this go away someday +void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) { + if (rtype == relocInfo::none) + emit_long(data); + else emit_data(data, Relocation::spec_simple(rtype), format); +} + +void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) { + assert(imm_operand == 0, "default format must be immediate in this file"); + assert(inst_mark() != NULL, "must be inside InstructionMark"); + if (rspec.type() != relocInfo::none) { + #ifdef ASSERT + check_relocation(rspec, format); + #endif + // Do not use AbstractAssembler::relocate, which is not intended for + // embedded words. Instead, relocate to the enclosing instruction. + + // hack. call32 is too wide for mask so use disp32 + if (format == call32_operand) + code_section()->relocate(inst_mark(), rspec, disp32_operand); + else + code_section()->relocate(inst_mark(), rspec, format); + } + emit_long(data); +} + +static int encode(Register r) { + int enc = r->encoding(); + if (enc >= 8) { + enc -= 8; + } + return enc; +} + +static int encode(XMMRegister r) { + int enc = r->encoding(); + if (enc >= 8) { + enc -= 8; + } + return enc; +} + +void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { + assert(dst->has_byte_register(), "must have byte register"); + assert(isByte(op1) && isByte(op2), "wrong opcode"); + assert(isByte(imm8), "not a byte"); + assert((op1 & 0x01) == 0, "should be 8bit operation"); + emit_byte(op1); + emit_byte(op2 | encode(dst)); + emit_byte(imm8); +} + + +void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) { + assert(isByte(op1) && isByte(op2), "wrong opcode"); + assert((op1 & 0x01) == 1, "should be 32bit operation"); + assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); + if (is8bit(imm32)) { + emit_byte(op1 | 0x02); // set sign bit + emit_byte(op2 | encode(dst)); + emit_byte(imm32 & 0xFF); + } else { + emit_byte(op1); + emit_byte(op2 | encode(dst)); + emit_long(imm32); + } +} + +// immediate-to-memory forms +void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) { + assert((op1 & 0x01) == 1, "should be 32bit operation"); + assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); + if (is8bit(imm32)) { + emit_byte(op1 | 0x02); // set sign bit + emit_operand(rm, adr, 1); + emit_byte(imm32 & 0xFF); + } else { + emit_byte(op1); + emit_operand(rm, adr, 4); + emit_long(imm32); + } +} + +void Assembler::emit_arith(int op1, int op2, Register dst, jobject obj) { + LP64_ONLY(ShouldNotReachHere()); + assert(isByte(op1) && isByte(op2), "wrong opcode"); + assert((op1 & 0x01) == 1, "should be 32bit operation"); + assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); + InstructionMark im(this); + emit_byte(op1); + emit_byte(op2 | encode(dst)); + emit_data((intptr_t)obj, relocInfo::oop_type, 0); +} + + +void Assembler::emit_arith(int op1, int op2, Register dst, Register src) { + 
assert(isByte(op1) && isByte(op2), "wrong opcode"); + emit_byte(op1); + emit_byte(op2 | encode(dst) << 3 | encode(src)); +} + + +void Assembler::emit_operand(Register reg, Register base, Register index, + Address::ScaleFactor scale, int disp, + RelocationHolder const& rspec, + int rip_relative_correction) { + relocInfo::relocType rtype = (relocInfo::relocType) rspec.type(); + + // Encode the registers as needed in the fields they are used in + + int regenc = encode(reg) << 3; + int indexenc = index->is_valid() ? encode(index) << 3 : 0; + int baseenc = base->is_valid() ? encode(base) : 0; + + if (base->is_valid()) { + if (index->is_valid()) { + assert(scale != Address::no_scale, "inconsistent address"); + // [base + index*scale + disp] + if (disp == 0 && rtype == relocInfo::none && + base != rbp LP64_ONLY(&& base != r13)) { + // [base + index*scale] + // [00 reg 100][ss index base] + assert(index != rsp, "illegal addressing mode"); + emit_byte(0x04 | regenc); + emit_byte(scale << 6 | indexenc | baseenc); + } else if (is8bit(disp) && rtype == relocInfo::none) { + // [base + index*scale + imm8] + // [01 reg 100][ss index base] imm8 + assert(index != rsp, "illegal addressing mode"); + emit_byte(0x44 | regenc); + emit_byte(scale << 6 | indexenc | baseenc); + emit_byte(disp & 0xFF); + } else { + // [base + index*scale + disp32] + // [10 reg 100][ss index base] disp32 + assert(index != rsp, "illegal addressing mode"); + emit_byte(0x84 | regenc); + emit_byte(scale << 6 | indexenc | baseenc); + emit_data(disp, rspec, disp32_operand); + } + } else if (base == rsp LP64_ONLY(|| base == r12)) { + // [rsp + disp] + if (disp == 0 && rtype == relocInfo::none) { + // [rsp] + // [00 reg 100][00 100 100] + emit_byte(0x04 | regenc); + emit_byte(0x24); + } else if (is8bit(disp) && rtype == relocInfo::none) { + // [rsp + imm8] + // [01 reg 100][00 100 100] disp8 + emit_byte(0x44 | regenc); + emit_byte(0x24); + emit_byte(disp & 0xFF); + } else { + // [rsp + imm32] + // [10 reg 100][00 100 100] disp32 + emit_byte(0x84 | regenc); + emit_byte(0x24); + emit_data(disp, rspec, disp32_operand); + } + } else { + // [base + disp] + assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode"); + if (disp == 0 && rtype == relocInfo::none && + base != rbp LP64_ONLY(&& base != r13)) { + // [base] + // [00 reg base] + emit_byte(0x00 | regenc | baseenc); + } else if (is8bit(disp) && rtype == relocInfo::none) { + // [base + disp8] + // [01 reg base] disp8 + emit_byte(0x40 | regenc | baseenc); + emit_byte(disp & 0xFF); + } else { + // [base + disp32] + // [10 reg base] disp32 + emit_byte(0x80 | regenc | baseenc); + emit_data(disp, rspec, disp32_operand); + } + } + } else { + if (index->is_valid()) { + assert(scale != Address::no_scale, "inconsistent address"); + // [index*scale + disp] + // [00 reg 100][ss index 101] disp32 + assert(index != rsp, "illegal addressing mode"); + emit_byte(0x04 | regenc); + emit_byte(scale << 6 | indexenc | 0x05); + emit_data(disp, rspec, disp32_operand); + } else if (rtype != relocInfo::none ) { + // [disp] (64bit) RIP-RELATIVE (32bit) abs + // [00 000 101] disp32 + + emit_byte(0x05 | regenc); + // Note that the RIP-rel. correction applies to the generated + // disp field, but _not_ to the target address in the rspec. + + // disp was created by converting the target address minus the pc + // at the start of the instruction. That needs more correction here. 
+ // intptr_t disp = target - next_ip; + assert(inst_mark() != NULL, "must be inside InstructionMark"); + address next_ip = pc() + sizeof(int32_t) + rip_relative_correction; + int64_t adjusted = disp; + // Do rip-rel adjustment for 64bit + LP64_ONLY(adjusted -= (next_ip - inst_mark())); + assert(is_simm32(adjusted), + "must be 32bit offset (RIP relative address)"); + emit_data((int32_t) adjusted, rspec, disp32_operand); + + } else { + // 32bit never did this, did everything as the rip-rel/disp code above + // [disp] ABSOLUTE + // [00 reg 100][00 100 101] disp32 + emit_byte(0x04 | regenc); + emit_byte(0x25); + emit_data(disp, rspec, disp32_operand); + } + } +} + +void Assembler::emit_operand(XMMRegister reg, Register base, Register index, + Address::ScaleFactor scale, int disp, + RelocationHolder const& rspec) { + emit_operand((Register)reg, base, index, scale, disp, rspec); +} + +// Secret local extension to Assembler::WhichOperand: +#define end_pc_operand (_WhichOperand_limit) + +address Assembler::locate_operand(address inst, WhichOperand which) { + // Decode the given instruction, and return the address of + // an embedded 32-bit operand word. + + // If "which" is disp32_operand, selects the displacement portion + // of an effective address specifier. + // If "which" is imm64_operand, selects the trailing immediate constant. + // If "which" is call32_operand, selects the displacement of a call or jump. + // Caller is responsible for ensuring that there is such an operand, + // and that it is 32/64 bits wide. + + // If "which" is end_pc_operand, find the end of the instruction. + + address ip = inst; + bool is_64bit = false; + + debug_only(bool has_disp32 = false); + int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn + + again_after_prefix: + switch (0xFF & *ip++) { + + // These convenience macros generate groups of "case" labels for the switch. +#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3 +#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \ + case (x)+4: case (x)+5: case (x)+6: case (x)+7 +#define REP16(x) REP8((x)+0): \ + case REP8((x)+8) + + case CS_segment: + case SS_segment: + case DS_segment: + case ES_segment: + case FS_segment: + case GS_segment: + // Seems dubious + LP64_ONLY(assert(false, "shouldn't have that prefix")); + assert(ip == inst+1, "only one prefix allowed"); + goto again_after_prefix; + + case 0x67: + case REX: + case REX_B: + case REX_X: + case REX_XB: + case REX_R: + case REX_RB: + case REX_RX: + case REX_RXB: + NOT_LP64(assert(false, "64bit prefixes")); + goto again_after_prefix; + + case REX_W: + case REX_WB: + case REX_WX: + case REX_WXB: + case REX_WR: + case REX_WRB: + case REX_WRX: + case REX_WRXB: + NOT_LP64(assert(false, "64bit prefixes")); + is_64bit = true; + goto again_after_prefix; + + case 0xFF: // pushq a; decl a; incl a; call a; jmp a + case 0x88: // movb a, r + case 0x89: // movl a, r + case 0x8A: // movb r, a + case 0x8B: // movl r, a + case 0x8F: // popl a + debug_only(has_disp32 = true); + break; + + case 0x68: // pushq #32 + if (which == end_pc_operand) { + return ip + 4; + } + assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate"); + return ip; // not produced by emit_operand + + case 0x66: // movw ... 
(size prefix) + again_after_size_prefix2: + switch (0xFF & *ip++) { + case REX: + case REX_B: + case REX_X: + case REX_XB: + case REX_R: + case REX_RB: + case REX_RX: + case REX_RXB: + case REX_W: + case REX_WB: + case REX_WX: + case REX_WXB: + case REX_WR: + case REX_WRB: + case REX_WRX: + case REX_WRXB: + NOT_LP64(assert(false, "64bit prefix found")); + goto again_after_size_prefix2; + case 0x8B: // movw r, a + case 0x89: // movw a, r + debug_only(has_disp32 = true); + break; + case 0xC7: // movw a, #16 + debug_only(has_disp32 = true); + tail_size = 2; // the imm16 + break; + case 0x0F: // several SSE/SSE2 variants + ip--; // reparse the 0x0F + goto again_after_prefix; + default: + ShouldNotReachHere(); + } + break; + + case REP8(0xB8): // movl/q r, #32/#64(oop?) + if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4); + // these asserts are somewhat nonsensical +#ifndef _LP64 + assert(which == imm_operand || which == disp32_operand, ""); +#else + assert((which == call32_operand || which == imm_operand) && is_64bit || + which == narrow_oop_operand && !is_64bit, ""); +#endif // _LP64 + return ip; + + case 0x69: // imul r, a, #32 + case 0xC7: // movl a, #32(oop?) + tail_size = 4; + debug_only(has_disp32 = true); // has both kinds of operands! + break; + + case 0x0F: // movx..., etc. + switch (0xFF & *ip++) { + case 0x12: // movlps + case 0x28: // movaps + case 0x2E: // ucomiss + case 0x2F: // comiss + case 0x54: // andps + case 0x55: // andnps + case 0x56: // orps + case 0x57: // xorps + case 0x6E: // movd + case 0x7E: // movd + case 0xAE: // ldmxcsr a + // 64bit side says it these have both operands but that doesn't + // appear to be true + debug_only(has_disp32 = true); + break; + + case 0xAD: // shrd r, a, %cl + case 0xAF: // imul r, a + case 0xBE: // movsbl r, a (movsxb) + case 0xBF: // movswl r, a (movsxw) + case 0xB6: // movzbl r, a (movzxb) + case 0xB7: // movzwl r, a (movzxw) + case REP16(0x40): // cmovl cc, r, a + case 0xB0: // cmpxchgb + case 0xB1: // cmpxchg + case 0xC1: // xaddl + case 0xC7: // cmpxchg8 + case REP16(0x90): // setcc a + debug_only(has_disp32 = true); + // fall out of the switch to decode the address + break; + + case 0xAC: // shrd r, a, #8 + debug_only(has_disp32 = true); + tail_size = 1; // the imm8 + break; + + case REP16(0x80): // jcc rdisp32 + if (which == end_pc_operand) return ip + 4; + assert(which == call32_operand, "jcc has no disp32 or imm"); + return ip; + default: + ShouldNotReachHere(); + } + break; + + case 0x81: // addl a, #32; addl r, #32 + // also: orl, adcl, sbbl, andl, subl, xorl, cmpl + // on 32bit in the case of cmpl, the imm might be an oop + tail_size = 4; + debug_only(has_disp32 = true); // has both kinds of operands! + break; + + case 0x83: // addl a, #8; addl r, #8 + // also: orl, adcl, sbbl, andl, subl, xorl, cmpl + debug_only(has_disp32 = true); // has both kinds of operands! + tail_size = 1; + break; + + case 0x9B: + switch (0xFF & *ip++) { + case 0xD9: // fnstcw a + debug_only(has_disp32 = true); + break; + default: + ShouldNotReachHere(); + } + break; + + case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a + case REP4(0x10): // adc... + case REP4(0x20): // and... + case REP4(0x30): // xor... + case REP4(0x08): // or... + case REP4(0x18): // sbb... + case REP4(0x28): // sub... + case 0xF7: // mull a + case 0x8D: // lea r, a + case 0x87: // xchg r, a + case REP4(0x38): // cmp... + case 0x85: // test r, a + debug_only(has_disp32 = true); // has both kinds of operands! 
+ break; + + case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8 + case 0xC6: // movb a, #8 + case 0x80: // cmpb a, #8 + case 0x6B: // imul r, a, #8 + debug_only(has_disp32 = true); // has both kinds of operands! + tail_size = 1; // the imm8 + break; + + case 0xE8: // call rdisp32 + case 0xE9: // jmp rdisp32 + if (which == end_pc_operand) return ip + 4; + assert(which == call32_operand, "call has no disp32 or imm"); + return ip; + + case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1 + case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl + case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a + case 0xDD: // fld_d a; fst_d a; fstp_d a + case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a + case 0xDF: // fild_d a; fistp_d a + case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a + case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a + case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a + debug_only(has_disp32 = true); + break; + + case 0xF0: // Lock + assert(os::is_MP(), "only on MP"); + goto again_after_prefix; + + case 0xF3: // For SSE + case 0xF2: // For SSE2 + switch (0xFF & *ip++) { + case REX: + case REX_B: + case REX_X: + case REX_XB: + case REX_R: + case REX_RB: + case REX_RX: + case REX_RXB: + case REX_W: + case REX_WB: + case REX_WX: + case REX_WXB: + case REX_WR: + case REX_WRB: + case REX_WRX: + case REX_WRXB: + NOT_LP64(assert(false, "found 64bit prefix")); + ip++; + default: + ip++; + } + debug_only(has_disp32 = true); // has both kinds of operands! + break; + + default: + ShouldNotReachHere(); + +#undef REP8 +#undef REP16 + } + + assert(which != call32_operand, "instruction is not a call, jmp, or jcc"); +#ifdef _LP64 + assert(which != imm_operand, "instruction is not a movq reg, imm64"); +#else + // assert(which != imm_operand || has_imm32, "instruction has no imm32 field"); + assert(which != imm_operand || has_disp32, "instruction has no imm32 field"); +#endif // LP64 + assert(which != disp32_operand || has_disp32, "instruction has no disp32 field"); + + // parse the output of emit_operand + int op2 = 0xFF & *ip++; + int base = op2 & 0x07; + int op3 = -1; + const int b100 = 4; + const int b101 = 5; + if (base == b100 && (op2 >> 6) != 3) { + op3 = 0xFF & *ip++; + base = op3 & 0x07; // refetch the base + } + // now ip points at the disp (if any) + + switch (op2 >> 6) { + case 0: + // [00 reg 100][ss index base] + // [00 reg 100][00 100 esp] + // [00 reg base] + // [00 reg 100][ss index 101][disp32] + // [00 reg 101] [disp32] + + if (base == b101) { + if (which == disp32_operand) + return ip; // caller wants the disp32 + ip += 4; // skip the disp32 + } + break; + + case 1: + // [01 reg 100][ss index base][disp8] + // [01 reg 100][00 100 esp][disp8] + // [01 reg base] [disp8] + ip += 1; // skip the disp8 + break; + + case 2: + // [10 reg 100][ss index base][disp32] + // [10 reg 100][00 100 esp][disp32] + // [10 reg base] [disp32] + if (which == disp32_operand) + return ip; // caller wants the disp32 + ip += 4; // skip the disp32 + break; + + case 3: + // [11 reg base] (not a memory addressing mode) + break; + } + + if (which == end_pc_operand) { + return ip + tail_size; + } + +#ifdef _LP64 + assert(false, "fix locate_operand"); +#else + assert(which == imm_operand, "instruction has only an imm field"); +#endif // LP64 + return ip; +} + +address Assembler::locate_next_instruction(address inst) { + // Secretly share code with locate_operand: + return locate_operand(inst, end_pc_operand); +} + + +#ifdef ASSERT +void 
Assembler::check_relocation(RelocationHolder const& rspec, int format) { + address inst = inst_mark(); + assert(inst != NULL && inst < pc(), "must point to beginning of instruction"); + address opnd; + + Relocation* r = rspec.reloc(); + if (r->type() == relocInfo::none) { + return; + } else if (r->is_call() || format == call32_operand) { + // assert(format == imm32_operand, "cannot specify a nonzero format"); + opnd = locate_operand(inst, call32_operand); + } else if (r->is_data()) { + assert(format == imm_operand || format == disp32_operand + LP64_ONLY(|| format == narrow_oop_operand), "format ok"); + opnd = locate_operand(inst, (WhichOperand)format); + } else { + assert(format == imm_operand, "cannot specify a format"); + return; + } + assert(opnd == pc(), "must put operand where relocs can find it"); +} +#endif // ASSERT + +void Assembler::emit_operand32(Register reg, Address adr) { + assert(reg->encoding() < 8, "no extended registers"); + assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); + emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, + adr._rspec); +} + +void Assembler::emit_operand(Register reg, Address adr, + int rip_relative_correction) { + emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, + adr._rspec, + rip_relative_correction); +} + +void Assembler::emit_operand(XMMRegister reg, Address adr) { + emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, + adr._rspec); +} + +// MMX operations +void Assembler::emit_operand(MMXRegister reg, Address adr) { + assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); + emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); +} + +// work around gcc (3.2.1-7a) bug +void Assembler::emit_operand(Address adr, MMXRegister reg) { + assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers"); + emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); +} + + +void Assembler::emit_farith(int b1, int b2, int i) { + assert(isByte(b1) && isByte(b2), "wrong opcode"); + assert(0 <= i && i < 8, "illegal stack offset"); + emit_byte(b1); + emit_byte(b2 + i); +} + + +// Now the Assembler instruction (identical for 32/64 bits) + +void Assembler::adcl(Register dst, int32_t imm32) { + prefix(dst); + emit_arith(0x81, 0xD0, dst, imm32); +} + +void Assembler::adcl(Register dst, Address src) { + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x13); + emit_operand(dst, src); +} + +void Assembler::adcl(Register dst, Register src) { + (void) prefix_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x13, 0xC0, dst, src); +} + +void Assembler::addl(Address dst, int32_t imm32) { + InstructionMark im(this); + prefix(dst); + emit_arith_operand(0x81, rax, dst, imm32); +} + +void Assembler::addl(Address dst, Register src) { + InstructionMark im(this); + prefix(dst, src); + emit_byte(0x01); + emit_operand(src, dst); +} + +void Assembler::addl(Register dst, int32_t imm32) { + prefix(dst); + emit_arith(0x81, 0xC0, dst, imm32); +} + +void Assembler::addl(Register dst, Address src) { + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x03); + emit_operand(dst, src); +} + +void Assembler::addl(Register dst, Register src) { + (void) prefix_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x03, 0xC0, dst, src); +} + +void Assembler::addr_nop_4() { + // 4 bytes: NOP DWORD PTR [EAX+0] + emit_byte(0x0F); + emit_byte(0x1F); + emit_byte(0x40); // emit_rm(cbuf, 0x1, 
EAX_enc, EAX_enc); + emit_byte(0); // 8-bits offset (1 byte) +} + +void Assembler::addr_nop_5() { + // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset + emit_byte(0x0F); + emit_byte(0x1F); + emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4); + emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); + emit_byte(0); // 8-bits offset (1 byte) +} + +void Assembler::addr_nop_7() { + // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset + emit_byte(0x0F); + emit_byte(0x1F); + emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc); + emit_long(0); // 32-bits offset (4 bytes) +} + +void Assembler::addr_nop_8() { + // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset + emit_byte(0x0F); + emit_byte(0x1F); + emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4); + emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); + emit_long(0); // 32-bits offset (4 bytes) +} + +void Assembler::addsd(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF2); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x58); + emit_byte(0xC0 | encode); +} + +void Assembler::addsd(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0xF2); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x58); + emit_operand(dst, src); +} + +void Assembler::addss(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + emit_byte(0xF3); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x58); + emit_byte(0xC0 | encode); +} + +void Assembler::addss(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + InstructionMark im(this); + emit_byte(0xF3); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x58); + emit_operand(dst, src); +} + +void Assembler::andl(Register dst, int32_t imm32) { + prefix(dst); + emit_arith(0x81, 0xE0, dst, imm32); +} + +void Assembler::andl(Register dst, Address src) { + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x23); + emit_operand(dst, src); +} + +void Assembler::andl(Register dst, Register src) { + (void) prefix_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x23, 0xC0, dst, src); +} + +void Assembler::andpd(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0x66); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x54); + emit_operand(dst, src); +} + +void Assembler::bswapl(Register reg) { // bswap + int encode = prefix_and_encode(reg->encoding()); + emit_byte(0x0F); + emit_byte(0xC8 | encode); +} + +void Assembler::call(Label& L, relocInfo::relocType rtype) { + // suspect disp32 is always good + int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand); + + if (L.is_bound()) { + const int long_size = 5; + int offs = (int)( target(L) - pc() ); + assert(offs <= 0, "assembler error"); + InstructionMark im(this); + // 1110 1000 #32-bit disp + emit_byte(0xE8); + emit_data(offs - long_size, rtype, operand); + } else { + InstructionMark im(this); + // 1110 1000 #32-bit disp + L.add_patch_at(code(), locator()); + + emit_byte(0xE8); + emit_data(int(0), rtype, operand); + } +} + +void Assembler::call(Register dst) { + // This was originally using a 32bit register encoding + // and surely we want 64bit! 
+ // this is a 32bit encoding but in 64bit mode the default + // operand size is 64bit so there is no need for the + // wide prefix. So prefix only happens if we use the + // new registers. Much like push/pop. + int x = offset(); + // this may be true but dbx disassembles it as if it + // were 32bits... + // int encode = prefix_and_encode(dst->encoding()); + // if (offset() != x) assert(dst->encoding() >= 8, "what?"); + int encode = prefixq_and_encode(dst->encoding()); + + emit_byte(0xFF); + emit_byte(0xD0 | encode); +} + + +void Assembler::call(Address adr) { + InstructionMark im(this); + prefix(adr); + emit_byte(0xFF); + emit_operand(rdx, adr); +} + +void Assembler::call_literal(address entry, RelocationHolder const& rspec) { + assert(entry != NULL, "call most probably wrong"); + InstructionMark im(this); + emit_byte(0xE8); + intptr_t disp = entry - (_code_pos + sizeof(int32_t)); + assert(is_simm32(disp), "must be 32bit offset (call2)"); + // Technically, should use call32_operand, but this format is + // implied by the fact that we're emitting a call instruction. + + int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand); + emit_data((int) disp, rspec, operand); +} + +void Assembler::cdql() { + emit_byte(0x99); +} + +void Assembler::cmovl(Condition cc, Register dst, Register src) { + NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x40 | cc); + emit_byte(0xC0 | encode); +} + + +void Assembler::cmovl(Condition cc, Register dst, Address src) { + NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x40 | cc); + emit_operand(dst, src); +} + +void Assembler::cmpb(Address dst, int imm8) { + InstructionMark im(this); + prefix(dst); + emit_byte(0x80); + emit_operand(rdi, dst, 1); + emit_byte(imm8); +} + +void Assembler::cmpl(Address dst, int32_t imm32) { + InstructionMark im(this); + prefix(dst); + emit_byte(0x81); + emit_operand(rdi, dst, 4); + emit_long(imm32); +} + +void Assembler::cmpl(Register dst, int32_t imm32) { + prefix(dst); + emit_arith(0x81, 0xF8, dst, imm32); +} + +void Assembler::cmpl(Register dst, Register src) { + (void) prefix_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x3B, 0xC0, dst, src); +} + + +void Assembler::cmpl(Register dst, Address src) { + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x3B); + emit_operand(dst, src); +} + +void Assembler::cmpw(Address dst, int imm16) { + InstructionMark im(this); + assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers"); + emit_byte(0x66); + emit_byte(0x81); + emit_operand(rdi, dst, 2); + emit_word(imm16); +} + +// The 32-bit cmpxchg compares the value at adr with the contents of rax, +// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. +// The ZF is set if the compared values were equal, and cleared otherwise. +void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg + if (Atomics & 2) { + // caveat: no instructionmark, so this isn't relocatable. + // Emit a synthetic, non-atomic, CAS equivalent. + // Beware. The synthetic form sets all ICCs, not just ZF. 
+ // cmpxchg r,[m] is equivalent to rax, = CAS (m, rax, r) + cmpl(rax, adr); + movl(rax, adr); + if (reg != rax) { + Label L ; + jcc(Assembler::notEqual, L); + movl(adr, reg); + bind(L); + } + } else { + InstructionMark im(this); + prefix(adr, reg); + emit_byte(0x0F); + emit_byte(0xB1); + emit_operand(reg, adr); + } +} + +void Assembler::comisd(XMMRegister dst, Address src) { + // NOTE: dbx seems to decode this as comiss even though the + // 0x66 is there. Strangly ucomisd comes out correct + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0x66); + comiss(dst, src); +} + +void Assembler::comiss(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x2F); + emit_operand(dst, src); +} + +void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF3); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0xE6); + emit_byte(0xC0 | encode); +} + +void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x5B); + emit_byte(0xC0 | encode); +} + +void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF2); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x5A); + emit_byte(0xC0 | encode); +} + +void Assembler::cvtsi2sdl(XMMRegister dst, Register src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF2); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x2A); + emit_byte(0xC0 | encode); +} + +void Assembler::cvtsi2ssl(XMMRegister dst, Register src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + emit_byte(0xF3); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x2A); + emit_byte(0xC0 | encode); +} + +void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF3); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x5A); + emit_byte(0xC0 | encode); +} + +void Assembler::cvttsd2sil(Register dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF2); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x2C); + emit_byte(0xC0 | encode); +} + +void Assembler::cvttss2sil(Register dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + emit_byte(0xF3); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x2C); + emit_byte(0xC0 | encode); +} + +void Assembler::decl(Address dst) { + // Don't use it directly. Use MacroAssembler::decrement() instead. 
+ InstructionMark im(this); + prefix(dst); + emit_byte(0xFF); + emit_operand(rcx, dst); +} + +void Assembler::divsd(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0xF2); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x5E); + emit_operand(dst, src); +} + +void Assembler::divsd(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF2); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x5E); + emit_byte(0xC0 | encode); +} + +void Assembler::divss(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + InstructionMark im(this); + emit_byte(0xF3); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x5E); + emit_operand(dst, src); +} + +void Assembler::divss(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + emit_byte(0xF3); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x5E); + emit_byte(0xC0 | encode); +} + +void Assembler::emms() { + NOT_LP64(assert(VM_Version::supports_mmx(), "")); + emit_byte(0x0F); + emit_byte(0x77); +} + +void Assembler::hlt() { + emit_byte(0xF4); +} + +void Assembler::idivl(Register src) { + int encode = prefix_and_encode(src->encoding()); + emit_byte(0xF7); + emit_byte(0xF8 | encode); +} + +void Assembler::imull(Register dst, Register src) { + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0xAF); + emit_byte(0xC0 | encode); +} + + +void Assembler::imull(Register dst, Register src, int value) { + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + if (is8bit(value)) { + emit_byte(0x6B); + emit_byte(0xC0 | encode); + emit_byte(value); + } else { + emit_byte(0x69); + emit_byte(0xC0 | encode); + emit_long(value); + } +} + +void Assembler::incl(Address dst) { + // Don't use it directly. Use MacroAssembler::increment() instead. + InstructionMark im(this); + prefix(dst); + emit_byte(0xFF); + emit_operand(rax, dst); +} + +void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) { + InstructionMark im(this); + relocate(rtype); + assert((0 <= cc) && (cc < 16), "illegal cc"); + if (L.is_bound()) { + address dst = target(L); + assert(dst != NULL, "jcc most probably wrong"); + + const int short_size = 2; + const int long_size = 6; + intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos; + if (rtype == relocInfo::none && is8bit(offs - short_size)) { + // 0111 tttn #8-bit disp + emit_byte(0x70 | cc); + emit_byte((offs - short_size) & 0xFF); + } else { + // 0000 1111 1000 tttn #32-bit disp + assert(is_simm32(offs - long_size), + "must be 32bit offset (call4)"); + emit_byte(0x0F); + emit_byte(0x80 | cc); + emit_long(offs - long_size); + } + } else { + // Note: could eliminate cond. jumps to this jump if condition + // is the same however, seems to be rather unlikely case. 
+ // Note: use jccb() if label to be bound is very close to get + // an 8-bit displacement + L.add_patch_at(code(), locator()); + emit_byte(0x0F); + emit_byte(0x80 | cc); + emit_long(0); + } +} + +void Assembler::jccb(Condition cc, Label& L) { + if (L.is_bound()) { + const int short_size = 2; + address entry = target(L); + assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)), + "Dispacement too large for a short jmp"); + intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos; + // 0111 tttn #8-bit disp + emit_byte(0x70 | cc); + emit_byte((offs - short_size) & 0xFF); + } else { + InstructionMark im(this); + L.add_patch_at(code(), locator()); + emit_byte(0x70 | cc); + emit_byte(0); + } +} + +void Assembler::jmp(Address adr) { + InstructionMark im(this); + prefix(adr); + emit_byte(0xFF); + emit_operand(rsp, adr); +} + +void Assembler::jmp(Label& L, relocInfo::relocType rtype) { + if (L.is_bound()) { + address entry = target(L); + assert(entry != NULL, "jmp most probably wrong"); + InstructionMark im(this); + const int short_size = 2; + const int long_size = 5; + intptr_t offs = entry - _code_pos; + if (rtype == relocInfo::none && is8bit(offs - short_size)) { + emit_byte(0xEB); + emit_byte((offs - short_size) & 0xFF); + } else { + emit_byte(0xE9); + emit_long(offs - long_size); + } + } else { + // By default, forward jumps are always 32-bit displacements, since + // we can't yet know where the label will be bound. If you're sure that + // the forward jump will not run beyond 256 bytes, use jmpb to + // force an 8-bit displacement. + InstructionMark im(this); + relocate(rtype); + L.add_patch_at(code(), locator()); + emit_byte(0xE9); + emit_long(0); + } +} + +void Assembler::jmp(Register entry) { + int encode = prefix_and_encode(entry->encoding()); + emit_byte(0xFF); + emit_byte(0xE0 | encode); +} + +void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) { + InstructionMark im(this); + emit_byte(0xE9); + assert(dest != NULL, "must have a target"); + intptr_t disp = dest - (_code_pos + sizeof(int32_t)); + assert(is_simm32(disp), "must be 32bit offset (jmp)"); + emit_data(disp, rspec.reloc(), call32_operand); +} + +void Assembler::jmpb(Label& L) { + if (L.is_bound()) { + const int short_size = 2; + address entry = target(L); + assert(is8bit((entry - _code_pos) + short_size), + "Dispacement too large for a short jmp"); + assert(entry != NULL, "jmp most probably wrong"); + intptr_t offs = entry - _code_pos; + emit_byte(0xEB); + emit_byte((offs - short_size) & 0xFF); + } else { + InstructionMark im(this); + L.add_patch_at(code(), locator()); + emit_byte(0xEB); + emit_byte(0); + } +} + +void Assembler::ldmxcsr( Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + InstructionMark im(this); + prefix(src); + emit_byte(0x0F); + emit_byte(0xAE); + emit_operand(as_Register(2), src); +} + +void Assembler::leal(Register dst, Address src) { + InstructionMark im(this); +#ifdef _LP64 + emit_byte(0x67); // addr32 + prefix(src, dst); +#endif // LP64 + emit_byte(0x8D); + emit_operand(dst, src); +} + +void Assembler::lock() { + if (Atomics & 1) { + // Emit either nothing, a NOP, or a NOP: prefix + emit_byte(0x90) ; + } else { + emit_byte(0xF0); + } +} + +// Serializes memory. 
+void Assembler::mfence() { + // Memory barriers are only needed on multiprocessors + if (os::is_MP()) { + if( LP64_ONLY(true ||) VM_Version::supports_sse2() ) { + emit_byte( 0x0F ); // MFENCE; faster blows no regs + emit_byte( 0xAE ); + emit_byte( 0xF0 ); + } else { + // All usable chips support "locked" instructions which suffice + // as barriers, and are much faster than the alternative of + // using cpuid instruction. We use here a locked add [esp],0. + // This is conveniently otherwise a no-op except for blowing + // flags (which we save and restore.) + pushf(); // Save eflags register + lock(); + addl(Address(rsp, 0), 0);// Assert the lock# signal here + popf(); // Restore eflags register + } + } +} + +void Assembler::mov(Register dst, Register src) { + LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); +} + +void Assembler::movapd(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + int dstenc = dst->encoding(); + int srcenc = src->encoding(); + emit_byte(0x66); + if (dstenc < 8) { + if (srcenc >= 8) { + prefix(REX_B); + srcenc -= 8; + } + } else { + if (srcenc < 8) { + prefix(REX_R); + } else { + prefix(REX_RB); + srcenc -= 8; + } + dstenc -= 8; + } + emit_byte(0x0F); + emit_byte(0x28); + emit_byte(0xC0 | dstenc << 3 | srcenc); +} + +void Assembler::movaps(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + int dstenc = dst->encoding(); + int srcenc = src->encoding(); + if (dstenc < 8) { + if (srcenc >= 8) { + prefix(REX_B); + srcenc -= 8; + } + } else { + if (srcenc < 8) { + prefix(REX_R); + } else { + prefix(REX_RB); + srcenc -= 8; + } + dstenc -= 8; + } + emit_byte(0x0F); + emit_byte(0x28); + emit_byte(0xC0 | dstenc << 3 | srcenc); +} + +void Assembler::movb(Register dst, Address src) { + NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); + InstructionMark im(this); + prefix(src, dst, true); + emit_byte(0x8A); + emit_operand(dst, src); +} + + +void Assembler::movb(Address dst, int imm8) { + InstructionMark im(this); + prefix(dst); + emit_byte(0xC6); + emit_operand(rax, dst, 1); + emit_byte(imm8); +} + + +void Assembler::movb(Address dst, Register src) { + assert(src->has_byte_register(), "must have byte register"); + InstructionMark im(this); + prefix(dst, src, true); + emit_byte(0x88); + emit_operand(src, dst); +} + +void Assembler::movdl(XMMRegister dst, Register src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0x66); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x6E); + emit_byte(0xC0 | encode); +} + +void Assembler::movdl(Register dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0x66); + // swap src/dst to get correct prefix + int encode = prefix_and_encode(src->encoding(), dst->encoding()); + emit_byte(0x0F); + emit_byte(0x7E); + emit_byte(0xC0 | encode); +} + +void Assembler::movdqa(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0x66); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x6F); + emit_operand(dst, src); +} + +void Assembler::movdqa(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0x66); + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x6F); + emit_byte(0xC0 | encode); +} + +void Assembler::movdqa(Address dst, XMMRegister src) { + 
NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0x66); + prefix(dst, src); + emit_byte(0x0F); + emit_byte(0x7F); + emit_operand(src, dst); +} + +void Assembler::movdqu(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0xF3); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x6F); + emit_operand(dst, src); +} + +void Assembler::movdqu(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF3); + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x6F); + emit_byte(0xC0 | encode); +} + +void Assembler::movdqu(Address dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0xF3); + prefix(dst, src); + emit_byte(0x0F); + emit_byte(0x7F); + emit_operand(src, dst); +} + +// Uses zero extension on 64bit + +void Assembler::movl(Register dst, int32_t imm32) { + int encode = prefix_and_encode(dst->encoding()); + emit_byte(0xB8 | encode); + emit_long(imm32); +} + +void Assembler::movl(Register dst, Register src) { + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x8B); + emit_byte(0xC0 | encode); +} + +void Assembler::movl(Register dst, Address src) { + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x8B); + emit_operand(dst, src); +} + +void Assembler::movl(Address dst, int32_t imm32) { + InstructionMark im(this); + prefix(dst); + emit_byte(0xC7); + emit_operand(rax, dst, 4); + emit_long(imm32); +} + +void Assembler::movl(Address dst, Register src) { + InstructionMark im(this); + prefix(dst, src); + emit_byte(0x89); + emit_operand(src, dst); +} + +// New cpus require to use movsd and movss to avoid partial register stall +// when loading from memory. But for old Opteron use movlpd instead of movsd. +// The selection is done in MacroAssembler::movdbl() and movflt(). +void Assembler::movlpd(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0x66); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x12); + emit_operand(dst, src); +} + +void Assembler::movq( MMXRegister dst, Address src ) { + assert( VM_Version::supports_mmx(), "" ); + emit_byte(0x0F); + emit_byte(0x6F); + emit_operand(dst, src); +} + +void Assembler::movq( Address dst, MMXRegister src ) { + assert( VM_Version::supports_mmx(), "" ); + emit_byte(0x0F); + emit_byte(0x7F); + // workaround gcc (3.2.1-7a) bug + // In that version of gcc with only an emit_operand(MMX, Address) + // gcc will tail jump and try and reverse the parameters completely + // obliterating dst in the process. By having a version available + // that doesn't need to swap the args at the tail jump the bug is + // avoided. 
+ emit_operand(dst, src); +} + +void Assembler::movq(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0xF3); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x7E); + emit_operand(dst, src); +} + +void Assembler::movq(Address dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0x66); + prefix(dst, src); + emit_byte(0x0F); + emit_byte(0xD6); + emit_operand(src, dst); +} + +void Assembler::movsbl(Register dst, Address src) { // movsxb + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0xBE); + emit_operand(dst, src); +} + +void Assembler::movsbl(Register dst, Register src) { // movsxb + NOT_LP64(assert(src->has_byte_register(), "must have byte register")); + int encode = prefix_and_encode(dst->encoding(), src->encoding(), true); + emit_byte(0x0F); + emit_byte(0xBE); + emit_byte(0xC0 | encode); +} + +void Assembler::movsd(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF2); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x10); + emit_byte(0xC0 | encode); +} + +void Assembler::movsd(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0xF2); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x10); + emit_operand(dst, src); +} + +void Assembler::movsd(Address dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0xF2); + prefix(dst, src); + emit_byte(0x0F); + emit_byte(0x11); + emit_operand(src, dst); +} + +void Assembler::movss(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + emit_byte(0xF3); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x10); + emit_byte(0xC0 | encode); +} + +void Assembler::movss(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + InstructionMark im(this); + emit_byte(0xF3); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x10); + emit_operand(dst, src); +} + +void Assembler::movss(Address dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + InstructionMark im(this); + emit_byte(0xF3); + prefix(dst, src); + emit_byte(0x0F); + emit_byte(0x11); + emit_operand(src, dst); +} + +void Assembler::movswl(Register dst, Address src) { // movsxw + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0xBF); + emit_operand(dst, src); +} + +void Assembler::movswl(Register dst, Register src) { // movsxw + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0xBF); + emit_byte(0xC0 | encode); +} + +void Assembler::movw(Address dst, int imm16) { + InstructionMark im(this); + + emit_byte(0x66); // switch to 16-bit mode + prefix(dst); + emit_byte(0xC7); + emit_operand(rax, dst, 2); + emit_word(imm16); +} + +void Assembler::movw(Register dst, Address src) { + InstructionMark im(this); + emit_byte(0x66); + prefix(src, dst); + emit_byte(0x8B); + emit_operand(dst, src); +} + +void Assembler::movw(Address dst, Register src) { + InstructionMark im(this); + emit_byte(0x66); + prefix(dst, src); + emit_byte(0x89); + emit_operand(src, dst); +} + +void Assembler::movzbl(Register dst, Address src) { // movzxb + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x0F); + 
emit_byte(0xB6); + emit_operand(dst, src); +} + +void Assembler::movzbl(Register dst, Register src) { // movzxb + NOT_LP64(assert(src->has_byte_register(), "must have byte register")); + int encode = prefix_and_encode(dst->encoding(), src->encoding(), true); + emit_byte(0x0F); + emit_byte(0xB6); + emit_byte(0xC0 | encode); +} + +void Assembler::movzwl(Register dst, Address src) { // movzxw + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0xB7); + emit_operand(dst, src); +} + +void Assembler::movzwl(Register dst, Register src) { // movzxw + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0xB7); + emit_byte(0xC0 | encode); +} + +void Assembler::mull(Address src) { + InstructionMark im(this); + prefix(src); + emit_byte(0xF7); + emit_operand(rsp, src); +} + +void Assembler::mull(Register src) { + int encode = prefix_and_encode(src->encoding()); + emit_byte(0xF7); + emit_byte(0xE0 | encode); +} + +void Assembler::mulsd(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0xF2); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x59); + emit_operand(dst, src); +} + +void Assembler::mulsd(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF2); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x59); + emit_byte(0xC0 | encode); +} + +void Assembler::mulss(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + InstructionMark im(this); + emit_byte(0xF3); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x59); + emit_operand(dst, src); +} + +void Assembler::mulss(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + emit_byte(0xF3); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x59); + emit_byte(0xC0 | encode); +} + +void Assembler::negl(Register dst) { + int encode = prefix_and_encode(dst->encoding()); + emit_byte(0xF7); + emit_byte(0xD8 | encode); +} + +void Assembler::nop(int i) { +#ifdef ASSERT + assert(i > 0, " "); + // The fancy nops aren't currently recognized by debuggers making it a + // pain to disassemble code while debugging. If asserts are on clearly + // speed is not an issue so simply use the single byte traditional nop + // to do alignment. 
+ + for (; i > 0 ; i--) emit_byte(0x90); + return; + +#endif // ASSERT + + if (UseAddressNop && VM_Version::is_intel()) { + // + // Using multi-bytes nops "0x0F 0x1F [address]" for Intel + // 1: 0x90 + // 2: 0x66 0x90 + // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) + // 4: 0x0F 0x1F 0x40 0x00 + // 5: 0x0F 0x1F 0x44 0x00 0x00 + // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 + // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 + // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 + // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 + // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 + // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 + + // The rest coding is Intel specific - don't use consecutive address nops + + // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 + // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 + // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 + // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 + + while(i >= 15) { + // For Intel don't generate consecutive addess nops (mix with regular nops) + i -= 15; + emit_byte(0x66); // size prefix + emit_byte(0x66); // size prefix + emit_byte(0x66); // size prefix + addr_nop_8(); + emit_byte(0x66); // size prefix + emit_byte(0x66); // size prefix + emit_byte(0x66); // size prefix + emit_byte(0x90); // nop + } + switch (i) { + case 14: + emit_byte(0x66); // size prefix + case 13: + emit_byte(0x66); // size prefix + case 12: + addr_nop_8(); + emit_byte(0x66); // size prefix + emit_byte(0x66); // size prefix + emit_byte(0x66); // size prefix + emit_byte(0x90); // nop + break; + case 11: + emit_byte(0x66); // size prefix + case 10: + emit_byte(0x66); // size prefix + case 9: + emit_byte(0x66); // size prefix + case 8: + addr_nop_8(); + break; + case 7: + addr_nop_7(); + break; + case 6: + emit_byte(0x66); // size prefix + case 5: + addr_nop_5(); + break; + case 4: + addr_nop_4(); + break; + case 3: + // Don't use "0x0F 0x1F 0x00" - need patching safe padding + emit_byte(0x66); // size prefix + case 2: + emit_byte(0x66); // size prefix + case 1: + emit_byte(0x90); // nop + break; + default: + assert(i == 0, " "); + } + return; + } + if (UseAddressNop && VM_Version::is_amd()) { + // + // Using multi-bytes nops "0x0F 0x1F [address]" for AMD. 
+ // 1: 0x90 + // 2: 0x66 0x90 + // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) + // 4: 0x0F 0x1F 0x40 0x00 + // 5: 0x0F 0x1F 0x44 0x00 0x00 + // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 + // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 + // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 + // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 + // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 + // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 + + // The rest coding is AMD specific - use consecutive address nops + + // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 + // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 + // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 + // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 + // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 + // Size prefixes (0x66) are added for larger sizes + + while(i >= 22) { + i -= 11; + emit_byte(0x66); // size prefix + emit_byte(0x66); // size prefix + emit_byte(0x66); // size prefix + addr_nop_8(); + } + // Generate first nop for size between 21-12 + switch (i) { + case 21: + i -= 1; + emit_byte(0x66); // size prefix + case 20: + case 19: + i -= 1; + emit_byte(0x66); // size prefix + case 18: + case 17: + i -= 1; + emit_byte(0x66); // size prefix + case 16: + case 15: + i -= 8; + addr_nop_8(); + break; + case 14: + case 13: + i -= 7; + addr_nop_7(); + break; + case 12: + i -= 6; + emit_byte(0x66); // size prefix + addr_nop_5(); + break; + default: + assert(i < 12, " "); + } + + // Generate second nop for size between 11-1 + switch (i) { + case 11: + emit_byte(0x66); // size prefix + case 10: + emit_byte(0x66); // size prefix + case 9: + emit_byte(0x66); // size prefix + case 8: + addr_nop_8(); + break; + case 7: + addr_nop_7(); + break; + case 6: + emit_byte(0x66); // size prefix + case 5: + addr_nop_5(); + break; + case 4: + addr_nop_4(); + break; + case 3: + // Don't use "0x0F 0x1F 0x00" - need patching safe padding + emit_byte(0x66); // size prefix + case 2: + emit_byte(0x66); // size prefix + case 1: + emit_byte(0x90); // nop + break; + default: + assert(i == 0, " "); + } + return; + } + + // Using nops with size prefixes "0x66 0x90". 
+ // From AMD Optimization Guide: + // 1: 0x90 + // 2: 0x66 0x90 + // 3: 0x66 0x66 0x90 + // 4: 0x66 0x66 0x66 0x90 + // 5: 0x66 0x66 0x90 0x66 0x90 + // 6: 0x66 0x66 0x90 0x66 0x66 0x90 + // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 + // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90 + // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 + // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 + // + while(i > 12) { + i -= 4; + emit_byte(0x66); // size prefix + emit_byte(0x66); + emit_byte(0x66); + emit_byte(0x90); // nop + } + // 1 - 12 nops + if(i > 8) { + if(i > 9) { + i -= 1; + emit_byte(0x66); + } + i -= 3; + emit_byte(0x66); + emit_byte(0x66); + emit_byte(0x90); + } + // 1 - 8 nops + if(i > 4) { + if(i > 6) { + i -= 1; + emit_byte(0x66); + } + i -= 3; + emit_byte(0x66); + emit_byte(0x66); + emit_byte(0x90); + } + switch (i) { + case 4: + emit_byte(0x66); + case 3: + emit_byte(0x66); + case 2: + emit_byte(0x66); + case 1: + emit_byte(0x90); + break; + default: + assert(i == 0, " "); + } +} + +void Assembler::notl(Register dst) { + int encode = prefix_and_encode(dst->encoding()); + emit_byte(0xF7); + emit_byte(0xD0 | encode ); +} + +void Assembler::orl(Address dst, int32_t imm32) { + InstructionMark im(this); + prefix(dst); + emit_byte(0x81); + emit_operand(rcx, dst, 4); + emit_long(imm32); +} + +void Assembler::orl(Register dst, int32_t imm32) { + prefix(dst); + emit_arith(0x81, 0xC8, dst, imm32); +} + + +void Assembler::orl(Register dst, Address src) { + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x0B); + emit_operand(dst, src); +} + + +void Assembler::orl(Register dst, Register src) { + (void) prefix_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x0B, 0xC0, dst, src); +} + +// generic +void Assembler::pop(Register dst) { + int encode = prefix_and_encode(dst->encoding()); + emit_byte(0x58 | encode); +} + +void Assembler::popf() { + emit_byte(0x9D); +} + +void Assembler::popl(Address dst) { + // NOTE: this will adjust stack by 8byte on 64bits + InstructionMark im(this); + prefix(dst); + emit_byte(0x8F); + emit_operand(rax, dst); +} + +void Assembler::prefetch_prefix(Address src) { + prefix(src); + emit_byte(0x0F); +} + +void Assembler::prefetchnta(Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "must support")); + InstructionMark im(this); + prefetch_prefix(src); + emit_byte(0x18); + emit_operand(rax, src); // 0, src +} + +void Assembler::prefetchr(Address src) { + NOT_LP64(assert(VM_Version::supports_3dnow(), "must support")); + InstructionMark im(this); + prefetch_prefix(src); + emit_byte(0x0D); + emit_operand(rax, src); // 0, src +} + +void Assembler::prefetcht0(Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "must support")); + InstructionMark im(this); + prefetch_prefix(src); + emit_byte(0x18); + emit_operand(rcx, src); // 1, src +} + +void Assembler::prefetcht1(Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "must support")); + InstructionMark im(this); + prefetch_prefix(src); + emit_byte(0x18); + emit_operand(rdx, src); // 2, src +} + +void Assembler::prefetcht2(Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "must support")); + InstructionMark im(this); + prefetch_prefix(src); + emit_byte(0x18); + emit_operand(rbx, src); // 3, src +} + +void Assembler::prefetchw(Address src) { + NOT_LP64(assert(VM_Version::supports_3dnow(), "must support")); + InstructionMark im(this); + prefetch_prefix(src); + emit_byte(0x0D); + emit_operand(rcx, src); // 1, src +} + +void Assembler::prefix(Prefix p) { + a_byte(p); +} + 
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) { + assert(isByte(mode), "invalid value"); + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + + emit_byte(0x66); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x70); + emit_byte(0xC0 | encode); + emit_byte(mode & 0xFF); + +} + +void Assembler::pshufd(XMMRegister dst, Address src, int mode) { + assert(isByte(mode), "invalid value"); + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + + InstructionMark im(this); + emit_byte(0x66); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x70); + emit_operand(dst, src); + emit_byte(mode & 0xFF); +} + +void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { + assert(isByte(mode), "invalid value"); + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + + emit_byte(0xF2); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x70); + emit_byte(0xC0 | encode); + emit_byte(mode & 0xFF); +} + +void Assembler::pshuflw(XMMRegister dst, Address src, int mode) { + assert(isByte(mode), "invalid value"); + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + + InstructionMark im(this); + emit_byte(0xF2); + prefix(src, dst); // QQ new + emit_byte(0x0F); + emit_byte(0x70); + emit_operand(dst, src); + emit_byte(mode & 0xFF); +} + +void Assembler::psrlq(XMMRegister dst, int shift) { + // HMM Table D-1 says sse2 or mmx + NOT_LP64(assert(VM_Version::supports_sse(), "")); + + int encode = prefixq_and_encode(xmm2->encoding(), dst->encoding()); + emit_byte(0x66); + emit_byte(0x0F); + emit_byte(0x73); + emit_byte(0xC0 | encode); + emit_byte(shift); +} + +void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0x66); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x60); + emit_byte(0xC0 | encode); +} + +void Assembler::push(int32_t imm32) { + // in 64bits we push 64bits onto the stack but only + // take a 32bit immediate + emit_byte(0x68); + emit_long(imm32); +} + +void Assembler::push(Register src) { + int encode = prefix_and_encode(src->encoding()); + + emit_byte(0x50 | encode); +} + +void Assembler::pushf() { + emit_byte(0x9C); +} + +void Assembler::pushl(Address src) { + // Note this will push 64bit on 64bit + InstructionMark im(this); + prefix(src); + emit_byte(0xFF); + emit_operand(rsi, src); +} + +void Assembler::pxor(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0x66); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0xEF); + emit_operand(dst, src); +} + +void Assembler::pxor(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0x66); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0xEF); + emit_byte(0xC0 | encode); +} + +void Assembler::rcll(Register dst, int imm8) { + assert(isShiftCount(imm8), "illegal shift count"); + int encode = prefix_and_encode(dst->encoding()); + if (imm8 == 1) { + emit_byte(0xD1); + emit_byte(0xD0 | encode); + } else { + emit_byte(0xC1); + emit_byte(0xD0 | encode); + emit_byte(imm8); + } +} + +// copies data from [esi] to [edi] using rcx pointer sized words +// generic +void Assembler::rep_mov() { + emit_byte(0xF3); + // MOVSQ + LP64_ONLY(prefix(REX_W)); + emit_byte(0xA5); +} + +// sets rcx pointer sized words with rax, value at 
[edi] +// generic +void Assembler::rep_set() { // rep_set + emit_byte(0xF3); + // STOSQ + LP64_ONLY(prefix(REX_W)); + emit_byte(0xAB); +} + +// scans rcx pointer sized words at [edi] for occurance of rax, +// generic +void Assembler::repne_scan() { // repne_scan + emit_byte(0xF2); + // SCASQ + LP64_ONLY(prefix(REX_W)); + emit_byte(0xAF); +} + +#ifdef _LP64 +// scans rcx 4 byte words at [edi] for occurance of rax, +// generic +void Assembler::repne_scanl() { // repne_scan + emit_byte(0xF2); + // SCASL + emit_byte(0xAF); +} +#endif + +void Assembler::ret(int imm16) { + if (imm16 == 0) { + emit_byte(0xC3); + } else { + emit_byte(0xC2); + emit_word(imm16); + } +} + +void Assembler::sahf() { +#ifdef _LP64 + // Not supported in 64bit mode + ShouldNotReachHere(); +#endif + emit_byte(0x9E); +} + +void Assembler::sarl(Register dst, int imm8) { + int encode = prefix_and_encode(dst->encoding()); + assert(isShiftCount(imm8), "illegal shift count"); + if (imm8 == 1) { + emit_byte(0xD1); + emit_byte(0xF8 | encode); + } else { + emit_byte(0xC1); + emit_byte(0xF8 | encode); + emit_byte(imm8); + } +} + +void Assembler::sarl(Register dst) { + int encode = prefix_and_encode(dst->encoding()); + emit_byte(0xD3); + emit_byte(0xF8 | encode); +} + +void Assembler::sbbl(Address dst, int32_t imm32) { + InstructionMark im(this); + prefix(dst); + emit_arith_operand(0x81, rbx, dst, imm32); +} + +void Assembler::sbbl(Register dst, int32_t imm32) { + prefix(dst); + emit_arith(0x81, 0xD8, dst, imm32); +} + + +void Assembler::sbbl(Register dst, Address src) { + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x1B); + emit_operand(dst, src); +} + +void Assembler::sbbl(Register dst, Register src) { + (void) prefix_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x1B, 0xC0, dst, src); +} + +void Assembler::setb(Condition cc, Register dst) { + assert(0 <= cc && cc < 16, "illegal cc"); + int encode = prefix_and_encode(dst->encoding(), true); + emit_byte(0x0F); + emit_byte(0x90 | cc); + emit_byte(0xC0 | encode); +} + +void Assembler::shll(Register dst, int imm8) { + assert(isShiftCount(imm8), "illegal shift count"); + int encode = prefix_and_encode(dst->encoding()); + if (imm8 == 1 ) { + emit_byte(0xD1); + emit_byte(0xE0 | encode); + } else { + emit_byte(0xC1); + emit_byte(0xE0 | encode); + emit_byte(imm8); + } +} + +void Assembler::shll(Register dst) { + int encode = prefix_and_encode(dst->encoding()); + emit_byte(0xD3); + emit_byte(0xE0 | encode); +} + +void Assembler::shrl(Register dst, int imm8) { + assert(isShiftCount(imm8), "illegal shift count"); + int encode = prefix_and_encode(dst->encoding()); + emit_byte(0xC1); + emit_byte(0xE8 | encode); + emit_byte(imm8); +} + +void Assembler::shrl(Register dst) { + int encode = prefix_and_encode(dst->encoding()); + emit_byte(0xD3); + emit_byte(0xE8 | encode); +} + +// copies a single word from [esi] to [edi] +void Assembler::smovl() { + emit_byte(0xA5); +} + +void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { + // HMM Table D-1 says sse2 + // NOT_LP64(assert(VM_Version::supports_sse(), "")); + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF2); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x51); + emit_byte(0xC0 | encode); +} + +void Assembler::stmxcsr( Address dst) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + InstructionMark im(this); + prefix(dst); + emit_byte(0x0F); + emit_byte(0xAE); + emit_operand(as_Register(3), dst); +} + +void Assembler::subl(Address dst, int32_t 
imm32) { + InstructionMark im(this); + prefix(dst); + if (is8bit(imm32)) { + emit_byte(0x83); + emit_operand(rbp, dst, 1); + emit_byte(imm32 & 0xFF); + } else { + emit_byte(0x81); + emit_operand(rbp, dst, 4); + emit_long(imm32); + } +} + +void Assembler::subl(Register dst, int32_t imm32) { + prefix(dst); + emit_arith(0x81, 0xE8, dst, imm32); +} + +void Assembler::subl(Address dst, Register src) { + InstructionMark im(this); + prefix(dst, src); + emit_byte(0x29); + emit_operand(src, dst); +} + +void Assembler::subl(Register dst, Address src) { + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x2B); + emit_operand(dst, src); +} + +void Assembler::subl(Register dst, Register src) { + (void) prefix_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x2B, 0xC0, dst, src); +} + +void Assembler::subsd(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF2); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x5C); + emit_byte(0xC0 | encode); +} + +void Assembler::subsd(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0xF2); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x5C); + emit_operand(dst, src); +} + +void Assembler::subss(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + emit_byte(0xF3); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x5C); + emit_byte(0xC0 | encode); +} + +void Assembler::subss(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + InstructionMark im(this); + emit_byte(0xF3); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x5C); + emit_operand(dst, src); +} + +void Assembler::testb(Register dst, int imm8) { + NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); + (void) prefix_and_encode(dst->encoding(), true); + emit_arith_b(0xF6, 0xC0, dst, imm8); +} + +void Assembler::testl(Register dst, int32_t imm32) { + // not using emit_arith because test + // doesn't support sign-extension of + // 8bit operands + int encode = dst->encoding(); + if (encode == 0) { + emit_byte(0xA9); + } else { + encode = prefix_and_encode(encode); + emit_byte(0xF7); + emit_byte(0xC0 | encode); + } + emit_long(imm32); +} + +void Assembler::testl(Register dst, Register src) { + (void) prefix_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x85, 0xC0, dst, src); +} + +void Assembler::testl(Register dst, Address src) { + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x85); + emit_operand(dst, src); +} + +void Assembler::ucomisd(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0x66); + ucomiss(dst, src); +} + +void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0x66); + ucomiss(dst, src); +} + +void Assembler::ucomiss(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x2E); + emit_operand(dst, src); +} + +void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x2E); + emit_byte(0xC0 | encode); +} + + +void Assembler::xaddl(Address dst, Register src) { + 
InstructionMark im(this); + prefix(dst, src); + emit_byte(0x0F); + emit_byte(0xC1); + emit_operand(src, dst); +} + +void Assembler::xchgl(Register dst, Address src) { // xchg + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x87); + emit_operand(dst, src); +} + +void Assembler::xchgl(Register dst, Register src) { + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x87); + emit_byte(0xc0 | encode); +} + +void Assembler::xorl(Register dst, int32_t imm32) { + prefix(dst); + emit_arith(0x81, 0xF0, dst, imm32); +} + +void Assembler::xorl(Register dst, Address src) { + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x33); + emit_operand(dst, src); +} + +void Assembler::xorl(Register dst, Register src) { + (void) prefix_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x33, 0xC0, dst, src); +} + +void Assembler::xorpd(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0x66); + xorps(dst, src); +} + +void Assembler::xorpd(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0x66); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x57); + emit_operand(dst, src); +} + + +void Assembler::xorps(XMMRegister dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + int encode = prefix_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x57); + emit_byte(0xC0 | encode); +} + +void Assembler::xorps(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + InstructionMark im(this); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x57); + emit_operand(dst, src); +} + +#ifndef _LP64 +// 32bit only pieces of the assembler + +void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) { + // NO PREFIX AS NEVER 64BIT + InstructionMark im(this); + emit_byte(0x81); + emit_byte(0xF8 | src1->encoding()); + emit_data(imm32, rspec, 0); +} + +void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) { + // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs + InstructionMark im(this); + emit_byte(0x81); + emit_operand(rdi, src1); + emit_data(imm32, rspec, 0); +} + +// The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax, +// and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded +// into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise. +void Assembler::cmpxchg8(Address adr) { + InstructionMark im(this); + emit_byte(0x0F); + emit_byte(0xc7); + emit_operand(rcx, adr); +} + +void Assembler::decl(Register dst) { + // Don't use it directly. Use MacroAssembler::decrementl() instead. 
+ emit_byte(0x48 | dst->encoding()); +} + +#endif // _LP64 + +// 64bit typically doesn't use the x87 but needs to for the trig funcs + +void Assembler::fabs() { + emit_byte(0xD9); + emit_byte(0xE1); +} + +void Assembler::fadd(int i) { + emit_farith(0xD8, 0xC0, i); +} + +void Assembler::fadd_d(Address src) { + InstructionMark im(this); + emit_byte(0xDC); + emit_operand32(rax, src); +} + +void Assembler::fadd_s(Address src) { + InstructionMark im(this); + emit_byte(0xD8); + emit_operand32(rax, src); +} + +void Assembler::fadda(int i) { + emit_farith(0xDC, 0xC0, i); +} + +void Assembler::faddp(int i) { + emit_farith(0xDE, 0xC0, i); +} + +void Assembler::fchs() { + emit_byte(0xD9); + emit_byte(0xE0); +} + +void Assembler::fcom(int i) { + emit_farith(0xD8, 0xD0, i); +} + +void Assembler::fcomp(int i) { + emit_farith(0xD8, 0xD8, i); +} + +void Assembler::fcomp_d(Address src) { + InstructionMark im(this); + emit_byte(0xDC); + emit_operand32(rbx, src); +} + +void Assembler::fcomp_s(Address src) { + InstructionMark im(this); + emit_byte(0xD8); + emit_operand32(rbx, src); +} + +void Assembler::fcompp() { + emit_byte(0xDE); + emit_byte(0xD9); +} + +void Assembler::fcos() { + emit_byte(0xD9); + emit_byte(0xFF); +} + +void Assembler::fdecstp() { + emit_byte(0xD9); + emit_byte(0xF6); +} + +void Assembler::fdiv(int i) { + emit_farith(0xD8, 0xF0, i); +} + +void Assembler::fdiv_d(Address src) { + InstructionMark im(this); + emit_byte(0xDC); + emit_operand32(rsi, src); +} + +void Assembler::fdiv_s(Address src) { + InstructionMark im(this); + emit_byte(0xD8); + emit_operand32(rsi, src); +} + +void Assembler::fdiva(int i) { + emit_farith(0xDC, 0xF8, i); +} + +// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994) +// is erroneous for some of the floating-point instructions below. 
+ +void Assembler::fdivp(int i) { + emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong) +} + +void Assembler::fdivr(int i) { + emit_farith(0xD8, 0xF8, i); +} + +void Assembler::fdivr_d(Address src) { + InstructionMark im(this); + emit_byte(0xDC); + emit_operand32(rdi, src); +} + +void Assembler::fdivr_s(Address src) { + InstructionMark im(this); + emit_byte(0xD8); + emit_operand32(rdi, src); +} + +void Assembler::fdivra(int i) { + emit_farith(0xDC, 0xF0, i); +} + +void Assembler::fdivrp(int i) { + emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong) +} + +void Assembler::ffree(int i) { + emit_farith(0xDD, 0xC0, i); +} + +void Assembler::fild_d(Address adr) { + InstructionMark im(this); + emit_byte(0xDF); + emit_operand32(rbp, adr); +} + +void Assembler::fild_s(Address adr) { + InstructionMark im(this); + emit_byte(0xDB); + emit_operand32(rax, adr); +} + +void Assembler::fincstp() { + emit_byte(0xD9); + emit_byte(0xF7); +} + +void Assembler::finit() { + emit_byte(0x9B); + emit_byte(0xDB); + emit_byte(0xE3); +} + +void Assembler::fist_s(Address adr) { + InstructionMark im(this); + emit_byte(0xDB); + emit_operand32(rdx, adr); +} + +void Assembler::fistp_d(Address adr) { + InstructionMark im(this); + emit_byte(0xDF); + emit_operand32(rdi, adr); +} + +void Assembler::fistp_s(Address adr) { + InstructionMark im(this); + emit_byte(0xDB); + emit_operand32(rbx, adr); +} + +void Assembler::fld1() { + emit_byte(0xD9); + emit_byte(0xE8); +} + +void Assembler::fld_d(Address adr) { + InstructionMark im(this); + emit_byte(0xDD); + emit_operand32(rax, adr); +} + +void Assembler::fld_s(Address adr) { + InstructionMark im(this); + emit_byte(0xD9); + emit_operand32(rax, adr); +} + + +void Assembler::fld_s(int index) { + emit_farith(0xD9, 0xC0, index); +} + +void Assembler::fld_x(Address adr) { + InstructionMark im(this); + emit_byte(0xDB); + emit_operand32(rbp, adr); +} + +void Assembler::fldcw(Address src) { + InstructionMark im(this); + emit_byte(0xd9); + emit_operand32(rbp, src); +} + +void Assembler::fldenv(Address src) { + InstructionMark im(this); + emit_byte(0xD9); + emit_operand32(rsp, src); +} + +void Assembler::fldlg2() { + emit_byte(0xD9); + emit_byte(0xEC); +} + +void Assembler::fldln2() { + emit_byte(0xD9); + emit_byte(0xED); +} + +void Assembler::fldz() { + emit_byte(0xD9); + emit_byte(0xEE); +} + +void Assembler::flog() { + fldln2(); + fxch(); + fyl2x(); +} + +void Assembler::flog10() { + fldlg2(); + fxch(); + fyl2x(); +} + +void Assembler::fmul(int i) { + emit_farith(0xD8, 0xC8, i); +} + +void Assembler::fmul_d(Address src) { + InstructionMark im(this); + emit_byte(0xDC); + emit_operand32(rcx, src); +} + +void Assembler::fmul_s(Address src) { + InstructionMark im(this); + emit_byte(0xD8); + emit_operand32(rcx, src); +} + +void Assembler::fmula(int i) { + emit_farith(0xDC, 0xC8, i); +} + +void Assembler::fmulp(int i) { + emit_farith(0xDE, 0xC8, i); +} + +void Assembler::fnsave(Address dst) { + InstructionMark im(this); + emit_byte(0xDD); + emit_operand32(rsi, dst); +} + +void Assembler::fnstcw(Address src) { + InstructionMark im(this); + emit_byte(0x9B); + emit_byte(0xD9); + emit_operand32(rdi, src); +} + +void Assembler::fnstsw_ax() { + emit_byte(0xdF); + emit_byte(0xE0); +} + +void Assembler::fprem() { + emit_byte(0xD9); + emit_byte(0xF8); +} + +void Assembler::fprem1() { + emit_byte(0xD9); + emit_byte(0xF5); +} + +void Assembler::frstor(Address src) { + InstructionMark im(this); + emit_byte(0xDD); + emit_operand32(rsp, src); +} + 
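[Editorial note, not part of the patch] The flog() and flog10() helpers above build natural and base-10 logarithms out of fldln2/fldlg2, fxch and fyl2x: fyl2x computes ST(1) * log2(ST(0)) and pops, so leaving ln(2) (respectively log10(2)) below x on the x87 stack yields ln(x) (respectively log10(x)). A small standalone check of that identity, for illustration only:

  // Illustrative check (not HotSpot code) of the identity used by flog()/flog10().
  #include <cassert>
  #include <cmath>

  int main() {
    const double x = 42.0;
    assert(std::fabs(std::log(x)   - std::log(2.0)   * std::log2(x)) < 1e-12);
    assert(std::fabs(std::log10(x) - std::log10(2.0) * std::log2(x)) < 1e-12);
    return 0;
  }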
+void Assembler::fsin() { + emit_byte(0xD9); + emit_byte(0xFE); +} + +void Assembler::fsqrt() { + emit_byte(0xD9); + emit_byte(0xFA); +} + +void Assembler::fst_d(Address adr) { + InstructionMark im(this); + emit_byte(0xDD); + emit_operand32(rdx, adr); +} + +void Assembler::fst_s(Address adr) { + InstructionMark im(this); + emit_byte(0xD9); + emit_operand32(rdx, adr); +} + +void Assembler::fstp_d(Address adr) { + InstructionMark im(this); + emit_byte(0xDD); + emit_operand32(rbx, adr); +} + +void Assembler::fstp_d(int index) { + emit_farith(0xDD, 0xD8, index); +} + +void Assembler::fstp_s(Address adr) { + InstructionMark im(this); + emit_byte(0xD9); + emit_operand32(rbx, adr); +} + +void Assembler::fstp_x(Address adr) { + InstructionMark im(this); + emit_byte(0xDB); + emit_operand32(rdi, adr); +} + +void Assembler::fsub(int i) { + emit_farith(0xD8, 0xE0, i); +} + +void Assembler::fsub_d(Address src) { + InstructionMark im(this); + emit_byte(0xDC); + emit_operand32(rsp, src); +} + +void Assembler::fsub_s(Address src) { + InstructionMark im(this); + emit_byte(0xD8); + emit_operand32(rsp, src); +} + +void Assembler::fsuba(int i) { + emit_farith(0xDC, 0xE8, i); +} + +void Assembler::fsubp(int i) { + emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong) +} + +void Assembler::fsubr(int i) { + emit_farith(0xD8, 0xE8, i); +} + +void Assembler::fsubr_d(Address src) { + InstructionMark im(this); + emit_byte(0xDC); + emit_operand32(rbp, src); +} + +void Assembler::fsubr_s(Address src) { + InstructionMark im(this); + emit_byte(0xD8); + emit_operand32(rbp, src); +} + +void Assembler::fsubra(int i) { + emit_farith(0xDC, 0xE0, i); +} + +void Assembler::fsubrp(int i) { + emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong) +} + +void Assembler::ftan() { + emit_byte(0xD9); + emit_byte(0xF2); + emit_byte(0xDD); + emit_byte(0xD8); +} + +void Assembler::ftst() { + emit_byte(0xD9); + emit_byte(0xE4); +} + +void Assembler::fucomi(int i) { + // make sure the instruction is supported (introduced for P6, together with cmov) + guarantee(VM_Version::supports_cmov(), "illegal instruction"); + emit_farith(0xDB, 0xE8, i); +} + +void Assembler::fucomip(int i) { + // make sure the instruction is supported (introduced for P6, together with cmov) + guarantee(VM_Version::supports_cmov(), "illegal instruction"); + emit_farith(0xDF, 0xE8, i); +} + +void Assembler::fwait() { + emit_byte(0x9B); +} + +void Assembler::fxch(int i) { + emit_farith(0xD9, 0xC8, i); +} + +void Assembler::fyl2x() { + emit_byte(0xD9); + emit_byte(0xF1); +} + +void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec, int format) { + InstructionMark im(this); + int encode = prefix_and_encode(dst->encoding()); + emit_byte(0xB8 | encode); + emit_data((int)imm32, rspec, format); +} + +#ifndef _LP64 + +void Assembler::incl(Register dst) { + // Don't use it directly. Use MacroAssembler::incrementl() instead. 
+ emit_byte(0x40 | dst->encoding()); +} + +void Assembler::lea(Register dst, Address src) { + leal(dst, src); +} + +void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) { + InstructionMark im(this); + emit_byte(0xC7); + emit_operand(rax, dst); + emit_data((int)imm32, rspec, 0); +} + + +void Assembler::popa() { // 32bit + emit_byte(0x61); +} + +void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) { + InstructionMark im(this); + emit_byte(0x68); + emit_data(imm32, rspec, 0); +} + +void Assembler::pusha() { // 32bit + emit_byte(0x60); +} + +void Assembler::set_byte_if_not_zero(Register dst) { + emit_byte(0x0F); + emit_byte(0x95); + emit_byte(0xE0 | dst->encoding()); +} + +void Assembler::shldl(Register dst, Register src) { + emit_byte(0x0F); + emit_byte(0xA5); + emit_byte(0xC0 | src->encoding() << 3 | dst->encoding()); +} + +void Assembler::shrdl(Register dst, Register src) { + emit_byte(0x0F); + emit_byte(0xAD); + emit_byte(0xC0 | src->encoding() << 3 | dst->encoding()); +} + +#else // LP64 + +// 64bit only pieces of the assembler +// This should only be used by 64bit instructions that can use rip-relative +// it cannot be used by instructions that want an immediate value. + +bool Assembler::reachable(AddressLiteral adr) { + int64_t disp; + // None will force a 64bit literal to the code stream. Likely a placeholder + // for something that will be patched later and we need to certain it will + // always be reachable. + if (adr.reloc() == relocInfo::none) { + return false; + } + if (adr.reloc() == relocInfo::internal_word_type) { + // This should be rip relative and easily reachable. + return true; + } + if (adr.reloc() == relocInfo::virtual_call_type || + adr.reloc() == relocInfo::opt_virtual_call_type || + adr.reloc() == relocInfo::static_call_type || + adr.reloc() == relocInfo::static_stub_type ) { + // This should be rip relative within the code cache and easily + // reachable until we get huge code caches. (At which point + // ic code is going to have issues). + return true; + } + if (adr.reloc() != relocInfo::external_word_type && + adr.reloc() != relocInfo::poll_return_type && // these are really external_word but need special + adr.reloc() != relocInfo::poll_type && // relocs to identify them + adr.reloc() != relocInfo::runtime_call_type ) { + return false; + } + + // Stress the correction code + if (ForceUnreachable) { + // Must be runtimecall reloc, see if it is in the codecache + // Flipping stuff in the codecache to be unreachable causes issues + // with things like inline caches where the additional instructions + // are not handled. + if (CodeCache::find_blob(adr._target) == NULL) { + return false; + } + } + // For external_word_type/runtime_call_type if it is reachable from where we + // are now (possibly a temp buffer) and where we might end up + // anywhere in the codeCache then we are always reachable. + // This would have to change if we ever save/restore shared code + // to be more pessimistic. 
+ + disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int)); + if (!is_simm32(disp)) return false; + disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int)); + if (!is_simm32(disp)) return false; + + disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int)); + + // Because rip relative is a disp + address_of_next_instruction and we + // don't know the value of address_of_next_instruction we apply a fudge factor + // to make sure we will be ok no matter the size of the instruction we get placed into. + // We don't have to fudge the checks above here because they are already worst case. + + // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal + // + 4 because better safe than sorry. + const int fudge = 12 + 4; + if (disp < 0) { + disp -= fudge; + } else { + disp += fudge; + } + return is_simm32(disp); +} + +void Assembler::emit_data64(jlong data, + relocInfo::relocType rtype, + int format) { + if (rtype == relocInfo::none) { + emit_long64(data); + } else { + emit_data64(data, Relocation::spec_simple(rtype), format); + } +} + +void Assembler::emit_data64(jlong data, + RelocationHolder const& rspec, + int format) { + assert(imm_operand == 0, "default format must be immediate in this file"); + assert(imm_operand == format, "must be immediate"); + assert(inst_mark() != NULL, "must be inside InstructionMark"); + // Do not use AbstractAssembler::relocate, which is not intended for + // embedded words. Instead, relocate to the enclosing instruction. + code_section()->relocate(inst_mark(), rspec, format); +#ifdef ASSERT + check_relocation(rspec, format); +#endif + emit_long64(data); +} + +int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { + if (reg_enc >= 8) { + prefix(REX_B); + reg_enc -= 8; + } else if (byteinst && reg_enc >= 4) { + prefix(REX); + } + return reg_enc; +} + +int Assembler::prefixq_and_encode(int reg_enc) { + if (reg_enc < 8) { + prefix(REX_W); + } else { + prefix(REX_WB); + reg_enc -= 8; + } + return reg_enc; +} + +int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) { + if (dst_enc < 8) { + if (src_enc >= 8) { + prefix(REX_B); + src_enc -= 8; + } else if (byteinst && src_enc >= 4) { + prefix(REX); + } + } else { + if (src_enc < 8) { + prefix(REX_R); + } else { + prefix(REX_RB); + src_enc -= 8; + } + dst_enc -= 8; + } + return dst_enc << 3 | src_enc; +} + +int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { + if (dst_enc < 8) { + if (src_enc < 8) { + prefix(REX_W); + } else { + prefix(REX_WB); + src_enc -= 8; + } + } else { + if (src_enc < 8) { + prefix(REX_WR); + } else { + prefix(REX_WRB); + src_enc -= 8; + } + dst_enc -= 8; + } + return dst_enc << 3 | src_enc; +} + +void Assembler::prefix(Register reg) { + if (reg->encoding() >= 8) { + prefix(REX_B); + } +} + +void Assembler::prefix(Address adr) { + if (adr.base_needs_rex()) { + if (adr.index_needs_rex()) { + prefix(REX_XB); + } else { + prefix(REX_B); + } + } else { + if (adr.index_needs_rex()) { + prefix(REX_X); + } + } +} + +void Assembler::prefixq(Address adr) { + if (adr.base_needs_rex()) { + if (adr.index_needs_rex()) { + prefix(REX_WXB); + } else { + prefix(REX_WB); + } + } else { + if (adr.index_needs_rex()) { + prefix(REX_WX); + } else { + prefix(REX_W); + } + } +} + + +void Assembler::prefix(Address adr, Register reg, bool byteinst) { + if (reg->encoding() < 8) { + if (adr.base_needs_rex()) { + if (adr.index_needs_rex()) { + prefix(REX_XB); + } else { + prefix(REX_B); + } + } else { + if 
(adr.index_needs_rex()) { + prefix(REX_X); + } else if (reg->encoding() >= 4 ) { + prefix(REX); + } + } + } else { + if (adr.base_needs_rex()) { + if (adr.index_needs_rex()) { + prefix(REX_RXB); + } else { + prefix(REX_RB); + } + } else { + if (adr.index_needs_rex()) { + prefix(REX_RX); + } else { + prefix(REX_R); + } + } + } +} + +void Assembler::prefixq(Address adr, Register src) { + if (src->encoding() < 8) { + if (adr.base_needs_rex()) { + if (adr.index_needs_rex()) { + prefix(REX_WXB); + } else { + prefix(REX_WB); + } + } else { + if (adr.index_needs_rex()) { + prefix(REX_WX); + } else { + prefix(REX_W); + } + } + } else { + if (adr.base_needs_rex()) { + if (adr.index_needs_rex()) { + prefix(REX_WRXB); + } else { + prefix(REX_WRB); + } + } else { + if (adr.index_needs_rex()) { + prefix(REX_WRX); + } else { + prefix(REX_WR); + } + } + } +} + +void Assembler::prefix(Address adr, XMMRegister reg) { + if (reg->encoding() < 8) { + if (adr.base_needs_rex()) { + if (adr.index_needs_rex()) { + prefix(REX_XB); + } else { + prefix(REX_B); + } + } else { + if (adr.index_needs_rex()) { + prefix(REX_X); + } + } + } else { + if (adr.base_needs_rex()) { + if (adr.index_needs_rex()) { + prefix(REX_RXB); + } else { + prefix(REX_RB); + } + } else { + if (adr.index_needs_rex()) { + prefix(REX_RX); + } else { + prefix(REX_R); + } + } + } +} + +void Assembler::adcq(Register dst, int32_t imm32) { + (void) prefixq_and_encode(dst->encoding()); + emit_arith(0x81, 0xD0, dst, imm32); +} + +void Assembler::adcq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x13); + emit_operand(dst, src); +} + +void Assembler::adcq(Register dst, Register src) { + (int) prefixq_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x13, 0xC0, dst, src); +} + +void Assembler::addq(Address dst, int32_t imm32) { + InstructionMark im(this); + prefixq(dst); + emit_arith_operand(0x81, rax, dst,imm32); +} + +void Assembler::addq(Address dst, Register src) { + InstructionMark im(this); + prefixq(dst, src); + emit_byte(0x01); + emit_operand(src, dst); +} + +void Assembler::addq(Register dst, int32_t imm32) { + (void) prefixq_and_encode(dst->encoding()); + emit_arith(0x81, 0xC0, dst, imm32); +} + +void Assembler::addq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x03); + emit_operand(dst, src); +} + +void Assembler::addq(Register dst, Register src) { + (void) prefixq_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x03, 0xC0, dst, src); +} + +void Assembler::andq(Register dst, int32_t imm32) { + (void) prefixq_and_encode(dst->encoding()); + emit_arith(0x81, 0xE0, dst, imm32); +} + +void Assembler::andq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x23); + emit_operand(dst, src); +} + +void Assembler::andq(Register dst, Register src) { + (int) prefixq_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x23, 0xC0, dst, src); +} + +void Assembler::bswapq(Register reg) { + int encode = prefixq_and_encode(reg->encoding()); + emit_byte(0x0F); + emit_byte(0xC8 | encode); +} + +void Assembler::cdqq() { + prefix(REX_W); + emit_byte(0x99); +} + +void Assembler::clflush(Address adr) { + prefix(adr); + emit_byte(0x0F); + emit_byte(0xAE); + emit_operand(rdi, adr); +} + +void Assembler::cmovq(Condition cc, Register dst, Register src) { + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x40 | cc); + emit_byte(0xC0 | encode); +} + +void 
Assembler::cmovq(Condition cc, Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x0F); + emit_byte(0x40 | cc); + emit_operand(dst, src); +} + +void Assembler::cmpq(Address dst, int32_t imm32) { + InstructionMark im(this); + prefixq(dst); + emit_byte(0x81); + emit_operand(rdi, dst, 4); + emit_long(imm32); +} + +void Assembler::cmpq(Register dst, int32_t imm32) { + (void) prefixq_and_encode(dst->encoding()); + emit_arith(0x81, 0xF8, dst, imm32); +} + +void Assembler::cmpq(Address dst, Register src) { + InstructionMark im(this); + prefixq(dst, src); + emit_byte(0x3B); + emit_operand(src, dst); +} + +void Assembler::cmpq(Register dst, Register src) { + (void) prefixq_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x3B, 0xC0, dst, src); +} + +void Assembler::cmpq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x3B); + emit_operand(dst, src); +} + +void Assembler::cmpxchgq(Register reg, Address adr) { + InstructionMark im(this); + prefixq(adr, reg); + emit_byte(0x0F); + emit_byte(0xB1); + emit_operand(reg, adr); +} + +void Assembler::cvtsi2sdq(XMMRegister dst, Register src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF2); + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x2A); + emit_byte(0xC0 | encode); +} + +void Assembler::cvtsi2ssq(XMMRegister dst, Register src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + emit_byte(0xF3); + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x2A); + emit_byte(0xC0 | encode); +} + +void Assembler::cvttsd2siq(Register dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_byte(0xF2); + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x2C); + emit_byte(0xC0 | encode); +} + +void Assembler::cvttss2siq(Register dst, XMMRegister src) { + NOT_LP64(assert(VM_Version::supports_sse(), "")); + emit_byte(0xF3); + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x2C); + emit_byte(0xC0 | encode); +} + +void Assembler::decl(Register dst) { + // Don't use it directly. Use MacroAssembler::decrementl() instead. + // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) + int encode = prefix_and_encode(dst->encoding()); + emit_byte(0xFF); + emit_byte(0xC8 | encode); +} + +void Assembler::decq(Register dst) { + // Don't use it directly. Use MacroAssembler::decrementq() instead. + // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) + int encode = prefixq_and_encode(dst->encoding()); + emit_byte(0xFF); + emit_byte(0xC8 | encode); +} + +void Assembler::decq(Address dst) { + // Don't use it directly. Use MacroAssembler::decrementq() instead. 
+ InstructionMark im(this); + prefixq(dst); + emit_byte(0xFF); + emit_operand(rcx, dst); +} + +void Assembler::fxrstor(Address src) { + prefixq(src); + emit_byte(0x0F); + emit_byte(0xAE); + emit_operand(as_Register(1), src); +} + +void Assembler::fxsave(Address dst) { + prefixq(dst); + emit_byte(0x0F); + emit_byte(0xAE); + emit_operand(as_Register(0), dst); +} + +void Assembler::idivq(Register src) { + int encode = prefixq_and_encode(src->encoding()); + emit_byte(0xF7); + emit_byte(0xF8 | encode); +} + +void Assembler::imulq(Register dst, Register src) { + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0xAF); + emit_byte(0xC0 | encode); +} + +void Assembler::imulq(Register dst, Register src, int value) { + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + if (is8bit(value)) { + emit_byte(0x6B); + emit_byte(0xC0 | encode); + emit_byte(value); + } else { + emit_byte(0x69); + emit_byte(0xC0 | encode); + emit_long(value); + } +} + +void Assembler::incl(Register dst) { + // Don't use it directly. Use MacroAssembler::incrementl() instead. + // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) + int encode = prefix_and_encode(dst->encoding()); + emit_byte(0xFF); + emit_byte(0xC0 | encode); +} + +void Assembler::incq(Register dst) { + // Don't use it directly. Use MacroAssembler::incrementq() instead. + // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) + int encode = prefixq_and_encode(dst->encoding()); + emit_byte(0xFF); + emit_byte(0xC0 | encode); +} + +void Assembler::incq(Address dst) { + // Don't use it directly. Use MacroAssembler::incrementq() instead. + InstructionMark im(this); + prefixq(dst); + emit_byte(0xFF); + emit_operand(rax, dst); +} + +void Assembler::lea(Register dst, Address src) { + leaq(dst, src); +} + +void Assembler::leaq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x8D); + emit_operand(dst, src); +} + +void Assembler::mov64(Register dst, int64_t imm64) { + InstructionMark im(this); + int encode = prefixq_and_encode(dst->encoding()); + emit_byte(0xB8 | encode); + emit_long64(imm64); +} + +void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) { + InstructionMark im(this); + int encode = prefixq_and_encode(dst->encoding()); + emit_byte(0xB8 | encode); + emit_data64(imm64, rspec); +} + +void Assembler::movdq(XMMRegister dst, Register src) { + // table D-1 says MMX/SSE2 + NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), "")); + emit_byte(0x66); + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x0F); + emit_byte(0x6E); + emit_byte(0xC0 | encode); +} + +void Assembler::movdq(Register dst, XMMRegister src) { + // table D-1 says MMX/SSE2 + NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), "")); + emit_byte(0x66); + // swap src/dst to get correct prefix + int encode = prefixq_and_encode(src->encoding(), dst->encoding()); + emit_byte(0x0F); + emit_byte(0x7E); + emit_byte(0xC0 | encode); +} + +void Assembler::movq(Register dst, Register src) { + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x8B); + emit_byte(0xC0 | encode); +} + +void Assembler::movq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x8B); + emit_operand(dst, src); +} + +void Assembler::movq(Address dst, Register src) { + InstructionMark im(this); + prefixq(dst, src); + 
emit_byte(0x89); + emit_operand(src, dst); +} + +void Assembler::movslq(Register dst, int32_t imm32) { + // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx) + // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx) + // as a result we shouldn't use until tested at runtime... + ShouldNotReachHere(); + InstructionMark im(this); + int encode = prefixq_and_encode(dst->encoding()); + emit_byte(0xC7 | encode); + emit_long(imm32); +} + +void Assembler::movslq(Address dst, int32_t imm32) { + assert(is_simm32(imm32), "lost bits"); + InstructionMark im(this); + prefixq(dst); + emit_byte(0xC7); + emit_operand(rax, dst, 4); + emit_long(imm32); +} + +void Assembler::movslq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x63); + emit_operand(dst, src); +} + +void Assembler::movslq(Register dst, Register src) { + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x63); + emit_byte(0xC0 | encode); +} + +void Assembler::negq(Register dst) { + int encode = prefixq_and_encode(dst->encoding()); + emit_byte(0xF7); + emit_byte(0xD8 | encode); +} + +void Assembler::notq(Register dst) { + int encode = prefixq_and_encode(dst->encoding()); + emit_byte(0xF7); + emit_byte(0xD0 | encode); +} + +void Assembler::orq(Address dst, int32_t imm32) { + InstructionMark im(this); + prefixq(dst); + emit_byte(0x81); + emit_operand(rcx, dst, 4); + emit_long(imm32); +} + +void Assembler::orq(Register dst, int32_t imm32) { + (void) prefixq_and_encode(dst->encoding()); + emit_arith(0x81, 0xC8, dst, imm32); +} + +void Assembler::orq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x0B); + emit_operand(dst, src); +} + +void Assembler::orq(Register dst, Register src) { + (void) prefixq_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x0B, 0xC0, dst, src); +} + +void Assembler::popa() { // 64bit + movq(r15, Address(rsp, 0)); + movq(r14, Address(rsp, wordSize)); + movq(r13, Address(rsp, 2 * wordSize)); + movq(r12, Address(rsp, 3 * wordSize)); + movq(r11, Address(rsp, 4 * wordSize)); + movq(r10, Address(rsp, 5 * wordSize)); + movq(r9, Address(rsp, 6 * wordSize)); + movq(r8, Address(rsp, 7 * wordSize)); + movq(rdi, Address(rsp, 8 * wordSize)); + movq(rsi, Address(rsp, 9 * wordSize)); + movq(rbp, Address(rsp, 10 * wordSize)); + // skip rsp + movq(rbx, Address(rsp, 12 * wordSize)); + movq(rdx, Address(rsp, 13 * wordSize)); + movq(rcx, Address(rsp, 14 * wordSize)); + movq(rax, Address(rsp, 15 * wordSize)); + + addq(rsp, 16 * wordSize); +} + +void Assembler::popq(Address dst) { + InstructionMark im(this); + prefixq(dst); + emit_byte(0x8F); + emit_operand(rax, dst); +} + +void Assembler::pusha() { // 64bit + // we have to store original rsp. ABI says that 128 bytes + // below rsp are local scratch. 
+ movq(Address(rsp, -5 * wordSize), rsp); + + subq(rsp, 16 * wordSize); + + movq(Address(rsp, 15 * wordSize), rax); + movq(Address(rsp, 14 * wordSize), rcx); + movq(Address(rsp, 13 * wordSize), rdx); + movq(Address(rsp, 12 * wordSize), rbx); + // skip rsp + movq(Address(rsp, 10 * wordSize), rbp); + movq(Address(rsp, 9 * wordSize), rsi); + movq(Address(rsp, 8 * wordSize), rdi); + movq(Address(rsp, 7 * wordSize), r8); + movq(Address(rsp, 6 * wordSize), r9); + movq(Address(rsp, 5 * wordSize), r10); + movq(Address(rsp, 4 * wordSize), r11); + movq(Address(rsp, 3 * wordSize), r12); + movq(Address(rsp, 2 * wordSize), r13); + movq(Address(rsp, wordSize), r14); + movq(Address(rsp, 0), r15); +} + +void Assembler::pushq(Address src) { + InstructionMark im(this); + prefixq(src); + emit_byte(0xFF); + emit_operand(rsi, src); +} + +void Assembler::rclq(Register dst, int imm8) { + assert(isShiftCount(imm8 >> 1), "illegal shift count"); + int encode = prefixq_and_encode(dst->encoding()); + if (imm8 == 1) { + emit_byte(0xD1); + emit_byte(0xD0 | encode); + } else { + emit_byte(0xC1); + emit_byte(0xD0 | encode); + emit_byte(imm8); + } +} +void Assembler::sarq(Register dst, int imm8) { + assert(isShiftCount(imm8 >> 1), "illegal shift count"); + int encode = prefixq_and_encode(dst->encoding()); + if (imm8 == 1) { + emit_byte(0xD1); + emit_byte(0xF8 | encode); + } else { + emit_byte(0xC1); + emit_byte(0xF8 | encode); + emit_byte(imm8); + } +} + +void Assembler::sarq(Register dst) { + int encode = prefixq_and_encode(dst->encoding()); + emit_byte(0xD3); + emit_byte(0xF8 | encode); +} +void Assembler::sbbq(Address dst, int32_t imm32) { + InstructionMark im(this); + prefixq(dst); + emit_arith_operand(0x81, rbx, dst, imm32); +} + +void Assembler::sbbq(Register dst, int32_t imm32) { + (void) prefixq_and_encode(dst->encoding()); + emit_arith(0x81, 0xD8, dst, imm32); +} + +void Assembler::sbbq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x1B); + emit_operand(dst, src); +} + +void Assembler::sbbq(Register dst, Register src) { + (void) prefixq_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x1B, 0xC0, dst, src); +} + +void Assembler::shlq(Register dst, int imm8) { + assert(isShiftCount(imm8 >> 1), "illegal shift count"); + int encode = prefixq_and_encode(dst->encoding()); + if (imm8 == 1) { + emit_byte(0xD1); + emit_byte(0xE0 | encode); + } else { + emit_byte(0xC1); + emit_byte(0xE0 | encode); + emit_byte(imm8); + } +} + +void Assembler::shlq(Register dst) { + int encode = prefixq_and_encode(dst->encoding()); + emit_byte(0xD3); + emit_byte(0xE0 | encode); +} + +void Assembler::shrq(Register dst, int imm8) { + assert(isShiftCount(imm8 >> 1), "illegal shift count"); + int encode = prefixq_and_encode(dst->encoding()); + emit_byte(0xC1); + emit_byte(0xE8 | encode); + emit_byte(imm8); +} + +void Assembler::shrq(Register dst) { + int encode = prefixq_and_encode(dst->encoding()); + emit_byte(0xD3); + emit_byte(0xE8 | encode); +} + +void Assembler::sqrtsd(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0xF2); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x51); + emit_operand(dst, src); +} + +void Assembler::subq(Address dst, int32_t imm32) { + InstructionMark im(this); + prefixq(dst); + if (is8bit(imm32)) { + emit_byte(0x83); + emit_operand(rbp, dst, 1); + emit_byte(imm32 & 0xFF); + } else { + emit_byte(0x81); + emit_operand(rbp, dst, 4); + emit_long(imm32); + } +} + +void 
Assembler::subq(Register dst, int32_t imm32) { + (void) prefixq_and_encode(dst->encoding()); + emit_arith(0x81, 0xE8, dst, imm32); +} + +void Assembler::subq(Address dst, Register src) { + InstructionMark im(this); + prefixq(dst, src); + emit_byte(0x29); + emit_operand(src, dst); +} + +void Assembler::subq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x2B); + emit_operand(dst, src); +} + +void Assembler::subq(Register dst, Register src) { + (void) prefixq_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x2B, 0xC0, dst, src); +} + +void Assembler::testq(Register dst, int32_t imm32) { + // not using emit_arith because test + // doesn't support sign-extension of + // 8bit operands + int encode = dst->encoding(); + if (encode == 0) { + prefix(REX_W); + emit_byte(0xA9); + } else { + encode = prefixq_and_encode(encode); + emit_byte(0xF7); + emit_byte(0xC0 | encode); + } + emit_long(imm32); +} + +void Assembler::testq(Register dst, Register src) { + (void) prefixq_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x85, 0xC0, dst, src); +} + +void Assembler::xaddq(Address dst, Register src) { + InstructionMark im(this); + prefixq(dst, src); + emit_byte(0x0F); + emit_byte(0xC1); + emit_operand(src, dst); +} + +void Assembler::xchgq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x87); + emit_operand(dst, src); +} + +void Assembler::xchgq(Register dst, Register src) { + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_byte(0x87); + emit_byte(0xc0 | encode); +} + +void Assembler::xorq(Register dst, Register src) { + (void) prefixq_and_encode(dst->encoding(), src->encoding()); + emit_arith(0x33, 0xC0, dst, src); +} + +void Assembler::xorq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_byte(0x33); + emit_operand(dst, src); +} + +#endif // !LP64 + +static Assembler::Condition reverse[] = { + Assembler::noOverflow /* overflow = 0x0 */ , + Assembler::overflow /* noOverflow = 0x1 */ , + Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ , + Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ , + Assembler::notZero /* zero = 0x4, equal = 0x4 */ , + Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ , + Assembler::above /* belowEqual = 0x6 */ , + Assembler::belowEqual /* above = 0x7 */ , + Assembler::positive /* negative = 0x8 */ , + Assembler::negative /* positive = 0x9 */ , + Assembler::noParity /* parity = 0xa */ , + Assembler::parity /* noParity = 0xb */ , + Assembler::greaterEqual /* less = 0xc */ , + Assembler::less /* greaterEqual = 0xd */ , + Assembler::greater /* lessEqual = 0xe */ , + Assembler::lessEqual /* greater = 0xf, */ + +}; + + +// Implementation of MacroAssembler + +// First all the versions that have distinct versions depending on 32/64 bit +// Unless the difference is trivial (1 line or so). 
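[Editorial note, not part of the patch] The reverse[] table just defined maps each x86 condition code to its logical negation. Because the hardware encodes negation as flipping bit 0 of the 4-bit code (zero = 0x4 vs. notZero = 0x5, below = 0x2 vs. aboveEqual = 0x3, and so on), every entry in the table is simply the index with its low bit toggled. A tiny standalone sketch of that observation follows; the negate_cc helper is hypothetical and shown only to make the pattern explicit.

  // Illustrative sketch (not HotSpot code): negating an x86 condition code.
  #include <cassert>

  static int negate_cc(int cc) { return cc ^ 1; }

  int main() {
    assert(negate_cc(0x4) == 0x5);             // zero   -> notZero
    assert(negate_cc(0x2) == 0x3);             // below  -> aboveEqual
    assert(negate_cc(0xc) == 0xd);             // less   -> greaterEqual
    assert(negate_cc(negate_cc(0x7)) == 0x7);  // negation is an involution
    return 0;
  }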
+ +#ifndef _LP64 + +// 32bit versions + +Address MacroAssembler::as_Address(AddressLiteral adr) { + return Address(adr.target(), adr.rspec()); +} + +Address MacroAssembler::as_Address(ArrayAddress adr) { + return Address::make_array(adr); +} + +int MacroAssembler::biased_locking_enter(Register lock_reg, + Register obj_reg, + Register swap_reg, + Register tmp_reg, + bool swap_reg_contains_mark, + Label& done, + Label* slow_case, + BiasedLockingCounters* counters) { + assert(UseBiasedLocking, "why call this otherwise?"); + assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg"); + assert_different_registers(lock_reg, obj_reg, swap_reg); + + if (PrintBiasedLockingStatistics && counters == NULL) + counters = BiasedLocking::counters(); + + bool need_tmp_reg = false; + if (tmp_reg == noreg) { + need_tmp_reg = true; + tmp_reg = lock_reg; + } else { + assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg); + } + assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); + Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes()); + Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes()); + Address saved_mark_addr(lock_reg, 0); + + // Biased locking + // See whether the lock is currently biased toward our thread and + // whether the epoch is still valid + // Note that the runtime guarantees sufficient alignment of JavaThread + // pointers to allow age to be placed into low bits + // First check to see whether biasing is even enabled for this object + Label cas_label; + int null_check_offset = -1; + if (!swap_reg_contains_mark) { + null_check_offset = offset(); + movl(swap_reg, mark_addr); + } + if (need_tmp_reg) { + push(tmp_reg); + } + movl(tmp_reg, swap_reg); + andl(tmp_reg, markOopDesc::biased_lock_mask_in_place); + cmpl(tmp_reg, markOopDesc::biased_lock_pattern); + if (need_tmp_reg) { + pop(tmp_reg); + } + jcc(Assembler::notEqual, cas_label); + // The bias pattern is present in the object's header. Need to check + // whether the bias owner and the epoch are both still current. + // Note that because there is no current thread register on x86 we + // need to store off the mark word we read out of the object to + // avoid reloading it and needing to recheck invariants below. This + // store is unfortunate but it makes the overall code shorter and + // simpler. + movl(saved_mark_addr, swap_reg); + if (need_tmp_reg) { + push(tmp_reg); + } + get_thread(tmp_reg); + xorl(swap_reg, tmp_reg); + if (swap_reg_contains_mark) { + null_check_offset = offset(); + } + movl(tmp_reg, klass_addr); + xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); + andl(swap_reg, ~((int) markOopDesc::age_mask_in_place)); + if (need_tmp_reg) { + pop(tmp_reg); + } + if (counters != NULL) { + cond_inc32(Assembler::zero, + ExternalAddress((address)counters->biased_lock_entry_count_addr())); + } + jcc(Assembler::equal, done); + + Label try_revoke_bias; + Label try_rebias; + + // At this point we know that the header has the bias pattern and + // that we are not the bias owner in the current epoch. We need to + // figure out more details about the state of the header in order to + // know what operations can be legally performed on the object's + // header. + + // If the low three bits in the xor result aren't clear, that means + // the prototype header is no longer biased and we have to revoke + // the bias on this object. 
+ testl(swap_reg, markOopDesc::biased_lock_mask_in_place); + jcc(Assembler::notZero, try_revoke_bias); + + // Biasing is still enabled for this data type. See whether the + // epoch of the current bias is still valid, meaning that the epoch + // bits of the mark word are equal to the epoch bits of the + // prototype header. (Note that the prototype header's epoch bits + // only change at a safepoint.) If not, attempt to rebias the object + // toward the current thread. Note that we must be absolutely sure + // that the current epoch is invalid in order to do this because + // otherwise the manipulations it performs on the mark word are + // illegal. + testl(swap_reg, markOopDesc::epoch_mask_in_place); + jcc(Assembler::notZero, try_rebias); + + // The epoch of the current bias is still valid but we know nothing + // about the owner; it might be set or it might be clear. Try to + // acquire the bias of the object using an atomic operation. If this + // fails we will go in to the runtime to revoke the object's bias. + // Note that we first construct the presumed unbiased header so we + // don't accidentally blow away another thread's valid bias. + movl(swap_reg, saved_mark_addr); + andl(swap_reg, + markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); + if (need_tmp_reg) { + push(tmp_reg); + } + get_thread(tmp_reg); + orl(tmp_reg, swap_reg); + if (os::is_MP()) { + lock(); + } + cmpxchgptr(tmp_reg, Address(obj_reg, 0)); + if (need_tmp_reg) { + pop(tmp_reg); + } + // If the biasing toward our thread failed, this means that + // another thread succeeded in biasing it toward itself and we + // need to revoke that bias. The revocation will occur in the + // interpreter runtime in the slow case. + if (counters != NULL) { + cond_inc32(Assembler::zero, + ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr())); + } + if (slow_case != NULL) { + jcc(Assembler::notZero, *slow_case); + } + jmp(done); + + bind(try_rebias); + // At this point we know the epoch has expired, meaning that the + // current "bias owner", if any, is actually invalid. Under these + // circumstances _only_, we are allowed to use the current header's + // value as the comparison value when doing the cas to acquire the + // bias in the current epoch. In other words, we allow transfer of + // the bias from one thread to another directly in this situation. + // + // FIXME: due to a lack of registers we currently blow away the age + // bits in this situation. Should attempt to preserve them. + if (need_tmp_reg) { + push(tmp_reg); + } + get_thread(tmp_reg); + movl(swap_reg, klass_addr); + orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); + movl(swap_reg, saved_mark_addr); + if (os::is_MP()) { + lock(); + } + cmpxchgptr(tmp_reg, Address(obj_reg, 0)); + if (need_tmp_reg) { + pop(tmp_reg); + } + // If the biasing toward our thread failed, then another thread + // succeeded in biasing it toward itself and we need to revoke that + // bias. The revocation will occur in the runtime in the slow case. 
+ if (counters != NULL) { + cond_inc32(Assembler::zero, + ExternalAddress((address)counters->rebiased_lock_entry_count_addr())); + } + if (slow_case != NULL) { + jcc(Assembler::notZero, *slow_case); + } + jmp(done); + + bind(try_revoke_bias); + // The prototype mark in the klass doesn't have the bias bit set any + // more, indicating that objects of this data type are not supposed + // to be biased any more. We are going to try to reset the mark of + // this object to the prototype value and fall through to the + // CAS-based locking scheme. Note that if our CAS fails, it means + // that another thread raced us for the privilege of revoking the + // bias of this particular object, so it's okay to continue in the + // normal locking code. + // + // FIXME: due to a lack of registers we currently blow away the age + // bits in this situation. Should attempt to preserve them. + movl(swap_reg, saved_mark_addr); + if (need_tmp_reg) { + push(tmp_reg); + } + movl(tmp_reg, klass_addr); + movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); + if (os::is_MP()) { + lock(); + } + cmpxchgptr(tmp_reg, Address(obj_reg, 0)); + if (need_tmp_reg) { + pop(tmp_reg); + } + // Fall through to the normal CAS-based lock, because no matter what + // the result of the above CAS, some thread must have succeeded in + // removing the bias bit from the object's header. + if (counters != NULL) { + cond_inc32(Assembler::zero, + ExternalAddress((address)counters->revoked_lock_entry_count_addr())); + } + + bind(cas_label); + + return null_check_offset; +} +void MacroAssembler::call_VM_leaf_base(address entry_point, + int number_of_arguments) { + call(RuntimeAddress(entry_point)); + increment(rsp, number_of_arguments * wordSize); +} + +void MacroAssembler::cmpoop(Address src1, jobject obj) { + cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); +} + +void MacroAssembler::cmpoop(Register src1, jobject obj) { + cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); +} + +void MacroAssembler::extend_sign(Register hi, Register lo) { + // According to Intel Doc. AP-526, "Integer Divide", p.18. + if (VM_Version::is_P6() && hi == rdx && lo == rax) { + cdql(); + } else { + movl(hi, lo); + sarl(hi, 31); + } +} + +void MacroAssembler::fat_nop() { + // A 5 byte nop that is safe for patching (see patch_verified_entry) + emit_byte(0x26); // es: + emit_byte(0x2e); // cs: + emit_byte(0x64); // fs: + emit_byte(0x65); // gs: + emit_byte(0x90); +} + +void MacroAssembler::jC2(Register tmp, Label& L) { + // set parity bit if FPU flag C2 is set (via rax) + save_rax(tmp); + fwait(); fnstsw_ax(); + sahf(); + restore_rax(tmp); + // branch + jcc(Assembler::parity, L); +} + +void MacroAssembler::jnC2(Register tmp, Label& L) { + // set parity bit if FPU flag C2 is set (via rax) + save_rax(tmp); + fwait(); fnstsw_ax(); + sahf(); + restore_rax(tmp); + // branch + jcc(Assembler::noParity, L); +} + +// 32bit can do a case table jump in one instruction but we no longer allow the base +// to be installed in the Address class +void MacroAssembler::jump(ArrayAddress entry) { + jmp(as_Address(entry)); +} + +// Note: y_lo will be destroyed +void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) { + // Long compare for Java (semantics as described in JVM spec.) 
+ Label high, low, done; + + cmpl(x_hi, y_hi); + jcc(Assembler::less, low); + jcc(Assembler::greater, high); + // x_hi is the return register + xorl(x_hi, x_hi); + cmpl(x_lo, y_lo); + jcc(Assembler::below, low); + jcc(Assembler::equal, done); + + bind(high); + xorl(x_hi, x_hi); + increment(x_hi); + jmp(done); + + bind(low); + xorl(x_hi, x_hi); + decrementl(x_hi); + + bind(done); +} + +void MacroAssembler::lea(Register dst, AddressLiteral src) { + mov_literal32(dst, (int32_t)src.target(), src.rspec()); +} + +void MacroAssembler::lea(Address dst, AddressLiteral adr) { + // leal(dst, as_Address(adr)); + // see note in movl as to why we must use a move + mov_literal32(dst, (int32_t) adr.target(), adr.rspec()); +} + +void MacroAssembler::leave() { + mov(rsp, rbp); + pop(rbp); +} + +void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) { + // Multiplication of two Java long values stored on the stack + // as illustrated below. Result is in rdx:rax. + // + // rsp ---> [ ?? ] \ \ + // .... | y_rsp_offset | + // [ y_lo ] / (in bytes) | x_rsp_offset + // [ y_hi ] | (in bytes) + // .... | + // [ x_lo ] / + // [ x_hi ] + // .... + // + // Basic idea: lo(result) = lo(x_lo * y_lo) + // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi) + Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset); + Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset); + Label quick; + // load x_hi, y_hi and check if quick + // multiplication is possible + movl(rbx, x_hi); + movl(rcx, y_hi); + movl(rax, rbx); + orl(rbx, rcx); // rbx, = 0 <=> x_hi = 0 and y_hi = 0 + jcc(Assembler::zero, quick); // if rbx, = 0 do quick multiply + // do full multiplication + // 1st step + mull(y_lo); // x_hi * y_lo + movl(rbx, rax); // save lo(x_hi * y_lo) in rbx, + // 2nd step + movl(rax, x_lo); + mull(rcx); // x_lo * y_hi + addl(rbx, rax); // add lo(x_lo * y_hi) to rbx, + // 3rd step + bind(quick); // note: rbx, = 0 if quick multiply! + movl(rax, x_lo); + mull(y_lo); // x_lo * y_lo + addl(rdx, rbx); // correct hi(x_lo * y_lo) +} + +void MacroAssembler::lneg(Register hi, Register lo) { + negl(lo); + adcl(hi, 0); + negl(hi); +} + +void MacroAssembler::lshl(Register hi, Register lo) { + // Java shift left long support (semantics as described in JVM spec., p.305) + // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n)) + // shift value is in rcx ! + assert(hi != rcx, "must not use rcx"); + assert(lo != rcx, "must not use rcx"); + const Register s = rcx; // shift count + const int n = BitsPerWord; + Label L; + andl(s, 0x3f); // s := s & 0x3f (s < 0x40) + cmpl(s, n); // if (s < n) + jcc(Assembler::less, L); // else (s >= n) + movl(hi, lo); // x := x << n + xorl(lo, lo); + // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n! 
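(Aside on the lmul routine above: the 32-bit-piece decomposition its comment describes can be checked directly in C++. Names below are illustrative; the sketch only demonstrates the identity the assembly relies on.)

    #include <cstdint>
    #include <cassert>

    static inline uint32_t lo(uint64_t v) { return (uint32_t)v; }
    static inline uint32_t hi(uint64_t v) { return (uint32_t)(v >> 32); }

    // lo(result) = lo(x_lo*y_lo)
    // hi(result) = hi(x_lo*y_lo) + lo(x_hi*y_lo) + lo(x_lo*y_hi)   (all mod 2^32)
    uint64_t lmul32x32(uint64_t x, uint64_t y) {
      uint64_t ll = (uint64_t)lo(x) * lo(y);
      uint32_t result_hi = hi(ll) + lo((uint64_t)hi(x) * lo(y)) + lo((uint64_t)lo(x) * hi(y));
      return ((uint64_t)result_hi << 32) | lo(ll);
    }

    int main() {
      uint64_t x = 0x123456789abcdef0ULL, y = 0x0fedcba987654321ULL;
      assert(lmul32x32(x, y) == x * y);   // identity holds modulo 2^64
    }
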
+ bind(L); // s (mod n) < n + shldl(hi, lo); // x := x << s + shll(lo); +} + + +void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) { + // Java shift right long support (semantics as described in JVM spec., p.306 & p.310) + // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n)) + assert(hi != rcx, "must not use rcx"); + assert(lo != rcx, "must not use rcx"); + const Register s = rcx; // shift count + const int n = BitsPerWord; + Label L; + andl(s, 0x3f); // s := s & 0x3f (s < 0x40) + cmpl(s, n); // if (s < n) + jcc(Assembler::less, L); // else (s >= n) + movl(lo, hi); // x := x >> n + if (sign_extension) sarl(hi, 31); + else xorl(hi, hi); + // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n! + bind(L); // s (mod n) < n + shrdl(lo, hi); // x := x >> s + if (sign_extension) sarl(hi); + else shrl(hi); +} + +void MacroAssembler::movoop(Register dst, jobject obj) { + mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate()); +} + +void MacroAssembler::movoop(Address dst, jobject obj) { + mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate()); +} + +void MacroAssembler::movptr(Register dst, AddressLiteral src) { + if (src.is_lval()) { + mov_literal32(dst, (intptr_t)src.target(), src.rspec()); + } else { + movl(dst, as_Address(src)); + } +} + +void MacroAssembler::movptr(ArrayAddress dst, Register src) { + movl(as_Address(dst), src); +} + +void MacroAssembler::movptr(Register dst, ArrayAddress src) { + movl(dst, as_Address(src)); +} + +// src should NEVER be a real pointer. Use AddressLiteral for true pointers +void MacroAssembler::movptr(Address dst, intptr_t src) { + movl(dst, src); +} + + +void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) { + movsd(dst, as_Address(src)); +} + +void MacroAssembler::pop_callee_saved_registers() { + pop(rcx); + pop(rdx); + pop(rdi); + pop(rsi); +} + +void MacroAssembler::pop_fTOS() { + fld_d(Address(rsp, 0)); + addl(rsp, 2 * wordSize); +} + +void MacroAssembler::push_callee_saved_registers() { + push(rsi); + push(rdi); + push(rdx); + push(rcx); +} + +void MacroAssembler::push_fTOS() { + subl(rsp, 2 * wordSize); + fstp_d(Address(rsp, 0)); +} + + +void MacroAssembler::pushoop(jobject obj) { + push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate()); +} + + +void MacroAssembler::pushptr(AddressLiteral src) { + if (src.is_lval()) { + push_literal32((int32_t)src.target(), src.rspec()); + } else { + pushl(as_Address(src)); + } +} + +void MacroAssembler::set_word_if_not_zero(Register dst) { + xorl(dst, dst); + set_byte_if_not_zero(dst); +} + +static void pass_arg0(MacroAssembler* masm, Register arg) { + masm->push(arg); +} + +static void pass_arg1(MacroAssembler* masm, Register arg) { + masm->push(arg); +} + +static void pass_arg2(MacroAssembler* masm, Register arg) { + masm->push(arg); +} + +static void pass_arg3(MacroAssembler* masm, Register arg) { + masm->push(arg); +} + +#ifndef PRODUCT +extern "C" void findpc(intptr_t x); +#endif + +void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) { + // In order to get locks to work, we need to fake a in_VM state + JavaThread* thread = JavaThread::current(); + JavaThreadState saved_state = thread->thread_state(); + thread->set_thread_state(_thread_in_vm); + if (ShowMessageBoxOnError) { + JavaThread* thread = JavaThread::current(); + JavaThreadState saved_state = thread->thread_state(); + thread->set_thread_state(_thread_in_vm); + if 
(CountBytecodes || TraceBytecodes || StopInterpreterAt) { + ttyLocker ttyl; + BytecodeCounter::print(); + } + // To see where a verify_oop failed, get $ebx+40/X for this frame. + // This is the value of eip which points to where verify_oop will return. + if (os::message_box(msg, "Execution stopped, print registers?")) { + ttyLocker ttyl; + tty->print_cr("eip = 0x%08x", eip); +#ifndef PRODUCT + tty->cr(); + findpc(eip); + tty->cr(); +#endif + tty->print_cr("rax, = 0x%08x", rax); + tty->print_cr("rbx, = 0x%08x", rbx); + tty->print_cr("rcx = 0x%08x", rcx); + tty->print_cr("rdx = 0x%08x", rdx); + tty->print_cr("rdi = 0x%08x", rdi); + tty->print_cr("rsi = 0x%08x", rsi); + tty->print_cr("rbp, = 0x%08x", rbp); + tty->print_cr("rsp = 0x%08x", rsp); + BREAKPOINT; + } + } else { + ttyLocker ttyl; + ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg); + assert(false, "DEBUG MESSAGE"); + } + ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); +} + +void MacroAssembler::stop(const char* msg) { + ExternalAddress message((address)msg); + // push address of message + pushptr(message.addr()); + { Label L; call(L, relocInfo::none); bind(L); } // push eip + pusha(); // push registers + call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32))); + hlt(); +} + +void MacroAssembler::warn(const char* msg) { + push_CPU_state(); + + ExternalAddress message((address) msg); + // push address of message + pushptr(message.addr()); + + call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning))); + addl(rsp, wordSize); // discard argument + pop_CPU_state(); +} + +#else // _LP64 + +// 64 bit versions + +Address MacroAssembler::as_Address(AddressLiteral adr) { + // amd64 always does this as a pc-rel + // we can be absolute or disp based on the instruction type + // jmp/call are displacements others are absolute + assert(!adr.is_lval(), "must be rval"); + assert(reachable(adr), "must be"); + return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc()); + +} + +Address MacroAssembler::as_Address(ArrayAddress adr) { + AddressLiteral base = adr.base(); + lea(rscratch1, base); + Address index = adr.index(); + assert(index._disp == 0, "must not have disp"); // maybe it can? 
+ Address array(rscratch1, index._index, index._scale, index._disp); + return array; +} + +int MacroAssembler::biased_locking_enter(Register lock_reg, + Register obj_reg, + Register swap_reg, + Register tmp_reg, + bool swap_reg_contains_mark, + Label& done, + Label* slow_case, + BiasedLockingCounters* counters) { + assert(UseBiasedLocking, "why call this otherwise?"); + assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq"); + assert(tmp_reg != noreg, "tmp_reg must be supplied"); + assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg); + assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); + Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes()); + Address saved_mark_addr(lock_reg, 0); + + if (PrintBiasedLockingStatistics && counters == NULL) + counters = BiasedLocking::counters(); + + // Biased locking + // See whether the lock is currently biased toward our thread and + // whether the epoch is still valid + // Note that the runtime guarantees sufficient alignment of JavaThread + // pointers to allow age to be placed into low bits + // First check to see whether biasing is even enabled for this object + Label cas_label; + int null_check_offset = -1; + if (!swap_reg_contains_mark) { + null_check_offset = offset(); + movq(swap_reg, mark_addr); + } + movq(tmp_reg, swap_reg); + andq(tmp_reg, markOopDesc::biased_lock_mask_in_place); + cmpq(tmp_reg, markOopDesc::biased_lock_pattern); + jcc(Assembler::notEqual, cas_label); + // The bias pattern is present in the object's header. Need to check + // whether the bias owner and the epoch are both still current. + load_prototype_header(tmp_reg, obj_reg); + orq(tmp_reg, r15_thread); + xorq(tmp_reg, swap_reg); + andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place)); + if (counters != NULL) { + cond_inc32(Assembler::zero, + ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr())); + } + jcc(Assembler::equal, done); + + Label try_revoke_bias; + Label try_rebias; + + // At this point we know that the header has the bias pattern and + // that we are not the bias owner in the current epoch. We need to + // figure out more details about the state of the header in order to + // know what operations can be legally performed on the object's + // header. + + // If the low three bits in the xor result aren't clear, that means + // the prototype header is no longer biased and we have to revoke + // the bias on this object. + testq(tmp_reg, markOopDesc::biased_lock_mask_in_place); + jcc(Assembler::notZero, try_revoke_bias); + + // Biasing is still enabled for this data type. See whether the + // epoch of the current bias is still valid, meaning that the epoch + // bits of the mark word are equal to the epoch bits of the + // prototype header. (Note that the prototype header's epoch bits + // only change at a safepoint.) If not, attempt to rebias the object + // toward the current thread. Note that we must be absolutely sure + // that the current epoch is invalid in order to do this because + // otherwise the manipulations it performs on the mark word are + // illegal. + testq(tmp_reg, markOopDesc::epoch_mask_in_place); + jcc(Assembler::notZero, try_rebias); + + // The epoch of the current bias is still valid but we know nothing + // about the owner; it might be set or it might be clear. Try to + // acquire the bias of the object using an atomic operation. 
If this + // fails we will go in to the runtime to revoke the object's bias. + // Note that we first construct the presumed unbiased header so we + // don't accidentally blow away another thread's valid bias. + andq(swap_reg, + markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); + movq(tmp_reg, swap_reg); + orq(tmp_reg, r15_thread); + if (os::is_MP()) { + lock(); + } + cmpxchgq(tmp_reg, Address(obj_reg, 0)); + // If the biasing toward our thread failed, this means that + // another thread succeeded in biasing it toward itself and we + // need to revoke that bias. The revocation will occur in the + // interpreter runtime in the slow case. + if (counters != NULL) { + cond_inc32(Assembler::zero, + ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr())); + } + if (slow_case != NULL) { + jcc(Assembler::notZero, *slow_case); + } + jmp(done); + + bind(try_rebias); + // At this point we know the epoch has expired, meaning that the + // current "bias owner", if any, is actually invalid. Under these + // circumstances _only_, we are allowed to use the current header's + // value as the comparison value when doing the cas to acquire the + // bias in the current epoch. In other words, we allow transfer of + // the bias from one thread to another directly in this situation. + // + // FIXME: due to a lack of registers we currently blow away the age + // bits in this situation. Should attempt to preserve them. + load_prototype_header(tmp_reg, obj_reg); + orq(tmp_reg, r15_thread); + if (os::is_MP()) { + lock(); + } + cmpxchgq(tmp_reg, Address(obj_reg, 0)); + // If the biasing toward our thread failed, then another thread + // succeeded in biasing it toward itself and we need to revoke that + // bias. The revocation will occur in the runtime in the slow case. + if (counters != NULL) { + cond_inc32(Assembler::zero, + ExternalAddress((address) counters->rebiased_lock_entry_count_addr())); + } + if (slow_case != NULL) { + jcc(Assembler::notZero, *slow_case); + } + jmp(done); + + bind(try_revoke_bias); + // The prototype mark in the klass doesn't have the bias bit set any + // more, indicating that objects of this data type are not supposed + // to be biased any more. We are going to try to reset the mark of + // this object to the prototype value and fall through to the + // CAS-based locking scheme. Note that if our CAS fails, it means + // that another thread raced us for the privilege of revoking the + // bias of this particular object, so it's okay to continue in the + // normal locking code. + // + // FIXME: due to a lack of registers we currently blow away the age + // bits in this situation. Should attempt to preserve them. + load_prototype_header(tmp_reg, obj_reg); + if (os::is_MP()) { + lock(); + } + cmpxchgq(tmp_reg, Address(obj_reg, 0)); + // Fall through to the normal CAS-based lock, because no matter what + // the result of the above CAS, some thread must have succeeded in + // removing the bias bit from the object's header. 
+ if (counters != NULL) { + cond_inc32(Assembler::zero, + ExternalAddress((address) counters->revoked_lock_entry_count_addr())); + } + + bind(cas_label); + + return null_check_offset; +} + +void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) { + Label L, E; + +#ifdef _WIN64 + // Windows always allocates space for it's register args + assert(num_args <= 4, "only register arguments supported"); + subq(rsp, frame::arg_reg_save_area_bytes); +#endif + + // Align stack if necessary + testl(rsp, 15); + jcc(Assembler::zero, L); + + subq(rsp, 8); + { + call(RuntimeAddress(entry_point)); + } + addq(rsp, 8); + jmp(E); + + bind(L); + { + call(RuntimeAddress(entry_point)); + } + + bind(E); + +#ifdef _WIN64 + // restore stack pointer + addq(rsp, frame::arg_reg_save_area_bytes); +#endif + +} + +void MacroAssembler::cmp64(Register src1, AddressLiteral src2) { + assert(!src2.is_lval(), "should use cmpptr"); + + if (reachable(src2)) { + cmpq(src1, as_Address(src2)); + } else { + lea(rscratch1, src2); + Assembler::cmpq(src1, Address(rscratch1, 0)); + } +} + +int MacroAssembler::corrected_idivq(Register reg) { + // Full implementation of Java ldiv and lrem; checks for special + // case as described in JVM spec., p.243 & p.271. The function + // returns the (pc) offset of the idivl instruction - may be needed + // for implicit exceptions. + // + // normal case special case + // + // input : rax: dividend min_long + // reg: divisor (may not be eax/edx) -1 + // + // output: rax: quotient (= rax idiv reg) min_long + // rdx: remainder (= rax irem reg) 0 + assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register"); + static const int64_t min_long = 0x8000000000000000; + Label normal_case, special_case; + + // check for special case + cmp64(rax, ExternalAddress((address) &min_long)); + jcc(Assembler::notEqual, normal_case); + xorl(rdx, rdx); // prepare rdx for possible special case (where + // remainder = 0) + cmpq(reg, -1); + jcc(Assembler::equal, special_case); + + // handle normal case + bind(normal_case); + cdqq(); + int idivq_offset = offset(); + idivq(reg); + + // normal and special case exit + bind(special_case); + + return idivq_offset; +} + +void MacroAssembler::decrementq(Register reg, int value) { + if (value == min_jint) { subq(reg, value); return; } + if (value < 0) { incrementq(reg, -value); return; } + if (value == 0) { ; return; } + if (value == 1 && UseIncDec) { decq(reg) ; return; } + /* else */ { subq(reg, value) ; return; } +} + +void MacroAssembler::decrementq(Address dst, int value) { + if (value == min_jint) { subq(dst, value); return; } + if (value < 0) { incrementq(dst, -value); return; } + if (value == 0) { ; return; } + if (value == 1 && UseIncDec) { decq(dst) ; return; } + /* else */ { subq(dst, value) ; return; } +} + +void MacroAssembler::fat_nop() { + // A 5 byte nop that is safe for patching (see patch_verified_entry) + // Recommened sequence from 'Software Optimization Guide for the AMD + // Hammer Processor' + emit_byte(0x66); + emit_byte(0x66); + emit_byte(0x90); + emit_byte(0x66); + emit_byte(0x90); +} + +void MacroAssembler::incrementq(Register reg, int value) { + if (value == min_jint) { addq(reg, value); return; } + if (value < 0) { decrementq(reg, -value); return; } + if (value == 0) { ; return; } + if (value == 1 && UseIncDec) { incq(reg) ; return; } + /* else */ { addq(reg, value) ; return; } +} + +void MacroAssembler::incrementq(Address dst, int value) { + if (value == min_jint) { addq(dst, value); return; } + if (value < 0) { 
decrementq(dst, -value); return; } + if (value == 0) { ; return; } + if (value == 1 && UseIncDec) { incq(dst) ; return; } + /* else */ { addq(dst, value) ; return; } +} + +// 32bit can do a case table jump in one instruction but we no longer allow the base +// to be installed in the Address class +void MacroAssembler::jump(ArrayAddress entry) { + lea(rscratch1, entry.base()); + Address dispatch = entry.index(); + assert(dispatch._base == noreg, "must be"); + dispatch._base = rscratch1; + jmp(dispatch); +} + +void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) { + ShouldNotReachHere(); // 64bit doesn't use two regs + cmpq(x_lo, y_lo); +} + +void MacroAssembler::lea(Register dst, AddressLiteral src) { + mov_literal64(dst, (intptr_t)src.target(), src.rspec()); +} + +void MacroAssembler::lea(Address dst, AddressLiteral adr) { + mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec()); + movptr(dst, rscratch1); +} + +void MacroAssembler::leave() { + // %%% is this really better? Why not on 32bit too? + emit_byte(0xC9); // LEAVE +} + +void MacroAssembler::lneg(Register hi, Register lo) { + ShouldNotReachHere(); // 64bit doesn't use two regs + negq(lo); +} + +void MacroAssembler::movoop(Register dst, jobject obj) { + mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate()); +} + +void MacroAssembler::movoop(Address dst, jobject obj) { + mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate()); + movq(dst, rscratch1); +} + +void MacroAssembler::movptr(Register dst, AddressLiteral src) { + if (src.is_lval()) { + mov_literal64(dst, (intptr_t)src.target(), src.rspec()); + } else { + if (reachable(src)) { + movq(dst, as_Address(src)); + } else { + lea(rscratch1, src); + movq(dst, Address(rscratch1,0)); + } + } +} + +void MacroAssembler::movptr(ArrayAddress dst, Register src) { + movq(as_Address(dst), src); +} + +void MacroAssembler::movptr(Register dst, ArrayAddress src) { + movq(dst, as_Address(src)); +} + +// src should NEVER be a real pointer. 
Use AddressLiteral for true pointers +void MacroAssembler::movptr(Address dst, intptr_t src) { + mov64(rscratch1, src); + movq(dst, rscratch1); +} + +// These are mostly for initializing NULL +void MacroAssembler::movptr(Address dst, int32_t src) { + movslq(dst, src); +} + +void MacroAssembler::movptr(Register dst, int32_t src) { + mov64(dst, (intptr_t)src); +} + +void MacroAssembler::pushoop(jobject obj) { + movoop(rscratch1, obj); + push(rscratch1); +} + +void MacroAssembler::pushptr(AddressLiteral src) { + lea(rscratch1, src); + if (src.is_lval()) { + push(rscratch1); + } else { + pushq(Address(rscratch1, 0)); + } +} + +void MacroAssembler::reset_last_Java_frame(bool clear_fp, + bool clear_pc) { + // we must set sp to zero to clear frame + movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD); + // must clear fp, so that compiled frames are not confused; it is + // possible that we need it only for debugging + if (clear_fp) { + movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD); + } + + if (clear_pc) { + movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD); + } +} + +void MacroAssembler::set_last_Java_frame(Register last_java_sp, + Register last_java_fp, + address last_java_pc) { + // determine last_java_sp register + if (!last_java_sp->is_valid()) { + last_java_sp = rsp; + } + + // last_java_fp is optional + if (last_java_fp->is_valid()) { + movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), + last_java_fp); + } + + // last_java_pc is optional + if (last_java_pc != NULL) { + Address java_pc(r15_thread, + JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); + lea(rscratch1, InternalAddress(last_java_pc)); + movptr(java_pc, rscratch1); + } + + movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp); +} + +static void pass_arg0(MacroAssembler* masm, Register arg) { + if (c_rarg0 != arg ) { + masm->mov(c_rarg0, arg); + } +} + +static void pass_arg1(MacroAssembler* masm, Register arg) { + if (c_rarg1 != arg ) { + masm->mov(c_rarg1, arg); + } +} + +static void pass_arg2(MacroAssembler* masm, Register arg) { + if (c_rarg2 != arg ) { + masm->mov(c_rarg2, arg); + } +} + +static void pass_arg3(MacroAssembler* masm, Register arg) { + if (c_rarg3 != arg ) { + masm->mov(c_rarg3, arg); + } +} + +void MacroAssembler::stop(const char* msg) { + address rip = pc(); + pusha(); // get regs on stack + lea(c_rarg0, ExternalAddress((address) msg)); + lea(c_rarg1, InternalAddress(rip)); + movq(c_rarg2, rsp); // pass pointer to regs array + andq(rsp, -16); // align stack as required by ABI + call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); + hlt(); +} + +void MacroAssembler::warn(const char* msg) { + push(r12); + movq(r12, rsp); + andq(rsp, -16); // align stack as required by push_CPU_state and call + + push_CPU_state(); // keeps alignment at 16 bytes + lea(c_rarg0, ExternalAddress((address) msg)); + call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0); + pop_CPU_state(); + + movq(rsp, r12); + pop(r12); +} + +#ifndef PRODUCT +extern "C" void findpc(intptr_t x); +#endif + +void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) { + // In order to get locks to work, we need to fake a in_VM state + if (ShowMessageBoxOnError ) { + JavaThread* thread = JavaThread::current(); + JavaThreadState saved_state = thread->thread_state(); + thread->set_thread_state(_thread_in_vm); +#ifndef PRODUCT + if (CountBytecodes || TraceBytecodes 
|| StopInterpreterAt) { + ttyLocker ttyl; + BytecodeCounter::print(); + } +#endif + // To see where a verify_oop failed, get $ebx+40/X for this frame. + // XXX correct this offset for amd64 + // This is the value of eip which points to where verify_oop will return. + if (os::message_box(msg, "Execution stopped, print registers?")) { + ttyLocker ttyl; + tty->print_cr("rip = 0x%016lx", pc); +#ifndef PRODUCT + tty->cr(); + findpc(pc); + tty->cr(); +#endif + tty->print_cr("rax = 0x%016lx", regs[15]); + tty->print_cr("rbx = 0x%016lx", regs[12]); + tty->print_cr("rcx = 0x%016lx", regs[14]); + tty->print_cr("rdx = 0x%016lx", regs[13]); + tty->print_cr("rdi = 0x%016lx", regs[8]); + tty->print_cr("rsi = 0x%016lx", regs[9]); + tty->print_cr("rbp = 0x%016lx", regs[10]); + tty->print_cr("rsp = 0x%016lx", regs[11]); + tty->print_cr("r8 = 0x%016lx", regs[7]); + tty->print_cr("r9 = 0x%016lx", regs[6]); + tty->print_cr("r10 = 0x%016lx", regs[5]); + tty->print_cr("r11 = 0x%016lx", regs[4]); + tty->print_cr("r12 = 0x%016lx", regs[3]); + tty->print_cr("r13 = 0x%016lx", regs[2]); + tty->print_cr("r14 = 0x%016lx", regs[1]); + tty->print_cr("r15 = 0x%016lx", regs[0]); + BREAKPOINT; + } + ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); + } else { + ttyLocker ttyl; + ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", + msg); + } +} + +#endif // _LP64 + +// Now versions that are common to 32/64 bit + +void MacroAssembler::addptr(Register dst, int32_t imm32) { + LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32)); +} + +void MacroAssembler::addptr(Register dst, Register src) { + LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); +} + +void MacroAssembler::addptr(Address dst, Register src) { + LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); +} + +void MacroAssembler::align(int modulus) { + if (offset() % modulus != 0) { + nop(modulus - (offset() % modulus)); + } +} + +void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) { + andpd(dst, as_Address(src)); +} + +void MacroAssembler::andptr(Register dst, int32_t imm32) { + LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32)); +} + +void MacroAssembler::atomic_incl(AddressLiteral counter_addr) { + pushf(); + if (os::is_MP()) + lock(); + incrementl(counter_addr); + popf(); +} + +// Writes to stack successive pages until offset reached to check for +// stack overflow + shadow pages. This clobbers tmp. +void MacroAssembler::bang_stack_size(Register size, Register tmp) { + movptr(tmp, rsp); + // Bang stack for total size given plus shadow page size. + // Bang one page at a time because large size can bang beyond yellow and + // red zones. + Label loop; + bind(loop); + movl(Address(tmp, (-os::vm_page_size())), size ); + subptr(tmp, os::vm_page_size()); + subl(size, os::vm_page_size()); + jcc(Assembler::greater, loop); + + // Bang down shadow pages too. + // The -1 because we already subtracted 1 page. + for (int i = 0; i< StackShadowPages-1; i++) { + // this could be any sized move but this is can be a debugging crumb + // so the bigger the better. + movptr(Address(tmp, (-i*os::vm_page_size())), size ); + } +} + +void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) { + assert(UseBiasedLocking, "why call this otherwise?"); + + // Check for biased locking unlock case, which is a no-op + // Note: we do not have to check the thread ID for two reasons. + // First, the interpreter checks for IllegalMonitorStateException at + // a higher level. 
Second, if the bias was revoked while we held the + // lock, the object could not be rebiased toward another thread, so + // the bias bit would be clear. + movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); + andptr(temp_reg, markOopDesc::biased_lock_mask_in_place); + cmpptr(temp_reg, markOopDesc::biased_lock_pattern); + jcc(Assembler::equal, done); +} + +void MacroAssembler::c2bool(Register x) { + // implements x == 0 ? 0 : 1 + // note: must only look at least-significant byte of x + // since C-style booleans are stored in one byte + // only! (was bug) + andl(x, 0xFF); + setb(Assembler::notZero, x); +} + +// Wouldn't need if AddressLiteral version had new name +void MacroAssembler::call(Label& L, relocInfo::relocType rtype) { + Assembler::call(L, rtype); +} + +void MacroAssembler::call(Register entry) { + Assembler::call(entry); +} + +void MacroAssembler::call(AddressLiteral entry) { + if (reachable(entry)) { + Assembler::call_literal(entry.target(), entry.rspec()); + } else { + lea(rscratch1, entry); + Assembler::call(rscratch1); + } +} + +// Implementation of call_VM versions + +void MacroAssembler::call_VM(Register oop_result, + address entry_point, + bool check_exceptions) { + Label C, E; + call(C, relocInfo::none); + jmp(E); + + bind(C); + call_VM_helper(oop_result, entry_point, 0, check_exceptions); + ret(0); + + bind(E); +} + +void MacroAssembler::call_VM(Register oop_result, + address entry_point, + Register arg_1, + bool check_exceptions) { + Label C, E; + call(C, relocInfo::none); + jmp(E); + + bind(C); + pass_arg1(this, arg_1); + call_VM_helper(oop_result, entry_point, 1, check_exceptions); + ret(0); + + bind(E); +} + +void MacroAssembler::call_VM(Register oop_result, + address entry_point, + Register arg_1, + Register arg_2, + bool check_exceptions) { + Label C, E; + call(C, relocInfo::none); + jmp(E); + + bind(C); + + LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); + + pass_arg2(this, arg_2); + pass_arg1(this, arg_1); + call_VM_helper(oop_result, entry_point, 2, check_exceptions); + ret(0); + + bind(E); +} + +void MacroAssembler::call_VM(Register oop_result, + address entry_point, + Register arg_1, + Register arg_2, + Register arg_3, + bool check_exceptions) { + Label C, E; + call(C, relocInfo::none); + jmp(E); + + bind(C); + + LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg")); + LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg")); + pass_arg3(this, arg_3); + + LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); + pass_arg2(this, arg_2); + + pass_arg1(this, arg_1); + call_VM_helper(oop_result, entry_point, 3, check_exceptions); + ret(0); + + bind(E); +} + +void MacroAssembler::call_VM(Register oop_result, + Register last_java_sp, + address entry_point, + int number_of_arguments, + bool check_exceptions) { + Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg); + call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions); +} + +void MacroAssembler::call_VM(Register oop_result, + Register last_java_sp, + address entry_point, + Register arg_1, + bool check_exceptions) { + pass_arg1(this, arg_1); + call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); +} + +void MacroAssembler::call_VM(Register oop_result, + Register last_java_sp, + address entry_point, + Register arg_1, + Register arg_2, + bool check_exceptions) { + + LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); + pass_arg2(this, arg_2); + pass_arg1(this, arg_1); + call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); +} + +void 
MacroAssembler::call_VM(Register oop_result, + Register last_java_sp, + address entry_point, + Register arg_1, + Register arg_2, + Register arg_3, + bool check_exceptions) { + LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg")); + LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg")); + pass_arg3(this, arg_3); + LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); + pass_arg2(this, arg_2); + pass_arg1(this, arg_1); + call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); +} + +void MacroAssembler::call_VM_base(Register oop_result, + Register java_thread, + Register last_java_sp, + address entry_point, + int number_of_arguments, + bool check_exceptions) { + // determine java_thread register + if (!java_thread->is_valid()) { +#ifdef _LP64 + java_thread = r15_thread; +#else + java_thread = rdi; + get_thread(java_thread); +#endif // LP64 + } + // determine last_java_sp register + if (!last_java_sp->is_valid()) { + last_java_sp = rsp; + } + // debugging support + assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); + LP64_ONLY(assert(java_thread == r15_thread, "unexpected register")); + assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); + assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); + + // push java thread (becomes first argument of C function) + + NOT_LP64(push(java_thread); number_of_arguments++); + LP64_ONLY(mov(c_rarg0, r15_thread)); + + // set last Java frame before call + assert(last_java_sp != rbp, "can't use ebp/rbp"); + + // Only interpreter should have to set fp + set_last_Java_frame(java_thread, last_java_sp, rbp, NULL); + + // do the call, remove parameters + MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); + + // restore the thread (cannot use the pushed argument since arguments + // may be overwritten by C code generated by an optimizing compiler); + // however can use the register value directly if it is callee saved. + if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) { + // rdi & rsi (also r15) are callee saved -> nothing to do +#ifdef ASSERT + guarantee(java_thread != rax, "change this code"); + push(rax); + { Label L; + get_thread(rax); + cmpptr(java_thread, rax); + jcc(Assembler::equal, L); + stop("MacroAssembler::call_VM_base: rdi not callee saved?"); + bind(L); + } + pop(rax); +#endif + } else { + get_thread(java_thread); + } + // reset last Java frame + // Only interpreter should have to clear fp + reset_last_Java_frame(java_thread, true, false); + +#ifndef CC_INTERP + // C++ interp handles this in the interpreter + check_and_handle_popframe(java_thread); + check_and_handle_earlyret(java_thread); +#endif /* CC_INTERP */ + + if (check_exceptions) { + // check for pending exceptions (java_thread is set upon return) + cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); +#ifndef _LP64 + jump_cc(Assembler::notEqual, + RuntimeAddress(StubRoutines::forward_exception_entry())); +#else + // This used to conditionally jump to forward_exception however it is + // possible if we relocate that the branch will not reach. 
So we must jump + // around so we can always reach + + Label ok; + jcc(Assembler::equal, ok); + jump(RuntimeAddress(StubRoutines::forward_exception_entry())); + bind(ok); +#endif // LP64 + } + + // get oop result if there is one and reset the value in the thread + if (oop_result->is_valid()) { + movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset())); + movptr(Address(java_thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD); + verify_oop(oop_result, "broken oop in call_VM_base"); + } +} + +void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) { + + // Calculate the value for last_Java_sp + // somewhat subtle. call_VM does an intermediate call + // which places a return address on the stack just under the + // stack pointer as the user finsihed with it. This allows + // use to retrieve last_Java_pc from last_Java_sp[-1]. + // On 32bit we then have to push additional args on the stack to accomplish + // the actual requested call. On 64bit call_VM only can use register args + // so the only extra space is the return address that call_VM created. + // This hopefully explains the calculations here. + +#ifdef _LP64 + // We've pushed one address, correct last_Java_sp + lea(rax, Address(rsp, wordSize)); +#else + lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize)); +#endif // LP64 + + call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions); + +} + +void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { + call_VM_leaf_base(entry_point, number_of_arguments); +} + +void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { + pass_arg0(this, arg_0); + call_VM_leaf(entry_point, 1); +} + +void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { + + LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); + pass_arg1(this, arg_1); + pass_arg0(this, arg_0); + call_VM_leaf(entry_point, 2); +} + +void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { + LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg")); + LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); + pass_arg2(this, arg_2); + LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); + pass_arg1(this, arg_1); + pass_arg0(this, arg_0); + call_VM_leaf(entry_point, 3); +} + +void MacroAssembler::check_and_handle_earlyret(Register java_thread) { +} + +void MacroAssembler::check_and_handle_popframe(Register java_thread) { +} + +void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) { + if (reachable(src1)) { + cmpl(as_Address(src1), imm); + } else { + lea(rscratch1, src1); + cmpl(Address(rscratch1, 0), imm); + } +} + +void MacroAssembler::cmp32(Register src1, AddressLiteral src2) { + assert(!src2.is_lval(), "use cmpptr"); + if (reachable(src2)) { + cmpl(src1, as_Address(src2)); + } else { + lea(rscratch1, src2); + cmpl(src1, Address(rscratch1, 0)); + } +} + +void MacroAssembler::cmp32(Register src1, int32_t imm) { + Assembler::cmpl(src1, imm); +} + +void MacroAssembler::cmp32(Register src1, Address src2) { + Assembler::cmpl(src1, src2); +} + +void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { + ucomisd(opr1, opr2); + + Label L; + if (unordered_is_less) { + movl(dst, -1); + jcc(Assembler::parity, L); + jcc(Assembler::below , L); + movl(dst, 0); + jcc(Assembler::equal , L); + increment(dst); + } else { // unordered is greater + movl(dst, 1); + 
jcc(Assembler::parity, L); + jcc(Assembler::above , L); + movl(dst, 0); + jcc(Assembler::equal , L); + decrementl(dst); + } + bind(L); +} + +void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { + ucomiss(opr1, opr2); + + Label L; + if (unordered_is_less) { + movl(dst, -1); + jcc(Assembler::parity, L); + jcc(Assembler::below , L); + movl(dst, 0); + jcc(Assembler::equal , L); + increment(dst); + } else { // unordered is greater + movl(dst, 1); + jcc(Assembler::parity, L); + jcc(Assembler::above , L); + movl(dst, 0); + jcc(Assembler::equal , L); + decrementl(dst); + } + bind(L); +} + + +void MacroAssembler::cmp8(AddressLiteral src1, int imm) { + if (reachable(src1)) { + cmpb(as_Address(src1), imm); + } else { + lea(rscratch1, src1); + cmpb(Address(rscratch1, 0), imm); + } +} + +void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) { +#ifdef _LP64 + if (src2.is_lval()) { + movptr(rscratch1, src2); + Assembler::cmpq(src1, rscratch1); + } else if (reachable(src2)) { + cmpq(src1, as_Address(src2)); + } else { + lea(rscratch1, src2); + Assembler::cmpq(src1, Address(rscratch1, 0)); + } +#else + if (src2.is_lval()) { + cmp_literal32(src1, (int32_t) src2.target(), src2.rspec()); + } else { + cmpl(src1, as_Address(src2)); + } +#endif // _LP64 +} + +void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) { + assert(src2.is_lval(), "not a mem-mem compare"); +#ifdef _LP64 + // moves src2's literal address + movptr(rscratch1, src2); + Assembler::cmpq(src1, rscratch1); +#else + cmp_literal32(src1, (int32_t) src2.target(), src2.rspec()); +#endif // _LP64 +} + +void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) { + if (reachable(adr)) { + if (os::is_MP()) + lock(); + cmpxchgptr(reg, as_Address(adr)); + } else { + lea(rscratch1, adr); + if (os::is_MP()) + lock(); + cmpxchgptr(reg, Address(rscratch1, 0)); + } +} + +void MacroAssembler::cmpxchgptr(Register reg, Address adr) { + LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr)); +} + +void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) { + comisd(dst, as_Address(src)); +} + +void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) { + comiss(dst, as_Address(src)); +} + + +void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) { + Condition negated_cond = negate_condition(cond); + Label L; + jcc(negated_cond, L); + atomic_incl(counter_addr); + bind(L); +} + +int MacroAssembler::corrected_idivl(Register reg) { + // Full implementation of Java idiv and irem; checks for + // special case as described in JVM spec., p.243 & p.271. + // The function returns the (pc) offset of the idivl + // instruction - may be needed for implicit exceptions. 
+ // + // normal case special case + // + // input : rax,: dividend min_int + // reg: divisor (may not be rax,/rdx) -1 + // + // output: rax,: quotient (= rax, idiv reg) min_int + // rdx: remainder (= rax, irem reg) 0 + assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); + const int min_int = 0x80000000; + Label normal_case, special_case; + + // check for special case + cmpl(rax, min_int); + jcc(Assembler::notEqual, normal_case); + xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) + cmpl(reg, -1); + jcc(Assembler::equal, special_case); + + // handle normal case + bind(normal_case); + cdql(); + int idivl_offset = offset(); + idivl(reg); + + // normal and special case exit + bind(special_case); + + return idivl_offset; +} + + + +void MacroAssembler::decrementl(Register reg, int value) { + if (value == min_jint) {subl(reg, value) ; return; } + if (value < 0) { incrementl(reg, -value); return; } + if (value == 0) { ; return; } + if (value == 1 && UseIncDec) { decl(reg) ; return; } + /* else */ { subl(reg, value) ; return; } +} + +void MacroAssembler::decrementl(Address dst, int value) { + if (value == min_jint) {subl(dst, value) ; return; } + if (value < 0) { incrementl(dst, -value); return; } + if (value == 0) { ; return; } + if (value == 1 && UseIncDec) { decl(dst) ; return; } + /* else */ { subl(dst, value) ; return; } +} + +void MacroAssembler::division_with_shift (Register reg, int shift_value) { + assert (shift_value > 0, "illegal shift value"); + Label _is_positive; + testl (reg, reg); + jcc (Assembler::positive, _is_positive); + int offset = (1 << shift_value) - 1 ; + + if (offset == 1) { + incrementl(reg); + } else { + addl(reg, offset); + } + + bind (_is_positive); + sarl(reg, shift_value); +} + +// !defined(COMPILER2) is because of stupid core builds +#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2) +void MacroAssembler::empty_FPU_stack() { + if (VM_Version::supports_mmx()) { + emms(); + } else { + for (int i = 8; i-- > 0; ) ffree(i); + } +} +#endif // !LP64 || C1 || !C2 + + +// Defines obj, preserves var_size_in_bytes +void MacroAssembler::eden_allocate(Register obj, + Register var_size_in_bytes, + int con_size_in_bytes, + Register t1, + Label& slow_case) { + assert(obj == rax, "obj must be in rax, for cmpxchg"); + assert_different_registers(obj, var_size_in_bytes, t1); + if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { + jmp(slow_case); + } else { + Register end = t1; + Label retry; + bind(retry); + ExternalAddress heap_top((address) Universe::heap()->top_addr()); + movptr(obj, heap_top); + if (var_size_in_bytes == noreg) { + lea(end, Address(obj, con_size_in_bytes)); + } else { + lea(end, Address(obj, var_size_in_bytes, Address::times_1)); + } + // if end < obj then we wrapped around => object too long => slow case + cmpptr(end, obj); + jcc(Assembler::below, slow_case); + cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr())); + jcc(Assembler::above, slow_case); + // Compare obj with the top addr, and if still equal, store the new top addr in + // end at the address of the top addr pointer. Sets ZF if was equal, and clears + // it otherwise. Use lock prefix for atomicity on MPs. 
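The eden_allocate loop above is a conventional CAS bump-pointer allocation. A self-contained C++ sketch of the same retry protocol using std::atomic; the heap structure and names are made up for illustration and do not come from the patch:

    #include <atomic>
    #include <cstddef>

    struct Eden {
      std::atomic<char*> top;   // shared allocation pointer (heap_top above)
      char*              end;   // end of the contiguous eden space
    };

    // Returns the start of the new object, or nullptr to signal the slow case.
    char* eden_allocate(Eden& eden, size_t size_in_bytes) {
      for (;;) {
        char* obj     = eden.top.load(std::memory_order_relaxed);   // movptr(obj, heap_top)
        char* new_top = obj + size_in_bytes;                        // lea(end, ...)
        if (new_top < obj || new_top > eden.end) return nullptr;    // wrapped or past end -> slow case
        // cmpxchg: install new_top only if top is still obj; otherwise another
        // thread allocated first and we retry with the fresh top.
        if (eden.top.compare_exchange_weak(obj, new_top, std::memory_order_relaxed))
          return obj;
      }
    }
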
+ locked_cmpxchgptr(end, heap_top); + jcc(Assembler::notEqual, retry); + } +} + +void MacroAssembler::enter() { + push(rbp); + mov(rbp, rsp); +} + +void MacroAssembler::fcmp(Register tmp) { + fcmp(tmp, 1, true, true); +} + +void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) { + assert(!pop_right || pop_left, "usage error"); + if (VM_Version::supports_cmov()) { + assert(tmp == noreg, "unneeded temp"); + if (pop_left) { + fucomip(index); + } else { + fucomi(index); + } + if (pop_right) { + fpop(); + } + } else { + assert(tmp != noreg, "need temp"); + if (pop_left) { + if (pop_right) { + fcompp(); + } else { + fcomp(index); + } + } else { + fcom(index); + } + // convert FPU condition into eflags condition via rax, + save_rax(tmp); + fwait(); fnstsw_ax(); + sahf(); + restore_rax(tmp); + } + // condition codes set as follows: + // + // CF (corresponds to C0) if x < y + // PF (corresponds to C2) if unordered + // ZF (corresponds to C3) if x = y +} + +void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) { + fcmp2int(dst, unordered_is_less, 1, true, true); +} + +void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) { + fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right); + Label L; + if (unordered_is_less) { + movl(dst, -1); + jcc(Assembler::parity, L); + jcc(Assembler::below , L); + movl(dst, 0); + jcc(Assembler::equal , L); + increment(dst); + } else { // unordered is greater + movl(dst, 1); + jcc(Assembler::parity, L); + jcc(Assembler::above , L); + movl(dst, 0); + jcc(Assembler::equal , L); + decrementl(dst); + } + bind(L); +} + +void MacroAssembler::fld_d(AddressLiteral src) { + fld_d(as_Address(src)); +} + +void MacroAssembler::fld_s(AddressLiteral src) { + fld_s(as_Address(src)); +} + +void MacroAssembler::fld_x(AddressLiteral src) { + Assembler::fld_x(as_Address(src)); +} + +void MacroAssembler::fldcw(AddressLiteral src) { + Assembler::fldcw(as_Address(src)); +} + +void MacroAssembler::fpop() { + ffree(); + fincstp(); +} + +void MacroAssembler::fremr(Register tmp) { + save_rax(tmp); + { Label L; + bind(L); + fprem(); + fwait(); fnstsw_ax(); +#ifdef _LP64 + testl(rax, 0x400); + jcc(Assembler::notEqual, L); +#else + sahf(); + jcc(Assembler::parity, L); +#endif // _LP64 + } + restore_rax(tmp); + // Result is in ST0. 
+ // Note: fxch & fpop to get rid of ST1 + // (otherwise FPU stack could overflow eventually) + fxch(1); + fpop(); +} + + +void MacroAssembler::incrementl(AddressLiteral dst) { + if (reachable(dst)) { + incrementl(as_Address(dst)); + } else { + lea(rscratch1, dst); + incrementl(Address(rscratch1, 0)); + } +} + +void MacroAssembler::incrementl(ArrayAddress dst) { + incrementl(as_Address(dst)); +} + +void MacroAssembler::incrementl(Register reg, int value) { + if (value == min_jint) {addl(reg, value) ; return; } + if (value < 0) { decrementl(reg, -value); return; } + if (value == 0) { ; return; } + if (value == 1 && UseIncDec) { incl(reg) ; return; } + /* else */ { addl(reg, value) ; return; } +} + +void MacroAssembler::incrementl(Address dst, int value) { + if (value == min_jint) {addl(dst, value) ; return; } + if (value < 0) { decrementl(dst, -value); return; } + if (value == 0) { ; return; } + if (value == 1 && UseIncDec) { incl(dst) ; return; } + /* else */ { addl(dst, value) ; return; } +} + +void MacroAssembler::jump(AddressLiteral dst) { + if (reachable(dst)) { + jmp_literal(dst.target(), dst.rspec()); + } else { + lea(rscratch1, dst); + jmp(rscratch1); + } +} + +void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) { + if (reachable(dst)) { + InstructionMark im(this); + relocate(dst.reloc()); + const int short_size = 2; + const int long_size = 6; + int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos); + if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { + // 0111 tttn #8-bit disp + emit_byte(0x70 | cc); + emit_byte((offs - short_size) & 0xFF); + } else { + // 0000 1111 1000 tttn #32-bit disp + emit_byte(0x0F); + emit_byte(0x80 | cc); + emit_long(offs - long_size); + } + } else { +#ifdef ASSERT + warning("reversing conditional branch"); +#endif /* ASSERT */ + Label skip; + jccb(reverse[cc], skip); + lea(rscratch1, dst); + Assembler::jmp(rscratch1); + bind(skip); + } +} + +void MacroAssembler::ldmxcsr(AddressLiteral src) { + if (reachable(src)) { + Assembler::ldmxcsr(as_Address(src)); + } else { + lea(rscratch1, src); + Assembler::ldmxcsr(Address(rscratch1, 0)); + } +} + +int MacroAssembler::load_signed_byte(Register dst, Address src) { + int off; + if (LP64_ONLY(true ||) VM_Version::is_P6()) { + off = offset(); + movsbl(dst, src); // movsxb + } else { + off = load_unsigned_byte(dst, src); + shll(dst, 24); + sarl(dst, 24); + } + return off; +} + +// word => int32 which seems bad for 64bit +int MacroAssembler::load_signed_word(Register dst, Address src) { + int off; + if (LP64_ONLY(true ||) VM_Version::is_P6()) { + // This is dubious to me since it seems safe to do a signed 16 => 64 bit + // version but this is what 64bit has always done. This seems to imply + // that users are only using 32bits worth. + off = offset(); + movswl(dst, src); // movsxw + } else { + off = load_unsigned_word(dst, src); + shll(dst, 16); + sarl(dst, 16); + } + return off; +} + +int MacroAssembler::load_unsigned_byte(Register dst, Address src) { + // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, + // and "3.9 Partial Register Penalties", p. 22). + int off; + if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) { + off = offset(); + movzbl(dst, src); // movzxb + } else { + xorl(dst, dst); + off = offset(); + movb(dst, src); + } + return off; +} + +int MacroAssembler::load_unsigned_word(Register dst, Address src) { + // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, + // and "3.9 Partial Register Penalties", p. 22). 
+ int off; + if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) { + off = offset(); + movzwl(dst, src); // movzxw + } else { + xorl(dst, dst); + off = offset(); + movw(dst, src); + } + return off; +} + +void MacroAssembler::mov32(AddressLiteral dst, Register src) { + if (reachable(dst)) { + movl(as_Address(dst), src); + } else { + lea(rscratch1, dst); + movl(Address(rscratch1, 0), src); + } +} + +void MacroAssembler::mov32(Register dst, AddressLiteral src) { + if (reachable(src)) { + movl(dst, as_Address(src)); + } else { + lea(rscratch1, src); + movl(dst, Address(rscratch1, 0)); + } +} + +// C++ bool manipulation + +void MacroAssembler::movbool(Register dst, Address src) { + if(sizeof(bool) == 1) + movb(dst, src); + else if(sizeof(bool) == 2) + movw(dst, src); + else if(sizeof(bool) == 4) + movl(dst, src); + else + // unsupported + ShouldNotReachHere(); +} + +void MacroAssembler::movbool(Address dst, bool boolconst) { + if(sizeof(bool) == 1) + movb(dst, (int) boolconst); + else if(sizeof(bool) == 2) + movw(dst, (int) boolconst); + else if(sizeof(bool) == 4) + movl(dst, (int) boolconst); + else + // unsupported + ShouldNotReachHere(); +} + +void MacroAssembler::movbool(Address dst, Register src) { + if(sizeof(bool) == 1) + movb(dst, src); + else if(sizeof(bool) == 2) + movw(dst, src); + else if(sizeof(bool) == 4) + movl(dst, src); + else + // unsupported + ShouldNotReachHere(); +} + +void MacroAssembler::movbyte(ArrayAddress dst, int src) { + movb(as_Address(dst), src); +} + +void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) { + if (reachable(src)) { + if (UseXmmLoadAndClearUpper) { + movsd (dst, as_Address(src)); + } else { + movlpd(dst, as_Address(src)); + } + } else { + lea(rscratch1, src); + if (UseXmmLoadAndClearUpper) { + movsd (dst, Address(rscratch1, 0)); + } else { + movlpd(dst, Address(rscratch1, 0)); + } + } +} + +void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) { + if (reachable(src)) { + movss(dst, as_Address(src)); + } else { + lea(rscratch1, src); + movss(dst, Address(rscratch1, 0)); + } +} + +void MacroAssembler::movptr(Register dst, Register src) { + LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); +} + +void MacroAssembler::movptr(Register dst, Address src) { + LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); +} + +// src should NEVER be a real pointer. 
Use AddressLiteral for true pointers +void MacroAssembler::movptr(Register dst, intptr_t src) { + LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src)); +} + +void MacroAssembler::movptr(Address dst, Register src) { + LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); +} + +void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) { + if (reachable(src)) { + movss(dst, as_Address(src)); + } else { + lea(rscratch1, src); + movss(dst, Address(rscratch1, 0)); + } +} + +void MacroAssembler::null_check(Register reg, int offset) { + if (needs_explicit_null_check(offset)) { + // provoke OS NULL exception if reg = NULL by + // accessing M[reg] w/o changing any (non-CC) registers + // NOTE: cmpl is plenty here to provoke a segv + cmpptr(rax, Address(reg, 0)); + // Note: should probably use testl(rax, Address(reg, 0)); + // may be shorter code (however, this version of + // testl needs to be implemented first) + } else { + // nothing to do, (later) access of M[reg + offset] + // will provoke OS NULL exception if reg = NULL + } +} + +void MacroAssembler::os_breakpoint() { + // instead of directly emitting a breakpoint, call os:breakpoint for better debugability + // (e.g., MSVC can't call ps() otherwise) + call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); +} + +void MacroAssembler::pop_CPU_state() { + pop_FPU_state(); + pop_IU_state(); +} + +void MacroAssembler::pop_FPU_state() { + NOT_LP64(frstor(Address(rsp, 0));) + LP64_ONLY(fxrstor(Address(rsp, 0));) + addptr(rsp, FPUStateSizeInWords * wordSize); +} + +void MacroAssembler::pop_IU_state() { + popa(); + LP64_ONLY(addq(rsp, 8)); + popf(); +} + +// Save Integer and Float state +// Warning: Stack must be 16 byte aligned (64bit) +void MacroAssembler::push_CPU_state() { + push_IU_state(); + push_FPU_state(); +} + +void MacroAssembler::push_FPU_state() { + subptr(rsp, FPUStateSizeInWords * wordSize); +#ifndef _LP64 + fnsave(Address(rsp, 0)); + fwait(); +#else + fxsave(Address(rsp, 0)); +#endif // LP64 +} + +void MacroAssembler::push_IU_state() { + // Push flags first because pusha kills them + pushf(); + // Make sure rsp stays 16-byte aligned + LP64_ONLY(subq(rsp, 8)); + pusha(); +} + +void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) { + // determine java_thread register + if (!java_thread->is_valid()) { + java_thread = rdi; + get_thread(java_thread); + } + // we must set sp to zero to clear frame + movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD); + if (clear_fp) { + movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD); + } + + if (clear_pc) + movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD); + +} + +void MacroAssembler::restore_rax(Register tmp) { + if (tmp == noreg) pop(rax); + else if (tmp != rax) mov(rax, tmp); +} + +void MacroAssembler::round_to(Register reg, int modulus) { + addptr(reg, modulus - 1); + andptr(reg, -modulus); +} + +void MacroAssembler::save_rax(Register tmp) { + if (tmp == noreg) push(rax); + else if (tmp != rax) mov(tmp, rax); +} + +// Write serialization page so VM thread can do a pseudo remote membar. +// We use the current thread pointer to calculate a thread specific +// offset to write to within the page. This minimizes bus traffic +// due to cache line collision. 
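The comment above describes how serialize_memory (the next hunk) picks a per-thread slot inside the serialization page. The index calculation it emits can be modelled directly; the shift count and page size below are stand-in parameters, not values from the patch:

    #include <cstdint>
    #include <cstddef>

    // Per-thread offset into the memory-serialization page: hash the thread pointer
    // down with a right shift and keep the result inside the page, int-aligned.
    size_t serialize_page_offset(uintptr_t thread, int shift_count, size_t page_size) {
      uintptr_t tmp = thread >> shift_count;              // shrl(tmp, serialize_page_shift_count)
      return (size_t)(tmp & (page_size - sizeof(int)));   // andl(tmp, page_size - sizeof(int))
    }
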
+void MacroAssembler::serialize_memory(Register thread, Register tmp) { + movl(tmp, thread); + shrl(tmp, os::get_serialize_page_shift_count()); + andl(tmp, (os::vm_page_size() - sizeof(int))); + + Address index(noreg, tmp, Address::times_1); + ExternalAddress page(os::get_memory_serialize_page()); + + movptr(ArrayAddress(page, index), tmp); +} + +// Calls to C land +// +// When entering C land, the rbp, & rsp of the last Java frame have to be recorded +// in the (thread-local) JavaThread object. When leaving C land, the last Java fp +// has to be reset to 0. This is required to allow proper stack traversal. +void MacroAssembler::set_last_Java_frame(Register java_thread, + Register last_java_sp, + Register last_java_fp, + address last_java_pc) { + // determine java_thread register + if (!java_thread->is_valid()) { + java_thread = rdi; + get_thread(java_thread); + } + // determine last_java_sp register + if (!last_java_sp->is_valid()) { + last_java_sp = rsp; + } + + // last_java_fp is optional + + if (last_java_fp->is_valid()) { + movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); + } + + // last_java_pc is optional + + if (last_java_pc != NULL) { + lea(Address(java_thread, + JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()), + InternalAddress(last_java_pc)); + + } + movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp); +} + +void MacroAssembler::shlptr(Register dst, int imm8) { + LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8)); +} + +void MacroAssembler::shrptr(Register dst, int imm8) { + LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8)); +} + +void MacroAssembler::sign_extend_byte(Register reg) { + if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) { + movsbl(reg, reg); // movsxb + } else { + shll(reg, 24); + sarl(reg, 24); + } +} + +void MacroAssembler::sign_extend_short(Register reg) { + if (LP64_ONLY(true ||) VM_Version::is_P6()) { + movswl(reg, reg); // movsxw + } else { + shll(reg, 16); + sarl(reg, 16); + } +} + +////////////////////////////////////////////////////////////////////////////////// +#ifndef SERIALGC + +void MacroAssembler::g1_write_barrier_pre(Register obj, +#ifndef _LP64 + Register thread, +#endif + Register tmp, + Register tmp2, + bool tosca_live) { + LP64_ONLY(Register thread = r15_thread;) + Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() + + PtrQueue::byte_offset_of_active())); + + Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() + + PtrQueue::byte_offset_of_index())); + Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() + + PtrQueue::byte_offset_of_buf())); + + + Label done; + Label runtime; + + // if (!marking_in_progress) goto done; + if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { + cmpl(in_progress, 0); + } else { + assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption"); + cmpb(in_progress, 0); + } + jcc(Assembler::equal, done); + + // if (x.f == NULL) goto done; + cmpptr(Address(obj, 0), NULL_WORD); + jcc(Assembler::equal, done); + + // Can we store original value in the thread's buffer? 
+ + LP64_ONLY(movslq(tmp, index);) + movptr(tmp2, Address(obj, 0)); +#ifdef _LP64 + cmpq(tmp, 0); +#else + cmpl(index, 0); +#endif + jcc(Assembler::equal, runtime); +#ifdef _LP64 + subq(tmp, wordSize); + movl(index, tmp); + addq(tmp, buffer); +#else + subl(index, wordSize); + movl(tmp, buffer); + addl(tmp, index); +#endif + movptr(Address(tmp, 0), tmp2); + jmp(done); + bind(runtime); + // save the live input values + if(tosca_live) push(rax); + push(obj); +#ifdef _LP64 + movq(c_rarg0, Address(obj, 0)); + call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, r15_thread); +#else + push(thread); + call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread); + pop(thread); +#endif + pop(obj); + if(tosca_live) pop(rax); + bind(done); + +} + +void MacroAssembler::g1_write_barrier_post(Register store_addr, + Register new_val, +#ifndef _LP64 + Register thread, +#endif + Register tmp, + Register tmp2) { + + LP64_ONLY(Register thread = r15_thread;) + Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + + PtrQueue::byte_offset_of_index())); + Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + + PtrQueue::byte_offset_of_buf())); + BarrierSet* bs = Universe::heap()->barrier_set(); + CardTableModRefBS* ct = (CardTableModRefBS*)bs; + Label done; + Label runtime; + + // Does store cross heap regions? + + movptr(tmp, store_addr); + xorptr(tmp, new_val); + shrptr(tmp, HeapRegion::LogOfHRGrainBytes); + jcc(Assembler::equal, done); + + // crosses regions, storing NULL? + + cmpptr(new_val, (int32_t) NULL_WORD); + jcc(Assembler::equal, done); + + // storing region crossing non-NULL, is card already dirty? + + ExternalAddress cardtable((address) ct->byte_map_base); + assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); +#ifdef _LP64 + const Register card_addr = tmp; + + movq(card_addr, store_addr); + shrq(card_addr, CardTableModRefBS::card_shift); + + lea(tmp2, cardtable); + + // get the address of the card + addq(card_addr, tmp2); +#else + const Register card_index = tmp; + + movl(card_index, store_addr); + shrl(card_index, CardTableModRefBS::card_shift); + + Address index(noreg, card_index, Address::times_1); + const Register card_addr = tmp; + lea(card_addr, as_Address(ArrayAddress(cardtable, index))); +#endif + cmpb(Address(card_addr, 0), 0); + jcc(Assembler::equal, done); + + // storing a region crossing, non-NULL oop, card is clean. + // dirty card and log. + + movb(Address(card_addr, 0), 0); + + cmpl(queue_index, 0); + jcc(Assembler::equal, runtime); + subl(queue_index, wordSize); + movptr(tmp2, buffer); +#ifdef _LP64 + movslq(rscratch1, queue_index); + addq(tmp2, rscratch1); + movq(Address(tmp2, 0), card_addr); +#else + addl(tmp2, queue_index); + movl(Address(tmp2, 0), card_index); +#endif + jmp(done); + + bind(runtime); + // save the live input values + push(store_addr); + push(new_val); +#ifdef _LP64 + call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread); +#else + push(thread); + call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread); + pop(thread); +#endif + pop(new_val); + pop(store_addr); + + bind(done); + +} + +#endif // SERIALGC +////////////////////////////////////////////////////////////////////////////////// + + +void MacroAssembler::store_check(Register obj) { + // Does a store check for the oop in register obj. The content of + // register obj is destroyed afterwards. 
+ store_check_part_1(obj); + store_check_part_2(obj); +} + +void MacroAssembler::store_check(Register obj, Address dst) { + store_check(obj); +} + + +// split the store check operation so that other instructions can be scheduled inbetween +void MacroAssembler::store_check_part_1(Register obj) { + BarrierSet* bs = Universe::heap()->barrier_set(); + assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); + shrptr(obj, CardTableModRefBS::card_shift); +} + +void MacroAssembler::store_check_part_2(Register obj) { + BarrierSet* bs = Universe::heap()->barrier_set(); + assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); + CardTableModRefBS* ct = (CardTableModRefBS*)bs; + assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + + // The calculation for byte_map_base is as follows: + // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); + // So this essentially converts an address to a displacement and + // it will never need to be relocated. On 64bit however the value may be too + // large for a 32bit displacement + + intptr_t disp = (intptr_t) ct->byte_map_base; + if (is_simm32(disp)) { + Address cardtable(noreg, obj, Address::times_1, disp); + movb(cardtable, 0); + } else { + // By doing it as an ExternalAddress disp could be converted to a rip-relative + // displacement and done in a single instruction given favorable mapping and + // a smarter version of as_Address. Worst case it is two instructions which + // is no worse off then loading disp into a register and doing as a simple + // Address() as above. + // We can't do as ExternalAddress as the only style since if disp == 0 we'll + // assert since NULL isn't acceptable in a reloci (see 6644928). In any case + // in some cases we'll get a single instruction version. + + ExternalAddress cardtable((address)disp); + Address index(noreg, obj, Address::times_1); + movb(as_Address(ArrayAddress(cardtable, index)), 0); + } +} + +void MacroAssembler::subptr(Register dst, int32_t imm32) { + LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32)); +} + +void MacroAssembler::subptr(Register dst, Register src) { + LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); +} + +void MacroAssembler::test32(Register src1, AddressLiteral src2) { + // src2 must be rval + + if (reachable(src2)) { + testl(src1, as_Address(src2)); + } else { + lea(rscratch1, src2); + testl(src1, Address(rscratch1, 0)); + } +} + +// C++ bool manipulation +void MacroAssembler::testbool(Register dst) { + if(sizeof(bool) == 1) + testb(dst, 0xff); + else if(sizeof(bool) == 2) { + // testw implementation needed for two byte bools + ShouldNotReachHere(); + } else if(sizeof(bool) == 4) + testl(dst, dst); + else + // unsupported + ShouldNotReachHere(); +} + +void MacroAssembler::testptr(Register dst, Register src) { + LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src)); +} + +// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 
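The contract stated above belongs to tlab_allocate(), which follows next. A plain-C++ model of the bump-pointer fast path that the generated code performs (load top, compute end, compare against the TLAB end, store the new top); the Tlab struct here is a hypothetical stand-in for the JavaThread tlab fields, not the real layout:

    #include <cstdint>
    #include <cstdio>

    struct Tlab { uintptr_t top, end; };   // stand-in for the thread-local allocation buffer bounds

    // Returns the address of the new object, or 0 to signal the slow path, mirroring:
    // obj = top; end = obj + size; if (end > tlab_end) goto slow_case; top = end;
    uintptr_t tlab_allocate_model(Tlab& t, size_t size_in_bytes) {
      uintptr_t obj     = t.top;
      uintptr_t new_top = obj + size_in_bytes;
      if (new_top > t.end) return 0;       // jcc(Assembler::above, slow_case)
      t.top = new_top;                     // update the tlab top pointer
      return obj;
    }

    int main() {
      Tlab t = { 0x1000, 0x2000 };
      std::printf("first  alloc: %#zx\n", (size_t) tlab_allocate_model(t, 64));  // 0x1000
      std::printf("second alloc: %#zx\n", (size_t) tlab_allocate_model(t, 64));  // 0x1040
      return 0;
    }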
+void MacroAssembler::tlab_allocate(Register obj, + Register var_size_in_bytes, + int con_size_in_bytes, + Register t1, + Register t2, + Label& slow_case) { + assert_different_registers(obj, t1, t2); + assert_different_registers(obj, var_size_in_bytes, t1); + Register end = t2; + Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread); + + verify_tlab(); + + NOT_LP64(get_thread(thread)); + + movptr(obj, Address(thread, JavaThread::tlab_top_offset())); + if (var_size_in_bytes == noreg) { + lea(end, Address(obj, con_size_in_bytes)); + } else { + lea(end, Address(obj, var_size_in_bytes, Address::times_1)); + } + cmpptr(end, Address(thread, JavaThread::tlab_end_offset())); + jcc(Assembler::above, slow_case); + + // update the tlab top pointer + movptr(Address(thread, JavaThread::tlab_top_offset()), end); + + // recover var_size_in_bytes if necessary + if (var_size_in_bytes == end) { + subptr(var_size_in_bytes, obj); + } + verify_tlab(); +} + +// Preserves rbx, and rdx. +void MacroAssembler::tlab_refill(Label& retry, + Label& try_eden, + Label& slow_case) { + Register top = rax; + Register t1 = rcx; + Register t2 = rsi; + Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread); + assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx); + Label do_refill, discard_tlab; + + if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { + // No allocation in the shared eden. + jmp(slow_case); + } + + NOT_LP64(get_thread(thread_reg)); + + movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); + movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); + + // calculate amount of free space + subptr(t1, top); + shrptr(t1, LogHeapWordSize); + + // Retain tlab and allocate object in shared space if + // the amount free in the tlab is too large to discard. + cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); + jcc(Assembler::lessEqual, discard_tlab); + + // Retain + // %%% yuck as movptr... + movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment()); + addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2); + if (TLABStats) { + // increment number of slow_allocations + addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1); + } + jmp(try_eden); + + bind(discard_tlab); + if (TLABStats) { + // increment number of refills + addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1); + // accumulate wastage -- t1 is amount free in tlab + addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1); + } + + // if tlab is currently allocated (top or end != null) then + // fill [top, end + alignment_reserve) with array object + testptr (top, top); + jcc(Assembler::zero, do_refill); + + // set up the mark word + movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2)); + // set the length to the remaining space + subptr(t1, typeArrayOopDesc::header_size(T_INT)); + addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve()); + shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint))); + movptr(Address(top, arrayOopDesc::length_offset_in_bytes()), t1); + // set klass to intArrayKlass + // dubious reloc why not an oop reloc? + movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr())); + // store klass last. concurrent gcs assumes klass length is valid if + // klass field is not null. 
+ store_klass(top, t1); + + // refill the tlab with an eden allocation + bind(do_refill); + movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset()))); + shlptr(t1, LogHeapWordSize); + // add object_size ?? + eden_allocate(top, t1, 0, t2, slow_case); + + // Check that t1 was preserved in eden_allocate. +#ifdef ASSERT + if (UseTLAB) { + Label ok; + Register tsize = rsi; + assert_different_registers(tsize, thread_reg, t1); + push(tsize); + movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset()))); + shlptr(tsize, LogHeapWordSize); + cmpptr(t1, tsize); + jcc(Assembler::equal, ok); + stop("assert(t1 != tlab size)"); + should_not_reach_here(); + + bind(ok); + pop(tsize); + } +#endif + movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top); + movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top); + addptr(top, t1); + subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); + movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top); + verify_tlab(); + jmp(retry); +} + +static const double pi_4 = 0.7853981633974483; + +void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) { + // A hand-coded argument reduction for values in fabs(pi/4, pi/2) + // was attempted in this code; unfortunately it appears that the + // switch to 80-bit precision and back causes this to be + // unprofitable compared with simply performing a runtime call if + // the argument is out of the (-pi/4, pi/4) range. + + Register tmp = noreg; + if (!VM_Version::supports_cmov()) { + // fcmp needs a temporary so preserve rbx, + tmp = rbx; + push(tmp); + } + + Label slow_case, done; + + // x ?<= pi/4 + fld_d(ExternalAddress((address)&pi_4)); + fld_s(1); // Stack: X PI/4 X + fabs(); // Stack: |X| PI/4 X + fcmp(tmp); + jcc(Assembler::above, slow_case); + + // fastest case: -pi/4 <= x <= pi/4 + switch(trig) { + case 's': + fsin(); + break; + case 'c': + fcos(); + break; + case 't': + ftan(); + break; + default: + assert(false, "bad intrinsic"); + break; + } + jmp(done); + + // slow case: runtime call + bind(slow_case); + // Preserve registers across runtime call + pusha(); + int incoming_argument_and_return_value_offset = -1; + if (num_fpu_regs_in_use > 1) { + // Must preserve all other FPU regs (could alternatively convert + // SharedRuntime::dsin and dcos into assembly routines known not to trash + // FPU state, but can not trust C compiler) + NEEDS_CLEANUP; + // NOTE that in this case we also push the incoming argument to + // the stack and restore it later; we also use this stack slot to + // hold the return value from dsin or dcos. 
+ for (int i = 0; i < num_fpu_regs_in_use; i++) { + subptr(rsp, sizeof(jdouble)); + fstp_d(Address(rsp, 0)); + } + incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1); + fld_d(Address(rsp, incoming_argument_and_return_value_offset)); + } + subptr(rsp, sizeof(jdouble)); + fstp_d(Address(rsp, 0)); +#ifdef _LP64 + movdbl(xmm0, Address(rsp, 0)); +#endif // _LP64 + + // NOTE: we must not use call_VM_leaf here because that requires a + // complete interpreter frame in debug mode -- same bug as 4387334 + // MacroAssembler::call_VM_leaf_base is perfectly safe and will + // do proper 64bit abi + + NEEDS_CLEANUP; + // Need to add stack banging before this runtime call if it needs to + // be taken; however, there is no generic stack banging routine at + // the MacroAssembler level + switch(trig) { + case 's': + { + MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 0); + } + break; + case 'c': + { + MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 0); + } + break; + case 't': + { + MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 0); + } + break; + default: + assert(false, "bad intrinsic"); + break; + } +#ifdef _LP64 + movsd(Address(rsp, 0), xmm0); + fld_d(Address(rsp, 0)); +#endif // _LP64 + addptr(rsp, sizeof(jdouble)); + if (num_fpu_regs_in_use > 1) { + // Must save return value to stack and then restore entire FPU stack + fstp_d(Address(rsp, incoming_argument_and_return_value_offset)); + for (int i = 0; i < num_fpu_regs_in_use; i++) { + fld_d(Address(rsp, 0)); + addptr(rsp, sizeof(jdouble)); + } + } + popa(); + + // Come here with result in F-TOS + bind(done); + + if (tmp != noreg) { + pop(tmp); + } +} + + +void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) { + ucomisd(dst, as_Address(src)); +} + +void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) { + ucomiss(dst, as_Address(src)); +} + +void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) { + if (reachable(src)) { + xorpd(dst, as_Address(src)); + } else { + lea(rscratch1, src); + xorpd(dst, Address(rscratch1, 0)); + } +} + +void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) { + if (reachable(src)) { + xorps(dst, as_Address(src)); + } else { + lea(rscratch1, src); + xorps(dst, Address(rscratch1, 0)); + } +} + +void MacroAssembler::verify_oop(Register reg, const char* s) { + if (!VerifyOops) return; + + // Pass register number to verify_oop_subroutine + char* b = new char[strlen(s) + 50]; + sprintf(b, "verify_oop: %s: %s", reg->name(), s); + push(rax); // save rax, + push(reg); // pass register argument + ExternalAddress buffer((address) b); + // avoid using pushptr, as it modifies scratch registers + // and our contract is not to modify anything + movptr(rax, buffer.addr()); + push(rax); + // call indirectly to solve generation ordering problem + movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); + call(rax); +} + + +void MacroAssembler::verify_oop_addr(Address addr, const char* s) { + if (!VerifyOops) return; + + // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord); + // Pass register number to verify_oop_subroutine + char* b = new char[strlen(s) + 50]; + sprintf(b, "verify_oop_addr: %s", s); + + push(rax); // save rax, + // addr may contain rsp so we will have to adjust it based on the push + // we just did + // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which + // stores 
rax into addr which is backwards of what was intended. + if (addr.uses(rsp)) { + lea(rax, addr); + pushptr(Address(rax, BytesPerWord)); + } else { + pushptr(addr); + } + + ExternalAddress buffer((address) b); + // pass msg argument + // avoid using pushptr, as it modifies scratch registers + // and our contract is not to modify anything + movptr(rax, buffer.addr()); + push(rax); + + // call indirectly to solve generation ordering problem + movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); + call(rax); + // Caller pops the arguments and restores rax, from the stack +} + +void MacroAssembler::verify_tlab() { +#ifdef ASSERT + if (UseTLAB && VerifyOops) { + Label next, ok; + Register t1 = rsi; + Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread); + + push(t1); + NOT_LP64(push(thread_reg)); + NOT_LP64(get_thread(thread_reg)); + + movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); + cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); + jcc(Assembler::aboveEqual, next); + stop("assert(top >= start)"); + should_not_reach_here(); + + bind(next); + movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); + cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); + jcc(Assembler::aboveEqual, ok); + stop("assert(top <= end)"); + should_not_reach_here(); + + bind(ok); + NOT_LP64(pop(thread_reg)); + pop(t1); + } +#endif +} + +class ControlWord { + public: + int32_t _value; + + int rounding_control() const { return (_value >> 10) & 3 ; } + int precision_control() const { return (_value >> 8) & 3 ; } + bool precision() const { return ((_value >> 5) & 1) != 0; } + bool underflow() const { return ((_value >> 4) & 1) != 0; } + bool overflow() const { return ((_value >> 3) & 1) != 0; } + bool zero_divide() const { return ((_value >> 2) & 1) != 0; } + bool denormalized() const { return ((_value >> 1) & 1) != 0; } + bool invalid() const { return ((_value >> 0) & 1) != 0; } + + void print() const { + // rounding control + const char* rc; + switch (rounding_control()) { + case 0: rc = "round near"; break; + case 1: rc = "round down"; break; + case 2: rc = "round up "; break; + case 3: rc = "chop "; break; + }; + // precision control + const char* pc; + switch (precision_control()) { + case 0: pc = "24 bits "; break; + case 1: pc = "reserved"; break; + case 2: pc = "53 bits "; break; + case 3: pc = "64 bits "; break; + }; + // flags + char f[9]; + f[0] = ' '; + f[1] = ' '; + f[2] = (precision ()) ? 'P' : 'p'; + f[3] = (underflow ()) ? 'U' : 'u'; + f[4] = (overflow ()) ? 'O' : 'o'; + f[5] = (zero_divide ()) ? 'Z' : 'z'; + f[6] = (denormalized()) ? 'D' : 'd'; + f[7] = (invalid ()) ? 
'I' : 'i'; + f[8] = '\x0'; + // output + printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); + } + +}; + +class StatusWord { + public: + int32_t _value; + + bool busy() const { return ((_value >> 15) & 1) != 0; } + bool C3() const { return ((_value >> 14) & 1) != 0; } + bool C2() const { return ((_value >> 10) & 1) != 0; } + bool C1() const { return ((_value >> 9) & 1) != 0; } + bool C0() const { return ((_value >> 8) & 1) != 0; } + int top() const { return (_value >> 11) & 7 ; } + bool error_status() const { return ((_value >> 7) & 1) != 0; } + bool stack_fault() const { return ((_value >> 6) & 1) != 0; } + bool precision() const { return ((_value >> 5) & 1) != 0; } + bool underflow() const { return ((_value >> 4) & 1) != 0; } + bool overflow() const { return ((_value >> 3) & 1) != 0; } + bool zero_divide() const { return ((_value >> 2) & 1) != 0; } + bool denormalized() const { return ((_value >> 1) & 1) != 0; } + bool invalid() const { return ((_value >> 0) & 1) != 0; } + + void print() const { + // condition codes + char c[5]; + c[0] = (C3()) ? '3' : '-'; + c[1] = (C2()) ? '2' : '-'; + c[2] = (C1()) ? '1' : '-'; + c[3] = (C0()) ? '0' : '-'; + c[4] = '\x0'; + // flags + char f[9]; + f[0] = (error_status()) ? 'E' : '-'; + f[1] = (stack_fault ()) ? 'S' : '-'; + f[2] = (precision ()) ? 'P' : '-'; + f[3] = (underflow ()) ? 'U' : '-'; + f[4] = (overflow ()) ? 'O' : '-'; + f[5] = (zero_divide ()) ? 'Z' : '-'; + f[6] = (denormalized()) ? 'D' : '-'; + f[7] = (invalid ()) ? 'I' : '-'; + f[8] = '\x0'; + // output + printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); + } + +}; + +class TagWord { + public: + int32_t _value; + + int tag_at(int i) const { return (_value >> (i*2)) & 3; } + + void print() const { + printf("%04x", _value & 0xFFFF); + } + +}; + +class FPU_Register { + public: + int32_t _m0; + int32_t _m1; + int16_t _ex; + + bool is_indefinite() const { + return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; + } + + void print() const { + char sign = (_ex < 0) ? '-' : '+'; + const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; + printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); + }; + +}; + +class FPU_State { + public: + enum { + register_size = 10, + number_of_registers = 8, + register_mask = 7 + }; + + ControlWord _control_word; + StatusWord _status_word; + TagWord _tag_word; + int32_t _error_offset; + int32_t _error_selector; + int32_t _data_offset; + int32_t _data_selector; + int8_t _register[register_size * number_of_registers]; + + int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } + FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } + + const char* tag_as_string(int tag) const { + switch (tag) { + case 0: return "valid"; + case 1: return "zero"; + case 2: return "special"; + case 3: return "empty"; + } + ShouldNotReachHere() + return NULL; + } + + void print() const { + // print computation registers + { int t = _status_word.top(); + for (int i = 0; i < number_of_registers; i++) { + int j = (i - t) & register_mask; + printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j); + st(j)->print(); + printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); + } + } + printf("\n"); + // print control registers + printf("ctrl = "); _control_word.print(); printf("\n"); + printf("stat = "); _status_word .print(); printf("\n"); + printf("tags = "); _tag_word .print(); printf("\n"); + } + +}; + +class Flag_Register { + public: + int32_t _value; + + bool overflow() const { return ((_value >> 11) & 1) != 0; } + bool direction() const { return ((_value >> 10) & 1) != 0; } + bool sign() const { return ((_value >> 7) & 1) != 0; } + bool zero() const { return ((_value >> 6) & 1) != 0; } + bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } + bool parity() const { return ((_value >> 2) & 1) != 0; } + bool carry() const { return ((_value >> 0) & 1) != 0; } + + void print() const { + // flags + char f[8]; + f[0] = (overflow ()) ? 'O' : '-'; + f[1] = (direction ()) ? 'D' : '-'; + f[2] = (sign ()) ? 'S' : '-'; + f[3] = (zero ()) ? 'Z' : '-'; + f[4] = (auxiliary_carry()) ? 'A' : '-'; + f[5] = (parity ()) ? 'P' : '-'; + f[6] = (carry ()) ? 'C' : '-'; + f[7] = '\x0'; + // output + printf("%08x flags = %s", _value, f); + } + +}; + +class IU_Register { + public: + int32_t _value; + + void print() const { + printf("%08x %11d", _value, _value); + } + +}; + +class IU_State { + public: + Flag_Register _eflags; + IU_Register _rdi; + IU_Register _rsi; + IU_Register _rbp; + IU_Register _rsp; + IU_Register _rbx; + IU_Register _rdx; + IU_Register _rcx; + IU_Register _rax; + + void print() const { + // computation registers + printf("rax, = "); _rax.print(); printf("\n"); + printf("rbx, = "); _rbx.print(); printf("\n"); + printf("rcx = "); _rcx.print(); printf("\n"); + printf("rdx = "); _rdx.print(); printf("\n"); + printf("rdi = "); _rdi.print(); printf("\n"); + printf("rsi = "); _rsi.print(); printf("\n"); + printf("rbp, = "); _rbp.print(); printf("\n"); + printf("rsp = "); _rsp.print(); printf("\n"); + printf("\n"); + // control registers + printf("flgs = "); _eflags.print(); printf("\n"); + } +}; + + +class CPU_State { + public: + FPU_State _fpu_state; + IU_State _iu_state; + + void print() const { + printf("--------------------------------------------------\n"); + _iu_state .print(); + printf("\n"); + _fpu_state.print(); + printf("--------------------------------------------------\n"); + } + +}; + + +static void _print_CPU_state(CPU_State* state) { + state->print(); +}; + + +void MacroAssembler::print_CPU_state() { + push_CPU_state(); + push(rsp); // pass CPU state + call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state))); + addptr(rsp, wordSize); // discard argument + pop_CPU_state(); +} + + +static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) { + static int counter = 0; + FPU_State* fs = &state->_fpu_state; + counter++; + // For leaf calls, only verify that the top few elements remain empty. + // We only need 1 empty at the top for C2 code. 
+ if( stack_depth < 0 ) { + if( fs->tag_for_st(7) != 3 ) { + printf("FPR7 not empty\n"); + state->print(); + assert(false, "error"); + return false; + } + return true; // All other stack states do not matter + } + + assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std, + "bad FPU control word"); + + // compute stack depth + int i = 0; + while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++; + int d = i; + while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++; + // verify findings + if (i != FPU_State::number_of_registers) { + // stack not contiguous + printf("%s: stack not contiguous at ST%d\n", s, i); + state->print(); + assert(false, "error"); + return false; + } + // check if computed stack depth corresponds to expected stack depth + if (stack_depth < 0) { + // expected stack depth is -stack_depth or less + if (d > -stack_depth) { + // too many elements on the stack + printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d); + state->print(); + assert(false, "error"); + return false; + } + } else { + // expected stack depth is stack_depth + if (d != stack_depth) { + // wrong stack depth + printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d); + state->print(); + assert(false, "error"); + return false; + } + } + // everything is cool + return true; +} + + +void MacroAssembler::verify_FPU(int stack_depth, const char* s) { + if (!VerifyFPU) return; + push_CPU_state(); + push(rsp); // pass CPU state + ExternalAddress msg((address) s); + // pass message string s + pushptr(msg.addr()); + push(stack_depth); // pass stack depth + call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU))); + addptr(rsp, 3 * wordSize); // discard arguments + // check for error + { Label L; + testl(rax, rax); + jcc(Assembler::notZero, L); + int3(); // break if error condition + bind(L); + } + pop_CPU_state(); +} + +void MacroAssembler::load_klass(Register dst, Register src) { +#ifdef _LP64 + if (UseCompressedOops) { + movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); + decode_heap_oop_not_null(dst); + } else +#endif + movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); +} + +void MacroAssembler::load_prototype_header(Register dst, Register src) { +#ifdef _LP64 + if (UseCompressedOops) { + movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); + movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); + } else +#endif + { + movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); + movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); + } +} + +void MacroAssembler::store_klass(Register dst, Register src) { +#ifdef _LP64 + if (UseCompressedOops) { + encode_heap_oop_not_null(src); + movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); + } else +#endif + movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); +} + +#ifdef _LP64 +void MacroAssembler::store_klass_gap(Register dst, Register src) { + if (UseCompressedOops) { + // Store to klass gap in destination + movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); + } +} + +void MacroAssembler::load_heap_oop(Register dst, Address src) { + if (UseCompressedOops) { + movl(dst, src); + decode_heap_oop(dst); + } else { + movq(dst, src); + } +} + +void MacroAssembler::store_heap_oop(Address dst, Register src) { + if (UseCompressedOops) { + assert(!dst.uses(src), "not enough 
registers"); + encode_heap_oop(src); + movl(dst, src); + } else { + movq(dst, src); + } +} + +// Algorithm must match oop.inline.hpp encode_heap_oop. +void MacroAssembler::encode_heap_oop(Register r) { + assert (UseCompressedOops, "should be compressed"); +#ifdef ASSERT + if (CheckCompressedOops) { + Label ok; + push(rscratch1); // cmpptr trashes rscratch1 + cmpptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr())); + jcc(Assembler::equal, ok); + stop("MacroAssembler::encode_heap_oop: heap base corrupted?"); + bind(ok); + pop(rscratch1); + } +#endif + verify_oop(r, "broken oop in encode_heap_oop"); + testq(r, r); + cmovq(Assembler::equal, r, r12_heapbase); + subq(r, r12_heapbase); + shrq(r, LogMinObjAlignmentInBytes); +} + +void MacroAssembler::encode_heap_oop_not_null(Register r) { + assert (UseCompressedOops, "should be compressed"); +#ifdef ASSERT + if (CheckCompressedOops) { + Label ok; + testq(r, r); + jcc(Assembler::notEqual, ok); + stop("null oop passed to encode_heap_oop_not_null"); + bind(ok); + } +#endif + verify_oop(r, "broken oop in encode_heap_oop_not_null"); + subq(r, r12_heapbase); + shrq(r, LogMinObjAlignmentInBytes); +} + +void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { + assert (UseCompressedOops, "should be compressed"); +#ifdef ASSERT + if (CheckCompressedOops) { + Label ok; + testq(src, src); + jcc(Assembler::notEqual, ok); + stop("null oop passed to encode_heap_oop_not_null2"); + bind(ok); + } +#endif + verify_oop(src, "broken oop in encode_heap_oop_not_null2"); + if (dst != src) { + movq(dst, src); + } + subq(dst, r12_heapbase); + shrq(dst, LogMinObjAlignmentInBytes); +} + +void MacroAssembler::decode_heap_oop(Register r) { + assert (UseCompressedOops, "should be compressed"); +#ifdef ASSERT + if (CheckCompressedOops) { + Label ok; + push(rscratch1); + cmpptr(r12_heapbase, + ExternalAddress((address)Universe::heap_base_addr())); + jcc(Assembler::equal, ok); + stop("MacroAssembler::decode_heap_oop: heap base corrupted?"); + bind(ok); + pop(rscratch1); + } +#endif + + Label done; + shlq(r, LogMinObjAlignmentInBytes); + jccb(Assembler::equal, done); + addq(r, r12_heapbase); +#if 0 + // alternate decoding probably a wash. + testq(r, r); + jccb(Assembler::equal, done); + leaq(r, Address(r12_heapbase, r, Address::times_8, 0)); +#endif + bind(done); + verify_oop(r, "broken oop in decode_heap_oop"); +} + +void MacroAssembler::decode_heap_oop_not_null(Register r) { + assert (UseCompressedOops, "should only be used for compressed headers"); + // Cannot assert, unverified entry point counts instructions (see .ad file) + // vtableStubs also counts instructions in pd_code_size_limit. + // Also do not verify_oop as this is called by verify_oop. + assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong"); + leaq(r, Address(r12_heapbase, r, Address::times_8, 0)); +} + +void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { + assert (UseCompressedOops, "should only be used for compressed headers"); + // Cannot assert, unverified entry point counts instructions (see .ad file) + // vtableStubs also counts instructions in pd_code_size_limit. + // Also do not verify_oop as this is called by verify_oop. 
+ assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong"); + leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); +} + +void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { + assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); + int oop_index = oop_recorder()->find_index(obj); + RelocationHolder rspec = oop_Relocation::spec(oop_index); + mov_literal32(dst, oop_index, rspec, narrow_oop_operand); +} + +void MacroAssembler::reinit_heapbase() { + if (UseCompressedOops) { + movptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr())); + } +} +#endif // _LP64 + +Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { + switch (cond) { + // Note some conditions are synonyms for others + case Assembler::zero: return Assembler::notZero; + case Assembler::notZero: return Assembler::zero; + case Assembler::less: return Assembler::greaterEqual; + case Assembler::lessEqual: return Assembler::greater; + case Assembler::greater: return Assembler::lessEqual; + case Assembler::greaterEqual: return Assembler::less; + case Assembler::below: return Assembler::aboveEqual; + case Assembler::belowEqual: return Assembler::above; + case Assembler::above: return Assembler::belowEqual; + case Assembler::aboveEqual: return Assembler::below; + case Assembler::overflow: return Assembler::noOverflow; + case Assembler::noOverflow: return Assembler::overflow; + case Assembler::negative: return Assembler::positive; + case Assembler::positive: return Assembler::negative; + case Assembler::parity: return Assembler::noParity; + case Assembler::noParity: return Assembler::parity; + } + ShouldNotReachHere(); return Assembler::overflow; +} + +SkipIfEqual::SkipIfEqual( + MacroAssembler* masm, const bool* flag_addr, bool value) { + _masm = masm; + _masm->cmp8(ExternalAddress((address)flag_addr), value); + _masm->jcc(Assembler::equal, _label); +} + +SkipIfEqual::~SkipIfEqual() { + _masm->bind(_label); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/cpu/x86/vm/assembler_x86.hpp 2009-08-01 04:17:33.229162664 +0100 @@ -0,0 +1,2070 @@ +/* + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class BiasedLockingCounters; + +// Contains all the definitions needed for x86 assembly code generation. + +// Calling convention +class Argument VALUE_OBJ_CLASS_SPEC { + public: + enum { +#ifdef _LP64 +#ifdef _WIN64 + n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 
+ n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... ) +#else + n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) + n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... ) +#endif // _WIN64 + n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ... + n_float_register_parameters_j = 8 // j_farg0, j_farg1, ... +#else + n_register_parameters = 0 // 0 registers used to pass arguments +#endif // _LP64 + }; +}; + + +#ifdef _LP64 +// Symbolically name the register arguments used by the c calling convention. +// Windows is different from linux/solaris. So much for standards... + +#ifdef _WIN64 + +REGISTER_DECLARATION(Register, c_rarg0, rcx); +REGISTER_DECLARATION(Register, c_rarg1, rdx); +REGISTER_DECLARATION(Register, c_rarg2, r8); +REGISTER_DECLARATION(Register, c_rarg3, r9); + +REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); +REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); +REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); +REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); + +#else + +REGISTER_DECLARATION(Register, c_rarg0, rdi); +REGISTER_DECLARATION(Register, c_rarg1, rsi); +REGISTER_DECLARATION(Register, c_rarg2, rdx); +REGISTER_DECLARATION(Register, c_rarg3, rcx); +REGISTER_DECLARATION(Register, c_rarg4, r8); +REGISTER_DECLARATION(Register, c_rarg5, r9); + +REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); +REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); +REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); +REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); +REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4); +REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5); +REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6); +REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7); + +#endif // _WIN64 + +// Symbolically name the register arguments used by the Java calling convention. +// We have control over the convention for java so we can do what we please. +// What pleases us is to offset the java calling convention so that when +// we call a suitable jni method the arguments are lined up and we don't +// have to do little shuffling. 
A suitable jni method is non-static and a +// small number of arguments (two fewer args on windows) +// +// |-------------------------------------------------------| +// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 | +// |-------------------------------------------------------| +// | rcx rdx r8 r9 rdi* rsi* | windows (* not a c_rarg) +// | rdi rsi rdx rcx r8 r9 | solaris/linux +// |-------------------------------------------------------| +// | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 | +// |-------------------------------------------------------| + +REGISTER_DECLARATION(Register, j_rarg0, c_rarg1); +REGISTER_DECLARATION(Register, j_rarg1, c_rarg2); +REGISTER_DECLARATION(Register, j_rarg2, c_rarg3); +// Windows runs out of register args here +#ifdef _WIN64 +REGISTER_DECLARATION(Register, j_rarg3, rdi); +REGISTER_DECLARATION(Register, j_rarg4, rsi); +#else +REGISTER_DECLARATION(Register, j_rarg3, c_rarg4); +REGISTER_DECLARATION(Register, j_rarg4, c_rarg5); +#endif /* _WIN64 */ +REGISTER_DECLARATION(Register, j_rarg5, c_rarg0); + +REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0); +REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1); +REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2); +REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3); +REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4); +REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5); +REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6); +REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7); + +REGISTER_DECLARATION(Register, rscratch1, r10); // volatile +REGISTER_DECLARATION(Register, rscratch2, r11); // volatile + +REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved +REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved + +#else +// rscratch1 will apear in 32bit code that is dead but of course must compile +// Using noreg ensures if the dead code is incorrectly live and executed it +// will cause an assertion failure +#define rscratch1 noreg + +#endif // _LP64 + +// Address is an abstraction used to represent a memory location +// using any of the amd64 addressing modes with one object. +// +// Note: A register location is represented via a Register, not +// via an address for efficiency & simplicity reasons. + +class ArrayAddress; + +class Address VALUE_OBJ_CLASS_SPEC { + public: + enum ScaleFactor { + no_scale = -1, + times_1 = 0, + times_2 = 1, + times_4 = 2, + times_8 = 3, + times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4) + }; + + private: + Register _base; + Register _index; + ScaleFactor _scale; + int _disp; + RelocationHolder _rspec; + + // Easily misused constructors make them private + // %%% can we make these go away? + NOT_LP64(Address(address loc, RelocationHolder spec);) + Address(int disp, address loc, relocInfo::relocType rtype); + Address(int disp, address loc, RelocationHolder spec); + + public: + + int disp() { return _disp; } + // creation + Address() + : _base(noreg), + _index(noreg), + _scale(no_scale), + _disp(0) { + } + + // No default displacement otherwise Register can be implicitly + // converted to 0(Register) which is quite a different animal. + + Address(Register base, int disp) + : _base(base), + _index(noreg), + _scale(no_scale), + _disp(disp) { + } + + Address(Register base, Register index, ScaleFactor scale, int disp = 0) + : _base (base), + _index(index), + _scale(scale), + _disp (disp) { + assert(!index->is_valid() == (scale == Address::no_scale), + "inconsistent address"); + } + + // The following two overloads are used in connection with the + // ByteSize type (see sizes.hpp). 
They simplify the use of + // ByteSize'd arguments in assembly code. Note that their equivalent + // for the optimized build are the member functions with int disp + // argument since ByteSize is mapped to an int type in that case. + // + // Note: DO NOT introduce similar overloaded functions for WordSize + // arguments as in the optimized mode, both ByteSize and WordSize + // are mapped to the same type and thus the compiler cannot make a + // distinction anymore (=> compiler errors). + +#ifdef ASSERT + Address(Register base, ByteSize disp) + : _base(base), + _index(noreg), + _scale(no_scale), + _disp(in_bytes(disp)) { + } + + Address(Register base, Register index, ScaleFactor scale, ByteSize disp) + : _base(base), + _index(index), + _scale(scale), + _disp(in_bytes(disp)) { + assert(!index->is_valid() == (scale == Address::no_scale), + "inconsistent address"); + } +#endif // ASSERT + + // accessors + bool uses(Register reg) const { return _base == reg || _index == reg; } + Register base() const { return _base; } + Register index() const { return _index; } + ScaleFactor scale() const { return _scale; } + int disp() const { return _disp; } + + // Convert the raw encoding form into the form expected by the constructor for + // Address. An index of 4 (rsp) corresponds to having no index, so convert + // that to noreg for the Address constructor. + static Address make_raw(int base, int index, int scale, int disp); + + static Address make_array(ArrayAddress); + + + private: + bool base_needs_rex() const { + return _base != noreg && _base->encoding() >= 8; + } + + bool index_needs_rex() const { + return _index != noreg &&_index->encoding() >= 8; + } + + relocInfo::relocType reloc() const { return _rspec.type(); } + + friend class Assembler; + friend class MacroAssembler; + friend class LIR_Assembler; // base/index/scale/disp +}; + +// +// AddressLiteral has been split out from Address because operands of this type +// need to be treated specially on 32bit vs. 64bit platforms. By splitting it out +// the few instructions that need to deal with address literals are unique and the +// MacroAssembler does not have to implement every instruction in the Assembler +// in order to search for address literals that may need special handling depending +// on the instruction and the platform. As small step on the way to merging i486/amd64 +// directories. +// +class AddressLiteral VALUE_OBJ_CLASS_SPEC { + friend class ArrayAddress; + RelocationHolder _rspec; + // Typically we use AddressLiterals we want to use their rval + // However in some situations we want the lval (effect address) of the item. + // We provide a special factory for making those lvals. + bool _is_lval; + + // If the target is far we'll need to load the ea of this to + // a register to reach it. Otherwise if near we can do rip + // relative addressing. 
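The near/far distinction described above is the one the MacroAssembler routines in the .cpp hunk act on (dereference an AddressLiteral directly when reachable, otherwise lea its address into rscratch1 first). A minimal standalone sketch of the reachability test, assuming the signed 32-bit displacement rule that Assembler::is_simm32() further down encodes:

    #include <cstdint>

    // A displacement is rip-reachable iff it round-trips through a signed 32-bit immediate.
    static bool fits_simm32(int64_t x) { return x == (int64_t)(int32_t)x; }

    // code_pos is where the instruction will be emitted, target is the literal's address.
    bool rip_reachable(const void* code_pos, const void* target) {
      int64_t disp = (int64_t)((const char*)target - (const char*)code_pos);
      return fits_simm32(disp);
    }

    int main() {
      char buf[16];
      return rip_reachable(&buf[0], &buf[8]) ? 0 : 1;   // trivially near
    }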
+ + address _target; + + protected: + // creation + AddressLiteral() + : _is_lval(false), + _target(NULL) + {} + + public: + + + AddressLiteral(address target, relocInfo::relocType rtype); + + AddressLiteral(address target, RelocationHolder const& rspec) + : _rspec(rspec), + _is_lval(false), + _target(target) + {} + + AddressLiteral addr() { + AddressLiteral ret = *this; + ret._is_lval = true; + return ret; + } + + + private: + + address target() { return _target; } + bool is_lval() { return _is_lval; } + + relocInfo::relocType reloc() const { return _rspec.type(); } + const RelocationHolder& rspec() const { return _rspec; } + + friend class Assembler; + friend class MacroAssembler; + friend class Address; + friend class LIR_Assembler; +}; + +// Convience classes +class RuntimeAddress: public AddressLiteral { + + public: + + RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {} + +}; + +class OopAddress: public AddressLiteral { + + public: + + OopAddress(address target) : AddressLiteral(target, relocInfo::oop_type){} + +}; + +class ExternalAddress: public AddressLiteral { + + public: + + ExternalAddress(address target) : AddressLiteral(target, relocInfo::external_word_type){} + +}; + +class InternalAddress: public AddressLiteral { + + public: + + InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {} + +}; + +// x86 can do array addressing as a single operation since disp can be an absolute +// address amd64 can't. We create a class that expresses the concept but does extra +// magic on amd64 to get the final result + +class ArrayAddress VALUE_OBJ_CLASS_SPEC { + private: + + AddressLiteral _base; + Address _index; + + public: + + ArrayAddress() {}; + ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {}; + AddressLiteral base() { return _base; } + Address index() { return _index; } + +}; + +const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY( 512 / wordSize); + +// The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction +// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write +// is what you get. The Assembler is generating code into a CodeBuffer. + +class Assembler : public AbstractAssembler { + friend class AbstractAssembler; // for the non-virtual hack + friend class LIR_Assembler; // as_Address() + friend class StubGenerator; + + public: + enum Condition { // The x86 condition codes used for conditional jumps/moves. 
+ zero = 0x4, + notZero = 0x5, + equal = 0x4, + notEqual = 0x5, + less = 0xc, + lessEqual = 0xe, + greater = 0xf, + greaterEqual = 0xd, + below = 0x2, + belowEqual = 0x6, + above = 0x7, + aboveEqual = 0x3, + overflow = 0x0, + noOverflow = 0x1, + carrySet = 0x2, + carryClear = 0x3, + negative = 0x8, + positive = 0x9, + parity = 0xa, + noParity = 0xb + }; + + enum Prefix { + // segment overrides + CS_segment = 0x2e, + SS_segment = 0x36, + DS_segment = 0x3e, + ES_segment = 0x26, + FS_segment = 0x64, + GS_segment = 0x65, + + REX = 0x40, + + REX_B = 0x41, + REX_X = 0x42, + REX_XB = 0x43, + REX_R = 0x44, + REX_RB = 0x45, + REX_RX = 0x46, + REX_RXB = 0x47, + + REX_W = 0x48, + + REX_WB = 0x49, + REX_WX = 0x4A, + REX_WXB = 0x4B, + REX_WR = 0x4C, + REX_WRB = 0x4D, + REX_WRX = 0x4E, + REX_WRXB = 0x4F + }; + + enum WhichOperand { + // input to locate_operand, and format code for relocations + imm_operand = 0, // embedded 32-bit|64-bit immediate operand + disp32_operand = 1, // embedded 32-bit displacement or address + call32_operand = 2, // embedded 32-bit self-relative displacement +#ifndef _LP64 + _WhichOperand_limit = 3 +#else + narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop + _WhichOperand_limit = 4 +#endif + }; + + + + // NOTE: The general philopsophy of the declarations here is that 64bit versions + // of instructions are freely declared without the need for wrapping them an ifdef. + // (Some dangerous instructions are ifdef's out of inappropriate jvm's.) + // In the .cpp file the implementations are wrapped so that they are dropped out + // of the resulting jvm. This is done mostly to keep the footprint of KERNEL + // to the size it was prior to merging up the 32bit and 64bit assemblers. + // + // This does mean you'll get a linker/runtime error if you use a 64bit only instruction + // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down. + +private: + + + // 64bit prefixes + int prefix_and_encode(int reg_enc, bool byteinst = false); + int prefixq_and_encode(int reg_enc); + + int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false); + int prefixq_and_encode(int dst_enc, int src_enc); + + void prefix(Register reg); + void prefix(Address adr); + void prefixq(Address adr); + + void prefix(Address adr, Register reg, bool byteinst = false); + void prefixq(Address adr, Register reg); + + void prefix(Address adr, XMMRegister reg); + + void prefetch_prefix(Address src); + + // Helper functions for groups of instructions + void emit_arith_b(int op1, int op2, Register dst, int imm8); + + void emit_arith(int op1, int op2, Register dst, int32_t imm32); + // only 32bit?? 
+ void emit_arith(int op1, int op2, Register dst, jobject obj); + void emit_arith(int op1, int op2, Register dst, Register src); + + void emit_operand(Register reg, + Register base, Register index, Address::ScaleFactor scale, + int disp, + RelocationHolder const& rspec, + int rip_relative_correction = 0); + + void emit_operand(Register reg, Address adr, int rip_relative_correction = 0); + + // operands that only take the original 32bit registers + void emit_operand32(Register reg, Address adr); + + void emit_operand(XMMRegister reg, + Register base, Register index, Address::ScaleFactor scale, + int disp, + RelocationHolder const& rspec); + + void emit_operand(XMMRegister reg, Address adr); + + void emit_operand(MMXRegister reg, Address adr); + + // workaround gcc (3.2.1-7) bug + void emit_operand(Address adr, MMXRegister reg); + + + // Immediate-to-memory forms + void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32); + + void emit_farith(int b1, int b2, int i); + + + protected: + #ifdef ASSERT + void check_relocation(RelocationHolder const& rspec, int format); + #endif + + inline void emit_long64(jlong x); + + void emit_data(jint data, relocInfo::relocType rtype, int format); + void emit_data(jint data, RelocationHolder const& rspec, int format); + void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0); + void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0); + + + bool reachable(AddressLiteral adr) NOT_LP64({ return true;}); + + // These are all easily abused and hence protected + + void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec, int format = 0); + + // 32BIT ONLY SECTION +#ifndef _LP64 + // Make these disappear in 64bit mode since they would never be correct + void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY + void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY + + void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY + + void push_literal32(int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY +#else + // 64BIT ONLY SECTION + void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec); // 64BIT ONLY +#endif // _LP64 + + // These are unique in that we are ensured by the caller that the 32bit + // relative in these instructions will always be able to reach the potentially + // 64bit address described by entry. Since they can take a 64bit address they + // don't have the 32 suffix like the other instructions in this class. + + void call_literal(address entry, RelocationHolder const& rspec); + void jmp_literal(address entry, RelocationHolder const& rspec); + + // Avoid using directly section + // Instructions in this section are actually usable by anyone without danger + // of failure but have performance issues that are addressed my enhanced + // instructions which will do the proper thing base on the particular cpu. + // We protect them because we don't trust you... + + // Don't use next inc() and dec() methods directly. INC & DEC instructions + // could cause a partial flag stall since they don't set CF flag. + // Use MacroAssembler::decrement() & MacroAssembler::increment() methods + // which call inc() & dec() or add() & sub() in accordance with + // the product flag UseIncDec value. 
+ + void decl(Register dst); + void decl(Address dst); + void decq(Register dst); + void decq(Address dst); + + void incl(Register dst); + void incl(Address dst); + void incq(Register dst); + void incq(Address dst); + + // New cpus require use of movsd and movss to avoid partial register stall + // when loading from memory. But for old Opteron use movlpd instead of movsd. + // The selection is done in MacroAssembler::movdbl() and movflt(). + + // Move Scalar Single-Precision Floating-Point Values + void movss(XMMRegister dst, Address src); + void movss(XMMRegister dst, XMMRegister src); + void movss(Address dst, XMMRegister src); + + // Move Scalar Double-Precision Floating-Point Values + void movsd(XMMRegister dst, Address src); + void movsd(XMMRegister dst, XMMRegister src); + void movsd(Address dst, XMMRegister src); + void movlpd(XMMRegister dst, Address src); + + // New cpus require use of movaps and movapd to avoid partial register stall + // when moving between registers. + void movaps(XMMRegister dst, XMMRegister src); + void movapd(XMMRegister dst, XMMRegister src); + + // End avoid using directly + + + // Instruction prefixes + void prefix(Prefix p); + + public: + + // Creation + Assembler(CodeBuffer* code) : AbstractAssembler(code) {} + + // Decoding + static address locate_operand(address inst, WhichOperand which); + static address locate_next_instruction(address inst); + + // Utilities + +#ifdef _LP64 + static bool is_simm(int64_t x, int nbits) { return -( CONST64(1) << (nbits-1) ) <= x && x < ( CONST64(1) << (nbits-1) ); } + static bool is_simm32(int64_t x) { return x == (int64_t)(int32_t)x; } +#else + static bool is_simm(int32_t x, int nbits) { return -( 1 << (nbits-1) ) <= x && x < ( 1 << (nbits-1) ); } + static bool is_simm32(int32_t x) { return true; } +#endif // LP64 + + // Generic instructions + // Does 32bit or 64bit as needed for the platform. 
In some sense these + // belong in macro assembler but there is no need for both varieties to exist + + void lea(Register dst, Address src); + + void mov(Register dst, Register src); + + void pusha(); + void popa(); + + void pushf(); + void popf(); + + void push(int32_t imm32); + + void push(Register src); + + void pop(Register dst); + + // These are dummies to prevent surprise implicit conversions to Register + void push(void* v); + void pop(void* v); + + + // These do register sized moves/scans + void rep_mov(); + void rep_set(); + void repne_scan(); +#ifdef _LP64 + void repne_scanl(); +#endif + + // Vanilla instructions in lexical order + + void adcl(Register dst, int32_t imm32); + void adcl(Register dst, Address src); + void adcl(Register dst, Register src); + + void adcq(Register dst, int32_t imm32); + void adcq(Register dst, Address src); + void adcq(Register dst, Register src); + + + void addl(Address dst, int32_t imm32); + void addl(Address dst, Register src); + void addl(Register dst, int32_t imm32); + void addl(Register dst, Address src); + void addl(Register dst, Register src); + + void addq(Address dst, int32_t imm32); + void addq(Address dst, Register src); + void addq(Register dst, int32_t imm32); + void addq(Register dst, Address src); + void addq(Register dst, Register src); + + + void addr_nop_4(); + void addr_nop_5(); + void addr_nop_7(); + void addr_nop_8(); + + // Add Scalar Double-Precision Floating-Point Values + void addsd(XMMRegister dst, Address src); + void addsd(XMMRegister dst, XMMRegister src); + + // Add Scalar Single-Precision Floating-Point Values + void addss(XMMRegister dst, Address src); + void addss(XMMRegister dst, XMMRegister src); + + void andl(Register dst, int32_t imm32); + void andl(Register dst, Address src); + void andl(Register dst, Register src); + + void andq(Register dst, int32_t imm32); + void andq(Register dst, Address src); + void andq(Register dst, Register src); + + + // Bitwise Logical AND of Packed Double-Precision Floating-Point Values + void andpd(XMMRegister dst, Address src); + void andpd(XMMRegister dst, XMMRegister src); + + void bswapl(Register reg); + + void bswapq(Register reg); + + void call(Label& L, relocInfo::relocType rtype); + void call(Register reg); // push pc; pc <- reg + void call(Address adr); // push pc; pc <- adr + + void cdql(); + + void cdqq(); + + void cld() { emit_byte(0xfc); } + + void clflush(Address adr); + + void cmovl(Condition cc, Register dst, Register src); + void cmovl(Condition cc, Register dst, Address src); + + void cmovq(Condition cc, Register dst, Register src); + void cmovq(Condition cc, Register dst, Address src); + + + void cmpb(Address dst, int imm8); + + void cmpl(Address dst, int32_t imm32); + + void cmpl(Register dst, int32_t imm32); + void cmpl(Register dst, Register src); + void cmpl(Register dst, Address src); + + void cmpq(Address dst, int32_t imm32); + void cmpq(Address dst, Register src); + + void cmpq(Register dst, int32_t imm32); + void cmpq(Register dst, Register src); + void cmpq(Register dst, Address src); + + // these are dummies used to catch attempting to convert NULL to Register + void cmpl(Register dst, void* junk); // dummy + void cmpq(Register dst, void* junk); // dummy + + void cmpw(Address dst, int imm16); + + void cmpxchg8 (Address adr); + + void cmpxchgl(Register reg, Address adr); + + void cmpxchgq(Register reg, Address adr); + + // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS + void comisd(XMMRegister dst, Address src); + + // Ordered 
Compare Scalar Single-Precision Floating-Point Values and set EFLAGS + void comiss(XMMRegister dst, Address src); + + // Identify processor type and features + void cpuid() { + emit_byte(0x0F); + emit_byte(0xA2); + } + + // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value + void cvtsd2ss(XMMRegister dst, XMMRegister src); + + // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value + void cvtsi2sdl(XMMRegister dst, Register src); + void cvtsi2sdq(XMMRegister dst, Register src); + + // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value + void cvtsi2ssl(XMMRegister dst, Register src); + void cvtsi2ssq(XMMRegister dst, Register src); + + // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value + void cvtdq2pd(XMMRegister dst, XMMRegister src); + + // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value + void cvtdq2ps(XMMRegister dst, XMMRegister src); + + // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value + void cvtss2sd(XMMRegister dst, XMMRegister src); + + // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer + void cvttsd2sil(Register dst, Address src); + void cvttsd2sil(Register dst, XMMRegister src); + void cvttsd2siq(Register dst, XMMRegister src); + + // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer + void cvttss2sil(Register dst, XMMRegister src); + void cvttss2siq(Register dst, XMMRegister src); + + // Divide Scalar Double-Precision Floating-Point Values + void divsd(XMMRegister dst, Address src); + void divsd(XMMRegister dst, XMMRegister src); + + // Divide Scalar Single-Precision Floating-Point Values + void divss(XMMRegister dst, Address src); + void divss(XMMRegister dst, XMMRegister src); + + void emms(); + + void fabs(); + + void fadd(int i); + + void fadd_d(Address src); + void fadd_s(Address src); + + // "Alternate" versions of x87 instructions place result down in FPU + // stack instead of on TOS + + void fadda(int i); // "alternate" fadd + void faddp(int i = 1); + + void fchs(); + + void fcom(int i); + + void fcomp(int i = 1); + void fcomp_d(Address src); + void fcomp_s(Address src); + + void fcompp(); + + void fcos(); + + void fdecstp(); + + void fdiv(int i); + void fdiv_d(Address src); + void fdivr_s(Address src); + void fdiva(int i); // "alternate" fdiv + void fdivp(int i = 1); + + void fdivr(int i); + void fdivr_d(Address src); + void fdiv_s(Address src); + + void fdivra(int i); // "alternate" reversed fdiv + + void fdivrp(int i = 1); + + void ffree(int i = 0); + + void fild_d(Address adr); + void fild_s(Address adr); + + void fincstp(); + + void finit(); + + void fist_s (Address adr); + void fistp_d(Address adr); + void fistp_s(Address adr); + + void fld1(); + + void fld_d(Address adr); + void fld_s(Address adr); + void fld_s(int index); + void fld_x(Address adr); // extended-precision (80-bit) format + + void fldcw(Address src); + + void fldenv(Address src); + + void fldlg2(); + + void fldln2(); + + void fldz(); + + void flog(); + void flog10(); + + void fmul(int i); + + void fmul_d(Address src); + void fmul_s(Address src); + + void fmula(int i); // "alternate" fmul + + void fmulp(int i = 1); + + void fnsave(Address dst); + + void fnstcw(Address src); + + void fnstsw_ax(); + + void fprem(); + void fprem1(); + + void frstor(Address src); + + void fsin(); + + void 
fsqrt(); + + void fst_d(Address adr); + void fst_s(Address adr); + + void fstp_d(Address adr); + void fstp_d(int index); + void fstp_s(Address adr); + void fstp_x(Address adr); // extended-precision (80-bit) format + + void fsub(int i); + void fsub_d(Address src); + void fsub_s(Address src); + + void fsuba(int i); // "alternate" fsub + + void fsubp(int i = 1); + + void fsubr(int i); + void fsubr_d(Address src); + void fsubr_s(Address src); + + void fsubra(int i); // "alternate" reversed fsub + + void fsubrp(int i = 1); + + void ftan(); + + void ftst(); + + void fucomi(int i = 1); + void fucomip(int i = 1); + + void fwait(); + + void fxch(int i = 1); + + void fxrstor(Address src); + + void fxsave(Address dst); + + void fyl2x(); + + void hlt(); + + void idivl(Register src); + + void idivq(Register src); + + void imull(Register dst, Register src); + void imull(Register dst, Register src, int value); + + void imulq(Register dst, Register src); + void imulq(Register dst, Register src, int value); + + + // jcc is the generic conditional branch generator to run- + // time routines, jcc is used for branches to labels. jcc + // takes a branch opcode (cc) and a label (L) and generates + // either a backward branch or a forward branch and links it + // to the label fixup chain. Usage: + // + // Label L; // unbound label + // jcc(cc, L); // forward branch to unbound label + // bind(L); // bind label to the current pc + // jcc(cc, L); // backward branch to bound label + // bind(L); // illegal: a label may be bound only once + // + // Note: The same Label can be used for forward and backward branches + // but it may be bound only once. + + void jcc(Condition cc, Label& L, + relocInfo::relocType rtype = relocInfo::none); + + // Conditional jump to a 8-bit offset to L. + // WARNING: be very careful using this for forward jumps. If the label is + // not bound within an 8-bit offset of this instruction, a run-time error + // will occur. + void jccb(Condition cc, Label& L); + + void jmp(Address entry); // pc <- entry + + // Label operations & relative jumps (PPUM Appendix D) + void jmp(Label& L, relocInfo::relocType rtype = relocInfo::none); // unconditional jump to L + + void jmp(Register entry); // pc <- entry + + // Unconditional 8-bit offset jump to L. + // WARNING: be very careful using this for forward jumps. If the label is + // not bound within an 8-bit offset of this instruction, a run-time error + // will occur. + void jmpb(Label& L); + + void ldmxcsr( Address src ); + + void leal(Register dst, Address src); + + void leaq(Register dst, Address src); + + void lfence() { + emit_byte(0x0F); + emit_byte(0xAE); + emit_byte(0xE8); + } + + void lock(); + + enum Membar_mask_bits { + StoreStore = 1 << 3, + LoadStore = 1 << 2, + StoreLoad = 1 << 1, + LoadLoad = 1 << 0 + }; + + // Serializes memory. 
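The Membar_mask_bits constraints above are combined bit-wise by callers, and the membar() that follows only has to materialize a fence for orderings that x86 does not already guarantee. A minimal usage sketch, assuming a MacroAssembler* named masm (the receiver and the lock-release scenario are illustrative, not part of this patch):

  // Request a StoreLoad barrier, e.g. after releasing a lock word, so the
  // store is globally visible before any later load on this thread.
  masm->membar(Assembler::Membar_mask_bits(Assembler::StoreLoad | Assembler::LoadLoad));
  // As the implementation below shows, only the StoreLoad bit actually
  // forces an mfence; the remaining constraints fall out of x86 ordering.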
+ void membar(Membar_mask_bits order_constraint) { + // We only have to handle StoreLoad and LoadLoad + if (order_constraint & StoreLoad) { + // MFENCE subsumes LFENCE + mfence(); + } /* [jk] not needed currently: else if (order_constraint & LoadLoad) { + lfence(); + } */ + } + + void mfence(); + + // Moves + + void mov64(Register dst, int64_t imm64); + + void movb(Address dst, Register src); + void movb(Address dst, int imm8); + void movb(Register dst, Address src); + + void movdl(XMMRegister dst, Register src); + void movdl(Register dst, XMMRegister src); + + // Move Double Quadword + void movdq(XMMRegister dst, Register src); + void movdq(Register dst, XMMRegister src); + + // Move Aligned Double Quadword + void movdqa(Address dst, XMMRegister src); + void movdqa(XMMRegister dst, Address src); + void movdqa(XMMRegister dst, XMMRegister src); + + // Move Unaligned Double Quadword + void movdqu(Address dst, XMMRegister src); + void movdqu(XMMRegister dst, Address src); + void movdqu(XMMRegister dst, XMMRegister src); + + void movl(Register dst, int32_t imm32); + void movl(Address dst, int32_t imm32); + void movl(Register dst, Register src); + void movl(Register dst, Address src); + void movl(Address dst, Register src); + + // These dummies prevent using movl from converting a zero (like NULL) into Register + // by giving the compiler two choices it can't resolve + + void movl(Address dst, void* junk); + void movl(Register dst, void* junk); + +#ifdef _LP64 + void movq(Register dst, Register src); + void movq(Register dst, Address src); + void movq(Address dst, Register src); +#endif + + void movq(Address dst, MMXRegister src ); + void movq(MMXRegister dst, Address src ); + +#ifdef _LP64 + // These dummies prevent using movq from converting a zero (like NULL) into Register + // by giving the compiler two choices it can't resolve + + void movq(Address dst, void* dummy); + void movq(Register dst, void* dummy); +#endif + + // Move Quadword + void movq(Address dst, XMMRegister src); + void movq(XMMRegister dst, Address src); + + void movsbl(Register dst, Address src); + void movsbl(Register dst, Register src); + +#ifdef _LP64 + // Move signed 32bit immediate to 64bit extending sign + void movslq(Address dst, int32_t imm64); + void movslq(Register dst, int32_t imm64); + + void movslq(Register dst, Address src); + void movslq(Register dst, Register src); + void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous +#endif + + void movswl(Register dst, Address src); + void movswl(Register dst, Register src); + + void movw(Address dst, int imm16); + void movw(Register dst, Address src); + void movw(Address dst, Register src); + + void movzbl(Register dst, Address src); + void movzbl(Register dst, Register src); + + void movzwl(Register dst, Address src); + void movzwl(Register dst, Register src); + + void mull(Address src); + void mull(Register src); + + // Multiply Scalar Double-Precision Floating-Point Values + void mulsd(XMMRegister dst, Address src); + void mulsd(XMMRegister dst, XMMRegister src); + + // Multiply Scalar Single-Precision Floating-Point Values + void mulss(XMMRegister dst, Address src); + void mulss(XMMRegister dst, XMMRegister src); + + void negl(Register dst); + +#ifdef _LP64 + void negq(Register dst); +#endif + + void nop(int i = 1); + + void notl(Register dst); + +#ifdef _LP64 + void notq(Register dst); +#endif + + void orl(Address dst, int32_t imm32); + void orl(Register dst, int32_t imm32); + void orl(Register dst, Address src); + void 
orl(Register dst, Register src); + + void orq(Address dst, int32_t imm32); + void orq(Register dst, int32_t imm32); + void orq(Register dst, Address src); + void orq(Register dst, Register src); + + void popl(Address dst); + +#ifdef _LP64 + void popq(Address dst); +#endif + + // Prefetches (SSE, SSE2, 3DNOW only) + + void prefetchnta(Address src); + void prefetchr(Address src); + void prefetcht0(Address src); + void prefetcht1(Address src); + void prefetcht2(Address src); + void prefetchw(Address src); + + // Shuffle Packed Doublewords + void pshufd(XMMRegister dst, XMMRegister src, int mode); + void pshufd(XMMRegister dst, Address src, int mode); + + // Shuffle Packed Low Words + void pshuflw(XMMRegister dst, XMMRegister src, int mode); + void pshuflw(XMMRegister dst, Address src, int mode); + + // Shift Right Logical Quadword Immediate + void psrlq(XMMRegister dst, int shift); + + // Interleave Low Bytes + void punpcklbw(XMMRegister dst, XMMRegister src); + + void pushl(Address src); + + void pushq(Address src); + + // Xor Packed Byte Integer Values + void pxor(XMMRegister dst, Address src); + void pxor(XMMRegister dst, XMMRegister src); + + void rcll(Register dst, int imm8); + + void rclq(Register dst, int imm8); + + void ret(int imm16); + + void sahf(); + + void sarl(Register dst, int imm8); + void sarl(Register dst); + + void sarq(Register dst, int imm8); + void sarq(Register dst); + + void sbbl(Address dst, int32_t imm32); + void sbbl(Register dst, int32_t imm32); + void sbbl(Register dst, Address src); + void sbbl(Register dst, Register src); + + void sbbq(Address dst, int32_t imm32); + void sbbq(Register dst, int32_t imm32); + void sbbq(Register dst, Address src); + void sbbq(Register dst, Register src); + + void setb(Condition cc, Register dst); + + void shldl(Register dst, Register src); + + void shll(Register dst, int imm8); + void shll(Register dst); + + void shlq(Register dst, int imm8); + void shlq(Register dst); + + void shrdl(Register dst, Register src); + + void shrl(Register dst, int imm8); + void shrl(Register dst); + + void shrq(Register dst, int imm8); + void shrq(Register dst); + + void smovl(); // QQQ generic? 
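The declarations in this block stay one-to-one with machine instructions; anything smarter belongs in MacroAssembler further down. As a small hedged sketch of how the raw primitives compose (the _masm receiver and the choice of rax/rdx are assumptions for illustration only), the branch-free absolute-value idiom for the 32-bit value in eax can be emitted entirely from instructions declared here:

  // abs(eax) without a branch: cdql sign-extends eax into edx, leaving
  // edx = 0 for non-negative values and edx = -1 for negative ones.
  _masm->cdql();            // edx = sign mask of eax
  _masm->xorl(rax, rdx);    // complement eax if it was negative
  _masm->subl(rax, rdx);    // add 1 if it was negative, so eax = |eax|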
+ + // Compute Square Root of Scalar Double-Precision Floating-Point Value + void sqrtsd(XMMRegister dst, Address src); + void sqrtsd(XMMRegister dst, XMMRegister src); + + void std() { emit_byte(0xfd); } + + void stmxcsr( Address dst ); + + void subl(Address dst, int32_t imm32); + void subl(Address dst, Register src); + void subl(Register dst, int32_t imm32); + void subl(Register dst, Address src); + void subl(Register dst, Register src); + + void subq(Address dst, int32_t imm32); + void subq(Address dst, Register src); + void subq(Register dst, int32_t imm32); + void subq(Register dst, Address src); + void subq(Register dst, Register src); + + + // Subtract Scalar Double-Precision Floating-Point Values + void subsd(XMMRegister dst, Address src); + void subsd(XMMRegister dst, XMMRegister src); + + // Subtract Scalar Single-Precision Floating-Point Values + void subss(XMMRegister dst, Address src); + void subss(XMMRegister dst, XMMRegister src); + + void testb(Register dst, int imm8); + + void testl(Register dst, int32_t imm32); + void testl(Register dst, Register src); + void testl(Register dst, Address src); + + void testq(Register dst, int32_t imm32); + void testq(Register dst, Register src); + + + // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS + void ucomisd(XMMRegister dst, Address src); + void ucomisd(XMMRegister dst, XMMRegister src); + + // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS + void ucomiss(XMMRegister dst, Address src); + void ucomiss(XMMRegister dst, XMMRegister src); + + void xaddl(Address dst, Register src); + + void xaddq(Address dst, Register src); + + void xchgl(Register reg, Address adr); + void xchgl(Register dst, Register src); + + void xchgq(Register reg, Address adr); + void xchgq(Register dst, Register src); + + void xorl(Register dst, int32_t imm32); + void xorl(Register dst, Address src); + void xorl(Register dst, Register src); + + void xorq(Register dst, Address src); + void xorq(Register dst, Register src); + + // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values + void xorpd(XMMRegister dst, Address src); + void xorpd(XMMRegister dst, XMMRegister src); + + // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values + void xorps(XMMRegister dst, Address src); + void xorps(XMMRegister dst, XMMRegister src); + + void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0 +}; + + +// MacroAssembler extends Assembler by frequently used macros. +// +// Instructions for which a 'better' code sequence exists depending +// on arguments should also go in here. + +class MacroAssembler: public Assembler { + friend class LIR_Assembler; + friend class Runtime1; // as_Address() + protected: + + Address as_Address(AddressLiteral adr); + Address as_Address(ArrayAddress adr); + + // Support for VM calls + // + // This is the base routine called by the different versions of call_VM_leaf. The interpreter + // may customize this version by overriding it for its purposes (e.g., to save/restore + // additional registers when doing a VM call). +#ifdef CC_INTERP + // c++ interpreter never wants to use interp_masm version of call_VM + #define VIRTUAL +#else + #define VIRTUAL virtual +#endif + + VIRTUAL void call_VM_leaf_base( + address entry_point, // the entry point + int number_of_arguments // the number of arguments to pop after the call + ); + + // This is the base routine called by the different versions of call_VM. 
The interpreter + // may customize this version by overriding it for its purposes (e.g., to save/restore + // additional registers when doing a VM call). + // + // If no java_thread register is specified (noreg) than rdi will be used instead. call_VM_base + // returns the register which contains the thread upon return. If a thread register has been + // specified, the return value will correspond to that register. If no last_java_sp is specified + // (noreg) than rsp will be used instead. + VIRTUAL void call_VM_base( // returns the register containing the thread upon return + Register oop_result, // where an oop-result ends up if any; use noreg otherwise + Register java_thread, // the thread if computed before ; use noreg otherwise + Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise + address entry_point, // the entry point + int number_of_arguments, // the number of arguments (w/o thread) to pop after the call + bool check_exceptions // whether to check for pending exceptions after return + ); + + // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. + // The implementation is only non-empty for the InterpreterMacroAssembler, + // as only the interpreter handles PopFrame and ForceEarlyReturn requests. + virtual void check_and_handle_popframe(Register java_thread); + virtual void check_and_handle_earlyret(Register java_thread); + + void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); + + // helpers for FPU flag access + // tmp is a temporary register, if none is available use noreg + void save_rax (Register tmp); + void restore_rax(Register tmp); + + public: + MacroAssembler(CodeBuffer* code) : Assembler(code) {} + + // Support for NULL-checks + // + // Generates code that causes a NULL OS exception if the content of reg is NULL. + // If the accessed location is M[reg + offset] and the offset is known, provide the + // offset. No explicit code generation is needed if the offset is within a certain + // range (0 <= offset <= page_size). + + void null_check(Register reg, int offset = -1); + static bool needs_explicit_null_check(intptr_t offset); + + // Required platform-specific helpers for Label::patch_instructions. + // They _shadow_ the declarations in AbstractAssembler, which are undefined. 
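To make the null_check()/needs_explicit_null_check() contract above concrete: a load from a small, known offset off a NULL base lands in the protected first page and faults on its own, so no extra code is needed; only large or unknown offsets force an explicit probe. A hedged sketch of typical use (the receiver, the registers and the offset 16 are assumptions for illustration):

  // Access obj_reg->field at byte offset 16 with a null check.  For an
  // in-range offset null_check() emits nothing and the load below doubles
  // as the check; otherwise it emits an access of M[obj_reg] that is
  // guaranteed to fault when obj_reg is NULL.
  masm->null_check(obj_reg, 16);
  masm->movl(dst_reg, Address(obj_reg, 16));   // the real field load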
+ void pd_patch_instruction(address branch, address target); +#ifndef PRODUCT + static void pd_print_patched_instruction(address branch); +#endif + + // The following 4 methods return the offset of the appropriate move instruction + + // Support for fast byte/word loading with zero extension (depending on particular CPU) + int load_unsigned_byte(Register dst, Address src); + int load_unsigned_word(Register dst, Address src); + + // Support for fast byte/word loading with sign extension (depending on particular CPU) + int load_signed_byte(Register dst, Address src); + int load_signed_word(Register dst, Address src); + + // Support for sign-extension (hi:lo = extend_sign(lo)) + void extend_sign(Register hi, Register lo); + + // Support for inc/dec with optimal instruction selection depending on value + + void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; } + void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; } + + void decrementl(Address dst, int value = 1); + void decrementl(Register reg, int value = 1); + + void decrementq(Register reg, int value = 1); + void decrementq(Address dst, int value = 1); + + void incrementl(Address dst, int value = 1); + void incrementl(Register reg, int value = 1); + + void incrementq(Register reg, int value = 1); + void incrementq(Address dst, int value = 1); + + + // Support optimal SSE move instructions. + void movflt(XMMRegister dst, XMMRegister src) { + if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; } + else { movss (dst, src); return; } + } + void movflt(XMMRegister dst, Address src) { movss(dst, src); } + void movflt(XMMRegister dst, AddressLiteral src); + void movflt(Address dst, XMMRegister src) { movss(dst, src); } + + void movdbl(XMMRegister dst, XMMRegister src) { + if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; } + else { movsd (dst, src); return; } + } + + void movdbl(XMMRegister dst, AddressLiteral src); + + void movdbl(XMMRegister dst, Address src) { + if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; } + else { movlpd(dst, src); return; } + } + void movdbl(Address dst, XMMRegister src) { movsd(dst, src); } + + void incrementl(AddressLiteral dst); + void incrementl(ArrayAddress dst); + + // Alignment + void align(int modulus); + + // Misc + void fat_nop(); // 5 byte nop + + // Stack frame creation/removal + void enter(); + void leave(); + + // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information) + // The pointer will be loaded into the thread register. + void get_thread(Register thread); + + + // Support for VM calls + // + // It is imperative that all calls into the VM are handled via the call_VM macros. + // They make sure that the stack linkage is setup correctly. call_VM's correspond + // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points. 
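A hedged usage sketch for the call_VM family declared just below (the entry point, result register and argument register are placeholders, not taken from this patch):

  // Call a runtime routine that takes one argument and returns an oop.
  // call_VM sets up the stack linkage / last_Java_frame, passes the
  // current thread implicitly, and optionally checks for pending
  // exceptions on return.
  address runtime_entry = /* address of some VM runtime entry */ NULL;  // placeholder
  masm->call_VM(rax,            // oop_result: where the returned oop lands (noreg if none)
                runtime_entry,  // entry_point
                rbx,            // arg_1 (the thread argument is not counted here)
                true);          // check_exceptions

call_VM_leaf, by contrast, is only for LEAF entry points that neither throw nor safepoint, so it skips the frame-anchor bookkeeping entirely.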
+ + + void call_VM(Register oop_result, + address entry_point, + bool check_exceptions = true); + void call_VM(Register oop_result, + address entry_point, + Register arg_1, + bool check_exceptions = true); + void call_VM(Register oop_result, + address entry_point, + Register arg_1, Register arg_2, + bool check_exceptions = true); + void call_VM(Register oop_result, + address entry_point, + Register arg_1, Register arg_2, Register arg_3, + bool check_exceptions = true); + + // Overloadings with last_Java_sp + void call_VM(Register oop_result, + Register last_java_sp, + address entry_point, + int number_of_arguments = 0, + bool check_exceptions = true); + void call_VM(Register oop_result, + Register last_java_sp, + address entry_point, + Register arg_1, bool + check_exceptions = true); + void call_VM(Register oop_result, + Register last_java_sp, + address entry_point, + Register arg_1, Register arg_2, + bool check_exceptions = true); + void call_VM(Register oop_result, + Register last_java_sp, + address entry_point, + Register arg_1, Register arg_2, Register arg_3, + bool check_exceptions = true); + + void call_VM_leaf(address entry_point, + int number_of_arguments = 0); + void call_VM_leaf(address entry_point, + Register arg_1); + void call_VM_leaf(address entry_point, + Register arg_1, Register arg_2); + void call_VM_leaf(address entry_point, + Register arg_1, Register arg_2, Register arg_3); + + // last Java Frame (fills frame anchor) + void set_last_Java_frame(Register thread, + Register last_java_sp, + Register last_java_fp, + address last_java_pc); + + // thread in the default location (r15_thread on 64bit) + void set_last_Java_frame(Register last_java_sp, + Register last_java_fp, + address last_java_pc); + + void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc); + + // thread in the default location (r15_thread on 64bit) + void reset_last_Java_frame(bool clear_fp, bool clear_pc); + + // Stores + void store_check(Register obj); // store check for obj - register is destroyed afterwards + void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed) + + void g1_write_barrier_pre(Register obj, +#ifndef _LP64 + Register thread, +#endif + Register tmp, + Register tmp2, + bool tosca_live); + void g1_write_barrier_post(Register store_addr, + Register new_val, +#ifndef _LP64 + Register thread, +#endif + Register tmp, + Register tmp2); + + + // split store_check(Register obj) to enhance instruction interleaving + void store_check_part_1(Register obj); + void store_check_part_2(Register obj); + + // C 'boolean' to Java boolean: x == 0 ? 
0 : 1 + void c2bool(Register x); + + // C++ bool manipulation + + void movbool(Register dst, Address src); + void movbool(Address dst, bool boolconst); + void movbool(Address dst, Register src); + void testbool(Register dst); + + // oop manipulations + void load_klass(Register dst, Register src); + void store_klass(Register dst, Register src); + + void load_prototype_header(Register dst, Register src); + +#ifdef _LP64 + void store_klass_gap(Register dst, Register src); + + void load_heap_oop(Register dst, Address src); + void store_heap_oop(Address dst, Register src); + void encode_heap_oop(Register r); + void decode_heap_oop(Register r); + void encode_heap_oop_not_null(Register r); + void decode_heap_oop_not_null(Register r); + void encode_heap_oop_not_null(Register dst, Register src); + void decode_heap_oop_not_null(Register dst, Register src); + + void set_narrow_oop(Register dst, jobject obj); + + // if heap base register is used - reinit it with the correct value + void reinit_heapbase(); +#endif // _LP64 + + // Int division/remainder for Java + // (as idivl, but checks for special case as described in JVM spec.) + // returns idivl instruction offset for implicit exception handling + int corrected_idivl(Register reg); + + // Long division/remainder for Java + // (as idivq, but checks for special case as described in JVM spec.) + // returns idivq instruction offset for implicit exception handling + int corrected_idivq(Register reg); + + void int3(); + + // Long operation macros for a 32bit cpu + // Long negation for Java + void lneg(Register hi, Register lo); + + // Long multiplication for Java + // (destroys contents of eax, ebx, ecx and edx) + void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y + + // Long shifts for Java + // (semantics as described in JVM spec.) + void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f) + void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f) + + // Long compare for Java + // (semantics as described in JVM spec.) + void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y) + + + // misc + + // Sign extension + void sign_extend_short(Register reg); + void sign_extend_byte(Register reg); + + // Division by power of 2, rounding towards 0 + void division_with_shift(Register reg, int shift_value); + + // Compares the top-most stack entries on the FPU stack and sets the eflags as follows: + // + // CF (corresponds to C0) if x < y + // PF (corresponds to C2) if unordered + // ZF (corresponds to C3) if x = y + // + // The arguments are in reversed order on the stack (i.e., top of stack is first argument). + // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code) + void fcmp(Register tmp); + // Variant of the above which allows y to be further down the stack + // and which only pops x and y if specified. If pop_right is + // specified then pop_left must also be specified. + void fcmp(Register tmp, int index, bool pop_left, bool pop_right); + + // Floating-point comparison for Java + // Compares the top-most stack entries on the FPU stack and stores the result in dst. + // The arguments are in reversed order on the stack (i.e., top of stack is first argument). + // (semantics as described in JVM spec.) + void fcmp2int(Register dst, bool unordered_is_less); + // Variant of the above which allows y to be further down the stack + // and which only pops x and y if specified. 
If pop_right is + // specified then pop_left must also be specified. + void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right); + + // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards) + // tmp is a temporary register, if none is available use noreg + void fremr(Register tmp); + + + // same as fcmp2int, but using SSE2 + void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); + void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); + + // Inlined sin/cos generator for Java; must not use CPU instruction + // directly on Intel as it does not have high enough precision + // outside of the range [-pi/4, pi/4]. Extra argument indicate the + // number of FPU stack slots in use; all but the topmost will + // require saving if a slow case is necessary. Assumes argument is + // on FP TOS; result is on FP TOS. No cpu registers are changed by + // this code. + void trigfunc(char trig, int num_fpu_regs_in_use = 1); + + // branch to L if FPU flag C2 is set/not set + // tmp is a temporary register, if none is available use noreg + void jC2 (Register tmp, Label& L); + void jnC2(Register tmp, Label& L); + + // Pop ST (ffree & fincstp combined) + void fpop(); + + // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack + void push_fTOS(); + + // pops double TOS element from CPU stack and pushes on FPU stack + void pop_fTOS(); + + void empty_FPU_stack(); + + void push_IU_state(); + void pop_IU_state(); + + void push_FPU_state(); + void pop_FPU_state(); + + void push_CPU_state(); + void pop_CPU_state(); + + // Round up to a power of two + void round_to(Register reg, int modulus); + + // Callee saved registers handling + void push_callee_saved_registers(); + void pop_callee_saved_registers(); + + // allocation + void eden_allocate( + Register obj, // result: pointer to object after successful allocation + Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise + int con_size_in_bytes, // object size in bytes if known at compile time + Register t1, // temp register + Label& slow_case // continuation point if fast allocation fails + ); + void tlab_allocate( + Register obj, // result: pointer to object after successful allocation + Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise + int con_size_in_bytes, // object size in bytes if known at compile time + Register t1, // temp register + Register t2, // temp register + Label& slow_case // continuation point if fast allocation fails + ); + void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); + + //---- + void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0 + + // Debugging + + // only if +VerifyOops + void verify_oop(Register reg, const char* s = "broken oop"); + void verify_oop_addr(Address addr, const char * s = "broken oop addr"); + + // only if +VerifyFPU + void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); + + // prints msg, dumps registers and stops execution + void stop(const char* msg); + + // prints msg and continues + void warn(const char* msg); + + static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg); + static void debug64(char* msg, int64_t pc, int64_t regs[]); + + void os_breakpoint(); + + void untested() { stop("untested"); } + + void unimplemented(const char* what = "") { char* b = 
new char[1024]; jio_snprintf(b, sizeof(b), "unimplemented: %s", what); stop(b); } + + void should_not_reach_here() { stop("should not reach here"); } + + void print_CPU_state(); + + // Stack overflow checking + void bang_stack_with_offset(int offset) { + // stack grows down, caller passes positive offset + assert(offset > 0, "must bang with negative offset"); + movl(Address(rsp, (-offset)), rax); + } + + // Writes to stack successive pages until offset reached to check for + // stack overflow + shadow pages. Also, clobbers tmp + void bang_stack_size(Register size, Register tmp); + + // Support for serializing memory accesses between threads + void serialize_memory(Register thread, Register tmp); + + void verify_tlab(); + + // Biased locking support + // lock_reg and obj_reg must be loaded up with the appropriate values. + // swap_reg must be rax, and is killed. + // tmp_reg is optional. If it is supplied (i.e., != noreg) it will + // be killed; if not supplied, push/pop will be used internally to + // allocate a temporary (inefficient, avoid if possible). + // Optional slow case is for implementations (interpreter and C1) which branch to + // slow case directly. Leaves condition codes set for C2's Fast_Lock node. + // Returns offset of first potentially-faulting instruction for null + // check info (currently consumed only by C1). If + // swap_reg_contains_mark is true then returns -1 as it is assumed + // the calling code has already passed any potential faults. + int biased_locking_enter(Register lock_reg, Register obj_reg, + Register swap_reg, Register tmp_reg, + bool swap_reg_contains_mark, + Label& done, Label* slow_case = NULL, + BiasedLockingCounters* counters = NULL); + void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done); + + + Condition negate_condition(Condition cond); + + // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit + // operands. In general the names are modified to avoid hiding the instruction in Assembler + // so that we don't need to implement all the varieties in the Assembler with trivial wrappers + // here in MacroAssembler. The major exception to this rule is call + + // Arithmetics + + + void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; } + void addptr(Address dst, Register src); + + void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); } + void addptr(Register dst, int32_t src); + void addptr(Register dst, Register src); + + void andptr(Register dst, int32_t src); + void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; } + + void cmp8(AddressLiteral src1, int imm); + + // renamed to drag out the casting of address to int32_t/intptr_t + void cmp32(Register src1, int32_t imm); + + void cmp32(AddressLiteral src1, int32_t imm); + // compare reg - mem, or reg - &mem + void cmp32(Register src1, AddressLiteral src2); + + void cmp32(Register src1, Address src2); + +#ifndef _LP64 + void cmpoop(Address dst, jobject obj); + void cmpoop(Register dst, jobject obj); +#endif // _LP64 + + // NOTE src2 must be the lval. 
This is NOT an mem-mem compare + void cmpptr(Address src1, AddressLiteral src2); + + void cmpptr(Register src1, AddressLiteral src2); + + void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } + void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } + // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } + + void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } + void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } + + // cmp64 to avoild hiding cmpq + void cmp64(Register src1, AddressLiteral src); + + void cmpxchgptr(Register reg, Address adr); + + void locked_cmpxchgptr(Register reg, AddressLiteral adr); + + + void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); } + + + void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); } + + void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); } + + void shlptr(Register dst, int32_t shift); + void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); } + + void shrptr(Register dst, int32_t shift); + void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); } + + void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); } + void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); } + + void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } + + void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } + void subptr(Register dst, int32_t src); + void subptr(Register dst, Register src); + + + void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } + void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } + + void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } + void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } + + void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; } + + + + // Helper functions for statistics gathering. + // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes. + void cond_inc32(Condition cond, AddressLiteral counter_addr); + // Unconditional atomic increment. 
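The *ptr wrappers above, like the cond_inc32/atomic_incl helpers that follow, let shared stub code be written once for both word sizes: each wrapper expands to the q-form on LP64 and the l-form on 32-bit through the LP64_ONLY/NOT_LP64 macros defined elsewhere in HotSpot (LP64_ONLY(code) expands to code and NOT_LP64(code) to nothing when _LP64 is set, and vice versa otherwise). A short illustrative caller, with the receiver and registers assumed for the sketch:

  // Pointer-sized load / compare / exchange written once for 32- and 64-bit.
  masm->movptr(rax, Address(rbx, 0));   // movq on LP64, movl on 32-bit
  masm->cmpptr(rax, rcx);               // cmpq / cmpl
  masm->xchgptr(rax, rcx);              // xchgq / xchgl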
+ void atomic_incl(AddressLiteral counter_addr); + + void lea(Register dst, AddressLiteral adr); + void lea(Address dst, AddressLiteral adr); + void lea(Register dst, Address adr) { Assembler::lea(dst, adr); } + + void leal32(Register dst, Address src) { leal(dst, src); } + + void test32(Register src1, AddressLiteral src2); + + void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } + void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } + void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } + + void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } + void testptr(Register src1, Register src2); + + void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } + void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } + + // Calls + + void call(Label& L, relocInfo::relocType rtype); + void call(Register entry); + + // NOTE: this call tranfers to the effective address of entry NOT + // the address contained by entry. This is because this is more natural + // for jumps/calls. + void call(AddressLiteral entry); + + // Jumps + + // NOTE: these jumps tranfer to the effective address of dst NOT + // the address contained by dst. This is because this is more natural + // for jumps/calls. + void jump(AddressLiteral dst); + void jump_cc(Condition cc, AddressLiteral dst); + + // 32bit can do a case table jump in one instruction but we no longer allow the base + // to be installed in the Address class. This jump will tranfers to the address + // contained in the location described by entry (not the address of entry) + void jump(ArrayAddress entry); + + // Floating + + void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } + void andpd(XMMRegister dst, AddressLiteral src); + + void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); } + void comiss(XMMRegister dst, AddressLiteral src); + + void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } + void comisd(XMMRegister dst, AddressLiteral src); + + void fldcw(Address src) { Assembler::fldcw(src); } + void fldcw(AddressLiteral src); + + void fld_s(int index) { Assembler::fld_s(index); } + void fld_s(Address src) { Assembler::fld_s(src); } + void fld_s(AddressLiteral src); + + void fld_d(Address src) { Assembler::fld_d(src); } + void fld_d(AddressLiteral src); + + void fld_x(Address src) { Assembler::fld_x(src); } + void fld_x(AddressLiteral src); + + void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } + void ldmxcsr(AddressLiteral src); + +private: + // these are private because users should be doing movflt/movdbl + + void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } + void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } + void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } + void movss(XMMRegister dst, AddressLiteral src); + + void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } + void movlpd(XMMRegister dst, AddressLiteral src); + +public: + + void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } + void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } + void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } + void movsd(XMMRegister dst, AddressLiteral src); + + void ucomiss(XMMRegister dst, XMMRegister src) { 
Assembler::ucomiss(dst, src); } + void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } + void ucomiss(XMMRegister dst, AddressLiteral src); + + void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } + void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } + void ucomisd(XMMRegister dst, AddressLiteral src); + + // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values + void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); } + void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } + void xorpd(XMMRegister dst, AddressLiteral src); + + // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values + void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); } + void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); } + void xorps(XMMRegister dst, AddressLiteral src); + + // Data + + void cmov(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmovl(cc, dst, src)); } + + void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmovl(cc, dst, src)); } + void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmovl(cc, dst, src)); } + + void movoop(Register dst, jobject obj); + void movoop(Address dst, jobject obj); + + void movptr(ArrayAddress dst, Register src); + // can this do an lea? + void movptr(Register dst, ArrayAddress src); + + void movptr(Register dst, Address src); + + void movptr(Register dst, AddressLiteral src); + + void movptr(Register dst, intptr_t src); + void movptr(Register dst, Register src); + void movptr(Address dst, intptr_t src); + + void movptr(Address dst, Register src); + +#ifdef _LP64 + // Generally the next two are only used for moving NULL + // Although there are situations in initializing the mark word where + // they could be used. They are dangerous. + + // They only exist on LP64 so that int32_t and intptr_t are not the same + // and we have ambiguous declarations. + + void movptr(Address dst, int32_t imm32); + void movptr(Register dst, int32_t imm32); +#endif // _LP64 + + // to avoid hiding movl + void mov32(AddressLiteral dst, Register src); + void mov32(Register dst, AddressLiteral src); + + // to avoid hiding movb + void movbyte(ArrayAddress dst, int src); + + // Can push value or effective address + void pushptr(AddressLiteral src); + + void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); } + void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); } + + void pushoop(jobject obj); + + // sign extend as need a l to ptr sized element + void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); } + void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } + + +#undef VIRTUAL + +}; + +/** + * class SkipIfEqual: + * + * Instantiating this class will result in assembly code being output that will + * jump around any code emitted between the creation of the instance and it's + * automatic destruction at the end of a scope block, depending on the value of + * the flag passed to the constructor, which will be checked at run-time. 
+ */ +class SkipIfEqual { + private: + MacroAssembler* _masm; + Label _label; + + public: + SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value); + ~SkipIfEqual(); +}; + +#ifdef ASSERT +inline bool AbstractAssembler::pd_check_instruction_mark() { return true; } +#endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/cpu/x86/vm/assembler_x86.inline.hpp 2009-08-01 04:17:33.723089509 +0100 @@ -0,0 +1,87 @@ +/* + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +inline void MacroAssembler::pd_patch_instruction(address branch, address target) { + unsigned char op = branch[0]; + assert(op == 0xE8 /* call */ || + op == 0xE9 /* jmp */ || + op == 0xEB /* short jmp */ || + (op & 0xF0) == 0x70 /* short jcc */ || + op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */, + "Invalid opcode at patch point"); + + if (op == 0xEB || (op & 0xF0) == 0x70) { + // short offset operators (jmp and jcc) + char* disp = (char*) &branch[1]; + int imm8 = target - (address) &disp[1]; + guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset"); + *disp = imm8; + } else { + int* disp = (int*) &branch[(op == 0x0F)? 
2: 1]; + int imm32 = target - (address) &disp[1]; + *disp = imm32; + } +} + +#ifndef PRODUCT +inline void MacroAssembler::pd_print_patched_instruction(address branch) { + const char* s; + unsigned char op = branch[0]; + if (op == 0xE8) { + s = "call"; + } else if (op == 0xE9 || op == 0xEB) { + s = "jmp"; + } else if ((op & 0xF0) == 0x70) { + s = "jcc"; + } else if (op == 0x0F) { + s = "jcc"; + } else { + s = "????"; + } + tty->print("%s (unresolved)", s); +} +#endif // ndef PRODUCT + +#ifndef _LP64 +inline int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { return reg_enc; } +inline int Assembler::prefixq_and_encode(int reg_enc) { return reg_enc; } + +inline int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) { return dst_enc << 3 | src_enc; } +inline int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { return dst_enc << 3 | src_enc; } + +inline void Assembler::prefix(Register reg) {} +inline void Assembler::prefix(Address adr) {} +inline void Assembler::prefixq(Address adr) {} + +inline void Assembler::prefix(Address adr, Register reg, bool byteinst) {} +inline void Assembler::prefixq(Address adr, Register reg) {} + +inline void Assembler::prefix(Address adr, XMMRegister reg) {} +#else +inline void Assembler::emit_long64(jlong x) { + *(jlong*) _code_pos = x; + _code_pos += sizeof(jlong); + code_section()->set_end(_code_pos); +} +#endif // _LP64 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os/linux/vm/dtraceJSDT_linux.cpp 2009-08-01 04:17:34.148488920 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_dtraceJSDT_linux.cpp.incl" + +int DTraceJSDT::pd_activate( + void* baseAddress, jstring module, + jint providers_count, JVM_DTraceProvider* providers) { + return -1; +} + +void DTraceJSDT::pd_dispose(int handle) { +} + +jboolean DTraceJSDT::pd_is_supported() { + return false; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os/solaris/vm/dtraceJSDT_solaris.cpp 2009-08-01 04:17:34.544094034 +0100 @@ -0,0 +1,685 @@ +/* + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_dtraceJSDT_solaris.cpp.incl" + +#ifdef HAVE_DTRACE_H + +#include +#include +#include +#include +#include + +static const char* devname = "/dev/dtrace/helper"; +static const char* olddevname = "/devices/pseudo/dtrace@0:helper"; + +static const char* string_sig = "uintptr_t"; +static const char* int_sig = "long"; +static const char* long_sig = "long long"; + +static void printDOFHelper(dof_helper_t* helper); + +static int dofhelper_open() { + int fd; + if ((fd = open64(devname, O_RDWR)) < 0) { + // Optimize next calls + devname = olddevname; + if ((fd = open64(devname, O_RDWR)) < 0) { + return -1; + } + } + return fd; +} + +static jint dof_register(jstring module, uint8_t* dof, void* modaddr) { + int probe; + dof_helper_t dh; + int fd; + + memset(&dh, 0, sizeof(dh)); + + char* module_name = java_lang_String::as_utf8_string( + JNIHandles::resolve_non_null(module)); + jio_snprintf(dh.dofhp_mod, sizeof(dh.dofhp_mod), "%s", module_name); + dh.dofhp_dof = (uint64_t)dof; + dh.dofhp_addr = (uint64_t)modaddr; + + fd = dofhelper_open(); + if (fd < 0) + return -1; + probe = ioctl(fd, DTRACEHIOC_ADDDOF, &dh); + close(fd); + if (PrintDTraceDOF) { + printDOFHelper(&dh); + tty->print_cr("DOF helper id = %d", probe); + } + return probe; +} + +int DTraceJSDT::pd_activate( + void* moduleBaseAddress, jstring module, + jint providers_count, JVM_DTraceProvider* providers) { + + // We need sections: + // (1) STRTAB + // ( + // (2) PROVIDER + // (3) PROBES + // (4) PROBOFFS + // (5) PROBARGS + // ) * Number of Providers + + // Type of sections we create + enum { + STRTAB = 0, + PROVIDERS = 1, + PROBES = 2, + PROBE_OFFSETS = 3, + ARG_OFFSETS = 4, + NUM_SECTIONS = 5 + }; + + static int alignment_for[NUM_SECTIONS] = { 1, 4, 8, 4, 1 }; + + ResourceMark rm; + + uint32_t num_sections = 1 + 4 * providers_count; + uint32_t offset = sizeof(dof_hdr_t) + (num_sections * sizeof(dof_sec_t)); + uint32_t* secoffs = NEW_RESOURCE_ARRAY(uint32_t, num_sections); + uint32_t* secsize = NEW_RESOURCE_ARRAY(uint32_t, num_sections); + + // Store offsets of all strings here in such order: + // zero-string (always 0) + // provider1-name + // probe1-function + // probe1-name + // arg-1 + // arg-2 + // ... + // probe2-function + // probe2-name + // arg-1 + // arg-2 + // provider2-name + // ... 
+ + uint32_t strcount = 0; + // Count the number of strings we'll need + for(int prvc = 0; prvc < providers_count; ++prvc) { + JVM_DTraceProvider* provider = &providers[prvc]; + // Provider name + ++strcount; + for(int prbc = 0; prbc < provider->probe_count; ++prbc) { + JVM_DTraceProbe* p = &(provider->probes[prbc]); + symbolOop sig = JNIHandles::resolve_jmethod_id(p->method)->signature(); + // function + name + one per argument + strcount += 2 + ArgumentCount(sig).size(); + } + } + + // Create place for string offsets + uint32_t* stroffs = NEW_RESOURCE_ARRAY(uint32_t, strcount + 1); + uint32_t string_index = 0; + uint32_t curstr = 0; + + // First we need an empty string: "" + stroffs[curstr++] = string_index; + string_index += strlen("") + 1; + + for(int prvc = 0; prvc < providers_count; ++prvc) { + JVM_DTraceProvider* provider = &providers[prvc]; + char* provider_name = java_lang_String::as_utf8_string( + JNIHandles::resolve_non_null(provider->name)); + stroffs[curstr++] = string_index; + string_index += strlen(provider_name) + 1; + + // All probes + for(int prbc = 0; prbc < provider->probe_count; ++prbc) { + JVM_DTraceProbe* p = &(provider->probes[prbc]); + + char* function = java_lang_String::as_utf8_string( + JNIHandles::resolve_non_null(p->function)); + stroffs[curstr++] = string_index; + string_index += strlen(function) + 1; + + char* name = java_lang_String::as_utf8_string( + JNIHandles::resolve_non_null(p->name)); + stroffs[curstr++] = string_index; + string_index += strlen(name) + 1; + + symbolOop sig = JNIHandles::resolve_jmethod_id(p->method)->signature(); + SignatureStream ss(sig); + for ( ; !ss.at_return_type(); ss.next()) { + BasicType bt = ss.type(); + const char* t = NULL; + if (bt == T_OBJECT && + ss.as_symbol_or_null() == vmSymbols::java_lang_String()) { + t = string_sig; + } else if (bt == T_LONG) { + t = long_sig; + } else { + t = int_sig; + } + stroffs[curstr++] = string_index; + string_index += strlen(t) + 1; + } + } + } + secoffs[STRTAB] = offset; + secsize[STRTAB] = string_index; + offset += string_index; + + // Calculate the size of the rest + for(int prvc = 0; prvc < providers_count; ++prvc) { + JVM_DTraceProvider* provider = &providers[prvc]; + size_t provider_sec = PROVIDERS + prvc * 4; + size_t probe_sec = PROBES + prvc * 4; + size_t probeoffs_sec = PROBE_OFFSETS + prvc * 4; + size_t argoffs_sec = ARG_OFFSETS + prvc * 4; + + // Allocate space for the provider data struction + secoffs[provider_sec] = align_size_up(offset, alignment_for[PROVIDERS]); + secsize[provider_sec] = sizeof(dof_provider_t); + offset = secoffs[provider_sec] + secsize[provider_sec]; + + // Allocate space for all the probes + secoffs[probe_sec] = align_size_up(offset, alignment_for[PROBES]); + secsize[probe_sec] = sizeof(dof_probe_t) * provider->probe_count; + offset = secoffs[probe_sec] + secsize[probe_sec]; + + // Allocate space for the probe offsets + secoffs[probeoffs_sec] = align_size_up(offset, alignment_for[PROBE_OFFSETS]); + secsize[probeoffs_sec] = sizeof(uint32_t) * provider->probe_count; + offset = secoffs[probeoffs_sec] + secsize[probeoffs_sec]; + + // We need number of arguments argoffs + uint32_t argscount = 0; + for(int prbc = 0; prbc < provider->probe_count; ++prbc) { + JVM_DTraceProbe* p = &(provider->probes[prbc]); + symbolOop sig = JNIHandles::resolve_jmethod_id(p->method)->signature(); + argscount += ArgumentCount(sig).size(); + } + secoffs[argoffs_sec] = align_size_up(offset, alignment_for[ARG_OFFSETS]); + secsize[argoffs_sec] = sizeof(uint8_t) * argscount; + offset = 
secoffs[argoffs_sec] + secsize[argoffs_sec]; + } + + uint32_t size = offset; + + uint8_t* dof = NEW_RESOURCE_ARRAY(uint8_t, size); + if (!dof) { + return -1; + } + memset((void*)dof, 0, size); + + // Fill memory with proper values + dof_hdr_t* hdr = (dof_hdr_t*)dof; + hdr->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; + hdr->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; + hdr->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; + hdr->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; + hdr->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; // No variants + hdr->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; // No variants + hdr->dofh_ident[DOF_ID_VERSION] = DOF_VERSION_1; // No variants + hdr->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION_2; // No variants + // all other fields of ident to zero + + hdr->dofh_flags = 0; + hdr->dofh_hdrsize = sizeof(dof_hdr_t); + hdr->dofh_secsize = sizeof(dof_sec_t); + hdr->dofh_secnum = num_sections; + hdr->dofh_secoff = sizeof(dof_hdr_t); + hdr->dofh_loadsz = size; + hdr->dofh_filesz = size; + + // First section: STRTAB + dof_sec_t* sec = (dof_sec_t*)(dof + sizeof(dof_hdr_t)); + sec->dofs_type = DOF_SECT_STRTAB; + sec->dofs_align = alignment_for[STRTAB]; + sec->dofs_flags = DOF_SECF_LOAD; + sec->dofs_entsize = 0; + sec->dofs_offset = secoffs[STRTAB]; + sec->dofs_size = secsize[STRTAB]; + // Make data for this section + char* str = (char*)(dof + sec->dofs_offset); + + *str = 0; str += 1; // "" + + // Run through all strings again + for(int prvc = 0; prvc < providers_count; ++prvc) { + JVM_DTraceProvider* provider = &providers[prvc]; + char* provider_name = java_lang_String::as_utf8_string( + JNIHandles::resolve_non_null(provider->name)); + strcpy(str, provider_name); + str += strlen(provider_name) + 1; + + // All probes + for(int prbc = 0; prbc < provider->probe_count; ++prbc) { + JVM_DTraceProbe* p = &(provider->probes[prbc]); + + char* function = java_lang_String::as_utf8_string( + JNIHandles::resolve_non_null(p->function)); + strcpy(str, function); + str += strlen(str) + 1; + + char* name = java_lang_String::as_utf8_string( + JNIHandles::resolve_non_null(p->name)); + strcpy(str, name); + str += strlen(name) + 1; + + symbolOop sig = JNIHandles::resolve_jmethod_id(p->method)->signature(); + SignatureStream ss(sig); + for ( ; !ss.at_return_type(); ss.next()) { + BasicType bt = ss.type(); + const char* t; + if (bt == T_OBJECT && + ss.as_symbol_or_null() == vmSymbols::java_lang_String()) { + t = string_sig; + } else if (bt == T_LONG) { + t = long_sig; + } else { + t = int_sig; + } + strcpy(str, t); + str += strlen(t) + 1; + } + } + } + + curstr = 1; + for(int prvc = 0; prvc < providers_count; ++prvc) { + JVM_DTraceProvider* provider = &providers[prvc]; + size_t provider_sec = PROVIDERS + prvc * 4; + size_t probe_sec = PROBES + prvc * 4; + size_t probeoffs_sec = PROBE_OFFSETS + prvc * 4; + size_t argoffs_sec = ARG_OFFSETS + prvc * 4; + + // PROVIDER /////////////////////////////////////////////////////////////// + // Section header + sec = (dof_sec_t*) + (dof + sizeof(dof_hdr_t) + sizeof(dof_sec_t) * provider_sec); + sec->dofs_type = DOF_SECT_PROVIDER; + sec->dofs_align = alignment_for[PROVIDERS]; + sec->dofs_flags = DOF_SECF_LOAD; + sec->dofs_entsize = 0; + sec->dofs_offset = secoffs[provider_sec]; + sec->dofs_size = secsize[provider_sec]; + // Make provider decriiption + dof_provider_t* prv = (dof_provider_t*)(dof + sec->dofs_offset); + prv->dofpv_strtab = STRTAB; + prv->dofpv_probes = probe_sec; + prv->dofpv_prargs = argoffs_sec; + prv->dofpv_proffs = probeoffs_sec; + prv->dofpv_name = 
stroffs[curstr++]; // Index in string table + prv->dofpv_provattr = DOF_ATTR( + provider->providerAttributes.nameStability, + provider->providerAttributes.dataStability, + provider->providerAttributes.dependencyClass); + prv->dofpv_modattr = DOF_ATTR( + provider->moduleAttributes.nameStability, + provider->moduleAttributes.dataStability, + provider->moduleAttributes.dependencyClass); + prv->dofpv_funcattr = DOF_ATTR( + provider->functionAttributes.nameStability, + provider->functionAttributes.dataStability, + provider->functionAttributes.dependencyClass); + prv->dofpv_nameattr = DOF_ATTR( + provider->nameAttributes.nameStability, + provider->nameAttributes.dataStability, + provider->nameAttributes.dependencyClass); + prv->dofpv_argsattr = DOF_ATTR( + provider->argsAttributes.nameStability, + provider->argsAttributes.dataStability, + provider->argsAttributes.dependencyClass); + + // PROBES ///////////////////////////////////////////////////////////////// + // Section header + sec = (dof_sec_t*) + (dof + sizeof(dof_hdr_t) + sizeof(dof_sec_t) * probe_sec); + sec->dofs_type = DOF_SECT_PROBES; + sec->dofs_align = alignment_for[PROBES]; + sec->dofs_flags = DOF_SECF_LOAD; + sec->dofs_entsize = sizeof(dof_probe_t); + sec->dofs_offset = secoffs[probe_sec]; + sec->dofs_size = secsize[probe_sec]; + // Make probes descriptions + uint32_t argsoffs = 0; + for(int prbc = 0; prbc < provider->probe_count; ++prbc) { + JVM_DTraceProbe* probe = &(provider->probes[prbc]); + methodOop m = JNIHandles::resolve_jmethod_id(probe->method); + int arg_count = ArgumentCount(m->signature()).size(); + assert(m->code() != NULL, "must have an nmethod"); + + dof_probe_t* prb = + (dof_probe_t*)(dof + sec->dofs_offset + prbc * sizeof(dof_probe_t)); + + prb->dofpr_addr = (uint64_t)m->code()->entry_point(); + prb->dofpr_func = stroffs[curstr++]; // Index in string table + prb->dofpr_name = stroffs[curstr++]; // Index in string table + prb->dofpr_nargv = stroffs[curstr ]; // Index in string table + // We spent siglen strings here + curstr += arg_count; + prb->dofpr_xargv = prb->dofpr_nargv; // Same bunch of strings + prb->dofpr_argidx = argsoffs; + prb->dofpr_offidx = prbc; + prb->dofpr_nargc = arg_count; + prb->dofpr_xargc = arg_count; + prb->dofpr_noffs = 1; // Number of offsets + // Next bunch of offsets + argsoffs += arg_count; + } + + // PROFFS ///////////////////////////////////////////////////////////////// + // Section header + sec = (dof_sec_t*) + (dof + sizeof(dof_hdr_t) + sizeof(dof_sec_t) * probeoffs_sec); + sec->dofs_type = DOF_SECT_PROFFS; + sec->dofs_align = alignment_for[PROBE_OFFSETS]; + sec->dofs_flags = DOF_SECF_LOAD; + sec->dofs_entsize = sizeof(uint32_t); + sec->dofs_offset = secoffs[probeoffs_sec]; + sec->dofs_size = secsize[probeoffs_sec]; + // Make offsets + for (int prbc = 0; prbc < provider->probe_count; ++prbc) { + uint32_t* pof = + (uint32_t*)(dof + sec->dofs_offset + sizeof(uint32_t) * prbc); + JVM_DTraceProbe* probe = &(provider->probes[prbc]); + methodOop m = JNIHandles::resolve_jmethod_id(probe->method); + *pof = m->code()->trap_offset(); + } + + // PRARGS ///////////////////////////////////////////////////////////////// + // Section header + sec = (dof_sec_t*) + (dof + sizeof(dof_hdr_t) + sizeof(dof_sec_t) * argoffs_sec); + sec->dofs_type = DOF_SECT_PRARGS; + sec->dofs_align = alignment_for[ARG_OFFSETS]; + sec->dofs_flags = DOF_SECF_LOAD; + sec->dofs_entsize = sizeof(uint8_t); + sec->dofs_offset = secoffs[argoffs_sec]; + sec->dofs_size = secsize[argoffs_sec]; + // Make arguments + uint8_t* par = 
(uint8_t*)(dof + sec->dofs_offset); + for (int prbc = 0; prbc < provider->probe_count; ++prbc) { + JVM_DTraceProbe* p = &(provider->probes[prbc]); + symbolOop sig = JNIHandles::resolve_jmethod_id(p->method)->signature(); + uint8_t count = (uint8_t)ArgumentCount(sig).size(); + for (uint8_t i = 0; i < count; ++i) { + *par++ = i; + } + } + } + + // Register module + return dof_register(module, dof, moduleBaseAddress); +} + + +void DTraceJSDT::pd_dispose(int handle) { + int fd; + if (handle == -1) { + return; + } + fd = dofhelper_open(); + if (fd < 0) + return; + ioctl(fd, DTRACEHIOC_REMOVE, handle); + close(fd); +} + +jboolean DTraceJSDT::pd_is_supported() { + int fd = dofhelper_open(); + if (fd < 0) { + return false; + } + close(fd); + return true; +} + +static const char* dofSecTypeFor(uint32_t type) { + switch (type) { + case 0: return "DOF_SECT_NONE"; + case 1: return "DOF_SECT_COMMENTS"; + case 2: return "DOF_SECT_SOURCE"; + case 3: return "DOF_SECT_ECBDESC"; + case 4: return "DOF_SECT_PROBEDESC"; + case 5: return "DOF_SECT_ACTDESC"; + case 6: return "DOF_SECT_DIFOHDR"; + case 7: return "DOF_SECT_DIF"; + case 8: return "DOF_SECT_STRTAB"; + case 9: return "DOF_SECT_VARTAB"; + case 10: return "DOF_SECT_RELTAB"; + case 11: return "DOF_SECT_TYPETAB"; + case 12: return "DOF_SECT_URELHDR"; + case 13: return "DOF_SECT_KRELHDR"; + case 14: return "DOF_SECT_OPTDESC"; + case 15: return "DOF_SECT_PROVIDER"; + case 16: return "DOF_SECT_PROBES"; + case 17: return "DOF_SECT_PRARGS"; + case 18: return "DOF_SECT_PROFFS"; + case 19: return "DOF_SECT_INTTAB"; + case 20: return "DOF_SECT_UTSNAME"; + case 21: return "DOF_SECT_XLTAB"; + case 22: return "DOF_SECT_XLMEMBERS"; + case 23: return "DOF_SECT_XLIMPORT"; + case 24: return "DOF_SECT_XLEXPORT"; + case 25: return "DOF_SECT_PREXPORT"; + case 26: return "DOF_SECT_PRENOFFS"; + default: return ""; + } +} + +static void printDOFStringTabSec(void* dof, dof_sec_t* sec) { + size_t tab = sec->dofs_offset; + size_t limit = sec->dofs_size; + tty->print_cr("// String Table:"); + for (size_t idx = 0; idx < limit; /*empty*/) { + char* str = ((char*)dof) + tab + idx; + tty->print_cr("// [0x%x + 0x%x] '%s'", tab, idx, str); + idx += strlen(str) + 1; + } +} + +static void printDOFProviderSec(void* dof, dof_sec_t* sec) { + dof_provider_t* prov = (dof_provider_t*)((char*)dof + sec->dofs_offset); + tty->print_cr("// dof_provider_t {"); + tty->print_cr("// dofpv_strtab = %d", prov->dofpv_strtab); + tty->print_cr("// dofpv_probes = %d", prov->dofpv_probes); + tty->print_cr("// dofpv_prargs = %d", prov->dofpv_prargs); + tty->print_cr("// dofpv_proffs = %d", prov->dofpv_proffs); + tty->print_cr("// dofpv_name = 0x%x", prov->dofpv_name); + tty->print_cr("// dofpv_provattr = 0x%08x", prov->dofpv_provattr); + tty->print_cr("// dofpv_modattr = 0x%08x", prov->dofpv_modattr); + tty->print_cr("// dofpv_funcattr = 0x%08x", prov->dofpv_funcattr); + tty->print_cr("// dofpv_nameattr = 0x%08x", prov->dofpv_nameattr); + tty->print_cr("// dofpv_argsattr = 0x%08x", prov->dofpv_argsattr); + tty->print_cr("// }"); +} + +static void printDOFProbesSec(void* dof, dof_sec_t* sec) { + size_t idx = sec->dofs_offset; + size_t limit = idx + sec->dofs_size; + for (size_t idx = sec->dofs_offset; idx < limit; idx += sec->dofs_entsize) { + dof_probe_t* prb = (dof_probe_t*)((char*)dof + idx); + tty->print_cr("// dof_probe_t {"); + tty->print_cr("// dofpr_addr = 0x%016llx", prb->dofpr_addr); + tty->print_cr("// dofpr_func = 0x%x", prb->dofpr_func); + tty->print_cr("// dofpr_name = 0x%x", prb->dofpr_name); + 
tty->print_cr("// dofpr_nargv = 0x%x", prb->dofpr_nargv); + tty->print_cr("// dofpr_xargv = 0x%x", prb->dofpr_xargv); + tty->print_cr("// dofpr_argidx = 0x%x", prb->dofpr_argidx); + tty->print_cr("// dofpr_offidx = 0x%x", prb->dofpr_offidx); + tty->print_cr("// dofpr_nargc = %d", prb->dofpr_nargc); + tty->print_cr("// dofpr_xargc = %d", prb->dofpr_xargc); + tty->print_cr("// dofpr_noffs = %d", prb->dofpr_noffs); + tty->print_cr("// }"); + } +} + +static void printDOFOffsetsSec(void* dof, dof_sec_t* sec) { + size_t tab = sec->dofs_offset; + size_t limit = sec->dofs_size; + tty->print_cr("// Offsets:"); + for (size_t idx = 0; idx < limit; idx += sec->dofs_entsize) { + uint32_t* off = (uint32_t*)((char*)dof + tab + idx); + tty->print_cr("// [0x%x + 0x%x]: %d", tab, idx, *off); + } +} + +static void printDOFArgsSec(void* dof, dof_sec_t* sec) { + size_t tab = sec->dofs_offset; + size_t limit = sec->dofs_size; + tty->print_cr("// Arguments:"); + for (size_t idx = 0; idx < limit; idx += sec->dofs_entsize) { + uint8_t* arg = (uint8_t*)((char*)dof + tab + idx); + tty->print_cr("// [0x%x + 0x%x]: %d", tab, idx, *arg); + } +} + +static void printDOFSection(void* dof, dof_sec_t* sec) { + tty->print_cr("// dof_sec_t {"); + tty->print_cr("// dofs_type = 0x%x /* %s */", + sec->dofs_type, dofSecTypeFor(sec->dofs_type)); + tty->print_cr("// dofs_align = %d", sec->dofs_align); + tty->print_cr("// dofs_flags = 0x%x", sec->dofs_flags); + tty->print_cr("// dofs_entsize = %d", sec->dofs_entsize); + tty->print_cr("// dofs_offset = 0x%llx", sec->dofs_offset); + tty->print_cr("// dofs_size = %lld", sec->dofs_size); + tty->print_cr("// }"); + switch (sec->dofs_type) { + case DOF_SECT_STRTAB: printDOFStringTabSec(dof, sec); break; + case DOF_SECT_PROVIDER: printDOFProviderSec(dof, sec); break; + case DOF_SECT_PROBES: printDOFProbesSec(dof, sec); break; + case DOF_SECT_PROFFS: printDOFOffsetsSec(dof, sec); break; + case DOF_SECT_PRARGS: printDOFArgsSec(dof, sec); break; + default: tty->print_cr("//
"); + } +} + +static void printDOFHeader(dof_hdr_t* hdr) { + tty->print_cr("// dof_hdr_t {"); + tty->print_cr("// dofh_ident[DOF_ID_MAG0] = 0x%x", + hdr->dofh_ident[DOF_ID_MAG0]); + tty->print_cr("// dofh_ident[DOF_ID_MAG1] = 0x%x", + hdr->dofh_ident[DOF_ID_MAG1]); + tty->print_cr("// dofh_ident[DOF_ID_MAG2] = 0x%x", + hdr->dofh_ident[DOF_ID_MAG2]); + tty->print_cr("// dofh_ident[DOF_ID_MAG3] = 0x%x", + hdr->dofh_ident[DOF_ID_MAG3]); + tty->print_cr("// dofh_ident[DOF_ID_MODEL] = 0x%x", + hdr->dofh_ident[DOF_ID_MODEL]); + tty->print_cr("// dofh_ident[DOF_ID_ENCODING] = 0x%x", + hdr->dofh_ident[DOF_ID_ENCODING]); + tty->print_cr("// dofh_ident[DOF_ID_VERSION] = 0x%x", + hdr->dofh_ident[DOF_ID_VERSION]); + tty->print_cr("// dofh_ident[DOF_ID_DIFVERS] = 0x%x", + hdr->dofh_ident[DOF_ID_DIFVERS]); + tty->print_cr("// dofh_flags = 0x%x", hdr->dofh_flags); + tty->print_cr("// dofh_hdrsize = %d", hdr->dofh_hdrsize); + tty->print_cr("// dofh_secsize = %d", hdr->dofh_secsize); + tty->print_cr("// dofh_secnum = %d", hdr->dofh_secnum); + tty->print_cr("// dofh_secoff = %lld", hdr->dofh_secoff); + tty->print_cr("// dofh_loadsz = %lld", hdr->dofh_loadsz); + tty->print_cr("// dofh_filesz = %lld", hdr->dofh_filesz); + tty->print_cr("// }"); +} + +static void printDOF(void* dof) { + dof_hdr_t* hdr = (dof_hdr_t*)dof; + printDOFHeader(hdr); + for (int i = 0; i < hdr->dofh_secnum; ++i) { + dof_sec_t* sec = + (dof_sec_t*)((char*)dof + sizeof(dof_hdr_t) + i * sizeof(dof_sec_t)); + tty->print_cr("// [Section #%d]", i); + printDOFSection(dof, sec); + } +} + +/** + * This prints out hex data in a 'windbg' or 'xxd' form, where each line is: + * : 8 * + * example: + * 0000000: 7f44 4f46 0102 0102 0000 0000 0000 0000 .DOF............ + * 0000010: 0000 0000 0000 0040 0000 0020 0000 0005 .......@... .... + * 0000020: 0000 0000 0000 0040 0000 0000 0000 015d .......@.......] + * ... + */ +static void printDOFRawData(void* dof) { + size_t size = ((dof_hdr_t*)dof)->dofh_loadsz; + size_t limit = (size + 16) / 16 * 16; + for (size_t i = 0; i < limit; ++i) { + if (i % 16 == 0) { + tty->print("%07x:", i); + } + if (i % 2 == 0) { + tty->print(" "); + } + if (i < size) { + tty->print("%02x", ((unsigned char*)dof)[i]); + } else { + tty->print(" "); + } + if ((i + 1) % 16 == 0) { + tty->print(" "); + for (size_t j = 0; j < 16; ++j) { + size_t idx = i + j - 15; + char c = ((char*)dof)[idx]; + if (idx < size) { + tty->print("%c", c >= 32 && c <= 126 ? c : '.'); + } + } + tty->print_cr(""); + } + } + tty->print_cr(""); +} + +static void printDOFHelper(dof_helper_t* helper) { + tty->print_cr("// dof_helper_t {"); + tty->print_cr("// dofhp_mod = \"%s\"", helper->dofhp_mod); + tty->print_cr("// dofhp_addr = 0x%016llx", helper->dofhp_addr); + tty->print_cr("// dofhp_dof = 0x%016llx", helper->dofhp_dof); + printDOF((void*)helper->dofhp_dof); + tty->print_cr("// }"); + printDOFRawData((void*)helper->dofhp_dof); +} + +#else // ndef HAVE_DTRACE_H + +// Get here if we're not building on at least Solaris 10 +int DTraceJSDT::pd_activate( + void* baseAddress, jstring module, + jint provider_count, JVM_DTraceProvider* providers) { + return -1; +} + +void DTraceJSDT::pd_dispose(int handle) { +} + +jboolean DTraceJSDT::pd_is_supported() { + return false; +} +#endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os/windows/vm/dtraceJSDT_windows.cpp 2009-08-01 04:17:34.968075425 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_dtraceJSDT_windows.cpp.incl" + +int DTraceJSDT::pd_activate( + void* baseAddress, jstring module, + jint providers_count, JVM_DTraceProvider* providers) { + return -1; +} + +void DTraceJSDT::pd_dispose(int handle) { +} + +jboolean DTraceJSDT::pd_is_supported() { + return false; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp 2009-08-01 04:17:35.443902152 +0100 @@ -0,0 +1,45 @@ +/* + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_assembler_linux_sparc.cpp.incl" + +#include + +void MacroAssembler::read_ccr_trap(Register ccr_save) { + // No implementation + breakpoint_trap(); +} + +void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) { + // No implementation + breakpoint_trap(); +} + +void MacroAssembler::flush_windows_trap() { trap(SP_TRAP_FWIN); } +void MacroAssembler::clean_windows_trap() { trap(SP_TRAP_CWIN); } + +// Use software breakpoint trap until we figure out how to do this on Linux +void MacroAssembler::get_psr_trap() { trap(SP_TRAP_SBPT); } +void MacroAssembler::set_psr_trap() { trap(SP_TRAP_SBPT); } --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp 2009-08-01 04:17:35.852465837 +0100 @@ -0,0 +1,206 @@ +/* + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// Implementation of class atomic + +inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } +inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } +inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } +inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } +inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } +inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } + +inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } +inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } +inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } +inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } +inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } +inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } + +inline void Atomic::inc (volatile jint* dest) { (void)add (1, dest); } +inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); } +inline void Atomic::inc_ptr(volatile void* dest) { (void)add_ptr(1, dest); } + +inline void Atomic::dec (volatile jint* dest) { (void)add (-1, dest); } +inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); } +inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); } + +inline jint Atomic::add (jint add_value, volatile jint* dest) { + intptr_t rv; + __asm__ volatile( + "1: \n\t" + " ld [%2], %%o2\n\t" + " add %1, %%o2, %%o3\n\t" + " cas [%2], %%o2, %%o3\n\t" + " cmp %%o2, %%o3\n\t" + " bne 1b\n\t" + " nop\n\t" + " add %1, %%o2, %0\n\t" + : "=r" (rv) + : "r" (add_value), "r" (dest) + : "memory", "o2", "o3"); + return rv; +} + +inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { + intptr_t rv; +#ifdef _LP64 + __asm__ volatile( + "1: \n\t" + " ldx [%2], %%o2\n\t" + " add %0, %%o2, %%o3\n\t" + " casx [%2], %%o2, %%o3\n\t" + " cmp %%o2, %%o3\n\t" + " bne %%xcc, 1b\n\t" + " nop\n\t" + " add %0, %%o2, %0\n\t" + : "=r" (rv) + : "r" (add_value), "r" (dest) + : "memory", "o2", "o3"); +#else + __asm__ volatile( + "1: \n\t" + " ld [%2], %%o2\n\t" + " add %1, %%o2, %%o3\n\t" + " cas [%2], %%o2, %%o3\n\t" + " cmp %%o2, %%o3\n\t" + " bne 1b\n\t" + " 
nop\n\t" + " add %1, %%o2, %0\n\t" + : "=r" (rv) + : "r" (add_value), "r" (dest) + : "memory", "o2", "o3"); +#endif // _LP64 + return rv; +} + +inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { + return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest); +} + + +inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { + intptr_t rv = exchange_value; + __asm__ volatile( + " swap [%2],%1\n\t" + : "=r" (rv) + : "0" (exchange_value) /* we use same register as for return value */, "r" (dest) + : "memory"); + return rv; +} + +inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { + intptr_t rv = exchange_value; +#ifdef _LP64 + __asm__ volatile( + "1:\n\t" + " mov %1, %%o3\n\t" + " ldx [%2], %%o2\n\t" + " casx [%2], %%o2, %%o3\n\t" + " cmp %%o2, %%o3\n\t" + " bne %%xcc, 1b\n\t" + " nop\n\t" + " mov %%o2, %0\n\t" + : "=r" (rv) + : "r" (exchange_value), "r" (dest) + : "memory", "o2", "o3"); +#else + __asm__ volatile( + "swap [%2],%1\n\t" + : "=r" (rv) + : "0" (exchange_value) /* we use same register as for return value */, "r" (dest) + : "memory"); +#endif // _LP64 + return rv; +} + +inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { + return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); +} + + +inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) { + jint rv; + __asm__ volatile( + " cas [%2], %3, %0" + : "=r" (rv) + : "0" (exchange_value), "r" (dest), "r" (compare_value) + : "memory"); + return rv; +} + +inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) { +#ifdef _LP64 + jlong rv; + __asm__ volatile( + " casx [%2], %3, %0" + : "=r" (rv) + : "0" (exchange_value), "r" (dest), "r" (compare_value) + : "memory"); + return rv; +#else + assert(VM_Version::v9_instructions_work(), "cas only supported on v9"); + volatile jlong_accessor evl, cvl, rv; + evl.long_value = exchange_value; + cvl.long_value = compare_value; + + __asm__ volatile( + " sllx %2, 32, %2\n\t" + " srl %3, 0, %3\n\t" + " or %2, %3, %2\n\t" + " sllx %5, 32, %5\n\t" + " srl %6, 0, %6\n\t" + " or %5, %6, %5\n\t" + " casx [%4], %5, %2\n\t" + " srl %2, 0, %1\n\t" + " srlx %2, 32, %0\n\t" + : "=r" (rv.words[0]), "=r" (rv.words[1]) + : "r" (evl.words[0]), "r" (evl.words[1]), "r" (dest), "r" (cvl.words[0]), "r" (cvl.words[1]) + : "memory"); + + return rv.long_value; +#endif +} + +inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) { + intptr_t rv; +#ifdef _LP64 + __asm__ volatile( + " casx [%2], %3, %0" + : "=r" (rv) + : "0" (exchange_value), "r" (dest), "r" (compare_value) + : "memory"); +#else + __asm__ volatile( + " cas [%2], %3, %0" + : "=r" (rv) + : "0" (exchange_value), "r" (dest), "r" (compare_value) + : "memory"); +#endif // _LP64 + return rv; +} + +inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) { + return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp 2009-08-01 04:17:36.287431778 +0100 @@ -0,0 +1,34 @@ +/* + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// +// Sets the default values for platform dependent flags used by the +// runtime system. (see globals.hpp) +// + +define_pd_global(uintx, JVMInvokeMethodSlack, 12288); +define_pd_global(intx, CompilerThreadStackSize, 0); + +// Only used on 64 bit Windows platforms +define_pd_global(bool, UseVectoredExceptions, false); --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/linux_sparc.ad 2009-08-01 04:17:36.671218657 +0100 @@ -0,0 +1,27 @@ +// +// Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. +// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +// +// This code is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License version 2 only, as +// published by the Free Software Foundation. +// +// This code is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// version 2 for more details (a copy is included in the LICENSE file that +// accompanied this code). +// +// You should have received a copy of the GNU General Public License version +// 2 along with this work; if not, write to the Free Software Foundation, +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +// +// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +// CA 95054 USA or visit www.sun.com if you need additional information or +// have any questions. +// + +// +// + +// SPARC Linux Architecture Description File --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/linux_sparc.s 2009-08-01 04:17:37.072206042 +0100 @@ -0,0 +1,105 @@ +# +# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). 
+# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# + + # Prototype: int SafeFetch32 (int * adr, int ErrValue) + # The "ld" at Fetch32 is potentially faulting instruction. + # If the instruction traps the trap handler will arrange + # for control to resume at Fetch32Resume. + # By convention with the trap handler we ensure there is a non-CTI + # instruction in the trap shadow. + + + .globl SafeFetch32, Fetch32PFI, Fetch32Resume + .globl SafeFetchN + .align 32 + .type SafeFetch32,@function +SafeFetch32: + mov %o0, %g1 + mov %o1, %o0 +Fetch32PFI: + # <-- Potentially faulting instruction + ld [%g1], %o0 +Fetch32Resume: + nop + retl + nop + + .globl SafeFetchN, FetchNPFI, FetchNResume + .type SafeFetchN,@function + .align 32 +SafeFetchN: + mov %o0, %g1 + mov %o1, %o0 +FetchNPFI: + ldn [%g1], %o0 +FetchNResume: + nop + retl + nop + + # Possibilities: + # -- membar + # -- CAS (SP + BIAS, G0, G0) + # -- wr %g0, %asi + + .globl SpinPause + .type SpinPause,@function + .align 32 +SpinPause: + retl + mov %g0, %o0 + + .globl _Copy_conjoint_jlongs_atomic + .type _Copy_conjoint_jlongs_atomic,@function +_Copy_conjoint_jlongs_atomic: + cmp %o0, %o1 + bleu 4f + sll %o2, 3, %o4 + ba 2f + 1: + subcc %o4, 8, %o4 + std %o2, [%o1] + add %o0, 8, %o0 + add %o1, 8, %o1 + 2: + bge,a 1b + ldd [%o0], %o2 + ba 5f + nop + 3: + std %o2, [%o1+%o4] + 4: + subcc %o4, 8, %o4 + bge,a 3b + ldd [%o0+%o4], %o2 + 5: + retl + nop + + + .globl _flush_reg_windows + .align 32 + _flush_reg_windows: + ta 0x03 + retl + mov %fp, %o0 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/orderAccess_linux_sparc.inline.hpp 2009-08-01 04:17:37.502029396 +0100 @@ -0,0 +1,102 @@ +/* + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// Implementation of class OrderAccess. + +// Assume TSO. 
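+// Under TSO the only hardware reordering is an earlier store passing a
+// later load, so loadload/storestore/loadstore can be satisfied by the
+// compiler-only barriers in acquire()/release(), while storeload() must
+// issue a real "membar #StoreLoad".  Illustrative sketch (hypothetical
+// flag variables) of the pattern that needs it:
+//
+//   volatile jint a = 0, b = 0;
+//   // Thread 1:                   // Thread 2:
+//   a = 1;                         b = 1;
+//   OrderAccess::storeload();      OrderAccess::storeload();
+//   jint r1 = b;                   jint r2 = a;
+//   // Without the membar, r1 == 0 and r2 == 0 could both be observed.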
+ +inline void OrderAccess::loadload() { acquire(); } +inline void OrderAccess::storestore() { release(); } +inline void OrderAccess::loadstore() { acquire(); } +inline void OrderAccess::storeload() { fence(); } + +inline void OrderAccess::acquire() { + __asm__ volatile ("nop" : : :); +} + +inline void OrderAccess::release() { + jint* dummy = (jint*)&dummy; + __asm__ volatile("stw %%g0, [%0]" : : "r" (dummy) : "memory"); +} + +inline void OrderAccess::fence() { + __asm__ volatile ("membar #StoreLoad" : : :); +} + +inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } +inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; } +inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; } +inline jlong OrderAccess::load_acquire(volatile jlong* p) { return *p; } +inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; } +inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; } +inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } +inline julong OrderAccess::load_acquire(volatile julong* p) { return *p; } +inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } +inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; } + +inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; } +inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; } +inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; } + +inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; } +inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; } +inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; } +inline void OrderAccess::release_store(volatile jlong* p, jlong v) { *p = v; } +inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; } +inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; } +inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; } +inline void OrderAccess::release_store(volatile julong* p, julong v) { *p = v; } +inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; } +inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; } + +inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; } +inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; } + +inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; fence(); } +inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; fence(); } +inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; fence(); } +inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; fence(); } +inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; fence(); } +inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); } +inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; fence(); } +inline void OrderAccess::store_fence(julong* p, julong v) { *p = v; fence(); } +inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); } +inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); } + +inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); } +inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; fence(); } + +inline void 
OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { *p = v; fence(); } +inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { *p = v; fence(); } +inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { *p = v; fence(); } +inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { *p = v; fence(); } +inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { *p = v; fence(); } +inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); } +inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { *p = v; fence(); } +inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { *p = v; fence(); } +inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); } +inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); } + +inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { *p = v; fence(); } +inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { *(void* volatile *)p = v; fence(); } --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp 2009-08-01 04:17:37.946029001 +0100 @@ -0,0 +1,648 @@ +/* + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// do not include precompiled header file + +#include "incls/_os_linux_sparc.cpp.incl" + +// Linux/Sparc has rather obscure naming of registers in sigcontext +// different between 32 and 64 bits +#ifdef _LP64 +#define SIG_PC(x) ((x)->sigc_regs.tpc) +#define SIG_NPC(x) ((x)->sigc_regs.tnpc) +#define SIG_REGS(x) ((x)->sigc_regs) +#else +#define SIG_PC(x) ((x)->si_regs.pc) +#define SIG_NPC(x) ((x)->si_regs.npc) +#define SIG_REGS(x) ((x)->si_regs) +#endif + +// those are to reference registers in sigcontext +enum { + CON_G0 = 0, + CON_G1, + CON_G2, + CON_G3, + CON_G4, + CON_G5, + CON_G6, + CON_G7, + CON_O0, + CON_O1, + CON_O2, + CON_O3, + CON_O4, + CON_O5, + CON_O6, + CON_O7, +}; + +static inline void set_cont_address(sigcontext* ctx, address addr) { + SIG_PC(ctx) = (intptr_t)addr; + SIG_NPC(ctx) = (intptr_t)(addr+4); +} + +// For Forte Analyzer AsyncGetCallTrace profiling support - thread is +// currently interrupted by SIGPROF. +// os::Solaris::fetch_frame_from_ucontext() tries to skip nested +// signal frames. Currently we don't do that on Linux, so it's the +// same as os::fetch_frame_from_context(). 
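+// Illustrative sketch of the expected call pattern from a SIGPROF handler
+// (the handler and the JavaThread lookup are hypothetical; only the os::
+// entry points and frame constructor below are taken from this code):
+//
+//   void prof_handler(int sig, siginfo_t* info, void* ucVoid) {
+//     JavaThread* jt = /* current JavaThread */;
+//     ucontext_t* uc = (ucontext_t*) ucVoid;
+//     intptr_t* sp;
+//     intptr_t* fp;
+//     ExtendedPC epc = os::Linux::fetch_frame_from_ucontext(jt, uc, &sp, &fp);
+//     if (epc.pc() != NULL) {
+//       frame top(sp, frame::unpatchable, epc.pc());  // start walking here
+//     }
+//   }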
+ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread, + ucontext_t* uc, + intptr_t** ret_sp, + intptr_t** ret_fp) { + assert(thread != NULL, "just checking"); + assert(ret_sp != NULL, "just checking"); + assert(ret_fp != NULL, "just checking"); + + return os::fetch_frame_from_context(uc, ret_sp, ret_fp); +} + +ExtendedPC os::fetch_frame_from_context(void* ucVoid, + intptr_t** ret_sp, + intptr_t** ret_fp) { + ucontext_t* uc = (ucontext_t*) ucVoid; + ExtendedPC epc; + + if (uc != NULL) { + epc = ExtendedPC(os::Linux::ucontext_get_pc(uc)); + if (ret_sp) { + *ret_sp = os::Linux::ucontext_get_sp(uc); + } + if (ret_fp) { + *ret_fp = os::Linux::ucontext_get_fp(uc); + } + } else { + // construct empty ExtendedPC for return value checking + epc = ExtendedPC(NULL); + if (ret_sp) { + *ret_sp = (intptr_t*) NULL; + } + if (ret_fp) { + *ret_fp = (intptr_t*) NULL; + } + } + + return epc; +} + +frame os::fetch_frame_from_context(void* ucVoid) { + intptr_t* sp; + intptr_t* fp; + ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); + return frame(sp, fp, epc.pc()); +} + +frame os::get_sender_for_C_frame(frame* fr) { + return frame(fr->sender_sp(), fr->link(), fr->sender_pc()); +} + +frame os::current_frame() { + fprintf(stderr, "current_frame()"); + + intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()(); + frame myframe(sp, frame::unpatchable, + CAST_FROM_FN_PTR(address, os::current_frame)); + if (os::is_first_C_frame(&myframe)) { + // stack is not walkable + return frame(NULL, frame::unpatchable, NULL); + } else { + return os::get_sender_for_C_frame(&myframe); + } +} + +address os::current_stack_pointer() { + register void *sp __asm__ ("sp"); + return (address)sp; +} + +static void current_stack_region(address* bottom, size_t* size) { + if (os::Linux::is_initial_thread()) { + // initial thread needs special handling because pthread_getattr_np() + // may return bogus value. + *bottom = os::Linux::initial_thread_stack_bottom(); + *size = os::Linux::initial_thread_stack_size(); + } else { + pthread_attr_t attr; + + int rslt = pthread_getattr_np(pthread_self(), &attr); + + // JVM needs to know exact stack location, abort if it fails + if (rslt != 0) { + if (rslt == ENOMEM) { + vm_exit_out_of_memory(0, "pthread_getattr_np"); + } else { + fatal1("pthread_getattr_np failed with errno = %d", rslt); + } + } + + if (pthread_attr_getstack(&attr, (void**)bottom, size) != 0) { + fatal("Can not locate current stack attributes!"); + } + + pthread_attr_destroy(&attr); + } + assert(os::current_stack_pointer() >= *bottom && + os::current_stack_pointer() < *bottom + *size, "just checking"); +} + +address os::current_stack_base() { + address bottom; + size_t size; + current_stack_region(&bottom, &size); + return bottom + size; +} + +size_t os::current_stack_size() { + // stack size includes normal stack and HotSpot guard pages + address bottom; + size_t size; + current_stack_region(&bottom, &size); + return size; +} + +char* os::non_memory_address_word() { + // Must never look like an address returned by reserve_memory, + // even in its subfields (as defined by the CPU immediate fields, + // if the CPU splits constants across multiple instructions). + // On SPARC, 0 != %hi(any real address), because there is no + // allocation in the first 1Kb of the virtual address space. 
+ return (char*) 0; +} + +void os::initialize_thread() {} + +void os::print_context(outputStream *st, void *context) { + if (context == NULL) return; + + ucontext_t* uc = (ucontext_t*)context; + sigcontext* sc = (sigcontext*)context; + st->print_cr("Registers:"); + + st->print_cr(" O0=" INTPTR_FORMAT " O1=" INTPTR_FORMAT + " O2=" INTPTR_FORMAT " O3=" INTPTR_FORMAT, + SIG_REGS(sc).u_regs[CON_O0], + SIG_REGS(sc).u_regs[CON_O1], + SIG_REGS(sc).u_regs[CON_O2], + SIG_REGS(sc).u_regs[CON_O3]); + st->print_cr(" O4=" INTPTR_FORMAT " O5=" INTPTR_FORMAT + " O6=" INTPTR_FORMAT " O7=" INTPTR_FORMAT, + SIG_REGS(sc).u_regs[CON_O4], + SIG_REGS(sc).u_regs[CON_O5], + SIG_REGS(sc).u_regs[CON_O6], + SIG_REGS(sc).u_regs[CON_O7]); + + st->print_cr(" G1=" INTPTR_FORMAT " G2=" INTPTR_FORMAT + " G3=" INTPTR_FORMAT " G4=" INTPTR_FORMAT, + SIG_REGS(sc).u_regs[CON_G1], + SIG_REGS(sc).u_regs[CON_G2], + SIG_REGS(sc).u_regs[CON_G3], + SIG_REGS(sc).u_regs[CON_G4]); + st->print_cr(" G5=" INTPTR_FORMAT " G6=" INTPTR_FORMAT + " G7=" INTPTR_FORMAT " Y=" INTPTR_FORMAT, + SIG_REGS(sc).u_regs[CON_G5], + SIG_REGS(sc).u_regs[CON_G6], + SIG_REGS(sc).u_regs[CON_G7], + SIG_REGS(sc).y); + + st->print_cr(" PC=" INTPTR_FORMAT " nPC=" INTPTR_FORMAT, + SIG_PC(sc), + SIG_NPC(sc)); + st->cr(); + st->cr(); + + intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc); + st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp); + print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t)); + st->cr(); + + // Note: it may be unsafe to inspect memory near pc. For example, pc may + // point to garbage if entry point in an nmethod is corrupted. Leave + // this at the end, and hope for the best. + address pc = os::Linux::ucontext_get_pc(uc); + st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc); + print_hex_dump(st, pc - 16, pc + 16, sizeof(char)); +} + + +address os::Linux::ucontext_get_pc(ucontext_t* uc) { + return (address) SIG_PC((sigcontext*)uc); +} + +intptr_t* os::Linux::ucontext_get_sp(ucontext_t *uc) { + return (intptr_t*) + ((intptr_t)SIG_REGS((sigcontext*)uc).u_regs[CON_O6] + STACK_BIAS); +} + +// not used on Sparc +intptr_t* os::Linux::ucontext_get_fp(ucontext_t *uc) { + ShouldNotReachHere(); + return NULL; +} + +// Utility functions + +extern "C" void Fetch32PFI(); +extern "C" void Fetch32Resume(); +extern "C" void FetchNPFI(); +extern "C" void FetchNResume(); + +inline static bool checkPrefetch(sigcontext* uc, address pc) { + if (pc == (address) Fetch32PFI) { + set_cont_address(uc, address(Fetch32Resume)); + return true; + } + if (pc == (address) FetchNPFI) { + set_cont_address(uc, address(FetchNResume)); + return true; + } + return false; +} + +inline static bool checkOverflow(sigcontext* uc, + address pc, + address addr, + JavaThread* thread, + address* stub) { + // check if fault address is within thread stack + if (addr < thread->stack_base() && + addr >= thread->stack_base() - thread->stack_size()) { + // stack overflow + if (thread->in_stack_yellow_zone(addr)) { + thread->disable_stack_yellow_zone(); + if (thread->thread_state() == _thread_in_Java) { + // Throw a stack overflow exception. Guard pages will be reenabled + // while unwinding the stack. + *stub = + SharedRuntime::continuation_for_implicit_exception(thread, + pc, + SharedRuntime::STACK_OVERFLOW); + } else { + // Thread was in the vm or native code. Return and try to finish. + return true; + } + } else if (thread->in_stack_red_zone(addr)) { + // Fatal red zone violation. 
Disable the guard pages and fall through + // to handle_unexpected_exception way down below. + thread->disable_stack_red_zone(); + tty->print_raw_cr("An irrecoverable stack overflow has occurred."); + } else { + // Accessing stack address below sp may cause SEGV if current + // thread has MAP_GROWSDOWN stack. This should only happen when + // current thread was created by user code with MAP_GROWSDOWN flag + // and then attached to VM. See notes in os_linux.cpp. + if (thread->osthread()->expanding_stack() == 0) { + thread->osthread()->set_expanding_stack(); + if (os::Linux::manually_expand_stack(thread, addr)) { + thread->osthread()->clear_expanding_stack(); + return true; + } + thread->osthread()->clear_expanding_stack(); + } else { + fatal("recursive segv. expanding stack."); + } + } + } + return false; +} + +inline static bool checkPollingPage(address pc, address fault, address* stub) { + if (fault == os::get_polling_page()) { + *stub = SharedRuntime::get_poll_stub(pc); + return true; + } + return false; +} + +inline static bool checkByteBuffer(address pc, address* stub) { + // BugId 4454115: A read from a MappedByteBuffer can fault + // here if the underlying file has been truncated. + // Do not crash the VM in such a case. + CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL; + if (nm != NULL && nm->has_unsafe_access()) { + *stub = StubRoutines::handler_for_unsafe_access(); + return true; + } + return false; +} + +inline static bool checkVerifyOops(address pc, address fault, address* stub) { + if (pc >= MacroAssembler::_verify_oop_implicit_branch[0] + && pc < MacroAssembler::_verify_oop_implicit_branch[1] ) { + *stub = MacroAssembler::_verify_oop_implicit_branch[2]; + warning("fixed up memory fault in +VerifyOops at address " + INTPTR_FORMAT, fault); + return true; + } + return false; +} + +inline static bool checkFPFault(address pc, int code, + JavaThread* thread, address* stub) { + if (code == FPE_INTDIV || code == FPE_FLTDIV) { + *stub = + SharedRuntime:: + continuation_for_implicit_exception(thread, + pc, + SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO); + return true; + } + return false; +} + +inline static bool checkNullPointer(address pc, intptr_t fault, + JavaThread* thread, address* stub) { + if (!MacroAssembler::needs_explicit_null_check(fault)) { + // Determination of interpreter/vtable stub/compiled code null + // exception + *stub = + SharedRuntime:: + continuation_for_implicit_exception(thread, pc, + SharedRuntime::IMPLICIT_NULL); + return true; + } + return false; +} + +inline static bool checkFastJNIAccess(address pc, address* stub) { + address addr = JNI_FastGetField::find_slowcase_pc(pc); + if (addr != (address)-1) { + *stub = addr; + return true; + } + return false; +} + +inline static bool checkSerializePage(JavaThread* thread, address addr) { + return os::is_memory_serialize_page(thread, addr); +} + +inline static bool checkZombie(sigcontext* uc, address* pc, address* stub) { + if (nativeInstruction_at(*pc)->is_zombie()) { + // zombie method (ld [%g0],%o7 instruction) + *stub = SharedRuntime::get_handle_wrong_method_stub(); + + // At the stub it needs to look like a call from the caller of this + // method (not a call from the segv site). 
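+ // On SPARC a call instruction leaves its own address in %o7, so the value
+ // saved in CON_O7 is the caller's call site; substituting it for pc makes
+ // the resume at the stub look like an ordinary call from the caller rather
+ // than from the faulting instruction.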
+ *pc = (address)SIG_REGS(uc).u_regs[CON_O7]; + return true; + } + return false; +} + +inline static bool checkICMiss(sigcontext* uc, address* pc, address* stub) { +#ifdef COMPILER2 + if (nativeInstruction_at(*pc)->is_ic_miss_trap()) { +#ifdef ASSERT +#ifdef TIERED + CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + assert(cb->is_compiled_by_c2(), "Wrong compiler"); +#endif // TIERED +#endif // ASSERT + // Inline cache missed and user trap "Tne G0+ST_RESERVED_FOR_USER_0+2" taken. + *stub = SharedRuntime::get_ic_miss_stub(); + // At the stub it needs to look like a call from the caller of this + // method (not a call from the segv site). + *pc = (address)SIG_REGS(uc).u_regs[CON_O7]; + return true; + } +#endif // COMPILER2 + return false; +} + +extern "C" int +JVM_handle_linux_signal(int sig, + siginfo_t* info, + void* ucVoid, + int abort_if_unrecognized) { + // in fact this isn't ucontext_t* at all, but struct sigcontext* + // but Linux porting layer uses ucontext_t, so to minimize code change + // we cast as needed + ucontext_t* ucFake = (ucontext_t*) ucVoid; + sigcontext* uc = (sigcontext*)ucVoid; + + Thread* t = ThreadLocalStorage::get_thread_slow(); + + SignalHandlerMark shm(t); + + // Note: it's not uncommon that JNI code uses signal/sigset to install + // then restore certain signal handler (e.g. to temporarily block SIGPIPE, + // or have a SIGILL handler when detecting CPU type). When that happens, + // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To + // avoid unnecessary crash when libjsig is not preloaded, try handle signals + // that do not require siginfo/ucontext first. + + if (sig == SIGPIPE || sig == SIGXFSZ) { + // allow chained handler to go first + if (os::Linux::chained_handler(sig, info, ucVoid)) { + return true; + } else { + if (PrintMiscellaneous && (WizardMode || Verbose)) { + char buf[64]; + warning("Ignoring %s - see bugs 4229104 or 646499219", + os::exception_name(sig, buf, sizeof(buf))); + } + return true; + } + } + + JavaThread* thread = NULL; + VMThread* vmthread = NULL; + if (os::Linux::signal_handlers_are_installed) { + if (t != NULL ){ + if(t->is_Java_thread()) { + thread = (JavaThread*)t; + } + else if(t->is_VM_thread()){ + vmthread = (VMThread *)t; + } + } + } + + // decide if this trap can be handled by a stub + address stub = NULL; + address pc = NULL; + address npc = NULL; + + //%note os_trap_1 + if (info != NULL && uc != NULL && thread != NULL) { + pc = address(SIG_PC(uc)); + npc = address(SIG_NPC(uc)); + + // Check to see if we caught the safepoint code in the + // process of write protecting the memory serialization page. + // It write enables the page immediately after protecting it + // so we can just return to retry the write. + if ((sig == SIGSEGV) && checkSerializePage(thread, (address)info->si_addr)) { + // Block current thread until the memory serialize page permission restored. 
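+ // Threads that happen to be storing to the page while the VM thread has it
+ // write-protected take this fault; waiting here and retrying the store
+ // gives the VM thread a cheap cross-thread serialization point for
+ // thread-state updates.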
+ os::block_on_serialize_page_trap(); + return 1; + } + + if (checkPrefetch(uc, pc)) { + return 1; + } + + // Handle ALL stack overflow variations here + if (sig == SIGSEGV) { + if (checkOverflow(uc, pc, (address)info->si_addr, thread, &stub)) { + return 1; + } + } + + if (sig == SIGBUS && + thread->thread_state() == _thread_in_vm && + thread->doing_unsafe_access()) { + stub = StubRoutines::handler_for_unsafe_access(); + } + + if (thread->thread_state() == _thread_in_Java) { + do { + // Java thread running in Java code => find exception handler if any + // a fault inside compiled code, the interpreter, or a stub + + if ((sig == SIGSEGV) && checkPollingPage(pc, (address)info->si_addr, &stub)) { + break; + } + + if ((sig == SIGBUS) && checkByteBuffer(pc, &stub)) { + break; + } + + if ((sig == SIGSEGV || sig == SIGBUS) && + checkVerifyOops(pc, (address)info->si_addr, &stub)) { + break; + } + + if ((sig == SIGSEGV) && checkZombie(uc, &pc, &stub)) { + break; + } + + if ((sig == SIGILL) && checkICMiss(uc, &pc, &stub)) { + break; + } + + if ((sig == SIGFPE) && checkFPFault(pc, info->si_code, thread, &stub)) { + break; + } + + if ((sig == SIGSEGV) && + checkNullPointer(pc, (intptr_t)info->si_addr, thread, &stub)) { + break; + } + } while (0); + + // jni_fast_GetField can trap at certain pc's if a GC kicks in + // and the heap gets shrunk before the field access. + if ((sig == SIGSEGV) || (sig == SIGBUS)) { + checkFastJNIAccess(pc, &stub); + } + } + + if (stub != NULL) { + // save all thread context in case we need to restore it + thread->set_saved_exception_pc(pc); + thread->set_saved_exception_npc(npc); + set_cont_address(uc, stub); + return true; + } + } + + // signal-chaining + if (os::Linux::chained_handler(sig, info, ucVoid)) { + return true; + } + + if (!abort_if_unrecognized) { + // caller wants another chance, so give it to him + return false; + } + + if (pc == NULL && uc != NULL) { + pc = os::Linux::ucontext_get_pc((ucontext_t*)uc); + } + + // unmask current signal + sigset_t newset; + sigemptyset(&newset); + sigaddset(&newset, sig); + sigprocmask(SIG_UNBLOCK, &newset, NULL); + + VMError err(t, sig, pc, info, ucVoid); + err.report_and_die(); + + ShouldNotReachHere(); +} + +void os::Linux::init_thread_fpu_state(void) { + // Nothing to do +} + +int os::Linux::get_fpu_control_word() { + return 0; +} + +void os::Linux::set_fpu_control_word(int fpu) { + // nothing +} + +bool os::is_allocatable(size_t bytes) { +#ifdef _LP64 + return true; +#else + if (bytes < 2 * G) { + return true; + } + + char* addr = reserve_memory(bytes, NULL); + + if (addr != NULL) { + release_memory(addr, bytes); + } + + return addr != NULL; +#endif // _LP64 +} + +/////////////////////////////////////////////////////////////////////////////// +// thread stack + +size_t os::Linux::min_stack_allowed = 128 * K; + +// pthread on Ubuntu is always in floating stack mode +bool os::Linux::supports_variable_stack_size() { return true; } + +// return default stack size for thr_type +size_t os::Linux::default_stack_size(os::ThreadType thr_type) { + // default stack size (compiler thread needs larger stack) + size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); + return s; +} + +size_t os::Linux::default_guard_size(os::ThreadType thr_type) { + // Creating guard page is very expensive. Java thread has HotSpot + // guard page, only enable glibc guard page for non-Java threads. + return (thr_type == java_thread ? 
0 : page_size()); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.hpp 2009-08-01 04:17:38.378963092 +0100 @@ -0,0 +1,46 @@ +/* + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + + // + // NOTE: we are back in class os here, not Linux + // + static jint (*atomic_xchg_func) (jint, volatile jint*); + static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint); + static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong); + static jint (*atomic_add_func) (jint, volatile jint*); + static void (*fence_func) (); + + static jint atomic_xchg_bootstrap (jint, volatile jint*); + static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint); + static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong); + static jint atomic_add_bootstrap (jint, volatile jint*); + static void fence_bootstrap (); + + static void setup_fpu() {} + + static bool is_allocatable(size_t bytes); + + // Used to register dynamic code cache area with the OS + // Note: Currently only used in 64 bit Windows implementations + static bool register_code_area(char *low, char *high) { return true; } --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/prefetch_linux_sparc.inline.hpp 2009-08-01 04:17:38.842475946 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +#if defined(COMPILER2) || defined(_LP64) + +inline void Prefetch::read(void *loc, intx interval) { + __asm__ volatile("prefetch [%0+%1], 0" : : "r" (loc), "r" (interval) : "memory" ); +} + +inline void Prefetch::write(void *loc, intx interval) { + __asm__ volatile("prefetch [%0+%1], 2" : : "r" (loc), "r" (interval) : "memory" ); +} + +#else + +inline void Prefetch::read (void *loc, intx interval) {} +inline void Prefetch::write(void *loc, intx interval) {} + +#endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.cpp 2009-08-01 04:17:39.259242245 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_threadLS_linux_sparc.cpp.incl" + +void ThreadLocalStorage::generate_code_for_get_thread() { +} + +void ThreadLocalStorage::pd_init() { + // Nothing to do +} + +void ThreadLocalStorage::pd_set_thread(Thread* thread) { + os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.hpp 2009-08-01 04:17:39.672491618 +0100 @@ -0,0 +1,28 @@ +/* + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +public: + static Thread* thread() { + return (Thread*) os::thread_local_storage_at(thread_index()); + } --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp 2009-08-01 04:17:40.077084602 +0100 @@ -0,0 +1,107 @@ +/* + * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_thread_linux_sparc.cpp.incl" + +// For Forte Analyzer AsyncGetCallTrace profiling support - thread is +// currently interrupted by SIGPROF +bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, + void* ucontext, + bool isInJava) { + assert(Thread::current() == this, "caller must be current thread"); + assert(this->is_Java_thread(), "must be JavaThread"); + + JavaThread* jt = (JavaThread *)this; + + if (!isInJava) { + // make_walkable flushes register windows and grabs last_Java_pc + // which can not be done if the ucontext sp matches last_Java_sp + // stack walking utilities assume last_Java_pc set if marked flushed + jt->frame_anchor()->make_walkable(jt); + } + + // If we have a walkable last_Java_frame, then we should use it + // even if isInJava == true. It should be more reliable than + // ucontext info. + if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { + *fr_addr = jt->pd_last_frame(); + return true; + } + + ucontext_t* uc = (ucontext_t*) ucontext; + + // At this point, we don't have a walkable last_Java_frame, so + // we try to glean some information out of the ucontext. + intptr_t* ret_sp; + ExtendedPC addr = + os::fetch_frame_from_context(uc, &ret_sp, + NULL /* ret_fp only used on X86 */); + if (addr.pc() == NULL || ret_sp == NULL) { + // ucontext wasn't useful + return false; + } + + // we were running Java code when SIGPROF came in + if (isInJava) { + // If we have a last_Java_sp, then the SIGPROF signal caught us + // right when we were transitioning from _thread_in_Java to a new + // JavaThreadState. We use last_Java_sp instead of the sp from + // the ucontext since it should be more reliable. + if (jt->has_last_Java_frame()) { + ret_sp = jt->last_Java_sp(); + } + // Implied else: we don't have a last_Java_sp so we use what we + // got from the ucontext. + + frame ret_frame(ret_sp, frame::unpatchable, addr.pc()); + if (!ret_frame.safe_for_sender(jt)) { + // nothing else to try if the frame isn't good + return false; + } + *fr_addr = ret_frame; + return true; + } + + // At this point, we know we weren't running Java code. 
We might + // have a last_Java_sp, but we don't have a walkable frame. + // However, we might still be able to construct something useful + // if the thread was running native code. + if (jt->has_last_Java_frame()) { + assert(!jt->frame_anchor()->walkable(), "case covered above"); + + if (jt->thread_state() == _thread_in_native) { + frame ret_frame(jt->last_Java_sp(), frame::unpatchable, addr.pc()); + if (!ret_frame.safe_for_sender(jt)) { + // nothing else to try if the frame isn't good + return false; + } + *fr_addr = ret_frame; + return true; + } + } + + // nothing else to try + return false; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.hpp 2009-08-01 04:17:40.518865421 +0100 @@ -0,0 +1,98 @@ +/* + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +private: + + void pd_initialize() { + _anchor.clear(); + _base_of_stack_pointer = NULL; + } + + frame pd_last_frame() { + assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); + assert(_anchor.walkable(), "thread has not dumped its register windows yet"); + + assert(_anchor.last_Java_pc() != NULL, "Ack no pc!"); + return frame(last_Java_sp(), frame::unpatchable, _anchor.last_Java_pc()); + } + + // Sometimes the trap handler needs to record both PC and NPC. + // This is a SPARC-specific companion to Thread::set_saved_exception_pc. + address _saved_exception_npc; + + // In polling_page_safepoint_handler_blob(s) we have to tail call other + // blobs without blowing any registers. A tail call requires some + // register to jump with and we can't blow any registers, so it must + // be restored in the delay slot. 'restore' cannot be used as it + // will chop the heads off of 64-bit %o registers in the 32-bit + // build. Instead we reload the registers using G2_thread and this + // location. Must be 64bits in the 32-bit LION build. + jdouble _o_reg_temps[6]; + + // a stack pointer older than any java frame stack pointer. It is + // used to validate stack pointers in frame::next_younger_sp (it + // provides the upper bound in the range check). This is necessary + // on Solaris/SPARC since the ucontext passed to a signal handler is + // sometimes corrupt and we need a way to check the extracted sp. 
+ intptr_t* _base_of_stack_pointer; + +public: + + static int o_reg_temps_offset_in_bytes() { return offset_of(JavaThread, _o_reg_temps); } + +#ifndef _LP64 + address o_reg_temps(int i) { return (address)&_o_reg_temps[i]; } +#endif + + static int saved_exception_npc_offset_in_bytes() { return offset_of(JavaThread,_saved_exception_npc); } + + address saved_exception_npc() { return _saved_exception_npc; } + void set_saved_exception_npc(address a) { _saved_exception_npc = a; } + + +public: + + intptr_t* base_of_stack_pointer() { return _base_of_stack_pointer; } + + void set_base_of_stack_pointer(intptr_t* base_sp) { + _base_of_stack_pointer = base_sp; + } + + void record_base_of_stack_pointer() { + intptr_t *sp = (intptr_t *)(((intptr_t)StubRoutines::Sparc::flush_callers_register_windows_func()())); + intptr_t *ysp; + while((ysp = (intptr_t*)sp[FP->sp_offset_in_saved_window()]) != NULL) { + sp = (intptr_t *)((intptr_t)ysp + STACK_BIAS); + } + _base_of_stack_pointer = sp; + } + + bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, + bool isInJava); + + // These routines are only used on cpu architectures that + // have separate register stacks (Itanium). + static bool register_stack_overflow() { return false; } + static void enable_register_stack_guard() {} + static void disable_register_stack_guard() {} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/vmStructs_linux_sparc.hpp 2009-08-01 04:17:40.982697579 +0100 @@ -0,0 +1,69 @@ +/* + * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// These are the OS and CPU-specific fields, types and integer +// constants required by the Serviceability Agent. This file is +// referenced by vmStructs.cpp. 
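Editor's note on the lists that follow: VM_STRUCTS_OS_CPU, VM_TYPES_OS_CPU and the VM_*_CONSTANTS_OS_CPU macros below follow HotSpot's usual X-macro convention. This header only names the OS/CPU-specific fields, types and constants; the including code (vmStructs.cpp, per the comment above) supplies definitions for nonstatic_field, declare_integer_type, last_entry and so on, and each expansion turns the list into whatever table it needs. A minimal sketch of that expansion pattern, assuming hypothetical names (FIELD_LIST, FieldDesc and OSThreadExample are illustrative, not HotSpot's own machinery):

  // Illustrative only: expanding an X-macro field list into a table.
  #include <cstddef>
  #include <cstdio>

  struct OSThreadExample {            // stand-in for a VM class
    int   _thread_id;
    void* _pthread_id;
  };

  // The list names each field once; consumers decide what to generate.
  #define FIELD_LIST(nonstatic_field, last_entry)          \
    nonstatic_field(OSThreadExample, _thread_id,  int)     \
    nonstatic_field(OSThreadExample, _pthread_id, void*)   \
    last_entry()

  struct FieldDesc { const char* holder; const char* name; size_t offset; };

  // One possible expansion: a {holder, name, offset} row per field.
  #define GENERATE_ROW(klass, field, type) { #klass, #field, offsetof(klass, field) },
  #define GENERATE_LAST()                  { nullptr, nullptr, 0 }

  static const FieldDesc field_table[] = { FIELD_LIST(GENERATE_ROW, GENERATE_LAST) };

  int main() {
    for (const FieldDesc* f = field_table; f->holder != nullptr; f++)
      std::printf("%s::%s at offset %zu\n", f->holder, f->name, f->offset);
    return 0;
  }

The trailing last_entry() is what lets the consumer terminate whatever table it generates, which is why each list below insists that it "must be the last entry, and must be present".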
+ +#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \ + \ + /******************************/ \ + /* Threads (NOTE: incomplete) */ \ + /******************************/ \ + \ + nonstatic_field(JavaThread, _base_of_stack_pointer, intptr_t*) \ + nonstatic_field(OSThread, _thread_id, pid_t) \ + nonstatic_field(OSThread, _pthread_id, pthread_t) \ + /* This must be the last entry, and must be present */ \ + last_entry() + + +#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \ + \ + /**********************/ \ + /* POSIX Thread IDs */ \ + /**********************/ \ + \ + declare_integer_type(pid_t) \ + declare_unsigned_integer_type(pthread_t) \ + \ + /* This must be the last entry, and must be present */ \ + last_entry() + + +#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \ + \ + /************************/ \ + /* JavaThread constants */ \ + /************************/ \ + \ + declare_constant(JavaFrameAnchor::flushed) \ + \ + /* This must be the last entry, and must be present */ \ + last_entry() + +#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \ + \ + /* This must be the last entry, and must be present */ \ + last_entry() --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp 2009-08-01 04:17:41.419217219 +0100 @@ -0,0 +1,61 @@ +/* + * Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +# include "incls/_precompiled.incl" +# include "incls/_vm_version_linux_sparc.cpp.incl" + +static bool detect_niagara() { + char cpu[128]; + bool rv = false; + + FILE* fp = fopen("/proc/cpuinfo", "r"); + if (fp == NULL) { + return rv; + } + + while (!feof(fp)) { + if (fscanf(fp, "cpu\t\t: %100[^\n]", &cpu) == 1) { + if (strstr(cpu, "Niagara") != NULL) { + rv = true; + } + break; + } + } + + fclose(fp); + + return rv; +} + +int VM_Version::platform_features(int features) { + // Default to generic v9 + features = generic_v9_m; + + if (detect_niagara()) { + NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on Niagara");) + features = niagara1_m; + } + + return features; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp 2009-08-01 04:17:41.836335859 +0100 @@ -0,0 +1,84 @@ +/* + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_assembler_linux_x86.cpp.incl" + +#ifndef _LP64 +void MacroAssembler::int3() { + call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); +} + +void MacroAssembler::get_thread(Register thread) { + movl(thread, rsp); + shrl(thread, PAGE_SHIFT); + + ExternalAddress tls_base((address)ThreadLocalStorage::sp_map_addr()); + Address index(noreg, thread, Address::times_4); + ArrayAddress tls(tls_base, index); + + movptr(thread, tls); +} +#else +void MacroAssembler::int3() { + call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); +} + +void MacroAssembler::get_thread(Register thread) { + // call pthread_getspecific + // void * pthread_getspecific(pthread_key_t key); + if (thread != rax) { + push(rax); + } + push(rdi); + push(rsi); + push(rdx); + push(rcx); + push(r8); + push(r9); + push(r10); + // XXX + mov(r10, rsp); + andq(rsp, -16); + push(r10); + push(r11); + + movl(rdi, ThreadLocalStorage::thread_index()); + call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific))); + + pop(r11); + pop(rsp); + pop(r10); + pop(r9); + pop(r8); + pop(rcx); + pop(rdx); + pop(rsi); + pop(rdi); + if (thread != rax) { + mov(thread, rax); + pop(rax); + } +} +#endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp 2009-08-01 04:17:42.267813405 +0100 @@ -0,0 +1,133 @@ +/* + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_assembler_solaris_x86.cpp.incl" + + +void MacroAssembler::int3() { + push(rax); + push(rdx); + push(rcx); + call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); + pop(rcx); + pop(rdx); + pop(rax); +} + +#define __ _masm-> +#ifndef _LP64 +static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) { + + // slow call to of thr_getspecific + // int thr_getspecific(thread_key_t key, void **value); + // Consider using pthread_getspecific instead. + +__ push(0); // allocate space for return value + if (thread != rax) __ push(rax); // save rax, if caller still wants it +__ push(rcx); // save caller save +__ push(rdx); // save caller save + if (thread != rax) { +__ lea(thread, Address(rsp, 3 * sizeof(int))); // address of return value + } else { +__ lea(thread, Address(rsp, 2 * sizeof(int))); // address of return value + } +__ push(thread); // and pass the address +__ push(ThreadLocalStorage::thread_index()); // the key +__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific))); +__ increment(rsp, 2 * wordSize); +__ pop(rdx); +__ pop(rcx); + if (thread != rax) __ pop(rax); +__ pop(thread); + +} +#else +static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) { + // slow call to of thr_getspecific + // int thr_getspecific(thread_key_t key, void **value); + // Consider using pthread_getspecific instead. + + if (thread != rax) { +__ push(rax); + } +__ push(0); // space for return value +__ push(rdi); +__ push(rsi); +__ lea(rsi, Address(rsp, 16)); // pass return value address +__ push(rdx); +__ push(rcx); +__ push(r8); +__ push(r9); +__ push(r10); + // XXX +__ mov(r10, rsp); +__ andptr(rsp, -16); +__ push(r10); +__ push(r11); + +__ movl(rdi, ThreadLocalStorage::thread_index()); +__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific))); + +__ pop(r11); +__ pop(rsp); +__ pop(r10); +__ pop(r9); +__ pop(r8); +__ pop(rcx); +__ pop(rdx); +__ pop(rsi); +__ pop(rdi); +__ pop(thread); // load return value + if (thread != rax) { +__ pop(rax); + } +} +#endif //LP64 + +void MacroAssembler::get_thread(Register thread) { + + int segment = NOT_LP64(Assembler::GS_segment) LP64_ONLY(Assembler::FS_segment); + // Try to emit a Solaris-specific fast TSD/TLS accessor. 
+ ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode (); + if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) { // T1 + // Use thread as a temporary: mov r, gs:[0]; mov r, [r+tlsOffset] + emit_byte (segment); + // ExternalAddress doesn't work because it can't take NULL + AddressLiteral null(0, relocInfo::none); + movptr (thread, null); + movptr(thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset())) ; + return ; + } else + if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) { // T2 + // mov r, gs:[tlsOffset] + emit_byte (segment); + AddressLiteral tls_off((address)ThreadLocalStorage::pd_getTlsOffset(), relocInfo::none); + movptr (thread, tls_off); + return ; + } + + slow_call_thr_specific(this, thread); + +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp 2009-08-01 04:17:42.713205135 +0100 @@ -0,0 +1,98 @@ +/* + * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_assembler_windows_x86.cpp.incl" + + +void MacroAssembler::int3() { + emit_byte(0xCC); +} + +#ifndef _LP64 +// The current scheme to accelerate access to the thread +// pointer is to store the current thread in the os_exception_wrapper +// and reference the current thread from stubs and compiled code +// via the FS register. FS[0] contains a pointer to the structured +// exception block which is actually a stack address. The first time +// we call the os exception wrapper, we calculate and store the +// offset from this exception block and use that offset here. +// +// The last mechanism we used was problematic in that the +// the offset we had hard coded in the VM kept changing as Microsoft +// evolved the OS. +// +// Warning: This mechanism assumes that we only attempt to get the +// thread when we are nested below a call wrapper. 
+// +// movl reg, fs:[0] Get exeception pointer +// movl reg, [reg + thread_ptr_offset] Load thread +// +void MacroAssembler::get_thread(Register thread) { + // can't use ExternalAddress because it can't take NULL + AddressLiteral null(0, relocInfo::none); + + prefix(FS_segment); + movptr(thread, null); + assert(ThreadLocalStorage::get_thread_ptr_offset() != 0, + "Thread Pointer Offset has not been initialized"); + movl(thread, Address(thread, ThreadLocalStorage::get_thread_ptr_offset())); +} +#else +// call (Thread*)TlsGetValue(thread_index()); +void MacroAssembler::get_thread(Register thread) { + if (thread != rax) { + push(rax); + } + push(rdi); + push(rsi); + push(rdx); + push(rcx); + push(r8); + push(r9); + push(r10); + // XXX + mov(r10, rsp); + andq(rsp, -16); + push(r10); + push(r11); + + movl(c_rarg0, ThreadLocalStorage::thread_index()); + call(RuntimeAddress((address)TlsGetValue)); + + pop(r11); + pop(rsp); + pop(r10); + pop(r9); + pop(r8); + pop(rcx); + pop(rdx); + pop(rsi); + pop(rdi); + if (thread != rax) { + mov(thread, rax); + pop(rax); + } +} +#endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/BatikSVGProxy/build.xml 2009-08-01 04:17:43.196674513 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.svg. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/BatikSVGProxy/manifest.mf 2009-08-01 04:17:43.572205357 +0100 @@ -0,0 +1,6 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.svg +OpenIDE-Module-Layer: com/sun/hotspot/igv/svg/layer.xml +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/svg/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/BatikSVGProxy/nbproject/build-impl.xml 2009-08-01 04:17:43.997954520 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/BatikSVGProxy/nbproject/genfiles.properties 2009-08-01 04:17:44.431326263 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=ebcf0422 +build.xml.script.CRC32=d7a2678d +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. +nbproject/build-impl.xml.data.CRC32=ebcf0422 +nbproject/build-impl.xml.script.CRC32=57997f94 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/BatikSVGProxy/nbproject/project.properties 2009-08-01 04:17:44.831845537 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/BatikSVGProxy/nbproject/project.xml 2009-08-01 04:17:45.218690488 +0100 @@ -0,0 +1,14 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.svg + + + + com.sun.hotspot.igv.svg + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/BatikSVGProxy/nbproject/suite.properties 2009-08-01 04:17:45.685144208 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. 
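Editor's note on the assembler_*_x86.cpp files above: each MacroAssembler::get_thread solves the same problem of materializing the current Thread* from OS thread-local storage, preferring a cheap inline sequence where the layout is known (the stack-pointer-indexed map on 32-bit Linux, a %gs/%fs-relative load on Solaris and Windows) and falling back to a library call (pthread_getspecific, thr_getspecific, TlsGetValue) otherwise. A rough C++ rendering of the two Linux strategies; the names sp_map, kPageShift and current_thread_* are illustrative, the map size is arbitrary, and the sketch registers only the caller's current stack page rather than the whole stack:

  // Sketch of the thread-lookup strategies generated by get_thread() above.
  // Build with: g++ tls_sketch.cpp -lpthread   (all names here are illustrative)
  #include <pthread.h>
  #include <cstdint>
  #include <cstdio>

  static const int     kPageShift = 12;            // assume 4K pages
  static const size_t  kMapSize   = size_t(1) << 16;
  static void*         sp_map[kMapSize];           // stack page -> thread (32-bit idea)
  static pthread_key_t thread_key;                 // created once at startup

  // Fast path, as in the 32-bit stub: shift the stack pointer down to a
  // page index and read the thread registered for that page.
  static void* current_thread_fast() {
    char probe;                                    // any local yields a stack address
    uintptr_t sp = reinterpret_cast<uintptr_t>(&probe);
    return sp_map[(sp >> kPageShift) % kMapSize];
  }

  // Slow path, as in the 64-bit stub: ask libpthread for the TLS slot.
  static void* current_thread_slow() {
    return pthread_getspecific(thread_key);
  }

  int main() {
    pthread_key_create(&thread_key, nullptr);
    static int fake_thread = 42;                   // stands in for a Thread object
    pthread_setspecific(thread_key, &fake_thread);

    // Register only the page of main's frame; nearby frames normally share it.
    char probe;
    uintptr_t sp = reinterpret_cast<uintptr_t>(&probe);
    sp_map[(sp >> kPageShift) % kMapSize] = &fake_thread;

    std::printf("fast == slow: %d\n", current_thread_fast() == current_thread_slow());
    return 0;
  }

The Solaris and Windows stubs make the same fast/slow split in generated assembly: a segment-prefixed load from a cached offset when ThreadLocalStorage reports a direct or indirect access mode, and a call into thr_getspecific or TlsGetValue, with the caller-saved registers pushed around it, otherwise.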
--- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/BatikSVGProxy/src/com/sun/hotspot/igv/svg/BatikSVG.java 2009-08-01 04:17:46.306552677 +0100 @@ -0,0 +1,86 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.svg; + +import java.awt.Graphics2D; +import java.io.Writer; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import org.w3c.dom.DOMImplementation; + +/** + * + * @author Thomas Wuerthinger + */ +public class BatikSVG { + + private static Constructor SVGGraphics2DConstructor; + private static Method Method_stream; + private static Method Method_createDefault; + private static Method Method_getDOMImplementation; + private static Method Method_setEmbeddedFontsOn; + + public static Graphics2D createGraphicsObject() { + try { + if (SVGGraphics2DConstructor == null) { + ClassLoader cl = BatikSVG.class.getClassLoader(); + Class Class_GenericDOMImplementation = cl.loadClass("org.apache.batik.dom.GenericDOMImplementation"); + Class Class_SVGGeneratorContext = cl.loadClass("org.apache.batik.svggen.SVGGeneratorContext"); + Class Class_SVGGraphics2D = cl.loadClass("org.apache.batik.svggen.SVGGraphics2D"); + Method_getDOMImplementation = Class_GenericDOMImplementation.getDeclaredMethod("getDOMImplementation", new Class[0]); + Method_createDefault = Class_SVGGeneratorContext.getDeclaredMethod("createDefault", new Class[]{org.w3c.dom.Document.class}); + Method_setEmbeddedFontsOn = Class_SVGGeneratorContext.getDeclaredMethod("setEmbeddedFontsOn", new Class[]{boolean.class}); + Method_stream = Class_SVGGraphics2D.getDeclaredMethod("stream", Writer.class, boolean.class); + SVGGraphics2DConstructor = Class_SVGGraphics2D.getConstructor(Class_SVGGeneratorContext, boolean.class); + } + DOMImplementation dom = (DOMImplementation) Method_getDOMImplementation.invoke(null); + org.w3c.dom.Document document = dom.createDocument("http://www.w3.org/2000/svg", "svg", null); + Object ctx = Method_createDefault.invoke(null, document); + Method_setEmbeddedFontsOn.invoke(ctx, true); + Graphics2D svgGenerator = (Graphics2D) SVGGraphics2DConstructor.newInstance(ctx, true); + return svgGenerator; + } catch (ClassNotFoundException e) { + return null; + } catch (NoSuchMethodException e) { + return null; + } catch (IllegalAccessException e) { + return null; + } catch (InvocationTargetException e) { + return null; + } catch (InstantiationException e) { + return 
null; + } + } + + public static void printToStream(Graphics2D svgGenerator, Writer stream, boolean useCSS) { + try { + Method_stream.invoke(svgGenerator, stream, useCSS); + } catch (IllegalAccessException e) { + assert false; + } catch (InvocationTargetException e) { + assert false; + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/BatikSVGProxy/src/com/sun/hotspot/igv/svg/Bundle.properties 2009-08-01 04:17:46.789868645 +0100 @@ -0,0 +1 @@ +OpenIDE-Module-Name=BatikSVGProxy --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/BatikSVGProxy/src/com/sun/hotspot/igv/svg/layer.xml 2009-08-01 04:17:47.207190962 +0100 @@ -0,0 +1,4 @@ + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/build.xml 2009-08-01 04:17:47.641268932 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.bytecodes. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/manifest.mf 2009-08-01 04:17:48.042047646 +0100 @@ -0,0 +1,6 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.bytecodes +OpenIDE-Module-Layer: com/sun/hotspot/igv/bytecodes/layer.xml +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/bytecodes/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/nbproject/build-impl.xml 2009-08-01 04:17:48.485022848 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/nbproject/genfiles.properties 2009-08-01 04:17:48.907134824 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=1dee290d +build.xml.script.CRC32=d594034f +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. 
+nbproject/build-impl.xml.data.CRC32=1dee290d +nbproject/build-impl.xml.script.CRC32=b4dab126 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/nbproject/platform.properties 2009-08-01 04:17:49.358504951 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/nbproject/project.properties 2009-08-01 04:17:49.781150522 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/nbproject/project.xml 2009-08-01 04:17:50.176051500 +0100 @@ -0,0 +1,62 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.bytecodes + + + + com.sun.hotspot.igv.data + + + + 1.0 + + + + org.jdesktop.layout + + + + 1 + 1.4 + + + + org.openide.explorer + + + + 6.11 + + + + org.openide.nodes + + + + 7.2.0.1 + + + + org.openide.util + + + + 7.9.0.1 + + + + org.openide.windows + + + + 6.16 + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/nbproject/suite.properties 2009-08-01 04:17:50.601249104 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/Bundle.properties 2009-08-01 04:17:51.169261131 +0100 @@ -0,0 +1,5 @@ +CTL_BytecodeViewAction=Open BytecodeView Window +CTL_BytecodeViewTopComponent=BytecodeView Window +CTL_SelectBytecodesAction=Select nodes +HINT_BytecodeViewTopComponent=This is a BytecodeView window +OpenIDE-Module-Name=Bytecodes --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/BytecodeNode.java 2009-08-01 04:17:51.629680854 +0100 @@ -0,0 +1,100 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.bytecodes; + +import com.sun.hotspot.igv.data.InputBytecode; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.InputNode; +import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.data.Properties.StringPropertyMatcher; +import java.awt.Image; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import javax.swing.Action; +import org.openide.nodes.AbstractNode; +import org.openide.nodes.Children; +import org.openide.nodes.Node; +import org.openide.util.Utilities; + +/** + * + * @author Thomas Wuerthinger + */ +public class BytecodeNode extends AbstractNode { + + private Set nodes; + + public BytecodeNode(InputBytecode bytecode, InputGraph graph, String bciValue) { + + super(Children.LEAF); + this.setDisplayName(bytecode.getBci() + " " + bytecode.getName()); + + bciValue = bytecode.getBci() + " " + bciValue; + bciValue = bciValue.trim(); + + Properties.PropertySelector selector = new Properties.PropertySelector(graph.getNodes()); + StringPropertyMatcher matcher = new StringPropertyMatcher("bci", bciValue); + List nodeList = selector.selectMultiple(matcher); + if (nodeList.size() > 0) { + nodes = new HashSet(); + for (InputNode n : nodeList) { + nodes.add(n); + } + this.setDisplayName(this.getDisplayName() + " (" + nodes.size() + " nodes)"); + } + } + + @Override + public Image getIcon(int i) { + if (nodes != null) { + return Utilities.loadImage("com/sun/hotspot/igv/bytecodes/images/link.gif"); + } else { + return Utilities.loadImage("com/sun/hotspot/igv/bytecodes/images/bytecode.gif"); + } + } + + @Override + public Image getOpenedIcon(int i) { + return getIcon(i); + } + + @Override + public Action[] getActions(boolean b) { + return new Action[]{(Action) SelectBytecodesAction.findObject(SelectBytecodesAction.class, true)}; + } + + @Override + public Action getPreferredAction() { + return (Action) SelectBytecodesAction.findObject(SelectBytecodesAction.class, true); + } + + @Override + public T getCookie(Class aClass) { + if (aClass == SelectBytecodesCookie.class && nodes != null) { + return (T) (new SelectBytecodesCookie(nodes)); + } + return super.getCookie(aClass); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/BytecodeViewAction.java 2009-08-01 04:17:52.069294589 +0100 @@ -0,0 +1,45 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.bytecodes; + +import java.awt.event.ActionEvent; +import javax.swing.AbstractAction; +import org.openide.util.NbBundle; +import org.openide.windows.TopComponent; + +/** + * @author Thomas Wuerthinger + */ +public class BytecodeViewAction extends AbstractAction { + + public BytecodeViewAction() { + super(NbBundle.getMessage(BytecodeViewAction.class, "CTL_BytecodeViewAction")); + } + + public void actionPerformed(ActionEvent evt) { + TopComponent win = BytecodeViewTopComponent.findInstance(); + win.open(); + win.requestActive(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/BytecodeViewTopComponent.form 2009-08-01 04:17:52.488551323 +0100 @@ -0,0 +1,26 @@ + + +
[26 added lines of NetBeans form-editor XML for BytecodeViewTopComponent.form; the element markup was stripped during text extraction]
--- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/BytecodeViewTopComponent.java 2009-08-01 04:17:52.889284979 +0100 @@ -0,0 +1,177 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.bytecodes; + +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.services.InputGraphProvider; +import java.awt.BorderLayout; +import java.io.Serializable; +import javax.swing.SwingUtilities; +import org.openide.ErrorManager; +import org.openide.explorer.ExplorerManager; +import org.openide.explorer.ExplorerUtils; +import org.openide.explorer.view.BeanTreeView; +import org.openide.util.Lookup; +import org.openide.util.LookupEvent; +import org.openide.util.LookupListener; +import org.openide.util.NbBundle; +import org.openide.util.Utilities; +import org.openide.windows.TopComponent; +import org.openide.windows.WindowManager; + +/** + * @author Thomas Wuerthinger + */ +final class BytecodeViewTopComponent extends TopComponent implements ExplorerManager.Provider, LookupListener { + + private static BytecodeViewTopComponent instance; + private static final String PREFERRED_ID = "BytecodeViewTopComponent"; + private ExplorerManager manager; + private BeanTreeView treeView; + private Lookup.Result result = null; + private MethodNode rootNode; + + private BytecodeViewTopComponent() { + initComponents(); + setName(NbBundle.getMessage(BytecodeViewTopComponent.class, "CTL_BytecodeViewTopComponent")); + setToolTipText(NbBundle.getMessage(BytecodeViewTopComponent.class, "HINT_BytecodeViewTopComponent")); + + manager = new ExplorerManager(); + rootNode = new MethodNode(null, null, ""); + manager.setRootContext(rootNode); + + setLayout(new BorderLayout()); + + treeView = new BeanTreeView(); + treeView.setRootVisible(false); + this.add(BorderLayout.CENTER, treeView); + associateLookup(ExplorerUtils.createLookup(manager, getActionMap())); + } + + /** This method is called from within the constructor to + * initialize the form. + * WARNING: Do NOT modify this code. The content of this method is + * always regenerated by the Form Editor. 
+ */ + // //GEN-BEGIN:initComponents + private void initComponents() { + + org.jdesktop.layout.GroupLayout layout = new org.jdesktop.layout.GroupLayout(this); + this.setLayout(layout); + layout.setHorizontalGroup( + layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(0, 400, Short.MAX_VALUE) + ); + layout.setVerticalGroup( + layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(0, 300, Short.MAX_VALUE) + ); + }// //GEN-END:initComponents + // Variables declaration - do not modify//GEN-BEGIN:variables + // End of variables declaration//GEN-END:variables + /** + * Gets default instance. Do not use directly: reserved for *.settings files only, + * i.e. deserialization routines; otherwise you could get a non-deserialized instance. + * To obtain the singleton instance, use {@link findInstance}. + */ + public static synchronized BytecodeViewTopComponent getDefault() { + if (instance == null) { + instance = new BytecodeViewTopComponent(); + } + return instance; + } + + /** + * Obtain the BytecodeViewTopComponent instance. Never call {@link #getDefault} directly! + */ + public static synchronized BytecodeViewTopComponent findInstance() { + TopComponent win = WindowManager.getDefault().findTopComponent(PREFERRED_ID); + if (win == null) { + ErrorManager.getDefault().log(ErrorManager.WARNING, "Cannot find BytecodeView component. It will not be located properly in the window system."); + return getDefault(); + } + if (win instanceof BytecodeViewTopComponent) { + return (BytecodeViewTopComponent) win; + } + ErrorManager.getDefault().log(ErrorManager.WARNING, "There seem to be multiple components with the '" + PREFERRED_ID + "' ID. That is a potential source of errors and unexpected behavior."); + return getDefault(); + } + + @Override + public int getPersistenceType() { + return TopComponent.PERSISTENCE_ALWAYS; + } + + @Override + public void componentOpened() { + Lookup.Template tpl = new Lookup.Template(Object.class); + result = Utilities.actionsGlobalContext().lookup(tpl); + result.addLookupListener(this); + } + + @Override + public void componentClosed() { + result.removeLookupListener(this); + result = null; + } + + @Override + public Object writeReplace() { + return new ResolvableHelper(); + } + + @Override + protected String preferredID() { + return PREFERRED_ID; + } + + public ExplorerManager getExplorerManager() { + return manager; + } + + public void resultChanged(LookupEvent lookupEvent) { + final InputGraphProvider p = Lookup.getDefault().lookup(InputGraphProvider.class); + if (p != null) { + SwingUtilities.invokeLater(new Runnable() { + public void run() { + InputGraph graph = p.getGraph(); + if (graph != null) { + Group g = graph.getGroup(); + rootNode.update(graph, g.getMethod()); + } + } + }); + } + } + + final static class ResolvableHelper implements Serializable { + + private static final long serialVersionUID = 1L; + + public Object readResolve() { + return BytecodeViewTopComponent.getDefault(); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/BytecodeViewTopComponentSettings.xml 2009-08-01 04:17:53.331231759 +0100 @@ -0,0 +1,8 @@ + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/BytecodeViewTopComponentWstcref.xml 2009-08-01 04:17:53.732008727 +0100 @@ -0,0 +1,7 @@ + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 
+0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/MethodNode.java 2009-08-01 04:17:54.166372297 +0100 @@ -0,0 +1,102 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.bytecodes; + +import com.sun.hotspot.igv.data.InputBytecode; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.InputMethod; +import java.awt.Image; +import org.openide.nodes.AbstractNode; +import org.openide.nodes.Children; +import org.openide.nodes.Node; +import org.openide.util.Utilities; + +/** + * + * @author Thomas Wuerthinger + */ +public class MethodNode extends AbstractNode { + + private static class MethodNodeChildren extends Children.Keys { + + private InputMethod method; + private InputGraph graph; + private String bciString; + + public MethodNodeChildren(InputMethod method, InputGraph graph, String bciString) { + this.method = method; + this.bciString = bciString; + this.graph = graph; + } + + protected Node[] createNodes(Object object) { + assert object instanceof InputBytecode; + InputBytecode bc = (InputBytecode) object; + if (bc.getInlined() == null) { + return new Node[]{new BytecodeNode(bc, graph, bciString)}; + } else { + return new Node[]{new BytecodeNode(bc, graph, bciString), new MethodNode(bc.getInlined(), graph, bc.getBci() + " " + bciString)}; + } + } + + @Override + public void addNotify() { + if (method != null) { + setKeys(method.getBytecodes()); + } + } + + public void setMethod(InputMethod method, InputGraph graph) { + this.method = method; + this.graph = graph; + addNotify(); + } + } + + /** Creates a new instance of MethodNode */ + public MethodNode(InputMethod method, InputGraph graph, String bciString) { + super((method != null && method.getBytecodes().size() == 0) ? 
Children.LEAF : new MethodNodeChildren(method, graph, bciString)); + if (method != null) { + this.setDisplayName(method.getName()); + } + } + + @Override + public Image getIcon(int i) { + return Utilities.loadImage("com/sun/hotspot/igv/bytecodes/images/method.gif"); + } + + @Override + public Image getOpenedIcon(int i) { + return getIcon(i); + } + + public void update(InputGraph graph, InputMethod method) { + ((MethodNodeChildren) this.getChildren()).setMethod(method, graph); + if (method != null) { + this.setDisplayName(method.getName()); + } + + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/SelectBytecodesAction.java 2009-08-01 04:17:54.590838133 +0100 @@ -0,0 +1,75 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.bytecodes; + +import com.sun.hotspot.igv.data.services.InputGraphProvider; +import org.openide.nodes.Node; +import org.openide.util.HelpCtx; +import org.openide.util.Lookup; +import org.openide.util.NbBundle; +import org.openide.util.actions.CookieAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class SelectBytecodesAction extends CookieAction { + + protected void performAction(Node[] activatedNodes) { + SelectBytecodesCookie c = activatedNodes[0].getCookie(SelectBytecodesCookie.class); + InputGraphProvider p = Lookup.getDefault().lookup(InputGraphProvider.class); + if (p != null) { + p.setSelectedNodes(c.getNodes()); + } + } + + protected int mode() { + return CookieAction.MODE_EXACTLY_ONE; + } + + public String getName() { + return NbBundle.getMessage(SelectBytecodesAction.class, "CTL_SelectBytecodesAction"); + } + + protected Class[] cookieClasses() { + return new Class[]{ + SelectBytecodesCookie.class + }; + } + + @Override + protected void initialize() { + super.initialize(); + putValue("noIconInMenu", Boolean.TRUE); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/SelectBytecodesCookie.java 2009-08-01 04:17:55.017576094 +0100 @@ -0,0 +1,47 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.bytecodes; + +import com.sun.hotspot.igv.data.InputNode; +import java.util.Collections; +import java.util.Set; +import org.openide.nodes.Node; + +/** + * + * @author Thomas Wuerthinger + */ +public class SelectBytecodesCookie implements Node.Cookie { + + private Set nodes; + + /** Creates a new instance of SelectBytecodesCookie */ + public SelectBytecodesCookie(Set nodes) { + this.nodes = nodes; + } + + public Set getNodes() { + return Collections.unmodifiableSet(nodes); + } +} Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/images/bytecode.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/images/link.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/images/method.gif differ --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Bytecodes/src/com/sun/hotspot/igv/bytecodes/layer.xml 2009-08-01 04:17:56.696943463 +0100 @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/build.xml 2009-08-01 04:17:57.131141784 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.controlflow. 
+ + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/manifest.mf 2009-08-01 04:17:57.529642221 +0100 @@ -0,0 +1,6 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.controlflow +OpenIDE-Module-Layer: com/sun/hotspot/igv/controlflow/layer.xml +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/controlflow/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/nbproject/build-impl.xml 2009-08-01 04:17:57.959374798 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/nbproject/genfiles.properties 2009-08-01 04:17:58.376996547 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=b524efb3 +build.xml.script.CRC32=79a27be9 +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. +nbproject/build-impl.xml.data.CRC32=b524efb3 +nbproject/build-impl.xml.script.CRC32=582bdab7 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/nbproject/platform.properties 2009-08-01 04:17:58.773367442 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/nbproject/project.properties 2009-08-01 04:17:59.173529440 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/nbproject/project.xml 2009-08-01 04:17:59.946303973 +0100 @@ -0,0 +1,70 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.controlflow + + + + com.sun.hotspot.igv.data + + + + 1.0 + + + + com.sun.hotspot.igv.hierarchicallayout + + + + 1.0 + + + + com.sun.hotspot.igv.layout + + + + 1.0 + + + + org.jdesktop.layout + + + + 1 + 1.4 + + + + org.netbeans.api.visual + + + + 2.9 + + + + org.openide.util + + + + 7.9.0.1 + + + + org.openide.windows + + + + 6.16 + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/nbproject/suite.properties 2009-08-01 04:18:00.537864593 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. 
--- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/src/com/sun/hotspot/igv/controlflow/BlockConnectionWidget.java 2009-08-01 04:18:01.089399036 +0100 @@ -0,0 +1,83 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.controlflow; + +import com.sun.hotspot.igv.data.InputBlockEdge; +import com.sun.hotspot.igv.layout.Link; +import com.sun.hotspot.igv.layout.Port; +import java.awt.Point; +import java.util.ArrayList; +import java.util.List; +import org.netbeans.api.visual.widget.ConnectionWidget; + +/** + * + * @author Thomas Wuerthinger + */ +public class BlockConnectionWidget extends ConnectionWidget implements Link { + + private BlockWidget from; + private BlockWidget to; + private Port inputSlot; + private Port outputSlot; + private List points; + private InputBlockEdge edge; + + public BlockConnectionWidget(ControlFlowScene scene, InputBlockEdge edge) { + super(scene); + + this.edge = edge; + this.from = (BlockWidget) scene.findWidget(edge.getFrom()); + this.to = (BlockWidget) scene.findWidget(edge.getTo()); + inputSlot = to.getInputSlot(); + outputSlot = from.getOutputSlot(); + points = new ArrayList(); + } + + public InputBlockEdge getEdge() { + return edge; + } + + public Port getTo() { + return inputSlot; + } + + public Port getFrom() { + return outputSlot; + } + + public void setControlPoints(List p) { + this.points = p; + } + + @Override + public List getControlPoints() { + return points; + } + + @Override + public String toString() { + return "Connection[ " + from.toString() + " - " + to.toString() + "]"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/src/com/sun/hotspot/igv/controlflow/BlockWidget.java 2009-08-01 04:18:01.498421068 +0100 @@ -0,0 +1,160 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.controlflow; + +import com.sun.hotspot.igv.data.InputBlock; +import com.sun.hotspot.igv.layout.Cluster; +import com.sun.hotspot.igv.layout.Port; +import com.sun.hotspot.igv.layout.Vertex; +import java.awt.Color; +import java.awt.Dimension; +import java.awt.Font; +import java.awt.Point; +import org.netbeans.api.visual.border.BorderFactory; +import org.netbeans.api.visual.model.ObjectState; +import org.netbeans.api.visual.widget.LabelWidget; + +/** + * + * @author Thomas Wuerthinger + */ +public class BlockWidget extends LabelWidget implements Vertex { + + public static final Dimension SIZE = new Dimension(20, 20); + private InputBlock block; + private Port inputSlot; + private Port outputSlot; + private Cluster cluster; + private boolean root; + private static final Font font = new Font(Font.SERIF, Font.PLAIN, 12); + private static final Font boldFont = font.deriveFont(Font.BOLD); + public static final Color NORMAL_FOREGROUND_COLOR = Color.BLACK; + public static final Color HOVER_FOREGROUND_COLOR = Color.BLUE; + + /** Creates a new instance of BlockWidget */ + public BlockWidget(ControlFlowScene scene, InputBlock block) { + super(scene); + this.block = block; + this.setLabel(block.getName()); + this.setForeground(NORMAL_FOREGROUND_COLOR); + this.setBorder(BorderFactory.createLineBorder(1, NORMAL_FOREGROUND_COLOR)); + this.setMinimumSize(SIZE); + this.setMaximumSize(SIZE); + + this.setFont(font); + + final BlockWidget widget = this; + inputSlot = new Port() { + + public Point getRelativePosition() { + return new Point((int) (SIZE.getWidth() / 2), (int) (SIZE.getHeight() / 2)); + } + + public Vertex getVertex() { + return widget; + } + }; + + outputSlot = new Port() { + + public Point getRelativePosition() { + return new Point((int) (SIZE.getWidth() / 2), (int) (SIZE.getHeight() / 2)); + } + + public Vertex getVertex() { + return widget; + } + }; + } + + public Port getInputSlot() { + return inputSlot; + } + + public Port getOutputSlot() { + return outputSlot; + } + + public InputBlock getBlock() { + return block; + } + + public Dimension getSize() { + return SIZE; + } + + public void setPosition(Point p) { + this.setPreferredLocation(p); + } + + @Override + public String toString() { + return block.getName(); + } + + public Point getPosition() { + return this.getPreferredLocation(); + } + + public Cluster getCluster() { + return cluster; + } + + public boolean isRoot() { + return root; + } + + public void setCluster(Cluster c) { + cluster = c; + } + + public void setRoot(boolean b) { + root = b; + } + + public int compareTo(Vertex o) { + return toString().compareTo(o.toString()); + } + + @Override + protected void notifyStateChanged(ObjectState previousState, ObjectState state) { + super.notifyStateChanged(previousState, state); + + if (previousState.isHovered() != state.isHovered()) { + if (state.isHovered()) { + this.setBorder(BorderFactory.createLineBorder(1, HOVER_FOREGROUND_COLOR)); + } else { + this.setBorder(BorderFactory.createLineBorder(1, NORMAL_FOREGROUND_COLOR)); + } + } + + if (previousState.isSelected() != state.isSelected()) { + if 
(state.isSelected()) { + this.setFont(boldFont); + } else { + this.setFont(font); + } + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/src/com/sun/hotspot/igv/controlflow/Bundle.properties 2009-08-01 04:18:01.914714952 +0100 @@ -0,0 +1,4 @@ +CTL_ControlFlowAction=Open ControlFlow Window +CTL_ControlFlowTopComponent=ControlFlow Window +HINT_ControlFlowTopComponent=This is a ControlFlow window +OpenIDE-Module-Name=ControlFlow --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/src/com/sun/hotspot/igv/controlflow/ControlFlowAction.java 2009-08-01 04:18:02.381929424 +0100 @@ -0,0 +1,46 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.controlflow; + +import java.awt.event.ActionEvent; +import javax.swing.AbstractAction; +import org.openide.util.NbBundle; +import org.openide.windows.TopComponent; + +/** + * + * @author Thomas Wuerthinger + */ +public class ControlFlowAction extends AbstractAction { + + public ControlFlowAction() { + super(NbBundle.getMessage(ControlFlowAction.class, "CTL_ControlFlowAction")); + } + + public void actionPerformed(ActionEvent evt) { + TopComponent win = ControlFlowTopComponent.findInstance(); + win.open(); + win.requestActive(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/src/com/sun/hotspot/igv/controlflow/ControlFlowScene.java 2009-08-01 04:18:02.791292340 +0100 @@ -0,0 +1,295 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.controlflow; + +import com.sun.hotspot.igv.data.InputBlock; +import com.sun.hotspot.igv.data.InputBlockEdge; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.services.InputGraphProvider; +import com.sun.hotspot.igv.data.InputNode; +import java.awt.Color; +import java.awt.Point; +import java.awt.Rectangle; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.HashMap; +import java.util.Set; +import javax.swing.BorderFactory; +import org.netbeans.api.visual.action.ActionFactory; +import org.netbeans.api.visual.action.MoveProvider; +import org.netbeans.api.visual.action.RectangularSelectDecorator; +import org.netbeans.api.visual.action.RectangularSelectProvider; +import org.netbeans.api.visual.action.SelectProvider; +import org.netbeans.api.visual.action.WidgetAction; +import org.netbeans.api.visual.anchor.AnchorFactory; +import org.netbeans.api.visual.anchor.AnchorShape; +import org.netbeans.api.visual.layout.LayoutFactory; +import org.netbeans.api.visual.router.RouterFactory; +import org.netbeans.api.visual.widget.LayerWidget; +import org.netbeans.api.visual.widget.Widget; +import org.netbeans.api.visual.graph.GraphScene; +import org.netbeans.api.visual.graph.layout.GraphLayout; +import org.netbeans.api.visual.layout.SceneLayout; +import org.netbeans.api.visual.widget.ConnectionWidget; +import org.openide.util.Lookup; + +/** + * + * @author Thomas Wuerthinger + */ +public class ControlFlowScene extends GraphScene implements SelectProvider, MoveProvider, RectangularSelectDecorator, RectangularSelectProvider { + + private HashSet selection; + private HashMap blockMap; + private InputGraph oldGraph; + private LayerWidget edgeLayer; + private LayerWidget mainLayer; + private LayerWidget selectLayer; + private WidgetAction hoverAction = this.createWidgetHoverAction(); + private WidgetAction selectAction = ActionFactory.createSelectAction(this); + private WidgetAction moveAction = ActionFactory.createMoveAction(null, this); + + public ControlFlowScene() { + selection = new HashSet(); + + this.getInputBindings().setZoomActionModifiers(0); + this.setLayout(LayoutFactory.createAbsoluteLayout()); + + mainLayer = new LayerWidget(this); + this.addChild(mainLayer); + + edgeLayer = new LayerWidget(this); + this.addChild(edgeLayer); + + selectLayer = new LayerWidget(this); + this.addChild(selectLayer); + + this.getActions().addAction(hoverAction); + this.getActions().addAction(selectAction); + this.getActions().addAction(ActionFactory.createRectangularSelectAction(this, selectLayer, this)); + this.getActions().addAction(ActionFactory.createMouseCenteredZoomAction(1.1)); + } + + public void setGraph(InputGraph g) { + if (g == oldGraph) { + return; + } + oldGraph = g; + + ArrayList blocks = new ArrayList(this.getNodes()); + for (InputBlock b : blocks) { + removeNode(b); + } + + ArrayList edges = new ArrayList(this.getEdges()); + for (InputBlockEdge e : edges) { + removeEdge(e); + } + + for (InputBlock b : g.getBlocks()) { + addNode(b); + } + + for (InputBlock b : g.getBlocks()) { + for (InputBlockEdge e : b.getOutputs()) { + addEdge(e); + assert g.getBlocks().contains(e.getFrom()); + assert g.getBlocks().contains(e.getTo()); + this.setEdgeSource(e, e.getFrom()); + this.setEdgeTarget(e, e.getTo()); + } + } + + GraphLayout layout = new 
HierarchicalGraphLayout();//GridGraphLayout(); + SceneLayout sceneLayout = LayoutFactory.createSceneGraphLayout(this, layout); + sceneLayout.invokeLayout(); + + this.validate(); + } + + public BlockWidget getBlockWidget(InputBlock b) { + return blockMap.get(b); + } + + public void clearSelection() { + for (BlockWidget w : selection) { + w.setState(w.getState().deriveSelected(false)); + } + selection.clear(); + selectionChanged(); + } + + public void selectionChanged() { + InputGraphProvider p = Lookup.getDefault().lookup(InputGraphProvider.class); + if (p != null) { + Set inputNodes = new HashSet(); + for (BlockWidget w : selection) { + inputNodes.addAll(w.getBlock().getNodes()); + } + p.setSelectedNodes(inputNodes); + } + } + + public void addToSelection(BlockWidget widget) { + widget.setState(widget.getState().deriveSelected(true)); + selection.add(widget); + selectionChanged(); + } + + public void removeFromSelection(BlockWidget widget) { + widget.setState(widget.getState().deriveSelected(false)); + selection.remove(widget); + selectionChanged(); + } + + public boolean isAimingAllowed(Widget widget, Point point, boolean b) { + return false; + } + + public boolean isSelectionAllowed(Widget widget, Point point, boolean b) { + return true; + } + + public void select(Widget widget, Point point, boolean change) { + if (widget == this) { + clearSelection(); + } else { + + assert widget instanceof BlockWidget; + BlockWidget bw = (BlockWidget) widget; + if (change) { + if (selection.contains(bw)) { + removeFromSelection(bw); + } else { + addToSelection(bw); + } + } else { + if (!selection.contains(bw)) { + clearSelection(); + addToSelection(bw); + } + } + } + } + + public void movementStarted(Widget widget) { + } + + public void movementFinished(Widget widget) { + } + + public Point getOriginalLocation(Widget widget) { + return widget.getPreferredLocation(); + } + + public void setNewLocation(Widget widget, Point location) { + Point originalLocation = getOriginalLocation(widget); + int xOffset = location.x - originalLocation.x; + int yOffset = location.y - originalLocation.y; + for (Widget w : this.selection) { + Point p = new Point(w.getPreferredLocation()); + p.translate(xOffset, yOffset); + w.setPreferredLocation(p); + } + + } + + public Widget createSelectionWidget() { + Widget widget = new Widget(this); + widget.setOpaque(false); + widget.setBorder(BorderFactory.createLineBorder(Color.black, 2)); + widget.setForeground(Color.red); + return widget; + } + + public void performSelection(Rectangle rectangle) { + + if (rectangle.width < 0) { + rectangle.x += rectangle.width; + rectangle.width *= -1; + } + + if (rectangle.height < 0) { + rectangle.y += rectangle.height; + rectangle.height *= -1; + } + + boolean changed = false; + for (InputBlock b : this.getNodes()) { + BlockWidget w = (BlockWidget) findWidget(b); + Rectangle r = new Rectangle(w.getBounds()); + r.setLocation(w.getLocation()); + if (r.intersects(rectangle)) { + if (!selection.contains(w)) { + changed = true; + selection.add(w); + w.setState(w.getState().deriveSelected(true)); + } + } else { + if (selection.contains(w)) { + changed = true; + selection.remove(w); + w.setState(w.getState().deriveSelected(false)); + } + } + } + + if (changed) { + selectionChanged(); + } + + } + + protected Widget attachNodeWidget(InputBlock node) { + BlockWidget w = new BlockWidget(this, node); + mainLayer.addChild(w); + w.getActions().addAction(hoverAction); + w.getActions().addAction(selectAction); + w.getActions().addAction(moveAction); + return 
w; + } + + protected Widget attachEdgeWidget(InputBlockEdge edge) { + ConnectionWidget w = new BlockConnectionWidget(this, edge); + w.setRouter(RouterFactory.createDirectRouter()); + w.setTargetAnchorShape(AnchorShape.TRIANGLE_FILLED); + edgeLayer.addChild(w); + return w; + } + + protected void attachEdgeSourceAnchor(InputBlockEdge edge, InputBlock oldSourceNode, InputBlock sourceNode) { + Widget w = this.findWidget(edge); + assert w instanceof ConnectionWidget; + ConnectionWidget cw = (ConnectionWidget) w; + cw.setSourceAnchor(AnchorFactory.createRectangularAnchor(findWidget(sourceNode))); + + } + + protected void attachEdgeTargetAnchor(InputBlockEdge edge, InputBlock oldTargetNode, InputBlock targetNode) { + Widget w = this.findWidget(edge); + assert w instanceof ConnectionWidget; + ConnectionWidget cw = (ConnectionWidget) w; + cw.setTargetAnchor(AnchorFactory.createRectangularAnchor(findWidget(targetNode))); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/src/com/sun/hotspot/igv/controlflow/ControlFlowTopComponent.form 2009-08-01 04:18:03.208499174 +0100 @@ -0,0 +1,26 @@ + + +
+ + + + + + + + + + + + + + + + + + + + + + +
--- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/src/com/sun/hotspot/igv/controlflow/ControlFlowTopComponent.java 2009-08-01 04:18:03.609765612 +0100 @@ -0,0 +1,184 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.controlflow; + +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.services.InputGraphProvider; +import java.awt.BorderLayout; +import java.io.Serializable; +import javax.swing.JScrollPane; +import javax.swing.SwingUtilities; +import org.openide.ErrorManager; +import org.openide.util.Lookup; +import org.openide.util.LookupEvent; +import org.openide.util.LookupListener; +import org.openide.util.NbBundle; +import org.openide.util.Utilities; +import org.openide.windows.TopComponent; +import org.openide.windows.WindowManager; + +/** + * + * @author Thomas Wuerthinger + */ +final class ControlFlowTopComponent extends TopComponent implements LookupListener { + + private static ControlFlowTopComponent instance; + private Lookup.Result result = null; + private static final String PREFERRED_ID = "ControlFlowTopComponent"; + private ControlFlowScene scene; + + private ControlFlowTopComponent() { + initComponents(); + setName(NbBundle.getMessage(ControlFlowTopComponent.class, "CTL_ControlFlowTopComponent")); + setToolTipText(NbBundle.getMessage(ControlFlowTopComponent.class, "HINT_ControlFlowTopComponent")); + + scene = new ControlFlowScene(); + this.setLayout(new BorderLayout()); + this.associateLookup(scene.getLookup()); + + + JScrollPane panel = new JScrollPane(scene.createView()); + this.add(panel, BorderLayout.CENTER); + } + + @Override + public void requestFocus() { + super.requestFocus(); + scene.getView().requestFocus(); + } + + @Override + public boolean requestFocusInWindow() { + super.requestFocusInWindow(); + return scene.getView().requestFocusInWindow(); + } + + /** This method is called from within the constructor to + * initialize the form. + * WARNING: Do NOT modify this code. The content of this method is + * always regenerated by the Form Editor. 
+ */ + // //GEN-BEGIN:initComponents + private void initComponents() { + + org.jdesktop.layout.GroupLayout layout = new org.jdesktop.layout.GroupLayout(this); + this.setLayout(layout); + layout.setHorizontalGroup( + layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(0, 400, Short.MAX_VALUE) + ); + layout.setVerticalGroup( + layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(0, 300, Short.MAX_VALUE) + ); + }// //GEN-END:initComponents + // Variables declaration - do not modify//GEN-BEGIN:variables + // End of variables declaration//GEN-END:variables + /** + * Gets default instance. Do not use directly: reserved for *.settings files only, + * i.e. deserialization routines; otherwise you could get a non-deserialized instance. + * To obtain the singleton instance, use {@link findInstance}. + */ + public static synchronized ControlFlowTopComponent getDefault() { + if (instance == null) { + instance = new ControlFlowTopComponent(); + } + return instance; + } + + /** + * Obtain the ControlFlowTopComponent instance. Never call {@link #getDefault} directly! + */ + public static synchronized ControlFlowTopComponent findInstance() { + TopComponent win = WindowManager.getDefault().findTopComponent(PREFERRED_ID); + if (win == null) { + ErrorManager.getDefault().log(ErrorManager.WARNING, "Cannot find ControlFlow component. It will not be located properly in the window system."); + return getDefault(); + } + if (win instanceof ControlFlowTopComponent) { + return (ControlFlowTopComponent) win; + } + ErrorManager.getDefault().log(ErrorManager.WARNING, "There seem to be multiple components with the '" + PREFERRED_ID + "' ID. That is a potential source of errors and unexpected behavior."); + return getDefault(); + } + + @Override + public int getPersistenceType() { + return TopComponent.PERSISTENCE_ALWAYS; + } + + @Override + public void componentOpened() { + Lookup.Template tpl = new Lookup.Template(Object.class); + result = Utilities.actionsGlobalContext().lookup(tpl); + result.addLookupListener(this); + } + + @Override + public void componentClosed() { + result.removeLookupListener(this); + result = null; + } + + public void resultChanged(LookupEvent lookupEvent) { + + final InputGraphProvider p = Lookup.getDefault().lookup(InputGraphProvider.class); + if (p != null) { + SwingUtilities.invokeLater(new Runnable() { + public void run() { + InputGraph g = p.getGraph(); + if (g != null) { + scene.setGraph(g); + } + } + }); + } + } + + @Override + public Object writeReplace() { + return new ResolvableHelper(); + } + + @Override + protected String preferredID() { + return PREFERRED_ID; + } + + @Override + public void requestActive() { + scene.getView().requestFocusInWindow(); + super.requestActive(); + } + + final static class ResolvableHelper implements Serializable { + + private static final long serialVersionUID = 1L; + + public Object readResolve() { + return ControlFlowTopComponent.getDefault(); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/src/com/sun/hotspot/igv/controlflow/ControlFlowTopComponentSettings.xml 2009-08-01 04:18:04.036460598 +0100 @@ -0,0 +1,8 @@ + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/src/com/sun/hotspot/igv/controlflow/ControlFlowTopComponentWstcref.xml 2009-08-01 04:18:04.442493291 +0100 @@ -0,0 +1,7 @@ + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ 
new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/src/com/sun/hotspot/igv/controlflow/HierarchicalGraphLayout.java 2009-08-01 04:18:04.876798717 +0100 @@ -0,0 +1,167 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.controlflow; + +import com.sun.hotspot.igv.hierarchicallayout.HierarchicalLayoutManager; +import com.sun.hotspot.igv.layout.Cluster; +import com.sun.hotspot.igv.layout.LayoutGraph; +import com.sun.hotspot.igv.layout.Link; +import com.sun.hotspot.igv.layout.Port; +import com.sun.hotspot.igv.layout.Vertex; +import java.awt.Dimension; +import java.awt.Point; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.netbeans.api.visual.graph.layout.GraphLayout; +import org.netbeans.api.visual.graph.layout.UniversalGraph; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class HierarchicalGraphLayout extends GraphLayout { + + public HierarchicalGraphLayout() { + } + + private class LinkWrapper implements Link { + + private VertexWrapper from; + private VertexWrapper to; + + public LinkWrapper(VertexWrapper from, VertexWrapper to) { + this.from = from; + this.to = to; + } + + public Port getFrom() { + return from.getSlot(); + } + + public Port getTo() { + return to.getSlot(); + } + + public List getControlPoints() { + return new ArrayList(); + } + + public void setControlPoints(List list) { + // Do nothing for now + } + } + + private class VertexWrapper implements Vertex { + + private N node; + private UniversalGraph graph; + private Port slot; + private Point position; + + public VertexWrapper(N node, UniversalGraph graph) { + this.node = node; + this.graph = graph; + final VertexWrapper vertex = this; + this.slot = new Port() { + + public Vertex getVertex() { + return vertex; + } + + public Point getRelativePosition() { + return new Point((int) (vertex.getSize().getWidth() / 2), (int) (vertex.getSize().getHeight() / 2)); + } + }; + + Widget w = graph.getScene().findWidget(node); + this.position = w.getPreferredLocation(); + } + + public Cluster getCluster() { + return null; + } + + public Dimension getSize() { + Widget w = graph.getScene().findWidget(node); + return w.getBounds().getSize(); + } + + public Point getPosition() { + return position; + } + + public void setPosition(Point p) { + HierarchicalGraphLayout.this.setResolvedNodeLocation(graph, 
node, p); + position = p; + } + + public boolean isRoot() { + return false; + } + + public int compareTo(Vertex o) { + VertexWrapper vw = (VertexWrapper) o; + return node.toString().compareTo(vw.node.toString()); + } + + public Port getSlot() { + return slot; + } + } + + protected void performGraphLayout(UniversalGraph graph) { + + Set links = new HashSet(); + Set vertices = new HashSet(); + Map vertexMap = new HashMap(); + + for (N node : graph.getNodes()) { + VertexWrapper v = new VertexWrapper(node, graph); + vertexMap.put(node, v); + vertices.add(v); + } + + for (E edge : graph.getEdges()) { + N source = graph.getEdgeSource(edge); + N target = graph.getEdgeTarget(edge); + LinkWrapper l = new LinkWrapper(vertexMap.get(source), vertexMap.get(target)); + links.add(l); + } + + HierarchicalLayoutManager m = new HierarchicalLayoutManager(HierarchicalLayoutManager.Combine.NONE); + + LayoutGraph layoutGraph = new LayoutGraph(links, vertices); + m.doLayout(layoutGraph); + } + + protected void performNodesLayout(UniversalGraph graph, Collection nodes) { + throw new UnsupportedOperationException(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ControlFlow/src/com/sun/hotspot/igv/controlflow/layer.xml 2009-08-01 04:18:05.321955156 +0100 @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/build.xml 2009-08-01 04:18:05.747616847 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.coordinator. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/manifest.mf 2009-08-01 04:18:06.153865563 +0100 @@ -0,0 +1,6 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.coordinator +OpenIDE-Module-Layer: com/sun/hotspot/igv/coordinator/layer.xml +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/coordinator/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/nbproject/build-impl.xml 2009-08-01 04:18:06.591098045 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/nbproject/genfiles.properties 2009-08-01 04:18:06.999729383 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=077de97c +build.xml.script.CRC32=d29d586c +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. 
+nbproject/build-impl.xml.data.CRC32=077de97c +nbproject/build-impl.xml.script.CRC32=03daa42d +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/nbproject/platform.properties 2009-08-01 04:18:07.396873696 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/nbproject/project.properties 2009-08-01 04:18:07.797283690 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/nbproject/project.xml 2009-08-01 04:18:08.210682501 +0100 @@ -0,0 +1,126 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.coordinator + + + + com.sun.hotspot.igv.data + + + + 1.0 + + + + com.sun.hotspot.igv.difference + + + + 1.0 + + + + com.sun.hotspot.igv.settings + + + + 1.0 + + + + com.sun.hotspot.igv.util + + + + 1.0 + + + + org.netbeans.api.progress + + + + 1 + 1.10.0.1 + + + + org.openide.actions + + + + 6.6.1.1 + + + + org.openide.awt + + + + 6.11.0.1 + + + + org.openide.dialogs + + + + 7.5.1 + + + + org.openide.explorer + + + + 6.11 + + + + org.openide.filesystems + + + + 7.3 + + + + org.openide.loaders + + + + 6.7 + + + + org.openide.nodes + + + + 7.2.0.1 + + + + org.openide.util + + + + 7.9.0.1 + + + + org.openide.windows + + + + 6.16 + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/nbproject/suite.properties 2009-08-01 04:18:08.640117547 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. 
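The Coordinator sources that follow populate the Outline window by asking a GroupOrganizer to fold the open Group instances into a tree of Pair<String, List<Group>> entries; the two implementations shipped here, StandardGroupOrganizer and GraphCountGroupOrganizer, are registered in the META-INF/services file in the next hunk. As a reading aid, here is a hypothetical third organizer written against the same contract; the class is an illustration only and is not part of this patch.

package com.sun.hotspot.igv.coordinator;

import com.sun.hotspot.igv.data.Group;
import com.sun.hotspot.igv.data.Pair;
import com.sun.hotspot.igv.data.services.GroupOrganizer;
import java.util.ArrayList;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;

// Illustrative sketch only (not part of this patch): buckets groups by the
// first letter of their name, following the same folder/leaf protocol as
// GraphCountGroupOrganizer below. To be picked up it would also need to be
// listed in META-INF/services/com.sun.hotspot.igv.data.services.GroupOrganizer,
// as the standard organizers are in the next hunk.
public class InitialLetterGroupOrganizer implements GroupOrganizer {

    public String getName() {
        return "Initial letter structure";
    }

    public List<Pair<String, List<Group>>> organize(List<String> subFolders, List<Group> groups) {

        List<Pair<String, List<Group>>> result = new ArrayList<Pair<String, List<Group>>>();

        if (subFolders.size() == 0) {
            // Top level: one folder per initial letter, sorted alphabetically.
            SortedMap<String, List<Group>> map = new TreeMap<String, List<Group>>();
            for (Group g : groups) {
                String name = g.getName();
                String key = name.length() == 0 ? "?" : name.substring(0, 1).toUpperCase();
                if (!map.containsKey(key)) {
                    map.put(key, new ArrayList<Group>());
                }
                map.get(key).add(g);
            }
            for (String key : map.keySet()) {
                result.add(new Pair<String, List<Group>>(key, map.get(key)));
            }
        } else if (subFolders.size() == 1) {
            // Inside a letter folder: one entry per group, labelled with its name.
            for (Group g : groups) {
                List<Group> children = new ArrayList<Group>();
                children.add(g);
                result.add(new Pair<String, List<Group>>(g.getName(), children));
            }
        } else {
            // Leaf level: an empty label tells FolderNode to show the graphs themselves.
            result.add(new Pair<String, List<Group>>("", groups));
        }

        return result;
    }
}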
--- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/META-INF/services/com.sun.hotspot.igv.data.services.GroupOrganizer 2009-08-01 04:18:09.099158206 +0100 @@ -0,0 +1,2 @@ +com.sun.hotspot.igv.coordinator.StandardGroupOrganizer +com.sun.hotspot.igv.coordinator.GraphCountGroupOrganizer \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/Bundle.properties 2009-08-01 04:18:09.625416803 +0100 @@ -0,0 +1,7 @@ + +AdvancedOption_DisplayName_Coordinator=Settings +AdvancedOption_Tooltip_Coordinator=Visualization Tool Settings +CTL_OutlineTopComponent=Outline Window +CTL_SomeAction=test +HINT_OutlineTopComponent=This is a Outline window +OpenIDE-Module-Name=Coordinator --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/FolderNode.java 2009-08-01 04:18:10.026256842 +0100 @@ -0,0 +1,155 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.coordinator; + +import com.sun.hotspot.igv.coordinator.actions.RemoveCookie; +import com.sun.hotspot.igv.data.ChangedListener; +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.services.GroupOrganizer; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.Pair; +import java.awt.Image; +import java.util.ArrayList; +import java.util.List; +import org.openide.nodes.AbstractNode; +import org.openide.nodes.Children; +import org.openide.nodes.Node; +import org.openide.util.Utilities; +import org.openide.util.lookup.AbstractLookup; +import org.openide.util.lookup.InstanceContent; + +/** + * + * @author Thomas Wuerthinger + */ +public class FolderNode extends AbstractNode { + + private GroupOrganizer organizer; + private InstanceContent content; + private List>> structure; + private List subFolders; + private FolderChildren children; + + private static class FolderChildren extends Children.Keys implements ChangedListener { + + private FolderNode parent; + private List registeredGroups; + + public void setParent(FolderNode parent) { + this.parent = parent; + this.registeredGroups = new ArrayList(); + } + + @Override + protected Node[] createNodes(Object arg0) { + + for(Group g : registeredGroups) { + g.getChangedEvent().removeListener(this); + } + registeredGroups.clear(); + + Pair> p = (Pair>) arg0; + if (p.getLeft().length() == 0) { + + List curNodes = new ArrayList(); + for (Group g : p.getRight()) { + for (InputGraph graph : g.getGraphs()) { + curNodes.add(new GraphNode(graph)); + } + g.getChangedEvent().addListener(this); + registeredGroups.add(g); + } + + Node[] result = new Node[curNodes.size()]; + for (int i = 0; i < curNodes.size(); i++) { + result[i] = curNodes.get(i); + } + return result; + + } else { + return new Node[]{new FolderNode(p.getLeft(), parent.organizer, parent.subFolders, p.getRight())}; + } + } + + @Override + public void addNotify() { + this.setKeys(parent.structure); + } + + public void changed(Group source) { + List>> newStructure = new ArrayList>>(); + for(Pair> p : parent.structure) { + refreshKey(p); + } + } + } + + protected InstanceContent getContent() { + return content; + } + + @Override + public Image getIcon(int i) { + return Utilities.loadImage("com/sun/hotspot/igv/coordinator/images/folder.gif"); + } + + protected FolderNode(String name, GroupOrganizer organizer, List subFolders, List groups) { + this(name, organizer, subFolders, groups, new FolderChildren(), new InstanceContent()); + } + + private FolderNode(String name, GroupOrganizer organizer, List oldSubFolders, final List groups, FolderChildren children, InstanceContent content) { + super(children, new AbstractLookup(content)); + children.setParent(this); + this.content = content; + this.children = children; + content.add(new RemoveCookie() { + + public void remove() { + for (Group g : groups) { + if (g.getDocument() != null) { + g.getDocument().removeGroup(g); + } + } + } + }); + init(name, organizer, oldSubFolders, groups); + } + + public void init(String name, GroupOrganizer organizer, List oldSubFolders, List groups) { + this.setDisplayName(name); + this.organizer = organizer; + this.subFolders = new ArrayList(oldSubFolders); + if (name.length() > 0) { + this.subFolders.add(name); + } + structure = organizer.organize(subFolders, groups); + assert structure != null; + children.addNotify(); + } + + @Override + public Image getOpenedIcon(int i) { + return getIcon(i); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 
+0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/GraphCountGroupOrganizer.java 2009-08-01 04:18:10.467555805 +0100 @@ -0,0 +1,81 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.coordinator; + +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.Pair; +import com.sun.hotspot.igv.data.services.GroupOrganizer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedSet; +import java.util.TreeSet; + +/** + * + * @author Thomas Wuerthinger + */ +public class GraphCountGroupOrganizer implements GroupOrganizer { + + public String getName() { + return "Graph count structure"; + } + + public List>> organize(List subFolders, List groups) { + + List>> result = new ArrayList>>(); + + if (subFolders.size() == 0) { + Map> map = new HashMap>(); + for (Group g : groups) { + Integer cur = g.getGraphs().size(); + if (!map.containsKey(cur)) { + map.put(cur, new ArrayList()); + } + map.get(cur).add(g); + } + + SortedSet keys = new TreeSet(map.keySet()); + for (Integer i : keys) { + result.add(new Pair>("Graph count " + i, map.get(i))); + } + + } else if (subFolders.size() == 1) { + for (Group g : groups) { + List children = new ArrayList(); + children.add(g); + Pair> p = new Pair>(); + p.setLeft(g.getName()); + p.setRight(children); + result.add(p); + } + } else if (subFolders.size() == 2) { + result.add(new Pair>("", groups)); + } + + return result; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/GraphNode.java 2009-08-01 04:18:10.895260290 +0100 @@ -0,0 +1,130 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.coordinator; + +import com.sun.hotspot.igv.coordinator.actions.DiffGraphAction; +import com.sun.hotspot.igv.coordinator.actions.DiffGraphCookie; +import com.sun.hotspot.igv.coordinator.actions.RemoveCookie; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.services.GraphViewer; +import com.sun.hotspot.igv.data.services.InputGraphProvider; +import com.sun.hotspot.igv.util.PropertiesSheet; +import java.awt.Image; +import javax.swing.Action; +import org.openide.actions.OpenAction; +import org.openide.cookies.OpenCookie; +import org.openide.nodes.AbstractNode; +import org.openide.nodes.Children; +import org.openide.nodes.Node; +import org.openide.nodes.Sheet; +import org.openide.util.Lookup; +import org.openide.util.Utilities; +import org.openide.util.lookup.AbstractLookup; +import org.openide.util.lookup.InstanceContent; + +/** + * + * @author Thomas Wuerthinger + */ +public class GraphNode extends AbstractNode { + + private InputGraph graph; + + /** Creates a new instance of GraphNode */ + public GraphNode(InputGraph graph) { + this(graph, new InstanceContent()); + } + + private GraphNode(final InputGraph graph, InstanceContent content) { + super(Children.LEAF, new AbstractLookup(content)); + this.graph = graph; + this.setDisplayName(graph.getName()); + content.add(graph); + + final GraphViewer viewer = Lookup.getDefault().lookup(GraphViewer.class); + + if (viewer != null) { + // Action for opening the graph + content.add(new OpenCookie() { + + public void open() { + viewer.view(graph); + } + }); + } + + // Action for removing a graph + content.add(new RemoveCookie() { + + public void remove() { + graph.getGroup().removeGraph(graph); + } + }); + } + + @Override + protected Sheet createSheet() { + Sheet s = super.createSheet(); + PropertiesSheet.initializeSheet(graph.getProperties(), s); + return s; + } + + @Override + public Image getIcon(int i) { + return Utilities.loadImage("com/sun/hotspot/igv/coordinator/images/graph.gif"); + } + + @Override + public Image getOpenedIcon(int i) { + return getIcon(i); + } + + @Override + public T getCookie(Class aClass) { + if (aClass == DiffGraphCookie.class) { + InputGraphProvider graphProvider = Utilities.actionsGlobalContext().lookup(InputGraphProvider.class); + + InputGraph graphA = null; + if (graphProvider != null) { + graphA = graphProvider.getGraph(); + } + + if (graphA != null && !graphA.isDifferenceGraph()) { + return (T) new DiffGraphCookie(graphA, graph); + } + } + + return super.getCookie(aClass); + } + + @Override + public Action[] getActions(boolean b) { + return new Action[]{(Action) DiffGraphAction.findObject(DiffGraphAction.class, true), (Action) OpenAction.findObject(OpenAction.class, true)}; + } + + @Override + public Action getPreferredAction() { + return (Action) OpenAction.findObject(OpenAction.class, true); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ 
new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/OutlineTopComponent.form 2009-08-01 04:18:11.345582845 +0100 @@ -0,0 +1,41 @@ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/OutlineTopComponent.java 2009-08-01 04:18:11.763268699 +0100 @@ -0,0 +1,271 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.coordinator; + +import com.sun.hotspot.igv.coordinator.actions.ImportAction; +import com.sun.hotspot.igv.coordinator.actions.RemoveAction; +import com.sun.hotspot.igv.coordinator.actions.RemoveAllAction; +import com.sun.hotspot.igv.coordinator.actions.SaveAllAction; +import com.sun.hotspot.igv.coordinator.actions.SaveAsAction; +import com.sun.hotspot.igv.coordinator.actions.StructuredViewAction; +import com.sun.hotspot.igv.data.GraphDocument; +import com.sun.hotspot.igv.data.ChangedListener; +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.services.GroupCallback; +import com.sun.hotspot.igv.data.services.GroupOrganizer; +import com.sun.hotspot.igv.data.services.GroupReceiver; +import java.awt.BorderLayout; +import java.awt.Component; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collection; +import javax.swing.BoxLayout; +import javax.swing.JPanel; +import javax.swing.UIManager; +import javax.swing.border.Border; +import org.openide.ErrorManager; +import org.openide.awt.Toolbar; +import org.openide.awt.ToolbarPool; +import org.openide.explorer.ExplorerManager; +import org.openide.explorer.ExplorerUtils; +import org.openide.explorer.view.BeanTreeView; +import org.openide.util.Lookup; +import org.openide.util.LookupEvent; +import org.openide.util.LookupListener; +import org.openide.util.NbBundle; +import org.openide.util.actions.NodeAction; +import org.openide.windows.TopComponent; +import org.openide.windows.WindowManager; + +/** + * + * @author Thomas Wuerthinger + */ +public final class OutlineTopComponent extends TopComponent implements ExplorerManager.Provider, LookupListener { + + public static OutlineTopComponent instance; + public static final String PREFERRED_ID = "OutlineTopComponent"; + private ExplorerManager manager; + private GraphDocument document; + private FolderNode root; + private GroupOrganizer organizer; + + private OutlineTopComponent() { + initComponents(); + + setName(NbBundle.getMessage(OutlineTopComponent.class, "CTL_OutlineTopComponent")); + 
setToolTipText(NbBundle.getMessage(OutlineTopComponent.class, "HINT_OutlineTopComponent")); + + document = new GraphDocument(); + initListView(); + initToolbar(); + initReceivers(); + } + + private void initListView() { + manager = new ExplorerManager(); + organizer = new StandardGroupOrganizer(); + root = new FolderNode("", organizer, new ArrayList(), document.getGroups()); + manager.setRootContext(root); + ((BeanTreeView) this.jScrollPane1).setRootVisible(false); + + document.getChangedEvent().addListener(new ChangedListener() { + + public void changed(GraphDocument document) { + updateStructure(); + } + }); + + associateLookup(ExplorerUtils.createLookup(manager, getActionMap())); + } + + private void initToolbar() { + + Toolbar toolbar = new Toolbar(); + Border b = (Border) UIManager.get("Nb.Editor.Toolbar.border"); //NOI18N + toolbar.setBorder(b); + this.add(toolbar, BorderLayout.NORTH); + + toolbar.add(ImportAction.get(ImportAction.class)); + toolbar.add(((NodeAction) RemoveAction.get(RemoveAction.class)).createContextAwareInstance(this.getLookup())); + toolbar.add(RemoveAllAction.get(RemoveAllAction.class)); + + toolbar.add(((NodeAction) SaveAsAction.get(SaveAsAction.class)).createContextAwareInstance(this.getLookup())); + toolbar.add(SaveAllAction.get(SaveAllAction.class)); + + toolbar.add(StructuredViewAction.get(StructuredViewAction.class).getToolbarPresenter()); + + for (Toolbar tb : ToolbarPool.getDefault().getToolbars()) { + tb.setVisible(false); + } + + initOrganizers(); + } + + public void setOrganizer(GroupOrganizer organizer) { + this.organizer = organizer; + updateStructure(); + } + + private void initOrganizers() { + + } + + private void initReceivers() { + + final GroupCallback callback = new GroupCallback() { + + public void started(Group g) { + getDocument().addGroup(g); + } + }; + + Collection receivers = Lookup.getDefault().lookupAll(GroupReceiver.class); + if (receivers.size() > 0) { + JPanel panel = new JPanel(); + panel.setLayout(new BoxLayout(panel, BoxLayout.Y_AXIS)); + + for (GroupReceiver r : receivers) { + Component c = r.init(callback); + panel.add(c); + } + + jPanel2.add(panel, BorderLayout.PAGE_START); + } + } + + private void updateStructure() { + root.init("", organizer, new ArrayList(), document.getGroups()); + } + + public void clear() { + document.clear(); + } + + public ExplorerManager getExplorerManager() { + return manager; + } + + public GraphDocument getDocument() { + return document; + } + + /** + * Gets default instance. Do not use directly: reserved for *.settings files only, + * i.e. deserialization routines; otherwise you could get a non-deserialized instance. + * To obtain the singleton instance, use {@link findInstance}. + */ + public static synchronized OutlineTopComponent getDefault() { + if (instance == null) { + instance = new OutlineTopComponent(); + } + return instance; + } + + /** + * Obtain the OutlineTopComponent instance. Never call {@link #getDefault} directly! + */ + public static synchronized OutlineTopComponent findInstance() { + TopComponent win = WindowManager.getDefault().findTopComponent(PREFERRED_ID); + if (win == null) { + ErrorManager.getDefault().log(ErrorManager.WARNING, "Cannot find Outline component. It will not be located properly in the window system."); + return getDefault(); + } + if (win instanceof OutlineTopComponent) { + return (OutlineTopComponent) win; + } + ErrorManager.getDefault().log(ErrorManager.WARNING, "There seem to be multiple components with the '" + PREFERRED_ID + "' ID. 
That is a potential source of errors and unexpected behavior."); + return getDefault(); + } + + @Override + public int getPersistenceType() { + return TopComponent.PERSISTENCE_ALWAYS; + } + + @Override + public void componentOpened() { + this.requestActive(); + } + + @Override + public void componentClosed() { + } + + @Override + protected String preferredID() { + return PREFERRED_ID; + } + + public void resultChanged(LookupEvent lookupEvent) { + } + + @Override + public void readExternal(ObjectInput objectInput) throws IOException, ClassNotFoundException { + // Not called when user starts application for the first time + super.readExternal(objectInput); + ((BeanTreeView) this.jScrollPane1).setRootVisible(false); + } + + @Override + public void writeExternal(ObjectOutput objectOutput) throws IOException { + super.writeExternal(objectOutput); + } + + static final class ResolvableHelper implements Serializable { + + private static final long serialVersionUID = 1L; + + public Object readResolve() { + return OutlineTopComponent.getDefault(); + } + } + + /** This method is called from within the constructor to + * initialize the form. + * WARNING: Do NOT modify this code. The content of this method is + * always regenerated by the Form Editor. + */ + // //GEN-BEGIN:initComponents + private void initComponents() { + + jPanel2 = new javax.swing.JPanel(); + jScrollPane1 = new BeanTreeView(); + + setLayout(new java.awt.BorderLayout()); + + jPanel2.setLayout(new java.awt.BorderLayout()); + jPanel2.add(jScrollPane1, java.awt.BorderLayout.CENTER); + + add(jPanel2, java.awt.BorderLayout.CENTER); + }// //GEN-END:initComponents + + // Variables declaration - do not modify//GEN-BEGIN:variables + private javax.swing.JPanel jPanel2; + private javax.swing.JScrollPane jScrollPane1; + // End of variables declaration//GEN-END:variables +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/OutlineTopComponentSettings.xml 2009-08-01 04:18:12.196450511 +0100 @@ -0,0 +1,8 @@ + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/OutlineTopComponentWstcref.xml 2009-08-01 04:18:12.631207573 +0100 @@ -0,0 +1,7 @@ + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/StandardConfiguration.xml 2009-08-01 04:18:13.023522516 +0100 @@ -0,0 +1,13 @@ + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/StandardGroupOrganizer.java 2009-08-01 04:18:13.441043987 +0100 @@ -0,0 +1,62 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.coordinator; + +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.services.GroupOrganizer; +import com.sun.hotspot.igv.data.Pair; +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class StandardGroupOrganizer implements GroupOrganizer { + + public String getName() { + return "-- None --"; + } + + public List>> organize(List subFolders, List groups) { + + List>> result = new ArrayList>>(); + + if (groups.size() == 1 && subFolders.size() > 0) { + result.add(new Pair>("", groups)); + } else { + for (Group g : groups) { + List children = new ArrayList(); + children.add(g); + Pair> p = new Pair>(); + p.setLeft(g.getName()); + p.setRight(children); + result.add(p); + } + } + + return result; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/actions/Bundle.properties 2009-08-01 04:18:13.908355719 +0100 @@ -0,0 +1,18 @@ +CTL_EditFilterAction=Edit... +CTL_ImportAction=Open... +CTL_OpenGraphAction=View graph +CTL_DiffGraphAction=Difference to current graph +CTL_RemoveAction=Remove methods +CTL_ApplyFilterAction=Apply +CTL_FilterAction=Open Filter Window +CTL_AppliedFilterAction=Open AppliedFilter Window +CTL_OutlineAction=Open Outline Window +CTL_MoveFilterUpAction=Move upwards +CTL_MoveFilterDownAction=Move downwards +CTL_RemoveFilterAction=Remove +CTL_RemoveFilterSettingsAction=Remove filter setting +CTL_SaveAsAction=Save selected methods... +CTL_SaveAllAction=Save all... +CTL_SaveFilterSettingsAction=Save filter settings... +CTL_PropertiesAction=Open Properties Window +CTL_NewFilterAction=New filter... --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/actions/DiffGraphAction.java 2009-08-01 04:18:14.309063408 +0100 @@ -0,0 +1,70 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +package com.sun.hotspot.igv.coordinator.actions; + +import org.openide.nodes.Node; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.CookieAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class DiffGraphAction extends CookieAction { + + protected void performAction(Node[] activatedNodes) { + DiffGraphCookie c = activatedNodes[0].getCookie(DiffGraphCookie.class); + c.openDiff(); + } + + protected int mode() { + return CookieAction.MODE_EXACTLY_ONE; + } + + public String getName() { + return NbBundle.getMessage(DiffGraphAction.class, "CTL_DiffGraphAction"); + } + + protected Class[] cookieClasses() { + return new Class[]{ + DiffGraphCookie.class + }; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/coordinator/images/diff.gif"; + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/actions/DiffGraphCookie.java 2009-08-01 04:18:14.749110635 +0100 @@ -0,0 +1,56 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +package com.sun.hotspot.igv.coordinator.actions; + +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.services.GraphViewer; +import com.sun.hotspot.igv.difference.Difference; +import org.openide.nodes.Node; +import org.openide.util.Lookup; + +/** + * + * @author Thomas Wuerthinger + */ +public class DiffGraphCookie implements Node.Cookie { + + private InputGraph a; + private InputGraph b; + + public DiffGraphCookie(InputGraph a, InputGraph b) { + this.a = a; + this.b = b; + } + + public void openDiff() { + + final GraphViewer viewer = Lookup.getDefault().lookup(GraphViewer.class); + + if(viewer != null) { + InputGraph diffGraph = Difference.createDiffGraph(a, b); + viewer.view(diffGraph); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/actions/ImportAction.java 2009-08-01 04:18:15.177316902 +0100 @@ -0,0 +1,175 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +package com.sun.hotspot.igv.coordinator.actions; + +import com.sun.hotspot.igv.coordinator.OutlineTopComponent; +import com.sun.hotspot.igv.data.GraphDocument; +import com.sun.hotspot.igv.data.serialization.Parser; +import com.sun.hotspot.igv.settings.Settings; +import com.sun.hotspot.igv.data.serialization.XMLParser; +import java.awt.event.InputEvent; +import java.awt.event.KeyEvent; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import javax.swing.Action; +import javax.swing.JFileChooser; +import javax.swing.KeyStroke; +import javax.swing.filechooser.FileFilter; +import org.netbeans.api.progress.ProgressHandle; +import org.netbeans.api.progress.ProgressHandleFactory; +import org.openide.DialogDisplayer; +import org.openide.NotifyDescriptor; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.RequestProcessor; +import org.openide.util.actions.CallableSystemAction; +import org.openide.xml.XMLUtil; +import org.xml.sax.InputSource; +import org.xml.sax.SAXException; +import org.xml.sax.XMLReader; + +/** + * + * @author Thomas Wuerthinger + */ +public final class ImportAction extends CallableSystemAction { + + public static FileFilter getFileFilter() { + return new FileFilter() { + + public boolean accept(File f) { + return f.getName().toLowerCase().endsWith(".xml") || f.isDirectory(); + } + + public String getDescription() { + return "XML files (*.xml)"; + } + }; + } + + public void performAction() { + + JFileChooser fc = new JFileChooser(); + fc.setFileFilter(ImportAction.getFileFilter()); + fc.setCurrentDirectory(new File(Settings.get().get(Settings.DIRECTORY, Settings.DIRECTORY_DEFAULT))); + + if (fc.showOpenDialog(null) == JFileChooser.APPROVE_OPTION) { + File file = fc.getSelectedFile(); + + File dir = file; + if (!dir.isDirectory()) { + dir = dir.getParentFile(); + } + + Settings.get().put(Settings.DIRECTORY, dir.getAbsolutePath()); + + try { + final XMLReader reader = XMLUtil.createXMLReader(); + final FileInputStream inputStream = new FileInputStream(file); + final InputSource is = new InputSource(inputStream); + + final ProgressHandle handle = ProgressHandleFactory.createHandle("Opening file " + file.getName()); + final int basis = 1000; + handle.start(basis); + final int start = inputStream.available(); + + final XMLParser.ParseMonitor parseMonitor = new XMLParser.ParseMonitor() { + + public void setProgress(double d) { + try { + int curAvailable = inputStream.available(); + 
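+ // Note (added for clarity, not part of the original patch): progress is estimated from the bytes already consumed from the input stream, scaled to the 1000-unit ("basis") progress handle.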
int prog = (int) (basis * (double) (start - curAvailable) / (double) start); + handle.progress(prog); + } catch (IOException ex) { + } + } + + public void setState(String state) { + setProgress(0.0); + handle.progress(state); + } + }; + final Parser parser = new Parser(); + final OutlineTopComponent component = OutlineTopComponent.findInstance(); + + component.requestActive(); + + RequestProcessor.getDefault().post(new Runnable() { + + public void run() { + GraphDocument document = null; + try { + document = parser.parse(reader, is, parseMonitor); + parseMonitor.setState("Finishing"); + component.getDocument().addGraphDocument(document); + } catch (SAXException ex) { + String s = "Exception during parsing the XML file, could not load document!"; + if (ex instanceof XMLParser.MissingAttributeException) { + XMLParser.MissingAttributeException e = (XMLParser.MissingAttributeException) ex; + s += "\nMissing attribute \"" + e.getAttributeName() + "\""; + } + ex.printStackTrace(); + NotifyDescriptor d = new NotifyDescriptor.Message(s, NotifyDescriptor.ERROR_MESSAGE); + DialogDisplayer.getDefault().notify(d); + } + handle.finish(); + } + }); + + } catch (SAXException ex) { + ex.printStackTrace(); + } catch (FileNotFoundException ex) { + ex.printStackTrace(); + } catch (IOException ex) { + ex.printStackTrace(); + } + } + } + + public String getName() { + return NbBundle.getMessage(ImportAction.class, "CTL_ImportAction"); + } + + public ImportAction() { + putValue(Action.SHORT_DESCRIPTION, "Open an XML graph document"); + putValue(Action.ACCELERATOR_KEY, KeyStroke.getKeyStroke(KeyEvent.VK_O, InputEvent.CTRL_MASK)); + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/coordinator/images/import.gif"; + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/actions/OutlineAction.java 2009-08-01 04:18:15.650149258 +0100 @@ -0,0 +1,48 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +package com.sun.hotspot.igv.coordinator.actions; + +import com.sun.hotspot.igv.coordinator.*; +import java.awt.event.ActionEvent; +import javax.swing.AbstractAction; +import org.openide.util.NbBundle; +import org.openide.windows.TopComponent; + +/** + * + * @author Thomas Wuerthinger + */ +public class OutlineAction extends AbstractAction { + + public OutlineAction() { + super(NbBundle.getMessage(OutlineAction.class, "CTL_OutlineAction")); + } + + public void actionPerformed(ActionEvent evt) { + TopComponent win = OutlineTopComponent.findInstance(); + win.open(); + win.requestActive(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/actions/RemoveAction.java 2009-08-01 04:18:16.085212658 +0100 @@ -0,0 +1,73 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +package com.sun.hotspot.igv.coordinator.actions; + +import javax.swing.Action; +import org.openide.nodes.Node; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.NodeAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class RemoveAction extends NodeAction { + + protected void performAction(Node[] activatedNodes) { + for (Node n : activatedNodes) { + RemoveCookie removeCookie = n.getCookie(RemoveCookie.class); + if (removeCookie != null) { + removeCookie.remove(); + } + } + } + + public RemoveAction() { + putValue(Action.SHORT_DESCRIPTION, "Remove"); + } + + public String getName() { + return NbBundle.getMessage(RemoveAction.class, "CTL_RemoveAction"); + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/coordinator/images/remove.gif"; + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + protected boolean enable(Node[] nodes) { + return nodes.length > 0; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/actions/RemoveAllAction.java 2009-08-01 04:18:16.522015817 +0100 @@ -0,0 +1,70 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.coordinator.actions; + +import com.sun.hotspot.igv.coordinator.OutlineTopComponent; +import java.awt.event.InputEvent; +import java.awt.event.KeyEvent; +import javax.swing.Action; +import javax.swing.KeyStroke; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class RemoveAllAction extends CallableSystemAction { + + + public String getName() { + return NbBundle.getMessage(RemoveAllAction.class, "CTL_ImportAction"); + } + + public RemoveAllAction() { + putValue(Action.SHORT_DESCRIPTION, "Remove all methods"); + putValue(Action.ACCELERATOR_KEY, KeyStroke.getKeyStroke(KeyEvent.VK_SHIFT, InputEvent.CTRL_MASK)); + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/coordinator/images/removeall.gif"; + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + @Override + public void performAction() { + OutlineTopComponent.findInstance().clear(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/actions/RemoveCookie.java 2009-08-01 04:18:16.954209515 +0100 @@ -0,0 +1,36 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ + +package com.sun.hotspot.igv.coordinator.actions; + +import org.openide.nodes.Node; + +/** + * + * @author Thomas Wuerthinger + */ +public interface RemoveCookie extends Node.Cookie { + void remove(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/actions/SaveAllAction.java 2009-08-01 04:18:17.367847233 +0100 @@ -0,0 +1,69 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.coordinator.actions; + +import com.sun.hotspot.igv.coordinator.OutlineTopComponent; +import java.awt.event.InputEvent; +import java.awt.event.KeyEvent; +import javax.swing.Action; +import javax.swing.KeyStroke; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class SaveAllAction extends CallableSystemAction { + + public void performAction() { + final OutlineTopComponent component = OutlineTopComponent.findInstance(); + SaveAsAction.save(component.getDocument()); + } + + public String getName() { + return NbBundle.getMessage(SaveAllAction.class, "CTL_SaveAllAction"); + } + + public SaveAllAction() { + putValue(Action.SHORT_DESCRIPTION, "Save all methods to XML file"); + putValue(Action.ACCELERATOR_KEY, KeyStroke.getKeyStroke(KeyEvent.VK_S, InputEvent.CTRL_MASK)); + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/coordinator/images/saveall.gif"; + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/actions/SaveAsAction.java 2009-08-01 04:18:17.796753638 +0100 @@ -0,0 +1,122 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +package com.sun.hotspot.igv.coordinator.actions; + +import com.sun.hotspot.igv.data.GraphDocument; +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.serialization.Printer; +import com.sun.hotspot.igv.settings.Settings; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.Writer; +import javax.swing.JFileChooser; +import org.openide.nodes.Node; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.CookieAction; +import org.openide.util.actions.NodeAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class SaveAsAction extends NodeAction { + + protected void performAction(Node[] activatedNodes) { + + GraphDocument doc = new GraphDocument(); + for (Node n : activatedNodes) { + Group group = n.getLookup().lookup(Group.class); + doc.addGroup(group); + } + + save(doc); + } + + public static void save(GraphDocument doc) { + JFileChooser fc = new JFileChooser(); + fc.setFileFilter(ImportAction.getFileFilter()); + fc.setCurrentDirectory(new File(Settings.get().get(Settings.DIRECTORY, Settings.DIRECTORY_DEFAULT))); + + if (fc.showSaveDialog(null) == JFileChooser.APPROVE_OPTION) { + File file = fc.getSelectedFile(); + if (!file.getName().contains(".")) { + file = new File(file.getAbsolutePath() + ".xml"); + } + + File dir = file; + if (!dir.isDirectory()) { + dir = dir.getParentFile(); + } + Settings.get().put(Settings.DIRECTORY, dir.getAbsolutePath()); + try { + Writer writer = new OutputStreamWriter(new FileOutputStream(file)); + Printer p = new Printer(); + p.export(writer, doc); + writer.close(); + } catch (FileNotFoundException e) { + e.printStackTrace(); + } catch (IOException e) { + e.printStackTrace(); + + } + } + } + + protected int mode() { + return CookieAction.MODE_SOME; + } + + public String getName() { + return NbBundle.getMessage(SaveAsAction.class, "CTL_SaveAsAction"); + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/coordinator/images/save.gif"; + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + protected boolean enable(Node[] nodes) { + + int cnt = 0; + for (Node n : nodes) { + cnt += n.getLookup().lookupAll(Group.class).size(); + } + + return cnt > 0; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/actions/StructuredViewAction.java 2009-08-01 04:18:18.230512278 +0100 @@ -0,0 +1,180 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.coordinator.actions; + +import com.sun.hotspot.igv.coordinator.OutlineTopComponent; +import com.sun.hotspot.igv.data.services.GroupOrganizer; +import java.awt.Component; +import java.awt.Image; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.awt.event.ItemEvent; +import java.awt.event.ItemListener; +import java.awt.image.BufferedImage; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import javax.swing.Action; +import javax.swing.ButtonGroup; +import javax.swing.ImageIcon; +import javax.swing.JButton; +import javax.swing.JCheckBoxMenuItem; +import javax.swing.JMenuItem; +import javax.swing.JPopupMenu; +import javax.swing.event.PopupMenuEvent; +import javax.swing.event.PopupMenuListener; +import org.openide.awt.DropDownButtonFactory; +import org.openide.util.HelpCtx; +import org.openide.util.Lookup; +import org.openide.util.Utilities; +import org.openide.util.actions.CallableSystemAction; + +public class StructuredViewAction extends CallableSystemAction { + + private static JButton dropDownButton; + private static ButtonGroup buttonGroup; + private static JPopupMenu popup; + private MyMenuItemListener menuItemListener; + private Map map; + + public StructuredViewAction() { + + putValue(Action.SHORT_DESCRIPTION, "Cluster nodes into blocks"); + } + + @Override + public Component getToolbarPresenter() { + + Image iconImage = Utilities.loadImage("com/sun/hotspot/igv/coordinator/images/structure.gif"); + ImageIcon icon = new ImageIcon(iconImage); + + popup = new JPopupMenu(); + + menuItemListener = new MyMenuItemListener(); + + buttonGroup = new ButtonGroup(); + + Collection organizersCollection = Lookup.getDefault().lookupAll(GroupOrganizer.class); + + List organizers = new ArrayList(organizersCollection); + Collections.sort(organizers, new Comparator() { + public int compare(GroupOrganizer a, GroupOrganizer b) { + return a.getName().compareTo(b.getName()); + } + }); + + map = new HashMap(); + + boolean first = true; + for(GroupOrganizer organizer : organizers) { + JCheckBoxMenuItem item = new JCheckBoxMenuItem(organizer.getName()); + map.put(item, organizer); + item.addActionListener(menuItemListener); + buttonGroup.add(item); + popup.add(item); + 
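+ // Note (added for clarity, not part of the original patch): the first organizer in the sorted list is pre-selected so the drop-down always has an active grouping mode.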
if(first) { + item.setSelected(true); + first = false; + } + } + + dropDownButton = DropDownButtonFactory.createDropDownButton( + new ImageIcon( + new BufferedImage(32, 32, BufferedImage.TYPE_BYTE_GRAY)), + popup); + + dropDownButton.setIcon(icon); + + dropDownButton.setToolTipText("Insert Layer Registration"); + + dropDownButton.addItemListener(new ItemListener() { + + public void itemStateChanged(ItemEvent e) { + int state = e.getStateChange(); + if (state == ItemEvent.SELECTED) { + performAction(); + } + } + }); + + dropDownButton.addActionListener(new ActionListener() { + public void actionPerformed(ActionEvent e) { + performAction(); + } + }); + + popup.addPopupMenuListener(new PopupMenuListener() { + + public void popupMenuCanceled(PopupMenuEvent e) { + dropDownButton.setSelected(false); + } + + public void popupMenuWillBecomeInvisible(PopupMenuEvent e) { + dropDownButton.setSelected(false); + } + + public void popupMenuWillBecomeVisible(PopupMenuEvent e) { + dropDownButton.setSelected(true); + } + }); + + return dropDownButton; + + } + + private class MyMenuItemListener implements ActionListener { + + public void actionPerformed(ActionEvent ev) { + JMenuItem item = (JMenuItem) ev.getSource(); + GroupOrganizer organizer = map.get(item); + assert organizer != null : "Organizer must exist!"; + OutlineTopComponent.findInstance().setOrganizer(organizer); + } + } + + + @Override + public void performAction() { + popup.show(dropDownButton, 0, dropDownButton.getHeight()); + } + + public String getName() { + return "Structured View"; + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/customLeftWsmode.xml 2009-08-01 04:18:18.655505118 +0100 @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/images/diff.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/images/folder.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/images/graph.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/images/import.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/images/remove.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/images/removeall.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/images/save.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/images/saveall.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/images/structure.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/images/structured.gif differ --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Coordinator/src/com/sun/hotspot/igv/coordinator/layer.xml 
2009-08-01 04:18:23.079125068 +0100 @@ -0,0 +1,114 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/build.xml 2009-08-01 04:18:23.549032179 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.data. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/manifest.mf 2009-08-01 04:18:23.948038449 +0100 @@ -0,0 +1,5 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.data +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/data/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/nbproject/build-impl.xml 2009-08-01 04:18:24.378543646 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/nbproject/genfiles.properties 2009-08-01 04:18:24.792890640 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=a403efd8 +build.xml.script.CRC32=b87f73ba +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. +nbproject/build-impl.xml.data.CRC32=a403efd8 +nbproject/build-impl.xml.script.CRC32=cc649146 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/nbproject/platform.properties 2009-08-01 04:18:25.193260066 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/nbproject/project.properties 2009-08-01 04:18:25.605196019 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/nbproject/project.xml 2009-08-01 04:18:25.991845173 +0100 @@ -0,0 +1,16 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.data + + + + com.sun.hotspot.igv.data + com.sun.hotspot.igv.data.serialization + com.sun.hotspot.igv.data.services + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ 
new/hotspot/src/share/tools/IdealGraphVisualizer/Data/nbproject/suite.properties 2009-08-01 04:18:26.385032602 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/Bundle.properties 2009-08-01 04:18:26.936267316 +0100 @@ -0,0 +1 @@ +OpenIDE-Module-Name=Data --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/ChangedEvent.java 2009-08-01 04:18:27.338035446 +0100 @@ -0,0 +1,44 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data; + +/** + * + * @author Thomas Wuerthinger + */ +public class ChangedEvent extends Event> { + + private T object; + + public ChangedEvent() { + } + + public ChangedEvent(T object) { + this.object = object; + } + + protected void fire(ChangedListener l) { + l.changed(object); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/ChangedEventProvider.java 2009-08-01 04:18:27.755363279 +0100 @@ -0,0 +1,34 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ +package com.sun.hotspot.igv.data; + +/** + * + * @author Thomas Wuerthinger + */ +public interface ChangedEventProvider { + + public ChangedEvent getChangedEvent(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/ChangedListener.java 2009-08-01 04:18:28.164850259 +0100 @@ -0,0 +1,33 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data; + +/** + * + * @author Thomas Wuerthinger + */ +public interface ChangedListener { + + public void changed(T source); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/Event.java 2009-08-01 04:18:28.557229714 +0100 @@ -0,0 +1,57 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.data; + +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public abstract class Event { + + private List listener; + + public Event() { + listener = new ArrayList(); + } + + public void addListener(L l) { + listener.add(l); + } + + public void removeListener(L l) { + listener.remove(l); + } + + public void fire() { + List tmpList = new ArrayList(listener); + for (L l : tmpList) { + fire(l); + } + } + + protected abstract void fire(L l); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/GraphDocument.java 2009-08-01 04:18:28.974612984 +0100 @@ -0,0 +1,91 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class GraphDocument extends Properties.Entity implements ChangedEventProvider { + + private List groups; + private ChangedEvent changedEvent; + + public GraphDocument() { + groups = new ArrayList(); + changedEvent = new ChangedEvent(this); + } + + public void clear() { + groups.clear(); + getChangedEvent().fire(); + } + + public ChangedEvent getChangedEvent() { + return changedEvent; + } + + public List getGroups() { + return Collections.unmodifiableList(groups); + } + + public void addGroup(Group group) { + group.setDocument(this); + groups.add(group); + getChangedEvent().fire(); + } + + public void removeGroup(Group group) { + if (groups.contains(group)) { + group.setDocument(null); + groups.remove(group); + getChangedEvent().fire(); + } + } + + public void addGraphDocument(GraphDocument document) { + for (Group g : document.groups) { + this.addGroup(g); + } + document.clear(); + getChangedEvent().fire(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append("GraphDocument: " + getProperties().toString() + " \n\n"); + for (Group g : getGroups()) { + sb.append(g.toString()); + sb.append("\n\n"); + } + + return sb.toString(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/Group.java 2009-08-01 04:18:29.383766357 +0100 @@ -0,0 +1,141 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data; + +import com.sun.hotspot.igv.data.ChangedEvent; +import com.sun.hotspot.igv.data.ChangedEventProvider; +import com.sun.hotspot.igv.data.Properties; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class Group extends Properties.Entity implements ChangedEventProvider { + + private List graphs; + private transient ChangedEvent changedEvent; + private GraphDocument document; + private InputMethod method; + private String assembly; + + public Group() { + graphs = new ArrayList(); + init(); + } + + private void init() { + changedEvent = new ChangedEvent(this); + } + + public void fireChangedEvent() { + changedEvent.fire(); + } + + public void setAssembly(String s) { + this.assembly = s; + } + + public String getAssembly() { + return assembly; + } + + public void setMethod(InputMethod method) { + this.method = method; + } + + public InputMethod getMethod() { + return method; + } + + void setDocument(GraphDocument document) { + this.document = document; + } + + public GraphDocument getDocument() { + return document; + } + + public ChangedEvent getChangedEvent() { + return changedEvent; + } + + public List getGraphs() { + return Collections.unmodifiableList(graphs); + } + + public void addGraph(InputGraph g) { + assert g != null; + assert !graphs.contains(g); + graphs.add(g); + changedEvent.fire(); + } + + public void removeGraph(InputGraph g) { + int index = graphs.indexOf(g); + if (index != -1) { + graphs.remove(g); + changedEvent.fire(); + } + } + + public Set getAllNodes() { + Set result = new HashSet(); + for (InputGraph g : graphs) { + Set ids = g.getNodesAsSet(); + result.addAll(g.getNodesAsSet()); + for (Integer i : ids) { + result.add(-i); + } + } + return result; + } + + public InputGraph getLastAdded() { + if (graphs.size() == 0) { + return null; + } + return graphs.get(graphs.size() - 1); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Group " + getProperties().toString() + "\n"); + for (InputGraph g : graphs) { + sb.append(g.toString()); + sb.append("\n"); + } + return sb.toString(); + } + + public String getName() { + return getProperties().get("name"); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/InputBlock.java 2009-08-01 04:18:30.429019880 +0100 @@ -0,0 +1,140 @@ +/* + * Copyright 2008 Sun Microsystems, 
Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data; + +import java.awt.Rectangle; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class InputBlock { + + private List nodes; + private List successorNames; + private String name; + private InputGraph graph; + private Rectangle bounds; + private Set successors; + private Set predecessors; + private Set inputs; + private Set outputs; + + public InputBlock(InputGraph graph, String name) { + this.graph = graph; + this.name = name; + nodes = new ArrayList(); + successorNames = new ArrayList(); + successors = new HashSet(); + predecessors = new HashSet(); + inputs = new HashSet(); + outputs = new HashSet(); + } + + public void removeSuccessor(InputBlock b) { + if (successors.contains(b)) { + successors.remove(b); + b.predecessors.remove(this); + InputBlockEdge e = new InputBlockEdge(this, b); + assert outputs.contains(e); + outputs.remove(e); + assert b.inputs.contains(e); + b.inputs.remove(e); + } + } + + public String getName() { + return name; + } + + public void setName(String s) { + name = s; + } + + public List getNodes() { + return Collections.unmodifiableList(nodes); + } + + public void addNode(int id) { + InputNode n = graph.getNode(id); + assert n != null; + graph.setBlock(n, this); + addNode(graph.getNode(id)); + } + + public void addNode(InputNode node) { + assert !nodes.contains(node); + nodes.add(node); + } + + public Set getPredecessors() { + return Collections.unmodifiableSet(predecessors); + } + + public Set getSuccessors() { + return Collections.unmodifiableSet(successors); + } + + public Set getInputs() { + return Collections.unmodifiableSet(inputs); + } + + public Set getOutputs() { + return Collections.unmodifiableSet(outputs); + } + + // resolveBlockLinks must be called afterwards + public void addSuccessor(String name) { + successorNames.add(name); + } + + public void resolveBlockLinks() { + for (String s : successorNames) { + InputBlock b = graph.getBlock(s); + addSuccessor(b); + } + + successorNames.clear(); + } + + public void addSuccessor(InputBlock b) { + if (!successors.contains(b)) { + successors.add(b); + b.predecessors.add(this); + InputBlockEdge e = new InputBlockEdge(this, b); + outputs.add(e); + b.inputs.add(e); + } + } + + @Override + public String toString() { + return this.getName(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ 
new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/InputBlockEdge.java 2009-08-01 04:18:30.888616651 +0100 @@ -0,0 +1,65 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data; + +/** + * + * @author Thomas Wuerthinger + */ +public class InputBlockEdge { + + private InputBlock from; + private InputBlock to; + + public InputBlockEdge(InputBlock from, InputBlock to) { + assert from != null; + assert to != null; + this.from = from; + this.to = to; + } + + public InputBlock getFrom() { + return from; + } + + public InputBlock getTo() { + return to; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof InputBlockEdge && obj != null) { + InputBlockEdge e = (InputBlockEdge) obj; + return e.from.equals(from) && e.to.equals(to); + } + return super.equals(obj); + } + + @Override + public int hashCode() { + int hash = from.hashCode(); + hash = 59 * hash + to.hashCode(); + return hash; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/InputBytecode.java 2009-08-01 04:18:31.330895833 +0100 @@ -0,0 +1,56 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.data; + +/** + * + * @author Thomas Wuerthinger + */ +public class InputBytecode { + + private int bci; + private String name; + private InputMethod inlined; + + public InputBytecode(int bci, String name) { + this.bci = bci; + this.name = name; + } + + public InputMethod getInlined() { + return inlined; + } + + public void setInlined(InputMethod inlined) { + this.inlined = inlined; + } + + public int getBci() { + return bci; + } + + public String getName() { + return name; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/InputEdge.java 2009-08-01 04:18:31.756515496 +0100 @@ -0,0 +1,92 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data; + +/** + * + * @author Thomas Wuerthinger + */ +public class InputEdge { + + public enum State { + + SAME, + NEW, + DELETED + } + private char toIndex; + private int from; + private int to; + private State state; + + public InputEdge(char toIndex, int from, int to) { + this.toIndex = toIndex; + this.from = from; + this.to = to; + this.state = State.SAME; + } + + public State getState() { + return state; + } + + public void setState(State x) { + this.state = x; + } + + public char getToIndex() { + return toIndex; + } + + public String getName() { + return "in" + toIndex; + } + + public int getFrom() { + return from; + } + + public int getTo() { + return to; + } + + @Override + public boolean equals(Object o) { + if (o == null || !(o instanceof InputEdge)) { + return false; + } + InputEdge conn2 = (InputEdge) o; + return conn2.toIndex == toIndex && conn2.from == from && conn2.to == to; + } + + @Override + public String toString() { + return "Edge from " + from + " to " + to + "(" + (int) toIndex + ") "; + } + + @Override + public int hashCode() { + return (from << 20 | to << 8 | toIndex); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/InputGraph.java 2009-08-01 04:18:32.192404105 +0100 @@ -0,0 +1,248 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.HashMap; +import java.util.List; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class InputGraph extends Properties.Entity { + + private HashMap nodes; + private ArrayList edges; + private Group parent; + private HashMap blocks; + private HashMap nodeToBlock; + private boolean isDifferenceGraph; + + public InputGraph(Group parent) { + this(parent, null); + } + + public InputGraph(Group parent, InputGraph last) { + this(parent, last, ""); + } + + private void clearBlocks() { + blocks.clear(); + nodeToBlock.clear(); + } + + public InputGraph(Group parent, InputGraph last, String name) { + this.parent = parent; + setName(name); + nodes = new HashMap(); + edges = new ArrayList(); + blocks = new HashMap(); + nodeToBlock = new HashMap(); + if (last != null) { + + for (InputNode n : last.getNodes()) { + addNode(n); + } + + for (InputEdge c : last.getEdges()) { + addEdge(c); + } + } + } + + public void schedule(Collection newBlocks) { + clearBlocks(); + InputBlock noBlock = new InputBlock(this, "no block"); + Set scheduledNodes = new HashSet(); + + for (InputBlock b : newBlocks) { + for (InputNode n : b.getNodes()) { + assert !scheduledNodes.contains(n); + scheduledNodes.add(n); + } + } + + for (InputNode n : this.getNodes()) { + assert nodes.get(n.getId()) == n; + if (!scheduledNodes.contains(n)) { + noBlock.addNode(n.getId()); + } + } + + if (noBlock.getNodes().size() != 0) { + newBlocks.add(noBlock); + } + for (InputBlock b : newBlocks) { + addBlock(b); + } + + for (InputNode n : this.getNodes()) { + assert this.getBlock(n) != null; + } + } + + public void setBlock(InputNode node, InputBlock block) { + nodeToBlock.put(node.getId(), block); + } + + public InputBlock getBlock(int nodeId) { + return nodeToBlock.get(nodeId); + } + + public InputBlock getBlock(InputNode node) { + return getBlock(node.getId()); + } + + public InputGraph getNext() { + List list = parent.getGraphs(); + if (!list.contains(this)) { + return null; + } + int index = list.indexOf(this); + if (index == list.size() - 1) { + return null; + } else { + return list.get(index + 1); + } + } + + public InputGraph getPrev() { + List list = parent.getGraphs(); + if (!list.contains(this)) { + return null; + } + int index = list.indexOf(this); + if (index == 0) { + return null; + } else { + return list.get(index - 1); + } + } + + public String getName() { + return getProperties().get("name"); + } + + public String getAbsoluteName() { + String result = getName(); + if (this.parent != null) { + result = parent.getName() + ": " + result; + } + return result; + } + + public Collection getNodes() { + return 
Collections.unmodifiableCollection(nodes.values()); + } + + public Set getNodesAsSet() { + return Collections.unmodifiableSet(nodes.keySet()); + } + + public Collection getBlocks() { + return Collections.unmodifiableCollection(blocks.values()); + } + + public void addNode(InputNode node) { + nodes.put(node.getId(), node); + } + + public InputNode getNode(int id) { + return nodes.get(id); + } + + public InputNode removeNode(int index) { + return nodes.remove(index); + } + + public Collection getEdges() { + return Collections.unmodifiableList(edges); + } + + public void removeEdge(InputEdge c) { + assert edges.contains(c); + edges.remove(c); + assert !edges.contains(c); + } + + public void addEdge(InputEdge c) { + assert !edges.contains(c); + edges.add(c); + assert edges.contains(c); + } + + public Group getGroup() { + return parent; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Graph " + getName() + " " + getProperties().toString() + "\n"); + for (InputNode n : nodes.values()) { + sb.append(n.toString()); + sb.append("\n"); + } + + for (InputEdge c : edges) { + sb.append(c.toString()); + sb.append("\n"); + } + return sb.toString(); + } + + public void addBlock(InputBlock b) { + blocks.put(b.getName(), b); + for (InputNode n : b.getNodes()) { + this.nodeToBlock.put(n.getId(), b); + } + } + + public void resolveBlockLinks() { + for (InputBlock b : blocks.values()) { + b.resolveBlockLinks(); + } + } + + public void setName(String s) { + getProperties().setProperty("name", s); + } + + public InputBlock getBlock(String s) { + return blocks.get(s); + } + + public boolean isDifferenceGraph() { + return this.isDifferenceGraph; + } + + public void setIsDifferenceGraph(boolean b) { + isDifferenceGraph = b; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/InputMethod.java 2009-08-01 04:18:32.609952488 +0100 @@ -0,0 +1,127 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
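A small sketch of how a graph is assembled and scheduled with the InputGraph API above (illustrative only; Group is defined elsewhere in this change and is assumed to exist as "group"):

    InputGraph g = new InputGraph(group, null, "After Parsing");
    g.addNode(new InputNode(1));
    g.addNode(new InputNode(2));
    g.addEdge(new InputEdge((char) 0, 1, 2));   // input slot 0 of node 2 is fed by node 1
    // schedule() assigns every node not covered by the supplied blocks to a
    // synthetic "no block" and registers all blocks with the graph
    java.util.List blocks = new java.util.ArrayList();
    g.schedule(blocks);
    assert g.getBlock(1) != null;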
+ * + */ +package com.sun.hotspot.igv.data; + +import com.sun.hotspot.igv.data.Properties; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class InputMethod extends Properties.Entity { + + private String name; + private int bci; + private String shortName; + private List inlined; + private InputMethod parentMethod; + private Group group; + private List bytecodes; + + /** Creates a new instance of InputMethod */ + public InputMethod(Group parent, String name, String shortName, int bci) { + this.group = parent; + this.name = name; + this.bci = bci; + this.shortName = shortName; + inlined = new ArrayList(); + bytecodes = new ArrayList(); + } + + public List getBytecodes() { + return Collections.unmodifiableList(bytecodes); + } + + public List getInlined() { + return Collections.unmodifiableList(inlined); + } + + public void addInlined(InputMethod m) { + + // assert bci unique + for (InputMethod m2 : inlined) { + assert m2.getBci() != m.getBci(); + } + + inlined.add(m); + assert m.parentMethod == null; + m.parentMethod = this; + + for (InputBytecode bc : bytecodes) { + if (bc.getBci() == m.getBci()) { + bc.setInlined(m); + } + } + } + + public Group getGroup() { + return group; + } + + public String getShortName() { + return shortName; + } + + public void setBytecodes(String text) { + + String[] strings = text.split("\n"); + int oldNumber = -1; + for (String s : strings) { + + if (s.length() > 0 && Character.isDigit(s.charAt(0))) { + s = s.trim(); + int spaceIndex = s.indexOf(' '); + String numberString = s.substring(0, spaceIndex); + String tmpName = s.substring(spaceIndex + 1, s.length()); + + int number = -1; + number = Integer.parseInt(numberString); + + // assert correct order of bytecodes + assert number > oldNumber; + + InputBytecode bc = new InputBytecode(number, tmpName); + bytecodes.add(bc); + + for (InputMethod m : inlined) { + if (m.getBci() == number) { + bc.setInlined(m); + break; + } + } + } + } + } + + public String getName() { + return name; + } + + public int getBci() { + return bci; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/InputNode.java 2009-08-01 04:18:33.041081617 +0100 @@ -0,0 +1,73 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
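The text handed to setBytecodes above is parsed line by line: a line starting with a digit is split at its first space into a BCI and a mnemonic, and any previously registered inlined method with a matching BCI is attached to that bytecode. A hypothetical example (the method name and the BCI of -1 for the root method are illustrative):

    InputMethod m = new InputMethod(group, "java.lang.String.hashCode()", "hashCode", -1);
    m.setBytecodes("0 iload_1\n1 ifne\n4 aload_0\n");
    assert m.getBytecodes().size() == 3;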
+ * + */ +package com.sun.hotspot.igv.data; + +/** + * + * @author Thomas Wuerthinger + */ +public class InputNode extends Properties.Entity { + + private int id; + + public InputNode(InputNode n) { + super(n); + setId(n.id); + } + + public InputNode(int id) { + setId(id); + } + + public void setId(int id) { + this.id = id; + getProperties().setProperty("id", "" + id); + } + + public int getId() { + return id; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof InputNode)) { + return false; + } + InputNode n = (InputNode) o; + if (n.id != id) { + return false; + } + return getProperties().equals(n.getProperties()); + } + + @Override + public int hashCode() { + return id; + } + + @Override + public String toString() { + return "Node " + id + " " + getProperties().toString(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/Pair.java 2009-08-01 04:18:33.466366033 +0100 @@ -0,0 +1,72 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data; + +/** + * + * @author Thomas Wuerthinger + */ +public class Pair { + + private L l; + private R r; + + public Pair() { + } + + public Pair(L l, R r) { + this.l = l; + this.r = r; + } + + public L getLeft() { + return l; + } + + public void setLeft(L l) { + this.l = l; + } + + public R getRight() { + return r; + } + + public void setRight(R r) { + this.r = r; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Pair)) { + return false; + } + Pair obj = (Pair) o; + return l.equals(obj.l) && r.equals(obj.r); + } + + @Override + public int hashCode() { + return l.hashCode() * 71 + r.hashCode(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/Properties.java 2009-08-01 04:18:33.934073724 +0100 @@ -0,0 +1,336 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + + +/** + * + * @author Thomas Wuerthinger + */ +public class Properties implements Serializable, Iterable { + + public static final long serialVersionUID = 1L; + private String[] map = new String[4]; + + public Properties() { + } + + @Override + public boolean equals(java.lang.Object o) { + if (!(o instanceof Properties)) { + return false; + } + + Properties p = (Properties) o; + + for (Property prop : this) { + String value = p.get(prop.getName()); + if (value == null || !value.equals(prop.getValue())) { + return false; + } + } + return true; + } + + @Override + public int hashCode() { + int hash = 5; + hash = 83 * hash + (this.map != null ? this.map.hashCode() : 0); + return hash; + } + + public Properties(String name, String value) { + this(); + this.setProperty(name, value); + } + + public Properties(String name, String value, String name1, String value1) { + this(name, value); + this.setProperty(name1, value1); + } + + public Properties(String name, String value, String name1, String value1, String name2, String value2) { + this(name, value, name1, value1); + this.setProperty(name2, value2); + } + + public Properties(Properties p) { + map = new String[p.map.length]; + System.arraycopy(map, 0, p.map, 0, p.map.length); + } + + public static class Entity implements Provider { + + private Properties properties; + + public Entity() { + properties = new Properties(); + } + + public Entity(Properties.Entity object) { + properties = new Properties(object.getProperties()); + } + + public Properties getProperties() { + return properties; + } + } + + private String getProperty(String key) { + for (int i = 0; i < map.length; i += 2) + if (map[i] != null && map[i].equals(key)) { + return map[i + 1]; + } + return null; + } + + public interface PropertyMatcher { + + String getName(); + + boolean match(String value); + } + + public static class InvertPropertyMatcher implements PropertyMatcher { + + private PropertyMatcher matcher; + + public InvertPropertyMatcher(PropertyMatcher matcher) { + this.matcher = matcher; + } + + public String getName() { + return matcher.getName(); + } + + public boolean match(String p) { + return !matcher.match(p); + } + } + + public static class StringPropertyMatcher implements PropertyMatcher { + + private String name; + private String value; + + public StringPropertyMatcher(String name, String value) { + this.name = name; + this.value = value; + } + + public String getName() { + return name; + } + + public boolean match(String p) { + return p.equals(value); + } + } + + public static class RegexpPropertyMatcher implements PropertyMatcher { + + private String name; + private Pattern valuePattern; + + public RegexpPropertyMatcher(String name, String value) { + this.name = name; + 
valuePattern = Pattern.compile(value); + } + + public String getName() { + return name; + } + + public boolean match(String p) { + Matcher m = valuePattern.matcher(p); + return m.matches(); + } + } + + public Property selectSingle(PropertyMatcher matcher) { + String value = null; + for (int i = 0; i < map.length; i += 2) { + if (map[i] != null && matcher.getName().equals(map[i])) { + value = map[i + 1]; + break; + } + } + if (value != null && matcher.match(value)) { + return new Property(matcher.getName(), value); + } else { + return null; + } + } + + public interface Provider { + + public Properties getProperties(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("["); + for (int i = 0; i < map.length; i += 2) { + if (map[i + 1] != null) { + String p = map[i + 1]; + sb.append(map[i] + " = " + map[i + 1] + "; "); + } + } + return sb.append("]").toString(); + } + + public static class PropertySelector { + + private Collection objects; + + public PropertySelector(Collection objects) { + this.objects = objects; + } + + public T selectSingle(final String name, final String value) { + return selectSingle(new StringPropertyMatcher(name, value)); + } + + public T selectSingle(PropertyMatcher matcher) { + + for (T t : objects) { + Property p = t.getProperties().selectSingle(matcher); + if (p != null) { + return t; + } + } + + return null; + } + + public List selectMultiple(final String name, final String value) { + return selectMultiple(new StringPropertyMatcher(name, value)); + } + + public List selectMultiple(PropertyMatcher matcher) { + List result = new ArrayList(); + for (T t : objects) { + Property p = t.getProperties().selectSingle(matcher); + if (p != null) { + result.add(t); + } + } + return result; + } + } + + public String get(String key) { + for (int i = 0; i < map.length; i += 2) { + if (map[i] != null && map[i].equals(key)) { + return map[i + 1]; + } + } + return null; + } + + public void setProperty(String name, String value) { + for (int i = 0; i < map.length; i += 2) { + if (map[i] != null && map[i].equals(name)) { + String p = map[i + 1]; + if (value == null) { + // remove this property + map[i] = null; + map[i + 1] = null; + } else { + map[i + 1] = value; + } + return; + } + } + if (value == null) { + return; + } + for (int i = 0; i < map.length; i += 2) { + if (map[i] == null) { + map[i] = name; + map[i + 1] = value; + return; + } + } + String[] newMap = new String[map.length + 4]; + System.arraycopy(map, 0, newMap, 0, map.length); + newMap[map.length] = name; + newMap[map.length + 1] = value; + map = newMap; + } + + public Iterator getProperties() { + return iterator(); + } + + public void add(Properties properties) { + for (Property p : properties) { + add(p); + } + } + + public void add(Property property) { + assert property.getName() != null; + assert property.getValue() != null; + setProperty(property.getName(), property.getValue()); + } + class PropertiesIterator implements Iterator, Iterable { + public Iterator iterator() { + return this; + } + + int index; + + public boolean hasNext() { + while (index < map.length && map[index + 1] == null) + index += 2; + return index < map.length; + } + + public Property next() { + if (index < map.length) { + index += 2; + return new Property(map[index - 2], map[index - 1]); + } + return null; + } + + public void remove() { + throw new UnsupportedOperationException("Not supported yet."); + } + + } + public Iterator iterator() { + return new PropertiesIterator(); + } +} --- /dev/null 
2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/Property.java 2009-08-01 04:18:34.368437155 +0100 @@ -0,0 +1,79 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data; + +import java.io.Serializable; + +/** + * + * @author Thomas Wuerthinger + */ +public class Property implements Serializable { + + public static final long serialVersionUID = 1L; + + private String name; + private String value; + + private Property() { + this(null, null); + } + + private Property(Property p) { + this(p.getName(), p.getValue()); + } + + private Property(String name) { + this(name, null); + } + + public Property(String name, String value) { + this.name = name; + this.value = value; + } + + public String getName() { + return name; + } + + public String getValue() { + return value; + } + + @Override + public String toString() { + return name + " = " + value + "; "; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Property)) return false; + Property p2 = (Property)o; + return name.equals(p2.name) && value.equals(p2.value); + } + @Override + public int hashCode() { + return name.hashCode() + value == null ? 0 : value.hashCode(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/serialization/Parser.java 2009-08-01 04:18:34.845306791 +0100 @@ -0,0 +1,433 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
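Properties (above) keeps its entries in one flat String array, name at even and value at odd indices, growing four slots at a time; setting a null value removes an entry, and PropertyMatcher implementations drive the select helpers. An illustrative sketch, not part of the patch:

    Properties p = new Properties("name", "After Parsing", "idx", "0");
    assert "0".equals(p.get("idx"));
    p.setProperty("idx", null);                       // a null value deletes the entry
    assert p.get("idx") == null;
    Property hit = p.selectSingle(new Properties.RegexpPropertyMatcher("name", "After.*"));
    assert hit != null && "After Parsing".equals(hit.getValue());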
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data.serialization; + +import com.sun.hotspot.igv.data.GraphDocument; +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.InputBlock; +import com.sun.hotspot.igv.data.InputEdge; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.InputMethod; +import com.sun.hotspot.igv.data.InputNode; +import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.data.Property; +import com.sun.hotspot.igv.data.services.GroupCallback; +import com.sun.hotspot.igv.data.serialization.XMLParser.ElementHandler; +import com.sun.hotspot.igv.data.serialization.XMLParser.HandoverElementHandler; +import com.sun.hotspot.igv.data.serialization.XMLParser.ParseMonitor; +import com.sun.hotspot.igv.data.serialization.XMLParser.TopElementHandler; +import java.io.IOException; +import java.util.HashMap; +import org.xml.sax.InputSource; +import org.xml.sax.SAXException; +import org.xml.sax.XMLReader; + +/** + * + * @author Thomas Wuerthinger + */ +public class Parser { + + public static final String INDENT = " "; + public static final String TOP_ELEMENT = "graphDocument"; + public static final String GROUP_ELEMENT = "group"; + public static final String GRAPH_ELEMENT = "graph"; + public static final String ROOT_ELEMENT = "graphDocument"; + public static final String PROPERTIES_ELEMENT = "properties"; + public static final String EDGES_ELEMENT = "edges"; + public static final String PROPERTY_ELEMENT = "p"; + public static final String EDGE_ELEMENT = "edge"; + public static final String NODE_ELEMENT = "node"; + public static final String NODES_ELEMENT = "nodes"; + public static final String REMOVE_EDGE_ELEMENT = "removeEdge"; + public static final String REMOVE_NODE_ELEMENT = "removeNode"; + public static final String METHOD_NAME_PROPERTY = "name"; + public static final String METHOD_IS_PUBLIC_PROPERTY = "public"; + public static final String METHOD_IS_STATIC_PROPERTY = "static"; + public static final String TRUE_VALUE = "true"; + public static final String NODE_NAME_PROPERTY = "name"; + public static final String EDGE_NAME_PROPERTY = "name"; + public static final String NODE_ID_PROPERTY = "id"; + public static final String FROM_PROPERTY = "from"; + public static final String TO_PROPERTY = "to"; + public static final String PROPERTY_NAME_PROPERTY = "name"; + public static final String GRAPH_NAME_PROPERTY = "name"; + public static final String TO_INDEX_PROPERTY = "index"; + public static final String METHOD_ELEMENT = "method"; + public static final String INLINE_ELEMENT = "inline"; + public static final String BYTECODES_ELEMENT = "bytecodes"; + public static final String METHOD_BCI_PROPERTY = "bci"; + public static final String METHOD_SHORT_NAME_PROPERTY = "shortName"; + public static final String CONTROL_FLOW_ELEMENT = "controlFlow"; + public static final String BLOCK_NAME_PROPERTY = "name"; + public static final String BLOCK_ELEMENT = "block"; + public static final String SUCCESSORS_ELEMENT = "successors"; + public static final String SUCCESSOR_ELEMENT = "successor"; + public static final String ASSEMBLY_ELEMENT = "assembly"; + public static final String DIFFERENCE_PROPERTY = "difference"; + private TopElementHandler xmlDocument = new TopElementHandler(); + private boolean difference; + private GroupCallback groupCallback; + private HashMap idCache = new 
HashMap(); + private int maxId = 0; + + private int lookupID(String i) { + Integer id = idCache.get(i); + if (id == null) { + id = maxId++; + idCache.put(i, id); + } + return id.intValue(); + } + + // + private ElementHandler topHandler = new ElementHandler(TOP_ELEMENT) { + + @Override + protected GraphDocument start() throws SAXException { + return new GraphDocument(); + } + }; + // + private ElementHandler groupHandler = new XMLParser.ElementHandler(GROUP_ELEMENT) { + + @Override + protected Group start() throws SAXException { + Group group = new Group(); + Parser.this.difference = false; + String differenceProperty = this.readAttribute(DIFFERENCE_PROPERTY); + if (differenceProperty != null && (differenceProperty.equals("1") || differenceProperty.equals("true"))) { + Parser.this.difference = true; + } + + ParseMonitor monitor = getMonitor(); + if (monitor != null) { + monitor.setState(group.getName()); + } + + return group; + } + + @Override + protected void end(String text) throws SAXException { + if (groupCallback == null) { + getParentObject().addGroup(getObject()); + } + } + }; + private HandoverElementHandler assemblyHandler = new XMLParser.HandoverElementHandler(ASSEMBLY_ELEMENT, true) { + + @Override + protected void end(String text) throws SAXException { + getParentObject().setAssembly(text); + } + }; + // + private ElementHandler methodHandler = new XMLParser.ElementHandler(METHOD_ELEMENT) { + + @Override + protected InputMethod start() throws SAXException { + + InputMethod method = parseMethod(this, getParentObject()); + getParentObject().setMethod(method); + return method; + } + }; + + private InputMethod parseMethod(XMLParser.ElementHandler handler, Group group) throws SAXException { + String s = handler.readRequiredAttribute(METHOD_BCI_PROPERTY); + int bci = 0; + try { + bci = Integer.parseInt(s); + } catch (NumberFormatException e) { + throw new SAXException(e); + } + InputMethod method = new InputMethod(group, handler.readRequiredAttribute(METHOD_NAME_PROPERTY), handler.readRequiredAttribute(METHOD_SHORT_NAME_PROPERTY), bci); + return method; + } + // + private HandoverElementHandler bytecodesHandler = new XMLParser.HandoverElementHandler(BYTECODES_ELEMENT, true) { + + @Override + protected void end(String text) throws SAXException { + getParentObject().setBytecodes(text); + } + }; + // + private HandoverElementHandler inlinedHandler = new XMLParser.HandoverElementHandler(INLINE_ELEMENT); + // + private ElementHandler inlinedMethodHandler = new XMLParser.ElementHandler(METHOD_ELEMENT) { + + @Override + protected InputMethod start() throws SAXException { + InputMethod method = parseMethod(this, getParentObject().getGroup()); + getParentObject().addInlined(method); + return method; + } + }; + // + private ElementHandler graphHandler = new XMLParser.ElementHandler(GRAPH_ELEMENT) { + + private InputGraph graph; + + @Override + protected InputGraph start() throws SAXException { + + String name = readAttribute(GRAPH_NAME_PROPERTY); + InputGraph previous = getParentObject().getLastAdded(); + if (!difference) { + previous = null; + } + InputGraph curGraph = new InputGraph(getParentObject(), previous, name); + this.graph = curGraph; + return curGraph; + } + + @Override + protected void end(String text) throws SAXException { + getParentObject().addGraph(graph); + graph.resolveBlockLinks(); + } + }; + // + private HandoverElementHandler nodesHandler = new HandoverElementHandler(NODES_ELEMENT); + // + private HandoverElementHandler controlFlowHandler = new 
HandoverElementHandler(CONTROL_FLOW_ELEMENT); + // + private ElementHandler blockHandler = new ElementHandler(BLOCK_ELEMENT) { + + @Override + protected InputBlock start() throws SAXException { + InputGraph graph = getParentObject(); + String name = readRequiredAttribute(BLOCK_NAME_PROPERTY).intern(); + InputBlock b = new InputBlock(getParentObject(), name); + graph.addBlock(b); + return b; + } + }; + // + private HandoverElementHandler blockNodesHandler = new HandoverElementHandler(NODES_ELEMENT); + // + private ElementHandler blockNodeHandler = new ElementHandler(NODE_ELEMENT) { + + @Override + protected InputBlock start() throws SAXException { + String s = readRequiredAttribute(NODE_ID_PROPERTY); + + int id = 0; + try { + id = lookupID(s); + } catch (NumberFormatException e) { + throw new SAXException(e); + } + getParentObject().addNode(id); + return getParentObject(); + } + }; + // + private HandoverElementHandler successorsHandler = new HandoverElementHandler(SUCCESSORS_ELEMENT); + // + private ElementHandler successorHandler = new ElementHandler(SUCCESSOR_ELEMENT) { + + @Override + protected InputBlock start() throws SAXException { + String name = readRequiredAttribute(BLOCK_NAME_PROPERTY); + getParentObject().addSuccessor(name); + return getParentObject(); + } + }; + // + private ElementHandler nodeHandler = new ElementHandler(NODE_ELEMENT) { + + @Override + protected InputNode start() throws SAXException { + String s = readRequiredAttribute(NODE_ID_PROPERTY); + int id = 0; + try { + id = lookupID(s); + } catch (NumberFormatException e) { + throw new SAXException(e); + } + InputNode node = new InputNode(id); + getParentObject().addNode(node); + return node; + } + }; + // + private ElementHandler removeNodeHandler = new ElementHandler(REMOVE_NODE_ELEMENT) { + + @Override + protected InputNode start() throws SAXException { + String s = readRequiredAttribute(NODE_ID_PROPERTY); + int id = 0; + try { + id = lookupID(s); + } catch (NumberFormatException e) { + throw new SAXException(e); + } + return getParentObject().removeNode(id); + } + }; + // + private HandoverElementHandler edgesHandler = new HandoverElementHandler(EDGES_ELEMENT); + + // Local class for edge elements + private class EdgeElementHandler extends ElementHandler { + + public EdgeElementHandler(String name) { + super(name); + } + + @Override + protected InputEdge start() throws SAXException { + int toIndex = 0; + int from = -1; + int to = -1; + + try { + String toIndexString = readAttribute(TO_INDEX_PROPERTY); + if (toIndexString != null) { + toIndex = Integer.parseInt(toIndexString); + } + + from = lookupID(readRequiredAttribute(FROM_PROPERTY)); + to = lookupID(readRequiredAttribute(TO_PROPERTY)); + } catch (NumberFormatException e) { + throw new SAXException(e); + } + + + InputEdge conn = new InputEdge((char) toIndex, from, to); + return start(conn); + } + + protected InputEdge start(InputEdge conn) throws SAXException { + return conn; + } + } + // + private EdgeElementHandler edgeHandler = new EdgeElementHandler(EDGE_ELEMENT) { + + @Override + protected InputEdge start(InputEdge conn) throws SAXException { + getParentObject().addEdge(conn); + return conn; + } + }; + // + private EdgeElementHandler removeEdgeHandler = new EdgeElementHandler(REMOVE_EDGE_ELEMENT) { + + @Override + protected InputEdge start(InputEdge conn) throws SAXException { + getParentObject().removeEdge(conn); + return conn; + } + }; + // + private HandoverElementHandler propertiesHandler = new HandoverElementHandler(PROPERTIES_ELEMENT); + // + private 
HandoverElementHandler groupPropertiesHandler = new HandoverElementHandler(PROPERTIES_ELEMENT) { + + @Override + public void end(String text) throws SAXException { + if (groupCallback != null) { + groupCallback.started(getParentObject()); + } + } + }; + // + private ElementHandler propertyHandler = new XMLParser.ElementHandler(PROPERTY_ELEMENT, true) { + + @Override + public String start() throws SAXException { + return readRequiredAttribute(PROPERTY_NAME_PROPERTY).intern(); + } + + @Override + public void end(String text) { + getParentObject().getProperties().setProperty(getObject(), text.trim().intern()); + } + }; + + public Parser() { + this(null); + } + + public Parser(GroupCallback groupCallback) { + + this.groupCallback = groupCallback; + + // Initialize dependencies + xmlDocument.addChild(topHandler); + topHandler.addChild(groupHandler); + + groupHandler.addChild(methodHandler); + groupHandler.addChild(assemblyHandler); + groupHandler.addChild(graphHandler); + + methodHandler.addChild(inlinedHandler); + methodHandler.addChild(bytecodesHandler); + + inlinedHandler.addChild(inlinedMethodHandler); + inlinedMethodHandler.addChild(bytecodesHandler); + inlinedMethodHandler.addChild(inlinedHandler); + + graphHandler.addChild(nodesHandler); + graphHandler.addChild(edgesHandler); + graphHandler.addChild(controlFlowHandler); + + controlFlowHandler.addChild(blockHandler); + + blockHandler.addChild(successorsHandler); + successorsHandler.addChild(successorHandler); + blockHandler.addChild(blockNodesHandler); + blockNodesHandler.addChild(blockNodeHandler); + + nodesHandler.addChild(nodeHandler); + nodesHandler.addChild(removeNodeHandler); + edgesHandler.addChild(edgeHandler); + edgesHandler.addChild(removeEdgeHandler); + + methodHandler.addChild(propertiesHandler); + inlinedMethodHandler.addChild(propertiesHandler); + topHandler.addChild(propertiesHandler); + groupHandler.addChild(groupPropertiesHandler); + graphHandler.addChild(propertiesHandler); + nodeHandler.addChild(propertiesHandler); + propertiesHandler.addChild(propertyHandler); + groupPropertiesHandler.addChild(propertyHandler); + } + + // Returns a new GraphDocument object deserialized from an XML input source. + public GraphDocument parse(XMLReader reader, InputSource source, XMLParser.ParseMonitor monitor) throws SAXException { + reader.setContentHandler(new XMLParser(xmlDocument, monitor)); + try { + reader.parse(source); + } catch (IOException ex) { + throw new SAXException(ex); + } + + return topHandler.getObject(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/serialization/Printer.java 2009-08-01 04:18:35.268715014 +0100 @@ -0,0 +1,217 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
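A sketch of driving the Parser over a dump file (illustrative: the file name is made up, any SAX-conformant XMLReader should work, the monitor argument may be null, and the surrounding method is assumed to declare the SAX/IO exceptions):

    XMLReader reader = org.xml.sax.helpers.XMLReaderFactory.createXMLReader();
    InputSource source = new InputSource(new java.io.FileReader("graphs.xml"));
    GraphDocument doc = new Parser().parse(reader, source, null);
    for (Group g : doc.getGroups()) {
        System.out.println(g.getName() + ": " + g.getGraphs().size() + " graphs");
    }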
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data.serialization; + +import com.sun.hotspot.igv.data.GraphDocument; +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.InputBlock; +import com.sun.hotspot.igv.data.InputBytecode; +import com.sun.hotspot.igv.data.InputEdge; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.InputMethod; +import com.sun.hotspot.igv.data.InputNode; +import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.data.Property; +import java.io.IOException; +import java.io.Writer; +import java.util.HashSet; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class Printer { + + public void export(Writer writer, GraphDocument document) { + + XMLWriter xmlWriter = new XMLWriter(writer); + + try { + export(xmlWriter, document); + } catch (IOException ex) { + } + } + + private void export(XMLWriter xmlWriter, GraphDocument document) throws IOException { + xmlWriter.startTag(Parser.ROOT_ELEMENT); + xmlWriter.writeProperties(document.getProperties()); + for (Group g : document.getGroups()) { + export(xmlWriter, g); + } + + xmlWriter.endTag(); + xmlWriter.flush(); + } + + private void export(XMLWriter writer, Group g) throws IOException { + Properties attributes = new Properties(); + attributes.setProperty("difference", Boolean.toString(true)); + writer.startTag(Parser.GROUP_ELEMENT, attributes); + writer.writeProperties(g.getProperties()); + + if (g.getMethod() != null) { + export(writer, g.getMethod()); + } + + InputGraph previous = null; + for (InputGraph graph : g.getGraphs()) { + export(writer, graph, previous, true); + previous = graph; + } + + writer.endTag(); + } + + public void export(XMLWriter writer, InputGraph graph, InputGraph previous, boolean difference) throws IOException { + + writer.startTag(Parser.GRAPH_ELEMENT); + writer.writeProperties(graph.getProperties()); + writer.startTag(Parser.NODES_ELEMENT); + + Set removed = new HashSet(); + Set equal = new HashSet(); + + if (previous != null) { + for (InputNode n : previous.getNodes()) { + int id = n.getId(); + InputNode n2 = graph.getNode(id); + if (n2 == null) { + removed.add(n); + } else if (n.equals(n2)) { + equal.add(n); + } + } + } + + if (difference) { + for (InputNode n : removed) { + writer.simpleTag(Parser.REMOVE_NODE_ELEMENT, new Properties(Parser.NODE_ID_PROPERTY, Integer.toString(n.getId()))); + } + } + + for (InputNode n : graph.getNodes()) { + if (!difference || !equal.contains(n)) { + writer.startTag(Parser.NODE_ELEMENT, new Properties(Parser.NODE_ID_PROPERTY, Integer.toString(n.getId()))); + writer.writeProperties(n.getProperties()); + writer.endTag(); + } + } + + writer.endTag(); + + writer.startTag(Parser.EDGES_ELEMENT); + Set removedEdges = new HashSet(); + Set equalEdges = new HashSet(); + + if (previous != null) { + for (InputEdge e : previous.getEdges()) { + if (graph.getEdges().contains(e)) { + equalEdges.add(e); + } else { + removedEdges.add(e); + } + } + } + + if (difference) { + for (InputEdge e : removedEdges) { + writer.simpleTag(Parser.REMOVE_EDGE_ELEMENT, createProperties(e)); + } + } + + for (InputEdge e : 
graph.getEdges()) { + if (!difference || !equalEdges.contains(e)) { + if (!equalEdges.contains(e)) { + writer.simpleTag(Parser.EDGE_ELEMENT, createProperties(e)); + } + } + } + + writer.endTag(); + + writer.startTag(Parser.CONTROL_FLOW_ELEMENT); + for (InputBlock b : graph.getBlocks()) { + + writer.startTag(Parser.BLOCK_ELEMENT, new Properties(Parser.BLOCK_NAME_PROPERTY, b.getName())); + + writer.startTag(Parser.SUCCESSORS_ELEMENT); + for (InputBlock s : b.getSuccessors()) { + writer.simpleTag(Parser.SUCCESSOR_ELEMENT, new Properties(Parser.BLOCK_NAME_PROPERTY, s.getName())); + } + writer.endTag(); + + writer.startTag(Parser.NODES_ELEMENT); + for (InputNode n : b.getNodes()) { + writer.simpleTag(Parser.NODE_ELEMENT, new Properties(Parser.NODE_ID_PROPERTY, n.getId() + "")); + } + writer.endTag(); + + writer.endTag(); + + } + + writer.endTag(); + writer.endTag(); + } + + private void export(XMLWriter w, InputMethod method) throws IOException { + + w.startTag(Parser.METHOD_ELEMENT, new Properties(Parser.METHOD_BCI_PROPERTY, method.getBci() + "", Parser.METHOD_NAME_PROPERTY, method.getName(), Parser.METHOD_SHORT_NAME_PROPERTY, method.getShortName())); + + w.writeProperties(method.getProperties()); + + if (method.getInlined().size() > 0) { + w.startTag(Parser.INLINE_ELEMENT); + for (InputMethod m : method.getInlined()) { + export(w, m); + } + w.endTag(); + } + + w.startTag(Parser.BYTECODES_ELEMENT); + + StringBuilder b = new StringBuilder(); + b.append(""); + w.write(b.toString()); + w.endTag(); + w.endTag(); + } + + private Properties createProperties(InputEdge edge) { + Properties p = new Properties(); + p.setProperty(Parser.TO_INDEX_PROPERTY, Integer.toString(edge.getToIndex())); + p.setProperty(Parser.TO_PROPERTY, Integer.toString(edge.getTo())); + p.setProperty(Parser.FROM_PROPERTY, Integer.toString(edge.getFrom())); + return p; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/serialization/XMLParser.java 2009-08-01 04:18:35.723531463 +0100 @@ -0,0 +1,254 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
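The Printer above can re-serialize what the Parser produced; when a previous graph is available it emits only the delta (removeNode/removeEdge elements plus the changed nodes and edges). A minimal sketch, reusing the "doc" from the parsing example and a made-up output file name:

    java.io.Writer out = new java.io.FileWriter("roundtrip.xml");   // throws IOException
    new Printer().export(out, doc);
    out.close();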
+ * + */ +package com.sun.hotspot.igv.data.serialization; + +import com.sun.hotspot.igv.data.Property; +import com.sun.hotspot.igv.data.Properties; +import java.util.HashMap; +import java.util.Stack; +import org.xml.sax.Attributes; +import org.xml.sax.ContentHandler; +import org.xml.sax.Locator; +import org.xml.sax.SAXException; + +/** + * + * @author Thomas Wuerthinger + */ +public class XMLParser implements ContentHandler { + + public static interface ParseMonitor { + + public void setProgress(double d); + + public void setState(String state); + } + + public static class MissingAttributeException extends SAXException { + + private String name; + + public MissingAttributeException(String name) { + super("Missing attribute \"" + name + "\""); + this.name = name; + } + + public String getAttributeName() { + return this.getMessage(); + } + } + + public static class HandoverElementHandler
<P>
extends ElementHandler { + + @Override + protected P start() throws SAXException { + return getParentObject(); + } + + public HandoverElementHandler(String name) { + super(name); + } + + public HandoverElementHandler(String name, boolean needsText) { + super(name, needsText); + } + } + + public static class TopElementHandler
extends ElementHandler { + + public TopElementHandler() { + super(null); + } + } + + public static class ElementHandler { + + private String name; + private T object; + private Attributes attr; + private StringBuilder currentText; + private ParseMonitor monitor; + private HashMap> hashtable; + private boolean needsText; + private ElementHandler parentElement; + + public ElementHandler(String name) { + this(name, false); + } + + public ElementHandler getParentElement() { + return parentElement; + } + + public P getParentObject() { + return getParentElement().getObject(); + } + + protected boolean needsText() { + return needsText; + } + + public ElementHandler(String name, boolean needsText) { + this.hashtable = new HashMap>(); + this.name = name; + this.needsText = needsText; + } + + public ParseMonitor getMonitor() { + return monitor; + } + + public ElementHandler getChild(String name) { + return hashtable.get(name); + } + + public void addChild(ElementHandler handler) { + assert handler != null; + hashtable.put(handler.getName(), handler); + } + + public String getName() { + return name; + } + + public T getObject() { + return object; + } + + public String readAttribute(String name) { + return attr.getValue(name); + } + + public String readRequiredAttribute(String name) throws SAXException { + String s = readAttribute(name); + if (s == null) { + throw new MissingAttributeException(name); + } + return s; + } + + public void processAttributesAsProperties(Properties p) { + int length = attr.getLength(); + for (int i = 0; i < length; i++) { + String val = attr.getValue(i).intern(); + String localName = attr.getLocalName(i).intern(); + p.setProperty(val, localName); + } + } + + public void startElement(ElementHandler parentElement, Attributes attr, ParseMonitor monitor) throws SAXException { + this.currentText = new StringBuilder(); + this.attr = attr; + this.monitor = monitor; + this.parentElement = parentElement; + object = start(); + } + + protected T start() throws SAXException { + return null; + } + + protected void end(String text) throws SAXException { + + } + + public void endElement() throws SAXException { + end(currentText.toString()); + } + + protected void text(char[] c, int start, int length) { + assert currentText != null; + currentText.append(c, start, length); + } + } + private Stack stack; + private ParseMonitor monitor; + + public XMLParser(TopElementHandler rootHandler, ParseMonitor monitor) { + this.stack = new Stack(); + this.monitor = monitor; + this.stack.push(rootHandler); + } + + public void setDocumentLocator(Locator locator) { + if (monitor != null) { + monitor.setState("Starting parsing"); + } + } + + public void startDocument() throws SAXException { + } + + public void endDocument() throws SAXException { + } + + public void startPrefixMapping(String prefix, String uri) throws SAXException { + } + + public void endPrefixMapping(String prefix) throws SAXException { + } + + public void startElement(String uri, String localName, String qName, Attributes atts) throws SAXException { + + assert !stack.isEmpty(); + ElementHandler parent = stack.peek(); + if (parent != null) { + ElementHandler child = parent.getChild(qName); + if (child != null) { + child.startElement(parent, atts, monitor); + stack.push(child); + return; + } + } + + stack.push(null); + } + + public void endElement(String uri, String localName, String qName) throws SAXException { + ElementHandler handler = stack.pop(); + if (handler != null) { + handler.endElement(); + } + } + + public void 
characters(char[] ch, int start, int length) throws SAXException { + + assert !stack.isEmpty(); + + + ElementHandler top = stack.peek(); + if (top != null && top.needsText()) { + top.text(ch, start, length); + } + } + + public void ignorableWhitespace(char[] ch, int start, int length) throws SAXException { + } + + public void processingInstruction(String target, String data) throws SAXException { + } + + public void skippedEntity(String name) throws SAXException { + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/serialization/XMLWriter.java 2009-08-01 04:18:36.143237509 +0100 @@ -0,0 +1,128 @@ +/* + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ +package com.sun.hotspot.igv.data.serialization; + +import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.data.Property; +import java.io.IOException; +import java.io.Writer; +import java.util.Stack; + +/** + * + * @author Thomas Wuerthinger + */ +public class XMLWriter extends Writer { + + private Writer inner; + private Stack elementStack; + + public XMLWriter(Writer inner) { + this.inner = inner; + elementStack = new Stack(); + } + + @Override + public void write(char[] arr) throws IOException { + write(arr, 0, arr.length); + } + + public void write(char[] cbuf, int off, int len) throws IOException { + for (int i = off; i < off + len; i++) { + char c = cbuf[i]; + if (c == '>') { + inner.write(">"); + } else if (c == '<') { + inner.write("<"); + } else if (c == '&') { + inner.write("&"); + } else { + inner.write(c); + } + } + } + + public void flush() throws IOException { + inner.flush(); + } + + public void close() throws IOException { + inner.close(); + } + + public void endTag() throws IOException { + inner.write("\n"); + } + + public void startTag(String name) throws IOException { + inner.write("<" + name + ">\n"); + elementStack.push(name); + } + + public void simpleTag(String name) throws IOException { + inner.write("<" + name + "/>\n"); + } + + public void startTag(String name, Properties attributes) throws IOException { + inner.write("<" + name); + elementStack.push(name); + + for (Property p : attributes) { + inner.write(" " + p.getName() + "=\""); + write(p.getValue().toCharArray()); + inner.write("\""); + } + + inner.write(">\n"); + } + + public void simpleTag(String name, Properties attributes) throws IOException { + inner.write("<" + name); + + for (Property p : attributes) { + inner.write(" " + p.getName() + "=\""); + write(p.getValue().toCharArray()); + inner.write("\""); + } + + inner.write("/>\n"); + } + + public void writeProperties(Properties props) throws IOException { + if (props.getProperties().hasNext() == false) { + return; + } + + startTag(Parser.PROPERTIES_ELEMENT); + + for (Property p : props) { + startTag(Parser.PROPERTY_ELEMENT, new Properties(Parser.PROPERTY_NAME_PROPERTY, p.getName())); + this.write(p.getValue().toCharArray()); + endTag(); + } + + endTag(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/services/GraphViewer.java 2009-08-01 04:18:36.593274001 +0100 @@ -0,0 +1,35 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
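XMLWriter keeps a stack of open elements, escapes character data through its write(char[]) override, and emits a property block via writeProperties. A usage sketch (illustrative; note that some angle-bracket string literals in the listing above, e.g. in endTag and the escape branches, appear to have been lost in transcription, so the intended closing-tag and entity output is assumed here):

    java.io.StringWriter sw = new java.io.StringWriter();
    XMLWriter w = new XMLWriter(sw);
    w.startTag("graph", new Properties("name", "After Parsing"));
    w.simpleTag("node", new Properties("id", "1"));
    w.endTag();                      // pops "graph" and writes its closing tag
    w.flush();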
+ * + */ +package com.sun.hotspot.igv.data.services; + +import com.sun.hotspot.igv.data.InputGraph; + +/** + * + * @author Thomas Wuerthinger + */ +public interface GraphViewer { + + public void view(InputGraph graph); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/services/GroupCallback.java 2009-08-01 04:18:36.993840473 +0100 @@ -0,0 +1,36 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.data.services; + +import com.sun.hotspot.igv.data.Group; + +/** + * + * @author Thomas Wuerthinger + */ +public interface GroupCallback { + + public void started(Group g); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/services/GroupOrganizer.java 2009-08-01 04:18:37.386141344 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.data.services; + +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.Pair; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public interface GroupOrganizer { + + public String getName(); + + public List>> organize(List subFolders, List groups); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/services/GroupReceiver.java 2009-08-01 04:18:37.794947524 +0100 @@ -0,0 +1,35 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.data.services; + +import java.awt.Component; + +/** + * + * @author Thomas Wuerthinger + */ +public interface GroupReceiver { + + public Component init(GroupCallback callback); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/services/InputGraphProvider.java 2009-08-01 04:18:38.212523766 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ +package com.sun.hotspot.igv.data.services; + +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.InputNode; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public interface InputGraphProvider { + + InputGraph getGraph(); + + void setSelectedNodes(Set nodes); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Data/src/com/sun/hotspot/igv/data/services/Scheduler.java 2009-08-01 04:18:38.628330297 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.data.services; + +import com.sun.hotspot.igv.data.InputBlock; +import com.sun.hotspot.igv.data.InputGraph; +import java.util.Collection; + +/** + * + * @author Thomas Wuerthinger + */ +public interface Scheduler { + + public Collection schedule(InputGraph graph); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Difference/build.xml 2009-08-01 04:18:39.078699176 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.difference. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Difference/manifest.mf 2009-08-01 04:18:39.479427168 +0100 @@ -0,0 +1,5 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.difference +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/difference/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Difference/nbproject/build-impl.xml 2009-08-01 04:18:39.913953112 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Difference/nbproject/genfiles.properties 2009-08-01 04:18:40.330800170 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=1657ecfe +build.xml.script.CRC32=03909051 +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. 
+nbproject/build-impl.xml.data.CRC32=1657ecfe +nbproject/build-impl.xml.script.CRC32=2208e770 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Difference/nbproject/platform.properties 2009-08-01 04:18:40.727415822 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Difference/nbproject/project.properties 2009-08-01 04:18:41.137633456 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Difference/nbproject/project.xml 2009-08-01 04:18:41.530084129 +0100 @@ -0,0 +1,23 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.difference + + + + com.sun.hotspot.igv.data + + + + 1.0 + + + + + com.sun.hotspot.igv.difference + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Difference/nbproject/suite.properties 2009-08-01 04:18:41.933704767 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Difference/src/com/sun/hotspot/igv/difference/Bundle.properties 2009-08-01 04:18:42.478706036 +0100 @@ -0,0 +1 @@ +OpenIDE-Module-Name=Difference --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Difference/src/com/sun/hotspot/igv/difference/Difference.java 2009-08-01 04:18:42.880548709 +0100 @@ -0,0 +1,321 @@ +/* + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.difference; + +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.InputEdge; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.InputNode; +import com.sun.hotspot.igv.data.Property; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class Difference { + + public static final String PROPERTY_STATE = "state"; + public static final String VALUE_NEW = "new"; + public static final String VALUE_CHANGED = "changed"; + public static final String VALUE_SAME = "same"; + public static final String VALUE_DELETED = "deleted"; + public static final String OLD_PREFIX = "OLD_"; + public static final String MAIN_PROPERTY = "name"; + public static final double LIMIT = 100.0; + public static final String[] IGNORE_PROPERTIES = new String[]{"idx", "debug_idx"}; + + public static InputGraph createDiffGraph(InputGraph a, InputGraph b) { + if (a.getGroup() == b.getGroup()) { + return createDiffSameGroup(a, b); + } else { + return createDiff(a, b); + } + } + + private static InputGraph createDiffSameGroup(InputGraph a, InputGraph b) { + Map keyMapB = new HashMap(); + for (InputNode n : b.getNodes()) { + Integer key = n.getId(); + assert !keyMapB.containsKey(key); + keyMapB.put(key, n); + } + + Set pairs = new HashSet(); + + for (InputNode n : a.getNodes()) { + Integer key = n.getId(); + + + if (keyMapB.containsKey(key)) { + InputNode nB = keyMapB.get(key); + pairs.add(new Pair(n, nB)); + } + } + + return createDiff(a, b, pairs); + } + + private static InputGraph createDiff(InputGraph a, InputGraph b, Set pairs) { + Group g = new Group(); + g.setMethod(a.getGroup().getMethod()); + g.setAssembly(a.getGroup().getAssembly()); + g.getProperties().setProperty("name", "Difference"); + InputGraph graph = new InputGraph(g, null); + graph.setName(a.getName() + ", " + b.getName()); + graph.setIsDifferenceGraph(true); + + Set nodesA = new HashSet(a.getNodes()); + Set nodesB = new HashSet(b.getNodes()); + + Map inputNodeMap = new HashMap(); + for (Pair p : pairs) { + InputNode n = p.getN1(); + assert nodesA.contains(n); + InputNode nB = p.getN2(); + assert nodesB.contains(nB); + + nodesA.remove(n); + nodesB.remove(nB); + InputNode n2 = new InputNode(n); + inputNodeMap.put(n, n2); + inputNodeMap.put(nB, n2); + graph.addNode(n2); + markAsChanged(n2, n, nB); + } + + for (InputNode n : nodesA) { + InputNode n2 = new InputNode(n); + graph.addNode(n2); + markAsNew(n2); + inputNodeMap.put(n, n2); + } + + for (InputNode n : nodesB) { + InputNode n2 = new InputNode(n); + n2.setId(-n2.getId()); + graph.addNode(n2); + markAsDeleted(n2); + inputNodeMap.put(n, n2); + } + + Collection edgesA = a.getEdges(); + Collection edgesB = b.getEdges(); + + Set newEdges = new HashSet(); + + for (InputEdge e : edgesA) { + int from = e.getFrom(); + int to = e.getTo(); + InputNode nodeFrom = inputNodeMap.get(a.getNode(from)); + InputNode nodeTo = inputNodeMap.get(a.getNode(to)); + char index = e.getToIndex(); + + InputEdge newEdge = new InputEdge(index, nodeFrom.getId(), nodeTo.getId()); + if (!newEdges.contains(newEdge)) { + markAsNew(newEdge); + newEdges.add(newEdge); + graph.addEdge(newEdge); + } + } + + for (InputEdge e : edgesB) { + int from = e.getFrom(); + 
int to = e.getTo(); + InputNode nodeFrom = inputNodeMap.get(b.getNode(from)); + InputNode nodeTo = inputNodeMap.get(b.getNode(to)); + char index = e.getToIndex(); + + InputEdge newEdge = new InputEdge(index, nodeFrom.getId(), nodeTo.getId()); + if (!newEdges.contains(newEdge)) { + markAsDeleted(newEdge); + newEdges.add(newEdge); + graph.addEdge(newEdge); + } else { + newEdges.remove(newEdge); + graph.removeEdge(newEdge); + markAsSame(newEdge); + newEdges.add(newEdge); + graph.addEdge(newEdge); + } + } + + g.addGraph(graph); + return graph; + } + + private static class Pair { + + private InputNode n1; + private InputNode n2; + + public Pair(InputNode n1, InputNode n2) { + this.n1 = n1; + this.n2 = n2; + } + + public double getValue() { + + double result = 0.0; + for (Property p : n1.getProperties()) { + double faktor = 1.0; + for (String forbidden : IGNORE_PROPERTIES) { + if (p.getName().equals(forbidden)) { + faktor = 0.1; + break; + } + } + String p2 = n2.getProperties().get(p.getName()); + result += evaluate(p.getValue(), p2) * faktor; + } + + return result; + } + + private double evaluate(String p, String p2) { + if (p2 == null) { + return 1.0; + } + if (p.equals(p2)) { + return 0.0; + } else { + return (double) (Math.abs(p.length() - p2.length())) / p.length() + 0.5; + } + } + + public InputNode getN1() { + return n1; + } + + public InputNode getN2() { + return n2; + } + } + + private static InputGraph createDiff(InputGraph a, InputGraph b) { + + Set matched = new HashSet(); + + Set pairs = new HashSet(); + for (InputNode n : a.getNodes()) { + String s = n.getProperties().get(MAIN_PROPERTY); + if (s == null) { + s = ""; + } + for (InputNode n2 : b.getNodes()) { + String s2 = n2.getProperties().get(MAIN_PROPERTY); + if (s2 == null) { + s2 = ""; + } + + if (s.equals(s2)) { + Pair p = new Pair(n, n2); + pairs.add(p); + } + } + } + + Set selectedPairs = new HashSet(); + while (pairs.size() > 0) { + + double min = Double.MAX_VALUE; + Pair minPair = null; + for (Pair p : pairs) { + double cur = p.getValue(); + if (cur < min) { + minPair = p; + min = cur; + } + } + + if (min > LIMIT) { + break; + } else { + selectedPairs.add(minPair); + + Set toRemove = new HashSet(); + for (Pair p : pairs) { + if (p.getN1() == minPair.getN1() || p.getN2() == minPair.getN2()) { + toRemove.add(p); + } + } + pairs.removeAll(toRemove); + } + } + + return createDiff(a, b, selectedPairs); + } + + private static void markAsNew(InputEdge e) { + e.setState(InputEdge.State.NEW); + } + + private static void markAsDeleted(InputEdge e) { + e.setState(InputEdge.State.DELETED); + + } + + private static void markAsSame(InputEdge e) { + e.setState(InputEdge.State.SAME); + } + + private static void markAsChanged(InputNode n, InputNode firstNode, InputNode otherNode) { + + boolean difference = false; + for (Property p : otherNode.getProperties()) { + String s = firstNode.getProperties().get(p.getName()); + if (!p.getValue().equals(s)) { + difference = true; + n.getProperties().setProperty(OLD_PREFIX + p.getName(), p.getValue()); + } + } + + for (Property p : firstNode.getProperties()) { + String s = otherNode.getProperties().get(p.getName()); + if (s == null && p.getValue().length() > 0) { + difference = true; + n.getProperties().setProperty(OLD_PREFIX + p.getName(), ""); + } + } + + if (difference) { + n.getProperties().setProperty(PROPERTY_STATE, VALUE_CHANGED); + } else { + n.getProperties().setProperty(PROPERTY_STATE, VALUE_SAME); + } + } + + private static void markAsDeleted(InputNode n) { + 
n.getProperties().setProperty(PROPERTY_STATE, VALUE_DELETED); + } + + private static void markAsNew(InputNode n) { + n.getProperties().setProperty(PROPERTY_STATE, VALUE_NEW); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/build.xml 2009-08-01 04:18:43.348847633 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.filter. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/manifest.mf 2009-08-01 04:18:43.725360042 +0100 @@ -0,0 +1,6 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.filter +OpenIDE-Module-Layer: com/sun/hotspot/igv/filter/layer.xml +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/filter/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/nbproject/build-impl.xml 2009-08-01 04:18:44.155381162 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/nbproject/genfiles.properties 2009-08-01 04:18:44.588979193 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=7c032ebf +build.xml.script.CRC32=3b022a25 +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. +nbproject/build-impl.xml.data.CRC32=7c032ebf +nbproject/build-impl.xml.script.CRC32=26513f91 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/nbproject/platform.properties 2009-08-01 04:18:44.988099044 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/nbproject/project.properties 2009-08-01 04:18:45.389503215 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/nbproject/project.xml 2009-08-01 04:18:45.788753735 +0100 @@ -0,0 +1,80 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.filter + + + + com.sun.hotspot.igv.data + + + + 1.0 + + + + com.sun.hotspot.igv.graph + + + + 1.0 + + + + org.jdesktop.layout + + + + 1 + 1.4.1 + + + + org.openide.dialogs + + + + 7.5.1 + + + + org.openide.filesystems + + + + 7.3 + + + + 
org.openide.nodes + + + + 7.2.0.1 + + + + org.openide.util + + + + 7.9.0.1 + + + + org.openide.windows + + + + 6.16 + + + + + com.sun.hotspot.igv.filter + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/nbproject/suite.properties 2009-08-01 04:18:46.199881940 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/META-INF/services/com.sun.hotspot.igv.filter.ScriptEngineAbstraction 2009-08-01 04:18:46.684628621 +0100 @@ -0,0 +1 @@ +com.sun.hotspot.igv.filter.JavaSE6ScriptEngine \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/AbstractFilter.java 2009-08-01 04:18:47.203142402 +0100 @@ -0,0 +1,60 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.data.ChangedEvent; +import com.sun.hotspot.igv.data.Properties; +import org.openide.cookies.OpenCookie; + +/** + * + * @author Thomas Wuerthinger + */ +public abstract class AbstractFilter implements Filter { + + private ChangedEvent changedEvent; + private Properties properties; + + public AbstractFilter() { + changedEvent = new ChangedEvent(this); + properties = new Properties(); + } + + public Properties getProperties() { + return properties; + } + + public OpenCookie getEditor() { + return null; + } + + public ChangedEvent getChangedEvent() { + return changedEvent; + } + + protected void fireChangedEvent() { + changedEvent.fire(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/Bundle.properties 2009-08-01 04:18:47.620441554 +0100 @@ -0,0 +1,11 @@ +OpenIDE-Module-Name=Filter + +jLabel1.text=Name\: +jLabel2.text=Source\: + +nameTextField.text= + +jButton1.text=OK +jButton2.text=Cancel + +title=Edit Filter Dialog --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/ColorFilter.java 2009-08-01 04:18:48.012946769 +0100 @@ -0,0 +1,138 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.graph.Connection; +import com.sun.hotspot.igv.graph.Connection.ConnectionStyle; +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.graph.OutputSlot; +import com.sun.hotspot.igv.graph.Selector; +import com.sun.hotspot.igv.data.Properties; +import java.awt.Color; +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class ColorFilter extends AbstractFilter { + + private List colorRules; + private String name; + + public ColorFilter(String name) { + this.name = name; + colorRules = new ArrayList(); + } + + public String getName() { + return name; + } + + public void apply(Diagram diagram) { + + Properties.PropertySelector

<Figure> selector = new Properties.PropertySelector<Figure>(diagram.getFigures()); + for (ColorRule rule : colorRules) { + if (rule.getSelector() != null) { + List<Figure>
figures = rule.getSelector().selected(diagram); + for (Figure f : figures) { + applyRule(rule, f); + if (rule.getColor() != null) { + f.setColor(rule.getColor()); + } + } + } else { + for (Figure f : diagram.getFigures()) { + applyRule(rule, f); + } + } + } + } + + private void applyRule(ColorRule rule, Figure f) { + if (rule.getColor() != null) { + f.setColor(rule.getColor()); + } + Color color = rule.getLineColor(); + ConnectionStyle style = rule.getLineStyle(); + + for (OutputSlot s : f.getOutputSlots()) { + for (Connection c : s.getConnections()) { + if (color != null) { + c.setColor(color); + } + + if (style != null) { + c.setStyle(style); + } + } + } + } + + public void addRule(ColorRule r) { + colorRules.add(r); + } + + public static class ColorRule { + + private Color color; + private Color lineColor; + private Connection.ConnectionStyle lineStyle; + private Selector selector; + + public ColorRule(Selector selector, Color c) { + this(selector, c, null, null); + } + + public ColorRule(Selector selector, Color c, Color lineColor, Connection.ConnectionStyle lineStyle) { + this.selector = selector; + this.color = c; + this.lineColor = lineColor; + this.lineStyle = lineStyle; + + } + + public ColorRule(Color c) { + this(null, c); + } + + public Color getColor() { + return color; + } + + public Selector getSelector() { + return selector; + } + + public Color getLineColor() { + return lineColor; + } + + public Connection.ConnectionStyle getLineStyle() { + return lineStyle; + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/CombineFilter.java 2009-08-01 04:18:48.455540463 +0100 @@ -0,0 +1,194 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
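(Editorial illustration, not part of the patch.) A short sketch of how the ColorFilter above might be applied; the filter name and color are arbitrary, and the Diagram instance is assumed to come from the Graph module, which is not part of this hunk:

    import com.sun.hotspot.igv.filter.ColorFilter;
    import com.sun.hotspot.igv.graph.Diagram;
    import java.awt.Color;

    public class ColorFilterExample {
        // Colors every figure of the given diagram; a rule constructed without
        // a selector applies to all figures.
        public static void highlightAll(Diagram diagram) {
            ColorFilter filter = new ColorFilter("highlight all");
            filter.addRule(new ColorFilter.ColorRule(Color.YELLOW));
            filter.apply(diagram);
        }
    }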
+ * + */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.graph.Connection; +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.graph.InputSlot; +import com.sun.hotspot.igv.graph.OutputSlot; +import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.data.Properties.PropertyMatcher; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class CombineFilter extends AbstractFilter { + + private List rules; + private String name; + + public CombineFilter(String name) { + this.name = name; + rules = new ArrayList(); + } + + public String getName() { + return name; + } + + public void apply(Diagram diagram) { + + Properties.PropertySelector
<Figure> selector = new Properties.PropertySelector<Figure>(diagram.getFigures()); + for (CombineRule r : rules) { + + List<Figure> list = selector.selectMultiple(r.getFirstMatcher()); + Set<Figure> figuresToRemove = new HashSet<Figure>(); + for (Figure f : list) { + + List<Figure> successors = new ArrayList<Figure>
(f.getSuccessors()); + if (r.isReversed()) { + if (successors.size() == 1) { + Figure succ = successors.get(0); + InputSlot slot = null; + + for (InputSlot s : succ.getInputSlots()) { + for (Connection c : s.getConnections()) { + if (c.getOutputSlot().getFigure() == f) { + slot = s; + } + } + } + + assert slot != null; + slot.setName(f.getProperties().get("dump_spec")); + if (f.getProperties().get("short_name") != null) { + slot.setShortName(f.getProperties().get("short_name")); + } else { + String s = f.getProperties().get("dump_spec"); + if (s != null && s.length() <= 5) { + slot.setShortName(s); + } + + } + + for (InputSlot s : f.getInputSlots()) { + for (Connection c : s.getConnections()) { + Connection newConn = diagram.createConnection(slot, c.getOutputSlot()); + newConn.setColor(c.getColor()); + newConn.setStyle(c.getStyle()); + } + } + + figuresToRemove.add(f); + } + } else { + + for (Figure succ : successors) { + if (succ.getPredecessors().size() == 1) { + if (succ.getProperties().selectSingle(r.getSecondMatcher()) != null && succ.getOutputSlots().size() == 1) { + + + OutputSlot oldSlot = null; + for (OutputSlot s : f.getOutputSlots()) { + for (Connection c : s.getConnections()) { + if (c.getInputSlot().getFigure() == succ) { + oldSlot = s; + } + } + } + + assert oldSlot != null; + + OutputSlot nextSlot = succ.getOutputSlots().get(0); + int pos = 0; + if (succ.getProperties().get("con") != null) { + pos = Integer.parseInt(succ.getProperties().get("con")); + } + OutputSlot slot = f.createOutputSlot(pos); + slot.setName(succ.getProperties().get("dump_spec")); + if (succ.getProperties().get("short_name") != null) { + slot.setShortName(succ.getProperties().get("short_name")); + } else { + String s = succ.getProperties().get("dump_spec"); + if (s != null && s.length() <= 2) { + slot.setShortName(s); + } else { + String tmpName = succ.getProperties().get("name"); + if (tmpName != null && tmpName.length() > 0) { + slot.setShortName(tmpName.substring(0, 1)); + } + } + } + for (Connection c : nextSlot.getConnections()) { + Connection newConn = diagram.createConnection(c.getInputSlot(), slot); + newConn.setColor(c.getColor()); + newConn.setStyle(c.getStyle()); + } + + + figuresToRemove.add(succ); + + if (oldSlot.getConnections().size() == 0) { + f.removeSlot(oldSlot); + } + } + } + } + } + } + + diagram.removeAllFigures(figuresToRemove); + } + } + + public void addRule(CombineRule combineRule) { + rules.add(combineRule); + } + + public static class CombineRule { + + private PropertyMatcher first; + private PropertyMatcher second; + private boolean reversed; + + public CombineRule(PropertyMatcher first, PropertyMatcher second) { + this(first, second, false); + + } + + public CombineRule(PropertyMatcher first, PropertyMatcher second, boolean reversed) { + this.first = first; + this.second = second; + this.reversed = reversed; + } + + public boolean isReversed() { + return reversed; + } + + public PropertyMatcher getFirstMatcher() { + return first; + } + + public PropertyMatcher getSecondMatcher() { + return second; + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/ConnectionFilter.java 2009-08-01 04:18:48.881466276 +0100 @@ -0,0 +1,106 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.graph.Connection; +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.graph.OutputSlot; +import com.sun.hotspot.igv.graph.Selector; +import com.sun.hotspot.igv.data.Properties; +import java.awt.Color; +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class ConnectionFilter extends AbstractFilter { + + private List connectionStyleRules; + private String name; + + public ConnectionFilter(String name) { + this.name = name; + connectionStyleRules = new ArrayList(); + } + + public String getName() { + return name; + } + + public void apply(Diagram diagram) { + + Properties.PropertySelector
<Figure> selector = new Properties.PropertySelector<Figure>(diagram.getFigures()); + for (ConnectionStyleRule rule : connectionStyleRules) { + List<Figure>
figures = null; + if (rule.getSelector() != null) { + figures = rule.getSelector().selected(diagram); + } else { + figures = diagram.getFigures(); + } + + for (Figure f : figures) { + for (OutputSlot os : f.getOutputSlots()) { + for (Connection c : os.getConnections()) { + if (figures.contains(c.getInputSlot().getFigure())) { + c.setStyle(rule.getLineStyle()); + c.setColor(rule.getLineColor()); + } + } + } + } + } + } + + public void addRule(ConnectionStyleRule r) { + connectionStyleRules.add(r); + } + + public static class ConnectionStyleRule { + + private Color lineColor; + private Connection.ConnectionStyle lineStyle; + private Selector selector; + + public ConnectionStyleRule(Selector selector, Color lineColor, Connection.ConnectionStyle lineStyle) { + this.selector = selector; + this.lineColor = lineColor; + this.lineStyle = lineStyle; + } + + public Selector getSelector() { + return selector; + } + + public Color getLineColor() { + return lineColor; + } + + public Connection.ConnectionStyle getLineStyle() { + return lineStyle; + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/CustomFilter.java 2009-08-01 04:18:49.304438711 +0100 @@ -0,0 +1,158 @@ +/* + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
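(Editorial illustration, not part of the patch.) A sketch of how the ConnectionFilter above might be combined with a hand-written Selector. The anonymous Selector and the ConnectionStyle.NORMAL constant are assumptions about the Graph module, which is not part of this hunk:

    import com.sun.hotspot.igv.filter.ConnectionFilter;
    import com.sun.hotspot.igv.graph.Connection;
    import com.sun.hotspot.igv.graph.Diagram;
    import com.sun.hotspot.igv.graph.Figure;
    import com.sun.hotspot.igv.graph.Selector;
    import java.awt.Color;
    import java.util.ArrayList;
    import java.util.List;

    public class ConnectionFilterExample {
        // Colors every connection whose endpoints both lie in the selected set gray.
        public static void grayOutConnections(Diagram diagram) {
            // A selector that simply returns all figures of the diagram.
            Selector allFigures = new Selector() {
                public List<Figure> selected(Diagram d) {
                    return new ArrayList<Figure>(d.getFigures());
                }
            };
            ConnectionFilter filter = new ConnectionFilter("gray connections");
            filter.addRule(new ConnectionFilter.ConnectionStyleRule(
                    allFigures, Color.GRAY, Connection.ConnectionStyle.NORMAL));
            filter.apply(diagram);
        }
    }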
+ */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.graph.Diagram; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.Collection; +import java.util.logging.Level; +import java.util.logging.Logger; +import org.openide.DialogDisplayer; +import org.openide.NotifyDescriptor; +import org.openide.cookies.OpenCookie; +import org.openide.filesystems.Repository; +import org.openide.filesystems.FileSystem; +import org.openide.filesystems.FileObject; +import org.openide.util.Exceptions; +import org.openide.util.Lookup; + +/** + * + * @author Thomas Wuerthinger + */ +public class CustomFilter extends AbstractFilter { + + public static final String JAVASCRIPT_HELPER_ID = "JavaScriptHelper"; + private static ScriptEngineAbstraction engine; + private String code; + private String name; + + public CustomFilter(String name, String code) { + this.name = name; + this.code = code; + getProperties().setProperty("name", name); + } + + public String getName() { + return name; + } + + public String getCode() { + return code; + } + + public void setName(String s) { + name = s; + fireChangedEvent(); + } + + public void setCode(String s) { + code = s; + fireChangedEvent(); + } + + @Override + public OpenCookie getEditor() { + return new OpenCookie() { + + public void open() { + openInEditor(); + } + }; + } + + public boolean openInEditor() { + EditFilterDialog dialog = new EditFilterDialog(CustomFilter.this); + dialog.setVisible(true); + return dialog.wasAccepted(); + } + + @Override + public String toString() { + return getName(); + } + + public static ScriptEngineAbstraction getEngine() { + if (engine == null) { + + ScriptEngineAbstraction chosen = null; + try { + Collection list = Lookup.getDefault().lookupAll(ScriptEngineAbstraction.class); + for (ScriptEngineAbstraction s : list) { + if (s.initialize(getJsHelperText())) { + if (chosen == null || !(chosen instanceof JavaSE6ScriptEngine)) { + chosen = s; + } + } + } + } catch (NoClassDefFoundError ncdfe) { + Logger.getLogger("global").log(Level.SEVERE, null, ncdfe); + } + + if (chosen == null) { + NotifyDescriptor message = new NotifyDescriptor.Message("Could not find a scripting engine. Please make sure that the Rhino scripting engine is available. 
Otherwise filter cannot be used.", NotifyDescriptor.ERROR_MESSAGE); + DialogDisplayer.getDefault().notifyLater(message); + chosen = new NullScriptEngine(); + } + + engine = chosen; + } + + return engine; + } + + private static String getJsHelperText() { + InputStream is = null; + StringBuilder sb = new StringBuilder("importPackage(Packages.com.sun.hotspot.igv.filter);importPackage(Packages.com.sun.hotspot.igv.graph);importPackage(Packages.com.sun.hotspot.igv.data);importPackage(Packages.com.sun.hotspot.igv.util);importPackage(java.awt);"); + try { + FileSystem fs = Repository.getDefault().getDefaultFileSystem(); + FileObject fo = fs.getRoot().getFileObject(JAVASCRIPT_HELPER_ID); + is = fo.getInputStream(); + BufferedReader r = new BufferedReader(new InputStreamReader(is)); + String s; + while ((s = r.readLine()) != null) { + sb.append(s); + sb.append("\n"); + } + + } catch (IOException ex) { + Logger.getLogger("global").log(Level.SEVERE, null, ex); + } finally { + try { + is.close(); + } catch (IOException ex) { + Exceptions.printStackTrace(ex); + } + } + return sb.toString(); + } + + public void apply(Diagram d) { + getEngine().execute(d, code); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/EditFilterDialog.form 2009-08-01 04:18:49.738417183 +0100 @@ -0,0 +1,133 @@ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/EditFilterDialog.java 2009-08-01 04:18:50.187205954 +0100 @@ -0,0 +1,161 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.filter; + +import org.openide.windows.WindowManager; + +/** + * + * @author Thomas Wuerthinger + */ +public class EditFilterDialog extends javax.swing.JDialog { + + private CustomFilter customFilter; + private boolean accepted; + + /** Creates new form EditFilterDialog */ + public EditFilterDialog(CustomFilter customFilter) { + super(WindowManager.getDefault().getMainWindow(), true); + this.customFilter = customFilter; + initComponents(); + + sourceTextArea.setText(customFilter.getCode()); + nameTextField.setText(customFilter.getName()); + } + + public boolean wasAccepted() { + return accepted; + } + + /** This method is called from within the constructor to + * initialize the form. + * WARNING: Do NOT modify this code. The content of this method is + * always regenerated by the Form Editor. 
+ */ + // //GEN-BEGIN:initComponents + private void initComponents() { + + jScrollPane1 = new javax.swing.JScrollPane(); + sourceTextArea = new javax.swing.JTextArea(); + nameTextField = new javax.swing.JTextField(); + nameLabel = new javax.swing.JLabel(); + sourceLabel = new javax.swing.JLabel(); + okButton = new javax.swing.JButton(); + cancelButton = new javax.swing.JButton(); + + setDefaultCloseOperation(javax.swing.WindowConstants.DISPOSE_ON_CLOSE); + setTitle(org.openide.util.NbBundle.getMessage(EditFilterDialog.class, "title")); // NOI18N + setResizable(false); + + sourceTextArea.setColumns(20); + sourceTextArea.setRows(5); + jScrollPane1.setViewportView(sourceTextArea); + + nameTextField.setText("null"); + + nameLabel.setText(org.openide.util.NbBundle.getMessage(EditFilterDialog.class, "jLabel1.text")); // NOI18N + + sourceLabel.setText(org.openide.util.NbBundle.getMessage(EditFilterDialog.class, "jLabel2.text")); // NOI18N + + okButton.setText(org.openide.util.NbBundle.getMessage(EditFilterDialog.class, "jButton1.text")); // NOI18N + okButton.addActionListener(new java.awt.event.ActionListener() { + public void actionPerformed(java.awt.event.ActionEvent evt) { + cancelButtonClicked(evt); + okButtonClicked(evt); + } + }); + + cancelButton.setText(org.openide.util.NbBundle.getMessage(EditFilterDialog.class, "jButton2.text")); // NOI18N + cancelButton.addActionListener(new java.awt.event.ActionListener() { + public void actionPerformed(java.awt.event.ActionEvent evt) { + cancelButtonClicked(evt); + } + }); + + org.jdesktop.layout.GroupLayout layout = new org.jdesktop.layout.GroupLayout(getContentPane()); + getContentPane().setLayout(layout); + layout.setHorizontalGroup( + layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(layout.createSequentialGroup() + .addContainerGap() + .add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(org.jdesktop.layout.GroupLayout.TRAILING, layout.createSequentialGroup() + .add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(sourceLabel) + .add(nameLabel)) + .add(25, 25, 25) + .add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(jScrollPane1, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, 695, Short.MAX_VALUE) + .add(nameTextField, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, 695, Short.MAX_VALUE))) + .add(org.jdesktop.layout.GroupLayout.TRAILING, layout.createSequentialGroup() + .add(okButton, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, 76, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE) + .addPreferredGap(org.jdesktop.layout.LayoutStyle.RELATED) + .add(cancelButton))) + .addContainerGap()) + ); + layout.setVerticalGroup( + layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(layout.createSequentialGroup() + .addContainerGap() + .add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.BASELINE) + .add(nameLabel) + .add(nameTextField, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE)) + .addPreferredGap(org.jdesktop.layout.LayoutStyle.UNRELATED) + .add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(sourceLabel) + .add(jScrollPane1, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, 337, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE)) + .addPreferredGap(org.jdesktop.layout.LayoutStyle.RELATED, 16, Short.MAX_VALUE) + .add(layout.createParallelGroup(org.jdesktop.layout.GroupLayout.BASELINE) + 
.add(cancelButton) + .add(okButton)) + .addContainerGap()) + ); + + pack(); + }// //GEN-END:initComponents + +private void okButtonClicked(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_okButtonClicked + this.customFilter.setName(this.nameTextField.getText()); + this.customFilter.setCode(this.sourceTextArea.getText()); + accepted = true; + setVisible(false); +}//GEN-LAST:event_okButtonClicked + +private void cancelButtonClicked(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_cancelButtonClicked + setVisible(false); +}//GEN-LAST:event_cancelButtonClicked + + + // Variables declaration - do not modify//GEN-BEGIN:variables + private javax.swing.JButton cancelButton; + private javax.swing.JScrollPane jScrollPane1; + private javax.swing.JLabel nameLabel; + private javax.swing.JTextField nameTextField; + private javax.swing.JButton okButton; + private javax.swing.JLabel sourceLabel; + private javax.swing.JTextArea sourceTextArea; + // End of variables declaration//GEN-END:variables + +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/Filter.java 2009-08-01 04:18:50.597731802 +0100 @@ -0,0 +1,45 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.data.ChangedEvent; +import com.sun.hotspot.igv.data.ChangedEventProvider; +import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.graph.Diagram; +import org.openide.cookies.OpenCookie; + +/** + * + * @author Thomas Wuerthinger + */ +public interface Filter extends Properties.Provider, ChangedEventProvider { + + public String getName(); + + public void apply(Diagram d); + + OpenCookie getEditor(); + + ChangedEvent getChangedEvent(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/FilterChain.java 2009-08-01 04:18:51.014722203 +0100 @@ -0,0 +1,155 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.data.ChangedEvent; +import com.sun.hotspot.igv.data.ChangedEventProvider; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class FilterChain implements ChangedEventProvider { + + private List filters; + private transient ChangedEvent changedEvent; + private boolean fireEvents; + + public FilterChain() { + filters = new ArrayList(); + changedEvent = new ChangedEvent(this); + this.fireEvents = true; + } + + public FilterChain(FilterChain f) { + this.filters = new ArrayList(f.filters); + changedEvent = new ChangedEvent(this); + this.fireEvents = true; + } + + public ChangedEvent getChangedEvent() { + return changedEvent; + } + + public Filter getFilterAt(int index) { + assert index >= 0 && index < filters.size(); + return filters.get(index); + } + + public void apply(Diagram d) { + for (Filter f : filters) { + f.apply(d); + } + } + + public void apply(Diagram d, FilterChain sequence) { + List applied = new ArrayList(); + for (Filter f : sequence.getFilters()) { + if (filters.contains(f)) { + f.apply(d); + applied.add(f); + } + } + + + for (Filter f : filters) { + if (!applied.contains(f)) { + f.apply(d); + } + } + } + + public void beginAtomic() { + this.fireEvents = false; + } + + public void endAtomic() { + this.fireEvents = true; + changedEvent.fire(); + } + + public void addFilter(Filter filter) { + assert filter != null; + filters.add(filter); + if (fireEvents) { + changedEvent.fire(); + } + } + + public void addFilterSameSequence(Filter filter) { + assert filter != null; + filters.add(filter); + if (fireEvents) { + changedEvent.fire(); + } + } + + public boolean containsFilter(Filter filter) { + return filters.contains(filter); + } + + public void removeFilter(Filter filter) { + assert filters.contains(filter); + filters.remove(filter); + if (fireEvents) { + changedEvent.fire(); + } + } + + public void moveFilterUp(Filter filter) { + assert filters.contains(filter); + int index = filters.indexOf(filter); + if (index != 0) { + filters.remove(index); + filters.add(index - 1, filter); + } + if (fireEvents) { + changedEvent.fire(); + } + } + + public void moveFilterDown(Filter filter) { + assert filters.contains(filter); + int index = filters.indexOf(filter); + if (index != filters.size() - 1) { + filters.remove(index); + filters.add(index + 1, filter); + } + if (fireEvents) { + changedEvent.fire(); + } + } + + public List getFilters() { + return Collections.unmodifiableList(filters); + } + + public void clear() { + filters.clear(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/FilterChainProvider.java 2009-08-01 04:18:51.456785005 +0100 @@ -0,0 +1,35 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filter; + +/** + * + * @author Thomas Wuerthinger + */ +public interface FilterChainProvider { + + public FilterChain getFilterChain(); + + public FilterChain getSequence(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/FilterSetting.java 2009-08-01 04:18:51.848878084 +0100 @@ -0,0 +1,82 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.filter; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class FilterSetting { + + private Set filters; + private String name; + + public FilterSetting() { + this(null); + } + + public FilterSetting(String name) { + this.name = name; + filters = new HashSet(); + } + + public Set getFilters() { + return Collections.unmodifiableSet(filters); + } + + public void addFilter(Filter f) { + assert !filters.contains(f); + filters.add(f); + } + + public void removeFilter(Filter f) { + assert filters.contains(f); + filters.remove(f); + } + + public boolean containsFilter(Filter f) { + return filters.contains(f); + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public int getFilterCount() { + return filters.size(); + } + + @Override + public String toString() { + return getName(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/JavaSE6ScriptEngine.java 2009-08-01 04:18:52.276196636 +0100 @@ -0,0 +1,68 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.graph.Diagram; +import javax.script.Bindings; +import javax.script.ScriptContext; +import javax.script.ScriptEngine; +import javax.script.ScriptEngineManager; +import javax.script.ScriptException; +import org.openide.util.Exceptions; + +/** + * + * @author Thomas Wuerthinger + */ +public class JavaSE6ScriptEngine implements ScriptEngineAbstraction { + + private ScriptEngine engine; + private Bindings bindings; + + public boolean initialize(String jsHelperText) { + try { + ScriptEngineManager sem = new ScriptEngineManager(); + ScriptEngine e = sem.getEngineByName("ECMAScript"); + engine = e; + e.eval(jsHelperText); + Bindings b = e.getContext().getBindings(ScriptContext.ENGINE_SCOPE); + b.put("IO", System.out); + bindings = b; + return true; + } catch (Exception e) { + return false; + } + } + + public void execute(Diagram d, String code) { + try { + Bindings b = bindings; + b.put("graph", d); + engine.eval(code, b); + } catch (ScriptException ex) { + Exceptions.printStackTrace(ex); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/NullScriptEngine.java 2009-08-01 04:18:52.684913005 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.graph.Diagram; + +/** + * + * @author Thomas Wuerthinger + */ +public class NullScriptEngine implements ScriptEngineAbstraction { + + public boolean initialize(String jsHelperText) { + return true; + } + + public void execute(Diagram d, String code) { + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/RemoveFilter.java 2009-08-01 04:18:53.121948724 +0100 @@ -0,0 +1,132 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.graph.InputSlot; +import com.sun.hotspot.igv.graph.Selector; +import com.sun.hotspot.igv.data.Properties; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class RemoveFilter extends AbstractFilter { + + private List rules; + private String name; + + public RemoveFilter(String name) { + this.name = name; + rules = new ArrayList(); + } + + public String getName() { + return name; + } + + public void apply(Diagram diagram) { + + for (RemoveRule r : rules) { + + List
<Figure> list = r.getSelector().selected(diagram); + Set<Figure> figuresToRemove = new HashSet<Figure>(); +
+ List<Figure> protectedFigures = null; + if (r.getRemoveAllWithoutPredecessor()) { + protectedFigures = diagram.getRootFigures(); + } +
+ for (Figure f : list) { + if (r.getRemoveOnlyInputs()) { + List<InputSlot> inputSlots = new ArrayList<InputSlot>(); + for (InputSlot is : f.getInputSlots()) { + inputSlots.add(is); + } + for (InputSlot is : inputSlots) { + f.removeSlot(is); + } + + f.createInputSlot(); + } else { + figuresToRemove.add(f); + } + } +
+ if (r.getRemoveAllWithoutPredecessor()) { + boolean progress = true; + while (progress) { + List<Figure>
rootFigures = diagram.getRootFigures(); + progress = false; + for (Figure f : rootFigures) { + if (!protectedFigures.contains(f)) { + figuresToRemove.add(f); + progress = true; + } + } + } + } + + diagram.removeAllFigures(figuresToRemove); + } + } + + public void addRule(RemoveRule rule) { + rules.add(rule); + } + + public static class RemoveRule { + + private Selector selector; + private boolean removeAllWithoutPredecessor; + private boolean removeOnlyInputs; + + public RemoveRule(Selector selector, boolean b) { + this(selector, b, false); + } + + public RemoveRule(Selector selector, boolean removeAllWithoutPredecessor, boolean removeOnlyInputs) { + this.selector = selector; + this.removeOnlyInputs = removeOnlyInputs; + this.removeAllWithoutPredecessor = removeAllWithoutPredecessor; + } + + public Selector getSelector() { + return selector; + } + + public boolean getRemoveOnlyInputs() { + return removeOnlyInputs; + } + + public boolean getRemoveAllWithoutPredecessor() { + return removeAllWithoutPredecessor; + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/RemoveInputsFilter.java 2009-08-01 04:18:53.572265509 +0100 @@ -0,0 +1,143 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.graph.Connection; +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.graph.InputSlot; +import com.sun.hotspot.igv.graph.OutputSlot; +import com.sun.hotspot.igv.graph.Selector; +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class RemoveInputsFilter extends AbstractFilter { + + private List rules; + private String name; + + public RemoveInputsFilter(String name) { + this.name = name; + rules = new ArrayList(); + } + + public String getName() { + return name; + } + + public void apply(Diagram diagram) { + + for (RemoveInputsRule r : rules) { + + List
<Figure> list = r.getSelector().selected(diagram); + for (Figure f : list) { + int z = 0; + List<InputSlot> last = new ArrayList<InputSlot>(); + for (InputSlot is : f.getInputSlots()) { + if (z >= r.getStartingIndex() && z <= r.getEndIndex() && is.getConnections().size() > 0) { + StringBuilder sb = new StringBuilder(); + List<Connection> conns = is.getConnections(); + for (int i = 0; i < conns.size(); i++) { + Connection c = conns.get(i); + OutputSlot os = c.getOutputSlot(); + Figure pred = os.getFigure(); + if (i != 0) { + sb.append("<br>"); + } + sb.append(pred.getLines()[0]); + } + is.removeAllConnections(); + is.setShortName("X"); + is.setName(sb.toString()); + last.add(is); + } else { + last.clear(); + } + z++; + } +
+ if (last.size() > 3) { + InputSlot first = last.get(0); + first.setShortName("XX"); + + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < last.size(); i++) { + InputSlot is2 = last.get(i); + if (i != 0) { + sb.append("<br>
"); + } + sb.append(is2.getName()); + } + + first.setName(sb.toString()); + + for (int i = 1; i < last.size(); i++) { + f.removeSlot(last.get(i)); + } + } + } + } + } + + public void addRule(RemoveInputsRule rule) { + rules.add(rule); + } + + public static class RemoveInputsRule { + + private Selector selector; + private int startingIndex; + private int endIndex; + + public RemoveInputsRule(Selector selector) { + this(selector, 0); + } + + public RemoveInputsRule(Selector selector, int startIndex) { + this(selector, startIndex, Integer.MAX_VALUE); + } + + public RemoveInputsRule(Selector selector, int startIndex, int endIndex) { + this.startingIndex = startIndex; + this.endIndex = endIndex; + this.selector = selector; + } + + public int getStartingIndex() { + return startingIndex; + } + + public int getEndIndex() { + return endIndex; + } + + public Selector getSelector() { + return selector; + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/RemoveSelfLoopsFilter.java 2009-08-01 04:18:54.025085082 +0100 @@ -0,0 +1,80 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.graph.Connection; +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.graph.InputSlot; +import com.sun.hotspot.igv.graph.OutputSlot; +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class RemoveSelfLoopsFilter extends AbstractFilter { + + private String name; + + /** Creates a new instance of RemoveSelfLoops */ + public RemoveSelfLoopsFilter(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public void apply(Diagram d) { + + for (Figure f : d.getFigures()) { + + for (InputSlot is : f.getInputSlots()) { + + List toRemove = new ArrayList(); + for (Connection c : is.getConnections()) { + + if (c.getOutputSlot().getFigure() == f) { + toRemove.add(c); + } + } + + for (Connection c : toRemove) { + + c.remove(); + + OutputSlot os = c.getOutputSlot(); + if (os.getConnections().size() == 0) { + f.removeSlot(os); + } + + c.getInputSlot().setShortName("O"); + c.getInputSlot().setName("Self Loop"); + } + } + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/ScriptEngineAbstraction.java 2009-08-01 04:18:54.455823226 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.filter; + +import com.sun.hotspot.igv.graph.Diagram; + +/** + * + * @author Thomas Wuerthinger + */ +public interface ScriptEngineAbstraction { + + public boolean initialize(String jsHelperText); + + public void execute(Diagram d, String code); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/SplitFilter.java 2009-08-01 04:18:54.885906525 +0100 @@ -0,0 +1,70 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filter; +
+import com.sun.hotspot.igv.graph.Connection; +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.graph.InputSlot; +import com.sun.hotspot.igv.graph.OutputSlot; +import com.sun.hotspot.igv.graph.Selector; +import java.util.List; +
+/** + * + * @author Thomas Wuerthinger + */ +public class SplitFilter extends AbstractFilter { + + private String name; + private Selector selector; + + public SplitFilter(String name, Selector selector) { + this.name = name; + this.selector = selector; + } + + public String getName() { + return name; + } + + public void apply(Diagram d) { + List<Figure>
list = selector.selected(d); + + for (Figure f : list) { + for (OutputSlot os : f.getOutputSlots()) { + for (Connection c : os.getConnections()) { + InputSlot is = c.getInputSlot(); + is.setName(f.getProperties().get("dump_spec")); + String s = f.getProperties().get("short_name"); + if (s != null) { + is.setShortName(s); + } + } + } + + d.removeFigure(f); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/helper.js 2009-08-01 04:18:55.316719586 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + + /** + * + * @author Thomas Wuerthinger + */ + +function colorize(property, regexp, color) { + var f = new ColorFilter(""); + f.addRule(new ColorFilter.ColorRule(new MatcherSelector(new Properties.RegexpPropertyMatcher(property, regexp)), color)); + f.apply(graph); +} + +function remove(property, regexp) { + var f = new RemoveFilter(""); + f.addRule(new RemoveFilter.RemoveRule(new MatcherSelector(new Properties.RegexpPropertyMatcher(property, regexp)), false, false)); + f.apply(graph); +} + +function split(property, regexp) { + var f = new SplitFilter("", new MatcherSelector(new Properties.RegexpPropertyMatcher(property, regexp))); + f.apply(graph); +} + +function removeInputs(property, regexp, from, to) { + var f = new RemoveInputsFilter(""); + if(from == undefined && to == undefined) { + f.addRule(new RemoveInputsFilter.RemoveInputsRule(new MatcherSelector(new Properties.RegexpPropertyMatcher(property, regexp)))); + } else if(to == undefined) { + f.addRule(new RemoveInputsFilter.RemoveInputsRule(new MatcherSelector(new Properties.RegexpPropertyMatcher(property, regexp)), from)); + } else { + f.addRule(new RemoveInputsFilter.RemoveInputsRule(new MatcherSelector(new Properties.RegexpPropertyMatcher(property, regexp)), from, to)); + } + f.apply(graph); +} + +var black = Color.black; +var blue = Color.blue; +var cyan = Color.cyan; +var darkGray = Color.darkGray; +var gray = Color.gray; +var green = Color.green; +var lightGray = Color.lightGray; +var magenta = Color.magenta; +var orange = Color.orange; +var pink = Color.pink +var red = Color.red; +var yellow = Color.yellow; +var white = Color.white; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Filter/src/com/sun/hotspot/igv/filter/layer.xml 2009-08-01 04:18:55.750222154 +0100 @@ -0,0 +1,5 @@ + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ 
new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/build.xml 2009-08-01 04:18:56.251294618 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.filterwindow. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/manifest.mf 2009-08-01 04:18:56.691248881 +0100 @@ -0,0 +1,6 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.filterwindow +OpenIDE-Module-Layer: com/sun/hotspot/igv/filterwindow/layer.xml +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/filterwindow/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/nbproject/build-impl.xml 2009-08-01 04:18:57.096230267 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/nbproject/genfiles.properties 2009-08-01 04:18:57.554848094 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=401b2654 +build.xml.script.CRC32=9c158403 +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. +nbproject/build-impl.xml.data.CRC32=401b2654 +nbproject/build-impl.xml.script.CRC32=19fb08e0 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/nbproject/project.properties 2009-08-01 04:18:57.954339714 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/nbproject/project.xml 2009-08-01 04:18:58.365170028 +0100 @@ -0,0 +1,109 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.filterwindow + + + + com.sun.hotspot.igv.data + + + + 1.0 + + + + com.sun.hotspot.igv.filter + + + + 1.0 + + + + com.sun.hotspot.igv.graph + + + + 1.0 + + + + com.sun.hotspot.igv.util + + + + 1.0 + + + + org.openide.actions + + + + 6.6.1.1 + + + + org.openide.dialogs + + + + 7.5.1 + + + + org.openide.explorer + + + + 6.12.1 + + + + org.openide.filesystems + + + + 7.3.1 + + + + org.openide.loaders + + + + 6.7.1 + + + + org.openide.nodes + + + + 7.2.1.1 + + + + org.openide.util + + + + 7.10.1.1 + + + + org.openide.windows + + + + 6.18.1 + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/nbproject/suite.properties 2009-08-01 04:18:58.790422182 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. 
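The Filter sources earlier in this patch define the filtering API that the FilterWindow module sources below consume: each Filter rewrites a Diagram, and a FilterChain sequences filters and fires change events so views can refresh. The fragment below is only an illustrative sketch, not part of the patch; it assumes a Diagram instance named diagram obtained from the com.sun.hotspot.igv.graph module, and the property name "name" and pattern "Proj.*" are placeholder values chosen for the example.

    FilterChain chain = new FilterChain();
    // RemoveSelfLoopsFilter and SplitFilter are added by this patch; MatcherSelector
    // and Properties.RegexpPropertyMatcher come from the graph and data modules,
    // as used in helper.js above.
    chain.addFilter(new RemoveSelfLoopsFilter("Remove self loops"));
    chain.addFilter(new SplitFilter("Split projections",
            new MatcherSelector(new Properties.RegexpPropertyMatcher("name", "Proj.*"))));
    // FilterChain.apply(Diagram) runs every filter in order.
    chain.apply(diagram);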
--- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/META-INF/services/com.sun.hotspot.igv.filter.FilterChainProvider 2009-08-01 04:18:59.323670514 +0100 @@ -0,0 +1 @@ +com.sun.hotspot.igv.filterwindow.FilterChainProviderImplementation --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/Bundle.properties 2009-08-01 04:19:00.523332418 +0100 @@ -0,0 +1,3 @@ +OpenIDE-Module-Name=FilterWindow +CTL_FilterTopComponent=Filter Window +HINT_FilterTopComponent=This is a Filter window --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/CheckListView.java 2009-08-01 04:19:00.924357616 +0100 @@ -0,0 +1,52 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filterwindow; + +import javax.swing.JList; +import org.openide.explorer.view.ListView; +import org.openide.explorer.view.NodeListModel; + +/** + * + * @author Thomas Wuerthinger + */ +public class CheckListView extends ListView { + + @Override + public void showSelection(int[] indices) { + super.showSelection(indices); + } + + @Override + protected NodeListModel createModel() { + return new CheckNodeListModel(); + } + + @Override + protected JList createList() { + JList tmpList = super.createList(); + tmpList.setCellRenderer(new CheckRenderer(tmpList)); + return tmpList; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/CheckNode.java 2009-08-01 04:19:01.356916002 +0100 @@ -0,0 +1,70 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filterwindow; + +import com.sun.hotspot.igv.data.ChangedEvent; +import org.openide.nodes.AbstractNode; +import org.openide.nodes.Children; +import org.openide.util.Lookup; + +/** + * + * @author Thomas Wuerthinger + */ +public class CheckNode extends AbstractNode { + + private ChangedEvent selectionChangedEvent; + public boolean selected; + public boolean enabled; + + public CheckNode(Children c, Lookup lookup) { + super(c, lookup); + selectionChangedEvent = new ChangedEvent(this); + selected = false; + enabled = true; + } + + public ChangedEvent getSelectionChangedEvent() { + return selectionChangedEvent; + } + + public boolean isSelected() { + return selected; + } + + public void setSelected(boolean b) { + if (b != selected) { + selected = b; + selectionChangedEvent.fire(); + } + } + + public void setEnabled(boolean b) { + enabled = b; + } + + public boolean isEnabled() { + return enabled; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/CheckNodeListModel.java 2009-08-01 04:19:01.783667662 +0100 @@ -0,0 +1,46 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filterwindow; + +import org.openide.explorer.view.NodeListModel; +import org.openide.nodes.Node; + +/** + * + * @author Thomas Wuerthinger + */ +public class CheckNodeListModel extends NodeListModel { + + private Node rootNode; + + @Override + public void setNode(Node rootNode) { + this.rootNode = rootNode; + super.setNode(rootNode); + } + + public CheckNode getCheckNodeAt(int index) { + return (CheckNode) rootNode.getChildren().getNodes()[index]; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/CheckRenderer.java 2009-08-01 04:19:02.207627677 +0100 @@ -0,0 +1,88 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filterwindow; + +import java.awt.Color; +import java.awt.Component; +import java.awt.Dimension; +import java.awt.Point; +import java.awt.Rectangle; +import java.awt.event.MouseAdapter; +import java.awt.event.MouseEvent; +import javax.swing.JCheckBox; +import javax.swing.JList; +import javax.swing.ListCellRenderer; + +/** + * @author Thomas Wuerthinger + */ +public class CheckRenderer extends JCheckBox implements ListCellRenderer { + + private JList list; + private Color startBackground; + + public CheckRenderer(final JList list) { + this.list = list; + list.addMouseListener( + new MouseAdapter() { + + @Override + public void mouseClicked(MouseEvent e) { + int index = list.locationToIndex(e.getPoint()); + Point p2 = list.indexToLocation(index); + Rectangle r = new Rectangle(p2.x, p2.y, getPreferredSize().height, getPreferredSize().height); + if (r.contains(e.getPoint())) { + CheckNode node = ((CheckNodeListModel) list.getModel()).getCheckNodeAt(index); + node.setSelected(!node.isSelected()); + list.repaint(); + e.consume(); + } + } + }); + + this.setPreferredSize(new Dimension(getPreferredSize().width, getPreferredSize().height - 5)); + startBackground = this.getBackground(); + } + + public Component getListCellRendererComponent(final JList list, Object value, final int index, boolean isSelected, boolean cellHasFocus) { + setText(value.toString()); + CheckNode node = ((CheckNodeListModel) list.getModel()).getCheckNodeAt(index); + this.setSelected(node.isSelected()); + this.setEnabled(list.isEnabled()); + + if (isSelected && list.hasFocus()) { + this.setBackground(list.getSelectionBackground()); + this.setForeground(list.getSelectionForeground()); + } else if (isSelected) { + assert !list.hasFocus(); + this.setBackground(startBackground); + this.setForeground(list.getForeground()); + + } else { + this.setBackground(list.getBackground()); + this.setForeground(list.getForeground()); + } + return this; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/FilterChainProviderImplementation.java 2009-08-01 04:19:02.649237375 +0100 @@ -0,0 +1,42 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filterwindow; + +import com.sun.hotspot.igv.filter.FilterChain; +import com.sun.hotspot.igv.filter.FilterChainProvider; + +/** + * + * @author Thomas Wuerthinger + */ +public class FilterChainProviderImplementation implements FilterChainProvider { + + public FilterChain getFilterChain() { + return FilterTopComponent.findInstance().getFilterChain(); + } + + public FilterChain getSequence() { + return FilterTopComponent.findInstance().getSequence(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/FilterNode.java 2009-08-01 04:19:03.079772447 +0100 @@ -0,0 +1,112 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.filterwindow; + +import com.sun.hotspot.igv.filterwindow.actions.MoveFilterDownAction; +import com.sun.hotspot.igv.filterwindow.actions.MoveFilterUpAction; +import com.sun.hotspot.igv.filterwindow.actions.RemoveFilterAction; +import com.sun.hotspot.igv.filter.Filter; +import com.sun.hotspot.igv.filter.FilterChain; +import com.sun.hotspot.igv.data.ChangedListener; +import com.sun.hotspot.igv.util.PropertiesSheet; +import javax.swing.Action; +import org.openide.actions.OpenAction; +import org.openide.nodes.Children; +import org.openide.nodes.Sheet; +import org.openide.util.Lookup; +import org.openide.util.LookupEvent; +import org.openide.util.LookupListener; +import org.openide.util.Utilities; +import org.openide.util.lookup.AbstractLookup; +import org.openide.util.lookup.InstanceContent; + +/** + * + * @author Thomas Wuerthinger + */ +public class FilterNode extends CheckNode implements LookupListener, ChangedListener { + + private Filter filter; + private Lookup.Result result; + + public FilterNode(Filter filter) { + this(filter, new InstanceContent()); + } + + private FilterNode(Filter filter, InstanceContent content) { + super(Children.LEAF, new AbstractLookup(content)); + content.add(filter); + + content.add(filter.getEditor()); + this.filter = filter; + filter.getChangedEvent().addListener(new ChangedListener() { + + public void changed(Filter source) { + update(); + } + }); + + update(); + + Lookup.Template tpl = new Lookup.Template(FilterChain.class); + result = Utilities.actionsGlobalContext().lookup(tpl); + result.addLookupListener(this); + + FilterTopComponent.findInstance().getFilterSettingsChangedEvent().addListener(this); + resultChanged(null); + } + + private void update() { + this.setDisplayName(filter.getName()); + } + + public Filter getFilter() { + return filter; + } + + @Override + protected Sheet createSheet() { + Sheet s = super.createSheet(); + PropertiesSheet.initializeSheet(getFilter().getProperties(), s); + return s; + } + + @Override + public Action[] getActions(boolean b) { + return new Action[]{(Action) OpenAction.findObject(OpenAction.class, true), (Action) MoveFilterUpAction.findObject(MoveFilterUpAction.class, true), (Action) MoveFilterDownAction.findObject(MoveFilterDownAction.class, true), (Action) RemoveFilterAction.findObject(RemoveFilterAction.class, true)}; + } + + @Override + public Action getPreferredAction() { + return OpenAction.get(OpenAction.class).createContextAwareInstance(Utilities.actionsGlobalContext()); + } + + public void resultChanged(LookupEvent lookupEvent) { + changed(FilterTopComponent.findInstance()); + } + + public void changed(FilterTopComponent source) { + setSelected(source.getFilterChain().containsFilter(filter)); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/FilterTopComponent.form 2009-08-01 04:19:03.524247395 +0100 @@ -0,0 +1,16 @@ + + +
+ + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/FilterTopComponent.java 2009-08-01 04:19:03.929405540 +0100 @@ -0,0 +1,689 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filterwindow; + +import com.sun.hotspot.igv.filterwindow.actions.MoveFilterDownAction; +import com.sun.hotspot.igv.filterwindow.actions.MoveFilterUpAction; +import com.sun.hotspot.igv.filterwindow.actions.NewFilterAction; +import com.sun.hotspot.igv.filterwindow.actions.RemoveFilterAction; +import com.sun.hotspot.igv.filterwindow.actions.RemoveFilterSettingsAction; +import com.sun.hotspot.igv.filterwindow.actions.SaveFilterSettingsAction; +import com.sun.hotspot.igv.filter.CustomFilter; +import com.sun.hotspot.igv.filter.Filter; +import com.sun.hotspot.igv.filter.FilterChain; +import com.sun.hotspot.igv.filter.FilterSetting; +import com.sun.hotspot.igv.data.ChangedEvent; +import com.sun.hotspot.igv.data.ChangedListener; +import java.awt.BorderLayout; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Serializable; +import java.io.Writer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import javax.swing.JComboBox; +import javax.swing.UIManager; +import javax.swing.border.Border; +import org.openide.DialogDisplayer; +import org.openide.ErrorManager; +import org.openide.NotifyDescriptor; +import org.openide.awt.ToolbarPool; +import org.openide.explorer.ExplorerManager; +import org.openide.explorer.ExplorerUtils; +import org.openide.nodes.AbstractNode; +import org.openide.nodes.Children; +import org.openide.nodes.Node; +import org.openide.util.Exceptions; +import org.openide.util.Lookup; +import org.openide.util.LookupEvent; +import org.openide.util.LookupListener; +import org.openide.util.NbBundle; +import org.openide.util.Utilities; +import org.openide.awt.Toolbar; +import org.openide.filesystems.FileLock; +import org.openide.util.actions.SystemAction; +import 
org.openide.windows.TopComponent; +import org.openide.windows.WindowManager; +import org.openide.filesystems.Repository; +import org.openide.filesystems.FileSystem; +import org.openide.filesystems.FileObject; + +/** + * + * @author Thomas Wuerthinger + */ +public final class FilterTopComponent extends TopComponent implements LookupListener, ExplorerManager.Provider { + + private static FilterTopComponent instance; + public static final String FOLDER_ID = "Filters"; + public static final String AFTER_ID = "after"; + public static final String ENABLED_ID = "enabled"; + public static final String PREFERRED_ID = "FilterTopComponent"; + private CheckListView view; + private ExplorerManager manager; + private FilterChain filterChain; + private FilterChain sequence; + private Lookup.Result result; + private JComboBox comboBox; + private List filterSettings; + private FilterSetting customFilterSetting = new FilterSetting("-- Custom --"); + private ChangedEvent filterSettingsChangedEvent; + private ActionListener comboBoxActionListener = new ActionListener() { + + public void actionPerformed(ActionEvent e) { + comboBoxSelectionChanged(); + } + }; + + public ChangedEvent getFilterSettingsChangedEvent() { + return filterSettingsChangedEvent; + } + + public FilterChain getSequence() { + return sequence; + } + + public void updateSelection() { + Node[] nodes = this.getExplorerManager().getSelectedNodes(); + int[] arr = new int[nodes.length]; + for (int i = 0; i < nodes.length; i++) { + int index = sequence.getFilters().indexOf(((FilterNode) nodes[i]).getFilter()); + arr[i] = index; + } + view.showSelection(arr); + } + + private void comboBoxSelectionChanged() { + + Object o = comboBox.getSelectedItem(); + if (o == null) { + return; + } + assert o instanceof FilterSetting; + FilterSetting s = (FilterSetting) o; + + if (s != customFilterSetting) { + FilterChain chain = getFilterChain(); + chain.beginAtomic(); + List toRemove = new ArrayList(); + for (Filter f : chain.getFilters()) { + if (!s.containsFilter(f)) { + toRemove.add(f); + } + } + for (Filter f : toRemove) { + chain.removeFilter(f); + } + + for (Filter f : s.getFilters()) { + if (!chain.containsFilter(f)) { + chain.addFilter(f); + } + } + + chain.endAtomic(); + filterSettingsChangedEvent.fire(); + } else { + this.updateComboBoxSelection(); + } + + SystemAction.get(RemoveFilterSettingsAction.class).setEnabled(comboBox.getSelectedItem() != this.customFilterSetting); + SystemAction.get(SaveFilterSettingsAction.class).setEnabled(comboBox.getSelectedItem() == this.customFilterSetting); + } + + private void updateComboBox() { + comboBox.removeAllItems(); + comboBox.addItem(customFilterSetting); + for (FilterSetting s : filterSettings) { + comboBox.addItem(s); + } + + this.updateComboBoxSelection(); + } + + public void addFilterSetting() { + NotifyDescriptor.InputLine l = new NotifyDescriptor.InputLine("Enter a name:", "Filter"); + if (DialogDisplayer.getDefault().notify(l) == NotifyDescriptor.OK_OPTION) { + String name = l.getInputText(); + + FilterSetting toRemove = null; + for (FilterSetting s : filterSettings) { + if (s.getName().equals(name)) { + NotifyDescriptor.Confirmation conf = new NotifyDescriptor.Confirmation("Filter \"" + name + "\" already exists, to you want to overwrite?", "Filter"); + if (DialogDisplayer.getDefault().notify(conf) == NotifyDescriptor.YES_OPTION) { + toRemove = s; + break; + } else { + return; + } + } + } + + if (toRemove != null) { + filterSettings.remove(toRemove); + } + FilterSetting setting = 
createFilterSetting(name); + filterSettings.add(setting); + + // Sort alphabetically + Collections.sort(filterSettings, new Comparator() { + + public int compare(FilterSetting o1, FilterSetting o2) { + return o1.getName().compareTo(o2.getName()); + } + }); + + updateComboBox(); + } + } + + public boolean canRemoveFilterSetting() { + return comboBox.getSelectedItem() != customFilterSetting; + } + + public void removeFilterSetting() { + if (canRemoveFilterSetting()) { + Object o = comboBox.getSelectedItem(); + assert o instanceof FilterSetting; + FilterSetting f = (FilterSetting) o; + assert f != customFilterSetting; + assert filterSettings.contains(f); + NotifyDescriptor.Confirmation l = new NotifyDescriptor.Confirmation("Do you really want to remove filter \"" + f + "\"?", "Filter"); + if (DialogDisplayer.getDefault().notify(l) == NotifyDescriptor.YES_OPTION) { + filterSettings.remove(f); + updateComboBox(); + } + } + } + + private FilterSetting createFilterSetting(String name) { + FilterSetting s = new FilterSetting(name); + FilterChain chain = this.getFilterChain(); + for (Filter f : chain.getFilters()) { + s.addFilter(f); + } + return s; + } + + private void updateComboBoxSelection() { + List filters = this.getFilterChain().getFilters(); + boolean found = false; + for (FilterSetting s : filterSettings) { + if (s.getFilterCount() == filters.size()) { + boolean ok = true; + for (Filter f : filters) { + if (!s.containsFilter(f)) { + ok = false; + } + } + + if (ok) { + if (comboBox.getSelectedItem() != s) { + comboBox.setSelectedItem(s); + } + found = true; + break; + } + } + } + + if (!found && comboBox.getSelectedItem() != customFilterSetting) { + comboBox.setSelectedItem(customFilterSetting); + } + } + + private class FilterChildren extends Children.Keys implements ChangedListener { + + //private Node[] oldSelection; + //private ArrayList newSelection; + private HashMap nodeHash = new HashMap(); + + protected Node[] createNodes(Object object) { + if (nodeHash.containsKey(object)) { + return new Node[]{nodeHash.get(object)}; + } + + assert object instanceof Filter; + Filter filter = (Filter) object; + com.sun.hotspot.igv.filterwindow.FilterNode node = new com.sun.hotspot.igv.filterwindow.FilterNode(filter); + node.getSelectionChangedEvent().addListener(this); + nodeHash.put(object, node); + return new Node[]{node}; + } + + public FilterChildren() { + sequence.getChangedEvent().addListener(new ChangedListener() { + + public void changed(FilterChain source) { + addNotify(); + } + }); + + setBefore(false); + } + + protected void addNotify() { + setKeys(sequence.getFilters()); + updateSelection(); + } + + public void changed(CheckNode source) { + FilterNode node = (FilterNode) source; + Filter f = node.getFilter(); + FilterChain chain = getFilterChain(); + if (node.isSelected()) { + if (!chain.containsFilter(f)) { + chain.addFilter(f); + } + } else { + if (chain.containsFilter(f)) { + chain.removeFilter(f); + } + } + view.revalidate(); + view.repaint(); + updateComboBoxSelection(); + } + } + + public FilterChain getFilterChain() { + return filterChain;/* + EditorTopComponent tc = EditorTopComponent.getActive(); + if (tc == null) { + return filterChain; + } + return tc.getFilterChain();*/ + } + + private FilterTopComponent() { + filterSettingsChangedEvent = new ChangedEvent(this); + initComponents(); + setName(NbBundle.getMessage(FilterTopComponent.class, "CTL_FilterTopComponent")); + setToolTipText(NbBundle.getMessage(FilterTopComponent.class, "HINT_FilterTopComponent")); + // 
setIcon(Utilities.loadImage(ICON_PATH, true)); + + sequence = new FilterChain(); + filterChain = new FilterChain(); + initFilters(); + manager = new ExplorerManager(); + manager.setRootContext(new AbstractNode(new FilterChildren())); + associateLookup(ExplorerUtils.createLookup(manager, getActionMap())); + view = new CheckListView(); + + ToolbarPool.getDefault().setPreferredIconSize(16); + Toolbar toolBar = new Toolbar(); + Border b = (Border) UIManager.get("Nb.Editor.Toolbar.border"); //NOI18N + toolBar.setBorder(b); + comboBox = new JComboBox(); + toolBar.add(comboBox); + this.add(toolBar, BorderLayout.NORTH); + toolBar.add(SaveFilterSettingsAction.get(SaveFilterSettingsAction.class)); + toolBar.add(RemoveFilterSettingsAction.get(RemoveFilterSettingsAction.class)); + toolBar.addSeparator(); + toolBar.add(MoveFilterUpAction.get(MoveFilterUpAction.class).createContextAwareInstance(this.getLookup())); + toolBar.add(MoveFilterDownAction.get(MoveFilterDownAction.class).createContextAwareInstance(this.getLookup())); + toolBar.add(RemoveFilterAction.get(RemoveFilterAction.class).createContextAwareInstance(this.getLookup())); + toolBar.add(NewFilterAction.get(NewFilterAction.class)); + this.add(view, BorderLayout.CENTER); + + filterSettings = new ArrayList(); + updateComboBox(); + + comboBox.addActionListener(comboBoxActionListener); + setChain(filterChain); + } + + public void newFilter() { + CustomFilter cf = new CustomFilter("My custom filter", ""); + if (cf.openInEditor()) { + sequence.addFilter(cf); + FileObject fo = getFileObject(cf); + FilterChangedListener listener = new FilterChangedListener(fo, cf); + listener.changed(cf); + cf.getChangedEvent().addListener(listener); + } + } + + public void removeFilter(Filter f) { + com.sun.hotspot.igv.filter.CustomFilter cf = (com.sun.hotspot.igv.filter.CustomFilter) f; + + sequence.removeFilter(cf); + try { + getFileObject(cf).delete(); + } catch (IOException ex) { + Exceptions.printStackTrace(ex); + } + + } + + private static class FilterChangedListener implements ChangedListener { + + private FileObject fileObject; + private CustomFilter filter; + + public FilterChangedListener(FileObject fo, CustomFilter cf) { + fileObject = fo; + filter = cf; + } + + public void changed(Filter source) { + try { + if (!fileObject.getName().equals(filter.getName())) { + FileLock lock = fileObject.lock(); + fileObject.move(lock, fileObject.getParent(), filter.getName(), ""); + lock.releaseLock(); + FileObject newFileObject = fileObject.getParent().getFileObject(filter.getName()); + fileObject = newFileObject; + + } + + FileLock lock = fileObject.lock(); + OutputStream os = fileObject.getOutputStream(lock); + Writer w = new OutputStreamWriter(os); + String s = filter.getCode(); + w.write(s); + w.close(); + lock.releaseLock(); + + } catch (IOException ex) { + Exceptions.printStackTrace(ex); + } + } + } + + public void initFilters() { + + FileSystem fs = Repository.getDefault().getDefaultFileSystem(); + FileObject folder = fs.getRoot().getFileObject(FOLDER_ID); + FileObject[] children = folder.getChildren(); + + List customFilters = new ArrayList(); + HashMap afterMap = new HashMap(); + Set enabledSet = new HashSet(); + HashMap map = new HashMap(); + + for (final FileObject fo : children) { + InputStream is = null; + + String code = ""; + FileLock lock = null; + try { + lock = fo.lock(); + is = fo.getInputStream(); + BufferedReader r = new BufferedReader(new InputStreamReader(is)); + String s; + StringBuffer sb = new StringBuffer(); + while ((s = r.readLine()) != 
null) { + sb.append(s); + sb.append("\n"); + } + code = sb.toString(); + + } catch (FileNotFoundException ex) { + Exceptions.printStackTrace(ex); + } catch (IOException ex) { + Exceptions.printStackTrace(ex); + } finally { + try { + is.close(); + } catch (IOException ex) { + Exceptions.printStackTrace(ex); + } + lock.releaseLock(); + } + + String displayName = fo.getName(); + + + final CustomFilter cf = new CustomFilter(displayName, code); + map.put(displayName, cf); + + String after = (String) fo.getAttribute(AFTER_ID); + afterMap.put(cf, after); + + Boolean enabled = (Boolean) fo.getAttribute(ENABLED_ID); + if (enabled != null && (boolean) enabled) { + enabledSet.add(cf); + } + + cf.getChangedEvent().addListener(new FilterChangedListener(fo, cf)); + + customFilters.add(cf); + } + + for (int j = 0; j < customFilters.size(); j++) { + for (int i = 0; i < customFilters.size(); i++) { + List copiedList = new ArrayList(customFilters); + for (CustomFilter cf : copiedList) { + + String after = afterMap.get(cf); + + if (map.containsKey(after)) { + CustomFilter afterCf = map.get(after); + int index = customFilters.indexOf(afterCf); + int currentIndex = customFilters.indexOf(cf); + + if (currentIndex < index) { + customFilters.remove(currentIndex); + customFilters.add(index, cf); + } + } + } + } + } + + for (CustomFilter cf : customFilters) { + sequence.addFilter(cf); + if (enabledSet.contains(cf)) { + filterChain.addFilter(cf); + } + } + } + + /** This method is called from within the constructor to + * initialize the form. + * WARNING: Do NOT modify this code. The content of this method is + * always regenerated by the Form Editor. + */ + // //GEN-BEGIN:initComponents + private void initComponents() { + + setLayout(new java.awt.BorderLayout()); + + }// //GEN-END:initComponents + // Variables declaration - do not modify//GEN-BEGIN:variables + // End of variables declaration//GEN-END:variables + /** + * Gets default instance. Do not use directly: reserved for *.settings files only, + * i.e. deserialization routines; otherwise you could get a non-deserialized instance. + * To obtain the singleton instance, use {@link findInstance}. + */ + public static synchronized FilterTopComponent getDefault() { + if (instance == null) { + instance = new FilterTopComponent(); + } + return instance; + } + + /** + * Obtain the FilterTopComponent instance. Never call {@link #getDefault} directly! + */ + public static synchronized FilterTopComponent findInstance() { + TopComponent win = WindowManager.getDefault().findTopComponent(PREFERRED_ID); + if (win == null) { + ErrorManager.getDefault().log(ErrorManager.WARNING, "Cannot find Filter component. It will not be located properly in the window system."); + return getDefault(); + } + if (win instanceof FilterTopComponent) { + return (FilterTopComponent) win; + } + ErrorManager.getDefault().log(ErrorManager.WARNING, "There seem to be multiple components with the '" + PREFERRED_ID + "' ID. 
That is a potential source of errors and unexpected behavior."); + return getDefault(); + } + + @Override + public int getPersistenceType() { + return TopComponent.PERSISTENCE_ALWAYS; + } + + @Override + protected String preferredID() { + return PREFERRED_ID; + } + + @Override + public ExplorerManager getExplorerManager() { + return manager; + } + + @Override + public void componentOpened() { + Lookup.Template tpl = new Lookup.Template(FilterChain.class); + result = Utilities.actionsGlobalContext().lookup(tpl); + result.addLookupListener(this); + } + + @Override + public void componentClosed() { + result.removeLookupListener(this); + result = null; + } + + public void resultChanged(LookupEvent lookupEvent) { + setChain(Utilities.actionsGlobalContext().lookup(FilterChain.class)); + /* + EditorTopComponent tc = EditorTopComponent.getActive(); + if (tc != null) { + setChain(tc.getFilterChain()); + }*/ + } + + public void setChain(FilterChain chain) { + updateComboBoxSelection(); + } + + private FileObject getFileObject(CustomFilter cf) { + FileObject fo = Repository.getDefault().getDefaultFileSystem().getRoot().getFileObject(FOLDER_ID + "/" + cf.getName()); + if (fo == null) { + try { + fo = org.openide.filesystems.Repository.getDefault().getDefaultFileSystem().getRoot().getFileObject(FOLDER_ID).createData(cf.getName()); + } catch (IOException ex) { + Exceptions.printStackTrace(ex); + } + } + return fo; + } + + @Override + public void writeExternal(ObjectOutput out) throws IOException { + super.writeExternal(out); + + out.writeInt(filterSettings.size()); + for (FilterSetting f : filterSettings) { + out.writeUTF(f.getName()); + + out.writeInt(f.getFilterCount()); + for (Filter filter : f.getFilters()) { + CustomFilter cf = (CustomFilter) filter; + out.writeUTF(cf.getName()); + } + } + + CustomFilter prev = null; + for (Filter f : this.sequence.getFilters()) { + CustomFilter cf = (CustomFilter) f; + FileObject fo = getFileObject(cf); + if (getFilterChain().containsFilter(cf)) { + fo.setAttribute(ENABLED_ID, true); + } else { + fo.setAttribute(ENABLED_ID, false); + } + + if (prev == null) { + fo.setAttribute(AFTER_ID, null); + } else { + fo.setAttribute(AFTER_ID, prev.getName()); + } + + prev = cf; + } + } + + public CustomFilter findFilter(String name) { + for (Filter f : sequence.getFilters()) { + + CustomFilter cf = (CustomFilter) f; + if (cf.getName().equals(name)) { + return cf; + } + } + + return null; + } + + @Override + public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + super.readExternal(in); + + int filterSettingsCount = in.readInt(); + for (int i = 0; i < filterSettingsCount; i++) { + String name = in.readUTF(); + FilterSetting s = new FilterSetting(name); + int filterCount = in.readInt(); + for (int j = 0; j < filterCount; j++) { + String filterName = in.readUTF(); + CustomFilter filter = findFilter(filterName); + if (filter != null) { + s.addFilter(filter); + } + } + + filterSettings.add(s); + } + updateComboBox(); + } + + final static class ResolvableHelper implements Serializable { + + private static final long serialVersionUID = 1L; + + public Object readResolve() { + return FilterTopComponent.getDefault(); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/FilterTopComponentSettings.xml 2009-08-01 04:19:04.372486841 +0100 @@ -0,0 +1,8 @@ + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ 
new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/FilterTopComponentWstcref.xml 2009-08-01 04:19:04.780777105 +0100 @@ -0,0 +1,7 @@ + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/actions/Bundle.properties 2009-08-01 04:19:05.191298005 +0100 @@ -0,0 +1,10 @@ +# To change this template, choose Tools | Templates +# and open the template in the editor. + +CTL_FilterAction=Open Filter Window +CTL_MoveFilterDownAction=Move downwards +CTL_MoveFilterUpAction=Move upwards +CTL_NewFilterAction=New filter... +CTL_RemoveFilterAction=Remove +CTL_RemoveFilterSettingsAction=Remove filter setting +CTL_SaveFilterSettingsAction=Save filter settings... --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/actions/FilterAction.java 2009-08-01 04:19:05.594072286 +0100 @@ -0,0 +1,47 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filterwindow.actions; + +import com.sun.hotspot.igv.filterwindow.*; +import java.awt.event.ActionEvent; +import javax.swing.AbstractAction; +import org.openide.util.NbBundle; +import org.openide.windows.TopComponent; + +/** + * + * @author Thomas Wuerthinger + */ +public class FilterAction extends AbstractAction { + + public FilterAction() { + super(NbBundle.getMessage(FilterAction.class, "CTL_FilterAction")); + } + + public void actionPerformed(ActionEvent evt) { + TopComponent win = FilterTopComponent.findInstance(); + win.open(); + win.requestActive(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/actions/MoveFilterDownAction.java 2009-08-01 04:19:06.028398650 +0100 @@ -0,0 +1,85 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.filterwindow.actions; + +import com.sun.hotspot.igv.filterwindow.FilterTopComponent; +import com.sun.hotspot.igv.filter.Filter; +import javax.swing.Action; +import org.openide.nodes.Node; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.CookieAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class MoveFilterDownAction extends CookieAction { + + protected void performAction(Node[] activatedNodes) { + for (Node n : activatedNodes) { + Filter c = n.getLookup().lookup(Filter.class); + FilterTopComponent.findInstance().getSequence().moveFilterDown(c); + } + } + + protected int mode() { + return CookieAction.MODE_EXACTLY_ONE; + } + + public MoveFilterDownAction() { + + putValue(Action.SHORT_DESCRIPTION, "Move filter downwards"); + } + + public String getName() { + return NbBundle.getMessage(MoveFilterUpAction.class, "CTL_MoveFilterDownAction"); + } + + protected Class[] cookieClasses() { + return new Class[]{ + Filter.class + }; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/filterwindow/images/down.gif"; + } + + @Override + protected void initialize() { + super.initialize(); + putValue("noIconInMenu", Boolean.TRUE); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/actions/MoveFilterUpAction.java 2009-08-01 04:19:06.504425114 +0100 @@ -0,0 +1,84 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.filterwindow.actions; + +import com.sun.hotspot.igv.filterwindow.FilterTopComponent; +import com.sun.hotspot.igv.filter.Filter; +import javax.swing.Action; +import org.openide.nodes.Node; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.CookieAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class MoveFilterUpAction extends CookieAction { + + protected void performAction(Node[] activatedNodes) { + for (Node n : activatedNodes) { + Filter c = n.getLookup().lookup(Filter.class); + FilterTopComponent.findInstance().getSequence().moveFilterUp(c); + } + } + + protected int mode() { + return CookieAction.MODE_EXACTLY_ONE; + } + + public MoveFilterUpAction() { + putValue(Action.SHORT_DESCRIPTION, "Move filter upwards"); + } + + public String getName() { + return NbBundle.getMessage(MoveFilterUpAction.class, "CTL_MoveFilterUpAction"); + } + + protected Class[] cookieClasses() { + return new Class[]{ + Filter.class + }; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/filterwindow/images/up.gif"; + } + + @Override + protected void initialize() { + super.initialize(); + putValue("noIconInMenu", Boolean.TRUE); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/actions/NewFilterAction.java 2009-08-01 04:19:07.088345222 +0100 @@ -0,0 +1,68 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.filterwindow.actions; + +import com.sun.hotspot.igv.filterwindow.FilterTopComponent; +import javax.swing.Action; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class NewFilterAction extends CallableSystemAction { + + public NewFilterAction() { + putValue(Action.SHORT_DESCRIPTION, "Create new filter"); + } + + public void performAction() { + FilterTopComponent.findInstance().newFilter(); + } + + public String getName() { + return NbBundle.getMessage(SaveFilterSettingsAction.class, "CTL_NewFilterAction"); + } + + @Override + protected void initialize() { + super.initialize(); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/filterwindow/images/plus.gif"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/actions/RemoveFilterAction.java 2009-08-01 04:19:07.517723598 +0100 @@ -0,0 +1,99 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.filterwindow.actions; + +import com.sun.hotspot.igv.filterwindow.FilterTopComponent; +import com.sun.hotspot.igv.filter.Filter; +import javax.swing.Action; +import javax.swing.JOptionPane; +import org.openide.nodes.Node; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.CookieAction; +import org.openide.windows.WindowManager; + +/** + * + * @author Thomas Wuerthinger + */ +public final class RemoveFilterAction extends CookieAction { + + protected void performAction(Node[] activatedNodes) { + Object[] options = {"Yes", + "No", + "Cancel" + }; + int n = JOptionPane.showOptionDialog(WindowManager.getDefault().getMainWindow(), + "Do you really want to delete " + activatedNodes.length + " filter/s?", "Delete?", + JOptionPane.YES_NO_CANCEL_OPTION, + JOptionPane.QUESTION_MESSAGE, + null, + options, + options[2]); + + if (n == JOptionPane.YES_OPTION) { + for (int i = 0; i < activatedNodes.length; i++) { + FilterTopComponent.findInstance().removeFilter(activatedNodes[i].getLookup().lookup(Filter.class)); + } + } + } + + protected int mode() { + return CookieAction.MODE_ALL; + } + + public String getName() { + return NbBundle.getMessage(RemoveFilterAction.class, "CTL_RemoveFilterAction"); + } + + public RemoveFilterAction() { + putValue(Action.SHORT_DESCRIPTION, "Remove filter"); + } + + protected Class[] cookieClasses() { + return new Class[]{ + Filter.class + }; + } + + @Override + protected void initialize() { + super.initialize(); + putValue("noIconInMenu", Boolean.TRUE); + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/filterwindow/images/minus.gif"; + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/actions/RemoveFilterSettingsAction.java 2009-08-01 04:19:07.957429988 +0100 @@ -0,0 +1,68 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.filterwindow.actions; + +import com.sun.hotspot.igv.filterwindow.FilterTopComponent; +import javax.swing.Action; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class RemoveFilterSettingsAction extends CallableSystemAction { + + public void performAction() { + FilterTopComponent.findInstance().removeFilterSetting(); + } + + public String getName() { + return NbBundle.getMessage(RemoveFilterSettingsAction.class, "CTL_RemoveFilterSettingsAction"); + } + + public RemoveFilterSettingsAction() { + putValue(Action.SHORT_DESCRIPTION, "Remove filter profile"); + } + + @Override + protected void initialize() { + super.initialize(); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/filterwindow/images/delete.gif"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/actions/SaveFilterSettingsAction.java 2009-08-01 04:19:08.395940996 +0100 @@ -0,0 +1,68 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.filterwindow.actions; + +import com.sun.hotspot.igv.filterwindow.FilterTopComponent; +import javax.swing.Action; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class SaveFilterSettingsAction extends CallableSystemAction { + + public void performAction() { + FilterTopComponent.findInstance().addFilterSetting(); + } + + public String getName() { + return NbBundle.getMessage(SaveFilterSettingsAction.class, "CTL_SaveFilterSettingsAction"); + } + + @Override + protected void initialize() { + super.initialize(); + } + + public SaveFilterSettingsAction() { + putValue(Action.SHORT_DESCRIPTION, "Create new filter profile"); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/filterwindow/images/add.gif"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/customRightTopWsmode.xml 2009-08-01 04:19:08.821293864 +0100 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/images/add.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/images/delete.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/images/down.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/images/minus.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/images/plus.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/images/up.gif differ --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/FilterWindow/src/com/sun/hotspot/igv/filterwindow/layer.xml 2009-08-01 04:19:11.646309285 +0100 @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/build.xml 2009-08-01 04:19:12.088165956 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.graph. 
+ + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/manifest.mf 2009-08-01 04:19:12.505147625 +0100 @@ -0,0 +1,5 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.graph +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/graph/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/nbproject/build-impl.xml 2009-08-01 04:19:12.916135997 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/nbproject/genfiles.properties 2009-08-01 04:19:13.333029019 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=b2bc2f02 +build.xml.script.CRC32=486d5dab +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. +nbproject/build-impl.xml.data.CRC32=b2bc2f02 +nbproject/build-impl.xml.script.CRC32=17fa0f49 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/nbproject/platform.properties 2009-08-01 04:19:13.731640442 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/nbproject/project.properties 2009-08-01 04:19:14.140038743 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/nbproject/project.xml 2009-08-01 04:19:14.535320461 +0100 @@ -0,0 +1,31 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.graph + + + + com.sun.hotspot.igv.data + + + + 1.0 + + + + com.sun.hotspot.igv.layout + + + + 1.0 + + + + + com.sun.hotspot.igv.graph + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/nbproject/suite.properties 2009-08-01 04:19:14.935640328 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/AndSelector.java 2009-08-01 04:19:15.481404275 +0100 @@ -0,0 +1,54 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class AndSelector implements Selector { + + private Selector selector1; + private Selector selector2; + + public AndSelector(Selector s1, Selector s2) { + this.selector1 = s1; + this.selector2 = s2; + } + + public List
<Figure> selected(Diagram d) { + List<Figure> l1 = selector1.selected(d); + List<Figure> l2 = selector2.selected(d); + List<Figure> result = new ArrayList<Figure>
(); + for (Figure f : l2) { + if (l1.contains(f)) { + result.add(f); + } + } + return result; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/Block.java 2009-08-01 04:19:15.906977589 +0100 @@ -0,0 +1,82 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import com.sun.hotspot.igv.data.InputBlock; +import com.sun.hotspot.igv.layout.Cluster; +import java.awt.Rectangle; +import java.util.HashSet; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class Block implements Cluster { + + private InputBlock inputBlock; + private Rectangle bounds; + private Diagram diagram; + + public Block(InputBlock inputBlock, Diagram diagram) { + this.inputBlock = inputBlock; + this.diagram = diagram; + } + + public Cluster getOuter() { + return null; + } + + public InputBlock getInputBlock() { + return inputBlock; + } + + public Set getSuccessors() { + Set succs = new HashSet(); + for (InputBlock b : inputBlock.getSuccessors()) { + succs.add(diagram.getBlock(b)); + } + return succs; + } + + public Set getPredecessors() { + Set succs = new HashSet(); + for (InputBlock b : inputBlock.getPredecessors()) { + succs.add(diagram.getBlock(b)); + } + return succs; + } + + public void setBounds(Rectangle r) { + this.bounds = r; + } + + public Rectangle getBounds() { + return bounds; + } + + public int compareTo(Cluster o) { + return toString().compareTo(o.toString()); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/Bundle.properties 2009-08-01 04:19:16.320713012 +0100 @@ -0,0 +1 @@ +OpenIDE-Module-Name=Graph --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/Connection.java 2009-08-01 04:19:16.721394100 +0100 @@ -0,0 +1,123 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import com.sun.hotspot.igv.layout.Link; +import com.sun.hotspot.igv.layout.Port; +import java.awt.Color; +import java.awt.Point; +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class Connection implements Source.Provider, Link { + + public enum ConnectionStyle { + + NORMAL, + DASHED, + BOLD + } + private InputSlot inputSlot; + private OutputSlot outputSlot; + private Source source; + private Color color; + private ConnectionStyle style; + private List controlPoints; + + protected Connection(InputSlot inputSlot, OutputSlot outputSlot) { + this.inputSlot = inputSlot; + this.outputSlot = outputSlot; + this.inputSlot.connections.add(this); + this.outputSlot.connections.add(this); + controlPoints = new ArrayList(); + Figure sourceFigure = this.outputSlot.getFigure(); + Figure destFigure = this.inputSlot.getFigure(); + sourceFigure.addSuccessor(destFigure); + destFigure.addPredecessor(sourceFigure); + source = new Source(); + + this.color = Color.BLACK; + this.style = ConnectionStyle.NORMAL; + } + + public InputSlot getInputSlot() { + return inputSlot; + } + + public OutputSlot getOutputSlot() { + return outputSlot; + } + + public Color getColor() { + return color; + } + + public ConnectionStyle getStyle() { + return style; + } + + public void setColor(Color c) { + color = c; + } + + public void setStyle(ConnectionStyle s) { + style = s; + } + + public Source getSource() { + return source; + } + + public void remove() { + inputSlot.getFigure().removePredecessor(outputSlot.getFigure()); + inputSlot.connections.remove(this); + outputSlot.getFigure().removeSuccessor(inputSlot.getFigure()); + outputSlot.connections.remove(this); + } + + @Override + public String toString() { + return "Connection(" + getFrom().getVertex() + " to " + getTo().getVertex() + ")"; + } + + public Port getFrom() { + return outputSlot; + } + + public Port getTo() { + return inputSlot; + } + + public List getControlPoints() { + return controlPoints; + } + + public void setControlPoints(List list) { + controlPoints = list; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/Diagram.java 2009-08-01 04:19:17.230683296 +0100 @@ -0,0 +1,294 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import com.sun.hotspot.igv.data.InputBlock; +import com.sun.hotspot.igv.data.InputEdge; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.InputNode; +import com.sun.hotspot.igv.data.Properties; +import java.awt.Font; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class Diagram { + + private List
<Figure> figures; + private Map blocks; + private InputGraph graph; + private int curId; + private String nodeText; + private Font font; + + public Font getFont() { + return font; + } + + private Diagram() { + figures = new ArrayList<Figure>
(); + blocks = new HashMap(); + this.nodeText = ""; + this.font = new Font("Serif", Font.PLAIN, 14); + } + + public Block getBlock(InputBlock b) { + return blocks.get(b); + } + + public String getNodeText() { + return nodeText; + } + + public void schedule(Collection newBlocks) { + graph.schedule(newBlocks); + updateBlocks(); + } + + private void updateBlocks() { + blocks.clear(); + for (InputBlock b : graph.getBlocks()) { + Block curBlock = new Block(b, this); + blocks.put(b, curBlock); + } + } + + public Diagram getNext() { + return Diagram.createDiagram(graph.getNext(), nodeText); + } + + public Collection getBlocks() { + return Collections.unmodifiableCollection(blocks.values()); + } + + public Diagram getPrev() { + return Diagram.createDiagram(graph.getPrev(), nodeText); + } + + public List<Figure>
getFigures() { + return Collections.unmodifiableList(figures); + } + + public Figure createFigure() { + Figure f = new Figure(this, curId); + curId++; + this.figures.add(f); + return f; + } + + public Connection createConnection(InputSlot inputSlot, OutputSlot outputSlot) { + assert inputSlot.getFigure().getDiagram() == this; + assert outputSlot.getFigure().getDiagram() == this; + return new Connection(inputSlot, outputSlot); + } + + public static Diagram createDiagram(InputGraph graph, String nodeText) { + if (graph == null) { + return null; + } + + Diagram d = new Diagram(); + d.graph = graph; + d.nodeText = nodeText; + + d.updateBlocks(); + + Collection nodes = graph.getNodes(); + HashMap figureHash = new HashMap(); + for (InputNode n : nodes) { + Figure f = d.createFigure(); + f.getSource().addSourceNode(n); + f.getProperties().add(n.getProperties()); + figureHash.put(n.getId(), f); + } + + for (InputEdge e : graph.getEdges()) { + + int from = e.getFrom(); + int to = e.getTo(); + Figure fromFigure = figureHash.get(from); + Figure toFigure = figureHash.get(to); + assert fromFigure != null && toFigure != null; + + int toIndex = e.getToIndex(); + + while (fromFigure.getOutputSlots().size() <= 0) { + fromFigure.createOutputSlot(); + } + + OutputSlot outputSlot = fromFigure.getOutputSlots().get(0); + + while (toFigure.getInputSlots().size() <= toIndex) { + toFigure.createInputSlot(); + } + + InputSlot inputSlot = toFigure.getInputSlots().get(toIndex); + + Connection c = d.createConnection(inputSlot, outputSlot); + + if (e.getState() == InputEdge.State.NEW) { + c.setStyle(Connection.ConnectionStyle.BOLD); + } else if (e.getState() == InputEdge.State.DELETED) { + c.setStyle(Connection.ConnectionStyle.DASHED); + } + } + + + return d; + } + + public void removeAllFigures(Set
<Figure> figuresToRemove) { + for (Figure f : figuresToRemove) { + freeFigure(f); + } + + ArrayList<Figure> newFigures = new ArrayList<Figure>
(); + for (Figure f : this.figures) { + if (!figuresToRemove.contains(f)) { + newFigures.add(f); + } + } + figures = newFigures; + } + + private void freeFigure(Figure succ) { + + List inputSlots = new ArrayList(succ.getInputSlots()); + for (InputSlot s : inputSlots) { + succ.removeInputSlot(s); + } + + List outputSlots = new ArrayList(succ.getOutputSlots()); + for (OutputSlot s : outputSlots) { + succ.removeOutputSlot(s); + } + + assert succ.getInputSlots().size() == 0; + assert succ.getOutputSlots().size() == 0; + assert succ.getPredecessors().size() == 0; + assert succ.getSuccessors().size() == 0; + + } + + public void removeFigure(Figure succ) { + + assert this.figures.contains(succ); + freeFigure(succ); + this.figures.remove(succ); + } + + public String getName() { + return graph.getName(); + } + + public InputGraph getGraph() { + return graph; + } + + public Set getConnections() { + + Set connections = new HashSet(); + for (Figure f : figures) { + + for (InputSlot s : f.getInputSlots()) { + connections.addAll(s.getConnections()); + } + } + + return connections; + } + + public Figure getRootFigure() { + Properties.PropertySelector
<Figure> selector = new Properties.PropertySelector<Figure>
(figures); + Figure root = selector.selectSingle("name", "Root"); + if (root == null) { + root = selector.selectSingle("name", "Start"); + } + if (root == null) { + List<Figure>
rootFigures = getRootFigures(); + if (rootFigures.size() > 0) { + root = rootFigures.get(0); + } else if (figures.size() > 0) { + root = figures.get(0); + } + } + + return root; + } + + public void printStatistics() { + System.out.println("============================================================="); + System.out.println("Diagram statistics"); + + List
<Figure> tmpFigures = getFigures(); + Set connections = getConnections(); + + System.out.println("Number of figures: " + tmpFigures.size()); + System.out.println("Number of connections: " + connections.size()); + + List<Figure> figuresSorted = new ArrayList<Figure>(tmpFigures); + Collections.sort(figuresSorted, new Comparator<Figure>
() { + + public int compare(Figure a, Figure b) { + return b.getPredecessors().size() + b.getSuccessors().size() - a.getPredecessors().size() - a.getSuccessors().size(); + } + }); + + final int COUNT = 10; + int z = 0; + for (Figure f : figuresSorted) { + + z++; + int sum = f.getPredecessors().size() + f.getSuccessors().size(); + System.out.println("#" + z + ": " + f + ", predCount=" + f.getPredecessors().size() + " succCount=" + f.getSuccessors().size()); + if (sum < COUNT) { + break; + } + + } + + System.out.println("============================================================="); + } + + public List
<Figure> getRootFigures() { + ArrayList<Figure> rootFigures = new ArrayList<Figure>
(); + for (Figure f : figures) { + if (f.getPredecessors().size() == 0) { + rootFigures.add(f); + } + } + return rootFigures; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/Figure.java 2009-08-01 04:19:17.678278225 +0100 @@ -0,0 +1,328 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import com.sun.hotspot.igv.layout.Cluster; +import com.sun.hotspot.igv.layout.Vertex; +import com.sun.hotspot.igv.data.Properties; +import java.awt.Color; +import java.awt.Dimension; +import java.awt.FontMetrics; +import java.awt.Graphics; +import java.awt.Point; +import java.awt.image.BufferedImage; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class Figure extends Properties.Entity implements Source.Provider, Vertex { + + public static final int INSET = 6; + public static final int SLOT_WIDTH = 10; + public static final int SLOT_START = 3; + public static final boolean VERTICAL_LAYOUT = true; + protected List inputSlots; + protected List outputSlots; + private Source source; + private Diagram diagram; + private Point position; + private List
<Figure> predecessors; + private List<Figure>
successors; + private Color color; + private int id; + private String idString; + private String[] lines; + private int heightCash = -1; + private int widthCash = -1; + + public int getHeight() { + if (heightCash == -1) { + BufferedImage image = new BufferedImage(1, 1, BufferedImage.TYPE_INT_RGB); + Graphics g = image.getGraphics(); + g.setFont(diagram.getFont()); + FontMetrics metrics = g.getFontMetrics(); + String nodeText = diagram.getNodeText(); + heightCash = nodeText.split("\n").length * metrics.getHeight() + INSET; + } + return heightCash; + } + + public int getWidth() { + if (widthCash == -1) { + int max = 0; + BufferedImage image = new BufferedImage(1, 1, BufferedImage.TYPE_INT_RGB); + Graphics g = image.getGraphics(); + g.setFont(diagram.getFont()); + FontMetrics metrics = g.getFontMetrics(); + for (String s : lines) { + int cur = metrics.stringWidth(s); + if (cur > max) { + max = cur; + } + } + widthCash = max + INSET; + } + return widthCash; + } + + protected Figure(Diagram diagram, int id) { + this.diagram = diagram; + this.source = new Source(); + inputSlots = new ArrayList(5); + outputSlots = new ArrayList(1); + predecessors = new ArrayList
<Figure>(6); + successors = new ArrayList<Figure>
(6); + this.id = id; + idString = Integer.toString(id); + + this.position = new Point(0, 0); + this.color = Color.WHITE; + } + + public int getId() { + return id; + } + + public void setColor(Color color) { + this.color = color; + } + + public Color getColor() { + return color; + } + + public List
<Figure> getPredecessors() { + return Collections.unmodifiableList(predecessors); + } + + public Set<Figure>
getPredecessorSet() { + Set<Figure> result = new HashSet<Figure>
(); + for (Figure f : getPredecessors()) { + result.add(f); + } + return Collections.unmodifiableSet(result); + } + + public Set
<Figure> getSuccessorSet() { + Set<Figure> result = new HashSet<Figure>
(); + for (Figure f : getSuccessors()) { + result.add(f); + } + return Collections.unmodifiableSet(result); + } + + public List<Figure>
getSuccessors() { + return Collections.unmodifiableList(successors); + } + + protected void addPredecessor(Figure f) { + this.predecessors.add(f); + } + + protected void addSuccessor(Figure f) { + this.successors.add(f); + } + + protected void removePredecessor(Figure f) { + assert predecessors.contains(f); + predecessors.remove(f); + } + + protected void removeSuccessor(Figure f) { + assert successors.contains(f); + successors.remove(f); + } + + public void setPosition(Point p) { + this.position = p; + } + + public Point getPosition() { + return position; + } + + public Diagram getDiagram() { + return diagram; + } + + public Source getSource() { + return source; + } + + public InputSlot createInputSlot() { + InputSlot slot = new InputSlot(this, -1); + inputSlots.add(slot); + return slot; + } + + public InputSlot createInputSlot(int index) { + InputSlot slot = new InputSlot(this, index); + inputSlots.add(slot); + Collections.sort(inputSlots, Slot.slotIndexComparator); + return slot; + } + + public void removeSlot(Slot s) { + + assert inputSlots.contains(s) || outputSlots.contains(s); + + List connections = new ArrayList(s.getConnections()); + for (Connection c : connections) { + c.remove(); + } + + if (inputSlots.contains(s)) { + inputSlots.remove(s); + } else if (outputSlots.contains(s)) { + outputSlots.remove(s); + } + } + + public OutputSlot createOutputSlot() { + OutputSlot slot = new OutputSlot(this, -1); + outputSlots.add(slot); + return slot; + } + + public OutputSlot createOutputSlot(int index) { + OutputSlot slot = new OutputSlot(this, index); + outputSlots.add(slot); + Collections.sort(outputSlots, Slot.slotIndexComparator); + return slot; + } + + public List getInputSlots() { + return Collections.unmodifiableList(inputSlots); + } + + public List getOutputSlots() { + return Collections.unmodifiableList(outputSlots); + } + + void removeInputSlot(InputSlot s) { + s.removeAllConnections(); + inputSlots.remove(s); + } + + void removeOutputSlot(OutputSlot s) { + s.removeAllConnections(); + outputSlots.remove(s); + } + + public String[] getLines() { + if (lines == null) { + updateLines(); + } + return lines; + } + + public void updateLines() { + String[] strings = diagram.getNodeText().split("\n"); + String[] result = new String[strings.length]; + + for (int i = 0; i < strings.length; i++) { + result[i] = resolveString(strings[i]); + } + + lines = result; + } + + private String resolveString(String string) { + + StringBuilder sb = new StringBuilder(); + boolean inBrackets = false; + StringBuilder curIdent = new StringBuilder(); + + for (int i = 0; i < string.length(); i++) { + char c = string.charAt(i); + if (inBrackets) { + if (c == ']') { + String value = getProperties().get(curIdent.toString()); + if (value == null) { + value = ""; + } + sb.append(value); + inBrackets = false; + } else { + curIdent.append(c); + } + } else { + if (c == '[') { + inBrackets = true; + curIdent = new StringBuilder(); + } else { + sb.append(c); + } + } + } + + return sb.toString(); + } + + public Dimension getSize() { + if (VERTICAL_LAYOUT) { + int width = Math.max(getWidth(), Figure.SLOT_WIDTH * (Math.max(inputSlots.size(), outputSlots.size()) + 1)); + int height = getHeight() + 2 * Figure.SLOT_WIDTH; + return new Dimension(width, height); + } else { + int width = getWidth() + 2 * Figure.SLOT_WIDTH; + int height = Figure.SLOT_WIDTH * (Math.max(inputSlots.size(), outputSlots.size()) + 1); + return new Dimension(width, height); + } + } + + @Override + public String toString() { + return idString; + } + + 
public Cluster getCluster() { + if (getSource().getSourceNodes().size() == 0) { + assert false : "Should never reach here, every figure must have at least one source node!"; + return null; + } else { + Cluster result = diagram.getBlock(diagram.getGraph().getBlock(getSource().getSourceNodes().get(0))); + assert result != null; + return result; + } + } + + public boolean isRoot() { + if (source.getSourceNodes().size() > 0 && source.getSourceNodes().get(0).getProperties().get("name").equals("Root")) { + return true; + } else { + return false; + } + } + + public int compareTo(Vertex f) { + return toString().compareTo(f.toString()); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/InputSlot.java 2009-08-01 04:19:18.111619702 +0100 @@ -0,0 +1,57 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import java.awt.Point; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class InputSlot extends Slot { + + protected InputSlot(Figure figure, int wantedIndex) { + super(figure, wantedIndex); + } + + public int getPosition() { + return getFigure().getInputSlots().indexOf(this); + } + + public void setPosition(int position) { + List inputSlots = getFigure().inputSlots; + InputSlot s = inputSlots.remove(position); + inputSlots.add(position, s); + } + + public Point getRelativePosition() { + return new Point(getFigure().getWidth() * (getPosition() + 1) / (getFigure().getInputSlots().size() + 1), Figure.SLOT_WIDTH - Figure.SLOT_START); + } + + @Override + public String toString() { + return "InputSlot[figure=" + this.getFigure().toString() + ", position=" + getPosition() + "]"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/InvertSelector.java 2009-08-01 04:19:18.553584045 +0100 @@ -0,0 +1,53 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class InvertSelector implements Selector { + + private Selector selector; + + public InvertSelector(Selector selector) { + this.selector = selector; + } + + public List
<Figure> selected(Diagram d) { + + List<Figure> result = new ArrayList<Figure>(); + List<Figure>
otherResult = selector.selected(d); + for (Figure f : d.getFigures()) { + if (!otherResult.contains(f)) { + result.add(f); + } + } + + return result; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/MatcherSelector.java 2009-08-01 04:19:18.963630687 +0100 @@ -0,0 +1,47 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.data.Properties.PropertyMatcher; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class MatcherSelector implements Selector { + + private PropertyMatcher matcher; + + public MatcherSelector(PropertyMatcher matcher) { + this.matcher = matcher; + } + + public List
<Figure> selected(Diagram d) { + Properties.PropertySelector<Figure> selector = new Properties.PropertySelector<Figure>(d.getFigures()); + List<Figure>
list = selector.selectMultiple(matcher); + return list; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/OrSelector.java 2009-08-01 04:19:19.397416328 +0100 @@ -0,0 +1,56 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class OrSelector implements Selector { + + private Selector selector1; + private Selector selector2; + + /** Creates a new instance of OrSelector */ + public OrSelector(Selector s1, Selector s2) { + this.selector1 = s1; + this.selector2 = s2; + } + + public List
<Figure> selected(Diagram d) { + + List<Figure> l1 = selector1.selected(d); + List<Figure>
l2 = selector2.selected(d); + + for (Figure f : l2) { + if (!l1.contains(f)) { + l1.add(f); + } + } + + return l1; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/OutputSlot.java 2009-08-01 04:19:19.822989270 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import java.awt.Point; + +/** + * + * @author Thomas Wuerthinger + */ +public class OutputSlot extends Slot { + + protected OutputSlot(Figure figure, int wantedIndex) { + super(figure, wantedIndex); + } + + public int getPosition() { + return getFigure().getOutputSlots().indexOf(this); + } + + public void setPosition(int position) { + OutputSlot s = getFigure().outputSlots.remove(position); + getFigure().outputSlots.add(position, s); + } + + public Point getRelativePosition() { + return new Point(getFigure().getWidth() * (getPosition() + 1) / (getFigure().getOutputSlots().size() + 1), getFigure().getSize().height - Figure.SLOT_WIDTH + Figure.SLOT_START); + } + + @Override + public String toString() { + return "OutputSlot[figure=" + this.getFigure().toString() + ", position=" + getPosition() + "]"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/PredecessorSelector.java 2009-08-01 04:19:20.248770149 +0100 @@ -0,0 +1,59 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
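These selectors are small, composable queries over a Diagram; SuccessorSelector later in this change completes the set. A hypothetical composition, assuming a Properties.PropertyMatcher named rootMatcher constructed elsewhere (for example, one matching the "name" property against "Root"):

    // Sketch only: figures matched by rootMatcher, plus their direct
    // predecessors, then inverted to select everything outside that region.
    static List<Figure> outsideRootRegion(Diagram diagram, Properties.PropertyMatcher rootMatcher) {
        Selector roots = new MatcherSelector(rootMatcher);
        Selector region = new OrSelector(roots, new PredecessorSelector(roots));
        return new InvertSelector(region).selected(diagram);
    }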
+ * + */ +package com.sun.hotspot.igv.graph; + +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class PredecessorSelector implements Selector { + + private Selector innerSelector; + + public PredecessorSelector(Selector innerSelector) { + this.innerSelector = innerSelector; + } + + public List
<Figure> selected(Diagram d) { + List<Figure> inner = innerSelector.selected(d); + List<Figure> result = new ArrayList<Figure>
(); + for (Figure f : d.getFigures()) { + boolean saved = false; + for (Figure f2 : f.getSuccessors()) { + if (inner.contains(f2)) { + saved = true; + } + } + + if (saved) { + result.add(f); + } + } + + return result; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/Selector.java 2009-08-01 04:19:20.674234164 +0100 @@ -0,0 +1,35 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public interface Selector { + + List
selected(Diagram d); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/Slot.java 2009-08-01 04:19:21.082218706 +0100 @@ -0,0 +1,125 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import com.sun.hotspot.igv.layout.Port; +import com.sun.hotspot.igv.layout.Vertex; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Comparator; + +/** + * + * @author Thomas Wuerthinger + */ +public abstract class Slot implements Port, Source.Provider { + + private int wantedIndex; + private String name; + private String shortName; // 1 - 2 characters + private Source source; + protected List connections; + private Figure figure; + + protected Slot(Figure figure, int wantedIndex) { + this.figure = figure; + connections = new ArrayList(2); + source = new Source(); + this.wantedIndex = wantedIndex; + name = ""; + shortName = ""; + assert figure != null; + } + public static final Comparator slotIndexComparator = new Comparator() { + + public int compare(Slot o1, Slot o2) { + return o1.wantedIndex - o2.wantedIndex; + } + }; + public static final Comparator slotFigureComparator = new Comparator() { + + public int compare(Slot o1, Slot o2) { + return o1.figure.getId() - o2.figure.getId(); + } + }; + + public int getWantedIndex() { + return wantedIndex; + } + + public Source getSource() { + return source; + } + + public String getName() { + return name; + } + + public void setShortName(String s) { + assert s != null; + assert s.length() <= 2; + this.shortName = s; + + } + + public String getShortName() { + return shortName; + } + + public boolean getShowName() { + return getShortName() != null && getShortName().length() > 0; + } + + public void setName(String s) { + if (s == null) { + s = ""; + } + this.name = s; + } + + public Figure getFigure() { + assert figure != null; + return figure; + } + + public List getConnections() { + return Collections.unmodifiableList(connections); + } + + public void removeAllConnections() { + List connectionsCopy = new ArrayList(this.connections); + for (Connection c : connectionsCopy) { + c.remove(); + } + } + + public Vertex getVertex() { + return figure; + } + + public abstract int getPosition(); + + public abstract void setPosition(int position); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ 
new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/Source.java 2009-08-01 04:19:21.510982862 +0100 @@ -0,0 +1,100 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import com.sun.hotspot.igv.data.InputBlock; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.InputNode; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class Source { + + private List sourceNodes; + private Set set; + + public Source() { + sourceNodes = new ArrayList(1); + } + + public List getSourceNodes() { + return Collections.unmodifiableList(sourceNodes); + } + + public Set getSourceNodesAsSet() { + if (set == null) { + set = new HashSet(); + for (InputNode n : sourceNodes) { + int id = n.getId(); + //if(id < 0) id = -id; + set.add(id); + } + } + return set; + } + + public void addSourceNode(InputNode n) { + sourceNodes.add(n); + set = null; + } + + public void removeSourceNode(InputNode n) { + sourceNodes.remove(n); + set = null; + } + + public interface Provider { + + public Source getSource(); + } + + public void setSourceNodes(List sourceNodes) { + this.sourceNodes = sourceNodes; + set = null; + } + + public void addSourceNodes(Source s) { + for (InputNode n : s.getSourceNodes()) { + sourceNodes.add(n); + } + set = null; + } + + public boolean isInBlock(InputGraph g, InputBlock blockNode) { + + for (InputNode n : this.getSourceNodes()) { + if (g.getBlock(n) == blockNode) { + return true; + } + } + return false; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Graph/src/com/sun/hotspot/igv/graph/SuccessorSelector.java 2009-08-01 04:19:21.938948919 +0100 @@ -0,0 +1,59 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
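Source above is the bridge from a visual element back to the InputNodes it was built from; getSourceNodesAsSet() lazily builds an id set and every mutator drops the cache. A small usage sketch (Figure exposes getSource(), as seen earlier in Figure.getCluster()):

    // Sketch: was this figure built from the given InputNode id?
    // The id set is rebuilt lazily after addSourceNode/removeSourceNode/setSourceNodes.
    static boolean representsNode(Figure f, int inputNodeId) {
        return f.getSource().getSourceNodesAsSet().contains(inputNodeId);
    }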
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.graph; + +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class SuccessorSelector implements Selector { + + private Selector innerSelector; + + public SuccessorSelector(Selector innerSelector) { + this.innerSelector = innerSelector; + } + + public List
<Figure> selected(Diagram d) { + List<Figure> inner = innerSelector.selected(d); + List<Figure> result = new ArrayList<Figure>
(); + for (Figure f : d.getFigures()) { + boolean saved = false; + for (Figure f2 : f.getPredecessors()) { + if (inner.contains(f2)) { + saved = true; + } + } + + if (saved) { + result.add(f); + } + } + + return result; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/build.xml 2009-08-01 04:19:22.389015443 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.hierarchicallayout. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/manifest.mf 2009-08-01 04:19:22.790265133 +0100 @@ -0,0 +1,5 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.hierarchicallayout +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/hierarchicallayout/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/nbproject/build-impl.xml 2009-08-01 04:19:23.216763298 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/nbproject/genfiles.properties 2009-08-01 04:19:23.619621765 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=de087df9 +build.xml.script.CRC32=98977c36 +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. +nbproject/build-impl.xml.data.CRC32=de087df9 +nbproject/build-impl.xml.script.CRC32=0d734625 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/nbproject/platform.properties 2009-08-01 04:19:24.045227736 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/nbproject/project.properties 2009-08-01 04:19:24.445900096 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/nbproject/project.xml 2009-08-01 04:19:24.833390529 +0100 @@ -0,0 +1,23 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.hierarchicallayout + + + + com.sun.hotspot.igv.layout + + + + 1.0 + + + + + com.sun.hotspot.igv.hierarchicallayout + + + + --- /dev/null 2009-07-29 
04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/nbproject/suite.properties 2009-08-01 04:19:25.234008628 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/Bundle.properties 2009-08-01 04:19:25.798483078 +0100 @@ -0,0 +1 @@ +OpenIDE-Module-Name=HierarchicalLayout --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/ClusterEdge.java 2009-08-01 04:19:26.208328847 +0100 @@ -0,0 +1,63 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import com.sun.hotspot.igv.layout.Link; +import com.sun.hotspot.igv.layout.Port; +import java.awt.Point; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class ClusterEdge implements Link { + + private ClusterNode from; + private ClusterNode to; + private List points; + + public ClusterEdge(ClusterNode from, ClusterNode to) { + assert from != null; + assert to != null; + this.from = from; + this.to = to; + } + + public Port getTo() { + return to.getInputSlot(); + } + + public Port getFrom() { + return from.getInputSlot(); + } + + public void setControlPoints(List p) { + this.points = p; + } + + public List getControlPoints() { + return points; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/ClusterIngoingConnection.java 2009-08-01 04:19:26.638599694 +0100 @@ -0,0 +1,76 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
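ClusterEdge above carries no geometry of its own; it only stores whatever control points the layout manager assigns. As an illustration of consuming those points, a sketch that measures a laid-out link's polyline length (getControlPoints() may be null before layout, and point lists elsewhere in this change use null entries as gap markers, so both are tolerated; java.awt.Point and java.util.List are assumed imported):

    // Sketch: total polyline length of a laid-out Link.
    static double linkLength(Link l) {
        List<Point> pts = l.getControlPoints();
        double len = 0;
        if (pts != null) {
            for (int i = 1; i < pts.size(); i++) {
                Point a = pts.get(i - 1), b = pts.get(i);
                if (a != null && b != null) {
                    len += a.distance(b);
                }
            }
        }
        return len;
    }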
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import com.sun.hotspot.igv.layout.Link; +import com.sun.hotspot.igv.layout.Port; +import java.awt.Point; +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class ClusterIngoingConnection implements Link { + + private List controlPoints; + private ClusterInputSlotNode inputSlotNode; + private Link connection; + private Port inputSlot; + private Port outputSlot; + + public ClusterIngoingConnection(ClusterInputSlotNode inputSlotNode, Link c) { + this.inputSlotNode = inputSlotNode; + this.connection = c; + this.controlPoints = new ArrayList(); + + inputSlot = c.getTo(); + outputSlot = inputSlotNode.getOutputSlot(); + } + + public Link getConnection() { + return connection; + } + + public ClusterInputSlotNode getInputSlotNode() { + return inputSlotNode; + } + + public Port getTo() { + return inputSlot; + } + + public Port getFrom() { + return outputSlot; + } + + public void setControlPoints(List p) { + this.controlPoints = p; + } + + public List getControlPoints() { + return controlPoints; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/ClusterInputSlotNode.java 2009-08-01 04:19:27.054048715 +0100 @@ -0,0 +1,145 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
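ClusterIngoingConnection (and its ClusterOutgoingConnection counterpart below) exists because a link that crosses a cluster boundary is laid out as three separate Link segments: an outgoing piece inside the source cluster, an inter-cluster piece, and an ingoing piece inside the target cluster. After layout the three point lists are concatenated back onto the original link; HierarchicalClusterLayoutManager.doLayout, later in this change, does this roughly as in the following sketch (helper name and signature are hypothetical):

    // Sketch: stitch the three per-segment control-point lists back together.
    static List<Point> stitch(Link outgoing, Link inter, Link ingoing) {
        List<Point> points = new ArrayList<Point>();
        points.addAll(outgoing.getControlPoints());
        points.addAll(inter.getControlPoints());
        points.addAll(ingoing.getControlPoints());
        return points;
    }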
+ * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import com.sun.hotspot.igv.layout.Cluster; +import com.sun.hotspot.igv.layout.Port; +import com.sun.hotspot.igv.layout.Vertex; +import java.awt.Dimension; +import java.awt.Point; + +/** + * + * @author Thomas Wuerthinger + */ +public class ClusterInputSlotNode implements Vertex { + + private final int SIZE = 0; + private Point position; + private Port inputSlot; + private Port outputSlot; + private ClusterNode blockNode; + private InterClusterConnection interBlockConnection; + private Cluster cluster; + private ClusterIngoingConnection conn; + + public void setIngoingConnection(ClusterIngoingConnection c) { + conn = c; + } + + public ClusterIngoingConnection getIngoingConnection() { + return conn; + } + private String id; + + @Override + public String toString() { + return id; + } + + public ClusterInputSlotNode(ClusterNode n, String id) { + this.blockNode = n; + this.id = id; + + n.addSubNode(this); + + final Vertex thisNode = this; + final ClusterNode thisBlockNode = blockNode; + + outputSlot = new Port() { + + public Point getRelativePosition() { + return new Point(0, 0); + } + + public Vertex getVertex() { + return thisNode; + } + + @Override + public String toString() { + return "OutPort of " + thisNode.toString(); + } + }; + + inputSlot = new Port() { + + public Point getRelativePosition() { + Point p = new Point(thisNode.getPosition()); + p.x += ClusterNode.BORDER; + p.y = 0; + return p; + } + + public Vertex getVertex() { + return thisBlockNode; + } + + @Override + public String toString() { + return "InPort of " + thisNode.toString(); + } + }; + } + + public Port getInputSlot() { + return inputSlot; + } + + public InterClusterConnection getInterBlockConnection() { + return interBlockConnection; + } + + public Port getOutputSlot() { + return outputSlot; + } + + public Dimension getSize() { + return new Dimension(SIZE, SIZE); + } + + public void setPosition(Point p) { + this.position = p; + } + + public Point getPosition() { + return position; + } + + public void setInterBlockConnection(InterClusterConnection interBlockConnection) { + this.interBlockConnection = interBlockConnection; + } + + public Cluster getCluster() { + return cluster; + } + + public boolean isRoot() { + return true; + } + + public int compareTo(Vertex o) { + return toString().compareTo(o.toString()); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/ClusterNode.java 2009-08-01 04:19:27.511243851 +0100 @@ -0,0 +1,233 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
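ClusterInputSlotNode above is a zero-size proxy vertex on the border of its enclosing ClusterNode, and its two Ports deliberately report different owners: the outgoing side belongs to the proxy itself, the incoming side to the cluster. A minimal sketch of such a border-anchored Port, with clusterVertex and slotX as assumed inputs rather than names from the sources:

    // Sketch: a Port pinned at x = slotX on the top edge (y = 0) of a cluster vertex.
    static Port topBorderPort(final Vertex clusterVertex, final int slotX) {
        return new Port() {
            public Point getRelativePosition() {
                return new Point(slotX, 0);
            }
            public Vertex getVertex() {
                return clusterVertex;
            }
        };
    }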
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import com.sun.hotspot.igv.layout.Cluster; +import com.sun.hotspot.igv.layout.Link; +import com.sun.hotspot.igv.layout.Port; +import com.sun.hotspot.igv.layout.Vertex; +import java.awt.Dimension; +import java.awt.Point; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class ClusterNode implements Vertex { + + private Cluster cluster; + private Port inputSlot; + private Port outputSlot; + private Set subNodes; + private Dimension size; + private Point position; + private Set subEdges; + private boolean dirty; + private boolean root; + private String name; + public static final int BORDER = 20; + + public ClusterNode(Cluster cluster, String name) { + this.subNodes = new HashSet(); + this.subEdges = new HashSet(); + this.cluster = cluster; + position = new Point(0, 0); + this.name = name; + } + + public void addSubNode(Vertex v) { + subNodes.add(v); + } + + public void addSubEdge(Link l) { + subEdges.add(l); + } + + public Set getSubEdges() { + return Collections.unmodifiableSet(subEdges); + } + + public void updateSize() { + + + calculateSize(); + + final ClusterNode widget = this; + inputSlot = new Port() { + + public Point getRelativePosition() { + return new Point(size.width / 2, 0); + } + + public Vertex getVertex() { + return widget; + } + }; + + outputSlot = new Port() { + + public Point getRelativePosition() { + return new Point(size.width / 2, size.height); + } + + public Vertex getVertex() { + return widget; + } + }; + } + + private void calculateSize() { + + if (subNodes.size() == 0) { + size = new Dimension(0, 0); + } + + int minX = Integer.MAX_VALUE; + int maxX = Integer.MIN_VALUE; + int minY = Integer.MAX_VALUE; + int maxY = Integer.MIN_VALUE; + + + for (Vertex n : subNodes) { + Point p = n.getPosition(); + minX = Math.min(minX, p.x); + minY = Math.min(minY, p.y); + maxX = Math.max(maxX, p.x + n.getSize().width); + maxY = Math.max(maxY, p.y + n.getSize().height); + } + + for (Link l : subEdges) { + List points = l.getControlPoints(); + for (Point p : points) { + if (p != null) { + minX = Math.min(minX, p.x); + maxX = Math.max(maxX, p.x); + minY = Math.min(minY, p.y); + maxY = Math.max(maxY, p.y); + } + } + } + + size = new Dimension(maxX - minX, maxY - minY); + + // Normalize coordinates + for (Vertex n : subNodes) { + n.setPosition(new Point(n.getPosition().x - minX, n.getPosition().y - minY)); + } + + for (Link l : subEdges) { + List points = new ArrayList(l.getControlPoints()); + for (Point p : points) { + p.x -= minX; + p.y -= minY; + } + l.setControlPoints(points); + + } + + size.width += 2 * BORDER; + size.height += 2 * BORDER; + } + + public Port getInputSlot() { + return inputSlot; + + } + + public Port getOutputSlot() { + return outputSlot; + } + + public Dimension getSize() { + return size; + } + + public Point getPosition() { + return position; + } + + public void setPosition(Point pos) { + + this.position = pos; + for (Vertex n : subNodes) { + Point cur = new Point(n.getPosition()); + cur.translate(pos.x + BORDER, pos.y + BORDER); + n.setPosition(cur); + } + + for (Link e : subEdges) { + List arr = e.getControlPoints(); + ArrayList newArr = new ArrayList(); + for (Point p : arr) { + if (p != 
null) { + Point p2 = new Point(p); + p2.translate(pos.x + BORDER, pos.y + BORDER); + newArr.add(p2); + } else { + newArr.add(null); + } + } + + e.setControlPoints(newArr); + } + } + + public Cluster getCluster() { + return cluster; + } + + public void setCluster(Cluster c) { + cluster = c; + } + + public void setDirty(boolean b) { + dirty = b; + } + + public void setRoot(boolean b) { + root = b; + } + + public boolean isRoot() { + return root; + } + + public int compareTo(Vertex o) { + return toString().compareTo(o.toString()); + } + + @Override + public String toString() { + return name; + } + + public Set getSubNodes() { + return subNodes; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/ClusterOutgoingConnection.java 2009-08-01 04:19:28.039408536 +0100 @@ -0,0 +1,68 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import com.sun.hotspot.igv.layout.Link; +import com.sun.hotspot.igv.layout.Port; +import java.awt.Point; +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class ClusterOutgoingConnection implements Link { + + private List intermediatePoints; + private ClusterOutputSlotNode outputSlotNode; + private Link connection; + private Port inputSlot; + private Port outputSlot; + + public ClusterOutgoingConnection(ClusterOutputSlotNode outputSlotNode, Link c) { + this.outputSlotNode = outputSlotNode; + this.connection = c; + this.intermediatePoints = new ArrayList(); + + outputSlot = c.getFrom(); + inputSlot = outputSlotNode.getInputSlot(); + } + + public Port getTo() { + return inputSlot; + } + + public Port getFrom() { + return outputSlot; + } + + public void setControlPoints(List p) { + this.intermediatePoints = p; + } + + public List getControlPoints() { + return intermediatePoints; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/ClusterOutputSlotNode.java 2009-08-01 04:19:28.472909721 +0100 @@ -0,0 +1,145 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
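calculateSize() above takes the bounding box of all sub-nodes and sub-edge control points, shifts the children so that box starts at (0, 0), and pads the result by BORDER on every side; setPosition() then translates every child by the cluster position plus BORDER. The same bounding-box arithmetic in isolation (java.awt types only; the child rectangles are made-up inputs):

    // Sketch: union of child bounds, padded by 'border' on each side.
    static Dimension sizeWithBorder(List<Rectangle> childBounds, int border) {
        if (childBounds.isEmpty()) {
            return new Dimension(0, 0);
        }
        Rectangle box = null;
        for (Rectangle r : childBounds) {
            box = (box == null) ? new Rectangle(r) : box.union(r);
        }
        return new Dimension(box.width + 2 * border, box.height + 2 * border);
    }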
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import com.sun.hotspot.igv.layout.Cluster; +import com.sun.hotspot.igv.layout.Port; +import com.sun.hotspot.igv.layout.Vertex; +import java.awt.Dimension; +import java.awt.Point; + +/** + * + * @author Thomas Wuerthinger + */ +public class ClusterOutputSlotNode implements Vertex { + + private final int SIZE = 0; + private Point position; + private Port inputSlot; + private Port outputSlot; + private ClusterNode blockNode; + private boolean root; + private Cluster cluster; + private ClusterOutgoingConnection conn; + private String id; + + public void setOutgoingConnection(ClusterOutgoingConnection c) { + this.conn = c; + } + + public ClusterOutgoingConnection getOutgoingConnection() { + return conn; + } + + @Override + public String toString() { + return id; + } + + public ClusterOutputSlotNode(ClusterNode n, String id) { + this.blockNode = n; + this.id = id; + + n.addSubNode(this); + + final Vertex thisNode = this; + final ClusterNode thisBlockNode = blockNode; + + inputSlot = new Port() { + + public Point getRelativePosition() { + return new Point(0, 0); + } + + public Vertex getVertex() { + return thisNode; + } + + @Override + public String toString() { + return "InPort of " + thisNode.toString(); + } + }; + + outputSlot = new Port() { + + public Point getRelativePosition() { + Point p = new Point(thisNode.getPosition()); + p.x += ClusterNode.BORDER; + p.y = thisBlockNode.getSize().height; + return p; + } + + public Vertex getVertex() { + return thisBlockNode; + } + + @Override + public String toString() { + return "OutPort of " + thisNode.toString(); + } + }; + } + + public Dimension getSize() { + return new Dimension(SIZE, SIZE); + } + + public void setPosition(Point p) { + this.position = p; + } + + public Point getPosition() { + return position; + } + + public Port getInputSlot() { + return inputSlot; + } + + public Port getOutputSlot() { + return outputSlot; + } + + public void setCluster(Cluster c) { + cluster = c; + } + + public void setRoot(boolean b) { + root = b; + } + + public Cluster getCluster() { + return cluster; + } + + public boolean isRoot() { + return root; + } + + public int compareTo(Vertex o) { + return toString().compareTo(o.toString()); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/Edge.java 2009-08-01 04:19:28.916462089 +0100 @@ -0,0 +1,89 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +/** + * + * @author Thomas Wuerthinger + */ +public class Edge { + + private E data; + private Node source; + private Node dest; + + protected Edge(Graph graph, Node source, Node dest, E data) { + setData(data); + this.source = source; + this.dest = dest; + assert source != null; + assert dest != null; + assert source.getGraph() == dest.getGraph(); + assert source.getGraph() != null; + assert dest.getGraph() != null; + } + + public Node getSource() { + return source; + } + + public Node getDest() { + return dest; + } + + public E getData() { + return data; + } + + public void setData(E e) { + data = e; + } + + public void remove() { + source.getGraph().removeEdge(this, null); + } + + public boolean isSelfLoop() { + return source == dest; + } + + public void reverse() { + + // Remove from current source / dest + source.removeOutEdge(this); + dest.removeInEdge(this); + + Node tmp = source; + source = dest; + dest = tmp; + + // Add to new source / dest + source.addOutEdge(this); + dest.addInEdge(this); + } + + public String toString() { + return "Edge (" + source + " -- " + dest + "): " + data; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/Graph.java 2009-08-01 04:19:29.333005549 +0100 @@ -0,0 +1,298 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
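Edge.reverse() above is the primitive the hierarchical layouter uses to turn a cyclic input into a DAG: the edge is unhooked from both endpoints, the endpoints are swapped, and it is hooked back in. A toy usage sketch, assuming the type parameters stripped by the extraction read Graph<N, E>, Node<N, E> and Edge<N, E> (Node's API is referenced above but its file is not shown in this excerpt):

    // Sketch: build a two-node graph and reverse its only edge.
    Graph<String, String> g = new Graph<String, String>();
    Node<String, String> a = g.createNode("A", "A");
    Node<String, String> b = g.createNode("B", "B");
    Edge<String, String> e = g.createEdge(a, b, "A->B", null); // null key: not indexed
    e.reverse(); // now runs B -> A; both nodes' in/out edge lists are updated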
+ * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; + +/** + * + * @author Thomas Wuerthinger + */ +public class Graph { + + private HashMap> nodes; + private HashMap> edges; + private List> nodeList; + + public Graph() { + nodes = new HashMap>(); + edges = new HashMap>(); + nodeList = new ArrayList>(); + } + + public Node createNode(N data, Object key) { + Node n = new Node(this, data); + assert key == null || !nodes.containsKey(key); + if (key != null) { + nodes.put(key, n); + } + nodeList.add(n); + return n; + } + + public Edge createEdge(Node source, Node dest, E data, Object key) { + Edge e = new Edge(this, source, dest, data); + source.addOutEdge(e); + dest.addInEdge(e); + if (key != null) { + edges.put(key, e); + } + return e; + } + + public Node getNode(Object key) { + return nodes.get(key); + } + + public Edge getEdge(Object key) { + return edges.get(key); + } + + public Collection> getEdges() { + return Collections.unmodifiableCollection(edges.values()); + } + + public Collection> getNodes() { + return Collections.unmodifiableList(nodeList); + } + + public void removeEdge(Edge e, Object key) { + assert key == null || edges.containsKey(key); + if (key != null) { + edges.remove(key); + } + e.getSource().removeOutEdge(e); + e.getDest().removeInEdge(e); + } + + public class DFSTraversalVisitor { + + public void visitNode(Node n) { + } + + public boolean visitEdge(Edge e, boolean backEdge) { + return true; + } + } + + public class BFSTraversalVisitor { + + public void visitNode(Node n, int depth) { + } + } + + public List> getNodesWithInDegree(int x) { + return getNodesWithInDegree(x, true); + } + + public List> getNodesWithInDegree(int x, boolean countSelfLoops) { + + List> result = new ArrayList>(); + for (Node n : getNodes()) { + if (n.getInDegree(countSelfLoops) == x) { + result.add(n); + } + } + + return result; + + } + + private void markReachable(Node startingNode) { + ArrayList> arr = new ArrayList>(); + arr.add(startingNode); + for (Node n : getNodes()) { + n.setReachable(false); + } + traverseDFS(arr, new DFSTraversalVisitor() { + + @Override + public void visitNode(Node n) { + n.setReachable(true); + } + }); + } + + public void traverseBFS(Node startingNode, BFSTraversalVisitor tv, boolean longestPath) { + + if (longestPath) { + markReachable(startingNode); + } + + for (Node n : getNodes()) { + n.setVisited(false); + n.setActive(false); + } + + Queue> queue = new LinkedList>(); + queue.add(startingNode); + startingNode.setVisited(true); + int layer = 0; + Node lastOfLayer = startingNode; + Node lastAdded = null; + + while (!queue.isEmpty()) { + + Node current = queue.poll(); + tv.visitNode(current, layer); + current.setActive(false); + + + for (Edge e : current.getOutEdges()) { + if (!e.getDest().isVisited()) { + + boolean allow = true; + if (longestPath) { + for (Node pred : e.getDest().getPredecessors()) { + if ((!pred.isVisited() || pred.isActive()) && pred.isReachable()) { + allow = false; + break; + } + } + } + + if (allow) { + queue.offer(e.getDest()); + lastAdded = e.getDest(); + e.getDest().setVisited(true); + e.getDest().setActive(true); + } + } + } + + if (current == lastOfLayer && !queue.isEmpty()) { + lastOfLayer = lastAdded; + layer++; + } + } + } + + public void traverseDFS(DFSTraversalVisitor tv) { + traverseDFS(getNodes(), tv); + } + + public void 
traverseDFS(Collection> startingNodes, DFSTraversalVisitor tv) { + + for (Node n : getNodes()) { + n.setVisited(false); + n.setActive(false); + } + + boolean result = false; + for (Node n : startingNodes) { + traverse(tv, n); + } + } + + private void traverse(DFSTraversalVisitor tv, Node n) { + + if (!n.isVisited()) { + n.setVisited(true); + n.setActive(true); + tv.visitNode(n); + + for (Edge e : n.getOutEdges()) { + + Node next = e.getDest(); + if (next.isActive()) { + tv.visitEdge(e, true); + } else { + if (tv.visitEdge(e, false)) { + traverse(tv, next); + } + } + } + + n.setActive(false); + } + + } + + public boolean hasCycles() { + + for (Node n : getNodes()) { + n.setVisited(false); + n.setActive(false); + } + + boolean result = false; + for (Node n : getNodes()) { + result |= checkCycles(n); + if (result) { + break; + } + } + return result; + } + + private boolean checkCycles(Node n) { + + if (n.isActive()) { + return true; + } + + if (!n.isVisited()) { + + n.setVisited(true); + n.setActive(true); + + for (Node succ : n.getSuccessors()) { + if (checkCycles(succ)) { + return true; + } + } + + n.setActive(false); + + } + + return false; + } + + @Override + public String toString() { + + StringBuilder s = new StringBuilder(); + s.append("Nodes: "); + for (Node n : getNodes()) { + s.append(n.toString()); + s.append("\n"); + } + + s.append("Edges: "); + + for (Edge e : getEdges()) { + s.append(e.toString()); + s.append("\n"); + } + + return s.toString(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/HierarchicalClusterLayoutManager.java 2009-08-01 04:19:29.996154816 +0100 @@ -0,0 +1,247 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
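hasCycles() above runs the standard coloring DFS (active = on the current recursion stack, visited = finished); reaching an active node means a back edge and therefore a cycle. Continuing the toy graph from the previous sketch:

    // Sketch: adding a back edge creates a cycle that hasCycles() detects.
    g.createEdge(b, a, "B->A", null);
    System.out.println(g.hasCycles()); // true: A -> B -> A
    // HierarchicalLayoutManager below keeps a reversedLinks set and reverses
    // such offending edges before assigning layers.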
+ * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import java.awt.Point; +import java.awt.Rectangle; +import java.util.HashMap; +import java.util.List; +import java.util.Set; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.TreeSet; +import com.sun.hotspot.igv.layout.Cluster; +import com.sun.hotspot.igv.layout.LayoutGraph; +import com.sun.hotspot.igv.layout.LayoutManager; +import com.sun.hotspot.igv.layout.Link; +import com.sun.hotspot.igv.layout.Port; +import com.sun.hotspot.igv.layout.Vertex; + +/** + * + * @author Thomas Wuerthinger + */ +public class HierarchicalClusterLayoutManager implements LayoutManager { + + private OldHierarchicalLayoutManager.Combine combine; + private LayoutManager subManager = new OldHierarchicalLayoutManager(combine); + private LayoutManager manager = new OldHierarchicalLayoutManager(combine, 150); + private static final boolean TRACE = false; + + public HierarchicalClusterLayoutManager(OldHierarchicalLayoutManager.Combine combine) { + this.combine = combine; + } + + public void doLayout(LayoutGraph graph) { + doLayout(graph, new HashSet(), new HashSet(), new HashSet()); + } + + public void setSubManager(LayoutManager manager) { + this.subManager = manager; + } + + public void setManager(LayoutManager manager) { + this.manager = manager; + } + + public void doLayout(LayoutGraph graph, Set firstLayerHint, Set lastLayerHint, Set importantLinks) { + + assert graph.verify(); + + HashMap> lists = new HashMap>(); + HashMap> listsConnection = new HashMap>(); + HashMap> clusterInputSlotHash = new HashMap>(); + HashMap> clusterOutputSlotHash = new HashMap>(); + + HashMap clusterNodes = new HashMap(); + HashMap> clusterInputSlotSet = new HashMap>(); + HashMap> clusterOutputSlotSet = new HashMap>(); + Set clusterEdges = new HashSet(); + Set interClusterEdges = new HashSet(); + HashMap linkClusterOutgoingConnection = new HashMap(); + HashMap linkInterClusterConnection = new HashMap(); + HashMap linkClusterIngoingConnection = new HashMap(); + Set clusterNodeSet = new HashSet(); + + Set cluster = graph.getClusters(); + int z = 0; + for (Cluster c : cluster) { + lists.put(c, new ArrayList()); + listsConnection.put(c, new ArrayList()); + clusterInputSlotHash.put(c, new HashMap()); + clusterOutputSlotHash.put(c, new HashMap()); + clusterOutputSlotSet.put(c, new TreeSet()); + clusterInputSlotSet.put(c, new TreeSet()); + ClusterNode cn = new ClusterNode(c, "" + z); + clusterNodes.put(c, cn); + clusterNodeSet.add(cn); + z++; + } + + // Add cluster edges + for (Cluster c : cluster) { + + ClusterNode start = clusterNodes.get(c); + + for (Cluster succ : c.getSuccessors()) { + ClusterNode end = clusterNodes.get(succ); + if (end != null && start != end) { + ClusterEdge e = new ClusterEdge(start, end); + clusterEdges.add(e); + interClusterEdges.add(e); + } + } + } + + for (Vertex v : graph.getVertices()) { + Cluster c = v.getCluster(); + assert c != null; + clusterNodes.get(c).addSubNode(v); + } + + for (Link l : graph.getLinks()) { + + Port fromPort = l.getFrom(); + Port toPort = l.getTo(); + Vertex fromVertex = fromPort.getVertex(); + Vertex toVertex = toPort.getVertex(); + Cluster fromCluster = fromVertex.getCluster(); + Cluster toCluster = toVertex.getCluster(); + + Port samePort = null; + if (combine == OldHierarchicalLayoutManager.Combine.SAME_INPUTS) { + samePort = toPort; + } else if (combine == OldHierarchicalLayoutManager.Combine.SAME_OUTPUTS) { + samePort = fromPort; + } + + assert listsConnection.containsKey(fromCluster); + assert 
listsConnection.containsKey(toCluster); + + if (fromCluster == toCluster) { + listsConnection.get(fromCluster).add(l); + clusterNodes.get(fromCluster).addSubEdge(l); + } else { + ClusterInputSlotNode inputSlotNode = null; + ClusterOutputSlotNode outputSlotNode = null; + + if (samePort != null) { + outputSlotNode = clusterOutputSlotHash.get(fromCluster).get(samePort); + inputSlotNode = clusterInputSlotHash.get(toCluster).get(samePort); + } + + if (outputSlotNode == null) { + outputSlotNode = new ClusterOutputSlotNode(clusterNodes.get(fromCluster), "Out " + fromCluster.toString() + " " + samePort.toString()); + clusterOutputSlotSet.get(fromCluster).add(outputSlotNode); + ClusterOutgoingConnection conn = new ClusterOutgoingConnection(outputSlotNode, l); + outputSlotNode.setOutgoingConnection(conn); + clusterNodes.get(fromCluster).addSubEdge(conn); + if (samePort != null) { + clusterOutputSlotHash.get(fromCluster).put(samePort, outputSlotNode); + } + + linkClusterOutgoingConnection.put(l, conn); + } else { + linkClusterOutgoingConnection.put(l, outputSlotNode.getOutgoingConnection()); + } + + if (inputSlotNode == null) { + inputSlotNode = new ClusterInputSlotNode(clusterNodes.get(toCluster), "In " + toCluster.toString() + " " + samePort.toString()); + clusterInputSlotSet.get(toCluster).add(inputSlotNode); + } + + ClusterIngoingConnection conn = new ClusterIngoingConnection(inputSlotNode, l); + inputSlotNode.setIngoingConnection(conn); + clusterNodes.get(toCluster).addSubEdge(conn); + if (samePort != null) { + clusterInputSlotHash.get(toCluster).put(samePort, inputSlotNode); + } + + linkClusterIngoingConnection.put(l, conn); + + + InterClusterConnection interConn = new InterClusterConnection(outputSlotNode, inputSlotNode); + linkInterClusterConnection.put(l, interConn); + clusterEdges.add(interConn); + } + } + + Timing t = null; + + if (TRACE) { + new Timing("Child timing"); + t.start(); + } + + for (Cluster c : cluster) { + ClusterNode n = clusterNodes.get(c); + subManager.doLayout(new LayoutGraph(n.getSubEdges(), n.getSubNodes()), clusterInputSlotSet.get(c), clusterOutputSlotSet.get(c), new HashSet()); + n.updateSize(); + } + + Set roots = new LayoutGraph(interClusterEdges).findRootVertices(); + for (Vertex v : roots) { + assert v instanceof ClusterNode; + ((ClusterNode) v).setRoot(true); + } + + manager.doLayout(new LayoutGraph(clusterEdges, clusterNodeSet), new HashSet(), new HashSet(), interClusterEdges); + + for (Cluster c : cluster) { + ClusterNode n = clusterNodes.get(c); + c.setBounds(new Rectangle(n.getPosition(), n.getSize())); + } + + // TODO: handle case where blocks are not fully connected + + if (TRACE) { + t.stop(); + t.print(); + } + + for (Link l : graph.getLinks()) { + + if (linkInterClusterConnection.containsKey(l)) { + ClusterOutgoingConnection conn1 = linkClusterOutgoingConnection.get(l); + InterClusterConnection conn2 = linkInterClusterConnection.get(l); + ClusterIngoingConnection conn3 = linkClusterIngoingConnection.get(l); + + assert conn1 != null; + assert conn2 != null; + assert conn3 != null; + + List points = new ArrayList(); + + points.addAll(conn1.getControlPoints()); + points.addAll(conn2.getControlPoints()); + points.addAll(conn3.getControlPoints()); + + l.setControlPoints(points); + } + } + } + + public void doRouting(LayoutGraph graph) { + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/HierarchicalLayoutManager.java 2009-08-01 
04:19:30.444658788 +0100 @@ -0,0 +1,2110 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import com.sun.hotspot.igv.layout.LayoutGraph; +import com.sun.hotspot.igv.layout.LayoutManager; +import com.sun.hotspot.igv.layout.Link; +import com.sun.hotspot.igv.layout.Vertex; +import java.awt.Dimension; +import java.awt.Point; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import java.util.Set; +import java.util.SortedSet; +import java.util.Stack; +import java.util.TreeSet; + +/** + * + * @author Thomas Wuerthinger + */ +public class HierarchicalLayoutManager implements LayoutManager { + + public static final boolean TRACE = false; + public static final boolean CHECK = false; + public static final int SWEEP_ITERATIONS = 1; + public static final int CROSSING_ITERATIONS = 2; + public static final int DUMMY_HEIGHT = 1; + public static final int DUMMY_WIDTH = 1; + public static final int X_OFFSET = 9; + public static final int LAYER_OFFSET = 30; + public static final int MAX_LAYER_LENGTH = -1; + public static final int MIN_LAYER_DIFFERENCE = 1; + + public enum Combine { + + NONE, + SAME_INPUTS, + SAME_OUTPUTS + } + // Options + private Combine combine; + private int dummyWidth; + private int dummyHeight; + private int xOffset; + private int layerOffset; + private int maxLayerLength; + private int minLayerDifference; + // Algorithm global datastructures + private Set reversedLinks; + private List nodes; + private HashMap vertexToLayoutNode; + private HashMap> reversedLinkStartPoints; + private HashMap> reversedLinkEndPoints; + private HashMap bottomEdgeHash; + private HashMap> splitStartPoints; + private HashMap> splitEndPoints; + private LayoutGraph graph; + private List[] layers; + private int layerCount; + private Set firstLayerHint; + private Set lastLayerHint; + private Set importantLinks; + private Set linksToFollow; + + private class LayoutNode { + + public int x; + public int y; + public int width; + public int height; + public int layer = -1; + public int xOffset; + public int yOffset; + public int bottomYOffset; + public Vertex vertex; // Only used for non-dummy nodes, otherwise null + public List preds = new ArrayList(); + public List succs = new ArrayList(); + public HashMap outOffsets = new HashMap(); + public HashMap inOffsets = new HashMap(); + public 
int pos = -1; // Position within layer + public int crossingNumber; + + @Override + public String toString() { + return "Node " + vertex; + } + } + + private class LayoutEdge { + + public LayoutNode from; + public LayoutNode to; + public int relativeFrom; + public int relativeTo; + public Link link; + } + + private abstract class AlgorithmPart { + + public void start() { + if (CHECK) { + preCheck(); + } + + long start = 0; + if (TRACE) { + System.out.println("##################################################"); + System.out.println("Starting part " + this.getClass().getName()); + start = System.currentTimeMillis(); + } + run(); + if (TRACE) { + System.out.println("Timing for " + this.getClass().getName() + " is " + (System.currentTimeMillis() - start)); + printStatistics(); + } + + if (CHECK) { + postCheck(); + } + } + + protected abstract void run(); + + protected void printStatistics() { + } + + protected void postCheck() { + } + + protected void preCheck() { + } + } + + public HierarchicalLayoutManager() { + this(Combine.NONE); + } + + public HierarchicalLayoutManager(Combine b) { + this.combine = b; + this.dummyWidth = DUMMY_WIDTH; + this.dummyHeight = DUMMY_HEIGHT; + this.xOffset = X_OFFSET; + this.layerOffset = LAYER_OFFSET; + this.maxLayerLength = MAX_LAYER_LENGTH; + this.minLayerDifference = MIN_LAYER_DIFFERENCE; + this.linksToFollow = new HashSet(); + } + + public int getMaxLayerLength() { + return maxLayerLength; + } + + public void setMaxLayerLength(int v) { + maxLayerLength = v; + } + + public void setMinLayerDifference(int v) { + minLayerDifference = v; + } + + public void doLayout(LayoutGraph graph) { + doLayout(graph, new HashSet(), new HashSet(), new HashSet()); + + } + + public void doLayout(LayoutGraph graph, Set firstLayerHint, Set lastLayerHint, Set importantLinks) { + + this.importantLinks = importantLinks; + this.graph = graph; + this.firstLayerHint = firstLayerHint; + this.lastLayerHint = lastLayerHint; + + vertexToLayoutNode = new HashMap(); + reversedLinks = new HashSet(); + reversedLinkStartPoints = new HashMap>(); + reversedLinkEndPoints = new HashMap>(); + bottomEdgeHash = new HashMap(); + nodes = new ArrayList(); + splitStartPoints = new HashMap>(); + splitEndPoints = new HashMap>(); + + // ############################################################# + // Step 1: Build up data structure + new BuildDatastructure().start(); + + // ############################################################# + // STEP 2: Reverse edges, handle backedges + new ReverseEdges().start(); + + for (LayoutNode n : nodes) { + ArrayList tmpArr = new ArrayList(); + for (LayoutEdge e : n.succs) { + if (importantLinks.contains(e.link)) { + tmpArr.add(e); + } + } + + for (LayoutEdge e : tmpArr) { + //System.out.println("Removed " + e); + e.from.succs.remove(e); + e.to.preds.remove(e); + } + } + + // ############################################################# + // STEP 3: Assign layers + new AssignLayers().start(); + + // ############################################################# + // STEP 4: Create dummy nodes + new CreateDummyNodes().start(); + + // ############################################################# + // STEP 5: Crossing Reduction + new CrossingReduction().start(); + + // ############################################################# + // STEP 7: Assign X coordinates + //new AssignXCoordinates().start(); + new AssignXCoordinates2().start(); + + // ############################################################# + // STEP 6: Assign Y coordinates + new AssignYCoordinates().start(); 
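+        // Note on ordering: the x-coordinate sweep runs before the y-coordinate
+        // pass because AssignYCoordinates widens the gap between two layers by
+        // sqrt(maxXOffset), the largest horizontal distance any edge leaving the
+        // layer has to cover, so the final x positions must already be known.
+        // AssignXCoordinates2 itself is a median/mean heuristic: the
+        // calculateOptimalDown/Up/Both helpers place a node at the median (or
+        // average) of its neighbours' x positions, and NodeRow.insert clamps the
+        // result so nodes within a layer never overlap.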
+ + // ############################################################# + // STEP 8: Write back to interface + new WriteResult().start(); + } + + private class WriteResult extends AlgorithmPart { + + private int pointCount; + + protected void run() { + + HashMap vertexPositions = new HashMap(); + HashMap> linkPositions = new HashMap>(); + for (Vertex v : graph.getVertices()) { + LayoutNode n = vertexToLayoutNode.get(v); + assert !vertexPositions.containsKey(v); + vertexPositions.put(v, new Point(n.x + n.xOffset, n.y + n.yOffset)); + } + + for (LayoutNode n : nodes) { + + for (LayoutEdge e : n.preds) { + if (e.link != null) { + ArrayList points = new ArrayList(); + + Point p = new Point(e.to.x + e.relativeTo, e.to.y + e.to.yOffset); + points.add(p); + if (e.to.inOffsets.containsKey(e.relativeTo)) { + points.add(new Point(p.x, p.y + e.to.inOffsets.get(e.relativeTo))); + } + + LayoutNode cur = e.from; + LayoutNode other = e.to; + LayoutEdge curEdge = e; + while (cur.vertex == null && cur.preds.size() != 0) { + if (points.size() > 1 && points.get(points.size() - 1).x == cur.x + cur.width / 2 && points.get(points.size() - 2).x == cur.x + cur.width / 2) { + points.remove(points.size() - 1); + } + points.add(new Point(cur.x + cur.width / 2, cur.y + cur.height)); + if (points.size() > 1 && points.get(points.size() - 1).x == cur.x + cur.width / 2 && points.get(points.size() - 2).x == cur.x + cur.width / 2) { + points.remove(points.size() - 1); + } + points.add(new Point(cur.x + cur.width / 2, cur.y)); + assert cur.preds.size() == 1; + curEdge = cur.preds.get(0); + cur = curEdge.from; + } + + p = new Point(cur.x + curEdge.relativeFrom, cur.y + cur.height - cur.bottomYOffset); + if (curEdge.from.outOffsets.containsKey(curEdge.relativeFrom)) { + points.add(new Point(p.x, p.y + curEdge.from.outOffsets.get(curEdge.relativeFrom))); + } + points.add(p); + + Collections.reverse(points); + + + + if (cur.vertex == null && cur.preds.size() == 0) { + + if (reversedLinkEndPoints.containsKey(e.link)) { + for (Point p1 : reversedLinkEndPoints.get(e.link)) { + points.add(new Point(p1.x + e.to.x, p1.y + e.to.y)); + } + } + + if (splitStartPoints.containsKey(e.link)) { + points.add(0, null); + points.addAll(0, splitStartPoints.get(e.link)); + + //checkPoints(points); + if (reversedLinks.contains(e.link)) { + Collections.reverse(points); + } + assert !linkPositions.containsKey(e.link); + linkPositions.put(e.link, points); + } else { + splitEndPoints.put(e.link, points); + } + + } else { + if (reversedLinks.contains(e.link)) { + Collections.reverse(points); + } + if (reversedLinkStartPoints.containsKey(e.link)) { + for (Point p1 : reversedLinkStartPoints.get(e.link)) { + points.add(new Point(p1.x + cur.x, p1.y + cur.y)); + } + } + + if (reversedLinkEndPoints.containsKey(e.link)) { + for (Point p1 : reversedLinkEndPoints.get(e.link)) { + points.add(0, new Point(p1.x + other.x, p1.y + other.y)); + } + } + + assert !linkPositions.containsKey(e.link); + linkPositions.put(e.link, points); + } + pointCount += points.size(); + + // No longer needed! 
+ e.link = null; + } + } + + for (LayoutEdge e : n.succs) { + if (e.link != null) { + ArrayList points = new ArrayList(); + Point p = new Point(e.from.x + e.relativeFrom, e.from.y + e.from.height - e.from.bottomYOffset); + points.add(p); + if (e.from.outOffsets.containsKey(e.relativeFrom)) { + points.add(new Point(p.x, p.y + e.from.outOffsets.get(e.relativeFrom))); + } + + LayoutNode cur = e.to; + LayoutNode other = e.from; + LayoutEdge curEdge = e; + while (cur.vertex == null && cur.succs.size() != 0) { + if (points.size() > 1 && points.get(points.size() - 1).x == cur.x + cur.width / 2 && points.get(points.size() - 2).x == cur.x + cur.width / 2) { + points.remove(points.size() - 1); + } + points.add(new Point(cur.x + cur.width / 2, cur.y)); + if (points.size() > 1 && points.get(points.size() - 1).x == cur.x + cur.width / 2 && points.get(points.size() - 2).x == cur.x + cur.width / 2) { + points.remove(points.size() - 1); + } + points.add(new Point(cur.x + cur.width / 2, cur.y + cur.height)); + if (cur.succs.size() == 0) { + break; + } + assert cur.succs.size() == 1; + curEdge = cur.succs.get(0); + cur = curEdge.to; + } + + + p = new Point(cur.x + curEdge.relativeTo, cur.y + cur.yOffset); + points.add(p); + if (curEdge.to.inOffsets.containsKey(curEdge.relativeTo)) { + points.add(new Point(p.x, p.y + curEdge.to.inOffsets.get(curEdge.relativeTo))); + } + + + if (cur.succs.size() == 0 && cur.vertex == null) { + if (reversedLinkStartPoints.containsKey(e.link)) { + for (Point p1 : reversedLinkStartPoints.get(e.link)) { + points.add(0, new Point(p1.x + other.x, p1.y + other.y)); + } + } + + if (splitEndPoints.containsKey(e.link)) { + points.add(null); + points.addAll(splitEndPoints.get(e.link)); + + //checkPoints(points); + if (reversedLinks.contains(e.link)) { + Collections.reverse(points); + } + assert !linkPositions.containsKey(e.link); + linkPositions.put(e.link, points); + } else { + splitStartPoints.put(e.link, points); + } + } else { + + if (reversedLinkStartPoints.containsKey(e.link)) { + for (Point p1 : reversedLinkStartPoints.get(e.link)) { + points.add(0, new Point(p1.x + other.x, p1.y + other.y)); + } + } + if (reversedLinkEndPoints.containsKey(e.link)) { + for (Point p1 : reversedLinkEndPoints.get(e.link)) { + points.add(new Point(p1.x + cur.x, p1.y + cur.y)); + } + } + if (reversedLinks.contains(e.link)) { + Collections.reverse(points); + } + //checkPoints(points); + assert !linkPositions.containsKey(e.link); + linkPositions.put(e.link, points); + } + + pointCount += points.size(); + e.link = null; + } + } + } + + int minX = Integer.MAX_VALUE; + int minY = Integer.MAX_VALUE; + for (Vertex v : vertexPositions.keySet()) { + Point p = vertexPositions.get(v); + minX = Math.min(minX, p.x); + minY = Math.min(minY, p.y); + } + + for (Link l : linkPositions.keySet()) { + List points = linkPositions.get(l); + for (Point p : points) { + if (p != null) { + minX = Math.min(minX, p.x); + minY = Math.min(minY, p.y); + } + } + + } + + for (Vertex v : vertexPositions.keySet()) { + Point p = vertexPositions.get(v); + p.x -= minX; + p.y -= minY; + v.setPosition(p); + } + + for (Link l : linkPositions.keySet()) { + List points = linkPositions.get(l); + for (Point p : points) { + if (p != null) { + p.x -= minX; + p.y -= minY; + } + } + l.setControlPoints(points); + + } + } + + @Override + protected void printStatistics() { + System.out.println("Number of nodes: " + nodes.size()); + int edgeCount = 0; + for (LayoutNode n : nodes) { + edgeCount += n.succs.size(); + } + System.out.println("Number of 
edges: " + edgeCount); + System.out.println("Number of points: " + pointCount); + } + } + + private static class Segment { + + public float d; + public int orderNumber = -1; + public ArrayList nodes = new ArrayList(); + public HashSet succs = new HashSet(); + public HashSet preds = new HashSet(); + public Region region; + } + private static final Comparator segmentComparator = new Comparator() { + + public int compare(Segment s1, Segment s2) { + return s1.orderNumber - s2.orderNumber; + } + }; + + private static class Region { + + public float d; + public int minOrderNumber; + public SortedSet segments = new TreeSet(segmentComparator); + public HashSet succs = new HashSet(4); + public HashSet preds = new HashSet(4); + } + private static final Comparator regionComparator = new Comparator() { + + public int compare(Region r1, Region r2) { + return r1.minOrderNumber - r2.minOrderNumber; + } + }; + private static final Comparator nodePositionComparator = new Comparator() { + + public int compare(LayoutNode n1, LayoutNode n2) { + return n1.pos - n2.pos; + } + }; + private static final Comparator nodeProcessingDownComparator = new Comparator() { + + public int compare(LayoutNode n1, LayoutNode n2) { + if (n1.vertex == null) { + return -1; + } + if (n2.vertex == null) { + return 1; + } + return n1.preds.size() - n2.preds.size(); + } + }; + private static final Comparator nodeProcessingUpComparator = new Comparator() { + + public int compare(LayoutNode n1, LayoutNode n2) { + if (n1.vertex == null) { + return -1; + } + if (n2.vertex == null) { + return 1; + } + return n1.succs.size() - n2.succs.size(); + } + }; + + private class AssignXCoordinates2 extends AlgorithmPart { + + private ArrayList[] space; + private ArrayList[] downProcessingOrder; + private ArrayList[] upProcessingOrder; + + private void initialPositions() { + for (LayoutNode n : nodes) { + n.x = space[n.layer].get(n.pos); + } + } + + protected void run() { + + space = new ArrayList[layers.length]; + downProcessingOrder = new ArrayList[layers.length]; + upProcessingOrder = new ArrayList[layers.length]; + + for (int i = 0; i < layers.length; i++) { + space[i] = new ArrayList(); + downProcessingOrder[i] = new ArrayList(); + upProcessingOrder[i] = new ArrayList(); + + int curX = 0; + for (LayoutNode n : layers[i]) { + space[i].add(curX); + curX += n.width + xOffset; + downProcessingOrder[i].add(n); + upProcessingOrder[i].add(n); + } + + Collections.sort(downProcessingOrder[i], nodeProcessingDownComparator); + Collections.sort(upProcessingOrder[i], nodeProcessingUpComparator); + } + + initialPositions(); + for (int i = 0; i < SWEEP_ITERATIONS; i++) { + sweepDown(); + sweepUp(); + } + + for (int i = 0; i < SWEEP_ITERATIONS; i++) { + doubleSweep(); + } + } + + private int calculateOptimalDown(LayoutNode n) { + + List values = new ArrayList(); + if (n.preds.size() == 0) { + return n.x; + } + for (LayoutEdge e : n.preds) { + int cur = e.from.x + e.relativeFrom - e.relativeTo; + values.add(cur); + } + return median(values); + } + + private int calculateOptimalBoth(LayoutNode n) { + + List values = new ArrayList(); + if (n.preds.size() == 0 + n.succs.size()) { + return n.x; + } + for (LayoutEdge e : n.preds) { + int cur = e.from.x + e.relativeFrom - e.relativeTo; + values.add(cur); + } + + for (LayoutEdge e : n.succs) { + int cur = e.to.x + e.relativeTo - e.relativeFrom; + values.add(cur); + } + + return median(values); + } + + private int calculateOptimalUp(LayoutNode n) { + + //List values = new ArrayList(); + int size = n.succs.size(); + if 
(size == 0) { + return n.x; + } else { + int result = 0; + for (LayoutEdge e : n.succs) { + int cur = e.to.x + e.relativeTo - e.relativeFrom; + result += cur; + } + return result / size; //median(values); + } + } + + private int median(List values) { + Collections.sort(values); + if (values.size() % 2 == 0) { + return (values.get(values.size() / 2 - 1) + values.get(values.size() / 2)) / 2; + } else { + return values.get(values.size() / 2); + } + } + + private void sweepUp() { + for (int i = layers.length - 1; i >= 0; i--) { + NodeRow r = new NodeRow(space[i]); + for (LayoutNode n : upProcessingOrder[i]) { + int optimal = calculateOptimalUp(n); + r.insert(n, optimal); + } + } + /* + for(int i=0; i= 0; i--) { + NodeRow r = new NodeRow(space[i]); + for (LayoutNode n : upProcessingOrder[i]) { + int optimal = calculateOptimalBoth(n); + r.insert(n, optimal); + } + } + } + + private void sweepDown() { + for (int i = 1; i < layers.length; i++) { + NodeRow r = new NodeRow(space[i]); + for (LayoutNode n : downProcessingOrder[i]) { + int optimal = calculateOptimalDown(n); + r.insert(n, optimal); + } + } + } + } + + private static class NodeRow { + + private TreeSet treeSet; + private ArrayList space; + + public NodeRow(ArrayList space) { + treeSet = new TreeSet(nodePositionComparator); + this.space = space; + } + + public int offset(LayoutNode n1, LayoutNode n2) { + int v1 = space.get(n1.pos) + n1.width; + int v2 = space.get(n2.pos); + return v2 - v1; + } + + public void insert(LayoutNode n, int pos) { + + SortedSet headSet = treeSet.headSet(n); + + LayoutNode leftNeighbor = null; + int minX = Integer.MIN_VALUE; + if (!headSet.isEmpty()) { + leftNeighbor = headSet.last(); + minX = leftNeighbor.x + leftNeighbor.width + offset(leftNeighbor, n); + } + + if (pos < minX) { + n.x = minX; + } else { + + LayoutNode rightNeighbor = null; + SortedSet tailSet = treeSet.tailSet(n); + int maxX = Integer.MAX_VALUE; + if (!tailSet.isEmpty()) { + rightNeighbor = tailSet.first(); + maxX = rightNeighbor.x - offset(n, rightNeighbor) - n.width; + } + + if (pos > maxX) { + n.x = maxX; + } else { + n.x = pos; + } + + assert minX <= maxX; + } + + treeSet.add(n); + } + } + + private class AssignXCoordinates extends AlgorithmPart { + + HashMap hashMap = new HashMap(); + ArrayList segments = new ArrayList(); + + private void generateSegments() { + + for (int i = 0; i < layerCount; i++) { + for (LayoutNode n : layers[i]) { + if (!hashMap.containsKey(n)) { + Segment s = new Segment(); + segments.add(s); + LayoutNode curNode = n; + + int maxLength = 0; + while (curNode.succs.size() == 1 && curNode.preds.size() == 1) { + s.nodes.add(curNode); + assert !hashMap.containsKey(curNode); + hashMap.put(curNode, s); + curNode = curNode.succs.get(0).to; + maxLength++; + //if(maxLength > 10) break; + } + + if (s.nodes.size() > 0 && curNode.preds.size() == 1 && curNode.vertex == null) { + s.nodes.add(curNode); + assert !hashMap.containsKey(curNode); + hashMap.put(curNode, s); + } + + if (s.nodes.size() == 0) { + // Simple segment with a single node + s.nodes.add(n); + hashMap.put(n, s); + } + } + } + } + } + + private void addEdges() { + + for (int i = 0; i < layerCount; i++) { + LayoutNode prev = null; + for (LayoutNode n : layers[i]) { + + if (prev != null) { + Segment s1 = hashMap.get(prev); + Segment s2 = hashMap.get(n); + + if (s1 != s2) { + s1.succs.add(s2); + s2.preds.add(s1); + } + } + prev = n; + + } + } + } + + private void topologicalSorting() { + + Queue queue = new LinkedList(); + + int index = 0; + ArrayList newList = new 
ArrayList(); + for (Segment s : segments) { + if (s.preds.size() == 0) { + s.orderNumber = index; + newList.add(s); + index++; + queue.add(s); + } + } + + while (!queue.isEmpty()) { + Segment s = queue.remove(); + + for (Segment succ : s.succs) { + succ.preds.remove(s); + if (succ.preds.size() == 0) { + queue.add(succ); + succ.orderNumber = index; + newList.add(succ); + index++; + } + } + } + + segments = newList; + } + + private void initialPositions() { + + int[] minPos = new int[layers.length]; + + for (Segment s : segments) { + int max = 0; + for (LayoutNode n : s.nodes) { + int x = minPos[n.layer]; + if (x > max) { + max = x; + } + } + + for (LayoutNode n : s.nodes) { + minPos[n.layer] = max + n.width + xOffset; + n.x = max; + } + } + } + + private int predSum(LayoutNode n) { + int sum = 0; + for (LayoutEdge e : n.preds) { + assert e.to == n; + //sum += (e.from.x + e.relativeFrom + (int)hashMap.get(e.from).d) - (e.to.x + e.relativeTo + (int)hashMap.get(e.to).d); + sum += (e.from.x + e.relativeFrom) - (e.to.x + e.relativeTo); + } + + return sum; + } + + private int succSum(LayoutNode n) { + int sum = 0; + for (LayoutEdge e : n.succs) { + + assert e.from == n; + sum += (e.to.x + e.relativeTo) - (e.from.x + e.relativeFrom); + //sum += (e.to.x + e.relativeTo + (int)hashMap.get(e.to).d) - (e.from.x + e.relativeFrom + (int)hashMap.get(e.from).d); + } + + return sum; + + } + + private void downValues() { + + for (Segment s : segments) { + downValues(s); + + } + + } + + private void downValues(Segment s) { + LayoutNode n = s.nodes.get(0); // Only first node needed, all other have same coordinate + + if (n.preds.size() == 0) { + // Value is 0.0; + if (n.succs.size() == 0) { + s.d = 0.0f; + } else { + s.d = (((float) succSum(n) / (float) n.succs.size())) / 2; + } + } else { + s.d = (float) predSum(n) / (float) n.preds.size(); + } + } + + private void upValues() { + for (Segment s : segments) { + upValues(s); + } + } + + private void upValues(Segment s) { + LayoutNode n = s.nodes.get(0); // Only first node needed, all other have same coordinate + + if (n.succs.size() == 0) { + // Value is 0.0; + if (n.preds.size() == 0) { + s.d = 0.0f; + } else { + s.d = (float) predSum(n) / (float) n.preds.size(); + } + } else { + s.d = ((float) succSum(n) / (float) n.succs.size()) / 2; + } + } + + private void sweep(boolean down) { + + if (down) { + downValues(); + } else { + upValues(); + } + + SortedSet regions = new TreeSet(regionComparator); + for (Segment s : segments) { + s.region = new Region(); + s.region.minOrderNumber = s.orderNumber; + s.region.segments.add(s); + s.region.d = s.d; + regions.add(s.region); + } + + for (Segment s : segments) { + for (LayoutNode n : s.nodes) { + if (n.pos != 0) { + LayoutNode prevNode = layers[n.layer].get(n.pos - 1); + if (prevNode.x + prevNode.width + xOffset == n.x) { + Segment other = hashMap.get(prevNode); + Region r1 = s.region; + Region r2 = other.region; + // They are close together + if (r1 != r2 && r2.d >= r1.d) { + + if (r2.segments.size() < r1.segments.size()) { + + r1.d = (r1.d * r1.segments.size() + r2.d * r2.segments.size()) / (r1.segments.size() + r2.segments.size()); + + for (Segment tempS : r2.segments) { + r1.segments.add(tempS); + tempS.region = r1; + r1.minOrderNumber = Math.min(r1.minOrderNumber, tempS.orderNumber); + } + + regions.remove(r2); + } else { + + r2.d = (r1.d * r1.segments.size() + r2.d * r2.segments.size()) / (r1.segments.size() + r2.segments.size()); + + for (Segment tempS : r1.segments) { + r2.segments.add(tempS); + tempS.region = 
r2; + r2.minOrderNumber = Math.min(r2.minOrderNumber, tempS.orderNumber); + } + + regions.remove(r1); + } + } + } + } + } + } + + + + ArrayList reversedRegions = new ArrayList(); + for (Region r : regions) { + if (r.d < 0) { + processRegion(r, down); + } else { + reversedRegions.add(0, r); + } + } + + for (Region r : reversedRegions) { + processRegion(r, down); + } + + } + + private void processRegion(Region r, boolean down) { + + // Do not move + if ((int) r.d == 0) { + return; + } + + ArrayList arr = new ArrayList(); + for (Segment s : r.segments) { + arr.add(s); + } + + if (r.d > 0) { + Collections.reverse(arr); + } + + for (Segment s : arr) { + + + int min = (int) r.d; + if (min < 0) { + min = -min; + } + + for (LayoutNode n : s.nodes) { + + int layer = n.layer; + int pos = n.pos; + + + if (r.d > 0) { + + if (pos != layers[layer].size() - 1) { + + int off = layers[layer].get(pos + 1).x - n.x - xOffset - n.width; + assert off >= 0; + if (off < min) { + min = off; + } + } + + } else { + + if (pos != 0) { + + int off = n.x - xOffset - layers[layer].get(pos - 1).x - layers[layer].get(pos - 1).width; + assert off >= 0; + if (off < min) { + min = off; + } + } + } + } + + assert min >= 0; + if (min != 0) { + for (LayoutNode n : s.nodes) { + if (r.d > 0) { + n.x += min; + } else { + n.x -= min; + } + + } + } + } + } + + protected void run() { + + generateSegments(); + addEdges(); + topologicalSorting(); + initialPositions(); + for (int i = 0; i < SWEEP_ITERATIONS; i++) { + + sweep(true); + sweep(true); + sweep(false); + sweep(false); + } + + sweep(true); + sweep(true); + } + } + private static Comparator crossingNodeComparator = new Comparator() { + + public int compare(LayoutNode n1, LayoutNode n2) { + return n1.crossingNumber - n2.crossingNumber; + } + }; + + private class CrossingReduction extends AlgorithmPart { + + @Override + public void preCheck() { + for (LayoutNode n : nodes) { + assert n.layer < layerCount; + } + } + + protected void run() { + + layers = new List[layerCount]; + + for (int i = 0; i < layerCount; i++) { + layers[i] = new ArrayList(); + } + + + // Generate initial ordering + HashSet visited = new HashSet(); + for (LayoutNode n : nodes) { + if (n.layer == 0) { + layers[0].add(n); + visited.add(n); + } else if (n.preds.size() == 0) { + layers[n.layer].add(n); + visited.add(n); + } + } + + for (int i = 0; i < layers.length - 1; i++) { + for (LayoutNode n : layers[i]) { + for (LayoutEdge e : n.succs) { + if (!visited.contains(e.to)) { + visited.add(e.to); + layers[i + 1].add(e.to); + } + } + } + } + + + updatePositions(); + + initX(); + + // Optimize + for (int i = 0; i < CROSSING_ITERATIONS; i++) { + downSweep(); + upSweep(); + } + + /*for(int i=0; i 0) { + sum /= n.preds.size(); + n.crossingNumber = sum; + //if(n.vertex == null) n.crossingNumber += layers[i].size(); + } + } + + + updateCrossingNumbers(i, true); + Collections.sort(layers[i], crossingNodeComparator); + updateXOfLayer(i); + + int z = 0; + for (LayoutNode n : layers[i]) { + n.pos = z; + z++; + } + } + } + + private void updateCrossingNumbers(int index, boolean down) { + for (int i = 0; i < layers[index].size(); i++) { + LayoutNode n = layers[index].get(i); + LayoutNode prev = null; + if (i > 0) { + prev = layers[index].get(i - 1); + } + LayoutNode next = null; + if (i < layers[index].size() - 1) { + next = layers[index].get(i + 1); + } + + boolean cond = (n.succs.size() == 0); + if (down) { + cond = (n.preds.size() == 0); + } + + if (cond) { + + if (prev != null && next != null) { + n.crossingNumber = 
(prev.crossingNumber + next.crossingNumber) / 2; + } else if (prev != null) { + n.crossingNumber = prev.crossingNumber; + } else if (next != null) { + n.crossingNumber = next.crossingNumber; + } + } + } + } + /* + private void doubleSweep() { + // Downsweep + for(int i=0; i= layerCount) { + index = 2*layerCount - i - 1; + } + for(LayoutNode n : layers[index]) { + float sum = 0.0f; + for(LayoutEdge e : n.preds) { + float cur = e.from.pos; + if(e.from.width != 0 && e.relativeFrom != 0) { + cur += (float)e.relativeFrom / (float)(e.from.width); + } + sum += cur; + } + for(LayoutEdge e : n.succs) { + float cur = e.to.pos; + if(e.to.width != 0 && e.relativeTo != 0) { + cur += (float)e.relativeTo / (float)(e.to.width); + } + sum += cur; + } + if(n.preds.size() + n.succs.size() > 0) { + sum /= n.preds.size() + n.succs.size(); + n.crossingNumber = sum; + } + } + Collections.sort(layers[index], crossingNodeComparator); + updateXOfLayer(index); + int z = 0; + for(LayoutNode n : layers[index]) { + n.pos = z; + z++; + } + } + }*/ + + private void upSweep() { + // Upsweep + for (int i = layerCount - 2; i >= 0; i--) { + + for (LayoutNode n : layers[i]) { + n.crossingNumber = 0; + } + + for (LayoutNode n : layers[i]) { + + int sum = 0; + for (LayoutEdge e : n.succs) { + int cur = e.to.x + e.relativeTo;//pos; + /* + if(e.to.width != 0 && e.relativeTo != 0) { + cur += (float)e.relativeTo / (float)(e.to.width); + }*/ + + sum += cur; + } + + if (n.succs.size() > 0) { + sum /= n.succs.size(); + n.crossingNumber = sum; + //if(n.vertex == null) n.crossingNumber += layers[i].size(); + } + + } + + updateCrossingNumbers(i, false); + Collections.sort(layers[i], crossingNodeComparator); + updateXOfLayer(i); + + int z = 0; + for (LayoutNode n : layers[i]) { + n.pos = z; + z++; + } + } + } + + private int evaluate() { + // TODO: Implement efficient evaluate / crossing min + return 0; + } + + @Override + public void postCheck() { + + HashSet visited = new HashSet(); + for (int i = 0; i < layers.length; i++) { + for (LayoutNode n : layers[i]) { + assert !visited.contains(n); + assert n.layer == i; + visited.add(n); + } + } + + } + } + + private class AssignYCoordinates extends AlgorithmPart { + + protected void run() { + int curY = 0; + //maxLayerHeight = new int[layers.length]; + for (int i = 0; i < layers.length; i++) { + int maxHeight = 0; + int baseLine = 0; + int bottomBaseLine = 0; + for (LayoutNode n : layers[i]) { + maxHeight = Math.max(maxHeight, n.height - n.yOffset - n.bottomYOffset); + baseLine = Math.max(baseLine, n.yOffset); + bottomBaseLine = Math.max(bottomBaseLine, n.bottomYOffset); + } + + int maxXOffset = 0; + for (LayoutNode n : layers[i]) { + if (n.vertex == null) { + // Dummy node + n.y = curY; + n.height = maxHeight + baseLine + bottomBaseLine; + + } else { + n.y = curY + baseLine + (maxHeight - (n.height - n.yOffset - n.bottomYOffset)) / 2 - n.yOffset; + } + + for (LayoutEdge e : n.succs) { + int curXOffset = Math.abs(n.x - e.to.x); + maxXOffset = Math.max(curXOffset, maxXOffset); + } + } + + //maxLayerHeight[i] = maxHeight + baseLine + bottomBaseLine; + + curY += maxHeight + baseLine + bottomBaseLine; + curY += layerOffset + (int) Math.sqrt(maxXOffset); + } + } + } + + private class CreateDummyNodes extends AlgorithmPart { + + private int oldNodeCount; + + @Override + protected void preCheck() { + for (LayoutNode n : nodes) { + for (LayoutEdge e : n.succs) { + assert e.from != null; + assert e.from == n; + assert e.from.layer < e.to.layer; + } + + for (LayoutEdge e : n.preds) { + assert e.to != 
null; + assert e.to == n; + } + } + } + + protected void run() { + oldNodeCount = nodes.size(); + + + if (combine == Combine.SAME_OUTPUTS) { + + Comparator comparator = new Comparator() { + + public int compare(LayoutEdge e1, LayoutEdge e2) { + return e1.to.layer - e2.to.layer; + } + }; + HashMap> portHash = new HashMap>(); + ArrayList currentNodes = new ArrayList(nodes); + for (LayoutNode n : currentNodes) { + portHash.clear(); + + ArrayList succs = new ArrayList(n.succs); + HashMap topNodeHash = new HashMap(); + HashMap> bottomNodeHash = new HashMap>(); + for (LayoutEdge e : succs) { + assert e.from.layer < e.to.layer; + if (e.from.layer != e.to.layer - 1) { + if (maxLayerLength != -1 && e.to.layer - e.from.layer > maxLayerLength/* && e.to.preds.size() > 1 && e.from.succs.size() > 1*/) { + assert maxLayerLength > 2; + e.to.preds.remove(e); + e.from.succs.remove(e); + + LayoutEdge topEdge = null; + + if (combine == Combine.SAME_OUTPUTS && topNodeHash.containsKey(e.relativeFrom)) { + LayoutNode topNode = topNodeHash.get(e.relativeFrom); + topEdge = new LayoutEdge(); + topEdge.relativeFrom = e.relativeFrom; + topEdge.from = e.from; + topEdge.relativeTo = topNode.width / 2; + topEdge.to = topNode; + topEdge.link = e.link; + e.from.succs.add(topEdge); + topNode.preds.add(topEdge); + } else { + + LayoutNode topNode = new LayoutNode(); + topNode.layer = e.from.layer + 1; + topNode.width = DUMMY_WIDTH; + topNode.height = DUMMY_HEIGHT; + nodes.add(topNode); + topEdge = new LayoutEdge(); + topEdge.relativeFrom = e.relativeFrom; + topEdge.from = e.from; + topEdge.relativeTo = topNode.width / 2; + topEdge.to = topNode; + topEdge.link = e.link; + e.from.succs.add(topEdge); + topNode.preds.add(topEdge); + topNodeHash.put(e.relativeFrom, topNode); + bottomNodeHash.put(e.relativeFrom, new HashMap()); + } + + HashMap hash = bottomNodeHash.get(e.relativeFrom); + + LayoutNode bottomNode = null; + if (hash.containsKey(e.to.layer)) { + bottomNode = hash.get(e.to.layer); + } else { + + bottomNode = new LayoutNode(); + bottomNode.layer = e.to.layer - 1; + bottomNode.width = DUMMY_WIDTH; + bottomNode.height = DUMMY_HEIGHT; + nodes.add(bottomNode); + hash.put(e.to.layer, bottomNode); + } + + LayoutEdge bottomEdge = new LayoutEdge(); + bottomEdge.relativeTo = e.relativeTo; + bottomEdge.to = e.to; + bottomEdge.relativeFrom = bottomNode.width / 2; + bottomEdge.from = bottomNode; + bottomEdge.link = e.link; + e.to.preds.add(bottomEdge); + bottomEdgeHash.put(topEdge, bottomEdge); + bottomNode.succs.add(bottomEdge); + + } else { + Integer i = e.relativeFrom; + if (!portHash.containsKey(i)) { + portHash.put(i, new ArrayList()); + } + + if (n.vertex.toString().equals("1012 CastPP")) { + int x = 0; + } + + portHash.get(i).add(e); + } + } + } + + succs = new ArrayList(n.succs); + for (LayoutEdge e : succs) { + + Integer i = e.relativeFrom; + if (portHash.containsKey(i)) { + + List list = portHash.get(i); + Collections.sort(list, comparator); + + if (list.size() == 1) { + processSingleEdge(list.get(0)); + } else { + + int maxLayer = list.get(0).to.layer; + for (LayoutEdge curEdge : list) { + maxLayer = Math.max(maxLayer, curEdge.to.layer); + } + + + int cnt = maxLayer - n.layer - 1; + LayoutEdge[] edges = new LayoutEdge[cnt]; + LayoutNode[] nodes = new LayoutNode[cnt]; + edges[0] = new LayoutEdge(); + edges[0].from = n; + edges[0].relativeFrom = i; + n.succs.add(edges[0]); + + nodes[0] = new LayoutNode(); + nodes[0].width = dummyWidth; + nodes[0].height = dummyHeight; + nodes[0].layer = n.layer + 1; + 
nodes[0].preds.add(edges[0]); + edges[0].to = nodes[0]; + edges[0].relativeTo = nodes[0].width / 2; + for (int j = 1; j < cnt; j++) { + edges[j] = new LayoutEdge(); + edges[j].from = nodes[j - 1]; + edges[j].relativeFrom = nodes[j - 1].width / 2; + nodes[j - 1].succs.add(edges[j]); + nodes[j] = new LayoutNode(); + nodes[j].width = dummyWidth; + nodes[j].height = dummyHeight; + nodes[j].layer = n.layer + j + 1; + nodes[j].preds.add(edges[j]); + edges[j].to = nodes[j]; + edges[j].relativeTo = nodes[j].width / 2; + } + + for (LayoutEdge curEdge : list) { + assert curEdge.to.layer - n.layer - 2 >= 0; + assert curEdge.to.layer - n.layer - 2 < cnt; + LayoutNode anchor = nodes[curEdge.to.layer - n.layer - 2]; + anchor.succs.add(curEdge); + curEdge.from = anchor; + curEdge.relativeFrom = anchor.width / 2; + n.succs.remove(curEdge); + } + + } + + portHash.remove(i); + } + } + } + } else if (combine == Combine.SAME_INPUTS) { + throw new UnsupportedOperationException("Currently not supported"); + } else { + ArrayList currentNodes = new ArrayList(nodes); + for (LayoutNode n : currentNodes) { + for (LayoutEdge e : n.succs) { + processSingleEdge(e); + } + } + } + } + + private void processSingleEdge(LayoutEdge e) { + LayoutNode n = e.from; + if (e.to.layer > n.layer + 1) { + LayoutEdge last = e; + for (int i = n.layer + 1; i < last.to.layer; i++) { + last = addBetween(last, i); + } + } + } + + private LayoutEdge addBetween(LayoutEdge e, int layer) { + LayoutNode n = new LayoutNode(); + n.width = dummyWidth; + n.height = dummyHeight; + n.layer = layer; + n.preds.add(e); + nodes.add(n); + LayoutEdge result = new LayoutEdge(); + n.succs.add(result); + result.from = n; + result.relativeFrom = n.width / 2; + result.to = e.to; + result.relativeTo = e.relativeTo; + e.relativeTo = n.width / 2; + e.to.preds.remove(e); + e.to.preds.add(result); + e.to = n; + return result; + } + + @Override + public void printStatistics() { + System.out.println("Dummy nodes created: " + (nodes.size() - oldNodeCount)); + } + + @Override + public void postCheck() { + ArrayList currentNodes = new ArrayList(nodes); + for (LayoutNode n : currentNodes) { + for (LayoutEdge e : n.succs) { + assert e.from.layer == e.to.layer - 1; + } + } + + for (int i = 0; i < layers.length; i++) { + assert layers[i].size() > 0; + for (LayoutNode n : layers[i]) { + assert n.layer == i; + } + } + } + } + + private class AssignLayers extends AlgorithmPart { + + @Override + public void preCheck() { + for (LayoutNode n : nodes) { + assert n.layer == -1; + } + } + + protected void run() { + HashSet set = new HashSet(); + for (LayoutNode n : nodes) { + if (n.preds.size() == 0) { + set.add(n); + n.layer = 0; + } + } + + int z = minLayerDifference; + HashSet newSet = new HashSet(); + HashSet failed = new HashSet(); + while (!set.isEmpty()) { + + newSet.clear(); + failed.clear(); + + for (LayoutNode n : set) { + + for (LayoutEdge se : n.succs) { + LayoutNode s = se.to; + if (!newSet.contains(s) && !failed.contains(s)) { + boolean ok = true; + for (LayoutEdge pe : s.preds) { + LayoutNode p = pe.from; + if (p.layer == -1) { + ok = false; + break; + } + } + + if (ok) { + newSet.add(s); + } else { + failed.add(s); + } + } + } + + } + + for (LayoutNode n : newSet) { + n.layer = z; + } + + // Swap sets + HashSet tmp = set; + set = newSet; + newSet = tmp; + z += minLayerDifference; + } + + optimize(set); + + layerCount = z - minLayerDifference; + + for (Vertex v : lastLayerHint) { + + LayoutNode n = vertexToLayoutNode.get(v); + assert n.succs.size() == 0; + n.layer = 
layerCount - 1; + } + + for (Vertex v : firstLayerHint) { + LayoutNode n = vertexToLayoutNode.get(v); + assert n.preds.size() == 0; + assert n.layer == 0; + } + } + + public void optimize(HashSet set) { + + for (LayoutNode n : set) { + if (n.preds.size() == 0 && n.succs.size() > 0) { + int minLayer = n.succs.get(0).to.layer; + for (LayoutEdge e : n.succs) { + minLayer = Math.min(minLayer, e.to.layer); + } + + n.layer = minLayer - 1; + } + } + + } + + @Override + public void postCheck() { + for (LayoutNode n : nodes) { + assert n.layer >= 0; + assert n.layer < layerCount; + for (LayoutEdge e : n.succs) { + assert e.from.layer < e.to.layer; + } + } + } + } + + private class ReverseEdges extends AlgorithmPart { + + private HashSet visited; + private HashSet active; + + protected void run() { + + // Remove self-edges, TODO: Special treatment + for (LayoutNode node : nodes) { + ArrayList succs = new ArrayList(node.succs); + for (LayoutEdge e : succs) { + assert e.from == node; + if (e.to == node) { + node.succs.remove(e); + node.preds.remove(e); + } + } + } + + // Reverse inputs of roots + for (LayoutNode node : nodes) { + if (node.vertex.isRoot()) { + boolean ok = true; + for (LayoutEdge e : node.preds) { + if (e.from.vertex.isRoot()) { + ok = false; + break; + } + } + if (ok) { + reverseAllInputs(node); + } + } + } + + + // Start DFS and reverse back edges + visited = new HashSet(); + active = new HashSet(); + for (LayoutNode node : nodes) { + DFS(node); + } + + + for (LayoutNode node : nodes) { + + SortedSet reversedDown = new TreeSet(); + + for (LayoutEdge e : node.succs) { + if (reversedLinks.contains(e.link)) { + reversedDown.add(e.relativeFrom); + } + } + + + SortedSet reversedUp = null; + if (reversedDown.size() == 0) { + reversedUp = new TreeSet(Collections.reverseOrder()); + } else { + reversedUp = new TreeSet(); + } + + for (LayoutEdge e : node.preds) { + if (reversedLinks.contains(e.link)) { + reversedUp.add(e.relativeTo); + } + } + + final int offset = X_OFFSET + DUMMY_WIDTH; + + int curX = 0; + int curWidth = node.width + reversedDown.size() * offset; + for (int pos : reversedDown) { + ArrayList reversedSuccs = new ArrayList(); + for (LayoutEdge e : node.succs) { + if (e.relativeFrom == pos && reversedLinks.contains(e.link)) { + reversedSuccs.add(e); + e.relativeFrom = curWidth; + } + } + + ArrayList startPoints = new ArrayList(); + startPoints.add(new Point(curWidth, curX)); + startPoints.add(new Point(pos, curX)); + startPoints.add(new Point(pos, reversedDown.size() * offset)); + for (LayoutEdge e : reversedSuccs) { + reversedLinkStartPoints.put(e.link, startPoints); + } + + node.inOffsets.put(pos, -curX); + curX += offset; + node.height += offset; + node.yOffset += offset; + curWidth -= offset; + } + node.width += reversedDown.size() * offset; + + if (reversedDown.size() == 0) { + curX = offset; + } else { + curX = -offset; + } + + curX = 0; + int minX = 0; + if (reversedDown.size() != 0) { + minX = -offset * reversedUp.size(); + } + + int oldNodeHeight = node.height; + for (int pos : reversedUp) { + ArrayList reversedPreds = new ArrayList(); + for (LayoutEdge e : node.preds) { + if (e.relativeTo == pos && reversedLinks.contains(e.link)) { + if (reversedDown.size() == 0) { + e.relativeTo = node.width + offset; + } else { + e.relativeTo = curX - offset; + } + + reversedPreds.add(e); + } + } + node.height += offset; + ArrayList endPoints = new ArrayList(); + + if (reversedDown.size() == 0) { + + curX += offset; + node.width += offset; + endPoints.add(new Point(node.width, 
node.height)); + + } else { + curX -= offset; + node.width += offset; + endPoints.add(new Point(curX, node.height)); + } + + node.outOffsets.put(pos - minX, curX); + curX += offset; + node.bottomYOffset += offset; + + + endPoints.add(new Point(pos, node.height)); + endPoints.add(new Point(pos, oldNodeHeight)); + for (LayoutEdge e : reversedPreds) { + reversedLinkEndPoints.put(e.link, endPoints); + } + } + + + if (minX < 0) { + for (LayoutEdge e : node.preds) { + e.relativeTo -= minX; + } + + for (LayoutEdge e : node.succs) { + e.relativeFrom -= minX; + } + + node.xOffset = -minX; + node.width += -minX; + } + } + + } + + private void DFS(LayoutNode startNode) { + if (visited.contains(startNode)) { + return; + } + + Stack stack = new Stack(); + stack.push(startNode); + + while (!stack.empty()) { + LayoutNode node = stack.pop(); + + if (visited.contains(node)) { + // Node no longer active + active.remove(node); + continue; + } + + // Repush immediately to know when no longer active + stack.push(node); + visited.add(node); + active.add(node); + + ArrayList succs = new ArrayList(node.succs); + for (LayoutEdge e : succs) { + if (active.contains(e.to)) { + assert visited.contains(e.to); + // Encountered back edge + reverseEdge(e); + } else if (!visited.contains(e.to) && (linksToFollow.size() == 0 || linksToFollow.contains(e.link))) { + stack.push(e.to); + } + } + } + } + + private void reverseAllInputs(LayoutNode node) { + for (LayoutEdge e : node.preds) { + assert !reversedLinks.contains(e.link); + reversedLinks.add(e.link); + node.succs.add(e); + e.from.preds.add(e); + e.from.succs.remove(e); + int oldRelativeFrom = e.relativeFrom; + int oldRelativeTo = e.relativeTo; + e.to = e.from; + e.from = node; + e.relativeFrom = oldRelativeTo; + e.relativeTo = oldRelativeFrom; + } + node.preds.clear(); + } + + private void reverseEdge(LayoutEdge e) { + assert !reversedLinks.contains(e.link); + reversedLinks.add(e.link); + + LayoutNode oldFrom = e.from; + LayoutNode oldTo = e.to; + int oldRelativeFrom = e.relativeFrom; + int oldRelativeTo = e.relativeTo; + + e.from = oldTo; + e.to = oldFrom; + e.relativeFrom = oldRelativeTo; + e.relativeTo = oldRelativeFrom; + + oldFrom.succs.remove(e); + oldFrom.preds.add(e); + oldTo.preds.remove(e); + oldTo.succs.add(e); + } + + @Override + public void postCheck() { + + for (LayoutNode n : nodes) { + + HashSet curVisited = new HashSet(); + Queue queue = new LinkedList(); + for (LayoutEdge e : n.succs) { + LayoutNode s = e.to; + queue.add(s); + curVisited.add(s); + } + + while (!queue.isEmpty()) { + LayoutNode curNode = queue.remove(); + + for (LayoutEdge e : curNode.succs) { + assert e.to != n; + if (!curVisited.contains(e.to)) { + queue.add(e.to); + curVisited.add(e.to); + } + } + } + } + } + } + private Comparator linkComparator = new Comparator() { + + public int compare(Link l1, Link l2) { + + int result = l1.getFrom().getVertex().compareTo(l2.getFrom().getVertex()); + if (result != 0) { + return result; + } + result = l1.getTo().getVertex().compareTo(l2.getTo().getVertex()); + if (result != 0) { + return result; + } + result = l1.getFrom().getRelativePosition().x - l2.getFrom().getRelativePosition().x; + if (result != 0) { + return result; + } + result = l1.getTo().getRelativePosition().x - l2.getTo().getRelativePosition().x; + return result; + } + }; + + private class BuildDatastructure extends AlgorithmPart { + + protected void run() { + // Set up nodes + List vertices = new ArrayList(graph.getVertices()); + Collections.sort(vertices); + + for (Vertex v : 
vertices) { + LayoutNode node = new LayoutNode(); + Dimension size = v.getSize(); + node.width = (int) size.getWidth(); + node.height = (int) size.getHeight(); + node.vertex = v; + nodes.add(node); + vertexToLayoutNode.put(v, node); + } + + // Set up edges + List links = new ArrayList(graph.getLinks()); + Collections.sort(links, linkComparator); + for (Link l : links) { + LayoutEdge edge = new LayoutEdge(); + assert vertexToLayoutNode.containsKey(l.getFrom().getVertex()); + assert vertexToLayoutNode.containsKey(l.getTo().getVertex()); + edge.from = vertexToLayoutNode.get(l.getFrom().getVertex()); + edge.to = vertexToLayoutNode.get(l.getTo().getVertex()); + edge.relativeFrom = l.getFrom().getRelativePosition().x; + edge.relativeTo = l.getTo().getRelativePosition().x; + edge.link = l; + edge.from.succs.add(edge); + edge.to.preds.add(edge); + //assert edge.from != edge.to; // No self-loops allowed + } + + for (Link l : importantLinks) { + if (!vertexToLayoutNode.containsKey(l.getFrom().getVertex()) || + vertexToLayoutNode.containsKey(l.getTo().getVertex())) { + continue; + } + LayoutNode from = vertexToLayoutNode.get(l.getFrom().getVertex()); + LayoutNode to = vertexToLayoutNode.get(l.getTo().getVertex()); + for (LayoutEdge e : from.succs) { + if (e.to == to) { + linksToFollow.add(e.link); + } + } + } + } + + @Override + public void postCheck() { + + assert vertexToLayoutNode.keySet().size() == nodes.size(); + assert nodes.size() == graph.getVertices().size(); + + for (Vertex v : graph.getVertices()) { + + LayoutNode node = vertexToLayoutNode.get(v); + assert node != null; + + for (LayoutEdge e : node.succs) { + assert e.from == node; + } + + for (LayoutEdge e : node.preds) { + assert e.to == node; + } + + } + } + } + + public void doRouting(LayoutGraph graph) { + // Do nothing for now + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/InterClusterConnection.java 2009-08-01 04:19:30.945715999 +0100 @@ -0,0 +1,76 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import com.sun.hotspot.igv.layout.Link; +import com.sun.hotspot.igv.layout.Port; +import java.awt.Point; +import java.util.ArrayList; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class InterClusterConnection implements Link { + + private Port inputSlot; + private Port outputSlot; + private List intermediatePoints; + private ClusterInputSlotNode inputSlotNode; + private ClusterOutputSlotNode outputSlotNode; + + public InterClusterConnection(ClusterOutputSlotNode outputSlotNode, ClusterInputSlotNode inputSlotNode) { + this.outputSlotNode = outputSlotNode; + this.inputSlotNode = inputSlotNode; + this.inputSlot = inputSlotNode.getInputSlot(); + this.outputSlot = outputSlotNode.getOutputSlot(); + intermediatePoints = new ArrayList(); + } + + public ClusterOutputSlotNode getOutputSlotNode() { + return outputSlotNode; + } + + public Port getTo() { + return inputSlot; + } + + public Port getFrom() { + return outputSlot; + } + + public void setControlPoints(List p) { + this.intermediatePoints = p; + } + + public List getControlPoints() { + return intermediatePoints; + } + + @Override + public String toString() { + return "InterClusterConnection[from=" + getFrom() + ", to=" + getTo() + "]"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/Node.java 2009-08-01 04:19:31.381903773 +0100 @@ -0,0 +1,161 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class Node { + + private N data; + private List> inEdges; + private List> outEdges; + private boolean visited; + private boolean active; + private boolean reachable; + private Graph graph; + + protected boolean isVisited() { + return visited; + } + + protected void setVisited(boolean b) { + visited = b; + } + + protected boolean isReachable() { + return reachable; + } + + protected void setReachable(boolean b) { + reachable = b; + } + + protected boolean isActive() { + return active; + } + + protected void setActive(boolean b) { + active = b; + } + + public int getInDegree() { + return getInDegree(true); + } + + public int getInDegree(boolean countSelfLoops) { + if (countSelfLoops) { + return inEdges.size(); + } else { + int cnt = 0; + for (Edge e : inEdges) { + if (e.getSource() != this) { + cnt++; + } + } + return cnt; + } + } + + public int getOutDegree() { + return outEdges.size(); + } + + protected Node(Graph graph, N data) { + setData(data); + this.graph = graph; + inEdges = new ArrayList>(); + outEdges = new ArrayList>(); + } + + protected void addInEdge(Edge e) { + inEdges.add(e); + } + + public Graph getGraph() { + return graph; + } + + protected void addOutEdge(Edge e) { + outEdges.add(e); + } + + protected void removeInEdge(Edge e) { + //assert inEdges.contains(e); + inEdges.remove(e); + } + + protected void removeOutEdge(Edge e) { + //assert outEdges.contains(e); + outEdges.remove(e); + } + + public List> getInEdges() { + return Collections.unmodifiableList(inEdges); + } + + public List> getOutEdges() { + return Collections.unmodifiableList(outEdges); + } + + public List> getSuccessors() { + ArrayList> succ = new ArrayList>(); + for (Edge e : getOutEdges()) { + Node n = e.getDest(); + if (!succ.contains(n)) { + succ.add(n); + } + } + return succ; + } + + public List> getPredecessors() { + ArrayList> pred = new ArrayList>(); + for (Edge e : getInEdges()) { + Node n = e.getSource(); + if (!pred.contains(n)) { + pred.add(n); + } + } + return pred; + } + + public N getData() { + return data; + } + + public void setData(N d) { + data = d; + } + + @Override + public String toString() { + return "Node: " + data; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/OldHierarchicalLayoutManager.java 2009-08-01 04:19:31.827615063 +0100 @@ -0,0 +1,1222 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +import java.awt.Point; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import com.sun.hotspot.igv.layout.LayoutGraph; +import com.sun.hotspot.igv.layout.LayoutManager; +import com.sun.hotspot.igv.layout.Link; +import com.sun.hotspot.igv.layout.Port; +import com.sun.hotspot.igv.layout.Vertex; + +/** + * + * @author Thomas Wuerthinger + */ +public class OldHierarchicalLayoutManager implements LayoutManager { + + public static final int DUMMY_WIDTH = 0; + public static final int DUMMY_HEIGHT = 0; + public static final int LAYER_OFFSET = 50; + public static final int OFFSET = 8; + public static final boolean VERTICAL_LAYOUT = true; + public static final boolean ASSERT = false; + public static final boolean TRACE = false; + public static final Timing initTiming = new Timing("init"); + public static final Timing removeCyclesTiming = new Timing("removeCycles"); + public static final Timing reversedEdgesTiming = new Timing("reversedEdges"); + public static final Timing assignLayersTiming = new Timing("assignLayers"); + public static final Timing dummyNodesTiming = new Timing("dummyNodes"); + public static final Timing crossingReductionTiming = new Timing("crossingReduction"); + public static final Timing assignCoordinatesTiming = new Timing("assignCoordinates"); + public static final Timing assignRealTiming = new Timing("assignReal"); + public static final Timing rootVertexTiming = new Timing("rootVertex"); + public static final Timing createEdgesTiming = new Timing("createEdges"); + public static final Timing optimizeMedianTiming = new Timing("optimizeMedian"); + private Combine combine; + + public enum Combine { + + NONE, + SAME_INPUTS, + SAME_OUTPUTS + } + + private class NodeData { + + private Map reversePositions; + private Vertex node; + private Link edge; + private int layer; + private int x; + private int y; + private int width; + + public NodeData(Vertex node) { + reversePositions = new HashMap(); + layer = -1; + this.node = node; + assert node != null; + + if (VERTICAL_LAYOUT) { + width = node.getSize().width; + } else { + width = node.getSize().height; + } + } + + public NodeData(Link edge) { + layer = -1; + this.edge = edge; + assert edge != null; + + if (VERTICAL_LAYOUT) { + width = DUMMY_WIDTH; + } else { + width = DUMMY_HEIGHT; + } + } + + public Vertex getNode() { + return node; + } + + public Link getEdge() { + return edge; + } + + public int getCoordinate() { + return x; + } + + public void setCoordinate(int x) { + this.x = x; + } + + public int getX() { + if (VERTICAL_LAYOUT) { + return x; + } else { + return y; + } + } + + public int getY() { + if (VERTICAL_LAYOUT) { + return y; + } else { + return x; + } + } + + public void setLayerCoordinate(int y) { + this.y = y; + } + + public void setLayer(int x) { + layer = x; + } + + public int getLayer() { + return layer; + } + + public boolean isDummy() { + return edge != null; + } + + public int getWidth() { + return width; + } + + public void addReversedStartEdge(Edge e) { + assert e.getData().isReversed(); + Port port = e.getData().getEdge().getTo(); + int pos = addReversedPort(port); + Point start = e.getData().getRelativeStart(); 
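+            // The reversed edge keeps its original start port but is routed around
+            // the node: two bend points lead from the old port down past the node
+            // (the y offset grows with every reverse port allocated by
+            // addReversedPort) and across to the new slot at x == pos, which then
+            // becomes the edge's relative start at the top of the node.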
+ e.getData().addStartPoint(start); + int yCoord = node.getSize().height + width - node.getSize().width; + e.getData().addStartPoint(new Point(start.x, yCoord)); + e.getData().addStartPoint(new Point(pos, yCoord)); + e.getData().setRelativeStart(new Point(pos, 0)); + } + + private int addReversedPort(Port p) { + if (reversePositions.containsKey(p)) { + return reversePositions.get(p); + } else { + width += OFFSET; + reversePositions.put(p, width); + return width; + } + } + + public void addReversedEndEdge(Edge e) { + assert e.getData().isReversed(); + int pos = addReversedPort(e.getData().getEdge().getFrom()); + Point end = e.getData().getRelativeEnd(); + e.getData().setRelativeEnd(new Point(pos, node.getSize().height)); + int yCoord = 0 - width + node.getSize().width; + e.getData().addEndPoint(new Point(pos, yCoord)); + e.getData().addEndPoint(new Point(end.x, yCoord)); + e.getData().addEndPoint(end); + } + + public int getHeight() { + if (isDummy()) { + if (VERTICAL_LAYOUT) { + return DUMMY_HEIGHT; + } else { + return DUMMY_WIDTH; + } + + } else { + if (VERTICAL_LAYOUT) { + return node.getSize().height; + } else { + return node.getSize().width; + } + } + } + + @Override + public String toString() { + if (isDummy()) { + return edge.toString() + "(layer=" + layer + ")"; + } else { + return node.toString() + "(layer=" + layer + ")"; + } + } + } + + private class EdgeData { + + private Point relativeEnd; + private Point relativeStart; + private List startPoints; + private List endPoints; + private boolean important; + private boolean reversed; + private Link edge; + + public EdgeData(Link edge) { + this(edge, false); + } + + public EdgeData(Link edge, boolean rev) { + this.edge = edge; + reversed = rev; + relativeStart = edge.getFrom().getRelativePosition(); + relativeEnd = edge.getTo().getRelativePosition(); + assert relativeStart.x >= 0 && relativeStart.x <= edge.getFrom().getVertex().getSize().width; + assert relativeStart.y >= 0 && relativeStart.y <= edge.getFrom().getVertex().getSize().height; + assert relativeEnd.x >= 0 && relativeEnd.x <= edge.getTo().getVertex().getSize().width; + assert relativeEnd.y >= 0 && relativeEnd.y <= edge.getTo().getVertex().getSize().height; + startPoints = new ArrayList(); + endPoints = new ArrayList(); + this.important = true; + } + + public boolean isImportant() { + return important; + } + + public void setImportant(boolean b) { + this.important = b; + } + + public List getStartPoints() { + return startPoints; + } + + public List getEndPoints() { + return endPoints; + } + + public List getAbsoluteEndPoints() { + if (endPoints.size() == 0) { + return endPoints; + } + + List result = new ArrayList(); + Point point = edge.getTo().getVertex().getPosition(); + for (Point p : endPoints) { + Point p2 = new Point(p.x + point.x, p.y + point.y); + result.add(p2); + } + + return result; + } + + public List getAbsoluteStartPoints() { + if (startPoints.size() == 0) { + return startPoints; + } + + List result = new ArrayList(); + Point point = edge.getFrom().getVertex().getPosition(); + for (Point p : startPoints) { + Point p2 = new Point(p.x + point.x, p.y + point.y); + result.add(p2); + } + + return result; + } + + public void addEndPoint(Point p) { + endPoints.add(p); + } + + public void addStartPoint(Point p) { + startPoints.add(p); + } + + public Link getEdge() { + return edge; + } + + public void setRelativeEnd(Point p) { + relativeEnd = p; + } + + public void setRelativeStart(Point p) { + relativeStart = p; + } + + public Point getRelativeEnd() { + return 
relativeEnd; + } + + public Point getRelativeStart() { + return relativeStart; + } + + public boolean isReversed() { + return reversed; + } + + public void setReversed(boolean b) { + reversed = b; + } + + @Override + public String toString() { + return "EdgeData[reversed=" + reversed + "]"; + } + } + private Graph graph; + private Map> nodeMap; + private int layerOffset; + + /** Creates a new instance of HierarchicalPositionManager */ + public OldHierarchicalLayoutManager(Combine combine) { + this(combine, LAYER_OFFSET); + } + + public OldHierarchicalLayoutManager(Combine combine, int layerOffset) { + this.combine = combine; + this.layerOffset = layerOffset; + } + + public void doRouting(LayoutGraph graph) { + } + + //public void setPositions(PositionedNode rootNode, List nodes, List edges) { + public void doLayout(LayoutGraph layoutGraph) { + doLayout(layoutGraph, new HashSet(), new HashSet()); + } + + public void doLayout(LayoutGraph layoutGraph, Set firstLayerHint, Set lastLayerHint) { + doLayout(layoutGraph, firstLayerHint, lastLayerHint, new HashSet()); + } + + public void doLayout(LayoutGraph layoutGraph, Set firstLayerHint, Set lastLayerHint, Set importantLinksHint) { + + if (TRACE) { + System.out.println("HierarchicalPositionManager.doLayout called"); + System.out.println("Vertex count = " + layoutGraph.getVertices().size() + " Link count = " + layoutGraph.getLinks().size()); + } + + // Nothing to do => quit immediately + if (layoutGraph.getVertices().size() == 0) { + return; + } + + initTiming.start(); + + // Mapping vertex to Node in graph + nodeMap = new HashMap>(); + + graph = new Graph(); + + Set> rootNodes = new HashSet>(); + Set startRootVertices = new HashSet(); + + for (Vertex v : layoutGraph.getVertices()) { + if (v.isRoot()) { + startRootVertices.add(v); + } + } + + rootVertexTiming.start(); + Set rootVertices = layoutGraph.findRootVertices(startRootVertices); + rootVertexTiming.stop(); + + + for (Vertex node : layoutGraph.getVertices()) { + + NodeData data = new NodeData(node); + Node n = graph.createNode(data, node); + nodeMap.put(node, n); + + if (rootVertices.contains(node)) { + rootNodes.add(n); + } + } + + Set links = layoutGraph.getLinks(); + Link[] linkArr = new Link[links.size()]; + links.toArray(linkArr); + + List linkList = new ArrayList(); + for (Link l : linkArr) { + linkList.add(l); + } + + createEdgesTiming.start(); + Collections.sort(linkList, new Comparator() { + + public int compare(Link o1, Link o2) { + int result = o1.getFrom().getVertex().compareTo(o2.getFrom().getVertex()); + if (result == 0) { + return o1.getTo().getVertex().compareTo(o2.getTo().getVertex()); + } else { + return result; + } + } + }); + + for (Link edge : linkList) { + EdgeData data = new EdgeData(edge); + graph.createEdge(graph.getNode(edge.getFrom().getVertex()), graph.getNode(edge.getTo().getVertex()), data, data); + if (importantLinksHint.size() > 0 && !importantLinksHint.contains(edge)) { + data.setImportant(false); + } + } + createEdgesTiming.stop(); + + initTiming.stop(); + + removeCyclesTiming.start(); + + // STEP 1: Remove cycles! 
+ removeCycles(rootNodes); + if (ASSERT) { + assert checkRemoveCycles(); + } + + removeCyclesTiming.stop(); + + reversedEdgesTiming.start(); + + for (Node n : graph.getNodes()) { + List> edges = new ArrayList>(n.getOutEdges()); + Collections.sort(edges, new Comparator>() { + + public int compare(Edge o1, Edge o2) { + return o2.getData().getRelativeEnd().x - o1.getData().getRelativeEnd().x; + } + }); + + + for (Edge e : edges) { + + if (e.getData().isReversed()) { + e.getSource().getData().addReversedEndEdge(e); + } + } + } + + for (Node n : graph.getNodes()) { + List> edges = new ArrayList>(n.getInEdges()); + Collections.sort(edges, new Comparator>() { + + public int compare(Edge o1, Edge o2) { + return o2.getData().getRelativeStart().x - o1.getData().getRelativeStart().x; + } + }); + + + for (Edge e : edges) { + if (e.getData().isReversed()) { + e.getDest().getData().addReversedStartEdge(e); + } + } + } + + reversedEdgesTiming.stop(); + + assignLayersTiming.start(); + // STEP 2: Assign layers! + int maxLayer = assignLayers(rootNodes, firstLayerHint, lastLayerHint); + if (ASSERT) { + assert checkAssignLayers(); + } + + // Put into layer array + //int maxLayer = 0; + //for(Node n : graph.getNodes()) { + // maxLayer = Math.max(maxLayer, n.getData().getLayer()); + //} + + + ArrayList> layers[] = new ArrayList[maxLayer + 1]; + int layerSizes[] = new int[maxLayer + 1]; + for (int i = 0; i < maxLayer + 1; i++) { + layers[i] = new ArrayList>(); + } + + for (Node n : graph.getNodes()) { + int curLayer = n.getData().getLayer(); + layers[curLayer].add(n); + } + + assignLayersTiming.stop(); + + // STEP 3: Insert dummy nodes! + dummyNodesTiming.start(); + insertDummyNodes(layers); + if (ASSERT) { + assert checkDummyNodes(); + } + dummyNodesTiming.stop(); + + crossingReductionTiming.start(); + // STEP 4: Assign Y coordinates + assignLayerCoordinates(layers, layerSizes); + + // STEP 5: Crossing reduction + crossingReduction(layers); + crossingReductionTiming.stop(); + + // STEP 6: Assign Y coordinates + assignCoordinatesTiming.start(); + assignCoordinates(layers); + assignCoordinatesTiming.stop(); + + assignRealTiming.start(); + + // Assign coordinates of nodes to real objects + for (Node n : graph.getNodes()) { + if (!n.getData().isDummy()) { + + Vertex node = n.getData().getNode(); + node.setPosition(new Point(n.getData().getX(), n.getData().getY())); + } + } + + for (Node n : graph.getNodes()) { + if (!n.getData().isDummy()) { + + Vertex node = n.getData().getNode(); + + List> outEdges = n.getOutEdges(); + for (Edge e : outEdges) { + Node succ = e.getDest(); + if (succ.getData().isDummy()) { + //PositionedEdge edge = succ.getData().getEdge(); + List points = new ArrayList(); + assignToRealObjects(layerSizes, succ, points); + } else { + List points = new ArrayList(); + + EdgeData otherEdgeData = e.getData(); + points.addAll(otherEdgeData.getAbsoluteStartPoints()); + Link otherEdge = otherEdgeData.getEdge(); + Point relFrom = new Point(otherEdgeData.getRelativeStart()); + Point from = otherEdge.getFrom().getVertex().getPosition(); + relFrom.move(relFrom.x + from.x, relFrom.y + from.y); + points.add(relFrom); + + Point relTo = new Point(otherEdgeData.getRelativeEnd()); + Point to = otherEdge.getTo().getVertex().getPosition(); + relTo.move(relTo.x + to.x, relTo.y + to.y); + assert from != null; + assert to != null; + points.add(relTo); + points.addAll(otherEdgeData.getAbsoluteEndPoints()); + e.getData().getEdge().setControlPoints(points); + } + } + } + } + + assignRealTiming.stop(); + + 
initTiming.print(); + removeCyclesTiming.print(); + reversedEdgesTiming.print(); + assignLayersTiming.print(); + dummyNodesTiming.print(); + crossingReductionTiming.print(); + assignCoordinatesTiming.print(); + assignRealTiming.print(); + rootVertexTiming.print(); + createEdgesTiming.print(); + optimizeMedianTiming.print(); + } + + public boolean onOneLine(Point p1, Point p2, Point p3) { + int xoff1 = p1.x - p2.x; + int yoff1 = p1.y - p2.y; + int xoff2 = p3.x - p2.x; + int yoff2 = p3.y - p2.x; + + return (xoff1 * yoff2 - yoff1 * xoff2 == 0); + } + + private void assignToRealObjects(int layerSizes[], Node cur, List points) { + assert cur.getData().isDummy(); + + ArrayList otherPoints = new ArrayList(points); + + int size = layerSizes[cur.getData().getLayer()]; + otherPoints.add(new Point(cur.getData().getX(), cur.getData().getY() - size / 2)); + if (otherPoints.size() >= 3 && onOneLine(otherPoints.get(otherPoints.size() - 1), otherPoints.get(otherPoints.size() - 2), otherPoints.get(otherPoints.size() - 3))) { + otherPoints.remove(otherPoints.size() - 2); + } + otherPoints.add(new Point(cur.getData().getX(), cur.getData().getY() + size / 2)); + if (otherPoints.size() >= 3 && onOneLine(otherPoints.get(otherPoints.size() - 1), otherPoints.get(otherPoints.size() - 2), otherPoints.get(otherPoints.size() - 3))) { + otherPoints.remove(otherPoints.size() - 2); + } + + for (int i = 0; i < cur.getOutEdges().size(); i++) { + Node otherSucc = cur.getOutEdges().get(i).getDest(); + + if (otherSucc.getData().isDummy()) { + assignToRealObjects(layerSizes, otherSucc, otherPoints); + } else { + EdgeData otherEdgeData = cur.getOutEdges().get(i).getData(); + Link otherEdge = otherEdgeData.getEdge(); + + List middlePoints = new ArrayList(otherPoints); + if (cur.getOutEdges().get(i).getData().isReversed()) { + Collections.reverse(middlePoints); + } + + ArrayList copy = new ArrayList(); + Point relFrom = new Point(otherEdgeData.getRelativeStart()); + Point from = otherEdge.getFrom().getVertex().getPosition(); + //int moveUp = (size - otherEdge.getFrom().getVertex().getSize().height) / 2; + relFrom.move(relFrom.x + from.x, relFrom.y + from.y); + copy.addAll(otherEdgeData.getAbsoluteStartPoints()); + copy.add(relFrom); + copy.addAll(middlePoints); + + Point relTo = new Point(otherEdgeData.getRelativeEnd()); + Point to = otherEdge.getTo().getVertex().getPosition(); + relTo.move(relTo.x + to.x, relTo.y + to.y); + copy.add(relTo); + + copy.addAll(otherEdgeData.getAbsoluteEndPoints()); + + + otherEdge.setControlPoints(copy); + } + } + } + + private boolean checkDummyNodes() { + for (Edge e : graph.getEdges()) { + if (e.getSource().getData().getLayer() != e.getDest().getData().getLayer() - 1) { + return false; + } + } + + return true; + } + + private void insertDummyNodes(ArrayList> layers[]) { + + int sum = 0; + List> nodes = new ArrayList>(graph.getNodes()); + int edgeCount = 0; + int innerMostLoop = 0; + + for (Node n : nodes) { + List> edges = new ArrayList>(n.getOutEdges()); + for (Edge e : edges) { + + edgeCount++; + Link edge = e.getData().getEdge(); + Node destNode = e.getDest(); + Node lastNode = n; + Edge lastEdge = e; + + boolean searchForNode = (combine != Combine.NONE); + for (int i = n.getData().getLayer() + 1; i < destNode.getData().getLayer(); i++) { + + Node foundNode = null; + if (searchForNode) { + for (Node sameLayerNode : layers[i]) { + innerMostLoop++; + + if (combine == Combine.SAME_OUTPUTS) { + if (sameLayerNode.getData().isDummy() && sameLayerNode.getData().getEdge().getFrom() == edge.getFrom()) 
{ + foundNode = sameLayerNode; + break; + } + } else if (combine == Combine.SAME_INPUTS) { + if (sameLayerNode.getData().isDummy() && sameLayerNode.getData().getEdge().getTo() == edge.getTo()) { + foundNode = sameLayerNode; + break; + } + } + } + } + + if (foundNode == null) { + searchForNode = false; + NodeData intermediateData = new NodeData(edge); + Node curNode = graph.createNode(intermediateData, null); + curNode.getData().setLayer(i); + layers[i].add(0, curNode); + sum++; + lastEdge.remove(); + graph.createEdge(lastNode, curNode, e.getData(), null); + assert lastNode.getData().getLayer() == curNode.getData().getLayer() - 1; + lastEdge = graph.createEdge(curNode, destNode, e.getData(), null); + lastNode = curNode; + } else { + lastEdge.remove(); + lastEdge = graph.createEdge(foundNode, destNode, e.getData(), null); + lastNode = foundNode; + } + + } + } + } + + if (TRACE) { + System.out.println("Number of edges: " + edgeCount); + } + if (TRACE) { + System.out.println("Dummy nodes inserted: " + sum); + } + } + + private void assignLayerCoordinates(ArrayList> layers[], int layerSizes[]) { + int cur = 0; + for (int i = 0; i < layers.length; i++) { + int maxHeight = 0; + for (Node n : layers[i]) { + maxHeight = Math.max(maxHeight, n.getData().getHeight()); + } + + layerSizes[i] = maxHeight; + for (Node n : layers[i]) { + int curCoordinate = cur + (maxHeight - n.getData().getHeight()) / 2; + n.getData().setLayerCoordinate(curCoordinate); + } + cur += maxHeight + layerOffset; + + } + } + + private void assignCoordinates(ArrayList> layers[]) { + + // TODO: change this + for (int i = 0; i < layers.length; i++) { + ArrayList> curArray = layers[i]; + int curY = 0; + for (Node n : curArray) { + + n.getData().setCoordinate(curY); + if (!n.getData().isDummy()) { + curY += n.getData().getWidth(); + } + curY += OFFSET; + + } + } + + int curSol = evaluateSolution(); + if (TRACE) { + System.out.println("First coordinate solution found: " + curSol); + } + + // Sort to correct order + for (int i = 0; i < layers.length; i++) { + Collections.sort(layers[i], new Comparator>() { + + public int compare(Node o1, Node o2) { + if (o2.getData().isDummy()) { + return 1; + } else if (o1.getData().isDummy()) { + return -1; + } + return o2.getInEdges().size() + o2.getOutEdges().size() - o1.getInEdges().size() - o1.getOutEdges().size(); + } + }); + } + + + optimizeMedianTiming.start(); + for (int i = 0; i < 2; i++) { + optimizeMedian(layers); + curSol = evaluateSolution(); + if (TRACE) { + System.out.println("Current coordinate solution found: " + curSol); + } + } + optimizeMedianTiming.stop(); + normalizeCoordinate(); + + } + + private void normalizeCoordinate() { + + int min = Integer.MAX_VALUE; + for (Node n : graph.getNodes()) { + min = Math.min(min, n.getData().getCoordinate()); + } + + for (Node n : graph.getNodes()) { + n.getData().setCoordinate(n.getData().getCoordinate() - min); + } + + } + + private void optimizeMedian(ArrayList> layers[]) { + + // Downsweep + for (int i = 1; i < layers.length; i++) { + + ArrayList> processingList = layers[i]; + ArrayList> alreadyAssigned = new ArrayList>(); + for (Node n : processingList) { + + + ArrayList> preds = new ArrayList>(n.getPredecessors()); + int pos = n.getData().getCoordinate(); + if (preds.size() > 0) { + + Collections.sort(preds, new Comparator>() { + + public int compare(Node o1, Node o2) { + return o1.getData().getCoordinate() - o2.getData().getCoordinate(); + } + }); + + if (preds.size() % 2 == 0) { + assert preds.size() >= 2; + pos = 
(preds.get(preds.size() / 2).getData().getCoordinate() - calcRelativeCoordinate(preds.get(preds.size() / 2), n) + preds.get(preds.size() / 2 - 1).getData().getCoordinate() - calcRelativeCoordinate(preds.get(preds.size() / 2 - 1), n)) / 2; + } else { + assert preds.size() >= 1; + pos = preds.get(preds.size() / 2).getData().getCoordinate() - calcRelativeCoordinate(preds.get(preds.size() / 2), n); + } + } + + tryAdding(alreadyAssigned, n, pos); + } + } + // Upsweep + for (int i = layers.length - 2; i >= 0; i--) { + ArrayList> processingList = layers[i]; + ArrayList> alreadyAssigned = new ArrayList>(); + for (Node n : processingList) { + + ArrayList> succs = new ArrayList>(n.getSuccessors()); + int pos = n.getData().getCoordinate(); + if (succs.size() > 0) { + + Collections.sort(succs, new Comparator>() { + + public int compare(Node o1, Node o2) { + return o1.getData().getCoordinate() - o2.getData().getCoordinate(); + } + }); + + if (succs.size() % 2 == 0) { + assert succs.size() >= 2; + pos = (succs.get(succs.size() / 2).getData().getCoordinate() - calcRelativeCoordinate(n, succs.get(succs.size() / 2)) + succs.get(succs.size() / 2 - 1).getData().getCoordinate() - calcRelativeCoordinate(n, succs.get(succs.size() / 2 - 1))) / 2; + } else { + assert succs.size() >= 1; + pos = succs.get(succs.size() / 2).getData().getCoordinate() - calcRelativeCoordinate(n, succs.get(succs.size() / 2)); + } + } + + tryAdding(alreadyAssigned, n, pos); + } + } + } + + private int median(ArrayList arr) { + assert arr.size() > 0; + Collections.sort(arr); + if (arr.size() % 2 == 0) { + return (arr.get(arr.size() / 2) + arr.get(arr.size() / 2 - 1)) / 2; + } else { + return arr.get(arr.size() / 2); + } + } + + private int calcRelativeCoordinate(Node n, Node succ) { + + if (n.getData().isDummy() && succ.getData().isDummy()) { + return 0; + } + + int pos = 0; + int pos2 = 0; + ArrayList coords2 = new ArrayList(); + ArrayList coords = new ArrayList(); + /*if(!n.getData().isDummy())*/ { + for (Edge e : n.getOutEdges()) { + + //System.out.println("reversed: " + e.getData().isReversed()); + if (e.getDest() == succ) { + + if (e.getData().isReversed()) { + if (!n.getData().isDummy()) { + coords.add(e.getData().getRelativeEnd().x); + } + + if (!succ.getData().isDummy()) { + coords2.add(e.getData().getRelativeStart().x); + } + } else { + if (!n.getData().isDummy()) { + coords.add(e.getData().getRelativeStart().x); + } + + if (!succ.getData().isDummy()) { + coords2.add(e.getData().getRelativeEnd().x); + } + } + } + } + + // assert coords.size() > 0; + if (!n.getData().isDummy()) { + pos = median(coords); + } + + if (!succ.getData().isDummy()) { + pos2 = median(coords2); + } + } + //System.out.println("coords=" + coords); + //System.out.println("coords2=" + coords2); + + return pos - pos2; + } + + private boolean intersect(int v1, int w1, int v2, int w2) { + if (v1 >= v2 && v1 < v2 + w2) { + return true; + } + if (v1 + w1 > v2 && v1 + w1 < v2 + w2) { + return true; + } + if (v1 < v2 && v1 + w1 > v2) { + return true; + } + return false; + } + + private boolean intersect(Node n1, Node n2) { + return intersect(n1.getData().getCoordinate(), n1.getData().getWidth() + OFFSET, n2.getData().getCoordinate(), n2.getData().getWidth() + OFFSET); + } + + private void tryAdding(List> alreadyAssigned, Node node, int pos) { + + boolean doesIntersect = false; + node.getData().setCoordinate(pos); + for (Node n : alreadyAssigned) { + if (n.getData().getCoordinate() + n.getData().getWidth() < pos) { + break; + } else if (intersect(node, n)) { + 
doesIntersect = true; + break; + } + + } + + if (!doesIntersect) { + + // Everything fine, just place the node + int z = 0; + for (Node n : alreadyAssigned) { + if (pos > n.getData().getCoordinate()) { + break; + } + z++; + } + + if (z == -1) { + z = alreadyAssigned.size(); + } + + + if (ASSERT) { + assert !findOverlap(alreadyAssigned, node); + } + alreadyAssigned.add(z, node); + + } else { + + assert alreadyAssigned.size() > 0; + + // Search for alternative location + int minOffset = Integer.MAX_VALUE; + int minIndex = -1; + int minPos = 0; + int w = node.getData().getWidth() + OFFSET; + + // Try top-most + minIndex = 0; + minPos = alreadyAssigned.get(0).getData().getCoordinate() + alreadyAssigned.get(0).getData().getWidth() + OFFSET; + minOffset = Math.abs(minPos - pos); + + // Try bottom-most + Node lastNode = alreadyAssigned.get(alreadyAssigned.size() - 1); + int lastPos = lastNode.getData().getCoordinate() - w; + int lastOffset = Math.abs(lastPos - pos); + if (lastOffset < minOffset) { + minPos = lastPos; + minOffset = lastOffset; + minIndex = alreadyAssigned.size(); + } + + // Try between + for (int i = 0; i < alreadyAssigned.size() - 1; i++) { + Node curNode = alreadyAssigned.get(i); + Node nextNode = alreadyAssigned.get(i + 1); + + int start = nextNode.getData().getCoordinate() + nextNode.getData().getWidth() + OFFSET; + int end = curNode.getData().getCoordinate() - OFFSET; + + int bestPoss = end - node.getData().getWidth(); + if (bestPoss < pos && pos - bestPoss > minOffset) { + // No better solution possible => break + break; + } + + if (end - start >= node.getData().getWidth()) { + // Node could fit here + int cand1 = start; + int cand2 = end - node.getData().getWidth(); + int off1 = Math.abs(cand1 - pos); + int off2 = Math.abs(cand2 - pos); + if (off1 < minOffset) { + minPos = cand1; + minOffset = off1; + minIndex = i + 1; + } + + if (off2 < minOffset) { + minPos = cand2; + minOffset = off2; + minIndex = i + 1; + } + } + } + + assert minIndex != -1; + node.getData().setCoordinate(minPos); + if (ASSERT) { + assert !findOverlap(alreadyAssigned, node); + } + alreadyAssigned.add(minIndex, node); + } + + } + + private boolean findOverlap(List> nodes, Node node) { + + for (Node n1 : nodes) { + if (intersect(n1, node)) { + return true; + } + } + + return false; + } + + private int evaluateSolution() { + + int sum = 0; + for (Edge e : graph.getEdges()) { + Node source = e.getSource(); + Node dest = e.getDest(); + int offset = 0; + offset = Math.abs(source.getData().getCoordinate() - dest.getData().getCoordinate()); + sum += offset; + } + + return sum; + } + + private void crossingReduction(ArrayList> layers[]) { + + for (int i = 0; i < layers.length - 1; i++) { + + ArrayList> curNodes = layers[i]; + ArrayList> nextNodes = layers[i + 1]; + for (Node n : curNodes) { + for (Node succ : n.getSuccessors()) { + if (ASSERT) { + assert nextNodes.contains(succ); + } + nextNodes.remove(succ); + nextNodes.add(succ); + } + } + + } + + } + + private void removeCycles(Set> rootNodes) { + final List> reversedEdges = new ArrayList>(); + + + int removedCount = 0; + int reversedCount = 0; + + Graph.DFSTraversalVisitor visitor = graph.new DFSTraversalVisitor() { + + @Override + public boolean visitEdge(Edge e, boolean backEdge) { + if (backEdge) { + if (ASSERT) { + assert !reversedEdges.contains(e); + } + reversedEdges.add(e); + e.getData().setReversed(!e.getData().isReversed()); + } + + return e.getData().isImportant(); + } + }; + Set> nodes = new HashSet>(); + nodes.addAll(rootNodes); + + assert 
nodes.size() > 0; + + this.graph.traverseDFS(nodes, visitor); + + for (Edge e : reversedEdges) { + if (e.isSelfLoop()) { + e.remove(); + removedCount++; + } else { + e.reverse(); + reversedCount++; + } + } + } + + private boolean checkRemoveCycles() { + return !graph.hasCycles(); + } + // Only used by assignLayers + private int maxLayerTemp; + + private int assignLayers(Set> rootNodes, Set firstLayerHints, + Set lastLayerHints) { + this.maxLayerTemp = -1; + for (Node n : graph.getNodes()) { + n.getData().setLayer(-1); + } + + Graph.BFSTraversalVisitor traverser = graph.new BFSTraversalVisitor() { + + @Override + public void visitNode(Node n, int depth) { + if (depth > n.getData().getLayer()) { + n.getData().setLayer(depth); + maxLayerTemp = Math.max(maxLayerTemp, depth); + } + } + }; + + for (Node n : rootNodes) { + if (n.getData().getLayer() == -1) { + this.graph.traverseBFS(n, traverser, true); + } + } + + for (Vertex v : firstLayerHints) { + assert nodeMap.containsKey(v); + nodeMap.get(v).getData().setLayer(0); + } + + for (Vertex v : lastLayerHints) { + assert nodeMap.containsKey(v); + nodeMap.get(v).getData().setLayer(maxLayerTemp); + } + + return maxLayerTemp; + } + + private boolean checkAssignLayers() { + + for (Edge e : graph.getEdges()) { + Node source = e.getSource(); + Node dest = e.getDest(); + + + if (source.getData().getLayer() >= dest.getData().getLayer()) { + return false; + } + } + int maxLayer = 0; + for (Node n : graph.getNodes()) { + assert n.getData().getLayer() >= 0; + if (n.getData().getLayer() > maxLayer) { + maxLayer = n.getData().getLayer(); + } + } + + int countPerLayer[] = new int[maxLayer + 1]; + for (Node n : graph.getNodes()) { + countPerLayer[n.getData().getLayer()]++; + } + + if (TRACE) { + System.out.println("Number of layers: " + maxLayer); + } + return true; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/HierarchicalLayout/src/com/sun/hotspot/igv/hierarchicallayout/Timing.java 2009-08-01 04:19:32.304994986 +0100 @@ -0,0 +1,67 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.hierarchicallayout; + +/** + * + * @author Thomas Wuerthinger + */ +public class Timing { + + private long lastValue; + private long sum; + private String name; + + public Timing(String name) { + this.name = name; + } + + @Override + public String toString() { + long val = sum; + if (lastValue != 0) { + // Timer running + long newValue = System.nanoTime(); + val += (newValue - lastValue); + } + return "Timing for " + name + " is: " + val / 1000000 + " ms"; + } + + public void print() { + System.out.println(toString()); + } + + public void start() { + lastValue = System.nanoTime(); + } + + public void stop() { + if (lastValue == 0) { + throw new IllegalStateException("You must call start before stop"); + } + long newValue = System.nanoTime(); + sum += newValue - lastValue; + lastValue = 0; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/build.xml 2009-08-01 04:19:32.755337130 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.layout. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/manifest.mf 2009-08-01 04:19:33.180595668 +0100 @@ -0,0 +1,5 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.layout +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/layout/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/nbproject/build-impl.xml 2009-08-01 04:19:33.614579850 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/nbproject/genfiles.properties 2009-08-01 04:19:34.028727367 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=cb0889d9 +build.xml.script.CRC32=d65fccb9 +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. 
+nbproject/build-impl.xml.data.CRC32=cb0889d9 +nbproject/build-impl.xml.script.CRC32=7f82736d +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/nbproject/platform.properties 2009-08-01 04:19:34.415322723 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/nbproject/project.properties 2009-08-01 04:19:34.813245940 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/nbproject/project.xml 2009-08-01 04:19:35.224382378 +0100 @@ -0,0 +1,14 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.layout + + + + com.sun.hotspot.igv.layout + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/nbproject/suite.properties 2009-08-01 04:19:35.632844930 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/src/com/sun/hotspot/igv/layout/Bundle.properties 2009-08-01 04:19:36.175830593 +0100 @@ -0,0 +1 @@ +OpenIDE-Module-Name=Layout --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/src/com/sun/hotspot/igv/layout/Cluster.java 2009-08-01 04:19:36.568152465 +0100 @@ -0,0 +1,42 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.layout; + +import java.awt.Rectangle; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public interface Cluster extends Comparable { + + public Cluster getOuter(); + + public void setBounds(Rectangle r); + + public Set getSuccessors(); + + public Set getPredecessors(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/src/com/sun/hotspot/igv/layout/LayoutGraph.java 2009-08-01 04:19:36.995076687 +0100 @@ -0,0 +1,202 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.layout; + +import java.util.HashSet; +import java.util.HashMap; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; + +/** + * + * @author Thomas Wuerthinger + */ +public class LayoutGraph { + + private Set links; + private SortedSet vertices; + private HashMap> inputPorts; + private HashMap> outputPorts; + private HashMap> portLinks; + + public LayoutGraph(Set links) { + this(links, new HashSet()); + } + + public LayoutGraph(Set links, Set additionalVertices) { + this.links = links; + assert verify(); + + vertices = new TreeSet(); + portLinks = new HashMap>(); + inputPorts = new HashMap>(); + outputPorts = new HashMap>(); + + for (Link l : links) { + Port p = l.getFrom(); + Port p2 = l.getTo(); + Vertex v1 = p.getVertex(); + Vertex v2 = p2.getVertex(); + + if (!vertices.contains(v1)) { + + outputPorts.put(v1, new HashSet(1)); + inputPorts.put(v1, new HashSet(3)); + vertices.add(v1); + assert vertices.contains(v1); + } + + if (!vertices.contains(v2)) { + vertices.add(v2); + assert vertices.contains(v2); + outputPorts.put(v2, new HashSet(1)); + inputPorts.put(v2, new HashSet(3)); + } + + if (!portLinks.containsKey(p)) { + HashSet hashSet = new HashSet(3); + portLinks.put(p, hashSet); + } + + if (!portLinks.containsKey(p2)) { + portLinks.put(p2, new HashSet(3)); + } + + outputPorts.get(v1).add(p); + inputPorts.get(v2).add(p2); + + portLinks.get(p).add(l); + portLinks.get(p2).add(l); + } + + for (Vertex v : additionalVertices) { + if (!vertices.contains(v)) { + outputPorts.put(v, new HashSet(1)); + inputPorts.put(v, new HashSet(3)); + vertices.add(v); + vertices.contains(v); + } + } + } + + public Set getInputPorts(Vertex v) { + return this.inputPorts.get(v); + } + + public Set getOutputPorts(Vertex v) { + return this.outputPorts.get(v); + } + + public Set getPortLinks(Port p) { + return portLinks.get(p); + } + + public Set getLinks() { + return 
links; + } + + public boolean verify() { + return true; + } + + public SortedSet getVertices() { + return vertices; + } + + private void markNotRoot(Set notRootSet, Vertex v, Vertex startingVertex) { + + if (notRootSet.contains(v)) { + return; + } + if (v != startingVertex) { + notRootSet.add(v); + } + Set outPorts = getOutputPorts(v); + for (Port p : outPorts) { + Set portLinks = getPortLinks(p); + for (Link l : portLinks) { + Port other = l.getTo(); + Vertex otherVertex = other.getVertex(); + if (otherVertex != startingVertex) { + markNotRoot(notRootSet, otherVertex, startingVertex); + } + } + } + } + + // Returns a set of vertices with the following properties: + // - All Vertices in the set startingRoots are elements of the set. + // - When starting a DFS at every vertex in the set, every vertex of the + // whole graph is visited. + public Set findRootVertices(Set startingRoots) { + + Set notRootSet = new HashSet(); + for (Vertex v : startingRoots) { + if (!notRootSet.contains(v)) { + markNotRoot(notRootSet, v, v); + } + } + + Set tmpVertices = getVertices(); + for (Vertex v : tmpVertices) { + if (!notRootSet.contains(v)) { + if (this.getInputPorts(v).size() == 0) { + markNotRoot(notRootSet, v, v); + } + } + } + + for (Vertex v : tmpVertices) { + if (!notRootSet.contains(v)) { + markNotRoot(notRootSet, v, v); + } + } + + Set result = new HashSet(); + for (Vertex v : tmpVertices) { + if (!notRootSet.contains(v)) { + result.add(v); + } + } + assert tmpVertices.size() == 0 || result.size() > 0; + return result; + } + + public Set findRootVertices() { + return findRootVertices(new HashSet()); + } + + public SortedSet getClusters() { + + SortedSet clusters = new TreeSet(); + for (Vertex v : getVertices()) { + if (v.getCluster() != null) { + clusters.add(v.getCluster()); + } + } + + return clusters; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/src/com/sun/hotspot/igv/layout/LayoutManager.java 2009-08-01 04:19:37.428944628 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.layout; + +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public interface LayoutManager { + + public void doLayout(LayoutGraph graph); + + public void doLayout(LayoutGraph graph, Set firstLayerHint, Set lastLayerHint, Set importantLinks); + + public void doRouting(LayoutGraph graph); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/src/com/sun/hotspot/igv/layout/Link.java 2009-08-01 04:19:37.845975336 +0100 @@ -0,0 +1,42 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.layout; + +import java.awt.Point; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public interface Link { + + public Port getFrom(); + + public Port getTo(); + + public List getControlPoints(); + + public void setControlPoints(List list); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/src/com/sun/hotspot/igv/layout/Port.java 2009-08-01 04:19:38.355987245 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.layout; + +import java.awt.Point; + +/** + * + * @author Thomas Wuerthinger + */ +public interface Port { + + public Vertex getVertex(); + + public Point getRelativePosition(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Layout/src/com/sun/hotspot/igv/layout/Vertex.java 2009-08-01 04:19:38.748538771 +0100 @@ -0,0 +1,44 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.layout; + +import java.awt.Dimension; +import java.awt.Point; + +/** + * + * @author Thomas Wuerthinger + */ +public interface Vertex extends Comparable { + + public Cluster getCluster(); + + public Dimension getSize(); + + public Point getPosition(); + + public void setPosition(Point p); + + public boolean isRoot(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/build.xml 2009-08-01 04:19:39.190703788 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.connection. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/manifest.mf 2009-08-01 04:19:39.614686394 +0100 @@ -0,0 +1,6 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.connection +OpenIDE-Module-Layer: com/sun/hotspot/igv/connection/layer.xml +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/connection/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/nbproject/build-impl.xml 2009-08-01 04:19:40.034155541 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/nbproject/genfiles.properties 2009-08-01 04:19:40.451315216 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=f8e21cb6 +build.xml.script.CRC32=a265137e +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. 
+nbproject/build-impl.xml.data.CRC32=f8e21cb6 +nbproject/build-impl.xml.script.CRC32=36f3138c +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/nbproject/project.properties 2009-08-01 04:19:40.851783125 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/nbproject/project.xml 2009-08-01 04:19:41.250164546 +0100 @@ -0,0 +1,55 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.connection + + + + com.sun.hotspot.igv.data + + + + 1.0 + + + + com.sun.hotspot.igv.settings + + + + 1.0 + + + + org.openide.awt + + + + 6.11.1.1 + + + + org.openide.dialogs + + + + 7.5.1 + + + + org.openide.util + + + + 7.10.1.1 + + + + + com.sun.hotspot.igv.connection + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/nbproject/suite.properties 2009-08-01 04:19:41.683359106 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/src/META-INF/services/com.sun.hotspot.igv.data.services.GroupReceiver 2009-08-01 04:19:42.159141642 +0100 @@ -0,0 +1 @@ +com.sun.hotspot.igv.connection.Server \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/src/com/sun/hotspot/igv/connection/Bundle.properties 2009-08-01 04:19:42.703119854 +0100 @@ -0,0 +1 @@ +OpenIDE-Module-Name=NetworkConnection --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/src/com/sun/hotspot/igv/connection/Client.java 2009-08-01 04:19:43.116809580 +0100 @@ -0,0 +1,96 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ +package com.sun.hotspot.igv.connection; + +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.services.GroupCallback; +import com.sun.hotspot.igv.data.serialization.Parser; +import com.sun.hotspot.igv.data.Properties.RegexpPropertyMatcher; +import java.io.IOException; +import java.io.InputStream; +import java.net.Socket; +import javax.swing.JTextField; +import org.openide.util.Exceptions; +import org.openide.xml.XMLUtil; +import org.xml.sax.InputSource; +import org.xml.sax.SAXException; +import org.xml.sax.XMLReader; + +/** + * + * @author Thomas Wuerthinger + */ +public class Client implements Runnable, GroupCallback { + + private Socket socket; + private JTextField networkTextField; + private GroupCallback callback; + + public Client(Socket socket, JTextField networkTextField, GroupCallback callback) { + this.callback = callback; + this.socket = socket; + this.networkTextField = networkTextField; + } + + public void run() { + + try { + InputStream inputStream = socket.getInputStream(); + + if (networkTextField.isEnabled()) { + + socket.getOutputStream().write('y'); + InputSource is = new InputSource(inputStream); + + try { + XMLReader reader = XMLUtil.createXMLReader(); + Parser parser = new Parser(this); + parser.parse(reader, is, null); + } catch (SAXException ex) { + ex.printStackTrace(); + } + } else { + socket.getOutputStream().write('n'); + } + + socket.close(); + } catch (IOException ex) { + Exceptions.printStackTrace(ex); + } + } + + public void started(final Group g) { + try { + RegexpPropertyMatcher matcher = new RegexpPropertyMatcher("name", ".*" + networkTextField.getText() + ".*"); + if (g.getProperties().selectSingle(matcher) != null && networkTextField.isEnabled()) { + socket.getOutputStream().write('y'); + callback.started(g); + } else { + socket.getOutputStream().write('n'); + } + } catch (IOException e) { + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/src/com/sun/hotspot/igv/connection/Server.java 2009-08-01 04:19:43.596342966 +0100 @@ -0,0 +1,145 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ +package com.sun.hotspot.igv.connection; + +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.services.GroupCallback; +import com.sun.hotspot.igv.data.services.GroupReceiver; +import com.sun.hotspot.igv.settings.Settings; +import java.awt.Component; +import java.io.IOException; +import java.net.ServerSocket; +import java.net.Socket; +import java.util.prefs.PreferenceChangeEvent; +import java.util.prefs.PreferenceChangeListener; +import javax.swing.SwingUtilities; +import org.openide.DialogDisplayer; +import org.openide.NotifyDescriptor; +import org.openide.util.RequestProcessor; + +/** + * + * @author Thomas Wuerthinger + */ +public class Server implements GroupCallback, GroupReceiver, PreferenceChangeListener { + + private javax.swing.JPanel jPanel1; + private javax.swing.JCheckBox networkCheckBox; + private javax.swing.JTextField networkTextField; + private ServerSocket serverSocket; + private GroupCallback callback; + private int port; + private Runnable serverRunnable; + + public Component init(GroupCallback callback) { + + this.callback = callback; + + jPanel1 = new javax.swing.JPanel(); + networkTextField = new javax.swing.JTextField(); + networkCheckBox = new javax.swing.JCheckBox(); + + + jPanel1.setBorder(javax.swing.BorderFactory.createEmptyBorder(5, 5, 5, 5)); + jPanel1.setLayout(new java.awt.BorderLayout(10, 10)); + jPanel1.add(networkTextField, java.awt.BorderLayout.CENTER); + + networkCheckBox.setSelected(true); + org.openide.awt.Mnemonics.setLocalizedText(networkCheckBox, "Receive when name contains"); + networkCheckBox.setBorder(javax.swing.BorderFactory.createEmptyBorder(0, 0, 0, 0)); + networkCheckBox.setMargin(new java.awt.Insets(0, 0, 0, 0)); + networkCheckBox.addChangeListener(new javax.swing.event.ChangeListener() { + + public void stateChanged(javax.swing.event.ChangeEvent evt) { + networkCheckBoxChanged(evt); + } + }); + jPanel1.add(networkCheckBox, java.awt.BorderLayout.WEST); + networkCheckBox.getAccessibleContext().setAccessibleName("Read from network when name contains"); + + initializeNetwork(); + Settings.get().addPreferenceChangeListener(this); + return jPanel1; + } + + private void networkCheckBoxChanged(javax.swing.event.ChangeEvent evt) { + networkTextField.setEnabled(networkCheckBox.isSelected()); + } + + public void preferenceChange(PreferenceChangeEvent e) { + + int curPort = Integer.parseInt(Settings.get().get(Settings.PORT, Settings.PORT_DEFAULT)); + if (curPort != port) { + initializeNetwork(); + } + } + + private void initializeNetwork() { + + int curPort = Integer.parseInt(Settings.get().get(Settings.PORT, Settings.PORT_DEFAULT)); + this.port = curPort; + try { + serverSocket = new java.net.ServerSocket(curPort); + } catch (IOException ex) { + NotifyDescriptor message = new NotifyDescriptor.Message("Could not create server. Listening for incoming data is disabled.", NotifyDescriptor.ERROR_MESSAGE); + DialogDisplayer.getDefault().notifyLater(message); + return; + } + + Runnable runnable = new Runnable() { + + public void run() { + while (true) { + try { + Socket clientSocket = serverSocket.accept(); + if (serverRunnable != this) { + clientSocket.close(); + return; + } + RequestProcessor.getDefault().post(new Client(clientSocket, networkTextField, Server.this), 0, Thread.MAX_PRIORITY); + } catch (IOException ex) { + serverSocket = null; + NotifyDescriptor message = new NotifyDescriptor.Message("Error during listening for incoming connections. 
Listening for incoming data is disabled.", NotifyDescriptor.ERROR_MESSAGE);
+ DialogDisplayer.getDefault().notifyLater(message);
+ return;
+ }
+ }
+ }
+ };
+
+ serverRunnable = runnable;
+
+ RequestProcessor.getDefault().post(runnable, 0, Thread.MAX_PRIORITY);
+ }
+
+ public void started(final Group g) {
+ SwingUtilities.invokeLater(new Runnable() {
+
+ public void run() {
+ callback.started(g);
+ }
+ });
+ }
+}
--- /dev/null 2009-07-29 04:07:07.141863912 +0100
+++ new/hotspot/src/share/tools/IdealGraphVisualizer/NetworkConnection/src/com/sun/hotspot/igv/connection/layer.xml 2009-08-01 04:19:44.038495942 +0100
@@ -0,0 +1,4 @@
+
+
+
+
--- /dev/null 2009-07-29 04:07:07.141863912 +0100
+++ new/hotspot/src/share/tools/IdealGraphVisualizer/README 2009-08-01 04:19:44.444811235 +0100
@@ -0,0 +1,40 @@
+The Ideal Graph Visualizer is a tool developed to help examine the
+intermediate representation of C2 which is commonly referred to as the
+"ideal graph". It was developed in collaboration with the University
+of Linz in Austria and has been included as part of hotspot since that
+was the primary target of the tool. The tool itself is fairly general
+with only a few modules that contain C2 specific elements.
+
+The tool is built on top of the NetBeans 6.1 rich client
+infrastructure and so requires NetBeans to build. It currently
+requires Java 6 to run as it needs support for JavaScript for its
+filtering mechanism and assumes it's built into the platform. It
+should build out of the box with NetBeans 6.1 and Java 6 or later.
+It's possible to run it on 1.5 by including Rhino on the classpath
+though that currently isn't working correctly. Support for exporting
+graphs as SVG can be enabled by adding batik to the classpath which
+isn't included by default. It can be built on top of NetBeans 6.0 if
+you change the required modules to be platform7 instead of platform8.
+
+The JVM support is controlled by the flag -XX:PrintIdealGraphLevel=#
+where # is:
+
+ 0: no output, the default
+ 1: dumps graph after parsing, before matching, and final code.
+ also dumps graph for failed compiles, if available
+ 2: more detail, including after loop opts
+ 3: even more detail
+ 4: prints graph after parsing every bytecode (very slow)
+
+By default the JVM expects that it will connect to a visualizer on the
+local host on port 4444. This can be configured using the options
+-XX:PrintIdealGraphAddress= and -XX:PrintIdealGraphPort=.
+PrintIdealGraphAddress can actually be a hostname.
+
+Alternatively the output can be sent to a file using
+-XX:PrintIdealGraphFile=filename. Each compiler thread will get its
+own file with unique names being generated by adding a number onto the
+provided file name.
+
+More information about the tool is available at
+http://wikis.sun.com/display/HotSpotInternals/IdealGraphVisualizer.
--- /dev/null 2009-07-29 04:07:07.141863912 +0100
+++ new/hotspot/src/share/tools/IdealGraphVisualizer/RhinoScriptEngineProxy/build.xml 2009-08-01 04:19:44.898904242 +0100
@@ -0,0 +1,8 @@
+
+
+
+
+
+ Builds, tests, and runs the project com.sun.hotspot.igv.rhino.
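A rough usage sketch of the Layout API introduced in this patch (Vertex, Port, Link, LayoutGraph, LayoutManager): a client supplies its own Vertex/Port/Link implementations, wraps the links in a LayoutGraph, and hands the graph to a layout manager such as OldHierarchicalLayoutManager. This is illustrative only; the SimpleVertex, SimplePort, SimpleLink and LayoutExample names are hypothetical, the node size and port position are arbitrary, and the generic type parameters (Set<Link>, List<Point>, Comparable<Vertex>) are assumed, since the flattened patch text does not show them.

import com.sun.hotspot.igv.hierarchicallayout.OldHierarchicalLayoutManager;
import com.sun.hotspot.igv.layout.Cluster;
import com.sun.hotspot.igv.layout.LayoutGraph;
import com.sun.hotspot.igv.layout.Link;
import com.sun.hotspot.igv.layout.Port;
import com.sun.hotspot.igv.layout.Vertex;
import java.awt.Dimension;
import java.awt.Point;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class SimpleVertex implements Vertex {
    private final int id;
    private Point position = new Point(0, 0);

    SimpleVertex(int id) { this.id = id; }

    public Cluster getCluster() { return null; }                     // no clustering in this sketch
    public Dimension getSize() { return new Dimension(40, 20); }     // arbitrary fixed node size
    public Point getPosition() { return position; }
    public void setPosition(Point p) { position = p; }               // filled in by the layout manager
    public boolean isRoot() { return id == 0; }                      // treat vertex 0 as the root
    public int compareTo(Vertex o) { return id - ((SimpleVertex) o).id; }
    public String toString() { return "v" + id; }
}

class SimplePort implements Port {
    private final Vertex vertex;
    SimplePort(Vertex vertex) { this.vertex = vertex; }
    public Vertex getVertex() { return vertex; }
    public Point getRelativePosition() { return new Point(20, 10); } // port at the node center
}

class SimpleLink implements Link {
    private final Port from;
    private final Port to;
    private List<Point> controlPoints = new ArrayList<Point>();
    SimpleLink(Vertex from, Vertex to) {
        this.from = new SimplePort(from);
        this.to = new SimplePort(to);
    }
    public Port getFrom() { return from; }
    public Port getTo() { return to; }
    public List<Point> getControlPoints() { return controlPoints; }
    public void setControlPoints(List<Point> list) { controlPoints = list; }
}

public class LayoutExample {
    public static void main(String[] args) {
        SimpleVertex a = new SimpleVertex(0);
        SimpleVertex b = new SimpleVertex(1);
        SimpleVertex c = new SimpleVertex(2);

        Set<Link> links = new HashSet<Link>();
        links.add(new SimpleLink(a, b));
        links.add(new SimpleLink(a, c));
        links.add(new SimpleLink(b, c));

        // Wrap the links in a LayoutGraph and run the hierarchical layout.
        LayoutGraph graph = new LayoutGraph(links);
        new OldHierarchicalLayoutManager(OldHierarchicalLayoutManager.Combine.NONE).doLayout(graph);

        for (Vertex v : graph.getVertices()) {
            System.out.println(v + " placed at " + v.getPosition());
        }
    }
}

On the producing side, per the README above, a HotSpot build in which the PrintIdealGraph flags are available (assumed here to be a debug build) can stream graphs to a running visualizer or dump them to a file, for example:

    java -XX:PrintIdealGraphLevel=2 -XX:PrintIdealGraphPort=4444 SomeApplication
    java -XX:PrintIdealGraphLevel=1 -XX:PrintIdealGraphFile=ideal.xml SomeApplication

where SomeApplication stands for whatever program is being compiled by C2.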
+ + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/RhinoScriptEngineProxy/manifest.mf 2009-08-01 04:19:45.307354134 +0100 @@ -0,0 +1,6 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.rhino +OpenIDE-Module-Layer: com/sun/hotspot/igv/rhino/layer.xml +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/rhino/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/RhinoScriptEngineProxy/nbproject/build-impl.xml 2009-08-01 04:19:45.758385596 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/RhinoScriptEngineProxy/nbproject/genfiles.properties 2009-08-01 04:19:46.175216713 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=0c3e7912 +build.xml.script.CRC32=44d0050c +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. +nbproject/build-impl.xml.data.CRC32=0c3e7912 +nbproject/build-impl.xml.script.CRC32=7aab3f52 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/RhinoScriptEngineProxy/nbproject/project.properties 2009-08-01 04:19:46.558981037 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/RhinoScriptEngineProxy/nbproject/project.xml 2009-08-01 04:19:46.944015718 +0100 @@ -0,0 +1,31 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.rhino + + + + com.sun.hotspot.igv.filter + + + + 1.0 + + + + com.sun.hotspot.igv.graph + + + + 1.0 + + + + + com.sun.hotspot.igv.rhino + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/RhinoScriptEngineProxy/nbproject/suite.properties 2009-08-01 04:19:47.352331307 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/RhinoScriptEngineProxy/src/META-INF/services/com.sun.hotspot.igv.filter.ScriptEngineAbstraction 2009-08-01 04:19:47.836353826 +0100 @@ -0,0 +1 @@ +com.sun.hotspot.igv.rhino.RhinoScriptEngine \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/RhinoScriptEngineProxy/src/com/sun/hotspot/igv/rhino/Bundle.properties 2009-08-01 04:19:48.354584554 +0100 @@ -0,0 +1 @@ +OpenIDE-Module-Name=RhinoScriptEngineProxy --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/RhinoScriptEngineProxy/src/com/sun/hotspot/igv/rhino/RhinoScriptEngine.java 2009-08-01 04:19:48.814365451 +0100 @@ -0,0 +1,85 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.rhino; + +import com.sun.hotspot.igv.filter.ScriptEngineAbstraction; +import com.sun.hotspot.igv.graph.Diagram; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; + +/** + * + * @author Thomas Wuerthinger + */ +public class RhinoScriptEngine implements ScriptEngineAbstraction { + + private String jsHelperText; + private Constructor importer; + private Method scope_put; + private Method cx_evaluateString; + private Method context_enter; + private Method context_exit; + + public boolean initialize(String s) { + this.jsHelperText = s; + Class importerTopLevel = null; + try { + ClassLoader cl = RhinoScriptEngine.class.getClassLoader(); + Class context = cl.loadClass("org.mozilla.javascript.Context"); + Class scriptable = cl.loadClass("org.mozilla.javascript.Scriptable"); + importerTopLevel = cl.loadClass("org.mozilla.javascript.ImporterTopLevel"); + importer = importerTopLevel.getDeclaredConstructor(context); + scope_put = importerTopLevel.getMethod("put", new Class[]{String.class, scriptable, Object.class}); + cx_evaluateString = context.getDeclaredMethod("evaluateString", new Class[]{scriptable, String.class, String.class, Integer.TYPE, Object.class}); + context_enter = context.getDeclaredMethod("enter", new Class[0]); + context_exit = context.getDeclaredMethod("exit", new Class[0]); + return true; + } catch (NoSuchMethodException nsme) { + return false; + } catch (ClassNotFoundException cnfe) { + return false; + } + } + + public void execute(Diagram d, String code) { + try { + Object cx = context_enter.invoke(null, (Object[]) null); + try { + Object scope = importer.newInstance(cx); + scope_put.invoke(scope, "IO", scope, System.out); + scope_put.invoke(scope, "graph", scope, d); + cx_evaluateString.invoke(cx, scope, jsHelperText, "jsHelper.js", 1, null); + cx_evaluateString.invoke(cx, scope, code, "", 1, null); + } finally { + // Exit from the context. 
+ context_exit.invoke(null, (Object[]) null); + } + } catch (InvocationTargetException iae) { + } catch (IllegalAccessException iae) { + } catch (InstantiationException iae) { + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/RhinoScriptEngineProxy/src/com/sun/hotspot/igv/rhino/layer.xml 2009-08-01 04:19:49.272413999 +0100 @@ -0,0 +1,4 @@ + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/build.xml 2009-08-01 04:19:49.709239529 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.servercompilerscheduler. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/manifest.mf 2009-08-01 04:19:50.106753007 +0100 @@ -0,0 +1,6 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.servercompiler +OpenIDE-Module-Layer: com/sun/hotspot/igv/servercompiler/layer.xml +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/servercompiler/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/nbproject/build-impl.xml 2009-08-01 04:19:50.532526187 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/nbproject/genfiles.properties 2009-08-01 04:19:50.961353065 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=d1360a65 +build.xml.script.CRC32=a9d94ef8 +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. 
+nbproject/build-impl.xml.data.CRC32=d1360a65 +nbproject/build-impl.xml.script.CRC32=52847236 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/nbproject/platform.properties 2009-08-01 04:19:51.362258780 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/nbproject/project.properties 2009-08-01 04:19:51.762653149 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/nbproject/project.xml 2009-08-01 04:19:52.168337039 +0100 @@ -0,0 +1,21 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.servercompiler + + + + com.sun.hotspot.igv.data + + + + 1.0 + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/nbproject/suite.properties 2009-08-01 04:19:52.568754227 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/META-INF/services/com.sun.hotspot.igv.data.services.GroupOrganizer 2009-08-01 04:19:53.044501376 +0100 @@ -0,0 +1 @@ +com.sun.hotspot.igv.servercompiler.JavaGroupOrganizer \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/META-INF/services/com.sun.hotspot.igv.data.services.Scheduler 2009-08-01 04:19:53.492808714 +0100 @@ -0,0 +1 @@ +com.sun.hotspot.igv.servercompiler.ServerCompilerScheduler --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/Bundle.properties 2009-08-01 04:19:54.079531050 +0100 @@ -0,0 +1 @@ +OpenIDE-Module-Name=ServerCompiler --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/JavaGroupOrganizer.java 2009-08-01 04:19:54.508497569 +0100 @@ -0,0 +1,200 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.servercompiler; + +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.services.GroupOrganizer; +import com.sun.hotspot.igv.data.Pair; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class JavaGroupOrganizer implements GroupOrganizer { + + public String getName() { + return "Java structure"; + } + + public List>> organize(List subFolders, List groups) { + + List>> result = new ArrayList>>(); + + if (subFolders.size() == 0) { + buildResult(result, groups, packageNameProvider); + } else if (subFolders.size() == 1) { + buildResult(result, groups, classNameProvider); + } else if (subFolders.size() == 2) { + for (Group g : groups) { + List children = new ArrayList(); + children.add(g); + Pair> p = new Pair>(); + p.setLeft(reducedNameProvider.getName(g)); + p.setRight(children); + result.add(p); + } + } else { + result.add(new Pair>("", groups)); + } + + return result; + } + + private void buildResult(List>> result, List groups, NameProvider provider) { + HashMap> map = new HashMap>(); + for (Group g : groups) { + String s = provider.getName(g); + + if (!map.containsKey(s)) { + List list = new ArrayList(); + Pair> pair = new Pair>(s, list); + result.add(pair); + map.put(s, list); + } + + List curList = map.get(s); + curList.add(g); + } + + Collections.sort(result, new Comparator>>() { + + public int compare(Pair> a, Pair> b) { + return a.getLeft().compareTo(b.getLeft()); + } + }); + } + + private static interface NameProvider { + + public String getName(Group g); + } + private NameProvider reducedNameProvider = new NameProvider() { + + public String getName(Group g) { + String name = g.getName(); + assert name != null : "name of group must be set!"; + final String noReducedName = name; + + int firstPoint = name.indexOf("."); + if (firstPoint == -1) { + return noReducedName; + } + + int firstParenthese = name.indexOf("("); + if (firstParenthese == -1 || firstParenthese < firstPoint) { + return noReducedName; + } + + int current = firstPoint; + while (current > 0 && name.charAt(current) != ' ') { + current--; + } + + String tmp = name.substring(0, firstParenthese); + int lastPoint = tmp.lastIndexOf("."); + if (lastPoint == -1) { + return noReducedName; + } + + name = name.substring(0, current + 1) + name.substring(lastPoint + 1); + return name; + } + }; + private NameProvider packageNameProvider = new NameProvider() { + + public String getName(Group g) { + String name = g.getName(); + assert name != null : "name of group must be set!"; + final String noPackage = ""; + + int firstPoint = name.indexOf("."); + if (firstPoint == -1) { + return noPackage; + } + + int firstParenthese 
= name.indexOf("("); + if (firstParenthese == -1 || firstParenthese < firstPoint) { + return noPackage; + } + + int current = firstPoint; + while (current > 0 && name.charAt(current) != ' ') { + current--; + } + + String fullClassName = name.substring(current + 1, firstParenthese); + int lastPoint = fullClassName.lastIndexOf("."); + if (lastPoint == -1) { + return noPackage; + } + lastPoint = fullClassName.lastIndexOf(".", lastPoint - 1); + if (lastPoint == -1) { + return noPackage; + } + + String packageName = fullClassName.substring(0, lastPoint); + return packageName; + } + }; + private NameProvider classNameProvider = new NameProvider() { + + public String getName(Group g) { + String name = g.getName(); + assert name != null : "name of group must be set!"; + + final String noClass = ""; + + int firstPoint = name.indexOf("."); + if (firstPoint == -1) { + return noClass; + } + + int firstParenthese = name.indexOf("("); + if (firstParenthese == -1 || firstParenthese < firstPoint) { + return noClass; + } + + int current = firstPoint; + while (current > 0 && name.charAt(current) != ' ') { + current--; + } + + String fullClassName = name.substring(current + 1, firstParenthese); + int lastPoint = fullClassName.lastIndexOf("."); + if (lastPoint == -1) { + return noClass; + } + int lastlastPoint = fullClassName.lastIndexOf(".", lastPoint - 1); + + String className = fullClassName.substring(lastlastPoint + 1, lastPoint); + return className; + } + }; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/ServerCompilerScheduler.java 2009-08-01 04:19:54.929151832 +0100 @@ -0,0 +1,597 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ +package com.sun.hotspot.igv.servercompiler; + +import com.sun.hotspot.igv.data.InputBlock; +import com.sun.hotspot.igv.data.InputEdge; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.InputNode; +import com.sun.hotspot.igv.data.services.Scheduler; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Stack; +import java.util.Vector; + +/** + * + * @author Thomas Wuerthinger + */ +public class ServerCompilerScheduler implements Scheduler { + + private static class Node { + + public InputNode inputNode; + public Set succs = new HashSet(); + public List preds = new ArrayList(); + public InputBlock block; + public boolean isBlockProjection; + public boolean isBlockStart; + } + private InputGraph graph; + private Collection nodes; + private Map inputNodeToNode; + private Vector blocks; + private Map dominatorMap; + private Map blockIndex; + private InputBlock[][] commonDominator; + private static final Comparator edgeComparator = new Comparator() { + + public int compare(InputEdge o1, InputEdge o2) { + return o1.getToIndex() - o2.getToIndex(); + } + }; + + public void buildBlocks() { + + blocks = new Vector(); + Node root = findRoot(); + if (root == null) { + return; + } + Stack stack = new Stack(); + Set visited = new HashSet(); + stack.add(root); + int blockCount = 0; + InputBlock rootBlock = null; + + + while (!stack.isEmpty()) { + Node proj = stack.pop(); + Node parent = proj; + if (proj.isBlockProjection && proj.preds.size() > 0) { + parent = proj.preds.get(0); + } + + if (!visited.contains(parent)) { + visited.add(parent); + InputBlock block = new InputBlock(graph, "" + blockCount); + blocks.add(block); + if (parent == root) { + rootBlock = block; + } + blockCount++; + parent.block = block; + if (proj != parent && proj.succs.size() == 1 && proj.succs.contains(root)) { + // Special treatment of Halt-nodes + proj.block = block; + } + + Node p = proj; + do { + if (p.preds.size() == 0 || p.preds.get(0) == null) { + p = parent; + break; + } + p = p.preds.get(0); + + if (p.block == null) { + p.block = block; + } + } while (!p.isBlockProjection && !p.isBlockStart); + + if (block != rootBlock) { + for (Node n : p.preds) { + if (n != null && n != p) { + if (n.isBlockProjection) { + n = n.preds.get(0); + } + if (n.block != null) { + n.block.addSuccessor(block); + } + } + } + } + + for (Node n : parent.succs) { + if (n != root && n.isBlockProjection) { + for (Node n2 : n.succs) { + + if (n2 != parent && n2.block != null && n2.block != rootBlock) { + block.addSuccessor(n2.block); + } + } + } else { + if (n != parent && n.block != null && n.block != rootBlock) { + block.addSuccessor(n.block); + } + } + } + + int num_preds = p.preds.size(); + int bottom = -1; + if (isRegion(p) || isPhi(p)) { + bottom = 0; + } + + int pushed = 0; + for (int i = num_preds - 1; i > bottom; i--) { + if (p.preds.get(i) != null && p.preds.get(i) != p) { + stack.push(p.preds.get(i)); + pushed++; + } + } + + if (pushed == 0 && p == root) { + // TODO: special handling when root backedges are not built yet + } + } + } + + for (Node n : nodes) { + InputBlock block = n.block; + if (block != null) { + block.addNode(n.inputNode.getId()); + } + } + + int z = 0; + blockIndex = new HashMap(); + for (InputBlock b : blocks) { + blockIndex.put(b, z); + z++; + } + } + + private String 
getBlockName(InputNode n) { + return n.getProperties().get("block"); + } + + public Collection schedule(InputGraph graph) { + if (graph.getBlocks().size() > 0) { + Collection tmpNodes = new ArrayList(graph.getNodes()); + for (InputNode n : tmpNodes) { + String block = getBlockName(n); + if (graph.getBlock(n) == null) { + graph.getBlock(block).addNode(n); + assert graph.getBlock(n) != null; + } + } + return graph.getBlocks(); + } else { + nodes = new ArrayList(); + inputNodeToNode = new HashMap(); + + this.graph = graph; + buildUpGraph(); + buildBlocks(); + buildDominators(); + buildCommonDominators(); + scheduleLatest(); + + for (InputNode n : graph.getNodes()) { + assert graph.getBlock(n) != null; + } + + return blocks; + } + } + + public void scheduleLatest() { + + + Node root = findRoot(); + + // Mark all nodes reachable in backward traversal from root + Set reachable = new HashSet(); + reachable.add(root); + Stack stack = new Stack(); + stack.push(root); + while (!stack.isEmpty()) { + Node cur = stack.pop(); + for (Node n : cur.preds) { + if (!reachable.contains(n)) { + reachable.add(n); + stack.push(n); + } + } + } + + Set unscheduled = new HashSet(); + for (Node n : this.nodes) { + if (n.block == null && reachable.contains(n)) { + unscheduled.add(n); + } + } + + while (unscheduled.size() > 0) { + boolean progress = false; + + Set newUnscheduled = new HashSet(); + for (Node n : unscheduled) { + + InputBlock block = null; + if (this.isPhi(n) && n.preds.get(0) != null) { + // Phi nodes in same block as region nodes + block = n.preds.get(0).block; + } else { + for (Node s : n.succs) { + if (reachable.contains(s)) { + if (s.block == null) { + block = null; + break; + } else { + if (block == null) { + block = s.block; + } else { + block = commonDominator[this.blockIndex.get(block)][blockIndex.get(s.block)]; + } + } + } + } + } + + if (block != null) { + n.block = block; + block.addNode(n.inputNode.getId()); + progress = true; + } else { + newUnscheduled.add(n); + } + } + + unscheduled = newUnscheduled; + + if (!progress) { + break; + } + } + + Set curReachable = new HashSet(reachable); + for (Node n : curReachable) { + if (n.block != null) { + for (Node s : n.succs) { + if (!reachable.contains(s)) { + markWithBlock(s, n.block, reachable); + } + } + } + } + + } + + private void markWithBlock(Node n, InputBlock b, Set reachable) { + assert !reachable.contains(n); + Stack stack = new Stack(); + stack.push(n); + n.block = b; + b.addNode(n.inputNode.getId()); + reachable.add(n); + + while (!stack.isEmpty()) { + Node cur = stack.pop(); + for (Node s : cur.succs) { + if (!reachable.contains(s)) { + reachable.add(s); + s.block = b; + b.addNode(s.inputNode.getId()); + stack.push(s); + } + } + + for (Node s : cur.preds) { + if (!reachable.contains(s)) { + reachable.add(s); + s.block = b; + b.addNode(s.inputNode.getId()); + stack.push(s); + } + } + } + } + + private class BlockIntermediate { + + InputBlock block; + int index; + int dominator; + int semi; + int parent; + int label; + int ancestor; + List pred; + List bucket; + } + + public void buildCommonDominators() { + commonDominator = new InputBlock[this.blocks.size()][this.blocks.size()]; + for (int i = 0; i < blocks.size(); i++) { + for (int j = 0; j < blocks.size(); j++) { + commonDominator[i][j] = getCommonDominator(i, j); + } + } + } + + public InputBlock getCommonDominator(int a, int b) { + InputBlock ba = blocks.get(a); + InputBlock bb = blocks.get(b); + if (ba == bb) { + return ba; + } + Set visited = new HashSet(); + while (ba != null) { 
+ visited.add(ba); + ba = dominatorMap.get(ba); + } + + while (bb != null) { + if (visited.contains(bb)) { + return bb; + } + bb = dominatorMap.get(bb); + } + + assert false; + return null; + } + + public void buildDominators() { + dominatorMap = new HashMap(); + if (blocks.size() == 0) { + return; + } + Vector intermediate = new Vector(); + Map map = new HashMap(); + int z = 0; + for (InputBlock b : blocks) { + BlockIntermediate bi = new BlockIntermediate(); + bi.block = b; + bi.index = z; + bi.dominator = -1; + bi.semi = -1; + bi.parent = -1; + bi.label = z; + bi.ancestor = -1; + bi.pred = new ArrayList(); + bi.bucket = new ArrayList(); + intermediate.add(bi); + map.put(b, bi); + z++; + } + Stack stack = new Stack(); + stack.add(0); + + Vector array = new Vector(); + intermediate.get(0).dominator = 0; + + int n = 0; + while (!stack.isEmpty()) { + int index = stack.pop(); + BlockIntermediate ib = intermediate.get(index); + ib.semi = n; + array.add(ib); + n = n + 1; + for (InputBlock b : ib.block.getSuccessors()) { + BlockIntermediate succ = map.get(b); + if (succ.semi == -1) { + succ.parent = index; + stack.push(succ.index); // TODO: check if same node could be pushed twice + } + succ.pred.add(index); + } + } + + for (int i = n - 1; i > 0; i--) { + BlockIntermediate block = array.get(i); + int block_index = block.index; + for (int predIndex : block.pred) { + int curIndex = eval(predIndex, intermediate); + BlockIntermediate curBlock = intermediate.get(curIndex); + if (curBlock.semi < block.semi) { + block.semi = curBlock.semi; + } + } + + + int semiIndex = block.semi; + BlockIntermediate semiBlock = array.get(semiIndex); + semiBlock.bucket.add(block_index); + + link(block.parent, block_index, intermediate); + BlockIntermediate parentBlock = intermediate.get(block.parent); + + for (int j = 0; j < parentBlock.bucket.size(); j++) { + for (int curIndex : parentBlock.bucket) { + int newIndex = eval(curIndex, intermediate); + BlockIntermediate curBlock = intermediate.get(curIndex); + BlockIntermediate newBlock = intermediate.get(newIndex); + int dom = block.parent; + if (newBlock.semi < curBlock.semi) { + dom = newIndex; + } + + curBlock.dominator = dom; + } + } + + + parentBlock.bucket.clear(); + } + + for (int i = 1; i < n; i++) { + + BlockIntermediate block = array.get(i); + int block_index = block.index; + + int semi_index = block.semi; + BlockIntermediate semi_block = array.get(semi_index); + + if (block.dominator != semi_block.index) { + int new_dom = intermediate.get(block.dominator).dominator; + block.dominator = new_dom; + } + } + + for (BlockIntermediate ib : intermediate) { + if (ib.dominator == -1) { + ib.dominator = 0; + } + } + + for (BlockIntermediate bi : intermediate) { + InputBlock b = bi.block; + int dominator = bi.dominator; + InputBlock dominatorBlock = null; + if (dominator != -1) { + dominatorBlock = intermediate.get(dominator).block; + } + + if (dominatorBlock == b) { + dominatorBlock = null; + } + this.dominatorMap.put(b, dominatorBlock); + } + } + + private void compress(int index, Vector blocks) { + BlockIntermediate block = blocks.get(index); + + int ancestor = block.ancestor; + assert ancestor != -1; + + BlockIntermediate ancestor_block = blocks.get(ancestor); + if (ancestor_block.ancestor != -1) { + compress(ancestor, blocks); + + int label = block.label; + BlockIntermediate label_block = blocks.get(label); + + int ancestor_label = ancestor_block.label; + BlockIntermediate ancestor_label_block = blocks.get(label); + if (ancestor_label_block.semi < label_block.semi) 
{ + block.label = ancestor_label; + } + + block.ancestor = ancestor_block.ancestor; + } + } + + private int eval(int index, Vector blocks) { + BlockIntermediate block = blocks.get(index); + if (block.ancestor == -1) { + return index; + } else { + compress(index, blocks); + return block.label; + } + } + + private void link(int index1, int index2, Vector blocks) { + BlockIntermediate block2 = blocks.get(index2); + block2.ancestor = index1; + } + + private boolean isRegion(Node n) { + return n.inputNode.getProperties().get("name").equals("Region"); + } + + private boolean isPhi(Node n) { + return n.inputNode.getProperties().get("name").equals("Phi"); + } + + private Node findRoot() { + + for (Node n : nodes) { + InputNode inputNode = n.inputNode; + if (inputNode.getProperties().get("name").equals("Root")) { + return n; + } + } + + return null; + } + + public void buildUpGraph() { + + for (InputNode n : graph.getNodes()) { + Node node = new Node(); + node.inputNode = n; + nodes.add(node); + String p = n.getProperties().get("is_block_proj"); + node.isBlockProjection = (p != null && p.equals("true")); + p = n.getProperties().get("is_block_start"); + node.isBlockStart = (p != null && p.equals("true")); + inputNodeToNode.put(n, node); + } + + Map> edgeMap = new HashMap>(); + for (InputEdge e : graph.getEdges()) { + + int to = e.getTo(); + if (!edgeMap.containsKey(to)) { + edgeMap.put(to, new ArrayList()); + } + + + List list = edgeMap.get(to); + list.add(e); + } + + + for (Integer i : edgeMap.keySet()) { + + List list = edgeMap.get(i); + Collections.sort(list, edgeComparator); + + int to = i; + InputNode toInputNode = graph.getNode(to); + Node toNode = inputNodeToNode.get(toInputNode); + for (InputEdge e : list) { + assert to == e.getTo(); + int from = e.getFrom(); + InputNode fromInputNode = graph.getNode(from); + Node fromNode = inputNodeToNode.get(fromInputNode); + fromNode.succs.add(toNode); + toNode.preds.add(fromNode); + } + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/color.filter 2009-08-01 04:19:55.395126530 +0100 @@ -0,0 +1,5 @@ +colorize("name", ".*", yellow); +colorize("name", "Catch.*", blue); + +colorize("name", "Region|Loop|CountedLoop|Root", red); +colorize("name", "CProj|IfFalse|IfTrue|JProj|CatchProj", magenta); --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/combine.filter 2009-08-01 04:19:55.827692684 +0100 @@ -0,0 +1,4 @@ +var f = new CombineFilter("Combine Filter"); +f.addRule(new CombineFilter.CombineRule(new Properties.RegexpPropertyMatcher("name", ".*"), new Properties.RegexpPropertyMatcher("name", "Proj|IfFalse|IfTrue|JProj|MachProj|JumpProj|CatchProj"))); +f.addRule(new CombineFilter.CombineRule(new Properties.RegexpPropertyMatcher("name", "Cmp.*"), new Properties.RegexpPropertyMatcher("name", "Bool"))); +f.apply(graph); \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/difference.filter 2009-08-01 04:19:56.253207653 +0100 @@ -0,0 +1,4 @@ +colorize("state", "same", white); +colorize("state", "changed", orange); +colorize("state", "new", green); +colorize("state", "deleted", red); --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ 
new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/extendedColor.filter 2009-08-01 04:19:56.687224853 +0100 @@ -0,0 +1,3 @@ +colorize("name", "Con.*", orange); +colorize("name", "Parm|Proj", lightGray); +colorize("bci", "..*", magenta); \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/linestyle.filter 2009-08-01 04:19:57.120878738 +0100 @@ -0,0 +1,7 @@ +var f = new ColorFilter("Line Style filter"); +f.addRule(new ColorFilter.ColorRule(new MatcherSelector(new Properties.StringPropertyMatcher("type", "int:")), null, Color.BLUE, null)); +f.addRule(new ColorFilter.ColorRule(new MatcherSelector(new Properties.StringPropertyMatcher("type", "control")), null, Color.RED, null)); +f.addRule(new ColorFilter.ColorRule(new MatcherSelector(new Properties.StringPropertyMatcher("type", "memory")), null, Color.GREEN, null)); +f.addRule(new ColorFilter.ColorRule(new MatcherSelector(new Properties.StringPropertyMatcher("type", "tuple:")), null, Color.MAGENTA, null)); +f.addRule(new ColorFilter.ColorRule(new MatcherSelector(new Properties.StringPropertyMatcher("type", "bottom")), null, Color.LIGHT_GRAY, null)); +f.apply(graph); \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/matchingFlags.filter 2009-08-01 04:19:57.545791638 +0100 @@ -0,0 +1,3 @@ +colorize("is_dontcare", "false", white); +colorize("is_shared", "true", green); +colorize("is_dontcare", "true", red); --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/onlyControlFlow.filter 2009-08-01 04:19:57.991196625 +0100 @@ -0,0 +1,24 @@ +var f = new RemoveFilter("Show only control flow"); +f.addRule( + new RemoveFilter.RemoveRule( + new InvertSelector( + new OrSelector( + new OrSelector( + new SuccessorSelector( + new MatcherSelector( + new Properties.StringPropertyMatcher("type", "control") + ) + ), + new MatcherSelector( + new Properties.StringPropertyMatcher("type", "control") + ) + ), + new MatcherSelector( + new Properties.StringPropertyMatcher("name", "Start") + ) + ) + ), false + ) +); +f.addRule( new RemoveFilter.RemoveRule(new MatcherSelector(new Properties.RegexpPropertyMatcher("name", "Phi|Store.")), false)); +f.apply(graph); \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/register.filter 2009-08-01 04:19:58.425893164 +0100 @@ -0,0 +1,4 @@ +colorize("reg", "EAX", green); +colorize("reg", "EFLAGS", gray); +colorize("reg", "EBP", orange); +colorize("reg", "ECX", cyan); --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/remove.filter 2009-08-01 04:19:58.860908536 +0100 @@ -0,0 +1 @@ +remove("dump_spec", "FramePtr|ReturnAdr|I_O"); \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/removeMemory.filter 2009-08-01 04:19:59.347479916 +0100 @@ -0,0 +1,7 @@ + +//var f = new RemoveFilter("Remove Memory"); 
+//f.addRule(new RemoveFilter.RemoveRule(new MatcherSelector(new Properties.StringPropertyMatcher("dump_spec", "Memory")), false)); +//f.addRule(new RemoveFilter.RemoveRule(new AndSelector(new MatcherSelector(new Properties.StringPropertyMatcher("name", "Proj")), new MatcherSelector(new Properties.StringPropertyMatcher("type", "memory"))), false)); +//f.apply(graph); + +remove("dump_spec", "Memory"); --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/removeRootInputs.filter 2009-08-01 04:20:00.496828583 +0100 @@ -0,0 +1 @@ +removeInputs("name", "Root"); \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/removeSafepointInputs.filter 2009-08-01 04:20:00.901391325 +0100 @@ -0,0 +1,4 @@ +removeInputs("name", "SafePoint|CallStaticJava|CallDynamicJava|CallJava|CallLeaf|CallRuntime|AbstractLock|CallLeafNoFP|Call|CallStaticJavaDirect", 5); +removeInputs("name", "Unlock|Lock", 7); +removeInputs("name", "Allocate", 7); +removeInputs("name", "AllocateArray", 9); --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/removeSelfLoops.filter 2009-08-01 04:20:01.314676660 +0100 @@ -0,0 +1,2 @@ +var f = new RemoveSelfLoopsFilter("Remove Self-Loops"); +f.apply(graph); \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/filters/split.filter 2009-08-01 04:20:01.727225174 +0100 @@ -0,0 +1,2 @@ +split("name", "BoxLock"); +split("name", "(Con.*)|(loadCon.*)"); \ No newline at end of file --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/ServerCompiler/src/com/sun/hotspot/igv/servercompiler/layer.xml 2009-08-01 04:20:02.136367881 +0100 @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/build.xml 2009-08-01 04:20:02.586596798 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.settings. 
+ + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/manifest.mf 2009-08-01 04:20:02.986870002 +0100 @@ -0,0 +1,6 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.settings +OpenIDE-Module-Layer: com/sun/hotspot/igv/settings/layer.xml +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/settings/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/nbproject/build-impl.xml 2009-08-01 04:20:03.404358337 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/nbproject/genfiles.properties 2009-08-01 04:20:03.835731479 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=8869440a +build.xml.script.CRC32=7ef09117 +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. +nbproject/build-impl.xml.data.CRC32=8869440a +nbproject/build-impl.xml.script.CRC32=1a0e7f21 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/nbproject/platform.properties 2009-08-01 04:20:04.238089820 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/nbproject/project.properties 2009-08-01 04:20:04.630254420 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/nbproject/project.xml 2009-08-01 04:20:05.031166940 +0100 @@ -0,0 +1,49 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.settings + + + + org.jdesktop.layout + + + + 1 + 1.4 + + + + org.netbeans.modules.options.api + + + + 1 + 1.5 + + + + org.openide.awt + + + + 6.11.0.1 + + + + org.openide.util + + + + 7.9.0.1 + + + + + com.sun.hotspot.igv.settings + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/nbproject/suite.properties 2009-08-01 04:20:05.439854616 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. 
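The Settings class added a little further below keeps the visualizer's preferences (node text, node width, network port, working directory) in a java.util.prefs node. As a minimal sketch of how another module consumes it, mirroring what the NetworkConnection Server does with the port setting, the illustrative listener below (not part of this change) assumes only the Preferences API and the constants defined in Settings; the class name is hypothetical.

import com.sun.hotspot.igv.settings.Settings;
import java.util.prefs.PreferenceChangeEvent;
import java.util.prefs.PreferenceChangeListener;

// Illustrative class, not part of this patch; the pattern matches
// Server.initializeNetwork() in the NetworkConnection module.
public class PortWatcher implements PreferenceChangeListener {

    private int port;

    public PortWatcher() {
        // Read the configured port, falling back to the shipped default ("4444").
        port = Integer.parseInt(Settings.get().get(Settings.PORT, Settings.PORT_DEFAULT));
        // Re-read whenever the user edits the value in the options panel.
        Settings.get().addPreferenceChangeListener(this);
    }

    public void preferenceChange(PreferenceChangeEvent e) {
        port = Integer.parseInt(Settings.get().get(Settings.PORT, Settings.PORT_DEFAULT));
    }

    public int getPort() {
        return port;
    }
}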
--- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/src/com/sun/hotspot/igv/settings/Bundle.properties 2009-08-01 04:20:05.975120835 +0100 @@ -0,0 +1,5 @@ +AdvancedOption_DisplayName_Settings=Settings +AdvancedOption_Tooltip_Settings=Application Settings +OpenIDE-Module-Name=Settings +OptionsCategory_Name_View=General +OptionsCategory_Title_View=View Settings --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/src/com/sun/hotspot/igv/settings/Settings.java 2009-08-01 04:20:06.367441548 +0100 @@ -0,0 +1,47 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.settings; + +import java.util.prefs.Preferences; + +/** + * + * @author Thomas Wuerthinger + */ +public class Settings { + + public final static String NODE_TEXT = "nodeText"; + public final static String NODE_TEXT_DEFAULT = "[idx] [name]"; + public final static String NODE_WIDTH = "nodeWidth"; + public final static String NODE_WIDTH_DEFAULT = "100"; + public final static String PORT = "port"; + public final static String PORT_DEFAULT = "4444"; + public final static String DIRECTORY = "directory"; + public final static String DIRECTORY_DEFAULT = System.getProperty("user.dir"); + + public static Preferences get() { + return Preferences.userNodeForPackage(Settings.class); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/src/com/sun/hotspot/igv/settings/ViewOptionsCategory.java 2009-08-01 04:20:06.784706761 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.settings; + +import javax.swing.Icon; +import javax.swing.ImageIcon; +import org.netbeans.spi.options.OptionsCategory; +import org.netbeans.spi.options.OptionsPanelController; +import org.openide.util.NbBundle; +import org.openide.util.Utilities; + +/** + * + * @author Thomas Wuerthinger + */ +public final class ViewOptionsCategory extends OptionsCategory { + + @Override + public Icon getIcon() { + return new ImageIcon(Utilities.loadImage("com/sun/hotspot/igv/settings/settings.gif")); + } + + public String getCategoryName() { + return NbBundle.getMessage(ViewOptionsCategory.class, "OptionsCategory_Name_View"); + } + + public String getTitle() { + return NbBundle.getMessage(ViewOptionsCategory.class, "OptionsCategory_Title_View"); + } + + public OptionsPanelController create() { + return new ViewOptionsPanelController(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/src/com/sun/hotspot/igv/settings/ViewOptionsPanelController.java 2009-08-01 04:20:07.202071280 +0100 @@ -0,0 +1,95 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.settings; + +import java.beans.PropertyChangeListener; +import java.beans.PropertyChangeSupport; +import javax.swing.JComponent; +import org.netbeans.spi.options.OptionsPanelController; +import org.openide.util.HelpCtx; +import org.openide.util.Lookup; + +/** + * + * @author Thomas Wuerthinger + */ +final class ViewOptionsPanelController extends OptionsPanelController { + + private ViewPanel panel; + private final PropertyChangeSupport pcs = new PropertyChangeSupport(this); + private boolean changed; + + public void update() { + getPanel().load(); + changed = false; + } + + public void applyChanges() { + getPanel().store(); + changed = false; + } + + public void cancel() { + // need not do anything special, if no changes have been persisted yet + } + + public boolean isValid() { + return getPanel().valid(); + } + + public boolean isChanged() { + return changed; + } + + public HelpCtx getHelpCtx() { + return null; // new HelpCtx("...ID") if you have a help set + } + + public JComponent getComponent(Lookup masterLookup) { + return getPanel(); + } + + public void addPropertyChangeListener(PropertyChangeListener l) { + pcs.addPropertyChangeListener(l); + } + + public void removePropertyChangeListener(PropertyChangeListener l) { + pcs.removePropertyChangeListener(l); + } + + private ViewPanel getPanel() { + if (panel == null) { + panel = new ViewPanel(this); + } + return panel; + } + + void changed() { + if (!changed) { + changed = true; + pcs.firePropertyChange(OptionsPanelController.PROP_CHANGED, false, true); + } + pcs.firePropertyChange(OptionsPanelController.PROP_VALID, null, null); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/src/com/sun/hotspot/igv/settings/ViewPanel.form 2009-08-01 04:20:07.619643930 +0100 @@ -0,0 +1,121 @@ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
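The ViewOptionsPanelController above fires PROP_CHANGED from its changed() method so that the options dialog can enable its Apply button, and the ViewPanel that follows is expected to invoke changed() when the user edits a value; that wiring is not shown in the generated initComponents() code. A minimal sketch of such wiring, placed in the ViewPanel constructor after initComponents(), is given below; the listener code is illustrative and not part of this change.

// Illustrative wiring (hypothetical, for the ViewPanel constructor after initComponents()):
// notify the controller whenever one of the three inputs changes.
javax.swing.event.DocumentListener textListener = new javax.swing.event.DocumentListener() {
    public void insertUpdate(javax.swing.event.DocumentEvent e) { controller.changed(); }
    public void removeUpdate(javax.swing.event.DocumentEvent e) { controller.changed(); }
    public void changedUpdate(javax.swing.event.DocumentEvent e) { controller.changed(); }
};
nodeTextArea.getDocument().addDocumentListener(textListener);

javax.swing.event.ChangeListener spinnerListener = new javax.swing.event.ChangeListener() {
    public void stateChanged(javax.swing.event.ChangeEvent e) { controller.changed(); }
};
nodeWidthSpinner.addChangeListener(spinnerListener);
portSpinner.addChangeListener(spinnerListener);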
--- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/src/com/sun/hotspot/igv/settings/ViewPanel.java 2009-08-01 04:20:08.045541463 +0100 @@ -0,0 +1,144 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.settings; + +/** + * + * @author Thomas Wuerthinger + */ +final class ViewPanel extends javax.swing.JPanel { + + private final ViewOptionsPanelController controller; + + ViewPanel(ViewOptionsPanelController controller) { + this.controller = controller; + initComponents(); + } + + /** This method is called from within the constructor to + * initialize the form. + * WARNING: Do NOT modify this code. The content of this method is + * always regenerated by the Form Editor. 
+ */ + // //GEN-BEGIN:initComponents + private void initComponents() { + + jPanel1 = new javax.swing.JPanel(); + jLabel1 = new javax.swing.JLabel(); + jLabel2 = new javax.swing.JLabel(); + portSpinner = new javax.swing.JSpinner(); + jScrollPane1 = new javax.swing.JScrollPane(); + nodeTextArea = new javax.swing.JTextArea(); + nodeWidthSpinner = new javax.swing.JSpinner(); + jLabel3 = new javax.swing.JLabel(); + + org.openide.awt.Mnemonics.setLocalizedText(jLabel1, "Node Text"); + + org.openide.awt.Mnemonics.setLocalizedText(jLabel2, "Node Width"); + + nodeTextArea.setColumns(20); + nodeTextArea.setRows(5); + jScrollPane1.setViewportView(nodeTextArea); + + org.openide.awt.Mnemonics.setLocalizedText(jLabel3, "Network Port"); + + org.jdesktop.layout.GroupLayout jPanel1Layout = new org.jdesktop.layout.GroupLayout(jPanel1); + jPanel1.setLayout(jPanel1Layout); + jPanel1Layout.setHorizontalGroup( + jPanel1Layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(org.jdesktop.layout.GroupLayout.TRAILING, jPanel1Layout.createSequentialGroup() + .addContainerGap() + .add(jPanel1Layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(jLabel1) + .add(jLabel3) + .add(jLabel2)) + .add(39, 39, 39) + .add(jPanel1Layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(portSpinner, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, 63, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE) + .add(nodeWidthSpinner, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, 63, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE) + .add(jScrollPane1, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, 365, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE)) + .addContainerGap()) + ); + jPanel1Layout.setVerticalGroup( + jPanel1Layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(jPanel1Layout.createSequentialGroup() + .addContainerGap() + .add(jPanel1Layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(jPanel1Layout.createSequentialGroup() + .add(jScrollPane1, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE) + .add(18, 18, 18) + .add(jPanel1Layout.createParallelGroup(org.jdesktop.layout.GroupLayout.BASELINE) + .add(nodeWidthSpinner, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE) + .add(jLabel2)) + .add(18, 18, 18) + .add(jPanel1Layout.createParallelGroup(org.jdesktop.layout.GroupLayout.BASELINE) + .add(portSpinner, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE) + .add(jLabel3))) + .add(jLabel1)) + .add(73, 73, 73)) + ); + + org.jdesktop.layout.GroupLayout layout = new org.jdesktop.layout.GroupLayout(this); + this.setLayout(layout); + layout.setHorizontalGroup( + layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(layout.createSequentialGroup() + .addContainerGap() + .add(jPanel1, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE) + .addContainerGap()) + ); + layout.setVerticalGroup( + layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) + .add(layout.createSequentialGroup() + .addContainerGap() + .add(jPanel1, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE, 232, org.jdesktop.layout.GroupLayout.PREFERRED_SIZE) + .addContainerGap(206, Short.MAX_VALUE)) + ); + }// 
//GEN-END:initComponents + void load() { + nodeTextArea.setText(Settings.get().get(Settings.NODE_TEXT, Settings.NODE_TEXT_DEFAULT)); + nodeWidthSpinner.setValue(Integer.parseInt(Settings.get().get(Settings.NODE_WIDTH, Settings.NODE_WIDTH_DEFAULT))); + portSpinner.setValue(Integer.parseInt(Settings.get().get(Settings.PORT, Settings.PORT_DEFAULT))); + } + + void store() { + Settings.get().put(Settings.NODE_TEXT, nodeTextArea.getText()); + Settings.get().put(Settings.NODE_WIDTH, nodeWidthSpinner.getValue().toString()); + Settings.get().put(Settings.PORT, portSpinner.getValue().toString()); + } + + boolean valid() { + return true; + } + // Variables declaration - do not modify//GEN-BEGIN:variables + private javax.swing.JLabel jLabel1; + private javax.swing.JLabel jLabel2; + private javax.swing.JLabel jLabel3; + private javax.swing.JPanel jPanel1; + private javax.swing.JScrollPane jScrollPane1; + private javax.swing.JTextArea nodeTextArea; + private javax.swing.JSpinner nodeWidthSpinner; + private javax.swing.JSpinner portSpinner; + // End of variables declaration//GEN-END:variables +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/src/com/sun/hotspot/igv/settings/layer.xml 2009-08-01 04:20:08.481151019 +0100 @@ -0,0 +1,9 @@ + + + + + + + + + Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/Settings/src/com/sun/hotspot/igv/settings/settings.gif differ --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/build.xml 2009-08-01 04:20:09.317008113 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.util. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/manifest.mf 2009-08-01 04:20:09.721738763 +0100 @@ -0,0 +1,5 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.util +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/util/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/nbproject/build-impl.xml 2009-08-01 04:20:10.172499291 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/nbproject/genfiles.properties 2009-08-01 04:20:10.602354906 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=a470a16f +build.xml.script.CRC32=466cf03b +build.xml.stylesheet.CRC32=05353c81 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. 
+nbproject/build-impl.xml.data.CRC32=a470a16f +nbproject/build-impl.xml.script.CRC32=39f45e01 +nbproject/build-impl.xml.stylesheet.CRC32=3f8b4615 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/nbproject/platform.properties 2009-08-01 04:20:10.999459009 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/nbproject/project.properties 2009-08-01 04:20:11.457375669 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/nbproject/project.xml 2009-08-01 04:20:11.843672284 +0100 @@ -0,0 +1,47 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.util + + + + com.sun.hotspot.igv.data + + + + 1.0 + + + + org.netbeans.api.visual + + + + 2.9 + + + + org.openide.nodes + + + + 7.2.1.1 + + + + org.openide.util + + + + 7.10.1.1 + + + + + com.sun.hotspot.igv.util + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/nbproject/suite.properties 2009-08-01 04:20:12.268688044 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/src/com/sun/hotspot/igv/util/BoundedZoomAction.java 2009-08-01 04:20:12.828920280 +0100 @@ -0,0 +1,182 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.util; + +import java.awt.Container; +import java.awt.Dimension; +import java.awt.Point; +import java.awt.Rectangle; +import javax.swing.JComponent; +import javax.swing.JScrollPane; +import org.netbeans.api.visual.action.WidgetAction; +import org.netbeans.api.visual.action.WidgetAction.State; +import org.netbeans.api.visual.action.WidgetAction.WidgetMouseWheelEvent; +import org.netbeans.api.visual.animator.SceneAnimator; +import org.netbeans.api.visual.widget.Scene; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class BoundedZoomAction extends WidgetAction.Adapter { + + private double minFactor = 0.0; + private double maxFactor = Double.MAX_VALUE; + private double zoomMultiplier; + private boolean useAnimator; + + public BoundedZoomAction(double zoomMultiplier, boolean useAnimator) { + this.zoomMultiplier = zoomMultiplier; + this.useAnimator = useAnimator; + } + + public double getMinFactor() { + return minFactor; + } + + public void setMinFactor(double d) { + minFactor = d; + } + + public double getMaxFactor() { + return maxFactor; + } + + public void setMaxFactor(double d) { + maxFactor = d; + } + + private JScrollPane findScrollPane(JComponent component) { + for (;;) { + if (component == null) { + return null; + } + if (component instanceof JScrollPane) { + return ((JScrollPane) component); + } + Container parent = component.getParent(); + if (!(parent instanceof JComponent)) { + return null; + } + component = (JComponent) parent; + } + } + + @Override + public State mouseWheelMoved(Widget widget, WidgetMouseWheelEvent event) { + final Scene scene = widget.getScene(); + int amount = event.getWheelRotation(); + JScrollPane scrollPane = findScrollPane(scene.getView()); + Point viewPosition = null; + Point mouseLocation = scene.convertSceneToView(event.getPoint()); + int xOffset = 0; + int yOffset = 0; + Point oldViewPosition = null; + Rectangle bounds = new Rectangle(scene.getBounds()); + Dimension componentSize = new Dimension(scene.getView().getPreferredSize()); + + if (scrollPane != null) { + viewPosition = new Point(scrollPane.getViewport().getViewPosition()); + oldViewPosition = new Point(viewPosition); + xOffset = (mouseLocation.x - viewPosition.x); + yOffset = (mouseLocation.y - viewPosition.y); + viewPosition.x += xOffset; + viewPosition.y += yOffset; + } + + if (useAnimator) { + SceneAnimator sceneAnimator = scene.getSceneAnimator(); + synchronized (sceneAnimator) { + double zoom = sceneAnimator.isAnimatingZoomFactor() ? 
sceneAnimator.getTargetZoomFactor() : scene.getZoomFactor(); + while (amount > 0 && zoom / zoomMultiplier >= minFactor && zoom / zoomMultiplier <= maxFactor) { + zoom /= zoomMultiplier; + if (viewPosition != null) { + viewPosition.x /= zoomMultiplier; + viewPosition.y /= zoomMultiplier; + bounds.width /= zoomMultiplier; + bounds.height /= zoomMultiplier; + componentSize.width /= zoomMultiplier; + componentSize.height /= zoomMultiplier; + } + amount--; + } + while (amount < 0 && zoom * zoomMultiplier >= minFactor && zoom * zoomMultiplier <= maxFactor) { + zoom *= zoomMultiplier; + if (viewPosition != null) { + viewPosition.x *= zoomMultiplier; + viewPosition.y *= zoomMultiplier; + bounds.width *= zoomMultiplier; + bounds.height *= zoomMultiplier; + componentSize.width *= zoomMultiplier; + componentSize.height *= zoomMultiplier; + } + amount++; + } + sceneAnimator.animateZoomFactor(zoom); + } + } else { + double zoom = scene.getZoomFactor(); + while (amount > 0 && zoom / zoomMultiplier >= minFactor && zoom / zoomMultiplier <= maxFactor) { + zoom /= zoomMultiplier; + if (viewPosition != null) { + viewPosition.x /= zoomMultiplier; + viewPosition.y /= zoomMultiplier; + bounds.width /= zoomMultiplier; + bounds.height /= zoomMultiplier; + componentSize.width /= zoomMultiplier; + componentSize.height /= zoomMultiplier; + } + amount--; + } + while (amount < 0 && zoom * zoomMultiplier >= minFactor && zoom * zoomMultiplier <= maxFactor) { + zoom *= zoomMultiplier; + if (viewPosition != null) { + viewPosition.x *= zoomMultiplier; + viewPosition.y *= zoomMultiplier; + bounds.width *= zoomMultiplier; + bounds.height *= zoomMultiplier; + componentSize.width *= zoomMultiplier; + componentSize.height *= zoomMultiplier; + } + amount++; + } + scene.setZoomFactor(zoom); + } + + if (scrollPane != null) { + scene.validate(); // Call validate to update size of scene + Dimension size = scrollPane.getViewport().getExtentSize(); + viewPosition.x -= xOffset; + viewPosition.y -= yOffset; + scene.resolveBounds(scene.getLocation(), bounds); + scene.getView().setPreferredSize(componentSize); + scene.getView().revalidate(); + scene.getView().addNotify(); + scrollPane.getViewport().setViewPosition(viewPosition); + } + + return WidgetAction.State.CONSUMED; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/src/com/sun/hotspot/igv/util/Bundle.properties 2009-08-01 04:20:13.262450742 +0100 @@ -0,0 +1 @@ +OpenIDE-Module-Name=Util --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/src/com/sun/hotspot/igv/util/ColorIcon.java 2009-08-01 04:20:13.654760643 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.util; + +import java.awt.Color; +import java.awt.Component; +import java.awt.Graphics; +import javax.swing.Icon; + +/** + * + * @author Thomas Wuerthinger + */ +public class ColorIcon implements Icon { + + private Color color; + + public ColorIcon(Color c) { + color = c; + } + + public void paintIcon(Component c, Graphics g, int x, int y) { + g.setColor(color); + g.fillRect(x, y, 16, 16); + } + + public int getIconWidth() { + return 16; + } + + public int getIconHeight() { + return 16; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/src/com/sun/hotspot/igv/util/ContextAction.java 2009-08-01 04:20:14.055159511 +0100 @@ -0,0 +1,95 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ +package com.sun.hotspot.igv.util; + +import java.awt.EventQueue; +import org.openide.util.ContextAwareAction; +import org.openide.util.Lookup; +import org.openide.util.LookupEvent; +import org.openide.util.LookupListener; +import org.openide.util.Utilities; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public abstract class ContextAction extends CallableSystemAction implements LookupListener, ContextAwareAction { + + private Lookup context = null; + private Lookup.Result result = null; + + public ContextAction() { + this(Utilities.actionsGlobalContext()); + } + + public ContextAction(Lookup context) { + init(context); + } + + private void init(Lookup context) { + this.context = context; + result = context.lookupResult(contextClass()); + result.addLookupListener(this); + resultChanged(null); + } + + public void resultChanged(LookupEvent e) { + if (result.allItems().size() != 0) { + update(result.allInstances().iterator().next()); + } else { + update(null); + } + } + + @Override + public void performAction() { + final T t = result.allInstances().iterator().next(); + + // Ensure it's AWT event thread + EventQueue.invokeLater(new Runnable() { + + public void run() { + performAction(t); + } + }); + } + + public void update(T t) { + if (t == null) { + setEnabled(false); + } else { + setEnabled(isEnabled(t)); + } + } + + public boolean isEnabled(T context) { + return true; + } + + public abstract Class contextClass(); + + public abstract void performAction(T context); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/src/com/sun/hotspot/igv/util/DoubleClickAction.java 2009-08-01 04:20:14.497091446 +0100 @@ -0,0 +1,49 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.util; + +import org.netbeans.api.visual.action.WidgetAction; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class DoubleClickAction extends WidgetAction.Adapter { + + private DoubleClickHandler handler; + + public DoubleClickAction(DoubleClickHandler handler) { + this.handler = handler; + } + + @Override + public WidgetAction.State mouseClicked(Widget widget, WidgetAction.WidgetMouseEvent event) { + if (event.getClickCount() > 1) { + handler.handleDoubleClick(widget, event); + return WidgetAction.State.CONSUMED; + } + return WidgetAction.State.REJECTED; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/src/com/sun/hotspot/igv/util/DoubleClickHandler.java 2009-08-01 04:20:14.996612771 +0100 @@ -0,0 +1,36 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.util; + +import org.netbeans.api.visual.action.WidgetAction.WidgetMouseEvent; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public interface DoubleClickHandler { + + public void handleDoubleClick(Widget w, WidgetMouseEvent e); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/src/com/sun/hotspot/igv/util/ExtendedSatelliteComponent.java 2009-08-01 04:20:15.623937244 +0100 @@ -0,0 +1,195 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.util; + +import org.netbeans.api.visual.widget.Scene; + +import javax.swing.*; +import java.awt.*; +import java.awt.event.*; + +/** + * @author David Kaspar + * @author Thomas Wuerthinger + */ +public class ExtendedSatelliteComponent extends JComponent implements MouseListener, MouseMotionListener, Scene.SceneListener, ComponentListener { + + private Scene scene; + private Image image; + private int imageWidth; + private int imageHeight; + + public ExtendedSatelliteComponent(Scene scene) { + this.scene = scene; + setDoubleBuffered(true); + setPreferredSize(new Dimension(128, 128)); + addMouseListener(this); + addMouseMotionListener(this); + } + + @Override + public void addNotify() { + super.addNotify(); + scene.addSceneListener(this); + JComponent viewComponent = scene.getView(); + if (viewComponent == null) { + viewComponent = scene.createView(); + } + viewComponent.addComponentListener(this); + repaint(); + } + + @Override + public void removeNotify() { + scene.getView().removeComponentListener(this); + scene.removeSceneListener(this); + super.removeNotify(); + } + + public void update() { + this.image = null; + repaint(); + } + + @Override + public void paint(Graphics g) { + Graphics2D gr = (Graphics2D) g; + super.paint(g); + Rectangle bounds = scene.getBounds(); + Dimension size = getSize(); + + double sx = bounds.width > 0 ? (double) size.width / bounds.width : 0.0; + double sy = bounds.width > 0 ? (double) size.height / bounds.height : 0.0; + double scale = Math.min(sx, sy); + + int vw = (int) (scale * bounds.width); + int vh = (int) (scale * bounds.height); + int vx = (size.width - vw) / 2; + int vy = (size.height - vh) / 2; + + + if (image == null || vw != imageWidth || vh != imageHeight) { + + imageWidth = vw; + imageHeight = vh; + image = this.createImage(imageWidth, imageHeight); + Graphics2D ig = (Graphics2D) image.getGraphics(); + ig.scale(scale, scale); + scene.paint(ig); + } + + gr.drawImage(image, vx, vy, this); + + JComponent component = scene.getView(); + double zoomFactor = scene.getZoomFactor(); + Rectangle viewRectangle = component != null ? component.getVisibleRect() : null; + if (viewRectangle != null) { + Rectangle window = new Rectangle( + (int) ((double) viewRectangle.x * scale / zoomFactor), + (int) ((double) viewRectangle.y * scale / zoomFactor), + (int) ((double) viewRectangle.width * scale / zoomFactor), + (int) ((double) viewRectangle.height * scale / zoomFactor)); + window.translate(vx, vy); + gr.setColor(new Color(200, 200, 200, 128)); + gr.fill(window); + gr.setColor(Color.BLACK); + gr.drawRect(window.x, window.y, window.width - 1, window.height - 1); + } + } + + public void mouseClicked(MouseEvent e) { + } + + public void mousePressed(MouseEvent e) { + moveVisibleRect(e.getPoint()); + } + + public void mouseReleased(MouseEvent e) { + moveVisibleRect(e.getPoint()); + } + + public void mouseEntered(MouseEvent e) { + } + + public void mouseExited(MouseEvent e) { + } + + public void mouseDragged(MouseEvent e) { + moveVisibleRect(e.getPoint()); + } + + public void mouseMoved(MouseEvent e) { + } + + private void moveVisibleRect(Point center) { + JComponent component = scene.getView(); + if (component == null) { + return; + } + double zoomFactor = scene.getZoomFactor(); + Rectangle bounds = scene.getBounds(); + Dimension size = getSize(); + + double sx = bounds.width > 0 ? (double) size.width / bounds.width : 0.0; + double sy = bounds.width > 0 ? 
(double) size.height / bounds.height : 0.0; + double scale = Math.min(sx, sy); + + int vw = (int) (scale * bounds.width); + int vh = (int) (scale * bounds.height); + int vx = (size.width - vw) / 2; + int vy = (size.height - vh) / 2; + + int cx = (int) ((double) (center.x - vx) / scale * zoomFactor); + int cy = (int) ((double) (center.y - vy) / scale * zoomFactor); + + Rectangle visibleRect = component.getVisibleRect(); + visibleRect.x = cx - visibleRect.width / 2; + visibleRect.y = cy - visibleRect.height / 2; + component.scrollRectToVisible(visibleRect); + + } + + public void sceneRepaint() { + } + + public void sceneValidating() { + } + + public void sceneValidated() { + } + + public void componentResized(ComponentEvent e) { + repaint(); + } + + public void componentMoved(ComponentEvent e) { + repaint(); + } + + public void componentShown(ComponentEvent e) { + } + + public void componentHidden(ComponentEvent e) { + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/src/com/sun/hotspot/igv/util/ExtendedSelectAction.java 2009-08-01 04:20:16.438454403 +0100 @@ -0,0 +1,69 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.util; + +import java.awt.event.MouseEvent; +import javax.swing.JPanel; +import org.netbeans.api.visual.action.ActionFactory; +import org.netbeans.api.visual.action.SelectProvider; +import org.netbeans.api.visual.action.WidgetAction; +import org.netbeans.api.visual.action.WidgetAction.State; +import org.netbeans.api.visual.action.WidgetAction.WidgetKeyEvent; +import org.netbeans.api.visual.action.WidgetAction.WidgetMouseEvent; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class ExtendedSelectAction extends WidgetAction.Adapter { + + private WidgetAction innerAction; + private JPanel panel; + + public ExtendedSelectAction(SelectProvider provider) { + innerAction = ActionFactory.createSelectAction(provider); + panel = new JPanel(); + } + + @Override + public State mousePressed(Widget widget, WidgetMouseEvent event) { + // TODO: Solve this differently? 
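+ // Note on the workaround above: every mouse button except the middle one (BUTTON2) is
+ // re-dispatched to the wrapped select action as a synthetic BUTTON1 press on a dummy
+ // panel, so selection also reacts to e.g. right-clicks; middle-button presses fall
+ // through to the adapter's default handling and the event is not consumed.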
+ if (event.getButton() != MouseEvent.BUTTON2) { + return innerAction.mousePressed(widget, new WidgetMouseEvent(event.getEventID(), new MouseEvent(panel, (int) event.getEventID(), event.getWhen(), event.getModifiersEx(), event.getPoint().x, event.getPoint().y, event.getClickCount(), event.isPopupTrigger(), MouseEvent.BUTTON1))); + } else { + return super.mousePressed(widget, event); + } + } + + @Override + public State mouseReleased(Widget widget, WidgetMouseEvent event) { + return innerAction.mouseReleased(widget, event); + } + + @Override + public State keyTyped(Widget widget, WidgetKeyEvent event) { + return innerAction.keyTyped(widget, event); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/src/com/sun/hotspot/igv/util/PropertiesSheet.java 2009-08-01 04:20:16.860236871 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.util; + +import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.data.Property; +import java.lang.reflect.InvocationTargetException; +import org.openide.nodes.Node; +import org.openide.nodes.Sheet; + +/** + * + * @author Thomas Wuerthinger + */ +public class PropertiesSheet { + + public static void initializeSheet(final Properties properties, Sheet s) { + + Sheet.Set set1 = Sheet.createPropertiesSet(); + set1.setDisplayName("Properties"); + for (final Property p : properties) { + Node.Property prop = new Node.Property(String.class) { + + @Override + public boolean canRead() { + return true; + } + + @Override + public String getValue() throws IllegalAccessException, InvocationTargetException { + return p.getValue(); + } + + @Override + public boolean canWrite() { + return false; + } + + @Override + public void setValue(String arg0) throws IllegalAccessException, IllegalArgumentException, InvocationTargetException { + properties.setProperty(p.getName(), arg0); + } + }; + prop.setName(p.getName()); + set1.put(prop); + } + s.put(set1); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/src/com/sun/hotspot/igv/util/RangeSlider.java 2009-08-01 04:20:17.324441187 +0100 @@ -0,0 +1,360 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.util; + +import com.sun.hotspot.igv.data.ChangedListener; +import java.awt.Color; +import java.awt.Cursor; +import java.awt.Dimension; +import java.awt.FontMetrics; +import java.awt.Graphics; +import java.awt.Graphics2D; +import java.awt.Point; +import java.awt.Rectangle; +import java.awt.RenderingHints; +import java.awt.event.MouseEvent; +import java.awt.event.MouseListener; +import java.awt.event.MouseMotionListener; +import java.util.List; +import javax.swing.JComponent; + +/** + * + * @author Thomas Wuerthinger + */ +public class RangeSlider extends JComponent implements ChangedListener, MouseListener, MouseMotionListener { + + public static final int HEIGHT = 40; + public static final int BAR_HEIGHT = 22; + public static final int BAR_SELECTION_ENDING_HEIGHT = 16; + public static final int BAR_SELECTION_HEIGHT = 10; + public static final int BAR_THICKNESS = 2; + public static final int BAR_CIRCLE_SIZE = 9; + public static final int MOUSE_ENDING_OFFSET = 3; + public static final Color BACKGROUND_COLOR = Color.white; + public static final Color BAR_COLOR = Color.black; + public static final Color BAR_SELECTION_COLOR = new Color(255, 0, 0, 120); + public static final Color BAR_SELECTION_COLOR_ROLLOVER = new Color(255, 0, 255, 120); + public static final Color BAR_SELECTION_COLOR_DRAG = new Color(0, 0, 255, 120); + private RangeSliderModel model; + private State state; + private Point startPoint; + private RangeSliderModel tempModel; + private boolean isOverBar; + + private enum State { + + Initial, + DragBar, + DragFirstPosition, + DragSecondPosition + } + + public RangeSlider() { + state = State.Initial; + this.addMouseMotionListener(this); + this.addMouseListener(this); + } + + public void setModel(RangeSliderModel newModel) { + if (model != null) { + model.getChangedEvent().removeListener(this); + model.getColorChangedEvent().removeListener(this); + } + if (newModel != null) { + newModel.getChangedEvent().addListener(this); + newModel.getColorChangedEvent().addListener(this); + } + this.model = newModel; + update(); + } + + private RangeSliderModel getPaintingModel() { + if (tempModel != null) { + return tempModel; + } + return model; + } + + @Override + public Dimension getPreferredSize() { + Dimension d = super.getPreferredSize(); + d.height = HEIGHT; + return d; + } + + public void changed(RangeSliderModel source) { + update(); + } + + private void update() { + this.repaint(); + } + + 
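+ // Layout helpers below: with n positions and a component width of w, position i is
+ // centered at x = (i + 1) * w / (n + 1); each position owns a slot of width w / (n + 1)
+ // delimited by getStartXPosition(i) and getEndXPosition(i).
+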
private int getXPosition(int index) { + assert index >= 0 && index < getPaintingModel().getPositions().size(); + return getXOffset() * (index + 1); + } + + private int getXOffset() { + int size = getPaintingModel().getPositions().size(); + int width = getWidth(); + return (width / (size + 1)); + } + + private int getEndXPosition(int index) { + return getXPosition(index) + getXOffset() / 2; + } + + private int getStartXPosition(int index) { + return getXPosition(index) - getXOffset() / 2; + } + + @Override + public void paint(Graphics g) { + super.paint(g); + Graphics2D g2 = (Graphics2D) g; + g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, + RenderingHints.VALUE_ANTIALIAS_ON); + int width = getWidth(); + int height = getHeight(); + + g2.setColor(BACKGROUND_COLOR); + g2.fillRect(0, 0, width, height); + + // Nothing to paint? + if (getPaintingModel() == null || getPaintingModel().getPositions().size() == 0) { + return; + } + + int firstPos = getPaintingModel().getFirstPosition(); + int secondPos = getPaintingModel().getSecondPosition(); + + paintSelected(g2, firstPos, secondPos); + paintBar(g2); + + } + + private int getBarStartY() { + return getHeight() - BAR_HEIGHT; + } + + private void paintBar(Graphics2D g) { + List list = getPaintingModel().getPositions(); + int barStartY = getBarStartY(); + + g.setColor(BAR_COLOR); + g.fillRect(getXPosition(0), barStartY + BAR_HEIGHT / 2 - BAR_THICKNESS / 2, getXPosition(list.size() - 1) - getXPosition(0), BAR_THICKNESS); + + int circleCenterY = barStartY + BAR_HEIGHT / 2; + for (int i = 0; i < list.size(); i++) { + int curX = getXPosition(i); + g.setColor(getPaintingModel().getColors().get(i)); + g.fillOval(curX - BAR_CIRCLE_SIZE / 2, circleCenterY - BAR_CIRCLE_SIZE / 2, BAR_CIRCLE_SIZE, BAR_CIRCLE_SIZE); + g.setColor(Color.black); + g.drawOval(curX - BAR_CIRCLE_SIZE / 2, circleCenterY - BAR_CIRCLE_SIZE / 2, BAR_CIRCLE_SIZE, BAR_CIRCLE_SIZE); + + + String curS = list.get(i); + if (curS != null && curS.length() > 0) { + int startX = getStartXPosition(i); + int endX = getEndXPosition(i); + FontMetrics metrics = g.getFontMetrics(); + Rectangle bounds = metrics.getStringBounds(curS, g).getBounds(); + if (bounds.width < endX - startX && bounds.height < barStartY) { + g.setColor(Color.black); + g.drawString(curS, startX + (endX - startX) / 2 - bounds.width / 2, barStartY / 2 + bounds.height / 2); + } + } + } + + } + + private void paintSelected(Graphics2D g, int start, int end) { + + int startX = getStartXPosition(start); + int endX = getEndXPosition(end); + int barStartY = getBarStartY(); + int barSelectionEndingStartY = barStartY + BAR_HEIGHT / 2 - BAR_SELECTION_ENDING_HEIGHT / 2; + paintSelectedEnding(g, startX, barSelectionEndingStartY); + paintSelectedEnding(g, endX, barSelectionEndingStartY); + + g.setColor(BAR_SELECTION_COLOR); + if (state == State.DragBar) { + g.setColor(BAR_SELECTION_COLOR_DRAG); + } else if (isOverBar) { + g.setColor(BAR_SELECTION_COLOR_ROLLOVER); + } + g.fillRect(startX, barStartY + BAR_HEIGHT / 2 - BAR_SELECTION_HEIGHT / 2, endX - startX, BAR_SELECTION_HEIGHT); + } + + private void paintSelectedEnding(Graphics g, int x, int y) { + g.setColor(BAR_COLOR); + g.fillRect(x - BAR_THICKNESS / 2, y, BAR_THICKNESS, BAR_SELECTION_ENDING_HEIGHT); + } + + private boolean isOverSecondPosition(Point p) { + if (p.y >= getBarStartY()) { + int destX = getEndXPosition(getPaintingModel().getSecondPosition()); + int off = Math.abs(destX - p.x); + return off <= MOUSE_ENDING_OFFSET; + } + return false; + } + + private boolean 
isOverFirstPosition(Point p) { + if (p.y >= getBarStartY()) { + int destX = getStartXPosition(getPaintingModel().getFirstPosition()); + int off = Math.abs(destX - p.x); + return off <= MOUSE_ENDING_OFFSET; + } + return false; + } + + private boolean isOverSelection(Point p) { + if (p.y >= getBarStartY() && !isOverFirstPosition(p) && !isOverSecondPosition(p)) { + return p.x > getStartXPosition(getPaintingModel().getFirstPosition()) && p.x < getEndXPosition(getPaintingModel().getSecondPosition()); + } + return false; + } + + public void mouseDragged(MouseEvent e) { + if (state == State.DragBar) { + int firstX = this.getStartXPosition(model.getFirstPosition()); + int newFirstX = firstX + e.getPoint().x - startPoint.x; + int newIndex = getIndexFromPosition(newFirstX) + 1; + if (newIndex + model.getSecondPosition() - model.getFirstPosition() >= model.getPositions().size()) { + newIndex = model.getPositions().size() - (model.getSecondPosition() - model.getFirstPosition()) - 1; + } + int secondPosition = newIndex + model.getSecondPosition() - model.getFirstPosition(); + tempModel.setPositions(newIndex, secondPosition); + update(); + } else if (state == State.DragFirstPosition) { + int firstPosition = getIndexFromPosition(e.getPoint().x) + 1; + int secondPosition = model.getSecondPosition(); + if (firstPosition > secondPosition) { + firstPosition--; + } + tempModel.setPositions(firstPosition, secondPosition); + update(); + } else if (state == State.DragSecondPosition) { + int firstPosition = model.getFirstPosition(); + int secondPosition = getIndexFromPosition(e.getPoint().x); + if (secondPosition < firstPosition) { + secondPosition++; + } + tempModel.setPositions(firstPosition, secondPosition); + update(); + } + } + + private int getIndexFromPosition(int x) { + if (x < getXPosition(0)) { + return -1; + } + for (int i = 0; i < getPaintingModel().getPositions().size() - 1; i++) { + int startX = getXPosition(i); + int endX = getXPosition(i + 1); + if (x >= startX && x <= endX) { + return i; + } + } + return getPaintingModel().getPositions().size() - 1; + } + + private int getCircleIndexFromPosition(int x) { + int result = 0; + for (int i = 1; i < getPaintingModel().getPositions().size() - 1; i++) { + if (x > getStartXPosition(i)) { + result = i; + } + } + return result; + } + + public void mouseMoved(MouseEvent e) { + isOverBar = false; + if (model == null) { + return; + } + + + Point p = e.getPoint(); + if (isOverFirstPosition(p) || isOverSecondPosition(p)) { + setCursor(Cursor.getPredefinedCursor(Cursor.E_RESIZE_CURSOR)); + } else if (isOverSelection(p)) { + isOverBar = true; + setCursor(Cursor.getPredefinedCursor(Cursor.HAND_CURSOR)); + } else { + this.setCursor(Cursor.getDefaultCursor()); + } + repaint(); + } + + public void mouseClicked(MouseEvent e) { + if (e.getClickCount() > 1) { + // Double click + int index = getCircleIndexFromPosition(e.getPoint().x); + model.setPositions(index, index); + } + } + + public void mousePressed(MouseEvent e) { + if (model == null) { + return; + } + + Point p = e.getPoint(); + if (isOverFirstPosition(p)) { + state = State.DragFirstPosition; + } else if (isOverSecondPosition(p)) { + state = State.DragSecondPosition; + } else if (isOverSelection(p)) { + state = State.DragBar; + } else { + return; + } + + startPoint = e.getPoint(); + tempModel = model.copy(); + } + + public void mouseReleased(MouseEvent e) { + if (model == null || tempModel == null) { + return; + } + state = State.Initial; + model.setPositions(tempModel.getFirstPosition(), 
tempModel.getSecondPosition()); + tempModel = null; + } + + public void mouseEntered(MouseEvent e) { + } + + public void mouseExited(MouseEvent e) { + isOverBar = false; + repaint(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/Util/src/com/sun/hotspot/igv/util/RangeSliderModel.java 2009-08-01 04:20:17.801668202 +0100 @@ -0,0 +1,136 @@ +/* + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.util; + +import com.sun.hotspot.igv.data.ChangedEventProvider; +import com.sun.hotspot.igv.data.ChangedEvent; +import java.awt.Color; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * + * @author Thomas Wuerthinger + */ +public class RangeSliderModel implements ChangedEventProvider { + + // Warning: Update setData method if fields are added + private ChangedEvent changedEvent; + private ChangedEvent colorChangedEvent; + private List positions; + private int firstPosition; + private int secondPosition; + private List colors; + + public void setData(RangeSliderModel model) { + boolean changed = false; + changed |= (positions != model.positions); + positions = model.positions; + changed |= (firstPosition != model.firstPosition); + firstPosition = model.firstPosition; + changed |= (secondPosition != model.secondPosition); + secondPosition = model.secondPosition; + boolean colorChanged = (colors != model.colors); + colors = model.colors; + if (changed) { + changedEvent.fire(); + } + if (colorChanged) { + colorChangedEvent.fire(); + } + } + + public RangeSliderModel(List positions) { + assert positions.size() > 0; + this.changedEvent = new ChangedEvent(this); + this.colorChangedEvent = new ChangedEvent(this); + setPositions(positions); + } + + protected void setPositions(List positions) { + this.positions = positions; + colors = new ArrayList(); + for (int i = 0; i < positions.size(); i++) { + colors.add(Color.black); + } + changedEvent.fire(); + colorChangedEvent.fire(); + } + + public void setColors(List colors) { + this.colors = colors; + colorChangedEvent.fire(); + } + + public List getColors() { + return colors; + } + + public RangeSliderModel copy() { + RangeSliderModel newModel = new RangeSliderModel(positions); + newModel.firstPosition = firstPosition; + newModel.secondPosition = secondPosition; + newModel.colors = 
colors; + return newModel; + } + + public List getPositions() { + return Collections.unmodifiableList(positions); + } + + public int getFirstPosition() { + return firstPosition; + } + + public int getSecondPosition() { + return secondPosition; + } + + public void setPositions(int fp, int sp) { + assert fp >= 0 && fp < positions.size(); + assert sp >= 0 && sp < positions.size(); + firstPosition = fp; + secondPosition = sp; + ensureOrder(); + changedEvent.fire(); + } + + private void ensureOrder() { + if (secondPosition < firstPosition) { + int tmp = secondPosition; + secondPosition = firstPosition; + firstPosition = tmp; + } + } + + public ChangedEvent getColorChangedEvent() { + return colorChangedEvent; + } + + public ChangedEvent getChangedEvent() { + return changedEvent; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/build.xml 2009-08-01 04:20:18.350726251 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds, tests, and runs the project com.sun.hotspot.igv.view. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/manifest.mf 2009-08-01 04:20:18.776977701 +0100 @@ -0,0 +1,6 @@ +Manifest-Version: 1.0 +OpenIDE-Module: com.sun.hotspot.igv.view +OpenIDE-Module-Layer: com/sun/hotspot/igv/view/layer.xml +OpenIDE-Module-Localizing-Bundle: com/sun/hotspot/igv/view/Bundle.properties +OpenIDE-Module-Specification-Version: 1.0 + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/nbproject/build-impl.xml 2009-08-01 04:20:19.218938995 +0100 @@ -0,0 +1,30 @@ + + + + + + You must set 'suite.dir' to point to your containing module suite + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/nbproject/genfiles.properties 2009-08-01 04:20:19.625580886 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=2de95ef6 +build.xml.script.CRC32=31afe4b1 +build.xml.stylesheet.CRC32=79c3b980 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. 
+nbproject/build-impl.xml.data.CRC32=2de95ef6 +nbproject/build-impl.xml.script.CRC32=fa7a4119 +nbproject/build-impl.xml.stylesheet.CRC32=deb65f65 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/nbproject/platform.properties 2009-08-01 04:20:20.041927453 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + harness,\ + ide8,\ + java1,\ + nb6.0,\ + profiler2 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.applemenu,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform7 +nbjdk.active=JDK_1.6 +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/nbproject/project.properties 2009-08-01 04:20:20.466339084 +0100 @@ -0,0 +1,2 @@ +javac.source=1.5 +javac.compilerargs=-Xlint -Xlint:-serial --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/nbproject/project.xml 2009-08-01 04:20:20.901327267 +0100 @@ -0,0 +1,151 @@ + + + org.netbeans.modules.apisupport.project + + + com.sun.hotspot.igv.view + + + + com.sun.hotspot.igv.data + + + + 1.0 + + + + com.sun.hotspot.igv.difference + + + + 1.0 + + + + com.sun.hotspot.igv.filter + + + + 1.0 + + + + com.sun.hotspot.igv.graph + + + + 1.0 + + + + com.sun.hotspot.igv.hierarchicallayout + + + + 1.0 + + + + com.sun.hotspot.igv.layout + + + + 1.0 + + + + com.sun.hotspot.igv.settings + + + + 1.0 + + + + com.sun.hotspot.igv.svg + + + + 1.0 + + + + com.sun.hotspot.igv.util + + + + 1.0 + + + + org.netbeans.api.visual + + + + 2.9 + + + + org.openide.actions + + + + 6.6.0.1 + + + + org.openide.awt + + + + 6.11.0.1 + + + + org.openide.dialogs + + + + 7.5.1 + + + + org.openide.loaders + + + + 6.7 + + + + org.openide.nodes + + + + 7.2.1.1 + + + + org.openide.util + + + + 7.9.0.1 + + + + org.openide.windows + + + + 6.16 + + + + + com.sun.hotspot.igv.view + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/nbproject/suite.properties 2009-08-01 04:20:21.336928689 +0100 @@ -0,0 +1 @@ +suite.dir=${basedir}/.. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/META-INF/services/com.sun.hotspot.igv.data.services.GraphViewer 2009-08-01 04:20:21.907377480 +0100 @@ -0,0 +1 @@ +com.sun.hotspot.igv.view.GraphViewerImplementation --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/META-INF/services/com.sun.hotspot.igv.data.services.InputGraphProvider 2009-08-01 04:20:22.329674899 +0100 @@ -0,0 +1 @@ +com.sun.hotspot.igv.view.EditorInputGraphProvider --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/BoundedZoomAction.java 2009-08-01 04:20:22.910098730 +0100 @@ -0,0 +1,175 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view; + +import java.awt.Container; +import java.awt.Dimension; +import java.awt.Point; +import java.awt.Rectangle; +import javax.swing.JComponent; +import javax.swing.JScrollPane; +import org.netbeans.api.visual.action.WidgetAction; +import org.netbeans.api.visual.action.WidgetAction.State; +import org.netbeans.api.visual.action.WidgetAction.WidgetMouseWheelEvent; +import org.netbeans.api.visual.animator.SceneAnimator; +import org.netbeans.api.visual.widget.Scene; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class BoundedZoomAction extends WidgetAction.Adapter { + + private double minFactor = 0.0; + private double maxFactor = Double.MAX_VALUE; + private double zoomMultiplier; + private boolean useAnimator; + + public BoundedZoomAction(double zoomMultiplier, boolean useAnimator) { + assert zoomMultiplier > 1.0; + this.zoomMultiplier = zoomMultiplier; + this.useAnimator = useAnimator; + } + + public double getMinFactor() { + return minFactor; + } + + public void setMinFactor(double d) { + minFactor = d; + } + + public double getMaxFactor() { + return maxFactor; + } + + public void setMaxFactor(double d) { + maxFactor = d; + } + + private JScrollPane findScrollPane(JComponent component) { + for (;;) { + if (component == null) { + return null; + } + if (component instanceof JScrollPane) { + return ((JScrollPane) component); + } + Container parent = component.getParent(); + if (!(parent instanceof JComponent)) { + return null; + } + component = (JComponent) parent; + } + } + + @Override + public State mouseWheelMoved(Widget widget, WidgetMouseWheelEvent event) { + final Scene scene = widget.getScene(); + int amount = event.getWheelRotation(); + JScrollPane scrollPane = findScrollPane(scene.getView()); + Point viewPosition = null; + Point mouseLocation = scene.convertSceneToView(event.getPoint()); + int xOffset = 0; + int yOffset = 0; + Rectangle bounds = new Rectangle(scene.getBounds()); + Dimension componentSize = new Dimension(scene.getView().getPreferredSize()); + if (scrollPane != null) { + viewPosition = new Point(scrollPane.getViewport().getViewPosition()); + xOffset = (mouseLocation.x - viewPosition.x); + yOffset = (mouseLocation.y - viewPosition.y); + viewPosition.x += xOffset; + viewPosition.y += yOffset; + } + + if (useAnimator) { + SceneAnimator sceneAnimator = scene.getSceneAnimator(); + synchronized (sceneAnimator) { + double zoom = sceneAnimator.isAnimatingZoomFactor() ? 
sceneAnimator.getTargetZoomFactor() : scene.getZoomFactor(); + while (amount > 0 && zoom / zoomMultiplier >= minFactor) { + zoom /= zoomMultiplier; + if (viewPosition != null) { + viewPosition.x /= zoomMultiplier; + viewPosition.y /= zoomMultiplier; + bounds.width /= zoomMultiplier; + bounds.height /= zoomMultiplier; + componentSize.width /= zoomMultiplier; + componentSize.height /= zoomMultiplier; + } + amount--; + } + while (amount < 0 && zoom * zoomMultiplier <= maxFactor) { + zoom *= zoomMultiplier; + if (viewPosition != null) { + viewPosition.x *= zoomMultiplier; + viewPosition.y *= zoomMultiplier; + bounds.width *= zoomMultiplier; + bounds.height *= zoomMultiplier; + componentSize.width *= zoomMultiplier; + componentSize.height *= zoomMultiplier; + } + amount++; + } + sceneAnimator.animateZoomFactor(zoom); + } + } else { + double zoom = scene.getZoomFactor(); + while (amount > 0 && zoom / zoomMultiplier >= minFactor) { + zoom /= zoomMultiplier; + if (viewPosition != null) { + viewPosition.x /= zoomMultiplier; + viewPosition.y /= zoomMultiplier; + bounds.width /= zoomMultiplier; + bounds.height /= zoomMultiplier; + componentSize.width /= zoomMultiplier; + componentSize.height /= zoomMultiplier; + } + amount--; + } + while (amount < 0 && zoom * zoomMultiplier <= maxFactor) { + zoom *= zoomMultiplier; + if (viewPosition != null) { + viewPosition.x *= zoomMultiplier; + viewPosition.y *= zoomMultiplier; + bounds.width *= zoomMultiplier; + bounds.height *= zoomMultiplier; + componentSize.width *= zoomMultiplier; + componentSize.height *= zoomMultiplier; + } + amount++; + } + scene.setZoomFactor(zoom); + } + + if (scrollPane != null) { + viewPosition.x -= xOffset; + viewPosition.y -= yOffset; + scrollPane.getViewport().setViewPosition(viewPosition); + } + + + return WidgetAction.State.CONSUMED; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/Bundle.properties 2009-08-01 04:20:23.389905235 +0100 @@ -0,0 +1,3 @@ +HINT_EditorTopComponent=This is a Editor window +OpenIDE-Module-Name=View +CTL_EditorTopComponent=Editor Window --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/ConnectionAnchor.java 2009-08-01 04:20:23.810499947 +0100 @@ -0,0 +1,84 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view; + +import com.sun.hotspot.igv.view.widgets.SlotWidget; +import java.awt.Point; +import java.awt.Rectangle; +import org.netbeans.api.visual.anchor.Anchor; +import org.netbeans.api.visual.anchor.Anchor.Entry; +import org.netbeans.api.visual.anchor.Anchor.Result; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class ConnectionAnchor extends Anchor { + + public enum HorizontalAlignment { + + Left, + Center, + Right + } + private HorizontalAlignment alignment; + + public ConnectionAnchor(Widget widget) { + this(HorizontalAlignment.Center, widget); + } + + public ConnectionAnchor(HorizontalAlignment alignment, Widget widget) { + super(widget); + this.alignment = alignment; + } + + public Result compute(Entry entry) { + return new Result(getRelatedSceneLocation(), Anchor.DIRECTION_ANY); + } + + @Override + public Point getRelatedSceneLocation() { + Point p = null; + Widget w = getRelatedWidget(); + if (w != null) { + if (w instanceof SlotWidget) { + p = ((SlotWidget) w).getAnchorPosition(); + } else { + Rectangle r = w.convertLocalToScene(w.getBounds()); + int y = r.y + r.height / 2; + int x = r.x; + if (alignment == HorizontalAlignment.Center) { + x = r.x + r.width / 2; + } else if (alignment == HorizontalAlignment.Right) { + x = r.x + r.width; + } + + p = new Point(x, y); + } + } + + return p; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/DiagramScene.java 2009-08-01 04:20:24.253961733 +0100 @@ -0,0 +1,1254 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view; + +import com.sun.hotspot.igv.view.widgets.BlockWidget; +import com.sun.hotspot.igv.view.widgets.LineWidget; +import com.sun.hotspot.igv.util.DoubleClickAction; +import com.sun.hotspot.igv.data.InputBlock; +import com.sun.hotspot.igv.data.InputNode; +import com.sun.hotspot.igv.graph.Connection; +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.graph.InputSlot; +import com.sun.hotspot.igv.graph.OutputSlot; +import com.sun.hotspot.igv.graph.Slot; +import com.sun.hotspot.igv.hierarchicallayout.HierarchicalClusterLayoutManager; +import com.sun.hotspot.igv.hierarchicallayout.OldHierarchicalLayoutManager; +import com.sun.hotspot.igv.hierarchicallayout.HierarchicalLayoutManager; +import com.sun.hotspot.igv.view.widgets.FigureWidget; +import com.sun.hotspot.igv.view.widgets.InputSlotWidget; +import com.sun.hotspot.igv.view.widgets.OutputSlotWidget; +import com.sun.hotspot.igv.view.widgets.SlotWidget; +import com.sun.hotspot.igv.layout.LayoutGraph; +import com.sun.hotspot.igv.data.services.Scheduler; +import com.sun.hotspot.igv.data.ChangedListener; +import com.sun.hotspot.igv.graph.Block; +import com.sun.hotspot.igv.util.ColorIcon; +import com.sun.hotspot.igv.util.ExtendedSelectAction; +import java.awt.Color; +import java.awt.Dimension; +import java.awt.Point; +import java.awt.Rectangle; +import java.awt.event.ActionEvent; +import java.awt.event.FocusEvent; +import java.awt.event.FocusListener; +import java.awt.event.MouseEvent; +import java.awt.event.MouseListener; +import java.awt.event.MouseMotionListener; +import java.awt.event.MouseWheelEvent; +import java.awt.event.MouseWheelListener; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import javax.swing.AbstractAction; +import javax.swing.Action; +import javax.swing.BorderFactory; +import javax.swing.JComponent; +import javax.swing.JPopupMenu; +import javax.swing.JScrollPane; +import javax.swing.SwingUtilities; +import javax.swing.event.ChangeEvent; +import javax.swing.event.ChangeListener; +import javax.swing.event.UndoableEditEvent; +import javax.swing.undo.AbstractUndoableEdit; +import javax.swing.undo.CannotRedoException; +import javax.swing.undo.CannotUndoException; +import org.netbeans.api.visual.action.ActionFactory; +import org.netbeans.api.visual.action.PopupMenuProvider; +import org.netbeans.api.visual.action.RectangularSelectDecorator; +import org.netbeans.api.visual.action.RectangularSelectProvider; +import org.netbeans.api.visual.action.SelectProvider; +import org.netbeans.api.visual.action.WidgetAction; +import org.netbeans.api.visual.animator.SceneAnimator; +import org.netbeans.api.visual.layout.LayoutFactory; +import org.netbeans.api.visual.widget.ConnectionWidget; +import org.netbeans.api.visual.widget.LayerWidget; +import org.netbeans.api.visual.widget.Scene; +import org.netbeans.api.visual.widget.Widget; +import org.netbeans.api.visual.widget.LabelWidget; +import org.openide.awt.UndoRedo; +import org.openide.util.Lookup; +import org.openide.util.lookup.AbstractLookup; +import org.openide.util.lookup.InstanceContent; + +/** + * + * @author Thomas Wuerthinger + */ +public class DiagramScene extends Scene implements ChangedListener { + + private HashMap figureWidgets; + private HashMap slotWidgets; + private HashMap connectionWidgets; + private HashMap 
blockWidgets; + private Widget hoverWidget; + private WidgetAction hoverAction; + private List selectedWidgets; + private Lookup lookup; + private InstanceContent content; + private Action[] actions; + private LayerWidget connectionLayer; + private JScrollPane scrollPane; + private UndoRedo.Manager undoRedoManager; + private LayerWidget mainLayer; + private LayerWidget slotLayer; + private LayerWidget blockLayer; + private double realZoomFactor; + private BoundedZoomAction zoomAction; + private WidgetAction panAction; + private Widget topLeft; + private Widget bottomRight; + private LayerWidget startLayer; + private LabelWidget startLabel; + private DiagramViewModel model; + private DiagramViewModel modelCopy; + public static final int AFTER = 1; + public static final int BEFORE = 1; + public static final float ALPHA = 0.4f; + public static final int GRID_SIZE = 30; + public static final int BORDER_SIZE = 20; + public static final int UNDOREDO_LIMIT = 100; + public static final int SCROLL_UNIT_INCREMENT = 80; + public static final int SCROLL_BLOCK_INCREMENT = 400; + public static final float ZOOM_MAX_FACTOR = 3.0f; + public static final float ZOOM_MIN_FACTOR = 0.0f;//0.15f; + public static final float ZOOM_INCREMENT = 1.5f; + public static final int SLOT_OFFSET = 6; + public static final int ANIMATION_LIMIT = 40; + private PopupMenuProvider popupMenuProvider = new PopupMenuProvider() { + + public JPopupMenu getPopupMenu(Widget widget, Point localLocation) { + return DiagramScene.this.createPopupMenu(); + } + }; + private RectangularSelectDecorator rectangularSelectDecorator = new RectangularSelectDecorator() { + + public Widget createSelectionWidget() { + Widget widget = new Widget(DiagramScene.this); + widget.setBorder(BorderFactory.createLineBorder(Color.black, 2)); + widget.setForeground(Color.red); + return widget; + } + }; + private RectangularSelectProvider rectangularSelectProvider = new RectangularSelectProvider() { + + public void performSelection(Rectangle rectangle) { + if (rectangle.width < 0) { + rectangle.x += rectangle.width; + rectangle.width *= -1; + } + + if (rectangle.height < 0) { + rectangle.y += rectangle.height; + rectangle.height *= -1; + } + + boolean updated = false; + for (Figure f : getModel().getDiagramToView().getFigures()) { + FigureWidget w = figureWidgets.get(f); + Rectangle r = new Rectangle(w.getBounds()); + r.setLocation(w.getLocation()); + if (r.intersects(rectangle)) { + if (!selectedWidgets.contains(w)) { + addToSelection(w); + updated = true; + } + } else { + if (selectedWidgets.contains(w)) { + selectedWidgets.remove(w); + content.remove(w.getNode()); + w.setState(w.getState().deriveSelected(false)); + updated = true; + } + } + } + + if (updated) { + selectionUpdated(); + } + } + }; + private SelectProvider selectProvider = new SelectProvider() { + + public boolean isAimingAllowed(Widget widget, Point point, boolean b) { + return false; + } + + public boolean isSelectionAllowed(Widget widget, Point point, boolean b) { + return widget instanceof FigureWidget || widget == DiagramScene.this; + } + + public void select(Widget w, Point point, boolean change) { + + boolean updated = false; + + if (w == DiagramScene.this) { + if (DiagramScene.this.selectedWidgets.size() != 0) { + clearSelection(); + selectionUpdated(); + } + return; + } + + FigureWidget widget = (FigureWidget) w; + + + if (change) { + if (widget.getState().isSelected()) { + assert selectedWidgets.contains(widget); + widget.setState(widget.getState().deriveSelected(false)); + 
selectedWidgets.remove(widget); + content.remove(widget.getNode()); + updated = true; + } else { + assert !selectedWidgets.contains(widget); + addToSelection(widget); + updated = true; + assert widget.getState().isSelected(); + } + } else { + + if (widget.getState().isSelected()) { + assert selectedWidgets.contains(widget); + } else { + + assert !selectedWidgets.contains(widget); + clearSelection(); + addToSelection(widget); + updated = true; + assert widget.getState().isSelected(); + } + } + + if (updated) { + selectionUpdated(); + } + + } + }; + + private FigureWidget getFigureWidget(Figure f) { + return figureWidgets.get(f); + } + private FocusListener focusListener = new FocusListener() { + + public void focusGained(FocusEvent e) { + DiagramScene.this.getView().requestFocus(); + } + + public void focusLost(FocusEvent e) { + } + }; + private MouseWheelListener mouseWheelListener = new MouseWheelListener() { + + public void mouseWheelMoved(MouseWheelEvent e) { + DiagramScene.this.zoomAction.mouseWheelMoved(DiagramScene.this, new WidgetAction.WidgetMouseWheelEvent(0, e)); + DiagramScene.this.validate(); + } + }; + private MouseListener mouseListener = new MouseListener() { + + public void mouseClicked(MouseEvent e) { + DiagramScene.this.panAction.mouseClicked(DiagramScene.this, new WidgetAction.WidgetMouseEvent(0, e)); + } + + public void mousePressed(MouseEvent e) { + DiagramScene.this.panAction.mousePressed(DiagramScene.this, new WidgetAction.WidgetMouseEvent(0, e)); + } + + public void mouseReleased(MouseEvent e) { + DiagramScene.this.panAction.mouseReleased(DiagramScene.this, new WidgetAction.WidgetMouseEvent(0, e)); + } + + public void mouseEntered(MouseEvent e) { + DiagramScene.this.panAction.mouseEntered(DiagramScene.this, new WidgetAction.WidgetMouseEvent(0, e)); + } + + public void mouseExited(MouseEvent e) { + DiagramScene.this.panAction.mouseExited(DiagramScene.this, new WidgetAction.WidgetMouseEvent(0, e)); + } + }; + private MouseMotionListener mouseMotionListener = new MouseMotionListener() { + + public void mouseDragged(MouseEvent e) { + DiagramScene.this.panAction.mouseDragged(DiagramScene.this, new WidgetAction.WidgetMouseEvent(0, e)); + } + + public void mouseMoved(MouseEvent e) { + } + }; + private ScrollChangeListener scrollChangeListener = new ScrollChangeListener(); + + private class ScrollChangeListener implements ChangeListener { + + private Map relativePositions = new HashMap(); + private Point oldPosition; + + public void register(Widget w, Point p) { + relativePositions.put(w, p); + } + + public void unregister(Widget w) { + relativePositions.remove(w); + } + + public void stateChanged(ChangeEvent e) { + Point p = DiagramScene.this.getScrollPane().getViewport().getViewPosition(); + if (oldPosition == null || !p.equals(oldPosition)) { + for (Widget w : relativePositions.keySet()) { + Point curPoint = relativePositions.get(w); + Point newPoint = new Point(p.x + curPoint.x, p.y + curPoint.y); + w.setPreferredLocation(newPoint); + DiagramScene.this.validate(); + } + oldPosition = p; + } + } + } + + public Point getScrollPosition() { + return getScrollPane().getViewport().getViewPosition(); + } + + public void setScrollPosition(Point p) { + getScrollPane().getViewport().setViewPosition(p); + } + + public DiagramScene(Action[] actions, DiagramViewModel model) { + this.actions = actions; + selectedWidgets = new ArrayList(); + content = new InstanceContent(); + lookup = new AbstractLookup(content); + this.setCheckClipping(true); + 
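(Aside, not part of the patch: a minimal standalone Swing sketch of the viewport-tracking idea that ScrollChangeListener above implements, with hypothetical class and variable names.)

import java.awt.Dimension;
import java.awt.Point;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;

// Illustrative only: keep an overlay component at a fixed offset from the
// visible viewport origin, the same idea ScrollChangeListener uses for
// registered widgets.
class PinnedOverlayExample {

    static JScrollPane build() {
        final JPanel canvas = new JPanel(null);             // large scrolled content, absolute positioning
        canvas.setPreferredSize(new Dimension(2000, 2000));
        final JLabel overlay = new JLabel("Loading graph...");
        overlay.setSize(overlay.getPreferredSize());
        canvas.add(overlay);

        final JScrollPane pane = new JScrollPane(canvas);
        final Point offset = new Point(10, 10);             // registered relative position
        pane.getViewport().addChangeListener(new ChangeListener() {

            public void stateChanged(ChangeEvent e) {
                Point view = pane.getViewport().getViewPosition();
                // re-anchor the overlay whenever the view position changes
                overlay.setLocation(view.x + offset.x, view.y + offset.y);
            }
        });
        return pane;
    }
}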
this.getInputBindings().setZoomActionModifiers(0); + + JComponent comp = this.createView(); + comp.setDoubleBuffered(true); + comp.setBackground(Color.WHITE); + comp.setOpaque(true); + + this.setBackground(Color.WHITE); + this.setOpaque(true); + scrollPane = new JScrollPane(comp); + scrollPane.setBackground(Color.WHITE); + scrollPane.getVerticalScrollBar().setUnitIncrement(SCROLL_UNIT_INCREMENT); + scrollPane.getVerticalScrollBar().setBlockIncrement(SCROLL_BLOCK_INCREMENT); + scrollPane.getHorizontalScrollBar().setUnitIncrement(SCROLL_UNIT_INCREMENT); + scrollPane.getHorizontalScrollBar().setBlockIncrement(SCROLL_BLOCK_INCREMENT); + scrollPane.getViewport().addChangeListener(scrollChangeListener); + hoverAction = this.createWidgetHoverAction(); + + blockLayer = new LayerWidget(this); + this.addChild(blockLayer); + + startLayer = new LayerWidget(this); + this.addChild(startLayer); + // TODO: String startLabelString = "Loading graph with " + originalDiagram.getFigures().size() + " figures and " + originalDiagram.getConnections().size() + " connections..."; + String startLabelString = ""; + LabelWidget w = new LabelWidget(this, startLabelString); + scrollChangeListener.register(w, new Point(10, 10)); + w.setAlignment(LabelWidget.Alignment.CENTER); + startLabel = w; + startLayer.addChild(w); + + mainLayer = new LayerWidget(this); + this.addChild(mainLayer); + + topLeft = new Widget(this); + topLeft.setPreferredLocation(new Point(-BORDER_SIZE, -BORDER_SIZE)); + this.addChild(topLeft); + + + bottomRight = new Widget(this); + bottomRight.setPreferredLocation(new Point(-BORDER_SIZE, -BORDER_SIZE)); + this.addChild(bottomRight); + + slotLayer = new LayerWidget(this); + this.addChild(slotLayer); + + connectionLayer = new LayerWidget(this); + this.addChild(connectionLayer); + + LayerWidget selectionLayer = new LayerWidget(this); + this.addChild(selectionLayer); + + this.setLayout(LayoutFactory.createAbsoluteLayout()); + + this.getActions().addAction(hoverAction); + zoomAction = new BoundedZoomAction(1.1, false); + zoomAction.setMaxFactor(ZOOM_MAX_FACTOR); + zoomAction.setMinFactor(ZOOM_MIN_FACTOR); + this.getActions().addAction(ActionFactory.createMouseCenteredZoomAction(1.1)); + panAction = new ExtendedPanAction(); + this.getActions().addAction(panAction); + this.getActions().addAction(ActionFactory.createPopupMenuAction(popupMenuProvider)); + + LayerWidget selectLayer = new LayerWidget(this); + this.addChild(selectLayer); + this.getActions().addAction(ActionFactory.createRectangularSelectAction(rectangularSelectDecorator, selectLayer, rectangularSelectProvider)); + + blockWidgets = new HashMap(); + + boolean b = this.getUndoRedoEnabled(); + this.setUndoRedoEnabled(false); + this.setNewModel(model); + this.setUndoRedoEnabled(b); + } + + private void selectionUpdated() { + getModel().setSelectedNodes(this.getSelectedNodes()); + addUndo(); + } + + public DiagramViewModel getModel() { + return model; + } + + public void setRealZoomFactor(double d) { + this.realZoomFactor = d; + } + + public double getRealZoomFactor() { + if (realZoomFactor == 0.0) { + return getZoomFactor(); + } else { + return realZoomFactor; + } + } + + public JScrollPane getScrollPane() { + return scrollPane; + } + + public boolean isAllVisible() { + return getModel().getHiddenNodes().size() == 0; + } + + public Action createGotoAction(final Figure f) { + final DiagramScene diagramScene = this; + Action a = new AbstractAction() { + + public void actionPerformed(ActionEvent e) { + diagramScene.gotoFigure(f); + } + }; + + 
a.setEnabled(true); + a.putValue(Action.SMALL_ICON, new ColorIcon(f.getColor())); + String name = f.getLines()[0]; + + name += " ("; + + if (f.getCluster() != null) { + name += "B" + f.getCluster().toString(); + } + if (!this.getFigureWidget(f).isVisible()) { + if (f.getCluster() != null) { + name += ", "; + } + name += "hidden"; + } + name += ")"; + a.putValue(Action.NAME, name); + return a; + } + + public void setNewModel(DiagramViewModel model) { + if (this.model != null) { + this.model.getDiagramChangedEvent().removeListener(this); + this.model.getViewPropertiesChangedEvent().removeListener(this); + } + this.model = model; + + if (this.model == null) { + this.modelCopy = null; + } else { + this.modelCopy = this.model.copy(); + } + + model.getDiagramChangedEvent().addListener(this); + model.getViewPropertiesChangedEvent().addListener(this); + + update(); + } + + private void update() { + + /*if (startLabel != null) { + // Animate fade-out + final LabelWidget labelWidget = this.startLabel; + labelWidget.setVisible(true); + RequestProcessor.getDefault().post(new Runnable() { + public void run() { + final int Sleep = 200; + final int Progress = 10; + for (int i = 0; i < 255 / Progress + 1; i++) { + try { + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + Color c = labelWidget.getForeground(); + int v = c.getRed(); + v += Progress; + if (v > 255) { + v = 255; + } + labelWidget.setForeground(new Color(v, v, v, 255 - v)); + labelWidget.getScene().validate(); + } + }); + } catch (InterruptedException ex) { + } catch (InvocationTargetException ex) { + } + try { + Thread.sleep(Sleep); + } catch (InterruptedException ex) { + } + } + labelWidget.setVisible(false); + DiagramScene.this.scrollChangeListener.unregister(labelWidget); + } + }, 1000); + startLabel = null; + }*/ + + slotLayer.removeChildren(); + mainLayer.removeChildren(); + blockLayer.removeChildren(); + + blockWidgets.clear(); + figureWidgets = new HashMap(); + slotWidgets = new HashMap(); + connectionWidgets = new HashMap(); + + WidgetAction selectAction = new ExtendedSelectAction(selectProvider); + Diagram d = getModel().getDiagramToView(); + + if (getModel().getShowBlocks()) { + Scheduler s = Lookup.getDefault().lookup(Scheduler.class); + Collection newBlocks = new ArrayList(s.schedule(d.getGraph())); + d.schedule(newBlocks); + } + + for (Figure f : d.getFigures()) { + FigureWidget w = new FigureWidget(f, this, mainLayer); + w.getActions().addAction(selectAction); + w.getActions().addAction(hoverAction); + w.getActions().addAction(ActionFactory.createPopupMenuAction(w)); + w.getActions().addAction(new DoubleClickAction(w)); + w.setVisible(false); + + figureWidgets.put(f, w); + + for (InputSlot s : f.getInputSlots()) { + SlotWidget sw = new InputSlotWidget(s, this, slotLayer, w); + slotWidgets.put(s, sw); + sw.getActions().addAction(selectAction); + } + + for (OutputSlot s : f.getOutputSlots()) { + SlotWidget sw = new OutputSlotWidget(s, this, slotLayer, w); + slotWidgets.put(s, sw); + sw.getActions().addAction(selectAction); + } + } + + if (getModel().getShowBlocks()) { + for (InputBlock bn : d.getGraph().getBlocks()) { + BlockWidget w = new BlockWidget(this, d, bn); + w.setVisible(false); + blockWidgets.put(bn, w); + blockLayer.addChild(w); + } + } + + this.smallUpdate(true); + + } + + private void smallUpdate(boolean relayout) { + + this.updateHiddenNodes(model.getHiddenNodes(), relayout); + boolean b = this.getUndoRedoEnabled(); + this.setUndoRedoEnabled(false); + 
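(Aside, not part of the patch: a sketch of the save/disable/restore idiom used here and elsewhere so that programmatic selection or model changes do not record undo entries; the class and method names are hypothetical.)

// Illustrative only: remember the flag, suppress recording, restore afterwards.
class UndoGuardExample {

    private boolean undoRedoEnabled = true;

    void applySilently(Runnable change) {
        boolean saved = undoRedoEnabled;   // remember the current setting
        undoRedoEnabled = false;           // suppress undo recording
        try {
            change.run();
        } finally {
            undoRedoEnabled = saved;       // restore the previous setting
        }
    }
}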
this.setSelection(getModel().getSelectedNodes()); + this.setUndoRedoEnabled(b); + this.validate(); + } + + private boolean isVisible(Connection c) { + FigureWidget w1 = figureWidgets.get(c.getInputSlot().getFigure()); + FigureWidget w2 = figureWidgets.get(c.getOutputSlot().getFigure()); + + if (w1.isVisible() && w2.isVisible()) { + return true; + } + + return false; + } + + private void relayout(Set oldVisibleWidgets) { + + Diagram diagram = getModel().getDiagramToView(); + + HashSet
<Figure> figures = new HashSet<Figure>
(); + + for (Figure f : diagram.getFigures()) { + FigureWidget w = figureWidgets.get(f); + if (w.isVisible()) { + figures.add(f); + } + } + + HashSet edges = new HashSet(); + + for (Connection c : diagram.getConnections()) { + if (isVisible(c)) { + edges.add(c); + } + } + + if (getModel().getShowBlocks()) { + HierarchicalClusterLayoutManager m = new HierarchicalClusterLayoutManager(OldHierarchicalLayoutManager.Combine.SAME_OUTPUTS); + HierarchicalLayoutManager manager = new HierarchicalLayoutManager(HierarchicalLayoutManager.Combine.SAME_OUTPUTS); + manager.setMaxLayerLength(9); + manager.setMinLayerDifference(3); + m.setManager(manager); + m.setSubManager(new HierarchicalLayoutManager(HierarchicalLayoutManager.Combine.SAME_OUTPUTS)); + m.doLayout(new LayoutGraph(edges, figures)); + + } else { + HierarchicalLayoutManager manager = new HierarchicalLayoutManager(HierarchicalLayoutManager.Combine.SAME_OUTPUTS); + manager.setMaxLayerLength(10); + manager.doLayout(new LayoutGraph(edges, figures)); + } + + int maxX = -BORDER_SIZE; + int maxY = -BORDER_SIZE; + for (Figure f : diagram.getFigures()) { + FigureWidget w = figureWidgets.get(f); + if (w.isVisible()) { + Point p = f.getPosition(); + Dimension d = f.getSize(); + maxX = Math.max(maxX, p.x + d.width); + maxY = Math.max(maxY, p.y + d.height); + } + } + + for (Connection c : diagram.getConnections()) { + List points = c.getControlPoints(); + FigureWidget w1 = figureWidgets.get((Figure) c.getTo().getVertex()); + FigureWidget w2 = figureWidgets.get((Figure) c.getFrom().getVertex()); + if (w1.isVisible() && w2.isVisible()) { + for (Point p : points) { + if (p != null) { + maxX = Math.max(maxX, p.x); + maxY = Math.max(maxY, p.y); + } + } + } + } + + if (getModel().getShowBlocks()) { + for (Block b : diagram.getBlocks()) { + BlockWidget w = blockWidgets.get(b.getInputBlock()); + if (w != null && w.isVisible()) { + Rectangle r = b.getBounds(); + maxX = Math.max(maxX, r.x + r.width); + maxY = Math.max(maxY, r.y + r.height); + } + } + } + + bottomRight.setPreferredLocation(new Point(maxX + BORDER_SIZE, maxY + BORDER_SIZE)); + int offx = 0; + int offy = 0; + int curWidth = maxX + 2 * BORDER_SIZE; + int curHeight = maxY + 2 * BORDER_SIZE; + + Rectangle bounds = this.getScrollPane().getBounds(); + if (curWidth < bounds.width) { + offx = (bounds.width - curWidth) / 2; + } + + if (curHeight < bounds.height) { + offy = (bounds.height - curHeight) / 2; + } + + final int offx2 = offx; + final int offy2 = offy; + + SceneAnimator animator = this.getSceneAnimator(); + connectionLayer.removeChildren(); + int visibleFigureCount = 0; + for (Figure f : diagram.getFigures()) { + if (figureWidgets.get(f).isVisible()) { + visibleFigureCount++; + } + } + + for (Figure f : diagram.getFigures()) { + for (OutputSlot s : f.getOutputSlots()) { + SceneAnimator anim = animator; + if (visibleFigureCount > ANIMATION_LIMIT) { + anim = null; + } + processOutputSlot(s, s.getConnections(), 0, null, null, offx2, offy2, anim); + } + } + + for (Figure f : diagram.getFigures()) { + FigureWidget w = figureWidgets.get(f); + if (w.isVisible()) { + Point p = f.getPosition(); + Point p2 = new Point(p.x + offx2, p.y + offy2); + Rectangle r = new Rectangle(p.x + offx2, p.y + offy2, f.getSize().width, f.getSize().height); + if (oldVisibleWidgets.contains(w)) { + if (visibleFigureCount > ANIMATION_LIMIT) { + w.setPreferredLocation(p2); + } else { + animator.animatePreferredLocation(w, p2); + } + } else { + w.setPreferredLocation(p2); + } + } + } + + if (getModel().getShowBlocks()) { + for 
(Block b : diagram.getBlocks()) { + BlockWidget w = blockWidgets.get(b.getInputBlock()); + if (w != null && w.isVisible()) { + Point location = new Point(b.getBounds().x + offx2, b.getBounds().y + offy2); + Rectangle r = new Rectangle(location.x, location.y, b.getBounds().width, b.getBounds().height); + if (oldVisibleWidgets.contains(w)) { + if (visibleFigureCount > ANIMATION_LIMIT) { + w.setPreferredBounds(r); + } else { + animator.animatePreferredBounds(w, r); + } + } else { + w.setPreferredBounds(r); + } + } + } + } + } + private final Point specialNullPoint = new Point(Integer.MAX_VALUE, Integer.MAX_VALUE); + + private void processOutputSlot(OutputSlot s, List connections, int controlPointIndex, Point lastPoint, LineWidget predecessor, int offx, int offy, SceneAnimator animator) { + Map> pointMap = new HashMap>(); + + for (Connection c : connections) { + + if (!isVisible(c)) { + continue; + } + + List controlPoints = c.getControlPoints(); + if (controlPointIndex >= controlPoints.size()) { + continue; + } + + Point cur = controlPoints.get(controlPointIndex); + if (cur == null) { + cur = specialNullPoint; + } else if (controlPointIndex == 0 && !s.getShowName()) { + cur = new Point(cur.x, cur.y - SLOT_OFFSET); + } else if (controlPointIndex == controlPoints.size() - 1 && !c.getInputSlot().getShowName()) { + cur = new Point(cur.x, cur.y + SLOT_OFFSET); + } + + if (pointMap.containsKey(cur)) { + pointMap.get(cur).add(c); + } else { + List newList = new ArrayList(2); + newList.add(c); + pointMap.put(cur, newList); + } + + } + + for (Point p : pointMap.keySet()) { + List connectionList = pointMap.get(p); + + boolean isBold = false; + boolean isDashed = true; + + for (Connection c : connectionList) { + + if (c.getStyle() == Connection.ConnectionStyle.BOLD) { + isBold = true; + } + + if (c.getStyle() != Connection.ConnectionStyle.DASHED) { + isDashed = false; + } + } + + LineWidget newPredecessor = predecessor; + if (p == specialNullPoint) { + + } else if (lastPoint == specialNullPoint) { + + } else if (lastPoint != null) { + Point p1 = new Point(lastPoint.x + offx, lastPoint.y + offy); + Point p2 = new Point(p.x + offx, p.y + offy); + LineWidget w = new LineWidget(this, s, connectionList, p1, p2, predecessor, animator, isBold, isDashed); + newPredecessor = w; + connectionLayer.addChild(w); + w.getActions().addAction(hoverAction); + } + + processOutputSlot(s, connectionList, controlPointIndex + 1, p, newPredecessor, offx, offy, animator); + } + } + + private void clearSelection() { + if (selectedWidgets.size() == 0) { + return; + } + for (FigureWidget w : selectedWidgets) { + assert w.getState().isSelected(); + w.setState(w.getState().deriveSelected(false)); + content.remove(w.getNode()); + } + selectedWidgets.clear(); + } + + public Lookup getLookup() { + return lookup; + } + + public void gotoFigures(final List
figures) { + Rectangle overall = null; + showFigures(figures); + for (Figure f : figures) { + + FigureWidget fw = getFigureWidget(f); + if (fw != null) { + Rectangle r = fw.getBounds(); + Point p = fw.getLocation(); + Rectangle r2 = new Rectangle(p.x, p.y, r.width, r.height); + + if (overall == null) { + overall = r2; + } else { + overall = overall.union(r2); + } + } + } + if (overall != null) { + centerRectangle(overall); + } + } + + private Point calcCenter(Rectangle r) { + + Point center = new Point((int) r.getCenterX(), (int) r.getCenterY()); + center.x -= getScrollPane().getViewport().getViewRect().width / 2; + center.y -= getScrollPane().getViewport().getViewRect().height / 2; + + // Ensure to be within area + center.x = Math.max(0, center.x); + center.x = Math.min(getScrollPane().getViewport().getViewSize().width - getScrollPane().getViewport().getViewRect().width, center.x); + center.y = Math.max(0, center.y); + center.y = Math.min(getScrollPane().getViewport().getViewSize().height - getScrollPane().getViewport().getViewRect().height, center.y); + + return center; + } + + private void centerRectangle(Rectangle r) { + + if (getScrollPane().getViewport().getViewRect().width == 0 || getScrollPane().getViewport().getViewRect().height == 0) { + return; + } + + Rectangle r2 = new Rectangle(r.x, r.y, r.width, r.height); + r2 = convertSceneToView(r2); + + double factorX = (double) r2.width / (double) getScrollPane().getViewport().getViewRect().width; + double factorY = (double) r2.height / (double) getScrollPane().getViewport().getViewRect().height; + double factor = Math.max(factorX, factorY); + if (factor >= 1.0) { + Point p = getScrollPane().getViewport().getViewPosition(); + setZoomFactor(getZoomFactor() / factor); + r2.x /= factor; + r2.y /= factor; + r2.width /= factor; + r2.height /= factor; + getScrollPane().getViewport().setViewPosition(calcCenter(r2)); + } else { + getScrollPane().getViewport().setViewPosition(calcCenter(r2)); + } + } + + private void addToSelection(Figure f) { + FigureWidget w = getFigureWidget(f); + addToSelection(w); + } + + private void addToSelection(FigureWidget w) { + assert !selectedWidgets.contains(w); + selectedWidgets.add(w); + content.add(w.getNode()); + w.setState(w.getState().deriveSelected(true)); + } + + private void setSelection(Set nodes) { + clearSelection(); + for (Figure f : getModel().getDiagramToView().getFigures()) { + if (doesIntersect(f.getSource().getSourceNodesAsSet(), nodes)) { + addToSelection(f); + } + } + selectionUpdated(); + this.validate(); + } + + public void setSelection(Collection
list) { + clearSelection(); + for (Figure f : list) { + addToSelection(f); + } + + selectionUpdated(); + this.validate(); + } + + public Set
<Figure> getSelectedFigures() { + Set
<Figure> result = new HashSet<Figure>
(); + for (Widget w : selectedWidgets) { + if (w instanceof FigureWidget) { + FigureWidget fw = (FigureWidget) w; + if (fw.getState().isSelected()) { + result.add(fw.getFigure()); + } + } + } + return result; + } + + public Set getSelectedNodes() { + Set result = new HashSet(); + for (Widget w : selectedWidgets) { + if (w instanceof FigureWidget) { + FigureWidget fw = (FigureWidget) w; + if (fw.getState().isSelected()) { + result.addAll(fw.getFigure().getSource().getSourceNodesAsSet()); + } + } + } + return result; + } + + private UndoRedo.Manager getUndoRedoManager() { + if (undoRedoManager == null) { + undoRedoManager = new UndoRedo.Manager(); + undoRedoManager.setLimit(UNDOREDO_LIMIT); + } + + return undoRedoManager; + } + + public UndoRedo getUndoRedo() { + return getUndoRedoManager(); + } + + private boolean isVisible(Figure f) { + for (Integer n : f.getSource().getSourceNodesAsSet()) { + if (getModel().getHiddenNodes().contains(n)) { + return false; + } + } + return true; + } + + private boolean doesIntersect(Set s1, Set s2) { + if (s1.size() > s2.size()) { + Set tmp = s1; + s1 = s2; + s2 = tmp; + } + + for (Object o : s1) { + if (s2.contains(o)) { + return true; + } + } + + return false; + } + + public void showNot(final Set nodes) { + updateHiddenNodes(nodes, true); + } + + public void showOnly(final Set nodes) { + HashSet allNodes = new HashSet(getModel().getGraphToView().getGroup().getAllNodes()); + allNodes.removeAll(nodes); + updateHiddenNodes(allNodes, true); + } + + private void updateHiddenNodes(Set newHiddenNodes, boolean doRelayout) { + + Set visibleBlocks = new HashSet(); + + Diagram diagram = getModel().getDiagramToView(); + assert diagram != null; + + Set oldVisibleWidgets = new HashSet(); + + for (Figure f : diagram.getFigures()) { + FigureWidget w = figureWidgets.get(f); + if (w.isVisible()) { + oldVisibleWidgets.add(w); + } + } + + if (getModel().getShowBlocks()) { + for (InputBlock b : diagram.getGraph().getBlocks()) { + BlockWidget w = blockWidgets.get(b); + if (w.isVisible()) { + oldVisibleWidgets.add(w); + } + } + } + + for (Figure f : diagram.getFigures()) { + boolean hiddenAfter = doesIntersect(f.getSource().getSourceNodesAsSet(), newHiddenNodes); + + FigureWidget w = this.figureWidgets.get(f); + w.setBoundary(false); + if (!hiddenAfter) { + // Figure is shown + w.setVisible(true); + for (InputNode n : f.getSource().getSourceNodes()) { + visibleBlocks.add(diagram.getGraph().getBlock(n)); + } + } else { + // Figure is hidden + w.setVisible(false); + } + } + + if (getModel().getShowNodeHull()) { + List boundaries = new ArrayList(); + for (Figure f : diagram.getFigures()) { + FigureWidget w = this.figureWidgets.get(f); + if (!w.isVisible()) { + Set
<Figure> set = new HashSet<Figure>
(f.getPredecessorSet()); + set.addAll(f.getSuccessorSet()); + + boolean b = false; + for (Figure neighbor : set) { + FigureWidget neighborWidget = figureWidgets.get(neighbor); + if (neighborWidget.isVisible()) { + b = true; + break; + } + } + + if (b) { + w.setBoundary(true); + for (InputNode n : f.getSource().getSourceNodes()) { + visibleBlocks.add(diagram.getGraph().getBlock(n)); + } + boundaries.add(w); + } + } + } + + for (FigureWidget w : boundaries) { + if (w.isBoundary()) { + w.setVisible(true); + } + } + } + + if (getModel().getShowBlocks()) { + for (InputBlock b : diagram.getGraph().getBlocks()) { + + boolean visibleAfter = visibleBlocks.contains(b); + + BlockWidget w = blockWidgets.get(b); + if (visibleAfter) { + // Block must be shown + w.setVisible(true); + } else { + // Block must be hidden + w.setVisible(false); + } + } + } + + getModel().setHiddenNodes(newHiddenNodes); + if (doRelayout) { + relayout(oldVisibleWidgets); + } + this.validate(); + addUndo(); + } + + private void showFigures(Collection
f) { + HashSet newHiddenNodes = new HashSet(getModel().getHiddenNodes()); + for (Figure fig : f) { + newHiddenNodes.removeAll(fig.getSource().getSourceNodesAsSet()); + } + updateHiddenNodes(newHiddenNodes, true); + } + + private void showFigure(Figure f) { + HashSet newHiddenNodes = new HashSet(getModel().getHiddenNodes()); + newHiddenNodes.removeAll(f.getSource().getSourceNodesAsSet()); + updateHiddenNodes(newHiddenNodes, true); + } + + public void showAll(final Collection
f) { + showFigures(f); + + } + + public void show(final Figure f) { + showFigure(f); + } + + public void gotoFigure(final Figure f) { + + if (!isVisible(f)) { + showFigure(f); + } + + FigureWidget fw = getFigureWidget(f); + if (fw != null) { + Rectangle r = fw.getBounds(); + Point p = fw.getLocation(); + centerRectangle(new Rectangle(p.x, p.y, r.width, r.height)); + + // Select figure + clearSelection(); + addToSelection(fw); + selectionUpdated(); + } + } + + public JPopupMenu createPopupMenu() { + JPopupMenu menu = new JPopupMenu(); + for (Action a : actions) { + if (a == null) { + menu.addSeparator(); + } else { + menu.add(a); + } + } + return menu; + } + + private static class DiagramUndoRedo extends AbstractUndoableEdit implements ChangedListener { + + private DiagramViewModel oldModel; + private DiagramViewModel newModel; + private Point oldScrollPosition; + private DiagramScene scene; + + public DiagramUndoRedo(DiagramScene scene, Point oldScrollPosition, DiagramViewModel oldModel, DiagramViewModel newModel) { + assert oldModel != null; + assert newModel != null; + this.oldModel = oldModel; + this.newModel = newModel; + this.scene = scene; + this.oldScrollPosition = oldScrollPosition; + } + + @Override + public void redo() throws CannotRedoException { + super.redo(); + boolean b = scene.getUndoRedoEnabled(); + scene.setUndoRedoEnabled(false); + scene.getModel().getViewChangedEvent().addListener(this); + scene.getModel().setData(newModel); + scene.getModel().getViewChangedEvent().removeListener(this); + scene.setUndoRedoEnabled(b); + } + + @Override + public void undo() throws CannotUndoException { + super.undo(); + boolean b = scene.getUndoRedoEnabled(); + scene.setUndoRedoEnabled(false); + scene.getModel().getViewChangedEvent().addListener(this); + scene.getModel().setData(oldModel); + scene.getModel().getViewChangedEvent().removeListener(this); + + SwingUtilities.invokeLater(new Runnable() { + + public void run() { + scene.setScrollPosition(oldScrollPosition); + } + }); + + scene.setUndoRedoEnabled(b); + } + + public void changed(DiagramViewModel source) { + scene.getModel().getViewChangedEvent().removeListener(this); + if (oldModel.getSelectedNodes().equals(newModel.getHiddenNodes())) { + scene.smallUpdate(false); + } else { + scene.smallUpdate(true); + } + } + } + private boolean undoRedoEnabled = true; + + public void setUndoRedoEnabled(boolean b) { + this.undoRedoEnabled = b; + } + + public boolean getUndoRedoEnabled() { + return undoRedoEnabled; + } + + public void changed(DiagramViewModel source) { + assert source == model : "Receive only changed event from current model!"; + assert source != null; + update(); + } + + private void addUndo() { + + DiagramViewModel newModelCopy = model.copy(); + + if (undoRedoEnabled) { + this.getUndoRedoManager().undoableEditHappened(new UndoableEditEvent(this, new DiagramUndoRedo(this, this.getScrollPosition(), modelCopy, newModelCopy))); + } + + this.modelCopy = newModelCopy; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/DiagramViewModel.java 2009-08-01 04:20:24.747868055 +0100 @@ -0,0 +1,340 @@ +/* + * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.view; + +import com.sun.hotspot.igv.data.Group; +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.InputNode; +import com.sun.hotspot.igv.difference.Difference; +import com.sun.hotspot.igv.filter.FilterChain; +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.data.ChangedEvent; +import com.sun.hotspot.igv.util.RangeSliderModel; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import com.sun.hotspot.igv.data.ChangedListener; +import com.sun.hotspot.igv.settings.Settings; +import java.awt.Color; + +/** + * + * @author Thomas Wuerthinger + */ +public class DiagramViewModel extends RangeSliderModel implements ChangedListener { + + // Warning: Update setData method if fields are added + private Group group; + private Set hiddenNodes; + private Set onScreenNodes; + private Set selectedNodes; + private FilterChain filterChain; + private FilterChain sequenceFilterChain; + private Diagram diagram; + private ChangedEvent groupChangedEvent; + private ChangedEvent diagramChangedEvent; + private ChangedEvent viewChangedEvent; + private ChangedEvent viewPropertiesChangedEvent; + private boolean showBlocks; + private boolean showNodeHull; + private ChangedListener filterChainChangedListener = new ChangedListener() { + + public void changed(FilterChain source) { + diagramChanged(); + } + }; + + @Override + public DiagramViewModel copy() { + DiagramViewModel result = new DiagramViewModel(group, filterChain, sequenceFilterChain); + result.setData(this); + return result; + } + + public void setData(DiagramViewModel newModel) { + super.setData(newModel); + boolean diagramChanged = false; + boolean viewChanged = false; + boolean viewPropertiesChanged = false; + + boolean groupChanged = (group == newModel.group); + this.group = newModel.group; + diagramChanged |= (filterChain != newModel.filterChain); + this.filterChain = newModel.filterChain; + diagramChanged |= (sequenceFilterChain != newModel.sequenceFilterChain); + this.sequenceFilterChain = newModel.sequenceFilterChain; + diagramChanged |= (diagram != newModel.diagram); + this.diagram = newModel.diagram; + viewChanged |= (hiddenNodes != newModel.hiddenNodes); + this.hiddenNodes = newModel.hiddenNodes; + viewChanged |= (onScreenNodes != newModel.onScreenNodes); + this.onScreenNodes = newModel.onScreenNodes; + viewChanged |= (selectedNodes != newModel.selectedNodes); + this.selectedNodes = newModel.selectedNodes; + viewPropertiesChanged |= (showBlocks != newModel.showBlocks); + this.showBlocks = newModel.showBlocks; + 
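(Aside, not part of the patch: a minimal sketch of the compare-then-fire pattern that setData above follows, with two hypothetical fields and Runnables standing in for the ChangedEvent objects.)

// Illustrative only: detect which aspects changed, copy all fields, then fire
// each affected event at most once.
class ChangeFlagExample {

    private Object filterChain;
    private Object hiddenNodes;

    void setData(ChangeFlagExample newModel, Runnable diagramChangedEvent, Runnable viewChangedEvent) {
        boolean diagramChanged = false;
        boolean viewChanged = false;

        diagramChanged |= (filterChain != newModel.filterChain);  // reference comparison
        this.filterChain = newModel.filterChain;

        viewChanged |= (hiddenNodes != newModel.hiddenNodes);
        this.hiddenNodes = newModel.hiddenNodes;

        if (diagramChanged) {
            diagramChangedEvent.run();
        }
        if (viewChanged) {
            viewChangedEvent.run();
        }
    }
}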
viewPropertiesChanged |= (showNodeHull != newModel.showNodeHull); + this.showNodeHull = newModel.showNodeHull; + + if(groupChanged) { + groupChangedEvent.fire(); + } + + if (diagramChanged) { + diagramChangedEvent.fire(); + } + if (viewPropertiesChanged) { + viewPropertiesChangedEvent.fire(); + } + if (viewChanged) { + viewChangedEvent.fire(); + } + } + + public boolean getShowBlocks() { + return showBlocks; + } + + public void setShowBlocks(boolean b) { + showBlocks = b; + viewPropertiesChangedEvent.fire(); + } + + public boolean getShowNodeHull() { + return showNodeHull; + } + + public void setShowNodeHull(boolean b) { + showNodeHull = b; + viewPropertiesChangedEvent.fire(); + } + + public DiagramViewModel(Group g, FilterChain filterChain, FilterChain sequenceFilterChain) { + super(calculateStringList(g)); + + this.showNodeHull = true; + this.showBlocks = true; + this.group = g; + assert filterChain != null; + this.filterChain = filterChain; + assert sequenceFilterChain != null; + this.sequenceFilterChain = sequenceFilterChain; + hiddenNodes = new HashSet(); + onScreenNodes = new HashSet(); + selectedNodes = new HashSet(); + super.getChangedEvent().addListener(this); + diagramChangedEvent = new ChangedEvent(this); + viewChangedEvent = new ChangedEvent(this); + viewPropertiesChangedEvent = new ChangedEvent(this); + groupChangedEvent = new ChangedEvent(this); + groupChangedEvent.addListener(groupChangedListener); + groupChangedEvent.fire(); + + filterChain.getChangedEvent().addListener(filterChainChangedListener); + sequenceFilterChain.getChangedEvent().addListener(filterChainChangedListener); + } + + private final ChangedListener groupChangedListener = new ChangedListener() { + + private Group oldGroup; + + public void changed(DiagramViewModel source) { + if(oldGroup != null) { + oldGroup.getChangedEvent().removeListener(groupContentChangedListener); + } + group.getChangedEvent().addListener(groupContentChangedListener); + oldGroup = group; + } + }; + + + private final ChangedListener groupContentChangedListener = new ChangedListener() { + + public void changed(Group source) { + assert source == group; + setPositions(calculateStringList(source)); + setSelectedNodes(selectedNodes); + } + + }; + + public ChangedEvent getDiagramChangedEvent() { + return diagramChangedEvent; + } + + public ChangedEvent getViewChangedEvent() { + return viewChangedEvent; + } + + public ChangedEvent getViewPropertiesChangedEvent() { + return viewPropertiesChangedEvent; + } + + public Set getSelectedNodes() { + return Collections.unmodifiableSet(selectedNodes); + } + + public Set getHiddenNodes() { + return Collections.unmodifiableSet(hiddenNodes); + } + + public Set getOnScreenNodes() { + return Collections.unmodifiableSet(onScreenNodes); + } + + public void setSelectedNodes(Set nodes) { + this.selectedNodes = nodes; + List colors = new ArrayList(); + for (String s : getPositions()) { + colors.add(Color.black); + } + if (nodes.size() >= 1) { + for (Integer id : nodes) { + if (id < 0) { + id = -id; + } + InputNode last = null; + int index = 0; + for (InputGraph g : group.getGraphs()) { + Color curColor = colors.get(index); + InputNode cur = g.getNode(id); + if (cur != null) { + if (last == null) { + curColor = Color.green; + } else { + if (last.equals(cur)) { + if (curColor == Color.black) { + curColor = Color.white; + } + } else { + if (curColor != Color.green) { + curColor = Color.orange; + } + } + } + } + last = cur; + colors.set(index, curColor); + index++; + } + } + this.setColors(colors); + } + 
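(Aside, not part of the patch: a simplified standalone sketch of the coloring logic above, reduced to a single node id tracked across the graphs of a group; black = absent, green = first appearance, white = unchanged, orange = changed. The "versions" array is a hypothetical stand-in for the node as found in each graph.)

import java.awt.Color;
import java.util.ArrayList;
import java.util.List;

// Illustrative only: derive one color per graph for a single tracked node.
class SelectionColorExample {

    static List<Color> colorsFor(Object[] versions) {
        List<Color> colors = new ArrayList<Color>();
        Object last = null;
        for (Object cur : versions) {
            Color c = Color.black;              // node absent in this graph
            if (cur != null) {
                if (last == null) {
                    c = Color.green;            // node appears for the first time
                } else if (last.equals(cur)) {
                    c = Color.white;            // unchanged since the previous graph
                } else {
                    c = Color.orange;           // present but changed
                }
            }
            last = cur;
            colors.add(c);
        }
        return colors;
    }
}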
setColors(colors); + viewChangedEvent.fire(); + } + + public void setHiddenNodes(Set nodes) { + this.hiddenNodes = nodes; + viewChangedEvent.fire(); + } + + public void setOnScreenNodes(Set onScreenNodes) { + this.onScreenNodes = onScreenNodes; + viewChangedEvent.fire(); + } + + public FilterChain getSequenceFilterChain() { + return filterChain; + } + + public void setSequenceFilterChain(FilterChain chain) { + assert chain != null : "sequenceFilterChain must never be null"; + sequenceFilterChain.getChangedEvent().removeListener(filterChainChangedListener); + sequenceFilterChain = chain; + sequenceFilterChain.getChangedEvent().addListener(filterChainChangedListener); + diagramChanged(); + } + + private void diagramChanged() { + // clear diagram + diagram = null; + getDiagramChangedEvent().fire(); + + } + + public FilterChain getFilterChain() { + return filterChain; + } + + public void setFilterChain(FilterChain chain) { + assert chain != null : "filterChain must never be null"; + filterChain.getChangedEvent().removeListener(filterChainChangedListener); + filterChain = chain; + filterChain.getChangedEvent().addListener(filterChainChangedListener); + diagramChanged(); + } + + private static List calculateStringList(Group g) { + List result = new ArrayList(); + for (InputGraph graph : g.getGraphs()) { + result.add(graph.getName()); + } + return result; + } + + public InputGraph getFirstGraph() { + return group.getGraphs().get(getFirstPosition()); + } + + public InputGraph getSecondGraph() { + List graphs = group.getGraphs(); + if (graphs.size() >= getSecondPosition()) + return group.getGraphs().get(getSecondPosition()); + return getFirstGraph(); + } + + public void selectGraph(InputGraph g) { + int index = group.getGraphs().indexOf(g); + assert index != -1; + setPositions(index, index); + } + + public Diagram getDiagramToView() { + + if (diagram == null) { + diagram = Diagram.createDiagram(getGraphToView(), Settings.get().get(Settings.NODE_TEXT, Settings.NODE_TEXT_DEFAULT)); + getFilterChain().apply(diagram, getSequenceFilterChain()); + } + + return diagram; + } + + public InputGraph getGraphToView() { + if (getFirstGraph() != getSecondGraph()) { + InputGraph inputGraph = Difference.createDiffGraph(getSecondGraph(), getFirstGraph()); + return inputGraph; + } else { + InputGraph inputGraph = getFirstGraph(); + return inputGraph; + } + } + + public void changed(RangeSliderModel source) { + diagramChanged(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/EditorInputGraphProvider.java 2009-08-01 04:20:25.190987164 +0100 @@ -0,0 +1,52 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.view; + +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.services.InputGraphProvider; +import com.sun.hotspot.igv.data.InputNode; +import java.util.Set; + +/** + * + * @author Thomas Wuerthinger + */ +public class EditorInputGraphProvider implements InputGraphProvider { + + public InputGraph getGraph() { + EditorTopComponent e = EditorTopComponent.getActive(); + if (e == null) { + return null; + } + return e.getDiagramModel().getGraphToView(); + } + + public void setSelectedNodes(Set nodes) { + EditorTopComponent e = EditorTopComponent.getActive(); + if (e != null) { + e.setSelectedNodes(nodes); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/EditorTopComponent.form 2009-08-01 04:20:25.631805124 +0100 @@ -0,0 +1,31 @@ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/EditorTopComponent.java 2009-08-01 04:20:26.062758874 +0100 @@ -0,0 +1,577 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view; + +import com.sun.hotspot.igv.data.InputNode; +import com.sun.hotspot.igv.filter.FilterChain; +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.view.actions.EnableBlockLayoutAction; +import com.sun.hotspot.igv.view.actions.ExpandPredecessorsAction; +import com.sun.hotspot.igv.view.actions.ExpandSuccessorsAction; +import com.sun.hotspot.igv.view.actions.ExtractAction; +import com.sun.hotspot.igv.view.actions.HideAction; +import com.sun.hotspot.igv.view.actions.NextDiagramAction; +import com.sun.hotspot.igv.view.actions.NodeFindAction; +import com.sun.hotspot.igv.view.actions.OverviewAction; +import com.sun.hotspot.igv.view.actions.PredSuccAction; +import com.sun.hotspot.igv.view.actions.PrevDiagramAction; +import com.sun.hotspot.igv.view.actions.ShowAllAction; +import com.sun.hotspot.igv.view.actions.ZoomInAction; +import com.sun.hotspot.igv.view.actions.ZoomOutAction; +import com.sun.hotspot.igv.data.ChangedListener; +import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.data.Properties.PropertyMatcher; +import com.sun.hotspot.igv.filter.FilterChainProvider; +import com.sun.hotspot.igv.util.RangeSlider; +import com.sun.hotspot.igv.util.RangeSliderModel; +import com.sun.hotspot.igv.svg.BatikSVG; +import java.awt.BorderLayout; +import java.awt.CardLayout; +import java.awt.Color; +import java.awt.Dimension; +import java.awt.Graphics2D; +import java.awt.Point; +import java.awt.event.HierarchyBoundsListener; +import java.awt.event.HierarchyEvent; +import java.awt.event.KeyEvent; +import java.awt.event.KeyListener; +import java.beans.PropertyChangeEvent; +import java.beans.PropertyChangeListener; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.UnsupportedEncodingException; +import java.io.Writer; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import javax.swing.Action; +import javax.swing.ActionMap; +import javax.swing.JPanel; +import javax.swing.JToggleButton; +import 
javax.swing.SwingUtilities; +import javax.swing.UIManager; +import javax.swing.border.Border; +import org.openide.DialogDisplayer; +import org.openide.actions.FindAction; +import org.openide.actions.RedoAction; +import org.openide.actions.UndoAction; +import org.openide.awt.Toolbar; +import org.openide.awt.ToolbarPool; +import org.openide.awt.UndoRedo; +import org.openide.util.Lookup; +import org.openide.util.NbBundle; +import org.openide.util.actions.CallbackSystemAction; +import org.openide.util.actions.SystemAction; +import org.openide.util.lookup.AbstractLookup; +import org.openide.util.lookup.InstanceContent; +import org.openide.util.lookup.ProxyLookup; +import org.openide.windows.Mode; +import org.openide.windows.TopComponent; +import org.openide.windows.WindowManager; +import org.openide.NotifyDescriptor; + +/** + * + * @author Thomas Wuerthinger + */ +public final class EditorTopComponent extends TopComponent implements ChangedListener, PropertyChangeListener { + + private DiagramScene scene; + private InstanceContent content; + private FindPanel findPanel; + private EnableBlockLayoutAction blockLayoutAction; + private OverviewAction overviewAction; + private PredSuccAction predSuccAction; + private boolean notFirstTime; + private ExtendedSatelliteComponent satelliteComponent; + private JPanel centerPanel; + private CardLayout cardLayout; + private RangeSlider rangeSlider; + private JToggleButton overviewButton; + private static final String PREFERRED_ID = "EditorTopComponent"; + private static final String SATELLITE_STRING = "satellite"; + private static final String SCENE_STRING = "scene"; + private DiagramViewModel rangeSliderModel; + private ExportCookie exportCookie = new ExportCookie() { + + public void export(File f) { + + Graphics2D svgGenerator = BatikSVG.createGraphicsObject(); + + if (svgGenerator == null) { + NotifyDescriptor message = new NotifyDescriptor.Message("For export to SVG files the Batik SVG Toolkit must be intalled.", NotifyDescriptor.ERROR_MESSAGE); + DialogDisplayer.getDefault().notifyLater(message); + } else { + scene.paint(svgGenerator); + FileOutputStream os = null; + try { + os = new FileOutputStream(f); + Writer out = new OutputStreamWriter(os, "UTF-8"); + BatikSVG.printToStream(svgGenerator, out, true); + } catch (FileNotFoundException e) { + NotifyDescriptor message = new NotifyDescriptor.Message("For export to SVG files the Batik SVG Toolkit must be intalled.", NotifyDescriptor.ERROR_MESSAGE); + DialogDisplayer.getDefault().notifyLater(message); + + } catch (UnsupportedEncodingException e) { + } finally { + if (os != null) { + try { + os.close(); + } catch (IOException e) { + } + } + } + + } + } + }; + + private void updateDisplayName() { + setDisplayName(getDiagram().getName()); + } + + public EditorTopComponent(Diagram diagram) { + + FilterChain filterChain = null; + FilterChain sequence = null; + FilterChainProvider provider = Lookup.getDefault().lookup(FilterChainProvider.class); + if (provider == null) { + filterChain = new FilterChain(); + sequence = new FilterChain(); + } else { + filterChain = provider.getFilterChain(); + sequence = provider.getSequence(); + } + + setName(NbBundle.getMessage(EditorTopComponent.class, "CTL_EditorTopComponent")); + setToolTipText(NbBundle.getMessage(EditorTopComponent.class, "HINT_EditorTopComponent")); + + Action[] actions = new Action[]{ + PrevDiagramAction.get(PrevDiagramAction.class), + NextDiagramAction.get(NextDiagramAction.class), + null, + ExtractAction.get(ExtractAction.class), + 
ShowAllAction.get(HideAction.class), + ShowAllAction.get(ShowAllAction.class), + null, + ZoomInAction.get(ZoomInAction.class), + ZoomOutAction.get(ZoomOutAction.class), + null, + ExpandPredecessorsAction.get(ExpandPredecessorsAction.class), + ExpandSuccessorsAction.get(ExpandSuccessorsAction.class) + }; + + + initComponents(); + + ActionMap actionMap = getActionMap(); + + ToolbarPool.getDefault().setPreferredIconSize(16); + Toolbar toolBar = new Toolbar(); + Border b = (Border) UIManager.get("Nb.Editor.Toolbar.border"); //NOI18N + toolBar.setBorder(b); + JPanel container = new JPanel(); + this.add(container, BorderLayout.NORTH); + container.setLayout(new BorderLayout()); + container.add(BorderLayout.NORTH, toolBar); + + rangeSliderModel = new DiagramViewModel(diagram.getGraph().getGroup(), filterChain, sequence); + rangeSliderModel.selectGraph(diagram.getGraph()); + rangeSlider = new RangeSlider(); + rangeSlider.setModel(rangeSliderModel); + rangeSliderModel.getChangedEvent().addListener(this); + container.add(BorderLayout.CENTER, rangeSlider); + + scene = new DiagramScene(actions, rangeSliderModel); + content = new InstanceContent(); + this.associateLookup(new ProxyLookup(new Lookup[]{scene.getLookup(), new AbstractLookup(content)})); + content.add(exportCookie); + content.add(rangeSliderModel); + + + findPanel = new FindPanel(diagram.getFigures()); + findPanel.setMaximumSize(new Dimension(200, 50)); + toolBar.add(findPanel); + toolBar.add(NodeFindAction.get(NodeFindAction.class)); + toolBar.addSeparator(); + toolBar.add(NextDiagramAction.get(NextDiagramAction.class)); + toolBar.add(PrevDiagramAction.get(PrevDiagramAction.class)); + toolBar.addSeparator(); + toolBar.add(ExtractAction.get(ExtractAction.class)); + toolBar.add(ShowAllAction.get(HideAction.class)); + toolBar.add(ShowAllAction.get(ShowAllAction.class)); + toolBar.addSeparator(); + toolBar.add(ShowAllAction.get(ZoomInAction.class)); + toolBar.add(ShowAllAction.get(ZoomOutAction.class)); + + blockLayoutAction = new EnableBlockLayoutAction(); + JToggleButton button = new JToggleButton(blockLayoutAction); + button.setSelected(true); + toolBar.add(button); + blockLayoutAction.addPropertyChangeListener(this); + + overviewAction = new OverviewAction(); + overviewButton = new JToggleButton(overviewAction); + overviewButton.setSelected(false); + toolBar.add(overviewButton); + overviewAction.addPropertyChangeListener(this); + + predSuccAction = new PredSuccAction(); + button = new JToggleButton(predSuccAction); + button.setSelected(true); + toolBar.add(button); + predSuccAction.addPropertyChangeListener(this); + + toolBar.addSeparator(); + toolBar.add(UndoAction.get(UndoAction.class)); + toolBar.add(RedoAction.get(RedoAction.class)); + + centerPanel = new JPanel(); + this.add(centerPanel, BorderLayout.CENTER); + cardLayout = new CardLayout(); + centerPanel.setLayout(cardLayout); + centerPanel.add(SCENE_STRING, scene.getScrollPane()); + centerPanel.setBackground(Color.WHITE); + satelliteComponent = new ExtendedSatelliteComponent(scene); + satelliteComponent.setSize(200, 200); + centerPanel.add(SATELLITE_STRING, satelliteComponent); + + CallbackSystemAction callFindAction = (CallbackSystemAction) SystemAction.get(FindAction.class); + NodeFindAction findAction = NodeFindAction.get(NodeFindAction.class); + Object key = callFindAction.getActionMapKey(); + actionMap.put(key, findAction); + + scene.getScrollPane().addKeyListener(keyListener); + scene.getView().addKeyListener(keyListener); + satelliteComponent.addKeyListener(keyListener); + + 
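(Aside, not part of the patch: a minimal sketch of the CardLayout flip used above to switch between the scene and the satellite overview; the labels are placeholders for the real components and the names are hypothetical.)

import java.awt.CardLayout;
import javax.swing.JLabel;
import javax.swing.JPanel;

// Illustrative only: two cards in one center panel, toggled by name.
class CardFlipExample {

    private final CardLayout cards = new CardLayout();
    private final JPanel center = new JPanel(cards);

    CardFlipExample() {
        center.add("scene", new JLabel("scene view"));
        center.add("satellite", new JLabel("satellite view"));
    }

    void showSatellite() {
        cards.show(center, "satellite");   // bring the overview card to the front
    }

    void showScene() {
        cards.show(center, "scene");
    }
}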
scene.getScrollPane().addHierarchyBoundsListener(new HierarchyBoundsListener() { + + public void ancestorMoved(HierarchyEvent e) { + } + + public void ancestorResized(HierarchyEvent e) { + if (!notFirstTime && scene.getScrollPane().getBounds().width > 0) { + notFirstTime = true; + SwingUtilities.invokeLater(new Runnable() { + + public void run() { + Figure f = EditorTopComponent.this.scene.getModel().getDiagramToView().getRootFigure(); + if (f != null) { + scene.setUndoRedoEnabled(false); + scene.gotoFigure(f); + scene.setUndoRedoEnabled(true); + } + } + }); + } + } + }); + + updateDisplayName(); + } + private KeyListener keyListener = new KeyListener() { + + public void keyTyped(KeyEvent e) { + } + + public void keyPressed(KeyEvent e) { + if (e.getKeyCode() == KeyEvent.VK_S) { + EditorTopComponent.this.overviewButton.setSelected(true); + EditorTopComponent.this.overviewAction.setState(true); + } + } + + public void keyReleased(KeyEvent e) { + if (e.getKeyCode() == KeyEvent.VK_S) { + EditorTopComponent.this.overviewButton.setSelected(false); + EditorTopComponent.this.overviewAction.setState(false); + } + } + }; + + public DiagramViewModel getDiagramModel() { + return scene.getModel(); + } + + private void showSatellite() { + cardLayout.show(centerPanel, SATELLITE_STRING); + satelliteComponent.requestFocus(); + + } + + private void showScene() { + cardLayout.show(centerPanel, SCENE_STRING); + scene.getView().requestFocus(); + } + + public void findNode() { + findPanel.find(); + } + + public void zoomOut() { + double zoom = scene.getZoomFactor(); + Point viewPosition = scene.getScrollPane().getViewport().getViewPosition(); + double newZoom = zoom / DiagramScene.ZOOM_INCREMENT; + if (newZoom > DiagramScene.ZOOM_MIN_FACTOR) { + scene.setZoomFactor(newZoom); + scene.validate(); + scene.getScrollPane().getViewport().setViewPosition(new Point((int) (viewPosition.x / DiagramScene.ZOOM_INCREMENT), (int) (viewPosition.y / DiagramScene.ZOOM_INCREMENT))); + this.satelliteComponent.update(); + } + } + + public void zoomIn() { + double zoom = scene.getZoomFactor(); + Point viewPosition = scene.getScrollPane().getViewport().getViewPosition(); + double newZoom = zoom * DiagramScene.ZOOM_INCREMENT; + if (newZoom < DiagramScene.ZOOM_MAX_FACTOR) { + scene.setZoomFactor(newZoom); + scene.validate(); + scene.getScrollPane().getViewport().setViewPosition(new Point((int) (viewPosition.x * DiagramScene.ZOOM_INCREMENT), (int) (viewPosition.y * DiagramScene.ZOOM_INCREMENT))); + this.satelliteComponent.update(); + } + } + + public void showPrevDiagram() { + int fp = getModel().getFirstPosition(); + int sp = getModel().getSecondPosition(); + if (fp != 0) { + fp--; + sp--; + getModel().setPositions(fp, sp); + } + } + + public DiagramViewModel getModel() { + return scene.getModel(); + } + + public FilterChain getFilterChain() { + return this.scene.getModel().getFilterChain(); + } + + public static EditorTopComponent getActive() { + Set modes = WindowManager.getDefault().getModes(); + for (Mode m : modes) { + TopComponent tc = m.getSelectedTopComponent(); + if (tc instanceof EditorTopComponent) { + return (EditorTopComponent) tc; + } + } + return null; + } + + /** This method is called from within the constructor to + * initialize the form. + * WARNING: Do NOT modify this code. The content of this method is + * always regenerated by the Form Editor. 
+ */ + // //GEN-BEGIN:initComponents + private void initComponents() { + jCheckBox1 = new javax.swing.JCheckBox(); + + org.openide.awt.Mnemonics.setLocalizedText(jCheckBox1, "jCheckBox1"); + jCheckBox1.setBorder(javax.swing.BorderFactory.createEmptyBorder(0, 0, 0, 0)); + jCheckBox1.setMargin(new java.awt.Insets(0, 0, 0, 0)); + + setLayout(new java.awt.BorderLayout()); + + }// //GEN-END:initComponents + // Variables declaration - do not modify//GEN-BEGIN:variables + private javax.swing.JCheckBox jCheckBox1; + // End of variables declaration//GEN-END:variables + @Override + public int getPersistenceType() { + return TopComponent.PERSISTENCE_NEVER; + } + + @Override + public void componentOpened() { + } + + @Override + public void componentClosed() { + } + + @Override + protected String preferredID() { + return PREFERRED_ID; + } + + public void changed(RangeSliderModel model) { + updateDisplayName(); + } + + public boolean showPredSucc() { + return (Boolean) predSuccAction.getValue(PredSuccAction.STATE); + } + + public void setSelection(PropertyMatcher matcher) { + + Properties.PropertySelector
<Figure> selector = new Properties.PropertySelector<Figure>(scene.getModel().getDiagramToView().getFigures()); + List<Figure>
list = selector.selectMultiple(matcher); + boolean b = scene.getUndoRedoEnabled(); + scene.setUndoRedoEnabled(false); + scene.gotoFigures(list); + scene.setUndoRedoEnabled(b); + scene.setSelection(list); + } + + public void setSelectedNodes(Set nodes) { + + List
<Figure> list = new ArrayList<Figure>
(); + Set ids = new HashSet(); + for (InputNode n : nodes) { + ids.add(n.getId()); + } + + for (Figure f : scene.getModel().getDiagramToView().getFigures()) { + for (InputNode n : f.getSource().getSourceNodes()) { + if (ids.contains(n.getId())) { + list.add(f); + break; + } + } + } + + scene.gotoFigures(list); + scene.setSelection(list); + } + + public void propertyChange(PropertyChangeEvent evt) { + if (evt.getSource() == this.predSuccAction) { + boolean b = (Boolean) predSuccAction.getValue(PredSuccAction.STATE); + this.getModel().setShowNodeHull(b); + } else if (evt.getSource() == this.overviewAction) { + boolean b = (Boolean) overviewAction.getValue(OverviewAction.STATE); + if (b) { + showSatellite(); + } else { + showScene(); + } + } else if (evt.getSource() == this.blockLayoutAction) { + boolean b = (Boolean) blockLayoutAction.getValue(EnableBlockLayoutAction.STATE); + System.out.println("Showblocks = " + b); + this.getModel().setShowBlocks(b); + } else { + assert false : "Unknown event source"; + } + } + + public void extract() { + scene.showOnly(scene.getSelectedNodes()); + } + + public void hideNodes() { + Set selectedNodes = this.scene.getSelectedNodes(); + HashSet nodes = new HashSet(scene.getModel().getHiddenNodes()); + nodes.addAll(selectedNodes); + this.scene.showNot(nodes); + } + + public void expandPredecessors() { + Set
<Figure> oldSelection = scene.getSelectedFigures(); + Set<Figure> figures = new HashSet<Figure>
(); + + for (Figure f : this.getDiagramModel().getDiagramToView().getFigures()) { + boolean ok = false; + if (oldSelection.contains(f)) { + ok = true; + } else { + for (Figure pred : f.getSuccessors()) { + if (oldSelection.contains(pred)) { + ok = true; + break; + } + } + } + + if (ok) { + figures.add(f); + } + } + + scene.showAll(figures); + } + + public void expandSuccessors() { + Set
<Figure> oldSelection = scene.getSelectedFigures(); + Set<Figure> figures = new HashSet<Figure>
(); + + for (Figure f : this.getDiagramModel().getDiagramToView().getFigures()) { + boolean ok = false; + if (oldSelection.contains(f)) { + ok = true; + } else { + for (Figure succ : f.getPredecessors()) { + if (oldSelection.contains(succ)) { + ok = true; + break; + } + } + } + + if (ok) { + figures.add(f); + } + } + + scene.showAll(figures); + } + + public void showAll() { + scene.showNot(new HashSet()); + } + + public Diagram getDiagram() { + return getDiagramModel().getDiagramToView(); + } + + @Override + protected void componentActivated() { + } + + @Override + public void requestFocus() { + super.requestFocus(); + scene.getView().requestFocus(); + } + + @Override + public boolean requestFocusInWindow() { + super.requestFocusInWindow(); + return scene.getView().requestFocusInWindow(); + } + + @Override + public UndoRedo getUndoRedo() { + return scene.getUndoRedo(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/ExportCookie.java 2009-08-01 04:20:26.497518061 +0100 @@ -0,0 +1,36 @@ +/* + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package com.sun.hotspot.igv.view; + +import java.io.File; + +/** + * + * @author Thomas Wuerthinger + */ +public interface ExportCookie { + + void export(File f); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/ExtendedPanAction.java 2009-08-01 04:20:26.934376464 +0100 @@ -0,0 +1,131 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view; + +import java.awt.Container; +import java.awt.Dimension; +import java.awt.Point; +import org.netbeans.api.visual.widget.Scene; +import org.netbeans.api.visual.widget.Widget; +import org.netbeans.api.visual.action.WidgetAction; + +import java.awt.event.MouseEvent; +import javax.swing.JComponent; +import javax.swing.JScrollPane; +import javax.swing.SwingUtilities; + +/** + * @author David Kaspar + * @author Thomas Wuerthinger + */ +public class ExtendedPanAction extends WidgetAction.LockedAdapter { + + private Scene scene; + private JScrollPane scrollPane; + private Point lastLocation; + + protected boolean isLocked() { + return scrollPane != null; + } + + @Override + public State mousePressed(Widget widget, WidgetMouseEvent event) { + if (event.getButton() == MouseEvent.BUTTON2 || event.getButton() == MouseEvent.BUTTON1 && ((event.getModifiers() & MouseEvent.CTRL_MASK) != 0)) { + scene = widget.getScene(); + scrollPane = findScrollPane(scene.getView()); + if (scrollPane != null) { + lastLocation = scene.convertSceneToView(widget.convertLocalToScene(event.getPoint())); + SwingUtilities.convertPointToScreen(lastLocation, scrollPane.getViewport().getView()); + return State.createLocked(widget, this); + } + } + return State.REJECTED; + } + + private JScrollPane findScrollPane(JComponent component) { + for (;;) { + if (component == null) { + return null; + } + if (component instanceof JScrollPane) { + return ((JScrollPane) component); + } + Container parent = component.getParent(); + if (!(parent instanceof JComponent)) { + return null; + } + component = (JComponent) parent; + } + } + + @Override + public State mouseReleased(Widget widget, WidgetMouseEvent event) { + boolean state = pan(widget, event.getPoint()); + if (state) { + scrollPane = null; + } + return state ? State.createLocked(widget, this) : State.REJECTED; + } + + @Override + public State mouseDragged(Widget widget, WidgetMouseEvent event) { + return pan(widget, event.getPoint()) ? 
State.createLocked(widget, this) : State.REJECTED; + } + + private boolean pan(Widget widget, Point newLocation) { + if (scrollPane == null || scene != widget.getScene()) { + return false; + } + newLocation = scene.convertSceneToView(widget.convertLocalToScene(newLocation)); + SwingUtilities.convertPointToScreen(newLocation, scrollPane.getViewport().getView()); + Point viewPosition = scrollPane.getViewport().getViewPosition(); + Dimension viewSize = scrollPane.getViewport().getViewSize(); + Dimension viewPortSize = scrollPane.getViewport().getSize(); + + int xOffset = lastLocation.x - newLocation.x; + int yOffset = lastLocation.y - newLocation.y; + + if (viewPortSize.height == viewSize.height) { + yOffset = 0; + } + + if (viewPortSize.width == viewSize.width) { + xOffset = 0; + } + + if (xOffset == 0 && yOffset == 0) { + return true; + } + viewPosition = new Point(viewPosition.x + xOffset, viewPosition.y + yOffset); + viewPosition.x = Math.max(viewPosition.x, 0); + viewPosition.y = Math.max(viewPosition.y, 0); + viewPosition.x = Math.min(viewPosition.x, scrollPane.getViewport().getView().getSize().width - scrollPane.getViewport().getSize().width); + viewPosition.y = Math.min(viewPosition.y, scrollPane.getViewport().getView().getSize().height - scrollPane.getViewport().getSize().height); + + scrollPane.getViewport().setViewPosition(viewPosition); + scrollPane.getViewport().getView().repaint(); + lastLocation = newLocation; + return true; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/ExtendedSatelliteComponent.java 2009-08-01 04:20:27.358556618 +0100 @@ -0,0 +1,200 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view; + +import org.netbeans.api.visual.widget.Scene; + +import javax.swing.*; +import java.awt.*; +import java.awt.event.*; + +/** + * @author David Kaspar + * @author Thomas Wuerthinger + */ +public class ExtendedSatelliteComponent extends JComponent implements MouseListener, MouseMotionListener, Scene.SceneListener, ComponentListener { + + private DiagramScene scene; + private Image image; + private int imageWidth; + private int imageHeight; + + public ExtendedSatelliteComponent(DiagramScene scene) { + this.scene = scene; + setDoubleBuffered(true); + setPreferredSize(new Dimension(128, 128)); + addMouseListener(this); + addMouseMotionListener(this); + } + + public void addNotify() { + super.addNotify(); + scene.addSceneListener(this); + JComponent viewComponent = scene.getView(); + if (viewComponent == null) { + viewComponent = scene.createView(); + } + viewComponent.addComponentListener(this); + repaint(); + } + + public void removeNotify() { + scene.getView().removeComponentListener(this); + scene.removeSceneListener(this); + super.removeNotify(); + } + + public void update() { + this.image = null; + if (this.isVisible()) { + repaint(); + revalidate(); + validate(); + } + } + + public void paint(Graphics g) { + Graphics2D gr = (Graphics2D) g; + super.paint(g); + Rectangle bounds = scene.getBounds(); + Dimension size = getSize(); + + double sx = bounds.width > 0 ? (double) size.width / bounds.width : 0.0; + double sy = bounds.width > 0 ? (double) size.height / bounds.height : 0.0; + double scale = Math.min(sx, sy); + + int vw = (int) (scale * bounds.width); + int vh = (int) (scale * bounds.height); + int vx = (size.width - vw) / 2; + int vy = (size.height - vh) / 2; + + + if (image == null || vw != imageWidth || vh != imageHeight) { + + imageWidth = vw; + imageHeight = vh; + image = this.createImage(imageWidth, imageHeight); + Graphics2D ig = (Graphics2D) image.getGraphics(); + ig.scale(scale, scale); + scene.setRealZoomFactor(scale); + scene.paint(ig); + scene.setRealZoomFactor(0.0); + } + + gr.drawImage(image, vx, vy, this); + + JComponent component = scene.getView(); + double zoomFactor = scene.getZoomFactor(); + Rectangle viewRectangle = component != null ? component.getVisibleRect() : null; + if (viewRectangle != null) { + Rectangle window = new Rectangle( + (int) ((double) viewRectangle.x * scale / zoomFactor), + (int) ((double) viewRectangle.y * scale / zoomFactor), + (int) ((double) viewRectangle.width * scale / zoomFactor), + (int) ((double) viewRectangle.height * scale / zoomFactor)); + window.translate(vx, vy); + gr.setColor(new Color(200, 200, 200, 128)); + gr.fill(window); + gr.setColor(Color.BLACK); + gr.drawRect(window.x, window.y, window.width - 1, window.height - 1); + } + } + + public void mouseClicked(MouseEvent e) { + } + + public void mousePressed(MouseEvent e) { + moveVisibleRect(e.getPoint()); + } + + public void mouseReleased(MouseEvent e) { + moveVisibleRect(e.getPoint()); + } + + public void mouseEntered(MouseEvent e) { + } + + public void mouseExited(MouseEvent e) { + } + + public void mouseDragged(MouseEvent e) { + moveVisibleRect(e.getPoint()); + } + + public void mouseMoved(MouseEvent e) { + } + + private void moveVisibleRect(Point center) { + JComponent component = scene.getView(); + if (component == null) { + return; + } + double zoomFactor = scene.getZoomFactor(); + Rectangle bounds = scene.getBounds(); + Dimension size = getSize(); + + double sx = bounds.width > 0 ? 
(double) size.width / bounds.width : 0.0; + double sy = bounds.width > 0 ? (double) size.height / bounds.height : 0.0; + double scale = Math.min(sx, sy); + + int vw = (int) (scale * bounds.width); + int vh = (int) (scale * bounds.height); + int vx = (size.width - vw) / 2; + int vy = (size.height - vh) / 2; + + int cx = (int) ((double) (center.x - vx) / scale * zoomFactor); + int cy = (int) ((double) (center.y - vy) / scale * zoomFactor); + + Rectangle visibleRect = component.getVisibleRect(); + visibleRect.x = cx - visibleRect.width / 2; + visibleRect.y = cy - visibleRect.height / 2; + component.scrollRectToVisible(visibleRect); + + this.repaint(); + } + + public void sceneRepaint() { + //repaint (); + } + + public void sceneValidating() { + } + + public void sceneValidated() { + } + + public void componentResized(ComponentEvent e) { + repaint(); + } + + public void componentMoved(ComponentEvent e) { + repaint(); + } + + public void componentShown(ComponentEvent e) { + } + + public void componentHidden(ComponentEvent e) { + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/FindPanel.java 2009-08-01 04:20:27.793530934 +0100 @@ -0,0 +1,111 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view; + +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.data.Properties.RegexpPropertyMatcher; +import com.sun.hotspot.igv.data.Property; +import java.awt.GridLayout; +import java.awt.event.KeyEvent; +import java.awt.event.KeyListener; +import java.util.List; +import java.util.SortedSet; +import java.util.TreeSet; +import javax.swing.JComboBox; +import javax.swing.JPanel; +import javax.swing.JTextField; + +/** + * + * @author Thomas Wuerthinger + */ +class FindPanel extends JPanel implements KeyListener { + + private JComboBox nameComboBox; + private JTextField valueTextField; + + public FindPanel(List
<Figure> figures) { + createDesign(); + updateComboBox(figures); + } + + protected void createDesign() { + setLayout(new GridLayout()); + nameComboBox = new JComboBox(); + valueTextField = new JTextField(); + add(nameComboBox); + add(valueTextField); + valueTextField.addKeyListener(this); + } + + public void updateComboBox(List<Figure>
figures) { + + String sel = (String) nameComboBox.getSelectedItem(); + SortedSet propertyNames = new TreeSet(); + + for (Figure f : figures) { + Properties prop = f.getProperties(); + for (Property p : prop) { + if (!propertyNames.contains(p.getName())) { + propertyNames.add(p.getName()); + } + } + } + + for (String s : propertyNames) { + nameComboBox.addItem(s); + } + nameComboBox.setSelectedItem(sel); + } + + public String getNameText() { + return (String) nameComboBox.getSelectedItem(); + } + + public String getValueText() { + return valueTextField.getText(); + } + + public void keyTyped(KeyEvent e) { + } + + public void keyPressed(KeyEvent e) { + if (e.getKeyCode() == KeyEvent.VK_ENTER) { + find(); + } + } + + public void find() { + EditorTopComponent comp = EditorTopComponent.getActive(); + if (comp != null) { + RegexpPropertyMatcher matcher = new RegexpPropertyMatcher(getNameText(), getValueText()); + comp.setSelection(matcher); + } + } + + public void keyReleased(KeyEvent e) { + + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/GraphViewerImplementation.java 2009-08-01 04:20:28.232698350 +0100 @@ -0,0 +1,43 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view; + +import com.sun.hotspot.igv.data.InputGraph; +import com.sun.hotspot.igv.data.services.GraphViewer; +import com.sun.hotspot.igv.graph.Diagram; +import com.sun.hotspot.igv.settings.Settings; + +/** + * + * @author Thomas Wuerthinger + */ +public class GraphViewerImplementation implements GraphViewer { + + public void view(InputGraph graph) { + Diagram diagram = Diagram.createDiagram(graph, Settings.get().get(Settings.NODE_TEXT, Settings.NODE_TEXT_DEFAULT)); + EditorTopComponent tc = new EditorTopComponent(diagram); + tc.open(); + tc.requestActive(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/PreferenceConstants.java 2009-08-01 04:20:28.677922284 +0100 @@ -0,0 +1,34 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view; + +/** + * + * @author Thomas Wuerthinger + */ +public class PreferenceConstants { + + public static final String KEY_LINE_GENERATOR = "lineGenerator"; + public static final String DEFAULT_LINE_GENERATOR = "com.sun.hotspot.igv.positioning.BasicLineGenerator"; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/SlotLayout.java 2009-08-01 04:20:29.098621469 +0100 @@ -0,0 +1,171 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view; + +import java.awt.Point; +import java.awt.Rectangle; +import java.util.Collection; +import java.util.List; +import org.netbeans.api.visual.layout.Layout; +import org.netbeans.api.visual.layout.LayoutFactory; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class SlotLayout implements Layout { + + public enum HorizontalAlignment { + + Left, + Center, + Right + } + private Layout baseLayout; + private HorizontalAlignment alignment; + private boolean vertical; + + public SlotLayout() { + this(HorizontalAlignment.Center, false); + } + + public SlotLayout(HorizontalAlignment alignment, boolean vertical) { + this.alignment = alignment; + baseLayout = LayoutFactory.createVerticalFlowLayout(); + this.vertical = vertical; + } + + public void layout(Widget widget) { + if (!vertical) { + Collection children = widget.getChildren(); + int gap = 0; + int max = 0; + for (Widget child : children) { + Rectangle preferredBounds = child.getPreferredBounds(); + int i = preferredBounds.width; + if (i > max) { + max = i; + } + } + int pos = 0; + for (Widget child : children) { + Rectangle preferredBounds = child.getPreferredBounds(); + int x = preferredBounds.x; + int y = preferredBounds.y; + int width = preferredBounds.width; + int height = preferredBounds.height; + if (pos == 0) { + pos += height / 2; + } + int lx = -x; + int ly = pos - y; + switch (alignment) { + case Center: + lx += (max - width) / 2; + break; + case Left: + break; + case Right: + lx += max - width; + break; + } + child.resolveBounds(new Point(lx, ly), new Rectangle(x, y, width, height)); + pos += height + gap; + } + } else { + + Collection children = widget.getChildren(); + int gap = 0; + int max = 0; + for (Widget child : children) { + Rectangle preferredBounds = child.getPreferredBounds(); + int i = preferredBounds.height; + if (i > max) { + max = i; + } + } + int pos = 0; + for (Widget child : children) { + Rectangle preferredBounds = child.getPreferredBounds(); + int x = preferredBounds.x; + int y = preferredBounds.y; + int width = preferredBounds.width; + int height = preferredBounds.height; + if (pos == 0) { + pos += width / 2; + } + int lx = pos - x; + int ly = -y; + switch (alignment) { + case Center: + ly += (max - height) / 2; + break; + case Left: + break; + case Right: + ly += max - height; + break; + } + child.resolveBounds(new Point(lx, ly), new Rectangle(x, y, width, height)); + pos += width + gap; + } + + } + } + + public boolean requiresJustification(Widget widget) { + return true; + } + + public void justify(Widget widget) { + baseLayout.justify(widget); + + Rectangle client = widget.getClientArea(); + List children = widget.getChildren(); + + int count = children.size(); + int z = 0; + + int maxWidth = 0; + for (Widget c : children) { + if (c.getPreferredBounds().width > maxWidth) { + maxWidth = c.getPreferredBounds().width; + } + } + + for (Widget c : children) { + z++; + Point curLocation = c.getLocation(); + Rectangle curBounds = c.getBounds(); + + + Point location = new Point(curLocation.x, client.y + client.height * z / (count + 1) - curBounds.height / 2); + if (vertical) { + location = new Point(client.x + client.width * z / (count + 1) - maxWidth / 2, curLocation.y); + } + c.resolveBounds(location, null); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/Bundle.properties 2009-08-01 04:20:31.091343146 +0100 @@ 
-0,0 +1,7 @@ +CTL_EditorAction=Open Editor Window +CTL_LineGeneratorAction=Line Generator +CTL_NextDiagramAction=Show next graph +CTL_EnableBlockLayoutAction=Enable block layout +CTL_NodeFindAction=Find +CTL_PrevDiagramAction=Show previous graph +CTL_ExportAction=Export... --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/EnableBlockLayoutAction.java 2009-08-01 04:20:31.829389223 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.actions; + +import java.awt.event.ActionEvent; +import javax.swing.AbstractAction; +import javax.swing.Action; +import javax.swing.ImageIcon; + +/** + * + * @author Thomas Wuerthinger + */ +public class EnableBlockLayoutAction extends AbstractAction { + + private boolean state; + public static final String STATE = "state"; + + public EnableBlockLayoutAction() { + state = true; + putValue(AbstractAction.SMALL_ICON, new ImageIcon(org.openide.util.Utilities.loadImage(iconResource()))); + putValue(STATE, true); + putValue(Action.SHORT_DESCRIPTION, "Cluster nodes into blocks"); + } + + public void actionPerformed(ActionEvent ev) { + this.state = !state; + this.putValue(STATE, state); + } + + protected String iconResource() { + return "com/sun/hotspot/igv/view/images/blocks.gif"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/ExpandPredecessorsAction.java 2009-08-01 04:20:32.306662414 +0100 @@ -0,0 +1,60 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.actions; + +import com.sun.hotspot.igv.view.EditorTopComponent; +import org.openide.util.HelpCtx; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class ExpandPredecessorsAction extends CallableSystemAction { + + public void performAction() { + EditorTopComponent editor = EditorTopComponent.getActive(); + if (editor != null) { + editor.expandPredecessors(); + } + } + + public String getName() { + return "Expand Predecessors"; + } + + @Override + protected void initialize() { + super.initialize(); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/ExpandSuccessorsAction.java 2009-08-01 04:20:32.770215516 +0100 @@ -0,0 +1,60 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.actions; + +import com.sun.hotspot.igv.view.EditorTopComponent; +import org.openide.util.HelpCtx; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class ExpandSuccessorsAction extends CallableSystemAction { + + public void performAction() { + EditorTopComponent editor = EditorTopComponent.getActive(); + if (editor != null) { + editor.expandSuccessors(); + } + } + + public String getName() { + return "Expand Successors"; + } + + @Override + protected void initialize() { + super.initialize(); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/ExportAction.java 2009-08-01 04:20:33.212133342 +0100 @@ -0,0 +1,117 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.actions; + +import com.sun.hotspot.igv.settings.Settings; +import com.sun.hotspot.igv.view.ExportCookie; +import java.awt.event.InputEvent; +import java.awt.event.KeyEvent; +import java.io.File; +import javax.swing.Action; +import javax.swing.JFileChooser; +import javax.swing.KeyStroke; +import javax.swing.filechooser.FileFilter; +import org.openide.util.HelpCtx; +import org.openide.util.Lookup; +import org.openide.util.LookupEvent; +import org.openide.util.LookupListener; +import org.openide.util.NbBundle; +import org.openide.util.Utilities; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class ExportAction extends CallableSystemAction implements LookupListener { + + private final Lookup lookup; + private final Lookup.Result result; + + public ExportAction() { + putValue(Action.SHORT_DESCRIPTION, "Export current graph as an SVG file"); + putValue(Action.ACCELERATOR_KEY, KeyStroke.getKeyStroke(KeyEvent.VK_E, InputEvent.CTRL_MASK)); + lookup = Utilities.actionsGlobalContext(); + result = lookup.lookup(new Lookup.Template(ExportCookie.class)); + result.addLookupListener(this); + resultChanged(null); + } + + public void resultChanged(LookupEvent e) { + super.setEnabled(result.allInstances().size() > 0); + } + + public void performAction() { + + JFileChooser fc = new JFileChooser(); + fc.setFileFilter(new FileFilter() { + + public boolean accept(File f) { + return true; + } + + public String getDescription() { + return "SVG files (*.svg)"; + } + }); + fc.setCurrentDirectory(new File(Settings.get().get(Settings.DIRECTORY, Settings.DIRECTORY_DEFAULT))); + + + if (fc.showSaveDialog(null) == JFileChooser.APPROVE_OPTION) { + File file = fc.getSelectedFile(); + if (!file.getName().contains(".")) { + file = new File(file.getAbsolutePath() + ".svg"); + } + + File dir = file; + if (!dir.isDirectory()) { + dir = dir.getParentFile(); + } + + Settings.get().put(Settings.DIRECTORY, dir.getAbsolutePath()); + ExportCookie cookie = Utilities.actionsGlobalContext().lookup(ExportCookie.class); + if (cookie != null) { + cookie.export(file); + } + } + } + + public String getName() { + return NbBundle.getMessage(ExportAction.class, "CTL_ExportAction"); + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/view/images/export.gif"; + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/ExtractAction.java 2009-08-01 04:20:33.669752108 +0100 @@ -0,0 +1,74 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.actions; + +import com.sun.hotspot.igv.view.EditorTopComponent; +import java.awt.Event; +import java.awt.event.KeyEvent; +import javax.swing.Action; +import javax.swing.KeyStroke; +import org.openide.util.HelpCtx; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class ExtractAction extends CallableSystemAction { + + public void performAction() { + EditorTopComponent editor = EditorTopComponent.getActive(); + if (editor != null) { + editor.extract(); + } + } + + public ExtractAction() { + putValue(Action.SHORT_DESCRIPTION, "Extract current set of selected nodes"); + putValue(Action.ACCELERATOR_KEY, KeyStroke.getKeyStroke(KeyEvent.VK_X, Event.CTRL_MASK, false)); + } + + public String getName() { + return "Extract action"; + } + + @Override + protected void initialize() { + super.initialize(); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/view/images/extract.gif"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/HideAction.java 2009-08-01 04:20:34.099275554 +0100 @@ -0,0 +1,74 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view.actions; + +import com.sun.hotspot.igv.view.EditorTopComponent; +import java.awt.Event; +import java.awt.event.KeyEvent; +import javax.swing.Action; +import javax.swing.KeyStroke; +import org.openide.util.HelpCtx; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class HideAction extends CallableSystemAction { + + public void performAction() { + EditorTopComponent editor = EditorTopComponent.getActive(); + if (editor != null) { + editor.hideNodes(); + } + } + + public HideAction() { + putValue(Action.SHORT_DESCRIPTION, "Hide selected nodes"); + putValue(Action.ACCELERATOR_KEY, KeyStroke.getKeyStroke(KeyEvent.VK_H, Event.CTRL_MASK, false)); + } + + public String getName() { + return "Hide"; + } + + @Override + protected void initialize() { + super.initialize(); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/view/images/hide.gif"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/MouseOverAction.java 2009-08-01 04:20:34.521048369 +0100 @@ -0,0 +1,59 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view.actions; + +import org.netbeans.api.visual.action.HoverProvider; +import org.netbeans.api.visual.action.WidgetAction; +import org.netbeans.api.visual.action.WidgetAction.State; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class MouseOverAction extends WidgetAction.Adapter { + + private long eventID = Integer.MIN_VALUE; + private HoverProvider provider; + + public MouseOverAction(HoverProvider provider) { + this.provider = provider; + } + + @Override + public State mouseMoved(Widget widget, WidgetMouseEvent event) { + long id = event.getEventID(); + if (id != eventID) { + eventID = id; + provider.widgetHovered(widget); + } + return State.REJECTED; + } + + @Override + public State mouseExited(Widget widget, WidgetMouseEvent event) { + provider.widgetHovered(null); + return State.REJECTED; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/NextDiagramAction.java 2009-08-01 04:20:34.941707816 +0100 @@ -0,0 +1,105 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view.actions; + +import com.sun.hotspot.igv.view.DiagramViewModel; +import com.sun.hotspot.igv.data.ChangedListener; +import com.sun.hotspot.igv.util.ContextAction; +import javax.swing.Action; +import javax.swing.ImageIcon; +import org.openide.util.HelpCtx; +import org.openide.util.Lookup; +import org.openide.util.NbBundle; +import org.openide.util.Utilities; + +/** + * + * @author Thomas Wuerthinger + */ +public final class NextDiagramAction extends ContextAction implements ChangedListener { + + private DiagramViewModel model; + + public NextDiagramAction() { + this(Utilities.actionsGlobalContext()); + } + + public NextDiagramAction(Lookup lookup) { + putValue(Action.SHORT_DESCRIPTION, "Show next graph of current group"); + putValue(Action.SMALL_ICON, new ImageIcon(Utilities.loadImage("com/sun/hotspot/igv/view/images/next_diagram.png"))); + } + + public String getName() { + return NbBundle.getMessage(NextDiagramAction.class, "CTL_NextDiagramAction"); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + public Class contextClass() { + return DiagramViewModel.class; + } + + @Override + public void performAction(DiagramViewModel model) { + int fp = model.getFirstPosition(); + int sp = model.getSecondPosition(); + if (sp != model.getPositions().size() - 1) { + int nfp = fp + 1; + int nsp = sp + 1; + model.setPositions(nfp, nsp); + } + } + + @Override + public void update(DiagramViewModel model) { + super.update(model); + + if (this.model != model) { + if (this.model != null) { + this.model.getDiagramChangedEvent().removeListener(this); + } + + this.model = model; + if (this.model != null) { + this.model.getDiagramChangedEvent().addListener(this); + } + } + } + + @Override + public boolean isEnabled(DiagramViewModel model) { + return model.getSecondPosition() != model.getPositions().size() - 1; + } + + public Action createContextAwareInstance(Lookup arg0) { + return new NextDiagramAction(arg0); + } + + public void changed(DiagramViewModel source) { + update(source); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/NodeFindAction.java 2009-08-01 04:20:35.372110161 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view.actions; + +import com.sun.hotspot.igv.view.EditorTopComponent; +import javax.swing.Action; +import org.openide.util.HelpCtx; +import org.openide.util.NbBundle; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class NodeFindAction extends CallableSystemAction { + + public void performAction() { + EditorTopComponent comp = EditorTopComponent.getActive(); + if (comp != null) { + comp.findNode(); + } + } + + public NodeFindAction() { + putValue(Action.SHORT_DESCRIPTION, "Find nodes"); + } + + public String getName() { + return NbBundle.getMessage(NodeFindAction.class, "CTL_NodeFindAction"); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + @Override + public boolean isEnabled() { + return true; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/view/images/search.gif"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/OverviewAction.java 2009-08-01 04:20:35.800390798 +0100 @@ -0,0 +1,58 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.actions; + +import java.awt.event.ActionEvent; +import javax.swing.AbstractAction; +import javax.swing.Action; +import javax.swing.ImageIcon; + +/** + * + * @author Thomas Wuerthinger + */ +public class OverviewAction extends AbstractAction { + + private boolean state; + public static final String STATE = "state"; + + public OverviewAction() { + putValue(AbstractAction.SMALL_ICON, new ImageIcon(org.openide.util.Utilities.loadImage(iconResource()))); + putValue(Action.SHORT_DESCRIPTION, "Show satellite view of whole graph"); + setState(false); + } + + public void actionPerformed(ActionEvent ev) { + setState(!state); + } + + public void setState(boolean b) { + this.putValue(STATE, b); + this.state = b; + } + + protected String iconResource() { + return "com/sun/hotspot/igv/view/images/overview.gif"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/PredSuccAction.java 2009-08-01 04:20:36.225941757 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.actions; + +import java.awt.event.ActionEvent; +import javax.swing.AbstractAction; +import javax.swing.Action; +import javax.swing.ImageIcon; + +/** + * + * @author Thomas Wuerthinger + */ +public class PredSuccAction extends AbstractAction { + + private boolean state; + public static final String STATE = "state"; + + public PredSuccAction() { + state = true; + putValue(AbstractAction.SMALL_ICON, new ImageIcon(org.openide.util.Utilities.loadImage(iconResource()))); + putValue(STATE, true); + putValue(Action.SHORT_DESCRIPTION, "Show neighboring nodes of fully visible nodes semi-transparent"); + } + + public void actionPerformed(ActionEvent ev) { + this.state = !state; + this.putValue(STATE, state); + } + + protected String iconResource() { + return "com/sun/hotspot/igv/view/images/predsucc.gif"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/PrevDiagramAction.java 2009-08-01 04:20:36.642833387 +0100 @@ -0,0 +1,105 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view.actions; + +import com.sun.hotspot.igv.data.ChangedListener; +import com.sun.hotspot.igv.view.DiagramViewModel; +import com.sun.hotspot.igv.util.ContextAction; +import javax.swing.Action; +import javax.swing.ImageIcon; +import org.openide.util.HelpCtx; +import org.openide.util.Lookup; +import org.openide.util.NbBundle; +import org.openide.util.Utilities; + +/** + * + * @author Thomas Wuerthinger + */ +public final class PrevDiagramAction extends ContextAction implements ChangedListener { + + private DiagramViewModel model; + + public PrevDiagramAction() { + this(Utilities.actionsGlobalContext()); + } + + public PrevDiagramAction(Lookup lookup) { + putValue(Action.SHORT_DESCRIPTION, "Show previous graph of current group"); + putValue(Action.SMALL_ICON, new ImageIcon(Utilities.loadImage("com/sun/hotspot/igv/view/images/prev_diagram.png"))); + } + + public String getName() { + return NbBundle.getMessage(PrevDiagramAction.class, "CTL_PrevDiagramAction"); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + public Class contextClass() { + return DiagramViewModel.class; + } + + @Override + public void performAction(DiagramViewModel model) { + int fp = model.getFirstPosition(); + int sp = model.getSecondPosition(); + if (fp != 0) { + int nfp = fp - 1; + int nsp = sp - 1; + model.setPositions(nfp, nsp); + } + } + + @Override + public void update(DiagramViewModel model) { + super.update(model); + + if (this.model != model) { + if (this.model != null) { + this.model.getDiagramChangedEvent().removeListener(this); + } + + this.model = model; + if (this.model != null) { + this.model.getDiagramChangedEvent().addListener(this); + } + } + } + + @Override + public boolean isEnabled(DiagramViewModel model) { + return model.getFirstPosition() != 0; + } + + public Action createContextAwareInstance(Lookup arg0) { + return new PrevDiagramAction(arg0); + } + + public void changed(DiagramViewModel source) { + update(source); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/ShowAllAction.java 2009-08-01 04:20:37.059789535 +0100 @@ -0,0 +1,74 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view.actions; + +import com.sun.hotspot.igv.view.*; +import java.awt.event.InputEvent; +import java.awt.event.KeyEvent; +import javax.swing.Action; +import javax.swing.KeyStroke; +import org.openide.util.HelpCtx; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class ShowAllAction extends CallableSystemAction { + + public void performAction() { + EditorTopComponent editor = EditorTopComponent.getActive(); + if (editor != null) { + editor.showAll(); + } + } + + public ShowAllAction() { + putValue(Action.SHORT_DESCRIPTION, "Show all nodes"); + putValue(Action.ACCELERATOR_KEY, KeyStroke.getKeyStroke(KeyEvent.VK_A, InputEvent.CTRL_MASK)); + } + + public String getName() { + return "Show all"; + } + + @Override + protected void initialize() { + super.initialize(); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/view/images/expand.gif"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/ZoomInAction.java 2009-08-01 04:20:37.476824645 +0100 @@ -0,0 +1,69 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view.actions; + +import com.sun.hotspot.igv.view.EditorTopComponent; +import java.awt.Event; +import java.awt.event.KeyEvent; +import javax.swing.Action; +import javax.swing.KeyStroke; +import org.openide.util.HelpCtx; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class ZoomInAction extends CallableSystemAction { + + public void performAction() { + EditorTopComponent editor = EditorTopComponent.getActive(); + if (editor != null) { + editor.zoomIn(); + } + } + + public String getName() { + return "Zoom in"; + } + + public ZoomInAction() { + putValue(Action.ACCELERATOR_KEY, KeyStroke.getKeyStroke(KeyEvent.VK_EQUALS, Event.CTRL_MASK, false)); + putValue(Action.SHORT_DESCRIPTION, "Zoom in"); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/view/images/zoomin.gif"; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/actions/ZoomOutAction.java 2009-08-01 04:20:37.893882996 +0100 @@ -0,0 +1,75 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view.actions; + +import com.sun.hotspot.igv.view.EditorTopComponent; +import java.awt.Event; +import java.awt.event.KeyEvent; +import javax.swing.Action; +import javax.swing.KeyStroke; +import org.openide.util.HelpCtx; +import org.openide.util.actions.CallableSystemAction; + +/** + * + * @author Thomas Wuerthinger + */ +public final class ZoomOutAction extends CallableSystemAction { + + public void performAction() { + EditorTopComponent editor = EditorTopComponent.getActive(); + if (editor != null) { + editor.zoomOut(); + } + } + + public ZoomOutAction() { + + putValue(Action.SHORT_DESCRIPTION, "Zoom out"); + putValue(Action.ACCELERATOR_KEY, KeyStroke.getKeyStroke(KeyEvent.VK_MINUS, Event.CTRL_MASK, false)); + } + + public String getName() { + return "Zoom out"; + } + + @Override + protected void initialize() { + super.initialize(); + } + + public HelpCtx getHelpCtx() { + return HelpCtx.DEFAULT_HELP; + } + + @Override + protected boolean asynchronous() { + return false; + } + + @Override + protected String iconResource() { + return "com/sun/hotspot/igv/view/images/zoomout.gif"; + } +} Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/blocks.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/expand.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/export.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/extract.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/hide.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/next_diagram.png differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/overview.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/predsucc.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/prev_diagram.png differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/search.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/zoomin.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/images/zoomout.gif differ --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/layer.xml 2009-08-01 04:20:43.225169168 +0100 @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/widgets/BlockWidget.java 2009-08-01 04:20:43.680591146 +0100 @@ -0,0 +1,82 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.widgets; + +import com.sun.hotspot.igv.data.InputBlock; +import com.sun.hotspot.igv.graph.Diagram; +import java.awt.BasicStroke; +import java.awt.Color; +import java.awt.Font; +import java.awt.Graphics2D; +import java.awt.Rectangle; +import java.awt.Stroke; +import java.awt.geom.Rectangle2D; +import org.netbeans.api.visual.widget.Scene; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class BlockWidget extends Widget { + + public static final int BORDER = 20; + public static final Color BACKGROUND_COLOR = new Color(235, 235, 255); + private static final Font titleFont = new Font("Serif", Font.PLAIN, 14).deriveFont(Font.BOLD); + private InputBlock blockNode; + private Diagram diagram; + + public BlockWidget(Scene scene, Diagram d, InputBlock blockNode) { + super(scene); + this.blockNode = blockNode; + this.diagram = d; + this.setBackground(BACKGROUND_COLOR); + this.setOpaque(true); + this.setCheckClipping(true); + } + + @Override + protected void paintWidget() { + super.paintWidget(); + Graphics2D g = this.getGraphics(); + Stroke old = g.getStroke(); + g.setColor(Color.BLUE); + Rectangle r = new Rectangle(this.getPreferredBounds()); + r.width--; + r.height--; + if (this.getBounds().width > 0 && this.getBounds().height > 0) { + g.setStroke(new BasicStroke(2)); + g.drawRect(r.x, r.y, r.width, r.height); + } + + Color titleColor = Color.BLACK; + g.setColor(titleColor); + g.setFont(titleFont); + + String s = "B" + blockNode.toString(); + Rectangle2D r1 = g.getFontMetrics().getStringBounds(s, g); + g.drawString(s, r.x + 5, r.y + (int) r1.getHeight()); + g.setStroke(old); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/widgets/DiagramConnectionWidget.java 2009-08-01 04:20:44.098199029 +0100 @@ -0,0 +1,251 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.widgets; + +import com.sun.hotspot.igv.graph.Connection; +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.view.DiagramScene; +import java.awt.BasicStroke; +import java.awt.Color; +import java.awt.Composite; +import java.awt.Graphics2D; +import java.awt.Point; +import java.awt.Rectangle; +import java.awt.Stroke; +import java.util.ArrayList; +import java.util.List; +import org.netbeans.api.visual.anchor.AnchorShape; +import org.netbeans.api.visual.model.ObjectState; +import org.netbeans.api.visual.widget.ConnectionWidget; +import org.netbeans.api.visual.widget.Scene; + +/** + * + * @author Thomas Wuerthinger + */ +public class DiagramConnectionWidget extends ConnectionWidget { + + private static Stroke DASHED_STROKE = new BasicStroke( + 1, + BasicStroke.CAP_BUTT, + BasicStroke.JOIN_ROUND, + 0, + new float[]{2}, + 0); + private static Stroke NORMAL_STROKE = new BasicStroke(1); + private static Stroke BOLD_STROKE = new BasicStroke(3); + public static int WHITE_FACTOR = 5; + private Connection connection; + private Color color; + private Point lastSourceAnchor; + private Point lastTargetAnchor; + private List<Point> controlPoints; + private Rectangle clientArea; + private boolean split; + private int[] xPoints; + private int[] yPoints; + private int pointCount; + + /** Creates a new instance of ConnectionWidget */ + public DiagramConnectionWidget(Connection connection, Scene scene) { + super(scene); + this.connection = connection; + color = connection.getColor(); + if (connection.getStyle() == Connection.ConnectionStyle.DASHED) { + this.setStroke(DASHED_STROKE); + } else if (connection.getStyle() == Connection.ConnectionStyle.BOLD) { + this.setStroke(BOLD_STROKE); + } else { + this.setStroke(NORMAL_STROKE); + } + this.setCheckClipping(true); + clientArea = new Rectangle(); + updateControlPoints(); + } + + public Connection getConnection() { + return connection; + } + + public void updateControlPoints() { + List<Point> newControlPoints = connection.getControlPoints(); + Connection c = connection; + Figure f = c.getInputSlot().getFigure(); + Point p = new Point(f.getPosition()); + p.translate(c.getInputSlot().getRelativePosition().x, f.getSize().height / 2); + Point p4 = new Point(f.getPosition()); + p4.translate(c.getInputSlot().getRelativePosition().x, c.getInputSlot().getRelativePosition().y); + + Figure f2 = c.getOutputSlot().getFigure(); + Point p2 = new Point(f2.getPosition()); + p2.translate(c.getOutputSlot().getRelativePosition().x, f2.getSize().height / 2); + Point p3 = new Point(f2.getPosition()); + p3.translate(c.getOutputSlot().getRelativePosition().x, c.getOutputSlot().getRelativePosition().y); + + /*if(controlPoints.size() >= 2) { + String className = Preferences.userNodeForPackage(PreferenceConstants.class).get(PreferenceConstants.KEY_LINE_GENERATOR, PreferenceConstants.DEFAULT_LINE_GENERATOR); + try { + LineGenerator lg = (LineGenerator)Class.forName(className).newInstance(); + controlPoints = lg.createLine(controlPoints, p2, p); + } catch (InstantiationException ex) { + } catch (IllegalAccessException ex) { + } catch (ClassNotFoundException ex) { + } + }*/ + + this.controlPoints = newControlPoints; + pointCount = newControlPoints.size(); + xPoints = new int[pointCount]; + yPoints = new int[pointCount]; + int minX = Integer.MAX_VALUE; + int maxX = 
Integer.MIN_VALUE; + int minY = Integer.MAX_VALUE; + int maxY = Integer.MIN_VALUE; + split = false; + for (int i = 0; i < pointCount; i++) { + if (newControlPoints.get(i) == null) { + split = true; + } else { + int curX = newControlPoints.get(i).x; + int curY = newControlPoints.get(i).y; + this.xPoints[i] = curX; + this.yPoints[i] = curY; + minX = Math.min(minX, curX); + maxX = Math.max(maxX, curX); + minY = Math.min(minY, curY); + maxY = Math.max(maxY, curY); + } + } + + this.clientArea = new Rectangle(minX, minY, maxX - minX, maxY - minY); + } + + @Override + protected void paintWidget() { + Graphics2D g = this.getGraphics(); + + if (xPoints.length == 0 || Math.abs(xPoints[0] - xPoints[xPoints.length - 1]) > 2000) { + return; + } + + //g.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_OFF); + //g.setRenderingHint(RenderingHints.KEY_RENDERING, RenderingHints.VALUE_RENDER_SPEED); + //g.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING, RenderingHints.VALUE_TEXT_ANTIALIAS_OFF); + + DiagramScene ds = (DiagramScene) this.getScene(); + boolean shouldHide = false;//ds.getShouldHide(this); + + Composite oldComposite = null; + if (shouldHide) { + Color c = new Color(255 - (255 - color.getRed()) / WHITE_FACTOR, 255 - (255 - color.getGreen()) / WHITE_FACTOR, 255 - (255 - color.getBlue()) / WHITE_FACTOR); + g.setPaint(c); + } else { + g.setPaint(color); + } + + if (split) { + for (int i = 1; i < controlPoints.size(); i++) { + Point prev = controlPoints.get(i - 1); + Point cur = controlPoints.get(i); + if (cur == null || prev == null) { + continue; + } + + g.drawLine(prev.x, prev.y, cur.x, cur.y); + } + } else { + g.drawPolyline(xPoints, yPoints, pointCount); + } + + /*for(int i=0; i<pointCount; i++) { ... }*/ + + if (xPoints.length >= 2) { + Graphics2D g2 = (Graphics2D) g.create(); + int xOff = xPoints[xPoints.length - 2] - xPoints[xPoints.length - 1]; + int yOff = yPoints[yPoints.length - 2] - yPoints[yPoints.length - 1]; + if (xOff == 0 && yOff == 0 && yPoints.length >= 3) { + xOff = xPoints[xPoints.length - 3] - xPoints[xPoints.length - 1]; + yOff = yPoints[yPoints.length - 3] - yPoints[yPoints.length - 1]; + } + g2.translate(xPoints[xPoints.length - 1], yPoints[yPoints.length - 1]); + g2.rotate(Math.atan2(yOff, xOff)); + + g2.scale(0.55, 0.80); + AnchorShape.TRIANGLE_FILLED.paint(g2, false); + } + } + + @Override + public void notifyStateChanged(ObjectState previousState, ObjectState state) { + + if (previousState.isHovered() != state.isHovered()) { + color = connection.getColor(); + if (state.isHovered()) { + this.setStroke(BOLD_STROKE); + } else { + this.setStroke(NORMAL_STROKE); + } + + if (state.isHovered()) { + this.setStroke(BOLD_STROKE); + } else { + this.setStroke(NORMAL_STROKE); + } + + repaint(); + } + super.notifyStateChanged(previousState, state); + } + + @Override + public List<Point> getControlPoints() { + if (split) { + ArrayList<Point> result = new ArrayList<Point>(); + for (Point p : controlPoints) { + if (p != null) { + result.add(p); + } + } + return result; + } else { + return controlPoints; + } + } + + @Override + public String toString() { + return "ConnectionWidget[" + connection + "]"; + } + + @Override + protected Rectangle calculateClientArea() { + Rectangle result = new Rectangle(clientArea); + result.grow(10, 10); + return result; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/widgets/FigureWidget.java 2009-08-01 04:20:44.524178957 +0100 @@ -0,0 +1,358 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. 
All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.widgets; + +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.view.DiagramScene; +import com.sun.hotspot.igv.view.SlotLayout; +import com.sun.hotspot.igv.util.DoubleClickHandler; +import com.sun.hotspot.igv.data.Properties; +import com.sun.hotspot.igv.util.PropertiesSheet; +import java.awt.AlphaComposite; +import java.awt.Color; +import java.awt.Composite; +import java.awt.Dimension; +import java.awt.Font; +import java.awt.Graphics; +import java.awt.Point; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Set; +import javax.swing.Action; +import javax.swing.BorderFactory; +import javax.swing.JMenu; +import javax.swing.JPopupMenu; +import org.netbeans.api.visual.action.PopupMenuProvider; +import org.netbeans.api.visual.action.WidgetAction; +import org.netbeans.api.visual.model.ObjectState; +import org.netbeans.api.visual.widget.Widget; +import org.netbeans.api.visual.layout.LayoutFactory; +import org.netbeans.api.visual.widget.LabelWidget; +import org.openide.nodes.AbstractNode; +import org.openide.nodes.Children; +import org.openide.nodes.Node; +import org.openide.nodes.Sheet; + +/** + * + * @author Thomas Wuerthinger + */ +public class FigureWidget extends Widget implements Properties.Provider, PopupMenuProvider, DoubleClickHandler { + + public static final boolean VERTICAL_LAYOUT = true; + public static final int DEPTH = 5; + public static final int MAX_STRING_LENGTH = 20; + private static final double LABEL_ZOOM_FACTOR = 0.3; + private static final double ZOOM_FACTOR = 0.1; + private Font font; + private Font boldFont; + private Figure figure; + private Widget leftWidget; + private Widget rightWidget; + private Widget middleWidget; + private ArrayList labelWidgets; + private DiagramScene diagramScene; + private boolean boundary; + private Node node; + + public void setBoundary(boolean b) { + boundary = b; + } + + public boolean isBoundary() { + return boundary; + } + + public Node getNode() { + return node; + } + + private String shortenString(String string) { + if (string.length() > MAX_STRING_LENGTH) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < string.length(); i++) { + char c = string.charAt(i); + if (!Character.isLetter(c) || Character.isUpperCase(c)) { + sb.append(c); + } + } + string = sb.toString(); + } + return string; + } + + public FigureWidget(final Figure f, DiagramScene s, Widget parent) { + + super(s); + + + font = f.getDiagram().getFont(); + boldFont = 
f.getDiagram().getFont().deriveFont(Font.BOLD); + this.setCheckClipping(true); + this.diagramScene = s; + + parent.addChild(this); + this.figure = f; + this.resolveBounds(null, calculateClientArea()); + + leftWidget = new Widget(s); + this.addChild(leftWidget); + leftWidget.setLayout(new SlotLayout(SlotLayout.HorizontalAlignment.Right, VERTICAL_LAYOUT));//LayoutFactory.createVerticalFlowLayout(LayoutFactory.SerialAlignment.JUSTIFY, 0)); + + middleWidget = new Widget(s); + this.addChild(middleWidget); + + if (VERTICAL_LAYOUT) { + this.setLayout(LayoutFactory.createVerticalFlowLayout()); + } else { + this.setLayout(LayoutFactory.createHorizontalFlowLayout()); + } + + middleWidget.setLayout(LayoutFactory.createVerticalFlowLayout()); + + middleWidget.setBackground(f.getColor()); + middleWidget.setOpaque(true); + assert this.getScene() != null; + assert this.getScene().getView() != null; + middleWidget.setBorder(BorderFactory.createLineBorder(Color.BLACK)); + + + labelWidgets = new ArrayList(); + + String[] strings = figure.getLines(); + + for (String cur : strings) { + + String displayString = cur; + + LabelWidget lw = new LabelWidget(s); + labelWidgets.add(lw); + middleWidget.addChild(lw); + lw.setLabel(displayString); + + lw.setFont(font); + lw.setForeground(Color.BLACK); + lw.setAlignment(LabelWidget.Alignment.CENTER); + lw.setVerticalAlignment(LabelWidget.VerticalAlignment.CENTER); + lw.setMaximumSize(new Dimension(f.getWidth(), 20000)); + lw.setMinimumSize(new Dimension(f.getWidth(), 20)); + } + + rightWidget = new Widget(s); + this.addChild(rightWidget); + rightWidget.setLayout(new SlotLayout(SlotLayout.HorizontalAlignment.Left, VERTICAL_LAYOUT));//LayoutFactory.createVerticalLayout(LayoutFactory.SerialAlignment.JUSTIFY, 0)); + + // Initialize node for property sheet + node = new AbstractNode(Children.LEAF) { + + @Override + protected Sheet createSheet() { + Sheet s = super.createSheet(); + PropertiesSheet.initializeSheet(f.getProperties(), s); + return s; + } + }; + node.setDisplayName(getName()); + } + private boolean firstTime = true; + + @Override + protected void paintWidget() { + if (firstTime) { + firstTime = false; + for (LabelWidget w : labelWidgets) { + String cur = w.getLabel(); + Graphics graphics = this.getGraphics(); + if (graphics.getFontMetrics().stringWidth(cur) > figure.getWidth()) { + w.setLabel(shortenString(cur)); + } + } + } + super.paintWidget(); + } + + public Widget getLeftWidget() { + return leftWidget; + } + + public Widget getRightWidget() { + return rightWidget; + } + + @Override + protected void notifyStateChanged(ObjectState previousState, ObjectState state) { + super.notifyStateChanged(previousState, state); + + Color borderColor = Color.BLACK; + int thickness = 1; + boolean repaint = false; + Font f = font; + if (state.isSelected()) { + thickness = 1; + f = boldFont; + } + + if (state.isHovered()) { + borderColor = Color.BLUE; + } + + if (state.isHovered() != previousState.isHovered()) { + repaint = true; + } + + if (state.isSelected() != previousState.isSelected()) { + repaint = true; + } + + if (repaint) { + middleWidget.setBorder(BorderFactory.createLineBorder(borderColor, thickness)); + for (LabelWidget labelWidget : labelWidgets) { + labelWidget.setFont(f); + } + repaint(); + } + } + + public String getName() { + return getProperties().get("name"); + } + + public Properties getProperties() { + return figure.getProperties(); + } + + public Figure getFigure() { + return figure; + } + + @Override + protected void paintChildren() { + + if 
(diagramScene.getRealZoomFactor() < ZOOM_FACTOR && diagramScene.getModel().getShowBlocks()) { + return; + } + + Composite oldComposite = null; + if (boundary) { + oldComposite = getScene().getGraphics().getComposite(); + float alpha = DiagramScene.ALPHA; + this.getScene().getGraphics().setComposite(AlphaComposite.getInstance(AlphaComposite.SRC_OVER, alpha)); + } + + if (diagramScene.getRealZoomFactor() < LABEL_ZOOM_FACTOR) { + + for (LabelWidget labelWidget : labelWidgets) { + labelWidget.setVisible(false); + } + super.paintChildren(); + for (LabelWidget labelWidget : labelWidgets) { + labelWidget.setVisible(true); + } + + } else { + super.paintChildren(); + } + + if (boundary) { + getScene().getGraphics().setComposite(oldComposite); + } + } + + public JPopupMenu getPopupMenu(Widget widget, Point point) { + JPopupMenu m = diagramScene.createPopupMenu(); + + JMenu predecessors = new JMenu("Predecessors"); + addFigureToSubMenu(predecessors, getFigure(), false, DEPTH); + + JMenu successors = new JMenu("Successors"); + addFigureToSubMenu(successors, getFigure(), true, DEPTH); + + m.addSeparator(); + m.add(predecessors); + m.add(successors); + return m; + } + + public void addFigureToSubMenu(JMenu subMenu, final Figure f, boolean successor, int depth) { + Set
set = f.getPredecessorSet(); + if (successor) { + set = f.getSuccessorSet(); + } + + int count = set.size(); + if (set.contains(f)) { + count--; + } + + for (Figure f2 : set) { + if (f2 == f) { + continue; + } + + count--; + addFigureToMenu(subMenu, f2, successor, depth - 1); + if (count > 0) { + subMenu.addSeparator(); + } + } + } + + public void addFigureToMenu(JMenu m, final Figure f, boolean successor, int depth) { + + Action a = diagramScene.createGotoAction(f); + + + m.add(a); + + if (depth > 0) { + String name = "Predecessors"; + if (successor) { + name = "Successors"; + } + + JMenu subMenu = new JMenu(name); + addFigureToSubMenu(subMenu, f, successor, depth); + m.add(subMenu); + } + + } + + public void handleDoubleClick(Widget w, WidgetAction.WidgetMouseEvent e) { + + if (diagramScene.isAllVisible()) { + Set hiddenNodes = new HashSet(diagramScene.getModel().getGraphToView().getGroup().getAllNodes()); + hiddenNodes.removeAll(this.getFigure().getSource().getSourceNodesAsSet()); + this.diagramScene.showNot(hiddenNodes); + } else if (isBoundary()) { + + Set hiddenNodes = new HashSet(diagramScene.getModel().getHiddenNodes()); + hiddenNodes.removeAll(this.getFigure().getSource().getSourceNodesAsSet()); + this.diagramScene.showNot(hiddenNodes); + } else { + Set hiddenNodes = new HashSet(diagramScene.getModel().getHiddenNodes()); + hiddenNodes.addAll(this.getFigure().getSource().getSourceNodesAsSet()); + this.diagramScene.showNot(hiddenNodes); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/widgets/InputSlotWidget.java 2009-08-01 04:20:44.940395429 +0100 @@ -0,0 +1,61 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view.widgets; + +import com.sun.hotspot.igv.graph.InputSlot; +import com.sun.hotspot.igv.view.DiagramScene; +import java.awt.Point; +import java.util.List; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class InputSlotWidget extends SlotWidget { + + private InputSlot inputSlot; + + public InputSlotWidget(InputSlot slot, DiagramScene scene, Widget parent, FigureWidget fw) { + super(slot, scene, parent, fw); + inputSlot = slot; + init(); + getFigureWidget().getLeftWidget().addChild(this); + } + + public InputSlot getInputSlot() { + return inputSlot; + } + + protected Point calculateRelativeLocation() { + if (getFigureWidget().getBounds() == null) { + return new Point(0, 0); + } + + double x = 0; + List slots = inputSlot.getFigure().getInputSlots(); + assert slots.contains(inputSlot); + return new Point((int) x, (int) (calculateRelativeY(slots.size(), slots.indexOf(inputSlot)))); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/widgets/LineWidget.java 2009-08-01 04:20:45.356059529 +0100 @@ -0,0 +1,293 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ +package com.sun.hotspot.igv.view.widgets; + +import com.sun.hotspot.igv.graph.Connection; +import com.sun.hotspot.igv.graph.InputSlot; +import com.sun.hotspot.igv.graph.OutputSlot; +import com.sun.hotspot.igv.view.DiagramScene; +import java.awt.BasicStroke; +import java.awt.Color; +import java.awt.Graphics2D; +import java.awt.Point; +import java.awt.Rectangle; +import java.awt.Stroke; +import java.awt.geom.Line2D; +import java.util.ArrayList; +import java.util.List; +import javax.swing.JPopupMenu; +import javax.swing.event.PopupMenuEvent; +import javax.swing.event.PopupMenuListener; +import org.netbeans.api.visual.action.ActionFactory; +import org.netbeans.api.visual.action.PopupMenuProvider; +import org.netbeans.api.visual.animator.SceneAnimator; +import org.netbeans.api.visual.model.ObjectState; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class LineWidget extends Widget implements PopupMenuProvider { + + public final int BORDER = 8; + public final int ARROW_SIZE = 6; + public final int BOLD_ARROW_SIZE = 7; + public final int HOVER_ARROW_SIZE = 8; + public final int BOLD_STROKE_WIDTH = 2; + public final int HOVER_STROKE_WIDTH = 3; + private static double ZOOM_FACTOR = 0.1; + private OutputSlot outputSlot; + private DiagramScene scene; + private List connections; + private Point from; + private Point to; + private Rectangle clientArea; + private Color color = Color.BLACK; + private LineWidget predecessor; + private List successors; + private boolean highlighted; + private boolean popupVisible; + private boolean isBold; + private boolean isDashed; + + public LineWidget(DiagramScene scene, OutputSlot s, List connections, Point from, Point to, LineWidget predecessor, SceneAnimator animator, boolean isBold, boolean isDashed) { + super(scene); + this.scene = scene; + this.outputSlot = s; + this.connections = connections; + this.from = from; + this.to = to; + this.predecessor = predecessor; + this.successors = new ArrayList(); + if (predecessor != null) { + predecessor.addSuccessor(this); + } + + this.isBold = isBold; + this.isDashed = isDashed; + + int minX = from.x; + int minY = from.y; + int maxX = to.x; + int maxY = to.y; + if (minX > maxX) { + int tmp = minX; + minX = maxX; + maxX = tmp; + } + + if (minY > maxY) { + int tmp = minY; + minY = maxY; + maxY = tmp; + } + + clientArea = new Rectangle(minX, minY, maxX - minX + 1, maxY - minY + 1); + clientArea.grow(BORDER, BORDER); + + if (connections.size() > 0) { + color = connections.get(0).getColor(); + } + + this.setCheckClipping(true); + + this.getActions().addAction(ActionFactory.createPopupMenuAction(this)); + if (animator == null) { + this.setBackground(color); + } else { + this.setBackground(Color.WHITE); + animator.animateBackgroundColor(this, color); + } + } + + public Point getFrom() { + return from; + } + + public Point getTo() { + return to; + } + + private void addSuccessor(LineWidget widget) { + this.successors.add(widget); + } + + @Override + protected Rectangle calculateClientArea() { + return clientArea; + } + + @Override + protected void paintWidget() { + if (scene.getRealZoomFactor() < ZOOM_FACTOR) { + return; + } + + Graphics2D g = getScene().getGraphics(); + g.setPaint(this.getBackground()); + ObjectState state = this.getState(); + float width = 1.0f; + + if (isBold) { + width = BOLD_STROKE_WIDTH; + } + + if (highlighted || popupVisible) { + width = HOVER_STROKE_WIDTH; + } + + Stroke oldStroke = g.getStroke(); + if (isDashed) { + float[] dashPattern = {5, 
5, 5, 5}; + g.setStroke(new BasicStroke(width, BasicStroke.CAP_BUTT, + BasicStroke.JOIN_MITER, 10, + dashPattern, 0)); + } else { + g.setStroke(new BasicStroke(width)); + } + + g.drawLine(from.x, from.y, to.x, to.y); + + boolean sameFrom = false; + boolean sameTo = successors.size() == 0; + for (LineWidget w : successors) { + if (w.getFrom().equals(getTo())) { + sameTo = true; + } + } + + if (predecessor == null || predecessor.getTo().equals(getFrom())) { + sameFrom = true; + } + + + int size = ARROW_SIZE; + if (isBold) { + size = BOLD_ARROW_SIZE; + } + if (highlighted || popupVisible) { + size = HOVER_ARROW_SIZE; + } + if (!sameFrom) { + g.fillPolygon( + new int[]{from.x - size / 2, from.x + size / 2, from.x}, + new int[]{from.y - size / 2, from.y - size / 2, from.y + size / 2}, + 3); + } + if (!sameTo) { + g.fillPolygon( + new int[]{to.x - size / 2, to.x + size / 2, to.x}, + new int[]{to.y - size / 2, to.y - size / 2, to.y + size / 2}, + 3); + } + g.setStroke(oldStroke); + } + + private void setHighlighted(boolean b) { + this.highlighted = b; + this.revalidate(true); + } + + private void setPopupVisible(boolean b) { + this.popupVisible = b; + this.revalidate(true); + } + + @Override + public boolean isHitAt(Point localPoint) { + return Line2D.ptLineDistSq(from.x, from.y, to.x, to.y, localPoint.x, localPoint.y) <= BORDER * BORDER; + } + + @Override + protected void notifyStateChanged(ObjectState previousState, ObjectState state) { + if (previousState.isHovered() != state.isHovered()) { + setRecursiveHighlighted(state.isHovered()); + } + } + + private void setRecursiveHighlighted(boolean b) { + LineWidget cur = predecessor; + while (cur != null) { + cur.setHighlighted(b); + cur = cur.predecessor; + } + + highlightSuccessors(b); + this.setHighlighted(b); + } + + private void highlightSuccessors(boolean b) { + for (LineWidget s : successors) { + s.setHighlighted(b); + s.highlightSuccessors(b); + } + } + + private void setRecursivePopupVisible(boolean b) { + LineWidget cur = predecessor; + while (cur != null) { + cur.setPopupVisible(b); + cur = cur.predecessor; + } + + popupVisibleSuccessors(b); + setPopupVisible(b); + } + + private void popupVisibleSuccessors(boolean b) { + for (LineWidget s : successors) { + s.setPopupVisible(b); + s.popupVisibleSuccessors(b); + } + } + + public JPopupMenu getPopupMenu(Widget widget, Point localLocation) { + JPopupMenu menu = new JPopupMenu(); + menu.add(scene.createGotoAction(outputSlot.getFigure())); + menu.addSeparator(); + + for (Connection c : connections) { + InputSlot s = c.getInputSlot(); + menu.add(scene.createGotoAction(s.getFigure())); + } + + final LineWidget w = this; + menu.addPopupMenuListener(new PopupMenuListener() { + + public void popupMenuWillBecomeVisible(PopupMenuEvent e) { + w.setRecursivePopupVisible(true); + } + + public void popupMenuWillBecomeInvisible(PopupMenuEvent e) { + w.setRecursivePopupVisible(false); + } + + public void popupMenuCanceled(PopupMenuEvent e) { + } + }); + + return menu; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/widgets/MultiConnectionWidget.java 2009-08-01 04:20:45.775587761 +0100 @@ -0,0 +1,281 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.widgets; + +import com.sun.hotspot.igv.graph.Connection; +import com.sun.hotspot.igv.graph.InputSlot; +import com.sun.hotspot.igv.graph.OutputSlot; +import com.sun.hotspot.igv.graph.Slot; +import com.sun.hotspot.igv.view.DiagramScene; +import java.awt.BasicStroke; +import java.awt.Color; +import java.awt.Graphics2D; +import java.awt.Point; +import java.awt.Rectangle; +import java.awt.Stroke; +import java.awt.geom.Line2D; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedSet; +import java.util.TreeSet; +import javax.swing.JPopupMenu; +import javax.swing.event.PopupMenuEvent; +import javax.swing.event.PopupMenuListener; +import org.netbeans.api.visual.action.PopupMenuProvider; +import org.netbeans.api.visual.model.ObjectState; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class MultiConnectionWidget extends Widget implements PopupMenuProvider { + + public final int BORDER = 4; + public final int HOVER_STROKE_WIDTH = 3; + + private static class Route { + + public Point from; + public Point to; + public SortedSet<InputSlot> inputSlots; + public boolean decorateStart; + public boolean decorateEnd; + + public Route(Point from, Point to) { + assert from != null; + assert to != null; + this.from = from; + this.to = to; + this.inputSlots = new TreeSet<InputSlot>(); + } + + @Override + public boolean equals(Object obj) { + + if (obj instanceof Route) { + Route r = (Route) obj; + return r.from.equals(from) && r.to.equals(to); + } + + return super.equals(obj); + } + + @Override + public int hashCode() { + return ((((from.x * 1711) + from.y) * 1711 + to.x) * 1711 + to.y); + } + } + private Rectangle clientArea; + private OutputSlot outputSlot; + private Map<Route, SortedSet<InputSlot>> routeMap; + private List<Route> routeList; + private Color color; + private DiagramScene diagramScene; + private boolean popupVisible; + + /** Creates a new instance of MultiConnectionWidget */ + public MultiConnectionWidget(OutputSlot outputSlot, DiagramScene scene) { + super(scene); + + this.diagramScene = scene; + this.outputSlot = outputSlot; + this.setCheckClipping(true); + + routeMap = new HashMap<Route, SortedSet<InputSlot>>(); + routeList = new ArrayList<Route>(); + color = Color.BLACK; + + for (Connection c : outputSlot.getConnections()) { + List<Point> controlPoints = c.getControlPoints(); + InputSlot inputSlot = (InputSlot) c.getTo(); + color = c.getColor(); + + for (int i = 1; i < controlPoints.size(); i++) { + Point prev = controlPoints.get(i - 1); + Point cur = controlPoints.get(i); + + if (prev != null && cur != 
null) { + Route r = new Route(prev, cur); + if (routeMap.containsKey(r)) { + SortedSet set = routeMap.get(r); + set.add(inputSlot); + } else { + SortedSet set = new TreeSet(Slot.slotFigureComparator); + set.add(inputSlot); + routeMap.put(r, set); + routeList.add(r); + } + } + } + } + + if (routeList.size() == 0) { + clientArea = new Rectangle(); + } else { + for (Route r : routeList) { + + int x = r.from.x; + int y = r.from.y; + + int x2 = r.to.x; + int y2 = r.to.y; + + if (x > x2) { + int tmp = x; + x = x2; + x2 = tmp; + } + + if (y > y2) { + int tmp = y; + y = y2; + y2 = tmp; + } + + int width = x2 - x + 1; + int height = y2 - y + 1; + + Rectangle rect = new Rectangle(x, y, width, height); + if (clientArea == null) { + clientArea = rect; + } else { + clientArea = clientArea.union(rect); + } + } + + clientArea.grow(BORDER, BORDER); + } + } + + private void setHoverPosition(Point location) { + Route r = getNearest(location); + } + + private Route getNearest(Point localLocation) { + + double minDist = Double.MAX_VALUE; + Route result = null; + for (Route r : routeList) { + double dist = Line2D.ptSegDistSq((double) r.from.x, (double) r.from.y, (double) r.to.x, (double) r.to.y, (double) localLocation.x, (double) localLocation.y); + if (dist < minDist) { + result = r; + minDist = dist; + } + } + + assert result != null; + + return result; + } + + @Override + public boolean isHitAt(Point localLocation) { + if (!super.isHitAt(localLocation)) { + return false; + } + + for (Route r : routeList) { + double dist = Line2D.ptSegDistSq((double) r.from.x, (double) r.from.y, (double) r.to.x, (double) r.to.y, (double) localLocation.x, (double) localLocation.y); + if (dist < BORDER * BORDER) { + setHoverPosition(localLocation); + return true; + } + } + + return false; + } + + @Override + protected Rectangle calculateClientArea() { + return clientArea; + } + + @Override + protected void paintWidget() { + Graphics2D g = getScene().getGraphics(); + //g.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_OFF); + g.setColor(this.color); + ObjectState state = this.getState(); + float width = 1.0f; + if (state.isHovered() || this.popupVisible) { + width = HOVER_STROKE_WIDTH; + } + + Stroke oldStroke = g.getStroke(); + g.setStroke(new BasicStroke(width)); + for (Route r : routeList) { + g.drawLine(r.from.x, r.from.y, r.to.x, r.to.y); + } + g.setStroke(oldStroke); + } + + @Override + protected void notifyStateChanged(ObjectState previousState, ObjectState state) { + + boolean repaint = false; + + if (state.isHovered() != previousState.isHovered()) { + repaint = true; + } + + repaint(); + } + + public JPopupMenu getPopupMenu(Widget widget, Point localLocation) { + JPopupMenu menu = new JPopupMenu(); + Route r = getNearest(localLocation); + assert r != null; + assert routeMap.containsKey(r); + + menu.add(diagramScene.createGotoAction(outputSlot.getFigure())); + menu.addSeparator(); + + SortedSet set = this.routeMap.get(r); + for (InputSlot s : set) { + menu.add(diagramScene.createGotoAction(s.getFigure())); + } + + final MultiConnectionWidget w = this; + menu.addPopupMenuListener(new PopupMenuListener() { + + public void popupMenuWillBecomeVisible(PopupMenuEvent e) { + w.popupVisible = true; + w.repaint(); + } + + public void popupMenuWillBecomeInvisible(PopupMenuEvent e) { + w.popupVisible = false; + w.repaint(); + } + + public void popupMenuCanceled(PopupMenuEvent e) { + } + }); + + return menu; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ 
new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/widgets/OutputSlotWidget.java 2009-08-01 04:20:46.184276666 +0100 @@ -0,0 +1,61 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.widgets; + +import com.sun.hotspot.igv.graph.OutputSlot; +import com.sun.hotspot.igv.view.DiagramScene; +import java.awt.Point; +import java.util.List; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public class OutputSlotWidget extends SlotWidget { + + private OutputSlot outputSlot; + + public OutputSlotWidget(OutputSlot slot, DiagramScene scene, Widget parent, FigureWidget fw) { + super(slot, scene, parent, fw); + outputSlot = slot; + init(); + getFigureWidget().getRightWidget().addChild(this); + } + + public OutputSlot getOutputSlot() { + return outputSlot; + } + + protected Point calculateRelativeLocation() { + if (getFigureWidget().getBounds() == null) { + return new Point(0, 0); + } + + double x = this.getFigureWidget().getBounds().width; + List slots = outputSlot.getFigure().getOutputSlots(); + assert slots.contains(outputSlot); + return new Point((int) x, (int) (calculateRelativeY(slots.size(), slots.indexOf(outputSlot)))); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/View/src/com/sun/hotspot/igv/view/widgets/SlotWidget.java 2009-08-01 04:20:46.593005657 +0100 @@ -0,0 +1,126 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ +package com.sun.hotspot.igv.view.widgets; + +import com.sun.hotspot.igv.graph.Figure; +import com.sun.hotspot.igv.graph.OutputSlot; +import com.sun.hotspot.igv.graph.Slot; +import com.sun.hotspot.igv.view.*; +import java.awt.Color; +import java.awt.Font; +import java.awt.Graphics2D; +import java.awt.Image; +import java.awt.Point; +import java.awt.Rectangle; +import java.awt.geom.Rectangle2D; +import org.netbeans.api.visual.widget.Widget; + +/** + * + * @author Thomas Wuerthinger + */ +public abstract class SlotWidget extends Widget { + + private Slot slot; + private FigureWidget figureWidget; + private Image bufferImage; + private static double TEXT_ZOOM_FACTOR = 0.9; + private static double ZOOM_FACTOR = 0.6; + private DiagramScene scene; + + public SlotWidget(Slot slot, DiagramScene scene, Widget parent, FigureWidget fw) { + super(scene); + this.scene = scene; + this.slot = slot; + figureWidget = fw; + this.setToolTipText("" + slot.getName() + ""); + this.setCheckClipping(true); + } + + public Point getAnchorPosition() { + Point p = new Point(figureWidget.getFigure().getPosition()); + Point p2 = slot.getRelativePosition(); + p.translate(p2.x, p2.y); + return p; + } + + protected void init() { + + Point p = calculateRelativeLocation(); + Rectangle r = calculateClientArea(); + p = new Point(p.x, p.y - r.height / 2); + this.setPreferredLocation(p); + } + + public Slot getSlot() { + return slot; + } + + public FigureWidget getFigureWidget() { + return figureWidget; + } + + @Override + protected void paintWidget() { + + if (scene.getRealZoomFactor() < ZOOM_FACTOR) { + return; + } + + if (bufferImage == null) { + Graphics2D g = this.getGraphics(); + g.setColor(Color.DARK_GRAY); + int w = this.getBounds().width; + int h = this.getBounds().height; + + if (getSlot().getShortName() != null && getSlot().getShortName().length() > 0 && scene.getRealZoomFactor() >= TEXT_ZOOM_FACTOR) { + Font f = new Font("Arial", Font.PLAIN, 8); + g.setFont(f.deriveFont(7.5f)); + Rectangle2D r1 = g.getFontMetrics().getStringBounds(getSlot().getShortName(), g); + g.drawString(getSlot().getShortName(), (int) (this.getBounds().width - r1.getWidth()) / 2, (int) (this.getBounds().height + r1.getHeight()) / 2); + } else { + + if (slot instanceof OutputSlot) { + g.fillArc(w / 4, -h / 4 - 1, w / 2, h / 2, 180, 180); + } else { + g.fillArc(w / 4, 3 * h / 4, w / 2, h / 2, 0, 180); + } + } + } + } + + @Override + protected Rectangle calculateClientArea() { + return new Rectangle(0, 0, Figure.SLOT_WIDTH, Figure.SLOT_WIDTH); + } + + protected abstract Point calculateRelativeLocation(); + + protected double calculateRelativeY(int size, int index) { + assert index >= 0 && index < size; + assert size > 0; + double height = getFigureWidget().getBounds().getHeight(); + return height * (index + 1) / (size + 1); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/branding/core/core.jar/org/netbeans/core/startup/Bundle.properties 2009-08-01 04:20:47.220263488 +0100 @@ -0,0 +1,7 @@ +currentVersion=IdealGraphVisualizer {0} +LBL_splash_window_title=Starting IdealGraphVisualizer +SPLASH_WIDTH=475 +SplashProgressBarBounds=0,268,473,6 +SplashProgressBarColor=0xFFFFFF +SplashRunningTextBounds=269,253,205,12 +SplashRunningTextColor=0xFFFFFF Files /dev/null and 
new/hotspot/src/share/tools/IdealGraphVisualizer/branding/core/core.jar/org/netbeans/core/startup/frame.gif differ Files /dev/null and new/hotspot/src/share/tools/IdealGraphVisualizer/branding/core/core.jar/org/netbeans/core/startup/splash.gif differ --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/branding/modules/org-netbeans-core-windows.jar/org/netbeans/core/windows/view/ui/Bundle.properties 2009-08-01 04:20:48.676506727 +0100 @@ -0,0 +1,2 @@ +CTL_MainWindow_Title=IdealGraphVisualizer {0} +CTL_MainWindow_Title_No_Project=IdealGraphVisualizer {0} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/build.xml 2009-08-01 04:20:49.077365449 +0100 @@ -0,0 +1,8 @@ + + + + + + Builds the module suite IdealGraphVisualizer. + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/nbproject/build-impl.xml 2009-08-01 04:20:49.503274110 +0100 @@ -0,0 +1,27 @@ + + + + + + + + + + + + + + + + + + + + + + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/nbproject/genfiles.properties 2009-08-01 04:20:49.907691702 +0100 @@ -0,0 +1,8 @@ +build.xml.data.CRC32=72833581 +build.xml.script.CRC32=e9c757c5 +build.xml.stylesheet.CRC32=531c622b +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. +nbproject/build-impl.xml.data.CRC32=72833581 +nbproject/build-impl.xml.script.CRC32=1b6f3648 +nbproject/build-impl.xml.stylesheet.CRC32=196c7090 --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/nbproject/platform.properties 2009-08-01 04:20:50.318809047 +0100 @@ -0,0 +1,29 @@ +# Deprecated since 5.0u1; for compatibility with 5.0: +disabled.clusters=\ + apisupport1,\ + gsf1,\ + harness,\ + ide9,\ + java2,\ + nb6.1,\ + profiler3 +disabled.modules=\ + org.netbeans.core.execution,\ + org.netbeans.core.multiview,\ + org.netbeans.core.output2,\ + org.netbeans.modules.autoupdate.services,\ + org.netbeans.modules.autoupdate.ui,\ + org.netbeans.modules.core.kit,\ + org.netbeans.modules.favorites,\ + org.netbeans.modules.javahelp,\ + org.netbeans.modules.masterfs,\ + org.netbeans.modules.options.keymap,\ + org.netbeans.modules.sendopts,\ + org.netbeans.modules.templates,\ + org.openide.compat,\ + org.openide.execution,\ + org.openide.util.enumerations +enabled.clusters=\ + platform8 +nbjdk.active=default +nbplatform.active=default --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/nbproject/project.properties 2009-08-01 04:20:50.737478129 +0100 @@ -0,0 +1,39 @@ +app.icon=branding/core/core.jar/org/netbeans/core/startup/frame48.gif +app.name=idealgraphvisualizer +app.title=IdealGraphVisualizer +branding.token=${app.name} +modules=\ + ${project.com.sun.hotspot.igv.graph}:\ + ${project.com.sun.hotspot.igv.coordinator}:\ + ${project.com.sun.hotspot.igv.filter}:\ + ${project.com.sun.hotspot.igv.hierarchicallayout}:\ + ${project.com.sun.hotspot.igv.layout}:\ + ${project.com.sun.hotspot.igv.controlflow}:\ + ${project.com.sun.hotspot.igv.data}:\ + ${project.com.sun.hotspot.igv.view}:\ + ${project.com.sun.hotspot.igv.bytecodes}:\ + ${project.com.sun.hotspot.igv.difference}:\ + ${project.com.sun.hotspot.igv.settings}:\ + ${project.com.sun.hotspot.igv.util}:\ + 
${project.com.sun.hotspot.igv.svg}:\ + ${project.com.sun.hotspot.connection}:\ + ${project.com.sun.hotspot.igv.servercompilerscheduler}:\ + ${project.com.sun.hotspot.igv.filterwindow} +project.com.sun.hotspot.connection=NetworkConnection +project.com.sun.hotspot.igv.bytecodes=Bytecodes +project.com.sun.hotspot.igv.controlflow=ControlFlow +project.com.sun.hotspot.igv.coordinator=Coordinator +project.com.sun.hotspot.igv.data=Data +project.com.sun.hotspot.igv.difference=Difference +project.com.sun.hotspot.igv.filter=Filter +project.com.sun.hotspot.igv.filterwindow=FilterWindow +project.com.sun.hotspot.igv.graph=Graph +project.com.sun.hotspot.igv.hierarchicallayout=HierarchicalLayout +project.com.sun.hotspot.igv.layout=Layout +project.com.sun.hotspot.igv.servercompilerscheduler=ServerCompiler +project.com.sun.hotspot.igv.settings=Settings +project.com.sun.hotspot.igv.svg=BatikSVGProxy +project.com.sun.hotspot.igv.view=View +project.com.sun.hotspot.igv.util=Util +run.args = -J-server -J-Xms64m -J-Xmx1g -J-da +run.args.extra = -J-server -J-Xms64m -J-Xmx1g -J-da --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/IdealGraphVisualizer/nbproject/project.xml 2009-08-01 04:20:51.140079344 +0100 @@ -0,0 +1,9 @@ + + + org.netbeans.modules.apisupport.project.suite + + + IdealGraphVisualizer + + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/hsdis/Makefile 2009-08-01 04:20:51.567761349 +0100 @@ -0,0 +1,135 @@ +# +# Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +# CA 95054 USA or visit www.sun.com if you need additional information or +# have any questions. +# +# + +# Single gnu makefile for solaris, linux and windows (windows requires mks or +# cygwin). + +ifeq ($(BINUTILS),) +# Pop all the way out of the workspace to look for binutils. +# ...You probably want to override this setting. +BINUTILS = $(shell cd ../../../../..;pwd)/binutils-2.17-$(LIBARCH) +endif + +# Default arch; it is changed below as needed. 
+ARCH = i386 +OS = $(shell uname) + +CPPFLAGS += -I$(BINUTILS)/include -I$(BINUTILS)/bfd +CPPFLAGS += -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\" -DLIBARCH_$(LIBARCH) +CPPFLAGS += -DHOTSPOT_OS=\"$(OS)\" -DOS_$(OS) + +## OS = SunOS ## +ifeq ($(OS),SunOS) +ARCH = $(shell uname -p) +OS = solaris +CC = cc +CCFLAGS += -Kpic -g +CCFLAGS/amd64 += -xarch=amd64 +CCFLAGS/sparcv9 += -xarch=v9 +CCFLAGS += $(CCFLAGS/$(LIBARCH)) +DLDFLAGS += -G +OUTFLAGS += -o $@ +LIB_EXT = .so +else +## OS = Linux ## +ifeq ($(OS),Linux) +CPU = $(shell uname -m) +ifeq ($(CPU),ia64) +ARCH = ia64 +else +ifeq ($(CPU),x86_64) +CCFLAGS += -fPIC +endif # x86_64 +endif # ia64 +OS = linux +CC = gcc +CCFLAGS += -O +DLDFLAGS += -shared +OUTFLAGS += -o $@ +LIB_EXT = .so +CPPFLAGS += -Iinclude -Iinclude/$(OS)_$(ARCH)/ +## OS = Windows ## +else # !SunOS, !Linux => Windows +OS = win +CC = cl +#CPPFLAGS += /D"WIN32" /D"_WINDOWS" /D"DEBUG" /D"NDEBUG" +CCFLAGS += /nologo /MD /W3 /WX /O2 /Fo$(@:.dll=.obj) /Gi- +CCFLAGS += -Iinclude -Iinclude/gnu -Iinclude/$(OS)_$(ARCH) +CCFLAGS += /D"HOTSPOT_LIB_ARCH=\"$(LIBARCH)\"" +DLDFLAGS += /dll /subsystem:windows /incremental:no \ + /export:decode_instruction +OUTFLAGS += /link /out:$@ +LIB_EXT = .dll +endif # Linux +endif # SunOS + +LIBARCH = $(ARCH) +ifdef LP64 +LIBARCH64/sparc = sparcv9 +LIBARCH64/i386 = amd64 +LIBARCH64 = $(LIBARCH64/$(ARCH)) +ifneq ($(LIBARCH64),) +LIBARCH = $(LIBARCH64) +endif # LIBARCH64/$(ARCH) +endif # LP64 + +TARGET_DIR = bin/$(OS) +TARGET = $(TARGET_DIR)/hsdis-$(LIBARCH)$(LIB_EXT) + +SOURCE = hsdis.c + +LIBRARIES = $(BINUTILS)/bfd/libbfd.a \ + $(BINUTILS)/opcodes/libopcodes.a \ + $(BINUTILS)/libiberty/libiberty.a + +DEMO_TARGET = $(TARGET_DIR)/hsdis-demo-$(LIBARCH) +DEMO_SOURCE = hsdis-demo.c + +.PHONY: all clean demo both + +all: $(TARGET) demo + +both: all all64 + +%64: + $(MAKE) LP64=1 ${@:%64=%} + +demo: $(TARGET) $(DEMO_TARGET) + +$(LIBRARIES): + @echo "*** Please build binutils first; see ./README: ***" + @sed < ./README '1,/__________/d' | head -20 + @echo "..."; exit 1 + +$(TARGET): $(SOURCE) $(LIBS) $(LIBRARIES) $(TARGET_DIR) + $(CC) $(OUTFLAGS) $(CPPFLAGS) $(CCFLAGS) $(SOURCE) $(DLDFLAGS) $(LIBRARIES) + +$(DEMO_TARGET): $(DEMO_SOURCE) $(TARGET) $(TARGET_DIR) + $(CC) $(OUTFLAGS) $(CPPFLAGS) $(CCFLAGS) $(DEMO_SOURCE) $(LDFLAGS) + +$(TARGET_DIR): + [ -d $@ ] || mkdir -p $@ + +clean: + rm -rf $(TARGET_DIR) --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/hsdis/README 2009-08-01 04:20:51.977800955 +0100 @@ -0,0 +1,95 @@ +Copyright (c) 2008 Sun Microsystems, Inc. All Rights Reserved. +DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + +This code is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License version 2 only, as +published by the Free Software Foundation. + +This code is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +version 2 for more details (a copy is included in the LICENSE file that +accompanied this code). + +You should have received a copy of the GNU General Public License version +2 along with this work; if not, write to the Free Software Foundation, +Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + +Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +CA 95054 USA or visit www.sun.com if you need additional information or +have any questions. 
+ +________________________________________________________________________ + +'hsdis': A HotSpot plugin for disassembling dynamically generated code. + +The files in this directory (Makefile, hsdis.[ch], hsdis-demo.c) +are built independently of the HotSpot JVM. + +To use the plugin with a JVM, you need a new version that can load it. +If the product mode of your JVM does not accept -XX:+PrintAssembly, +you do not have a version that is new enough. + +* Building + +To build this project you need a build of GNU binutils to link against. +It is known to work with binutils 2.17. + +The makefile looks for this build in $BINUTILS, or (if that is not set), +in .../binutils-2.17-$LIBARCH, where LIBARCH (as in HotSpot) is one of +the jre subdirectory keywords i386, amd64, sparc, sparcv9, etc. + +To build GNU binutils, first download a copy of the software: + http://directory.fsf.org/project/binutils/ + +Unpack the binutils tarball into an empty directory: + chdir ../../../../.. + tar -xzf - < ../binutils-2.17.tar.gz + mv binutils-2.17 binutils-2.17-i386 #or binutils-2.17-sparc + cd binutils-2.17-i386 + +From inside that directory, run configure and make: + ( export CFLAGS='-fPIC' + ./configure i386-pc-elf ) + gnumake + +(Leave out or change the argument to configure if not on an i386 system.) + +Next, untar again into another empty directory for the LP64 version: + chdir .. + tar -xzf - < ../binutils-2.17.tar.gz + mv binutils-2.17 binutils-2.17-amd64 #or binutils-2.17-sparcv9 + cd binutils-2.17-amd64 + +From inside that directory, run configure for LP64 and make: + ( export ac_cv_c_bigendian=no CFLAGS='-m64 -fPIC' LDFLAGS=-m64 + ./configure amd64-pc-elf ) + gnumake + +The -fPIC option is needed because the generated code will be +linked into the hsdis-$LIBARCH.so binary. If you omit the +option, the JVM will fail to load the disassembler. + +You probably want two builds, one for 32 and one for 64 bits. +To build the 64-bit variation of a platform, add LP64=1 to +the make command line for hsdis. + +So, go back to the hsdis project and build: + chdir .../hsdis + gnumake + gnumake LP64=1 + +* Installing + +Products are named like bin/$OS/hsdis-$LIBARCH.so. +You can install them on your LD_LIBRARY_PATH, +or inside of your JRE next to $LIBARCH/libjvm.so. + +Now test: + export LD_LIBRARY_PATH=.../hsdis/bin/solaris:$LD_LIBRARY_PATH + dargs='-XX:+UnlockDiagnosticVMOptions -XX:+PrintAssembly' + dargs=$dargs' -XX:PrintAssemblyOptions=hsdis-print-bytes' + java $dargs -Xbatch -XX:CompileCommand=print,*String.hashCode HelloWorld + +If the product mode of the JVM does not accept -XX:+PrintAssembly, +you do not have a version new enough to use the hsdis plugin. --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/hsdis/hsdis-demo.c 2009-08-01 04:20:52.396301071 +0100 @@ -0,0 +1,223 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +/* hsdis-demo.c -- dump a range of addresses as native instructions + This demonstrates the protocol required by the HotSpot PrintAssembly option. +*/ + +#include "hsdis.h" + +#include "stdio.h" +#include "stdlib.h" +#include "string.h" + +void greet(const char*); +void disassemble(void*, void*); +void end_of_file(); + +const char* options = NULL; +int raw = 0; +int xml = 0; + +int main(int ac, char** av) { + int greeted = 0; + int i; + for (i = 1; i < ac; i++) { + const char* arg = av[i]; + if (arg[0] == '-') { + if (!strcmp(arg, "-xml")) + xml ^= 1; + else if (!strcmp(arg, "-raw")) + raw ^= 1; + else if (!strncmp(arg, "-options=", 9)) + options = arg+9; + else + { printf("Usage: %s [-xml] [name...]\n"); exit(2); } + continue; + } + greet(arg); + greeted = 1; + } + if (!greeted) + greet("world"); + printf("...And now for something completely different:\n"); + disassemble((void*) &main, (void*) &end_of_file); + printf("Cheers!\n"); +} + +void greet(const char* whom) { + printf("Hello, %s!\n", whom); +} + +void end_of_file() { } + +/* don't disassemble after this point... */ + +#include "dlfcn.h" + +#ifdef HOTSPOT_LIB_ARCH +#define LIBARCH HOTSPOT_LIB_ARCH +#endif +#ifdef HOTSPOT_OS +#define OS HOTSPOT_OS +#endif + +#define DECODE_INSTRUCTIONS_NAME "decode_instructions" +#define HSDIS_NAME "hsdis" +static void* decode_instructions_pv = 0; +static const char* hsdis_path[] = { + HSDIS_NAME".so", +#ifdef OS + "bin/"OS"/"HSDIS_NAME".so", +#endif +#ifdef LIBARCH + HSDIS_NAME"-"LIBARCH".so", +#ifdef OS + "bin/"OS"/"HSDIS_NAME"-"LIBARCH".so", +#endif +#endif + NULL +}; + +static const char* load_decode_instructions() { + void* dllib = NULL; + const char* *next_in_path = hsdis_path; + while (1) { + decode_instructions_pv = dlsym(dllib, DECODE_INSTRUCTIONS_NAME); + if (decode_instructions_pv != NULL) + return NULL; + if (dllib != NULL) + return "plugin does not defined "DECODE_INSTRUCTIONS_NAME; + for (dllib = NULL; dllib == NULL; ) { + const char* next_lib = (*next_in_path++); + if (next_lib == NULL) + return "cannot find plugin "HSDIS_NAME".so"; + dllib = dlopen(next_lib, RTLD_LAZY); + } + } +} + + +static const char* lookup(void* addr) { +#define CHECK_NAME(fn) \ + if (addr == (void*) &fn) return #fn; + + CHECK_NAME(main); + CHECK_NAME(greet); + return NULL; +} + +/* does the event match the tag, followed by a null, space, or slash? */ +#define MATCH(event, tag) \ + (!strncmp(event, tag, sizeof(tag)-1) && \ + (!event[sizeof(tag)-1] || strchr(" /", event[sizeof(tag)-1]))) + + +static const char event_cookie[] = "event_cookie"; /* demo placeholder */ +static void* handle_event(void* cookie, const char* event, void* arg) { +#define NS_DEMO "demo:" + if (cookie != event_cookie) + printf("*** bad event cookie %p != %p\n", cookie, event_cookie); + + if (xml) { + /* We could almost do a printf(event, arg), + but for the sake of a better demo, + we dress the result up as valid XML. + */ + const char* fmt = strchr(event, ' '); + int evlen = (fmt ? 
fmt - event : strlen(event)); + if (!fmt) { + if (event[0] != '/') { + printf("<"NS_DEMO"%.*s>", evlen, event); + } else { + printf("", evlen-1, event+1); + } + } else { + if (event[0] != '/') { + printf("<"NS_DEMO"%.*s", evlen, event); + printf(fmt, arg); + printf(">"); + } else { + printf("<"NS_DEMO"%.*s_done", evlen-1, event+1); + printf(fmt, arg); + printf("/>", evlen-1, event+1); + } + } + } + + if (MATCH(event, "insn")) { + const char* name = lookup(arg); + if (name) printf("%s:\n", name); + + /* basic action for : */ + printf(" %p\t", arg); + + } else if (MATCH(event, "/insn")) { + /* basic action for : + (none, plugin puts the newline for us + */ + + } else if (MATCH(event, "mach")) { + printf("Decoding for CPU '%s'\n", (char*) arg); + + } else if (MATCH(event, "addr")) { + /* basic action for : */ + const char* name = lookup(arg); + if (name) { + printf("&%s (%p)", name, arg); + /* return non-null to notify hsdis not to print the addr */ + return arg; + } + } + + /* null return is always safe; can mean "I ignored it" */ + return NULL; +} + +#define fprintf_callback \ + (decode_instructions_printf_callback_ftype)&fprintf + +void disassemble(void* from, void* to) { + const char* err = load_decode_instructions(); + if (err != NULL) { + printf("%s: %s\n", err, dlerror()); + exit(1); + } + printf("Decoding from %p to %p...\n", from, to); + decode_instructions_ftype decode_instructions + = (decode_instructions_ftype) decode_instructions_pv; + void* res; + if (raw && xml) { + res = (*decode_instructions)(from, to, NULL, stdout, NULL, stdout, options); + } else if (raw) { + res = (*decode_instructions)(from, to, NULL, NULL, NULL, stdout, options); + } else { + res = (*decode_instructions)(from, to, + handle_event, (void*) event_cookie, + fprintf_callback, stdout, + options); + } + if (res != to) + printf("*** Result was %p!\n", res); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/hsdis/hsdis.c 2009-08-01 04:20:52.842844985 +0100 @@ -0,0 +1,499 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +/* hsdis.c -- dump a range of addresses as native instructions + This implements the plugin protocol required by the + HotSpot PrintAssembly option. 
+*/ + +#include "hsdis.h" + +#include +#include +#include +#include + +#ifndef bool +#define bool int +#define true 1 +#define false 0 +#endif /*bool*/ + +/* short names for stuff in hsdis.h */ +typedef decode_instructions_event_callback_ftype event_callback_t; +typedef decode_instructions_printf_callback_ftype printf_callback_t; + +/* disassemble_info.application_data object */ +struct hsdis_app_data { + /* the arguments to decode_instructions */ + uintptr_t start; uintptr_t end; + event_callback_t event_callback; void* event_stream; + printf_callback_t printf_callback; void* printf_stream; + bool losing; + + /* the architecture being disassembled */ + const char* arch_name; + const bfd_arch_info_type* arch_info; + + /* the disassembler we are going to use: */ + disassembler_ftype dfn; + struct disassemble_info dinfo; /* the actual struct! */ + + char mach_option[64]; + char insn_options[256]; +}; + +#define DECL_APP_DATA(dinfo) \ + struct hsdis_app_data* app_data = (struct hsdis_app_data*) (dinfo)->application_data + +#define DECL_EVENT_CALLBACK(app_data) \ + event_callback_t event_callback = (app_data)->event_callback; \ + void* event_stream = (app_data)->event_stream + +#define DECL_PRINTF_CALLBACK(app_data) \ + printf_callback_t printf_callback = (app_data)->printf_callback; \ + void* printf_stream = (app_data)->printf_stream + + +static void print_help(struct hsdis_app_data* app_data, + const char* msg, const char* arg); +static void setup_app_data(struct hsdis_app_data* app_data, + const char* options); +static const char* format_insn_close(const char* close, + disassemble_info* dinfo, + char* buf, size_t bufsize); + +void* +#ifdef DLL_ENTRY + DLL_ENTRY +#endif +decode_instructions(void* start_pv, void* end_pv, + event_callback_t event_callback_arg, void* event_stream_arg, + printf_callback_t printf_callback_arg, void* printf_stream_arg, + const char* options) { + struct hsdis_app_data app_data; + memset(&app_data, 0, sizeof(app_data)); + app_data.start = (uintptr_t) start_pv; + app_data.end = (uintptr_t) end_pv; + app_data.event_callback = event_callback_arg; + app_data.event_stream = event_stream_arg; + app_data.printf_callback = printf_callback_arg; + app_data.printf_stream = printf_stream_arg; + + setup_app_data(&app_data, options); + char buf[128]; + + { + /* now reload everything from app_data: */ + DECL_EVENT_CALLBACK(&app_data); + DECL_PRINTF_CALLBACK(&app_data); + uintptr_t start = app_data.start; + uintptr_t end = app_data.end; + uintptr_t p = start; + + (*event_callback)(event_stream, "insns", (void*)start); + + (*event_callback)(event_stream, "mach name='%s'", + (void*) app_data.arch_info->printable_name); + if (app_data.dinfo.bytes_per_line != 0) { + (*event_callback)(event_stream, "format bytes-per-line='%p'/", + (void*)(intptr_t) app_data.dinfo.bytes_per_line); + } + + while (p < end && !app_data.losing) { + (*event_callback)(event_stream, "insn", (void*) p); + + /* reset certain state, so we can read it with confidence */ + app_data.dinfo.insn_info_valid = 0; + app_data.dinfo.branch_delay_insns = 0; + app_data.dinfo.data_size = 0; + app_data.dinfo.insn_type = 0; + + int size = (*app_data.dfn)((bfd_vma) p, &app_data.dinfo); + + if (size > 0) p += size; + else app_data.losing = true; + + const char* insn_close = format_insn_close("/insn", &app_data.dinfo, + buf, sizeof(buf)); + (*event_callback)(event_stream, insn_close, (void*) p); + + /* follow each complete insn by a nice newline */ + (*printf_callback)(printf_stream, "\n"); + } + + (*event_callback)(event_stream, 
"/insns", (void*) p); + return (void*) p; + } +} + +/* take the address of the function, for luck, and also test the typedef: */ +const decode_instructions_ftype decode_instructions_address = &decode_instructions; + +static const char* format_insn_close(const char* close, + disassemble_info* dinfo, + char* buf, size_t bufsize) { + if (!dinfo->insn_info_valid) + return close; + enum dis_insn_type itype = dinfo->insn_type; + int dsize = dinfo->data_size, delays = dinfo->branch_delay_insns; + if ((itype == dis_nonbranch && (dsize | delays) == 0) + || (strlen(close) + 3*20 > bufsize)) + return close; + + const char* type = "unknown"; + switch (itype) { + case dis_nonbranch: type = NULL; break; + case dis_branch: type = "branch"; break; + case dis_condbranch: type = "condbranch"; break; + case dis_jsr: type = "jsr"; break; + case dis_condjsr: type = "condjsr"; break; + case dis_dref: type = "dref"; break; + case dis_dref2: type = "dref2"; break; + } + + strcpy(buf, close); + char* p = buf; + if (type) sprintf(p += strlen(p), " type='%s'", type); + if (dsize) sprintf(p += strlen(p), " dsize='%d'", dsize); + if (delays) sprintf(p += strlen(p), " delay='%d'", delays); + return buf; +} + +/* handler functions */ + +static int +hsdis_read_memory_func(bfd_vma memaddr, + bfd_byte* myaddr, + unsigned int length, + struct disassemble_info* dinfo) { + uintptr_t memaddr_p = (uintptr_t) memaddr; + DECL_APP_DATA(dinfo); + if (memaddr_p + length > app_data->end) { + /* read is out of bounds */ + return EIO; + } else { + memcpy(myaddr, (bfd_byte*) memaddr_p, length); + return 0; + } +} + +static void +hsdis_print_address_func(bfd_vma vma, struct disassemble_info* dinfo) { + /* the actual value to print: */ + void* addr_value = (void*) (uintptr_t) vma; + DECL_APP_DATA(dinfo); + DECL_EVENT_CALLBACK(app_data); + + /* issue the event: */ + void* result = + (*event_callback)(event_stream, "addr/", addr_value); + if (result == NULL) { + /* event declined */ + generic_print_address(vma, dinfo); + } +} + + +/* configuration */ + +static void set_optional_callbacks(struct hsdis_app_data* app_data); +static void parse_caller_options(struct hsdis_app_data* app_data, + const char* caller_options); +static const char* native_arch_name(); +static enum bfd_endian native_endian(); +static const bfd_arch_info_type* find_arch_info(const char* arch_nane); +static bfd* get_native_bfd(const bfd_arch_info_type* arch_info, + /* to avoid malloc: */ + bfd* empty_bfd, bfd_target* empty_xvec); +static void init_disassemble_info_from_bfd(struct disassemble_info* dinfo, + void *stream, + fprintf_ftype fprintf_func, + bfd* bfd, + char* disassembler_options); +static void parse_fake_insn(disassembler_ftype dfn, + struct disassemble_info* dinfo); + +static void setup_app_data(struct hsdis_app_data* app_data, + const char* caller_options) { + /* Make reasonable defaults for null callbacks. + A non-null stream for a null callback is assumed to be a FILE* for output. + Events are rendered as XML. + */ + set_optional_callbacks(app_data); + + /* Look into caller_options for anything interesting. */ + if (caller_options != NULL) + parse_caller_options(app_data, caller_options); + + /* Discover which architecture we are going to disassemble. */ + app_data->arch_name = &app_data->mach_option[0]; + if (app_data->arch_name[0] == '\0') + app_data->arch_name = native_arch_name(); + app_data->arch_info = find_arch_info(app_data->arch_name); + + /* Make a fake bfd to hold the arch. and byteorder info. 
*/ + struct { + bfd_target empty_xvec; + bfd empty_bfd; + } buf; + bfd* native_bfd = get_native_bfd(app_data->arch_info, + /* to avoid malloc: */ + &buf.empty_bfd, &buf.empty_xvec); + init_disassemble_info_from_bfd(&app_data->dinfo, + app_data->printf_stream, + app_data->printf_callback, + native_bfd, + app_data->insn_options); + + /* Finish linking together the various callback blocks. */ + app_data->dinfo.application_data = (void*) app_data; + app_data->dfn = disassembler(native_bfd); + app_data->dinfo.print_address_func = hsdis_print_address_func; + app_data->dinfo.read_memory_func = hsdis_read_memory_func; + + if (app_data->dfn == NULL) { + const char* bad = app_data->arch_name; + static bool complained; + if (bad == &app_data->mach_option[0]) + print_help(app_data, "bad mach=%s", bad); + else if (!complained) + print_help(app_data, "bad native mach=%s; please port hsdis to this platform", bad); + complained = true; + /* must bail out */ + app_data->losing = true; + return; + } + + parse_fake_insn(app_data->dfn, &app_data->dinfo); +} + + +/* ignore all events, return a null */ +static void* null_event_callback(void* ignore_stream, const char* ignore_event, void* arg) { + return NULL; +} + +/* print all events as XML markup */ +static void* xml_event_callback(void* stream, const char* event, void* arg) { + FILE* fp = (FILE*) stream; +#define NS_PFX "dis:" + if (event[0] != '/') { + /* issue the tag, with or without a formatted argument */ + fprintf(fp, "<"NS_PFX); + fprintf(fp, event, arg); + fprintf(fp, ">"); + } else { + ++event; /* skip slash */ + const char* argp = strchr(event, ' '); + if (argp == NULL) { + /* no arguments; just issue the closing tag */ + fprintf(fp, "", event); + } else { + /* split out the closing attributes as */ + int event_prefix = (argp - event); + fprintf(fp, "<"NS_PFX"%.*s_done", event_prefix, event); + fprintf(fp, argp, arg); + fprintf(fp, "/>", event_prefix, event); + } + } + return NULL; +} + +static void set_optional_callbacks(struct hsdis_app_data* app_data) { + if (app_data->printf_callback == NULL) { + int (*fprintf_callback)(FILE*, const char*, ...) = &fprintf; + FILE* fprintf_stream = stdout; + app_data->printf_callback = (printf_callback_t) fprintf_callback; + if (app_data->printf_stream == NULL) + app_data->printf_stream = (void*) fprintf_stream; + } + if (app_data->event_callback == NULL) { + if (app_data->event_stream == NULL) + app_data->event_callback = &null_event_callback; + else + app_data->event_callback = &xml_event_callback; + } + +} + +static void parse_caller_options(struct hsdis_app_data* app_data, const char* caller_options) { + char* iop_base = app_data->insn_options; + char* iop_limit = iop_base + sizeof(app_data->insn_options) - 1; + char* iop = iop_base; + const char* p; + for (p = caller_options; p != NULL; ) { + const char* q = strchr(p, ','); + size_t plen = (q == NULL) ? 
strlen(p) : ((q++) - p); + if (plen == 4 && strncmp(p, "help", plen) == 0) { + print_help(app_data, NULL, NULL); + } else if (plen >= 5 && strncmp(p, "mach=", 5) == 0) { + char* mach_option = app_data->mach_option; + size_t mach_size = sizeof(app_data->mach_option); + mach_size -= 1; /*leave room for the null*/ + if (plen > mach_size) plen = mach_size; + strncpy(mach_option, p, plen); + mach_option[plen] = '\0'; + } else if (plen > 6 && strncmp(p, "hsdis-", 6)) { + // do not pass these to the next level + } else { + /* just copy it; {i386,sparc}-dis.c might like to see it */ + if (iop > iop_base && iop < iop_limit) (*iop++) = ','; + if (iop + plen > iop_limit) + plen = iop_limit - iop; + strncpy(iop, p, plen); + iop += plen; + } + p = q; + } +} + +static void print_help(struct hsdis_app_data* app_data, + const char* msg, const char* arg) { + DECL_PRINTF_CALLBACK(app_data); + if (msg != NULL) { + (*printf_callback)(printf_stream, "hsdis: "); + (*printf_callback)(printf_stream, msg, arg); + (*printf_callback)(printf_stream, "\n"); + } + (*printf_callback)(printf_stream, "hsdis output options:\n"); + if (printf_callback == (printf_callback_t) &fprintf) + disassembler_usage((FILE*) printf_stream); + else + disassembler_usage(stderr); /* better than nothing */ + (*printf_callback)(printf_stream, " mach= select disassembly mode\n"); +#if defined(LIBARCH_i386) || defined(LIBARCH_amd64) + (*printf_callback)(printf_stream, " mach=i386 select 32-bit mode\n"); + (*printf_callback)(printf_stream, " mach=x86-64 select 64-bit mode\n"); + (*printf_callback)(printf_stream, " suffix always print instruction suffix\n"); +#endif + (*printf_callback)(printf_stream, " help print this message\n"); +} + + +/* low-level bfd and arch stuff that binutils doesn't do for us */ + +static const bfd_arch_info_type* find_arch_info(const char* arch_name) { + const bfd_arch_info_type* arch_info = bfd_scan_arch(arch_name); + if (arch_info == NULL) { + extern const bfd_arch_info_type bfd_default_arch_struct; + arch_info = &bfd_default_arch_struct; + } + return arch_info; +} + +static const char* native_arch_name() { + const char* res = HOTSPOT_LIB_ARCH; +#ifdef LIBARCH_amd64 + res = "i386:x86-64"; +#endif +#ifdef LIBARCH_sparc + res = "sparc:v8plusb"; +#endif +#ifdef LIBARCH_sparc + res = "sparc:v8plusb"; +#endif +#ifdef LIBARCH_sparcv9 + res = "sparc:v9b"; +#endif + if (res == NULL) + res = "HOTSPOT_LIB_ARCH is not set in Makefile!"; + return res; +} + +static enum bfd_endian native_endian() { + int32_t endian_test = 'x'; + if (*(const char*) &endian_test == 'x') + return BFD_ENDIAN_LITTLE; + else + return BFD_ENDIAN_BIG; +} + +static bfd* get_native_bfd(const bfd_arch_info_type* arch_info, + bfd* empty_bfd, bfd_target* empty_xvec) { + memset(empty_bfd, 0, sizeof(*empty_bfd)); + memset(empty_xvec, 0, sizeof(*empty_xvec)); + empty_xvec->flavour = bfd_target_unknown_flavour; + empty_xvec->byteorder = native_endian(); + empty_bfd->xvec = empty_xvec; + empty_bfd->arch_info = arch_info; + return empty_bfd; +} + +static int read_zero_data_only(bfd_vma ignore_p, + bfd_byte* myaddr, unsigned int length, + struct disassemble_info *ignore_info) { + memset(myaddr, 0, length); + return 0; +} +static int print_to_dev_null(void* ignore_stream, const char* ignore_format, ...) { + return 0; +} + +/* Prime the pump by running the selected disassembler on a null input. + This forces the machine-specific disassembler to divulge invariant + information like bytes_per_line. 
+ */ +static void parse_fake_insn(disassembler_ftype dfn, + struct disassemble_info* dinfo) { + typedef int (*read_memory_ftype) + (bfd_vma memaddr, bfd_byte *myaddr, unsigned int length, + struct disassemble_info *info); + read_memory_ftype read_memory_func = dinfo->read_memory_func; + fprintf_ftype fprintf_func = dinfo->fprintf_func; + + dinfo->read_memory_func = &read_zero_data_only; + dinfo->fprintf_func = &print_to_dev_null; + (*dfn)(0, dinfo); + + // put it back: + dinfo->read_memory_func = read_memory_func; + dinfo->fprintf_func = fprintf_func; +} + +static void init_disassemble_info_from_bfd(struct disassemble_info* dinfo, + void *stream, + fprintf_ftype fprintf_func, + bfd* abfd, + char* disassembler_options) { + init_disassemble_info(dinfo, stream, fprintf_func); + + dinfo->flavour = bfd_get_flavour(abfd); + dinfo->arch = bfd_get_arch(abfd); + dinfo->mach = bfd_get_mach(abfd); + dinfo->disassembler_options = disassembler_options; + dinfo->octets_per_byte = bfd_octets_per_byte (abfd); + dinfo->skip_zeroes = sizeof(void*) * 2; + dinfo->skip_zeroes_at_end = sizeof(void*)-1; + dinfo->disassembler_needs_relocs = FALSE; + + if (bfd_big_endian(abfd)) + dinfo->display_endian = dinfo->endian = BFD_ENDIAN_BIG; + else if (bfd_little_endian(abfd)) + dinfo->display_endian = dinfo->endian = BFD_ENDIAN_LITTLE; + else + dinfo->endian = native_endian(); + + disassemble_init_for_target(dinfo); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/tools/hsdis/hsdis.h 2009-08-01 04:20:53.271843364 +0100 @@ -0,0 +1,67 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +/* decode_instructions -- dump a range of addresses as native instructions + This implements the protocol required by the HotSpot PrintAssembly option. + + The starting and ending addresses are within the current process's address space. + + The option string, if not empty, is interpreted by the disassembler implementation. + + The printf callback is 'fprintf' or any other workalike. + It is called as (*printf_callback)(printf_stream, "some format...", some, format, args). + + The event callback receives an event tag (a string) and an argument (a void*). + It is called as (*event_callback)(event_stream, "tag", arg). 
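A minimal caller-side sketch of this callback protocol, assuming only the two callback signatures declared in this header and the tag names ("insn", "/insn", "mach", "addr") used by hsdis-demo.c and disassembler.cpp in this change; my_events and my_printf are hypothetical names, and code_begin/code_end are placeholders:

    #include <cstdio>
    #include <cstdarg>

    // Event callback: receives a tag such as "insn", "/insn", "addr/" or
    // "mach name='%s'" plus a void* argument.  Returning NULL lets the
    // plugin fall back to its default rendering of the event.
    static void* my_events(void* stream, const char* event, void* arg) {
      std::fprintf((std::FILE*) stream, "[%s %p]\n", event, arg);
      return NULL;
    }

    // Printf callback: an fprintf-style sink for the disassembly text itself.
    static int my_printf(void* stream, const char* format, ...) {
      va_list ap;
      va_start(ap, format);
      int n = std::vfprintf((std::FILE*) stream, format, ap);
      va_end(ap);
      return n;
    }

    // Hypothetical use, once the plugin has been loaded and the
    // decode_instructions entry point resolved (hsdis-demo.c does this
    // via dlopen/dlsym):
    //   decode_instructions(code_begin, code_end,
    //                       &my_events, stdout,
    //                       &my_printf, stdout,
    //                       "" /* options */);

Passing a null event callback with a FILE* event stream instead selects the plugin's built-in XML rendering, as described above.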
+ + Events: + begin an instruction, at a given location + end an instruction, at a given location + emit the symbolic value of an address + + A tag format is one of three basic forms: "tag", "/tag", "tag/", + where tag is a simple identifier, signifying (as in XML) a element start, + element end, and standalone element. (To render as XML, add angle brackets.) +*/ +extern +#ifdef DLL_EXPORT + DLL_EXPORT +#endif +void* decode_instructions(void* start, void* end, + void* (*event_callback)(void*, const char*, void*), + void* event_stream, + int (*printf_callback)(void*, const char*, ...), + void* printf_stream, + const char* options); + +/* convenience typedefs */ + +typedef void* (*decode_instructions_event_callback_ftype) (void*, const char*, void*); +typedef int (*decode_instructions_printf_callback_ftype) (void*, const char*, ...); +typedef void* (*decode_instructions_ftype) (void* start, void* end, + decode_instructions_event_callback_ftype event_callback, + void* event_stream, + decode_instructions_printf_callback_ftype printf_callback, + void* printf_stream, + const char* options); --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/compiler/disassembler.cpp 2009-08-01 04:20:53.685634332 +0100 @@ -0,0 +1,443 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +# include "incls/_precompiled.incl" +# include "incls/_disassembler.cpp.incl" + +void* Disassembler::_library = NULL; +bool Disassembler::_tried_to_load_library = false; + +// This routine is in the shared library: +Disassembler::decode_func Disassembler::_decode_instructions = NULL; + +static const char hsdis_library_name[] = "hsdis-"HOTSPOT_LIB_ARCH; +static const char decode_instructions_name[] = "decode_instructions"; + +#define COMMENT_COLUMN 40 LP64_ONLY(+8) /*could be an option*/ +#define BYTES_COMMENT ";..." /* funky byte display comment */ + +bool Disassembler::load_library() { + if (_decode_instructions != NULL) { + // Already succeeded. + return true; + } + if (_tried_to_load_library) { + // Do not try twice. + // To force retry in debugger: assign _tried_to_load_library=0 + return false; + } + // Try to load it. + char ebuf[1024]; + char buf[JVM_MAXPATHLEN]; + os::jvm_path(buf, sizeof(buf)); + int jvm_offset = -1; + { + // Match "jvm[^/]*" in jvm_path. + const char* base = buf; + const char* p = strrchr(buf, '/'); + p = strstr(p ? p : base, "jvm"); + if (p != NULL) jvm_offset = p - base; + } + if (jvm_offset >= 0) { + // Find the disassembler next to libjvm.so. 
+ strcpy(&buf[jvm_offset], hsdis_library_name); + strcat(&buf[jvm_offset], os::dll_file_extension()); + _library = hpi::dll_load(buf, ebuf, sizeof ebuf); + } + if (_library == NULL) { + // Try a free-floating lookup. + strcpy(&buf[0], hsdis_library_name); + strcat(&buf[0], os::dll_file_extension()); + _library = hpi::dll_load(buf, ebuf, sizeof ebuf); + } + if (_library != NULL) { + _decode_instructions = CAST_TO_FN_PTR(Disassembler::decode_func, + hpi::dll_lookup(_library, decode_instructions_name)); + } + _tried_to_load_library = true; + if (_decode_instructions == NULL) { + tty->print_cr("Could not load %s; %s; %s", buf, + ((_library != NULL) + ? "entry point is missing" + : (WizardMode || PrintMiscellaneous) + ? (const char*)ebuf + : "library not loadable"), + "PrintAssembly is disabled"); + return false; + } + + // Success. + tty->print_cr("Loaded disassembler from %s", buf); + return true; +} + + +class decode_env { + private: + nmethod* _nm; + CodeBlob* _code; + outputStream* _output; + address _start, _end; + + char _option_buf[512]; + char _print_raw; + bool _print_pc; + bool _print_bytes; + address _cur_insn; + int _total_ticks; + int _bytes_per_line; // arch-specific formatting option + + static bool match(const char* event, const char* tag) { + size_t taglen = strlen(tag); + if (strncmp(event, tag, taglen) != 0) + return false; + char delim = event[taglen]; + return delim == '\0' || delim == ' ' || delim == '/' || delim == '='; + } + + void collect_options(const char* p) { + if (p == NULL || p[0] == '\0') return; + size_t opt_so_far = strlen(_option_buf); + if (opt_so_far + 1 + strlen(p) + 1 > sizeof(_option_buf)) return; + char* fillp = &_option_buf[opt_so_far]; + if (opt_so_far > 0) *fillp++ = ','; + strcat(fillp, p); + // replace white space by commas: + char* q = fillp; + while ((q = strpbrk(q, " \t\n")) != NULL) + *q++ = ','; + // Note that multiple PrintAssemblyOptions flags accumulate with \n, + // which we want to be changed to a comma... + } + + void print_insn_labels(); + void print_insn_bytes(address pc0, address pc); + void print_address(address value); + + public: + decode_env(CodeBlob* code, outputStream* output); + + address decode_instructions(address start, address end); + + void start_insn(address pc) { + _cur_insn = pc; + output()->bol(); + print_insn_labels(); + } + + void end_insn(address pc) { + address pc0 = cur_insn(); + outputStream* st = output(); + if (_print_bytes && pc > pc0) + print_insn_bytes(pc0, pc); + if (_nm != NULL) + _nm->print_code_comment_on(st, COMMENT_COLUMN, pc0, pc); + + // Output pc bucket ticks if we have any + if (total_ticks() != 0) { + address bucket_pc = FlatProfiler::bucket_start_for(pc); + if (bucket_pc != NULL && bucket_pc > pc0 && bucket_pc <= pc) { + int bucket_count = FlatProfiler::bucket_count_for(pc0); + if (bucket_count != 0) { + st->bol(); + st->print_cr("%3.1f%% [%d]", bucket_count*100.0/total_ticks(), bucket_count); + } + } + } + } + + address handle_event(const char* event, address arg); + + outputStream* output() { return _output; } + address cur_insn() { return _cur_insn; } + int total_ticks() { return _total_ticks; } + void set_total_ticks(int n) { _total_ticks = n; } + const char* options() { return _option_buf; } +}; + +decode_env::decode_env(CodeBlob* code, outputStream* output) { + memset(this, 0, sizeof(*this)); + _output = output ? 
output : tty; + _code = code; + if (code != NULL && code->is_nmethod()) + _nm = (nmethod*) code; + + // by default, output pc but not bytes: + _print_pc = true; + _print_bytes = false; + _bytes_per_line = Disassembler::pd_instruction_alignment(); + + // parse the global option string: + collect_options(Disassembler::pd_cpu_opts()); + collect_options(PrintAssemblyOptions); + + if (strstr(options(), "hsdis-")) { + if (strstr(options(), "hsdis-print-raw")) + _print_raw = (strstr(options(), "xml") ? 2 : 1); + if (strstr(options(), "hsdis-print-pc")) + _print_pc = !_print_pc; + if (strstr(options(), "hsdis-print-bytes")) + _print_bytes = !_print_bytes; + } + if (strstr(options(), "help")) { + tty->print_cr("PrintAssemblyOptions help:"); + tty->print_cr(" hsdis-print-raw test plugin by requesting raw output"); + tty->print_cr(" hsdis-print-raw-xml test plugin by requesting raw xml"); + tty->print_cr(" hsdis-print-pc turn off PC printing (on by default)"); + tty->print_cr(" hsdis-print-bytes turn on instruction byte output"); + tty->print_cr("combined options: %s", options()); + } +} + +address decode_env::handle_event(const char* event, address arg) { + if (match(event, "insn")) { + start_insn(arg); + } else if (match(event, "/insn")) { + end_insn(arg); + } else if (match(event, "addr")) { + if (arg != NULL) { + print_address(arg); + return arg; + } + } else if (match(event, "mach")) { + output()->print_cr("[Disassembling for mach='%s']", arg); + } else if (match(event, "format bytes-per-line")) { + _bytes_per_line = (int) (intptr_t) arg; + } else { + // ignore unrecognized markup + } + return NULL; +} + +// called by the disassembler to print out jump targets and data addresses +void decode_env::print_address(address adr) { + outputStream* st = _output; + + if (adr == NULL) { + st->print("NULL"); + return; + } + + int small_num = (int)(intptr_t)adr; + if ((intptr_t)adr == (intptr_t)small_num + && -1 <= small_num && small_num <= 9) { + st->print("%d", small_num); + return; + } + + if (Universe::is_fully_initialized()) { + if (StubRoutines::contains(adr)) { + StubCodeDesc* desc = StubCodeDesc::desc_for(adr); + if (desc == NULL) + desc = StubCodeDesc::desc_for(adr + frame::pc_return_offset); + if (desc != NULL) { + st->print("Stub::%s", desc->name()); + if (desc->begin() != adr) + st->print("%+d 0x%p",adr - desc->begin(), adr); + else if (WizardMode) st->print(" " INTPTR_FORMAT, adr); + return; + } + st->print("Stub:: " INTPTR_FORMAT, adr); + return; + } + + BarrierSet* bs = Universe::heap()->barrier_set(); + if (bs->kind() == BarrierSet::CardTableModRef && + adr == (address)((CardTableModRefBS*)(bs))->byte_map_base) { + st->print("word_map_base"); + if (WizardMode) st->print(" " INTPTR_FORMAT, (intptr_t)adr); + return; + } + + oop obj; + if (_nm != NULL + && (obj = _nm->embeddedOop_at(cur_insn())) != NULL + && (address) obj == adr) { + obj->print_value_on(st); + return; + } + } + + // Fall through to a simple numeral. 
+ st->print(INTPTR_FORMAT, (intptr_t)adr); +} + +void decode_env::print_insn_labels() { + address p = cur_insn(); + outputStream* st = output(); + nmethod* nm = _nm; + if (nm != NULL) { + if (p == nm->entry_point()) st->print_cr("[Entry Point]"); + if (p == nm->verified_entry_point()) st->print_cr("[Verified Entry Point]"); + if (p == nm->exception_begin()) st->print_cr("[Exception Handler]"); + if (p == nm->stub_begin()) st->print_cr("[Stub Code]"); + if (p == nm->consts_begin()) st->print_cr("[Constants]"); + } + CodeBlob* cb = _code; + if (cb != NULL) { + cb->print_block_comment(st, (intptr_t)(p - cb->instructions_begin())); + } + if (_print_pc) { + st->print(" " INTPTR_FORMAT ": ", (intptr_t) p); + } +} + +void decode_env::print_insn_bytes(address pc, address pc_limit) { + outputStream* st = output(); + size_t incr = 1; + size_t perline = _bytes_per_line; + if ((size_t) Disassembler::pd_instruction_alignment() >= sizeof(int) + && !((uintptr_t)pc % sizeof(int)) + && !((uintptr_t)pc_limit % sizeof(int))) { + incr = sizeof(int); + if (perline % incr) perline += incr - (perline % incr); + } + while (pc < pc_limit) { + // tab to the desired column: + st->move_to(COMMENT_COLUMN); + address pc0 = pc; + address pc1 = pc + perline; + if (pc1 > pc_limit) pc1 = pc_limit; + for (; pc < pc1; pc += incr) { + if (pc == pc0) + st->print(BYTES_COMMENT); + else if ((uint)(pc - pc0) % sizeof(int) == 0) + st->print(" "); // put out a space on word boundaries + if (incr == sizeof(int)) + st->print("%08lx", *(int*)pc); + else st->print("%02x", (*pc)&0xFF); + } + st->cr(); + } +} + + +static void* event_to_env(void* env_pv, const char* event, void* arg) { + decode_env* env = (decode_env*) env_pv; + return env->handle_event(event, (address) arg); +} + +static int printf_to_env(void* env_pv, const char* format, ...) { + decode_env* env = (decode_env*) env_pv; + outputStream* st = env->output(); + size_t flen = strlen(format); + const char* raw = NULL; + if (flen == 0) return 0; + if (flen == 1 && format[0] == '\n') { st->bol(); return 1; } + if (flen < 2 || + strchr(format, '%') == NULL) { + raw = format; + } else if (format[0] == '%' && format[1] == '%' && + strchr(format+2, '%') == NULL) { + // happens a lot on machines with names like %foo + flen--; + raw = format+1; + } + if (raw != NULL) { + st->print_raw(raw, (int) flen); + return (int) flen; + } + va_list ap; + va_start(ap, format); + julong cnt0 = st->count(); + st->vprint(format, ap); + julong cnt1 = st->count(); + va_end(ap); + return (int)(cnt1 - cnt0); +} + +address decode_env::decode_instructions(address start, address end) { + _start = start; _end = end; + + assert((((intptr_t)start | (intptr_t)end) % Disassembler::pd_instruction_alignment() == 0), "misaligned insn addr"); + + const int show_bytes = false; // for disassembler debugging + + //_version = Disassembler::pd_cpu_version(); + + if (!Disassembler::can_decode()) { + return NULL; + } + + // decode a series of instructions and return the end of the last instruction + + if (_print_raw) { + // Print whatever the library wants to print, w/o fancy callbacks. + // This is mainly for debugging the library itself. + FILE* out = stdout; + FILE* xmlout = (_print_raw > 1 ? 
out : NULL); + return (address) + (*Disassembler::_decode_instructions)(start, end, + NULL, (void*) xmlout, + NULL, (void*) out, + options()); + } + + return (address) + (*Disassembler::_decode_instructions)(start, end, + &event_to_env, (void*) this, + &printf_to_env, (void*) this, + options()); +} + + +void Disassembler::decode(CodeBlob* cb, outputStream* st) { + if (!load_library()) return; + decode_env env(cb, st); + env.output()->print_cr("Decoding CodeBlob " INTPTR_FORMAT, cb); + env.decode_instructions(cb->instructions_begin(), cb->instructions_end()); +} + + +void Disassembler::decode(address start, address end, outputStream* st) { + if (!load_library()) return; + decode_env env(CodeCache::find_blob_unsafe(start), st); + env.decode_instructions(start, end); +} + +void Disassembler::decode(nmethod* nm, outputStream* st) { + if (!load_library()) return; + decode_env env(nm, st); + env.output()->print_cr("Decoding compiled method " INTPTR_FORMAT ":", nm); + env.output()->print_cr("Code:"); + + unsigned char* p = nm->instructions_begin(); + unsigned char* end = nm->instructions_end(); + + // If there has been profiling, print the buckets. + if (FlatProfiler::bucket_start_for(p) != NULL) { + unsigned char* p1 = p; + int total_bucket_count = 0; + while (p1 < end) { + unsigned char* p0 = p1; + p1 += pd_instruction_alignment(); + address bucket_pc = FlatProfiler::bucket_start_for(p1); + if (bucket_pc != NULL && bucket_pc > p0 && bucket_pc <= p1) + total_bucket_count += FlatProfiler::bucket_count_for(p0); + } + env.set_total_ticks(total_bucket_count); + } + + env.decode_instructions(p, end); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/compiler/disassembler.hpp 2009-08-01 04:20:54.106092748 +0100 @@ -0,0 +1,59 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class decode_env; + +// The disassembler prints out assembly code annotated +// with Java specific information. + +class Disassembler { + friend class decode_env; + private: + // this is the type of the dll entry point: + typedef void* (*decode_func)(void* start, void* end, + void* (*event_callback)(void*, const char*, void*), + void* event_stream, + int (*printf_callback)(void*, const char*, ...), + void* printf_stream, + const char* options); + // points to the library. + static void* _library; + // bailout + static bool _tried_to_load_library; + // points to the decode function. + static decode_func _decode_instructions; + // tries to load library and return whether it succedded. 
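A hedged sketch of how a VM-internal caller might drive the public entry points declared below; print_method_code is a hypothetical helper, while can_decode(), the decode() overloads, nmethod and tty are as they appear in this change:

    // Disassemble a compiled method if the hsdis plugin is available.
    static void print_method_code(nmethod* nm) {
      if (!Disassembler::can_decode()) {
        tty->print_cr("hsdis plugin not available; skipping disassembly");
        return;
      }
      // Whole-method form; (CodeBlob*) and (address, address) overloads
      // also exist for blobs and raw address ranges.
      Disassembler::decode(nm, tty);
    }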
+ static bool load_library(); + + // Machine dependent stuff + #include "incls/_disassembler_pd.hpp.incl" + + public: + static bool can_decode() { + return (_decode_instructions != NULL) || load_library(); + } + static void decode(CodeBlob *cb, outputStream* st = NULL); + static void decode(nmethod* nm, outputStream* st = NULL); + static void decode(address begin, address end, outputStream* st = NULL); +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp 2009-08-01 04:20:54.528681281 +0100 @@ -0,0 +1,139 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// +// Free block maintenance for Concurrent Mark Sweep Generation +// +// The main data structure for free blocks are +// . an indexed array of small free blocks, and +// . a dictionary of large free blocks +// + +// No virtuals in FreeChunk (don't want any vtables). + +// A FreeChunk is merely a chunk that can be in a doubly linked list +// and has a size field. NOTE: FreeChunks are distinguished from allocated +// objects in two ways (by the sweeper), depending on whether the VM is 32 or +// 64 bits. +// In 32 bits or 64 bits without CompressedOops, the second word (prev) has the +// LSB set to indicate a free chunk; allocated objects' klass() pointers +// don't have their LSB set. The corresponding bit in the CMSBitMap is +// set when the chunk is allocated. There are also blocks that "look free" +// but are not part of the free list and should not be coalesced into larger +// free blocks. These free blocks have their two LSB's set. + +class FreeChunk VALUE_OBJ_CLASS_SPEC { + friend class VMStructs; + // For 64 bit compressed oops, the markOop encodes both the size and the + // indication that this is a FreeChunk and not an object. + volatile size_t _size; + FreeChunk* _prev; + FreeChunk* _next; + + markOop mark() const volatile { return (markOop)_size; } + void set_mark(markOop m) { _size = (size_t)m; } + + public: + NOT_PRODUCT(static const size_t header_size();) + + // Returns "true" if the address indicates that the block represents + // a free chunk. + static bool indicatesFreeChunk(const HeapWord* addr) { + // Force volatile read from addr because value might change between + // calls. We really want the read of _mark and _prev from this pointer + // to be volatile but making the fields volatile causes all sorts of + // compilation errors. 
+ return ((volatile FreeChunk*)addr)->isFree(); + } + + bool isFree() const volatile { + LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else) + return (((intptr_t)_prev) & 0x1) == 0x1; + } + bool cantCoalesce() const { + assert(isFree(), "can't get coalesce bit on not free"); + return (((intptr_t)_prev) & 0x2) == 0x2; + } + void dontCoalesce() { + // the block should be free + assert(isFree(), "Should look like a free block"); + _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2); + } + FreeChunk* prev() const { + return (FreeChunk*)(((intptr_t)_prev) & ~(0x3)); + } + + debug_only(void* prev_addr() const { return (void*)&_prev; }) + debug_only(void* next_addr() const { return (void*)&_next; }) + debug_only(void* size_addr() const { return (void*)&_size; }) + + size_t size() const volatile { + LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else ) + return _size; + } + void setSize(size_t sz) { + LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else ) + _size = sz; + } + + FreeChunk* next() const { return _next; } + + void linkAfter(FreeChunk* ptr) { + linkNext(ptr); + if (ptr != NULL) ptr->linkPrev(this); + } + void linkAfterNonNull(FreeChunk* ptr) { + assert(ptr != NULL, "precondition violation"); + linkNext(ptr); + ptr->linkPrev(this); + } + void linkNext(FreeChunk* ptr) { _next = ptr; } + void linkPrev(FreeChunk* ptr) { + LP64_ONLY(if (UseCompressedOops) _prev = ptr; else) + _prev = (FreeChunk*)((intptr_t)ptr | 0x1); + } + void clearPrev() { _prev = NULL; } + void clearNext() { _next = NULL; } + void markNotFree() { + LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::prototype());) + // Also set _prev to null + _prev = NULL; + } + + // Return the address past the end of this chunk + HeapWord* end() const { return ((HeapWord*) this) + size(); } + + // debugging + void verify() const PRODUCT_RETURN; + void verifyList() const PRODUCT_RETURN; + void mangleAllocated(size_t size) PRODUCT_RETURN; + void mangleFreed(size_t size) PRODUCT_RETURN; +}; + +// Alignment helpers etc. +#define numQuanta(x,y) ((x+y-1)/y) +enum AlignmentConstants { + MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment +}; + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp 2009-08-01 04:20:55.059756377 +0100 @@ -0,0 +1,195 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
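In the non-CompressedOops case, the FreeChunk code above overloads the two low-order bits of _prev as status flags (0x1 means the block is free, 0x2 means it must not be coalesced), relying on pointer alignment to keep those bits otherwise zero. A minimal standalone sketch of that low-bit tagging idiom, with illustrative names rather than HotSpot's:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Chunk;

// The two low bits of an aligned Chunk* are available for flags.
static const uintptr_t FreeBit       = 0x1;  // block is on a free list
static const uintptr_t NoCoalesceBit = 0x2;  // block must not be coalesced

inline Chunk* strip_tags(Chunk* p)           { return (Chunk*)((uintptr_t)p & ~(uintptr_t)0x3); }
inline Chunk* tag(Chunk* p, uintptr_t bits)  { return (Chunk*)((uintptr_t)p | bits); }
inline bool   has_tag(Chunk* p, uintptr_t b) { return ((uintptr_t)p & b) != 0; }

struct Chunk {
  size_t size;
  Chunk* prev;   // low bits carry FreeBit / NoCoalesceBit
  Chunk* next;
};

int main() {
  alignas(8) Chunk a = { 64, nullptr, nullptr };
  alignas(8) Chunk b = { 32, nullptr, nullptr };

  // Link b after a and mark b free, like linkPrev() setting the 0x1 bit.
  b.prev = tag(&a, FreeBit);
  assert(has_tag(b.prev, FreeBit));         // corresponds to isFree()
  assert(strip_tags(b.prev) == &a);         // real predecessor, like prev()

  // Mark it un-coalescable, like dontCoalesce() setting the 0x2 bit.
  b.prev = tag(b.prev, NoCoalesceBit);
  assert(has_tag(b.prev, NoCoalesceBit));   // corresponds to cantCoalesce()
  return 0;
}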
+ * + */ + +// A BufferingOops closure tries to separate out the cost of finding roots +// from the cost of applying closures to them. It maintains an array of +// ref-containing locations. Until the array is full, applying the closure +// to an oop* merely records that location in the array. Since this +// closure app cost is small, an elapsed timer can approximately attribute +// all of this cost to the cost of finding the roots. When the array fills +// up, the wrapped closure is applied to all elements, keeping track of +// this elapsed time of this process, and leaving the array empty. +// The caller must be sure to call "done" to process any unprocessed +// buffered entriess. + +class Generation; +class HeapRegion; + +class BufferingOopClosure: public OopClosure { +protected: + enum PrivateConstants { + BufferLength = 1024 + }; + + oop *_buffer[BufferLength]; + oop **_buffer_top; + oop **_buffer_curr; + + OopClosure *_oc; + double _closure_app_seconds; + + void process_buffer () { + + double start = os::elapsedTime(); + for (oop **curr = _buffer; curr < _buffer_curr; ++curr) { + _oc->do_oop(*curr); + } + _buffer_curr = _buffer; + _closure_app_seconds += (os::elapsedTime() - start); + } + +public: + virtual void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + virtual void do_oop(oop *p) { + if (_buffer_curr == _buffer_top) { + process_buffer(); + } + + *_buffer_curr = p; + ++_buffer_curr; + } + void done () { + if (_buffer_curr > _buffer) { + process_buffer(); + } + } + double closure_app_seconds () { + return _closure_app_seconds; + } + BufferingOopClosure (OopClosure *oc) : + _oc(oc), + _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength), + _closure_app_seconds(0.0) { } +}; + +class BufferingOopsInGenClosure: public OopsInGenClosure { + BufferingOopClosure _boc; + OopsInGenClosure* _oc; +public: + BufferingOopsInGenClosure(OopsInGenClosure *oc) : + _boc(oc), _oc(oc) {} + + virtual void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + + virtual void do_oop(oop* p) { + assert(generation()->is_in_reserved(p), "Must be in!"); + _boc.do_oop(p); + } + + void done() { + _boc.done(); + } + + double closure_app_seconds () { + return _boc.closure_app_seconds(); + } + + void set_generation(Generation* gen) { + OopsInGenClosure::set_generation(gen); + _oc->set_generation(gen); + } + + void reset_generation() { + // Make sure we finish the current work with the current generation. 
+ _boc.done(); + OopsInGenClosure::reset_generation(); + _oc->reset_generation(); + } + +}; + + +class BufferingOopsInHeapRegionClosure: public OopsInHeapRegionClosure { +private: + enum PrivateConstants { + BufferLength = 1024 + }; + + oop *_buffer[BufferLength]; + oop **_buffer_top; + oop **_buffer_curr; + + HeapRegion *_hr_buffer[BufferLength]; + HeapRegion **_hr_curr; + + OopsInHeapRegionClosure *_oc; + double _closure_app_seconds; + + void process_buffer () { + + assert((_hr_curr - _hr_buffer) == (_buffer_curr - _buffer), + "the two lengths should be the same"); + + double start = os::elapsedTime(); + HeapRegion **hr_curr = _hr_buffer; + HeapRegion *hr_prev = NULL; + for (oop **curr = _buffer; curr < _buffer_curr; ++curr) { + HeapRegion *region = *hr_curr; + if (region != hr_prev) { + _oc->set_region(region); + hr_prev = region; + } + _oc->do_oop(*curr); + ++hr_curr; + } + _buffer_curr = _buffer; + _hr_curr = _hr_buffer; + _closure_app_seconds += (os::elapsedTime() - start); + } + +public: + virtual void do_oop(narrowOop *p) { + guarantee(false, "NYI"); + } + + virtual void do_oop(oop *p) { + if (_buffer_curr == _buffer_top) { + assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr"); + process_buffer(); + } + + *_buffer_curr = p; + ++_buffer_curr; + *_hr_curr = _from; + ++_hr_curr; + } + void done () { + if (_buffer_curr > _buffer) { + assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr"); + process_buffer(); + } + } + double closure_app_seconds () { + return _closure_app_seconds; + } + BufferingOopsInHeapRegionClosure (OopsInHeapRegionClosure *oc) : + _oc(oc), + _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength), + _hr_curr(_hr_buffer), + _closure_app_seconds(0.0) { } +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp 2009-08-01 04:20:55.516711103 +0100 @@ -0,0 +1,407 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
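The buffering closures above separate root discovery from closure application so that elapsed-time accounting for the two phases stays distinct: recording a location is cheap, and only the batched drain is timed. A minimal standalone sketch of the same buffer-then-drain pattern, with a plain function object standing in for the wrapped OopClosure (all names are illustrative):

#include <chrono>
#include <cstdio>
#include <functional>

class BufferingVisitor {
  static const int BufferLength = 1024;
  int*  _buffer[BufferLength];
  int** _curr = _buffer;
  int** _top  = _buffer + BufferLength;
  std::function<void(int*)> _visit;   // stands in for the wrapped closure
  double _apply_seconds = 0.0;

  void process_buffer() {
    auto start = std::chrono::steady_clock::now();
    for (int** p = _buffer; p < _curr; ++p) _visit(*p);   // drain the batch
    _curr = _buffer;
    _apply_seconds += std::chrono::duration<double>(
        std::chrono::steady_clock::now() - start).count();
  }

 public:
  explicit BufferingVisitor(std::function<void(int*)> v) : _visit(v) {}

  // Cheap when the buffer has room: just record the location.
  void do_slot(int* p) {
    if (_curr == _top) process_buffer();
    *_curr++ = p;
  }
  // Must be called at the end to flush any buffered, unprocessed slots.
  void done() { if (_curr > _buffer) process_buffer(); }
  double apply_seconds() const { return _apply_seconds; }
};

int main() {
  int roots[4] = {1, 2, 3, 4};
  BufferingVisitor bv([](int* p) { *p *= 2; });
  for (int& r : roots) bv.do_slot(&r);
  bv.done();
  std::printf("closure time: %f s, roots[0]=%d\n", bv.apply_seconds(), roots[0]);
  return 0;
}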
+ * + */ + +# include "incls/_precompiled.incl" +# include "incls/_collectionSetChooser.cpp.incl" + +CSetChooserCache::CSetChooserCache() { + for (int i = 0; i < CacheLength; ++i) + _cache[i] = NULL; + clear(); +} + +void CSetChooserCache::clear() { + _occupancy = 0; + _first = 0; + for (int i = 0; i < CacheLength; ++i) { + HeapRegion *hr = _cache[i]; + if (hr != NULL) + hr->set_sort_index(-1); + _cache[i] = NULL; + } +} + +#ifndef PRODUCT +bool CSetChooserCache::verify() { + int index = _first; + HeapRegion *prev = NULL; + for (int i = 0; i < _occupancy; ++i) { + guarantee(_cache[index] != NULL, "cache entry should not be empty"); + HeapRegion *hr = _cache[index]; + guarantee(!hr->is_young(), "should not be young!"); + if (prev != NULL) { + guarantee(prev->gc_efficiency() >= hr->gc_efficiency(), + "cache should be correctly ordered"); + } + guarantee(hr->sort_index() == get_sort_index(index), + "sort index should be correct"); + index = trim_index(index + 1); + prev = hr; + } + + for (int i = 0; i < (CacheLength - _occupancy); ++i) { + guarantee(_cache[index] == NULL, "cache entry should be empty"); + index = trim_index(index + 1); + } + + guarantee(index == _first, "we should have reached where we started from"); + return true; +} +#endif // PRODUCT + +void CSetChooserCache::insert(HeapRegion *hr) { + assert(!is_full(), "cache should not be empty"); + hr->calc_gc_efficiency(); + + int empty_index; + if (_occupancy == 0) { + empty_index = _first; + } else { + empty_index = trim_index(_first + _occupancy); + assert(_cache[empty_index] == NULL, "last slot should be empty"); + int last_index = trim_index(empty_index - 1); + HeapRegion *last = _cache[last_index]; + assert(last != NULL,"as the cache is not empty, last should not be empty"); + while (empty_index != _first && + last->gc_efficiency() < hr->gc_efficiency()) { + _cache[empty_index] = last; + last->set_sort_index(get_sort_index(empty_index)); + empty_index = last_index; + last_index = trim_index(last_index - 1); + last = _cache[last_index]; + } + } + _cache[empty_index] = hr; + hr->set_sort_index(get_sort_index(empty_index)); + + ++_occupancy; + assert(verify(), "cache should be consistent"); +} + +HeapRegion *CSetChooserCache::remove_first() { + if (_occupancy > 0) { + assert(_cache[_first] != NULL, "cache should have at least one region"); + HeapRegion *ret = _cache[_first]; + _cache[_first] = NULL; + ret->set_sort_index(-1); + --_occupancy; + _first = trim_index(_first + 1); + assert(verify(), "cache should be consistent"); + return ret; + } else { + return NULL; + } +} + +// this is a bit expensive... but we expect that it should not be called +// to often. 
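The cache above keeps a small circular window of regions ordered by GC efficiency, and a region's sort_index doubles as a back-pointer into that window: cached entries use the "-slot - 2" encoding so they are distinguishable from both "-1, not in any structure" and ordinary array indices (>= 0). A minimal standalone sketch of that encoding and the wrap-around index arithmetic follows (simplified, assumed names); CSetChooserCache::remove, which the comment above describes, continues directly below.

#include <cassert>

const int CacheLength = 16;

// Circular index arithmetic: adding CacheLength first keeps the value
// non-negative even when index-1 underflows.
inline int trim_index(int index)   { return (index + CacheLength) % CacheLength; }

// Encode a cache slot as a "sort index": slot 0 -> -2, slot 1 -> -3, ...
inline int to_sort_index(int slot) { return -slot - 2; }
inline int from_sort_index(int si) { return -si - 2; }

int main() {
  // Round-trip and range properties that the verify() code above relies on.
  for (int slot = 0; slot < CacheLength; ++slot) {
    int si = to_sort_index(slot);
    assert(si < -1);                        // cached entries are always < -1
    assert(from_sort_index(si) == slot);    // and decode back to their slot
  }
  assert(trim_index(-1) == CacheLength - 1);  // wrap-around below zero
  assert(trim_index(CacheLength) == 0);       // wrap-around above the top
  return 0;
}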
+void CSetChooserCache::remove(HeapRegion *hr) { + assert(_occupancy > 0, "cache should not be empty"); + assert(hr->sort_index() < -1, "should already be in the cache"); + int index = get_index(hr->sort_index()); + assert(_cache[index] == hr, "index should be correct"); + int next_index = trim_index(index + 1); + int last_index = trim_index(_first + _occupancy - 1); + while (index != last_index) { + assert(_cache[next_index] != NULL, "should not be null"); + _cache[index] = _cache[next_index]; + _cache[index]->set_sort_index(get_sort_index(index)); + + index = next_index; + next_index = trim_index(next_index+1); + } + assert(index == last_index, "should have reached the last one"); + _cache[index] = NULL; + hr->set_sort_index(-1); + --_occupancy; + assert(verify(), "cache should be consistent"); +} + +static inline int orderRegions(HeapRegion* hr1, HeapRegion* hr2) { + if (hr1 == NULL) { + if (hr2 == NULL) return 0; + else return 1; + } else if (hr2 == NULL) { + return -1; + } + if (hr2->gc_efficiency() < hr1->gc_efficiency()) return -1; + else if (hr1->gc_efficiency() < hr2->gc_efficiency()) return 1; + else return 0; +} + +static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) { + return orderRegions(*hr1p, *hr2p); +} + +CollectionSetChooser::CollectionSetChooser() : + // The line below is the worst bit of C++ hackery I've ever written + // (Detlefs, 11/23). You should think of it as equivalent to + // "_regions(100, true)": initialize the growable array and inform it + // that it should allocate its elem array(s) on the C heap. The first + // argument, however, is actually a comma expression (new-expr, 100). + // The purpose of the new_expr is to inform the growable array that it + // is *already* allocated on the C heap: it uses the placement syntax to + // keep it from actually doing any allocation. 
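The constructor that follows uses the comma-expression/placement-new trick the comment above describes: the first "argument" of _markedRegions' initializer calls ResourceObj::operator new at the member's own address purely to stamp the storage as C-heap allocated, then the comma expression evaluates to 100, the real constructor argument. A minimal standalone illustration of the same C++ mechanism with a hypothetical class (this sketches the idiom, not HotSpot's GrowableArray or ResourceObj):

#include <cstddef>
#include <cstdio>
#include <new>

struct Tagged {
  bool on_c_heap;   // set by the placement operator new, not by the constructor
  int  capacity;

  // Placement form: called only to tag the already-reserved storage.
  static void* operator new(std::size_t /*size*/, void* where) {
    static_cast<Tagged*>(where)->on_c_heap = true;
    return where;
  }
  explicit Tagged(int cap) : capacity(cap) {}   // leaves on_c_heap untouched
};

struct Owner {
  Tagged member;
  Owner()
    // The comma expression runs the placement new for its side effect and
    // then yields 100, which becomes the argument to Tagged's constructor.
    : member((Tagged::operator new(sizeof(Tagged), (void*)&member), 100)) {}
};

int main() {
  Owner o;
  std::printf("capacity=%d on_c_heap=%d\n", o.member.capacity, (int)o.member.on_c_heap);
  return 0;
}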
+ _markedRegions((ResourceObj::operator new (sizeof(GrowableArray), + (void*)&_markedRegions, + ResourceObj::C_HEAP), + 100), + true), + _curMarkedIndex(0), + _numMarkedRegions(0), + _unmarked_age_1_returned_as_new(false), + _first_par_unreserved_idx(0) +{} + + + +#ifndef PRODUCT +bool CollectionSetChooser::verify() { + int index = 0; + guarantee(_curMarkedIndex <= _numMarkedRegions, + "_curMarkedIndex should be within bounds"); + while (index < _curMarkedIndex) { + guarantee(_markedRegions.at(index++) == NULL, + "all entries before _curMarkedIndex should be NULL"); + } + HeapRegion *prev = NULL; + while (index < _numMarkedRegions) { + HeapRegion *curr = _markedRegions.at(index++); + if (curr != NULL) { + int si = curr->sort_index(); + guarantee(!curr->is_young(), "should not be young!"); + guarantee(si > -1 && si == (index-1), "sort index invariant"); + if (prev != NULL) { + guarantee(orderRegions(prev, curr) != 1, "regions should be sorted"); + } + prev = curr; + } + } + return _cache.verify(); +} +#endif + +bool +CollectionSetChooser::addRegionToCache() { + assert(!_cache.is_full(), "cache should not be full"); + + HeapRegion *hr = NULL; + while (hr == NULL && _curMarkedIndex < _numMarkedRegions) { + hr = _markedRegions.at(_curMarkedIndex++); + } + if (hr == NULL) + return false; + assert(!hr->is_young(), "should not be young!"); + assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant"); + _markedRegions.at_put(hr->sort_index(), NULL); + _cache.insert(hr); + assert(!_cache.is_empty(), "cache should not be empty"); + assert(verify(), "cache should be consistent"); + return false; +} + +void +CollectionSetChooser::fillCache() { + while (!_cache.is_full() && addRegionToCache()) { + } +} + +void +CollectionSetChooser::sortMarkedHeapRegions() { + guarantee(_cache.is_empty(), "cache should be empty"); + // First trim any unused portion of the top in the parallel case. 
+ if (_first_par_unreserved_idx > 0) { + if (G1PrintParCleanupStats) { + gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n", + _markedRegions.length(), _first_par_unreserved_idx); + } + assert(_first_par_unreserved_idx <= _markedRegions.length(), + "Or we didn't reserved enough length"); + _markedRegions.trunc_to(_first_par_unreserved_idx); + } + _markedRegions.sort(orderRegions); + assert(_numMarkedRegions <= _markedRegions.length(), "Requirement"); + assert(_numMarkedRegions == 0 + || _markedRegions.at(_numMarkedRegions-1) != NULL, + "Testing _numMarkedRegions"); + assert(_numMarkedRegions == _markedRegions.length() + || _markedRegions.at(_numMarkedRegions) == NULL, + "Testing _numMarkedRegions"); + if (G1PrintParCleanupStats) { + gclog_or_tty->print_cr(" Sorted %d marked regions.", _numMarkedRegions); + } + for (int i = 0; i < _numMarkedRegions; i++) { + assert(_markedRegions.at(i) != NULL, "Should be true by sorting!"); + _markedRegions.at(i)->set_sort_index(i); + if (G1PrintRegionLivenessInfo > 0) { + if (i == 0) gclog_or_tty->print_cr("Sorted marked regions:"); + if (i < G1PrintRegionLivenessInfo || + (_numMarkedRegions-i) < G1PrintRegionLivenessInfo) { + HeapRegion* hr = _markedRegions.at(i); + size_t u = hr->used(); + gclog_or_tty->print_cr(" Region %d: %d used, %d max live, %5.2f%%.", + i, u, hr->max_live_bytes(), + 100.0*(float)hr->max_live_bytes()/(float)u); + } + } + } + if (G1PolicyVerbose > 1) + printSortedHeapRegions(); + assert(verify(), "should now be sorted"); +} + +void +printHeapRegion(HeapRegion *hr) { + if (hr->isHumongous()) + gclog_or_tty->print("H: "); + if (hr->in_collection_set()) + gclog_or_tty->print("CS: "); + gclog_or_tty->print_cr("Region " PTR_FORMAT " (%s%s) " + "[" PTR_FORMAT ", " PTR_FORMAT"] " + "Used: " SIZE_FORMAT "K, garbage: " SIZE_FORMAT "K.", + hr, hr->is_young() ? "Y " : " ", + hr->is_marked()? 
"M1" : "M0", + hr->bottom(), hr->end(), + hr->used()/K, hr->garbage_bytes()/K); +} + +void +CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) { + assert(!hr->isHumongous(), + "Humongous regions shouldn't be added to the collection set"); + assert(!hr->is_young(), "should not be young!"); + _markedRegions.append(hr); + _numMarkedRegions++; + hr->calc_gc_efficiency(); +} + +void +CollectionSetChooser:: +prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) { + _first_par_unreserved_idx = 0; + size_t max_waste = ParallelGCThreads * chunkSize; + // it should be aligned with respect to chunkSize + size_t aligned_n_regions = + (n_regions + (chunkSize - 1)) / chunkSize * chunkSize; + assert( aligned_n_regions % chunkSize == 0, "should be aligned" ); + _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL); +} + +jint +CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) { + jint res = Atomic::add(n_regions, &_first_par_unreserved_idx); + assert(_markedRegions.length() > res + n_regions - 1, + "Should already have been expanded"); + return res - n_regions; +} + +void +CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) { + assert(_markedRegions.at(index) == NULL, "precondition"); + assert(!hr->is_young(), "should not be young!"); + _markedRegions.at_put(index, hr); + hr->calc_gc_efficiency(); +} + +void +CollectionSetChooser::incNumMarkedHeapRegions(jint inc_by) { + (void)Atomic::add(inc_by, &_numMarkedRegions); +} + +void +CollectionSetChooser::clearMarkedHeapRegions(){ + for (int i = 0; i < _markedRegions.length(); i++) { + HeapRegion* r = _markedRegions.at(i); + if (r != NULL) r->set_sort_index(-1); + } + _markedRegions.clear(); + _curMarkedIndex = 0; + _numMarkedRegions = 0; + _cache.clear(); +}; + +void +CollectionSetChooser::updateAfterFullCollection() { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + clearMarkedHeapRegions(); +} + +void +CollectionSetChooser::printSortedHeapRegions() { + gclog_or_tty->print_cr("Printing %d Heap Regions sorted by amount of known garbage", + _numMarkedRegions); + for (int i = 0; i < _markedRegions.length(); i++) { + printHeapRegion(_markedRegions.at(i)); + } + gclog_or_tty->print_cr("Done sorted heap region print"); +} + +void CollectionSetChooser::removeRegion(HeapRegion *hr) { + int si = hr->sort_index(); + assert(si == -1 || hr->is_marked(), "Sort index not valid."); + if (si > -1) { + assert(_markedRegions.at(si) == hr, "Sort index not valid." 
); + _markedRegions.at_put(si, NULL); + } else if (si < -1) { + assert(_cache.region_in_cache(hr), "should be in the cache"); + _cache.remove(hr); + assert(hr->sort_index() == -1, "sort index invariant"); + } + hr->set_sort_index(-1); +} + +// if time_remaining < 0.0, then this method should try to return +// a region, whether it fits within the remaining time or not +HeapRegion* +CollectionSetChooser::getNextMarkedRegion(double time_remaining, + double avg_prediction) { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + G1CollectorPolicy* g1p = g1h->g1_policy(); + fillCache(); + if (_cache.is_empty()) { + assert(_curMarkedIndex == _numMarkedRegions, + "if cache is empty, list should also be empty"); + return NULL; + } + + HeapRegion *hr = _cache.get_first(); + assert(hr != NULL, "if cache not empty, first entry should be non-null"); + double predicted_time = g1h->predict_region_elapsed_time_ms(hr, false); + + if (g1p->adaptive_young_list_length()) { + if (time_remaining - predicted_time < 0.0) { + g1h->check_if_region_is_too_expensive(predicted_time); + return NULL; + } + } else { + if (predicted_time > 2.0 * avg_prediction) { + return NULL; + } + } + + HeapRegion *hr2 = _cache.remove_first(); + assert(hr == hr2, "cache contents should not have changed"); + + return hr; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp 2009-08-01 04:20:55.947118088 +0100 @@ -0,0 +1,138 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// We need to sort heap regions by collection desirability. 
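prepareForAddMarkedHeapRegionsPar, getParMarkedHeapRegionChunk and setMarkedHeapRegion above form a simple parallel claiming protocol: the array is pre-grown, each worker atomically bumps a shared index by its chunk size to reserve a private range, and then fills that range without further synchronization. A minimal standalone sketch of the same idea using std::atomic (thread counts and chunk sizes are illustrative):

#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

int main() {
  const int chunk = 8, workers = 4, chunks_per_worker = 16;
  std::vector<int> slots(chunk * workers * chunks_per_worker, -1);  // pre-grown
  std::atomic<int> first_unreserved{0};

  auto work = [&](int id) {
    for (int c = 0; c < chunks_per_worker; ++c) {
      // Reserve a private [start, start+chunk) range; as with Atomic::add,
      // the fetch_add makes the reservation itself the only contended step.
      int start = first_unreserved.fetch_add(chunk);
      for (int i = start; i < start + chunk; ++i) slots[i] = id;  // no locks needed
    }
  };

  std::vector<std::thread> pool;
  for (int id = 0; id < workers; ++id) pool.emplace_back(work, id);
  for (auto& t : pool) t.join();

  for (int v : slots) assert(v >= 0);   // every slot was claimed exactly once
  return 0;
}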
+ +class CSetChooserCache VALUE_OBJ_CLASS_SPEC { +private: + enum { + CacheLength = 16 + } PrivateConstants; + + HeapRegion* _cache[CacheLength]; + int _occupancy; // number of region in cache + int _first; // "first" region in the cache + + // adding CacheLength to deal with negative values + inline int trim_index(int index) { + return (index + CacheLength) % CacheLength; + } + + inline int get_sort_index(int index) { + return -index-2; + } + inline int get_index(int sort_index) { + return -sort_index-2; + } + +public: + CSetChooserCache(void); + + inline int occupancy(void) { return _occupancy; } + inline bool is_full() { return _occupancy == CacheLength; } + inline bool is_empty() { return _occupancy == 0; } + + void clear(void); + void insert(HeapRegion *hr); + HeapRegion *remove_first(void); + void remove (HeapRegion *hr); + inline HeapRegion *get_first(void) { + return _cache[_first]; + } + +#ifndef PRODUCT + bool verify (void); + bool region_in_cache(HeapRegion *hr) { + int sort_index = hr->sort_index(); + if (sort_index < -1) { + int index = get_index(sort_index); + guarantee(index < CacheLength, "should be within bounds"); + return _cache[index] == hr; + } else + return 0; + } +#endif // PRODUCT +}; + +class CollectionSetChooser: public CHeapObj { + + GrowableArray _markedRegions; + int _curMarkedIndex; + int _numMarkedRegions; + CSetChooserCache _cache; + + // True iff last collection pause ran of out new "age 0" regions, and + // returned an "age 1" region. + bool _unmarked_age_1_returned_as_new; + + jint _first_par_unreserved_idx; + +public: + + HeapRegion* getNextMarkedRegion(double time_so_far, double avg_prediction); + + CollectionSetChooser(); + + void printSortedHeapRegions(); + + void sortMarkedHeapRegions(); + void fillCache(); + bool addRegionToCache(void); + void addMarkedHeapRegion(HeapRegion *hr); + + // Must be called before calls to getParMarkedHeapRegionChunk. + // "n_regions" is the number of regions, "chunkSize" the chunk size. + void prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize); + // Returns the first index in a contiguous chunk of "n_regions" indexes + // that the calling thread has reserved. These must be set by the + // calling thread using "setMarkedHeapRegion" (to NULL if necessary). + jint getParMarkedHeapRegionChunk(jint n_regions); + // Set the marked array entry at index to hr. Careful to claim the index + // first if in parallel. + void setMarkedHeapRegion(jint index, HeapRegion* hr); + // Atomically increment the number of claimed regions by "inc_by". + void incNumMarkedHeapRegions(jint inc_by); + + void clearMarkedHeapRegions(); + + void updateAfterFullCollection(); + + // Ensure that "hr" is not a member of the marked region array or the cache + void removeRegion(HeapRegion* hr); + + bool unmarked_age_1_returned_as_new() { return _unmarked_age_1_returned_as_new; } + + // Returns true if the used portion of "_markedRegions" is properly + // sorted, otherwise asserts false. +#ifndef PRODUCT + bool verify(void); + bool regionProperlyOrdered(HeapRegion* r) { + int si = r->sort_index(); + return (si == -1) || + (si > -1 && _markedRegions.at(si) == r) || + (si < -1 && _cache.region_in_cache(r)); + } +#endif + +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp 2009-08-01 04:20:56.382613947 +0100 @@ -0,0 +1,350 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_concurrentG1Refine.cpp.incl" + +bool ConcurrentG1Refine::_enabled = false; + +ConcurrentG1Refine::ConcurrentG1Refine() : + _pya(PYA_continue), _last_pya(PYA_continue), + _last_cards_during(), _first_traversal(false), + _card_counts(NULL), _cur_card_count_histo(NULL), _cum_card_count_histo(NULL), + _hot_cache(NULL), + _def_use_cache(false), _use_cache(false), + _n_periods(0), _total_cards(0), _total_travs(0) +{ + if (G1ConcRefine) { + _cg1rThread = new ConcurrentG1RefineThread(this); + assert(cg1rThread() != NULL, "Conc refine should have been created"); + assert(cg1rThread()->cg1r() == this, + "Conc refine thread should refer to this"); + } else { + _cg1rThread = NULL; + } +} + +void ConcurrentG1Refine::init() { + if (G1ConcRSLogCacheSize > 0 || G1ConcRSCountTraversals) { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + _n_card_counts = + (unsigned) (g1h->g1_reserved_obj_bytes() >> CardTableModRefBS::card_shift); + _card_counts = NEW_C_HEAP_ARRAY(unsigned char, _n_card_counts); + for (size_t i = 0; i < _n_card_counts; i++) _card_counts[i] = 0; + ModRefBarrierSet* bs = g1h->mr_bs(); + guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition"); + CardTableModRefBS* ctbs = (CardTableModRefBS*)bs; + _ct_bot = ctbs->byte_for_const(g1h->reserved_region().start()); + if (G1ConcRSCountTraversals) { + _cur_card_count_histo = NEW_C_HEAP_ARRAY(unsigned, 256); + _cum_card_count_histo = NEW_C_HEAP_ARRAY(unsigned, 256); + for (int i = 0; i < 256; i++) { + _cur_card_count_histo[i] = 0; + _cum_card_count_histo[i] = 0; + } + } + } + if (G1ConcRSLogCacheSize > 0) { + _def_use_cache = true; + _use_cache = true; + _hot_cache_size = (1 << G1ConcRSLogCacheSize); + _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size); + _n_hot = 0; + _hot_cache_idx = 0; + } +} + +ConcurrentG1Refine::~ConcurrentG1Refine() { + if (G1ConcRSLogCacheSize > 0 || G1ConcRSCountTraversals) { + assert(_card_counts != NULL, "Logic"); + FREE_C_HEAP_ARRAY(unsigned char, _card_counts); + assert(_cur_card_count_histo != NULL, "Logic"); + FREE_C_HEAP_ARRAY(unsigned, _cur_card_count_histo); + assert(_cum_card_count_histo != NULL, "Logic"); + FREE_C_HEAP_ARRAY(unsigned, _cum_card_count_histo); + } + if (G1ConcRSLogCacheSize > 0) { + assert(_hot_cache != NULL, "Logic"); + FREE_C_HEAP_ARRAY(jbyte*, _hot_cache); + } +} + +bool ConcurrentG1Refine::refine() { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + unsigned cards_before = g1h->g1_rem_set()->conc_refine_cards(); + clear_hot_cache(); // Any previous values in this are now invalid. 
+ g1h->g1_rem_set()->concurrentRefinementPass(this); + _traversals++; + unsigned cards_after = g1h->g1_rem_set()->conc_refine_cards(); + unsigned cards_during = cards_after-cards_before; + // If this is the first traversal in the current enabling + // and we did some cards, or if the number of cards found is decreasing + // sufficiently quickly, then keep going. Otherwise, sleep a while. + bool res = + (_first_traversal && cards_during > 0) + || + (!_first_traversal && cards_during * 3 < _last_cards_during * 2); + _last_cards_during = cards_during; + _first_traversal = false; + return res; +} + +void ConcurrentG1Refine::enable() { + MutexLocker x(G1ConcRefine_mon); + if (!_enabled) { + _enabled = true; + _first_traversal = true; _last_cards_during = 0; + G1ConcRefine_mon->notify_all(); + } +} + +unsigned ConcurrentG1Refine::disable() { + MutexLocker x(G1ConcRefine_mon); + if (_enabled) { + _enabled = false; + return _traversals; + } else { + return 0; + } +} + +void ConcurrentG1Refine::wait_for_ConcurrentG1Refine_enabled() { + G1ConcRefine_mon->lock(); + while (!_enabled) { + G1ConcRefine_mon->wait(Mutex::_no_safepoint_check_flag); + } + G1ConcRefine_mon->unlock(); + _traversals = 0; +}; + +void ConcurrentG1Refine::set_pya_restart() { + // If we're using the log-based RS barrier, the above will cause + // in-progress traversals of completed log buffers to quit early; we will + // also abandon all other buffers. + if (G1RSBarrierUseQueue) { + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + dcqs.abandon_logs(); + // Reset the post-yield actions. + _pya = PYA_continue; + _last_pya = PYA_continue; + } else { + _pya = PYA_restart; + } +} + +void ConcurrentG1Refine::set_pya_cancel() { + _pya = PYA_cancel; +} + +PostYieldAction ConcurrentG1Refine::get_pya() { + if (_pya != PYA_continue) { + jint val = _pya; + while (true) { + jint val_read = Atomic::cmpxchg(PYA_continue, &_pya, val); + if (val_read == val) { + PostYieldAction res = (PostYieldAction)val; + assert(res != PYA_continue, "Only the refine thread should reset."); + _last_pya = res; + return res; + } else { + val = val_read; + } + } + } + // QQQ WELL WHAT DO WE RETURN HERE??? + // make up something! + return PYA_continue; +} + +PostYieldAction ConcurrentG1Refine::get_last_pya() { + PostYieldAction res = _last_pya; + _last_pya = PYA_continue; + return res; +} + +bool ConcurrentG1Refine::do_traversal() { + return _cg1rThread->do_traversal(); +} + +int ConcurrentG1Refine::add_card_count(jbyte* card_ptr) { + size_t card_num = (card_ptr - _ct_bot); + guarantee(0 <= card_num && card_num < _n_card_counts, "Bounds"); + unsigned char cnt = _card_counts[card_num]; + if (cnt < 255) _card_counts[card_num]++; + return cnt; + _total_travs++; +} + +jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr) { + int count = add_card_count(card_ptr); + // Count previously unvisited cards. + if (count == 0) _total_cards++; + // We'll assume a traversal unless we store it in the cache. + if (count < G1ConcRSHotCardLimit) { + _total_travs++; + return card_ptr; + } + // Otherwise, it's hot. + jbyte* res = NULL; + MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag); + if (_n_hot == _hot_cache_size) { + _total_travs++; + res = _hot_cache[_hot_cache_idx]; + _n_hot--; + } + // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx. 
+ _hot_cache[_hot_cache_idx] = card_ptr; + _hot_cache_idx++; + if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0; + _n_hot++; + return res; +} + + +void ConcurrentG1Refine::clean_up_cache(int worker_i, G1RemSet* g1rs) { + assert(!use_cache(), "cache should be disabled"); + int start_ind = _hot_cache_idx-1; + for (int i = 0; i < _n_hot; i++) { + int ind = start_ind - i; + if (ind < 0) ind = ind + _hot_cache_size; + jbyte* entry = _hot_cache[ind]; + if (entry != NULL) { + g1rs->concurrentRefineOneCard(entry, worker_i); + } + } + _n_hot = 0; + _hot_cache_idx = 0; +} + +void ConcurrentG1Refine::clear_and_record_card_counts() { + if (G1ConcRSLogCacheSize == 0 && !G1ConcRSCountTraversals) return; + _n_periods++; + if (G1ConcRSCountTraversals) { + for (size_t i = 0; i < _n_card_counts; i++) { + unsigned char bucket = _card_counts[i]; + _cur_card_count_histo[bucket]++; + _card_counts[i] = 0; + } + gclog_or_tty->print_cr("Card counts:"); + for (int i = 0; i < 256; i++) { + if (_cur_card_count_histo[i] > 0) { + gclog_or_tty->print_cr(" %3d: %9d", i, _cur_card_count_histo[i]); + _cum_card_count_histo[i] += _cur_card_count_histo[i]; + _cur_card_count_histo[i] = 0; + } + } + } else { + assert(G1ConcRSLogCacheSize > 0, "Logic"); + Copy::fill_to_words((HeapWord*)(&_card_counts[0]), + _n_card_counts / HeapWordSize); + } +} + +void +ConcurrentG1Refine:: +print_card_count_histo_range(unsigned* histo, int from, int to, + float& cum_card_pct, + float& cum_travs_pct) { + unsigned cards = 0; + unsigned travs = 0; + guarantee(to <= 256, "Precondition"); + for (int i = from; i < to-1; i++) { + cards += histo[i]; + travs += histo[i] * i; + } + if (to == 256) { + unsigned histo_card_sum = 0; + unsigned histo_trav_sum = 0; + for (int i = 1; i < 255; i++) { + histo_trav_sum += histo[i] * i; + } + cards += histo[255]; + // correct traversals for the last one. + unsigned travs_255 = (unsigned) (_total_travs - histo_trav_sum); + travs += travs_255; + + } else { + cards += histo[to-1]; + travs += histo[to-1] * (to-1); + } + float fperiods = (float)_n_periods; + float f_tot_cards = (float)_total_cards/fperiods; + float f_tot_travs = (float)_total_travs/fperiods; + if (cards > 0) { + float fcards = (float)cards/fperiods; + float ftravs = (float)travs/fperiods; + if (to == 256) { + gclog_or_tty->print(" %4d- %10.2f%10.2f", from, fcards, ftravs); + } else { + gclog_or_tty->print(" %4d-%4d %10.2f%10.2f", from, to-1, fcards, ftravs); + } + float pct_cards = fcards*100.0/f_tot_cards; + cum_card_pct += pct_cards; + float pct_travs = ftravs*100.0/f_tot_travs; + cum_travs_pct += pct_travs; + gclog_or_tty->print_cr("%10.2f%10.2f%10.2f%10.2f", + pct_cards, cum_card_pct, + pct_travs, cum_travs_pct); + } +} + +void ConcurrentG1Refine::print_final_card_counts() { + if (!G1ConcRSCountTraversals) return; + + gclog_or_tty->print_cr("Did %d total traversals of %d distinct cards.", + _total_travs, _total_cards); + float fperiods = (float)_n_periods; + gclog_or_tty->print_cr(" This is an average of %8.2f traversals, %8.2f cards, " + "per collection.", (float)_total_travs/fperiods, + (float)_total_cards/fperiods); + gclog_or_tty->print_cr(" This is an average of %8.2f traversals/distinct " + "dirty card.\n", + _total_cards > 0 ? 
+ (float)_total_travs/(float)_total_cards : 0.0); + + + gclog_or_tty->print_cr("Histogram:\n\n%10s %10s%10s%10s%10s%10s%10s", + "range", "# cards", "# travs", "% cards", "(cum)", + "% travs", "(cum)"); + gclog_or_tty->print_cr("------------------------------------------------------------" + "-------------"); + float cum_cards_pct = 0.0; + float cum_travs_pct = 0.0; + for (int i = 1; i < 10; i++) { + print_card_count_histo_range(_cum_card_count_histo, i, i+1, + cum_cards_pct, cum_travs_pct); + } + for (int i = 10; i < 100; i += 10) { + print_card_count_histo_range(_cum_card_count_histo, i, i+10, + cum_cards_pct, cum_travs_pct); + } + print_card_count_histo_range(_cum_card_count_histo, 100, 150, + cum_cards_pct, cum_travs_pct); + print_card_count_histo_range(_cum_card_count_histo, 150, 200, + cum_cards_pct, cum_travs_pct); + print_card_count_histo_range(_cum_card_count_histo, 150, 255, + cum_cards_pct, cum_travs_pct); + print_card_count_histo_range(_cum_card_count_histo, 255, 256, + cum_cards_pct, cum_travs_pct); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp 2009-08-01 04:20:56.800484767 +0100 @@ -0,0 +1,132 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// Forward decl +class ConcurrentG1RefineThread; +class G1RemSet; + +// What to do after a yield: +enum PostYieldAction { + PYA_continue, // Continue the traversal + PYA_restart, // Restart + PYA_cancel // It's been completed by somebody else: cancel. +}; + +class ConcurrentG1Refine: public CHeapObj { + ConcurrentG1RefineThread* _cg1rThread; + + volatile jint _pya; + PostYieldAction _last_pya; + + static bool _enabled; // Protected by G1ConcRefine_mon. + unsigned _traversals; + + // Number of cards processed during last refinement traversal. + unsigned _first_traversal; + unsigned _last_cards_during; + + // The cache for card refinement. + bool _use_cache; + bool _def_use_cache; + size_t _n_periods; + size_t _total_cards; + size_t _total_travs; + + unsigned char* _card_counts; + unsigned _n_card_counts; + const jbyte* _ct_bot; + unsigned* _cur_card_count_histo; + unsigned* _cum_card_count_histo; + jbyte** _hot_cache; + int _hot_cache_size; + int _n_hot; + int _hot_cache_idx; + + // Returns the count of this card after incrementing it. 
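The hot-card machinery declared above (and implemented in cache_insert earlier in this file) refines "cold" cards immediately but parks frequently dirtied cards in a small ring buffer, handing back whichever entry it has to evict so the caller can refine that one instead; add_card_count, which the comment above documents, follows directly below. A minimal standalone sketch of that evicting ring-buffer behaviour, with illustrative sizes and names:

#include <cassert>

template <int N>
class HotCache {
  const void* _entries[N];
  int _idx = 0;     // next slot to write
  int _count = 0;   // number of live entries

 public:
  HotCache() { for (int i = 0; i < N; ++i) _entries[i] = nullptr; }

  // Insert a hot card; returns the entry evicted to make room, or nullptr.
  const void* insert(const void* card) {
    const void* evicted = nullptr;
    if (_count == N) {            // full: the slot about to be overwritten
      evicted = _entries[_idx];   // must be processed by the caller
      --_count;
    }
    _entries[_idx] = card;
    _idx = (_idx + 1) % N;
    ++_count;
    return evicted;
  }
};

int main() {
  int cards[3];
  HotCache<2> cache;
  assert(cache.insert(&cards[0]) == nullptr);    // fits, nothing evicted
  assert(cache.insert(&cards[1]) == nullptr);    // fits, nothing evicted
  assert(cache.insert(&cards[2]) == &cards[0]);  // oldest entry falls out
  return 0;
}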
+ int add_card_count(jbyte* card_ptr); + + void print_card_count_histo_range(unsigned* histo, int from, int to, + float& cum_card_pct, + float& cum_travs_pct); + public: + ConcurrentG1Refine(); + ~ConcurrentG1Refine(); + + void init(); // Accomplish some initialization that has to wait. + + // Enabled Conc refinement, waking up thread if necessary. + void enable(); + + // Returns the number of traversals performed since this refiner was enabled. + unsigned disable(); + + // Requires G1ConcRefine_mon to be held. + bool enabled() { return _enabled; } + + // Returns only when G1 concurrent refinement has been enabled. + void wait_for_ConcurrentG1Refine_enabled(); + + // Do one concurrent refinement pass over the card table. Returns "true" + // if heuristics determine that another pass should be done immediately. + bool refine(); + + // Indicate that an in-progress refinement pass should start over. + void set_pya_restart(); + // Indicate that an in-progress refinement pass should quit. + void set_pya_cancel(); + + // Get the appropriate post-yield action. Also sets last_pya. + PostYieldAction get_pya(); + + // The last PYA read by "get_pya". + PostYieldAction get_last_pya(); + + bool do_traversal(); + + ConcurrentG1RefineThread* cg1rThread() { return _cg1rThread; } + + // If this is the first entry for the slot, writes into the cache and + // returns NULL. If it causes an eviction, returns the evicted pointer. + // Otherwise, its a cache hit, and returns NULL. + jbyte* cache_insert(jbyte* card_ptr); + + // Process the cached entries. + void clean_up_cache(int worker_i, G1RemSet* g1rs); + + // Discard entries in the hot cache. + void clear_hot_cache() { + _hot_cache_idx = 0; _n_hot = 0; + } + + bool hot_cache_is_empty() { return _n_hot == 0; } + + bool use_cache() { return _use_cache; } + void set_use_cache(bool b) { + if (b) _use_cache = _def_use_cache; + else _use_cache = false; + } + + void clear_and_record_card_counts(); + void print_final_card_counts(); +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp 2009-08-01 04:20:57.234730393 +0100 @@ -0,0 +1,246 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +#include "incls/_precompiled.incl" +#include "incls/_concurrentG1RefineThread.cpp.incl" + +// ======= Concurrent Mark Thread ======== + +// The CM thread is created when the G1 garbage collector is used + +ConcurrentG1RefineThread:: +ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r) : + ConcurrentGCThread(), + _cg1r(cg1r), + _started(false), + _in_progress(false), + _do_traversal(false), + _vtime_accum(0.0), + _co_tracker(G1CRGroup), + _interval_ms(5.0) +{ + create_and_start(); +} + +const long timeout = 200; // ms. + +void ConcurrentG1RefineThread::traversalBasedRefinement() { + _cg1r->wait_for_ConcurrentG1Refine_enabled(); + MutexLocker x(G1ConcRefine_mon); + while (_cg1r->enabled()) { + MutexUnlocker ux(G1ConcRefine_mon); + ResourceMark rm; + HandleMark hm; + + if (G1TraceConcurrentRefinement) { + gclog_or_tty->print_cr("G1-Refine starting pass"); + } + _sts.join(); + bool no_sleep = _cg1r->refine(); + _sts.leave(); + if (!no_sleep) { + MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); + // We do this only for the timeout; we don't expect this to be signalled. + CGC_lock->wait(Mutex::_no_safepoint_check_flag, timeout); + } + } +} + +void ConcurrentG1RefineThread::queueBasedRefinement() { + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + // Wait for completed log buffers to exist. + { + MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag); + while (!_do_traversal && !dcqs.process_completed_buffers() && + !_should_terminate) { + DirtyCardQ_CBL_mon->wait(Mutex::_no_safepoint_check_flag); + } + } + + if (_should_terminate) { + return; + } + + // Now we take them off (this doesn't hold locks while it applies + // closures.) (If we did a full collection, then we'll do a full + // traversal. + _sts.join(); + if (_do_traversal) { + (void)_cg1r->refine(); + switch (_cg1r->get_last_pya()) { + case PYA_cancel: case PYA_continue: + // Continue was caught and handled inside "refine". If it's still + // "continue" when we get here, we're done. + _do_traversal = false; + break; + case PYA_restart: + assert(_do_traversal, "Because of Full GC."); + break; + } + } else { + int n_logs = 0; + int lower_limit = 0; + double start_vtime_sec; // only used when G1SmoothConcRefine is on + int prev_buffer_num; // only used when G1SmoothConcRefine is on + + if (G1SmoothConcRefine) { + lower_limit = 0; + start_vtime_sec = os::elapsedVTime(); + prev_buffer_num = (int) dcqs.completed_buffers_num(); + } else { + lower_limit = DCQBarrierProcessCompletedThreshold / 4; // For now. + } + while (dcqs.apply_closure_to_completed_buffer(0, lower_limit)) { + double end_vtime_sec; + double elapsed_vtime_sec; + int elapsed_vtime_ms; + int curr_buffer_num; + + if (G1SmoothConcRefine) { + end_vtime_sec = os::elapsedVTime(); + elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; + elapsed_vtime_ms = (int) (elapsed_vtime_sec * 1000.0); + curr_buffer_num = (int) dcqs.completed_buffers_num(); + + if (curr_buffer_num > prev_buffer_num || + curr_buffer_num > DCQBarrierProcessCompletedThreshold) { + decreaseInterval(elapsed_vtime_ms); + } else if (curr_buffer_num < prev_buffer_num) { + increaseInterval(elapsed_vtime_ms); + } + } + + sample_young_list_rs_lengths(); + _co_tracker.update(false); + + if (G1SmoothConcRefine) { + prev_buffer_num = curr_buffer_num; + _sts.leave(); + os::sleep(Thread::current(), (jlong) _interval_ms, false); + _sts.join(); + start_vtime_sec = os::elapsedVTime(); + } + n_logs++; + } + // Make sure we harvest the PYA, if any. 
+ (void)_cg1r->get_pya(); + } + _sts.leave(); +} + +void ConcurrentG1RefineThread::sample_young_list_rs_lengths() { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + G1CollectorPolicy* g1p = g1h->g1_policy(); + if (g1p->adaptive_young_list_length()) { + int regions_visited = 0; + + g1h->young_list_rs_length_sampling_init(); + while (g1h->young_list_rs_length_sampling_more()) { + g1h->young_list_rs_length_sampling_next(); + ++regions_visited; + + // we try to yield every time we visit 10 regions + if (regions_visited == 10) { + if (_sts.should_yield()) { + _sts.yield("G1 refine"); + // we just abandon the iteration + break; + } + regions_visited = 0; + } + } + + g1p->check_prediction_validity(); + } +} + +void ConcurrentG1RefineThread::run() { + initialize_in_thread(); + _vtime_start = os::elapsedVTime(); + wait_for_universe_init(); + + _co_tracker.enable(); + _co_tracker.start(); + + while (!_should_terminate) { + // wait until started is set. + if (G1RSBarrierUseQueue) { + queueBasedRefinement(); + } else { + traversalBasedRefinement(); + } + _sts.join(); + _co_tracker.update(); + _sts.leave(); + if (os::supports_vtime()) { + _vtime_accum = (os::elapsedVTime() - _vtime_start); + } else { + _vtime_accum = 0.0; + } + } + _sts.join(); + _co_tracker.update(true); + _sts.leave(); + assert(_should_terminate, "just checking"); + + terminate(); +} + + +void ConcurrentG1RefineThread::yield() { + if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield"); + _sts.yield("G1 refine"); + if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield-end"); +} + +void ConcurrentG1RefineThread::stop() { + // it is ok to take late safepoints here, if needed + { + MutexLockerEx mu(Terminator_lock); + _should_terminate = true; + } + + { + MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag); + DirtyCardQ_CBL_mon->notify_all(); + } + + { + MutexLockerEx mu(Terminator_lock); + while (!_has_terminated) { + Terminator_lock->wait(); + } + } + if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-stop"); +} + +void ConcurrentG1RefineThread::print() { + gclog_or_tty->print("\"Concurrent G1 Refinement Thread\" "); + Thread::print(); + gclog_or_tty->cr(); +} + +void ConcurrentG1RefineThread::set_do_traversal(bool b) { + _do_traversal = b; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp 2009-08-01 04:20:57.651677428 +0100 @@ -0,0 +1,104 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// Forward Decl. +class ConcurrentG1Refine; + +// The G1 Concurrent Refinement Thread (could be several in the future). + +class ConcurrentG1RefineThread: public ConcurrentGCThread { + friend class VMStructs; + friend class G1CollectedHeap; + + double _vtime_start; // Initial virtual time. + double _vtime_accum; // Initial virtual time. + + public: + virtual void run(); + + private: + ConcurrentG1Refine* _cg1r; + bool _started; + bool _in_progress; + volatile bool _restart; + + COTracker _co_tracker; + double _interval_ms; + + bool _do_traversal; + + void decreaseInterval(int processing_time_ms) { + double min_interval_ms = (double) processing_time_ms; + _interval_ms = 0.8 * _interval_ms; + if (_interval_ms < min_interval_ms) + _interval_ms = min_interval_ms; + } + void increaseInterval(int processing_time_ms) { + double max_interval_ms = 9.0 * (double) processing_time_ms; + _interval_ms = 1.1 * _interval_ms; + if (max_interval_ms > 0 && _interval_ms > max_interval_ms) + _interval_ms = max_interval_ms; + } + + void sleepBeforeNextCycle(); + + void traversalBasedRefinement(); + + void queueBasedRefinement(); + + // For use by G1CollectedHeap, which is a friend. + static SuspendibleThreadSet* sts() { return &_sts; } + + public: + // Constructor + ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r); + + // Printing + void print(); + + // Total virtual time so far. + double vtime_accum() { return _vtime_accum; } + + ConcurrentG1Refine* cg1r() { return _cg1r; } + + + void set_started() { _started = true; } + void clear_started() { _started = false; } + bool started() { return _started; } + + void set_in_progress() { _in_progress = true; } + void clear_in_progress() { _in_progress = false; } + bool in_progress() { return _in_progress; } + + void set_do_traversal(bool b); + bool do_traversal() { return _do_traversal; } + + void sample_young_list_rs_lengths(); + + // Yield for GC + void yield(); + + // shutdown + static void stop(); +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp 2009-08-01 04:20:58.084329797 +0100 @@ -0,0 +1,3983 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
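Under G1SmoothConcRefine the queue-based loop earlier in this patch throttles the refinement thread by sleeping _interval_ms between buffers, shrinking the interval (multiply by 0.8, floored at the last processing time) when completed buffers pile up and stretching it (multiply by 1.1, capped at nine times the processing time) when the backlog is draining, exactly as decreaseInterval/increaseInterval above encode. A minimal standalone sketch of that multiplicative pacing, stripped of the thread machinery (the struct name is illustrative):

#include <cstdio>

struct RefinementPacer {
  double interval_ms = 5.0;   // initial sleep between buffers

  // Backlog growing: refine more eagerly, but never faster than we can process.
  void decrease(int processing_time_ms) {
    double min_interval = (double) processing_time_ms;
    interval_ms *= 0.8;
    if (interval_ms < min_interval) interval_ms = min_interval;
  }
  // Backlog shrinking: back off, but stay within 9x of the processing time.
  void increase(int processing_time_ms) {
    double max_interval = 9.0 * processing_time_ms;
    interval_ms *= 1.1;
    if (max_interval > 0 && interval_ms > max_interval) interval_ms = max_interval;
  }
};

int main() {
  RefinementPacer pacer;
  pacer.decrease(2);                        // buffers accumulating
  std::printf("after decrease: %.2f ms\n", pacer.interval_ms);
  pacer.increase(2);                        // buffers draining
  std::printf("after increase: %.2f ms\n", pacer.interval_ms);
  return 0;
}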
+ * + */ + +#include "incls/_precompiled.incl" +#include "incls/_concurrentMark.cpp.incl" + +// +// CMS Bit Map Wrapper + +CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter): + _bm((uintptr_t*)NULL,0), + _shifter(shifter) { + _bmStartWord = (HeapWord*)(rs.base()); + _bmWordSize = rs.size()/HeapWordSize; // rs.size() is in bytes + ReservedSpace brs(ReservedSpace::allocation_align_size_up( + (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); + + guarantee(brs.is_reserved(), "couldn't allocate CMS bit map"); + // For now we'll just commit all of the bit map up fromt. + // Later on we'll try to be more parsimonious with swap. + guarantee(_virtual_space.initialize(brs, brs.size()), + "couldn't reseve backing store for CMS bit map"); + assert(_virtual_space.committed_size() == brs.size(), + "didn't reserve backing store for all of CMS bit map?"); + _bm.set_map((uintptr_t*)_virtual_space.low()); + assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= + _bmWordSize, "inconsistency in bit map sizing"); + _bm.set_size(_bmWordSize >> _shifter); +} + +HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr, + HeapWord* limit) const { + // First we must round addr *up* to a possible object boundary. + addr = (HeapWord*)align_size_up((intptr_t)addr, + HeapWordSize << _shifter); + size_t addrOffset = heapWordToOffset(addr); + if (limit == NULL) limit = _bmStartWord + _bmWordSize; + size_t limitOffset = heapWordToOffset(limit); + size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset); + HeapWord* nextAddr = offsetToHeapWord(nextOffset); + assert(nextAddr >= addr, "get_next_one postcondition"); + assert(nextAddr == limit || isMarked(nextAddr), + "get_next_one postcondition"); + return nextAddr; +} + +HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr, + HeapWord* limit) const { + size_t addrOffset = heapWordToOffset(addr); + if (limit == NULL) limit = _bmStartWord + _bmWordSize; + size_t limitOffset = heapWordToOffset(limit); + size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset); + HeapWord* nextAddr = offsetToHeapWord(nextOffset); + assert(nextAddr >= addr, "get_next_one postcondition"); + assert(nextAddr == limit || !isMarked(nextAddr), + "get_next_one postcondition"); + return nextAddr; +} + +int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const { + assert((diff & ((1 << _shifter) - 1)) == 0, "argument check"); + return (int) (diff >> _shifter); +} + +bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) { + HeapWord* left = MAX2(_bmStartWord, mr.start()); + HeapWord* right = MIN2(_bmStartWord + _bmWordSize, mr.end()); + if (right > left) { + // Right-open interval [leftOffset, rightOffset). 
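The bitmap wrapper above maps heap words to bit positions through a shifter, so one bit covers 2^shifter heap words: heapWordToOffset is essentially (addr - start) >> shifter, offsetToHeapWord is the inverse, and getNextMarkedWordAddress scans forward from an offset for the next set bit. A minimal standalone sketch of that address-to-offset scheme on a toy bitmap (names and the linear scan are illustrative; the real code uses an optimized BitMap search):

#include <cassert>
#include <cstdint>
#include <vector>

// Toy marking bitmap: one bit per 2^shifter "heap words" starting at 'start'.
class ToyMarkBitMap {
  const uintptr_t*  _start;     // first covered word
  int               _shifter;   // log2(words per bit)
  std::vector<bool> _bits;

 public:
  ToyMarkBitMap(const uintptr_t* start, size_t words, int shifter)
    : _start(start), _shifter(shifter), _bits(words >> shifter) {}

  size_t word_to_offset(const uintptr_t* addr) const {
    return (size_t)(addr - _start) >> _shifter;
  }
  const uintptr_t* offset_to_word(size_t offset) const {
    return _start + (offset << _shifter);
  }
  void mark(const uintptr_t* addr)            { _bits[word_to_offset(addr)] = true; }
  bool is_marked(const uintptr_t* addr) const { return _bits[word_to_offset(addr)]; }

  // Scan forward for the next marked word at or after addr, in the spirit of
  // getNextMarkedWordAddress; returns 'limit' if nothing is marked.
  const uintptr_t* next_marked(const uintptr_t* addr, const uintptr_t* limit) const {
    for (size_t i = word_to_offset(addr); i < word_to_offset(limit); ++i) {
      if (_bits[i]) return offset_to_word(i);
    }
    return limit;
  }
};

int main() {
  uintptr_t heap[64] = {0};        // stands in for the covered region
  ToyMarkBitMap bm(heap, 64, 0);   // shifter 0: one bit per word
  bm.mark(&heap[10]);
  assert(bm.next_marked(&heap[0], &heap[64]) == &heap[10]);
  assert(!bm.is_marked(&heap[3]));
  return 0;
}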
+ return _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right)); + } else { + return true; + } +} + +void CMBitMapRO::mostly_disjoint_range_union(BitMap* from_bitmap, + size_t from_start_index, + HeapWord* to_start_word, + size_t word_num) { + _bm.mostly_disjoint_range_union(from_bitmap, + from_start_index, + heapWordToOffset(to_start_word), + word_num); +} + +#ifndef PRODUCT +bool CMBitMapRO::covers(ReservedSpace rs) const { + // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); + assert(((size_t)_bm.size() * (1 << _shifter)) == _bmWordSize, + "size inconsistency"); + return _bmStartWord == (HeapWord*)(rs.base()) && + _bmWordSize == rs.size()>>LogHeapWordSize; +} +#endif + +void CMBitMap::clearAll() { + _bm.clear(); + return; +} + +void CMBitMap::markRange(MemRegion mr) { + mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); + assert(!mr.is_empty(), "unexpected empty region"); + assert((offsetToHeapWord(heapWordToOffset(mr.end())) == + ((HeapWord *) mr.end())), + "markRange memory region end is not card aligned"); + // convert address range into offset range + _bm.at_put_range(heapWordToOffset(mr.start()), + heapWordToOffset(mr.end()), true); +} + +void CMBitMap::clearRange(MemRegion mr) { + mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); + assert(!mr.is_empty(), "unexpected empty region"); + // convert address range into offset range + _bm.at_put_range(heapWordToOffset(mr.start()), + heapWordToOffset(mr.end()), false); +} + +MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr, + HeapWord* end_addr) { + HeapWord* start = getNextMarkedWordAddress(addr); + start = MIN2(start, end_addr); + HeapWord* end = getNextUnmarkedWordAddress(start); + end = MIN2(end, end_addr); + assert(start <= end, "Consistency check"); + MemRegion mr(start, end); + if (!mr.is_empty()) { + clearRange(mr); + } + return mr; +} + +CMMarkStack::CMMarkStack(ConcurrentMark* cm) : + _base(NULL), _cm(cm) +#ifdef ASSERT + , _drain_in_progress(false) + , _drain_in_progress_yields(false) +#endif +{} + +void CMMarkStack::allocate(size_t size) { + _base = NEW_C_HEAP_ARRAY(oop, size); + if (_base == NULL) + vm_exit_during_initialization("Failed to allocate " + "CM region mark stack"); + _index = 0; + // QQQQ cast ... + _capacity = (jint) size; + _oops_do_bound = -1; + NOT_PRODUCT(_max_depth = 0); +} + +CMMarkStack::~CMMarkStack() { + if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base); +} + +void CMMarkStack::par_push(oop ptr) { + while (true) { + if (isFull()) { + _overflow = true; + return; + } + // Otherwise... + jint index = _index; + jint next_index = index+1; + jint res = Atomic::cmpxchg(next_index, &_index, index); + if (res == index) { + _base[index] = ptr; + // Note that we don't maintain this atomically. We could, but it + // doesn't seem necessary. + NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); + return; + } + // Otherwise, we need to try again. + } +} + +void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) { + while (true) { + if (isFull()) { + _overflow = true; + return; + } + // Otherwise... + jint index = _index; + jint next_index = index + n; + if (next_index > _capacity) { + _overflow = true; + return; + } + jint res = Atomic::cmpxchg(next_index, &_index, index); + if (res == index) { + for (int i = 0; i < n; i++) { + int ind = index + i; + assert(ind < _capacity, "By overflow test above."); + _base[ind] = ptr_arr[i]; + } + NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); + return; + } + // Otherwise, we need to try again. 
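par_push and par_adjoin_arr above claim space on the shared mark stack with a compare-and-swap on _index, retrying when another worker wins the race, and set an overflow flag instead of blocking when the stack is full. A minimal standalone sketch of that claim-by-CAS pattern with std::atomic; note that, as in the code above, the element is stored after the index is claimed, so consumers must not race with producers on the same slots (names and capacity are illustrative):

#include <atomic>
#include <cassert>

template <typename T, int Capacity>
class ParPushStack {
  T _base[Capacity];
  std::atomic<int>  _index{0};
  std::atomic<bool> _overflow{false};

 public:
  // Multiple threads may call this concurrently.
  void par_push(const T& value) {
    while (true) {
      int index = _index.load();
      if (index >= Capacity) { _overflow.store(true); return; }  // full
      int next = index + 1;
      // Try to claim slot 'index'; on failure another thread won, so retry.
      if (_index.compare_exchange_weak(index, next)) {
        _base[index] = value;     // store after the claim; see note above
        return;
      }
    }
  }
  bool overflow() const { return _overflow.load(); }
  int  length()   const { return _index.load(); }
};

int main() {
  ParPushStack<int, 4> stack;
  for (int i = 0; i < 5; ++i) stack.par_push(i);
  assert(stack.length() == 4 && stack.overflow());   // fifth push overflowed
  return 0;
}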
+ } +} + + +void CMMarkStack::par_push_arr(oop* ptr_arr, int n) { + MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); + jint start = _index; + jint next_index = start + n; + if (next_index > _capacity) { + _overflow = true; + return; + } + // Otherwise. + _index = next_index; + for (int i = 0; i < n; i++) { + int ind = start + i; + guarantee(ind < _capacity, "By overflow test above."); + _base[ind] = ptr_arr[i]; + } +} + + +bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) { + MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); + jint index = _index; + if (index == 0) { + *n = 0; + return false; + } else { + int k = MIN2(max, index); + jint new_ind = index - k; + for (int j = 0; j < k; j++) { + ptr_arr[j] = _base[new_ind + j]; + } + _index = new_ind; + *n = k; + return true; + } +} + + +CMRegionStack::CMRegionStack() : _base(NULL) {} + +void CMRegionStack::allocate(size_t size) { + _base = NEW_C_HEAP_ARRAY(MemRegion, size); + if (_base == NULL) + vm_exit_during_initialization("Failed to allocate " + "CM region mark stack"); + _index = 0; + // QQQQ cast ... + _capacity = (jint) size; +} + +CMRegionStack::~CMRegionStack() { + if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base); +} + +void CMRegionStack::push(MemRegion mr) { + assert(mr.word_size() > 0, "Precondition"); + while (true) { + if (isFull()) { + _overflow = true; + return; + } + // Otherwise... + jint index = _index; + jint next_index = index+1; + jint res = Atomic::cmpxchg(next_index, &_index, index); + if (res == index) { + _base[index] = mr; + return; + } + // Otherwise, we need to try again. + } +} + +MemRegion CMRegionStack::pop() { + while (true) { + // Otherwise... + jint index = _index; + + if (index == 0) { + return MemRegion(); + } + jint next_index = index-1; + jint res = Atomic::cmpxchg(next_index, &_index, index); + if (res == index) { + MemRegion mr = _base[next_index]; + if (mr.start() != NULL) { + tmp_guarantee_CM( mr.end() != NULL, "invariant" ); + tmp_guarantee_CM( mr.word_size() > 0, "invariant" ); + return mr; + } else { + // that entry was invalidated... let's skip it + tmp_guarantee_CM( mr.end() == NULL, "invariant" ); + } + } + // Otherwise, we need to try again. + } +} + +bool CMRegionStack::invalidate_entries_into_cset() { + bool result = false; + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + for (int i = 0; i < _oops_do_bound; ++i) { + MemRegion mr = _base[i]; + if (mr.start() != NULL) { + tmp_guarantee_CM( mr.end() != NULL, "invariant"); + tmp_guarantee_CM( mr.word_size() > 0, "invariant" ); + HeapRegion* hr = g1h->heap_region_containing(mr.start()); + tmp_guarantee_CM( hr != NULL, "invariant" ); + if (hr->in_collection_set()) { + // The region points into the collection set + _base[i] = MemRegion(); + result = true; + } + } else { + // that entry was invalidated... 
let's skip it + tmp_guarantee_CM( mr.end() == NULL, "invariant" ); + } + } + return result; +} + +template +bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) { + assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after + || SafepointSynchronize::is_at_safepoint(), + "Drain recursion must be yield-safe."); + bool res = true; + debug_only(_drain_in_progress = true); + debug_only(_drain_in_progress_yields = yield_after); + while (!isEmpty()) { + oop newOop = pop(); + assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop"); + assert(newOop->is_oop(), "Expected an oop"); + assert(bm == NULL || bm->isMarked((HeapWord*)newOop), + "only grey objects on this stack"); + // iterate over the oops in this oop, marking and pushing + // the ones in CMS generation. + newOop->oop_iterate(cl); + if (yield_after && _cm->do_yield_check()) { + res = false; break; + } + } + debug_only(_drain_in_progress = false); + return res; +} + +void CMMarkStack::oops_do(OopClosure* f) { + if (_index == 0) return; + assert(_oops_do_bound != -1 && _oops_do_bound <= _index, + "Bound must be set."); + for (int i = 0; i < _oops_do_bound; i++) { + f->do_oop(&_base[i]); + } + _oops_do_bound = -1; +} + +bool ConcurrentMark::not_yet_marked(oop obj) const { + return (_g1h->is_obj_ill(obj) + || (_g1h->is_in_permanent(obj) + && !nextMarkBitMap()->isMarked((HeapWord*)obj))); +} + +#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away +#pragma warning( disable:4355 ) // 'this' : used in base member initializer list +#endif // _MSC_VER + +ConcurrentMark::ConcurrentMark(ReservedSpace rs, + int max_regions) : + _markBitMap1(rs, MinObjAlignment - 1), + _markBitMap2(rs, MinObjAlignment - 1), + + _parallel_marking_threads(0), + _sleep_factor(0.0), + _marking_task_overhead(1.0), + _cleanup_sleep_factor(0.0), + _cleanup_task_overhead(1.0), + _region_bm(max_regions, false /* in_resource_area*/), + _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >> + CardTableModRefBS::card_shift, + false /* in_resource_area*/), + _prevMarkBitMap(&_markBitMap1), + _nextMarkBitMap(&_markBitMap2), + _at_least_one_mark_complete(false), + + _markStack(this), + _regionStack(), + // _finger set in set_non_marking_state + + _max_task_num(MAX2(ParallelGCThreads, (size_t)1)), + // _active_tasks set in set_non_marking_state + // _tasks set inside the constructor + _task_queues(new CMTaskQueueSet((int) _max_task_num)), + _terminator(ParallelTaskTerminator((int) _max_task_num, _task_queues)), + + _has_overflown(false), + _concurrent(false), + _has_aborted(false), + _restart_for_overflow(false), + _concurrent_marking_in_progress(false), + _should_gray_objects(false), + + // _verbose_level set below + + _init_times(), + _remark_times(), _remark_mark_times(), _remark_weak_ref_times(), + _cleanup_times(), + _total_counting_time(0.0), + _total_rs_scrub_time(0.0), + + _parallel_workers(NULL), + _cleanup_co_tracker(G1CLGroup) +{ + CMVerboseLevel verbose_level = + (CMVerboseLevel) G1MarkingVerboseLevel; + if (verbose_level < no_verbose) + verbose_level = no_verbose; + if (verbose_level > high_verbose) + verbose_level = high_verbose; + _verbose_level = verbose_level; + + if (verbose_low()) + gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", " + "heap end = "PTR_FORMAT, _heap_start, _heap_end); + + _markStack.allocate(G1MarkStackSize); + _regionStack.allocate(G1MarkRegionStackSize); + + // Create & start a ConcurrentMark thread. 
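// Sizing note for the initializer list above: _card_bm gets one bit per
// card, rounded up, and _region_bm one bit per heap region.  Assuming the
// usual 512-byte cards (CardTableModRefBS::card_shift == 9 -- an assumption,
// not something this change states), a 1 GB reserved space needs
// (2^30 + 511) >> 9 = 2,097,152 card bits, i.e. a 256 KB card bitmap.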
+ if (G1ConcMark) { + _cmThread = new ConcurrentMarkThread(this); + assert(cmThread() != NULL, "CM Thread should have been created"); + assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm"); + } else { + _cmThread = NULL; + } + _g1h = G1CollectedHeap::heap(); + assert(CGC_lock != NULL, "Where's the CGC_lock?"); + assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency"); + assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency"); + + SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); + satb_qs.set_buffer_size(G1SATBLogBufferSize); + + int size = (int) MAX2(ParallelGCThreads, (size_t)1); + _par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size); + for (int i = 0 ; i < size; i++) { + _par_cleanup_thread_state[i] = new ParCleanupThreadState; + } + + _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num); + _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num); + + // so that the assertion in MarkingTaskQueue::task_queue doesn't fail + _active_tasks = _max_task_num; + for (int i = 0; i < (int) _max_task_num; ++i) { + CMTaskQueue* task_queue = new CMTaskQueue(); + task_queue->initialize(); + _task_queues->register_queue(i, task_queue); + + _tasks[i] = new CMTask(i, this, task_queue, _task_queues); + _accum_task_vtime[i] = 0.0; + } + + if (ParallelMarkingThreads > ParallelGCThreads) { + vm_exit_during_initialization("Can't have more ParallelMarkingThreads " + "than ParallelGCThreads."); + } + if (ParallelGCThreads == 0) { + // if we are not running with any parallel GC threads we will not + // spawn any marking threads either + _parallel_marking_threads = 0; + _sleep_factor = 0.0; + _marking_task_overhead = 1.0; + } else { + if (ParallelMarkingThreads > 0) { + // notice that ParallelMarkingThreads overwrites G1MarkingOverheadPercent + // if both are set + + _parallel_marking_threads = ParallelMarkingThreads; + _sleep_factor = 0.0; + _marking_task_overhead = 1.0; + } else if (G1MarkingOverheadPercent > 0) { + // we will calculate the number of parallel marking threads + // based on a target overhead with respect to the soft real-time + // goal + + double marking_overhead = (double) G1MarkingOverheadPercent / 100.0; + double overall_cm_overhead = + (double) MaxGCPauseMillis * marking_overhead / + (double) GCPauseIntervalMillis; + double cpu_ratio = 1.0 / (double) os::processor_count(); + double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio); + double marking_task_overhead = + overall_cm_overhead / marking_thread_num * + (double) os::processor_count(); + double sleep_factor = + (1.0 - marking_task_overhead) / marking_task_overhead; + + _parallel_marking_threads = (size_t) marking_thread_num; + _sleep_factor = sleep_factor; + _marking_task_overhead = marking_task_overhead; + } else { + _parallel_marking_threads = MAX2((ParallelGCThreads + 2) / 4, (size_t)1); + _sleep_factor = 0.0; + _marking_task_overhead = 1.0; + } + + if (parallel_marking_threads() > 1) + _cleanup_task_overhead = 1.0; + else + _cleanup_task_overhead = marking_task_overhead(); + _cleanup_sleep_factor = + (1.0 - cleanup_task_overhead()) / cleanup_task_overhead(); + +#if 0 + gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads()); + gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead()); + gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor()); + gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead()); + gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor()); +#endif 
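// Worked example of the sizing math above (illustrative numbers only, not
// defaults taken from this change): with G1MarkingOverheadPercent = 10,
// MaxGCPauseMillis = 200, GCPauseIntervalMillis = 1000 and 8 processors:
//   marking_overhead      = 10 / 100.0            = 0.10
//   overall_cm_overhead   = 200 * 0.10 / 1000     = 0.02
//   cpu_ratio             = 1.0 / 8               = 0.125
//   marking_thread_num    = ceil(0.02 / 0.125)    = 1
//   marking_task_overhead = 0.02 / 1 * 8          = 0.16
//   sleep_factor          = (1 - 0.16) / 0.16     = 5.25
// i.e. one marking thread that sleeps roughly 5.25x as long as it just
// worked, which keeps concurrent marking near the requested overhead.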
+ + guarantee( parallel_marking_threads() > 0, "peace of mind" ); + _parallel_workers = new WorkGang("Parallel Marking Threads", + (int) parallel_marking_threads(), false, true); + if (_parallel_workers == NULL) + vm_exit_during_initialization("Failed necessary allocation."); + } + + // so that the call below can read a sensible value + _heap_start = (HeapWord*) rs.base(); + set_non_marking_state(); +} + +void ConcurrentMark::update_g1_committed(bool force) { + // If concurrent marking is not in progress, then we do not need to + // update _heap_end. This has a subtle and important + // side-effect. Imagine that two evacuation pauses happen between + // marking completion and remark. The first one can grow the + // heap (hence now the finger is below the heap end). Then, the + // second one could unnecessarily push regions on the region + // stack. This causes the invariant that the region stack is empty + // at the beginning of remark to be false. By ensuring that we do + // not observe heap expansions after marking is complete, then we do + // not have this problem. + if (!concurrent_marking_in_progress() && !force) + return; + + MemRegion committed = _g1h->g1_committed(); + tmp_guarantee_CM( committed.start() == _heap_start, + "start shouldn't change" ); + HeapWord* new_end = committed.end(); + if (new_end > _heap_end) { + // The heap has been expanded. + + _heap_end = new_end; + } + // Notice that the heap can also shrink. However, this only happens + // during a Full GC (at least currently) and the entire marking + // phase will bail out and the task will not be restarted. So, let's + // do nothing. +} + +void ConcurrentMark::reset() { + // Starting values for these two. This should be called in a STW + // phase. CM will be notified of any future g1_committed expansions + // will be at the end of evacuation pauses, when tasks are + // inactive. + MemRegion committed = _g1h->g1_committed(); + _heap_start = committed.start(); + _heap_end = committed.end(); + + guarantee( _heap_start != NULL && + _heap_end != NULL && + _heap_start < _heap_end, "heap bounds should look ok" ); + + // reset all the marking data structures and any necessary flags + clear_marking_state(); + + if (verbose_low()) + gclog_or_tty->print_cr("[global] resetting"); + + // We do reset all of them, since different phases will use + // different number of active threads. So, it's easiest to have all + // of them ready. + for (int i = 0; i < (int) _max_task_num; ++i) + _tasks[i]->reset(_nextMarkBitMap); + + // we need this to make sure that the flag is on during the evac + // pause with initial mark piggy-backed + set_concurrent_marking_in_progress(); +} + +void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) { + guarantee( active_tasks <= _max_task_num, "we should not have more" ); + + _active_tasks = active_tasks; + // Need to update the three data structures below according to the + // number of active threads for this phase. + _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues); + _first_overflow_barrier_sync.set_n_workers((int) active_tasks); + _second_overflow_barrier_sync.set_n_workers((int) active_tasks); + + _concurrent = concurrent; + // We propagate this to all tasks, not just the active ones. + for (int i = 0; i < (int) _max_task_num; ++i) + _tasks[i]->set_concurrent(concurrent); + + if (concurrent) { + set_concurrent_marking_in_progress(); + } else { + // We currently assume that the concurrent flag has been set to + // false before we start remark. 
At this point we should also be + // in a STW phase. + guarantee( !concurrent_marking_in_progress(), "invariant" ); + guarantee( _finger == _heap_end, "only way to get here" ); + update_g1_committed(true); + } +} + +void ConcurrentMark::set_non_marking_state() { + // We set the global marking state to some default values when we're + // not doing marking. + clear_marking_state(); + _active_tasks = 0; + clear_concurrent_marking_in_progress(); +} + +ConcurrentMark::~ConcurrentMark() { + int size = (int) MAX2(ParallelGCThreads, (size_t)1); + for (int i = 0; i < size; i++) delete _par_cleanup_thread_state[i]; + FREE_C_HEAP_ARRAY(ParCleanupThreadState*, + _par_cleanup_thread_state); + + for (int i = 0; i < (int) _max_task_num; ++i) { + delete _task_queues->queue(i); + delete _tasks[i]; + } + delete _task_queues; + FREE_C_HEAP_ARRAY(CMTask*, _max_task_num); +} + +// This closure is used to mark refs into the g1 generation +// from external roots in the CMS bit map. +// Called at the first checkpoint. +// + +#define PRINT_REACHABLE_AT_INITIAL_MARK 0 +#if PRINT_REACHABLE_AT_INITIAL_MARK +static FILE* reachable_file = NULL; + +class PrintReachableClosure: public OopsInGenClosure { + CMBitMap* _bm; + int _level; +public: + PrintReachableClosure(CMBitMap* bm) : + _bm(bm), _level(0) { + guarantee(reachable_file != NULL, "pre-condition"); + } + void do_oop(oop* p) { + oop obj = *p; + HeapWord* obj_addr = (HeapWord*)obj; + if (obj == NULL) return; + fprintf(reachable_file, "%d: "PTR_FORMAT" -> "PTR_FORMAT" (%d)\n", + _level, p, (void*) obj, _bm->isMarked(obj_addr)); + if (!_bm->isMarked(obj_addr)) { + _bm->mark(obj_addr); + _level++; + obj->oop_iterate(this); + _level--; + } + } +}; +#endif // PRINT_REACHABLE_AT_INITIAL_MARK + +#define SEND_HEAP_DUMP_TO_FILE 0 +#if SEND_HEAP_DUMP_TO_FILE +static FILE* heap_dump_file = NULL; +#endif // SEND_HEAP_DUMP_TO_FILE + +void ConcurrentMark::clearNextBitmap() { + guarantee(!G1CollectedHeap::heap()->mark_in_progress(), "Precondition."); + + // clear the mark bitmap (no grey objects to start with). + // We need to do this in chunks and offer to yield in between + // each chunk. + HeapWord* start = _nextMarkBitMap->startWord(); + HeapWord* end = _nextMarkBitMap->endWord(); + HeapWord* cur = start; + size_t chunkSize = M; + while (cur < end) { + HeapWord* next = cur + chunkSize; + if (next > end) + next = end; + MemRegion mr(cur,next); + _nextMarkBitMap->clearRange(mr); + cur = next; + do_yield_check(); + } +} + +class NoteStartOfMarkHRClosure: public HeapRegionClosure { +public: + bool doHeapRegion(HeapRegion* r) { + if (!r->continuesHumongous()) { + r->note_start_of_marking(true); + } + return false; + } +}; + +void ConcurrentMark::checkpointRootsInitialPre() { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + G1CollectorPolicy* g1p = g1h->g1_policy(); + + _has_aborted = false; + + // Find all the reachable objects... +#if PRINT_REACHABLE_AT_INITIAL_MARK + guarantee(reachable_file == NULL, "Protocol"); + char fn_buf[100]; + sprintf(fn_buf, "/tmp/reachable.txt.%d", os::current_process_id()); + reachable_file = fopen(fn_buf, "w"); + // clear the mark bitmap (no grey objects to start with) + _nextMarkBitMap->clearAll(); + PrintReachableClosure prcl(_nextMarkBitMap); + g1h->process_strong_roots( + false, // fake perm gen collection + SharedHeap::SO_AllClasses, + &prcl, // Regular roots + &prcl // Perm Gen Roots + ); + // The root iteration above "consumed" dirty cards in the perm gen. + // Therefore, as a shortcut, we dirty all such cards. 
+ g1h->rem_set()->invalidate(g1h->perm_gen()->used_region(), false); + fclose(reachable_file); + reachable_file = NULL; + // clear the mark bitmap again. + _nextMarkBitMap->clearAll(); + COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); + COMPILER2_PRESENT(DerivedPointerTable::clear()); +#endif // PRINT_REACHABLE_AT_INITIAL_MARK + + // Initialise marking structures. This has to be done in a STW phase. + reset(); +} + +class CMMarkRootsClosure: public OopsInGenClosure { +private: + ConcurrentMark* _cm; + G1CollectedHeap* _g1h; + bool _do_barrier; + +public: + CMMarkRootsClosure(ConcurrentMark* cm, + G1CollectedHeap* g1h, + bool do_barrier) : _cm(cm), _g1h(g1h), + _do_barrier(do_barrier) { } + + virtual void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + + virtual void do_oop(oop* p) { + oop thisOop = *p; + if (thisOop != NULL) { + assert(thisOop->is_oop() || thisOop->mark() == NULL, + "expected an oop, possibly with mark word displaced"); + HeapWord* addr = (HeapWord*)thisOop; + if (_g1h->is_in_g1_reserved(addr)) { + _cm->grayRoot(thisOop); + } + } + if (_do_barrier) { + assert(!_g1h->is_in_g1_reserved(p), + "Should be called on external roots"); + do_barrier(p); + } + } +}; + +void ConcurrentMark::checkpointRootsInitialPost() { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + // For each region note start of marking. + NoteStartOfMarkHRClosure startcl; + g1h->heap_region_iterate(&startcl); + + // Start weak-reference discovery. + ReferenceProcessor* rp = g1h->ref_processor(); + rp->verify_no_references_recorded(); + rp->enable_discovery(); // enable ("weak") refs discovery + rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle + + SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); + satb_mq_set.set_process_completed_threshold(G1SATBProcessCompletedThreshold); + satb_mq_set.set_active_all_threads(true); + + // update_g1_committed() will be called at the end of an evac pause + // when marking is on. So, it's also called at the end of the + // initial-mark pause to update the heap end, if the heap expands + // during it. No need to call it here. + + guarantee( !_cleanup_co_tracker.enabled(), "invariant" ); + + size_t max_marking_threads = + MAX2((size_t) 1, parallel_marking_threads()); + for (int i = 0; i < (int)_max_task_num; ++i) { + _tasks[i]->enable_co_tracker(); + if (i < (int) max_marking_threads) + _tasks[i]->reset_co_tracker(marking_task_overhead()); + else + _tasks[i]->reset_co_tracker(0.0); + } +} + +// Checkpoint the roots into this generation from outside +// this generation. [Note this initial checkpoint need only +// be approximate -- we'll do a catch up phase subsequently.] +void ConcurrentMark::checkpointRootsInitial() { + assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped"); + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + double start = os::elapsedTime(); + GCOverheadReporter::recordSTWStart(start); + + // If there has not been a GC[n-1] since last GC[n] cycle completed, + // precede our marking with a collection of all + // younger generations to keep floating garbage to a minimum. + // YSR: we won't do this for now -- it's an optimization to be + // done post-beta. 
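// CMMarkRootsClosure above is the heart of the initial-mark pause: every
// root that points into the G1 reserved space is greyed by setting its bit
// in the "next" mark bitmap, so the concurrent phase will scan it later.
// Condensed to its essentials (sketch only):
#if 0
void do_root(oop* p) {
  oop obj = *p;
  if (obj != NULL && _g1h->is_in_g1_reserved((HeapWord*) obj)) {
    _cm->grayRoot(obj);   // parMark() in the next bitmap; fields scanned later
  }
}
#endif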
+ + // YSR: ignoring weak refs for now; will do at bug fixing stage + // EVM: assert(discoveredRefsAreClear()); + + + G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); + g1p->record_concurrent_mark_init_start(); + checkpointRootsInitialPre(); + + // YSR: when concurrent precleaning is in place, we'll + // need to clear the cached card table here + + ResourceMark rm; + HandleMark hm; + + g1h->ensure_parsability(false); + g1h->perm_gen()->save_marks(); + + CMMarkRootsClosure notOlder(this, g1h, false); + CMMarkRootsClosure older(this, g1h, true); + + g1h->set_marking_started(); + g1h->rem_set()->prepare_for_younger_refs_iterate(false); + + g1h->process_strong_roots(false, // fake perm gen collection + SharedHeap::SO_AllClasses, + ¬Older, // Regular roots + &older // Perm Gen Roots + ); + checkpointRootsInitialPost(); + + // Statistics. + double end = os::elapsedTime(); + _init_times.add((end - start) * 1000.0); + GCOverheadReporter::recordSTWEnd(end); + + g1p->record_concurrent_mark_init_end(); +} + +/* + Notice that in the next two methods, we actually leave the STS + during the barrier sync and join it immediately afterwards. If we + do not do this, this then the following deadlock can occur: one + thread could be in the barrier sync code, waiting for the other + thread to also sync up, whereas another one could be trying to + yield, while also waiting for the other threads to sync up too. + + Because the thread that does the sync barrier has left the STS, it + is possible to be suspended for a Full GC or an evacuation pause + could occur. This is actually safe, since the entering the sync + barrier is one of the last things do_marking_step() does, and it + doesn't manipulate any data structures afterwards. +*/ + +void ConcurrentMark::enter_first_sync_barrier(int task_num) { + if (verbose_low()) + gclog_or_tty->print_cr("[%d] entering first barrier", task_num); + + ConcurrentGCThread::stsLeave(); + _first_overflow_barrier_sync.enter(); + ConcurrentGCThread::stsJoin(); + // at this point everyone should have synced up and not be doing any + // more work + + if (verbose_low()) + gclog_or_tty->print_cr("[%d] leaving first barrier", task_num); + + // let task 0 do this + if (task_num == 0) { + // task 0 is responsible for clearing the global data structures + clear_marking_state(); + + if (PrintGC) { + gclog_or_tty->date_stamp(PrintGCDateStamps); + gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]"); + } + } + + // after this, each task should reset its own data structures then + // then go into the second barrier +} + +void ConcurrentMark::enter_second_sync_barrier(int task_num) { + if (verbose_low()) + gclog_or_tty->print_cr("[%d] entering second barrier", task_num); + + ConcurrentGCThread::stsLeave(); + _second_overflow_barrier_sync.enter(); + ConcurrentGCThread::stsJoin(); + // at this point everything should be re-initialised and ready to go + + if (verbose_low()) + gclog_or_tty->print_cr("[%d] leaving second barrier", task_num); +} + +void ConcurrentMark::grayRoot(oop p) { + HeapWord* addr = (HeapWord*) p; + // We can't really check against _heap_start and _heap_end, since it + // is possible during an evacuation pause with piggy-backed + // initial-mark that the committed space is expanded during the + // pause without CM observing this change. So the assertions below + // is a bit conservative; but better than nothing. 
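// The two overflow barriers above implement the restart protocol: all tasks
// rendezvous, task 0 alone resets the global marking state, each task then
// resets its own local state, and a second rendezvous keeps anyone from
// restarting early.  Each task leaves the suspendible thread set around the
// sync so a safepoint cannot deadlock against tasks parked in the barrier.
// Schematically (sketch only):
#if 0
ConcurrentGCThread::stsLeave();
_first_overflow_barrier_sync.enter();        // everyone stops pushing work
ConcurrentGCThread::stsJoin();
if (task_num == 0) clear_marking_state();    // one task resets shared state
// ...each task resets its private structures here...
ConcurrentGCThread::stsLeave();
_second_overflow_barrier_sync.enter();       // now it is safe to restart
ConcurrentGCThread::stsJoin();
#endif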
+ tmp_guarantee_CM( _g1h->g1_committed().contains(addr), + "address should be within the heap bounds" ); + + if (!_nextMarkBitMap->isMarked(addr)) + _nextMarkBitMap->parMark(addr); +} + +void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) { + // The objects on the region have already been marked "in bulk" by + // the caller. We only need to decide whether to push the region on + // the region stack or not. + + if (!concurrent_marking_in_progress() || !_should_gray_objects) + // We're done with marking and waiting for remark. We do not need to + // push anything else on the region stack. + return; + + HeapWord* finger = _finger; + + if (verbose_low()) + gclog_or_tty->print_cr("[global] attempting to push " + "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at " + PTR_FORMAT, mr.start(), mr.end(), finger); + + if (mr.start() < finger) { + // The finger is always heap region aligned and it is not possible + // for mr to span heap regions. + tmp_guarantee_CM( mr.end() <= finger, "invariant" ); + + tmp_guarantee_CM( mr.start() <= mr.end() && + _heap_start <= mr.start() && + mr.end() <= _heap_end, + "region boundaries should fall within the committed space" ); + if (verbose_low()) + gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") " + "below the finger, pushing it", + mr.start(), mr.end()); + + if (!region_stack_push(mr)) { + if (verbose_low()) + gclog_or_tty->print_cr("[global] region stack has overflown."); + } + } +} + +void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) { + // The object is not marked by the caller. We need to at least mark + // it and maybe push in on the stack. + + HeapWord* addr = (HeapWord*)p; + if (!_nextMarkBitMap->isMarked(addr)) { + // We definitely need to mark it, irrespective whether we bail out + // because we're done with marking. + if (_nextMarkBitMap->parMark(addr)) { + if (!concurrent_marking_in_progress() || !_should_gray_objects) + // If we're done with concurrent marking and we're waiting for + // remark, then we're not pushing anything on the stack. + return; + + // No OrderAccess:store_load() is needed. 
It is implicit in the + // CAS done in parMark(addr) above + HeapWord* finger = _finger; + + if (addr < finger) { + if (!mark_stack_push(oop(addr))) { + if (verbose_low()) + gclog_or_tty->print_cr("[global] global stack overflow " + "during parMark"); + } + } + } + } +} + +class CMConcurrentMarkingTask: public AbstractGangTask { +private: + ConcurrentMark* _cm; + ConcurrentMarkThread* _cmt; + +public: + void work(int worker_i) { + guarantee( Thread::current()->is_ConcurrentGC_thread(), + "this should only be done by a conc GC thread" ); + + double start_vtime = os::elapsedVTime(); + + ConcurrentGCThread::stsJoin(); + + guarantee( (size_t)worker_i < _cm->active_tasks(), "invariant" ); + CMTask* the_task = _cm->task(worker_i); + the_task->start_co_tracker(); + the_task->record_start_time(); + if (!_cm->has_aborted()) { + do { + double start_vtime_sec = os::elapsedVTime(); + double start_time_sec = os::elapsedTime(); + the_task->do_marking_step(10.0); + double end_time_sec = os::elapsedTime(); + double end_vtime_sec = os::elapsedVTime(); + double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; + double elapsed_time_sec = end_time_sec - start_time_sec; + _cm->clear_has_overflown(); + + bool ret = _cm->do_yield_check(worker_i); + + jlong sleep_time_ms; + if (!_cm->has_aborted() && the_task->has_aborted()) { + sleep_time_ms = + (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0); + ConcurrentGCThread::stsLeave(); + os::sleep(Thread::current(), sleep_time_ms, false); + ConcurrentGCThread::stsJoin(); + } + double end_time2_sec = os::elapsedTime(); + double elapsed_time2_sec = end_time2_sec - start_time_sec; + + the_task->update_co_tracker(); + +#if 0 + gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, " + "overhead %1.4lf", + elapsed_vtime_sec * 1000.0, (double) sleep_time_ms, + the_task->conc_overhead(os::elapsedTime()) * 8.0); + gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms", + elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0); +#endif + } while (!_cm->has_aborted() && the_task->has_aborted()); + } + the_task->record_end_time(); + guarantee( !the_task->has_aborted() || _cm->has_aborted(), "invariant" ); + + ConcurrentGCThread::stsLeave(); + + double end_vtime = os::elapsedVTime(); + the_task->update_co_tracker(true); + _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime); + } + + CMConcurrentMarkingTask(ConcurrentMark* cm, + ConcurrentMarkThread* cmt) : + AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { } + + ~CMConcurrentMarkingTask() { } +}; + +void ConcurrentMark::markFromRoots() { + // we might be tempted to assert that: + // assert(asynch == !SafepointSynchronize::is_at_safepoint(), + // "inconsistent argument?"); + // However that wouldn't be right, because it's possible that + // a safepoint is indeed in progress as a younger generation + // stop-the-world GC happens even as we mark in this generation. + + _restart_for_overflow = false; + + set_phase(MAX2((size_t) 1, parallel_marking_threads()), true); + + CMConcurrentMarkingTask markingTask(this, cmThread()); + if (parallel_marking_threads() > 0) + _parallel_workers->run_task(&markingTask); + else + markingTask.work(0); + print_stats(); +} + +void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { + // world is stopped at this checkpoint + assert(SafepointSynchronize::is_at_safepoint(), + "world should be stopped"); + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + // If a full collection has happened, we shouldn't do this. 
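// Throttling note for CMConcurrentMarkingTask::work() above: when a marking
// step aborts because it used up its time slice (and marking as a whole has
// not aborted), the worker sleeps for elapsed_vtime * sleep_factor before
// the next step.  With the illustrative sleep_factor of 5.25 from the
// sizing example earlier, a step that consumed 8 ms of CPU time would be
// followed by a sleep of roughly 42 ms.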
+ if (has_aborted()) { + g1h->set_marking_complete(); // So bitmap clearing isn't confused + return; + } + + G1CollectorPolicy* g1p = g1h->g1_policy(); + g1p->record_concurrent_mark_remark_start(); + + double start = os::elapsedTime(); + GCOverheadReporter::recordSTWStart(start); + + checkpointRootsFinalWork(); + + double mark_work_end = os::elapsedTime(); + + weakRefsWork(clear_all_soft_refs); + + if (has_overflown()) { + // Oops. We overflowed. Restart concurrent marking. + _restart_for_overflow = true; + // Clear the flag. We do not need it any more. + clear_has_overflown(); + if (G1TraceMarkStackOverflow) + gclog_or_tty->print_cr("\nRemark led to restart for overflow."); + } else { + // We're done with marking. + JavaThread::satb_mark_queue_set().set_active_all_threads(false); + } + +#if VERIFY_OBJS_PROCESSED + _scan_obj_cl.objs_processed = 0; + ThreadLocalObjQueue::objs_enqueued = 0; +#endif + + // Statistics + double now = os::elapsedTime(); + _remark_mark_times.add((mark_work_end - start) * 1000.0); + _remark_weak_ref_times.add((now - mark_work_end) * 1000.0); + _remark_times.add((now - start) * 1000.0); + + GCOverheadReporter::recordSTWEnd(now); + for (int i = 0; i < (int)_max_task_num; ++i) + _tasks[i]->disable_co_tracker(); + _cleanup_co_tracker.enable(); + _cleanup_co_tracker.reset(cleanup_task_overhead()); + g1p->record_concurrent_mark_remark_end(); +} + + +#define CARD_BM_TEST_MODE 0 + +class CalcLiveObjectsClosure: public HeapRegionClosure { + + CMBitMapRO* _bm; + ConcurrentMark* _cm; + COTracker* _co_tracker; + bool _changed; + bool _yield; + size_t _words_done; + size_t _tot_live; + size_t _tot_used; + size_t _regions_done; + double _start_vtime_sec; + + BitMap* _region_bm; + BitMap* _card_bm; + intptr_t _bottom_card_num; + bool _final; + + void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) { + for (intptr_t i = start_card_num; i <= last_card_num; i++) { +#if CARD_BM_TEST_MODE + guarantee(_card_bm->at(i - _bottom_card_num), + "Should already be set."); +#else + _card_bm->par_at_put(i - _bottom_card_num, 1); +#endif + } + } + +public: + CalcLiveObjectsClosure(bool final, + CMBitMapRO *bm, ConcurrentMark *cm, + BitMap* region_bm, BitMap* card_bm, + COTracker* co_tracker) : + _bm(bm), _cm(cm), _changed(false), _yield(true), + _words_done(0), _tot_live(0), _tot_used(0), + _region_bm(region_bm), _card_bm(card_bm), + _final(final), _co_tracker(co_tracker), + _regions_done(0), _start_vtime_sec(0.0) + { + _bottom_card_num = + intptr_t(uintptr_t(G1CollectedHeap::heap()->reserved_region().start()) >> + CardTableModRefBS::card_shift); + } + + bool doHeapRegion(HeapRegion* hr) { + if (_co_tracker != NULL) + _co_tracker->update(); + + if (!_final && _regions_done == 0) + _start_vtime_sec = os::elapsedVTime(); + + if (hr->continuesHumongous()) { + HeapRegion* hum_start = hr->humongous_start_region(); + // If the head region of the humongous region has been determined + // to be alive, then all the tail regions should be marked + // such as well. + if (_region_bm->at(hum_start->hrs_index())) { + _region_bm->par_at_put(hr->hrs_index(), 1); + } + return false; + } + + HeapWord* nextTop = hr->next_top_at_mark_start(); + HeapWord* start = hr->top_at_conc_mark_count(); + assert(hr->bottom() <= start && start <= hr->end() && + hr->bottom() <= nextTop && nextTop <= hr->end() && + start <= nextTop, + "Preconditions."); + // Otherwise, record the number of word's we'll examine. + size_t words_done = (nextTop - start); + // Find the first marked object at or after "start". 
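// The counting loop below only visits marked (live) words between
// top_at_conc_mark_count and next_top_at_mark_start, using the bitmap to
// jump from one live object to the next.  Stripped of the card bookkeeping
// it is essentially (sketch only):
#if 0
size_t live = 0;
HeapWord* cur = _bm->getNextMarkedWordAddress(start, nextTop);
while (cur < nextTop) {
  oop obj = oop(cur);
  live += obj->size() * HeapWordSize;               // live bytes in the region
  cur = _bm->getNextMarkedWordAddress(cur + 1, nextTop);
}
#endif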
+ start = _bm->getNextMarkedWordAddress(start, nextTop); + size_t marked_bytes = 0; + + // Below, the term "card num" means the result of shifting an address + // by the card shift -- address 0 corresponds to card number 0. One + // must subtract the card num of the bottom of the heap to obtain a + // card table index. + // The first card num of the sequence of live cards currently being + // constructed. -1 ==> no sequence. + intptr_t start_card_num = -1; + // The last card num of the sequence of live cards currently being + // constructed. -1 ==> no sequence. + intptr_t last_card_num = -1; + + while (start < nextTop) { + if (_yield && _cm->do_yield_check()) { + // We yielded. It might be for a full collection, in which case + // all bets are off; terminate the traversal. + if (_cm->has_aborted()) { + _changed = false; + return true; + } else { + // Otherwise, it might be a collection pause, and the region + // we're looking at might be in the collection set. We'll + // abandon this region. + return false; + } + } + oop obj = oop(start); + int obj_sz = obj->size(); + // The card num of the start of the current object. + intptr_t obj_card_num = + intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift); + + HeapWord* obj_last = start + obj_sz - 1; + intptr_t obj_last_card_num = + intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift); + + if (obj_card_num != last_card_num) { + if (start_card_num == -1) { + assert(last_card_num == -1, "Both or neither."); + start_card_num = obj_card_num; + } else { + assert(last_card_num != -1, "Both or neither."); + assert(obj_card_num >= last_card_num, "Inv"); + if ((obj_card_num - last_card_num) > 1) { + // Mark the last run, and start a new one. + mark_card_num_range(start_card_num, last_card_num); + start_card_num = obj_card_num; + } + } +#if CARD_BM_TEST_MODE + /* + gclog_or_tty->print_cr("Setting bits from %d/%d.", + obj_card_num - _bottom_card_num, + obj_last_card_num - _bottom_card_num); + */ + for (intptr_t j = obj_card_num; j <= obj_last_card_num; j++) { + _card_bm->par_at_put(j - _bottom_card_num, 1); + } +#endif + } + // In any case, we set the last card num. + last_card_num = obj_last_card_num; + + marked_bytes += obj_sz * HeapWordSize; + // Find the next marked object after this one. + start = _bm->getNextMarkedWordAddress(start + 1, nextTop); + _changed = true; + } + // Handle the last range, if any. + if (start_card_num != -1) + mark_card_num_range(start_card_num, last_card_num); + if (_final) { + // Mark the allocated-since-marking portion... + HeapWord* tp = hr->top(); + if (nextTop < tp) { + start_card_num = + intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift); + last_card_num = + intptr_t(uintptr_t(tp) >> CardTableModRefBS::card_shift); + mark_card_num_range(start_card_num, last_card_num); + // This definitely means the region has live objects. + _region_bm->par_at_put(hr->hrs_index(), 1); + } + } + + hr->add_to_marked_bytes(marked_bytes); + // Update the live region bitmap. 
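// Card arithmetic used above, as a concrete example (assuming the usual
// 512-byte cards, i.e. CardTableModRefBS::card_shift == 9 -- an assumption,
// not something this change states): for an object at 0x700012200 in a heap
// whose reserved region starts at 0x700000000,
//   obj_card_num     = 0x700012200 >> 9 = 0x3800091
//   _bottom_card_num = 0x700000000 >> 9 = 0x3800000
// so the bit set in _card_bm is index 0x91, i.e. card 145 of the heap.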
+ if (marked_bytes > 0) { + _region_bm->par_at_put(hr->hrs_index(), 1); + } + hr->set_top_at_conc_mark_count(nextTop); + _tot_live += hr->next_live_bytes(); + _tot_used += hr->used(); + _words_done = words_done; + + if (!_final) { + ++_regions_done; + if (_regions_done % 10 == 0) { + double end_vtime_sec = os::elapsedVTime(); + double elapsed_vtime_sec = end_vtime_sec - _start_vtime_sec; + if (elapsed_vtime_sec > (10.0 / 1000.0)) { + jlong sleep_time_ms = + (jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0); +#if 0 + gclog_or_tty->print_cr("CL: elapsed %1.4lf ms, sleep %1.4lf ms, " + "overhead %1.4lf", + elapsed_vtime_sec * 1000.0, (double) sleep_time_ms, + _co_tracker->concOverhead(os::elapsedTime())); +#endif + os::sleep(Thread::current(), sleep_time_ms, false); + _start_vtime_sec = end_vtime_sec; + } + } + } + + return false; + } + + bool changed() { return _changed; } + void reset() { _changed = false; _words_done = 0; } + void no_yield() { _yield = false; } + size_t words_done() { return _words_done; } + size_t tot_live() { return _tot_live; } + size_t tot_used() { return _tot_used; } +}; + + +void ConcurrentMark::calcDesiredRegions() { + guarantee( _cleanup_co_tracker.enabled(), "invariant" ); + _cleanup_co_tracker.start(); + + _region_bm.clear(); + _card_bm.clear(); + CalcLiveObjectsClosure calccl(false /*final*/, + nextMarkBitMap(), this, + &_region_bm, &_card_bm, + &_cleanup_co_tracker); + G1CollectedHeap *g1h = G1CollectedHeap::heap(); + g1h->heap_region_iterate(&calccl); + + do { + calccl.reset(); + g1h->heap_region_iterate(&calccl); + } while (calccl.changed()); + + _cleanup_co_tracker.update(true); +} + +class G1ParFinalCountTask: public AbstractGangTask { +protected: + G1CollectedHeap* _g1h; + CMBitMap* _bm; + size_t _n_workers; + size_t *_live_bytes; + size_t *_used_bytes; + BitMap* _region_bm; + BitMap* _card_bm; +public: + G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm, + BitMap* region_bm, BitMap* card_bm) : + AbstractGangTask("G1 final counting"), _g1h(g1h), + _bm(bm), _region_bm(region_bm), _card_bm(card_bm) + { + if (ParallelGCThreads > 0) + _n_workers = _g1h->workers()->total_workers(); + else + _n_workers = 1; + _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); + _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); + } + + ~G1ParFinalCountTask() { + FREE_C_HEAP_ARRAY(size_t, _live_bytes); + FREE_C_HEAP_ARRAY(size_t, _used_bytes); + } + + void work(int i) { + CalcLiveObjectsClosure calccl(true /*final*/, + _bm, _g1h->concurrent_mark(), + _region_bm, _card_bm, + NULL /* CO tracker */); + calccl.no_yield(); + if (ParallelGCThreads > 0) { + _g1h->heap_region_par_iterate_chunked(&calccl, i, + HeapRegion::FinalCountClaimValue); + } else { + _g1h->heap_region_iterate(&calccl); + } + assert(calccl.complete(), "Shouldn't have yielded!"); + + guarantee( (size_t)i < _n_workers, "invariant" ); + _live_bytes[i] = calccl.tot_live(); + _used_bytes[i] = calccl.tot_used(); + } + size_t live_bytes() { + size_t live_bytes = 0; + for (size_t i = 0; i < _n_workers; ++i) + live_bytes += _live_bytes[i]; + return live_bytes; + } + size_t used_bytes() { + size_t used_bytes = 0; + for (size_t i = 0; i < _n_workers; ++i) + used_bytes += _used_bytes[i]; + return used_bytes; + } +}; + +class G1ParNoteEndTask; + +class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { + G1CollectedHeap* _g1; + int _worker_num; + size_t _max_live_bytes; + size_t _regions_claimed; + size_t _freed_bytes; + size_t _cleared_h_regions; + size_t _freed_regions; + UncleanRegionList* 
_unclean_region_list; + double _claimed_region_time; + double _max_region_time; + +public: + G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, + UncleanRegionList* list, + int worker_num); + size_t freed_bytes() { return _freed_bytes; } + size_t cleared_h_regions() { return _cleared_h_regions; } + size_t freed_regions() { return _freed_regions; } + UncleanRegionList* unclean_region_list() { + return _unclean_region_list; + } + + bool doHeapRegion(HeapRegion *r); + + size_t max_live_bytes() { return _max_live_bytes; } + size_t regions_claimed() { return _regions_claimed; } + double claimed_region_time_sec() { return _claimed_region_time; } + double max_region_time_sec() { return _max_region_time; } +}; + +class G1ParNoteEndTask: public AbstractGangTask { + friend class G1NoteEndOfConcMarkClosure; +protected: + G1CollectedHeap* _g1h; + size_t _max_live_bytes; + size_t _freed_bytes; + ConcurrentMark::ParCleanupThreadState** _par_cleanup_thread_state; +public: + G1ParNoteEndTask(G1CollectedHeap* g1h, + ConcurrentMark::ParCleanupThreadState** + par_cleanup_thread_state) : + AbstractGangTask("G1 note end"), _g1h(g1h), + _max_live_bytes(0), _freed_bytes(0), + _par_cleanup_thread_state(par_cleanup_thread_state) + {} + + void work(int i) { + double start = os::elapsedTime(); + G1NoteEndOfConcMarkClosure g1_note_end(_g1h, + &_par_cleanup_thread_state[i]->list, + i); + if (ParallelGCThreads > 0) { + _g1h->heap_region_par_iterate_chunked(&g1_note_end, i, + HeapRegion::NoteEndClaimValue); + } else { + _g1h->heap_region_iterate(&g1_note_end); + } + assert(g1_note_end.complete(), "Shouldn't have yielded!"); + + // Now finish up freeing the current thread's regions. + _g1h->finish_free_region_work(g1_note_end.freed_bytes(), + g1_note_end.cleared_h_regions(), + 0, NULL); + { + MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); + _max_live_bytes += g1_note_end.max_live_bytes(); + _freed_bytes += g1_note_end.freed_bytes(); + } + double end = os::elapsedTime(); + if (G1PrintParCleanupStats) { + gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] " + "claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n", + i, start, end, (end-start)*1000.0, + g1_note_end.regions_claimed(), + g1_note_end.claimed_region_time_sec()*1000.0, + g1_note_end.max_region_time_sec()*1000.0); + } + } + size_t max_live_bytes() { return _max_live_bytes; } + size_t freed_bytes() { return _freed_bytes; } +}; + +class G1ParScrubRemSetTask: public AbstractGangTask { +protected: + G1RemSet* _g1rs; + BitMap* _region_bm; + BitMap* _card_bm; +public: + G1ParScrubRemSetTask(G1CollectedHeap* g1h, + BitMap* region_bm, BitMap* card_bm) : + AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), + _region_bm(region_bm), _card_bm(card_bm) + {} + + void work(int i) { + if (ParallelGCThreads > 0) { + _g1rs->scrub_par(_region_bm, _card_bm, i, + HeapRegion::ScrubRemSetClaimValue); + } else { + _g1rs->scrub(_region_bm, _card_bm); + } + } + +}; + +G1NoteEndOfConcMarkClosure:: +G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, + UncleanRegionList* list, + int worker_num) + : _g1(g1), _worker_num(worker_num), + _max_live_bytes(0), _regions_claimed(0), + _freed_bytes(0), _cleared_h_regions(0), _freed_regions(0), + _claimed_region_time(0.0), _max_region_time(0.0), + _unclean_region_list(list) +{} + +bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *r) { + // We use a claim value of zero here because all regions + // were claimed with value 1 in the FinalCount task. 
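// G1ParFinalCountTask above uses the usual per-worker reduction: each worker
// writes its totals into its own slot of _live_bytes / _used_bytes with no
// locking, and live_bytes()/used_bytes() sum the slots once all workers are
// done (G1ParNoteEndTask instead accumulates under ParGCRareEvent_lock).
// The array-based pattern in isolation (sketch; names hypothetical):
#if 0
size_t* per_worker = NEW_C_HEAP_ARRAY(size_t, n_workers);
// worker i, running in parallel, only ever touches per_worker[i]:
//   per_worker[i] = bytes_counted_by_worker_i;
// after the gang finishes, a single thread reduces:
size_t total = 0;
for (size_t i = 0; i < n_workers; ++i) total += per_worker[i];
FREE_C_HEAP_ARRAY(size_t, per_worker);
#endif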
+ r->reset_gc_time_stamp(); + if (!r->continuesHumongous()) { + double start = os::elapsedTime(); + _regions_claimed++; + r->note_end_of_marking(); + _max_live_bytes += r->max_live_bytes(); + _g1->free_region_if_totally_empty_work(r, + _freed_bytes, + _cleared_h_regions, + _freed_regions, + _unclean_region_list, + true /*par*/); + double region_time = (os::elapsedTime() - start); + _claimed_region_time += region_time; + if (region_time > _max_region_time) _max_region_time = region_time; + } + return false; +} + +void ConcurrentMark::cleanup() { + // world is stopped at this checkpoint + assert(SafepointSynchronize::is_at_safepoint(), + "world should be stopped"); + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + // If a full collection has happened, we shouldn't do this. + if (has_aborted()) { + g1h->set_marking_complete(); // So bitmap clearing isn't confused + return; + } + + _cleanup_co_tracker.disable(); + + G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); + g1p->record_concurrent_mark_cleanup_start(); + + double start = os::elapsedTime(); + GCOverheadReporter::recordSTWStart(start); + + // Do counting once more with the world stopped for good measure. + G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), + &_region_bm, &_card_bm); + if (ParallelGCThreads > 0) { + assert(g1h->check_heap_region_claim_values( + HeapRegion::InitialClaimValue), + "sanity check"); + + int n_workers = g1h->workers()->total_workers(); + g1h->set_par_threads(n_workers); + g1h->workers()->run_task(&g1_par_count_task); + g1h->set_par_threads(0); + + assert(g1h->check_heap_region_claim_values( + HeapRegion::FinalCountClaimValue), + "sanity check"); + } else { + g1_par_count_task.work(0); + } + + size_t known_garbage_bytes = + g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes(); +#if 0 + gclog_or_tty->print_cr("used %1.2lf, live %1.2lf, garbage %1.2lf", + (double) g1_par_count_task.used_bytes() / (double) (1024 * 1024), + (double) g1_par_count_task.live_bytes() / (double) (1024 * 1024), + (double) known_garbage_bytes / (double) (1024 * 1024)); +#endif // 0 + g1p->set_known_garbage_bytes(known_garbage_bytes); + + size_t start_used_bytes = g1h->used(); + _at_least_one_mark_complete = true; + g1h->set_marking_complete(); + + double count_end = os::elapsedTime(); + double this_final_counting_time = (count_end - start); + if (G1PrintParCleanupStats) { + gclog_or_tty->print_cr("Cleanup:"); + gclog_or_tty->print_cr(" Finalize counting: %8.3f ms", + this_final_counting_time*1000.0); + } + _total_counting_time += this_final_counting_time; + + // Install newly created mark bitMap as "prev". + swapMarkBitMaps(); + + g1h->reset_gc_time_stamp(); + + // Note end of marking in all heap regions. + double note_end_start = os::elapsedTime(); + G1ParNoteEndTask g1_par_note_end_task(g1h, _par_cleanup_thread_state); + if (ParallelGCThreads > 0) { + int n_workers = g1h->workers()->total_workers(); + g1h->set_par_threads(n_workers); + g1h->workers()->run_task(&g1_par_note_end_task); + g1h->set_par_threads(0); + + assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue), + "sanity check"); + } else { + g1_par_note_end_task.work(0); + } + g1h->set_unclean_regions_coming(true); + double note_end_end = os::elapsedTime(); + // Tell the mutators that there might be unclean regions coming... 
+ if (G1PrintParCleanupStats) { + gclog_or_tty->print_cr(" note end of marking: %8.3f ms.", + (note_end_end - note_end_start)*1000.0); + } + + + // call below, since it affects the metric by which we sort the heap + // regions. + if (G1ScrubRemSets) { + double rs_scrub_start = os::elapsedTime(); + G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); + if (ParallelGCThreads > 0) { + int n_workers = g1h->workers()->total_workers(); + g1h->set_par_threads(n_workers); + g1h->workers()->run_task(&g1_par_scrub_rs_task); + g1h->set_par_threads(0); + + assert(g1h->check_heap_region_claim_values( + HeapRegion::ScrubRemSetClaimValue), + "sanity check"); + } else { + g1_par_scrub_rs_task.work(0); + } + + double rs_scrub_end = os::elapsedTime(); + double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); + _total_rs_scrub_time += this_rs_scrub_time; + } + + // this will also free any regions totally full of garbage objects, + // and sort the regions. + g1h->g1_policy()->record_concurrent_mark_cleanup_end( + g1_par_note_end_task.freed_bytes(), + g1_par_note_end_task.max_live_bytes()); + + // Statistics. + double end = os::elapsedTime(); + _cleanup_times.add((end - start) * 1000.0); + GCOverheadReporter::recordSTWEnd(end); + + // G1CollectedHeap::heap()->print(); + // gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d", + // G1CollectedHeap::heap()->get_gc_time_stamp()); + + if (PrintGC || PrintGCDetails) { + g1h->print_size_transition(gclog_or_tty, + start_used_bytes, + g1h->used(), + g1h->capacity()); + } + + size_t cleaned_up_bytes = start_used_bytes - g1h->used(); + g1p->decrease_known_garbage_bytes(cleaned_up_bytes); + + // We need to make this be a "collection" so any collection pause that + // races with it goes around and waits for completeCleanup to finish. + g1h->increment_total_collections(); + +#ifndef PRODUCT + if (VerifyDuringGC) { + G1CollectedHeap::heap()->prepare_for_verify(); + G1CollectedHeap::heap()->verify(true,false); + } +#endif +} + +void ConcurrentMark::completeCleanup() { + // A full collection intervened. + if (has_aborted()) return; + + int first = 0; + int last = (int)MAX2(ParallelGCThreads, (size_t)1); + for (int t = 0; t < last; t++) { + UncleanRegionList* list = &_par_cleanup_thread_state[t]->list; + assert(list->well_formed(), "Inv"); + HeapRegion* hd = list->hd(); + while (hd != NULL) { + // Now finish up the other stuff. + hd->rem_set()->clear(); + HeapRegion* next_hd = hd->next_from_unclean_list(); + (void)list->pop(); + guarantee(list->hd() == next_hd, "how not?"); + _g1h->put_region_on_unclean_list(hd); + if (!hd->isHumongous()) { + // Add this to the _free_regions count by 1. 
+ _g1h->finish_free_region_work(0, 0, 1, NULL); + } + hd = list->hd(); + guarantee(hd == next_hd, "how not?"); + } + } +} + + +class G1CMIsAliveClosure: public BoolObjectClosure { + G1CollectedHeap* _g1; + public: + G1CMIsAliveClosure(G1CollectedHeap* g1) : + _g1(g1) + {} + + void do_object(oop obj) { + assert(false, "not to be invoked"); + } + bool do_object_b(oop obj) { + HeapWord* addr = (HeapWord*)obj; + return addr != NULL && + (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); + } +}; + +class G1CMKeepAliveClosure: public OopClosure { + G1CollectedHeap* _g1; + ConcurrentMark* _cm; + CMBitMap* _bitMap; + public: + G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm, + CMBitMap* bitMap) : + _g1(g1), _cm(cm), + _bitMap(bitMap) {} + + void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + + void do_oop(oop* p) { + oop thisOop = *p; + HeapWord* addr = (HeapWord*)thisOop; + if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(thisOop)) { + _bitMap->mark(addr); + _cm->mark_stack_push(thisOop); + } + } +}; + +class G1CMDrainMarkingStackClosure: public VoidClosure { + CMMarkStack* _markStack; + CMBitMap* _bitMap; + G1CMKeepAliveClosure* _oopClosure; + public: + G1CMDrainMarkingStackClosure(CMBitMap* bitMap, CMMarkStack* markStack, + G1CMKeepAliveClosure* oopClosure) : + _bitMap(bitMap), + _markStack(markStack), + _oopClosure(oopClosure) + {} + + void do_void() { + _markStack->drain((OopClosure*)_oopClosure, _bitMap, false); + } +}; + +void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { + ResourceMark rm; + HandleMark hm; + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + ReferenceProcessor* rp = g1h->ref_processor(); + + // Process weak references. + rp->setup_policy(clear_all_soft_refs); + assert(_markStack.isEmpty(), "mark stack should be empty"); + + G1CMIsAliveClosure g1IsAliveClosure (g1h); + G1CMKeepAliveClosure g1KeepAliveClosure(g1h, this, nextMarkBitMap()); + G1CMDrainMarkingStackClosure + g1DrainMarkingStackClosure(nextMarkBitMap(), &_markStack, + &g1KeepAliveClosure); + + // XXXYYY Also: copy the parallel ref processing code from CMS. + rp->process_discovered_references(&g1IsAliveClosure, + &g1KeepAliveClosure, + &g1DrainMarkingStackClosure, + NULL); + assert(_markStack.overflow() || _markStack.isEmpty(), + "mark stack should be empty (unless it overflowed)"); + if (_markStack.overflow()) { + set_has_overflown(); + } + + rp->enqueue_discovered_references(); + rp->verify_no_references_recorded(); + assert(!rp->discovery_enabled(), "should have been disabled"); + + // Now clean up stale oops in SymbolTable and StringTable + SymbolTable::unlink(&g1IsAliveClosure); + StringTable::unlink(&g1IsAliveClosure); +} + +void ConcurrentMark::swapMarkBitMaps() { + CMBitMapRO* temp = _prevMarkBitMap; + _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; + _nextMarkBitMap = (CMBitMap*) temp; +} + +class CMRemarkTask: public AbstractGangTask { +private: + ConcurrentMark *_cm; + +public: + void work(int worker_i) { + // Since all available tasks are actually started, we should + // only proceed if we're supposed to be actived. + if ((size_t)worker_i < _cm->active_tasks()) { + CMTask* task = _cm->task(worker_i); + task->record_start_time(); + do { + task->do_marking_step(1000000000.0 /* something very large */); + } while (task->has_aborted() && !_cm->has_overflown()); + // If we overflow, then we do not want to restart. We instead + // want to abort remark and do concurrent marking again. 
+ task->record_end_time(); + } + } + + CMRemarkTask(ConcurrentMark* cm) : + AbstractGangTask("Par Remark"), _cm(cm) { } +}; + +void ConcurrentMark::checkpointRootsFinalWork() { + ResourceMark rm; + HandleMark hm; + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + g1h->ensure_parsability(false); + + if (ParallelGCThreads > 0) { + g1h->change_strong_roots_parity(); + // this is remark, so we'll use up all available threads + int active_workers = ParallelGCThreads; + set_phase(active_workers, false); + + CMRemarkTask remarkTask(this); + // We will start all available threads, even if we decide that the + // active_workers will be fewer. The extra ones will just bail out + // immediately. + int n_workers = g1h->workers()->total_workers(); + g1h->set_par_threads(n_workers); + g1h->workers()->run_task(&remarkTask); + g1h->set_par_threads(0); + + SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); + guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" ); + } else { + g1h->change_strong_roots_parity(); + // this is remark, so we'll use up all available threads + int active_workers = 1; + set_phase(active_workers, false); + + CMRemarkTask remarkTask(this); + // We will start all available threads, even if we decide that the + // active_workers will be fewer. The extra ones will just bail out + // immediately. + remarkTask.work(0); + + SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); + guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" ); + } + + print_stats(); + + if (!restart_for_overflow()) + set_non_marking_state(); + +#if VERIFY_OBJS_PROCESSED + if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) { + gclog_or_tty->print_cr("Processed = %d, enqueued = %d.", + _scan_obj_cl.objs_processed, + ThreadLocalObjQueue::objs_enqueued); + guarantee(_scan_obj_cl.objs_processed == + ThreadLocalObjQueue::objs_enqueued, + "Different number of objs processed and enqueued."); + } +#endif +} + +class ReachablePrinterOopClosure: public OopClosure { +private: + G1CollectedHeap* _g1h; + CMBitMapRO* _bitmap; + outputStream* _out; + +public: + ReachablePrinterOopClosure(CMBitMapRO* bitmap, outputStream* out) : + _bitmap(bitmap), _g1h(G1CollectedHeap::heap()), _out(out) { } + + void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + + void do_oop(oop* p) { + oop obj = *p; + const char* str = NULL; + const char* str2 = ""; + + if (!_g1h->is_in_g1_reserved(obj)) + str = "outside G1 reserved"; + else { + HeapRegion* hr = _g1h->heap_region_containing(obj); + guarantee( hr != NULL, "invariant" ); + if (hr->obj_allocated_since_prev_marking(obj)) { + str = "over TAMS"; + if (_bitmap->isMarked((HeapWord*) obj)) + str2 = " AND MARKED"; + } else if (_bitmap->isMarked((HeapWord*) obj)) + str = "marked"; + else + str = "#### NOT MARKED ####"; + } + + _out->print_cr(" "PTR_FORMAT" contains "PTR_FORMAT" %s%s", + p, (void*) obj, str, str2); + } +}; + +class ReachablePrinterClosure: public BitMapClosure { +private: + CMBitMapRO* _bitmap; + outputStream* _out; + +public: + ReachablePrinterClosure(CMBitMapRO* bitmap, outputStream* out) : + _bitmap(bitmap), _out(out) { } + + bool do_bit(size_t offset) { + HeapWord* addr = _bitmap->offsetToHeapWord(offset); + ReachablePrinterOopClosure oopCl(_bitmap, _out); + + _out->print_cr(" obj "PTR_FORMAT", offset %10d (marked)", addr, offset); + oop(addr)->oop_iterate(&oopCl); + _out->print_cr(""); + + return true; + } +}; + +class ObjInRegionReachablePrinterClosure : public ObjectClosure { +private: + CMBitMapRO* 
_bitmap; + outputStream* _out; + +public: + void do_object(oop o) { + ReachablePrinterOopClosure oopCl(_bitmap, _out); + + _out->print_cr(" obj "PTR_FORMAT" (over TAMS)", (void*) o); + o->oop_iterate(&oopCl); + _out->print_cr(""); + } + + ObjInRegionReachablePrinterClosure(CMBitMapRO* bitmap, outputStream* out) : + _bitmap(bitmap), _out(out) { } +}; + +class RegionReachablePrinterClosure : public HeapRegionClosure { +private: + CMBitMapRO* _bitmap; + outputStream* _out; + +public: + bool doHeapRegion(HeapRegion* hr) { + HeapWord* b = hr->bottom(); + HeapWord* e = hr->end(); + HeapWord* t = hr->top(); + HeapWord* p = hr->prev_top_at_mark_start(); + _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " + "PTAMS: "PTR_FORMAT, b, e, t, p); + _out->print_cr(""); + + ObjInRegionReachablePrinterClosure ocl(_bitmap, _out); + hr->object_iterate_mem_careful(MemRegion(p, t), &ocl); + + return false; + } + + RegionReachablePrinterClosure(CMBitMapRO* bitmap, + outputStream* out) : + _bitmap(bitmap), _out(out) { } +}; + +void ConcurrentMark::print_prev_bitmap_reachable() { + outputStream* out = gclog_or_tty; + +#if SEND_HEAP_DUMP_TO_FILE + guarantee(heap_dump_file == NULL, "Protocol"); + char fn_buf[100]; + sprintf(fn_buf, "/tmp/dump.txt.%d", os::current_process_id()); + heap_dump_file = fopen(fn_buf, "w"); + fileStream fstream(heap_dump_file); + out = &fstream; +#endif // SEND_HEAP_DUMP_TO_FILE + + RegionReachablePrinterClosure rcl(_prevMarkBitMap, out); + out->print_cr("--- ITERATING OVER REGIONS WITH PTAMS < TOP"); + _g1h->heap_region_iterate(&rcl); + out->print_cr(""); + + ReachablePrinterClosure cl(_prevMarkBitMap, out); + out->print_cr("--- REACHABLE OBJECTS ON THE BITMAP"); + _prevMarkBitMap->iterate(&cl); + out->print_cr(""); + +#if SEND_HEAP_DUMP_TO_FILE + fclose(heap_dump_file); + heap_dump_file = NULL; +#endif // SEND_HEAP_DUMP_TO_FILE +} + +// This note is for drainAllSATBBuffers and the code in between. +// In the future we could reuse a task to do this work during an +// evacuation pause (since now tasks are not active and can be claimed +// during an evacuation pause). This was a late change to the code and +// is currently not being taken advantage of. + +class CMGlobalObjectClosure : public ObjectClosure { +private: + ConcurrentMark* _cm; + +public: + void do_object(oop obj) { + _cm->deal_with_reference(obj); + } + + CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { } +}; + +void ConcurrentMark::deal_with_reference(oop obj) { + if (verbose_high()) + gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT, + (void*) obj); + + + HeapWord* objAddr = (HeapWord*) obj; + if (_g1h->is_in_g1_reserved(objAddr)) { + tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" ); + HeapRegion* hr = _g1h->heap_region_containing(obj); + if (_g1h->is_obj_ill(obj, hr)) { + if (verbose_high()) + gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered " + "marked", (void*) obj); + + // we need to mark it first + if (_nextMarkBitMap->parMark(objAddr)) { + // No OrderAccess:store_load() is needed. 
It is implicit in the + // CAS done in parMark(objAddr) above + HeapWord* finger = _finger; + if (objAddr < finger) { + if (verbose_high()) + gclog_or_tty->print_cr("[global] below the global finger " + "("PTR_FORMAT"), pushing it", finger); + if (!mark_stack_push(obj)) { + if (verbose_low()) + gclog_or_tty->print_cr("[global] global stack overflow during " + "deal_with_reference"); + } + } + } + } + } +} + +void ConcurrentMark::drainAllSATBBuffers() { + CMGlobalObjectClosure oc(this); + SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); + satb_mq_set.set_closure(&oc); + + while (satb_mq_set.apply_closure_to_completed_buffer()) { + if (verbose_medium()) + gclog_or_tty->print_cr("[global] processed an SATB buffer"); + } + + // no need to check whether we should do this, as this is only + // called during an evacuation pause + satb_mq_set.iterate_closure_all_threads(); + + satb_mq_set.set_closure(NULL); + guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" ); +} + +void ConcurrentMark::markPrev(oop p) { + // Note we are overriding the read-only view of the prev map here, via + // the cast. + ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*)p); +} + +void ConcurrentMark::clear(oop p) { + assert(p != NULL && p->is_oop(), "expected an oop"); + HeapWord* addr = (HeapWord*)p; + assert(addr >= _nextMarkBitMap->startWord() || + addr < _nextMarkBitMap->endWord(), "in a region"); + + _nextMarkBitMap->clear(addr); +} + +void ConcurrentMark::clearRangeBothMaps(MemRegion mr) { + // Note we are overriding the read-only view of the prev map here, via + // the cast. + ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); + _nextMarkBitMap->clearRange(mr); +} + +HeapRegion* +ConcurrentMark::claim_region(int task_num) { + // "checkpoint" the finger + HeapWord* finger = _finger; + + // _heap_end will not change underneath our feet; it only changes at + // yield points. + while (finger < _heap_end) { + tmp_guarantee_CM( _g1h->is_in_g1_reserved(finger), "invariant" ); + + // is the gap between reading the finger and doing the CAS too long? + + HeapRegion* curr_region = _g1h->heap_region_containing(finger); + HeapWord* bottom = curr_region->bottom(); + HeapWord* end = curr_region->end(); + HeapWord* limit = curr_region->next_top_at_mark_start(); + + if (verbose_low()) + gclog_or_tty->print_cr("[%d] curr_region = "PTR_FORMAT" " + "["PTR_FORMAT", "PTR_FORMAT"), " + "limit = "PTR_FORMAT, + task_num, curr_region, bottom, end, limit); + + HeapWord* res = + (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); + if (res == finger) { + // we succeeded + + // notice that _finger == end cannot be guaranteed here since, + // someone else might have moved the finger even further + guarantee( _finger >= end, "the finger should have moved forward" ); + + if (verbose_low()) + gclog_or_tty->print_cr("[%d] we were successful with region = " + PTR_FORMAT, task_num, curr_region); + + if (limit > bottom) { + if (verbose_low()) + gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is not empty, " + "returning it ", task_num, curr_region); + return curr_region; + } else { + tmp_guarantee_CM( limit == bottom, + "the region limit should be at bottom" ); + if (verbose_low()) + gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, " + "returning NULL", task_num, curr_region); + // we return NULL and the caller should try calling + // claim_region() again. 
+ return NULL; + } + } else { + guarantee( _finger > finger, "the finger should have moved forward" ); + if (verbose_low()) + gclog_or_tty->print_cr("[%d] somebody else moved the finger, " + "global finger = "PTR_FORMAT", " + "our finger = "PTR_FORMAT, + task_num, _finger, finger); + + // read it again + finger = _finger; + } + } + + return NULL; +} + +void ConcurrentMark::oops_do(OopClosure* cl) { + if (_markStack.size() > 0 && verbose_low()) + gclog_or_tty->print_cr("[global] scanning the global marking stack, " + "size = %d", _markStack.size()); + // we first iterate over the contents of the mark stack... + _markStack.oops_do(cl); + + for (int i = 0; i < (int)_max_task_num; ++i) { + OopTaskQueue* queue = _task_queues->queue((int)i); + + if (queue->size() > 0 && verbose_low()) + gclog_or_tty->print_cr("[global] scanning task queue of task %d, " + "size = %d", i, queue->size()); + + // ...then over the contents of the all the task queues. + queue->oops_do(cl); + } + + // finally, invalidate any entries that in the region stack that + // point into the collection set + if (_regionStack.invalidate_entries_into_cset()) { + // otherwise, any gray objects copied during the evacuation pause + // might not be visited. + guarantee( _should_gray_objects, "invariant" ); + } +} + +void ConcurrentMark::clear_marking_state() { + _markStack.setEmpty(); + _markStack.clear_overflow(); + _regionStack.setEmpty(); + _regionStack.clear_overflow(); + clear_has_overflown(); + _finger = _heap_start; + + for (int i = 0; i < (int)_max_task_num; ++i) { + OopTaskQueue* queue = _task_queues->queue(i); + queue->set_empty(); + } +} + +void ConcurrentMark::print_stats() { + if (verbose_stats()) { + gclog_or_tty->print_cr("---------------------------------------------------------------------"); + for (size_t i = 0; i < _active_tasks; ++i) { + _tasks[i]->print_stats(); + gclog_or_tty->print_cr("---------------------------------------------------------------------"); + } + } +} + +class CSMarkOopClosure: public OopClosure { + friend class CSMarkBitMapClosure; + + G1CollectedHeap* _g1h; + CMBitMap* _bm; + ConcurrentMark* _cm; + oop* _ms; + jint* _array_ind_stack; + int _ms_size; + int _ms_ind; + int _array_increment; + + bool push(oop obj, int arr_ind = 0) { + if (_ms_ind == _ms_size) { + gclog_or_tty->print_cr("Mark stack is full."); + return false; + } + _ms[_ms_ind] = obj; + if (obj->is_objArray()) _array_ind_stack[_ms_ind] = arr_ind; + _ms_ind++; + return true; + } + + oop pop() { + if (_ms_ind == 0) return NULL; + else { + _ms_ind--; + return _ms[_ms_ind]; + } + } + + bool drain() { + while (_ms_ind > 0) { + oop obj = pop(); + assert(obj != NULL, "Since index was non-zero."); + if (obj->is_objArray()) { + jint arr_ind = _array_ind_stack[_ms_ind]; + objArrayOop aobj = objArrayOop(obj); + jint len = aobj->length(); + jint next_arr_ind = arr_ind + _array_increment; + if (next_arr_ind < len) { + push(obj, next_arr_ind); + } + // Now process this portion of this one. 
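+        // A hypothetical walk-through of the chunking above (the numbers are
+        // illustrative only, not taken from the original code): with
+        // _array_increment == 16 and an object array of length 40, the array
+        // is re-pushed with start index 16 and elements [0,16) are scanned
+        // now; a later pop re-pushes it with start index 32 and scans
+        // [16,32); the final pop finds next_arr_ind == 48 >= len, so nothing
+        // is re-pushed and the tail [32,40) is scanned.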
+ int lim = MIN2(next_arr_ind, len); + assert(!UseCompressedOops, "This needs to be fixed"); + for (int j = arr_ind; j < lim; j++) { + do_oop(aobj->obj_at_addr(j)); + } + + } else { + obj->oop_iterate(this); + } + if (abort()) return false; + } + return true; + } + +public: + CSMarkOopClosure(ConcurrentMark* cm, int ms_size) : + _g1h(G1CollectedHeap::heap()), + _cm(cm), + _bm(cm->nextMarkBitMap()), + _ms_size(ms_size), _ms_ind(0), + _ms(NEW_C_HEAP_ARRAY(oop, ms_size)), + _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)), + _array_increment(MAX2(ms_size/8, 16)) + {} + + ~CSMarkOopClosure() { + FREE_C_HEAP_ARRAY(oop, _ms); + FREE_C_HEAP_ARRAY(jint, _array_ind_stack); + } + + void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + + void do_oop(oop* p) { + oop obj = *p; + if (obj == NULL) return; + if (obj->is_forwarded()) { + // If the object has already been forwarded, we have to make sure + // that it's marked. So follow the forwarding pointer. Note that + // this does the right thing for self-forwarding pointers in the + // evacuation failure case. + obj = obj->forwardee(); + } + HeapRegion* hr = _g1h->heap_region_containing(obj); + if (hr != NULL) { + if (hr->in_collection_set()) { + if (_g1h->is_obj_ill(obj)) { + _bm->mark((HeapWord*)obj); + if (!push(obj)) { + gclog_or_tty->print_cr("Setting abort in CSMarkOopClosure because push failed."); + set_abort(); + } + } + } else { + // Outside the collection set; we need to gray it + _cm->deal_with_reference(obj); + } + } + } +}; + +class CSMarkBitMapClosure: public BitMapClosure { + G1CollectedHeap* _g1h; + CMBitMap* _bitMap; + ConcurrentMark* _cm; + CSMarkOopClosure _oop_cl; +public: + CSMarkBitMapClosure(ConcurrentMark* cm, int ms_size) : + _g1h(G1CollectedHeap::heap()), + _bitMap(cm->nextMarkBitMap()), + _oop_cl(cm, ms_size) + {} + + ~CSMarkBitMapClosure() {} + + bool do_bit(size_t offset) { + // convert offset into a HeapWord* + HeapWord* addr = _bitMap->offsetToHeapWord(offset); + assert(_bitMap->endWord() && addr < _bitMap->endWord(), + "address out of range"); + assert(_bitMap->isMarked(addr), "tautology"); + oop obj = oop(addr); + if (!obj->is_forwarded()) { + if (!_oop_cl.push(obj)) return false; + if (!_oop_cl.drain()) return false; + } + // Otherwise... 
+    return true;
+  }
+};
+
+
+class CompleteMarkingInCSHRClosure: public HeapRegionClosure {
+  CMBitMap* _bm;
+  CSMarkBitMapClosure _bit_cl;
+  enum SomePrivateConstants {
+    MSSize = 1000
+  };
+  bool _completed;
+public:
+  CompleteMarkingInCSHRClosure(ConcurrentMark* cm) :
+    _bm(cm->nextMarkBitMap()),
+    _bit_cl(cm, MSSize),
+    _completed(true)
+  {}
+
+  ~CompleteMarkingInCSHRClosure() {}
+
+  bool doHeapRegion(HeapRegion* r) {
+    if (!r->evacuation_failed()) {
+      MemRegion mr = MemRegion(r->bottom(), r->next_top_at_mark_start());
+      if (!mr.is_empty()) {
+        if (!_bm->iterate(&_bit_cl, mr)) {
+          _completed = false;
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  bool completed() { return _completed; }
+};
+
+class ClearMarksInHRClosure: public HeapRegionClosure {
+  CMBitMap* _bm;
+public:
+  ClearMarksInHRClosure(CMBitMap* bm): _bm(bm) { }
+
+  bool doHeapRegion(HeapRegion* r) {
+    if (!r->used_region().is_empty() && !r->evacuation_failed()) {
+      MemRegion usedMR = r->used_region();
+      _bm->clearRange(usedMR);
+    }
+    return false;
+  }
+};
+
+void ConcurrentMark::complete_marking_in_collection_set() {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  if (!g1h->mark_in_progress()) {
+    g1h->g1_policy()->record_mark_closure_time(0.0);
+    return;
+  }
+
+  int i = 1;
+  double start = os::elapsedTime();
+  while (true) {
+    i++;
+    CompleteMarkingInCSHRClosure cmplt(this);
+    g1h->collection_set_iterate(&cmplt);
+    if (cmplt.completed()) break;
+  }
+  double end_time = os::elapsedTime();
+  double elapsed_time_ms = (end_time - start) * 1000.0;
+  g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
+  if (PrintGCDetails) {
+    gclog_or_tty->print_cr("Mark closure took %5.2f ms.", elapsed_time_ms);
+  }
+
+  ClearMarksInHRClosure clr(nextMarkBitMap());
+  g1h->collection_set_iterate(&clr);
+}
+
+// The next two methods deal with the following optimisation. Some
+// objects are gray by being marked and located above the finger. If
+// they are copied, during an evacuation pause, below the finger then
+// they need to be pushed on the stack. The observation is that, if
+// there are no regions in the collection set located above the
+// finger, then the above cannot happen, hence we do not need to
+// explicitly gray any objects when copying them to below the
+// finger. The global stack will be scanned to ensure that, if it
+// points to objects being copied, it will update their
+// location. There is a tricky situation with the gray objects in
+// the region stack that are being copied, however. See the comment
+// in newCSet().
+
+void ConcurrentMark::newCSet() {
+  if (!concurrent_marking_in_progress())
+    // nothing to do if marking is not in progress
+    return;
+
+  // find what the lowest finger is among the global and local fingers
+  _min_finger = _finger;
+  for (int i = 0; i < (int)_max_task_num; ++i) {
+    CMTask* task = _tasks[i];
+    HeapWord* task_finger = task->finger();
+    if (task_finger != NULL && task_finger < _min_finger)
+      _min_finger = task_finger;
+  }
+
+  _should_gray_objects = false;
+
+  // This fixes a very subtle and frustrating bug. It might be the case
+  // that, during an evacuation pause, heap regions that contain
+  // objects that are gray (by being in regions contained in the
+  // region stack) are included in the collection set.
Since such gray + // objects will be moved, and because it's not easy to redirect + // region stack entries to point to a new location (because objects + // in one region might be scattered to multiple regions after they + // are copied), one option is to ensure that all marked objects + // copied during a pause are pushed on the stack. Notice, however, + // that this problem can only happen when the region stack is not + // empty during an evacuation pause. So, we make the fix a bit less + // conservative and ensure that regions are pushed on the stack, + // irrespective whether all collection set regions are below the + // finger, if the region stack is not empty. This is expected to be + // a rare case, so I don't think it's necessary to be smarted about it. + if (!region_stack_empty()) + _should_gray_objects = true; +} + +void ConcurrentMark::registerCSetRegion(HeapRegion* hr) { + if (!concurrent_marking_in_progress()) + return; + + HeapWord* region_end = hr->end(); + if (region_end > _min_finger) + _should_gray_objects = true; +} + +void ConcurrentMark::disable_co_trackers() { + if (has_aborted()) { + if (_cleanup_co_tracker.enabled()) + _cleanup_co_tracker.disable(); + for (int i = 0; i < (int)_max_task_num; ++i) { + CMTask* task = _tasks[i]; + if (task->co_tracker_enabled()) + task->disable_co_tracker(); + } + } else { + guarantee( !_cleanup_co_tracker.enabled(), "invariant" ); + for (int i = 0; i < (int)_max_task_num; ++i) { + CMTask* task = _tasks[i]; + guarantee( !task->co_tracker_enabled(), "invariant" ); + } + } +} + +// abandon current marking iteration due to a Full GC +void ConcurrentMark::abort() { + // If we're not marking, nothing to do. + if (!G1ConcMark) return; + + // Clear all marks to force marking thread to do nothing + _nextMarkBitMap->clearAll(); + // Empty mark stack + clear_marking_state(); + for (int i = 0; i < (int)_max_task_num; ++i) + _tasks[i]->clear_region_fields(); + _has_aborted = true; + + SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); + satb_mq_set.abandon_partial_marking(); + satb_mq_set.set_active_all_threads(false); +} + +static void print_ms_time_info(const char* prefix, const char* name, + NumberSeq& ns) { + gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", + prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); + if (ns.num() > 0) { + gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]", + prefix, ns.sd(), ns.maximum()); + } +} + +void ConcurrentMark::print_summary_info() { + gclog_or_tty->print_cr(" Concurrent marking:"); + print_ms_time_info(" ", "init marks", _init_times); + print_ms_time_info(" ", "remarks", _remark_times); + { + print_ms_time_info(" ", "final marks", _remark_mark_times); + print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); + + } + print_ms_time_info(" ", "cleanups", _cleanup_times); + gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", + _total_counting_time, + (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / + (double)_cleanup_times.num() + : 0.0)); + if (G1ScrubRemSets) { + gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", + _total_rs_scrub_time, + (_cleanup_times.num() > 0 ? 
_total_rs_scrub_time * 1000.0 / + (double)_cleanup_times.num() + : 0.0)); + } + gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", + (_init_times.sum() + _remark_times.sum() + + _cleanup_times.sum())/1000.0); + gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " + "(%8.2f s marking, %8.2f s counting).", + cmThread()->vtime_accum(), + cmThread()->vtime_mark_accum(), + cmThread()->vtime_count_accum()); +} + +// Closures +// XXX: there seems to be a lot of code duplication here; +// should refactor and consolidate the shared code. + +// This closure is used to mark refs into the CMS generation in +// the CMS bit map. Called at the first checkpoint. + +// We take a break if someone is trying to stop the world. +bool ConcurrentMark::do_yield_check(int worker_i) { + if (should_yield()) { + if (worker_i == 0) + _g1h->g1_policy()->record_concurrent_pause(); + cmThread()->yield(); + if (worker_i == 0) + _g1h->g1_policy()->record_concurrent_pause_end(); + return true; + } else { + return false; + } +} + +bool ConcurrentMark::should_yield() { + return cmThread()->should_yield(); +} + +bool ConcurrentMark::containing_card_is_marked(void* p) { + size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1); + return _card_bm.at(offset >> CardTableModRefBS::card_shift); +} + +bool ConcurrentMark::containing_cards_are_marked(void* start, + void* last) { + return + containing_card_is_marked(start) && + containing_card_is_marked(last); +} + +#ifndef PRODUCT +// for debugging purposes +void ConcurrentMark::print_finger() { + gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, + _heap_start, _heap_end, _finger); + for (int i = 0; i < (int) _max_task_num; ++i) { + gclog_or_tty->print(" %d: "PTR_FORMAT, i, _tasks[i]->finger()); + } + gclog_or_tty->print_cr(""); +} +#endif + +// Closure for iteration over bitmaps +class CMBitMapClosure : public BitMapClosure { +private: + // the bitmap that is being iterated over + CMBitMap* _nextMarkBitMap; + ConcurrentMark* _cm; + CMTask* _task; + // true if we're scanning a heap region claimed by the task (so that + // we move the finger along), false if we're not, i.e. currently when + // scanning a heap region popped from the region stack (so that we + // do not move the task finger along; it'd be a mistake if we did so). + bool _scanning_heap_region; + +public: + CMBitMapClosure(CMTask *task, + ConcurrentMark* cm, + CMBitMap* nextMarkBitMap) + : _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } + + void set_scanning_heap_region(bool scanning_heap_region) { + _scanning_heap_region = scanning_heap_region; + } + + bool do_bit(size_t offset) { + HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); + tmp_guarantee_CM( _nextMarkBitMap->isMarked(addr), "invariant" ); + tmp_guarantee_CM( addr < _cm->finger(), "invariant" ); + + if (_scanning_heap_region) { + statsOnly( _task->increase_objs_found_on_bitmap() ); + tmp_guarantee_CM( addr >= _task->finger(), "invariant" ); + // We move that task's local finger along. + _task->move_finger_to(addr); + } else { + // We move the task's region finger along. 
+ _task->move_region_finger_to(addr); + } + + _task->scan_object(oop(addr)); + // we only partially drain the local queue and global stack + _task->drain_local_queue(true); + _task->drain_global_stack(true); + + // if the has_aborted flag has been raised, we need to bail out of + // the iteration + return !_task->has_aborted(); + } +}; + +// Closure for iterating over objects, currently only used for +// processing SATB buffers. +class CMObjectClosure : public ObjectClosure { +private: + CMTask* _task; + +public: + void do_object(oop obj) { + _task->deal_with_reference(obj); + } + + CMObjectClosure(CMTask* task) : _task(task) { } +}; + +// Closure for iterating over object fields +class CMOopClosure : public OopClosure { +private: + G1CollectedHeap* _g1h; + ConcurrentMark* _cm; + CMTask* _task; + +public: + void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + + void do_oop(oop* p) { + tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant" ); + + oop obj = *p; + if (_cm->verbose_high()) + gclog_or_tty->print_cr("[%d] we're looking at location " + "*"PTR_FORMAT" = "PTR_FORMAT, + _task->task_id(), p, (void*) obj); + _task->deal_with_reference(obj); + } + + CMOopClosure(G1CollectedHeap* g1h, + ConcurrentMark* cm, + CMTask* task) + : _g1h(g1h), _cm(cm), _task(task) { } +}; + +void CMTask::setup_for_region(HeapRegion* hr) { + tmp_guarantee_CM( hr != NULL && !hr->continuesHumongous(), + "claim_region() should have filtered out continues humongous regions" ); + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT, + _task_id, hr); + + _curr_region = hr; + _finger = hr->bottom(); + update_region_limit(); +} + +void CMTask::update_region_limit() { + HeapRegion* hr = _curr_region; + HeapWord* bottom = hr->bottom(); + HeapWord* limit = hr->next_top_at_mark_start(); + + if (limit == bottom) { + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] found an empty region " + "["PTR_FORMAT", "PTR_FORMAT")", + _task_id, bottom, limit); + // The region was collected underneath our feet. + // We set the finger to bottom to ensure that the bitmap + // iteration that will follow this will not do anything. + // (this is not a condition that holds when we set the region up, + // as the region is not supposed to be empty in the first place) + _finger = bottom; + } else if (limit >= _region_limit) { + tmp_guarantee_CM( limit >= _finger, "peace of mind" ); + } else { + tmp_guarantee_CM( limit < _region_limit, "only way to get here" ); + // This can happen under some pretty unusual circumstances. An + // evacuation pause empties the region underneath our feet (NTAMS + // at bottom). We then do some allocation in the region (NTAMS + // stays at bottom), followed by the region being used as a GC + // alloc region (NTAMS will move to top() and the objects + // originally below it will be grayed). All objects now marked in + // the region are explicitly grayed, if below the global finger, + // and we do not need in fact to scan anything else. So, we simply + // set _finger to be limit to ensure that the bitmap iteration + // doesn't do anything. 
+ _finger = limit; + } + + _region_limit = limit; +} + +void CMTask::giveup_current_region() { + tmp_guarantee_CM( _curr_region != NULL, "invariant" ); + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT, + _task_id, _curr_region); + clear_region_fields(); +} + +void CMTask::clear_region_fields() { + // Values for these three fields that indicate that we're not + // holding on to a region. + _curr_region = NULL; + _finger = NULL; + _region_limit = NULL; + + _region_finger = NULL; +} + +void CMTask::reset(CMBitMap* nextMarkBitMap) { + guarantee( nextMarkBitMap != NULL, "invariant" ); + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] resetting", _task_id); + + _nextMarkBitMap = nextMarkBitMap; + clear_region_fields(); + + _calls = 0; + _elapsed_time_ms = 0.0; + _termination_time_ms = 0.0; + _termination_start_time_ms = 0.0; + +#if _MARKING_STATS_ + _local_pushes = 0; + _local_pops = 0; + _local_max_size = 0; + _objs_scanned = 0; + _global_pushes = 0; + _global_pops = 0; + _global_max_size = 0; + _global_transfers_to = 0; + _global_transfers_from = 0; + _region_stack_pops = 0; + _regions_claimed = 0; + _objs_found_on_bitmap = 0; + _satb_buffers_processed = 0; + _steal_attempts = 0; + _steals = 0; + _aborted = 0; + _aborted_overflow = 0; + _aborted_cm_aborted = 0; + _aborted_yield = 0; + _aborted_timed_out = 0; + _aborted_satb = 0; + _aborted_termination = 0; +#endif // _MARKING_STATS_ +} + +bool CMTask::should_exit_termination() { + regular_clock_call(); + // This is called when we are in the termination protocol. We should + // quit if, for some reason, this task wants to abort or the global + // stack is not empty (this means that we can get work from it). + return !_cm->mark_stack_empty() || has_aborted(); +} + +// This determines whether the method below will check both the local +// and global fingers when determining whether to push on the stack a +// gray object (value 1) or whether it will only check the global one +// (value 0). The tradeoffs are that the former will be a bit more +// accurate and possibly push less on the stack, but it might also be +// a little bit slower. + +#define _CHECK_BOTH_FINGERS_ 1 + +void CMTask::deal_with_reference(oop obj) { + if (_cm->verbose_high()) + gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT, + _task_id, (void*) obj); + + ++_refs_reached; + + HeapWord* objAddr = (HeapWord*) obj; + if (_g1h->is_in_g1_reserved(objAddr)) { + tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" ); + HeapRegion* hr = _g1h->heap_region_containing(obj); + if (_g1h->is_obj_ill(obj, hr)) { + if (_cm->verbose_high()) + gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked", + _task_id, (void*) obj); + + // we need to mark it first + if (_nextMarkBitMap->parMark(objAddr)) { + // No OrderAccess:store_load() is needed. It is implicit in the + // CAS done in parMark(objAddr) above + HeapWord* global_finger = _cm->finger(); + +#if _CHECK_BOTH_FINGERS_ + // we will check both the local and global fingers + + if (_finger != NULL && objAddr < _finger) { + if (_cm->verbose_high()) + gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), " + "pushing it", _task_id, _finger); + push(obj); + } else if (_curr_region != NULL && objAddr < _region_limit) { + // do nothing + } else if (objAddr < global_finger) { + // Notice that the global finger might be moving forward + // concurrently. This is not a problem. 
In the worst case, we + // mark the object while it is above the global finger and, by + // the time we read the global finger, it has moved forward + // passed this object. In this case, the object will probably + // be visited when a task is scanning the region and will also + // be pushed on the stack. So, some duplicate work, but no + // correctness problems. + + if (_cm->verbose_high()) + gclog_or_tty->print_cr("[%d] below the global finger " + "("PTR_FORMAT"), pushing it", + _task_id, global_finger); + push(obj); + } else { + // do nothing + } +#else // _CHECK_BOTH_FINGERS_ + // we will only check the global finger + + if (objAddr < global_finger) { + // see long comment above + + if (_cm->verbose_high()) + gclog_or_tty->print_cr("[%d] below the global finger " + "("PTR_FORMAT"), pushing it", + _task_id, global_finger); + push(obj); + } +#endif // _CHECK_BOTH_FINGERS_ + } + } + } +} + +void CMTask::push(oop obj) { + HeapWord* objAddr = (HeapWord*) obj; + tmp_guarantee_CM( _g1h->is_in_g1_reserved(objAddr), "invariant" ); + tmp_guarantee_CM( !_g1h->is_obj_ill(obj), "invariant" ); + tmp_guarantee_CM( _nextMarkBitMap->isMarked(objAddr), "invariant" ); + + if (_cm->verbose_high()) + gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj); + + if (!_task_queue->push(obj)) { + // The local task queue looks full. We need to push some entries + // to the global stack. + + if (_cm->verbose_medium()) + gclog_or_tty->print_cr("[%d] task queue overflow, " + "moving entries to the global stack", + _task_id); + move_entries_to_global_stack(); + + // this should succeed since, even if we overflow the global + // stack, we should have definitely removed some entries from the + // local queue. So, there must be space on it. + bool success = _task_queue->push(obj); + tmp_guarantee_CM( success, "invariant" ); + } + + statsOnly( int tmp_size = _task_queue->size(); + if (tmp_size > _local_max_size) + _local_max_size = tmp_size; + ++_local_pushes ); +} + +void CMTask::reached_limit() { + tmp_guarantee_CM( _words_scanned >= _words_scanned_limit || + _refs_reached >= _refs_reached_limit , + "shouldn't have been called otherwise" ); + regular_clock_call(); +} + +void CMTask::regular_clock_call() { + if (has_aborted()) + return; + + // First, we need to recalculate the words scanned and refs reached + // limits for the next clock call. + recalculate_limits(); + + // During the regular clock call we do the following + + // (1) If an overflow has been flagged, then we abort. + if (_cm->has_overflown()) { + set_has_aborted(); + return; + } + + // If we are not concurrent (i.e. we're doing remark) we don't need + // to check anything else. The other steps are only needed during + // the concurrent marking phase. + if (!concurrent()) + return; + + // (2) If marking has been aborted for Full GC, then we also abort. + if (_cm->has_aborted()) { + set_has_aborted(); + statsOnly( ++_aborted_cm_aborted ); + return; + } + + double curr_time_ms = os::elapsedVTime() * 1000.0; + + // (3) If marking stats are enabled, then we update the step history. 
+#if _MARKING_STATS_
+  if (_words_scanned >= _words_scanned_limit)
+    ++_clock_due_to_scanning;
+  if (_refs_reached >= _refs_reached_limit)
+    ++_clock_due_to_marking;
+
+  double last_interval_ms = curr_time_ms - _interval_start_time_ms;
+  _interval_start_time_ms = curr_time_ms;
+  _all_clock_intervals_ms.add(last_interval_ms);
+
+  if (_cm->verbose_medium()) {
+    gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, "
+                           "scanned = %d%s, refs reached = %d%s",
+                           _task_id, last_interval_ms,
+                           _words_scanned,
+                           (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
+                           _refs_reached,
+                           (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
+  }
+#endif // _MARKING_STATS_
+
+  // (4) We check whether we should yield. If we have to, then we abort.
+  if (_cm->should_yield()) {
+    // We should yield. To do this we abort the task. The caller is
+    // responsible for yielding.
+    set_has_aborted();
+    statsOnly( ++_aborted_yield );
+    return;
+  }
+
+  // (5) We check whether we've reached our time quota. If we have,
+  // then we abort.
+  double elapsed_time_ms = curr_time_ms - _start_time_ms;
+  if (elapsed_time_ms > _time_target_ms) {
+    set_has_aborted();
+    _has_aborted_timed_out = true;
+    statsOnly( ++_aborted_timed_out );
+    return;
+  }
+
+  // (6) Finally, we check whether there are enough completed SATB
+  // buffers available for processing. If there are, we abort.
+  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
+    if (_cm->verbose_low())
+      gclog_or_tty->print_cr("[%d] aborting to deal with pending SATB buffers",
+                             _task_id);
+    // we do need to process SATB buffers, we'll abort and restart
+    // the marking task to do so
+    set_has_aborted();
+    statsOnly( ++_aborted_satb );
+    return;
+  }
+}
+
+void CMTask::recalculate_limits() {
+  _real_words_scanned_limit = _words_scanned + words_scanned_period;
+  _words_scanned_limit      = _real_words_scanned_limit;
+
+  _real_refs_reached_limit  = _refs_reached + refs_reached_period;
+  _refs_reached_limit       = _real_refs_reached_limit;
+}
+
+void CMTask::decrease_limits() {
+  // This is called when we believe that we're going to do an infrequent
+  // operation which will increase the per-byte scanned cost (i.e. move
+  // entries to/from the global stack). It basically tries to decrease the
+  // scanning limit so that the clock is called earlier.
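+  // A hypothetical walk-through (the period value is illustrative, not the
+  // actual flag setting): if words_scanned_period were 12288 words and the
+  // limits had just been recalculated, subtracting 3/4 of the period (9216
+  // words) from _real_words_scanned_limit means the next regular_clock_call()
+  // fires after only about a quarter of the usual scanning work (~3072
+  // words). The refs limit is pulled back in the same proportion.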
+ + if (_cm->verbose_medium()) + gclog_or_tty->print_cr("[%d] decreasing limits", _task_id); + + _words_scanned_limit = _real_words_scanned_limit - + 3 * words_scanned_period / 4; + _refs_reached_limit = _real_refs_reached_limit - + 3 * refs_reached_period / 4; +} + +void CMTask::move_entries_to_global_stack() { + // local array where we'll store the entries that will be popped + // from the local queue + oop buffer[global_stack_transfer_size]; + + int n = 0; + oop obj; + while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { + buffer[n] = obj; + ++n; + } + + if (n > 0) { + // we popped at least one entry from the local queue + + statsOnly( ++_global_transfers_to; _local_pops += n ); + + if (!_cm->mark_stack_push(buffer, n)) { + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] aborting due to global stack overflow", _task_id); + set_has_aborted(); + } else { + // the transfer was successful + + if (_cm->verbose_medium()) + gclog_or_tty->print_cr("[%d] pushed %d entries to the global stack", + _task_id, n); + statsOnly( int tmp_size = _cm->mark_stack_size(); + if (tmp_size > _global_max_size) + _global_max_size = tmp_size; + _global_pushes += n ); + } + } + + // this operation was quite expensive, so decrease the limits + decrease_limits(); +} + +void CMTask::get_entries_from_global_stack() { + // local array where we'll store the entries that will be popped + // from the global stack. + oop buffer[global_stack_transfer_size]; + int n; + _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); + tmp_guarantee_CM( n <= global_stack_transfer_size, + "we should not pop more than the given limit" ); + if (n > 0) { + // yes, we did actually pop at least one entry + + statsOnly( ++_global_transfers_from; _global_pops += n ); + if (_cm->verbose_medium()) + gclog_or_tty->print_cr("[%d] popped %d entries from the global stack", + _task_id, n); + for (int i = 0; i < n; ++i) { + bool success = _task_queue->push(buffer[i]); + // We only call this when the local queue is empty or under a + // given target limit. So, we do not expect this push to fail. + tmp_guarantee_CM( success, "invariant" ); + } + + statsOnly( int tmp_size = _task_queue->size(); + if (tmp_size > _local_max_size) + _local_max_size = tmp_size; + _local_pushes += n ); + } + + // this operation was quite expensive, so decrease the limits + decrease_limits(); +} + +void CMTask::drain_local_queue(bool partially) { + if (has_aborted()) + return; + + // Decide what the target size is, depending whether we're going to + // drain it partially (so that other tasks can steal if they run out + // of things to do) or totally (at the very end). 
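+  // Illustrative example (both values are assumptions, not the real
+  // defaults): with a task queue capacity of 16384 entries and
+  // GCDrainStackTargetSize set to 64, a partial drain stops once the queue
+  // has shrunk to MIN2(16384/3, 64) == 64 entries, leaving work available
+  // for stealing, whereas a total drain (partially == false) empties the
+  // queue completely.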
+ size_t target_size; + if (partially) + target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); + else + target_size = 0; + + if (_task_queue->size() > target_size) { + if (_cm->verbose_high()) + gclog_or_tty->print_cr("[%d] draining local queue, target size = %d", + _task_id, target_size); + + oop obj; + bool ret = _task_queue->pop_local(obj); + while (ret) { + statsOnly( ++_local_pops ); + + if (_cm->verbose_high()) + gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id, + (void*) obj); + + tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) obj), + "invariant" ); + + scan_object(obj); + + if (_task_queue->size() <= target_size || has_aborted()) + ret = false; + else + ret = _task_queue->pop_local(obj); + } + + if (_cm->verbose_high()) + gclog_or_tty->print_cr("[%d] drained local queue, size = %d", + _task_id, _task_queue->size()); + } +} + +void CMTask::drain_global_stack(bool partially) { + if (has_aborted()) + return; + + // We have a policy to drain the local queue before we attempt to + // drain the global stack. + tmp_guarantee_CM( partially || _task_queue->size() == 0, "invariant" ); + + // Decide what the target size is, depending whether we're going to + // drain it partially (so that other tasks can steal if they run out + // of things to do) or totally (at the very end). Notice that, + // because we move entries from the global stack in chunks or + // because another task might be doing the same, we might in fact + // drop below the target. But, this is not a problem. + size_t target_size; + if (partially) + target_size = _cm->partial_mark_stack_size_target(); + else + target_size = 0; + + if (_cm->mark_stack_size() > target_size) { + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] draining global_stack, target size %d", + _task_id, target_size); + + while (!has_aborted() && _cm->mark_stack_size() > target_size) { + get_entries_from_global_stack(); + drain_local_queue(partially); + } + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] drained global stack, size = %d", + _task_id, _cm->mark_stack_size()); + } +} + +// SATB Queue has several assumptions on whether to call the par or +// non-par versions of the methods. this is why some of the code is +// replicated. We should really get rid of the single-threaded version +// of the code to simplify things. +void CMTask::drain_satb_buffers() { + if (has_aborted()) + return; + + // We set this so that the regular clock knows that we're in the + // middle of draining buffers and doesn't set the abort flag when it + // notices that SATB buffers are available for draining. It'd be + // very counter productive if it did that. :-) + _draining_satb_buffers = true; + + CMObjectClosure oc(this); + SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); + if (ParallelGCThreads > 0) + satb_mq_set.set_par_closure(_task_id, &oc); + else + satb_mq_set.set_closure(&oc); + + // This keeps claiming and applying the closure to completed buffers + // until we run out of buffers or we need to abort. 
+ if (ParallelGCThreads > 0) { + while (!has_aborted() && + satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) { + if (_cm->verbose_medium()) + gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); + statsOnly( ++_satb_buffers_processed ); + regular_clock_call(); + } + } else { + while (!has_aborted() && + satb_mq_set.apply_closure_to_completed_buffer()) { + if (_cm->verbose_medium()) + gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); + statsOnly( ++_satb_buffers_processed ); + regular_clock_call(); + } + } + + if (!concurrent() && !has_aborted()) { + // We should only do this during remark. + if (ParallelGCThreads > 0) + satb_mq_set.par_iterate_closure_all_threads(_task_id); + else + satb_mq_set.iterate_closure_all_threads(); + } + + _draining_satb_buffers = false; + + tmp_guarantee_CM( has_aborted() || + concurrent() || + satb_mq_set.completed_buffers_num() == 0, "invariant" ); + + if (ParallelGCThreads > 0) + satb_mq_set.set_par_closure(_task_id, NULL); + else + satb_mq_set.set_closure(NULL); + + // again, this was a potentially expensive operation, decrease the + // limits to get the regular clock call early + decrease_limits(); +} + +void CMTask::drain_region_stack(BitMapClosure* bc) { + if (has_aborted()) + return; + + tmp_guarantee_CM( _region_finger == NULL, + "it should be NULL when we're not scanning a region" ); + + if (!_cm->region_stack_empty()) { + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] draining region stack, size = %d", + _task_id, _cm->region_stack_size()); + + MemRegion mr = _cm->region_stack_pop(); + // it returns MemRegion() if the pop fails + statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); + + while (mr.start() != NULL) { + if (_cm->verbose_medium()) + gclog_or_tty->print_cr("[%d] we are scanning region " + "["PTR_FORMAT", "PTR_FORMAT")", + _task_id, mr.start(), mr.end()); + tmp_guarantee_CM( mr.end() <= _cm->finger(), + "otherwise the region shouldn't be on the stack" ); + assert(!mr.is_empty(), "Only non-empty regions live on the region stack"); + if (_nextMarkBitMap->iterate(bc, mr)) { + tmp_guarantee_CM( !has_aborted(), + "cannot abort the task without aborting the bitmap iteration" ); + + // We finished iterating over the region without aborting. + regular_clock_call(); + if (has_aborted()) + mr = MemRegion(); + else { + mr = _cm->region_stack_pop(); + // it returns MemRegion() if the pop fails + statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); + } + } else { + guarantee( has_aborted(), "currently the only way to do so" ); + + // The only way to abort the bitmap iteration is to return + // false from the do_bit() method. However, inside the + // do_bit() method we move the _region_finger to point to the + // object currently being looked at. So, if we bail out, we + // have definitely set _region_finger to something non-null. + guarantee( _region_finger != NULL, "invariant" ); + + // The iteration was actually aborted. So now _region_finger + // points to the address of the object we last scanned. If we + // leave it there, when we restart this task, we will rescan + // the object. It is easy to avoid this. We move the finger by + // enough to point to the next possible object header (the + // bitmap knows by how much we need to move it as it knows its + // granularity). 
+ MemRegion newRegion = + MemRegion(_nextMarkBitMap->nextWord(_region_finger), mr.end()); + + if (!newRegion.is_empty()) { + if (_cm->verbose_low()) { + gclog_or_tty->print_cr("[%d] pushing unscanned region" + "[" PTR_FORMAT "," PTR_FORMAT ") on region stack", + _task_id, + newRegion.start(), newRegion.end()); + } + // Now push the part of the region we didn't scan on the + // region stack to make sure a task scans it later. + _cm->region_stack_push(newRegion); + } + // break from while + mr = MemRegion(); + } + _region_finger = NULL; + } + + // We only push regions on the region stack during evacuation + // pauses. So if we come out the above iteration because we region + // stack is empty, it will remain empty until the next yield + // point. So, the guarantee below is safe. + guarantee( has_aborted() || _cm->region_stack_empty(), + "only way to exit the loop" ); + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] drained region stack, size = %d", + _task_id, _cm->region_stack_size()); + } +} + +void CMTask::print_stats() { + gclog_or_tty->print_cr("Marking Stats, task = %d, calls = %d", + _task_id, _calls); + gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", + _elapsed_time_ms, _termination_time_ms); + gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", + _step_times_ms.num(), _step_times_ms.avg(), + _step_times_ms.sd()); + gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", + _step_times_ms.maximum(), _step_times_ms.sum()); + +#if _MARKING_STATS_ + gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", + _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), + _all_clock_intervals_ms.sd()); + gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", + _all_clock_intervals_ms.maximum(), + _all_clock_intervals_ms.sum()); + gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", + _clock_due_to_scanning, _clock_due_to_marking); + gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", + _objs_scanned, _objs_found_on_bitmap); + gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", + _local_pushes, _local_pops, _local_max_size); + gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", + _global_pushes, _global_pops, _global_max_size); + gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", + _global_transfers_to,_global_transfers_from); + gclog_or_tty->print_cr(" Regions: claimed = %d, Region Stack: pops = %d", + _regions_claimed, _region_stack_pops); + gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); + gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", + _steal_attempts, _steals); + gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); + gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", + _aborted_overflow, _aborted_cm_aborted, _aborted_yield); + gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", + _aborted_timed_out, _aborted_satb, _aborted_termination); +#endif // _MARKING_STATS_ +} + +/***************************************************************************** + + The do_marking_step(time_target_ms) method is the building block + of the parallel marking framework. 
It can be called in parallel
+ with other invocations of do_marking_step() on different tasks
+ (but only one per task, obviously) and concurrently with the
+ mutator threads, or during remark, hence it eliminates the need
+ for two versions of the code. When called during remark, it will
+ pick up from where the task left off during the concurrent marking
+ phase. Interestingly, tasks are also claimable during evacuation
+ pauses, since do_marking_step() ensures that it aborts before
+ it needs to yield.
+
+ The data structures that it uses to do marking work are the
+ following:
+
+ (1) Marking Bitmap. If there are gray objects that appear only
+ on the bitmap (this happens either when dealing with an overflow
+ or when the initial marking phase has simply marked the roots
+ and didn't push them on the stack), then tasks claim heap
+ regions whose bitmap they then scan to find gray objects. A
+ global finger indicates where the end of the last claimed region
+ is. A local finger indicates how far into the region a task has
+ scanned. The two fingers are used to determine how to gray an
+ object (i.e. whether simply marking it is OK, as it will be
+ visited by a task in the future, or whether it needs to be also
+ pushed on a stack).
+
+ (2) Local Queue. The local queue of the task which is accessed
+ reasonably efficiently by the task. Other tasks can steal from
+ it when they run out of work. Throughout the marking phase, a
+ task attempts to keep its local queue short but not totally
+ empty, so that entries are available for stealing by other
+ tasks. Only when there is no more work will a task totally
+ drain its local queue.
+
+ (3) Global Mark Stack. This handles local queue overflow. During
+ marking only sets of entries are moved between it and the local
+ queues, as access to it requires a mutex and more fine-grained
+ interaction with it which might cause contention. If it
+ overflows, then the marking phase should restart and iterate
+ over the bitmap to identify gray objects. Throughout the marking
+ phase, tasks attempt to keep the global mark stack at a small
+ length but not totally empty, so that entries are available for
+ popping by other tasks. Only when there is no more work will
+ tasks totally drain the global mark stack.
+
+ (4) Global Region Stack. Entries on it correspond to areas of
+ the bitmap that need to be scanned since they contain gray
+ objects. Pushes on the region stack only happen during
+ evacuation pauses and typically correspond to areas covered by
+ GC LABs. If it overflows, then the marking phase should restart
+ and iterate over the bitmap to identify gray objects. Tasks will
+ try to totally drain the region stack as soon as possible.
+
+ (5) SATB Buffer Queue. This is where completed SATB buffers are
+ made available. Buffers are regularly removed from this queue
+ and scanned for roots, so that the queue doesn't get too
+ long. During remark, all completed buffers are processed, as
+ well as the filled-in parts of any uncompleted buffers.
+
+ The do_marking_step() method tries to abort when the time target
+ has been reached. There are a few other cases when the
+ do_marking_step() method also aborts:
+
+ (1) When the marking phase has been aborted (after a Full GC).
+
+ (2) When a global overflow (either on the global stack or the
+ region stack) has been triggered. Before the task aborts, it
+ will actually sync up with the other tasks to ensure that all
+ the marking data structures (local queues, stacks, fingers etc.)
+ are re-initialised so that when do_marking_step() completes,
+ the marking phase can immediately restart.
+
+ (3) When enough completed SATB buffers are available. The
+ do_marking_step() method only tries to drain SATB buffers right
+ at the beginning. So, if enough buffers are available, the
+ marking step aborts and the SATB buffers are processed at
+ the beginning of the next invocation.
+
+ (4) To yield. When we have to yield, we abort and yield
+ right at the end of do_marking_step(). This saves us from a lot
+ of hassle as, by yielding, we might allow a Full GC. If this
+ happens then objects will be compacted underneath our feet, the
+ heap might shrink, etc. We save checking for this by just
+ aborting and doing the yield right at the end.
+
+ From the above it follows that the do_marking_step() method should
+ be called in a loop (or, otherwise, regularly) until it completes.
+
+ If a marking step completes without its has_aborted() flag being
+ true, it means it has completed the current marking phase (and
+ also all other marking tasks have done so and have all synced up).
+
+ A method called regular_clock_call() is invoked "regularly" (at
+ sub-ms intervals) throughout marking. It is this clock method that
+ checks all the abort conditions which were mentioned above and
+ decides when the task should abort. A work-based scheme is used to
+ trigger this clock method: when the number of object words the
+ marking phase has scanned or the number of references the marking
+ phase has visited reach a given limit. Additional invocations of
+ the clock method have been planted in a few other strategic places
+ too. The initial reason for the clock method was to avoid calling
+ vtime too regularly, as it is quite expensive. So, once it was in
+ place, it was natural to piggy-back all the other conditions on it
+ too and not constantly check them throughout the code.
+
+ *****************************************************************************/
+
+void CMTask::do_marking_step(double time_target_ms) {
+  guarantee( time_target_ms >= 1.0, "minimum granularity is 1ms" );
+  guarantee( concurrent() == _cm->concurrent(), "they should be the same" );
+
+  guarantee( concurrent() || _cm->region_stack_empty(),
+             "the region stack should have been cleared before remark" );
+  guarantee( _region_finger == NULL,
+             "this should be non-null only when a region is being scanned" );
+
+  G1CollectorPolicy* g1_policy = _g1h->g1_policy();
+  guarantee( _task_queues != NULL, "invariant" );
+  guarantee( _task_queue != NULL, "invariant" );
+  guarantee( _task_queues->queue(_task_id) == _task_queue, "invariant" );
+
+  guarantee( !_claimed,
+             "only one thread should claim this task at any one time" );
+
+  // OK, this doesn't safeguard against all possible scenarios, as it is
+  // possible for two threads to set the _claimed flag at the same
+  // time. But it is only for debugging purposes anyway and it will
+  // catch most problems.
+ _claimed = true; + + _start_time_ms = os::elapsedVTime() * 1000.0; + statsOnly( _interval_start_time_ms = _start_time_ms ); + + double diff_prediction_ms = + g1_policy->get_new_prediction(&_marking_step_diffs_ms); + _time_target_ms = time_target_ms - diff_prediction_ms; + + // set up the variables that are used in the work-based scheme to + // call the regular clock method + _words_scanned = 0; + _refs_reached = 0; + recalculate_limits(); + + // clear all flags + clear_has_aborted(); + _has_aborted_timed_out = false; + _draining_satb_buffers = false; + + ++_calls; + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] >>>>>>>>>> START, call = %d, " + "target = %1.2lfms >>>>>>>>>>", + _task_id, _calls, _time_target_ms); + + // Set up the bitmap and oop closures. Anything that uses them is + // eventually called from this method, so it is OK to allocate these + // statically. + CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); + CMOopClosure oop_closure(_g1h, _cm, this); + set_oop_closure(&oop_closure); + + if (_cm->has_overflown()) { + // This can happen if the region stack or the mark stack overflows + // during a GC pause and this task, after a yield point, + // restarts. We have to abort as we need to get into the overflow + // protocol which happens right at the end of this task. + set_has_aborted(); + } + + // First drain any available SATB buffers. After this, we will not + // look at SATB buffers before the next invocation of this method. + // If enough completed SATB buffers are queued up, the regular clock + // will abort this task so that it restarts. + drain_satb_buffers(); + // ...then partially drain the local queue and the global stack + drain_local_queue(true); + drain_global_stack(true); + + // Then totally drain the region stack. We will not look at + // it again before the next invocation of this method. Entries on + // the region stack are only added during evacuation pauses, for + // which we have to yield. When we do, we abort the task anyway so + // it will look at the region stack again when it restarts. + bitmap_closure.set_scanning_heap_region(false); + drain_region_stack(&bitmap_closure); + // ...then partially drain the local queue and the global stack + drain_local_queue(true); + drain_global_stack(true); + + do { + if (!has_aborted() && _curr_region != NULL) { + // This means that we're already holding on to a region. + tmp_guarantee_CM( _finger != NULL, + "if region is not NULL, then the finger " + "should not be NULL either" ); + + // We might have restarted this task after an evacuation pause + // which might have evacuated the region we're holding on to + // underneath our feet. Let's read its limit again to make sure + // that we do not iterate over a region of the heap that + // contains garbage (update_region_limit() will also move + // _finger to the start of the region if it is found empty). + update_region_limit(); + // We will start from _finger not from the start of the region, + // as we might be restarting this task after aborting half-way + // through scanning this region. In this case, _finger points to + // the address where we last found a marked object. If this is a + // fresh region, _finger points to start(). 
+ MemRegion mr = MemRegion(_finger, _region_limit); + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] we're scanning part " + "["PTR_FORMAT", "PTR_FORMAT") " + "of region "PTR_FORMAT, + _task_id, _finger, _region_limit, _curr_region); + + // Let's iterate over the bitmap of the part of the + // region that is left. + bitmap_closure.set_scanning_heap_region(true); + if (mr.is_empty() || + _nextMarkBitMap->iterate(&bitmap_closure, mr)) { + // We successfully completed iterating over the region. Now, + // let's give up the region. + giveup_current_region(); + regular_clock_call(); + } else { + guarantee( has_aborted(), "currently the only way to do so" ); + // The only way to abort the bitmap iteration is to return + // false from the do_bit() method. However, inside the + // do_bit() method we move the _finger to point to the + // object currently being looked at. So, if we bail out, we + // have definitely set _finger to something non-null. + guarantee( _finger != NULL, "invariant" ); + + // Region iteration was actually aborted. So now _finger + // points to the address of the object we last scanned. If we + // leave it there, when we restart this task, we will rescan + // the object. It is easy to avoid this. We move the finger by + // enough to point to the next possible object header (the + // bitmap knows by how much we need to move it as it knows its + // granularity). + move_finger_to(_nextMarkBitMap->nextWord(_finger)); + } + } + // At this point we have either completed iterating over the + // region we were holding on to, or we have aborted. + + // We then partially drain the local queue and the global stack. + // (Do we really need this?) + drain_local_queue(true); + drain_global_stack(true); + + // Read the note on the claim_region() method on why it might + // return NULL with potentially more regions available for + // claiming and why we have to check out_of_regions() to determine + // whether we're done or not. + while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { + // We are going to try to claim a new region. We should have + // given up on the previous one. + tmp_guarantee_CM( _curr_region == NULL && + _finger == NULL && + _region_limit == NULL, "invariant" ); + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id); + HeapRegion* claimed_region = _cm->claim_region(_task_id); + if (claimed_region != NULL) { + // Yes, we managed to claim one + statsOnly( ++_regions_claimed ); + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] we successfully claimed " + "region "PTR_FORMAT, + _task_id, claimed_region); + + setup_for_region(claimed_region); + tmp_guarantee_CM( _curr_region == claimed_region, "invariant" ); + } + // It is important to call the regular clock here. It might take + // a while to claim a region if, for example, we hit a large + // block of empty regions. So we need to call the regular clock + // method once round the loop to make sure it's called + // frequently enough. + regular_clock_call(); + } + + if (!has_aborted() && _curr_region == NULL) { + tmp_guarantee_CM( _cm->out_of_regions(), + "at this point we should be out of regions" ); + } + } while ( _curr_region != NULL && !has_aborted()); + + if (!has_aborted()) { + // We cannot check whether the global stack is empty, since other + // tasks might be pushing objects to it concurrently. We also cannot + // check if the region stack is empty because if a thread is aborting + // it can push a partially done region back. 
+ tmp_guarantee_CM( _cm->out_of_regions(), + "at this point we should be out of regions" ); + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] all regions claimed", _task_id); + + // Try to reduce the number of available SATB buffers so that + // remark has less work to do. + drain_satb_buffers(); + } + + // Since we've done everything else, we can now totally drain the + // local queue and global stack. + drain_local_queue(false); + drain_global_stack(false); + + // Attempt at work stealing from other task's queues. + if (!has_aborted()) { + // We have not aborted. This means that we have finished all that + // we could. Let's try to do some stealing... + + // We cannot check whether the global stack is empty, since other + // tasks might be pushing objects to it concurrently. We also cannot + // check if the region stack is empty because if a thread is aborting + // it can push a partially done region back. + guarantee( _cm->out_of_regions() && + _task_queue->size() == 0, "only way to reach here" ); + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] starting to steal", _task_id); + + while (!has_aborted()) { + oop obj; + statsOnly( ++_steal_attempts ); + + if (_cm->try_stealing(_task_id, &_hash_seed, obj)) { + if (_cm->verbose_medium()) + gclog_or_tty->print_cr("[%d] stolen "PTR_FORMAT" successfully", + _task_id, (void*) obj); + + statsOnly( ++_steals ); + + tmp_guarantee_CM( _nextMarkBitMap->isMarked((HeapWord*) obj), + "any stolen object should be marked" ); + scan_object(obj); + + // And since we're towards the end, let's totally drain the + // local queue and global stack. + drain_local_queue(false); + drain_global_stack(false); + } else { + break; + } + } + } + + // We still haven't aborted. Now, let's try to get into the + // termination protocol. + if (!has_aborted()) { + // We cannot check whether the global stack is empty, since other + // tasks might be concurrently pushing objects on it. We also cannot + // check if the region stack is empty because if a thread is aborting + // it can push a partially done region back. + guarantee( _cm->out_of_regions() && + _task_queue->size() == 0, "only way to reach here" ); + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id); + + _termination_start_time_ms = os::elapsedVTime() * 1000.0; + // The CMTask class also extends the TerminatorTerminator class, + // hence its should_exit_termination() method will also decide + // whether to exit the termination protocol or not. + bool finished = _cm->terminator()->offer_termination(this); + double termination_end_time_ms = os::elapsedVTime() * 1000.0; + _termination_time_ms += + termination_end_time_ms - _termination_start_time_ms; + + if (finished) { + // We're all done. + + if (_task_id == 0) { + // let's allow task 0 to do this + if (concurrent()) { + guarantee( _cm->concurrent_marking_in_progress(), "invariant" ); + // we need to set this to false before the next + // safepoint. This way we ensure that the marking phase + // doesn't observe any more heap expansions. + _cm->clear_concurrent_marking_in_progress(); + } + } + + // We can now guarantee that the global stack is empty, since + // all other tasks have finished. 
+ guarantee( _cm->out_of_regions() && + _cm->region_stack_empty() && + _cm->mark_stack_empty() && + _task_queue->size() == 0 && + !_cm->has_overflown() && + !_cm->mark_stack_overflow() && + !_cm->region_stack_overflow(), + "only way to reach here" ); + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id); + } else { + // Apparently there's more work to do. Let's abort this task. It + // will restart it and we can hopefully find more things to do. + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] apparently there is more work to do", _task_id); + + set_has_aborted(); + statsOnly( ++_aborted_termination ); + } + } + + // Mainly for debugging purposes to make sure that a pointer to the + // closure which was statically allocated in this frame doesn't + // escape it by accident. + set_oop_closure(NULL); + double end_time_ms = os::elapsedVTime() * 1000.0; + double elapsed_time_ms = end_time_ms - _start_time_ms; + // Update the step history. + _step_times_ms.add(elapsed_time_ms); + + if (has_aborted()) { + // The task was aborted for some reason. + + statsOnly( ++_aborted ); + + if (_has_aborted_timed_out) { + double diff_ms = elapsed_time_ms - _time_target_ms; + // Keep statistics of how well we did with respect to hitting + // our target only if we actually timed out (if we aborted for + // other reasons, then the results might get skewed). + _marking_step_diffs_ms.add(diff_ms); + } + + if (_cm->has_overflown()) { + // This is the interesting one. We aborted because a global + // overflow was raised. This means we have to restart the + // marking phase and start iterating over regions. However, in + // order to do this we have to make sure that all tasks stop + // what they are doing and re-initialise in a safe manner. We + // will achieve this with the use of two barrier sync points. + + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] detected overflow", _task_id); + + _cm->enter_first_sync_barrier(_task_id); + // When we exit this sync barrier we know that all tasks have + // stopped doing marking work. So, it's now safe to + // re-initialise our data structures. At the end of this method, + // task 0 will clear the global data structures. + + statsOnly( ++_aborted_overflow ); + + // We clear the local state of this task... + clear_region_fields(); + + // ...and enter the second barrier. + _cm->enter_second_sync_barrier(_task_id); + // At this point everything has bee re-initialised and we're + // ready to restart. 
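The two-barrier restart dance described above maps fairly directly onto std::barrier, assuming a C++20 toolchain. The sketch below only illustrates the synchronisation shape with invented names; it is not the VM's WorkGangBarrierSync, and the exact ordering of the local and global resets differs in detail from the real code.

#include <barrier>
#include <cstdio>
#include <thread>
#include <vector>

constexpr int kTasks = 4;
std::barrier<> first_sync(kTasks);   // "every task has stopped marking"
std::barrier<> second_sync(kTasks);  // "every task may start again"

void reset_global_marking_state() { std::puts("global structures reset"); }
void reset_local_state(int id)    { std::printf("task %d local reset\n", id); }

void handle_overflow(int task_id) {
  first_sync.arrive_and_wait();      // past this point no task touches shared state
  reset_local_state(task_id);
  if (task_id == 0)                  // exactly one task re-initialises shared state
    reset_global_marking_state();
  second_sync.arrive_and_wait();     // nobody restarts before the reset is finished
}

int main() {
  std::vector<std::thread> tasks;
  for (int i = 0; i < kTasks; ++i) tasks.emplace_back(handle_overflow, i);
  for (auto& t : tasks) t.join();
}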
+ } + + if (_cm->verbose_low()) { + gclog_or_tty->print_cr("[%d] <<<<<<<<<< ABORTING, target = %1.2lfms, " + "elapsed = %1.2lfms <<<<<<<<<<", + _task_id, _time_target_ms, elapsed_time_ms); + if (_cm->has_aborted()) + gclog_or_tty->print_cr("[%d] ========== MARKING ABORTED ==========", + _task_id); + } + } else { + if (_cm->verbose_low()) + gclog_or_tty->print_cr("[%d] <<<<<<<<<< FINISHED, target = %1.2lfms, " + "elapsed = %1.2lfms <<<<<<<<<<", + _task_id, _time_target_ms, elapsed_time_ms); + } + + _claimed = false; +} + +CMTask::CMTask(int task_id, + ConcurrentMark* cm, + CMTaskQueue* task_queue, + CMTaskQueueSet* task_queues) + : _g1h(G1CollectedHeap::heap()), + _co_tracker(G1CMGroup), + _task_id(task_id), _cm(cm), + _claimed(false), + _nextMarkBitMap(NULL), _hash_seed(17), + _task_queue(task_queue), + _task_queues(task_queues), + _oop_closure(NULL) { + guarantee( task_queue != NULL, "invariant" ); + guarantee( task_queues != NULL, "invariant" ); + + statsOnly( _clock_due_to_scanning = 0; + _clock_due_to_marking = 0 ); + + _marking_step_diffs_ms.add(0.5); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp 2009-08-01 04:20:58.591212487 +0100 @@ -0,0 +1,1049 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class G1CollectedHeap; +class CMTask; +typedef GenericTaskQueue CMTaskQueue; +typedef GenericTaskQueueSet CMTaskQueueSet; + +// A generic CM bit map. This is essentially a wrapper around the BitMap +// class, with one bit per (1<<_shifter) HeapWords. 
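The "one bit per (1 << _shifter) HeapWords" encoding mentioned above amounts to two shift/scale conversions between heap addresses and bit offsets. The following is a self-contained sketch of that arithmetic over ordinary integers; kShifter and kWordSize are illustrative choices, not G1's actual values (G1 derives the shifter from the minimum object alignment so that every object start has its own bit).

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr int         kShifter  = 0;               // one bit per heap word here
constexpr std::size_t kWordSize = sizeof(std::uint64_t);

struct MarkBitMap {
  std::uintptr_t    start;          // first covered address
  std::size_t       size_words;     // covered size, in heap words
  std::vector<bool> bits;

  MarkBitMap(std::uintptr_t s, std::size_t words)
      : start(s), size_words(words), bits(words >> kShifter, false) {}

  std::size_t addr_to_offset(std::uintptr_t addr) const {
    assert(addr >= start && addr < start + size_words * kWordSize);
    return ((addr - start) / kWordSize) >> kShifter;
  }
  std::uintptr_t offset_to_addr(std::size_t offset) const {
    return start + (offset << kShifter) * kWordSize;
  }
  void mark(std::uintptr_t addr)            { bits[addr_to_offset(addr)] = true; }
  bool is_marked(std::uintptr_t addr) const { return bits[addr_to_offset(addr)]; }
};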
+ +class CMBitMapRO VALUE_OBJ_CLASS_SPEC { + protected: + HeapWord* _bmStartWord; // base address of range covered by map + size_t _bmWordSize; // map size (in #HeapWords covered) + const int _shifter; // map to char or bit + VirtualSpace _virtual_space; // underlying the bit map + BitMap _bm; // the bit map itself + + public: + // constructor + CMBitMapRO(ReservedSpace rs, int shifter); + + enum { do_yield = true }; + + // inquiries + HeapWord* startWord() const { return _bmStartWord; } + size_t sizeInWords() const { return _bmWordSize; } + // the following is one past the last word in space + HeapWord* endWord() const { return _bmStartWord + _bmWordSize; } + + // read marks + + bool isMarked(HeapWord* addr) const { + assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), + "outside underlying space?"); + return _bm.at(heapWordToOffset(addr)); + } + + // iteration + bool iterate(BitMapClosure* cl) { return _bm.iterate(cl); } + bool iterate(BitMapClosure* cl, MemRegion mr); + + // Return the address corresponding to the next marked bit at or after + // "addr", and before "limit", if "limit" is non-NULL. If there is no + // such bit, returns "limit" if that is non-NULL, or else "endWord()". + HeapWord* getNextMarkedWordAddress(HeapWord* addr, + HeapWord* limit = NULL) const; + // Return the address corresponding to the next unmarked bit at or after + // "addr", and before "limit", if "limit" is non-NULL. If there is no + // such bit, returns "limit" if that is non-NULL, or else "endWord()". + HeapWord* getNextUnmarkedWordAddress(HeapWord* addr, + HeapWord* limit = NULL) const; + + // conversion utilities + // XXX Fix these so that offsets are size_t's... + HeapWord* offsetToHeapWord(size_t offset) const { + return _bmStartWord + (offset << _shifter); + } + size_t heapWordToOffset(HeapWord* addr) const { + return pointer_delta(addr, _bmStartWord) >> _shifter; + } + int heapWordDiffToOffsetDiff(size_t diff) const; + HeapWord* nextWord(HeapWord* addr) { + return offsetToHeapWord(heapWordToOffset(addr) + 1); + } + + void mostly_disjoint_range_union(BitMap* from_bitmap, + size_t from_start_index, + HeapWord* to_start_word, + size_t word_num); + + // debugging + NOT_PRODUCT(bool covers(ReservedSpace rs) const;) +}; + +class CMBitMap : public CMBitMapRO { + + public: + // constructor + CMBitMap(ReservedSpace rs, int shifter) : + CMBitMapRO(rs, shifter) {} + + // write marks + void mark(HeapWord* addr) { + assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), + "outside underlying space?"); + _bm.at_put(heapWordToOffset(addr), true); + } + void clear(HeapWord* addr) { + assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), + "outside underlying space?"); + _bm.at_put(heapWordToOffset(addr), false); + } + bool parMark(HeapWord* addr) { + assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), + "outside underlying space?"); + return _bm.par_at_put(heapWordToOffset(addr), true); + } + bool parClear(HeapWord* addr) { + assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), + "outside underlying space?"); + return _bm.par_at_put(heapWordToOffset(addr), false); + } + void markRange(MemRegion mr); + void clearAll(); + void clearRange(MemRegion mr); + + // Starting at the bit corresponding to "addr" (inclusive), find the next + // "1" bit, if any. This bit starts some run of consecutive "1"'s; find + // the end of this run (stopping at "end_addr"). 
Return the MemRegion + // covering from the start of the region corresponding to the first bit + // of the run to the end of the region corresponding to the last bit of + // the run. If there is no "1" bit at or after "addr", return an empty + // MemRegion. + MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr); +}; + +// Represents a marking stack used by the CM collector. +// Ideally this should be GrowableArray<> just like MSC's marking stack(s). +class CMMarkStack VALUE_OBJ_CLASS_SPEC { + ConcurrentMark* _cm; + oop* _base; // bottom of stack + jint _index; // one more than last occupied index + jint _capacity; // max #elements + jint _oops_do_bound; // Number of elements to include in next iteration. + NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run + + bool _overflow; + DEBUG_ONLY(bool _drain_in_progress;) + DEBUG_ONLY(bool _drain_in_progress_yields;) + + public: + CMMarkStack(ConcurrentMark* cm); + ~CMMarkStack(); + + void allocate(size_t size); + + oop pop() { + if (!isEmpty()) { + return _base[--_index] ; + } + return NULL; + } + + // If overflow happens, don't do the push, and record the overflow. + // *Requires* that "ptr" is already marked. + void push(oop ptr) { + if (isFull()) { + // Record overflow. + _overflow = true; + return; + } else { + _base[_index++] = ptr; + NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index)); + } + } + // Non-block impl. Note: concurrency is allowed only with other + // "par_push" operations, not with "pop" or "drain". We would need + // parallel versions of them if such concurrency was desired. + void par_push(oop ptr); + + // Pushes the first "n" elements of "ptr_arr" on the stack. + // Non-block impl. Note: concurrency is allowed only with other + // "par_adjoin_arr" or "push" operations, not with "pop" or "drain". + void par_adjoin_arr(oop* ptr_arr, int n); + + // Pushes the first "n" elements of "ptr_arr" on the stack. + // Locking impl: concurrency is allowed only with + // "par_push_arr" and/or "par_pop_arr" operations, which use the same + // locking strategy. + void par_push_arr(oop* ptr_arr, int n); + + // If returns false, the array was empty. Otherwise, removes up to "max" + // elements from the stack, and transfers them to "ptr_arr" in an + // unspecified order. The actual number transferred is given in "n" ("n + // == 0" is deliberately redundant with the return value.) Locking impl: + // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr" + // operations, which use the same locking strategy. + bool par_pop_arr(oop* ptr_arr, int max, int* n); + + // Drain the mark stack, applying the given closure to all fields of + // objects on the stack. (That is, continue until the stack is empty, + // even if closure applications add entries to the stack.) The "bm" + // argument, if non-null, may be used to verify that only marked objects + // are on the mark stack. If "yield_after" is "true", then the + // concurrent marker performing the drain offers to yield after + // processing each object. If a yield occurs, stops the drain operation + // and returns false. Otherwise, returns true. 
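The push/overflow behaviour of CMMarkStack, a fixed-capacity stack that raises a flag instead of growing, is easy to model in isolation. The single-threaded sketch below ignores par_push and the locked array transfers and shows only the overflow contract; the class name is invented.

#include <cstddef>
#include <vector>

template <typename T>
class BoundedMarkStack {
  std::vector<T> base_;             // fixed backing array
  std::size_t    index_    = 0;     // one past the last occupied slot
  bool           overflow_ = false;

 public:
  explicit BoundedMarkStack(std::size_t capacity) : base_(capacity) {}

  // A push onto a full stack is dropped and only records the overflow;
  // callers check overflow() afterwards and arrange for marking to restart.
  void push(const T& v) {
    if (index_ == base_.size()) { overflow_ = true; return; }
    base_[index_++] = v;
  }
  bool pop(T* out) {
    if (index_ == 0) return false;
    *out = base_[--index_];
    return true;
  }
  bool        is_empty() const { return index_ == 0; }
  bool        overflow() const { return overflow_; }
  void        clear_overflow() { overflow_ = false; }
  std::size_t size() const     { return index_; }
};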
+ template + bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false); + + bool isEmpty() { return _index == 0; } + bool isFull() { return _index == _capacity; } + int maxElems() { return _capacity; } + + bool overflow() { return _overflow; } + void clear_overflow() { _overflow = false; } + + int size() { return _index; } + + void setEmpty() { _index = 0; clear_overflow(); } + + // Record the current size; a subsequent "oops_do" will iterate only over + // indices valid at the time of this call. + void set_oops_do_bound(jint bound = -1) { + if (bound == -1) { + _oops_do_bound = _index; + } else { + _oops_do_bound = bound; + } + } + jint oops_do_bound() { return _oops_do_bound; } + // iterate over the oops in the mark stack, up to the bound recorded via + // the call above. + void oops_do(OopClosure* f); +}; + +class CMRegionStack VALUE_OBJ_CLASS_SPEC { + MemRegion* _base; + jint _capacity; + jint _index; + jint _oops_do_bound; + bool _overflow; +public: + CMRegionStack(); + ~CMRegionStack(); + void allocate(size_t size); + + // This is lock-free; assumes that it will only be called in parallel + // with other "push" operations (no pops). + void push(MemRegion mr); + + // Lock-free; assumes that it will only be called in parallel + // with other "pop" operations (no pushes). + MemRegion pop(); + + bool isEmpty() { return _index == 0; } + bool isFull() { return _index == _capacity; } + + bool overflow() { return _overflow; } + void clear_overflow() { _overflow = false; } + + int size() { return _index; } + + // It iterates over the entries in the region stack and it + // invalidates (i.e. assigns MemRegion()) the ones that point to + // regions in the collection set. + bool invalidate_entries_into_cset(); + + // This gives an upper bound up to which the iteration in + // invalidate_entries_into_cset() will reach. This prevents + // newly-added entries to be unnecessarily scanned. + void set_oops_do_bound() { + _oops_do_bound = _index; + } + + void setEmpty() { _index = 0; clear_overflow(); } +}; + +// this will enable a variety of different statistics per GC task +#define _MARKING_STATS_ 0 +// this will enable the higher verbose levels +#define _MARKING_VERBOSE_ 0 + +#if _MARKING_STATS_ +#define statsOnly(statement) \ +do { \ + statement ; \ +} while (0) +#else // _MARKING_STATS_ +#define statsOnly(statement) \ +do { \ +} while (0) +#endif // _MARKING_STATS_ + +// Some extra guarantees that I like to also enable in optimised mode +// when debugging. If you want to enable them, comment out the assert +// macro and uncomment out the guaratee macro +// #define tmp_guarantee_CM(expr, str) guarantee(expr, str) +#define tmp_guarantee_CM(expr, str) assert(expr, str) + +typedef enum { + no_verbose = 0, // verbose turned off + stats_verbose, // only prints stats at the end of marking + low_verbose, // low verbose, mostly per region and per major event + medium_verbose, // a bit more detailed than low + high_verbose // per object verbose +} CMVerboseLevel; + + +class ConcurrentMarkThread; + +class ConcurrentMark: public CHeapObj { + friend class ConcurrentMarkThread; + friend class CMTask; + friend class CMBitMapClosure; + friend class CSMarkOopClosure; + friend class CMGlobalObjectClosure; + friend class CMRemarkTask; + friend class CMConcurrentMarkingTask; + friend class G1ParNoteEndTask; + friend class CalcLiveObjectsClosure; + +protected: + ConcurrentMarkThread* _cmThread; // the thread doing the work + G1CollectedHeap* _g1h; // the heap. 
+ size_t _parallel_marking_threads; // the number of marking + // threads we'll use + double _sleep_factor; // how much we have to sleep, with + // respect to the work we just did, to + // meet the marking overhead goal + double _marking_task_overhead; // marking target overhead for + // a single task + + // same as the two above, but for the cleanup task + double _cleanup_sleep_factor; + double _cleanup_task_overhead; + + // Stuff related to age cohort processing. + struct ParCleanupThreadState { + char _pre[64]; + UncleanRegionList list; + char _post[64]; + }; + ParCleanupThreadState** _par_cleanup_thread_state; + + // CMS marking support structures + CMBitMap _markBitMap1; + CMBitMap _markBitMap2; + CMBitMapRO* _prevMarkBitMap; // completed mark bitmap + CMBitMap* _nextMarkBitMap; // under-construction mark bitmap + bool _at_least_one_mark_complete; + + BitMap _region_bm; + BitMap _card_bm; + + // Heap bounds + HeapWord* _heap_start; + HeapWord* _heap_end; + + // For gray objects + CMMarkStack _markStack; // Grey objects behind global finger. + CMRegionStack _regionStack; // Grey regions behind global finger. + HeapWord* volatile _finger; // the global finger, region aligned, + // always points to the end of the + // last claimed region + + // marking tasks + size_t _max_task_num; // maximum task number + size_t _active_tasks; // task num currently active + CMTask** _tasks; // task queue array (max_task_num len) + CMTaskQueueSet* _task_queues; // task queue set + ParallelTaskTerminator _terminator; // for termination + + // Two sync barriers that are used to synchronise tasks when an + // overflow occurs. The algorithm is the following. All tasks enter + // the first one to ensure that they have all stopped manipulating + // the global data structures. After they exit it, they re-initialise + // their data structures and task 0 re-initialises the global data + // structures. Then, they enter the second sync barrier. This + // ensure, that no task starts doing work before all data + // structures (local and global) have been re-initialised. When they + // exit it, they are free to start working again. + WorkGangBarrierSync _first_overflow_barrier_sync; + WorkGangBarrierSync _second_overflow_barrier_sync; + + + // this is set by any task, when an overflow on the global data + // structures is detected. + volatile bool _has_overflown; + // true: marking is concurrent, false: we're in remark + volatile bool _concurrent; + // set at the end of a Full GC so that marking aborts + volatile bool _has_aborted; + // used when remark aborts due to an overflow to indicate that + // another concurrent marking phase should start + volatile bool _restart_for_overflow; + + // This is true from the very start of concurrent marking until the + // point when all the tasks complete their work. It is really used + // to determine the points between the end of concurrent marking and + // time of remark. + volatile bool _concurrent_marking_in_progress; + + // verbose level + CMVerboseLevel _verbose_level; + + COTracker _cleanup_co_tracker; + + // These two fields are used to implement the optimisation that + // avoids pushing objects on the global/region stack if there are + // no collection set regions above the lowest finger. + + // This is the lowest finger (among the global and local fingers), + // which is calculated before a new collection set is chosen. + HeapWord* _min_finger; + // If this flag is true, objects/regions that are marked below the + // finger should be pushed on the stack(s). 
If this is flag is + // false, it is safe not to push them on the stack(s). + bool _should_gray_objects; + + // All of these times are in ms. + NumberSeq _init_times; + NumberSeq _remark_times; + NumberSeq _remark_mark_times; + NumberSeq _remark_weak_ref_times; + NumberSeq _cleanup_times; + double _total_counting_time; + double _total_rs_scrub_time; + + double* _accum_task_vtime; // accumulated task vtime + + WorkGang* _parallel_workers; + + void weakRefsWork(bool clear_all_soft_refs); + + void swapMarkBitMaps(); + + // It resets the global marking data structures, as well as the + // task local ones; should be called during initial mark. + void reset(); + // It resets all the marking data structures. + void clear_marking_state(); + + // It should be called to indicate which phase we're in (concurrent + // mark or remark) and how many threads are currently active. + void set_phase(size_t active_tasks, bool concurrent); + // We do this after we're done with marking so that the marking data + // structures are initialised to a sensible and predictable state. + void set_non_marking_state(); + + // prints all gathered CM-related statistics + void print_stats(); + + // accessor methods + size_t parallel_marking_threads() { return _parallel_marking_threads; } + double sleep_factor() { return _sleep_factor; } + double marking_task_overhead() { return _marking_task_overhead;} + double cleanup_sleep_factor() { return _cleanup_sleep_factor; } + double cleanup_task_overhead() { return _cleanup_task_overhead;} + + HeapWord* finger() { return _finger; } + bool concurrent() { return _concurrent; } + size_t active_tasks() { return _active_tasks; } + ParallelTaskTerminator* terminator() { return &_terminator; } + + // It claims the next available region to be scanned by a marking + // task. It might return NULL if the next region is empty or we have + // run out of regions. In the latter case, out_of_regions() + // determines whether we've really run out of regions or the task + // should call claim_region() again. This might seem a bit + // awkward. Originally, the code was written so that claim_region() + // either successfully returned with a non-empty region or there + // were no more regions to be claimed. The problem with this was + // that, in certain circumstances, it iterated over large chunks of + // the heap finding only empty regions and, while it was working, it + // was preventing the calling task to call its regular clock + // method. So, this way, each task will spend very little time in + // claim_region() and is allowed to call the regular clock method + // frequently. + HeapRegion* claim_region(int task); + + // It determines whether we've run out of regions to scan. 
+ bool out_of_regions() { return _finger == _heap_end; } + + // Returns the task with the given id + CMTask* task(int id) { + guarantee( 0 <= id && id < (int) _active_tasks, "task id not within " + "active bounds" ); + return _tasks[id]; + } + + // Returns the task queue with the given id + CMTaskQueue* task_queue(int id) { + guarantee( 0 <= id && id < (int) _active_tasks, "task queue id not within " + "active bounds" ); + return (CMTaskQueue*) _task_queues->queue(id); + } + + // Returns the task queue set + CMTaskQueueSet* task_queues() { return _task_queues; } + + // Access / manipulation of the overflow flag which is set to + // indicate that the global stack or region stack has overflown + bool has_overflown() { return _has_overflown; } + void set_has_overflown() { _has_overflown = true; } + void clear_has_overflown() { _has_overflown = false; } + + bool has_aborted() { return _has_aborted; } + bool restart_for_overflow() { return _restart_for_overflow; } + + // Methods to enter the two overflow sync barriers + void enter_first_sync_barrier(int task_num); + void enter_second_sync_barrier(int task_num); + +public: + // Manipulation of the global mark stack. + // Notice that the first mark_stack_push is CAS-based, whereas the + // two below are Mutex-based. This is OK since the first one is only + // called during evacuation pauses and doesn't compete with the + // other two (which are called by the marking tasks during + // concurrent marking or remark). + bool mark_stack_push(oop p) { + _markStack.par_push(p); + if (_markStack.overflow()) { + set_has_overflown(); + return false; + } + return true; + } + bool mark_stack_push(oop* arr, int n) { + _markStack.par_push_arr(arr, n); + if (_markStack.overflow()) { + set_has_overflown(); + return false; + } + return true; + } + void mark_stack_pop(oop* arr, int max, int* n) { + _markStack.par_pop_arr(arr, max, n); + } + size_t mark_stack_size() { return _markStack.size(); } + size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; } + bool mark_stack_overflow() { return _markStack.overflow(); } + bool mark_stack_empty() { return _markStack.isEmpty(); } + + // Manipulation of the region stack + bool region_stack_push(MemRegion mr) { + _regionStack.push(mr); + if (_regionStack.overflow()) { + set_has_overflown(); + return false; + } + return true; + } + MemRegion region_stack_pop() { return _regionStack.pop(); } + int region_stack_size() { return _regionStack.size(); } + bool region_stack_overflow() { return _regionStack.overflow(); } + bool region_stack_empty() { return _regionStack.isEmpty(); } + + bool concurrent_marking_in_progress() { + return _concurrent_marking_in_progress; + } + void set_concurrent_marking_in_progress() { + _concurrent_marking_in_progress = true; + } + void clear_concurrent_marking_in_progress() { + _concurrent_marking_in_progress = false; + } + + void update_accum_task_vtime(int i, double vtime) { + _accum_task_vtime[i] += vtime; + } + + double all_task_accum_vtime() { + double ret = 0.0; + for (int i = 0; i < (int)_max_task_num; ++i) + ret += _accum_task_vtime[i]; + return ret; + } + + // Attempts to steal an object from the task queues of other tasks + bool try_stealing(int task_num, int* hash_seed, oop& obj) { + return _task_queues->steal(task_num, hash_seed, obj); + } + + // It grays an object by first marking it. Then, if it's behind the + // global finger, it also pushes it on the global stack. 
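The "mark, and push only if the object is behind the global finger" rule from the comment above can be shown as a tiny standalone model; a hash set stands in for the bitmap, an explicit stack for the gray objects, and all names are invented.

#include <cstdint>
#include <stack>
#include <unordered_set>

// Objects at or above the global finger will still be visited when their
// region is claimed, so setting the mark bit is enough; objects below the
// finger have already been passed over, so they must be pushed explicitly.
struct GrayingModel {
  std::unordered_set<std::uintptr_t> marked;   // stands in for the mark bitmap
  std::stack<std::uintptr_t>         gray;     // stands in for the global stack
  std::uintptr_t                     finger;   // everything below this was claimed

  void deal_with_reference(std::uintptr_t obj) {
    if (!marked.insert(obj).second)
      return;                       // already marked: nothing more to do
    if (obj < finger)
      gray.push(obj);               // behind the finger: gray it explicitly
  }
};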
+ void deal_with_reference(oop obj); + + ConcurrentMark(ReservedSpace rs, int max_regions); + ~ConcurrentMark(); + ConcurrentMarkThread* cmThread() { return _cmThread; } + + CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; } + CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; } + + // The following three are interaction between CM and + // G1CollectedHeap + + // This notifies CM that a root during initial-mark needs to be + // grayed and it's MT-safe. Currently, we just mark it. But, in the + // future, we can experiment with pushing it on the stack and we can + // do this without changing G1CollectedHeap. + void grayRoot(oop p); + // It's used during evacuation pauses to gray a region, if + // necessary, and it's MT-safe. It assumes that the caller has + // marked any objects on that region. If _should_gray_objects is + // true and we're still doing concurrent marking, the region is + // pushed on the region stack, if it is located below the global + // finger, otherwise we do nothing. + void grayRegionIfNecessary(MemRegion mr); + // It's used during evacuation pauses to mark and, if necessary, + // gray a single object and it's MT-safe. It assumes the caller did + // not mark the object. If _should_gray_objects is true and we're + // still doing concurrent marking, the objects is pushed on the + // global stack, if it is located below the global finger, otherwise + // we do nothing. + void markAndGrayObjectIfNecessary(oop p); + + // This iterates over the bitmap of the previous marking and prints + // out all objects that are marked on the bitmap and indicates + // whether what they point to is also marked or not. + void print_prev_bitmap_reachable(); + + // Clear the next marking bitmap (will be called concurrently). + void clearNextBitmap(); + + // main CMS steps and related support + void checkpointRootsInitial(); + + // These two do the work that needs to be done before and after the + // initial root checkpoint. Since this checkpoint can be done at two + // different points (i.e. an explicit pause or piggy-backed on a + // young collection), then it's nice to be able to easily share the + // pre/post code. It might be the case that we can put everything in + // the post method. TP + void checkpointRootsInitialPre(); + void checkpointRootsInitialPost(); + + // Do concurrent phase of marking, to a tentative transitive closure. + void markFromRoots(); + + // Process all unprocessed SATB buffers. It is called at the + // beginning of an evacuation pause. + void drainAllSATBBuffers(); + + void checkpointRootsFinal(bool clear_all_soft_refs); + void checkpointRootsFinalWork(); + void calcDesiredRegions(); + void cleanup(); + void completeCleanup(); + + // Mark in the previous bitmap. NB: this is usually read-only, so use + // this carefully! + void markPrev(oop p); + void clear(oop p); + // Clears marks for all objects in the given range, for both prev and + // next bitmaps. NB: the previous bitmap is usually read-only, so use + // this carefully! + void clearRangeBothMaps(MemRegion mr); + + // Record the current top of the mark and region stacks; a + // subsequent oops_do() on the mark stack and + // invalidate_entries_into_cset() on the region stack will iterate + // only over indices valid at the time of this call. + void set_oops_do_bound() { + _markStack.set_oops_do_bound(); + _regionStack.set_oops_do_bound(); + } + // Iterate over the oops in the mark stack and all local queues. It + // also calls invalidate_entries_into_cset() on the region stack. 
+ void oops_do(OopClosure* f); + // It is called at the end of an evacuation pause during marking so + // that CM is notified of where the new end of the heap is. It + // doesn't do anything if concurrent_marking_in_progress() is false, + // unless the force parameter is true. + void update_g1_committed(bool force = false); + + void complete_marking_in_collection_set(); + + // It indicates that a new collection set is being chosen. + void newCSet(); + // It registers a collection set heap region with CM. This is used + // to determine whether any heap regions are located above the finger. + void registerCSetRegion(HeapRegion* hr); + + // Returns "true" if at least one mark has been completed. + bool at_least_one_mark_complete() { return _at_least_one_mark_complete; } + + bool isMarked(oop p) const { + assert(p != NULL && p->is_oop(), "expected an oop"); + HeapWord* addr = (HeapWord*)p; + assert(addr >= _nextMarkBitMap->startWord() || + addr < _nextMarkBitMap->endWord(), "in a region"); + + return _nextMarkBitMap->isMarked(addr); + } + + inline bool not_yet_marked(oop p) const; + + // XXX Debug code + bool containing_card_is_marked(void* p); + bool containing_cards_are_marked(void* start, void* last); + + bool isPrevMarked(oop p) const { + assert(p != NULL && p->is_oop(), "expected an oop"); + HeapWord* addr = (HeapWord*)p; + assert(addr >= _prevMarkBitMap->startWord() || + addr < _prevMarkBitMap->endWord(), "in a region"); + + return _prevMarkBitMap->isMarked(addr); + } + + inline bool do_yield_check(int worker_i = 0); + inline bool should_yield(); + + // Called to abort the marking cycle after a Full GC takes palce. + void abort(); + + void disable_co_trackers(); + + // This prints the global/local fingers. It is used for debugging. + NOT_PRODUCT(void print_finger();) + + void print_summary_info(); + + // The following indicate whether a given verbose level has been + // set. Notice that anything above stats is conditional to + // _MARKING_VERBOSE_ having been set to 1 + bool verbose_stats() + { return _verbose_level >= stats_verbose; } + bool verbose_low() + { return _MARKING_VERBOSE_ && _verbose_level >= low_verbose; } + bool verbose_medium() + { return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose; } + bool verbose_high() + { return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; } +}; + +// A class representing a marking task. 
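The prev/next bitmap pair consulted by isMarked() and isPrevMarked() above follows a simple life cycle: the next bitmap is built during the current cycle and becomes the prev bitmap once the cycle completes. A minimal sketch of that swap, with std::vector<bool> standing in for the real bitmaps and the type name invented:

#include <cstddef>
#include <utility>
#include <vector>

struct DualBitmaps {
  std::vector<bool> prev, next;     // last completed marking / marking in progress

  explicit DualBitmaps(std::size_t bits) : prev(bits, false), next(bits, false) {}

  void mark_next(std::size_t i)            { next[i] = true; }
  bool is_marked(std::size_t i) const      { return next[i]; }   // current cycle
  bool is_prev_marked(std::size_t i) const { return prev[i]; }   // last completed cycle

  // Completing a marking cycle promotes "next" to "prev" and starts the
  // new "next" from a clean slate.
  void swap_at_cycle_end() {
    std::swap(prev, next);
    next.assign(next.size(), false);
  }
};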
+class CMTask : public TerminatorTerminator { +private: + enum PrivateConstants { + // the regular clock call is called once the scanned words reaches + // this limit + words_scanned_period = 12*1024, + // the regular clock call is called once the number of visited + // references reaches this limit + refs_reached_period = 384, + // initial value for the hash seed, used in the work stealing code + init_hash_seed = 17, + // how many entries will be transferred between global stack and + // local queues + global_stack_transfer_size = 16 + }; + + int _task_id; + G1CollectedHeap* _g1h; + ConcurrentMark* _cm; + CMBitMap* _nextMarkBitMap; + // the task queue of this task + CMTaskQueue* _task_queue; + // the task queue set---needed for stealing + CMTaskQueueSet* _task_queues; + // indicates whether the task has been claimed---this is only for + // debugging purposes + bool _claimed; + + // number of calls to this task + int _calls; + + // concurrent overhead over a single CPU for this task + COTracker _co_tracker; + + // when the virtual timer reaches this time, the marking step should + // exit + double _time_target_ms; + // the start time of the current marking step + double _start_time_ms; + + // the oop closure used for iterations over oops + OopClosure* _oop_closure; + + // the region this task is scanning, NULL if we're not scanning any + HeapRegion* _curr_region; + // the local finger of this task, NULL if we're not scanning a region + HeapWord* _finger; + // limit of the region this task is scanning, NULL if we're not scanning one + HeapWord* _region_limit; + + // This is used only when we scan regions popped from the region + // stack. It records what the last object on such a region we + // scanned was. It is used to ensure that, if we abort region + // iteration, we do not rescan the first part of the region. This + // should be NULL when we're not scanning a region from the region + // stack. + HeapWord* _region_finger; + + // the number of words this task has scanned + size_t _words_scanned; + // When _words_scanned reaches this limit, the regular clock is + // called. Notice that this might be decreased under certain + // circumstances (i.e. when we believe that we did an expensive + // operation). + size_t _words_scanned_limit; + // the initial value of _words_scanned_limit (i.e. what it was + // before it was decreased). + size_t _real_words_scanned_limit; + + // the number of references this task has visited + size_t _refs_reached; + // When _refs_reached reaches this limit, the regular clock is + // called. Notice this this might be decreased under certain + // circumstances (i.e. when we believe that we did an expensive + // operation). + size_t _refs_reached_limit; + // the initial value of _refs_reached_limit (i.e. what it was before + // it was decreased). 
+ size_t _real_refs_reached_limit; + + // used by the work stealing stuff + int _hash_seed; + // if this is true, then the task has aborted for some reason + bool _has_aborted; + // set when the task aborts because it has met its time quota + bool _has_aborted_timed_out; + // true when we're draining SATB buffers; this avoids the task + // aborting due to SATB buffers being available (as we're already + // dealing with them) + bool _draining_satb_buffers; + + // number sequence of past step times + NumberSeq _step_times_ms; + // elapsed time of this task + double _elapsed_time_ms; + // termination time of this task + double _termination_time_ms; + // when this task got into the termination protocol + double _termination_start_time_ms; + + // true when the task is during a concurrent phase, false when it is + // in the remark phase (so, in the latter case, we do not have to + // check all the things that we have to check during the concurrent + // phase, i.e. SATB buffer availability...) + bool _concurrent; + + TruncatedSeq _marking_step_diffs_ms; + + // LOTS of statistics related with this task +#if _MARKING_STATS_ + NumberSeq _all_clock_intervals_ms; + double _interval_start_time_ms; + + int _aborted; + int _aborted_overflow; + int _aborted_cm_aborted; + int _aborted_yield; + int _aborted_timed_out; + int _aborted_satb; + int _aborted_termination; + + int _steal_attempts; + int _steals; + + int _clock_due_to_marking; + int _clock_due_to_scanning; + + int _local_pushes; + int _local_pops; + int _local_max_size; + int _objs_scanned; + + int _global_pushes; + int _global_pops; + int _global_max_size; + + int _global_transfers_to; + int _global_transfers_from; + + int _region_stack_pops; + + int _regions_claimed; + int _objs_found_on_bitmap; + + int _satb_buffers_processed; +#endif // _MARKING_STATS_ + + // it updates the local fields after this task has claimed + // a new region to scan + void setup_for_region(HeapRegion* hr); + // it brings up-to-date the limit of the region + void update_region_limit(); + // it resets the local fields after a task has finished scanning a + // region + void giveup_current_region(); + + // called when either the words scanned or the refs visited limit + // has been reached + void reached_limit(); + // recalculates the words scanned and refs visited limits + void recalculate_limits(); + // decreases the words scanned and refs visited limits when we reach + // an expensive operation + void decrease_limits(); + // it checks whether the words scanned or refs visited reached their + // respective limit and calls reached_limit() if they have + void check_limits() { + if (_words_scanned >= _words_scanned_limit || + _refs_reached >= _refs_reached_limit) + reached_limit(); + } + // this is supposed to be called regularly during a marking step as + // it checks a bunch of conditions that might cause the marking step + // to abort + void regular_clock_call(); + bool concurrent() { return _concurrent; } + +public: + // It resets the task; it should be called right at the beginning of + // a marking phase. + void reset(CMBitMap* _nextMarkBitMap); + // it clears all the fields that correspond to a claimed region. 
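The counter-driven clock scheme above (bump cheap counters while scanning, and only run the expensive checks once a counter passes its limit, as check_limits() does) looks like this in standalone form. The period constants are the ones from the enum earlier in the class; the re-arming policy shown here is a simplification of recalculate_limits().

#include <cstddef>

struct ClockBudget {
  std::size_t words_scanned = 0, words_limit = 12 * 1024;
  std::size_t refs_reached  = 0, refs_limit  = 384;

  void regular_clock_call() {
    // elapsed time vs. the target, SATB buffer availability, yield requests...
    words_limit = words_scanned + 12 * 1024;   // re-arm the budgets (simplified)
    refs_limit  = refs_reached  + 384;
  }
  void check_limits() {
    if (words_scanned >= words_limit || refs_reached >= refs_limit)
      regular_clock_call();
  }
  void on_object_scanned(std::size_t obj_words) { words_scanned += obj_words; check_limits(); }
  void on_reference_visited()                   { ++refs_reached;             check_limits(); }
};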
+ void clear_region_fields(); + + void set_concurrent(bool concurrent) { _concurrent = concurrent; } + + void enable_co_tracker() { + guarantee( !_co_tracker.enabled(), "invariant" ); + _co_tracker.enable(); + } + void disable_co_tracker() { + guarantee( _co_tracker.enabled(), "invariant" ); + _co_tracker.disable(); + } + bool co_tracker_enabled() { + return _co_tracker.enabled(); + } + void reset_co_tracker(double starting_conc_overhead = 0.0) { + _co_tracker.reset(starting_conc_overhead); + } + void start_co_tracker() { + _co_tracker.start(); + } + void update_co_tracker(bool force_end = false) { + _co_tracker.update(force_end); + } + + // The main method of this class which performs a marking step + // trying not to exceed the given duration. However, it might exit + // prematurely, according to some conditions (i.e. SATB buffers are + // available for processing). + void do_marking_step(double target_ms); + + // These two calls start and stop the timer + void record_start_time() { + _elapsed_time_ms = os::elapsedTime() * 1000.0; + } + void record_end_time() { + _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms; + } + + // returns the task ID + int task_id() { return _task_id; } + + // From TerminatorTerminator. It determines whether this task should + // exit the termination protocol after it's entered it. + virtual bool should_exit_termination(); + + HeapWord* finger() { return _finger; } + + bool has_aborted() { return _has_aborted; } + void set_has_aborted() { _has_aborted = true; } + void clear_has_aborted() { _has_aborted = false; } + bool claimed() { return _claimed; } + + void set_oop_closure(OopClosure* oop_closure) { + _oop_closure = oop_closure; + } + + // It grays the object by marking it and, if necessary, pushing it + // on the local queue + void deal_with_reference(oop obj); + + // It scans an object and visits its children. + void scan_object(oop obj) { + tmp_guarantee_CM( _nextMarkBitMap->isMarked((HeapWord*) obj), + "invariant" ); + + if (_cm->verbose_high()) + gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT, + _task_id, (void*) obj); + + size_t obj_size = obj->size(); + _words_scanned += obj_size; + + obj->oop_iterate(_oop_closure); + statsOnly( ++_objs_scanned ); + check_limits(); + } + + // It pushes an object on the local queue. + void push(oop obj); + + // These two move entries to/from the global stack. + void move_entries_to_global_stack(); + void get_entries_from_global_stack(); + + // It pops and scans objects from the local queue. If partially is + // true, then it stops when the queue size is of a given limit. If + // partially is false, then it stops when the queue is empty. + void drain_local_queue(bool partially); + // It moves entries from the global stack to the local queue and + // drains the local queue. If partially is true, then it stops when + // both the global stack and the local queue reach a given size. If + // partially if false, it tries to empty them totally. + void drain_global_stack(bool partially); + // It keeps picking SATB buffers and processing them until no SATB + // buffers are available. + void drain_satb_buffers(); + // It keeps popping regions from the region stack and processing + // them until the region stack is empty. 
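The difference between partial and total draining described above is just the stopping condition. A sketch with an ordinary deque follows; the target size is invented for the illustration, since the real code derives its own thresholds.

#include <cstddef>
#include <deque>

// Stop once the queue is back under a target size ("partial") or only when
// it is completely empty ("total"), so a partial drain lets the task get
// back to cheaper bitmap scanning sooner.
inline void drain_local_queue(std::deque<int>& queue, bool partially) {
  const std::size_t target = partially ? 20 : 0;
  while (queue.size() > target) {
    int obj = queue.back();
    queue.pop_back();
    (void)obj;                      // scanning obj here may push new entries
  }
}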
+ void drain_region_stack(BitMapClosure* closure); + + // moves the local finger to a new location + inline void move_finger_to(HeapWord* new_finger) { + tmp_guarantee_CM( new_finger >= _finger && new_finger < _region_limit, + "invariant" ); + _finger = new_finger; + } + + // moves the region finger to a new location + inline void move_region_finger_to(HeapWord* new_finger) { + tmp_guarantee_CM( new_finger < _cm->finger(), "invariant" ); + _region_finger = new_finger; + } + + CMTask(int task_num, ConcurrentMark *cm, + CMTaskQueue* task_queue, CMTaskQueueSet* task_queues); + + // it prints statistics associated with this task + void print_stats(); + +#if _MARKING_STATS_ + void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; } +#endif // _MARKING_STATS_ +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp 2009-08-01 04:20:59.042369239 +0100 @@ -0,0 +1,320 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +#include "incls/_precompiled.incl" +#include "incls/_concurrentMarkThread.cpp.incl" + +// ======= Concurrent Mark Thread ======== + +// The CM thread is created when the G1 garbage collector is used + +SurrogateLockerThread* + ConcurrentMarkThread::_slt = NULL; + +ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) : + ConcurrentGCThread(), + _cm(cm), + _started(false), + _in_progress(false), + _vtime_accum(0.0), + _vtime_mark_accum(0.0), + _vtime_count_accum(0.0) +{ + create_and_start(); +} + +class CMCheckpointRootsInitialClosure: public VoidClosure { + + ConcurrentMark* _cm; +public: + + CMCheckpointRootsInitialClosure(ConcurrentMark* cm) : + _cm(cm) {} + + void do_void(){ + _cm->checkpointRootsInitial(); + } +}; + +class CMCheckpointRootsFinalClosure: public VoidClosure { + + ConcurrentMark* _cm; +public: + + CMCheckpointRootsFinalClosure(ConcurrentMark* cm) : + _cm(cm) {} + + void do_void(){ + _cm->checkpointRootsFinal(false); // !clear_all_soft_refs + } +}; + +class CMCleanUp: public VoidClosure { + ConcurrentMark* _cm; +public: + + CMCleanUp(ConcurrentMark* cm) : + _cm(cm) {} + + void do_void(){ + _cm->cleanup(); + } +}; + + + +void ConcurrentMarkThread::run() { + initialize_in_thread(); + _vtime_start = os::elapsedVTime(); + wait_for_universe_init(); + + G1CollectedHeap* g1 = G1CollectedHeap::heap(); + G1CollectorPolicy* g1_policy = g1->g1_policy(); + G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker(); + Thread *current_thread = Thread::current(); + + while (!_should_terminate) { + // wait until started is set. + sleepBeforeNextCycle(); + { + ResourceMark rm; + HandleMark hm; + double cycle_start = os::elapsedVTime(); + double mark_start_sec = os::elapsedTime(); + char verbose_str[128]; + + if (PrintGC) { + gclog_or_tty->date_stamp(PrintGCDateStamps); + gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->print_cr("[GC concurrent-mark-start]"); + } + + if (!g1_policy->in_young_gc_mode()) { + // this ensures the flag is not set if we bail out of the marking + // cycle; normally the flag is cleared immediately after cleanup + g1->set_marking_complete(); + + if (g1_policy->adaptive_young_list_length()) { + double now = os::elapsedTime(); + double init_prediction_ms = g1_policy->predict_init_time_ms(); + jlong sleep_time_ms = mmu_tracker->when_ms(now, init_prediction_ms); + os::sleep(current_thread, sleep_time_ms, false); + } + + // We don't have to skip here if we've been asked to restart, because + // in the worst case we just enqueue a new VM operation to start a + // marking. 
Note that the init operation resets has_aborted() + CMCheckpointRootsInitialClosure init_cl(_cm); + strcpy(verbose_str, "GC initial-mark"); + VM_CGC_Operation op(&init_cl, verbose_str); + VMThread::execute(&op); + } + + int iter = 0; + do { + iter++; + if (!cm()->has_aborted()) { + _cm->markFromRoots(); + } + + double mark_end_time = os::elapsedVTime(); + double mark_end_sec = os::elapsedTime(); + _vtime_mark_accum += (mark_end_time - cycle_start); + if (!cm()->has_aborted()) { + if (g1_policy->adaptive_young_list_length()) { + double now = os::elapsedTime(); + double remark_prediction_ms = g1_policy->predict_remark_time_ms(); + jlong sleep_time_ms = mmu_tracker->when_ms(now, remark_prediction_ms); + os::sleep(current_thread, sleep_time_ms, false); + } + + if (PrintGC) { + gclog_or_tty->date_stamp(PrintGCDateStamps); + gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf sec]", + mark_end_sec - mark_start_sec); + } + + CMCheckpointRootsFinalClosure final_cl(_cm); + sprintf(verbose_str, "GC remark"); + VM_CGC_Operation op(&final_cl, verbose_str); + VMThread::execute(&op); + } + if (cm()->restart_for_overflow() && + G1TraceMarkStackOverflow) { + gclog_or_tty->print_cr("Restarting conc marking because of MS overflow " + "in remark (restart #%d).", iter); + } + + if (cm()->restart_for_overflow()) { + if (PrintGC) { + gclog_or_tty->date_stamp(PrintGCDateStamps); + gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]"); + } + } + } while (cm()->restart_for_overflow()); + double counting_start_time = os::elapsedVTime(); + + // YSR: These look dubious (i.e. redundant) !!! FIX ME + slt()->manipulatePLL(SurrogateLockerThread::acquirePLL); + slt()->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL); + + if (!cm()->has_aborted()) { + double count_start_sec = os::elapsedTime(); + if (PrintGC) { + gclog_or_tty->date_stamp(PrintGCDateStamps); + gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->print_cr("[GC concurrent-count-start]"); + } + + _sts.join(); + _cm->calcDesiredRegions(); + _sts.leave(); + + if (!cm()->has_aborted()) { + double count_end_sec = os::elapsedTime(); + if (PrintGC) { + gclog_or_tty->date_stamp(PrintGCDateStamps); + gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->print_cr("[GC concurrent-count-end, %1.7lf]", + count_end_sec - count_start_sec); + } + } + } + double end_time = os::elapsedVTime(); + _vtime_count_accum += (end_time - counting_start_time); + // Update the total virtual time before doing this, since it will try + // to measure it to get the vtime for this marking. We purposely + // neglect the presumably-short "completeCleanup" phase here. 
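The retry loop that the code above implements (a concurrent marking pass, then the remark pause, and around again if remark saw a mark-stack overflow) reduces to the control flow below. The helper is a hypothetical stand-in that simulates a single overflow, not a real marking phase.

#include <cstdio>

// Stand-in for markFromRoots() followed by the remark VM operation;
// returning true means remark detected a mark-stack overflow.
bool mark_and_remark() {
  static int pending_overflows = 1;
  return pending_overflows-- > 0;
}

void concurrent_mark_cycle() {
  int iter = 0;
  bool restart;
  do {
    iter++;
    restart = mark_and_remark();
    if (restart)
      std::printf("Restarting conc marking because of overflow (restart #%d).\n", iter);
  } while (restart);
  // only after remark succeeds does the thread move on to counting and cleanup
}

int main() { concurrent_mark_cycle(); }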
+ _vtime_accum = (end_time - _vtime_start); + if (!cm()->has_aborted()) { + if (g1_policy->adaptive_young_list_length()) { + double now = os::elapsedTime(); + double cleanup_prediction_ms = g1_policy->predict_cleanup_time_ms(); + jlong sleep_time_ms = mmu_tracker->when_ms(now, cleanup_prediction_ms); + os::sleep(current_thread, sleep_time_ms, false); + } + + CMCleanUp cl_cl(_cm); + sprintf(verbose_str, "GC cleanup"); + VM_CGC_Operation op(&cl_cl, verbose_str); + VMThread::execute(&op); + } else { + G1CollectedHeap::heap()->set_marking_complete(); + } + + if (!cm()->has_aborted()) { + double cleanup_start_sec = os::elapsedTime(); + if (PrintGC) { + gclog_or_tty->date_stamp(PrintGCDateStamps); + gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->print_cr("[GC concurrent-cleanup-start]"); + } + + // Now do the remainder of the cleanup operation. + _sts.join(); + _cm->completeCleanup(); + if (!cm()->has_aborted()) { + g1_policy->record_concurrent_mark_cleanup_completed(); + + double cleanup_end_sec = os::elapsedTime(); + if (PrintGC) { + gclog_or_tty->date_stamp(PrintGCDateStamps); + gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]", + cleanup_end_sec - cleanup_start_sec); + } + } + _sts.leave(); + } + // We're done: no more unclean regions coming. + G1CollectedHeap::heap()->set_unclean_regions_coming(false); + + if (cm()->has_aborted()) { + if (PrintGC) { + gclog_or_tty->date_stamp(PrintGCDateStamps); + gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->print_cr("[GC concurrent-mark-abort]"); + } + } + + _sts.join(); + _cm->disable_co_trackers(); + _sts.leave(); + + // we now want to allow clearing of the marking bitmap to be + // suspended by a collection pause. + _sts.join(); + _cm->clearNextBitmap(); + _sts.leave(); + } + } + assert(_should_terminate, "just checking"); + + terminate(); +} + + +void ConcurrentMarkThread::yield() { + _sts.yield("Concurrent Mark"); +} + +void ConcurrentMarkThread::stop() { + // it is ok to take late safepoints here, if needed + MutexLockerEx mu(Terminator_lock); + _should_terminate = true; + while (!_has_terminated) { + Terminator_lock->wait(); + } +} + +void ConcurrentMarkThread::print() { + gclog_or_tty->print("\"Concurrent Mark GC Thread\" "); + Thread::print(); + gclog_or_tty->cr(); +} + +void ConcurrentMarkThread::sleepBeforeNextCycle() { + clear_in_progress(); + // We join here because we don't want to do the "shouldConcurrentMark()" + // below while the world is otherwise stopped. + MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); + while (!started()) { + CGC_lock->wait(Mutex::_no_safepoint_check_flag); + } + set_in_progress(); + clear_started(); +} + +// Note: this method, although exported by the ConcurrentMarkSweepThread, +// which is a non-JavaThread, can only be called by a JavaThread. +// Currently this is done at vm creation time (post-vm-init) by the +// main/Primordial (Java)Thread. +// XXX Consider changing this in the future to allow the CMS thread +// itself to create this thread? +void ConcurrentMarkThread::makeSurrogateLockerThread(TRAPS) { + assert(_slt == NULL, "SLT already created"); + _slt = SurrogateLockerThread::make(THREAD); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp 2009-08-01 04:21:00.190238023 +0100 @@ -0,0 +1,84 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// The Concurrent Mark GC Thread (could be several in the future). +// This is copied from the Concurrent Mark Sweep GC Thread +// Still under construction. + +class ConcurrentMark; + +class ConcurrentMarkThread: public ConcurrentGCThread { + friend class VMStructs; + + double _vtime_start; // Initial virtual time. + double _vtime_accum; // Accumulated virtual time. + + double _vtime_mark_accum; + double _vtime_count_accum; + + public: + virtual void run(); + + private: + ConcurrentMark* _cm; + bool _started; + bool _in_progress; + + void sleepBeforeNextCycle(); + + static SurrogateLockerThread* _slt; + + public: + // Constructor + ConcurrentMarkThread(ConcurrentMark* cm); + + static void makeSurrogateLockerThread(TRAPS); + static SurrogateLockerThread* slt() { return _slt; } + + // Printing + void print(); + + // Total virtual time so far. + double vtime_accum(); + // Marking virtual time so far + double vtime_mark_accum(); + // Counting virtual time so far. + double vtime_count_accum() { return _vtime_count_accum; } + + ConcurrentMark* cm() { return _cm; } + + void set_started() { _started = true; } + void clear_started() { _started = false; } + bool started() { return _started; } + + void set_in_progress() { _in_progress = true; } + void clear_in_progress() { _in_progress = false; } + bool in_progress() { return _in_progress; } + + // Yield for GC + void yield(); + + // shutdown + static void stop(); +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.inline.hpp 2009-08-01 04:21:00.847159749 +0100 @@ -0,0 +1,33 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + + // Total virtual time so far. +inline double ConcurrentMarkThread::vtime_accum() { + return _vtime_accum + _cm->all_task_accum_vtime(); +} + +// Marking virtual time so far +inline double ConcurrentMarkThread::vtime_mark_accum() { + return _vtime_mark_accum + _cm->all_task_accum_vtime(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/concurrentZFThread.cpp 2009-08-01 04:21:01.256645841 +0100 @@ -0,0 +1,191 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_concurrentZFThread.cpp.incl" + +// ======= Concurrent Zero-Fill Thread ======== + +// The CM thread is created when the G1 garbage collector is used + +int ConcurrentZFThread::_region_allocs = 0; +int ConcurrentZFThread::_sync_zfs = 0; +int ConcurrentZFThread::_zf_waits = 0; +int ConcurrentZFThread::_regions_filled = 0; + +ConcurrentZFThread::ConcurrentZFThread() : + ConcurrentGCThread(), + _co_tracker(G1ZFGroup) +{ + create_and_start(); +} + +void ConcurrentZFThread::wait_for_ZF_completed(HeapRegion* hr) { + assert(ZF_mon->owned_by_self(), "Precondition."); + note_zf_wait(); + while (hr->zero_fill_state() == HeapRegion::ZeroFilling) { + ZF_mon->wait(Mutex::_no_safepoint_check_flag); + } +} + +void ConcurrentZFThread::processHeapRegion(HeapRegion* hr) { + assert(!Universe::heap()->is_gc_active(), + "This should not happen during GC."); + assert(hr != NULL, "Precondition"); + // These are unlocked reads, but if this test is successful, then no + // other thread will attempt this zero filling. Only a GC thread can + // modify the ZF state of a region whose state is zero-filling, and this + // should only happen while the ZF thread is locking out GC. 
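wait_for_ZF_completed() above is a classic monitor wait: re-check the region's state in a loop, sleeping on the monitor until the zero-filling thread moves the region out of the ZeroFilling state and notifies. The same shape with standard primitives, with the state enum and all names invented for the sketch:

#include <condition_variable>
#include <mutex>

enum class ZFState { NotZeroFilled, ZeroFilling, ZeroFilled, Allocated };

struct Region { ZFState state = ZFState::NotZeroFilled; };

std::mutex              zf_mon;
std::condition_variable zf_cv;

// Re-check the region's state in a loop, sleeping on the monitor until it
// is no longer being zero-filled.
void wait_for_zf_completed(Region& r) {
  std::unique_lock<std::mutex> lk(zf_mon);
  zf_cv.wait(lk, [&] { return r.state != ZFState::ZeroFilling; });
}

// Called by the zero-filling thread when it finishes a region.
void finish_zero_fill(Region& r) {
  {
    std::lock_guard<std::mutex> g(zf_mon);
    r.state = ZFState::ZeroFilled;
  }
  zf_cv.notify_all();
}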
+ if (hr->zero_fill_state() == HeapRegion::ZeroFilling + && hr->zero_filler() == Thread::current()) { + assert(hr->top() == hr->bottom(), "better be empty!"); + assert(!hr->isHumongous(), "Only free regions on unclean list."); + Copy::fill_to_words(hr->bottom(), hr->capacity()/HeapWordSize); + note_region_filled(); + } +} + +void ConcurrentZFThread::run() { + initialize_in_thread(); + Thread* thr_self = Thread::current(); + _vtime_start = os::elapsedVTime(); + wait_for_universe_init(); + _co_tracker.enable(); + _co_tracker.start(); + + G1CollectedHeap* g1 = G1CollectedHeap::heap(); + _sts.join(); + while (!_should_terminate) { + _sts.leave(); + + { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + + // This local variable will hold a region being zero-filled. This + // region will be on neither the unclean nor the zero-filled lists, and + // will not be available for allocation; thus, we might have an + // allocation fail, causing a full GC, because of this, but this is a + // price we will pay. (In future, we might want to make the fact + // that there's a region being zero-filled apparent to the G1 heap, + // which could then wait for it in this extreme case...) + HeapRegion* to_fill; + + while (!g1->should_zf() + || (to_fill = g1->pop_unclean_region_list_locked()) == NULL) + ZF_mon->wait(Mutex::_no_safepoint_check_flag); + while (to_fill->zero_fill_state() == HeapRegion::ZeroFilling) + ZF_mon->wait(Mutex::_no_safepoint_check_flag); + + // So now to_fill is non-NULL and is not ZeroFilling. It might be + // Allocated or ZeroFilled. (The latter could happen if this thread + // starts the zero-filling of a region, but a GC intervenes and + // pushes new regions needing zero-filling on the front of the list.) + + switch (to_fill->zero_fill_state()) { + case HeapRegion::Allocated: + to_fill = NULL; + break; + + case HeapRegion::NotZeroFilled: + to_fill->set_zero_fill_in_progress(thr_self); + + ZF_mon->unlock(); + _sts.join(); + processHeapRegion(to_fill); + _sts.leave(); + ZF_mon->lock_without_safepoint_check(); + + if (to_fill->zero_fill_state() == HeapRegion::ZeroFilling + && to_fill->zero_filler() == thr_self) { + to_fill->set_zero_fill_complete(); + (void)g1->put_free_region_on_list_locked(to_fill); + } + break; + + case HeapRegion::ZeroFilled: + (void)g1->put_free_region_on_list_locked(to_fill); + break; + + case HeapRegion::ZeroFilling: + ShouldNotReachHere(); + break; + } + } + _vtime_accum = (os::elapsedVTime() - _vtime_start); + _sts.join(); + + _co_tracker.update(); + } + _co_tracker.update(false); + _sts.leave(); + + assert(_should_terminate, "just checking"); + terminate(); +} + +bool ConcurrentZFThread::offer_yield() { + if (_sts.should_yield()) { + _sts.yield("Concurrent ZF"); + return true; + } else { + return false; + } +} + +void ConcurrentZFThread::stop() { + // it is ok to take late safepoints here, if needed + MutexLockerEx mu(Terminator_lock); + _should_terminate = true; + while (!_has_terminated) { + Terminator_lock->wait(); + } +} + +void ConcurrentZFThread::print() { + gclog_or_tty->print("\"Concurrent ZF Thread\" "); + Thread::print(); + gclog_or_tty->cr(); +} + + +double ConcurrentZFThread::_vtime_accum; + +void ConcurrentZFThread::print_summary_info() { + gclog_or_tty->print("\nConcurrent Zero-Filling:\n"); + gclog_or_tty->print(" Filled %d regions, used %5.2fs.\n", + _regions_filled, + vtime_accum()); + gclog_or_tty->print(" Of %d region allocs, %d (%5.2f%%) required sync ZF,\n", + _region_allocs, _sync_zfs, + (_region_allocs > 0 ?
+ (float)_sync_zfs/(float)_region_allocs*100.0 : + 0.0)); + gclog_or_tty->print(" and %d (%5.2f%%) required a ZF wait.\n", + _zf_waits, + (_region_allocs > 0 ? + (float)_zf_waits/(float)_region_allocs*100.0 : + 0.0)); + +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/concurrentZFThread.hpp 2009-08-01 04:21:01.670104611 +0100 @@ -0,0 +1,85 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// The Concurrent ZF Thread. Performs concurrent zero-filling. + +class ConcurrentZFThread: public ConcurrentGCThread { + friend class VMStructs; + friend class ZeroFillRegionClosure; + + private: + + // Zero fill the heap region. + void processHeapRegion(HeapRegion* r); + + // Stats + // Allocation (protected by heap lock). + static int _region_allocs; // Number of regions allocated + static int _sync_zfs; // Synchronous zero-fills + + static int _zf_waits; // Wait for conc zero-fill completion. + + // Number of regions CFZ thread fills. + static int _regions_filled; + + COTracker _co_tracker; + + double _vtime_start; // Initial virtual time. + + // These are static because the "print_summary_info" method is, and + // it currently assumes there is only one ZF thread. We'll change when + // we need to. + static double _vtime_accum; // Initial virtual time. + static double vtime_accum() { return _vtime_accum; } + + // Offer yield for GC. Returns true if yield occurred. + bool offer_yield(); + + public: + // Constructor + ConcurrentZFThread(); + + // Main loop. + virtual void run(); + + // Printing + void print(); + + // Waits until "r" has been zero-filled. Requires caller to hold the + // ZF_mon. + static void wait_for_ZF_completed(HeapRegion* r); + + // Get or clear the current unclean region. Should be done + // while holding the ZF_needed_mon lock. + + // shutdown + static void stop(); + + // Stats + static void note_region_alloc() {_region_allocs++; } + static void note_sync_zfs() { _sync_zfs++; } + static void note_zf_wait() { _zf_waits++; } + static void note_region_filled() { _regions_filled++; } + + static void print_summary_info(); +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp 2009-08-01 04:21:02.112855686 +0100 @@ -0,0 +1,308 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +# include "incls/_precompiled.incl" +# include "incls/_dirtyCardQueue.cpp.incl" + +bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl, + bool consume, + size_t worker_i) { + bool res = true; + if (_buf != NULL) { + res = apply_closure_to_buffer(cl, _buf, _index, _sz, + consume, + (int) worker_i); + if (res && consume) _index = _sz; + } + return res; +} + +bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl, + void** buf, + size_t index, size_t sz, + bool consume, + int worker_i) { + if (cl == NULL) return true; + for (size_t i = index; i < sz; i += oopSize) { + int ind = byte_index_to_index((int)i); + jbyte* card_ptr = (jbyte*)buf[ind]; + if (card_ptr != NULL) { + // Set the entry to null, so we don't do it again (via the test + // above) if we reconsider this buffer. + if (consume) buf[ind] = NULL; + if (!cl->do_card_ptr(card_ptr, worker_i)) return false; + } + } + return true; +} + +#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away +#pragma warning( disable:4355 ) // 'this' : used in base member initializer list +#endif // _MSC_VER + +DirtyCardQueueSet::DirtyCardQueueSet() : + PtrQueueSet(true /*notify_when_complete*/), + _closure(NULL), + _shared_dirty_card_queue(this, true /*perm*/), + _free_ids(NULL), + _processed_buffers_mut(0), _processed_buffers_rs_thread(0) +{ + _all_active = true; +} + +size_t DirtyCardQueueSet::num_par_ids() { + return MAX2(ParallelGCThreads, (size_t)2); +} + + +void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock, + int max_completed_queue, + Mutex* lock, PtrQueueSet* fl_owner) { + PtrQueueSet::initialize(cbl_mon, fl_lock, max_completed_queue, fl_owner); + set_buffer_size(DCQBarrierQueueBufferSize); + set_process_completed_threshold(DCQBarrierProcessCompletedThreshold); + + _shared_dirty_card_queue.set_lock(lock); + _free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon); + bool b = _free_ids->claim_perm_id(0); + guarantee(b, "Must reserve id zero for concurrent refinement thread."); +} + +void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) { + t->dirty_card_queue().handle_zero_index(); +} + +void DirtyCardQueueSet::set_closure(CardTableEntryClosure* closure) { + _closure = closure; +} + +void DirtyCardQueueSet::iterate_closure_all_threads(bool consume, + size_t worker_i) { + assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); + for(JavaThread* t = Threads::first(); t; t = t->next()) { + bool b = t->dirty_card_queue().apply_closure(_closure, consume); + guarantee(b, "Should not be interrupted."); + } + bool b = 
shared_dirty_card_queue()->apply_closure(_closure, + consume, + worker_i); + guarantee(b, "Should not be interrupted."); +} + +bool DirtyCardQueueSet::mut_process_buffer(void** buf) { + + // Used to determine if we had already claimed a par_id + // before entering this method. + bool already_claimed = false; + + // We grab the current JavaThread. + JavaThread* thread = JavaThread::current(); + + // We get the par_id that this thread might have already claimed + // (-1 if it has not claimed one). + int worker_i = thread->get_claimed_par_id(); + + // If worker_i is not -1 then the thread has already claimed + // a par_id. We make note of it using the already_claimed value. + if (worker_i != -1) { + already_claimed = true; + } else { + + // Otherwise we need to claim a par id + worker_i = _free_ids->claim_par_id(); + + // And store the par_id value in the thread + thread->set_claimed_par_id(worker_i); + } + + bool b = false; + if (worker_i != -1) { + b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0, + _sz, true, worker_i); + if (b) Atomic::inc(&_processed_buffers_mut); + + // If we had not claimed an id before entering the method + // then we must release the id. + if (!already_claimed) { + + // we release the id + _free_ids->release_par_id(worker_i); + + // and set the claimed_id in the thread to -1 + thread->set_claimed_par_id(-1); + } + } + return b; +} + +DirtyCardQueueSet::CompletedBufferNode* +DirtyCardQueueSet::get_completed_buffer_lock(int stop_at) { + CompletedBufferNode* nd = NULL; + MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); + + if ((int)_n_completed_buffers <= stop_at) { + _process_completed = false; + return NULL; + } + + if (_completed_buffers_head != NULL) { + nd = _completed_buffers_head; + _completed_buffers_head = nd->next; + if (_completed_buffers_head == NULL) + _completed_buffers_tail = NULL; + _n_completed_buffers--; + } + debug_only(assert_completed_buffer_list_len_correct_locked()); + return nd; +} + +// We only do this in contexts where there is no concurrent enqueueing. +DirtyCardQueueSet::CompletedBufferNode* +DirtyCardQueueSet::get_completed_buffer_CAS() { + CompletedBufferNode* nd = _completed_buffers_head; + + while (nd != NULL) { + CompletedBufferNode* next = nd->next; + CompletedBufferNode* result = + (CompletedBufferNode*)Atomic::cmpxchg_ptr(next, + &_completed_buffers_head, + nd); + if (result == nd) { + return result; + } else { + nd = _completed_buffers_head; + } + } + assert(_completed_buffers_head == NULL, "Loop post"); + _completed_buffers_tail = NULL; + return NULL; +} + +bool DirtyCardQueueSet:: +apply_closure_to_completed_buffer_helper(int worker_i, + CompletedBufferNode* nd) { + if (nd != NULL) { + bool b = + DirtyCardQueue::apply_closure_to_buffer(_closure, nd->buf, + nd->index, _sz, + true, worker_i); + void** buf = nd->buf; + size_t index = nd->index; + delete nd; + if (b) { + deallocate_buffer(buf); + return true; // In normal case, go on to next buffer.
+ } else { + enqueue_complete_buffer(buf, index, true); + return false; + } + } else { + return false; + } +} + +bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i, + int stop_at, + bool with_CAS) +{ + CompletedBufferNode* nd = NULL; + if (with_CAS) { + guarantee(stop_at == 0, "Precondition"); + nd = get_completed_buffer_CAS(); + } else { + nd = get_completed_buffer_lock(stop_at); + } + bool res = apply_closure_to_completed_buffer_helper(worker_i, nd); + if (res) _processed_buffers_rs_thread++; + return res; +} + +void DirtyCardQueueSet::apply_closure_to_all_completed_buffers() { + CompletedBufferNode* nd = _completed_buffers_head; + while (nd != NULL) { + bool b = + DirtyCardQueue::apply_closure_to_buffer(_closure, nd->buf, 0, _sz, + false); + guarantee(b, "Should not stop early."); + nd = nd->next; + } +} + +void DirtyCardQueueSet::abandon_logs() { + assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); + CompletedBufferNode* buffers_to_delete = NULL; + { + MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); + while (_completed_buffers_head != NULL) { + CompletedBufferNode* nd = _completed_buffers_head; + _completed_buffers_head = nd->next; + nd->next = buffers_to_delete; + buffers_to_delete = nd; + } + _n_completed_buffers = 0; + _completed_buffers_tail = NULL; + debug_only(assert_completed_buffer_list_len_correct_locked()); + } + while (buffers_to_delete != NULL) { + CompletedBufferNode* nd = buffers_to_delete; + buffers_to_delete = nd->next; + deallocate_buffer(nd->buf); + delete nd; + } + // Since abandon is done only at safepoints, we can safely manipulate + // these queues. + for (JavaThread* t = Threads::first(); t; t = t->next()) { + t->dirty_card_queue().reset(); + } + shared_dirty_card_queue()->reset(); +} + + +void DirtyCardQueueSet::concatenate_logs() { + // Iterate over all the threads, if we find a partial log add it to + // the global list of logs. Temporarily turn off the limit on the number + // of outstanding buffers. + int save_max_completed_queue = _max_completed_queue; + _max_completed_queue = max_jint; + assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); + for (JavaThread* t = Threads::first(); t; t = t->next()) { + DirtyCardQueue& dcq = t->dirty_card_queue(); + if (dcq.size() != 0) { + void **buf = t->dirty_card_queue().get_buf(); + // We must NULL out the unused entries, then enqueue. + for (size_t i = 0; i < t->dirty_card_queue().get_index(); i += oopSize) { + buf[PtrQueue::byte_index_to_index((int)i)] = NULL; + } + enqueue_complete_buffer(dcq.get_buf(), dcq.get_index()); + dcq.reinitialize(); + } + } + if (_shared_dirty_card_queue.size() != 0) { + enqueue_complete_buffer(_shared_dirty_card_queue.get_buf(), + _shared_dirty_card_queue.get_index()); + _shared_dirty_card_queue.reinitialize(); + } + // Restore the completed buffer queue limit. + _max_completed_queue = save_max_completed_queue; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp 2009-08-01 04:21:02.516487317 +0100 @@ -0,0 +1,152 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class FreeIdSet; + +// A closure class for processing card table entries. Note that we don't +// require these closure objects to be stack-allocated. +class CardTableEntryClosure: public CHeapObj { +public: + // Process the card whose card table entry is "card_ptr". If returns + // "false", terminate the iteration early. + virtual bool do_card_ptr(jbyte* card_ptr, int worker_i = 0) = 0; +}; + +// A ptrQueue whose elements are "oops", pointers to object heads. +class DirtyCardQueue: public PtrQueue { +public: + DirtyCardQueue(PtrQueueSet* qset_, bool perm = false) : + PtrQueue(qset_, perm) + { + // Dirty card queues are always active. + _active = true; + } + // Apply the closure to all elements, and reset the index to make the + // buffer empty. If a closure application returns "false", return + // "false" immediately, halting the iteration. If "consume" is true, + // deletes processed entries from logs. + bool apply_closure(CardTableEntryClosure* cl, + bool consume = true, + size_t worker_i = 0); + + // Apply the closure to all elements of "buf", down to "index" + // (inclusive.) If returns "false", then a closure application returned + // "false", and we return immediately. If "consume" is true, entries are + // set to NULL as they are processed, so they will not be processed again + // later. + static bool apply_closure_to_buffer(CardTableEntryClosure* cl, + void** buf, size_t index, size_t sz, + bool consume = true, + int worker_i = 0); + void **get_buf() { return _buf;} + void set_buf(void **buf) {_buf = buf;} + size_t get_index() { return _index;} + void reinitialize() { _buf = 0; _sz = 0; _index = 0;} +}; + + + +class DirtyCardQueueSet: public PtrQueueSet { + CardTableEntryClosure* _closure; + + DirtyCardQueue _shared_dirty_card_queue; + + // Override. + bool mut_process_buffer(void** buf); + + // Protected by the _cbl_mon. + FreeIdSet* _free_ids; + + // The number of completed buffers processed by mutator and rs thread, + // respectively. + jint _processed_buffers_mut; + jint _processed_buffers_rs_thread; + +public: + DirtyCardQueueSet(); + + void initialize(Monitor* cbl_mon, Mutex* fl_lock, + int max_completed_queue = 0, + Mutex* lock = NULL, PtrQueueSet* fl_owner = NULL); + + // The number of parallel ids that can be claimed to allow collector or + // mutator threads to do card-processing work. + static size_t num_par_ids(); + + static void handle_zero_index_for_thread(JavaThread* t); + + // Register "blk" as "the closure" for all queues. Only one such closure + // is allowed. The "apply_closure_to_completed_buffer" method will apply + // this closure to a completed buffer, and "iterate_closure_all_threads" + // applies it to partially-filled buffers (the latter should only be done + // with the world stopped). 
+ void set_closure(CardTableEntryClosure* closure); + + // If there is a registered closure for buffers, apply it to all entries + // in all currently-active buffers. This should only be applied at a + // safepoint. (Currently must not be called in parallel; this should + // change in the future.) If "consume" is true, processed entries are + // discarded. + void iterate_closure_all_threads(bool consume = true, + size_t worker_i = 0); + + // If there exists some completed buffer, pop it, then apply the + // registered closure to all its elements, nulling out those elements + // processed. If all elements are processed, returns "true". If no + // completed buffers exist, returns false. If a completed buffer exists, + // but is only partially completed before a "yield" happens, the + // partially completed buffer (with its processed elements set to NULL) + // is returned to the completed buffer set, and this call returns false. + bool apply_closure_to_completed_buffer(int worker_i = 0, + int stop_at = 0, + bool with_CAS = false); + bool apply_closure_to_completed_buffer_helper(int worker_i, + CompletedBufferNode* nd); + + CompletedBufferNode* get_completed_buffer_CAS(); + CompletedBufferNode* get_completed_buffer_lock(int stop_at); + // Applies the current closure to all completed buffers, + // non-consumptively. + void apply_closure_to_all_completed_buffers(); + + DirtyCardQueue* shared_dirty_card_queue() { + return &_shared_dirty_card_queue; + } + + // If a full collection is happening, reset partial logs, and ignore + // completed ones: the full collection will make them all irrelevant. + void abandon_logs(); + + // If any threads have partial logs, add them to the global list of logs. + void concatenate_logs(); + void clear_n_completed_buffers() { _n_completed_buffers = 0;} + + jint processed_buffers_mut() { + return _processed_buffers_mut; + } + jint processed_buffers_rs_thread() { + return _processed_buffers_rs_thread; + } + +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp 2009-08-01 04:21:02.953469585 +0100 @@ -0,0 +1,628 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +#include "incls/_precompiled.incl" +#include "incls/_g1BlockOffsetTable.cpp.incl" + +////////////////////////////////////////////////////////////////////// +// G1BlockOffsetSharedArray +////////////////////////////////////////////////////////////////////// + +G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved, + size_t init_word_size) : + _reserved(reserved), _end(NULL) +{ + size_t size = compute_size(reserved.word_size()); + ReservedSpace rs(ReservedSpace::allocation_align_size_up(size)); + if (!rs.is_reserved()) { + vm_exit_during_initialization("Could not reserve enough space for heap offset array"); + } + if (!_vs.initialize(rs, 0)) { + vm_exit_during_initialization("Could not reserve enough space for heap offset array"); + } + _offset_array = (u_char*)_vs.low_boundary(); + resize(init_word_size); + if (TraceBlockOffsetTable) { + gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: "); + gclog_or_tty->print_cr(" " + " rs.base(): " INTPTR_FORMAT + " rs.size(): " INTPTR_FORMAT + " rs end(): " INTPTR_FORMAT, + rs.base(), rs.size(), rs.base() + rs.size()); + gclog_or_tty->print_cr(" " + " _vs.low_boundary(): " INTPTR_FORMAT + " _vs.high_boundary(): " INTPTR_FORMAT, + _vs.low_boundary(), + _vs.high_boundary()); + } +} + +void G1BlockOffsetSharedArray::resize(size_t new_word_size) { + assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved"); + size_t new_size = compute_size(new_word_size); + size_t old_size = _vs.committed_size(); + size_t delta; + char* high = _vs.high(); + _end = _reserved.start() + new_word_size; + if (new_size > old_size) { + delta = ReservedSpace::page_align_size_up(new_size - old_size); + assert(delta > 0, "just checking"); + if (!_vs.expand_by(delta)) { + // Do better than this for Merlin + vm_exit_out_of_memory(delta, "offset table expansion"); + } + assert(_vs.high() == high + delta, "invalid expansion"); + // Initialization of the contents is left to the + // G1BlockOffsetArray that uses it. 
+ } else { + delta = ReservedSpace::page_align_size_down(old_size - new_size); + if (delta == 0) return; + _vs.shrink_by(delta); + assert(_vs.high() == high - delta, "invalid expansion"); + } +} + +bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const { + assert(p >= _reserved.start(), "just checking"); + size_t delta = pointer_delta(p, _reserved.start()); + return (delta & right_n_bits(LogN_words)) == (size_t)NoBits; +} + + +////////////////////////////////////////////////////////////////////// +// G1BlockOffsetArray +////////////////////////////////////////////////////////////////////// + +G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array, + MemRegion mr, bool init_to_zero) : + G1BlockOffsetTable(mr.start(), mr.end()), + _unallocated_block(_bottom), + _array(array), _csp(NULL), + _init_to_zero(init_to_zero) { + assert(_bottom <= _end, "arguments out of order"); + if (!_init_to_zero) { + // initialize cards to point back to mr.start() + set_remainder_to_point_to_start(mr.start() + N_words, mr.end()); + _array->set_offset_array(0, 0); // set first card to 0 + } +} + +void G1BlockOffsetArray::set_space(Space* sp) { + _sp = sp; + _csp = sp->toContiguousSpace(); +} + +// The arguments follow the normal convention of denoting +// a right-open interval: [start, end) +void +G1BlockOffsetArray:: set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) { + + if (start >= end) { + // The start address is equal to the end address (or to + // the right of the end address) so there are not cards + // that need to be updated.. + return; + } + + // Write the backskip value for each region. + // + // offset + // card 2nd 3rd + // | +- 1st | | + // v v v v + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+- + // |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ... + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+- + // 11 19 75 + // 12 + // + // offset card is the card that points to the start of an object + // x - offset value of offset card + // 1st - start of first logarithmic region + // 0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1 + // 2nd - start of second logarithmic region + // 1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8 + // 3rd - start of third logarithmic region + // 2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64 + // + // integer below the block offset entry is an example of + // the index of the entry + // + // Given an address, + // Find the index for the address + // Find the block offset table entry + // Convert the entry to a back slide + // (e.g., with today's, offset = 0x81 => + // back slip = 2**(3*(0x81 - N_words)) = 2**3) = 8 + // Move back N (e.g., 8) entries and repeat with the + // value of the new entry + // + size_t start_card = _array->index_for(start); + size_t end_card = _array->index_for(end-1); + assert(start ==_array->address_for_index(start_card), "Precondition"); + assert(end ==_array->address_for_index(end_card)+N_words, "Precondition"); + set_remainder_to_point_to_start_incl(start_card, end_card); // closed interval +} + +// Unlike the normal convention in this code, the argument here denotes +// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start() +// above. 
+void +G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) { + if (start_card > end_card) { + return; + } + assert(start_card > _array->index_for(_bottom), "Cannot be first card"); + assert(_array->offset_array(start_card-1) <= N_words, + "Offset card has an unexpected value"); + size_t start_card_for_region = start_card; + u_char offset = max_jubyte; + for (int i = 0; i < BlockOffsetArray::N_powers; i++) { + // -1 so that the card with the actual offset is counted. Another -1 + // so that the reach ends in this region and not at the start + // of the next. + size_t reach = start_card - 1 + (BlockOffsetArray::power_to_cards_back(i+1) - 1); + offset = N_words + i; + if (reach >= end_card) { + _array->set_offset_array(start_card_for_region, end_card, offset); + start_card_for_region = reach + 1; + break; + } + _array->set_offset_array(start_card_for_region, reach, offset); + start_card_for_region = reach + 1; + } + assert(start_card_for_region > end_card, "Sanity check"); + DEBUG_ONLY(check_all_cards(start_card, end_card);) +} + +// The block [blk_start, blk_end) has been allocated; +// adjust the block offset table to represent this information; +// right-open interval: [blk_start, blk_end) +void +G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) { + mark_block(blk_start, blk_end); + allocated(blk_start, blk_end); +} + +// Adjust BOT to show that a previously whole block has been split +// into two. +void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size, + size_t left_blk_size) { + // Verify that the BOT shows [blk, blk + blk_size) to be one block. + verify_single_block(blk, blk_size); + // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size) + // is one single block. + mark_block(blk + left_blk_size, blk + blk_size); +} + + +// Action_mark - update the BOT for the block [blk_start, blk_end). +// Current typical use is for splitting a block. +// Action_single - update the BOT for an allocation. +// Action_check - BOT verification. +void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start, + HeapWord* blk_end, + Action action) { + assert(Universe::heap()->is_in_reserved(blk_start), + "reference must be into the heap"); + assert(Universe::heap()->is_in_reserved(blk_end-1), + "limit must be within the heap"); + // This is optimized to make the test fast, assuming we only rarely + // cross boundaries.
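+  // Illustrative example (assuming 8-byte heap words and the 512-byte cards
+  // implied by LogN == 9): for a block [0x1008, 0x1200) the last word is at
+  // 0x11f8 and clearing its low nine bits gives the boundary 0x1000, which
+  // is below blk_start, so the block lies within a single card and no
+  // entries need updating; for [0x1008, 0x1208) the last word is at 0x1200,
+  // the boundary is 0x1200 >= blk_start, so the block crosses a card
+  // boundary and the entries below are updated.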
+ uintptr_t end_ui = (uintptr_t)(blk_end - 1); + uintptr_t start_ui = (uintptr_t)blk_start; + // Calculate the last card boundary preceding end of blk + intptr_t boundary_before_end = (intptr_t)end_ui; + clear_bits(boundary_before_end, right_n_bits(LogN)); + if (start_ui <= (uintptr_t)boundary_before_end) { + // blk starts at or crosses a boundary + // Calculate index of card on which blk begins + size_t start_index = _array->index_for(blk_start); + // Index of card on which blk ends + size_t end_index = _array->index_for(blk_end - 1); + // Start address of card on which blk begins + HeapWord* boundary = _array->address_for_index(start_index); + assert(boundary <= blk_start, "blk should start at or after boundary"); + if (blk_start != boundary) { + // blk starts strictly after boundary + // adjust card boundary and start_index forward to next card + boundary += N_words; + start_index++; + } + assert(start_index <= end_index, "monotonicity of index_for()"); + assert(boundary <= (HeapWord*)boundary_before_end, "tautology"); + switch (action) { + case Action_mark: { + if (init_to_zero()) { + _array->set_offset_array(start_index, boundary, blk_start); + break; + } // Else fall through to the next case + } + case Action_single: { + _array->set_offset_array(start_index, boundary, blk_start); + // We have finished marking the "offset card". We need to now + // mark the subsequent cards that this blk spans. + if (start_index < end_index) { + HeapWord* rem_st = _array->address_for_index(start_index) + N_words; + HeapWord* rem_end = _array->address_for_index(end_index) + N_words; + set_remainder_to_point_to_start(rem_st, rem_end); + } + break; + } + case Action_check: { + _array->check_offset_array(start_index, boundary, blk_start); + // We have finished checking the "offset card". We need to now + // check the subsequent cards that this blk spans. + check_all_cards(start_index + 1, end_index); + break; + } + default: + ShouldNotReachHere(); + } + } +} + +// The card-interval [start_card, end_card] is a closed interval; this +// is an expensive check -- use with care and only under protection of +// suitable flag. +void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const { + + if (end_card < start_card) { + return; + } + guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card"); + for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) { + u_char entry = _array->offset_array(c); + if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) { + guarantee(entry > N_words, "Should be in logarithmic region"); + } + size_t backskip = BlockOffsetArray::entry_to_cards_back(entry); + size_t landing_card = c - backskip; + guarantee(landing_card >= (start_card - 1), "Inv"); + if (landing_card >= start_card) { + guarantee(_array->offset_array(landing_card) <= entry, "monotonicity"); + } else { + guarantee(landing_card == start_card - 1, "Tautology"); + guarantee(_array->offset_array(landing_card) <= N_words, "Offset value"); + } + } +} + +// The range [blk_start, blk_end) represents a single contiguous block +// of storage; modify the block offset table to represent this +// information; Right-open interval: [blk_start, blk_end) +// NOTE: this method does _not_ adjust _unallocated_block. 
+void +G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) { + do_block_internal(blk_start, blk_end, Action_single); +} + +// Mark the BOT such that if [blk_start, blk_end) straddles a card +// boundary, the card following the first such boundary is marked +// with the appropriate offset. +// NOTE: this method does _not_ adjust _unallocated_block or +// any cards subsequent to the first one. +void +G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) { + do_block_internal(blk_start, blk_end, Action_mark); +} + +void G1BlockOffsetArray::join_blocks(HeapWord* blk1, HeapWord* blk2) { + HeapWord* blk1_start = Universe::heap()->block_start(blk1); + HeapWord* blk2_start = Universe::heap()->block_start(blk2); + assert(blk1 == blk1_start && blk2 == blk2_start, + "Must be block starts."); + assert(blk1 + _sp->block_size(blk1) == blk2, "Must be contiguous."); + size_t blk1_start_index = _array->index_for(blk1); + size_t blk2_start_index = _array->index_for(blk2); + assert(blk1_start_index <= blk2_start_index, "sanity"); + HeapWord* blk2_card_start = _array->address_for_index(blk2_start_index); + if (blk2 == blk2_card_start) { + // blk2 starts a card. Does blk1 start on the previous card, or further + // back? + assert(blk1_start_index < blk2_start_index, "must be lower card."); + if (blk1_start_index + 1 == blk2_start_index) { + // previous card; new value for blk2 card is size of blk1. + _array->set_offset_array(blk2_start_index, (u_char) _sp->block_size(blk1)); + } else { + // Earlier card; go back a card. + _array->set_offset_array(blk2_start_index, N_words); + } + } else { + // blk2 does not start a card. Does it cross a card? If not, nothing + // to do. + size_t blk2_end_index = + _array->index_for(blk2 + _sp->block_size(blk2) - 1); + assert(blk2_end_index >= blk2_start_index, "sanity"); + if (blk2_end_index > blk2_start_index) { + // Yes, it crosses a card. The value for the next card must change. + if (blk1_start_index + 1 == blk2_start_index) { + // previous card; new value for second blk2 card is size of blk1. + _array->set_offset_array(blk2_start_index + 1, + (u_char) _sp->block_size(blk1)); + } else { + // Earlier card; go back a card. + _array->set_offset_array(blk2_start_index + 1, N_words); + } + } + } +} + +HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) { + assert(_bottom <= addr && addr < _end, + "addr must be covered by this Array"); + // Must read this exactly once because it can be modified by parallel + // allocation. + HeapWord* ub = _unallocated_block; + if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) { + assert(ub < _end, "tautology (see above)"); + return ub; + } + // Otherwise, find the block start using the table. + HeapWord* q = block_at_or_preceding(addr, false, 0); + return forward_to_block_containing_addr(q, addr); +} + +// This duplicates a little code from the above: unavoidable. +HeapWord* +G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const { + assert(_bottom <= addr && addr < _end, + "addr must be covered by this Array"); + // Must read this exactly once because it can be modified by parallel + // allocation. + HeapWord* ub = _unallocated_block; + if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) { + assert(ub < _end, "tautology (see above)"); + return ub; + } + // Otherwise, find the block start using the table.
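+  // The table only yields a block boundary at or before "addr"; walk
+  // forward block by block until reaching the block that contains it.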
+ HeapWord* q = block_at_or_preceding(addr, false, 0); + HeapWord* n = q + _sp->block_size(q); + return forward_to_block_containing_addr_const(q, n, addr); +} + + +HeapWord* +G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q, + HeapWord* n, + const void* addr) { + // We're not in the normal case. We need to handle an important subcase + // here: LAB allocation. An allocation previously recorded in the + // offset table was actually a lab allocation, and was divided into + // several objects subsequently. Fix this situation as we answer the + // query, by updating entries as we cross them. + + // If the first object's end is at the card boundary, start refining + // with the corresponding card (the value of the entry will basically be + // set to 0). If the object crosses the boundary, start from the next card. + size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n); + HeapWord* next_boundary = _array->address_for_index(next_index); + if (csp() != NULL) { + if (addr >= csp()->top()) return csp()->top(); + while (next_boundary < addr) { + while (n <= next_boundary) { + q = n; + oop obj = oop(q); + if (obj->klass() == NULL) return q; + n += obj->size(); + } + assert(q <= next_boundary && n > next_boundary, "Consequence of loop"); + // [q, n) is the block that crosses the boundary. + alloc_block_work2(&next_boundary, &next_index, q, n); + } + } else { + while (next_boundary < addr) { + while (n <= next_boundary) { + q = n; + oop obj = oop(q); + if (obj->klass() == NULL) return q; + n += _sp->block_size(q); + } + assert(q <= next_boundary && n > next_boundary, "Consequence of loop"); + // [q, n) is the block that crosses the boundary. + alloc_block_work2(&next_boundary, &next_index, q, n); + } + } + return forward_to_block_containing_addr_const(q, n, addr); +} + +HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const { + assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); + + assert(_bottom <= addr && addr < _end, + "addr must be covered by this Array"); + // Must read this exactly once because it can be modified by parallel + // allocation. + HeapWord* ub = _unallocated_block; + if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) { + assert(ub < _end, "tautology (see above)"); + return ub; + } + + // Otherwise, find the block start using the table, but taking + // care (cf block_start_unsafe() above) not to parse any objects/blocks + // on the cards themselves. + size_t index = _array->index_for(addr); + assert(_array->address_for_index(index) == addr, + "arg should be start of card"); + + HeapWord* q = (HeapWord*)addr; + uint offset; + do { + offset = _array->offset_array(index--); + q -= offset; + } while (offset == N_words); + assert(q <= addr, "block start should be to left of arg"); + return q; +} + +// Note that the committed size of the covered space may have changed, +// so the table size might also wish to change.
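+// When the covered region grows, only arrays that are initialized to
+// back-pointers have the newly exposed cards set here (to N_words);
+// zero-initialized arrays leave those cards to be set by later allocations.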
+void G1BlockOffsetArray::resize(size_t new_word_size) { + HeapWord* new_end = _bottom + new_word_size; + if (_end < new_end && !init_to_zero()) { + // verify that the old and new boundaries are also card boundaries + assert(_array->is_card_boundary(_end), + "_end not a card boundary"); + assert(_array->is_card_boundary(new_end), + "new _end would not be a card boundary"); + // set all the newly added cards + _array->set_offset_array(_end, new_end, N_words); + } + _end = new_end; // update _end +} + +void G1BlockOffsetArray::set_region(MemRegion mr) { + _bottom = mr.start(); + _end = mr.end(); +} + +// +// threshold_ +// | _index_ +// v v +// +-------+-------+-------+-------+-------+ +// | i-1 | i | i+1 | i+2 | i+3 | +// +-------+-------+-------+-------+-------+ +// ( ^ ] +// block-start +// +void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_, + HeapWord* blk_start, HeapWord* blk_end) { + // For efficiency, do copy-in/copy-out. + HeapWord* threshold = *threshold_; + size_t index = *index_; + + assert(blk_start != NULL && blk_end > blk_start, + "phantom block"); + assert(blk_end > threshold, "should be past threshold"); + assert(blk_start <= threshold, "blk_start should be at or before threshold"); + assert(pointer_delta(threshold, blk_start) <= N_words, + "offset should be <= BlockOffsetSharedArray::N"); + assert(Universe::heap()->is_in_reserved(blk_start), + "reference must be into the heap"); + assert(Universe::heap()->is_in_reserved(blk_end-1), + "limit must be within the heap"); + assert(threshold == _array->_reserved.start() + index*N_words, + "index must agree with threshold"); + + DEBUG_ONLY(size_t orig_index = index;) + + // Mark the card that holds the offset into the block. Note + // that _next_offset_index and _next_offset_threshold are not + // updated until the end of this method. + _array->set_offset_array(index, threshold, blk_start); + + // We need to now mark the subsequent cards that this blk spans. + + // Index of card on which blk ends. + size_t end_index = _array->index_for(blk_end - 1); + + // Are there more cards left to be updated? + if (index + 1 <= end_index) { + HeapWord* rem_st = _array->address_for_index(index + 1); + // Calculate rem_end this way because end_index + // may be the last valid index in the covered region. + HeapWord* rem_end = _array->address_for_index(end_index) + N_words; + set_remainder_to_point_to_start(rem_st, rem_end); + } + + index = end_index + 1; + // Calculate threshold_ this way because end_index + // may be the last valid index in the covered region. + threshold = _array->address_for_index(end_index) + N_words; + assert(threshold >= blk_end, "Incorrect offset threshold"); + + // index_ and threshold_ updated here. + *threshold_ = threshold; + *index_ = index; + +#ifdef ASSERT + // The offset can be 0 if the block starts on a boundary. That + // is checked by an assertion above.
+ size_t start_index = _array->index_for(blk_start); + HeapWord* boundary = _array->address_for_index(start_index); + assert((_array->offset_array(orig_index) == 0 && + blk_start == boundary) || + (_array->offset_array(orig_index) > 0 && + _array->offset_array(orig_index) <= N_words), + "offset array should have been set"); + for (size_t j = orig_index + 1; j <= end_index; j++) { + assert(_array->offset_array(j) > 0 && + _array->offset_array(j) <= + (u_char) (N_words+BlockOffsetArray::N_powers-1), + "offset array should have been set"); + } +#endif +} + +////////////////////////////////////////////////////////////////////// +// G1BlockOffsetArrayContigSpace +////////////////////////////////////////////////////////////////////// + +HeapWord* +G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) { + assert(_bottom <= addr && addr < _end, + "addr must be covered by this Array"); + HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1); + return forward_to_block_containing_addr(q, addr); +} + +HeapWord* +G1BlockOffsetArrayContigSpace:: +block_start_unsafe_const(const void* addr) const { + assert(_bottom <= addr && addr < _end, + "addr must be covered by this Array"); + HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1); + HeapWord* n = q + _sp->block_size(q); + return forward_to_block_containing_addr_const(q, n, addr); +} + +G1BlockOffsetArrayContigSpace:: +G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, + MemRegion mr) : + G1BlockOffsetArray(array, mr, true) +{ + _next_offset_threshold = NULL; + _next_offset_index = 0; +} + +HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() { + assert(!Universe::heap()->is_in_reserved(_array->_offset_array), + "just checking"); + _next_offset_index = _array->index_for(_bottom); + _next_offset_index++; + _next_offset_threshold = + _array->address_for_index(_next_offset_index); + return _next_offset_threshold; +} + +void G1BlockOffsetArrayContigSpace::zero_bottom_entry() { + assert(!Universe::heap()->is_in_reserved(_array->_offset_array), + "just checking"); + size_t bottom_index = _array->index_for(_bottom); + assert(_array->address_for_index(bottom_index) == _bottom, + "Precondition of call"); + _array->set_offset_array(bottom_index, 0); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp 2009-08-01 04:21:03.372313023 +0100 @@ -0,0 +1,487 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +// The CollectedHeap type requires subtypes to implement a method +// "block_start". For some subtypes, notably generational +// systems using card-table-based write barriers, the efficiency of this +// operation may be important. Implementations of the "BlockOffsetArray" +// class may be useful in providing such efficient implementations. +// +// While generally mirroring the structure of the BOT for GenCollectedHeap, +// the following types are tailored more towards G1's uses; these should, +// however, be merged back into a common BOT to avoid code duplication +// and reduce maintenance overhead. +// +// G1BlockOffsetTable (abstract) +// -- G1BlockOffsetArray (uses G1BlockOffsetSharedArray) +// -- G1BlockOffsetArrayContigSpace +// +// A main impediment to the consolidation of this code might be the +// effect of making some of the block_start*() calls non-const as +// below. Whether that might adversely affect performance optimizations +// that compilers might normally perform in the case of non-G1 +// collectors needs to be carefully investigated prior to any such +// consolidation. + +// Forward declarations +class ContiguousSpace; +class G1BlockOffsetSharedArray; + +class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC { + friend class VMStructs; +protected: + // These members describe the region covered by the table. + + // The space this table is covering. + HeapWord* _bottom; // == reserved.start + HeapWord* _end; // End of currently allocated region. + +public: + // Initialize the table to cover the given space. + // The contents of the initial table are undefined. + G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) : + _bottom(bottom), _end(end) + { + assert(_bottom <= _end, "arguments out of order"); + } + + // Note that the committed size of the covered space may have changed, + // so the table size might also wish to change. + virtual void resize(size_t new_word_size) = 0; + + virtual void set_bottom(HeapWord* new_bottom) { + assert(new_bottom <= _end, "new_bottom > _end"); + _bottom = new_bottom; + resize(pointer_delta(_end, _bottom)); + } + + // Requires "addr" to be contained by a block, and returns the address of + // the start of that block. (May have side effects, namely updating of + // shared array entries that "point" too far backwards. This can occur, + // for example, when LAB allocation is used in a space covered by the + // table.) + virtual HeapWord* block_start_unsafe(const void* addr) = 0; + // Same as above, but does not have any of the possible side effects + // discussed above. + virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0; + + // Returns the address of the start of the block containing "addr", or + // else "null" if it is covered by no block. (May have side effects, + // namely updating of shared array entries that "point" too far + // backwards. This can occur, for example, when lab allocation is used + // in a space covered by the table.) + inline HeapWord* block_start(const void* addr); + // Same as above, but does not have any of the possible side effects + // discussed above. + inline HeapWord* block_start_const(const void* addr) const; +}; + +// This implementation of "G1BlockOffsetTable" divides the covered region +// into "N"-word subregions (where "N" = 2^"LogN". An array with an entry +// for each such subregion indicates how far back one must go to find the +// start of the chunk that includes the first word of the subregion. +// +// Each BlockOffsetArray is owned by a Space. 
However, the actual array +// may be shared by several BlockOffsetArrays; this is useful +// when a single resizable area (such as a generation) is divided up into +// several spaces in which contiguous allocation takes place, +// such as, for example, in G1 or in the train generation.) + +// Here is the shared array type. + +class G1BlockOffsetSharedArray: public CHeapObj { + friend class G1BlockOffsetArray; + friend class G1BlockOffsetArrayContigSpace; + friend class VMStructs; + +private: + // The reserved region covered by the shared array. + MemRegion _reserved; + + // End of the current committed region. + HeapWord* _end; + + // Array for keeping offsets for retrieving object start fast given an + // address. + VirtualSpace _vs; + u_char* _offset_array; // byte array keeping backwards offsets + + // Bounds checking accessors: + // For performance these have to devolve to array accesses in product builds. + u_char offset_array(size_t index) const { + assert(index < _vs.committed_size(), "index out of range"); + return _offset_array[index]; + } + + void set_offset_array(size_t index, u_char offset) { + assert(index < _vs.committed_size(), "index out of range"); + assert(offset <= N_words, "offset too large"); + _offset_array[index] = offset; + } + + void set_offset_array(size_t index, HeapWord* high, HeapWord* low) { + assert(index < _vs.committed_size(), "index out of range"); + assert(high >= low, "addresses out of order"); + assert(pointer_delta(high, low) <= N_words, "offset too large"); + _offset_array[index] = (u_char) pointer_delta(high, low); + } + + void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) { + assert(index_for(right - 1) < _vs.committed_size(), + "right address out of range"); + assert(left < right, "Heap addresses out of order"); + size_t num_cards = pointer_delta(right, left) >> LogN_words; + memset(&_offset_array[index_for(left)], offset, num_cards); + } + + void set_offset_array(size_t left, size_t right, u_char offset) { + assert(right < _vs.committed_size(), "right address out of range"); + assert(left <= right, "indexes out of order"); + size_t num_cards = right - left + 1; + memset(&_offset_array[left], offset, num_cards); + } + + void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const { + assert(index < _vs.committed_size(), "index out of range"); + assert(high >= low, "addresses out of order"); + assert(pointer_delta(high, low) <= N_words, "offset too large"); + assert(_offset_array[index] == pointer_delta(high, low), + "Wrong offset"); + } + + bool is_card_boundary(HeapWord* p) const; + + // Return the number of slots needed for an offset array + // that covers mem_region_words words. + // We always add an extra slot because if an object + // ends on a card boundary we put a 0 in the next + // offset array slot, so we want that slot always + // to be reserved. + + size_t compute_size(size_t mem_region_words) { + size_t number_of_slots = (mem_region_words / N_words) + 1; + return ReservedSpace::page_align_size_up(number_of_slots); + } + +public: + enum SomePublicConstants { + LogN = 9, + LogN_words = LogN - LogHeapWordSize, + N_bytes = 1 << LogN, + N_words = 1 << LogN_words + }; + + // Initialize the table to cover from "base" to (at least) + // "base + init_word_size". In the future, the table may be expanded + // (see "resize" below) up to the size of "_reserved" (which must be at + // least "init_word_size".) 
The contents of the initial table are + // undefined; it is the responsibility of the constituent + // G1BlockOffsetTable(s) to initialize cards. + G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size); + + // Notes a change in the committed size of the region covered by the + // table. The "new_word_size" may not be larger than the size of the + // reserved region this table covers. + void resize(size_t new_word_size); + + void set_bottom(HeapWord* new_bottom); + + // Updates all the BlockOffsetArray's sharing this shared array to + // reflect the current "top"'s of their spaces. + void update_offset_arrays(); + + // Return the appropriate index into "_offset_array" for "p". + inline size_t index_for(const void* p) const; + + // Return the address indicating the start of the region corresponding to + // "index" in "_offset_array". + inline HeapWord* address_for_index(size_t index) const; +}; + +// And here is the G1BlockOffsetTable subtype that uses the array. + +class G1BlockOffsetArray: public G1BlockOffsetTable { + friend class G1BlockOffsetSharedArray; + friend class G1BlockOffsetArrayContigSpace; + friend class VMStructs; +private: + enum SomePrivateConstants { + N_words = G1BlockOffsetSharedArray::N_words, + LogN = G1BlockOffsetSharedArray::LogN + }; + + // The following enums are used by do_block_helper + enum Action { + Action_single, // BOT records a single block (see single_block()) + Action_mark, // BOT marks the start of a block (see mark_block()) + Action_check // Check that BOT records block correctly + // (see verify_single_block()). + }; + + // This is the array, which can be shared by several BlockOffsetArray's + // servicing different + G1BlockOffsetSharedArray* _array; + + // The space that owns this subregion. + Space* _sp; + + // If "_sp" is a contiguous space, the field below is the view of "_sp" + // as a contiguous space, else NULL. + ContiguousSpace* _csp; + + // If true, array entries are initialized to 0; otherwise, they are + // initialized to point backwards to the beginning of the covered region. + bool _init_to_zero; + + // The portion [_unallocated_block, _sp.end()) of the space that + // is a single block known not to contain any objects. + // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag. + HeapWord* _unallocated_block; + + // Sets the entries + // corresponding to the cards starting at "start" and ending at "end" + // to point back to the card before "start": the interval [start, end) + // is right-open. + void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end); + // Same as above, except that the args here are a card _index_ interval + // that is closed: [start_index, end_index] + void set_remainder_to_point_to_start_incl(size_t start, size_t end); + + // A helper function for BOT adjustment/verification work + void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action); + +protected: + + ContiguousSpace* csp() const { return _csp; } + + // Returns the address of a block whose start is at most "addr". + // If "has_max_index" is true, "assumes "max_index" is the last valid one + // in the array. + inline HeapWord* block_at_or_preceding(const void* addr, + bool has_max_index, + size_t max_index) const; + + // "q" is a block boundary that is <= "addr"; "n" is the address of the + // next block (or the end of the space.) Return the address of the + // beginning of the block that contains "addr". Does so without side + // effects (see, e.g., spec of block_start.) 
+ inline HeapWord* + forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n, + const void* addr) const; + + // "q" is a block boundary that is <= "addr"; return the address of the + // beginning of the block that contains "addr". May have side effects + // on "this", by updating imprecise entries. + inline HeapWord* forward_to_block_containing_addr(HeapWord* q, + const void* addr); + + // "q" is a block boundary that is <= "addr"; "n" is the address of the + // next block (or the end of the space.) Return the address of the + // beginning of the block that contains "addr". May have side effects + // on "this", by updating imprecise entries. + HeapWord* forward_to_block_containing_addr_slow(HeapWord* q, + HeapWord* n, + const void* addr); + + // Requires that "*threshold_" be the first array entry boundary at or + // above "blk_start", and that "*index_" be the corresponding array + // index. If the block starts at or crosses "*threshold_", records + // "blk_start" as the appropriate block start for the array index + // starting at "*threshold_", and for any other indices crossed by the + // block. Updates "*threshold_" and "*index_" to correspond to the first + // index after the block end. + void alloc_block_work2(HeapWord** threshold_, size_t* index_, + HeapWord* blk_start, HeapWord* blk_end); + +public: + // The space may not have it's bottom and top set yet, which is why the + // region is passed as a parameter. If "init_to_zero" is true, the + // elements of the array are initialized to zero. Otherwise, they are + // initialized to point backwards to the beginning. + G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr, + bool init_to_zero); + + // Note: this ought to be part of the constructor, but that would require + // "this" to be passed as a parameter to a member constructor for + // the containing concrete subtype of Space. + // This would be legal C++, but MS VC++ doesn't allow it. + void set_space(Space* sp); + + // Resets the covered region to the given "mr". + void set_region(MemRegion mr); + + // Resets the covered region to one with the same _bottom as before but + // the "new_word_size". + void resize(size_t new_word_size); + + // These must be guaranteed to work properly (i.e., do nothing) + // when "blk_start" ("blk" for second version) is "NULL". + virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end); + virtual void alloc_block(HeapWord* blk, size_t size) { + alloc_block(blk, blk + size); + } + + // The following methods are useful and optimized for a + // general, non-contiguous space. + + // The given arguments are required to be the starts of adjacent ("blk1" + // before "blk2") well-formed blocks covered by "this". After this call, + // they should be considered to form one block. + virtual void join_blocks(HeapWord* blk1, HeapWord* blk2); + + // Given a block [blk_start, blk_start + full_blk_size), and + // a left_blk_size < full_blk_size, adjust the BOT to show two + // blocks [blk_start, blk_start + left_blk_size) and + // [blk_start + left_blk_size, blk_start + full_blk_size). + // It is assumed (and verified in the non-product VM) that the + // BOT was correct for the original block. + void split_block(HeapWord* blk_start, size_t full_blk_size, + size_t left_blk_size); + + // Adjust the BOT to show that it has a single block in the + // range [blk_start, blk_start + size). All necessary BOT + // cards are adjusted, but _unallocated_block isn't. 
+ void single_block(HeapWord* blk_start, HeapWord* blk_end); + void single_block(HeapWord* blk, size_t size) { + single_block(blk, blk + size); + } + + // Adjust BOT to show that it has a block in the range + // [blk_start, blk_start + size). Only the first card + // of BOT is touched. It is assumed (and verified in the + // non-product VM) that the remaining cards of the block + // are correct. + void mark_block(HeapWord* blk_start, HeapWord* blk_end); + void mark_block(HeapWord* blk, size_t size) { + mark_block(blk, blk + size); + } + + // Adjust _unallocated_block to indicate that a particular + // block has been newly allocated or freed. It is assumed (and + // verified in the non-product VM) that the BOT is correct for + // the given block. + inline void allocated(HeapWord* blk_start, HeapWord* blk_end) { + // Verify that the BOT shows [blk, blk + blk_size) to be one block. + verify_single_block(blk_start, blk_end); + if (BlockOffsetArrayUseUnallocatedBlock) { + _unallocated_block = MAX2(_unallocated_block, blk_end); + } + } + + inline void allocated(HeapWord* blk, size_t size) { + allocated(blk, blk + size); + } + + inline void freed(HeapWord* blk_start, HeapWord* blk_end); + + inline void freed(HeapWord* blk, size_t size); + + virtual HeapWord* block_start_unsafe(const void* addr); + virtual HeapWord* block_start_unsafe_const(const void* addr) const; + + // Requires "addr" to be the start of a card and returns the + // start of the block that contains the given address. + HeapWord* block_start_careful(const void* addr) const; + + // If true, initialize array slots with no allocated blocks to zero. + // Otherwise, make them point back to the front. + bool init_to_zero() { return _init_to_zero; } + + // Verification & debugging - ensure that the offset table reflects the fact + // that the block [blk_start, blk_end) or [blk, blk + size) is a + // single block of storage. NOTE: can;t const this because of + // call to non-const do_block_internal() below. + inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) { + if (VerifyBlockOffsetArray) { + do_block_internal(blk_start, blk_end, Action_check); + } + } + + inline void verify_single_block(HeapWord* blk, size_t size) { + verify_single_block(blk, blk + size); + } + + // Verify that the given block is before _unallocated_block + inline void verify_not_unallocated(HeapWord* blk_start, + HeapWord* blk_end) const { + if (BlockOffsetArrayUseUnallocatedBlock) { + assert(blk_start < blk_end, "Block inconsistency?"); + assert(blk_end <= _unallocated_block, "_unallocated_block problem"); + } + } + + inline void verify_not_unallocated(HeapWord* blk, size_t size) const { + verify_not_unallocated(blk, blk + size); + } + + void check_all_cards(size_t left_card, size_t right_card) const; +}; + +// A subtype of BlockOffsetArray that takes advantage of the fact +// that its underlying space is a ContiguousSpace, so that its "active" +// region can be more efficiently tracked (than for a non-contiguous space). +class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray { + friend class VMStructs; + + // allocation boundary at which offset array must be updated + HeapWord* _next_offset_threshold; + size_t _next_offset_index; // index corresponding to that boundary + + // Work function to be called when allocation start crosses the next + // threshold in the contig space. 
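// (Rough arithmetic, assuming the 512-byte cards implied by LogN above and an
// 8-byte HeapWord: the threshold only advances when an allocation crosses a
// card boundary, so a stream of 4-word (32-byte) allocations takes the
// alloc_block fast path about 15 times out of 16 and reaches this work
// function roughly once per 512 bytes allocated.)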
+ void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) { + alloc_block_work2(&_next_offset_threshold, &_next_offset_index, + blk_start, blk_end); + } + + + public: + G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr); + + // Initialize the threshold to reflect the first boundary after the + // bottom of the covered region. + HeapWord* initialize_threshold(); + + // Zero out the entry for _bottom (offset will be zero). + void zero_bottom_entry(); + + // Return the next threshold, the point at which the table should be + // updated. + HeapWord* threshold() const { return _next_offset_threshold; } + + // These must be guaranteed to work properly (i.e., do nothing) + // when "blk_start" ("blk" for second version) is "NULL". In this + // implementation, that's true because NULL is represented as 0, and thus + // never exceeds the "_next_offset_threshold". + void alloc_block(HeapWord* blk_start, HeapWord* blk_end) { + if (blk_end > _next_offset_threshold) + alloc_block_work1(blk_start, blk_end); + } + void alloc_block(HeapWord* blk, size_t size) { + alloc_block(blk, blk+size); + } + + HeapWord* block_start_unsafe(const void* addr); + HeapWord* block_start_unsafe_const(const void* addr) const; +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp 2009-08-01 04:21:03.815295715 +0100 @@ -0,0 +1,153 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) { + if (addr >= _bottom && addr < _end) { + return block_start_unsafe(addr); + } else { + return NULL; + } +} + +inline HeapWord* +G1BlockOffsetTable::block_start_const(const void* addr) const { + if (addr >= _bottom && addr < _end) { + return block_start_unsafe_const(addr); + } else { + return NULL; + } +} + +inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const { + char* pc = (char*)p; + assert(pc >= (char*)_reserved.start() && + pc < (char*)_reserved.end(), + "p not in range."); + size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char)); + size_t result = delta >> LogN; + assert(result < _vs.committed_size(), "bad index from address"); + return result; +} + +inline HeapWord* +G1BlockOffsetSharedArray::address_for_index(size_t index) const { + assert(index < _vs.committed_size(), "bad index"); + HeapWord* result = _reserved.start() + (index << LogN_words); + assert(result >= _reserved.start() && result < _reserved.end(), + "bad address from index"); + return result; +} + +inline HeapWord* +G1BlockOffsetArray::block_at_or_preceding(const void* addr, + bool has_max_index, + size_t max_index) const { + assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); + size_t index = _array->index_for(addr); + // We must make sure that the offset table entry we use is valid. If + // "addr" is past the end, start at the last known one and go forward. + if (has_max_index) { + index = MIN2(index, max_index); + } + HeapWord* q = _array->address_for_index(index); + + uint offset = _array->offset_array(index); // Extend u_char to uint. + while (offset >= N_words) { + // The excess of the offset from N_words indicates a power of Base + // to go back by. + size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset); + q -= (N_words * n_cards_back); + assert(q >= _sp->bottom(), "Went below bottom!"); + index -= n_cards_back; + offset = _array->offset_array(index); + } + assert(offset < N_words, "offset too large"); + q -= offset; + return q; +} + +inline HeapWord* +G1BlockOffsetArray:: +forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n, + const void* addr) const { + if (csp() != NULL) { + if (addr >= csp()->top()) return csp()->top(); + while (n <= addr) { + q = n; + oop obj = oop(q); + if (obj->klass() == NULL) return q; + n += obj->size(); + } + } else { + while (n <= addr) { + q = n; + oop obj = oop(q); + if (obj->klass() == NULL) return q; + n += _sp->block_size(q); + } + } + assert(q <= n, "wrong order for q and addr"); + assert(addr < n, "wrong order for addr and n"); + return q; +} + +inline HeapWord* +G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q, + const void* addr) { + if (oop(q)->klass() == NULL) return q; + HeapWord* n = q + _sp->block_size(q); + // In the normal case, where the query "addr" is a card boundary, and the + // offset table chunks are the same size as cards, the block starting at + // "q" will contain addr, so the test below will fail, and we'll fall + // through quickly. 
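// (A worked example of the decoding performed by block_at_or_preceding above,
// assuming 64 words per 512-byte card on a 64-bit VM: an entry of 3 means the
// recorded block boundary lies 3 words before that card's start address; an
// entry of N_words + k means "skip back entry_to_cards_back(N_words + k)
// cards and re-read", repeating until an entry below N_words is reached.)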
+ if (n <= addr) { + q = forward_to_block_containing_addr_slow(q, n, addr); + } + assert(q <= addr, "wrong order for current and arg"); + return q; +} + +////////////////////////////////////////////////////////////////////////// +// BlockOffsetArrayNonContigSpace inlines +////////////////////////////////////////////////////////////////////////// +inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) { + // Verify that the BOT shows [blk_start, blk_end) to be one block. + verify_single_block(blk_start, blk_end); + // adjust _unallocated_block upward or downward + // as appropriate + if (BlockOffsetArrayUseUnallocatedBlock) { + assert(_unallocated_block <= _end, + "Inconsistent value for _unallocated_block"); + if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) { + // CMS-specific note: a block abutting _unallocated_block to + // its left is being freed, a new block is being added or + // we are resetting following a compaction + _unallocated_block = blk_start; + } + } +} + +inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) { + freed(blk, blk + size); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2009-08-01 04:21:04.260157771 +0100 @@ -0,0 +1,5372 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_g1CollectedHeap.cpp.incl" + +// turn it on so that the contents of the young list (scan-only / +// to-be-collected) are printed at "strategic" points before / during +// / after the collection --- this is useful for debugging +#define SCAN_ONLY_VERBOSE 0 +// CURRENT STATUS +// This file is under construction. Search for "FIXME". + +// INVARIANTS/NOTES +// +// All allocation activity covered by the G1CollectedHeap interface is +// serialized by acquiring the HeapLock. This happens in +// mem_allocate_work, which all such allocation functions call. +// (Note that this does not apply to TLAB allocation, which is not part +// of this interface: it is done by clients of this interface.) + +// Local to this file. 
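// (For orientation: RefineCardTableEntryClosure is the closure installed on
// the JavaThread dirty card queue set in initialize() further down, via
// JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); the
// ClearLogged/RedirtyLogged closures show up again in the
// check_ct_logs_at_safepoint() consistency check below.)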
+ +class RefineCardTableEntryClosure: public CardTableEntryClosure { + SuspendibleThreadSet* _sts; + G1RemSet* _g1rs; + ConcurrentG1Refine* _cg1r; + bool _concurrent; +public: + RefineCardTableEntryClosure(SuspendibleThreadSet* sts, + G1RemSet* g1rs, + ConcurrentG1Refine* cg1r) : + _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) + {} + bool do_card_ptr(jbyte* card_ptr, int worker_i) { + _g1rs->concurrentRefineOneCard(card_ptr, worker_i); + if (_concurrent && _sts->should_yield()) { + // Caller will actually yield. + return false; + } + // Otherwise, we finished successfully; return true. + return true; + } + void set_concurrent(bool b) { _concurrent = b; } +}; + + +class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { + int _calls; + G1CollectedHeap* _g1h; + CardTableModRefBS* _ctbs; + int _histo[256]; +public: + ClearLoggedCardTableEntryClosure() : + _calls(0) + { + _g1h = G1CollectedHeap::heap(); + _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); + for (int i = 0; i < 256; i++) _histo[i] = 0; + } + bool do_card_ptr(jbyte* card_ptr, int worker_i) { + if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { + _calls++; + unsigned char* ujb = (unsigned char*)card_ptr; + int ind = (int)(*ujb); + _histo[ind]++; + *card_ptr = -1; + } + return true; + } + int calls() { return _calls; } + void print_histo() { + gclog_or_tty->print_cr("Card table value histogram:"); + for (int i = 0; i < 256; i++) { + if (_histo[i] != 0) { + gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); + } + } + } +}; + +class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { + int _calls; + G1CollectedHeap* _g1h; + CardTableModRefBS* _ctbs; +public: + RedirtyLoggedCardTableEntryClosure() : + _calls(0) + { + _g1h = G1CollectedHeap::heap(); + _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); + } + bool do_card_ptr(jbyte* card_ptr, int worker_i) { + if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { + _calls++; + *card_ptr = 0; + } + return true; + } + int calls() { return _calls; } +}; + +class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { +public: + bool do_card_ptr(jbyte* card_ptr, int worker_i) { + *card_ptr = CardTableModRefBS::dirty_card_val(); + return true; + } +}; + +YoungList::YoungList(G1CollectedHeap* g1h) + : _g1h(g1h), _head(NULL), + _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL), + _length(0), _scan_only_length(0), + _last_sampled_rs_lengths(0), + _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) +{ + guarantee( check_list_empty(false), "just making sure..." 
); +} + +void YoungList::push_region(HeapRegion *hr) { + assert(!hr->is_young(), "should not already be young"); + assert(hr->get_next_young_region() == NULL, "cause it should!"); + + hr->set_next_young_region(_head); + _head = hr; + + hr->set_young(); + double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); + ++_length; +} + +void YoungList::add_survivor_region(HeapRegion* hr) { + assert(hr->is_survivor(), "should be flagged as survivor region"); + assert(hr->get_next_young_region() == NULL, "cause it should!"); + + hr->set_next_young_region(_survivor_head); + if (_survivor_head == NULL) { + _survivor_tail = hr; + } + _survivor_head = hr; + + ++_survivor_length; +} + +HeapRegion* YoungList::pop_region() { + while (_head != NULL) { + assert( length() > 0, "list should not be empty" ); + HeapRegion* ret = _head; + _head = ret->get_next_young_region(); + ret->set_next_young_region(NULL); + --_length; + assert(ret->is_young(), "region should be very young"); + + // Replace 'Survivor' region type with 'Young'. So the region will + // be treated as a young region and will not be 'confused' with + // newly created survivor regions. + if (ret->is_survivor()) { + ret->set_young(); + } + + if (!ret->is_scan_only()) { + return ret; + } + + // scan-only, we'll add it to the scan-only list + if (_scan_only_tail == NULL) { + guarantee( _scan_only_head == NULL, "invariant" ); + + _scan_only_head = ret; + _curr_scan_only = ret; + } else { + guarantee( _scan_only_head != NULL, "invariant" ); + _scan_only_tail->set_next_young_region(ret); + } + guarantee( ret->get_next_young_region() == NULL, "invariant" ); + _scan_only_tail = ret; + + // no need to be tagged as scan-only any more + ret->set_young(); + + ++_scan_only_length; + } + assert( length() == 0, "list should be empty" ); + return NULL; +} + +void YoungList::empty_list(HeapRegion* list) { + while (list != NULL) { + HeapRegion* next = list->get_next_young_region(); + list->set_next_young_region(NULL); + list->uninstall_surv_rate_group(); + list->set_not_young(); + list = next; + } +} + +void YoungList::empty_list() { + assert(check_list_well_formed(), "young list should be well formed"); + + empty_list(_head); + _head = NULL; + _length = 0; + + empty_list(_scan_only_head); + _scan_only_head = NULL; + _scan_only_tail = NULL; + _scan_only_length = 0; + _curr_scan_only = NULL; + + empty_list(_survivor_head); + _survivor_head = NULL; + _survivor_tail = NULL; + _survivor_length = 0; + + _last_sampled_rs_lengths = 0; + + assert(check_list_empty(false), "just making sure..."); +} + +bool YoungList::check_list_well_formed() { + bool ret = true; + + size_t length = 0; + HeapRegion* curr = _head; + HeapRegion* last = NULL; + while (curr != NULL) { + if (!curr->is_young() || curr->is_scan_only()) { + gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " + "incorrectly tagged (%d, %d)", + curr->bottom(), curr->end(), + curr->is_young(), curr->is_scan_only()); + ret = false; + } + ++length; + last = curr; + curr = curr->get_next_young_region(); + } + ret = ret && (length == _length); + + if (!ret) { + gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); + gclog_or_tty->print_cr("### list has %d entries, _length is %d", + length, _length); + } + + bool scan_only_ret = true; + length = 0; + curr = _scan_only_head; + last = NULL; + while (curr != NULL) { + if (!curr->is_young() || curr->is_scan_only()) { + gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" " + "incorrectly tagged (%d, %d)", + 
curr->bottom(), curr->end(), + curr->is_young(), curr->is_scan_only()); + scan_only_ret = false; + } + ++length; + last = curr; + curr = curr->get_next_young_region(); + } + scan_only_ret = scan_only_ret && (length == _scan_only_length); + + if ( (last != _scan_only_tail) || + (_scan_only_head == NULL && _scan_only_tail != NULL) || + (_scan_only_head != NULL && _scan_only_tail == NULL) ) { + gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly"); + scan_only_ret = false; + } + + if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) { + gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly"); + scan_only_ret = false; + } + + if (!scan_only_ret) { + gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!"); + gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d", + length, _scan_only_length); + } + + return ret && scan_only_ret; +} + +bool YoungList::check_list_empty(bool ignore_scan_only_list, + bool check_sample) { + bool ret = true; + + if (_length != 0) { + gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", + _length); + ret = false; + } + if (check_sample && _last_sampled_rs_lengths != 0) { + gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); + ret = false; + } + if (_head != NULL) { + gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); + ret = false; + } + if (!ret) { + gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); + } + + if (ignore_scan_only_list) + return ret; + + bool scan_only_ret = true; + if (_scan_only_length != 0) { + gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d", + _scan_only_length); + scan_only_ret = false; + } + if (_scan_only_head != NULL) { + gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head"); + scan_only_ret = false; + } + if (_scan_only_tail != NULL) { + gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail"); + scan_only_ret = false; + } + if (!scan_only_ret) { + gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty"); + } + + return ret && scan_only_ret; +} + +void +YoungList::rs_length_sampling_init() { + _sampled_rs_lengths = 0; + _curr = _head; +} + +bool +YoungList::rs_length_sampling_more() { + return _curr != NULL; +} + +void +YoungList::rs_length_sampling_next() { + assert( _curr != NULL, "invariant" ); + _sampled_rs_lengths += _curr->rem_set()->occupied(); + _curr = _curr->get_next_young_region(); + if (_curr == NULL) { + _last_sampled_rs_lengths = _sampled_rs_lengths; + // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); + } +} + +void +YoungList::reset_auxilary_lists() { + // We could have just "moved" the scan-only list to the young list. + // However, the scan-only list is ordered according to the region + // age in descending order, so, by moving one entry at a time, we + // ensure that it is recreated in ascending order. + + guarantee( is_empty(), "young list should be empty" ); + assert(check_list_well_formed(), "young list should be well formed"); + + // Add survivor regions to SurvRateGroup. 
+ _g1h->g1_policy()->note_start_adding_survivor_regions(); + _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); + for (HeapRegion* curr = _survivor_head; + curr != NULL; + curr = curr->get_next_young_region()) { + _g1h->g1_policy()->set_region_survivors(curr); + } + _g1h->g1_policy()->note_stop_adding_survivor_regions(); + + if (_survivor_head != NULL) { + _head = _survivor_head; + _length = _survivor_length + _scan_only_length; + _survivor_tail->set_next_young_region(_scan_only_head); + } else { + _head = _scan_only_head; + _length = _scan_only_length; + } + + for (HeapRegion* curr = _scan_only_head; + curr != NULL; + curr = curr->get_next_young_region()) { + curr->recalculate_age_in_surv_rate_group(); + } + _scan_only_head = NULL; + _scan_only_tail = NULL; + _scan_only_length = 0; + _curr_scan_only = NULL; + + _survivor_head = NULL; + _survivor_tail = NULL; + _survivor_length = 0; + _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); + + assert(check_list_well_formed(), "young list should be well formed"); +} + +void YoungList::print() { + HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head}; + const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"}; + + for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { + gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); + HeapRegion *curr = lists[list]; + if (curr == NULL) + gclog_or_tty->print_cr(" empty"); + while (curr != NULL) { + gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " + "age: %4d, y: %d, s-o: %d, surv: %d", + curr->bottom(), curr->end(), + curr->top(), + curr->prev_top_at_mark_start(), + curr->next_top_at_mark_start(), + curr->top_at_conc_mark_count(), + curr->age_in_surv_rate_group_cond(), + curr->is_young(), + curr->is_scan_only(), + curr->is_survivor()); + curr = curr->get_next_young_region(); + } + } + + gclog_or_tty->print_cr(""); +} + +void G1CollectedHeap::stop_conc_gc_threads() { + _cg1r->cg1rThread()->stop(); + _czft->stop(); + _cmThread->stop(); +} + + +void G1CollectedHeap::check_ct_logs_at_safepoint() { + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); + + // Count the dirty cards at the start. + CountNonCleanMemRegionClosure count1(this); + ct_bs->mod_card_iterate(&count1); + int orig_count = count1.n(); + + // First clear the logged cards. + ClearLoggedCardTableEntryClosure clear; + dcqs.set_closure(&clear); + dcqs.apply_closure_to_all_completed_buffers(); + dcqs.iterate_closure_all_threads(false); + clear.print_histo(); + + // Now ensure that there's no dirty cards. 
+ CountNonCleanMemRegionClosure count2(this); + ct_bs->mod_card_iterate(&count2); + if (count2.n() != 0) { + gclog_or_tty->print_cr("Card table has %d entries; %d originally", + count2.n(), orig_count); + } + guarantee(count2.n() == 0, "Card table should be clean."); + + RedirtyLoggedCardTableEntryClosure redirty; + JavaThread::dirty_card_queue_set().set_closure(&redirty); + dcqs.apply_closure_to_all_completed_buffers(); + dcqs.iterate_closure_all_threads(false); + gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", + clear.calls(), orig_count); + guarantee(redirty.calls() == clear.calls(), + "Or else mechanism is broken."); + + CountNonCleanMemRegionClosure count3(this); + ct_bs->mod_card_iterate(&count3); + if (count3.n() != orig_count) { + gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", + orig_count, count3.n()); + guarantee(count3.n() >= orig_count, "Should have restored them all."); + } + + JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); +} + +// Private class members. + +G1CollectedHeap* G1CollectedHeap::_g1h; + +// Private methods. + +// Finds a HeapRegion that can be used to allocate a given size of block. + + +HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, + bool do_expand, + bool zero_filled) { + ConcurrentZFThread::note_region_alloc(); + HeapRegion* res = alloc_free_region_from_lists(zero_filled); + if (res == NULL && do_expand) { + expand(word_size * HeapWordSize); + res = alloc_free_region_from_lists(zero_filled); + assert(res == NULL || + (!res->isHumongous() && + (!zero_filled || + res->zero_fill_state() == HeapRegion::Allocated)), + "Alloc Regions must be zero filled (and non-H)"); + } + if (res != NULL && res->is_empty()) _free_regions--; + assert(res == NULL || + (!res->isHumongous() && + (!zero_filled || + res->zero_fill_state() == HeapRegion::Allocated)), + "Non-young alloc Regions must be zero filled (and non-H)"); + + if (G1PrintRegions) { + if (res != NULL) { + gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " + "top "PTR_FORMAT, + res->hrs_index(), res->bottom(), res->end(), res->top()); + } + } + + return res; +} + +HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, + size_t word_size, + bool zero_filled) { + HeapRegion* alloc_region = NULL; + if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { + alloc_region = newAllocRegion_work(word_size, true, zero_filled); + if (purpose == GCAllocForSurvived && alloc_region != NULL) { + alloc_region->set_survivor(); + } + ++_gc_alloc_region_counts[purpose]; + } else { + g1_policy()->note_alloc_region_limit_reached(purpose); + } + return alloc_region; +} + +// If could fit into free regions w/o expansion, try. +// Otherwise, if can expand, do so. +// Otherwise, if using ex regions might help, try with ex given back. +HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { + assert(regions_accounted_for(), "Region leakage!"); + + // We can't allocate H regions while cleanupComplete is running, since + // some of the regions we find to be empty might not yet be added to the + // unclean list. (If we're already at a safepoint, this call is + // unnecessary, not to mention wrong.) + if (!SafepointSynchronize::is_at_safepoint()) + wait_for_cleanup_complete(); + + size_t num_regions = + round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; + + // Special case if < one region??? + + // Remember the ft size. 
+ size_t x_size = expansion_regions(); + + HeapWord* res = NULL; + bool eliminated_allocated_from_lists = false; + + // Can the allocation potentially fit in the free regions? + if (free_regions() >= num_regions) { + res = _hrs->obj_allocate(word_size); + } + if (res == NULL) { + // Try expansion. + size_t fs = _hrs->free_suffix(); + if (fs + x_size >= num_regions) { + expand((num_regions - fs) * HeapRegion::GrainBytes); + res = _hrs->obj_allocate(word_size); + assert(res != NULL, "This should have worked."); + } else { + // Expansion won't help. Are there enough free regions if we get rid + // of reservations? + size_t avail = free_regions(); + if (avail >= num_regions) { + res = _hrs->obj_allocate(word_size); + if (res != NULL) { + remove_allocated_regions_from_lists(); + eliminated_allocated_from_lists = true; + } + } + } + } + if (res != NULL) { + // Increment by the number of regions allocated. + // FIXME: Assumes regions all of size GrainBytes. +#ifndef PRODUCT + mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * + HeapRegion::GrainWords)); +#endif + if (!eliminated_allocated_from_lists) + remove_allocated_regions_from_lists(); + _summary_bytes_used += word_size * HeapWordSize; + _free_regions -= num_regions; + _num_humongous_regions += (int) num_regions; + } + assert(regions_accounted_for(), "Region Leakage"); + return res; +} + +HeapWord* +G1CollectedHeap::attempt_allocation_slow(size_t word_size, + bool permit_collection_pause) { + HeapWord* res = NULL; + HeapRegion* allocated_young_region = NULL; + + assert( SafepointSynchronize::is_at_safepoint() || + Heap_lock->owned_by_self(), "pre condition of the call" ); + + if (isHumongous(word_size)) { + // Allocation of a humongous object can, in a sense, complete a + // partial region, if the previous alloc was also humongous, and + // caused the test below to succeed. + if (permit_collection_pause) + do_collection_pause_if_appropriate(word_size); + res = humongousObjAllocate(word_size); + assert(_cur_alloc_region == NULL + || !_cur_alloc_region->isHumongous(), + "Prevent a regression of this bug."); + + } else { + // We may have concurrent cleanup working at the time. Wait for it + // to complete. In the future we would probably want to make the + // concurrent cleanup truly concurrent by decoupling it from the + // allocation. + if (!SafepointSynchronize::is_at_safepoint()) + wait_for_cleanup_complete(); + // If we do a collection pause, this will be reset to a non-NULL + // value. If we don't, nulling here ensures that we allocate a new + // region below. + if (_cur_alloc_region != NULL) { + // We're finished with the _cur_alloc_region. + _summary_bytes_used += _cur_alloc_region->used(); + _cur_alloc_region = NULL; + } + assert(_cur_alloc_region == NULL, "Invariant."); + // Completion of a heap region is perhaps a good point at which to do + // a collection pause. + if (permit_collection_pause) + do_collection_pause_if_appropriate(word_size); + // Make sure we have an allocation region available. + if (_cur_alloc_region == NULL) { + if (!SafepointSynchronize::is_at_safepoint()) + wait_for_cleanup_complete(); + bool next_is_young = should_set_young_locked(); + // If the next region is not young, make sure it's zero-filled. 
+ _cur_alloc_region = newAllocRegion(word_size, !next_is_young); + if (_cur_alloc_region != NULL) { + _summary_bytes_used -= _cur_alloc_region->used(); + if (next_is_young) { + set_region_short_lived_locked(_cur_alloc_region); + allocated_young_region = _cur_alloc_region; + } + } + } + assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), + "Prevent a regression of this bug."); + + // Now retry the allocation. + if (_cur_alloc_region != NULL) { + res = _cur_alloc_region->allocate(word_size); + } + } + + // NOTE: fails frequently in PRT + assert(regions_accounted_for(), "Region leakage!"); + + if (res != NULL) { + if (!SafepointSynchronize::is_at_safepoint()) { + assert( permit_collection_pause, "invariant" ); + assert( Heap_lock->owned_by_self(), "invariant" ); + Heap_lock->unlock(); + } + + if (allocated_young_region != NULL) { + HeapRegion* hr = allocated_young_region; + HeapWord* bottom = hr->bottom(); + HeapWord* end = hr->end(); + MemRegion mr(bottom, end); + ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); + } + } + + assert( SafepointSynchronize::is_at_safepoint() || + (res == NULL && Heap_lock->owned_by_self()) || + (res != NULL && !Heap_lock->owned_by_self()), + "post condition of the call" ); + + return res; +} + +HeapWord* +G1CollectedHeap::mem_allocate(size_t word_size, + bool is_noref, + bool is_tlab, + bool* gc_overhead_limit_was_exceeded) { + debug_only(check_for_valid_allocation_state()); + assert(no_gc_in_progress(), "Allocation during gc not allowed"); + HeapWord* result = NULL; + + // Loop until the allocation is satisified, + // or unsatisfied after GC. + for (int try_count = 1; /* return or throw */; try_count += 1) { + int gc_count_before; + { + Heap_lock->lock(); + result = attempt_allocation(word_size); + if (result != NULL) { + // attempt_allocation should have unlocked the heap lock + assert(is_in(result), "result not in heap"); + return result; + } + // Read the gc count while the heap lock is held. + gc_count_before = SharedHeap::heap()->total_collections(); + Heap_lock->unlock(); + } + + // Create the garbage collection operation... + VM_G1CollectForAllocation op(word_size, + gc_count_before); + + // ...and get the VM thread to execute it. + VMThread::execute(&op); + if (op.prologue_succeeded()) { + result = op.result(); + assert(result == NULL || is_in(result), "result not in heap"); + return result; + } + + // Give a warning if we seem to be looping forever. + if ((QueuedAllocationWarningCount > 0) && + (try_count % QueuedAllocationWarningCount == 0)) { + warning("G1CollectedHeap::mem_allocate_work retries %d times", + try_count); + } + } +} + +void G1CollectedHeap::abandon_cur_alloc_region() { + if (_cur_alloc_region != NULL) { + // We're finished with the _cur_alloc_region. + if (_cur_alloc_region->is_empty()) { + _free_regions++; + free_region(_cur_alloc_region); + } else { + _summary_bytes_used += _cur_alloc_region->used(); + } + _cur_alloc_region = NULL; + } +} + +void G1CollectedHeap::abandon_gc_alloc_regions() { + // first, make sure that the GC alloc region list is empty (it should!) 
+ assert(_gc_alloc_region_list == NULL, "invariant"); + release_gc_alloc_regions(true /* totally */); +} + +class PostMCRemSetClearClosure: public HeapRegionClosure { + ModRefBarrierSet* _mr_bs; +public: + PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} + bool doHeapRegion(HeapRegion* r) { + r->reset_gc_time_stamp(); + if (r->continuesHumongous()) + return false; + HeapRegionRemSet* hrrs = r->rem_set(); + if (hrrs != NULL) hrrs->clear(); + // You might think here that we could clear just the cards + // corresponding to the used region. But no: if we leave a dirty card + // in a region we might allocate into, then it would prevent that card + // from being enqueued, and cause it to be missed. + // Re: the performance cost: we shouldn't be doing full GC anyway! + _mr_bs->clear(MemRegion(r->bottom(), r->end())); + return false; + } +}; + + +class PostMCRemSetInvalidateClosure: public HeapRegionClosure { + ModRefBarrierSet* _mr_bs; +public: + PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} + bool doHeapRegion(HeapRegion* r) { + if (r->continuesHumongous()) return false; + if (r->used_region().word_size() != 0) { + _mr_bs->invalidate(r->used_region(), true /*whole heap*/); + } + return false; + } +}; + +class RebuildRSOutOfRegionClosure: public HeapRegionClosure { + G1CollectedHeap* _g1h; + UpdateRSOopClosure _cl; + int _worker_i; +public: + RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : + _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i), + _worker_i(worker_i), + _g1h(g1) + { } + bool doHeapRegion(HeapRegion* r) { + if (!r->continuesHumongous()) { + _cl.set_from(r); + r->oop_iterate(&_cl); + } + return false; + } +}; + +class ParRebuildRSTask: public AbstractGangTask { + G1CollectedHeap* _g1; +public: + ParRebuildRSTask(G1CollectedHeap* g1) + : AbstractGangTask("ParRebuildRSTask"), + _g1(g1) + { } + + void work(int i) { + RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); + _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, + HeapRegion::RebuildRSClaimValue); + } +}; + +void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs, + size_t word_size) { + ResourceMark rm; + + if (full && DisableExplicitGC) { + gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n"); + return; + } + + assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); + assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); + + if (GC_locker::is_active()) { + return; // GC is disabled (e.g. JNI GetXXXCritical operation) + } + + { + IsGCActiveMark x; + + // Timing + gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); + TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); + TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); + + double start = os::elapsedTime(); + GCOverheadReporter::recordSTWStart(start); + g1_policy()->record_full_collection_start(); + + gc_prologue(true); + increment_total_collections(); + + size_t g1h_prev_used = used(); + assert(used() == recalculate_used(), "Should be equal"); + + if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { + HandleMark hm; // Discard invalid handles created during verification + prepare_for_verify(); + gclog_or_tty->print(" VerifyBeforeGC:"); + Universe::verify(true); + } + assert(regions_accounted_for(), "Region leakage!"); + + COMPILER2_PRESENT(DerivedPointerTable::clear()); + + // We want to discover references, but not process them yet. 
+ // This mode is disabled in + // instanceRefKlass::process_discovered_references if the + // generation does some collection work, or + // instanceRefKlass::enqueue_discovered_references if the + // generation returns without doing any work. + ref_processor()->disable_discovery(); + ref_processor()->abandon_partial_discovery(); + ref_processor()->verify_no_references_recorded(); + + // Abandon current iterations of concurrent marking and concurrent + // refinement, if any are in progress. + concurrent_mark()->abort(); + + // Make sure we'll choose a new allocation region afterwards. + abandon_cur_alloc_region(); + abandon_gc_alloc_regions(); + assert(_cur_alloc_region == NULL, "Invariant."); + g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); + tear_down_region_lists(); + set_used_regions_to_need_zero_fill(); + if (g1_policy()->in_young_gc_mode()) { + empty_young_list(); + g1_policy()->set_full_young_gcs(true); + } + + // Temporarily make reference _discovery_ single threaded (non-MT). + ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); + + // Temporarily make refs discovery atomic + ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); + + // Temporarily clear _is_alive_non_header + ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); + + ref_processor()->enable_discovery(); + ref_processor()->setup_policy(clear_all_soft_refs); + + // Do collection work + { + HandleMark hm; // Discard invalid handles created during gc + G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); + } + // Because freeing humongous regions may have added some unclean + // regions, it is necessary to tear down again before rebuilding. + tear_down_region_lists(); + rebuild_region_lists(); + + _summary_bytes_used = recalculate_used(); + + ref_processor()->enqueue_discovered_references(); + + COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); + + if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { + HandleMark hm; // Discard invalid handles created during verification + gclog_or_tty->print(" VerifyAfterGC:"); + prepare_for_verify(); + Universe::verify(false); + } + NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); + + reset_gc_time_stamp(); + // Since everything potentially moved, we will clear all remembered + // sets, and clear all cards. Later we will rebuild remebered + // sets. We will also reset the GC time stamps of the regions. + PostMCRemSetClearClosure rs_clear(mr_bs()); + heap_region_iterate(&rs_clear); + + // Resize the heap if necessary. + resize_if_necessary_after_full_collection(full ? 0 : word_size); + + if (_cg1r->use_cache()) { + _cg1r->clear_and_record_card_counts(); + _cg1r->clear_hot_cache(); + } + + // Rebuild remembered sets of all regions. 
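// (The parallel branch below follows the usual region claim-value protocol:
// regions start at HeapRegion::InitialClaimValue, each worker running
// ParRebuildRSTask claims regions through heap_region_par_iterate_chunked()
// and tags them with RebuildRSClaimValue so no region is rebuilt twice, and
// the surrounding asserts plus reset_heap_region_claim_values() check and
// restore the claim state afterwards.)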
+ if (ParallelGCThreads > 0) { + ParRebuildRSTask rebuild_rs_task(this); + assert(check_heap_region_claim_values( + HeapRegion::InitialClaimValue), "sanity check"); + set_par_threads(workers()->total_workers()); + workers()->run_task(&rebuild_rs_task); + set_par_threads(0); + assert(check_heap_region_claim_values( + HeapRegion::RebuildRSClaimValue), "sanity check"); + reset_heap_region_claim_values(); + } else { + RebuildRSOutOfRegionClosure rebuild_rs(this); + heap_region_iterate(&rebuild_rs); + } + + if (PrintGC) { + print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); + } + + if (true) { // FIXME + // Ask the permanent generation to adjust size for full collections + perm()->compute_new_size(); + } + + double end = os::elapsedTime(); + GCOverheadReporter::recordSTWEnd(end); + g1_policy()->record_full_collection_end(); + + gc_epilogue(true); + + // Abandon concurrent refinement. This must happen last: in the + // dirty-card logging system, some cards may be dirty by weak-ref + // processing, and may be enqueued. But the whole card table is + // dirtied, so this should abandon those logs, and set "do_traversal" + // to true. + concurrent_g1_refine()->set_pya_restart(); + assert(!G1DeferredRSUpdate + || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); + assert(regions_accounted_for(), "Region leakage!"); + } + + if (g1_policy()->in_young_gc_mode()) { + _young_list->reset_sampled_info(); + assert( check_young_list_empty(false, false), + "young list should be empty at this point"); + } +} + +void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { + do_collection(true, clear_all_soft_refs, 0); +} + +// This code is mostly copied from TenuredGeneration. +void +G1CollectedHeap:: +resize_if_necessary_after_full_collection(size_t word_size) { + assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); + + // Include the current allocation, if any, and bytes that will be + // pre-allocated to support collections, as "used". + const size_t used_after_gc = used(); + const size_t capacity_after_gc = capacity(); + const size_t free_after_gc = capacity_after_gc - used_after_gc; + + // We don't have floating point command-line arguments + const double minimum_free_percentage = (double) MinHeapFreeRatio / 100; + const double maximum_used_percentage = 1.0 - minimum_free_percentage; + const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; + const double minimum_used_percentage = 1.0 - maximum_free_percentage; + + size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage); + size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage); + + // Don't shrink less than the initial size. 
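// (Worked example of the ratios above, with illustrative values rather than
// anything taken from this change: MinHeapFreeRatio = 40 and
// MaxHeapFreeRatio = 70 give maximum_used_percentage = 0.6 and
// minimum_used_percentage = 0.3, so 600M used after GC yields
// minimum_desired_capacity = 600M / 0.6 = 1000M and
// maximum_desired_capacity = 600M / 0.3 = 2000M; the heap is expanded if
// capacity() is below 1000M and shrunk if it is above 2000M, subject to the
// clamping just below.)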
+ minimum_desired_capacity = + MAX2(minimum_desired_capacity, + collector_policy()->initial_heap_byte_size()); + maximum_desired_capacity = + MAX2(maximum_desired_capacity, + collector_policy()->initial_heap_byte_size()); + + // We are failing here because minimum_desired_capacity is + assert(used_after_gc <= minimum_desired_capacity, "sanity check"); + assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check"); + + if (PrintGC && Verbose) { + const double free_percentage = ((double)free_after_gc) / capacity(); + gclog_or_tty->print_cr("Computing new size after full GC "); + gclog_or_tty->print_cr(" " + " minimum_free_percentage: %6.2f", + minimum_free_percentage); + gclog_or_tty->print_cr(" " + " maximum_free_percentage: %6.2f", + maximum_free_percentage); + gclog_or_tty->print_cr(" " + " capacity: %6.1fK" + " minimum_desired_capacity: %6.1fK" + " maximum_desired_capacity: %6.1fK", + capacity() / (double) K, + minimum_desired_capacity / (double) K, + maximum_desired_capacity / (double) K); + gclog_or_tty->print_cr(" " + " free_after_gc : %6.1fK" + " used_after_gc : %6.1fK", + free_after_gc / (double) K, + used_after_gc / (double) K); + gclog_or_tty->print_cr(" " + " free_percentage: %6.2f", + free_percentage); + } + if (capacity() < minimum_desired_capacity) { + // Don't expand unless it's significant + size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; + expand(expand_bytes); + if (PrintGC && Verbose) { + gclog_or_tty->print_cr(" expanding:" + " minimum_desired_capacity: %6.1fK" + " expand_bytes: %6.1fK", + minimum_desired_capacity / (double) K, + expand_bytes / (double) K); + } + + // No expansion, now see if we want to shrink + } else if (capacity() > maximum_desired_capacity) { + // Capacity too large, compute shrinking size + size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; + shrink(shrink_bytes); + if (PrintGC && Verbose) { + gclog_or_tty->print_cr(" " + " shrinking:" + " initSize: %.1fK" + " maximum_desired_capacity: %.1fK", + collector_policy()->initial_heap_byte_size() / (double) K, + maximum_desired_capacity / (double) K); + gclog_or_tty->print_cr(" " + " shrink_bytes: %.1fK", + shrink_bytes / (double) K); + } + } +} + + +HeapWord* +G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { + HeapWord* result = NULL; + + // In a G1 heap, we're supposed to keep allocation from failing by + // incremental pauses. Therefore, at least for now, we'll favor + // expansion over collection. (This might change in the future if we can + // do something smarter than full collection to satisfy a failed alloc.) + + result = expand_and_allocate(word_size); + if (result != NULL) { + assert(is_in(result), "result not in heap"); + return result; + } + + // OK, I guess we have to try collection. + + do_collection(false, false, word_size); + + result = attempt_allocation(word_size, /*permit_collection_pause*/false); + + if (result != NULL) { + assert(is_in(result), "result not in heap"); + return result; + } + + // Try collecting soft references. + do_collection(false, true, word_size); + result = attempt_allocation(word_size, /*permit_collection_pause*/false); + if (result != NULL) { + assert(is_in(result), "result not in heap"); + return result; + } + + // What else? We might try synchronous finalization later. If the total + // space available is large enough for the allocation, then a more + // complete compaction phase than we've tried so far might be + // appropriate. 
+ return NULL; +} + +// Attempting to expand the heap sufficiently +// to support an allocation of the given "word_size". If +// successful, perform the allocation and return the address of the +// allocated block, or else "NULL". + +HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { + size_t expand_bytes = word_size * HeapWordSize; + if (expand_bytes < MinHeapDeltaBytes) { + expand_bytes = MinHeapDeltaBytes; + } + expand(expand_bytes); + assert(regions_accounted_for(), "Region leakage!"); + HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); + return result; +} + +size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { + size_t pre_used = 0; + size_t cleared_h_regions = 0; + size_t freed_regions = 0; + UncleanRegionList local_list; + free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, + freed_regions, &local_list); + + finish_free_region_work(pre_used, cleared_h_regions, freed_regions, + &local_list); + return pre_used; +} + +void +G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, + size_t& pre_used, + size_t& cleared_h, + size_t& freed_regions, + UncleanRegionList* list, + bool par) { + assert(!hr->continuesHumongous(), "should have filtered these out"); + size_t res = 0; + if (hr->used() > 0 && hr->garbage_bytes() == hr->used() && + !hr->is_young()) { + if (G1PolicyVerbose > 0) + gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" + " during cleanup", hr, hr->used()); + free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); + } +} + +// FIXME: both this and shrink could probably be more efficient by +// doing one "VirtualSpace::expand_by" call rather than several. +void G1CollectedHeap::expand(size_t expand_bytes) { + size_t old_mem_size = _g1_storage.committed_size(); + // We expand by a minimum of 1K. + expand_bytes = MAX2(expand_bytes, (size_t)K); + size_t aligned_expand_bytes = + ReservedSpace::page_align_size_up(expand_bytes); + aligned_expand_bytes = align_size_up(aligned_expand_bytes, + HeapRegion::GrainBytes); + expand_bytes = aligned_expand_bytes; + while (expand_bytes > 0) { + HeapWord* base = (HeapWord*)_g1_storage.high(); + // Commit more storage. + bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); + if (!successful) { + expand_bytes = 0; + } else { + expand_bytes -= HeapRegion::GrainBytes; + // Expand the committed region. + HeapWord* high = (HeapWord*) _g1_storage.high(); + _g1_committed.set_end(high); + // Create a new HeapRegion. + MemRegion mr(base, high); + bool is_zeroed = !_g1_max_committed.contains(base); + HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); + + // Now update max_committed if necessary. + _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); + + // Add it to the HeapRegionSeq. + _hrs->insert(hr); + // Set the zero-fill state, according to whether it's already + // zeroed. + { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + if (is_zeroed) { + hr->set_zero_fill_complete(); + put_free_region_on_list_locked(hr); + } else { + hr->set_zero_fill_needed(); + put_region_on_unclean_list_locked(hr); + } + } + _free_regions++; + // And we used up an expansion region to create it. + _expansion_regions--; + // Tell the cardtable about it. + Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); + // And the offset table as well. 
+ _bot_shared->resize(_g1_committed.word_size()); + } + } + if (Verbose && PrintGC) { + size_t new_mem_size = _g1_storage.committed_size(); + gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", + old_mem_size/K, aligned_expand_bytes/K, + new_mem_size/K); + } +} + +void G1CollectedHeap::shrink_helper(size_t shrink_bytes) +{ + size_t old_mem_size = _g1_storage.committed_size(); + size_t aligned_shrink_bytes = + ReservedSpace::page_align_size_down(shrink_bytes); + aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, + HeapRegion::GrainBytes); + size_t num_regions_deleted = 0; + MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); + + assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); + if (mr.byte_size() > 0) + _g1_storage.shrink_by(mr.byte_size()); + assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); + + _g1_committed.set_end(mr.start()); + _free_regions -= num_regions_deleted; + _expansion_regions += num_regions_deleted; + + // Tell the cardtable about it. + Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); + + // And the offset table as well. + _bot_shared->resize(_g1_committed.word_size()); + + HeapRegionRemSet::shrink_heap(n_regions()); + + if (Verbose && PrintGC) { + size_t new_mem_size = _g1_storage.committed_size(); + gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", + old_mem_size/K, aligned_shrink_bytes/K, + new_mem_size/K); + } +} + +void G1CollectedHeap::shrink(size_t shrink_bytes) { + release_gc_alloc_regions(true /* totally */); + tear_down_region_lists(); // We will rebuild them in a moment. + shrink_helper(shrink_bytes); + rebuild_region_lists(); +} + +// Public methods. + +#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away +#pragma warning( disable:4355 ) // 'this' : used in base member initializer list +#endif // _MSC_VER + + +G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : + SharedHeap(policy_), + _g1_policy(policy_), + _ref_processor(NULL), + _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), + _bot_shared(NULL), + _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), + _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), + _evac_failure_scan_stack(NULL) , + _mark_in_progress(false), + _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), + _cur_alloc_region(NULL), + _refine_cte_cl(NULL), + _free_region_list(NULL), _free_region_list_size(0), + _free_regions(0), + _full_collection(false), + _unclean_region_list(), + _unclean_regions_coming(false), + _young_list(new YoungList(this)), + _gc_time_stamp(0), + _surviving_young_words(NULL), + _in_cset_fast_test(NULL), + _in_cset_fast_test_base(NULL) { + _g1h = this; // To catch bugs. 
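// (Note on the sizing a few lines below: n_queues is MAX2(ParallelGCThreads, 1),
// so the RefToScanQueueSet and the per-queue HeapRegionRemSetIterator array are
// allocated for at least one queue even when ParallelGCThreads is 0.)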
+ if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { + vm_exit_during_initialization("Failed necessary allocation."); + } + int n_queues = MAX2((int)ParallelGCThreads, 1); + _task_queues = new RefToScanQueueSet(n_queues); + + int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); + assert(n_rem_sets > 0, "Invariant."); + + HeapRegionRemSetIterator** iter_arr = + NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); + for (int i = 0; i < n_queues; i++) { + iter_arr[i] = new HeapRegionRemSetIterator(); + } + _rem_set_iterator = iter_arr; + + for (int i = 0; i < n_queues; i++) { + RefToScanQueue* q = new RefToScanQueue(); + q->initialize(); + _task_queues->register_queue(i, q); + } + + for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { + _gc_alloc_regions[ap] = NULL; + _gc_alloc_region_counts[ap] = 0; + _retained_gc_alloc_regions[ap] = NULL; + // by default, we do not retain a GC alloc region for each ap; + // we'll override this, when appropriate, below + _retain_gc_alloc_region[ap] = false; + } + + // We will try to remember the last half-full tenured region we + // allocated to at the end of a collection so that we can re-use it + // during the next collection. + _retain_gc_alloc_region[GCAllocForTenured] = true; + + guarantee(_task_queues != NULL, "task_queues allocation failure."); +} + +jint G1CollectedHeap::initialize() { + os::enable_vtime(); + + // Necessary to satisfy locking discipline assertions. + + MutexLocker x(Heap_lock); + + // While there are no constraints in the GC code that HeapWordSize + // be any particular value, there are multiple other areas in the + // system which believe this to be true (e.g. oop->object_size in some + // cases incorrectly returns the size in wordSize units rather than + // HeapWordSize). + guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); + + size_t init_byte_size = collector_policy()->initial_heap_byte_size(); + size_t max_byte_size = collector_policy()->max_heap_byte_size(); + + // Ensure that the sizes are properly aligned. + Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); + Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); + + // We allocate this in any case, but only do no work if the command line + // param is off. + _cg1r = new ConcurrentG1Refine(); + + // Reserve the maximum. + PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); + // Includes the perm-gen. + ReservedSpace heap_rs(max_byte_size + pgs->max_size(), + HeapRegion::GrainBytes, + false /*ism*/); + + if (!heap_rs.is_reserved()) { + vm_exit_during_initialization("Could not reserve enough space for object heap"); + return JNI_ENOMEM; + } + + // It is important to do this in a way such that concurrent readers can't + // temporarily think somethings in the heap. (I've actually seen this + // happen in asserts: DLD.) + _reserved.set_word_size(0); + _reserved.set_start((HeapWord*)heap_rs.base()); + _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); + + _expansion_regions = max_byte_size/HeapRegion::GrainBytes; + + _num_humongous_regions = 0; + + // Create the gen rem set (and barrier set) for the entire reserved region. + _rem_set = collector_policy()->create_rem_set(_reserved, 2); + set_barrier_set(rem_set()->bs()); + if (barrier_set()->is_a(BarrierSet::ModRef)) { + _mr_bs = (ModRefBarrierSet*)_barrier_set; + } else { + vm_exit_during_initialization("G1 requires a mod ref bs."); + return JNI_ENOMEM; + } + + // Also create a G1 rem set. 
+ if (G1UseHRIntoRS) { + if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { + _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs()); + } else { + vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); + return JNI_ENOMEM; + } + } else { + _g1_rem_set = new StupidG1RemSet(this); + } + + // Carve out the G1 part of the heap. + + ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); + _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), + g1_rs.size()/HeapWordSize); + ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); + + _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); + + _g1_storage.initialize(g1_rs, 0); + _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); + _g1_max_committed = _g1_committed; + _hrs = new HeapRegionSeq(_expansion_regions); + guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); + guarantee(_cur_alloc_region == NULL, "from constructor"); + + _bot_shared = new G1BlockOffsetSharedArray(_reserved, + heap_word_size(init_byte_size)); + + _g1h = this; + + // Create the ConcurrentMark data structure and thread. + // (Must do this late, so that "max_regions" is defined.) + _cm = new ConcurrentMark(heap_rs, (int) max_regions()); + _cmThread = _cm->cmThread(); + + // ...and the concurrent zero-fill thread, if necessary. + if (G1ConcZeroFill) { + _czft = new ConcurrentZFThread(); + } + + // Initialize the from_card cache structure of HeapRegionRemSet. + HeapRegionRemSet::init_heap(max_regions()); + + // Now expand into the initial heap size. + expand(init_byte_size); + + // Perform any initialization actions delegated to the policy. + g1_policy()->init(); + + g1_policy()->note_start_of_mark_thread(); + + _refine_cte_cl = + new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), + g1_rem_set(), + concurrent_g1_refine()); + JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); + + JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, + SATB_Q_FL_lock, + 0, + Shared_SATB_Q_lock); + if (G1RSBarrierUseQueue) { + JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, + DirtyCardQ_FL_lock, + G1DirtyCardQueueMax, + Shared_DirtyCardQ_lock); + } + if (G1DeferredRSUpdate) { + dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, + DirtyCardQ_FL_lock, + 0, + Shared_DirtyCardQ_lock, + &JavaThread::dirty_card_queue_set()); + } + // In case we're keeping closure specialization stats, initialize those + // counts and that mechanism. + SpecializationStats::clear(); + + _gc_alloc_region_list = NULL; + + // Do later initialization work for concurrent refinement. + _cg1r->init(); + + const char* group_names[] = { "CR", "ZF", "CM", "CL" }; + GCOverheadReporter::initGCOverheadReporter(4, group_names); + + return JNI_OK; +} + +void G1CollectedHeap::ref_processing_init() { + SharedHeap::ref_processing_init(); + MemRegion mr = reserved_region(); + _ref_processor = ReferenceProcessor::create_ref_processor( + mr, // span + false, // Reference discovery is not atomic + // (though it shouldn't matter here.) + true, // mt_discovery + NULL, // is alive closure: need to fill this in for efficiency + ParallelGCThreads, + ParallelRefProcEnabled, + true); // Setting next fields of discovered + // lists requires a barrier. 
+} + +size_t G1CollectedHeap::capacity() const { + return _g1_committed.byte_size(); +} + +void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent, + int worker_i) { + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + int n_completed_buffers = 0; + while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) { + n_completed_buffers++; + } + g1_policy()->record_update_rs_processed_buffers(worker_i, + (double) n_completed_buffers); + dcqs.clear_n_completed_buffers(); + // Finish up the queue... + if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i, + g1_rem_set()); + assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); +} + + +// Computes the sum of the storage used by the various regions. + +size_t G1CollectedHeap::used() const { + assert(Heap_lock->owner() != NULL, + "Should be owned on this thread's behalf."); + size_t result = _summary_bytes_used; + if (_cur_alloc_region != NULL) + result += _cur_alloc_region->used(); + return result; +} + +class SumUsedClosure: public HeapRegionClosure { + size_t _used; +public: + SumUsedClosure() : _used(0) {} + bool doHeapRegion(HeapRegion* r) { + if (!r->continuesHumongous()) { + _used += r->used(); + } + return false; + } + size_t result() { return _used; } +}; + +size_t G1CollectedHeap::recalculate_used() const { + SumUsedClosure blk; + _hrs->iterate(&blk); + return blk.result(); +} + +#ifndef PRODUCT +class SumUsedRegionsClosure: public HeapRegionClosure { + size_t _num; +public: + SumUsedRegionsClosure() : _num(0) {} + bool doHeapRegion(HeapRegion* r) { + if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { + _num += 1; + } + return false; + } + size_t result() { return _num; } +}; + +size_t G1CollectedHeap::recalculate_used_regions() const { + SumUsedRegionsClosure blk; + _hrs->iterate(&blk); + return blk.result(); +} +#endif // PRODUCT + +size_t G1CollectedHeap::unsafe_max_alloc() { + if (_free_regions > 0) return HeapRegion::GrainBytes; + // otherwise, is there space in the current allocation region? + + // We need to store the current allocation region in a local variable + // here. The problem is that this method doesn't take any locks and + // there may be other threads which overwrite the current allocation + // region field. attempt_allocation(), for example, sets it to NULL + // and this can happen *after* the NULL check here but before the call + // to free(), resulting in a SIGSEGV. Note that this doesn't appear + // to be a problem in the optimized build, since the two loads of the + // current allocation region field are optimized away. + HeapRegion* car = _cur_alloc_region; + + // FIXME: should iterate over all regions? 
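The comment just above explains why unsafe_max_alloc() copies the current allocation region into a local before testing it: a concurrent allocator may set the field to NULL between the check and the use. A minimal standalone sketch of that load-once pattern; the std::atomic wrapper and the names are illustrative, the HotSpot code reads a plain field:

    #include <atomic>
    #include <cstddef>

    struct Region {
      size_t free_bytes;
      size_t free() const { return free_bytes; }
    };

    std::atomic<Region*> cur_alloc_region{nullptr};  // may be cleared concurrently

    size_t unsafe_max_alloc_sketch() {
      // Read the shared pointer exactly once; test and use the local copy so a
      // concurrent writer storing nullptr cannot fault us between the NULL
      // check and the dereference.
      Region* car = cur_alloc_region.load(std::memory_order_acquire);
      if (car == nullptr) {
        return 0;
      }
      return car->free();
    }

    int main() {
      Region r{4096};
      cur_alloc_region.store(&r);
      return unsafe_max_alloc_sketch() == 4096 ? 0 : 1;
    }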
+ if (car == NULL) { + return 0; + } + return car->free(); +} + +void G1CollectedHeap::collect(GCCause::Cause cause) { + // The caller doesn't have the Heap_lock + assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); + MutexLocker ml(Heap_lock); + collect_locked(cause); +} + +void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { + assert(Thread::current()->is_VM_thread(), "Precondition#1"); + assert(Heap_lock->is_locked(), "Precondition#2"); + GCCauseSetter gcs(this, cause); + switch (cause) { + case GCCause::_heap_inspection: + case GCCause::_heap_dump: { + HandleMark hm; + do_full_collection(false); // don't clear all soft refs + break; + } + default: // XXX FIX ME + ShouldNotReachHere(); // Unexpected use of this function + } +} + + +void G1CollectedHeap::collect_locked(GCCause::Cause cause) { + // Don't want to do a GC until cleanup is completed. + wait_for_cleanup_complete(); + + // Read the GC count while holding the Heap_lock + int gc_count_before = SharedHeap::heap()->total_collections(); + { + MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back + VM_G1CollectFull op(gc_count_before, cause); + VMThread::execute(&op); + } +} + +bool G1CollectedHeap::is_in(const void* p) const { + if (_g1_committed.contains(p)) { + HeapRegion* hr = _hrs->addr_to_region(p); + return hr->is_in(p); + } else { + return _perm_gen->as_gen()->is_in(p); + } +} + +// Iteration functions. + +// Iterates an OopClosure over all ref-containing fields of objects +// within a HeapRegion. + +class IterateOopClosureRegionClosure: public HeapRegionClosure { + MemRegion _mr; + OopClosure* _cl; +public: + IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) + : _mr(mr), _cl(cl) {} + bool doHeapRegion(HeapRegion* r) { + if (! r->continuesHumongous()) { + r->oop_iterate(_cl); + } + return false; + } +}; + +void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { + IterateOopClosureRegionClosure blk(_g1_committed, cl); + _hrs->iterate(&blk); + if (do_perm) { + perm_gen()->oop_iterate(cl); + } +} + +void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { + IterateOopClosureRegionClosure blk(mr, cl); + _hrs->iterate(&blk); + if (do_perm) { + perm_gen()->oop_iterate(cl); + } +} + +// Iterates an ObjectClosure over all objects within a HeapRegion. + +class IterateObjectClosureRegionClosure: public HeapRegionClosure { + ObjectClosure* _cl; +public: + IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} + bool doHeapRegion(HeapRegion* r) { + if (! r->continuesHumongous()) { + r->object_iterate(_cl); + } + return false; + } +}; + +void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { + IterateObjectClosureRegionClosure blk(cl); + _hrs->iterate(&blk); + if (do_perm) { + perm_gen()->object_iterate(cl); + } +} + +void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { + // FIXME: is this right? + guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); +} + +// Calls a SpaceClosure on a HeapRegion. 
+ +class SpaceClosureRegionClosure: public HeapRegionClosure { + SpaceClosure* _cl; +public: + SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} + bool doHeapRegion(HeapRegion* r) { + _cl->do_space(r); + return false; + } +}; + +void G1CollectedHeap::space_iterate(SpaceClosure* cl) { + SpaceClosureRegionClosure blk(cl); + _hrs->iterate(&blk); +} + +void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { + _hrs->iterate(cl); +} + +void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, + HeapRegionClosure* cl) { + _hrs->iterate_from(r, cl); +} + +void +G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { + _hrs->iterate_from(idx, cl); +} + +HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } + +void +G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, + int worker, + jint claim_value) { + const size_t regions = n_regions(); + const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); + // try to spread out the starting points of the workers + const size_t start_index = regions / worker_num * (size_t) worker; + + // each worker will actually look at all regions + for (size_t count = 0; count < regions; ++count) { + const size_t index = (start_index + count) % regions; + assert(0 <= index && index < regions, "sanity"); + HeapRegion* r = region_at(index); + // we'll ignore "continues humongous" regions (we'll process them + // when we come across their corresponding "start humongous" + // region) and regions already claimed + if (r->claim_value() == claim_value || r->continuesHumongous()) { + continue; + } + // OK, try to claim it + if (r->claimHeapRegion(claim_value)) { + // success! + assert(!r->continuesHumongous(), "sanity"); + if (r->startsHumongous()) { + // If the region is "starts humongous" we'll iterate over its + // "continues humongous" first; in fact we'll do them + // first. The order is important. In on case, calling the + // closure on the "starts humongous" region might de-allocate + // and clear all its "continues humongous" regions and, as a + // result, we might end up processing them twice. So, we'll do + // them first (notice: most closures will ignore them anyway) and + // then we'll do the "starts humongous" region. + for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { + HeapRegion* chr = region_at(ch_index); + + // if the region has already been claimed or it's not + // "continues humongous" we're done + if (chr->claim_value() == claim_value || + !chr->continuesHumongous()) { + break; + } + + // Noone should have claimed it directly. We can given + // that we claimed its "starts humongous" region. + assert(chr->claim_value() != claim_value, "sanity"); + assert(chr->humongous_start_region() == r, "sanity"); + + if (chr->claimHeapRegion(claim_value)) { + // we should always be able to claim it; noone else should + // be trying to claim this region + + bool res2 = cl->doHeapRegion(chr); + assert(!res2, "Should not abort"); + + // Right now, this holds (i.e., no closure that actually + // does something with "continues humongous" regions + // clears them). We might have to weaken it in the future, + // but let's leave these two asserts here for extra safety. 
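heap_region_par_iterate_chunked() spreads the workers' starting indices over the region array and lets each worker claim regions by atomically installing a claim value, so every region is handled exactly once even though every worker walks the whole array. A standalone sketch of that claiming scheme using std::atomic and std::thread; the claim constant, counts and names are illustrative, not HotSpot's:

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct Region {
      std::atomic<int> claim{0};          // 0 plays the role of InitialClaimValue
      bool claim_region(int value) {      // analogous to claimHeapRegion()
        int expected = 0;
        return claim.compare_exchange_strong(expected, value);
      }
    };

    int main() {
      const int    claim_value = 1;
      const size_t n_regions   = 16;
      const size_t n_workers   = 4;
      std::vector<Region> regions(n_regions);
      std::atomic<size_t> processed{0};

      auto worker = [&](size_t worker_id) {
        // Spread out starting points, then walk all regions modulo n_regions.
        size_t start = n_regions / n_workers * worker_id;
        for (size_t count = 0; count < n_regions; ++count) {
          size_t index = (start + count) % n_regions;
          if (regions[index].claim_region(claim_value)) {
            ++processed;                   // "do the work" for a claimed region
          }
        }
      };

      std::vector<std::thread> threads;
      for (size_t i = 0; i < n_workers; ++i) threads.emplace_back(worker, i);
      for (auto& t : threads) t.join();

      std::printf("processed %zu of %zu regions\n", processed.load(), n_regions);
      return 0;
    }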
+ assert(chr->continuesHumongous(), "should still be the case"); + assert(chr->humongous_start_region() == r, "sanity"); + } else { + guarantee(false, "we should not reach here"); + } + } + } + + assert(!r->continuesHumongous(), "sanity"); + bool res = cl->doHeapRegion(r); + assert(!res, "Should not abort"); + } + } +} + +class ResetClaimValuesClosure: public HeapRegionClosure { +public: + bool doHeapRegion(HeapRegion* r) { + r->set_claim_value(HeapRegion::InitialClaimValue); + return false; + } +}; + +void +G1CollectedHeap::reset_heap_region_claim_values() { + ResetClaimValuesClosure blk; + heap_region_iterate(&blk); +} + +#ifdef ASSERT +// This checks whether all regions in the heap have the correct claim +// value. I also piggy-backed on this a check to ensure that the +// humongous_start_region() information on "continues humongous" +// regions is correct. + +class CheckClaimValuesClosure : public HeapRegionClosure { +private: + jint _claim_value; + size_t _failures; + HeapRegion* _sh_region; +public: + CheckClaimValuesClosure(jint claim_value) : + _claim_value(claim_value), _failures(0), _sh_region(NULL) { } + bool doHeapRegion(HeapRegion* r) { + if (r->claim_value() != _claim_value) { + gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " + "claim value = %d, should be %d", + r->bottom(), r->end(), r->claim_value(), + _claim_value); + ++_failures; + } + if (!r->isHumongous()) { + _sh_region = NULL; + } else if (r->startsHumongous()) { + _sh_region = r; + } else if (r->continuesHumongous()) { + if (r->humongous_start_region() != _sh_region) { + gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " + "HS = "PTR_FORMAT", should be "PTR_FORMAT, + r->bottom(), r->end(), + r->humongous_start_region(), + _sh_region); + ++_failures; + } + } + return false; + } + size_t failures() { + return _failures; + } +}; + +bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { + CheckClaimValuesClosure cl(claim_value); + heap_region_iterate(&cl); + return cl.failures() == 0; +} +#endif // ASSERT + +void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { + HeapRegion* r = g1_policy()->collection_set(); + while (r != NULL) { + HeapRegion* next = r->next_in_collection_set(); + if (cl->doHeapRegion(r)) { + cl->incomplete(); + return; + } + r = next; + } +} + +void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, + HeapRegionClosure *cl) { + assert(r->in_collection_set(), + "Start region must be a member of the collection set."); + HeapRegion* cur = r; + while (cur != NULL) { + HeapRegion* next = cur->next_in_collection_set(); + if (cl->doHeapRegion(cur) && false) { + cl->incomplete(); + return; + } + cur = next; + } + cur = g1_policy()->collection_set(); + while (cur != r) { + HeapRegion* next = cur->next_in_collection_set(); + if (cl->doHeapRegion(cur) && false) { + cl->incomplete(); + return; + } + cur = next; + } +} + +CompactibleSpace* G1CollectedHeap::first_compactible_space() { + return _hrs->length() > 0 ? 
_hrs->at(0) : NULL; +} + + +Space* G1CollectedHeap::space_containing(const void* addr) const { + Space* res = heap_region_containing(addr); + if (res == NULL) + res = perm_gen()->space_containing(addr); + return res; +} + +HeapWord* G1CollectedHeap::block_start(const void* addr) const { + Space* sp = space_containing(addr); + if (sp != NULL) { + return sp->block_start(addr); + } + return NULL; +} + +size_t G1CollectedHeap::block_size(const HeapWord* addr) const { + Space* sp = space_containing(addr); + assert(sp != NULL, "block_size of address outside of heap"); + return sp->block_size(addr); +} + +bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { + Space* sp = space_containing(addr); + return sp->block_is_obj(addr); +} + +bool G1CollectedHeap::supports_tlab_allocation() const { + return true; +} + +size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { + return HeapRegion::GrainBytes; +} + +size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { + // Return the remaining space in the cur alloc region, but not less than + // the min TLAB size. + // Also, no more than half the region size, since we can't allow tlabs to + // grow big enough to accomodate humongous objects. + + // We need to story it locally, since it might change between when we + // test for NULL and when we use it later. + ContiguousSpace* cur_alloc_space = _cur_alloc_region; + if (cur_alloc_space == NULL) { + return HeapRegion::GrainBytes/2; + } else { + return MAX2(MIN2(cur_alloc_space->free(), + (size_t)(HeapRegion::GrainBytes/2)), + (size_t)MinTLABSize); + } +} + +HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) { + bool dummy; + return G1CollectedHeap::mem_allocate(size, false, true, &dummy); +} + +bool G1CollectedHeap::allocs_are_zero_filled() { + return false; +} + +size_t G1CollectedHeap::large_typearray_limit() { + // FIXME + return HeapRegion::GrainBytes/HeapWordSize; +} + +size_t G1CollectedHeap::max_capacity() const { + return _g1_committed.byte_size(); +} + +jlong G1CollectedHeap::millis_since_last_gc() { + // assert(false, "NYI"); + return 0; +} + + +void G1CollectedHeap::prepare_for_verify() { + if (SafepointSynchronize::is_at_safepoint() || ! 
UseTLAB) { + ensure_parsability(false); + } + g1_rem_set()->prepare_for_verify(); +} + +class VerifyLivenessOopClosure: public OopClosure { + G1CollectedHeap* g1h; +public: + VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { + g1h = _g1h; + } + void do_oop(narrowOop *p) { + guarantee(false, "NYI"); + } + void do_oop(oop *p) { + oop obj = *p; + assert(obj == NULL || !g1h->is_obj_dead(obj), + "Dead object referenced by a not dead object"); + } +}; + +class VerifyObjsInRegionClosure: public ObjectClosure { + G1CollectedHeap* _g1h; + size_t _live_bytes; + HeapRegion *_hr; +public: + VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) { + _g1h = G1CollectedHeap::heap(); + } + void do_object(oop o) { + VerifyLivenessOopClosure isLive(_g1h); + assert(o != NULL, "Huh?"); + if (!_g1h->is_obj_dead(o)) { + o->oop_iterate(&isLive); + if (!_hr->obj_allocated_since_prev_marking(o)) + _live_bytes += (o->size() * HeapWordSize); + } + } + size_t live_bytes() { return _live_bytes; } +}; + +class PrintObjsInRegionClosure : public ObjectClosure { + HeapRegion *_hr; + G1CollectedHeap *_g1; +public: + PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { + _g1 = G1CollectedHeap::heap(); + }; + + void do_object(oop o) { + if (o != NULL) { + HeapWord *start = (HeapWord *) o; + size_t word_sz = o->size(); + gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT + " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", + (void*) o, word_sz, + _g1->isMarkedPrev(o), + _g1->isMarkedNext(o), + _hr->obj_allocated_since_prev_marking(o)); + HeapWord *end = start + word_sz; + HeapWord *cur; + int *val; + for (cur = start; cur < end; cur++) { + val = (int *) cur; + gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); + } + } + } +}; + +class VerifyRegionClosure: public HeapRegionClosure { +public: + bool _allow_dirty; + bool _par; + VerifyRegionClosure(bool allow_dirty, bool par = false) + : _allow_dirty(allow_dirty), _par(par) {} + bool doHeapRegion(HeapRegion* r) { + guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, + "Should be unclaimed at verify points."); + if (!r->continuesHumongous()) { + VerifyObjsInRegionClosure not_dead_yet_cl(r); + r->verify(_allow_dirty); + r->object_iterate(¬_dead_yet_cl); + guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(), + "More live objects than counted in last complete marking."); + } + return false; + } +}; + +class VerifyRootsClosure: public OopsInGenClosure { +private: + G1CollectedHeap* _g1h; + bool _failures; + +public: + VerifyRootsClosure() : + _g1h(G1CollectedHeap::heap()), _failures(false) { } + + bool failures() { return _failures; } + + void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + + void do_oop(oop* p) { + oop obj = *p; + if (obj != NULL) { + if (_g1h->is_obj_dead(obj)) { + gclog_or_tty->print_cr("Root location "PTR_FORMAT" " + "points to dead obj "PTR_FORMAT, p, (void*) obj); + obj->print_on(gclog_or_tty); + _failures = true; + } + } + } +}; + +// This is the task used for parallel heap verification. 
+ +class G1ParVerifyTask: public AbstractGangTask { +private: + G1CollectedHeap* _g1h; + bool _allow_dirty; + +public: + G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) : + AbstractGangTask("Parallel verify task"), + _g1h(g1h), _allow_dirty(allow_dirty) { } + + void work(int worker_i) { + HandleMark hm; + VerifyRegionClosure blk(_allow_dirty, true); + _g1h->heap_region_par_iterate_chunked(&blk, worker_i, + HeapRegion::ParVerifyClaimValue); + } +}; + +void G1CollectedHeap::verify(bool allow_dirty, bool silent) { + if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { + if (!silent) { gclog_or_tty->print("roots "); } + VerifyRootsClosure rootsCl; + process_strong_roots(false, + SharedHeap::SO_AllClasses, + &rootsCl, + &rootsCl); + rem_set()->invalidate(perm_gen()->used_region(), false); + if (!silent) { gclog_or_tty->print("heapRegions "); } + if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { + assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), + "sanity check"); + + G1ParVerifyTask task(this, allow_dirty); + int n_workers = workers()->total_workers(); + set_par_threads(n_workers); + workers()->run_task(&task); + set_par_threads(0); + + assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), + "sanity check"); + + reset_heap_region_claim_values(); + + assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), + "sanity check"); + } else { + VerifyRegionClosure blk(allow_dirty); + _hrs->iterate(&blk); + } + if (!silent) gclog_or_tty->print("remset "); + rem_set()->verify(); + guarantee(!rootsCl.failures(), "should not have had failures"); + } else { + if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); + } +} + +class PrintRegionClosure: public HeapRegionClosure { + outputStream* _st; +public: + PrintRegionClosure(outputStream* st) : _st(st) {} + bool doHeapRegion(HeapRegion* r) { + r->print_on(_st); + return false; + } +}; + +void G1CollectedHeap::print() const { print_on(gclog_or_tty); } + +void G1CollectedHeap::print_on(outputStream* st) const { + PrintRegionClosure blk(st); + _hrs->iterate(&blk); +} + +void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { + if (ParallelGCThreads > 0) { + workers()->print_worker_threads(); + } + st->print("\"G1 concurrent mark GC Thread\" "); + _cmThread->print(); + st->cr(); + st->print("\"G1 concurrent refinement GC Thread\" "); + _cg1r->cg1rThread()->print_on(st); + st->cr(); + st->print("\"G1 zero-fill GC Thread\" "); + _czft->print_on(st); + st->cr(); +} + +void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { + if (ParallelGCThreads > 0) { + workers()->threads_do(tc); + } + tc->do_thread(_cmThread); + tc->do_thread(_cg1r->cg1rThread()); + tc->do_thread(_czft); +} + +void G1CollectedHeap::print_tracing_info() const { + concurrent_g1_refine()->print_final_card_counts(); + + // We'll overload this to mean "trace GC pause statistics." + if (TraceGen0Time || TraceGen1Time) { + // The "G1CollectorPolicy" is keeping track of these stats, so delegate + // to that. 
+ g1_policy()->print_tracing_info(); + } + if (G1SummarizeRSetStats) { + g1_rem_set()->print_summary_info(); + } + if (G1SummarizeConcurrentMark) { + concurrent_mark()->print_summary_info(); + } + if (G1SummarizeZFStats) { + ConcurrentZFThread::print_summary_info(); + } + g1_policy()->print_yg_surv_rate_info(); + + GCOverheadReporter::printGCOverhead(); + + SpecializationStats::print(); +} + + +int G1CollectedHeap::addr_to_arena_id(void* addr) const { + HeapRegion* hr = heap_region_containing(addr); + if (hr == NULL) { + return 0; + } else { + return 1; + } +} + +G1CollectedHeap* G1CollectedHeap::heap() { + assert(_sh->kind() == CollectedHeap::G1CollectedHeap, + "not a garbage-first heap"); + return _g1h; +} + +void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { + if (PrintHeapAtGC){ + gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections()); + Universe::print(); + } + assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); + // Call allocation profiler + AllocationProfiler::iterate_since_last_gc(); + // Fill TLAB's and such + ensure_parsability(true); +} + +void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { + // FIXME: what is this about? + // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" + // is set. + COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), + "derived pointer present")); + + if (PrintHeapAtGC){ + gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections()); + Universe::print(); + gclog_or_tty->print("} "); + } +} + +void G1CollectedHeap::do_collection_pause() { + // Read the GC count while holding the Heap_lock + // we need to do this _before_ wait_for_cleanup_complete(), to + // ensure that we do not give up the heap lock and potentially + // pick up the wrong count + int gc_count_before = SharedHeap::heap()->total_collections(); + + // Don't want to do a GC pause while cleanup is being completed! + wait_for_cleanup_complete(); + + g1_policy()->record_stop_world_start(); + { + MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back + VM_G1IncCollectionPause op(gc_count_before); + VMThread::execute(&op); + } +} + +void +G1CollectedHeap::doConcurrentMark() { + if (G1ConcMark) { + MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); + if (!_cmThread->in_progress()) { + _cmThread->set_started(); + CGC_lock->notify(); + } + } +} + +class VerifyMarkedObjsClosure: public ObjectClosure { + G1CollectedHeap* _g1h; + public: + VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} + void do_object(oop obj) { + assert(obj->mark()->is_marked() ? 
!_g1h->is_obj_dead(obj) : true, + "markandsweep mark should agree with concurrent deadness"); + } +}; + +void +G1CollectedHeap::checkConcurrentMark() { + VerifyMarkedObjsClosure verifycl(this); + // MutexLockerEx x(getMarkBitMapLock(), + // Mutex::_no_safepoint_check_flag); + object_iterate(&verifycl, false); +} + +void G1CollectedHeap::do_sync_mark() { + _cm->checkpointRootsInitial(); + _cm->markFromRoots(); + _cm->checkpointRootsFinal(false); +} + +// + +double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, + bool young) { + return _g1_policy->predict_region_elapsed_time_ms(hr, young); +} + +void G1CollectedHeap::check_if_region_is_too_expensive(double + predicted_time_ms) { + _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); +} + +size_t G1CollectedHeap::pending_card_num() { + size_t extra_cards = 0; + JavaThread *curr = Threads::first(); + while (curr != NULL) { + DirtyCardQueue& dcq = curr->dirty_card_queue(); + extra_cards += dcq.size(); + curr = curr->next(); + } + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + size_t buffer_size = dcqs.buffer_size(); + size_t buffer_num = dcqs.completed_buffers_num(); + return buffer_size * buffer_num + extra_cards; +} + +size_t G1CollectedHeap::max_pending_card_num() { + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + size_t buffer_size = dcqs.buffer_size(); + size_t buffer_num = dcqs.completed_buffers_num(); + int thread_num = Threads::number_of_threads(); + return (buffer_num + thread_num) * buffer_size; +} + +size_t G1CollectedHeap::cards_scanned() { + HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); + return g1_rset->cardsScanned(); +} + +void +G1CollectedHeap::setup_surviving_young_words() { + guarantee( _surviving_young_words == NULL, "pre-condition" ); + size_t array_length = g1_policy()->young_cset_length(); + _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); + if (_surviving_young_words == NULL) { + vm_exit_out_of_memory(sizeof(size_t) * array_length, + "Not enough space for young surv words summary."); + } + memset(_surviving_young_words, 0, array_length * sizeof(size_t)); + for (size_t i = 0; i < array_length; ++i) { + guarantee( _surviving_young_words[i] == 0, "invariant" ); + } +} + +void +G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { + MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); + size_t array_length = g1_policy()->young_cset_length(); + for (size_t i = 0; i < array_length; ++i) + _surviving_young_words[i] += surv_young_words[i]; +} + +void +G1CollectedHeap::cleanup_surviving_young_words() { + guarantee( _surviving_young_words != NULL, "pre-condition" ); + FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); + _surviving_young_words = NULL; +} + +// + +void +G1CollectedHeap::do_collection_pause_at_safepoint() { + char verbose_str[128]; + sprintf(verbose_str, "GC pause "); + if (g1_policy()->in_young_gc_mode()) { + if (g1_policy()->full_young_gcs()) + strcat(verbose_str, "(young)"); + else + strcat(verbose_str, "(partial)"); + } + if (g1_policy()->should_initiate_conc_mark()) + strcat(verbose_str, " (initial-mark)"); + + GCCauseSetter x(this, GCCause::_g1_inc_collection_pause); + + // if PrintGCDetails is on, we'll print long statistics information + // in the collector policy code, so let's not print this as the output + // is messy if we do. 
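pending_card_num() above estimates the remembered-set update work still queued: the cards sitting in each Java thread's dirty-card queue plus the completed buffers already handed to the DirtyCardQueueSet, counted as buffer count times buffer size. A worked example with illustrative numbers (the real buffer size is whatever the queue set reports):

    #include <cstdio>
    #include <vector>

    int main() {
      // Cards still buffered per mutator thread (illustrative values).
      std::vector<size_t> per_thread = {10, 20, 30};
      size_t extra_cards = 0;
      for (size_t n : per_thread) extra_cards += n;

      const size_t buffer_size = 256;   // illustrative DirtyCardQueueSet buffer size
      const size_t buffer_num  = 5;     // completed buffers handed to the set

      size_t pending = buffer_size * buffer_num + extra_cards;  // 1280 + 60
      std::printf("pending cards = %zu\n", pending);            // 1340
      return 0;
    }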
+ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); + TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); + TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); + + ResourceMark rm; + assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); + assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); + guarantee(!is_gc_active(), "collection is not reentrant"); + assert(regions_accounted_for(), "Region leakage!"); + + increment_gc_time_stamp(); + + if (g1_policy()->in_young_gc_mode()) { + assert(check_young_list_well_formed(), + "young list should be well formed"); + } + + if (GC_locker::is_active()) { + return; // GC is disabled (e.g. JNI GetXXXCritical operation) + } + + bool abandoned = false; + { // Call to jvmpi::post_class_unload_events must occur outside of active GC + IsGCActiveMark x; + + gc_prologue(false); + increment_total_collections(); + +#if G1_REM_SET_LOGGING + gclog_or_tty->print_cr("\nJust chose CS, heap:"); + print(); +#endif + + if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { + HandleMark hm; // Discard invalid handles created during verification + prepare_for_verify(); + gclog_or_tty->print(" VerifyBeforeGC:"); + Universe::verify(false); + } + + COMPILER2_PRESENT(DerivedPointerTable::clear()); + + // We want to turn off ref discovery, if necessary, and turn it back on + // on again later if we do. + bool was_enabled = ref_processor()->discovery_enabled(); + if (was_enabled) ref_processor()->disable_discovery(); + + // Forget the current alloc region (we might even choose it to be part + // of the collection set!). + abandon_cur_alloc_region(); + + // The elapsed time induced by the start time below deliberately elides + // the possible verification above. + double start_time_sec = os::elapsedTime(); + GCOverheadReporter::recordSTWStart(start_time_sec); + size_t start_used_bytes = used(); + if (!G1ConcMark) { + do_sync_mark(); + } + + g1_policy()->record_collection_pause_start(start_time_sec, + start_used_bytes); + + guarantee(_in_cset_fast_test == NULL, "invariant"); + guarantee(_in_cset_fast_test_base == NULL, "invariant"); + _in_cset_fast_test_length = max_regions(); + _in_cset_fast_test_base = + NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); + memset(_in_cset_fast_test_base, false, + _in_cset_fast_test_length * sizeof(bool)); + // We're biasing _in_cset_fast_test to avoid subtracting the + // beginning of the heap every time we want to index; basically + // it's the same with what we do with the card table. + _in_cset_fast_test = _in_cset_fast_test_base - + ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); + +#if SCAN_ONLY_VERBOSE + _young_list->print(); +#endif // SCAN_ONLY_VERBOSE + + if (g1_policy()->should_initiate_conc_mark()) { + concurrent_mark()->checkpointRootsInitialPre(); + } + save_marks(); + + // We must do this before any possible evacuation that should propogate + // marks. + if (mark_in_progress()) { + double start_time_sec = os::elapsedTime(); + + _cm->drainAllSATBBuffers(); + double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; + g1_policy()->record_satb_drain_time(finish_mark_ms); + + } + // Record the number of elements currently on the mark stack, so we + // only iterate over these. (Since evacuation may add to the mark + // stack, doing more exposes race conditions.) If no mark is in + // progress, this will be zero. 
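The _in_cset_fast_test setup above biases the table by the index of the heap's first region, so testing whether an address is in the collection set is just a shift and an array index, the same trick the card table uses. A standalone sketch of the idea; to stay within well-defined C++ the bias is kept as an explicit integer instead of being folded into the pointer, and the region size and base address are illustrative:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      const int       log_region_bytes = 20;           // 1 MB regions (illustrative)
      const uintptr_t heap_start       = 0x40000000u;  // illustrative reserved base
      const size_t    n_regions        = 8;

      std::vector<unsigned char> in_cset(n_regions, 0);
      // The "bias": the region index of the heap's first region.  G1 folds this
      // into the pointer (_in_cset_fast_test = base - bias) so the lookup below
      // needs no per-query subtraction at all.
      const uintptr_t bias = heap_start >> log_region_bytes;

      in_cset[3] = 1;                                   // region 3 is in the collection set

      uintptr_t addr = heap_start + (uintptr_t(3) << log_region_bytes) + 128;
      size_t idx = (addr >> log_region_bytes) - bias;   // shift + (pre-folded) bias
      std::printf("addr maps to region %zu, in cset = %d\n", idx, (int)in_cset[idx]);
      return 0;
    }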
+ _cm->set_oops_do_bound(); + + assert(regions_accounted_for(), "Region leakage."); + + if (mark_in_progress()) + concurrent_mark()->newCSet(); + + // Now choose the CS. + g1_policy()->choose_collection_set(); + + // We may abandon a pause if we find no region that will fit in the MMU + // pause. + bool abandoned = (g1_policy()->collection_set() == NULL); + + // Nothing to do if we were unable to choose a collection set. + if (!abandoned) { +#if G1_REM_SET_LOGGING + gclog_or_tty->print_cr("\nAfter pause, heap:"); + print(); +#endif + + setup_surviving_young_words(); + + // Set up the gc allocation regions. + get_gc_alloc_regions(); + + // Actually do the work... + evacuate_collection_set(); + free_collection_set(g1_policy()->collection_set()); + g1_policy()->clear_collection_set(); + + FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base); + // this is more for peace of mind; we're nulling them here and + // we're expecting them to be null at the beginning of the next GC + _in_cset_fast_test = NULL; + _in_cset_fast_test_base = NULL; + + release_gc_alloc_regions(false /* totally */); + + cleanup_surviving_young_words(); + + if (g1_policy()->in_young_gc_mode()) { + _young_list->reset_sampled_info(); + assert(check_young_list_empty(true), + "young list should be empty"); + +#if SCAN_ONLY_VERBOSE + _young_list->print(); +#endif // SCAN_ONLY_VERBOSE + + g1_policy()->record_survivor_regions(_young_list->survivor_length(), + _young_list->first_survivor_region(), + _young_list->last_survivor_region()); + _young_list->reset_auxilary_lists(); + } + } else { + COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); + } + + if (evacuation_failed()) { + _summary_bytes_used = recalculate_used(); + } else { + // The "used" of the the collection set have already been subtracted + // when they were freed. Add in the bytes evacuated. 
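The accounting described in the comment above, and completed by the statement that follows, can be illustrated with round numbers: the bytes used by the collection-set regions were already subtracted when those regions were freed, and the bytes copied into the GC alloc (to-space) regions are now added back. A worked example with illustrative sizes:

    #include <cstdio>

    int main() {
      long summary_bytes_used = 100 << 20;  // 100 MB before the pause (illustrative)
      long cset_used          =  30 << 20;  // bytes used by the chosen regions
      long bytes_in_to_space  =   5 << 20;  // what actually survived the copy

      summary_bytes_used -= cset_used;         // already done as the regions were freed
      summary_bytes_used += bytes_in_to_space; // the step performed just below
      std::printf("used after pause = %ld MB\n", summary_bytes_used >> 20);  // 75 MB
      return 0;
    }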
+ _summary_bytes_used += g1_policy()->bytes_in_to_space(); + } + + if (g1_policy()->in_young_gc_mode() && + g1_policy()->should_initiate_conc_mark()) { + concurrent_mark()->checkpointRootsInitialPost(); + set_marking_started(); + doConcurrentMark(); + } + +#if SCAN_ONLY_VERBOSE + _young_list->print(); +#endif // SCAN_ONLY_VERBOSE + + double end_time_sec = os::elapsedTime(); + double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; + g1_policy()->record_pause_time_ms(pause_time_ms); + GCOverheadReporter::recordSTWEnd(end_time_sec); + g1_policy()->record_collection_pause_end(abandoned); + + assert(regions_accounted_for(), "Region leakage."); + + if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { + HandleMark hm; // Discard invalid handles created during verification + gclog_or_tty->print(" VerifyAfterGC:"); + prepare_for_verify(); + Universe::verify(false); + } + + if (was_enabled) ref_processor()->enable_discovery(); + + { + size_t expand_bytes = g1_policy()->expansion_amount(); + if (expand_bytes > 0) { + size_t bytes_before = capacity(); + expand(expand_bytes); + } + } + + if (mark_in_progress()) + concurrent_mark()->update_g1_committed(); + + gc_epilogue(false); + } + + assert(verify_region_lists(), "Bad region lists."); + + if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { + gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); + print_tracing_info(); + vm_exit(-1); + } +} + +void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { + assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); + // make sure we don't call set_gc_alloc_region() multiple times on + // the same region + assert(r == NULL || !r->is_gc_alloc_region(), + "shouldn't already be a GC alloc region"); + HeapWord* original_top = NULL; + if (r != NULL) + original_top = r->top(); + + // We will want to record the used space in r as being there before gc. + // One we install it as a GC alloc region it's eligible for allocation. + // So record it now and use it later. + size_t r_used = 0; + if (r != NULL) { + r_used = r->used(); + + if (ParallelGCThreads > 0) { + // need to take the lock to guard against two threads calling + // get_gc_alloc_region concurrently (very unlikely but...) + MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); + r->save_marks(); + } + } + HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; + _gc_alloc_regions[purpose] = r; + if (old_alloc_region != NULL) { + // Replace aliases too. + for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { + if (_gc_alloc_regions[ap] == old_alloc_region) { + _gc_alloc_regions[ap] = r; + } + } + } + if (r != NULL) { + push_gc_alloc_region(r); + if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { + // We are using a region as a GC alloc region after it has been used + // as a mutator allocation region during the current marking cycle. + // The mutator-allocated objects are currently implicitly marked, but + // when we move hr->next_top_at_mark_start() forward at the the end + // of the GC pause, they won't be. We therefore mark all objects in + // the "gap". We do this object-by-object, since marking densely + // does not currently work right with marking bitmap iteration. This + // means we rely on TLAB filling at the start of pauses, and no + // "resuscitation" of filled TLAB's. If we want to do this, we need + // to fix the marking bitmap iteration. 
+ HeapWord* curhw = r->next_top_at_mark_start(); + HeapWord* t = original_top; + + while (curhw < t) { + oop cur = (oop)curhw; + // We'll assume parallel for generality. This is rare code. + concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? + curhw = curhw + cur->size(); + } + assert(curhw == t, "Should have parsed correctly."); + } + if (G1PolicyVerbose > 1) { + gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " + "for survivors:", r->bottom(), original_top, r->end()); + r->print(); + } + g1_policy()->record_before_bytes(r_used); + } +} + +void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { + assert(Thread::current()->is_VM_thread() || + par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); + assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), + "Precondition."); + hr->set_is_gc_alloc_region(true); + hr->set_next_gc_alloc_region(_gc_alloc_region_list); + _gc_alloc_region_list = hr; +} + +#ifdef G1_DEBUG +class FindGCAllocRegion: public HeapRegionClosure { +public: + bool doHeapRegion(HeapRegion* r) { + if (r->is_gc_alloc_region()) { + gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", + r->hrs_index(), r->bottom()); + } + return false; + } +}; +#endif // G1_DEBUG + +void G1CollectedHeap::forget_alloc_region_list() { + assert(Thread::current()->is_VM_thread(), "Precondition"); + while (_gc_alloc_region_list != NULL) { + HeapRegion* r = _gc_alloc_region_list; + assert(r->is_gc_alloc_region(), "Invariant."); + // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on + // newly allocated data in order to be able to apply deferred updates + // before the GC is done for verification purposes (i.e to allow + // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the + // collection. + r->ContiguousSpace::set_saved_mark(); + _gc_alloc_region_list = r->next_gc_alloc_region(); + r->set_next_gc_alloc_region(NULL); + r->set_is_gc_alloc_region(false); + if (r->is_survivor()) { + if (r->is_empty()) { + r->set_not_young(); + } else { + _young_list->add_survivor_region(r); + } + } + if (r->is_empty()) { + ++_free_regions; + } + } +#ifdef G1_DEBUG + FindGCAllocRegion fa; + heap_region_iterate(&fa); +#endif // G1_DEBUG +} + + +bool G1CollectedHeap::check_gc_alloc_regions() { + // TODO: allocation regions check + return true; +} + +void G1CollectedHeap::get_gc_alloc_regions() { + // First, let's check that the GC alloc region list is empty (it should) + assert(_gc_alloc_region_list == NULL, "invariant"); + + for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { + assert(_gc_alloc_regions[ap] == NULL, "invariant"); + + // Create new GC alloc regions. + HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; + _retained_gc_alloc_regions[ap] = NULL; + + if (alloc_region != NULL) { + assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); + + // let's make sure that the GC alloc region is not tagged as such + // outside a GC operation + assert(!alloc_region->is_gc_alloc_region(), "sanity"); + + if (alloc_region->in_collection_set() || + alloc_region->top() == alloc_region->end() || + alloc_region->top() == alloc_region->bottom()) { + // we will discard the current GC alloc region if it's in the + // collection set (it can happen!), if it's already full (no + // point in using it), or if it's empty (this means that it + // was emptied during a cleanup and it should be on the free + // list now). 
+ + alloc_region = NULL; + } + } + + if (alloc_region == NULL) { + // we will get a new GC alloc region + alloc_region = newAllocRegionWithExpansion(ap, 0); + } + + if (alloc_region != NULL) { + assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); + set_gc_alloc_region(ap, alloc_region); + } + + assert(_gc_alloc_regions[ap] == NULL || + _gc_alloc_regions[ap]->is_gc_alloc_region(), + "the GC alloc region should be tagged as such"); + assert(_gc_alloc_regions[ap] == NULL || + _gc_alloc_regions[ap] == _gc_alloc_region_list, + "the GC alloc region should be the same as the GC alloc list head"); + } + // Set alternative regions for allocation purposes that have reached + // their limit. + for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { + GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); + if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { + _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; + } + } + assert(check_gc_alloc_regions(), "alloc regions messed up"); +} + +void G1CollectedHeap::release_gc_alloc_regions(bool totally) { + // We keep a separate list of all regions that have been alloc regions in + // the current collection pause. Forget that now. This method will + // untag the GC alloc regions and tear down the GC alloc region + // list. It's desirable that no regions are tagged as GC alloc + // outside GCs. + forget_alloc_region_list(); + + // The current alloc regions contain objs that have survived + // collection. Make them no longer GC alloc regions. + for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { + HeapRegion* r = _gc_alloc_regions[ap]; + _retained_gc_alloc_regions[ap] = NULL; + + if (r != NULL) { + // we retain nothing on _gc_alloc_regions between GCs + set_gc_alloc_region(ap, NULL); + _gc_alloc_region_counts[ap] = 0; + + if (r->is_empty()) { + // we didn't actually allocate anything in it; let's just put + // it on the free list + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + r->set_zero_fill_complete(); + put_free_region_on_list_locked(r); + } else if (_retain_gc_alloc_region[ap] && !totally) { + // retain it so that we can use it at the beginning of the next GC + _retained_gc_alloc_regions[ap] = r; + } + } + } +} + +#ifndef PRODUCT +// Useful for debugging + +void G1CollectedHeap::print_gc_alloc_regions() { + gclog_or_tty->print_cr("GC alloc regions"); + for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { + HeapRegion* r = _gc_alloc_regions[ap]; + if (r == NULL) { + gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); + } else { + gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, + ap, r->bottom(), r->used()); + } + } +} +#endif // PRODUCT + +void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { + _drain_in_progress = false; + set_evac_failure_closure(cl); + _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray(40, true); +} + +void G1CollectedHeap::finalize_for_evac_failure() { + assert(_evac_failure_scan_stack != NULL && + _evac_failure_scan_stack->length() == 0, + "Postcondition"); + assert(!_drain_in_progress, "Postcondition"); + // Don't have to delete, since the scan stack is a resource object. 
+ _evac_failure_scan_stack = NULL; +} + + + +// *** Sequential G1 Evacuation + +HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { + HeapRegion* alloc_region = _gc_alloc_regions[purpose]; + // let the caller handle alloc failure + if (alloc_region == NULL) return NULL; + assert(isHumongous(word_size) || !alloc_region->isHumongous(), + "Either the object is humongous or the region isn't"); + HeapWord* block = alloc_region->allocate(word_size); + if (block == NULL) { + block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); + } + return block; +} + +class G1IsAliveClosure: public BoolObjectClosure { + G1CollectedHeap* _g1; +public: + G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} + void do_object(oop p) { assert(false, "Do not call."); } + bool do_object_b(oop p) { + // It is reachable if it is outside the collection set, or is inside + // and forwarded. + +#ifdef G1_DEBUG + gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", + (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), + !_g1->obj_in_cs(p) || p->is_forwarded()); +#endif // G1_DEBUG + + return !_g1->obj_in_cs(p) || p->is_forwarded(); + } +}; + +class G1KeepAliveClosure: public OopClosure { + G1CollectedHeap* _g1; +public: + G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} + void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + void do_oop(oop* p) { + oop obj = *p; +#ifdef G1_DEBUG + if (PrintGC && Verbose) { + gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, + p, (void*) obj, (void*) *p); + } +#endif // G1_DEBUG + + if (_g1->obj_in_cs(obj)) { + assert( obj->is_forwarded(), "invariant" ); + *p = obj->forwardee(); + +#ifdef G1_DEBUG + gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, + (void*) obj, (void*) *p); +#endif // G1_DEBUG + } + } +}; + +class UpdateRSetImmediate : public OopsInHeapRegionClosure { +private: + G1CollectedHeap* _g1; + G1RemSet* _g1_rem_set; +public: + UpdateRSetImmediate(G1CollectedHeap* g1) : + _g1(g1), _g1_rem_set(g1->g1_rem_set()) {} + + void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + void do_oop(oop* p) { + assert(_from->is_in_reserved(p), "paranoia"); + if (*p != NULL && !_from->is_survivor()) { + _g1_rem_set->par_write_ref(_from, p, 0); + } + } +}; + +class UpdateRSetDeferred : public OopsInHeapRegionClosure { +private: + G1CollectedHeap* _g1; + DirtyCardQueue *_dcq; + CardTableModRefBS* _ct_bs; + +public: + UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : + _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} + + void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + void do_oop(oop* p) { + assert(_from->is_in_reserved(p), "paranoia"); + if (!_from->is_in_reserved(*p) && !_from->is_survivor()) { + size_t card_index = _ct_bs->index_for(p); + if (_ct_bs->mark_card_deferred(card_index)) { + _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); + } + } + } +}; + + + +class RemoveSelfPointerClosure: public ObjectClosure { +private: + G1CollectedHeap* _g1; + ConcurrentMark* _cm; + HeapRegion* _hr; + size_t _prev_marked_bytes; + size_t _next_marked_bytes; + OopsInHeapRegionClosure *_cl; +public: + RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) : + _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), + _next_marked_bytes(0), _cl(cl) {} + + size_t prev_marked_bytes() { return _prev_marked_bytes; } + size_t next_marked_bytes() { return _next_marked_bytes; } + + // The original idea 
here was to coalesce evacuated and dead objects. + // However that caused complications with the block offset table (BOT). + // In particular if there were two TLABs, one of them partially refined. + // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| + // The BOT entries of the unrefined part of TLAB_2 point to the start + // of TLAB_2. If the last object of the TLAB_1 and the first object + // of TLAB_2 are coalesced, then the cards of the unrefined part + // would point into middle of the filler object. + // + // The current approach is to not coalesce and leave the BOT contents intact. + void do_object(oop obj) { + if (obj->is_forwarded() && obj->forwardee() == obj) { + // The object failed to move. + assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); + _cm->markPrev(obj); + assert(_cm->isPrevMarked(obj), "Should be marked!"); + _prev_marked_bytes += (obj->size() * HeapWordSize); + if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { + _cm->markAndGrayObjectIfNecessary(obj); + } + obj->set_mark(markOopDesc::prototype()); + // While we were processing RSet buffers during the + // collection, we actually didn't scan any cards on the + // collection set, since we didn't want to update remebered + // sets with entries that point into the collection set, given + // that live objects fromthe collection set are about to move + // and such entries will be stale very soon. This change also + // dealt with a reliability issue which involved scanning a + // card in the collection set and coming across an array that + // was being chunked and looking malformed. The problem is + // that, if evacuation fails, we might have remembered set + // entries missing given that we skipped cards on the + // collection set. So, we'll recreate such entries now. + obj->oop_iterate(_cl); + assert(_cm->isPrevMarked(obj), "Should be marked!"); + } else { + // The object has been either evacuated or is dead. Fill it with a + // dummy object. + MemRegion mr((HeapWord*)obj, obj->size()); + CollectedHeap::fill_with_object(mr); + _cm->clearRangeBothMaps(mr); + } + } +}; + +void G1CollectedHeap::remove_self_forwarding_pointers() { + UpdateRSetImmediate immediate_update(_g1h); + DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); + UpdateRSetDeferred deferred_update(_g1h, &dcq); + OopsInHeapRegionClosure *cl; + if (G1DeferredRSUpdate) { + cl = &deferred_update; + } else { + cl = &immediate_update; + } + HeapRegion* cur = g1_policy()->collection_set(); + while (cur != NULL) { + assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); + + RemoveSelfPointerClosure rspc(_g1h, cl); + if (cur->evacuation_failed()) { + assert(cur->in_collection_set(), "bad CS"); + cl->set_region(cur); + cur->object_iterate(&rspc); + + // A number of manipulations to make the TAMS be the current top, + // and the marked bytes be the ones observed in the iteration. + if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { + // The comments below are the postconditions achieved by the + // calls. Note especially the last such condition, which says that + // the count of marked bytes has been properly restored. + cur->note_start_of_marking(false); + // _next_top_at_mark_start == top, _next_marked_bytes == 0 + cur->add_to_marked_bytes(rspc.prev_marked_bytes()); + // _next_marked_bytes == prev_marked_bytes. 
+ cur->note_end_of_marking(); + // _prev_top_at_mark_start == top(), + // _prev_marked_bytes == prev_marked_bytes + } + // If there is no mark in progress, we modified the _next variables + // above needlessly, but harmlessly. + if (_g1h->mark_in_progress()) { + cur->note_start_of_marking(false); + // _next_top_at_mark_start == top, _next_marked_bytes == 0 + // _next_marked_bytes == next_marked_bytes. + } + + // Now make sure the region has the right index in the sorted array. + g1_policy()->note_change_in_marked_bytes(cur); + } + cur = cur->next_in_collection_set(); + } + assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); + + // Now restore saved marks, if any. + if (_objs_with_preserved_marks != NULL) { + assert(_preserved_marks_of_objs != NULL, "Both or none."); + assert(_objs_with_preserved_marks->length() == + _preserved_marks_of_objs->length(), "Both or none."); + guarantee(_objs_with_preserved_marks->length() == + _preserved_marks_of_objs->length(), "Both or none."); + for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { + oop obj = _objs_with_preserved_marks->at(i); + markOop m = _preserved_marks_of_objs->at(i); + obj->set_mark(m); + } + // Delete the preserved marks growable arrays (allocated on the C heap). + delete _objs_with_preserved_marks; + delete _preserved_marks_of_objs; + _objs_with_preserved_marks = NULL; + _preserved_marks_of_objs = NULL; + } +} + +void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { + _evac_failure_scan_stack->push(obj); +} + +void G1CollectedHeap::drain_evac_failure_scan_stack() { + assert(_evac_failure_scan_stack != NULL, "precondition"); + + while (_evac_failure_scan_stack->length() > 0) { + oop obj = _evac_failure_scan_stack->pop(); + _evac_failure_closure->set_region(heap_region_containing(obj)); + obj->oop_iterate_backwards(_evac_failure_closure); + } +} + +void G1CollectedHeap::handle_evacuation_failure(oop old) { + markOop m = old->mark(); + // forward to self + assert(!old->is_forwarded(), "precondition"); + + old->forward_to(old); + handle_evacuation_failure_common(old, m); +} + +oop +G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, + oop old) { + markOop m = old->mark(); + oop forward_ptr = old->forward_to_atomic(old); + if (forward_ptr == NULL) { + // Forward-to-self succeeded. + if (_evac_failure_closure != cl) { + MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); + assert(!_drain_in_progress, + "Should only be true while someone holds the lock."); + // Set the global evac-failure closure to the current thread's. + assert(_evac_failure_closure == NULL, "Or locking has failed."); + set_evac_failure_closure(cl); + // Now do the common part. + handle_evacuation_failure_common(old, m); + // Reset to NULL. + set_evac_failure_closure(NULL); + } else { + // The lock is already held, and this is recursive. + assert(_drain_in_progress, "This should only be the recursive case."); + handle_evacuation_failure_common(old, m); + } + return old; + } else { + // Someone else had a place to copy it. 
+ return forward_ptr; + } +} + +void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { + set_evacuation_failed(true); + + preserve_mark_if_necessary(old, m); + + HeapRegion* r = heap_region_containing(old); + if (!r->evacuation_failed()) { + r->set_evacuation_failed(true); + if (G1PrintRegions) { + gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " + "["PTR_FORMAT","PTR_FORMAT")\n", + r, r->bottom(), r->end()); + } + } + + push_on_evac_failure_scan_stack(old); + + if (!_drain_in_progress) { + // prevent recursion in copy_to_survivor_space() + _drain_in_progress = true; + drain_evac_failure_scan_stack(); + _drain_in_progress = false; + } +} + +void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { + if (m != markOopDesc::prototype()) { + if (_objs_with_preserved_marks == NULL) { + assert(_preserved_marks_of_objs == NULL, "Both or none."); + _objs_with_preserved_marks = + new (ResourceObj::C_HEAP) GrowableArray(40, true); + _preserved_marks_of_objs = + new (ResourceObj::C_HEAP) GrowableArray(40, true); + } + _objs_with_preserved_marks->push(obj); + _preserved_marks_of_objs->push(m); + } +} + +// *** Parallel G1 Evacuation + +HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, + size_t word_size) { + HeapRegion* alloc_region = _gc_alloc_regions[purpose]; + // let the caller handle alloc failure + if (alloc_region == NULL) return NULL; + + HeapWord* block = alloc_region->par_allocate(word_size); + if (block == NULL) { + MutexLockerEx x(par_alloc_during_gc_lock(), + Mutex::_no_safepoint_check_flag); + block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); + } + return block; +} + +void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, + bool par) { + // Another thread might have obtained alloc_region for the given + // purpose, and might be attempting to allocate in it, and might + // succeed. Therefore, we can't do the "finalization" stuff on the + // region below until we're sure the last allocation has happened. + // We ensure this by allocating the remaining space with a garbage + // object. + if (par) par_allocate_remaining_space(alloc_region); + // Now we can do the post-GC stuff on the region. + alloc_region->note_end_of_copying(); + g1_policy()->record_after_bytes(alloc_region->used()); +} + +HeapWord* +G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, + HeapRegion* alloc_region, + bool par, + size_t word_size) { + HeapWord* block = NULL; + // In the parallel case, a previous thread to obtain the lock may have + // already assigned a new gc_alloc_region. + if (alloc_region != _gc_alloc_regions[purpose]) { + assert(par, "But should only happen in parallel case."); + alloc_region = _gc_alloc_regions[purpose]; + if (alloc_region == NULL) return NULL; + block = alloc_region->par_allocate(word_size); + if (block != NULL) return block; + // Otherwise, continue; this new region is empty, too. + } + assert(alloc_region != NULL, "We better have an allocation region"); + retire_alloc_region(alloc_region, par); + + if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { + // Cannot allocate more regions for the given purpose. + GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); + // Is there an alternative? + if (purpose != alt_purpose) { + HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; + // Has not the alternative region been aliased? 
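retire_alloc_region() above cannot finalize a region while other workers may still be bump-allocating into it, so in the parallel case it first claims whatever space is left with a filler object (par_allocate_remaining_space(), further below); once that succeeds no other allocation can land in the region and the post-copy bookkeeping is safe. A standalone sketch of claiming the tail of a bump-pointer region with a CAS retry loop; the types and the one-word minimum fill are illustrative:

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    struct Region {
      size_t capacity_words;
      std::atomic<size_t> top{0};               // bump pointer, in words

      size_t free_words() const { return capacity_words - top.load(); }

      // Parallel allocation: CAS the bump pointer forward, or fail.
      bool par_allocate(size_t words) {
        size_t old_top = top.load();
        do {
          if (old_top + words > capacity_words) return false;
        } while (!top.compare_exchange_weak(old_top, old_top + words));
        return true;
      }
    };

    // Claim everything that is left so no other thread can allocate here again,
    // mirroring par_allocate_remaining_space(): re-read free(), retry on races.
    void fill_remaining(Region& r) {
      const size_t min_fill = 1;                // illustrative minimum object size
      for (;;) {
        size_t free_words = r.free_words();
        if (free_words < min_fill) return;      // too small for anyone to use
        if (r.par_allocate(free_words)) {
          std::printf("retired region by filling %zu words\n", free_words);
          return;
        }                                       // lost a race: recompute and retry
      }
    }

    int main() {
      Region r{1024};
      r.par_allocate(1000);                     // pretend the GC used most of it
      fill_remaining(r);                        // claims the last 24 words
      return 0;
    }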
+ if (alloc_region != alt_region && alt_region != NULL) { + // Try to allocate in the alternative region. + if (par) { + block = alt_region->par_allocate(word_size); + } else { + block = alt_region->allocate(word_size); + } + // Make an alias. + _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; + if (block != NULL) { + return block; + } + retire_alloc_region(alt_region, par); + } + // Both the allocation region and the alternative one are full + // and aliased, replace them with a new allocation region. + purpose = alt_purpose; + } else { + set_gc_alloc_region(purpose, NULL); + return NULL; + } + } + + // Now allocate a new region for allocation. + alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); + + // let the caller handle alloc failure + if (alloc_region != NULL) { + + assert(check_gc_alloc_regions(), "alloc regions messed up"); + assert(alloc_region->saved_mark_at_top(), + "Mark should have been saved already."); + // We used to assert that the region was zero-filled here, but no + // longer. + + // This must be done last: once it's installed, other regions may + // allocate in it (without holding the lock.) + set_gc_alloc_region(purpose, alloc_region); + + if (par) { + block = alloc_region->par_allocate(word_size); + } else { + block = alloc_region->allocate(word_size); + } + // Caller handles alloc failure. + } else { + // This sets other apis using the same old alloc region to NULL, also. + set_gc_alloc_region(purpose, NULL); + } + return block; // May be NULL. +} + +void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { + HeapWord* block = NULL; + size_t free_words; + do { + free_words = r->free()/HeapWordSize; + // If there's too little space, no one can allocate, so we're done. + if (free_words < (size_t)oopDesc::header_size()) return; + // Otherwise, try to claim it. + block = r->par_allocate(free_words); + } while (block == NULL); + fill_with_object(block, free_words); +} + +#define use_local_bitmaps 1 +#define verify_local_bitmaps 0 + +#ifndef PRODUCT + +class GCLabBitMap; +class GCLabBitMapClosure: public BitMapClosure { +private: + ConcurrentMark* _cm; + GCLabBitMap* _bitmap; + +public: + GCLabBitMapClosure(ConcurrentMark* cm, + GCLabBitMap* bitmap) { + _cm = cm; + _bitmap = bitmap; + } + + virtual bool do_bit(size_t offset); +}; + +#endif // PRODUCT + +#define oop_buffer_length 256 + +class GCLabBitMap: public BitMap { +private: + ConcurrentMark* _cm; + + int _shifter; + size_t _bitmap_word_covers_words; + + // beginning of the heap + HeapWord* _heap_start; + + // this is the actual start of the GCLab + HeapWord* _real_start_word; + + // this is the actual end of the GCLab + HeapWord* _real_end_word; + + // this is the first word, possibly located before the actual start + // of the GCLab, that corresponds to the first bit of the bitmap + HeapWord* _start_word; + + // size of a GCLab in words + size_t _gclab_word_size; + + static int shifter() { + return MinObjAlignment - 1; + } + + // how many heap words does a single bitmap word corresponds to? + static size_t bitmap_word_covers_words() { + return BitsPerWord << shifter(); + } + + static size_t gclab_word_size() { + return G1ParallelGCAllocBufferSize / HeapWordSize; + } + + static size_t bitmap_size_in_bits() { + size_t bits_in_bitmap = gclab_word_size() >> shifter(); + // We are going to ensure that the beginning of a word in this + // bitmap also corresponds to the beginning of a word in the + // global marking bitmap. 
To handle the case where a GCLab + // starts from the middle of the bitmap, we need to add enough + // space (i.e. up to a bitmap word) to ensure that we have + // enough bits in the bitmap. + return bits_in_bitmap + BitsPerWord - 1; + } +public: + GCLabBitMap(HeapWord* heap_start) + : BitMap(bitmap_size_in_bits()), + _cm(G1CollectedHeap::heap()->concurrent_mark()), + _shifter(shifter()), + _bitmap_word_covers_words(bitmap_word_covers_words()), + _heap_start(heap_start), + _gclab_word_size(gclab_word_size()), + _real_start_word(NULL), + _real_end_word(NULL), + _start_word(NULL) + { + guarantee( size_in_words() >= bitmap_size_in_words(), + "just making sure"); + } + + inline unsigned heapWordToOffset(HeapWord* addr) { + unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter; + assert(offset < size(), "offset should be within bounds"); + return offset; + } + + inline HeapWord* offsetToHeapWord(size_t offset) { + HeapWord* addr = _start_word + (offset << _shifter); + assert(_real_start_word <= addr && addr < _real_end_word, "invariant"); + return addr; + } + + bool fields_well_formed() { + bool ret1 = (_real_start_word == NULL) && + (_real_end_word == NULL) && + (_start_word == NULL); + if (ret1) + return true; + + bool ret2 = _real_start_word >= _start_word && + _start_word < _real_end_word && + (_real_start_word + _gclab_word_size) == _real_end_word && + (_start_word + _gclab_word_size + _bitmap_word_covers_words) + > _real_end_word; + return ret2; + } + + inline bool mark(HeapWord* addr) { + guarantee(use_local_bitmaps, "invariant"); + assert(fields_well_formed(), "invariant"); + + if (addr >= _real_start_word && addr < _real_end_word) { + assert(!isMarked(addr), "should not have already been marked"); + + // first mark it on the bitmap + at_put(heapWordToOffset(addr), true); + + return true; + } else { + return false; + } + } + + inline bool isMarked(HeapWord* addr) { + guarantee(use_local_bitmaps, "invariant"); + assert(fields_well_formed(), "invariant"); + + return at(heapWordToOffset(addr)); + } + + void set_buffer(HeapWord* start) { + guarantee(use_local_bitmaps, "invariant"); + clear(); + + assert(start != NULL, "invariant"); + _real_start_word = start; + _real_end_word = start + _gclab_word_size; + + size_t diff = + pointer_delta(start, _heap_start) % _bitmap_word_covers_words; + _start_word = start - diff; + + assert(fields_well_formed(), "invariant"); + } + +#ifndef PRODUCT + void verify() { + // verify that the marks have been propagated + GCLabBitMapClosure cl(_cm, this); + iterate(&cl); + } +#endif // PRODUCT + + void retire() { + guarantee(use_local_bitmaps, "invariant"); + assert(fields_well_formed(), "invariant"); + + if (_start_word != NULL) { + CMBitMap* mark_bitmap = _cm->nextMarkBitMap(); + + // this means that the bitmap was set up for the GCLab + assert(_real_start_word != NULL && _real_end_word != NULL, "invariant"); + + mark_bitmap->mostly_disjoint_range_union(this, + 0, // always start from the start of the bitmap + _start_word, + size_in_words()); + _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); + +#ifndef PRODUCT + if (use_local_bitmaps && verify_local_bitmaps) + verify(); +#endif // PRODUCT + } else { + assert(_real_start_word == NULL && _real_end_word == NULL, "invariant"); + } + } + + static size_t bitmap_size_in_words() { + return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord; + } +}; + +#ifndef PRODUCT + +bool GCLabBitMapClosure::do_bit(size_t offset) { + HeapWord* addr = 
_bitmap->offsetToHeapWord(offset); + guarantee(_cm->isMarked(oop(addr)), "it should be!"); + return true; +} + +#endif // PRODUCT + +class G1ParGCAllocBuffer: public ParGCAllocBuffer { +private: + bool _retired; + bool _during_marking; + GCLabBitMap _bitmap; + +public: + G1ParGCAllocBuffer() : + ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize), + _during_marking(G1CollectedHeap::heap()->mark_in_progress()), + _bitmap(G1CollectedHeap::heap()->reserved_region().start()), + _retired(false) + { } + + inline bool mark(HeapWord* addr) { + guarantee(use_local_bitmaps, "invariant"); + assert(_during_marking, "invariant"); + return _bitmap.mark(addr); + } + + inline void set_buf(HeapWord* buf) { + if (use_local_bitmaps && _during_marking) + _bitmap.set_buffer(buf); + ParGCAllocBuffer::set_buf(buf); + _retired = false; + } + + inline void retire(bool end_of_gc, bool retain) { + if (_retired) + return; + if (use_local_bitmaps && _during_marking) { + _bitmap.retire(); + } + ParGCAllocBuffer::retire(end_of_gc, retain); + _retired = true; + } +}; + + +class G1ParScanThreadState : public StackObj { +protected: + G1CollectedHeap* _g1h; + RefToScanQueue* _refs; + DirtyCardQueue _dcq; + CardTableModRefBS* _ct_bs; + G1RemSet* _g1_rem; + + typedef GrowableArray OverflowQueue; + OverflowQueue* _overflowed_refs; + + G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount]; + ageTable _age_table; + + size_t _alloc_buffer_waste; + size_t _undo_waste; + + OopsInHeapRegionClosure* _evac_failure_cl; + G1ParScanHeapEvacClosure* _evac_cl; + G1ParScanPartialArrayClosure* _partial_scan_cl; + + int _hash_seed; + int _queue_num; + + int _term_attempts; +#if G1_DETAILED_STATS + int _pushes, _pops, _steals, _steal_attempts; + int _overflow_pushes; +#endif + + double _start; + double _start_strong_roots; + double _strong_roots_time; + double _start_term; + double _term_time; + + // Map from young-age-index (0 == not young, 1 is youngest) to + // surviving words. base is what we get back from the malloc call + size_t* _surviving_young_words_base; + // this points into the array, as we use the first few entries for padding + size_t* _surviving_young_words; + +#define PADDING_ELEM_NUM (64 / sizeof(size_t)) + + void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } + + void add_to_undo_waste(size_t waste) { _undo_waste += waste; } + + DirtyCardQueue& dirty_card_queue() { return _dcq; } + CardTableModRefBS* ctbs() { return _ct_bs; } + + void immediate_rs_update(HeapRegion* from, oop* p, int tid) { + if (!from->is_survivor()) { + _g1_rem->par_write_ref(from, p, tid); + } + } + + void deferred_rs_update(HeapRegion* from, oop* p, int tid) { + // If the new value of the field points to the same region or + // is the to-space, we don't need to include it in the Rset updates. + if (!from->is_in_reserved(*p) && !from->is_survivor()) { + size_t card_index = ctbs()->index_for(p); + // If the card hasn't been added to the buffer, do it. 
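+ // Only the thread that actually flips the card to "deferred" enqueues
+ // it, so each card lands in the dirty card queue at most once.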
+ if (ctbs()->mark_card_deferred(card_index)) { + dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index)); + } + } + } + +public: + G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) + : _g1h(g1h), + _refs(g1h->task_queue(queue_num)), + _dcq(&g1h->dirty_card_queue_set()), + _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), + _g1_rem(g1h->g1_rem_set()), + _hash_seed(17), _queue_num(queue_num), + _term_attempts(0), + _age_table(false), +#if G1_DETAILED_STATS + _pushes(0), _pops(0), _steals(0), + _steal_attempts(0), _overflow_pushes(0), +#endif + _strong_roots_time(0), _term_time(0), + _alloc_buffer_waste(0), _undo_waste(0) + { + // we allocate G1YoungSurvRateNumRegions plus one entries, since + // we "sacrifice" entry 0 to keep track of surviving bytes for + // non-young regions (where the age is -1) + // We also add a few elements at the beginning and at the end in + // an attempt to eliminate cache contention + size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); + size_t array_length = PADDING_ELEM_NUM + + real_length + + PADDING_ELEM_NUM; + _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); + if (_surviving_young_words_base == NULL) + vm_exit_out_of_memory(array_length * sizeof(size_t), + "Not enough space for young surv histo."); + _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; + memset(_surviving_young_words, 0, real_length * sizeof(size_t)); + + _overflowed_refs = new OverflowQueue(10); + + _start = os::elapsedTime(); + } + + ~G1ParScanThreadState() { + FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base); + } + + RefToScanQueue* refs() { return _refs; } + OverflowQueue* overflowed_refs() { return _overflowed_refs; } + ageTable* age_table() { return &_age_table; } + + G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { + return &_alloc_buffers[purpose]; + } + + size_t alloc_buffer_waste() { return _alloc_buffer_waste; } + size_t undo_waste() { return _undo_waste; } + + void push_on_queue(oop* ref) { + assert(ref != NULL, "invariant"); + assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant"); + + if (!refs()->push(ref)) { + overflowed_refs()->push(ref); + IF_G1_DETAILED_STATS(note_overflow_push()); + } else { + IF_G1_DETAILED_STATS(note_push()); + } + } + + void pop_from_queue(oop*& ref) { + if (!refs()->pop_local(ref)) { + ref = NULL; + } else { + assert(ref != NULL, "invariant"); + assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), + "invariant"); + + IF_G1_DETAILED_STATS(note_pop()); + } + } + + void pop_from_overflow_queue(oop*& ref) { + ref = overflowed_refs()->pop(); + } + + int refs_to_scan() { return refs()->size(); } + int overflowed_refs_to_scan() { return overflowed_refs()->length(); } + + void update_rs(HeapRegion* from, oop* p, int tid) { + if (G1DeferredRSUpdate) { + deferred_rs_update(from, p, tid); + } else { + immediate_rs_update(from, p, tid); + } + } + + HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { + + HeapWord* obj = NULL; + if (word_sz * 100 < + (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) * + ParallelGCBufferWastePct) { + G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); + add_to_alloc_buffer_waste(alloc_buf->words_remaining()); + alloc_buf->retire(false, false); + + HeapWord* buf = + _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize); + if (buf == NULL) return NULL; // Let caller handle allocation failure. + // Otherwise. 
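+ // Install the freshly obtained buffer as this thread's current GC
+ // alloc buffer and satisfy the allocation from it.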
+ alloc_buf->set_buf(buf); + + obj = alloc_buf->allocate(word_sz); + assert(obj != NULL, "buffer was definitely big enough..."); + } else { + obj = _g1h->par_allocate_during_gc(purpose, word_sz); + } + return obj; + } + + HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) { + HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); + if (obj != NULL) return obj; + return allocate_slow(purpose, word_sz); + } + + void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { + if (alloc_buffer(purpose)->contains(obj)) { + guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1), + "should contain whole object"); + alloc_buffer(purpose)->undo_allocation(obj, word_sz); + } else { + CollectedHeap::fill_with_object(obj, word_sz); + add_to_undo_waste(word_sz); + } + } + + void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { + _evac_failure_cl = evac_failure_cl; + } + OopsInHeapRegionClosure* evac_failure_closure() { + return _evac_failure_cl; + } + + void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) { + _evac_cl = evac_cl; + } + + void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) { + _partial_scan_cl = partial_scan_cl; + } + + int* hash_seed() { return &_hash_seed; } + int queue_num() { return _queue_num; } + + int term_attempts() { return _term_attempts; } + void note_term_attempt() { _term_attempts++; } + +#if G1_DETAILED_STATS + int pushes() { return _pushes; } + int pops() { return _pops; } + int steals() { return _steals; } + int steal_attempts() { return _steal_attempts; } + int overflow_pushes() { return _overflow_pushes; } + + void note_push() { _pushes++; } + void note_pop() { _pops++; } + void note_steal() { _steals++; } + void note_steal_attempt() { _steal_attempts++; } + void note_overflow_push() { _overflow_pushes++; } +#endif + + void start_strong_roots() { + _start_strong_roots = os::elapsedTime(); + } + void end_strong_roots() { + _strong_roots_time += (os::elapsedTime() - _start_strong_roots); + } + double strong_roots_time() { return _strong_roots_time; } + + void start_term_time() { + note_term_attempt(); + _start_term = os::elapsedTime(); + } + void end_term_time() { + _term_time += (os::elapsedTime() - _start_term); + } + double term_time() { return _term_time; } + + double elapsed() { + return os::elapsedTime() - _start; + } + + size_t* surviving_young_words() { + // We add on to hide entry 0 which accumulates surviving words for + // age -1 regions (i.e. non-young ones) + return _surviving_young_words; + } + + void retire_alloc_buffers() { + for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { + size_t waste = _alloc_buffers[ap].words_remaining(); + add_to_alloc_buffer_waste(waste); + _alloc_buffers[ap].retire(true, false); + } + } + +private: + void deal_with_reference(oop* ref_to_scan) { + if (has_partial_array_mask(ref_to_scan)) { + _partial_scan_cl->do_oop_nv(ref_to_scan); + } else { + // Note: we can use "raw" versions of "region_containing" because + // "obj_to_scan" is definitely in the heap, and is not in a + // humongous region. + HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); + _evac_cl->set_region(r); + _evac_cl->do_oop_nv(ref_to_scan); + } + } + +public: + void trim_queue() { + // I've replicated the loop twice, first to drain the overflow + // queue, second to drain the task queue. This is better than + // having a single loop, which checks both conditions and, inside + // it, either pops the overflow queue or the task queue, as each + // loop is tighter. 
Also, the decision to drain the overflow queue + // first is not arbitrary, as the overflow queue is not visible + // to the other workers, whereas the task queue is. So, we want to + // drain the "invisible" entries first, while allowing the other + // workers to potentially steal the "visible" entries. + + while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) { + while (overflowed_refs_to_scan() > 0) { + oop *ref_to_scan = NULL; + pop_from_overflow_queue(ref_to_scan); + assert(ref_to_scan != NULL, "invariant"); + // We shouldn't have pushed it on the queue if it was not + // pointing into the CSet. + assert(ref_to_scan != NULL, "sanity"); + assert(has_partial_array_mask(ref_to_scan) || + _g1h->obj_in_cs(*ref_to_scan), "sanity"); + + deal_with_reference(ref_to_scan); + } + + while (refs_to_scan() > 0) { + oop *ref_to_scan = NULL; + pop_from_queue(ref_to_scan); + + if (ref_to_scan != NULL) { + // We shouldn't have pushed it on the queue if it was not + // pointing into the CSet. + assert(has_partial_array_mask(ref_to_scan) || + _g1h->obj_in_cs(*ref_to_scan), "sanity"); + + deal_with_reference(ref_to_scan); + } + } + } + } +}; + +G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : + _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), + _par_scan_state(par_scan_state) { } + +// This closure is applied to the fields of the objects that have just been copied. +// Should probably be made inline and moved in g1OopClosures.inline.hpp. +void G1ParScanClosure::do_oop_nv(oop* p) { + oop obj = *p; + + if (obj != NULL) { + if (_g1->in_cset_fast_test(obj)) { + // We're not going to even bother checking whether the object is + // already forwarded or not, as this usually causes an immediate + // stall. We'll try to prefetch the object (for write, given that + // we might need to install the forwarding reference) and we'll + // get back to it when pop it from the queue + Prefetch::write(obj->mark_addr(), 0); + Prefetch::read(obj->mark_addr(), (HeapWordSize*2)); + + // slightly paranoid test; I'm trying to catch potential + // problems before we go into push_on_queue to know where the + // problem is coming from + assert(obj == *p, "the value of *p should not have changed"); + _par_scan_state->push_on_queue(p); + } else { + _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); + } + } +} + +void G1ParCopyHelper::mark_forwardee(oop* p) { + // This is called _after_ do_oop_work has been called, hence after + // the object has been relocated to its new location and *p points + // to its new location. + + oop thisOop = *p; + if (thisOop != NULL) { + assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)), + "shouldn't still be in the CSet if evacuation didn't fail."); + HeapWord* addr = (HeapWord*)thisOop; + if (_g1->is_in_g1_reserved(addr)) + _cm->grayRoot(oop(addr)); + } +} + +oop G1ParCopyHelper::copy_to_survivor_space(oop old) { + size_t word_sz = old->size(); + HeapRegion* from_region = _g1->heap_region_containing_raw(old); + // +1 to make the -1 indexes valid... + int young_index = from_region->young_index_in_cset()+1; + assert( (from_region->is_young() && young_index > 0) || + (!from_region->is_young() && young_index == 0), "invariant" ); + G1CollectorPolicy* g1p = _g1->g1_policy(); + markOop m = old->mark(); + int age = m->has_displaced_mark_helper() ? 
m->displaced_mark_helper()->age() + : m->age(); + GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, + word_sz); + HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); + oop obj = oop(obj_ptr); + + if (obj_ptr == NULL) { + // This will either forward-to-self, or detect that someone else has + // installed a forwarding pointer. + OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); + return _g1->handle_evacuation_failure_par(cl, old); + } + + // We're going to allocate linearly, so might as well prefetch ahead. + Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); + + oop forward_ptr = old->forward_to_atomic(obj); + if (forward_ptr == NULL) { + Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); + if (g1p->track_object_age(alloc_purpose)) { + // We could simply do obj->incr_age(). However, this causes a + // performance issue. obj->incr_age() will first check whether + // the object has a displaced mark by checking its mark word; + // getting the mark word from the new location of the object + // stalls. So, given that we already have the mark word and we + // are about to install it anyway, it's better to increase the + // age on the mark word, when the object does not have a + // displaced mark word. We're not expecting many objects to have + // a displaced marked word, so that case is not optimized + // further (it could be...) and we simply call obj->incr_age(). + + if (m->has_displaced_mark_helper()) { + // in this case, we have to install the mark word first, + // otherwise obj looks to be forwarded (the old mark word, + // which contains the forward pointer, was copied) + obj->set_mark(m); + obj->incr_age(); + } else { + m = m->incr_age(); + obj->set_mark(m); + } + _par_scan_state->age_table()->add(obj, word_sz); + } else { + obj->set_mark(m); + } + + // preserve "next" mark bit + if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { + if (!use_local_bitmaps || + !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { + // if we couldn't mark it on the local bitmap (this happens when + // the object was not allocated in the GCLab), we have to bite + // the bullet and do the standard parallel mark + _cm->markAndGrayObjectIfNecessary(obj); + } +#if 1 + if (_g1->isMarkedNext(old)) { + _cm->nextMarkBitMap()->parClear((HeapWord*)old); + } +#endif + } + + size_t* surv_young_words = _par_scan_state->surviving_young_words(); + surv_young_words[young_index] += word_sz; + + if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { + arrayOop(old)->set_length(0); + _par_scan_state->push_on_queue(set_partial_array_mask(old)); + } else { + // No point in using the slower heap_region_containing() method, + // given that we know obj is in the heap. + _scanner->set_region(_g1->heap_region_containing_raw(obj)); + obj->oop_iterate_backwards(_scanner); + } + } else { + _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); + obj = forward_ptr; + } + return obj; +} + +template +void G1ParCopyClosure::do_oop_work(oop* p) { + oop obj = *p; + assert(barrier != G1BarrierRS || obj != NULL, + "Precondition: G1BarrierRS implies obj is nonNull"); + + // The only time we skip the cset test is when we're scanning + // references popped from the queue. And we only push on the queue + // references that we know point into the cset, so no point in + // checking again. But we'll leave an assert here for peace of mind. 
+ assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant"); + + // here the null check is implicit in the cset_fast_test() test + if (skip_cset_test || _g1->in_cset_fast_test(obj)) { +#if G1_REM_SET_LOGGING + gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " + "into CS.", p, (void*) obj); +#endif + if (obj->is_forwarded()) { + *p = obj->forwardee(); + } else { + *p = copy_to_survivor_space(obj); + } + // When scanning the RS, we only care about objs in CS. + if (barrier == G1BarrierRS) { + _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); + } + } + + // When scanning moved objs, must look at all oops. + if (barrier == G1BarrierEvac && obj != NULL) { + _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); + } + + if (do_gen_barrier && obj != NULL) { + par_do_barrier(p); + } +} + +template void G1ParCopyClosure::do_oop_work(oop* p); + +template void G1ParScanPartialArrayClosure::process_array_chunk( + oop obj, int start, int end) { + // process our set of indices (include header in first chunk) + assert(start < end, "invariant"); + T* const base = (T*)objArrayOop(obj)->base(); + T* const start_addr = (start == 0) ? (T*) obj : base + start; + T* const end_addr = base + end; + MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr); + _scanner.set_region(_g1->heap_region_containing(obj)); + obj->oop_iterate(&_scanner, mr); +} + +void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) { + assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops"); + assert(has_partial_array_mask(p), "invariant"); + oop old = clear_partial_array_mask(p); + assert(old->is_objArray(), "must be obj array"); + assert(old->is_forwarded(), "must be forwarded"); + assert(Universe::heap()->is_in_reserved(old), "must be in heap."); + + objArrayOop obj = objArrayOop(old->forwardee()); + assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); + // Process ParGCArrayScanChunk elements now + // and push the remainder back onto queue + int start = arrayOop(old)->length(); + int end = obj->length(); + int remainder = end - start; + assert(start <= end, "just checking"); + if (remainder > 2 * ParGCArrayScanChunk) { + // Test above combines last partial chunk with a full chunk + end = start + ParGCArrayScanChunk; + arrayOop(old)->set_length(end); + // Push remainder. + _par_scan_state->push_on_queue(set_partial_array_mask(old)); + } else { + // Restore length so that the heap remains parsable in + // case of evacuation failure. 
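+ // (The length field of the old, forwarded copy was shortened earlier
+ // to record how far scanning had progressed; writing the full length
+ // back keeps that old image walkable.)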
+ arrayOop(old)->set_length(end); + } + + // process our set of indices (include header in first chunk) + process_array_chunk(obj, start, end); +} + +int G1ScanAndBalanceClosure::_nq = 0; + +class G1ParEvacuateFollowersClosure : public VoidClosure { +protected: + G1CollectedHeap* _g1h; + G1ParScanThreadState* _par_scan_state; + RefToScanQueueSet* _queues; + ParallelTaskTerminator* _terminator; + + G1ParScanThreadState* par_scan_state() { return _par_scan_state; } + RefToScanQueueSet* queues() { return _queues; } + ParallelTaskTerminator* terminator() { return _terminator; } + +public: + G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, + G1ParScanThreadState* par_scan_state, + RefToScanQueueSet* queues, + ParallelTaskTerminator* terminator) + : _g1h(g1h), _par_scan_state(par_scan_state), + _queues(queues), _terminator(terminator) {} + + void do_void() { + G1ParScanThreadState* pss = par_scan_state(); + while (true) { + oop* ref_to_scan; + pss->trim_queue(); + IF_G1_DETAILED_STATS(pss->note_steal_attempt()); + if (queues()->steal(pss->queue_num(), + pss->hash_seed(), + ref_to_scan)) { + IF_G1_DETAILED_STATS(pss->note_steal()); + + // slightly paranoid tests; I'm trying to catch potential + // problems before we go into push_on_queue to know where the + // problem is coming from + assert(ref_to_scan != NULL, "invariant"); + assert(has_partial_array_mask(ref_to_scan) || + _g1h->obj_in_cs(*ref_to_scan), "invariant"); + pss->push_on_queue(ref_to_scan); + continue; + } + pss->start_term_time(); + if (terminator()->offer_termination()) break; + pss->end_term_time(); + } + pss->end_term_time(); + pss->retire_alloc_buffers(); + } +}; + +class G1ParTask : public AbstractGangTask { +protected: + G1CollectedHeap* _g1h; + RefToScanQueueSet *_queues; + ParallelTaskTerminator _terminator; + + Mutex _stats_lock; + Mutex* stats_lock() { return &_stats_lock; } + + size_t getNCards() { + return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) + / G1BlockOffsetSharedArray::N_bytes; + } + +public: + G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) + : AbstractGangTask("G1 collection"), + _g1h(g1h), + _queues(task_queues), + _terminator(workers, _queues), + _stats_lock(Mutex::leaf, "parallel G1 stats lock", true) + {} + + RefToScanQueueSet* queues() { return _queues; } + + RefToScanQueue *work_queue(int i) { + return queues()->queue(i); + } + + void work(int i) { + ResourceMark rm; + HandleMark hm; + + G1ParScanThreadState pss(_g1h, i); + G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); + G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); + G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); + + pss.set_evac_closure(&scan_evac_cl); + pss.set_evac_failure_closure(&evac_failure_cl); + pss.set_partial_scan_closure(&partial_scan_cl); + + G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); + G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); + G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); + + G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); + G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); + G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); + + OopsInHeapRegionClosure *scan_root_cl; + OopsInHeapRegionClosure *scan_perm_cl; + OopsInHeapRegionClosure *scan_so_cl; + + if (_g1h->g1_policy()->should_initiate_conc_mark()) { + scan_root_cl = &scan_mark_root_cl; + scan_perm_cl = &scan_mark_perm_cl; + scan_so_cl = &scan_mark_heap_rs_cl; + } else { + scan_root_cl = &only_scan_root_cl; + scan_perm_cl = 
&only_scan_perm_cl; + scan_so_cl = &only_scan_heap_rs_cl; + } + + pss.start_strong_roots(); + _g1h->g1_process_strong_roots(/* not collecting perm */ false, + SharedHeap::SO_AllClasses, + scan_root_cl, + &only_scan_heap_rs_cl, + scan_so_cl, + scan_perm_cl, + i); + pss.end_strong_roots(); + { + double start = os::elapsedTime(); + G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); + evac.do_void(); + double elapsed_ms = (os::elapsedTime()-start)*1000.0; + double term_ms = pss.term_time()*1000.0; + _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); + _g1h->g1_policy()->record_termination_time(i, term_ms); + } + if (G1UseSurvivorSpaces) { + _g1h->g1_policy()->record_thread_age_table(pss.age_table()); + } + _g1h->update_surviving_young_words(pss.surviving_young_words()+1); + + // Clean up any par-expanded rem sets. + HeapRegionRemSet::par_cleanup(); + + MutexLocker x(stats_lock()); + if (ParallelGCVerbose) { + gclog_or_tty->print("Thread %d complete:\n", i); +#if G1_DETAILED_STATS + gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n", + pss.pushes(), + pss.pops(), + pss.overflow_pushes(), + pss.steals(), + pss.steal_attempts()); +#endif + double elapsed = pss.elapsed(); + double strong_roots = pss.strong_roots_time(); + double term = pss.term_time(); + gclog_or_tty->print(" Elapsed: %7.2f ms.\n" + " Strong roots: %7.2f ms (%6.2f%%)\n" + " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", + elapsed * 1000.0, + strong_roots * 1000.0, (strong_roots*100.0/elapsed), + term * 1000.0, (term*100.0/elapsed), + pss.term_attempts()); + size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste(); + gclog_or_tty->print(" Waste: %8dK\n" + " Alloc Buffer: %8dK\n" + " Undo: %8dK\n", + (total_waste * HeapWordSize) / K, + (pss.alloc_buffer_waste() * HeapWordSize) / K, + (pss.undo_waste() * HeapWordSize) / K); + } + + assert(pss.refs_to_scan() == 0, "Task queue should be empty"); + assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); + } +}; + +// *** Common G1 Evacuation Stuff + +class G1CountClosure: public OopsInHeapRegionClosure { +public: + int n; + G1CountClosure() : n(0) {} + void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + void do_oop(oop* p) { + oop obj = *p; + assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj), + "Rem set closure called on non-rem-set pointer."); + n++; + } +}; + +static G1CountClosure count_closure; + +void +G1CollectedHeap:: +g1_process_strong_roots(bool collecting_perm_gen, + SharedHeap::ScanningOption so, + OopClosure* scan_non_heap_roots, + OopsInHeapRegionClosure* scan_rs, + OopsInHeapRegionClosure* scan_so, + OopsInGenClosure* scan_perm, + int worker_i) { + // First scan the strong roots, including the perm gen. + double ext_roots_start = os::elapsedTime(); + double closure_app_time_sec = 0.0; + + BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); + BufferingOopsInGenClosure buf_scan_perm(scan_perm); + buf_scan_perm.set_generation(perm_gen()); + + process_strong_roots(collecting_perm_gen, so, + &buf_scan_non_heap_roots, + &buf_scan_perm); + // Finish up any enqueued closure apps. 
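+ // done() applies any oops still buffered in the two closures, so the
+ // closure_app_seconds() totals read below are complete.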
+ buf_scan_non_heap_roots.done(); + buf_scan_perm.done(); + double ext_roots_end = os::elapsedTime(); + g1_policy()->reset_obj_copy_time(worker_i); + double obj_copy_time_sec = + buf_scan_non_heap_roots.closure_app_seconds() + + buf_scan_perm.closure_app_seconds(); + g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); + double ext_root_time_ms = + ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; + g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); + + // Scan strong roots in mark stack. + if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { + concurrent_mark()->oops_do(scan_non_heap_roots); + } + double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; + g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); + + // XXX What should this be doing in the parallel case? + g1_policy()->record_collection_pause_end_CH_strong_roots(); + if (scan_so != NULL) { + scan_scan_only_set(scan_so, worker_i); + } + // Now scan the complement of the collection set. + if (scan_rs != NULL) { + g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); + } + // Finish with the ref_processor roots. + if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { + ref_processor()->oops_do(scan_non_heap_roots); + } + g1_policy()->record_collection_pause_end_G1_strong_roots(); + _process_strong_tasks->all_tasks_completed(); +} + +void +G1CollectedHeap::scan_scan_only_region(HeapRegion* r, + OopsInHeapRegionClosure* oc, + int worker_i) { + HeapWord* startAddr = r->bottom(); + HeapWord* endAddr = r->used_region().end(); + + oc->set_region(r); + + HeapWord* p = r->bottom(); + HeapWord* t = r->top(); + guarantee( p == r->next_top_at_mark_start(), "invariant" ); + while (p < t) { + oop obj = oop(p); + p += obj->oop_iterate(oc); + } +} + +void +G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc, + int worker_i) { + double start = os::elapsedTime(); + + BufferingOopsInHeapRegionClosure boc(oc); + + FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc); + FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark()); + + OopsInHeapRegionClosure *foc; + if (g1_policy()->should_initiate_conc_mark()) + foc = &scan_and_mark; + else + foc = &scan_only; + + HeapRegion* hr; + int n = 0; + while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) { + scan_scan_only_region(hr, foc, worker_i); + ++n; + } + boc.done(); + + double closure_app_s = boc.closure_app_seconds(); + g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0); + double ms = (os::elapsedTime() - start - closure_app_s)*1000.0; + g1_policy()->record_scan_only_time(worker_i, ms, n); +} + +void +G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, + OopClosure* non_root_closure) { + SharedHeap::process_weak_roots(root_closure, non_root_closure); +} + + +class SaveMarksClosure: public HeapRegionClosure { +public: + bool doHeapRegion(HeapRegion* r) { + r->save_marks(); + return false; + } +}; + +void G1CollectedHeap::save_marks() { + if (ParallelGCThreads == 0) { + SaveMarksClosure sm; + heap_region_iterate(&sm); + } + // We do this even in the parallel case + perm_gen()->save_marks(); +} + +void G1CollectedHeap::evacuate_collection_set() { + set_evacuation_failed(false); + + g1_rem_set()->prepare_for_oops_into_collection_set_do(); + concurrent_g1_refine()->set_use_cache(false); + int n_workers = (ParallelGCThreads > 0 ? 
workers()->total_workers() : 1); + set_par_threads(n_workers); + G1ParTask g1_par_task(this, n_workers, _task_queues); + + init_for_evac_failure(NULL); + + change_strong_roots_parity(); // In preparation for parallel strong roots. + rem_set()->prepare_for_younger_refs_iterate(true); + + assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); + double start_par = os::elapsedTime(); + if (ParallelGCThreads > 0) { + // The individual threads will set their evac-failure closures. + workers()->run_task(&g1_par_task); + } else { + g1_par_task.work(0); + } + + double par_time = (os::elapsedTime() - start_par) * 1000.0; + g1_policy()->record_par_time(par_time); + set_par_threads(0); + // Is this the right thing to do here? We don't save marks + // on individual heap regions when we allocate from + // them in parallel, so this seems like the correct place for this. + retire_all_alloc_regions(); + { + G1IsAliveClosure is_alive(this); + G1KeepAliveClosure keep_alive(this); + JNIHandles::weak_oops_do(&is_alive, &keep_alive); + } + g1_rem_set()->cleanup_after_oops_into_collection_set_do(); + + concurrent_g1_refine()->set_use_cache(true); + + finalize_for_evac_failure(); + + // Must do this before removing self-forwarding pointers, which clears + // the per-region evac-failure flags. + concurrent_mark()->complete_marking_in_collection_set(); + + if (evacuation_failed()) { + remove_self_forwarding_pointers(); + if (PrintGCDetails) { + gclog_or_tty->print(" (evacuation failed)"); + } else if (PrintGC) { + gclog_or_tty->print("--"); + } + } + + if (G1DeferredRSUpdate) { + RedirtyLoggedCardTableEntryFastClosure redirty; + dirty_card_queue_set().set_closure(&redirty); + dirty_card_queue_set().apply_closure_to_all_completed_buffers(); + JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set()); + assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); + } + + COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); +} + +void G1CollectedHeap::free_region(HeapRegion* hr) { + size_t pre_used = 0; + size_t cleared_h_regions = 0; + size_t freed_regions = 0; + UncleanRegionList local_list; + + HeapWord* start = hr->bottom(); + HeapWord* end = hr->prev_top_at_mark_start(); + size_t used_bytes = hr->used(); + size_t live_bytes = hr->max_live_bytes(); + if (used_bytes > 0) { + guarantee( live_bytes <= used_bytes, "invariant" ); + } else { + guarantee( live_bytes == 0, "invariant" ); + } + + size_t garbage_bytes = used_bytes - live_bytes; + if (garbage_bytes > 0) + g1_policy()->decrease_known_garbage_bytes(garbage_bytes); + + free_region_work(hr, pre_used, cleared_h_regions, freed_regions, + &local_list); + finish_free_region_work(pre_used, cleared_h_regions, freed_regions, + &local_list); +} + +void +G1CollectedHeap::free_region_work(HeapRegion* hr, + size_t& pre_used, + size_t& cleared_h_regions, + size_t& freed_regions, + UncleanRegionList* list, + bool par) { + pre_used += hr->used(); + if (hr->isHumongous()) { + assert(hr->startsHumongous(), + "Only the start of a humongous region should be freed."); + int ind = _hrs->find(hr); + assert(ind != -1, "Should have an index."); + // Clear the start region. + hr->hr_clear(par, true /*clear_space*/); + list->insert_before_head(hr); + cleared_h_regions++; + freed_regions++; + // Clear any continued regions. + ind++; + while ((size_t)ind < n_regions()) { + HeapRegion* hrc = _hrs->at(ind); + if (!hrc->continuesHumongous()) break; + // Otherwise, does continue the H region. 
+ assert(hrc->humongous_start_region() == hr, "Huh?"); + hrc->hr_clear(par, true /*clear_space*/); + cleared_h_regions++; + freed_regions++; + list->insert_before_head(hrc); + ind++; + } + } else { + hr->hr_clear(par, true /*clear_space*/); + list->insert_before_head(hr); + freed_regions++; + // If we're using clear2, this should not be enabled. + // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); + } +} + +void G1CollectedHeap::finish_free_region_work(size_t pre_used, + size_t cleared_h_regions, + size_t freed_regions, + UncleanRegionList* list) { + if (list != NULL && list->sz() > 0) { + prepend_region_list_on_unclean_list(list); + } + // Acquire a lock, if we're parallel, to update possibly-shared + // variables. + Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; + { + MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); + _summary_bytes_used -= pre_used; + _num_humongous_regions -= (int) cleared_h_regions; + _free_regions += freed_regions; + } +} + + +void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { + while (list != NULL) { + guarantee( list->is_young(), "invariant" ); + + HeapWord* bottom = list->bottom(); + HeapWord* end = list->end(); + MemRegion mr(bottom, end); + ct_bs->dirty(mr); + + list = list->get_next_young_region(); + } +} + +void G1CollectedHeap::cleanUpCardTable() { + CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); + double start = os::elapsedTime(); + + ct_bs->clear(_g1_committed); + + // now, redirty the cards of the scan-only and survivor regions + // (it seemed faster to do it this way, instead of iterating over + // all regions and then clearing / dirtying as approprite) + dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region()); + dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); + + double elapsed = os::elapsedTime() - start; + g1_policy()->record_clear_ct_time( elapsed * 1000.0); +} + + +void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { + if (g1_policy()->should_do_collection_pause(word_size)) { + do_collection_pause(); + } +} + +void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { + double young_time_ms = 0.0; + double non_young_time_ms = 0.0; + + G1CollectorPolicy* policy = g1_policy(); + + double start_sec = os::elapsedTime(); + bool non_young = true; + + HeapRegion* cur = cs_head; + int age_bound = -1; + size_t rs_lengths = 0; + + while (cur != NULL) { + if (non_young) { + if (cur->is_young()) { + double end_sec = os::elapsedTime(); + double elapsed_ms = (end_sec - start_sec) * 1000.0; + non_young_time_ms += elapsed_ms; + + start_sec = os::elapsedTime(); + non_young = false; + } + } else { + if (!cur->is_on_free_list()) { + double end_sec = os::elapsedTime(); + double elapsed_ms = (end_sec - start_sec) * 1000.0; + young_time_ms += elapsed_ms; + + start_sec = os::elapsedTime(); + non_young = true; + } + } + + rs_lengths += cur->rem_set()->occupied(); + + HeapRegion* next = cur->next_in_collection_set(); + assert(cur->in_collection_set(), "bad CS"); + cur->set_next_in_collection_set(NULL); + cur->set_in_collection_set(false); + + if (cur->is_young()) { + int index = cur->young_index_in_cset(); + guarantee( index != -1, "invariant" ); + guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); + size_t words_survived = _surviving_young_words[index]; + cur->record_surv_words_in_group(words_survived); + } else { + int index = cur->young_index_in_cset(); + guarantee( index == -1, 
"invariant" ); + } + + assert( (cur->is_young() && cur->young_index_in_cset() > -1) || + (!cur->is_young() && cur->young_index_in_cset() == -1), + "invariant" ); + + if (!cur->evacuation_failed()) { + // And the region is empty. + assert(!cur->is_empty(), + "Should not have empty regions in a CS."); + free_region(cur); + } else { + guarantee( !cur->is_scan_only(), "should not be scan only" ); + cur->uninstall_surv_rate_group(); + if (cur->is_young()) + cur->set_young_index_in_cset(-1); + cur->set_not_young(); + cur->set_evacuation_failed(false); + } + cur = next; + } + + policy->record_max_rs_lengths(rs_lengths); + policy->cset_regions_freed(); + + double end_sec = os::elapsedTime(); + double elapsed_ms = (end_sec - start_sec) * 1000.0; + if (non_young) + non_young_time_ms += elapsed_ms; + else + young_time_ms += elapsed_ms; + + policy->record_young_free_cset_time_ms(young_time_ms); + policy->record_non_young_free_cset_time_ms(non_young_time_ms); +} + +HeapRegion* +G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { + assert(ZF_mon->owned_by_self(), "Precondition"); + HeapRegion* res = pop_unclean_region_list_locked(); + if (res != NULL) { + assert(!res->continuesHumongous() && + res->zero_fill_state() != HeapRegion::Allocated, + "Only free regions on unclean list."); + if (zero_filled) { + res->ensure_zero_filled_locked(); + res->set_zero_fill_allocated(); + } + } + return res; +} + +HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { + MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); + return alloc_region_from_unclean_list_locked(zero_filled); +} + +void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + put_region_on_unclean_list_locked(r); + if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. +} + +void G1CollectedHeap::set_unclean_regions_coming(bool b) { + MutexLockerEx x(Cleanup_mon); + set_unclean_regions_coming_locked(b); +} + +void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { + assert(Cleanup_mon->owned_by_self(), "Precondition"); + _unclean_regions_coming = b; + // Wake up mutator threads that might be waiting for completeCleanup to + // finish. + if (!b) Cleanup_mon->notify_all(); +} + +void G1CollectedHeap::wait_for_cleanup_complete() { + MutexLockerEx x(Cleanup_mon); + wait_for_cleanup_complete_locked(); +} + +void G1CollectedHeap::wait_for_cleanup_complete_locked() { + assert(Cleanup_mon->owned_by_self(), "precondition"); + while (_unclean_regions_coming) { + Cleanup_mon->wait(); + } +} + +void +G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { + assert(ZF_mon->owned_by_self(), "precondition."); + _unclean_region_list.insert_before_head(r); +} + +void +G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + prepend_region_list_on_unclean_list_locked(list); + if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. +} + +void +G1CollectedHeap:: +prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { + assert(ZF_mon->owned_by_self(), "precondition."); + _unclean_region_list.prepend_list(list); +} + +HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { + assert(ZF_mon->owned_by_self(), "precondition."); + HeapRegion* res = _unclean_region_list.pop(); + if (res != NULL) { + // Inform ZF thread that there's a new unclean head. 
+ if (_unclean_region_list.hd() != NULL && should_zf()) + ZF_mon->notify_all(); + } + return res; +} + +HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { + assert(ZF_mon->owned_by_self(), "precondition."); + return _unclean_region_list.hd(); +} + + +bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { + assert(ZF_mon->owned_by_self(), "Precondition"); + HeapRegion* r = peek_unclean_region_list_locked(); + if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { + // Result of below must be equal to "r", since we hold the lock. + (void)pop_unclean_region_list_locked(); + put_free_region_on_list_locked(r); + return true; + } else { + return false; + } +} + +bool G1CollectedHeap::move_cleaned_region_to_free_list() { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + return move_cleaned_region_to_free_list_locked(); +} + + +void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { + assert(ZF_mon->owned_by_self(), "precondition."); + assert(_free_region_list_size == free_region_list_length(), "Inv"); + assert(r->zero_fill_state() == HeapRegion::ZeroFilled, + "Regions on free list must be zero filled"); + assert(!r->isHumongous(), "Must not be humongous."); + assert(r->is_empty(), "Better be empty"); + assert(!r->is_on_free_list(), + "Better not already be on free list"); + assert(!r->is_on_unclean_list(), + "Better not already be on unclean list"); + r->set_on_free_list(true); + r->set_next_on_free_list(_free_region_list); + _free_region_list = r; + _free_region_list_size++; + assert(_free_region_list_size == free_region_list_length(), "Inv"); +} + +void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + put_free_region_on_list_locked(r); +} + +HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { + assert(ZF_mon->owned_by_self(), "precondition."); + assert(_free_region_list_size == free_region_list_length(), "Inv"); + HeapRegion* res = _free_region_list; + if (res != NULL) { + _free_region_list = res->next_from_free_list(); + _free_region_list_size--; + res->set_on_free_list(false); + res->set_next_on_free_list(NULL); + assert(_free_region_list_size == free_region_list_length(), "Inv"); + } + return res; +} + + +HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { + // By self, or on behalf of self. + assert(Heap_lock->is_locked(), "Precondition"); + HeapRegion* res = NULL; + bool first = true; + while (res == NULL) { + if (zero_filled || !first) { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + res = pop_free_region_list_locked(); + if (res != NULL) { + assert(!res->zero_fill_is_allocated(), + "No allocated regions on free list."); + res->set_zero_fill_allocated(); + } else if (!first) { + break; // We tried both, time to return NULL. + } + } + + if (res == NULL) { + res = alloc_region_from_unclean_list(zero_filled); + } + assert(res == NULL || + !zero_filled || + res->zero_fill_is_allocated(), + "We must have allocated the region we're returning"); + first = false; + } + return res; +} + +void G1CollectedHeap::remove_allocated_regions_from_lists() { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + { + HeapRegion* prev = NULL; + HeapRegion* cur = _unclean_region_list.hd(); + while (cur != NULL) { + HeapRegion* next = cur->next_from_unclean_list(); + if (cur->zero_fill_is_allocated()) { + // Remove from the list. 
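+ // The region is either the current head of the unclean list (pop it)
+ // or an interior node (splice it out after its predecessor).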
+ if (prev == NULL) { + (void)_unclean_region_list.pop(); + } else { + _unclean_region_list.delete_after(prev); + } + cur->set_on_unclean_list(false); + cur->set_next_on_unclean_list(NULL); + } else { + prev = cur; + } + cur = next; + } + assert(_unclean_region_list.sz() == unclean_region_list_length(), + "Inv"); + } + + { + HeapRegion* prev = NULL; + HeapRegion* cur = _free_region_list; + while (cur != NULL) { + HeapRegion* next = cur->next_from_free_list(); + if (cur->zero_fill_is_allocated()) { + // Remove from the list. + if (prev == NULL) { + _free_region_list = cur->next_from_free_list(); + } else { + prev->set_next_on_free_list(cur->next_from_free_list()); + } + cur->set_on_free_list(false); + cur->set_next_on_free_list(NULL); + _free_region_list_size--; + } else { + prev = cur; + } + cur = next; + } + assert(_free_region_list_size == free_region_list_length(), "Inv"); + } +} + +bool G1CollectedHeap::verify_region_lists() { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + return verify_region_lists_locked(); +} + +bool G1CollectedHeap::verify_region_lists_locked() { + HeapRegion* unclean = _unclean_region_list.hd(); + while (unclean != NULL) { + guarantee(unclean->is_on_unclean_list(), "Well, it is!"); + guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); + guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, + "Everything else is possible."); + unclean = unclean->next_from_unclean_list(); + } + guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); + + HeapRegion* free_r = _free_region_list; + while (free_r != NULL) { + assert(free_r->is_on_free_list(), "Well, it is!"); + assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); + switch (free_r->zero_fill_state()) { + case HeapRegion::NotZeroFilled: + case HeapRegion::ZeroFilling: + guarantee(false, "Should not be on free list."); + break; + default: + // Everything else is possible. + break; + } + free_r = free_r->next_from_free_list(); + } + guarantee(_free_region_list_size == free_region_list_length(), "Inv"); + // If we didn't do an assertion... + return true; +} + +size_t G1CollectedHeap::free_region_list_length() { + assert(ZF_mon->owned_by_self(), "precondition."); + size_t len = 0; + HeapRegion* cur = _free_region_list; + while (cur != NULL) { + len++; + cur = cur->next_from_free_list(); + } + return len; +} + +size_t G1CollectedHeap::unclean_region_list_length() { + assert(ZF_mon->owned_by_self(), "precondition."); + return _unclean_region_list.length(); +} + +size_t G1CollectedHeap::n_regions() { + return _hrs->length(); +} + +size_t G1CollectedHeap::max_regions() { + return + (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / + HeapRegion::GrainBytes; +} + +size_t G1CollectedHeap::free_regions() { + /* Possibly-expensive assert. 
+ assert(_free_regions == count_free_regions(), + "_free_regions is off."); + */ + return _free_regions; +} + +bool G1CollectedHeap::should_zf() { + return _free_region_list_size < (size_t) G1ConcZFMaxRegions; +} + +class RegionCounter: public HeapRegionClosure { + size_t _n; +public: + RegionCounter() : _n(0) {} + bool doHeapRegion(HeapRegion* r) { + if (r->is_empty()) { + assert(!r->isHumongous(), "H regions should not be empty."); + _n++; + } + return false; + } + int res() { return (int) _n; } +}; + +size_t G1CollectedHeap::count_free_regions() { + RegionCounter rc; + heap_region_iterate(&rc); + size_t n = rc.res(); + if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) + n--; + return n; +} + +size_t G1CollectedHeap::count_free_regions_list() { + size_t n = 0; + size_t o = 0; + ZF_mon->lock_without_safepoint_check(); + HeapRegion* cur = _free_region_list; + while (cur != NULL) { + cur = cur->next_from_free_list(); + n++; + } + size_t m = unclean_region_list_length(); + ZF_mon->unlock(); + return n + m; +} + +bool G1CollectedHeap::should_set_young_locked() { + assert(heap_lock_held_for_gc(), + "the heap lock should already be held by or for this thread"); + return (g1_policy()->in_young_gc_mode() && + g1_policy()->should_add_next_region_to_young_list()); +} + +void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { + assert(heap_lock_held_for_gc(), + "the heap lock should already be held by or for this thread"); + _young_list->push_region(hr); + g1_policy()->set_region_short_lived(hr); +} + +class NoYoungRegionsClosure: public HeapRegionClosure { +private: + bool _success; +public: + NoYoungRegionsClosure() : _success(true) { } + bool doHeapRegion(HeapRegion* r) { + if (r->is_young()) { + gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", + r->bottom(), r->end()); + _success = false; + } + return false; + } + bool success() { return _success; } +}; + +bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list, + bool check_sample) { + bool ret = true; + + ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample); + if (!ignore_scan_only_list) { + NoYoungRegionsClosure closure; + heap_region_iterate(&closure); + ret = ret && closure.success(); + } + + return ret; +} + +void G1CollectedHeap::empty_young_list() { + assert(heap_lock_held_for_gc(), + "the heap lock should already be held by or for this thread"); + assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); + + _young_list->empty_list(); +} + +bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { + bool no_allocs = true; + for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { + HeapRegion* r = _gc_alloc_regions[ap]; + no_allocs = r == NULL || r->saved_mark_at_top(); + } + return no_allocs; +} + +void G1CollectedHeap::retire_all_alloc_regions() { + for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { + HeapRegion* r = _gc_alloc_regions[ap]; + if (r != NULL) { + // Check for aliases. + bool has_processed_alias = false; + for (int i = 0; i < ap; ++i) { + if (_gc_alloc_regions[i] == r) { + has_processed_alias = true; + break; + } + } + if (!has_processed_alias) { + retire_alloc_region(r, false /* par */); + } + } + } +} + + +// Done at the start of full GC. 
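+ // Both the unclean and the free region lists are emptied here; they are
+ // reconstructed by rebuild_region_lists() once the full GC has compacted
+ // the heap.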
+void G1CollectedHeap::tear_down_region_lists() { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + while (pop_unclean_region_list_locked() != NULL) ; + assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, + "Postconditions of loop.") + while (pop_free_region_list_locked() != NULL) ; + assert(_free_region_list == NULL, "Postcondition of loop."); + if (_free_region_list_size != 0) { + gclog_or_tty->print_cr("Size is %d.", _free_region_list_size); + print(); + } + assert(_free_region_list_size == 0, "Postconditions of loop."); +} + + +class RegionResetter: public HeapRegionClosure { + G1CollectedHeap* _g1; + int _n; +public: + RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {} + bool doHeapRegion(HeapRegion* r) { + if (r->continuesHumongous()) return false; + if (r->top() > r->bottom()) { + if (r->top() < r->end()) { + Copy::fill_to_words(r->top(), + pointer_delta(r->end(), r->top())); + } + r->set_zero_fill_allocated(); + } else { + assert(r->is_empty(), "tautology"); + _n++; + switch (r->zero_fill_state()) { + case HeapRegion::NotZeroFilled: + case HeapRegion::ZeroFilling: + _g1->put_region_on_unclean_list_locked(r); + break; + case HeapRegion::Allocated: + r->set_zero_fill_complete(); + // no break; go on to put on free list. + case HeapRegion::ZeroFilled: + _g1->put_free_region_on_list_locked(r); + break; + } + } + return false; + } + + int getFreeRegionCount() {return _n;} +}; + +// Done at the end of full GC. +void G1CollectedHeap::rebuild_region_lists() { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + // This needs to go at the end of the full GC. + RegionResetter rs; + heap_region_iterate(&rs); + _free_regions = rs.getFreeRegionCount(); + // Tell the ZF thread it may have work to do. + if (should_zf()) ZF_mon->notify_all(); +} + +class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { + G1CollectedHeap* _g1; + int _n; +public: + UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} + bool doHeapRegion(HeapRegion* r) { + if (r->continuesHumongous()) return false; + if (r->top() > r->bottom()) { + // There are assertions in "set_zero_fill_needed()" below that + // require top() == bottom(), so this is technically illegal. + // We'll skirt the law here, by making that true temporarily. + DEBUG_ONLY(HeapWord* save_top = r->top(); + r->set_top(r->bottom())); + r->set_zero_fill_needed(); + DEBUG_ONLY(r->set_top(save_top)); + } + return false; + } +}; + +// Done at the start of full GC. +void G1CollectedHeap::set_used_regions_to_need_zero_fill() { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + // This needs to go at the end of the full GC. 
+ UsedRegionsNeedZeroFillSetter rs; + heap_region_iterate(&rs); +} + +void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { + _refine_cte_cl->set_concurrent(concurrent); +} + +#ifndef PRODUCT + +class PrintHeapRegionClosure: public HeapRegionClosure { +public: + bool doHeapRegion(HeapRegion *r) { + gclog_or_tty->print("Region: "PTR_FORMAT":", r); + if (r != NULL) { + if (r->is_on_free_list()) + gclog_or_tty->print("Free "); + if (r->is_young()) + gclog_or_tty->print("Young "); + if (r->isHumongous()) + gclog_or_tty->print("Is Humongous "); + r->print(); + } + return false; + } +}; + +class SortHeapRegionClosure : public HeapRegionClosure { + size_t young_regions,free_regions, unclean_regions; + size_t hum_regions, count; + size_t unaccounted, cur_unclean, cur_alloc; + size_t total_free; + HeapRegion* cur; +public: + SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), + free_regions(0), unclean_regions(0), + hum_regions(0), + count(0), unaccounted(0), + cur_alloc(0), total_free(0) + {} + bool doHeapRegion(HeapRegion *r) { + count++; + if (r->is_on_free_list()) free_regions++; + else if (r->is_on_unclean_list()) unclean_regions++; + else if (r->isHumongous()) hum_regions++; + else if (r->is_young()) young_regions++; + else if (r == cur) cur_alloc++; + else unaccounted++; + return false; + } + void print() { + total_free = free_regions + unclean_regions; + gclog_or_tty->print("%d regions\n", count); + gclog_or_tty->print("%d free: free_list = %d unclean = %d\n", + total_free, free_regions, unclean_regions); + gclog_or_tty->print("%d humongous %d young\n", + hum_regions, young_regions); + gclog_or_tty->print("%d cur_alloc\n", cur_alloc); + gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted); + } +}; + +void G1CollectedHeap::print_region_counts() { + SortHeapRegionClosure sc(_cur_alloc_region); + PrintHeapRegionClosure cl; + heap_region_iterate(&cl); + heap_region_iterate(&sc); + sc.print(); + print_region_accounting_info(); +}; + +bool G1CollectedHeap::regions_accounted_for() { + // TODO: regions accounting for young/survivor/tenured + return true; +} + +bool G1CollectedHeap::print_region_accounting_info() { + gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).", + free_regions(), + count_free_regions(), count_free_regions_list(), + _free_region_list_size, _unclean_region_list.sz()); + gclog_or_tty->print_cr("cur_alloc: %d.", + (_cur_alloc_region == NULL ? 0 : 1)); + gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); + + // TODO: check regions accounting for young/survivor/tenured + return true; +} + +bool G1CollectedHeap::is_in_closed_subset(const void* p) const { + HeapRegion* hr = heap_region_containing(p); + if (hr == NULL) { + return is_in_permanent(p); + } else { + return hr->is_in(p); + } +} +#endif // PRODUCT + +void G1CollectedHeap::g1_unimplemented() { + // Unimplemented(); +} + + +// Local Variables: *** +// c-indentation-style: gnu *** +// End: *** --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2009-08-01 04:21:04.838620320 +0100 @@ -0,0 +1,1222 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// A "G1CollectedHeap" is an implementation of a java heap for HotSpot. +// It uses the "Garbage First" heap organization and algorithm, which +// may combine concurrent marking with parallel, incremental compaction of +// heap subsets that will yield large amounts of garbage. + +class HeapRegion; +class HeapRegionSeq; +class PermanentGenerationSpec; +class GenerationSpec; +class OopsInHeapRegionClosure; +class G1ScanHeapEvacClosure; +class ObjectClosure; +class SpaceClosure; +class CompactibleSpaceClosure; +class Space; +class G1CollectorPolicy; +class GenRemSet; +class G1RemSet; +class HeapRegionRemSetIterator; +class ConcurrentMark; +class ConcurrentMarkThread; +class ConcurrentG1Refine; +class ConcurrentZFThread; + +// If want to accumulate detailed statistics on work queues +// turn this on. +#define G1_DETAILED_STATS 0 + +#if G1_DETAILED_STATS +# define IF_G1_DETAILED_STATS(code) code +#else +# define IF_G1_DETAILED_STATS(code) +#endif + +typedef GenericTaskQueue RefToScanQueue; +typedef GenericTaskQueueSet RefToScanQueueSet; + +enum G1GCThreadGroups { + G1CRGroup = 0, + G1ZFGroup = 1, + G1CMGroup = 2, + G1CLGroup = 3 +}; + +enum GCAllocPurpose { + GCAllocForTenured, + GCAllocForSurvived, + GCAllocPurposeCount +}; + +class YoungList : public CHeapObj { +private: + G1CollectedHeap* _g1h; + + HeapRegion* _head; + + HeapRegion* _scan_only_head; + HeapRegion* _scan_only_tail; + size_t _length; + size_t _scan_only_length; + + size_t _last_sampled_rs_lengths; + size_t _sampled_rs_lengths; + HeapRegion* _curr; + HeapRegion* _curr_scan_only; + + HeapRegion* _survivor_head; + HeapRegion* _survivor_tail; + size_t _survivor_length; + + void empty_list(HeapRegion* list); + +public: + YoungList(G1CollectedHeap* g1h); + + void push_region(HeapRegion* hr); + void add_survivor_region(HeapRegion* hr); + HeapRegion* pop_region(); + void empty_list(); + bool is_empty() { return _length == 0; } + size_t length() { return _length; } + size_t scan_only_length() { return _scan_only_length; } + size_t survivor_length() { return _survivor_length; } + + void rs_length_sampling_init(); + bool rs_length_sampling_more(); + void rs_length_sampling_next(); + + void reset_sampled_info() { + _last_sampled_rs_lengths = 0; + } + size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; } + + // for development purposes + void reset_auxilary_lists(); + HeapRegion* first_region() { return _head; } + HeapRegion* first_scan_only_region() { return _scan_only_head; } + HeapRegion* first_survivor_region() { return _survivor_head; } + HeapRegion* last_survivor_region() { return _survivor_tail; } + HeapRegion* par_get_next_scan_only_region() { + MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); + HeapRegion* ret = _curr_scan_only; + if (ret != NULL) + _curr_scan_only 
= ret->get_next_young_region(); + return ret; + } + + // debugging + bool check_list_well_formed(); + bool check_list_empty(bool ignore_scan_only_list, + bool check_sample = true); + void print(); +}; + +class RefineCardTableEntryClosure; +class G1CollectedHeap : public SharedHeap { + friend class VM_G1CollectForAllocation; + friend class VM_GenCollectForPermanentAllocation; + friend class VM_G1CollectFull; + friend class VM_G1IncCollectionPause; + friend class VMStructs; + + // Closures used in implementation. + friend class G1ParCopyHelper; + friend class G1IsAliveClosure; + friend class G1EvacuateFollowersClosure; + friend class G1ParScanThreadState; + friend class G1ParScanClosureSuper; + friend class G1ParEvacuateFollowersClosure; + friend class G1ParTask; + friend class G1FreeGarbageRegionClosure; + friend class RefineCardTableEntryClosure; + friend class G1PrepareCompactClosure; + friend class RegionSorter; + friend class CountRCClosure; + friend class EvacPopObjClosure; + + // Other related classes. + friend class G1MarkSweep; + +private: + enum SomePrivateConstants { + VeryLargeInBytes = HeapRegion::GrainBytes/2, + VeryLargeInWords = VeryLargeInBytes/HeapWordSize, + MinHeapDeltaBytes = 10 * HeapRegion::GrainBytes, // FIXME + NumAPIs = HeapRegion::MaxAge + }; + + // The one and only G1CollectedHeap, so static functions can find it. + static G1CollectedHeap* _g1h; + + // Storage for the G1 heap (excludes the permanent generation). + VirtualSpace _g1_storage; + MemRegion _g1_reserved; + + // The part of _g1_storage that is currently committed. + MemRegion _g1_committed; + + // The maximum part of _g1_storage that has ever been committed. + MemRegion _g1_max_committed; + + // The number of regions that are completely free. + size_t _free_regions; + + // The number of regions we could create by expansion. + size_t _expansion_regions; + + // Return the number of free regions in the heap (by direct counting.) + size_t count_free_regions(); + // Return the number of free regions on the free and unclean lists. + size_t count_free_regions_list(); + + // The block offset table for the G1 heap. + G1BlockOffsetSharedArray* _bot_shared; + + // Move all of the regions off the free lists, then rebuild those free + // lists, before and after full GC. + void tear_down_region_lists(); + void rebuild_region_lists(); + // This sets all non-empty regions to need zero-fill (which they will if + // they are empty after full collection.) + void set_used_regions_to_need_zero_fill(); + + // The sequence of all heap regions in the heap. + HeapRegionSeq* _hrs; + + // The region from which normal-sized objects are currently being + // allocated. May be NULL. + HeapRegion* _cur_alloc_region; + + // Postcondition: cur_alloc_region == NULL. + void abandon_cur_alloc_region(); + void abandon_gc_alloc_regions(); + + // The to-space memory regions into which objects are being copied during + // a GC. + HeapRegion* _gc_alloc_regions[GCAllocPurposeCount]; + size_t _gc_alloc_region_counts[GCAllocPurposeCount]; + // These are the regions, one per GCAllocPurpose, that are half-full + // at the end of a collection and that we want to reuse during the + // next collection. 
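The _gc_alloc_regions[] array declared here holds one allocation region per GCAllocPurpose, and two purposes may end up sharing the same region, which is why retire_all_alloc_regions() earlier checks for aliases before retiring. A small stand-alone sketch of that alias-aware retirement; Purpose, Region and retire_all() are hypothetical names standing in for the real types.

    #include <cstddef>
    #include <cstdio>

    enum Purpose { ForTenured, ForSurvived, PurposeCount };

    struct Region { int id; };

    Region* g_alloc_regions[PurposeCount];   // one slot per purpose, slots may alias

    void retire(Region* r) { std::printf("retiring region %d\n", r->id); }

    void retire_all() {
      for (int ap = 0; ap < PurposeCount; ++ap) {
        Region* r = g_alloc_regions[ap];
        if (r == NULL) continue;
        bool seen = false;
        for (int i = 0; i < ap; ++i) {       // already retired under an earlier purpose?
          if (g_alloc_regions[i] == r) { seen = true; break; }
        }
        if (!seen) retire(r);                // each distinct region retired exactly once
      }
    }

    int main() {
      Region shared = { 1 };
      g_alloc_regions[ForTenured]  = &shared;
      g_alloc_regions[ForSurvived] = &shared; // both purposes alias one region
      retire_all();                           // prints "retiring region 1" once
      return 0;
    }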
+ HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount]; + // This specifies whether we will keep the last half-full region at + // the end of a collection so that it can be reused during the next + // collection (this is specified per GCAllocPurpose) + bool _retain_gc_alloc_region[GCAllocPurposeCount]; + + // A list of the regions that have been set to be alloc regions in the + // current collection. + HeapRegion* _gc_alloc_region_list; + + // When called by par thread, require par_alloc_during_gc_lock() to be held. + void push_gc_alloc_region(HeapRegion* hr); + + // This should only be called single-threaded. Undeclares all GC alloc + // regions. + void forget_alloc_region_list(); + + // Should be used to set an alloc region, because there's other + // associated bookkeeping. + void set_gc_alloc_region(int purpose, HeapRegion* r); + + // Check well-formedness of alloc region list. + bool check_gc_alloc_regions(); + + // Outside of GC pauses, the number of bytes used in all regions other + // than the current allocation region. + size_t _summary_bytes_used; + + // This is used for a quick test on whether a reference points into + // the collection set or not. Basically, we have an array, with one + // byte per region, and that byte denotes whether the corresponding + // region is in the collection set or not. The entry corresponding + // the bottom of the heap, i.e., region 0, is pointed to by + // _in_cset_fast_test_base. The _in_cset_fast_test field has been + // biased so that it actually points to address 0 of the address + // space, to make the test as fast as possible (we can simply shift + // the address to address into it, instead of having to subtract the + // bottom of the heap from the address before shifting it; basically + // it works in the same way the card table works). + bool* _in_cset_fast_test; + + // The allocated array used for the fast test on whether a reference + // points into the collection set or not. This field is also used to + // free the array. + bool* _in_cset_fast_test_base; + + // The length of the _in_cset_fast_test_base array. + size_t _in_cset_fast_test_length; + + volatile unsigned _gc_time_stamp; + + size_t* _surviving_young_words; + + void setup_surviving_young_words(); + void update_surviving_young_words(size_t* surv_young_words); + void cleanup_surviving_young_words(); + +protected: + + // Returns "true" iff none of the gc alloc regions have any allocations + // since the last call to "save_marks". + bool all_alloc_regions_no_allocs_since_save_marks(); + // Perform finalization stuff on all allocation regions. + void retire_all_alloc_regions(); + + // The number of regions allocated to hold humongous objects. + int _num_humongous_regions; + YoungList* _young_list; + + // The current policy object for the collector. + G1CollectorPolicy* _g1_policy; + + // Parallel allocation lock to protect the current allocation region. + Mutex _par_alloc_during_gc_lock; + Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; } + + // If possible/desirable, allocate a new HeapRegion for normal object + // allocation sufficient for an allocation of the given "word_size". + // If "do_expand" is true, will attempt to expand the heap if necessary + // to to satisfy the request. If "zero_filled" is true, requires a + // zero-filled region. + // (Returning NULL will trigger a GC.) 
+ virtual HeapRegion* newAllocRegion_work(size_t word_size, + bool do_expand, + bool zero_filled); + + virtual HeapRegion* newAllocRegion(size_t word_size, + bool zero_filled = true) { + return newAllocRegion_work(word_size, false, zero_filled); + } + virtual HeapRegion* newAllocRegionWithExpansion(int purpose, + size_t word_size, + bool zero_filled = true); + + // Attempt to allocate an object of the given (very large) "word_size". + // Returns "NULL" on failure. + virtual HeapWord* humongousObjAllocate(size_t word_size); + + // If possible, allocate a block of the given word_size, else return "NULL". + // Returning NULL will trigger GC or heap expansion. + // These two methods have rather awkward pre- and + // post-conditions. If they are called outside a safepoint, then + // they assume that the caller is holding the heap lock. Upon return + // they release the heap lock, if they are returning a non-NULL + // value. attempt_allocation_slow() also dirties the cards of a + // newly-allocated young region after it releases the heap + // lock. This change in interface was the neatest way to achieve + // this card dirtying without affecting mem_allocate(), which is a + // more frequently called method. We tried two or three different + // approaches, but they were even more hacky. + HeapWord* attempt_allocation(size_t word_size, + bool permit_collection_pause = true); + + HeapWord* attempt_allocation_slow(size_t word_size, + bool permit_collection_pause = true); + + // Allocate blocks during garbage collection. Will ensure an + // allocation region, either by picking one or expanding the + // heap, and then allocate a block of the given size. The block + // may not be a humongous - it must fit into a single heap region. + HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size); + HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); + + HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, + HeapRegion* alloc_region, + bool par, + size_t word_size); + + // Ensure that no further allocations can happen in "r", bearing in mind + // that parallel threads might be attempting allocations. + void par_allocate_remaining_space(HeapRegion* r); + + // Retires an allocation region when it is full or at the end of a + // GC pause. + void retire_alloc_region(HeapRegion* alloc_region, bool par); + + // Helper function for two callbacks below. + // "full", if true, indicates that the GC is for a System.gc() request, + // and should collect the entire heap. If "clear_all_soft_refs" is true, + // all soft references are cleared during the GC. If "full" is false, + // "word_size" describes the allocation that the GC should + // attempt (at least) to satisfy. + void do_collection(bool full, bool clear_all_soft_refs, + size_t word_size); + + // Callback from VM_G1CollectFull operation. + // Perform a full collection. + void do_full_collection(bool clear_all_soft_refs); + + // Resize the heap if necessary after a full collection. If this is + // after a collect-for allocation, "word_size" is the allocation size, + // and will be considered part of the used portion of the heap. + void resize_if_necessary_after_full_collection(size_t word_size); + + // Callback from VM_G1CollectForAllocation operation. + // This function does everything necessary/possible to satisfy a + // failed allocation request (including collection, expansion, etc.) 
+ HeapWord* satisfy_failed_allocation(size_t word_size); + + // Attempting to expand the heap sufficiently + // to support an allocation of the given "word_size". If + // successful, perform the allocation and return the address of the + // allocated block, or else "NULL". + virtual HeapWord* expand_and_allocate(size_t word_size); + +public: + // Expand the garbage-first heap by at least the given size (in bytes!). + // (Rounds up to a HeapRegion boundary.) + virtual void expand(size_t expand_bytes); + + // Do anything common to GC's. + virtual void gc_prologue(bool full); + virtual void gc_epilogue(bool full); + + // We register a region with the fast "in collection set" test. We + // simply set to true the array slot corresponding to this region. + void register_region_with_in_cset_fast_test(HeapRegion* r) { + assert(_in_cset_fast_test_base != NULL, "sanity"); + assert(r->in_collection_set(), "invariant"); + int index = r->hrs_index(); + assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length, + "invariant"); + assert(!_in_cset_fast_test_base[index], "invariant"); + _in_cset_fast_test_base[index] = true; + } + + // This is a fast test on whether a reference points into the + // collection set or not. It does not assume that the reference + // points into the heap; if it doesn't, it will return false. + bool in_cset_fast_test(oop obj) { + assert(_in_cset_fast_test != NULL, "sanity"); + if (_g1_committed.contains((HeapWord*) obj)) { + // no need to subtract the bottom of the heap from obj, + // _in_cset_fast_test is biased + size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes; + bool ret = _in_cset_fast_test[index]; + // let's make sure the result is consistent with what the slower + // test returns + assert( ret || !obj_in_cs(obj), "sanity"); + assert(!ret || obj_in_cs(obj), "sanity"); + return ret; + } else { + return false; + } + } + +protected: + + // Shrink the garbage-first heap by at most the given size (in bytes!). + // (Rounds down to a HeapRegion boundary.) + virtual void shrink(size_t expand_bytes); + void shrink_helper(size_t expand_bytes); + + // Do an incremental collection: identify a collection set, and evacuate + // its live objects elsewhere. + virtual void do_collection_pause(); + + // The guts of the incremental collection pause, executed by the vm + // thread. + virtual void do_collection_pause_at_safepoint(); + + // Actually do the work of evacuating the collection set. + virtual void evacuate_collection_set(); + + // If this is an appropriate right time, do a collection pause. + // The "word_size" argument, if non-zero, indicates the size of an + // allocation request that is prompting this query. + void do_collection_pause_if_appropriate(size_t word_size); + + // The g1 remembered set of the heap. + G1RemSet* _g1_rem_set; + // And it's mod ref barrier set, used to track updates for the above. + ModRefBarrierSet* _mr_bs; + + // A set of cards that cover the objects for which the Rsets should be updated + // concurrently after the collection. + DirtyCardQueueSet _dirty_card_queue_set; + + // The Heap Region Rem Set Iterator. + HeapRegionRemSetIterator** _rem_set_iterator; + + // The closure used to refine a single card. + RefineCardTableEntryClosure* _refine_cte_cl; + + // A function to check the consistency of dirty card logs. + void check_ct_logs_at_safepoint(); + + // After a collection pause, make the regions in the CS into free + // regions. 
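register_region_with_in_cset_fast_test() and in_cset_fast_test() above rely on the biased-table trick described at the field declaration: _in_cset_fast_test is offset so that shifting an object address by the region-size log yields the table index directly, with no subtraction of the heap bottom. A stand-alone sketch of the idea with made-up region size and heap bottom; kLogRegionBytes, kRegions and kHeapBottom are assumptions of the illustration only.

    #include <cassert>
    #include <cstddef>

    const size_t kLogRegionBytes = 20;                           // hypothetical 1 MB regions
    const size_t kRegions        = 64;
    const size_t kHeapBottom     = kRegions << kLogRegionBytes;  // region-aligned "bottom"

    bool  table[kRegions];                                       // one entry per region
    // Bias the pointer by the index of the heap bottom. Forming this pointer is
    // formally out of range; the real code does the same thing, biasing the table
    // so that it conceptually starts at address 0 (just like the card table).
    bool* biased = table - (kHeapBottom >> kLogRegionBytes);

    bool in_cset_fast_test(size_t addr) {
      return biased[addr >> kLogRegionBytes];                    // no subtraction needed
    }

    int main() {
      table[3] = true;                                           // region 3 is in the collection set
      size_t obj = kHeapBottom + 3 * (1u << kLogRegionBytes) + 128;
      assert(in_cset_fast_test(obj));
      assert(!in_cset_fast_test(kHeapBottom));                   // region 0 is not
      return 0;
    }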
+ void free_collection_set(HeapRegion* cs_head); + + // Applies "scan_non_heap_roots" to roots outside the heap, + // "scan_rs" to roots inside the heap (having done "set_region" to + // indicate the region in which the root resides), and does "scan_perm" + // (setting the generation to the perm generation.) If "scan_rs" is + // NULL, then this step is skipped. The "worker_i" + // param is for use with parallel roots processing, and should be + // the "i" of the calling parallel worker thread's work(i) function. + // In the sequential case this param will be ignored. + void g1_process_strong_roots(bool collecting_perm_gen, + SharedHeap::ScanningOption so, + OopClosure* scan_non_heap_roots, + OopsInHeapRegionClosure* scan_rs, + OopsInHeapRegionClosure* scan_so, + OopsInGenClosure* scan_perm, + int worker_i); + + void scan_scan_only_set(OopsInHeapRegionClosure* oc, + int worker_i); + void scan_scan_only_region(HeapRegion* hr, + OopsInHeapRegionClosure* oc, + int worker_i); + + // Apply "blk" to all the weak roots of the system. These include + // JNI weak roots, the code cache, system dictionary, symbol table, + // string table, and referents of reachable weak refs. + void g1_process_weak_roots(OopClosure* root_closure, + OopClosure* non_root_closure); + + // Invoke "save_marks" on all heap regions. + void save_marks(); + + // Free a heap region. + void free_region(HeapRegion* hr); + // A component of "free_region", exposed for 'batching'. + // All the params after "hr" are out params: the used bytes of the freed + // region(s), the number of H regions cleared, the number of regions + // freed, and pointers to the head and tail of a list of freed contig + // regions, linked throught the "next_on_unclean_list" field. + void free_region_work(HeapRegion* hr, + size_t& pre_used, + size_t& cleared_h, + size_t& freed_regions, + UncleanRegionList* list, + bool par = false); + + + // The concurrent marker (and the thread it runs in.) + ConcurrentMark* _cm; + ConcurrentMarkThread* _cmThread; + bool _mark_in_progress; + + // The concurrent refiner. + ConcurrentG1Refine* _cg1r; + + // The concurrent zero-fill thread. + ConcurrentZFThread* _czft; + + // The parallel task queues + RefToScanQueueSet *_task_queues; + + // True iff a evacuation has failed in the current collection. + bool _evacuation_failed; + + // Set the attribute indicating whether evacuation has failed in the + // current collection. + void set_evacuation_failed(bool b) { _evacuation_failed = b; } + + // Failed evacuations cause some logical from-space objects to have + // forwarding pointers to themselves. Reset them. + void remove_self_forwarding_pointers(); + + // When one is non-null, so is the other. Together, they each pair is + // an object with a preserved mark, and its mark value. + GrowableArray* _objs_with_preserved_marks; + GrowableArray* _preserved_marks_of_objs; + + // Preserve the mark of "obj", if necessary, in preparation for its mark + // word being overwritten with a self-forwarding-pointer. + void preserve_mark_if_necessary(oop obj, markOop m); + + // The stack of evac-failure objects left to be scanned. + GrowableArray* _evac_failure_scan_stack; + // The closure to apply to evac-failure objects. + + OopsInHeapRegionClosure* _evac_failure_closure; + // Set the field above. + void + set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) { + _evac_failure_closure = evac_failure_closure; + } + + // Push "obj" on the scan stack. 
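The evacuation-failure fields above keep two growable arrays in lock step: the objects whose headers are about to be overwritten with self-forwarding pointers, and their original mark words, so the headers can be restored after the pause. A minimal sketch of that bookkeeping using standard containers in place of the GrowableArrays; Obj, Mark and the function names are illustrative only.

    #include <cstddef>
    #include <stdint.h>
    #include <vector>

    typedef void*     Obj;    // stand-in for oop
    typedef uintptr_t Mark;   // stand-in for markOop

    // Parallel arrays: entry i of each belongs to the same object.
    std::vector<Obj>  objs_with_preserved_marks;
    std::vector<Mark> preserved_marks_of_objs;

    // Only marks that cannot be recomputed later need to be saved before the
    // header is overwritten with a self-forwarding pointer.
    void preserve_mark_if_necessary(Obj o, Mark m, bool must_preserve) {
      if (must_preserve) {
        objs_with_preserved_marks.push_back(o);
        preserved_marks_of_objs.push_back(m);
      }
    }

    // After the pause, walk both arrays in lock step and restore the headers.
    void restore_preserved_marks(void (*set_mark)(Obj, Mark)) {
      for (size_t i = 0; i < objs_with_preserved_marks.size(); ++i) {
        set_mark(objs_with_preserved_marks[i], preserved_marks_of_objs[i]);
      }
      objs_with_preserved_marks.clear();
      preserved_marks_of_objs.clear();
    }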
+ void push_on_evac_failure_scan_stack(oop obj); + // Process scan stack entries until the stack is empty. + void drain_evac_failure_scan_stack(); + // True iff an invocation of "drain_scan_stack" is in progress; to + // prevent unnecessary recursion. + bool _drain_in_progress; + + // Do any necessary initialization for evacuation-failure handling. + // "cl" is the closure that will be used to process evac-failure + // objects. + void init_for_evac_failure(OopsInHeapRegionClosure* cl); + // Do any necessary cleanup for evacuation-failure handling data + // structures. + void finalize_for_evac_failure(); + + // An attempt to evacuate "obj" has failed; take necessary steps. + void handle_evacuation_failure(oop obj); + oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); + void handle_evacuation_failure_common(oop obj, markOop m); + + + // Ensure that the relevant gc_alloc regions are set. + void get_gc_alloc_regions(); + // We're done with GC alloc regions. We are going to tear down the + // gc alloc list and remove the gc alloc tag from all the regions on + // that list. However, we will also retain the last (i.e., the one + // that is half-full) GC alloc region, per GCAllocPurpose, for + // possible reuse during the next collection, provided + // _retain_gc_alloc_region[] indicates that it should be the + // case. Said regions are kept in the _retained_gc_alloc_regions[] + // array. If the parameter totally is set, we will not retain any + // regions, irrespective of what _retain_gc_alloc_region[] + // indicates. + void release_gc_alloc_regions(bool totally); +#ifndef PRODUCT + // Useful for debugging. + void print_gc_alloc_regions(); +#endif // !PRODUCT + + // ("Weak") Reference processing support + ReferenceProcessor* _ref_processor; + + enum G1H_process_strong_roots_tasks { + G1H_PS_mark_stack_oops_do, + G1H_PS_refProcessor_oops_do, + // Leave this one last. + G1H_PS_NumElements + }; + + SubTasksDone* _process_strong_tasks; + + // List of regions which require zero filling. + UncleanRegionList _unclean_region_list; + bool _unclean_regions_coming; + +public: + void set_refine_cte_cl_concurrency(bool concurrent); + + RefToScanQueue *task_queue(int i); + + // A set of cards where updates happened during the GC + DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; } + + // Create a G1CollectedHeap with the specified policy. + // Must call the initialize method afterwards. + // May not return if something goes wrong. + G1CollectedHeap(G1CollectorPolicy* policy); + + // Initialize the G1CollectedHeap to have the initial and + // maximum sizes, permanent generation, and remembered and barrier sets + // specified by the policy object. + jint initialize(); + + void ref_processing_init(); + + void set_par_threads(int t) { + SharedHeap::set_par_threads(t); + _process_strong_tasks->set_par_threads(t); + } + + virtual CollectedHeap::Name kind() const { + return CollectedHeap::G1CollectedHeap; + } + + // The current policy object for the collector. + G1CollectorPolicy* g1_policy() const { return _g1_policy; } + + // Adaptive size policy. No such thing for g1. + virtual AdaptiveSizePolicy* size_policy() { return NULL; } + + // The rem set and barrier set. + G1RemSet* g1_rem_set() const { return _g1_rem_set; } + ModRefBarrierSet* mr_bs() const { return _mr_bs; } + + // The rem set iterator. 
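_drain_in_progress above is a re-entrancy guard: processing one evacuation-failure object may push further objects onto the scan stack, and the drain loop must not be entered recursively from inside itself. A self-contained sketch of that guard pattern; the names and the int payload are stand-ins, the real stack holds oops.

    #include <cstdio>
    #include <vector>

    std::vector<int> scan_stack;          // stand-in for the evac-failure scan stack
    bool drain_in_progress = false;

    void process(int obj);                // may push more work (defined below)

    void drain_scan_stack() {
      while (!scan_stack.empty()) {
        int obj = scan_stack.back();
        scan_stack.pop_back();
        process(obj);                     // may push further entries; this loop picks them up
      }
    }

    void push_and_maybe_drain(int obj) {
      scan_stack.push_back(obj);
      if (!drain_in_progress) {           // only the outermost caller drains
        drain_in_progress = true;
        drain_scan_stack();
        drain_in_progress = false;
      }
    }

    void process(int obj) {
      std::printf("processing %d\n", obj);
      if (obj > 0) push_and_maybe_drain(obj - 1);   // nested push, but no nested drain
    }

    int main() {
      push_and_maybe_drain(3);            // prints 3, 2, 1, 0 from a single drain loop
      return 0;
    }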
+ HeapRegionRemSetIterator* rem_set_iterator(int i) { + return _rem_set_iterator[i]; + } + + HeapRegionRemSetIterator* rem_set_iterator() { + return _rem_set_iterator[0]; + } + + unsigned get_gc_time_stamp() { + return _gc_time_stamp; + } + + void reset_gc_time_stamp() { + _gc_time_stamp = 0; + OrderAccess::fence(); + } + + void increment_gc_time_stamp() { + ++_gc_time_stamp; + OrderAccess::fence(); + } + + void iterate_dirty_card_closure(bool concurrent, int worker_i); + + // The shared block offset table array. + G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; } + + // Reference Processing accessor + ReferenceProcessor* ref_processor() { return _ref_processor; } + + // Reserved (g1 only; super method includes perm), capacity and the used + // portion in bytes. + size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); } + virtual size_t capacity() const; + virtual size_t used() const; + size_t recalculate_used() const; +#ifndef PRODUCT + size_t recalculate_used_regions() const; +#endif // PRODUCT + + // These virtual functions do the actual allocation. + virtual HeapWord* mem_allocate(size_t word_size, + bool is_noref, + bool is_tlab, + bool* gc_overhead_limit_was_exceeded); + + // Some heaps may offer a contiguous region for shared non-blocking + // allocation, via inlined code (by exporting the address of the top and + // end fields defining the extent of the contiguous allocation region.) + // But G1CollectedHeap doesn't yet support this. + + // Return an estimate of the maximum allocation that could be performed + // without triggering any collection or expansion activity. In a + // generational collector, for example, this is probably the largest + // allocation that could be supported (without expansion) in the youngest + // generation. It is "unsafe" because no locks are taken; the result + // should be treated as an approximation, not a guarantee, for use in + // heuristic resizing decisions. + virtual size_t unsafe_max_alloc(); + + virtual bool is_maximal_no_gc() const { + return _g1_storage.uncommitted_size() == 0; + } + + // The total number of regions in the heap. + size_t n_regions(); + + // The number of regions that are completely free. + size_t max_regions(); + + // The number of regions that are completely free. + size_t free_regions(); + + // The number of regions that are not completely free. + size_t used_regions() { return n_regions() - free_regions(); } + + // True iff the ZF thread should run. + bool should_zf(); + + // The number of regions available for "regular" expansion. + size_t expansion_regions() { return _expansion_regions; } + +#ifndef PRODUCT + bool regions_accounted_for(); + bool print_region_accounting_info(); + void print_region_counts(); +#endif + + HeapRegion* alloc_region_from_unclean_list(bool zero_filled); + HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled); + + void put_region_on_unclean_list(HeapRegion* r); + void put_region_on_unclean_list_locked(HeapRegion* r); + + void prepend_region_list_on_unclean_list(UncleanRegionList* list); + void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list); + + void set_unclean_regions_coming(bool b); + void set_unclean_regions_coming_locked(bool b); + // Wait for cleanup to be complete. + void wait_for_cleanup_complete(); + // Like above, but assumes that the calling thread owns the Heap_lock. + void wait_for_cleanup_complete_locked(); + + // Return the head of the unclean list. 
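increment_gc_time_stamp() above bumps the heap-wide time stamp and then calls OrderAccess::fence() so that threads reading region state concurrently cannot observe the new stamp ahead of the updates it covers. As a rough standard-C++ analogue of that publish-then-fence step (this uses <atomic> purely for illustration; it is not the HotSpot OrderAccess API):

    #include <atomic>

    std::atomic<unsigned> gc_time_stamp(0);

    void increment_gc_time_stamp() {
      gc_time_stamp.fetch_add(1, std::memory_order_relaxed);
      // Full two-way fence, comparable in intent to OrderAccess::fence():
      // nothing before this point may be reordered past it, and vice versa.
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }

    unsigned get_gc_time_stamp() {
      return gc_time_stamp.load(std::memory_order_relaxed);
    }

    int main() {
      increment_gc_time_stamp();
      return get_gc_time_stamp() == 1 ? 0 : 1;
    }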
+ HeapRegion* peek_unclean_region_list_locked(); + // Remove and return the head of the unclean list. + HeapRegion* pop_unclean_region_list_locked(); + + // List of regions which are zero filled and ready for allocation. + HeapRegion* _free_region_list; + // Number of elements on the free list. + size_t _free_region_list_size; + + // If the head of the unclean list is ZeroFilled, move it to the free + // list. + bool move_cleaned_region_to_free_list_locked(); + bool move_cleaned_region_to_free_list(); + + void put_free_region_on_list_locked(HeapRegion* r); + void put_free_region_on_list(HeapRegion* r); + + // Remove and return the head element of the free list. + HeapRegion* pop_free_region_list_locked(); + + // If "zero_filled" is true, we first try the free list, then we try the + // unclean list, zero-filling the result. If "zero_filled" is false, we + // first try the unclean list, then the zero-filled list. + HeapRegion* alloc_free_region_from_lists(bool zero_filled); + + // Verify the integrity of the region lists. + void remove_allocated_regions_from_lists(); + bool verify_region_lists(); + bool verify_region_lists_locked(); + size_t unclean_region_list_length(); + size_t free_region_list_length(); + + // Perform a collection of the heap; intended for use in implementing + // "System.gc". This probably implies as full a collection as the + // "CollectedHeap" supports. + virtual void collect(GCCause::Cause cause); + + // The same as above but assume that the caller holds the Heap_lock. + void collect_locked(GCCause::Cause cause); + + // This interface assumes that it's being called by the + // vm thread. It collects the heap assuming that the + // heap lock is already held and that we are executing in + // the context of the vm thread. + virtual void collect_as_vm_thread(GCCause::Cause cause); + + // True iff a evacuation has failed in the most-recent collection. + bool evacuation_failed() { return _evacuation_failed; } + + // Free a region if it is totally full of garbage. Returns the number of + // bytes freed (0 ==> didn't free it). + size_t free_region_if_totally_empty(HeapRegion *hr); + void free_region_if_totally_empty_work(HeapRegion *hr, + size_t& pre_used, + size_t& cleared_h_regions, + size_t& freed_regions, + UncleanRegionList* list, + bool par = false); + + // If we've done free region work that yields the given changes, update + // the relevant global variables. + void finish_free_region_work(size_t pre_used, + size_t cleared_h_regions, + size_t freed_regions, + UncleanRegionList* list); + + + // Returns "TRUE" iff "p" points into the allocated area of the heap. + virtual bool is_in(const void* p) const; + + // Return "TRUE" iff the given object address is within the collection + // set. + inline bool obj_in_cs(oop obj); + + // Return "TRUE" iff the given object address is in the reserved + // region of g1 (excluding the permanent generation). + bool is_in_g1_reserved(const void* p) const { + return _g1_reserved.contains(p); + } + + // Returns a MemRegion that corresponds to the space that has been + // committed in the heap + MemRegion g1_committed() { + return _g1_committed; + } + + NOT_PRODUCT( bool is_in_closed_subset(const void* p) const; ) + + // Dirty card table entries covering a list of young regions. + void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list); + + // This resets the card table to all zeros. It is used after + // a collection pause which used the card table to claim cards. + void cleanUpCardTable(); + + // Iteration functions. 
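alloc_free_region_from_lists() above encodes a simple preference order between the two lists: callers that need a zero-filled region try the free list first and only then clean an unclean region, while callers that will overwrite the region anyway consume unclean regions first, preserving the pre-zeroed ones. A stand-alone sketch of that selection logic with stand-in types; Region, the two lists and zero_fill() are illustrative only.

    #include <cstddef>
    #include <cstring>
    #include <list>

    struct Region {
      char data[16];
      void zero_fill() { std::memset(data, 0, sizeof(data)); }
    };

    std::list<Region*> free_list;      // already zero-filled
    std::list<Region*> unclean_list;   // may contain stale data

    Region* pop(std::list<Region*>& l) {
      if (l.empty()) return NULL;
      Region* r = l.front();
      l.pop_front();
      return r;
    }

    Region* alloc_region(bool zero_filled) {
      if (zero_filled) {
        Region* r = pop(free_list);             // prefer regions that are already clean
        if (r == NULL) {
          r = pop(unclean_list);
          if (r != NULL) r->zero_fill();        // fall back to cleaning one on the spot
        }
        return r;
      }
      Region* r = pop(unclean_list);            // will be overwritten anyway
      if (r == NULL) r = pop(free_list);        // only then spend a pre-zeroed region
      return r;
    }

    int main() {
      Region a, b;
      free_list.push_back(&a);
      unclean_list.push_back(&b);
      return alloc_region(true) == &a ? 0 : 1;  // zero-filled request takes the free-list region
    }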
+ + // Iterate over all the ref-containing fields of all objects, calling + // "cl.do_oop" on each. + virtual void oop_iterate(OopClosure* cl) { + oop_iterate(cl, true); + } + void oop_iterate(OopClosure* cl, bool do_perm); + + // Same as above, restricted to a memory region. + virtual void oop_iterate(MemRegion mr, OopClosure* cl) { + oop_iterate(mr, cl, true); + } + void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm); + + // Iterate over all objects, calling "cl.do_object" on each. + virtual void object_iterate(ObjectClosure* cl) { + object_iterate(cl, true); + } + virtual void safe_object_iterate(ObjectClosure* cl) { + object_iterate(cl, true); + } + void object_iterate(ObjectClosure* cl, bool do_perm); + + // Iterate over all objects allocated since the last collection, calling + // "cl.do_object" on each. The heap must have been initialized properly + // to support this function, or else this call will fail. + virtual void object_iterate_since_last_GC(ObjectClosure* cl); + + // Iterate over all spaces in use in the heap, in ascending address order. + virtual void space_iterate(SpaceClosure* cl); + + // Iterate over heap regions, in address order, terminating the + // iteration early if the "doHeapRegion" method returns "true". + void heap_region_iterate(HeapRegionClosure* blk); + + // Iterate over heap regions starting with r (or the first region if "r" + // is NULL), in address order, terminating early if the "doHeapRegion" + // method returns "true". + void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk); + + // As above but starting from the region at index idx. + void heap_region_iterate_from(int idx, HeapRegionClosure* blk); + + HeapRegion* region_at(size_t idx); + + // Divide the heap region sequence into "chunks" of some size (the number + // of regions divided by the number of parallel threads times some + // overpartition factor, currently 4). Assumes that this will be called + // in parallel by ParallelGCThreads worker threads with discinct worker + // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel + // calls will use the same "claim_value", and that that claim value is + // different from the claim_value of any heap region before the start of + // the iteration. Applies "blk->doHeapRegion" to each of the regions, by + // attempting to claim the first region in each chunk, and, if + // successful, applying the closure to each region in the chunk (and + // setting the claim value of the second and subsequent regions of the + // chunk.) For now requires that "doHeapRegion" always returns "false", + // i.e., that a closure never attempt to abort a traversal. + void heap_region_par_iterate_chunked(HeapRegionClosure* blk, + int worker, + jint claim_value); + + // It resets all the region claim values to the default. + void reset_heap_region_claim_values(); + +#ifdef ASSERT + bool check_heap_region_claim_values(jint claim_value); +#endif // ASSERT + + // Iterate over the regions (if any) in the current collection set. + void collection_set_iterate(HeapRegionClosure* blk); + + // As above but starting from region r + void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); + + // Returns the first (lowest address) compactible space in the heap. + virtual CompactibleSpace* first_compactible_space(); + + // A CollectedHeap will contain some number of spaces. This finds the + // space containing a given address, or else returns NULL. 
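heap_region_par_iterate_chunked() above is described as splitting the region sequence into chunks and letting each worker atomically claim a chunk by stamping the pause's claim value into its first region; a region already carrying that claim value belongs to another worker. A simplified stand-alone sketch of the claiming step; the types, the fixed chunk size and the use of std::atomic are assumptions of the sketch, and the real code stores the claim value in the HeapRegion itself and over-partitions by a factor of the thread count.

    #include <atomic>
    #include <cstddef>

    struct Region {
      std::atomic<int> claim;
      Region() : claim(0) {}
    };

    // Try to claim the chunk starting at "start" for this pause's claim_value.
    // Returns true only for the worker that wins the race.
    bool claim_chunk(Region& start, int claim_value) {
      int expected = start.claim.load();
      if (expected == claim_value) return false;          // already claimed this pause
      return start.claim.compare_exchange_strong(expected, claim_value);
    }

    void par_iterate_chunked(Region* regions, size_t n, size_t chunk_size,
                             int claim_value, void (*do_region)(Region&)) {
      for (size_t start = 0; start < n; start += chunk_size) {
        if (!claim_chunk(regions[start], claim_value)) continue;
        size_t end = start + chunk_size;
        if (end > n) end = n;
        for (size_t i = start; i < end; ++i) {
          regions[i].claim.store(claim_value);             // stamp the rest of the chunk too
          do_region(regions[i]);
        }
      }
    }

    void visit(Region&) {}

    int main() {
      Region regions[32];
      par_iterate_chunked(regions, 32, 8, /* claim_value = */ 1, visit);  // one worker claims all four chunks
      return 0;
    }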
+ virtual Space* space_containing(const void* addr) const; + + // A G1CollectedHeap will contain some number of heap regions. This + // finds the region containing a given address, or else returns NULL. + HeapRegion* heap_region_containing(const void* addr) const; + + // Like the above, but requires "addr" to be in the heap (to avoid a + // null-check), and unlike the above, may return an continuing humongous + // region. + HeapRegion* heap_region_containing_raw(const void* addr) const; + + // A CollectedHeap is divided into a dense sequence of "blocks"; that is, + // each address in the (reserved) heap is a member of exactly + // one block. The defining characteristic of a block is that it is + // possible to find its size, and thus to progress forward to the next + // block. (Blocks may be of different sizes.) Thus, blocks may + // represent Java objects, or they might be free blocks in a + // free-list-based heap (or subheap), as long as the two kinds are + // distinguishable and the size of each is determinable. + + // Returns the address of the start of the "block" that contains the + // address "addr". We say "blocks" instead of "object" since some heaps + // may not pack objects densely; a chunk may either be an object or a + // non-object. + virtual HeapWord* block_start(const void* addr) const; + + // Requires "addr" to be the start of a chunk, and returns its size. + // "addr + size" is required to be the start of a new chunk, or the end + // of the active area of the heap. + virtual size_t block_size(const HeapWord* addr) const; + + // Requires "addr" to be the start of a block, and returns "TRUE" iff + // the block is an object. + virtual bool block_is_obj(const HeapWord* addr) const; + + // Does this heap support heap inspection? (+PrintClassHistogram) + virtual bool supports_heap_inspection() const { return true; } + + // Section on thread-local allocation buffers (TLABs) + // See CollectedHeap for semantics. + + virtual bool supports_tlab_allocation() const; + virtual size_t tlab_capacity(Thread* thr) const; + virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; + virtual HeapWord* allocate_new_tlab(size_t size); + + // Can a compiler initialize a new object without store barriers? + // This permission only extends from the creation of a new object + // via a TLAB up to the first subsequent safepoint. + virtual bool can_elide_tlab_store_barriers() const { + // Since G1's TLAB's may, on occasion, come from non-young regions + // as well. (Is there a flag controlling that? XXX) + return false; + } + + // Can a compiler elide a store barrier when it writes + // a permanent oop into the heap? Applies when the compiler + // is storing x to the heap, where x->is_perm() is true. + virtual bool can_elide_permanent_oop_store_barriers() const { + // At least until perm gen collection is also G1-ified, at + // which point this should return false. + return true; + } + + virtual bool allocs_are_zero_filled(); + + // The boundary between a "large" and "small" array of primitives, in + // words. + virtual size_t large_typearray_limit(); + + // Returns "true" iff the given word_size is "very large". + static bool isHumongous(size_t word_size) { + return word_size >= VeryLargeInWords; + } + + // Update mod union table with the set of dirty cards. + void updateModUnion(); + + // Set the mod union bits corresponding to the given memRegion. Note + // that this is always a safe operation, since it doesn't clear any + // bits. 
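isHumongous() above compares the request against VeryLargeInWords, which SomePrivateConstants defines as half a heap region expressed in words (VeryLargeInBytes = HeapRegion::GrainBytes/2). A small worked example under assumed values, a 1 MB region and 8-byte HeapWords, chosen only for the illustration:

    #include <cassert>
    #include <cstddef>

    const size_t GrainBytes       = 1024 * 1024;                     // assumed 1 MB region
    const size_t HeapWordSize     = 8;                               // assumed 64-bit words
    const size_t VeryLargeInBytes = GrainBytes / 2;                  // 512 KB
    const size_t VeryLargeInWords = VeryLargeInBytes / HeapWordSize; // 65536 words

    bool isHumongous(size_t word_size) {
      return word_size >= VeryLargeInWords;
    }

    int main() {
      assert(!isHumongous(65535));   // just under half a region: a normal allocation
      assert(isHumongous(65536));    // half a region or more: humongous
      return 0;
    }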
+ void markModUnionRange(MemRegion mr); + + // Records the fact that a marking phase is no longer in progress. + void set_marking_complete() { + _mark_in_progress = false; + } + void set_marking_started() { + _mark_in_progress = true; + } + bool mark_in_progress() { + return _mark_in_progress; + } + + // Print the maximum heap capacity. + virtual size_t max_capacity() const; + + virtual jlong millis_since_last_gc(); + + // Perform any cleanup actions necessary before allowing a verification. + virtual void prepare_for_verify(); + + // Perform verification. + virtual void verify(bool allow_dirty, bool silent); + virtual void print() const; + virtual void print_on(outputStream* st) const; + + virtual void print_gc_threads_on(outputStream* st) const; + virtual void gc_threads_do(ThreadClosure* tc) const; + + // Override + void print_tracing_info() const; + + // If "addr" is a pointer into the (reserved?) heap, returns a positive + // number indicating the "arena" within the heap in which "addr" falls. + // Or else returns 0. + virtual int addr_to_arena_id(void* addr) const; + + // Convenience function to be used in situations where the heap type can be + // asserted to be this type. + static G1CollectedHeap* heap(); + + void empty_young_list(); + bool should_set_young_locked(); + + void set_region_short_lived_locked(HeapRegion* hr); + // add appropriate methods for any other surv rate groups + + void young_list_rs_length_sampling_init() { + _young_list->rs_length_sampling_init(); + } + bool young_list_rs_length_sampling_more() { + return _young_list->rs_length_sampling_more(); + } + void young_list_rs_length_sampling_next() { + _young_list->rs_length_sampling_next(); + } + size_t young_list_sampled_rs_lengths() { + return _young_list->sampled_rs_lengths(); + } + + size_t young_list_length() { return _young_list->length(); } + size_t young_list_scan_only_length() { + return _young_list->scan_only_length(); } + + HeapRegion* pop_region_from_young_list() { + return _young_list->pop_region(); + } + + HeapRegion* young_list_first_region() { + return _young_list->first_region(); + } + + // debugging + bool check_young_list_well_formed() { + return _young_list->check_list_well_formed(); + } + bool check_young_list_empty(bool ignore_scan_only_list, + bool check_sample = true); + + // *** Stuff related to concurrent marking. It's not clear to me that so + // many of these need to be public. + + // The functions below are helper functions that a subclass of + // "CollectedHeap" can use in the implementation of its virtual + // functions. + // This performs a concurrent marking of the live objects in a + // bitmap off to the side. + void doConcurrentMark(); + + // This is called from the marksweep collector which then does + // a concurrent mark and verifies that the results agree with + // the stop the world marking. + void checkConcurrentMark(); + void do_sync_mark(); + + bool isMarkedPrev(oop obj) const; + bool isMarkedNext(oop obj) const; + + // Determine if an object is dead, given the object and also + // the region to which the object belongs. An object is dead + // iff a) it was not allocated since the last mark and b) it + // is not marked. + + bool is_obj_dead(const oop obj, const HeapRegion* hr) const { + return + !hr->obj_allocated_since_prev_marking(obj) && + !isMarkedPrev(obj); + } + + // This is used when copying an object to survivor space. + // If the object is marked live, then we mark the copy live. 
+ // If the object is allocated since the start of this mark + // cycle, then we mark the copy live. + // If the object has been around since the previous mark + // phase, and hasn't been marked yet during this phase, + // then we don't mark it, we just wait for the + // current marking cycle to get to it. + + // This function returns true when an object has been + // around since the previous marking and hasn't yet + // been marked during this marking. + + bool is_obj_ill(const oop obj, const HeapRegion* hr) const { + return + !hr->obj_allocated_since_next_marking(obj) && + !isMarkedNext(obj); + } + + // Determine if an object is dead, given only the object itself. + // This will find the region to which the object belongs and + // then call the region version of the same function. + + // Added if it is in permanent gen it isn't dead. + // Added if it is NULL it isn't dead. + + bool is_obj_dead(oop obj) { + HeapRegion* hr = heap_region_containing(obj); + if (hr == NULL) { + if (Universe::heap()->is_in_permanent(obj)) + return false; + else if (obj == NULL) return false; + else return true; + } + else return is_obj_dead(obj, hr); + } + + bool is_obj_ill(oop obj) { + HeapRegion* hr = heap_region_containing(obj); + if (hr == NULL) { + if (Universe::heap()->is_in_permanent(obj)) + return false; + else if (obj == NULL) return false; + else return true; + } + else return is_obj_ill(obj, hr); + } + + // The following is just to alert the verification code + // that a full collection has occurred and that the + // remembered sets are no longer up to date. + bool _full_collection; + void set_full_collection() { _full_collection = true;} + void clear_full_collection() {_full_collection = false;} + bool full_collection() {return _full_collection;} + + ConcurrentMark* concurrent_mark() const { return _cm; } + ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } + +public: + void stop_conc_gc_threads(); + + // + + double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); + void check_if_region_is_too_expensive(double predicted_time_ms); + size_t pending_card_num(); + size_t max_pending_card_num(); + size_t cards_scanned(); + + // + +protected: + size_t _max_heap_capacity; + +// debug_only(static void check_for_valid_allocation_state();) + +public: + // Temporary: call to mark things unimplemented for the G1 heap (e.g., + // MemoryService). In productization, we can make this assert false + // to catch such places (as well as searching for calls to this...) + static void g1_unimplemented(); + +}; + +// Local Variables: *** +// c-indentation-style: gnu *** +// End: *** --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp 2009-08-01 04:21:05.279609197 +0100 @@ -0,0 +1,94 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// Inline functions for G1CollectedHeap + +inline HeapRegion* +G1CollectedHeap::heap_region_containing(const void* addr) const { + HeapRegion* hr = _hrs->addr_to_region(addr); + // hr can be null if addr in perm_gen + if (hr != NULL && hr->continuesHumongous()) { + hr = hr->humongous_start_region(); + } + return hr; +} + +inline HeapRegion* +G1CollectedHeap::heap_region_containing_raw(const void* addr) const { + assert(_g1_reserved.contains(addr), "invariant"); + size_t index = ((intptr_t) addr - (intptr_t) _g1_reserved.start()) + >> HeapRegion::LogOfHRGrainBytes; + HeapRegion* res = _hrs->at(index); + assert(res == _hrs->addr_to_region(addr), "sanity"); + return res; +} + +inline bool G1CollectedHeap::obj_in_cs(oop obj) { + HeapRegion* r = _hrs->addr_to_region(obj); + return r != NULL && r->in_collection_set(); +} + +inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size, + bool permit_collection_pause) { + HeapWord* res = NULL; + + assert( SafepointSynchronize::is_at_safepoint() || + Heap_lock->owned_by_self(), "pre-condition of the call" ); + + if (_cur_alloc_region != NULL) { + + // If this allocation causes a region to become non empty, + // then we need to update our free_regions count. + + if (_cur_alloc_region->is_empty()) { + res = _cur_alloc_region->allocate(word_size); + if (res != NULL) + _free_regions--; + } else { + res = _cur_alloc_region->allocate(word_size); + } + } + if (res != NULL) { + if (!SafepointSynchronize::is_at_safepoint()) { + assert( Heap_lock->owned_by_self(), "invariant" ); + Heap_lock->unlock(); + } + return res; + } + // attempt_allocation_slow will also unlock the heap lock when appropriate. + return attempt_allocation_slow(word_size, permit_collection_pause); +} + +inline RefToScanQueue* G1CollectedHeap::task_queue(int i) { + return _task_queues->queue(i); +} + + +inline bool G1CollectedHeap::isMarkedPrev(oop obj) const { + return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj); +} + +inline bool G1CollectedHeap::isMarkedNext(oop obj) const { + return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp 2009-08-01 04:21:05.769455495 +0100 @@ -0,0 +1,3031 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_g1CollectorPolicy.cpp.incl" + +#define PREDICTIONS_VERBOSE 0 + +// + +// Different defaults for different number of GC threads +// They were chosen by running GCOld and SPECjbb on debris with different +// numbers of GC threads and choosing them based on the results + +// all the same +static double rs_length_diff_defaults[] = { + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 +}; + +static double cost_per_card_ms_defaults[] = { + 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015 +}; + +static double cost_per_scan_only_region_ms_defaults[] = { + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 +}; + +// all the same +static double fully_young_cards_per_entry_ratio_defaults[] = { + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 +}; + +static double cost_per_entry_ms_defaults[] = { + 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005 +}; + +static double cost_per_byte_ms_defaults[] = { + 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009 +}; + +// these should be pretty consistent +static double constant_other_time_ms_defaults[] = { + 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0 +}; + + +static double young_other_cost_per_region_ms_defaults[] = { + 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1 +}; + +static double non_young_other_cost_per_region_ms_defaults[] = { + 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30 +}; + +// + +G1CollectorPolicy::G1CollectorPolicy() : + _parallel_gc_threads((ParallelGCThreads > 0) ? 
ParallelGCThreads : 1), + _n_pauses(0), + _recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + _recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + _recent_evac_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)), + _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + _all_pause_times_ms(new NumberSeq()), + _stop_world_start(0.0), + _all_stop_world_times_ms(new NumberSeq()), + _all_yield_times_ms(new NumberSeq()), + + _all_mod_union_times_ms(new NumberSeq()), + + _summary(new Summary()), + _abandoned_summary(new AbandonedSummary()), + + _cur_clear_ct_time_ms(0.0), + + _region_num_young(0), + _region_num_tenured(0), + _prev_region_num_young(0), + _prev_region_num_tenured(0), + + _aux_num(10), + _all_aux_times_ms(new NumberSeq[_aux_num]), + _cur_aux_start_times_ms(new double[_aux_num]), + _cur_aux_times_ms(new double[_aux_num]), + _cur_aux_times_set(new bool[_aux_num]), + + _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + + // + + _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _prev_collection_pause_end_ms(0.0), + _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)), + _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), + _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _cost_per_scan_only_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), + _partially_young_cards_per_entry_ratio_seq( + new TruncatedSeq(TruncatedSeqLength)), + _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), + _cost_per_scan_only_region_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), + _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _non_young_other_cost_per_region_ms_seq( + new TruncatedSeq(TruncatedSeqLength)), + + _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)), + _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)), + _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)), + + _pause_time_target_ms((double) MaxGCPauseMillis), + + // + + _in_young_gc_mode(false), + _full_young_gcs(true), + _full_young_pause_num(0), + _partial_young_pause_num(0), + + _during_marking(false), + _in_marking_window(false), + _in_marking_window_im(false), + + _known_garbage_ratio(0.0), + _known_garbage_bytes(0), + + _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)), + _target_pause_time_ms(-1.0), + + _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)), + + _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)), + _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)), + + _recent_avg_pause_time_ratio(0.0), + _num_markings(0), + _n_marks(0), + _n_pauses_at_mark_end(0), + + _all_full_gc_times_ms(new NumberSeq()), + + _conc_refine_enabled(0), + _conc_refine_zero_traversals(0), + 
_conc_refine_max_traversals(0), + _conc_refine_current_delta(G1ConcRefineInitialDelta), + + // G1PausesBtwnConcMark defaults to -1 + // so the hack is to do the cast QQQ FIXME + _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark), + _n_marks_since_last_pause(0), + _conc_mark_initiated(false), + _should_initiate_conc_mark(false), + _should_revert_to_full_young_gcs(false), + _last_full_young_gc(false), + + _prev_collection_pause_used_at_end_bytes(0), + + _collection_set(NULL), +#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away +#pragma warning( disable:4355 ) // 'this' : used in base member initializer list +#endif // _MSC_VER + + _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived", + G1YoungSurvRateNumRegionsSummary)), + _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor", + G1YoungSurvRateNumRegionsSummary)), + // add here any more surv rate groups + _recorded_survivor_regions(0), + _recorded_survivor_head(NULL), + _recorded_survivor_tail(NULL), + _survivors_age_table(true) + +{ + _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); + _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; + + _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads]; + _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads]; + _par_last_scan_only_times_ms = new double[_parallel_gc_threads]; + _par_last_scan_only_regions_scanned = new double[_parallel_gc_threads]; + + _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads]; + _par_last_update_rs_times_ms = new double[_parallel_gc_threads]; + _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads]; + + _par_last_scan_rs_start_times_ms = new double[_parallel_gc_threads]; + _par_last_scan_rs_times_ms = new double[_parallel_gc_threads]; + _par_last_scan_new_refs_times_ms = new double[_parallel_gc_threads]; + + _par_last_obj_copy_times_ms = new double[_parallel_gc_threads]; + + _par_last_termination_times_ms = new double[_parallel_gc_threads]; + + // start conservatively + _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis; + + // + + int index; + if (ParallelGCThreads == 0) + index = 0; + else if (ParallelGCThreads > 8) + index = 7; + else + index = ParallelGCThreads - 1; + + _pending_card_diff_seq->add(0.0); + _rs_length_diff_seq->add(rs_length_diff_defaults[index]); + _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); + _cost_per_scan_only_region_ms_seq->add( + cost_per_scan_only_region_ms_defaults[index]); + _fully_young_cards_per_entry_ratio_seq->add( + fully_young_cards_per_entry_ratio_defaults[index]); + _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); + _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]); + _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]); + _young_other_cost_per_region_ms_seq->add( + young_other_cost_per_region_ms_defaults[index]); + _non_young_other_cost_per_region_ms_seq->add( + non_young_other_cost_per_region_ms_defaults[index]); + + // + + double time_slice = (double) GCPauseIntervalMillis / 1000.0; + double max_gc_time = (double) MaxGCPauseMillis / 1000.0; + guarantee(max_gc_time < time_slice, + "Max GC time should not be greater than the time slice"); + _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); + _sigma = (double) G1ConfidencePercent / 100.0; + + // start conservatively (around 50ms is about right) + _concurrent_mark_init_times_ms->add(0.05); + _concurrent_mark_remark_times_ms->add(0.05); + 
_concurrent_mark_cleanup_times_ms->add(0.20); + _tenuring_threshold = MaxTenuringThreshold; + + if (G1UseSurvivorSpaces) { + // if G1FixedSurvivorSpaceSize is 0 which means the size is not + // fixed, then _max_survivor_regions will be calculated at + // calculate_young_list_target_config during initialization + _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes; + } else { + _max_survivor_regions = 0; + } + + initialize_all(); +} + +// Increment "i", mod "len" +static void inc_mod(int& i, int len) { + i++; if (i == len) i = 0; +} + +void G1CollectorPolicy::initialize_flags() { + set_min_alignment(HeapRegion::GrainBytes); + set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name())); + if (SurvivorRatio < 1) { + vm_exit_during_initialization("Invalid survivor ratio specified"); + } + CollectorPolicy::initialize_flags(); +} + +void G1CollectorPolicy::init() { + // Set aside an initial future to_space. + _g1 = G1CollectedHeap::heap(); + size_t regions = Universe::heap()->capacity() / HeapRegion::GrainBytes; + + assert(Heap_lock->owned_by_self(), "Locking discipline."); + + if (G1SteadyStateUsed < 50) { + vm_exit_during_initialization("G1SteadyStateUsed must be at least 50%."); + } + if (UseConcMarkSweepGC) { + vm_exit_during_initialization("-XX:+UseG1GC is incompatible with " + "-XX:+UseConcMarkSweepGC."); + } + + initialize_gc_policy_counters(); + + if (G1Gen) { + _in_young_gc_mode = true; + + if (G1YoungGenSize == 0) { + set_adaptive_young_list_length(true); + _young_list_fixed_length = 0; + } else { + set_adaptive_young_list_length(false); + _young_list_fixed_length = (G1YoungGenSize / HeapRegion::GrainBytes); + } + _free_regions_at_end_of_collection = _g1->free_regions(); + _scan_only_regions_at_end_of_collection = 0; + calculate_young_list_min_length(); + guarantee( _young_list_min_length == 0, "invariant, not enough info" ); + calculate_young_list_target_config(); + } else { + _young_list_fixed_length = 0; + _in_young_gc_mode = false; + } +} + +// Create the jstat counters for the policy. +void G1CollectorPolicy::initialize_gc_policy_counters() +{ + _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen); +} + +void G1CollectorPolicy::calculate_young_list_min_length() { + _young_list_min_length = 0; + + if (!adaptive_young_list_length()) + return; + + if (_alloc_rate_ms_seq->num() > 3) { + double now_sec = os::elapsedTime(); + double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; + double alloc_rate_ms = predict_alloc_rate_ms(); + int min_regions = (int) ceil(alloc_rate_ms * when_ms); + int current_region_num = (int) _g1->young_list_length(); + _young_list_min_length = min_regions + current_region_num; + } +} + +void G1CollectorPolicy::calculate_young_list_target_config() { + if (adaptive_young_list_length()) { + size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq); + calculate_young_list_target_config(rs_lengths); + } else { + if (full_young_gcs()) + _young_list_target_length = _young_list_fixed_length; + else + _young_list_target_length = _young_list_fixed_length / 2; + _young_list_target_length = MAX2(_young_list_target_length, (size_t)1); + size_t so_length = calculate_optimal_so_length(_young_list_target_length); + guarantee( so_length < _young_list_target_length, "invariant" ); + _young_list_so_prefix_length = so_length; + } + calculate_survivors_policy(); +} + +// This method calculate the optimal scan-only set for a fixed young +// gen size. 
I couldn't work out how to reuse the more elaborate one, +// i.e. calculate_young_list_target_config(rs_length), as the loops are +// fundamentally different (the other one finds a config for different +// S-O lengths, whereas here we need to do the opposite). +size_t G1CollectorPolicy::calculate_optimal_so_length( + size_t young_list_length) { + if (!G1UseScanOnlyPrefix) + return 0; + + if (_all_pause_times_ms->num() < 3) { + // we won't use a scan-only set at the beginning to allow the rest + // of the predictors to warm up + return 0; + } + + if (_cost_per_scan_only_region_ms_seq->num() < 3) { + // then, we'll only set the S-O set to 1 for a little bit of time, + // to get enough information on the scanning cost + return 1; + } + + size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq); + size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq); + size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); + size_t scanned_cards; + if (full_young_gcs()) + scanned_cards = predict_young_card_num(adj_rs_lengths); + else + scanned_cards = predict_non_young_card_num(adj_rs_lengths); + double base_time_ms = predict_base_elapsed_time_ms(pending_cards, + scanned_cards); + + size_t so_length = 0; + double max_gc_eff = 0.0; + for (size_t i = 0; i < young_list_length; ++i) { + double gc_eff = 0.0; + double pause_time_ms = 0.0; + predict_gc_eff(young_list_length, i, base_time_ms, + &gc_eff, &pause_time_ms); + if (gc_eff > max_gc_eff) { + max_gc_eff = gc_eff; + so_length = i; + } + } + + // set it to 95% of the optimal to make sure we sample the "area" + // around the optimal length to get up-to-date survival rate data + return so_length * 950 / 1000; +} + +// This is a really cool piece of code! It finds the best +// target configuration (young length / scan-only prefix length) so +// that GC efficiency is maximized and that we also meet a pause +// time. It's a triple nested loop. These loops are explained below +// from the inside-out :-) +// +// (a) The innermost loop will try to find the optimal young length +// for a fixed S-O length. It uses a binary search to speed up the +// process. We assume that, for a fixed S-O length, as we add more +// young regions to the CSet, the GC efficiency will only go up (I'll +// skip the proof). So, using a binary search to optimize this process +// makes perfect sense. +// +// (b) The middle loop will fix the S-O length before calling the +// innermost one. It will vary it between two parameters, increasing +// it by a given increment. +// +// (c) The outermost loop will call the middle loop three times. +// (1) The first time it will explore all possible S-O length values +// from 0 to as large as it can get, using a coarse increment (to +// quickly "home in" to where the optimal seems to be). +// (2) The second time it will explore the values around the optimal +// that was found by the first iteration using a fine increment. +// (3) Once the optimal config has been determined by the second +// iteration, we'll redo the calculation, but setting the S-O length +// to 95% of the optimal to make sure we sample the "area" +// around the optimal length to get up-to-date survival rate data +// +// Termination conditions for the iterations are several: the pause +// time is over the limit, we do not have enough to-space, etc. 
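As a rough standalone illustration of the search structure described above (and not part of the patch itself), the toy program below sweeps the S-O prefix length and, for each fixed prefix, binary-searches toward the longest young length that still meets the pause target, keeping the most efficient feasible configuration. It is only a sketch: the cost model (toy_pause_ms, toy_reclaimed_bytes) and every constant in it are invented for illustration and have nothing to do with the real predictors used by G1CollectorPolicy.

#include <cstdio>
#include <cstddef>

using std::size_t;

static const double target_pause_ms = 50.0;   // invented pause target
static const size_t free_regions    = 200;    // invented heap size in regions

// toy predictors: pause grows with both lengths, reclaimed bytes grow with
// the non-scan-only part of the young list
static double toy_pause_ms(size_t young, size_t so) {
  return 5.0 + 0.25 * (double) young + 0.05 * (double) so;
}
static double toy_reclaimed_bytes(size_t young, size_t so) {
  return 1024.0 * 1024.0 * (double) (young - so);
}

// false if the config violates the pause target or runs out of regions,
// otherwise reports its efficiency (bytes reclaimed per pause millisecond)
static bool eval_config(size_t young, size_t so, double* eff) {
  if (young >= free_regions) return false;
  double pause = toy_pause_ms(young, so);
  if (pause > target_pause_ms) return false;
  *eff = toy_reclaimed_bytes(young, so) / pause;
  return true;
}

int main() {
  size_t best_young = 0, best_so = 0;
  double best_eff = 0.0;
  // middle loop: sweep the S-O prefix length (a single coarse pass here;
  // the real code refines the winner with a fine and a final pass)
  for (size_t so = 0; so + 1 < free_regions; so += 5) {
    size_t lo = so + 1;                 // shortest sensible young length
    double lo_eff;
    if (!eval_config(lo, so, &lo_eff)) continue;
    // innermost loop: binary search toward the longest feasible young
    // length, assuming efficiency only improves as young regions are added
    size_t hi = free_regions - 1;
    while (hi > lo) {
      size_t diff = (hi - lo) / 2;
      double hi_eff;
      if (eval_config(hi, so, &hi_eff)) {
        lo = hi;                        // upper probe feasible: move up
        lo_eff = hi_eff;
      }
      hi = lo + diff;                   // either way the gap halves
    }
    if (lo_eff > best_eff) {
      best_eff = lo_eff;
      best_young = lo;
      best_so = so;
    }
  }
  std::printf("best config: young = %zu regions, S-O prefix = %zu, "
              "eff = %.0f bytes/ms\n", best_young, best_so, best_eff);
  return 0;
}

The inner loop mirrors the halving step used by the real code: whether or not the upper probe turns out to be feasible, the gap between the two bounds halves on every iteration, so the search terminates after a logarithmic number of probes.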
+ +void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) { + guarantee( adaptive_young_list_length(), "pre-condition" ); + + double start_time_sec = os::elapsedTime(); + size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1MinReservePercent); + min_reserve_perc = MIN2((size_t) 50, min_reserve_perc); + size_t reserve_regions = + (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0); + + if (full_young_gcs() && _free_regions_at_end_of_collection > 0) { + // we are in fully-young mode and there are free regions in the heap + + double survivor_regions_evac_time = + predict_survivor_regions_evac_time(); + + size_t min_so_length = 0; + size_t max_so_length = 0; + + if (G1UseScanOnlyPrefix) { + if (_all_pause_times_ms->num() < 3) { + // we won't use a scan-only set at the beginning to allow the rest + // of the predictors to warm up + min_so_length = 0; + max_so_length = 0; + } else if (_cost_per_scan_only_region_ms_seq->num() < 3) { + // then, we'll only set the S-O set to 1 for a little bit of time, + // to get enough information on the scanning cost + min_so_length = 1; + max_so_length = 1; + } else if (_in_marking_window || _last_full_young_gc) { + // no S-O prefix during a marking phase either, as at the end + // of the marking phase we'll have to use a very small young + // length target to fill up the rest of the CSet with + // non-young regions and, if we have lots of scan-only regions + // left-over, we will not be able to add any more non-young + // regions. + min_so_length = 0; + max_so_length = 0; + } else { + // this is the common case; we'll never reach the maximum, we + // one of the end conditions will fire well before that + // (hopefully!) + min_so_length = 0; + max_so_length = _free_regions_at_end_of_collection - 1; + } + } else { + // no S-O prefix, as the switch is not set, but we still need to + // do one iteration to calculate the best young target that + // meets the pause time; this way we reuse the same code instead + // of replicating it + min_so_length = 0; + max_so_length = 0; + } + + double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; + size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq); + size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); + size_t scanned_cards; + if (full_young_gcs()) + scanned_cards = predict_young_card_num(adj_rs_lengths); + else + scanned_cards = predict_non_young_card_num(adj_rs_lengths); + // calculate this once, so that we don't have to recalculate it in + // the innermost loop + double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards) + + survivor_regions_evac_time; + // the result + size_t final_young_length = 0; + size_t final_so_length = 0; + double final_gc_eff = 0.0; + // we'll also keep track of how many times we go into the inner loop + // this is for profiling reasons + size_t calculations = 0; + + // this determines which of the three iterations the outer loop is in + typedef enum { + pass_type_coarse, + pass_type_fine, + pass_type_final + } pass_type_t; + + // range of the outer loop's iteration + size_t from_so_length = min_so_length; + size_t to_so_length = max_so_length; + guarantee( from_so_length <= to_so_length, "invariant" ); + + // this will keep the S-O length that's found by the second + // iteration of the outer loop; we'll keep it just in case the third + // iteration fails to find something + size_t fine_so_length = 0; + + // the increment step for the coarse (first) iteration + size_t so_coarse_increments 
= 5;
+
+    // the common case, we'll start with the coarse iteration
+    pass_type_t pass = pass_type_coarse;
+    size_t so_length_incr = so_coarse_increments;
+
+    if (from_so_length == to_so_length) {
+      // no point in doing the coarse iteration, we'll go directly into
+      // the fine one (we're essentially trying to find the optimal young
+      // length for a fixed S-O length).
+      so_length_incr = 1;
+      pass = pass_type_final;
+    } else if (to_so_length - from_so_length < 3 * so_coarse_increments) {
+      // again, the range is too short so no point in doing the coarse
+      // iteration either
+      so_length_incr = 1;
+      pass = pass_type_fine;
+    }
+
+    bool done = false;
+    // this is the outermost loop
+    while (!done) {
+#ifdef TRACE_CALC_YOUNG_CONFIG
+      // leave this in for debugging, just in case
+      gclog_or_tty->print_cr("searching between " SIZE_FORMAT " and " SIZE_FORMAT
+                             ", incr " SIZE_FORMAT ", pass %s",
+                             from_so_length, to_so_length, so_length_incr,
+                             (pass == pass_type_coarse) ? "coarse" :
+                             (pass == pass_type_fine) ? "fine" : "final");
+#endif // TRACE_CALC_YOUNG_CONFIG
+
+      size_t so_length = from_so_length;
+      size_t init_free_regions =
+        MAX2((size_t)0,
+             _free_regions_at_end_of_collection +
+             _scan_only_regions_at_end_of_collection - reserve_regions);
+
+      // this determines whether a configuration was found
+      bool gc_eff_set = false;
+      // this is the middle loop
+      while (so_length <= to_so_length) {
+        // base time, which excludes region-related time; again we
+        // calculate it once to avoid recalculating it in the
+        // innermost loop
+        double base_time_with_so_ms =
+          base_time_ms + predict_scan_only_time_ms(so_length);
+        // it's already over the pause target, go around
+        if (base_time_with_so_ms > target_pause_time_ms)
+          break;
+
+        size_t starting_young_length = so_length+1;
+
+        // we make sure that the shortest young length that makes sense
+        // (one more than the S-O length) is feasible
+        size_t min_young_length = starting_young_length;
+        double min_gc_eff;
+        bool min_ok;
+        ++calculations;
+        min_ok = predict_gc_eff(min_young_length, so_length,
+                                base_time_with_so_ms,
+                                init_free_regions, target_pause_time_ms,
+                                &min_gc_eff);
+
+        if (min_ok) {
+          // the shortest young length is indeed feasible; we'll now
+          // set up the max young length and we'll do a binary search
+          // between min_young_length and max_young_length
+          size_t max_young_length = _free_regions_at_end_of_collection - 1;
+          double max_gc_eff = 0.0;
+          bool max_ok = false;
+
+          // the innermost loop! (finally!)
+          while (max_young_length > min_young_length) {
+            // we'll make sure that min_young_length is always at a
+            // feasible config
+            guarantee( min_ok, "invariant" );
+
+            ++calculations;
+            max_ok = predict_gc_eff(max_young_length, so_length,
+                                    base_time_with_so_ms,
+                                    init_free_regions, target_pause_time_ms,
+                                    &max_gc_eff);
+
+            size_t diff = (max_young_length - min_young_length) / 2;
+            if (max_ok) {
+              min_young_length = max_young_length;
+              min_gc_eff = max_gc_eff;
+              min_ok = true;
+            }
+            max_young_length = min_young_length + diff;
+          }
+
+          // the innermost loop found a config
+          guarantee( min_ok, "invariant" );
+          if (min_gc_eff > final_gc_eff) {
+            // it's the best config so far, so we'll keep it
+            final_gc_eff = min_gc_eff;
+            final_young_length = min_young_length;
+            final_so_length = so_length;
+            gc_eff_set = true;
+          }
+        }
+
+        // increment the fixed S-O length and go around
+        so_length += so_length_incr;
+      }
+
+      // this is the end of the outermost loop and we need to decide
+      // what to do during the next iteration
+      if (pass == pass_type_coarse) {
+        // we just did the coarse pass (first iteration)
+
+        if (!gc_eff_set)
+          // we didn't find a feasible config so we'll just bail out; of
+          // course, it might be the case that we missed it; but I'd say
+          // it's a bit unlikely
+          done = true;
+        else {
+          // We did find a feasible config with optimal GC eff during
+          // the first pass. So in the second pass we'll only consider the
+          // S-O lengths around that config with a fine increment.
+
+          guarantee( so_length_incr == so_coarse_increments, "invariant" );
+          guarantee( final_so_length >= min_so_length, "invariant" );
+
+#ifdef TRACE_CALC_YOUNG_CONFIG
+          // leave this in for debugging, just in case
+          gclog_or_tty->print_cr(" coarse pass: SO length " SIZE_FORMAT,
+                                 final_so_length);
+#endif // TRACE_CALC_YOUNG_CONFIG
+
+          from_so_length =
+            (final_so_length - min_so_length > so_coarse_increments) ?
+            final_so_length - so_coarse_increments + 1 : min_so_length;
+          to_so_length =
+            (max_so_length - final_so_length > so_coarse_increments) ?
+            final_so_length + so_coarse_increments - 1 : max_so_length;
+
+          pass = pass_type_fine;
+          so_length_incr = 1;
+        }
+      } else if (pass == pass_type_fine) {
+        // we just finished the second pass
+
+        if (!gc_eff_set) {
+          // we didn't find a feasible config (yes, it's possible;
+          // notice that, sometimes, we go directly into the fine
+          // iteration and skip the coarse one) so we bail out
+          done = true;
+        } else {
+          // We did find a feasible config with optimal GC eff
+          guarantee( so_length_incr == 1, "invariant" );
+
+          if (final_so_length == 0) {
+            // The config is of an empty S-O set, so we'll just bail out
+            done = true;
+          } else {
+            // we'll go around once more, setting the S-O length to 95%
+            // of the optimal
+            size_t new_so_length = 950 * final_so_length / 1000;
+
+#ifdef TRACE_CALC_YOUNG_CONFIG
+            // leave this in for debugging, just in case
+            gclog_or_tty->print_cr(" fine pass: SO length " SIZE_FORMAT
+                                   ", setting it to " SIZE_FORMAT,
+                                   final_so_length, new_so_length);
+#endif // TRACE_CALC_YOUNG_CONFIG
+
+            from_so_length = new_so_length;
+            to_so_length = new_so_length;
+            fine_so_length = final_so_length;
+
+            pass = pass_type_final;
+          }
+        }
+      } else if (pass == pass_type_final) {
+        // we just finished the final (third) pass
+
+        if (!gc_eff_set)
+          // we didn't find a feasible config, so we'll just use the one
+          // we found during the second pass, which we saved
+          final_so_length = fine_so_length;
+
+        // and we're done!
+        done = true;
+      } else {
+        guarantee( false, "should never reach here" );
+      }
+
+      // we now go around the outermost loop
+    }
+
+    // we should have at least one region in the target young length
+    _young_list_target_length =
+      MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
+    if (final_so_length >= final_young_length)
+      // and we need to ensure that the S-O length is not greater than
+      // the target young length (this is being a bit careful)
+      final_so_length = 0;
+    _young_list_so_prefix_length = final_so_length;
+    guarantee( !_in_marking_window || !_last_full_young_gc ||
+               _young_list_so_prefix_length == 0, "invariant" );
+
+    // let's keep an eye on how long we spend on this calculation;
+    // right now, I assume that we'll print it when we need it; we
+    // should really add it to the breakdown of a pause
+    double end_time_sec = os::elapsedTime();
+    double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
+
+#ifdef TRACE_CALC_YOUNG_CONFIG
+    // leave this in for debugging, just in case
+    gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT
+                           ", SO = " SIZE_FORMAT ", "
+                           "elapsed %1.2lf ms, calcs: " SIZE_FORMAT " (%s%s) "
+                           SIZE_FORMAT SIZE_FORMAT,
+                           target_pause_time_ms,
+                           _young_list_target_length - _young_list_so_prefix_length,
+                           _young_list_so_prefix_length,
+                           elapsed_time_ms,
+                           calculations,
+                           full_young_gcs() ? "full" : "partial",
+                           should_initiate_conc_mark() ? " i-m" : "",
+                           _in_marking_window,
+                           _in_marking_window_im);
+#endif // TRACE_CALC_YOUNG_CONFIG
+
+    if (_young_list_target_length < _young_list_min_length) {
+      // bummer; this means that, if we do a pause when the optimal
+      // config dictates, we'll violate the pause spacing target (the
+      // min length was calculated based on the application's current
+      // alloc rate);
+
+      // so, we have to bite the bullet, and allocate the minimum
+      // number. We'll violate our target, but we just can't meet it.
+
+      size_t so_length = 0;
+      // a note further up explains why we do not want an S-O length
+      // during marking
+      if (!_in_marking_window && !_last_full_young_gc)
+        // but we can still try to see whether we can find an optimal
+        // S-O length
+        so_length = calculate_optimal_so_length(_young_list_min_length);
+
+#ifdef TRACE_CALC_YOUNG_CONFIG
+      // leave this in for debugging, just in case
+      gclog_or_tty->print_cr("adjusted target length from "
+                             SIZE_FORMAT " to " SIZE_FORMAT
+                             ", SO " SIZE_FORMAT,
+                             _young_list_target_length, _young_list_min_length,
+                             so_length);
+#endif // TRACE_CALC_YOUNG_CONFIG
+
+      _young_list_target_length =
+        MAX2(_young_list_min_length, (size_t)1);
+      _young_list_so_prefix_length = so_length;
+    }
+  } else {
+    // we are in a partially-young mode or we've run out of regions (due
+    // to evacuation failure)
+
+#ifdef TRACE_CALC_YOUNG_CONFIG
+    // leave this in for debugging, just in case
+    gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
+                           ", SO " SIZE_FORMAT,
+                           _young_list_min_length, 0);
+#endif // TRACE_CALC_YOUNG_CONFIG
+
+    // we'll do the pause as soon as possible and with no S-O prefix
+    // (see above for the reasons behind the latter)
+    _young_list_target_length =
+      MAX2(_young_list_min_length, (size_t) 1);
+    _young_list_so_prefix_length = 0;
+  }
+
+  _rs_lengths_prediction = rs_lengths;
+}
+
+// This is used by: calculate_optimal_so_length(length). It returns
+// the GC eff and predicted pause time for a particular config
+void
+G1CollectorPolicy::predict_gc_eff(size_t young_length,
+                                  size_t so_length,
+                                  double base_time_ms,
+                                  double* ret_gc_eff,
+                                  double* ret_pause_time_ms) {
+  double so_time_ms = predict_scan_only_time_ms(so_length);
+  double accum_surv_rate_adj = 0.0;
+  if (so_length > 0)
+    accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1));
+  double accum_surv_rate =
+    accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
+  size_t bytes_to_copy =
+    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
+  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
+  double young_other_time_ms =
+    predict_young_other_time_ms(young_length - so_length);
+  double pause_time_ms =
+    base_time_ms + so_time_ms + copy_time_ms + young_other_time_ms;
+  size_t reclaimed_bytes =
+    (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy;
+  double gc_eff = (double) reclaimed_bytes / pause_time_ms;
+
+  *ret_gc_eff = gc_eff;
+  *ret_pause_time_ms = pause_time_ms;
+}
+
+// This is used by: calculate_young_list_target_config(rs_length). It
+// returns the GC eff of a particular config. It returns false if that
+// config violates any of the end conditions of the search in the
+// calling method, or true upon success. The end conditions were put
+// here since it's called twice and it was best not to replicate them
+// in the caller. Also, passing the parameters avoids having to
+// recalculate them in the innermost loop.
+bool
+G1CollectorPolicy::predict_gc_eff(size_t young_length,
+                                  size_t so_length,
+                                  double base_time_with_so_ms,
+                                  size_t init_free_regions,
+                                  double target_pause_time_ms,
+                                  double* ret_gc_eff) {
+  *ret_gc_eff = 0.0;
+
+  if (young_length >= init_free_regions)
+    // end condition 1: not enough space for the young regions
+    return false;
+
+  double accum_surv_rate_adj = 0.0;
+  if (so_length > 0)
+    accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1));
+  double accum_surv_rate =
+    accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
+  size_t bytes_to_copy =
+    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
+  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
+  double young_other_time_ms =
+    predict_young_other_time_ms(young_length - so_length);
+  double pause_time_ms =
+    base_time_with_so_ms + copy_time_ms + young_other_time_ms;
+
+  if (pause_time_ms > target_pause_time_ms)
+    // end condition 2: over the target pause time
+    return false;
+
+  size_t reclaimed_bytes =
+    (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy;
+  size_t free_bytes =
+    (init_free_regions - young_length) * HeapRegion::GrainBytes;
+
+  if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes)
+    // end condition 3: out of to-space (conservatively)
+    return false;
+
+  // success!
+ double gc_eff = (double) reclaimed_bytes / pause_time_ms; + *ret_gc_eff = gc_eff; + + return true; +} + +double G1CollectorPolicy::predict_survivor_regions_evac_time() { + double survivor_regions_evac_time = 0.0; + for (HeapRegion * r = _recorded_survivor_head; + r != NULL && r != _recorded_survivor_tail->get_next_young_region(); + r = r->get_next_young_region()) { + survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true); + } + return survivor_regions_evac_time; +} + +void G1CollectorPolicy::check_prediction_validity() { + guarantee( adaptive_young_list_length(), "should not call this otherwise" ); + + size_t rs_lengths = _g1->young_list_sampled_rs_lengths(); + if (rs_lengths > _rs_lengths_prediction) { + // add 10% to avoid having to recalculate often + size_t rs_lengths_prediction = rs_lengths * 1100 / 1000; + calculate_young_list_target_config(rs_lengths_prediction); + } +} + +HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size, + bool is_tlab, + bool* gc_overhead_limit_was_exceeded) { + guarantee(false, "Not using this policy feature yet."); + return NULL; +} + +// This method controls how a collector handles one or more +// of its generations being fully allocated. +HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size, + bool is_tlab) { + guarantee(false, "Not using this policy feature yet."); + return NULL; +} + + +#ifndef PRODUCT +bool G1CollectorPolicy::verify_young_ages() { + HeapRegion* head = _g1->young_list_first_region(); + return + verify_young_ages(head, _short_lived_surv_rate_group); + // also call verify_young_ages on any additional surv rate groups +} + +bool +G1CollectorPolicy::verify_young_ages(HeapRegion* head, + SurvRateGroup *surv_rate_group) { + guarantee( surv_rate_group != NULL, "pre-condition" ); + + const char* name = surv_rate_group->name(); + bool ret = true; + int prev_age = -1; + + for (HeapRegion* curr = head; + curr != NULL; + curr = curr->get_next_young_region()) { + SurvRateGroup* group = curr->surv_rate_group(); + if (group == NULL && !curr->is_survivor()) { + gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name); + ret = false; + } + + if (surv_rate_group == group) { + int age = curr->age_in_surv_rate_group(); + + if (age < 0) { + gclog_or_tty->print_cr("## %s: encountered negative age", name); + ret = false; + } + + if (age <= prev_age) { + gclog_or_tty->print_cr("## %s: region ages are not strictly increasing " + "(%d, %d)", name, age, prev_age); + ret = false; + } + prev_age = age; + } + } + + return ret; +} +#endif // PRODUCT + +void G1CollectorPolicy::record_full_collection_start() { + _cur_collection_start_sec = os::elapsedTime(); + // Release the future to-space so that it is available for compaction into. + _g1->set_full_collection(); +} + +void G1CollectorPolicy::record_full_collection_end() { + // Consider this like a collection pause for the purposes of allocation + // since last pause. + double end_sec = os::elapsedTime(); + double full_gc_time_sec = end_sec - _cur_collection_start_sec; + double full_gc_time_ms = full_gc_time_sec * 1000.0; + + checkpoint_conc_overhead(); + + _all_full_gc_times_ms->add(full_gc_time_ms); + + update_recent_gc_times(end_sec, full_gc_time_ms); + + _g1->clear_full_collection(); + + // "Nuke" the heuristics that control the fully/partially young GC + // transitions and make sure we start with fully young GCs after the + // Full GC. 
+ set_full_young_gcs(true); + _last_full_young_gc = false; + _should_revert_to_full_young_gcs = false; + _should_initiate_conc_mark = false; + _known_garbage_bytes = 0; + _known_garbage_ratio = 0.0; + _in_marking_window = false; + _in_marking_window_im = false; + + _short_lived_surv_rate_group->record_scan_only_prefix(0); + _short_lived_surv_rate_group->start_adding_regions(); + // also call this on any additional surv rate groups + + record_survivor_regions(0, NULL, NULL); + + _prev_region_num_young = _region_num_young; + _prev_region_num_tenured = _region_num_tenured; + + _free_regions_at_end_of_collection = _g1->free_regions(); + _scan_only_regions_at_end_of_collection = 0; + // Reset survivors SurvRateGroup. + _survivor_surv_rate_group->reset(); + calculate_young_list_min_length(); + calculate_young_list_target_config(); + } + +void G1CollectorPolicy::record_before_bytes(size_t bytes) { + _bytes_in_to_space_before_gc += bytes; +} + +void G1CollectorPolicy::record_after_bytes(size_t bytes) { + _bytes_in_to_space_after_gc += bytes; +} + +void G1CollectorPolicy::record_stop_world_start() { + _stop_world_start = os::elapsedTime(); +} + +void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, + size_t start_used) { + if (PrintGCDetails) { + gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->print("[GC pause"); + if (in_young_gc_mode()) + gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial"); + } + + assert(_g1->used_regions() == _g1->recalculate_used_regions(), + "sanity"); + assert(_g1->used() == _g1->recalculate_used(), "sanity"); + + double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0; + _all_stop_world_times_ms->add(s_w_t_ms); + _stop_world_start = 0.0; + + _cur_collection_start_sec = start_time_sec; + _cur_collection_pause_used_at_start_bytes = start_used; + _cur_collection_pause_used_regions_at_start = _g1->used_regions(); + _pending_cards = _g1->pending_card_num(); + _max_pending_cards = _g1->max_pending_card_num(); + + _bytes_in_to_space_before_gc = 0; + _bytes_in_to_space_after_gc = 0; + _bytes_in_collection_set_before_gc = 0; + +#ifdef DEBUG + // initialise these to something well known so that we can spot + // if they are not set properly + + for (int i = 0; i < _parallel_gc_threads; ++i) { + _par_last_ext_root_scan_times_ms[i] = -666.0; + _par_last_mark_stack_scan_times_ms[i] = -666.0; + _par_last_scan_only_times_ms[i] = -666.0; + _par_last_scan_only_regions_scanned[i] = -666.0; + _par_last_update_rs_start_times_ms[i] = -666.0; + _par_last_update_rs_times_ms[i] = -666.0; + _par_last_update_rs_processed_buffers[i] = -666.0; + _par_last_scan_rs_start_times_ms[i] = -666.0; + _par_last_scan_rs_times_ms[i] = -666.0; + _par_last_scan_new_refs_times_ms[i] = -666.0; + _par_last_obj_copy_times_ms[i] = -666.0; + _par_last_termination_times_ms[i] = -666.0; + } +#endif + + for (int i = 0; i < _aux_num; ++i) { + _cur_aux_times_ms[i] = 0.0; + _cur_aux_times_set[i] = false; + } + + _satb_drain_time_set = false; + _last_satb_drain_processed_buffers = -1; + + if (in_young_gc_mode()) + _last_young_gc_full = false; + + + // do that for any other surv rate groups + _short_lived_surv_rate_group->stop_adding_regions(); + size_t short_lived_so_length = _young_list_so_prefix_length; + _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length); + tag_scan_only(short_lived_so_length); + + if (G1UseSurvivorSpaces) { + _survivors_age_table.clear(); + } + + assert( verify_young_ages(), "region age verification" ); +} + +void 
G1CollectorPolicy::tag_scan_only(size_t short_lived_scan_only_length) { + // done in a way that it can be extended for other surv rate groups too... + + HeapRegion* head = _g1->young_list_first_region(); + bool finished_short_lived = (short_lived_scan_only_length == 0); + + if (finished_short_lived) + return; + + for (HeapRegion* curr = head; + curr != NULL; + curr = curr->get_next_young_region()) { + SurvRateGroup* surv_rate_group = curr->surv_rate_group(); + int age = curr->age_in_surv_rate_group(); + + if (surv_rate_group == _short_lived_surv_rate_group) { + if ((size_t)age < short_lived_scan_only_length) + curr->set_scan_only(); + else + finished_short_lived = true; + } + + + if (finished_short_lived) + return; + } + + guarantee( false, "we should never reach here" ); +} + +void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) { + _mark_closure_time_ms = mark_closure_time_ms; +} + +void G1CollectorPolicy::record_concurrent_mark_init_start() { + _mark_init_start_sec = os::elapsedTime(); + guarantee(!in_young_gc_mode(), "should not do be here in young GC mode"); +} + +void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double + mark_init_elapsed_time_ms) { + _during_marking = true; + _should_initiate_conc_mark = false; + _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms; +} + +void G1CollectorPolicy::record_concurrent_mark_init_end() { + double end_time_sec = os::elapsedTime(); + double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0; + _concurrent_mark_init_times_ms->add(elapsed_time_ms); + checkpoint_conc_overhead(); + record_concurrent_mark_init_end_pre(elapsed_time_ms); + + _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true); +} + +void G1CollectorPolicy::record_concurrent_mark_remark_start() { + _mark_remark_start_sec = os::elapsedTime(); + _during_marking = false; +} + +void G1CollectorPolicy::record_concurrent_mark_remark_end() { + double end_time_sec = os::elapsedTime(); + double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; + checkpoint_conc_overhead(); + _concurrent_mark_remark_times_ms->add(elapsed_time_ms); + _cur_mark_stop_world_time_ms += elapsed_time_ms; + _prev_collection_pause_end_ms += elapsed_time_ms; + + _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true); +} + +void G1CollectorPolicy::record_concurrent_mark_cleanup_start() { + _mark_cleanup_start_sec = os::elapsedTime(); +} + +void +G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes, + size_t max_live_bytes) { + record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes); + record_concurrent_mark_cleanup_end_work2(); +} + +void +G1CollectorPolicy:: +record_concurrent_mark_cleanup_end_work1(size_t freed_bytes, + size_t max_live_bytes) { + if (_n_marks < 2) _n_marks++; + if (G1PolicyVerbose > 0) + gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB " + " (of " SIZE_FORMAT " MB heap).", + max_live_bytes/M, _g1->capacity()/M); +} + +// The important thing about this is that it includes "os::elapsedTime". 
+void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
+  checkpoint_conc_overhead();
+  double end_time_sec = os::elapsedTime();
+  double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
+  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
+  _cur_mark_stop_world_time_ms += elapsed_time_ms;
+  _prev_collection_pause_end_ms += elapsed_time_ms;
+
+  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
+
+  _num_markings++;
+
+  // We did a marking, so reset the "since_last_mark" variables.
+  double considerConcMarkCost = 1.0;
+  // If there are available processors, concurrent activity is free...
+  if (Threads::number_of_non_daemon_threads() * 2 <
+      os::active_processor_count()) {
+    considerConcMarkCost = 0.0;
+  }
+  _n_pauses_at_mark_end = _n_pauses;
+  _n_marks_since_last_pause++;
+  _conc_mark_initiated = false;
+}
+
+void
+G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
+  if (in_young_gc_mode()) {
+    _should_revert_to_full_young_gcs = false;
+    _last_full_young_gc = true;
+    _in_marking_window = false;
+    if (adaptive_young_list_length())
+      calculate_young_list_target_config();
+  }
+}
+
+void G1CollectorPolicy::record_concurrent_pause() {
+  if (_stop_world_start > 0.0) {
+    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
+    _all_yield_times_ms->add(yield_ms);
+  }
+}
+
+void G1CollectorPolicy::record_concurrent_pause_end() {
+}
+
+void G1CollectorPolicy::record_collection_pause_end_CH_strong_roots() {
+  _cur_CH_strong_roots_end_sec = os::elapsedTime();
+  _cur_CH_strong_roots_dur_ms =
+    (_cur_CH_strong_roots_end_sec - _cur_collection_start_sec) * 1000.0;
+}
+
+void G1CollectorPolicy::record_collection_pause_end_G1_strong_roots() {
+  _cur_G1_strong_roots_end_sec = os::elapsedTime();
+  _cur_G1_strong_roots_dur_ms =
+    (_cur_G1_strong_roots_end_sec - _cur_CH_strong_roots_end_sec) * 1000.0;
+}
+
+template <class T>
+T sum_of(T* sum_arr, int start, int n, int N) {
+  T sum = (T)0;
+  for (int i = 0; i < n; i++) {
+    int j = (start + i) % N;
+    sum += sum_arr[j];
+  }
+  return sum;
+}
+
+void G1CollectorPolicy::print_par_stats (int level,
+                                         const char* str,
+                                         double* data,
+                                         bool summary) {
+  double min = data[0], max = data[0];
+  double total = 0.0;
+  int j;
+  for (j = 0; j < level; ++j)
+    gclog_or_tty->print(" ");
+  gclog_or_tty->print("[%s (ms):", str);
+  for (uint i = 0; i < ParallelGCThreads; ++i) {
+    double val = data[i];
+    if (val < min)
+      min = val;
+    if (val > max)
+      max = val;
+    total += val;
+    gclog_or_tty->print(" %3.1lf", val);
+  }
+  if (summary) {
+    gclog_or_tty->print_cr("");
+    double avg = total / (double) ParallelGCThreads;
+    gclog_or_tty->print(" ");
+    for (j = 0; j < level; ++j)
+      gclog_or_tty->print(" ");
+    gclog_or_tty->print("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
+                        avg, min, max);
+  }
+  gclog_or_tty->print_cr("]");
+}
+
+void G1CollectorPolicy::print_par_buffers (int level,
+                                           const char* str,
+                                           double* data,
+                                           bool summary) {
+  double min = data[0], max = data[0];
+  double total = 0.0;
+  int j;
+  for (j = 0; j < level; ++j)
+    gclog_or_tty->print(" ");
+  gclog_or_tty->print("[%s :", str);
+  for (uint i = 0; i < ParallelGCThreads; ++i) {
+    double val = data[i];
+    if (val < min)
+      min = val;
+    if (val > max)
+      max = val;
+    total += val;
+    gclog_or_tty->print(" %d", (int) val);
+  }
+  if (summary) {
+    gclog_or_tty->print_cr("");
+    double avg = total / (double) ParallelGCThreads;
+    gclog_or_tty->print(" ");
+    for (j = 0; j < level; ++j)
+      gclog_or_tty->print(" ");
+    gclog_or_tty->print("Sum: %d, Avg: %d, Min: %d, Max:
%d", + (int)total, (int)avg, (int)min, (int)max); + } + gclog_or_tty->print_cr("]"); +} + +void G1CollectorPolicy::print_stats (int level, + const char* str, + double value) { + for (int j = 0; j < level; ++j) + gclog_or_tty->print(" "); + gclog_or_tty->print_cr("[%s: %5.1lf ms]", str, value); +} + +void G1CollectorPolicy::print_stats (int level, + const char* str, + int value) { + for (int j = 0; j < level; ++j) + gclog_or_tty->print(" "); + gclog_or_tty->print_cr("[%s: %d]", str, value); +} + +double G1CollectorPolicy::avg_value (double* data) { + if (ParallelGCThreads > 0) { + double ret = 0.0; + for (uint i = 0; i < ParallelGCThreads; ++i) + ret += data[i]; + return ret / (double) ParallelGCThreads; + } else { + return data[0]; + } +} + +double G1CollectorPolicy::max_value (double* data) { + if (ParallelGCThreads > 0) { + double ret = data[0]; + for (uint i = 1; i < ParallelGCThreads; ++i) + if (data[i] > ret) + ret = data[i]; + return ret; + } else { + return data[0]; + } +} + +double G1CollectorPolicy::sum_of_values (double* data) { + if (ParallelGCThreads > 0) { + double sum = 0.0; + for (uint i = 0; i < ParallelGCThreads; i++) + sum += data[i]; + return sum; + } else { + return data[0]; + } +} + +double G1CollectorPolicy::max_sum (double* data1, + double* data2) { + double ret = data1[0] + data2[0]; + + if (ParallelGCThreads > 0) { + for (uint i = 1; i < ParallelGCThreads; ++i) { + double data = data1[i] + data2[i]; + if (data > ret) + ret = data; + } + } + return ret; +} + +// Anything below that is considered to be zero +#define MIN_TIMER_GRANULARITY 0.0000001 + +void G1CollectorPolicy::record_collection_pause_end(bool abandoned) { + double end_time_sec = os::elapsedTime(); + double elapsed_ms = _last_pause_time_ms; + bool parallel = ParallelGCThreads > 0; + double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0; + size_t rs_size = + _cur_collection_pause_used_regions_at_start - collection_set_size(); + size_t cur_used_bytes = _g1->used(); + assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); + bool last_pause_included_initial_mark = false; + bool update_stats = !abandoned && !_g1->evacuation_failed(); + +#ifndef PRODUCT + if (G1YoungSurvRateVerbose) { + gclog_or_tty->print_cr(""); + _short_lived_surv_rate_group->print(); + // do that for any other surv rate groups too + } +#endif // PRODUCT + + checkpoint_conc_overhead(); + + if (in_young_gc_mode()) { + last_pause_included_initial_mark = _should_initiate_conc_mark; + if (last_pause_included_initial_mark) + record_concurrent_mark_init_end_pre(0.0); + + size_t min_used_targ = + (_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta); + + if (cur_used_bytes > min_used_targ) { + if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) { + } else if (!_g1->mark_in_progress() && !_last_full_young_gc) { + _should_initiate_conc_mark = true; + } + } + + _prev_collection_pause_used_at_end_bytes = cur_used_bytes; + } + + _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0, + end_time_sec, false); + + guarantee(_cur_collection_pause_used_regions_at_start >= + collection_set_size(), + "Negative RS size?"); + + // This assert is exempted when we're doing parallel collection pauses, + // because the fragmentation caused by the parallel GC allocation buffers + // can lead to more memory being used during collection than was used + // before. Best leave this out until the fragmentation problem is fixed. 
+ // Pauses in which evacuation failed can also lead to negative + // collections, since no space is reclaimed from a region containing an + // object whose evacuation failed. + // Further, we're now always doing parallel collection. But I'm still + // leaving this here as a placeholder for a more precise assertion later. + // (DLD, 10/05.) + assert((true || parallel) // Always using GC LABs now. + || _g1->evacuation_failed() + || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes, + "Negative collection"); + + size_t freed_bytes = + _cur_collection_pause_used_at_start_bytes - cur_used_bytes; + size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes; + double survival_fraction = + (double)surviving_bytes/ + (double)_collection_set_bytes_used_before; + + _n_pauses++; + + if (update_stats) { + _recent_CH_strong_roots_times_ms->add(_cur_CH_strong_roots_dur_ms); + _recent_G1_strong_roots_times_ms->add(_cur_G1_strong_roots_dur_ms); + _recent_evac_times_ms->add(evac_ms); + _recent_pause_times_ms->add(elapsed_ms); + + _recent_rs_sizes->add(rs_size); + + // We exempt parallel collection from this check because Alloc Buffer + // fragmentation can produce negative collections. Same with evac + // failure. + // Further, we're now always doing parallel collection. But I'm still + // leaving this here as a placeholder for a more precise assertion later. + // (DLD, 10/05. + assert((true || parallel) + || _g1->evacuation_failed() + || surviving_bytes <= _collection_set_bytes_used_before, + "Or else negative collection!"); + _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before); + _recent_CS_bytes_surviving->add(surviving_bytes); + + // this is where we update the allocation rate of the application + double app_time_ms = + (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms); + if (app_time_ms < MIN_TIMER_GRANULARITY) { + // This usually happens due to the timer not having the required + // granularity. Some Linuxes are the usual culprits. + // We'll just set it to something (arbitrarily) small. 
+ app_time_ms = 1.0; + } + size_t regions_allocated = + (_region_num_young - _prev_region_num_young) + + (_region_num_tenured - _prev_region_num_tenured); + double alloc_rate_ms = (double) regions_allocated / app_time_ms; + _alloc_rate_ms_seq->add(alloc_rate_ms); + _prev_region_num_young = _region_num_young; + _prev_region_num_tenured = _region_num_tenured; + + double interval_ms = + (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; + update_recent_gc_times(end_time_sec, elapsed_ms); + _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; + assert(recent_avg_pause_time_ratio() < 1.00, "All GC?"); + } + + if (G1PolicyVerbose > 1) { + gclog_or_tty->print_cr(" Recording collection pause(%d)", _n_pauses); + } + + PauseSummary* summary; + if (abandoned) { + summary = _abandoned_summary; + } else { + summary = _summary; + } + + double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms); + double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms); + double scan_only_time = avg_value(_par_last_scan_only_times_ms); + double scan_only_regions_scanned = + sum_of_values(_par_last_scan_only_regions_scanned); + double update_rs_time = avg_value(_par_last_update_rs_times_ms); + double update_rs_processed_buffers = + sum_of_values(_par_last_update_rs_processed_buffers); + double scan_rs_time = avg_value(_par_last_scan_rs_times_ms); + double obj_copy_time = avg_value(_par_last_obj_copy_times_ms); + double termination_time = avg_value(_par_last_termination_times_ms); + + double parallel_other_time = _cur_collection_par_time_ms - + (update_rs_time + ext_root_scan_time + mark_stack_scan_time + + scan_only_time + scan_rs_time + obj_copy_time + termination_time); + if (update_stats) { + MainBodySummary* body_summary = summary->main_body_summary(); + guarantee(body_summary != NULL, "should not be null!"); + + if (_satb_drain_time_set) + body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms); + else + body_summary->record_satb_drain_time_ms(0.0); + body_summary->record_ext_root_scan_time_ms(ext_root_scan_time); + body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time); + body_summary->record_scan_only_time_ms(scan_only_time); + body_summary->record_update_rs_time_ms(update_rs_time); + body_summary->record_scan_rs_time_ms(scan_rs_time); + body_summary->record_obj_copy_time_ms(obj_copy_time); + if (parallel) { + body_summary->record_parallel_time_ms(_cur_collection_par_time_ms); + body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms); + body_summary->record_termination_time_ms(termination_time); + body_summary->record_parallel_other_time_ms(parallel_other_time); + } + body_summary->record_mark_closure_time_ms(_mark_closure_time_ms); + } + + if (G1PolicyVerbose > 1) { + gclog_or_tty->print_cr(" ET: %10.6f ms (avg: %10.6f ms)\n" + " CH Strong: %10.6f ms (avg: %10.6f ms)\n" + " G1 Strong: %10.6f ms (avg: %10.6f ms)\n" + " Evac: %10.6f ms (avg: %10.6f ms)\n" + " ET-RS: %10.6f ms (avg: %10.6f ms)\n" + " |RS|: " SIZE_FORMAT, + elapsed_ms, recent_avg_time_for_pauses_ms(), + _cur_CH_strong_roots_dur_ms, recent_avg_time_for_CH_strong_ms(), + _cur_G1_strong_roots_dur_ms, recent_avg_time_for_G1_strong_ms(), + evac_ms, recent_avg_time_for_evac_ms(), + scan_rs_time, + recent_avg_time_for_pauses_ms() - + recent_avg_time_for_G1_strong_ms(), + rs_size); + + gclog_or_tty->print_cr(" Used at start: " SIZE_FORMAT"K" + " At end " SIZE_FORMAT "K\n" + " garbage : " SIZE_FORMAT "K" + " of " SIZE_FORMAT "K\n" + " survival : %6.2f%% 
(%6.2f%% avg)", + _cur_collection_pause_used_at_start_bytes/K, + _g1->used()/K, freed_bytes/K, + _collection_set_bytes_used_before/K, + survival_fraction*100.0, + recent_avg_survival_fraction()*100.0); + gclog_or_tty->print_cr(" Recent %% gc pause time: %6.2f", + recent_avg_pause_time_ratio() * 100.0); + } + + double other_time_ms = elapsed_ms; + + if (!abandoned) { + if (_satb_drain_time_set) + other_time_ms -= _cur_satb_drain_time_ms; + + if (parallel) + other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms; + else + other_time_ms -= + update_rs_time + + ext_root_scan_time + mark_stack_scan_time + scan_only_time + + scan_rs_time + obj_copy_time; + } + + if (PrintGCDetails) { + gclog_or_tty->print_cr("%s%s, %1.8lf secs]", + abandoned ? " (abandoned)" : "", + (last_pause_included_initial_mark) ? " (initial-mark)" : "", + elapsed_ms / 1000.0); + + if (!abandoned) { + if (_satb_drain_time_set) { + print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms); + } + if (_last_satb_drain_processed_buffers >= 0) { + print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers); + } + if (parallel) { + print_stats(1, "Parallel Time", _cur_collection_par_time_ms); + print_par_stats(2, "Update RS (Start)", _par_last_update_rs_start_times_ms, false); + print_par_stats(2, "Update RS", _par_last_update_rs_times_ms); + if (G1RSBarrierUseQueue) + print_par_buffers(3, "Processed Buffers", + _par_last_update_rs_processed_buffers, true); + print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms); + print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms); + print_par_stats(2, "Scan-Only Scanning", _par_last_scan_only_times_ms); + print_par_buffers(3, "Scan-Only Regions", + _par_last_scan_only_regions_scanned, true); + print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms); + print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms); + print_par_stats(2, "Termination", _par_last_termination_times_ms); + print_stats(2, "Other", parallel_other_time); + print_stats(1, "Clear CT", _cur_clear_ct_time_ms); + } else { + print_stats(1, "Update RS", update_rs_time); + if (G1RSBarrierUseQueue) + print_stats(2, "Processed Buffers", + (int)update_rs_processed_buffers); + print_stats(1, "Ext Root Scanning", ext_root_scan_time); + print_stats(1, "Mark Stack Scanning", mark_stack_scan_time); + print_stats(1, "Scan-Only Scanning", scan_only_time); + print_stats(1, "Scan RS", scan_rs_time); + print_stats(1, "Object Copying", obj_copy_time); + } + } + print_stats(1, "Other", other_time_ms); + for (int i = 0; i < _aux_num; ++i) { + if (_cur_aux_times_set[i]) { + char buffer[96]; + sprintf(buffer, "Aux%d", i); + print_stats(1, buffer, _cur_aux_times_ms[i]); + } + } + } + if (PrintGCDetails) + gclog_or_tty->print(" ["); + if (PrintGC || PrintGCDetails) + _g1->print_size_transition(gclog_or_tty, + _cur_collection_pause_used_at_start_bytes, + _g1->used(), _g1->capacity()); + if (PrintGCDetails) + gclog_or_tty->print_cr("]"); + + _all_pause_times_ms->add(elapsed_ms); + if (update_stats) { + summary->record_total_time_ms(elapsed_ms); + summary->record_other_time_ms(other_time_ms); + } + for (int i = 0; i < _aux_num; ++i) + if (_cur_aux_times_set[i]) + _all_aux_times_ms[i].add(_cur_aux_times_ms[i]); + + // Reset marks-between-pauses counter. + _n_marks_since_last_pause = 0; + + // Update the efficiency-since-mark vars. 
+ double proc_ms = elapsed_ms * (double) _parallel_gc_threads; + if (elapsed_ms < MIN_TIMER_GRANULARITY) { + // This usually happens due to the timer not having the required + // granularity. Some Linuxes are the usual culprits. + // We'll just set it to something (arbitrarily) small. + proc_ms = 1.0; + } + double cur_efficiency = (double) freed_bytes / proc_ms; + + bool new_in_marking_window = _in_marking_window; + bool new_in_marking_window_im = false; + if (_should_initiate_conc_mark) { + new_in_marking_window = true; + new_in_marking_window_im = true; + } + + if (in_young_gc_mode()) { + if (_last_full_young_gc) { + set_full_young_gcs(false); + _last_full_young_gc = false; + } + + if ( !_last_young_gc_full ) { + if ( _should_revert_to_full_young_gcs || + _known_garbage_ratio < 0.05 || + (adaptive_young_list_length() && + (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) { + set_full_young_gcs(true); + } + } + _should_revert_to_full_young_gcs = false; + + if (_last_young_gc_full && !_during_marking) + _young_gc_eff_seq->add(cur_efficiency); + } + + _short_lived_surv_rate_group->start_adding_regions(); + // do that for any other surv rate groupsx + + // + + if (update_stats) { + double pause_time_ms = elapsed_ms; + + size_t diff = 0; + if (_max_pending_cards >= _pending_cards) + diff = _max_pending_cards - _pending_cards; + _pending_card_diff_seq->add((double) diff); + + double cost_per_card_ms = 0.0; + if (_pending_cards > 0) { + cost_per_card_ms = update_rs_time / (double) _pending_cards; + _cost_per_card_ms_seq->add(cost_per_card_ms); + } + + double cost_per_scan_only_region_ms = 0.0; + if (scan_only_regions_scanned > 0.0) { + cost_per_scan_only_region_ms = + scan_only_time / scan_only_regions_scanned; + if (_in_marking_window_im) + _cost_per_scan_only_region_ms_during_cm_seq->add(cost_per_scan_only_region_ms); + else + _cost_per_scan_only_region_ms_seq->add(cost_per_scan_only_region_ms); + } + + size_t cards_scanned = _g1->cards_scanned(); + + double cost_per_entry_ms = 0.0; + if (cards_scanned > 10) { + cost_per_entry_ms = scan_rs_time / (double) cards_scanned; + if (_last_young_gc_full) + _cost_per_entry_ms_seq->add(cost_per_entry_ms); + else + _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms); + } + + if (_max_rs_lengths > 0) { + double cards_per_entry_ratio = + (double) cards_scanned / (double) _max_rs_lengths; + if (_last_young_gc_full) + _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); + else + _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); + } + + size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths; + if (rs_length_diff >= 0) + _rs_length_diff_seq->add((double) rs_length_diff); + + size_t copied_bytes = surviving_bytes; + double cost_per_byte_ms = 0.0; + if (copied_bytes > 0) { + cost_per_byte_ms = obj_copy_time / (double) copied_bytes; + if (_in_marking_window) + _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); + else + _cost_per_byte_ms_seq->add(cost_per_byte_ms); + } + + double all_other_time_ms = pause_time_ms - + (update_rs_time + scan_only_time + scan_rs_time + obj_copy_time + + _mark_closure_time_ms + termination_time); + + double young_other_time_ms = 0.0; + if (_recorded_young_regions > 0) { + young_other_time_ms = + _recorded_young_cset_choice_time_ms + + _recorded_young_free_cset_time_ms; + _young_other_cost_per_region_ms_seq->add(young_other_time_ms / + (double) _recorded_young_regions); + } + double non_young_other_time_ms = 0.0; + if (_recorded_non_young_regions > 0) { + 
non_young_other_time_ms = + _recorded_non_young_cset_choice_time_ms + + _recorded_non_young_free_cset_time_ms; + + _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms / + (double) _recorded_non_young_regions); + } + + double constant_other_time_ms = all_other_time_ms - + (young_other_time_ms + non_young_other_time_ms); + _constant_other_time_ms_seq->add(constant_other_time_ms); + + double survival_ratio = 0.0; + if (_bytes_in_collection_set_before_gc > 0) { + survival_ratio = (double) bytes_in_to_space_during_gc() / + (double) _bytes_in_collection_set_before_gc; + } + + _pending_cards_seq->add((double) _pending_cards); + _scanned_cards_seq->add((double) cards_scanned); + _rs_lengths_seq->add((double) _max_rs_lengths); + + double expensive_region_limit_ms = + (double) MaxGCPauseMillis - predict_constant_other_time_ms(); + if (expensive_region_limit_ms < 0.0) { + // this means that the other time was predicted to be longer than + // than the max pause time + expensive_region_limit_ms = (double) MaxGCPauseMillis; + } + _expensive_region_limit_ms = expensive_region_limit_ms; + + if (PREDICTIONS_VERBOSE) { + gclog_or_tty->print_cr(""); + gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d " + "REGIONS %d %d %d %d " + "PENDING_CARDS %d %d " + "CARDS_SCANNED %d %d " + "RS_LENGTHS %d %d " + "SCAN_ONLY_SCAN %1.6lf %1.6lf " + "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf " + "SURVIVAL_RATIO %1.6lf %1.6lf " + "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf " + "OTHER_YOUNG %1.6lf %1.6lf " + "OTHER_NON_YOUNG %1.6lf %1.6lf " + "VTIME_DIFF %1.6lf TERMINATION %1.6lf " + "ELAPSED %1.6lf %1.6lf ", + _cur_collection_start_sec, + (!_last_young_gc_full) ? 2 : + (last_pause_included_initial_mark) ? 1 : 0, + _recorded_region_num, + _recorded_young_regions, + _recorded_scan_only_regions, + _recorded_non_young_regions, + _predicted_pending_cards, _pending_cards, + _predicted_cards_scanned, cards_scanned, + _predicted_rs_lengths, _max_rs_lengths, + _predicted_scan_only_scan_time_ms, scan_only_time, + _predicted_rs_update_time_ms, update_rs_time, + _predicted_rs_scan_time_ms, scan_rs_time, + _predicted_survival_ratio, survival_ratio, + _predicted_object_copy_time_ms, obj_copy_time, + _predicted_constant_other_time_ms, constant_other_time_ms, + _predicted_young_other_time_ms, young_other_time_ms, + _predicted_non_young_other_time_ms, + non_young_other_time_ms, + _vtime_diff_ms, termination_time, + _predicted_pause_time_ms, elapsed_ms); + } + + if (G1PolicyVerbose > 0) { + gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms", + _predicted_pause_time_ms, + (_within_target) ? 
"within" : "outside", + elapsed_ms); + } + + } + + _in_marking_window = new_in_marking_window; + _in_marking_window_im = new_in_marking_window_im; + _free_regions_at_end_of_collection = _g1->free_regions(); + _scan_only_regions_at_end_of_collection = _g1->young_list_length(); + calculate_young_list_min_length(); + calculate_young_list_target_config(); + + // + + _target_pause_time_ms = -1.0; +} + +// + +double +G1CollectorPolicy:: +predict_young_collection_elapsed_time_ms(size_t adjustment) { + guarantee( adjustment == 0 || adjustment == 1, "invariant" ); + + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + size_t young_num = g1h->young_list_length(); + if (young_num == 0) + return 0.0; + + young_num += adjustment; + size_t pending_cards = predict_pending_cards(); + size_t rs_lengths = g1h->young_list_sampled_rs_lengths() + + predict_rs_length_diff(); + size_t card_num; + if (full_young_gcs()) + card_num = predict_young_card_num(rs_lengths); + else + card_num = predict_non_young_card_num(rs_lengths); + size_t young_byte_size = young_num * HeapRegion::GrainBytes; + double accum_yg_surv_rate = + _short_lived_surv_rate_group->accum_surv_rate(adjustment); + + size_t bytes_to_copy = + (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes); + + return + predict_rs_update_time_ms(pending_cards) + + predict_rs_scan_time_ms(card_num) + + predict_object_copy_time_ms(bytes_to_copy) + + predict_young_other_time_ms(young_num) + + predict_constant_other_time_ms(); +} + +double +G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) { + size_t rs_length = predict_rs_length_diff(); + size_t card_num; + if (full_young_gcs()) + card_num = predict_young_card_num(rs_length); + else + card_num = predict_non_young_card_num(rs_length); + return predict_base_elapsed_time_ms(pending_cards, card_num); +} + +double +G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards, + size_t scanned_cards) { + return + predict_rs_update_time_ms(pending_cards) + + predict_rs_scan_time_ms(scanned_cards) + + predict_constant_other_time_ms(); +} + +double +G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, + bool young) { + size_t rs_length = hr->rem_set()->occupied(); + size_t card_num; + if (full_young_gcs()) + card_num = predict_young_card_num(rs_length); + else + card_num = predict_non_young_card_num(rs_length); + size_t bytes_to_copy = predict_bytes_to_copy(hr); + + double region_elapsed_time_ms = + predict_rs_scan_time_ms(card_num) + + predict_object_copy_time_ms(bytes_to_copy); + + if (young) + region_elapsed_time_ms += predict_young_other_time_ms(1); + else + region_elapsed_time_ms += predict_non_young_other_time_ms(1); + + return region_elapsed_time_ms; +} + +size_t +G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) { + size_t bytes_to_copy; + if (hr->is_marked()) + bytes_to_copy = hr->max_live_bytes(); + else { + guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1, + "invariant" ); + int age = hr->age_in_surv_rate_group(); + double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); + bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate); + } + + return bytes_to_copy; +} + +void +G1CollectorPolicy::start_recording_regions() { + _recorded_rs_lengths = 0; + _recorded_scan_only_regions = 0; + _recorded_young_regions = 0; + _recorded_non_young_regions = 0; + +#if PREDICTIONS_VERBOSE + _predicted_rs_lengths = 0; + _predicted_cards_scanned = 0; + + _recorded_marked_bytes = 0; + _recorded_young_bytes = 0; + _predicted_bytes_to_copy = 0; 
+#endif // PREDICTIONS_VERBOSE +} + +void +G1CollectorPolicy::record_cset_region(HeapRegion* hr, bool young) { + if (young) { + ++_recorded_young_regions; + } else { + ++_recorded_non_young_regions; + } +#if PREDICTIONS_VERBOSE + if (young) { + _recorded_young_bytes += hr->used(); + } else { + _recorded_marked_bytes += hr->max_live_bytes(); + } + _predicted_bytes_to_copy += predict_bytes_to_copy(hr); +#endif // PREDICTIONS_VERBOSE + + size_t rs_length = hr->rem_set()->occupied(); + _recorded_rs_lengths += rs_length; +} + +void +G1CollectorPolicy::record_scan_only_regions(size_t scan_only_length) { + _recorded_scan_only_regions = scan_only_length; +} + +void +G1CollectorPolicy::end_recording_regions() { +#if PREDICTIONS_VERBOSE + _predicted_pending_cards = predict_pending_cards(); + _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff(); + if (full_young_gcs()) + _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths); + else + _predicted_cards_scanned += + predict_non_young_card_num(_predicted_rs_lengths); + _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions; + + _predicted_scan_only_scan_time_ms = + predict_scan_only_time_ms(_recorded_scan_only_regions); + _predicted_rs_update_time_ms = + predict_rs_update_time_ms(_g1->pending_card_num()); + _predicted_rs_scan_time_ms = + predict_rs_scan_time_ms(_predicted_cards_scanned); + _predicted_object_copy_time_ms = + predict_object_copy_time_ms(_predicted_bytes_to_copy); + _predicted_constant_other_time_ms = + predict_constant_other_time_ms(); + _predicted_young_other_time_ms = + predict_young_other_time_ms(_recorded_young_regions); + _predicted_non_young_other_time_ms = + predict_non_young_other_time_ms(_recorded_non_young_regions); + + _predicted_pause_time_ms = + _predicted_scan_only_scan_time_ms + + _predicted_rs_update_time_ms + + _predicted_rs_scan_time_ms + + _predicted_object_copy_time_ms + + _predicted_constant_other_time_ms + + _predicted_young_other_time_ms + + _predicted_non_young_other_time_ms; +#endif // PREDICTIONS_VERBOSE +} + +void G1CollectorPolicy::check_if_region_is_too_expensive(double + predicted_time_ms) { + // I don't think we need to do this when in young GC mode since + // marking will be initiated next time we hit the soft limit anyway... 
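+  // As a rough worked example (the figures here are illustrative only):
+  // with MaxGCPauseMillis = 200 and predict_constant_other_time_ms()
+  // coming out near 50.0, _expensive_region_limit_ms would be about
+  // 150.0 ms, so a single region predicted to take longer than that is
+  // considered too expensive and we steer back towards fully-young
+  // collections (or ask for a new marking cycle, below).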
+ if (predicted_time_ms > _expensive_region_limit_ms) { + if (!in_young_gc_mode()) { + set_full_young_gcs(true); + _should_initiate_conc_mark = true; + } else + // no point in doing another partial one + _should_revert_to_full_young_gcs = true; + } +} + +// + + +void G1CollectorPolicy::update_recent_gc_times(double end_time_sec, + double elapsed_ms) { + _recent_gc_times_ms->add(elapsed_ms); + _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); + _prev_collection_pause_end_ms = end_time_sec * 1000.0; +} + +double G1CollectorPolicy::recent_avg_time_for_pauses_ms() { + if (_recent_pause_times_ms->num() == 0) return (double) MaxGCPauseMillis; + else return _recent_pause_times_ms->avg(); +} + +double G1CollectorPolicy::recent_avg_time_for_CH_strong_ms() { + if (_recent_CH_strong_roots_times_ms->num() == 0) + return (double)MaxGCPauseMillis/3.0; + else return _recent_CH_strong_roots_times_ms->avg(); +} + +double G1CollectorPolicy::recent_avg_time_for_G1_strong_ms() { + if (_recent_G1_strong_roots_times_ms->num() == 0) + return (double)MaxGCPauseMillis/3.0; + else return _recent_G1_strong_roots_times_ms->avg(); +} + +double G1CollectorPolicy::recent_avg_time_for_evac_ms() { + if (_recent_evac_times_ms->num() == 0) return (double)MaxGCPauseMillis/3.0; + else return _recent_evac_times_ms->avg(); +} + +int G1CollectorPolicy::number_of_recent_gcs() { + assert(_recent_CH_strong_roots_times_ms->num() == + _recent_G1_strong_roots_times_ms->num(), "Sequence out of sync"); + assert(_recent_G1_strong_roots_times_ms->num() == + _recent_evac_times_ms->num(), "Sequence out of sync"); + assert(_recent_evac_times_ms->num() == + _recent_pause_times_ms->num(), "Sequence out of sync"); + assert(_recent_pause_times_ms->num() == + _recent_CS_bytes_used_before->num(), "Sequence out of sync"); + assert(_recent_CS_bytes_used_before->num() == + _recent_CS_bytes_surviving->num(), "Sequence out of sync"); + return _recent_pause_times_ms->num(); +} + +double G1CollectorPolicy::recent_avg_survival_fraction() { + return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving, + _recent_CS_bytes_used_before); +} + +double G1CollectorPolicy::last_survival_fraction() { + return last_survival_fraction_work(_recent_CS_bytes_surviving, + _recent_CS_bytes_used_before); +} + +double +G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving, + TruncatedSeq* before) { + assert(surviving->num() == before->num(), "Sequence out of sync"); + if (before->sum() > 0.0) { + double recent_survival_rate = surviving->sum() / before->sum(); + // We exempt parallel collection from this check because Alloc Buffer + // fragmentation can produce negative collections. + // Further, we're now always doing parallel collection. But I'm still + // leaving this here as a placeholder for a more precise assertion later. + // (DLD, 10/05.) + assert((true || ParallelGCThreads > 0) || + _g1->evacuation_failed() || + recent_survival_rate <= 1.0, "Or bad frac"); + return recent_survival_rate; + } else { + return 1.0; // Be conservative. + } +} + +double +G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving, + TruncatedSeq* before) { + assert(surviving->num() == before->num(), "Sequence out of sync"); + if (surviving->num() > 0 && before->last() > 0.0) { + double last_survival_rate = surviving->last() / before->last(); + // We exempt parallel collection from this check because Alloc Buffer + // fragmentation can produce negative collections. + // Further, we're now always doing parallel collection. 
But I'm still + // leaving this here as a placeholder for a more precise assertion later. + // (DLD, 10/05.) + assert((true || ParallelGCThreads > 0) || + last_survival_rate <= 1.0, "Or bad frac"); + return last_survival_rate; + } else { + return 1.0; + } +} + +static const int survival_min_obs = 5; +static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 }; +static const double min_survival_rate = 0.1; + +double +G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg, + double latest) { + double res = avg; + if (number_of_recent_gcs() < survival_min_obs) { + res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]); + } + res = MAX2(res, latest); + res = MAX2(res, min_survival_rate); + // In the parallel case, LAB fragmentation can produce "negative + // collections"; so can evac failure. Cap at 1.0 + res = MIN2(res, 1.0); + return res; +} + +size_t G1CollectorPolicy::expansion_amount() { + if ((int)(recent_avg_pause_time_ratio() * 100.0) > G1GCPercent) { + // We will double the existing space, or take + // G1ExpandByPercentOfAvailable % of the available expansion + // space, whichever is smaller, bounded below by a minimum + // expansion (unless that's all that's left.) + const size_t min_expand_bytes = 1*M; + size_t reserved_bytes = _g1->g1_reserved_obj_bytes(); + size_t committed_bytes = _g1->capacity(); + size_t uncommitted_bytes = reserved_bytes - committed_bytes; + size_t expand_bytes; + size_t expand_bytes_via_pct = + uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; + expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); + expand_bytes = MAX2(expand_bytes, min_expand_bytes); + expand_bytes = MIN2(expand_bytes, uncommitted_bytes); + if (G1PolicyVerbose > 1) { + gclog_or_tty->print("Decided to expand: ratio = %5.2f, " + "committed = %d%s, uncommited = %d%s, via pct = %d%s.\n" + " Answer = %d.\n", + recent_avg_pause_time_ratio(), + byte_size_in_proper_unit(committed_bytes), + proper_unit_for_byte_size(committed_bytes), + byte_size_in_proper_unit(uncommitted_bytes), + proper_unit_for_byte_size(uncommitted_bytes), + byte_size_in_proper_unit(expand_bytes_via_pct), + proper_unit_for_byte_size(expand_bytes_via_pct), + byte_size_in_proper_unit(expand_bytes), + proper_unit_for_byte_size(expand_bytes)); + } + return expand_bytes; + } else { + return 0; + } +} + +void G1CollectorPolicy::note_start_of_mark_thread() { + _mark_thread_startup_sec = os::elapsedTime(); +} + +class CountCSClosure: public HeapRegionClosure { + G1CollectorPolicy* _g1_policy; +public: + CountCSClosure(G1CollectorPolicy* g1_policy) : + _g1_policy(g1_policy) {} + bool doHeapRegion(HeapRegion* r) { + _g1_policy->_bytes_in_collection_set_before_gc += r->used(); + return false; + } +}; + +void G1CollectorPolicy::count_CS_bytes_used() { + CountCSClosure cs_closure(this); + _g1->collection_set_iterate(&cs_closure); +} + +static void print_indent(int level) { + for (int j = 0; j < level+1; ++j) + gclog_or_tty->print(" "); +} + +void G1CollectorPolicy::print_summary (int level, + const char* str, + NumberSeq* seq) const { + double sum = seq->sum(); + print_indent(level); + gclog_or_tty->print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)", + str, sum / 1000.0, seq->avg()); +} + +void G1CollectorPolicy::print_summary_sd (int level, + const char* str, + NumberSeq* seq) const { + print_summary(level, str, seq); + print_indent(level + 5); + gclog_or_tty->print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)", + seq->num(), seq->sd(), seq->maximum()); +} + +void 
G1CollectorPolicy::check_other_times(int level, + NumberSeq* other_times_ms, + NumberSeq* calc_other_times_ms) const { + bool should_print = false; + + double max_sum = MAX2(fabs(other_times_ms->sum()), + fabs(calc_other_times_ms->sum())); + double min_sum = MIN2(fabs(other_times_ms->sum()), + fabs(calc_other_times_ms->sum())); + double sum_ratio = max_sum / min_sum; + if (sum_ratio > 1.1) { + should_print = true; + print_indent(level + 1); + gclog_or_tty->print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###"); + } + + double max_avg = MAX2(fabs(other_times_ms->avg()), + fabs(calc_other_times_ms->avg())); + double min_avg = MIN2(fabs(other_times_ms->avg()), + fabs(calc_other_times_ms->avg())); + double avg_ratio = max_avg / min_avg; + if (avg_ratio > 1.1) { + should_print = true; + print_indent(level + 1); + gclog_or_tty->print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###"); + } + + if (other_times_ms->sum() < -0.01) { + print_indent(level + 1); + gclog_or_tty->print_cr("## RECORDED OTHER SUM IS NEGATIVE ###"); + } + + if (other_times_ms->avg() < -0.01) { + print_indent(level + 1); + gclog_or_tty->print_cr("## RECORDED OTHER AVG IS NEGATIVE ###"); + } + + if (calc_other_times_ms->sum() < -0.01) { + should_print = true; + print_indent(level + 1); + gclog_or_tty->print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###"); + } + + if (calc_other_times_ms->avg() < -0.01) { + should_print = true; + print_indent(level + 1); + gclog_or_tty->print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###"); + } + + if (should_print) + print_summary(level, "Other(Calc)", calc_other_times_ms); +} + +void G1CollectorPolicy::print_summary(PauseSummary* summary) const { + bool parallel = ParallelGCThreads > 0; + MainBodySummary* body_summary = summary->main_body_summary(); + if (summary->get_total_seq()->num() > 0) { + print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq()); + if (body_summary != NULL) { + print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq()); + if (parallel) { + print_summary(1, "Parallel Time", body_summary->get_parallel_seq()); + print_summary(2, "Update RS", body_summary->get_update_rs_seq()); + print_summary(2, "Ext Root Scanning", + body_summary->get_ext_root_scan_seq()); + print_summary(2, "Mark Stack Scanning", + body_summary->get_mark_stack_scan_seq()); + print_summary(2, "Scan-Only Scanning", + body_summary->get_scan_only_seq()); + print_summary(2, "Scan RS", body_summary->get_scan_rs_seq()); + print_summary(2, "Object Copy", body_summary->get_obj_copy_seq()); + print_summary(2, "Termination", body_summary->get_termination_seq()); + print_summary(2, "Other", body_summary->get_parallel_other_seq()); + { + NumberSeq* other_parts[] = { + body_summary->get_update_rs_seq(), + body_summary->get_ext_root_scan_seq(), + body_summary->get_mark_stack_scan_seq(), + body_summary->get_scan_only_seq(), + body_summary->get_scan_rs_seq(), + body_summary->get_obj_copy_seq(), + body_summary->get_termination_seq() + }; + NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(), + 7, other_parts); + check_other_times(2, body_summary->get_parallel_other_seq(), + &calc_other_times_ms); + } + print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq()); + print_summary(1, "Clear CT", body_summary->get_clear_ct_seq()); + } else { + print_summary(1, "Update RS", body_summary->get_update_rs_seq()); + print_summary(1, "Ext Root Scanning", + body_summary->get_ext_root_scan_seq()); + print_summary(1, "Mark Stack Scanning", + body_summary->get_mark_stack_scan_seq()); + 
print_summary(1, "Scan-Only Scanning", + body_summary->get_scan_only_seq()); + print_summary(1, "Scan RS", body_summary->get_scan_rs_seq()); + print_summary(1, "Object Copy", body_summary->get_obj_copy_seq()); + } + } + print_summary(1, "Other", summary->get_other_seq()); + { + NumberSeq calc_other_times_ms; + if (body_summary != NULL) { + // not abandoned + if (parallel) { + // parallel + NumberSeq* other_parts[] = { + body_summary->get_satb_drain_seq(), + body_summary->get_parallel_seq(), + body_summary->get_clear_ct_seq() + }; + calc_other_times_ms = NumberSeq(summary->get_total_seq(), + 3, other_parts); + } else { + // serial + NumberSeq* other_parts[] = { + body_summary->get_satb_drain_seq(), + body_summary->get_update_rs_seq(), + body_summary->get_ext_root_scan_seq(), + body_summary->get_mark_stack_scan_seq(), + body_summary->get_scan_only_seq(), + body_summary->get_scan_rs_seq(), + body_summary->get_obj_copy_seq() + }; + calc_other_times_ms = NumberSeq(summary->get_total_seq(), + 7, other_parts); + } + } else { + // abandoned + calc_other_times_ms = NumberSeq(); + } + check_other_times(1, summary->get_other_seq(), &calc_other_times_ms); + } + } else { + print_indent(0); + gclog_or_tty->print_cr("none"); + } + gclog_or_tty->print_cr(""); +} + +void +G1CollectorPolicy::print_abandoned_summary(PauseSummary* summary) const { + bool printed = false; + if (summary->get_total_seq()->num() > 0) { + printed = true; + print_summary(summary); + } + if (!printed) { + print_indent(0); + gclog_or_tty->print_cr("none"); + gclog_or_tty->print_cr(""); + } +} + +void G1CollectorPolicy::print_tracing_info() const { + if (TraceGen0Time) { + gclog_or_tty->print_cr("ALL PAUSES"); + print_summary_sd(0, "Total", _all_pause_times_ms); + gclog_or_tty->print_cr(""); + gclog_or_tty->print_cr(""); + gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num); + gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num); + gclog_or_tty->print_cr(""); + + gclog_or_tty->print_cr("EVACUATION PAUSES"); + print_summary(_summary); + + gclog_or_tty->print_cr("ABANDONED PAUSES"); + print_abandoned_summary(_abandoned_summary); + + gclog_or_tty->print_cr("MISC"); + print_summary_sd(0, "Stop World", _all_stop_world_times_ms); + print_summary_sd(0, "Yields", _all_yield_times_ms); + for (int i = 0; i < _aux_num; ++i) { + if (_all_aux_times_ms[i].num() > 0) { + char buffer[96]; + sprintf(buffer, "Aux%d", i); + print_summary_sd(0, buffer, &_all_aux_times_ms[i]); + } + } + + size_t all_region_num = _region_num_young + _region_num_tenured; + gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), " + "Tenured %8d (%6.2lf%%)", + all_region_num, + _region_num_young, + (double) _region_num_young / (double) all_region_num * 100.0, + _region_num_tenured, + (double) _region_num_tenured / (double) all_region_num * 100.0); + + if (!G1RSBarrierUseQueue) { + gclog_or_tty->print_cr("Of %d times conc refinement was enabled, %d (%7.2f%%) " + "did zero traversals.", + _conc_refine_enabled, _conc_refine_zero_traversals, + _conc_refine_enabled > 0 ? 
+ 100.0 * (float)_conc_refine_zero_traversals/ + (float)_conc_refine_enabled : 0.0); + gclog_or_tty->print_cr(" Max # of traversals = %d.", + _conc_refine_max_traversals); + gclog_or_tty->print_cr(""); + } + } + if (TraceGen1Time) { + if (_all_full_gc_times_ms->num() > 0) { + gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s", + _all_full_gc_times_ms->num(), + _all_full_gc_times_ms->sum() / 1000.0); + gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg()); + gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]", + _all_full_gc_times_ms->sd(), + _all_full_gc_times_ms->maximum()); + } + } +} + +void G1CollectorPolicy::print_yg_surv_rate_info() const { +#ifndef PRODUCT + _short_lived_surv_rate_group->print_surv_rate_summary(); + // add this call for any other surv rate groups +#endif // PRODUCT +} + +void G1CollectorPolicy::update_conc_refine_data() { + unsigned traversals = _g1->concurrent_g1_refine()->disable(); + if (traversals == 0) _conc_refine_zero_traversals++; + _conc_refine_max_traversals = MAX2(_conc_refine_max_traversals, + (size_t)traversals); + + if (G1PolicyVerbose > 1) + gclog_or_tty->print_cr("Did a CR traversal series: %d traversals.", traversals); + double multiplier = 1.0; + if (traversals == 0) { + multiplier = 4.0; + } else if (traversals > (size_t)G1ConcRefineTargTraversals) { + multiplier = 1.0/1.5; + } else if (traversals < (size_t)G1ConcRefineTargTraversals) { + multiplier = 1.5; + } + if (G1PolicyVerbose > 1) { + gclog_or_tty->print_cr(" Multiplier = %7.2f.", multiplier); + gclog_or_tty->print(" Delta went from %d regions to ", + _conc_refine_current_delta); + } + _conc_refine_current_delta = + MIN2(_g1->n_regions(), + (size_t)(_conc_refine_current_delta * multiplier)); + _conc_refine_current_delta = + MAX2(_conc_refine_current_delta, (size_t)1); + if (G1PolicyVerbose > 1) { + gclog_or_tty->print_cr("%d regions.", _conc_refine_current_delta); + } + _conc_refine_enabled++; +} + +bool +G1CollectorPolicy::should_add_next_region_to_young_list() { + assert(in_young_gc_mode(), "should be in young GC mode"); + bool ret; + size_t young_list_length = _g1->young_list_length(); + size_t young_list_max_length = _young_list_target_length; + if (G1FixedEdenSize) { + young_list_max_length -= _max_survivor_regions; + } + if (young_list_length < young_list_max_length) { + ret = true; + ++_region_num_young; + } else { + ret = false; + ++_region_num_tenured; + } + + return ret; +} + +#ifndef PRODUCT +// for debugging, bit of a hack... +static char* +region_num_to_mbs(int length) { + static char buffer[64]; + double bytes = (double) (length * HeapRegion::GrainBytes); + double mbs = bytes / (double) (1024 * 1024); + sprintf(buffer, "%7.2lfMB", mbs); + return buffer; +} +#endif // PRODUCT + +void +G1CollectorPolicy::checkpoint_conc_overhead() { + double conc_overhead = 0.0; + if (G1AccountConcurrentOverhead) + conc_overhead = COTracker::totalPredConcOverhead(); + _mmu_tracker->update_conc_overhead(conc_overhead); +#if 0 + gclog_or_tty->print(" CO %1.4lf TARGET %1.4lf", + conc_overhead, _mmu_tracker->max_gc_time()); +#endif +} + + +size_t G1CollectorPolicy::max_regions(int purpose) { + switch (purpose) { + case GCAllocForSurvived: + return _max_survivor_regions; + case GCAllocForTenured: + return REGIONS_UNLIMITED; + default: + ShouldNotReachHere(); + return REGIONS_UNLIMITED; + }; +} + +// Calculates survivor space parameters. 
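+// As an illustrative example (numbers made up, assuming 1M heap regions):
+// with a young list target of 80 regions and SurvivorRatio = 8, the default
+// case below allows at most 80 / 8 = 10 survivor regions; with
+// G1FixedSurvivorSpaceSize = 32M it would instead be 32 regions, independent
+// of the young list target.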
+void G1CollectorPolicy::calculate_survivors_policy() +{ + if (!G1UseSurvivorSpaces) { + return; + } + if (G1FixedSurvivorSpaceSize == 0) { + _max_survivor_regions = _young_list_target_length / SurvivorRatio; + } else { + _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes; + } + + if (G1FixedTenuringThreshold) { + _tenuring_threshold = MaxTenuringThreshold; + } else { + _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( + HeapRegion::GrainWords * _max_survivor_regions); + } +} + +bool +G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t + word_size) { + assert(_g1->regions_accounted_for(), "Region leakage!"); + // Initiate a pause when we reach the steady-state "used" target. + size_t used_hard = (_g1->capacity() / 100) * G1SteadyStateUsed; + size_t used_soft = + MAX2((_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta), + used_hard/2); + size_t used = _g1->used(); + + double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; + + size_t young_list_length = _g1->young_list_length(); + size_t young_list_max_length = _young_list_target_length; + if (G1FixedEdenSize) { + young_list_max_length -= _max_survivor_regions; + } + bool reached_target_length = young_list_length >= young_list_max_length; + + if (in_young_gc_mode()) { + if (reached_target_length) { + assert( young_list_length > 0 && _g1->young_list_length() > 0, + "invariant" ); + _target_pause_time_ms = max_pause_time_ms; + return true; + } + } else { + guarantee( false, "should not reach here" ); + } + + return false; +} + +#ifndef PRODUCT +class HRSortIndexIsOKClosure: public HeapRegionClosure { + CollectionSetChooser* _chooser; +public: + HRSortIndexIsOKClosure(CollectionSetChooser* chooser) : + _chooser(chooser) {} + + bool doHeapRegion(HeapRegion* r) { + if (!r->continuesHumongous()) { + assert(_chooser->regionProperlyOrdered(r), "Ought to be."); + } + return false; + } +}; + +bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() { + HRSortIndexIsOKClosure cl(_collectionSetChooser); + _g1->heap_region_iterate(&cl); + return true; +} +#endif + +void +G1CollectorPolicy_BestRegionsFirst:: +record_collection_pause_start(double start_time_sec, size_t start_used) { + G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used); +} + +class NextNonCSElemFinder: public HeapRegionClosure { + HeapRegion* _res; +public: + NextNonCSElemFinder(): _res(NULL) {} + bool doHeapRegion(HeapRegion* r) { + if (!r->in_collection_set()) { + _res = r; + return true; + } else { + return false; + } + } + HeapRegion* res() { return _res; } +}; + +class KnownGarbageClosure: public HeapRegionClosure { + CollectionSetChooser* _hrSorted; + +public: + KnownGarbageClosure(CollectionSetChooser* hrSorted) : + _hrSorted(hrSorted) + {} + + bool doHeapRegion(HeapRegion* r) { + // We only include humongous regions in collection + // sets when concurrent mark shows that their contained object is + // unreachable. + + // Do we have any marking information for this region? + if (r->is_marked()) { + // We don't include humongous regions in collection + // sets because we collect them immediately at the end of a marking + // cycle. We also don't include young regions because we *must* + // include them in the next collection pause. 
+ if (!r->isHumongous() && !r->is_young()) { + _hrSorted->addMarkedHeapRegion(r); + } + } + return false; + } +}; + +class ParKnownGarbageHRClosure: public HeapRegionClosure { + CollectionSetChooser* _hrSorted; + jint _marked_regions_added; + jint _chunk_size; + jint _cur_chunk_idx; + jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end) + int _worker; + int _invokes; + + void get_new_chunk() { + _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size); + _cur_chunk_end = _cur_chunk_idx + _chunk_size; + } + void add_region(HeapRegion* r) { + if (_cur_chunk_idx == _cur_chunk_end) { + get_new_chunk(); + } + assert(_cur_chunk_idx < _cur_chunk_end, "postcondition"); + _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r); + _marked_regions_added++; + _cur_chunk_idx++; + } + +public: + ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, + jint chunk_size, + int worker) : + _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker), + _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0), + _invokes(0) + {} + + bool doHeapRegion(HeapRegion* r) { + // We only include humongous regions in collection + // sets when concurrent mark shows that their contained object is + // unreachable. + _invokes++; + + // Do we have any marking information for this region? + if (r->is_marked()) { + // We don't include humongous regions in collection + // sets because we collect them immediately at the end of a marking + // cycle. + // We also do not include young regions in collection sets + if (!r->isHumongous() && !r->is_young()) { + add_region(r); + } + } + return false; + } + jint marked_regions_added() { return _marked_regions_added; } + int invokes() { return _invokes; } +}; + +class ParKnownGarbageTask: public AbstractGangTask { + CollectionSetChooser* _hrSorted; + jint _chunk_size; + G1CollectedHeap* _g1; +public: + ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) : + AbstractGangTask("ParKnownGarbageTask"), + _hrSorted(hrSorted), _chunk_size(chunk_size), + _g1(G1CollectedHeap::heap()) + {} + + void work(int i) { + ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i); + // Back to zero for the claim value. 
+ _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i, + HeapRegion::InitialClaimValue); + jint regions_added = parKnownGarbageCl.marked_regions_added(); + _hrSorted->incNumMarkedHeapRegions(regions_added); + if (G1PrintParCleanupStats) { + gclog_or_tty->print(" Thread %d called %d times, added %d regions to list.\n", + i, parKnownGarbageCl.invokes(), regions_added); + } + } +}; + +void +G1CollectorPolicy_BestRegionsFirst:: +record_concurrent_mark_cleanup_end(size_t freed_bytes, + size_t max_live_bytes) { + double start; + if (G1PrintParCleanupStats) start = os::elapsedTime(); + record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes); + + _collectionSetChooser->clearMarkedHeapRegions(); + double clear_marked_end; + if (G1PrintParCleanupStats) { + clear_marked_end = os::elapsedTime(); + gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.", + (clear_marked_end - start)*1000.0); + } + if (ParallelGCThreads > 0) { + const size_t OverpartitionFactor = 4; + const size_t MinChunkSize = 8; + const size_t ChunkSize = + MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor), + MinChunkSize); + _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(), + ChunkSize); + ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, + (int) ChunkSize); + _g1->workers()->run_task(&parKnownGarbageTask); + + assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue), + "sanity check"); + } else { + KnownGarbageClosure knownGarbagecl(_collectionSetChooser); + _g1->heap_region_iterate(&knownGarbagecl); + } + double known_garbage_end; + if (G1PrintParCleanupStats) { + known_garbage_end = os::elapsedTime(); + gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.", + (known_garbage_end - clear_marked_end)*1000.0); + } + _collectionSetChooser->sortMarkedHeapRegions(); + double sort_end; + if (G1PrintParCleanupStats) { + sort_end = os::elapsedTime(); + gclog_or_tty->print_cr(" sorting: %8.3f ms.", + (sort_end - known_garbage_end)*1000.0); + } + + record_concurrent_mark_cleanup_end_work2(); + double work2_end; + if (G1PrintParCleanupStats) { + work2_end = os::elapsedTime(); + gclog_or_tty->print_cr(" work2: %8.3f ms.", + (work2_end - sort_end)*1000.0); + } +} + +// Add the heap region to the collection set and return the conservative +// estimate of the number of live bytes. +void G1CollectorPolicy:: +add_to_collection_set(HeapRegion* hr) { + if (G1PrintRegions) { + gclog_or_tty->print_cr("added region to cset %d:["PTR_FORMAT", "PTR_FORMAT"], " + "top "PTR_FORMAT", young %s", + hr->hrs_index(), hr->bottom(), hr->end(), + hr->top(), (hr->is_young()) ? 
"YES" : "NO"); + } + + if (_g1->mark_in_progress()) + _g1->concurrent_mark()->registerCSetRegion(hr); + + assert(!hr->in_collection_set(), + "should not already be in the CSet"); + hr->set_in_collection_set(true); + hr->set_next_in_collection_set(_collection_set); + _collection_set = hr; + _collection_set_size++; + _collection_set_bytes_used_before += hr->used(); + _g1->register_region_with_in_cset_fast_test(hr); +} + +void +G1CollectorPolicy_BestRegionsFirst:: +choose_collection_set() { + double non_young_start_time_sec; + start_recording_regions(); + + guarantee(_target_pause_time_ms > -1.0, + "_target_pause_time_ms should have been set!"); + assert(_collection_set == NULL, "Precondition"); + + double base_time_ms = predict_base_elapsed_time_ms(_pending_cards); + double predicted_pause_time_ms = base_time_ms; + + double target_time_ms = _target_pause_time_ms; + double time_remaining_ms = target_time_ms - base_time_ms; + + // the 10% and 50% values are arbitrary... + if (time_remaining_ms < 0.10*target_time_ms) { + time_remaining_ms = 0.50 * target_time_ms; + _within_target = false; + } else { + _within_target = true; + } + + // We figure out the number of bytes available for future to-space. + // For new regions without marking information, we must assume the + // worst-case of complete survival. If we have marking information for a + // region, we can bound the amount of live data. We can add a number of + // such regions, as long as the sum of the live data bounds does not + // exceed the available evacuation space. + size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes; + + size_t expansion_bytes = + _g1->expansion_regions() * HeapRegion::GrainBytes; + + _collection_set_bytes_used_before = 0; + _collection_set_size = 0; + + // Adjust for expansion and slop. + max_live_bytes = max_live_bytes + expansion_bytes; + + assert(_g1->regions_accounted_for(), "Region leakage!"); + + HeapRegion* hr; + if (in_young_gc_mode()) { + double young_start_time_sec = os::elapsedTime(); + + if (G1PolicyVerbose > 0) { + gclog_or_tty->print_cr("Adding %d young regions to the CSet", + _g1->young_list_length()); + } + _young_cset_length = 0; + _last_young_gc_full = full_young_gcs() ? true : false; + if (_last_young_gc_full) + ++_full_young_pause_num; + else + ++_partial_young_pause_num; + hr = _g1->pop_region_from_young_list(); + while (hr != NULL) { + + assert( hr->young_index_in_cset() == -1, "invariant" ); + assert( hr->age_in_surv_rate_group() != -1, "invariant" ); + hr->set_young_index_in_cset((int) _young_cset_length); + + ++_young_cset_length; + double predicted_time_ms = predict_region_elapsed_time_ms(hr, true); + time_remaining_ms -= predicted_time_ms; + predicted_pause_time_ms += predicted_time_ms; + assert(!hr->in_collection_set(), "invariant"); + add_to_collection_set(hr); + record_cset_region(hr, true); + max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes); + if (G1PolicyVerbose > 0) { + gclog_or_tty->print_cr(" Added [" PTR_FORMAT ", " PTR_FORMAT") to CS.", + hr->bottom(), hr->end()); + gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)", + max_live_bytes/K); + } + hr = _g1->pop_region_from_young_list(); + } + + record_scan_only_regions(_g1->young_list_scan_only_length()); + + double young_end_time_sec = os::elapsedTime(); + _recorded_young_cset_choice_time_ms = + (young_end_time_sec - young_start_time_sec) * 1000.0; + + non_young_start_time_sec = os::elapsedTime(); + + if (_young_cset_length > 0 && _last_young_gc_full) { + // don't bother adding more regions... 
+ goto choose_collection_set_end; + } + } + + if (!in_young_gc_mode() || !full_young_gcs()) { + bool should_continue = true; + NumberSeq seq; + double avg_prediction = 100000000000000000.0; // something very large + do { + hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms, + avg_prediction); + if (hr != NULL) { + double predicted_time_ms = predict_region_elapsed_time_ms(hr, false); + time_remaining_ms -= predicted_time_ms; + predicted_pause_time_ms += predicted_time_ms; + add_to_collection_set(hr); + record_cset_region(hr, false); + max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes); + if (G1PolicyVerbose > 0) { + gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)", + max_live_bytes/K); + } + seq.add(predicted_time_ms); + avg_prediction = seq.avg() + seq.sd(); + } + should_continue = + ( hr != NULL) && + ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0 + : _collection_set_size < _young_list_fixed_length ); + } while (should_continue); + + if (!adaptive_young_list_length() && + _collection_set_size < _young_list_fixed_length) + _should_revert_to_full_young_gcs = true; + } + +choose_collection_set_end: + count_CS_bytes_used(); + + end_recording_regions(); + + double non_young_end_time_sec = os::elapsedTime(); + _recorded_non_young_cset_choice_time_ms = + (non_young_end_time_sec - non_young_start_time_sec) * 1000.0; +} + +void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() { + G1CollectorPolicy::record_full_collection_end(); + _collectionSetChooser->updateAfterFullCollection(); +} + +void G1CollectorPolicy_BestRegionsFirst:: +expand_if_possible(size_t numRegions) { + size_t expansion_bytes = numRegions * HeapRegion::GrainBytes; + _g1->expand(expansion_bytes); +} + +void G1CollectorPolicy_BestRegionsFirst:: +record_collection_pause_end(bool abandoned) { + G1CollectorPolicy::record_collection_pause_end(abandoned); + assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end."); +} + +// Local Variables: *** +// c-indentation-style: gnu *** +// End: *** --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp 2009-08-01 04:21:06.259011049 +0100 @@ -0,0 +1,1183 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// A G1CollectorPolicy makes policy decisions that determine the +// characteristics of the collector. Examples include: +// * choice of collection set. +// * when to collect. 
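+//
+// A typical evacuation pause drives the policy roughly as follows (a sketch
+// only; in practice the calls are made by the G1 heap implementation):
+//
+//   policy->record_stop_world_start();
+//   policy->record_collection_pause_start(start_time_sec, start_used);
+//   ... choose and evacuate the collection set ...
+//   policy->record_collection_pause_end(abandoned);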
+
+class HeapRegion;
+class CollectionSetChooser;
+
+// Yes, this is a bit unpleasant... but it saves replicating the same thing
+// over and over again and introducing subtle problems through small typos and
+// cutting and pasting mistakes. The macros below introduce a number
+// sequence into the following two classes and the methods that access it.
+
+#define define_num_seq(name) \
+private: \
+  NumberSeq _all_##name##_times_ms; \
+public: \
+  void record_##name##_time_ms(double ms) { \
+    _all_##name##_times_ms.add(ms); \
+  } \
+  NumberSeq* get_##name##_seq() { \
+    return &_all_##name##_times_ms; \
+  }
+
+class MainBodySummary;
+
+class PauseSummary: public CHeapObj {
+  define_num_seq(total)
+  define_num_seq(other)
+
+public:
+  virtual MainBodySummary* main_body_summary() { return NULL; }
+};
+
+class MainBodySummary: public CHeapObj {
+  define_num_seq(satb_drain) // optional
+  define_num_seq(parallel) // parallel only
+  define_num_seq(ext_root_scan)
+  define_num_seq(mark_stack_scan)
+  define_num_seq(scan_only)
+  define_num_seq(update_rs)
+  define_num_seq(scan_rs)
+  define_num_seq(scan_new_refs) // Only for temp use; added to
+                                // in parallel case.
+  define_num_seq(obj_copy)
+  define_num_seq(termination) // parallel only
+  define_num_seq(parallel_other) // parallel only
+  define_num_seq(mark_closure)
+  define_num_seq(clear_ct) // parallel only
+};
+
+class Summary: public PauseSummary,
+               public MainBodySummary {
+public:
+  virtual MainBodySummary* main_body_summary() { return this; }
+};
+
+class AbandonedSummary: public PauseSummary {
+};
+
+class G1CollectorPolicy: public CollectorPolicy {
+protected:
+  // The number of pauses during the execution.
+  long _n_pauses;
+
+  // either equal to the number of parallel threads, if ParallelGCThreads
+  // has been set, or 1 otherwise
+  int _parallel_gc_threads;
+
+  enum SomePrivateConstants {
+    NumPrevPausesForHeuristics = 10,
+    NumPrevGCsForHeuristics = 10,
+    NumAPIs = HeapRegion::MaxAge
+  };
+
+  G1MMUTracker* _mmu_tracker;
+
+  void initialize_flags();
+
+  void initialize_all() {
+    initialize_flags();
+    initialize_size_info();
+    initialize_perm_generation(PermGen::MarkSweepCompact);
+  }
+
+  virtual size_t default_init_heap_size() {
+    // Pick some reasonable default.
+    return 8*M;
+  }
+
+
+  double _cur_collection_start_sec;
+  size_t _cur_collection_pause_used_at_start_bytes;
+  size_t _cur_collection_pause_used_regions_at_start;
+  size_t _prev_collection_pause_used_at_end_bytes;
+  double _cur_collection_par_time_ms;
+  double _cur_satb_drain_time_ms;
+  double _cur_clear_ct_time_ms;
+  bool _satb_drain_time_set;
+
+  double _cur_CH_strong_roots_end_sec;
+  double _cur_CH_strong_roots_dur_ms;
+  double _cur_G1_strong_roots_end_sec;
+  double _cur_G1_strong_roots_dur_ms;
+
+  // Statistics for recent GC pauses. See below for how indexed.
+  TruncatedSeq* _recent_CH_strong_roots_times_ms;
+  TruncatedSeq* _recent_G1_strong_roots_times_ms;
+  TruncatedSeq* _recent_evac_times_ms;
+  // These exclude marking times.
+ TruncatedSeq* _recent_pause_times_ms; + TruncatedSeq* _recent_gc_times_ms; + + TruncatedSeq* _recent_CS_bytes_used_before; + TruncatedSeq* _recent_CS_bytes_surviving; + + TruncatedSeq* _recent_rs_sizes; + + TruncatedSeq* _concurrent_mark_init_times_ms; + TruncatedSeq* _concurrent_mark_remark_times_ms; + TruncatedSeq* _concurrent_mark_cleanup_times_ms; + + Summary* _summary; + AbandonedSummary* _abandoned_summary; + + NumberSeq* _all_pause_times_ms; + NumberSeq* _all_full_gc_times_ms; + double _stop_world_start; + NumberSeq* _all_stop_world_times_ms; + NumberSeq* _all_yield_times_ms; + + size_t _region_num_young; + size_t _region_num_tenured; + size_t _prev_region_num_young; + size_t _prev_region_num_tenured; + + NumberSeq* _all_mod_union_times_ms; + + int _aux_num; + NumberSeq* _all_aux_times_ms; + double* _cur_aux_start_times_ms; + double* _cur_aux_times_ms; + bool* _cur_aux_times_set; + + double* _par_last_ext_root_scan_times_ms; + double* _par_last_mark_stack_scan_times_ms; + double* _par_last_scan_only_times_ms; + double* _par_last_scan_only_regions_scanned; + double* _par_last_update_rs_start_times_ms; + double* _par_last_update_rs_times_ms; + double* _par_last_update_rs_processed_buffers; + double* _par_last_scan_rs_start_times_ms; + double* _par_last_scan_rs_times_ms; + double* _par_last_scan_new_refs_times_ms; + double* _par_last_obj_copy_times_ms; + double* _par_last_termination_times_ms; + + // indicates that we are in young GC mode + bool _in_young_gc_mode; + + // indicates whether we are in full young or partially young GC mode + bool _full_young_gcs; + + // if true, then it tries to dynamically adjust the length of the + // young list + bool _adaptive_young_list_length; + size_t _young_list_min_length; + size_t _young_list_target_length; + size_t _young_list_so_prefix_length; + size_t _young_list_fixed_length; + + size_t _young_cset_length; + bool _last_young_gc_full; + + double _target_pause_time_ms; + + unsigned _full_young_pause_num; + unsigned _partial_young_pause_num; + + bool _during_marking; + bool _in_marking_window; + bool _in_marking_window_im; + + SurvRateGroup* _short_lived_surv_rate_group; + SurvRateGroup* _survivor_surv_rate_group; + // add here any more surv rate groups + + bool during_marking() { + return _during_marking; + } + + // + +private: + enum PredictionConstants { + TruncatedSeqLength = 10 + }; + + TruncatedSeq* _alloc_rate_ms_seq; + double _prev_collection_pause_end_ms; + + TruncatedSeq* _pending_card_diff_seq; + TruncatedSeq* _rs_length_diff_seq; + TruncatedSeq* _cost_per_card_ms_seq; + TruncatedSeq* _cost_per_scan_only_region_ms_seq; + TruncatedSeq* _fully_young_cards_per_entry_ratio_seq; + TruncatedSeq* _partially_young_cards_per_entry_ratio_seq; + TruncatedSeq* _cost_per_entry_ms_seq; + TruncatedSeq* _partially_young_cost_per_entry_ms_seq; + TruncatedSeq* _cost_per_byte_ms_seq; + TruncatedSeq* _constant_other_time_ms_seq; + TruncatedSeq* _young_other_cost_per_region_ms_seq; + TruncatedSeq* _non_young_other_cost_per_region_ms_seq; + + TruncatedSeq* _pending_cards_seq; + TruncatedSeq* _scanned_cards_seq; + TruncatedSeq* _rs_lengths_seq; + + TruncatedSeq* _cost_per_byte_ms_during_cm_seq; + TruncatedSeq* _cost_per_scan_only_region_ms_during_cm_seq; + + TruncatedSeq* _young_gc_eff_seq; + + TruncatedSeq* _max_conc_overhead_seq; + + size_t _recorded_young_regions; + size_t _recorded_scan_only_regions; + size_t _recorded_non_young_regions; + size_t _recorded_region_num; + + size_t _free_regions_at_end_of_collection; + size_t 
_scan_only_regions_at_end_of_collection; + + size_t _recorded_rs_lengths; + size_t _max_rs_lengths; + + size_t _recorded_marked_bytes; + size_t _recorded_young_bytes; + + size_t _predicted_pending_cards; + size_t _predicted_cards_scanned; + size_t _predicted_rs_lengths; + size_t _predicted_bytes_to_copy; + + double _predicted_survival_ratio; + double _predicted_rs_update_time_ms; + double _predicted_rs_scan_time_ms; + double _predicted_scan_only_scan_time_ms; + double _predicted_object_copy_time_ms; + double _predicted_constant_other_time_ms; + double _predicted_young_other_time_ms; + double _predicted_non_young_other_time_ms; + double _predicted_pause_time_ms; + + double _vtime_diff_ms; + + double _recorded_young_free_cset_time_ms; + double _recorded_non_young_free_cset_time_ms; + + double _sigma; + double _expensive_region_limit_ms; + + size_t _rs_lengths_prediction; + + size_t _known_garbage_bytes; + double _known_garbage_ratio; + + double sigma() { + return _sigma; + } + + // A function that prevents us putting too much stock in small sample + // sets. Returns a number between 2.0 and 1.0, depending on the number + // of samples. 5 or more samples yields one; fewer scales linearly from + // 2.0 at 1 sample to 1.0 at 5. + double confidence_factor(int samples) { + if (samples > 4) return 1.0; + else return 1.0 + sigma() * ((double)(5 - samples))/2.0; + } + + double get_new_neg_prediction(TruncatedSeq* seq) { + return seq->davg() - sigma() * seq->dsd(); + } + +#ifndef PRODUCT + bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group); +#endif // PRODUCT + +protected: + double _pause_time_target_ms; + double _recorded_young_cset_choice_time_ms; + double _recorded_non_young_cset_choice_time_ms; + bool _within_target; + size_t _pending_cards; + size_t _max_pending_cards; + +public: + + void set_region_short_lived(HeapRegion* hr) { + hr->install_surv_rate_group(_short_lived_surv_rate_group); + } + + void set_region_survivors(HeapRegion* hr) { + hr->install_surv_rate_group(_survivor_surv_rate_group); + } + +#ifndef PRODUCT + bool verify_young_ages(); +#endif // PRODUCT + + void tag_scan_only(size_t short_lived_scan_only_length); + + double get_new_prediction(TruncatedSeq* seq) { + return MAX2(seq->davg() + sigma() * seq->dsd(), + seq->davg() * confidence_factor(seq->num())); + } + + size_t young_cset_length() { + return _young_cset_length; + } + + void record_max_rs_lengths(size_t rs_lengths) { + _max_rs_lengths = rs_lengths; + } + + size_t predict_pending_card_diff() { + double prediction = get_new_neg_prediction(_pending_card_diff_seq); + if (prediction < 0.00001) + return 0; + else + return (size_t) prediction; + } + + size_t predict_pending_cards() { + size_t max_pending_card_num = _g1->max_pending_card_num(); + size_t diff = predict_pending_card_diff(); + size_t prediction; + if (diff > max_pending_card_num) + prediction = max_pending_card_num; + else + prediction = max_pending_card_num - diff; + + return prediction; + } + + size_t predict_rs_length_diff() { + return (size_t) get_new_prediction(_rs_length_diff_seq); + } + + double predict_alloc_rate_ms() { + return get_new_prediction(_alloc_rate_ms_seq); + } + + double predict_cost_per_card_ms() { + return get_new_prediction(_cost_per_card_ms_seq); + } + + double predict_rs_update_time_ms(size_t pending_cards) { + return (double) pending_cards * predict_cost_per_card_ms(); + } + + double predict_fully_young_cards_per_entry_ratio() { + return get_new_prediction(_fully_young_cards_per_entry_ratio_seq); + } + + double 
predict_partially_young_cards_per_entry_ratio() { + if (_partially_young_cards_per_entry_ratio_seq->num() < 2) + return predict_fully_young_cards_per_entry_ratio(); + else + return get_new_prediction(_partially_young_cards_per_entry_ratio_seq); + } + + size_t predict_young_card_num(size_t rs_length) { + return (size_t) ((double) rs_length * + predict_fully_young_cards_per_entry_ratio()); + } + + size_t predict_non_young_card_num(size_t rs_length) { + return (size_t) ((double) rs_length * + predict_partially_young_cards_per_entry_ratio()); + } + + double predict_rs_scan_time_ms(size_t card_num) { + if (full_young_gcs()) + return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq); + else + return predict_partially_young_rs_scan_time_ms(card_num); + } + + double predict_partially_young_rs_scan_time_ms(size_t card_num) { + if (_partially_young_cost_per_entry_ms_seq->num() < 3) + return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq); + else + return (double) card_num * + get_new_prediction(_partially_young_cost_per_entry_ms_seq); + } + + double predict_scan_only_time_ms_during_cm(size_t scan_only_region_num) { + if (_cost_per_scan_only_region_ms_during_cm_seq->num() < 3) + return 1.5 * (double) scan_only_region_num * + get_new_prediction(_cost_per_scan_only_region_ms_seq); + else + return (double) scan_only_region_num * + get_new_prediction(_cost_per_scan_only_region_ms_during_cm_seq); + } + + double predict_scan_only_time_ms(size_t scan_only_region_num) { + if (_in_marking_window_im) + return predict_scan_only_time_ms_during_cm(scan_only_region_num); + else + return (double) scan_only_region_num * + get_new_prediction(_cost_per_scan_only_region_ms_seq); + } + + double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) { + if (_cost_per_byte_ms_during_cm_seq->num() < 3) + return 1.1 * (double) bytes_to_copy * + get_new_prediction(_cost_per_byte_ms_seq); + else + return (double) bytes_to_copy * + get_new_prediction(_cost_per_byte_ms_during_cm_seq); + } + + double predict_object_copy_time_ms(size_t bytes_to_copy) { + if (_in_marking_window && !_in_marking_window_im) + return predict_object_copy_time_ms_during_cm(bytes_to_copy); + else + return (double) bytes_to_copy * + get_new_prediction(_cost_per_byte_ms_seq); + } + + double predict_constant_other_time_ms() { + return get_new_prediction(_constant_other_time_ms_seq); + } + + double predict_young_other_time_ms(size_t young_num) { + return + (double) young_num * + get_new_prediction(_young_other_cost_per_region_ms_seq); + } + + double predict_non_young_other_time_ms(size_t non_young_num) { + return + (double) non_young_num * + get_new_prediction(_non_young_other_cost_per_region_ms_seq); + } + + void check_if_region_is_too_expensive(double predicted_time_ms); + + double predict_young_collection_elapsed_time_ms(size_t adjustment); + double predict_base_elapsed_time_ms(size_t pending_cards); + double predict_base_elapsed_time_ms(size_t pending_cards, + size_t scanned_cards); + size_t predict_bytes_to_copy(HeapRegion* hr); + double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); + + // for use by: calculate_optimal_so_length(length) + void predict_gc_eff(size_t young_region_num, + size_t so_length, + double base_time_ms, + double *gc_eff, + double *pause_time_ms); + + // for use by: calculate_young_list_target_config(rs_length) + bool predict_gc_eff(size_t young_region_num, + size_t so_length, + double base_time_with_so_ms, + size_t init_free_regions, + double target_pause_time_ms, + double* gc_eff); + 
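+  // Taken together, the predictors above estimate a pause roughly as
+  //
+  //   pause_ms ~= rs_update(pending_cards) + scan_only(scan_only_regions)
+  //             + rs_scan(cards_scanned) + obj_copy(bytes_to_copy)
+  //             + young_other(young_regions)
+  //             + non_young_other(non_young_regions)
+  //             + constant_other
+  //
+  // which is how end_recording_regions() and predict_base_elapsed_time_ms()
+  // combine the individual terms (the names above are informal shorthands).
+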
+ void start_recording_regions(); + void record_cset_region(HeapRegion* hr, bool young); + void record_scan_only_regions(size_t scan_only_length); + void end_recording_regions(); + + void record_vtime_diff_ms(double vtime_diff_ms) { + _vtime_diff_ms = vtime_diff_ms; + } + + void record_young_free_cset_time_ms(double time_ms) { + _recorded_young_free_cset_time_ms = time_ms; + } + + void record_non_young_free_cset_time_ms(double time_ms) { + _recorded_non_young_free_cset_time_ms = time_ms; + } + + double predict_young_gc_eff() { + return get_new_neg_prediction(_young_gc_eff_seq); + } + + double predict_survivor_regions_evac_time(); + + // + +public: + void cset_regions_freed() { + bool propagate = _last_young_gc_full && !_in_marking_window; + _short_lived_surv_rate_group->all_surviving_words_recorded(propagate); + _survivor_surv_rate_group->all_surviving_words_recorded(propagate); + // also call it on any more surv rate groups + } + + void set_known_garbage_bytes(size_t known_garbage_bytes) { + _known_garbage_bytes = known_garbage_bytes; + size_t heap_bytes = _g1->capacity(); + _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes; + } + + void decrease_known_garbage_bytes(size_t known_garbage_bytes) { + guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" ); + + _known_garbage_bytes -= known_garbage_bytes; + size_t heap_bytes = _g1->capacity(); + _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes; + } + + G1MMUTracker* mmu_tracker() { + return _mmu_tracker; + } + + double predict_init_time_ms() { + return get_new_prediction(_concurrent_mark_init_times_ms); + } + + double predict_remark_time_ms() { + return get_new_prediction(_concurrent_mark_remark_times_ms); + } + + double predict_cleanup_time_ms() { + return get_new_prediction(_concurrent_mark_cleanup_times_ms); + } + + // Returns an estimate of the survival rate of the region at yg-age + // "yg_age". + double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) { + TruncatedSeq* seq = surv_rate_group->get_seq(age); + if (seq->num() == 0) + gclog_or_tty->print("BARF! 
age is %d", age); + guarantee( seq->num() > 0, "invariant" ); + double pred = get_new_prediction(seq); + if (pred > 1.0) + pred = 1.0; + return pred; + } + + double predict_yg_surv_rate(int age) { + return predict_yg_surv_rate(age, _short_lived_surv_rate_group); + } + + double accum_yg_surv_rate_pred(int age) { + return _short_lived_surv_rate_group->accum_surv_rate_pred(age); + } + +protected: + void print_stats (int level, const char* str, double value); + void print_stats (int level, const char* str, int value); + void print_par_stats (int level, const char* str, double* data) { + print_par_stats(level, str, data, true); + } + void print_par_stats (int level, const char* str, double* data, bool summary); + void print_par_buffers (int level, const char* str, double* data, bool summary); + + void check_other_times(int level, + NumberSeq* other_times_ms, + NumberSeq* calc_other_times_ms) const; + + void print_summary (PauseSummary* stats) const; + void print_abandoned_summary(PauseSummary* summary) const; + + void print_summary (int level, const char* str, NumberSeq* seq) const; + void print_summary_sd (int level, const char* str, NumberSeq* seq) const; + + double avg_value (double* data); + double max_value (double* data); + double sum_of_values (double* data); + double max_sum (double* data1, double* data2); + + int _last_satb_drain_processed_buffers; + int _last_update_rs_processed_buffers; + double _last_pause_time_ms; + + size_t _bytes_in_to_space_before_gc; + size_t _bytes_in_to_space_after_gc; + size_t bytes_in_to_space_during_gc() { + return + _bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc; + } + size_t _bytes_in_collection_set_before_gc; + // Used to count used bytes in CS. + friend class CountCSClosure; + + // Statistics kept per GC stoppage, pause or full. + TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec; + + // We track markings. + int _num_markings; + double _mark_thread_startup_sec; // Time at startup of marking thread + + // Add a new GC of the given duration and end time to the record. + void update_recent_gc_times(double end_time_sec, double elapsed_ms); + + // The head of the list (via "next_in_collection_set()") representing the + // current collection set. + HeapRegion* _collection_set; + size_t _collection_set_size; + size_t _collection_set_bytes_used_before; + + // Info about marking. + int _n_marks; // Sticky at 2, so we know when we've done at least 2. + + // The number of collection pauses at the end of the last mark. + size_t _n_pauses_at_mark_end; + + // ==== This section is for stats related to starting Conc Refinement on time. + size_t _conc_refine_enabled; + size_t _conc_refine_zero_traversals; + size_t _conc_refine_max_traversals; + // In # of heap regions. + size_t _conc_refine_current_delta; + + // At the beginning of a collection pause, update the variables above, + // especially the "delta". + void update_conc_refine_data(); + // ==== + + // Stash a pointer to the g1 heap. + G1CollectedHeap* _g1; + + // The average time in ms per collection pause, averaged over recent pauses. + double recent_avg_time_for_pauses_ms(); + + // The average time in ms for processing CollectedHeap strong roots, per + // collection pause, averaged over recent pauses. + double recent_avg_time_for_CH_strong_ms(); + + // The average time in ms for processing the G1 remembered set, per + // pause, averaged over recent pauses. 
+  double recent_avg_time_for_G1_strong_ms();
+
+  // The average time in ms for "evacuating followers", per pause, averaged
+  // over recent pauses.
+  double recent_avg_time_for_evac_ms();
+
+  // The number of "recent" GCs recorded in the number sequences.
+  int number_of_recent_gcs();
+
+  // The average survival ratio, computed by the total number of bytes
+  // surviving / total number of bytes before collection over the last
+  // several recent pauses.
+  double recent_avg_survival_fraction();
+  // The survival fraction of the most recent pause; if there have been no
+  // pauses, returns 1.0.
+  double last_survival_fraction();
+
+  // Returns a "conservative" estimate of the recent survival rate, i.e.,
+  // one that may be higher than "recent_avg_survival_fraction".
+  // This is conservative in several ways:
+  //   If there have been few pauses, it will assume a potential high
+  //   variance, and err on the side of caution.
+  //   It puts a lower bound (currently 0.1) on the value it will return.
+  //   To try to detect phase changes, if the most recent pause ("latest") has a
+  //   higher-than-average ("avg") survival rate, it returns that rate.
+  // The "work" version is a utility function; young is restricted to young regions.
+  double conservative_avg_survival_fraction_work(double avg,
+                                                 double latest);
+
+  // The arguments are the two sequences that keep track of the number of bytes
+  // surviving and the total number of bytes before collection, resp.,
+  // over the last several recent pauses.
+  // Returns the survival rate for the category in the most recent pause.
+  // If there have been no pauses, returns 1.0.
+  double last_survival_fraction_work(TruncatedSeq* surviving,
+                                     TruncatedSeq* before);
+
+  // The arguments are the two sequences that keep track of the number of bytes
+  // surviving and the total number of bytes before collection, resp.,
+  // over the last several recent pauses.
+  // Returns the average survival ratio over the last several recent pauses.
+  // If there have been no pauses, returns 1.0.
+  double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
+                                           TruncatedSeq* before);
+
+  double conservative_avg_survival_fraction() {
+    double avg = recent_avg_survival_fraction();
+    double latest = last_survival_fraction();
+    return conservative_avg_survival_fraction_work(avg, latest);
+  }
+
+  // The ratio of gc time to elapsed time, computed over recent pauses.
+  double _recent_avg_pause_time_ratio;
+
+  double recent_avg_pause_time_ratio() {
+    return _recent_avg_pause_time_ratio;
+  }
+
+  // Number of pauses between concurrent marking.
+  size_t _pauses_btwn_concurrent_mark;
+
+  size_t _n_marks_since_last_pause;
+
+  // True iff CM has been initiated.
+  bool _conc_mark_initiated;
+
+  // True iff CM should be initiated.
+  bool _should_initiate_conc_mark;
+  bool _should_revert_to_full_young_gcs;
+  bool _last_full_young_gc;
+
+  // This set of variables tracks the collector efficiency, in order to
+  // determine whether we should initiate a new marking.
+ double _cur_mark_stop_world_time_ms; + double _mark_init_start_sec; + double _mark_remark_start_sec; + double _mark_cleanup_start_sec; + double _mark_closure_time_ms; + + void calculate_young_list_min_length(); + void calculate_young_list_target_config(); + void calculate_young_list_target_config(size_t rs_lengths); + size_t calculate_optimal_so_length(size_t young_list_length); + +public: + + G1CollectorPolicy(); + + virtual G1CollectorPolicy* as_g1_policy() { return this; } + + virtual CollectorPolicy::Name kind() { + return CollectorPolicy::G1CollectorPolicyKind; + } + + void check_prediction_validity(); + + size_t bytes_in_collection_set() { + return _bytes_in_collection_set_before_gc; + } + + size_t bytes_in_to_space() { + return bytes_in_to_space_during_gc(); + } + + unsigned calc_gc_alloc_time_stamp() { + return _all_pause_times_ms->num() + 1; + } + +protected: + + // Count the number of bytes used in the CS. + void count_CS_bytes_used(); + + // Together these do the base cleanup-recording work. Subclasses might + // want to put something between them. + void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes, + size_t max_live_bytes); + void record_concurrent_mark_cleanup_end_work2(); + +public: + + virtual void init(); + + // Create jstat counters for the policy. + virtual void initialize_gc_policy_counters(); + + virtual HeapWord* mem_allocate_work(size_t size, + bool is_tlab, + bool* gc_overhead_limit_was_exceeded); + + // This method controls how a collector handles one or more + // of its generations being fully allocated. + virtual HeapWord* satisfy_failed_allocation(size_t size, + bool is_tlab); + + BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; } + + GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; } + + // The number of collection pauses so far. + long n_pauses() const { return _n_pauses; } + + // Update the heuristic info to record a collection pause of the given + // start time, where the given number of bytes were used at the start. + // This may involve changing the desired size of a collection set. + + virtual void record_stop_world_start(); + + virtual void record_collection_pause_start(double start_time_sec, + size_t start_used); + + // Must currently be called while the world is stopped. + virtual void record_concurrent_mark_init_start(); + virtual void record_concurrent_mark_init_end(); + void record_concurrent_mark_init_end_pre(double + mark_init_elapsed_time_ms); + + void record_mark_closure_time(double mark_closure_time_ms); + + virtual void record_concurrent_mark_remark_start(); + virtual void record_concurrent_mark_remark_end(); + + virtual void record_concurrent_mark_cleanup_start(); + virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes, + size_t max_live_bytes); + virtual void record_concurrent_mark_cleanup_completed(); + + virtual void record_concurrent_pause(); + virtual void record_concurrent_pause_end(); + + virtual void record_collection_pause_end_CH_strong_roots(); + virtual void record_collection_pause_end_G1_strong_roots(); + + virtual void record_collection_pause_end(bool abandoned); + + // Record the fact that a full collection occurred. 
+ virtual void record_full_collection_start(); + virtual void record_full_collection_end(); + + void record_ext_root_scan_time(int worker_i, double ms) { + _par_last_ext_root_scan_times_ms[worker_i] = ms; + } + + void record_mark_stack_scan_time(int worker_i, double ms) { + _par_last_mark_stack_scan_times_ms[worker_i] = ms; + } + + void record_scan_only_time(int worker_i, double ms, int n) { + _par_last_scan_only_times_ms[worker_i] = ms; + _par_last_scan_only_regions_scanned[worker_i] = (double) n; + } + + void record_satb_drain_time(double ms) { + _cur_satb_drain_time_ms = ms; + _satb_drain_time_set = true; + } + + void record_satb_drain_processed_buffers (int processed_buffers) { + _last_satb_drain_processed_buffers = processed_buffers; + } + + void record_mod_union_time(double ms) { + _all_mod_union_times_ms->add(ms); + } + + void record_update_rs_start_time(int thread, double ms) { + _par_last_update_rs_start_times_ms[thread] = ms; + } + + void record_update_rs_time(int thread, double ms) { + _par_last_update_rs_times_ms[thread] = ms; + } + + void record_update_rs_processed_buffers (int thread, + double processed_buffers) { + _par_last_update_rs_processed_buffers[thread] = processed_buffers; + } + + void record_scan_rs_start_time(int thread, double ms) { + _par_last_scan_rs_start_times_ms[thread] = ms; + } + + void record_scan_rs_time(int thread, double ms) { + _par_last_scan_rs_times_ms[thread] = ms; + } + + void record_scan_new_refs_time(int thread, double ms) { + _par_last_scan_new_refs_times_ms[thread] = ms; + } + + double get_scan_new_refs_time(int thread) { + return _par_last_scan_new_refs_times_ms[thread]; + } + + void reset_obj_copy_time(int thread) { + _par_last_obj_copy_times_ms[thread] = 0.0; + } + + void reset_obj_copy_time() { + reset_obj_copy_time(0); + } + + void record_obj_copy_time(int thread, double ms) { + _par_last_obj_copy_times_ms[thread] += ms; + } + + void record_obj_copy_time(double ms) { + record_obj_copy_time(0, ms); + } + + void record_termination_time(int thread, double ms) { + _par_last_termination_times_ms[thread] = ms; + } + + void record_termination_time(double ms) { + record_termination_time(0, ms); + } + + void record_pause_time_ms(double ms) { + _last_pause_time_ms = ms; + } + + void record_clear_ct_time(double ms) { + _cur_clear_ct_time_ms = ms; + } + + void record_par_time(double ms) { + _cur_collection_par_time_ms = ms; + } + + void record_aux_start_time(int i) { + guarantee(i < _aux_num, "should be within range"); + _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0; + } + + void record_aux_end_time(int i) { + guarantee(i < _aux_num, "should be within range"); + double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i]; + _cur_aux_times_set[i] = true; + _cur_aux_times_ms[i] += ms; + } + + // Record the fact that "bytes" bytes allocated in a region. + void record_before_bytes(size_t bytes); + void record_after_bytes(size_t bytes); + + // Returns "true" if this is a good time to do a collection pause. + // The "word_size" argument, if non-zero, indicates the size of an + // allocation request that is prompting this query. + virtual bool should_do_collection_pause(size_t word_size) = 0; + + // Choose a new collection set. Marks the chosen regions as being + // "in_collection_set", and links them together. The head and number of + // the collection set are available via access methods. 
+  virtual void choose_collection_set() = 0;
+
+  void clear_collection_set() { _collection_set = NULL; }
+
+  // The head of the list (via "next_in_collection_set()") representing the
+  // current collection set.
+  HeapRegion* collection_set() { return _collection_set; }
+
+  // The number of elements in the current collection set.
+  size_t collection_set_size() { return _collection_set_size; }
+
+  // Add "hr" to the CS.
+  void add_to_collection_set(HeapRegion* hr);
+
+  bool should_initiate_conc_mark()       { return _should_initiate_conc_mark; }
+  void set_should_initiate_conc_mark()   { _should_initiate_conc_mark = true; }
+  void unset_should_initiate_conc_mark() { _should_initiate_conc_mark = false; }
+
+  void checkpoint_conc_overhead();
+
+  // If an expansion would be appropriate, because recent GC overhead had
+  // exceeded the desired limit, return an amount to expand by.
+  virtual size_t expansion_amount();
+
+  // Note the start of the mark thread.
+  void note_start_of_mark_thread();
+
+  // The marked bytes of region "r" have changed; reclassify its desirability
+  // for marking. Also asserts that "r" is eligible for a CS.
+  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
+
+#ifndef PRODUCT
+  // Check any appropriate marked bytes info, asserting false if
+  // something's wrong, else returning "true".
+  virtual bool assertMarkedBytesDataOK() = 0;
+#endif
+
+  // Print tracing information.
+  void print_tracing_info() const;
+
+  // Print stats on the young survival ratio.
+  void print_yg_surv_rate_info() const;
+
+  void finished_recalculating_age_indexes(bool is_survivors) {
+    if (is_survivors) {
+      _survivor_surv_rate_group->finished_recalculating_age_indexes();
+    } else {
+      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
+    }
+    // do that for any other surv rate groups
+  }
+
+  bool should_add_next_region_to_young_list();
+
+  bool in_young_gc_mode() {
+    return _in_young_gc_mode;
+  }
+  void set_in_young_gc_mode(bool in_young_gc_mode) {
+    _in_young_gc_mode = in_young_gc_mode;
+  }
+
+  bool full_young_gcs() {
+    return _full_young_gcs;
+  }
+  void set_full_young_gcs(bool full_young_gcs) {
+    _full_young_gcs = full_young_gcs;
+  }
+
+  bool adaptive_young_list_length() {
+    return _adaptive_young_list_length;
+  }
+  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
+    _adaptive_young_list_length = adaptive_young_list_length;
+  }
+
+  inline double get_gc_eff_factor() {
+    double ratio = _known_garbage_ratio;
+
+    double square = ratio * ratio;
+    // square = square * square;
+    double ret = square * 9.0 + 1.0;
+#if 0
+    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
+#endif // 0
+    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
+    return ret;
+  }
+
+  //
+  // Survivor regions policy.
+  //
+protected:
+
+  // Current tenuring threshold, set to 0 if the collector reaches the
+  // maximum number of survivor regions.
+  int _tenuring_threshold;
+
+  // The limit on the number of regions allocated for survivors.
+  size_t _max_survivor_regions;
+
+  // The number of survivor regions after a collection.
+  size_t _recorded_survivor_regions;
+  // List of survivor regions.
+  HeapRegion* _recorded_survivor_head;
+  HeapRegion* _recorded_survivor_tail;
+
+  ageTable _survivors_age_table;
+
+public:
+
+  inline GCAllocPurpose
+  evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
+    if (age < _tenuring_threshold && src_region->is_young()) {
+      return GCAllocForSurvived;
+    } else {
+      return GCAllocForTenured;
+    }
+  }
+
+  inline bool track_object_age(GCAllocPurpose purpose) {
+    return purpose == GCAllocForSurvived;
+  }
+
+  inline GCAllocPurpose alternative_purpose(int purpose) {
+    return GCAllocForTenured;
+  }
+
+  static const size_t REGIONS_UNLIMITED = ~(size_t)0;
+
+  size_t max_regions(int purpose);
+
+  // The limit on regions for a particular purpose is reached.
+  void note_alloc_region_limit_reached(int purpose) {
+    if (purpose == GCAllocForSurvived) {
+      _tenuring_threshold = 0;
+    }
+  }
+
+  void note_start_adding_survivor_regions() {
+    _survivor_surv_rate_group->start_adding_regions();
+  }
+
+  void note_stop_adding_survivor_regions() {
+    _survivor_surv_rate_group->stop_adding_regions();
+  }
+
+  void record_survivor_regions(size_t regions,
+                               HeapRegion* head,
+                               HeapRegion* tail) {
+    _recorded_survivor_regions = regions;
+    _recorded_survivor_head    = head;
+    _recorded_survivor_tail    = tail;
+  }
+
+  void record_thread_age_table(ageTable* age_table) {
+    _survivors_age_table.merge_par(age_table);
+  }
+
+  // Calculates survivor space parameters.
+  void calculate_survivors_policy();
+
+};
+
+// This encapsulates a particular strategy for a g1 Collector.
+//
+//      Start a concurrent mark when our heap size is n bytes
+//      greater than our heap size was at the last concurrent
+//      mark, where n is a function of the CMSTriggerRatio
+//      and the MinHeapFreeRatio.
+//
+//      Start a g1 collection pause when we have allocated the
+//      average number of bytes currently being freed in
+//      a collection, but only if it is at least one region
+//      full.
+//
+//      Resize the heap based on the desired allocation space,
+//      where the desired allocation space is a function of
+//      the survival rate and the desired future size.
+//
+//      Choose the collection set by first picking all older regions
+//      which have a survival rate which beats our projected young
+//      survival rate. Then fill out the number of needed regions
+//      with young regions.
+
+class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
+  CollectionSetChooser* _collectionSetChooser;
+  // If the estimate is less than desirable, resize if possible.
+  void expand_if_possible(size_t numRegions);
+
+  virtual void choose_collection_set();
+  virtual void record_collection_pause_start(double start_time_sec,
+                                             size_t start_used);
+  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
+                                                  size_t max_live_bytes);
+  virtual void record_full_collection_end();
+
+public:
+  G1CollectorPolicy_BestRegionsFirst() {
+    _collectionSetChooser = new CollectionSetChooser();
+  }
+  void record_collection_pause_end(bool abandoned);
+  bool should_do_collection_pause(size_t word_size);
+  // This is not needed any more, after the CSet choosing code was
+  // changed to use the pause prediction work. But let's leave the
+  // hook in just in case.
+  void note_change_in_marked_bytes(HeapRegion* r) { }
+#ifndef PRODUCT
+  bool assertMarkedBytesDataOK();
+#endif
+};
+
+// This should move to some place more general...
+
+// If we have "n" measurements, and we've kept track of their "sum" and the
+// "sum_of_squares" of the measurements, this returns the variance of the
+// sequence.
+inline double variance(int n, double sum_of_squares, double sum) { + double n_d = (double)n; + double avg = sum/n_d; + return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d; +} + +// Local Variables: *** +// c-indentation-style: gnu *** +// End: *** --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp 2009-08-01 04:21:06.697677340 +0100 @@ -0,0 +1,187 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_g1MMUTracker.cpp.incl" + +#define _DISABLE_MMU 0 + +// can't rely on comparing doubles with tolerating a small margin for error +#define SMALL_MARGIN 0.0000001 +#define is_double_leq_0(_value) ( (_value) < SMALL_MARGIN ) +#define is_double_leq(_val1, _val2) is_double_leq_0((_val1) - (_val2)) +#define is_double_geq(_val1, _val2) is_double_leq_0((_val2) - (_val1)) + +/***** ALL TIMES ARE IN SECS!!!!!!! *****/ + +G1MMUTracker::G1MMUTracker(double time_slice, double max_gc_time) : + _time_slice(time_slice), + _max_gc_time(max_gc_time), + _conc_overhead_time_sec(0.0) { } + +void +G1MMUTracker::update_conc_overhead(double conc_overhead) { + double conc_overhead_time_sec = _time_slice * conc_overhead; + if (conc_overhead_time_sec > 0.9 * _max_gc_time) { + // We are screwed, as we only seem to have <10% of the soft + // real-time goal available for pauses. Let's admit defeat and + // allow something more generous as a pause target. 
+    conc_overhead_time_sec = 0.75 * _max_gc_time;
+  }
+
+  _conc_overhead_time_sec = conc_overhead_time_sec;
+}
+
+G1MMUTrackerQueue::G1MMUTrackerQueue(double time_slice, double max_gc_time) :
+  G1MMUTracker(time_slice, max_gc_time),
+  _head_index(0),
+  _tail_index(trim_index(_head_index+1)),
+  _no_entries(0) { }
+
+void G1MMUTrackerQueue::remove_expired_entries(double current_time) {
+  double limit = current_time - _time_slice;
+  while (_no_entries > 0) {
+    if (is_double_geq(limit, _array[_tail_index].end_time())) {
+      _tail_index = trim_index(_tail_index + 1);
+      --_no_entries;
+    } else
+      return;
+  }
+  guarantee(_no_entries == 0, "should have no entries in the array");
+}
+
+double G1MMUTrackerQueue::calculate_gc_time(double current_time) {
+  double gc_time = 0.0;
+  double limit = current_time - _time_slice;
+  for (int i = 0; i < _no_entries; ++i) {
+    int index = trim_index(_tail_index + i);
+    G1MMUTrackerQueueElem *elem = &_array[index];
+    if (elem->end_time() > limit) {
+      if (elem->start_time() > limit)
+        gc_time += elem->duration();
+      else
+        gc_time += elem->end_time() - limit;
+    }
+  }
+  return gc_time;
+}
+
+void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) {
+  double longest_allowed = longest_pause_internal(start);
+  if (longest_allowed < 0.0)
+    longest_allowed = 0.0;
+  double duration = end - start;
+
+  remove_expired_entries(end);
+  if (_no_entries == QueueLength) {
+    // OK, right now when we fill up we bomb out.
+    // There are a few ways of dealing with this "gracefully":
+    //   increase the array size (:-)
+    //   remove the oldest entry (this might allow more GC time for
+    //     the time slice than what's allowed)
+    //   consolidate the two entries with the minimum gap between them
+    //     (this might allow less GC time than what's allowed)
+    guarantee(0, "array full, currently we can't recover");
+  }
+  _head_index = trim_index(_head_index + 1);
+  ++_no_entries;
+  _array[_head_index] = G1MMUTrackerQueueElem(start, end);
+}
+
+// Basically, the _internal call does not remove expired entries.
+// This is for trying things out in the future and a couple
+// of other places (debugging).
+
+double G1MMUTrackerQueue::longest_pause(double current_time) {
+  if (_DISABLE_MMU)
+    return _max_gc_time;
+
+  MutexLockerEx x(MMUTracker_lock, Mutex::_no_safepoint_check_flag);
+  remove_expired_entries(current_time);
+
+  return longest_pause_internal(current_time);
+}
+
+double G1MMUTrackerQueue::longest_pause_internal(double current_time) {
+  double target_time = _max_gc_time;
+
+  while( 1 ) {
+    double gc_time =
+      calculate_gc_time(current_time + target_time) + _conc_overhead_time_sec;
+    double diff = target_time + gc_time - _max_gc_time;
+    if (!is_double_leq_0(diff)) {
+      target_time -= diff;
+      if (is_double_leq_0(target_time)) {
+        target_time = -1.0;
+        break;
+      }
+    } else {
+      break;
+    }
+  }
+
+  return target_time;
+}
+
+// Basically, the _internal call does not remove expired entries.
+// This is for trying things out in the future and a couple
+// of other places (debugging).
+
+double G1MMUTrackerQueue::when_sec(double current_time, double pause_time) {
+  if (_DISABLE_MMU)
+    return 0.0;
+
+  MutexLockerEx x(MMUTracker_lock, Mutex::_no_safepoint_check_flag);
+  remove_expired_entries(current_time);
+
+  return when_internal(current_time, pause_time);
+}
+
+double G1MMUTrackerQueue::when_internal(double current_time,
+                                        double pause_time) {
+  // if the pause is over the maximum, just assume that it's the maximum
+  double adjusted_pause_time =
+    (pause_time > max_gc_time()) ?
max_gc_time() : pause_time; + double earliest_end = current_time + adjusted_pause_time; + double limit = earliest_end - _time_slice; + double gc_time = calculate_gc_time(earliest_end); + double diff = gc_time + adjusted_pause_time - max_gc_time(); + if (is_double_leq_0(diff)) + return 0.0; + + int index = _tail_index; + while ( 1 ) { + G1MMUTrackerQueueElem *elem = &_array[index]; + if (elem->end_time() > limit) { + if (elem->start_time() > limit) + diff -= elem->duration(); + else + diff -= elem->end_time() - limit; + if (is_double_leq_0(diff)) + return elem->end_time() + diff + _time_slice - adjusted_pause_time - current_time; + } + index = trim_index(index+1); + guarantee(index != trim_index(_head_index + 1), "should not go past head"); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp 2009-08-01 04:21:07.130513495 +0100 @@ -0,0 +1,130 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// Keeps track of the GC work and decides when it is OK to do GC work +// and for how long so that the MMU invariants are maintained. + +/***** ALL TIMES ARE IN SECS!!!!!!! 
*****/ + +// this is the "interface" +class G1MMUTracker: public CHeapObj { +protected: + double _time_slice; + double _max_gc_time; // this is per time slice + + double _conc_overhead_time_sec; + +public: + G1MMUTracker(double time_slice, double max_gc_time); + + void update_conc_overhead(double conc_overhead); + + virtual void add_pause(double start, double end, bool gc_thread) = 0; + virtual double longest_pause(double current_time) = 0; + virtual double when_sec(double current_time, double pause_time) = 0; + + double max_gc_time() { + return _max_gc_time - _conc_overhead_time_sec; + } + + inline bool now_max_gc(double current_time) { + return when_sec(current_time, max_gc_time()) < 0.00001; + } + + inline double when_max_gc_sec(double current_time) { + return when_sec(current_time, max_gc_time()); + } + + inline jlong when_max_gc_ms(double current_time) { + double when = when_max_gc_sec(current_time); + return (jlong) (when * 1000.0); + } + + inline jlong when_ms(double current_time, double pause_time) { + double when = when_sec(current_time, pause_time); + return (jlong) (when * 1000.0); + } +}; + +class G1MMUTrackerQueueElem VALUE_OBJ_CLASS_SPEC { +private: + double _start_time; + double _end_time; + +public: + inline double start_time() { return _start_time; } + inline double end_time() { return _end_time; } + inline double duration() { return _end_time - _start_time; } + + G1MMUTrackerQueueElem() { + _start_time = 0.0; + _end_time = 0.0; + } + + G1MMUTrackerQueueElem(double start_time, double end_time) { + _start_time = start_time; + _end_time = end_time; + } +}; + +// this is an implementation of the MMUTracker using a (fixed-size) queue +// that keeps track of all the recent pause times +class G1MMUTrackerQueue: public G1MMUTracker { +private: + enum PrivateConstants { + QueueLength = 64 + }; + + // The array keeps track of all the pauses that fall within a time + // slice (the last time slice during which pauses took place). + // The data structure implemented is a circular queue. + // Head "points" to the most recent addition, tail to the oldest one. + // The array is of fixed size and I don't think we'll need more than + // two or three entries with the current behaviour of G1 pauses. + // If the array is full, an easy fix is to look for the pauses with + // the shortest gap between them and concolidate them. + + G1MMUTrackerQueueElem _array[QueueLength]; + int _head_index; + int _tail_index; + int _no_entries; + + inline int trim_index(int index) { + return (index + QueueLength) % QueueLength; + } + + void remove_expired_entries(double current_time); + double calculate_gc_time(double current_time); + + double longest_pause_internal(double current_time); + double when_internal(double current_time, double pause_time); + +public: + G1MMUTrackerQueue(double time_slice, double max_gc_time); + + virtual void add_pause(double start, double end, bool gc_thread); + + virtual double longest_pause(double current_time); + virtual double when_sec(double current_time, double pause_time); +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp 2009-08-01 04:21:07.582231922 +0100 @@ -0,0 +1,348 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_g1MarkSweep.cpp.incl" + +class HeapRegion; + +void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, + bool clear_all_softrefs) { + assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); + + // hook up weak ref data so it can be used during Mark-Sweep + assert(GenMarkSweep::ref_processor() == NULL, "no stomping"); + assert(rp != NULL, "should be non-NULL"); + GenMarkSweep::_ref_processor = rp; + rp->setup_policy(clear_all_softrefs); + + // When collecting the permanent generation methodOops may be moving, + // so we either have to flush all bcp data or convert it into bci. + CodeCache::gc_prologue(); + Threads::gc_prologue(); + + // Increment the invocation count for the permanent generation, since it is + // implicitly collected whenever we do a full mark sweep collection. + SharedHeap* sh = SharedHeap::heap(); + sh->perm_gen()->stat_record()->invocations++; + + bool marked_for_unloading = false; + + allocate_stacks(); + + // We should save the marks of the currently locked biased monitors. + // The marking doesn't preserve the marks of biased objects. + BiasedLocking::preserve_marks(); + + mark_sweep_phase1(marked_for_unloading, clear_all_softrefs); + + if (VerifyDuringGC) { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + g1h->checkConcurrentMark(); + } + + mark_sweep_phase2(); + + // Don't add any more derived pointers during phase3 + COMPILER2_PRESENT(DerivedPointerTable::set_active(false)); + + mark_sweep_phase3(); + + mark_sweep_phase4(); + + GenMarkSweep::restore_marks(); + BiasedLocking::restore_marks(); + GenMarkSweep::deallocate_stacks(); + + // We must invalidate the perm-gen rs, so that it gets rebuilt. + GenRemSet* rs = sh->rem_set(); + rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/); + + // "free at last gc" is calculated from these. + // CHF: cheating for now!!! 
+ // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity()); + // Universe::set_heap_used_at_last_gc(Universe::heap()->used()); + + Threads::gc_epilogue(); + CodeCache::gc_epilogue(); + + // refs processing: clean slate + GenMarkSweep::_ref_processor = NULL; +} + + +void G1MarkSweep::allocate_stacks() { + GenMarkSweep::_preserved_count_max = 0; + GenMarkSweep::_preserved_marks = NULL; + GenMarkSweep::_preserved_count = 0; + GenMarkSweep::_preserved_mark_stack = NULL; + GenMarkSweep::_preserved_oop_stack = NULL; + + GenMarkSweep::_marking_stack = + new (ResourceObj::C_HEAP) GrowableArray(4000, true); + + size_t size = SystemDictionary::number_of_classes() * 2; + GenMarkSweep::_revisit_klass_stack = + new (ResourceObj::C_HEAP) GrowableArray((int)size, true); +} + +void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading, + bool clear_all_softrefs) { + // Recursively traverse all live objects and mark them + EventMark m("1 mark object"); + TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty); + GenMarkSweep::trace(" 1"); + + SharedHeap* sh = SharedHeap::heap(); + + sh->process_strong_roots(true, // Collecting permanent generation. + SharedHeap::SO_SystemClasses, + &GenMarkSweep::follow_root_closure, + &GenMarkSweep::follow_root_closure); + + // Process reference objects found during marking + ReferenceProcessor* rp = GenMarkSweep::ref_processor(); + rp->setup_policy(clear_all_softrefs); + rp->process_discovered_references(&GenMarkSweep::is_alive, + &GenMarkSweep::keep_alive, + &GenMarkSweep::follow_stack_closure, + NULL); + + // Follow system dictionary roots and unload classes + bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive); + assert(GenMarkSweep::_marking_stack->is_empty(), + "stack should be empty by now"); + + // Follow code cache roots (has to be done after system dictionary, + // assumes all live klasses are marked) + CodeCache::do_unloading(&GenMarkSweep::is_alive, + &GenMarkSweep::keep_alive, + purged_class); + GenMarkSweep::follow_stack(); + + // Update subklass/sibling/implementor links of live klasses + GenMarkSweep::follow_weak_klass_links(); + assert(GenMarkSweep::_marking_stack->is_empty(), + "stack should be empty by now"); + + // Visit symbol and interned string tables and delete unmarked oops + SymbolTable::unlink(&GenMarkSweep::is_alive); + StringTable::unlink(&GenMarkSweep::is_alive); + + assert(GenMarkSweep::_marking_stack->is_empty(), + "stack should be empty by now"); +} + +class G1PrepareCompactClosure: public HeapRegionClosure { + ModRefBarrierSet* _mrbs; + CompactPoint _cp; + + void free_humongous_region(HeapRegion* hr) { + HeapWord* bot = hr->bottom(); + HeapWord* end = hr->end(); + assert(hr->startsHumongous(), + "Only the start of a humongous region should be freed."); + G1CollectedHeap::heap()->free_region(hr); + hr->prepare_for_compaction(&_cp); + // Also clear the part of the card table that will be unused after + // compaction. 
+ _mrbs->clear(MemRegion(hr->compaction_top(), hr->end())); + } + +public: + G1PrepareCompactClosure(CompactibleSpace* cs) : + _cp(NULL, cs, cs->initialize_threshold()), + _mrbs(G1CollectedHeap::heap()->mr_bs()) + {} + bool doHeapRegion(HeapRegion* hr) { + if (hr->isHumongous()) { + if (hr->startsHumongous()) { + oop obj = oop(hr->bottom()); + if (obj->is_gc_marked()) { + obj->forward_to(obj); + } else { + free_humongous_region(hr); + } + } else { + assert(hr->continuesHumongous(), "Invalid humongous."); + } + } else { + hr->prepare_for_compaction(&_cp); + // Also clear the part of the card table that will be unused after + // compaction. + _mrbs->clear(MemRegion(hr->compaction_top(), hr->end())); + } + return false; + } +}; + +// Finds the first HeapRegion. +class FindFirstRegionClosure: public HeapRegionClosure { + HeapRegion* _a_region; +public: + FindFirstRegionClosure() : _a_region(NULL) {} + bool doHeapRegion(HeapRegion* r) { + _a_region = r; + return true; + } + HeapRegion* result() { return _a_region; } +}; + +void G1MarkSweep::mark_sweep_phase2() { + // Now all live objects are marked, compute the new object addresses. + + // It is imperative that we traverse perm_gen LAST. If dead space is + // allowed a range of dead object may get overwritten by a dead int + // array. If perm_gen is not traversed last a klassOop may get + // overwritten. This is fine since it is dead, but if the class has dead + // instances we have to skip them, and in order to find their size we + // need the klassOop! + // + // It is not required that we traverse spaces in the same order in + // phase2, phase3 and phase4, but the ValidateMarkSweep live oops + // tracking expects us to do so. See comment under phase4. + + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + Generation* pg = g1h->perm_gen(); + + EventMark m("2 compute new addresses"); + TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty); + GenMarkSweep::trace("2"); + + FindFirstRegionClosure cl; + g1h->heap_region_iterate(&cl); + HeapRegion *r = cl.result(); + CompactibleSpace* sp = r; + if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) { + sp = r->next_compaction_space(); + } + + G1PrepareCompactClosure blk(sp); + g1h->heap_region_iterate(&blk); + + CompactPoint perm_cp(pg, NULL, NULL); + pg->prepare_for_compaction(&perm_cp); +} + +class G1AdjustPointersClosure: public HeapRegionClosure { + public: + bool doHeapRegion(HeapRegion* r) { + if (r->isHumongous()) { + if (r->startsHumongous()) { + // We must adjust the pointers on the single H object. + oop obj = oop(r->bottom()); + debug_only(GenMarkSweep::track_interior_pointers(obj)); + // point all the oops to the new location + obj->adjust_pointers(); + debug_only(GenMarkSweep::check_interior_pointers()); + } + } else { + // This really ought to be "as_CompactibleSpace"... + r->adjust_pointers(); + } + return false; + } +}; + +void G1MarkSweep::mark_sweep_phase3() { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + Generation* pg = g1h->perm_gen(); + + // Adjust the pointers to reflect the new locations + EventMark m("3 adjust pointers"); + TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty); + GenMarkSweep::trace("3"); + + SharedHeap* sh = SharedHeap::heap(); + + sh->process_strong_roots(true, // Collecting permanent generation. 
+ SharedHeap::SO_AllClasses, + &GenMarkSweep::adjust_root_pointer_closure, + &GenMarkSweep::adjust_pointer_closure); + + g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure); + + // Now adjust pointers in remaining weak roots. (All of which should + // have been cleared if they pointed to non-surviving objects.) + g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure, + &GenMarkSweep::adjust_pointer_closure); + + GenMarkSweep::adjust_marks(); + + G1AdjustPointersClosure blk; + g1h->heap_region_iterate(&blk); + pg->adjust_pointers(); +} + +class G1SpaceCompactClosure: public HeapRegionClosure { +public: + G1SpaceCompactClosure() {} + + bool doHeapRegion(HeapRegion* hr) { + if (hr->isHumongous()) { + if (hr->startsHumongous()) { + oop obj = oop(hr->bottom()); + if (obj->is_gc_marked()) { + obj->init_mark(); + } else { + assert(hr->is_empty(), "Should have been cleared in phase 2."); + } + hr->reset_during_compaction(); + } + } else { + hr->compact(); + } + return false; + } +}; + +void G1MarkSweep::mark_sweep_phase4() { + // All pointers are now adjusted, move objects accordingly + + // It is imperative that we traverse perm_gen first in phase4. All + // classes must be allocated earlier than their instances, and traversing + // perm_gen first makes sure that all klassOops have moved to their new + // location before any instance does a dispatch through it's klass! + + // The ValidateMarkSweep live oops tracking expects us to traverse spaces + // in the same order in phase2, phase3 and phase4. We don't quite do that + // here (perm_gen first rather than last), so we tell the validate code + // to use a higher index (saved from phase2) when verifying perm_gen. + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + Generation* pg = g1h->perm_gen(); + + EventMark m("4 compact heap"); + TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty); + GenMarkSweep::trace("4"); + + pg->compact(); + + G1SpaceCompactClosure blk; + g1h->heap_region_iterate(&blk); + +} + +// Local Variables: *** +// c-indentation-style: gnu *** +// End: *** --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp 2009-08-01 04:21:08.040356774 +0100 @@ -0,0 +1,57 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class ReferenceProcessor; + +// G1MarkSweep takes care of global mark-compact garbage collection for a +// G1CollectedHeap using a four-phase pointer forwarding algorithm. 
All +// generations are assumed to support marking; those that can also support +// compaction. +// +// Class unloading will only occur when a full gc is invoked. + + +class G1MarkSweep : AllStatic { + friend class VM_G1MarkSweep; + friend class Scavenge; + + public: + + static void invoke_at_safepoint(ReferenceProcessor* rp, + bool clear_all_softrefs); + + private: + + // Mark live objects + static void mark_sweep_phase1(bool& marked_for_deopt, + bool clear_all_softrefs); + // Calculate new addresses + static void mark_sweep_phase2(); + // Update pointers + static void mark_sweep_phase3(); + // Move objects to new positions + static void mark_sweep_phase4(); + + static void allocate_stacks(); +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp 2009-08-01 04:21:08.484943586 +0100 @@ -0,0 +1,223 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class HeapRegion; +class G1CollectedHeap; +class G1RemSet; +class HRInto_G1RemSet; +class G1RemSet; +class ConcurrentMark; +class DirtyCardToOopClosure; +class CMBitMap; +class CMMarkStack; +class G1ParScanThreadState; + +// A class that scans oops in a given heap region (much as OopsInGenClosure +// scans oops in a generation.) 
+class OopsInHeapRegionClosure: public OopsInGenClosure { +protected: + HeapRegion* _from; +public: + virtual void set_region(HeapRegion* from) { _from = from; } +}; + + +class G1ScanAndBalanceClosure : public OopClosure { + G1CollectedHeap* _g1; + static int _nq; +public: + G1ScanAndBalanceClosure(G1CollectedHeap* g1) : _g1(g1) { } + inline void do_oop_nv(oop* p); + inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); } +}; + +class G1ParClosureSuper : public OopsInHeapRegionClosure { +protected: + G1CollectedHeap* _g1; + G1RemSet* _g1_rem; + ConcurrentMark* _cm; + G1ParScanThreadState* _par_scan_state; +public: + G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state); + bool apply_to_weak_ref_discovered_field() { return true; } +}; + +class G1ParScanClosure : public G1ParClosureSuper { +public: + G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : + G1ParClosureSuper(g1, par_scan_state) { } + void do_oop_nv(oop* p); // should be made inline + inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } + virtual void do_oop(oop* p) { do_oop_nv(p); } + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +}; + +#define G1_PARTIAL_ARRAY_MASK 1 + +inline bool has_partial_array_mask(oop* ref) { + return (intptr_t) ref & G1_PARTIAL_ARRAY_MASK; +} + +inline oop* set_partial_array_mask(oop obj) { + return (oop*) ((intptr_t) obj | G1_PARTIAL_ARRAY_MASK); +} + +inline oop clear_partial_array_mask(oop* ref) { + return oop((intptr_t) ref & ~G1_PARTIAL_ARRAY_MASK); +} + +class G1ParScanPartialArrayClosure : public G1ParClosureSuper { + G1ParScanClosure _scanner; + template void process_array_chunk(oop obj, int start, int end); +public: + G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : + G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { } + void do_oop_nv(oop* p); + void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } + virtual void do_oop(oop* p) { do_oop_nv(p); } + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +}; + + +class G1ParCopyHelper : public G1ParClosureSuper { + G1ParScanClosure *_scanner; +protected: + void mark_forwardee(oop* p); + oop copy_to_survivor_space(oop obj); +public: + G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, + G1ParScanClosure *scanner) : + G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { } +}; + +template +class G1ParCopyClosure : public G1ParCopyHelper { + G1ParScanClosure _scanner; + void do_oop_work(oop* p); + void do_oop_work(narrowOop* p) { guarantee(false, "NYI"); } +public: + G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : + _scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { } + inline void do_oop_nv(oop* p) { + do_oop_work(p); + if (do_mark_forwardee) + mark_forwardee(p); + } + inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } + virtual void do_oop(oop* p) { do_oop_nv(p); } + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +}; + +typedef G1ParCopyClosure G1ParScanExtRootClosure; +typedef G1ParCopyClosure G1ParScanPermClosure; +typedef G1ParCopyClosure G1ParScanAndMarkExtRootClosure; +typedef G1ParCopyClosure G1ParScanAndMarkPermClosure; +typedef G1ParCopyClosure G1ParScanHeapRSClosure; +typedef G1ParCopyClosure G1ParScanAndMarkHeapRSClosure; +// This is the only case when we set skip_cset_test. 
Basically, this +// closure is (should?) only be called directly while we're draining +// the overflow and task queues. In that case we know that the +// reference in question points into the collection set, otherwise we +// would not have pushed it on the queue. +typedef G1ParCopyClosure G1ParScanHeapEvacClosure; +// We need a separate closure to handle references during evacuation +// failure processing, as it cannot asume that the reference already + // points to the collection set (like G1ParScanHeapEvacClosure does). +typedef G1ParCopyClosure G1ParScanHeapEvacFailureClosure; + +class FilterIntoCSClosure: public OopClosure { + G1CollectedHeap* _g1; + OopClosure* _oc; + DirtyCardToOopClosure* _dcto_cl; +public: + FilterIntoCSClosure( DirtyCardToOopClosure* dcto_cl, + G1CollectedHeap* g1, OopClosure* oc) : + _dcto_cl(dcto_cl), _g1(g1), _oc(oc) + {} + inline void do_oop_nv(oop* p); + inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); } + bool apply_to_weak_ref_discovered_field() { return true; } + bool do_header() { return false; } +}; + +class FilterInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure { + G1CollectedHeap* _g1; + OopsInHeapRegionClosure* _oc; +public: + FilterInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1, + OopsInHeapRegionClosure* oc) : + _g1(g1), _oc(oc) + {} + inline void do_oop_nv(oop* p); + inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); } + bool apply_to_weak_ref_discovered_field() { return true; } + bool do_header() { return false; } + void set_region(HeapRegion* from) { + _oc->set_region(from); + } +}; + +class FilterAndMarkInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure { + G1CollectedHeap* _g1; + ConcurrentMark* _cm; + OopsInHeapRegionClosure* _oc; +public: + FilterAndMarkInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1, + OopsInHeapRegionClosure* oc, + ConcurrentMark* cm) + : _g1(g1), _oc(oc), _cm(cm) { } + + inline void do_oop_nv(oop* p); + inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); } + bool apply_to_weak_ref_discovered_field() { return true; } + bool do_header() { return false; } + void set_region(HeapRegion* from) { + _oc->set_region(from); + } +}; + +class FilterOutOfRegionClosure: public OopClosure { + HeapWord* _r_bottom; + HeapWord* _r_end; + OopClosure* _oc; + int _out_of_region; +public: + FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc); + inline void do_oop_nv(oop* p); + inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); } + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); } + bool apply_to_weak_ref_discovered_field() { return true; } + bool do_header() { return false; } + int out_of_region() { return _out_of_region; } +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp 2009-08-01 04:21:08.947500448 +0100 @@ -0,0 +1,112 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +/* + * This really ought to be an inline function, but apparently the C++ + * compiler sometimes sees fit to ignore inline declarations. Sigh. + */ + +// This must a ifdef'ed because the counting it controls is in a +// perf-critical inner loop. +#define FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT 0 + +inline void FilterIntoCSClosure::do_oop_nv(oop* p) { + oop obj = *p; + if (obj != NULL && _g1->obj_in_cs(obj)) { + _oc->do_oop(p); +#if FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT + _dcto_cl->incr_count(); +#endif + } +} + +inline void FilterIntoCSClosure::do_oop(oop* p) +{ + do_oop_nv(p); +} + +#define FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT 0 + +inline void FilterOutOfRegionClosure::do_oop_nv(oop* p) { + oop obj = *p; + HeapWord* obj_hw = (HeapWord*)obj; + if (obj_hw != NULL && (obj_hw < _r_bottom || obj_hw >= _r_end)) { + _oc->do_oop(p); +#if FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT + _out_of_region++; +#endif + } +} + +inline void FilterOutOfRegionClosure::do_oop(oop* p) +{ + do_oop_nv(p); +} + +inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(oop* p) { + oop obj = *p; + if (obj != NULL && _g1->obj_in_cs(obj)) + _oc->do_oop(p); +} + +inline void FilterInHeapRegionAndIntoCSClosure::do_oop(oop* p) +{ + do_oop_nv(p); +} + + +inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(oop* p) { + oop obj = *p; + if (obj != NULL) { + HeapRegion* hr = _g1->heap_region_containing((HeapWord*) obj); + if (hr != NULL) { + if (hr->in_collection_set()) + _oc->do_oop(p); + else if (!hr->is_young()) + _cm->grayRoot(obj); + } + } +} + +inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop(oop* p) +{ + do_oop_nv(p); +} + +inline void G1ScanAndBalanceClosure::do_oop_nv(oop* p) { + RefToScanQueue* q; + if (ParallelGCThreads > 0) { + // Deal the work out equally. + _nq = (_nq + 1) % ParallelGCThreads; + q = _g1->task_queue(_nq); + } else { + q = _g1->task_queue(0); + } + bool nooverflow = q->push(p); + guarantee(nooverflow, "Overflow during poplularity region processing"); +} + +inline void G1ScanAndBalanceClosure::do_oop(oop* p) { + do_oop_nv(p); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp 2009-08-01 04:21:09.376797165 +0100 @@ -0,0 +1,1066 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_g1RemSet.cpp.incl" + +#define CARD_REPEAT_HISTO 0 + +#if CARD_REPEAT_HISTO +static size_t ct_freq_sz; +static jbyte* ct_freq = NULL; + +void init_ct_freq_table(size_t heap_sz_bytes) { + if (ct_freq == NULL) { + ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size; + ct_freq = new jbyte[ct_freq_sz]; + for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0; + } +} + +void ct_freq_note_card(size_t index) { + assert(0 <= index && index < ct_freq_sz, "Bounds error."); + if (ct_freq[index] < 100) { ct_freq[index]++; } +} + +static IntHistogram card_repeat_count(10, 10); + +void ct_freq_update_histo_and_reset() { + for (size_t j = 0; j < ct_freq_sz; j++) { + card_repeat_count.add_entry(ct_freq[j]); + ct_freq[j] = 0; + } + +} +#endif + + +class IntoCSOopClosure: public OopsInHeapRegionClosure { + OopsInHeapRegionClosure* _blk; + G1CollectedHeap* _g1; +public: + IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) : + _g1(g1), _blk(blk) {} + void set_region(HeapRegion* from) { + _blk->set_region(from); + } + virtual void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + virtual void do_oop(oop* p) { + oop obj = *p; + if (_g1->obj_in_cs(obj)) _blk->do_oop(p); + } + bool apply_to_weak_ref_discovered_field() { return true; } + bool idempotent() { return true; } +}; + +class IntoCSRegionClosure: public HeapRegionClosure { + IntoCSOopClosure _blk; + G1CollectedHeap* _g1; +public: + IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) : + _g1(g1), _blk(g1, blk) {} + bool doHeapRegion(HeapRegion* r) { + if (!r->in_collection_set()) { + _blk.set_region(r); + if (r->isHumongous()) { + if (r->startsHumongous()) { + oop obj = oop(r->bottom()); + obj->oop_iterate(&_blk); + } + } else { + r->oop_before_save_marks_iterate(&_blk); + } + } + return false; + } +}; + +void +StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc, + int worker_i) { + IntoCSRegionClosure rc(_g1, oc); + _g1->heap_region_iterate(&rc); +} + +class UpdateRSOutOfRegionClosure: public HeapRegionClosure { + G1CollectedHeap* _g1h; + ModRefBarrierSet* _mr_bs; + UpdateRSOopClosure _cl; + int _worker_i; +public: + UpdateRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : + _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i), + _mr_bs(g1->mr_bs()), + _worker_i(worker_i), + _g1h(g1) + {} + bool doHeapRegion(HeapRegion* r) { + if (!r->in_collection_set() && !r->continuesHumongous()) { + _cl.set_from(r); + r->set_next_filter_kind(HeapRegionDCTOC::OutOfRegionFilterKind); + _mr_bs->mod_oop_in_space_iterate(r, &_cl, true, true); + } + return false; + } +}; + +class VerifyRSCleanCardOopClosure: public OopClosure { + G1CollectedHeap* _g1; +public: + VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {} + + virtual void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + virtual void do_oop(oop* p) { + oop obj = *p; + HeapRegion* to = _g1->heap_region_containing(obj); + 
guarantee(to == NULL || !to->in_collection_set(), + "Missed a rem set member."); + } +}; + +HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs) + : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()), + _cg1r(g1->concurrent_g1_refine()), + _par_traversal_in_progress(false), _new_refs(NULL), + _cards_scanned(NULL), _total_cards_scanned(0) +{ + _seq_task = new SubTasksDone(NumSeqTasks); + guarantee(n_workers() > 0, "There should be some workers"); + _new_refs = NEW_C_HEAP_ARRAY(GrowableArray*, n_workers()); + for (uint i = 0; i < n_workers(); i++) { + _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray(8192,true); + } +} + +HRInto_G1RemSet::~HRInto_G1RemSet() { + delete _seq_task; + for (uint i = 0; i < n_workers(); i++) { + delete _new_refs[i]; + } + FREE_C_HEAP_ARRAY(GrowableArray*, _new_refs); +} + +void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) { + if (_g1->is_in_g1_reserved(mr.start())) { + _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size)); + if (_start_first == NULL) _start_first = mr.start(); + } +} + +class ScanRSClosure : public HeapRegionClosure { + size_t _cards_done, _cards; + G1CollectedHeap* _g1h; + OopsInHeapRegionClosure* _oc; + G1BlockOffsetSharedArray* _bot_shared; + CardTableModRefBS *_ct_bs; + int _worker_i; + bool _try_claimed; + size_t _min_skip_distance, _max_skip_distance; +public: + ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) : + _oc(oc), + _cards(0), + _cards_done(0), + _worker_i(worker_i), + _try_claimed(false) + { + _g1h = G1CollectedHeap::heap(); + _bot_shared = _g1h->bot_shared(); + _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set()); + _min_skip_distance = 16; + _max_skip_distance = 2 * _g1h->n_par_threads() * _min_skip_distance; + } + + void set_try_claimed() { _try_claimed = true; } + + void scanCard(size_t index, HeapRegion *r) { + _cards_done++; + DirtyCardToOopClosure* cl = + r->new_dcto_closure(_oc, + CardTableModRefBS::Precise, + HeapRegionDCTOC::IntoCSFilterKind); + + // Set the "from" region in the closure. + _oc->set_region(r); + HeapWord* card_start = _bot_shared->address_for_index(index); + HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words; + Space *sp = SharedHeap::heap()->space_containing(card_start); + MemRegion sm_region; + if (ParallelGCThreads > 0) { + // first find the used area + sm_region = sp->used_region_at_save_marks(); + } else { + // The closure is not idempotent. We shouldn't look at objects + // allocated during the GC. + sm_region = sp->used_region_at_save_marks(); + } + MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end)); + if (!mr.is_empty()) { + cl->do_MemRegion(mr); + } + } + + void printCard(HeapRegion* card_region, size_t card_index, + HeapWord* card_start) { + gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") " + "RS names card %p: " + "[" PTR_FORMAT ", " PTR_FORMAT ")", + _worker_i, + card_region->bottom(), card_region->end(), + card_index, + card_start, card_start + G1BlockOffsetSharedArray::N_words); + } + + bool doHeapRegion(HeapRegion* r) { + assert(r->in_collection_set(), "should only be called on elements of CS."); + HeapRegionRemSet* hrrs = r->rem_set(); + if (hrrs->iter_is_complete()) return false; // All done. + if (!_try_claimed && !hrrs->claim_iter()) return false; + // If we didn't return above, then + // _try_claimed || r->claim_iter() + // is true: either we're supposed to work on claimed-but-not-complete + // regions, or we successfully claimed the region. 
+ HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i); + hrrs->init_iterator(iter); + size_t card_index; + size_t skip_distance = 0, current_card = 0, jump_to_card = 0; + while (iter->has_next(card_index)) { + if (current_card < jump_to_card) { + ++current_card; + continue; + } + HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index); +#if 0 + gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n", + card_start, card_start + CardTableModRefBS::card_size_in_words); +#endif + + HeapRegion* card_region = _g1h->heap_region_containing(card_start); + assert(card_region != NULL, "Yielding cards not in the heap?"); + _cards++; + + // If the card is dirty, then we will scan it during updateRS. + if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) { + if (!_ct_bs->is_card_claimed(card_index) && _ct_bs->claim_card(card_index)) { + scanCard(card_index, card_region); + } else if (_try_claimed) { + if (jump_to_card == 0 || jump_to_card != current_card) { + // We did some useful work in the previous iteration. + // Decrease the distance. + skip_distance = MAX2(skip_distance >> 1, _min_skip_distance); + } else { + // Previous iteration resulted in a claim failure. + // Increase the distance. + skip_distance = MIN2(skip_distance << 1, _max_skip_distance); + } + jump_to_card = current_card + skip_distance; + } + } + ++current_card; + } + if (!_try_claimed) { + hrrs->set_iter_complete(); + } + return false; + } + // Set all cards back to clean. + void cleanup() {_g1h->cleanUpCardTable();} + size_t cards_done() { return _cards_done;} + size_t cards_looked_up() { return _cards;} +}; + +// We want the parallel threads to start their scanning at +// different collection set regions to avoid contention. 
+// If we have: +// n collection set regions +// p threads +// Then thread t will start at region t * floor (n/p) + +HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) { + HeapRegion* result = _g1p->collection_set(); + if (ParallelGCThreads > 0) { + size_t cs_size = _g1p->collection_set_size(); + int n_workers = _g1->workers()->total_workers(); + size_t cs_spans = cs_size / n_workers; + size_t ind = cs_spans * worker_i; + for (size_t i = 0; i < ind; i++) + result = result->next_in_collection_set(); + } + return result; +} + +void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) { + double rs_time_start = os::elapsedTime(); + HeapRegion *startRegion = calculateStartRegion(worker_i); + + BufferingOopsInHeapRegionClosure boc(oc); + ScanRSClosure scanRScl(&boc, worker_i); + _g1->collection_set_iterate_from(startRegion, &scanRScl); + scanRScl.set_try_claimed(); + _g1->collection_set_iterate_from(startRegion, &scanRScl); + + boc.done(); + double closure_app_time_sec = boc.closure_app_seconds(); + double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) - + closure_app_time_sec; + double closure_app_time_ms = closure_app_time_sec * 1000.0; + + assert( _cards_scanned != NULL, "invariant" ); + _cards_scanned[worker_i] = scanRScl.cards_done(); + + _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0); + _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0); + + double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i); + if (scan_new_refs_time_ms > 0.0) { + closure_app_time_ms += scan_new_refs_time_ms; + } + + _g1p->record_obj_copy_time(worker_i, closure_app_time_ms); +} + +void HRInto_G1RemSet::updateRS(int worker_i) { + ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine(); + + double start = os::elapsedTime(); + _g1p->record_update_rs_start_time(worker_i, start * 1000.0); + + if (G1RSBarrierUseQueue && !cg1r->do_traversal()) { + // Apply the appropriate closure to all remaining log entries. + _g1->iterate_dirty_card_closure(false, worker_i); + // Now there should be no dirty cards. + if (G1RSLogCheckCardTable) { + CountNonCleanMemRegionClosure cl(_g1); + _ct_bs->mod_card_iterate(&cl); + // XXX This isn't true any more: keeping cards of young regions + // marked dirty broke it. Need some reasonable fix. + guarantee(cl.n() == 0, "Card table should be clean."); + } + } else { + UpdateRSOutOfRegionClosure update_rs(_g1, worker_i); + _g1->heap_region_iterate(&update_rs); + // We did a traversal; no further one is necessary. 
+ if (G1RSBarrierUseQueue) { + assert(cg1r->do_traversal(), "Or we shouldn't have gotten here."); + cg1r->set_pya_cancel(); + } + if (_cg1r->use_cache()) { + _cg1r->clear_and_record_card_counts(); + _cg1r->clear_hot_cache(); + } + } + _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0); +} + +#ifndef PRODUCT +class PrintRSClosure : public HeapRegionClosure { + int _count; +public: + PrintRSClosure() : _count(0) {} + bool doHeapRegion(HeapRegion* r) { + HeapRegionRemSet* hrrs = r->rem_set(); + _count += (int) hrrs->occupied(); + if (hrrs->occupied() == 0) { + gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") " + "has no remset entries\n", + r->bottom(), r->end()); + } else { + gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n", + r->bottom(), r->end()); + r->print(); + hrrs->print(); + gclog_or_tty->print("\nDone printing rem set\n"); + } + return false; + } + int occupied() {return _count;} +}; +#endif + +class CountRSSizeClosure: public HeapRegionClosure { + size_t _n; + size_t _tot; + size_t _max; + HeapRegion* _max_r; + enum { + N = 20, + MIN = 6 + }; + int _histo[N]; +public: + CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) { + for (int i = 0; i < N; i++) _histo[i] = 0; + } + bool doHeapRegion(HeapRegion* r) { + if (!r->continuesHumongous()) { + size_t occ = r->rem_set()->occupied(); + _n++; + _tot += occ; + if (occ > _max) { + _max = occ; + _max_r = r; + } + // Fit it into a histo bin. + int s = 1 << MIN; + int i = 0; + while (occ > (size_t) s && i < (N-1)) { + s = s << 1; + i++; + } + _histo[i]++; + } + return false; + } + size_t n() { return _n; } + size_t tot() { return _tot; } + size_t mx() { return _max; } + HeapRegion* mxr() { return _max_r; } + void print_histo() { + int mx = N; + while (mx >= 0) { + if (_histo[mx-1] > 0) break; + mx--; + } + gclog_or_tty->print_cr("Number of regions with given RS sizes:"); + gclog_or_tty->print_cr(" <= %8d %8d", 1 << MIN, _histo[0]); + for (int i = 1; i < mx-1; i++) { + gclog_or_tty->print_cr(" %8d - %8d %8d", + (1 << (MIN + i - 1)) + 1, + 1 << (MIN + i), + _histo[i]); + } + gclog_or_tty->print_cr(" > %8d %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]); + } +}; + +void +HRInto_G1RemSet::scanNewRefsRS(OopsInHeapRegionClosure* oc, + int worker_i) { + double scan_new_refs_start_sec = os::elapsedTime(); + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set()); + for (int i = 0; i < _new_refs[worker_i]->length(); i++) { + oop* p = _new_refs[worker_i]->at(i); + oop obj = *p; + // *p was in the collection set when p was pushed on "_new_refs", but + // another thread may have processed this location from an RS, so it + // might not point into the CS any longer. If so, it's obviously been + // processed, and we don't need to do anything further. + if (g1h->obj_in_cs(obj)) { + HeapRegion* r = g1h->heap_region_containing(p); + + DEBUG_ONLY(HeapRegion* to = g1h->heap_region_containing(obj)); + oc->set_region(r); + // If "p" has already been processed concurrently, this is + // idempotent. 
+ oc->do_oop(p); + } + } + _g1p->record_scan_new_refs_time(worker_i, + (os::elapsedTime() - scan_new_refs_start_sec) + * 1000.0); +} + +void HRInto_G1RemSet::set_par_traversal(bool b) { + _par_traversal_in_progress = b; + HeapRegionRemSet::set_par_traversal(b); +} + +void HRInto_G1RemSet::cleanupHRRS() { + HeapRegionRemSet::cleanup(); +} + +void +HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc, + int worker_i) { +#if CARD_REPEAT_HISTO + ct_freq_update_histo_and_reset(); +#endif + if (worker_i == 0) { + _cg1r->clear_and_record_card_counts(); + } + + // Make this into a command-line flag... + if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) { + CountRSSizeClosure count_cl; + _g1->heap_region_iterate(&count_cl); + gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, " + "max region is " PTR_FORMAT, + count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(), + count_cl.mx(), count_cl.mxr()); + count_cl.print_histo(); + } + + if (ParallelGCThreads > 0) { + // The two flags below were introduced temporarily to serialize + // the updating and scanning of remembered sets. There are some + // race conditions when these two operations are done in parallel + // and they are causing failures. When we resolve said race + // conditions, we'll revert back to parallel remembered set + // updating and scanning. See CRs 6677707 and 6677708. + if (G1ParallelRSetUpdatingEnabled || (worker_i == 0)) { + updateRS(worker_i); + scanNewRefsRS(oc, worker_i); + } else { + _g1p->record_update_rs_start_time(worker_i, os::elapsedTime()); + _g1p->record_update_rs_processed_buffers(worker_i, 0.0); + _g1p->record_update_rs_time(worker_i, 0.0); + _g1p->record_scan_new_refs_time(worker_i, 0.0); + } + if (G1ParallelRSetScanningEnabled || (worker_i == 0)) { + scanRS(oc, worker_i); + } else { + _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime()); + _g1p->record_scan_rs_time(worker_i, 0.0); + } + } else { + assert(worker_i == 0, "invariant"); + updateRS(0); + scanNewRefsRS(oc, 0); + scanRS(oc, 0); + } +} + +void HRInto_G1RemSet:: +prepare_for_oops_into_collection_set_do() { +#if G1_REM_SET_LOGGING + PrintRSClosure cl; + _g1->collection_set_iterate(&cl); +#endif + cleanupHRRS(); + ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine(); + _g1->set_refine_cte_cl_concurrency(false); + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + dcqs.concatenate_logs(); + + assert(!_par_traversal_in_progress, "Invariant between iterations."); + if (ParallelGCThreads > 0) { + set_par_traversal(true); + _seq_task->set_par_threads((int)n_workers()); + if (cg1r->do_traversal()) { + updateRS(0); + // Have to do this again after updaters + cleanupHRRS(); + } + } + guarantee( _cards_scanned == NULL, "invariant" ); + _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers()); + for (uint i = 0; i < n_workers(); ++i) { + _cards_scanned[i] = 0; + } + _total_cards_scanned = 0; +} + + +class cleanUpIteratorsClosure : public HeapRegionClosure { + bool doHeapRegion(HeapRegion *r) { + HeapRegionRemSet* hrrs = r->rem_set(); + hrrs->init_for_par_iteration(); + return false; + } +}; + +class UpdateRSetOopsIntoCSImmediate : public OopClosure { + G1CollectedHeap* _g1; +public: + UpdateRSetOopsIntoCSImmediate(G1CollectedHeap* g1) : _g1(g1) { } + virtual void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + virtual void do_oop(oop* p) { + HeapRegion* to = _g1->heap_region_containing(*p); + if (to->in_collection_set()) { + to->rem_set()->add_reference(p, 0); + } + } +}; + +class 
UpdateRSetOopsIntoCSDeferred : public OopClosure { + G1CollectedHeap* _g1; + CardTableModRefBS* _ct_bs; + DirtyCardQueue* _dcq; +public: + UpdateRSetOopsIntoCSDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : + _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) { } + virtual void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + virtual void do_oop(oop* p) { + oop obj = *p; + if (_g1->obj_in_cs(obj)) { + size_t card_index = _ct_bs->index_for(p); + if (_ct_bs->mark_card_deferred(card_index)) { + _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); + } + } + } +}; + +void HRInto_G1RemSet::new_refs_iterate(OopClosure* cl) { + for (size_t i = 0; i < n_workers(); i++) { + for (int j = 0; j < _new_refs[i]->length(); j++) { + oop* p = _new_refs[i]->at(j); + cl->do_oop(p); + } + } +} + +void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() { + guarantee( _cards_scanned != NULL, "invariant" ); + _total_cards_scanned = 0; + for (uint i = 0; i < n_workers(); ++i) + _total_cards_scanned += _cards_scanned[i]; + FREE_C_HEAP_ARRAY(size_t, _cards_scanned); + _cards_scanned = NULL; + // Cleanup after copy +#if G1_REM_SET_LOGGING + PrintRSClosure cl; + _g1->heap_region_iterate(&cl); +#endif + _g1->set_refine_cte_cl_concurrency(true); + cleanUpIteratorsClosure iterClosure; + _g1->collection_set_iterate(&iterClosure); + // Set all cards back to clean. + _g1->cleanUpCardTable(); + if (ParallelGCThreads > 0) { + ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine(); + if (cg1r->do_traversal()) { + cg1r->cg1rThread()->set_do_traversal(false); + } + set_par_traversal(false); + } + + if (_g1->evacuation_failed()) { + // Restore remembered sets for the regions pointing into + // the collection set. + if (G1DeferredRSUpdate) { + DirtyCardQueue dcq(&_g1->dirty_card_queue_set()); + UpdateRSetOopsIntoCSDeferred deferred_update(_g1, &dcq); + new_refs_iterate(&deferred_update); + } else { + UpdateRSetOopsIntoCSImmediate immediate_update(_g1); + new_refs_iterate(&immediate_update); + } + } + for (uint i = 0; i < n_workers(); i++) { + _new_refs[i]->clear(); + } + + assert(!_par_traversal_in_progress, "Invariant between iterations."); +} + +class UpdateRSObjectClosure: public ObjectClosure { + UpdateRSOopClosure* _update_rs_oop_cl; +public: + UpdateRSObjectClosure(UpdateRSOopClosure* update_rs_oop_cl) : + _update_rs_oop_cl(update_rs_oop_cl) {} + void do_object(oop obj) { + obj->oop_iterate(_update_rs_oop_cl); + } + +}; + +class ScrubRSClosure: public HeapRegionClosure { + G1CollectedHeap* _g1h; + BitMap* _region_bm; + BitMap* _card_bm; + CardTableModRefBS* _ctbs; +public: + ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) : + _g1h(G1CollectedHeap::heap()), + _region_bm(region_bm), _card_bm(card_bm), + _ctbs(NULL) + { + ModRefBarrierSet* bs = _g1h->mr_bs(); + guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition"); + _ctbs = (CardTableModRefBS*)bs; + } + + bool doHeapRegion(HeapRegion* r) { + if (!r->continuesHumongous()) { + r->rem_set()->scrub(_ctbs, _region_bm, _card_bm); + } + return false; + } +}; + +void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) { + ScrubRSClosure scrub_cl(region_bm, card_bm); + _g1->heap_region_iterate(&scrub_cl); +} + +void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm, + int worker_num, int claim_val) { + ScrubRSClosure scrub_cl(region_bm, card_bm); + _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val); +} + + +class ConcRefineRegionClosure: public HeapRegionClosure { + G1CollectedHeap* _g1h; + 
CardTableModRefBS* _ctbs; + ConcurrentGCThread* _cgc_thrd; + ConcurrentG1Refine* _cg1r; + unsigned _cards_processed; + UpdateRSOopClosure _update_rs_oop_cl; +public: + ConcRefineRegionClosure(CardTableModRefBS* ctbs, + ConcurrentG1Refine* cg1r, + HRInto_G1RemSet* g1rs) : + _ctbs(ctbs), _cg1r(cg1r), _cgc_thrd(cg1r->cg1rThread()), + _update_rs_oop_cl(g1rs), _cards_processed(0), + _g1h(G1CollectedHeap::heap()) + {} + + bool doHeapRegion(HeapRegion* r) { + if (!r->in_collection_set() && + !r->continuesHumongous() && + !r->is_young()) { + _update_rs_oop_cl.set_from(r); + UpdateRSObjectClosure update_rs_obj_cl(&_update_rs_oop_cl); + + // For each run of dirty card in the region: + // 1) Clear the cards. + // 2) Process the range corresponding to the run, adding any + // necessary RS entries. + // 1 must precede 2, so that a concurrent modification redirties the + // card. If a processing attempt does not succeed, because it runs + // into an unparseable region, we will do binary search to find the + // beginning of the next parseable region. + HeapWord* startAddr = r->bottom(); + HeapWord* endAddr = r->used_region().end(); + HeapWord* lastAddr; + HeapWord* nextAddr; + + for (nextAddr = lastAddr = startAddr; + nextAddr < endAddr; + nextAddr = lastAddr) { + MemRegion dirtyRegion; + + // Get and clear dirty region from card table + MemRegion next_mr(nextAddr, endAddr); + dirtyRegion = + _ctbs->dirty_card_range_after_reset( + next_mr, + true, CardTableModRefBS::clean_card_val()); + assert(dirtyRegion.start() >= nextAddr, + "returned region inconsistent?"); + + if (!dirtyRegion.is_empty()) { + HeapWord* stop_point = + r->object_iterate_mem_careful(dirtyRegion, + &update_rs_obj_cl); + if (stop_point == NULL) { + lastAddr = dirtyRegion.end(); + _cards_processed += + (int) (dirtyRegion.word_size() / CardTableModRefBS::card_size_in_words); + } else { + // We're going to skip one or more cards that we can't parse. + HeapWord* next_parseable_card = + r->next_block_start_careful(stop_point); + // Round this up to a card boundary. + next_parseable_card = + _ctbs->addr_for(_ctbs->byte_after_const(next_parseable_card)); + // Now we invalidate the intervening cards so we'll see them + // again. + MemRegion remaining_dirty = + MemRegion(stop_point, dirtyRegion.end()); + MemRegion skipped = + MemRegion(stop_point, next_parseable_card); + _ctbs->invalidate(skipped.intersection(remaining_dirty)); + + // Now start up again where we can parse. + lastAddr = next_parseable_card; + + // Count how many we did completely. + _cards_processed += + (stop_point - dirtyRegion.start()) / + CardTableModRefBS::card_size_in_words; + } + // Allow interruption at regular intervals. + // (Might need to make them more regular, if we get big + // dirty regions.) + if (_cgc_thrd != NULL) { + if (_cgc_thrd->should_yield()) { + _cgc_thrd->yield(); + switch (_cg1r->get_pya()) { + case PYA_continue: + // This may have changed: re-read. + endAddr = r->used_region().end(); + continue; + case PYA_restart: case PYA_cancel: + return true; + } + } + } + } else { + break; + } + } + } + // A good yield opportunity. 
+    if (_cgc_thrd != NULL) {
+      if (_cgc_thrd->should_yield()) {
+        _cgc_thrd->yield();
+        switch (_cg1r->get_pya()) {
+        case PYA_restart: case PYA_cancel:
+          return true;
+        default:
+          break;
+        }
+
+      }
+    }
+    return false;
+  }
+
+  unsigned cards_processed() { return _cards_processed; }
+};
+
+
+void HRInto_G1RemSet::concurrentRefinementPass(ConcurrentG1Refine* cg1r) {
+  ConcRefineRegionClosure cr_cl(ct_bs(), cg1r, this);
+  _g1->heap_region_iterate(&cr_cl);
+  _conc_refine_traversals++;
+  _conc_refine_cards += cr_cl.cards_processed();
+}
+
+static IntHistogram out_of_histo(50, 50);
+
+
+
+void HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {
+  // If the card is no longer dirty, nothing to do.
+  if (*card_ptr != CardTableModRefBS::dirty_card_val()) return;
+
+  // Construct the region representing the card.
+  HeapWord* start = _ct_bs->addr_for(card_ptr);
+  // And find the region containing it.
+  HeapRegion* r = _g1->heap_region_containing(start);
+  if (r == NULL) {
+    guarantee(_g1->is_in_permanent(start), "Or else where?");
+    return; // Not in the G1 heap (might be in perm, for example.)
+  }
+  // Why do we have to check here whether a card is on a young region,
+  // given that we dirty young regions and, as a result, the
+  // post-barrier is supposed to filter them out and never to enqueue
+  // them? When we allocate a new region as the "allocation region" we
+  // actually dirty its cards after we release the lock, since card
+  // dirtying while holding the lock was a performance bottleneck. So,
+  // as a result, it is possible for other threads to actually
+  // allocate objects in the region (after they acquire the lock)
+  // before all the cards on the region are dirtied. This is unlikely,
+  // and it doesn't happen often, but it can happen. So, the extra
+  // check below filters out those cards.
+  if (r->is_young()) {
+    return;
+  }
+  // While we are processing RSet buffers during the collection, we
+  // actually don't want to scan any cards on the collection set,
+  // since we don't want to update remembered sets with entries that
+  // point into the collection set, given that live objects from the
+  // collection set are about to move and such entries will be stale
+  // very soon. This change also deals with a reliability issue which
+  // involves scanning a card in the collection set and coming across
+  // an array that was being chunked and looking malformed. Note,
+  // however, that if evacuation fails, we have to scan any objects
+  // that were not moved and create any missing entries.
+  if (r->in_collection_set()) {
+    return;
+  }
+
+  // Should we defer it?
+  if (_cg1r->use_cache()) {
+    card_ptr = _cg1r->cache_insert(card_ptr);
+    // If it was not an eviction, nothing to do.
+    if (card_ptr == NULL) return;
+
+    // OK, we have to reset the card start, region, etc.
+    start = _ct_bs->addr_for(card_ptr);
+    r = _g1->heap_region_containing(start);
+    if (r == NULL) {
+      guarantee(_g1->is_in_permanent(start), "Or else where?");
+      return; // Not in the G1 heap (might be in perm, for example.)
+    }
+    guarantee(!r->is_young(), "It was evicted in the current minor cycle.");
+  }
+
+  HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
+  MemRegion dirtyRegion(start, end);
+
+#if CARD_REPEAT_HISTO
+  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
+  ct_freq_note_card(_ct_bs->index_for(start));
+#endif
+
+  UpdateRSOopClosure update_rs_oop_cl(this, worker_i);
+  update_rs_oop_cl.set_from(r);
+  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r, &update_rs_oop_cl);
+
+  // Undirty the card.
+ *card_ptr = CardTableModRefBS::clean_card_val(); + // We must complete this write before we do any of the reads below. + OrderAccess::storeload(); + // And process it, being careful of unallocated portions of TLAB's. + HeapWord* stop_point = + r->oops_on_card_seq_iterate_careful(dirtyRegion, + &filter_then_update_rs_oop_cl); + // If stop_point is non-null, then we encountered an unallocated region + // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the + // card and re-enqueue: if we put off the card until a GC pause, then the + // unallocated portion will be filled in. Alternatively, we might try + // the full complexity of the technique used in "regular" precleaning. + if (stop_point != NULL) { + // The card might have gotten re-dirtied and re-enqueued while we + // worked. (In fact, it's pretty likely.) + if (*card_ptr != CardTableModRefBS::dirty_card_val()) { + *card_ptr = CardTableModRefBS::dirty_card_val(); + MutexLockerEx x(Shared_DirtyCardQ_lock, + Mutex::_no_safepoint_check_flag); + DirtyCardQueue* sdcq = + JavaThread::dirty_card_queue_set().shared_dirty_card_queue(); + sdcq->enqueue(card_ptr); + } + } else { + out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region()); + _conc_refine_cards++; + } +} + +class HRRSStatsIter: public HeapRegionClosure { + size_t _occupied; + size_t _total_mem_sz; + size_t _max_mem_sz; + HeapRegion* _max_mem_sz_region; +public: + HRRSStatsIter() : + _occupied(0), + _total_mem_sz(0), + _max_mem_sz(0), + _max_mem_sz_region(NULL) + {} + + bool doHeapRegion(HeapRegion* r) { + if (r->continuesHumongous()) return false; + size_t mem_sz = r->rem_set()->mem_size(); + if (mem_sz > _max_mem_sz) { + _max_mem_sz = mem_sz; + _max_mem_sz_region = r; + } + _total_mem_sz += mem_sz; + size_t occ = r->rem_set()->occupied(); + _occupied += occ; + return false; + } + size_t total_mem_sz() { return _total_mem_sz; } + size_t max_mem_sz() { return _max_mem_sz; } + size_t occupied() { return _occupied; } + HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; } +}; + +void HRInto_G1RemSet::print_summary_info() { + G1CollectedHeap* g1 = G1CollectedHeap::heap(); + ConcurrentG1RefineThread* cg1r_thrd = + g1->concurrent_g1_refine()->cg1rThread(); + +#if CARD_REPEAT_HISTO + gclog_or_tty->print_cr("\nG1 card_repeat count histogram: "); + gclog_or_tty->print_cr(" # of repeats --> # of cards with that number."); + card_repeat_count.print_on(gclog_or_tty); +#endif + + if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) { + gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: "); + gclog_or_tty->print_cr(" # of CS ptrs --> # of cards with that number."); + out_of_histo.print_on(gclog_or_tty); + } + gclog_or_tty->print_cr("\n Concurrent RS processed %d cards in " + "%5.2fs.", + _conc_refine_cards, cg1r_thrd->vtime_accum()); + + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + jint tot_processed_buffers = + dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread(); + gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers); + gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS thread.", + dcqs.processed_buffers_rs_thread(), + 100.0*(float)dcqs.processed_buffers_rs_thread()/ + (float)tot_processed_buffers); + gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.", + dcqs.processed_buffers_mut(), + 100.0*(float)dcqs.processed_buffers_mut()/ + (float)tot_processed_buffers); + gclog_or_tty->print_cr(" Did %d concurrent refinement traversals.", + _conc_refine_traversals); + if (!G1RSBarrierUseQueue) { + 
gclog_or_tty->print_cr(" Scanned %8.2f cards/traversal.", + _conc_refine_traversals > 0 ? + (float)_conc_refine_cards/(float)_conc_refine_traversals : + 0); + } + gclog_or_tty->print_cr(""); + if (G1UseHRIntoRS) { + HRRSStatsIter blk; + g1->heap_region_iterate(&blk); + gclog_or_tty->print_cr(" Total heap region rem set sizes = " SIZE_FORMAT "K." + " Max = " SIZE_FORMAT "K.", + blk.total_mem_sz()/K, blk.max_mem_sz()/K); + gclog_or_tty->print_cr(" Static structures = " SIZE_FORMAT "K," + " free_lists = " SIZE_FORMAT "K.", + HeapRegionRemSet::static_mem_size()/K, + HeapRegionRemSet::fl_mem_size()/K); + gclog_or_tty->print_cr(" %d occupied cards represented.", + blk.occupied()); + gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )" + ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.", + blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(), + (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K, + (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K); + gclog_or_tty->print_cr(" Did %d coarsenings.", + HeapRegionRemSet::n_coarsenings()); + + } +} +void HRInto_G1RemSet::prepare_for_verify() { + if (G1HRRSFlushLogBuffersOnVerify && + (VerifyBeforeGC || VerifyAfterGC) + && !_g1->full_collection()) { + cleanupHRRS(); + _g1->set_refine_cte_cl_concurrency(false); + if (SafepointSynchronize::is_at_safepoint()) { + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + dcqs.concatenate_logs(); + } + bool cg1r_use_cache = _cg1r->use_cache(); + _cg1r->set_use_cache(false); + updateRS(0); + _cg1r->set_use_cache(cg1r_use_cache); + + assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp 2009-08-01 04:21:09.839052106 +0100 @@ -0,0 +1,241 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// A G1RemSet provides ways of iterating over pointers into a selected +// collection set. 
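+
+// A minimal usage sketch, assuming a caller (e.g. the collected heap) that
+// brackets the per-worker calls as required by the interface declared below;
+// the closure and worker ids are placeholders:
+//
+//   G1RemSet* rs = ...;                              // e.g. an HRInto_G1RemSet
+//   rs->prepare_for_oops_into_collection_set_do();   // sequential setup
+//   // each parallel GC worker i then calls:
+//   //   rs->oops_into_collection_set_do(&oops_in_hr_closure, i);
+//   rs->cleanup_after_oops_into_collection_set_do(); // sequential teardown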
+
+class G1CollectedHeap;
+class CardTableModRefBarrierSet;
+class HRInto_G1RemSet;
+class ConcurrentG1Refine;
+
+class G1RemSet: public CHeapObj {
+protected:
+  G1CollectedHeap* _g1;
+
+  unsigned _conc_refine_traversals;
+  unsigned _conc_refine_cards;
+
+  size_t n_workers();
+
+public:
+  G1RemSet(G1CollectedHeap* g1) :
+    _g1(g1), _conc_refine_traversals(0), _conc_refine_cards(0)
+  {}
+
+  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
+  // outside the CS (having invoked "blk->set_region" to set the "from"
+  // region correctly beforehand.) The "worker_i" param is for the
+  // parallel case where the number of the worker thread calling this
+  // function can be helpful in partitioning the work to be done. It
+  // should be the same as the "i" passed to the calling thread's
+  // work(i) function. In the sequential case this param will be ignored.
+  virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
+                                           int worker_i) = 0;
+
+  // Prepare for and cleanup after an oops_into_collection_set_do
+  // call. Must call each of these once before and after (in sequential
+  // code) any threads call oops_into_collection_set_do. (This offers an
+  // opportunity for sequential setup and teardown of structures needed by a
+  // parallel iteration over the CS's RS.)
+  virtual void prepare_for_oops_into_collection_set_do() = 0;
+  virtual void cleanup_after_oops_into_collection_set_do() = 0;
+
+  // If "this" is of the given subtype, return "this", else "NULL".
+  virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
+
+  // Record, if necessary, the fact that *p (where "p" is in region "from")
+  // has changed to its new value.
+  virtual void write_ref(HeapRegion* from, oop* p) = 0;
+  virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
+
+  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
+  // or card, respectively, such that a region or card with a corresponding
+  // 0 bit contains no part of any live object. Eliminates any remembered
+  // set entries that correspond to dead heap ranges.
+  virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
+  // Like the above, but assumes it is called in parallel: "worker_num" is the
+  // parallel thread id of the current thread, and "claim_val" is the
+  // value that should be used to claim heap regions.
+  virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
+                         int worker_num, int claim_val) = 0;
+
+  // Do any "refinement" activity that might be appropriate to the given
+  // G1RemSet. If "refinement" has iterative "passes", do one pass.
+  // If "t" is non-NULL, it is the thread performing the refinement.
+  // Default implementation does nothing.
+  virtual void concurrentRefinementPass(ConcurrentG1Refine* cg1r) {}
+
+  // Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
+  // join and leave around parts that must be atomic wrt GC. (NULL means
+  // being done at a safepoint.)
+  virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {}
+
+  unsigned conc_refine_cards() { return _conc_refine_cards; }
+
+  // Print any relevant summary info.
+  virtual void print_summary_info() {}
+
+  // Prepare remembered set for verification.
+  virtual void prepare_for_verify() {};
+};
+
+
+// The simplest possible G1RemSet: iterates over all objects in non-CS
+// regions, searching for pointers into the CS.
+class StupidG1RemSet: public G1RemSet { +public: + StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {} + + void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, + int worker_i); + + void prepare_for_oops_into_collection_set_do() {} + void cleanup_after_oops_into_collection_set_do() {} + + // Nothing is necessary in the version below. + void write_ref(HeapRegion* from, oop* p) {} + void par_write_ref(HeapRegion* from, oop* p, int tid) {} + + void scrub(BitMap* region_bm, BitMap* card_bm) {} + void scrub_par(BitMap* region_bm, BitMap* card_bm, + int worker_num, int claim_val) {} + +}; + +// A G1RemSet in which each heap region has a rem set that records the +// external heap references into it. Uses a mod ref bs to track updates, +// so that they can be used to update the individual region remsets. + +class HRInto_G1RemSet: public G1RemSet { +protected: + enum SomePrivateConstants { + UpdateRStoMergeSync = 0, + MergeRStoDoDirtySync = 1, + DoDirtySync = 2, + LastSync = 3, + + SeqTask = 0, + NumSeqTasks = 1 + }; + + CardTableModRefBS* _ct_bs; + SubTasksDone* _seq_task; + G1CollectorPolicy* _g1p; + + ConcurrentG1Refine* _cg1r; + + size_t* _cards_scanned; + size_t _total_cards_scanned; + + // _par_traversal_in_progress is "true" iff a parallel traversal is in + // progress. If so, then cards added to remembered sets should also have + // their references into the collection summarized in "_new_refs". + bool _par_traversal_in_progress; + void set_par_traversal(bool b); + GrowableArray** _new_refs; + void new_refs_iterate(OopClosure* cl); + +public: + // This is called to reset dual hash tables after the gc pause + // is finished and the initial hash table is no longer being + // scanned. + void cleanupHRRS(); + + HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs); + ~HRInto_G1RemSet(); + + void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, + int worker_i); + + void prepare_for_oops_into_collection_set_do(); + void cleanup_after_oops_into_collection_set_do(); + void scanRS(OopsInHeapRegionClosure* oc, int worker_i); + void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i); + void updateRS(int worker_i); + HeapRegion* calculateStartRegion(int i); + + HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; } + + CardTableModRefBS* ct_bs() { return _ct_bs; } + size_t cardsScanned() { return _total_cards_scanned; } + + // Record, if necessary, the fact that *p (where "p" is in region "from", + // which is required to be non-NULL) has changed to a new non-NULL value. + inline void write_ref(HeapRegion* from, oop* p); + // The "_nv" version is the same; it exists just so that it is not virtual. 
+ inline void write_ref_nv(HeapRegion* from, oop* p); + + inline bool self_forwarded(oop obj); + inline void par_write_ref(HeapRegion* from, oop* p, int tid); + + void scrub(BitMap* region_bm, BitMap* card_bm); + void scrub_par(BitMap* region_bm, BitMap* card_bm, + int worker_num, int claim_val); + + virtual void concurrentRefinementPass(ConcurrentG1Refine* t); + virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i); + + virtual void print_summary_info(); + virtual void prepare_for_verify(); +}; + +#define G1_REM_SET_LOGGING 0 + +class CountNonCleanMemRegionClosure: public MemRegionClosure { + G1CollectedHeap* _g1; + int _n; + HeapWord* _start_first; +public: + CountNonCleanMemRegionClosure(G1CollectedHeap* g1) : + _g1(g1), _n(0), _start_first(NULL) + {} + void do_MemRegion(MemRegion mr); + int n() { return _n; }; + HeapWord* start_first() { return _start_first; } +}; + +class UpdateRSOopClosure: public OopClosure { + HeapRegion* _from; + HRInto_G1RemSet* _rs; + int _worker_i; +public: + UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) : + _from(NULL), _rs(rs), _worker_i(worker_i) { + guarantee(_rs != NULL, "Requires an HRIntoG1RemSet"); + } + + void set_from(HeapRegion* from) { + assert(from != NULL, "from region must be non-NULL"); + _from = from; + } + + virtual void do_oop(narrowOop* p); + virtual void do_oop(oop* p); + + // Override: this closure is idempotent. + // bool idempotent() { return true; } + bool apply_to_weak_ref_discovered_field() { return true; } +}; + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp 2009-08-01 04:21:10.263145111 +0100 @@ -0,0 +1,99 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +inline size_t G1RemSet::n_workers() { + if (_g1->workers() != NULL) { + return _g1->workers()->total_workers(); + } else { + return 1; + } +} + +inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, oop* p) { + par_write_ref(from, p, 0); +} + +inline void HRInto_G1RemSet::write_ref(HeapRegion* from, oop* p) { + write_ref_nv(from, p); +} + +inline bool HRInto_G1RemSet::self_forwarded(oop obj) { + bool result = (obj->is_forwarded() && (obj->forwardee()== obj)); + return result; +} + +inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) { + oop obj = *p; +#ifdef ASSERT + // can't do because of races + // assert(obj == NULL || obj->is_oop(), "expected an oop"); + + // Do the safe subset of is_oop + if (obj != NULL) { +#ifdef CHECK_UNHANDLED_OOPS + oopDesc* o = obj.obj(); +#else + oopDesc* o = obj; +#endif // CHECK_UNHANDLED_OOPS + assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned"); + assert(Universe::heap()->is_in_reserved(obj), "must be in heap"); + } +#endif // ASSERT + assert(from == NULL || from->is_in_reserved(p), + "p is not in from"); + HeapRegion* to = _g1->heap_region_containing(obj); + // The test below could be optimized by applying a bit op to to and from. + if (to != NULL && from != NULL && from != to) { + // There is a tricky infinite loop if we keep pushing + // self forwarding pointers onto our _new_refs list. + // The _par_traversal_in_progress flag is true during the collection pause, + // false during the evacuation failure handing. + if (_par_traversal_in_progress && + to->in_collection_set() && !self_forwarded(obj)) { + _new_refs[tid]->push(p); + // Deferred updates to the Cset are either discarded (in the normal case), + // or processed (if an evacuation failure occurs) at the end + // of the collection. + // See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do(). + } else { +#if G1_REM_SET_LOGGING + gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS" + " for region [" PTR_FORMAT ", " PTR_FORMAT ")", + p, obj, + to->bottom(), to->end()); +#endif + assert(to->rem_set() != NULL, "Need per-region 'into' remsets."); + to->rem_set()->add_reference(p, tid); + } + } +} + +inline void UpdateRSOopClosure::do_oop(narrowOop* p) { + guarantee(false, "NYI"); +} + +inline void UpdateRSOopClosure::do_oop(oop* p) { + assert(_from != NULL, "from region must be non-NULL"); + _rs->par_write_ref(_from, p, _worker_i); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp 2009-08-01 04:21:10.663553502 +0100 @@ -0,0 +1,150 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_g1SATBCardTableModRefBS.cpp.incl" + +G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap, + int max_covered_regions) : + CardTableModRefBSForCTRS(whole_heap, max_covered_regions) +{ + _kind = G1SATBCT; +} + + +void G1SATBCardTableModRefBS::enqueue(oop pre_val) { + if (!JavaThread::satb_mark_queue_set().active()) return; + Thread* thr = Thread::current(); + if (thr->is_Java_thread()) { + JavaThread* jt = (JavaThread*)thr; + jt->satb_mark_queue().enqueue(pre_val); + } else { + MutexLocker x(Shared_SATB_Q_lock); + JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val); + } +} + +// When we know the current java thread: +void +G1SATBCardTableModRefBS::write_ref_field_pre_static(void* field, + oop newVal, + JavaThread* jt) { + if (!JavaThread::satb_mark_queue_set().active()) return; + assert(!UseCompressedOops, "Else will need to modify this to deal with narrowOop"); + oop preVal = *(oop*)field; + if (preVal != NULL) { + jt->satb_mark_queue().enqueue(preVal); + } +} + +void +G1SATBCardTableModRefBS::write_ref_array_pre(MemRegion mr) { + if (!JavaThread::satb_mark_queue_set().active()) return; + assert(!UseCompressedOops, "Else will need to modify this to deal with narrowOop"); + oop* elem_ptr = (oop*)mr.start(); + while ((HeapWord*)elem_ptr < mr.end()) { + oop elem = *elem_ptr; + if (elem != NULL) enqueue(elem); + elem_ptr++; + } +} + + + +G1SATBCardTableLoggingModRefBS:: +G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, + int max_covered_regions) : + G1SATBCardTableModRefBS(whole_heap, max_covered_regions), + _dcqs(JavaThread::dirty_card_queue_set()) +{ + _kind = G1SATBCTLogging; +} + +void +G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field, + oop new_val) { + jbyte* byte = byte_for(field); + if (*byte != dirty_card) { + *byte = dirty_card; + Thread* thr = Thread::current(); + if (thr->is_Java_thread()) { + JavaThread* jt = (JavaThread*)thr; + jt->dirty_card_queue().enqueue(byte); + } else { + MutexLockerEx x(Shared_DirtyCardQ_lock, + Mutex::_no_safepoint_check_flag); + _dcqs.shared_dirty_card_queue()->enqueue(byte); + } + } +} + +void +G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field, + oop new_val) { + uintptr_t field_uint = (uintptr_t)field; + uintptr_t new_val_uint = (uintptr_t)new_val; + uintptr_t comb = field_uint ^ new_val_uint; + comb = comb >> HeapRegion::LogOfHRGrainBytes; + if (comb == 0) return; + if (new_val == NULL) return; + // Otherwise, log it. + G1SATBCardTableLoggingModRefBS* g1_bs = + (G1SATBCardTableLoggingModRefBS*)Universe::heap()->barrier_set(); + g1_bs->write_ref_field_work(field, new_val); +} + +void +G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) { + jbyte* byte = byte_for(mr.start()); + jbyte* last_byte = byte_for(mr.last()); + Thread* thr = Thread::current(); + if (whole_heap) { + while (byte <= last_byte) { + *byte = dirty_card; + byte++; + } + } else { + // Enqueue if necessary. 
+ if (thr->is_Java_thread()) { + JavaThread* jt = (JavaThread*)thr; + while (byte <= last_byte) { + if (*byte != dirty_card) { + *byte = dirty_card; + jt->dirty_card_queue().enqueue(byte); + } + byte++; + } + } else { + MutexLockerEx x(Shared_DirtyCardQ_lock, + Mutex::_no_safepoint_check_flag); + while (byte <= last_byte) { + if (*byte != dirty_card) { + *byte = dirty_card; + _dcqs.shared_dirty_card_queue()->enqueue(byte); + } + byte++; + } + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp 2009-08-01 04:21:11.081241181 +0100 @@ -0,0 +1,107 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#ifndef SERIALGC + +class DirtyCardQueueSet; + +// This barrier is specialized to use a logging barrier to support +// snapshot-at-the-beginning marking. + +class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS { +private: + // Add "pre_val" to a set of objects that may have been disconnected from the + // pre-marking object graph. + static void enqueue(oop pre_val); + +public: + G1SATBCardTableModRefBS(MemRegion whole_heap, + int max_covered_regions); + + bool is_a(BarrierSet::Name bsn) { + return bsn == BarrierSet::G1SATBCT || CardTableModRefBS::is_a(bsn); + } + + virtual bool has_write_ref_pre_barrier() { return true; } + + // This notes that we don't need to access any BarrierSet data + // structures, so this can be called from a static context. + static void write_ref_field_pre_static(void* field, oop newVal) { + assert(!UseCompressedOops, "Else needs to be templatized"); + oop preVal = *((oop*)field); + if (preVal != NULL) { + enqueue(preVal); + } + } + + // When we know the current java thread: + static void write_ref_field_pre_static(void* field, oop newVal, + JavaThread* jt); + + // We export this to make it available in cases where the static + // type of the barrier set is known. Note that it is non-virtual. + inline void inline_write_ref_field_pre(void* field, oop newVal) { + write_ref_field_pre_static(field, newVal); + } + + // This is the more general virtual version. + void write_ref_field_pre_work(void* field, oop new_val) { + inline_write_ref_field_pre(field, new_val); + } + + virtual void write_ref_array_pre(MemRegion mr); + +}; + +// Adds card-table logging to the post-barrier. +// Usual invariant: all dirty cards are logged in the DirtyCardQueueSet. 
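+
+// A minimal sketch of the store sequence these barrier sets support, assuming
+// uncompressed oops, a JavaThread "jt", a field address "field", and a
+// G1SATBCardTableLoggingModRefBS* "logging_bs" (the actual entry points used
+// by the VM live elsewhere):
+//
+//   // SATB pre-barrier: log the value about to be overwritten
+//   G1SATBCardTableModRefBS::write_ref_field_pre_static(field, new_val, jt);
+//   *field = new_val;
+//   // logging post-barrier: dirty the card for "field" and enqueue it
+//   logging_bs->write_ref_field_work(field, new_val);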
+class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS { + private: + DirtyCardQueueSet& _dcqs; + public: + G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, + int max_covered_regions); + + bool is_a(BarrierSet::Name bsn) { + return bsn == BarrierSet::G1SATBCTLogging || + G1SATBCardTableModRefBS::is_a(bsn); + } + + void write_ref_field_work(void* field, oop new_val); + + // Can be called from static contexts. + static void write_ref_field_static(void* field, oop new_val); + + // NB: if you do a whole-heap invalidation, the "usual invariant" defined + // above no longer applies. + void invalidate(MemRegion mr, bool whole_heap = false); + + void write_region_work(MemRegion mr) { invalidate(mr); } + void write_ref_array_work(MemRegion mr) { invalidate(mr); } + + +}; + + +#endif // SERIALGC --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1_globals.cpp 2009-08-01 04:21:11.497869395 +0100 @@ -0,0 +1,32 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_g1_globals.cpp.incl" + +G1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \ + MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \ + MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_EXPERIMENTAL_FLAG, \ + MATERIALIZE_NOTPRODUCT_FLAG, \ + MATERIALIZE_MANAGEABLE_FLAG, MATERIALIZE_PRODUCT_RW_FLAG) --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp 2009-08-01 04:21:11.884451543 +0100 @@ -0,0 +1,258 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// +// Defines all globals flags used by the garbage-first compiler. +// + +#define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \ + \ + product(intx, G1ParallelGCAllocBufferSize, 8*K, \ + "Size of parallel G1 allocation buffers in to-space.") \ + \ + product(intx, G1ConfidencePercent, 50, \ + "Confidence level for MMU/pause predictions") \ + \ + develop(intx, G1MarkingOverheadPercent, 0, \ + "Overhead of concurrent marking") \ + \ + develop(bool, G1AccountConcurrentOverhead, false, \ + "Whether soft real-time compliance in G1 will take into account" \ + "concurrent overhead") \ + \ + product(intx, G1YoungGenSize, 0, \ + "Size of the G1 young generation, 0 is the adaptive policy") \ + \ + develop(bool, G1Gen, true, \ + "If true, it will enable the generational G1") \ + \ + develop(intx, G1GCPercent, 10, \ + "The desired percent time spent on GC") \ + \ + develop(intx, G1PolicyVerbose, 0, \ + "The verbosity level on G1 policy decisions") \ + \ + develop(bool, G1UseHRIntoRS, true, \ + "Determines whether the 'advanced' HR Into rem set is used.") \ + \ + develop(intx, G1MarkingVerboseLevel, 0, \ + "Level (0-4) of verboseness of the marking code") \ + \ + develop(bool, G1VerifyConcMarkPrintReachable, false, \ + "If conc mark verification fails, print reachable objects") \ + \ + develop(bool, G1TraceMarkStackOverflow, false, \ + "If true, extra debugging code for CM restart for ovflw.") \ + \ + develop(intx, G1PausesBtwnConcMark, -1, \ + "If positive, fixed number of pauses between conc markings") \ + \ + diagnostic(bool, G1SummarizeConcurrentMark, false, \ + "Summarize concurrent mark info") \ + \ + diagnostic(bool, G1SummarizeRSetStats, false, \ + "Summarize remembered set processing info") \ + \ + diagnostic(bool, G1SummarizeZFStats, false, \ + "Summarize zero-filling info") \ + \ + develop(bool, G1TraceConcurrentRefinement, false, \ + "Trace G1 concurrent refinement") \ + \ + develop(bool, G1ConcMark, true, \ + "If true, run concurrent marking for G1") \ + \ + product(intx, G1MarkStackSize, 2 * 1024 * 1024, \ + "Size of the mark stack for concurrent marking.") \ + \ + product(intx, G1MarkRegionStackSize, 1024 * 1024, \ + "Size of the region stack for concurrent marking.") \ + \ + develop(bool, G1ConcRefine, true, \ + "If true, run concurrent rem set refinement for G1") \ + \ + develop(intx, G1ConcRefineTargTraversals, 4, \ + "Number of concurrent refinement we try to achieve") \ + \ + develop(intx, G1ConcRefineInitialDelta, 4, \ + "Number of heap regions of alloc ahead of starting collection " \ + "pause to start concurrent refinement (initially)") \ + \ + develop(bool, G1SmoothConcRefine, true, \ + "Attempts to smooth out the overhead of concurrent refinement") \ + \ + develop(bool, G1ConcZeroFill, true, \ + "If true, run concurrent zero-filling thread") \ + \ + develop(intx, G1ConcZFMaxRegions, 1, \ + "Stop zero-filling when # of zf'd regions reaches") \ + \ + product(intx, G1SteadyStateUsed, 90, \ + "If non-0, try to maintain 'used' at this pct (of max)") \ + \ + product(intx, G1SteadyStateUsedDelta, 30, \ + "If G1SteadyStateUsed is non-0, then do pause this number of " \ + "of percentage points earlier if no marking is in progress.") \ + \ + develop(bool, G1SATBBarrierPrintNullPreVals, false, \ + "If true, count frac of ptr writes 
with null pre-vals.") \ + \ + product(intx, G1SATBLogBufferSize, 1*K, \ + "Number of entries in an SATB log buffer.") \ + \ + product(intx, G1SATBProcessCompletedThreshold, 20, \ + "Number of completed buffers that triggers log processing.") \ + \ + develop(intx, G1ExtraRegionSurvRate, 33, \ + "If the young survival rate is S, and there's room left in " \ + "to-space, we will allow regions whose survival rate is up to " \ + "S + (1 - S)*X, where X is this parameter (as a fraction.)") \ + \ + develop(intx, G1InitYoungSurvRatio, 50, \ + "Expected Survival Rate for newly allocated bytes") \ + \ + develop(bool, G1SATBPrintStubs, false, \ + "If true, print generated stubs for the SATB barrier") \ + \ + product(intx, G1ExpandByPercentOfAvailable, 20, \ + "When expanding, % of uncommitted space to claim.") \ + \ + develop(bool, G1RSBarrierRegionFilter, true, \ + "If true, generate region filtering code in RS barrier") \ + \ + develop(bool, G1RSBarrierNullFilter, true, \ + "If true, generate null-pointer filtering code in RS barrier") \ + \ + develop(bool, G1PrintCTFilterStats, false, \ + "If true, print stats on RS filtering effectiveness") \ + \ + develop(bool, G1RSBarrierUseQueue, true, \ + "If true, use queueing RS barrier") \ + \ + develop(bool, G1DeferredRSUpdate, true, \ + "If true, use deferred RS updates") \ + \ + develop(bool, G1RSLogCheckCardTable, false, \ + "If true, verify that no dirty cards remain after RS log " \ + "processing.") \ + \ + develop(bool, G1RSCountHisto, false, \ + "If true, print a histogram of RS occupancies after each pause") \ + \ + develop(intx, G1PrintRegionLivenessInfo, 0, \ + "When > 0, print the occupancies of the best and worst" \ + "regions.") \ + \ + develop(bool, G1PrintParCleanupStats, false, \ + "When true, print extra stats about parallel cleanup.") \ + \ + develop(bool, G1DisablePreBarrier, false, \ + "Disable generation of pre-barrier (i.e., marking barrier) ") \ + \ + develop(bool, G1DisablePostBarrier, false, \ + "Disable generation of post-barrier (i.e., RS barrier) ") \ + \ + product(intx, G1DirtyCardQueueMax, 30, \ + "Maximum number of completed RS buffers before mutator threads " \ + "start processing them.") \ + \ + develop(intx, G1ConcRSLogCacheSize, 10, \ + "Log base 2 of the length of conc RS hot-card cache.") \ + \ + develop(bool, G1ConcRSCountTraversals, false, \ + "If true, gather data about the number of times CR traverses " \ + "cards ") \ + \ + develop(intx, G1ConcRSHotCardLimit, 4, \ + "The threshold that defines (>=) a hot card.") \ + \ + develop(bool, G1PrintOopAppls, false, \ + "When true, print applications of closures to external locs.") \ + \ + develop(intx, G1LogRSRegionEntries, 7, \ + "Log_2 of max number of regions for which we keep bitmaps.") \ + \ + develop(bool, G1RecordHRRSOops, false, \ + "When true, record recent calls to rem set operations.") \ + \ + develop(bool, G1RecordHRRSEvents, false, \ + "When true, record recent calls to rem set operations.") \ + \ + develop(intx, G1MaxVerifyFailures, -1, \ + "The maximum number of verification failrues to print. 
" \ + "-1 means print all.") \ + \ + develop(bool, G1ScrubRemSets, true, \ + "When true, do RS scrubbing after cleanup.") \ + \ + develop(bool, G1RSScrubVerbose, false, \ + "When true, do RS scrubbing with verbose output.") \ + \ + develop(bool, G1YoungSurvRateVerbose, false, \ + "print out the survival rate of young regions according to age.") \ + \ + develop(intx, G1YoungSurvRateNumRegionsSummary, 0, \ + "the number of regions for which we'll print a surv rate " \ + "summary.") \ + \ + product(bool, G1UseScanOnlyPrefix, false, \ + "It determines whether the system will calculate an optimum " \ + "scan-only set.") \ + \ + product(intx, G1MinReservePercent, 10, \ + "It determines the minimum reserve we should have in the heap " \ + "to minimize the probability of promotion failure.") \ + \ + diagnostic(bool, G1PrintRegions, false, \ + "If set G1 will print information on which regions are being " \ + "allocated and which are reclaimed.") \ + \ + develop(bool, G1HRRSUseSparseTable, true, \ + "When true, use sparse table to save space.") \ + \ + develop(bool, G1HRRSFlushLogBuffersOnVerify, false, \ + "Forces flushing of log buffers before verification.") \ + \ + product(bool, G1UseSurvivorSpaces, true, \ + "When true, use survivor space.") \ + \ + develop(bool, G1FixedTenuringThreshold, false, \ + "When set, G1 will not adjust the tenuring threshold") \ + \ + develop(bool, G1FixedEdenSize, false, \ + "When set, G1 will not allocate unused survivor space regions") \ + \ + develop(uintx, G1FixedSurvivorSpaceSize, 0, \ + "If non-0 is the size of the G1 survivor space, " \ + "otherwise SurvivorRatio is used to determine the size") \ + \ + experimental(bool, G1ParallelRSetUpdatingEnabled, false, \ + "Enables the parallelization of remembered set updating " \ + "during evacuation pauses") \ + \ + experimental(bool, G1ParallelRSetScanningEnabled, false, \ + "Enables the parallelization of remembered set scanning " \ + "during evacuation pauses") + +G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG) --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp 2009-08-01 04:21:12.304782708 +0100 @@ -0,0 +1,66 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +// The following OopClosure types get specialized versions of +// "oop_oop_iterate" that invoke the closures' do_oop methods +// non-virtually, using a mechanism defined in this file. Extend these +// macros in the obvious way to add specializations for new closures. + +// Forward declarations. +enum G1Barrier { + G1BarrierNone, G1BarrierRS, G1BarrierEvac +}; + +template +class G1ParCopyClosure; +class G1ParScanClosure; + +typedef G1ParCopyClosure + G1ParScanHeapEvacClosure; + +class FilterIntoCSClosure; +class FilterOutOfRegionClosure; +class FilterInHeapRegionAndIntoCSClosure; +class FilterAndMarkInHeapRegionAndIntoCSClosure; +class G1ScanAndBalanceClosure; + +#ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES +#error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined." +#endif + +#define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \ + f(G1ParScanHeapEvacClosure,_nv) \ + f(G1ParScanClosure,_nv) \ + f(FilterIntoCSClosure,_nv) \ + f(FilterOutOfRegionClosure,_nv) \ + f(FilterInHeapRegionAndIntoCSClosure,_nv) \ + f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv) \ + f(G1ScanAndBalanceClosure,_nv) + +#ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES +#error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined." +#endif + +#define FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f) --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp 2009-08-01 04:21:12.772460356 +0100 @@ -0,0 +1,851 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +#include "incls/_precompiled.incl" +#include "incls/_heapRegion.cpp.incl" + +HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1, + HeapRegion* hr, OopClosure* cl, + CardTableModRefBS::PrecisionStyle precision, + FilterKind fk) : + ContiguousSpaceDCTOC(hr, cl, precision, NULL), + _hr(hr), _fk(fk), _g1(g1) +{} + +FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r, + OopClosure* oc) : + _r_bottom(r->bottom()), _r_end(r->end()), + _oc(oc), _out_of_region(0) +{} + +class VerifyLiveClosure: public OopClosure { + G1CollectedHeap* _g1h; + CardTableModRefBS* _bs; + oop _containing_obj; + bool _failures; + int _n_failures; +public: + VerifyLiveClosure(G1CollectedHeap* g1h) : + _g1h(g1h), _bs(NULL), _containing_obj(NULL), + _failures(false), _n_failures(0) + { + BarrierSet* bs = _g1h->barrier_set(); + if (bs->is_a(BarrierSet::CardTableModRef)) + _bs = (CardTableModRefBS*)bs; + } + + void set_containing_obj(oop obj) { + _containing_obj = obj; + } + + bool failures() { return _failures; } + int n_failures() { return _n_failures; } + + virtual void do_oop(narrowOop* p) { + guarantee(false, "NYI"); + } + + void do_oop(oop* p) { + assert(_containing_obj != NULL, "Precondition"); + assert(!_g1h->is_obj_dead(_containing_obj), "Precondition"); + oop obj = *p; + if (obj != NULL) { + bool failed = false; + if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead(obj)) { + if (!_failures) { + gclog_or_tty->print_cr(""); + gclog_or_tty->print_cr("----------"); + } + if (!_g1h->is_in_closed_subset(obj)) { + gclog_or_tty->print_cr("Field "PTR_FORMAT + " of live obj "PTR_FORMAT + " points to obj "PTR_FORMAT + " not in the heap.", + p, (void*) _containing_obj, (void*) obj); + } else { + gclog_or_tty->print_cr("Field "PTR_FORMAT + " of live obj "PTR_FORMAT + " points to dead obj "PTR_FORMAT".", + p, (void*) _containing_obj, (void*) obj); + } + gclog_or_tty->print_cr("Live obj:"); + _containing_obj->print_on(gclog_or_tty); + gclog_or_tty->print_cr("Bad referent:"); + obj->print_on(gclog_or_tty); + gclog_or_tty->print_cr("----------"); + _failures = true; + failed = true; + _n_failures++; + } + + if (!_g1h->full_collection()) { + HeapRegion* from = _g1h->heap_region_containing(p); + HeapRegion* to = _g1h->heap_region_containing(*p); + if (from != NULL && to != NULL && + from != to && + !to->isHumongous()) { + jbyte cv_obj = *_bs->byte_for_const(_containing_obj); + jbyte cv_field = *_bs->byte_for_const(p); + const jbyte dirty = CardTableModRefBS::dirty_card_val(); + + bool is_bad = !(from->is_young() + || to->rem_set()->contains_reference(p) + || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed + (_containing_obj->is_objArray() ? 
+ cv_field == dirty + : cv_obj == dirty || cv_field == dirty)); + if (is_bad) { + if (!_failures) { + gclog_or_tty->print_cr(""); + gclog_or_tty->print_cr("----------"); + } + gclog_or_tty->print_cr("Missing rem set entry:"); + gclog_or_tty->print_cr("Field "PTR_FORMAT + " of obj "PTR_FORMAT + ", in region %d ["PTR_FORMAT + ", "PTR_FORMAT"),", + p, (void*) _containing_obj, + from->hrs_index(), + from->bottom(), + from->end()); + _containing_obj->print_on(gclog_or_tty); + gclog_or_tty->print_cr("points to obj "PTR_FORMAT + " in region %d ["PTR_FORMAT + ", "PTR_FORMAT").", + (void*) obj, to->hrs_index(), + to->bottom(), to->end()); + obj->print_on(gclog_or_tty); + gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.", + cv_obj, cv_field); + gclog_or_tty->print_cr("----------"); + _failures = true; + if (!failed) _n_failures++; + } + } + } + } + } +}; + +template +HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h, + HeapRegion* hr, + HeapWord* cur, HeapWord* top) { + oop cur_oop = oop(cur); + int oop_size = cur_oop->size(); + HeapWord* next_obj = cur + oop_size; + while (next_obj < top) { + // Keep filtering the remembered set. + if (!g1h->is_obj_dead(cur_oop, hr)) { + // Bottom lies entirely below top, so we can call the + // non-memRegion version of oop_iterate below. + cur_oop->oop_iterate(cl); + } + cur = next_obj; + cur_oop = oop(cur); + oop_size = cur_oop->size(); + next_obj = cur + oop_size; + } + return cur; +} + +void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr, + HeapWord* bottom, + HeapWord* top, + OopClosure* cl) { + G1CollectedHeap* g1h = _g1; + + int oop_size; + + OopClosure* cl2 = cl; + FilterIntoCSClosure intoCSFilt(this, g1h, cl); + FilterOutOfRegionClosure outOfRegionFilt(_hr, cl); + switch (_fk) { + case IntoCSFilterKind: cl2 = &intoCSFilt; break; + case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break; + } + + // Start filtering what we add to the remembered set. If the object is + // not considered dead, either because it is marked (in the mark bitmap) + // or it was allocated after marking finished, then we add it. Otherwise + // we can safely ignore the object. + if (!g1h->is_obj_dead(oop(bottom), _hr)) { + oop_size = oop(bottom)->oop_iterate(cl2, mr); + } else { + oop_size = oop(bottom)->size(); + } + + bottom += oop_size; + + if (bottom < top) { + // We replicate the loop below for several kinds of possible filters. + switch (_fk) { + case NoFilterKind: + bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top); + break; + case IntoCSFilterKind: { + FilterIntoCSClosure filt(this, g1h, cl); + bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); + break; + } + case OutOfRegionFilterKind: { + FilterOutOfRegionClosure filt(_hr, cl); + bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); + break; + } + default: + ShouldNotReachHere(); + } + + // Last object. Need to do dead-obj filtering here too. + if (!g1h->is_obj_dead(oop(bottom), _hr)) { + oop(bottom)->oop_iterate(cl2, mr); + } + } +} + +void HeapRegion::reset_after_compaction() { + G1OffsetTableContigSpace::reset_after_compaction(); + // After a compaction the mark bitmap is invalid, so we must + // treat all objects as being inside the unmarked area. 
+ zero_marked_bytes(); + init_top_at_mark_start(); +} + +DirtyCardToOopClosure* +HeapRegion::new_dcto_closure(OopClosure* cl, + CardTableModRefBS::PrecisionStyle precision, + HeapRegionDCTOC::FilterKind fk) { + return new HeapRegionDCTOC(G1CollectedHeap::heap(), + this, cl, precision, fk); +} + +void HeapRegion::hr_clear(bool par, bool clear_space) { + _humongous_type = NotHumongous; + _humongous_start_region = NULL; + _in_collection_set = false; + _is_gc_alloc_region = false; + + // Age stuff (if parallel, this will be done separately, since it needs + // to be sequential). + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + set_young_index_in_cset(-1); + uninstall_surv_rate_group(); + set_young_type(NotYoung); + + // In case it had been the start of a humongous sequence, reset its end. + set_end(_orig_end); + + if (!par) { + // If this is parallel, this will be done later. + HeapRegionRemSet* hrrs = rem_set(); + if (hrrs != NULL) hrrs->clear(); + _claimed = InitialClaimValue; + } + zero_marked_bytes(); + set_sort_index(-1); + + _offsets.resize(HeapRegion::GrainWords); + init_top_at_mark_start(); + if (clear_space) clear(SpaceDecorator::Mangle); +} + +// +void HeapRegion::calc_gc_efficiency() { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + _gc_efficiency = (double) garbage_bytes() / + g1h->predict_region_elapsed_time_ms(this, false); +} +// + +void HeapRegion::set_startsHumongous() { + _humongous_type = StartsHumongous; + _humongous_start_region = this; + assert(end() == _orig_end, "Should be normal before alloc."); +} + +bool HeapRegion::claimHeapRegion(jint claimValue) { + jint current = _claimed; + if (current != claimValue) { + jint res = Atomic::cmpxchg(claimValue, &_claimed, current); + if (res == current) { + return true; + } + } + return false; +} + +HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) { + HeapWord* low = addr; + HeapWord* high = end(); + while (low < high) { + size_t diff = pointer_delta(high, low); + // Must add one below to bias toward the high amount. Otherwise, if + // "high" were at the desired value, and "low" were one less, we + // would not converge on "high". This is not symmetric, because + // we set "high" to a block start, which might be the right one, + // which we don't do for "low". 
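// For example: when low and high are adjacent (diff == 1), the biased
// midpoint is low + (1+1)/2 == high, so the loop returns high immediately.
// An unbiased midpoint low + diff/2 would be low itself; if
// block_start_careful(low) < addr we would then assign low = middle == low
// and never make progress. The +1 guarantees middle > low whenever
// low < high, so each iteration either returns or moves one of the bounds.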
+ HeapWord* middle = low + (diff+1)/2; + if (middle == high) return high; + HeapWord* mid_bs = block_start_careful(middle); + if (mid_bs < addr) { + low = middle; + } else { + high = mid_bs; + } + } + assert(low == high && low >= addr, "Didn't work."); + return low; +} + +void HeapRegion::set_next_on_unclean_list(HeapRegion* r) { + assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list."); + _next_in_special_set = r; +} + +void HeapRegion::set_on_unclean_list(bool b) { + _is_on_unclean_list = b; +} + +void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { + G1OffsetTableContigSpace::initialize(mr, false, mangle_space); + hr_clear(false/*par*/, clear_space); +} +#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away +#pragma warning( disable:4355 ) // 'this' : used in base member initializer list +#endif // _MSC_VER + + +HeapRegion:: +HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray, + MemRegion mr, bool is_zeroed) + : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed), + _next_fk(HeapRegionDCTOC::NoFilterKind), + _hrs_index(-1), + _humongous_type(NotHumongous), _humongous_start_region(NULL), + _in_collection_set(false), _is_gc_alloc_region(false), + _is_on_free_list(false), _is_on_unclean_list(false), + _next_in_special_set(NULL), _orig_end(NULL), + _claimed(InitialClaimValue), _evacuation_failed(false), + _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1), + _young_type(NotYoung), _next_young_region(NULL), + _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1), + _rem_set(NULL), _zfs(NotZeroFilled) +{ + _orig_end = mr.end(); + // Note that initialize() will set the start of the unmarked area of the + // region. + this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle); + set_top(bottom()); + set_saved_mark(); + + _rem_set = new HeapRegionRemSet(sharedOffsetArray, this); + + assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant."); + // In case the region is allocated during a pause, note the top. + // We haven't done any counting on a brand new region. + _top_at_conc_mark_count = bottom(); +} + +class NextCompactionHeapRegionClosure: public HeapRegionClosure { + const HeapRegion* _target; + bool _target_seen; + HeapRegion* _last; + CompactibleSpace* _res; +public: + NextCompactionHeapRegionClosure(const HeapRegion* target) : + _target(target), _target_seen(false), _res(NULL) {} + bool doHeapRegion(HeapRegion* cur) { + if (_target_seen) { + if (!cur->isHumongous()) { + _res = cur; + return true; + } + } else if (cur == _target) { + _target_seen = true; + } + return false; + } + CompactibleSpace* result() { return _res; } +}; + +CompactibleSpace* HeapRegion::next_compaction_space() const { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + // cast away const-ness + HeapRegion* r = (HeapRegion*) this; + NextCompactionHeapRegionClosure blk(r); + g1h->heap_region_iterate_from(r, &blk); + return blk.result(); +} + +void HeapRegion::set_continuesHumongous(HeapRegion* start) { + // The order is important here. + start->add_continuingHumongousRegion(this); + _humongous_type = ContinuesHumongous; + _humongous_start_region = start; +} + +void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) { + // Must join the blocks of the current H region seq with the block of the + // added region. 
+ offsets()->join_blocks(bottom(), cont->bottom()); + arrayOop obj = (arrayOop)(bottom()); + obj->set_length((int) (obj->length() + cont->capacity()/jintSize)); + set_end(cont->end()); + set_top(cont->end()); +} + +void HeapRegion::save_marks() { + set_saved_mark(); +} + +void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) { + HeapWord* p = mr.start(); + HeapWord* e = mr.end(); + oop obj; + while (p < e) { + obj = oop(p); + p += obj->oop_iterate(cl); + } + assert(p == e, "bad memregion: doesn't end on obj boundary"); +} + +#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ +void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ + ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \ +} +SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN) + + +void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) { + oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl); +} + +#ifdef DEBUG +HeapWord* HeapRegion::allocate(size_t size) { + jint state = zero_fill_state(); + assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() || + zero_fill_is_allocated(), + "When ZF is on, only alloc in ZF'd regions"); + return G1OffsetTableContigSpace::allocate(size); +} +#endif + +void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) { + assert(top() == bottom() || zfs == Allocated, + "Region must be empty, or we must be setting it to allocated."); + assert(ZF_mon->owned_by_self() || + Universe::heap()->is_gc_active(), + "Must hold the lock or be a full GC to modify."); + _zfs = zfs; +} + +void HeapRegion::set_zero_fill_complete() { + set_zero_fill_state_work(ZeroFilled); + if (ZF_mon->owned_by_self()) { + ZF_mon->notify_all(); + } +} + + +void HeapRegion::ensure_zero_filled() { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + ensure_zero_filled_locked(); +} + +void HeapRegion::ensure_zero_filled_locked() { + assert(ZF_mon->owned_by_self(), "Precondition"); + bool should_ignore_zf = SafepointSynchronize::is_at_safepoint(); + assert(should_ignore_zf || Heap_lock->is_locked(), + "Either we're in a GC or we're allocating a region."); + switch (zero_fill_state()) { + case HeapRegion::NotZeroFilled: + set_zero_fill_in_progress(Thread::current()); + { + ZF_mon->unlock(); + Copy::fill_to_words(bottom(), capacity()/HeapWordSize); + ZF_mon->lock_without_safepoint_check(); + } + // A trap. + guarantee(zero_fill_state() == HeapRegion::ZeroFilling + && zero_filler() == Thread::current(), + "AHA! Tell Dave D if you see this..."); + set_zero_fill_complete(); + // gclog_or_tty->print_cr("Did sync ZF."); + ConcurrentZFThread::note_sync_zfs(); + break; + case HeapRegion::ZeroFilling: + if (should_ignore_zf) { + // We can "break" the lock and take over the work. + Copy::fill_to_words(bottom(), capacity()/HeapWordSize); + set_zero_fill_complete(); + ConcurrentZFThread::note_sync_zfs(); + break; + } else { + ConcurrentZFThread::wait_for_ZF_completed(this); + } + case HeapRegion::ZeroFilled: + // Nothing to do. + break; + case HeapRegion::Allocated: + guarantee(false, "Should not call on allocated regions."); + } + assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post"); +} + +HeapWord* +HeapRegion::object_iterate_mem_careful(MemRegion mr, + ObjectClosure* cl) { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + // We used to use "block_start_careful" here. But we're actually happy + // to update the BOT while we do this... 
+ HeapWord* cur = block_start(mr.start()); + mr = mr.intersection(used_region()); + if (mr.is_empty()) return NULL; + // Otherwise, find the obj that extends onto mr.start(). + + assert(cur <= mr.start() + && (oop(cur)->klass() == NULL || + cur + oop(cur)->size() > mr.start()), + "postcondition of block_start"); + oop obj; + while (cur < mr.end()) { + obj = oop(cur); + if (obj->klass() == NULL) { + // Ran into an unparseable point. + return cur; + } else if (!g1h->is_obj_dead(obj)) { + cl->do_object(obj); + } + if (cl->abort()) return cur; + // The check above must occur before the operation below, since an + // abort might invalidate the "size" operation. + cur += obj->size(); + } + return NULL; +} + +HeapWord* +HeapRegion:: +oops_on_card_seq_iterate_careful(MemRegion mr, + FilterOutOfRegionClosure* cl) { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + // If we're within a stop-world GC, then we might look at a card in a + // GC alloc region that extends onto a GC LAB, which may not be + // parseable. Stop such at the "saved_mark" of the region. + if (G1CollectedHeap::heap()->is_gc_active()) { + mr = mr.intersection(used_region_at_save_marks()); + } else { + mr = mr.intersection(used_region()); + } + if (mr.is_empty()) return NULL; + // Otherwise, find the obj that extends onto mr.start(). + + // We used to use "block_start_careful" here. But we're actually happy + // to update the BOT while we do this... + HeapWord* cur = block_start(mr.start()); + assert(cur <= mr.start(), "Postcondition"); + + while (cur <= mr.start()) { + if (oop(cur)->klass() == NULL) { + // Ran into an unparseable point. + return cur; + } + // Otherwise... + int sz = oop(cur)->size(); + if (cur + sz > mr.start()) break; + // Otherwise, go on. + cur = cur + sz; + } + oop obj; + obj = oop(cur); + // If we finish this loop... + assert(cur <= mr.start() + && obj->klass() != NULL + && cur + obj->size() > mr.start(), + "Loop postcondition"); + if (!g1h->is_obj_dead(obj)) { + obj->oop_iterate(cl, mr); + } + + HeapWord* next; + while (cur < mr.end()) { + obj = oop(cur); + if (obj->klass() == NULL) { + // Ran into an unparseable point. + return cur; + }; + // Otherwise: + next = (cur + obj->size()); + if (!g1h->is_obj_dead(obj)) { + if (next < mr.end()) { + obj->oop_iterate(cl); + } else { + // this obj spans the boundary. If it's an array, stop at the + // boundary. + if (obj->is_objArray()) { + obj->oop_iterate(cl, mr); + } else { + obj->oop_iterate(cl); + } + } + } + cur = next; + } + return NULL; +} + +void HeapRegion::print() const { print_on(gclog_or_tty); } +void HeapRegion::print_on(outputStream* st) const { + if (isHumongous()) { + if (startsHumongous()) + st->print(" HS"); + else + st->print(" HC"); + } else { + st->print(" "); + } + if (in_collection_set()) + st->print(" CS"); + else if (is_gc_alloc_region()) + st->print(" A "); + else + st->print(" "); + if (is_young()) + st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y ")); + else + st->print(" "); + if (is_empty()) + st->print(" F"); + else + st->print(" "); + st->print(" %d", _gc_time_stamp); + G1OffsetTableContigSpace::print_on(st); +} + +#define OBJ_SAMPLE_INTERVAL 0 +#define BLOCK_SAMPLE_INTERVAL 100 + +// This really ought to be commoned up into OffsetTableContigSpace somehow. +// We would need a mechanism to make that code skip dead objects. 
+ +void HeapRegion::verify(bool allow_dirty) const { + G1CollectedHeap* g1 = G1CollectedHeap::heap(); + HeapWord* p = bottom(); + HeapWord* prev_p = NULL; + int objs = 0; + int blocks = 0; + VerifyLiveClosure vl_cl(g1); + while (p < top()) { + size_t size = oop(p)->size(); + if (blocks == BLOCK_SAMPLE_INTERVAL) { + guarantee(p == block_start_const(p + (size/2)), + "check offset computation"); + blocks = 0; + } else { + blocks++; + } + if (objs == OBJ_SAMPLE_INTERVAL) { + oop obj = oop(p); + if (!g1->is_obj_dead(obj, this)) { + obj->verify(); + vl_cl.set_containing_obj(obj); + obj->oop_iterate(&vl_cl); + if (G1MaxVerifyFailures >= 0 + && vl_cl.n_failures() >= G1MaxVerifyFailures) break; + } + objs = 0; + } else { + objs++; + } + prev_p = p; + p += size; + } + HeapWord* rend = end(); + HeapWord* rtop = top(); + if (rtop < rend) { + guarantee(block_start_const(rtop + (rend - rtop) / 2) == rtop, + "check offset computation"); + } + if (vl_cl.failures()) { + gclog_or_tty->print_cr("Heap:"); + G1CollectedHeap::heap()->print(); + gclog_or_tty->print_cr(""); + } + if (VerifyDuringGC && + G1VerifyConcMarkPrintReachable && + vl_cl.failures()) { + g1->concurrent_mark()->print_prev_bitmap_reachable(); + } + guarantee(!vl_cl.failures(), "region verification failed"); + guarantee(p == top(), "end of last object must match end of space"); +} + +// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go +// away eventually. + +void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) { + // false ==> we'll do the clearing if there's clearing to be done. + ContiguousSpace::initialize(mr, false, mangle_space); + _offsets.zero_bottom_entry(); + _offsets.initialize_threshold(); + if (clear_space) clear(mangle_space); +} + +void G1OffsetTableContigSpace::clear(bool mangle_space) { + ContiguousSpace::clear(mangle_space); + _offsets.zero_bottom_entry(); + _offsets.initialize_threshold(); +} + +void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) { + Space::set_bottom(new_bottom); + _offsets.set_bottom(new_bottom); +} + +void G1OffsetTableContigSpace::set_end(HeapWord* new_end) { + Space::set_end(new_end); + _offsets.resize(new_end - bottom()); +} + +void G1OffsetTableContigSpace::print() const { + print_short(); + gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " + INTPTR_FORMAT ", " INTPTR_FORMAT ")", + bottom(), top(), _offsets.threshold(), end()); +} + +HeapWord* G1OffsetTableContigSpace::initialize_threshold() { + return _offsets.initialize_threshold(); +} + +HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start, + HeapWord* end) { + _offsets.alloc_block(start, end); + return _offsets.threshold(); +} + +HeapWord* G1OffsetTableContigSpace::saved_mark_word() const { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" ); + if (_gc_time_stamp < g1h->get_gc_time_stamp()) + return top(); + else + return ContiguousSpace::saved_mark_word(); +} + +void G1OffsetTableContigSpace::set_saved_mark() { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp(); + + if (_gc_time_stamp < curr_gc_time_stamp) { + // The order of these is important, as another thread might be + // about to start scanning this region. If it does so after + // set_saved_mark and before _gc_time_stamp = ..., then the latter + // will be false, and it will pick up top() as the high water mark + // of region. 
If it does so after _gc_time_stamp = ..., then it + // will pick up the right saved_mark_word() as the high water mark + // of the region. Either way, the behaviour will be correct. + ContiguousSpace::set_saved_mark(); + _gc_time_stamp = curr_gc_time_stamp; + OrderAccess::fence(); + } +} + +G1OffsetTableContigSpace:: +G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray, + MemRegion mr, bool is_zeroed) : + _offsets(sharedOffsetArray, mr), + _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true), + _gc_time_stamp(0) +{ + _offsets.set_space(this); + initialize(mr, !is_zeroed, SpaceDecorator::Mangle); +} + +size_t RegionList::length() { + size_t len = 0; + HeapRegion* cur = hd(); + DEBUG_ONLY(HeapRegion* last = NULL); + while (cur != NULL) { + len++; + DEBUG_ONLY(last = cur); + cur = get_next(cur); + } + assert(last == tl(), "Invariant"); + return len; +} + +void RegionList::insert_before_head(HeapRegion* r) { + assert(well_formed(), "Inv"); + set_next(r, hd()); + _hd = r; + _sz++; + if (tl() == NULL) _tl = r; + assert(well_formed(), "Inv"); +} + +void RegionList::prepend_list(RegionList* new_list) { + assert(well_formed(), "Precondition"); + assert(new_list->well_formed(), "Precondition"); + HeapRegion* new_tl = new_list->tl(); + if (new_tl != NULL) { + set_next(new_tl, hd()); + _hd = new_list->hd(); + _sz += new_list->sz(); + if (tl() == NULL) _tl = new_list->tl(); + } else { + assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv"); + } + assert(well_formed(), "Inv"); +} + +void RegionList::delete_after(HeapRegion* r) { + assert(well_formed(), "Precondition"); + HeapRegion* next = get_next(r); + assert(r != NULL, "Precondition"); + HeapRegion* next_tl = get_next(next); + set_next(r, next_tl); + dec_sz(); + if (next == tl()) { + assert(next_tl == NULL, "Inv"); + _tl = r; + } + assert(well_formed(), "Inv"); +} + +HeapRegion* RegionList::pop() { + assert(well_formed(), "Inv"); + HeapRegion* res = hd(); + if (res != NULL) { + _hd = get_next(res); + _sz--; + set_next(res, NULL); + if (sz() == 0) _tl = NULL; + } + assert(well_formed(), "Inv"); + return res; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp 2009-08-01 04:21:13.207899750 +0100 @@ -0,0 +1,912 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#ifndef SERIALGC + +// A HeapRegion is the smallest piece of a G1CollectedHeap that +// can be collected independently. 
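The saved_mark_word()/set_saved_mark() pair in heapRegion.cpp above (and the time-stamp rationale spelled out in the comment block further down in this header) boils down to a small protocol: a region's saved mark is only trusted while its time stamp matches the heap's current GC time stamp; otherwise top() is used. A minimal sketch with simplified types, purely illustrative and not the HotSpot classes:

// Simplified illustration of the per-region GC time stamp protocol.
struct DemoRegion {
  unsigned _gc_time_stamp;   // stamp recorded by the last set_saved_mark()
  char*    _saved_mark;      // value of top() at that point
  char*    _top;             // current allocation top

  char* saved_mark_word(unsigned heap_time_stamp) const {
    // A stale stamp means no save_marks() happened in this GC,
    // so top() is the correct boundary to report.
    return (_gc_time_stamp < heap_time_stamp) ? _top : _saved_mark;
  }

  void set_saved_mark(unsigned heap_time_stamp) {
    if (_gc_time_stamp < heap_time_stamp) {
      _saved_mark = _top;                // publish the mark ...
      _gc_time_stamp = heap_time_stamp;  // ... and the stamp (the real code
                                         // orders these with a fence)
    }
  }
};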
+ +// NOTE: Although a HeapRegion is a Space, its +// Space::initDirtyCardClosure method must not be called. +// The problem is that the existence of this method breaks +// the independence of barrier sets from remembered sets. +// The solution is to remove this method from the definition +// of a Space. + +class CompactibleSpace; +class ContiguousSpace; +class HeapRegionRemSet; +class HeapRegionRemSetIterator; +class HeapRegion; + +// A dirty card to oop closure for heap regions. It +// knows how to get the G1 heap and how to use the bitmap +// in the concurrent marker used by G1 to filter remembered +// sets. + +class HeapRegionDCTOC : public ContiguousSpaceDCTOC { +public: + // Specification of possible DirtyCardToOopClosure filtering. + enum FilterKind { + NoFilterKind, + IntoCSFilterKind, + OutOfRegionFilterKind + }; + +protected: + HeapRegion* _hr; + FilterKind _fk; + G1CollectedHeap* _g1; + + void walk_mem_region_with_cl(MemRegion mr, + HeapWord* bottom, HeapWord* top, + OopClosure* cl); + + // We don't specialize this for FilteringClosure; filtering is handled by + // the "FilterKind" mechanism. But we provide this to avoid a compiler + // warning. + void walk_mem_region_with_cl(MemRegion mr, + HeapWord* bottom, HeapWord* top, + FilteringClosure* cl) { + HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top, + (OopClosure*)cl); + } + + // Get the actual top of the area on which the closure will + // operate, given where the top is assumed to be (the end of the + // memory region passed to do_MemRegion) and where the object + // at the top is assumed to start. For example, an object may + // start at the top but actually extend past the assumed top, + // in which case the top becomes the end of the object. + HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) { + return ContiguousSpaceDCTOC::get_actual_top(top, top_obj); + } + + // Walk the given memory region from bottom to (actual) top + // looking for objects and applying the oop closure (_cl) to + // them. The base implementation of this treats the area as + // blocks, where a block may or may not be an object. Sub- + // classes should override this to provide more accurate + // or possibly more efficient walking. + void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) { + Filtering_DCTOC::walk_mem_region(mr, bottom, top); + } + +public: + HeapRegionDCTOC(G1CollectedHeap* g1, + HeapRegion* hr, OopClosure* cl, + CardTableModRefBS::PrecisionStyle precision, + FilterKind fk); +}; + + +// The complicating factor is that BlockOffsetTable diverged +// significantly, and we need functionality that is only in the G1 version. +// So I copied that code, which led to an alternate G1 version of +// OffsetTableContigSpace. If the two versions of BlockOffsetTable could +// be reconciled, then G1OffsetTableContigSpace could go away. + +// The idea behind time stamps is the following. Doing a save_marks on +// all regions at every GC pause is time consuming (if I remember +// well, 10ms or so). So, we would like to do that only for regions +// that are GC alloc regions. To achieve this, we use time +// stamps. For every evacuation pause, G1CollectedHeap generates a +// unique time stamp (essentially a counter that gets +// incremented). Every time we want to call save_marks on a region, +// we set the saved_mark_word to top and also copy the current GC +// time stamp to the time stamp field of the space. Reading the +// saved_mark_word involves checking the time stamp of the +// region. 
If it is the same as the current GC time stamp, then we +// can safely read the saved_mark_word field, as it is valid. If the +// time stamp of the region is not the same as the current GC time +// stamp, then we instead read top, as the saved_mark_word field is +// invalid. Time stamps (on the regions and also on the +// G1CollectedHeap) are reset at every cleanup (we iterate over +// the regions anyway) and at the end of a Full GC. The current scheme +// that uses sequential unsigned ints will fail only if we have 4b +// evacuation pauses between two cleanups, which is _highly_ unlikely. + +class G1OffsetTableContigSpace: public ContiguousSpace { + friend class VMStructs; + protected: + G1BlockOffsetArrayContigSpace _offsets; + Mutex _par_alloc_lock; + volatile unsigned _gc_time_stamp; + + public: + // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be + // assumed to contain zeros. + G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray, + MemRegion mr, bool is_zeroed = false); + + void set_bottom(HeapWord* value); + void set_end(HeapWord* value); + + virtual HeapWord* saved_mark_word() const; + virtual void set_saved_mark(); + void reset_gc_time_stamp() { _gc_time_stamp = 0; } + + virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); + virtual void clear(bool mangle_space); + + HeapWord* block_start(const void* p); + HeapWord* block_start_const(const void* p) const; + + // Add offset table update. + virtual HeapWord* allocate(size_t word_size); + HeapWord* par_allocate(size_t word_size); + + // MarkSweep support phase3 + virtual HeapWord* initialize_threshold(); + virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end); + + virtual void print() const; +}; + +class HeapRegion: public G1OffsetTableContigSpace { + friend class VMStructs; + private: + + enum HumongousType { + NotHumongous = 0, + StartsHumongous, + ContinuesHumongous + }; + + // The next filter kind that should be used for a "new_dcto_cl" call with + // the "traditional" signature. + HeapRegionDCTOC::FilterKind _next_fk; + + // Requires that the region "mr" be dense with objects, and begin and end + // with an object. + void oops_in_mr_iterate(MemRegion mr, OopClosure* cl); + + // The remembered set for this region. + // (Might want to make this "inline" later, to avoid some alloc failure + // issues.) + HeapRegionRemSet* _rem_set; + + G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; } + + protected: + // If this region is a member of a HeapRegionSeq, the index in that + // sequence, otherwise -1. + int _hrs_index; + + HumongousType _humongous_type; + // For a humongous region, region in which it starts. + HeapRegion* _humongous_start_region; + // For the start region of a humongous sequence, it's original end(). + HeapWord* _orig_end; + + // True iff the region is in current collection_set. + bool _in_collection_set; + + // True iff the region is on the unclean list, waiting to be zero filled. + bool _is_on_unclean_list; + + // True iff the region is on the free list, ready for allocation. + bool _is_on_free_list; + + // Is this or has it been an allocation region in the current collection + // pause. + bool _is_gc_alloc_region; + + // True iff an attempt to evacuate an object in the region failed. + bool _evacuation_failed; + + // A heap region may be a member one of a number of special subsets, each + // represented as linked lists through the field below. Currently, these + // sets include: + // The collection set. 
+ // The set of allocation regions used in a collection pause. + // Spaces that may contain gray objects. + HeapRegion* _next_in_special_set; + + // next region in the young "generation" region set + HeapRegion* _next_young_region; + + // For parallel heapRegion traversal. + jint _claimed; + + // We use concurrent marking to determine the amount of live data + // in each heap region. + size_t _prev_marked_bytes; // Bytes known to be live via last completed marking. + size_t _next_marked_bytes; // Bytes known to be live via in-progress marking. + + // See "sort_index" method. -1 means is not in the array. + int _sort_index; + + // + double _gc_efficiency; + // + + enum YoungType { + NotYoung, // a region is not young + ScanOnly, // a region is young and scan-only + Young, // a region is young + Survivor // a region is young and it contains + // survivor + }; + + YoungType _young_type; + int _young_index_in_cset; + SurvRateGroup* _surv_rate_group; + int _age_index; + + // The start of the unmarked area. The unmarked area extends from this + // word until the top and/or end of the region, and is the part + // of the region for which no marking was done, i.e. objects may + // have been allocated in this part since the last mark phase. + // "prev" is the top at the start of the last completed marking. + // "next" is the top at the start of the in-progress marking (if any.) + HeapWord* _prev_top_at_mark_start; + HeapWord* _next_top_at_mark_start; + // If a collection pause is in progress, this is the top at the start + // of that pause. + + // We've counted the marked bytes of objects below here. + HeapWord* _top_at_conc_mark_count; + + void init_top_at_mark_start() { + assert(_prev_marked_bytes == 0 && + _next_marked_bytes == 0, + "Must be called after zero_marked_bytes."); + HeapWord* bot = bottom(); + _prev_top_at_mark_start = bot; + _next_top_at_mark_start = bot; + _top_at_conc_mark_count = bot; + } + + jint _zfs; // A member of ZeroFillState. Protected by ZF_lock. + Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last) + // made it so. + + void set_young_type(YoungType new_type) { + //assert(_young_type != new_type, "setting the same type" ); + // TODO: add more assertions here + _young_type = new_type; + } + + public: + // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros. + HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray, + MemRegion mr, bool is_zeroed); + + enum SomePublicConstants { + // HeapRegions are GrainBytes-aligned + // and have sizes that are multiples of GrainBytes. + LogOfHRGrainBytes = 20, + LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize, + GrainBytes = 1 << LogOfHRGrainBytes, + GrainWords = 1 <= marked_bytes(), + "Can't mark more than we have."); + return used_at_mark_start_bytes - marked_bytes(); + } + + // An upper bound on the number of live bytes in the region. + size_t max_live_bytes() { return used() - garbage_bytes(); } + + void add_to_marked_bytes(size_t incr_bytes) { + _next_marked_bytes = _next_marked_bytes + incr_bytes; + guarantee( _next_marked_bytes <= used(), "invariant" ); + } + + void zero_marked_bytes() { + _prev_marked_bytes = _next_marked_bytes = 0; + } + + bool isHumongous() const { return _humongous_type != NotHumongous; } + bool startsHumongous() const { return _humongous_type == StartsHumongous; } + bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; } + // For a humongous region, region in which it starts. 
+ HeapRegion* humongous_start_region() const { + return _humongous_start_region; + } + + // Causes the current region to represent a humongous object spanning "n" + // regions. + virtual void set_startsHumongous(); + + // The regions that continue a humongous sequence should be added using + // this method, in increasing address order. + void set_continuesHumongous(HeapRegion* start); + + void add_continuingHumongousRegion(HeapRegion* cont); + + // If the region has a remembered set, return a pointer to it. + HeapRegionRemSet* rem_set() const { + return _rem_set; + } + + // True iff the region is in current collection_set. + bool in_collection_set() const { + return _in_collection_set; + } + void set_in_collection_set(bool b) { + _in_collection_set = b; + } + HeapRegion* next_in_collection_set() { + assert(in_collection_set(), "should only invoke on member of CS."); + assert(_next_in_special_set == NULL || + _next_in_special_set->in_collection_set(), + "Malformed CS."); + return _next_in_special_set; + } + void set_next_in_collection_set(HeapRegion* r) { + assert(in_collection_set(), "should only invoke on member of CS."); + assert(r == NULL || r->in_collection_set(), "Malformed CS."); + _next_in_special_set = r; + } + + // True iff it is or has been an allocation region in the current + // collection pause. + bool is_gc_alloc_region() const { + return _is_gc_alloc_region; + } + void set_is_gc_alloc_region(bool b) { + _is_gc_alloc_region = b; + } + HeapRegion* next_gc_alloc_region() { + assert(is_gc_alloc_region(), "should only invoke on member of CS."); + assert(_next_in_special_set == NULL || + _next_in_special_set->is_gc_alloc_region(), + "Malformed CS."); + return _next_in_special_set; + } + void set_next_gc_alloc_region(HeapRegion* r) { + assert(is_gc_alloc_region(), "should only invoke on member of CS."); + assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS."); + _next_in_special_set = r; + } + + bool is_on_free_list() { + return _is_on_free_list; + } + + void set_on_free_list(bool b) { + _is_on_free_list = b; + } + + HeapRegion* next_from_free_list() { + assert(is_on_free_list(), + "Should only invoke on free space."); + assert(_next_in_special_set == NULL || + _next_in_special_set->is_on_free_list(), + "Malformed Free List."); + return _next_in_special_set; + } + + void set_next_on_free_list(HeapRegion* r) { + assert(r == NULL || r->is_on_free_list(), "Malformed free list."); + _next_in_special_set = r; + } + + bool is_on_unclean_list() { + return _is_on_unclean_list; + } + + void set_on_unclean_list(bool b); + + HeapRegion* next_from_unclean_list() { + assert(is_on_unclean_list(), + "Should only invoke on unclean space."); + assert(_next_in_special_set == NULL || + _next_in_special_set->is_on_unclean_list(), + "Malformed unclean List."); + return _next_in_special_set; + } + + void set_next_on_unclean_list(HeapRegion* r); + + HeapRegion* get_next_young_region() { return _next_young_region; } + void set_next_young_region(HeapRegion* hr) { + _next_young_region = hr; + } + + // Allows logical separation between objects allocated before and after. + void save_marks(); + + // Reset HR stuff to default values. + void hr_clear(bool par, bool clear_space); + + void initialize(MemRegion mr, bool clear_space, bool mangle_space); + + // Ensure that "this" is zero-filled. + void ensure_zero_filled(); + // This one requires that the calling thread holds ZF_mon. + void ensure_zero_filled_locked(); + + // Get the start of the unmarked area in this region. 
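The accessors just below return these "top at mark start" (TAMS) boundaries; note_start_of_marking()/note_end_of_marking() further down maintain them, and they feed the liveness rule used throughout heapRegion.cpp: an object above prev-TAMS was allocated after the last completed marking and is treated as live, otherwise it must be marked in the previous bitmap. A minimal sketch with simplified types (illustrative only, not the HotSpot classes):

// Simplified illustration of the prev-TAMS liveness rule.
struct DemoTamsRegion {
  char* _prev_top_at_mark_start;   // top when the last completed marking began

  bool obj_allocated_since_prev_marking(const char* obj) const {
    return obj >= _prev_top_at_mark_start;
  }

  // An object is dead only if it existed when marking started (below
  // prev-TAMS) and that marking did not find it reachable.
  bool is_obj_dead(const char* obj, bool marked_in_prev_bitmap) const {
    return !obj_allocated_since_prev_marking(obj) && !marked_in_prev_bitmap;
  }
};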
+ HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; } + HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; } + + // Apply "cl->do_oop" to (the addresses of) all reference fields in objects + // allocated in the current region before the last call to "save_mark". + void oop_before_save_marks_iterate(OopClosure* cl); + + // This call determines the "filter kind" argument that will be used for + // the next call to "new_dcto_cl" on this region with the "traditional" + // signature (i.e., the call below.) The default, in the absence of a + // preceding call to this method, is "NoFilterKind", and a call to this + // method is necessary for each such call, or else it reverts to the + // default. + // (This is really ugly, but all other methods I could think of changed a + // lot of main-line code for G1.) + void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) { + _next_fk = nfk; + } + + DirtyCardToOopClosure* + new_dcto_closure(OopClosure* cl, + CardTableModRefBS::PrecisionStyle precision, + HeapRegionDCTOC::FilterKind fk); + +#if WHASSUP + DirtyCardToOopClosure* + new_dcto_closure(OopClosure* cl, + CardTableModRefBS::PrecisionStyle precision, + HeapWord* boundary) { + assert(boundary == NULL, "This arg doesn't make sense here."); + DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk); + _next_fk = HeapRegionDCTOC::NoFilterKind; + return res; + } +#endif + + // + // Note the start or end of marking. This tells the heap region + // that the collector is about to start or has finished (concurrently) + // marking the heap. + // + + // Note the start of a marking phase. Record the + // start of the unmarked area of the region here. + void note_start_of_marking(bool during_initial_mark) { + init_top_at_conc_mark_count(); + _next_marked_bytes = 0; + if (during_initial_mark && is_young() && !is_survivor()) + _next_top_at_mark_start = bottom(); + else + _next_top_at_mark_start = top(); + } + + // Note the end of a marking phase. Install the start of + // the unmarked area that was captured at start of marking. + void note_end_of_marking() { + _prev_top_at_mark_start = _next_top_at_mark_start; + _prev_marked_bytes = _next_marked_bytes; + _next_marked_bytes = 0; + + guarantee(_prev_marked_bytes <= + (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize, + "invariant"); + } + + // After an evacuation, we need to update _next_top_at_mark_start + // to be the current top. Note this is only valid if we have only + // ever evacuated into this region. If we evacuate, allocate, and + // then evacuate we are in deep doodoo. + void note_end_of_copying() { + assert(top() >= _next_top_at_mark_start, + "Increase only"); + // Survivor regions will be scanned on the start of concurrent + // marking. + if (!is_survivor()) { + _next_top_at_mark_start = top(); + } + } + + // Returns "false" iff no object in the region was allocated when the + // last mark phase ended. + bool is_marked() { return _prev_top_at_mark_start != bottom(); } + + // If "is_marked()" is true, then this is the index of the region in + // an array constructed at the end of marking of the regions in a + // "desirability" order. 
+ int sort_index() { + return _sort_index; + } + void set_sort_index(int i) { + _sort_index = i; + } + + void init_top_at_conc_mark_count() { + _top_at_conc_mark_count = bottom(); + } + + void set_top_at_conc_mark_count(HeapWord *cur) { + assert(bottom() <= cur && cur <= end(), "Sanity."); + _top_at_conc_mark_count = cur; + } + + HeapWord* top_at_conc_mark_count() { + return _top_at_conc_mark_count; + } + + void reset_during_compaction() { + guarantee( isHumongous() && startsHumongous(), + "should only be called for humongous regions"); + + zero_marked_bytes(); + init_top_at_mark_start(); + } + + // + void calc_gc_efficiency(void); + double gc_efficiency() { return _gc_efficiency;} + // + + bool is_young() const { return _young_type != NotYoung; } + bool is_scan_only() const { return _young_type == ScanOnly; } + bool is_survivor() const { return _young_type == Survivor; } + + int young_index_in_cset() const { return _young_index_in_cset; } + void set_young_index_in_cset(int index) { + assert( (index == -1) || is_young(), "pre-condition" ); + _young_index_in_cset = index; + } + + int age_in_surv_rate_group() { + assert( _surv_rate_group != NULL, "pre-condition" ); + assert( _age_index > -1, "pre-condition" ); + return _surv_rate_group->age_in_group(_age_index); + } + + void recalculate_age_in_surv_rate_group() { + assert( _surv_rate_group != NULL, "pre-condition" ); + assert( _age_index > -1, "pre-condition" ); + _age_index = _surv_rate_group->recalculate_age_index(_age_index); + } + + void record_surv_words_in_group(size_t words_survived) { + assert( _surv_rate_group != NULL, "pre-condition" ); + assert( _age_index > -1, "pre-condition" ); + int age_in_group = age_in_surv_rate_group(); + _surv_rate_group->record_surviving_words(age_in_group, words_survived); + } + + int age_in_surv_rate_group_cond() { + if (_surv_rate_group != NULL) + return age_in_surv_rate_group(); + else + return -1; + } + + SurvRateGroup* surv_rate_group() { + return _surv_rate_group; + } + + void install_surv_rate_group(SurvRateGroup* surv_rate_group) { + assert( surv_rate_group != NULL, "pre-condition" ); + assert( _surv_rate_group == NULL, "pre-condition" ); + assert( is_young(), "pre-condition" ); + + _surv_rate_group = surv_rate_group; + _age_index = surv_rate_group->next_age_index(); + } + + void uninstall_surv_rate_group() { + if (_surv_rate_group != NULL) { + assert( _age_index > -1, "pre-condition" ); + assert( is_young(), "pre-condition" ); + + _surv_rate_group = NULL; + _age_index = -1; + } else { + assert( _age_index == -1, "pre-condition" ); + } + } + + void set_young() { set_young_type(Young); } + + void set_scan_only() { set_young_type(ScanOnly); } + + void set_survivor() { set_young_type(Survivor); } + + void set_not_young() { set_young_type(NotYoung); } + + // Determine if an object has been allocated since the last + // mark performed by the collector. This returns true iff the object + // is within the unmarked area of the region. + bool obj_allocated_since_prev_marking(oop obj) const { + return (HeapWord *) obj >= prev_top_at_mark_start(); + } + bool obj_allocated_since_next_marking(oop obj) const { + return (HeapWord *) obj >= next_top_at_mark_start(); + } + + // For parallel heapRegion traversal. + bool claimHeapRegion(int claimValue); + jint claim_value() { return _claimed; } + // Use this carefully: only when you're sure no one is claiming... + void set_claim_value(int claimValue) { _claimed = claimValue; } + + // Returns the "evacuation_failed" property of the region. 
+ bool evacuation_failed() { return _evacuation_failed; } + + // Sets the "evacuation_failed" property of the region. + void set_evacuation_failed(bool b) { + _evacuation_failed = b; + + if (b) { + init_top_at_conc_mark_count(); + _next_marked_bytes = 0; + } + } + + // Requires that "mr" be entirely within the region. + // Apply "cl->do_object" to all objects that intersect with "mr". + // If the iteration encounters an unparseable portion of the region, + // or if "cl->abort()" is true after a closure application, + // terminate the iteration and return the address of the start of the + // subregion that isn't done. (The two can be distinguished by querying + // "cl->abort()".) Return of "NULL" indicates that the iteration + // completed. + HeapWord* + object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl); + + HeapWord* + oops_on_card_seq_iterate_careful(MemRegion mr, + FilterOutOfRegionClosure* cl); + + // The region "mr" is entirely in "this", and starts and ends at block + // boundaries. The caller declares that all the contained blocks are + // coalesced into one. + void declare_filled_region_to_BOT(MemRegion mr) { + _offsets.single_block(mr.start(), mr.end()); + } + + // A version of block start that is guaranteed to find *some* block + // boundary at or before "p", but does not object iteration, and may + // therefore be used safely when the heap is unparseable. + HeapWord* block_start_careful(const void* p) const { + return _offsets.block_start_careful(p); + } + + // Requires that "addr" is within the region. Returns the start of the + // first ("careful") block that starts at or after "addr", or else the + // "end" of the region if there is no such block. + HeapWord* next_block_start_careful(HeapWord* addr); + + // Returns the zero-fill-state of the current region. + ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; } + bool zero_fill_is_allocated() { return _zfs == Allocated; } + Thread* zero_filler() { return _zero_filler; } + + // Indicate that the contents of the region are unknown, and therefore + // might require zero-filling. + void set_zero_fill_needed() { + set_zero_fill_state_work(NotZeroFilled); + } + void set_zero_fill_in_progress(Thread* t) { + set_zero_fill_state_work(ZeroFilling); + _zero_filler = t; + } + void set_zero_fill_complete(); + void set_zero_fill_allocated() { + set_zero_fill_state_work(Allocated); + } + + void set_zero_fill_state_work(ZeroFillState zfs); + + // This is called when a full collection shrinks the heap. + // We want to set the heap region to a value which says + // it is no longer part of the heap. For now, we'll let "NotZF" fill + // that role. + void reset_zero_fill() { + set_zero_fill_state_work(NotZeroFilled); + _zero_filler = NULL; + } + +#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ + virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); + SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL) + + CompactibleSpace* next_compaction_space() const; + + virtual void reset_after_compaction(); + + void print() const; + void print_on(outputStream* st) const; + + // Override + virtual void verify(bool allow_dirty) const; + +#ifdef DEBUG + HeapWord* allocate(size_t size); +#endif +}; + +// HeapRegionClosure is used for iterating over regions. +// Terminates the iteration when the "doHeapRegion" method returns "true". 
+class HeapRegionClosure : public StackObj {
+  friend class HeapRegionSeq;
+  friend class G1CollectedHeap;
+
+  bool _complete;
+  void incomplete() { _complete = false; }
+
+ public:
+  HeapRegionClosure(): _complete(true) {}
+
+  // Typically called on each region until it returns true.
+  virtual bool doHeapRegion(HeapRegion* r) = 0;
+
+  // True after iteration if the closure was applied to all heap regions
+  // and returned "false" in all cases.
+  bool complete() { return _complete; }
+};
+
+// A linked list of heap regions.  It leaves the "next" field
+// unspecified; that's up to subtypes.
+class RegionList VALUE_OBJ_CLASS_SPEC {
+protected:
+  virtual HeapRegion* get_next(HeapRegion* chr) = 0;
+  virtual void set_next(HeapRegion* chr,
+                        HeapRegion* new_next) = 0;
+
+  HeapRegion* _hd;
+  HeapRegion* _tl;
+  size_t _sz;
+
+  // Protected constructor because this type is only meaningful
+  // when the _get/_set next functions are defined.
+  RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
+public:
+  void reset() {
+    _hd = NULL;
+    _tl = NULL;
+    _sz = 0;
+  }
+  HeapRegion* hd() { return _hd; }
+  HeapRegion* tl() { return _tl; }
+  size_t sz() { return _sz; }
+  size_t length();
+
+  bool well_formed() {
+    return
+      ((hd() == NULL && tl() == NULL && sz() == 0)
+       || (hd() != NULL && tl() != NULL && sz() > 0))
+      && (sz() == length());
+  }
+  virtual void insert_before_head(HeapRegion* r);
+  void prepend_list(RegionList* new_list);
+  virtual HeapRegion* pop();
+  void dec_sz() { _sz--; }
+  // Requires that "r" is an element of the list, and is not the tail.
+  void delete_after(HeapRegion* r);
+};
+
+class EmptyNonHRegionList: public RegionList {
+protected:
+  // Protected constructor because this type is only meaningful
+  // when the _get/_set next functions are defined.
+ EmptyNonHRegionList() : RegionList() {} + +public: + void insert_before_head(HeapRegion* r) { + // assert(r->is_empty(), "Better be empty"); + assert(!r->isHumongous(), "Better not be humongous."); + RegionList::insert_before_head(r); + } + void prepend_list(EmptyNonHRegionList* new_list) { + // assert(new_list->hd() == NULL || new_list->hd()->is_empty(), + // "Better be empty"); + assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(), + "Better not be humongous."); + // assert(new_list->tl() == NULL || new_list->tl()->is_empty(), + // "Better be empty"); + assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(), + "Better not be humongous."); + RegionList::prepend_list(new_list); + } +}; + +class UncleanRegionList: public EmptyNonHRegionList { +public: + HeapRegion* get_next(HeapRegion* hr) { + return hr->next_from_unclean_list(); + } + void set_next(HeapRegion* hr, HeapRegion* new_next) { + hr->set_next_on_unclean_list(new_next); + } + + UncleanRegionList() : EmptyNonHRegionList() {} + + void insert_before_head(HeapRegion* r) { + assert(!r->is_on_free_list(), + "Better not already be on free list"); + assert(!r->is_on_unclean_list(), + "Better not already be on unclean list"); + r->set_zero_fill_needed(); + r->set_on_unclean_list(true); + EmptyNonHRegionList::insert_before_head(r); + } + void prepend_list(UncleanRegionList* new_list) { + assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(), + "Better not already be on free list"); + assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(), + "Better already be marked as on unclean list"); + assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(), + "Better not already be on free list"); + assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(), + "Better already be marked as on unclean list"); + EmptyNonHRegionList::prepend_list(new_list); + } + HeapRegion* pop() { + HeapRegion* res = RegionList::pop(); + if (res != NULL) res->set_on_unclean_list(false); + return res; + } +}; + +// Local Variables: *** +// c-indentation-style: gnu *** +// End: *** + +#endif // SERIALGC --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp 2009-08-01 04:21:13.635081741 +0100 @@ -0,0 +1,60 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) { + HeapWord* res = ContiguousSpace::allocate(size); + if (res != NULL) { + _offsets.alloc_block(res, size); + } + return res; +} + +// Because of the requirement of keeping "_offsets" up to date with the +// allocations, we sequentialize these with a lock. Therefore, best if +// this is used for larger LAB allocations only. +inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) { + MutexLocker x(&_par_alloc_lock); + // This ought to be just "allocate", because of the lock above, but that + // ContiguousSpace::allocate asserts that either the allocating thread + // holds the heap lock or it is the VM thread and we're at a safepoint. + // The best I (dld) could figure was to put a field in ContiguousSpace + // meaning "locking at safepoint taken care of", and set/reset that + // here. But this will do for now, especially in light of the comment + // above. Perhaps in the future some lock-free manner of keeping the + // coordination. + HeapWord* res = ContiguousSpace::par_allocate(size); + if (res != NULL) { + _offsets.alloc_block(res, size); + } + return res; +} + +inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) { + return _offsets.block_start(p); +} + +inline HeapWord* +G1OffsetTableContigSpace::block_start_const(const void* p) const { + return _offsets.block_start_const(p); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp 2009-08-01 04:21:14.039570483 +0100 @@ -0,0 +1,1447 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_heapRegionRemSet.cpp.incl" + +#define HRRS_VERBOSE 0 + +#define PRT_COUNT_OCCUPIED 1 + +// OtherRegionsTable + +class PerRegionTable: public CHeapObj { + friend class OtherRegionsTable; + friend class HeapRegionRemSetIterator; + + HeapRegion* _hr; + BitMap _bm; +#if PRT_COUNT_OCCUPIED + jint _occupied; +#endif + PerRegionTable* _next_free; + + PerRegionTable* next_free() { return _next_free; } + void set_next_free(PerRegionTable* prt) { _next_free = prt; } + + + static PerRegionTable* _free_list; + +#ifdef _MSC_VER + // For some reason even though the classes are marked as friend they are unable + // to access CardsPerRegion when private/protected. 
Only the windows c++ compiler + // says this Sun CC and linux gcc don't have a problem with access when private + + public: + +#endif // _MSC_VER + + enum SomePrivateConstants { + CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift + }; + +protected: + // We need access in order to union things into the base table. + BitMap* bm() { return &_bm; } + +#if PRT_COUNT_OCCUPIED + void recount_occupied() { + _occupied = (jint) bm()->count_one_bits(); + } +#endif + + PerRegionTable(HeapRegion* hr) : + _hr(hr), +#if PRT_COUNT_OCCUPIED + _occupied(0), +#endif + _bm(CardsPerRegion, false /* in-resource-area */) + {} + + static void free(PerRegionTable* prt) { + while (true) { + PerRegionTable* fl = _free_list; + prt->set_next_free(fl); + PerRegionTable* res = + (PerRegionTable*) + Atomic::cmpxchg_ptr(prt, &_free_list, fl); + if (res == fl) return; + } + ShouldNotReachHere(); + } + + static PerRegionTable* alloc(HeapRegion* hr) { + PerRegionTable* fl = _free_list; + while (fl != NULL) { + PerRegionTable* nxt = fl->next_free(); + PerRegionTable* res = + (PerRegionTable*) + Atomic::cmpxchg_ptr(nxt, &_free_list, fl); + if (res == fl) { + fl->init(hr); + return fl; + } else { + fl = _free_list; + } + } + assert(fl == NULL, "Loop condition."); + return new PerRegionTable(hr); + } + + void add_card_work(short from_card, bool par) { + if (!_bm.at(from_card)) { + if (par) { + if (_bm.par_at_put(from_card, 1)) { +#if PRT_COUNT_OCCUPIED + Atomic::inc(&_occupied); +#endif + } + } else { + _bm.at_put(from_card, 1); +#if PRT_COUNT_OCCUPIED + _occupied++; +#endif + } + } + } + + void add_reference_work(oop* from, bool par) { + // Must make this robust in case "from" is not in "_hr", because of + // concurrency. + +#if HRRS_VERBOSE + gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").", + from, *from); +#endif + + HeapRegion* loc_hr = hr(); + // If the test below fails, then this table was reused concurrently + // with this operation. This is OK, since the old table was coarsened, + // and adding a bit to the new table is never incorrect. + if (loc_hr->is_in_reserved(from)) { + size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom()); + size_t from_card = + hw_offset >> + (CardTableModRefBS::card_shift - LogHeapWordSize); + + add_card_work((short) from_card, par); + } + } + +public: + + HeapRegion* hr() const { return _hr; } + +#if PRT_COUNT_OCCUPIED + jint occupied() const { + // Overkill, but if we ever need it... 
+ // guarantee(_occupied == _bm.count_one_bits(), "Check"); + return _occupied; + } +#else + jint occupied() const { + return _bm.count_one_bits(); + } +#endif + + void init(HeapRegion* hr) { + _hr = hr; +#if PRT_COUNT_OCCUPIED + _occupied = 0; +#endif + _bm.clear(); + } + + void add_reference(oop* from) { + add_reference_work(from, /*parallel*/ true); + } + + void seq_add_reference(oop* from) { + add_reference_work(from, /*parallel*/ false); + } + + void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) { + HeapWord* hr_bot = hr()->bottom(); + size_t hr_first_card_index = ctbs->index_for(hr_bot); + bm()->set_intersection_at_offset(*card_bm, hr_first_card_index); +#if PRT_COUNT_OCCUPIED + recount_occupied(); +#endif + } + + void add_card(short from_card_index) { + add_card_work(from_card_index, /*parallel*/ true); + } + + void seq_add_card(short from_card_index) { + add_card_work(from_card_index, /*parallel*/ false); + } + + // (Destructively) union the bitmap of the current table into the given + // bitmap (which is assumed to be of the same size.) + void union_bitmap_into(BitMap* bm) { + bm->set_union(_bm); + } + + // Mem size in bytes. + size_t mem_size() const { + return sizeof(this) + _bm.size_in_words() * HeapWordSize; + } + + static size_t fl_mem_size() { + PerRegionTable* cur = _free_list; + size_t res = 0; + while (cur != NULL) { + res += sizeof(PerRegionTable); + cur = cur->next_free(); + } + return res; + } + + // Requires "from" to be in "hr()". + bool contains_reference(oop* from) const { + assert(hr()->is_in_reserved(from), "Precondition."); + size_t card_ind = pointer_delta(from, hr()->bottom(), + CardTableModRefBS::card_size); + return _bm.at(card_ind); + } +}; + +PerRegionTable* PerRegionTable::_free_list = NULL; + + +#define COUNT_PAR_EXPANDS 0 + +#if COUNT_PAR_EXPANDS +static jint n_par_expands = 0; +static jint n_par_contracts = 0; +static jint par_expand_list_len = 0; +static jint max_par_expand_list_len = 0; + +static void print_par_expand() { + Atomic::inc(&n_par_expands); + Atomic::inc(&par_expand_list_len); + if (par_expand_list_len > max_par_expand_list_len) { + max_par_expand_list_len = par_expand_list_len; + } + if ((n_par_expands % 10) == 0) { + gclog_or_tty->print_cr("\n\n%d par expands: %d contracts, " + "len = %d, max_len = %d\n.", + n_par_expands, n_par_contracts, par_expand_list_len, + max_par_expand_list_len); + } +} +#endif + +class PosParPRT: public PerRegionTable { + PerRegionTable** _par_tables; + + enum SomePrivateConstants { + ReserveParTableExpansion = 1 + }; + + void par_expand() { + int n = HeapRegionRemSet::num_par_rem_sets()-1; + if (n <= 0) return; + if (_par_tables == NULL) { + PerRegionTable* res = + (PerRegionTable*) + Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion, + &_par_tables, NULL); + if (res != NULL) return; + // Otherwise, we reserved the right to do the expansion. + + PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n); + for (int i = 0; i < n; i++) { + PerRegionTable* ptable = PerRegionTable::alloc(hr()); + ptables[i] = ptable; + } + // Here we do not need an atomic. + _par_tables = ptables; +#if COUNT_PAR_EXPANDS + print_par_expand(); +#endif + // We must put this table on the expanded list. + PosParPRT* exp_head = _par_expanded_list; + while (true) { + set_next_par_expanded(exp_head); + PosParPRT* res = + (PosParPRT*) + Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head); + if (res == exp_head) return; + // Otherwise. 
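+        // The cmpxchg failed: some other thread pushed onto the expanded
+        // list first, and "res" is the head it installed.  Retry the push
+        // using that value as the expected head.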
+ exp_head = res; + } + ShouldNotReachHere(); + } + } + + void par_contract() { + assert(_par_tables != NULL, "Precondition."); + int n = HeapRegionRemSet::num_par_rem_sets()-1; + for (int i = 0; i < n; i++) { + _par_tables[i]->union_bitmap_into(bm()); + PerRegionTable::free(_par_tables[i]); + _par_tables[i] = NULL; + } +#if PRT_COUNT_OCCUPIED + // We must recount the "occupied." + recount_occupied(); +#endif + FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables); + _par_tables = NULL; +#if COUNT_PAR_EXPANDS + Atomic::inc(&n_par_contracts); + Atomic::dec(&par_expand_list_len); +#endif + } + + static PerRegionTable** _par_table_fl; + + PosParPRT* _next; + + static PosParPRT* _free_list; + + PerRegionTable** par_tables() const { + assert(uintptr_t(NULL) == 0, "Assumption."); + if (uintptr_t(_par_tables) <= ReserveParTableExpansion) + return NULL; + else + return _par_tables; + } + + PosParPRT* _next_par_expanded; + PosParPRT* next_par_expanded() { return _next_par_expanded; } + void set_next_par_expanded(PosParPRT* ppprt) { _next_par_expanded = ppprt; } + static PosParPRT* _par_expanded_list; + +public: + + PosParPRT(HeapRegion* hr) : PerRegionTable(hr), _par_tables(NULL) {} + + jint occupied() const { + jint res = PerRegionTable::occupied(); + if (par_tables() != NULL) { + for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { + res += par_tables()[i]->occupied(); + } + } + return res; + } + + void init(HeapRegion* hr) { + PerRegionTable::init(hr); + _next = NULL; + if (par_tables() != NULL) { + for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { + par_tables()[i]->init(hr); + } + } + } + + static void free(PosParPRT* prt) { + while (true) { + PosParPRT* fl = _free_list; + prt->set_next(fl); + PosParPRT* res = + (PosParPRT*) + Atomic::cmpxchg_ptr(prt, &_free_list, fl); + if (res == fl) return; + } + ShouldNotReachHere(); + } + + static PosParPRT* alloc(HeapRegion* hr) { + PosParPRT* fl = _free_list; + while (fl != NULL) { + PosParPRT* nxt = fl->next(); + PosParPRT* res = + (PosParPRT*) + Atomic::cmpxchg_ptr(nxt, &_free_list, fl); + if (res == fl) { + fl->init(hr); + return fl; + } else { + fl = _free_list; + } + } + assert(fl == NULL, "Loop condition."); + return new PosParPRT(hr); + } + + PosParPRT* next() const { return _next; } + void set_next(PosParPRT* nxt) { _next = nxt; } + PosParPRT** next_addr() { return &_next; } + + void add_reference(oop* from, int tid) { + // Expand if necessary. + PerRegionTable** pt = par_tables(); + if (par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region()) { + par_expand(); + pt = par_tables(); + } + if (pt != NULL) { + // We always have to assume that mods to table 0 are in parallel, + // because of the claiming scheme in parallel expansion. A thread + // with tid != 0 that finds the table to be NULL, but doesn't succeed + // in claiming the right of expanding it, will end up in the else + // clause of the above if test. That thread could be delayed, and a + // thread 0 add reference could see the table expanded, and come + // here. Both threads would be adding in parallel. But we get to + // not use atomics for tids > 0. + if (tid == 0) { + PerRegionTable::add_reference(from); + } else { + pt[tid-1]->seq_add_reference(from); + } + } else { + // Not expanded -- add to the base table. 
+ PerRegionTable::add_reference(from); + } + } + + void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) { + assert(_par_tables == NULL, "Precondition"); + PerRegionTable::scrub(ctbs, card_bm); + } + + size_t mem_size() const { + size_t res = + PerRegionTable::mem_size() + sizeof(this) - sizeof(PerRegionTable); + if (_par_tables != NULL) { + for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { + res += _par_tables[i]->mem_size(); + } + } + return res; + } + + static size_t fl_mem_size() { + PosParPRT* cur = _free_list; + size_t res = 0; + while (cur != NULL) { + res += sizeof(PosParPRT); + cur = cur->next(); + } + return res; + } + + bool contains_reference(oop* from) const { + if (PerRegionTable::contains_reference(from)) return true; + if (_par_tables != NULL) { + for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { + if (_par_tables[i]->contains_reference(from)) return true; + } + } + return false; + } + + static void par_contract_all(); + +}; + +void PosParPRT::par_contract_all() { + PosParPRT* hd = _par_expanded_list; + while (hd != NULL) { + PosParPRT* nxt = hd->next_par_expanded(); + PosParPRT* res = + (PosParPRT*) + Atomic::cmpxchg_ptr(nxt, &_par_expanded_list, hd); + if (res == hd) { + // We claimed the right to contract this table. + hd->set_next_par_expanded(NULL); + hd->par_contract(); + hd = _par_expanded_list; + } else { + hd = res; + } + } +} + +PosParPRT* PosParPRT::_free_list = NULL; +PosParPRT* PosParPRT::_par_expanded_list = NULL; + +jint OtherRegionsTable::_cache_probes = 0; +jint OtherRegionsTable::_cache_hits = 0; + +size_t OtherRegionsTable::_max_fine_entries = 0; +size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0; +#if SAMPLE_FOR_EVICTION +size_t OtherRegionsTable::_fine_eviction_stride = 0; +size_t OtherRegionsTable::_fine_eviction_sample_size = 0; +#endif + +OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) : + _g1h(G1CollectedHeap::heap()), + _m(Mutex::leaf, "An OtherRegionsTable lock", true), + _hr(hr), + _coarse_map(G1CollectedHeap::heap()->max_regions(), + false /* in-resource-area */), + _fine_grain_regions(NULL), + _n_fine_entries(0), _n_coarse_entries(0), +#if SAMPLE_FOR_EVICTION + _fine_eviction_start(0), +#endif + _sparse_table(hr) +{ + typedef PosParPRT* PosParPRTPtr; + if (_max_fine_entries == 0) { + assert(_mod_max_fine_entries_mask == 0, "Both or none."); + _max_fine_entries = (1 << G1LogRSRegionEntries); + _mod_max_fine_entries_mask = _max_fine_entries - 1; +#if SAMPLE_FOR_EVICTION + assert(_fine_eviction_sample_size == 0 + && _fine_eviction_stride == 0, "All init at same time."); + _fine_eviction_sample_size = MAX2((size_t)4, (size_t)G1LogRSRegionEntries); + _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size; +#endif + } + _fine_grain_regions = new PosParPRTPtr[_max_fine_entries]; + if (_fine_grain_regions == NULL) + vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, + "Failed to allocate _fine_grain_entries."); + for (size_t i = 0; i < _max_fine_entries; i++) { + _fine_grain_regions[i] = NULL; + } +} + +int** OtherRegionsTable::_from_card_cache = NULL; +size_t OtherRegionsTable::_from_card_cache_max_regions = 0; +size_t OtherRegionsTable::_from_card_cache_mem_size = 0; + +void OtherRegionsTable::init_from_card_cache(size_t max_regions) { + _from_card_cache_max_regions = max_regions; + + int n_par_rs = HeapRegionRemSet::num_par_rem_sets(); + _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs); + for (int i = 0; i < n_par_rs; i++) { + _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, 
max_regions); + for (size_t j = 0; j < max_regions; j++) { + _from_card_cache[i][j] = -1; // An invalid value. + } + } + _from_card_cache_mem_size = n_par_rs * max_regions * sizeof(int); +} + +void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) { + for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { + assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max."); + for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) { + _from_card_cache[i][j] = -1; // An invalid value. + } + } +} + +#ifndef PRODUCT +void OtherRegionsTable::print_from_card_cache() { + for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { + for (size_t j = 0; j < _from_card_cache_max_regions; j++) { + gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.", + i, j, _from_card_cache[i][j]); + } + } +} +#endif + +void OtherRegionsTable::add_reference(oop* from, int tid) { + size_t cur_hrs_ind = hr()->hrs_index(); + +#if HRRS_VERBOSE + gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").", + from, *from); +#endif + + int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift); + +#if HRRS_VERBOSE + gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)", + hr()->bottom(), from_card, + _from_card_cache[tid][cur_hrs_ind]); +#endif + +#define COUNT_CACHE 0 +#if COUNT_CACHE + jint p = Atomic::add(1, &_cache_probes); + if ((p % 10000) == 0) { + jint hits = _cache_hits; + gclog_or_tty->print_cr("%d/%d = %5.2f%% RS cache hits.", + _cache_hits, p, 100.0* (float)hits/(float)p); + } +#endif + if (from_card == _from_card_cache[tid][cur_hrs_ind]) { +#if HRRS_VERBOSE + gclog_or_tty->print_cr(" from-card cache hit."); +#endif +#if COUNT_CACHE + Atomic::inc(&_cache_hits); +#endif + assert(contains_reference(from), "We just added it!"); + return; + } else { + _from_card_cache[tid][cur_hrs_ind] = from_card; + } + + // Note that this may be a continued H region. + HeapRegion* from_hr = _g1h->heap_region_containing_raw(from); + size_t from_hrs_ind = (size_t)from_hr->hrs_index(); + + // If the region is already coarsened, return. + if (_coarse_map.at(from_hrs_ind)) { +#if HRRS_VERBOSE + gclog_or_tty->print_cr(" coarse map hit."); +#endif + assert(contains_reference(from), "We just added it!"); + return; + } + + // Otherwise find a per-region table to add it to. + size_t ind = from_hrs_ind & _mod_max_fine_entries_mask; + PosParPRT* prt = find_region_table(ind, from_hr); + if (prt == NULL) { + MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); + // Confirm that it's really not there... + prt = find_region_table(ind, from_hr); + if (prt == NULL) { + + uintptr_t from_hr_bot_card_index = + uintptr_t(from_hr->bottom()) + >> CardTableModRefBS::card_shift; + int card_index = from_card - from_hr_bot_card_index; + assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion, + "Must be in range."); + if (G1HRRSUseSparseTable && + _sparse_table.add_card((short) from_hrs_ind, card_index)) { + if (G1RecordHRRSOops) { + HeapRegionRemSet::record(hr(), from); +#if HRRS_VERBOSE + gclog_or_tty->print(" Added card " PTR_FORMAT " to region " + "[" PTR_FORMAT "...) 
for ref " PTR_FORMAT ".\n", + align_size_down(uintptr_t(from), + CardTableModRefBS::card_size), + hr()->bottom(), from); +#endif + } +#if HRRS_VERBOSE + gclog_or_tty->print_cr(" added card to sparse table."); +#endif + assert(contains_reference_locked(from), "We just added it!"); + return; + } else { +#if HRRS_VERBOSE + gclog_or_tty->print_cr(" [tid %d] sparse table entry " + "overflow(f: %d, t: %d)", + tid, from_hrs_ind, cur_hrs_ind); +#endif + } + + // Otherwise, transfer from sparse to fine-grain. + short cards[SparsePRTEntry::CardsPerEntry]; + if (G1HRRSUseSparseTable) { + bool res = _sparse_table.get_cards((short) from_hrs_ind, &cards[0]); + assert(res, "There should have been an entry"); + } + + if (_n_fine_entries == _max_fine_entries) { + prt = delete_region_table(); + } else { + prt = PosParPRT::alloc(from_hr); + } + prt->init(from_hr); + // Record the outgoing pointer in the from_region's outgoing bitmap. + from_hr->rem_set()->add_outgoing_reference(hr()); + + PosParPRT* first_prt = _fine_grain_regions[ind]; + prt->set_next(first_prt); // XXX Maybe move to init? + _fine_grain_regions[ind] = prt; + _n_fine_entries++; + + // Add in the cards from the sparse table. + if (G1HRRSUseSparseTable) { + for (int i = 0; i < SparsePRTEntry::CardsPerEntry; i++) { + short c = cards[i]; + if (c != SparsePRTEntry::NullEntry) { + prt->add_card(c); + } + } + // Now we can delete the sparse entry. + bool res = _sparse_table.delete_entry((short) from_hrs_ind); + assert(res, "It should have been there."); + } + } + assert(prt != NULL && prt->hr() == from_hr, "consequence"); + } + // Note that we can't assert "prt->hr() == from_hr", because of the + // possibility of concurrent reuse. But see head comment of + // OtherRegionsTable for why this is OK. + assert(prt != NULL, "Inv"); + + prt->add_reference(from, tid); + if (G1RecordHRRSOops) { + HeapRegionRemSet::record(hr(), from); +#if HRRS_VERBOSE + gclog_or_tty->print("Added card " PTR_FORMAT " to region " + "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", + align_size_down(uintptr_t(from), + CardTableModRefBS::card_size), + hr()->bottom(), from); +#endif + } + assert(contains_reference(from), "We just added it!"); +} + +PosParPRT* +OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const { + assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); + PosParPRT* prt = _fine_grain_regions[ind]; + while (prt != NULL && prt->hr() != hr) { + prt = prt->next(); + } + // Loop postcondition is the method postcondition. + return prt; +} + + +#define DRT_CENSUS 0 + +#if DRT_CENSUS +static const int HistoSize = 6; +static int global_histo[HistoSize] = { 0, 0, 0, 0, 0, 0 }; +static int coarsenings = 0; +static int occ_sum = 0; +#endif + +jint OtherRegionsTable::_n_coarsenings = 0; + +PosParPRT* OtherRegionsTable::delete_region_table() { +#if DRT_CENSUS + int histo[HistoSize] = { 0, 0, 0, 0, 0, 0 }; + const int histo_limits[] = { 1, 4, 16, 64, 256, 2048 }; +#endif + + assert(_m.owned_by_self(), "Precondition"); + assert(_n_fine_entries == _max_fine_entries, "Precondition"); + PosParPRT* max = NULL; + jint max_occ = 0; + PosParPRT** max_prev; + size_t max_ind; + +#if SAMPLE_FOR_EVICTION + size_t i = _fine_eviction_start; + for (size_t k = 0; k < _fine_eviction_sample_size; k++) { + size_t ii = i; + // Make sure we get a non-NULL sample. 
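+      // ("ii" scans forward from the sample point, wrapping at the end of
+      //  the table.  The fine-grain table is full when this is called, so
+      //  at least one bucket is non-NULL and the guarantee cannot fire.)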
+ while (_fine_grain_regions[ii] == NULL) { + ii++; + if (ii == _max_fine_entries) ii = 0; + guarantee(ii != i, "We must find one."); + } + PosParPRT** prev = &_fine_grain_regions[ii]; + PosParPRT* cur = *prev; + while (cur != NULL) { + jint cur_occ = cur->occupied(); + if (max == NULL || cur_occ > max_occ) { + max = cur; + max_prev = prev; + max_ind = i; + max_occ = cur_occ; + } + prev = cur->next_addr(); + cur = cur->next(); + } + i = i + _fine_eviction_stride; + if (i >= _n_fine_entries) i = i - _n_fine_entries; + } + _fine_eviction_start++; + if (_fine_eviction_start >= _n_fine_entries) + _fine_eviction_start -= _n_fine_entries; +#else + for (int i = 0; i < _max_fine_entries; i++) { + PosParPRT** prev = &_fine_grain_regions[i]; + PosParPRT* cur = *prev; + while (cur != NULL) { + jint cur_occ = cur->occupied(); +#if DRT_CENSUS + for (int k = 0; k < HistoSize; k++) { + if (cur_occ <= histo_limits[k]) { + histo[k]++; global_histo[k]++; break; + } + } +#endif + if (max == NULL || cur_occ > max_occ) { + max = cur; + max_prev = prev; + max_ind = i; + max_occ = cur_occ; + } + prev = cur->next_addr(); + cur = cur->next(); + } + } +#endif + // XXX + guarantee(max != NULL, "Since _n_fine_entries > 0"); +#if DRT_CENSUS + gclog_or_tty->print_cr("In a coarsening: histo of occs:"); + for (int k = 0; k < HistoSize; k++) { + gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], histo[k]); + } + coarsenings++; + occ_sum += max_occ; + if ((coarsenings % 100) == 0) { + gclog_or_tty->print_cr("\ncoarsenings = %d; global summary:", coarsenings); + for (int k = 0; k < HistoSize; k++) { + gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], global_histo[k]); + } + gclog_or_tty->print_cr("Avg occ of deleted region = %6.2f.", + (float)occ_sum/(float)coarsenings); + } +#endif + + // Set the corresponding coarse bit. + int max_hrs_index = max->hr()->hrs_index(); + if (!_coarse_map.at(max_hrs_index)) { + _coarse_map.at_put(max_hrs_index, true); + _n_coarse_entries++; +#if 0 + gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] " + "for region [" PTR_FORMAT "...] (%d coarse entries).\n", + hr()->bottom(), + max->hr()->bottom(), + _n_coarse_entries); +#endif + } + + // Unsplice. + *max_prev = max->next(); + Atomic::inc(&_n_coarsenings); + _n_fine_entries--; + return max; +} + + +// At present, this must be called stop-world single-threaded. +void OtherRegionsTable::scrub(CardTableModRefBS* ctbs, + BitMap* region_bm, BitMap* card_bm) { + // First eliminated garbage regions from the coarse map. + if (G1RSScrubVerbose) + gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index()); + + assert(_coarse_map.size() == region_bm->size(), "Precondition"); + if (G1RSScrubVerbose) + gclog_or_tty->print(" Coarse map: before = %d...", _n_coarse_entries); + _coarse_map.set_intersection(*region_bm); + _n_coarse_entries = _coarse_map.count_one_bits(); + if (G1RSScrubVerbose) + gclog_or_tty->print_cr(" after = %d.", _n_coarse_entries); + + // Now do the fine-grained maps. + for (size_t i = 0; i < _max_fine_entries; i++) { + PosParPRT* cur = _fine_grain_regions[i]; + PosParPRT** prev = &_fine_grain_regions[i]; + while (cur != NULL) { + PosParPRT* nxt = cur->next(); + // If the entire region is dead, eliminate. 
+ if (G1RSScrubVerbose) + gclog_or_tty->print_cr(" For other region %d:", cur->hr()->hrs_index()); + if (!region_bm->at(cur->hr()->hrs_index())) { + *prev = nxt; + cur->set_next(NULL); + _n_fine_entries--; + if (G1RSScrubVerbose) + gclog_or_tty->print_cr(" deleted via region map."); + PosParPRT::free(cur); + } else { + // Do fine-grain elimination. + if (G1RSScrubVerbose) + gclog_or_tty->print(" occ: before = %4d.", cur->occupied()); + cur->scrub(ctbs, card_bm); + if (G1RSScrubVerbose) + gclog_or_tty->print_cr(" after = %4d.", cur->occupied()); + // Did that empty the table completely? + if (cur->occupied() == 0) { + *prev = nxt; + cur->set_next(NULL); + _n_fine_entries--; + PosParPRT::free(cur); + } else { + prev = cur->next_addr(); + } + } + cur = nxt; + } + } + // Since we may have deleted a from_card_cache entry from the RS, clear + // the FCC. + clear_fcc(); +} + + +size_t OtherRegionsTable::occupied() const { + // Cast away const in this case. + MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag); + size_t sum = occ_fine(); + sum += occ_sparse(); + sum += occ_coarse(); + return sum; +} + +size_t OtherRegionsTable::occ_fine() const { + size_t sum = 0; + for (size_t i = 0; i < _max_fine_entries; i++) { + PosParPRT* cur = _fine_grain_regions[i]; + while (cur != NULL) { + sum += cur->occupied(); + cur = cur->next(); + } + } + return sum; +} + +size_t OtherRegionsTable::occ_coarse() const { + return (_n_coarse_entries * PosParPRT::CardsPerRegion); +} + +size_t OtherRegionsTable::occ_sparse() const { + return _sparse_table.occupied(); +} + +size_t OtherRegionsTable::mem_size() const { + // Cast away const in this case. + MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag); + size_t sum = 0; + for (size_t i = 0; i < _max_fine_entries; i++) { + PosParPRT* cur = _fine_grain_regions[i]; + while (cur != NULL) { + sum += cur->mem_size(); + cur = cur->next(); + } + } + sum += (sizeof(PosParPRT*) * _max_fine_entries); + sum += (_coarse_map.size_in_words() * HeapWordSize); + sum += (_sparse_table.mem_size()); + sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above. + return sum; +} + +size_t OtherRegionsTable::static_mem_size() { + return _from_card_cache_mem_size; +} + +size_t OtherRegionsTable::fl_mem_size() { + return PerRegionTable::fl_mem_size() + PosParPRT::fl_mem_size(); +} + +void OtherRegionsTable::clear_fcc() { + for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { + _from_card_cache[i][hr()->hrs_index()] = -1; + } +} + +void OtherRegionsTable::clear() { + MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); + for (size_t i = 0; i < _max_fine_entries; i++) { + PosParPRT* cur = _fine_grain_regions[i]; + while (cur != NULL) { + PosParPRT* nxt = cur->next(); + PosParPRT::free(cur); + cur = nxt; + } + _fine_grain_regions[i] = NULL; + } + _sparse_table.clear(); + _coarse_map.clear(); + _n_fine_entries = 0; + _n_coarse_entries = 0; + + clear_fcc(); +} + +void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) { + MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); + size_t hrs_ind = (size_t)from_hr->hrs_index(); + size_t ind = hrs_ind & _mod_max_fine_entries_mask; + if (del_single_region_table(ind, from_hr)) { + assert(!_coarse_map.at(hrs_ind), "Inv"); + } else { + _coarse_map.par_at_put(hrs_ind, 0); + } + // Check to see if any of the fcc entries come from here. 
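+  // (The from-card cache memoizes, per worker thread, the last card added
+  //  to this region's remembered set; stale entries must be invalidated so
+  //  that the short-circuit test in add_reference() cannot filter out a
+  //  future re-add of the same card.)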
+ int hr_ind = hr()->hrs_index(); + for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) { + int fcc_ent = _from_card_cache[tid][hr_ind]; + if (fcc_ent != -1) { + HeapWord* card_addr = (HeapWord*) + (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift); + if (hr()->is_in_reserved(card_addr)) { + // Clear the from card cache. + _from_card_cache[tid][hr_ind] = -1; + } + } + } +} + +bool OtherRegionsTable::del_single_region_table(size_t ind, + HeapRegion* hr) { + assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); + PosParPRT** prev_addr = &_fine_grain_regions[ind]; + PosParPRT* prt = *prev_addr; + while (prt != NULL && prt->hr() != hr) { + prev_addr = prt->next_addr(); + prt = prt->next(); + } + if (prt != NULL) { + assert(prt->hr() == hr, "Loop postcondition."); + *prev_addr = prt->next(); + PosParPRT::free(prt); + _n_fine_entries--; + return true; + } else { + return false; + } +} + +bool OtherRegionsTable::contains_reference(oop* from) const { + // Cast away const in this case. + MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag); + return contains_reference_locked(from); +} + +bool OtherRegionsTable::contains_reference_locked(oop* from) const { + HeapRegion* hr = _g1h->heap_region_containing_raw(from); + if (hr == NULL) return false; + size_t hr_ind = hr->hrs_index(); + // Is this region in the coarse map? + if (_coarse_map.at(hr_ind)) return true; + + PosParPRT* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask, + hr); + if (prt != NULL) { + return prt->contains_reference(from); + + } else { + uintptr_t from_card = + (uintptr_t(from) >> CardTableModRefBS::card_shift); + uintptr_t hr_bot_card_index = + uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift; + assert(from_card >= hr_bot_card_index, "Inv"); + int card_index = from_card - hr_bot_card_index; + return _sparse_table.contains_card((short)hr_ind, card_index); + } + + +} + + +bool HeapRegionRemSet::_par_traversal = false; + +void HeapRegionRemSet::set_par_traversal(bool b) { + assert(_par_traversal != b, "Proper alternation..."); + _par_traversal = b; +} + +int HeapRegionRemSet::num_par_rem_sets() { + // We always have at least two, so that a mutator thread can claim an + // id and add to a rem set. 
+ return (int) MAX2(ParallelGCThreads, (size_t)2); +} + +HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, + HeapRegion* hr) + : _bosa(bosa), _other_regions(hr), + _outgoing_region_map(G1CollectedHeap::heap()->max_regions(), + false /* in-resource-area */), + _iter_state(Unclaimed) +{} + + +void HeapRegionRemSet::init_for_par_iteration() { + _iter_state = Unclaimed; +} + +bool HeapRegionRemSet::claim_iter() { + if (_iter_state != Unclaimed) return false; + jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed); + return (res == Unclaimed); +} + +void HeapRegionRemSet::set_iter_complete() { + _iter_state = Complete; +} + +bool HeapRegionRemSet::iter_is_complete() { + return _iter_state == Complete; +} + + +void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const { + iter->initialize(this); +} + +#ifndef PRODUCT +void HeapRegionRemSet::print() const { + HeapRegionRemSetIterator iter; + init_iterator(&iter); + size_t card_index; + while (iter.has_next(card_index)) { + HeapWord* card_start = + G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index); + gclog_or_tty->print_cr(" Card " PTR_FORMAT ".", card_start); + } + // XXX + if (iter.n_yielded() != occupied()) { + gclog_or_tty->print_cr("Yielded disagrees with occupied:"); + gclog_or_tty->print_cr(" %6d yielded (%6d coarse, %6d fine).", + iter.n_yielded(), + iter.n_yielded_coarse(), iter.n_yielded_fine()); + gclog_or_tty->print_cr(" %6d occ (%6d coarse, %6d fine).", + occupied(), occ_coarse(), occ_fine()); + } + guarantee(iter.n_yielded() == occupied(), + "We should have yielded all the represented cards."); +} +#endif + +void HeapRegionRemSet::cleanup() { + SparsePRT::cleanup_all(); +} + +void HeapRegionRemSet::par_cleanup() { + PosParPRT::par_contract_all(); +} + +void HeapRegionRemSet::add_outgoing_reference(HeapRegion* to_hr) { + _outgoing_region_map.par_at_put(to_hr->hrs_index(), 1); +} + +void HeapRegionRemSet::clear() { + clear_outgoing_entries(); + _outgoing_region_map.clear(); + _other_regions.clear(); + assert(occupied() == 0, "Should be clear."); +} + +void HeapRegionRemSet::clear_outgoing_entries() { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + size_t i = _outgoing_region_map.get_next_one_offset(0); + while (i < _outgoing_region_map.size()) { + HeapRegion* to_region = g1h->region_at(i); + if (!to_region->in_collection_set()) { + to_region->rem_set()->clear_incoming_entry(hr()); + } + i = _outgoing_region_map.get_next_one_offset(i+1); + } +} + + +void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs, + BitMap* region_bm, BitMap* card_bm) { + _other_regions.scrub(ctbs, region_bm, card_bm); +} + +//-------------------- Iteration -------------------- + +HeapRegionRemSetIterator:: +HeapRegionRemSetIterator() : + _hrrs(NULL), + _g1h(G1CollectedHeap::heap()), + _bosa(NULL), + _sparse_iter(size_t(G1CollectedHeap::heap()->reserved_region().start()) + >> CardTableModRefBS::card_shift) +{} + +void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) { + _hrrs = hrrs; + _coarse_map = &_hrrs->_other_regions._coarse_map; + _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions; + _bosa = _hrrs->bosa(); + + _is = Sparse; + // Set these values so that we increment to the first region. 
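+  // (coarse_has_next() pre-increments the card index, so starting one card
+  //  before the region boundary forces the first call to advance to the
+  //  first region set in the coarse map.)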
+ _coarse_cur_region_index = -1; + _coarse_cur_region_cur_card = (PosParPRT::CardsPerRegion-1);; + + _cur_region_cur_card = 0; + + _fine_array_index = -1; + _fine_cur_prt = NULL; + + _n_yielded_coarse = 0; + _n_yielded_fine = 0; + _n_yielded_sparse = 0; + + _sparse_iter.init(&hrrs->_other_regions._sparse_table); +} + +bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) { + if (_hrrs->_other_regions._n_coarse_entries == 0) return false; + // Go to the next card. + _coarse_cur_region_cur_card++; + // Was the last the last card in the current region? + if (_coarse_cur_region_cur_card == PosParPRT::CardsPerRegion) { + // Yes: find the next region. This may leave _coarse_cur_region_index + // Set to the last index, in which case there are no more coarse + // regions. + _coarse_cur_region_index = + (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1); + if ((size_t)_coarse_cur_region_index < _coarse_map->size()) { + _coarse_cur_region_cur_card = 0; + HeapWord* r_bot = + _g1h->region_at(_coarse_cur_region_index)->bottom(); + _cur_region_card_offset = _bosa->index_for(r_bot); + } else { + return false; + } + } + // If we didn't return false above, then we can yield a card. + card_index = _cur_region_card_offset + _coarse_cur_region_cur_card; + return true; +} + +void HeapRegionRemSetIterator::fine_find_next_non_null_prt() { + // Otherwise, find the next bucket list in the array. + _fine_array_index++; + while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) { + _fine_cur_prt = _fine_grain_regions[_fine_array_index]; + if (_fine_cur_prt != NULL) return; + else _fine_array_index++; + } + assert(_fine_cur_prt == NULL, "Loop post"); +} + +bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) { + if (fine_has_next()) { + _cur_region_cur_card = + _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1); + } + while (!fine_has_next()) { + if (_cur_region_cur_card == PosParPRT::CardsPerRegion) { + _cur_region_cur_card = 0; + _fine_cur_prt = _fine_cur_prt->next(); + } + if (_fine_cur_prt == NULL) { + fine_find_next_non_null_prt(); + if (_fine_cur_prt == NULL) return false; + } + assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0, + "inv."); + HeapWord* r_bot = + _fine_cur_prt->hr()->bottom(); + _cur_region_card_offset = _bosa->index_for(r_bot); + _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0); + } + assert(fine_has_next(), "Or else we exited the loop via the return."); + card_index = _cur_region_card_offset + _cur_region_cur_card; + return true; +} + +bool HeapRegionRemSetIterator::fine_has_next() { + return + _fine_cur_prt != NULL && + _cur_region_cur_card < PosParPRT::CardsPerRegion; +} + +bool HeapRegionRemSetIterator::has_next(size_t& card_index) { + switch (_is) { + case Sparse: + if (_sparse_iter.has_next(card_index)) { + _n_yielded_sparse++; + return true; + } + // Otherwise, deliberate fall-through + _is = Fine; + case Fine: + if (fine_has_next(card_index)) { + _n_yielded_fine++; + return true; + } + // Otherwise, deliberate fall-through + _is = Coarse; + case Coarse: + if (coarse_has_next(card_index)) { + _n_yielded_coarse++; + return true; + } + // Otherwise... 
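+    // All three component tables (sparse, fine and coarse) are exhausted;
+    // fall out of the switch and report that no cards remain.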
+ break; + } + assert(ParallelGCThreads > 1 || + n_yielded() == _hrrs->occupied(), + "Should have yielded all the cards in the rem set " + "(in the non-par case)."); + return false; +} + + + +oop** HeapRegionRemSet::_recorded_oops = NULL; +HeapWord** HeapRegionRemSet::_recorded_cards = NULL; +HeapRegion** HeapRegionRemSet::_recorded_regions = NULL; +int HeapRegionRemSet::_n_recorded = 0; + +HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL; +int* HeapRegionRemSet::_recorded_event_index = NULL; +int HeapRegionRemSet::_n_recorded_events = 0; + +void HeapRegionRemSet::record(HeapRegion* hr, oop* f) { + if (_recorded_oops == NULL) { + assert(_n_recorded == 0 + && _recorded_cards == NULL + && _recorded_regions == NULL, + "Inv"); + _recorded_oops = NEW_C_HEAP_ARRAY(oop*, MaxRecorded); + _recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded); + _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded); + } + if (_n_recorded == MaxRecorded) { + gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded); + } else { + _recorded_cards[_n_recorded] = + (HeapWord*)align_size_down(uintptr_t(f), + CardTableModRefBS::card_size); + _recorded_oops[_n_recorded] = f; + _recorded_regions[_n_recorded] = hr; + _n_recorded++; + } +} + +void HeapRegionRemSet::record_event(Event evnt) { + if (!G1RecordHRRSEvents) return; + + if (_recorded_events == NULL) { + assert(_n_recorded_events == 0 + && _recorded_event_index == NULL, + "Inv"); + _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents); + _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents); + } + if (_n_recorded_events == MaxRecordedEvents) { + gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents); + } else { + _recorded_events[_n_recorded_events] = evnt; + _recorded_event_index[_n_recorded_events] = _n_recorded; + _n_recorded_events++; + } +} + +void HeapRegionRemSet::print_event(outputStream* str, Event evnt) { + switch (evnt) { + case Event_EvacStart: + str->print("Evac Start"); + break; + case Event_EvacEnd: + str->print("Evac End"); + break; + case Event_RSUpdateEnd: + str->print("RS Update End"); + break; + } +} + +void HeapRegionRemSet::print_recorded() { + int cur_evnt = 0; + Event cur_evnt_kind; + int cur_evnt_ind = 0; + if (_n_recorded_events > 0) { + cur_evnt_kind = _recorded_events[cur_evnt]; + cur_evnt_ind = _recorded_event_index[cur_evnt]; + } + + for (int i = 0; i < _n_recorded; i++) { + while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) { + gclog_or_tty->print("Event: "); + print_event(gclog_or_tty, cur_evnt_kind); + gclog_or_tty->print_cr(""); + cur_evnt++; + if (cur_evnt < MaxRecordedEvents) { + cur_evnt_kind = _recorded_events[cur_evnt]; + cur_evnt_ind = _recorded_event_index[cur_evnt]; + } + } + gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]" + " for ref " PTR_FORMAT ".\n", + _recorded_cards[i], _recorded_regions[i]->bottom(), + _recorded_oops[i]); + } +} + +#ifndef PRODUCT +void HeapRegionRemSet::test() { + os::sleep(Thread::current(), (jlong)5000, false); + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + // Run with "-XX:G1LogRSRegionEntries=2", so that 1 and 5 end up in same + // hash bucket. 
+ HeapRegion* hr0 = g1h->region_at(0); + HeapRegion* hr1 = g1h->region_at(1); + HeapRegion* hr2 = g1h->region_at(5); + HeapRegion* hr3 = g1h->region_at(6); + HeapRegion* hr4 = g1h->region_at(7); + HeapRegion* hr5 = g1h->region_at(8); + + HeapWord* hr1_start = hr1->bottom(); + HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2; + HeapWord* hr1_last = hr1->end() - 1; + + HeapWord* hr2_start = hr2->bottom(); + HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2; + HeapWord* hr2_last = hr2->end() - 1; + + HeapWord* hr3_start = hr3->bottom(); + HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2; + HeapWord* hr3_last = hr3->end() - 1; + + HeapRegionRemSet* hrrs = hr0->rem_set(); + + // Make three references from region 0x101... + hrrs->add_reference((oop*)hr1_start); + hrrs->add_reference((oop*)hr1_mid); + hrrs->add_reference((oop*)hr1_last); + + hrrs->add_reference((oop*)hr2_start); + hrrs->add_reference((oop*)hr2_mid); + hrrs->add_reference((oop*)hr2_last); + + hrrs->add_reference((oop*)hr3_start); + hrrs->add_reference((oop*)hr3_mid); + hrrs->add_reference((oop*)hr3_last); + + // Now cause a coarsening. + hrrs->add_reference((oop*)hr4->bottom()); + hrrs->add_reference((oop*)hr5->bottom()); + + // Now, does iteration yield these three? + HeapRegionRemSetIterator iter; + hrrs->init_iterator(&iter); + size_t sum = 0; + size_t card_index; + while (iter.has_next(card_index)) { + HeapWord* card_start = + G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index); + gclog_or_tty->print_cr(" Card " PTR_FORMAT ".", card_start); + sum++; + } + guarantee(sum == 11 - 3 + 2048, "Failure"); + guarantee(sum == hrrs->occupied(), "Failure"); +} +#endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp 2009-08-01 04:21:14.514832554 +0100 @@ -0,0 +1,428 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// Remembered set for a heap region. Represent a set of "cards" that +// contain pointers into the owner heap region. Cards are defined somewhat +// abstractly, in terms of what the "BlockOffsetTable" in use can parse. + +class G1CollectedHeap; +class G1BlockOffsetSharedArray; +class HeapRegion; +class HeapRegionRemSetIterator; +class PosParPRT; +class SparsePRT; + + +// The "_coarse_map" is a bitmap with one bit for each region, where set +// bits indicate that the corresponding region may contain some pointer +// into the owning region. 
+
+// The "_fine_grain_regions" array is an open hash table of PerRegionTables
+// (PRTs), indicating regions for which we're keeping the RS as a set of
+// cards.  The strategy is to cap the size of the fine-grain table,
+// deleting an entry and setting the corresponding coarse-grained bit when
+// we would overflow this cap.
+
+// We use a mixture of locking and lock-free techniques here.  We allow
+// threads to locate PRTs without locking, but threads attempting to alter
+// a bucket list obtain a lock.  This means that any failing attempt to
+// find a PRT must be retried with the lock.  It might seem dangerous that
+// a read can find a PRT that is concurrently deleted.  This is all right,
+// because:
+//
+//   1) We only actually free PRT's at safe points (though we reuse them at
+//      other times).
+//   2) We find PRT's in an attempt to add entries.  If a PRT is deleted,
+//      its _coarse_map bit is set, so the card that we were attempting to
+//      add is represented.  If a deleted PRT is re-used, a thread adding a
+//      bit, thinking the PRT is for a different region, does no harm.
+
+class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
+  friend class HeapRegionRemSetIterator;
+
+  G1CollectedHeap* _g1h;
+  Mutex            _m;
+  HeapRegion*      _hr;
+
+  // These are protected by "_m".
+  BitMap      _coarse_map;
+  size_t      _n_coarse_entries;
+  static jint _n_coarsenings;
+
+  PosParPRT** _fine_grain_regions;
+  size_t      _n_fine_entries;
+
+#define SAMPLE_FOR_EVICTION 1
+#if SAMPLE_FOR_EVICTION
+  size_t        _fine_eviction_start;
+  static size_t _fine_eviction_stride;
+  static size_t _fine_eviction_sample_size;
+#endif
+
+  SparsePRT   _sparse_table;
+
+  // These are static after init.
+  static size_t _max_fine_entries;
+  static size_t _mod_max_fine_entries_mask;
+
+  // Requires "prt" to be the first element of the bucket list appropriate
+  // for "hr".  If this list contains an entry for "hr", return it,
+  // otherwise return "NULL".
+  PosParPRT* find_region_table(size_t ind, HeapRegion* hr) const;
+
+  // Find, delete, and return a candidate PosParPRT, if any exists,
+  // adding the deleted region to the coarse bitmap.  Requires the caller
+  // to hold _m, and the fine-grain table to be full.
+  PosParPRT* delete_region_table();
+
+  // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
+  // be the correct index for "hr"), delete it and return true; else return
+  // false.
+  bool del_single_region_table(size_t ind, HeapRegion* hr);
+
+  static jint _cache_probes;
+  static jint _cache_hits;
+
+  // Indexed by thread X heap region, to minimize thread contention.
+  static int** _from_card_cache;
+  static size_t _from_card_cache_max_regions;
+  static size_t _from_card_cache_mem_size;
+
+public:
+  OtherRegionsTable(HeapRegion* hr);
+
+  HeapRegion* hr() const { return _hr; }
+
+  // For now.  Could "expand" some tables in the future, so that this made
+  // sense.
+  void add_reference(oop* from, int tid);
+
+  void add_reference(oop* from) {
+    return add_reference(from, 0);
+  }
+
+  // Removes any entries shown by the given bitmaps to contain only dead
+  // objects.
+  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
+
+  // Not const because it takes a lock.
+  size_t occupied() const;
+  size_t occ_fine() const;
+  size_t occ_coarse() const;
+  size_t occ_sparse() const;
+
+  static jint n_coarsenings() { return _n_coarsenings; }
+
+  // Returns size in bytes.
+  // Not const because it takes a lock.
+ size_t mem_size() const; + static size_t static_mem_size(); + static size_t fl_mem_size(); + + bool contains_reference(oop* from) const; + bool contains_reference_locked(oop* from) const; + + void clear(); + + // Specifically clear the from_card_cache. + void clear_fcc(); + + // "from_hr" is being cleared; remove any entries from it. + void clear_incoming_entry(HeapRegion* from_hr); + + // Declare the heap size (in # of regions) to the OtherRegionsTable. + // (Uses it to initialize from_card_cache). + static void init_from_card_cache(size_t max_regions); + + // Declares that only regions i s.t. 0 <= i < new_n_regs are in use. + // Make sure any entries for higher regions are invalid. + static void shrink_from_card_cache(size_t new_n_regs); + + static void print_from_card_cache(); + +}; + + +class HeapRegionRemSet : public CHeapObj { + friend class VMStructs; + friend class HeapRegionRemSetIterator; + +public: + enum Event { + Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd + }; + +private: + G1BlockOffsetSharedArray* _bosa; + G1BlockOffsetSharedArray* bosa() const { return _bosa; } + + static bool _par_traversal; + + OtherRegionsTable _other_regions; + + // One set bit for every region that has an entry for this one. + BitMap _outgoing_region_map; + + // Clear entries for the current region in any rem sets named in + // the _outgoing_region_map. + void clear_outgoing_entries(); + + enum ParIterState { Unclaimed, Claimed, Complete }; + ParIterState _iter_state; + + // Unused unless G1RecordHRRSOops is true. + + static const int MaxRecorded = 1000000; + static oop** _recorded_oops; + static HeapWord** _recorded_cards; + static HeapRegion** _recorded_regions; + static int _n_recorded; + + static const int MaxRecordedEvents = 1000; + static Event* _recorded_events; + static int* _recorded_event_index; + static int _n_recorded_events; + + static void print_event(outputStream* str, Event evnt); + +public: + HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, + HeapRegion* hr); + + static int num_par_rem_sets(); + static bool par_traversal() { return _par_traversal; } + static void set_par_traversal(bool b); + + HeapRegion* hr() const { + return _other_regions.hr(); + } + + size_t occupied() const { + return _other_regions.occupied(); + } + size_t occ_fine() const { + return _other_regions.occ_fine(); + } + size_t occ_coarse() const { + return _other_regions.occ_coarse(); + } + size_t occ_sparse() const { + return _other_regions.occ_sparse(); + } + + static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); } + + /* Used in the sequential case. Returns "true" iff this addition causes + the size limit to be reached. */ + void add_reference(oop* from) { + _other_regions.add_reference(from); + } + + /* Used in the parallel case. Returns "true" iff this addition causes + the size limit to be reached. */ + void add_reference(oop* from, int tid) { + _other_regions.add_reference(from, tid); + } + + // Records the fact that the current region contains an outgoing + // reference into "to_hr". + void add_outgoing_reference(HeapRegion* to_hr); + + // Removes any entries shown by the given bitmaps to contain only dead + // objects. + void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); + + // The region is being reclaimed; clear its remset, and any mention of + // entries for this region in other remsets. + void clear(); + + // Forget any entries due to pointers from "from_hr". 
+ void clear_incoming_entry(HeapRegion* from_hr) { + _other_regions.clear_incoming_entry(from_hr); + } + +#if 0 + virtual void cleanup() = 0; +#endif + + // Should be called from single-threaded code. + void init_for_par_iteration(); + // Attempt to claim the region. Returns true iff this call caused an + // atomic transition from Unclaimed to Claimed. + bool claim_iter(); + // Sets the iteration state to "complete". + void set_iter_complete(); + // Returns "true" iff the region's iteration is complete. + bool iter_is_complete(); + + // Initialize the given iterator to iterate over this rem set. + void init_iterator(HeapRegionRemSetIterator* iter) const; + +#if 0 + // Apply the "do_card" method to the start address of every card in the + // rem set. Returns false if some application of the closure aborted. + virtual bool card_iterate(CardClosure* iter) = 0; +#endif + + // The actual # of bytes this hr_remset takes up. + size_t mem_size() { + return _other_regions.mem_size() + // This correction is necessary because the above includes the second + // part. + + sizeof(this) - sizeof(OtherRegionsTable); + } + + // Returns the memory occupancy of all static data structures associated + // with remembered sets. + static size_t static_mem_size() { + return OtherRegionsTable::static_mem_size(); + } + + // Returns the memory occupancy of all free_list data structures associated + // with remembered sets. + static size_t fl_mem_size() { + return OtherRegionsTable::fl_mem_size(); + } + + bool contains_reference(oop* from) const { + return _other_regions.contains_reference(from); + } + void print() const; + + // Called during a stop-world phase to perform any deferred cleanups. + // The second version may be called by parallel threads after then finish + // collection work. + static void cleanup(); + static void par_cleanup(); + + // Declare the heap size (in # of regions) to the HeapRegionRemSet(s). + // (Uses it to initialize from_card_cache). + static void init_heap(size_t max_regions) { + OtherRegionsTable::init_from_card_cache(max_regions); + } + + // Declares that only regions i s.t. 0 <= i < new_n_regs are in use. + static void shrink_heap(size_t new_n_regs) { + OtherRegionsTable::shrink_from_card_cache(new_n_regs); + } + +#ifndef PRODUCT + static void print_from_card_cache() { + OtherRegionsTable::print_from_card_cache(); + } +#endif + + static void record(HeapRegion* hr, oop* f); + static void print_recorded(); + static void record_event(Event evnt); + + // Run unit tests. +#ifndef PRODUCT + static void test(); +#endif + +}; + +class HeapRegionRemSetIterator : public CHeapObj { + + // The region over which we're iterating. + const HeapRegionRemSet* _hrrs; + + // Local caching of HRRS fields. + const BitMap* _coarse_map; + PosParPRT** _fine_grain_regions; + + G1BlockOffsetSharedArray* _bosa; + G1CollectedHeap* _g1h; + + // The number yielded since initialization. + size_t _n_yielded_fine; + size_t _n_yielded_coarse; + size_t _n_yielded_sparse; + + // If true we're iterating over the coarse table; if false the fine + // table. + enum IterState { + Sparse, + Fine, + Coarse + }; + IterState _is; + + // In both kinds of iteration, heap offset of first card of current + // region. + size_t _cur_region_card_offset; + // Card offset within cur region. 
+ size_t _cur_region_cur_card; + + // Coarse table iteration fields: + + // Current region index; + int _coarse_cur_region_index; + int _coarse_cur_region_cur_card; + + bool coarse_has_next(size_t& card_index); + + // Fine table iteration fields: + + // Index of bucket-list we're working on. + int _fine_array_index; + // Per Region Table we're doing within current bucket list. + PosParPRT* _fine_cur_prt; + + /* SparsePRT::*/ SparsePRTIter _sparse_iter; + + void fine_find_next_non_null_prt(); + + bool fine_has_next(); + bool fine_has_next(size_t& card_index); + +public: + // We require an iterator to be initialized before use, so the + // constructor does little. + HeapRegionRemSetIterator(); + + void initialize(const HeapRegionRemSet* hrrs); + + // If there remains one or more cards to be yielded, returns true and + // sets "card_index" to one of those cards (which is then considered + // yielded.) Otherwise, returns false (and leaves "card_index" + // undefined.) + bool has_next(size_t& card_index); + + size_t n_yielded_fine() { return _n_yielded_fine; } + size_t n_yielded_coarse() { return _n_yielded_coarse; } + size_t n_yielded_sparse() { return _n_yielded_sparse; } + size_t n_yielded() { + return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse(); + } +}; + +#if 0 +class CardClosure: public Closure { +public: + virtual void do_card(HeapWord* card_start) = 0; +}; + +#endif --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp 2009-08-01 04:21:14.943089689 +0100 @@ -0,0 +1,335 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_heapRegionSeq.cpp.incl" + +// Local to this file. + +static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) { + if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1; + else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1; + else if (*hr1p == *hr2p) return 0; + else { + assert(false, "We should never compare distinct overlapping regions."); + } + return 0; +} + +HeapRegionSeq::HeapRegionSeq(const size_t max_size) : + _alloc_search_start(0), + // The line below is the worst bit of C++ hackery I've ever written + // (Detlefs, 11/23). You should think of it as equivalent to + // "_regions(100, true)": initialize the growable array and inform it + // that it should allocate its elem array(s) on the C heap. The first + // argument, however, is actually a comma expression (new-expr, 100). 
+ // The purpose of the new_expr is to inform the growable array that it + // is *already* allocated on the C heap: it uses the placement syntax to + // keep it from actually doing any allocation. + _regions((ResourceObj::operator new (sizeof(GrowableArray), + (void*)&_regions, + ResourceObj::C_HEAP), + (int)max_size), + true), + _next_rr_candidate(0), + _seq_bottom(NULL) +{} + +// Private methods. + +HeapWord* +HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) { + assert(G1CollectedHeap::isHumongous(word_size), + "Allocation size should be humongous"); + int cur = ind; + int first = cur; + size_t sumSizes = 0; + while (cur < _regions.length() && sumSizes < word_size) { + // Loop invariant: + // For all i in [first, cur): + // _regions.at(i)->is_empty() + // && _regions.at(i) is contiguous with its predecessor, if any + // && sumSizes is the sum of the sizes of the regions in the interval + // [first, cur) + HeapRegion* curhr = _regions.at(cur); + if (curhr->is_empty() + && (first == cur + || (_regions.at(cur-1)->end() == + curhr->bottom()))) { + sumSizes += curhr->capacity() / HeapWordSize; + } else { + first = cur + 1; + sumSizes = 0; + } + cur++; + } + if (sumSizes >= word_size) { + _alloc_search_start = cur; + // Mark the allocated regions as allocated. + bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled(); + HeapRegion* first_hr = _regions.at(first); + for (int i = first; i < cur; i++) { + HeapRegion* hr = _regions.at(i); + if (zf) + hr->ensure_zero_filled(); + { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + hr->set_zero_fill_allocated(); + } + size_t sz = hr->capacity() / HeapWordSize; + HeapWord* tmp = hr->allocate(sz); + assert(tmp != NULL, "Humongous allocation failure"); + MemRegion mr = MemRegion(tmp, sz); + CollectedHeap::fill_with_object(mr); + hr->declare_filled_region_to_BOT(mr); + if (i == first) { + first_hr->set_startsHumongous(); + } else { + assert(i > first, "sanity"); + hr->set_continuesHumongous(first_hr); + } + } + HeapWord* first_hr_bot = first_hr->bottom(); + HeapWord* obj_end = first_hr_bot + word_size; + first_hr->set_top(obj_end); + return first_hr_bot; + } else { + // If we started from the beginning, we want to know why we can't alloc. + return NULL; + } +} + +void HeapRegionSeq::print_empty_runs() { + int empty_run = 0; + int n_empty = 0; + int empty_run_start; + for (int i = 0; i < _regions.length(); i++) { + HeapRegion* r = _regions.at(i); + if (r->continuesHumongous()) continue; + if (r->is_empty()) { + assert(!r->isHumongous(), "H regions should not be empty."); + if (empty_run == 0) empty_run_start = i; + empty_run++; + n_empty++; + } else { + if (empty_run > 0) { + gclog_or_tty->print(" %d:%d", empty_run_start, empty_run); + empty_run = 0; + } + } + } + if (empty_run > 0) { + gclog_or_tty->print(" %d:%d", empty_run_start, empty_run); + } + gclog_or_tty->print_cr(" [tot = %d]", n_empty); +} + +int HeapRegionSeq::find(HeapRegion* hr) { + // FIXME: optimized for adjacent regions of fixed size. + int ind = hr->hrs_index(); + if (ind != -1) { + assert(_regions.at(ind) == hr, "Mismatch"); + } + return ind; +} + + +// Public methods. 
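The "comma expression" initializer in the HeapRegionSeq constructor above is easier to see in a stripped-down, self-contained sketch. The names below (TinyArray, Owner) are hypothetical stand-ins, not HotSpot classes, and standard placement operator new stands in for ResourceObj's C-heap-aware operator new; the standard form simply returns its pointer argument without allocating, whereas the ResourceObj form additionally records that the embedded member's storage is already allocated on the C heap.

// Illustrative sketch only -- hypothetical types, not HotSpot code.
#include <cstdio>
#include <new>

class TinyArray {
  int* _data;
  int  _cap;
public:
  explicit TinyArray(int cap) : _data(new int[cap]), _cap(cap) {}
  ~TinyArray() { delete[] _data; }
  int capacity() const { return _cap; }
};

class Owner {
  TinyArray _arr;   // embedded member, analogous to HeapRegionSeq::_regions
public:
  // The member initializer is a comma expression: the placement new-call is
  // evaluated first purely for its side effect (none here; in HotSpot the
  // ResourceObj overload marks &_arr as pre-allocated C-heap storage), and
  // the expression then yields 100, the argument actually passed to the
  // TinyArray constructor.
  Owner() : _arr((::operator new(sizeof(TinyArray), (void*)&_arr), 100)) {}
  int capacity() const { return _arr.capacity(); }
};

int main() {
  Owner o;
  std::printf("embedded array capacity: %d\n", o.capacity());
  return 0;
}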
+ +void HeapRegionSeq::insert(HeapRegion* hr) { + assert(!_regions.is_full(), "Too many elements in HeapRegionSeq"); + if (_regions.length() == 0 + || _regions.top()->end() <= hr->bottom()) { + hr->set_hrs_index(_regions.length()); + _regions.append(hr); + } else { + _regions.append(hr); + _regions.sort(orderRegions); + for (int i = 0; i < _regions.length(); i++) { + _regions.at(i)->set_hrs_index(i); + } + } + char* bot = (char*)_regions.at(0)->bottom(); + if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot; +} + +size_t HeapRegionSeq::length() { + return _regions.length(); +} + +size_t HeapRegionSeq::free_suffix() { + size_t res = 0; + int first = _regions.length() - 1; + int cur = first; + while (cur >= 0 && + (_regions.at(cur)->is_empty() + && (first == cur + || (_regions.at(cur+1)->bottom() == + _regions.at(cur)->end())))) { + res++; + cur--; + } + return res; +} + +HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) { + int cur = _alloc_search_start; + // Make sure "cur" is a valid index. + assert(cur >= 0, "Invariant."); + HeapWord* res = alloc_obj_from_region_index(cur, word_size); + if (res == NULL) + res = alloc_obj_from_region_index(0, word_size); + return res; +} + +void HeapRegionSeq::iterate(HeapRegionClosure* blk) { + iterate_from((HeapRegion*)NULL, blk); +} + +// The first argument r is the heap region at which iteration begins. +// This operation runs fastest when r is NULL, or the heap region for +// which a HeapRegionClosure most recently returned true, or the +// heap region immediately to its right in the sequence. In all +// other cases a linear search is required to find the index of r. + +void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) { + + // :::: FIXME :::: + // Static cache value is bad, especially when we start doing parallel + // remembered set update. For now just don't cache anything (the + // code in the def'd out blocks). + +#if 0 + static int cached_j = 0; +#endif + int len = _regions.length(); + int j = 0; + // Find the index of r. 
+ if (r != NULL) { +#if 0 + assert(cached_j >= 0, "Invariant."); + if ((cached_j < len) && (r == _regions.at(cached_j))) { + j = cached_j; + } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) { + j = cached_j + 1; + } else { + j = find(r); +#endif + if (j < 0) { + j = 0; + } +#if 0 + } +#endif + } + int i; + for (i = j; i < len; i += 1) { + int res = blk->doHeapRegion(_regions.at(i)); + if (res) { +#if 0 + cached_j = i; +#endif + blk->incomplete(); + return; + } + } + for (i = 0; i < j; i += 1) { + int res = blk->doHeapRegion(_regions.at(i)); + if (res) { +#if 0 + cached_j = i; +#endif + blk->incomplete(); + return; + } + } +} + +void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) { + int len = _regions.length(); + int i; + for (i = idx; i < len; i++) { + if (blk->doHeapRegion(_regions.at(i))) { + blk->incomplete(); + return; + } + } + for (i = 0; i < idx; i++) { + if (blk->doHeapRegion(_regions.at(i))) { + blk->incomplete(); + return; + } + } +} + +MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes, + size_t& num_regions_deleted) { + assert(shrink_bytes % os::vm_page_size() == 0, "unaligned"); + assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned"); + + if (_regions.length() == 0) { + num_regions_deleted = 0; + return MemRegion(); + } + int j = _regions.length() - 1; + HeapWord* end = _regions.at(j)->end(); + HeapWord* last_start = end; + while (j >= 0 && shrink_bytes > 0) { + HeapRegion* cur = _regions.at(j); + // We have to leave humongous regions where they are, + // and work around them. + if (cur->isHumongous()) { + return MemRegion(last_start, end); + } + cur->reset_zero_fill(); + assert(cur == _regions.top(), "Should be top"); + if (!cur->is_empty()) break; + shrink_bytes -= cur->capacity(); + num_regions_deleted++; + _regions.pop(); + last_start = cur->bottom(); + // We need to delete these somehow, but can't currently do so here: if + // we do, the ZF thread may still access the deleted region. We'll + // leave this here as a reminder that we have to do something about + // this. + // delete cur; + j--; + } + return MemRegion(last_start, end); +} + + +class PrintHeapRegionClosure : public HeapRegionClosure { +public: + bool doHeapRegion(HeapRegion* r) { + gclog_or_tty->print(PTR_FORMAT ":", r); + r->print(); + return false; + } +}; + +void HeapRegionSeq::print() { + PrintHeapRegionClosure cl; + iterate(&cl); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp 2009-08-01 04:21:15.365671155 +0100 @@ -0,0 +1,110 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class HeapRegion; +class HeapRegionClosure; + +class HeapRegionSeq: public CHeapObj { + + // _regions is kept sorted by start address order, and no two regions are + // overlapping. + GrowableArray _regions; + + // The index in "_regions" at which to start the next allocation search. + // (For efficiency only; private to obj_allocate after initialization.) + int _alloc_search_start; + + // Attempts to allocate a block of the (assumed humongous) word_size, + // starting at the region "ind". + HeapWord* alloc_obj_from_region_index(int ind, size_t word_size); + + // Currently, we're choosing collection sets in a round-robin fashion, + // starting here. + int _next_rr_candidate; + + // The bottom address of the bottom-most region, or else NULL if there + // are no regions in the sequence. + char* _seq_bottom; + + public: + // Initializes "this" to the empty sequence of regions. + HeapRegionSeq(const size_t max_size); + + // Adds "hr" to "this" sequence. Requires "hr" not to overlap with + // any region already in "this". (Will perform better if regions are + // inserted in ascending address order.) + void insert(HeapRegion* hr); + + // Given a HeapRegion*, returns its index within _regions, + // or returns -1 if not found. + int find(HeapRegion* hr); + + // Requires the index to be valid, and return the region at the index. + HeapRegion* at(size_t i) { return _regions.at((int)i); } + + // Return the number of regions in the sequence. + size_t length(); + + // Returns the number of contiguous regions at the end of the sequence + // that are available for allocation. + size_t free_suffix(); + + // Requires "word_size" to be humongous (in the technical sense). If + // possible, allocates a contiguous subsequence of the heap regions to + // satisfy the allocation, and returns the address of the beginning of + // that sequence, otherwise returns NULL. + HeapWord* obj_allocate(size_t word_size); + + // Apply the "doHeapRegion" method of "blk" to all regions in "this", + // in address order, terminating the iteration early + // if the "doHeapRegion" method returns "true". + void iterate(HeapRegionClosure* blk); + + // Apply the "doHeapRegion" method of "blk" to all regions in "this", + // starting at "r" (or first region, if "r" is NULL), in a circular + // manner, terminating the iteration early if the "doHeapRegion" method + // returns "true". + void iterate_from(HeapRegion* r, HeapRegionClosure* blk); + + // As above, but start from a given index in the sequence + // instead of a given heap region. + void iterate_from(int idx, HeapRegionClosure* blk); + + // Requires "shrink_bytes" to be a multiple of the page size and heap + // region granularity. Deletes as many "rightmost" completely free heap + // regions from the sequence as comprise shrink_bytes bytes. Returns the + // MemRegion indicating the region those regions comprised, and sets + // "num_regions_deleted" to the number of regions deleted. + MemRegion shrink_by(size_t shrink_bytes, size_t& num_regions_deleted); + + // If "addr" falls within a region in the sequence, return that region, + // or else NULL. + HeapRegion* addr_to_region(const void* addr); + + void print(); + + // Prints out runs of empty regions. 
+ void print_empty_runs(); + +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp 2009-08-01 04:21:15.817452338 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +inline HeapRegion* HeapRegionSeq::addr_to_region(const void* addr) { + assert(_seq_bottom != NULL, "bad _seq_bottom in addr_to_region"); + if ((char*) addr >= _seq_bottom) { + size_t diff = (size_t) pointer_delta((HeapWord*) addr, + (HeapWord*) _seq_bottom); + int index = (int) (diff >> HeapRegion::LogOfHRGrainWords); + assert(index >= 0, "invariant / paranoia"); + if (index < _regions.length()) { + HeapRegion* hr = _regions.at(index); + assert(hr->is_in_reserved(addr), + "addr_to_region is wrong..."); + return hr; + } + } + return NULL; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp 2009-08-01 04:21:16.218380910 +0100 @@ -0,0 +1,266 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +# include "incls/_precompiled.incl" +# include "incls/_ptrQueue.cpp.incl" + +PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm) : + _qset(qset_), _buf(NULL), _index(0), _active(false), + _perm(perm), _lock(NULL) +{} + +void PtrQueue::flush() { + if (!_perm && _buf != NULL) { + if (_index == _sz) { + // No work to do. + qset()->deallocate_buffer(_buf); + } else { + // We must NULL out the unused entries, then enqueue. 
+ for (size_t i = 0; i < _index; i += oopSize) { + _buf[byte_index_to_index((int)i)] = NULL; + } + qset()->enqueue_complete_buffer(_buf); + } + _buf = NULL; + _index = 0; + } +} + + +static int byte_index_to_index(int ind) { + assert((ind % oopSize) == 0, "Invariant."); + return ind / oopSize; +} + +static int index_to_byte_index(int byte_ind) { + return byte_ind * oopSize; +} + +void PtrQueue::enqueue_known_active(void* ptr) { + assert(0 <= _index && _index <= _sz, "Invariant."); + assert(_index == 0 || _buf != NULL, "invariant"); + + while (_index == 0) { + handle_zero_index(); + } + assert(_index > 0, "postcondition"); + + _index -= oopSize; + _buf[byte_index_to_index((int)_index)] = ptr; + assert(0 <= _index && _index <= _sz, "Invariant."); +} + +void PtrQueue::locking_enqueue_completed_buffer(void** buf) { + assert(_lock->owned_by_self(), "Required."); + _lock->unlock(); + qset()->enqueue_complete_buffer(buf); + // We must relock only because the caller will unlock, for the normal + // case. + _lock->lock_without_safepoint_check(); +} + + +PtrQueueSet::PtrQueueSet(bool notify_when_complete) : + _max_completed_queue(0), + _cbl_mon(NULL), _fl_lock(NULL), + _notify_when_complete(notify_when_complete), + _sz(0), + _completed_buffers_head(NULL), + _completed_buffers_tail(NULL), + _n_completed_buffers(0), + _process_completed_threshold(0), _process_completed(false), + _buf_free_list(NULL), _buf_free_list_sz(0) +{ + _fl_owner = this; +} + +void** PtrQueueSet::allocate_buffer() { + assert(_sz > 0, "Didn't set a buffer size."); + MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag); + if (_fl_owner->_buf_free_list != NULL) { + void** res = _fl_owner->_buf_free_list; + _fl_owner->_buf_free_list = (void**)_fl_owner->_buf_free_list[0]; + _fl_owner->_buf_free_list_sz--; + // Just override the next pointer with NULL, just in case we scan this part + // of the buffer. + res[0] = NULL; + return res; + } else { + return NEW_C_HEAP_ARRAY(void*, _sz); + } +} + +void PtrQueueSet::deallocate_buffer(void** buf) { + assert(_sz > 0, "Didn't set a buffer size."); + MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag); + buf[0] = (void*)_fl_owner->_buf_free_list; + _fl_owner->_buf_free_list = buf; + _fl_owner->_buf_free_list_sz++; +} + +void PtrQueueSet::reduce_free_list() { + // For now we'll adopt the strategy of deleting half. + MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag); + size_t n = _buf_free_list_sz / 2; + while (n > 0) { + assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong."); + void** head = _buf_free_list; + _buf_free_list = (void**)_buf_free_list[0]; + FREE_C_HEAP_ARRAY(void*,head); + n--; + } +} + +void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index, bool ignore_max_completed) { + // I use explicit locking here because there's a bailout in the middle. + _cbl_mon->lock_without_safepoint_check(); + + Thread* thread = Thread::current(); + assert( ignore_max_completed || + thread->is_Java_thread() || + SafepointSynchronize::is_at_safepoint(), + "invariant" ); + ignore_max_completed = ignore_max_completed || !thread->is_Java_thread(); + + if (!ignore_max_completed && _max_completed_queue > 0 && + _n_completed_buffers >= (size_t) _max_completed_queue) { + _cbl_mon->unlock(); + bool b = mut_process_buffer(buf); + if (b) { + deallocate_buffer(buf); + return; + } + + // Otherwise, go ahead and enqueue the buffer. Must reaquire the lock. + _cbl_mon->lock_without_safepoint_check(); + } + + // Here we still hold the _cbl_mon. 
+ CompletedBufferNode* cbn = new CompletedBufferNode; + cbn->buf = buf; + cbn->next = NULL; + cbn->index = index; + if (_completed_buffers_tail == NULL) { + assert(_completed_buffers_head == NULL, "Well-formedness"); + _completed_buffers_head = cbn; + _completed_buffers_tail = cbn; + } else { + _completed_buffers_tail->next = cbn; + _completed_buffers_tail = cbn; + } + _n_completed_buffers++; + + if (!_process_completed && + _n_completed_buffers == _process_completed_threshold) { + _process_completed = true; + if (_notify_when_complete) + _cbl_mon->notify_all(); + } + debug_only(assert_completed_buffer_list_len_correct_locked()); + _cbl_mon->unlock(); +} + +int PtrQueueSet::completed_buffers_list_length() { + int n = 0; + CompletedBufferNode* cbn = _completed_buffers_head; + while (cbn != NULL) { + n++; + cbn = cbn->next; + } + return n; +} + +void PtrQueueSet::assert_completed_buffer_list_len_correct() { + MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); + assert_completed_buffer_list_len_correct_locked(); +} + +void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() { + guarantee((size_t)completed_buffers_list_length() == _n_completed_buffers, + "Completed buffer length is wrong."); +} + +void PtrQueueSet::set_buffer_size(size_t sz) { + assert(_sz == 0 && sz > 0, "Should be called only once."); + _sz = sz * oopSize; +} + +void PtrQueueSet::set_process_completed_threshold(size_t sz) { + _process_completed_threshold = sz; +} + +// Merge lists of buffers. Notify waiting threads if the length of the list +// exceeds threshold. The source queue is emptied as a result. The queues +// must share the monitor. +void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) { + assert(_cbl_mon == src->_cbl_mon, "Should share the same lock"); + MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); + if (_completed_buffers_tail == NULL) { + assert(_completed_buffers_head == NULL, "Well-formedness"); + _completed_buffers_head = src->_completed_buffers_head; + _completed_buffers_tail = src->_completed_buffers_tail; + } else { + assert(_completed_buffers_head != NULL, "Well formedness"); + if (src->_completed_buffers_head != NULL) { + _completed_buffers_tail->next = src->_completed_buffers_head; + _completed_buffers_tail = src->_completed_buffers_tail; + } + } + _n_completed_buffers += src->_n_completed_buffers; + + src->_n_completed_buffers = 0; + src->_completed_buffers_head = NULL; + src->_completed_buffers_tail = NULL; + + assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL || + _completed_buffers_head != NULL && _completed_buffers_tail != NULL, + "Sanity"); + + if (!_process_completed && + _n_completed_buffers >= _process_completed_threshold) { + _process_completed = true; + if (_notify_when_complete) + _cbl_mon->notify_all(); + } +} + +// Merge free lists of the two queues. The free list of the source +// queue is emptied as a result. The queues must share the same +// mutex that guards free lists. 
+void PtrQueueSet::merge_freelists(PtrQueueSet* src) { + assert(_fl_lock == src->_fl_lock, "Should share the same lock"); + MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag); + if (_buf_free_list != NULL) { + void **p = _buf_free_list; + while (*p != NULL) { + p = (void**)*p; + } + *p = src->_buf_free_list; + } else { + _buf_free_list = src->_buf_free_list; + } + _buf_free_list_sz += src->_buf_free_list_sz; + src->_buf_free_list = NULL; + src->_buf_free_list_sz = 0; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.hpp 2009-08-01 04:21:16.620297507 +0100 @@ -0,0 +1,240 @@ +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// There are various techniques that require threads to be able to log +// addresses. For example, a generational write barrier might log +// the addresses of modified old-generation objects. This type supports +// this operation. + +class PtrQueueSet; + +class PtrQueue VALUE_OBJ_CLASS_SPEC { + +protected: + // The ptr queue set to which this queue belongs. + PtrQueueSet* _qset; + + // Whether updates should be logged. + bool _active; + + // The buffer. + void** _buf; + // The index at which an object was last enqueued. Starts at "_sz" + // (indicating an empty buffer) and goes towards zero. + size_t _index; + + // The size of the buffer. + size_t _sz; + + // If true, the queue is permanent, and doesn't need to deallocate + // its buffer in the destructor (since that obtains a lock which may not + // be legally locked by then. + bool _perm; + + // If there is a lock associated with this buffer, this is that lock. + Mutex* _lock; + + PtrQueueSet* qset() { return _qset; } + +public: + // Initialize this queue to contain a null buffer, and be part of the + // given PtrQueueSet. + PtrQueue(PtrQueueSet*, bool perm = false); + // Release any contained resources. + void flush(); + // Calls flush() when destroyed. + ~PtrQueue() { flush(); } + + // Associate a lock with a ptr queue. + void set_lock(Mutex* lock) { _lock = lock; } + + void reset() { if (_buf != NULL) _index = _sz; } + + // Enqueues the given "obj". + void enqueue(void* ptr) { + if (!_active) return; + else enqueue_known_active(ptr); + } + + inline void handle_zero_index(); + void locking_enqueue_completed_buffer(void** buf); + + void enqueue_known_active(void* ptr); + + size_t size() { + assert(_sz >= _index, "Invariant."); + return _buf == NULL ? 0 : _sz - _index; + } + + // Set the "active" property of the queue to "b". 
An enqueue to an + // inactive thread is a no-op. Setting a queue to inactive resets its + // log to the empty state. + void set_active(bool b) { + _active = b; + if (!b && _buf != NULL) { + _index = _sz; + } else if (b && _buf != NULL) { + assert(_index == _sz, "invariant: queues are empty when activated."); + } + } + + static int byte_index_to_index(int ind) { + assert((ind % oopSize) == 0, "Invariant."); + return ind / oopSize; + } + + static int index_to_byte_index(int byte_ind) { + return byte_ind * oopSize; + } + + // To support compiler. + static ByteSize byte_offset_of_index() { + return byte_offset_of(PtrQueue, _index); + } + static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); } + + static ByteSize byte_offset_of_buf() { + return byte_offset_of(PtrQueue, _buf); + } + static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); } + + static ByteSize byte_offset_of_active() { + return byte_offset_of(PtrQueue, _active); + } + static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); } + +}; + +// A PtrQueueSet represents resources common to a set of pointer queues. +// In particular, the individual queues allocate buffers from this shared +// set, and return completed buffers to the set. +// All these variables are are protected by the TLOQ_CBL_mon. XXX ??? +class PtrQueueSet VALUE_OBJ_CLASS_SPEC { + +protected: + + class CompletedBufferNode: public CHeapObj { + public: + void** buf; + size_t index; + CompletedBufferNode* next; + CompletedBufferNode() : buf(NULL), + index(0), next(NULL){ } + }; + + Monitor* _cbl_mon; // Protects the fields below. + CompletedBufferNode* _completed_buffers_head; + CompletedBufferNode* _completed_buffers_tail; + size_t _n_completed_buffers; + size_t _process_completed_threshold; + volatile bool _process_completed; + + // This (and the interpretation of the first element as a "next" + // pointer) are protected by the TLOQ_FL_lock. + Mutex* _fl_lock; + void** _buf_free_list; + size_t _buf_free_list_sz; + // Queue set can share a freelist. The _fl_owner variable + // specifies the owner. It is set to "this" by default. + PtrQueueSet* _fl_owner; + + // The size of all buffers in the set. + size_t _sz; + + bool _all_active; + + // If true, notify_all on _cbl_mon when the threshold is reached. + bool _notify_when_complete; + + // Maximum number of elements allowed on completed queue: after that, + // enqueuer does the work itself. Zero indicates no maximum. + int _max_completed_queue; + + int completed_buffers_list_length(); + void assert_completed_buffer_list_len_correct_locked(); + void assert_completed_buffer_list_len_correct(); + +protected: + // A mutator thread does the the work of processing a buffer. + // Returns "true" iff the work is complete (and the buffer may be + // deallocated). + virtual bool mut_process_buffer(void** buf) { + ShouldNotReachHere(); + return false; + } + +public: + // Create an empty ptr queue set. + PtrQueueSet(bool notify_when_complete = false); + + // Because of init-order concerns, we can't pass these as constructor + // arguments. + void initialize(Monitor* cbl_mon, Mutex* fl_lock, + int max_completed_queue = 0, + PtrQueueSet *fl_owner = NULL) { + _max_completed_queue = max_completed_queue; + assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?"); + _cbl_mon = cbl_mon; + _fl_lock = fl_lock; + _fl_owner = (fl_owner != NULL) ? fl_owner : this; + } + + // Return an empty oop array of size _sz (required to be non-zero). 
+ void** allocate_buffer(); + + // Return an empty buffer to the free list. The "buf" argument is + // required to be a pointer to the head of an array of length "_sz". + void deallocate_buffer(void** buf); + + // Declares that "buf" is a complete buffer. + void enqueue_complete_buffer(void** buf, size_t index = 0, + bool ignore_max_completed = false); + + bool completed_buffers_exist_dirty() { + return _n_completed_buffers > 0; + } + + bool process_completed_buffers() { return _process_completed; } + + bool active() { return _all_active; } + + // Set the buffer size. Should be called before any "enqueue" operation + // can be called. And should only be called once. + void set_buffer_size(size_t sz); + + // Get the buffer size. + size_t buffer_size() { return _sz; } + + // Set the number of completed buffers that triggers log processing. + void set_process_completed_threshold(size_t sz); + + // Must only be called at a safe point. Indicates that the buffer free + // list size may be reduced, if that is deemed desirable. + void reduce_free_list(); + + size_t completed_buffers_num() { return _n_completed_buffers; } + + void merge_bufferlists(PtrQueueSet* src); + void merge_freelists(PtrQueueSet* src); +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.inline.hpp 2009-08-01 04:21:17.036577132 +0100 @@ -0,0 +1,41 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +void PtrQueue::handle_zero_index() { + assert(0 == _index, "Precondition."); + // This thread records the full buffer and allocates a new one (while + // holding the lock if there is one). + void** buf = _buf; + _buf = qset()->allocate_buffer(); + _sz = qset()->buffer_size(); + _index = _sz; + assert(0 <= _index && _index <= _sz, "Invariant."); + if (buf != NULL) { + if (_lock) { + locking_enqueue_completed_buffer(buf); + } else { + qset()->enqueue_complete_buffer(buf); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp 2009-08-01 04:21:17.454163316 +0100 @@ -0,0 +1,160 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +# include "incls/_precompiled.incl" +# include "incls/_satbQueue.cpp.incl" + +void ObjPtrQueue::apply_closure(ObjectClosure* cl) { + if (_buf != NULL) { + apply_closure_to_buffer(cl, _buf, _index, _sz); + _index = _sz; + } +} + +void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl, + void** buf, size_t index, size_t sz) { + if (cl == NULL) return; + for (size_t i = index; i < sz; i += oopSize) { + oop obj = (oop)buf[byte_index_to_index((int)i)]; + // There can be NULL entries because of destructors. + if (obj != NULL) { + cl->do_object(obj); + } + } +} +#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away +#pragma warning( disable:4355 ) // 'this' : used in base member initializer list +#endif // _MSC_VER + + +SATBMarkQueueSet::SATBMarkQueueSet() : + PtrQueueSet(), + _closure(NULL), _par_closures(NULL), + _shared_satb_queue(this, true /*perm*/) +{} + +void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock, + int max_completed_queue, + Mutex* lock) { + PtrQueueSet::initialize(cbl_mon, fl_lock, max_completed_queue); + _shared_satb_queue.set_lock(lock); + if (ParallelGCThreads > 0) { + _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads); + } +} + + +void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) { + t->satb_mark_queue().handle_zero_index(); +} + +void SATBMarkQueueSet::set_active_all_threads(bool b) { + _all_active = b; + for(JavaThread* t = Threads::first(); t; t = t->next()) { + t->satb_mark_queue().set_active(b); + } +} + +void SATBMarkQueueSet::set_closure(ObjectClosure* closure) { + _closure = closure; +} + +void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) { + assert(ParallelGCThreads > 0 && _par_closures != NULL, "Precondition"); + _par_closures[i] = par_closure; +} + +void SATBMarkQueueSet::iterate_closure_all_threads() { + for(JavaThread* t = Threads::first(); t; t = t->next()) { + t->satb_mark_queue().apply_closure(_closure); + } + shared_satb_queue()->apply_closure(_closure); +} + +void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) { + SharedHeap* sh = SharedHeap::heap(); + int parity = sh->strong_roots_parity(); + + for(JavaThread* t = Threads::first(); t; t = t->next()) { + if (t->claim_oops_do(true, parity)) { + t->satb_mark_queue().apply_closure(_par_closures[worker]); + } + } + // We'll have worker 0 do this one. 
+ if (worker == 0) { + shared_satb_queue()->apply_closure(_par_closures[0]); + } +} + +bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par, + int worker) { + CompletedBufferNode* nd = NULL; + { + MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); + if (_completed_buffers_head != NULL) { + nd = _completed_buffers_head; + _completed_buffers_head = nd->next; + if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL; + _n_completed_buffers--; + if (_n_completed_buffers == 0) _process_completed = false; + } + } + ObjectClosure* cl = (par ? _par_closures[worker] : _closure); + if (nd != NULL) { + ObjPtrQueue::apply_closure_to_buffer(cl, nd->buf, 0, _sz); + deallocate_buffer(nd->buf); + delete nd; + return true; + } else { + return false; + } +} + +void SATBMarkQueueSet::abandon_partial_marking() { + CompletedBufferNode* buffers_to_delete = NULL; + { + MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); + while (_completed_buffers_head != NULL) { + CompletedBufferNode* nd = _completed_buffers_head; + _completed_buffers_head = nd->next; + nd->next = buffers_to_delete; + buffers_to_delete = nd; + } + _completed_buffers_tail = NULL; + _n_completed_buffers = 0; + debug_only(assert_completed_buffer_list_len_correct_locked()); + } + while (buffers_to_delete != NULL) { + CompletedBufferNode* nd = buffers_to_delete; + buffers_to_delete = nd->next; + deallocate_buffer(nd->buf); + delete nd; + } + assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); + // So we can safely manipulate these queues. + for (JavaThread* t = Threads::first(); t; t = t->next()) { + t->satb_mark_queue().reset(); + } + shared_satb_queue()->reset(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp 2009-08-01 04:21:17.879575292 +0100 @@ -0,0 +1,105 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class ObjectClosure; +class JavaThread; + +// A ptrQueue whose elements are "oops", pointers to object heads. +class ObjPtrQueue: public PtrQueue { +public: + ObjPtrQueue(PtrQueueSet* qset_, bool perm = false) : + PtrQueue(qset_, perm) + {} + // Apply the closure to all elements, and reset the index to make the + // buffer empty. + void apply_closure(ObjectClosure* cl); + + // Apply the closure to all elements of "buf", down to "index" (inclusive.) 
+ static void apply_closure_to_buffer(ObjectClosure* cl, + void** buf, size_t index, size_t sz); + +}; + + + +class SATBMarkQueueSet: public PtrQueueSet { + ObjectClosure* _closure; + ObjectClosure** _par_closures; // One per ParGCThread. + + ObjPtrQueue _shared_satb_queue; + + // Utility function to support sequential and parallel versions. If + // "par" is true, then "worker" is the par thread id; if "false", worker + // is ignored. + bool apply_closure_to_completed_buffer_work(bool par, int worker); + + +public: + SATBMarkQueueSet(); + + void initialize(Monitor* cbl_mon, Mutex* fl_lock, + int max_completed_queue = 0, + Mutex* lock = NULL); + + static void handle_zero_index_for_thread(JavaThread* t); + + // Apply "set_active(b)" to all thread tloq's. Should be called only + // with the world stopped. + void set_active_all_threads(bool b); + + // Register "blk" as "the closure" for all queues. Only one such closure + // is allowed. The "apply_closure_to_completed_buffer" method will apply + // this closure to a completed buffer, and "iterate_closure_all_threads" + // applies it to partially-filled buffers (the latter should only be done + // with the world stopped). + void set_closure(ObjectClosure* closure); + // Set the parallel closures: pointer is an array of pointers to + // closures, one for each parallel GC thread. + void set_par_closure(int i, ObjectClosure* closure); + + // If there is a registered closure for buffers, apply it to all entries + // in all currently-active buffers. This should only be applied at a + // safepoint. (Currently must not be called in parallel; this should + // change in the future.) + void iterate_closure_all_threads(); + // Parallel version of the above. + void par_iterate_closure_all_threads(int worker); + + // If there exists some completed buffer, pop it, then apply the + // registered closure to all its elements, and return true. If no + // completed buffers exist, return false. + bool apply_closure_to_completed_buffer() { + return apply_closure_to_completed_buffer_work(false, 0); + } + // Parallel version of the above. + bool par_apply_closure_to_completed_buffer(int worker) { + return apply_closure_to_completed_buffer_work(true, worker); + } + + ObjPtrQueue* shared_satb_queue() { return &_shared_satb_queue; } + + // If a marking is being abandoned, reset any unprocessed log buffers. + void abandon_partial_marking(); + +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp 2009-08-01 04:21:18.314106984 +0100 @@ -0,0 +1,531 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_sparsePRT.cpp.incl" + +#define SPARSE_PRT_VERBOSE 0 + +#define UNROLL_CARD_LOOPS 1 + +void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) { + sprt_iter->init(this); +} + +void SparsePRTEntry::init(short region_ind) { + _region_ind = region_ind; + _next_index = NullEntry; +#if UNROLL_CARD_LOOPS + assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll."); + _cards[0] = NullEntry; + _cards[1] = NullEntry; + _cards[2] = NullEntry; + _cards[3] = NullEntry; +#else + for (int i = 0; i < CardsPerEntry; i++) _cards[i] = NullEntry; +#endif +} + +bool SparsePRTEntry::contains_card(short card_index) const { +#if UNROLL_CARD_LOOPS + assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll."); + if (_cards[0] == card_index) return true; + if (_cards[1] == card_index) return true; + if (_cards[2] == card_index) return true; + if (_cards[3] == card_index) return true; +#else + for (int i = 0; i < CardsPerEntry; i++) { + if (_cards[i] == card_index) return true; + } +#endif + // Otherwise, we're full. + return false; +} + +int SparsePRTEntry::num_valid_cards() const { + int sum = 0; +#if UNROLL_CARD_LOOPS + assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll."); + if (_cards[0] != NullEntry) sum++; + if (_cards[1] != NullEntry) sum++; + if (_cards[2] != NullEntry) sum++; + if (_cards[3] != NullEntry) sum++; +#else + for (int i = 0; i < CardsPerEntry; i++) { + if (_cards[i] != NulLEntry) sum++; + } +#endif + // Otherwise, we're full. + return sum; +} + +SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(short card_index) { +#if UNROLL_CARD_LOOPS + assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll."); + short c = _cards[0]; + if (c == card_index) return found; + if (c == NullEntry) { _cards[0] = card_index; return added; } + c = _cards[1]; + if (c == card_index) return found; + if (c == NullEntry) { _cards[1] = card_index; return added; } + c = _cards[2]; + if (c == card_index) return found; + if (c == NullEntry) { _cards[2] = card_index; return added; } + c = _cards[3]; + if (c == card_index) return found; + if (c == NullEntry) { _cards[3] = card_index; return added; } +#else + for (int i = 0; i < CardsPerEntry; i++) { + short c = _cards[i]; + if (c == card_index) return found; + if (c == NullEntry) { _cards[i] = card_index; return added; } + } +#endif + // Otherwise, we're full. + return overflow; +} + +void SparsePRTEntry::copy_cards(short* cards) const { +#if UNROLL_CARD_LOOPS + assert(CardsPerEntry == 4, "Assumption. 
If changes, un-unroll."); + cards[0] = _cards[0]; + cards[1] = _cards[1]; + cards[2] = _cards[2]; + cards[3] = _cards[3]; +#else + for (int i = 0; i < CardsPerEntry; i++) { + cards[i] = _cards[i]; + } +#endif +} + +void SparsePRTEntry::copy_cards(SparsePRTEntry* e) const { + copy_cards(&e->_cards[0]); +} + +// ---------------------------------------------------------------------- + +RSHashTable::RSHashTable(size_t capacity) : + _capacity(capacity), _capacity_mask(capacity-1), + _occupied_entries(0), _occupied_cards(0), + _entries(NEW_C_HEAP_ARRAY(SparsePRTEntry, capacity)), + _buckets(NEW_C_HEAP_ARRAY(short, capacity)), + _next_deleted(NULL), _deleted(false), + _free_list(NullEntry), _free_region(0) +{ + clear(); +} + +RSHashTable::~RSHashTable() { + if (_entries != NULL) { + FREE_C_HEAP_ARRAY(SparsePRTEntry, _entries); + _entries = NULL; + } + if (_buckets != NULL) { + FREE_C_HEAP_ARRAY(short, _buckets); + _buckets = NULL; + } +} + +void RSHashTable::clear() { + _occupied_entries = 0; + _occupied_cards = 0; + guarantee(_entries != NULL, "INV"); + guarantee(_buckets != NULL, "INV"); + // This will put -1 == NullEntry in the key field of all entries. + memset(_entries, -1, _capacity * sizeof(SparsePRTEntry)); + memset(_buckets, -1, _capacity * sizeof(short)); + _free_list = NullEntry; + _free_region = 0; +} + +bool RSHashTable::add_card(short region_ind, short card_index) { + SparsePRTEntry* e = entry_for_region_ind_create(region_ind); + assert(e != NULL && e->r_ind() == region_ind, + "Postcondition of call above."); + SparsePRTEntry::AddCardResult res = e->add_card(card_index); + if (res == SparsePRTEntry::added) _occupied_cards++; +#if SPARSE_PRT_VERBOSE + gclog_or_tty->print_cr(" after add_card[%d]: valid-cards = %d.", + pointer_delta(e, _entries, sizeof(SparsePRTEntry)), + e->num_valid_cards()); +#endif + assert(e->num_valid_cards() > 0, "Postcondition"); + return res != SparsePRTEntry::overflow; +} + +bool RSHashTable::get_cards(short region_ind, short* cards) { + short ind = (short) (region_ind & capacity_mask()); + short cur_ind = _buckets[ind]; + SparsePRTEntry* cur; + while (cur_ind != NullEntry && + (cur = entry(cur_ind))->r_ind() != region_ind) { + cur_ind = cur->next_index(); + } + + if (cur_ind == NullEntry) return false; + // Otherwise... + assert(cur->r_ind() == region_ind, "Postcondition of loop + test above."); + assert(cur->num_valid_cards() > 0, "Inv"); + cur->copy_cards(cards); + return true; +} + +bool RSHashTable::delete_entry(short region_ind) { + short ind = (short) (region_ind & capacity_mask()); + short* prev_loc = &_buckets[ind]; + short cur_ind = *prev_loc; + SparsePRTEntry* cur; + while (cur_ind != NullEntry && + (cur = entry(cur_ind))->r_ind() != region_ind) { + prev_loc = cur->next_index_addr(); + cur_ind = *prev_loc; + } + + if (cur_ind == NullEntry) return false; + // Otherwise, splice out "cur". 
+ *prev_loc = cur->next_index(); + _occupied_cards -= cur->num_valid_cards(); + free_entry(cur_ind); + _occupied_entries--; + return true; +} + +SparsePRTEntry* RSHashTable::entry_for_region_ind(short region_ind) const { + assert(occupied_entries() < capacity(), "Precondition"); + short ind = (short) (region_ind & capacity_mask()); + short cur_ind = _buckets[ind]; + SparsePRTEntry* cur; + // XXX + // int k = 0; + while (cur_ind != NullEntry && + (cur = entry(cur_ind))->r_ind() != region_ind) { + /* + k++; + if (k > 10) { + gclog_or_tty->print_cr("RSHashTable::entry_for_region_ind(%d): " + "k = %d, cur_ind = %d.", region_ind, k, cur_ind); + if (k >= 1000) { + while (1) ; + } + } + */ + cur_ind = cur->next_index(); + } + + if (cur_ind != NullEntry) { + assert(cur->r_ind() == region_ind, "Loop postcondition + test"); + return cur; + } else { + return NULL; + } +} + +SparsePRTEntry* RSHashTable::entry_for_region_ind_create(short region_ind) { + SparsePRTEntry* res = entry_for_region_ind(region_ind); + if (res == NULL) { + short new_ind = alloc_entry(); + assert(0 <= new_ind && (size_t)new_ind < capacity(), "There should be room."); + res = entry(new_ind); + res->init(region_ind); + // Insert at front. + short ind = (short) (region_ind & capacity_mask()); + res->set_next_index(_buckets[ind]); + _buckets[ind] = new_ind; + _occupied_entries++; + } + return res; +} + +short RSHashTable::alloc_entry() { + short res; + if (_free_list != NullEntry) { + res = _free_list; + _free_list = entry(res)->next_index(); + return res; + } else if ((size_t) _free_region+1 < capacity()) { + res = _free_region; + _free_region++; + return res; + } else { + return NullEntry; + } +} + + +void RSHashTable::free_entry(short fi) { + entry(fi)->set_next_index(_free_list); + _free_list = fi; +} + + +void RSHashTable::add_entry(SparsePRTEntry* e) { + assert(e->num_valid_cards() > 0, "Precondition."); + SparsePRTEntry* e2 = entry_for_region_ind_create(e->r_ind()); + e->copy_cards(e2); + _occupied_cards += e2->num_valid_cards(); + assert(e2->num_valid_cards() > 0, "Postcondition."); +} + +RSHashTable* RSHashTable::_head_deleted_list = NULL; + +void RSHashTable::add_to_deleted_list(RSHashTable* rsht) { + assert(!rsht->deleted(), "Should delete only once."); + rsht->set_deleted(true); + RSHashTable* hd = _head_deleted_list; + while (true) { + rsht->_next_deleted = hd; + RSHashTable* res = + (RSHashTable*) + Atomic::cmpxchg_ptr(rsht, &_head_deleted_list, hd); + if (res == hd) return; + else hd = res; + } +} + +RSHashTable* RSHashTable::get_from_deleted_list() { + RSHashTable* hd = _head_deleted_list; + while (hd != NULL) { + RSHashTable* next = hd->next_deleted(); + RSHashTable* res = + (RSHashTable*) + Atomic::cmpxchg_ptr(next, &_head_deleted_list, hd); + if (res == hd) { + hd->set_next_deleted(NULL); + hd->set_deleted(false); + return hd; + } else { + hd = res; + } + } + return NULL; +} + +short /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() { + short res; + while (_bl_ind != RSHashTable::NullEntry) { + res = _rsht->entry(_bl_ind)->card(0); + if (res != SparsePRTEntry::NullEntry) { + return res; + } else { + _bl_ind = _rsht->entry(_bl_ind)->next_index(); + } + } + // Otherwise, none found: + return SparsePRTEntry::NullEntry; +} + +size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(short ci) { + return + _heap_bot_card_ind + + (_rsht->entry(_bl_ind)->r_ind() * CardsPerRegion) + + ci; +} + +bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) { + _card_ind++; + short ci; + if 
(_card_ind < SparsePRTEntry::CardsPerEntry && + ((ci = _rsht->entry(_bl_ind)->card(_card_ind)) != + SparsePRTEntry::NullEntry)) { + card_index = compute_card_ind(ci); + return true; + } + // Otherwise, must find the next valid entry. + _card_ind = 0; + + if (_bl_ind != RSHashTable::NullEntry) { + _bl_ind = _rsht->entry(_bl_ind)->next_index(); + ci = find_first_card_in_list(); + if (ci != SparsePRTEntry::NullEntry) { + card_index = compute_card_ind(ci); + return true; + } + } + // If we didn't return above, must go to the next non-null table index. + _tbl_ind++; + while ((size_t)_tbl_ind < _rsht->capacity()) { + _bl_ind = _rsht->_buckets[_tbl_ind]; + ci = find_first_card_in_list(); + if (ci != SparsePRTEntry::NullEntry) { + card_index = compute_card_ind(ci); + return true; + } + // Otherwise, try next entry. + _tbl_ind++; + } + // Otherwise, there were no entry. + return false; +} + +bool RSHashTable::contains_card(short region_index, short card_index) const { + SparsePRTEntry* e = entry_for_region_ind(region_index); + return (e != NULL && e->contains_card(card_index)); +} + +size_t RSHashTable::mem_size() const { + return sizeof(this) + capacity() * (sizeof(SparsePRTEntry) + sizeof(short)); +} + + +// ---------------------------------------------------------------------- + +SparsePRT* SparsePRT::_head_expanded_list = NULL; + +void SparsePRT::add_to_expanded_list(SparsePRT* sprt) { + // We could expand multiple times in a pause -- only put on list once. + if (sprt->expanded()) return; + sprt->set_expanded(true); + SparsePRT* hd = _head_expanded_list; + while (true) { + sprt->_next_expanded = hd; + SparsePRT* res = + (SparsePRT*) + Atomic::cmpxchg_ptr(sprt, &_head_expanded_list, hd); + if (res == hd) return; + else hd = res; + } +} + +SparsePRT* SparsePRT::get_from_expanded_list() { + SparsePRT* hd = _head_expanded_list; + while (hd != NULL) { + SparsePRT* next = hd->next_expanded(); + SparsePRT* res = + (SparsePRT*) + Atomic::cmpxchg_ptr(next, &_head_expanded_list, hd); + if (res == hd) { + hd->set_next_expanded(NULL); + return hd; + } else { + hd = res; + } + } + return NULL; +} + + +void SparsePRT::cleanup_all() { + // First clean up all expanded tables so they agree on next and cur. + SparsePRT* sprt = get_from_expanded_list(); + while (sprt != NULL) { + sprt->cleanup(); + sprt = get_from_expanded_list(); + } + // Now delete all deleted RSHashTables. + RSHashTable* rsht = RSHashTable::get_from_deleted_list(); + while (rsht != NULL) { +#if SPARSE_PRT_VERBOSE + gclog_or_tty->print_cr("About to delete RSHT " PTR_FORMAT ".", rsht); +#endif + delete rsht; + rsht = RSHashTable::get_from_deleted_list(); + } +} + + +SparsePRT::SparsePRT(HeapRegion* hr) : + _expanded(false), _next_expanded(NULL) +{ + _cur = new RSHashTable(InitialCapacity); + _next = _cur; +} + +SparsePRT::~SparsePRT() { + assert(_next != NULL && _cur != NULL, "Inv"); + if (_cur != _next) { delete _cur; } + delete _next; +} + + +size_t SparsePRT::mem_size() const { + // We ignore "_cur" here, because it either = _next, or else it is + // on the deleted list. 
+ return sizeof(this) + _next->mem_size(); +} + +bool SparsePRT::add_card(short region_id, short card_index) { +#if SPARSE_PRT_VERBOSE + gclog_or_tty->print_cr(" Adding card %d from region %d to region %d sparse.", + card_index, region_id, _hr->hrs_index()); +#endif + if (_next->occupied_entries() * 2 > _next->capacity()) { + expand(); + } + return _next->add_card(region_id, card_index); +} + +bool SparsePRT::get_cards(short region_id, short* cards) { + return _next->get_cards(region_id, cards); +} + +bool SparsePRT::delete_entry(short region_id) { + return _next->delete_entry(region_id); +} + +void SparsePRT::clear() { + // If they differ, _next is bigger then cur, so next has no chance of + // being the initial size. + if (_next != _cur) { + delete _next; + } + + if (_cur->capacity() != InitialCapacity) { + delete _cur; + _cur = new RSHashTable(InitialCapacity); + } else { + _cur->clear(); + } + _next = _cur; +} + +void SparsePRT::cleanup() { + // Make sure that the current and next tables agree. (Another mechanism + // takes care of deleting now-unused tables.) + _cur = _next; + set_expanded(false); +} + +void SparsePRT::expand() { + RSHashTable* last = _next; + _next = new RSHashTable(last->capacity() * 2); + +#if SPARSE_PRT_VERBOSE + gclog_or_tty->print_cr(" Expanded sparse table for %d to %d.", + _hr->hrs_index(), _next->capacity()); +#endif + for (size_t i = 0; i < last->capacity(); i++) { + SparsePRTEntry* e = last->entry((int)i); + if (e->valid_entry()) { +#if SPARSE_PRT_VERBOSE + gclog_or_tty->print_cr(" During expansion, transferred entry for %d.", + e->r_ind()); +#endif + _next->add_entry(e); + } + } + if (last != _cur) + RSHashTable::add_to_deleted_list(last); + add_to_expanded_list(this); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp 2009-08-01 04:21:18.793659059 +0100 @@ -0,0 +1,308 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// Sparse remembered set for a heap region (the "owning" region). Maps +// indices of other regions to short sequences of cards in the other region +// that might contain pointers into the owner region. + +// These tables only expand while they are accessed in parallel -- +// deletions may be done in single-threaded code. This allows us to allow +// unsynchronized reads/iterations, as long as expansions caused by +// insertions only enqueue old versions for deletions, but do not delete +// old versions synchronously. 
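As a rough sketch of the deferred-deletion scheme described above (standalone illustration only; the Table type and the std::atomic list below are stand-ins, not the patch's RSHashTable or Atomic::cmpxchg_ptr):

#include <atomic>
#include <cstddef>

// Stand-in for a hash table that is replaced rather than resized in place:
// readers may still be scanning the old table, so it is queued for later,
// single-threaded reclamation instead of being deleted here.
struct Table {
  size_t capacity;
  Table* next_deleted;
  explicit Table(size_t cap) : capacity(cap), next_deleted(nullptr) {}
};

static std::atomic<Table*> g_deleted_head{nullptr};

void add_to_deleted_list(Table* t) {
  Table* hd = g_deleted_head.load();
  do {
    t->next_deleted = hd;                        // link before publishing
  } while (!g_deleted_head.compare_exchange_weak(hd, t));
}

// Single-threaded cleanup, run only when no reader can still hold an old table.
void reclaim_all_deleted() {
  Table* hd = g_deleted_head.exchange(nullptr);
  while (hd != nullptr) {
    Table* next = hd->next_deleted;
    delete hd;
    hd = next;
  }
}

Table* expand(Table* cur) {
  Table* bigger = new Table(cur->capacity * 2);
  // ... copy/rehash entries from cur into bigger ...
  add_to_deleted_list(cur);                      // defer deletion of the old table
  return bigger;
}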
+ + +class SparsePRTEntry: public CHeapObj { +public: + enum SomePublicConstants { + CardsPerEntry = (short)4, + NullEntry = (short)-1, + DeletedEntry = (short)-2 + }; + +private: + short _region_ind; + short _next_index; + short _cards[CardsPerEntry]; + +public: + + // Set the region_ind to the given value, and delete all cards. + inline void init(short region_ind); + + short r_ind() const { return _region_ind; } + bool valid_entry() const { return r_ind() >= 0; } + void set_r_ind(short rind) { _region_ind = rind; } + + short next_index() const { return _next_index; } + short* next_index_addr() { return &_next_index; } + void set_next_index(short ni) { _next_index = ni; } + + // Returns "true" iff the entry contains the given card index. + inline bool contains_card(short card_index) const; + + // Returns the number of non-NULL card entries. + inline int num_valid_cards() const; + + // Requires that the entry not contain the given card index. If there is + // space available, add the given card index to the entry and return + // "true"; otherwise, return "false" to indicate that the entry is full. + enum AddCardResult { + overflow, + found, + added + }; + inline AddCardResult add_card(short card_index); + + // Copy the current entry's cards into "cards". + inline void copy_cards(short* cards) const; + // Copy the current entry's cards into the "_card" array of "e." + inline void copy_cards(SparsePRTEntry* e) const; + + inline short card(int i) const { return _cards[i]; } +}; + + +class RSHashTable : public CHeapObj { + + friend class RSHashTableIter; + + enum SomePrivateConstants { + NullEntry = -1 + }; + + size_t _capacity; + size_t _capacity_mask; + size_t _occupied_entries; + size_t _occupied_cards; + + SparsePRTEntry* _entries; + short* _buckets; + short _free_region; + short _free_list; + + static RSHashTable* _head_deleted_list; + RSHashTable* _next_deleted; + RSHashTable* next_deleted() { return _next_deleted; } + void set_next_deleted(RSHashTable* rsht) { _next_deleted = rsht; } + bool _deleted; + void set_deleted(bool b) { _deleted = b; } + + // Requires that the caller hold a lock preventing parallel modifying + // operations, and that the the table be less than completely full. If + // an entry for "region_ind" is already in the table, finds it and + // returns its address; otherwise returns "NULL." + SparsePRTEntry* entry_for_region_ind(short region_ind) const; + + // Requires that the caller hold a lock preventing parallel modifying + // operations, and that the the table be less than completely full. If + // an entry for "region_ind" is already in the table, finds it and + // returns its address; otherwise allocates, initializes, inserts and + // returns a new entry for "region_ind". + SparsePRTEntry* entry_for_region_ind_create(short region_ind); + + // Returns the index of the next free entry in "_entries". + short alloc_entry(); + // Declares the entry "fi" to be free. (It must have already been + // deleted from any bucket lists. + void free_entry(short fi); + +public: + RSHashTable(size_t capacity); + ~RSHashTable(); + + // Attempts to ensure that the given card_index in the given region is in + // the sparse table. If successful (because the card was already + // present, or because it was successfullly added) returns "true". + // Otherwise, returns "false" to indicate that the addition would + // overflow the entry for the region. The caller must transfer these + // entries to a larger-capacity representation. 
+ bool add_card(short region_id, short card_index); + + bool get_cards(short region_id, short* cards); + bool delete_entry(short region_id); + + bool contains_card(short region_id, short card_index) const; + + void add_entry(SparsePRTEntry* e); + + void clear(); + + size_t capacity() const { return _capacity; } + size_t capacity_mask() const { return _capacity_mask; } + size_t occupied_entries() const { return _occupied_entries; } + size_t occupied_cards() const { return _occupied_cards; } + size_t mem_size() const; + bool deleted() { return _deleted; } + + SparsePRTEntry* entry(int i) const { return &_entries[i]; } + + void print(); + + static void add_to_deleted_list(RSHashTable* rsht); + static RSHashTable* get_from_deleted_list(); + + +}; + + // ValueObj because will be embedded in HRRS iterator. +class RSHashTableIter VALUE_OBJ_CLASS_SPEC { + short _tbl_ind; + short _bl_ind; + short _card_ind; + RSHashTable* _rsht; + size_t _heap_bot_card_ind; + + enum SomePrivateConstants { + CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift + }; + + // If the bucket list pointed to by _bl_ind contains a card, sets + // _bl_ind to the index of that entry, and returns the card. + // Otherwise, returns SparseEntry::NullEnty. + short find_first_card_in_list(); + // Computes the proper card index for the card whose offset in the + // current region (as indicated by _bl_ind) is "ci". + // This is subject to errors when there is iteration concurrent with + // modification, but these errors should be benign. + size_t compute_card_ind(short ci); + + public: + RSHashTableIter(size_t heap_bot_card_ind) : + _tbl_ind(RSHashTable::NullEntry), + _bl_ind(RSHashTable::NullEntry), + _card_ind((SparsePRTEntry::CardsPerEntry-1)), + _rsht(NULL), + _heap_bot_card_ind(heap_bot_card_ind) + {} + + void init(RSHashTable* rsht) { + _rsht = rsht; + _tbl_ind = -1; // So that first increment gets to 0. + _bl_ind = RSHashTable::NullEntry; + _card_ind = (SparsePRTEntry::CardsPerEntry-1); + } + + bool has_next(size_t& card_index); + + }; + +// Concurrent accesss to a SparsePRT must be serialized by some external +// mutex. + +class SparsePRTIter; + +class SparsePRT VALUE_OBJ_CLASS_SPEC { + // Iterations are done on the _cur hash table, since they only need to + // see entries visible at the start of a collection pause. + // All other operations are done using the _next hash table. + RSHashTable* _cur; + RSHashTable* _next; + + HeapRegion* _hr; + + enum SomeAdditionalPrivateConstants { + InitialCapacity = 16 + }; + + void expand(); + + bool _expanded; + + bool expanded() { return _expanded; } + void set_expanded(bool b) { _expanded = b; } + + SparsePRT* _next_expanded; + + SparsePRT* next_expanded() { return _next_expanded; } + void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; } + + + static SparsePRT* _head_expanded_list; + +public: + SparsePRT(HeapRegion* hr); + + ~SparsePRT(); + + size_t occupied() const { return _next->occupied_cards(); } + size_t mem_size() const; + + // Attempts to ensure that the given card_index in the given region is in + // the sparse table. If successful (because the card was already + // present, or because it was successfullly added) returns "true". + // Otherwise, returns "false" to indicate that the addition would + // overflow the entry for the region. The caller must transfer these + // entries to a larger-capacity representation. 
+ bool add_card(short region_id, short card_index); + + // If the table hold an entry for "region_ind", Copies its + // cards into "cards", which must be an array of length at least + // "CardsPerEntry", and returns "true"; otherwise, returns "false". + bool get_cards(short region_ind, short* cards); + + // If there is an entry for "region_ind", removes it and return "true"; + // otherwise returns "false." + bool delete_entry(short region_ind); + + // Clear the table, and reinitialize to initial capacity. + void clear(); + + // Ensure that "_cur" and "_next" point to the same table. + void cleanup(); + + // Clean up all tables on the expanded list. Called single threaded. + static void cleanup_all(); + RSHashTable* cur() const { return _cur; } + + + void init_iterator(SparsePRTIter* sprt_iter); + + static void add_to_expanded_list(SparsePRT* sprt); + static SparsePRT* get_from_expanded_list(); + + bool contains_card(short region_id, short card_index) const { + return _next->contains_card(region_id, card_index); + } + +#if 0 + void verify_is_cleared(); + void print(); +#endif +}; + + +class SparsePRTIter: public /* RSHashTable:: */RSHashTableIter { +public: + SparsePRTIter(size_t heap_bot_card_ind) : + /* RSHashTable:: */RSHashTableIter(heap_bot_card_ind) + {} + + void init(const SparsePRT* sprt) { + RSHashTableIter::init(sprt->cur()); + } + bool has_next(size_t& card_index) { + return RSHashTableIter::has_next(card_index); + } +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/survRateGroup.cpp 2009-08-01 04:21:19.581698294 +0100 @@ -0,0 +1,274 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +#include "incls/_precompiled.incl" +#include "incls/_survRateGroup.cpp.incl" + +SurvRateGroup::SurvRateGroup(G1CollectorPolicy* g1p, + const char* name, + size_t summary_surv_rates_len) : + _g1p(g1p), _name(name), + _summary_surv_rates_len(summary_surv_rates_len), + _summary_surv_rates_max_len(0), + _summary_surv_rates(NULL), + _surv_rate(NULL), + _accum_surv_rate_pred(NULL), + _surv_rate_pred(NULL) +{ + reset(); + if (summary_surv_rates_len > 0) { + size_t length = summary_surv_rates_len; + _summary_surv_rates = NEW_C_HEAP_ARRAY(NumberSeq*, length); + if (_summary_surv_rates == NULL) { + vm_exit_out_of_memory(sizeof(NumberSeq*) * length, + "Not enough space for surv rate summary"); + } + for (size_t i = 0; i < length; ++i) + _summary_surv_rates[i] = new NumberSeq(); + } + + start_adding_regions(); +} + + +void SurvRateGroup::reset() +{ + _all_regions_allocated = 0; + _scan_only_prefix = 0; + _setup_seq_num = 0; + _stats_arrays_length = 0; + _accum_surv_rate = 0.0; + _last_pred = 0.0; + // the following will set up the arrays with length 1 + _region_num = 1; + stop_adding_regions(); + guarantee( _stats_arrays_length == 1, "invariant" ); + guarantee( _surv_rate_pred[0] != NULL, "invariant" ); + _surv_rate_pred[0]->add(0.4); + all_surviving_words_recorded(false); + _region_num = 0; +} + + +void +SurvRateGroup::start_adding_regions() { + _setup_seq_num = _stats_arrays_length; + _region_num = _scan_only_prefix; + _accum_surv_rate = 0.0; + +#if 0 + gclog_or_tty->print_cr("[%s] start adding regions, seq num %d, length %d", + _name, _setup_seq_num, _region_num); +#endif // 0 +} + +void +SurvRateGroup::stop_adding_regions() { + +#if 0 + gclog_or_tty->print_cr("[%s] stop adding regions, length %d", _name, _region_num); +#endif // 0 + + if (_region_num > _stats_arrays_length) { + double* old_surv_rate = _surv_rate; + double* old_accum_surv_rate_pred = _accum_surv_rate_pred; + TruncatedSeq** old_surv_rate_pred = _surv_rate_pred; + + _surv_rate = NEW_C_HEAP_ARRAY(double, _region_num); + if (_surv_rate == NULL) { + vm_exit_out_of_memory(sizeof(double) * _region_num, + "Not enough space for surv rate array."); + } + _accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, _region_num); + if (_accum_surv_rate_pred == NULL) { + vm_exit_out_of_memory(sizeof(double) * _region_num, + "Not enough space for accum surv rate pred array."); + } + _surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, _region_num); + if (_surv_rate == NULL) { + vm_exit_out_of_memory(sizeof(TruncatedSeq*) * _region_num, + "Not enough space for surv rate pred array."); + } + + for (size_t i = 0; i < _stats_arrays_length; ++i) + _surv_rate_pred[i] = old_surv_rate_pred[i]; + +#if 0 + gclog_or_tty->print_cr("[%s] stop adding regions, new seqs %d to %d", + _name, _array_length, _region_num - 1); +#endif // 0 + + for (size_t i = _stats_arrays_length; i < _region_num; ++i) { + _surv_rate_pred[i] = new TruncatedSeq(10); + // _surv_rate_pred[i]->add(last_pred); + } + + _stats_arrays_length = _region_num; + + if (old_surv_rate != NULL) + FREE_C_HEAP_ARRAY(double, old_surv_rate); + if (old_accum_surv_rate_pred != NULL) + FREE_C_HEAP_ARRAY(double, old_accum_surv_rate_pred); + if (old_surv_rate_pred != NULL) + FREE_C_HEAP_ARRAY(NumberSeq*, old_surv_rate_pred); + } + + for (size_t i = 0; i < _stats_arrays_length; ++i) + _surv_rate[i] = 0.0; +} + +double +SurvRateGroup::accum_surv_rate(size_t adjustment) { + // we might relax this one in the future... 
+ guarantee( adjustment == 0 || adjustment == 1, "pre-condition" ); + + double ret = _accum_surv_rate; + if (adjustment > 0) { + TruncatedSeq* seq = get_seq(_region_num+1); + double surv_rate = _g1p->get_new_prediction(seq); + ret += surv_rate; + } + + return ret; +} + +int +SurvRateGroup::next_age_index() { + TruncatedSeq* seq = get_seq(_region_num); + double surv_rate = _g1p->get_new_prediction(seq); + _accum_surv_rate += surv_rate; + + ++_region_num; + return (int) ++_all_regions_allocated; +} + +void +SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) { + guarantee( scan_only_prefix <= _region_num, "pre-condition" ); + _scan_only_prefix = scan_only_prefix; +} + +void +SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) { + guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num, + "pre-condition" ); + guarantee( _surv_rate[age_in_group] <= 0.00001, + "should only update each slot once" ); + + double surv_rate = (double) surv_words / (double) HeapRegion::GrainWords; + _surv_rate[age_in_group] = surv_rate; + _surv_rate_pred[age_in_group]->add(surv_rate); + if ((size_t)age_in_group < _summary_surv_rates_len) { + _summary_surv_rates[age_in_group]->add(surv_rate); + if ((size_t)(age_in_group+1) > _summary_surv_rates_max_len) + _summary_surv_rates_max_len = age_in_group+1; + } +} + +void +SurvRateGroup::all_surviving_words_recorded(bool propagate) { + if (propagate && _region_num > 0) { // conservative + double surv_rate = _surv_rate_pred[_region_num-1]->last(); + +#if 0 + gclog_or_tty->print_cr("propagating %1.2lf from %d to %d", + surv_rate, _curr_length, _array_length - 1); +#endif // 0 + + for (size_t i = _region_num; i < _stats_arrays_length; ++i) { + guarantee( _surv_rate[i] <= 0.00001, + "the slot should not have been updated" ); + _surv_rate_pred[i]->add(surv_rate); + } + } + + double accum = 0.0; + double pred = 0.0; + for (size_t i = 0; i < _stats_arrays_length; ++i) { + pred = _g1p->get_new_prediction(_surv_rate_pred[i]); + if (pred > 1.0) pred = 1.0; + accum += pred; + _accum_surv_rate_pred[i] = accum; + // gclog_or_tty->print_cr("age %3d, accum %10.2lf", i, accum); + } + _last_pred = pred; +} + +#ifndef PRODUCT +void +SurvRateGroup::print() { + gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)", + _name, _region_num, _scan_only_prefix); + for (size_t i = 0; i < _region_num; ++i) { + gclog_or_tty->print_cr(" age %4d surv rate %6.2lf %% pred %6.2lf %%%s", + i, _surv_rate[i] * 100.0, + _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0, + (i < _scan_only_prefix) ? 
" S-O" : " "); + } +} + +void +SurvRateGroup::print_surv_rate_summary() { + size_t length = _summary_surv_rates_max_len; + if (length == 0) + return; + + gclog_or_tty->print_cr(""); + gclog_or_tty->print_cr("%s Rate Summary (for up to age %d)", _name, length-1); + gclog_or_tty->print_cr(" age range survival rate (avg) samples (avg)"); + gclog_or_tty->print_cr(" ---------------------------------------------------------"); + + size_t index = 0; + size_t limit = MIN2((int) length, 10); + while (index < limit) { + gclog_or_tty->print_cr(" %4d %6.2lf%% %6.2lf", + index, _summary_surv_rates[index]->avg() * 100.0, + (double) _summary_surv_rates[index]->num()); + ++index; + } + + gclog_or_tty->print_cr(" ---------------------------------------------------------"); + + int num = 0; + double sum = 0.0; + int samples = 0; + while (index < length) { + ++num; + sum += _summary_surv_rates[index]->avg() * 100.0; + samples += _summary_surv_rates[index]->num(); + ++index; + + if (index == length || num % 10 == 0) { + gclog_or_tty->print_cr(" %4d .. %4d %6.2lf%% %6.2lf", + (index-1) / 10 * 10, index-1, sum / (double) num, + (double) samples / (double) num); + sum = 0.0; + num = 0; + samples = 0; + } + } + + gclog_or_tty->print_cr(" ---------------------------------------------------------"); +} +#endif // PRODUCT --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/survRateGroup.hpp 2009-08-01 04:21:20.028709430 +0100 @@ -0,0 +1,102 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +class G1CollectorPolicy; + +class SurvRateGroup : public CHeapObj { +private: + G1CollectorPolicy* _g1p; + const char* _name; + + size_t _stats_arrays_length; + double* _surv_rate; + double* _accum_surv_rate_pred; + double _last_pred; + double _accum_surv_rate; + TruncatedSeq** _surv_rate_pred; + NumberSeq** _summary_surv_rates; + size_t _summary_surv_rates_len; + size_t _summary_surv_rates_max_len; + + int _all_regions_allocated; + size_t _region_num; + size_t _scan_only_prefix; + size_t _setup_seq_num; + +public: + SurvRateGroup(G1CollectorPolicy* g1p, + const char* name, + size_t summary_surv_rates_len); + void reset(); + void start_adding_regions(); + void stop_adding_regions(); + void record_scan_only_prefix(size_t scan_only_prefix); + void record_surviving_words(int age_in_group, size_t surv_words); + void all_surviving_words_recorded(bool propagate); + const char* name() { return _name; } + + size_t region_num() { return _region_num; } + size_t scan_only_length() { return _scan_only_prefix; } + double accum_surv_rate_pred(int age) { + assert(age >= 0, "must be"); + if ((size_t)age < _stats_arrays_length) + return _accum_surv_rate_pred[age]; + else { + double diff = (double) (age - _stats_arrays_length + 1); + return _accum_surv_rate_pred[_stats_arrays_length-1] + diff * _last_pred; + } + } + + double accum_surv_rate(size_t adjustment); + + TruncatedSeq* get_seq(size_t age) { + if (age >= _setup_seq_num) { + guarantee( _setup_seq_num > 0, "invariant" ); + age = _setup_seq_num-1; + } + TruncatedSeq* seq = _surv_rate_pred[age]; + guarantee( seq != NULL, "invariant" ); + return seq; + } + + int next_age_index(); + int age_in_group(int age_index) { + int ret = (int) (_all_regions_allocated - age_index); + assert( ret >= 0, "invariant" ); + return ret; + } + int recalculate_age_index(int age_index) { + int new_age_index = (int) _scan_only_prefix - age_in_group(age_index); + guarantee( new_age_index >= 0, "invariant" ); + return new_age_index; + } + void finished_recalculating_age_indexes() { + _all_regions_allocated = (int) _scan_only_prefix; + } + +#ifndef PRODUCT + void print(); + void print_surv_rate_summary(); +#endif // PRODUCT +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp 2009-08-01 04:21:20.445403276 +0100 @@ -0,0 +1,72 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +#include "incls/_precompiled.incl" +#include "incls/_vm_operations_g1.cpp.incl" + +void VM_G1CollectForAllocation::doit() { + JvmtiGCForAllocationMarker jgcm; + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + _res = g1h->satisfy_failed_allocation(_size); + assert(g1h->is_in_or_null(_res), "result not in heap"); +} + +void VM_G1CollectFull::doit() { + JvmtiGCFullMarker jgcm; + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + GCCauseSetter x(g1h, _gc_cause); + g1h->do_full_collection(false /* clear_all_soft_refs */); +} + +void VM_G1IncCollectionPause::doit() { + JvmtiGCForAllocationMarker jgcm; + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + GCCauseSetter x(g1h, GCCause::_g1_inc_collection_pause); + g1h->do_collection_pause_at_safepoint(); +} + +void VM_CGC_Operation::doit() { + gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); + TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); + TraceTime t(_printGCMessage, PrintGC, true, gclog_or_tty); + SharedHeap* sh = SharedHeap::heap(); + // This could go away if CollectedHeap gave access to _gc_is_active... + if (sh != NULL) { + IsGCActiveMark x; + _cl->do_void(); + } else { + _cl->do_void(); + } +} + +bool VM_CGC_Operation::doit_prologue() { + Heap_lock->lock(); + SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true; + return true; +} + +void VM_CGC_Operation::doit_epilogue() { + SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false; + Heap_lock->unlock(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp 2009-08-01 04:21:20.846521403 +0100 @@ -0,0 +1,100 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// VM_operations for the G1 collector. 
+// VM_GC_Operation: +// - VM_CGC_Operation +// - VM_G1CollectFull +// - VM_G1CollectForAllocation +// - VM_G1IncCollectionPause +// - VM_G1PopRegionCollectionPause + +class VM_G1CollectFull: public VM_GC_Operation { + private: + public: + VM_G1CollectFull(int gc_count_before, + GCCause::Cause gc_cause) + : VM_GC_Operation(gc_count_before) + { + _gc_cause = gc_cause; + } + ~VM_G1CollectFull() {} + virtual VMOp_Type type() const { return VMOp_G1CollectFull; } + virtual void doit(); + virtual const char* name() const { + return "full garbage-first collection"; + } +}; + +class VM_G1CollectForAllocation: public VM_GC_Operation { + private: + HeapWord* _res; + size_t _size; // size of object to be allocated + public: + VM_G1CollectForAllocation(size_t size, int gc_count_before) + : VM_GC_Operation(gc_count_before) { + _size = size; + _res = NULL; + } + ~VM_G1CollectForAllocation() {} + virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; } + virtual void doit(); + virtual const char* name() const { + return "garbage-first collection to satisfy allocation"; + } + HeapWord* result() { return _res; } +}; + +class VM_G1IncCollectionPause: public VM_GC_Operation { + public: + VM_G1IncCollectionPause(int gc_count_before) : + VM_GC_Operation(gc_count_before) {} + virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; } + virtual void doit(); + virtual const char* name() const { + return "garbage-first incremental collection pause"; + } +}; + +// Concurrent GC stop-the-world operations such as initial and final mark; +// consider sharing these with CMS's counterparts. +class VM_CGC_Operation: public VM_Operation { + VoidClosure* _cl; + const char* _printGCMessage; + public: + VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg) : + _cl(cl), + _printGCMessage(printGCMsg) + {} + + ~VM_CGC_Operation() {} + + virtual VMOp_Type type() const { return VMOp_CGC_Operation; } + virtual void doit(); + virtual bool doit_prologue(); + virtual void doit_epilogue(); + virtual const char* name() const { + return "concurrent gc"; + } +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1 2009-08-01 04:21:21.255366065 +0100 @@ -0,0 +1,354 @@ +// +// Copyright 2004-2008 Sun Microsystems, Inc. All Rights Reserved. +// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +// +// This code is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License version 2 only, as +// published by the Free Software Foundation. +// +// This code is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// version 2 for more details (a copy is included in the LICENSE file that +// accompanied this code). +// +// You should have received a copy of the GNU General Public License version +// 2 along with this work; if not, write to the Free Software Foundation, +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +// +// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, +// CA 95054 USA or visit www.sun.com if you need additional information or +// have any questions. +// +// + +// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps! 
+ +bufferingOopClosure.hpp genOopClosures.hpp +bufferingOopClosure.hpp generation.hpp +bufferingOopClosure.hpp os.hpp + +cardTableRS.cpp concurrentMark.hpp +cardTableRS.cpp g1SATBCardTableModRefBS.hpp + +collectionSetChooser.cpp g1CollectedHeap.inline.hpp +collectionSetChooser.cpp g1CollectorPolicy.hpp +collectionSetChooser.cpp collectionSetChooser.hpp + +collectionSetChooser.hpp heapRegion.hpp +collectionSetChooser.hpp growableArray.hpp + +concurrentG1Refine.cpp atomic.hpp +concurrentG1Refine.cpp concurrentG1Refine.hpp +concurrentG1Refine.cpp concurrentG1RefineThread.hpp +concurrentG1Refine.cpp copy.hpp +concurrentG1Refine.cpp g1CollectedHeap.inline.hpp +concurrentG1Refine.cpp g1RemSet.hpp + +concurrentG1Refine.hpp globalDefinitions.hpp +concurrentG1Refine.hpp allocation.hpp + +concurrentG1RefineThread.cpp concurrentG1Refine.hpp +concurrentG1RefineThread.cpp concurrentG1RefineThread.hpp +concurrentG1RefineThread.cpp g1CollectedHeap.inline.hpp +concurrentG1RefineThread.cpp g1CollectorPolicy.hpp +concurrentG1RefineThread.cpp handles.inline.hpp +concurrentG1RefineThread.cpp mutexLocker.hpp +concurrentG1RefineThread.cpp resourceArea.hpp + +concurrentG1RefineThread.hpp concurrentGCThread.hpp +concurrentG1RefineThread.hpp coTracker.hpp + +concurrentMark.cpp concurrentMark.hpp +concurrentMark.cpp concurrentMarkThread.inline.hpp +concurrentMark.cpp g1CollectedHeap.inline.hpp +concurrentMark.cpp g1CollectorPolicy.hpp +concurrentMark.cpp g1RemSet.hpp +concurrentMark.cpp gcOverheadReporter.hpp +concurrentMark.cpp genOopClosures.inline.hpp +concurrentMark.cpp heapRegionRemSet.hpp +concurrentMark.cpp heapRegionSeq.inline.hpp +concurrentMark.cpp handles.inline.hpp +concurrentMark.cpp java.hpp +concurrentMark.cpp oop.inline.hpp +concurrentMark.cpp referencePolicy.hpp +concurrentMark.cpp resourceArea.hpp +concurrentMark.cpp symbolTable.hpp + +concurrentMark.hpp coTracker.hpp +concurrentMark.hpp heapRegion.hpp +concurrentMark.hpp taskqueue.hpp + +concurrentMarkThread.cpp concurrentMarkThread.inline.hpp +concurrentMarkThread.cpp g1CollectedHeap.inline.hpp +concurrentMarkThread.cpp g1CollectorPolicy.hpp +concurrentMarkThread.cpp g1MMUTracker.hpp +concurrentMarkThread.cpp resourceArea.hpp +concurrentMarkThread.cpp vm_operations_g1.hpp +concurrentMarkThread.cpp vmThread.hpp + +concurrentMarkThread.hpp concurrentGCThread.hpp + +concurrentMarkThread.inline.hpp concurrentMark.hpp +concurrentMarkThread.inline.hpp concurrentMarkThread.hpp + +concurrentZFThread.cpp concurrentZFThread.hpp +concurrentZFThread.cpp heapRegion.hpp +concurrentZFThread.cpp g1CollectedHeap.inline.hpp +concurrentZFThread.cpp copy.hpp +concurrentZFThread.cpp mutexLocker.hpp +concurrentZFThread.cpp space.inline.hpp + +concurrentZFThread.hpp concurrentGCThread.hpp +concurrentZFThread.hpp coTracker.hpp + +dirtyCardQueue.cpp atomic.hpp +dirtyCardQueue.cpp dirtyCardQueue.hpp +dirtyCardQueue.cpp heapRegionRemSet.hpp +dirtyCardQueue.cpp mutexLocker.hpp +dirtyCardQueue.cpp ptrQueue.inline.hpp +dirtyCardQueue.cpp safepoint.hpp +dirtyCardQueue.cpp thread.hpp +dirtyCardQueue.cpp thread_.inline.hpp +dirtyCardQueue.cpp workgroup.hpp + +dirtyCardQueue.hpp allocation.hpp +dirtyCardQueue.hpp ptrQueue.hpp + +g1BlockOffsetTable.cpp g1BlockOffsetTable.inline.hpp +g1BlockOffsetTable.cpp java.hpp +g1BlockOffsetTable.cpp oop.inline.hpp +g1BlockOffsetTable.cpp space.hpp + +g1BlockOffsetTable.hpp globalDefinitions.hpp +g1BlockOffsetTable.hpp memRegion.hpp +g1BlockOffsetTable.hpp virtualspace.hpp + +g1BlockOffsetTable.inline.hpp g1BlockOffsetTable.hpp 
+g1BlockOffsetTable.inline.hpp space.hpp + +g1CollectedHeap.cpp aprofiler.hpp +g1CollectedHeap.cpp bufferingOopClosure.hpp +g1CollectedHeap.cpp concurrentG1Refine.hpp +g1CollectedHeap.cpp concurrentG1RefineThread.hpp +g1CollectedHeap.cpp concurrentMarkThread.inline.hpp +g1CollectedHeap.cpp concurrentZFThread.hpp +g1CollectedHeap.cpp g1CollectedHeap.inline.hpp +g1CollectedHeap.cpp g1CollectorPolicy.hpp +g1CollectedHeap.cpp g1MarkSweep.hpp +g1CollectedHeap.cpp g1RemSet.hpp +g1CollectedHeap.cpp g1OopClosures.inline.hpp +g1CollectedHeap.cpp genOopClosures.inline.hpp +g1CollectedHeap.cpp gcLocker.inline.hpp +g1CollectedHeap.cpp gcOverheadReporter.hpp +g1CollectedHeap.cpp generationSpec.hpp +g1CollectedHeap.cpp heapRegionRemSet.hpp +g1CollectedHeap.cpp heapRegionSeq.inline.hpp +g1CollectedHeap.cpp icBuffer.hpp +g1CollectedHeap.cpp isGCActiveMark.hpp +g1CollectedHeap.cpp oop.inline.hpp +g1CollectedHeap.cpp oop.pcgc.inline.hpp +g1CollectedHeap.cpp parGCAllocBuffer.hpp +g1CollectedHeap.cpp vm_operations_g1.hpp +g1CollectedHeap.cpp vmThread.hpp + +g1CollectedHeap.hpp barrierSet.hpp +g1CollectedHeap.hpp heapRegion.hpp +g1CollectedHeap.hpp memRegion.hpp +g1CollectedHeap.hpp sharedHeap.hpp + +g1CollectedHeap.inline.hpp concurrentMark.hpp +g1CollectedHeap.inline.hpp g1CollectedHeap.hpp +g1CollectedHeap.inline.hpp heapRegionSeq.hpp +g1CollectedHeap.inline.hpp taskqueue.hpp + +g1CollectorPolicy.cpp concurrentG1Refine.hpp +g1CollectorPolicy.cpp concurrentMark.hpp +g1CollectorPolicy.cpp concurrentMarkThread.inline.hpp +g1CollectorPolicy.cpp debug.hpp +g1CollectorPolicy.cpp java.hpp +g1CollectorPolicy.cpp g1CollectedHeap.inline.hpp +g1CollectorPolicy.cpp g1CollectorPolicy.hpp +g1CollectorPolicy.cpp heapRegionRemSet.hpp +g1CollectorPolicy.cpp mutexLocker.hpp +g1CollectorPolicy.cpp gcPolicyCounters.hpp + +g1CollectorPolicy.hpp collectorPolicy.hpp +g1CollectorPolicy.hpp collectionSetChooser.hpp +g1CollectorPolicy.hpp g1MMUTracker.hpp + +g1_globals.cpp g1_globals.hpp + +g1_globals.hpp globals.hpp + +globals.cpp g1_globals.hpp +top.hpp g1_globals.hpp + +g1MarkSweep.cpp aprofiler.hpp +g1MarkSweep.cpp biasedLocking.hpp +g1MarkSweep.cpp codeCache.hpp +g1MarkSweep.cpp events.hpp +g1MarkSweep.cpp fprofiler.hpp +g1MarkSweep.hpp g1CollectedHeap.inline.hpp +g1MarkSweep.cpp g1MarkSweep.hpp +g1MarkSweep.cpp gcLocker.hpp +g1MarkSweep.cpp genCollectedHeap.hpp +g1MarkSweep.hpp heapRegion.hpp +g1MarkSweep.cpp icBuffer.hpp +g1MarkSweep.cpp instanceRefKlass.hpp +g1MarkSweep.cpp javaClasses.hpp +g1MarkSweep.cpp jvmtiExport.hpp +g1MarkSweep.cpp copy.hpp +g1MarkSweep.cpp modRefBarrierSet.hpp +g1MarkSweep.cpp oop.inline.hpp +g1MarkSweep.cpp referencePolicy.hpp +g1MarkSweep.cpp space.hpp +g1MarkSweep.cpp symbolTable.hpp +g1MarkSweep.cpp synchronizer.hpp +g1MarkSweep.cpp systemDictionary.hpp +g1MarkSweep.cpp thread.hpp +g1MarkSweep.cpp vmSymbols.hpp +g1MarkSweep.cpp vmThread.hpp + +g1MarkSweep.hpp generation.hpp +g1MarkSweep.hpp growableArray.hpp +g1MarkSweep.hpp markOop.hpp +g1MarkSweep.hpp genMarkSweep.hpp +g1MarkSweep.hpp oop.hpp +g1MarkSweep.hpp timer.hpp +g1MarkSweep.hpp universe.hpp + +g1OopClosures.inline.hpp concurrentMark.hpp +g1OopClosures.inline.hpp g1OopClosures.hpp +g1OopClosures.inline.hpp g1CollectedHeap.hpp +g1OopClosures.inline.hpp g1RemSet.hpp + +g1MMUTracker.cpp g1MMUTracker.hpp +g1MMUTracker.cpp ostream.hpp +g1MMUTracker.cpp mutexLocker.hpp + +g1MMUTracker.hpp debug.hpp +g1MMUTracker.hpp allocation.hpp +g1RemSet.cpp bufferingOopClosure.hpp +g1RemSet.cpp concurrentG1Refine.hpp +g1RemSet.cpp 
concurrentG1RefineThread.hpp +g1RemSet.cpp g1BlockOffsetTable.inline.hpp +g1RemSet.cpp g1CollectedHeap.inline.hpp +g1RemSet.cpp g1CollectorPolicy.hpp +g1RemSet.cpp g1RemSet.inline.hpp +g1RemSet.cpp g1OopClosures.inline.hpp +g1RemSet.cpp heapRegionSeq.inline.hpp +g1RemSet.cpp intHisto.hpp +g1RemSet.cpp iterator.hpp +g1RemSet.cpp oop.inline.hpp + +g1RemSet.inline.hpp g1RemSet.hpp +g1RemSet.inline.hpp heapRegionRemSet.hpp + +g1SATBCardTableModRefBS.cpp g1SATBCardTableModRefBS.hpp +g1SATBCardTableModRefBS.cpp heapRegion.hpp +g1SATBCardTableModRefBS.cpp mutexLocker.hpp +g1SATBCardTableModRefBS.cpp thread.hpp +g1SATBCardTableModRefBS.cpp thread_.inline.hpp +g1SATBCardTableModRefBS.cpp satbQueue.hpp + +g1SATBCardTableModRefBS.hpp cardTableModRefBS.hpp +g1SATBCardTableModRefBS.hpp memRegion.hpp + +heapRegion.cpp concurrentZFThread.hpp +heapRegion.cpp g1BlockOffsetTable.inline.hpp +heapRegion.cpp g1CollectedHeap.inline.hpp +heapRegion.cpp g1OopClosures.inline.hpp +heapRegion.cpp genOopClosures.inline.hpp +heapRegion.cpp heapRegion.inline.hpp +heapRegion.cpp heapRegionRemSet.hpp +heapRegion.cpp heapRegionSeq.inline.hpp +heapRegion.cpp iterator.hpp +heapRegion.cpp oop.inline.hpp + +heapRegion.hpp space.hpp +heapRegion.hpp spaceDecorator.hpp +heapRegion.hpp g1BlockOffsetTable.inline.hpp +heapRegion.hpp watermark.hpp +heapRegion.hpp g1_specialized_oop_closures.hpp +heapRegion.hpp survRateGroup.hpp +heapRegion.hpp ageTable.hpp + +heapRegionRemSet.hpp sparsePRT.hpp + +heapRegionRemSet.cpp allocation.hpp +heapRegionRemSet.cpp bitMap.inline.hpp +heapRegionRemSet.cpp g1BlockOffsetTable.inline.hpp +heapRegionRemSet.cpp g1CollectedHeap.inline.hpp +heapRegionRemSet.cpp heapRegionRemSet.hpp +heapRegionRemSet.cpp heapRegionSeq.inline.hpp +heapRegionRemSet.cpp globalDefinitions.hpp +heapRegionRemSet.cpp space.inline.hpp + +heapRegionSeq.cpp allocation.hpp +heapRegionSeq.cpp g1CollectedHeap.inline.hpp +heapRegionSeq.cpp heapRegionSeq.hpp + +heapRegionSeq.hpp growableArray.hpp +heapRegionSeq.hpp heapRegion.hpp + +heapRegionSeq.inline.hpp heapRegionSeq.hpp + +klass.hpp g1OopClosures.hpp + +ptrQueue.cpp allocation.hpp +ptrQueue.cpp allocation.inline.hpp +ptrQueue.cpp mutex.hpp +ptrQueue.cpp mutexLocker.hpp +ptrQueue.cpp ptrQueue.hpp +ptrQueue.cpp ptrQueue.inline.hpp +ptrQueue.cpp thread_.inline.hpp + +ptrQueue.hpp allocation.hpp +ptrQueue.hpp sizes.hpp + +ptrQueue.inline.hpp ptrQueue.hpp + +satbQueue.cpp allocation.inline.hpp +satbQueue.cpp mutexLocker.hpp +satbQueue.cpp ptrQueue.inline.hpp +satbQueue.cpp satbQueue.hpp +satbQueue.cpp sharedHeap.hpp +satbQueue.cpp thread.hpp + +satbQueue.hpp ptrQueue.hpp + +sparsePRT.cpp allocation.inline.hpp +sparsePRT.cpp cardTableModRefBS.hpp +sparsePRT.cpp heapRegion.hpp +sparsePRT.cpp heapRegionRemSet.hpp +sparsePRT.cpp mutexLocker.hpp +sparsePRT.cpp sparsePRT.hpp +sparsePRT.cpp space.inline.hpp + +sparsePRT.hpp allocation.hpp +sparsePRT.hpp cardTableModRefBS.hpp +sparsePRT.hpp globalDefinitions.hpp +sparsePRT.hpp heapRegion.hpp +sparsePRT.hpp mutex.hpp + +specialized_oop_closures.hpp g1_specialized_oop_closures.hpp + +survRateGroup.hpp numberSeq.hpp + +survRateGroup.cpp allocation.hpp +survRateGroup.cpp g1CollectedHeap.inline.hpp +survRateGroup.cpp g1CollectorPolicy.hpp +survRateGroup.cpp heapRegion.hpp +survRateGroup.cpp survRateGroup.hpp + +thread.cpp concurrentMarkThread.inline.hpp + +universe.cpp g1CollectedHeap.inline.hpp +universe.cpp g1CollectorPolicy.hpp + +vm_operations_g1.hpp vmGCOperations.hpp + +vm_operations_g1.cpp vm_operations_g1.hpp +vm_operations_g1.cpp 
g1CollectedHeap.inline.hpp +vm_operations_g1.cpp isGCActiveMark.hpp --- old/hotspot/src/share/vm/memory/allocationStats.cpp 2009-08-01 04:21:21.780190952 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,34 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)allocationStats.cpp 1.6 07/05/05 17:05:42 JVM" -#endif -/* - * Copyright 2005 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -# include "incls/_precompiled.incl" -# include "incls/_allocationStats.cpp.incl" - -// Technically this should be derived from machine speed, and -// ideally it would be dynamically adjusted. -float AllocationStats::_threshold = ((float)CMS_SweepTimerThresholdMillis)/1000; - --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/allocationStats.cpp 2009-08-01 04:21:21.703185485 +0100 @@ -0,0 +1,34 @@ +#ifdef USE_PRAGMA_IDENT_SRC +#pragma ident "@(#)allocationStats.cpp 1.6 07/05/05 17:05:42 JVM" +#endif +/* + * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +# include "incls/_precompiled.incl" +# include "incls/_allocationStats.cpp.incl" + +// Technically this should be derived from machine speed, and +// ideally it would be dynamically adjusted. 
+float AllocationStats::_threshold = ((float)CMS_SweepTimerThresholdMillis)/1000; + --- old/hotspot/src/share/vm/memory/allocationStats.hpp 2009-08-01 04:21:22.620106205 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,139 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)allocationStats.hpp 1.19 07/05/05 17:05:41 JVM" -#endif -/* - * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -class AllocationStats VALUE_OBJ_CLASS_SPEC { - // A duration threshold (in ms) used to filter - // possibly unreliable samples. - static float _threshold; - - // We measure the demand between the end of the previous sweep and - // beginning of this sweep: - // Count(end_last_sweep) - Count(start_this_sweep) - // + splitBirths(between) - splitDeaths(between) - // The above number divided by the time since the start [END???] of the - // previous sweep gives us a time rate of demand for blocks - // of this size. We compute a padded average of this rate as - // our current estimate for the time rate of demand for blocks - // of this size. Similarly, we keep a padded average for the time - // between sweeps. Our current estimate for demand for blocks of - // this size is then simply computed as the product of these two - // estimates. - AdaptivePaddedAverage _demand_rate_estimate; - - ssize_t _desired; // Estimate computed as described above - ssize_t _coalDesired; // desired +/- small-percent for tuning coalescing - - ssize_t _surplus; // count - (desired +/- small-percent), - // used to tune splitting in best fit - ssize_t _bfrSurp; // surplus at start of current sweep - ssize_t _prevSweep; // count from end of previous sweep - ssize_t _beforeSweep; // count from before current sweep - ssize_t _coalBirths; // additional chunks from coalescing - ssize_t _coalDeaths; // loss from coalescing - ssize_t _splitBirths; // additional chunks from splitting - ssize_t _splitDeaths; // loss from splitting - size_t _returnedBytes; // number of bytes returned to list. - public: - void initialize() { - AdaptivePaddedAverage* dummy = - new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight, - CMS_FLSPadding); - _desired = 0; - _coalDesired = 0; - _surplus = 0; - _bfrSurp = 0; - _prevSweep = 0; - _beforeSweep = 0; - _coalBirths = 0; - _coalDeaths = 0; - _splitBirths = 0; - _splitDeaths = 0; - _returnedBytes = 0; - } - - AllocationStats() { - initialize(); - } - // The rate estimate is in blocks per second. 
- void compute_desired(size_t count, - float inter_sweep_current, - float inter_sweep_estimate) { - // If the latest inter-sweep time is below our granularity - // of measurement, we may call in here with - // inter_sweep_current == 0. However, even for suitably small - // but non-zero inter-sweep durations, we may not trust the accuracy - // of accumulated data, since it has not been "integrated" - // (read "low-pass-filtered") long enough, and would be - // vulnerable to noisy glitches. In such cases, we - // ignore the current sample and use currently available - // historical estimates. - if (inter_sweep_current > _threshold) { - ssize_t demand = prevSweep() - count + splitBirths() - splitDeaths(); - float rate = ((float)demand)/inter_sweep_current; - _demand_rate_estimate.sample(rate); - _desired = (ssize_t)(_demand_rate_estimate.padded_average() - *inter_sweep_estimate); - } - } - - ssize_t desired() const { return _desired; } - ssize_t coalDesired() const { return _coalDesired; } - void set_coalDesired(ssize_t v) { _coalDesired = v; } - - ssize_t surplus() const { return _surplus; } - void set_surplus(ssize_t v) { _surplus = v; } - void increment_surplus() { _surplus++; } - void decrement_surplus() { _surplus--; } - - ssize_t bfrSurp() const { return _bfrSurp; } - void set_bfrSurp(ssize_t v) { _bfrSurp = v; } - ssize_t prevSweep() const { return _prevSweep; } - void set_prevSweep(ssize_t v) { _prevSweep = v; } - ssize_t beforeSweep() const { return _beforeSweep; } - void set_beforeSweep(ssize_t v) { _beforeSweep = v; } - - ssize_t coalBirths() const { return _coalBirths; } - void set_coalBirths(ssize_t v) { _coalBirths = v; } - void increment_coalBirths() { _coalBirths++; } - - ssize_t coalDeaths() const { return _coalDeaths; } - void set_coalDeaths(ssize_t v) { _coalDeaths = v; } - void increment_coalDeaths() { _coalDeaths++; } - - ssize_t splitBirths() const { return _splitBirths; } - void set_splitBirths(ssize_t v) { _splitBirths = v; } - void increment_splitBirths() { _splitBirths++; } - - ssize_t splitDeaths() const { return _splitDeaths; } - void set_splitDeaths(ssize_t v) { _splitDeaths = v; } - void increment_splitDeaths() { _splitDeaths++; } - - NOT_PRODUCT( - size_t returnedBytes() const { return _returnedBytes; } - void set_returnedBytes(size_t v) { _returnedBytes = v; } - ) -}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp 2009-08-01 04:21:22.537910734 +0100 @@ -0,0 +1,141 @@ +#ifdef USE_PRAGMA_IDENT_HDR +#pragma ident "@(#)allocationStats.hpp 1.19 07/05/05 17:05:41 JVM" +#endif +/* + * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class AllocationStats VALUE_OBJ_CLASS_SPEC { + // A duration threshold (in ms) used to filter + // possibly unreliable samples. + static float _threshold; + + // We measure the demand between the end of the previous sweep and + // beginning of this sweep: + // Count(end_last_sweep) - Count(start_this_sweep) + // + splitBirths(between) - splitDeaths(between) + // The above number divided by the time since the start [END???] of the + // previous sweep gives us a time rate of demand for blocks + // of this size. We compute a padded average of this rate as + // our current estimate for the time rate of demand for blocks + // of this size. Similarly, we keep a padded average for the time + // between sweeps. Our current estimate for demand for blocks of + // this size is then simply computed as the product of these two + // estimates. + AdaptivePaddedAverage _demand_rate_estimate; + + ssize_t _desired; // Estimate computed as described above + ssize_t _coalDesired; // desired +/- small-percent for tuning coalescing + + ssize_t _surplus; // count - (desired +/- small-percent), + // used to tune splitting in best fit + ssize_t _bfrSurp; // surplus at start of current sweep + ssize_t _prevSweep; // count from end of previous sweep + ssize_t _beforeSweep; // count from before current sweep + ssize_t _coalBirths; // additional chunks from coalescing + ssize_t _coalDeaths; // loss from coalescing + ssize_t _splitBirths; // additional chunks from splitting + ssize_t _splitDeaths; // loss from splitting + size_t _returnedBytes; // number of bytes returned to list. + public: + void initialize() { + AdaptivePaddedAverage* dummy = + new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight, + CMS_FLSPadding); + _desired = 0; + _coalDesired = 0; + _surplus = 0; + _bfrSurp = 0; + _prevSweep = 0; + _beforeSweep = 0; + _coalBirths = 0; + _coalDeaths = 0; + _splitBirths = 0; + _splitDeaths = 0; + _returnedBytes = 0; + } + + AllocationStats() { + initialize(); + } + // The rate estimate is in blocks per second. + void compute_desired(size_t count, + float inter_sweep_current, + float inter_sweep_estimate) { + // If the latest inter-sweep time is below our granularity + // of measurement, we may call in here with + // inter_sweep_current == 0. However, even for suitably small + // but non-zero inter-sweep durations, we may not trust the accuracy + // of accumulated data, since it has not been "integrated" + // (read "low-pass-filtered") long enough, and would be + // vulnerable to noisy glitches. In such cases, we + // ignore the current sample and use currently available + // historical estimates. 
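To make the estimate above concrete (the figures are invented for illustration): if the previous sweep ended with prevSweep() == 500 free blocks of this size, the current sweep starts with count == 420, and there were 30 split births and 10 split deaths in between, the demand is 500 - 420 + 30 - 10 = 100 blocks. Over an inter-sweep time of 2.0 s that is a rate of 50 blocks/s, and with a predicted inter-sweep duration of 2.5 s the desired count comes out near 125 blocks (before the padded-average smoothing applied by _demand_rate_estimate).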
+ if (inter_sweep_current > _threshold) { + ssize_t demand = prevSweep() - count + splitBirths() - splitDeaths(); + float rate = ((float)demand)/inter_sweep_current; + _demand_rate_estimate.sample(rate); + _desired = (ssize_t)(_demand_rate_estimate.padded_average() + *inter_sweep_estimate); + } + } + + ssize_t desired() const { return _desired; } + void set_desired(ssize_t v) { _desired = v; } + + ssize_t coalDesired() const { return _coalDesired; } + void set_coalDesired(ssize_t v) { _coalDesired = v; } + + ssize_t surplus() const { return _surplus; } + void set_surplus(ssize_t v) { _surplus = v; } + void increment_surplus() { _surplus++; } + void decrement_surplus() { _surplus--; } + + ssize_t bfrSurp() const { return _bfrSurp; } + void set_bfrSurp(ssize_t v) { _bfrSurp = v; } + ssize_t prevSweep() const { return _prevSweep; } + void set_prevSweep(ssize_t v) { _prevSweep = v; } + ssize_t beforeSweep() const { return _beforeSweep; } + void set_beforeSweep(ssize_t v) { _beforeSweep = v; } + + ssize_t coalBirths() const { return _coalBirths; } + void set_coalBirths(ssize_t v) { _coalBirths = v; } + void increment_coalBirths() { _coalBirths++; } + + ssize_t coalDeaths() const { return _coalDeaths; } + void set_coalDeaths(ssize_t v) { _coalDeaths = v; } + void increment_coalDeaths() { _coalDeaths++; } + + ssize_t splitBirths() const { return _splitBirths; } + void set_splitBirths(ssize_t v) { _splitBirths = v; } + void increment_splitBirths() { _splitBirths++; } + + ssize_t splitDeaths() const { return _splitDeaths; } + void set_splitDeaths(ssize_t v) { _splitDeaths = v; } + void increment_splitDeaths() { _splitDeaths++; } + + NOT_PRODUCT( + size_t returnedBytes() const { return _returnedBytes; } + void set_returnedBytes(size_t v) { _returnedBytes = v; } + ) +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/coTracker.cpp 2009-08-01 04:21:23.396196996 +0100 @@ -0,0 +1,189 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +# include "incls/_precompiled.incl" +# include "incls/_coTracker.cpp.incl" + +COTracker* COTracker::_head = NULL; +double COTracker::_cpu_number = -1.0; + +void +COTracker::resetPeriod(double now_sec, double vnow_sec) { + guarantee( _enabled, "invariant" ); + _period_start_time_sec = now_sec; + _period_start_vtime_sec = vnow_sec; +} + +void +COTracker::setConcOverhead(double time_stamp_sec, + double conc_overhead) { + guarantee( _enabled, "invariant" ); + _conc_overhead = conc_overhead; + _time_stamp_sec = time_stamp_sec; + if (conc_overhead > 0.001) + _conc_overhead_seq.add(conc_overhead); +} + +void +COTracker::reset(double starting_conc_overhead) { + guarantee( _enabled, "invariant" ); + double now_sec = os::elapsedTime(); + setConcOverhead(now_sec, starting_conc_overhead); +} + +void +COTracker::start() { + guarantee( _enabled, "invariant" ); + resetPeriod(os::elapsedTime(), os::elapsedVTime()); +} + +void +COTracker::update(bool force_end) { + assert( _enabled, "invariant" ); + double end_time_sec = os::elapsedTime(); + double elapsed_time_sec = end_time_sec - _period_start_time_sec; + if (force_end || elapsed_time_sec > _update_period_sec) { + // reached the end of the period + double end_vtime_sec = os::elapsedVTime(); + double elapsed_vtime_sec = end_vtime_sec - _period_start_vtime_sec; + + double conc_overhead = elapsed_vtime_sec / elapsed_time_sec; + + setConcOverhead(end_time_sec, conc_overhead); + resetPeriod(end_time_sec, end_vtime_sec); + } +} + +void +COTracker::updateForSTW(double start_sec, double end_sec) { + if (!_enabled) + return; + + // During a STW pause, no concurrent GC thread has done any + // work. So, we can safely adjust the start of the current period by + // adding the duration of the STW pause to it, so that the STW pause + // doesn't affect the reading of the concurrent overhead (it's + // basically like excluding the time of the STW pause from the + // concurrent overhead calculation). + + double stw_duration_sec = end_sec - start_sec; + guarantee( stw_duration_sec > 0.0, "invariant" ); + + if (outOfDate(start_sec)) + _conc_overhead = 0.0; + else + _time_stamp_sec = end_sec; + _period_start_time_sec += stw_duration_sec; + _conc_overhead_seq = NumberSeq(); + + guarantee( os::elapsedTime() > _period_start_time_sec, "invariant" ); +} + +double +COTracker::predConcOverhead() { + if (_enabled) { + // tty->print(" %1.2lf", _conc_overhead_seq.maximum()); + return _conc_overhead_seq.maximum(); + } else { + // tty->print(" DD"); + return 0.0; + } +} + +void +COTracker::resetPred() { + _conc_overhead_seq = NumberSeq(); +} + +COTracker::COTracker(int group) + : _enabled(false), + _group(group), + _period_start_time_sec(-1.0), + _period_start_vtime_sec(-1.0), + _conc_overhead(-1.0), + _time_stamp_sec(-1.0), + _next(NULL) { + // GCOverheadReportingPeriodMS indicates how frequently the + // concurrent overhead will be recorded by the GC Overhead + // Reporter. We want to take readings less often than that. If we + // took readings more often than some of them might be lost. 
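Taken together, the methods above are meant to be driven by the tracked thread itself. A minimal usage sketch follows; the work loop, the group id and the helper names are hypothetical, not part of this change.

  // Sketch only: a concurrent GC thread driving its own tracker.
  COTracker _co_tracker(0);                  // group 0, e.g. "concurrent marking"

  void concurrent_work_loop() {
    _co_tracker.enable();
    _co_tracker.reset();                     // start from a 0.0 overhead reading
    _co_tracker.start();                     // open the first measurement period
    while (more_work_to_do()) {              // hypothetical termination test
      do_a_little_work();                    // hypothetical unit of concurrent work
      _co_tracker.update();                  // rolls the period over once it is long enough
    }
    _co_tracker.update(true /* force_end */);
    _co_tracker.disable();
  }

  // A monitoring thread can then sample the combined overhead of all live
  // trackers with COTracker::totalConcOverhead(os::elapsedTime()).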
+ _update_period_sec = ((double) GCOverheadReportingPeriodMS) / 1000.0 * 1.25; + _next = _head; + _head = this; + + if (_cpu_number < 0.0) + _cpu_number = (double) os::processor_count(); +} + +// statics + +void +COTracker::updateAllForSTW(double start_sec, double end_sec) { + for (COTracker* curr = _head; curr != NULL; curr = curr->_next) { + curr->updateForSTW(start_sec, end_sec); + } +} + +double +COTracker::totalConcOverhead(double now_sec) { + double total_conc_overhead = 0.0; + + for (COTracker* curr = _head; curr != NULL; curr = curr->_next) { + double conc_overhead = curr->concOverhead(now_sec); + total_conc_overhead += conc_overhead; + } + + return total_conc_overhead; +} + +double +COTracker::totalConcOverhead(double now_sec, + size_t group_num, + double* co_per_group) { + double total_conc_overhead = 0.0; + + for (size_t i = 0; i < group_num; ++i) + co_per_group[i] = 0.0; + + for (COTracker* curr = _head; curr != NULL; curr = curr->_next) { + size_t group = curr->_group; + assert( 0 <= group && group < group_num, "invariant" ); + double conc_overhead = curr->concOverhead(now_sec); + + co_per_group[group] += conc_overhead; + total_conc_overhead += conc_overhead; + } + + return total_conc_overhead; +} + +double +COTracker::totalPredConcOverhead() { + double total_pred_conc_overhead = 0.0; + for (COTracker* curr = _head; curr != NULL; curr = curr->_next) { + total_pred_conc_overhead += curr->predConcOverhead(); + curr->resetPred(); + } + return total_pred_conc_overhead / _cpu_number; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/coTracker.hpp 2009-08-01 04:21:23.814949981 +0100 @@ -0,0 +1,181 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// COTracker keeps track of the concurrent overhead of a GC thread. + +// A thread that needs to be tracked must, itself, start up its +// tracker with the start() method and then call the update() method +// at regular intervals. What the tracker does is to calculate the +// concurrent overhead of a process at a given update period. The +// tracker starts and when is detects that it has exceeded the given +// period, it calculates the duration of the period in wall-clock time +// and the duration of the period in vtime (i.e. how much time the +// concurrent processes really took up during this period). The ratio +// of the latter over the former is the concurrent overhead of that +// process for that period over a single CPU. 
This overhead is stored +// on the tracker, "timestamped" with the wall-clock time of the end +// of the period. When the concurrent overhead of this process needs +// to be queried, this last "reading" provides a good approximation +// (we assume that the concurrent overhead of a particular thread +// stays largely constant over time). The timestamp is necessary to +// detect when the process has stopped working and the recorded +// reading hasn't been updated for some time. + +// Each concurrent GC thread is considered to be part of a "group" +// (i.e. any available concurrent marking threads are part of the +// "concurrent marking thread group"). A COTracker is associated with +// a single group at construction-time. It's up to each collector to +// decide how groups will be mapped to such an id (ids should start +// from 0 and be consecutive; there's a hardcoded max group num +// defined on the GCOverheadTracker class). The notion of a group has +// been introduced to be able to identify how much overhead was +// imposed by each group, instead of getting a single value that +// covers all concurrent overhead. + +class COTracker { +private: + // It indicates whether this tracker is enabled or not. When the + // tracker is disabled, then it returns 0.0 as the latest concurrent + // overhead and several methods (reset, start, and update) are not + // supposed to be called on it. This enabling / disabling facility + // is really provided to make a bit more explicit in the code when a + // particulary tracker of a processes that doesn't run all the time + // (e.g. concurrent marking) is supposed to be used and not it's not. + bool _enabled; + + // The ID of the group associated with this tracker. + int _group; + + // The update period of the tracker. A new value for the concurrent + // overhead of the associated process will be made at intervals no + // smaller than this. + double _update_period_sec; + + // The start times (both wall-block time and vtime) of the current + // interval. + double _period_start_time_sec; + double _period_start_vtime_sec; + + // Number seq of the concurrent overhead readings within a period + NumberSeq _conc_overhead_seq; + + // The latest reading of the concurrent overhead (over a single CPU) + // imposed by the associated concurrent thread, made available at + // the indicated wall-clock time. + double _conc_overhead; + double _time_stamp_sec; + + // The number of CPUs that the host machine has (for convenience + // really, as we'd have to keep translating it into a double) + static double _cpu_number; + + // Fields that keep a list of all trackers created. This is useful, + // since it allows us to sum up the concurrent overhead without + // having to write code for a specific collector to broadcast a + // request to all its concurrent processes. + COTracker* _next; + static COTracker* _head; + + // It indicates that a new period is starting by updating the + // _period_start_time_sec and _period_start_vtime_sec fields. + void resetPeriod(double now_sec, double vnow_sec); + // It updates the latest concurrent overhead reading, taken at a + // given wall-clock time. + void setConcOverhead(double time_stamp_sec, double conc_overhead); + + // It determines whether the time stamp of the latest concurrent + // overhead reading is out of date or not. + bool outOfDate(double now_sec) { + // The latest reading is considered out of date, if it was taken + // 1.2x the update period. 
+ return (now_sec - _time_stamp_sec) > 1.2 * _update_period_sec; + } + +public: + // The constructor which associates the tracker with a group ID. + COTracker(int group); + + // Methods to enable / disable the tracker and query whether it is enabled. + void enable() { _enabled = true; } + void disable() { _enabled = false; } + bool enabled() { return _enabled; } + + // It resets the tracker and sets concurrent overhead reading to be + // the given parameter and the associated time stamp to be now. + void reset(double starting_conc_overhead = 0.0); + // The tracker starts tracking. IT should only be called from the + // concurrent thread that is tracked by this tracker. + void start(); + // It updates the tracker and, if the current period is longer than + // the update period, the concurrent overhead reading will be + // updated. force_end being true indicates that it's the last call + // to update() by this process before the tracker is disabled (the + // tracker can be re-enabled later if necessary). It should only be + // called from the concurrent thread that is tracked by this tracker + // and while the thread has joined the STS. + void update(bool force_end = false); + // It adjusts the contents of the tracker to take into account a STW + // pause. + void updateForSTW(double start_sec, double end_sec); + + // It returns the last concurrent overhead reading over a single + // CPU. If the reading is out of date, or the tracker is disabled, + // it returns 0.0. + double concCPUOverhead(double now_sec) { + if (!_enabled || outOfDate(now_sec)) + return 0.0; + else + return _conc_overhead; + } + + // It returns the last concurrent overhead reading over all CPUs + // that the host machine has. If the reading is out of date, or the + // tracker is disabled, it returns 0.0. + double concOverhead(double now_sec) { + return concCPUOverhead(now_sec) / _cpu_number; + } + + double predConcOverhead(); + + void resetPred(); + + // statics + + // It notifies all trackers about a STW pause. + static void updateAllForSTW(double start_sec, double end_sec); + + // It returns the sum of the concurrent overhead readings of all + // available (and enabled) trackers for the given time stamp. The + // overhead is over all the CPUs of the host machine. + + static double totalConcOverhead(double now_sec); + // Like the previous method, but it also sums up the overheads per + // group number. The length of the co_per_group array must be at + // least as large group_num + static double totalConcOverhead(double now_sec, + size_t group_num, + double* co_per_group); + + static double totalPredConcOverhead(); +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp 2009-08-01 04:21:24.216418083 +0100 @@ -0,0 +1,314 @@ +/* + * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// CopyrightVersion 1.2 + +# include "incls/_precompiled.incl" +# include "incls/_concurrentGCThread.cpp.incl" + +bool ConcurrentGCThread::_should_terminate = false; +bool ConcurrentGCThread::_has_terminated = false; +int ConcurrentGCThread::_CGC_flag = CGC_nil; + +SuspendibleThreadSet ConcurrentGCThread::_sts; + +ConcurrentGCThread::ConcurrentGCThread() { + _sts.initialize(); +}; + +void ConcurrentGCThread::stopWorldAndDo(VoidClosure* op) { + MutexLockerEx x(Heap_lock, + Mutex::_no_safepoint_check_flag); + // warning("CGC: about to try stopping world"); + SafepointSynchronize::begin(); + // warning("CGC: successfully stopped world"); + op->do_void(); + SafepointSynchronize::end(); + // warning("CGC: successfully restarted world"); +} + +void ConcurrentGCThread::safepoint_synchronize() { + _sts.suspend_all(); +} + +void ConcurrentGCThread::safepoint_desynchronize() { + _sts.resume_all(); +} + +void ConcurrentGCThread::create_and_start() { + if (os::create_thread(this, os::cgc_thread)) { + // XXX: need to set this to low priority + // unless "agressive mode" set; priority + // should be just less than that of VMThread. + os::set_priority(this, NearMaxPriority); + if (!_should_terminate && !DisableStartThread) { + os::start_thread(this); + } + } +} + +void ConcurrentGCThread::initialize_in_thread() { + this->record_stack_base_and_size(); + this->initialize_thread_local_storage(); + this->set_active_handles(JNIHandleBlock::allocate_block()); + // From this time Thread::current() should be working. + assert(this == Thread::current(), "just checking"); +} + +void ConcurrentGCThread::wait_for_universe_init() { + MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); + while (!is_init_completed() && !_should_terminate) { + CGC_lock->wait(Mutex::_no_safepoint_check_flag, 200); + } +} + +void ConcurrentGCThread::terminate() { + // Signal that it is terminated + { + MutexLockerEx mu(Terminator_lock, + Mutex::_no_safepoint_check_flag); + _has_terminated = true; + Terminator_lock->notify(); + } + + // Thread destructor usually does this.. 
+ ThreadLocalStorage::set_thread(NULL); +} + + +void SuspendibleThreadSet::initialize_work() { + MutexLocker x(STS_init_lock); + if (!_initialized) { + _m = new Monitor(Mutex::leaf, + "SuspendibleThreadSetLock", true); + _async = 0; + _async_stop = false; + _async_stopped = 0; + _initialized = true; + } +} + +void SuspendibleThreadSet::join() { + initialize(); + MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); + while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag); + _async++; + assert(_async > 0, "Huh."); +} + +void SuspendibleThreadSet::leave() { + assert(_initialized, "Must be initialized."); + MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); + _async--; + assert(_async >= 0, "Huh."); + if (_async_stop) _m->notify_all(); +} + +void SuspendibleThreadSet::yield(const char* id) { + assert(_initialized, "Must be initialized."); + if (_async_stop) { + MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); + if (_async_stop) { + _async_stopped++; + assert(_async_stopped > 0, "Huh."); + if (_async_stopped == _async) { + if (ConcGCYieldTimeout > 0) { + double now = os::elapsedTime(); + guarantee((now - _suspend_all_start) * 1000.0 < + (double)ConcGCYieldTimeout, + "Long delay; whodunit?"); + } + } + _m->notify_all(); + while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag); + _async_stopped--; + assert(_async >= 0, "Huh"); + _m->notify_all(); + } + } +} + +void SuspendibleThreadSet::suspend_all() { + initialize(); // If necessary. + if (ConcGCYieldTimeout > 0) { + _suspend_all_start = os::elapsedTime(); + } + MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); + assert(!_async_stop, "Only one at a time."); + _async_stop = true; + while (_async_stopped < _async) _m->wait(Mutex::_no_safepoint_check_flag); +} + +void SuspendibleThreadSet::resume_all() { + assert(_initialized, "Must be initialized."); + MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); + assert(_async_stopped == _async, "Huh."); + _async_stop = false; + _m->notify_all(); +} + +static void _sltLoop(JavaThread* thread, TRAPS) { + SurrogateLockerThread* slt = (SurrogateLockerThread*)thread; + slt->loop(); +} + +SurrogateLockerThread::SurrogateLockerThread() : + JavaThread(&_sltLoop), + _monitor(Mutex::nonleaf, "SLTMonitor"), + _buffer(empty) +{} + +SurrogateLockerThread* SurrogateLockerThread::make(TRAPS) { + klassOop k = + SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_Thread(), + true, CHECK_NULL); + instanceKlassHandle klass (THREAD, k); + instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_NULL); + + const char thread_name[] = "Surrogate Locker Thread (CMS)"; + Handle string = java_lang_String::create_from_str(thread_name, CHECK_NULL); + + // Initialize thread_oop to put it into the system threadGroup + Handle thread_group (THREAD, Universe::system_thread_group()); + JavaValue result(T_VOID); + JavaCalls::call_special(&result, thread_oop, + klass, + vmSymbolHandles::object_initializer_name(), + vmSymbolHandles::threadgroup_string_void_signature(), + thread_group, + string, + CHECK_NULL); + + SurrogateLockerThread* res; + { + MutexLocker mu(Threads_lock); + res = new SurrogateLockerThread(); + + // At this point it may be possible that no osthread was created for the + // JavaThread due to lack of memory. We would have to throw an exception + // in that case. However, since this must work and we do not allow + // exceptions anyway, check and abort if this fails. 
+ if (res == NULL || res->osthread() == NULL) { + vm_exit_during_initialization("java.lang.OutOfMemoryError", + "unable to create new native thread"); + } + java_lang_Thread::set_thread(thread_oop(), res); + java_lang_Thread::set_priority(thread_oop(), NearMaxPriority); + java_lang_Thread::set_daemon(thread_oop()); + + res->set_threadObj(thread_oop()); + Threads::add(res); + Thread::start(res); + } + os::yield(); // This seems to help with initial start-up of SLT + return res; +} + +void SurrogateLockerThread::manipulatePLL(SLT_msg_type msg) { + MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag); + assert(_buffer == empty, "Should be empty"); + assert(msg != empty, "empty message"); + _buffer = msg; + while (_buffer != empty) { + _monitor.notify(); + _monitor.wait(Mutex::_no_safepoint_check_flag); + } +} + +// ======= Surrogate Locker Thread ============= + +void SurrogateLockerThread::loop() { + BasicLock pll_basic_lock; + SLT_msg_type msg; + debug_only(unsigned int owned = 0;) + + while (/* !isTerminated() */ 1) { + { + MutexLocker x(&_monitor); + // Since we are a JavaThread, we can't be here at a safepoint. + assert(!SafepointSynchronize::is_at_safepoint(), + "SLT is a JavaThread"); + // wait for msg buffer to become non-empty + while (_buffer == empty) { + _monitor.notify(); + _monitor.wait(); + } + msg = _buffer; + } + switch(msg) { + case acquirePLL: { + instanceRefKlass::acquire_pending_list_lock(&pll_basic_lock); + debug_only(owned++;) + break; + } + case releaseAndNotifyPLL: { + assert(owned > 0, "Don't have PLL"); + instanceRefKlass::release_and_notify_pending_list_lock(&pll_basic_lock); + debug_only(owned--;) + break; + } + case empty: + default: { + guarantee(false,"Unexpected message in _buffer"); + break; + } + } + { + MutexLocker x(&_monitor); + // Since we are a JavaThread, we can't be here at a safepoint. + assert(!SafepointSynchronize::is_at_safepoint(), + "SLT is a JavaThread"); + _buffer = empty; + _monitor.notify(); + } + } + assert(!_monitor.owned_by_self(), "Should unlock before exit."); +} + + +// ===== STS Access From Outside CGCT ===== + +void ConcurrentGCThread::stsYield(const char* id) { + assert( Thread::current()->is_ConcurrentGC_thread(), + "only a conc GC thread can call this" ); + _sts.yield(id); +} + +bool ConcurrentGCThread::stsShouldYield() { + assert( Thread::current()->is_ConcurrentGC_thread(), + "only a conc GC thread can call this" ); + return _sts.should_yield(); +} + +void ConcurrentGCThread::stsJoin() { + assert( Thread::current()->is_ConcurrentGC_thread(), + "only a conc GC thread can call this" ); + _sts.join(); +} + +void ConcurrentGCThread::stsLeave() { + assert( Thread::current()->is_ConcurrentGC_thread(), + "only a conc GC thread can call this" ); + _sts.leave(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp 2009-08-01 04:21:24.630065870 +0100 @@ -0,0 +1,167 @@ +/* + * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class VoidClosure; + +// A SuspendibleThreadSet is (obviously) a set of threads that can be +// suspended. A thread can join and later leave the set, and periodically +// yield. If some thread (not in the set) requests, via suspend_all, that +// the threads be suspended, then the requesting thread is blocked until +// all the threads in the set have yielded or left the set. (Threads may +// not enter the set when an attempted suspension is in progress.) The +// suspending thread later calls resume_all, allowing the suspended threads +// to continue. + +class SuspendibleThreadSet { + Monitor* _m; + int _async; + bool _async_stop; + int _async_stopped; + bool _initialized; + double _suspend_all_start; + + void initialize_work(); + + public: + SuspendibleThreadSet() : _initialized(false) {} + + // Add the current thread to the set. May block if a suspension + // is in progress. + void join(); + // Removes the current thread from the set. + void leave(); + // Returns "true" iff an suspension is in progress. + bool should_yield() { return _async_stop; } + // Suspends the current thread if a suspension is in progress (for + // the duration of the suspension.) + void yield(const char* id); + // Return when all threads in the set are suspended. + void suspend_all(); + // Allow suspended threads to resume. + void resume_all(); + // Redundant initializations okay. + void initialize() { + // Double-check dirty read idiom. + if (!_initialized) initialize_work(); + } +}; + + +class ConcurrentGCThread: public NamedThread { + friend class VMStructs; + +protected: + static bool _should_terminate; + static bool _has_terminated; + + enum CGC_flag_type { + CGC_nil = 0x0, + CGC_dont_suspend = 0x1, + CGC_CGC_safepoint = 0x2, + CGC_VM_safepoint = 0x4 + }; + + static int _CGC_flag; + + static bool CGC_flag_is_set(int b) { return (_CGC_flag & b) != 0; } + static int set_CGC_flag(int b) { return _CGC_flag |= b; } + static int reset_CGC_flag(int b) { return _CGC_flag &= ~b; } + + void stopWorldAndDo(VoidClosure* op); + + // All instances share this one set. + static SuspendibleThreadSet _sts; + + // Create and start the thread (setting it's priority high.) + void create_and_start(); + + // Do initialization steps in the thread: record stack base and size, + // init thread local storage, set JNI handle block. + void initialize_in_thread(); + + // Wait until Universe::is_fully_initialized(); + void wait_for_universe_init(); + + // Record that the current thread is terminating, and will do more + // concurrent work. + void terminate(); + +public: + // Constructor + + ConcurrentGCThread(); + ~ConcurrentGCThread() {} // Exists to call NamedThread destructor. + + // Tester + bool is_ConcurrentGC_thread() const { return true; } + + static void safepoint_synchronize(); + static void safepoint_desynchronize(); + + // All overridings should probably do _sts::yield, but we allow + // overriding for distinguished debugging messages. Default is to do + // nothing. 
+ virtual void yield() {} + + bool should_yield() { return _sts.should_yield(); } + + // they are prefixed by sts since there are already yield() and + // should_yield() (non-static) methods in this class and it was an + // easy way to differentiate them. + static void stsYield(const char* id); + static bool stsShouldYield(); + static void stsJoin(); + static void stsLeave(); + +}; + +// The SurrogateLockerThread is used by concurrent GC threads for +// manipulating Java monitors, in particular, currently for +// manipulating the pending_list_lock. XXX +class SurrogateLockerThread: public JavaThread { + friend class VMStructs; + public: + enum SLT_msg_type { + empty = 0, // no message + acquirePLL, // acquire pending list lock + releaseAndNotifyPLL // notify and release pending list lock + }; + private: + // the following are shared with the CMSThread + SLT_msg_type _buffer; // communication buffer + Monitor _monitor; // monitor controlling buffer + BasicLock _basicLock; // used for PLL locking + + public: + static SurrogateLockerThread* make(TRAPS); + + SurrogateLockerThread(); + + bool is_hidden_from_external_view() const { return true; } + + void loop(); // main method + + void manipulatePLL(SLT_msg_type msg); + +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/gcOverheadReporter.cpp 2009-08-01 04:21:25.038849079 +0100 @@ -0,0 +1,179 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
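The SuspendibleThreadSet declared in concurrentGCThread.hpp above is driven from two sides. A minimal sketch of the intended protocol; the loop condition and the work routine are hypothetical.

  // Concurrent GC thread side: only do work while joined to the set, and
  // yield at safe points so that a pending suspend_all() can make progress.
  void concurrent_phase(SuspendibleThreadSet* sts) {
    sts->join();                             // may block while a suspension is in progress
    while (!phase_is_done()) {               // hypothetical
      do_one_unit_of_work();                 // hypothetical
      if (sts->should_yield()) {
        sts->yield("concurrent phase");      // parked here until resume_all()
      }
    }
    sts->leave();
  }

  // Requesting side (cf. ConcurrentGCThread::safepoint_synchronize()):
  //   _sts.suspend_all();   // returns once every joined thread has yielded
  //   ... work that needs the concurrent threads quiescent ...
  //   _sts.resume_all();    // lets the yielded threads continue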
+ * + */ + +# include "incls/_precompiled.incl" +# include "incls/_gcOverheadReporter.cpp.incl" + +class COReportingThread : public ConcurrentGCThread { +private: + GCOverheadReporter* _reporter; + +public: + COReportingThread(GCOverheadReporter* reporter) : _reporter(reporter) { + guarantee( _reporter != NULL, "precondition" ); + create_and_start(); + } + + virtual void run() { + initialize_in_thread(); + wait_for_universe_init(); + + int period_ms = GCOverheadReportingPeriodMS; + + while ( true ) { + os::sleep(Thread::current(), period_ms, false); + + _sts.join(); + double now_sec = os::elapsedTime(); + _reporter->collect_and_record_conc_overhead(now_sec); + _sts.leave(); + } + + terminate(); + } +}; + +GCOverheadReporter* GCOverheadReporter::_reporter = NULL; + +GCOverheadReporter::GCOverheadReporter(size_t group_num, + const char* group_names[], + size_t length) + : _group_num(group_num), _prev_end_sec(0.0) { + guarantee( 0 <= group_num && group_num <= MaxGCOverheadGroupNum, + "precondition" ); + + _base = NEW_C_HEAP_ARRAY(GCOverheadReporterEntry, length); + _top = _base + length; + _curr = _base; + + for (size_t i = 0; i < group_num; ++i) { + guarantee( group_names[i] != NULL, "precondition" ); + _group_names[i] = group_names[i]; + } +} + +void +GCOverheadReporter::add(double start_sec, double end_sec, + double* conc_overhead, + double stw_overhead) { + assert( _curr <= _top, "invariant" ); + + if (_curr == _top) { + guarantee( false, "trace full" ); + return; + } + + _curr->_start_sec = start_sec; + _curr->_end_sec = end_sec; + for (size_t i = 0; i < _group_num; ++i) { + _curr->_conc_overhead[i] = + (conc_overhead != NULL) ? conc_overhead[i] : 0.0; + } + _curr->_stw_overhead = stw_overhead; + + ++_curr; +} + +void +GCOverheadReporter::collect_and_record_conc_overhead(double end_sec) { + double start_sec = _prev_end_sec; + guarantee( end_sec > start_sec, "invariant" ); + + double conc_overhead[MaxGCOverheadGroupNum]; + COTracker::totalConcOverhead(end_sec, _group_num, conc_overhead); + add_conc_overhead(start_sec, end_sec, conc_overhead); + _prev_end_sec = end_sec; +} + +void +GCOverheadReporter::record_stw_start(double start_sec) { + guarantee( start_sec > _prev_end_sec, "invariant" ); + collect_and_record_conc_overhead(start_sec); +} + +void +GCOverheadReporter::record_stw_end(double end_sec) { + double start_sec = _prev_end_sec; + COTracker::updateAllForSTW(start_sec, end_sec); + add_stw_overhead(start_sec, end_sec, 1.0); + + _prev_end_sec = end_sec; +} + +void +GCOverheadReporter::print() const { + tty->print_cr(""); + tty->print_cr("GC Overhead (%d entries)", _curr - _base); + tty->print_cr(""); + GCOverheadReporterEntry* curr = _base; + while (curr < _curr) { + double total = curr->_stw_overhead; + for (size_t i = 0; i < _group_num; ++i) + total += curr->_conc_overhead[i]; + + tty->print("OVERHEAD %12.8lf %12.8lf ", + curr->_start_sec, curr->_end_sec); + + for (size_t i = 0; i < _group_num; ++i) + tty->print("%s %12.8lf ", _group_names[i], curr->_conc_overhead[i]); + + tty->print_cr("STW %12.8lf TOT %12.8lf", curr->_stw_overhead, total); + ++curr; + } + tty->print_cr(""); +} + +// statics + +void +GCOverheadReporter::initGCOverheadReporter(size_t group_num, + const char* group_names[]) { + guarantee( _reporter == NULL, "should only be called once" ); + guarantee( 0 <= group_num && group_num <= MaxGCOverheadGroupNum, + "precondition" ); + guarantee( group_names != NULL, "pre-condition" ); + + if (GCOverheadReporting) { + _reporter = new GCOverheadReporter(group_num, group_names); 
+ new COReportingThread(_reporter); + } +} + +void +GCOverheadReporter::recordSTWStart(double start_sec) { + if (_reporter != NULL) + _reporter->record_stw_start(start_sec); +} + +void +GCOverheadReporter::recordSTWEnd(double end_sec) { + if (_reporter != NULL) + _reporter->record_stw_end(end_sec); +} + +void +GCOverheadReporter::printGCOverhead() { + if (_reporter != NULL) + _reporter->print(); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/gcOverheadReporter.hpp 2009-08-01 04:21:25.452616325 +0100 @@ -0,0 +1,141 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// Keeps track of the GC overhead (both concurrent and STW). It stores +// it in a large array and then prints it to tty at the end of the +// execution. + +// See coTracker.hpp for the explanation on what groups are. + +// Let's set a maximum number of concurrent overhead groups, to +// statically allocate any arrays we need and not to have to +// malloc/free them. This is just a bit more convenient. +enum { + MaxGCOverheadGroupNum = 4 +}; + +typedef struct { + double _start_sec; + double _end_sec; + + double _conc_overhead[MaxGCOverheadGroupNum]; + double _stw_overhead; +} GCOverheadReporterEntry; + +class GCOverheadReporter { + friend class COReportingThread; + +private: + enum PrivateConstants { + DefaultReporterLength = 128 * 1024 + }; + + // Reference to the single instance of this class. + static GCOverheadReporter* _reporter; + + // These three references point to the array that contains the GC + // overhead entries (_base is the base of the array, _top is the + // address passed the last entry of the array, _curr is the next + // entry to be used). + GCOverheadReporterEntry* _base; + GCOverheadReporterEntry* _top; + GCOverheadReporterEntry* _curr; + + // The number of concurrent overhead groups. + size_t _group_num; + + // The wall-clock time of the end of the last recorded period of GC + // overhead. + double _prev_end_sec; + + // Names for the concurrent overhead groups. + const char* _group_names[MaxGCOverheadGroupNum]; + + // Add a new entry to the large array. conc_overhead being NULL is + // equivalent to an array full of 0.0s. conc_overhead should have a + // length of at least _group_num. + void add(double start_sec, double end_sec, + double* conc_overhead, + double stw_overhead); + + // Add an entry that represents concurrent GC overhead. + // conc_overhead must be at least of length _group_num. 
+ // conc_overhead being NULL is equivalent to an array full of 0.0s. + void add_conc_overhead(double start_sec, double end_sec, + double* conc_overhead) { + add(start_sec, end_sec, conc_overhead, 0.0); + } + + // Add an entry that represents STW GC overhead. + void add_stw_overhead(double start_sec, double end_sec, + double stw_overhead) { + add(start_sec, end_sec, NULL, stw_overhead); + } + + // It records the start of a STW pause (i.e. it records the + // concurrent overhead up to that point) + void record_stw_start(double start_sec); + + // It records the end of a STW pause (i.e. it records the overhead + // associated with the pause and adjusts all the trackers to reflect + // the pause) + void record_stw_end(double end_sec); + + // It queries all the trackers of their concurrent overhead and + // records it. + void collect_and_record_conc_overhead(double end_sec); + + // It prints the contents of the GC overhead array + void print() const; + + + // Constructor. The same preconditions for group_num and group_names + // from initGCOverheadReporter apply here too. + GCOverheadReporter(size_t group_num, + const char* group_names[], + size_t length = DefaultReporterLength); + +public: + + // statics + + // It initialises the GCOverheadReporter and launches the concurrent + // overhead reporting thread. Both actions happen only if the + // GCOverheadReporting parameter is set. The length of the + // group_names array should be >= group_num and group_num should be + // <= MaxGCOverheadGroupNum. Entries group_namnes[0..group_num-1] + // should not be NULL. + static void initGCOverheadReporter(size_t group_num, + const char* group_names[]); + + // The following three are provided for convenience and they are + // wrappers around record_stw_start(start_sec), record_stw_end(end_sec), + // and print(). Each of these checks whether GC overhead reporting + // is on (i.e. _reporter != NULL) and, if it is, calls the + // corresponding method. Saves from repeating this pattern again and + // again from the places where they need to be called. + static void recordSTWStart(double start_sec); + static void recordSTWEnd(double end_sec); + static void printGCOverhead(); +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.cpp 2009-08-01 04:21:25.871132301 +0100 @@ -0,0 +1,141 @@ +/* + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
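A rough sketch of the reporter's intended lifecycle, using only the static entry points declared in gcOverheadReporter.hpp above; the group names and the call sites are illustrative.

  // Sketch only: wiring a collector up to the overhead reporter.
  static const char* overhead_groups[] = { "CM", "CS" };          // illustrative names
  GCOverheadReporter::initGCOverheadReporter(2, overhead_groups); // no-op unless GCOverheadReporting

  // Around every stop-the-world pause:
  //   GCOverheadReporter::recordSTWStart(os::elapsedTime());
  //   ... the pause itself ...
  //   GCOverheadReporter::recordSTWEnd(os::elapsedTime());

  // At VM shutdown, dump the accumulated trace:
  //   GCOverheadReporter::printGCOverhead();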
+ * + */ + +# include "incls/_precompiled.incl" +# include "incls/_spaceDecorator.cpp.incl" + +// Catch-all file for utility classes + +#ifndef PRODUCT + +// Returns true is the location q matches the mangling +// pattern. +bool SpaceMangler::is_mangled(HeapWord* q) { + // This test loses precision but is good enough + return badHeapWord == (max_juint & (uintptr_t) q->value()); +} + + +void SpaceMangler::set_top_for_allocations(HeapWord* v) { + if (v < end()) { + assert(!CheckZapUnusedHeapArea || is_mangled(v), + "The high water mark is not mangled"); + } + _top_for_allocations = v; +} + +// Mangle only the unused space that has not previously +// been mangled and that has not been allocated since being +// mangled. +void SpaceMangler::mangle_unused_area() { + assert(ZapUnusedHeapArea, "Mangling should not be in use"); + // Mangle between top and the high water mark. Safeguard + // against the space changing since top_for_allocations was + // set. + HeapWord* mangled_end = MIN2(top_for_allocations(), end()); + if (top() < mangled_end) { + MemRegion mangle_mr(top(), mangled_end); + SpaceMangler::mangle_region(mangle_mr); + // Light weight check of mangling. + check_mangled_unused_area(end()); + } + // Complete check of unused area which is functional when + // DEBUG_MANGLING is defined. + check_mangled_unused_area_complete(); +} + +// A complete mangle is expected in the +// exceptional case where top_for_allocations is not +// properly tracking the high water mark for mangling. +// This can be the case when to-space is being used for +// scratch space during a mark-sweep-compact. See +// contribute_scratch() and PSMarkSweep::allocate_stacks(). +void SpaceMangler::mangle_unused_area_complete() { + assert(ZapUnusedHeapArea, "Mangling should not be in use"); + MemRegion mangle_mr(top(), end()); + SpaceMangler::mangle_region(mangle_mr); +} + +// Simply mangle the MemRegion mr. +void SpaceMangler::mangle_region(MemRegion mr) { + assert(ZapUnusedHeapArea, "Mangling should not be in use"); +#ifdef ASSERT + if(TraceZapUnusedHeapArea) { + gclog_or_tty->print("Mangling [0x%x to 0x%x)", mr.start(), mr.end()); + } + Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord); + if(TraceZapUnusedHeapArea) { + gclog_or_tty->print_cr(" done"); + } +#endif +} + +// Check that top, top_for_allocations and the last +// word of the space are mangled. In a tight memory +// situation even this light weight mangling could +// cause paging by touching the end of the space. +void SpaceMangler::check_mangled_unused_area(HeapWord* limit) { + if (CheckZapUnusedHeapArea) { + // This method can be called while the spaces are + // being reshaped so skip the test if the end of the + // space is beyond the specified limit; + if (end() > limit) return; + + assert(top() == end() || + (is_mangled(top())), "Top not mangled"); + assert((top_for_allocations() < top()) || + (top_for_allocations() >= end()) || + (is_mangled(top_for_allocations())), + "Older unused not mangled"); + assert(top() == end() || + (is_mangled(end() - 1)), "End not properly mangled"); + // Only does checking when DEBUG_MANGLING is defined. + check_mangled_unused_area_complete(); + } +} + +#undef DEBUG_MANGLING +// This should only be used while debugging the mangling +// because of the high cost of checking the completeness. 
+void SpaceMangler::check_mangled_unused_area_complete() { + if (CheckZapUnusedHeapArea) { + assert(ZapUnusedHeapArea, "Not mangling unused area"); +#ifdef DEBUG_MANGLING + HeapWord* q = top(); + HeapWord* limit = end(); + + bool passed = true; + while (q < limit) { + if (!is_mangled(q)) { + passed = false; + break; + } + q++; + } + assert(passed, "Mangling is not complete"); +#endif + } +} +#undef DEBUG_MANGLING +#endif // not PRODUCT --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.hpp 2009-08-01 04:21:26.311993951 +0100 @@ -0,0 +1,141 @@ +/* + * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class SpaceDecorator: public AllStatic { + public: + // Initialization flags. + static const bool Clear = true; + static const bool DontClear = false; + static const bool Mangle = true; + static const bool DontMangle = false; +}; + +// Functionality for use with class Space and class MutableSpace. +// The approach taken with the mangling is to mangle all +// the space initially and then to mangle areas that have +// been allocated since the last collection. Mangling is +// done in the context of a generation and in the context +// of a space. +// The space in a generation is mangled when it is first +// initialized and when the generation grows. The spaces +// are not necessarily up-to-date when this mangling occurs +// and the method mangle_region() is used. +// After allocations have been done in a space, the space generally +// need to be remangled. Remangling is only done on the +// recently allocated regions in the space. Typically, that is +// the region between the new top and the top just before a +// garbage collection. +// An exception to the usual mangling in a space is done when the +// space is used for an extraordinary purpose. Specifically, when +// to-space is used as scratch space for a mark-sweep-compact +// collection. +// Spaces are mangled after a collection. If the generation +// grows after a collection, the added space is mangled as part of +// the growth of the generation. No additional mangling is needed when the +// spaces are resized after an expansion. +// The class SpaceMangler keeps a pointer to the top of the allocated +// area and provides the methods for doing the piece meal mangling. +// Methods for doing sparces and full checking of the mangling are +// included. The full checking is done if DEBUG_MANGLING is defined. 
+// GenSpaceMangler is used with the GenCollectedHeap collectors and +// MutableSpaceMangler is used with the ParallelScavengeHeap collectors. +// These subclasses abstract the differences in the types of spaces used +// by each heap. + +class SpaceMangler: public CHeapObj { + friend class VMStructs; + + // High water mark for allocations. Typically, the space above + // this point have been mangle previously and don't need to be + // touched again. Space belows this point has been allocated + // and remangling is needed between the current top and this + // high water mark. + HeapWord* _top_for_allocations; + HeapWord* top_for_allocations() { return _top_for_allocations; } + + public: + + // Setting _top_for_allocations to NULL at initialization + // makes it always below top so that mangling done as part + // of the initialize() call of a space does nothing (as it + // should since the mangling is done as part of the constructor + // for the space. + SpaceMangler() : _top_for_allocations(NULL) {} + + // Methods for top and end that delegate to the specific + // space type. + virtual HeapWord* top() const = 0; + virtual HeapWord* end() const = 0; + + // Return true if q matches the mangled pattern. + static bool is_mangled(HeapWord* q) PRODUCT_RETURN0; + + // Used to save the an address in a space for later use during mangling. + void set_top_for_allocations(HeapWord* v); + + // Overwrites the unused portion of this space. + // Mangle only the region not previously mangled [top, top_previously_mangled) + void mangle_unused_area(); + // Mangle all the unused region [top, end) + void mangle_unused_area_complete(); + // Do some sparse checking on the area that should have been mangled. + void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN; + // Do a complete check of the area that should be mangled. + void check_mangled_unused_area_complete() PRODUCT_RETURN; + + // Mangle the MemRegion. This is a non-space specific mangler. It + // is used during the initial mangling of a space before the space + // is fully constructed. Also is used when a generation is expanded + // and possibly before the spaces have been reshaped to to the new + // size of the generation. + static void mangle_region(MemRegion mr); +}; + +class ContiguousSpace; + +// For use with GenCollectedHeap's +class GenSpaceMangler: public SpaceMangler { + ContiguousSpace* _sp; + + ContiguousSpace* sp() { return _sp; } + + HeapWord* top() const { return _sp->top(); } + HeapWord* end() const { return _sp->end(); } + + public: + GenSpaceMangler(ContiguousSpace* sp) : SpaceMangler(), _sp(sp) {} +}; + +// For use with ParallelScavengeHeap's. +class MutableSpaceMangler: public SpaceMangler { + MutableSpace* _sp; + + MutableSpace* sp() { return _sp; } + + HeapWord* top() const { return _sp->top(); } + HeapWord* end() const { return _sp->end(); } + + public: + MutableSpaceMangler(MutableSpace* sp) : SpaceMangler(), _sp(sp) {} +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/memory/barrierSet.cpp 2009-08-01 04:21:26.728902040 +0100 @@ -0,0 +1,36 @@ +/* + * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
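A condensed sketch of the usage pattern that the SpaceMangler comments above describe; the space, the GC hooks and the exact call sites are hypothetical, and all of this is only active in builds with ZapUnusedHeapArea.

  // Sketch only; 'space' is hypothetical.
  GenSpaceMangler* mangler = new GenSpaceMangler(space);

  // When the space is first set up (or the generation grows), the whole
  // unused area is filled with badHeapWord once:
  mangler->mangle_unused_area_complete();                  // mangles [top(), end())

  // Just before a collection, remember how far allocation got:
  mangler->set_top_for_allocations(space->top());

  // After the collection has moved top() back down, only the range that was
  // actually allocated, [top(), top_for_allocations), needs remangling:
  mangler->mangle_unused_area();
  mangler->check_mangled_unused_area(space->end());        // light-weight spot check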
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +# include "incls/_precompiled.incl" +# include "incls/_barrierSet.cpp.incl" + +// count is in HeapWord's +void BarrierSet::static_write_ref_array_pre(HeapWord* start, size_t count) { + Universe::heap()->barrier_set()->write_ref_array_pre(MemRegion(start, start + count)); +} + +// count is in HeapWord's +void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) { + Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, start + count)); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/runtime/dtraceJSDT.cpp 2009-08-01 04:21:27.125161090 +0100 @@ -0,0 +1,117 @@ +/* + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
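The two helpers in barrierSet.cpp above are intended to bracket bulk stores into oop arrays. A hedged sketch of the call pattern; the caller and the choice of copy routine are illustrative, and it assumes an oop is one HeapWord here (count is in HeapWords, as the comments above note).

  // Sketch only: bracketing a bulk oop-array copy with the array write barriers.
  void copy_oop_array_with_barriers(HeapWord* src, HeapWord* dst, size_t count) {
    BarrierSet::static_write_ref_array_pre(dst, count);       // pre-barrier over the destination
    Copy::conjoint_oops_atomic((oop*)src, (oop*)dst, count);  // the copy itself (illustrative choice)
    BarrierSet::static_write_ref_array_post(dst, count);      // post-barrier / card marking
  }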
+ * + */ + +#include "incls/_precompiled.incl" +#include "incls/_dtraceJSDT.cpp.incl" + +#ifdef HAVE_DTRACE_H + +jlong DTraceJSDT::activate( + jint version, jstring module_name, jint providers_count, + JVM_DTraceProvider* providers, TRAPS) { + + size_t count = 0; + RegisteredProbes* probes = NULL; + + if (!is_supported()) { + return 0; + } + + assert(module_name != NULL, "valid module name"); + assert(providers != NULL, "valid provider array"); + + for (int i = 0; i < providers_count; ++i) { + count += providers[i].probe_count; + } + probes = new RegisteredProbes(count); + count = 0; + + for (int i = 0; i < providers_count; ++i) { + assert(providers[i].name != NULL, "valid provider name"); + assert(providers[i].probe_count == 0 || providers[i].probes != NULL, + "valid probe count"); + for (int j = 0; j < providers[i].probe_count; ++j) { + JVM_DTraceProbe* probe = &(providers[i].probes[j]); + assert(probe != NULL, "valid probe"); + assert(probe->method != NULL, "valid method"); + assert(probe->name != NULL, "valid probe name"); + assert(probe->function != NULL, "valid probe function spec"); + methodHandle h_method = + methodHandle(THREAD, JNIHandles::resolve_jmethod_id(probe->method)); + nmethod* nm = AdapterHandlerLibrary::create_dtrace_nmethod(h_method); + h_method()->set_not_compilable(CompLevel_highest_tier); + h_method()->set_code(h_method, nm); + probes->nmethod_at_put(count++, nm); + } + } + + int handle = pd_activate((void*)probes, + module_name, providers_count, providers); + if (handle <= 0) { + delete probes; + THROW_MSG_0(vmSymbols::java_lang_RuntimeException(), + "Unable to register DTrace probes (internal error)."); + } + probes->set_helper_handle(handle); + return RegisteredProbes::toOpaqueProbes(probes); +} + +jboolean DTraceJSDT::is_probe_enabled(jmethodID method) { + methodOop m = JNIHandles::resolve_jmethod_id(method); + return nativeInstruction_at(m->code()->trap_address())->is_dtrace_trap(); +} + +void DTraceJSDT::dispose(OpaqueProbes probes) { + RegisteredProbes* p = RegisteredProbes::toRegisteredProbes(probes); + if (probes != -1 && p != NULL) { + pd_dispose(p->helper_handle()); + delete p; + } +} + +jboolean DTraceJSDT::is_supported() { + return pd_is_supported(); +} + +#else // HAVE_DTRACE_H + +jlong DTraceJSDT::activate( + jint version, jstring module_name, jint providers_count, + JVM_DTraceProvider* providers, TRAPS) { + return 0; +} + +jboolean DTraceJSDT::is_probe_enabled(jmethodID method) { + return false; +} + +void DTraceJSDT::dispose(OpaqueProbes probes) { + return; +} + +jboolean DTraceJSDT::is_supported() { + return false; +} + +#endif // ndef HAVE_DTRACE_H --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/runtime/dtraceJSDT.hpp 2009-08-01 04:21:27.558592594 +0100 @@ -0,0 +1,89 @@ +/* + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class RegisteredProbes; +typedef jlong OpaqueProbes; + +class DTraceJSDT : AllStatic { + private: + + static int pd_activate(void* moduleBaseAddress, jstring module, + jint providers_count, JVM_DTraceProvider* providers); + static void pd_dispose(int handle); + static jboolean pd_is_supported(); + + public: + + static OpaqueProbes activate( + jint version, jstring module_name, jint providers_count, + JVM_DTraceProvider* providers, TRAPS); + static jboolean is_probe_enabled(jmethodID method); + static void dispose(OpaqueProbes handle); + static jboolean is_supported(); +}; + +class RegisteredProbes : public CHeapObj { + private: + nmethod** _nmethods; // all the probe methods + size_t _count; // number of probe methods + int _helper_handle; // DTrace-assigned identifier + + public: + RegisteredProbes(size_t count) { + _count = count; + _nmethods = NEW_C_HEAP_ARRAY(nmethod*, count); + } + + ~RegisteredProbes() { + for (size_t i = 0; i < _count; ++i) { + // Let the sweeper reclaim it + _nmethods[i]->make_not_entrant(); + _nmethods[i]->method()->clear_code(); + } + FREE_C_HEAP_ARRAY(nmethod*, _nmethods); + _nmethods = NULL; + _count = 0; + } + + static RegisteredProbes* toRegisteredProbes(OpaqueProbes p) { + return (RegisteredProbes*)(intptr_t)p; + } + + static OpaqueProbes toOpaqueProbes(RegisteredProbes* p) { + return (OpaqueProbes)(intptr_t)p; + } + + void set_helper_handle(int handle) { _helper_handle = handle; } + int helper_handle() const { return _helper_handle; } + + nmethod* nmethod_at(size_t i) { + assert(i >= 0 && i < _count, "bad nmethod index"); + return _nmethods[i]; + } + + void nmethod_at_put(size_t i, nmethod* nm) { + assert(i >= 0 && i < _count, "bad nmethod index"); + _nmethods[i] = nm; + } +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/utilities/intHisto.cpp 2009-08-01 04:21:27.989077655 +0100 @@ -0,0 +1,64 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +# include "incls/_precompiled.incl" +# include "incls/_intHisto.cpp.incl" + +IntHistogram::IntHistogram(int est, int max) : _max(max), _tot(0) { + assert(0 <= est && est <= max, "Preconditions"); + _elements = new (ResourceObj::C_HEAP) GrowableArray<int>(est, true); + guarantee(_elements != NULL, "alloc failure"); +} + +void IntHistogram::add_entry(int outcome) { + if (outcome > _max) outcome = _max; + int new_count = _elements->at_grow(outcome) + 1; + _elements->at_put(outcome, new_count); + _tot++; +} + +int IntHistogram::entries_for_outcome(int outcome) { + return _elements->at_grow(outcome); +} + +void IntHistogram::print_on(outputStream* st) const { + double tot_d = (double)_tot; + st->print_cr("Outcome # of occurrences %% of occurrences"); + st->print_cr("-----------------------------------------------"); + for (int i=0; i < _elements->length()-2; i++) { + int cnt = _elements->at(i); + if (cnt != 0) { + st->print_cr("%7d %10d %8.4f", + i, cnt, (double)cnt/tot_d); + } + } + // Does it have any max entries? + if (_elements->length()-1 == _max) { + int cnt = _elements->at(_max); + st->print_cr(">= %4d %10d %8.4f", + _max, cnt, (double)cnt/tot_d); + } + st->print_cr("-----------------------------------------------"); + st->print_cr(" All %10d %8.4f", _tot, 1.0); +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/utilities/intHisto.hpp 2009-08-01 04:21:28.407427875 +0100 @@ -0,0 +1,70 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// This class implements a simple histogram. + +// A histogram summarizes a series of "measurements", each of which is +// assumed (required in this implementation) to have an outcome that is a +// non-negative integer. The histogram efficiently maps measurement outcomes +// to the number of measurements that had that outcome. + +// To print the results, invoke print() on your Histogram*. + +// Note: there is already an existing "Histogram" class, in file +// histogram.{hpp,cpp}, but to my mind that's not a histogram, it's a table +// mapping strings to counts. To be a histogram (IMHO) it needs to map +// numbers (in fact, integers) to number of occurrences of that number. + +// ysr: (i am not sure i agree with the above note.) i suspect we want to have a +// histogram template that will map an arbitrary type (with a defined order +// relation) to a count. + + +class IntHistogram : public CHeapObj { + protected: + int _max; + int _tot; + GrowableArray<int>* _elements; + +public: + // Create a new, empty table. 
"est" is an estimate of the maximum outcome + // that will be added, and "max" is an outcome such that all outcomes at + // least that large will be bundled with it. + IntHistogram(int est, int max); + // Add a measurement with the given outcome to the sequence. + void add_entry(int outcome); + // Return the number of entries recorded so far with the given outcome. + int entries_for_outcome(int outcome); + // Return the total number of entries recorded so far. + int total_entries() { return _tot; } + // Return the number of entries recorded so far with the given outcome as + // a fraction of the total number recorded so far. + double fraction_for_outcome(int outcome) { + return + (double)entries_for_outcome(outcome)/ + (double)total_entries(); + } + // Print the histogram on the given output stream. + void print_on(outputStream* st) const; +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/utilities/numberSeq.cpp 2009-08-01 04:21:28.815748584 +0100 @@ -0,0 +1,243 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +# include "incls/_precompiled.incl" +# include "incls/_numberSeq.cpp.incl" + +AbsSeq::AbsSeq(double alpha) : + _num(0), _sum(0.0), _sum_of_squares(0.0), + _davg(0.0), _dvariance(0.0), _alpha(alpha) { +} + +void AbsSeq::add(double val) { + if (_num == 0) { + // if the sequence is empty, the davg is the same as the value + _davg = val; + // and the variance is 0 + _dvariance = 0.0; + } else { + // otherwise, calculate both + _davg = (1.0 - _alpha) * val + _alpha * _davg; + double diff = val - _davg; + _dvariance = (1.0 - _alpha) * diff * diff + _alpha * _dvariance; + } +} + +double AbsSeq::avg() const { + if (_num == 0) + return 0.0; + else + return _sum / total(); +} + +double AbsSeq::variance() const { + if (_num <= 1) + return 0.0; + + double x_bar = avg(); + double result = _sum_of_squares / total() - x_bar * x_bar; + if (result < 0.0) { + // due to loss-of-precision errors, the variance might be negative + // by a small bit + + // guarantee(-0.1 < result && result < 0.0, + // "if variance is negative, it should be very small"); + result = 0.0; + } + return result; +} + +double AbsSeq::sd() const { + double var = variance(); + guarantee( var >= 0.0, "variance should not be negative" ); + return sqrt(var); +} + +double AbsSeq::davg() const { + return _davg; +} + +double AbsSeq::dvariance() const { + if (_num <= 1) + return 0.0; + + double result = _dvariance; + if (result < 0.0) { + // due to loss-of-precision errors, the variance might be negative + // by a small bit + + guarantee(-0.1 < result && result < 0.0, + "if variance is negative, it should be very small"); + result = 0.0; + } + return result; +} + +double AbsSeq::dsd() const { + double var = dvariance(); + guarantee( var >= 0.0, "variance should not be negative" ); + return sqrt(var); +} + +NumberSeq::NumberSeq(double alpha) : + AbsSeq(alpha), _maximum(0.0), _last(0.0) { +} + +bool NumberSeq::check_nums(NumberSeq *total, int n, NumberSeq **parts) { + for (int i = 0; i < n; ++i) { + if (parts[i] != NULL && total->num() != parts[i]->num()) + return false; + } + return true; +} + +NumberSeq::NumberSeq(NumberSeq *total, int n, NumberSeq **parts) { + guarantee(check_nums(total, n, parts), "all seq lengths should match"); + double sum = total->sum(); + for (int i = 0; i < n; ++i) { + if (parts[i] != NULL) + sum -= parts[i]->sum(); + } + + _num = total->num(); + _sum = sum; + + // we do not calculate these... + _sum_of_squares = -1.0; + _maximum = -1.0; + _davg = -1.0; + _dvariance = -1.0; +} + +void NumberSeq::add(double val) { + AbsSeq::add(val); + + _last = val; + if (_num == 0) { + _maximum = val; + } else { + if (val > _maximum) + _maximum = val; + } + _sum += val; + _sum_of_squares += val * val; + ++_num; +} + + +TruncatedSeq::TruncatedSeq(int length, double alpha): + AbsSeq(alpha), _length(length), _next(0) { + _sequence = NEW_C_HEAP_ARRAY(double, _length); + for (int i = 0; i < _length; ++i) + _sequence[i] = 0.0; +} + +void TruncatedSeq::add(double val) { + AbsSeq::add(val); + + // get the oldest value in the sequence... 
+ double old_val = _sequence[_next]; + // ...remove it from the sum and sum of squares + _sum -= old_val; + _sum_of_squares -= old_val * old_val; + + // ...and update them with the new value + _sum += val; + _sum_of_squares += val * val; + + // now replace the old value with the new one + _sequence[_next] = val; + _next = (_next + 1) % _length; + + // only increase it if the buffer is not full + if (_num < _length) + ++_num; + + guarantee( variance() > -1.0, "variance should be >= 0" ); +} + +// can't easily keep track of this incrementally... +double TruncatedSeq::maximum() const { + if (_num == 0) + return 0.0; + double ret = _sequence[0]; + for (int i = 1; i < _num; ++i) { + double val = _sequence[i]; + if (val > ret) + ret = val; + } + return ret; +} + +double TruncatedSeq::last() const { + if (_num == 0) + return 0.0; + unsigned last_index = (_next + _length - 1) % _length; + return _sequence[last_index]; +} + +double TruncatedSeq::oldest() const { + if (_num == 0) + return 0.0; + else if (_num < _length) + // index 0 always oldest value until the array is full + return _sequence[0]; + else { + // since the array is full, _next is over the oldest value + return _sequence[_next]; + } +} + +double TruncatedSeq::predict_next() const { + if (_num == 0) + return 0.0; + + double num = (double) _num; + double x_squared_sum = 0.0; + double x_sum = 0.0; + double y_sum = 0.0; + double xy_sum = 0.0; + double x_avg = 0.0; + double y_avg = 0.0; + + int first = (_next + _length - _num) % _length; + for (int i = 0; i < _num; ++i) { + double x = (double) i; + double y = _sequence[(first + i) % _length]; + + x_squared_sum += x * x; + x_sum += x; + y_sum += y; + xy_sum += x * y; + } + x_avg = x_sum / num; + y_avg = y_sum / num; + + double Sxx = x_squared_sum - x_sum * x_sum / num; + double Sxy = xy_sum - x_sum * y_sum / num; + double b1 = Sxy / Sxx; + double b0 = y_avg - b1 * x_avg; + + return b0 + b1 * num; +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/src/share/vm/utilities/numberSeq.hpp 2009-08-01 04:21:29.249608662 +0100 @@ -0,0 +1,117 @@ +/* + * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +/** + ** This file contains a few classes that represent number sequence, + ** x1, x2, x3, ..., xN, and can calculate their avg, max, and sd. 
+ ** + ** Here's a quick description of the classes: + ** + ** AbsSeq: abstract superclass + ** NumberSeq: the sequence is assumed to be very long and the + ** maximum, avg, sd, davg, and dsd are calculated over all its elements + ** TruncatedSeq: this class keeps track of the last L elements + ** of the sequence and calculates avg, max, and sd only over them + **/ + +#define DEFAULT_ALPHA_VALUE 0.7 + +class AbsSeq { +private: + void init(double alpha); + +protected: + int _num; // the number of elements in the sequence + double _sum; // the sum of the elements in the sequence + double _sum_of_squares; // the sum of squares of the elements in the sequence + + double _davg; // decaying average + double _dvariance; // decaying variance + double _alpha; // factor for the decaying average / variance + + // This is what we divide with to get the average. In a standard + // number sequence, this should just be the number of elements in it. + virtual double total() const { return (double) _num; }; + +public: + AbsSeq(double alpha = DEFAULT_ALPHA_VALUE); + + virtual void add(double val); // adds a new element to the sequence + void add(unsigned val) { add((double) val); } + virtual double maximum() const = 0; // maximum element in the sequence + virtual double last() const = 0; // last element added in the sequence + + // the number of elements in the sequence + int num() const { return _num; } + // the sum of the elements in the sequence + double sum() const { return _sum; } + + double avg() const; // the average of the sequence + double variance() const; // the variance of the sequence + double sd() const; // the standard deviation of the sequence + + double davg() const; // decaying average + double dvariance() const; // decaying variance + double dsd() const; // decaying "standard deviation" +}; + +class NumberSeq: public AbsSeq { +private: + bool check_nums(NumberSeq* total, int n, NumberSeq** parts); + +protected: + double _last; + double _maximum; // keep track of maximum value + +public: + NumberSeq(double alpha = DEFAULT_ALPHA_VALUE); + NumberSeq(NumberSeq* total, int n_parts, NumberSeq** parts); + + virtual void add(double val); + virtual double maximum() const { return _maximum; } + virtual double last() const { return _last; } +}; + +class TruncatedSeq: public AbsSeq { +private: + enum PrivateConstants { + DefaultSeqLength = 10 + }; + void init(); +protected: + double *_sequence; // buffers the last L elements in the sequence + int _length; // this is L + int _next; // oldest slot in the array, i.e. next to be overwritten + +public: + // accepts a value for L + TruncatedSeq(int length = DefaultSeqLength, + double alpha = DEFAULT_ALPHA_VALUE); + virtual void add(double val); + virtual double maximum() const; + virtual double last() const; // the last value added to the sequence + + double oldest() const; // the oldest valid value in the sequence + double predict_next() const; // prediction based on linear regression +}; --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6646019/Test.java 2009-08-01 04:21:30.844092100 +0100 @@ -0,0 +1,51 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/* + * @test + * @bug 6646019 + * @summary array subscript expressions become top() with -d64 + * @run main/othervm -Xcomp -XX:CompileOnly=Test.test Test +*/ + + +public class Test { + final static int i = 2076285318; + long l = 2; + short s; + + public static void main(String[] args) { + Test t = new Test(); + try { t.test(); } + catch (Throwable e) { + if (t.l != 5) { + System.out.println("Fails: " + t.l + " != 5"); + } + } + } + + private void test() { + l = 5; + l = (new short[(byte)-2])[(byte)(l = i)]; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6646020/Tester.java 2009-08-01 04:21:31.290561205 +0100 @@ -0,0 +1,886 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/* + * @test + * @bug 6646020 + * @summary assert(in_bb(n),"must be in block") in -Xcomp mode + */ + +/* Complexity upper bound: 3361 ops */ + +class Tester_Class_0 { + static byte var_1; + + + public Tester_Class_0() + { + "".length(); + { + var_1 = (var_1 = (new byte[(byte)'D'])[(byte)2.40457E38F]); + var_1 = (var_1 = (byte)1.738443503665377E307); + var_1 = (var_1 = (byte)1237144669662298112L); + } + var_1 = "baldh".equalsIgnoreCase("") ? (var_1 = (byte)7.2932087E37F) : (byte)3909726578709910528L; + var_1 = (var_1 = (var_1 = (var_1 = (byte)7.223761846153971E307))); + var_1 = (var_1 = (var_1 = (var_1 = (var_1 = (byte)((short)7860452029249754112L + (byte)1.7374232546809952E308))))); + var_1 = (!true ? (var_1 = (byte)4359229782598970368L) : (short)(byte)1.7509836746850026E308) >= 'P' ? (var_1 = (byte)3.275114793095594E307) : (byte)(- ((byte)1.5595572E38F) / 8.2971296E37F); + byte var_9 = (true ? true : (false ? true : false)) ? 
(var_1 = (var_1 = (byte)9.928434E37F)) : (var_1 = (byte)9.785060633966518E307); + final byte var_10 = 53; + var_9 <<= (true | true) & (((var_10 == "".substring(2001075014).compareToIgnoreCase("rhbytggv") ? !true : ! !true) ? !false : false) ? !true & true : !false) ? var_10 : var_10; + var_9 <<= - (var_9 -= - ~6397182310329038848L >> (char)955837891 << (short)- - -8.4452034E37F >> + ~5485157895941338112L); + --var_9; + var_9 >>= 'V'; + var_9 -= (new char[var_10])[var_9]; + double var_11; + var_11 = (var_11 = (new int[var_9 = (var_9 %= 684423748)])[var_9]); + var_9 /= 'q'; + var_9 *= ~var_9 | (short)1.7667766368850557E308 - "w".trim().charAt(- (var_9 /= + (var_11 = 'q'))); + if (var_10 <= 605036859609030656L | !false & false) + { + var_9 >>>= false ^ false ? (new short[var_10])[var_10] : (short)1013619326108001280L; + } + else + { + var_11 = var_9; + } + var_9 -= 'X'; + var_9 *= 'E'; + { + var_9 ^= (new short[var_9])[var_9 >>>= 'c']; + } + var_11 = 4315867074042433536L; + double var_12 = 1.2183900219527627E308; + var_9 <<= (false ? !false : false) ? '\\' : 'D'; + } + + + + + private final long func_0() + { + float var_2 = 0F; + var_1 = (var_1 = (var_1 = (byte)((short)1.4106931056021857E308 % var_2))); + for (new String(); true & (! !true ^ !false | false) && var_2 < 1; var_1 = (var_1 = (var_1 = (var_1 = (byte)1183673628639185920L)))) + { + var_1 = true | false ? (var_1 = (byte)1.6263855E37F) : (byte)'O'; + var_2++; + "fui".toUpperCase(); + final int var_3 = (var_1 = (var_1 = (byte)'i')) + (byte)2008561384 / (byte)1.4413369179905006E308; + } + var_1 = (var_1 = false ^ false ? (byte)2.3850814E38F : (byte)4.42887E37F); + final float var_4 = 3.052265E38F; + var_1 = (var_1 = (var_1 = (var_1 = (var_1 = (byte)'o')))); + long var_5; + var_1 = (var_1 = (byte)((var_1 = (byte)1913212786) * (var_1 = (byte)var_2))); + var_5 = (short)3.2024069E38F * (short)(var_5 = 'Q'); + var_5 = (false ? true : false) ? (short)1098137179 : (byte)~695765814858203136L; + var_1 = (var_1 = true & false ^ true ? (byte)1662737306 : (byte)'r'); + { + (true ? "a" : "lymivj".toString()).codePointCount((short)3.032349E38F + (var_1 = (var_1 = (var_1 = (var_1 = (byte)1.3159799E37F)))), (byte)2.0898819853138264E307 & (new short[(byte)(short)var_2])[var_1 = (byte)(short)4.859332921376913E307]); + } + double var_6; + var_6 = 1359078277; + final float var_7 = 3.5952457E37F; + var_5 = ('u' | 9005660398910009344L) << 'j'; + int var_8; + var_5 = (!false || true & !false) && false ? (byte)1836342254 : (byte)1.4836203E38F; + var_1 = (var_1 = (var_1 = (var_1 = (byte)1.5824984701060493E308))); + var_1 = (var_1 = (var_1 = (byte)~ (var_1 = (var_1 = (var_1 = (byte)var_7))))); + return +9.067416E37F <= (true | true ^ false ? (var_1 = (byte)(short)1.5243446E38F) : (var_1 = (byte)1.6893049E37F)) ? (byte)~4408841475280588800L - (var_5 = (var_1 = (byte)2.1542209E38F)) : (var_8 = (short)var_4); + } + + protected final static double func_1(final char arg_0, final long arg_1) + { + var_1 = (short)8779631802405542912L << 'x' <= arg_0 ? (byte)+9.96859509852443E307 : (var_1 = (var_1 = (byte)(short)5.218454879223281E307)); + return 5.57437404144192E307; + } + + double func_2(byte arg_0, final boolean arg_1, Object arg_2) + { + arg_2 = arg_1 != arg_1 ? "wq" : "w"; + arg_2 = arg_2; + if (arg_1) + { + arg_2 = false & arg_1 ? "hasmp" : (arg_2 = arg_2); + } + else + { + arg_2 = "lcquv"; + } + arg_0 -= arg_1 ^ false ? (arg_0 |= (short)arg_0) : (~3462197988186869760L | 7274210797196514304L) % - - + +130998764279904256L; + arg_0 &= (true ? 
- - ~7861994999369861120L << 'l' : 'c') * 1246069704; + return (arg_1 ? 9.311174E37F : 1.7085558737202237E308) * 1168887722; + } + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_0.var_1 = "; result += Tester.Printer.print(var_1); + result += ""; + result += "\n]"; + return result; + } +} + + +final class Tester_Class_1 extends Tester_Class_0 { + static Object var_13; + final static boolean var_14 = false | (false ? false : true); + Object var_15; + static byte var_16; + final long var_17 = (long)(-9.40561658911133E307 - (short)2.2016736E38F) ^ (char)1099667310; + static boolean var_18; + static float var_19; + final static byte var_20 = 123; + static byte var_21 = var_1 = (var_1 = var_20); + final static float var_22 = 1.5415572E38F; + + + public Tester_Class_1() + { + char[][] var_39; + boolean var_40 = false | !var_14; + if (var_14) + { + final String[] var_41 = (new String[var_21][var_20])[var_21 *= var_21]; + var_15 = (new Tester_Class_0[var_20])[var_20]; + --var_21; + int var_42; + } + else + { + var_19 = (short)325110146; + } + var_40 &= true; + var_13 = (((new Tester_Class_1[var_21 |= (new char[var_20])[var_21]])[var_21]).var_15 = (new String[var_21][var_20][var_20])[var_21 >>= (byte)(int)var_22]); + var_15 = "m"; + } + + + + + + protected final static Tester_Class_0 func_0(final char arg_0, boolean arg_1) + { + final short var_23 = false ? (short)2.2956268E38F : var_20; + { + ((new Tester_Class_1[var_21])[var_20]).var_15 = ((new Tester_Class_0[var_20][var_21])[var_21])[var_20]; + } + var_19 = var_23; + { + var_21++; + --var_21; + var_13 = (false ? arg_1 : arg_1) ? "" : "aianteahl"; + arg_1 ^= ! (var_14 ? var_14 : !var_14); + } + (arg_1 ? "rq" : "certd").trim(); + arg_1 ^= 's' < var_22; + var_19 = 'T'; + var_19 = var_14 ? --var_21 : var_20; + var_19 = (var_21 >>>= ~ -1559436447128426496L >> 88912720393932800L) | (new char[var_20][var_21])[var_21][var_20]; + short var_24 = 7601; + if (arg_1) + { + var_13 = (new Tester_Class_0[var_20])[var_21]; + } + else + { + var_19 = var_23; + } + var_19 = var_24; + var_19 = 174274929356416000L; + return arg_1 ? (Tester_Class_0)(new Object[var_20])[var_21 >>>= - ((byte)6471979169965446144L)] : (new Tester_Class_0[var_21])[var_20]; + } + + private static int func_1(final Object arg_0, final boolean arg_1) + { + var_19 = 'N'; + var_13 = "ftspm".toUpperCase(); + var_18 = arg_1 ? !arg_1 : var_14; + var_19 = var_21 % 'j'; + { + var_13 = new short[var_21 >>= 8019540572802872320L]; + } + final Tester_Class_0 var_25 = arg_1 ? ((short)1.3614569631193786E308 >= (short)var_20 ? func_0('O', true) : (Tester_Class_0)arg_0) : func_0('e', false); + "cltpxrg".offsetByCodePoints((new short[var_20])[(byte)'F'] & var_20, 942627356); + final Object var_26 = ((new Tester_Class_1[var_21])[var_20]).var_15 = arg_0; + { + var_21 |= 'H'; + } + var_19 = 4705089801895780352L; + var_19 = (var_18 = arg_1 & false) ? var_20 : (! (~var_21 > var_22) ? (new short[var_20])[var_21] : (short)3904907750551380992L); + var_18 = false; + { + var_18 = "aoy".startsWith("ia", 18060804); + if (true) + { + final short var_27 = 4832; + } + else + { + var_18 = (var_18 = arg_1) ? !false : !var_14; + } + var_18 = (var_18 = var_14); + var_19 = 'L'; + } + func_0((false ? ! ((var_21 -= 4.670301365216022E307) > 1.1839209E37F) : (var_18 = false)) ? 
's' : 'R', 'Z' > - ((long)var_21) << 2585724390819764224L & var_25.func_2(var_21, false, var_13 = var_25) != 4918861136400833536L); + double var_28 = 0; + var_21 %= -var_28; + for (byte var_29 = 91; arg_1 && (var_28 < 1 && false); var_19 = var_20) + { + var_19 = (var_18 = arg_1) & (var_18 = false) ? 'm' : '['; + var_28++; + var_18 = var_14; + var_21 += (short)1363703973; + } + var_19 = (var_19 = var_22); + var_18 = (var_18 = false | false ? 1743087391 <= (var_21 >>= 8790741242417599488L) : !arg_1); + var_18 = true | true; + --var_21; + var_18 = !var_14 & false; + "mt".indexOf(var_14 ? new String("fpu") : "awivb", (var_14 ? !true : (var_18 = var_14)) ? + ++var_21 : ~var_20); + return (short)(new float[var_21--])[var_21] & ((var_18 = false) ? (var_21 *= 'N') : var_20 + (short)1680927063794178048L) & 1839004800; + } + + protected static int func_2(Tester_Class_0[][] arg_0) + { + ((new Tester_Class_1[var_20][var_21])[var_20][var_20]).var_15 = ((new int[var_21][var_21][(byte)var_22])[var_21 <<= var_20])[var_20]; + ((new Tester_Class_1[var_20])[var_20]).var_15 = "d"; + int var_30 = 0; + "joxjgpywp".lastIndexOf(1834367264 >> var_21, (byte)7.572305E37F >>> (false ? (short)2.3909862E38F : + - +3939434849912855552L)); + while (var_14 | false ^ var_14 && (var_30 < 1 && true)) + { + var_1 = var_20; + var_30++; + var_13 = new float[var_21][--var_21]; + boolean var_31; + } + var_19 = ((new Tester_Class_1[var_21])[var_20]).var_17 <= (~2158227803735181312L & 6001748808824762368L) ? (short)var_20 : var_20; + var_18 = (var_18 = true); + return (byte)(new short[var_20])[var_20] >>> ((new char[var_21][var_21])[var_21 |= 6074708801143703552L])[var_20]; + } + + private final String func_3(boolean arg_0, short arg_1, short arg_2) + { + var_13 = (Tester_Class_0)((arg_0 ^= arg_0) ? (var_13 = (var_15 = (var_15 = "grfphyrs"))) : (var_13 = new Object[var_21 *= ']'])); + if (true & ! (arg_0 ^= !arg_0 | true)) + { + boolean var_32 = true; + var_19 = --arg_1; + arg_2 <<= var_21; + } + else + { + arg_0 |= false; + } + var_21 >>>= arg_1; + final float var_33 = 2.5500976E38F; + return ""; + } + + private static String func_4(final double arg_0, final Object arg_1, final short[] arg_2, final char arg_3) + { + float var_34; + var_21++; + ((new Tester_Class_1[var_20])[var_20]).var_15 = false ? arg_1 : arg_1; + var_13 = arg_1; + var_19 = var_22; + var_13 = new long[var_21 /= 1038797776 + var_21][--var_21]; + ++var_21; + var_18 = false && false; + var_21--; + "".lastIndexOf("kjro"); + final int var_35 = (var_21 <<= var_21--) * var_21--; + if ("kohilkx".startsWith("gy", var_35)) + { + var_34 = 2.0849673E37F; + } + else + { + double var_36 = arg_0; + } + var_34 = (var_21 /= var_20); + { + func_2(new Tester_Class_0[var_20][var_21]); + var_34 = var_20 * (- ~5805881602002385920L / arg_3) << (short)~8041668398152312832L; + var_13 = (var_13 = "qfwbfdf"); + } + ((new Tester_Class_1[var_20])[var_21 += var_20]).var_15 = false ? func_0(arg_3, var_14) : func_0('J', var_18 = var_14); + var_18 = (var_18 = var_14) & var_14; + if ((new boolean[var_21])[var_21 >>= 121380821]) + { + var_34 = 1382979413; + } + else + { + var_34 = (var_20 & var_20) + (true ? 'I' : arg_3); + } + byte var_37; + ((new Tester_Class_1[var_20][var_21])[var_14 ^ var_14 | !var_14 ? var_20 : var_20][var_21 ^= (short)1692053070 & + ~7232298887878750208L - 1512699919]).var_15 = arg_2; + byte var_38 = 1; + var_38 -= arg_0; + var_34 = arg_3; + return var_14 ? 
"" : "xgkr".toUpperCase(); + } + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_1.var_1 = "; result += Tester.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_1.var_16 = "; result += Tester.Printer.print(var_16); + result += "\n"; + result += "Tester_Class_1.var_20 = "; result += Tester.Printer.print(var_20); + result += "\n"; + result += "Tester_Class_1.var_21 = "; result += Tester.Printer.print(var_21); + result += "\n"; + result += "Tester_Class_1.var_14 = "; result += Tester.Printer.print(var_14); + result += "\n"; + result += "Tester_Class_1.var_18 = "; result += Tester.Printer.print(var_18); + result += "\n"; + result += "Tester_Class_1.var_17 = "; result += Tester.Printer.print(var_17); + result += "\n"; + result += "Tester_Class_1.var_19 = "; result += Tester.Printer.print(var_19); + result += "\n"; + result += "Tester_Class_1.var_22 = "; result += Tester.Printer.print(var_22); + result += "\n"; + result += "Tester_Class_1.var_13 = "; result += Tester.Printer.print(var_13); + result += "\n"; + result += "Tester_Class_1.var_15 = "; result += Tester.Printer.print(var_15); + result += ""; + result += "\n]"; + return result; + } +} + + +class Tester_Class_2 extends Tester_Class_0 { + final int var_43 = 1600723343; + static long var_44 = ~1297640037857117184L; + static String var_45 = "ejaglds"; + double var_46; + static float var_47 = 7.9423827E37F; + static Tester_Class_1[][] var_48; + + + public Tester_Class_2() + { + var_45 = (var_45 = "nkulkweqt"); + var_47 %= (new char[Tester_Class_1.var_21 >>= (short)Tester_Class_1.var_20])[Tester_Class_1.var_20]; + { + Tester_Class_1.var_18 = Tester_Class_1.var_14; + } + var_47 %= 1.559461406041646E308; + var_44 -= Tester_Class_1.var_21++ & ((new Tester_Class_1[Tester_Class_1.var_20])[Tester_Class_1.var_20]).var_17; + var_44 *= false ? (short)Tester_Class_1.var_20 : (short)var_47; + Tester_Class_1.var_13 = (new Tester_Class_1().var_15 = new char[Tester_Class_1.var_20]); + var_46 = 'i'; + double var_49 = var_46 = false ? 
(var_47 *= (var_46 = var_43)) : Tester_Class_1.var_20; + var_49 += 'k'; + } + + + + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_2.var_43 = "; result += Tester.Printer.print(var_43); + result += "\n"; + result += "Tester_Class_2.var_48 = "; result += Tester.Printer.print(var_48); + result += "\n"; + result += "Tester_Class_2.var_44 = "; result += Tester.Printer.print(var_44); + result += "\n"; + result += "Tester_Class_2.var_46 = "; result += Tester.Printer.print(var_46); + result += "\n"; + result += "Tester_Class_2.var_47 = "; result += Tester.Printer.print(var_47); + result += "\n"; + result += "Tester_Class_2.var_1 = "; result += Tester.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_2.var_45 = "; result += Tester.Printer.print(var_45); + result += ""; + result += "\n]"; + return result; + } +} + + +class Tester_Class_3 extends Tester_Class_0 { + byte var_50; + int var_51; + static double var_52; + static boolean var_53 = true; + long var_54; + static short var_55; + short var_56; + + + public Tester_Class_3() + { + var_53 |= false; + (Tester_Class_2.var_45 = "gpbcgq").replaceAll("m".concat(Tester_Class_2.var_45 = "q"), Tester_Class_2.var_45).indexOf(Tester_Class_2.var_45 = "d"); + Tester_Class_2.var_45 = Tester_Class_2.var_45; + double var_68 = 0; + Tester_Class_1.var_19 = (var_55 = Tester_Class_1.var_20); + do + { + var_53 ^= 'T' > Tester_Class_1.var_21-- & (var_53 |= Tester_Class_1.var_14); + Tester_Class_2.var_44 >>= (char)3.928497616986412E307; + var_68++; + new Tester_Class_2().func_2(Tester_Class_1.var_20, !var_53 & Tester_Class_1.var_14, Tester_Class_1.var_13 = (Tester_Class_2.var_45 = Tester_Class_2.var_45)); + } while ((((var_56 = (short)1161292485) != 'M' ? var_53 : Tester_Class_1.var_14) ? Tester_Class_1.var_14 ^ true : var_53) && var_68 < 1); + Tester_Class_2.var_45 = Tester_Class_2.var_45; + ((Tester_Class_1)(Tester_Class_1.var_13 = new Tester_Class_2())).var_15 = Tester_Class_2.var_45; + var_55 = func_1() | ((Tester_Class_1.var_18 = var_53) | (var_53 |= Tester_Class_1.var_14) | Tester_Class_1.var_14 | !Tester_Class_1.var_14) || false ? (short)Tester_Class_2.var_44 : (var_56 = (var_56 = (short)'[')); + var_52 = (var_51 = (var_55 = Tester_Class_1.var_20)); + double var_69 = 0; + Tester_Class_2.var_44 |= (Tester_Class_1.var_14 ? (Tester_Class_2)(Tester_Class_1.var_13 = (Tester_Class_2)(Tester_Class_1.var_13 = Tester_Class_2.var_45)) : (Tester_Class_2)(Tester_Class_0)(Tester_Class_1.var_13 = Tester_Class_2.var_45)).var_43; + do + { + var_51 = 495861255; + var_69++; + } while (var_69 < 3); + Tester_Class_2.var_47 -= Tester_Class_1.var_20; + Tester_Class_2.var_47 %= '['; + } + + + + + static Object func_0(final Tester_Class_0 arg_0, String arg_1, final float arg_2, final long arg_3) + { + (!var_53 | (var_53 &= var_53) ^ false ? new Tester_Class_1() : (Tester_Class_1)(new Tester_Class_0[Tester_Class_1.var_21])[Tester_Class_1.var_21]).var_15 = Tester_Class_1.var_14 ? new Tester_Class_1() : new Tester_Class_1(); + Tester_Class_2.var_47 /= !var_53 || var_53 ? 
(short)(((Tester_Class_2)arg_0).var_46 = (new char[Tester_Class_1.var_21][Tester_Class_1.var_21])[Tester_Class_1.var_20][Tester_Class_1.var_20]) : Tester_Class_1.var_21; + return (new Object[Tester_Class_1.var_21])[Tester_Class_1.var_21]; + } + + boolean func_1() + { + { + Tester_Class_1.var_21 >>= (var_56 = (Tester_Class_1.var_21 |= (Tester_Class_1.var_21 -= Tester_Class_1.var_20))); + Tester_Class_2.var_45 = "w"; + var_51 = Tester_Class_1.var_21; + Object var_57; + ((Tester_Class_2)(Tester_Class_0)((new Object[Tester_Class_1.var_21][Tester_Class_1.var_21])[Tester_Class_1.var_20])[Tester_Class_1.var_20]).var_46 = (var_52 = 1.3957085765622284E308); + } + Tester_Class_1.var_21 &= (var_55 = (byte)(Tester_Class_1.var_14 ? -Tester_Class_1.var_20 : 4290961666344782848L)); + Tester_Class_2.var_45 = Tester_Class_2.var_45; + var_51 = (var_53 ^= ((var_53 &= Tester_Class_1.var_14) ? 'J' : 'M') > (var_56 = Tester_Class_1.var_21)) && (var_53 = Tester_Class_1.var_14) ? (Tester_Class_1.var_21 &= ~Tester_Class_1.var_20) : Tester_Class_1.var_20; + { + final Tester_Class_1 var_58 = (Tester_Class_1)(Tester_Class_0)(Tester_Class_1.var_13 = (new Object[Tester_Class_1.var_21])[Tester_Class_1.var_20]); + Object var_59; + Tester_Class_1.var_21 |= 'X'; + var_53 ^= Tester_Class_1.var_14; + } + int var_60 = 0; + var_53 |= var_53; + for (char var_61 = 'i'; (Tester_Class_1.var_14 ? false : Tester_Class_1.var_14) | (true | Tester_Class_1.var_14) && var_60 < 1; var_53 &= !Tester_Class_1.var_14) + { + var_51 = var_61; + var_60++; + var_61 &= (new short[Tester_Class_1.var_20][Tester_Class_1.var_20])[Tester_Class_1.var_20][Tester_Class_1.var_21]; + Tester_Class_2.var_45 = "vsuy"; + } + Tester_Class_2 var_62 = ((var_53 &= Tester_Class_1.var_14 | Tester_Class_1.var_14 || Tester_Class_1.var_14) ? Tester_Class_1.var_14 : "hgwne".startsWith("etyhd", var_60)) ? (var_53 ? (Tester_Class_2)(Tester_Class_1.var_13 = "uyiaxtqc") : (Tester_Class_2)(Tester_Class_1.var_13 = Tester_Class_2.var_45)) : new Tester_Class_2(); + var_62 = var_62; + float var_63; + Object var_64; + Tester_Class_2.var_44 <<= 'v'; + String var_65; + { + var_51 = Tester_Class_1.var_21; + } + var_55 = true ? (var_56 = Tester_Class_1.var_20) : (var_55 = Tester_Class_1.var_20); + var_56 = Tester_Class_1.var_21; + Tester_Class_1.var_21 |= var_60; + Object var_66; + Tester_Class_2 var_67; + return true & Tester_Class_1.var_14 ^ (false ? var_53 : var_53); + } + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_3.var_51 = "; result += Tester.Printer.print(var_51); + result += "\n"; + result += "Tester_Class_3.var_54 = "; result += Tester.Printer.print(var_54); + result += "\n"; + result += "Tester_Class_3.var_52 = "; result += Tester.Printer.print(var_52); + result += "\n"; + result += "Tester_Class_3.var_55 = "; result += Tester.Printer.print(var_55); + result += "\n"; + result += "Tester_Class_3.var_56 = "; result += Tester.Printer.print(var_56); + result += "\n"; + result += "Tester_Class_3.var_1 = "; result += Tester.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_3.var_50 = "; result += Tester.Printer.print(var_50); + result += "\n"; + result += "Tester_Class_3.var_53 = "; result += Tester.Printer.print(var_53); + result += ""; + result += "\n]"; + return result; + } +} + +public class Tester { + final long var_70 = Tester_Class_2.var_44; + int var_71; + static double var_72; + static short var_73 = (Tester_Class_3.var_53 &= (Tester_Class_3.var_53 ^= Tester_Class_3.var_53)) ? 
(short)(byte)(Tester_Class_3.var_55 = Tester_Class_1.var_20) : (Tester_Class_3.var_55 = Tester_Class_1.var_20); + final static short var_74 = (Tester_Class_3.var_53 &= Tester_Class_3.var_53) ? (Tester_Class_3.var_53 ? var_73 : var_73++) : (var_73 *= (Tester_Class_1.var_21 |= var_73)); + float var_75; + + + protected final Tester_Class_2 func_0() + { + Tester_Class_1.var_21 ^= ~Tester_Class_1.var_21; + if (false) + { + ((Tester_Class_3)(new Object[Tester_Class_1.var_21])[Tester_Class_1.var_21 -= + + (Tester_Class_2.var_44 >>>= Tester_Class_1.var_21)]).var_50 = (Tester_Class_1.var_21 &= (var_71 = 554295231)); + } + else + { + Tester_Class_2.var_47 += 'H'; + } + final Tester_Class_0 var_76 = ((new Tester_Class_0[Tester_Class_1.var_20][Tester_Class_1.var_21])[Tester_Class_1.var_20])[Tester_Class_1.var_20]; + (Tester_Class_1.var_14 ? (Tester_Class_2)var_76 : (Tester_Class_2)var_76).var_46 = (var_73 %= var_74 / (((new Tester_Class_2[Tester_Class_1.var_20])[Tester_Class_1.var_21 |= Tester_Class_1.var_20]).var_46 = Tester_Class_1.var_22)); + var_73 |= ((Tester_Class_2)(Tester_Class_1.var_13 = var_76)).var_43 | Tester_Class_1.var_20; + return new Tester_Class_2(); + } + + private static Tester_Class_3 func_1(byte arg_0, Tester_Class_1 arg_1, Tester_Class_1 arg_2, final int arg_3) + { + arg_0 <<= '`'; + return false ? (Tester_Class_3)(Tester_Class_0)(arg_1.var_15 = (arg_1 = arg_2)) : (Tester_Class_3)((new Tester_Class_0[Tester_Class_1.var_20][arg_0])[Tester_Class_1.var_20])[Tester_Class_1.var_20]; + } + + public static String execute() + { + try { + Tester t = new Tester(); + try { t.test(); } + catch(Throwable e) { } + try { return t.toString(); } + catch (Throwable e) { return "Error during result conversion to String"; } + } catch (Throwable e) { return "Error during test execution"; } + } + + public static void main(String[] args) + { + for (int i = 0; i < 20000; i++) { + Tester t = new Tester(); + try { t.test(); } + catch(Throwable e) { } + if (t.var_71 != 0 || + t.var_70 != -1297640037857117185L || + t.var_72 != 0.0 || + t.var_75 != 0.0 || + t.var_73 != -1 || + t.var_74 != 15129) { + throw new InternalError("wrong answer"); + } + } + } + + private void test() + { + long var_77 = 0L; + var_73 /= (Tester_Class_2.var_47 = 'D' | 'Q'); + Tester_Class_2.var_47 *= 't'; + while (var_77 < 36) + { + var_73 += Tester_Class_1.var_22; + Tester_Class_2.var_47 += Tester_Class_1.var_20; + var_77++; + Tester_Class_2.var_45 = ""; + Tester_Class_2.var_45 = (Tester_Class_2.var_45 = Tester_Class_2.var_45); + } + if (Tester_Class_3.var_53 |= false) + { + int var_78 = 0; + (false ? "idipdjrln" : "l").startsWith(Tester_Class_2.var_45); + while ((Tester_Class_3.var_53 |= (Tester_Class_3.var_53 &= ! (Tester_Class_1.var_18 = true)) | Tester_Class_3.var_53) && (var_78 < 15 && (Tester_Class_3.var_53 &= Tester_Class_1.var_14))) + { + Tester_Class_2.var_44 <<= 'b'; + var_78++; + var_72 = var_74; + var_71 = (char)6792782617594333184L; + } + float var_79 = Tester_Class_2.var_47 /= 1.5148047552641134E308; + ((new boolean[Tester_Class_1.var_20])[Tester_Class_1.var_21 <= (Tester_Class_1.var_21 -= 9.675021723726166E307) / - + (var_72 = 4.3844763012510596E307) ? (byte)(Tester_Class_2.var_44 += ~Tester_Class_1.var_21) : (Tester_Class_1.var_21 += 1.7430965313164616E308)] ? 
(Tester_Class_2)(new Tester_Class_1().var_15 = func_0()) : new Tester_Class_2()).var_46 = (var_72 = (Tester_Class_1.var_21 *= 'j')); + Tester_Class_1.var_13 = (new Tester_Class_3[Tester_Class_1.var_21 >>>= var_78][Tester_Class_1.var_21])[Tester_Class_1.var_21][Tester_Class_1.var_20]; + } + else + { + long var_80 = 0L; + ((Tester_Class_2)(Tester_Class_1.var_13 = new long[Tester_Class_1.var_21])).var_46 = 'r'; + do + { + final float var_81 = 7.3633934E37F; + var_80++; + var_73 ^= Tester_Class_2.var_44; + } while (Tester_Class_3.var_53 && var_80 < 4); + Tester_Class_1.var_18 = Tester_Class_2.var_47 >= var_73; + Tester_Class_2.var_45 = "xvodcylp"; + Tester_Class_2.var_45.codePointCount("indreb".charAt(+(new byte[Tester_Class_1.var_20][Tester_Class_1.var_20])[Tester_Class_1.var_21][Tester_Class_1.var_21]) * ~ (Tester_Class_1.var_21 %= (var_71 = --var_73)), ((Tester_Class_3.var_53 ^= Tester_Class_2.var_45.equalsIgnoreCase("rkxwa")) || Tester_Class_2.var_47 <= (Tester_Class_2.var_47 %= -var_80) ? (Tester_Class_1.var_21 ^= var_70) : var_73) & (var_71 = 'k')); + Tester_Class_1.var_13 = ((new long[Tester_Class_1.var_21][Tester_Class_1.var_20][Tester_Class_1.var_21])[Tester_Class_1.var_21])[Tester_Class_1.var_21]; + } + var_73 <<= (Tester_Class_1.var_18 = false) ? 't' : (false ? 'E' : 'u'); + var_73++; + int var_82 = 0; + Tester_Class_1.var_13 = func_1(Tester_Class_1.var_20, new Tester_Class_1(), (new Tester_Class_1[Tester_Class_1.var_21])[Tester_Class_1.var_21], 'M' & var_74); + "gdrlrsubb".substring(12438522, var_82); + Tester_Class_2.var_44 |= (((new Tester_Class_3[Tester_Class_1.var_21][Tester_Class_1.var_21])[Tester_Class_1.var_21 >>= 7993744087962264576L][Tester_Class_1.var_21]).var_51 = Tester_Class_3.var_53 ? 'B' : '['); + final long var_83 = ~ (4544638910183665664L << (((Tester_Class_3)((new Tester_Class_0[Tester_Class_1.var_20][Tester_Class_1.var_21])[Tester_Class_1.var_21])[Tester_Class_1.var_21]).var_56 = (Tester_Class_3.var_53 &= Tester_Class_3.var_53) ? Tester_Class_1.var_21 : Tester_Class_1.var_20)); + Tester_Class_2.var_45 = Tester_Class_2.var_45; + while (var_82 < 2 && Tester_Class_3.var_53 & (Tester_Class_3.var_53 ^= !false)) + { + (Tester_Class_3.var_53 ? "xqeisnyf" : (Tester_Class_2.var_45 = (Tester_Class_2.var_45 = (Tester_Class_2.var_45 = Tester_Class_2.var_45)))).concat(Tester_Class_2.var_45 = "i"); + var_82++; + boolean var_84 = false; + Tester_Class_2.var_45 = Tester_Class_2.var_45; + } + var_71 = ~Tester_Class_2.var_44 != Tester_Class_2.var_44-- ? (var_73 = var_73) : (var_73 >>>= var_73); + char var_85; + Tester_Class_3.var_53 |= (Tester_Class_3.var_53 ^= true); + int var_86 = 0; + Tester_Class_1.var_21 %= (var_73 | (Tester_Class_1.var_21 *= 9.831691E37F)) * (Tester_Class_1.var_21 += 6784278051481715712L); + while (Tester_Class_3.var_53 && (var_86 < 24 && ((((Tester_Class_3.var_53 ^= true) ? Tester_Class_3.var_53 : Tester_Class_1.var_14) ? !Tester_Class_3.var_53 : Tester_Class_3.var_53) ? (Tester_Class_1.var_18 = Tester_Class_3.var_53) : Tester_Class_1.var_14 || true))) + { + final byte var_87 = (byte)((false & true ? Tester_Class_1.var_20 : 257407175) & 4242055901066916864L * (var_73 *= 1621204618) / ((((Tester_Class_1)(new Object[(byte)4.925362697409246E307])[Tester_Class_1.var_21]).var_17 ^ (var_71 = var_86)) & 1859382584)); + var_86++; + Tester_Class_2.var_45 = (Tester_Class_2.var_45 = (Tester_Class_2.var_45 = "arceo")); + float var_88; + } + "a".lastIndexOf(var_71 = Tester_Class_3.var_53 ^ false ? 
(var_71 = 1058420888) : Tester_Class_1.var_20); + int var_89 = 0; + { + var_71 = 661164411; + } + boolean var_90; + --var_73; + Tester_Class_2.var_45.concat(Tester_Class_2.var_45); + { + var_85 = (Tester_Class_3.var_53 ? Tester_Class_3.var_53 : Tester_Class_3.var_53) ? 'R' : '['; + } + ((new Tester_Class_2[Tester_Class_1.var_21][Tester_Class_1.var_21])[Tester_Class_1.var_20][Tester_Class_1.var_20]).var_46 = Tester_Class_1.var_20; + final float var_91 = ((new Tester_Class_0[Tester_Class_1.var_21][Tester_Class_1.var_21])[Tester_Class_1.var_20][Tester_Class_1.var_21 -= Tester_Class_1.var_21]).equals(((new Tester_Class_1[Tester_Class_1.var_20])[Tester_Class_1.var_21]).var_15 = (Tester_Class_2.var_45 = Tester_Class_2.var_45)) ? (var_71 = Tester_Class_1.var_20) : 2.2259766E38F + Tester_Class_2.var_44; + Tester_Class_2.var_47 *= ((Tester_Class_2)(Tester_Class_0)(Tester_Class_1.var_13 = Tester_Class_2.var_45)).var_43; + Tester_Class_2.var_45 = Tester_Class_2.var_45; + Tester_Class_3.var_53 &= Tester_Class_1.var_14; + while (Tester_Class_1.var_20 >= ++Tester_Class_1.var_21 && var_89 < 2) + { + Tester_Class_1.var_13 = (Tester_Class_3)(new Tester_Class_0[Tester_Class_1.var_21])[Tester_Class_1.var_21]; + var_89++; + if (true) + { + Tester_Class_3.var_53 |= true; + break; + } + else + { + Tester_Class_2 var_92; + } + ((Tester_Class_3)((Tester_Class_3.var_53 |= Tester_Class_3.var_53) ? (new Tester_Class_1().var_15 = (Tester_Class_0)(Tester_Class_1.var_13 = new boolean[Tester_Class_1.var_20][Tester_Class_1.var_21])) : new Tester_Class_0[Tester_Class_1.var_21][Tester_Class_1.var_21])).var_54 = (Tester_Class_1.var_21 = (Tester_Class_1.var_21 /= (Tester_Class_2.var_44 |= (int)(Tester_Class_1.var_21 >>>= var_82)))); + ((Tester_Class_3)(Tester_Class_1.var_13 = (new Tester_Class_1().var_15 = new Tester_Class_1()))).var_51 = Tester_Class_1.var_20; + final char var_93 = 'u'; + ((Tester_Class_2)(new Tester_Class_1().var_15 = (Tester_Class_2.var_45 = Tester_Class_2.var_45))).var_46 = var_93; + Tester_Class_2.var_45.toUpperCase(); + Tester_Class_2.var_45 = "mhk"; + (true | false ? new Tester_Class_1() : (new Tester_Class_1[Tester_Class_1.var_20])[Tester_Class_1.var_20]).var_15 = (Tester_Class_1)(((new Tester_Class_1[Tester_Class_1.var_21 |= Tester_Class_1.var_20][Tester_Class_1.var_21])[Tester_Class_1.var_21][Tester_Class_1.var_21]).var_15 = (Tester_Class_1.var_13 = (Tester_Class_1)(Tester_Class_1.var_13 = (Tester_Class_2.var_45 = "ofkbg")))); + } + float var_94 = 0F; + Tester_Class_2.var_44 |= (var_73 >>>= (var_85 = (var_85 = 'j'))); + Tester_Class_3.var_52 = 1835242863964218368L; + do + { + int var_95 = 1361237611; + var_94++; + Tester_Class_3.var_53 ^= (Tester_Class_3.var_53 |= Tester_Class_1.var_14); + } while (var_94 < 16); + { + var_73 = var_73--; + Tester_Class_2.var_45 = (Tester_Class_1.var_14 ? Tester_Class_1.var_14 : !false) ? "oaxg" : "igdnja"; + } + ((new Tester_Class_1[Tester_Class_1.var_21])[Tester_Class_1.var_21]).equals(new Tester_Class_1().var_15 = (Tester_Class_2.var_45 = "agdnue").charAt(1416972150) != Tester_Class_2.var_47 ? 
new Tester_Class_1() : new Tester_Class_1()); + byte var_96 = Tester_Class_1.var_21 >>>= (var_85 = (var_85 = '`')); + Tester_Class_2.var_45 = ""; + Tester_Class_2.var_47 += Tester_Class_2.var_47; + Tester_Class_2.var_45 = Tester_Class_2.var_45; + } + public String toString() + { + String result = "[\n"; + result += "Tester.var_71 = "; result += Printer.print(var_71); + result += "\n"; + result += "Tester.var_70 = "; result += Printer.print(var_70); + result += "\n"; + result += "Tester.var_72 = "; result += Printer.print(var_72); + result += "\n"; + result += "Tester.var_75 = "; result += Printer.print(var_75); + result += "\n"; + result += "Tester.var_73 = "; result += Printer.print(var_73); + result += "\n"; + result += "Tester.var_74 = "; result += Printer.print(var_74); + result += ""; + result += "\n]"; + return result; + } + static class Printer + { + public static String print(boolean arg) { return String.valueOf(arg); } + public static String print(byte arg) { return String.valueOf(arg); } + public static String print(short arg) { return String.valueOf(arg); } + public static String print(char arg) { return String.valueOf((int)arg); } + public static String print(int arg) { return String.valueOf(arg); } + public static String print(long arg) { return String.valueOf(arg); } + public static String print(float arg) { return String.valueOf(arg); } + public static String print(double arg) { return String.valueOf(arg); } + + + public static String print(Object arg) + { + return print_r(new java.util.Stack(), arg); + } + + private static String print_r(java.util.Stack visitedObjects, Object arg) + { + String result = ""; + if (arg == null) + result += "null"; + else + if (arg.getClass().isArray()) + { + for (int i = 0; i < visitedObjects.size(); i++) + if (visitedObjects.elementAt(i) == arg) return ""; + + visitedObjects.push(arg); + + final String delimiter = ", "; + result += "["; + + if (arg instanceof Object[]) + { + Object[] array = (Object[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print_r(visitedObjects, array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof boolean[]) + { + boolean[] array = (boolean[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof byte[]) + { + byte[] array = (byte[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof short[]) + { + short[] array = (short[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof char[]) + { + char[] array = (char[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof int[]) + { + int[] array = (int[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof long[]) + { + long[] array = (long[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof float[]) + { + float[] array = (float[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += 
delimiter; + } + } + else + if (arg instanceof double[]) + { + double[] array = (double[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + + result += "]"; + visitedObjects.pop(); + + } else + { + result += arg.toString(); + } + + return result; + } + } +} + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6661247/Test.java 2009-08-01 04:21:31.764094024 +0100 @@ -0,0 +1,156 @@ +/* + * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +/* + * @test + * @bug 6661247 + * @summary Internal bug in 32-bit HotSpot optimizer while bit manipulations + */ + +import java.util.Random; +import java.nio.*; + +// This isn't a completely reliable test for 6661247 since the results +// depend on what the local schedule looks like but it does reproduce +// the issue in current builds. + +public class Test { + + public static void test(boolean[] src, int srcPos, LongBuffer dest, long destPos, int count) { + int countStart = (destPos & 63) == 0 ? 0 : 64 - (int)(destPos & 63); + if (countStart > count) + countStart = count; + for (int srcPosMax = srcPos + countStart; srcPos < srcPosMax; srcPos++, destPos++) { + if (src[srcPos]) + dest.put((int)(destPos >>> 6), dest.get((int)(destPos >>> 6)) | 1L << (destPos & 63)); + else + dest.put((int)(destPos >>> 6), dest.get((int)(destPos >>> 6)) & ~(1L << (destPos & 63))); + } + count -= countStart; + int cnt = count >>> 6; + for (int k = (int)(destPos >>> 6), kMax = k + cnt; k < kMax; k++) { + int low = (src[srcPos] ? 1 : 0) + | (src[srcPos + 1] ? 1 << 1 : 0) + | (src[srcPos + 2] ? 1 << 2 : 0) + | (src[srcPos + 3] ? 1 << 3 : 0) + | (src[srcPos + 4] ? 1 << 4 : 0) + | (src[srcPos + 5] ? 1 << 5 : 0) + | (src[srcPos + 6] ? 1 << 6 : 0) + | (src[srcPos + 7] ? 1 << 7 : 0) + | (src[srcPos + 8] ? 1 << 8 : 0) + | (src[srcPos + 9] ? 1 << 9 : 0) + | (src[srcPos + 10] ? 1 << 10 : 0) + | (src[srcPos + 11] ? 1 << 11 : 0) + | (src[srcPos + 12] ? 1 << 12 : 0) + | (src[srcPos + 13] ? 1 << 13 : 0) + | (src[srcPos + 14] ? 1 << 14 : 0) + | (src[srcPos + 15] ? 1 << 15 : 0) + | (src[srcPos + 16] ? 1 << 16 : 0) + | (src[srcPos + 17] ? 1 << 17 : 0) + | (src[srcPos + 18] ? 1 << 18 : 0) + | (src[srcPos + 19] ? 1 << 19 : 0) + | (src[srcPos + 20] ? 1 << 20 : 0) + | (src[srcPos + 21] ? 1 << 21 : 0) + | (src[srcPos + 22] ? 1 << 22 : 0) + | (src[srcPos + 23] ? 1 << 23 : 0) + | (src[srcPos + 24] ? 1 << 24 : 0) + | (src[srcPos + 25] ? 1 << 25 : 0) + | (src[srcPos + 26] ? 
1 << 26 : 0) + | (src[srcPos + 27] ? 1 << 27 : 0) + | (src[srcPos + 28] ? 1 << 28 : 0) + | (src[srcPos + 29] ? 1 << 29 : 0) + | (src[srcPos + 30] ? 1 << 30 : 0) + | (src[srcPos + 31] ? 1 << 31 : 0) + ; + srcPos += 32; + int high = (src[srcPos] ? 1 : 0) // PROBLEM! + | (src[srcPos + 1] ? 1 << 1 : 0) + | (src[srcPos + 2] ? 1 << 2 : 0) + | (src[srcPos + 3] ? 1 << 3 : 0) + | (src[srcPos + 4] ? 1 << 4 : 0) + | (src[srcPos + 5] ? 1 << 5 : 0) + | (src[srcPos + 6] ? 1 << 6 : 0) + | (src[srcPos + 7] ? 1 << 7 : 0) + | (src[srcPos + 8] ? 1 << 8 : 0) + | (src[srcPos + 9] ? 1 << 9 : 0) + | (src[srcPos + 10] ? 1 << 10 : 0) + | (src[srcPos + 11] ? 1 << 11 : 0) + | (src[srcPos + 12] ? 1 << 12 : 0) + | (src[srcPos + 13] ? 1 << 13 : 0) + | (src[srcPos + 14] ? 1 << 14 : 0) + | (src[srcPos + 15] ? 1 << 15 : 0) + | (src[srcPos + 16] ? 1 << 16 : 0) + | (src[srcPos + 17] ? 1 << 17 : 0) + | (src[srcPos + 18] ? 1 << 18 : 0) + | (src[srcPos + 19] ? 1 << 19 : 0) + | (src[srcPos + 20] ? 1 << 20 : 0) + | (src[srcPos + 21] ? 1 << 21 : 0) + | (src[srcPos + 22] ? 1 << 22 : 0) + | (src[srcPos + 23] ? 1 << 23 : 0) + | (src[srcPos + 24] ? 1 << 24 : 0) + | (src[srcPos + 25] ? 1 << 25 : 0) + | (src[srcPos + 26] ? 1 << 26 : 0) + | (src[srcPos + 27] ? 1 << 27 : 0) + | (src[srcPos + 28] ? 1 << 28 : 0) + | (src[srcPos + 29] ? 1 << 29 : 0) + | (src[srcPos + 30] ? 1 << 30 : 0) + | (src[srcPos + 31] ? 1 << 31 : 0) + ; + srcPos += 32; + dest.put(k, ((long)low & 0xFFFFFFFFL) | (((long)high) << 32)); + destPos += 64; + } + int countFinish = count & 63; + for (int srcPosMax = srcPos + countFinish; srcPos < srcPosMax; srcPos++, destPos++) { + if (src[srcPos]) + dest.put((int)(destPos >>> 6), dest.get((int)(destPos >>> 6)) | 1L << (destPos & 63)); + else + dest.put((int)(destPos >>> 6), dest.get((int)(destPos >>> 6)) & ~(1L << (destPos & 63))); + } + } + public static void main(String[] args) { + Random r = new Random(); + int entries = 1000; + boolean[] src = new boolean[entries * 64]; + long[] dest = new long[entries]; + long[] result = new long[entries]; + + for (int c = 0; c < 2000; c++) { + for (int i = 0; i < entries; i++) { + long l = r.nextLong(); + for (int bit = 0; bit < 64; bit++) { + src[i * 64 + bit] = (l & (1L << bit)) != 0; + } + dest[i] = 0; + result[i] = l; + } + test(src, 0, LongBuffer.wrap(dest, 0, dest.length), 0, src.length); + for (int i = 0; i < entries; i++) { + if (dest[i] != result[i]) { + throw new InternalError(i + ": " + Long.toHexString(dest[i]) + " != " + Long.toHexString(result[i])); + } + } + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6663848/Tester.java 2009-08-01 04:21:32.206137360 +0100 @@ -0,0 +1,478 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/* + * @test + * @bug 6663848 + * @summary assert(i < Max(),"oob") + */ + +/* Complexity upper bound: 296055 ops */ + +final class Tester_Class_0 { + byte var_1; + static double var_2; + String var_3; + char var_4 = 'n'; + static int var_5; + String var_6; + final long var_7 = 4161100809902398464L; + static String var_8 = "a"; + + + public Tester_Class_0() + { + switch (((false ^ !"n".startsWith("kmmhtohv", 553985525) ? true : false) ? 'k' : (var_4 *= - (var_2 = 3.1182935E38F))) - (~2013121027650726912L >= 2.929692E38F / (var_1 = (byte)4.2723157E37F) ? var_4 | (short)var_7 : ~ ((byte)"".indexOf("yuno", 4922080)))) + { + case 125: + + case ']': + + case 6: + var_5 = (false ? false : 3708707602755734528L >= 1648075631) ? var_4 : (false ? var_4 : ++var_4); + break; + + case 46: + Object var_15; + ++var_4; + float var_16 = 3.1085987E38F; + var_5 = 'h'; + var_1 = true ? (byte)9.482988718680618E307 : (false && false ? (byte)var_7 : (byte)6793807430041920512L); + final byte var_17 = var_1 = (337740577 ^ ~8932537004307666944L) / (short)var_16 - (byte)var_7 << var_7 > ~1539422023641354240L ? (false ? (byte)var_4 : (byte)9.302678E37F) : (byte)(var_5 = (byte)1.3007792E38F); + var_15 = "an"; + break; + + case 29: + var_3 = (var_6 = (var_8 = "kgc")); + break; + + case 60: + + } + char var_18; + ((new Tester_Class_0[var_1 = (byte)(var_2 = 93813743)])[var_1 = (byte)var_4]).var_1 = (var_1 = (var_1 = (byte)5.1405316E37F)); + var_8 = var_8; + float var_19 = 0F; + var_2 = var_4; + do + { + var_4 >>= var_7; + var_19++; + var_4 %= true ? (short)7643330105057892352L : (short)1.1014013E38F; + } while (var_19 < 2 && (! (true & (!true && false)) && true)); + var_4++; + int var_20 = 0; + var_4 = ~var_7 == (var_1 = (byte)var_7) | (float)var_20 <= 'H' ? 'r' : (var_4 |= (byte)var_4); + for (var_6 = (var_8 = "wqmnvxava"); (false ? (short)+ ~3540350558052792320L : + ~ ~3244965056572428288L - (var_4 *= 8.314953959831226E307)) <= +9.34280058703911E307 && var_20 < 6; var_2 = 3.0507823E38F) + { + var_2 = ~ ((byte)844279629935048704L) ^ (var_19 <= (false ? '\\' : 'B') ? (byte)(short)(var_1 = (byte)(short)var_19) : (var_1 = (var_1 = (var_1 = (byte)'T')))); + var_20++; + var_5 = (short)(var_7 >>> (! !true & true ? 'D' : (var_4 -= 1.1444072012663494E308))); + (var_6 = "jnjbrmaus").compareTo(var_3 = false ? "pfmv" : (var_8 = var_8)); + } + var_2 = func_0(1248385981, 'V' != '[' ? (short)1.0082348960424545E308 : (byte)var_4, new double[var_1 = (byte)'p'][(byte)var_19], 1.3646683639847343E308); + var_5 = (var_1 = (var_1 = (true ^ false ? "bfh".startsWith(var_8) : true || !true) | false ? (byte)(var_2 = - ~var_7) : (var_1 = (byte)var_19))); + var_3 = (var_3 = "dn"); + var_2 = false ? (var_1 = (byte)9.136750130102702E307) : var_7; + } + + + + + final short func_0(int arg_0, short arg_1, double[][] arg_2, double arg_3) + { + double[][] var_9 = arg_2; + arg_0 <<= true ? (byte)- +1.1174307E37F : (var_1 = (byte)911334714); + switch ("touoh".endsWith(var_8) ^ ! ! (++var_4 != 1.8567045E38F) ? 
(var_4 <<= arg_1) : 'E') + { + case 'a': + + case 46: + + case 's': + + case 50: + ((new Tester_Class_0[(byte)arg_0][(byte)- (- + - - + - +1.775205E38F - (arg_1 |= 'Z'))])[var_1 = (var_1 = (byte)4.020531E37F)][var_1 = (var_1 = (byte)883328311549528064L)]).var_4 &= (var_6 = var_8).charAt(arg_0); + var_3 = false ? var_8 : "amfijbpwa"; + var_4 -= -1.4699719646972257E308; + var_2 = (var_1 = (byte)(arg_1 += 1.6757431E38F)); + var_1 = (var_1 = (var_1 = (byte)+1.4416583523884388E308)); + final Object var_10 = (new Object[(byte)(arg_1 += var_4)])[var_1 = (byte)- +1.0281942E38F]; + arg_1 = arg_1; + break; + + case 7: + var_8 = var_8 + var_8; + break; + + case 'N': + + case 'V': + + case 56: + + case 'I': + + } + arg_0 &= arg_1; + var_3 = false ? var_8 : "vpaj"; + var_4 = (var_4 *= arg_1); + arg_3 *= (var_4 = var_4); + final char var_11 = true ? var_4 : (char)(var_4 << (var_1 = (byte)(arg_1 &= arg_0))); + boolean var_12; + var_4 ^= '`'; + "nkj".startsWith(var_6 = "wrcnrdd"); + var_1 = (byte)~var_7; + var_6 = (var_8 = "ul"); + return (var_12 = 3548576322219573248L <= var_4++ & ((var_12 = !true) || (var_12 = !false | false))) ? arg_1-- : (var_1 = (byte)+ (var_2 = 2.7633542E38F)); + } + + protected final void func_1(final String arg_0, Object arg_1, short arg_2) + { + arg_2 *= ~8267766955221100544L; + arg_1 = (var_6 = var_8); + arg_2 <<= false ? (var_1 = (var_1 = (var_1 = (byte)1.6645553629318776E308))) : arg_2; + } + + protected final static float func_2(byte arg_0, final String arg_1) + { + var_8 = arg_1; + char var_13 = ((new Tester_Class_0[arg_0 |= ']'])[arg_0]).var_4--; + var_5 = 907889433; + { + var_13 ^= (var_5 = var_13); + } + var_8 = arg_1; + var_5 = (byte)1759688161; + var_8 = (new String[arg_0 >>>= (short)1072761211])[arg_0]; + return 5.108221E37F; + } + + private static boolean func_3(boolean arg_0, final boolean arg_1) + { + var_2 = ((new Tester_Class_0[(byte)(short)'H'])[(byte)(short)(var_2 = (short)'k')]).var_4; + if (false) + { + var_8 = "cl"; + } + else + { + final byte[] var_14 = new byte[true ? (byte)(- ((byte)9.760296114722793E307) | ~1867374212153383936L) : (byte)(short)'Q']; + var_2 = (float)~3838271533006646272L / (- ~ ~1786841397228277760L ^ ~3695911615719734272L & 'Z'); + } + { + var_8 = var_8; + } + ((new Tester_Class_0[(byte)(var_2 = (short)~ - +4818709334539164672L)])[(byte)'W']).var_1 = true || false & arg_0 ^ (arg_1 ^ arg_0 ? arg_0 : (arg_0 ^= true)) ? (byte)1.5309163701271477E308 : (byte)3.0904342E38F; + ((new Tester_Class_0[(byte)756871578277111808L][(byte)+ + -3.0687752E38F])[(byte)'f'][(byte)1544156315]).var_6 = (var_8 = "vqey"); + return arg_1; + } + + public final char func_4(short arg_0) + { + { + var_6 = (var_3 = "hjtjar"); + var_1 = false ? (byte)4.02486350499973E307 : (byte)1.3222663E38F; + } + var_1 = (new byte[var_1 = (var_1 = (byte)1770517884)])[var_1 = (byte)arg_0]; + var_4++; + --arg_0; + var_5 = true ? 'D' : (var_4 ^= (var_5 = 134858941)); + return (char)~ (7273058621469586432L << (byte)3.1756883E38F ^ (false ? (byte)(var_5 = var_4) : (arg_0 >>= 6165812289376474112L))) < (2046127339 ^ + ((byte)arg_0)) ? (!false ? (var_4 %= 1.8187417377124746E307) : (var_4 *= 445936805)) : var_4; + } + +} + + +class Tester_Class_1 { + Tester_Class_0[][] var_21; + static long var_22 = 6671342492736446464L; + float var_23 = 2.9329673E38F; + final int var_24 = 1834862519; + int var_25 = 69920645; + static char var_26; + static Object var_27; + static int var_28 = Tester_Class_0.var_5 = false ? 'U' : (var_26 = (var_26 = 'R')); + + + public Tester_Class_1() + { + (false ? 
(Tester_Class_0)(var_27 = Tester_Class_0.var_8) : (Tester_Class_0)(var_27 = "a")).var_1 = (short)(var_23 %= var_23) >= (byte)1217257602 | var_25 == (char)(var_23 += var_23) ? (byte)new Tester_Class_0().var_4-- : ((true | false) ^ !false ? (byte)6.122806E37F : (byte)1.084542872057614E308); + Tester_Class_0.var_2 = new Tester_Class_0().var_7; + --var_22; + boolean var_32 = ! ((new Tester_Class_0().var_1 = (((new Tester_Class_0[(byte)var_22])[(byte)var_23]).var_1 = false ? (byte)(var_23 = var_28) : (byte)1.5858707076311894E308)) != (char)+var_23); + var_25 -= (true ? (byte)5.488240359086226E307 : (((Tester_Class_0)(var_27 = Tester_Class_0.var_8)).var_1 = (byte)'L')) * (Tester_Class_0.var_2 = 7.045106259776882E307); + Object var_33 = (var_32 ^= var_32) ? (var_27 = (Tester_Class_0.var_8 = (Tester_Class_0.var_8 = "gaemnaep"))) : (new Tester_Class_0[(byte)'g'])[(byte)(short)271735827]; + byte var_34 = var_32 ? (byte)((Tester_Class_0)var_33).var_7 : ((var_32 &= true) ? (byte)(Tester_Class_0.var_2 = 1.6975344767401616E307) : (byte)- ((double)var_22)); + Tester_Class_0.var_2 = 1.4644308179397427E308; + var_28 /= (short)1681483575; + Tester_Class_0.var_2 = (var_34 <<= (var_25 ^= ~ (var_22 |= (var_22 = var_22)))); + var_23 *= (char)(var_28 *= var_32 ? var_34 ^ --var_34 : 3220732582528450560L); + if ((((Tester_Class_0)var_33).var_4 &= var_34) != (short)var_28) + { + Tester_Class_0.var_8 = ((false ? (Tester_Class_0)var_33 : (Tester_Class_0)var_33).var_6 = Tester_Class_0.var_8); + } + else + { + var_33 = false | (var_32 ^= true) ? ((new Tester_Class_0[var_34][var_34])[var_34])[var_34] : (Tester_Class_0)var_33; + } + if (false) + { + var_22 = 107656877775594496L; + } + else + { + ((var_32 &= (var_32 |= var_32)) || (var_23 /= var_25) == (Tester_Class_0.var_2 = 7649348100017113088L) ? (new Tester_Class_0[var_34])[var_34] : (Tester_Class_0)var_33).var_4 >>>= (((new Tester_Class_0[var_34])[var_34 <<= 'C']).var_6 = (((Tester_Class_0)(var_33 = (var_33 = var_33))).var_6 = ++var_28 > var_23 ? "qgq" : (Tester_Class_0.var_8 = Tester_Class_0.var_8))).equalsIgnoreCase(Tester_Class_0.var_8) ? var_34++ : ++var_34; + } + String[][] var_35; + Tester_Class_0.var_8 = Tester_Class_0.var_8; + var_27 = (new Tester_Class_0[var_34][var_34 /= 226411329])[false ? --var_34 : (var_34 /= 1.7237614E38F)][var_34]; + var_23 %= var_25; + } + + + + protected Object clone() + { + var_28 >>>= (new Tester_Class_0().var_1 = (byte)new Tester_Class_0().var_4); + ((Tester_Class_0)(var_27 = (Tester_Class_0.var_8 = (Tester_Class_0.var_8 = (Tester_Class_0.var_8 = "ybndugrur"))))).var_3 = Tester_Class_0.var_8; + var_22--; + new Tester_Class_0().var_4 -= (new Tester_Class_0().var_1 = (byte)'O'); + { + short var_29 = 12378; + } + Tester_Class_0.var_8 = "fd"; + "".lastIndexOf("bgsxwmil"); + new Tester_Class_0().var_6 = (Tester_Class_0.var_2 = -1.7590174497347678E308) == (var_26 = 'o') | true && !false ? Tester_Class_0.var_8 : (((Tester_Class_0)(var_27 = "")).var_6 = Tester_Class_0.var_8); + return var_27 = (var_27 = (var_27 = (var_27 = (Tester_Class_0.var_8 = Tester_Class_0.var_8).substring(var_24)))); + } + + public boolean equals(Object obj) + { + --var_28; + var_23 /= +var_23 * -6025098819014877184L / 3.3957492E38F / (short)'i'; + Tester_Class_0.var_2 = 3.0420988E38F * ((short)var_23 <= (var_23 %= 8.761205585617465E307) % + -1.2374670294031777E308 ? (var_23 = 'P') : (float)+ +1.0313120780554142E308); + (7489001532003495936L >= 'C' ? 
(Tester_Class_0)obj : (Tester_Class_0)(var_27 = obj)).func_4((short)(float)(byte)(float)(Tester_Class_0.var_2 = 1601763635)); + (var_23 * 2.2882572E38F <= (short)var_25 * (true || false ? (short)~ ((byte)1.1382317160718865E307) : (Tester_Class_0.var_2 = 7.909133507918336E307)) ? (Tester_Class_0)obj : (Tester_Class_0)obj).var_4++; + boolean var_30 = true; + var_27 = new Tester_Class_0(); + final String var_31 = "aiqnc"; + return 1.1357028E38F + (var_30 ? (Tester_Class_0)(var_27 = obj) : (Tester_Class_0)obj).var_7 == 3.860172628750592E306; + } + + +} + +public class Tester { + final static long var_36 = (4.4957056E37F < Tester_Class_1.var_22 + 281107777128915968L ? (Tester_Class_1.var_26 = 't') : (char)Tester_Class_1.var_28) - (4654994097042818048L | (byte)(Tester_Class_0.var_2 = (short)(Tester_Class_1.var_26 = ']'))) ^ 349774342780012544L; + + + static long func_0(final Tester_Class_1 arg_0, long arg_1) + { + ((Tester_Class_0)(Tester_Class_1.var_27 = (Tester_Class_0)(Tester_Class_1.var_27 = Tester_Class_0.var_8))).var_4 |= --new Tester_Class_0().var_4; + return Tester_Class_1.var_22 &= new Tester_Class_0().var_4; + } + + protected Tester_Class_1 func_1(final boolean arg_0, Object arg_1) + { + Tester_Class_0.var_2 = (short)Tester_Class_1.var_22; + ((arg_0 ? (byte)8.639448452214698E307 : ((Tester_Class_1)arg_1).var_24) <= ((Tester_Class_1)(Tester_Class_1.var_27 = new float[(byte)Tester_Class_1.var_22])).var_25++ ? (Tester_Class_0)arg_1 : (Tester_Class_0)(arg_1 = (Tester_Class_1.var_27 = (Tester_Class_0)arg_1))).var_3 = (Tester_Class_0.var_8 = "pgfdbinj"); + arg_1 = (new Tester_Class_0[((Tester_Class_0)arg_1).var_1 = (byte)1.0730194668655324E308])[(byte)'l']; + Tester_Class_0.var_8 = Tester_Class_0.var_8; + Tester_Class_1.var_27 = arg_0 & (true | !true) ? (Tester_Class_1)arg_1 : (Tester_Class_1)arg_1; + Tester_Class_1.var_28 += arg_0 ? (byte)(Tester_Class_0.var_8.compareToIgnoreCase(Tester_Class_0.var_8) % (Tester_Class_1.var_28 %= 2.2770412E38F)) : (byte)((byte)(short)Tester_Class_1.var_28 ^ var_36); + Tester_Class_1.var_28 <<= ((Tester_Class_0)arg_1).var_4; + return arg_0 ? (false ^ false ? (Tester_Class_1)arg_1 : (Tester_Class_1)arg_1) : (Tester_Class_1)arg_1; + } + + protected final static String[][] func_2(final double arg_0) + { + Tester_Class_0.var_2 = (((Tester_Class_1.var_22 = ((Tester_Class_1)(Tester_Class_1.var_27 = "")).var_25++) != + ((byte)(Tester_Class_0.var_2 = - ((byte)2.690435E38F))) ? (Tester_Class_1)(Tester_Class_1.var_27 = "twoj") : (new Tester_Class_1[(byte)'n'])[(byte)- ((byte)'p')]).var_25 /= (new short[false ? (byte)arg_0 : (byte)3.1713847E38F])[(byte)(short)((Tester_Class_0)(Tester_Class_1.var_27 = (Tester_Class_1.var_27 = "fi"))).var_7]); + { + ((new Tester_Class_1[(byte)9.709543613377303E307])[((Tester_Class_0)(Tester_Class_1.var_27 = (Tester_Class_0.var_8 = "efwkox"))).var_1 = (byte)7789404846284517376L]).var_23 *= (((new Tester_Class_0[(byte)'J'][(byte)++Tester_Class_1.var_28])[(byte)Tester_Class_1.var_28][(byte)(Tester_Class_1.var_28 = 1677818267)]).var_1 = false || true ? (byte)1.4659824E38F : (byte)(Tester_Class_1.var_26 = 'T')); + } + Tester_Class_0.var_2 = !true | !false | false & ! (!true & (true ^ false)) ? (byte)(Tester_Class_1.var_26 = 'l') : (short)(arg_0 * (char)(byte)Tester_Class_1.var_28); + Tester_Class_1.var_28 <<= false ^ (! ! (!false | ! !true | true) | (Tester_Class_0.var_2 = arg_0) == 245171309) ? (byte)arg_0 : (short)Tester_Class_1.var_22; + { + ((Tester_Class_1)(true ? 
(Tester_Class_1.var_27 = "axpbpadi") : Tester_Class_0.var_8)).var_23 = ((Tester_Class_1)(Tester_Class_1.var_27 = (new Tester_Class_0[(byte)1.1668668415637981E308][(byte)1.4116134699564312E308])[(byte)-7.4415765E37F][(byte)5156322492367086592L])).var_25; + } + final double var_37 = 1.6970877829548446E308; + --(Tester_Class_1.var_28 == (byte)((byte)arg_0 + (byte)1.1632396E38F) ? (Tester_Class_0)(Tester_Class_1.var_27 = "vluk") : (Tester_Class_0)(Tester_Class_1.var_27 = (Tester_Class_0.var_8 = "pfki"))).var_4; + Tester_Class_1.var_22--; + return new String[new Tester_Class_0().var_1 = (((Tester_Class_0)(Tester_Class_1.var_27 = "filxvch")).var_1 = (byte)var_36)][new Tester_Class_0().var_1 = (byte)'C']; + } + + final short func_3(byte arg_0, final short arg_1) + { + ((Tester_Class_0)(Tester_Class_1.var_27 = (Tester_Class_0.var_8 = "oenvgqdit"))).var_6 = Tester_Class_0.var_8; + new Tester_Class_0().var_4 >>>= + -var_36; + Tester_Class_0.var_2 = (((new Tester_Class_1[arg_0])[arg_0 %= ++Tester_Class_1.var_28]).var_25 &= Tester_Class_1.var_22); + ((new Tester_Class_1[arg_0])[arg_0 |= 1942533325]).var_23 %= arg_0 < arg_0 ? 'm' : 'N'; + float var_38; + ((new Tester_Class_1[arg_0])[arg_0]).var_23 /= (((new Tester_Class_1[arg_0][arg_0])[arg_0 |= 'N'])[arg_0 <<= - ((byte)- (Tester_Class_0.var_2 = 3.3324301E38F))]).var_23; + return true ? arg_1 : arg_1; + } + + private String func_4() + { + if (true) + { + ((Tester_Class_0)(Tester_Class_1.var_27 = (Tester_Class_1.var_27 = (Tester_Class_1.var_27 = Tester_Class_0.var_8)))).var_1 = (byte)6.4527776E37F; + ((Tester_Class_0)(Tester_Class_1.var_27 = new char[(byte)1.5121402849337185E307])).var_4 >>= - ((byte)3.3631582E37F) + (Tester_Class_1.var_28 /= 2.813947549586372E307); + } + else + { + { + Tester_Class_1.var_22 *= 1.6498653E36F; + } + Tester_Class_0.var_2 = + ((byte)7.750601265069686E307) > (short)(byte)3131520439106527232L ? (short)4699552681135671296L : (short)Tester_Class_1.var_22; + Tester_Class_1.var_22++; + ((Tester_Class_1)(new Object[(byte)6.231994821505742E307])[(byte)Tester_Class_1.var_22]).var_23 %= 30526551; + { + Tester_Class_0.var_2 = ((Tester_Class_1)(Tester_Class_1.var_27 = new short[(byte)9.628297E37F])).var_25; + } + Tester_Class_1.var_28 /= (byte)(false ^ Tester_Class_0.var_8.equalsIgnoreCase(Tester_Class_0.var_8) ? (byte)2.689633745095358E307 : (short)1.2532476E38F); + float var_39; + long[] var_40 = new long[((Tester_Class_0)(Tester_Class_1.var_27 = Tester_Class_0.var_8)).var_1 = (((Tester_Class_0)(Tester_Class_1.var_27 = (Tester_Class_0.var_8 = Tester_Class_0.var_8))).var_1 = (byte)(1.8335008E38F % (true | false ? (short)Tester_Class_1.var_22 : (byte)'P')))]; + } + Tester_Class_0.var_2 = (((new Tester_Class_0[((Tester_Class_0)(Tester_Class_1.var_27 = "inufeoe")).var_1 = (byte)(short)'M'])[(byte)(Tester_Class_0.var_2 = + - -2.274269E38F)]).var_1 = (((Tester_Class_0)(Tester_Class_1.var_27 = "c")).var_1 = (byte)'Z')); + Tester_Class_0.var_8 = (Tester_Class_0.var_8 = Tester_Class_0.var_8); + return "rkvffvlf"; + } + + final char func_5(final char arg_0, final Object[] arg_1, final double arg_2, Object arg_3) + { + arg_3 = true && Tester_Class_1.var_22 < (((Tester_Class_0)arg_3).var_1 = (((Tester_Class_0)(Tester_Class_1.var_27 = arg_3)).var_1 = (byte)arg_2)) ? "dgmwbkv" : Tester_Class_0.var_8; + (true ? (Tester_Class_1)arg_3 : (Tester_Class_1)arg_3).var_23 -= (Tester_Class_0.var_2 = arg_0); + arg_3 = (new String[(byte)arg_2])[(byte)-2.797633529863769E307]; + (false ^ !false ^ (!true && true) ? 
(Tester_Class_0)arg_3 : (Tester_Class_0)arg_3).var_4 -= (new char[((Tester_Class_0)arg_3).var_1 = (((Tester_Class_0)arg_3).var_1 = (((Tester_Class_0)arg_3).var_1 = (byte)2.433897E38F))])[((Tester_Class_0)arg_3).var_1 = (byte)+7.036923762392132E307]; + Tester_Class_0.var_8 = Tester_Class_0.var_8; + Tester_Class_0.var_2 = true ^ +((Tester_Class_1)(arg_3 = "o")).var_23 <= arg_2 ? (short)Tester_Class_1.var_22 : (((Tester_Class_0)arg_3).var_1 = (byte)1.9730195E38F); + (false ? (Tester_Class_0)arg_3 : (Tester_Class_0)arg_3).var_6 = "bpjqfacys"; + ((new Tester_Class_0[((Tester_Class_0)arg_3).var_1 = (byte)1969581340][((Tester_Class_0)arg_3).var_1 = (byte)(Tester_Class_0.var_2 = arg_0)])[((Tester_Class_0)arg_3).var_1 = (byte)(Tester_Class_0.var_2 = 4044194664687833088L)][((Tester_Class_0)arg_3).var_1 = (byte)((Tester_Class_1)arg_3).var_24]).var_1 = (false ? (byte)+ ((byte)1.2689328633821032E305) == arg_2 : false) ? (byte)arg_2 : (((Tester_Class_0)arg_3).var_1 = (byte)1586517741); + return (func_3(((Tester_Class_0)arg_3).var_1 = (byte)Tester_Class_1.var_28, (short)(byte)- +func_1(true, arg_3).var_23) > 1882532904 ? (short)2.6362656E38F >= (char)2.445034E38F & false : var_36 > Tester_Class_1.var_22) ? ((new Tester_Class_0[(byte)1.2074529E38F][(byte)'N'])[(byte)1.3365433211782782E308][(byte)Tester_Class_1.var_28]).var_4 : 'O'; + } + + private final static Tester_Class_1 func_6(String arg_0, String arg_1) + { + Tester_Class_1.var_22 += ((new Tester_Class_0[(byte)4.1707075152824266E306])[(byte)(short)(((Tester_Class_0)(Tester_Class_1.var_27 = "tmyiha")).var_4 &= 'e')]).var_4; + return (new Tester_Class_1[((Tester_Class_0)(Tester_Class_1.var_27 = arg_1)).var_1 = (byte)2.8419246E38F][(byte)Tester_Class_1.var_22])[((Tester_Class_0)(Tester_Class_1.var_27 = (Tester_Class_1.var_27 = arg_0))).var_1 = (((Tester_Class_0)(Tester_Class_1.var_27 = (Tester_Class_0.var_8 = "nvyfpdyms"))).var_1 = (byte)Tester_Class_1.var_22--)][((Tester_Class_0)(Tester_Class_1.var_27 = "kklsnsnia")).var_1 = (byte)'[']; + } + + double func_7(Object arg_0, final String arg_1) + { + if (false) + { + ((Tester_Class_0)arg_0).var_1 = (byte)'Z'; + } + else + { + ((Tester_Class_0)arg_0).var_3 = Tester_Class_0.var_8; + } + float var_41 = (true ? (new Tester_Class_1[((Tester_Class_0)arg_0).var_1 = (byte)var_36])[((Tester_Class_0)arg_0).var_1 = (byte)Tester_Class_1.var_22] : (Tester_Class_1)arg_0).var_23 /= 'n'; + ++Tester_Class_1.var_22; + return true ^ +func_6(arg_1, Tester_Class_0.var_8).var_23 < ~6945306015697774592L & (Tester_Class_1.var_28 |= var_36) <= var_36 ? + -1.4330949313452472E308 - -6602331706844466176L : Tester_Class_1.var_28; + } + + private final static byte func_8(final Object arg_0, double arg_1, final double arg_2, double arg_3) + { + ((Tester_Class_1)arg_0).var_23 = (short)(!false && (new boolean[(byte)2.2566308E38F])[((Tester_Class_0)arg_0).var_1 = (byte)((Tester_Class_1)arg_0).var_25] ? arg_2 : 3.0315489414155014E307); + short var_42 = (new short[((Tester_Class_0)arg_0).var_1 = (((Tester_Class_0)arg_0).var_1 = (byte)var_36)])[(byte)arg_2]; + Tester_Class_1.var_22 <<= ((new Tester_Class_0[(byte)var_42])[((Tester_Class_0)arg_0).var_1 = (byte)'X']).var_4; + (false ? new Tester_Class_0() : new Tester_Class_0()).var_4 *= 6.636831399350763E307; + (true & false ^ !((Tester_Class_1)arg_0).equals(arg_0) ? 
(Tester_Class_0)arg_0 : (Tester_Class_0)arg_0).var_4 <<= (Tester_Class_1.var_22 >>>= (Tester_Class_1.var_28 -= Tester_Class_1.var_28)) / 1064434; + int var_43; + final Object var_44 = Tester_Class_1.var_27 = new byte[((Tester_Class_0)arg_0).var_1 = (byte)+arg_2]; + ((Tester_Class_0)var_44).var_1 = (byte)arg_1; + Tester_Class_1 var_45 = (3582921389614857216L != 2.132918E38F / Tester_Class_1.var_22 % (((Tester_Class_1)var_44).var_23 %= var_42--) ? false : false ^ !false) ? func_6(((Tester_Class_0)arg_0).var_6 = Tester_Class_0.var_8, Tester_Class_0.var_8 = Tester_Class_0.var_8) : (Tester_Class_1)arg_0; + arg_1 *= false ? (false ? (byte)var_45.var_23 : (byte)var_45.var_24) : (byte)8158132319185776640L; + Tester_Class_0.var_8 = (new Tester_Class_0().var_6 = "gvxkyj"); + Tester_Class_1.var_27 = "bkfbu"; + arg_1 *= (((Tester_Class_0)arg_0).var_4 += new Tester_Class_0().var_4); + Tester_Class_0 var_46 = (false ? false && - (((Tester_Class_0)arg_0).var_1 = (byte)var_45.var_24) > var_45.var_23 - arg_2 : ("fn".startsWith(Tester_Class_0.var_8) && false) & !true) ? (Tester_Class_0)arg_0 : (Tester_Class_0)arg_0; + return var_46.var_1 = (var_46.var_1 = (var_46.var_1 = (byte)var_45.var_23)); + } + + public static void main(String[] args) + { + for (int i = 0; i < 100000; i++) { + Tester t = new Tester(); + try { + t.test(); + } catch(Throwable e) { + } + if (t.var_36 != -4918908939899620363L) { + throw new InternalError(t.var_36 + " != -4918908939899620363"); + } + } + } + + private void test() + { + ((Tester_Class_0)(Tester_Class_1.var_27 = new Tester_Class_0())).var_4 &= new Tester_Class_0().var_4 >>> (short)new Tester_Class_1().var_25; + Tester_Class_1.var_22 <<= Tester_Class_1.var_22; + Tester_Class_1.var_27 = ((true | ! !false) & false ? ! !true : 6.5017485E37F == (short)(Tester_Class_1.var_22 &= 'h')) ? (Tester_Class_1.var_27 = new Tester_Class_1()) : new Tester_Class_1(); + (1.252611E37F < (((new Tester_Class_0[(byte)'X'])[(byte)6.916916470825763E307]).var_4 /= (byte)Tester_Class_1.var_28 < (new short[(byte)7.626803503643197E307])[(byte)var_36] ? (short)new Tester_Class_0().var_4 : (short)(byte)Tester_Class_1.var_22) ? (true ? new Tester_Class_0() : new Tester_Class_0()) : (true ? new Tester_Class_0() : (Tester_Class_0)(Tester_Class_1.var_27 = Tester_Class_0.var_8))).var_4 ^= Tester_Class_1.var_28++; + (true ? new Tester_Class_1() : func_1(true, Tester_Class_1.var_27 = "jjgccelm")).var_23 -= (- - + + +1.2976166388790213E308 != ((!true ^ ! !true) & (short)(Tester_Class_1.var_28 &= var_36) <= (Tester_Class_1.var_26 = 'C') ? 1163089569715148800L : 8.591879058615699E307) ? (new Tester_Class_0().var_1 = (new Tester_Class_0().var_1 = (byte)2.7209893E38F)) : (!false ^ ! !false ? (short)'x' : (short)'a')) + 7620981797791666176L; + new Tester_Class_0().var_4 ^= 8777687662500669440L; + final String[] var_47 = new String[((1864097118983963648L | (Tester_Class_1.var_26 = '[')) < + (new Tester_Class_1().var_23 += --new Tester_Class_0().var_4) ? ! !true : false) ? (new Tester_Class_0().var_1 = (new Tester_Class_0().var_1 = (byte)2.6448988E38F)) : (byte)Tester_Class_1.var_22]; + long var_48 = 0L; + Tester_Class_0.var_2 = "nwcmc".codePointAt("wgcdlmd".compareTo("jyt")); + do + { + Tester_Class_1.var_22 += new Tester_Class_0().var_4; + var_48++; + Tester_Class_1.var_27 = false ? "dfvpqs" : Tester_Class_0.var_8; + new Tester_Class_0().var_1 = (new Tester_Class_0().var_1 = (new byte[(byte)2.2825838E38F])[(byte)4.2446597794703817E307]); + } while ((true ? 
!false : false & !false) && (var_48 < 117 && true)); + int var_49 = 0; + Tester_Class_1.var_26 = 'I'; + short var_50; + while (var_49 < 225 && ('U' | ~ ((byte)9.556538701292864E306)) < var_49) + { + var_50 = (var_50 = ((byte)1.2016701369644112E308 != (var_50 = (short)1.2518271E38F) ^ !true ? !true : false) ? (short)6.629572378442352E307 : (byte)'O'); + var_49++; + var_50 = true ? (byte)Tester_Class_1.var_22 : (byte)(Tester_Class_1.var_22 = (byte)var_48); + byte var_51; + short var_52; + } + Tester_Class_1.var_27 = Tester_Class_0.var_8 + "r"; + var_50 = (var_50 = true ^ ! (!false ^ false) ? (byte)573442894 : (byte)2.1479471E38F); + ((var_50 = (short)'w') >= (new Tester_Class_0().var_1 = (byte)5.148172E37F) & true ? new Tester_Class_0() : new Tester_Class_0()).var_4 >>= true ? (new Tester_Class_0().var_4 /= (byte)Tester_Class_1.var_28) : (Tester_Class_1.var_26 = '^'); + float var_53; + (func_6(Tester_Class_0.var_8, Tester_Class_0.var_8).var_24 <= (var_50 = (var_50 = (short)var_48)) ^ !true | true & true | true ^ false ? (Tester_Class_1)(Tester_Class_1.var_27 = Tester_Class_0.var_8) : new Tester_Class_1()).var_23 /= ((new Tester_Class_0[(byte)1.6656795E38F])[new Tester_Class_0().var_1 = (byte)1.212530193895014E308]).var_4; + long var_54 = 0L; + Object var_55; + (true | --new Tester_Class_0().var_4 == 2898909413610959872L & true == true ? func_6(Tester_Class_0.var_8, Tester_Class_0.var_8) : new Tester_Class_1()).var_23 %= 7471272661059674112L; + while (false & (false | 5.7300464E37F != (short)(Tester_Class_0.var_2 = (short)var_36)) && var_54 < 293) + { + func_6(Tester_Class_0.var_8 = "wts", Tester_Class_0.var_8 = (Tester_Class_0.var_8 = Tester_Class_0.var_8)).var_25 |= (Tester_Class_1.var_22 ^= (var_50 = (byte)1.0904691577897794E308)); + var_54++; + (false ? func_6(Tester_Class_0.var_8, "inujn") : func_6(Tester_Class_0.var_8, Tester_Class_0.var_8 = Tester_Class_0.var_8)).var_23 /= (Tester_Class_0.var_2 = (var_50 = (((Tester_Class_0)(var_55 = Tester_Class_0.var_8)).var_1 = (byte)(short)Tester_Class_1.var_28))); + Tester_Class_0.var_8 = Tester_Class_0.var_8; + } + var_50 = func_3(new Tester_Class_0().var_1 = (new Tester_Class_0().var_1 = (byte)var_36), var_50 = (var_50 = (var_50 = (byte)var_36))); + Tester_Class_1.var_22++; + Tester_Class_1.var_28 <<= 'Y'; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6689060/Test.java 2009-08-01 04:21:32.663583672 +0100 @@ -0,0 +1,576 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ + +/* + * @test + * @bug 6689060 + * @summary Escape Analysis does not work with Compressed Oops + * @run main/othervm -Xbatch -XX:CompileCommand=exclude,Test.dummy -XX:+AggressiveOpts Test + */ + +import java.lang.reflect.Array; + +class Point { + int x; + int y; + Point next; + int ax[]; + int ay[]; + Point pax[]; + Point pay[]; + public Point getNext() { + return next; + } +} + +public class Test { + + void dummy() { + // Empty method to verify correctness of DebugInfo. + // Use -XX:CompileCommand=exclude,Test.dummy + } + + int ival(int i) { + return i*2; + } + + int test80(int y, int l, int i) { + Point p = new Point(); + p.ax = new int[2]; + p.ay = new int[2]; + int x = 3; + p.ax[0] = x; + p.ay[1] = 3 * x + y; + dummy(); + return p.ax[0] * p.ay[1]; + } + + int test81(int y, int l, int i) { + Point p = new Point(); + p.ax = new int[2]; + p.ay = new int[2]; + int x = 3; + p.ax[0] = x; + p.ay[1] = 3 * x + y; + dummy(); + return p.ax[0] * p.ay[1]; + } + + + int test44(int y) { + Point p1 = new Point(); + p1.x = ival(3); + dummy(); + p1.y = 3 * p1.x + y; + return p1.y; + } + + int test43(int y) { + Point p1 = new Point(); + if ( (y & 1) == 1 ) { + p1.x = ival(3); + } else { + p1.x = ival(5); + } + dummy(); + p1.y = 3 * p1.x + y; + return p1.y; + } + + int test42(int y) { + Point p1 = new Point(); + p1.x = 3; + for (int i = 0; i < y; i++) { + if ( (i & 1) == 1 ) { + p1.x += 4; + } + } + p1.y = 3 * y + p1.x; + return p1.y; + } + + int test40(int y) { + Point p1 = new Point(); + if ( (y & 1) == 1 ) { + p1.x = 3; + } else { + p1.x = 5; + } + p1.y = 3 * p1.x + y; + return p1.y; + } + + int test41(int y) { + Point p1 = new Point(); + if ( (y & 1) == 1 ) { + p1.x += 4; + } else { + p1.x += 5; + } + p1.y = 3 * p1.x + y; + return p1.y; + } + + Point test00(int y) { + int x = 3; + Point p = new Point(); + p.x = x; + p.y = 3 * x + y; + return p; + } + + Point test01(int y) { + int x = 3; + Point p = new Point(); + p.x = x; + p.y = 3 * x + y; + dummy(); + return p; + } + + Point test02(int y) { + int x = 3; + Point p1 = null; + for (int i = 0; i < y; i++) { + Point p2 = new Point(); + p2.x = x; + p2.y = 3 * y + x; + p2.next = p1; + p1 = p2; + } + return p1; + } + + Point test03(int y) { + int x = 3; + Point p1 = null; + for (int i = 0; i < y; i++) { + Point p2 = new Point(); + p2.x = x; + p2.y = 3 * y + x; + p2.next = p1; + p1 = p2; + } + dummy(); + return p1; + } + + Point test04(int y) { + int x = 3; + Point p1 = null; + for (int i = 0; i < y; i++) { + Point p2 = new Point(); + p2.x = x; + p2.y = 3 * y + x; + p2.next = p1; + dummy(); + p1 = p2; + } + return p1; + } + + int test05(int y) { + int x = 3; + Point p1 = new Point(); + for (int i = 0; i < y; i++) { + Point p2 = new Point(); + p2.x = x; + p2.y = 3 * y + x; + p1.next = p2; + p1 = p2; + } + return p1.y; + } + + int test0(int y) { + int x = 3; + Point p = new Point(); + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test1(int y) { + Point p = new Point(); + if ( (y & 1) == 1 ) { + p = new Point(); // Kill previous + } + int x = 3; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test2(int y) { + Point p1 = new Point(); + Point p2 = new Point(); + p1.x = 3; + p2.x = 4; + p1.y = 3 * p2.x + y; + p2.y = 3 * p1.x + y; + dummy(); + return p1.y * p2.y; + } + + int test3(int y, Point p1) { + Point p2 = new Point(); + p1.x = 3; + p2.x = 4; + p1.y = 3 * p2.x + y; + p2.y = 3 * p1.x + y; + dummy(); + return p1.y * p2.y; + } + + int test4(int y) { + Point p1 = new Point(); + Point p2 = new Point(); + if 
( (y & 1) == 1 ) { + p1.x = 3; + p2.x = 4; + } else { + p1.x = 5; + p2.x = 6; + } + p1.y = 3 * p2.x + y; + p2.y = 3 * p1.x + y; + dummy(); + return p1.y * p2.y; + } + + int test5(int y, Point p1) { + Point p2 = new Point(); + if ( (y & 1) == 1 ) { + p1.x = 3; + p2.x = 4; + } else { + p1.x = 5; + p2.x = 6; + } + p1.y = 3 * p2.x + y; + p2.y = 3 * p1.x + y; + dummy(); + return p1.y * p2.y; + } + + int test6(int y) { + Point p1 = new Point(); + Point p2 = new Point(); + p1.next = p2; + if ( (y & 1) == 1 ) { + p1.x = 3; + p1.getNext().x = 4; + } else { + p1.x = 5; + p1.getNext().x = 6; + } + p1.y = 3 * p2.x + y; + p2.y = 3 * p1.x + y; + dummy(); + return p1.y * p2.y; + } + + int test7(int y, Point p1) { + Point p2 = new Point(); + p1.next = p2; + if ( (y & 1) == 1 ) { + p1.x = 3; + p1.getNext().x = 4; + } else { + p1.x = 5; + p1.getNext().x = 6; + } + p1.y = 3 * p2.x + y; + p2.y = 3 * p1.x + y; + dummy(); + return p1.y * p2.y; + } + + int test8(int y, int l, int i) { + Point p = new Point(); + p.ax = new int[l]; + p.ay = new int[l]; + int x = 3; + p.ax[i] = x; + p.ay[i] = 3 * x + y; + dummy(); + return p.ax[i] * p.ay[i]; + } + + int test9(int y, int l, int i) { + Point p = new Point(); + p.pax = new Point[l]; + p.pay = new Point[l]; + p.pax[i] = new Point(); + p.pay[i] = new Point(); + p.pax[i].x = 3; + p.pay[i].x = 4; + p.pax[i].y = 3 * p.pay[i].x + y; + p.pay[i].y = 3 * p.pax[i].x + y; + dummy(); + return p.pax[i].y * p.pay[i].y; + } + + int test10(int y, int l, int i, Class cls) { + Point p = new Point(); + try { + p.pax = (Point[])Array.newInstance(cls, l); + p.pax[i] = (Point)cls.newInstance(); + } + catch(java.lang.InstantiationException ex) { + return 0; + } + catch(java.lang.IllegalAccessException ex) { + return 0; + } + p.pax[i].x = 3; + p.pax[i].y = 3 * p.pax[i].x + y; + dummy(); + return p.pax[i].x * p.pax[i].y; + } + + int test11(int y) { + Point p1 = new Point(); + Point p2 = new Point(); + p1.next = p2; + if ( (y & 1) == 1 ) { + p1.x = 3; + p1.next.x = 4; + } else { + p1.x = 5; + p1.next.x = 6; + } + p1.y = 3 * p1.next.x + y; + p1.next.y = 3 * p1.x + y; + dummy(); + return p1.y * p1.next.y; + } + + int test12(int y) { + Point p1 = new Point(); + p1.next = p1; + if ( (y & 1) == 1 ) { + p1.x = 3; + p1.next.x = 4; + } else { + p1.x = 5; + p1.next.x = 6; + } + p1.y = 3 * p1.next.x + y; + p1.next.y = 3 * p1.x + y; + dummy(); + return p1.y * p1.next.y; + } + + + public static void main(String args[]) { + Test tsr = new Test(); + Point p = new Point(); + Point ptmp = p; + Class cls = Point.class; + int y = 0; + for (int i=0; i<10000; i++) { + ptmp.next = tsr.test00(1); + ptmp.next = tsr.test01(1); + ptmp.next = tsr.test02(1); + ptmp.next = tsr.test03(1); + ptmp.next = tsr.test04(1); + + y = tsr.test05(1); + + y = tsr.test80(y, 1, 0); + y = tsr.test81(y, 1, 0); + + y = tsr.test44(y); + y = tsr.test43(y); + y = tsr.test42(y); + y = tsr.test40(y); + y = tsr.test41(y); + + y = tsr.test0(y); + y = tsr.test1(y); + y = tsr.test2(y); + y = tsr.test3(y, p); + y = tsr.test4(y); + y = tsr.test5(y, p); + y = tsr.test6(y); + y = tsr.test7(y, p); + y = tsr.test8(y, 1, 0); + y = tsr.test9(y, 1, 0); + y = tsr.test10(y, 1, 0, cls); + y = tsr.test11(y); + y = tsr.test12(y); + } + for (int i=0; i<10000; i++) { + ptmp.next = tsr.test00(1); + ptmp.next = tsr.test01(1); + ptmp.next = tsr.test02(1); + ptmp.next = tsr.test03(1); + ptmp.next = tsr.test04(1); + + y = tsr.test05(1); + + y = tsr.test80(y, 1, 0); + y = tsr.test81(y, 1, 0); + + y = tsr.test44(y); + y = tsr.test43(y); + y = tsr.test42(y); + y = 
tsr.test40(y); + y = tsr.test41(y); + + y = tsr.test0(y); + y = tsr.test1(y); + y = tsr.test2(y); + y = tsr.test3(y, p); + y = tsr.test4(y); + y = tsr.test5(y, p); + y = tsr.test6(y); + y = tsr.test7(y, p); + y = tsr.test8(y, 1, 0); + y = tsr.test9(y, 1, 0); + y = tsr.test10(y, 1, 0, cls); + y = tsr.test11(y); + y = tsr.test12(y); + } + for (int i=0; i<10000; i++) { + ptmp.next = tsr.test00(1); + ptmp.next = tsr.test01(1); + ptmp.next = tsr.test02(1); + ptmp.next = tsr.test03(1); + ptmp.next = tsr.test04(1); + + y = tsr.test05(1); + + y = tsr.test80(y, 1, 0); + y = tsr.test81(y, 1, 0); + + y = tsr.test44(y); + y = tsr.test43(y); + y = tsr.test42(y); + y = tsr.test40(y); + y = tsr.test41(y); + + y = tsr.test0(y); + y = tsr.test1(y); + y = tsr.test2(y); + y = tsr.test3(y, p); + y = tsr.test4(y); + y = tsr.test5(y, p); + y = tsr.test6(y); + y = tsr.test7(y, p); + y = tsr.test8(y, 1, 0); + y = tsr.test9(y, 1, 0); + y = tsr.test10(y, 1, 0, cls); + y = tsr.test11(y); + y = tsr.test12(y); + } + + int z = 0; + y = tsr.test80(0, 1, 0); + z += y; + System.out.println("After 'test80' y=" + y); + y = tsr.test81(0, 1, 0); + z += y; + System.out.println("After 'test81' y=" + y); + + y = tsr.test44(0); + z += y; + System.out.println("After 'test44' y=" + y); + y = tsr.test43(0); + z += y; + System.out.println("After 'test43' y=" + y); + y = tsr.test42(0); + z += y; + System.out.println("After 'test42' y=" + y); + y = tsr.test40(0); + z += y; + System.out.println("After 'test40' y=" + y); + y = tsr.test41(0); + z += y; + System.out.println("After 'test41' y=" + y); + + ptmp.next = tsr.test00(1); + z += y; + System.out.println("After 'test00' p.y=" + ptmp.next.y); + ptmp.next = tsr.test01(1); + z += y; + System.out.println("After 'test01' p.y=" + ptmp.next.y); + ptmp.next = tsr.test02(1); + z += y; + System.out.println("After 'test02' p.y=" + ptmp.next.y); + ptmp.next = tsr.test03(1); + z += y; + System.out.println("After 'test03' p.y=" + ptmp.next.y); + ptmp.next = tsr.test04(1); + z += y; + System.out.println("After 'test04' p.y=" + ptmp.next.y); + + y = tsr.test05(1); + z += y; + System.out.println("After 'test05' y=" + y); + + y = tsr.test0(0); + z += y; + System.out.println("After 'test0' y=" + y); + y = tsr.test1(0); + z += y; + System.out.println("After 'test1' y=" + y); + y = tsr.test2(0); + z += y; + System.out.println("After 'test2' y=" + y); + y = tsr.test3(0, new Point()); + z += y; + System.out.println("After 'test3' y=" + y); + y = tsr.test4(0); + z += y; + System.out.println("After 'test4' y=" + y); + y = tsr.test5(0, new Point()); + z += y; + System.out.println("After 'test5' y=" + y); + y = tsr.test6(0); + z += y; + System.out.println("After 'test6' y=" + y); + y = tsr.test7(0, new Point()); + z += y; + System.out.println("After 'test7' y=" + y); + y = tsr.test8(0, 1, 0); + z += y; + System.out.println("After 'test8' y=" + y); + y = tsr.test9(0, 1, 0); + z += y; + System.out.println("After 'test9' y=" + y); + y = tsr.test10(0, 1, 0, cls); + z += y; + System.out.println("After 'test10' y=" + y); + y = tsr.test11(0); + z += y; + System.out.println("After 'test11' y=" + y); + y = tsr.test12(0); + z += y; + System.out.println("After 'test12' y=" + y); + System.out.println("Sum of y =" + z); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6695810/Test.java 2009-08-01 04:21:33.105198095 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/* + * @test + * @bug 6695810 + * @summary null oop passed to encode_heap_oop_not_null + * @run main/othervm -Xbatch Test + */ + +public class Test { + Test _t; + + static void test(Test t1, Test t2) { + if (t2 != null) + t1._t = t2; + + if (t2 != null) + t1._t = t2; + } + + public static void main(String[] args) { + Test t = new Test(); + for (int i = 0; i < 50; i++) { + for (int j = 0; j < 100; j++) { + test(t, t); + } + test(t, null); + } + for (int i = 0; i < 10000; i++) { + test(t, t); + } + test(t, null); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6700047/Test6700047.java 2009-08-01 04:21:33.540240870 +0100 @@ -0,0 +1,63 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/** + * @test + * @bug 6700047 + * @summary C2 failed in idom_no_update + * @run main Test6700047 + */ + +public class Test6700047 { + static byte[] dummy = new byte[256]; + + public static void main(String[] args) { + for (int i = 0; i < 100000; i++) { + intToLeftPaddedAsciiBytes(); + } + } + + public static int intToLeftPaddedAsciiBytes() { + int offset = 40; + int q; + int r; + int i = 100; + int result = 1; + while (offset > 0) { + q = (i * 52429); + r = i; + offset--; + i = q; + if (i == 0) { + break; + } + } + if (offset > 0) { + for(int j = 0; j < offset; j++) { + result++; + dummy[i] = 0; + } + } + return result; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6711100/Test.java 2009-08-01 04:21:33.982235908 +0100 @@ -0,0 +1,53 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/* + * @test + * @bug 6711100 + * @summary 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int") + * @run main/othervm -Xcomp -XX:CompileOnly=Test. Test + */ + +public class Test { + + static byte b; + + // The server compiler chokes on compiling + // this method when f() is not inlined + public Test() { + b = (new byte[1])[(new byte[f()])[-1]]; + } + + protected static int f() { + return 1; + } + + public static void main(String[] args) { + try { + Test t = new Test(); + } catch (ArrayIndexOutOfBoundsException e) { + } + } +} + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6712835/Test6712835.java 2009-08-01 04:21:34.431329557 +0100 @@ -0,0 +1,1578 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/** + * @test + * @bug 6712835 + * @summary Server compiler fails with assertion (loop_count < K,"infinite loop in PhaseIterGVN::transform") + * @run main/othervm -Xcomp Test6712835 + */ + +/* Complexity upper bound: 349851 ops */ + +abstract class Tester_Class_0 { + boolean var_1 = true; + static double var_2; + float var_3 = 1.8301116E38F; + final String var_4 = "wck"; + final static short var_5 = 25624; + + + public Tester_Class_0() + { + var_2 = (byte)1.7374809293839066E308; + { + double var_18 = false ? 
8027040614338917376L * var_3 + - (var_2 = var_5) : (var_3 += (char)4.491494085158084E307); + var_3 *= ~ ((byte)702579792) / 6600332715431236608L; + long var_19 = 0L; + var_18 -= 1759091496; + do + { + final long var_20 = (new long[(byte)(var_3 += + +1.6695243696502334E308)][(byte)((byte)1110410742 | ~var_19)])[var_1 & var_1 ? (byte)1047514041090199552L : (byte)var_5][(byte)(var_1 ? 123309551 : - ((byte)5932930312361050112L))]; + var_19++; + final short var_21 = var_5; + } while (var_19 < 1 && var_1 ^ var_3 == + ((byte)var_5)); + { + int var_22; + } + { + var_4.endsWith("o"); + } + int var_23 = 0; + var_1 &= (var_1 = true); + for (byte var_24 = 26; (var_1 = !var_1) && var_23 < 1; var_18 += var_1 ^ (var_1 |= false) ^ true ? var_24 : (byte)1504077779675035648L) + { + var_18 *= var_23; + var_23++; + float var_25; + (((new Tester_Class_0[var_24][var_24][var_24])[var_24])[var_24 >>= var_19][var_24 &= 6702582681202665472L]).var_3 *= var_5; + } + var_1 = (var_3 -= var_5) > (byte)func_2(1317089759, var_5, (byte)var_19) % (false & true ? 475183200 : 8947159119888251904L); + var_18 /= ~var_19 ^ ((byte)(var_18 %= (int)var_5) >= 6773554922270913536L ? (byte)var_5 : (byte)'u'); + var_3 = ~ ((byte)var_19); + } + double var_26 = 0; + var_1 &= (var_1 |= ! (var_1 |= true)); + while (var_26 < 1) + { + var_2 = 'e'; + var_26++; + var_1 ^= !true | 'j' * ((var_2 = 93384362) + var_5) <= var_5; + var_2 = true ? 2056852215 : var_5; + } + switch ((new char[(byte)var_3])[(byte)(short)var_4.charAt(438929928)] / (new byte[(byte)1779353916050551808L][(byte)+ ~8903539475459755008L])[(byte)836413337621087232L][(byte)784406244]) + { + case 101: + var_3 -= var_5; + break; + + case 'L': + + case 20: + final int var_27 = 2146473580; + break; + + case 18: + + default: + "mwh".substring((byte)(float)'A' % var_5, ']' | var_5 ^ ~ ((byte)'E')); + break; + + case 'H': + + } + var_3 = var_5; + long var_28; + var_28 = (var_1 = 'u' != (var_3 = var_1 ? 1384770002488557568L : ~ ~6691557565676772352L)) ? - ((byte)938410603) : var_5; + ((new Tester_Class_0[(byte)var_26])[(byte)'w']).var_3 = (byte)(short)'I'; + var_2 = (var_1 ^= "sfltwylm".startsWith("ytmeds")) ? 1837260339 * 434565574 : (new double[(byte)var_26])[(byte)var_3]; + } + + + + public boolean equals(Object obj) + { + var_2 = 785819716 / 'i'; + switch ((! (var_1 ^= var_1) ^ (! ((false | (var_1 |= var_1)) ^ !false) ? false : (var_1 |= !false)) ? var_1 : ! !var_1 ^ var_1) ? 1426689390 : var_5 * var_5) + { + case '`': + + case 89: + + case 13: + char var_9 = 'W'; + break; + + case 31: + + case 15: + + case 'm': + var_1 &= var_1; + break; + + case 'Z': + + case 34: + String[] var_10 = (new String[(byte)5534253842608756736L][(byte)'M'])[(byte)8717534666212195328L]; + break; + + case 124: + + } + var_3 += var_5; + var_1 |= (var_1 |= (var_1 = (var_1 |= var_5 >= (var_2 = (byte)var_3)))); + var_1 ^= (var_1 = var_4.endsWith(new String())); + var_2 = (var_3 %= 664966429); + { + var_4.lastIndexOf((int)('i' * (! !true & (true & !var_1) ? (byte)2.2562587635371023E307 : (byte)(var_3 %= var_3)) / var_3), 'P' % (false ? (byte)'N' : (byte)943393108)); + } + var_3 /= false | ! !var_1 ? (char)1.3721055E38F : '\\'; + if (var_1) + { + var_4.compareTo("uaqmqwg"); + } + else + { + var_1 ^= var_1 & (var_1 &= (var_1 ^= (var_1 ^= var_1))); + } + var_3 *= (new int[(byte)1980200282][(byte)'i'])[(byte)(var_2 = (byte)'O')][false ? (byte)2.4739911E38F : (byte)- ((byte)1.6045903096088714E308)]; + var_1 = var_5 != (byte)var_5 & (1.5002759009669559E308 < (byte)5110733568033040384L ^ (var_1 ? 
(var_1 ^= true) : var_1)); + long var_11; + return (var_2 = (byte)'B') < 550125954; + } + + + public static char func_0(final int arg_0, long[] arg_1, final boolean arg_2) + { + var_2 = (short)(false ? (byte)1.2577737E38F : (byte)'t'); + "xdf".codePointBefore((!arg_2 ? (byte)1426638765 : (byte)541094055) * ((byte)var_5 / var_5)); + ((new Tester_Class_0[(byte)(short)(var_2 = 'A')])[(byte)arg_0]).var_3 = 7823141134226481152L; + ((new Tester_Class_0[(byte)- ~1368497135389664256L])[!false || true ? (byte)2.5393905E38F : (byte)2.4415902E38F]).var_3 -= (int)(false ? (byte)var_5 : (byte)"musnlk".charAt(785792957)); + ((new Tester_Class_0[(byte)357672172])[(byte)7.709380171237795E307]).var_3 = arg_0; + ((new Tester_Class_0[(byte)var_5])[(byte)('Z' / + + -2.6037312E38F)]).var_3 %= arg_2 ? + - - + - + +4.6761156E37F : (byte)- (var_2 = - - ~3113191255384341504L); + (("exseqpham" + "uigdxg").equalsIgnoreCase("oeutvibnv") ? "l" : "qra").replace(false ^ true ? 't' : "jwpf".charAt(+ ((byte)arg_0)), 6.624090730243228E307 > 2.7771497E38F ? 't' : "tcfesyg".charAt(arg_0)); + ((new Tester_Class_0[(byte)arg_0][(byte)6943189372481268736L])[(byte)2.6713643513095145E307][(byte)var_5]).var_1 &= !"ipgqq".endsWith("aecnyvpmf"); + ((new Tester_Class_0[(byte)(+ +2158971337956592640L ^ var_5)])[false ? (byte)8594725249859841024L : (byte)var_5]).var_3 = (byte)"jd".charAt((byte)1.6298661301128909E307 << (byte)'B'); + var_2 = (float)1014982842 * (byte)var_5 * ((new Tester_Class_0[(byte)2.7842814E38F])[(byte)"n".charAt('e' ^ (byte)arg_0)]).var_3; + if (false) + { + ((new Tester_Class_0[(byte)8.702990410251979E307][(byte)8.865924E37F])[(byte)var_5][(byte)+ ((long)var_5)]).var_1 ^= arg_2; + } + else + { + ((new Tester_Class_0[(byte)('I' | var_5)])[(byte)('L' + (+ - - (var_2 = 'N') + 1.324025E38F))]).var_3 = var_5 % '[' + (byte)var_5; + } + ((new Tester_Class_0[(byte)7.41761E37F][(byte)(var_2 = var_5)])[(byte)var_5][(byte)'o']).var_1 &= false; + ((new Tester_Class_0[(byte)+ ((byte)7.9065203E37F)])[(byte)var_5]).var_1 ^= 630582880 > - (var_2 = var_5); + return 'K'; + } + + protected float func_1(int arg_0, final Object arg_1, Object arg_2) + { + var_1 ^= (var_1 ^= true) & !var_1; + { + var_3 -= var_3; + var_2 = var_1 && (var_1 &= ! !true) | + ~3353396000385141760L < 7949306917320622080L ? (byte)306954754 : (byte)var_5; + final long var_12 = 1048994076885686272L; + } + short var_13 = 8706; + byte var_14 = (new byte[(byte)6.697464316212731E307])[(byte)var_4.indexOf("clbr", (byte)var_5 + 'F')]; + ((new Tester_Class_0[var_14][var_14 &= 'b'])[var_14][var_14]).var_1 |= var_14 >= var_3; + (((new String[var_14][var_14])[var_14])[var_14]).codePointAt(585064460); + var_14 -= 2121015302; + var_2 = 1.241922E38F; + { + (((new Tester_Class_0[var_14][var_14 ^= 'y'])[var_14])[var_14 |= var_14]).var_3 *= 5756647686007829504L; + } + { + var_13--; + } + double var_15; + var_1 = (var_1 = true) ? false : true; + arg_0--; + return var_3; + } + + public final static short func_2(int arg_0, final short arg_1, byte arg_2) + { + arg_0 %= (((new Tester_Class_0[arg_2][arg_2])[arg_2++][--arg_2]).var_1 |= true) ? 'e' : var_5 >>> arg_2; + float var_16 = ((false ? ~3951083684045828096L >>> - -3880809660598466560L : arg_0) ^ arg_1) - 1.1257035E37F; + var_2 = var_5 + 3.3679594E38F; + arg_2 += true & (((new Tester_Class_0[arg_2])[arg_2 *= 4301185995603340288L]).var_1 = arg_1 != arg_1) ? 
(var_2 = arg_0) : 988311987505040384L + ']' >>> --arg_2; + arg_2 = arg_2; + var_16 /= (arg_2 += (arg_0 += (var_16 %= arg_2)) + (var_16 -= arg_2)); + var_16 += 7416220016668043264L; + ((new Tester_Class_0[arg_2])[arg_2]).var_1 &= false; + ((new Tester_Class_0[--arg_2])[--arg_2]).var_1 = true | (true & true ? true : false); + arg_2 -= (var_2 = 7997355759027275776L); + ((new Tester_Class_0[arg_2])[arg_2 %= 8660960251961819136L]).var_3 *= 4180634858198604800L; + arg_0 /= -1.3063173E38F; + var_2 = arg_2; + var_2 = (6266377813429248L ^ 'j') / (!false & (1.1423139843154216E308 >= (var_2 = arg_2) || (((new Tester_Class_0[arg_2])[arg_2]).var_1 ^= true)) ? (short)('e' * arg_0) : var_5); + --arg_0; + var_2 = (+ - ~8598445599816821760L << arg_1) % 1890075208 & (!true & !true ^ false & false ? 'w' : 'm') % (5614521287604667392L / arg_2) & ~193105176465084416L; + arg_2 &= (arg_2 |= arg_0) ^ ((((new Tester_Class_0[arg_2][arg_2])[arg_2])[arg_2]).var_1 ? arg_2 : (new long[arg_2])[arg_2]); + ((new Tester_Class_0[arg_2 &= 'V'][arg_2])[arg_2 /= 5486057194586717184L][arg_2 %= var_16]).var_1 |= (new boolean[((new Tester_Class_0[arg_2])[arg_2]).var_1 ? arg_2 : arg_2])[arg_2]; + return ((((new Tester_Class_0[arg_2][arg_2][arg_2])[--arg_2])[arg_2 |= arg_2][arg_2 %= 6782653882738869248L]).var_1 ? false : !true | "hopq".equalsIgnoreCase("wvm") | "qmhtjvm".endsWith("gewqas")) && ! !false & false ? arg_1 : arg_1; + } + + protected final static char func_3(byte arg_0, final int arg_1, final short arg_2, long[] arg_3) + { + ((new Tester_Class_0[arg_0 ^= 1902924521091955712L])[arg_0]).var_1 &= ((((new Tester_Class_0[arg_0][arg_0])[--arg_0])[arg_0 *= - -1.0959788E38F]).var_1 = false); + { + var_2 = (new float[arg_0][(byte)1082004329])[arg_0][arg_0 <<= 'T']; + } + ((new Tester_Class_0[arg_0 >>= arg_1][arg_0])[arg_0][arg_0]).var_1 |= ((new Tester_Class_0[arg_0])[--arg_0]).var_4.startsWith(((new Tester_Class_0[arg_0])[arg_0]).var_4); + ((new Tester_Class_0[(byte)var_5])[arg_0]).var_4.substring(273513722, 'f' * 'n').substring((new short[arg_0][arg_0])[arg_0][arg_0] % 'C' >> (arg_3[arg_0] - 's') % ("".charAt(arg_1) & var_5)); + var_2 = 'Q' + (char)arg_0; + { + ((new Tester_Class_0[++arg_0])[arg_0]).var_1 ^= !true || !true ? !false ^ false : ! (1.7030813E38F != ~arg_0); + } + { + "jbdu".indexOf(((new Tester_Class_0[arg_0 *= 2628674024589069312L])[arg_0 -= arg_1]).var_4, "gqglwwbab".charAt(~arg_0) >>> 'M'); + } + { + --arg_0; + } + ((new Tester_Class_0[arg_0])[arg_0]).var_1 = 'n' == ('t' | (+9156142987836739584L | 's')) - 2915339344736463872L; + int var_17; + var_17 = 'k'; + var_17 = (((new Tester_Class_0[arg_0])[arg_0]).var_1 &= false) ? (short)'q' : arg_2; + return '`'; + } + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_0.var_5 = "; result += Test6712835.Printer.print(var_5); + result += "\n"; + result += "Tester_Class_0.var_4 = "; result += Test6712835.Printer.print(var_4); + result += "\n"; + result += "Tester_Class_0.var_1 = "; result += Test6712835.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_0.var_2 = "; result += Test6712835.Printer.print(var_2); + result += "\n"; + result += "Tester_Class_0.var_3 = "; result += Test6712835.Printer.print(var_3); + result += ""; + result += "\n]"; + return result; + } +} + + +final class Tester_Class_1 extends Tester_Class_0 { + final boolean var_29 = false; + static short var_30; + Tester_Class_0 var_31; + + + public Tester_Class_1() + { + new String(); + byte var_43 = (var_1 ? var_29 : var_1) ? 
(byte)(~ ~ ~6520122970162626560L | ~6642750731731981312L) : (byte)(var_30 = var_5); + { + var_2 = Tester_Class_0.var_5; + } + ((Tester_Class_0)(new Object[var_43])[var_43]).var_1 = var_29; + var_43 += 512311665; + } + + + + + final int func_0() + { + Tester_Class_0.var_2 = var_29 ? (var_29 ? (byte)'D' : (byte)Tester_Class_0.var_5) : (!var_1 ^ var_1 | (var_1 ^= var_1) ? (byte)'J' : (byte)51510881); + new String(); + new String(); + new String(); + return 1731501229; + } + + private final static void func_1(final String arg_0, final Object arg_1) + { + long var_32 = ((new Tester_Class_1[(byte)37719380])['I' == Tester_Class_0.var_5 + Tester_Class_0.var_5 ? (byte)(var_30 = (byte)1.3043569561522328E308) : (byte)1.1111420042091164E308]).var_1 ? ~2569063513521638400L - Tester_Class_0.var_5 ^ 'm' : 660383226; + ((Tester_Class_0)arg_1).var_3 += (char)8417109805993570304L; + var_30 = var_5; + var_2 = (new byte[(byte)2102078692])[(byte)7.942050823719592E307]; + if (((new Tester_Class_1[(byte)224717297])[(byte)2889830453578512384L]).var_1) + { + Tester_Class_0.var_2 = (new byte[(byte)'C'])[(byte)Tester_Class_0.var_5]; + } + else + { + var_32 <<= 'u'; + } + Tester_Class_0.var_2 = Tester_Class_0.var_5; + final Object var_33 = arg_1; + final byte var_34 = 40; + ++var_32; + (((new Tester_Class_1[var_34][var_34])[var_34][var_34]).var_31 = ((new Tester_Class_0[var_34][var_34])[var_34])[var_34]).var_1 ^= (((new Tester_Class_1[var_34][var_34])[var_34][var_34]).var_31 = (Tester_Class_0)var_33).var_1; + ((new Tester_Class_1[var_34])[var_34]).var_31 = (((new Tester_Class_1[var_34])[((new Tester_Class_1[var_34][var_34])[var_34][var_34]).var_1 ? var_34 : var_34]).var_31 = (((new Tester_Class_1[(byte)2.4941036E38F])[var_34]).var_31 = (Tester_Class_0)arg_1)); + } + + public static int[][] func_2(long arg_0, final float arg_1, short arg_2, final double arg_3) + { + long var_35; + { + arg_0++; + var_2 = true ? (byte)9.691601510156328E307 : (byte)"a".charAt(~ ((byte)arg_1)); + if (((new Tester_Class_1[(byte)'\\'][(byte)arg_2])[(byte)arg_2][(byte)arg_0]).var_29) + { + arg_2++; + } + else + { + Tester_Class_0.var_2 = arg_2; + var_30 = arg_2; + Tester_Class_0.var_2 = arg_0; + } + arg_2 /= 157487965; + arg_2 -= func_2(~ ((byte)arg_0), (short)arg_3, (byte)+2.2503214E38F); + } + arg_0--; + double var_36; + arg_0 <<= (arg_0 >>= (arg_0 = 'O')); + { + arg_0++; + --arg_0; + } + --arg_2; + ++arg_2; + "gbcrkn".length(); + var_30 = (short)7.14672E37F; + { + arg_0 %= (arg_0 >>= (arg_2 *= (byte)1.5835087622116814E308)) % arg_3; + var_36 = 'n'; + int[][] var_37 = new int[(byte)(double)arg_0][(byte)(arg_2 >>= 'o')]; + if ((byte)1390907656194158592L <= arg_2) + { + "uuoeps".indexOf("", 899321600); + } + else + { + var_36 = - ~ -arg_0; + } + short var_38 = var_5; + var_36 = ~arg_0 + (6482428938632186880L + 6995927649252739072L); + } + if (((new Tester_Class_1[(byte)arg_1][(byte)arg_2])[(new byte[(byte)arg_0])[(byte)var_5]][(byte)'s']).var_1 = false) + { + ++arg_0; + } + else + { + ((new Tester_Class_1[(byte)2.7176027E38F])[(byte)((arg_2 -= 2.595396436487417E307) % 'p')]).var_1 ^= ((new Tester_Class_1[(byte)4.393706E36F])[false ? 
(byte)4826960994531808256L : (byte)arg_0]).var_29; + } + int var_39 = 0; + arg_2 <<= 'Y'; + while (var_39 < 1 && false) + { + arg_0++; + var_39++; + Object var_40; + ((Tester_Class_0)(var_40 = new long[(byte)3.285531E38F])).var_3 += var_39; + } + Object var_41; + "w".substring(1359453539); + return new int[(byte)((arg_2 /= 4.143015135482291E307) - 3.2659622E38F)][(byte)++arg_2]; + } + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_1.var_5 = "; result += Test6712835.Printer.print(var_5); + result += "\n"; + result += "Tester_Class_1.var_30 = "; result += Test6712835.Printer.print(var_30); + result += "\n"; + result += "Tester_Class_1.var_4 = "; result += Test6712835.Printer.print(var_4); + result += "\n"; + result += "Tester_Class_1.var_1 = "; result += Test6712835.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_1.var_29 = "; result += Test6712835.Printer.print(var_29); + result += "\n"; + result += "Tester_Class_1.var_2 = "; result += Test6712835.Printer.print(var_2); + result += "\n"; + result += "Tester_Class_1.var_3 = "; result += Test6712835.Printer.print(var_3); + result += "\n"; + result += "Tester_Class_1.var_31 = "; result += Test6712835.Printer.print(var_31); + result += ""; + result += "\n]"; + return result; + } +} + + +final class Tester_Class_2 extends Tester_Class_0 { + static float var_44 = 2.7867988E38F; + static byte var_45; + static long var_46 = 4319798868443575296L; + + + public Tester_Class_2() + { + Tester_Class_1.var_30 = (byte)3.1718026E38F; + var_45 = (new byte[(byte)'o'])[var_45 = (byte)Tester_Class_0.var_5]; + Tester_Class_1.var_30 = (Tester_Class_1.var_30 = Tester_Class_0.var_5); + if (true) + { + ++var_46; + boolean var_51 = false ? (var_1 &= !var_1) : true; + --var_46; + if (false) + { + var_3 *= 6.882788442363403E307; + } + else + { + Tester_Class_0.var_2 = '`'; + } + final float var_52 = (var_1 ^= var_1 || (var_1 &= false)) | (var_51 |= (var_51 &= false)) ? (byte)4.751813848964725E307 : (var_3 *= var_5); + (false ? var_4 : var_4).startsWith("j" + var_4); + var_46++; + var_3 %= Tester_Class_1.var_5; + } + else + { + Tester_Class_1.var_30 = (var_45 = (var_45 = (var_45 = (byte)Tester_Class_1.var_5))); + Tester_Class_1.var_2 = (var_3 -= ~ ((byte)var_46) - 2018787280); + Tester_Class_1.var_30 = (Tester_Class_1.var_30 = (Tester_Class_1.var_30 = (Tester_Class_1.var_30 = var_5))); + } + char var_53; + ++var_46; + short var_54 = 138; + ++var_46; + var_2 = 1435782089; + Tester_Class_0.var_2 = var_46; + } + + + + + protected final boolean func_0(final boolean arg_0, final boolean arg_1) + { + var_2 = 2.6153986361247174E307; + var_45 = (var_45 = (var_45 = (var_45 = (var_45 = (byte)(var_44 += var_46))))); + var_46++; + long var_47 = 0L; + var_3 -= + ((byte)(~var_46 * ~var_46 ^ var_46 % 1910419567)); + do + { + ++var_46; + var_47++; + char var_48 = 'b'; + } while (var_47 < 2); + new Tester_Class_1().var_31 = ((new Tester_Class_1[var_45 = (byte)3.0853839E38F])[(new byte[var_45 = (byte)1.4974966426791287E308])[var_45 = (byte)Tester_Class_0.var_5]]).var_1 ? new Tester_Class_1() : new Tester_Class_1(); + var_45 = (var_45 = (byte)var_44); + double var_49 = 0; + var_45 = (byte)(Tester_Class_1.var_30 = Tester_Class_0.var_5); + while (((false ^ (var_1 &= var_1) | (var_1 |= arg_0) ? new Tester_Class_1() : new Tester_Class_1()).var_29 ? var_1 : false && (var_1 ^= arg_0)) && (var_49 < 3 && (true ? 
new Tester_Class_1() : new Tester_Class_1()).var_1)) + { + var_45 = (var_45 = (var_45 = (var_45 = (var_45 = (byte)1.933612E38F)))); + var_49++; + var_45 = (var_45 = (var_45 = (var_45 = (byte)685709636))); + long var_50; + } + var_45 = (var_45 = (var_45 = (byte)var_5)); + var_46--; + return true; + } + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_2.var_46 = "; result += Test6712835.Printer.print(var_46); + result += "\n"; + result += "Tester_Class_2.var_2 = "; result += Test6712835.Printer.print(var_2); + result += "\n"; + result += "Tester_Class_2.var_3 = "; result += Test6712835.Printer.print(var_3); + result += "\n"; + result += "Tester_Class_2.var_44 = "; result += Test6712835.Printer.print(var_44); + result += "\n"; + result += "Tester_Class_2.var_5 = "; result += Test6712835.Printer.print(var_5); + result += "\n"; + result += "Tester_Class_2.var_45 = "; result += Test6712835.Printer.print(var_45); + result += "\n"; + result += "Tester_Class_2.var_4 = "; result += Test6712835.Printer.print(var_4); + result += "\n"; + result += "Tester_Class_2.var_1 = "; result += Test6712835.Printer.print(var_1); + result += ""; + result += "\n]"; + return result; + } +} + + +class Tester_Class_3 extends Tester_Class_0 { + static boolean var_55 = true; + short var_56; + char var_57 = (char)723612093; + final static byte var_58 = 118; + static float var_59 = true ? -2818156175448416256L : - - (Tester_Class_2.var_44 += var_58); + static Tester_Class_1 var_60; + byte var_61 = 112; + Tester_Class_2[] var_62; + static short var_63 = 19813; + static double var_64 = (var_55 = true) ? (Tester_Class_1.var_2 = 'M') : Tester_Class_2.var_46; + + + public Tester_Class_3() + { + var_56 = var_58; + Tester_Class_1 var_65 = var_60 = (var_60 = (var_60 = (new Tester_Class_1[var_61 |= '\\'])[(var_1 = true) || var_55 ? var_58 : var_61])); + var_64 /= 1253632965 * '`'; + Tester_Class_2.var_46 >>>= var_58; + (((var_61 = var_58) * (var_55 ? 1641980027 : var_63) >= 1490788063 ? var_65 : var_65).var_29 ? var_65 : var_65).var_31 = (new Tester_Class_2[var_58])[var_58]; + ++var_63; + new String(); + var_64 += var_55 ? (var_61 >>>= 'Q') : (var_63 <<= var_57); + ((new Tester_Class_2().var_3 >= Tester_Class_2.var_46 ? !var_55 : var_4.startsWith(var_4, 586086925)) ? 
"gjsdhuop" : "juqrt").substring(("pm" + ((new Tester_Class_2[var_61][var_58])[var_58][var_58]).var_4).codePointBefore((~var_61 << 3032688286897486848L) - Tester_Class_1.var_5), (var_61 += 4.0796373033184064E306) >> (Tester_Class_2.var_46 >>> var_58)); + var_63 -= (var_63 ^= var_57); + var_64 = var_5 - (Tester_Class_2.var_46 *= var_57); + Tester_Class_2.var_46 &= 7544159045139005440L; + var_55 |= false; + Tester_Class_2.var_46 = var_61; + } + + + + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_3.var_57 = "; result += Test6712835.Printer.print(var_57); + result += "\n"; + result += "Tester_Class_3.var_62 = "; result += Test6712835.Printer.print(var_62); + result += "\n"; + result += "Tester_Class_3.var_2 = "; result += Test6712835.Printer.print(var_2); + result += "\n"; + result += "Tester_Class_3.var_64 = "; result += Test6712835.Printer.print(var_64); + result += "\n"; + result += "Tester_Class_3.var_3 = "; result += Test6712835.Printer.print(var_3); + result += "\n"; + result += "Tester_Class_3.var_59 = "; result += Test6712835.Printer.print(var_59); + result += "\n"; + result += "Tester_Class_3.var_5 = "; result += Test6712835.Printer.print(var_5); + result += "\n"; + result += "Tester_Class_3.var_56 = "; result += Test6712835.Printer.print(var_56); + result += "\n"; + result += "Tester_Class_3.var_63 = "; result += Test6712835.Printer.print(var_63); + result += "\n"; + result += "Tester_Class_3.var_58 = "; result += Test6712835.Printer.print(var_58); + result += "\n"; + result += "Tester_Class_3.var_61 = "; result += Test6712835.Printer.print(var_61); + result += "\n"; + result += "Tester_Class_3.var_4 = "; result += Test6712835.Printer.print(var_4); + result += "\n"; + result += "Tester_Class_3.var_1 = "; result += Test6712835.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_3.var_55 = "; result += Test6712835.Printer.print(var_55); + result += "\n"; + result += "Tester_Class_3.var_60 = "; result += Test6712835.Printer.print(var_60); + result += ""; + result += "\n]"; + return result; + } +} + + +final class Tester_Class_4 { + static long var_66; + final long var_67 = 7113579489152300032L * 985636454; + int[] var_68; + Tester_Class_3 var_69; + final long var_70 = Tester_Class_2.var_46 <<= Tester_Class_1.var_5; + byte var_71 = Tester_Class_3.var_58; + + + public Tester_Class_4() + { + Tester_Class_2.var_46++; + (var_69 = new Tester_Class_3()).var_61 += (!true | (Tester_Class_3.var_55 ^= Tester_Class_3.var_55) ? new Tester_Class_3() : new Tester_Class_3()).var_61; + final String[][] var_79 = new String[var_71 >>= (Tester_Class_3.var_63 ^= 'm')][((Tester_Class_3)(new Tester_Class_1().var_31 = new Tester_Class_2())).var_61 >>= (var_71 >>>= (Tester_Class_2.var_46 += 465205188010511360L))]; + ++(var_69 = (var_69 = (var_69 = (Tester_Class_3)(new Object[Tester_Class_3.var_58][var_71])[Tester_Class_3.var_58][var_71]))).var_61; + (((new Tester_Class_2[var_71][Tester_Class_3.var_58])[Tester_Class_2.var_45 = var_71])[var_71]).var_3 += (Tester_Class_2.var_46 <<= (Tester_Class_2.var_46 /= 9.03047405760868E307) >> (new Tester_Class_2().var_1 ? 
2099696051 : Tester_Class_3.var_63)); + Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = new Tester_Class_1()))); + char var_80; + Tester_Class_3.var_64 += 355712574; + ++Tester_Class_2.var_46; + } + + + + + private final static Tester_Class_1 func_0(boolean arg_0, double arg_1) + { + Tester_Class_3.var_60 = (Tester_Class_3.var_60 = new Tester_Class_1()); + byte var_72 = (byte)Tester_Class_2.var_46; + Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = new Tester_Class_1())))); + float var_73 = 0F; + "flfix".offsetByCodePoints((Tester_Class_3.var_63 ^= 3286104714651747328L) + ((Tester_Class_3)(new Tester_Class_0[var_72])[var_72]).var_61, Tester_Class_0.var_5 + Tester_Class_3.var_58); + while (var_73 < 2 && (false ? (Tester_Class_3.var_60 = new Tester_Class_1()) : (Tester_Class_1)(new Tester_Class_0[var_72])[var_72]).var_29) + { + ((Tester_Class_3)(Tester_Class_0)(new Object[var_72])[Tester_Class_3.var_58]).var_61 >>= ((new Tester_Class_4[var_72])[var_72]).var_67; + var_73++; + new String("blod"); + --var_72; + } + ((new Tester_Class_4[Tester_Class_3.var_58][var_72])[new Tester_Class_3().var_61][Tester_Class_3.var_58]).var_69 = new Tester_Class_3(); + float var_74 = (! ("dkcx".lastIndexOf(Tester_Class_1.var_5 >> - (var_72 >>>= 1433506903139345408L)) == Tester_Class_2.var_46) ? 'O' : 'e' - new Tester_Class_2().var_3) * ~ (var_72 ^= var_72); + Tester_Class_3.var_60 = !true ? new Tester_Class_1() : (new Tester_Class_1[Tester_Class_3.var_58])[var_72]; + ((arg_0 &= Tester_Class_3.var_55 | (Tester_Class_3.var_60 = new Tester_Class_1()).var_29) ? (Tester_Class_3.var_60 = (Tester_Class_1)(new Tester_Class_1().var_31 = new Tester_Class_2())) : (Tester_Class_3.var_60 = (new Tester_Class_1[var_72])[Tester_Class_3.var_58])).var_31 = (new Tester_Class_3[var_72 |= 546982927])[Tester_Class_3.var_58]; + long var_75 = 0L; + final double var_76 = +arg_1; + while (var_75 < 1) + { + short var_77; + var_75++; + new Tester_Class_3().var_57 = (false & true ? new Tester_Class_3() : new Tester_Class_3()).var_57; + (Tester_Class_3.var_60 = (new Tester_Class_1[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_31 = (new Tester_Class_2[Tester_Class_3.var_58][var_72])[var_72][var_72]; + } + Tester_Class_3.var_64 *= (arg_0 ? (Tester_Class_3.var_55 ^= (arg_0 ^= arg_0)) & ! (Tester_Class_3.var_55 = arg_0) : arg_0) ^ new Tester_Class_1().var_29 ? ++((new Tester_Class_3[var_72][var_72])[(new byte[Tester_Class_3.var_58])[Tester_Class_3.var_58]][(((new Tester_Class_4[var_72][Tester_Class_3.var_58])[Tester_Class_3.var_58][Tester_Class_3.var_58]).var_69 = (new Tester_Class_3[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_61]).var_57 : 'C'; + long var_78; + var_74 %= (Tester_Class_3.var_55 |= (arg_0 = (arg_0 ^= (arg_0 &= !arg_0)))) ? 
new Tester_Class_3().var_61 : (Tester_Class_3.var_63 ^= var_72); + arg_1 /= (Tester_Class_2.var_46 &= 'W'); + --(((new Tester_Class_4[var_72])[var_72]).var_69 = (((new Tester_Class_4[var_72])[var_72]).var_69 = new Tester_Class_3())).var_61; + return (new Tester_Class_1[var_72][Tester_Class_3.var_58])[var_72][new Tester_Class_3().var_61]; + } + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_4.var_68 = "; result += Test6712835.Printer.print(var_68); + result += "\n"; + result += "Tester_Class_4.var_66 = "; result += Test6712835.Printer.print(var_66); + result += "\n"; + result += "Tester_Class_4.var_67 = "; result += Test6712835.Printer.print(var_67); + result += "\n"; + result += "Tester_Class_4.var_70 = "; result += Test6712835.Printer.print(var_70); + result += "\n"; + result += "Tester_Class_4.var_71 = "; result += Test6712835.Printer.print(var_71); + result += "\n"; + result += "Tester_Class_4.var_69 = "; result += Test6712835.Printer.print(var_69); + result += ""; + result += "\n]"; + return result; + } +} + + +final class Tester_Class_5 extends Tester_Class_0 { + static boolean var_81; + final int var_82 = 174395841; + int var_83; + byte var_84; + boolean var_85 = Tester_Class_3.var_55; + static boolean var_86 = Tester_Class_3.var_55; + + + public Tester_Class_5() + { + { + short var_87 = (new short[Tester_Class_3.var_58][var_84 = Tester_Class_3.var_58])[(((new Tester_Class_4[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_69 = (Tester_Class_3)(Tester_Class_0)(new Object[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_61][Tester_Class_3.var_58]; + Tester_Class_4 var_88 = var_85 ^ (var_81 = false) ? (new Tester_Class_4[Tester_Class_3.var_58])[Tester_Class_3.var_58] : (new Tester_Class_4[Tester_Class_3.var_58])[Tester_Class_3.var_58]; + { + ++var_87; + } + short var_89; + (var_88.var_69 = (new Tester_Class_3[var_88.var_71][var_88.var_71])[var_88.var_71][var_88.var_71]).var_61 += (((Tester_Class_2)(new Tester_Class_1().var_31 = new Tester_Class_2())).var_3 = Tester_Class_3.var_58); + var_88 = var_88; + } + { + ++Tester_Class_2.var_46; + --Tester_Class_2.var_46; + } + { + Tester_Class_2.var_46++; + Tester_Class_3.var_64 /= Tester_Class_3.var_59; + ((Tester_Class_4)(new Object[Tester_Class_2.var_45 = Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_71 %= (var_3 /= 3637233239489444864L); + ++Tester_Class_2.var_46; + } + new Tester_Class_3().var_57++; + var_85 &= (Tester_Class_3.var_55 |= false); + Tester_Class_3.var_60 = new Tester_Class_1(); + Tester_Class_2.var_46++; + ((Tester_Class_3)(true ? 
(new Tester_Class_2[Tester_Class_3.var_58])[Tester_Class_3.var_58] : (new Tester_Class_0[Tester_Class_3.var_58])[Tester_Class_2.var_45 = Tester_Class_3.var_58])).var_57 *= ((new Tester_Class_3[Tester_Class_3.var_58])[(byte)'`']).var_57; + var_3 += (int)Tester_Class_3.var_59 ^ (Tester_Class_2.var_46 -= Tester_Class_2.var_46) % ~((new Tester_Class_4[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_71; + ++Tester_Class_2.var_46; + --Tester_Class_2.var_46; + var_83 = Tester_Class_3.var_58; + } + + + + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_5.var_82 = "; result += Test6712835.Printer.print(var_82); + result += "\n"; + result += "Tester_Class_5.var_83 = "; result += Test6712835.Printer.print(var_83); + result += "\n"; + result += "Tester_Class_5.var_2 = "; result += Test6712835.Printer.print(var_2); + result += "\n"; + result += "Tester_Class_5.var_3 = "; result += Test6712835.Printer.print(var_3); + result += "\n"; + result += "Tester_Class_5.var_5 = "; result += Test6712835.Printer.print(var_5); + result += "\n"; + result += "Tester_Class_5.var_84 = "; result += Test6712835.Printer.print(var_84); + result += "\n"; + result += "Tester_Class_5.var_4 = "; result += Test6712835.Printer.print(var_4); + result += "\n"; + result += "Tester_Class_5.var_1 = "; result += Test6712835.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_5.var_81 = "; result += Test6712835.Printer.print(var_81); + result += "\n"; + result += "Tester_Class_5.var_85 = "; result += Test6712835.Printer.print(var_85); + result += "\n"; + result += "Tester_Class_5.var_86 = "; result += Test6712835.Printer.print(var_86); + result += ""; + result += "\n]"; + return result; + } +} + + +class Tester_Class_6 extends Tester_Class_0 { + long var_90 = 8467263472031702016L; + final static int var_91 = 1648594448 * ']'; + char var_92 = 'x'; + short var_93 = Tester_Class_3.var_63; + Tester_Class_4 var_94; + String[] var_95; + static short var_96 = Tester_Class_3.var_63 -= 83376045 << 40225606; + final static double var_97 = 5.387227213380301E307; + final static short var_98 = Tester_Class_3.var_63 &= var_91; + byte var_99 = 44; + + + public Tester_Class_6() + { + (Tester_Class_3.var_60 = (Tester_Class_1)(new Object[Tester_Class_3.var_58][var_99])[Tester_Class_3.var_58][var_99]).var_31 = true | true ? (Tester_Class_5)(new Object[var_99])[Tester_Class_3.var_58] : (Tester_Class_5)(new Object[Tester_Class_3.var_58])[var_99]; + var_92 &= 'p'; + Tester_Class_5.var_81 = (((new Tester_Class_1[var_99][Tester_Class_3.var_58])[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_29; + { + { + ++Tester_Class_2.var_46; + Tester_Class_3.var_2 = var_98; + var_93 -= var_96; + } + Tester_Class_2.var_46--; + { + (var_5 == (((Tester_Class_3)(new Tester_Class_0[var_99])[Tester_Class_3.var_58]).var_61 /= var_5) ? 
"fsajxeuao".replace('s', 'K') : var_4).substring('e' >>> var_5).toLowerCase(); + } + var_93 %= ((new Tester_Class_6[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_90; + var_93 /= var_93; + if (Tester_Class_5.var_86) + { + (var_94 = (new Tester_Class_4[var_99])[var_99]).var_69 = (new Tester_Class_3[var_99])[var_99 %= -var_90]; + } + else + { + --var_96; + } + var_93 *= 'O'; + final long var_103 = 7573900518735055872L; + --Tester_Class_3.var_63; + } + Tester_Class_3.var_64 /= var_93; + if (true) + { + --Tester_Class_2.var_46; + Tester_Class_5 var_104; + final double var_105 = Tester_Class_3.var_64 += Tester_Class_5.var_86 & (new Tester_Class_2().var_1 & ((Tester_Class_3.var_55 = (var_1 ^= Tester_Class_5.var_86) & false) & (Tester_Class_5.var_81 = Tester_Class_5.var_86))) ? (byte)'g' : var_99; + Tester_Class_3.var_64 *= var_99; + } + else + { + char var_106 = var_92 -= Tester_Class_3.var_58; + } + double[] var_107 = ((new double[Tester_Class_3.var_58][var_99][var_99])[var_99])[false ? Tester_Class_3.var_58 : Tester_Class_3.var_58]; + var_99 <<= (Tester_Class_3.var_63 >>= Tester_Class_3.var_58); + ++var_99; + } + + + + + final static byte func_0(final byte arg_0, final char arg_1, final Tester_Class_5[] arg_2) + { + ((Tester_Class_4)(new Object[Tester_Class_3.var_58][Tester_Class_3.var_58])[Tester_Class_3.var_58][arg_0]).var_69 = (Tester_Class_3)(new Tester_Class_0[Tester_Class_3.var_58])[Tester_Class_2.var_45 = Tester_Class_3.var_58]; + long var_100 = 0L; + Tester_Class_3.var_64 /= (Tester_Class_5.var_86 = true) || 'o' > (Tester_Class_3.var_63 -= (float)arg_0) ? var_98 : 1.7875238E38F; + do + { + Tester_Class_3.var_64 %= var_5; + var_100++; + Tester_Class_3.var_64 += var_96 + 'r'; + } while (true && (var_100 < 1 && (new Tester_Class_1().var_29 ? new Tester_Class_1() : (new Tester_Class_1[arg_0][Tester_Class_3.var_58])[arg_0][Tester_Class_3.var_58]).var_29)); + (Tester_Class_3.var_55 ^ (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = new Tester_Class_1()))).var_29 ? new Tester_Class_3() : new Tester_Class_3()).var_57 = ((((new Tester_Class_6[Tester_Class_3.var_58][Tester_Class_3.var_58])[Tester_Class_3.var_58][Tester_Class_3.var_58]).var_94 = (((new Tester_Class_6[Tester_Class_3.var_58][Tester_Class_3.var_58])[Tester_Class_3.var_58][arg_0]).var_94 = (new Tester_Class_4[Tester_Class_3.var_58][arg_0])[Tester_Class_3.var_58][Tester_Class_3.var_58])).var_69 = new Tester_Class_3()).var_57; + final double var_101 = 1.6798216578519203E308; + Tester_Class_3.var_60 = (Tester_Class_3.var_60 = false ? 
new Tester_Class_1() : (Tester_Class_3.var_60 = new Tester_Class_1())); + Tester_Class_2 var_102 = new Tester_Class_2(); + return Tester_Class_3.var_58; + } + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_6.var_92 = "; result += Test6712835.Printer.print(var_92); + result += "\n"; + result += "Tester_Class_6.var_91 = "; result += Test6712835.Printer.print(var_91); + result += "\n"; + result += "Tester_Class_6.var_95 = "; result += Test6712835.Printer.print(var_95); + result += "\n"; + result += "Tester_Class_6.var_90 = "; result += Test6712835.Printer.print(var_90); + result += "\n"; + result += "Tester_Class_6.var_2 = "; result += Test6712835.Printer.print(var_2); + result += "\n"; + result += "Tester_Class_6.var_97 = "; result += Test6712835.Printer.print(var_97); + result += "\n"; + result += "Tester_Class_6.var_3 = "; result += Test6712835.Printer.print(var_3); + result += "\n"; + result += "Tester_Class_6.var_5 = "; result += Test6712835.Printer.print(var_5); + result += "\n"; + result += "Tester_Class_6.var_93 = "; result += Test6712835.Printer.print(var_93); + result += "\n"; + result += "Tester_Class_6.var_96 = "; result += Test6712835.Printer.print(var_96); + result += "\n"; + result += "Tester_Class_6.var_98 = "; result += Test6712835.Printer.print(var_98); + result += "\n"; + result += "Tester_Class_6.var_99 = "; result += Test6712835.Printer.print(var_99); + result += "\n"; + result += "Tester_Class_6.var_4 = "; result += Test6712835.Printer.print(var_4); + result += "\n"; + result += "Tester_Class_6.var_1 = "; result += Test6712835.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_6.var_94 = "; result += Test6712835.Printer.print(var_94); + result += ""; + result += "\n]"; + return result; + } +} + + +abstract class Tester_Class_7 { + final static char var_108 = '_'; + static Tester_Class_3 var_109; + final short var_110 = 4360; + short var_111; + Object var_112; + Tester_Class_4 var_113; + static Tester_Class_5 var_114; + final short var_115 = Tester_Class_6.var_96; + final static float var_116 = Tester_Class_3.var_59; + + + public Tester_Class_7() + { + --Tester_Class_2.var_46; + --Tester_Class_6.var_96; + var_113 = (new Tester_Class_4[new Tester_Class_6().var_99])[Tester_Class_3.var_58]; + --Tester_Class_2.var_46; + Tester_Class_6.var_96--; + Tester_Class_3.var_63 -= 'i'; + if (!Tester_Class_5.var_86) + { + Tester_Class_3.var_64 %= var_116; + if ((Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_1)(Tester_Class_0)(var_112 = "yosyghjm"))).var_29) + { + Tester_Class_2.var_46++; + } + else + { + (var_114 = (var_114 = (Tester_Class_5)(Tester_Class_0)(var_112 = "bxt"))).var_83 = (Tester_Class_2.var_45 = (Tester_Class_2.var_45 = Tester_Class_3.var_58)); + } + var_114 = (var_114 = (var_114 = (var_114 = (var_114 = (var_114 = (Tester_Class_5)(var_112 = "blrobgg")))))); + var_113 = (((Tester_Class_6)(var_112 = "popebwfp")).var_94 = (new Tester_Class_4[Tester_Class_3.var_58])[Tester_Class_3.var_58]); + } + else + { + Tester_Class_3.var_60 = new Tester_Class_1(); + } + final Tester_Class_6 var_122 = new Tester_Class_6(); + var_122.var_92 &= (var_122.var_92 |= var_108); + ((new Tester_Class_5[var_122.var_99])[((new Tester_Class_3[Tester_Class_3.var_58])[var_122.var_99--]).var_61]).var_83 = 1708230145; + } + + + + public boolean equals(Object obj) + { + (((Tester_Class_5.var_81 = (Tester_Class_5.var_81 = false)) ? (Tester_Class_3.var_55 &= false) : !Tester_Class_3.var_55 & ((Tester_Class_1)obj).var_29) ? 
(new Tester_Class_2[Tester_Class_3.var_58])[Tester_Class_3.var_58] : (Tester_Class_2)obj).equals((Tester_Class_5.var_86 |= Tester_Class_3.var_55) | (Tester_Class_3.var_55 = Tester_Class_3.var_55) ? obj : (Tester_Class_6)(Tester_Class_0)obj); + Tester_Class_3.var_64 *= 2.8258473339654136E307; + { + final int var_118 = 1248523063; + short var_119 = 30906; + Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_1)obj); + ((Tester_Class_6)(((Tester_Class_1)obj).var_31 = ((var_113 = (Tester_Class_4)obj).var_69 = (Tester_Class_3)obj))).var_94 = (var_113 = (Tester_Class_4)(var_112 = (Tester_Class_1)obj)); + } + final Tester_Class_1 var_120 = false ^ (((Tester_Class_1)obj).var_1 = !true) ^ (((Tester_Class_6)(Tester_Class_0)obj).var_92 *= (((Tester_Class_3)obj).var_57 |= (Tester_Class_2.var_46 >>= 6986775136305733632L))) < (byte)Tester_Class_6.var_97 ? (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_1)obj)) : (true ? (Tester_Class_1)obj : (Tester_Class_1)obj); + (var_114 = (var_114 = (Tester_Class_5)obj)).var_83 = (((new Tester_Class_6[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_92 &= ((Tester_Class_4)obj).var_70 << (Tester_Class_2.var_45 = Tester_Class_3.var_58)); + var_114 = (Tester_Class_5)obj; + obj = ((Tester_Class_3.var_60 = var_120).var_29 ? false : false) ? (new Tester_Class_6[Tester_Class_3.var_58])[Tester_Class_3.var_58] : obj; + (var_120.var_29 ? (Tester_Class_6)(obj = (Tester_Class_3.var_60 = var_120)) : (new Tester_Class_6[Tester_Class_3.var_58])[((Tester_Class_3)obj).var_61 ^= Tester_Class_6.var_91]).var_90 ^= 2127530040436251648L; + Object var_121; + return (new boolean[Tester_Class_3.var_58])[((var_113 = (Tester_Class_4)obj).var_69 = (var_109 = (new Tester_Class_3[Tester_Class_3.var_58][Tester_Class_3.var_58])[Tester_Class_3.var_58][Tester_Class_3.var_58])).var_61]; + } + + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_7.var_108 = "; result += Test6712835.Printer.print(var_108); + result += "\n"; + result += "Tester_Class_7.var_116 = "; result += Test6712835.Printer.print(var_116); + result += "\n"; + result += "Tester_Class_7.var_110 = "; result += Test6712835.Printer.print(var_110); + result += "\n"; + result += "Tester_Class_7.var_111 = "; result += Test6712835.Printer.print(var_111); + result += "\n"; + result += "Tester_Class_7.var_115 = "; result += Test6712835.Printer.print(var_115); + result += "\n"; + result += "Tester_Class_7.var_114 = "; result += Test6712835.Printer.print(var_114); + result += "\n"; + result += "Tester_Class_7.var_113 = "; result += Test6712835.Printer.print(var_113); + result += "\n"; + result += "Tester_Class_7.var_109 = "; result += Test6712835.Printer.print(var_109); + result += "\n"; + result += "Tester_Class_7.var_112 = "; result += Test6712835.Printer.print(var_112); + result += ""; + result += "\n]"; + return result; + } +} + + +class Tester_Class_8 extends Tester_Class_7 { + static char var_123; + Tester_Class_4 var_124; + static short var_125; + + + public Tester_Class_8() + { + { + Tester_Class_3.var_64 -= (Tester_Class_2.var_46 *= Tester_Class_3.var_64); + { + Tester_Class_2.var_46--; + } + ++Tester_Class_3.var_63; + Tester_Class_5.var_86 |= true; + Tester_Class_6.var_96--; + } + "w".indexOf(312689020); + if (false) + { + (Tester_Class_7.var_114 = (new Tester_Class_5[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_83 = 'I'; + } + else + { + --Tester_Class_6.var_96; + } + switch (Tester_Class_5.var_86 ? 
Tester_Class_3.var_58 : Tester_Class_3.var_58) + { + case 95: + + case 35: + + } + Tester_Class_6.var_96--; + Tester_Class_3.var_64 *= 4.516167673347119E307; + --Tester_Class_3.var_63; + { + int var_126; + } + Tester_Class_3.var_60 = new Tester_Class_1(); + Tester_Class_2.var_46++; + ((new Tester_Class_6[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_99 &= Tester_Class_6.var_91; + ((new Tester_Class_1[((new Tester_Class_4[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_71])[((Tester_Class_3)(var_112 = "fsmtm")).var_61]).var_31 = (Tester_Class_2)(new Tester_Class_0[Tester_Class_3.var_58])[Tester_Class_3.var_58]; + } + + + + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_8.var_108 = "; result += Test6712835.Printer.print(var_108); + result += "\n"; + result += "Tester_Class_8.var_123 = "; result += Test6712835.Printer.print(var_123); + result += "\n"; + result += "Tester_Class_8.var_116 = "; result += Test6712835.Printer.print(var_116); + result += "\n"; + result += "Tester_Class_8.var_110 = "; result += Test6712835.Printer.print(var_110); + result += "\n"; + result += "Tester_Class_8.var_111 = "; result += Test6712835.Printer.print(var_111); + result += "\n"; + result += "Tester_Class_8.var_115 = "; result += Test6712835.Printer.print(var_115); + result += "\n"; + result += "Tester_Class_8.var_125 = "; result += Test6712835.Printer.print(var_125); + result += "\n"; + result += "Tester_Class_8.var_114 = "; result += Test6712835.Printer.print(var_114); + result += "\n"; + result += "Tester_Class_8.var_113 = "; result += Test6712835.Printer.print(var_113); + result += "\n"; + result += "Tester_Class_8.var_124 = "; result += Test6712835.Printer.print(var_124); + result += "\n"; + result += "Tester_Class_8.var_109 = "; result += Test6712835.Printer.print(var_109); + result += "\n"; + result += "Tester_Class_8.var_112 = "; result += Test6712835.Printer.print(var_112); + result += ""; + result += "\n]"; + return result; + } +} + + +final class Tester_Class_9 { + final static String var_127 = "pxk"; + Tester_Class_2 var_128; + final static char var_129 = '\\'; + static float var_130; + static boolean var_131; + final static float var_132 = Tester_Class_3.var_59; + static Tester_Class_0 var_133; + boolean[] var_134; + + + public Tester_Class_9() + { + Tester_Class_2.var_44 -= Tester_Class_3.var_58; + Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (new Tester_Class_1[Tester_Class_3.var_58][Tester_Class_3.var_58])[Tester_Class_3.var_58][Tester_Class_3.var_58])); + { + Tester_Class_8 var_136; + } + ++Tester_Class_2.var_46; + Tester_Class_6.var_96--; + var_128 = (var_128 = (var_128 = (Tester_Class_2)(var_133 = (new Tester_Class_1[Tester_Class_3.var_58])[Tester_Class_3.var_58]))); + ++Tester_Class_6.var_96; + ++Tester_Class_2.var_46; + Tester_Class_4 var_137; + var_128 = (var_128 = (new Tester_Class_2[Tester_Class_3.var_58])[Tester_Class_3.var_58]); + (Tester_Class_8.var_114 = (Tester_Class_8.var_114 = (new Tester_Class_5[Tester_Class_3.var_58])[Tester_Class_3.var_58])).var_83 = (((new Tester_Class_4[Tester_Class_3.var_58][Tester_Class_3.var_58])[Tester_Class_3.var_58][Tester_Class_3.var_58]).var_69 = (new Tester_Class_3[Tester_Class_3.var_58][Tester_Class_3.var_58])[Tester_Class_3.var_58][Tester_Class_3.var_58]).var_57++; + Tester_Class_2.var_46++; + } + + + + + protected static short func_1() + { + { + Tester_Class_3.var_63--; + } + Tester_Class_3.var_64 *= Tester_Class_2.var_46; + short var_135; + Tester_Class_3.var_64 -= 
Tester_Class_6.var_96; + return new Tester_Class_6().var_93; + } + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_9.var_129 = "; result += Test6712835.Printer.print(var_129); + result += "\n"; + result += "Tester_Class_9.var_134 = "; result += Test6712835.Printer.print(var_134); + result += "\n"; + result += "Tester_Class_9.var_130 = "; result += Test6712835.Printer.print(var_130); + result += "\n"; + result += "Tester_Class_9.var_132 = "; result += Test6712835.Printer.print(var_132); + result += "\n"; + result += "Tester_Class_9.var_131 = "; result += Test6712835.Printer.print(var_131); + result += "\n"; + result += "Tester_Class_9.var_127 = "; result += Test6712835.Printer.print(var_127); + result += "\n"; + result += "Tester_Class_9.var_128 = "; result += Test6712835.Printer.print(var_128); + result += "\n"; + result += "Tester_Class_9.var_133 = "; result += Test6712835.Printer.print(var_133); + result += ""; + result += "\n]"; + return result; + } +} + + +final class Tester_Class_10 extends Tester_Class_0 { + final static byte var_138 = 78; + Object var_139; + final static boolean var_140 = true; + float var_141 = 1.2816267E38F; + Tester_Class_8 var_142; + static Tester_Class_3 var_143; + short var_144 = var_1 ? (Tester_Class_6.var_96 &= 8024552544994698240L) : Tester_Class_0.var_5; + final boolean var_145 = var_140; + long var_146; + float[] var_147; + + + public Tester_Class_10() + { + "xuc".codePointCount(new Tester_Class_6().var_99 / ((new Tester_Class_9().var_128 = new Tester_Class_2()).var_1 ? var_138 : (int)(Tester_Class_3.var_64 += Tester_Class_3.var_64)), 882345740); + Tester_Class_3.var_64 /= Tester_Class_9.var_132; + Tester_Class_9.var_127.indexOf((Tester_Class_7.var_114 = (Tester_Class_8.var_114 = (Tester_Class_5)(var_139 = "mcyagebtv"))).var_83 = var_145 ? (Tester_Class_2.var_45 = Tester_Class_3.var_58) : Tester_Class_6.var_96); + --Tester_Class_2.var_46; + final float var_148 = 3.0263434E38F; + ((Tester_Class_7.var_114 = (Tester_Class_5)(Tester_Class_9.var_133 = new Tester_Class_1())).var_85 & ((Tester_Class_1)(var_139 = new Tester_Class_6())).var_1 ? "gmxwrgik" : Tester_Class_9.var_127).compareTo(var_4); + --Tester_Class_2.var_46; + new Tester_Class_6(); + ++Tester_Class_2.var_46; + Tester_Class_3.var_60 = Tester_Class_5.var_86 ? 
new Tester_Class_1() : new Tester_Class_1(); + { + --Tester_Class_6.var_96; + ((Tester_Class_7)(var_139 = new Tester_Class_1().var_4)).var_112 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_1)(var_139 = "gugsy"))); + } + Tester_Class_9.var_133 = (Tester_Class_3.var_60 = new Tester_Class_1()); + if (var_140 & !var_140) + { + Tester_Class_6.var_96++; + } + else + { + Tester_Class_2.var_46++; + } + { + ++new Tester_Class_6().var_92; + } + Tester_Class_7.var_109 = (((new Tester_Class_4[Tester_Class_3.var_58])[Tester_Class_3.var_58]).var_69 = (var_143 = new Tester_Class_3())); + Tester_Class_3.var_63--; + } + + + + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_10.var_147 = "; result += Test6712835.Printer.print(var_147); + result += "\n"; + result += "Tester_Class_10.var_146 = "; result += Test6712835.Printer.print(var_146); + result += "\n"; + result += "Tester_Class_10.var_3 = "; result += Test6712835.Printer.print(var_3); + result += "\n"; + result += "Tester_Class_10.var_141 = "; result += Test6712835.Printer.print(var_141); + result += "\n"; + result += "Tester_Class_10.var_5 = "; result += Test6712835.Printer.print(var_5); + result += "\n"; + result += "Tester_Class_10.var_144 = "; result += Test6712835.Printer.print(var_144); + result += "\n"; + result += "Tester_Class_10.var_138 = "; result += Test6712835.Printer.print(var_138); + result += "\n"; + result += "Tester_Class_10.var_1 = "; result += Test6712835.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_10.var_140 = "; result += Test6712835.Printer.print(var_140); + result += "\n"; + result += "Tester_Class_10.var_145 = "; result += Test6712835.Printer.print(var_145); + result += "\n"; + result += "Tester_Class_10.var_139 = "; result += Test6712835.Printer.print(var_139); + result += "\n"; + result += "Tester_Class_10.var_142 = "; result += Test6712835.Printer.print(var_142); + result += "\n"; + result += "Tester_Class_10.var_2 = "; result += Test6712835.Printer.print(var_2); + result += "\n"; + result += "Tester_Class_10.var_4 = "; result += Test6712835.Printer.print(var_4); + result += "\n"; + result += "Tester_Class_10.var_143 = "; result += Test6712835.Printer.print(var_143); + result += ""; + result += "\n]"; + return result; + } +} + + +interface Tester_Interface_11 { + public Tester_Class_4 func_0(final int arg_0, final byte arg_1); + public Tester_Class_2 func_1(Tester_Class_5 arg_0, final Tester_Class_0 arg_1, final int arg_2); +} + +public class Test6712835 { + final boolean var_149 = false; + Tester_Class_8 var_150; + final long var_151 = 8058077687473630208L; + + + protected final Tester_Class_1 func_0(final Object arg_0, Tester_Class_3 arg_1, final Tester_Class_4 arg_2, int arg_3) + { + Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_1)arg_0)); + --Tester_Class_3.var_63; + (var_150 = (((new Tester_Class_10[arg_2.var_71])[(((Tester_Class_6)arg_0).var_94 = arg_2).var_71 &= Tester_Class_3.var_63 << ~arg_2.var_71]).var_142 = (var_150 = (((Tester_Class_10)arg_0).var_142 = (Tester_Class_8)arg_0)))).var_113 = arg_2; + Tester_Class_7.var_114 = (Tester_Class_7.var_114 = false ? (Tester_Class_5)arg_0 : (Tester_Class_5)arg_0); + ((((arg_1 = arg_1).var_1 |= "lgcrda".equalsIgnoreCase("ontlkst")) ? (Tester_Class_1)arg_0 : (Tester_Class_3.var_60 = (Tester_Class_1)arg_0)).var_29 ? 
(arg_1 = (Tester_Class_3)(((Tester_Class_7)arg_0).var_112 = (Tester_Class_9)arg_0)) : arg_1).var_57 >>>= ']'; + Tester_Class_8.var_114 = (Tester_Class_5)arg_0; + ((Tester_Class_3.var_55 &= (arg_1.var_1 = true)) ? (Tester_Class_6)(new Tester_Class_0[Tester_Class_3.var_58][Tester_Class_10.var_138])[Tester_Class_10.var_138][Tester_Class_10.var_138] : (Tester_Class_6)arg_0).var_94 = arg_2; + { + Tester_Class_3.var_55 &= ((Tester_Class_3.var_60 = new Tester_Class_1()).var_1 &= false); + Tester_Class_2.var_44 -= (arg_3 |= + ~6610561718704644096L); + ((Tester_Class_8)arg_0).var_113 = ((((Tester_Class_10)(Tester_Class_0)arg_0).var_142 = (var_150 = (Tester_Class_8)arg_0)).var_124 = arg_2); + (! (false | Tester_Class_5.var_86) ? (Tester_Class_10)arg_0 : (new Tester_Class_10[arg_1.var_61][arg_1.var_61])[Tester_Class_10.var_138][Tester_Class_10.var_138]).var_139 = ((Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_1)arg_0)).var_31 = (((Tester_Class_9)arg_0).var_128 = (((Tester_Class_9)arg_0).var_128 = (Tester_Class_2)arg_0))); + } + final Tester_Interface_11 var_152 = !((Tester_Class_1)arg_0).var_29 ^ Tester_Class_5.var_86 ? (new Tester_Interface_11[arg_2.var_71][arg_1.var_61])[arg_1.var_61][arg_1.var_61] : (new Tester_Interface_11[arg_2.var_71][arg_2.var_71])[Tester_Class_10.var_138][Tester_Class_3.var_58]; + Tester_Class_3.var_64 /= (arg_3 >>= ++((Tester_Class_6)(Tester_Class_0)arg_0).var_92) * Tester_Class_9.var_132; + Tester_Class_0 var_153 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_1)arg_0))))).var_31 = (((new Tester_Class_9[arg_1.var_61])[arg_1.var_61 *= 634692606]).var_128 = !false ? (Tester_Class_2)arg_0 : (Tester_Class_2)arg_0); + (Tester_Class_10.var_140 ? (Tester_Class_7)arg_0 : (var_150 = (Tester_Class_8)(Tester_Class_7)arg_0)).var_112 = Tester_Class_3.var_64 != ((((Tester_Class_10)(var_153 = (Tester_Class_8.var_114 = (Tester_Class_5)arg_0))).var_1 |= arg_1.var_1) ? (Tester_Class_6)var_153 : (Tester_Class_6)var_153).var_99-- ? (Tester_Class_7)((var_150 = (Tester_Class_8)arg_0).var_112 = (Tester_Class_10)var_153) : (Tester_Class_7)arg_0; + (((new Tester_Class_7[Tester_Class_10.var_138][arg_2.var_71])[Tester_Class_3.var_58])[arg_2.var_71]).var_112 = arg_0; + if (!false) + { + arg_3 <<= (Tester_Class_2.var_46 /= - ((byte)((Tester_Class_10)arg_0).var_144)) - ((Tester_Class_6)arg_0).var_99; + } + else + { + ((Tester_Class_7)(((Tester_Class_8)arg_0).var_112 = var_153)).var_113 = arg_2; + ((Tester_Class_9)arg_0).var_128 = (((Tester_Class_9)(((Tester_Class_7)arg_0).var_112 = (Tester_Class_7)arg_0)).var_128 = (((Tester_Class_9)arg_0).var_128 = (Tester_Class_2)arg_0)); + } + (((Tester_Class_10)arg_0).var_142 = (Tester_Class_8)arg_0).var_124 = (((Tester_Class_6)var_153).var_94 = arg_2); + final char var_154 = arg_1.var_57 %= ((Tester_Class_6)var_153).var_93--; + (true ? arg_1 : (arg_1 = arg_1)).equals(arg_0); + (Tester_Class_10.var_140 ? 
(new Tester_Class_6[Tester_Class_10.var_138])[arg_2.var_71] : (new Tester_Class_6[(Tester_Class_10.var_143 = arg_1).var_61])[arg_1.var_61]).var_94 = ((((new Tester_Class_7[arg_2.var_71][arg_1.var_61][Tester_Class_10.var_138])[Tester_Class_10.var_138])[arg_2.var_71 = arg_2.var_71][Tester_Class_10.var_138]).var_113 = (((Tester_Class_7)arg_0).var_113 = arg_2)); + Tester_Class_3.var_60 = ((Tester_Class_10)(((Tester_Class_7)arg_0).var_112 = (Tester_Class_7)(((Tester_Class_10)var_153).var_139 = new Tester_Class_6[Tester_Class_10.var_138][Tester_Class_10.var_138]))).var_1 ? (Tester_Class_3.var_60 = (Tester_Class_1)var_153) : (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_1)(Tester_Class_9.var_133 = (Tester_Class_10)arg_0))); + ((Tester_Class_7)(((Tester_Class_10)arg_0).var_139 = new Tester_Class_10[Tester_Class_3.var_58][--arg_2.var_71])).var_112 = new byte[(((Tester_Class_8)(Tester_Class_7)((var_150 = (var_150 = (Tester_Class_8)arg_0)).var_112 = arg_2)).var_113 = (((Tester_Class_7)arg_0).var_113 = arg_2)).var_71]; + Tester_Class_8 var_155; + (Tester_Class_3.var_55 & arg_2.equals(arg_0) ? (Tester_Class_10)var_153 : (Tester_Class_10)var_153).var_3 %= Tester_Class_6.var_91; + return ((Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_1)var_153)))).var_29 ? ! !true : Tester_Class_10.var_140 | Tester_Class_3.var_55) || Tester_Class_3.var_55 ? (Tester_Class_3.var_60 = (Tester_Class_1)(((Tester_Class_10)var_153).var_139 = (Tester_Class_6)var_153)) : new Tester_Class_1(); + } + + protected Tester_Class_5 func_1(Tester_Class_0 arg_0, final float arg_1) + { + (!Tester_Class_10.var_140 ? (Tester_Class_6)arg_0 : (Tester_Class_6)arg_0).var_90 /= ((Tester_Class_8.var_109 = (new boolean[Tester_Class_10.var_138][Tester_Class_3.var_58])[((Tester_Class_6)arg_0).var_99][Tester_Class_10.var_138] ? (Tester_Class_3)((Tester_Class_3.var_60 = (Tester_Class_1)arg_0).var_31 = (Tester_Class_6)arg_0) : (Tester_Class_3)arg_0).var_61 *= Tester_Class_3.var_58); + { + "".toLowerCase(); + } + ((Tester_Class_10)arg_0).var_139 = new Tester_Class_8(); + arg_0 = (new Tester_Class_6[((Tester_Class_6)arg_0).var_99])[Tester_Class_3.var_58]; + if (((Tester_Class_10)(arg_0 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_1)arg_0)))).var_145) + { + Tester_Class_3.var_63++; + } + else + { + ++Tester_Class_2.var_46; + } + (((Tester_Class_3.var_55 ^= Tester_Class_3.var_55 ^ true) ? (Tester_Class_10)arg_0 : (Tester_Class_10)arg_0).var_145 || true ? (Tester_Class_6)arg_0 : (Tester_Class_6)(((Tester_Class_7)(((Tester_Class_10)arg_0).var_139 = (Tester_Class_10)arg_0)).var_112 = "jlixai")).var_99--; + Tester_Class_5.var_81 = Tester_Class_3.var_55 && ! (arg_0.var_1 = arg_0.var_1); + { + ((new Tester_Class_6[Tester_Class_3.var_58])[(true ? 
(Tester_Class_6)(Tester_Class_9.var_133 = (Tester_Class_10)arg_0) : (Tester_Class_6)(((Tester_Class_1)arg_0).var_31 = (Tester_Class_10)arg_0)).var_99]).var_90 *= (Tester_Class_3.var_64 %= Tester_Class_3.var_63); + } + ++Tester_Class_2.var_46; + Tester_Class_0 var_156; + Tester_Class_2.var_46++; + Tester_Class_8.var_114 = (Tester_Class_7.var_114 = (Tester_Class_8.var_114 = (Tester_Class_5)arg_0)); + Tester_Class_6.func_2((Tester_Class_7.var_114 = (Tester_Class_7.var_114 = (Tester_Class_7.var_114 = (Tester_Class_5)arg_0))).var_83 = (byte)(((Tester_Class_10)arg_0).var_142 = (new Tester_Class_8[Tester_Class_3.var_58][Tester_Class_3.var_58])[Tester_Class_3.var_58][Tester_Class_10.var_138]).var_110, Tester_Class_6.var_96, (new byte[Tester_Class_3.var_58])[Tester_Class_10.var_138]); + Tester_Class_7.var_114 = (new Tester_Class_5[Tester_Class_10.var_138])[((Tester_Class_3)arg_0).var_61]; + boolean var_157 = Tester_Class_10.var_140; + (Tester_Class_3.var_60 = (Tester_Class_1)arg_0).var_1 ^= Tester_Class_10.var_140; + return Tester_Class_8.var_114 = (Tester_Class_7.var_114 = (Tester_Class_8.var_114 = (Tester_Class_5)arg_0)); + } + + final static int func_2(Tester_Class_6 arg_0) + { + new Tester_Class_9(); + { + ++Tester_Class_3.var_63; + } + new Tester_Class_3().var_57--; + Tester_Class_1 var_158; + String var_159; + --Tester_Class_6.var_96; + { + new String(); + } + var_159 = (var_159 = arg_0.var_4); + { + --Tester_Class_2.var_46; + } + final double var_160 = (Tester_Class_7.var_114 = (Tester_Class_8.var_114 = (Tester_Class_8.var_114 = (Tester_Class_5)(new Tester_Class_0[arg_0.var_99][arg_0.var_99])[Tester_Class_3.var_58][Tester_Class_3.var_58]))).var_1 ? Tester_Class_9.var_132 : Tester_Class_6.var_97; + Tester_Class_8 var_161; + char var_162 = 'O'; + Tester_Class_2.var_46++; + Tester_Class_6.var_96++; + { + new String(); + } + ++Tester_Class_6.var_96; + var_162 >>= ((new Tester_Class_4[arg_0.var_99])[arg_0.var_99++]).var_70 >> Tester_Class_6.var_91; + (Tester_Class_7.var_114 = (Tester_Class_7.var_114 = (new Tester_Class_5[Tester_Class_3.var_58])[++arg_0.var_99])).var_83 = (arg_0.var_93 <<= Tester_Class_7.var_108); + --Tester_Class_6.var_96; + { + new Tester_Class_9().var_128 = new Tester_Class_2(); + } + arg_0 = arg_0; + { + Tester_Class_9 var_163; + } + ((Tester_Class_5)(Tester_Class_9.var_133 = arg_0)).var_83 = (arg_0.var_99 >>= Tester_Class_5.var_5); + arg_0.var_99 = Tester_Class_10.var_138; + Tester_Class_3.var_60 = (var_158 = (Tester_Class_3.var_60 = (Tester_Class_1)(Tester_Class_9.var_133 = arg_0))); + return Tester_Class_6.var_91; + } + + protected final Tester_Class_9 func_3() + { + Tester_Class_2.var_44 = 3210658399310388224L; + ++Tester_Class_6.var_96; + short var_164 = 15978; + var_164++; + Tester_Class_5.var_81 = true; + return Tester_Class_3.var_55 ? new Tester_Class_9() : new Tester_Class_9(); + } + + final static Tester_Class_10 func_4(Tester_Class_3 arg_0, String arg_1, final byte[] arg_2, final Object arg_3) + { + Tester_Class_1 var_165; + Tester_Class_3.var_63 += new Tester_Class_6().var_92 >= 3821095133162842112L ? (arg_0.var_61 |= Tester_Class_6.var_91) : Tester_Class_10.var_138; + return false ? ((var_165 = (Tester_Class_1)arg_3).var_29 ? 
(Tester_Class_10)arg_3 : (Tester_Class_10)arg_3) : (Tester_Class_10)(Tester_Class_0)arg_3; + } + + private static Object func_7(final short arg_0, String arg_1, final Tester_Class_3 arg_2) + { + Tester_Class_3.var_60 = (new Tester_Class_1[arg_2.var_61])[Tester_Class_10.var_138]; + return ((new Tester_Class_7[arg_2.var_61 |= Tester_Class_3.var_63])[arg_2.var_61 *= Tester_Class_6.var_98]).var_112 = new Tester_Class_8(); + } + + public static String execute() + { + try { + Test6712835 t = new Test6712835(); + try { t.test(); } + catch(Throwable e) { } + try { return t.toString(); } + catch (Throwable e) { return "Error during result conversion to String"; } + } catch (Throwable e) { return "Error during test execution"; } + } + + public static void main(String[] args) + { + try { + Test6712835 t = new Test6712835(); + try { t.test(); } + catch(Throwable e) { } + try { System.out.println(t); } + catch(Throwable e) { } + } catch (Throwable e) { } + } + + private void test() + { + Tester_Class_3.var_60 = true ? (Tester_Class_3.var_60 = new Tester_Class_1()) : new Tester_Class_1(); + double var_170 = 0; + Tester_Class_9.var_133 = (new Tester_Class_4().var_69 = new Tester_Class_3()); + new Tester_Class_6(); + String var_171; + new Tester_Class_9(); + do + { + new String(); + var_170++; + Tester_Class_3.var_64 = 1.0240330514364089E307; + new String(); + var_171 = (var_171 = Tester_Class_9.var_127); + Tester_Class_3.var_63--; + } while (var_170 < 525); + ((new Tester_Class_10[Tester_Class_10.var_138])[Tester_Class_2.var_45 = Tester_Class_3.var_58]).var_142 = (Tester_Class_8)(Tester_Class_7)(new Tester_Class_10().var_139 = new Tester_Class_2()); + long var_172 = 0L; + Tester_Class_3.var_64 /= (((new Tester_Class_6[Tester_Class_3.var_58])[Tester_Class_10.var_138]).var_99 ^= ((new Tester_Class_6[Tester_Class_3.var_58])[Tester_Class_10.var_138]).var_90) > 9.462466046830147E307 ? new Tester_Class_6().var_99 : Tester_Class_3.var_58; + short var_173; + (true ? 
new Tester_Class_2() : (func_3().var_128 = new Tester_Class_2())).var_3 *= (var_150 = new Tester_Class_8()).var_115; + (Tester_Class_3.var_60 = new Tester_Class_1()).var_31 = (((new Tester_Class_9[Tester_Class_3.var_58])[Tester_Class_10.var_138]).var_128 = (func_3().var_128 = (func_3().var_128 = (new Tester_Class_9().var_128 = new Tester_Class_2())))); + for (((new Tester_Class_10[new Tester_Class_6().var_99])[new Tester_Class_6().var_99++]).var_142 = (new Tester_Class_8[Tester_Class_10.var_138])[Tester_Class_3.var_58]; var_172 < 203 && (Tester_Class_3.var_55 &= (new boolean[Tester_Class_2.var_45 = Tester_Class_3.var_58])[Tester_Class_10.var_138]); Tester_Class_9.var_133 = (Tester_Class_7.var_114 = (new Tester_Class_5[Tester_Class_2.var_45 = Tester_Class_10.var_138][Tester_Class_10.var_138])[Tester_Class_3.var_58][Tester_Class_2.var_45 = Tester_Class_3.var_58])) + { + var_171 = Tester_Class_9.var_127; + var_172++; + Tester_Class_3.var_63++; + Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_1)(new Object[Tester_Class_3.var_58][Tester_Class_10.var_138])[Tester_Class_3.var_58][Tester_Class_3.var_58]))); + ++Tester_Class_2.var_46; + Tester_Class_2.var_46--; + Tester_Class_3.var_64 -= Tester_Class_3.var_58; + } + (Tester_Class_3.var_60 = new Tester_Class_1()).var_31 = ((new Tester_Class_8().var_124 = new Tester_Class_4()).var_69 = new Tester_Class_3()); + int var_174 = 0; + ((new Tester_Class_6[Tester_Class_10.var_138][Tester_Class_10.var_138])[Tester_Class_2.var_45 = Tester_Class_10.var_138][Tester_Class_2.var_45 = Tester_Class_3.var_58]).var_92 = 'Z'; + while ((Tester_Class_9.var_131 = Tester_Class_3.var_55) && (var_174 < 24 && !true)) + { + new Tester_Class_10(); + var_174++; + Tester_Class_3.var_64 %= (((new Tester_Class_6[Tester_Class_3.var_58])[Tester_Class_2.var_45 = Tester_Class_3.var_58]).var_93 ^= (byte)Tester_Class_3.var_59); + ((Tester_Class_10)(Tester_Class_9.var_133 = (new Tester_Class_5[((Tester_Class_6)(new Tester_Class_0[Tester_Class_10.var_138])[(byte)(Tester_Class_2.var_46 >>>= Tester_Class_7.var_108)]).var_99])[Tester_Class_10.var_138])).var_139 = (new Tester_Class_10[new Tester_Class_6().var_99][new Tester_Class_4().var_71])[new Tester_Class_4().var_71]; + } + int var_175 = 0; + (Tester_Class_10.var_140 ? (Tester_Class_2)(Tester_Class_9.var_133 = (Tester_Class_7.var_114 = (new Tester_Class_5[Tester_Class_10.var_138])[Tester_Class_10.var_138])) : new Tester_Class_2()).var_1 &= Tester_Class_3.var_55; + do + { + Tester_Class_10.var_143 = new Tester_Class_3(); + var_175++; + ++Tester_Class_2.var_46; + } while ((false ? true : var_149) | !Tester_Class_10.var_140 && var_175 < 97); + Tester_Class_9.var_131 = true; + (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = (Tester_Class_3.var_60 = new Tester_Class_1())))).var_1 &= (((new Tester_Class_10().var_1 = !true) ? new Tester_Class_10() : new Tester_Class_10()).var_145 ? new Tester_Class_3() : new Tester_Class_3()).var_1; + (true ? func_3() : func_3()).var_128 = ((((Tester_Class_5.var_86 = (Tester_Class_3.var_55 &= !var_149)) ? new Tester_Class_10() : new Tester_Class_10()).var_145 ? new Tester_Class_9() : func_3()).var_128 = var_149 ? new Tester_Class_2() : new Tester_Class_2()); + Tester_Class_3.var_59 -= (Tester_Class_5.var_81 = new Tester_Class_1().var_29) ^ !true ? 7920143378515332096L : new Tester_Class_6().var_92; + ((Tester_Class_3.var_60 = new Tester_Class_1()).var_1 ? 
(new Tester_Class_5[Tester_Class_10.var_138][Tester_Class_3.var_58])[Tester_Class_3.var_58][Tester_Class_3.var_58] : (Tester_Class_8.var_114 = new Tester_Class_5())).var_83 = Tester_Class_10.var_140 ? (Tester_Class_3.var_63 -= 2.0167496E38F) : ++Tester_Class_3.var_63; + double var_176 = 9.327780852480363E307; + } + public String toString() + { + String result = "[\n"; + result += "Test6712835.var_151 = "; result += Printer.print(var_151); + result += "\n"; + result += "Test6712835.var_149 = "; result += Printer.print(var_149); + result += "\n"; + result += "Test6712835.var_150 = "; result += Printer.print(var_150); + result += ""; + result += "\n]"; + return result; + } + static class Printer + { + public static String print(boolean arg) { return String.valueOf(arg); } + public static String print(byte arg) { return String.valueOf(arg); } + public static String print(short arg) { return String.valueOf(arg); } + public static String print(char arg) { return String.valueOf((int)arg); } + public static String print(int arg) { return String.valueOf(arg); } + public static String print(long arg) { return String.valueOf(arg); } + public static String print(float arg) { return String.valueOf(arg); } + public static String print(double arg) { return String.valueOf(arg); } + + + public static String print(Object arg) + { + return print_r(new java.util.Stack(), arg); + } + + private static String print_r(java.util.Stack visitedObjects, Object arg) + { + String result = ""; + if (arg == null) + result += "null"; + else + if (arg.getClass().isArray()) + { + for (int i = 0; i < visitedObjects.size(); i++) + if (visitedObjects.elementAt(i) == arg) return ""; + + visitedObjects.push(arg); + + final String delimiter = ", "; + result += "["; + + if (arg instanceof Object[]) + { + Object[] array = (Object[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print_r(visitedObjects, array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof boolean[]) + { + boolean[] array = (boolean[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof byte[]) + { + byte[] array = (byte[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof short[]) + { + short[] array = (short[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof char[]) + { + char[] array = (char[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof int[]) + { + int[] array = (int[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof long[]) + { + long[] array = (long[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof float[]) + { + float[] array = (float[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof double[]) + { + double[] array = (double[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + 
if (i < array.length - 1) result += delimiter; + } + } + + result += "]"; + visitedObjects.pop(); + + } else + { + result += arg.toString(); + } + + return result; + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6714694/Tester.java 2009-08-01 04:21:34.920341549 +0100 @@ -0,0 +1,820 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/* + * @test + * @bug 6714694 + * @summary assertion in 64bit server vm (store->find_edge(load) != -1,"missing precedence edge") with COOPs + * @run main/othervm -Xcomp Tester + */ + +/* Complexity upper bound: 38602 ops */ + +interface Tester_Interface_0 { +} + + +abstract class Tester_Class_1 implements Tester_Interface_0 { + static int var_1 = (false ? (short)'b' : (short)-2.4256387E38F) | (byte)('g' * -7.660532860983624E307); + float var_2; + byte var_3; + static boolean var_4 = true; + double var_5 = 8.818325751338691E307; + Object var_6; + static short var_7; + final static char var_8 = 'x'; + final static float var_9 = 2.2030989E38F; + + + public Tester_Class_1() + { + var_6 = (var_6 = (var_6 = "xkx")); + switch (var_7 = (var_3 = (byte)var_5)) + { + case 113: + + case 114: + Object var_12; + var_4 = 4.9121917E37F < 1957795258; + var_4 |= (var_4 ^= !var_4) ^ (var_4 |= var_4); + var_3 = (var_3 = (var_3 = (byte)+6010964363045275648L)); + break; + + case 102: + + } + final float var_13 = 1.2443151E38F; + var_3 = (byte)(var_1 |= (var_7 = (var_3 = (byte)var_5))); + var_2 = (long)(var_7 = (var_3 = (byte)var_8)) - (var_7 = (byte)386742565); + var_4 &= var_4; + var_2 = (long)((var_3 = (var_3 = (byte)var_8)) / ((var_4 ^= (var_5 /= var_9) <= (var_1 &= var_1)) ? (var_7 = (short)6872886933545336832L) : (byte)var_8)); + var_6 = "uqflj"; + { + switch (((new String[var_3 = (byte)var_5])[var_3 = (byte)8097442298927900672L]).charAt(1540148550)) + { + case 'l': + + } + var_2 = (var_7 = (byte)2.9859440663042714E307); + { + Object var_14; + } + var_3 = (var_3 = (var_3 = (byte)3.3634427195550136E307)); + var_5 += '_'; + } + var_6 = "tempfdjen"; + var_3 = (((var_4 ^= new String("jmwiwmk").endsWith("rtlstmnuo")) ? !true : !false) ? true : (var_4 = false)) ? 
(var_3 = (byte)var_5) : (var_3 = (var_3 = (byte)var_5)); + var_4 ^= false; + if (1.6435436003809043E307 != var_9) + { + boolean var_15 = true; + } + else + { + var_4 = false; + } + { + Object var_16 = ((new Tester_Class_1[(byte)71832757][(byte)1.0694914E38F])[(byte)1315653071][(byte)(var_7 = (var_7 = (byte)var_8))]).var_6 = new int[(byte)var_8][var_3 = (byte)1933656747]; + } + var_7 = (var_4 = var_4) ? (short)2.756967E37F : (short)'K'; + byte var_17; + } + + + + abstract public Tester_Interface_0 func_0(double[][] arg_0, final Object arg_1); + + + final double func_0(final float arg_0, final short arg_1, final boolean arg_2) + { + var_6 = (var_6 = "lmshbl"); + var_3 = (var_3 = (new byte[(new byte[(byte)arg_1])[var_3 = (byte)arg_0]])[var_3 = (var_3 = (byte)(var_1 >>>= var_1))]); + var_5 %= (var_3 = (byte)1909375874); + var_1 /= (char)(short)'i'; + { + "vgar".length(); + } + int var_10; + { + var_3 = (var_4 &= true) ? (byte)(var_5 *= 6375499657746206720L) : (byte)+ (var_5 /= var_9); + var_7 = (var_4 = true) ? (byte)(false ? (short)749593632 : (byte)8.692758043260743E307) : (byte)var_1; + ((new Tester_Class_1[(byte)1.2890904018345944E308])[(byte)var_1]).var_3 = (var_3 = (byte)arg_0); + var_4 = true ^ var_4; + } + { + var_1 ^= (var_3 = (var_3 = (var_3 = (byte)'U'))); + } + var_3 = (var_3 = (var_3 = (var_3 = (byte)arg_1))); + char var_11; + var_1 += (var_2 = (var_7 = arg_1)); + { + var_7 = (var_7 = arg_1); + } + var_7 = arg_1; + var_6 = (new char[(byte)1985094111797788672L][var_3 = (byte)3112604683090268160L])[var_3 = (byte)~ (var_3 = (byte)(var_5 += var_1))]; + var_3 = (var_3 = (var_3 = (var_3 = (byte)3694858000202921984L))); + var_1 /= ~ ((byte)1311538336); + (((var_4 |= arg_2 ? !true && arg_2 : false) ? arg_2 : arg_2) ? "iih".substring(~ (var_3 = (byte)3.5401308E37F), 'g' * arg_1) : "gynskmvoj").trim(); + var_3 = (var_3 = arg_2 ? (byte)+ ~5247392660383928320L : (byte)8392160279007184896L); + var_3 = (var_3 = (var_3 = (byte)var_8)); + return (var_5 += 7.157559E37F) + (var_11 = 'V'); + } + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_1.var_7 = "; result += Tester.Printer.print(var_7); + result += "\n"; + result += "Tester_Class_1.var_3 = "; result += Tester.Printer.print(var_3); + result += "\n"; + result += "Tester_Class_1.var_8 = "; result += Tester.Printer.print(var_8); + result += "\n"; + result += "Tester_Class_1.var_1 = "; result += Tester.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_1.var_4 = "; result += Tester.Printer.print(var_4); + result += "\n"; + result += "Tester_Class_1.var_5 = "; result += Tester.Printer.print(var_5); + result += "\n"; + result += "Tester_Class_1.var_2 = "; result += Tester.Printer.print(var_2); + result += "\n"; + result += "Tester_Class_1.var_9 = "; result += Tester.Printer.print(var_9); + result += "\n"; + result += "Tester_Class_1.var_6 = "; result += Tester.Printer.print(var_6); + result += ""; + result += "\n]"; + return result; + } +} + + +class Tester_Class_2 extends Tester_Class_1 implements Tester_Interface_0 { + final static String var_18 = false | Tester_Class_1.var_4 | (Tester_Class_1.var_4 &= (Tester_Class_1.var_4 |= (Tester_Class_1.var_4 = var_4))) ? 
"tbobyhqne" : ""; + static String var_19 = "acxfj"; + + + public Tester_Class_2() + { + Tester_Class_1.var_4 = !Tester_Class_1.var_4; + var_1++; + var_2 = (byte)2.4009747E38F; + new String(); + var_6 = (var_19 = "hsshyw"); + var_19 = var_19; + } + + + public Tester_Interface_0 func_0(double[][] arg_0, final Object arg_1) + { + var_5 = 4.0352057E37F; + (((false && ! ((Tester_Class_1.var_4 |= !true) ^ (Tester_Class_1.var_4 ^ false))) ^ (var_4 &= true) ? var_4 : (var_4 ^= true)) ? "spskwj" : "xcqianm").length(); + ((var_4 |= (Tester_Class_1.var_4 ^= Tester_Class_1.var_4) ? (Tester_Class_1.var_4 &= false) : (Tester_Class_1.var_4 |= Tester_Class_1.var_4)) ? (Tester_Class_1)(var_6 = new double[(byte)6.628342687109622E307]) : (Tester_Class_1)arg_1).var_6 = arg_0; + var_7 = (short)(byte)(short)8775325134193811456L; + var_4 ^= (var_4 &= !false); + ((Tester_Class_1)arg_1).var_3 = (var_3 = (byte)(var_5 %= 8.933448E37F)); + Tester_Class_1 var_20 = Tester_Class_1.var_4 ? (Tester_Class_1)arg_1 : (Tester_Class_1)arg_1; + { + var_19.endsWith(var_19); + var_6 = var_20; + (var_20 = (var_20 = var_20)).var_2 = (short)('p' <= 1986176769 % (int)2242661265280256000L % 2664882044098145280L ? ~ (var_3 = (byte)1.1892553447967157E308) & ~1806805036550279168L : (var_7 = (byte)var_8)); + } + final boolean var_21 = Tester_Class_1.var_4; + var_20.var_3 = (var_3 = (var_20.var_3 = (byte)'t')); + boolean var_22 = true; + Tester_Class_1.var_4 |= (var_4 = var_21); + var_19 = "ocn"; + var_19 = var_19; + var_1 *= Tester_Class_1.var_8; + var_20 = var_22 ? var_20 : var_20; + var_7 = var_21 ? (byte)+ ((byte)var_1) : ((var_20 = (var_20 = var_20)).var_3 = (var_3 = (var_3 = (byte)'L'))); + return true ? (var_20 = var_20) : (new Tester_Interface_0[(byte)5618282952859970560L])[var_3 = (byte)Tester_Class_1.var_8]; + } + + + public boolean equals(Object obj) + { + Tester_Class_1.var_7 = (var_7 = (((Tester_Class_1)obj).var_3 = (byte)var_9)); + { + final Tester_Class_1 var_23 = (Tester_Class_1)obj; + } + ++Tester_Class_1.var_1; + var_5 = (Tester_Class_1.var_7 = var_4 ? (Tester_Class_1.var_7 = (((Tester_Class_1)obj).var_3 = (byte)Tester_Class_1.var_8)) : (var_7 = (byte)var_9)); + ((Tester_Class_1)obj).var_6 = var_18.replace(Tester_Class_1.var_8, Tester_Class_1.var_8); + ((new Tester_Class_1[((Tester_Class_1)(obj = new char[var_3 = (byte)Tester_Class_1.var_8])).var_3 = (((Tester_Class_1)obj).var_3 = (byte)(var_1 %= 787509251458841600L))])[(new byte[var_3 = (byte)Tester_Class_1.var_1])[((Tester_Class_1)obj).var_3 = (byte)1.2382548E38F]]).var_3 = (((Tester_Class_1)obj).var_3 = var_4 ? (byte)Tester_Class_1.var_8 : (byte)4.1085164E36F); + var_1 &= var_8; + var_7 = var_4 ? (var_3 = (byte)var_8) : (byte)var_5; + var_19 = var_18; + ("o".compareTo("kwlfk") > (var_2 = 5289241662482067456L) ? 
(Tester_Class_1)obj : (Tester_Class_1)obj).var_5 -= (((Tester_Class_1)obj).var_3 = (((Tester_Class_1)obj).var_3 = (((Tester_Class_1)obj).var_3 = (byte)var_9))); + return true; + } + + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_2.var_7 = "; result += Tester.Printer.print(var_7); + result += "\n"; + result += "Tester_Class_2.var_8 = "; result += Tester.Printer.print(var_8); + result += "\n"; + result += "Tester_Class_2.var_3 = "; result += Tester.Printer.print(var_3); + result += "\n"; + result += "Tester_Class_2.var_18 = "; result += Tester.Printer.print(var_18); + result += "\n"; + result += "Tester_Class_2.var_19 = "; result += Tester.Printer.print(var_19); + result += "\n"; + result += "Tester_Class_2.var_1 = "; result += Tester.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_2.var_4 = "; result += Tester.Printer.print(var_4); + result += "\n"; + result += "Tester_Class_2.var_5 = "; result += Tester.Printer.print(var_5); + result += "\n"; + result += "Tester_Class_2.var_2 = "; result += Tester.Printer.print(var_2); + result += "\n"; + result += "Tester_Class_2.var_9 = "; result += Tester.Printer.print(var_9); + result += "\n"; + result += "Tester_Class_2.var_6 = "; result += Tester.Printer.print(var_6); + result += ""; + result += "\n]"; + return result; + } +} + + +class Tester_Class_3 extends Tester_Class_2 implements Tester_Interface_0 { + long var_24 = 9026266006808413184L; + char var_25; + String var_26 = ((var_4 ^= Tester_Class_1.var_4) ? (!true ? false : (var_4 |= true)) : (Tester_Class_2.var_4 ^= var_4)) ? "dkmhvhl" : (var_19 = (Tester_Class_2.var_19 = (Tester_Class_2.var_19 = var_18))); + static Tester_Class_2 var_27; + short var_28 = Tester_Class_2.var_7 = (short)(Tester_Class_2.var_1 &= (var_3 = (var_3 = (var_3 = (byte)Tester_Class_2.var_9)))); + static boolean var_29 = false; + static Object[][] var_30; + int var_31 = 750583762; + Tester_Class_2 var_32; + final static long var_33 = 3050784555932008448L; + + + public Tester_Class_3() + { + byte[] var_34; + var_4 &= (Tester_Class_1.var_4 = true); + Tester_Class_1.var_1--; + switch (var_28 >>= ~ ((byte)var_28)) + { + case 9: + + case 26: + Tester_Class_1.var_4 ^= Tester_Class_1.var_4; + (Tester_Class_2.var_19 = "pwtic").indexOf(Tester_Class_2.var_18); + var_26.indexOf(var_19); + ((Tester_Class_1)(new Tester_Interface_0[(byte)var_5])[var_24 <= var_31 ? 
(byte)'^' : (byte)var_24]).var_2 = 5611775846881101824L; + var_29 |= (Tester_Class_2.var_4 ^= var_29); + Tester_Class_2 var_35; + var_24 <<= (var_31 >>= (var_25 = var_8)); + break; + + case 28: + + } + new String(); + var_5 %= (var_25 = 'n'); + ((Tester_Class_2)(Tester_Class_1)(((Tester_Class_1)(var_6 = Tester_Class_2.var_18)).var_6 = (var_26 = ""))).var_2 = var_31; + --var_1; + } + + + + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_3.var_8 = "; result += Tester.Printer.print(var_8); + result += "\n"; + result += "Tester_Class_3.var_25 = "; result += Tester.Printer.print(var_25); + result += "\n"; + result += "Tester_Class_3.var_1 = "; result += Tester.Printer.print(var_1); + result += "\n"; + result += "Tester_Class_3.var_31 = "; result += Tester.Printer.print(var_31); + result += "\n"; + result += "Tester_Class_3.var_30 = "; result += Tester.Printer.print(var_30); + result += "\n"; + result += "Tester_Class_3.var_24 = "; result += Tester.Printer.print(var_24); + result += "\n"; + result += "Tester_Class_3.var_33 = "; result += Tester.Printer.print(var_33); + result += "\n"; + result += "Tester_Class_3.var_5 = "; result += Tester.Printer.print(var_5); + result += "\n"; + result += "Tester_Class_3.var_2 = "; result += Tester.Printer.print(var_2); + result += "\n"; + result += "Tester_Class_3.var_9 = "; result += Tester.Printer.print(var_9); + result += "\n"; + result += "Tester_Class_3.var_7 = "; result += Tester.Printer.print(var_7); + result += "\n"; + result += "Tester_Class_3.var_28 = "; result += Tester.Printer.print(var_28); + result += "\n"; + result += "Tester_Class_3.var_3 = "; result += Tester.Printer.print(var_3); + result += "\n"; + result += "Tester_Class_3.var_18 = "; result += Tester.Printer.print(var_18); + result += "\n"; + result += "Tester_Class_3.var_19 = "; result += Tester.Printer.print(var_19); + result += "\n"; + result += "Tester_Class_3.var_26 = "; result += Tester.Printer.print(var_26); + result += "\n"; + result += "Tester_Class_3.var_4 = "; result += Tester.Printer.print(var_4); + result += "\n"; + result += "Tester_Class_3.var_29 = "; result += Tester.Printer.print(var_29); + result += "\n"; + result += "Tester_Class_3.var_27 = "; result += Tester.Printer.print(var_27); + result += "\n"; + result += "Tester_Class_3.var_32 = "; result += Tester.Printer.print(var_32); + result += "\n"; + result += "Tester_Class_3.var_6 = "; result += Tester.Printer.print(var_6); + result += ""; + result += "\n]"; + return result; + } +} + +public class Tester { + static double var_36 = 2.679028326789642E307; + float var_37; + String var_38 = Tester_Class_2.var_18; + static Tester_Interface_0 var_39; + static char var_40 = 'D'; + Tester_Class_1 var_41; + static int var_42; + final static boolean var_43 = false; + + + final static Tester_Class_2 func_0(Tester_Class_1 arg_0, final Tester_Class_2 arg_1) + { + "ooots".replaceFirst("rdxor", ((new Tester_Class_3[arg_1.var_3 = (byte)2.7836305E38F])[arg_0.var_3 = (byte)+ + +1.4958218616334936E307]).var_26); + if (true) + { + arg_0 = (Tester_Class_3)arg_0; + ((Tester_Class_3)arg_0).var_25 = var_40; + final Tester_Class_2 var_44 = (Tester_Class_2)((Tester_Class_3.var_29 |= var_43) ? arg_0 : (arg_0.var_6 = Tester_Class_3.var_18)); + } + else + { + var_39 = (Tester_Class_3.var_27 = (Tester_Class_3)arg_1); + } + Tester_Class_3.var_19 = "onndgsil"; + var_39 = arg_0; + return (Tester_Class_2.var_4 &= Tester_Class_2.var_4 ^ true) ? 
(((Tester_Class_3)arg_0).var_32 = (Tester_Class_3)arg_1) : (((Tester_Class_3)arg_0).var_32 = (Tester_Class_3)arg_1); + } + + private final static float func_1(final short arg_0, int[][] arg_1, final long arg_2) + { + Tester_Class_2.var_1 *= arg_0; + double var_45 = 6.841391103184752E307; + long var_46; + Tester_Class_2.var_1--; + --var_40; + ++var_40; + ++Tester_Class_3.var_1; + Tester_Class_1.var_4 = false; + var_36 %= 'X'; + ++Tester_Class_2.var_1; + Tester_Class_1.var_1++; + return 3.2422038E38F; + } + + private final static char func_2(double arg_0, final byte arg_1, int arg_2) + { + --Tester_Class_3.var_1; + if (Tester_Class_1.var_4) + { + if (var_43) + { + Tester_Class_3.var_1++; + } + else + { + var_40 <<= 1329560515532651520L; + } + (false & Tester_Class_2.var_4 ? (new Tester_Class_1[arg_1])[arg_1] : (new Tester_Class_1[arg_1][arg_1])[arg_1][arg_1]).var_3 = arg_1; + Tester_Class_2.var_19 = Tester_Class_3.var_19; + --var_40; + final long var_47 = ~Tester_Class_3.var_33 << var_40--; + ((Tester_Class_3)(new Tester_Class_2[arg_1][arg_1])[arg_1][arg_1]).var_24 *= (var_36 *= (long)arg_1 * ~arg_1); + Tester_Class_2.var_19 = Tester_Class_2.var_19; + ++((new Tester_Class_3[arg_1])[arg_1]).var_24; + } + else + { + var_40++; + } + var_40 <<= var_40; + if (true) + { + ++arg_2; + } + else + { + Tester_Class_2.var_7 = arg_1; + } + boolean var_48 = true; + var_36 /= arg_1; + final short var_49 = 15276; + Tester_Interface_0 var_50; + ((Tester_Class_2.var_19 = (Tester_Class_2.var_19 = Tester_Class_2.var_19)) + "xhi").toString(); + arg_2++; + return var_40; + } + + public final static char func_4(final boolean arg_0) + { + float var_52 = 2.8063675E38F; + var_40--; + Object var_53; + Tester_Class_3.var_29 |= (Tester_Class_3.var_29 &= true); + if (!Tester_Class_1.var_4) + { + --var_40; + } + else + { + var_52 %= 2027756834; + } + int var_54 = Tester_Class_1.var_1++; + var_40--; + long var_55; + byte var_56 = 97; + var_36 *= 9.75628909363086E307 % + -1.9812653793936264E306; + int var_57; + boolean var_58 = Tester_Class_1.var_4 ^= var_43; + return 'J'; + } + + static float func_5(final Object arg_0, float arg_1, final Tester_Class_2 arg_2) + { + var_39 = arg_2; + Tester_Class_3.var_27 = arg_2; + arg_1 %= 1.7777554E38F; + var_39 = (Tester_Class_3.var_27 = arg_2); + Tester_Class_3 var_59; + { + var_40 -= arg_1 - ~ (((Tester_Class_3)arg_2).var_3 = (byte)1455854212); + } + Object var_60 = Tester_Class_1.var_4 ? arg_0 : new String[arg_2.var_3 = (byte)arg_1][(byte)((Tester_Class_3)arg_0).var_28]; + Tester_Class_3.var_27 = (Tester_Class_2)(var_39 = arg_2); + ((Tester_Class_3.var_4 |= var_43) ? (var_59 = (var_59 = (var_59 = (Tester_Class_3)var_60))) : (var_59 = (Tester_Class_3)arg_2)).var_24 ^= Tester_Class_3.var_1; + return Tester_Class_1.var_9; + } + + private static void func_6(char arg_0, final Tester_Class_3 arg_1, String arg_2, final double arg_3) + { + ((new Tester_Class_1[(byte)arg_1.var_28])[(arg_1.var_32 = arg_1).var_3 = (byte)var_40]).var_2 = Tester_Class_3.var_9; + double var_61; + (true ? (arg_1.var_32 = arg_1) : (arg_1.var_32 = (Tester_Class_3.var_27 = (arg_1.var_32 = arg_1)))).var_6 = var_43 | (Tester_Class_2.var_4 = !Tester_Class_3.var_4) ? 
(arg_1.var_26 = arg_2) : (Tester_Class_2.var_19 = Tester_Class_2.var_18); + } + + private final char func_7(int arg_0) + { + Tester_Class_2.var_4 &= var_43; + float var_62 = Tester_Class_3.var_9; + --var_40; + int var_63 = Tester_Class_1.var_1++; + { + "nncjfoit".indexOf((new int[(byte)'\\'])[(byte)var_36]); + if (var_43) + { + ((new Tester_Class_3[(byte)var_40][(byte)Tester_Class_2.var_1])[(byte)5046997225818337280L][(byte)var_63]).var_24 >>>= var_40; + } + else + { + --var_40; + } + --Tester_Class_2.var_1; + --var_63; + } + { + final byte var_64 = Tester_Class_1.var_4 ? (byte)'M' : (byte)(var_62 -= + ((byte)Tester_Class_1.var_8)); + float var_65; + var_62 *= ((Tester_Class_3)(new Tester_Interface_0[var_64])[var_64]).var_24++; + var_36 /= var_64; + { + double var_66; + } + var_40 += 3500240160155094016L; + ((new Tester_Class_1[var_64][var_64])[var_64][var_64]).var_3 = (byte)(Tester_Class_2.var_7 = (Tester_Class_1.var_7 = (Tester_Class_1.var_7 = (Tester_Class_1.var_7 = var_64)))); + ++Tester_Class_3.var_1; + } + --arg_0; + { + arg_0++; + } + Tester_Class_2.var_1++; + var_40 &= (short)((byte)Tester_Class_2.var_8 >> (((new Tester_Class_3[(byte)var_36])[(byte)(var_40 = Tester_Class_3.var_8)]).var_3 = (byte)((byte)3.3531374E38F * var_40))); + var_36 %= (var_62 = (byte)900943133); + var_36 = Tester_Class_3.var_33; + var_62 += (var_40 /= (byte)6766658341842315264L % (byte)'p') * (short)2019461672; + --var_40; + if (true) + { + var_62 *= 365879806965555200L; + } + else + { + var_36 -= ~9163555887358003200L; + } + Tester_Class_1.var_4 = Tester_Class_1.var_4; + { + var_40 <<= var_63; + } + var_40++; + String var_67; + return Tester_Class_1.var_8; + } + + private final static Tester_Interface_0 func_8(char arg_0, final Tester_Class_2 arg_1, final String arg_2) + { + ((new Tester[(byte)((Tester_Class_3)arg_1).var_28])[((Tester_Class_1)(var_39 = arg_1)).var_3 = ((Tester_Class_3.var_27 = (Tester_Class_3)arg_1).var_3 = (byte)+ -9.9100855E36F)]).var_38 = (var_43 ? "k" : Tester_Class_2.var_19).substring(350785312); + return (new Tester_Interface_0[(byte)'l'])[((Tester_Class_1)(var_39 = (Tester_Class_3.var_27 = (Tester_Class_3)arg_1))).var_3 = ((Tester_Class_3.var_27 = arg_1).var_3 = (((Tester_Class_3)arg_1).var_3 = (arg_1.var_3 = (arg_1.var_3 = (byte)'['))))]; + } + + private final int func_9(Tester_Class_3 arg_0, char arg_1) + { + final float var_68 = Tester_Class_3.var_9; + Tester_Class_2.var_18.toLowerCase(); + double var_69; + { + Tester_Class_3.var_29 ^= !false || Tester_Class_2.var_4; + } + Tester_Class_1 var_70; + (Tester_Class_3.var_27 = (Tester_Class_2)(var_70 = arg_0)).var_6 = (Tester_Class_2)((var_41 = arg_0).var_6 = (arg_0.var_6 = arg_0)); + "hv".codePointBefore(--Tester_Class_2.var_1); + var_41 = arg_0; + return ~ (((arg_0 = arg_0).var_24 &= arg_1) == 3.0764282E38F ? (byte)457565863 : ((arg_0 = arg_0).var_3 = (byte)arg_0.var_28)); + } + + private static void func_10(double arg_0, final Tester_Class_3 arg_1, double arg_2) + { + arg_1.var_32 = 'g' != 1.520646515461986E307 ? 
(arg_1.var_32 = arg_1) : arg_1; + Tester_Class_2.var_19.startsWith(Tester_Class_2.var_19 = Tester_Class_3.var_18); + Tester_Class_1.var_4 ^= true & (arg_1.var_3 = (arg_1.var_3 = (byte)- ((byte)1.4509185661781193E308))) > (arg_1.var_2 = var_40); + var_36 += Tester_Class_3.var_9; + } + + Tester_Interface_0 func_12(final Object arg_0, float arg_1) + { + switch (((Tester_Class_3)arg_0).var_3 = (byte)arg_1) + { + case 4: + var_41 = (Tester_Class_3)(var_39 = (Tester_Class_3.var_27 = (Tester_Class_3.var_27 = (Tester_Class_3)arg_0))); + double var_72 = (double)3858573493713776640L; + byte var_73 = (var_41 = (Tester_Class_2)arg_0).var_3 = (((Tester_Class_3)arg_0).var_3 = (byte)var_72); + break; + + case 13: + (Tester_Class_3.var_27 = (((Tester_Class_3)arg_0).var_32 = (Tester_Class_3)(Tester_Class_2)arg_0)).var_3 = (Tester_Class_2.var_1 *= ((Tester_Class_3)arg_0).var_24) == (byte)Tester_Class_3.var_33 ? (byte)188693954866039808L : (byte)Tester_Class_2.var_8; + break; + + default: + var_40 <<= (byte)157510337; + break; + + case 26: + + case 122: + + } + Tester_Interface_0 var_74; + long var_75; + var_41 = (var_41 = (var_41 = (Tester_Class_2)arg_0)); + arg_1 *= 1601420762; + var_74 = (var_41 = Tester_Class_1.var_4 ? (Tester_Class_3)arg_0 : (Tester_Class_2)arg_0); + (Tester_Class_1.var_4 ? (Tester_Class_3)(var_39 = (Tester_Class_3)arg_0) : (true ? (Tester_Class_3)arg_0 : (Tester_Class_3)arg_0)).var_28 *= 1066935145; + var_40 >>>= (byte)6.643183E36F / - ((byte)1.277596E37F); + { + ((Tester_Class_3)(((Tester_Class_3)((Tester_Class_3.var_29 ^= (Tester_Class_3.var_29 &= var_43)) ? (Tester_Class_2)arg_0 : (Tester_Class_2)arg_0)).var_32 = (Tester_Class_3.var_27 = (Tester_Class_2)arg_0))).var_28--; + } + var_38 = "qad"; + byte var_76 = ((Tester_Class_2)(var_39 = (Tester_Class_3)arg_0)).var_3 = true ? ((var_41 = (var_41 = (Tester_Class_3)arg_0)).var_3 = (byte)1.7128118638075888E308) : (byte)1.6562746603631249E308; + return var_39 = (Tester_Class_3)((var_41 = (Tester_Class_3)arg_0).var_6 = Tester_Class_2.var_18); + } + + protected final String func_13() + { + float var_77; + var_38 = (Tester_Class_2.var_19 = var_38); + Tester_Class_2.var_4 ^= !var_43 | (Tester_Class_3.var_29 ^= Tester_Class_1.var_4); + Tester_Class_3.var_1--; + Tester_Class_2.var_1++; + return Tester_Class_2.var_18; + } + + public static String execute() + { + try { + Tester t = new Tester(); + try { t.test(); } + catch(Throwable e) { } + try { return t.toString(); } + catch (Throwable e) { return "Error during result conversion to String"; } + } catch (Throwable e) { return "Error during test execution"; } + } + + public static void main(String[] args) + { + try { + Tester t = new Tester(); + try { t.test(); } + catch(Throwable e) { } + try { System.out.println(t); } + catch(Throwable e) { } + } catch (Throwable e) { } + } + + private void test() + { + int var_78 = 0; + var_39 = (new Tester_Class_1[(byte)var_40])[(byte)Tester_Class_3.var_33]; + while (var_43 && (var_78 < 70 && true)) + { + var_40 *= ~ ~Tester_Class_3.var_33 % Tester_Class_3.var_9; + var_78++; + var_39 = new Tester_Class_3(); + var_39 = (var_41 = (Tester_Class_3.var_27 = new Tester_Class_2())); + } + final Tester_Class_3 var_79 = (Tester_Class_1.var_4 ? ~Tester_Class_3.var_33 : var_36) == 1433764895112462336L ? 
new Tester_Class_3() : new Tester_Class_3(); + Tester_Class_2 var_80; + } + public String toString() + { + String result = "[\n"; + result += "Tester.var_40 = "; result += Printer.print(var_40); + result += "\n"; + result += "Tester.var_42 = "; result += Printer.print(var_42); + result += "\n"; + result += "Tester.var_36 = "; result += Printer.print(var_36); + result += "\n"; + result += "Tester.var_37 = "; result += Printer.print(var_37); + result += "\n"; + result += "Tester.var_39 = "; result += Printer.print(var_39); + result += "\n"; + result += "Tester.var_38 = "; result += Printer.print(var_38); + result += "\n"; + result += "Tester.var_43 = "; result += Printer.print(var_43); + result += "\n"; + result += "Tester.var_41 = "; result += Printer.print(var_41); + result += ""; + result += "\n]"; + return result; + } + static class Printer + { + public static String print(boolean arg) { return String.valueOf(arg); } + public static String print(byte arg) { return String.valueOf(arg); } + public static String print(short arg) { return String.valueOf(arg); } + public static String print(char arg) { return String.valueOf((int)arg); } + public static String print(int arg) { return String.valueOf(arg); } + public static String print(long arg) { return String.valueOf(arg); } + public static String print(float arg) { return String.valueOf(arg); } + public static String print(double arg) { return String.valueOf(arg); } + + + public static String print(Object arg) + { + return print_r(new java.util.Stack(), arg); + } + + private static String print_r(java.util.Stack visitedObjects, Object arg) + { + String result = ""; + if (arg == null) + result += "null"; + else + if (arg.getClass().isArray()) + { + for (int i = 0; i < visitedObjects.size(); i++) + if (visitedObjects.elementAt(i) == arg) return ""; + + visitedObjects.push(arg); + + final String delimiter = ", "; + result += "["; + + if (arg instanceof Object[]) + { + Object[] array = (Object[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print_r(visitedObjects, array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof boolean[]) + { + boolean[] array = (boolean[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof byte[]) + { + byte[] array = (byte[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof short[]) + { + short[] array = (short[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof char[]) + { + char[] array = (char[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof int[]) + { + int[] array = (int[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof long[]) + { + long[] array = (long[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof float[]) + { + float[] array = (float[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } 
+ else + if (arg instanceof double[]) + { + double[] array = (double[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + + result += "]"; + visitedObjects.pop(); + + } else + { + result += arg.toString(); + } + + return result; + } + } +} + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6716441/Tester.java 2009-08-01 04:21:35.365282574 +0100 @@ -0,0 +1,940 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/* + * @test + * @bug 6716441 + * @summary error in meet with +DoEscapeAnalysis + * @run main/othervm -Xcomp -XX:+AggressiveOpts Tester + */ + +/* Complexity upper bound: 70070 ops */ + +class Tester_Class_0 { + Object var_1; + + + public Tester_Class_0() + { + var_1 = "cmlv"; + { + final Object var_7 = false ? (var_1 = "flwnujmm") : (var_1 = ""); + } + float var_8 = 0F; + boolean var_9; + do + { + ((var_9 = (var_9 = (var_9 = false))) && true ? new String("yvirc") : "rmfkjbx").indexOf(99854165); + var_8++; + boolean var_10 = true; + var_10 |= true ? true : false; + var_10 |= false | var_10 ? (var_10 |= true) : true; + } while (var_8 < 1 && "tpb".charAt(~ ((byte)2449669946597620736L)) >>> (byte)1.704554E38F >= ('t' < var_8 ? (short)var_8 : 1237471887 / +4199632387838836736L)); + var_1 = (var_1 = "y"); + switch ((var_9 = "".equalsIgnoreCase("kqalxwsnu") | false) ? (new char[(byte)1.3230508534158742E308])[(byte)(short)93390917] : 'c') + { + case 'U': + + case 'W': + + case 'A': + + case 72: + + } + var_9 = (var_9 = false); + float var_11 = 0F; + var_9 = true; + for (var_1 = (var_1 = new boolean[(new byte[(byte)'O'])[(byte)'l']]); var_11 < 2; var_1 = (true ? "" : "nn").toUpperCase()) + { + var_9 = true; + var_11++; + var_9 = true; + int var_12 = (new short[(byte)1825213428])[(byte)1.6151095881186834E308] + (byte)(short)'V'; + } + var_1 = (new boolean[(byte)1546704177][(byte)6314352927310996480L][(byte)6838913812451802112L])["e".equalsIgnoreCase("d") ? (byte)3.964472879846357E307 : (byte)'L']; + var_9 = (var_9 = false); + } + + + + protected Object clone() + { + var_1 = (var_1 = "dbogknllu"); + var_1 = "jljax"; + var_1 = (var_1 = false ? "s" : "upqinst"); + if (false) + { + var_1 = (var_1 = (var_1 = "nwhu")); + } + else + { + short var_2; + var_2 = false ? (byte)((short)'e' - (byte)2.267975E38F) : (byte)1.28920093132789E308; + } + (((byte)2.3450009E38F >= (byte)- (+ - -1.6256751428449317E306 * 954071273906192384L) ? true : true) ? 
"gpv".substring(468176669, 1915798010) : "macqbvf").offsetByCodePoints("ivwivjege".charAt(1819923298) >>> (byte)((- ~653213024937366528L + 'N') * +1.0297164E38F) + (short)~ + -4535402293751053312L, (short)3837656677199316992L + (new byte[(byte)2.1586717E38F])[(byte)((short)719847962 - 8.599263E37F)]); + new String((char)650047353 > (short)'I' ? "maygldu".replaceAll("hpn", "bacoyows") : "nkx").endsWith(new String("h" + "vx")); + byte var_3; + return var_1 = "vbiccvdu"; + } + + public boolean equals(Object obj) + { + var_1 = "rxpocj"; + obj = "ndogtwvuh"; + { + { + var_1 = "vmdkvs"; + } + final boolean var_4 = false; + long var_5; + var_5 = (short)'G' >> + ((byte)1.1872624048733707E308); + } + var_1 = (new byte[(byte)(short)1.7804576633213148E308][(byte)(short)8.300151422843056E307])[(byte)3110360603258978304L]; + obj = "fudebwroh"; + obj = new Object[(false && false) & false ? (byte)8796624364629753856L : (byte)- +1.5843542184394165E308]; + if (true) + { + var_1 = new float[(byte)1372189277][(byte)((byte)8.352642520619892E307 == (! !false ^ true ? (short)1.3871033E37F : (byte)'f') ? 7.33899E37F - (short)2.2558552E37F : 2.6072269E38F)]; + } + else + { + var_1 = obj; + obj = new boolean[(byte)8.623747E37F][(byte)~ ((byte)(short)(byte)2100816354)]; + } + var_1 = new int[(new byte[(byte)7410132537719084032L])[(byte)1524949007]][(byte)((long)9188640 ^ 'n')]; + if (-3.0364305E38F > 8.114229090672013E307) + { + var_1 = (var_1 = new boolean[(byte)1.0354788699727844E308][(byte)-4867581638981979136L]); + } + else + { + obj = (obj = obj); + obj = (new int[(byte)'f'][(byte)7563033606293564416L])[(byte)2052737645]; + } + long var_6; + var_6 = (+ + ~3862500564271147008L != (short)8549491352795953152L || (byte)+ + (var_6 = 'D') > (var_6 = 1687952259)) && true ? (short)1756260278 : (byte)(short)3.202384E36F; + return false ^ ((false ? !true : true) ? false : (true ? ! !false : false)); + } + + + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_0.var_1 = "; result += Tester.Printer.print(var_1); + result += ""; + result += "\n]"; + return result; + } +} + + +interface Tester_Interface_1 { +} + + +final class Tester_Class_2 extends Tester_Class_0 { + static char var_13; + final static long var_14 = ~ ~ (((false ? !true ^ !true : !false | true) ? (byte)1.4363583E38F : (byte)(short)'X') * + + +3767002589844008960L << (var_13 = (var_13 = 'X'))); + + + public Tester_Class_2() + { + ((true ? (byte)- ((byte)2.6338962E38F) : (byte)'H') <= (short)8.671266E37F || !false & true ? (Tester_Class_0)(var_1 = "tgoexsr") : (Tester_Class_0)(var_1 = "mptusv")).var_1 = "tbxbgyti"; + var_1 = true ? (var_1 = "islgpxwa") : "rypkbu"; + var_1 = "vngiif"; + var_13 = (var_13 = (short)~ (-3250577075350561792L + +2983539228635145216L - '`') < 861590495 * (short)1.5299644E38F ? 'V' : 'G'); + Object[] var_27; + var_1 = (var_1 = "glu"); + } + + + + + + private static int func_0(Tester_Interface_1 arg_0, Tester_Interface_1 arg_1, final int arg_2) + { + var_13 = 'i'; + new String(); + (true ? "pu" : "oltbcysyd").substring(1236315614); + byte var_15; + var_15 = (new byte[(byte)1582653493940204544L][(byte)(char)-1.5806537192822515E308])[(byte)1.2986292E38F][(byte)(short)arg_2]; + if (true || true) + { + ((new Tester_Class_2[var_15 = (byte)'q'])[var_15 = 7.653082E37F < (var_13 = 'o') ? 
(byte)- - + +3.1640875E38F : (byte)-6820511772487837696L]).var_1 = (new Tester_Class_0[(byte)(short)(new byte[(byte)1080194717])[(byte)8297829563314362368L]])[var_15 = (var_15 = (var_15 = (var_15 = (byte)'t')))]; + double[] var_16; + } + else + { + arg_1 = (arg_1 = arg_0); + } + var_13 = (var_13 = 'o'); + var_13 = (false || false ^ false) ^ !true ? 'Z' : (var_13 = 'i'); + return true ? (var_15 = (byte)'C') : 1120493294; + } + + static double func_1(final int arg_0, long arg_1, long arg_2) + { + "ghtfhmbeg".compareToIgnoreCase("djn"); + { + ((true ? true : ! !true) ? "" : "ioowivxge").substring(1844979226, "ppsk".startsWith("lllmn") | false ? arg_0 * arg_0 : (byte)663320199); + ((Tester_Class_0)(((new Tester_Class_2[(byte)4797509007239530496L])[(byte)(- -2.8961086E38F - arg_1)]).var_1 = new float[(byte)('m' / 6.0657416E37F)])).var_1 = new boolean[(byte)(short)(+4.515459E37F - (short)arg_0)][(byte)(-1.3002677717712297E308 + (var_13 = "bcjfdnc".charAt(arg_0)) * (arg_2 - 1.6058080036165979E308))]; + var_13 = 'E'; + } + float var_17 = 0F; + --arg_1; + do + { + final String[] var_18 = new String[(byte)(short)arg_0]; + var_17++; + Object var_19 = (new Object[(byte)var_17])[(false ? "xfkx".equalsIgnoreCase("j") : !false) ^ !false ? (byte)(1.6627987E35F * 2.62825E38F) : (byte)var_17]; + } while (var_17 < 2 && false | ((byte)1.4661115E38F | arg_0 - 'l') < ('J' & (byte)+ ((float)1200013759))); + long var_20 = 0L; + arg_2--; + for (var_13 = (var_13 = (var_13 = (var_13 = (var_13 = (var_13 = 'p'))))); var_20 < 4; var_13 = 'y') + { + new String((true | true ? "oxpfy" : "gaynucv") + (true ? "cmrrk".substring(653981013, arg_0) : "ociewl" + "srhbvcgci")); + var_20++; + arg_2 >>= arg_0; + arg_2 = false ? (byte)'l' : (byte)(~var_20 - (byte)1.1290483681734838E308); + } + ((new Tester_Class_2[(byte)arg_1++])[(byte)685259385]).var_1 = new Tester_Interface_1[(byte)arg_0]; + --arg_1; + var_13 = 483446392 - (byte)'P' < ((short)(true ? (var_13 = 't') : 'n') & (short)'l') ? (var_13 = 'q') : (var_13 = (var_13 = 'h')); + var_13 = 'y'; + return 1.097000731360761E308; + } + + private final boolean func_2(final short[][] arg_0, byte arg_1, Object arg_2) + { + var_1 = new char[arg_1]; + arg_1 /= (var_13 = 'A') + -var_14; + int var_21 = 0; + arg_2 = (new Tester_Class_0[arg_1 += var_14][arg_1])[arg_1 >>>= 'R' + + -var_14 - 4078879817303787520L]; + for (arg_1--; (+ +1.1473423E38F < 'w' & false | var_21 != (short)4633877342445358080L | !false ? false | !false : true) && (var_21 < 1 && (arg_0[arg_1])[arg_1 %= 7.136449148111178E307] > 7.362329280963185E307); arg_1++) + { + arg_1 = (arg_1 %= + -9.872181578576624E307); + var_21++; + arg_1 /= 7.365334241099395E307; + var_13 = (var_13 = (var_13 = (var_13 = (var_13 = (var_13 = 'A'))))); + } + arg_1 >>= var_14; + boolean var_22 = false; + return ((var_22 &= true) && var_22 ? (short)1.568578228250838E308 : arg_1) >= (new float[arg_1])[arg_1 /= 1.6849858816110731E308]; + } + + public final String func_3(Tester_Interface_1 arg_0) + { + arg_0 = arg_0; + arg_0 = (arg_0 = arg_0); + var_13 = (var_13 = (var_13 = (var_13 = "rqimya".charAt(390686412)))); + ((Tester_Class_0)(var_1 = "suxd")).var_1 = new Tester_Class_0[(byte)(float)(short)1.584667675541511E308]; + arg_0 = ((byte)+ + - +1.3636011E38F == 'L' ^ true) & (!true | true) ? 
(arg_0 = arg_0) : arg_0; + var_1 = (((Tester_Class_0)(var_1 = (Tester_Class_0)(var_1 = "ndxcurdr"))).var_1 = (var_1 = (var_1 = "schnhe"))); + var_1 = (new Object[(byte)(var_13 = (var_13 = 'G'))])[(byte)((byte)1.5366658198344074E308 - 1.0051541518022535E308)]; + var_13 = (var_13 = 'x'); + (true ? (Tester_Class_0)(var_1 = "slesdph") : (Tester_Class_0)(var_1 = "wxj")).var_1 = false ? (Tester_Class_0)(var_1 = "iddvoil") : (Tester_Class_0)(var_1 = "yjbxhuip"); + var_13 = (var_13 = 'f'); + var_1 = (new short[(byte)1.8902424351360974E307])[(new byte[(byte)1552392028])[(byte)'V']] >= (1.2449926736063704E308 + 'K') / (short)1.0847178E38F / ((short)'M' % (1.225890077244388E308 + + ((byte)1.5358312881295287E308))) ? (Tester_Class_0)(var_1 = "khfntg".toString()) : (Tester_Class_0)(var_1 = "jkttx"); + { + arg_0 = arg_0; + } + arg_0 = arg_0; + var_13 = (true ? "jndbffakc" : "hhcsc").charAt((byte)7322932713210745856L >> (short)(var_13 = (var_13 = 'R'))); + var_13 = (var_13 = (var_13 = 'U')); + { + "gepdu".trim(); + } + var_13 = 'H'; + new String(); + { + (false ^ (true ^ ((!true | ! !true) ^ ! (1.7800668444792402E308 * var_14 >= (var_13 = (var_13 = (var_13 = 'g')))))) ? "dsm" : "g").toUpperCase().toString(); + ((Tester_Class_0)(var_1 = "fcdvd")).var_1 = (Tester_Class_0)(((Tester_Class_0)(var_1 = "uwhnte")).var_1 = "vpatqpd"); + } + switch (((byte)((byte)((int)'n' / var_14) >>> (short)7.786475993917233E307 - (var_13 = 'm')) <= (("".endsWith("pbypgis") ^ false) & true ? (byte)func_0(arg_0, arg_0, 723551781) : (short)140766856) ? true | (!false || false) : 'F' != (var_13 = 'V')) ? (var_13 = 'j') : 'D') + { + case 'p': + + } + var_13 = (var_13 = (var_13 = 'y')); + var_13 = ("" + "wwbxwyt").charAt((short)8373354379880418304L << (short)(byte)1.557224945027141E308); + arg_0 = true ? (arg_0 = arg_0) : arg_0; + ((Tester_Class_0)(((Tester_Class_0)(var_1 = "rmkvbp")).var_1 = (var_1 = "gvb"))).var_1 = new Tester_Class_0[(byte)var_14]; + arg_0 = arg_0; + return 2.5245162E38F + 3.9088502076337727E307 > 1.0883030748712796E308 ? "efttufmbg" : "xy"; + } + + protected short func_4(final double arg_0, long arg_1, final Object arg_2, final byte arg_3) + { + (((("ptnmpn".startsWith("xxgxjvsie") ? !true : ! !true) ? ! !false : false) ? (true ? true : false) : (var_13 = (var_13 = 'X')) == 1948276447) ? (Tester_Class_0)(var_1 = new float[arg_3][arg_3]) : (Tester_Class_0)arg_2).var_1 = new double[(new byte[arg_3][arg_3])[arg_3][arg_3]][arg_3]; + boolean var_23; + return true ? (short)'r' : (byte)(short)('\\' >>> arg_3); + } + + final static byte func_5(final long arg_0, float arg_1, final double arg_2) + { + arg_1 = arg_2 <= ((var_13 = 'D') <= - (3463565869695079424L << 'S') ? 'a' : (var_13 = 'o')) | (true ? !false : !false) ? (byte)(short)(var_13 = 'r') : (short)(byte)'S'; + { + { + arg_1 %= '_'; + } + char var_24 = 'G'; + "hc".lastIndexOf(""); + arg_1 *= (arg_1 /= (byte)-8.308933840953204E306); + --var_24; + arg_1 /= ~ ((byte)2.2736606E38F) <= (3392301601783633920L == 3.015267E38F ^ ! !true ? (byte)88382550 : (byte)1.2019729E38F) ? 1027309963 : + -4.355887546298483E307; + ((new Tester_Class_2[(byte)arg_2])[(byte)870592870]).var_1 = "i"; + arg_1 /= 't'; + } + var_13 = 'f'; + Tester_Class_0 var_25 = (new Tester_Class_0[(byte)arg_1])[(true | false ^ !false) & ! !true & - +2.758844E38F >= (new short[(byte)5.9160883E37F])[(byte)'r'] ? (byte)arg_0 : (byte)((short)1802078016 & arg_0)]; + arg_1 /= (byte)arg_2; + var_13 = 'R'; + boolean var_26 = (true ? false : !false) ? false : !true == ! 
!false; + arg_1 %= (var_26 ^= (byte)1101752087 <= (short)arg_1) ? arg_1 : (float)1882235314; + var_25 = var_25; + if (var_26 &= var_26) + { + var_13 = "".startsWith("vp", - ((byte)2040501187) ^ (byte)-3921263616446438400L) ? "gam".charAt(+ ((byte)arg_1)) : (var_13 = 'H'); + arg_1 /= var_26 | ! (var_26 ^ (byte)arg_0 <= (var_13 = 'r') | (var_26 |= var_26)) ? 'N' : ~8963227563301903360L; + } + else + { + var_13 = 'l'; + var_25 = (var_25 = var_25); + } + arg_1 += (byte)(("m".substring(837996717, 861903057).startsWith("vwpky", (var_13 = 'Z') % (byte)arg_1) ? !false ^ !var_26 : var_26) ? (byte)arg_1 : ((var_26 | var_26) ^ !false ? (short)'U' : (short)'d')); + ((1.3445538E38F < (arg_1 -= 1236356135) ^ (var_26 = true)) & var_26 ? var_25 : (var_25 = var_25)).var_1 = true ? (var_25.var_1 = var_25) : (var_25.var_1 = var_25); + var_26 |= - ((byte)251113415) < (short)arg_1 & false & false; + arg_1 += (var_26 &= true) ? 7.546147305340402E307 : (true & !var_26 ? (byte)1.5107802154474685E307 : (byte)arg_1); + var_25.var_1 = true ? "bqsewhn" : "aojssvan"; + return (byte)(var_13 = (char)(new short[(byte)46697464])[(byte)var_14]); + } + + public String toString() + { + String result = "[\n"; + result += "Tester_Class_2.var_13 = "; result += Tester.Printer.print(var_13); + result += "\n"; + result += "Tester_Class_2.var_14 = "; result += Tester.Printer.print(var_14); + result += "\n"; + result += "Tester_Class_2.var_1 = "; result += Tester.Printer.print(var_1); + result += ""; + result += "\n]"; + return result; + } +} + +public class Tester { + static Tester_Class_0 var_28; + double var_29 = 4.861846831496217E307; + static byte var_30 = 79; + static boolean var_31 = false; + final byte var_32 = 48; + + + public short func_0(final byte arg_0, boolean arg_1, final double arg_2) + { + --var_30; + new Tester_Class_2().var_1 = "xdwldeqhc"; + char var_33 = 'd'; + return (var_31 = var_30 < (new short[var_30])[var_30]) ? (short)(var_33 >>>= (short)2.3152642E38F) : var_30; + } + + private final float func_1(final long arg_0, final Tester_Class_0 arg_1, Tester_Class_0 arg_2, final char arg_3) + { + { + new Tester_Class_0(); + } + var_30 >>= (short)1.1469118780047631E308; + var_31 |= var_31; + if (true) + { + Tester_Class_2 var_34 = (var_31 ^= (var_31 ^= false)) ? (Tester_Class_2)arg_1 : (new Tester_Class_2[var_32])[var_30]; + } + else + { + "shqjwg".compareToIgnoreCase(false ? "gxoyw" + "cx" : "r" + "icj"); + } + ((new Tester_Class_2[var_32])[++var_30]).var_1 = (new Object[var_30][var_32][var_32])[var_30 |= var_32]; + arg_1.var_1 = (arg_2 = arg_2); + arg_1.var_1 = (((new Tester_Class_2[var_30])[var_30 = var_32]).var_1 = new String[var_32]); + var_28 = (Tester_Class_2)((arg_2 = arg_2).var_1 = new double[var_30]); + arg_2 = arg_1; + final boolean var_35 = "qjsdfuyru".endsWith("nuw" + "bne") | arg_3 >= 1.2054153346397785E308; + var_31 &= (var_31 ^= var_35); + { + var_31 &= var_31 ? false : !false; + } + if (var_31) + { + var_28 = (new Tester_Class_2[var_32])[var_32]; + } + else + { + arg_1.equals((new Object[var_32])[var_32]); + } + ((var_31 ^= true) ? "x" : "euc").substring(+ (var_30 ^= var_30) & (byte)(new short[var_30])[var_30 /= 4.220757896697652E307]); + arg_2 = var_31 ? 
(Tester_Class_2)arg_1 : (Tester_Class_2)arg_2; + arg_2 = (Tester_Class_2)(new Object[var_32][var_32])[var_30][var_32]; + return 9.932983E36F * (Tester_Class_2.var_13 = arg_3); + } + + public int func_2(Tester_Class_0 arg_0) + { + "ptwusxrxn".indexOf("v"); + var_30 |= 1770666843; + var_29 += (short)(Tester_Class_2.var_14 / Tester_Class_2.var_14); + if (var_31) + { + var_30 = var_32; + } + else + { + double var_36; + } + if (true) + { + var_30 -= 2.1474216E38F; + } + else + { + var_29 += 'f'; + } + var_29 -= Tester_Class_2.var_14; + if (false) + { + arg_0.var_1 = arg_0; + } + else + { + arg_0 = (var_28 = (var_28 = arg_0)); + } + { + Tester_Class_2.var_13 = 'p'; + } + var_29 = ~Tester_Class_2.var_14 % 'y'; + switch (var_30 >>>= var_30) + { + case 115: + + } + arg_0 = arg_0; + var_31 ^= (var_31 = true); + return "".charAt(~var_32) >> ((var_31 &= true) ? var_30 : var_32); + } + + protected double func_3(boolean[] arg_0, double arg_1, long[][] arg_2) + { + switch (var_31 ? (short)(var_32 / (arg_1 /= (int)(arg_1 *= (Tester_Class_2.var_13 = ']')))) : (var_30 ^= + +Tester_Class_2.var_14 * (+ +3075166460237931520L / '['))) + { + case 31: + + case 118: + + } + Tester_Class_2.var_13 = 'm'; + "mwkh".indexOf("hdgiquve", (new char[var_32])[var_30 &= 'S'] % var_30); + final boolean var_37 = false; + char var_38 = 'F'; + var_28 = (new Tester_Class_2[var_30][var_30])[var_30 *= var_38][var_32]; + new Tester_Class_0().var_1 = new char[var_32][var_30]; + var_30 &= var_30; + var_31 |= (var_31 |= true); + int var_39 = 1769831681; + final Tester_Class_2 var_40 = (var_31 |= false ? var_31 : var_37) ? (Tester_Class_2)((new Object[var_30][var_30][var_32])[var_30 <<= var_30])[var_30][var_30++] : (Tester_Class_2)new Tester_Class_0(); + var_30 -= +var_30; + "dvmuim".replaceFirst("clmisuxu", "".toLowerCase() + "xi"); + var_39 = var_38 - (short)585806817; + arg_1 += (double)(short)(6.4329855E37F / var_32 * (var_38 += 'w')); + ++var_39; + var_40.var_1 = (var_28 = (var_28 = var_40)); + var_29 /= 3.5705444317400964E306; + Tester_Interface_1 var_41; + return arg_1; + } + + final char func_4(final long arg_0, int arg_1, final int arg_2) + { + { + Tester_Class_2.var_13 = 'D'; + var_31 = false; + } + if (var_31) + { + var_30--; + Tester_Class_0 var_42 = var_28 = (new Tester_Class_0[var_32])[var_32]; + } + else + { + Tester_Class_2.var_13 = (Tester_Class_2.var_13 = 'H'); + } + var_28 = (Tester_Class_2)(new Tester_Class_2().var_1 = (var_28 = (new Tester_Class_0[var_30])[var_30])); + arg_1 &= var_32; + --var_30; + return Tester_Class_2.var_13 = 'n'; + } + + final static Object func_5() + { + var_28 = (Tester_Class_2)(! (var_31 |= true & var_31) ? (new Tester_Class_0[var_30])[var_30] : (var_28 = (new Tester_Class_0[var_30])[var_30])); + var_31 |= false; + return var_31 ? (((Tester_Class_2)(new Object[var_30])[var_30]).var_1 = (new Tester_Class_0[var_30 /= 1366944127])[var_30]) : (new Tester_Class_0[var_30][var_30])[var_30 += 1.4070924450257891E307][var_30]; + } + + static Tester_Class_2 func_6(final Object arg_0) + { + var_28 = (var_31 ^= true) ? (Tester_Class_2)arg_0 : (Tester_Class_2)arg_0; + ((new Tester[--var_30])[var_30]).var_29 -= -3.0029087E38F * (Tester_Class_2.var_13 = (Tester_Class_2.var_13 = 'k')); + var_28 = (Tester_Class_2)(var_28 = (var_28 = (var_28 = (Tester_Class_0)arg_0))); + return false ? 
new Tester_Class_2() : new Tester_Class_2(); + } + + private static Tester_Interface_1 func_7(byte arg_0, Tester_Class_2 arg_1, final int arg_2, char arg_3) + { + { + var_31 = var_31; + var_31 |= (var_31 &= (var_31 |= !true)); + arg_0 += Tester_Class_2.var_14; + } + var_30 = (var_30 *= ((new Tester[arg_0 ^= 1039546367])[arg_0]).var_32); + arg_1.var_1 = (var_31 ^= (var_31 &= !var_31 && !var_31)) ? (arg_1 = (arg_1 = arg_1)) : (new Tester_Class_0[arg_0++][arg_0])[arg_0][arg_0]; + arg_1.var_1 = var_31 ? "jpmg" : "epwvupaqp"; + var_28 = arg_1; + if (false) + { + arg_1 = arg_1; + } + else + { + arg_3 += var_30 % -9.298157E37F; + } + arg_1.var_1 = ((var_28 = (var_28 = arg_1)).var_1 = ((var_28 = arg_1).var_1 = "rlmf")); + var_28 = (arg_1 = arg_1); + arg_1 = arg_1; + { + var_31 = true; + } + (arg_1 = arg_1).var_1 = (arg_1.var_1 = (arg_1 = arg_1)); + final short[] var_43 = new short[(var_31 &= false) ? arg_0 : var_30]; + final short var_44 = 18087; + return false ? (new Tester_Interface_1[var_30 %= Tester_Class_2.var_14])[arg_0 -= 8.913955E37F * +arg_0] : (new Tester_Interface_1[var_30])[var_30]; + } + + private static float func_8(final Object[] arg_0, final byte arg_1, Tester_Class_0[] arg_2) + { + ((new Tester_Class_2[var_30 /= -2.37832E38F])[((new Tester[arg_1])[(new byte[arg_1])[arg_1]]).var_32]).var_1 = new Tester_Interface_1[arg_1]; + if (var_31) + { + var_28 = (Tester_Class_2)(var_28 = arg_2[var_30]); + } + else + { + var_30 -= Tester_Class_2.var_14; + final int var_45 = 1864933386; + final Tester_Class_0 var_46 = var_28 = (new Tester_Class_2[arg_1][var_30 |= Tester_Class_2.var_14])[((new Tester[arg_1][var_30])[var_30][var_30]).var_32][arg_1]; + var_31 &= (var_31 ^= false); + var_30 += (false ? 2.7581227E37F : + +1.8832631E38F) * ('P' * -1.9479086E38F); + } + (var_28 = (Tester_Class_2)arg_2[arg_1]).var_1 = (new Tester_Class_2[var_30][arg_1][arg_1])[arg_1]; + ++var_30; + (var_28 = (new Tester_Class_2[var_30])[arg_1]).var_1 = false ? (new Tester_Class_0[var_30][var_30])[var_30][arg_1] : arg_2[var_30]; + var_31 |= false; + ((new Tester[arg_1][var_30 >>>= (char)1981599])[arg_1][arg_1]).var_29 = false ^ (var_31 = var_31) ? (Tester_Class_2.var_13 = "nes".charAt(1080898679)) : - (var_30 %= 'n') ^ arg_1; + { + ((new Tester[arg_1])[var_30]).var_29 = (short)(Tester_Class_2.var_13 = (Tester_Class_2.var_13 = 'h')); + } + Tester_Class_2.var_13 = 'j'; + ((new Tester_Class_2[arg_1])[var_30]).var_1 = new byte[var_30]; + var_31 ^= (var_31 ^= !false); + "wcbi".toString(); + final byte var_47 = var_30; + if (true) + { + "mpbkilw".substring(199662864); + } + else + { + var_30++; + } + Tester_Class_2.var_13 = '`'; + "yujypq".indexOf("crve" + "inmvh", false ? ~arg_1 : "vukbkc".charAt(528096652)); + Tester_Class_2 var_48 = (Tester_Class_2)((!var_31 & (var_31 ^= false ^ !true) ? false | !true : false) ? (var_28 = (Tester_Class_2)arg_0[var_30]) : (new Tester_Class_2[arg_1])[var_30]); + return (float)(var_31 ^ var_31 ? (var_30 ^= (short)((new Tester[var_30])[var_30]).var_29) : (var_30 /= - -5.917837279005691E307 - - +3.2004715720509636E306)); + } + + final Tester_Class_0 func_9() + { + var_30 += -3531557622894482432L / ~var_30; + var_28 = (var_28 = (new Tester_Class_2[var_30 += + -7.0203367E37F])[var_30]); + var_31 ^= ! !false ^ false; + if (true) + { + byte[] var_49; + var_31 &= true; + short var_50 = 14869; + } + else + { + (((new Tester[var_32][var_30][var_32])[var_30])[var_32][var_30]).var_29 *= 'h'; + } + final int var_51 = false ? 
+var_32 >>> ~var_30 : --var_30; + var_31 |= false; + return (new Tester_Class_0[var_32][var_32])[var_30][var_30]; + } + + public static Object func_10() + { + Tester_Class_2.var_13 = (Tester_Class_2.var_13 = 'O'); + return ((Tester_Class_2)(new Object[--var_30])[var_30]).var_1 = (((Tester_Class_2)(new Object[var_30])[var_30]).var_1 = "iu"); + } + + protected byte func_11(final Tester_Interface_1[] arg_0) + { + { + --var_30; + var_29 *= (short)1749707986816114688L; + var_31 = (var_31 = (var_31 |= var_31)); + var_30 -= 6.4575257E37F; + } + var_28 = (var_28 = func_9()); + Tester_Class_2.var_13 = 'b'; + var_30--; + var_28 = (var_28 = (var_28 = (var_28 = (Tester_Class_2)func_10()))); + var_28 = (new Tester_Class_2[var_32])[var_30]; + char var_52 = (var_31 = false) ? (char)+ +Tester_Class_2.var_14 : (Tester_Class_2.var_13 = (Tester_Class_2.var_13 = '_')); + if (false) + { + var_30 += (short)(var_32 + 840104459); + } + else + { + var_29 /= 2.5320765E38F; + } + if (true) + { + var_30 %= 379977167934025728L; + } + else + { + var_52 = var_52; + } + var_30 >>= ('A' ^ --var_30) & 'H'; + Tester_Class_2 var_53; + final int var_54 = 771623383; + var_29 %= (+1.938582E37F - 'G') * ++var_30; + int var_55 = (short)6.234053079170724E306 - (int)'^' / ((var_31 &= (var_31 ^= var_31)) ? (short)var_30 : ++var_30); + var_55 >>>= var_55 / ~6865244393150017536L; + var_31 ^= !false; + return var_30 >>>= 4351766607072508928L >> func_0(var_32, var_31, 1.021781609675458E308); + } + + public static String execute() + { + try { + Tester t = new Tester(); + try { t.test(); } + catch(Throwable e) { } + try { return t.toString(); } + catch (Throwable e) { return "Error during result conversion to String"; } + } catch (Throwable e) { return "Error during test execution"; } + } + + public static void main(String[] args) + { + try { + Tester t = new Tester(); + try { t.test(); } + catch(Throwable e) { } + try { System.out.println(t); } + catch(Throwable e) { } + } catch (Throwable e) { } + } + + private void test() + { + { + ((Tester_Class_2)(Tester_Class_0)((var_31 = true) ^ (var_31 |= false & true) ? func_10() : new Tester_Class_2[var_32])).var_1 = ((var_28 = new Tester_Class_0()).var_1 = (new Tester_Class_0().var_1 = new Tester_Class_2())); + } + new String(); + Tester_Class_2.var_13 = '['; + var_28 = (Tester_Class_2)(((var_31 ^= true) ? func_9() : new Tester_Class_0()).var_1 = new Tester_Class_2()); + var_28 = new Tester_Class_0(); + new String(); + { + if ((new boolean[var_30])[var_30] & 732877932 == 1.4654316E38F & "xpqy".replace(']', 'f').startsWith("pqrqunvfx") ? ((var_31 ^= (var_31 &= var_31)) ? !var_31 : (var_31 &= true)) : var_31) + { + var_31 &= var_31; + } + else + { + func_2(var_28 = new Tester_Class_0()); + } + { + ((new Tester_Class_0[var_30][var_30])[var_30][var_30]).var_1 = "uwrbjvl"; + var_28 = (Tester_Class_0)func_5(); + } + Tester_Class_2.var_13 = 's'; + Tester_Class_0 var_56 = var_28 = true ? func_6(func_5()) : (new Tester_Class_2[var_30][var_32])[var_32][var_32]; + double var_57 = 0; + var_29 += 1024895641427426304L % ~ ~Tester_Class_2.var_14; + while ((true || (var_31 = var_31)) && var_57 < 12) + { + (false & !false ? var_56 : (var_56 = var_56)).var_1 = "vbc"; + var_57++; + var_29 += func_0(var_30 /= (short)var_30 / var_32, false, var_57); + (var_31 ? (Tester_Class_2)var_56 : (new Tester_Class_2[var_30])[var_30]).var_1 = new double[var_31 ? (var_30 |= 177265600183083008L) : var_32]; + } + switch (var_31 ^ var_31 | ! (var_31 = false) & (var_31 & false) ? (true ? 
(short)var_30 : func_0(var_30, true, var_57)) : func_0(var_30, var_31, var_29 /= 2.7203017E38F)) + { + case 23: + var_31 ^= true ? var_31 : var_31; + break; + + case 40: + + default: + + case 104: + + } + Object var_58 = (var_31 ^= (var_31 ^= false) | (~3392263608888292352L < 1681682009 | var_31)) ? (((new Tester_Class_2[Tester_Class_2.func_5(Tester_Class_2.var_14, 3.3507317E38F, var_57)])[var_30 %= var_32]).var_1 = (var_56 = (Tester_Class_0)func_5())) : func_5(); + (var_56 = (var_28 = (var_56 = func_6(var_58)))).var_1 = (((new Tester_Class_2[var_30])[var_30]).var_1 = new float[(byte)Tester_Class_2.var_14][var_32]); + new Tester_Class_2().var_1 = new double[var_30]; + final Tester_Class_0 var_59 = var_28 = (!false & (false || (var_31 && !var_31) & false) ? var_31 : (var_31 |= !var_31)) ? (Tester_Class_2)(var_28 = new Tester_Class_2()) : (false ? new Tester_Class_2() : (Tester_Class_2)var_58); + } + var_30--; + { + var_29 += ~func_11(((new Tester_Interface_1[var_32][var_30][var_32])[var_30 ^= var_30])[var_32]); + } + float var_60 = 0F; + double var_61 = var_29; + do + { + long var_62; + var_60++; + "vcs".offsetByCodePoints((short)('w' | (short)var_30) & (new short[--var_30])[var_30], ((var_31 |= var_31) ^ (var_31 = var_31) ? var_32 : var_30) - (short)86413218); + } while (false && (var_60 < 100 && ((var_31 = false) ? var_31 : (var_31 ^= false)) & var_31)); + long var_63 = 0L; + double var_64 = (var_31 ? (Tester_Class_2.var_13 = 'V') : 'e') - (var_29 = 'A') * (var_29 = var_30); + var_31 &= (short)+ var_30-- + var_30 <= var_60; + while (false && (var_63 < 23 && false)) + { + var_30 >>= func_0(var_30, var_31, var_29) ^ ("".endsWith("mecvlajq" + "jhrbf") ? var_30 : (short)var_61); + var_63++; + var_28 = (Tester_Class_2)(var_28 = new Tester_Class_2()); + new Tester_Class_0(); + } + double var_65 = 0; + var_61 -= var_65; + for (((var_31 &= (var_31 ^= var_31)) | var_31 ? new Tester_Class_2() : new Tester_Class_2()).var_1 = ! (var_31 = false) && !var_31 ? func_10() : new Object[var_32][var_32]; var_31 | var_31 && (var_65 < 107 && (false ? !var_31 : (var_31 &= var_31))); var_30 *= Tester_Class_2.var_14) + { + var_28 = var_30 > (var_31 | true ? (Tester_Class_2.var_13 = (Tester_Class_2.var_13 = 'k')) : (Tester_Class_2.var_13 = (Tester_Class_2.var_13 = 'r'))) ? (Tester_Class_2)((var_28 = (Tester_Class_0)func_5()).var_1 = func_5()) : new Tester_Class_2(); + var_65++; + var_29 = "efothsl".startsWith("qk") ? 3.2306712E37F : +1.1835607464479516E308; + new Tester_Class_2().var_1 = new Tester_Class_0(); + } + double var_66 = 0; + func_6(((Tester_Class_2)((true ? (Tester_Class_2)func_10() : (Tester_Class_2)func_9()).var_1 = (var_28 = (Tester_Class_2)func_10()))).var_1 = ""); + while (false | (new boolean[var_30][var_30])[var_30][var_32] && (var_66 < 104 && false)) + { + ((var_31 ^= !false) ? "" : "slgr").toUpperCase(); + var_66++; + var_28 = func_6(func_9().var_1 = func_10()); + var_31 |= (var_31 |= true); + } + Tester_Class_2.var_13 = var_31 ? (Tester_Class_2.var_13 = 'Y') : (Tester_Class_2.var_13 = (Tester_Class_2.var_13 = (Tester_Class_2.var_13 = func_4(var_63, 137999726, 1870981930)))); + int var_67 = 0; + final double var_68 = 9.955130040461458E307; + do + { + "".lastIndexOf(var_32 != (Tester_Class_2.var_13 = 'f') ? (Tester_Class_2.var_13 = 'C') : ~var_30, (char)3.187607E38F + (var_30 /= var_30)); + var_67++; + Tester_Class_2.var_13 = (Tester_Class_2.var_13 = 'B'); + } while (var_67 < 9 && var_31 ^ var_31); + int var_69 = 0; + (true ? 
func_6(func_10()) : (Tester_Class_2)func_9()).var_1 = ((var_28 = (var_28 = new Tester_Class_2())).var_1 = (func_6(new Tester_Class_2().var_1 = new Tester_Class_0()).var_1 = (Tester_Class_2)func_9())); + do + { + var_28 = (Tester_Class_2)(var_28 = (Tester_Class_0)(new Tester_Class_2().var_1 = "qs")); + var_69++; + var_31 ^= var_31; + } while (var_69 < 189); + (! (var_31 = true) | var_31 ? new Tester_Class_2() : (new Tester_Class_0[var_30])[var_30]).var_1 = (((new Tester_Class_2[var_30 ^= (short)var_30])[var_30]).var_1 = (var_31 ? var_31 : (var_31 |= false)) ? "lfhliyphg" : "impovq"); + ((var_31 ? var_32 == (short)8901596111720974336L : 220705842 != var_60) ? (new Tester_Class_2[var_30])[var_32] : (new Tester_Class_2[var_30][var_32])[var_30][var_30]).var_1 = (var_28 = ((char)var_30 >= "pngjthlf".lastIndexOf("jjbuojp") & var_31 ? var_31 : var_31) ? (new Tester_Class_2[var_32])[var_30] : func_6(new Tester_Class_2().var_1 = new String[var_32])); + Tester_Class_2.var_13 = 'c'; + } + public String toString() + { + String result = "[\n"; + result += "Tester.var_29 = "; result += Printer.print(var_29); + result += "\n"; + result += "Tester.var_30 = "; result += Printer.print(var_30); + result += "\n"; + result += "Tester.var_32 = "; result += Printer.print(var_32); + result += "\n"; + result += "Tester.var_31 = "; result += Printer.print(var_31); + result += "\n"; + result += "Tester.var_28 = "; result += Printer.print(var_28); + result += ""; + result += "\n]"; + return result; + } + static class Printer + { + public static String print(boolean arg) { return String.valueOf(arg); } + public static String print(byte arg) { return String.valueOf(arg); } + public static String print(short arg) { return String.valueOf(arg); } + public static String print(char arg) { return String.valueOf((int)arg); } + public static String print(int arg) { return String.valueOf(arg); } + public static String print(long arg) { return String.valueOf(arg); } + public static String print(float arg) { return String.valueOf(arg); } + public static String print(double arg) { return String.valueOf(arg); } + + + public static String print(Object arg) + { + return print_r(new java.util.Stack(), arg); + } + + private static String print_r(java.util.Stack visitedObjects, Object arg) + { + String result = ""; + if (arg == null) + result += "null"; + else + if (arg.getClass().isArray()) + { + for (int i = 0; i < visitedObjects.size(); i++) + if (visitedObjects.elementAt(i) == arg) return ""; + + visitedObjects.push(arg); + + final String delimiter = ", "; + result += "["; + + if (arg instanceof Object[]) + { + Object[] array = (Object[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print_r(visitedObjects, array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof boolean[]) + { + boolean[] array = (boolean[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof byte[]) + { + byte[] array = (byte[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof short[]) + { + short[] array = (short[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof char[]) + { + char[] array = (char[]) arg; + for (int i = 0; i < array.length; i++) + { + result += 
print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof int[]) + { + int[] array = (int[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof long[]) + { + long[] array = (long[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof float[]) + { + float[] array = (float[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof double[]) + { + double[] array = (double[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + + result += "]"; + visitedObjects.pop(); + + } else + { + result += arg.toString(); + } + + return result; + } + } +} + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6726999/Test.java 2009-08-01 04:21:35.833033715 +0100 @@ -0,0 +1,1419 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +/* + * @test + * @bug 6726999 + * @summary nsk/stress/jck12a/jck12a010 assert(n != NULL,"Bad immediate dominator info."); + * @run main/othervm -Xbatch -XX:CompileCommand=exclude,Test.dummy -XX:+AggressiveOpts Test + */ + +import java.lang.reflect.Array; + +class Point { + int x; + int y; +} + +public class Test { + + void dummy() { + // Empty method to verify correctness of DebugInfo. 
+ // Use -XX:CompileCommand=exclude,Test.dummy + } + + int test0_0_0(int y) { + int x = 3; + Point p = new Point(); + dummy(); + p.x = x; + p.y = 3 * x + y; + return p.x * p.y; + } + + int test0_0_1(int y) { + int x = 3; + Point p = null; + dummy(); + p = new Point(); + dummy(); + p.x = x; + p.y = 3 * x + y; + return p.x * p.y; + } + + int test0_0_2(int y) { + int x = 3; + Point p = new Point(); + dummy(); + p = new Point(); + dummy(); + p.x = x; + p.y = 3 * x + y; + return p.x * p.y; + } + + int test0_0_3(int y) { + int x = 3; + Point p[] = new Point[1]; + p[0] = new Point(); + dummy(); + p[0].x = x; + p[0].y = 3 * x + y; + return p[0].x * p[0].y; + } + + int test0_0_4(int y) { + int x = 3; + Point p[] = new Point[1]; + dummy(); + p[0] = new Point(); + dummy(); + p[0].x = x; + p[0].y = 3 * x + y; + return p[0].x * p[0].y; + } + + int test0_0_5(int y) { + int x = 3; + Point p[] = new Point[1]; + dummy(); + p[0] = null; + dummy(); + p[0] = new Point(); + dummy(); + p[0].x = x; + p[0].y = 3 * x + y; + return p[0].x * p[0].y; + } + + int test0_0_6(int y) { + int x = 3; + Point p[] = new Point[1]; + p[0] = new Point(); + dummy(); + p[0] = new Point(); + dummy(); + p[0].x = x; + p[0].y = 3 * x + y; + return p[0].x * p[0].y; + } + + int test0_1_3(int y) { + int x = 3; + Point p1 = new Point(); + dummy(); + Point p[] = new Point[1]; + p[0] = p1; + dummy(); + p[0].x = x; + p[0].y = 3 * x + y; + return p[0].x * p[0].y; + } + + int test0_1_4(int y) { + int x = 3; + Point p1 = new Point(); + dummy(); + Point p[] = new Point[1]; + dummy(); + p[0] = p1; + dummy(); + p[0].x = x; + p[0].y = 3 * x + y; + return p[0].x * p[0].y; + } + + int test0_1_5(int y) { + int x = 3; + Point p1 = new Point(); + dummy(); + Point p[] = new Point[1]; + dummy(); + p[0] = null; + dummy(); + p[0] = p1; + dummy(); + p[0].x = x; + p[0].y = 3 * x + y; + return p[0].x * p[0].y; + } + + int test0_1_6(int y) { + int x = 3; + Point p1 = new Point(); + dummy(); + Point p2 = new Point(); + dummy(); + Point p[] = new Point[1]; + p[0] = p1; + dummy(); + p[0] = p2; + dummy(); + p[0].x = x; + p[0].y = 3 * x + y; + return p[0].x * p[0].y; + } + + int test1_0_0(int y) { + Point p = new Point(); + if ( (y & 1) == 1 ) { + p = new Point(); + } + int x = 3; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test1_0_1(int y) { + Point p = null; + if ( (y & 1) == 1 ) { + p = new Point(); + } + int x = 3; + if ( p == null ) + return (3 * x + y) * x; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test1_0_2(int y) { + Point p[] = new Point[1]; + if ( (y & 1) == 1 ) { + p[0] = new Point(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_0_3(int y) { + Point p[] = new Point[1]; + p[0] = null; + if ( (y & 1) == 1 ) { + p[0] = new Point(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_0_4(int y) { + Point p[] = new Point[1]; + p[0] = new Point(); + if ( (y & 1) == 1 ) { + p[0] = new Point(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_0_5(int y) { + Point p[] = new Point[1]; + if ( (y & 1) == 1 ) { + p[0] = new Point(); + } else { + p[0] = null; + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; 
+ } + + int test1_0_6(int y) { + Point p[] = new Point[1]; + if ( (y & 1) == 1 ) { + p[0] = new Point(); + } else { + p[0] = new Point(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_1_0(int y) { + Point p = new Point(); + if ( (y & 1) == 1 ) { + dummy(); + p = new Point(); + dummy(); + } + int x = 3; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test1_1_1(int y) { + Point p = null; + if ( (y & 1) == 1 ) { + dummy(); + p = new Point(); + dummy(); + } + int x = 3; + if ( p == null ) + return (3 * x + y) * x; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test1_1_2(int y) { + Point p[] = new Point[1]; + if ( (y & 1) == 1 ) { + dummy(); + p[0] = new Point(); + dummy(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_1_3(int y) { + Point p[] = new Point[1]; + dummy(); + p[0] = null; + if ( (y & 1) == 1 ) { + dummy(); + p[0] = new Point(); + dummy(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_1_4(int y) { + Point p[] = new Point[1]; + dummy(); + p[0] = new Point(); + if ( (y & 1) == 1 ) { + dummy(); + p[0] = new Point(); + dummy(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_1_5(int y) { + Point p[] = new Point[1]; + if ( (y & 1) == 1 ) { + dummy(); + p[0] = new Point(); + dummy(); + } else { + dummy(); + p[0] = null; + dummy(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_1_6(int y) { + Point p[] = new Point[1]; + if ( (y & 1) == 1 ) { + dummy(); + p[0] = new Point(); + dummy(); + } else { + dummy(); + p[0] = new Point(); + dummy(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_2_0(int y) { + Point p1 = new Point(); + dummy(); + Point p = new Point(); + if ( (y & 1) == 1 ) { + dummy(); + p = p1; + dummy(); + } + int x = 3; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test1_2_1(int y) { + Point p1 = new Point(); + dummy(); + Point p = null; + if ( (y & 1) == 1 ) { + dummy(); + p = p1; + dummy(); + } + int x = 3; + if ( p == null ) + return (3 * x + y) * x; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test1_2_2(int y) { + Point p1 = new Point(); + dummy(); + Point p[] = new Point[1]; + if ( (y & 1) == 1 ) { + dummy(); + p[0] = p1; + dummy(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_2_3(int y) { + Point p1 = new Point(); + dummy(); + Point p[] = new Point[1]; + dummy(); + p[0] = null; + if ( (y & 1) == 1 ) { + dummy(); + p[0] = p1; + dummy(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_2_4(int y) { + Point p1 = new Point(); + dummy(); + Point p2 = new Point(); + dummy(); + Point p[] = new Point[1]; + dummy(); + p[0] = p1; + if ( (y & 1) == 1 ) { + dummy(); + p[0] = p2; + dummy(); + } + int x = 3; + if ( p[0] == 
null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_2_5(int y) { + Point p1 = new Point(); + dummy(); + Point p[] = new Point[1]; + if ( (y & 1) == 1 ) { + dummy(); + p[0] = p1; + dummy(); + } else { + dummy(); + p[0] = null; + dummy(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test1_2_6(int y) { + Point p1 = new Point(); + dummy(); + Point p2 = new Point(); + dummy(); + Point p[] = new Point[1]; + if ( (y & 1) == 1 ) { + dummy(); + p[0] = p1; + dummy(); + } else { + dummy(); + p[0] = p2; + dummy(); + } + int x = 3; + if ( p[0] == null ) + return (3 * x + y) * x; + p[0].x = x; + p[0].y = 3 * x + y; + dummy(); + return p[0].x * p[0].y; + } + + int test2_0_0(int y) { + Point p = new Point(); + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + p = new Point(); + } + int x = 3; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test2_0_1(int y) { + Point p = null; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + p = new Point(); + } + int x = 3; + if ( p == null ) + return (3 * x + y) * x; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test2_0_2(int y) { + Point p[] = new Point[3]; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + p[i] = new Point(); + } + int x = 3; + int j = (y & 1); + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_0_3(int y) { + Point p[] = new Point[3]; + int j = (y & 1); + p[j] = null; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + p[i] = new Point(); + } + int x = 3; + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_0_4(int y) { + Point p[] = new Point[3]; + int j = (y & 1); + p[j] = new Point(); + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + p[i] = new Point(); + } + int x = 3; + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_0_5(int y) { + Point p[] = new Point[3]; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + p[i] = new Point(); + } + for (int i = 0; i < lim; i++) { + p[i] = null; + } + int x = 3; + int j = (y & 1); + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_0_6(int y) { + Point p[] = new Point[3]; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + p[i] = new Point(); + } + for (int i = 0; i < lim; i++) { + p[i] = new Point(); + } + int x = 3; + int j = (y & 1); + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_1_0(int y) { + Point p = new Point(); + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p = new Point(); + dummy(); + } + int x = 3; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test2_1_1(int y) { + Point p = null; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p = new Point(); + dummy(); + } + int x = 3; + if ( p == null ) + return (3 * x + y) * x; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test2_1_2(int y) { + Point p[] = new Point[3]; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = new Point(); + dummy(); + } + 
int x = 3; + int j = (y & 1); + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_1_3(int y) { + Point p[] = new Point[3]; + dummy(); + int j = (y & 1); + p[j] = null; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = new Point(); + dummy(); + } + int x = 3; + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_1_4(int y) { + Point p[] = new Point[3]; + dummy(); + int j = (y & 1); + p[j] = new Point(); + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = new Point(); + dummy(); + } + int x = 3; + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_1_5(int y) { + Point p[] = new Point[3]; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = new Point(); + dummy(); + } + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = null; + dummy(); + } + int x = 3; + int j = (y & 1); + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_1_6(int y) { + Point p[] = new Point[3]; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = new Point(); + dummy(); + } + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = new Point(); + dummy(); + } + int x = 3; + int j = (y & 1); + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_2_0(int y) { + Point p1 = new Point(); + dummy(); + Point p = new Point(); + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p = p1; + dummy(); + } + int x = 3; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test2_2_1(int y) { + Point p1 = new Point(); + dummy(); + Point p = null; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p = p1; + dummy(); + } + int x = 3; + if ( p == null ) + return (3 * x + y) * x; + p.x = x; + p.y = 3 * x + y; + dummy(); + return p.x * p.y; + } + + int test2_2_2(int y) { + Point p1 = new Point(); + dummy(); + Point p[] = new Point[3]; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = p1; + dummy(); + } + int x = 3; + int j = (y & 1); + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_2_3(int y) { + Point p1 = new Point(); + dummy(); + Point p[] = new Point[3]; + dummy(); + int j = (y & 1); + p[j] = null; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = p1; + dummy(); + } + int x = 3; + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_2_4(int y) { + Point p1 = new Point(); + dummy(); + Point p2 = new Point(); + dummy(); + Point p[] = new Point[3]; + dummy(); + int j = (y & 1); + p[j] = p1; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = p2; + dummy(); + } + int x = 3; + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_2_5(int y) { + Point p1 = new Point(); + dummy(); + Point p[] = new Point[3]; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = p1; + dummy(); + } + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = null; + 
dummy(); + } + int x = 3; + int j = (y & 1); + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + int test2_2_6(int y) { + Point p1 = new Point(); + dummy(); + Point p2 = new Point(); + dummy(); + Point p[] = new Point[3]; + int lim = (y & 3); + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = p1; + dummy(); + } + for (int i = 0; i < lim; i++) { + dummy(); + p[i] = p2; + dummy(); + } + int x = 3; + int j = (y & 1); + if ( p[j] == null ) + return (3 * x + y) * x; + p[j].x = x; + p[j].y = 3 * x + y; + dummy(); + return p[j].x * p[0].y; + } + + public static void main(String args[]) { + Test tsr = new Test(); + Point p = new Point(); + Point ptmp = p; + Class cls = Point.class; + int y = 0; + for (int i=0; i<10000; i++) { + y = tsr.test0_0_0(y); + y = tsr.test0_0_0(y); + y = tsr.test0_0_1(y); + y = tsr.test0_0_1(y); + y = tsr.test0_0_2(y); + y = tsr.test0_0_2(y); + y = tsr.test0_0_3(y); + y = tsr.test0_0_3(y); + y = tsr.test0_0_4(y); + y = tsr.test0_0_4(y); + y = tsr.test0_0_5(y); + y = tsr.test0_0_5(y); + y = tsr.test0_0_6(y); + y = tsr.test0_0_6(y); + + y = tsr.test0_1_3(y); + y = tsr.test0_1_3(y); + y = tsr.test0_1_4(y); + y = tsr.test0_1_4(y); + y = tsr.test0_1_5(y); + y = tsr.test0_1_5(y); + y = tsr.test0_1_6(y); + y = tsr.test0_1_6(y); + + y = tsr.test1_0_0(y&~1); + y = tsr.test1_0_1(y&~1); + y = tsr.test1_0_2(y&~1); + y = tsr.test1_0_3(y&~1); + y = tsr.test1_0_4(y&~1); + y = tsr.test1_0_5(y&~1); + y = tsr.test1_0_6(y&~1); + y = tsr.test1_0_0((y&~1)+1); + y = tsr.test1_0_1((y&~1)+1); + y = tsr.test1_0_2((y&~1)+1); + y = tsr.test1_0_3((y&~1)+1); + y = tsr.test1_0_4((y&~1)+1); + y = tsr.test1_0_5((y&~1)+1); + y = tsr.test1_0_6((y&~1)+1); + + y = tsr.test1_1_0(y&~1); + y = tsr.test1_1_1(y&~1); + y = tsr.test1_1_2(y&~1); + y = tsr.test1_1_3(y&~1); + y = tsr.test1_1_4(y&~1); + y = tsr.test1_1_5(y&~1); + y = tsr.test1_1_6(y&~1); + y = tsr.test1_1_0((y&~1)+1); + y = tsr.test1_1_1((y&~1)+1); + y = tsr.test1_1_2((y&~1)+1); + y = tsr.test1_1_3((y&~1)+1); + y = tsr.test1_1_4((y&~1)+1); + y = tsr.test1_1_5((y&~1)+1); + y = tsr.test1_1_6((y&~1)+1); + + y = tsr.test1_2_0(y&~1); + y = tsr.test1_2_1(y&~1); + y = tsr.test1_2_2(y&~1); + y = tsr.test1_2_3(y&~1); + y = tsr.test1_2_4(y&~1); + y = tsr.test1_2_5(y&~1); + y = tsr.test1_2_6(y&~1); + y = tsr.test1_2_0((y&~1)+1); + y = tsr.test1_2_1((y&~1)+1); + y = tsr.test1_2_2((y&~1)+1); + y = tsr.test1_2_3((y&~1)+1); + y = tsr.test1_2_4((y&~1)+1); + y = tsr.test1_2_5((y&~1)+1); + y = tsr.test1_2_6((y&~1)+1); + + y = tsr.test2_0_0(y&~3); + y = tsr.test2_0_1(y&~3); + y = tsr.test2_0_2(y&~3); + y = tsr.test2_0_3(y&~3); + y = tsr.test2_0_4(y&~3); + y = tsr.test2_0_5(y&~3); + y = tsr.test2_0_6(y&~3); + y = tsr.test2_0_0((y&~3)+3); + y = tsr.test2_0_1((y&~3)+3); + y = tsr.test2_0_2((y&~3)+3); + y = tsr.test2_0_3((y&~3)+3); + y = tsr.test2_0_4((y&~3)+3); + y = tsr.test2_0_5((y&~3)+3); + y = tsr.test2_0_6((y&~3)+3); + + y = tsr.test2_1_0(y&~3); + y = tsr.test2_1_1(y&~3); + y = tsr.test2_1_2(y&~3); + y = tsr.test2_1_3(y&~3); + y = tsr.test2_1_4(y&~3); + y = tsr.test2_1_5(y&~3); + y = tsr.test2_1_6(y&~3); + y = tsr.test2_1_0((y&~3)+3); + y = tsr.test2_1_1((y&~3)+3); + y = tsr.test2_1_2((y&~3)+3); + y = tsr.test2_1_3((y&~3)+3); + y = tsr.test2_1_4((y&~3)+3); + y = tsr.test2_1_5((y&~3)+3); + y = tsr.test2_1_6((y&~3)+3); + + y = tsr.test2_2_0(y&~3); + y = tsr.test2_2_1(y&~3); + y = tsr.test2_2_2(y&~3); + y = tsr.test2_2_3(y&~3); + y = tsr.test2_2_4(y&~3); + y = tsr.test2_2_5(y&~3); + y = 
tsr.test2_2_6(y&~3); + y = tsr.test2_2_0((y&~3)+3); + y = tsr.test2_2_1((y&~3)+3); + y = tsr.test2_2_2((y&~3)+3); + y = tsr.test2_2_3((y&~3)+3); + y = tsr.test2_2_4((y&~3)+3); + y = tsr.test2_2_5((y&~3)+3); + y = tsr.test2_2_6((y&~3)+3); + + } + for (int i=0; i<10000; i++) { + y = tsr.test0_0_0(y); + y = tsr.test0_0_0(y); + y = tsr.test0_0_1(y); + y = tsr.test0_0_1(y); + y = tsr.test0_0_2(y); + y = tsr.test0_0_2(y); + y = tsr.test0_0_3(y); + y = tsr.test0_0_3(y); + y = tsr.test0_0_4(y); + y = tsr.test0_0_4(y); + y = tsr.test0_0_5(y); + y = tsr.test0_0_5(y); + y = tsr.test0_0_6(y); + y = tsr.test0_0_6(y); + + y = tsr.test0_1_3(y); + y = tsr.test0_1_3(y); + y = tsr.test0_1_4(y); + y = tsr.test0_1_4(y); + y = tsr.test0_1_5(y); + y = tsr.test0_1_5(y); + y = tsr.test0_1_6(y); + y = tsr.test0_1_6(y); + + y = tsr.test1_0_0(y&~1); + y = tsr.test1_0_1(y&~1); + y = tsr.test1_0_2(y&~1); + y = tsr.test1_0_3(y&~1); + y = tsr.test1_0_4(y&~1); + y = tsr.test1_0_5(y&~1); + y = tsr.test1_0_6(y&~1); + y = tsr.test1_0_0((y&~1)+1); + y = tsr.test1_0_1((y&~1)+1); + y = tsr.test1_0_2((y&~1)+1); + y = tsr.test1_0_3((y&~1)+1); + y = tsr.test1_0_4((y&~1)+1); + y = tsr.test1_0_5((y&~1)+1); + y = tsr.test1_0_6((y&~1)+1); + + y = tsr.test1_1_0(y&~1); + y = tsr.test1_1_1(y&~1); + y = tsr.test1_1_2(y&~1); + y = tsr.test1_1_3(y&~1); + y = tsr.test1_1_4(y&~1); + y = tsr.test1_1_5(y&~1); + y = tsr.test1_1_6(y&~1); + y = tsr.test1_1_0((y&~1)+1); + y = tsr.test1_1_1((y&~1)+1); + y = tsr.test1_1_2((y&~1)+1); + y = tsr.test1_1_3((y&~1)+1); + y = tsr.test1_1_4((y&~1)+1); + y = tsr.test1_1_5((y&~1)+1); + y = tsr.test1_1_6((y&~1)+1); + + y = tsr.test1_2_0(y&~1); + y = tsr.test1_2_1(y&~1); + y = tsr.test1_2_2(y&~1); + y = tsr.test1_2_3(y&~1); + y = tsr.test1_2_4(y&~1); + y = tsr.test1_2_5(y&~1); + y = tsr.test1_2_6(y&~1); + y = tsr.test1_2_0((y&~1)+1); + y = tsr.test1_2_1((y&~1)+1); + y = tsr.test1_2_2((y&~1)+1); + y = tsr.test1_2_3((y&~1)+1); + y = tsr.test1_2_4((y&~1)+1); + y = tsr.test1_2_5((y&~1)+1); + y = tsr.test1_2_6((y&~1)+1); + + y = tsr.test2_0_0(y&~3); + y = tsr.test2_0_1(y&~3); + y = tsr.test2_0_2(y&~3); + y = tsr.test2_0_3(y&~3); + y = tsr.test2_0_4(y&~3); + y = tsr.test2_0_5(y&~3); + y = tsr.test2_0_6(y&~3); + y = tsr.test2_0_0((y&~3)+3); + y = tsr.test2_0_1((y&~3)+3); + y = tsr.test2_0_2((y&~3)+3); + y = tsr.test2_0_3((y&~3)+3); + y = tsr.test2_0_4((y&~3)+3); + y = tsr.test2_0_5((y&~3)+3); + y = tsr.test2_0_6((y&~3)+3); + + y = tsr.test2_1_0(y&~3); + y = tsr.test2_1_1(y&~3); + y = tsr.test2_1_2(y&~3); + y = tsr.test2_1_3(y&~3); + y = tsr.test2_1_4(y&~3); + y = tsr.test2_1_5(y&~3); + y = tsr.test2_1_6(y&~3); + y = tsr.test2_1_0((y&~3)+3); + y = tsr.test2_1_1((y&~3)+3); + y = tsr.test2_1_2((y&~3)+3); + y = tsr.test2_1_3((y&~3)+3); + y = tsr.test2_1_4((y&~3)+3); + y = tsr.test2_1_5((y&~3)+3); + y = tsr.test2_1_6((y&~3)+3); + + y = tsr.test2_2_0(y&~3); + y = tsr.test2_2_1(y&~3); + y = tsr.test2_2_2(y&~3); + y = tsr.test2_2_3(y&~3); + y = tsr.test2_2_4(y&~3); + y = tsr.test2_2_5(y&~3); + y = tsr.test2_2_6(y&~3); + y = tsr.test2_2_0((y&~3)+3); + y = tsr.test2_2_1((y&~3)+3); + y = tsr.test2_2_2((y&~3)+3); + y = tsr.test2_2_3((y&~3)+3); + y = tsr.test2_2_4((y&~3)+3); + y = tsr.test2_2_5((y&~3)+3); + y = tsr.test2_2_6((y&~3)+3); + + } + for (int i=0; i<10000; i++) { + y = tsr.test0_0_0(y); + y = tsr.test0_0_0(y); + y = tsr.test0_0_1(y); + y = tsr.test0_0_1(y); + y = tsr.test0_0_2(y); + y = tsr.test0_0_2(y); + y = tsr.test0_0_3(y); + y = tsr.test0_0_3(y); + y = tsr.test0_0_4(y); + y = tsr.test0_0_4(y); + y = 
tsr.test0_0_5(y); + y = tsr.test0_0_5(y); + y = tsr.test0_0_6(y); + y = tsr.test0_0_6(y); + + y = tsr.test0_1_3(y); + y = tsr.test0_1_3(y); + y = tsr.test0_1_4(y); + y = tsr.test0_1_4(y); + y = tsr.test0_1_5(y); + y = tsr.test0_1_5(y); + y = tsr.test0_1_6(y); + y = tsr.test0_1_6(y); + + y = tsr.test1_0_0(y&~1); + y = tsr.test1_0_1(y&~1); + y = tsr.test1_0_2(y&~1); + y = tsr.test1_0_3(y&~1); + y = tsr.test1_0_4(y&~1); + y = tsr.test1_0_5(y&~1); + y = tsr.test1_0_6(y&~1); + y = tsr.test1_0_0((y&~1)+1); + y = tsr.test1_0_1((y&~1)+1); + y = tsr.test1_0_2((y&~1)+1); + y = tsr.test1_0_3((y&~1)+1); + y = tsr.test1_0_4((y&~1)+1); + y = tsr.test1_0_5((y&~1)+1); + y = tsr.test1_0_6((y&~1)+1); + + y = tsr.test1_1_0(y&~1); + y = tsr.test1_1_1(y&~1); + y = tsr.test1_1_2(y&~1); + y = tsr.test1_1_3(y&~1); + y = tsr.test1_1_4(y&~1); + y = tsr.test1_1_5(y&~1); + y = tsr.test1_1_6(y&~1); + y = tsr.test1_1_0((y&~1)+1); + y = tsr.test1_1_1((y&~1)+1); + y = tsr.test1_1_2((y&~1)+1); + y = tsr.test1_1_3((y&~1)+1); + y = tsr.test1_1_4((y&~1)+1); + y = tsr.test1_1_5((y&~1)+1); + y = tsr.test1_1_6((y&~1)+1); + + y = tsr.test1_2_0(y&~1); + y = tsr.test1_2_1(y&~1); + y = tsr.test1_2_2(y&~1); + y = tsr.test1_2_3(y&~1); + y = tsr.test1_2_4(y&~1); + y = tsr.test1_2_5(y&~1); + y = tsr.test1_2_6(y&~1); + y = tsr.test1_2_0((y&~1)+1); + y = tsr.test1_2_1((y&~1)+1); + y = tsr.test1_2_2((y&~1)+1); + y = tsr.test1_2_3((y&~1)+1); + y = tsr.test1_2_4((y&~1)+1); + y = tsr.test1_2_5((y&~1)+1); + y = tsr.test1_2_6((y&~1)+1); + + y = tsr.test2_0_0(y&~3); + y = tsr.test2_0_1(y&~3); + y = tsr.test2_0_2(y&~3); + y = tsr.test2_0_3(y&~3); + y = tsr.test2_0_4(y&~3); + y = tsr.test2_0_5(y&~3); + y = tsr.test2_0_6(y&~3); + y = tsr.test2_0_0((y&~3)+3); + y = tsr.test2_0_1((y&~3)+3); + y = tsr.test2_0_2((y&~3)+3); + y = tsr.test2_0_3((y&~3)+3); + y = tsr.test2_0_4((y&~3)+3); + y = tsr.test2_0_5((y&~3)+3); + y = tsr.test2_0_6((y&~3)+3); + + y = tsr.test2_1_0(y&~3); + y = tsr.test2_1_1(y&~3); + y = tsr.test2_1_2(y&~3); + y = tsr.test2_1_3(y&~3); + y = tsr.test2_1_4(y&~3); + y = tsr.test2_1_5(y&~3); + y = tsr.test2_1_6(y&~3); + y = tsr.test2_1_0((y&~3)+3); + y = tsr.test2_1_1((y&~3)+3); + y = tsr.test2_1_2((y&~3)+3); + y = tsr.test2_1_3((y&~3)+3); + y = tsr.test2_1_4((y&~3)+3); + y = tsr.test2_1_5((y&~3)+3); + y = tsr.test2_1_6((y&~3)+3); + + y = tsr.test2_2_0(y&~3); + y = tsr.test2_2_1(y&~3); + y = tsr.test2_2_2(y&~3); + y = tsr.test2_2_3(y&~3); + y = tsr.test2_2_4(y&~3); + y = tsr.test2_2_5(y&~3); + y = tsr.test2_2_6(y&~3); + y = tsr.test2_2_0((y&~3)+3); + y = tsr.test2_2_1((y&~3)+3); + y = tsr.test2_2_2((y&~3)+3); + y = tsr.test2_2_3((y&~3)+3); + y = tsr.test2_2_4((y&~3)+3); + y = tsr.test2_2_5((y&~3)+3); + y = tsr.test2_2_6((y&~3)+3); + + } + + int z = 0; + y = tsr.test0_0_0(0); + System.out.println("After 'test0_0_0' y=" + y); + y = tsr.test0_0_1(0); + System.out.println("After 'test0_0_1' y=" + y); + y = tsr.test0_0_2(0); + System.out.println("After 'test0_0_2' y=" + y); + y = tsr.test0_0_3(0); + System.out.println("After 'test0_0_3' y=" + y); + y = tsr.test0_0_4(0); + System.out.println("After 'test0_0_4' y=" + y); + y = tsr.test0_0_5(0); + System.out.println("After 'test0_0_5' y=" + y); + y = tsr.test0_0_6(0); + System.out.println("After 'test0_0_6' y=" + y); + y = tsr.test0_1_3(0); + System.out.println("After 'test0_1_3' y=" + y); + y = tsr.test0_1_4(0); + System.out.println("After 'test0_1_4' y=" + y); + y = tsr.test0_1_5(0); + System.out.println("After 'test0_1_5' y=" + y); + y = tsr.test0_1_6(0); + System.out.println("After 
'test0_1_6' y=" + y); + + y = tsr.test1_0_0(0); + System.out.println("After 'test1_0_0' y=" + y); + y = tsr.test1_0_1(0); + System.out.println("After 'test1_0_1' y=" + y); + y = tsr.test1_0_2(0); + System.out.println("After 'test1_0_2' y=" + y); + y = tsr.test1_0_3(0); + System.out.println("After 'test1_0_3' y=" + y); + y = tsr.test1_0_4(0); + System.out.println("After 'test1_0_4' y=" + y); + y = tsr.test1_0_5(0); + System.out.println("After 'test1_0_5' y=" + y); + y = tsr.test1_0_6(0); + System.out.println("After 'test1_0_6' y=" + y); + + y = tsr.test1_1_0(0); + System.out.println("After 'test1_1_0' y=" + y); + y = tsr.test1_1_1(0); + System.out.println("After 'test1_1_1' y=" + y); + y = tsr.test1_1_2(0); + System.out.println("After 'test1_1_2' y=" + y); + y = tsr.test1_1_3(0); + System.out.println("After 'test1_1_3' y=" + y); + y = tsr.test1_1_4(0); + System.out.println("After 'test1_1_4' y=" + y); + y = tsr.test1_1_5(0); + System.out.println("After 'test1_1_5' y=" + y); + y = tsr.test1_1_6(0); + System.out.println("After 'test1_1_6' y=" + y); + + y = tsr.test1_2_0(0); + System.out.println("After 'test1_2_0' y=" + y); + y = tsr.test1_2_1(0); + System.out.println("After 'test1_2_1' y=" + y); + y = tsr.test1_2_2(0); + System.out.println("After 'test1_2_2' y=" + y); + y = tsr.test1_2_3(0); + System.out.println("After 'test1_2_3' y=" + y); + y = tsr.test1_2_4(0); + System.out.println("After 'test1_2_4' y=" + y); + y = tsr.test1_2_5(0); + System.out.println("After 'test1_2_5' y=" + y); + y = tsr.test1_2_6(0); + System.out.println("After 'test1_2_6' y=" + y); + + y = tsr.test2_0_0(0); + System.out.println("After 'test2_0_0' y=" + y); + y = tsr.test2_0_1(0); + System.out.println("After 'test2_0_1' y=" + y); + y = tsr.test2_0_2(0); + System.out.println("After 'test2_0_2' y=" + y); + y = tsr.test2_0_3(0); + System.out.println("After 'test2_0_3' y=" + y); + y = tsr.test2_0_4(0); + System.out.println("After 'test2_0_4' y=" + y); + y = tsr.test2_0_5(0); + System.out.println("After 'test2_0_5' y=" + y); + y = tsr.test2_0_6(0); + System.out.println("After 'test2_0_6' y=" + y); + + y = tsr.test2_1_0(0); + System.out.println("After 'test2_1_0' y=" + y); + y = tsr.test2_1_1(0); + System.out.println("After 'test2_1_1' y=" + y); + y = tsr.test2_1_2(0); + System.out.println("After 'test2_1_2' y=" + y); + y = tsr.test2_1_3(0); + System.out.println("After 'test2_1_3' y=" + y); + y = tsr.test2_1_4(0); + System.out.println("After 'test2_1_4' y=" + y); + y = tsr.test2_1_5(0); + System.out.println("After 'test2_1_5' y=" + y); + y = tsr.test2_1_6(0); + System.out.println("After 'test2_1_6' y=" + y); + + y = tsr.test2_2_0(0); + System.out.println("After 'test2_2_0' y=" + y); + y = tsr.test2_2_1(0); + System.out.println("After 'test2_2_1' y=" + y); + y = tsr.test2_2_2(0); + System.out.println("After 'test2_2_2' y=" + y); + y = tsr.test2_2_3(0); + System.out.println("After 'test2_2_3' y=" + y); + y = tsr.test2_2_4(0); + System.out.println("After 'test2_2_4' y=" + y); + y = tsr.test2_2_5(0); + System.out.println("After 'test2_2_5' y=" + y); + y = tsr.test2_2_6(0); + System.out.println("After 'test2_2_6' y=" + y); + + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6741738/Tester.java 2009-08-01 04:21:36.291803122 +0100 @@ -0,0 +1,50 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/* + * @test + * @bug 6741738 + * @summary TypePtr::add_offset() set incorrect offset when the add overflows + * @run main/othervm -Xcomp -XX:CompileOnly=Tester.foo Tester + */ + +public class Tester { + private String[] values; + private int count; + + String foo() { + int i = Integer.MAX_VALUE-1; + String s; + try { + s = values[i]; + } catch (Throwable e) { + s = ""; + } + return s; + } + + public static void main(String[] args) { + Tester t = new Tester(); + String s = t.foo(); + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6756768/Test6756768.java 2009-08-01 04:21:36.742624943 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/** + * @test + * @bug 6756768 + * @summary C1 generates invalid code + * + * @run main/othervm -Xcomp Test6756768 + */ + +class Test6756768a +{ + static boolean var_1 = true; +} + +final class Test6756768b +{ + static boolean var_24 = false; + static int var_25 = 0; + + static boolean var_temp1 = Test6756768a.var_1 = false; +} + +public final class Test6756768 extends Test6756768a +{ + final static int var = var_1 ^ (Test6756768b.var_24 ? var_1 : var_1) ? Test6756768b.var_25 : 1; + + static public void main(String[] args) { + if (var != 0) { + throw new InternalError("var = " + var); + } + } + +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6756768/Test6756768_2.java 2009-08-01 04:21:37.152885694 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/** + * @test + * @bug 6756768 + * @summary C1 generates invalid code + * + * @run main/othervm -Xcomp Test6756768_2 + */ + +class Test6756768_2a { + static int var = ++Test6756768_2.var; +} + +public class Test6756768_2 { + static int var = 1; + + static Object d2 = null; + + static void test_static_field() { + int v = var; + int v2 = Test6756768_2a.var; + int v3 = var; + var = v3; + } + + public static void main(String[] args) { + var = 1; + test_static_field(); + if (var != 2) { + throw new InternalError(); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6757316/Test6757316.java 2009-08-01 04:21:37.597408063 +0100 @@ -0,0 +1,43 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/** + * @test + * @bug 6757316 + * @summary load_constant() produces a wrong long constant, with high a low words swapped + * @run main/othervm -Xcomp Test6757316 + */ + +public class Test6757316 { + public static void main(String[] args) { + long[] arr = { + 0x11111111aaaaaaaaL, + 0xaaaaaaaa11111111L, + 0x11111111aaaaaaaaL, + 0xaaaaaaaa11111111L + }; + if (arr[0] == arr[1]) { + throw new InternalError(); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6758234/Test6758234.java 2009-08-01 04:21:38.027674606 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/** + * @test + * @bug 6758234 + * @summary if (k cond (a ? : b: c)) returns reversed answer if k is constant and b and c are longs + * @run main/othervm -Xcomp -XX:CompileOnly=Test6758234.main Test6758234 + */ + +public class Test6758234 { + static int x = 0; + static int y = 1; + + public static void main(String[] args) { + if (1 != ((x < y) ? 1L : 0)) { + throw new InternalError(); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6775880/Test.java 2009-08-01 04:21:38.478206506 +0100 @@ -0,0 +1,67 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +/* + * @test + * @bug 6775880 + * @summary EA +DeoptimizeALot: assert(mon_info->owner()->is_locked(),"object must be locked now") + * @compile -source 1.4 -target 1.4 Test.java + * @run main/othervm -server -Xbatch -XX:+DoEscapeAnalysis -XX:+DeoptimizeALot -XX:CompileCommand=exclude,java.lang.AbstractStringBuilder::append Test + */ + +public class Test { + + int cnt; + int b[]; + String s; + + String test() { + String res=""; + for (int i=0; i < cnt; i++) { + if (i != 0) { + res = res +"."; + } + res = res + b[i]; + } + return res; + } + + public static void main(String[] args) { + Test t = new Test(); + t.cnt = 3; + t.b = new int[3]; + t.b[0] = 0; + t.b[1] = 1; + t.b[2] = 2; + int j=0; + t.s = ""; + for (int i=0; i<10001; i++) { + t.s = "c"; + t.s = t.test(); + } + System.out.println("After s=" + t.s); + } +} + + --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6795362/Test6795362.java 2009-08-01 04:21:38.928698187 +0100 @@ -0,0 +1,48 @@ +/* + * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/** + * @test + * @bug 6795362 + * @summary 32bit server compiler leads to wrong results on solaris-x86 + * + * @run main/othervm -Xcomp -XX:CompileOnly=Test6795362.sub Test6795362 + */ + +public class Test6795362 { + public static void main(String[] args) + { + sub(); + + if (var_bad != 0) + throw new InternalError(var_bad + " != 0"); + } + + static long var_bad = -1L; + + static void sub() + { + var_bad >>= 65; + var_bad /= 65; + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/compiler/6805724/Test6805724.java 2009-08-01 04:21:39.379070406 +0100 @@ -0,0 +1,80 @@ +/* + * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/** + * @test + * @bug 6805724 + * @summary ModLNode::Ideal() generates functionally incorrect graph when divisor is any (2^k-1) constant. + * + * @run main/othervm -Xcomp -XX:CompileOnly=Test6805724.fcomp Test6805724 + */ + +import java.net.URLClassLoader; + +public class Test6805724 implements Runnable { + // Initialize DIVISOR so that it is final in this class. + static final long DIVISOR; // 2^k-1 constant + + static { + long value = 0; + try { + value = Long.decode(System.getProperty("divisor")); + } catch (Throwable t) { + // This one is required for the Class.forName() in main. + } + DIVISOR = value; + } + + static long fint(long x) { + return x % DIVISOR; + } + + static long fcomp(long x) { + return x % DIVISOR; + } + + public void run() { + long a = 0x617981E1L; + + long expected = fint(a); + long result = fcomp(a); + + if (result != expected) + throw new InternalError(result + " != " + expected); + } + + public static void main(String args[]) throws Exception { + Class cl = Class.forName("Test6805724"); + URLClassLoader apploader = (URLClassLoader) cl.getClassLoader(); + + // Iterate over all 2^k-1 divisors. + for (int k = 1; k < Long.SIZE; k++) { + long divisor = (1L << k) - 1; + System.setProperty("divisor", "" + divisor); + ClassLoader loader = new URLClassLoader(apploader.getURLs(), apploader.getParent()); + Class c = loader.loadClass("Test6805724"); + Runnable r = (Runnable) c.newInstance(); + r.run(); + } + } +} --- /dev/null 2009-07-29 04:07:07.141863912 +0100 +++ new/hotspot/test/runtime/6819213/TestBootNativeLibraryPath.java 2009-08-01 04:21:39.872062061 +0100 @@ -0,0 +1,133 @@ +/* + * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ + +/* + * @test TestBootNativeLibraryPath.java + * @bug 6819213 + * @compile -XDignore.symbol.file TestBootNativeLibraryPath.java + * @summary verify sun.boot.native.library.path is expandable on 32 bit systems + * @run main TestBootNativeLibraryPath + * @author ksrini +*/ + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.tools.JavaCompiler; +import javax.tools.ToolProvider; + +public class TestBootNativeLibraryPath { + + private static final String TESTFILE = "Test6"; + + static void createTestClass() throws IOException { + FileOutputStream fos = new FileOutputStream(TESTFILE + ".java"); + PrintStream ps = new PrintStream(fos); + ps.println("public class " + TESTFILE + "{"); + ps.println("public static void main(String[] args) {\n"); + ps.println("System.out.println(System.getProperty(\"sun.boot.library.path\"));\n"); + ps.println("}}\n"); + ps.close(); + fos.close(); + + JavaCompiler javac = ToolProvider.getSystemJavaCompiler(); + String javacOpts[] = {TESTFILE + ".java"}; + if (javac.run(null, null, null, javacOpts) != 0) { + throw new RuntimeException("compilation of " + TESTFILE + ".java Failed"); + } + } + + static List doExec(String... args) { + String javaCmd = System.getProperty("java.home") + "/bin/java"; + if (!new File(javaCmd).exists()) { + javaCmd = System.getProperty("java.home") + "/bin/java.exe"; + } + + ArrayList cmds = new ArrayList(); + cmds.add(javaCmd); + for (String x : args) { + cmds.add(x); + } + System.out.println("cmds=" + cmds); + ProcessBuilder pb = new ProcessBuilder(cmds); + + Map env = pb.environment(); + pb.directory(new File(".")); + + List out = new ArrayList(); + try { + pb.redirectErrorStream(true); + Process p = pb.start(); + BufferedReader rd = new BufferedReader(new InputStreamReader(p.getInputStream()),8192); + String in = rd.readLine(); + while (in != null) { + out.add(in); + System.out.println(in); + in = rd.readLine(); + } + int retval = p.waitFor(); + p.destroy(); + if (retval != 0) { + throw new RuntimeException("Error: test returned non-zero value"); + } + return out; + } catch (Exception ex) { + ex.printStackTrace(); + throw new RuntimeException(ex.getMessage()); + } + } + + public static void main(String[] args) { + try { + if (!System.getProperty("sun.arch.data.model").equals("32")) { + System.out.println("Warning: test skipped for 64-bit systems\n"); + return; + } + String osname = System.getProperty("os.name"); + if (osname.startsWith("Windows")) { + osname = "Windows"; + } + + createTestClass(); + + // Test a simple path + String libpath = File.pathSeparator + "tmp" + File.pathSeparator + "foobar"; + List processOut = null; + String sunbootlibrarypath = "-Dsun.boot.library.path=" + libpath; + processOut = doExec(sunbootlibrarypath, "-cp", ".", TESTFILE); + if (processOut == null || !processOut.get(0).endsWith(libpath)) { + throw new RuntimeException("Error: did not get expected error string"); + } + } catch (IOException ex) { + throw new RuntimeException("Unexpected error " + ex); + } + } +} --- old/hotspot/src/cpu/sparc/vm/disassembler_sparc.cpp 2009-08-01 04:21:40.360001525 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,233 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)disassembler_sparc.cpp 1.51 07/05/17 15:48:03 JVM" 
-#endif -/* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -# include "incls/_precompiled.incl" -# include "incls/_disassembler_sparc.cpp.incl" - -#ifndef PRODUCT - -#define SPARC_VERSION (VM_Version::v9_instructions_work()? \ - (VM_Version::v8_instructions_work()? "" : "9") : "8") - -// This routine is in the shared library: -typedef unsigned char* print_insn_sparc_t(unsigned char* start, DisassemblerEnv* env, - const char* sparc_version); - -void* Disassembler::_library = NULL; -dll_func Disassembler::_print_insn_sparc = NULL; - -bool Disassembler::load_library() { - if (_library == NULL) { - char buf[1024]; - char ebuf[1024]; - sprintf(buf, "disassembler%s", os::dll_file_extension()); - _library = hpi::dll_load(buf, ebuf, sizeof ebuf); - if (_library != NULL) { - tty->print_cr("Loaded disassembler"); - _print_insn_sparc = CAST_TO_FN_PTR(dll_func, hpi::dll_lookup(_library, "print_insn_sparc")); - } - } - return (_library != NULL) && (_print_insn_sparc != NULL); -} - - -class sparc_env : public DisassemblerEnv { - private: - nmethod* code; - outputStream* output; - const char* version; - - static void print_address(address value, outputStream* st); - - public: - sparc_env(nmethod* rcode, outputStream* routput) { - code = rcode; - output = routput; - version = SPARC_VERSION; - } - const char* sparc_version() { return version; } - void print_label(intptr_t value); - void print_raw(char* str) { output->print_raw(str); } - void print(char* format, ...); - char* string_for_offset(intptr_t value); - char* string_for_constant(unsigned char* pc, intptr_t value, int is_decimal); -}; - - -void sparc_env::print_address(address adr, outputStream* st) { - if (!Universe::is_fully_initialized()) { - st->print(INTPTR_FORMAT, (intptr_t)adr); - return; - } - if (StubRoutines::contains(adr)) { - StubCodeDesc *desc = StubCodeDesc::desc_for(adr); - if (desc == NULL) - desc = StubCodeDesc::desc_for(adr + frame::pc_return_offset); - if (desc == NULL) - st->print("Unknown stub at " INTPTR_FORMAT, adr); - else { - st->print("Stub::%s", desc->name()); - if (desc->begin() != adr) - st->print("%+d 0x%p",adr - desc->begin(), adr); - else if (WizardMode) st->print(" " INTPTR_FORMAT, adr); - } - } else { - BarrierSet* bs = Universe::heap()->barrier_set(); - if (bs->kind() == BarrierSet::CardTableModRef && - adr == (address)((CardTableModRefBS*)(bs))->byte_map_base) { - st->print("word_map_base"); - if (WizardMode) st->print(" " INTPTR_FORMAT, (intptr_t)adr); - } else { - st->print(INTPTR_FORMAT, 
(intptr_t)adr); - } - } -} - - -// called by the disassembler to print out jump addresses -void sparc_env::print_label(intptr_t value) { - print_address((address) value, output); -} - -void sparc_env::print(char* format, ...) { - va_list ap; - va_start(ap, format); - output->vprint(format, ap); - va_end(ap); -} - -char* sparc_env::string_for_offset(intptr_t value) { - stringStream st; - print_address((address) value, &st); - return st.as_string(); -} - -char* sparc_env::string_for_constant(unsigned char* pc, intptr_t value, int is_decimal) { - stringStream st; - oop obj; - if (code && (obj = code->embeddedOop_at(pc)) != NULL) { - obj->print_value_on(&st); - } else - { - print_address((address) value, &st); - } - return st.as_string(); -} - - -address Disassembler::decode_instruction(address start, DisassemblerEnv* env) { - const char* version = ((sparc_env*)env)->sparc_version(); - return ((print_insn_sparc_t*) _print_insn_sparc)(start, env, version); -} - - -const int show_bytes = false; // for disassembler debugging - - -void Disassembler::decode(CodeBlob* cb, outputStream* st) { - st = st ? st : tty; - st->print_cr("Decoding CodeBlob " INTPTR_FORMAT, cb); - decode(cb->instructions_begin(), cb->instructions_end(), st); -} - - -void Disassembler::decode(u_char* begin, u_char* end, outputStream* st) { - assert ((((intptr_t)begin | (intptr_t)end) % sizeof(int) == 0), "misaligned insn addr"); - st = st ? st : tty; - if (!load_library()) { - st->print_cr("Could not load disassembler"); - return; - } - sparc_env env(NULL, st); - unsigned char* p = (unsigned char*) begin; - CodeBlob* cb = CodeCache::find_blob_unsafe(begin); - while (p < (unsigned char*) end && p) { - if (cb != NULL) { - cb->print_block_comment(st, (intptr_t)(p - cb->instructions_begin())); - } - - unsigned char* p0 = p; - st->print(INTPTR_FORMAT ": ", p); - p = decode_instruction(p, &env); - if (show_bytes && p) { - st->print("\t\t\t"); - while (p0 < p) { st->print("%08lx ", *(int*)p0); p0 += sizeof(int); } - } - st->cr(); - } -} - - -void Disassembler::decode(nmethod* nm, outputStream* st) { - st = st ? 
st : tty; - - st->print_cr("Decoding compiled method " INTPTR_FORMAT ":", nm); - st->print("Code:"); - st->cr(); - - if (!load_library()) { - st->print_cr("Could not load disassembler"); - return; - } - sparc_env env(nm, st); - unsigned char* p = nm->instructions_begin(); - unsigned char* end = nm->instructions_end(); - assert ((((intptr_t)p | (intptr_t)end) % sizeof(int) == 0), "misaligned insn addr"); - - unsigned char *p1 = p; - int total_bucket_count = 0; - while (p1 < end && p1) { - unsigned char *p0 = p1; - ++p1; - address bucket_pc = FlatProfiler::bucket_start_for(p1); - if (bucket_pc != NULL && bucket_pc > p0 && bucket_pc <= p1) - total_bucket_count += FlatProfiler::bucket_count_for(p0); - } - - while (p < end && p) { - if (p == nm->entry_point()) st->print_cr("[Entry Point]"); - if (p == nm->verified_entry_point()) st->print_cr("[Verified Entry Point]"); - if (p == nm->exception_begin()) st->print_cr("[Exception Handler]"); - if (p == nm->stub_begin()) st->print_cr("[Stub Code]"); - if (p == nm->consts_begin()) st->print_cr("[Constants]"); - nm->print_block_comment(st, (intptr_t)(p - nm->instructions_begin())); - unsigned char* p0 = p; - st->print(" " INTPTR_FORMAT ": ", p); - p = decode_instruction(p, &env); - nm->print_code_comment_on(st, 40, p0, p); - st->cr(); - // Output pc bucket ticks if we have any - address bucket_pc = FlatProfiler::bucket_start_for(p); - if (bucket_pc != NULL && bucket_pc > p0 && bucket_pc <= p) { - int bucket_count = FlatProfiler::bucket_count_for(p0); - tty->print_cr("%3.1f%% [%d]", bucket_count*100.0/total_bucket_count, bucket_count); - tty->cr(); - } - } -} - -#endif // PRODUCT --- old/hotspot/src/cpu/x86/vm/assembler_x86_32.cpp 2009-08-01 04:21:40.673069674 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,4982 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)assembler_x86_32.cpp 1.245 07/09/20 10:42:55 JVM" -#endif -/* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -#include "incls/_precompiled.incl" -#include "incls/_assembler_x86_32.cpp.incl" - -// Implementation of AddressLiteral - -AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) { - _is_lval = false; - _target = target; - switch (rtype) { - case relocInfo::oop_type: - // Oops are a special case. Normally they would be their own section - // but in cases like icBuffer they are literals in the code stream that - // we don't have a section for. We use none so that we get a literal address - // which is always patchable. 
- break; - case relocInfo::external_word_type: - _rspec = external_word_Relocation::spec(target); - break; - case relocInfo::internal_word_type: - _rspec = internal_word_Relocation::spec(target); - break; - case relocInfo::opt_virtual_call_type: - _rspec = opt_virtual_call_Relocation::spec(); - break; - case relocInfo::static_call_type: - _rspec = static_call_Relocation::spec(); - break; - case relocInfo::runtime_call_type: - _rspec = runtime_call_Relocation::spec(); - break; - case relocInfo::poll_type: - case relocInfo::poll_return_type: - _rspec = Relocation::spec_simple(rtype); - break; - case relocInfo::none: - break; - default: - ShouldNotReachHere(); - break; - } -} - -// Implementation of Address - -Address Address::make_array(ArrayAddress adr) { -#ifdef _LP64 - // Not implementable on 64bit machines - // Should have been handled higher up the call chain. - ShouldNotReachHere(); -#else - AddressLiteral base = adr.base(); - Address index = adr.index(); - assert(index._disp == 0, "must not have disp"); // maybe it can? - Address array(index._base, index._index, index._scale, (intptr_t) base.target()); - array._rspec = base._rspec; - return array; -#endif // _LP64 -} - -#ifndef _LP64 - -// exceedingly dangerous constructor -Address::Address(address loc, RelocationHolder spec) { - _base = noreg; - _index = noreg; - _scale = no_scale; - _disp = (intptr_t) loc; - _rspec = spec; -} -#endif // _LP64 - -// Convert the raw encoding form into the form expected by the constructor for -// Address. An index of 4 (rsp) corresponds to having no index, so convert -// that to noreg for the Address constructor. -Address Address::make_raw(int base, int index, int scale, int disp) { - bool valid_index = index != rsp->encoding(); - if (valid_index) { - Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp)); - return madr; - } else { - Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp)); - return madr; - } -} - -// Implementation of Assembler - -int AbstractAssembler::code_fill_byte() { - return (u_char)'\xF4'; // hlt -} - -// make this go away someday -void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) { - if (rtype == relocInfo::none) - emit_long(data); - else emit_data(data, Relocation::spec_simple(rtype), format); -} - - -void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) { - assert(imm32_operand == 0, "default format must be imm32 in this file"); - assert(inst_mark() != NULL, "must be inside InstructionMark"); - if (rspec.type() != relocInfo::none) { - #ifdef ASSERT - check_relocation(rspec, format); - #endif - // Do not use AbstractAssembler::relocate, which is not intended for - // embedded words. Instead, relocate to the enclosing instruction. - - // hack. 
call32 is too wide for mask so use disp32 - if (format == call32_operand) - code_section()->relocate(inst_mark(), rspec, disp32_operand); - else - code_section()->relocate(inst_mark(), rspec, format); - } - emit_long(data); -} - - -void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { - assert(dst->has_byte_register(), "must have byte register"); - assert(isByte(op1) && isByte(op2), "wrong opcode"); - assert(isByte(imm8), "not a byte"); - assert((op1 & 0x01) == 0, "should be 8bit operation"); - emit_byte(op1); - emit_byte(op2 | dst->encoding()); - emit_byte(imm8); -} - - -void Assembler::emit_arith(int op1, int op2, Register dst, int imm32) { - assert(isByte(op1) && isByte(op2), "wrong opcode"); - assert((op1 & 0x01) == 1, "should be 32bit operation"); - assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); - if (is8bit(imm32)) { - emit_byte(op1 | 0x02); // set sign bit - emit_byte(op2 | dst->encoding()); - emit_byte(imm32 & 0xFF); - } else { - emit_byte(op1); - emit_byte(op2 | dst->encoding()); - emit_long(imm32); - } -} - -// immediate-to-memory forms -void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int imm32) { - assert((op1 & 0x01) == 1, "should be 32bit operation"); - assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); - if (is8bit(imm32)) { - emit_byte(op1 | 0x02); // set sign bit - emit_operand(rm,adr); - emit_byte(imm32 & 0xFF); - } else { - emit_byte(op1); - emit_operand(rm,adr); - emit_long(imm32); - } -} - -void Assembler::emit_arith(int op1, int op2, Register dst, jobject obj) { - assert(isByte(op1) && isByte(op2), "wrong opcode"); - assert((op1 & 0x01) == 1, "should be 32bit operation"); - assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); - InstructionMark im(this); - emit_byte(op1); - emit_byte(op2 | dst->encoding()); - emit_data((int)obj, relocInfo::oop_type, 0); -} - - -void Assembler::emit_arith(int op1, int op2, Register dst, Register src) { - assert(isByte(op1) && isByte(op2), "wrong opcode"); - emit_byte(op1); - emit_byte(op2 | dst->encoding() << 3 | src->encoding()); -} - - -void Assembler::emit_operand(Register reg, - Register base, - Register index, - Address::ScaleFactor scale, - int disp, - RelocationHolder const& rspec) { - - relocInfo::relocType rtype = (relocInfo::relocType) rspec.type(); - if (base->is_valid()) { - if (index->is_valid()) { - assert(scale != Address::no_scale, "inconsistent address"); - // [base + index*scale + disp] - if (disp == 0 && rtype == relocInfo::none && base != rbp) { - // [base + index*scale] - // [00 reg 100][ss index base] - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x04 | reg->encoding() << 3); - emit_byte(scale << 6 | index->encoding() << 3 | base->encoding()); - } else if (is8bit(disp) && rtype == relocInfo::none) { - // [base + index*scale + imm8] - // [01 reg 100][ss index base] imm8 - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x44 | reg->encoding() << 3); - emit_byte(scale << 6 | index->encoding() << 3 | base->encoding()); - emit_byte(disp & 0xFF); - } else { - // [base + index*scale + imm32] - // [10 reg 100][ss index base] imm32 - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x84 | reg->encoding() << 3); - emit_byte(scale << 6 | index->encoding() << 3 | base->encoding()); - emit_data(disp, rspec, disp32_operand); - } - } else if (base == rsp) { - // [esp + disp] - if (disp == 0 && rtype == relocInfo::none) { - // [esp] - // [00 reg 100][00 100 100] - emit_byte(0x04 | reg->encoding() << 
3); - emit_byte(0x24); - } else if (is8bit(disp) && rtype == relocInfo::none) { - // [esp + imm8] - // [01 reg 100][00 100 100] imm8 - emit_byte(0x44 | reg->encoding() << 3); - emit_byte(0x24); - emit_byte(disp & 0xFF); - } else { - // [esp + imm32] - // [10 reg 100][00 100 100] imm32 - emit_byte(0x84 | reg->encoding() << 3); - emit_byte(0x24); - emit_data(disp, rspec, disp32_operand); - } - } else { - // [base + disp] - assert(base != rsp, "illegal addressing mode"); - if (disp == 0 && rtype == relocInfo::none && base != rbp) { - // [base] - // [00 reg base] - assert(base != rbp, "illegal addressing mode"); - emit_byte(0x00 | reg->encoding() << 3 | base->encoding()); - } else if (is8bit(disp) && rtype == relocInfo::none) { - // [base + imm8] - // [01 reg base] imm8 - emit_byte(0x40 | reg->encoding() << 3 | base->encoding()); - emit_byte(disp & 0xFF); - } else { - // [base + imm32] - // [10 reg base] imm32 - emit_byte(0x80 | reg->encoding() << 3 | base->encoding()); - emit_data(disp, rspec, disp32_operand); - } - } - } else { - if (index->is_valid()) { - assert(scale != Address::no_scale, "inconsistent address"); - // [index*scale + disp] - // [00 reg 100][ss index 101] imm32 - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x04 | reg->encoding() << 3); - emit_byte(scale << 6 | index->encoding() << 3 | 0x05); - emit_data(disp, rspec, disp32_operand); - } else { - // [disp] - // [00 reg 101] imm32 - emit_byte(0x05 | reg->encoding() << 3); - emit_data(disp, rspec, disp32_operand); - } - } -} - -// Secret local extension to Assembler::WhichOperand: -#define end_pc_operand (_WhichOperand_limit) - -address Assembler::locate_operand(address inst, WhichOperand which) { - // Decode the given instruction, and return the address of - // an embedded 32-bit operand word. - - // If "which" is disp32_operand, selects the displacement portion - // of an effective address specifier. - // If "which" is imm32_operand, selects the trailing immediate constant. - // If "which" is call32_operand, selects the displacement of a call or jump. - // Caller is responsible for ensuring that there is such an operand, - // and that it is 32 bits wide. - - // If "which" is end_pc_operand, find the end of the instruction. - - address ip = inst; - - debug_only(bool has_imm32 = false); - int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn - - again_after_prefix: - switch (0xFF & *ip++) { - - // These convenience macros generate groups of "case" labels for the switch. - #define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3 - #define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \ - case (x)+4: case (x)+5: case (x)+6: case (x)+7 - #define REP16(x) REP8((x)+0): \ - case REP8((x)+8) - - case CS_segment: - case SS_segment: - case DS_segment: - case ES_segment: - case FS_segment: - case GS_segment: - assert(ip == inst+1, "only one prefix allowed"); - goto again_after_prefix; - - case 0xFF: // pushl a; decl a; incl a; call a; jmp a - case 0x88: // movb a, r - case 0x89: // movl a, r - case 0x8A: // movb r, a - case 0x8B: // movl r, a - case 0x8F: // popl a - break; - - case 0x68: // pushl #32(oop?) - if (which == end_pc_operand) return ip + 4; - assert(which == imm32_operand, "pushl has no disp32"); - return ip; // not produced by emit_operand - - case 0x66: // movw ... 
(size prefix) - switch (0xFF & *ip++) { - case 0x8B: // movw r, a - case 0x89: // movw a, r - break; - case 0xC7: // movw a, #16 - tail_size = 2; // the imm16 - break; - case 0x0F: // several SSE/SSE2 variants - ip--; // reparse the 0x0F - goto again_after_prefix; - default: - ShouldNotReachHere(); - } - break; - - case REP8(0xB8): // movl r, #32(oop?) - if (which == end_pc_operand) return ip + 4; - assert(which == imm32_operand || which == disp32_operand, ""); - return ip; - - case 0x69: // imul r, a, #32 - case 0xC7: // movl a, #32(oop?) - tail_size = 4; - debug_only(has_imm32 = true); // has both kinds of operands! - break; - - case 0x0F: // movx..., etc. - switch (0xFF & *ip++) { - case 0x12: // movlps - case 0x28: // movaps - case 0x2E: // ucomiss - case 0x2F: // comiss - case 0x54: // andps - case 0x55: // andnps - case 0x56: // orps - case 0x57: // xorps - case 0x6E: // movd - case 0x7E: // movd - case 0xAE: // ldmxcsr a - // amd side says it these have both operands but that doesn't - // appear to be true. - // debug_only(has_imm32 = true); // has both kinds of operands! - break; - - case 0xAD: // shrd r, a, %cl - case 0xAF: // imul r, a - case 0xBE: // movsxb r, a - case 0xBF: // movsxw r, a - case 0xB6: // movzxb r, a - case 0xB7: // movzxw r, a - case REP16(0x40): // cmovl cc, r, a - case 0xB0: // cmpxchgb - case 0xB1: // cmpxchg - case 0xC1: // xaddl - case 0xC7: // cmpxchg8 - case REP16(0x90): // setcc a - // fall out of the switch to decode the address - break; - case 0xAC: // shrd r, a, #8 - tail_size = 1; // the imm8 - break; - case REP16(0x80): // jcc rdisp32 - if (which == end_pc_operand) return ip + 4; - assert(which == call32_operand, "jcc has no disp32 or imm32"); - return ip; - default: - ShouldNotReachHere(); - } - break; - - case 0x81: // addl a, #32; addl r, #32 - // also: orl, adcl, sbbl, andl, subl, xorl, cmpl - // in the case of cmpl, the imm32 might be an oop - tail_size = 4; - debug_only(has_imm32 = true); // has both kinds of operands! - break; - - case 0x85: // test r/m, r - break; - - case 0x83: // addl a, #8; addl r, #8 - // also: orl, adcl, sbbl, andl, subl, xorl, cmpl - tail_size = 1; - break; - - case 0x9B: - switch (0xFF & *ip++) { - case 0xD9: // fnstcw a - break; - default: - ShouldNotReachHere(); - } - break; - - case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a - case REP4(0x10): // adc... - case REP4(0x20): // and... - case REP4(0x30): // xor... - case REP4(0x08): // or... - case REP4(0x18): // sbb... - case REP4(0x28): // sub... - case REP4(0x38): // cmp... 
- case 0xF7: // mull a - case 0x8D: // leal r, a - case 0x87: // xchg r, a - break; - - case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8 - case 0xC6: // movb a, #8 - case 0x80: // cmpb a, #8 - case 0x6B: // imul r, a, #8 - tail_size = 1; // the imm8 - break; - - case 0xE8: // call rdisp32 - case 0xE9: // jmp rdisp32 - if (which == end_pc_operand) return ip + 4; - assert(which == call32_operand, "call has no disp32 or imm32"); - return ip; - - case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1 - case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl - case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a - case 0xDD: // fld_d a; fst_d a; fstp_d a - case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a - case 0xDF: // fild_d a; fistp_d a - case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a - case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a - case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a - break; - - case 0xF3: // For SSE - case 0xF2: // For SSE2 - ip++; ip++; - break; - - default: - ShouldNotReachHere(); - - #undef REP8 - #undef REP16 - } - - assert(which != call32_operand, "instruction is not a call, jmp, or jcc"); - assert(which != imm32_operand || has_imm32, "instruction has no imm32 field"); - - // parse the output of emit_operand - int op2 = 0xFF & *ip++; - int base = op2 & 0x07; - int op3 = -1; - const int b100 = 4; - const int b101 = 5; - if (base == b100 && (op2 >> 6) != 3) { - op3 = 0xFF & *ip++; - base = op3 & 0x07; // refetch the base - } - // now ip points at the disp (if any) - - switch (op2 >> 6) { - case 0: - // [00 reg 100][ss index base] - // [00 reg 100][00 100 rsp] - // [00 reg base] - // [00 reg 100][ss index 101][disp32] - // [00 reg 101] [disp32] - - if (base == b101) { - if (which == disp32_operand) - return ip; // caller wants the disp32 - ip += 4; // skip the disp32 - } - break; - - case 1: - // [01 reg 100][ss index base][disp8] - // [01 reg 100][00 100 rsp][disp8] - // [01 reg base] [disp8] - ip += 1; // skip the disp8 - break; - - case 2: - // [10 reg 100][ss index base][disp32] - // [10 reg 100][00 100 rsp][disp32] - // [10 reg base] [disp32] - if (which == disp32_operand) - return ip; // caller wants the disp32 - ip += 4; // skip the disp32 - break; - - case 3: - // [11 reg base] (not a memory addressing mode) - break; - } - - if (which == end_pc_operand) { - return ip + tail_size; - } - - assert(which == imm32_operand, "instruction has only an imm32 field"); - return ip; -} - -address Assembler::locate_next_instruction(address inst) { - // Secretly share code with locate_operand: - return locate_operand(inst, end_pc_operand); -} - - -#ifdef ASSERT -void Assembler::check_relocation(RelocationHolder const& rspec, int format) { - address inst = inst_mark(); - assert(inst != NULL && inst < pc(), "must point to beginning of instruction"); - address opnd; - - Relocation* r = rspec.reloc(); - if (r->type() == relocInfo::none) { - return; - } else if (r->is_call() || format == call32_operand) { - // assert(format == imm32_operand, "cannot specify a nonzero format"); - opnd = locate_operand(inst, call32_operand); - } else if (r->is_data()) { - assert(format == imm32_operand || format == disp32_operand, "format ok"); - opnd = locate_operand(inst, (WhichOperand)format); - } else { - assert(format == imm32_operand, "cannot specify a format"); - return; - } - assert(opnd == pc(), "must put operand where relocs can find it"); -} -#endif - - - -void Assembler::emit_operand(Register reg, Address adr) { - 
emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); -} - - -void Assembler::emit_farith(int b1, int b2, int i) { - assert(isByte(b1) && isByte(b2), "wrong opcode"); - assert(0 <= i && i < 8, "illegal stack offset"); - emit_byte(b1); - emit_byte(b2 + i); -} - - -void Assembler::pushad() { - emit_byte(0x60); -} - -void Assembler::popad() { - emit_byte(0x61); -} - -void Assembler::pushfd() { - emit_byte(0x9C); -} - -void Assembler::popfd() { - emit_byte(0x9D); -} - -void Assembler::pushl(int imm32) { - emit_byte(0x68); - emit_long(imm32); -} - -#ifndef _LP64 -void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) { - InstructionMark im(this); - emit_byte(0x68); - emit_data(imm32, rspec, 0); -} -#endif // _LP64 - -void Assembler::pushl(Register src) { - emit_byte(0x50 | src->encoding()); -} - - -void Assembler::pushl(Address src) { - InstructionMark im(this); - emit_byte(0xFF); - emit_operand(rsi, src); -} - -void Assembler::popl(Register dst) { - emit_byte(0x58 | dst->encoding()); -} - - -void Assembler::popl(Address dst) { - InstructionMark im(this); - emit_byte(0x8F); - emit_operand(rax, dst); -} - - -void Assembler::prefix(Prefix p) { - a_byte(p); -} - - -void Assembler::movb(Register dst, Address src) { - assert(dst->has_byte_register(), "must have byte register"); - InstructionMark im(this); - emit_byte(0x8A); - emit_operand(dst, src); -} - - -void Assembler::movb(Address dst, int imm8) { - InstructionMark im(this); - emit_byte(0xC6); - emit_operand(rax, dst); - emit_byte(imm8); -} - - -void Assembler::movb(Address dst, Register src) { - assert(src->has_byte_register(), "must have byte register"); - InstructionMark im(this); - emit_byte(0x88); - emit_operand(src, dst); -} - - -void Assembler::movw(Address dst, int imm16) { - InstructionMark im(this); - - emit_byte(0x66); // switch to 16-bit mode - emit_byte(0xC7); - emit_operand(rax, dst); - emit_word(imm16); -} - - -void Assembler::movw(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x66); - emit_byte(0x8B); - emit_operand(dst, src); -} - - -void Assembler::movw(Address dst, Register src) { - InstructionMark im(this); - emit_byte(0x66); - emit_byte(0x89); - emit_operand(src, dst); -} - - -void Assembler::movl(Register dst, int imm32) { - emit_byte(0xB8 | dst->encoding()); - emit_long(imm32); -} - -#ifndef _LP64 -void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) { - - InstructionMark im(this); - emit_byte(0xB8 | dst->encoding()); - emit_data((int)imm32, rspec, 0); -} -#endif // _LP64 - -void Assembler::movl(Register dst, Register src) { - emit_byte(0x8B); - emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding()); -} - - -void Assembler::movl(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x8B); - emit_operand(dst, src); -} - - -void Assembler::movl(Address dst, int imm32) { - InstructionMark im(this); - emit_byte(0xC7); - emit_operand(rax, dst); - emit_long(imm32); -} - -#ifndef _LP64 -void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) { - InstructionMark im(this); - emit_byte(0xC7); - emit_operand(rax, dst); - emit_data((int)imm32, rspec, 0); -} -#endif // _LP64 - -void Assembler::movl(Address dst, Register src) { - InstructionMark im(this); - emit_byte(0x89); - emit_operand(src, dst); -} - -void Assembler::movsxb(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0xBE); - emit_operand(dst, src); -} - -void 
Assembler::movsxb(Register dst, Register src) { - assert(src->has_byte_register(), "must have byte register"); - emit_byte(0x0F); - emit_byte(0xBE); - emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding()); -} - - -void Assembler::movsxw(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0xBF); - emit_operand(dst, src); -} - - -void Assembler::movsxw(Register dst, Register src) { - emit_byte(0x0F); - emit_byte(0xBF); - emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding()); -} - - -void Assembler::movzxb(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0xB6); - emit_operand(dst, src); -} - - -void Assembler::movzxb(Register dst, Register src) { - assert(src->has_byte_register(), "must have byte register"); - emit_byte(0x0F); - emit_byte(0xB6); - emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding()); -} - - -void Assembler::movzxw(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0xB7); - emit_operand(dst, src); -} - - -void Assembler::movzxw(Register dst, Register src) { - emit_byte(0x0F); - emit_byte(0xB7); - emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding()); -} - - -void Assembler::cmovl(Condition cc, Register dst, Register src) { - guarantee(VM_Version::supports_cmov(), "illegal instruction"); - emit_byte(0x0F); - emit_byte(0x40 | cc); - emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding()); -} - - -void Assembler::cmovl(Condition cc, Register dst, Address src) { - guarantee(VM_Version::supports_cmov(), "illegal instruction"); - // The code below seems to be wrong - however the manual is inconclusive - // do not use for now (remember to enable all callers when fixing this) - Unimplemented(); - // wrong bytes? 
- InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0x40 | cc); - emit_operand(dst, src); -} - - -void Assembler::prefetcht0(Address src) { - assert(VM_Version::supports_sse(), "must support"); - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0x18); - emit_operand(rcx, src); // 1, src -} - - -void Assembler::prefetcht1(Address src) { - assert(VM_Version::supports_sse(), "must support"); - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0x18); - emit_operand(rdx, src); // 2, src -} - - -void Assembler::prefetcht2(Address src) { - assert(VM_Version::supports_sse(), "must support"); - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0x18); - emit_operand(rbx, src); // 3, src -} - - -void Assembler::prefetchnta(Address src) { - assert(VM_Version::supports_sse2(), "must support"); - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0x18); - emit_operand(rax, src); // 0, src -} - - -void Assembler::prefetchw(Address src) { - assert(VM_Version::supports_3dnow(), "must support"); - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0x0D); - emit_operand(rcx, src); // 1, src -} - - -void Assembler::prefetchr(Address src) { - assert(VM_Version::supports_3dnow(), "must support"); - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0x0D); - emit_operand(rax, src); // 0, src -} - - -void Assembler::adcl(Register dst, int imm32) { - emit_arith(0x81, 0xD0, dst, imm32); -} - - -void Assembler::adcl(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x13); - emit_operand(dst, src); -} - - -void Assembler::adcl(Register dst, Register src) { - emit_arith(0x13, 0xC0, dst, src); -} - - -void Assembler::addl(Address dst, int imm32) { - InstructionMark im(this); - emit_arith_operand(0x81,rax,dst,imm32); -} - - -void Assembler::addl(Address dst, Register src) { - InstructionMark im(this); - emit_byte(0x01); - emit_operand(src, dst); -} - - -void Assembler::addl(Register dst, int imm32) { - emit_arith(0x81, 0xC0, dst, imm32); -} - - -void Assembler::addl(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x03); - emit_operand(dst, src); -} - - -void Assembler::addl(Register dst, Register src) { - emit_arith(0x03, 0xC0, dst, src); -} - - -void Assembler::andl(Register dst, int imm32) { - emit_arith(0x81, 0xE0, dst, imm32); -} - - -void Assembler::andl(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x23); - emit_operand(dst, src); -} - - -void Assembler::andl(Register dst, Register src) { - emit_arith(0x23, 0xC0, dst, src); -} - - -void Assembler::cmpb(Address dst, int imm8) { - InstructionMark im(this); - emit_byte(0x80); - emit_operand(rdi, dst); - emit_byte(imm8); -} - -void Assembler::cmpw(Address dst, int imm16) { - InstructionMark im(this); - emit_byte(0x66); - emit_byte(0x81); - emit_operand(rdi, dst); - emit_word(imm16); -} - -void Assembler::cmpl(Address dst, int imm32) { - InstructionMark im(this); - emit_byte(0x81); - emit_operand(rdi, dst); - emit_long(imm32); -} - -#ifndef _LP64 -void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) { - InstructionMark im(this); - emit_byte(0x81); - emit_byte(0xF8 | src1->encoding()); - emit_data(imm32, rspec, 0); -} - -void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) { - InstructionMark im(this); - emit_byte(0x81); - emit_operand(rdi, src1); - emit_data(imm32, rspec, 0); -} -#endif // _LP64 - - -void Assembler::cmpl(Register dst, int imm32) { - emit_arith(0x81, 0xF8, dst, imm32); -} 
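
For readers tracing the ModRM comments in the deleted emit_operand code above ("[00 reg base]", "[01 reg base] imm8", "[10 reg base] imm32"): the form is chosen purely from the displacement width, with rbp and relocated displacements forced into the disp32 form. The following is a minimal, standalone C++ sketch of that mod-field selection only; the name encode_reg_base_disp is hypothetical, and the rsp/SIB and relocation cases handled by the real code are deliberately omitted. It is an illustration alongside the patch, not part of it.

    #include <cstdint>
    #include <vector>

    // Hypothetical illustration of the "[base]", "[base + disp8]" and
    // "[base + disp32]" ModRM forms documented in the deleted emit_operand
    // code. Assumes base != rsp (encoding 4 would require a SIB byte) and
    // no relocation attached to the displacement.
    static void encode_reg_base_disp(std::vector<uint8_t>& out,
                                     int reg, int base, int32_t disp) {
        const int kRBP = 5;                               // rbp has no disp-less form
        if (disp == 0 && base != kRBP) {
            out.push_back(0x00 | (reg << 3) | base);      // [00 reg base]
        } else if (disp >= -128 && disp <= 127) {
            out.push_back(0x40 | (reg << 3) | base);      // [01 reg base] imm8
            out.push_back(static_cast<uint8_t>(disp));
        } else {
            out.push_back(0x80 | (reg << 3) | base);      // [10 reg base] imm32
            for (int i = 0; i < 4; i++)                   // little-endian disp32
                out.push_back(static_cast<uint8_t>(
                    static_cast<uint32_t>(disp) >> (8 * i)));
        }
    }
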
- - -void Assembler::cmpl(Register dst, Register src) { - emit_arith(0x3B, 0xC0, dst, src); -} - - -void Assembler::cmpl(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x3B); - emit_operand(dst, src); -} - - -void Assembler::decl(Register dst) { - // Don't use it directly. Use MacroAssembler::decrement() instead. - emit_byte(0x48 | dst->encoding()); -} - - -void Assembler::decl(Address dst) { - // Don't use it directly. Use MacroAssembler::decrement() instead. - InstructionMark im(this); - emit_byte(0xFF); - emit_operand(rcx, dst); -} - - -void Assembler::idivl(Register src) { - emit_byte(0xF7); - emit_byte(0xF8 | src->encoding()); -} - - -void Assembler::cdql() { - emit_byte(0x99); -} - - -void Assembler::imull(Register dst, Register src) { - emit_byte(0x0F); - emit_byte(0xAF); - emit_byte(0xC0 | dst->encoding() << 3 | src->encoding()); -} - - -void Assembler::imull(Register dst, Register src, int value) { - if (is8bit(value)) { - emit_byte(0x6B); - emit_byte(0xC0 | dst->encoding() << 3 | src->encoding()); - emit_byte(value); - } else { - emit_byte(0x69); - emit_byte(0xC0 | dst->encoding() << 3 | src->encoding()); - emit_long(value); - } -} - - -void Assembler::incl(Register dst) { - // Don't use it directly. Use MacroAssembler::increment() instead. - emit_byte(0x40 | dst->encoding()); -} - - -void Assembler::incl(Address dst) { - // Don't use it directly. Use MacroAssembler::increment() instead. - InstructionMark im(this); - emit_byte(0xFF); - emit_operand(rax, dst); -} - - -void Assembler::leal(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x8D); - emit_operand(dst, src); -} - -void Assembler::mull(Address src) { - InstructionMark im(this); - emit_byte(0xF7); - emit_operand(rsp, src); -} - - -void Assembler::mull(Register src) { - emit_byte(0xF7); - emit_byte(0xE0 | src->encoding()); -} - - -void Assembler::negl(Register dst) { - emit_byte(0xF7); - emit_byte(0xD8 | dst->encoding()); -} - - -void Assembler::notl(Register dst) { - emit_byte(0xF7); - emit_byte(0xD0 | dst->encoding()); -} - - -void Assembler::orl(Address dst, int imm32) { - InstructionMark im(this); - emit_byte(0x81); - emit_operand(rcx, dst); - emit_long(imm32); -} - -void Assembler::orl(Register dst, int imm32) { - emit_arith(0x81, 0xC8, dst, imm32); -} - - -void Assembler::orl(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x0B); - emit_operand(dst, src); -} - - -void Assembler::orl(Register dst, Register src) { - emit_arith(0x0B, 0xC0, dst, src); -} - - -void Assembler::rcll(Register dst, int imm8) { - assert(isShiftCount(imm8), "illegal shift count"); - if (imm8 == 1) { - emit_byte(0xD1); - emit_byte(0xD0 | dst->encoding()); - } else { - emit_byte(0xC1); - emit_byte(0xD0 | dst->encoding()); - emit_byte(imm8); - } -} - - -void Assembler::sarl(Register dst, int imm8) { - assert(isShiftCount(imm8), "illegal shift count"); - if (imm8 == 1) { - emit_byte(0xD1); - emit_byte(0xF8 | dst->encoding()); - } else { - emit_byte(0xC1); - emit_byte(0xF8 | dst->encoding()); - emit_byte(imm8); - } -} - - -void Assembler::sarl(Register dst) { - emit_byte(0xD3); - emit_byte(0xF8 | dst->encoding()); -} - - -void Assembler::sbbl(Address dst, int imm32) { - InstructionMark im(this); - emit_arith_operand(0x81,rbx,dst,imm32); -} - - -void Assembler::sbbl(Register dst, int imm32) { - emit_arith(0x81, 0xD8, dst, imm32); -} - - -void Assembler::sbbl(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x1B); - emit_operand(dst, src); -} - - -void 
Assembler::sbbl(Register dst, Register src) { - emit_arith(0x1B, 0xC0, dst, src); -} - - -void Assembler::shldl(Register dst, Register src) { - emit_byte(0x0F); - emit_byte(0xA5); - emit_byte(0xC0 | src->encoding() << 3 | dst->encoding()); -} - - -void Assembler::shll(Register dst, int imm8) { - assert(isShiftCount(imm8), "illegal shift count"); - if (imm8 == 1 ) { - emit_byte(0xD1); - emit_byte(0xE0 | dst->encoding()); - } else { - emit_byte(0xC1); - emit_byte(0xE0 | dst->encoding()); - emit_byte(imm8); - } -} - - -void Assembler::shll(Register dst) { - emit_byte(0xD3); - emit_byte(0xE0 | dst->encoding()); -} - - -void Assembler::shrdl(Register dst, Register src) { - emit_byte(0x0F); - emit_byte(0xAD); - emit_byte(0xC0 | src->encoding() << 3 | dst->encoding()); -} - - -void Assembler::shrl(Register dst, int imm8) { - assert(isShiftCount(imm8), "illegal shift count"); - emit_byte(0xC1); - emit_byte(0xE8 | dst->encoding()); - emit_byte(imm8); -} - - -void Assembler::shrl(Register dst) { - emit_byte(0xD3); - emit_byte(0xE8 | dst->encoding()); -} - - -void Assembler::subl(Address dst, int imm32) { - if (is8bit(imm32)) { - InstructionMark im(this); - emit_byte(0x83); - emit_operand(rbp, dst); - emit_byte(imm32 & 0xFF); - } else { - InstructionMark im(this); - emit_byte(0x81); - emit_operand(rbp, dst); - emit_long(imm32); - } -} - - -void Assembler::subl(Register dst, int imm32) { - emit_arith(0x81, 0xE8, dst, imm32); -} - - -void Assembler::subl(Address dst, Register src) { - InstructionMark im(this); - emit_byte(0x29); - emit_operand(src, dst); -} - - -void Assembler::subl(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x2B); - emit_operand(dst, src); -} - - -void Assembler::subl(Register dst, Register src) { - emit_arith(0x2B, 0xC0, dst, src); -} - - -void Assembler::testb(Register dst, int imm8) { - assert(dst->has_byte_register(), "must have byte register"); - emit_arith_b(0xF6, 0xC0, dst, imm8); -} - - -void Assembler::testl(Register dst, int imm32) { - // not using emit_arith because test - // doesn't support sign-extension of - // 8bit operands - if (dst->encoding() == 0) { - emit_byte(0xA9); - } else { - emit_byte(0xF7); - emit_byte(0xC0 | dst->encoding()); - } - emit_long(imm32); -} - - -void Assembler::testl(Register dst, Register src) { - emit_arith(0x85, 0xC0, dst, src); -} - -void Assembler::testl(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x85); - emit_operand(dst, src); -} - -void Assembler::xaddl(Address dst, Register src) { - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0xC1); - emit_operand(src, dst); -} - -void Assembler::xorl(Register dst, int imm32) { - emit_arith(0x81, 0xF0, dst, imm32); -} - - -void Assembler::xorl(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x33); - emit_operand(dst, src); -} - - -void Assembler::xorl(Register dst, Register src) { - emit_arith(0x33, 0xC0, dst, src); -} - - -void Assembler::bswap(Register reg) { - emit_byte(0x0F); - emit_byte(0xC8 | reg->encoding()); -} - - -void Assembler::lock() { - if (Atomics & 1) { - // Emit either nothing, a NOP, or a NOP: prefix - emit_byte(0x90) ; - } else { - emit_byte(0xF0); - } -} - - -void Assembler::xchg(Register reg, Address adr) { - InstructionMark im(this); - emit_byte(0x87); - emit_operand(reg, adr); -} - - -void Assembler::xchgl(Register dst, Register src) { - emit_byte(0x87); - emit_byte(0xc0 | dst->encoding() << 3 | src->encoding()); -} - - -// The 32-bit cmpxchg compares the value at adr with the contents of rax, -// 
and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. -// The ZF is set if the compared values were equal, and cleared otherwise. -void Assembler::cmpxchg(Register reg, Address adr) { - if (Atomics & 2) { - // caveat: no instructionmark, so this isn't relocatable. - // Emit a synthetic, non-atomic, CAS equivalent. - // Beware. The synthetic form sets all ICCs, not just ZF. - // cmpxchg r,[m] is equivalent to rax, = CAS (m, rax, r) - cmpl (rax, adr) ; - movl (rax, adr) ; - if (reg != rax) { - Label L ; - jcc (Assembler::notEqual, L) ; - movl (adr, reg) ; - bind (L) ; - } - } else { - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0xB1); - emit_operand(reg, adr); - } -} - -// The 64-bit cmpxchg compares the value at adr with the contents of rdx:rax, -// and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded -// into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise. -void Assembler::cmpxchg8(Address adr) { - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0xc7); - emit_operand(rcx, adr); -} - -void Assembler::hlt() { - emit_byte(0xF4); -} - - -void Assembler::addr_nop_4() { - // 4 bytes: NOP DWORD PTR [EAX+0] - emit_byte(0x0F); - emit_byte(0x1F); - emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc); - emit_byte(0); // 8-bits offset (1 byte) -} - -void Assembler::addr_nop_5() { - // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset - emit_byte(0x0F); - emit_byte(0x1F); - emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4); - emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); - emit_byte(0); // 8-bits offset (1 byte) -} - -void Assembler::addr_nop_7() { - // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset - emit_byte(0x0F); - emit_byte(0x1F); - emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc); - emit_long(0); // 32-bits offset (4 bytes) -} - -void Assembler::addr_nop_8() { - // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset - emit_byte(0x0F); - emit_byte(0x1F); - emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4); - emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); - emit_long(0); // 32-bits offset (4 bytes) -} - -void Assembler::nop(int i) { - assert(i > 0, " "); - if (UseAddressNop && VM_Version::is_intel()) { - // - // Using multi-bytes nops "0x0F 0x1F [address]" for Intel - // 1: 0x90 - // 2: 0x66 0x90 - // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) - // 4: 0x0F 0x1F 0x40 0x00 - // 5: 0x0F 0x1F 0x44 0x00 0x00 - // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 - // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 - // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - - // The rest coding is Intel specific - don't use consecutive address nops - - // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 - // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 - // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 - // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 - - while(i >= 15) { - // For Intel don't generate consecutive addess nops (mix with regular nops) - i -= 15; - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - addr_nop_8(); - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x90); 
// nop - } - switch (i) { - case 14: - emit_byte(0x66); // size prefix - case 13: - emit_byte(0x66); // size prefix - case 12: - addr_nop_8(); - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x90); // nop - break; - case 11: - emit_byte(0x66); // size prefix - case 10: - emit_byte(0x66); // size prefix - case 9: - emit_byte(0x66); // size prefix - case 8: - addr_nop_8(); - break; - case 7: - addr_nop_7(); - break; - case 6: - emit_byte(0x66); // size prefix - case 5: - addr_nop_5(); - break; - case 4: - addr_nop_4(); - break; - case 3: - // Don't use "0x0F 0x1F 0x00" - need patching safe padding - emit_byte(0x66); // size prefix - case 2: - emit_byte(0x66); // size prefix - case 1: - emit_byte(0x90); // nop - break; - default: - assert(i == 0, " "); - } - return; - } - if (UseAddressNop && VM_Version::is_amd()) { - // - // Using multi-bytes nops "0x0F 0x1F [address]" for AMD. - // 1: 0x90 - // 2: 0x66 0x90 - // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) - // 4: 0x0F 0x1F 0x40 0x00 - // 5: 0x0F 0x1F 0x44 0x00 0x00 - // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 - // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 - // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - - // The rest coding is AMD specific - use consecutive address nops - - // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 - // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 - // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 - // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 - // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // Size prefixes (0x66) are added for larger sizes - - while(i >= 22) { - i -= 11; - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - addr_nop_8(); - } - // Generate first nop for size between 21-12 - switch (i) { - case 21: - i -= 1; - emit_byte(0x66); // size prefix - case 20: - case 19: - i -= 1; - emit_byte(0x66); // size prefix - case 18: - case 17: - i -= 1; - emit_byte(0x66); // size prefix - case 16: - case 15: - i -= 8; - addr_nop_8(); - break; - case 14: - case 13: - i -= 7; - addr_nop_7(); - break; - case 12: - i -= 6; - emit_byte(0x66); // size prefix - addr_nop_5(); - break; - default: - assert(i < 12, " "); - } - - // Generate second nop for size between 11-1 - switch (i) { - case 11: - emit_byte(0x66); // size prefix - case 10: - emit_byte(0x66); // size prefix - case 9: - emit_byte(0x66); // size prefix - case 8: - addr_nop_8(); - break; - case 7: - addr_nop_7(); - break; - case 6: - emit_byte(0x66); // size prefix - case 5: - addr_nop_5(); - break; - case 4: - addr_nop_4(); - break; - case 3: - // Don't use "0x0F 0x1F 0x00" - need patching safe padding - emit_byte(0x66); // size prefix - case 2: - emit_byte(0x66); // size prefix - case 1: - emit_byte(0x90); // nop - break; - default: - assert(i == 0, " "); - } - return; - } - - // Using nops with size prefixes "0x66 0x90". 
- // From AMD Optimization Guide: - // 1: 0x90 - // 2: 0x66 0x90 - // 3: 0x66 0x66 0x90 - // 4: 0x66 0x66 0x66 0x90 - // 5: 0x66 0x66 0x90 0x66 0x90 - // 6: 0x66 0x66 0x90 0x66 0x66 0x90 - // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 - // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90 - // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 - // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 - // - while(i > 12) { - i -= 4; - emit_byte(0x66); // size prefix - emit_byte(0x66); - emit_byte(0x66); - emit_byte(0x90); // nop - } - // 1 - 12 nops - if(i > 8) { - if(i > 9) { - i -= 1; - emit_byte(0x66); - } - i -= 3; - emit_byte(0x66); - emit_byte(0x66); - emit_byte(0x90); - } - // 1 - 8 nops - if(i > 4) { - if(i > 6) { - i -= 1; - emit_byte(0x66); - } - i -= 3; - emit_byte(0x66); - emit_byte(0x66); - emit_byte(0x90); - } - switch (i) { - case 4: - emit_byte(0x66); - case 3: - emit_byte(0x66); - case 2: - emit_byte(0x66); - case 1: - emit_byte(0x90); - break; - default: - assert(i == 0, " "); - } -} - -void Assembler::ret(int imm16) { - if (imm16 == 0) { - emit_byte(0xC3); - } else { - emit_byte(0xC2); - emit_word(imm16); - } -} - - -void Assembler::set_byte_if_not_zero(Register dst) { - emit_byte(0x0F); - emit_byte(0x95); - emit_byte(0xE0 | dst->encoding()); -} - - -// copies a single word from [esi] to [edi] -void Assembler::smovl() { - emit_byte(0xA5); -} - -// copies data from [esi] to [edi] using rcx double words (m32) -void Assembler::rep_movl() { - emit_byte(0xF3); - emit_byte(0xA5); -} - - -// sets rcx double words (m32) with rax, value at [edi] -void Assembler::rep_set() { - emit_byte(0xF3); - emit_byte(0xAB); -} - -// scans rcx double words (m32) at [edi] for occurance of rax, -void Assembler::repne_scan() { - emit_byte(0xF2); - emit_byte(0xAF); -} - - -void Assembler::setb(Condition cc, Register dst) { - assert(0 <= cc && cc < 16, "illegal cc"); - emit_byte(0x0F); - emit_byte(0x90 | cc); - emit_byte(0xC0 | dst->encoding()); -} - -void Assembler::cld() { - emit_byte(0xfc); -} - -void Assembler::std() { - emit_byte(0xfd); -} - -void Assembler::emit_raw (unsigned char b) { - emit_byte (b) ; -} - -// Serializes memory. -void Assembler::membar() { - // Memory barriers are only needed on multiprocessors - if (os::is_MP()) { - if( VM_Version::supports_sse2() ) { - emit_byte( 0x0F ); // MFENCE; faster blows no regs - emit_byte( 0xAE ); - emit_byte( 0xF0 ); - } else { - // All usable chips support "locked" instructions which suffice - // as barriers, and are much faster than the alternative of - // using cpuid instruction. We use here a locked add [esp],0. - // This is conveniently otherwise a no-op except for blowing - // flags (which we save and restore.) - pushfd(); // Save eflags register - lock(); - addl(Address(rsp, 0), 0);// Assert the lock# signal here - popfd(); // Restore eflags register - } - } -} - -// Identify processor type and features -void Assembler::cpuid() { - // Note: we can't assert VM_Version::supports_cpuid() here - // because this instruction is used in the processor - // identification code. 
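
A side note on the membar() fallback just above: when SSE2 (and hence MFENCE) is unavailable, the deleted code obtains a full fence from a locked add of zero to the top of the stack, saving and restoring EFLAGS around it. A rough, hypothetical GCC-style inline-assembly equivalent of those two choices on IA-32 (clobbering the flags instead of preserving them) might look like the sketch below; full_fence is not a HotSpot function.

    // Hypothetical sketch only, assuming a 32-bit target and GCC-style inline asm.
    static inline void full_fence(bool have_sse2) {
        if (have_sse2) {
            __asm__ __volatile__("mfence" ::: "memory");
        } else {
            // A locked read-modify-write of the stack top acts as a full barrier
            // on IA-32; unlike the original, this clobbers EFLAGS ("cc").
            __asm__ __volatile__("lock; addl $0, (%%esp)" ::: "memory", "cc");
        }
    }
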
- emit_byte( 0x0F ); - emit_byte( 0xA2 ); -} - -void Assembler::call(Label& L, relocInfo::relocType rtype) { - if (L.is_bound()) { - const int long_size = 5; - int offs = target(L) - pc(); - assert(offs <= 0, "assembler error"); - InstructionMark im(this); - // 1110 1000 #32-bit disp - emit_byte(0xE8); - emit_data(offs - long_size, rtype, 0); - } else { - InstructionMark im(this); - // 1110 1000 #32-bit disp - L.add_patch_at(code(), locator()); - emit_byte(0xE8); - emit_data(int(0), rtype, 0); - } -} - -void Assembler::call(Register dst) { - emit_byte(0xFF); - emit_byte(0xD0 | dst->encoding()); -} - - -void Assembler::call(Address adr) { - InstructionMark im(this); - relocInfo::relocType rtype = adr.reloc(); - if (rtype != relocInfo::runtime_call_type) { - emit_byte(0xFF); - emit_operand(rdx, adr); - } else { - assert(false, "ack"); - } - -} - -void Assembler::call_literal(address dest, RelocationHolder const& rspec) { - InstructionMark im(this); - emit_byte(0xE8); - intptr_t disp = dest - (_code_pos + sizeof(int32_t)); - assert(dest != NULL, "must have a target"); - emit_data(disp, rspec, call32_operand); - -} - -void Assembler::jmp(Register entry) { - emit_byte(0xFF); - emit_byte(0xE0 | entry->encoding()); -} - - -void Assembler::jmp(Address adr) { - InstructionMark im(this); - emit_byte(0xFF); - emit_operand(rsp, adr); -} - -void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) { - InstructionMark im(this); - emit_byte(0xE9); - assert(dest != NULL, "must have a target"); - intptr_t disp = dest - (_code_pos + sizeof(int32_t)); - emit_data(disp, rspec.reloc(), call32_operand); -} - -void Assembler::jmp(Label& L, relocInfo::relocType rtype) { - if (L.is_bound()) { - address entry = target(L); - assert(entry != NULL, "jmp most probably wrong"); - InstructionMark im(this); - const int short_size = 2; - const int long_size = 5; - intptr_t offs = entry - _code_pos; - if (rtype == relocInfo::none && is8bit(offs - short_size)) { - emit_byte(0xEB); - emit_byte((offs - short_size) & 0xFF); - } else { - emit_byte(0xE9); - emit_long(offs - long_size); - } - } else { - // By default, forward jumps are always 32-bit displacements, since - // we can't yet know where the label will be bound. If you're sure that - // the forward jump will not run beyond 256 bytes, use jmpb to - // force an 8-bit displacement. 
- InstructionMark im(this); - relocate(rtype); - L.add_patch_at(code(), locator()); - emit_byte(0xE9); - emit_long(0); - } -} - -void Assembler::jmpb(Label& L) { - if (L.is_bound()) { - const int short_size = 2; - address entry = target(L); - assert(is8bit((entry - _code_pos) + short_size), - "Dispacement too large for a short jmp"); - assert(entry != NULL, "jmp most probably wrong"); - intptr_t offs = entry - _code_pos; - emit_byte(0xEB); - emit_byte((offs - short_size) & 0xFF); - } else { - InstructionMark im(this); - L.add_patch_at(code(), locator()); - emit_byte(0xEB); - emit_byte(0); - } -} - -void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) { - InstructionMark im(this); - relocate(rtype); - assert((0 <= cc) && (cc < 16), "illegal cc"); - if (L.is_bound()) { - address dst = target(L); - assert(dst != NULL, "jcc most probably wrong"); - - const int short_size = 2; - const int long_size = 6; - int offs = (int)dst - ((int)_code_pos); - if (rtype == relocInfo::none && is8bit(offs - short_size)) { - // 0111 tttn #8-bit disp - emit_byte(0x70 | cc); - emit_byte((offs - short_size) & 0xFF); - } else { - // 0000 1111 1000 tttn #32-bit disp - emit_byte(0x0F); - emit_byte(0x80 | cc); - emit_long(offs - long_size); - } - } else { - // Note: could eliminate cond. jumps to this jump if condition - // is the same however, seems to be rather unlikely case. - // Note: use jccb() if label to be bound is very close to get - // an 8-bit displacement - L.add_patch_at(code(), locator()); - emit_byte(0x0F); - emit_byte(0x80 | cc); - emit_long(0); - } -} - -void Assembler::jccb(Condition cc, Label& L) { - if (L.is_bound()) { - const int short_size = 2; - address entry = target(L); - assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)), - "Dispacement too large for a short jmp"); - intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos; - // 0111 tttn #8-bit disp - emit_byte(0x70 | cc); - emit_byte((offs - short_size) & 0xFF); - jcc(cc, L); - } else { - InstructionMark im(this); - L.add_patch_at(code(), locator()); - emit_byte(0x70 | cc); - emit_byte(0); - } -} - -// FPU instructions - -void Assembler::fld1() { - emit_byte(0xD9); - emit_byte(0xE8); -} - - -void Assembler::fldz() { - emit_byte(0xD9); - emit_byte(0xEE); -} - - -void Assembler::fld_s(Address adr) { - InstructionMark im(this); - emit_byte(0xD9); - emit_operand(rax, adr); -} - - -void Assembler::fld_s (int index) { - emit_farith(0xD9, 0xC0, index); -} - - -void Assembler::fld_d(Address adr) { - InstructionMark im(this); - emit_byte(0xDD); - emit_operand(rax, adr); -} - - -void Assembler::fld_x(Address adr) { - InstructionMark im(this); - emit_byte(0xDB); - emit_operand(rbp, adr); -} - - -void Assembler::fst_s(Address adr) { - InstructionMark im(this); - emit_byte(0xD9); - emit_operand(rdx, adr); -} - - -void Assembler::fst_d(Address adr) { - InstructionMark im(this); - emit_byte(0xDD); - emit_operand(rdx, adr); -} - - -void Assembler::fstp_s(Address adr) { - InstructionMark im(this); - emit_byte(0xD9); - emit_operand(rbx, adr); -} - - -void Assembler::fstp_d(Address adr) { - InstructionMark im(this); - emit_byte(0xDD); - emit_operand(rbx, adr); -} - - -void Assembler::fstp_x(Address adr) { - InstructionMark im(this); - emit_byte(0xDB); - emit_operand(rdi, adr); -} - - -void Assembler::fstp_d(int index) { - emit_farith(0xDD, 0xD8, index); -} - - -void Assembler::fild_s(Address adr) { - InstructionMark im(this); - emit_byte(0xDB); - emit_operand(rax, adr); -} - - -void Assembler::fild_d(Address adr) { - 
InstructionMark im(this); - emit_byte(0xDF); - emit_operand(rbp, adr); -} - - -void Assembler::fistp_s(Address adr) { - InstructionMark im(this); - emit_byte(0xDB); - emit_operand(rbx, adr); -} - - -void Assembler::fistp_d(Address adr) { - InstructionMark im(this); - emit_byte(0xDF); - emit_operand(rdi, adr); -} - - -void Assembler::fist_s(Address adr) { - InstructionMark im(this); - emit_byte(0xDB); - emit_operand(rdx, adr); -} - - -void Assembler::fabs() { - emit_byte(0xD9); - emit_byte(0xE1); -} - - -void Assembler::fldln2() { - emit_byte(0xD9); - emit_byte(0xED); -} - -void Assembler::fyl2x() { - emit_byte(0xD9); - emit_byte(0xF1); -} - - -void Assembler::fldlg2() { - emit_byte(0xD9); - emit_byte(0xEC); -} - - -void Assembler::flog() { - fldln2(); - fxch(); - fyl2x(); -} - - -void Assembler::flog10() { - fldlg2(); - fxch(); - fyl2x(); -} - - -void Assembler::fsin() { - emit_byte(0xD9); - emit_byte(0xFE); -} - - -void Assembler::fcos() { - emit_byte(0xD9); - emit_byte(0xFF); -} - -void Assembler::ftan() { - emit_byte(0xD9); - emit_byte(0xF2); - emit_byte(0xDD); - emit_byte(0xD8); -} - -void Assembler::fsqrt() { - emit_byte(0xD9); - emit_byte(0xFA); -} - - -void Assembler::fchs() { - emit_byte(0xD9); - emit_byte(0xE0); -} - - -void Assembler::fadd_s(Address src) { - InstructionMark im(this); - emit_byte(0xD8); - emit_operand(rax, src); -} - - -void Assembler::fadd_d(Address src) { - InstructionMark im(this); - emit_byte(0xDC); - emit_operand(rax, src); -} - - -void Assembler::fadd(int i) { - emit_farith(0xD8, 0xC0, i); -} - - -void Assembler::fadda(int i) { - emit_farith(0xDC, 0xC0, i); -} - - -void Assembler::fsub_d(Address src) { - InstructionMark im(this); - emit_byte(0xDC); - emit_operand(rsp, src); -} - - -void Assembler::fsub_s(Address src) { - InstructionMark im(this); - emit_byte(0xD8); - emit_operand(rsp, src); -} - - -void Assembler::fsubr_s(Address src) { - InstructionMark im(this); - emit_byte(0xD8); - emit_operand(rbp, src); -} - - -void Assembler::fsubr_d(Address src) { - InstructionMark im(this); - emit_byte(0xDC); - emit_operand(rbp, src); -} - - -void Assembler::fmul_s(Address src) { - InstructionMark im(this); - emit_byte(0xD8); - emit_operand(rcx, src); -} - - -void Assembler::fmul_d(Address src) { - InstructionMark im(this); - emit_byte(0xDC); - emit_operand(rcx, src); -} - - -void Assembler::fmul(int i) { - emit_farith(0xD8, 0xC8, i); -} - - -void Assembler::fmula(int i) { - emit_farith(0xDC, 0xC8, i); -} - - -void Assembler::fdiv_s(Address src) { - InstructionMark im(this); - emit_byte(0xD8); - emit_operand(rsi, src); -} - - -void Assembler::fdiv_d(Address src) { - InstructionMark im(this); - emit_byte(0xDC); - emit_operand(rsi, src); -} - - -void Assembler::fdivr_s(Address src) { - InstructionMark im(this); - emit_byte(0xD8); - emit_operand(rdi, src); -} - - -void Assembler::fdivr_d(Address src) { - InstructionMark im(this); - emit_byte(0xDC); - emit_operand(rdi, src); -} - - -void Assembler::fsub(int i) { - emit_farith(0xD8, 0xE0, i); -} - - -void Assembler::fsuba(int i) { - emit_farith(0xDC, 0xE8, i); -} - - -void Assembler::fsubr(int i) { - emit_farith(0xD8, 0xE8, i); -} - - -void Assembler::fsubra(int i) { - emit_farith(0xDC, 0xE0, i); -} - - -void Assembler::fdiv(int i) { - emit_farith(0xD8, 0xF0, i); -} - - -void Assembler::fdiva(int i) { - emit_farith(0xDC, 0xF8, i); -} - - -void Assembler::fdivr(int i) { - emit_farith(0xD8, 0xF8, i); -} - - -void Assembler::fdivra(int i) { - emit_farith(0xDC, 0xF0, i); -} - - -// Note: The Intel manual (Pentium Processor 
User's Manual, Vol.3, 1994) -// is erroneous for some of the floating-point instructions below. - -void Assembler::fdivp(int i) { - emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong) -} - - -void Assembler::fdivrp(int i) { - emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong) -} - - -void Assembler::fsubp(int i) { - emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong) -} - - -void Assembler::fsubrp(int i) { - emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong) -} - - -void Assembler::faddp(int i) { - emit_farith(0xDE, 0xC0, i); -} - - -void Assembler::fmulp(int i) { - emit_farith(0xDE, 0xC8, i); -} - - -void Assembler::fprem() { - emit_byte(0xD9); - emit_byte(0xF8); -} - - -void Assembler::fprem1() { - emit_byte(0xD9); - emit_byte(0xF5); -} - - -void Assembler::fxch(int i) { - emit_farith(0xD9, 0xC8, i); -} - - -void Assembler::fincstp() { - emit_byte(0xD9); - emit_byte(0xF7); -} - - -void Assembler::fdecstp() { - emit_byte(0xD9); - emit_byte(0xF6); -} - - -void Assembler::ffree(int i) { - emit_farith(0xDD, 0xC0, i); -} - - -void Assembler::fcomp_s(Address src) { - InstructionMark im(this); - emit_byte(0xD8); - emit_operand(rbx, src); -} - - -void Assembler::fcomp_d(Address src) { - InstructionMark im(this); - emit_byte(0xDC); - emit_operand(rbx, src); -} - - -void Assembler::fcom(int i) { - emit_farith(0xD8, 0xD0, i); -} - - -void Assembler::fcomp(int i) { - emit_farith(0xD8, 0xD8, i); -} - - -void Assembler::fcompp() { - emit_byte(0xDE); - emit_byte(0xD9); -} - - -void Assembler::fucomi(int i) { - // make sure the instruction is supported (introduced for P6, together with cmov) - guarantee(VM_Version::supports_cmov(), "illegal instruction"); - emit_farith(0xDB, 0xE8, i); -} - - -void Assembler::fucomip(int i) { - // make sure the instruction is supported (introduced for P6, together with cmov) - guarantee(VM_Version::supports_cmov(), "illegal instruction"); - emit_farith(0xDF, 0xE8, i); -} - - -void Assembler::ftst() { - emit_byte(0xD9); - emit_byte(0xE4); -} - - -void Assembler::fnstsw_ax() { - emit_byte(0xdF); - emit_byte(0xE0); -} - - -void Assembler::fwait() { - emit_byte(0x9B); -} - - -void Assembler::finit() { - emit_byte(0x9B); - emit_byte(0xDB); - emit_byte(0xE3); -} - - -void Assembler::fldcw(Address src) { - InstructionMark im(this); - emit_byte(0xd9); - emit_operand(rbp, src); -} - - -void Assembler::fnstcw(Address src) { - InstructionMark im(this); - emit_byte(0x9B); - emit_byte(0xD9); - emit_operand(rdi, src); -} - -void Assembler::fnsave(Address dst) { - InstructionMark im(this); - emit_byte(0xDD); - emit_operand(rsi, dst); -} - - -void Assembler::frstor(Address src) { - InstructionMark im(this); - emit_byte(0xDD); - emit_operand(rsp, src); -} - - -void Assembler::fldenv(Address src) { - InstructionMark im(this); - emit_byte(0xD9); - emit_operand(rsp, src); -} - - -void Assembler::sahf() { - emit_byte(0x9E); -} - -// MMX operations -void Assembler::emit_operand(MMXRegister reg, Address adr) { - emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); -} - -void Assembler::movq( MMXRegister dst, Address src ) { - assert( VM_Version::supports_mmx(), "" ); - emit_byte(0x0F); - emit_byte(0x6F); - emit_operand(dst,src); -} - -void Assembler::movq( Address dst, MMXRegister src ) { - assert( VM_Version::supports_mmx(), "" ); - emit_byte(0x0F); - emit_byte(0x7F); - emit_operand(src,dst); -} - -void Assembler::emms() { - 
emit_byte(0x0F); - emit_byte(0x77); -} - - - - -// SSE and SSE2 instructions -inline void Assembler::emit_sse_operand(XMMRegister reg, Address adr) { - assert(((Register)reg)->encoding() == reg->encoding(), "otherwise typecast is invalid"); - emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); -} -inline void Assembler::emit_sse_operand(Register reg, Address adr) { - emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec); -} - -inline void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) { - emit_byte(0xC0 | dst->encoding() << 3 | src->encoding()); -} -inline void Assembler::emit_sse_operand(XMMRegister dst, Register src) { - emit_byte(0xC0 | dst->encoding() << 3 | src->encoding()); -} -inline void Assembler::emit_sse_operand(Register dst, XMMRegister src) { - emit_byte(0xC0 | dst->encoding() << 3 | src->encoding()); -} - - -// Macro for creation of SSE2 instructions -// The SSE2 instricution set is highly regular, so this macro saves -// a lot of cut&paste -// Each macro expansion creates two methods (same name with different -// parameter list) -// -// Macro parameters: -// * name: name of the created methods -// * sse_version: either sse or sse2 for the assertion if instruction supported by processor -// * prefix: first opcode byte of the instruction (or 0 if no prefix byte) -// * opcode: last opcode byte of the instruction -// * conversion instruction have parameters of type Register instead of XMMRegister, -// so this can also configured with macro parameters -#define emit_sse_instruction(name, sse_version, prefix, opcode, dst_register_type, src_register_type) \ - \ - void Assembler:: name (dst_register_type dst, Address src) { \ - assert(VM_Version::supports_##sse_version(), ""); \ - \ - InstructionMark im(this); \ - if (prefix != 0) emit_byte(prefix); \ - emit_byte(0x0F); \ - emit_byte(opcode); \ - emit_sse_operand(dst, src); \ - } \ - \ - void Assembler:: name (dst_register_type dst, src_register_type src) { \ - assert(VM_Version::supports_##sse_version(), ""); \ - \ - if (prefix != 0) emit_byte(prefix); \ - emit_byte(0x0F); \ - emit_byte(opcode); \ - emit_sse_operand(dst, src); \ - } \ - -emit_sse_instruction(addss, sse, 0xF3, 0x58, XMMRegister, XMMRegister); -emit_sse_instruction(addsd, sse2, 0xF2, 0x58, XMMRegister, XMMRegister) -emit_sse_instruction(subss, sse, 0xF3, 0x5C, XMMRegister, XMMRegister) -emit_sse_instruction(subsd, sse2, 0xF2, 0x5C, XMMRegister, XMMRegister) -emit_sse_instruction(mulss, sse, 0xF3, 0x59, XMMRegister, XMMRegister) -emit_sse_instruction(mulsd, sse2, 0xF2, 0x59, XMMRegister, XMMRegister) -emit_sse_instruction(divss, sse, 0xF3, 0x5E, XMMRegister, XMMRegister) -emit_sse_instruction(divsd, sse2, 0xF2, 0x5E, XMMRegister, XMMRegister) -emit_sse_instruction(sqrtss, sse, 0xF3, 0x51, XMMRegister, XMMRegister) -emit_sse_instruction(sqrtsd, sse2, 0xF2, 0x51, XMMRegister, XMMRegister) - -emit_sse_instruction(pxor, sse2, 0x66, 0xEF, XMMRegister, XMMRegister) - -emit_sse_instruction(comiss, sse, 0, 0x2F, XMMRegister, XMMRegister) -emit_sse_instruction(comisd, sse2, 0x66, 0x2F, XMMRegister, XMMRegister) -emit_sse_instruction(ucomiss, sse, 0, 0x2E, XMMRegister, XMMRegister) -emit_sse_instruction(ucomisd, sse2, 0x66, 0x2E, XMMRegister, XMMRegister) - -emit_sse_instruction(cvtss2sd, sse2, 0xF3, 0x5A, XMMRegister, XMMRegister); -emit_sse_instruction(cvtsd2ss, sse2, 0xF2, 0x5A, XMMRegister, XMMRegister) -emit_sse_instruction(cvtsi2ss, sse, 0xF3, 0x2A, XMMRegister, Register); 
-emit_sse_instruction(cvtsi2sd, sse2, 0xF2, 0x2A, XMMRegister, Register) -emit_sse_instruction(cvtss2si, sse, 0xF3, 0x2D, Register, XMMRegister); -emit_sse_instruction(cvtsd2si, sse2, 0xF2, 0x2D, Register, XMMRegister) -emit_sse_instruction(cvttss2si, sse, 0xF3, 0x2C, Register, XMMRegister); -emit_sse_instruction(cvttsd2si, sse2, 0xF2, 0x2C, Register, XMMRegister) - -emit_sse_instruction(movss, sse, 0xF3, 0x10, XMMRegister, XMMRegister) -emit_sse_instruction(movsd, sse2, 0xF2, 0x10, XMMRegister, XMMRegister) - -emit_sse_instruction(movq, sse2, 0xF3, 0x7E, XMMRegister, XMMRegister); -emit_sse_instruction(movd, sse2, 0x66, 0x6E, XMMRegister, Register); -emit_sse_instruction(movdqa, sse2, 0x66, 0x6F, XMMRegister, XMMRegister); - -emit_sse_instruction(punpcklbw, sse2, 0x66, 0x60, XMMRegister, XMMRegister); - - -// Instruction not covered by macro -void Assembler::movq(Address dst, XMMRegister src) { - assert(VM_Version::supports_sse2(), ""); - - InstructionMark im(this); - emit_byte(0x66); - emit_byte(0x0F); - emit_byte(0xD6); - emit_sse_operand(src, dst); -} - -void Assembler::movd(Address dst, XMMRegister src) { - assert(VM_Version::supports_sse2(), ""); - - InstructionMark im(this); - emit_byte(0x66); - emit_byte(0x0F); - emit_byte(0x7E); - emit_sse_operand(src, dst); -} - -void Assembler::movd(Register dst, XMMRegister src) { - assert(VM_Version::supports_sse2(), ""); - - emit_byte(0x66); - emit_byte(0x0F); - emit_byte(0x7E); - emit_sse_operand(src, dst); -} - -void Assembler::movdqa(Address dst, XMMRegister src) { - assert(VM_Version::supports_sse2(), ""); - - InstructionMark im(this); - emit_byte(0x66); - emit_byte(0x0F); - emit_byte(0x7F); - emit_sse_operand(src, dst); -} - -void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) { - assert(isByte(mode), "invalid value"); - assert(VM_Version::supports_sse2(), ""); - - emit_byte(0x66); - emit_byte(0x0F); - emit_byte(0x70); - emit_sse_operand(dst, src); - emit_byte(mode & 0xFF); -} - -void Assembler::pshufd(XMMRegister dst, Address src, int mode) { - assert(isByte(mode), "invalid value"); - assert(VM_Version::supports_sse2(), ""); - - InstructionMark im(this); - emit_byte(0x66); - emit_byte(0x0F); - emit_byte(0x70); - emit_sse_operand(dst, src); - emit_byte(mode & 0xFF); -} - -void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { - assert(isByte(mode), "invalid value"); - assert(VM_Version::supports_sse2(), ""); - - emit_byte(0xF2); - emit_byte(0x0F); - emit_byte(0x70); - emit_sse_operand(dst, src); - emit_byte(mode & 0xFF); -} - -void Assembler::pshuflw(XMMRegister dst, Address src, int mode) { - assert(isByte(mode), "invalid value"); - assert(VM_Version::supports_sse2(), ""); - - InstructionMark im(this); - emit_byte(0xF2); - emit_byte(0x0F); - emit_byte(0x70); - emit_sse_operand(dst, src); - emit_byte(mode & 0xFF); -} - -void Assembler::psrlq(XMMRegister dst, int shift) { - assert(VM_Version::supports_sse2(), ""); - - emit_byte(0x66); - emit_byte(0x0F); - emit_byte(0x73); - emit_sse_operand(xmm2, dst); - emit_byte(shift); -} - -void Assembler::movss( Address dst, XMMRegister src ) { - assert(VM_Version::supports_sse(), ""); - - InstructionMark im(this); - emit_byte(0xF3); // single - emit_byte(0x0F); - emit_byte(0x11); // store - emit_sse_operand(src, dst); -} - -void Assembler::movsd( Address dst, XMMRegister src ) { - assert(VM_Version::supports_sse2(), ""); - - InstructionMark im(this); - emit_byte(0xF2); // double - emit_byte(0x0F); - emit_byte(0x11); // store - emit_sse_operand(src,dst); -} - -// New 
cpus require to use movaps and movapd to avoid partial register stall -// when moving between registers. -void Assembler::movaps(XMMRegister dst, XMMRegister src) { - assert(VM_Version::supports_sse(), ""); - - emit_byte(0x0F); - emit_byte(0x28); - emit_sse_operand(dst, src); -} -void Assembler::movapd(XMMRegister dst, XMMRegister src) { - assert(VM_Version::supports_sse2(), ""); - - emit_byte(0x66); - emit_byte(0x0F); - emit_byte(0x28); - emit_sse_operand(dst, src); -} - -// New cpus require to use movsd and movss to avoid partial register stall -// when loading from memory. But for old Opteron use movlpd instead of movsd. -// The selection is done in MacroAssembler::movdbl() and movflt(). -void Assembler::movlpd(XMMRegister dst, Address src) { - assert(VM_Version::supports_sse(), ""); - - InstructionMark im(this); - emit_byte(0x66); - emit_byte(0x0F); - emit_byte(0x12); - emit_sse_operand(dst, src); -} - - -emit_sse_instruction(andps, sse, 0, 0x54, XMMRegister, XMMRegister); -emit_sse_instruction(andpd, sse2, 0x66, 0x54, XMMRegister, XMMRegister); -emit_sse_instruction(andnps, sse, 0, 0x55, XMMRegister, XMMRegister); -emit_sse_instruction(andnpd, sse2, 0x66, 0x55, XMMRegister, XMMRegister); -emit_sse_instruction(orps, sse, 0, 0x56, XMMRegister, XMMRegister); -emit_sse_instruction(orpd, sse2, 0x66, 0x56, XMMRegister, XMMRegister); -emit_sse_instruction(xorps, sse, 0, 0x57, XMMRegister, XMMRegister); -emit_sse_instruction(xorpd, sse2, 0x66, 0x57, XMMRegister, XMMRegister); - - -void Assembler::ldmxcsr( Address src) { - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0xAE); - emit_operand(rdx /* 2 */, src); -} - -void Assembler::stmxcsr( Address dst) { - InstructionMark im(this); - emit_byte(0x0F); - emit_byte(0xAE); - emit_operand(rbx /* 3 */, dst); -} - -// Implementation of MacroAssembler - -Address MacroAssembler::as_Address(AddressLiteral adr) { - // amd64 always does this as a pc-rel - // we can be absolute or disp based on the instruction type - // jmp/call are displacements others are absolute - assert(!adr.is_lval(), "must be rval"); - - return Address(adr.target(), adr.rspec()); -} - -Address MacroAssembler::as_Address(ArrayAddress adr) { - return Address::make_array(adr); -} - -void MacroAssembler::fat_nop() { - // A 5 byte nop that is safe for patching (see patch_verified_entry) - emit_byte(0x26); // es: - emit_byte(0x2e); // cs: - emit_byte(0x64); // fs: - emit_byte(0x65); // gs: - emit_byte(0x90); -} - -// 32bit can do a case table jump in one instruction but we no longer allow the base -// to be installed in the Address class -void MacroAssembler::jump(ArrayAddress entry) { - jmp(as_Address(entry)); -} - -void MacroAssembler::jump(AddressLiteral dst) { - jmp_literal(dst.target(), dst.rspec()); -} - -void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) { - assert((0 <= cc) && (cc < 16), "illegal cc"); - - InstructionMark im(this); - - relocInfo::relocType rtype = dst.reloc(); - relocate(rtype); - const int short_size = 2; - const int long_size = 6; - int offs = (int)dst.target() - ((int)_code_pos); - if (rtype == relocInfo::none && is8bit(offs - short_size)) { - // 0111 tttn #8-bit disp - emit_byte(0x70 | cc); - emit_byte((offs - short_size) & 0xFF); - } else { - // 0000 1111 1000 tttn #32-bit disp - emit_byte(0x0F); - emit_byte(0x80 | cc); - emit_long(offs - long_size); - } -} - -// Calls -void MacroAssembler::call(Label& L, relocInfo::relocType rtype) { - Assembler::call(L, rtype); -} - -void MacroAssembler::call(Register entry) { - Assembler::call(entry); 
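
jump_cc above chooses between the two-byte short form (0x70|cc plus an 8-bit displacement) and the six-byte near form (0F, 0x80|cc plus a 32-bit displacement), and only takes the short form when there is no relocation, since a relocated target may later move out of the 8-bit range. A hedged sketch of that size selection (pc and target stand in for _code_pos and dst.target(); this is not the real signature):

    #include <cstdint>
    #include <vector>

    static bool fits_in_8bits(int32_t v) { return v >= -128 && v <= 127; }

    // Emit a conditional jump to 'target', assuming the jcc starts at 'pc'.
    // 'cc' is the 4-bit condition code; 'relocatable' stands in for rtype != relocInfo::none.
    static std::vector<uint8_t> encode_jcc(uint8_t cc, int32_t pc, int32_t target, bool relocatable) {
        const int short_size = 2, long_size = 6;
        int32_t offs = target - pc;
        if (!relocatable && fits_in_8bits(offs - short_size)) {
            // 0111 tttn, 8-bit displacement
            return { static_cast<uint8_t>(0x70 | cc),
                     static_cast<uint8_t>((offs - short_size) & 0xFF) };
        }
        // 0000 1111 1000 tttn, 32-bit displacement (little-endian)
        int32_t disp = offs - long_size;
        std::vector<uint8_t> b = { 0x0F, static_cast<uint8_t>(0x80 | cc) };
        for (int i = 0; i < 4; i++) b.push_back(static_cast<uint8_t>((disp >> (8 * i)) & 0xFF));
        return b;
    }

    int main() {
        // A 16-byte forward branch with no relocation fits the short form.
        return encode_jcc(/*cc=*/4, /*pc=*/0x1000, /*target=*/0x1010, false).size() == 2 ? 0 : 1;
    }
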
-} - -void MacroAssembler::call(AddressLiteral entry) { - Assembler::call_literal(entry.target(), entry.rspec()); -} - - -void MacroAssembler::cmp8(AddressLiteral src1, int8_t imm) { - Assembler::cmpb(as_Address(src1), imm); -} - -void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) { - Assembler::cmpl(as_Address(src1), imm); -} - -void MacroAssembler::cmp32(Register src1, AddressLiteral src2) { - if (src2.is_lval()) { - cmp_literal32(src1, (int32_t) src2.target(), src2.rspec()); - } else { - Assembler::cmpl(src1, as_Address(src2)); - } -} - -void MacroAssembler::cmp32(Register src1, int32_t imm) { - Assembler::cmpl(src1, imm); -} - -void MacroAssembler::cmp32(Register src1, Address src2) { - Assembler::cmpl(src1, src2); -} - -void MacroAssembler::cmpoop(Address src1, jobject obj) { - cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); -} - -void MacroAssembler::cmpoop(Register src1, jobject obj) { - cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); -} - -void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) { - if (src2.is_lval()) { - // compare the effect address of src2 to src1 - cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); - } else { - Assembler::cmpl(src1, as_Address(src2)); - } -} - -void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) { - assert(src2.is_lval(), "not a mem-mem compare"); - cmp_literal32(src1, (int32_t) src2.target(), src2.rspec()); -} - - -void MacroAssembler::cmpxchgptr(Register reg, AddressLiteral adr) { - cmpxchg(reg, as_Address(adr)); -} - -void MacroAssembler::increment(AddressLiteral dst) { - increment(as_Address(dst)); -} - -void MacroAssembler::increment(ArrayAddress dst) { - increment(as_Address(dst)); -} - -void MacroAssembler::lea(Register dst, AddressLiteral adr) { - // leal(dst, as_Address(adr)); - // see note in movl as to why we musr use a move - mov_literal32(dst, (int32_t) adr.target(), adr.rspec()); -} - -void MacroAssembler::lea(Address dst, AddressLiteral adr) { - // leal(dst, as_Address(adr)); - // see note in movl as to why we musr use a move - mov_literal32(dst, (int32_t) adr.target(), adr.rspec()); -} - -void MacroAssembler::mov32(AddressLiteral dst, Register src) { - Assembler::movl(as_Address(dst), src); -} - -void MacroAssembler::mov32(Register dst, AddressLiteral src) { - Assembler::movl(dst, as_Address(src)); -} - -void MacroAssembler::movbyte(ArrayAddress dst, int src) { - movb(as_Address(dst), src); -} - -void MacroAssembler::movoop(Address dst, jobject obj) { - mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate()); -} - -void MacroAssembler::movoop(Register dst, jobject obj) { - mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate()); -} - -void MacroAssembler::movptr(Register dst, AddressLiteral src) { - if (src.is_lval()) { - // essentially an lea - mov_literal32(dst, (int32_t) src.target(), src.rspec()); - } else { - // mov 32bits from an absolute address - movl(dst, as_Address(src)); - } -} - -void MacroAssembler::movptr(ArrayAddress dst, Register src) { - movl(as_Address(dst), src); -} - -void MacroAssembler::movptr(Register dst, ArrayAddress src) { - movl(dst, as_Address(src)); -} - -void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) { - movss(dst, as_Address(src)); -} - -void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) { - if (UseXmmLoadAndClearUpper) { movsd (dst, as_Address(src)); return; } - else { movlpd(dst, as_Address(src)); return; } -} - -void Assembler::pushoop(jobject obj) 
{ - push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate()); -} - - -void MacroAssembler::pushptr(AddressLiteral src) { - if (src.is_lval()) { - push_literal32((int32_t)src.target(), src.rspec()); - } else { - pushl(as_Address(src)); - } -} - -void MacroAssembler::test32(Register src1, AddressLiteral src2) { - // src2 must be rval - testl(src1, as_Address(src2)); -} - -// FPU - -void MacroAssembler::fld_x(AddressLiteral src) { - Assembler::fld_x(as_Address(src)); -} - -void MacroAssembler::fld_d(AddressLiteral src) { - fld_d(as_Address(src)); -} - -void MacroAssembler::fld_s(AddressLiteral src) { - fld_s(as_Address(src)); -} - -void MacroAssembler::fldcw(AddressLiteral src) { - Assembler::fldcw(as_Address(src)); -} - -void MacroAssembler::ldmxcsr(AddressLiteral src) { - Assembler::ldmxcsr(as_Address(src)); -} - -// SSE - -void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) { - andpd(dst, as_Address(src)); -} - -void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) { - comisd(dst, as_Address(src)); -} - -void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) { - comiss(dst, as_Address(src)); -} - -void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) { - movsd(dst, as_Address(src)); -} - -void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) { - movss(dst, as_Address(src)); -} - -void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) { - xorpd(dst, as_Address(src)); -} - -void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) { - xorps(dst, as_Address(src)); -} - -void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) { - ucomisd(dst, as_Address(src)); -} - -void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) { - ucomiss(dst, as_Address(src)); -} - -void MacroAssembler::null_check(Register reg, int offset) { - if (needs_explicit_null_check(offset)) { - // provoke OS NULL exception if reg = NULL by - // accessing M[reg] w/o changing any (non-CC) registers - cmpl(rax, Address(reg, 0)); - // Note: should probably use testl(rax, Address(reg, 0)); - // may be shorter code (however, this version of - // testl needs to be implemented first) - } else { - // nothing to do, (later) access of M[reg + offset] - // will provoke OS NULL exception if reg = NULL - } -} - - -int MacroAssembler::load_unsigned_byte(Register dst, Address src) { - // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, - // and "3.9 Partial Register Penalties", p. 22). - int off; - if (VM_Version::is_P6() || src.uses(dst)) { - off = offset(); - movzxb(dst, src); - } else { - xorl(dst, dst); - off = offset(); - movb(dst, src); - } - return off; -} - - -int MacroAssembler::load_unsigned_word(Register dst, Address src) { - // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, - // and "3.9 Partial Register Penalties", p. 22). 
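
load_unsigned_byte above picks a single movzxb on P6-class CPUs (or whenever the address uses dst, so dst cannot be pre-cleared) and an explicit xorl-then-movb sequence otherwise; both produce the same zero-extension, the choice is purely about the partial-register penalties described in the AP-526 reference. The two strategies restated on plain values:

    #include <cstdint>
    #include <cstdio>

    // The two zero-extension strategies of load_unsigned_byte, on values instead of registers.
    static uint32_t zext_movzx(uint8_t src) { return src; }          // movzxb dst, src

    static uint32_t zext_xor_movb(uint8_t src) {
        uint32_t dst = 0;                                            // xorl dst, dst
        dst = (dst & ~0xFFu) | src;                                  // movb dst, src (writes low byte only)
        return dst;
    }

    int main() {
        printf("%u %u\n", zext_movzx(0xAB), zext_xor_movb(0xAB));    // 171 171
        return 0;
    }

load_signed_byte's fallback just below (shll by 24 then sarl by 24) is the matching sign-extension trick for CPUs without a usable movsx.
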
- int off; - if (VM_Version::is_P6() || src.uses(dst)) { - off = offset(); - movzxw(dst, src); - } else { - xorl(dst, dst); - off = offset(); - movw(dst, src); - } - return off; -} - - -int MacroAssembler::load_signed_byte(Register dst, Address src) { - int off; - if (VM_Version::is_P6()) { - off = offset(); - movsxb(dst, src); - } else { - off = load_unsigned_byte(dst, src); - shll(dst, 24); - sarl(dst, 24); - } - return off; -} - - -int MacroAssembler::load_signed_word(Register dst, Address src) { - int off; - if (VM_Version::is_P6()) { - off = offset(); - movsxw(dst, src); - } else { - off = load_unsigned_word(dst, src); - shll(dst, 16); - sarl(dst, 16); - } - return off; -} - - -void MacroAssembler::extend_sign(Register hi, Register lo) { - // According to Intel Doc. AP-526, "Integer Divide", p.18. - if (VM_Version::is_P6() && hi == rdx && lo == rax) { - cdql(); - } else { - movl(hi, lo); - sarl(hi, 31); - } -} - - -void MacroAssembler::increment(Register reg, int value) { - if (value == min_jint) {addl(reg, value); return; } - if (value < 0) { decrement(reg, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { incl(reg); return; } - /* else */ { addl(reg, value) ; return; } -} - -void MacroAssembler::increment(Address dst, int value) { - if (value == min_jint) {addl(dst, value); return; } - if (value < 0) { decrement(dst, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { incl(dst); return; } - /* else */ { addl(dst, value) ; return; } -} - -void MacroAssembler::decrement(Register reg, int value) { - if (value == min_jint) {subl(reg, value); return; } - if (value < 0) { increment(reg, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { decl(reg); return; } - /* else */ { subl(reg, value) ; return; } -} - -void MacroAssembler::decrement(Address dst, int value) { - if (value == min_jint) {subl(dst, value); return; } - if (value < 0) { increment(dst, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { decl(dst); return; } - /* else */ { subl(dst, value) ; return; } -} - -void MacroAssembler::align(int modulus) { - if (offset() % modulus != 0) nop(modulus - (offset() % modulus)); -} - - -void MacroAssembler::enter() { - pushl(rbp); - movl(rbp, rsp); -} - - -void MacroAssembler::leave() { - movl(rsp, rbp); - popl(rbp); -} - -void MacroAssembler::set_last_Java_frame(Register java_thread, - Register last_java_sp, - Register last_java_fp, - address last_java_pc) { - // determine java_thread register - if (!java_thread->is_valid()) { - java_thread = rdi; - get_thread(java_thread); - } - // determine last_java_sp register - if (!last_java_sp->is_valid()) { - last_java_sp = rsp; - } - - // last_java_fp is optional - - if (last_java_fp->is_valid()) { - movl(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); - } - - // last_java_pc is optional - - if (last_java_pc != NULL) { - lea(Address(java_thread, - JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()), - InternalAddress(last_java_pc)); - - } - movl(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp); -} - -void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) { - // determine java_thread register - if (!java_thread->is_valid()) { - java_thread = rdi; - get_thread(java_thread); - } - // we must set sp to zero to clear frame - movl(Address(java_thread, JavaThread::last_Java_sp_offset()), 0); - if (clear_fp) { - 
movl(Address(java_thread, JavaThread::last_Java_fp_offset()), 0); - } - - if (clear_pc) - movl(Address(java_thread, JavaThread::last_Java_pc_offset()), 0); - -} - - - -// Implementation of call_VM versions - -void MacroAssembler::call_VM_leaf_base( - address entry_point, - int number_of_arguments -) { - call(RuntimeAddress(entry_point)); - increment(rsp, number_of_arguments * wordSize); -} - - -void MacroAssembler::call_VM_base( - Register oop_result, - Register java_thread, - Register last_java_sp, - address entry_point, - int number_of_arguments, - bool check_exceptions -) { - // determine java_thread register - if (!java_thread->is_valid()) { - java_thread = rdi; - get_thread(java_thread); - } - // determine last_java_sp register - if (!last_java_sp->is_valid()) { - last_java_sp = rsp; - } - // debugging support - assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); - assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); - assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); - // push java thread (becomes first argument of C function) - pushl(java_thread); - // set last Java frame before call - assert(last_java_sp != rbp, "this code doesn't work for last_java_sp == rbp, which currently can't portably work anyway since C2 doesn't save rbp,"); - // Only interpreter should have to set fp - set_last_Java_frame(java_thread, last_java_sp, rbp, NULL); - // do the call - call(RuntimeAddress(entry_point)); - // restore the thread (cannot use the pushed argument since arguments - // may be overwritten by C code generated by an optimizing compiler); - // however can use the register value directly if it is callee saved. - if (java_thread == rdi || java_thread == rsi) { - // rdi & rsi are callee saved -> nothing to do -#ifdef ASSERT - guarantee(java_thread != rax, "change this code"); - pushl(rax); - { Label L; - get_thread(rax); - cmpl(java_thread, rax); - jcc(Assembler::equal, L); - stop("MacroAssembler::call_VM_base: rdi not callee saved?"); - bind(L); - } - popl(rax); -#endif - } else { - get_thread(java_thread); - } - // reset last Java frame - // Only interpreter should have to clear fp - reset_last_Java_frame(java_thread, true, false); - // discard thread and arguments - addl(rsp, (1 + number_of_arguments)*wordSize); - -#ifndef CC_INTERP - // C++ interp handles this in the interpreter - check_and_handle_popframe(java_thread); - check_and_handle_earlyret(java_thread); -#endif /* CC_INTERP */ - - if (check_exceptions) { - // check for pending exceptions (java_thread is set upon return) - cmpl(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD); - jump_cc(Assembler::notEqual, - RuntimeAddress(StubRoutines::forward_exception_entry())); - } - - // get oop result if there is one and reset the value in the thread - if (oop_result->is_valid()) { - movl(oop_result, Address(java_thread, JavaThread::vm_result_offset())); - movl(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD); - verify_oop(oop_result); - } -} - - -void MacroAssembler::check_and_handle_popframe(Register java_thread) { -} - -void MacroAssembler::check_and_handle_earlyret(Register java_thread) { -} - -void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) { - leal(rax, Address(rsp, (1 + number_of_arguments) * wordSize)); - call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions); -} - - 
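
Stepping back to the increment/decrement helpers a little further up: negative counts are forwarded to the opposite helper, which is why min_jint is peeled off first; -min_jint overflows back to min_jint, so forwarding it would never terminate. The remaining cases use the one-byte incl/decl only when UseIncDec allows it. The dispatch as a pure function (the enum and function names are illustrative):

    #include <climits>
    #include <cstdio>

    // Which instruction MacroAssembler::increment(Register, int) would pick for a
    // given constant. use_inc_dec mirrors the UseIncDec flag.
    enum Choice { kAdd, kInc, kForwardToDecrement, kNothing };

    static Choice choose_increment(int value, bool use_inc_dec) {
        if (value == INT_MIN) return kAdd;                 // -INT_MIN overflows, so no forwarding
        if (value < 0)        return kForwardToDecrement;  // decrement(reg, -value)
        if (value == 0)       return kNothing;
        if (value == 1 && use_inc_dec) return kInc;        // one-byte incl
        return kAdd;                                       // general addl reg, imm
    }

    int main() {
        printf("%d %d %d\n",
               choose_increment(1, true),         // kInc
               choose_increment(-7, true),        // kForwardToDecrement
               choose_increment(INT_MIN, true));  // kAdd
        return 0;
    }
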
-void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) { - Label C, E; - call(C, relocInfo::none); - jmp(E); - - bind(C); - call_VM_helper(oop_result, entry_point, 0, check_exceptions); - ret(0); - - bind(E); -} - - -void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) { - Label C, E; - call(C, relocInfo::none); - jmp(E); - - bind(C); - pushl(arg_1); - call_VM_helper(oop_result, entry_point, 1, check_exceptions); - ret(0); - - bind(E); -} - - -void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) { - Label C, E; - call(C, relocInfo::none); - jmp(E); - - bind(C); - pushl(arg_2); - pushl(arg_1); - call_VM_helper(oop_result, entry_point, 2, check_exceptions); - ret(0); - - bind(E); -} - - -void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) { - Label C, E; - call(C, relocInfo::none); - jmp(E); - - bind(C); - pushl(arg_3); - pushl(arg_2); - pushl(arg_1); - call_VM_helper(oop_result, entry_point, 3, check_exceptions); - ret(0); - - bind(E); -} - - -void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) { - call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions); -} - - -void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) { - pushl(arg_1); - call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); -} - - -void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) { - pushl(arg_2); - pushl(arg_1); - call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); -} - - -void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) { - pushl(arg_3); - pushl(arg_2); - pushl(arg_1); - call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); -} - - -void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { - call_VM_leaf_base(entry_point, number_of_arguments); -} - - -void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) { - pushl(arg_1); - call_VM_leaf(entry_point, 1); -} - - -void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) { - pushl(arg_2); - pushl(arg_1); - call_VM_leaf(entry_point, 2); -} - - -void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) { - pushl(arg_3); - pushl(arg_2); - pushl(arg_1); - call_VM_leaf(entry_point, 3); -} - - -// Calls to C land -// -// When entering C land, the rbp, & rsp of the last Java frame have to be recorded -// in the (thread-local) JavaThread object. When leaving C land, the last Java fp -// has to be reset to 0. This is required to allow proper stack traversal. - -void MacroAssembler::store_check(Register obj) { - // Does a store check for the oop in register obj. The content of - // register obj is destroyed afterwards. 
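
The call_VM variants above push arg_3, arg_2, arg_1 in that order, leaving arg_1 at the lowest address, and call_VM_base then pushes the JavaThread on top of them; under the 32-bit C calling convention the callee reads its parameters left to right starting from the lowest stack slot, so the target sees (thread, arg_1, arg_2, arg_3). A small demonstration of that layout on a simulated downward-growing stack (not HotSpot code):

    #include <cstdio>

    int main() {
        // Emulate a downward-growing stack: push() stores at progressively lower slots.
        int stack[8];
        int sp = 8;
        auto push = [&](int v) { stack[--sp] = v; };

        // Same order as call_VM(..., arg_1, arg_2, arg_3, ...):
        push(/*arg_3*/ 30);
        push(/*arg_2*/ 20);
        push(/*arg_1*/ 10);
        push(/*thread*/ 1);   // call_VM_base pushes the thread last, so it becomes the first C argument

        // The callee sees its parameters left to right from the lowest address upward:
        printf("%d %d %d %d\n", stack[sp], stack[sp + 1], stack[sp + 2], stack[sp + 3]); // 1 10 20 30
        return 0;
    }
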
- store_check_part_1(obj); - store_check_part_2(obj); -} - - -void MacroAssembler::store_check(Register obj, Address dst) { - store_check(obj); -} - - -// split the store check operation so that other instructions can be scheduled inbetween -void MacroAssembler::store_check_part_1(Register obj) { - BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); - shrl(obj, CardTableModRefBS::card_shift); -} - - -void MacroAssembler::store_check_part_2(Register obj) { - BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); - CardTableModRefBS* ct = (CardTableModRefBS*)bs; - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); - ExternalAddress cardtable((address)ct->byte_map_base); - Address index(noreg, obj, Address::times_1); - - movb(as_Address(ArrayAddress(cardtable, index)), 0); -} - - -void MacroAssembler::c2bool(Register x) { - // implements x == 0 ? 0 : 1 - // note: must only look at least-significant byte of x - // since C-style booleans are stored in one byte - // only! (was bug) - andl(x, 0xFF); - setb(Assembler::notZero, x); -} - - -int MacroAssembler::corrected_idivl(Register reg) { - // Full implementation of Java idiv and irem; checks for - // special case as described in JVM spec., p.243 & p.271. - // The function returns the (pc) offset of the idivl - // instruction - may be needed for implicit exceptions. - // - // normal case special case - // - // input : rax,: dividend min_int - // reg: divisor (may not be rax,/rdx) -1 - // - // output: rax,: quotient (= rax, idiv reg) min_int - // rdx: remainder (= rax, irem reg) 0 - assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); - const int min_int = 0x80000000; - Label normal_case, special_case; - - // check for special case - cmpl(rax, min_int); - jcc(Assembler::notEqual, normal_case); - xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) - cmpl(reg, -1); - jcc(Assembler::equal, special_case); - - // handle normal case - bind(normal_case); - cdql(); - int idivl_offset = offset(); - idivl(reg); - - // normal and special case exit - bind(special_case); - - return idivl_offset; -} - - -void MacroAssembler::lneg(Register hi, Register lo) { - negl(lo); - adcl(hi, 0); - negl(hi); -} - - -void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) { - // Multiplication of two Java long values stored on the stack - // as illustrated below. Result is in rdx:rax. - // - // rsp ---> [ ?? ] \ \ - // .... | y_rsp_offset | - // [ y_lo ] / (in bytes) | x_rsp_offset - // [ y_hi ] | (in bytes) - // .... | - // [ x_lo ] / - // [ x_hi ] - // .... 
- // - // Basic idea: lo(result) = lo(x_lo * y_lo) - // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi) - Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset); - Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset); - Label quick; - // load x_hi, y_hi and check if quick - // multiplication is possible - movl(rbx, x_hi); - movl(rcx, y_hi); - movl(rax, rbx); - orl(rbx, rcx); // rbx, = 0 <=> x_hi = 0 and y_hi = 0 - jcc(Assembler::zero, quick); // if rbx, = 0 do quick multiply - // do full multiplication - // 1st step - mull(y_lo); // x_hi * y_lo - movl(rbx, rax); // save lo(x_hi * y_lo) in rbx, - // 2nd step - movl(rax, x_lo); - mull(rcx); // x_lo * y_hi - addl(rbx, rax); // add lo(x_lo * y_hi) to rbx, - // 3rd step - bind(quick); // note: rbx, = 0 if quick multiply! - movl(rax, x_lo); - mull(y_lo); // x_lo * y_lo - addl(rdx, rbx); // correct hi(x_lo * y_lo) -} - - -void MacroAssembler::lshl(Register hi, Register lo) { - // Java shift left long support (semantics as described in JVM spec., p.305) - // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n)) - // shift value is in rcx ! - assert(hi != rcx, "must not use rcx"); - assert(lo != rcx, "must not use rcx"); - const Register s = rcx; // shift count - const int n = BitsPerWord; - Label L; - andl(s, 0x3f); // s := s & 0x3f (s < 0x40) - cmpl(s, n); // if (s < n) - jcc(Assembler::less, L); // else (s >= n) - movl(hi, lo); // x := x << n - xorl(lo, lo); - // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n! - bind(L); // s (mod n) < n - shldl(hi, lo); // x := x << s - shll(lo); -} - - -void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) { - // Java shift right long support (semantics as described in JVM spec., p.306 & p.310) - // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n)) - assert(hi != rcx, "must not use rcx"); - assert(lo != rcx, "must not use rcx"); - const Register s = rcx; // shift count - const int n = BitsPerWord; - Label L; - andl(s, 0x3f); // s := s & 0x3f (s < 0x40) - cmpl(s, n); // if (s < n) - jcc(Assembler::less, L); // else (s >= n) - movl(lo, hi); // x := x >> n - if (sign_extension) sarl(hi, 31); - else xorl(hi, hi); - // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n! - bind(L); // s (mod n) < n - shrdl(lo, hi); // x := x >> s - if (sign_extension) sarl(hi); - else shrl(hi); -} - - -// Note: y_lo will be destroyed -void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) { - // Long compare for Java (semantics as described in JVM spec.) - Label high, low, done; - - cmpl(x_hi, y_hi); - jcc(Assembler::less, low); - jcc(Assembler::greater, high); - // x_hi is the return register - xorl(x_hi, x_hi); - cmpl(x_lo, y_lo); - jcc(Assembler::below, low); - jcc(Assembler::equal, done); - - bind(high); - xorl(x_hi, x_hi); - increment(x_hi); - jmp(done); - - bind(low); - xorl(x_hi, x_hi); - decrement(x_hi); - - bind(done); -} - - -void MacroAssembler::save_rax(Register tmp) { - if (tmp == noreg) pushl(rax); - else if (tmp != rax) movl(tmp, rax); -} - - -void MacroAssembler::restore_rax(Register tmp) { - if (tmp == noreg) popl(rax); - else if (tmp != rax) movl(rax, tmp); -} - - -void MacroAssembler::fremr(Register tmp) { - save_rax(tmp); - { Label L; - bind(L); - fprem(); - fwait(); fnstsw_ax(); - sahf(); - jcc(Assembler::parity, L); - } - restore_rax(tmp); - // Result is in ST0. 
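
lmul builds a 64-bit product out of 32-bit halves using only the partial products that can reach the low 64 bits: lo(result) = lo(x_lo*y_lo), and hi(result) = hi(x_lo*y_lo) + lo(x_hi*y_lo) + lo(x_lo*y_hi); the x_hi*y_hi term only influences bits 64 and up, so it is dropped. The identity, checked with 64-bit arithmetic that the 32-bit code itself cannot use:

    #include <cstdint>
    #include <cstdio>

    // Recompute x*y (mod 2^64) from 32-bit halves the way MacroAssembler::lmul does.
    static uint64_t lmul_from_halves(uint64_t x, uint64_t y) {
        uint32_t x_lo = (uint32_t)x, x_hi = (uint32_t)(x >> 32);
        uint32_t y_lo = (uint32_t)y, y_hi = (uint32_t)(y >> 32);

        uint64_t p = (uint64_t)x_lo * y_lo;       // the full 64-bit "mull y_lo"
        uint32_t hi = (uint32_t)(p >> 32)
                    + x_hi * y_lo                  // lo(x_hi * y_lo), wraps mod 2^32
                    + x_lo * y_hi;                 // lo(x_lo * y_hi), wraps mod 2^32
        return ((uint64_t)hi << 32) | (uint32_t)p;
    }

    int main() {
        uint64_t x = 0x123456789ABCDEF0ULL, y = 0x0FEDCBA987654321ULL;
        printf("%d\n", lmul_from_halves(x, y) == x * y);   // prints 1
        return 0;
    }
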
- // Note: fxch & fpop to get rid of ST1 - // (otherwise FPU stack could overflow eventually) - fxch(1); - fpop(); -} - - -static const double pi_4 = 0.7853981633974483; - -void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) { - // A hand-coded argument reduction for values in fabs(pi/4, pi/2) - // was attempted in this code; unfortunately it appears that the - // switch to 80-bit precision and back causes this to be - // unprofitable compared with simply performing a runtime call if - // the argument is out of the (-pi/4, pi/4) range. - - Register tmp = noreg; - if (!VM_Version::supports_cmov()) { - // fcmp needs a temporary so preserve rbx, - tmp = rbx; - pushl(tmp); - } - - Label slow_case, done; - - // x ?<= pi/4 - fld_d(ExternalAddress((address)&pi_4)); - fld_s(1); // Stack: X PI/4 X - fabs(); // Stack: |X| PI/4 X - fcmp(tmp); - jcc(Assembler::above, slow_case); - - // fastest case: -pi/4 <= x <= pi/4 - switch(trig) { - case 's': - fsin(); - break; - case 'c': - fcos(); - break; - case 't': - ftan(); - break; - default: - assert(false, "bad intrinsic"); - break; - } - jmp(done); - - // slow case: runtime call - bind(slow_case); - // Preserve registers across runtime call - pushad(); - int incoming_argument_and_return_value_offset = -1; - if (num_fpu_regs_in_use > 1) { - // Must preserve all other FPU regs (could alternatively convert - // SharedRuntime::dsin and dcos into assembly routines known not to trash - // FPU state, but can not trust C compiler) - NEEDS_CLEANUP; - // NOTE that in this case we also push the incoming argument to - // the stack and restore it later; we also use this stack slot to - // hold the return value from dsin or dcos. - for (int i = 0; i < num_fpu_regs_in_use; i++) { - subl(rsp, wordSize*2); - fstp_d(Address(rsp, 0)); - } - incoming_argument_and_return_value_offset = 2*wordSize*(num_fpu_regs_in_use-1); - fld_d(Address(rsp, incoming_argument_and_return_value_offset)); - } - subl(rsp, wordSize*2); - fstp_d(Address(rsp, 0)); - // NOTE: we must not use call_VM_leaf here because that requires a - // complete interpreter frame in debug mode -- same bug as 4387334 - NEEDS_CLEANUP; - // Need to add stack banging before this runtime call if it needs to - // be taken; however, there is no generic stack banging routine at - // the MacroAssembler level - switch(trig) { - case 's': - { - call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dsin))); - } - break; - case 'c': - { - call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dcos))); - } - break; - case 't': - { - call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtan))); - } - break; - default: - assert(false, "bad intrinsic"); - break; - } - addl(rsp, wordSize * 2); - if (num_fpu_regs_in_use > 1) { - // Must save return value to stack and then restore entire FPU stack - fstp_d(Address(rsp, incoming_argument_and_return_value_offset)); - for (int i = 0; i < num_fpu_regs_in_use; i++) { - fld_d(Address(rsp, 0)); - addl(rsp, wordSize*2); - } - } - popad(); - - // Come here with result in F-TOS - bind(done); - - if (tmp != noreg) { - popl(tmp); - } -} - -void MacroAssembler::jC2(Register tmp, Label& L) { - // set parity bit if FPU flag C2 is set (via rax) - save_rax(tmp); - fwait(); fnstsw_ax(); - sahf(); - restore_rax(tmp); - // branch - jcc(Assembler::parity, L); -} - - -void MacroAssembler::jnC2(Register tmp, Label& L) { - // set parity bit if FPU flag C2 is set (via rax) - save_rax(tmp); - fwait(); fnstsw_ax(); - sahf(); - restore_rax(tmp); - // branch - 
jcc(Assembler::noParity, L); -} - - -void MacroAssembler::fcmp(Register tmp) { - fcmp(tmp, 1, true, true); -} - - -void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) { - assert(!pop_right || pop_left, "usage error"); - if (VM_Version::supports_cmov()) { - assert(tmp == noreg, "unneeded temp"); - if (pop_left) { - fucomip(index); - } else { - fucomi(index); - } - if (pop_right) { - fpop(); - } - } else { - assert(tmp != noreg, "need temp"); - if (pop_left) { - if (pop_right) { - fcompp(); - } else { - fcomp(index); - } - } else { - fcom(index); - } - // convert FPU condition into eflags condition via rax, - save_rax(tmp); - fwait(); fnstsw_ax(); - sahf(); - restore_rax(tmp); - } - // condition codes set as follows: - // - // CF (corresponds to C0) if x < y - // PF (corresponds to C2) if unordered - // ZF (corresponds to C3) if x = y -} - - -void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) { - fcmp2int(dst, unordered_is_less, 1, true, true); -} - - -void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) { - fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right); - Label L; - if (unordered_is_less) { - movl(dst, -1); - jcc(Assembler::parity, L); - jcc(Assembler::below , L); - movl(dst, 0); - jcc(Assembler::equal , L); - increment(dst); - } else { // unordered is greater - movl(dst, 1); - jcc(Assembler::parity, L); - jcc(Assembler::above , L); - movl(dst, 0); - jcc(Assembler::equal , L); - decrement(dst); - } - bind(L); -} - -void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { - ucomiss(opr1, opr2); - - Label L; - if (unordered_is_less) { - movl(dst, -1); - jcc(Assembler::parity, L); - jcc(Assembler::below , L); - movl(dst, 0); - jcc(Assembler::equal , L); - increment(dst); - } else { // unordered is greater - movl(dst, 1); - jcc(Assembler::parity, L); - jcc(Assembler::above , L); - movl(dst, 0); - jcc(Assembler::equal , L); - decrement(dst); - } - bind(L); -} - -void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { - ucomisd(opr1, opr2); - - Label L; - if (unordered_is_less) { - movl(dst, -1); - jcc(Assembler::parity, L); - jcc(Assembler::below , L); - movl(dst, 0); - jcc(Assembler::equal , L); - increment(dst); - } else { // unordered is greater - movl(dst, 1); - jcc(Assembler::parity, L); - jcc(Assembler::above , L); - movl(dst, 0); - jcc(Assembler::equal , L); - decrement(dst); - } - bind(L); -} - - - -void MacroAssembler::fpop() { - ffree(); - fincstp(); -} - - -void MacroAssembler::sign_extend_short(Register reg) { - if (VM_Version::is_P6()) { - movsxw(reg, reg); - } else { - shll(reg, 16); - sarl(reg, 16); - } -} - - -void MacroAssembler::sign_extend_byte(Register reg) { - if (VM_Version::is_P6() && reg->has_byte_register()) { - movsxb(reg, reg); - } else { - shll(reg, 24); - sarl(reg, 24); - } -} - - -void MacroAssembler::division_with_shift (Register reg, int shift_value) { - assert (shift_value > 0, "illegal shift value"); - Label _is_positive; - testl (reg, reg); - jcc (Assembler::positive, _is_positive); - int offset = (1 << shift_value) - 1 ; - - increment(reg, offset); - - bind (_is_positive); - sarl(reg, shift_value); -} - - -void MacroAssembler::round_to(Register reg, int modulus) { - addl(reg, modulus - 1); - andl(reg, -modulus); -} - -// C++ bool manipulation - -void MacroAssembler::movbool(Register dst, Address src) { - if(sizeof(bool) == 1) - 
movb(dst, src); - else if(sizeof(bool) == 2) - movw(dst, src); - else if(sizeof(bool) == 4) - movl(dst, src); - else - // unsupported - ShouldNotReachHere(); -} - -void MacroAssembler::movbool(Address dst, bool boolconst) { - if(sizeof(bool) == 1) - movb(dst, (int) boolconst); - else if(sizeof(bool) == 2) - movw(dst, (int) boolconst); - else if(sizeof(bool) == 4) - movl(dst, (int) boolconst); - else - // unsupported - ShouldNotReachHere(); -} - -void MacroAssembler::movbool(Address dst, Register src) { - if(sizeof(bool) == 1) - movb(dst, src); - else if(sizeof(bool) == 2) - movw(dst, src); - else if(sizeof(bool) == 4) - movl(dst, src); - else - // unsupported - ShouldNotReachHere(); -} - -void MacroAssembler::testbool(Register dst) { - if(sizeof(bool) == 1) - testb(dst, (int) 0xff); - else if(sizeof(bool) == 2) { - // testw implementation needed for two byte bools - ShouldNotReachHere(); - } else if(sizeof(bool) == 4) - testl(dst, dst); - else - // unsupported - ShouldNotReachHere(); -} - -void MacroAssembler::verify_oop(Register reg, const char* s) { - if (!VerifyOops) return; - // Pass register number to verify_oop_subroutine - char* b = new char[strlen(s) + 50]; - sprintf(b, "verify_oop: %s: %s", reg->name(), s); - pushl(rax); // save rax, - pushl(reg); // pass register argument - ExternalAddress buffer((address) b); - pushptr(buffer.addr()); - // call indirectly to solve generation ordering problem - movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); - call(rax); -} - - -void MacroAssembler::verify_oop_addr(Address addr, const char* s) { - if (!VerifyOops) return; - // QQQ fix this - // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord); - // Pass register number to verify_oop_subroutine - char* b = new char[strlen(s) + 50]; - sprintf(b, "verify_oop_addr: %s", s); - pushl(rax); // save rax, - // addr may contain rsp so we will have to adjust it based on the push - // we just did - if (addr.uses(rsp)) { - leal(rax, addr); - pushl(Address(rax, BytesPerWord)); - } else { - pushl(addr); - } - ExternalAddress buffer((address) b); - // pass msg argument - pushptr(buffer.addr()); - // call indirectly to solve generation ordering problem - movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); - call(rax); - // Caller pops the arguments and restores rax, from the stack -} - - -void MacroAssembler::stop(const char* msg) { - ExternalAddress message((address)msg); - // push address of message - pushptr(message.addr()); - { Label L; call(L, relocInfo::none); bind(L); } // push eip - pushad(); // push registers - call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug))); - hlt(); -} - - -void MacroAssembler::warn(const char* msg) { - push_CPU_state(); - - ExternalAddress message((address) msg); - // push address of message - pushptr(message.addr()); - - call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning))); - addl(rsp, wordSize); // discard argument - pop_CPU_state(); -} - - -void MacroAssembler::debug(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) { - // In order to get locks to work, we need to fake a in_VM state - JavaThread* thread = JavaThread::current(); - JavaThreadState saved_state = thread->thread_state(); - thread->set_thread_state(_thread_in_vm); - if (ShowMessageBoxOnError) { - JavaThread* thread = JavaThread::current(); - JavaThreadState saved_state = thread->thread_state(); - thread->set_thread_state(_thread_in_vm); - ttyLocker ttyl; 
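
Returning to division_with_shift and round_to a little further up: an arithmetic right shift alone rounds toward negative infinity, so Java's truncating division by 2^shift needs negative dividends biased by (2^shift - 1) first, and round_to is the usual align-up (add modulus-1, then mask with -modulus). Both checked in C++ (assuming >> acts as an arithmetic shift on negative ints, as sarl does on the hardware):

    #include <cassert>
    #include <cstdio>

    // Signed division by 2^shift with round-toward-zero, as division_with_shift does:
    // bias negative dividends by (2^shift - 1) before the arithmetic shift.
    static int div_pow2(int x, int shift) {
        int bias = (1 << shift) - 1;
        if (x < 0) x += bias;          // the increment(reg, offset) on the negative path
        return x >> shift;
    }

    // Align-up as in round_to: add modulus-1, then clear the low bits.
    static int round_to(int x, int modulus) {
        return (x + modulus - 1) & -modulus;
    }

    int main() {
        for (int x = -40; x <= 40; x++) {
            assert(div_pow2(x, 3) == x / 8);   // matches Java/C truncating division
        }
        printf("%d\n", round_to(13, 8));       // 16
        return 0;
    }
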
- if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { - BytecodeCounter::print(); - } - // To see where a verify_oop failed, get $ebx+40/X for this frame. - // This is the value of eip which points to where verify_oop will return. - if (os::message_box(msg, "Execution stopped, print registers?")) { - tty->print_cr("eip = 0x%08x", eip); - tty->print_cr("rax, = 0x%08x", rax); - tty->print_cr("rbx, = 0x%08x", rbx); - tty->print_cr("rcx = 0x%08x", rcx); - tty->print_cr("rdx = 0x%08x", rdx); - tty->print_cr("rdi = 0x%08x", rdi); - tty->print_cr("rsi = 0x%08x", rsi); - tty->print_cr("rbp, = 0x%08x", rbp); - tty->print_cr("rsp = 0x%08x", rsp); - BREAKPOINT; - } - } else { - ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg); - assert(false, "DEBUG MESSAGE"); - } - ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); -} - - - -void MacroAssembler::os_breakpoint() { - // instead of directly emitting a breakpoint, call os:breakpoint for better debugability - // (e.g., MSVC can't call ps() otherwise) - call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); -} - - -void MacroAssembler::push_fTOS() { - subl(rsp, 2 * wordSize); - fstp_d(Address(rsp, 0)); -} - - -void MacroAssembler::pop_fTOS() { - fld_d(Address(rsp, 0)); - addl(rsp, 2 * wordSize); -} - - -void MacroAssembler::empty_FPU_stack() { - if (VM_Version::supports_mmx()) { - emms(); - } else { - for (int i = 8; i-- > 0; ) ffree(i); - } -} - - -class ControlWord { - public: - int32_t _value; - - int rounding_control() const { return (_value >> 10) & 3 ; } - int precision_control() const { return (_value >> 8) & 3 ; } - bool precision() const { return ((_value >> 5) & 1) != 0; } - bool underflow() const { return ((_value >> 4) & 1) != 0; } - bool overflow() const { return ((_value >> 3) & 1) != 0; } - bool zero_divide() const { return ((_value >> 2) & 1) != 0; } - bool denormalized() const { return ((_value >> 1) & 1) != 0; } - bool invalid() const { return ((_value >> 0) & 1) != 0; } - - void print() const { - // rounding control - const char* rc; - switch (rounding_control()) { - case 0: rc = "round near"; break; - case 1: rc = "round down"; break; - case 2: rc = "round up "; break; - case 3: rc = "chop "; break; - }; - // precision control - const char* pc; - switch (precision_control()) { - case 0: pc = "24 bits "; break; - case 1: pc = "reserved"; break; - case 2: pc = "53 bits "; break; - case 3: pc = "64 bits "; break; - }; - // flags - char f[9]; - f[0] = ' '; - f[1] = ' '; - f[2] = (precision ()) ? 'P' : 'p'; - f[3] = (underflow ()) ? 'U' : 'u'; - f[4] = (overflow ()) ? 'O' : 'o'; - f[5] = (zero_divide ()) ? 'Z' : 'z'; - f[6] = (denormalized()) ? 'D' : 'd'; - f[7] = (invalid ()) ? 
'I' : 'i'; - f[8] = '\x0'; - // output - printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); - } - -}; - - -class StatusWord { - public: - int32_t _value; - - bool busy() const { return ((_value >> 15) & 1) != 0; } - bool C3() const { return ((_value >> 14) & 1) != 0; } - bool C2() const { return ((_value >> 10) & 1) != 0; } - bool C1() const { return ((_value >> 9) & 1) != 0; } - bool C0() const { return ((_value >> 8) & 1) != 0; } - int top() const { return (_value >> 11) & 7 ; } - bool error_status() const { return ((_value >> 7) & 1) != 0; } - bool stack_fault() const { return ((_value >> 6) & 1) != 0; } - bool precision() const { return ((_value >> 5) & 1) != 0; } - bool underflow() const { return ((_value >> 4) & 1) != 0; } - bool overflow() const { return ((_value >> 3) & 1) != 0; } - bool zero_divide() const { return ((_value >> 2) & 1) != 0; } - bool denormalized() const { return ((_value >> 1) & 1) != 0; } - bool invalid() const { return ((_value >> 0) & 1) != 0; } - - void print() const { - // condition codes - char c[5]; - c[0] = (C3()) ? '3' : '-'; - c[1] = (C2()) ? '2' : '-'; - c[2] = (C1()) ? '1' : '-'; - c[3] = (C0()) ? '0' : '-'; - c[4] = '\x0'; - // flags - char f[9]; - f[0] = (error_status()) ? 'E' : '-'; - f[1] = (stack_fault ()) ? 'S' : '-'; - f[2] = (precision ()) ? 'P' : '-'; - f[3] = (underflow ()) ? 'U' : '-'; - f[4] = (overflow ()) ? 'O' : '-'; - f[5] = (zero_divide ()) ? 'Z' : '-'; - f[6] = (denormalized()) ? 'D' : '-'; - f[7] = (invalid ()) ? 'I' : '-'; - f[8] = '\x0'; - // output - printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); - } - -}; - - -class TagWord { - public: - int32_t _value; - - int tag_at(int i) const { return (_value >> (i*2)) & 3; } - - void print() const { - printf("%04x", _value & 0xFFFF); - } - -}; - - -class FPU_Register { - public: - int32_t _m0; - int32_t _m1; - int16_t _ex; - - bool is_indefinite() const { - return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; - } - - void print() const { - char sign = (_ex < 0) ? '-' : '+'; - const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; - printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); - }; - -}; - - -class FPU_State { - public: - enum { - register_size = 10, - number_of_registers = 8, - register_mask = 7 - }; - - ControlWord _control_word; - StatusWord _status_word; - TagWord _tag_word; - int32_t _error_offset; - int32_t _error_selector; - int32_t _data_offset; - int32_t _data_selector; - int8_t _register[register_size * number_of_registers]; - - int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } - FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } - - const char* tag_as_string(int tag) const { - switch (tag) { - case 0: return "valid"; - case 1: return "zero"; - case 2: return "special"; - case 3: return "empty"; - } - ShouldNotReachHere() - return NULL; - } - - void print() const { - // print computation registers - { int t = _status_word.top(); - for (int i = 0; i < number_of_registers; i++) { - int j = (i - t) & register_mask; - printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j); - st(j)->print(); - printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); - } - } - printf("\n"); - // print control registers - printf("ctrl = "); _control_word.print(); printf("\n"); - printf("stat = "); _status_word .print(); printf("\n"); - printf("tags = "); _tag_word .print(); printf("\n"); - } - -}; - - -class Flag_Register { - public: - int32_t _value; - - bool overflow() const { return ((_value >> 11) & 1) != 0; } - bool direction() const { return ((_value >> 10) & 1) != 0; } - bool sign() const { return ((_value >> 7) & 1) != 0; } - bool zero() const { return ((_value >> 6) & 1) != 0; } - bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } - bool parity() const { return ((_value >> 2) & 1) != 0; } - bool carry() const { return ((_value >> 0) & 1) != 0; } - - void print() const { - // flags - char f[8]; - f[0] = (overflow ()) ? 'O' : '-'; - f[1] = (direction ()) ? 'D' : '-'; - f[2] = (sign ()) ? 'S' : '-'; - f[3] = (zero ()) ? 'Z' : '-'; - f[4] = (auxiliary_carry()) ? 'A' : '-'; - f[5] = (parity ()) ? 'P' : '-'; - f[6] = (carry ()) ? 'C' : '-'; - f[7] = '\x0'; - // output - printf("%08x flags = %s", _value, f); - } - -}; - - -class IU_Register { - public: - int32_t _value; - - void print() const { - printf("%08x %11d", _value, _value); - } - -}; - - -class IU_State { - public: - Flag_Register _eflags; - IU_Register _rdi; - IU_Register _rsi; - IU_Register _rbp; - IU_Register _rsp; - IU_Register _rbx; - IU_Register _rdx; - IU_Register _rcx; - IU_Register _rax; - - void print() const { - // computation registers - printf("rax, = "); _rax.print(); printf("\n"); - printf("rbx, = "); _rbx.print(); printf("\n"); - printf("rcx = "); _rcx.print(); printf("\n"); - printf("rdx = "); _rdx.print(); printf("\n"); - printf("rdi = "); _rdi.print(); printf("\n"); - printf("rsi = "); _rsi.print(); printf("\n"); - printf("rbp, = "); _rbp.print(); printf("\n"); - printf("rsp = "); _rsp.print(); printf("\n"); - printf("\n"); - // control registers - printf("flgs = "); _eflags.print(); printf("\n"); - } -}; - - -class CPU_State { - public: - FPU_State _fpu_state; - IU_State _iu_state; - - void print() const { - printf("--------------------------------------------------\n"); - _iu_state .print(); - printf("\n"); - _fpu_state.print(); - printf("--------------------------------------------------\n"); - } - -}; - - -static void _print_CPU_state(CPU_State* state) { - state->print(); -}; - - -void MacroAssembler::print_CPU_state() { - push_CPU_state(); - pushl(rsp); // pass CPU state - call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state))); - addl(rsp, wordSize); // discard argument - pop_CPU_state(); -} - - -static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) { - static int counter = 0; - FPU_State* fs = &state->_fpu_state; - counter++; - // For leaf calls, only verify that the top few elements remain empty. - // We only need 1 empty at the top for C2 code. 
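
The ControlWord/StatusWord helpers above document the x87 word layouts this file relies on: rounding control in bits 10-11 and precision control in bits 8-9 of the control word, condition codes C0/C1/C2/C3 at status bits 8/9/10/14 and TOP at bits 11-13. A small decode of a control word, using 0x027F as the example value (round to nearest, 53-bit precision, all exceptions masked); whether that exact constant is what StubRoutines::_fpu_cntrl_wrd_std holds is an assumption here:

    #include <cstdint>
    #include <cstdio>

    // Field extraction mirroring the ControlWord accessors above.
    static int rounding_control(uint16_t cw)  { return (cw >> 10) & 3; }   // 0 = round to nearest
    static int precision_control(uint16_t cw) { return (cw >>  8) & 3; }   // 2 = 53-bit (Java double)
    static int exception_masks(uint16_t cw)   { return  cw        & 0x3F; }

    int main() {
        uint16_t cw = 0x027F;   // assumed "standard" Java control word; see the note above
        printf("rc=%d pc=%d masks=%02x\n",
               rounding_control(cw), precision_control(cw), exception_masks(cw));
        // prints: rc=0 pc=2 masks=3f
        return 0;
    }
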
- if( stack_depth < 0 ) { - if( fs->tag_for_st(7) != 3 ) { - printf("FPR7 not empty\n"); - state->print(); - assert(false, "error"); - return false; - } - return true; // All other stack states do not matter - } - - assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std, - "bad FPU control word"); - - // compute stack depth - int i = 0; - while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++; - int d = i; - while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++; - // verify findings - if (i != FPU_State::number_of_registers) { - // stack not contiguous - printf("%s: stack not contiguous at ST%d\n", s, i); - state->print(); - assert(false, "error"); - return false; - } - // check if computed stack depth corresponds to expected stack depth - if (stack_depth < 0) { - // expected stack depth is -stack_depth or less - if (d > -stack_depth) { - // too many elements on the stack - printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d); - state->print(); - assert(false, "error"); - return false; - } - } else { - // expected stack depth is stack_depth - if (d != stack_depth) { - // wrong stack depth - printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d); - state->print(); - assert(false, "error"); - return false; - } - } - // everything is cool - return true; -} - - -void MacroAssembler::verify_FPU(int stack_depth, const char* s) { - if (!VerifyFPU) return; - push_CPU_state(); - pushl(rsp); // pass CPU state - ExternalAddress msg((address) s); - // pass message string s - pushptr(msg.addr()); - pushl(stack_depth); // pass stack depth - call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU))); - addl(rsp, 3 * wordSize); // discard arguments - // check for error - { Label L; - testl(rax, rax); - jcc(Assembler::notZero, L); - int3(); // break if error condition - bind(L); - } - pop_CPU_state(); -} - - -void MacroAssembler::push_IU_state() { - pushad(); - pushfd(); -} - - -void MacroAssembler::pop_IU_state() { - popfd(); - popad(); -} - - -void MacroAssembler::push_FPU_state() { - subl(rsp, FPUStateSizeInWords * wordSize); - fnsave(Address(rsp, 0)); - fwait(); -} - - -void MacroAssembler::pop_FPU_state() { - frstor(Address(rsp, 0)); - addl(rsp, FPUStateSizeInWords * wordSize); -} - - -void MacroAssembler::push_CPU_state() { - push_IU_state(); - push_FPU_state(); -} - - -void MacroAssembler::pop_CPU_state() { - pop_FPU_state(); - pop_IU_state(); -} - - -void MacroAssembler::push_callee_saved_registers() { - pushl(rsi); - pushl(rdi); - pushl(rdx); - pushl(rcx); -} - - -void MacroAssembler::pop_callee_saved_registers() { - popl(rcx); - popl(rdx); - popl(rdi); - popl(rsi); -} - - -void MacroAssembler::set_word_if_not_zero(Register dst) { - xorl(dst, dst); - set_byte_if_not_zero(dst); -} - -// Write serialization page so VM thread can do a pseudo remote membar. -// We use the current thread pointer to calculate a thread specific -// offset to write to within the page. This minimizes bus traffic -// due to cache line collision. 
-void MacroAssembler::serialize_memory(Register thread, Register tmp) { - movl(tmp, thread); - shrl(tmp, os::get_serialize_page_shift_count()); - andl(tmp, (os::vm_page_size() - sizeof(int))); - - Address index(noreg, tmp, Address::times_1); - ExternalAddress page(os::get_memory_serialize_page()); - - movptr(ArrayAddress(page, index), tmp); -} - - -void MacroAssembler::verify_tlab() { -#ifdef ASSERT - if (UseTLAB && VerifyOops) { - Label next, ok; - Register t1 = rsi; - Register thread_reg = rbx; - - pushl(t1); - pushl(thread_reg); - get_thread(thread_reg); - - movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); - cmpl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); - jcc(Assembler::aboveEqual, next); - stop("assert(top >= start)"); - should_not_reach_here(); - - bind(next); - movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); - cmpl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); - jcc(Assembler::aboveEqual, ok); - stop("assert(top <= end)"); - should_not_reach_here(); - - bind(ok); - popl(thread_reg); - popl(t1); - } -#endif -} - - -// Defines obj, preserves var_size_in_bytes -void MacroAssembler::eden_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, - Register t1, Label& slow_case) { - assert(obj == rax, "obj must be in rax, for cmpxchg"); - assert_different_registers(obj, var_size_in_bytes, t1); - Register end = t1; - Label retry; - bind(retry); - ExternalAddress heap_top((address) Universe::heap()->top_addr()); - movptr(obj, heap_top); - if (var_size_in_bytes == noreg) { - leal(end, Address(obj, con_size_in_bytes)); - } else { - leal(end, Address(obj, var_size_in_bytes, Address::times_1)); - } - // if end < obj then we wrapped around => object too long => slow case - cmpl(end, obj); - jcc(Assembler::below, slow_case); - cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr())); - jcc(Assembler::above, slow_case); - // Compare obj with the top addr, and if still equal, store the new top addr in - // end at the address of the top addr pointer. Sets ZF if was equal, and clears - // it otherwise. Use lock prefix for atomicity on MPs. - if (os::is_MP()) { - lock(); - } - cmpxchgptr(end, heap_top); - jcc(Assembler::notEqual, retry); -} - - -// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. -void MacroAssembler::tlab_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, - Register t1, Register t2, Label& slow_case) { - assert_different_registers(obj, t1, t2); - assert_different_registers(obj, var_size_in_bytes, t1); - Register end = t2; - Register thread = t1; - - verify_tlab(); - - get_thread(thread); - - movl(obj, Address(thread, JavaThread::tlab_top_offset())); - if (var_size_in_bytes == noreg) { - leal(end, Address(obj, con_size_in_bytes)); - } else { - leal(end, Address(obj, var_size_in_bytes, Address::times_1)); - } - cmpl(end, Address(thread, JavaThread::tlab_end_offset())); - jcc(Assembler::above, slow_case); - - // update the tlab top pointer - movl(Address(thread, JavaThread::tlab_top_offset()), end); - - // recover var_size_in_bytes if necessary - if (var_size_in_bytes == end) { - subl(var_size_in_bytes, obj); - } - verify_tlab(); -} - -// Preserves rbx, and rdx. 
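
eden_allocate above is a bump-pointer allocation: read the shared top, compute end = obj + size, bail out to the slow path on wrap-around or when end passes the heap limit, then compare-and-swap the new top and retry if another thread won the race. The same loop with std::atomic, as a shape sketch rather than anything resembling the VM's real heap code:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Bump-pointer allocation with a CAS retry loop, shaped like eden_allocate.
    // Returns nullptr where the assembly would jump to slow_case.
    static std::atomic<uintptr_t> heap_top;
    static uintptr_t              heap_end;

    static void* eden_allocate(size_t size) {
        for (;;) {
            uintptr_t obj = heap_top.load();
            uintptr_t end = obj + size;
            if (end < obj)      return nullptr;   // wrapped around: object too long
            if (end > heap_end) return nullptr;   // past the heap limit
            // lock cmpxchg: install the new top only if nobody else moved it meanwhile.
            if (heap_top.compare_exchange_weak(obj, end)) return (void*)obj;
            // lost the race -- reload and retry
        }
    }

    int main() {
        static char arena[1024];
        heap_top = (uintptr_t)arena;
        heap_end = (uintptr_t)arena + sizeof(arena);
        printf("%p %p\n", eden_allocate(64), eden_allocate(64));   // two distinct 64-byte chunks
        return 0;
    }
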
-void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) { - Register top = rax; - Register t1 = rcx; - Register t2 = rsi; - Register thread_reg = rdi; - assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx); - Label do_refill, discard_tlab; - - if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { - // No allocation in the shared eden. - jmp(slow_case); - } - - get_thread(thread_reg); - - movl(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); - movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); - - // calculate amount of free space - subl(t1, top); - shrl(t1, LogHeapWordSize); - - // Retain tlab and allocate object in shared space if - // the amount free in the tlab is too large to discard. - cmpl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); - jcc(Assembler::lessEqual, discard_tlab); - - // Retain - movl(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment()); - addl(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2); - if (TLABStats) { - // increment number of slow_allocations - addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1); - } - jmp(try_eden); - - bind(discard_tlab); - if (TLABStats) { - // increment number of refills - addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1); - // accumulate wastage -- t1 is amount free in tlab - addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1); - } - - // if tlab is currently allocated (top or end != null) then - // fill [top, end + alignment_reserve) with array object - testl (top, top); - jcc(Assembler::zero, do_refill); - - // set up the mark word - movl(Address(top, oopDesc::mark_offset_in_bytes()), (int)markOopDesc::prototype()->copy_set_hash(0x2)); - // set the length to the remaining space - subl(t1, typeArrayOopDesc::header_size(T_INT)); - addl(t1, ThreadLocalAllocBuffer::alignment_reserve()); - shll(t1, log2_intptr(HeapWordSize/sizeof(jint))); - movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1); - // set klass to intArrayKlass - // dubious reloc why not an oop reloc? - movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr())); - movl(Address(top, oopDesc::klass_offset_in_bytes()), t1); - - // refill the tlab with an eden allocation - bind(do_refill); - movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset()))); - shll(t1, LogHeapWordSize); - // add object_size ?? - eden_allocate(top, t1, 0, t2, slow_case); - - // Check that t1 was preserved in eden_allocate. 
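
tlab_refill's policy is visible above: compute the free space left in the TLAB in heap words; if it is still larger than the per-thread refill_waste_limit, keep the TLAB (bump the limit and count a slow allocation) and satisfy this request straight from shared eden, otherwise discard the TLAB by overwriting the leftover space with an int-array filler object and refill it. The decision itself as a pure function (field and parameter names are illustrative):

    #include <cstdio>

    // Keep-or-discard decision from tlab_refill, on plain integers.
    struct TlabState {
        long free_words;            // (end - top) >> LogHeapWordSize
        long refill_waste_limit;    // per-thread, grows each time we keep the TLAB
        long slow_allocations;
    };

    enum Decision { kKeepTlabAllocateInEden, kDiscardAndRefill };

    static Decision tlab_refill_decision(TlabState& t, long waste_limit_increment) {
        if (t.free_words <= t.refill_waste_limit) {
            return kDiscardAndRefill;                      // cheap enough to throw the remainder away
        }
        t.refill_waste_limit += waste_limit_increment;     // tolerate a little more waste next time
        t.slow_allocations++;                              // TLABStats bookkeeping
        return kKeepTlabAllocateInEden;
    }

    int main() {
        TlabState t = { /*free_words=*/200, /*refill_waste_limit=*/64, 0 };
        printf("%d\n", tlab_refill_decision(t, 4));   // 0: plenty left, keep the TLAB
        t.free_words = 10;
        printf("%d\n", tlab_refill_decision(t, 4));   // 1: little left, discard and refill
        return 0;
    }
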
-#ifdef ASSERT - if (UseTLAB) { - Label ok; - Register tsize = rsi; - assert_different_registers(tsize, thread_reg, t1); - pushl(tsize); - movl(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset()))); - shll(tsize, LogHeapWordSize); - cmpl(t1, tsize); - jcc(Assembler::equal, ok); - stop("assert(t1 != tlab size)"); - should_not_reach_here(); - - bind(ok); - popl(tsize); - } -#endif - movl(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top); - movl(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top); - addl(top, t1); - subl(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - movl(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top); - verify_tlab(); - jmp(retry); -} - - -int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg, - bool swap_reg_contains_mark, - Label& done, Label* slow_case, - BiasedLockingCounters* counters) { - assert(UseBiasedLocking, "why call this otherwise?"); - assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg"); - assert_different_registers(lock_reg, obj_reg, swap_reg); - - if (PrintBiasedLockingStatistics && counters == NULL) - counters = BiasedLocking::counters(); - - bool need_tmp_reg = false; - if (tmp_reg == noreg) { - need_tmp_reg = true; - tmp_reg = lock_reg; - } else { - assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg); - } - assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); - Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes()); - Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes()); - Address saved_mark_addr(lock_reg, 0); - - // Biased locking - // See whether the lock is currently biased toward our thread and - // whether the epoch is still valid - // Note that the runtime guarantees sufficient alignment of JavaThread - // pointers to allow age to be placed into low bits - // First check to see whether biasing is even enabled for this object - Label cas_label; - int null_check_offset = -1; - if (!swap_reg_contains_mark) { - null_check_offset = offset(); - movl(swap_reg, mark_addr); - } - if (need_tmp_reg) { - pushl(tmp_reg); - } - movl(tmp_reg, swap_reg); - andl(tmp_reg, markOopDesc::biased_lock_mask_in_place); - cmpl(tmp_reg, markOopDesc::biased_lock_pattern); - if (need_tmp_reg) { - popl(tmp_reg); - } - jcc(Assembler::notEqual, cas_label); - // The bias pattern is present in the object's header. Need to check - // whether the bias owner and the epoch are both still current. - // Note that because there is no current thread register on x86 we - // need to store off the mark word we read out of the object to - // avoid reloading it and needing to recheck invariants below. This - // store is unfortunate but it makes the overall code shorter and - // simpler. 
- movl(saved_mark_addr, swap_reg); - if (need_tmp_reg) { - pushl(tmp_reg); - } - get_thread(tmp_reg); - xorl(swap_reg, tmp_reg); - if (swap_reg_contains_mark) { - null_check_offset = offset(); - } - movl(tmp_reg, klass_addr); - xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); - andl(swap_reg, ~((int) markOopDesc::age_mask_in_place)); - if (need_tmp_reg) { - popl(tmp_reg); - } - if (counters != NULL) { - cond_inc32(Assembler::zero, - ExternalAddress((address)counters->biased_lock_entry_count_addr())); - } - jcc(Assembler::equal, done); - - Label try_revoke_bias; - Label try_rebias; - - // At this point we know that the header has the bias pattern and - // that we are not the bias owner in the current epoch. We need to - // figure out more details about the state of the header in order to - // know what operations can be legally performed on the object's - // header. - - // If the low three bits in the xor result aren't clear, that means - // the prototype header is no longer biased and we have to revoke - // the bias on this object. - testl(swap_reg, markOopDesc::biased_lock_mask_in_place); - jcc(Assembler::notZero, try_revoke_bias); - - // Biasing is still enabled for this data type. See whether the - // epoch of the current bias is still valid, meaning that the epoch - // bits of the mark word are equal to the epoch bits of the - // prototype header. (Note that the prototype header's epoch bits - // only change at a safepoint.) If not, attempt to rebias the object - // toward the current thread. Note that we must be absolutely sure - // that the current epoch is invalid in order to do this because - // otherwise the manipulations it performs on the mark word are - // illegal. - testl(swap_reg, markOopDesc::epoch_mask_in_place); - jcc(Assembler::notZero, try_rebias); - - // The epoch of the current bias is still valid but we know nothing - // about the owner; it might be set or it might be clear. Try to - // acquire the bias of the object using an atomic operation. If this - // fails we will go in to the runtime to revoke the object's bias. - // Note that we first construct the presumed unbiased header so we - // don't accidentally blow away another thread's valid bias. - movl(swap_reg, saved_mark_addr); - andl(swap_reg, - markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); - if (need_tmp_reg) { - pushl(tmp_reg); - } - get_thread(tmp_reg); - orl(tmp_reg, swap_reg); - if (os::is_MP()) { - lock(); - } - cmpxchg(tmp_reg, Address(obj_reg, 0)); - if (need_tmp_reg) { - popl(tmp_reg); - } - // If the biasing toward our thread failed, this means that - // another thread succeeded in biasing it toward itself and we - // need to revoke that bias. The revocation will occur in the - // interpreter runtime in the slow case. - if (counters != NULL) { - cond_inc32(Assembler::zero, - ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr())); - } - if (slow_case != NULL) { - jcc(Assembler::notZero, *slow_case); - } - jmp(done); - - bind(try_rebias); - // At this point we know the epoch has expired, meaning that the - // current "bias owner", if any, is actually invalid. Under these - // circumstances _only_, we are allowed to use the current header's - // value as the comparison value when doing the cas to acquire the - // bias in the current epoch. In other words, we allow transfer of - // the bias from one thread to another directly in this situation. 
- // - // FIXME: due to a lack of registers we currently blow away the age - // bits in this situation. Should attempt to preserve them. - if (need_tmp_reg) { - pushl(tmp_reg); - } - get_thread(tmp_reg); - movl(swap_reg, klass_addr); - orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); - movl(swap_reg, saved_mark_addr); - if (os::is_MP()) { - lock(); - } - cmpxchg(tmp_reg, Address(obj_reg, 0)); - if (need_tmp_reg) { - popl(tmp_reg); - } - // If the biasing toward our thread failed, then another thread - // succeeded in biasing it toward itself and we need to revoke that - // bias. The revocation will occur in the runtime in the slow case. - if (counters != NULL) { - cond_inc32(Assembler::zero, - ExternalAddress((address)counters->rebiased_lock_entry_count_addr())); - } - if (slow_case != NULL) { - jcc(Assembler::notZero, *slow_case); - } - jmp(done); - - bind(try_revoke_bias); - // The prototype mark in the klass doesn't have the bias bit set any - // more, indicating that objects of this data type are not supposed - // to be biased any more. We are going to try to reset the mark of - // this object to the prototype value and fall through to the - // CAS-based locking scheme. Note that if our CAS fails, it means - // that another thread raced us for the privilege of revoking the - // bias of this particular object, so it's okay to continue in the - // normal locking code. - // - // FIXME: due to a lack of registers we currently blow away the age - // bits in this situation. Should attempt to preserve them. - movl(swap_reg, saved_mark_addr); - if (need_tmp_reg) { - pushl(tmp_reg); - } - movl(tmp_reg, klass_addr); - movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); - if (os::is_MP()) { - lock(); - } - cmpxchg(tmp_reg, Address(obj_reg, 0)); - if (need_tmp_reg) { - popl(tmp_reg); - } - // Fall through to the normal CAS-based lock, because no matter what - // the result of the above CAS, some thread must have succeeded in - // removing the bias bit from the object's header. - if (counters != NULL) { - cond_inc32(Assembler::zero, - ExternalAddress((address)counters->revoked_lock_entry_count_addr())); - } - - bind(cas_label); - - return null_check_offset; -} - - -void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) { - assert(UseBiasedLocking, "why call this otherwise?"); - - // Check for biased locking unlock case, which is a no-op - // Note: we do not have to check the thread ID for two reasons. - // First, the interpreter checks for IllegalMonitorStateException at - // a higher level. Second, if the bias was revoked while we held the - // lock, the object could not be rebiased toward another thread, so - // the bias bit would be clear. 
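// The unlock-side test emitted just below reduces to a single mask-and-compare
// on the mark word.  A self-contained sketch (the two constants mirror the
// markOopDesc layout this code assumes -- low two bits are the lock bits, the
// third bit is the "biased" bit; treat the literal values as an assumption of
// the sketch and see markOop.hpp for the authoritative layout):

#include <cstdint>

const uintptr_t biased_lock_mask_in_place_model = 0x7;  // [biased][lock][lock] bits
const uintptr_t biased_lock_pattern_model       = 0x5;  // 0b101 = biasable/biased header
bool has_bias_pattern_model(uintptr_t mark) {
  return (mark & biased_lock_mask_in_place_model) == biased_lock_pattern_model;
}

// That single predicate is all biased_locking_exit() needs, for the reasons
// given in the comment above; the short mask-and-compare sequence follows.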
- movl(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); - andl(temp_reg, markOopDesc::biased_lock_mask_in_place); - cmpl(temp_reg, markOopDesc::biased_lock_pattern); - jcc(Assembler::equal, done); -} - - -Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { - switch (cond) { - // Note some conditions are synonyms for others - case Assembler::zero: return Assembler::notZero; - case Assembler::notZero: return Assembler::zero; - case Assembler::less: return Assembler::greaterEqual; - case Assembler::lessEqual: return Assembler::greater; - case Assembler::greater: return Assembler::lessEqual; - case Assembler::greaterEqual: return Assembler::less; - case Assembler::below: return Assembler::aboveEqual; - case Assembler::belowEqual: return Assembler::above; - case Assembler::above: return Assembler::belowEqual; - case Assembler::aboveEqual: return Assembler::below; - case Assembler::overflow: return Assembler::noOverflow; - case Assembler::noOverflow: return Assembler::overflow; - case Assembler::negative: return Assembler::positive; - case Assembler::positive: return Assembler::negative; - case Assembler::parity: return Assembler::noParity; - case Assembler::noParity: return Assembler::parity; - } - ShouldNotReachHere(); return Assembler::overflow; -} - - -void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) { - Condition negated_cond = negate_condition(cond); - Label L; - jcc(negated_cond, L); - atomic_incl(counter_addr); - bind(L); -} - -void MacroAssembler::atomic_incl(AddressLiteral counter_addr) { - pushfd(); - if (os::is_MP()) - lock(); - increment(counter_addr); - popfd(); -} - -SkipIfEqual::SkipIfEqual( - MacroAssembler* masm, const bool* flag_addr, bool value) { - _masm = masm; - _masm->cmp8(ExternalAddress((address)flag_addr), value); - _masm->jcc(Assembler::equal, _label); -} - -SkipIfEqual::~SkipIfEqual() { - _masm->bind(_label); -} - - -// Writes to stack successive pages until offset reached to check for -// stack overflow + shadow pages. This clobbers tmp. -void MacroAssembler::bang_stack_size(Register size, Register tmp) { - movl(tmp, rsp); - // Bang stack for total size given plus shadow page size. - // Bang one page at a time because large size can bang beyond yellow and - // red zones. - Label loop; - bind(loop); - movl(Address(tmp, (-os::vm_page_size())), size ); - subl(tmp, os::vm_page_size()); - subl(size, os::vm_page_size()); - jcc(Assembler::greater, loop); - - // Bang down shadow pages too. - // The -1 because we already subtracted 1 page. - for (int i = 0; i< StackShadowPages-1; i++) { - movl(Address(tmp, (-i*os::vm_page_size())), size ); - } -} --- old/hotspot/src/cpu/x86/vm/assembler_x86_32.hpp 2009-08-01 04:21:41.070212462 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,1507 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)assembler_x86_32.hpp 1.171 07/09/20 10:42:56 JVM" -#endif -/* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -class BiasedLockingCounters; - -// Contains all the definitions needed for x86 assembly code generation. - -// Calling convention -class Argument VALUE_OBJ_CLASS_SPEC { - public: - enum { -#ifdef _LP64 -#ifdef _WIN64 - n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) - n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... ) -#else - n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) - n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... ) -#endif // _WIN64 - n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ... - n_float_register_parameters_j = 8 // j_farg0, j_farg1, ... -#else - n_register_parameters = 0 // 0 registers used to pass arguments -#endif // _LP64 - }; -}; - - -#ifdef _LP64 -// Symbolically name the register arguments used by the c calling convention. -// Windows is different from linux/solaris. So much for standards... - -#ifdef _WIN64 - -REGISTER_DECLARATION(Register, c_rarg0, rcx); -REGISTER_DECLARATION(Register, c_rarg1, rdx); -REGISTER_DECLARATION(Register, c_rarg2, r8); -REGISTER_DECLARATION(Register, c_rarg3, r9); - -REGISTER_DECLARATION(FloatRegister, c_farg0, xmm0); -REGISTER_DECLARATION(FloatRegister, c_farg1, xmm1); -REGISTER_DECLARATION(FloatRegister, c_farg2, xmm2); -REGISTER_DECLARATION(FloatRegister, c_farg3, xmm3); - -#else - -REGISTER_DECLARATION(Register, c_rarg0, rdi); -REGISTER_DECLARATION(Register, c_rarg1, rsi); -REGISTER_DECLARATION(Register, c_rarg2, rdx); -REGISTER_DECLARATION(Register, c_rarg3, rcx); -REGISTER_DECLARATION(Register, c_rarg4, r8); -REGISTER_DECLARATION(Register, c_rarg5, r9); - -REGISTER_DECLARATION(FloatRegister, c_farg0, xmm0); -REGISTER_DECLARATION(FloatRegister, c_farg1, xmm1); -REGISTER_DECLARATION(FloatRegister, c_farg2, xmm2); -REGISTER_DECLARATION(FloatRegister, c_farg3, xmm3); -REGISTER_DECLARATION(FloatRegister, c_farg4, xmm4); -REGISTER_DECLARATION(FloatRegister, c_farg5, xmm5); -REGISTER_DECLARATION(FloatRegister, c_farg6, xmm6); -REGISTER_DECLARATION(FloatRegister, c_farg7, xmm7); - -#endif // _WIN64 - -// Symbolically name the register arguments used by the Java calling convention. -// We have control over the convention for java so we can do what we please. -// What pleases us is to offset the java calling convention so that when -// we call a suitable jni method the arguments are lined up and we don't -// have to do little shuffling. 
A suitable jni method is non-static and a -// small number of arguments (two fewer args on windows) -// -// |-------------------------------------------------------| -// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 | -// |-------------------------------------------------------| -// | rcx rdx r8 r9 rdi* rsi* | windows (* not a c_rarg) -// | rdi rsi rdx rcx r8 r9 | solaris/linux -// |-------------------------------------------------------| -// | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 | -// |-------------------------------------------------------| - -REGISTER_DECLARATION(Register, j_rarg0, c_rarg1); -REGISTER_DECLARATION(Register, j_rarg1, c_rarg2); -REGISTER_DECLARATION(Register, j_rarg2, c_rarg3); -// Windows runs out of register args here -#ifdef _WIN64 -REGISTER_DECLARATION(Register, j_rarg3, rdi); -REGISTER_DECLARATION(Register, j_rarg4, rsi); -#else -REGISTER_DECLARATION(Register, j_rarg3, c_rarg4); -REGISTER_DECLARATION(Register, j_rarg4, c_rarg5); -#endif /* _WIN64 */ -REGISTER_DECLARATION(Register, j_rarg5, c_rarg0); - -REGISTER_DECLARATION(FloatRegister, j_farg0, xmm0); -REGISTER_DECLARATION(FloatRegister, j_farg1, xmm1); -REGISTER_DECLARATION(FloatRegister, j_farg2, xmm2); -REGISTER_DECLARATION(FloatRegister, j_farg3, xmm3); -REGISTER_DECLARATION(FloatRegister, j_farg4, xmm4); -REGISTER_DECLARATION(FloatRegister, j_farg5, xmm5); -REGISTER_DECLARATION(FloatRegister, j_farg6, xmm6); -REGISTER_DECLARATION(FloatRegister, j_farg7, xmm7); - -REGISTER_DECLARATION(Register, rscratch1, r10); // volatile -REGISTER_DECLARATION(Register, rscratch2, r11); // volatile - -REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved - -#endif // _LP64 - -// Address is an abstraction used to represent a memory location -// using any of the amd64 addressing modes with one object. -// -// Note: A register location is represented via a Register, not -// via an address for efficiency & simplicity reasons. - -class ArrayAddress; - -class Address VALUE_OBJ_CLASS_SPEC { - public: - enum ScaleFactor { - no_scale = -1, - times_1 = 0, - times_2 = 1, - times_4 = 2, - times_8 = 3 - }; - - private: - Register _base; - Register _index; - ScaleFactor _scale; - int _disp; - RelocationHolder _rspec; - - // Easily misused constructor make them private -#ifndef _LP64 - Address(address loc, RelocationHolder spec); -#endif // _LP64 - - public: - // creation - Address() - : _base(noreg), - _index(noreg), - _scale(no_scale), - _disp(0) { - } - - // No default displacement otherwise Register can be implicitly - // converted to 0(Register) which is quite a different animal. - - Address(Register base, int disp) - : _base(base), - _index(noreg), - _scale(no_scale), - _disp(disp) { - } - - Address(Register base, Register index, ScaleFactor scale, int disp = 0) - : _base (base), - _index(index), - _scale(scale), - _disp (disp) { - assert(!index->is_valid() == (scale == Address::no_scale), - "inconsistent address"); - } - - // The following two overloads are used in connection with the - // ByteSize type (see sizes.hpp). They simplify the use of - // ByteSize'd arguments in assembly code. Note that their equivalent - // for the optimized build are the member functions with int disp - // argument since ByteSize is mapped to an int type in that case. - // - // Note: DO NOT introduce similar overloaded functions for WordSize - // arguments as in the optimized mode, both ByteSize and WordSize - // are mapped to the same type and thus the compiler cannot make a - // distinction anymore (=> compiler errors). 
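// A few concrete uses of the constructors above, showing how the
// base/index/scale/disp fields map onto x86 addressing modes (register choices
// are illustration only):
//
//   Address(rbp, -8);                          // [rbp - 8]
//   Address(rbx, rcx, Address::times_4);       // [rbx + rcx*4]
//   Address(rax, rdx, Address::times_8, 16);   // [rax + rdx*8 + 16]
//
// The ASSERT-only constructors that follow add the ByteSize-typed displacement
// overloads described in the note above for debug builds.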
- -#ifdef ASSERT - Address(Register base, ByteSize disp) - : _base(base), - _index(noreg), - _scale(no_scale), - _disp(in_bytes(disp)) { - } - - Address(Register base, Register index, ScaleFactor scale, ByteSize disp) - : _base(base), - _index(index), - _scale(scale), - _disp(in_bytes(disp)) { - assert(!index->is_valid() == (scale == Address::no_scale), - "inconsistent address"); - } -#endif // ASSERT - - // accessors - bool uses(Register reg) const { - return _base == reg || _index == reg; - } - - // Convert the raw encoding form into the form expected by the constructor for - // Address. An index of 4 (rsp) corresponds to having no index, so convert - // that to noreg for the Address constructor. - static Address make_raw(int base, int index, int scale, int disp); - - static Address make_array(ArrayAddress); - - - private: - bool base_needs_rex() const { - return _base != noreg && _base->encoding() >= 8; - } - - bool index_needs_rex() const { - return _index != noreg &&_index->encoding() >= 8; - } - - relocInfo::relocType reloc() const { return _rspec.type(); } - - friend class Assembler; - friend class MacroAssembler; - friend class LIR_Assembler; // base/index/scale/disp -}; - -// -// AddressLiteral has been split out from Address because operands of this type -// need to be treated specially on 32bit vs. 64bit platforms. By splitting it out -// the few instructions that need to deal with address literals are unique and the -// MacroAssembler does not have to implement every instruction in the Assembler -// in order to search for address literals that may need special handling depending -// on the instruction and the platform. As small step on the way to merging i486/amd64 -// directories. -// -class AddressLiteral VALUE_OBJ_CLASS_SPEC { - friend class ArrayAddress; - RelocationHolder _rspec; - // Typically we use AddressLiterals we want to use their rval - // However in some situations we want the lval (effect address) of the item. - // We provide a special factory for making those lvals. - bool _is_lval; - - // If the target is far we'll need to load the ea of this to - // a register to reach it. Otherwise if near we can do rip - // relative addressing. 
- - address _target; - - protected: - // creation - AddressLiteral() - : _is_lval(false), - _target(NULL) - {} - - public: - - - AddressLiteral(address target, relocInfo::relocType rtype); - - AddressLiteral(address target, RelocationHolder const& rspec) - : _rspec(rspec), - _is_lval(false), - _target(target) - {} - - AddressLiteral addr() { - AddressLiteral ret = *this; - ret._is_lval = true; - return ret; - } - - - private: - - address target() { return _target; } - bool is_lval() { return _is_lval; } - - relocInfo::relocType reloc() const { return _rspec.type(); } - const RelocationHolder& rspec() const { return _rspec; } - - friend class Assembler; - friend class MacroAssembler; - friend class Address; - friend class LIR_Assembler; -}; - -// Convience classes -class RuntimeAddress: public AddressLiteral { - - public: - - RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {} - -}; - -class OopAddress: public AddressLiteral { - - public: - - OopAddress(address target) : AddressLiteral(target, relocInfo::oop_type){} - -}; - -class ExternalAddress: public AddressLiteral { - - public: - - ExternalAddress(address target) : AddressLiteral(target, relocInfo::external_word_type){} - -}; - -class InternalAddress: public AddressLiteral { - - public: - - InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {} - -}; - -// x86 can do array addressing as a single operation since disp can be an absolute -// address amd64 can't. We create a class that expresses the concept but does extra -// magic on amd64 to get the final result - -class ArrayAddress VALUE_OBJ_CLASS_SPEC { - private: - - AddressLiteral _base; - Address _index; - - public: - - ArrayAddress() {}; - ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {}; - AddressLiteral base() { return _base; } - Address index() { return _index; } - -}; - -#ifndef _LP64 -const int FPUStateSizeInWords = 27; -#else -const int FPUStateSizeInWords = 512 / wordSize; -#endif // _LP64 - -// The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction -// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write -// is what you get. The Assembler is generating code into a CodeBuffer. - -class Assembler : public AbstractAssembler { - friend class AbstractAssembler; // for the non-virtual hack - friend class LIR_Assembler; // as_Address() - - protected: - #ifdef ASSERT - void check_relocation(RelocationHolder const& rspec, int format); - #endif - - inline void emit_long64(jlong x); - - void emit_data(jint data, relocInfo::relocType rtype, int format /* = 0 */); - void emit_data(jint data, RelocationHolder const& rspec, int format /* = 0 */); - void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0); - void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0); - - // Helper functions for groups of instructions - void emit_arith_b(int op1, int op2, Register dst, int imm8); - - void emit_arith(int op1, int op2, Register dst, int imm32); - // only x86?? 
- void emit_arith(int op1, int op2, Register dst, jobject obj); - void emit_arith(int op1, int op2, Register dst, Register src); - - void emit_operand(Register reg, - Register base, Register index, Address::ScaleFactor scale, - int disp, - RelocationHolder const& rspec); - void emit_operand(Register reg, Address adr); - - // Immediate-to-memory forms - void emit_arith_operand(int op1, Register rm, Address adr, int imm32); - - void emit_farith(int b1, int b2, int i); - - // macroassembler?? QQQ - bool reachable(AddressLiteral adr) { return true; } - - // These are all easily abused and hence protected - - // Make these disappear in 64bit mode since they would never be correct -#ifndef _LP64 - void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec); - void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec); - - void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec); - void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec); - - void push_literal32(int32_t imm32, RelocationHolder const& rspec); -#endif // _LP64 - - // These are unique in that we are ensured by the caller that the 32bit - // relative in these instructions will always be able to reach the potentially - // 64bit address described by entry. Since they can take a 64bit address they - // don't have the 32 suffix like the other instructions in this class. - - void call_literal(address entry, RelocationHolder const& rspec); - void jmp_literal(address entry, RelocationHolder const& rspec); - - - public: - enum Condition { // The x86 condition codes used for conditional jumps/moves. - zero = 0x4, - notZero = 0x5, - equal = 0x4, - notEqual = 0x5, - less = 0xc, - lessEqual = 0xe, - greater = 0xf, - greaterEqual = 0xd, - below = 0x2, - belowEqual = 0x6, - above = 0x7, - aboveEqual = 0x3, - overflow = 0x0, - noOverflow = 0x1, - carrySet = 0x2, - carryClear = 0x3, - negative = 0x8, - positive = 0x9, - parity = 0xa, - noParity = 0xb - }; - - enum Prefix { - // segment overrides - CS_segment = 0x2e, - SS_segment = 0x36, - DS_segment = 0x3e, - ES_segment = 0x26, - FS_segment = 0x64, - GS_segment = 0x65, - - REX = 0x40, - - REX_B = 0x41, - REX_X = 0x42, - REX_XB = 0x43, - REX_R = 0x44, - REX_RB = 0x45, - REX_RX = 0x46, - REX_RXB = 0x47, - - REX_W = 0x48, - - REX_WB = 0x49, - REX_WX = 0x4A, - REX_WXB = 0x4B, - REX_WR = 0x4C, - REX_WRB = 0x4D, - REX_WRX = 0x4E, - REX_WRXB = 0x4F - }; - - enum WhichOperand { - // input to locate_operand, and format code for relocations - imm32_operand = 0, // embedded 32-bit immediate operand - disp32_operand = 1, // embedded 32-bit displacement or address - call32_operand = 2, // embedded 32-bit self-relative displacement - _WhichOperand_limit = 3 - }; - - public: - - // Creation - Assembler(CodeBuffer* code) : AbstractAssembler(code) {} - - // Decoding - static address locate_operand(address inst, WhichOperand which); - static address locate_next_instruction(address inst); - - // Stack - void pushad(); - void popad(); - - void pushfd(); - void popfd(); - - void pushl(int imm32); - void pushoop(jobject obj); - - void pushl(Register src); - void pushl(Address src); - // void pushl(Label& L, relocInfo::relocType rtype); ? needed? 
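// The Condition values above are the raw x86 "tttn" condition codes, and they
// come in complementary pairs that differ only in bit 0 (zero=0x4/notZero=0x5,
// below=0x2/aboveEqual=0x3, overflow=0x0/noOverflow=0x1, ...).  That is why
// MacroAssembler::negate_condition() earlier in this change is a pure table
// flip; a one-line model:

int negate_condition_model(int cc) { return cc ^ 1; }   // flip the low bit

// It is also why jcc() can encode any of them uniformly, as 0x70|cc with an
// 8-bit offset or 0x0F 0x80|cc with a 32-bit offset.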
- - // dummy to prevent NULL being converted to Register - void pushl(void* dummy); - - void popl(Register dst); - void popl(Address dst); - - // Instruction prefixes - void prefix(Prefix p); - - // Moves - void movb(Register dst, Address src); - void movb(Address dst, int imm8); - void movb(Address dst, Register src); - - void movw(Address dst, int imm16); - void movw(Register dst, Address src); - void movw(Address dst, Register src); - - // these are dummies used to catch attempting to convert NULL to Register - void movl(Register dst, void* junk); - void movl(Address dst, void* junk); - - void movl(Register dst, int imm32); - void movl(Address dst, int imm32); - void movl(Register dst, Register src); - void movl(Register dst, Address src); - void movl(Address dst, Register src); - - void movsxb(Register dst, Address src); - void movsxb(Register dst, Register src); - - void movsxw(Register dst, Address src); - void movsxw(Register dst, Register src); - - void movzxb(Register dst, Address src); - void movzxb(Register dst, Register src); - - void movzxw(Register dst, Address src); - void movzxw(Register dst, Register src); - - // Conditional moves (P6 only) - void cmovl(Condition cc, Register dst, Register src); - void cmovl(Condition cc, Register dst, Address src); - - // Prefetches (SSE, SSE2, 3DNOW only) - void prefetcht0(Address src); - void prefetcht1(Address src); - void prefetcht2(Address src); - void prefetchnta(Address src); - void prefetchw(Address src); - void prefetchr(Address src); - - // Arithmetics - void adcl(Register dst, int imm32); - void adcl(Register dst, Address src); - void adcl(Register dst, Register src); - - void addl(Address dst, int imm32); - void addl(Address dst, Register src); - void addl(Register dst, int imm32); - void addl(Register dst, Address src); - void addl(Register dst, Register src); - - void andl(Register dst, int imm32); - void andl(Register dst, Address src); - void andl(Register dst, Register src); - - void cmpb(Address dst, int imm8); - void cmpw(Address dst, int imm16); - void cmpl(Address dst, int imm32); - void cmpl(Register dst, int imm32); - void cmpl(Register dst, Register src); - void cmpl(Register dst, Address src); - - // this is a dummy used to catch attempting to convert NULL to Register - void cmpl(Register dst, void* junk); - - protected: - // Don't use next inc() and dec() methods directly. INC & DEC instructions - // could cause a partial flag stall since they don't set CF flag. - // Use MacroAssembler::decrement() & MacroAssembler::increment() methods - // which call inc() & dec() or add() & sub() in accordance with - // the product flag UseIncDec value. 
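// A sketch of the selection described in the comment above -- roughly what
// MacroAssembler::increment(reg) does for value == 1 (illustrative only; the
// real method also handles other values and negative counts):
//
//   if (UseIncDec) incl(reg);      // one byte shorter, but leaves CF untouched
//   else           addl(reg, 1);   // writes all flags, so no partial-flag stall
//
// The raw decl()/incl() declarations follow.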
- - void decl(Register dst); - void decl(Address dst); - - void incl(Register dst); - void incl(Address dst); - - public: - void idivl(Register src); - void cdql(); - - void imull(Register dst, Register src); - void imull(Register dst, Register src, int value); - - void leal(Register dst, Address src); - - void mull(Address src); - void mull(Register src); - - void negl(Register dst); - - void notl(Register dst); - - void orl(Address dst, int imm32); - void orl(Register dst, int imm32); - void orl(Register dst, Address src); - void orl(Register dst, Register src); - - void rcll(Register dst, int imm8); - - void sarl(Register dst, int imm8); - void sarl(Register dst); - - void sbbl(Address dst, int imm32); - void sbbl(Register dst, int imm32); - void sbbl(Register dst, Address src); - void sbbl(Register dst, Register src); - - void shldl(Register dst, Register src); - - void shll(Register dst, int imm8); - void shll(Register dst); - - void shrdl(Register dst, Register src); - - void shrl(Register dst, int imm8); - void shrl(Register dst); - - void subl(Address dst, int imm32); - void subl(Address dst, Register src); - void subl(Register dst, int imm32); - void subl(Register dst, Address src); - void subl(Register dst, Register src); - - void testb(Register dst, int imm8); - void testl(Register dst, int imm32); - void testl(Register dst, Address src); - void testl(Register dst, Register src); - - void xaddl(Address dst, Register src); - - void xorl(Register dst, int imm32); - void xorl(Register dst, Address src); - void xorl(Register dst, Register src); - - // Miscellaneous - void bswap(Register reg); - void lock(); - - void xchg (Register reg, Address adr); - void xchgl(Register dst, Register src); - - void cmpxchg (Register reg, Address adr); - void cmpxchg8 (Address adr); - - void nop(int i = 1); - void addr_nop_4(); - void addr_nop_5(); - void addr_nop_7(); - void addr_nop_8(); - - void hlt(); - void ret(int imm16); - void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0 - void smovl(); - void rep_movl(); - void rep_set(); - void repne_scan(); - void setb(Condition cc, Register dst); - void membar(); // Serializing memory-fence - void cpuid(); - void cld(); - void std(); - - void emit_raw (unsigned char); - - // Calls - void call(Label& L, relocInfo::relocType rtype); - void call(Register reg); // push pc; pc <- reg - void call(Address adr); // push pc; pc <- adr - - // Jumps - void jmp(Address entry); // pc <- entry - void jmp(Register entry); // pc <- entry - - // Label operations & relative jumps (PPUM Appendix D) - void jmp(Label& L, relocInfo::relocType rtype = relocInfo::none); // unconditional jump to L - - // Force an 8-bit jump offset - // void jmpb(address entry); - - // Unconditional 8-bit offset jump to L. - // WARNING: be very careful using this for forward jumps. If the label is - // not bound within an 8-bit offset of this instruction, a run-time error - // will occur. - void jmpb(Label& L); - - // jcc is the generic conditional branch generator to run- - // time routines, jcc is used for branches to labels. jcc - // takes a branch opcode (cc) and a label (L) and generates - // either a backward branch or a forward branch and links it - // to the label fixup chain. 
Usage: - // - // Label L; // unbound label - // jcc(cc, L); // forward branch to unbound label - // bind(L); // bind label to the current pc - // jcc(cc, L); // backward branch to bound label - // bind(L); // illegal: a label may be bound only once - // - // Note: The same Label can be used for forward and backward branches - // but it may be bound only once. - - void jcc(Condition cc, Label& L, - relocInfo::relocType rtype = relocInfo::none); - - // Conditional jump to a 8-bit offset to L. - // WARNING: be very careful using this for forward jumps. If the label is - // not bound within an 8-bit offset of this instruction, a run-time error - // will occur. - void jccb(Condition cc, Label& L); - - // Floating-point operations - void fld1(); - void fldz(); - - void fld_s(Address adr); - void fld_s(int index); - void fld_d(Address adr); - void fld_x(Address adr); // extended-precision (80-bit) format - - void fst_s(Address adr); - void fst_d(Address adr); - - void fstp_s(Address adr); - void fstp_d(Address adr); - void fstp_d(int index); - void fstp_x(Address adr); // extended-precision (80-bit) format - - void fild_s(Address adr); - void fild_d(Address adr); - - void fist_s (Address adr); - void fistp_s(Address adr); - void fistp_d(Address adr); - - void fabs(); - void fchs(); - - void flog(); - void flog10(); - - void fldln2(); - void fyl2x(); - void fldlg2(); - - void fcos(); - void fsin(); - void ftan(); - void fsqrt(); - - // "Alternate" versions of instructions place result down in FPU - // stack instead of on TOS - void fadd_s(Address src); - void fadd_d(Address src); - void fadd(int i); - void fadda(int i); // "alternate" fadd - - void fsub_s(Address src); - void fsub_d(Address src); - void fsubr_s(Address src); - void fsubr_d(Address src); - - void fmul_s(Address src); - void fmul_d(Address src); - void fmul(int i); - void fmula(int i); // "alternate" fmul - - void fdiv_s(Address src); - void fdiv_d(Address src); - void fdivr_s(Address src); - void fdivr_d(Address src); - - void fsub(int i); - void fsuba(int i); // "alternate" fsub - void fsubr(int i); - void fsubra(int i); // "alternate" reversed fsub - void fdiv(int i); - void fdiva(int i); // "alternate" fdiv - void fdivr(int i); - void fdivra(int i); // "alternate" reversed fdiv - - void faddp(int i = 1); - void fsubp(int i = 1); - void fsubrp(int i = 1); - void fmulp(int i = 1); - void fdivp(int i = 1); - void fdivrp(int i = 1); - void fprem(); - void fprem1(); - - void fxch(int i = 1); - void fincstp(); - void fdecstp(); - void ffree(int i = 0); - - void fcomp_s(Address src); - void fcomp_d(Address src); - void fcom(int i); - void fcomp(int i = 1); - void fcompp(); - - void fucomi(int i = 1); - void fucomip(int i = 1); - - void ftst(); - void fnstsw_ax(); - void fwait(); - void finit(); - void fldcw(Address src); - void fnstcw(Address src); - - void fnsave(Address dst); - void frstor(Address src); - void fldenv(Address src); - - void sahf(); - - protected: - void emit_sse_operand(XMMRegister reg, Address adr); - void emit_sse_operand(Register reg, Address adr); - void emit_sse_operand(XMMRegister dst, XMMRegister src); - void emit_sse_operand(XMMRegister dst, Register src); - void emit_sse_operand(Register dst, XMMRegister src); - - void emit_operand(MMXRegister reg, Address adr); - - public: - // mmx operations - void movq( MMXRegister dst, Address src ); - void movq( Address dst, MMXRegister src ); - void emms(); - - // xmm operations - void addss(XMMRegister dst, Address src); // Add Scalar Single-Precision Floating-Point 
Values - void addss(XMMRegister dst, XMMRegister src); - void addsd(XMMRegister dst, Address src); // Add Scalar Double-Precision Floating-Point Values - void addsd(XMMRegister dst, XMMRegister src); - - void subss(XMMRegister dst, Address src); // Subtract Scalar Single-Precision Floating-Point Values - void subss(XMMRegister dst, XMMRegister src); - void subsd(XMMRegister dst, Address src); // Subtract Scalar Double-Precision Floating-Point Values - void subsd(XMMRegister dst, XMMRegister src); - - void mulss(XMMRegister dst, Address src); // Multiply Scalar Single-Precision Floating-Point Values - void mulss(XMMRegister dst, XMMRegister src); - void mulsd(XMMRegister dst, Address src); // Multiply Scalar Double-Precision Floating-Point Values - void mulsd(XMMRegister dst, XMMRegister src); - - void divss(XMMRegister dst, Address src); // Divide Scalar Single-Precision Floating-Point Values - void divss(XMMRegister dst, XMMRegister src); - void divsd(XMMRegister dst, Address src); // Divide Scalar Double-Precision Floating-Point Values - void divsd(XMMRegister dst, XMMRegister src); - - void sqrtss(XMMRegister dst, Address src); // Compute Square Root of Scalar Single-Precision Floating-Point Value - void sqrtss(XMMRegister dst, XMMRegister src); - void sqrtsd(XMMRegister dst, Address src); // Compute Square Root of Scalar Double-Precision Floating-Point Value - void sqrtsd(XMMRegister dst, XMMRegister src); - - void pxor(XMMRegister dst, Address src); // Xor Packed Byte Integer Values - void pxor(XMMRegister dst, XMMRegister src); // Xor Packed Byte Integer Values - - void comiss(XMMRegister dst, Address src); // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS - void comiss(XMMRegister dst, XMMRegister src); - void comisd(XMMRegister dst, Address src); // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS - void comisd(XMMRegister dst, XMMRegister src); - - void ucomiss(XMMRegister dst, Address src); // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS - void ucomiss(XMMRegister dst, XMMRegister src); - void ucomisd(XMMRegister dst, Address src); // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS - void ucomisd(XMMRegister dst, XMMRegister src); - - void cvtss2sd(XMMRegister dst, Address src); // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value - void cvtss2sd(XMMRegister dst, XMMRegister src); - void cvtsd2ss(XMMRegister dst, Address src); // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value - void cvtsd2ss(XMMRegister dst, XMMRegister src); - - void cvtsi2ss(XMMRegister dst, Address src); // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value - void cvtsi2ss(XMMRegister dst, Register src); - void cvtsi2sd(XMMRegister dst, Address src); // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value - void cvtsi2sd(XMMRegister dst, Register src); - - void cvtss2si(Register dst, Address src); // Convert Scalar Single-Precision Floating-Point Value to Doubleword Integer - void cvtss2si(Register dst, XMMRegister src); - void cvtsd2si(Register dst, Address src); // Convert Scalar Double-Precision Floating-Point Value to Doubleword Integer - void cvtsd2si(Register dst, XMMRegister src); - - void cvttss2si(Register dst, Address src); // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer - void 
cvttss2si(Register dst, XMMRegister src); - void cvttsd2si(Register dst, Address src); // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer - void cvttsd2si(Register dst, XMMRegister src); - - protected: // Avoid using the next instructions directly. - // New cpus require use of movsd and movss to avoid partial register stall - // when loading from memory. But for old Opteron use movlpd instead of movsd. - // The selection is done in MacroAssembler::movdbl() and movflt(). - void movss(XMMRegister dst, Address src); // Move Scalar Single-Precision Floating-Point Values - void movss(XMMRegister dst, XMMRegister src); - void movss(Address dst, XMMRegister src); - void movsd(XMMRegister dst, Address src); // Move Scalar Double-Precision Floating-Point Values - void movsd(XMMRegister dst, XMMRegister src); - void movsd(Address dst, XMMRegister src); - void movlpd(XMMRegister dst, Address src); - // New cpus require use of movaps and movapd to avoid partial register stall - // when moving between registers. - void movaps(XMMRegister dst, XMMRegister src); - void movapd(XMMRegister dst, XMMRegister src); - public: - - void andps(XMMRegister dst, Address src); // Bitwise Logical AND of Packed Single-Precision Floating-Point Values - void andps(XMMRegister dst, XMMRegister src); - void andpd(XMMRegister dst, Address src); // Bitwise Logical AND of Packed Double-Precision Floating-Point Values - void andpd(XMMRegister dst, XMMRegister src); - - void andnps(XMMRegister dst, Address src); // Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values - void andnps(XMMRegister dst, XMMRegister src); - void andnpd(XMMRegister dst, Address src); // Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values - void andnpd(XMMRegister dst, XMMRegister src); - - void orps(XMMRegister dst, Address src); // Bitwise Logical OR of Packed Single-Precision Floating-Point Values - void orps(XMMRegister dst, XMMRegister src); - void orpd(XMMRegister dst, Address src); // Bitwise Logical OR of Packed Double-Precision Floating-Point Values - void orpd(XMMRegister dst, XMMRegister src); - - void xorps(XMMRegister dst, Address src); // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values - void xorps(XMMRegister dst, XMMRegister src); - void xorpd(XMMRegister dst, Address src); // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values - void xorpd(XMMRegister dst, XMMRegister src); - - void movq(XMMRegister dst, Address src); // Move Quadword - void movq(XMMRegister dst, XMMRegister src); - void movq(Address dst, XMMRegister src); - - void movd(XMMRegister dst, Address src); // Move Doubleword - void movd(XMMRegister dst, Register src); - void movd(Register dst, XMMRegister src); - void movd(Address dst, XMMRegister src); - - void movdqa(XMMRegister dst, Address src); // Move Aligned Double Quadword - void movdqa(XMMRegister dst, XMMRegister src); - void movdqa(Address dst, XMMRegister src); - - void pshufd(XMMRegister dst, XMMRegister src, int mode); // Shuffle Packed Doublewords - void pshufd(XMMRegister dst, Address src, int mode); - void pshuflw(XMMRegister dst, XMMRegister src, int mode); // Shuffle Packed Low Words - void pshuflw(XMMRegister dst, Address src, int mode); - - void psrlq(XMMRegister dst, int shift); // Shift Right Logical Quadword Immediate - - void punpcklbw(XMMRegister dst, XMMRegister src); // Interleave Low Bytes - void punpcklbw(XMMRegister dst, Address src); - - void ldmxcsr( Address src ); - void 
stmxcsr( Address dst ); -}; - - -// MacroAssembler extends Assembler by frequently used macros. -// -// Instructions for which a 'better' code sequence exists depending -// on arguments should also go in here. - -class MacroAssembler: public Assembler { - friend class LIR_Assembler; - protected: - - Address as_Address(AddressLiteral adr); - Address as_Address(ArrayAddress adr); - - // Support for VM calls - // - // This is the base routine called by the different versions of call_VM_leaf. The interpreter - // may customize this version by overriding it for its purposes (e.g., to save/restore - // additional registers when doing a VM call). -#ifdef CC_INTERP - // c++ interpreter never wants to use interp_masm version of call_VM - #define VIRTUAL -#else - #define VIRTUAL virtual -#endif - - VIRTUAL void call_VM_leaf_base( - address entry_point, // the entry point - int number_of_arguments // the number of arguments to pop after the call - ); - - // This is the base routine called by the different versions of call_VM. The interpreter - // may customize this version by overriding it for its purposes (e.g., to save/restore - // additional registers when doing a VM call). - // - // If no java_thread register is specified (noreg) than rdi will be used instead. call_VM_base - // returns the register which contains the thread upon return. If a thread register has been - // specified, the return value will correspond to that register. If no last_java_sp is specified - // (noreg) than rsp will be used instead. - VIRTUAL void call_VM_base( // returns the register containing the thread upon return - Register oop_result, // where an oop-result ends up if any; use noreg otherwise - Register java_thread, // the thread if computed before ; use noreg otherwise - Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise - address entry_point, // the entry point - int number_of_arguments, // the number of arguments (w/o thread) to pop after the call - bool check_exceptions // whether to check for pending exceptions after return - ); - - // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. - // The implementation is only non-empty for the InterpreterMacroAssembler, - // as only the interpreter handles PopFrame and ForceEarlyReturn requests. - virtual void check_and_handle_popframe(Register java_thread); - virtual void check_and_handle_earlyret(Register java_thread); - - void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); - - // helpers for FPU flag access - // tmp is a temporary register, if none is available use noreg - void save_rax (Register tmp); - void restore_rax(Register tmp); - - public: - MacroAssembler(CodeBuffer* code) : Assembler(code) {} - - // Support for NULL-checks - // - // Generates code that causes a NULL OS exception if the content of reg is NULL. - // If the accessed location is M[reg + offset] and the offset is known, provide the - // offset. No explicit code generation is needed if the offset is within a certain - // range (0 <= offset <= page_size). - - void null_check(Register reg, int offset = -1); - static bool needs_explicit_null_check(int offset); - - // Required platform-specific helpers for Label::patch_instructions. - // They _shadow_ the declarations in AbstractAssembler, which are undefined. 
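// The null_check() contract above relies on the OS protecting the page at
// address zero: a load through a NULL base only faults "for free" when the
// byte offset stays inside that guard page.  A model of the decision
// (hypothetical helper; 4096 stands in for os::vm_page_size()):

#include <cstdint>

bool needs_explicit_null_check_model(intptr_t offset) {
  const intptr_t page_size = 4096;
  return offset < 0 || offset >= page_size;   // outside the guard page -> emit a real test
}

// The pd_patch_instruction()/pd_print_patched_instruction() pair declared next
// is separate plumbing used when Labels are bound and back-patched.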
- void pd_patch_instruction(address branch, address target); -#ifndef PRODUCT - static void pd_print_patched_instruction(address branch); -#endif - - // The following 4 methods return the offset of the appropriate move instruction - - // Support for fast byte/word loading with zero extension (depending on particular CPU) - int load_unsigned_byte(Register dst, Address src); - int load_unsigned_word(Register dst, Address src); - - // Support for fast byte/word loading with sign extension (depending on particular CPU) - int load_signed_byte(Register dst, Address src); - int load_signed_word(Register dst, Address src); - - // Support for sign-extension (hi:lo = extend_sign(lo)) - void extend_sign(Register hi, Register lo); - - // Support for inc/dec with optimal instruction selection depending on value - void increment(Register reg, int value = 1); - void decrement(Register reg, int value = 1); - void increment(Address dst, int value = 1); - void decrement(Address dst, int value = 1); - - // Support optimal SSE move instructions. - void movflt(XMMRegister dst, XMMRegister src) { - if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; } - else { movss (dst, src); return; } - } - void movflt(XMMRegister dst, Address src) { movss(dst, src); } - void movflt(XMMRegister dst, AddressLiteral src); - void movflt(Address dst, XMMRegister src) { movss(dst, src); } - - void movdbl(XMMRegister dst, XMMRegister src) { - if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; } - else { movsd (dst, src); return; } - } - - void movdbl(XMMRegister dst, AddressLiteral src); - - void movdbl(XMMRegister dst, Address src) { - if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; } - else { movlpd(dst, src); return; } - } - void movdbl(Address dst, XMMRegister src) { movsd(dst, src); } - - void increment(AddressLiteral dst); - void increment(ArrayAddress dst); - - - // Alignment - void align(int modulus); - - // Misc - void fat_nop(); // 5 byte nop - - // Stack frame creation/removal - void enter(); - void leave(); - - // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information) - // The pointer will be loaded into the thread register. - void get_thread(Register thread); - - // Support for VM calls - // - // It is imperative that all calls into the VM are handled via the call_VM macros. - // They make sure that the stack linkage is setup correctly. call_VM's correspond - // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points. 
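// Typical shape of a call through this interface (illustration only: '__' is
// the usual MacroAssembler shorthand used in stub and interpreter generators,
// and some_entry/some_leaf_entry stand for arbitrary runtime entry points):
//
//   // oop result in rax, one register argument, pending exceptions checked on return
//   __ call_VM(rax, CAST_FROM_FN_PTR(address, some_entry), rcx);
//
//   // leaf call: no oop result, no last_Java_frame bookkeeping, no exception check
//   __ call_VM_leaf(CAST_FROM_FN_PTR(address, some_leaf_entry), rcx);
//
// The concrete overloads are declared below.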
- - void call_VM(Register oop_result, address entry_point, bool check_exceptions = true); - void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true); - void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); - void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); - - void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true); - void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true); - void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); - void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); - - void call_VM_leaf(address entry_point, int number_of_arguments = 0); - void call_VM_leaf(address entry_point, Register arg_1); - void call_VM_leaf(address entry_point, Register arg_1, Register arg_2); - void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); - - // last Java Frame (fills frame anchor) - void set_last_Java_frame(Register thread, Register last_java_sp, Register last_java_fp, address last_java_pc); - void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc); - - // Stores - void store_check(Register obj); // store check for obj - register is destroyed afterwards - void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed) - - // split store_check(Register obj) to enhance instruction interleaving - void store_check_part_1(Register obj); - void store_check_part_2(Register obj); - - // C 'boolean' to Java boolean: x == 0 ? 0 : 1 - void c2bool(Register x); - - // C++ bool manipulation - - void movbool(Register dst, Address src); - void movbool(Address dst, bool boolconst); - void movbool(Address dst, Register src); - void testbool(Register dst); - - // Int division/reminder for Java - // (as idivl, but checks for special case as described in JVM spec.) - // returns idivl instruction offset for implicit exception handling - int corrected_idivl(Register reg); - - void int3(); - - // Long negation for Java - void lneg(Register hi, Register lo); - - // Long multiplication for Java - // (destroys contents of rax, rbx, rcx and rdx) - void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y - - // Long shifts for Java - // (semantics as described in JVM spec.) - void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f) - void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f) - - // Long compare for Java - // (semantics as described in JVM spec.) - void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y) - - // Compares the top-most stack entries on the FPU stack and sets the eflags as follows: - // - // CF (corresponds to C0) if x < y - // PF (corresponds to C2) if unordered - // ZF (corresponds to C3) if x = y - // - // The arguments are in reversed order on the stack (i.e., top of stack is first argument). 
- // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code) - void fcmp(Register tmp); - // Variant of the above which allows y to be further down the stack - // and which only pops x and y if specified. If pop_right is - // specified then pop_left must also be specified. - void fcmp(Register tmp, int index, bool pop_left, bool pop_right); - - // Floating-point comparison for Java - // Compares the top-most stack entries on the FPU stack and stores the result in dst. - // The arguments are in reversed order on the stack (i.e., top of stack is first argument). - // (semantics as described in JVM spec.) - void fcmp2int(Register dst, bool unordered_is_less); - // Variant of the above which allows y to be further down the stack - // and which only pops x and y if specified. If pop_right is - // specified then pop_left must also be specified. - void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right); - - // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards) - // tmp is a temporary register, if none is available use noreg - void fremr(Register tmp); - - - // same as fcmp2int, but using SSE2 - void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); - void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); - - // Inlined sin/cos generator for Java; must not use CPU instruction - // directly on Intel as it does not have high enough precision - // outside of the range [-pi/4, pi/4]. Extra argument indicate the - // number of FPU stack slots in use; all but the topmost will - // require saving if a slow case is necessary. Assumes argument is - // on FP TOS; result is on FP TOS. No cpu registers are changed by - // this code. 
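// For reference, the value that fcmp2int()/cmpss2int()/cmpsd2int() above leave
// in dst follows the JVM spec's fcmpl/fcmpg rules; a plain-C++ statement of it
// (model only -- the real code of course works on FPU/XMM condition flags):

int fcmp2int_model(double x, double y, bool unordered_is_less) {
  if (x != x || y != y) return unordered_is_less ? -1 : 1;  // NaN involved => unordered
  if (x < y) return -1;
  if (x > y) return  1;
  return 0;
}

// trigfunc(), declared next, is unrelated: it is the inlined sin/cos/tan
// generator described in the preceding comment.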
- void trigfunc(char trig, int num_fpu_regs_in_use = 1); - - // branch to L if FPU flag C2 is set/not set - // tmp is a temporary register, if none is available use noreg - void jC2 (Register tmp, Label& L); - void jnC2(Register tmp, Label& L); - - // Pop ST (ffree & fincstp combined) - void fpop(); - - // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack - void push_fTOS(); - - // pops double TOS element from CPU stack and pushes on FPU stack - void pop_fTOS(); - - void empty_FPU_stack(); - - void push_IU_state(); - void pop_IU_state(); - - void push_FPU_state(); - void pop_FPU_state(); - - void push_CPU_state(); - void pop_CPU_state(); - - // Sign extension - void sign_extend_short(Register reg); - void sign_extend_byte(Register reg); - - // Division by power of 2, rounding towards 0 - void division_with_shift(Register reg, int shift_value); - - // Round up to a power of two - void round_to(Register reg, int modulus); - - // Callee saved registers handling - void push_callee_saved_registers(); - void pop_callee_saved_registers(); - - // allocation - void eden_allocate( - Register obj, // result: pointer to object after successful allocation - Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise - int con_size_in_bytes, // object size in bytes if known at compile time - Register t1, // temp register - Label& slow_case // continuation point if fast allocation fails - ); - void tlab_allocate( - Register obj, // result: pointer to object after successful allocation - Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise - int con_size_in_bytes, // object size in bytes if known at compile time - Register t1, // temp register - Register t2, // temp register - Label& slow_case // continuation point if fast allocation fails - ); - void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); - - //---- - void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0 - - // Debugging - void verify_oop(Register reg, const char* s = "broken oop"); // only if +VerifyOops - void verify_oop_addr(Address addr, const char * s = "broken oop addr"); - - void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); // only if +VerifyFPU - void stop(const char* msg); // prints msg, dumps registers and stops execution - void warn(const char* msg); // prints msg and continues - static void debug(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg); - void os_breakpoint(); - void untested() { stop("untested"); } - void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, sizeof(b), "unimplemented: %s", what); stop(b); } - void should_not_reach_here() { stop("should not reach here"); } - void print_CPU_state(); - - // Stack overflow checking - void bang_stack_with_offset(int offset) { - // stack grows down, caller passes positive offset - assert(offset > 0, "must bang with negative offset"); - movl(Address(rsp, (-offset)), rax); - } - - // Writes to stack successive pages until offset reached to check for - // stack overflow + shadow pages. Also, clobbers tmp - void bang_stack_size(Register size, Register tmp); - - // Support for serializing memory accesses between threads - void serialize_memory(Register thread, Register tmp); - - void verify_tlab(); - - // Biased locking support - // lock_reg and obj_reg must be loaded up with the appropriate values. - // swap_reg must be rax, and is killed. 
- // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
- // be killed; if not supplied, push/pop will be used internally to
- // allocate a temporary (inefficient, avoid if possible).
- // Optional slow case is for implementations (interpreter and C1) which branch to
- // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
- // Returns offset of first potentially-faulting instruction for null
- // check info (currently consumed only by C1). If
- // swap_reg_contains_mark is true then returns -1 as it is assumed
- // the calling code has already passed any potential faults.
- int biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg,
-                          bool swap_reg_contains_mark,
-                          Label& done, Label* slow_case = NULL,
-                          BiasedLockingCounters* counters = NULL);
- void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
-
-
- Condition negate_condition(Condition cond);
-
- // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
- // operands. In general the names are modified to avoid hiding the instruction in Assembler
- // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
- // here in MacroAssembler. The major exception to this rule is call.
-
- // Arithmetic
-
- void cmp8(AddressLiteral src1, int8_t imm);
-
- // QQQ renamed to drag out the casting of address to int32_t/intptr_t
- void cmp32(Register src1, int32_t imm);
-
- void cmp32(AddressLiteral src1, int32_t imm);
- // compare reg - mem, or reg - &mem
- void cmp32(Register src1, AddressLiteral src2);
-
- void cmp32(Register src1, Address src2);
-
- // NOTE: src2 must be the lval. This is NOT a mem-mem compare.
- void cmpptr(Address src1, AddressLiteral src2);
-
- void cmpptr(Register src1, AddressLiteral src2);
-
- void cmpoop(Address dst, jobject obj);
- void cmpoop(Register dst, jobject obj);
-
-
- void cmpxchgptr(Register reg, AddressLiteral adr);
-
- // Helper functions for statistics gathering.
- // Conditionally (atomically, on MPs) increments the passed counter address, preserving condition codes.
- void cond_inc32(Condition cond, AddressLiteral counter_addr);
- // Unconditional atomic increment.
- void atomic_incl(AddressLiteral counter_addr);
-
- void lea(Register dst, AddressLiteral adr);
- void lea(Address dst, AddressLiteral adr);
-
- void test32(Register dst, AddressLiteral src);
-
- // Calls
-
- void call(Label& L, relocInfo::relocType rtype);
- void call(Register entry);
-
- // NOTE: this call transfers to the effective address of entry NOT
- // the address contained by entry. This is because it is more natural
- // for jumps/calls.
- void call(AddressLiteral entry);
-
- // Jumps
-
- // NOTE: these jumps transfer to the effective address of dst NOT
- // the address contained by dst. This is because it is more natural
- // for jumps/calls.
- void jump(AddressLiteral dst);
- void jump_cc(Condition cc, AddressLiteral dst);
-
- // 32bit can do a case table jump in one instruction but we no longer allow the base
- // to be installed in the Address class.
- // This jump transfers to the address
- // contained in the location described by entry (not the address of entry).
- void jump(ArrayAddress entry);
-
- // Floating
-
- void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
- void andpd(XMMRegister dst, AddressLiteral src);
-
- void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
- void comiss(XMMRegister dst, AddressLiteral src);
-
- void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
- void comisd(XMMRegister dst, AddressLiteral src);
-
- void fldcw(Address src) { Assembler::fldcw(src); }
- void fldcw(AddressLiteral src);
-
- void fld_s(int index) { Assembler::fld_s(index); }
- void fld_s(Address src) { Assembler::fld_s(src); }
- void fld_s(AddressLiteral src);
-
- void fld_d(Address src) { Assembler::fld_d(src); }
- void fld_d(AddressLiteral src);
-
- void fld_x(Address src) { Assembler::fld_x(src); }
- void fld_x(AddressLiteral src);
-
- void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
- void ldmxcsr(AddressLiteral src);
-
- void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
- void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
- void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
- void movss(XMMRegister dst, AddressLiteral src);
-
- void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
- void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
- void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
- void movsd(XMMRegister dst, AddressLiteral src);
-
- void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
- void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
- void ucomiss(XMMRegister dst, AddressLiteral src);
-
- void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
- void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
- void ucomisd(XMMRegister dst, AddressLiteral src);
-
- // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
- void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); }
- void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
- void xorpd(XMMRegister dst, AddressLiteral src);
-
- // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
- void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); }
- void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
- void xorps(XMMRegister dst, AddressLiteral src);
-
- // Data
-
- void movoop(Register dst, jobject obj);
- void movoop(Address dst, jobject obj);
-
- void movptr(ArrayAddress dst, Register src);
- // can this do an lea?
- void movptr(Register dst, ArrayAddress src);
-
- void movptr(Register dst, AddressLiteral src);
-
- // to avoid hiding movl
- void mov32(AddressLiteral dst, Register src);
- void mov32(Register dst, AddressLiteral src);
- // to avoid hiding movb
- void movbyte(ArrayAddress dst, int src);
-
- // Can push value or effective address
- void pushptr(AddressLiteral src);
-
-#undef VIRTUAL
-
-};
-
-/**
- * class SkipIfEqual:
- *
- * Instantiating this class will result in assembly code being output that will
- * jump around any code emitted between the creation of the instance and its
- * automatic destruction at the end of a scope block, depending on the value of
- * the flag passed to the constructor, which will be checked at run-time.
- */ -class SkipIfEqual { - private: - MacroAssembler* _masm; - Label _label; - - public: - SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value); - ~SkipIfEqual(); -}; - -#ifdef ASSERT -inline bool AbstractAssembler::pd_check_instruction_mark() { return true; } -#endif --- old/hotspot/src/cpu/x86/vm/assembler_x86_32.inline.hpp 2009-08-01 04:21:41.416313720 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,67 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)assembler_x86_32.inline.hpp 1.19 07/05/05 17:04:11 JVM" -#endif -/* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -inline void MacroAssembler::pd_patch_instruction(address branch, address target) { - unsigned char op = branch[0]; - assert(op == 0xE8 /* call */ || - op == 0xE9 /* jmp */ || - op == 0xEB /* short jmp */ || - (op & 0xF0) == 0x70 /* short jcc */ || - op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */, - "Invalid opcode at patch point"); - - if (op == 0xEB || (op & 0xF0) == 0x70) { - // short offset operators (jmp and jcc) - char* disp = (char*) &branch[1]; - int imm8 = target - (address) &disp[1]; - guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset"); - *disp = imm8; - } else { - int* disp = (int*) &branch[(op == 0x0F)? 2: 1]; - int imm32 = target - (address) &disp[1]; - *disp = imm32; - } -} - -#ifndef PRODUCT -inline void MacroAssembler::pd_print_patched_instruction(address branch) { - const char* s; - unsigned char op = branch[0]; - if (op == 0xE8) { - s = "call"; - } else if (op == 0xE9 || op == 0xEB) { - s = "jmp"; - } else if ((op & 0xF0) == 0x70) { - s = "jcc"; - } else if (op == 0x0F) { - s = "jcc"; - } else { - s = "????"; - } - tty->print("%s (unresolved)", s); -} -#endif // ndef PRODUCT --- old/hotspot/src/cpu/x86/vm/assembler_x86_64.cpp 2009-08-01 04:21:41.733268710 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,5163 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)assembler_x86_64.cpp 1.66 07/09/20 10:42:56 JVM" -#endif -/* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -#include "incls/_precompiled.incl" -#include "incls/_assembler_x86_64.cpp.incl" - -// Implementation of AddressLiteral - -AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) { - _is_lval = false; - _target = target; - switch (rtype) { - case relocInfo::oop_type: - // Oops are a special case. Normally they would be their own section - // but in cases like icBuffer they are literals in the code stream that - // we don't have a section for. We use none so that we get a literal address - // which is always patchable. - break; - case relocInfo::external_word_type: - _rspec = external_word_Relocation::spec(target); - break; - case relocInfo::internal_word_type: - _rspec = internal_word_Relocation::spec(target); - break; - case relocInfo::opt_virtual_call_type: - _rspec = opt_virtual_call_Relocation::spec(); - break; - case relocInfo::static_call_type: - _rspec = static_call_Relocation::spec(); - break; - case relocInfo::runtime_call_type: - _rspec = runtime_call_Relocation::spec(); - break; - case relocInfo::none: - break; - default: - ShouldNotReachHere(); - break; - } -} - -// Implementation of Address - -Address Address::make_array(ArrayAddress adr) { -#ifdef _LP64 - // Not implementable on 64bit machines - // Should have been handled higher up the call chain. - ShouldNotReachHere(); - return Address(); -#else - AddressLiteral base = adr.base(); - Address index = adr.index(); - assert(index._disp == 0, "must not have disp"); // maybe it can? - Address array(index._base, index._index, index._scale, (intptr_t) base.target()); - array._rspec = base._rspec; - return array; -#endif // _LP64 -} - -// exceedingly dangerous constructor -Address::Address(int disp, address loc, relocInfo::relocType rtype) { - _base = noreg; - _index = noreg; - _scale = no_scale; - _disp = disp; - switch (rtype) { - case relocInfo::external_word_type: - _rspec = external_word_Relocation::spec(loc); - break; - case relocInfo::internal_word_type: - _rspec = internal_word_Relocation::spec(loc); - break; - case relocInfo::runtime_call_type: - // HMM - _rspec = runtime_call_Relocation::spec(); - break; - case relocInfo::none: - break; - default: - ShouldNotReachHere(); - } -} - -// Convert the raw encoding form into the form expected by the constructor for -// Address. An index of 4 (rsp) corresponds to having no index, so convert -// that to noreg for the Address constructor. 
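
As a side note on the convention the comment above describes, here is a minimal self-contained sketch (plain C++, not HotSpot code; RawAddress and the -1 sentinel are invented for the example): in the raw ModRM/SIB encoding an index field of 4 is the encoding of rsp and means "no index register", so code that converts raw fields into an address object has to map that value to "none" instead of treating rsp as a real index.

#include <cstdio>

struct RawAddress {
  int base;
  int index;   // -1 stands in for "noreg"
  int scale;
  int disp;
};

// Mirrors the shape of the conversion described above: index == 4 (rsp) is
// the "no index" marker in the raw encoding.
RawAddress make_raw_model(int base, int index, int scale, int disp) {
  const int rsp_encoding = 4;
  if (index == rsp_encoding) {
    return RawAddress{ base, -1, 0, disp };   // no index, no scale
  }
  return RawAddress{ base, index, scale, disp };
}

int main() {
  RawAddress a = make_raw_model(3 /* rbx */, 4 /* rsp -> none */, 1, 16);
  std::printf("index field: %d (expected -1, i.e. no index register)\n", a.index);
  return 0;
}
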
-Address Address::make_raw(int base, int index, int scale, int disp) { - bool valid_index = index != rsp->encoding(); - if (valid_index) { - Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp)); - return madr; - } else { - Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp)); - return madr; - } -} - - -// Implementation of Assembler -int AbstractAssembler::code_fill_byte() { - return (u_char)'\xF4'; // hlt -} - -// This should only be used by 64bit instructions that can use rip-relative -// it cannot be used by instructions that want an immediate value. - -bool Assembler::reachable(AddressLiteral adr) { - int64_t disp; - // None will force a 64bit literal to the code stream. Likely a placeholder - // for something that will be patched later and we need to certain it will - // always be reachable. - if (adr.reloc() == relocInfo::none) { - return false; - } - if (adr.reloc() == relocInfo::internal_word_type) { - // This should be rip relative and easily reachable. - return true; - } - if (adr.reloc() != relocInfo::external_word_type && - adr.reloc() != relocInfo::runtime_call_type ) { - return false; - } - - // Stress the correction code - if (ForceUnreachable) { - // Must be runtimecall reloc, see if it is in the codecache - // Flipping stuff in the codecache to be unreachable causes issues - // with things like inline caches where the additional instructions - // are not handled. - if (CodeCache::find_blob(adr._target) == NULL) { - return false; - } - } - // For external_word_type/runtime_call_type if it is reachable from where we - // are now (possibly a temp buffer) and where we might end up - // anywhere in the codeCache then we are always reachable. - // This would have to change if we ever save/restore shared code - // to be more pessimistic. - - disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int)); - if (!is_simm32(disp)) return false; - disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int)); - if (!is_simm32(disp)) return false; - - disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int)); - - // Because rip relative is a disp + address_of_next_instruction and we - // don't know the value of address_of_next_instruction we apply a fudge factor - // to make sure we will be ok no matter the size of the instruction we get placed into. - // We don't have to fudge the checks above here because they are already worst case. - - // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal - // + 4 because better safe than sorry. - const int fudge = 12 + 4; - if (disp < 0) { - disp -= fudge; - } else { - disp += fudge; - } - return is_simm32(disp); -} - - -// make this go away eventually -void Assembler::emit_data(jint data, - relocInfo::relocType rtype, - int format) { - if (rtype == relocInfo::none) { - emit_long(data); - } else { - emit_data(data, Relocation::spec_simple(rtype), format); - } -} - -void Assembler::emit_data(jint data, - RelocationHolder const& rspec, - int format) { - assert(imm64_operand == 0, "default format must be imm64 in this file"); - assert(imm64_operand != format, "must not be imm64"); - assert(inst_mark() != NULL, "must be inside InstructionMark"); - if (rspec.type() != relocInfo::none) { - #ifdef ASSERT - check_relocation(rspec, format); - #endif - // Do not use AbstractAssembler::relocate, which is not intended for - // embedded words. Instead, relocate to the enclosing instruction. - - // hack. 
call32 is too wide for mask so use disp32 - if (format == call32_operand) - code_section()->relocate(inst_mark(), rspec, disp32_operand); - else - code_section()->relocate(inst_mark(), rspec, format); - } - emit_long(data); -} - -void Assembler::emit_data64(jlong data, - relocInfo::relocType rtype, - int format) { - if (rtype == relocInfo::none) { - emit_long64(data); - } else { - emit_data64(data, Relocation::spec_simple(rtype), format); - } -} - -void Assembler::emit_data64(jlong data, - RelocationHolder const& rspec, - int format) { - assert(imm64_operand == 0, "default format must be imm64 in this file"); - assert(imm64_operand == format, "must be imm64"); - assert(inst_mark() != NULL, "must be inside InstructionMark"); - // Do not use AbstractAssembler::relocate, which is not intended for - // embedded words. Instead, relocate to the enclosing instruction. - code_section()->relocate(inst_mark(), rspec, format); -#ifdef ASSERT - check_relocation(rspec, format); -#endif - emit_long64(data); -} - -void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { - assert(isByte(op1) && isByte(op2), "wrong opcode"); - assert(isByte(imm8), "not a byte"); - assert((op1 & 0x01) == 0, "should be 8bit operation"); - int dstenc = dst->encoding(); - if (dstenc >= 8) { - dstenc -= 8; - } - emit_byte(op1); - emit_byte(op2 | dstenc); - emit_byte(imm8); -} - -void Assembler::emit_arith(int op1, int op2, Register dst, int imm32) { - assert(isByte(op1) && isByte(op2), "wrong opcode"); - assert((op1 & 0x01) == 1, "should be 32bit operation"); - assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); - int dstenc = dst->encoding(); - if (dstenc >= 8) { - dstenc -= 8; - } - if (is8bit(imm32)) { - emit_byte(op1 | 0x02); // set sign bit - emit_byte(op2 | dstenc); - emit_byte(imm32 & 0xFF); - } else { - emit_byte(op1); - emit_byte(op2 | dstenc); - emit_long(imm32); - } -} - -// immediate-to-memory forms -void Assembler::emit_arith_operand(int op1, - Register rm, Address adr, - int imm32) { - assert((op1 & 0x01) == 1, "should be 32bit operation"); - assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); - if (is8bit(imm32)) { - emit_byte(op1 | 0x02); // set sign bit - emit_operand(rm, adr, 1); - emit_byte(imm32 & 0xFF); - } else { - emit_byte(op1); - emit_operand(rm, adr, 4); - emit_long(imm32); - } -} - - -void Assembler::emit_arith(int op1, int op2, Register dst, Register src) { - assert(isByte(op1) && isByte(op2), "wrong opcode"); - int dstenc = dst->encoding(); - int srcenc = src->encoding(); - if (dstenc >= 8) { - dstenc -= 8; - } - if (srcenc >= 8) { - srcenc -= 8; - } - emit_byte(op1); - emit_byte(op2 | dstenc << 3 | srcenc); -} - -void Assembler::emit_operand(Register reg, Register base, Register index, - Address::ScaleFactor scale, int disp, - RelocationHolder const& rspec, - int rip_relative_correction) { - relocInfo::relocType rtype = (relocInfo::relocType) rspec.type(); - int regenc = reg->encoding(); - if (regenc >= 8) { - regenc -= 8; - } - if (base->is_valid()) { - if (index->is_valid()) { - assert(scale != Address::no_scale, "inconsistent address"); - int indexenc = index->encoding(); - if (indexenc >= 8) { - indexenc -= 8; - } - int baseenc = base->encoding(); - if (baseenc >= 8) { - baseenc -= 8; - } - // [base + index*scale + disp] - if (disp == 0 && rtype == relocInfo::none && - base != rbp && base != r13) { - // [base + index*scale] - // [00 reg 100][ss index base] - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x04 | regenc << 3); - 
emit_byte(scale << 6 | indexenc << 3 | baseenc); - } else if (is8bit(disp) && rtype == relocInfo::none) { - // [base + index*scale + imm8] - // [01 reg 100][ss index base] imm8 - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x44 | regenc << 3); - emit_byte(scale << 6 | indexenc << 3 | baseenc); - emit_byte(disp & 0xFF); - } else { - // [base + index*scale + disp32] - // [10 reg 100][ss index base] disp32 - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x84 | regenc << 3); - emit_byte(scale << 6 | indexenc << 3 | baseenc); - emit_data(disp, rspec, disp32_operand); - } - } else if (base == rsp || base == r12) { - // [rsp + disp] - if (disp == 0 && rtype == relocInfo::none) { - // [rsp] - // [00 reg 100][00 100 100] - emit_byte(0x04 | regenc << 3); - emit_byte(0x24); - } else if (is8bit(disp) && rtype == relocInfo::none) { - // [rsp + imm8] - // [01 reg 100][00 100 100] disp8 - emit_byte(0x44 | regenc << 3); - emit_byte(0x24); - emit_byte(disp & 0xFF); - } else { - // [rsp + imm32] - // [10 reg 100][00 100 100] disp32 - emit_byte(0x84 | regenc << 3); - emit_byte(0x24); - emit_data(disp, rspec, disp32_operand); - } - } else { - // [base + disp] - assert(base != rsp && base != r12, "illegal addressing mode"); - int baseenc = base->encoding(); - if (baseenc >= 8) { - baseenc -= 8; - } - if (disp == 0 && rtype == relocInfo::none && - base != rbp && base != r13) { - // [base] - // [00 reg base] - emit_byte(0x00 | regenc << 3 | baseenc); - } else if (is8bit(disp) && rtype == relocInfo::none) { - // [base + disp8] - // [01 reg base] disp8 - emit_byte(0x40 | regenc << 3 | baseenc); - emit_byte(disp & 0xFF); - } else { - // [base + disp32] - // [10 reg base] disp32 - emit_byte(0x80 | regenc << 3 | baseenc); - emit_data(disp, rspec, disp32_operand); - } - } - } else { - if (index->is_valid()) { - assert(scale != Address::no_scale, "inconsistent address"); - int indexenc = index->encoding(); - if (indexenc >= 8) { - indexenc -= 8; - } - // [index*scale + disp] - // [00 reg 100][ss index 101] disp32 - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x04 | regenc << 3); - emit_byte(scale << 6 | indexenc << 3 | 0x05); - emit_data(disp, rspec, disp32_operand); -#ifdef _LP64 - } else if (rtype != relocInfo::none ) { - // [disp] RIP-RELATIVE - // [00 000 101] disp32 - - emit_byte(0x05 | regenc << 3); - // Note that the RIP-rel. correction applies to the generated - // disp field, but _not_ to the target address in the rspec. - - // disp was created by converting the target address minus the pc - // at the start of the instruction. That needs more correction here. 
- // intptr_t disp = target - next_ip; - assert(inst_mark() != NULL, "must be inside InstructionMark"); - address next_ip = pc() + sizeof(int32_t) + rip_relative_correction; - int64_t adjusted = (int64_t) disp - (next_ip - inst_mark()); - assert(is_simm32(adjusted), - "must be 32bit offset (RIP relative address)"); - emit_data((int) adjusted, rspec, disp32_operand); - -#endif // _LP64 - } else { - // [disp] ABSOLUTE - // [00 reg 100][00 100 101] disp32 - emit_byte(0x04 | regenc << 3); - emit_byte(0x25); - emit_data(disp, rspec, disp32_operand); - } - } -} - -void Assembler::emit_operand(XMMRegister reg, Register base, Register index, - Address::ScaleFactor scale, int disp, - RelocationHolder const& rspec, - int rip_relative_correction) { - relocInfo::relocType rtype = (relocInfo::relocType) rspec.type(); - int regenc = reg->encoding(); - if (regenc >= 8) { - regenc -= 8; - } - if (base->is_valid()) { - if (index->is_valid()) { - assert(scale != Address::no_scale, "inconsistent address"); - int indexenc = index->encoding(); - if (indexenc >= 8) { - indexenc -= 8; - } - int baseenc = base->encoding(); - if (baseenc >= 8) { - baseenc -= 8; - } - // [base + index*scale + disp] - if (disp == 0 && rtype == relocInfo::none && - base != rbp && base != r13) { - // [base + index*scale] - // [00 reg 100][ss index base] - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x04 | regenc << 3); - emit_byte(scale << 6 | indexenc << 3 | baseenc); - } else if (is8bit(disp) && rtype == relocInfo::none) { - // [base + index*scale + disp8] - // [01 reg 100][ss index base] disp8 - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x44 | regenc << 3); - emit_byte(scale << 6 | indexenc << 3 | baseenc); - emit_byte(disp & 0xFF); - } else { - // [base + index*scale + disp32] - // [10 reg 100][ss index base] disp32 - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x84 | regenc << 3); - emit_byte(scale << 6 | indexenc << 3 | baseenc); - emit_data(disp, rspec, disp32_operand); - } - } else if (base == rsp || base == r12) { - // [rsp + disp] - if (disp == 0 && rtype == relocInfo::none) { - // [rsp] - // [00 reg 100][00 100 100] - emit_byte(0x04 | regenc << 3); - emit_byte(0x24); - } else if (is8bit(disp) && rtype == relocInfo::none) { - // [rsp + imm8] - // [01 reg 100][00 100 100] disp8 - emit_byte(0x44 | regenc << 3); - emit_byte(0x24); - emit_byte(disp & 0xFF); - } else { - // [rsp + imm32] - // [10 reg 100][00 100 100] disp32 - emit_byte(0x84 | regenc << 3); - emit_byte(0x24); - emit_data(disp, rspec, disp32_operand); - } - } else { - // [base + disp] - assert(base != rsp && base != r12, "illegal addressing mode"); - int baseenc = base->encoding(); - if (baseenc >= 8) { - baseenc -= 8; - } - if (disp == 0 && rtype == relocInfo::none && - base != rbp && base != r13) { - // [base] - // [00 reg base] - emit_byte(0x00 | regenc << 3 | baseenc); - } else if (is8bit(disp) && rtype == relocInfo::none) { - // [base + imm8] - // [01 reg base] disp8 - emit_byte(0x40 | regenc << 3 | baseenc); - emit_byte(disp & 0xFF); - } else { - // [base + imm32] - // [10 reg base] disp32 - emit_byte(0x80 | regenc << 3 | baseenc); - emit_data(disp, rspec, disp32_operand); - } - } - } else { - if (index->is_valid()) { - assert(scale != Address::no_scale, "inconsistent address"); - int indexenc = index->encoding(); - if (indexenc >= 8) { - indexenc -= 8; - } - // [index*scale + disp] - // [00 reg 100][ss index 101] disp32 - assert(index != rsp, "illegal addressing mode"); - emit_byte(0x04 | regenc << 3); - 
emit_byte(scale << 6 | indexenc << 3 | 0x05); - emit_data(disp, rspec, disp32_operand); -#ifdef _LP64 - } else if ( rtype != relocInfo::none ) { - // [disp] RIP-RELATIVE - // [00 reg 101] disp32 - emit_byte(0x05 | regenc << 3); - // Note that the RIP-rel. correction applies to the generated - // disp field, but _not_ to the target address in the rspec. - - // disp was created by converting the target address minus the pc - // at the start of the instruction. That needs more correction here. - // intptr_t disp = target - next_ip; - - assert(inst_mark() != NULL, "must be inside InstructionMark"); - address next_ip = pc() + sizeof(int32_t) + rip_relative_correction; - - int64_t adjusted = (int64_t) disp - (next_ip - inst_mark()); - assert(is_simm32(adjusted), - "must be 32bit offset (RIP relative address)"); - emit_data((int) adjusted, rspec, disp32_operand); -#endif // _LP64 - } else { - // [disp] ABSOLUTE - // [00 reg 100][00 100 101] disp32 - emit_byte(0x04 | regenc << 3); - emit_byte(0x25); - emit_data(disp, rspec, disp32_operand); - } - } -} - -// Secret local extension to Assembler::WhichOperand: -#define end_pc_operand (_WhichOperand_limit) - -address Assembler::locate_operand(address inst, WhichOperand which) { - // Decode the given instruction, and return the address of - // an embedded 32-bit operand word. - - // If "which" is disp32_operand, selects the displacement portion - // of an effective address specifier. - // If "which" is imm64_operand, selects the trailing immediate constant. - // If "which" is call32_operand, selects the displacement of a call or jump. - // Caller is responsible for ensuring that there is such an operand, - // and that it is 32/64 bits wide. - - // If "which" is end_pc_operand, find the end of the instruction. - - address ip = inst; - bool is_64bit = false; - - debug_only(bool has_disp32 = false); - int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn - - again_after_prefix: - switch (0xFF & *ip++) { - - // These convenience macros generate groups of "case" labels for the switch. -#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3 -#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \ - case (x)+4: case (x)+5: case (x)+6: case (x)+7 -#define REP16(x) REP8((x)+0): \ - case REP8((x)+8) - - case CS_segment: - case SS_segment: - case DS_segment: - case ES_segment: - case FS_segment: - case GS_segment: - assert(0, "shouldn't have that prefix"); - assert(ip == inst + 1 || ip == inst + 2, "only two prefixes allowed"); - goto again_after_prefix; - - case 0x67: - case REX: - case REX_B: - case REX_X: - case REX_XB: - case REX_R: - case REX_RB: - case REX_RX: - case REX_RXB: -// assert(ip == inst + 1, "only one prefix allowed"); - goto again_after_prefix; - - case REX_W: - case REX_WB: - case REX_WX: - case REX_WXB: - case REX_WR: - case REX_WRB: - case REX_WRX: - case REX_WRXB: - is_64bit = true; -// assert(ip == inst + 1, "only one prefix allowed"); - goto again_after_prefix; - - case 0xFF: // pushq a; decl a; incl a; call a; jmp a - case 0x88: // movb a, r - case 0x89: // movl a, r - case 0x8A: // movb r, a - case 0x8B: // movl r, a - case 0x8F: // popl a - debug_only(has_disp32 = true); - break; - - case 0x68: // pushq #32 - if (which == end_pc_operand) { - return ip + 4; - } - assert(0, "pushq has no disp32 or imm64"); - ShouldNotReachHere(); - - case 0x66: // movw ... 
(size prefix) - again_after_size_prefix2: - switch (0xFF & *ip++) { - case REX: - case REX_B: - case REX_X: - case REX_XB: - case REX_R: - case REX_RB: - case REX_RX: - case REX_RXB: - case REX_W: - case REX_WB: - case REX_WX: - case REX_WXB: - case REX_WR: - case REX_WRB: - case REX_WRX: - case REX_WRXB: - goto again_after_size_prefix2; - case 0x8B: // movw r, a - case 0x89: // movw a, r - break; - case 0xC7: // movw a, #16 - tail_size = 2; // the imm16 - break; - case 0x0F: // several SSE/SSE2 variants - ip--; // reparse the 0x0F - goto again_after_prefix; - default: - ShouldNotReachHere(); - } - break; - - case REP8(0xB8): // movl/q r, #32/#64(oop?) - if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4); - assert((which == call32_operand || which == imm64_operand) && is_64bit, ""); - return ip; - - case 0x69: // imul r, a, #32 - case 0xC7: // movl a, #32(oop?) - tail_size = 4; - debug_only(has_disp32 = true); // has both kinds of operands! - break; - - case 0x0F: // movx..., etc. - switch (0xFF & *ip++) { - case 0x12: // movlps - case 0x28: // movaps - case 0x2E: // ucomiss - case 0x2F: // comiss - case 0x54: // andps - case 0x57: // xorps - case 0x6E: // movd - case 0x7E: // movd - case 0xAE: // ldmxcsr a - debug_only(has_disp32 = true); // has both kinds of operands! - break; - case 0xAD: // shrd r, a, %cl - case 0xAF: // imul r, a - case 0xBE: // movsbl r, a - case 0xBF: // movswl r, a - case 0xB6: // movzbl r, a - case 0xB7: // movzwl r, a - case REP16(0x40): // cmovl cc, r, a - case 0xB0: // cmpxchgb - case 0xB1: // cmpxchg - case 0xC1: // xaddl - case 0xC7: // cmpxchg8 - case REP16(0x90): // setcc a - debug_only(has_disp32 = true); - // fall out of the switch to decode the address - break; - case 0xAC: // shrd r, a, #8 - debug_only(has_disp32 = true); - tail_size = 1; // the imm8 - break; - case REP16(0x80): // jcc rdisp32 - if (which == end_pc_operand) return ip + 4; - assert(which == call32_operand, "jcc has no disp32 or imm64"); - return ip; - default: - ShouldNotReachHere(); - } - break; - - case 0x81: // addl a, #32; addl r, #32 - // also: orl, adcl, sbbl, andl, subl, xorl, cmpl - tail_size = 4; - debug_only(has_disp32 = true); // has both kinds of operands! - break; - - case 0x83: // addl a, #8; addl r, #8 - // also: orl, adcl, sbbl, andl, subl, xorl, cmpl - debug_only(has_disp32 = true); // has both kinds of operands! - tail_size = 1; - break; - - case 0x9B: - switch (0xFF & *ip++) { - case 0xD9: // fnstcw a - debug_only(has_disp32 = true); - break; - default: - ShouldNotReachHere(); - } - break; - - case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a - case REP4(0x10): // adc... - case REP4(0x20): // and... - case REP4(0x30): // xor... - case REP4(0x08): // or... - case REP4(0x18): // sbb... - case REP4(0x28): // sub... - case 0xF7: // mull a - case 0x87: // xchg r, a - debug_only(has_disp32 = true); - break; - case REP4(0x38): // cmp... - case 0x8D: // lea r, a - case 0x85: // test r, a - debug_only(has_disp32 = true); // has both kinds of operands! - break; - - case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8 - case 0xC6: // movb a, #8 - case 0x80: // cmpb a, #8 - case 0x6B: // imul r, a, #8 - debug_only(has_disp32 = true); // has both kinds of operands! 
- tail_size = 1; // the imm8 - break; - - case 0xE8: // call rdisp32 - case 0xE9: // jmp rdisp32 - if (which == end_pc_operand) return ip + 4; - assert(which == call32_operand, "call has no disp32 or imm32"); - return ip; - - case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1 - case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl - case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a - case 0xDD: // fld_d a; fst_d a; fstp_d a - case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a - case 0xDF: // fild_d a; fistp_d a - case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a - case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a - case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a - debug_only(has_disp32 = true); - break; - - case 0xF3: // For SSE - case 0xF2: // For SSE2 - switch (0xFF & *ip++) { - case REX: - case REX_B: - case REX_X: - case REX_XB: - case REX_R: - case REX_RB: - case REX_RX: - case REX_RXB: - case REX_W: - case REX_WB: - case REX_WX: - case REX_WXB: - case REX_WR: - case REX_WRB: - case REX_WRX: - case REX_WRXB: - ip++; - default: - ip++; - } - debug_only(has_disp32 = true); // has both kinds of operands! - break; - - default: - ShouldNotReachHere(); - -#undef REP8 -#undef REP16 - } - - assert(which != call32_operand, "instruction is not a call, jmp, or jcc"); - assert(which != imm64_operand, "instruction is not a movq reg, imm64"); - assert(which != disp32_operand || has_disp32, "instruction has no disp32 field"); - - // parse the output of emit_operand - int op2 = 0xFF & *ip++; - int base = op2 & 0x07; - int op3 = -1; - const int b100 = 4; - const int b101 = 5; - if (base == b100 && (op2 >> 6) != 3) { - op3 = 0xFF & *ip++; - base = op3 & 0x07; // refetch the base - } - // now ip points at the disp (if any) - - switch (op2 >> 6) { - case 0: - // [00 reg 100][ss index base] - // [00 reg 100][00 100 esp] - // [00 reg base] - // [00 reg 100][ss index 101][disp32] - // [00 reg 101] [disp32] - - if (base == b101) { - if (which == disp32_operand) - return ip; // caller wants the disp32 - ip += 4; // skip the disp32 - } - break; - - case 1: - // [01 reg 100][ss index base][disp8] - // [01 reg 100][00 100 esp][disp8] - // [01 reg base] [disp8] - ip += 1; // skip the disp8 - break; - - case 2: - // [10 reg 100][ss index base][disp32] - // [10 reg 100][00 100 esp][disp32] - // [10 reg base] [disp32] - if (which == disp32_operand) - return ip; // caller wants the disp32 - ip += 4; // skip the disp32 - break; - - case 3: - // [11 reg base] (not a memory addressing mode) - break; - } - - if (which == end_pc_operand) { - return ip + tail_size; - } - - assert(0, "fix locate_operand"); - return ip; -} - -address Assembler::locate_next_instruction(address inst) { - // Secretly share code with locate_operand: - return locate_operand(inst, end_pc_operand); -} - -#ifdef ASSERT -void Assembler::check_relocation(RelocationHolder const& rspec, int format) { - address inst = inst_mark(); - assert(inst != NULL && inst < pc(), - "must point to beginning of instruction"); - address opnd; - - Relocation* r = rspec.reloc(); - if (r->type() == relocInfo::none) { - return; - } else if (r->is_call() || format == call32_operand) { - opnd = locate_operand(inst, call32_operand); - } else if (r->is_data()) { - assert(format == imm64_operand || format == disp32_operand, "format ok"); - opnd = locate_operand(inst, (WhichOperand) format); - } else { - assert(format == 0, "cannot specify a format"); - return; - } - assert(opnd == pc(), "must put operand where 
relocs can find it"); -} -#endif - -int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { - if (reg_enc >= 8) { - prefix(REX_B); - reg_enc -= 8; - } else if (byteinst && reg_enc >= 4) { - prefix(REX); - } - return reg_enc; -} - -int Assembler::prefixq_and_encode(int reg_enc) { - if (reg_enc < 8) { - prefix(REX_W); - } else { - prefix(REX_WB); - reg_enc -= 8; - } - return reg_enc; -} - -int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) { - if (dst_enc < 8) { - if (src_enc >= 8) { - prefix(REX_B); - src_enc -= 8; - } else if (byteinst && src_enc >= 4) { - prefix(REX); - } - } else { - if (src_enc < 8) { - prefix(REX_R); - } else { - prefix(REX_RB); - src_enc -= 8; - } - dst_enc -= 8; - } - return dst_enc << 3 | src_enc; -} - -int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { - if (dst_enc < 8) { - if (src_enc < 8) { - prefix(REX_W); - } else { - prefix(REX_WB); - src_enc -= 8; - } - } else { - if (src_enc < 8) { - prefix(REX_WR); - } else { - prefix(REX_WRB); - src_enc -= 8; - } - dst_enc -= 8; - } - return dst_enc << 3 | src_enc; -} - -void Assembler::prefix(Register reg) { - if (reg->encoding() >= 8) { - prefix(REX_B); - } -} - -void Assembler::prefix(Address adr) { - if (adr.base_needs_rex()) { - if (adr.index_needs_rex()) { - prefix(REX_XB); - } else { - prefix(REX_B); - } - } else { - if (adr.index_needs_rex()) { - prefix(REX_X); - } - } -} - -void Assembler::prefixq(Address adr) { - if (adr.base_needs_rex()) { - if (adr.index_needs_rex()) { - prefix(REX_WXB); - } else { - prefix(REX_WB); - } - } else { - if (adr.index_needs_rex()) { - prefix(REX_WX); - } else { - prefix(REX_W); - } - } -} - - -void Assembler::prefix(Address adr, Register reg, bool byteinst) { - if (reg->encoding() < 8) { - if (adr.base_needs_rex()) { - if (adr.index_needs_rex()) { - prefix(REX_XB); - } else { - prefix(REX_B); - } - } else { - if (adr.index_needs_rex()) { - prefix(REX_X); - } else if (reg->encoding() >= 4 ) { - prefix(REX); - } - } - } else { - if (adr.base_needs_rex()) { - if (adr.index_needs_rex()) { - prefix(REX_RXB); - } else { - prefix(REX_RB); - } - } else { - if (adr.index_needs_rex()) { - prefix(REX_RX); - } else { - prefix(REX_R); - } - } - } -} - -void Assembler::prefixq(Address adr, Register src) { - if (src->encoding() < 8) { - if (adr.base_needs_rex()) { - if (adr.index_needs_rex()) { - prefix(REX_WXB); - } else { - prefix(REX_WB); - } - } else { - if (adr.index_needs_rex()) { - prefix(REX_WX); - } else { - prefix(REX_W); - } - } - } else { - if (adr.base_needs_rex()) { - if (adr.index_needs_rex()) { - prefix(REX_WRXB); - } else { - prefix(REX_WRB); - } - } else { - if (adr.index_needs_rex()) { - prefix(REX_WRX); - } else { - prefix(REX_WR); - } - } - } -} - -void Assembler::prefix(Address adr, XMMRegister reg) { - if (reg->encoding() < 8) { - if (adr.base_needs_rex()) { - if (adr.index_needs_rex()) { - prefix(REX_XB); - } else { - prefix(REX_B); - } - } else { - if (adr.index_needs_rex()) { - prefix(REX_X); - } - } - } else { - if (adr.base_needs_rex()) { - if (adr.index_needs_rex()) { - prefix(REX_RXB); - } else { - prefix(REX_RB); - } - } else { - if (adr.index_needs_rex()) { - prefix(REX_RX); - } else { - prefix(REX_R); - } - } - } -} - -void Assembler::emit_operand(Register reg, Address adr, - int rip_relative_correction) { - emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, - adr._rspec, - rip_relative_correction); -} - -void Assembler::emit_operand(XMMRegister reg, Address adr, - int rip_relative_correction) { - 
emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, - adr._rspec, - rip_relative_correction); -} - -void Assembler::emit_farith(int b1, int b2, int i) { - assert(isByte(b1) && isByte(b2), "wrong opcode"); - assert(0 <= i && i < 8, "illegal stack offset"); - emit_byte(b1); - emit_byte(b2 + i); -} - -// pushad is invalid, use this instead. -// NOTE: Kills flags!! -void Assembler::pushaq() { - // we have to store original rsp. ABI says that 128 bytes - // below rsp are local scratch. - movq(Address(rsp, -5 * wordSize), rsp); - - subq(rsp, 16 * wordSize); - - movq(Address(rsp, 15 * wordSize), rax); - movq(Address(rsp, 14 * wordSize), rcx); - movq(Address(rsp, 13 * wordSize), rdx); - movq(Address(rsp, 12 * wordSize), rbx); - // skip rsp - movq(Address(rsp, 10 * wordSize), rbp); - movq(Address(rsp, 9 * wordSize), rsi); - movq(Address(rsp, 8 * wordSize), rdi); - movq(Address(rsp, 7 * wordSize), r8); - movq(Address(rsp, 6 * wordSize), r9); - movq(Address(rsp, 5 * wordSize), r10); - movq(Address(rsp, 4 * wordSize), r11); - movq(Address(rsp, 3 * wordSize), r12); - movq(Address(rsp, 2 * wordSize), r13); - movq(Address(rsp, wordSize), r14); - movq(Address(rsp, 0), r15); -} - -// popad is invalid, use this instead -// NOTE: Kills flags!! -void Assembler::popaq() { - movq(r15, Address(rsp, 0)); - movq(r14, Address(rsp, wordSize)); - movq(r13, Address(rsp, 2 * wordSize)); - movq(r12, Address(rsp, 3 * wordSize)); - movq(r11, Address(rsp, 4 * wordSize)); - movq(r10, Address(rsp, 5 * wordSize)); - movq(r9, Address(rsp, 6 * wordSize)); - movq(r8, Address(rsp, 7 * wordSize)); - movq(rdi, Address(rsp, 8 * wordSize)); - movq(rsi, Address(rsp, 9 * wordSize)); - movq(rbp, Address(rsp, 10 * wordSize)); - // skip rsp - movq(rbx, Address(rsp, 12 * wordSize)); - movq(rdx, Address(rsp, 13 * wordSize)); - movq(rcx, Address(rsp, 14 * wordSize)); - movq(rax, Address(rsp, 15 * wordSize)); - - addq(rsp, 16 * wordSize); -} - -void Assembler::pushfq() { - emit_byte(0x9C); -} - -void Assembler::popfq() { - emit_byte(0x9D); -} - -void Assembler::pushq(int imm32) { - emit_byte(0x68); - emit_long(imm32); -} - -void Assembler::pushq(Register src) { - int encode = prefix_and_encode(src->encoding()); - - emit_byte(0x50 | encode); -} - -void Assembler::pushq(Address src) { - InstructionMark im(this); - prefix(src); - emit_byte(0xFF); - emit_operand(rsi, src); -} - -void Assembler::popq(Register dst) { - int encode = prefix_and_encode(dst->encoding()); - emit_byte(0x58 | encode); -} - -void Assembler::popq(Address dst) { - InstructionMark im(this); - prefix(dst); - emit_byte(0x8F); - emit_operand(rax, dst); -} - -void Assembler::prefix(Prefix p) { - a_byte(p); -} - -void Assembler::movb(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst, true); - emit_byte(0x8A); - emit_operand(dst, src); -} - -void Assembler::movb(Address dst, int imm8) { - InstructionMark im(this); - prefix(dst); - emit_byte(0xC6); - emit_operand(rax, dst, 1); - emit_byte(imm8); -} - -void Assembler::movb(Address dst, Register src) { - InstructionMark im(this); - prefix(dst, src, true); - emit_byte(0x88); - emit_operand(src, dst); -} - -void Assembler::movw(Address dst, int imm16) { - InstructionMark im(this); - emit_byte(0x66); // switch to 16-bit mode - prefix(dst); - emit_byte(0xC7); - emit_operand(rax, dst, 2); - emit_word(imm16); -} - -void Assembler::movw(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x66); - prefix(src, dst); - emit_byte(0x8B); - emit_operand(dst, src); -} - -void 
Assembler::movw(Address dst, Register src) { - InstructionMark im(this); - emit_byte(0x66); - prefix(dst, src); - emit_byte(0x89); - emit_operand(src, dst); -} - -// Uses zero extension. -void Assembler::movl(Register dst, int imm32) { - int encode = prefix_and_encode(dst->encoding()); - emit_byte(0xB8 | encode); - emit_long(imm32); -} - -void Assembler::movl(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x8B); - emit_byte(0xC0 | encode); -} - -void Assembler::movl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x8B); - emit_operand(dst, src); -} - -void Assembler::movl(Address dst, int imm32) { - InstructionMark im(this); - prefix(dst); - emit_byte(0xC7); - emit_operand(rax, dst, 4); - emit_long(imm32); -} - -void Assembler::movl(Address dst, Register src) { - InstructionMark im(this); - prefix(dst, src); - emit_byte(0x89); - emit_operand(src, dst); -} - -void Assembler::mov64(Register dst, int64_t imm64) { - InstructionMark im(this); - int encode = prefixq_and_encode(dst->encoding()); - emit_byte(0xB8 | encode); - emit_long64(imm64); -} - -void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) { - InstructionMark im(this); - int encode = prefixq_and_encode(dst->encoding()); - emit_byte(0xB8 | encode); - emit_data64(imm64, rspec); -} - -void Assembler::movq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x8B); - emit_byte(0xC0 | encode); -} - -void Assembler::movq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x8B); - emit_operand(dst, src); -} - -void Assembler::mov64(Address dst, int64_t imm32) { - assert(is_simm32(imm32), "lost bits"); - InstructionMark im(this); - prefixq(dst); - emit_byte(0xC7); - emit_operand(rax, dst, 4); - emit_long(imm32); -} - -void Assembler::movq(Address dst, Register src) { - InstructionMark im(this); - prefixq(dst, src); - emit_byte(0x89); - emit_operand(src, dst); -} - -void Assembler::movsbl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0xBE); - emit_operand(dst, src); -} - -void Assembler::movsbl(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding(), true); - emit_byte(0x0F); - emit_byte(0xBE); - emit_byte(0xC0 | encode); -} - -void Assembler::movswl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0xBF); - emit_operand(dst, src); -} - -void Assembler::movswl(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0xBF); - emit_byte(0xC0 | encode); -} - -void Assembler::movslq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x63); - emit_operand(dst, src); -} - -void Assembler::movslq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x63); - emit_byte(0xC0 | encode); -} - -void Assembler::movzbl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0xB6); - emit_operand(dst, src); -} - -void Assembler::movzbl(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding(), true); - emit_byte(0x0F); - emit_byte(0xB6); - emit_byte(0xC0 | encode); -} - -void Assembler::movzwl(Register dst, 
Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0xB7); - emit_operand(dst, src); -} - -void Assembler::movzwl(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0xB7); - emit_byte(0xC0 | encode); -} - -void Assembler::movss(XMMRegister dst, XMMRegister src) { - emit_byte(0xF3); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x10); - emit_byte(0xC0 | encode); -} - -void Assembler::movss(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF3); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x10); - emit_operand(dst, src); -} - -void Assembler::movss(Address dst, XMMRegister src) { - InstructionMark im(this); - emit_byte(0xF3); - prefix(dst, src); - emit_byte(0x0F); - emit_byte(0x11); - emit_operand(src, dst); -} - -void Assembler::movsd(XMMRegister dst, XMMRegister src) { - emit_byte(0xF2); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x10); - emit_byte(0xC0 | encode); -} - -void Assembler::movsd(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF2); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x10); - emit_operand(dst, src); -} - -void Assembler::movsd(Address dst, XMMRegister src) { - InstructionMark im(this); - emit_byte(0xF2); - prefix(dst, src); - emit_byte(0x0F); - emit_byte(0x11); - emit_operand(src, dst); -} - -// New cpus require to use movsd and movss to avoid partial register stall -// when loading from memory. But for old Opteron use movlpd instead of movsd. -// The selection is done in MacroAssembler::movdbl() and movflt(). -void Assembler::movlpd(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0x66); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x12); - emit_operand(dst, src); -} - -void Assembler::movapd(XMMRegister dst, XMMRegister src) { - int dstenc = dst->encoding(); - int srcenc = src->encoding(); - emit_byte(0x66); - if (dstenc < 8) { - if (srcenc >= 8) { - prefix(REX_B); - srcenc -= 8; - } - } else { - if (srcenc < 8) { - prefix(REX_R); - } else { - prefix(REX_RB); - srcenc -= 8; - } - dstenc -= 8; - } - emit_byte(0x0F); - emit_byte(0x28); - emit_byte(0xC0 | dstenc << 3 | srcenc); -} - -void Assembler::movaps(XMMRegister dst, XMMRegister src) { - int dstenc = dst->encoding(); - int srcenc = src->encoding(); - if (dstenc < 8) { - if (srcenc >= 8) { - prefix(REX_B); - srcenc -= 8; - } - } else { - if (srcenc < 8) { - prefix(REX_R); - } else { - prefix(REX_RB); - srcenc -= 8; - } - dstenc -= 8; - } - emit_byte(0x0F); - emit_byte(0x28); - emit_byte(0xC0 | dstenc << 3 | srcenc); -} - -void Assembler::movdl(XMMRegister dst, Register src) { - emit_byte(0x66); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x6E); - emit_byte(0xC0 | encode); -} - -void Assembler::movdl(Register dst, XMMRegister src) { - emit_byte(0x66); - // swap src/dst to get correct prefix - int encode = prefix_and_encode(src->encoding(), dst->encoding()); - emit_byte(0x0F); - emit_byte(0x7E); - emit_byte(0xC0 | encode); -} - -void Assembler::movdq(XMMRegister dst, Register src) { - emit_byte(0x66); - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x6E); - emit_byte(0xC0 | encode); -} - -void Assembler::movdq(Register dst, XMMRegister src) { - emit_byte(0x66); - // swap src/dst to get correct 
prefix - int encode = prefixq_and_encode(src->encoding(), dst->encoding()); - emit_byte(0x0F); - emit_byte(0x7E); - emit_byte(0xC0 | encode); -} - -void Assembler::pxor(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0x66); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0xEF); - emit_operand(dst, src); -} - -void Assembler::pxor(XMMRegister dst, XMMRegister src) { - InstructionMark im(this); - emit_byte(0x66); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0xEF); - emit_byte(0xC0 | encode); -} - -void Assembler::movdqa(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0x66); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x6F); - emit_operand(dst, src); -} - -void Assembler::movdqa(XMMRegister dst, XMMRegister src) { - emit_byte(0x66); - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x6F); - emit_byte(0xC0 | encode); -} - -void Assembler::movdqa(Address dst, XMMRegister src) { - InstructionMark im(this); - emit_byte(0x66); - prefix(dst, src); - emit_byte(0x0F); - emit_byte(0x7F); - emit_operand(src, dst); -} - -void Assembler::movq(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF3); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x7E); - emit_operand(dst, src); -} - -void Assembler::movq(Address dst, XMMRegister src) { - InstructionMark im(this); - emit_byte(0x66); - prefix(dst, src); - emit_byte(0x0F); - emit_byte(0xD6); - emit_operand(src, dst); -} - -void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) { - assert(isByte(mode), "invalid value"); - emit_byte(0x66); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x70); - emit_byte(0xC0 | encode); - emit_byte(mode & 0xFF); -} - -void Assembler::pshufd(XMMRegister dst, Address src, int mode) { - assert(isByte(mode), "invalid value"); - InstructionMark im(this); - emit_byte(0x66); - emit_byte(0x0F); - emit_byte(0x70); - emit_operand(dst, src); - emit_byte(mode & 0xFF); -} - -void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { - assert(isByte(mode), "invalid value"); - emit_byte(0xF2); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x70); - emit_byte(0xC0 | encode); - emit_byte(mode & 0xFF); -} - -void Assembler::pshuflw(XMMRegister dst, Address src, int mode) { - assert(isByte(mode), "invalid value"); - InstructionMark im(this); - emit_byte(0xF2); - emit_byte(0x0F); - emit_byte(0x70); - emit_operand(dst, src); - emit_byte(mode & 0xFF); -} - -void Assembler::cmovl(Condition cc, Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x40 | cc); - emit_byte(0xC0 | encode); -} - -void Assembler::cmovl(Condition cc, Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x40 | cc); - emit_operand(dst, src); -} - -void Assembler::cmovq(Condition cc, Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x40 | cc); - emit_byte(0xC0 | encode); -} - -void Assembler::cmovq(Condition cc, Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x0F); - emit_byte(0x40 | cc); - emit_operand(dst, src); -} - -void Assembler::prefetch_prefix(Address src) { - prefix(src); - emit_byte(0x0F); -} - -void 
Assembler::prefetcht0(Address src) { - InstructionMark im(this); - prefetch_prefix(src); - emit_byte(0x18); - emit_operand(rcx, src); // 1, src -} - -void Assembler::prefetcht1(Address src) { - InstructionMark im(this); - prefetch_prefix(src); - emit_byte(0x18); - emit_operand(rdx, src); // 2, src -} - -void Assembler::prefetcht2(Address src) { - InstructionMark im(this); - prefetch_prefix(src); - emit_byte(0x18); - emit_operand(rbx, src); // 3, src -} - -void Assembler::prefetchnta(Address src) { - InstructionMark im(this); - prefetch_prefix(src); - emit_byte(0x18); - emit_operand(rax, src); // 0, src -} - -void Assembler::prefetchw(Address src) { - InstructionMark im(this); - prefetch_prefix(src); - emit_byte(0x0D); - emit_operand(rcx, src); // 1, src -} - -void Assembler::adcl(Register dst, int imm32) { - prefix(dst); - emit_arith(0x81, 0xD0, dst, imm32); -} - -void Assembler::adcl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x13); - emit_operand(dst, src); -} - -void Assembler::adcl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x13, 0xC0, dst, src); -} - -void Assembler::adcq(Register dst, int imm32) { - (void) prefixq_and_encode(dst->encoding()); - emit_arith(0x81, 0xD0, dst, imm32); -} - -void Assembler::adcq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x13); - emit_operand(dst, src); -} - -void Assembler::adcq(Register dst, Register src) { - (int) prefixq_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x13, 0xC0, dst, src); -} - -void Assembler::addl(Address dst, int imm32) { - InstructionMark im(this); - prefix(dst); - emit_arith_operand(0x81, rax, dst,imm32); -} - -void Assembler::addl(Address dst, Register src) { - InstructionMark im(this); - prefix(dst, src); - emit_byte(0x01); - emit_operand(src, dst); -} - -void Assembler::addl(Register dst, int imm32) { - prefix(dst); - emit_arith(0x81, 0xC0, dst, imm32); -} - -void Assembler::addl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x03); - emit_operand(dst, src); -} - -void Assembler::addl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x03, 0xC0, dst, src); -} - -void Assembler::addq(Address dst, int imm32) { - InstructionMark im(this); - prefixq(dst); - emit_arith_operand(0x81, rax, dst,imm32); -} - -void Assembler::addq(Address dst, Register src) { - InstructionMark im(this); - prefixq(dst, src); - emit_byte(0x01); - emit_operand(src, dst); -} - -void Assembler::addq(Register dst, int imm32) { - (void) prefixq_and_encode(dst->encoding()); - emit_arith(0x81, 0xC0, dst, imm32); -} - -void Assembler::addq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x03); - emit_operand(dst, src); -} - -void Assembler::addq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x03, 0xC0, dst, src); -} - -void Assembler::andl(Register dst, int imm32) { - prefix(dst); - emit_arith(0x81, 0xE0, dst, imm32); -} - -void Assembler::andl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x23); - emit_operand(dst, src); -} - -void Assembler::andl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x23, 0xC0, dst, src); -} - -void Assembler::andq(Register dst, int imm32) { - (void) 
prefixq_and_encode(dst->encoding()); - emit_arith(0x81, 0xE0, dst, imm32); -} - -void Assembler::andq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x23); - emit_operand(dst, src); -} - -void Assembler::andq(Register dst, Register src) { - (int) prefixq_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x23, 0xC0, dst, src); -} - -void Assembler::cmpb(Address dst, int imm8) { - InstructionMark im(this); - prefix(dst); - emit_byte(0x80); - emit_operand(rdi, dst, 1); - emit_byte(imm8); -} - -void Assembler::cmpl(Address dst, int imm32) { - InstructionMark im(this); - prefix(dst); - emit_byte(0x81); - emit_operand(rdi, dst, 4); - emit_long(imm32); -} - -void Assembler::cmpl(Register dst, int imm32) { - prefix(dst); - emit_arith(0x81, 0xF8, dst, imm32); -} - -void Assembler::cmpl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x3B, 0xC0, dst, src); -} - -void Assembler::cmpl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x3B); - emit_operand(dst, src); -} - -void Assembler::cmpq(Address dst, int imm32) { - InstructionMark im(this); - prefixq(dst); - emit_byte(0x81); - emit_operand(rdi, dst, 4); - emit_long(imm32); -} - -void Assembler::cmpq(Register dst, int imm32) { - (void) prefixq_and_encode(dst->encoding()); - emit_arith(0x81, 0xF8, dst, imm32); -} - -void Assembler::cmpq(Address dst, Register src) { - prefixq(dst, src); - emit_byte(0x3B); - emit_operand(src, dst); -} - -void Assembler::cmpq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x3B, 0xC0, dst, src); -} - -void Assembler::cmpq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x3B); - emit_operand(dst, src); -} - -void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x2E); - emit_byte(0xC0 | encode); -} - -void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { - emit_byte(0x66); - ucomiss(dst, src); -} - -void Assembler::decl(Register dst) { - // Don't use it directly. Use MacroAssembler::decrementl() instead. - // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) - int encode = prefix_and_encode(dst->encoding()); - emit_byte(0xFF); - emit_byte(0xC8 | encode); -} - -void Assembler::decl(Address dst) { - // Don't use it directly. Use MacroAssembler::decrementl() instead. - InstructionMark im(this); - prefix(dst); - emit_byte(0xFF); - emit_operand(rcx, dst); -} - -void Assembler::decq(Register dst) { - // Don't use it directly. Use MacroAssembler::decrementq() instead. - // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) - int encode = prefixq_and_encode(dst->encoding()); - emit_byte(0xFF); - emit_byte(0xC8 | encode); -} - -void Assembler::decq(Address dst) { - // Don't use it directly. Use MacroAssembler::decrementq() instead. 
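The "two-byte form" remark in decl()/decq() above (and in incl()/incq() below) is there because the legacy one-byte inc/dec opcodes 0x40..0x4F were repurposed as REX prefixes in 64-bit mode, so only the FF /0 and FF /1 encodings remain usable. A byte-level sketch (helper name is hypothetical):

    #include <cstdint>
    #include <vector>

    // dec r32 in long mode: FF /1 (mod = 11b, reg field = 1 as opcode extension).
    static std::vector<uint8_t> dec_r32_bytes(unsigned reg_enc) {
      return { 0xFF, static_cast<uint8_t>(0xC8 | (reg_enc & 7u)) };
    }
    // In 32-bit code "dec eax" could be the single byte 0x48; in 64-bit mode
    // 0x48 is REX.W, which is why the emitters above never use that form.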
- InstructionMark im(this); - prefixq(dst); - emit_byte(0xFF); - emit_operand(rcx, dst); -} - -void Assembler::idivl(Register src) { - int encode = prefix_and_encode(src->encoding()); - emit_byte(0xF7); - emit_byte(0xF8 | encode); -} - -void Assembler::idivq(Register src) { - int encode = prefixq_and_encode(src->encoding()); - emit_byte(0xF7); - emit_byte(0xF8 | encode); -} - -void Assembler::cdql() { - emit_byte(0x99); -} - -void Assembler::cdqq() { - prefix(REX_W); - emit_byte(0x99); -} - -void Assembler::imull(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0xAF); - emit_byte(0xC0 | encode); -} - -void Assembler::imull(Register dst, Register src, int value) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - if (is8bit(value)) { - emit_byte(0x6B); - emit_byte(0xC0 | encode); - emit_byte(value); - } else { - emit_byte(0x69); - emit_byte(0xC0 | encode); - emit_long(value); - } -} - -void Assembler::imulq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0xAF); - emit_byte(0xC0 | encode); -} - -void Assembler::imulq(Register dst, Register src, int value) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - if (is8bit(value)) { - emit_byte(0x6B); - emit_byte(0xC0 | encode); - emit_byte(value); - } else { - emit_byte(0x69); - emit_byte(0xC0 | encode); - emit_long(value); - } -} - -void Assembler::incl(Register dst) { - // Don't use it directly. Use MacroAssembler::incrementl() instead. - // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) - int encode = prefix_and_encode(dst->encoding()); - emit_byte(0xFF); - emit_byte(0xC0 | encode); -} - -void Assembler::incl(Address dst) { - // Don't use it directly. Use MacroAssembler::incrementl() instead. - InstructionMark im(this); - prefix(dst); - emit_byte(0xFF); - emit_operand(rax, dst); -} - -void Assembler::incq(Register dst) { - // Don't use it directly. Use MacroAssembler::incrementq() instead. - // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) - int encode = prefixq_and_encode(dst->encoding()); - emit_byte(0xFF); - emit_byte(0xC0 | encode); -} - -void Assembler::incq(Address dst) { - // Don't use it directly. Use MacroAssembler::incrementq() instead. 
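imull()/imulq() above switch between the sign-extended imm8 form (6B /r ib) and the imm32 form (69 /r id) based on is8bit(value). A sketch of that selection with hypothetical names; the REX prefix, when required, would precede these bytes:

    #include <cstdint>
    #include <vector>

    static bool fits_in_signed_byte(int v) { return v >= -128 && v < 128; }

    static std::vector<uint8_t> imul_imm_bytes(unsigned dst, unsigned src, int value) {
      const uint8_t modrm = 0xC0 | ((dst & 7u) << 3) | (src & 7u);
      if (fits_in_signed_byte(value)) {
        return { 0x6B, modrm, static_cast<uint8_t>(value) };   // imul r, r/m, imm8
      }
      return { 0x69, modrm,                                    // imul r, r/m, imm32 (little-endian)
               static_cast<uint8_t>(value), static_cast<uint8_t>(value >> 8),
               static_cast<uint8_t>(value >> 16), static_cast<uint8_t>(value >> 24) };
    }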
- InstructionMark im(this); - prefixq(dst); - emit_byte(0xFF); - emit_operand(rax, dst); -} - -void Assembler::leal(Register dst, Address src) { - InstructionMark im(this); - emit_byte(0x67); // addr32 - prefix(src, dst); - emit_byte(0x8D); - emit_operand(dst, src); -} - -void Assembler::leaq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x8D); - emit_operand(dst, src); -} - -void Assembler::mull(Address src) { - InstructionMark im(this); - // was missing - prefix(src); - emit_byte(0xF7); - emit_operand(rsp, src); -} - -void Assembler::mull(Register src) { - // was missing - int encode = prefix_and_encode(src->encoding()); - emit_byte(0xF7); - emit_byte(0xE0 | encode); -} - -void Assembler::negl(Register dst) { - int encode = prefix_and_encode(dst->encoding()); - emit_byte(0xF7); - emit_byte(0xD8 | encode); -} - -void Assembler::negq(Register dst) { - int encode = prefixq_and_encode(dst->encoding()); - emit_byte(0xF7); - emit_byte(0xD8 | encode); -} - -void Assembler::notl(Register dst) { - int encode = prefix_and_encode(dst->encoding()); - emit_byte(0xF7); - emit_byte(0xD0 | encode); -} - -void Assembler::notq(Register dst) { - int encode = prefixq_and_encode(dst->encoding()); - emit_byte(0xF7); - emit_byte(0xD0 | encode); -} - -void Assembler::orl(Address dst, int imm32) { - InstructionMark im(this); - prefix(dst); - emit_byte(0x81); - emit_operand(rcx, dst, 4); - emit_long(imm32); -} - -void Assembler::orl(Register dst, int imm32) { - prefix(dst); - emit_arith(0x81, 0xC8, dst, imm32); -} - -void Assembler::orl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x0B); - emit_operand(dst, src); -} - -void Assembler::orl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x0B, 0xC0, dst, src); -} - -void Assembler::orq(Address dst, int imm32) { - InstructionMark im(this); - prefixq(dst); - emit_byte(0x81); - emit_operand(rcx, dst, 4); - emit_long(imm32); -} - -void Assembler::orq(Register dst, int imm32) { - (void) prefixq_and_encode(dst->encoding()); - emit_arith(0x81, 0xC8, dst, imm32); -} - -void Assembler::orq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x0B); - emit_operand(dst, src); -} - -void Assembler::orq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x0B, 0xC0, dst, src); -} - -void Assembler::rcll(Register dst, int imm8) { - assert(isShiftCount(imm8), "illegal shift count"); - int encode = prefix_and_encode(dst->encoding()); - if (imm8 == 1) { - emit_byte(0xD1); - emit_byte(0xD0 | encode); - } else { - emit_byte(0xC1); - emit_byte(0xD0 | encode); - emit_byte(imm8); - } -} - -void Assembler::rclq(Register dst, int imm8) { - assert(isShiftCount(imm8 >> 1), "illegal shift count"); - int encode = prefixq_and_encode(dst->encoding()); - if (imm8 == 1) { - emit_byte(0xD1); - emit_byte(0xD0 | encode); - } else { - emit_byte(0xC1); - emit_byte(0xD0 | encode); - emit_byte(imm8); - } -} - -void Assembler::sarl(Register dst, int imm8) { - int encode = prefix_and_encode(dst->encoding()); - assert(isShiftCount(imm8), "illegal shift count"); - if (imm8 == 1) { - emit_byte(0xD1); - emit_byte(0xF8 | encode); - } else { - emit_byte(0xC1); - emit_byte(0xF8 | encode); - emit_byte(imm8); - } -} - -void Assembler::sarl(Register dst) { - int encode = prefix_and_encode(dst->encoding()); - emit_byte(0xD3); - emit_byte(0xF8 | encode); -} - -void 
Assembler::sarq(Register dst, int imm8) { - assert(isShiftCount(imm8 >> 1), "illegal shift count"); - int encode = prefixq_and_encode(dst->encoding()); - if (imm8 == 1) { - emit_byte(0xD1); - emit_byte(0xF8 | encode); - } else { - emit_byte(0xC1); - emit_byte(0xF8 | encode); - emit_byte(imm8); - } -} - -void Assembler::sarq(Register dst) { - int encode = prefixq_and_encode(dst->encoding()); - emit_byte(0xD3); - emit_byte(0xF8 | encode); -} - -void Assembler::sbbl(Address dst, int imm32) { - InstructionMark im(this); - prefix(dst); - emit_arith_operand(0x81, rbx, dst, imm32); -} - -void Assembler::sbbl(Register dst, int imm32) { - prefix(dst); - emit_arith(0x81, 0xD8, dst, imm32); -} - -void Assembler::sbbl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x1B); - emit_operand(dst, src); -} - -void Assembler::sbbl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x1B, 0xC0, dst, src); -} - -void Assembler::sbbq(Address dst, int imm32) { - InstructionMark im(this); - prefixq(dst); - emit_arith_operand(0x81, rbx, dst, imm32); -} - -void Assembler::sbbq(Register dst, int imm32) { - (void) prefixq_and_encode(dst->encoding()); - emit_arith(0x81, 0xD8, dst, imm32); -} - -void Assembler::sbbq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x1B); - emit_operand(dst, src); -} - -void Assembler::sbbq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x1B, 0xC0, dst, src); -} - -void Assembler::shll(Register dst, int imm8) { - assert(isShiftCount(imm8), "illegal shift count"); - int encode = prefix_and_encode(dst->encoding()); - if (imm8 == 1 ) { - emit_byte(0xD1); - emit_byte(0xE0 | encode); - } else { - emit_byte(0xC1); - emit_byte(0xE0 | encode); - emit_byte(imm8); - } -} - -void Assembler::shll(Register dst) { - int encode = prefix_and_encode(dst->encoding()); - emit_byte(0xD3); - emit_byte(0xE0 | encode); -} - -void Assembler::shlq(Register dst, int imm8) { - assert(isShiftCount(imm8 >> 1), "illegal shift count"); - int encode = prefixq_and_encode(dst->encoding()); - if (imm8 == 1) { - emit_byte(0xD1); - emit_byte(0xE0 | encode); - } else { - emit_byte(0xC1); - emit_byte(0xE0 | encode); - emit_byte(imm8); - } -} - -void Assembler::shlq(Register dst) { - int encode = prefixq_and_encode(dst->encoding()); - emit_byte(0xD3); - emit_byte(0xE0 | encode); -} - -void Assembler::shrl(Register dst, int imm8) { - assert(isShiftCount(imm8), "illegal shift count"); - int encode = prefix_and_encode(dst->encoding()); - emit_byte(0xC1); - emit_byte(0xE8 | encode); - emit_byte(imm8); -} - -void Assembler::shrl(Register dst) { - int encode = prefix_and_encode(dst->encoding()); - emit_byte(0xD3); - emit_byte(0xE8 | encode); -} - -void Assembler::shrq(Register dst, int imm8) { - assert(isShiftCount(imm8 >> 1), "illegal shift count"); - int encode = prefixq_and_encode(dst->encoding()); - emit_byte(0xC1); - emit_byte(0xE8 | encode); - emit_byte(imm8); -} - -void Assembler::shrq(Register dst) { - int encode = prefixq_and_encode(dst->encoding()); - emit_byte(0xD3); - emit_byte(0xE8 | encode); -} - -void Assembler::subl(Address dst, int imm32) { - InstructionMark im(this); - prefix(dst); - if (is8bit(imm32)) { - emit_byte(0x83); - emit_operand(rbp, dst, 1); - emit_byte(imm32 & 0xFF); - } else { - emit_byte(0x81); - emit_operand(rbp, dst, 4); - emit_long(imm32); - } -} - -void Assembler::subl(Register dst, int imm32) { - 
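The shift emitters above special-case a count of one, which has its own opcode (D1 /digit), while other immediate counts use C1 /digit ib; the isShiftCount() asserts keep the count inside the encodable range (0..31 for the 32-bit forms, 0..63 for the q forms). A sketch for shll, whose /4 extension is the 0xE0 base used above (hypothetical helper):

    #include <cstdint>
    #include <vector>

    static std::vector<uint8_t> shll_bytes(unsigned reg_enc, int imm8) {
      const uint8_t modrm = 0xE0 | (reg_enc & 7u);        // /4 (shl) + register
      if (imm8 == 1) {
        return { 0xD1, modrm };                           // dedicated shift-by-one form
      }
      return { 0xC1, modrm, static_cast<uint8_t>(imm8) }; // general imm8 form
    }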
prefix(dst); - emit_arith(0x81, 0xE8, dst, imm32); -} - -void Assembler::subl(Address dst, Register src) { - InstructionMark im(this); - prefix(dst, src); - emit_byte(0x29); - emit_operand(src, dst); -} - -void Assembler::subl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x2B); - emit_operand(dst, src); -} - -void Assembler::subl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x2B, 0xC0, dst, src); -} - -void Assembler::subq(Address dst, int imm32) { - InstructionMark im(this); - prefixq(dst); - if (is8bit(imm32)) { - emit_byte(0x83); - emit_operand(rbp, dst, 1); - emit_byte(imm32 & 0xFF); - } else { - emit_byte(0x81); - emit_operand(rbp, dst, 4); - emit_long(imm32); - } -} - -void Assembler::subq(Register dst, int imm32) { - (void) prefixq_and_encode(dst->encoding()); - emit_arith(0x81, 0xE8, dst, imm32); -} - -void Assembler::subq(Address dst, Register src) { - InstructionMark im(this); - prefixq(dst, src); - emit_byte(0x29); - emit_operand(src, dst); -} - -void Assembler::subq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x2B); - emit_operand(dst, src); -} - -void Assembler::subq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x2B, 0xC0, dst, src); -} - -void Assembler::testb(Register dst, int imm8) { - (void) prefix_and_encode(dst->encoding(), true); - emit_arith_b(0xF6, 0xC0, dst, imm8); -} - -void Assembler::testl(Register dst, int imm32) { - // not using emit_arith because test - // doesn't support sign-extension of - // 8bit operands - int encode = dst->encoding(); - if (encode == 0) { - emit_byte(0xA9); - } else { - encode = prefix_and_encode(encode); - emit_byte(0xF7); - emit_byte(0xC0 | encode); - } - emit_long(imm32); -} - -void Assembler::testl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x85, 0xC0, dst, src); -} - -void Assembler::testq(Register dst, int imm32) { - // not using emit_arith because test - // doesn't support sign-extension of - // 8bit operands - int encode = dst->encoding(); - if (encode == 0) { - prefix(REX_W); - emit_byte(0xA9); - } else { - encode = prefixq_and_encode(encode); - emit_byte(0xF7); - emit_byte(0xC0 | encode); - } - emit_long(imm32); -} - -void Assembler::testq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x85, 0xC0, dst, src); -} - -void Assembler::xaddl(Address dst, Register src) { - InstructionMark im(this); - prefix(dst, src); - emit_byte(0x0F); - emit_byte(0xC1); - emit_operand(src, dst); -} - -void Assembler::xaddq(Address dst, Register src) { - InstructionMark im(this); - prefixq(dst, src); - emit_byte(0x0F); - emit_byte(0xC1); - emit_operand(src, dst); -} - -void Assembler::xorl(Register dst, int imm32) { - prefix(dst); - emit_arith(0x81, 0xF0, dst, imm32); -} - -void Assembler::xorl(Register dst, Register src) { - (void) prefix_and_encode(dst->encoding(), src->encoding()); - emit_arith(0x33, 0xC0, dst, src); -} - -void Assembler::xorl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x33); - emit_operand(dst, src); -} - -void Assembler::xorq(Register dst, int imm32) { - (void) prefixq_and_encode(dst->encoding()); - emit_arith(0x81, 0xF0, dst, imm32); -} - -void Assembler::xorq(Register dst, Register src) { - (void) prefixq_and_encode(dst->encoding(), src->encoding()); - 
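As the comments above note, testl()/testq() bypass emit_arith() because TEST has no sign-extended imm8 form; they also use the dedicated one-byte opcode A9 when the operand is rax (encoding 0). The 32-bit decision, sketched with a hypothetical helper:

    #include <cstdint>
    #include <vector>

    static std::vector<uint8_t> testl_imm_bytes(unsigned reg_enc, int32_t imm32) {
      std::vector<uint8_t> b;
      if (reg_enc == 0) {
        b.push_back(0xA9);                                // test eax, imm32 (short form)
      } else {
        b.push_back(0xF7);                                // test r/m32, imm32 is F7 /0
        b.push_back(static_cast<uint8_t>(0xC0 | (reg_enc & 7u)));
      }
      for (int i = 0; i < 4; ++i) {                       // little-endian imm32
        b.push_back(static_cast<uint8_t>(imm32 >> (8 * i)));
      }
      return b;
    }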
emit_arith(0x33, 0xC0, dst, src); -} - -void Assembler::xorq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x33); - emit_operand(dst, src); -} - -void Assembler::bswapl(Register reg) { - int encode = prefix_and_encode(reg->encoding()); - emit_byte(0x0F); - emit_byte(0xC8 | encode); -} - -void Assembler::bswapq(Register reg) { - int encode = prefixq_and_encode(reg->encoding()); - emit_byte(0x0F); - emit_byte(0xC8 | encode); -} - -void Assembler::lock() { - emit_byte(0xF0); -} - -void Assembler::xchgl(Register dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x87); - emit_operand(dst, src); -} - -void Assembler::xchgl(Register dst, Register src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x87); - emit_byte(0xc0 | encode); -} - -void Assembler::xchgq(Register dst, Address src) { - InstructionMark im(this); - prefixq(src, dst); - emit_byte(0x87); - emit_operand(dst, src); -} - -void Assembler::xchgq(Register dst, Register src) { - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x87); - emit_byte(0xc0 | encode); -} - -void Assembler::cmpxchgl(Register reg, Address adr) { - InstructionMark im(this); - prefix(adr, reg); - emit_byte(0x0F); - emit_byte(0xB1); - emit_operand(reg, adr); -} - -void Assembler::cmpxchgq(Register reg, Address adr) { - InstructionMark im(this); - prefixq(adr, reg); - emit_byte(0x0F); - emit_byte(0xB1); - emit_operand(reg, adr); -} - -void Assembler::hlt() { - emit_byte(0xF4); -} - - -void Assembler::addr_nop_4() { - // 4 bytes: NOP DWORD PTR [EAX+0] - emit_byte(0x0F); - emit_byte(0x1F); - emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc); - emit_byte(0); // 8-bits offset (1 byte) -} - -void Assembler::addr_nop_5() { - // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset - emit_byte(0x0F); - emit_byte(0x1F); - emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4); - emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); - emit_byte(0); // 8-bits offset (1 byte) -} - -void Assembler::addr_nop_7() { - // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset - emit_byte(0x0F); - emit_byte(0x1F); - emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc); - emit_long(0); // 32-bits offset (4 bytes) -} - -void Assembler::addr_nop_8() { - // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset - emit_byte(0x0F); - emit_byte(0x1F); - emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4); - emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); - emit_long(0); // 32-bits offset (4 bytes) -} - -void Assembler::nop(int i) { - assert(i > 0, " "); - if (UseAddressNop && VM_Version::is_intel()) { - // - // Using multi-bytes nops "0x0F 0x1F [address]" for Intel - // 1: 0x90 - // 2: 0x66 0x90 - // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) - // 4: 0x0F 0x1F 0x40 0x00 - // 5: 0x0F 0x1F 0x44 0x00 0x00 - // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 - // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 - // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - - // The rest coding is Intel specific - don't use consecutive address nops - - // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 - // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 - // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 - // 15: 0x66 
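lock() plus cmpxchgl()/cmpxchgq() above are emitted back-to-back (F0 0F B1 /r) to form the VM's atomic compare-and-swap. What the emitted instruction does, written out in plain C++ as a semantic sketch only (the real instruction is a single atomic operation, and the compare value lives in rax):

    #include <cstdint>

    // Returns the value of ZF after "lock cmpxchg [dest], new_value".
    static bool cmpxchg_semantics(int64_t* dest, int64_t& rax_value, int64_t new_value) {
      if (*dest == rax_value) {   // compare rax with the memory operand
        *dest = new_value;        // equal: store the new value, ZF = 1
        return true;
      }
      rax_value = *dest;          // not equal: old value is loaded into rax, ZF = 0
      return false;
    }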
0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 - - while(i >= 15) { - // For Intel don't generate consecutive addess nops (mix with regular nops) - i -= 15; - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - addr_nop_8(); - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x90); // nop - } - switch (i) { - case 14: - emit_byte(0x66); // size prefix - case 13: - emit_byte(0x66); // size prefix - case 12: - addr_nop_8(); - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x90); // nop - break; - case 11: - emit_byte(0x66); // size prefix - case 10: - emit_byte(0x66); // size prefix - case 9: - emit_byte(0x66); // size prefix - case 8: - addr_nop_8(); - break; - case 7: - addr_nop_7(); - break; - case 6: - emit_byte(0x66); // size prefix - case 5: - addr_nop_5(); - break; - case 4: - addr_nop_4(); - break; - case 3: - // Don't use "0x0F 0x1F 0x00" - need patching safe padding - emit_byte(0x66); // size prefix - case 2: - emit_byte(0x66); // size prefix - case 1: - emit_byte(0x90); // nop - break; - default: - assert(i == 0, " "); - } - return; - } - if (UseAddressNop && VM_Version::is_amd()) { - // - // Using multi-bytes nops "0x0F 0x1F [address]" for AMD. - // 1: 0x90 - // 2: 0x66 0x90 - // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) - // 4: 0x0F 0x1F 0x40 0x00 - // 5: 0x0F 0x1F 0x44 0x00 0x00 - // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 - // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 - // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - - // The rest coding is AMD specific - use consecutive address nops - - // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 - // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 - // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 - // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 - // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 - // Size prefixes (0x66) are added for larger sizes - - while(i >= 22) { - i -= 11; - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - emit_byte(0x66); // size prefix - addr_nop_8(); - } - // Generate first nop for size between 21-12 - switch (i) { - case 21: - i -= 1; - emit_byte(0x66); // size prefix - case 20: - case 19: - i -= 1; - emit_byte(0x66); // size prefix - case 18: - case 17: - i -= 1; - emit_byte(0x66); // size prefix - case 16: - case 15: - i -= 8; - addr_nop_8(); - break; - case 14: - case 13: - i -= 7; - addr_nop_7(); - break; - case 12: - i -= 6; - emit_byte(0x66); // size prefix - addr_nop_5(); - break; - default: - assert(i < 12, " "); - } - - // Generate second nop for size between 11-1 - switch (i) { - case 11: - emit_byte(0x66); // size prefix - case 10: - emit_byte(0x66); // size prefix - case 9: - emit_byte(0x66); // size prefix - case 8: - addr_nop_8(); - break; - case 7: - addr_nop_7(); - break; - case 6: - emit_byte(0x66); // size prefix - case 5: - addr_nop_5(); - break; - case 4: - addr_nop_4(); - break; - case 3: - // Don't use "0x0F 0x1F 0x00" - need patching safe padding - emit_byte(0x66); // size prefix - case 2: - emit_byte(0x66); // size prefix - case 1: - 
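The padding tables in the comments above reduce to two building blocks: 0x66 size prefixes stacked in front of either a plain 0x90 nop or the multi-byte 0F 1F /0 nop. A sketch that reproduces the 1..8-byte rows listed in both tables (illustrative; not the HotSpot emitter):

    #include <cstdint>
    #include <vector>

    static std::vector<uint8_t> nop_padding(int len) {
      switch (len) {
        case 1: return { 0x90 };
        case 2: return { 0x66, 0x90 };
        case 3: return { 0x66, 0x66, 0x90 };        // 0F 1F 00 avoided: not patching-safe
        case 4: return { 0x0F, 0x1F, 0x40, 0x00 };
        case 5: return { 0x0F, 0x1F, 0x44, 0x00, 0x00 };
        case 6: return { 0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00 };
        case 7: return { 0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00 };
        case 8: return { 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 };
        default: return {};                         // longer runs chain these blocks
      }
    }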
emit_byte(0x90); // nop - break; - default: - assert(i == 0, " "); - } - return; - } - - // Using nops with size prefixes "0x66 0x90". - // From AMD Optimization Guide: - // 1: 0x90 - // 2: 0x66 0x90 - // 3: 0x66 0x66 0x90 - // 4: 0x66 0x66 0x66 0x90 - // 5: 0x66 0x66 0x90 0x66 0x90 - // 6: 0x66 0x66 0x90 0x66 0x66 0x90 - // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 - // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90 - // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 - // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 - // - while(i > 12) { - i -= 4; - emit_byte(0x66); // size prefix - emit_byte(0x66); - emit_byte(0x66); - emit_byte(0x90); // nop - } - // 1 - 12 nops - if(i > 8) { - if(i > 9) { - i -= 1; - emit_byte(0x66); - } - i -= 3; - emit_byte(0x66); - emit_byte(0x66); - emit_byte(0x90); - } - // 1 - 8 nops - if(i > 4) { - if(i > 6) { - i -= 1; - emit_byte(0x66); - } - i -= 3; - emit_byte(0x66); - emit_byte(0x66); - emit_byte(0x90); - } - switch (i) { - case 4: - emit_byte(0x66); - case 3: - emit_byte(0x66); - case 2: - emit_byte(0x66); - case 1: - emit_byte(0x90); - break; - default: - assert(i == 0, " "); - } -} - -void Assembler::ret(int imm16) { - if (imm16 == 0) { - emit_byte(0xC3); - } else { - emit_byte(0xC2); - emit_word(imm16); - } -} - -// copies a single word from [esi] to [edi] -void Assembler::smovl() { - emit_byte(0xA5); -} - -// copies data from [rsi] to [rdi] using rcx words (m32) -void Assembler::rep_movl() { - // REP - emit_byte(0xF3); - // MOVSL - emit_byte(0xA5); -} - -// copies data from [rsi] to [rdi] using rcx double words (m64) -void Assembler::rep_movq() { - // REP - emit_byte(0xF3); - // MOVSQ - prefix(REX_W); - emit_byte(0xA5); -} - -// sets rcx double words (m64) with rax value at [rdi] -void Assembler::rep_set() { - // REP - emit_byte(0xF3); - // STOSQ - prefix(REX_W); - emit_byte(0xAB); -} - -// scans rcx double words (m64) at [rdi] for occurance of rax -void Assembler::repne_scan() { - // REPNE/REPNZ - emit_byte(0xF2); - // SCASQ - prefix(REX_W); - emit_byte(0xAF); -} - -void Assembler::setb(Condition cc, Register dst) { - assert(0 <= cc && cc < 16, "illegal cc"); - int encode = prefix_and_encode(dst->encoding(), true); - emit_byte(0x0F); - emit_byte(0x90 | cc); - emit_byte(0xC0 | encode); -} - -void Assembler::clflush(Address adr) { - prefix(adr); - emit_byte(0x0F); - emit_byte(0xAE); - emit_operand(rdi, adr); -} - -void Assembler::call(Label& L, relocInfo::relocType rtype) { - if (L.is_bound()) { - const int long_size = 5; - int offs = (int)( target(L) - pc() ); - assert(offs <= 0, "assembler error"); - InstructionMark im(this); - // 1110 1000 #32-bit disp - emit_byte(0xE8); - emit_data(offs - long_size, rtype, disp32_operand); - } else { - InstructionMark im(this); - // 1110 1000 #32-bit disp - L.add_patch_at(code(), locator()); - - emit_byte(0xE8); - emit_data(int(0), rtype, disp32_operand); - } -} - -void Assembler::call_literal(address entry, RelocationHolder const& rspec) { - assert(entry != NULL, "call most probably wrong"); - InstructionMark im(this); - emit_byte(0xE8); - intptr_t disp = entry - (_code_pos + sizeof(int32_t)); - assert(is_simm32(disp), "must be 32bit offset (call2)"); - // Technically, should use call32_operand, but this format is - // implied by the fact that we're emitting a call instruction. - emit_data((int) disp, rspec, disp32_operand); -} - - -void Assembler::call(Register dst) { - // This was originally using a 32bit register encoding - // and surely we want 64bit! 
- // this is a 32bit encoding but in 64bit mode the default - // operand size is 64bit so there is no need for the - // wide prefix. So prefix only happens if we use the - // new registers. Much like push/pop. - int encode = prefixq_and_encode(dst->encoding()); - emit_byte(0xFF); - emit_byte(0xD0 | encode); -} - -void Assembler::call(Address adr) { - InstructionMark im(this); - prefix(adr); - emit_byte(0xFF); - emit_operand(rdx, adr); -} - -void Assembler::jmp(Register reg) { - int encode = prefix_and_encode(reg->encoding()); - emit_byte(0xFF); - emit_byte(0xE0 | encode); -} - -void Assembler::jmp(Address adr) { - InstructionMark im(this); - prefix(adr); - emit_byte(0xFF); - emit_operand(rsp, adr); -} - -void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) { - InstructionMark im(this); - emit_byte(0xE9); - assert(dest != NULL, "must have a target"); - intptr_t disp = dest - (_code_pos + sizeof(int32_t)); - assert(is_simm32(disp), "must be 32bit offset (jmp)"); - emit_data(disp, rspec.reloc(), call32_operand); -} - -void Assembler::jmp(Label& L, relocInfo::relocType rtype) { - if (L.is_bound()) { - address entry = target(L); - assert(entry != NULL, "jmp most probably wrong"); - InstructionMark im(this); - const int short_size = 2; - const int long_size = 5; - intptr_t offs = entry - _code_pos; - if (rtype == relocInfo::none && is8bit(offs - short_size)) { - emit_byte(0xEB); - emit_byte((offs - short_size) & 0xFF); - } else { - emit_byte(0xE9); - emit_long(offs - long_size); - } - } else { - // By default, forward jumps are always 32-bit displacements, since - // we can't yet know where the label will be bound. If you're sure that - // the forward jump will not run beyond 256 bytes, use jmpb to - // force an 8-bit displacement. - InstructionMark im(this); - relocate(rtype); - L.add_patch_at(code(), locator()); - emit_byte(0xE9); - emit_long(0); - } -} - -void Assembler::jmpb(Label& L) { - if (L.is_bound()) { - const int short_size = 2; - address entry = target(L); - assert(is8bit((entry - _code_pos) + short_size), - "Dispacement too large for a short jmp"); - assert(entry != NULL, "jmp most probably wrong"); - intptr_t offs = entry - _code_pos; - emit_byte(0xEB); - emit_byte((offs - short_size) & 0xFF); - } else { - InstructionMark im(this); - L.add_patch_at(code(), locator()); - emit_byte(0xEB); - emit_byte(0); - } -} - -void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) { - InstructionMark im(this); - relocate(rtype); - assert((0 <= cc) && (cc < 16), "illegal cc"); - if (L.is_bound()) { - address dst = target(L); - assert(dst != NULL, "jcc most probably wrong"); - - const int short_size = 2; - const int long_size = 6; - intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos; - if (rtype == relocInfo::none && is8bit(offs - short_size)) { - // 0111 tttn #8-bit disp - emit_byte(0x70 | cc); - emit_byte((offs - short_size) & 0xFF); - } else { - // 0000 1111 1000 tttn #32-bit disp - assert(is_simm32(offs - long_size), - "must be 32bit offset (call4)"); - emit_byte(0x0F); - emit_byte(0x80 | cc); - emit_long(offs - long_size); - } - } else { - // Note: could eliminate cond. jumps to this jump if condition - // is the same however, seems to be rather unlikely case. 
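jmp(Label&) and jcc(...) above only use the two-byte rel8 encodings when the label is already bound, no relocation is attached, and the displacement measured from the end of the instruction fits in a signed byte; forward jumps default to the 32-bit forms. The size decision, sketched with hypothetical names:

    #include <cstddef>

    static bool fits_in_signed_byte(std::ptrdiff_t x) { return x >= -128 && x < 128; }

    // Unconditional jump: EB rel8 (2 bytes) or E9 rel32 (5 bytes).
    // Conditional jumps use 70+cc rel8 (2 bytes) or 0F 80+cc rel32 (6 bytes).
    static int jmp_encoding_length(std::ptrdiff_t offs_to_target, bool has_reloc) {
      const int short_size = 2;
      const int long_size  = 5;
      if (!has_reloc && fits_in_signed_byte(offs_to_target - short_size)) {
        return short_size;
      }
      return long_size;
    }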
- // Note: use jccb() if label to be bound is very close to get - // an 8-bit displacement - L.add_patch_at(code(), locator()); - emit_byte(0x0F); - emit_byte(0x80 | cc); - emit_long(0); - } -} - -void Assembler::jccb(Condition cc, Label& L) { - if (L.is_bound()) { - const int short_size = 2; - const int long_size = 6; - address entry = target(L); - assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)), - "Dispacement too large for a short jmp"); - intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos; - // 0111 tttn #8-bit disp - emit_byte(0x70 | cc); - emit_byte((offs - short_size) & 0xFF); - } else { - InstructionMark im(this); - L.add_patch_at(code(), locator()); - emit_byte(0x70 | cc); - emit_byte(0); - } -} - -// FP instructions - -void Assembler::fxsave(Address dst) { - prefixq(dst); - emit_byte(0x0F); - emit_byte(0xAE); - emit_operand(as_Register(0), dst); -} - -void Assembler::fxrstor(Address src) { - prefixq(src); - emit_byte(0x0F); - emit_byte(0xAE); - emit_operand(as_Register(1), src); -} - -void Assembler::ldmxcsr(Address src) { - InstructionMark im(this); - prefix(src); - emit_byte(0x0F); - emit_byte(0xAE); - emit_operand(as_Register(2), src); -} - -void Assembler::stmxcsr(Address dst) { - InstructionMark im(this); - prefix(dst); - emit_byte(0x0F); - emit_byte(0xAE); - emit_operand(as_Register(3), dst); -} - -void Assembler::addss(XMMRegister dst, XMMRegister src) { - emit_byte(0xF3); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x58); - emit_byte(0xC0 | encode); -} - -void Assembler::addss(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF3); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x58); - emit_operand(dst, src); -} - -void Assembler::subss(XMMRegister dst, XMMRegister src) { - emit_byte(0xF3); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x5C); - emit_byte(0xC0 | encode); -} - -void Assembler::subss(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF3); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x5C); - emit_operand(dst, src); -} - -void Assembler::mulss(XMMRegister dst, XMMRegister src) { - emit_byte(0xF3); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x59); - emit_byte(0xC0 | encode); -} - -void Assembler::mulss(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF3); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x59); - emit_operand(dst, src); -} - -void Assembler::divss(XMMRegister dst, XMMRegister src) { - emit_byte(0xF3); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x5E); - emit_byte(0xC0 | encode); -} - -void Assembler::divss(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF3); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x5E); - emit_operand(dst, src); -} - -void Assembler::addsd(XMMRegister dst, XMMRegister src) { - emit_byte(0xF2); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x58); - emit_byte(0xC0 | encode); -} - -void Assembler::addsd(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF2); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x58); - emit_operand(dst, src); -} - -void Assembler::subsd(XMMRegister dst, XMMRegister src) { - emit_byte(0xF2); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - 
emit_byte(0x0F); - emit_byte(0x5C); - emit_byte(0xC0 | encode); -} - -void Assembler::subsd(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF2); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x5C); - emit_operand(dst, src); -} - -void Assembler::mulsd(XMMRegister dst, XMMRegister src) { - emit_byte(0xF2); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x59); - emit_byte(0xC0 | encode); -} - -void Assembler::mulsd(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF2); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x59); - emit_operand(dst, src); -} - -void Assembler::divsd(XMMRegister dst, XMMRegister src) { - emit_byte(0xF2); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x5E); - emit_byte(0xC0 | encode); -} - -void Assembler::divsd(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF2); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x5E); - emit_operand(dst, src); -} - -void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { - emit_byte(0xF2); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x51); - emit_byte(0xC0 | encode); -} - -void Assembler::sqrtsd(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0xF2); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x51); - emit_operand(dst, src); -} - -void Assembler::xorps(XMMRegister dst, XMMRegister src) { - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x57); - emit_byte(0xC0 | encode); -} - -void Assembler::xorps(XMMRegister dst, Address src) { - InstructionMark im(this); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x57); - emit_operand(dst, src); -} - -void Assembler::xorpd(XMMRegister dst, XMMRegister src) { - emit_byte(0x66); - xorps(dst, src); -} - -void Assembler::xorpd(XMMRegister dst, Address src) { - InstructionMark im(this); - emit_byte(0x66); - prefix(src, dst); - emit_byte(0x0F); - emit_byte(0x57); - emit_operand(dst, src); -} - -void Assembler::cvtsi2ssl(XMMRegister dst, Register src) { - emit_byte(0xF3); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x2A); - emit_byte(0xC0 | encode); -} - -void Assembler::cvtsi2ssq(XMMRegister dst, Register src) { - emit_byte(0xF3); - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x2A); - emit_byte(0xC0 | encode); -} - -void Assembler::cvtsi2sdl(XMMRegister dst, Register src) { - emit_byte(0xF2); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x2A); - emit_byte(0xC0 | encode); -} - -void Assembler::cvtsi2sdq(XMMRegister dst, Register src) { - emit_byte(0xF2); - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x2A); - emit_byte(0xC0 | encode); -} - -void Assembler::cvttss2sil(Register dst, XMMRegister src) { - emit_byte(0xF3); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x2C); - emit_byte(0xC0 | encode); -} - -void Assembler::cvttss2siq(Register dst, XMMRegister src) { - emit_byte(0xF3); - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x2C); - emit_byte(0xC0 | encode); -} - -void Assembler::cvttsd2sil(Register dst, XMMRegister src) { - emit_byte(0xF2); - int encode 
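The SSE emitters above follow the usual mandatory-prefix scheme: no prefix for packed-single operations (xorps), 0x66 for the packed-double and 128-bit integer forms (xorpd, movdqa), 0xF3 for scalar single (addss, cvtsi2ssl) and 0xF2 for scalar double (addsd, cvttsd2sil). One concrete sequence assembled the way the code above does it (sketch only):

    #include <cstdint>
    #include <vector>

    // addsd xmm0, xmm1: F2 (scalar double) 0F 58 (ADDSD) C1 (ModRM: mod=11, xmm0, xmm1).
    static std::vector<uint8_t> addsd_xmm0_xmm1() {
      return { 0xF2, 0x0F, 0x58, 0xC1 };
    }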
= prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x2C); - emit_byte(0xC0 | encode); -} - -void Assembler::cvttsd2siq(Register dst, XMMRegister src) { - emit_byte(0xF2); - int encode = prefixq_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x2C); - emit_byte(0xC0 | encode); -} - -void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { - emit_byte(0xF3); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x5A); - emit_byte(0xC0 | encode); -} - -void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { - emit_byte(0xF2); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x5A); - emit_byte(0xC0 | encode); -} - -void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) { - emit_byte(0x66); - int encode = prefix_and_encode(dst->encoding(), src->encoding()); - emit_byte(0x0F); - emit_byte(0x60); - emit_byte(0xC0 | encode); -} - -// Implementation of MacroAssembler - -// On 32 bit it returns a vanilla displacement on 64 bit is a rip relative displacement -Address MacroAssembler::as_Address(AddressLiteral adr) { - assert(!adr.is_lval(), "must be rval"); - assert(reachable(adr), "must be"); - return Address((int)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc()); -} - -Address MacroAssembler::as_Address(ArrayAddress adr) { -#ifdef _LP64 - AddressLiteral base = adr.base(); - lea(rscratch1, base); - Address index = adr.index(); - assert(index._disp == 0, "must not have disp"); // maybe it can? - Address array(rscratch1, index._index, index._scale, index._disp); - return array; -#else - return Address::make_array(adr); -#endif // _LP64 - -} - -void MacroAssembler::fat_nop() { - // A 5 byte nop that is safe for patching (see patch_verified_entry) - // Recommened sequence from 'Software Optimization Guide for the AMD - // Hammer Processor' - emit_byte(0x66); - emit_byte(0x66); - emit_byte(0x90); - emit_byte(0x66); - emit_byte(0x90); -} - -static Assembler::Condition reverse[] = { - Assembler::noOverflow /* overflow = 0x0 */ , - Assembler::overflow /* noOverflow = 0x1 */ , - Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ , - Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ , - Assembler::notZero /* zero = 0x4, equal = 0x4 */ , - Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ , - Assembler::above /* belowEqual = 0x6 */ , - Assembler::belowEqual /* above = 0x7 */ , - Assembler::positive /* negative = 0x8 */ , - Assembler::negative /* positive = 0x9 */ , - Assembler::noParity /* parity = 0xa */ , - Assembler::parity /* noParity = 0xb */ , - Assembler::greaterEqual /* less = 0xc */ , - Assembler::less /* greaterEqual = 0xd */ , - Assembler::greater /* lessEqual = 0xe */ , - Assembler::lessEqual /* greater = 0xf, */ - -}; - -// 32bit can do a case table jump in one instruction but we no longer allow the base -// to be installed in the Address class -void MacroAssembler::jump(ArrayAddress entry) { -#ifdef _LP64 - lea(rscratch1, entry.base()); - Address dispatch = entry.index(); - assert(dispatch._base == noreg, "must be"); - dispatch._base = rscratch1; - jmp(dispatch); -#else - jmp(as_Address(entry)); -#endif // _LP64 -} - -void MacroAssembler::jump(AddressLiteral dst) { - if (reachable(dst)) { - jmp_literal(dst.target(), dst.rspec()); - } else { - lea(rscratch1, dst); - jmp(rscratch1); - } -} - -void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) { - if (reachable(dst)) { - InstructionMark 
im(this); - relocate(dst.reloc()); - const int short_size = 2; - const int long_size = 6; - int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos); - if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { - // 0111 tttn #8-bit disp - emit_byte(0x70 | cc); - emit_byte((offs - short_size) & 0xFF); - } else { - // 0000 1111 1000 tttn #32-bit disp - emit_byte(0x0F); - emit_byte(0x80 | cc); - emit_long(offs - long_size); - } - } else { -#ifdef ASSERT - warning("reversing conditional branch"); -#endif /* ASSERT */ - Label skip; - jccb(reverse[cc], skip); - lea(rscratch1, dst); - Assembler::jmp(rscratch1); - bind(skip); - } -} - -// Wouldn't need if AddressLiteral version had new name -void MacroAssembler::call(Label& L, relocInfo::relocType rtype) { - Assembler::call(L, rtype); -} - -// Wouldn't need if AddressLiteral version had new name -void MacroAssembler::call(Register entry) { - Assembler::call(entry); -} - -void MacroAssembler::call(AddressLiteral entry) { - if (reachable(entry)) { - Assembler::call_literal(entry.target(), entry.rspec()); - } else { - lea(rscratch1, entry); - Assembler::call(rscratch1); - } -} - -void MacroAssembler::cmp8(AddressLiteral src1, int8_t src2) { - if (reachable(src1)) { - cmpb(as_Address(src1), src2); - } else { - lea(rscratch1, src1); - cmpb(Address(rscratch1, 0), src2); - } -} - -void MacroAssembler::cmp32(AddressLiteral src1, int32_t src2) { - if (reachable(src1)) { - cmpl(as_Address(src1), src2); - } else { - lea(rscratch1, src1); - cmpl(Address(rscratch1, 0), src2); - } -} - -void MacroAssembler::cmp32(Register src1, AddressLiteral src2) { - if (reachable(src2)) { - cmpl(src1, as_Address(src2)); - } else { - lea(rscratch1, src2); - cmpl(src1, Address(rscratch1, 0)); - } -} - -void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) { -#ifdef _LP64 - if (src2.is_lval()) { - movptr(rscratch1, src2); - Assembler::cmpq(src1, rscratch1); - } else if (reachable(src2)) { - cmpq(src1, as_Address(src2)); - } else { - lea(rscratch1, src2); - Assembler::cmpq(src1, Address(rscratch1, 0)); - } -#else - if (src2.is_lval()) { - cmp_literal32(src1, (int32_t) src2.target(), src2.rspec()); - } else { - cmpl(src1, as_Address(src2)); - } -#endif // _LP64 -} - -void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) { - assert(src2.is_lval(), "not a mem-mem compare"); -#ifdef _LP64 - // moves src2's literal address - movptr(rscratch1, src2); - Assembler::cmpq(src1, rscratch1); -#else - cmp_literal32(src1, (int32_t) src2.target(), src2.rspec()); -#endif // _LP64 -} - -void MacroAssembler::cmp64(Register src1, AddressLiteral src2) { - assert(!src2.is_lval(), "should use cmpptr"); - - if (reachable(src2)) { -#ifdef _LP64 - cmpq(src1, as_Address(src2)); -#else - ShouldNotReachHere(); -#endif // _LP64 - } else { - lea(rscratch1, src2); - Assembler::cmpq(src1, Address(rscratch1, 0)); - } -} - -void MacroAssembler::cmpxchgptr(Register reg, AddressLiteral adr) { - if (reachable(adr)) { -#ifdef _LP64 - cmpxchgq(reg, as_Address(adr)); -#else - cmpxchgl(reg, as_Address(adr)); -#endif // _LP64 - } else { - lea(rscratch1, adr); - cmpxchgq(reg, Address(rscratch1, 0)); - } -} - -void MacroAssembler::incrementl(AddressLiteral dst) { - if (reachable(dst)) { - incrementl(as_Address(dst)); - } else { - lea(rscratch1, dst); - incrementl(Address(rscratch1, 0)); - } -} - -void MacroAssembler::incrementl(ArrayAddress dst) { - incrementl(as_Address(dst)); -} - -void MacroAssembler::lea(Register dst, Address src) { -#ifdef _LP64 - leaq(dst, src); -#else - leal(dst, src); 
-#endif // _LP64 -} - -void MacroAssembler::lea(Register dst, AddressLiteral src) { -#ifdef _LP64 - mov_literal64(dst, (intptr_t)src.target(), src.rspec()); -#else - mov_literal32(dst, (intptr_t)src.target(), src.rspec()); -#endif // _LP64 -} - -void MacroAssembler::mov32(AddressLiteral dst, Register src) { - if (reachable(dst)) { - movl(as_Address(dst), src); - } else { - lea(rscratch1, dst); - movl(Address(rscratch1, 0), src); - } -} - -void MacroAssembler::mov32(Register dst, AddressLiteral src) { - if (reachable(src)) { - movl(dst, as_Address(src)); - } else { - lea(rscratch1, src); - movl(dst, Address(rscratch1, 0)); - } -} - -void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) { - if (reachable(src)) { - if (UseXmmLoadAndClearUpper) { - movsd (dst, as_Address(src)); - } else { - movlpd(dst, as_Address(src)); - } - } else { - lea(rscratch1, src); - if (UseXmmLoadAndClearUpper) { - movsd (dst, Address(rscratch1, 0)); - } else { - movlpd(dst, Address(rscratch1, 0)); - } - } -} - -void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) { - if (reachable(src)) { - movss(dst, as_Address(src)); - } else { - lea(rscratch1, src); - movss(dst, Address(rscratch1, 0)); - } -} - -void MacroAssembler::movoop(Register dst, jobject obj) { - mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate()); -} - -void MacroAssembler::movoop(Address dst, jobject obj) { - mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate()); - movq(dst, rscratch1); -} - -void MacroAssembler::movptr(Register dst, AddressLiteral src) { -#ifdef _LP64 - if (src.is_lval()) { - mov_literal64(dst, (intptr_t)src.target(), src.rspec()); - } else { - if (reachable(src)) { - movq(dst, as_Address(src)); - } else { - lea(rscratch1, src); - movq(dst, Address(rscratch1,0)); - } - } -#else - if (src.is_lval()) { - mov_literal32(dst, (intptr_t)src.target(), src.rspec()); - } else { - movl(dst, as_Address(src)); - } -#endif // LP64 -} - -void MacroAssembler::movptr(ArrayAddress dst, Register src) { -#ifdef _LP64 - movq(as_Address(dst), src); -#else - movl(as_Address(dst), src); -#endif // _LP64 -} - -void MacroAssembler::pushoop(jobject obj) { -#ifdef _LP64 - movoop(rscratch1, obj); - pushq(rscratch1); -#else - push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate()); -#endif // _LP64 -} - -void MacroAssembler::pushptr(AddressLiteral src) { -#ifdef _LP64 - lea(rscratch1, src); - if (src.is_lval()) { - pushq(rscratch1); - } else { - pushq(Address(rscratch1, 0)); - } -#else - if (src.is_lval()) { - push_literal((int32_t)src.target(), src.rspec()); - } else { - pushl(as_Address(src)); - } -#endif // _LP64 -} - -void MacroAssembler::ldmxcsr(AddressLiteral src) { - if (reachable(src)) { - Assembler::ldmxcsr(as_Address(src)); - } else { - lea(rscratch1, src); - Assembler::ldmxcsr(Address(rscratch1, 0)); - } -} - -void MacroAssembler::movlpd(XMMRegister dst, AddressLiteral src) { - if (reachable(src)) { - movlpd(dst, as_Address(src)); - } else { - lea(rscratch1, src); - movlpd(dst, Address(rscratch1, 0)); - } -} - -void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) { - if (reachable(src)) { - movss(dst, as_Address(src)); - } else { - lea(rscratch1, src); - movss(dst, Address(rscratch1, 0)); - } -} -void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) { - if (reachable(src)) { - xorpd(dst, as_Address(src)); - } else { - lea(rscratch1, src); - xorpd(dst, Address(rscratch1, 0)); - } -} - -void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) { - if 
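Every AddressLiteral wrapper above and below (mov32, movdbl, movss, xorpd, xorps, ...) has the same shape: if the literal is reachable it is used directly as a RIP-relative operand, otherwise the full 64-bit address is materialized in rscratch1 and the access goes through the scratch register. A first-order sketch of the constraint that reachability boils down to (the real reachable() predicate is defined elsewhere and is more conservative; this helper is an assumption, not HotSpot code):

    #include <cstdint>

    // A target can be addressed RIP-relative only if its displacement from the
    // instruction fits in a signed 32-bit field.
    static bool rip_displacement_fits(uint64_t code_pos, uint64_t target) {
      const int64_t disp = static_cast<int64_t>(target - code_pos);
      return disp == static_cast<int64_t>(static_cast<int32_t>(disp));
    }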
(reachable(src)) { - xorps(dst, as_Address(src)); - } else { - lea(rscratch1, src); - xorps(dst, Address(rscratch1, 0)); - } -} - -void MacroAssembler::null_check(Register reg, int offset) { - if (needs_explicit_null_check(offset)) { - // provoke OS NULL exception if reg = NULL by - // accessing M[reg] w/o changing any (non-CC) registers - cmpq(rax, Address(reg, 0)); - // Note: should probably use testl(rax, Address(reg, 0)); - // may be shorter code (however, this version of - // testl needs to be implemented first) - } else { - // nothing to do, (later) access of M[reg + offset] - // will provoke OS NULL exception if reg = NULL - } -} - -int MacroAssembler::load_unsigned_byte(Register dst, Address src) { - int off = offset(); - movzbl(dst, src); - return off; -} - -int MacroAssembler::load_unsigned_word(Register dst, Address src) { - int off = offset(); - movzwl(dst, src); - return off; -} - -int MacroAssembler::load_signed_byte(Register dst, Address src) { - int off = offset(); - movsbl(dst, src); - return off; -} - -int MacroAssembler::load_signed_word(Register dst, Address src) { - int off = offset(); - movswl(dst, src); - return off; -} - -void MacroAssembler::incrementl(Register reg, int value) { - if (value == min_jint) { addl(reg, value); return; } - if (value < 0) { decrementl(reg, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { incl(reg) ; return; } - /* else */ { addl(reg, value) ; return; } -} - -void MacroAssembler::decrementl(Register reg, int value) { - if (value == min_jint) { subl(reg, value); return; } - if (value < 0) { incrementl(reg, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { decl(reg) ; return; } - /* else */ { subl(reg, value) ; return; } -} - -void MacroAssembler::incrementq(Register reg, int value) { - if (value == min_jint) { addq(reg, value); return; } - if (value < 0) { decrementq(reg, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { incq(reg) ; return; } - /* else */ { addq(reg, value) ; return; } -} - -void MacroAssembler::decrementq(Register reg, int value) { - if (value == min_jint) { subq(reg, value); return; } - if (value < 0) { incrementq(reg, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { decq(reg) ; return; } - /* else */ { subq(reg, value) ; return; } -} - -void MacroAssembler::incrementl(Address dst, int value) { - if (value == min_jint) { addl(dst, value); return; } - if (value < 0) { decrementl(dst, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { incl(dst) ; return; } - /* else */ { addl(dst, value) ; return; } -} - -void MacroAssembler::decrementl(Address dst, int value) { - if (value == min_jint) { subl(dst, value); return; } - if (value < 0) { incrementl(dst, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { decl(dst) ; return; } - /* else */ { subl(dst, value) ; return; } -} - -void MacroAssembler::incrementq(Address dst, int value) { - if (value == min_jint) { addq(dst, value); return; } - if (value < 0) { decrementq(dst, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { incq(dst) ; return; } - /* else */ { addq(dst, value) ; return; } -} - -void MacroAssembler::decrementq(Address dst, int value) { - if (value == min_jint) { subq(dst, value); return; } - if (value < 0) { incrementq(dst, -value); return; } - if (value == 0) { ; return; } - if (value == 1 && UseIncDec) { decq(dst) ; return; } - /* 
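incrementl()/decrementl() and their q/Address variants above all normalize the request to the cheapest instruction: min_jint cannot be negated so it is added as-is, negative values are forwarded to the opposite helper, zero emits nothing, one uses inc/dec when UseIncDec allows it, and everything else becomes add/sub. The same decision tree in plain C++ (illustrative; use_inc_dec stands in for the UseIncDec flag):

    #include <climits>

    enum class AdjustKind { kNone, kInc, kDec, kAdd, kSub };

    static AdjustKind classify_increment(int value, bool use_inc_dec) {
      if (value == INT_MIN) return AdjustKind::kAdd;   // -value would overflow; emit addl(reg, value)
      if (value < 0) {                                 // incrementl(reg, v) == decrementl(reg, -v)
        const int v = -value;
        if (v == 1 && use_inc_dec) return AdjustKind::kDec;
        return AdjustKind::kSub;
      }
      if (value == 0) return AdjustKind::kNone;
      if (value == 1 && use_inc_dec) return AdjustKind::kInc;
      return AdjustKind::kAdd;
    }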
else */ { subq(dst, value) ; return; } -} - -void MacroAssembler::align(int modulus) { - if (offset() % modulus != 0) { - nop(modulus - (offset() % modulus)); - } -} - -void MacroAssembler::enter() { - pushq(rbp); - movq(rbp, rsp); -} - -void MacroAssembler::leave() { - emit_byte(0xC9); // LEAVE -} - -// C++ bool manipulation - -void MacroAssembler::movbool(Register dst, Address src) { - if(sizeof(bool) == 1) - movb(dst, src); - else if(sizeof(bool) == 2) - movw(dst, src); - else if(sizeof(bool) == 4) - movl(dst, src); - else { - // unsupported - ShouldNotReachHere(); - } -} - -void MacroAssembler::movbool(Address dst, bool boolconst) { - if(sizeof(bool) == 1) - movb(dst, (int) boolconst); - else if(sizeof(bool) == 2) - movw(dst, (int) boolconst); - else if(sizeof(bool) == 4) - movl(dst, (int) boolconst); - else { - // unsupported - ShouldNotReachHere(); - } -} - -void MacroAssembler::movbool(Address dst, Register src) { - if(sizeof(bool) == 1) - movb(dst, src); - else if(sizeof(bool) == 2) - movw(dst, src); - else if(sizeof(bool) == 4) - movl(dst, src); - else { - // unsupported - ShouldNotReachHere(); - } -} - -void MacroAssembler::testbool(Register dst) { - if(sizeof(bool) == 1) - testb(dst, (int) 0xff); - else if(sizeof(bool) == 2) { - // need testw impl - ShouldNotReachHere(); - } else if(sizeof(bool) == 4) - testl(dst, dst); - else { - // unsupported - ShouldNotReachHere(); - } -} - -void MacroAssembler::set_last_Java_frame(Register last_java_sp, - Register last_java_fp, - address last_java_pc) { - // determine last_java_sp register - if (!last_java_sp->is_valid()) { - last_java_sp = rsp; - } - - // last_java_fp is optional - if (last_java_fp->is_valid()) { - movq(Address(r15_thread, JavaThread::last_Java_fp_offset()), - last_java_fp); - } - - // last_java_pc is optional - if (last_java_pc != NULL) { - Address java_pc(r15_thread, - JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); - lea(rscratch1, InternalAddress(last_java_pc)); - movq(java_pc, rscratch1); - } - - movq(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp); -} - -void MacroAssembler::reset_last_Java_frame(bool clear_fp, - bool clear_pc) { - // we must set sp to zero to clear frame - movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); - // must clear fp, so that compiled frames are not confused; it is - // possible that we need it only for debugging - if (clear_fp) { - movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); - } - - if (clear_pc) { - movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); - } -} - - -// Implementation of call_VM versions - -void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) { - Label L, E; - -#ifdef _WIN64 - // Windows always allocates space for it's register args - assert(num_args <= 4, "only register arguments supported"); - subq(rsp, frame::arg_reg_save_area_bytes); -#endif - - // Align stack if necessary - testl(rsp, 15); - jcc(Assembler::zero, L); - - subq(rsp, 8); - { - call(RuntimeAddress(entry_point)); - } - addq(rsp, 8); - jmp(E); - - bind(L); - { - call(RuntimeAddress(entry_point)); - } - - bind(E); - -#ifdef _WIN64 - // restore stack pointer - addq(rsp, frame::arg_reg_save_area_bytes); -#endif - -} - - -void MacroAssembler::call_VM_base(Register oop_result, - Register java_thread, - Register last_java_sp, - address entry_point, - int num_args, - bool check_exceptions) { - // determine last_java_sp register - if (!last_java_sp->is_valid()) { - 
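call_VM_leaf_base() above (and call_VM_base() below) keep the native ABI's 16-byte stack alignment by testing the low bits of rsp and pushing one extra 8-byte slot on the misaligned path before the call; on Win64 they additionally reserve frame::arg_reg_save_area_bytes for the callee's register-argument home area. The alignment test, restated in plain C++ (rsp here is just an integer standing in for the register):

    #include <cstdint>

    // Mirrors "testl(rsp, 15); jcc(Assembler::zero, L); subq(rsp, 8);".
    // rsp stays 8-aligned, so a single 8-byte slot restores 16-byte alignment.
    static int alignment_padding_bytes(uint64_t rsp) {
      return (rsp & 15u) != 0 ? 8 : 0;
    }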
last_java_sp = rsp; - } - - // debugging support - assert(num_args >= 0, "cannot have negative number of arguments"); - assert(r15_thread != oop_result, - "cannot use the same register for java_thread & oop_result"); - assert(r15_thread != last_java_sp, - "cannot use the same register for java_thread & last_java_sp"); - - // set last Java frame before call - - // This sets last_Java_fp which is only needed from interpreted frames - // and should really be done only from the interp_masm version before - // calling the underlying call_VM. That doesn't happen yet so we set - // last_Java_fp here even though some callers don't need it and - // also clear it below. - set_last_Java_frame(last_java_sp, rbp, NULL); - - { - Label L, E; - - // Align stack if necessary -#ifdef _WIN64 - assert(num_args <= 4, "only register arguments supported"); - // Windows always allocates space for it's register args - subq(rsp, frame::arg_reg_save_area_bytes); -#endif - testl(rsp, 15); - jcc(Assembler::zero, L); - - subq(rsp, 8); - { - call(RuntimeAddress(entry_point)); - } - addq(rsp, 8); - jmp(E); - - - bind(L); - { - call(RuntimeAddress(entry_point)); - } - - bind(E); - -#ifdef _WIN64 - // restore stack pointer - addq(rsp, frame::arg_reg_save_area_bytes); -#endif - } - -#ifdef ASSERT - pushq(rax); - { - Label L; - get_thread(rax); - cmpq(r15_thread, rax); - jcc(Assembler::equal, L); - stop("MacroAssembler::call_VM_base: register not callee saved?"); - bind(L); - } - popq(rax); -#endif - - // reset last Java frame - // This really shouldn't have to clear fp set note above at the - // call to set_last_Java_frame - reset_last_Java_frame(true, false); - - check_and_handle_popframe(noreg); - check_and_handle_earlyret(noreg); - - if (check_exceptions) { - cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL); - // This used to conditionally jump to forward_exception however it is - // possible if we relocate that the branch will not reach. 
So we must jump - // around so we can always reach - Label ok; - jcc(Assembler::equal, ok); - jump(RuntimeAddress(StubRoutines::forward_exception_entry())); - bind(ok); - } - - // get oop result if there is one and reset the value in the thread - if (oop_result->is_valid()) { - movq(oop_result, Address(r15_thread, JavaThread::vm_result_offset())); - movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD); - verify_oop(oop_result); - } -} - -void MacroAssembler::check_and_handle_popframe(Register java_thread) {} -void MacroAssembler::check_and_handle_earlyret(Register java_thread) {} - -void MacroAssembler::call_VM_helper(Register oop_result, - address entry_point, - int num_args, - bool check_exceptions) { - // Java thread becomes first argument of C function - movq(c_rarg0, r15_thread); - - // We've pushed one address, correct last_Java_sp - leaq(rax, Address(rsp, wordSize)); - - call_VM_base(oop_result, noreg, rax, entry_point, num_args, - check_exceptions); -} - - -void MacroAssembler::call_VM(Register oop_result, - address entry_point, - bool check_exceptions) { - Label C, E; - Assembler::call(C, relocInfo::none); - jmp(E); - - bind(C); - call_VM_helper(oop_result, entry_point, 0, check_exceptions); - ret(0); - - bind(E); -} - - -void MacroAssembler::call_VM(Register oop_result, - address entry_point, - Register arg_1, - bool check_exceptions) { - assert(rax != arg_1, "smashed argument"); - assert(c_rarg0 != arg_1, "smashed argument"); - - Label C, E; - Assembler::call(C, relocInfo::none); - jmp(E); - - bind(C); - // c_rarg0 is reserved for thread - if (c_rarg1 != arg_1) { - movq(c_rarg1, arg_1); - } - call_VM_helper(oop_result, entry_point, 1, check_exceptions); - ret(0); - - bind(E); -} - -void MacroAssembler::call_VM(Register oop_result, - address entry_point, - Register arg_1, - Register arg_2, - bool check_exceptions) { - assert(rax != arg_1, "smashed argument"); - assert(rax != arg_2, "smashed argument"); - assert(c_rarg0 != arg_1, "smashed argument"); - assert(c_rarg0 != arg_2, "smashed argument"); - assert(c_rarg1 != arg_2, "smashed argument"); - assert(c_rarg2 != arg_1, "smashed argument"); - - Label C, E; - Assembler::call(C, relocInfo::none); - jmp(E); - - bind(C); - // c_rarg0 is reserved for thread - if (c_rarg1 != arg_1) { - movq(c_rarg1, arg_1); - } - if (c_rarg2 != arg_2) { - movq(c_rarg2, arg_2); - } - call_VM_helper(oop_result, entry_point, 2, check_exceptions); - ret(0); - - bind(E); -} - - -void MacroAssembler::call_VM(Register oop_result, - address entry_point, - Register arg_1, - Register arg_2, - Register arg_3, - bool check_exceptions) { - assert(rax != arg_1, "smashed argument"); - assert(rax != arg_2, "smashed argument"); - assert(rax != arg_3, "smashed argument"); - assert(c_rarg0 != arg_1, "smashed argument"); - assert(c_rarg0 != arg_2, "smashed argument"); - assert(c_rarg0 != arg_3, "smashed argument"); - assert(c_rarg1 != arg_2, "smashed argument"); - assert(c_rarg1 != arg_3, "smashed argument"); - assert(c_rarg2 != arg_1, "smashed argument"); - assert(c_rarg2 != arg_3, "smashed argument"); - assert(c_rarg3 != arg_1, "smashed argument"); - assert(c_rarg3 != arg_2, "smashed argument"); - - Label C, E; - Assembler::call(C, relocInfo::none); - jmp(E); - - bind(C); - // c_rarg0 is reserved for thread - if (c_rarg1 != arg_1) { - movq(c_rarg1, arg_1); - } - if (c_rarg2 != arg_2) { - movq(c_rarg2, arg_2); - } - if (c_rarg3 != arg_3) { - movq(c_rarg3, arg_3); - } - call_VM_helper(oop_result, entry_point, 3, check_exceptions); - ret(0); - - bind(E); 
-} - -void MacroAssembler::call_VM(Register oop_result, - Register last_java_sp, - address entry_point, - int num_args, - bool check_exceptions) { - call_VM_base(oop_result, noreg, last_java_sp, entry_point, num_args, - check_exceptions); -} - -void MacroAssembler::call_VM(Register oop_result, - Register last_java_sp, - address entry_point, - Register arg_1, - bool check_exceptions) { - assert(c_rarg0 != arg_1, "smashed argument"); - assert(c_rarg1 != last_java_sp, "smashed argument"); - // c_rarg0 is reserved for thread - if (c_rarg1 != arg_1) { - movq(c_rarg1, arg_1); - } - call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); -} - -void MacroAssembler::call_VM(Register oop_result, - Register last_java_sp, - address entry_point, - Register arg_1, - Register arg_2, - bool check_exceptions) { - assert(c_rarg0 != arg_1, "smashed argument"); - assert(c_rarg0 != arg_2, "smashed argument"); - assert(c_rarg1 != arg_2, "smashed argument"); - assert(c_rarg1 != last_java_sp, "smashed argument"); - assert(c_rarg2 != arg_1, "smashed argument"); - assert(c_rarg2 != last_java_sp, "smashed argument"); - // c_rarg0 is reserved for thread - if (c_rarg1 != arg_1) { - movq(c_rarg1, arg_1); - } - if (c_rarg2 != arg_2) { - movq(c_rarg2, arg_2); - } - call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); -} - - -void MacroAssembler::call_VM(Register oop_result, - Register last_java_sp, - address entry_point, - Register arg_1, - Register arg_2, - Register arg_3, - bool check_exceptions) { - assert(c_rarg0 != arg_1, "smashed argument"); - assert(c_rarg0 != arg_2, "smashed argument"); - assert(c_rarg0 != arg_3, "smashed argument"); - assert(c_rarg1 != arg_2, "smashed argument"); - assert(c_rarg1 != arg_3, "smashed argument"); - assert(c_rarg1 != last_java_sp, "smashed argument"); - assert(c_rarg2 != arg_1, "smashed argument"); - assert(c_rarg2 != arg_3, "smashed argument"); - assert(c_rarg2 != last_java_sp, "smashed argument"); - assert(c_rarg3 != arg_1, "smashed argument"); - assert(c_rarg3 != arg_2, "smashed argument"); - assert(c_rarg3 != last_java_sp, "smashed argument"); - // c_rarg0 is reserved for thread - if (c_rarg1 != arg_1) { - movq(c_rarg1, arg_1); - } - if (c_rarg2 != arg_2) { - movq(c_rarg2, arg_2); - } - if (c_rarg3 != arg_3) { - movq(c_rarg2, arg_3); - } - call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); -} - -void MacroAssembler::call_VM_leaf(address entry_point, int num_args) { - call_VM_leaf_base(entry_point, num_args); -} - -void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) { - if (c_rarg0 != arg_1) { - movq(c_rarg0, arg_1); - } - call_VM_leaf(entry_point, 1); -} - -void MacroAssembler::call_VM_leaf(address entry_point, - Register arg_1, - Register arg_2) { - assert(c_rarg0 != arg_2, "smashed argument"); - assert(c_rarg1 != arg_1, "smashed argument"); - if (c_rarg0 != arg_1) { - movq(c_rarg0, arg_1); - } - if (c_rarg1 != arg_2) { - movq(c_rarg1, arg_2); - } - call_VM_leaf(entry_point, 2); -} - -void MacroAssembler::call_VM_leaf(address entry_point, - Register arg_1, - Register arg_2, - Register arg_3) { - assert(c_rarg0 != arg_2, "smashed argument"); - assert(c_rarg0 != arg_3, "smashed argument"); - assert(c_rarg1 != arg_1, "smashed argument"); - assert(c_rarg1 != arg_3, "smashed argument"); - assert(c_rarg2 != arg_1, "smashed argument"); - assert(c_rarg2 != arg_2, "smashed argument"); - if (c_rarg0 != arg_1) { - movq(c_rarg0, arg_1); - } - if (c_rarg1 != arg_2) { - movq(c_rarg1, arg_2); - } - if (c_rarg2 != arg_3) { - 
movq(c_rarg2, arg_3); - } - call_VM_leaf(entry_point, 3); -} - - -// Calls to C land -// -// When entering C land, the rbp & rsp of the last Java frame have to -// be recorded in the (thread-local) JavaThread object. When leaving C -// land, the last Java fp has to be reset to 0. This is required to -// allow proper stack traversal. -void MacroAssembler::store_check(Register obj) { - // Does a store check for the oop in register obj. The content of - // register obj is destroyed afterwards. - store_check_part_1(obj); - store_check_part_2(obj); -} - -void MacroAssembler::store_check(Register obj, Address dst) { - store_check(obj); -} - -// split the store check operation so that other instructions can be -// scheduled inbetween -void MacroAssembler::store_check_part_1(Register obj) { - BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); - shrq(obj, CardTableModRefBS::card_shift); -} - -void MacroAssembler::store_check_part_2(Register obj) { - BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); - CardTableModRefBS* ct = (CardTableModRefBS*)bs; - assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); - ExternalAddress cardtable((address)ct->byte_map_base); - Address index(noreg, obj, Address::times_1); - movb(as_Address(ArrayAddress(cardtable, index)), 0); -} - -void MacroAssembler::c2bool(Register x) { - // implements x == 0 ? 0 : 1 - // note: must only look at least-significant byte of x - // since C-style booleans are stored in one byte - // only! (was bug) - andl(x, 0xFF); - setb(Assembler::notZero, x); -} - -int MacroAssembler::corrected_idivl(Register reg) { - // Full implementation of Java idiv and irem; checks for special - // case as described in JVM spec., p.243 & p.271. The function - // returns the (pc) offset of the idivl instruction - may be needed - // for implicit exceptions. - // - // normal case special case - // - // input : eax: dividend min_int - // reg: divisor (may not be eax/edx) -1 - // - // output: eax: quotient (= eax idiv reg) min_int - // edx: remainder (= eax irem reg) 0 - assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register"); - const int min_int = 0x80000000; - Label normal_case, special_case; - - // check for special case - cmpl(rax, min_int); - jcc(Assembler::notEqual, normal_case); - xorl(rdx, rdx); // prepare edx for possible special case (where - // remainder = 0) - cmpl(reg, -1); - jcc(Assembler::equal, special_case); - - // handle normal case - bind(normal_case); - cdql(); - int idivl_offset = offset(); - idivl(reg); - - // normal and special case exit - bind(special_case); - - return idivl_offset; -} - -int MacroAssembler::corrected_idivq(Register reg) { - // Full implementation of Java ldiv and lrem; checks for special - // case as described in JVM spec., p.243 & p.271. The function - // returns the (pc) offset of the idivl instruction - may be needed - // for implicit exceptions. 
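A minimal standalone C++ sketch (not HotSpot code) of the card-table arithmetic that store_check_part_1/part_2 above emit: shift the object address right by the card shift and store a single zero byte into the byte map. The 512-byte card size (shift of 9) is the customary HotSpot value and is assumed here only for illustration.

    #include <cstdint>

    inline void dirty_card(volatile uint8_t* byte_map_base,
                           uintptr_t obj_addr,
                           unsigned card_shift = 9 /* log2(512) */) {
      byte_map_base[obj_addr >> card_shift] = 0;  // 0 marks the card dirty
    }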
- // - // normal case special case - // - // input : rax: dividend min_long - // reg: divisor (may not be eax/edx) -1 - // - // output: rax: quotient (= rax idiv reg) min_long - // rdx: remainder (= rax irem reg) 0 - assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register"); - static const int64_t min_long = 0x8000000000000000; - Label normal_case, special_case; - - // check for special case - cmp64(rax, ExternalAddress((address) &min_long)); - jcc(Assembler::notEqual, normal_case); - xorl(rdx, rdx); // prepare rdx for possible special case (where - // remainder = 0) - cmpq(reg, -1); - jcc(Assembler::equal, special_case); - - // handle normal case - bind(normal_case); - cdqq(); - int idivq_offset = offset(); - idivq(reg); - - // normal and special case exit - bind(special_case); - - return idivq_offset; -} - -void MacroAssembler::push_IU_state() { - pushfq(); // Push flags first because pushaq kills them - subq(rsp, 8); // Make sure rsp stays 16-byte aligned - pushaq(); -} - -void MacroAssembler::pop_IU_state() { - popaq(); - addq(rsp, 8); - popfq(); -} - -void MacroAssembler::push_FPU_state() { - subq(rsp, FPUStateSizeInWords * wordSize); - fxsave(Address(rsp, 0)); -} - -void MacroAssembler::pop_FPU_state() { - fxrstor(Address(rsp, 0)); - addq(rsp, FPUStateSizeInWords * wordSize); -} - -// Save Integer and Float state -// Warning: Stack must be 16 byte aligned -void MacroAssembler::push_CPU_state() { - push_IU_state(); - push_FPU_state(); -} - -void MacroAssembler::pop_CPU_state() { - pop_FPU_state(); - pop_IU_state(); -} - -void MacroAssembler::sign_extend_short(Register reg) { - movswl(reg, reg); -} - -void MacroAssembler::sign_extend_byte(Register reg) { - movsbl(reg, reg); -} - -void MacroAssembler::division_with_shift(Register reg, int shift_value) { - assert (shift_value > 0, "illegal shift value"); - Label _is_positive; - testl (reg, reg); - jcc (Assembler::positive, _is_positive); - int offset = (1 << shift_value) - 1 ; - - if (offset == 1) { - incrementl(reg); - } else { - addl(reg, offset); - } - - bind (_is_positive); - sarl(reg, shift_value); -} - -void MacroAssembler::round_to_l(Register reg, int modulus) { - addl(reg, modulus - 1); - andl(reg, -modulus); -} - -void MacroAssembler::round_to_q(Register reg, int modulus) { - addq(reg, modulus - 1); - andq(reg, -modulus); -} - -void MacroAssembler::verify_oop(Register reg, const char* s) { - if (!VerifyOops) { - return; - } - - // Pass register number to verify_oop_subroutine - char* b = new char[strlen(s) + 50]; - sprintf(b, "verify_oop: %s: %s", reg->name(), s); - - pushq(rax); // save rax, restored by receiver - - // pass args on stack, only touch rax - pushq(reg); - - // avoid using pushptr, as it modifies scratch registers - // and our contract is not to modify anything - ExternalAddress buffer((address)b); - movptr(rax, buffer.addr()); - pushq(rax); - - // call indirectly to solve generation ordering problem - movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); - call(rax); // no alignment requirement - // everything popped by receiver -} - -void MacroAssembler::verify_oop_addr(Address addr, const char* s) { - if (!VerifyOops) return; - // Pass register number to verify_oop_subroutine - char* b = new char[strlen(s) + 50]; - sprintf(b, "verify_oop_addr: %s", s); - pushq(rax); // save rax - movq(addr, rax); - pushq(rax); // pass register argument - - - // avoid using pushptr, as it modifies scratch registers - // and our contract is not to modify anything - ExternalAddress 
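A minimal standalone C++ sketch (not HotSpot code) of the special case that corrected_idivl/corrected_idivq above filter out before the hardware divide: on x86, idiv of min_int by -1 raises a divide error, while Java semantics require the quotient min_int with remainder 0.

    #include <climits>

    struct DivResult { int quotient; int remainder; };

    DivResult java_idiv(int dividend, int divisor) {
      if (dividend == INT_MIN && divisor == -1) {
        return { INT_MIN, 0 };                            // special case
      }
      return { dividend / divisor, dividend % divisor };  // normal case
    }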
buffer((address)b); - movptr(rax, buffer.addr()); - pushq(rax); - - // call indirectly to solve generation ordering problem - movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); - call(rax); // no alignment requirement - // everything popped by receiver -} - - -void MacroAssembler::stop(const char* msg) { - address rip = pc(); - pushaq(); // get regs on stack - lea(c_rarg0, ExternalAddress((address) msg)); - lea(c_rarg1, InternalAddress(rip)); - movq(c_rarg2, rsp); // pass pointer to regs array - andq(rsp, -16); // align stack as required by ABI - call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug))); - hlt(); -} - -void MacroAssembler::warn(const char* msg) { - pushq(r12); - movq(r12, rsp); - andq(rsp, -16); // align stack as required by push_CPU_state and call - - push_CPU_state(); // keeps alignment at 16 bytes - lea(c_rarg0, ExternalAddress((address) msg)); - call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0); - pop_CPU_state(); - - movq(rsp, r12); - popq(r12); -} - -void MacroAssembler::debug(char* msg, int64_t pc, int64_t regs[]) { - // In order to get locks to work, we need to fake a in_VM state - if (ShowMessageBoxOnError ) { - JavaThread* thread = JavaThread::current(); - JavaThreadState saved_state = thread->thread_state(); - thread->set_thread_state(_thread_in_vm); - ttyLocker ttyl; -#ifndef PRODUCT - if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { - BytecodeCounter::print(); - } -#endif - // To see where a verify_oop failed, get $ebx+40/X for this frame. - // XXX correct this offset for amd64 - // This is the value of eip which points to where verify_oop will return. - if (os::message_box(msg, "Execution stopped, print registers?")) { - tty->print_cr("rip = 0x%016lx", pc); - tty->print_cr("rax = 0x%016lx", regs[15]); - tty->print_cr("rbx = 0x%016lx", regs[12]); - tty->print_cr("rcx = 0x%016lx", regs[14]); - tty->print_cr("rdx = 0x%016lx", regs[13]); - tty->print_cr("rdi = 0x%016lx", regs[8]); - tty->print_cr("rsi = 0x%016lx", regs[9]); - tty->print_cr("rbp = 0x%016lx", regs[10]); - tty->print_cr("rsp = 0x%016lx", regs[11]); - tty->print_cr("r8 = 0x%016lx", regs[7]); - tty->print_cr("r9 = 0x%016lx", regs[6]); - tty->print_cr("r10 = 0x%016lx", regs[5]); - tty->print_cr("r11 = 0x%016lx", regs[4]); - tty->print_cr("r12 = 0x%016lx", regs[3]); - tty->print_cr("r13 = 0x%016lx", regs[2]); - tty->print_cr("r14 = 0x%016lx", regs[1]); - tty->print_cr("r15 = 0x%016lx", regs[0]); - BREAKPOINT; - } - ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); - } else { - ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", - msg); - } -} - -void MacroAssembler::os_breakpoint() { - // instead of directly emitting a breakpoint, call os:breakpoint for - // better debugability - // This shouldn't need alignment, it's an empty function - call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); -} - -// Write serialization page so VM thread can do a pseudo remote membar. -// We use the current thread pointer to calculate a thread specific -// offset to write to within the page. This minimizes bus traffic -// due to cache line collision. 
-void MacroAssembler::serialize_memory(Register thread, - Register tmp) { - - movl(tmp, thread); - shrl(tmp, os::get_serialize_page_shift_count()); - andl(tmp, (os::vm_page_size() - sizeof(int))); - - Address index(noreg, tmp, Address::times_1); - ExternalAddress page(os::get_memory_serialize_page()); - - movptr(ArrayAddress(page, index), tmp); -} - -void MacroAssembler::verify_tlab() { -#ifdef ASSERT - if (UseTLAB) { - Label next, ok; - Register t1 = rsi; - - pushq(t1); - - movq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); - cmpq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset()))); - jcc(Assembler::aboveEqual, next); - stop("assert(top >= start)"); - should_not_reach_here(); - - bind(next); - movq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); - cmpq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); - jcc(Assembler::aboveEqual, ok); - stop("assert(top <= end)"); - should_not_reach_here(); - - bind(ok); - - popq(t1); - } -#endif -} - -// Defines obj, preserves var_size_in_bytes -void MacroAssembler::eden_allocate(Register obj, - Register var_size_in_bytes, - int con_size_in_bytes, - Register t1, - Label& slow_case) { - assert(obj == rax, "obj must be in rax for cmpxchg"); - assert_different_registers(obj, var_size_in_bytes, t1); - Register end = t1; - Label retry; - bind(retry); - ExternalAddress heap_top((address) Universe::heap()->top_addr()); - movptr(obj, heap_top); - if (var_size_in_bytes == noreg) { - leaq(end, Address(obj, con_size_in_bytes)); - } else { - leaq(end, Address(obj, var_size_in_bytes, Address::times_1)); - } - // if end < obj then we wrapped around => object too long => slow case - cmpq(end, obj); - jcc(Assembler::below, slow_case); - cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr())); - - jcc(Assembler::above, slow_case); - // Compare obj with the top addr, and if still equal, store the new - // top addr in end at the address of the top addr pointer. Sets ZF - // if was equal, and clears it otherwise. Use lock prefix for - // atomicity on MPs. - if (os::is_MP()) { - lock(); - } - cmpxchgptr(end, heap_top); - // if someone beat us on the allocation, try again, otherwise continue - jcc(Assembler::notEqual, retry); -} - -// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. -void MacroAssembler::tlab_allocate(Register obj, - Register var_size_in_bytes, - int con_size_in_bytes, - Register t1, - Register t2, - Label& slow_case) { - assert_different_registers(obj, t1, t2); - assert_different_registers(obj, var_size_in_bytes, t1); - Register end = t2; - - verify_tlab(); - - movq(obj, Address(r15_thread, JavaThread::tlab_top_offset())); - if (var_size_in_bytes == noreg) { - leaq(end, Address(obj, con_size_in_bytes)); - } else { - leaq(end, Address(obj, var_size_in_bytes, Address::times_1)); - } - cmpq(end, Address(r15_thread, JavaThread::tlab_end_offset())); - jcc(Assembler::above, slow_case); - - // update the tlab top pointer - movq(Address(r15_thread, JavaThread::tlab_top_offset()), end); - - // recover var_size_in_bytes if necessary - if (var_size_in_bytes == end) { - subq(var_size_in_bytes, obj); - } - verify_tlab(); -} - -// Preserves rbx and rdx. 
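A minimal standalone C++11 sketch (not HotSpot code) of the lock-free bump-pointer loop that eden_allocate above emits with cmpxchg on the shared heap top: compute the prospective end, fall back to the slow path on wrap-around or heap exhaustion, and retry if another thread moved the top first.

    #include <atomic>
    #include <cstdint>

    void* eden_allocate_sketch(std::atomic<uintptr_t>& heap_top,
                               uintptr_t heap_end,
                               uintptr_t size_in_bytes) {
      for (;;) {
        uintptr_t obj = heap_top.load();
        uintptr_t end = obj + size_in_bytes;
        if (end < obj || end > heap_end) {
          return nullptr;                       // wrapped or full: slow case
        }
        // cmpxchg: install the new top only if nobody allocated in between.
        if (heap_top.compare_exchange_weak(obj, end)) {
          return reinterpret_cast<void*>(obj);  // obj is the old top
        }
        // lost the race: retry
      }
    }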
-void MacroAssembler::tlab_refill(Label& retry, - Label& try_eden, - Label& slow_case) { - Register top = rax; - Register t1 = rcx; - Register t2 = rsi; - Register t3 = r10; - Register thread_reg = r15_thread; - assert_different_registers(top, thread_reg, t1, t2, t3, - /* preserve: */ rbx, rdx); - Label do_refill, discard_tlab; - - if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { - // No allocation in the shared eden. - jmp(slow_case); - } - - movq(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); - movq(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); - - // calculate amount of free space - subq(t1, top); - shrq(t1, LogHeapWordSize); - - // Retain tlab and allocate object in shared space if - // the amount free in the tlab is too large to discard. - cmpq(t1, Address(thread_reg, // size_t - in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); - jcc(Assembler::lessEqual, discard_tlab); - - // Retain - mov64(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment()); - addq(Address(thread_reg, // size_t - in_bytes(JavaThread::tlab_refill_waste_limit_offset())), - t2); - if (TLABStats) { - // increment number of slow_allocations - addl(Address(thread_reg, // unsigned int - in_bytes(JavaThread::tlab_slow_allocations_offset())), - 1); - } - jmp(try_eden); - - bind(discard_tlab); - if (TLABStats) { - // increment number of refills - addl(Address(thread_reg, // unsigned int - in_bytes(JavaThread::tlab_number_of_refills_offset())), - 1); - // accumulate wastage -- t1 is amount free in tlab - addl(Address(thread_reg, // unsigned int - in_bytes(JavaThread::tlab_fast_refill_waste_offset())), - t1); - } - - // if tlab is currently allocated (top or end != null) then - // fill [top, end + alignment_reserve) with array object - testq(top, top); - jcc(Assembler::zero, do_refill); - - // set up the mark word - mov64(t3, (int64_t) markOopDesc::prototype()->copy_set_hash(0x2)); - movq(Address(top, oopDesc::mark_offset_in_bytes()), t3); - // set the length to the remaining space - subq(t1, typeArrayOopDesc::header_size(T_INT)); - addq(t1, (int)ThreadLocalAllocBuffer::alignment_reserve()); - shlq(t1, log2_intptr(HeapWordSize / sizeof(jint))); - movq(Address(top, arrayOopDesc::length_offset_in_bytes()), t1); - // set klass to intArrayKlass - movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr())); - movq(Address(top, oopDesc::klass_offset_in_bytes()), t1); - - // refill the tlab with an eden allocation - bind(do_refill); - movq(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset()))); - shlq(t1, LogHeapWordSize); - // add object_size ?? - eden_allocate(top, t1, 0, t2, slow_case); - - // Check that t1 was preserved in eden_allocate. 
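A minimal standalone C++ sketch (not HotSpot code) of the filler-array length computed on the discard path above: the dead space of the old TLAB, plus the alignment reserve, is described as an int[] so the heap stays walkable. Header size and alignment reserve are HotSpot constants and appear here only as parameters; sizeof(int) stands in for sizeof(jint).

    #include <cstddef>

    std::size_t filler_int_array_length(std::size_t free_words,     // t1: free space, in heap words
                                        std::size_t header_words,   // int-array header size, in words
                                        std::size_t reserve_words,  // TLAB alignment reserve, in words
                                        std::size_t heap_word_size = 8 /* bytes on amd64 */) {
      std::size_t words = free_words - header_words + reserve_words;
      return words * (heap_word_size / sizeof(int));  // array elements per heap word
    }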
-#ifdef ASSERT - if (UseTLAB) { - Label ok; - Register tsize = rsi; - assert_different_registers(tsize, thread_reg, t1); - pushq(tsize); - movq(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset()))); - shlq(tsize, LogHeapWordSize); - cmpq(t1, tsize); - jcc(Assembler::equal, ok); - stop("assert(t1 != tlab size)"); - should_not_reach_here(); - - bind(ok); - popq(tsize); - } -#endif - movq(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top); - movq(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top); - addq(top, t1); - subq(top, (int)ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - movq(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top); - verify_tlab(); - jmp(retry); -} - - -int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg, - bool swap_reg_contains_mark, - Label& done, Label* slow_case, - BiasedLockingCounters* counters) { - assert(UseBiasedLocking, "why call this otherwise?"); - assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq"); - assert(tmp_reg != noreg, "tmp_reg must be supplied"); - assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg); - assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); - Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes()); - Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes()); - Address saved_mark_addr(lock_reg, 0); - - if (PrintBiasedLockingStatistics && counters == NULL) - counters = BiasedLocking::counters(); - - // Biased locking - // See whether the lock is currently biased toward our thread and - // whether the epoch is still valid - // Note that the runtime guarantees sufficient alignment of JavaThread - // pointers to allow age to be placed into low bits - // First check to see whether biasing is even enabled for this object - Label cas_label; - int null_check_offset = -1; - if (!swap_reg_contains_mark) { - null_check_offset = offset(); - movq(swap_reg, mark_addr); - } - movq(tmp_reg, swap_reg); - andq(tmp_reg, markOopDesc::biased_lock_mask_in_place); - cmpq(tmp_reg, markOopDesc::biased_lock_pattern); - jcc(Assembler::notEqual, cas_label); - // The bias pattern is present in the object's header. Need to check - // whether the bias owner and the epoch are both still current. - movq(tmp_reg, klass_addr); - movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); - orq(tmp_reg, r15_thread); - xorq(tmp_reg, swap_reg); - andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place)); - if (counters != NULL) { - cond_inc32(Assembler::zero, - ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr())); - } - jcc(Assembler::equal, done); - - Label try_revoke_bias; - Label try_rebias; - - // At this point we know that the header has the bias pattern and - // that we are not the bias owner in the current epoch. We need to - // figure out more details about the state of the header in order to - // know what operations can be legally performed on the object's - // header. - - // If the low three bits in the xor result aren't clear, that means - // the prototype header is no longer biased and we have to revoke - // the bias on this object. - testq(tmp_reg, markOopDesc::biased_lock_mask_in_place); - jcc(Assembler::notZero, try_revoke_bias); - - // Biasing is still enabled for this data type. 
See whether the - // epoch of the current bias is still valid, meaning that the epoch - // bits of the mark word are equal to the epoch bits of the - // prototype header. (Note that the prototype header's epoch bits - // only change at a safepoint.) If not, attempt to rebias the object - // toward the current thread. Note that we must be absolutely sure - // that the current epoch is invalid in order to do this because - // otherwise the manipulations it performs on the mark word are - // illegal. - testq(tmp_reg, markOopDesc::epoch_mask_in_place); - jcc(Assembler::notZero, try_rebias); - - // The epoch of the current bias is still valid but we know nothing - // about the owner; it might be set or it might be clear. Try to - // acquire the bias of the object using an atomic operation. If this - // fails we will go in to the runtime to revoke the object's bias. - // Note that we first construct the presumed unbiased header so we - // don't accidentally blow away another thread's valid bias. - andq(swap_reg, - markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); - movq(tmp_reg, swap_reg); - orq(tmp_reg, r15_thread); - if (os::is_MP()) { - lock(); - } - cmpxchgq(tmp_reg, Address(obj_reg, 0)); - // If the biasing toward our thread failed, this means that - // another thread succeeded in biasing it toward itself and we - // need to revoke that bias. The revocation will occur in the - // interpreter runtime in the slow case. - if (counters != NULL) { - cond_inc32(Assembler::zero, - ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr())); - } - if (slow_case != NULL) { - jcc(Assembler::notZero, *slow_case); - } - jmp(done); - - bind(try_rebias); - // At this point we know the epoch has expired, meaning that the - // current "bias owner", if any, is actually invalid. Under these - // circumstances _only_, we are allowed to use the current header's - // value as the comparison value when doing the cas to acquire the - // bias in the current epoch. In other words, we allow transfer of - // the bias from one thread to another directly in this situation. - // - // FIXME: due to a lack of registers we currently blow away the age - // bits in this situation. Should attempt to preserve them. - movq(tmp_reg, klass_addr); - movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); - orq(tmp_reg, r15_thread); - if (os::is_MP()) { - lock(); - } - cmpxchgq(tmp_reg, Address(obj_reg, 0)); - // If the biasing toward our thread failed, then another thread - // succeeded in biasing it toward itself and we need to revoke that - // bias. The revocation will occur in the runtime in the slow case. - if (counters != NULL) { - cond_inc32(Assembler::zero, - ExternalAddress((address) counters->rebiased_lock_entry_count_addr())); - } - if (slow_case != NULL) { - jcc(Assembler::notZero, *slow_case); - } - jmp(done); - - bind(try_revoke_bias); - // The prototype mark in the klass doesn't have the bias bit set any - // more, indicating that objects of this data type are not supposed - // to be biased any more. We are going to try to reset the mark of - // this object to the prototype value and fall through to the - // CAS-based locking scheme. Note that if our CAS fails, it means - // that another thread raced us for the privilege of revoking the - // bias of this particular object, so it's okay to continue in the - // normal locking code. 
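A minimal standalone C++ sketch (not HotSpot code) of the mark-word tests the biased-locking fast path above performs. The bit-field constants follow the markOop layout the comments describe (2 lock bits, 1 bias bit, 4 age bits, 2 epoch bits) and are illustrative only.

    #include <cstdint>

    constexpr uintptr_t biased_lock_mask    = 0x7;              // low three bits
    constexpr uintptr_t biased_lock_pattern = 0x5;              // bias bit set, "unlocked" lock bits
    constexpr uintptr_t age_mask_in_place   = uintptr_t(0xf) << 3;

    bool is_biased(uintptr_t mark) {
      return (mark & biased_lock_mask) == biased_lock_pattern;
    }

    // A zero result (ignoring the age bits) means the object is biased toward
    // this thread and the epoch still matches the klass prototype header.
    uintptr_t bias_diff(uintptr_t mark, uintptr_t prototype_header, uintptr_t thread) {
      return (mark ^ (prototype_header | thread)) & ~age_mask_in_place;
    }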
- // - // FIXME: due to a lack of registers we currently blow away the age - // bits in this situation. Should attempt to preserve them. - movq(tmp_reg, klass_addr); - movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); - if (os::is_MP()) { - lock(); - } - cmpxchgq(tmp_reg, Address(obj_reg, 0)); - // Fall through to the normal CAS-based lock, because no matter what - // the result of the above CAS, some thread must have succeeded in - // removing the bias bit from the object's header. - if (counters != NULL) { - cond_inc32(Assembler::zero, - ExternalAddress((address) counters->revoked_lock_entry_count_addr())); - } - - bind(cas_label); - - return null_check_offset; -} - - -void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) { - assert(UseBiasedLocking, "why call this otherwise?"); - - // Check for biased locking unlock case, which is a no-op - // Note: we do not have to check the thread ID for two reasons. - // First, the interpreter checks for IllegalMonitorStateException at - // a higher level. Second, if the bias was revoked while we held the - // lock, the object could not be rebiased toward another thread, so - // the bias bit would be clear. - movq(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); - andq(temp_reg, markOopDesc::biased_lock_mask_in_place); - cmpq(temp_reg, markOopDesc::biased_lock_pattern); - jcc(Assembler::equal, done); -} - - -Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { - switch (cond) { - // Note some conditions are synonyms for others - case Assembler::zero: return Assembler::notZero; - case Assembler::notZero: return Assembler::zero; - case Assembler::less: return Assembler::greaterEqual; - case Assembler::lessEqual: return Assembler::greater; - case Assembler::greater: return Assembler::lessEqual; - case Assembler::greaterEqual: return Assembler::less; - case Assembler::below: return Assembler::aboveEqual; - case Assembler::belowEqual: return Assembler::above; - case Assembler::above: return Assembler::belowEqual; - case Assembler::aboveEqual: return Assembler::below; - case Assembler::overflow: return Assembler::noOverflow; - case Assembler::noOverflow: return Assembler::overflow; - case Assembler::negative: return Assembler::positive; - case Assembler::positive: return Assembler::negative; - case Assembler::parity: return Assembler::noParity; - case Assembler::noParity: return Assembler::parity; - } - ShouldNotReachHere(); return Assembler::overflow; -} - - -void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) { - Condition negated_cond = negate_condition(cond); - Label L; - jcc(negated_cond, L); - atomic_incl(counter_addr); - bind(L); -} - -void MacroAssembler::atomic_incl(AddressLiteral counter_addr) { - pushfq(); - if (os::is_MP()) - lock(); - incrementl(counter_addr); - popfq(); -} - -SkipIfEqual::SkipIfEqual( - MacroAssembler* masm, const bool* flag_addr, bool value) { - _masm = masm; - _masm->cmp8(ExternalAddress((address)flag_addr), value); - _masm->jcc(Assembler::equal, _label); -} - -SkipIfEqual::~SkipIfEqual() { - _masm->bind(_label); -} - -void MacroAssembler::bang_stack_size(Register size, Register tmp) { - movq(tmp, rsp); - // Bang stack for total size given plus shadow page size. - // Bang one page at a time because large size can bang beyond yellow and - // red zones. 
- Label loop; - bind(loop); - movl(Address(tmp, (-os::vm_page_size())), size ); - subq(tmp, os::vm_page_size()); - subl(size, os::vm_page_size()); - jcc(Assembler::greater, loop); - - // Bang down shadow pages too. - // The -1 because we already subtracted 1 page. - for (int i = 0; i< StackShadowPages-1; i++) { - movq(Address(tmp, (-i*os::vm_page_size())), size ); - } -} --- old/hotspot/src/cpu/x86/vm/assembler_x86_64.hpp 2009-08-01 04:21:42.139516818 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,1450 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)assembler_x86_64.hpp 1.46 07/09/20 10:42:56 JVM" -#endif -/* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -class BiasedLockingCounters; - -// Contains all the definitions needed for amd64 assembly code generation. - -#ifdef _LP64 -// Calling convention -class Argument VALUE_OBJ_CLASS_SPEC { - public: - enum { -#ifdef _WIN64 - n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) - n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... ) -#else - n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) - n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... ) -#endif - n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ... - n_float_register_parameters_j = 8 // j_farg0, j_farg1, ... - }; -}; - - -// Symbolically name the register arguments used by the c calling convention. -// Windows is different from linux/solaris. So much for standards... 
- -#ifdef _WIN64 - -REGISTER_DECLARATION(Register, c_rarg0, rcx); -REGISTER_DECLARATION(Register, c_rarg1, rdx); -REGISTER_DECLARATION(Register, c_rarg2, r8); -REGISTER_DECLARATION(Register, c_rarg3, r9); - -REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); -REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); -REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); -REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); - -#else - -REGISTER_DECLARATION(Register, c_rarg0, rdi); -REGISTER_DECLARATION(Register, c_rarg1, rsi); -REGISTER_DECLARATION(Register, c_rarg2, rdx); -REGISTER_DECLARATION(Register, c_rarg3, rcx); -REGISTER_DECLARATION(Register, c_rarg4, r8); -REGISTER_DECLARATION(Register, c_rarg5, r9); - -REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); -REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); -REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); -REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); -REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4); -REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5); -REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6); -REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7); - -#endif - -// Symbolically name the register arguments used by the Java calling convention. -// We have control over the convention for java so we can do what we please. -// What pleases us is to offset the java calling convention so that when -// we call a suitable jni method the arguments are lined up and we don't -// have to do little shuffling. A suitable jni method is non-static and a -// small number of arguments (two fewer args on windows) -// -// |-------------------------------------------------------| -// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 | -// |-------------------------------------------------------| -// | rcx rdx r8 r9 rdi* rsi* | windows (* not a c_rarg) -// | rdi rsi rdx rcx r8 r9 | solaris/linux -// |-------------------------------------------------------| -// | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 | -// |-------------------------------------------------------| - -REGISTER_DECLARATION(Register, j_rarg0, c_rarg1); -REGISTER_DECLARATION(Register, j_rarg1, c_rarg2); -REGISTER_DECLARATION(Register, j_rarg2, c_rarg3); -// Windows runs out of register args here -#ifdef _WIN64 -REGISTER_DECLARATION(Register, j_rarg3, rdi); -REGISTER_DECLARATION(Register, j_rarg4, rsi); -#else -REGISTER_DECLARATION(Register, j_rarg3, c_rarg4); -REGISTER_DECLARATION(Register, j_rarg4, c_rarg5); -#endif /* _WIN64 */ -REGISTER_DECLARATION(Register, j_rarg5, c_rarg0); - -REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0); -REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1); -REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2); -REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3); -REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4); -REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5); -REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6); -REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7); - -REGISTER_DECLARATION(Register, rscratch1, r10); // volatile -REGISTER_DECLARATION(Register, rscratch2, r11); // volatile - -REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved - -#endif // _LP64 - -// Address is an abstraction used to represent a memory location -// using any of the amd64 addressing modes with one object. -// -// Note: A register location is represented via a Register, not -// via an address for efficiency & simplicity reasons. 
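A minimal standalone C++ sketch (not HotSpot code) of the rotation between the Java and C integer argument registers that the j_rarg declarations above set up on linux/solaris (Windows diverges for j_rarg3/j_rarg4 because it has only four integer argument registers). Offsetting the Java convention by one slot presumably leaves c_rarg0 free for the JNIEnv* of a "suitable" JNI call, so the remaining arguments are already lined up.

    constexpr int n_int_register_parameters_j = 6;

    constexpr int j_to_c_slot(int j) {          // j_rargN maps to c_rargM
      return (j + 1) % n_int_register_parameters_j;
    }

    static_assert(j_to_c_slot(0) == 1, "j_rarg0 is c_rarg1");
    static_assert(j_to_c_slot(5) == 0, "j_rarg5 is c_rarg0");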
- -class ArrayAddress; - -class Address VALUE_OBJ_CLASS_SPEC { - public: - enum ScaleFactor { - no_scale = -1, - times_1 = 0, - times_2 = 1, - times_4 = 2, - times_8 = 3 - }; - - private: - Register _base; - Register _index; - ScaleFactor _scale; - int _disp; - RelocationHolder _rspec; - - // Easily misused constructors make them private - Address(int disp, address loc, relocInfo::relocType rtype); - Address(int disp, address loc, RelocationHolder spec); - - public: - // creation - Address() - : _base(noreg), - _index(noreg), - _scale(no_scale), - _disp(0) { - } - - // No default displacement otherwise Register can be implicitly - // converted to 0(Register) which is quite a different animal. - - Address(Register base, int disp) - : _base(base), - _index(noreg), - _scale(no_scale), - _disp(disp) { - } - - Address(Register base, Register index, ScaleFactor scale, int disp = 0) - : _base (base), - _index(index), - _scale(scale), - _disp (disp) { - assert(!index->is_valid() == (scale == Address::no_scale), - "inconsistent address"); - } - - // The following two overloads are used in connection with the - // ByteSize type (see sizes.hpp). They simplify the use of - // ByteSize'd arguments in assembly code. Note that their equivalent - // for the optimized build are the member functions with int disp - // argument since ByteSize is mapped to an int type in that case. - // - // Note: DO NOT introduce similar overloaded functions for WordSize - // arguments as in the optimized mode, both ByteSize and WordSize - // are mapped to the same type and thus the compiler cannot make a - // distinction anymore (=> compiler errors). - -#ifdef ASSERT - Address(Register base, ByteSize disp) - : _base(base), - _index(noreg), - _scale(no_scale), - _disp(in_bytes(disp)) { - } - - Address(Register base, Register index, ScaleFactor scale, ByteSize disp) - : _base(base), - _index(index), - _scale(scale), - _disp(in_bytes(disp)) { - assert(!index->is_valid() == (scale == Address::no_scale), - "inconsistent address"); - } -#endif // ASSERT - - // accessors - bool uses(Register reg) const { - return _base == reg || _index == reg; - } - - // Convert the raw encoding form into the form expected by the constructor for - // Address. An index of 4 (rsp) corresponds to having no index, so convert - // that to noreg for the Address constructor. - static Address make_raw(int base, int index, int scale, int disp); - - static Address make_array(ArrayAddress); - - private: - bool base_needs_rex() const { - return _base != noreg && _base->encoding() >= 8; - } - - bool index_needs_rex() const { - return _index != noreg &&_index->encoding() >= 8; - } - - relocInfo::relocType reloc() const { return _rspec.type(); } - - friend class Assembler; - friend class MacroAssembler; - friend class LIR_Assembler; // base/index/scale/disp -}; - -// -// AddressLiteral has been split out from Address because operands of this type -// need to be treated specially on 32bit vs. 64bit platforms. By splitting it out -// the few instructions that need to deal with address literals are unique and the -// MacroAssembler does not have to implement every instruction in the Assembler -// in order to search for address literals that may need special handling depending -// on the instruction and the platform. As small step on the way to merging i486/amd64 -// directories. 
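A minimal standalone C++ sketch (not HotSpot code) of the effective-address computation the Address abstraction above models; scale is the log2 factor from the ScaleFactor enum (times_1 .. times_8).

    #include <cstdint>

    inline uintptr_t effective_address(uintptr_t base, uintptr_t index,
                                       int scale /* 0..3 */, int32_t disp) {
      return base + (index << scale) + disp;
    }
    // e.g. Address(rbx, rcx, Address::times_8, 16) denotes
    //      effective_address(rbx, rcx, 3, 16) == rbx + rcx*8 + 16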
-// -class AddressLiteral VALUE_OBJ_CLASS_SPEC { - friend class ArrayAddress; - RelocationHolder _rspec; - // Typically we use AddressLiterals we want to use their rval - // However in some situations we want the lval (effect address) of the item. - // We provide a special factory for making those lvals. - bool _is_lval; - - // If the target is far we'll need to load the ea of this to - // a register to reach it. Otherwise if near we can do rip - // relative addressing. - - address _target; - - protected: - // creation - AddressLiteral() - : _is_lval(false), - _target(NULL) - {} - - public: - - - AddressLiteral(address target, relocInfo::relocType rtype); - - AddressLiteral(address target, RelocationHolder const& rspec) - : _rspec(rspec), - _is_lval(false), - _target(target) - {} - - AddressLiteral addr() { - AddressLiteral ret = *this; - ret._is_lval = true; - return ret; - } - - - private: - - address target() { return _target; } - bool is_lval() { return _is_lval; } - - relocInfo::relocType reloc() const { return _rspec.type(); } - const RelocationHolder& rspec() const { return _rspec; } - - friend class Assembler; - friend class MacroAssembler; - friend class Address; - friend class LIR_Assembler; -}; - -// Convience classes -class RuntimeAddress: public AddressLiteral { - - public: - - RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {} - -}; - -class OopAddress: public AddressLiteral { - - public: - - OopAddress(address target) : AddressLiteral(target, relocInfo::oop_type){} - -}; - -class ExternalAddress: public AddressLiteral { - - public: - - ExternalAddress(address target) : AddressLiteral(target, relocInfo::external_word_type){} - -}; - -class InternalAddress: public AddressLiteral { - - public: - - InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {} - -}; - -// x86 can do array addressing as a single operation since disp can be an absolute -// address but amd64 can't [e.g. array_base(rx, ry:width) ]. We create a class -// that expresses the concept but does extra magic on amd64 to get the final result - -class ArrayAddress VALUE_OBJ_CLASS_SPEC { - private: - - AddressLiteral _base; - Address _index; - - public: - - ArrayAddress() {}; - ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {}; - AddressLiteral base() { return _base; } - Address index() { return _index; } - -}; - -// The amd64 Assembler: Pure assembler doing NO optimizations on -// the instruction level (e.g. mov rax, 0 is not translated into xor -// rax, rax!); i.e., what you write is what you get. The Assembler is -// generating code into a CodeBuffer. - -const int FPUStateSizeInWords = 512 / wordSize; - -class Assembler : public AbstractAssembler { - friend class AbstractAssembler; // for the non-virtual hack - friend class StubGenerator; - - - protected: -#ifdef ASSERT - void check_relocation(RelocationHolder const& rspec, int format); -#endif - - inline void emit_long64(jlong x); - - void emit_data(jint data, relocInfo::relocType rtype, int format /* = 1 */); - void emit_data(jint data, RelocationHolder const& rspec, int format /* = 1 */); - void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0); - void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0); - - // Helper functions for groups of instructions - void emit_arith_b(int op1, int op2, Register dst, int imm8); - - void emit_arith(int op1, int op2, Register dst, int imm32); - // only x86?? 
- void emit_arith(int op1, int op2, Register dst, jobject obj); - void emit_arith(int op1, int op2, Register dst, Register src); - - void emit_operand(Register reg, - Register base, Register index, Address::ScaleFactor scale, - int disp, - RelocationHolder const& rspec, - int rip_relative_correction = 0); - void emit_operand(Register reg, Address adr, - int rip_relative_correction = 0); - void emit_operand(XMMRegister reg, - Register base, Register index, Address::ScaleFactor scale, - int disp, - RelocationHolder const& rspec, - int rip_relative_correction = 0); - void emit_operand(XMMRegister reg, Address adr, - int rip_relative_correction = 0); - - // Immediate-to-memory forms - void emit_arith_operand(int op1, Register rm, Address adr, int imm32); - - void emit_farith(int b1, int b2, int i); - - bool reachable(AddressLiteral adr); - - // These are all easily abused and hence protected - - // Make these disappear in 64bit mode since they would never be correct -#ifndef _LP64 - void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec); - void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec); - - void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec); - void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec); - - void push_literal32(int32_t imm32, RelocationHolder const& rspec); -#endif // _LP64 - - - void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec); - - // These are unique in that we are ensured by the caller that the 32bit - // relative in these instructions will always be able to reach the potentially - // 64bit address described by entry. Since they can take a 64bit address they - // don't have the 32 suffix like the other instructions in this class. - void jmp_literal(address entry, RelocationHolder const& rspec); - void call_literal(address entry, RelocationHolder const& rspec); - - public: - enum Condition { // The amd64 condition codes used for conditional jumps/moves. 
- zero = 0x4, - notZero = 0x5, - equal = 0x4, - notEqual = 0x5, - less = 0xc, - lessEqual = 0xe, - greater = 0xf, - greaterEqual = 0xd, - below = 0x2, - belowEqual = 0x6, - above = 0x7, - aboveEqual = 0x3, - overflow = 0x0, - noOverflow = 0x1, - carrySet = 0x2, - carryClear = 0x3, - negative = 0x8, - positive = 0x9, - parity = 0xa, - noParity = 0xb - }; - - enum Prefix { - // segment overrides - // XXX remove segment prefixes - CS_segment = 0x2e, - SS_segment = 0x36, - DS_segment = 0x3e, - ES_segment = 0x26, - FS_segment = 0x64, - GS_segment = 0x65, - - REX = 0x40, - - REX_B = 0x41, - REX_X = 0x42, - REX_XB = 0x43, - REX_R = 0x44, - REX_RB = 0x45, - REX_RX = 0x46, - REX_RXB = 0x47, - - REX_W = 0x48, - - REX_WB = 0x49, - REX_WX = 0x4A, - REX_WXB = 0x4B, - REX_WR = 0x4C, - REX_WRB = 0x4D, - REX_WRX = 0x4E, - REX_WRXB = 0x4F - }; - - enum WhichOperand { - // input to locate_operand, and format code for relocations - imm64_operand = 0, // embedded 64-bit immediate operand - disp32_operand = 1, // embedded 32-bit displacement - call32_operand = 2, // embedded 32-bit self-relative displacement - _WhichOperand_limit = 3 - }; - - public: - - // Creation - Assembler(CodeBuffer* code) - : AbstractAssembler(code) { - } - - // Decoding - static address locate_operand(address inst, WhichOperand which); - static address locate_next_instruction(address inst); - - // Utilities - - static bool is_simm(int64_t x, int nbits) { return -( CONST64(1) << (nbits-1) ) <= x && x < ( CONST64(1) << (nbits-1) ); } - static bool is_simm32 (int64_t x) { return x == (int64_t)(int32_t)x; } - - - // Stack - void pushaq(); - void popaq(); - - void pushfq(); - void popfq(); - - void pushq(int imm32); - - void pushq(Register src); - void pushq(Address src); - - void popq(Register dst); - void popq(Address dst); - - // Instruction prefixes - void prefix(Prefix p); - - int prefix_and_encode(int reg_enc, bool byteinst = false); - int prefixq_and_encode(int reg_enc); - - int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false); - int prefixq_and_encode(int dst_enc, int src_enc); - - void prefix(Register reg); - void prefix(Address adr); - void prefixq(Address adr); - - void prefix(Address adr, Register reg, bool byteinst = false); - void prefixq(Address adr, Register reg); - - void prefix(Address adr, XMMRegister reg); - - // Moves - void movb(Register dst, Address src); - void movb(Address dst, int imm8); - void movb(Address dst, Register src); - - void movw(Address dst, int imm16); - void movw(Register dst, Address src); - void movw(Address dst, Register src); - - void movl(Register dst, int imm32); - void movl(Register dst, Register src); - void movl(Register dst, Address src); - void movl(Address dst, int imm32); - void movl(Address dst, Register src); - - void movq(Register dst, Register src); - void movq(Register dst, Address src); - void movq(Address dst, Register src); - // These prevent using movq from converting a zero (like NULL) into Register - // by giving the compiler two choices it can't resolve - void movq(Address dst, void* dummy); - void movq(Register dst, void* dummy); - - void mov64(Register dst, intptr_t imm64); - void mov64(Address dst, intptr_t imm64); - - void movsbl(Register dst, Address src); - void movsbl(Register dst, Register src); - void movswl(Register dst, Address src); - void movswl(Register dst, Register src); - void movslq(Register dst, Address src); - void movslq(Register dst, Register src); - - void movzbl(Register dst, Address src); - void movzbl(Register dst, Register src); - 
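A minimal standalone C++ sketch (not HotSpot code) showing how the REX prefix values enumerated above are composed: the fixed 0100 nibble plus the W (64-bit operand size), R (ModRM.reg extension), X (SIB.index extension) and B (ModRM.rm/SIB.base extension) bits.

    #include <cstdint>

    constexpr uint8_t rex(bool w, bool r, bool x, bool b) {
      return 0x40 | (w << 3) | (r << 2) | (x << 1) | (b << 0);
    }

    static_assert(rex(true,  false, false, false) == 0x48, "REX_W");
    static_assert(rex(false, true,  true,  true ) == 0x47, "REX_RXB");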
void movzwl(Register dst, Address src); - void movzwl(Register dst, Register src); - - protected: // Avoid using the next instructions directly. - // New cpus require use of movsd and movss to avoid partial register stall - // when loading from memory. But for old Opteron use movlpd instead of movsd. - // The selection is done in MacroAssembler::movdbl() and movflt(). - void movss(XMMRegister dst, XMMRegister src); - void movss(XMMRegister dst, Address src); - void movss(Address dst, XMMRegister src); - void movsd(XMMRegister dst, XMMRegister src); - void movsd(Address dst, XMMRegister src); - void movsd(XMMRegister dst, Address src); - void movlpd(XMMRegister dst, Address src); - // New cpus require use of movaps and movapd to avoid partial register stall - // when moving between registers. - void movapd(XMMRegister dst, XMMRegister src); - void movaps(XMMRegister dst, XMMRegister src); - public: - - void movdl(XMMRegister dst, Register src); - void movdl(Register dst, XMMRegister src); - void movdq(XMMRegister dst, Register src); - void movdq(Register dst, XMMRegister src); - - void cmovl(Condition cc, Register dst, Register src); - void cmovl(Condition cc, Register dst, Address src); - void cmovq(Condition cc, Register dst, Register src); - void cmovq(Condition cc, Register dst, Address src); - - // Prefetches - private: - void prefetch_prefix(Address src); - public: - void prefetcht0(Address src); - void prefetcht1(Address src); - void prefetcht2(Address src); - void prefetchnta(Address src); - void prefetchw(Address src); - - // Arithmetics - void adcl(Register dst, int imm32); - void adcl(Register dst, Address src); - void adcl(Register dst, Register src); - void adcq(Register dst, int imm32); - void adcq(Register dst, Address src); - void adcq(Register dst, Register src); - - void addl(Address dst, int imm32); - void addl(Address dst, Register src); - void addl(Register dst, int imm32); - void addl(Register dst, Address src); - void addl(Register dst, Register src); - void addq(Address dst, int imm32); - void addq(Address dst, Register src); - void addq(Register dst, int imm32); - void addq(Register dst, Address src); - void addq(Register dst, Register src); - - void andl(Register dst, int imm32); - void andl(Register dst, Address src); - void andl(Register dst, Register src); - void andq(Register dst, int imm32); - void andq(Register dst, Address src); - void andq(Register dst, Register src); - - void cmpb(Address dst, int imm8); - void cmpl(Address dst, int imm32); - void cmpl(Register dst, int imm32); - void cmpl(Register dst, Register src); - void cmpl(Register dst, Address src); - void cmpq(Address dst, int imm32); - void cmpq(Address dst, Register src); - void cmpq(Register dst, int imm32); - void cmpq(Register dst, Register src); - void cmpq(Register dst, Address src); - - void ucomiss(XMMRegister dst, XMMRegister src); - void ucomisd(XMMRegister dst, XMMRegister src); - - protected: - // Don't use next inc() and dec() methods directly. INC & DEC instructions - // could cause a partial flag stall since they don't set CF flag. - // Use MacroAssembler::decrement() & MacroAssembler::increment() methods - // which call inc() & dec() or add() & sub() in accordance with - // the product flag UseIncDec value. 
- - void decl(Register dst); - void decl(Address dst); - void decq(Register dst); - void decq(Address dst); - - void incl(Register dst); - void incl(Address dst); - void incq(Register dst); - void incq(Address dst); - - public: - void idivl(Register src); - void idivq(Register src); - void cdql(); - void cdqq(); - - void imull(Register dst, Register src); - void imull(Register dst, Register src, int value); - void imulq(Register dst, Register src); - void imulq(Register dst, Register src, int value); - - void leal(Register dst, Address src); - void leaq(Register dst, Address src); - - void mull(Address src); - void mull(Register src); - - void negl(Register dst); - void negq(Register dst); - - void notl(Register dst); - void notq(Register dst); - - void orl(Address dst, int imm32); - void orl(Register dst, int imm32); - void orl(Register dst, Address src); - void orl(Register dst, Register src); - void orq(Address dst, int imm32); - void orq(Register dst, int imm32); - void orq(Register dst, Address src); - void orq(Register dst, Register src); - - void rcll(Register dst, int imm8); - void rclq(Register dst, int imm8); - - void sarl(Register dst, int imm8); - void sarl(Register dst); - void sarq(Register dst, int imm8); - void sarq(Register dst); - - void sbbl(Address dst, int imm32); - void sbbl(Register dst, int imm32); - void sbbl(Register dst, Address src); - void sbbl(Register dst, Register src); - void sbbq(Address dst, int imm32); - void sbbq(Register dst, int imm32); - void sbbq(Register dst, Address src); - void sbbq(Register dst, Register src); - - void shll(Register dst, int imm8); - void shll(Register dst); - void shlq(Register dst, int imm8); - void shlq(Register dst); - - void shrl(Register dst, int imm8); - void shrl(Register dst); - void shrq(Register dst, int imm8); - void shrq(Register dst); - - void subl(Address dst, int imm32); - void subl(Address dst, Register src); - void subl(Register dst, int imm32); - void subl(Register dst, Address src); - void subl(Register dst, Register src); - void subq(Address dst, int imm32); - void subq(Address dst, Register src); - void subq(Register dst, int imm32); - void subq(Register dst, Address src); - void subq(Register dst, Register src); - - void testb(Register dst, int imm8); - void testl(Register dst, int imm32); - void testl(Register dst, Register src); - void testq(Register dst, int imm32); - void testq(Register dst, Register src); - - void xaddl(Address dst, Register src); - void xaddq(Address dst, Register src); - - void xorl(Register dst, int imm32); - void xorl(Register dst, Address src); - void xorl(Register dst, Register src); - void xorq(Register dst, int imm32); - void xorq(Register dst, Address src); - void xorq(Register dst, Register src); - - // Miscellaneous - void bswapl(Register reg); - void bswapq(Register reg); - void lock(); - - void xchgl(Register reg, Address adr); - void xchgl(Register dst, Register src); - void xchgq(Register reg, Address adr); - void xchgq(Register dst, Register src); - - void cmpxchgl(Register reg, Address adr); - void cmpxchgq(Register reg, Address adr); - - void nop(int i = 1); - void addr_nop_4(); - void addr_nop_5(); - void addr_nop_7(); - void addr_nop_8(); - - void hlt(); - void ret(int imm16); - void smovl(); - void rep_movl(); - void rep_movq(); - void rep_set(); - void repne_scan(); - void setb(Condition cc, Register dst); - - void clflush(Address adr); - - enum Membar_mask_bits { - StoreStore = 1 << 3, - LoadStore = 1 << 2, - StoreLoad = 1 << 1, - LoadLoad = 1 << 0 - }; - - // 
Serializes memory. - void membar(Membar_mask_bits order_constraint) { - // We only have to handle StoreLoad and LoadLoad - if (order_constraint & StoreLoad) { - // MFENCE subsumes LFENCE - mfence(); - } /* [jk] not needed currently: else if (order_constraint & LoadLoad) { - lfence(); - } */ - } - - void lfence() { - emit_byte(0x0F); - emit_byte(0xAE); - emit_byte(0xE8); - } - - void mfence() { - emit_byte(0x0F); - emit_byte(0xAE); - emit_byte(0xF0); - } - - // Identify processor type and features - void cpuid() { - emit_byte(0x0F); - emit_byte(0xA2); - } - - void cld() { emit_byte(0xfc); - } - - void std() { emit_byte(0xfd); - } - - - // Calls - - void call(Label& L, relocInfo::relocType rtype); - void call(Register reg); - void call(Address adr); - - // Jumps - - void jmp(Register reg); - void jmp(Address adr); - - // Label operations & relative jumps (PPUM Appendix D) - // unconditional jump to L - void jmp(Label& L, relocInfo::relocType rtype = relocInfo::none); - - - // Unconditional 8-bit offset jump to L. - // WARNING: be very careful using this for forward jumps. If the label is - // not bound within an 8-bit offset of this instruction, a run-time error - // will occur. - void jmpb(Label& L); - - // jcc is the generic conditional branch generator to run- time - // routines, jcc is used for branches to labels. jcc takes a branch - // opcode (cc) and a label (L) and generates either a backward - // branch or a forward branch and links it to the label fixup - // chain. Usage: - // - // Label L; // unbound label - // jcc(cc, L); // forward branch to unbound label - // bind(L); // bind label to the current pc - // jcc(cc, L); // backward branch to bound label - // bind(L); // illegal: a label may be bound only once - // - // Note: The same Label can be used for forward and backward branches - // but it may be bound only once. - - void jcc(Condition cc, Label& L, - relocInfo::relocType rtype = relocInfo::none); - - // Conditional jump to a 8-bit offset to L. - // WARNING: be very careful using this for forward jumps. If the label is - // not bound within an 8-bit offset of this instruction, a run-time error - // will occur. 
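A minimal standalone C++ sketch (not HotSpot code) of the decision membar() above encodes: under the x86-64 TSO memory model only StoreLoad reordering can actually occur, so only that constraint needs a real fence (MFENCE, which subsumes the weaker fences); the remaining combinations are already guaranteed by the hardware and emit nothing.

    enum MembarMaskBits {
      StoreStore = 1 << 3,
      LoadStore  = 1 << 2,
      StoreLoad  = 1 << 1,
      LoadLoad   = 1 << 0
    };

    inline bool needs_mfence(int order_constraint) {
      return (order_constraint & StoreLoad) != 0;
    }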
- void jccb(Condition cc, Label& L); - - // Floating-point operations - - void fxsave(Address dst); - void fxrstor(Address src); - void ldmxcsr(Address src); - void stmxcsr(Address dst); - - void addss(XMMRegister dst, XMMRegister src); - void addss(XMMRegister dst, Address src); - void subss(XMMRegister dst, XMMRegister src); - void subss(XMMRegister dst, Address src); - void mulss(XMMRegister dst, XMMRegister src); - void mulss(XMMRegister dst, Address src); - void divss(XMMRegister dst, XMMRegister src); - void divss(XMMRegister dst, Address src); - void addsd(XMMRegister dst, XMMRegister src); - void addsd(XMMRegister dst, Address src); - void subsd(XMMRegister dst, XMMRegister src); - void subsd(XMMRegister dst, Address src); - void mulsd(XMMRegister dst, XMMRegister src); - void mulsd(XMMRegister dst, Address src); - void divsd(XMMRegister dst, XMMRegister src); - void divsd(XMMRegister dst, Address src); - - // We only need the double form - void sqrtsd(XMMRegister dst, XMMRegister src); - void sqrtsd(XMMRegister dst, Address src); - - void xorps(XMMRegister dst, XMMRegister src); - void xorps(XMMRegister dst, Address src); - void xorpd(XMMRegister dst, XMMRegister src); - void xorpd(XMMRegister dst, Address src); - - void cvtsi2ssl(XMMRegister dst, Register src); - void cvtsi2ssq(XMMRegister dst, Register src); - void cvtsi2sdl(XMMRegister dst, Register src); - void cvtsi2sdq(XMMRegister dst, Register src); - void cvttss2sil(Register dst, XMMRegister src); // truncates - void cvttss2siq(Register dst, XMMRegister src); // truncates - void cvttsd2sil(Register dst, XMMRegister src); // truncates - void cvttsd2siq(Register dst, XMMRegister src); // truncates - void cvtss2sd(XMMRegister dst, XMMRegister src); - void cvtsd2ss(XMMRegister dst, XMMRegister src); - - void pxor(XMMRegister dst, Address src); // Xor Packed Byte Integer Values - void pxor(XMMRegister dst, XMMRegister src); // Xor Packed Byte Integer Values - - void movdqa(XMMRegister dst, Address src); // Move Aligned Double Quadword - void movdqa(XMMRegister dst, XMMRegister src); - void movdqa(Address dst, XMMRegister src); - - void movq(XMMRegister dst, Address src); - void movq(Address dst, XMMRegister src); - - void pshufd(XMMRegister dst, XMMRegister src, int mode); // Shuffle Packed Doublewords - void pshufd(XMMRegister dst, Address src, int mode); - void pshuflw(XMMRegister dst, XMMRegister src, int mode); // Shuffle Packed Low Words - void pshuflw(XMMRegister dst, Address src, int mode); - - void psrlq(XMMRegister dst, int shift); // Shift Right Logical Quadword Immediate - - void punpcklbw(XMMRegister dst, XMMRegister src); // Interleave Low Bytes - void punpcklbw(XMMRegister dst, Address src); -}; - - -// MacroAssembler extends Assembler by frequently used macros. -// -// Instructions for which a 'better' code sequence exists depending -// on arguments should also go in here. - -class MacroAssembler : public Assembler { - friend class LIR_Assembler; - protected: - - Address as_Address(AddressLiteral adr); - Address as_Address(ArrayAddress adr); - - // Support for VM calls - // - // This is the base routine called by the different versions of - // call_VM_leaf. The interpreter may customize this version by - // overriding it for its purposes (e.g., to save/restore additional - // registers when doing a VM call). 
- - virtual void call_VM_leaf_base( - address entry_point, // the entry point - int number_of_arguments // the number of arguments to - // pop after the call - ); - - // This is the base routine called by the different versions of - // call_VM. The interpreter may customize this version by overriding - // it for its purposes (e.g., to save/restore additional registers - // when doing a VM call). - // - // If no java_thread register is specified (noreg) than rdi will be - // used instead. call_VM_base returns the register which contains - // the thread upon return. If a thread register has been specified, - // the return value will correspond to that register. If no - // last_java_sp is specified (noreg) than rsp will be used instead. - virtual void call_VM_base( // returns the register - // containing the thread upon - // return - Register oop_result, // where an oop-result ends up - // if any; use noreg otherwise - Register java_thread, // the thread if computed - // before ; use noreg otherwise - Register last_java_sp, // to set up last_Java_frame in - // stubs; use noreg otherwise - address entry_point, // the entry point - int number_of_arguments, // the number of arguments (w/o - // thread) to pop after the - // call - bool check_exceptions // whether to check for pending - // exceptions after return - ); - - // This routines should emit JVMTI PopFrame handling and ForceEarlyReturn code. - // The implementation is only non-empty for the InterpreterMacroAssembler, - // as only the interpreter handles PopFrame and ForceEarlyReturn requests. - virtual void check_and_handle_popframe(Register java_thread); - virtual void check_and_handle_earlyret(Register java_thread); - - void call_VM_helper(Register oop_result, - address entry_point, - int number_of_arguments, - bool check_exceptions = true); - - public: - MacroAssembler(CodeBuffer* code) : Assembler(code) {} - - // Support for NULL-checks - // - // Generates code that causes a NULL OS exception if the content of - // reg is NULL. If the accessed location is M[reg + offset] and the - // offset is known, provide the offset. No explicit code generation - // is needed if the offset is within a certain range (0 <= offset <= - // page_size). - void null_check(Register reg, int offset = -1); - static bool needs_explicit_null_check(int offset); - - // Required platform-specific helpers for Label::patch_instructions. - // They _shadow_ the declarations in AbstractAssembler, which are undefined. - void pd_patch_instruction(address branch, address target); -#ifndef PRODUCT - static void pd_print_patched_instruction(address branch); -#endif - - - // The following 4 methods return the offset of the appropriate move - // instruction. 
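A conceptual sketch of the null-check policy described above; this is not the patch's implementation, only an illustration under the assumption that an explicit check simply provokes the fault by touching the object header:

    void conceptual_null_check(MacroAssembler* masm, Register reg, int offset) {
      if (MacroAssembler::needs_explicit_null_check(offset)) {
        // Offset may fall outside the protected range: touch [reg + 0] so a
        // NULL reg faults here, at a pc the signal handler can attribute to this check.
        masm->cmpl(rax, Address(reg, 0));
      }
      // Otherwise the real access at [reg + offset] lands in the unmapped
      // first page when reg is NULL, and the OS exception does the work.
    }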
Note: these are 32 bit instructions - - // Support for fast byte/word loading with zero extension (depending - // on particular CPU) - int load_unsigned_byte(Register dst, Address src); - int load_unsigned_word(Register dst, Address src); - - // Support for fast byte/word loading with sign extension (depending - // on particular CPU) - int load_signed_byte(Register dst, Address src); - int load_signed_word(Register dst, Address src); - - // Support for inc/dec with optimal instruction selection depending - // on value - void incrementl(Register reg, int value = 1); - void decrementl(Register reg, int value = 1); - void incrementq(Register reg, int value = 1); - void decrementq(Register reg, int value = 1); - - void incrementl(Address dst, int value = 1); - void decrementl(Address dst, int value = 1); - void incrementq(Address dst, int value = 1); - void decrementq(Address dst, int value = 1); - - // Support optimal SSE move instructions. - void movflt(XMMRegister dst, XMMRegister src) { - if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; } - else { movss (dst, src); return; } - } - - void movflt(XMMRegister dst, Address src) { movss(dst, src); } - - void movflt(XMMRegister dst, AddressLiteral src); - - void movflt(Address dst, XMMRegister src) { movss(dst, src); } - - void movdbl(XMMRegister dst, XMMRegister src) { - if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; } - else { movsd (dst, src); return; } - } - - void movdbl(XMMRegister dst, AddressLiteral src); - - void movdbl(XMMRegister dst, Address src) { - if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; } - else { movlpd(dst, src); return; } - } - - void movdbl(Address dst, XMMRegister src) { movsd(dst, src); } - - void incrementl(AddressLiteral dst); - void incrementl(ArrayAddress dst); - - // Alignment - void align(int modulus); - - // Misc - void fat_nop(); // 5 byte nop - - - // C++ bool manipulation - - void movbool(Register dst, Address src); - void movbool(Address dst, bool boolconst); - void movbool(Address dst, Register src); - void testbool(Register dst); - - // Stack frame creation/removal - void enter(); - void leave(); - - // Support for getting the JavaThread pointer (i.e.; a reference to - // thread-local information) The pointer will be loaded into the - // thread register. - void get_thread(Register thread); - - void int3(); - - // Support for VM calls - // - // It is imperative that all calls into the VM are handled via the - // call_VM macros. They make sure that the stack linkage is setup - // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points - // while call_VM_leaf's correspond to LEAF entry points. 
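As a concrete illustration of the ENTRY/LEAF distinction drawn above, using the overloads declared just below (the entry points and argument registers here are placeholders, not names from this patch):

    void generate_vm_calls(MacroAssembler* masm,
                           address entry_routine,   // hypothetical ENTRY-style routine
                           address leaf_routine) {  // hypothetical LEAF routine
      // ENTRY call: sets up the stack linkage, result oop (if any) arrives in rax,
      // pending exceptions are checked on return.
      masm->call_VM(rax, entry_routine, rbx, rcx);
      // LEAF call: no oop result, no exception check, lighter-weight linkage.
      masm->call_VM_leaf(leaf_routine, rbx, rcx);
    }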
- void call_VM(Register oop_result, - address entry_point, - bool check_exceptions = true); - void call_VM(Register oop_result, - address entry_point, - Register arg_1, - bool check_exceptions = true); - void call_VM(Register oop_result, - address entry_point, - Register arg_1, Register arg_2, - bool check_exceptions = true); - void call_VM(Register oop_result, - address entry_point, - Register arg_1, Register arg_2, Register arg_3, - bool check_exceptions = true); - - // Overloadings with last_Java_sp - void call_VM(Register oop_result, - Register last_java_sp, - address entry_point, - int number_of_arguments = 0, - bool check_exceptions = true); - void call_VM(Register oop_result, - Register last_java_sp, - address entry_point, - Register arg_1, bool - check_exceptions = true); - void call_VM(Register oop_result, - Register last_java_sp, - address entry_point, - Register arg_1, Register arg_2, - bool check_exceptions = true); - void call_VM(Register oop_result, - Register last_java_sp, - address entry_point, - Register arg_1, Register arg_2, Register arg_3, - bool check_exceptions = true); - - void call_VM_leaf(address entry_point, - int number_of_arguments = 0); - void call_VM_leaf(address entry_point, - Register arg_1); - void call_VM_leaf(address entry_point, - Register arg_1, Register arg_2); - void call_VM_leaf(address entry_point, - Register arg_1, Register arg_2, Register arg_3); - - // last Java Frame (fills frame anchor) - void set_last_Java_frame(Register last_java_sp, - Register last_java_fp, - address last_java_pc); - void reset_last_Java_frame(bool clear_fp, bool clear_pc); - - // Stores - void store_check(Register obj); // store check for - // obj - register is - // destroyed - // afterwards - void store_check(Register obj, Address dst); // same as above, dst - // is exact store - // location (reg. is - // destroyed) - - // split store_check(Register obj) to enhance instruction interleaving - void store_check_part_1(Register obj); - void store_check_part_2(Register obj); - - // C 'boolean' to Java boolean: x == 0 ? 0 : 1 - void c2bool(Register x); - - // Int division/reminder for Java - // (as idivl, but checks for special case as described in JVM spec.) - // returns idivl instruction offset for implicit exception handling - int corrected_idivl(Register reg); - // Long division/reminder for Java - // (as idivq, but checks for special case as described in JVM spec.) 
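The "special case as described in JVM spec" is the one quotient that overflows two's-complement division: min_jint / -1 (and min_jlong / -1 for the quadword form), where the hardware idiv instruction faults but Java requires the result to wrap to the dividend. A plain, self-contained rendering of the rule, not code from this patch:

    #include <climits>

    inline int java_idiv(int dividend, int divisor) {
      // divisor == 0 is handled via the implicit-exception offset these
      // helpers return; it is not modelled here.
      if (dividend == INT_MIN && divisor == -1) {
        return INT_MIN;           // idivl would raise #DE; the spec says wrap
      }
      return dividend / divisor;  // every other case maps directly onto idivl
    }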
- // returns idivq instruction offset for implicit exception handling - int corrected_idivq(Register reg); - - // Push and pop integer/fpu/cpu state - void push_IU_state(); - void pop_IU_state(); - - void push_FPU_state(); - void pop_FPU_state(); - - void push_CPU_state(); - void pop_CPU_state(); - - // Sign extension - void sign_extend_short(Register reg); - void sign_extend_byte(Register reg); - - // Division by power of 2, rounding towards 0 - void division_with_shift(Register reg, int shift_value); - - // Round up to a power of two - void round_to_l(Register reg, int modulus); - void round_to_q(Register reg, int modulus); - - // allocation - void eden_allocate( - Register obj, // result: pointer to object after - // successful allocation - Register var_size_in_bytes, // object size in bytes if unknown at - // compile time; invalid otherwise - int con_size_in_bytes, // object size in bytes if known at - // compile time - Register t1, // temp register - Label& slow_case // continuation point if fast - // allocation fails - ); - void tlab_allocate( - Register obj, // result: pointer to object after - // successful allocation - Register var_size_in_bytes, // object size in bytes if unknown at - // compile time; invalid otherwise - int con_size_in_bytes, // object size in bytes if known at - // compile time - Register t1, // temp register - Register t2, // temp register - Label& slow_case // continuation point if fast - // allocation fails - ); - void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); - - //---- - - // Debugging - - // only if +VerifyOops - void verify_oop(Register reg, const char* s = "broken oop"); - void verify_oop_addr(Address addr, const char * s = "broken oop addr"); - - // only if +VerifyFPU - void verify_FPU(int stack_depth, const char* s = "illegal FPU state") {} - - // prints msg, dumps registers and stops execution - void stop(const char* msg); - - // prints message and continues - void warn(const char* msg); - - static void debug(char* msg, int64_t pc, int64_t regs[]); - - void os_breakpoint(); - - void untested() - { - stop("untested"); - } - - void unimplemented(const char* what = "") - { - char* b = new char[1024]; - sprintf(b, "unimplemented: %s", what); - stop(b); - } - - void should_not_reach_here() - { - stop("should not reach here"); - } - - // Stack overflow checking - void bang_stack_with_offset(int offset) - { - // stack grows down, caller passes positive offset - assert(offset > 0, "must bang with negative offset"); - movl(Address(rsp, (-offset)), rax); - } - - // Writes to stack successive pages until offset reached to check for - // stack overflow + shadow pages. Also, clobbers tmp - void bang_stack_size(Register offset, Register tmp); - - // Support for serializing memory accesses between threads. - void serialize_memory(Register thread, Register tmp); - - void verify_tlab(); - - // Biased locking support - // lock_reg and obj_reg must be loaded up with the appropriate values. - // swap_reg must be rax and is killed. - // tmp_reg must be supplied and is killed. - // If swap_reg_contains_mark is true then the code assumes that the - // mark word of the object has already been loaded into swap_reg. - // Optional slow case is for implementations (interpreter and C1) which branch to - // slow case directly. Leaves condition codes set for C2's Fast_Lock node. - // Returns offset of first potentially-faulting instruction for null - // check info (currently consumed only by C1). 
If - // swap_reg_contains_mark is true then returns -1 as it is assumed - // the calling code has already passed any potential faults. - int biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg, - bool swap_reg_contains_mark, - Label& done, Label* slow_case = NULL, - BiasedLockingCounters* counters = NULL); - void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done); - - Condition negate_condition(Condition cond); - - // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit - // operands. In general the names are modified to avoid hiding the instruction in Assembler - // so that we don't need to implement all the varieties in the Assembler with trivial wrappers - // here in MacroAssembler. The major exception to this rule is call - - // Arithmetics - - void cmp8(AddressLiteral src1, int8_t imm32); - - void cmp32(AddressLiteral src1, int32_t src2); - // compare reg - mem, or reg - &mem - void cmp32(Register src1, AddressLiteral src2); - - void cmp32(Register src1, Address src2); - -#ifndef _LP64 - void cmpoop(Address dst, jobject obj); - void cmpoop(Register dst, jobject obj); -#endif // _LP64 - - // NOTE src2 must be the lval. This is NOT an mem-mem compare - void cmpptr(Address src1, AddressLiteral src2); - - void cmpptr(Register src1, AddressLiteral src); - - // will be cmpreg(?) - void cmp64(Register src1, AddressLiteral src); - - void cmpxchgptr(Register reg, Address adr); - void cmpxchgptr(Register reg, AddressLiteral adr); - - // Helper functions for statistics gathering. - // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes. - void cond_inc32(Condition cond, AddressLiteral counter_addr); - // Unconditional atomic increment. 
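A hypothetical use of the statistics helpers above (the counter variable and the condition are illustrative only): bump a counter when the preceding compare set the equal flag, without clobbering the condition codes the following code still needs:

    static int _fast_path_hits = 0;   // illustrative counter, not defined in this patch

    void count_fast_path_hit(MacroAssembler* masm) {
      // Atomic on MP systems, and leaves the flags exactly as it found them.
      masm->cond_inc32(Assembler::equal,
                       ExternalAddress((address)&_fast_path_hits));
    }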
- void atomic_incl(AddressLiteral counter_addr); - - - void lea(Register dst, AddressLiteral src); - void lea(Register dst, Address src); - - - // Calls - void call(Label& L, relocInfo::relocType rtype); - void call(Register entry); - void call(AddressLiteral entry); - - // Jumps - - // 32bit can do a case table jump in one instruction but we no longer allow the base - // to be installed in the Address class - void jump(ArrayAddress entry); - - void jump(AddressLiteral entry); - void jump_cc(Condition cc, AddressLiteral dst); - - // Floating - - void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } - void ldmxcsr(AddressLiteral src); - -private: - // these are private because users should be doing movflt/movdbl - - void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } - void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } - void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } - void movss(XMMRegister dst, AddressLiteral src); - - void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } - void movlpd(XMMRegister dst, AddressLiteral src); - -public: - - - void xorpd(XMMRegister dst, XMMRegister src) {Assembler::xorpd(dst, src); } - void xorpd(XMMRegister dst, Address src) {Assembler::xorpd(dst, src); } - void xorpd(XMMRegister dst, AddressLiteral src); - - void xorps(XMMRegister dst, XMMRegister src) {Assembler::xorps(dst, src); } - void xorps(XMMRegister dst, Address src) {Assembler::xorps(dst, src); } - void xorps(XMMRegister dst, AddressLiteral src); - - - // Data - - void movoop(Register dst, jobject obj); - void movoop(Address dst, jobject obj); - - void movptr(ArrayAddress dst, Register src); - void movptr(Register dst, AddressLiteral src); - - void movptr(Register dst, intptr_t src); - void movptr(Address dst, intptr_t src); - - void movptr(Register dst, ArrayAddress src); - - // to avoid hiding movl - void mov32(AddressLiteral dst, Register src); - void mov32(Register dst, AddressLiteral src); - - void pushoop(jobject obj); - - // Can push value or effective address - void pushptr(AddressLiteral src); - -}; - -/** - * class SkipIfEqual: - * - * Instantiating this class will result in assembly code being output that will - * jump around any code emitted between the creation of the instance and it's - * automatic destruction at the end of a scope block, depending on the value of - * the flag passed to the constructor, which will be checked at run-time. - */ -class SkipIfEqual { - private: - MacroAssembler* _masm; - Label _label; - - public: - SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value); - ~SkipIfEqual(); -}; - - -#ifdef ASSERT -inline bool AbstractAssembler::pd_check_instruction_mark() { return true; } -#endif --- old/hotspot/src/cpu/x86/vm/assembler_x86_64.inline.hpp 2009-08-01 04:21:42.460707167 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,89 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)assembler_x86_64.inline.hpp 1.14 07/06/19 09:08:00 JVM" -#endif -/* - * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -inline void Assembler::emit_long64(jlong x) { - *(jlong*) _code_pos = x; - _code_pos += sizeof(jlong); - code_section()->set_end(_code_pos); -} - -inline void MacroAssembler::pd_patch_instruction(address branch, address target) { - unsigned char op = branch[0]; - assert(op == 0xE8 /* call */ || - op == 0xE9 /* jmp */ || - op == 0xEB /* short jmp */ || - (op & 0xF0) == 0x70 /* short jcc */ || - op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */, - "Invalid opcode at patch point"); - - if (op == 0xEB || (op & 0xF0) == 0x70) { - // short offset operators (jmp and jcc) - char* disp = (char*) &branch[1]; - int imm8 = target - (address) &disp[1]; - guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset"); - *disp = imm8; - } else { - int* disp = (int*) &branch[(op == 0x0F)? 2: 1]; - int imm32 = target - (address) &disp[1]; - *disp = imm32; - } -} - -#ifndef PRODUCT -inline void MacroAssembler::pd_print_patched_instruction(address branch) { - const char* s; - unsigned char op = branch[0]; - if (op == 0xE8) { - s = "call"; - } else if (op == 0xE9 || op == 0xEB) { - s = "jmp"; - } else if ((op & 0xF0) == 0x70) { - s = "jcc"; - } else if (op == 0x0F) { - s = "jcc"; - } else { - s = "????"; - } - tty->print("%s (unresolved)", s); -} -#endif // ndef PRODUCT - -inline void MacroAssembler::movptr(Address dst, intptr_t src) { -#ifdef _LP64 - Assembler::mov64(dst, src); -#else - Assembler::movl(dst, src); -#endif // _LP64 -} - -inline void MacroAssembler::movptr(Register dst, intptr_t src) { -#ifdef _LP64 - Assembler::mov64(dst, src); -#else - Assembler::movl(dst, src); -#endif // _LP64 -} --- old/hotspot/src/cpu/x86/vm/disassembler_x86.cpp 2009-08-01 04:21:42.777586003 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,205 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)disassembler_x86.cpp 1.48 07/09/17 09:37:23 JVM" -#endif -/* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -# include "incls/_precompiled.incl" -# include "incls/_disassembler_x86.cpp.incl" - -#ifndef PRODUCT - -void* Disassembler::_library = NULL; -Disassembler::decode_func Disassembler::_decode_instruction = NULL; - -bool Disassembler::load_library() { - if (_library == NULL) { - char buf[1024]; - char ebuf[1024]; - sprintf(buf, "disassembler%s", os::dll_file_extension()); - _library = hpi::dll_load(buf, ebuf, sizeof ebuf); - if (_library != NULL) { - tty->print_cr("Loaded disassembler"); - _decode_instruction = CAST_TO_FN_PTR(Disassembler::decode_func, hpi::dll_lookup(_library, "decode_instruction")); - } - } - return (_library != NULL) && (_decode_instruction != NULL); -} - -class x86_env : public DisassemblerEnv { - private: - nmethod* code; - outputStream* output; - public: - x86_env(nmethod* rcode, outputStream* routput) { - code = rcode; - output = routput; - } - void print_label(intptr_t value); - void print_raw(char* str) { output->print_raw(str); } - void print(char* format, ...); - char* string_for_offset(intptr_t value); - char* string_for_constant(unsigned char* pc, intptr_t value, int is_decimal); -}; - - -void x86_env::print_label(intptr_t value) { - if (!Universe::is_fully_initialized()) { - output->print(INTPTR_FORMAT, value); - return; - } - address adr = (address) value; - if (StubRoutines::contains(adr)) { - StubCodeDesc* desc = StubCodeDesc::desc_for(adr); - const char * desc_name = "unknown stub"; - if (desc != NULL) { - desc_name = desc->name(); - } - output->print("Stub::%s", desc_name); - if (WizardMode) output->print(" " INTPTR_FORMAT, value); - } else { - output->print(INTPTR_FORMAT, value); - } -} - -void x86_env::print(char* format, ...) { - va_list ap; - va_start(ap, format); - output->vprint(format, ap); - va_end(ap); -} - -char* x86_env::string_for_offset(intptr_t value) { - stringStream st; - if (!Universe::is_fully_initialized()) { - st.print(INTX_FORMAT, value); - return st.as_string(); - } - BarrierSet* bs = Universe::heap()->barrier_set(); - BarrierSet::Name bsn = bs->kind(); - if (bs->kind() == BarrierSet::CardTableModRef && - (jbyte*) value == ((CardTableModRefBS*)(bs))->byte_map_base) { - st.print("word_map_base"); - } else { - st.print(INTX_FORMAT, value); - } - return st.as_string(); -} - -char* x86_env::string_for_constant(unsigned char* pc, intptr_t value, int is_decimal) { - stringStream st; - oop obj = NULL; - if (code && ((obj = code->embeddedOop_at(pc)) != NULL)) { - obj->print_value_on(&st); - } else { - if (is_decimal == 1) { - st.print(INTX_FORMAT, value); - } else { - st.print(INTPTR_FORMAT, value); - } - } - return st.as_string(); -} - - - -address Disassembler::decode_instruction(address start, DisassemblerEnv* env) { - return ((decode_func) _decode_instruction)(start, env); -} - - -void Disassembler::decode(CodeBlob* cb, outputStream* st) { - st = st ? st : tty; - st->print_cr("Decoding CodeBlob " INTPTR_FORMAT, cb); - decode(cb->instructions_begin(), cb->instructions_end(), st); -} - - -void Disassembler::decode(u_char* begin, u_char* end, outputStream* st) { - st = st ? 
st : tty; - - const int show_bytes = false; // for disassembler debugging - - if (!load_library()) { - st->print_cr("Could not load disassembler"); - return; - } - - x86_env env(NULL, st); - unsigned char* p = (unsigned char*) begin; - CodeBlob* cb = CodeCache::find_blob_unsafe(begin); - while (p < (unsigned char*) end) { - if (cb != NULL) { - cb->print_block_comment(st, (intptr_t)(p - cb->instructions_begin())); - } - - unsigned char* p0 = p; - st->print(" " INTPTR_FORMAT ": ", p); - p = decode_instruction(p, &env); - if (show_bytes) { - st->print("\t\t\t"); - while (p0 < p) st->print("%x ", *p0++); - } - st->cr(); - } -} - - -void Disassembler::decode(nmethod* nm, outputStream* st) { - st = st ? st : tty; - - st->print_cr("Decoding compiled method " INTPTR_FORMAT ":", nm); - st->print("Code:"); - st->cr(); - - if (!load_library()) { - st->print_cr("Could not load disassembler"); - return; - } - x86_env env(nm, st); - unsigned char* p = nm->instructions_begin(); - unsigned char* end = nm->instructions_end(); - while (p < end) { - if (p == nm->entry_point()) st->print_cr("[Entry Point]"); - if (p == nm->verified_entry_point()) st->print_cr("[Verified Entry Point]"); - if (p == nm->exception_begin()) st->print_cr("[Exception Handler]"); - if (p == nm->stub_begin()) st->print_cr("[Stub Code]"); - if (p == nm->consts_begin()) st->print_cr("[Constants]"); - nm->print_block_comment(st, (intptr_t)(p - nm->instructions_begin())); - unsigned char* p0 = p; - st->print(" " INTPTR_FORMAT ": ", p); - p = decode_instruction(p, &env); - nm->print_code_comment_on(st, 40, p0, p); - st->cr(); - // Output pc bucket ticks if we have any - address bucket_pc = FlatProfiler::bucket_start_for(p); - if (bucket_pc != NULL && bucket_pc > p0 && bucket_pc <= p) { - int bucket_count = FlatProfiler::bucket_count_for(bucket_pc); - tty->print_cr("[%d]", bucket_count); - } - } -} - -#endif // PRODUCT - --- old/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86_32.cpp 2009-08-01 04:21:43.106954985 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,52 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)assembler_linux_x86_32.cpp 1.20 07/09/17 10:00:00 JVM" -#endif -/* - * Copyright 1999-2003 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
- * - */ - -#include "incls/_precompiled.incl" -#include "incls/_assembler_linux_x86_32.cpp.incl" - -void MacroAssembler::int3() { - call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); -} - -void MacroAssembler::get_thread(Register thread) { - movl(thread, rsp); - shrl(thread, PAGE_SHIFT); - - ExternalAddress tls_base((address)ThreadLocalStorage::sp_map_addr()); - Address index(noreg, thread, Address::times_4); - ArrayAddress tls(tls_base, index); - - movptr(thread, tls); -} - -bool MacroAssembler::needs_explicit_null_check(int offset) { - // Linux kernel guarantees that the first page is always unmapped. Don't - // assume anything more than that. - bool offset_in_first_page = 0 <= offset && offset < os::vm_page_size(); - return !offset_in_first_page; -} - --- old/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86_64.cpp 2009-08-01 04:21:43.432502976 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,76 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)assembler_linux_x86_64.cpp 1.10 07/09/17 10:00:00 JVM" -#endif -/* - * Copyright 2003 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -#include "incls/_precompiled.incl" -#include "incls/_assembler_linux_x86_64.cpp.incl" - -void MacroAssembler::int3() { - call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); -} - -void MacroAssembler::get_thread(Register thread) { - // call pthread_getspecific - // void * pthread_getspecific(pthread_key_t key); - if (thread != rax) { - pushq(rax); - } - pushq(rdi); - pushq(rsi); - pushq(rdx); - pushq(rcx); - pushq(r8); - pushq(r9); - pushq(r10); - // XXX - movq(r10, rsp); - andq(rsp, -16); - pushq(r10); - pushq(r11); - - movl(rdi, ThreadLocalStorage::thread_index()); - call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific))); - - popq(r11); - popq(rsp); - popq(r10); - popq(r9); - popq(r8); - popq(rcx); - popq(rdx); - popq(rsi); - popq(rdi); - if (thread != rax) { - movq(thread, rax); - popq(rax); - } -} - -// NOTE: since the linux kernel resides at the low end of -// user address space, no null pointer check is needed. -bool MacroAssembler::needs_explicit_null_check(int offset) { - return offset < 0 || offset >= 0x100000; -} --- old/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_32.cpp 2009-08-01 04:21:43.757893028 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,93 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)assembler_solaris_x86_32.cpp 1.23 07/09/17 10:00:19 JVM" -#endif -/* - * Copyright 1999-2003 Sun Microsystems, Inc. 
All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -#include "incls/_precompiled.incl" -#include "incls/_assembler_solaris_x86_32.cpp.incl" - - -void MacroAssembler::int3() { - pushl(rax); - pushl(rdx); - pushl(rcx); - call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); - popl(rcx); - popl(rdx); - popl(rax); -} - -void MacroAssembler::get_thread(Register thread) { - - // Try to emit a Solaris-specific fast TSD/TLS accessor. - ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode () ; - if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) { // T1 - // Use thread as a temporary: mov r, gs:[0]; mov r, [r+tlsOffset] - emit_byte (Assembler::GS_segment) ; - // ExternalAddress doesn't work because it can't take NULL - AddressLiteral null(0, relocInfo::none); - movptr (thread, null); - movl (thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset())) ; - return ; - } else - if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) { // T2 - // mov r, gs:[tlsOffset] - emit_byte (Assembler::GS_segment) ; - AddressLiteral tls((address)ThreadLocalStorage::pd_getTlsOffset(), relocInfo::none); - movptr (thread, tls); - return ; - } - - // slow call to of thr_getspecific - // int thr_getspecific(thread_key_t key, void **value); - // Consider using pthread_getspecific instead. - - pushl(0); // allocate space for return value - if (thread != rax) pushl(rax); // save rax, if caller still wants it - pushl(rcx); // save caller save - pushl(rdx); // save caller save - if (thread != rax) { - leal(thread, Address(rsp, 3 * sizeof(int))); // address of return value - } else { - leal(thread, Address(rsp, 2 * sizeof(int))); // address of return value - } - pushl(thread); // and pass the address - pushl(ThreadLocalStorage::thread_index()); // the key - call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific))); - increment(rsp, 2 * wordSize); - popl(rdx); - popl(rcx); - if (thread != rax) popl(rax); - popl(thread); -} - -bool MacroAssembler::needs_explicit_null_check(int offset) { - // Identical to Sparc/Solaris code - bool offset_in_first_page = 0 <= offset && offset < os::vm_page_size(); - return !offset_in_first_page; -} - - - --- old/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_64.cpp 2009-08-01 04:21:44.066691879 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,98 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)assembler_solaris_x86_64.cpp 1.11 07/09/17 10:00:19 JVM" -#endif -/* - * Copyright 2004-2005 Sun Microsystems, Inc. All Rights Reserved. 
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -#include "incls/_precompiled.incl" -#include "incls/_assembler_solaris_x86_64.cpp.incl" - -void MacroAssembler::int3() { - call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); -} - -void MacroAssembler::get_thread(Register thread) { - // Try to emit a Solaris-specific fast TSD/TLS accessor. - ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode(); - if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) { // T1 - // Use thread as a temporary: mov r, fs:[0]; mov r, [r+tlsOffset] - emit_byte(Assembler::FS_segment); - movq(thread, Address(NULL, relocInfo::none)); - movq(thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset())); - return; - } else if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) { // T2 - // mov r, fs:[tlsOffset] - emit_byte(Assembler::FS_segment); - ExternalAddress tls_off((address) ThreadLocalStorage::pd_getTlsOffset()); - movptr(thread, tls_off); - return; - } - - // slow call to of thr_getspecific - // int thr_getspecific(thread_key_t key, void **value); - // Consider using pthread_getspecific instead. - - if (thread != rax) { - pushq(rax); - } - pushq(0); // space for return value - pushq(rdi); - pushq(rsi); - leaq(rsi, Address(rsp, 16)); // pass return value address - pushq(rdx); - pushq(rcx); - pushq(r8); - pushq(r9); - pushq(r10); - // XXX - movq(r10, rsp); - andq(rsp, -16); - pushq(r10); - pushq(r11); - - movl(rdi, ThreadLocalStorage::thread_index()); - call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific))); - - popq(r11); - popq(rsp); - popq(r10); - popq(r9); - popq(r8); - popq(rcx); - popq(rdx); - popq(rsi); - popq(rdi); - popq(thread); // load return value - if (thread != rax) { - popq(rax); - } -} - -bool MacroAssembler::needs_explicit_null_check(int offset) { - // Identical to Sparc/Solaris code - bool offset_in_first_page = 0 <= offset && offset < os::vm_page_size(); - return !offset_in_first_page; -} - - --- old/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86_32.cpp 2009-08-01 04:21:44.375499159 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,68 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)assembler_windows_x86_32.cpp 1.18 07/09/17 10:00:34 JVM" -#endif -/* - * Copyright 1999-2003 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -#include "incls/_precompiled.incl" -#include "incls/_assembler_windows_x86_32.cpp.incl" - - -void MacroAssembler::int3() { - emit_byte(0xCC); -} - -// The current scheme to accelerate access to the thread -// pointer is to store the current thread in the os_exception_wrapper -// and reference the current thread from stubs and compiled code -// via the FS register. FS[0] contains a pointer to the structured -// exception block which is actually a stack address. The first time -// we call the os exception wrapper, we calculate and store the -// offset from this exception block and use that offset here. -// -// The last mechanism we used was problematic in that the -// the offset we had hard coded in the VM kept changing as Microsoft -// evolved the OS. -// -// Warning: This mechanism assumes that we only attempt to get the -// thread when we are nested below a call wrapper. -// -// movl reg, fs:[0] Get exeception pointer -// movl reg, [reg + thread_ptr_offset] Load thread -// -void MacroAssembler::get_thread(Register thread) { - // can't use ExternalAddress because it can't take NULL - AddressLiteral null(0, relocInfo::none); - - prefix(FS_segment); - movptr(thread, null); - assert(ThreadLocalStorage::get_thread_ptr_offset() != 0, - "Thread Pointer Offset has not been initialized"); - movl(thread, Address(thread, ThreadLocalStorage::get_thread_ptr_offset())); -} - -bool MacroAssembler::needs_explicit_null_check(int offset) { - return offset < 0 || (int)os::vm_page_size() <= offset; -} - --- old/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86_64.cpp 2009-08-01 04:21:44.682238306 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,75 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_SRC -#pragma ident "@(#)assembler_windows_x86_64.cpp 1.14 07/09/17 10:00:34 JVM" -#endif -/* - * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -#include "incls/_precompiled.incl" -#include "incls/_assembler_windows_x86_64.cpp.incl" - - -void MacroAssembler::int3() { - emit_byte(0xCC); -} - -// call (Thread*)TlsGetValue(thread_index()); -void MacroAssembler::get_thread(Register thread) { - if (thread != rax) { - pushq(rax); - } - pushq(rdi); - pushq(rsi); - pushq(rdx); - pushq(rcx); - pushq(r8); - pushq(r9); - pushq(r10); - // XXX - movq(r10, rsp); - andq(rsp, -16); - pushq(r10); - pushq(r11); - - movl(c_rarg0, ThreadLocalStorage::thread_index()); - call(RuntimeAddress((address)TlsGetValue)); - - popq(r11); - popq(rsp); - popq(r10); - popq(r9); - popq(r8); - popq(rcx); - popq(rdx); - popq(rsi); - popq(rdi); - if (thread != rax) { - movq(thread, rax); - popq(rax); - } -} - -bool MacroAssembler::needs_explicit_null_check(int offset) { - return offset < 0 || (int)os::vm_page_size() <= offset; -} - --- old/hotspot/src/share/vm/compiler/disassemblerEnv.hpp 2009-08-01 04:21:45.004576459 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,39 +0,0 @@ -#ifdef USE_PRAGMA_IDENT_HDR -#pragma ident "@(#)disassemblerEnv.hpp 1.18 07/05/05 17:05:24 JVM" -#endif -/* - * Copyright 1997-2002 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -// Call-back interface for external disassembler -class DisassemblerEnv { - public: - // printing - virtual void print_label(intptr_t value) = 0; - virtual void print_raw(char* str) = 0; - virtual void print(char* format, ...) = 0; - // helpers - virtual char* string_for_offset(intptr_t value) = 0; - virtual char* string_for_constant(unsigned char* pc, intptr_t value, int is_decimal) = 0; -}; - --- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.cpp 2009-08-01 04:21:45.318125649 +0100 +++ /dev/null 2009-07-29 04:07:07.141863912 +0100 @@ -1,314 +0,0 @@ -/* - * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - */ - -// CopyrightVersion 1.2 - -# include "incls/_precompiled.incl" -# include "incls/_concurrentGCThread.cpp.incl" - -bool ConcurrentGCThread::_should_terminate = false; -bool ConcurrentGCThread::_has_terminated = false; -int ConcurrentGCThread::_CGC_flag = CGC_nil; - -SuspendibleThreadSet ConcurrentGCThread::_sts; - -ConcurrentGCThread::ConcurrentGCThread() { - _sts.initialize(); -}; - -void ConcurrentGCThread::stopWorldAndDo(VoidClosure* op) { - MutexLockerEx x(Heap_lock, - Mutex::_no_safepoint_check_flag); - // warning("CGC: about to try stopping world"); - SafepointSynchronize::begin(); - // warning("CGC: successfully stopped world"); - op->do_void(); - SafepointSynchronize::end(); - // warning("CGC: successfully restarted world"); -} - -void ConcurrentGCThread::safepoint_synchronize() { - _sts.suspend_all(); -} - -void ConcurrentGCThread::safepoint_desynchronize() { - _sts.resume_all(); -} - -void ConcurrentGCThread::create_and_start() { - if (os::create_thread(this, os::cgc_thread)) { - // XXX: need to set this to low priority - // unless "agressive mode" set; priority - // should be just less than that of VMThread. - os::set_priority(this, NearMaxPriority); - if (!_should_terminate && !DisableStartThread) { - os::start_thread(this); - } - } -} - -void ConcurrentGCThread::initialize_in_thread() { - this->record_stack_base_and_size(); - this->initialize_thread_local_storage(); - this->set_active_handles(JNIHandleBlock::allocate_block()); - // From this time Thread::current() should be working. - assert(this == Thread::current(), "just checking"); -} - -void ConcurrentGCThread::wait_for_universe_init() { - MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); - while (!is_init_completed() && !_should_terminate) { - CGC_lock->wait(Mutex::_no_safepoint_check_flag, 200); - } -} - -void ConcurrentGCThread::terminate() { - // Signal that it is terminated - { - MutexLockerEx mu(Terminator_lock, - Mutex::_no_safepoint_check_flag); - _has_terminated = true; - Terminator_lock->notify(); - } - - // Thread destructor usually does this.. 
-  ThreadLocalStorage::set_thread(NULL);
-}
-
-
-void SuspendibleThreadSet::initialize_work() {
-  MutexLocker x(STS_init_lock);
-  if (!_initialized) {
-    _m = new Monitor(Mutex::leaf,
-                     "SuspendibleThreadSetLock", true);
-    _async = 0;
-    _async_stop = false;
-    _async_stopped = 0;
-    _initialized = true;
-  }
-}
-
-void SuspendibleThreadSet::join() {
-  initialize();
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag);
-  _async++;
-  assert(_async > 0, "Huh.");
-}
-
-void SuspendibleThreadSet::leave() {
-  assert(_initialized, "Must be initialized.");
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  _async--;
-  assert(_async >= 0, "Huh.");
-  if (_async_stop) _m->notify_all();
-}
-
-void SuspendibleThreadSet::yield(const char* id) {
-  assert(_initialized, "Must be initialized.");
-  if (_async_stop) {
-    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-    if (_async_stop) {
-      _async_stopped++;
-      assert(_async_stopped > 0, "Huh.");
-      if (_async_stopped == _async) {
-        if (ConcGCYieldTimeout > 0) {
-          double now = os::elapsedTime();
-          guarantee((now - _suspend_all_start) * 1000.0 <
-                    (double)ConcGCYieldTimeout,
-                    "Long delay; whodunit?");
-        }
-      }
-      _m->notify_all();
-      while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag);
-      _async_stopped--;
-      assert(_async >= 0, "Huh");
-      _m->notify_all();
-    }
-  }
-}
-
-void SuspendibleThreadSet::suspend_all() {
-  initialize(); // If necessary.
-  if (ConcGCYieldTimeout > 0) {
-    _suspend_all_start = os::elapsedTime();
-  }
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  assert(!_async_stop, "Only one at a time.");
-  _async_stop = true;
-  while (_async_stopped < _async) _m->wait(Mutex::_no_safepoint_check_flag);
-}
-
-void SuspendibleThreadSet::resume_all() {
-  assert(_initialized, "Must be initialized.");
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  assert(_async_stopped == _async, "Huh.");
-  _async_stop = false;
-  _m->notify_all();
-}
-
-static void _sltLoop(JavaThread* thread, TRAPS) {
-  SurrogateLockerThread* slt = (SurrogateLockerThread*)thread;
-  slt->loop();
-}
-
-SurrogateLockerThread::SurrogateLockerThread() :
-  JavaThread(&_sltLoop),
-  _monitor(Mutex::nonleaf, "SLTMonitor"),
-  _buffer(empty)
-{}
-
-SurrogateLockerThread* SurrogateLockerThread::make(TRAPS) {
-  klassOop k =
-    SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_Thread(),
-                                      true, CHECK_NULL);
-  instanceKlassHandle klass (THREAD, k);
-  instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_NULL);
-
-  const char thread_name[] = "Surrogate Locker Thread (CMS)";
-  Handle string = java_lang_String::create_from_str(thread_name, CHECK_NULL);
-
-  // Initialize thread_oop to put it into the system threadGroup
-  Handle thread_group (THREAD, Universe::system_thread_group());
-  JavaValue result(T_VOID);
-  JavaCalls::call_special(&result, thread_oop,
-                          klass,
-                          vmSymbolHandles::object_initializer_name(),
-                          vmSymbolHandles::threadgroup_string_void_signature(),
-                          thread_group,
-                          string,
-                          CHECK_NULL);
-
-  SurrogateLockerThread* res;
-  {
-    MutexLocker mu(Threads_lock);
-    res = new SurrogateLockerThread();
-
-    // At this point it may be possible that no osthread was created for the
-    // JavaThread due to lack of memory. We would have to throw an exception
-    // in that case. However, since this must work and we do not allow
-    // exceptions anyway, check and abort if this fails.
-    if (res == NULL || res->osthread() == NULL) {
-      vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    "unable to create new native thread");
-    }
-    java_lang_Thread::set_thread(thread_oop(), res);
-    java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
-    java_lang_Thread::set_daemon(thread_oop());
-
-    res->set_threadObj(thread_oop());
-    Threads::add(res);
-    Thread::start(res);
-  }
-  os::yield(); // This seems to help with initial start-up of SLT
-  return res;
-}
-
-void SurrogateLockerThread::manipulatePLL(SLT_msg_type msg) {
-  MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
-  assert(_buffer == empty, "Should be empty");
-  assert(msg != empty, "empty message");
-  _buffer = msg;
-  while (_buffer != empty) {
-    _monitor.notify();
-    _monitor.wait(Mutex::_no_safepoint_check_flag);
-  }
-}
-
-// ======= Surrogate Locker Thread =============
-
-void SurrogateLockerThread::loop() {
-  BasicLock pll_basic_lock;
-  SLT_msg_type msg;
-  debug_only(unsigned int owned = 0;)
-
-  while (/* !isTerminated() */ 1) {
-    {
-      MutexLocker x(&_monitor);
-      // Since we are a JavaThread, we can't be here at a safepoint.
-      assert(!SafepointSynchronize::is_at_safepoint(),
-             "SLT is a JavaThread");
-      // wait for msg buffer to become non-empty
-      while (_buffer == empty) {
-        _monitor.notify();
-        _monitor.wait();
-      }
-      msg = _buffer;
-    }
-    switch(msg) {
-      case acquirePLL: {
-        instanceRefKlass::acquire_pending_list_lock(&pll_basic_lock);
-        debug_only(owned++;)
-        break;
-      }
-      case releaseAndNotifyPLL: {
-        assert(owned > 0, "Don't have PLL");
-        instanceRefKlass::release_and_notify_pending_list_lock(&pll_basic_lock);
-        debug_only(owned--;)
-        break;
-      }
-      case empty:
-      default: {
-        guarantee(false,"Unexpected message in _buffer");
-        break;
-      }
-    }
-    {
-      MutexLocker x(&_monitor);
-      // Since we are a JavaThread, we can't be here at a safepoint.
-      assert(!SafepointSynchronize::is_at_safepoint(),
-             "SLT is a JavaThread");
-      _buffer = empty;
-      _monitor.notify();
-    }
-  }
-  assert(!_monitor.owned_by_self(), "Should unlock before exit.");
-}
-
-
-// ===== STS Access From Outside CGCT =====
-
-void ConcurrentGCThread::stsYield(const char* id) {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-          "only a conc GC thread can call this" );
-  _sts.yield(id);
-}
-
-bool ConcurrentGCThread::stsShouldYield() {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-          "only a conc GC thread can call this" );
-  return _sts.should_yield();
-}
-
-void ConcurrentGCThread::stsJoin() {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-          "only a conc GC thread can call this" );
-  _sts.join();
-}
-
-void ConcurrentGCThread::stsLeave() {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-          "only a conc GC thread can call this" );
-  _sts.leave();
-}
--- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.hpp 2009-08-01 04:21:45.650329329 +0100
+++ /dev/null 2009-07-29 04:07:07.141863912 +0100
@@ -1,167 +0,0 @@
-/*
- * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-class VoidClosure;
-
-// A SuspendibleThreadSet is (obviously) a set of threads that can be
-// suspended. A thread can join and later leave the set, and periodically
-// yield. If some thread (not in the set) requests, via suspend_all, that
-// the threads be suspended, then the requesting thread is blocked until
-// all the threads in the set have yielded or left the set. (Threads may
-// not enter the set when an attempted suspension is in progress.) The
-// suspending thread later calls resume_all, allowing the suspended threads
-// to continue.
-
-class SuspendibleThreadSet {
-  Monitor* _m;
-  int _async;
-  bool _async_stop;
-  int _async_stopped;
-  bool _initialized;
-  double _suspend_all_start;
-
-  void initialize_work();
-
- public:
-  SuspendibleThreadSet() : _initialized(false) {}
-
-  // Add the current thread to the set. May block if a suspension
-  // is in progress.
-  void join();
-  // Removes the current thread from the set.
-  void leave();
-  // Returns "true" iff an suspension is in progress.
-  bool should_yield() { return _async_stop; }
-  // Suspends the current thread if a suspension is in progress (for
-  // the duration of the suspension.)
-  void yield(const char* id);
-  // Return when all threads in the set are suspended.
-  void suspend_all();
-  // Allow suspended threads to resume.
-  void resume_all();
-  // Redundant initializations okay.
-  void initialize() {
-    // Double-check dirty read idiom.
-    if (!_initialized) initialize_work();
-  }
-};
-
-
-class ConcurrentGCThread: public NamedThread {
-  friend class VMStructs;
-
-protected:
-  static bool _should_terminate;
-  static bool _has_terminated;
-
-  enum CGC_flag_type {
-    CGC_nil = 0x0,
-    CGC_dont_suspend = 0x1,
-    CGC_CGC_safepoint = 0x2,
-    CGC_VM_safepoint = 0x4
-  };
-
-  static int _CGC_flag;
-
-  static bool CGC_flag_is_set(int b) { return (_CGC_flag & b) != 0; }
-  static int set_CGC_flag(int b) { return _CGC_flag |= b; }
-  static int reset_CGC_flag(int b) { return _CGC_flag &= ~b; }
-
-  void stopWorldAndDo(VoidClosure* op);
-
-  // All instances share this one set.
-  static SuspendibleThreadSet _sts;
-
-  // Create and start the thread (setting it's priority high.)
-  void create_and_start();
-
-  // Do initialization steps in the thread: record stack base and size,
-  // init thread local storage, set JNI handle block.
-  void initialize_in_thread();
-
-  // Wait until Universe::is_fully_initialized();
-  void wait_for_universe_init();
-
-  // Record that the current thread is terminating, and will do more
-  // concurrent work.
-  void terminate();
-
-public:
-  // Constructor
-
-  ConcurrentGCThread();
-  ~ConcurrentGCThread() {} // Exists to call NamedThread destructor.
-
-  // Tester
-  bool is_ConcurrentGC_thread() const { return true; }
-
-  static void safepoint_synchronize();
-  static void safepoint_desynchronize();
-
-  // All overridings should probably do _sts::yield, but we allow
-  // overriding for distinguished debugging messages. Default is to do
-  // nothing.
-  virtual void yield() {}
-
-  bool should_yield() { return _sts.should_yield(); }
-
-  // they are prefixed by sts since there are already yield() and
-  // should_yield() (non-static) methods in this class and it was an
-  // easy way to differentiate them.
-  static void stsYield(const char* id);
-  static bool stsShouldYield();
-  static void stsJoin();
-  static void stsLeave();
-
-};
-
-// The SurrogateLockerThread is used by concurrent GC threads for
-// manipulating Java monitors, in particular, currently for
-// manipulating the pending_list_lock. XXX
-class SurrogateLockerThread: public JavaThread {
-  friend class VMStructs;
- public:
-  enum SLT_msg_type {
-    empty = 0,           // no message
-    acquirePLL,          // acquire pending list lock
-    releaseAndNotifyPLL  // notify and release pending list lock
-  };
- private:
-  // the following are shared with the CMSThread
-  SLT_msg_type _buffer;  // communication buffer
-  Monitor      _monitor; // monitor controlling buffer
-  BasicLock    _basicLock; // used for PLL locking
-
- public:
-  static SurrogateLockerThread* make(TRAPS);
-
-  SurrogateLockerThread();
-
-  bool is_hidden_from_external_view() const { return true; }
-
-  void loop(); // main method
-
-  void manipulatePLL(SLT_msg_type msg);
-
-};